diff --git a/CITATION.cff b/CITATION.cff index 62b75a422a..3e0106f767 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -1,8 +1,8 @@ -cff-version: 1.2.0 -message: "If you use this software, please cite it as below." -authors: - - name: "MMPose Contributors" -title: "OpenMMLab Pose Estimation Toolbox and Benchmark" -date-released: 2020-08-31 -url: "https://github.com/open-mmlab/mmpose" -license: Apache-2.0 +cff-version: 1.2.0 +message: "If you use this software, please cite it as below." +authors: + - name: "MMPose Contributors" +title: "OpenMMLab Pose Estimation Toolbox and Benchmark" +date-released: 2020-08-31 +url: "https://github.com/open-mmlab/mmpose" +license: Apache-2.0 diff --git a/LICENSE b/LICENSE index b712427afe..a17f705f20 100644 --- a/LICENSE +++ b/LICENSE @@ -1,203 +1,203 @@ -Copyright 2018-2020 Open-MMLab. All rights reserved. - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2018-2020 Open-MMLab. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +Copyright 2018-2020 Open-MMLab. All rights reserved. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018-2020 Open-MMLab. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/MANIFEST.in b/MANIFEST.in index c6d3090b1c..317437fc9c 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,6 +1,6 @@ -include requirements/*.txt -include mmpose/.mim/model-index.yml -include mmpose/.mim/dataset-index.yml -recursive-include mmpose/.mim/configs *.py *.yml -recursive-include mmpose/.mim/tools *.py *.sh -recursive-include mmpose/.mim/demo *.py +include requirements/*.txt +include mmpose/.mim/model-index.yml +include mmpose/.mim/dataset-index.yml +recursive-include mmpose/.mim/configs *.py *.yml +recursive-include mmpose/.mim/tools *.py *.sh +recursive-include mmpose/.mim/demo *.py diff --git a/MYREADME.md b/MYREADME.md new file mode 100644 index 0000000000..632fb6317b --- /dev/null +++ b/MYREADME.md @@ -0,0 +1,38 @@ +# OCTSB1 +``` +11/23 03:46:19 - mmengine - INFO - Epoch(train) [1258][100/163] lr: 1.000000e-05 eta: 0:03:22 time: 0.520031 data_time: 0.012644 memory: 14376 loss: 0.000183 loss/heatmap: 0.000112 loss/displacement: 0.000071 +11/23 03:46:23 - mmengine - INFO - Exp name: dekr_testmodel-w32_8xb10-140e_octsegflat-512x512_20231121_182822 +11/23 03:46:45 - mmengine - INFO - Epoch(train) [1258][150/163] lr: 1.000000e-05 eta: 0:02:56 time: 0.521060 data_time: 0.012762 memory: 14376 loss: 0.000178 loss/heatmap: 0.000116 loss/displacement: 0.000062 +11/23 03:46:51 - mmengine - INFO - Exp name: dekr_testmodel-w32_8xb10-140e_octsegflat-512x512_20231121_182822 +11/23 03:47:17 - mmengine - INFO - Epoch(train) [1259][ 50/163] lr: 1.000000e-05 eta: 0:02:23 time: 0.523196 data_time: 0.016144 memory: 14376 loss: 0.000186 loss/heatmap: 0.000123 loss/displacement: 0.000063 +11/23 03:47:44 - mmengine - INFO - Epoch(train) [1259][100/163] lr: 1.000000e-05 eta: 0:01:57 time: 0.523867 data_time: 0.016063 memory: 14376 loss: 0.000195 loss/heatmap: 0.000125 loss/displacement: 0.000071 +11/23 03:48:10 - mmengine - INFO - Epoch(train) [1259][150/163] lr: 1.000000e-05 eta: 0:01:31 time: 0.520021 data_time: 0.012742 memory: 14376 loss: 0.000180 loss/heatmap: 0.000116 loss/displacement: 0.000064 +11/23 03:48:16 - mmengine - INFO - Exp name: dekr_testmodel-w32_8xb10-140e_octsegflat-512x512_20231121_182822 +11/23 03:48:42 - mmengine - INFO - Epoch(train) [1260][ 50/163] lr: 1.000000e-05 eta: 0:00:58 time: 0.521599 data_time: 0.015162 memory: 14376 loss: 0.000178 loss/heatmap: 0.000114 loss/displacement: 0.000064 +11/23 03:49:08 - mmengine - INFO - Epoch(train) [1260][100/163] lr: 1.000000e-05 eta: 0:00:32 time: 0.521193 data_time: 0.013141 memory: 14376 loss: 0.000211 loss/heatmap: 0.000133 loss/displacement: 0.000078 +11/23 03:49:35 - mmengine - INFO - Epoch(train) [1260][150/163] lr: 1.000000e-05 eta: 0:00:06 time: 0.522587 data_time: 0.013465 memory: 14376 loss: 0.000189 loss/heatmap: 0.000120 loss/displacement: 0.000068 + +... + + +Loading and preparing results... +DONE (t=2.05s) +creating index... +index created! +Running per image evaluation... +Evaluate annotation type *keypoints* +DONE (t=1.41s). +Accumulating evaluation results... +DONE (t=0.22s). 
+ Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets= 20 ] = 0.645 + Average Precision (AP) @[ IoU=0.50 | area= all | maxDets= 20 ] = 0.650 + Average Precision (AP) @[ IoU=0.75 | area= all | maxDets= 20 ] = 0.650 + Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets= 20 ] = -1.000 + Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets= 20 ] = 0.933 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 20 ] = 0.998 + Average Recall (AR) @[ IoU=0.50 | area= all | maxDets= 20 ] = 1.000 + Average Recall (AR) @[ IoU=0.75 | area= all | maxDets= 20 ] = 1.000 + Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets= 20 ] = -1.000 + Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets= 20 ] = 0.998 +11/23 04:00:28 - mmengine - INFO - Epoch(val) [1260][5715/5715] coco/AP: 0.645355 coco/AP .5: 0.650285 coco/AP .75: 0.650285 coco/AP (M): -1.000000 coco/AP (L): 0.932906 coco/AR: 0.998463 coco/AR .5: 1.000000 coco/AR .75: 1.000000 coco/AR (M): -1.000000 coco/AR (L): 0.998463 data_time: 0.000598 time: 0.110494 +``` \ No newline at end of file diff --git a/README.md b/README.md index b250d570b3..1c5f9d131f 100644 --- a/README.md +++ b/README.md @@ -1,368 +1,368 @@ -
- OpenMMLab website (HOT) | OpenMMLab platform (TRY IT OUT)
- -[![Documentation](https://readthedocs.org/projects/mmpose/badge/?version=latest)](https://mmpose.readthedocs.io/en/latest/?badge=latest) -[![actions](https://github.com/open-mmlab/mmpose/workflows/build/badge.svg)](https://github.com/open-mmlab/mmpose/actions) -[![codecov](https://codecov.io/gh/open-mmlab/mmpose/branch/latest/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmpose) -[![PyPI](https://img.shields.io/pypi/v/mmpose)](https://pypi.org/project/mmpose/) -[![LICENSE](https://img.shields.io/github/license/open-mmlab/mmpose.svg)](https://github.com/open-mmlab/mmpose/blob/main/LICENSE) -[![Average time to resolve an issue](https://isitmaintained.com/badge/resolution/open-mmlab/mmpose.svg)](https://github.com/open-mmlab/mmpose/issues) -[![Percentage of issues still open](https://isitmaintained.com/badge/open/open-mmlab/mmpose.svg)](https://github.com/open-mmlab/mmpose/issues) - -[📘Documentation](https://mmpose.readthedocs.io/en/latest/) | -[🛠️Installation](https://mmpose.readthedocs.io/en/latest/installation.html) | -[👀Model Zoo](https://mmpose.readthedocs.io/en/latest/model_zoo.html) | -[📜Papers](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html) | -[🆕Update News](https://mmpose.readthedocs.io/en/latest/notes/changelog.html) | -[🤔Reporting Issues](https://github.com/open-mmlab/mmpose/issues/new/choose) | -[🔥RTMPose](/projects/rtmpose/) - -
- -## Introduction - -English | [简体中文](README_CN.md) - -MMPose is an open-source toolbox for pose estimation based on PyTorch. -It is a part of the [OpenMMLab project](https://github.com/open-mmlab). - -The main branch works with **PyTorch 1.8+**. - -https://user-images.githubusercontent.com/15977946/124654387-0fd3c500-ded1-11eb-84f6-24eeddbf4d91.mp4 - -
-Major Features - -- **Support diverse tasks** - - We support a wide spectrum of mainstream pose analysis tasks in current research community, including 2d multi-person human pose estimation, 2d hand pose estimation, 2d face landmark detection, 133 keypoint whole-body human pose estimation, 3d human mesh recovery, fashion landmark detection and animal pose estimation. - See [Demo](demo/docs/en) for more information. - -- **Higher efficiency and higher accuracy** - - MMPose implements multiple state-of-the-art (SOTA) deep learning models, including both top-down & bottom-up approaches. We achieve faster training speed and higher accuracy than other popular codebases, such as [HRNet](https://github.com/leoxiaobin/deep-high-resolution-net.pytorch). - See [benchmark.md](docs/en/notes/benchmark.md) for more information. - -- **Support for various datasets** - - The toolbox directly supports multiple popular and representative datasets, COCO, AIC, MPII, MPII-TRB, OCHuman etc. - See [dataset_zoo](docs/en/dataset_zoo) for more information. - -- **Well designed, tested and documented** - - We decompose MMPose into different components and one can easily construct a customized - pose estimation framework by combining different modules. - We provide detailed documentation and API reference, as well as unittests. - -
- -## What's New - -- We are glad to support 3 new datasets: - - (CVPR 2023) [Human-Art](https://github.com/IDEA-Research/HumanArt) - - (CVPR 2022) [Animal Kingdom](https://github.com/sutdcv/Animal-Kingdom) - - (AAAI 2020) [LaPa](https://github.com/JDAI-CV/lapa-dataset/) - -![image](https://github.com/open-mmlab/mmpose/assets/13503330/c9171dbb-7e7a-4c39-98e3-c92932182efb) - -- Welcome to [*projects of MMPose*](/projects/README.md), where you can access to the latest features of MMPose, and share your ideas and codes with the community at once. Contribution to MMPose will be simple and smooth: - - - Provide an easy and agile way to integrate algorithms, features and applications into MMPose - - Allow flexible code structure and style; only need a short code review process - - Build individual projects with full power of MMPose but not bound up with heavy frameworks - - Checkout new projects: - - [RTMPose](/projects/rtmpose/) - - [YOLOX-Pose](/projects/yolox_pose/) - - [MMPose4AIGC](/projects/mmpose4aigc/) - - [Simple Keypoints](/projects/skps/) - - Become a contributors and make MMPose greater. Start your journey from the [example project](/projects/example_project/) - -
- -- 2023-07-04: MMPose [v1.1.0](https://github.com/open-mmlab/mmpose/releases/tag/v1.1.0) is officially released, with the main updates including: - - - Support new datasets: Human-Art, Animal Kingdom and LaPa. - - Support new config type that is more user-friendly and flexible. - - Improve RTMPose with better performance. - - Migrate 3D pose estimation models on h36m. - - Inference speedup and webcam inference with all demo scripts. - - Please refer to the [release notes](https://github.com/open-mmlab/mmpose/releases/tag/v1.1.0) for more updates brought by MMPose v1.1.0! - -## 0.x / 1.x Migration - -MMPose v1.0.0 is a major update, including many API and config file changes. Currently, a part of the algorithms have been migrated to v1.0.0, and the remaining algorithms will be completed in subsequent versions. We will show the migration progress in the following list. - -
-Migration Progress - -| Algorithm | Status | -| :-------------------------------- | :---------: | -| MTUT (CVPR 2019) | | -| MSPN (ArXiv 2019) | done | -| InterNet (ECCV 2020) | | -| DEKR (CVPR 2021) | done | -| HigherHRNet (CVPR 2020) | | -| DeepPose (CVPR 2014) | done | -| RLE (ICCV 2021) | done | -| SoftWingloss (TIP 2021) | done | -| VideoPose3D (CVPR 2019) | done | -| Hourglass (ECCV 2016) | done | -| LiteHRNet (CVPR 2021) | done | -| AdaptiveWingloss (ICCV 2019) | done | -| SimpleBaseline2D (ECCV 2018) | done | -| PoseWarper (NeurIPS 2019) | | -| SimpleBaseline3D (ICCV 2017) | done | -| HMR (CVPR 2018) | | -| UDP (CVPR 2020) | done | -| VIPNAS (CVPR 2021) | done | -| Wingloss (CVPR 2018) | done | -| DarkPose (CVPR 2020) | done | -| Associative Embedding (NIPS 2017) | in progress | -| VoxelPose (ECCV 2020) | | -| RSN (ECCV 2020) | done | -| CID (CVPR 2022) | done | -| CPM (CVPR 2016) | done | -| HRNet (CVPR 2019) | done | -| HRNetv2 (TPAMI 2019) | done | -| SCNet (CVPR 2020) | done | - -
- -If your algorithm has not been migrated, you can continue to use the [0.x branch](https://github.com/open-mmlab/mmpose/tree/0.x) and [old documentation](https://mmpose.readthedocs.io/en/0.x/). - -## Installation - -Please refer to [installation.md](https://mmpose.readthedocs.io/en/latest/installation.html) for more detailed installation and dataset preparation. - -## Getting Started - -We provided a series of tutorials about the basic usage of MMPose for new users: - -1. For the basic usage of MMPose: - - - [A 20-minute Tour to MMPose](https://mmpose.readthedocs.io/en/latest/guide_to_framework.html) - - [Demos](https://mmpose.readthedocs.io/en/latest/demos.html) - - [Inference](https://mmpose.readthedocs.io/en/latest/user_guides/inference.html) - - [Configs](https://mmpose.readthedocs.io/en/latest/user_guides/configs.html) - - [Prepare Datasets](https://mmpose.readthedocs.io/en/latest/user_guides/prepare_datasets.html) - - [Train and Test](https://mmpose.readthedocs.io/en/latest/user_guides/train_and_test.html) - -2. For developers who wish to develop based on MMPose: - - - [Learn about Codecs](https://mmpose.readthedocs.io/en/latest/advanced_guides/codecs.html) - - [Dataflow in MMPose](https://mmpose.readthedocs.io/en/latest/advanced_guides/dataflow.html) - - [Implement New Models](https://mmpose.readthedocs.io/en/latest/advanced_guides/implement_new_models.html) - - [Customize Datasets](https://mmpose.readthedocs.io/en/latest/advanced_guides/customize_datasets.html) - - [Customize Data Transforms](https://mmpose.readthedocs.io/en/latest/advanced_guides/customize_transforms.html) - - [Customize Optimizer](https://mmpose.readthedocs.io/en/latest/advanced_guides/customize_optimizer.html) - - [Customize Logging](https://mmpose.readthedocs.io/en/latest/advanced_guides/customize_logging.html) - - [How to Deploy](https://mmpose.readthedocs.io/en/latest/advanced_guides/how_to_deploy.html) - - [Model Analysis](https://mmpose.readthedocs.io/en/latest/advanced_guides/model_analysis.html) - - [Migration Guide](https://mmpose.readthedocs.io/en/latest/migration.html) - -3. For researchers and developers who are willing to contribute to MMPose: - - - [Contribution Guide](https://mmpose.readthedocs.io/en/latest/contribution_guide.html) - -4. For some common issues, we provide a FAQ list: - - - [FAQ](https://mmpose.readthedocs.io/en/latest/faq.html) - -## Model Zoo - -Results and models are available in the **README.md** of each method's config directory. -A summary can be found in the [Model Zoo](https://mmpose.readthedocs.io/en/latest/model_zoo.html) page. - -
-Supported algorithms: - -- [x] [DeepPose](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html#deeppose-cvpr-2014) (CVPR'2014) -- [x] [CPM](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#cpm-cvpr-2016) (CVPR'2016) -- [x] [Hourglass](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#hourglass-eccv-2016) (ECCV'2016) -- [x] [SimpleBaseline3D](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html#simplebaseline3d-iccv-2017) (ICCV'2017) -- [ ] [Associative Embedding](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html#associative-embedding-nips-2017) (NeurIPS'2017) -- [x] [SimpleBaseline2D](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html#simplebaseline2d-eccv-2018) (ECCV'2018) -- [x] [DSNT](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html#dsnt-2018) (ArXiv'2021) -- [x] [HRNet](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#hrnet-cvpr-2019) (CVPR'2019) -- [x] [IPR](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html#ipr-eccv-2018) (ECCV'2018) -- [x] [VideoPose3D](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html#videopose3d-cvpr-2019) (CVPR'2019) -- [x] [HRNetv2](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#hrnetv2-tpami-2019) (TPAMI'2019) -- [x] [MSPN](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#mspn-arxiv-2019) (ArXiv'2019) -- [x] [SCNet](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#scnet-cvpr-2020) (CVPR'2020) -- [ ] [HigherHRNet](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#higherhrnet-cvpr-2020) (CVPR'2020) -- [x] [RSN](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#rsn-eccv-2020) (ECCV'2020) -- [ ] [InterNet](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html#internet-eccv-2020) (ECCV'2020) -- [ ] [VoxelPose](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html#voxelpose-eccv-2020) (ECCV'2020) -- [x] [LiteHRNet](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#litehrnet-cvpr-2021) (CVPR'2021) -- [x] [ViPNAS](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#vipnas-cvpr-2021) (CVPR'2021) -- [x] [Debias-IPR](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html#debias-ipr-iccv-2021) (ICCV'2021) -- [x] [SimCC](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html#simcc-eccv-2022) (ECCV'2022) - -
-Supported techniques: - -- [x] [FPN](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/techniques.html#fpn-cvpr-2017) (CVPR'2017) -- [x] [FP16](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/techniques.html#fp16-arxiv-2017) (ArXiv'2017) -- [x] [Wingloss](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/techniques.html#wingloss-cvpr-2018) (CVPR'2018) -- [x] [AdaptiveWingloss](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/techniques.html#adaptivewingloss-iccv-2019) (ICCV'2019) -- [x] [DarkPose](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/techniques.html#darkpose-cvpr-2020) (CVPR'2020) -- [x] [UDP](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/techniques.html#udp-cvpr-2020) (CVPR'2020) -- [x] [Albumentations](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/techniques.html#albumentations-information-2020) (Information'2020) -- [x] [SoftWingloss](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/techniques.html#softwingloss-tip-2021) (TIP'2021) -- [x] [RLE](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/techniques.html#rle-iccv-2021) (ICCV'2021) - -
-Supported datasets: - -- [x] [AFLW](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#aflw-iccvw-2011) \[[homepage](https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/aflw/)\] (ICCVW'2011) -- [x] [sub-JHMDB](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#jhmdb-iccv-2013) \[[homepage](http://jhmdb.is.tue.mpg.de/dataset)\] (ICCV'2013) -- [x] [COFW](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#cofw-iccv-2013) \[[homepage](http://www.vision.caltech.edu/xpburgos/ICCV13/)\] (ICCV'2013) -- [x] [MPII](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#mpii-cvpr-2014) \[[homepage](http://human-pose.mpi-inf.mpg.de/)\] (CVPR'2014) -- [x] [Human3.6M](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#human3-6m-tpami-2014) \[[homepage](http://vision.imar.ro/human3.6m/description.php)\] (TPAMI'2014) -- [x] [COCO](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#coco-eccv-2014) \[[homepage](http://cocodataset.org/)\] (ECCV'2014) -- [x] [CMU Panoptic](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#cmu-panoptic-iccv-2015) \[[homepage](http://domedb.perception.cs.cmu.edu/)\] (ICCV'2015) -- [x] [DeepFashion](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#deepfashion-cvpr-2016) \[[homepage](http://mmlab.ie.cuhk.edu.hk/projects/DeepFashion/LandmarkDetection.html)\] (CVPR'2016) -- [x] [300W](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#300w-imavis-2016) \[[homepage](https://ibug.doc.ic.ac.uk/resources/300-W/)\] (IMAVIS'2016) -- [x] [RHD](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#rhd-iccv-2017) \[[homepage](https://lmb.informatik.uni-freiburg.de/resources/datasets/RenderedHandposeDataset.en.html)\] (ICCV'2017) -- [x] [CMU Panoptic HandDB](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#cmu-panoptic-handdb-cvpr-2017) \[[homepage](http://domedb.perception.cs.cmu.edu/handdb.html)\] (CVPR'2017) -- [x] [AI Challenger](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#ai-challenger-arxiv-2017) \[[homepage](https://github.com/AIChallenger/AI_Challenger_2017)\] (ArXiv'2017) -- [x] [MHP](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#mhp-acm-mm-2018) \[[homepage](https://lv-mhp.github.io/dataset)\] (ACM MM'2018) -- [x] [WFLW](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#wflw-cvpr-2018) \[[homepage](https://wywu.github.io/projects/LAB/WFLW.html)\] (CVPR'2018) -- [x] [PoseTrack18](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#posetrack18-cvpr-2018) \[[homepage](https://posetrack.net/users/download.php)\] (CVPR'2018) -- [x] [OCHuman](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#ochuman-cvpr-2019) \[[homepage](https://github.com/liruilong940607/OCHumanApi)\] (CVPR'2019) -- [x] [CrowdPose](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#crowdpose-cvpr-2019) \[[homepage](https://github.com/Jeff-sjtu/CrowdPose)\] (CVPR'2019) -- [x] [MPII-TRB](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#mpii-trb-iccv-2019) \[[homepage](https://github.com/kennymckormick/Triplet-Representation-of-human-Body)\] (ICCV'2019) -- [x] [FreiHand](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#freihand-iccv-2019) \[[homepage](https://lmb.informatik.uni-freiburg.de/projects/freihand/)\] 
(ICCV'2019) -- [x] [Animal-Pose](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#animal-pose-iccv-2019) \[[homepage](https://sites.google.com/view/animal-pose/)\] (ICCV'2019) -- [x] [OneHand10K](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#onehand10k-tcsvt-2019) \[[homepage](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html)\] (TCSVT'2019) -- [x] [Vinegar Fly](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#vinegar-fly-nature-methods-2019) \[[homepage](https://github.com/jgraving/DeepPoseKit-Data)\] (Nature Methods'2019) -- [x] [Desert Locust](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#desert-locust-elife-2019) \[[homepage](https://github.com/jgraving/DeepPoseKit-Data)\] (Elife'2019) -- [x] [Grévy’s Zebra](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#grevys-zebra-elife-2019) \[[homepage](https://github.com/jgraving/DeepPoseKit-Data)\] (Elife'2019) -- [x] [ATRW](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#atrw-acm-mm-2020) \[[homepage](https://cvwc2019.github.io/challenge.html)\] (ACM MM'2020) -- [x] [Halpe](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#halpe-cvpr-2020) \[[homepage](https://github.com/Fang-Haoshu/Halpe-FullBody/)\] (CVPR'2020) -- [x] [COCO-WholeBody](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#coco-wholebody-eccv-2020) \[[homepage](https://github.com/jin-s13/COCO-WholeBody/)\] (ECCV'2020) -- [x] [MacaquePose](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#macaquepose-biorxiv-2020) \[[homepage](http://www.pri.kyoto-u.ac.jp/datasets/macaquepose/index.html)\] (bioRxiv'2020) -- [x] [InterHand2.6M](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#interhand2-6m-eccv-2020) \[[homepage](https://mks0601.github.io/InterHand2.6M/)\] (ECCV'2020) -- [x] [AP-10K](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#ap-10k-neurips-2021) \[[homepage](https://github.com/AlexTheBad/AP-10K)\] (NeurIPS'2021) -- [x] [Horse-10](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#horse-10-wacv-2021) \[[homepage](http://www.mackenziemathislab.org/horse10)\] (WACV'2021) -- [x] [Human-Art](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#human-art-cvpr-2023) \[[homepage](https://idea-research.github.io/HumanArt/)\] (CVPR'2023) -- [x] [LaPa](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#lapa-aaai-2020) \[[homepage](https://github.com/JDAI-CV/lapa-dataset)\] (AAAI'2020) - -
-Supported backbones: - -- [x] [AlexNet](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#alexnet-neurips-2012) (NeurIPS'2012) -- [x] [VGG](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#vgg-iclr-2015) (ICLR'2015) -- [x] [ResNet](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#resnet-cvpr-2016) (CVPR'2016) -- [x] [ResNext](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#resnext-cvpr-2017) (CVPR'2017) -- [x] [SEResNet](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#seresnet-cvpr-2018) (CVPR'2018) -- [x] [ShufflenetV1](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#shufflenetv1-cvpr-2018) (CVPR'2018) -- [x] [ShufflenetV2](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#shufflenetv2-eccv-2018) (ECCV'2018) -- [x] [MobilenetV2](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#mobilenetv2-cvpr-2018) (CVPR'2018) -- [x] [ResNetV1D](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#resnetv1d-cvpr-2019) (CVPR'2019) -- [x] [ResNeSt](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#resnest-arxiv-2020) (ArXiv'2020) -- [x] [Swin](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#swin-cvpr-2021) (CVPR'2021) -- [x] [HRFormer](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#hrformer-nips-2021) (NIPS'2021) -- [x] [PVT](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#pvt-iccv-2021) (ICCV'2021) -- [x] [PVTV2](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#pvtv2-cvmj-2022) (CVMJ'2022) - -
- -### Model Request - -We will keep up with the latest progress of the community, and support more popular algorithms and frameworks. If you have any feature requests, please feel free to leave a comment in [MMPose Roadmap](https://github.com/open-mmlab/mmpose/issues/2258). - -## Contributing - -We appreciate all contributions to improve MMPose. Please refer to [CONTRIBUTING.md](https://mmpose.readthedocs.io/en/latest/contribution_guide.html) for the contributing guideline. - -## Acknowledgement - -MMPose is an open source project that is contributed by researchers and engineers from various colleges and companies. -We appreciate all the contributors who implement their methods or add new features, as well as users who give valuable feedbacks. -We wish that the toolbox and benchmark could serve the growing research community by providing a flexible toolkit to reimplement existing methods and develop their own new models. - -## Citation - -If you find this project useful in your research, please consider cite: - -```bibtex -@misc{mmpose2020, - title={OpenMMLab Pose Estimation Toolbox and Benchmark}, - author={MMPose Contributors}, - howpublished = {\url{https://github.com/open-mmlab/mmpose}}, - year={2020} -} -``` - -## License - -This project is released under the [Apache 2.0 license](LICENSE). - -## Projects in OpenMMLab - -- [MMEngine](https://github.com/open-mmlab/mmengine): OpenMMLab foundational library for training deep learning models. -- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision. -- [MMPreTrain](https://github.com/open-mmlab/mmpretrain): OpenMMLab pre-training toolbox and benchmark. -- [MMagic](https://github.com/open-mmlab/mmagic): Open**MM**Lab **A**dvanced, **G**enerative and **I**ntelligent **C**reation toolbox. -- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox and benchmark. -- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection. -- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark. -- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab video perception toolbox and benchmark. -- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox and benchmark. -- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab text detection, recognition, and understanding toolbox. -- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark. -- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 3D human parametric model toolbox and benchmark. -- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab fewshot learning toolbox and benchmark. -- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation action understanding toolbox and benchmark. -- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab optical flow toolbox and benchmark. -- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab Model Deployment Framework. -- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab model compression toolbox and benchmark. -- [MIM](https://github.com/open-mmlab/mim): MIM installs OpenMMLab packages. -- [Playground](https://github.com/open-mmlab/playground): A central hub for gathering and showcasing amazing projects built upon OpenMMLab. +
+ OpenMMLab website (HOT) | OpenMMLab platform (TRY IT OUT)
+ +[![Documentation](https://readthedocs.org/projects/mmpose/badge/?version=latest)](https://mmpose.readthedocs.io/en/latest/?badge=latest) +[![actions](https://github.com/open-mmlab/mmpose/workflows/build/badge.svg)](https://github.com/open-mmlab/mmpose/actions) +[![codecov](https://codecov.io/gh/open-mmlab/mmpose/branch/latest/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmpose) +[![PyPI](https://img.shields.io/pypi/v/mmpose)](https://pypi.org/project/mmpose/) +[![LICENSE](https://img.shields.io/github/license/open-mmlab/mmpose.svg)](https://github.com/open-mmlab/mmpose/blob/main/LICENSE) +[![Average time to resolve an issue](https://isitmaintained.com/badge/resolution/open-mmlab/mmpose.svg)](https://github.com/open-mmlab/mmpose/issues) +[![Percentage of issues still open](https://isitmaintained.com/badge/open/open-mmlab/mmpose.svg)](https://github.com/open-mmlab/mmpose/issues) + +[📘Documentation](https://mmpose.readthedocs.io/en/latest/) | +[🛠️Installation](https://mmpose.readthedocs.io/en/latest/installation.html) | +[👀Model Zoo](https://mmpose.readthedocs.io/en/latest/model_zoo.html) | +[📜Papers](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html) | +[🆕Update News](https://mmpose.readthedocs.io/en/latest/notes/changelog.html) | +[🤔Reporting Issues](https://github.com/open-mmlab/mmpose/issues/new/choose) | +[🔥RTMPose](/projects/rtmpose/) + +
+ +## Introduction + +English | [简体中文](README_CN.md) + +MMPose is an open-source toolbox for pose estimation based on PyTorch. +It is a part of the [OpenMMLab project](https://github.com/open-mmlab). + +The main branch works with **PyTorch 1.8+**. + +https://user-images.githubusercontent.com/15977946/124654387-0fd3c500-ded1-11eb-84f6-24eeddbf4d91.mp4 + +
+Major Features + +- **Support diverse tasks** + + We support a wide spectrum of mainstream pose analysis tasks in current research community, including 2d multi-person human pose estimation, 2d hand pose estimation, 2d face landmark detection, 133 keypoint whole-body human pose estimation, 3d human mesh recovery, fashion landmark detection and animal pose estimation. + See [Demo](demo/docs/en) for more information. + +- **Higher efficiency and higher accuracy** + + MMPose implements multiple state-of-the-art (SOTA) deep learning models, including both top-down & bottom-up approaches. We achieve faster training speed and higher accuracy than other popular codebases, such as [HRNet](https://github.com/leoxiaobin/deep-high-resolution-net.pytorch). + See [benchmark.md](docs/en/notes/benchmark.md) for more information. + +- **Support for various datasets** + + The toolbox directly supports multiple popular and representative datasets, COCO, AIC, MPII, MPII-TRB, OCHuman etc. + See [dataset_zoo](docs/en/dataset_zoo) for more information. + +- **Well designed, tested and documented** + + We decompose MMPose into different components and one can easily construct a customized + pose estimation framework by combining different modules. + We provide detailed documentation and API reference, as well as unittests. + +
+ +## What's New + +- We are glad to support 3 new datasets: + - (CVPR 2023) [Human-Art](https://github.com/IDEA-Research/HumanArt) + - (CVPR 2022) [Animal Kingdom](https://github.com/sutdcv/Animal-Kingdom) + - (AAAI 2020) [LaPa](https://github.com/JDAI-CV/lapa-dataset/) + +![image](https://github.com/open-mmlab/mmpose/assets/13503330/c9171dbb-7e7a-4c39-98e3-c92932182efb) + +- Welcome to [*projects of MMPose*](/projects/README.md), where you can access to the latest features of MMPose, and share your ideas and codes with the community at once. Contribution to MMPose will be simple and smooth: + + - Provide an easy and agile way to integrate algorithms, features and applications into MMPose + - Allow flexible code structure and style; only need a short code review process + - Build individual projects with full power of MMPose but not bound up with heavy frameworks + - Checkout new projects: + - [RTMPose](/projects/rtmpose/) + - [YOLOX-Pose](/projects/yolox_pose/) + - [MMPose4AIGC](/projects/mmpose4aigc/) + - [Simple Keypoints](/projects/skps/) + - Become a contributors and make MMPose greater. Start your journey from the [example project](/projects/example_project/) + +
+ +- 2023-07-04: MMPose [v1.1.0](https://github.com/open-mmlab/mmpose/releases/tag/v1.1.0) is officially released, with the main updates including: + + - Support new datasets: Human-Art, Animal Kingdom and LaPa. + - Support new config type that is more user-friendly and flexible. + - Improve RTMPose with better performance. + - Migrate 3D pose estimation models on h36m. + - Inference speedup and webcam inference with all demo scripts. + + Please refer to the [release notes](https://github.com/open-mmlab/mmpose/releases/tag/v1.1.0) for more updates brought by MMPose v1.1.0! + +## 0.x / 1.x Migration + +MMPose v1.0.0 is a major update, including many API and config file changes. Currently, a part of the algorithms have been migrated to v1.0.0, and the remaining algorithms will be completed in subsequent versions. We will show the migration progress in the following list. + +
+Migration Progress + +| Algorithm | Status | +| :-------------------------------- | :---------: | +| MTUT (CVPR 2019) | | +| MSPN (ArXiv 2019) | done | +| InterNet (ECCV 2020) | | +| DEKR (CVPR 2021) | done | +| HigherHRNet (CVPR 2020) | | +| DeepPose (CVPR 2014) | done | +| RLE (ICCV 2021) | done | +| SoftWingloss (TIP 2021) | done | +| VideoPose3D (CVPR 2019) | done | +| Hourglass (ECCV 2016) | done | +| LiteHRNet (CVPR 2021) | done | +| AdaptiveWingloss (ICCV 2019) | done | +| SimpleBaseline2D (ECCV 2018) | done | +| PoseWarper (NeurIPS 2019) | | +| SimpleBaseline3D (ICCV 2017) | done | +| HMR (CVPR 2018) | | +| UDP (CVPR 2020) | done | +| VIPNAS (CVPR 2021) | done | +| Wingloss (CVPR 2018) | done | +| DarkPose (CVPR 2020) | done | +| Associative Embedding (NIPS 2017) | in progress | +| VoxelPose (ECCV 2020) | | +| RSN (ECCV 2020) | done | +| CID (CVPR 2022) | done | +| CPM (CVPR 2016) | done | +| HRNet (CVPR 2019) | done | +| HRNetv2 (TPAMI 2019) | done | +| SCNet (CVPR 2020) | done | + +
+ +If your algorithm has not been migrated, you can continue to use the [0.x branch](https://github.com/open-mmlab/mmpose/tree/0.x) and [old documentation](https://mmpose.readthedocs.io/en/0.x/). + +## Installation + +Please refer to [installation.md](https://mmpose.readthedocs.io/en/latest/installation.html) for more detailed installation and dataset preparation. + +## Getting Started + +We provided a series of tutorials about the basic usage of MMPose for new users: + +1. For the basic usage of MMPose: + + - [A 20-minute Tour to MMPose](https://mmpose.readthedocs.io/en/latest/guide_to_framework.html) + - [Demos](https://mmpose.readthedocs.io/en/latest/demos.html) + - [Inference](https://mmpose.readthedocs.io/en/latest/user_guides/inference.html) + - [Configs](https://mmpose.readthedocs.io/en/latest/user_guides/configs.html) + - [Prepare Datasets](https://mmpose.readthedocs.io/en/latest/user_guides/prepare_datasets.html) + - [Train and Test](https://mmpose.readthedocs.io/en/latest/user_guides/train_and_test.html) + +2. For developers who wish to develop based on MMPose: + + - [Learn about Codecs](https://mmpose.readthedocs.io/en/latest/advanced_guides/codecs.html) + - [Dataflow in MMPose](https://mmpose.readthedocs.io/en/latest/advanced_guides/dataflow.html) + - [Implement New Models](https://mmpose.readthedocs.io/en/latest/advanced_guides/implement_new_models.html) + - [Customize Datasets](https://mmpose.readthedocs.io/en/latest/advanced_guides/customize_datasets.html) + - [Customize Data Transforms](https://mmpose.readthedocs.io/en/latest/advanced_guides/customize_transforms.html) + - [Customize Optimizer](https://mmpose.readthedocs.io/en/latest/advanced_guides/customize_optimizer.html) + - [Customize Logging](https://mmpose.readthedocs.io/en/latest/advanced_guides/customize_logging.html) + - [How to Deploy](https://mmpose.readthedocs.io/en/latest/advanced_guides/how_to_deploy.html) + - [Model Analysis](https://mmpose.readthedocs.io/en/latest/advanced_guides/model_analysis.html) + - [Migration Guide](https://mmpose.readthedocs.io/en/latest/migration.html) + +3. For researchers and developers who are willing to contribute to MMPose: + + - [Contribution Guide](https://mmpose.readthedocs.io/en/latest/contribution_guide.html) + +4. For some common issues, we provide a FAQ list: + + - [FAQ](https://mmpose.readthedocs.io/en/latest/faq.html) + +## Model Zoo + +Results and models are available in the **README.md** of each method's config directory. +A summary can be found in the [Model Zoo](https://mmpose.readthedocs.io/en/latest/model_zoo.html) page. + +
+Supported algorithms:
+
+- [x] [DeepPose](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html#deeppose-cvpr-2014) (CVPR'2014)
+- [x] [CPM](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#cpm-cvpr-2016) (CVPR'2016)
+- [x] [Hourglass](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#hourglass-eccv-2016) (ECCV'2016)
+- [x] [SimpleBaseline3D](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html#simplebaseline3d-iccv-2017) (ICCV'2017)
+- [ ] [Associative Embedding](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html#associative-embedding-nips-2017) (NeurIPS'2017)
+- [x] [SimpleBaseline2D](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html#simplebaseline2d-eccv-2018) (ECCV'2018)
+- [x] [DSNT](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html#dsnt-2018) (ArXiv'2018)
+- [x] [HRNet](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#hrnet-cvpr-2019) (CVPR'2019)
+- [x] [IPR](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html#ipr-eccv-2018) (ECCV'2018)
+- [x] [VideoPose3D](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html#videopose3d-cvpr-2019) (CVPR'2019)
+- [x] [HRNetv2](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#hrnetv2-tpami-2019) (TPAMI'2019)
+- [x] [MSPN](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#mspn-arxiv-2019) (ArXiv'2019)
+- [x] [SCNet](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#scnet-cvpr-2020) (CVPR'2020)
+- [ ] [HigherHRNet](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#higherhrnet-cvpr-2020) (CVPR'2020)
+- [x] [RSN](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#rsn-eccv-2020) (ECCV'2020)
+- [ ] [InterNet](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html#internet-eccv-2020) (ECCV'2020)
+- [ ] [VoxelPose](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html#voxelpose-eccv-2020) (ECCV'2020)
+- [x] [LiteHRNet](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#litehrnet-cvpr-2021) (CVPR'2021)
+- [x] [ViPNAS](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#vipnas-cvpr-2021) (CVPR'2021)
+- [x] [Debias-IPR](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html#debias-ipr-iccv-2021) (ICCV'2021)
+- [x] [SimCC](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html#simcc-eccv-2022) (ECCV'2022)
+
+
+ +
+Supported techniques: + +- [x] [FPN](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/techniques.html#fpn-cvpr-2017) (CVPR'2017) +- [x] [FP16](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/techniques.html#fp16-arxiv-2017) (ArXiv'2017) +- [x] [Wingloss](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/techniques.html#wingloss-cvpr-2018) (CVPR'2018) +- [x] [AdaptiveWingloss](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/techniques.html#adaptivewingloss-iccv-2019) (ICCV'2019) +- [x] [DarkPose](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/techniques.html#darkpose-cvpr-2020) (CVPR'2020) +- [x] [UDP](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/techniques.html#udp-cvpr-2020) (CVPR'2020) +- [x] [Albumentations](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/techniques.html#albumentations-information-2020) (Information'2020) +- [x] [SoftWingloss](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/techniques.html#softwingloss-tip-2021) (TIP'2021) +- [x] [RLE](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/techniques.html#rle-iccv-2021) (ICCV'2021) + +
+ +
+Supported datasets: + +- [x] [AFLW](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#aflw-iccvw-2011) \[[homepage](https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/aflw/)\] (ICCVW'2011) +- [x] [sub-JHMDB](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#jhmdb-iccv-2013) \[[homepage](http://jhmdb.is.tue.mpg.de/dataset)\] (ICCV'2013) +- [x] [COFW](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#cofw-iccv-2013) \[[homepage](http://www.vision.caltech.edu/xpburgos/ICCV13/)\] (ICCV'2013) +- [x] [MPII](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#mpii-cvpr-2014) \[[homepage](http://human-pose.mpi-inf.mpg.de/)\] (CVPR'2014) +- [x] [Human3.6M](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#human3-6m-tpami-2014) \[[homepage](http://vision.imar.ro/human3.6m/description.php)\] (TPAMI'2014) +- [x] [COCO](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#coco-eccv-2014) \[[homepage](http://cocodataset.org/)\] (ECCV'2014) +- [x] [CMU Panoptic](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#cmu-panoptic-iccv-2015) \[[homepage](http://domedb.perception.cs.cmu.edu/)\] (ICCV'2015) +- [x] [DeepFashion](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#deepfashion-cvpr-2016) \[[homepage](http://mmlab.ie.cuhk.edu.hk/projects/DeepFashion/LandmarkDetection.html)\] (CVPR'2016) +- [x] [300W](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#300w-imavis-2016) \[[homepage](https://ibug.doc.ic.ac.uk/resources/300-W/)\] (IMAVIS'2016) +- [x] [RHD](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#rhd-iccv-2017) \[[homepage](https://lmb.informatik.uni-freiburg.de/resources/datasets/RenderedHandposeDataset.en.html)\] (ICCV'2017) +- [x] [CMU Panoptic HandDB](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#cmu-panoptic-handdb-cvpr-2017) \[[homepage](http://domedb.perception.cs.cmu.edu/handdb.html)\] (CVPR'2017) +- [x] [AI Challenger](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#ai-challenger-arxiv-2017) \[[homepage](https://github.com/AIChallenger/AI_Challenger_2017)\] (ArXiv'2017) +- [x] [MHP](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#mhp-acm-mm-2018) \[[homepage](https://lv-mhp.github.io/dataset)\] (ACM MM'2018) +- [x] [WFLW](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#wflw-cvpr-2018) \[[homepage](https://wywu.github.io/projects/LAB/WFLW.html)\] (CVPR'2018) +- [x] [PoseTrack18](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#posetrack18-cvpr-2018) \[[homepage](https://posetrack.net/users/download.php)\] (CVPR'2018) +- [x] [OCHuman](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#ochuman-cvpr-2019) \[[homepage](https://github.com/liruilong940607/OCHumanApi)\] (CVPR'2019) +- [x] [CrowdPose](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#crowdpose-cvpr-2019) \[[homepage](https://github.com/Jeff-sjtu/CrowdPose)\] (CVPR'2019) +- [x] [MPII-TRB](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#mpii-trb-iccv-2019) \[[homepage](https://github.com/kennymckormick/Triplet-Representation-of-human-Body)\] (ICCV'2019) +- [x] [FreiHand](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#freihand-iccv-2019) \[[homepage](https://lmb.informatik.uni-freiburg.de/projects/freihand/)\] 
(ICCV'2019) +- [x] [Animal-Pose](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#animal-pose-iccv-2019) \[[homepage](https://sites.google.com/view/animal-pose/)\] (ICCV'2019) +- [x] [OneHand10K](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#onehand10k-tcsvt-2019) \[[homepage](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html)\] (TCSVT'2019) +- [x] [Vinegar Fly](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#vinegar-fly-nature-methods-2019) \[[homepage](https://github.com/jgraving/DeepPoseKit-Data)\] (Nature Methods'2019) +- [x] [Desert Locust](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#desert-locust-elife-2019) \[[homepage](https://github.com/jgraving/DeepPoseKit-Data)\] (Elife'2019) +- [x] [Grévy’s Zebra](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#grevys-zebra-elife-2019) \[[homepage](https://github.com/jgraving/DeepPoseKit-Data)\] (Elife'2019) +- [x] [ATRW](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#atrw-acm-mm-2020) \[[homepage](https://cvwc2019.github.io/challenge.html)\] (ACM MM'2020) +- [x] [Halpe](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#halpe-cvpr-2020) \[[homepage](https://github.com/Fang-Haoshu/Halpe-FullBody/)\] (CVPR'2020) +- [x] [COCO-WholeBody](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#coco-wholebody-eccv-2020) \[[homepage](https://github.com/jin-s13/COCO-WholeBody/)\] (ECCV'2020) +- [x] [MacaquePose](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#macaquepose-biorxiv-2020) \[[homepage](http://www.pri.kyoto-u.ac.jp/datasets/macaquepose/index.html)\] (bioRxiv'2020) +- [x] [InterHand2.6M](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#interhand2-6m-eccv-2020) \[[homepage](https://mks0601.github.io/InterHand2.6M/)\] (ECCV'2020) +- [x] [AP-10K](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#ap-10k-neurips-2021) \[[homepage](https://github.com/AlexTheBad/AP-10K)\] (NeurIPS'2021) +- [x] [Horse-10](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#horse-10-wacv-2021) \[[homepage](http://www.mackenziemathislab.org/horse10)\] (WACV'2021) +- [x] [Human-Art](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#human-art-cvpr-2023) \[[homepage](https://idea-research.github.io/HumanArt/)\] (CVPR'2023) +- [x] [LaPa](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#lapa-aaai-2020) \[[homepage](https://github.com/JDAI-CV/lapa-dataset)\] (AAAI'2020) + +
+ +
+Supported backbones: + +- [x] [AlexNet](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#alexnet-neurips-2012) (NeurIPS'2012) +- [x] [VGG](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#vgg-iclr-2015) (ICLR'2015) +- [x] [ResNet](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#resnet-cvpr-2016) (CVPR'2016) +- [x] [ResNext](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#resnext-cvpr-2017) (CVPR'2017) +- [x] [SEResNet](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#seresnet-cvpr-2018) (CVPR'2018) +- [x] [ShufflenetV1](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#shufflenetv1-cvpr-2018) (CVPR'2018) +- [x] [ShufflenetV2](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#shufflenetv2-eccv-2018) (ECCV'2018) +- [x] [MobilenetV2](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#mobilenetv2-cvpr-2018) (CVPR'2018) +- [x] [ResNetV1D](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#resnetv1d-cvpr-2019) (CVPR'2019) +- [x] [ResNeSt](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#resnest-arxiv-2020) (ArXiv'2020) +- [x] [Swin](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#swin-cvpr-2021) (CVPR'2021) +- [x] [HRFormer](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#hrformer-nips-2021) (NIPS'2021) +- [x] [PVT](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#pvt-iccv-2021) (ICCV'2021) +- [x] [PVTV2](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#pvtv2-cvmj-2022) (CVMJ'2022) + +
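+Beyond the high-level inferencer, any model from the zoo above can be built explicitly from its config and checkpoint with the lower-level API. The snippet below is only a sketch: `CONFIG_FILE` and `CHECKPOINT_FILE` are placeholders to be copied from the README.md of the method you pick, `demo.jpg` is any local test image, and a top-down 2D model with MMPose >= 1.0 is assumed.
+
+```python
+from mmpose.apis import inference_topdown, init_model
+
+# Placeholders: take the real config path and checkpoint URL from the model zoo.
+CONFIG_FILE = 'path/to/model_config.py'
+CHECKPOINT_FILE = 'path/to/model_checkpoint.pth'
+
+model = init_model(CONFIG_FILE, CHECKPOINT_FILE, device='cpu')
+
+# Without bounding boxes, the whole image is treated as a single person instance.
+results = inference_topdown(model, 'demo.jpg')
+print(results[0].pred_instances.keypoints)  # keypoint coordinates, typically shaped (1, num_keypoints, 2)
+```
+
+In practice, top-down models are usually paired with a person detector that supplies the bounding boxes; the demo scripts under `demo/` show this two-stage setup.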
+
+
+### Model Request
+
+We will keep up with the latest progress in the community and support more popular algorithms and frameworks. If you have any feature requests, please feel free to leave a comment in the [MMPose Roadmap](https://github.com/open-mmlab/mmpose/issues/2258).
+
+## Contributing
+
+We appreciate all contributions to improve MMPose. Please refer to [CONTRIBUTING.md](https://mmpose.readthedocs.io/en/latest/contribution_guide.html) for the contributing guidelines.
+
+## Acknowledgement
+
+MMPose is an open source project contributed to by researchers and engineers from various colleges and companies.
+We appreciate all the contributors who implement their methods or add new features, as well as users who give valuable feedback.
+We hope that the toolbox and benchmark can serve the growing research community by providing a flexible toolkit for reimplementing existing methods and developing new models.
+
+## Citation
+
+If you find this project useful in your research, please consider citing:
+
+```bibtex
+@misc{mmpose2020,
+    title={OpenMMLab Pose Estimation Toolbox and Benchmark},
+    author={MMPose Contributors},
+    howpublished = {\url{https://github.com/open-mmlab/mmpose}},
+    year={2020}
+}
+```
+
+## License
+
+This project is released under the [Apache 2.0 license](LICENSE).
+
+## Projects in OpenMMLab
+
+- [MMEngine](https://github.com/open-mmlab/mmengine): OpenMMLab foundational library for training deep learning models.
+- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision.
+- [MMPreTrain](https://github.com/open-mmlab/mmpretrain): OpenMMLab pre-training toolbox and benchmark.
+- [MMagic](https://github.com/open-mmlab/mmagic): Open**MM**Lab **A**dvanced, **G**enerative and **I**ntelligent **C**reation toolbox.
+- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox and benchmark.
+- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection.
+- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark.
+- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab video perception toolbox and benchmark.
+- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox and benchmark.
+- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab text detection, recognition, and understanding toolbox.
+- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark.
+- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 3D human parametric model toolbox and benchmark.
+- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab few-shot learning toolbox and benchmark.
+- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation action understanding toolbox and benchmark.
+- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab optical flow toolbox and benchmark.
+- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab model deployment framework.
+- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab model compression toolbox and benchmark.
+- [MIM](https://github.com/open-mmlab/mim): MIM installs OpenMMLab packages.
+- [Playground](https://github.com/open-mmlab/playground): A central hub for gathering and showcasing amazing projects built upon OpenMMLab.
diff --git a/README_CN.md b/README_CN.md index 48672c2a88..f0649d2c37 100644 --- a/README_CN.md +++ b/README_CN.md @@ -1,384 +1,384 @@ -
- -
 
-
- OpenMMLab 官网 - - - HOT - - -      - OpenMMLab 开放平台 - - - TRY IT OUT - - -
-
 
- -[![Documentation](https://readthedocs.org/projects/mmpose/badge/?version=latest)](https://mmpose.readthedocs.io/en/latest/?badge=latest) -[![actions](https://github.com/open-mmlab/mmpose/workflows/build/badge.svg)](https://github.com/open-mmlab/mmpose/actions) -[![codecov](https://codecov.io/gh/open-mmlab/mmpose/branch/latest/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmpose) -[![PyPI](https://img.shields.io/pypi/v/mmpose)](https://pypi.org/project/mmpose/) -[![LICENSE](https://img.shields.io/github/license/open-mmlab/mmpose.svg)](https://github.com/open-mmlab/mmpose/blob/main/LICENSE) -[![Average time to resolve an issue](https://isitmaintained.com/badge/resolution/open-mmlab/mmpose.svg)](https://github.com/open-mmlab/mmpose/issues) -[![Percentage of issues still open](https://isitmaintained.com/badge/open/open-mmlab/mmpose.svg)](https://github.com/open-mmlab/mmpose/issues) - -[📘文档](https://mmpose.readthedocs.io/zh_CN/latest/) | -[🛠️安装](https://mmpose.readthedocs.io/zh_CN/latest/installation.html) | -[👀模型库](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo.html) | -[📜论文库](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/algorithms.html) | -[🆕更新日志](https://mmpose.readthedocs.io/zh_CN/latest/notes/changelog.html) | -[🤔报告问题](https://github.com/open-mmlab/mmpose/issues/new/choose) | -[🔥RTMPose](/projects/rtmpose/) - -
- -
- - - - - - - - - - - - - - - - - -
- -## Introduction - -[English](./README.md) | 简体中文 - -MMPose 是一款基于 PyTorch 的姿态分析的开源工具箱,是 [OpenMMLab](https://github.com/open-mmlab) 项目的成员之一。 - -主分支代码目前支持 **PyTorch 1.8 以上**的版本。 - -https://user-images.githubusercontent.com/15977946/124654387-0fd3c500-ded1-11eb-84f6-24eeddbf4d91.mp4 - -
-主要特性 - -- **支持多种人体姿态分析相关任务** - - MMPose 支持当前学界广泛关注的主流姿态分析任务:主要包括 2D多人姿态估计、2D手部姿态估计、2D人脸关键点检测、133关键点的全身人体姿态估计、3D人体形状恢复、服饰关键点检测、动物关键点检测等。 - 具体请参考 [功能演示](demo/docs/zh_cn/)。 - -- **更高的精度和更快的速度** - - MMPose 复现了多种学界最先进的人体姿态分析模型,包括“自顶向下”和“自底向上”两大类算法。MMPose 相比于其他主流的代码库,具有更高的模型精度和训练速度。 - 具体请参考 [基准测试](docs/en/notes/benchmark.md)(英文)。 - -- **支持多样的数据集** - - MMPose 支持了很多主流数据集的准备和构建,如 COCO、 MPII 等。 具体请参考 [数据集](docs/zh_cn/dataset_zoo)。 - -- **模块化设计** - - MMPose 将统一的人体姿态分析框架解耦成不同的模块组件,通过组合不同的模块组件,用户可以便捷地构建自定义的人体姿态分析模型。 - -- **详尽的单元测试和文档** - - MMPose 提供了详尽的说明文档,API 接口说明,全面的单元测试,以供社区参考。 - -
- -## 最新进展 - -- 我们支持了三个新的数据集: - - (CVPR 2023) [Human-Art](https://github.com/IDEA-Research/HumanArt) - - (CVPR 2022) [Animal Kingdom](https://github.com/sutdcv/Animal-Kingdom) - - (AAAI 2020) [LaPa](https://github.com/JDAI-CV/lapa-dataset/) - -![yolox-pose_intro](https://user-images.githubusercontent.com/26127467/226655503-3cee746e-6e42-40be-82ae-6e7cae2a4c7e.jpg) - -- 欢迎使用 [*MMPose 项目*](/projects/README.md)。在这里,您可以发现 MMPose 中的最新功能和算法,并且可以通过最快的方式与社区分享自己的创意和代码实现。向 MMPose 中添加新功能从此变得简单丝滑: - - - 提供了一种简单迅捷的方式,将新的算法、功能和应用添加到 MMPose 中 - - 更灵活的代码结构和风格,更少的限制,更简短的代码审核流程 - - 通过独立项目的形式,利用 MMPose 的强大功能,同时不被代码框架所束缚 - - 最新添加的项目包括: - - [RTMPose](/projects/rtmpose/) - - [YOLOX-Pose](/projects/yolox_pose/) - - [MMPose4AIGC](/projects/mmpose4aigc/) - - [Simple Keypoints](/projects/skps/) - - 从简单的 [示例项目](/projects/example_project/) 开启您的 MMPose 代码贡献者之旅吧,让我们共同打造更好用的 MMPose! - -
- -- 2023-07-04:MMPose [v1.1.0](https://github.com/open-mmlab/mmpose/releases/tag/v1.1.0) 正式发布了,主要更新包括: - - - 支持新数据集:Human-Art、Animal Kingdom、LaPa。 - - 支持新的配置文件风格,支持 IDE 跳转和搜索。 - - 提供更强性能的 RTMPose 模型。 - - 迁移 3D 姿态估计算法。 - - 加速推理脚本,全部 demo 脚本支持摄像头推理。 - - 请查看完整的 [版本说明](https://github.com/open-mmlab/mmpose/releases/tag/v1.1.0) 以了解更多 MMPose v1.1.0 带来的更新! - -## 0.x / 1.x 迁移 - -MMPose v1.0.0 是一个重大更新,包括了大量的 API 和配置文件的变化。目前 v1.0.0 中已经完成了一部分算法的迁移工作,剩余的算法将在后续的版本中陆续完成,我们将在下面的列表中展示迁移进度。 - -
-迁移进度 - -| 算法名称 | 迁移进度 | -| :-------------------------------- | :---------: | -| MTUT (CVPR 2019) | | -| MSPN (ArXiv 2019) | done | -| InterNet (ECCV 2020) | | -| DEKR (CVPR 2021) | done | -| HigherHRNet (CVPR 2020) | | -| DeepPose (CVPR 2014) | done | -| RLE (ICCV 2021) | done | -| SoftWingloss (TIP 2021) | done | -| VideoPose3D (CVPR 2019) | done | -| Hourglass (ECCV 2016) | done | -| LiteHRNet (CVPR 2021) | done | -| AdaptiveWingloss (ICCV 2019) | done | -| SimpleBaseline2D (ECCV 2018) | done | -| PoseWarper (NeurIPS 2019) | | -| SimpleBaseline3D (ICCV 2017) | done | -| HMR (CVPR 2018) | | -| UDP (CVPR 2020) | done | -| VIPNAS (CVPR 2021) | done | -| Wingloss (CVPR 2018) | done | -| DarkPose (CVPR 2020) | done | -| Associative Embedding (NIPS 2017) | in progress | -| VoxelPose (ECCV 2020) | | -| RSN (ECCV 2020) | done | -| CID (CVPR 2022) | done | -| CPM (CVPR 2016) | done | -| HRNet (CVPR 2019) | done | -| HRNetv2 (TPAMI 2019) | done | -| SCNet (CVPR 2020) | done | - -
- -如果您使用的算法还没有完成迁移,您也可以继续使用访问 [0.x 分支](https://github.com/open-mmlab/mmpose/tree/0.x) 和 [旧版文档](https://mmpose.readthedocs.io/zh_CN/0.x/) - -## 安装 - -关于安装的详细说明请参考[安装文档](https://mmpose.readthedocs.io/zh_CN/latest/installation.html)。 - -## 教程 - -我们提供了一系列简明的教程,帮助 MMPose 的新用户轻松上手使用: - -1. MMPose 的基本使用方法: - - - [20 分钟上手教程](https://mmpose.readthedocs.io/zh_CN/latest/guide_to_framework.html) - - [Demos](https://mmpose.readthedocs.io/zh_CN/latest/demos.html) - - [模型推理](https://mmpose.readthedocs.io/zh_CN/latest/user_guides/inference.html) - - [配置文件](https://mmpose.readthedocs.io/zh_CN/latest/user_guides/configs.html) - - [准备数据集](https://mmpose.readthedocs.io/zh_CN/latest/user_guides/prepare_datasets.html) - - [训练与测试](https://mmpose.readthedocs.io/zh_CN/latest/user_guides/train_and_test.html) - -2. 对于希望基于 MMPose 进行开发的研究者和开发者: - - - [编解码器](https://mmpose.readthedocs.io/zh_CN/latest/advanced_guides/codecs.html) - - [数据流](https://mmpose.readthedocs.io/zh_CN/latest/advanced_guides/dataflow.html) - - [实现新模型](https://mmpose.readthedocs.io/zh_CN/latest/advanced_guides/implement_new_models.html) - - [自定义数据集](https://mmpose.readthedocs.io/zh_CN/latest/advanced_guides/customize_datasets.html) - - [自定义数据变换](https://mmpose.readthedocs.io/zh_CN/latest/advanced_guides/customize_transforms.html) - - [自定义优化器](https://mmpose.readthedocs.io/zh_CN/latest/advanced_guides/customize_optimizer.html) - - [自定义日志](https://mmpose.readthedocs.io/zh_CN/latest/advanced_guides/customize_logging.html) - - [模型部署](https://mmpose.readthedocs.io/zh_CN/latest/advanced_guides/how_to_deploy.html) - - [模型分析工具](https://mmpose.readthedocs.io/zh_CN/latest/advanced_guides/model_analysis.html) - - [迁移指南](https://mmpose.readthedocs.io/zh_CN/latest/migration.html) - -3. 对于希望加入开源社区,向 MMPose 贡献代码的研究者和开发者: - - - [参与贡献代码](https://mmpose.readthedocs.io/zh_CN/latest/contribution_guide.html) - -4. 对于使用过程中的常见问题: - - - [FAQ](https://mmpose.readthedocs.io/zh_CN/latest/faq.html) - -## 模型库 - -各个模型的结果和设置都可以在对应的 config(配置)目录下的 **README.md** 中查看。 -整体的概况也可也在 [模型库](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo.html) 页面中查看。 - -
-支持的算法 - -- [x] [DeepPose](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/algorithms.html#deeppose-cvpr-2014) (CVPR'2014) -- [x] [CPM](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#cpm-cvpr-2016) (CVPR'2016) -- [x] [Hourglass](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#hourglass-eccv-2016) (ECCV'2016) -- [x] [SimpleBaseline3D](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/algorithms.html#simplebaseline3d-iccv-2017) (ICCV'2017) -- [ ] [Associative Embedding](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/algorithms.html#associative-embedding-nips-2017) (NeurIPS'2017) -- [x] [SimpleBaseline2D](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/algorithms.html#simplebaseline2d-eccv-2018) (ECCV'2018) -- [x] [DSNT](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/algorithms.html#dsnt-2018) (ArXiv'2021) -- [x] [HRNet](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#hrnet-cvpr-2019) (CVPR'2019) -- [x] [IPR](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/algorithms.html#ipr-eccv-2018) (ECCV'2018) -- [x] [VideoPose3D](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/algorithms.html#videopose3d-cvpr-2019) (CVPR'2019) -- [x] [HRNetv2](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#hrnetv2-tpami-2019) (TPAMI'2019) -- [x] [MSPN](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#mspn-arxiv-2019) (ArXiv'2019) -- [x] [SCNet](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#scnet-cvpr-2020) (CVPR'2020) -- [ ] [HigherHRNet](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#higherhrnet-cvpr-2020) (CVPR'2020) -- [x] [RSN](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#rsn-eccv-2020) (ECCV'2020) -- [ ] [InterNet](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/algorithms.html#internet-eccv-2020) (ECCV'2020) -- [ ] [VoxelPose](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/algorithms.html#voxelpose-eccv-2020) (ECCV'2020) -- [x] [LiteHRNet](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#litehrnet-cvpr-2021) (CVPR'2021) -- [x] [ViPNAS](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#vipnas-cvpr-2021) (CVPR'2021) -- [x] [Debias-IPR](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/algorithms.html#debias-ipr-iccv-2021) (ICCV'2021) -- [x] [SimCC](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/algorithms.html#simcc-eccv-2022) (ECCV'2022) - -
- -
-支持的技术 - -- [x] [FPN](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/techniques.html#fpn-cvpr-2017) (CVPR'2017) -- [x] [FP16](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/techniques.html#fp16-arxiv-2017) (ArXiv'2017) -- [x] [Wingloss](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/techniques.html#wingloss-cvpr-2018) (CVPR'2018) -- [x] [AdaptiveWingloss](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/techniques.html#adaptivewingloss-iccv-2019) (ICCV'2019) -- [x] [DarkPose](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/techniques.html#darkpose-cvpr-2020) (CVPR'2020) -- [x] [UDP](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/techniques.html#udp-cvpr-2020) (CVPR'2020) -- [x] [Albumentations](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/techniques.html#albumentations-information-2020) (Information'2020) -- [x] [SoftWingloss](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/techniques.html#softwingloss-tip-2021) (TIP'2021) -- [x] [RLE](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/techniques.html#rle-iccv-2021) (ICCV'2021) - -
- -
-支持的数据集 - -- [x] [AFLW](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#aflw-iccvw-2011) \[[主页](https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/aflw/)\] (ICCVW'2011) -- [x] [sub-JHMDB](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#jhmdb-iccv-2013) \[[主页](http://jhmdb.is.tue.mpg.de/dataset)\] (ICCV'2013) -- [x] [COFW](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#cofw-iccv-2013) \[[主页](http://www.vision.caltech.edu/xpburgos/ICCV13/)\] (ICCV'2013) -- [x] [MPII](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#mpii-cvpr-2014) \[[主页](http://human-pose.mpi-inf.mpg.de/)\] (CVPR'2014) -- [x] [Human3.6M](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#human3-6m-tpami-2014) \[[主页](http://vision.imar.ro/human3.6m/description.php)\] (TPAMI'2014) -- [x] [COCO](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#coco-eccv-2014) \[[主页](http://cocodataset.org/)\] (ECCV'2014) -- [x] [CMU Panoptic](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#cmu-panoptic-iccv-2015) (ICCV'2015) -- [x] [DeepFashion](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#deepfashion-cvpr-2016) \[[主页](http://mmlab.ie.cuhk.edu.hk/projects/DeepFashion/LandmarkDetection.html)\] (CVPR'2016) -- [x] [300W](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#300w-imavis-2016) \[[主页](https://ibug.doc.ic.ac.uk/resources/300-W/)\] (IMAVIS'2016) -- [x] [RHD](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#rhd-iccv-2017) \[[主页](https://lmb.informatik.uni-freiburg.de/resources/datasets/RenderedHandposeDataset.en.html)\] (ICCV'2017) -- [x] [CMU Panoptic](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#cmu-panoptic-iccv-2015) \[[主页](http://domedb.perception.cs.cmu.edu/)\] (ICCV'2015) -- [x] [AI Challenger](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#ai-challenger-arxiv-2017) \[[主页](https://github.com/AIChallenger/AI_Challenger_2017)\] (ArXiv'2017) -- [x] [MHP](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#mhp-acm-mm-2018) \[[主页](https://lv-mhp.github.io/dataset)\] (ACM MM'2018) -- [x] [WFLW](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#wflw-cvpr-2018) \[[主页](https://wywu.github.io/projects/LAB/WFLW.html)\] (CVPR'2018) -- [x] [PoseTrack18](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#posetrack18-cvpr-2018) \[[主页](https://posetrack.net/users/download.php)\] (CVPR'2018) -- [x] [OCHuman](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#ochuman-cvpr-2019) \[[主页](https://github.com/liruilong940607/OCHumanApi)\] (CVPR'2019) -- [x] [CrowdPose](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#crowdpose-cvpr-2019) \[[主页](https://github.com/Jeff-sjtu/CrowdPose)\] (CVPR'2019) -- [x] [MPII-TRB](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#mpii-trb-iccv-2019) \[[主页](https://github.com/kennymckormick/Triplet-Representation-of-human-Body)\] (ICCV'2019) -- [x] [FreiHand](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#freihand-iccv-2019) \[[主页](https://lmb.informatik.uni-freiburg.de/projects/freihand/)\] (ICCV'2019) -- [x] [Animal-Pose](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#animal-pose-iccv-2019) 
\[[主页](https://sites.google.com/view/animal-pose/)\] (ICCV'2019) -- [x] [OneHand10K](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#onehand10k-tcsvt-2019) \[[主页](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html)\] (TCSVT'2019) -- [x] [Vinegar Fly](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#vinegar-fly-nature-methods-2019) \[[主页](https://github.com/jgraving/DeepPoseKit-Data)\] (Nature Methods'2019) -- [x] [Desert Locust](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#desert-locust-elife-2019) \[[主页](https://github.com/jgraving/DeepPoseKit-Data)\] (Elife'2019) -- [x] [Grévy’s Zebra](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#grevys-zebra-elife-2019) \[[主页](https://github.com/jgraving/DeepPoseKit-Data)\] (Elife'2019) -- [x] [ATRW](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#atrw-acm-mm-2020) \[[主页](https://cvwc2019.github.io/challenge.html)\] (ACM MM'2020) -- [x] [Halpe](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#halpe-cvpr-2020) \[[主页](https://github.com/Fang-Haoshu/Halpe-FullBody/)\] (CVPR'2020) -- [x] [COCO-WholeBody](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#coco-wholebody-eccv-2020) \[[主页](https://github.com/jin-s13/COCO-WholeBody/)\] (ECCV'2020) -- [x] [MacaquePose](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#macaquepose-biorxiv-2020) \[[主页](http://www.pri.kyoto-u.ac.jp/datasets/macaquepose/index.html)\] (bioRxiv'2020) -- [x] [InterHand2.6M](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#interhand2-6m-eccv-2020) \[[主页](https://mks0601.github.io/InterHand2.6M/)\] (ECCV'2020) -- [x] [AP-10K](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#ap-10k-neurips-2021) \[[主页](https://github.com/AlexTheBad/AP-10K)\] (NeurIPS'2021) -- [x] [Horse-10](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#horse-10-wacv-2021) \[[主页](http://www.mackenziemathislab.org/horse10)\] (WACV'2021) -- [x] [Human-Art](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#human-art-cvpr-2023) \[[主页](https://idea-research.github.io/HumanArt/)\] (CVPR'2023) -- [x] [LaPa](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#lapa-aaai-2020) \[[主页](https://github.com/JDAI-CV/lapa-dataset)\] (AAAI'2020) - -
- -
-支持的骨干网络 - -- [x] [AlexNet](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#alexnet-neurips-2012) (NeurIPS'2012) -- [x] [VGG](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#vgg-iclr-2015) (ICLR'2015) -- [x] [ResNet](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#resnet-cvpr-2016) (CVPR'2016) -- [x] [ResNext](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#resnext-cvpr-2017) (CVPR'2017) -- [x] [SEResNet](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#seresnet-cvpr-2018) (CVPR'2018) -- [x] [ShufflenetV1](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#shufflenetv1-cvpr-2018) (CVPR'2018) -- [x] [ShufflenetV2](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#shufflenetv2-eccv-2018) (ECCV'2018) -- [x] [MobilenetV2](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#mobilenetv2-cvpr-2018) (CVPR'2018) -- [x] [ResNetV1D](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#resnetv1d-cvpr-2019) (CVPR'2019) -- [x] [ResNeSt](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#resnest-arxiv-2020) (ArXiv'2020) -- [x] [Swin](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#swin-cvpr-2021) (CVPR'2021) -- [x] [HRFormer](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#hrformer-nips-2021) (NIPS'2021) -- [x] [PVT](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#pvt-iccv-2021) (ICCV'2021) -- [x] [PVTV2](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#pvtv2-cvmj-2022) (CVMJ'2022) - -
- -### 模型需求 - -我们将跟进学界的最新进展,并支持更多算法和框架。如果您对 MMPose 有任何功能需求,请随时在 [MMPose Roadmap](https://github.com/open-mmlab/mmpose/issues/2258) 中留言。 - -## 参与贡献 - -我们非常欢迎用户对于 MMPose 做出的任何贡献,可以参考 [贡献指南](https://mmpose.readthedocs.io/zh_CN/latest/contribution_guide.html) 文件了解更多细节。 - -## 致谢 - -MMPose 是一款由不同学校和公司共同贡献的开源项目。我们感谢所有为项目提供算法复现和新功能支持的贡献者,以及提供宝贵反馈的用户。 -我们希望该工具箱和基准测试可以为社区提供灵活的代码工具,供用户复现现有算法并开发自己的新模型,从而不断为开源社区提供贡献。 - -## 引用 - -如果您觉得 MMPose 对您的研究有所帮助,请考虑引用它: - -```bibtex -@misc{mmpose2020, - title={OpenMMLab Pose Estimation Toolbox and Benchmark}, - author={MMPose Contributors}, - howpublished = {\url{https://github.com/open-mmlab/mmpose}}, - year={2020} -} -``` - -## 许可证 - -该项目采用 [Apache 2.0 license](LICENSE) 开源协议。 - -## OpenMMLab的其他项目 - -- [MMEngine](https://github.com/open-mmlab/mmengine): OpenMMLab 深度学习模型训练基础库 -- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab 计算机视觉基础库 -- [MMPreTrain](https://github.com/open-mmlab/mmpretrain): OpenMMLab 深度学习预训练工具箱 -- [MMagic](https://github.com/open-mmlab/mmagic): OpenMMLab 新一代人工智能内容生成(AIGC)工具箱 -- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab 目标检测工具箱 -- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab 新一代通用 3D 目标检测平台 -- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab 旋转框检测工具箱与测试基准 -- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab 一体化视频目标感知平台 -- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab 语义分割工具箱 -- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab 全流程文字检测识别理解工具包 -- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab 姿态估计工具箱 -- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 人体参数化模型工具箱与测试基准 -- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab 少样本学习工具箱与测试基准 -- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab 新一代视频理解工具箱 -- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab 光流估计工具箱与测试基准 -- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab 模型部署框架 -- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab 模型压缩工具箱与测试基准 -- [MIM](https://github.com/open-mmlab/mim): OpenMMlab 项目、算法、模型的统一入口 -- [Playground](https://github.com/open-mmlab/playground): 收集和展示 OpenMMLab 相关的前沿、有趣的社区项目 - -## 欢迎加入 OpenMMLab 社区 - -扫描下方的二维码可关注 OpenMMLab 团队的 [知乎官方账号](https://www.zhihu.com/people/openmmlab),联络 OpenMMLab [官方微信小助手](https://user-images.githubusercontent.com/25839884/205872898-e2e6009d-c6bb-4d27-8d07-117e697a3da8.jpg)或加入 OpenMMLab 团队的 [官方交流 QQ 群](https://jq.qq.com/?_wv=1027&k=K0QI8ByU) - -
- -
- -我们会在 OpenMMLab 社区为大家 - -- 📢 分享 AI 框架的前沿核心技术 -- 💻 解读 PyTorch 常用模块源码 -- 📰 发布 OpenMMLab 的相关新闻 -- 🚀 介绍 OpenMMLab 开发的前沿算法 -- 🏃 获取更高效的问题答疑和意见反馈 -- 🔥 提供与各行各业开发者充分交流的平台 - -干货满满 📘,等你来撩 💗,OpenMMLab 社区期待您的加入 👬 +
+ +
 
+
+ OpenMMLab 官网 + + + HOT + + +      + OpenMMLab 开放平台 + + + TRY IT OUT + + +
+
 
+ +[![Documentation](https://readthedocs.org/projects/mmpose/badge/?version=latest)](https://mmpose.readthedocs.io/en/latest/?badge=latest) +[![actions](https://github.com/open-mmlab/mmpose/workflows/build/badge.svg)](https://github.com/open-mmlab/mmpose/actions) +[![codecov](https://codecov.io/gh/open-mmlab/mmpose/branch/latest/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmpose) +[![PyPI](https://img.shields.io/pypi/v/mmpose)](https://pypi.org/project/mmpose/) +[![LICENSE](https://img.shields.io/github/license/open-mmlab/mmpose.svg)](https://github.com/open-mmlab/mmpose/blob/main/LICENSE) +[![Average time to resolve an issue](https://isitmaintained.com/badge/resolution/open-mmlab/mmpose.svg)](https://github.com/open-mmlab/mmpose/issues) +[![Percentage of issues still open](https://isitmaintained.com/badge/open/open-mmlab/mmpose.svg)](https://github.com/open-mmlab/mmpose/issues) + +[📘文档](https://mmpose.readthedocs.io/zh_CN/latest/) | +[🛠️安装](https://mmpose.readthedocs.io/zh_CN/latest/installation.html) | +[👀模型库](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo.html) | +[📜论文库](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/algorithms.html) | +[🆕更新日志](https://mmpose.readthedocs.io/zh_CN/latest/notes/changelog.html) | +[🤔报告问题](https://github.com/open-mmlab/mmpose/issues/new/choose) | +[🔥RTMPose](/projects/rtmpose/) + +
+ +
+ + + + + + + + + + + + + + + + + +
+ +## Introduction + +[English](./README.md) | 简体中文 + +MMPose 是一款基于 PyTorch 的姿态分析的开源工具箱,是 [OpenMMLab](https://github.com/open-mmlab) 项目的成员之一。 + +主分支代码目前支持 **PyTorch 1.8 以上**的版本。 + +https://user-images.githubusercontent.com/15977946/124654387-0fd3c500-ded1-11eb-84f6-24eeddbf4d91.mp4 + +
+主要特性 + +- **支持多种人体姿态分析相关任务** + + MMPose 支持当前学界广泛关注的主流姿态分析任务:主要包括 2D多人姿态估计、2D手部姿态估计、2D人脸关键点检测、133关键点的全身人体姿态估计、3D人体形状恢复、服饰关键点检测、动物关键点检测等。 + 具体请参考 [功能演示](demo/docs/zh_cn/)。 + +- **更高的精度和更快的速度** + + MMPose 复现了多种学界最先进的人体姿态分析模型,包括“自顶向下”和“自底向上”两大类算法。MMPose 相比于其他主流的代码库,具有更高的模型精度和训练速度。 + 具体请参考 [基准测试](docs/en/notes/benchmark.md)(英文)。 + +- **支持多样的数据集** + + MMPose 支持了很多主流数据集的准备和构建,如 COCO、 MPII 等。 具体请参考 [数据集](docs/zh_cn/dataset_zoo)。 + +- **模块化设计** + + MMPose 将统一的人体姿态分析框架解耦成不同的模块组件,通过组合不同的模块组件,用户可以便捷地构建自定义的人体姿态分析模型。 + +- **详尽的单元测试和文档** + + MMPose 提供了详尽的说明文档,API 接口说明,全面的单元测试,以供社区参考。 + +
+ +## 最新进展 + +- 我们支持了三个新的数据集: + - (CVPR 2023) [Human-Art](https://github.com/IDEA-Research/HumanArt) + - (CVPR 2022) [Animal Kingdom](https://github.com/sutdcv/Animal-Kingdom) + - (AAAI 2020) [LaPa](https://github.com/JDAI-CV/lapa-dataset/) + +![yolox-pose_intro](https://user-images.githubusercontent.com/26127467/226655503-3cee746e-6e42-40be-82ae-6e7cae2a4c7e.jpg) + +- 欢迎使用 [*MMPose 项目*](/projects/README.md)。在这里,您可以发现 MMPose 中的最新功能和算法,并且可以通过最快的方式与社区分享自己的创意和代码实现。向 MMPose 中添加新功能从此变得简单丝滑: + + - 提供了一种简单迅捷的方式,将新的算法、功能和应用添加到 MMPose 中 + - 更灵活的代码结构和风格,更少的限制,更简短的代码审核流程 + - 通过独立项目的形式,利用 MMPose 的强大功能,同时不被代码框架所束缚 + - 最新添加的项目包括: + - [RTMPose](/projects/rtmpose/) + - [YOLOX-Pose](/projects/yolox_pose/) + - [MMPose4AIGC](/projects/mmpose4aigc/) + - [Simple Keypoints](/projects/skps/) + - 从简单的 [示例项目](/projects/example_project/) 开启您的 MMPose 代码贡献者之旅吧,让我们共同打造更好用的 MMPose! + +
+ +- 2023-07-04:MMPose [v1.1.0](https://github.com/open-mmlab/mmpose/releases/tag/v1.1.0) 正式发布了,主要更新包括: + + - 支持新数据集:Human-Art、Animal Kingdom、LaPa。 + - 支持新的配置文件风格,支持 IDE 跳转和搜索。 + - 提供更强性能的 RTMPose 模型。 + - 迁移 3D 姿态估计算法。 + - 加速推理脚本,全部 demo 脚本支持摄像头推理。 + + 请查看完整的 [版本说明](https://github.com/open-mmlab/mmpose/releases/tag/v1.1.0) 以了解更多 MMPose v1.1.0 带来的更新! + +## 0.x / 1.x 迁移 + +MMPose v1.0.0 是一个重大更新,包括了大量的 API 和配置文件的变化。目前 v1.0.0 中已经完成了一部分算法的迁移工作,剩余的算法将在后续的版本中陆续完成,我们将在下面的列表中展示迁移进度。 + +
+迁移进度 + +| 算法名称 | 迁移进度 | +| :-------------------------------- | :---------: | +| MTUT (CVPR 2019) | | +| MSPN (ArXiv 2019) | done | +| InterNet (ECCV 2020) | | +| DEKR (CVPR 2021) | done | +| HigherHRNet (CVPR 2020) | | +| DeepPose (CVPR 2014) | done | +| RLE (ICCV 2021) | done | +| SoftWingloss (TIP 2021) | done | +| VideoPose3D (CVPR 2019) | done | +| Hourglass (ECCV 2016) | done | +| LiteHRNet (CVPR 2021) | done | +| AdaptiveWingloss (ICCV 2019) | done | +| SimpleBaseline2D (ECCV 2018) | done | +| PoseWarper (NeurIPS 2019) | | +| SimpleBaseline3D (ICCV 2017) | done | +| HMR (CVPR 2018) | | +| UDP (CVPR 2020) | done | +| VIPNAS (CVPR 2021) | done | +| Wingloss (CVPR 2018) | done | +| DarkPose (CVPR 2020) | done | +| Associative Embedding (NIPS 2017) | in progress | +| VoxelPose (ECCV 2020) | | +| RSN (ECCV 2020) | done | +| CID (CVPR 2022) | done | +| CPM (CVPR 2016) | done | +| HRNet (CVPR 2019) | done | +| HRNetv2 (TPAMI 2019) | done | +| SCNet (CVPR 2020) | done | + +
+
+如果您使用的算法还没有完成迁移,您也可以继续使用 [0.x 分支](https://github.com/open-mmlab/mmpose/tree/0.x) 并参考 [旧版文档](https://mmpose.readthedocs.io/zh_CN/0.x/)。
+
+## 安装
+
+关于安装的详细说明请参考[安装文档](https://mmpose.readthedocs.io/zh_CN/latest/installation.html)。
+
+## 教程
+
+我们提供了一系列简明的教程,帮助 MMPose 的新用户轻松上手使用:
+
+1. MMPose 的基本使用方法:
+
+   - [20 分钟上手教程](https://mmpose.readthedocs.io/zh_CN/latest/guide_to_framework.html)
+   - [Demos](https://mmpose.readthedocs.io/zh_CN/latest/demos.html)
+   - [模型推理](https://mmpose.readthedocs.io/zh_CN/latest/user_guides/inference.html)
+   - [配置文件](https://mmpose.readthedocs.io/zh_CN/latest/user_guides/configs.html)
+   - [准备数据集](https://mmpose.readthedocs.io/zh_CN/latest/user_guides/prepare_datasets.html)
+   - [训练与测试](https://mmpose.readthedocs.io/zh_CN/latest/user_guides/train_and_test.html)
+
+2. 对于希望基于 MMPose 进行开发的研究者和开发者:
+
+   - [编解码器](https://mmpose.readthedocs.io/zh_CN/latest/advanced_guides/codecs.html)
+   - [数据流](https://mmpose.readthedocs.io/zh_CN/latest/advanced_guides/dataflow.html)
+   - [实现新模型](https://mmpose.readthedocs.io/zh_CN/latest/advanced_guides/implement_new_models.html)
+   - [自定义数据集](https://mmpose.readthedocs.io/zh_CN/latest/advanced_guides/customize_datasets.html)
+   - [自定义数据变换](https://mmpose.readthedocs.io/zh_CN/latest/advanced_guides/customize_transforms.html)
+   - [自定义优化器](https://mmpose.readthedocs.io/zh_CN/latest/advanced_guides/customize_optimizer.html)
+   - [自定义日志](https://mmpose.readthedocs.io/zh_CN/latest/advanced_guides/customize_logging.html)
+   - [模型部署](https://mmpose.readthedocs.io/zh_CN/latest/advanced_guides/how_to_deploy.html)
+   - [模型分析工具](https://mmpose.readthedocs.io/zh_CN/latest/advanced_guides/model_analysis.html)
+   - [迁移指南](https://mmpose.readthedocs.io/zh_CN/latest/migration.html)
+
+3. 对于希望加入开源社区,向 MMPose 贡献代码的研究者和开发者:
+
+   - [参与贡献代码](https://mmpose.readthedocs.io/zh_CN/latest/contribution_guide.html)
+
+4. 对于使用过程中的常见问题:
+
+   - [FAQ](https://mmpose.readthedocs.io/zh_CN/latest/faq.html)
+
+## 模型库
+
+各个模型的结果和设置都可以在对应的 config(配置)目录下的 **README.md** 中查看。
+整体的概况也可以在 [模型库](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo.html) 页面中查看。
+
+
+支持的算法 + +- [x] [DeepPose](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/algorithms.html#deeppose-cvpr-2014) (CVPR'2014) +- [x] [CPM](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#cpm-cvpr-2016) (CVPR'2016) +- [x] [Hourglass](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#hourglass-eccv-2016) (ECCV'2016) +- [x] [SimpleBaseline3D](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/algorithms.html#simplebaseline3d-iccv-2017) (ICCV'2017) +- [ ] [Associative Embedding](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/algorithms.html#associative-embedding-nips-2017) (NeurIPS'2017) +- [x] [SimpleBaseline2D](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/algorithms.html#simplebaseline2d-eccv-2018) (ECCV'2018) +- [x] [DSNT](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/algorithms.html#dsnt-2018) (ArXiv'2021) +- [x] [HRNet](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#hrnet-cvpr-2019) (CVPR'2019) +- [x] [IPR](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/algorithms.html#ipr-eccv-2018) (ECCV'2018) +- [x] [VideoPose3D](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/algorithms.html#videopose3d-cvpr-2019) (CVPR'2019) +- [x] [HRNetv2](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#hrnetv2-tpami-2019) (TPAMI'2019) +- [x] [MSPN](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#mspn-arxiv-2019) (ArXiv'2019) +- [x] [SCNet](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#scnet-cvpr-2020) (CVPR'2020) +- [ ] [HigherHRNet](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#higherhrnet-cvpr-2020) (CVPR'2020) +- [x] [RSN](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#rsn-eccv-2020) (ECCV'2020) +- [ ] [InterNet](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/algorithms.html#internet-eccv-2020) (ECCV'2020) +- [ ] [VoxelPose](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/algorithms.html#voxelpose-eccv-2020) (ECCV'2020) +- [x] [LiteHRNet](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#litehrnet-cvpr-2021) (CVPR'2021) +- [x] [ViPNAS](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#vipnas-cvpr-2021) (CVPR'2021) +- [x] [Debias-IPR](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/algorithms.html#debias-ipr-iccv-2021) (ICCV'2021) +- [x] [SimCC](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/algorithms.html#simcc-eccv-2022) (ECCV'2022) + +
+ +
+支持的技术 + +- [x] [FPN](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/techniques.html#fpn-cvpr-2017) (CVPR'2017) +- [x] [FP16](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/techniques.html#fp16-arxiv-2017) (ArXiv'2017) +- [x] [Wingloss](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/techniques.html#wingloss-cvpr-2018) (CVPR'2018) +- [x] [AdaptiveWingloss](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/techniques.html#adaptivewingloss-iccv-2019) (ICCV'2019) +- [x] [DarkPose](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/techniques.html#darkpose-cvpr-2020) (CVPR'2020) +- [x] [UDP](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/techniques.html#udp-cvpr-2020) (CVPR'2020) +- [x] [Albumentations](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/techniques.html#albumentations-information-2020) (Information'2020) +- [x] [SoftWingloss](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/techniques.html#softwingloss-tip-2021) (TIP'2021) +- [x] [RLE](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/techniques.html#rle-iccv-2021) (ICCV'2021) + +
+ +
+支持的数据集
+
+- [x] [AFLW](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#aflw-iccvw-2011) \[[主页](https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/aflw/)\] (ICCVW'2011)
+- [x] [sub-JHMDB](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#jhmdb-iccv-2013) \[[主页](http://jhmdb.is.tue.mpg.de/dataset)\] (ICCV'2013)
+- [x] [COFW](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#cofw-iccv-2013) \[[主页](http://www.vision.caltech.edu/xpburgos/ICCV13/)\] (ICCV'2013)
+- [x] [MPII](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#mpii-cvpr-2014) \[[主页](http://human-pose.mpi-inf.mpg.de/)\] (CVPR'2014)
+- [x] [Human3.6M](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#human3-6m-tpami-2014) \[[主页](http://vision.imar.ro/human3.6m/description.php)\] (TPAMI'2014)
+- [x] [COCO](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#coco-eccv-2014) \[[主页](http://cocodataset.org/)\] (ECCV'2014)
+- [x] [CMU Panoptic](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#cmu-panoptic-iccv-2015) \[[主页](http://domedb.perception.cs.cmu.edu/)\] (ICCV'2015)
+- [x] [DeepFashion](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#deepfashion-cvpr-2016) \[[主页](http://mmlab.ie.cuhk.edu.hk/projects/DeepFashion/LandmarkDetection.html)\] (CVPR'2016)
+- [x] [300W](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#300w-imavis-2016) \[[主页](https://ibug.doc.ic.ac.uk/resources/300-W/)\] (IMAVIS'2016)
+- [x] [RHD](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#rhd-iccv-2017) \[[主页](https://lmb.informatik.uni-freiburg.de/resources/datasets/RenderedHandposeDataset.en.html)\] (ICCV'2017)
+- [x] [CMU Panoptic HandDB](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#cmu-panoptic-handdb-cvpr-2017) \[[主页](http://domedb.perception.cs.cmu.edu/handdb.html)\] (CVPR'2017)
+- [x] [AI Challenger](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#ai-challenger-arxiv-2017) \[[主页](https://github.com/AIChallenger/AI_Challenger_2017)\] (ArXiv'2017)
+- [x] [MHP](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#mhp-acm-mm-2018) \[[主页](https://lv-mhp.github.io/dataset)\] (ACM MM'2018)
+- [x] [WFLW](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#wflw-cvpr-2018) \[[主页](https://wywu.github.io/projects/LAB/WFLW.html)\] (CVPR'2018)
+- [x] [PoseTrack18](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#posetrack18-cvpr-2018) \[[主页](https://posetrack.net/users/download.php)\] (CVPR'2018)
+- [x] [OCHuman](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#ochuman-cvpr-2019) \[[主页](https://github.com/liruilong940607/OCHumanApi)\] (CVPR'2019)
+- [x] [CrowdPose](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#crowdpose-cvpr-2019) \[[主页](https://github.com/Jeff-sjtu/CrowdPose)\] (CVPR'2019)
+- [x] [MPII-TRB](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#mpii-trb-iccv-2019) \[[主页](https://github.com/kennymckormick/Triplet-Representation-of-human-Body)\] (ICCV'2019)
+- [x] [FreiHand](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#freihand-iccv-2019) \[[主页](https://lmb.informatik.uni-freiburg.de/projects/freihand/)\] (ICCV'2019)
+- [x] [Animal-Pose](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#animal-pose-iccv-2019) 
\[[主页](https://sites.google.com/view/animal-pose/)\] (ICCV'2019) +- [x] [OneHand10K](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#onehand10k-tcsvt-2019) \[[主页](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html)\] (TCSVT'2019) +- [x] [Vinegar Fly](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#vinegar-fly-nature-methods-2019) \[[主页](https://github.com/jgraving/DeepPoseKit-Data)\] (Nature Methods'2019) +- [x] [Desert Locust](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#desert-locust-elife-2019) \[[主页](https://github.com/jgraving/DeepPoseKit-Data)\] (Elife'2019) +- [x] [Grévy’s Zebra](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#grevys-zebra-elife-2019) \[[主页](https://github.com/jgraving/DeepPoseKit-Data)\] (Elife'2019) +- [x] [ATRW](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#atrw-acm-mm-2020) \[[主页](https://cvwc2019.github.io/challenge.html)\] (ACM MM'2020) +- [x] [Halpe](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#halpe-cvpr-2020) \[[主页](https://github.com/Fang-Haoshu/Halpe-FullBody/)\] (CVPR'2020) +- [x] [COCO-WholeBody](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#coco-wholebody-eccv-2020) \[[主页](https://github.com/jin-s13/COCO-WholeBody/)\] (ECCV'2020) +- [x] [MacaquePose](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#macaquepose-biorxiv-2020) \[[主页](http://www.pri.kyoto-u.ac.jp/datasets/macaquepose/index.html)\] (bioRxiv'2020) +- [x] [InterHand2.6M](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#interhand2-6m-eccv-2020) \[[主页](https://mks0601.github.io/InterHand2.6M/)\] (ECCV'2020) +- [x] [AP-10K](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/datasets.html#ap-10k-neurips-2021) \[[主页](https://github.com/AlexTheBad/AP-10K)\] (NeurIPS'2021) +- [x] [Horse-10](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#horse-10-wacv-2021) \[[主页](http://www.mackenziemathislab.org/horse10)\] (WACV'2021) +- [x] [Human-Art](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#human-art-cvpr-2023) \[[主页](https://idea-research.github.io/HumanArt/)\] (CVPR'2023) +- [x] [LaPa](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/datasets.html#lapa-aaai-2020) \[[主页](https://github.com/JDAI-CV/lapa-dataset)\] (AAAI'2020) + +
+ +
+支持的骨干网络 + +- [x] [AlexNet](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#alexnet-neurips-2012) (NeurIPS'2012) +- [x] [VGG](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#vgg-iclr-2015) (ICLR'2015) +- [x] [ResNet](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#resnet-cvpr-2016) (CVPR'2016) +- [x] [ResNext](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#resnext-cvpr-2017) (CVPR'2017) +- [x] [SEResNet](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#seresnet-cvpr-2018) (CVPR'2018) +- [x] [ShufflenetV1](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#shufflenetv1-cvpr-2018) (CVPR'2018) +- [x] [ShufflenetV2](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#shufflenetv2-eccv-2018) (ECCV'2018) +- [x] [MobilenetV2](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#mobilenetv2-cvpr-2018) (CVPR'2018) +- [x] [ResNetV1D](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#resnetv1d-cvpr-2019) (CVPR'2019) +- [x] [ResNeSt](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#resnest-arxiv-2020) (ArXiv'2020) +- [x] [Swin](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/backbones.html#swin-cvpr-2021) (CVPR'2021) +- [x] [HRFormer](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#hrformer-nips-2021) (NIPS'2021) +- [x] [PVT](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#pvt-iccv-2021) (ICCV'2021) +- [x] [PVTV2](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo_papers/backbones.html#pvtv2-cvmj-2022) (CVMJ'2022) + +
+ +### 模型需求 + +我们将跟进学界的最新进展,并支持更多算法和框架。如果您对 MMPose 有任何功能需求,请随时在 [MMPose Roadmap](https://github.com/open-mmlab/mmpose/issues/2258) 中留言。 + +## 参与贡献 + +我们非常欢迎用户对于 MMPose 做出的任何贡献,可以参考 [贡献指南](https://mmpose.readthedocs.io/zh_CN/latest/contribution_guide.html) 文件了解更多细节。 + +## 致谢 + +MMPose 是一款由不同学校和公司共同贡献的开源项目。我们感谢所有为项目提供算法复现和新功能支持的贡献者,以及提供宝贵反馈的用户。 +我们希望该工具箱和基准测试可以为社区提供灵活的代码工具,供用户复现现有算法并开发自己的新模型,从而不断为开源社区提供贡献。 + +## 引用 + +如果您觉得 MMPose 对您的研究有所帮助,请考虑引用它: + +```bibtex +@misc{mmpose2020, + title={OpenMMLab Pose Estimation Toolbox and Benchmark}, + author={MMPose Contributors}, + howpublished = {\url{https://github.com/open-mmlab/mmpose}}, + year={2020} +} +``` + +## 许可证 + +该项目采用 [Apache 2.0 license](LICENSE) 开源协议。 + +## OpenMMLab的其他项目 + +- [MMEngine](https://github.com/open-mmlab/mmengine): OpenMMLab 深度学习模型训练基础库 +- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab 计算机视觉基础库 +- [MMPreTrain](https://github.com/open-mmlab/mmpretrain): OpenMMLab 深度学习预训练工具箱 +- [MMagic](https://github.com/open-mmlab/mmagic): OpenMMLab 新一代人工智能内容生成(AIGC)工具箱 +- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab 目标检测工具箱 +- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab 新一代通用 3D 目标检测平台 +- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab 旋转框检测工具箱与测试基准 +- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab 一体化视频目标感知平台 +- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab 语义分割工具箱 +- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab 全流程文字检测识别理解工具包 +- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab 姿态估计工具箱 +- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 人体参数化模型工具箱与测试基准 +- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab 少样本学习工具箱与测试基准 +- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab 新一代视频理解工具箱 +- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab 光流估计工具箱与测试基准 +- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab 模型部署框架 +- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab 模型压缩工具箱与测试基准 +- [MIM](https://github.com/open-mmlab/mim): OpenMMlab 项目、算法、模型的统一入口 +- [Playground](https://github.com/open-mmlab/playground): 收集和展示 OpenMMLab 相关的前沿、有趣的社区项目 + +## 欢迎加入 OpenMMLab 社区 + +扫描下方的二维码可关注 OpenMMLab 团队的 [知乎官方账号](https://www.zhihu.com/people/openmmlab),联络 OpenMMLab [官方微信小助手](https://user-images.githubusercontent.com/25839884/205872898-e2e6009d-c6bb-4d27-8d07-117e697a3da8.jpg)或加入 OpenMMLab 团队的 [官方交流 QQ 群](https://jq.qq.com/?_wv=1027&k=K0QI8ByU) + +
+
+
+
+In the OpenMMLab community, we will
+
+- 📢 share the cutting-edge core technologies of AI frameworks
+- 💻 explain the source code of commonly used PyTorch modules
+- 📰 publish news about OpenMMLab
+- 🚀 introduce cutting-edge algorithms developed by OpenMMLab
+- 🏃 offer more efficient answers to your questions and channels for feedback
+- 🔥 provide a platform for in-depth exchanges with developers from all walks of life
+
+Packed with useful content 📘 and waiting for you 💗, the OpenMMLab community looks forward to having you 👬
diff --git a/configs/_base_/datasets/300w.py b/configs/_base_/datasets/300w.py
index 2c3728da1d..29200238b2 100644
--- a/configs/_base_/datasets/300w.py
+++ b/configs/_base_/datasets/300w.py
@@ -1,134 +1,134 @@
-dataset_info = dict(
-    dataset_name='300w',
-    paper_info=dict(
-        author='Sagonas, Christos and Antonakos, Epameinondas '
-        'and Tzimiropoulos, Georgios and Zafeiriou, Stefanos '
-        'and Pantic, Maja',
-        title='300 faces in-the-wild challenge: '
-        'Database and results',
-        container='Image and vision computing',
-        year='2016',
-        homepage='https://ibug.doc.ic.ac.uk/resources/300-W/',
-    ),
-    keypoint_info={
-        0: dict(name='kpt-0', id=0, color=[255, 0, 0], type='', swap='kpt-16'),
-        1: dict(name='kpt-1', id=1, color=[255, 0, 0], type='', swap='kpt-15'),
-        2: dict(name='kpt-2', id=2, color=[255, 0, 0], type='', swap='kpt-14'),
-        3: dict(name='kpt-3', id=3, color=[255, 0, 0], type='', swap='kpt-13'),
-        4: dict(name='kpt-4', id=4, color=[255, 0, 0], type='', swap='kpt-12'),
-        5: dict(name='kpt-5', id=5, color=[255, 0, 0], type='', swap='kpt-11'),
-        6: dict(name='kpt-6', id=6, color=[255, 0, 0], type='', swap='kpt-10'),
-        7: dict(name='kpt-7', id=7, color=[255, 0, 0], type='', swap='kpt-9'),
-        8: dict(name='kpt-8', id=8, color=[255, 0, 0], type='', swap=''),
-        9: dict(name='kpt-9', id=9, color=[255, 0, 0], type='', swap='kpt-7'),
-        10:
-        dict(name='kpt-10', id=10, color=[255, 0, 0], type='', swap='kpt-6'),
-        11:
-        dict(name='kpt-11', id=11, color=[255, 0, 0], type='', swap='kpt-5'),
-        12:
-        dict(name='kpt-12', id=12, color=[255, 0, 0], type='', swap='kpt-4'),
-        13:
-        dict(name='kpt-13', id=13, color=[255, 0, 0], type='', swap='kpt-3'),
-        14:
-        dict(name='kpt-14', id=14, color=[255, 0, 0], type='', swap='kpt-2'),
-        15:
-        dict(name='kpt-15', id=15, color=[255, 0, 0], type='', swap='kpt-1'),
-        16:
-        dict(name='kpt-16', id=16, color=[255, 0, 0], type='', swap='kpt-0'),
-        17:
-        dict(name='kpt-17', id=17, color=[255, 0, 0], type='', swap='kpt-26'),
-        18:
-        dict(name='kpt-18', id=18, color=[255, 0, 0], type='', swap='kpt-25'),
-        19:
-        dict(name='kpt-19', id=19, color=[255, 0, 0], type='', swap='kpt-24'),
-        20:
-        dict(name='kpt-20', id=20, color=[255, 0, 0], type='', swap='kpt-23'),
-        21:
-        dict(name='kpt-21', id=21, color=[255, 0, 0], type='', swap='kpt-22'),
-        22:
-        dict(name='kpt-22', id=22, color=[255, 0, 0], type='', swap='kpt-21'),
-        23:
-        dict(name='kpt-23', id=23, color=[255, 0, 0], type='', swap='kpt-20'),
-        24:
-        dict(name='kpt-24', id=24, color=[255, 0, 0], type='', swap='kpt-19'),
-        25:
-        dict(name='kpt-25', id=25, color=[255, 0, 0], type='', swap='kpt-18'),
-        26:
-        dict(name='kpt-26', id=26, color=[255, 0, 0], type='', swap='kpt-17'),
-        27: dict(name='kpt-27', id=27, color=[255, 0, 0], type='', swap=''),
-        28: dict(name='kpt-28', id=28, color=[255, 0, 0], type='', swap=''),
-        29: dict(name='kpt-29', id=29, color=[255, 0, 0], type='', swap=''),
-        30: dict(name='kpt-30', id=30, color=[255, 0, 0], type='', swap=''),
-        31:
-        dict(name='kpt-31', id=31, color=[255, 0, 0], type='', swap='kpt-35'),
-        32:
-        dict(name='kpt-32', id=32, color=[255, 0, 0], type='', swap='kpt-34'),
-        33: dict(name='kpt-33', id=33, color=[255, 0, 0], type='', swap=''),
-        34:
-        dict(name='kpt-34', id=34, color=[255, 0, 0], type='', swap='kpt-32'),
-        35:
-        dict(name='kpt-35', id=35, color=[255, 0, 0], type='', 
swap='kpt-31'), - 36: - dict(name='kpt-36', id=36, color=[255, 0, 0], type='', swap='kpt-45'), - 37: - dict(name='kpt-37', id=37, color=[255, 0, 0], type='', swap='kpt-44'), - 38: - dict(name='kpt-38', id=38, color=[255, 0, 0], type='', swap='kpt-43'), - 39: - dict(name='kpt-39', id=39, color=[255, 0, 0], type='', swap='kpt-42'), - 40: - dict(name='kpt-40', id=40, color=[255, 0, 0], type='', swap='kpt-47'), - 41: dict( - name='kpt-41', id=41, color=[255, 0, 0], type='', swap='kpt-46'), - 42: dict( - name='kpt-42', id=42, color=[255, 0, 0], type='', swap='kpt-39'), - 43: dict( - name='kpt-43', id=43, color=[255, 0, 0], type='', swap='kpt-38'), - 44: dict( - name='kpt-44', id=44, color=[255, 0, 0], type='', swap='kpt-37'), - 45: dict( - name='kpt-45', id=45, color=[255, 0, 0], type='', swap='kpt-36'), - 46: dict( - name='kpt-46', id=46, color=[255, 0, 0], type='', swap='kpt-41'), - 47: dict( - name='kpt-47', id=47, color=[255, 0, 0], type='', swap='kpt-40'), - 48: dict( - name='kpt-48', id=48, color=[255, 0, 0], type='', swap='kpt-54'), - 49: dict( - name='kpt-49', id=49, color=[255, 0, 0], type='', swap='kpt-53'), - 50: dict( - name='kpt-50', id=50, color=[255, 0, 0], type='', swap='kpt-52'), - 51: dict(name='kpt-51', id=51, color=[255, 0, 0], type='', swap=''), - 52: dict( - name='kpt-52', id=52, color=[255, 0, 0], type='', swap='kpt-50'), - 53: dict( - name='kpt-53', id=53, color=[255, 0, 0], type='', swap='kpt-49'), - 54: dict( - name='kpt-54', id=54, color=[255, 0, 0], type='', swap='kpt-48'), - 55: dict( - name='kpt-55', id=55, color=[255, 0, 0], type='', swap='kpt-59'), - 56: dict( - name='kpt-56', id=56, color=[255, 0, 0], type='', swap='kpt-58'), - 57: dict(name='kpt-57', id=57, color=[255, 0, 0], type='', swap=''), - 58: dict( - name='kpt-58', id=58, color=[255, 0, 0], type='', swap='kpt-56'), - 59: dict( - name='kpt-59', id=59, color=[255, 0, 0], type='', swap='kpt-55'), - 60: dict( - name='kpt-60', id=60, color=[255, 0, 0], type='', swap='kpt-64'), - 61: dict( - name='kpt-61', id=61, color=[255, 0, 0], type='', swap='kpt-63'), - 62: dict(name='kpt-62', id=62, color=[255, 0, 0], type='', swap=''), - 63: dict( - name='kpt-63', id=63, color=[255, 0, 0], type='', swap='kpt-61'), - 64: dict( - name='kpt-64', id=64, color=[255, 0, 0], type='', swap='kpt-60'), - 65: dict( - name='kpt-65', id=65, color=[255, 0, 0], type='', swap='kpt-67'), - 66: dict(name='kpt-66', id=66, color=[255, 0, 0], type='', swap=''), - 67: dict( - name='kpt-67', id=67, color=[255, 0, 0], type='', swap='kpt-65'), - }, - skeleton_info={}, - joint_weights=[1.] 
* 68, - sigmas=[]) +dataset_info = dict( + dataset_name='300w', + paper_info=dict( + author='Sagonas, Christos and Antonakos, Epameinondas ' + 'and Tzimiropoulos, Georgios and Zafeiriou, Stefanos ' + 'and Pantic, Maja', + title='300 faces in-the-wild challenge: ' + 'Database and results', + container='Image and vision computing', + year='2016', + homepage='https://ibug.doc.ic.ac.uk/resources/300-W/', + ), + keypoint_info={ + 0: dict(name='kpt-0', id=0, color=[255, 0, 0], type='', swap='kpt-16'), + 1: dict(name='kpt-1', id=1, color=[255, 0, 0], type='', swap='kpt-15'), + 2: dict(name='kpt-2', id=2, color=[255, 0, 0], type='', swap='kpt-14'), + 3: dict(name='kpt-3', id=3, color=[255, 0, 0], type='', swap='kpt-13'), + 4: dict(name='kpt-4', id=4, color=[255, 0, 0], type='', swap='kpt-12'), + 5: dict(name='kpt-5', id=5, color=[255, 0, 0], type='', swap='kpt-11'), + 6: dict(name='kpt-6', id=6, color=[255, 0, 0], type='', swap='kpt-10'), + 7: dict(name='kpt-7', id=7, color=[255, 0, 0], type='', swap='kpt-9'), + 8: dict(name='kpt-8', id=8, color=[255, 0, 0], type='', swap=''), + 9: dict(name='kpt-9', id=9, color=[255, 0, 0], type='', swap='kpt-7'), + 10: + dict(name='kpt-10', id=10, color=[255, 0, 0], type='', swap='kpt-6'), + 11: + dict(name='kpt-11', id=11, color=[255, 0, 0], type='', swap='kpt-5'), + 12: + dict(name='kpt-12', id=12, color=[255, 0, 0], type='', swap='kpt-4'), + 13: + dict(name='kpt-13', id=13, color=[255, 0, 0], type='', swap='kpt-3'), + 14: + dict(name='kpt-14', id=14, color=[255, 0, 0], type='', swap='kpt-2'), + 15: + dict(name='kpt-15', id=15, color=[255, 0, 0], type='', swap='kpt-1'), + 16: + dict(name='kpt-16', id=16, color=[255, 0, 0], type='', swap='kpt-0'), + 17: + dict(name='kpt-17', id=17, color=[255, 0, 0], type='', swap='kpt-26'), + 18: + dict(name='kpt-18', id=18, color=[255, 0, 0], type='', swap='kpt-25'), + 19: + dict(name='kpt-19', id=19, color=[255, 0, 0], type='', swap='kpt-24'), + 20: + dict(name='kpt-20', id=20, color=[255, 0, 0], type='', swap='kpt-23'), + 21: + dict(name='kpt-21', id=21, color=[255, 0, 0], type='', swap='kpt-22'), + 22: + dict(name='kpt-22', id=22, color=[255, 0, 0], type='', swap='kpt-21'), + 23: + dict(name='kpt-23', id=23, color=[255, 0, 0], type='', swap='kpt-20'), + 24: + dict(name='kpt-24', id=24, color=[255, 0, 0], type='', swap='kpt-19'), + 25: + dict(name='kpt-25', id=25, color=[255, 0, 0], type='', swap='kpt-18'), + 26: + dict(name='kpt-26', id=26, color=[255, 0, 0], type='', swap='kpt-17'), + 27: dict(name='kpt-27', id=27, color=[255, 0, 0], type='', swap=''), + 28: dict(name='kpt-28', id=28, color=[255, 0, 0], type='', swap=''), + 29: dict(name='kpt-29', id=29, color=[255, 0, 0], type='', swap=''), + 30: dict(name='kpt-30', id=30, color=[255, 0, 0], type='', swap=''), + 31: + dict(name='kpt-31', id=31, color=[255, 0, 0], type='', swap='kpt-35'), + 32: + dict(name='kpt-32', id=32, color=[255, 0, 0], type='', swap='kpt-34'), + 33: dict(name='kpt-33', id=33, color=[255, 0, 0], type='', swap=''), + 34: + dict(name='kpt-34', id=34, color=[255, 0, 0], type='', swap='kpt-32'), + 35: + dict(name='kpt-35', id=35, color=[255, 0, 0], type='', swap='kpt-31'), + 36: + dict(name='kpt-36', id=36, color=[255, 0, 0], type='', swap='kpt-45'), + 37: + dict(name='kpt-37', id=37, color=[255, 0, 0], type='', swap='kpt-44'), + 38: + dict(name='kpt-38', id=38, color=[255, 0, 0], type='', swap='kpt-43'), + 39: + dict(name='kpt-39', id=39, color=[255, 0, 0], type='', swap='kpt-42'), + 40: + dict(name='kpt-40', id=40, color=[255, 0, 0], type='', 
swap='kpt-47'), + 41: dict( + name='kpt-41', id=41, color=[255, 0, 0], type='', swap='kpt-46'), + 42: dict( + name='kpt-42', id=42, color=[255, 0, 0], type='', swap='kpt-39'), + 43: dict( + name='kpt-43', id=43, color=[255, 0, 0], type='', swap='kpt-38'), + 44: dict( + name='kpt-44', id=44, color=[255, 0, 0], type='', swap='kpt-37'), + 45: dict( + name='kpt-45', id=45, color=[255, 0, 0], type='', swap='kpt-36'), + 46: dict( + name='kpt-46', id=46, color=[255, 0, 0], type='', swap='kpt-41'), + 47: dict( + name='kpt-47', id=47, color=[255, 0, 0], type='', swap='kpt-40'), + 48: dict( + name='kpt-48', id=48, color=[255, 0, 0], type='', swap='kpt-54'), + 49: dict( + name='kpt-49', id=49, color=[255, 0, 0], type='', swap='kpt-53'), + 50: dict( + name='kpt-50', id=50, color=[255, 0, 0], type='', swap='kpt-52'), + 51: dict(name='kpt-51', id=51, color=[255, 0, 0], type='', swap=''), + 52: dict( + name='kpt-52', id=52, color=[255, 0, 0], type='', swap='kpt-50'), + 53: dict( + name='kpt-53', id=53, color=[255, 0, 0], type='', swap='kpt-49'), + 54: dict( + name='kpt-54', id=54, color=[255, 0, 0], type='', swap='kpt-48'), + 55: dict( + name='kpt-55', id=55, color=[255, 0, 0], type='', swap='kpt-59'), + 56: dict( + name='kpt-56', id=56, color=[255, 0, 0], type='', swap='kpt-58'), + 57: dict(name='kpt-57', id=57, color=[255, 0, 0], type='', swap=''), + 58: dict( + name='kpt-58', id=58, color=[255, 0, 0], type='', swap='kpt-56'), + 59: dict( + name='kpt-59', id=59, color=[255, 0, 0], type='', swap='kpt-55'), + 60: dict( + name='kpt-60', id=60, color=[255, 0, 0], type='', swap='kpt-64'), + 61: dict( + name='kpt-61', id=61, color=[255, 0, 0], type='', swap='kpt-63'), + 62: dict(name='kpt-62', id=62, color=[255, 0, 0], type='', swap=''), + 63: dict( + name='kpt-63', id=63, color=[255, 0, 0], type='', swap='kpt-61'), + 64: dict( + name='kpt-64', id=64, color=[255, 0, 0], type='', swap='kpt-60'), + 65: dict( + name='kpt-65', id=65, color=[255, 0, 0], type='', swap='kpt-67'), + 66: dict(name='kpt-66', id=66, color=[255, 0, 0], type='', swap=''), + 67: dict( + name='kpt-67', id=67, color=[255, 0, 0], type='', swap='kpt-65'), + }, + skeleton_info={}, + joint_weights=[1.] 
* 68, + sigmas=[]) diff --git a/configs/_base_/datasets/aflw.py b/configs/_base_/datasets/aflw.py index cf5e10964d..e092de6021 100644 --- a/configs/_base_/datasets/aflw.py +++ b/configs/_base_/datasets/aflw.py @@ -1,44 +1,44 @@ -dataset_info = dict( - dataset_name='aflw', - paper_info=dict( - author='Koestinger, Martin and Wohlhart, Paul and ' - 'Roth, Peter M and Bischof, Horst', - title='Annotated facial landmarks in the wild: ' - 'A large-scale, real-world database for facial ' - 'landmark localization', - container='2011 IEEE international conference on computer ' - 'vision workshops (ICCV workshops)', - year='2011', - homepage='https://www.tugraz.at/institute/icg/research/' - 'team-bischof/lrs/downloads/aflw/', - ), - keypoint_info={ - 0: dict(name='kpt-0', id=0, color=[255, 0, 0], type='', swap='kpt-5'), - 1: dict(name='kpt-1', id=1, color=[255, 0, 0], type='', swap='kpt-4'), - 2: dict(name='kpt-2', id=2, color=[255, 0, 0], type='', swap='kpt-3'), - 3: dict(name='kpt-3', id=3, color=[255, 0, 0], type='', swap='kpt-2'), - 4: dict(name='kpt-4', id=4, color=[255, 0, 0], type='', swap='kpt-1'), - 5: dict(name='kpt-5', id=5, color=[255, 0, 0], type='', swap='kpt-0'), - 6: dict(name='kpt-6', id=6, color=[255, 0, 0], type='', swap='kpt-11'), - 7: dict(name='kpt-7', id=7, color=[255, 0, 0], type='', swap='kpt-10'), - 8: dict(name='kpt-8', id=8, color=[255, 0, 0], type='', swap='kpt-9'), - 9: dict(name='kpt-9', id=9, color=[255, 0, 0], type='', swap='kpt-8'), - 10: - dict(name='kpt-10', id=10, color=[255, 0, 0], type='', swap='kpt-7'), - 11: - dict(name='kpt-11', id=11, color=[255, 0, 0], type='', swap='kpt-6'), - 12: - dict(name='kpt-12', id=12, color=[255, 0, 0], type='', swap='kpt-14'), - 13: dict(name='kpt-13', id=13, color=[255, 0, 0], type='', swap=''), - 14: - dict(name='kpt-14', id=14, color=[255, 0, 0], type='', swap='kpt-12'), - 15: - dict(name='kpt-15', id=15, color=[255, 0, 0], type='', swap='kpt-17'), - 16: dict(name='kpt-16', id=16, color=[255, 0, 0], type='', swap=''), - 17: - dict(name='kpt-17', id=17, color=[255, 0, 0], type='', swap='kpt-15'), - 18: dict(name='kpt-18', id=18, color=[255, 0, 0], type='', swap='') - }, - skeleton_info={}, - joint_weights=[1.] 
* 19, - sigmas=[]) +dataset_info = dict( + dataset_name='aflw', + paper_info=dict( + author='Koestinger, Martin and Wohlhart, Paul and ' + 'Roth, Peter M and Bischof, Horst', + title='Annotated facial landmarks in the wild: ' + 'A large-scale, real-world database for facial ' + 'landmark localization', + container='2011 IEEE international conference on computer ' + 'vision workshops (ICCV workshops)', + year='2011', + homepage='https://www.tugraz.at/institute/icg/research/' + 'team-bischof/lrs/downloads/aflw/', + ), + keypoint_info={ + 0: dict(name='kpt-0', id=0, color=[255, 0, 0], type='', swap='kpt-5'), + 1: dict(name='kpt-1', id=1, color=[255, 0, 0], type='', swap='kpt-4'), + 2: dict(name='kpt-2', id=2, color=[255, 0, 0], type='', swap='kpt-3'), + 3: dict(name='kpt-3', id=3, color=[255, 0, 0], type='', swap='kpt-2'), + 4: dict(name='kpt-4', id=4, color=[255, 0, 0], type='', swap='kpt-1'), + 5: dict(name='kpt-5', id=5, color=[255, 0, 0], type='', swap='kpt-0'), + 6: dict(name='kpt-6', id=6, color=[255, 0, 0], type='', swap='kpt-11'), + 7: dict(name='kpt-7', id=7, color=[255, 0, 0], type='', swap='kpt-10'), + 8: dict(name='kpt-8', id=8, color=[255, 0, 0], type='', swap='kpt-9'), + 9: dict(name='kpt-9', id=9, color=[255, 0, 0], type='', swap='kpt-8'), + 10: + dict(name='kpt-10', id=10, color=[255, 0, 0], type='', swap='kpt-7'), + 11: + dict(name='kpt-11', id=11, color=[255, 0, 0], type='', swap='kpt-6'), + 12: + dict(name='kpt-12', id=12, color=[255, 0, 0], type='', swap='kpt-14'), + 13: dict(name='kpt-13', id=13, color=[255, 0, 0], type='', swap=''), + 14: + dict(name='kpt-14', id=14, color=[255, 0, 0], type='', swap='kpt-12'), + 15: + dict(name='kpt-15', id=15, color=[255, 0, 0], type='', swap='kpt-17'), + 16: dict(name='kpt-16', id=16, color=[255, 0, 0], type='', swap=''), + 17: + dict(name='kpt-17', id=17, color=[255, 0, 0], type='', swap='kpt-15'), + 18: dict(name='kpt-18', id=18, color=[255, 0, 0], type='', swap='') + }, + skeleton_info={}, + joint_weights=[1.] 
* 19, + sigmas=[]) diff --git a/configs/_base_/datasets/aic.py b/configs/_base_/datasets/aic.py index 9ecdbe3f0a..8d30f60600 100644 --- a/configs/_base_/datasets/aic.py +++ b/configs/_base_/datasets/aic.py @@ -1,140 +1,140 @@ -dataset_info = dict( - dataset_name='aic', - paper_info=dict( - author='Wu, Jiahong and Zheng, He and Zhao, Bo and ' - 'Li, Yixin and Yan, Baoming and Liang, Rui and ' - 'Wang, Wenjia and Zhou, Shipei and Lin, Guosen and ' - 'Fu, Yanwei and others', - title='Ai challenger: A large-scale dataset for going ' - 'deeper in image understanding', - container='arXiv', - year='2017', - homepage='https://github.com/AIChallenger/AI_Challenger_2017', - ), - keypoint_info={ - 0: - dict( - name='right_shoulder', - id=0, - color=[255, 128, 0], - type='upper', - swap='left_shoulder'), - 1: - dict( - name='right_elbow', - id=1, - color=[255, 128, 0], - type='upper', - swap='left_elbow'), - 2: - dict( - name='right_wrist', - id=2, - color=[255, 128, 0], - type='upper', - swap='left_wrist'), - 3: - dict( - name='left_shoulder', - id=3, - color=[0, 255, 0], - type='upper', - swap='right_shoulder'), - 4: - dict( - name='left_elbow', - id=4, - color=[0, 255, 0], - type='upper', - swap='right_elbow'), - 5: - dict( - name='left_wrist', - id=5, - color=[0, 255, 0], - type='upper', - swap='right_wrist'), - 6: - dict( - name='right_hip', - id=6, - color=[255, 128, 0], - type='lower', - swap='left_hip'), - 7: - dict( - name='right_knee', - id=7, - color=[255, 128, 0], - type='lower', - swap='left_knee'), - 8: - dict( - name='right_ankle', - id=8, - color=[255, 128, 0], - type='lower', - swap='left_ankle'), - 9: - dict( - name='left_hip', - id=9, - color=[0, 255, 0], - type='lower', - swap='right_hip'), - 10: - dict( - name='left_knee', - id=10, - color=[0, 255, 0], - type='lower', - swap='right_knee'), - 11: - dict( - name='left_ankle', - id=11, - color=[0, 255, 0], - type='lower', - swap='right_ankle'), - 12: - dict( - name='head_top', - id=12, - color=[51, 153, 255], - type='upper', - swap=''), - 13: - dict(name='neck', id=13, color=[51, 153, 255], type='upper', swap='') - }, - skeleton_info={ - 0: - dict(link=('right_wrist', 'right_elbow'), id=0, color=[255, 128, 0]), - 1: dict( - link=('right_elbow', 'right_shoulder'), id=1, color=[255, 128, 0]), - 2: dict(link=('right_shoulder', 'neck'), id=2, color=[51, 153, 255]), - 3: dict(link=('neck', 'left_shoulder'), id=3, color=[51, 153, 255]), - 4: dict(link=('left_shoulder', 'left_elbow'), id=4, color=[0, 255, 0]), - 5: dict(link=('left_elbow', 'left_wrist'), id=5, color=[0, 255, 0]), - 6: dict(link=('right_ankle', 'right_knee'), id=6, color=[255, 128, 0]), - 7: dict(link=('right_knee', 'right_hip'), id=7, color=[255, 128, 0]), - 8: dict(link=('right_hip', 'left_hip'), id=8, color=[51, 153, 255]), - 9: dict(link=('left_hip', 'left_knee'), id=9, color=[0, 255, 0]), - 10: dict(link=('left_knee', 'left_ankle'), id=10, color=[0, 255, 0]), - 11: dict(link=('head_top', 'neck'), id=11, color=[51, 153, 255]), - 12: dict( - link=('right_shoulder', 'right_hip'), id=12, color=[51, 153, 255]), - 13: - dict(link=('left_shoulder', 'left_hip'), id=13, color=[51, 153, 255]) - }, - joint_weights=[ - 1., 1.2, 1.5, 1., 1.2, 1.5, 1., 1.2, 1.5, 1., 1.2, 1.5, 1., 1. 
- ], - - # 'https://github.com/AIChallenger/AI_Challenger_2017/blob/master/' - # 'Evaluation/keypoint_eval/keypoint_eval.py#L50' - # delta = 2 x sigma - sigmas=[ - 0.01388152, 0.01515228, 0.01057665, 0.01417709, 0.01497891, 0.01402144, - 0.03909642, 0.03686941, 0.01981803, 0.03843971, 0.03412318, 0.02415081, - 0.01291456, 0.01236173 - ]) +dataset_info = dict( + dataset_name='aic', + paper_info=dict( + author='Wu, Jiahong and Zheng, He and Zhao, Bo and ' + 'Li, Yixin and Yan, Baoming and Liang, Rui and ' + 'Wang, Wenjia and Zhou, Shipei and Lin, Guosen and ' + 'Fu, Yanwei and others', + title='Ai challenger: A large-scale dataset for going ' + 'deeper in image understanding', + container='arXiv', + year='2017', + homepage='https://github.com/AIChallenger/AI_Challenger_2017', + ), + keypoint_info={ + 0: + dict( + name='right_shoulder', + id=0, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 1: + dict( + name='right_elbow', + id=1, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 2: + dict( + name='right_wrist', + id=2, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 3: + dict( + name='left_shoulder', + id=3, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 4: + dict( + name='left_elbow', + id=4, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 5: + dict( + name='left_wrist', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 6: + dict( + name='right_hip', + id=6, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 7: + dict( + name='right_knee', + id=7, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 8: + dict( + name='right_ankle', + id=8, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 9: + dict( + name='left_hip', + id=9, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 10: + dict( + name='left_knee', + id=10, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 11: + dict( + name='left_ankle', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 12: + dict( + name='head_top', + id=12, + color=[51, 153, 255], + type='upper', + swap=''), + 13: + dict(name='neck', id=13, color=[51, 153, 255], type='upper', swap='') + }, + skeleton_info={ + 0: + dict(link=('right_wrist', 'right_elbow'), id=0, color=[255, 128, 0]), + 1: dict( + link=('right_elbow', 'right_shoulder'), id=1, color=[255, 128, 0]), + 2: dict(link=('right_shoulder', 'neck'), id=2, color=[51, 153, 255]), + 3: dict(link=('neck', 'left_shoulder'), id=3, color=[51, 153, 255]), + 4: dict(link=('left_shoulder', 'left_elbow'), id=4, color=[0, 255, 0]), + 5: dict(link=('left_elbow', 'left_wrist'), id=5, color=[0, 255, 0]), + 6: dict(link=('right_ankle', 'right_knee'), id=6, color=[255, 128, 0]), + 7: dict(link=('right_knee', 'right_hip'), id=7, color=[255, 128, 0]), + 8: dict(link=('right_hip', 'left_hip'), id=8, color=[51, 153, 255]), + 9: dict(link=('left_hip', 'left_knee'), id=9, color=[0, 255, 0]), + 10: dict(link=('left_knee', 'left_ankle'), id=10, color=[0, 255, 0]), + 11: dict(link=('head_top', 'neck'), id=11, color=[51, 153, 255]), + 12: dict( + link=('right_shoulder', 'right_hip'), id=12, color=[51, 153, 255]), + 13: + dict(link=('left_shoulder', 'left_hip'), id=13, color=[51, 153, 255]) + }, + joint_weights=[ + 1., 1.2, 1.5, 1., 1.2, 1.5, 1., 1.2, 1.5, 1., 1.2, 1.5, 1., 1. 
+ ], + + # 'https://github.com/AIChallenger/AI_Challenger_2017/blob/master/' + # 'Evaluation/keypoint_eval/keypoint_eval.py#L50' + # delta = 2 x sigma + sigmas=[ + 0.01388152, 0.01515228, 0.01057665, 0.01417709, 0.01497891, 0.01402144, + 0.03909642, 0.03686941, 0.01981803, 0.03843971, 0.03412318, 0.02415081, + 0.01291456, 0.01236173 + ]) diff --git a/configs/_base_/datasets/ak.py b/configs/_base_/datasets/ak.py index e8b12f5a31..ddc9e27064 100644 --- a/configs/_base_/datasets/ak.py +++ b/configs/_base_/datasets/ak.py @@ -1,267 +1,267 @@ -dataset_info = dict( - dataset_name='Animal Kingdom', - paper_info=dict( - author='Singapore University of Technology and Design, Singapore.' - ' Xun Long Ng, Kian Eng Ong, Qichen Zheng,' - ' Yun Ni, Si Yong Yeo, Jun Liu.', - title='Animal Kingdom: ' - 'A Large and Diverse Dataset for Animal Behavior Understanding', - container='Conference on Computer Vision ' - 'and Pattern Recognition (CVPR)', - year='2022', - homepage='https://sutdcv.github.io/Animal-Kingdom', - version='1.0 (2022-06)', - date_created='2022-06', - ), - keypoint_info={ - 0: - dict( - name='Head_Mid_Top', - id=0, - color=(225, 0, 255), - type='upper', - swap=''), - 1: - dict( - name='Eye_Left', - id=1, - color=[220, 20, 60], - type='upper', - swap='Eye_Right'), - 2: - dict( - name='Eye_Right', - id=2, - color=[0, 255, 255], - type='upper', - swap='Eye_Left'), - 3: - dict( - name='Mouth_Front_Top', - id=3, - color=(0, 255, 42), - type='upper', - swap=''), - 4: - dict( - name='Mouth_Back_Left', - id=4, - color=[221, 160, 221], - type='upper', - swap='Mouth_Back_Right'), - 5: - dict( - name='Mouth_Back_Right', - id=5, - color=[135, 206, 250], - type='upper', - swap='Mouth_Back_Left'), - 6: - dict( - name='Mouth_Front_Bottom', - id=6, - color=[50, 205, 50], - type='upper', - swap=''), - 7: - dict( - name='Shoulder_Left', - id=7, - color=[255, 182, 193], - type='upper', - swap='Shoulder_Right'), - 8: - dict( - name='Shoulder_Right', - id=8, - color=[0, 191, 255], - type='upper', - swap='Shoulder_Left'), - 9: - dict( - name='Elbow_Left', - id=9, - color=[255, 105, 180], - type='upper', - swap='Elbow_Right'), - 10: - dict( - name='Elbow_Right', - id=10, - color=[30, 144, 255], - type='upper', - swap='Elbow_Left'), - 11: - dict( - name='Wrist_Left', - id=11, - color=[255, 20, 147], - type='upper', - swap='Wrist_Right'), - 12: - dict( - name='Wrist_Right', - id=12, - color=[0, 0, 255], - type='upper', - swap='Wrist_Left'), - 13: - dict( - name='Torso_Mid_Back', - id=13, - color=(185, 3, 221), - type='upper', - swap=''), - 14: - dict( - name='Hip_Left', - id=14, - color=[255, 215, 0], - type='lower', - swap='Hip_Right'), - 15: - dict( - name='Hip_Right', - id=15, - color=[147, 112, 219], - type='lower', - swap='Hip_Left'), - 16: - dict( - name='Knee_Left', - id=16, - color=[255, 165, 0], - type='lower', - swap='Knee_Right'), - 17: - dict( - name='Knee_Right', - id=17, - color=[138, 43, 226], - type='lower', - swap='Knee_Left'), - 18: - dict( - name='Ankle_Left', - id=18, - color=[255, 140, 0], - type='lower', - swap='Ankle_Right'), - 19: - dict( - name='Ankle_Right', - id=19, - color=[128, 0, 128], - type='lower', - swap='Ankle_Left'), - 20: - dict( - name='Tail_Top_Back', - id=20, - color=(0, 251, 255), - type='lower', - swap=''), - 21: - dict( - name='Tail_Mid_Back', - id=21, - color=[32, 178, 170], - type='lower', - swap=''), - 22: - dict( - name='Tail_End_Back', - id=22, - color=(0, 102, 102), - type='lower', - swap='') - }, - skeleton_info={ - 0: - dict(link=('Eye_Left', 'Head_Mid_Top'), id=0, 
color=[220, 20, 60]), - 1: - dict(link=('Eye_Right', 'Head_Mid_Top'), id=1, color=[0, 255, 255]), - 2: - dict( - link=('Mouth_Front_Top', 'Mouth_Back_Left'), - id=2, - color=[221, 160, 221]), - 3: - dict( - link=('Mouth_Front_Top', 'Mouth_Back_Right'), - id=3, - color=[135, 206, 250]), - 4: - dict( - link=('Mouth_Front_Bottom', 'Mouth_Back_Left'), - id=4, - color=[221, 160, 221]), - 5: - dict( - link=('Mouth_Front_Bottom', 'Mouth_Back_Right'), - id=5, - color=[135, 206, 250]), - 6: - dict( - link=('Head_Mid_Top', 'Torso_Mid_Back'), id=6, - color=(225, 0, 255)), - 7: - dict( - link=('Torso_Mid_Back', 'Tail_Top_Back'), - id=7, - color=(185, 3, 221)), - 8: - dict( - link=('Tail_Top_Back', 'Tail_Mid_Back'), id=8, - color=(0, 251, 255)), - 9: - dict( - link=('Tail_Mid_Back', 'Tail_End_Back'), - id=9, - color=[32, 178, 170]), - 10: - dict( - link=('Head_Mid_Top', 'Shoulder_Left'), - id=10, - color=[255, 182, 193]), - 11: - dict( - link=('Head_Mid_Top', 'Shoulder_Right'), - id=11, - color=[0, 191, 255]), - 12: - dict( - link=('Shoulder_Left', 'Elbow_Left'), id=12, color=[255, 105, - 180]), - 13: - dict( - link=('Shoulder_Right', 'Elbow_Right'), - id=13, - color=[30, 144, 255]), - 14: - dict(link=('Elbow_Left', 'Wrist_Left'), id=14, color=[255, 20, 147]), - 15: - dict(link=('Elbow_Right', 'Wrist_Right'), id=15, color=[0, 0, 255]), - 16: - dict(link=('Tail_Top_Back', 'Hip_Left'), id=16, color=[255, 215, 0]), - 17: - dict( - link=('Tail_Top_Back', 'Hip_Right'), id=17, color=[147, 112, 219]), - 18: - dict(link=('Hip_Left', 'Knee_Left'), id=18, color=[255, 165, 0]), - 19: - dict(link=('Hip_Right', 'Knee_Right'), id=19, color=[138, 43, 226]), - 20: - dict(link=('Knee_Left', 'Ankle_Left'), id=20, color=[255, 140, 0]), - 21: - dict(link=('Knee_Right', 'Ankle_Right'), id=21, color=[128, 0, 128]) - }, - joint_weights=[ - 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., - 1., 1., 1., 1., 1. - ], - sigmas=[ - 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, - 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, - 0.025, 0.025, 0.025 - ]) +dataset_info = dict( + dataset_name='Animal Kingdom', + paper_info=dict( + author='Singapore University of Technology and Design, Singapore.' 
+ ' Xun Long Ng, Kian Eng Ong, Qichen Zheng,' + ' Yun Ni, Si Yong Yeo, Jun Liu.', + title='Animal Kingdom: ' + 'A Large and Diverse Dataset for Animal Behavior Understanding', + container='Conference on Computer Vision ' + 'and Pattern Recognition (CVPR)', + year='2022', + homepage='https://sutdcv.github.io/Animal-Kingdom', + version='1.0 (2022-06)', + date_created='2022-06', + ), + keypoint_info={ + 0: + dict( + name='Head_Mid_Top', + id=0, + color=(225, 0, 255), + type='upper', + swap=''), + 1: + dict( + name='Eye_Left', + id=1, + color=[220, 20, 60], + type='upper', + swap='Eye_Right'), + 2: + dict( + name='Eye_Right', + id=2, + color=[0, 255, 255], + type='upper', + swap='Eye_Left'), + 3: + dict( + name='Mouth_Front_Top', + id=3, + color=(0, 255, 42), + type='upper', + swap=''), + 4: + dict( + name='Mouth_Back_Left', + id=4, + color=[221, 160, 221], + type='upper', + swap='Mouth_Back_Right'), + 5: + dict( + name='Mouth_Back_Right', + id=5, + color=[135, 206, 250], + type='upper', + swap='Mouth_Back_Left'), + 6: + dict( + name='Mouth_Front_Bottom', + id=6, + color=[50, 205, 50], + type='upper', + swap=''), + 7: + dict( + name='Shoulder_Left', + id=7, + color=[255, 182, 193], + type='upper', + swap='Shoulder_Right'), + 8: + dict( + name='Shoulder_Right', + id=8, + color=[0, 191, 255], + type='upper', + swap='Shoulder_Left'), + 9: + dict( + name='Elbow_Left', + id=9, + color=[255, 105, 180], + type='upper', + swap='Elbow_Right'), + 10: + dict( + name='Elbow_Right', + id=10, + color=[30, 144, 255], + type='upper', + swap='Elbow_Left'), + 11: + dict( + name='Wrist_Left', + id=11, + color=[255, 20, 147], + type='upper', + swap='Wrist_Right'), + 12: + dict( + name='Wrist_Right', + id=12, + color=[0, 0, 255], + type='upper', + swap='Wrist_Left'), + 13: + dict( + name='Torso_Mid_Back', + id=13, + color=(185, 3, 221), + type='upper', + swap=''), + 14: + dict( + name='Hip_Left', + id=14, + color=[255, 215, 0], + type='lower', + swap='Hip_Right'), + 15: + dict( + name='Hip_Right', + id=15, + color=[147, 112, 219], + type='lower', + swap='Hip_Left'), + 16: + dict( + name='Knee_Left', + id=16, + color=[255, 165, 0], + type='lower', + swap='Knee_Right'), + 17: + dict( + name='Knee_Right', + id=17, + color=[138, 43, 226], + type='lower', + swap='Knee_Left'), + 18: + dict( + name='Ankle_Left', + id=18, + color=[255, 140, 0], + type='lower', + swap='Ankle_Right'), + 19: + dict( + name='Ankle_Right', + id=19, + color=[128, 0, 128], + type='lower', + swap='Ankle_Left'), + 20: + dict( + name='Tail_Top_Back', + id=20, + color=(0, 251, 255), + type='lower', + swap=''), + 21: + dict( + name='Tail_Mid_Back', + id=21, + color=[32, 178, 170], + type='lower', + swap=''), + 22: + dict( + name='Tail_End_Back', + id=22, + color=(0, 102, 102), + type='lower', + swap='') + }, + skeleton_info={ + 0: + dict(link=('Eye_Left', 'Head_Mid_Top'), id=0, color=[220, 20, 60]), + 1: + dict(link=('Eye_Right', 'Head_Mid_Top'), id=1, color=[0, 255, 255]), + 2: + dict( + link=('Mouth_Front_Top', 'Mouth_Back_Left'), + id=2, + color=[221, 160, 221]), + 3: + dict( + link=('Mouth_Front_Top', 'Mouth_Back_Right'), + id=3, + color=[135, 206, 250]), + 4: + dict( + link=('Mouth_Front_Bottom', 'Mouth_Back_Left'), + id=4, + color=[221, 160, 221]), + 5: + dict( + link=('Mouth_Front_Bottom', 'Mouth_Back_Right'), + id=5, + color=[135, 206, 250]), + 6: + dict( + link=('Head_Mid_Top', 'Torso_Mid_Back'), id=6, + color=(225, 0, 255)), + 7: + dict( + link=('Torso_Mid_Back', 'Tail_Top_Back'), + id=7, + color=(185, 3, 221)), + 8: + dict( + 
link=('Tail_Top_Back', 'Tail_Mid_Back'), id=8, + color=(0, 251, 255)), + 9: + dict( + link=('Tail_Mid_Back', 'Tail_End_Back'), + id=9, + color=[32, 178, 170]), + 10: + dict( + link=('Head_Mid_Top', 'Shoulder_Left'), + id=10, + color=[255, 182, 193]), + 11: + dict( + link=('Head_Mid_Top', 'Shoulder_Right'), + id=11, + color=[0, 191, 255]), + 12: + dict( + link=('Shoulder_Left', 'Elbow_Left'), id=12, color=[255, 105, + 180]), + 13: + dict( + link=('Shoulder_Right', 'Elbow_Right'), + id=13, + color=[30, 144, 255]), + 14: + dict(link=('Elbow_Left', 'Wrist_Left'), id=14, color=[255, 20, 147]), + 15: + dict(link=('Elbow_Right', 'Wrist_Right'), id=15, color=[0, 0, 255]), + 16: + dict(link=('Tail_Top_Back', 'Hip_Left'), id=16, color=[255, 215, 0]), + 17: + dict( + link=('Tail_Top_Back', 'Hip_Right'), id=17, color=[147, 112, 219]), + 18: + dict(link=('Hip_Left', 'Knee_Left'), id=18, color=[255, 165, 0]), + 19: + dict(link=('Hip_Right', 'Knee_Right'), id=19, color=[138, 43, 226]), + 20: + dict(link=('Knee_Left', 'Ankle_Left'), id=20, color=[255, 140, 0]), + 21: + dict(link=('Knee_Right', 'Ankle_Right'), id=21, color=[128, 0, 128]) + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1. + ], + sigmas=[ + 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, + 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, 0.025, + 0.025, 0.025, 0.025 + ]) diff --git a/configs/_base_/datasets/animalpose.py b/configs/_base_/datasets/animalpose.py index d5bb62d951..7f614f75a6 100644 --- a/configs/_base_/datasets/animalpose.py +++ b/configs/_base_/datasets/animalpose.py @@ -1,166 +1,166 @@ -dataset_info = dict( - dataset_name='animalpose', - paper_info=dict( - author='Cao, Jinkun and Tang, Hongyang and Fang, Hao-Shu and ' - 'Shen, Xiaoyong and Lu, Cewu and Tai, Yu-Wing', - title='Cross-Domain Adaptation for Animal Pose Estimation', - container='The IEEE International Conference on ' - 'Computer Vision (ICCV)', - year='2019', - homepage='https://sites.google.com/view/animal-pose/', - ), - keypoint_info={ - 0: - dict( - name='L_Eye', id=0, color=[0, 255, 0], type='upper', swap='R_Eye'), - 1: - dict( - name='R_Eye', - id=1, - color=[255, 128, 0], - type='upper', - swap='L_Eye'), - 2: - dict( - name='L_EarBase', - id=2, - color=[0, 255, 0], - type='upper', - swap='R_EarBase'), - 3: - dict( - name='R_EarBase', - id=3, - color=[255, 128, 0], - type='upper', - swap='L_EarBase'), - 4: - dict(name='Nose', id=4, color=[51, 153, 255], type='upper', swap=''), - 5: - dict(name='Throat', id=5, color=[51, 153, 255], type='upper', swap=''), - 6: - dict( - name='TailBase', id=6, color=[51, 153, 255], type='lower', - swap=''), - 7: - dict( - name='Withers', id=7, color=[51, 153, 255], type='upper', swap=''), - 8: - dict( - name='L_F_Elbow', - id=8, - color=[0, 255, 0], - type='upper', - swap='R_F_Elbow'), - 9: - dict( - name='R_F_Elbow', - id=9, - color=[255, 128, 0], - type='upper', - swap='L_F_Elbow'), - 10: - dict( - name='L_B_Elbow', - id=10, - color=[0, 255, 0], - type='lower', - swap='R_B_Elbow'), - 11: - dict( - name='R_B_Elbow', - id=11, - color=[255, 128, 0], - type='lower', - swap='L_B_Elbow'), - 12: - dict( - name='L_F_Knee', - id=12, - color=[0, 255, 0], - type='upper', - swap='R_F_Knee'), - 13: - dict( - name='R_F_Knee', - id=13, - color=[255, 128, 0], - type='upper', - swap='L_F_Knee'), - 14: - dict( - name='L_B_Knee', - id=14, - color=[0, 255, 0], - type='lower', - swap='R_B_Knee'), - 15: - dict( - name='R_B_Knee', - id=15, - 
color=[255, 128, 0], - type='lower', - swap='L_B_Knee'), - 16: - dict( - name='L_F_Paw', - id=16, - color=[0, 255, 0], - type='upper', - swap='R_F_Paw'), - 17: - dict( - name='R_F_Paw', - id=17, - color=[255, 128, 0], - type='upper', - swap='L_F_Paw'), - 18: - dict( - name='L_B_Paw', - id=18, - color=[0, 255, 0], - type='lower', - swap='R_B_Paw'), - 19: - dict( - name='R_B_Paw', - id=19, - color=[255, 128, 0], - type='lower', - swap='L_B_Paw') - }, - skeleton_info={ - 0: dict(link=('L_Eye', 'R_Eye'), id=0, color=[51, 153, 255]), - 1: dict(link=('L_Eye', 'L_EarBase'), id=1, color=[0, 255, 0]), - 2: dict(link=('R_Eye', 'R_EarBase'), id=2, color=[255, 128, 0]), - 3: dict(link=('L_Eye', 'Nose'), id=3, color=[0, 255, 0]), - 4: dict(link=('R_Eye', 'Nose'), id=4, color=[255, 128, 0]), - 5: dict(link=('Nose', 'Throat'), id=5, color=[51, 153, 255]), - 6: dict(link=('Throat', 'Withers'), id=6, color=[51, 153, 255]), - 7: dict(link=('TailBase', 'Withers'), id=7, color=[51, 153, 255]), - 8: dict(link=('Throat', 'L_F_Elbow'), id=8, color=[0, 255, 0]), - 9: dict(link=('L_F_Elbow', 'L_F_Knee'), id=9, color=[0, 255, 0]), - 10: dict(link=('L_F_Knee', 'L_F_Paw'), id=10, color=[0, 255, 0]), - 11: dict(link=('Throat', 'R_F_Elbow'), id=11, color=[255, 128, 0]), - 12: dict(link=('R_F_Elbow', 'R_F_Knee'), id=12, color=[255, 128, 0]), - 13: dict(link=('R_F_Knee', 'R_F_Paw'), id=13, color=[255, 128, 0]), - 14: dict(link=('TailBase', 'L_B_Elbow'), id=14, color=[0, 255, 0]), - 15: dict(link=('L_B_Elbow', 'L_B_Knee'), id=15, color=[0, 255, 0]), - 16: dict(link=('L_B_Knee', 'L_B_Paw'), id=16, color=[0, 255, 0]), - 17: dict(link=('TailBase', 'R_B_Elbow'), id=17, color=[255, 128, 0]), - 18: dict(link=('R_B_Elbow', 'R_B_Knee'), id=18, color=[255, 128, 0]), - 19: dict(link=('R_B_Knee', 'R_B_Paw'), id=19, color=[255, 128, 0]) - }, - joint_weights=[ - 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.2, 1.2, - 1.5, 1.5, 1.5, 1.5 - ], - - # Note: The original paper did not provide enough information about - # the sigmas. 
We modified from 'https://github.com/cocodataset/' - # 'cocoapi/blob/master/PythonAPI/pycocotools/cocoeval.py#L523' - sigmas=[ - 0.025, 0.025, 0.026, 0.035, 0.035, 0.10, 0.10, 0.10, 0.107, 0.107, - 0.107, 0.107, 0.087, 0.087, 0.087, 0.087, 0.089, 0.089, 0.089, 0.089 - ]) +dataset_info = dict( + dataset_name='animalpose', + paper_info=dict( + author='Cao, Jinkun and Tang, Hongyang and Fang, Hao-Shu and ' + 'Shen, Xiaoyong and Lu, Cewu and Tai, Yu-Wing', + title='Cross-Domain Adaptation for Animal Pose Estimation', + container='The IEEE International Conference on ' + 'Computer Vision (ICCV)', + year='2019', + homepage='https://sites.google.com/view/animal-pose/', + ), + keypoint_info={ + 0: + dict( + name='L_Eye', id=0, color=[0, 255, 0], type='upper', swap='R_Eye'), + 1: + dict( + name='R_Eye', + id=1, + color=[255, 128, 0], + type='upper', + swap='L_Eye'), + 2: + dict( + name='L_EarBase', + id=2, + color=[0, 255, 0], + type='upper', + swap='R_EarBase'), + 3: + dict( + name='R_EarBase', + id=3, + color=[255, 128, 0], + type='upper', + swap='L_EarBase'), + 4: + dict(name='Nose', id=4, color=[51, 153, 255], type='upper', swap=''), + 5: + dict(name='Throat', id=5, color=[51, 153, 255], type='upper', swap=''), + 6: + dict( + name='TailBase', id=6, color=[51, 153, 255], type='lower', + swap=''), + 7: + dict( + name='Withers', id=7, color=[51, 153, 255], type='upper', swap=''), + 8: + dict( + name='L_F_Elbow', + id=8, + color=[0, 255, 0], + type='upper', + swap='R_F_Elbow'), + 9: + dict( + name='R_F_Elbow', + id=9, + color=[255, 128, 0], + type='upper', + swap='L_F_Elbow'), + 10: + dict( + name='L_B_Elbow', + id=10, + color=[0, 255, 0], + type='lower', + swap='R_B_Elbow'), + 11: + dict( + name='R_B_Elbow', + id=11, + color=[255, 128, 0], + type='lower', + swap='L_B_Elbow'), + 12: + dict( + name='L_F_Knee', + id=12, + color=[0, 255, 0], + type='upper', + swap='R_F_Knee'), + 13: + dict( + name='R_F_Knee', + id=13, + color=[255, 128, 0], + type='upper', + swap='L_F_Knee'), + 14: + dict( + name='L_B_Knee', + id=14, + color=[0, 255, 0], + type='lower', + swap='R_B_Knee'), + 15: + dict( + name='R_B_Knee', + id=15, + color=[255, 128, 0], + type='lower', + swap='L_B_Knee'), + 16: + dict( + name='L_F_Paw', + id=16, + color=[0, 255, 0], + type='upper', + swap='R_F_Paw'), + 17: + dict( + name='R_F_Paw', + id=17, + color=[255, 128, 0], + type='upper', + swap='L_F_Paw'), + 18: + dict( + name='L_B_Paw', + id=18, + color=[0, 255, 0], + type='lower', + swap='R_B_Paw'), + 19: + dict( + name='R_B_Paw', + id=19, + color=[255, 128, 0], + type='lower', + swap='L_B_Paw') + }, + skeleton_info={ + 0: dict(link=('L_Eye', 'R_Eye'), id=0, color=[51, 153, 255]), + 1: dict(link=('L_Eye', 'L_EarBase'), id=1, color=[0, 255, 0]), + 2: dict(link=('R_Eye', 'R_EarBase'), id=2, color=[255, 128, 0]), + 3: dict(link=('L_Eye', 'Nose'), id=3, color=[0, 255, 0]), + 4: dict(link=('R_Eye', 'Nose'), id=4, color=[255, 128, 0]), + 5: dict(link=('Nose', 'Throat'), id=5, color=[51, 153, 255]), + 6: dict(link=('Throat', 'Withers'), id=6, color=[51, 153, 255]), + 7: dict(link=('TailBase', 'Withers'), id=7, color=[51, 153, 255]), + 8: dict(link=('Throat', 'L_F_Elbow'), id=8, color=[0, 255, 0]), + 9: dict(link=('L_F_Elbow', 'L_F_Knee'), id=9, color=[0, 255, 0]), + 10: dict(link=('L_F_Knee', 'L_F_Paw'), id=10, color=[0, 255, 0]), + 11: dict(link=('Throat', 'R_F_Elbow'), id=11, color=[255, 128, 0]), + 12: dict(link=('R_F_Elbow', 'R_F_Knee'), id=12, color=[255, 128, 0]), + 13: dict(link=('R_F_Knee', 'R_F_Paw'), id=13, color=[255, 128, 0]), + 14: 
dict(link=('TailBase', 'L_B_Elbow'), id=14, color=[0, 255, 0]), + 15: dict(link=('L_B_Elbow', 'L_B_Knee'), id=15, color=[0, 255, 0]), + 16: dict(link=('L_B_Knee', 'L_B_Paw'), id=16, color=[0, 255, 0]), + 17: dict(link=('TailBase', 'R_B_Elbow'), id=17, color=[255, 128, 0]), + 18: dict(link=('R_B_Elbow', 'R_B_Knee'), id=18, color=[255, 128, 0]), + 19: dict(link=('R_B_Knee', 'R_B_Paw'), id=19, color=[255, 128, 0]) + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.2, 1.2, + 1.5, 1.5, 1.5, 1.5 + ], + + # Note: The original paper did not provide enough information about + # the sigmas. We modified from 'https://github.com/cocodataset/' + # 'cocoapi/blob/master/PythonAPI/pycocotools/cocoeval.py#L523' + sigmas=[ + 0.025, 0.025, 0.026, 0.035, 0.035, 0.10, 0.10, 0.10, 0.107, 0.107, + 0.107, 0.107, 0.087, 0.087, 0.087, 0.087, 0.089, 0.089, 0.089, 0.089 + ]) diff --git a/configs/_base_/datasets/ap10k.py b/configs/_base_/datasets/ap10k.py index c0df579acb..aecc173c26 100644 --- a/configs/_base_/datasets/ap10k.py +++ b/configs/_base_/datasets/ap10k.py @@ -1,142 +1,142 @@ -dataset_info = dict( - dataset_name='ap10k', - paper_info=dict( - author='Yu, Hang and Xu, Yufei and Zhang, Jing and ' - 'Zhao, Wei and Guan, Ziyu and Tao, Dacheng', - title='AP-10K: A Benchmark for Animal Pose Estimation in the Wild', - container='35th Conference on Neural Information Processing Systems ' - '(NeurIPS 2021) Track on Datasets and Bench-marks.', - year='2021', - homepage='https://github.com/AlexTheBad/AP-10K', - ), - keypoint_info={ - 0: - dict( - name='L_Eye', id=0, color=[0, 255, 0], type='upper', swap='R_Eye'), - 1: - dict( - name='R_Eye', - id=1, - color=[255, 128, 0], - type='upper', - swap='L_Eye'), - 2: - dict(name='Nose', id=2, color=[51, 153, 255], type='upper', swap=''), - 3: - dict(name='Neck', id=3, color=[51, 153, 255], type='upper', swap=''), - 4: - dict( - name='Root of tail', - id=4, - color=[51, 153, 255], - type='lower', - swap=''), - 5: - dict( - name='L_Shoulder', - id=5, - color=[51, 153, 255], - type='upper', - swap='R_Shoulder'), - 6: - dict( - name='L_Elbow', - id=6, - color=[51, 153, 255], - type='upper', - swap='R_Elbow'), - 7: - dict( - name='L_F_Paw', - id=7, - color=[0, 255, 0], - type='upper', - swap='R_F_Paw'), - 8: - dict( - name='R_Shoulder', - id=8, - color=[0, 255, 0], - type='upper', - swap='L_Shoulder'), - 9: - dict( - name='R_Elbow', - id=9, - color=[255, 128, 0], - type='upper', - swap='L_Elbow'), - 10: - dict( - name='R_F_Paw', - id=10, - color=[0, 255, 0], - type='lower', - swap='L_F_Paw'), - 11: - dict( - name='L_Hip', - id=11, - color=[255, 128, 0], - type='lower', - swap='R_Hip'), - 12: - dict( - name='L_Knee', - id=12, - color=[255, 128, 0], - type='lower', - swap='R_Knee'), - 13: - dict( - name='L_B_Paw', - id=13, - color=[0, 255, 0], - type='lower', - swap='R_B_Paw'), - 14: - dict( - name='R_Hip', id=14, color=[0, 255, 0], type='lower', - swap='L_Hip'), - 15: - dict( - name='R_Knee', - id=15, - color=[0, 255, 0], - type='lower', - swap='L_Knee'), - 16: - dict( - name='R_B_Paw', - id=16, - color=[0, 255, 0], - type='lower', - swap='L_B_Paw'), - }, - skeleton_info={ - 0: dict(link=('L_Eye', 'R_Eye'), id=0, color=[0, 0, 255]), - 1: dict(link=('L_Eye', 'Nose'), id=1, color=[0, 0, 255]), - 2: dict(link=('R_Eye', 'Nose'), id=2, color=[0, 0, 255]), - 3: dict(link=('Nose', 'Neck'), id=3, color=[0, 255, 0]), - 4: dict(link=('Neck', 'Root of tail'), id=4, color=[0, 255, 0]), - 5: dict(link=('Neck', 'L_Shoulder'), id=5, color=[0, 255, 255]), - 6: 
dict(link=('L_Shoulder', 'L_Elbow'), id=6, color=[0, 255, 255]), - 7: dict(link=('L_Elbow', 'L_F_Paw'), id=6, color=[0, 255, 255]), - 8: dict(link=('Neck', 'R_Shoulder'), id=7, color=[6, 156, 250]), - 9: dict(link=('R_Shoulder', 'R_Elbow'), id=8, color=[6, 156, 250]), - 10: dict(link=('R_Elbow', 'R_F_Paw'), id=9, color=[6, 156, 250]), - 11: dict(link=('Root of tail', 'L_Hip'), id=10, color=[0, 255, 255]), - 12: dict(link=('L_Hip', 'L_Knee'), id=11, color=[0, 255, 255]), - 13: dict(link=('L_Knee', 'L_B_Paw'), id=12, color=[0, 255, 255]), - 14: dict(link=('Root of tail', 'R_Hip'), id=13, color=[6, 156, 250]), - 15: dict(link=('R_Hip', 'R_Knee'), id=14, color=[6, 156, 250]), - 16: dict(link=('R_Knee', 'R_B_Paw'), id=15, color=[6, 156, 250]), - }, - joint_weights=[ - 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, - 1.5 - ], - sigmas=[ - 0.025, 0.025, 0.026, 0.035, 0.035, 0.079, 0.072, 0.062, 0.079, 0.072, - 0.062, 0.107, 0.087, 0.089, 0.107, 0.087, 0.089 - ]) +dataset_info = dict( + dataset_name='ap10k', + paper_info=dict( + author='Yu, Hang and Xu, Yufei and Zhang, Jing and ' + 'Zhao, Wei and Guan, Ziyu and Tao, Dacheng', + title='AP-10K: A Benchmark for Animal Pose Estimation in the Wild', + container='35th Conference on Neural Information Processing Systems ' + '(NeurIPS 2021) Track on Datasets and Bench-marks.', + year='2021', + homepage='https://github.com/AlexTheBad/AP-10K', + ), + keypoint_info={ + 0: + dict( + name='L_Eye', id=0, color=[0, 255, 0], type='upper', swap='R_Eye'), + 1: + dict( + name='R_Eye', + id=1, + color=[255, 128, 0], + type='upper', + swap='L_Eye'), + 2: + dict(name='Nose', id=2, color=[51, 153, 255], type='upper', swap=''), + 3: + dict(name='Neck', id=3, color=[51, 153, 255], type='upper', swap=''), + 4: + dict( + name='Root of tail', + id=4, + color=[51, 153, 255], + type='lower', + swap=''), + 5: + dict( + name='L_Shoulder', + id=5, + color=[51, 153, 255], + type='upper', + swap='R_Shoulder'), + 6: + dict( + name='L_Elbow', + id=6, + color=[51, 153, 255], + type='upper', + swap='R_Elbow'), + 7: + dict( + name='L_F_Paw', + id=7, + color=[0, 255, 0], + type='upper', + swap='R_F_Paw'), + 8: + dict( + name='R_Shoulder', + id=8, + color=[0, 255, 0], + type='upper', + swap='L_Shoulder'), + 9: + dict( + name='R_Elbow', + id=9, + color=[255, 128, 0], + type='upper', + swap='L_Elbow'), + 10: + dict( + name='R_F_Paw', + id=10, + color=[0, 255, 0], + type='lower', + swap='L_F_Paw'), + 11: + dict( + name='L_Hip', + id=11, + color=[255, 128, 0], + type='lower', + swap='R_Hip'), + 12: + dict( + name='L_Knee', + id=12, + color=[255, 128, 0], + type='lower', + swap='R_Knee'), + 13: + dict( + name='L_B_Paw', + id=13, + color=[0, 255, 0], + type='lower', + swap='R_B_Paw'), + 14: + dict( + name='R_Hip', id=14, color=[0, 255, 0], type='lower', + swap='L_Hip'), + 15: + dict( + name='R_Knee', + id=15, + color=[0, 255, 0], + type='lower', + swap='L_Knee'), + 16: + dict( + name='R_B_Paw', + id=16, + color=[0, 255, 0], + type='lower', + swap='L_B_Paw'), + }, + skeleton_info={ + 0: dict(link=('L_Eye', 'R_Eye'), id=0, color=[0, 0, 255]), + 1: dict(link=('L_Eye', 'Nose'), id=1, color=[0, 0, 255]), + 2: dict(link=('R_Eye', 'Nose'), id=2, color=[0, 0, 255]), + 3: dict(link=('Nose', 'Neck'), id=3, color=[0, 255, 0]), + 4: dict(link=('Neck', 'Root of tail'), id=4, color=[0, 255, 0]), + 5: dict(link=('Neck', 'L_Shoulder'), id=5, color=[0, 255, 255]), + 6: dict(link=('L_Shoulder', 'L_Elbow'), id=6, color=[0, 255, 255]), + 7: dict(link=('L_Elbow', 'L_F_Paw'), id=6, color=[0, 
255, 255]), + 8: dict(link=('Neck', 'R_Shoulder'), id=7, color=[6, 156, 250]), + 9: dict(link=('R_Shoulder', 'R_Elbow'), id=8, color=[6, 156, 250]), + 10: dict(link=('R_Elbow', 'R_F_Paw'), id=9, color=[6, 156, 250]), + 11: dict(link=('Root of tail', 'L_Hip'), id=10, color=[0, 255, 255]), + 12: dict(link=('L_Hip', 'L_Knee'), id=11, color=[0, 255, 255]), + 13: dict(link=('L_Knee', 'L_B_Paw'), id=12, color=[0, 255, 255]), + 14: dict(link=('Root of tail', 'R_Hip'), id=13, color=[6, 156, 250]), + 15: dict(link=('R_Hip', 'R_Knee'), id=14, color=[6, 156, 250]), + 16: dict(link=('R_Knee', 'R_B_Paw'), id=15, color=[6, 156, 250]), + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5 + ], + sigmas=[ + 0.025, 0.025, 0.026, 0.035, 0.035, 0.079, 0.072, 0.062, 0.079, 0.072, + 0.062, 0.107, 0.087, 0.089, 0.107, 0.087, 0.089 + ]) diff --git a/configs/_base_/datasets/atrw.py b/configs/_base_/datasets/atrw.py index 7ec71c8c50..84d3fb370c 100644 --- a/configs/_base_/datasets/atrw.py +++ b/configs/_base_/datasets/atrw.py @@ -1,144 +1,144 @@ -dataset_info = dict( - dataset_name='atrw', - paper_info=dict( - author='Li, Shuyuan and Li, Jianguo and Tang, Hanlin ' - 'and Qian, Rui and Lin, Weiyao', - title='ATRW: A Benchmark for Amur Tiger ' - 'Re-identification in the Wild', - container='Proceedings of the 28th ACM ' - 'International Conference on Multimedia', - year='2020', - homepage='https://cvwc2019.github.io/challenge.html', - ), - keypoint_info={ - 0: - dict( - name='left_ear', - id=0, - color=[51, 153, 255], - type='upper', - swap='right_ear'), - 1: - dict( - name='right_ear', - id=1, - color=[51, 153, 255], - type='upper', - swap='left_ear'), - 2: - dict(name='nose', id=2, color=[51, 153, 255], type='upper', swap=''), - 3: - dict( - name='right_shoulder', - id=3, - color=[255, 128, 0], - type='upper', - swap='left_shoulder'), - 4: - dict( - name='right_front_paw', - id=4, - color=[255, 128, 0], - type='upper', - swap='left_front_paw'), - 5: - dict( - name='left_shoulder', - id=5, - color=[0, 255, 0], - type='upper', - swap='right_shoulder'), - 6: - dict( - name='left_front_paw', - id=6, - color=[0, 255, 0], - type='upper', - swap='right_front_paw'), - 7: - dict( - name='right_hip', - id=7, - color=[255, 128, 0], - type='lower', - swap='left_hip'), - 8: - dict( - name='right_knee', - id=8, - color=[255, 128, 0], - type='lower', - swap='left_knee'), - 9: - dict( - name='right_back_paw', - id=9, - color=[255, 128, 0], - type='lower', - swap='left_back_paw'), - 10: - dict( - name='left_hip', - id=10, - color=[0, 255, 0], - type='lower', - swap='right_hip'), - 11: - dict( - name='left_knee', - id=11, - color=[0, 255, 0], - type='lower', - swap='right_knee'), - 12: - dict( - name='left_back_paw', - id=12, - color=[0, 255, 0], - type='lower', - swap='right_back_paw'), - 13: - dict(name='tail', id=13, color=[51, 153, 255], type='lower', swap=''), - 14: - dict( - name='center', id=14, color=[51, 153, 255], type='lower', swap=''), - }, - skeleton_info={ - 0: - dict(link=('left_ear', 'nose'), id=0, color=[51, 153, 255]), - 1: - dict(link=('right_ear', 'nose'), id=1, color=[51, 153, 255]), - 2: - dict(link=('nose', 'center'), id=2, color=[51, 153, 255]), - 3: - dict( - link=('left_shoulder', 'left_front_paw'), id=3, color=[0, 255, 0]), - 4: - dict(link=('left_shoulder', 'center'), id=4, color=[0, 255, 0]), - 5: - dict( - link=('right_shoulder', 'right_front_paw'), - id=5, - color=[255, 128, 0]), - 6: - dict(link=('right_shoulder', 'center'), id=6, color=[255, 128, 0]), 
- 7: - dict(link=('tail', 'center'), id=7, color=[51, 153, 255]), - 8: - dict(link=('right_back_paw', 'right_knee'), id=8, color=[255, 128, 0]), - 9: - dict(link=('right_knee', 'right_hip'), id=9, color=[255, 128, 0]), - 10: - dict(link=('right_hip', 'tail'), id=10, color=[255, 128, 0]), - 11: - dict(link=('left_back_paw', 'left_knee'), id=11, color=[0, 255, 0]), - 12: - dict(link=('left_knee', 'left_hip'), id=12, color=[0, 255, 0]), - 13: - dict(link=('left_hip', 'tail'), id=13, color=[0, 255, 0]), - }, - joint_weights=[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], - sigmas=[ - 0.0277, 0.0823, 0.0831, 0.0202, 0.0716, 0.0263, 0.0646, 0.0302, 0.0440, - 0.0316, 0.0333, 0.0547, 0.0263, 0.0683, 0.0539 - ]) +dataset_info = dict( + dataset_name='atrw', + paper_info=dict( + author='Li, Shuyuan and Li, Jianguo and Tang, Hanlin ' + 'and Qian, Rui and Lin, Weiyao', + title='ATRW: A Benchmark for Amur Tiger ' + 'Re-identification in the Wild', + container='Proceedings of the 28th ACM ' + 'International Conference on Multimedia', + year='2020', + homepage='https://cvwc2019.github.io/challenge.html', + ), + keypoint_info={ + 0: + dict( + name='left_ear', + id=0, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 1: + dict( + name='right_ear', + id=1, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 2: + dict(name='nose', id=2, color=[51, 153, 255], type='upper', swap=''), + 3: + dict( + name='right_shoulder', + id=3, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 4: + dict( + name='right_front_paw', + id=4, + color=[255, 128, 0], + type='upper', + swap='left_front_paw'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='left_front_paw', + id=6, + color=[0, 255, 0], + type='upper', + swap='right_front_paw'), + 7: + dict( + name='right_hip', + id=7, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 8: + dict( + name='right_knee', + id=8, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 9: + dict( + name='right_back_paw', + id=9, + color=[255, 128, 0], + type='lower', + swap='left_back_paw'), + 10: + dict( + name='left_hip', + id=10, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 11: + dict( + name='left_knee', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 12: + dict( + name='left_back_paw', + id=12, + color=[0, 255, 0], + type='lower', + swap='right_back_paw'), + 13: + dict(name='tail', id=13, color=[51, 153, 255], type='lower', swap=''), + 14: + dict( + name='center', id=14, color=[51, 153, 255], type='lower', swap=''), + }, + skeleton_info={ + 0: + dict(link=('left_ear', 'nose'), id=0, color=[51, 153, 255]), + 1: + dict(link=('right_ear', 'nose'), id=1, color=[51, 153, 255]), + 2: + dict(link=('nose', 'center'), id=2, color=[51, 153, 255]), + 3: + dict( + link=('left_shoulder', 'left_front_paw'), id=3, color=[0, 255, 0]), + 4: + dict(link=('left_shoulder', 'center'), id=4, color=[0, 255, 0]), + 5: + dict( + link=('right_shoulder', 'right_front_paw'), + id=5, + color=[255, 128, 0]), + 6: + dict(link=('right_shoulder', 'center'), id=6, color=[255, 128, 0]), + 7: + dict(link=('tail', 'center'), id=7, color=[51, 153, 255]), + 8: + dict(link=('right_back_paw', 'right_knee'), id=8, color=[255, 128, 0]), + 9: + dict(link=('right_knee', 'right_hip'), id=9, color=[255, 128, 0]), + 10: + dict(link=('right_hip', 'tail'), id=10, color=[255, 128, 0]), + 11: + dict(link=('left_back_paw', 'left_knee'), id=11, color=[0, 
255, 0]), + 12: + dict(link=('left_knee', 'left_hip'), id=12, color=[0, 255, 0]), + 13: + dict(link=('left_hip', 'tail'), id=13, color=[0, 255, 0]), + }, + joint_weights=[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], + sigmas=[ + 0.0277, 0.0823, 0.0831, 0.0202, 0.0716, 0.0263, 0.0646, 0.0302, 0.0440, + 0.0316, 0.0333, 0.0547, 0.0263, 0.0683, 0.0539 + ]) diff --git a/configs/_base_/datasets/campus.py b/configs/_base_/datasets/campus.py index 334316e9c2..06cc7ecc16 100644 --- a/configs/_base_/datasets/campus.py +++ b/configs/_base_/datasets/campus.py @@ -1,151 +1,151 @@ -dataset_info = dict( - dataset_name='campus', - paper_info=dict( - author='Belagiannis, Vasileios and Amin, Sikandar and Andriluka, ' - 'Mykhaylo and Schiele, Bernt and Navab, Nassir and Ilic, Slobodan', - title='3D Pictorial Structures for Multiple Human Pose Estimation', - container='IEEE Computer Society Conference on Computer Vision and ' - 'Pattern Recognition (CVPR)', - year='2014', - homepage='http://campar.in.tum.de/Chair/MultiHumanPose', - ), - keypoint_info={ - 0: - dict( - name='right_ankle', - id=0, - color=[255, 128, 0], - type='lower', - swap='left_ankle'), - 1: - dict( - name='right_knee', - id=1, - color=[255, 128, 0], - type='lower', - swap='left_knee'), - 2: - dict( - name='right_hip', - id=2, - color=[255, 128, 0], - type='lower', - swap='left_hip'), - 3: - dict( - name='left_hip', - id=3, - color=[0, 255, 0], - type='lower', - swap='right_hip'), - 4: - dict( - name='left_knee', - id=4, - color=[0, 255, 0], - type='lower', - swap='right_knee'), - 5: - dict( - name='left_ankle', - id=5, - color=[0, 255, 0], - type='lower', - swap='right_ankle'), - 6: - dict( - name='right_wrist', - id=6, - color=[255, 128, 0], - type='upper', - swap='left_wrist'), - 7: - dict( - name='right_elbow', - id=7, - color=[255, 128, 0], - type='upper', - swap='left_elbow'), - 8: - dict( - name='right_shoulder', - id=8, - color=[255, 128, 0], - type='upper', - swap='left_shoulder'), - 9: - dict( - name='left_shoulder', - id=9, - color=[0, 255, 0], - type='upper', - swap='right_shoulder'), - 10: - dict( - name='left_elbow', - id=10, - color=[0, 255, 0], - type='upper', - swap='right_elbow'), - 11: - dict( - name='left_wrist', - id=11, - color=[0, 255, 0], - type='upper', - swap='right_wrist'), - 12: - dict( - name='bottom_head', - id=12, - color=[51, 153, 255], - type='upper', - swap=''), - 13: - dict( - name='top_head', - id=13, - color=[51, 153, 255], - type='upper', - swap=''), - }, - skeleton_info={ - 0: - dict(link=('right_ankle', 'right_knee'), id=0, color=[255, 128, 0]), - 1: - dict(link=('right_knee', 'right_hip'), id=1, color=[255, 128, 0]), - 2: - dict(link=('left_hip', 'left_knee'), id=2, color=[0, 255, 0]), - 3: - dict(link=('left_knee', 'left_ankle'), id=3, color=[0, 255, 0]), - 4: - dict(link=('right_hip', 'left_hip'), id=4, color=[51, 153, 255]), - 5: - dict(link=('right_wrist', 'right_elbow'), id=5, color=[255, 128, 0]), - 6: - dict( - link=('right_elbow', 'right_shoulder'), id=6, color=[255, 128, 0]), - 7: - dict(link=('left_shoulder', 'left_elbow'), id=7, color=[0, 255, 0]), - 8: - dict(link=('left_elbow', 'left_wrist'), id=8, color=[0, 255, 0]), - 9: - dict(link=('right_hip', 'right_shoulder'), id=9, color=[255, 128, 0]), - 10: - dict(link=('left_hip', 'left_shoulder'), id=10, color=[0, 255, 0]), - 11: - dict( - link=('right_shoulder', 'bottom_head'), id=11, color=[255, 128, - 0]), - 12: - dict(link=('left_shoulder', 'bottom_head'), id=12, color=[0, 255, 0]), - 13: - dict(link=('bottom_head', 
'top_head'), id=13, color=[51, 153, 255]), - }, - joint_weights=[ - 1.5, 1.2, 1.0, 1.0, 1.2, 1.5, 1.5, 1.2, 1.0, 1.0, 1.2, 1.5, 1.0, 1.0 - ], - sigmas=[ - 0.089, 0.087, 0.107, 0.107, 0.087, 0.089, 0.062, 0.072, 0.079, 0.079, - 0.072, 0.062, 0.026, 0.026 - ]) +dataset_info = dict( + dataset_name='campus', + paper_info=dict( + author='Belagiannis, Vasileios and Amin, Sikandar and Andriluka, ' + 'Mykhaylo and Schiele, Bernt and Navab, Nassir and Ilic, Slobodan', + title='3D Pictorial Structures for Multiple Human Pose Estimation', + container='IEEE Computer Society Conference on Computer Vision and ' + 'Pattern Recognition (CVPR)', + year='2014', + homepage='http://campar.in.tum.de/Chair/MultiHumanPose', + ), + keypoint_info={ + 0: + dict( + name='right_ankle', + id=0, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 1: + dict( + name='right_knee', + id=1, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 2: + dict( + name='right_hip', + id=2, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 3: + dict( + name='left_hip', + id=3, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 4: + dict( + name='left_knee', + id=4, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 5: + dict( + name='left_ankle', + id=5, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 6: + dict( + name='right_wrist', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 7: + dict( + name='right_elbow', + id=7, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 8: + dict( + name='right_shoulder', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 9: + dict( + name='left_shoulder', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 10: + dict( + name='left_elbow', + id=10, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 11: + dict( + name='left_wrist', + id=11, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 12: + dict( + name='bottom_head', + id=12, + color=[51, 153, 255], + type='upper', + swap=''), + 13: + dict( + name='top_head', + id=13, + color=[51, 153, 255], + type='upper', + swap=''), + }, + skeleton_info={ + 0: + dict(link=('right_ankle', 'right_knee'), id=0, color=[255, 128, 0]), + 1: + dict(link=('right_knee', 'right_hip'), id=1, color=[255, 128, 0]), + 2: + dict(link=('left_hip', 'left_knee'), id=2, color=[0, 255, 0]), + 3: + dict(link=('left_knee', 'left_ankle'), id=3, color=[0, 255, 0]), + 4: + dict(link=('right_hip', 'left_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('right_wrist', 'right_elbow'), id=5, color=[255, 128, 0]), + 6: + dict( + link=('right_elbow', 'right_shoulder'), id=6, color=[255, 128, 0]), + 7: + dict(link=('left_shoulder', 'left_elbow'), id=7, color=[0, 255, 0]), + 8: + dict(link=('left_elbow', 'left_wrist'), id=8, color=[0, 255, 0]), + 9: + dict(link=('right_hip', 'right_shoulder'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_hip', 'left_shoulder'), id=10, color=[0, 255, 0]), + 11: + dict( + link=('right_shoulder', 'bottom_head'), id=11, color=[255, 128, + 0]), + 12: + dict(link=('left_shoulder', 'bottom_head'), id=12, color=[0, 255, 0]), + 13: + dict(link=('bottom_head', 'top_head'), id=13, color=[51, 153, 255]), + }, + joint_weights=[ + 1.5, 1.2, 1.0, 1.0, 1.2, 1.5, 1.5, 1.2, 1.0, 1.0, 1.2, 1.5, 1.0, 1.0 + ], + sigmas=[ + 0.089, 0.087, 0.107, 0.107, 0.087, 0.089, 0.062, 0.072, 0.079, 0.079, + 0.072, 0.062, 0.026, 0.026 + ]) diff --git a/configs/_base_/datasets/coco.py b/configs/_base_/datasets/coco.py 
index 865a95bc02..787e834b8a 100644 --- a/configs/_base_/datasets/coco.py +++ b/configs/_base_/datasets/coco.py @@ -1,181 +1,181 @@ -dataset_info = dict( - dataset_name='coco', - paper_info=dict( - author='Lin, Tsung-Yi and Maire, Michael and ' - 'Belongie, Serge and Hays, James and ' - 'Perona, Pietro and Ramanan, Deva and ' - r'Doll{\'a}r, Piotr and Zitnick, C Lawrence', - title='Microsoft coco: Common objects in context', - container='European conference on computer vision', - year='2014', - homepage='http://cocodataset.org/', - ), - keypoint_info={ - 0: - dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), - 1: - dict( - name='left_eye', - id=1, - color=[51, 153, 255], - type='upper', - swap='right_eye'), - 2: - dict( - name='right_eye', - id=2, - color=[51, 153, 255], - type='upper', - swap='left_eye'), - 3: - dict( - name='left_ear', - id=3, - color=[51, 153, 255], - type='upper', - swap='right_ear'), - 4: - dict( - name='right_ear', - id=4, - color=[51, 153, 255], - type='upper', - swap='left_ear'), - 5: - dict( - name='left_shoulder', - id=5, - color=[0, 255, 0], - type='upper', - swap='right_shoulder'), - 6: - dict( - name='right_shoulder', - id=6, - color=[255, 128, 0], - type='upper', - swap='left_shoulder'), - 7: - dict( - name='left_elbow', - id=7, - color=[0, 255, 0], - type='upper', - swap='right_elbow'), - 8: - dict( - name='right_elbow', - id=8, - color=[255, 128, 0], - type='upper', - swap='left_elbow'), - 9: - dict( - name='left_wrist', - id=9, - color=[0, 255, 0], - type='upper', - swap='right_wrist'), - 10: - dict( - name='right_wrist', - id=10, - color=[255, 128, 0], - type='upper', - swap='left_wrist'), - 11: - dict( - name='left_hip', - id=11, - color=[0, 255, 0], - type='lower', - swap='right_hip'), - 12: - dict( - name='right_hip', - id=12, - color=[255, 128, 0], - type='lower', - swap='left_hip'), - 13: - dict( - name='left_knee', - id=13, - color=[0, 255, 0], - type='lower', - swap='right_knee'), - 14: - dict( - name='right_knee', - id=14, - color=[255, 128, 0], - type='lower', - swap='left_knee'), - 15: - dict( - name='left_ankle', - id=15, - color=[0, 255, 0], - type='lower', - swap='right_ankle'), - 16: - dict( - name='right_ankle', - id=16, - color=[255, 128, 0], - type='lower', - swap='left_ankle') - }, - skeleton_info={ - 0: - dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), - 1: - dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), - 2: - dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), - 3: - dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), - 4: - dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), - 5: - dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), - 6: - dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), - 7: - dict( - link=('left_shoulder', 'right_shoulder'), - id=7, - color=[51, 153, 255]), - 8: - dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), - 9: - dict( - link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), - 10: - dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), - 11: - dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), - 12: - dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), - 13: - dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), - 14: - dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), - 15: - dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), - 16: - 
dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), - 17: - dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), - 18: - dict( - link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]) - }, - joint_weights=[ - 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, - 1.5 - ], - sigmas=[ - 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, - 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089 - ]) +dataset_info = dict( + dataset_name='coco', + paper_info=dict( + author='Lin, Tsung-Yi and Maire, Michael and ' + 'Belongie, Serge and Hays, James and ' + 'Perona, Pietro and Ramanan, Deva and ' + r'Doll{\'a}r, Piotr and Zitnick, C Lawrence', + title='Microsoft coco: Common objects in context', + container='European conference on computer vision', + year='2014', + homepage='http://cocodataset.org/', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'right_shoulder'), + id=7, + color=[51, 153, 255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), 
id=11, color=[255, 128, 0]), + 12: + dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), + 13: + dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), + 14: + dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), + 16: + dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), + 18: + dict( + link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]) + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5 + ], + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089 + ]) diff --git a/configs/_base_/datasets/coco_aic.py b/configs/_base_/datasets/coco_aic.py index a084247468..edd636beb2 100644 --- a/configs/_base_/datasets/coco_aic.py +++ b/configs/_base_/datasets/coco_aic.py @@ -1,205 +1,205 @@ -dataset_info = dict( - dataset_name='coco', - paper_info=[ - dict( - author='Lin, Tsung-Yi and Maire, Michael and ' - 'Belongie, Serge and Hays, James and ' - 'Perona, Pietro and Ramanan, Deva and ' - r'Doll{\'a}r, Piotr and Zitnick, C Lawrence', - title='Microsoft coco: Common objects in context', - container='European conference on computer vision', - year='2014', - homepage='http://cocodataset.org/', - ), - dict( - author='Wu, Jiahong and Zheng, He and Zhao, Bo and ' - 'Li, Yixin and Yan, Baoming and Liang, Rui and ' - 'Wang, Wenjia and Zhou, Shipei and Lin, Guosen and ' - 'Fu, Yanwei and others', - title='Ai challenger: A large-scale dataset for going ' - 'deeper in image understanding', - container='arXiv', - year='2017', - homepage='https://github.com/AIChallenger/AI_Challenger_2017', - ), - ], - keypoint_info={ - 0: - dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), - 1: - dict( - name='left_eye', - id=1, - color=[51, 153, 255], - type='upper', - swap='right_eye'), - 2: - dict( - name='right_eye', - id=2, - color=[51, 153, 255], - type='upper', - swap='left_eye'), - 3: - dict( - name='left_ear', - id=3, - color=[51, 153, 255], - type='upper', - swap='right_ear'), - 4: - dict( - name='right_ear', - id=4, - color=[51, 153, 255], - type='upper', - swap='left_ear'), - 5: - dict( - name='left_shoulder', - id=5, - color=[0, 255, 0], - type='upper', - swap='right_shoulder'), - 6: - dict( - name='right_shoulder', - id=6, - color=[255, 128, 0], - type='upper', - swap='left_shoulder'), - 7: - dict( - name='left_elbow', - id=7, - color=[0, 255, 0], - type='upper', - swap='right_elbow'), - 8: - dict( - name='right_elbow', - id=8, - color=[255, 128, 0], - type='upper', - swap='left_elbow'), - 9: - dict( - name='left_wrist', - id=9, - color=[0, 255, 0], - type='upper', - swap='right_wrist'), - 10: - dict( - name='right_wrist', - id=10, - color=[255, 128, 0], - type='upper', - swap='left_wrist'), - 11: - dict( - name='left_hip', - id=11, - color=[0, 255, 0], - type='lower', - swap='right_hip'), - 12: - dict( - name='right_hip', - id=12, - color=[255, 128, 0], - type='lower', - swap='left_hip'), - 13: - dict( - name='left_knee', - id=13, - color=[0, 255, 0], - type='lower', - swap='right_knee'), - 14: - dict( - name='right_knee', - id=14, - color=[255, 128, 0], - type='lower', - swap='left_knee'), - 15: - dict( - name='left_ankle', - id=15, - color=[0, 255, 0], - type='lower', - swap='right_ankle'), - 16: - dict( - name='right_ankle', - id=16, - color=[255, 128, 0], - 
type='lower', - swap='left_ankle'), - 17: - dict( - name='head_top', - id=17, - color=[51, 153, 255], - type='upper', - swap=''), - 18: - dict(name='neck', id=18, color=[51, 153, 255], type='upper', swap='') - }, - skeleton_info={ - 0: - dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), - 1: - dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), - 2: - dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), - 3: - dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), - 4: - dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), - 5: - dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), - 6: - dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), - 7: - dict( - link=('left_shoulder', 'right_shoulder'), - id=7, - color=[51, 153, 255]), - 8: - dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), - 9: - dict( - link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), - 10: - dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), - 11: - dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), - 12: - dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), - 13: - dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), - 14: - dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), - 15: - dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), - 16: - dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), - 17: - dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), - 18: - dict( - link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]), - 19: - dict(link=('head_top', 'neck'), id=11, color=[51, 153, 255]), - }, - joint_weights=[ - 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, - 1.5, 1.5 - ], - sigmas=[ - 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, - 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089, 0.026, 0.026 - ]) +dataset_info = dict( + dataset_name='coco', + paper_info=[ + dict( + author='Lin, Tsung-Yi and Maire, Michael and ' + 'Belongie, Serge and Hays, James and ' + 'Perona, Pietro and Ramanan, Deva and ' + r'Doll{\'a}r, Piotr and Zitnick, C Lawrence', + title='Microsoft coco: Common objects in context', + container='European conference on computer vision', + year='2014', + homepage='http://cocodataset.org/', + ), + dict( + author='Wu, Jiahong and Zheng, He and Zhao, Bo and ' + 'Li, Yixin and Yan, Baoming and Liang, Rui and ' + 'Wang, Wenjia and Zhou, Shipei and Lin, Guosen and ' + 'Fu, Yanwei and others', + title='Ai challenger: A large-scale dataset for going ' + 'deeper in image understanding', + container='arXiv', + year='2017', + homepage='https://github.com/AIChallenger/AI_Challenger_2017', + ), + ], + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + 
dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 17: + dict( + name='head_top', + id=17, + color=[51, 153, 255], + type='upper', + swap=''), + 18: + dict(name='neck', id=18, color=[51, 153, 255], type='upper', swap='') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'right_shoulder'), + id=7, + color=[51, 153, 255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), + 13: + dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), + 14: + dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), + 16: + dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), + 18: + dict( + link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]), + 19: + dict(link=('head_top', 'neck'), id=11, color=[51, 153, 255]), + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5, 1.5 + ], + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089, 0.026, 0.026 + ]) diff --git a/configs/_base_/datasets/coco_openpose.py b/configs/_base_/datasets/coco_openpose.py index cce11b27f1..7bab5013a4 100644 --- a/configs/_base_/datasets/coco_openpose.py +++ b/configs/_base_/datasets/coco_openpose.py @@ -1,157 +1,157 @@ -dataset_info = dict( - dataset_name='coco_openpose', - paper_info=dict( - author='Zhe, Cao and Tomas, Simon and ' - 'Shih-En, Wei and Yaser, Sheikh', - title='OpenPose: Realtime Multi-Person 2D Pose ' - 'Estimation using Part Affinity Fields', - container='IEEE Transactions on Pattern Analysis ' - 
'and Machine Intelligence', - year='2019', - homepage='https://github.com/CMU-Perceptual-Computing-Lab/openpose/', - ), - keypoint_info={ - 0: - dict(name='nose', id=0, color=[255, 0, 0], type='upper', swap=''), - 1: - dict(name='neck', id=1, color=[255, 85, 0], type='upper', swap=''), - 2: - dict( - name='right_shoulder', - id=2, - color=[255, 170, 0], - type='upper', - swap='left_shoulder'), - 3: - dict( - name='right_elbow', - id=3, - color=[255, 255, 0], - type='upper', - swap='left_elbow'), - 4: - dict( - name='right_wrist', - id=4, - color=[170, 255, 0], - type='upper', - swap='left_wrist'), - 5: - dict( - name='left_shoulder', - id=5, - color=[85, 255, 0], - type='upper', - swap='right_shoulder'), - 6: - dict( - name='left_elbow', - id=6, - color=[0, 255, 0], - type='upper', - swap='right_elbow'), - 7: - dict( - name='left_wrist', - id=7, - color=[0, 255, 85], - type='upper', - swap='right_wrist'), - 8: - dict( - name='right_hip', - id=8, - color=[0, 255, 170], - type='lower', - swap='left_hip'), - 9: - dict( - name='right_knee', - id=9, - color=[0, 255, 255], - type='lower', - swap='left_knee'), - 10: - dict( - name='right_ankle', - id=10, - color=[0, 170, 255], - type='lower', - swap='left_ankle'), - 11: - dict( - name='left_hip', - id=11, - color=[0, 85, 255], - type='lower', - swap='right_hip'), - 12: - dict( - name='left_knee', - id=12, - color=[0, 0, 255], - type='lower', - swap='right_knee'), - 13: - dict( - name='left_ankle', - id=13, - color=[85, 0, 255], - type='lower', - swap='right_ankle'), - 14: - dict( - name='right_eye', - id=14, - color=[170, 0, 255], - type='upper', - swap='left_eye'), - 15: - dict( - name='left_eye', - id=15, - color=[255, 0, 255], - type='upper', - swap='right_eye'), - 16: - dict( - name='right_ear', - id=16, - color=[255, 0, 170], - type='upper', - swap='left_ear'), - 17: - dict( - name='left_ear', - id=17, - color=[255, 0, 85], - type='upper', - swap='right_ear'), - }, - skeleton_info={ - 0: dict(link=('neck', 'right_shoulder'), id=0, color=[255, 0, 0]), - 1: dict(link=('neck', 'left_shoulder'), id=1, color=[255, 85, 0]), - 2: dict( - link=('right_shoulder', 'right_elbow'), id=2, color=[255, 170, 0]), - 3: - dict(link=('right_elbow', 'right_wrist'), id=3, color=[255, 255, 0]), - 4: - dict(link=('left_shoulder', 'left_elbow'), id=4, color=[170, 255, 0]), - 5: dict(link=('left_elbow', 'left_wrist'), id=5, color=[85, 255, 0]), - 6: dict(link=('neck', 'right_hip'), id=6, color=[0, 255, 0]), - 7: dict(link=('right_hip', 'right_knee'), id=7, color=[0, 255, 85]), - 8: dict(link=('right_knee', 'right_ankle'), id=8, color=[0, 255, 170]), - 9: dict(link=('neck', 'left_hip'), id=9, color=[0, 255, 225]), - 10: dict(link=('left_hip', 'left_knee'), id=10, color=[0, 170, 255]), - 11: dict(link=('left_knee', 'left_ankle'), id=11, color=[0, 85, 255]), - 12: dict(link=('neck', 'nose'), id=12, color=[0, 0, 255]), - 13: dict(link=('nose', 'right_eye'), id=13, color=[255, 0, 170]), - 14: dict(link=('right_eye', 'right_ear'), id=14, color=[170, 0, 255]), - 15: dict(link=('nose', 'left_eye'), id=15, color=[255, 0, 255]), - 16: dict(link=('left_eye', 'left_ear'), id=16, color=[255, 0, 170]), - }, - joint_weights=[1.] 
* 18, - sigmas=[ - 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, - 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089, 0.082 - ]) +dataset_info = dict( + dataset_name='coco_openpose', + paper_info=dict( + author='Zhe, Cao and Tomas, Simon and ' + 'Shih-En, Wei and Yaser, Sheikh', + title='OpenPose: Realtime Multi-Person 2D Pose ' + 'Estimation using Part Affinity Fields', + container='IEEE Transactions on Pattern Analysis ' + 'and Machine Intelligence', + year='2019', + homepage='https://github.com/CMU-Perceptual-Computing-Lab/openpose/', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[255, 0, 0], type='upper', swap=''), + 1: + dict(name='neck', id=1, color=[255, 85, 0], type='upper', swap=''), + 2: + dict( + name='right_shoulder', + id=2, + color=[255, 170, 0], + type='upper', + swap='left_shoulder'), + 3: + dict( + name='right_elbow', + id=3, + color=[255, 255, 0], + type='upper', + swap='left_elbow'), + 4: + dict( + name='right_wrist', + id=4, + color=[170, 255, 0], + type='upper', + swap='left_wrist'), + 5: + dict( + name='left_shoulder', + id=5, + color=[85, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='left_elbow', + id=6, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 7: + dict( + name='left_wrist', + id=7, + color=[0, 255, 85], + type='upper', + swap='right_wrist'), + 8: + dict( + name='right_hip', + id=8, + color=[0, 255, 170], + type='lower', + swap='left_hip'), + 9: + dict( + name='right_knee', + id=9, + color=[0, 255, 255], + type='lower', + swap='left_knee'), + 10: + dict( + name='right_ankle', + id=10, + color=[0, 170, 255], + type='lower', + swap='left_ankle'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 85, 255], + type='lower', + swap='right_hip'), + 12: + dict( + name='left_knee', + id=12, + color=[0, 0, 255], + type='lower', + swap='right_knee'), + 13: + dict( + name='left_ankle', + id=13, + color=[85, 0, 255], + type='lower', + swap='right_ankle'), + 14: + dict( + name='right_eye', + id=14, + color=[170, 0, 255], + type='upper', + swap='left_eye'), + 15: + dict( + name='left_eye', + id=15, + color=[255, 0, 255], + type='upper', + swap='right_eye'), + 16: + dict( + name='right_ear', + id=16, + color=[255, 0, 170], + type='upper', + swap='left_ear'), + 17: + dict( + name='left_ear', + id=17, + color=[255, 0, 85], + type='upper', + swap='right_ear'), + }, + skeleton_info={ + 0: dict(link=('neck', 'right_shoulder'), id=0, color=[255, 0, 0]), + 1: dict(link=('neck', 'left_shoulder'), id=1, color=[255, 85, 0]), + 2: dict( + link=('right_shoulder', 'right_elbow'), id=2, color=[255, 170, 0]), + 3: + dict(link=('right_elbow', 'right_wrist'), id=3, color=[255, 255, 0]), + 4: + dict(link=('left_shoulder', 'left_elbow'), id=4, color=[170, 255, 0]), + 5: dict(link=('left_elbow', 'left_wrist'), id=5, color=[85, 255, 0]), + 6: dict(link=('neck', 'right_hip'), id=6, color=[0, 255, 0]), + 7: dict(link=('right_hip', 'right_knee'), id=7, color=[0, 255, 85]), + 8: dict(link=('right_knee', 'right_ankle'), id=8, color=[0, 255, 170]), + 9: dict(link=('neck', 'left_hip'), id=9, color=[0, 255, 225]), + 10: dict(link=('left_hip', 'left_knee'), id=10, color=[0, 170, 255]), + 11: dict(link=('left_knee', 'left_ankle'), id=11, color=[0, 85, 255]), + 12: dict(link=('neck', 'nose'), id=12, color=[0, 0, 255]), + 13: dict(link=('nose', 'right_eye'), id=13, color=[255, 0, 170]), + 14: dict(link=('right_eye', 'right_ear'), id=14, color=[170, 0, 255]), + 15: dict(link=('nose', 'left_eye'), id=15, color=[255, 0, 255]), + 
16: dict(link=('left_eye', 'left_ear'), id=16, color=[255, 0, 170]), + }, + joint_weights=[1.] * 18, + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089, 0.082 + ]) diff --git a/configs/_base_/datasets/coco_wholebody.py b/configs/_base_/datasets/coco_wholebody.py index ef9b707017..a739c97a72 100644 --- a/configs/_base_/datasets/coco_wholebody.py +++ b/configs/_base_/datasets/coco_wholebody.py @@ -1,1154 +1,1154 @@ -dataset_info = dict( - dataset_name='coco_wholebody', - paper_info=dict( - author='Jin, Sheng and Xu, Lumin and Xu, Jin and ' - 'Wang, Can and Liu, Wentao and ' - 'Qian, Chen and Ouyang, Wanli and Luo, Ping', - title='Whole-Body Human Pose Estimation in the Wild', - container='Proceedings of the European ' - 'Conference on Computer Vision (ECCV)', - year='2020', - homepage='https://github.com/jin-s13/COCO-WholeBody/', - ), - keypoint_info={ - 0: - dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), - 1: - dict( - name='left_eye', - id=1, - color=[51, 153, 255], - type='upper', - swap='right_eye'), - 2: - dict( - name='right_eye', - id=2, - color=[51, 153, 255], - type='upper', - swap='left_eye'), - 3: - dict( - name='left_ear', - id=3, - color=[51, 153, 255], - type='upper', - swap='right_ear'), - 4: - dict( - name='right_ear', - id=4, - color=[51, 153, 255], - type='upper', - swap='left_ear'), - 5: - dict( - name='left_shoulder', - id=5, - color=[0, 255, 0], - type='upper', - swap='right_shoulder'), - 6: - dict( - name='right_shoulder', - id=6, - color=[255, 128, 0], - type='upper', - swap='left_shoulder'), - 7: - dict( - name='left_elbow', - id=7, - color=[0, 255, 0], - type='upper', - swap='right_elbow'), - 8: - dict( - name='right_elbow', - id=8, - color=[255, 128, 0], - type='upper', - swap='left_elbow'), - 9: - dict( - name='left_wrist', - id=9, - color=[0, 255, 0], - type='upper', - swap='right_wrist'), - 10: - dict( - name='right_wrist', - id=10, - color=[255, 128, 0], - type='upper', - swap='left_wrist'), - 11: - dict( - name='left_hip', - id=11, - color=[0, 255, 0], - type='lower', - swap='right_hip'), - 12: - dict( - name='right_hip', - id=12, - color=[255, 128, 0], - type='lower', - swap='left_hip'), - 13: - dict( - name='left_knee', - id=13, - color=[0, 255, 0], - type='lower', - swap='right_knee'), - 14: - dict( - name='right_knee', - id=14, - color=[255, 128, 0], - type='lower', - swap='left_knee'), - 15: - dict( - name='left_ankle', - id=15, - color=[0, 255, 0], - type='lower', - swap='right_ankle'), - 16: - dict( - name='right_ankle', - id=16, - color=[255, 128, 0], - type='lower', - swap='left_ankle'), - 17: - dict( - name='left_big_toe', - id=17, - color=[255, 128, 0], - type='lower', - swap='right_big_toe'), - 18: - dict( - name='left_small_toe', - id=18, - color=[255, 128, 0], - type='lower', - swap='right_small_toe'), - 19: - dict( - name='left_heel', - id=19, - color=[255, 128, 0], - type='lower', - swap='right_heel'), - 20: - dict( - name='right_big_toe', - id=20, - color=[255, 128, 0], - type='lower', - swap='left_big_toe'), - 21: - dict( - name='right_small_toe', - id=21, - color=[255, 128, 0], - type='lower', - swap='left_small_toe'), - 22: - dict( - name='right_heel', - id=22, - color=[255, 128, 0], - type='lower', - swap='left_heel'), - 23: - dict( - name='face-0', - id=23, - color=[255, 255, 255], - type='', - swap='face-16'), - 24: - dict( - name='face-1', - id=24, - color=[255, 255, 255], - type='', - swap='face-15'), - 25: - dict( - name='face-2', - 
id=25, - color=[255, 255, 255], - type='', - swap='face-14'), - 26: - dict( - name='face-3', - id=26, - color=[255, 255, 255], - type='', - swap='face-13'), - 27: - dict( - name='face-4', - id=27, - color=[255, 255, 255], - type='', - swap='face-12'), - 28: - dict( - name='face-5', - id=28, - color=[255, 255, 255], - type='', - swap='face-11'), - 29: - dict( - name='face-6', - id=29, - color=[255, 255, 255], - type='', - swap='face-10'), - 30: - dict( - name='face-7', - id=30, - color=[255, 255, 255], - type='', - swap='face-9'), - 31: - dict(name='face-8', id=31, color=[255, 255, 255], type='', swap=''), - 32: - dict( - name='face-9', - id=32, - color=[255, 255, 255], - type='', - swap='face-7'), - 33: - dict( - name='face-10', - id=33, - color=[255, 255, 255], - type='', - swap='face-6'), - 34: - dict( - name='face-11', - id=34, - color=[255, 255, 255], - type='', - swap='face-5'), - 35: - dict( - name='face-12', - id=35, - color=[255, 255, 255], - type='', - swap='face-4'), - 36: - dict( - name='face-13', - id=36, - color=[255, 255, 255], - type='', - swap='face-3'), - 37: - dict( - name='face-14', - id=37, - color=[255, 255, 255], - type='', - swap='face-2'), - 38: - dict( - name='face-15', - id=38, - color=[255, 255, 255], - type='', - swap='face-1'), - 39: - dict( - name='face-16', - id=39, - color=[255, 255, 255], - type='', - swap='face-0'), - 40: - dict( - name='face-17', - id=40, - color=[255, 255, 255], - type='', - swap='face-26'), - 41: - dict( - name='face-18', - id=41, - color=[255, 255, 255], - type='', - swap='face-25'), - 42: - dict( - name='face-19', - id=42, - color=[255, 255, 255], - type='', - swap='face-24'), - 43: - dict( - name='face-20', - id=43, - color=[255, 255, 255], - type='', - swap='face-23'), - 44: - dict( - name='face-21', - id=44, - color=[255, 255, 255], - type='', - swap='face-22'), - 45: - dict( - name='face-22', - id=45, - color=[255, 255, 255], - type='', - swap='face-21'), - 46: - dict( - name='face-23', - id=46, - color=[255, 255, 255], - type='', - swap='face-20'), - 47: - dict( - name='face-24', - id=47, - color=[255, 255, 255], - type='', - swap='face-19'), - 48: - dict( - name='face-25', - id=48, - color=[255, 255, 255], - type='', - swap='face-18'), - 49: - dict( - name='face-26', - id=49, - color=[255, 255, 255], - type='', - swap='face-17'), - 50: - dict(name='face-27', id=50, color=[255, 255, 255], type='', swap=''), - 51: - dict(name='face-28', id=51, color=[255, 255, 255], type='', swap=''), - 52: - dict(name='face-29', id=52, color=[255, 255, 255], type='', swap=''), - 53: - dict(name='face-30', id=53, color=[255, 255, 255], type='', swap=''), - 54: - dict( - name='face-31', - id=54, - color=[255, 255, 255], - type='', - swap='face-35'), - 55: - dict( - name='face-32', - id=55, - color=[255, 255, 255], - type='', - swap='face-34'), - 56: - dict(name='face-33', id=56, color=[255, 255, 255], type='', swap=''), - 57: - dict( - name='face-34', - id=57, - color=[255, 255, 255], - type='', - swap='face-32'), - 58: - dict( - name='face-35', - id=58, - color=[255, 255, 255], - type='', - swap='face-31'), - 59: - dict( - name='face-36', - id=59, - color=[255, 255, 255], - type='', - swap='face-45'), - 60: - dict( - name='face-37', - id=60, - color=[255, 255, 255], - type='', - swap='face-44'), - 61: - dict( - name='face-38', - id=61, - color=[255, 255, 255], - type='', - swap='face-43'), - 62: - dict( - name='face-39', - id=62, - color=[255, 255, 255], - type='', - swap='face-42'), - 63: - dict( - name='face-40', - id=63, - color=[255, 255, 
255], - type='', - swap='face-47'), - 64: - dict( - name='face-41', - id=64, - color=[255, 255, 255], - type='', - swap='face-46'), - 65: - dict( - name='face-42', - id=65, - color=[255, 255, 255], - type='', - swap='face-39'), - 66: - dict( - name='face-43', - id=66, - color=[255, 255, 255], - type='', - swap='face-38'), - 67: - dict( - name='face-44', - id=67, - color=[255, 255, 255], - type='', - swap='face-37'), - 68: - dict( - name='face-45', - id=68, - color=[255, 255, 255], - type='', - swap='face-36'), - 69: - dict( - name='face-46', - id=69, - color=[255, 255, 255], - type='', - swap='face-41'), - 70: - dict( - name='face-47', - id=70, - color=[255, 255, 255], - type='', - swap='face-40'), - 71: - dict( - name='face-48', - id=71, - color=[255, 255, 255], - type='', - swap='face-54'), - 72: - dict( - name='face-49', - id=72, - color=[255, 255, 255], - type='', - swap='face-53'), - 73: - dict( - name='face-50', - id=73, - color=[255, 255, 255], - type='', - swap='face-52'), - 74: - dict(name='face-51', id=74, color=[255, 255, 255], type='', swap=''), - 75: - dict( - name='face-52', - id=75, - color=[255, 255, 255], - type='', - swap='face-50'), - 76: - dict( - name='face-53', - id=76, - color=[255, 255, 255], - type='', - swap='face-49'), - 77: - dict( - name='face-54', - id=77, - color=[255, 255, 255], - type='', - swap='face-48'), - 78: - dict( - name='face-55', - id=78, - color=[255, 255, 255], - type='', - swap='face-59'), - 79: - dict( - name='face-56', - id=79, - color=[255, 255, 255], - type='', - swap='face-58'), - 80: - dict(name='face-57', id=80, color=[255, 255, 255], type='', swap=''), - 81: - dict( - name='face-58', - id=81, - color=[255, 255, 255], - type='', - swap='face-56'), - 82: - dict( - name='face-59', - id=82, - color=[255, 255, 255], - type='', - swap='face-55'), - 83: - dict( - name='face-60', - id=83, - color=[255, 255, 255], - type='', - swap='face-64'), - 84: - dict( - name='face-61', - id=84, - color=[255, 255, 255], - type='', - swap='face-63'), - 85: - dict(name='face-62', id=85, color=[255, 255, 255], type='', swap=''), - 86: - dict( - name='face-63', - id=86, - color=[255, 255, 255], - type='', - swap='face-61'), - 87: - dict( - name='face-64', - id=87, - color=[255, 255, 255], - type='', - swap='face-60'), - 88: - dict( - name='face-65', - id=88, - color=[255, 255, 255], - type='', - swap='face-67'), - 89: - dict(name='face-66', id=89, color=[255, 255, 255], type='', swap=''), - 90: - dict( - name='face-67', - id=90, - color=[255, 255, 255], - type='', - swap='face-65'), - 91: - dict( - name='left_hand_root', - id=91, - color=[255, 255, 255], - type='', - swap='right_hand_root'), - 92: - dict( - name='left_thumb1', - id=92, - color=[255, 128, 0], - type='', - swap='right_thumb1'), - 93: - dict( - name='left_thumb2', - id=93, - color=[255, 128, 0], - type='', - swap='right_thumb2'), - 94: - dict( - name='left_thumb3', - id=94, - color=[255, 128, 0], - type='', - swap='right_thumb3'), - 95: - dict( - name='left_thumb4', - id=95, - color=[255, 128, 0], - type='', - swap='right_thumb4'), - 96: - dict( - name='left_forefinger1', - id=96, - color=[255, 153, 255], - type='', - swap='right_forefinger1'), - 97: - dict( - name='left_forefinger2', - id=97, - color=[255, 153, 255], - type='', - swap='right_forefinger2'), - 98: - dict( - name='left_forefinger3', - id=98, - color=[255, 153, 255], - type='', - swap='right_forefinger3'), - 99: - dict( - name='left_forefinger4', - id=99, - color=[255, 153, 255], - type='', - swap='right_forefinger4'), - 100: - dict( 
- name='left_middle_finger1', - id=100, - color=[102, 178, 255], - type='', - swap='right_middle_finger1'), - 101: - dict( - name='left_middle_finger2', - id=101, - color=[102, 178, 255], - type='', - swap='right_middle_finger2'), - 102: - dict( - name='left_middle_finger3', - id=102, - color=[102, 178, 255], - type='', - swap='right_middle_finger3'), - 103: - dict( - name='left_middle_finger4', - id=103, - color=[102, 178, 255], - type='', - swap='right_middle_finger4'), - 104: - dict( - name='left_ring_finger1', - id=104, - color=[255, 51, 51], - type='', - swap='right_ring_finger1'), - 105: - dict( - name='left_ring_finger2', - id=105, - color=[255, 51, 51], - type='', - swap='right_ring_finger2'), - 106: - dict( - name='left_ring_finger3', - id=106, - color=[255, 51, 51], - type='', - swap='right_ring_finger3'), - 107: - dict( - name='left_ring_finger4', - id=107, - color=[255, 51, 51], - type='', - swap='right_ring_finger4'), - 108: - dict( - name='left_pinky_finger1', - id=108, - color=[0, 255, 0], - type='', - swap='right_pinky_finger1'), - 109: - dict( - name='left_pinky_finger2', - id=109, - color=[0, 255, 0], - type='', - swap='right_pinky_finger2'), - 110: - dict( - name='left_pinky_finger3', - id=110, - color=[0, 255, 0], - type='', - swap='right_pinky_finger3'), - 111: - dict( - name='left_pinky_finger4', - id=111, - color=[0, 255, 0], - type='', - swap='right_pinky_finger4'), - 112: - dict( - name='right_hand_root', - id=112, - color=[255, 255, 255], - type='', - swap='left_hand_root'), - 113: - dict( - name='right_thumb1', - id=113, - color=[255, 128, 0], - type='', - swap='left_thumb1'), - 114: - dict( - name='right_thumb2', - id=114, - color=[255, 128, 0], - type='', - swap='left_thumb2'), - 115: - dict( - name='right_thumb3', - id=115, - color=[255, 128, 0], - type='', - swap='left_thumb3'), - 116: - dict( - name='right_thumb4', - id=116, - color=[255, 128, 0], - type='', - swap='left_thumb4'), - 117: - dict( - name='right_forefinger1', - id=117, - color=[255, 153, 255], - type='', - swap='left_forefinger1'), - 118: - dict( - name='right_forefinger2', - id=118, - color=[255, 153, 255], - type='', - swap='left_forefinger2'), - 119: - dict( - name='right_forefinger3', - id=119, - color=[255, 153, 255], - type='', - swap='left_forefinger3'), - 120: - dict( - name='right_forefinger4', - id=120, - color=[255, 153, 255], - type='', - swap='left_forefinger4'), - 121: - dict( - name='right_middle_finger1', - id=121, - color=[102, 178, 255], - type='', - swap='left_middle_finger1'), - 122: - dict( - name='right_middle_finger2', - id=122, - color=[102, 178, 255], - type='', - swap='left_middle_finger2'), - 123: - dict( - name='right_middle_finger3', - id=123, - color=[102, 178, 255], - type='', - swap='left_middle_finger3'), - 124: - dict( - name='right_middle_finger4', - id=124, - color=[102, 178, 255], - type='', - swap='left_middle_finger4'), - 125: - dict( - name='right_ring_finger1', - id=125, - color=[255, 51, 51], - type='', - swap='left_ring_finger1'), - 126: - dict( - name='right_ring_finger2', - id=126, - color=[255, 51, 51], - type='', - swap='left_ring_finger2'), - 127: - dict( - name='right_ring_finger3', - id=127, - color=[255, 51, 51], - type='', - swap='left_ring_finger3'), - 128: - dict( - name='right_ring_finger4', - id=128, - color=[255, 51, 51], - type='', - swap='left_ring_finger4'), - 129: - dict( - name='right_pinky_finger1', - id=129, - color=[0, 255, 0], - type='', - swap='left_pinky_finger1'), - 130: - dict( - name='right_pinky_finger2', - id=130, - 
color=[0, 255, 0], - type='', - swap='left_pinky_finger2'), - 131: - dict( - name='right_pinky_finger3', - id=131, - color=[0, 255, 0], - type='', - swap='left_pinky_finger3'), - 132: - dict( - name='right_pinky_finger4', - id=132, - color=[0, 255, 0], - type='', - swap='left_pinky_finger4') - }, - skeleton_info={ - 0: - dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), - 1: - dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), - 2: - dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), - 3: - dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), - 4: - dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), - 5: - dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), - 6: - dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), - 7: - dict( - link=('left_shoulder', 'right_shoulder'), - id=7, - color=[51, 153, 255]), - 8: - dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), - 9: - dict( - link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), - 10: - dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), - 11: - dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), - 12: - dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), - 13: - dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), - 14: - dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), - 15: - dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), - 16: - dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), - 17: - dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), - 18: - dict( - link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]), - 19: - dict(link=('left_ankle', 'left_big_toe'), id=19, color=[0, 255, 0]), - 20: - dict(link=('left_ankle', 'left_small_toe'), id=20, color=[0, 255, 0]), - 21: - dict(link=('left_ankle', 'left_heel'), id=21, color=[0, 255, 0]), - 22: - dict( - link=('right_ankle', 'right_big_toe'), id=22, color=[255, 128, 0]), - 23: - dict( - link=('right_ankle', 'right_small_toe'), - id=23, - color=[255, 128, 0]), - 24: - dict(link=('right_ankle', 'right_heel'), id=24, color=[255, 128, 0]), - 25: - dict( - link=('left_hand_root', 'left_thumb1'), id=25, color=[255, 128, - 0]), - 26: - dict(link=('left_thumb1', 'left_thumb2'), id=26, color=[255, 128, 0]), - 27: - dict(link=('left_thumb2', 'left_thumb3'), id=27, color=[255, 128, 0]), - 28: - dict(link=('left_thumb3', 'left_thumb4'), id=28, color=[255, 128, 0]), - 29: - dict( - link=('left_hand_root', 'left_forefinger1'), - id=29, - color=[255, 153, 255]), - 30: - dict( - link=('left_forefinger1', 'left_forefinger2'), - id=30, - color=[255, 153, 255]), - 31: - dict( - link=('left_forefinger2', 'left_forefinger3'), - id=31, - color=[255, 153, 255]), - 32: - dict( - link=('left_forefinger3', 'left_forefinger4'), - id=32, - color=[255, 153, 255]), - 33: - dict( - link=('left_hand_root', 'left_middle_finger1'), - id=33, - color=[102, 178, 255]), - 34: - dict( - link=('left_middle_finger1', 'left_middle_finger2'), - id=34, - color=[102, 178, 255]), - 35: - dict( - link=('left_middle_finger2', 'left_middle_finger3'), - id=35, - color=[102, 178, 255]), - 36: - dict( - link=('left_middle_finger3', 'left_middle_finger4'), - id=36, - color=[102, 178, 255]), - 37: - dict( - link=('left_hand_root', 'left_ring_finger1'), - id=37, - color=[255, 51, 51]), - 38: - dict( - link=('left_ring_finger1', 'left_ring_finger2'), - id=38, - 
color=[255, 51, 51]), - 39: - dict( - link=('left_ring_finger2', 'left_ring_finger3'), - id=39, - color=[255, 51, 51]), - 40: - dict( - link=('left_ring_finger3', 'left_ring_finger4'), - id=40, - color=[255, 51, 51]), - 41: - dict( - link=('left_hand_root', 'left_pinky_finger1'), - id=41, - color=[0, 255, 0]), - 42: - dict( - link=('left_pinky_finger1', 'left_pinky_finger2'), - id=42, - color=[0, 255, 0]), - 43: - dict( - link=('left_pinky_finger2', 'left_pinky_finger3'), - id=43, - color=[0, 255, 0]), - 44: - dict( - link=('left_pinky_finger3', 'left_pinky_finger4'), - id=44, - color=[0, 255, 0]), - 45: - dict( - link=('right_hand_root', 'right_thumb1'), - id=45, - color=[255, 128, 0]), - 46: - dict( - link=('right_thumb1', 'right_thumb2'), id=46, color=[255, 128, 0]), - 47: - dict( - link=('right_thumb2', 'right_thumb3'), id=47, color=[255, 128, 0]), - 48: - dict( - link=('right_thumb3', 'right_thumb4'), id=48, color=[255, 128, 0]), - 49: - dict( - link=('right_hand_root', 'right_forefinger1'), - id=49, - color=[255, 153, 255]), - 50: - dict( - link=('right_forefinger1', 'right_forefinger2'), - id=50, - color=[255, 153, 255]), - 51: - dict( - link=('right_forefinger2', 'right_forefinger3'), - id=51, - color=[255, 153, 255]), - 52: - dict( - link=('right_forefinger3', 'right_forefinger4'), - id=52, - color=[255, 153, 255]), - 53: - dict( - link=('right_hand_root', 'right_middle_finger1'), - id=53, - color=[102, 178, 255]), - 54: - dict( - link=('right_middle_finger1', 'right_middle_finger2'), - id=54, - color=[102, 178, 255]), - 55: - dict( - link=('right_middle_finger2', 'right_middle_finger3'), - id=55, - color=[102, 178, 255]), - 56: - dict( - link=('right_middle_finger3', 'right_middle_finger4'), - id=56, - color=[102, 178, 255]), - 57: - dict( - link=('right_hand_root', 'right_ring_finger1'), - id=57, - color=[255, 51, 51]), - 58: - dict( - link=('right_ring_finger1', 'right_ring_finger2'), - id=58, - color=[255, 51, 51]), - 59: - dict( - link=('right_ring_finger2', 'right_ring_finger3'), - id=59, - color=[255, 51, 51]), - 60: - dict( - link=('right_ring_finger3', 'right_ring_finger4'), - id=60, - color=[255, 51, 51]), - 61: - dict( - link=('right_hand_root', 'right_pinky_finger1'), - id=61, - color=[0, 255, 0]), - 62: - dict( - link=('right_pinky_finger1', 'right_pinky_finger2'), - id=62, - color=[0, 255, 0]), - 63: - dict( - link=('right_pinky_finger2', 'right_pinky_finger3'), - id=63, - color=[0, 255, 0]), - 64: - dict( - link=('right_pinky_finger3', 'right_pinky_finger4'), - id=64, - color=[0, 255, 0]) - }, - joint_weights=[1.] 
* 133, - # 'https://github.com/jin-s13/COCO-WholeBody/blob/master/' - # 'evaluation/myeval_wholebody.py#L175' - sigmas=[ - 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, - 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089, 0.068, 0.066, 0.066, - 0.092, 0.094, 0.094, 0.042, 0.043, 0.044, 0.043, 0.040, 0.035, 0.031, - 0.025, 0.020, 0.023, 0.029, 0.032, 0.037, 0.038, 0.043, 0.041, 0.045, - 0.013, 0.012, 0.011, 0.011, 0.012, 0.012, 0.011, 0.011, 0.013, 0.015, - 0.009, 0.007, 0.007, 0.007, 0.012, 0.009, 0.008, 0.016, 0.010, 0.017, - 0.011, 0.009, 0.011, 0.009, 0.007, 0.013, 0.008, 0.011, 0.012, 0.010, - 0.034, 0.008, 0.008, 0.009, 0.008, 0.008, 0.007, 0.010, 0.008, 0.009, - 0.009, 0.009, 0.007, 0.007, 0.008, 0.011, 0.008, 0.008, 0.008, 0.01, - 0.008, 0.029, 0.022, 0.035, 0.037, 0.047, 0.026, 0.025, 0.024, 0.035, - 0.018, 0.024, 0.022, 0.026, 0.017, 0.021, 0.021, 0.032, 0.02, 0.019, - 0.022, 0.031, 0.029, 0.022, 0.035, 0.037, 0.047, 0.026, 0.025, 0.024, - 0.035, 0.018, 0.024, 0.022, 0.026, 0.017, 0.021, 0.021, 0.032, 0.02, - 0.019, 0.022, 0.031 - ]) +dataset_info = dict( + dataset_name='coco_wholebody', + paper_info=dict( + author='Jin, Sheng and Xu, Lumin and Xu, Jin and ' + 'Wang, Can and Liu, Wentao and ' + 'Qian, Chen and Ouyang, Wanli and Luo, Ping', + title='Whole-Body Human Pose Estimation in the Wild', + container='Proceedings of the European ' + 'Conference on Computer Vision (ECCV)', + year='2020', + homepage='https://github.com/jin-s13/COCO-WholeBody/', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 17: + dict( + name='left_big_toe', + id=17, + color=[255, 128, 0], + type='lower', + swap='right_big_toe'), + 18: + dict( + name='left_small_toe', + id=18, + color=[255, 128, 0], + type='lower', + swap='right_small_toe'), + 19: + dict( + name='left_heel', + id=19, + color=[255, 128, 0], + 
type='lower', + swap='right_heel'), + 20: + dict( + name='right_big_toe', + id=20, + color=[255, 128, 0], + type='lower', + swap='left_big_toe'), + 21: + dict( + name='right_small_toe', + id=21, + color=[255, 128, 0], + type='lower', + swap='left_small_toe'), + 22: + dict( + name='right_heel', + id=22, + color=[255, 128, 0], + type='lower', + swap='left_heel'), + 23: + dict( + name='face-0', + id=23, + color=[255, 255, 255], + type='', + swap='face-16'), + 24: + dict( + name='face-1', + id=24, + color=[255, 255, 255], + type='', + swap='face-15'), + 25: + dict( + name='face-2', + id=25, + color=[255, 255, 255], + type='', + swap='face-14'), + 26: + dict( + name='face-3', + id=26, + color=[255, 255, 255], + type='', + swap='face-13'), + 27: + dict( + name='face-4', + id=27, + color=[255, 255, 255], + type='', + swap='face-12'), + 28: + dict( + name='face-5', + id=28, + color=[255, 255, 255], + type='', + swap='face-11'), + 29: + dict( + name='face-6', + id=29, + color=[255, 255, 255], + type='', + swap='face-10'), + 30: + dict( + name='face-7', + id=30, + color=[255, 255, 255], + type='', + swap='face-9'), + 31: + dict(name='face-8', id=31, color=[255, 255, 255], type='', swap=''), + 32: + dict( + name='face-9', + id=32, + color=[255, 255, 255], + type='', + swap='face-7'), + 33: + dict( + name='face-10', + id=33, + color=[255, 255, 255], + type='', + swap='face-6'), + 34: + dict( + name='face-11', + id=34, + color=[255, 255, 255], + type='', + swap='face-5'), + 35: + dict( + name='face-12', + id=35, + color=[255, 255, 255], + type='', + swap='face-4'), + 36: + dict( + name='face-13', + id=36, + color=[255, 255, 255], + type='', + swap='face-3'), + 37: + dict( + name='face-14', + id=37, + color=[255, 255, 255], + type='', + swap='face-2'), + 38: + dict( + name='face-15', + id=38, + color=[255, 255, 255], + type='', + swap='face-1'), + 39: + dict( + name='face-16', + id=39, + color=[255, 255, 255], + type='', + swap='face-0'), + 40: + dict( + name='face-17', + id=40, + color=[255, 255, 255], + type='', + swap='face-26'), + 41: + dict( + name='face-18', + id=41, + color=[255, 255, 255], + type='', + swap='face-25'), + 42: + dict( + name='face-19', + id=42, + color=[255, 255, 255], + type='', + swap='face-24'), + 43: + dict( + name='face-20', + id=43, + color=[255, 255, 255], + type='', + swap='face-23'), + 44: + dict( + name='face-21', + id=44, + color=[255, 255, 255], + type='', + swap='face-22'), + 45: + dict( + name='face-22', + id=45, + color=[255, 255, 255], + type='', + swap='face-21'), + 46: + dict( + name='face-23', + id=46, + color=[255, 255, 255], + type='', + swap='face-20'), + 47: + dict( + name='face-24', + id=47, + color=[255, 255, 255], + type='', + swap='face-19'), + 48: + dict( + name='face-25', + id=48, + color=[255, 255, 255], + type='', + swap='face-18'), + 49: + dict( + name='face-26', + id=49, + color=[255, 255, 255], + type='', + swap='face-17'), + 50: + dict(name='face-27', id=50, color=[255, 255, 255], type='', swap=''), + 51: + dict(name='face-28', id=51, color=[255, 255, 255], type='', swap=''), + 52: + dict(name='face-29', id=52, color=[255, 255, 255], type='', swap=''), + 53: + dict(name='face-30', id=53, color=[255, 255, 255], type='', swap=''), + 54: + dict( + name='face-31', + id=54, + color=[255, 255, 255], + type='', + swap='face-35'), + 55: + dict( + name='face-32', + id=55, + color=[255, 255, 255], + type='', + swap='face-34'), + 56: + dict(name='face-33', id=56, color=[255, 255, 255], type='', swap=''), + 57: + dict( + name='face-34', + id=57, + 
color=[255, 255, 255], + type='', + swap='face-32'), + 58: + dict( + name='face-35', + id=58, + color=[255, 255, 255], + type='', + swap='face-31'), + 59: + dict( + name='face-36', + id=59, + color=[255, 255, 255], + type='', + swap='face-45'), + 60: + dict( + name='face-37', + id=60, + color=[255, 255, 255], + type='', + swap='face-44'), + 61: + dict( + name='face-38', + id=61, + color=[255, 255, 255], + type='', + swap='face-43'), + 62: + dict( + name='face-39', + id=62, + color=[255, 255, 255], + type='', + swap='face-42'), + 63: + dict( + name='face-40', + id=63, + color=[255, 255, 255], + type='', + swap='face-47'), + 64: + dict( + name='face-41', + id=64, + color=[255, 255, 255], + type='', + swap='face-46'), + 65: + dict( + name='face-42', + id=65, + color=[255, 255, 255], + type='', + swap='face-39'), + 66: + dict( + name='face-43', + id=66, + color=[255, 255, 255], + type='', + swap='face-38'), + 67: + dict( + name='face-44', + id=67, + color=[255, 255, 255], + type='', + swap='face-37'), + 68: + dict( + name='face-45', + id=68, + color=[255, 255, 255], + type='', + swap='face-36'), + 69: + dict( + name='face-46', + id=69, + color=[255, 255, 255], + type='', + swap='face-41'), + 70: + dict( + name='face-47', + id=70, + color=[255, 255, 255], + type='', + swap='face-40'), + 71: + dict( + name='face-48', + id=71, + color=[255, 255, 255], + type='', + swap='face-54'), + 72: + dict( + name='face-49', + id=72, + color=[255, 255, 255], + type='', + swap='face-53'), + 73: + dict( + name='face-50', + id=73, + color=[255, 255, 255], + type='', + swap='face-52'), + 74: + dict(name='face-51', id=74, color=[255, 255, 255], type='', swap=''), + 75: + dict( + name='face-52', + id=75, + color=[255, 255, 255], + type='', + swap='face-50'), + 76: + dict( + name='face-53', + id=76, + color=[255, 255, 255], + type='', + swap='face-49'), + 77: + dict( + name='face-54', + id=77, + color=[255, 255, 255], + type='', + swap='face-48'), + 78: + dict( + name='face-55', + id=78, + color=[255, 255, 255], + type='', + swap='face-59'), + 79: + dict( + name='face-56', + id=79, + color=[255, 255, 255], + type='', + swap='face-58'), + 80: + dict(name='face-57', id=80, color=[255, 255, 255], type='', swap=''), + 81: + dict( + name='face-58', + id=81, + color=[255, 255, 255], + type='', + swap='face-56'), + 82: + dict( + name='face-59', + id=82, + color=[255, 255, 255], + type='', + swap='face-55'), + 83: + dict( + name='face-60', + id=83, + color=[255, 255, 255], + type='', + swap='face-64'), + 84: + dict( + name='face-61', + id=84, + color=[255, 255, 255], + type='', + swap='face-63'), + 85: + dict(name='face-62', id=85, color=[255, 255, 255], type='', swap=''), + 86: + dict( + name='face-63', + id=86, + color=[255, 255, 255], + type='', + swap='face-61'), + 87: + dict( + name='face-64', + id=87, + color=[255, 255, 255], + type='', + swap='face-60'), + 88: + dict( + name='face-65', + id=88, + color=[255, 255, 255], + type='', + swap='face-67'), + 89: + dict(name='face-66', id=89, color=[255, 255, 255], type='', swap=''), + 90: + dict( + name='face-67', + id=90, + color=[255, 255, 255], + type='', + swap='face-65'), + 91: + dict( + name='left_hand_root', + id=91, + color=[255, 255, 255], + type='', + swap='right_hand_root'), + 92: + dict( + name='left_thumb1', + id=92, + color=[255, 128, 0], + type='', + swap='right_thumb1'), + 93: + dict( + name='left_thumb2', + id=93, + color=[255, 128, 0], + type='', + swap='right_thumb2'), + 94: + dict( + name='left_thumb3', + id=94, + color=[255, 128, 0], + type='', + 
swap='right_thumb3'), + 95: + dict( + name='left_thumb4', + id=95, + color=[255, 128, 0], + type='', + swap='right_thumb4'), + 96: + dict( + name='left_forefinger1', + id=96, + color=[255, 153, 255], + type='', + swap='right_forefinger1'), + 97: + dict( + name='left_forefinger2', + id=97, + color=[255, 153, 255], + type='', + swap='right_forefinger2'), + 98: + dict( + name='left_forefinger3', + id=98, + color=[255, 153, 255], + type='', + swap='right_forefinger3'), + 99: + dict( + name='left_forefinger4', + id=99, + color=[255, 153, 255], + type='', + swap='right_forefinger4'), + 100: + dict( + name='left_middle_finger1', + id=100, + color=[102, 178, 255], + type='', + swap='right_middle_finger1'), + 101: + dict( + name='left_middle_finger2', + id=101, + color=[102, 178, 255], + type='', + swap='right_middle_finger2'), + 102: + dict( + name='left_middle_finger3', + id=102, + color=[102, 178, 255], + type='', + swap='right_middle_finger3'), + 103: + dict( + name='left_middle_finger4', + id=103, + color=[102, 178, 255], + type='', + swap='right_middle_finger4'), + 104: + dict( + name='left_ring_finger1', + id=104, + color=[255, 51, 51], + type='', + swap='right_ring_finger1'), + 105: + dict( + name='left_ring_finger2', + id=105, + color=[255, 51, 51], + type='', + swap='right_ring_finger2'), + 106: + dict( + name='left_ring_finger3', + id=106, + color=[255, 51, 51], + type='', + swap='right_ring_finger3'), + 107: + dict( + name='left_ring_finger4', + id=107, + color=[255, 51, 51], + type='', + swap='right_ring_finger4'), + 108: + dict( + name='left_pinky_finger1', + id=108, + color=[0, 255, 0], + type='', + swap='right_pinky_finger1'), + 109: + dict( + name='left_pinky_finger2', + id=109, + color=[0, 255, 0], + type='', + swap='right_pinky_finger2'), + 110: + dict( + name='left_pinky_finger3', + id=110, + color=[0, 255, 0], + type='', + swap='right_pinky_finger3'), + 111: + dict( + name='left_pinky_finger4', + id=111, + color=[0, 255, 0], + type='', + swap='right_pinky_finger4'), + 112: + dict( + name='right_hand_root', + id=112, + color=[255, 255, 255], + type='', + swap='left_hand_root'), + 113: + dict( + name='right_thumb1', + id=113, + color=[255, 128, 0], + type='', + swap='left_thumb1'), + 114: + dict( + name='right_thumb2', + id=114, + color=[255, 128, 0], + type='', + swap='left_thumb2'), + 115: + dict( + name='right_thumb3', + id=115, + color=[255, 128, 0], + type='', + swap='left_thumb3'), + 116: + dict( + name='right_thumb4', + id=116, + color=[255, 128, 0], + type='', + swap='left_thumb4'), + 117: + dict( + name='right_forefinger1', + id=117, + color=[255, 153, 255], + type='', + swap='left_forefinger1'), + 118: + dict( + name='right_forefinger2', + id=118, + color=[255, 153, 255], + type='', + swap='left_forefinger2'), + 119: + dict( + name='right_forefinger3', + id=119, + color=[255, 153, 255], + type='', + swap='left_forefinger3'), + 120: + dict( + name='right_forefinger4', + id=120, + color=[255, 153, 255], + type='', + swap='left_forefinger4'), + 121: + dict( + name='right_middle_finger1', + id=121, + color=[102, 178, 255], + type='', + swap='left_middle_finger1'), + 122: + dict( + name='right_middle_finger2', + id=122, + color=[102, 178, 255], + type='', + swap='left_middle_finger2'), + 123: + dict( + name='right_middle_finger3', + id=123, + color=[102, 178, 255], + type='', + swap='left_middle_finger3'), + 124: + dict( + name='right_middle_finger4', + id=124, + color=[102, 178, 255], + type='', + swap='left_middle_finger4'), + 125: + dict( + name='right_ring_finger1', + 
id=125, + color=[255, 51, 51], + type='', + swap='left_ring_finger1'), + 126: + dict( + name='right_ring_finger2', + id=126, + color=[255, 51, 51], + type='', + swap='left_ring_finger2'), + 127: + dict( + name='right_ring_finger3', + id=127, + color=[255, 51, 51], + type='', + swap='left_ring_finger3'), + 128: + dict( + name='right_ring_finger4', + id=128, + color=[255, 51, 51], + type='', + swap='left_ring_finger4'), + 129: + dict( + name='right_pinky_finger1', + id=129, + color=[0, 255, 0], + type='', + swap='left_pinky_finger1'), + 130: + dict( + name='right_pinky_finger2', + id=130, + color=[0, 255, 0], + type='', + swap='left_pinky_finger2'), + 131: + dict( + name='right_pinky_finger3', + id=131, + color=[0, 255, 0], + type='', + swap='left_pinky_finger3'), + 132: + dict( + name='right_pinky_finger4', + id=132, + color=[0, 255, 0], + type='', + swap='left_pinky_finger4') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'right_shoulder'), + id=7, + color=[51, 153, 255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), + 13: + dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), + 14: + dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), + 16: + dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), + 18: + dict( + link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]), + 19: + dict(link=('left_ankle', 'left_big_toe'), id=19, color=[0, 255, 0]), + 20: + dict(link=('left_ankle', 'left_small_toe'), id=20, color=[0, 255, 0]), + 21: + dict(link=('left_ankle', 'left_heel'), id=21, color=[0, 255, 0]), + 22: + dict( + link=('right_ankle', 'right_big_toe'), id=22, color=[255, 128, 0]), + 23: + dict( + link=('right_ankle', 'right_small_toe'), + id=23, + color=[255, 128, 0]), + 24: + dict(link=('right_ankle', 'right_heel'), id=24, color=[255, 128, 0]), + 25: + dict( + link=('left_hand_root', 'left_thumb1'), id=25, color=[255, 128, + 0]), + 26: + dict(link=('left_thumb1', 'left_thumb2'), id=26, color=[255, 128, 0]), + 27: + dict(link=('left_thumb2', 'left_thumb3'), id=27, color=[255, 128, 0]), + 28: + dict(link=('left_thumb3', 'left_thumb4'), id=28, color=[255, 128, 0]), + 29: + dict( + link=('left_hand_root', 'left_forefinger1'), + id=29, + color=[255, 153, 255]), + 30: + dict( + link=('left_forefinger1', 'left_forefinger2'), + id=30, + color=[255, 153, 255]), + 31: + dict( + link=('left_forefinger2', 'left_forefinger3'), + id=31, + color=[255, 153, 255]), + 32: + dict( + link=('left_forefinger3', 'left_forefinger4'), + id=32, + color=[255, 153, 
255]), + 33: + dict( + link=('left_hand_root', 'left_middle_finger1'), + id=33, + color=[102, 178, 255]), + 34: + dict( + link=('left_middle_finger1', 'left_middle_finger2'), + id=34, + color=[102, 178, 255]), + 35: + dict( + link=('left_middle_finger2', 'left_middle_finger3'), + id=35, + color=[102, 178, 255]), + 36: + dict( + link=('left_middle_finger3', 'left_middle_finger4'), + id=36, + color=[102, 178, 255]), + 37: + dict( + link=('left_hand_root', 'left_ring_finger1'), + id=37, + color=[255, 51, 51]), + 38: + dict( + link=('left_ring_finger1', 'left_ring_finger2'), + id=38, + color=[255, 51, 51]), + 39: + dict( + link=('left_ring_finger2', 'left_ring_finger3'), + id=39, + color=[255, 51, 51]), + 40: + dict( + link=('left_ring_finger3', 'left_ring_finger4'), + id=40, + color=[255, 51, 51]), + 41: + dict( + link=('left_hand_root', 'left_pinky_finger1'), + id=41, + color=[0, 255, 0]), + 42: + dict( + link=('left_pinky_finger1', 'left_pinky_finger2'), + id=42, + color=[0, 255, 0]), + 43: + dict( + link=('left_pinky_finger2', 'left_pinky_finger3'), + id=43, + color=[0, 255, 0]), + 44: + dict( + link=('left_pinky_finger3', 'left_pinky_finger4'), + id=44, + color=[0, 255, 0]), + 45: + dict( + link=('right_hand_root', 'right_thumb1'), + id=45, + color=[255, 128, 0]), + 46: + dict( + link=('right_thumb1', 'right_thumb2'), id=46, color=[255, 128, 0]), + 47: + dict( + link=('right_thumb2', 'right_thumb3'), id=47, color=[255, 128, 0]), + 48: + dict( + link=('right_thumb3', 'right_thumb4'), id=48, color=[255, 128, 0]), + 49: + dict( + link=('right_hand_root', 'right_forefinger1'), + id=49, + color=[255, 153, 255]), + 50: + dict( + link=('right_forefinger1', 'right_forefinger2'), + id=50, + color=[255, 153, 255]), + 51: + dict( + link=('right_forefinger2', 'right_forefinger3'), + id=51, + color=[255, 153, 255]), + 52: + dict( + link=('right_forefinger3', 'right_forefinger4'), + id=52, + color=[255, 153, 255]), + 53: + dict( + link=('right_hand_root', 'right_middle_finger1'), + id=53, + color=[102, 178, 255]), + 54: + dict( + link=('right_middle_finger1', 'right_middle_finger2'), + id=54, + color=[102, 178, 255]), + 55: + dict( + link=('right_middle_finger2', 'right_middle_finger3'), + id=55, + color=[102, 178, 255]), + 56: + dict( + link=('right_middle_finger3', 'right_middle_finger4'), + id=56, + color=[102, 178, 255]), + 57: + dict( + link=('right_hand_root', 'right_ring_finger1'), + id=57, + color=[255, 51, 51]), + 58: + dict( + link=('right_ring_finger1', 'right_ring_finger2'), + id=58, + color=[255, 51, 51]), + 59: + dict( + link=('right_ring_finger2', 'right_ring_finger3'), + id=59, + color=[255, 51, 51]), + 60: + dict( + link=('right_ring_finger3', 'right_ring_finger4'), + id=60, + color=[255, 51, 51]), + 61: + dict( + link=('right_hand_root', 'right_pinky_finger1'), + id=61, + color=[0, 255, 0]), + 62: + dict( + link=('right_pinky_finger1', 'right_pinky_finger2'), + id=62, + color=[0, 255, 0]), + 63: + dict( + link=('right_pinky_finger2', 'right_pinky_finger3'), + id=63, + color=[0, 255, 0]), + 64: + dict( + link=('right_pinky_finger3', 'right_pinky_finger4'), + id=64, + color=[0, 255, 0]) + }, + joint_weights=[1.] 
* 133, + # 'https://github.com/jin-s13/COCO-WholeBody/blob/master/' + # 'evaluation/myeval_wholebody.py#L175' + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089, 0.068, 0.066, 0.066, + 0.092, 0.094, 0.094, 0.042, 0.043, 0.044, 0.043, 0.040, 0.035, 0.031, + 0.025, 0.020, 0.023, 0.029, 0.032, 0.037, 0.038, 0.043, 0.041, 0.045, + 0.013, 0.012, 0.011, 0.011, 0.012, 0.012, 0.011, 0.011, 0.013, 0.015, + 0.009, 0.007, 0.007, 0.007, 0.012, 0.009, 0.008, 0.016, 0.010, 0.017, + 0.011, 0.009, 0.011, 0.009, 0.007, 0.013, 0.008, 0.011, 0.012, 0.010, + 0.034, 0.008, 0.008, 0.009, 0.008, 0.008, 0.007, 0.010, 0.008, 0.009, + 0.009, 0.009, 0.007, 0.007, 0.008, 0.011, 0.008, 0.008, 0.008, 0.01, + 0.008, 0.029, 0.022, 0.035, 0.037, 0.047, 0.026, 0.025, 0.024, 0.035, + 0.018, 0.024, 0.022, 0.026, 0.017, 0.021, 0.021, 0.032, 0.02, 0.019, + 0.022, 0.031, 0.029, 0.022, 0.035, 0.037, 0.047, 0.026, 0.025, 0.024, + 0.035, 0.018, 0.024, 0.022, 0.026, 0.017, 0.021, 0.021, 0.032, 0.02, + 0.019, 0.022, 0.031 + ]) diff --git a/configs/_base_/datasets/coco_wholebody_face.py b/configs/_base_/datasets/coco_wholebody_face.py index a3fe1e5b33..e208671348 100644 --- a/configs/_base_/datasets/coco_wholebody_face.py +++ b/configs/_base_/datasets/coco_wholebody_face.py @@ -1,154 +1,154 @@ -dataset_info = dict( - dataset_name='coco_wholebody_face', - paper_info=dict( - author='Jin, Sheng and Xu, Lumin and Xu, Jin and ' - 'Wang, Can and Liu, Wentao and ' - 'Qian, Chen and Ouyang, Wanli and Luo, Ping', - title='Whole-Body Human Pose Estimation in the Wild', - container='Proceedings of the European ' - 'Conference on Computer Vision (ECCV)', - year='2020', - homepage='https://github.com/jin-s13/COCO-WholeBody/', - ), - keypoint_info={ - 0: - dict(name='face-0', id=0, color=[255, 0, 0], type='', swap='face-16'), - 1: - dict(name='face-1', id=1, color=[255, 0, 0], type='', swap='face-15'), - 2: - dict(name='face-2', id=2, color=[255, 0, 0], type='', swap='face-14'), - 3: - dict(name='face-3', id=3, color=[255, 0, 0], type='', swap='face-13'), - 4: - dict(name='face-4', id=4, color=[255, 0, 0], type='', swap='face-12'), - 5: - dict(name='face-5', id=5, color=[255, 0, 0], type='', swap='face-11'), - 6: - dict(name='face-6', id=6, color=[255, 0, 0], type='', swap='face-10'), - 7: - dict(name='face-7', id=7, color=[255, 0, 0], type='', swap='face-9'), - 8: dict(name='face-8', id=8, color=[255, 0, 0], type='', swap=''), - 9: - dict(name='face-9', id=9, color=[255, 0, 0], type='', swap='face-7'), - 10: - dict(name='face-10', id=10, color=[255, 0, 0], type='', swap='face-6'), - 11: - dict(name='face-11', id=11, color=[255, 0, 0], type='', swap='face-5'), - 12: - dict(name='face-12', id=12, color=[255, 0, 0], type='', swap='face-4'), - 13: - dict(name='face-13', id=13, color=[255, 0, 0], type='', swap='face-3'), - 14: - dict(name='face-14', id=14, color=[255, 0, 0], type='', swap='face-2'), - 15: - dict(name='face-15', id=15, color=[255, 0, 0], type='', swap='face-1'), - 16: - dict(name='face-16', id=16, color=[255, 0, 0], type='', swap='face-0'), - 17: dict( - name='face-17', id=17, color=[255, 0, 0], type='', swap='face-26'), - 18: dict( - name='face-18', id=18, color=[255, 0, 0], type='', swap='face-25'), - 19: dict( - name='face-19', id=19, color=[255, 0, 0], type='', swap='face-24'), - 20: dict( - name='face-20', id=20, color=[255, 0, 0], type='', swap='face-23'), - 21: dict( - name='face-21', id=21, color=[255, 0, 0], type='', swap='face-22'), - 22: dict( 
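In each keypoint entry above, the swap field names the keypoint's horizontal-flip counterpart (an empty string marks a self-symmetric point such as the nose). Below is a minimal, dependency-free sketch of how such swap fields can be turned into index pairs for flip augmentation; it is not part of this diff, build_flip_pairs is a hypothetical helper, and the small keypoint_info is a hand-written excerpt for illustration only.

keypoint_info = {
    0: dict(name='left_big_toe', id=0, swap='right_big_toe'),
    1: dict(name='left_heel', id=1, swap='right_heel'),
    2: dict(name='right_big_toe', id=2, swap='left_big_toe'),
    3: dict(name='right_heel', id=3, swap='left_heel'),
    4: dict(name='nose', id=4, swap=''),
}

def build_flip_pairs(keypoint_info):
    # Map keypoint names back to their indices, then pair each keypoint
    # with its named swap partner, keeping each pair once with i < j.
    name_to_id = {v['name']: k for k, v in keypoint_info.items()}
    pairs = set()
    for idx, info in keypoint_info.items():
        swap_name = info.get('swap', '')
        if swap_name:  # '' means the keypoint maps to itself under flipping
            pairs.add(tuple(sorted((idx, name_to_id[swap_name]))))
    return sorted(pairs)

print(build_flip_pairs(keypoint_info))  # [(0, 2), (1, 3)]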
- name='face-22', id=22, color=[255, 0, 0], type='', swap='face-21'), - 23: dict( - name='face-23', id=23, color=[255, 0, 0], type='', swap='face-20'), - 24: dict( - name='face-24', id=24, color=[255, 0, 0], type='', swap='face-19'), - 25: dict( - name='face-25', id=25, color=[255, 0, 0], type='', swap='face-18'), - 26: dict( - name='face-26', id=26, color=[255, 0, 0], type='', swap='face-17'), - 27: dict(name='face-27', id=27, color=[255, 0, 0], type='', swap=''), - 28: dict(name='face-28', id=28, color=[255, 0, 0], type='', swap=''), - 29: dict(name='face-29', id=29, color=[255, 0, 0], type='', swap=''), - 30: dict(name='face-30', id=30, color=[255, 0, 0], type='', swap=''), - 31: dict( - name='face-31', id=31, color=[255, 0, 0], type='', swap='face-35'), - 32: dict( - name='face-32', id=32, color=[255, 0, 0], type='', swap='face-34'), - 33: dict(name='face-33', id=33, color=[255, 0, 0], type='', swap=''), - 34: dict( - name='face-34', id=34, color=[255, 0, 0], type='', swap='face-32'), - 35: dict( - name='face-35', id=35, color=[255, 0, 0], type='', swap='face-31'), - 36: dict( - name='face-36', id=36, color=[255, 0, 0], type='', swap='face-45'), - 37: dict( - name='face-37', id=37, color=[255, 0, 0], type='', swap='face-44'), - 38: dict( - name='face-38', id=38, color=[255, 0, 0], type='', swap='face-43'), - 39: dict( - name='face-39', id=39, color=[255, 0, 0], type='', swap='face-42'), - 40: dict( - name='face-40', id=40, color=[255, 0, 0], type='', swap='face-47'), - 41: dict( - name='face-41', id=41, color=[255, 0, 0], type='', swap='face-46'), - 42: dict( - name='face-42', id=42, color=[255, 0, 0], type='', swap='face-39'), - 43: dict( - name='face-43', id=43, color=[255, 0, 0], type='', swap='face-38'), - 44: dict( - name='face-44', id=44, color=[255, 0, 0], type='', swap='face-37'), - 45: dict( - name='face-45', id=45, color=[255, 0, 0], type='', swap='face-36'), - 46: dict( - name='face-46', id=46, color=[255, 0, 0], type='', swap='face-41'), - 47: dict( - name='face-47', id=47, color=[255, 0, 0], type='', swap='face-40'), - 48: dict( - name='face-48', id=48, color=[255, 0, 0], type='', swap='face-54'), - 49: dict( - name='face-49', id=49, color=[255, 0, 0], type='', swap='face-53'), - 50: dict( - name='face-50', id=50, color=[255, 0, 0], type='', swap='face-52'), - 51: dict(name='face-51', id=52, color=[255, 0, 0], type='', swap=''), - 52: dict( - name='face-52', id=52, color=[255, 0, 0], type='', swap='face-50'), - 53: dict( - name='face-53', id=53, color=[255, 0, 0], type='', swap='face-49'), - 54: dict( - name='face-54', id=54, color=[255, 0, 0], type='', swap='face-48'), - 55: dict( - name='face-55', id=55, color=[255, 0, 0], type='', swap='face-59'), - 56: dict( - name='face-56', id=56, color=[255, 0, 0], type='', swap='face-58'), - 57: dict(name='face-57', id=57, color=[255, 0, 0], type='', swap=''), - 58: dict( - name='face-58', id=58, color=[255, 0, 0], type='', swap='face-56'), - 59: dict( - name='face-59', id=59, color=[255, 0, 0], type='', swap='face-55'), - 60: dict( - name='face-60', id=60, color=[255, 0, 0], type='', swap='face-64'), - 61: dict( - name='face-61', id=61, color=[255, 0, 0], type='', swap='face-63'), - 62: dict(name='face-62', id=62, color=[255, 0, 0], type='', swap=''), - 63: dict( - name='face-63', id=63, color=[255, 0, 0], type='', swap='face-61'), - 64: dict( - name='face-64', id=64, color=[255, 0, 0], type='', swap='face-60'), - 65: dict( - name='face-65', id=65, color=[255, 0, 0], type='', swap='face-67'), - 66: dict(name='face-66', id=66, 
color=[255, 0, 0], type='', swap=''), - 67: dict( - name='face-67', id=67, color=[255, 0, 0], type='', swap='face-65') - }, - skeleton_info={}, - joint_weights=[1.] * 68, - - # 'https://github.com/jin-s13/COCO-WholeBody/blob/master/' - # 'evaluation/myeval_wholebody.py#L177' - sigmas=[ - 0.042, 0.043, 0.044, 0.043, 0.040, 0.035, 0.031, 0.025, 0.020, 0.023, - 0.029, 0.032, 0.037, 0.038, 0.043, 0.041, 0.045, 0.013, 0.012, 0.011, - 0.011, 0.012, 0.012, 0.011, 0.011, 0.013, 0.015, 0.009, 0.007, 0.007, - 0.007, 0.012, 0.009, 0.008, 0.016, 0.010, 0.017, 0.011, 0.009, 0.011, - 0.009, 0.007, 0.013, 0.008, 0.011, 0.012, 0.010, 0.034, 0.008, 0.008, - 0.009, 0.008, 0.008, 0.007, 0.010, 0.008, 0.009, 0.009, 0.009, 0.007, - 0.007, 0.008, 0.011, 0.008, 0.008, 0.008, 0.01, 0.008 - ]) +dataset_info = dict( + dataset_name='coco_wholebody_face', + paper_info=dict( + author='Jin, Sheng and Xu, Lumin and Xu, Jin and ' + 'Wang, Can and Liu, Wentao and ' + 'Qian, Chen and Ouyang, Wanli and Luo, Ping', + title='Whole-Body Human Pose Estimation in the Wild', + container='Proceedings of the European ' + 'Conference on Computer Vision (ECCV)', + year='2020', + homepage='https://github.com/jin-s13/COCO-WholeBody/', + ), + keypoint_info={ + 0: + dict(name='face-0', id=0, color=[255, 0, 0], type='', swap='face-16'), + 1: + dict(name='face-1', id=1, color=[255, 0, 0], type='', swap='face-15'), + 2: + dict(name='face-2', id=2, color=[255, 0, 0], type='', swap='face-14'), + 3: + dict(name='face-3', id=3, color=[255, 0, 0], type='', swap='face-13'), + 4: + dict(name='face-4', id=4, color=[255, 0, 0], type='', swap='face-12'), + 5: + dict(name='face-5', id=5, color=[255, 0, 0], type='', swap='face-11'), + 6: + dict(name='face-6', id=6, color=[255, 0, 0], type='', swap='face-10'), + 7: + dict(name='face-7', id=7, color=[255, 0, 0], type='', swap='face-9'), + 8: dict(name='face-8', id=8, color=[255, 0, 0], type='', swap=''), + 9: + dict(name='face-9', id=9, color=[255, 0, 0], type='', swap='face-7'), + 10: + dict(name='face-10', id=10, color=[255, 0, 0], type='', swap='face-6'), + 11: + dict(name='face-11', id=11, color=[255, 0, 0], type='', swap='face-5'), + 12: + dict(name='face-12', id=12, color=[255, 0, 0], type='', swap='face-4'), + 13: + dict(name='face-13', id=13, color=[255, 0, 0], type='', swap='face-3'), + 14: + dict(name='face-14', id=14, color=[255, 0, 0], type='', swap='face-2'), + 15: + dict(name='face-15', id=15, color=[255, 0, 0], type='', swap='face-1'), + 16: + dict(name='face-16', id=16, color=[255, 0, 0], type='', swap='face-0'), + 17: dict( + name='face-17', id=17, color=[255, 0, 0], type='', swap='face-26'), + 18: dict( + name='face-18', id=18, color=[255, 0, 0], type='', swap='face-25'), + 19: dict( + name='face-19', id=19, color=[255, 0, 0], type='', swap='face-24'), + 20: dict( + name='face-20', id=20, color=[255, 0, 0], type='', swap='face-23'), + 21: dict( + name='face-21', id=21, color=[255, 0, 0], type='', swap='face-22'), + 22: dict( + name='face-22', id=22, color=[255, 0, 0], type='', swap='face-21'), + 23: dict( + name='face-23', id=23, color=[255, 0, 0], type='', swap='face-20'), + 24: dict( + name='face-24', id=24, color=[255, 0, 0], type='', swap='face-19'), + 25: dict( + name='face-25', id=25, color=[255, 0, 0], type='', swap='face-18'), + 26: dict( + name='face-26', id=26, color=[255, 0, 0], type='', swap='face-17'), + 27: dict(name='face-27', id=27, color=[255, 0, 0], type='', swap=''), + 28: dict(name='face-28', id=28, color=[255, 0, 0], type='', swap=''), + 29: dict(name='face-29', 
id=29, color=[255, 0, 0], type='', swap=''), + 30: dict(name='face-30', id=30, color=[255, 0, 0], type='', swap=''), + 31: dict( + name='face-31', id=31, color=[255, 0, 0], type='', swap='face-35'), + 32: dict( + name='face-32', id=32, color=[255, 0, 0], type='', swap='face-34'), + 33: dict(name='face-33', id=33, color=[255, 0, 0], type='', swap=''), + 34: dict( + name='face-34', id=34, color=[255, 0, 0], type='', swap='face-32'), + 35: dict( + name='face-35', id=35, color=[255, 0, 0], type='', swap='face-31'), + 36: dict( + name='face-36', id=36, color=[255, 0, 0], type='', swap='face-45'), + 37: dict( + name='face-37', id=37, color=[255, 0, 0], type='', swap='face-44'), + 38: dict( + name='face-38', id=38, color=[255, 0, 0], type='', swap='face-43'), + 39: dict( + name='face-39', id=39, color=[255, 0, 0], type='', swap='face-42'), + 40: dict( + name='face-40', id=40, color=[255, 0, 0], type='', swap='face-47'), + 41: dict( + name='face-41', id=41, color=[255, 0, 0], type='', swap='face-46'), + 42: dict( + name='face-42', id=42, color=[255, 0, 0], type='', swap='face-39'), + 43: dict( + name='face-43', id=43, color=[255, 0, 0], type='', swap='face-38'), + 44: dict( + name='face-44', id=44, color=[255, 0, 0], type='', swap='face-37'), + 45: dict( + name='face-45', id=45, color=[255, 0, 0], type='', swap='face-36'), + 46: dict( + name='face-46', id=46, color=[255, 0, 0], type='', swap='face-41'), + 47: dict( + name='face-47', id=47, color=[255, 0, 0], type='', swap='face-40'), + 48: dict( + name='face-48', id=48, color=[255, 0, 0], type='', swap='face-54'), + 49: dict( + name='face-49', id=49, color=[255, 0, 0], type='', swap='face-53'), + 50: dict( + name='face-50', id=50, color=[255, 0, 0], type='', swap='face-52'), + 51: dict(name='face-51', id=51, color=[255, 0, 0], type='', swap=''), + 52: dict( + name='face-52', id=52, color=[255, 0, 0], type='', swap='face-50'), + 53: dict( + name='face-53', id=53, color=[255, 0, 0], type='', swap='face-49'), + 54: dict( + name='face-54', id=54, color=[255, 0, 0], type='', swap='face-48'), + 55: dict( + name='face-55', id=55, color=[255, 0, 0], type='', swap='face-59'), + 56: dict( + name='face-56', id=56, color=[255, 0, 0], type='', swap='face-58'), + 57: dict(name='face-57', id=57, color=[255, 0, 0], type='', swap=''), + 58: dict( + name='face-58', id=58, color=[255, 0, 0], type='', swap='face-56'), + 59: dict( + name='face-59', id=59, color=[255, 0, 0], type='', swap='face-55'), + 60: dict( + name='face-60', id=60, color=[255, 0, 0], type='', swap='face-64'), + 61: dict( + name='face-61', id=61, color=[255, 0, 0], type='', swap='face-63'), + 62: dict(name='face-62', id=62, color=[255, 0, 0], type='', swap=''), + 63: dict( + name='face-63', id=63, color=[255, 0, 0], type='', swap='face-61'), + 64: dict( + name='face-64', id=64, color=[255, 0, 0], type='', swap='face-60'), + 65: dict( + name='face-65', id=65, color=[255, 0, 0], type='', swap='face-67'), + 66: dict(name='face-66', id=66, color=[255, 0, 0], type='', swap=''), + 67: dict( + name='face-67', id=67, color=[255, 0, 0], type='', swap='face-65') + }, + skeleton_info={}, + joint_weights=[1.]
* 68, + + # 'https://github.com/jin-s13/COCO-WholeBody/blob/master/' + # 'evaluation/myeval_wholebody.py#L177' + sigmas=[ + 0.042, 0.043, 0.044, 0.043, 0.040, 0.035, 0.031, 0.025, 0.020, 0.023, + 0.029, 0.032, 0.037, 0.038, 0.043, 0.041, 0.045, 0.013, 0.012, 0.011, + 0.011, 0.012, 0.012, 0.011, 0.011, 0.013, 0.015, 0.009, 0.007, 0.007, + 0.007, 0.012, 0.009, 0.008, 0.016, 0.010, 0.017, 0.011, 0.009, 0.011, + 0.009, 0.007, 0.013, 0.008, 0.011, 0.012, 0.010, 0.034, 0.008, 0.008, + 0.009, 0.008, 0.008, 0.007, 0.010, 0.008, 0.009, 0.009, 0.009, 0.007, + 0.007, 0.008, 0.011, 0.008, 0.008, 0.008, 0.01, 0.008 + ]) diff --git a/configs/_base_/datasets/coco_wholebody_hand.py b/configs/_base_/datasets/coco_wholebody_hand.py index 1910b2ced5..585ed789bd 100644 --- a/configs/_base_/datasets/coco_wholebody_hand.py +++ b/configs/_base_/datasets/coco_wholebody_hand.py @@ -1,147 +1,147 @@ -dataset_info = dict( - dataset_name='coco_wholebody_hand', - paper_info=dict( - author='Jin, Sheng and Xu, Lumin and Xu, Jin and ' - 'Wang, Can and Liu, Wentao and ' - 'Qian, Chen and Ouyang, Wanli and Luo, Ping', - title='Whole-Body Human Pose Estimation in the Wild', - container='Proceedings of the European ' - 'Conference on Computer Vision (ECCV)', - year='2020', - homepage='https://github.com/jin-s13/COCO-WholeBody/', - ), - keypoint_info={ - 0: - dict(name='wrist', id=0, color=[255, 255, 255], type='', swap=''), - 1: - dict(name='thumb1', id=1, color=[255, 128, 0], type='', swap=''), - 2: - dict(name='thumb2', id=2, color=[255, 128, 0], type='', swap=''), - 3: - dict(name='thumb3', id=3, color=[255, 128, 0], type='', swap=''), - 4: - dict(name='thumb4', id=4, color=[255, 128, 0], type='', swap=''), - 5: - dict( - name='forefinger1', id=5, color=[255, 153, 255], type='', swap=''), - 6: - dict( - name='forefinger2', id=6, color=[255, 153, 255], type='', swap=''), - 7: - dict( - name='forefinger3', id=7, color=[255, 153, 255], type='', swap=''), - 8: - dict( - name='forefinger4', id=8, color=[255, 153, 255], type='', swap=''), - 9: - dict( - name='middle_finger1', - id=9, - color=[102, 178, 255], - type='', - swap=''), - 10: - dict( - name='middle_finger2', - id=10, - color=[102, 178, 255], - type='', - swap=''), - 11: - dict( - name='middle_finger3', - id=11, - color=[102, 178, 255], - type='', - swap=''), - 12: - dict( - name='middle_finger4', - id=12, - color=[102, 178, 255], - type='', - swap=''), - 13: - dict( - name='ring_finger1', id=13, color=[255, 51, 51], type='', swap=''), - 14: - dict( - name='ring_finger2', id=14, color=[255, 51, 51], type='', swap=''), - 15: - dict( - name='ring_finger3', id=15, color=[255, 51, 51], type='', swap=''), - 16: - dict( - name='ring_finger4', id=16, color=[255, 51, 51], type='', swap=''), - 17: - dict(name='pinky_finger1', id=17, color=[0, 255, 0], type='', swap=''), - 18: - dict(name='pinky_finger2', id=18, color=[0, 255, 0], type='', swap=''), - 19: - dict(name='pinky_finger3', id=19, color=[0, 255, 0], type='', swap=''), - 20: - dict(name='pinky_finger4', id=20, color=[0, 255, 0], type='', swap='') - }, - skeleton_info={ - 0: - dict(link=('wrist', 'thumb1'), id=0, color=[255, 128, 0]), - 1: - dict(link=('thumb1', 'thumb2'), id=1, color=[255, 128, 0]), - 2: - dict(link=('thumb2', 'thumb3'), id=2, color=[255, 128, 0]), - 3: - dict(link=('thumb3', 'thumb4'), id=3, color=[255, 128, 0]), - 4: - dict(link=('wrist', 'forefinger1'), id=4, color=[255, 153, 255]), - 5: - dict(link=('forefinger1', 'forefinger2'), id=5, color=[255, 153, 255]), - 6: - dict(link=('forefinger2', 
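The keypoint_info keys, the id fields, the swap names, joint_weights and sigmas in a config like coco_wholebody_face.py above have to stay mutually consistent, so a quick self-check is useful when editing them by hand. The sketch below assumes only the dict layout shown in this diff; check_dataset_info is a hypothetical helper, not an MMPose API. It flags, for example, an entry whose id does not match its dict key.

def check_dataset_info(keypoint_info, joint_weights, sigmas):
    # Collect the declared keypoint names so swap targets can be validated.
    names = {info['name'] for info in keypoint_info.values()}
    problems = []
    for key, info in keypoint_info.items():
        if info['id'] != key:
            problems.append(f"key {key} has mismatched id={info['id']} ({info['name']})")
        swap = info.get('swap', '')
        if swap and swap not in names:
            problems.append(f"{info['name']} swaps to unknown keypoint {swap!r}")
    if len(joint_weights) != len(keypoint_info):
        problems.append('joint_weights length differs from the number of keypoints')
    if sigmas and len(sigmas) != len(keypoint_info):
        problems.append('sigmas length differs from the number of keypoints')
    return problems

# Deliberately inconsistent toy input: the second entry's id does not match its key.
bad = {0: dict(name='face-0', id=0, swap=''), 1: dict(name='face-1', id=2, swap='')}
print(check_dataset_info(bad, [1.] * 2, [0.042, 0.043]))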
'forefinger3'), id=6, color=[255, 153, 255]), - 7: - dict(link=('forefinger3', 'forefinger4'), id=7, color=[255, 153, 255]), - 8: - dict(link=('wrist', 'middle_finger1'), id=8, color=[102, 178, 255]), - 9: - dict( - link=('middle_finger1', 'middle_finger2'), - id=9, - color=[102, 178, 255]), - 10: - dict( - link=('middle_finger2', 'middle_finger3'), - id=10, - color=[102, 178, 255]), - 11: - dict( - link=('middle_finger3', 'middle_finger4'), - id=11, - color=[102, 178, 255]), - 12: - dict(link=('wrist', 'ring_finger1'), id=12, color=[255, 51, 51]), - 13: - dict( - link=('ring_finger1', 'ring_finger2'), id=13, color=[255, 51, 51]), - 14: - dict( - link=('ring_finger2', 'ring_finger3'), id=14, color=[255, 51, 51]), - 15: - dict( - link=('ring_finger3', 'ring_finger4'), id=15, color=[255, 51, 51]), - 16: - dict(link=('wrist', 'pinky_finger1'), id=16, color=[0, 255, 0]), - 17: - dict( - link=('pinky_finger1', 'pinky_finger2'), id=17, color=[0, 255, 0]), - 18: - dict( - link=('pinky_finger2', 'pinky_finger3'), id=18, color=[0, 255, 0]), - 19: - dict( - link=('pinky_finger3', 'pinky_finger4'), id=19, color=[0, 255, 0]) - }, - joint_weights=[1.] * 21, - sigmas=[ - 0.029, 0.022, 0.035, 0.037, 0.047, 0.026, 0.025, 0.024, 0.035, 0.018, - 0.024, 0.022, 0.026, 0.017, 0.021, 0.021, 0.032, 0.02, 0.019, 0.022, - 0.031 - ]) +dataset_info = dict( + dataset_name='coco_wholebody_hand', + paper_info=dict( + author='Jin, Sheng and Xu, Lumin and Xu, Jin and ' + 'Wang, Can and Liu, Wentao and ' + 'Qian, Chen and Ouyang, Wanli and Luo, Ping', + title='Whole-Body Human Pose Estimation in the Wild', + container='Proceedings of the European ' + 'Conference on Computer Vision (ECCV)', + year='2020', + homepage='https://github.com/jin-s13/COCO-WholeBody/', + ), + keypoint_info={ + 0: + dict(name='wrist', id=0, color=[255, 255, 255], type='', swap=''), + 1: + dict(name='thumb1', id=1, color=[255, 128, 0], type='', swap=''), + 2: + dict(name='thumb2', id=2, color=[255, 128, 0], type='', swap=''), + 3: + dict(name='thumb3', id=3, color=[255, 128, 0], type='', swap=''), + 4: + dict(name='thumb4', id=4, color=[255, 128, 0], type='', swap=''), + 5: + dict( + name='forefinger1', id=5, color=[255, 153, 255], type='', swap=''), + 6: + dict( + name='forefinger2', id=6, color=[255, 153, 255], type='', swap=''), + 7: + dict( + name='forefinger3', id=7, color=[255, 153, 255], type='', swap=''), + 8: + dict( + name='forefinger4', id=8, color=[255, 153, 255], type='', swap=''), + 9: + dict( + name='middle_finger1', + id=9, + color=[102, 178, 255], + type='', + swap=''), + 10: + dict( + name='middle_finger2', + id=10, + color=[102, 178, 255], + type='', + swap=''), + 11: + dict( + name='middle_finger3', + id=11, + color=[102, 178, 255], + type='', + swap=''), + 12: + dict( + name='middle_finger4', + id=12, + color=[102, 178, 255], + type='', + swap=''), + 13: + dict( + name='ring_finger1', id=13, color=[255, 51, 51], type='', swap=''), + 14: + dict( + name='ring_finger2', id=14, color=[255, 51, 51], type='', swap=''), + 15: + dict( + name='ring_finger3', id=15, color=[255, 51, 51], type='', swap=''), + 16: + dict( + name='ring_finger4', id=16, color=[255, 51, 51], type='', swap=''), + 17: + dict(name='pinky_finger1', id=17, color=[0, 255, 0], type='', swap=''), + 18: + dict(name='pinky_finger2', id=18, color=[0, 255, 0], type='', swap=''), + 19: + dict(name='pinky_finger3', id=19, color=[0, 255, 0], type='', swap=''), + 20: + dict(name='pinky_finger4', id=20, color=[0, 255, 0], type='', swap='') + }, + skeleton_info={ + 0: + 
dict(link=('wrist', 'thumb1'), id=0, color=[255, 128, 0]), + 1: + dict(link=('thumb1', 'thumb2'), id=1, color=[255, 128, 0]), + 2: + dict(link=('thumb2', 'thumb3'), id=2, color=[255, 128, 0]), + 3: + dict(link=('thumb3', 'thumb4'), id=3, color=[255, 128, 0]), + 4: + dict(link=('wrist', 'forefinger1'), id=4, color=[255, 153, 255]), + 5: + dict(link=('forefinger1', 'forefinger2'), id=5, color=[255, 153, 255]), + 6: + dict(link=('forefinger2', 'forefinger3'), id=6, color=[255, 153, 255]), + 7: + dict(link=('forefinger3', 'forefinger4'), id=7, color=[255, 153, 255]), + 8: + dict(link=('wrist', 'middle_finger1'), id=8, color=[102, 178, 255]), + 9: + dict( + link=('middle_finger1', 'middle_finger2'), + id=9, + color=[102, 178, 255]), + 10: + dict( + link=('middle_finger2', 'middle_finger3'), + id=10, + color=[102, 178, 255]), + 11: + dict( + link=('middle_finger3', 'middle_finger4'), + id=11, + color=[102, 178, 255]), + 12: + dict(link=('wrist', 'ring_finger1'), id=12, color=[255, 51, 51]), + 13: + dict( + link=('ring_finger1', 'ring_finger2'), id=13, color=[255, 51, 51]), + 14: + dict( + link=('ring_finger2', 'ring_finger3'), id=14, color=[255, 51, 51]), + 15: + dict( + link=('ring_finger3', 'ring_finger4'), id=15, color=[255, 51, 51]), + 16: + dict(link=('wrist', 'pinky_finger1'), id=16, color=[0, 255, 0]), + 17: + dict( + link=('pinky_finger1', 'pinky_finger2'), id=17, color=[0, 255, 0]), + 18: + dict( + link=('pinky_finger2', 'pinky_finger3'), id=18, color=[0, 255, 0]), + 19: + dict( + link=('pinky_finger3', 'pinky_finger4'), id=19, color=[0, 255, 0]) + }, + joint_weights=[1.] * 21, + sigmas=[ + 0.029, 0.022, 0.035, 0.037, 0.047, 0.026, 0.025, 0.024, 0.035, 0.018, + 0.024, 0.022, 0.026, 0.017, 0.021, 0.021, 0.032, 0.02, 0.019, 0.022, + 0.031 + ]) diff --git a/configs/_base_/datasets/cofw.py b/configs/_base_/datasets/cofw.py index d528bf2f2f..88792546bc 100644 --- a/configs/_base_/datasets/cofw.py +++ b/configs/_base_/datasets/cofw.py @@ -1,57 +1,57 @@ -dataset_info = dict( - dataset_name='cofw', - paper_info=dict( - author='Burgos-Artizzu, Xavier P and Perona, ' - r'Pietro and Doll{\'a}r, Piotr', - title='Robust face landmark estimation under occlusion', - container='Proceedings of the IEEE international ' - 'conference on computer vision', - year='2013', - homepage='http://www.vision.caltech.edu/xpburgos/ICCV13/', - ), - keypoint_info={ - 0: dict(name='kpt-0', id=0, color=[255, 0, 0], type='', swap='kpt-1'), - 1: dict(name='kpt-1', id=1, color=[255, 0, 0], type='', swap='kpt-0'), - 2: dict(name='kpt-2', id=2, color=[255, 0, 0], type='', swap='kpt-3'), - 3: dict(name='kpt-3', id=3, color=[255, 0, 0], type='', swap='kpt-2'), - 4: dict(name='kpt-4', id=4, color=[255, 0, 0], type='', swap='kpt-6'), - 5: dict(name='kpt-5', id=5, color=[255, 0, 0], type='', swap='kpt-7'), - 6: dict(name='kpt-6', id=6, color=[255, 0, 0], type='', swap='kpt-4'), - 7: dict(name='kpt-7', id=7, color=[255, 0, 0], type='', swap='kpt-5'), - 8: dict(name='kpt-8', id=8, color=[255, 0, 0], type='', swap='kpt-9'), - 9: dict(name='kpt-9', id=9, color=[255, 0, 0], type='', swap='kpt-8'), - 10: - dict(name='kpt-10', id=10, color=[255, 0, 0], type='', swap='kpt-11'), - 11: - dict(name='kpt-11', id=11, color=[255, 0, 0], type='', swap='kpt-10'), - 12: - dict(name='kpt-12', id=12, color=[255, 0, 0], type='', swap='kpt-14'), - 13: - dict(name='kpt-13', id=13, color=[255, 0, 0], type='', swap='kpt-15'), - 14: - dict(name='kpt-14', id=14, color=[255, 0, 0], type='', swap='kpt-12'), - 15: - dict(name='kpt-15', id=15, color=[255, 0, 
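The sigmas lists in these configs are the per-keypoint constants used by COCO-style OKS evaluation (see the myeval_wholebody.py links quoted in the comments above). A rough, dependency-free sketch of that similarity follows, assuming the usual COCO convention k_i = 2 * sigma_i; object_keypoint_similarity is an illustrative name rather than an MMPose function, and the toy coordinates are made up.

import math

def object_keypoint_similarity(pred, gt, visible, sigmas, area):
    # pred/gt: lists of (x, y); visible: 0/1 flags; area: the object scale s**2.
    total, count = 0.0, 0
    for (px, py), (gx, gy), v, sigma in zip(pred, gt, visible, sigmas):
        if not v:
            continue  # only labeled keypoints contribute to OKS
        d2 = (px - gx) ** 2 + (py - gy) ** 2
        k = 2 * sigma  # per-keypoint constant derived from the config sigmas
        total += math.exp(-d2 / (2 * area * k ** 2 + 1e-12))
        count += 1
    return total / count if count else 0.0

# Toy usage with the first three hand sigmas listed above.
print(object_keypoint_similarity(
    pred=[(10, 10), (20, 21), (31, 30)],
    gt=[(10, 10), (20, 20), (30, 30)],
    visible=[1, 1, 1],
    sigmas=[0.029, 0.022, 0.035],
    area=100.0))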
0], type='', swap='kpt-13'), - 16: - dict(name='kpt-16', id=16, color=[255, 0, 0], type='', swap='kpt-17'), - 17: - dict(name='kpt-17', id=17, color=[255, 0, 0], type='', swap='kpt-16'), - 18: - dict(name='kpt-18', id=18, color=[255, 0, 0], type='', swap='kpt-19'), - 19: - dict(name='kpt-19', id=19, color=[255, 0, 0], type='', swap='kpt-18'), - 20: dict(name='kpt-20', id=20, color=[255, 0, 0], type='', swap=''), - 21: dict(name='kpt-21', id=21, color=[255, 0, 0], type='', swap=''), - 22: - dict(name='kpt-22', id=22, color=[255, 0, 0], type='', swap='kpt-23'), - 23: - dict(name='kpt-23', id=23, color=[255, 0, 0], type='', swap='kpt-22'), - 24: dict(name='kpt-24', id=24, color=[255, 0, 0], type='', swap=''), - 25: dict(name='kpt-25', id=25, color=[255, 0, 0], type='', swap=''), - 26: dict(name='kpt-26', id=26, color=[255, 0, 0], type='', swap=''), - 27: dict(name='kpt-27', id=27, color=[255, 0, 0], type='', swap=''), - 28: dict(name='kpt-28', id=28, color=[255, 0, 0], type='', swap='') - }, - skeleton_info={}, - joint_weights=[1.] * 29, - sigmas=[]) +dataset_info = dict( + dataset_name='cofw', + paper_info=dict( + author='Burgos-Artizzu, Xavier P and Perona, ' + r'Pietro and Doll{\'a}r, Piotr', + title='Robust face landmark estimation under occlusion', + container='Proceedings of the IEEE international ' + 'conference on computer vision', + year='2013', + homepage='http://www.vision.caltech.edu/xpburgos/ICCV13/', + ), + keypoint_info={ + 0: dict(name='kpt-0', id=0, color=[255, 0, 0], type='', swap='kpt-1'), + 1: dict(name='kpt-1', id=1, color=[255, 0, 0], type='', swap='kpt-0'), + 2: dict(name='kpt-2', id=2, color=[255, 0, 0], type='', swap='kpt-3'), + 3: dict(name='kpt-3', id=3, color=[255, 0, 0], type='', swap='kpt-2'), + 4: dict(name='kpt-4', id=4, color=[255, 0, 0], type='', swap='kpt-6'), + 5: dict(name='kpt-5', id=5, color=[255, 0, 0], type='', swap='kpt-7'), + 6: dict(name='kpt-6', id=6, color=[255, 0, 0], type='', swap='kpt-4'), + 7: dict(name='kpt-7', id=7, color=[255, 0, 0], type='', swap='kpt-5'), + 8: dict(name='kpt-8', id=8, color=[255, 0, 0], type='', swap='kpt-9'), + 9: dict(name='kpt-9', id=9, color=[255, 0, 0], type='', swap='kpt-8'), + 10: + dict(name='kpt-10', id=10, color=[255, 0, 0], type='', swap='kpt-11'), + 11: + dict(name='kpt-11', id=11, color=[255, 0, 0], type='', swap='kpt-10'), + 12: + dict(name='kpt-12', id=12, color=[255, 0, 0], type='', swap='kpt-14'), + 13: + dict(name='kpt-13', id=13, color=[255, 0, 0], type='', swap='kpt-15'), + 14: + dict(name='kpt-14', id=14, color=[255, 0, 0], type='', swap='kpt-12'), + 15: + dict(name='kpt-15', id=15, color=[255, 0, 0], type='', swap='kpt-13'), + 16: + dict(name='kpt-16', id=16, color=[255, 0, 0], type='', swap='kpt-17'), + 17: + dict(name='kpt-17', id=17, color=[255, 0, 0], type='', swap='kpt-16'), + 18: + dict(name='kpt-18', id=18, color=[255, 0, 0], type='', swap='kpt-19'), + 19: + dict(name='kpt-19', id=19, color=[255, 0, 0], type='', swap='kpt-18'), + 20: dict(name='kpt-20', id=20, color=[255, 0, 0], type='', swap=''), + 21: dict(name='kpt-21', id=21, color=[255, 0, 0], type='', swap=''), + 22: + dict(name='kpt-22', id=22, color=[255, 0, 0], type='', swap='kpt-23'), + 23: + dict(name='kpt-23', id=23, color=[255, 0, 0], type='', swap='kpt-22'), + 24: dict(name='kpt-24', id=24, color=[255, 0, 0], type='', swap=''), + 25: dict(name='kpt-25', id=25, color=[255, 0, 0], type='', swap=''), + 26: dict(name='kpt-26', id=26, color=[255, 0, 0], type='', swap=''), + 27: dict(name='kpt-27', id=27, color=[255, 0, 0], type='', 
swap=''), + 28: dict(name='kpt-28', id=28, color=[255, 0, 0], type='', swap='') + }, + skeleton_info={}, + joint_weights=[1.] * 29, + sigmas=[]) diff --git a/configs/_base_/datasets/crowdpose.py b/configs/_base_/datasets/crowdpose.py index 45086531a6..358d36f25a 100644 --- a/configs/_base_/datasets/crowdpose.py +++ b/configs/_base_/datasets/crowdpose.py @@ -1,147 +1,147 @@ -dataset_info = dict( - dataset_name='crowdpose', - paper_info=dict( - author='Li, Jiefeng and Wang, Can and Zhu, Hao and ' - 'Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu', - title='CrowdPose: Efficient Crowded Scenes Pose Estimation ' - 'and A New Benchmark', - container='Proceedings of IEEE Conference on Computer ' - 'Vision and Pattern Recognition (CVPR)', - year='2019', - homepage='https://github.com/Jeff-sjtu/CrowdPose', - ), - keypoint_info={ - 0: - dict( - name='left_shoulder', - id=0, - color=[51, 153, 255], - type='upper', - swap='right_shoulder'), - 1: - dict( - name='right_shoulder', - id=1, - color=[51, 153, 255], - type='upper', - swap='left_shoulder'), - 2: - dict( - name='left_elbow', - id=2, - color=[51, 153, 255], - type='upper', - swap='right_elbow'), - 3: - dict( - name='right_elbow', - id=3, - color=[51, 153, 255], - type='upper', - swap='left_elbow'), - 4: - dict( - name='left_wrist', - id=4, - color=[51, 153, 255], - type='upper', - swap='right_wrist'), - 5: - dict( - name='right_wrist', - id=5, - color=[0, 255, 0], - type='upper', - swap='left_wrist'), - 6: - dict( - name='left_hip', - id=6, - color=[255, 128, 0], - type='lower', - swap='right_hip'), - 7: - dict( - name='right_hip', - id=7, - color=[0, 255, 0], - type='lower', - swap='left_hip'), - 8: - dict( - name='left_knee', - id=8, - color=[255, 128, 0], - type='lower', - swap='right_knee'), - 9: - dict( - name='right_knee', - id=9, - color=[0, 255, 0], - type='lower', - swap='left_knee'), - 10: - dict( - name='left_ankle', - id=10, - color=[255, 128, 0], - type='lower', - swap='right_ankle'), - 11: - dict( - name='right_ankle', - id=11, - color=[0, 255, 0], - type='lower', - swap='left_ankle'), - 12: - dict( - name='top_head', id=12, color=[255, 128, 0], type='upper', - swap=''), - 13: - dict(name='neck', id=13, color=[0, 255, 0], type='upper', swap='') - }, - skeleton_info={ - 0: - dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), - 1: - dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), - 2: - dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), - 3: - dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), - 4: - dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), - 5: - dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), - 6: - dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), - 7: - dict( - link=('left_shoulder', 'right_shoulder'), - id=7, - color=[51, 153, 255]), - 8: - dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), - 9: - dict( - link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), - 10: - dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), - 11: - dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), - 12: - dict(link=('top_head', 'neck'), id=12, color=[51, 153, 255]), - 13: - dict(link=('right_shoulder', 'neck'), id=13, color=[51, 153, 255]), - 14: - dict(link=('left_shoulder', 'neck'), id=14, color=[51, 153, 255]) - }, - joint_weights=[ - 0.2, 0.2, 0.2, 1.3, 1.5, 0.2, 1.3, 1.5, 0.2, 0.2, 0.5, 0.2, 0.2, 0.5 - ], - sigmas=[ - 0.079, 0.079, 0.072, 0.072, 0.062, 
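Since these dataset files are ordinary Python modules, they can also be inspected outside of MMPose by executing them and reading the resulting globals. A small sketch using the standard-library runpy; the relative path assumes a local checkout of this repository.

import runpy

# Execute the config file and pull the dataset_info dict out of its globals.
cfg = runpy.run_path('configs/_base_/datasets/cofw.py')
info = cfg['dataset_info']
print(info['dataset_name'])        # 'cofw'
print(len(info['keypoint_info']))  # 29 keypoints
print(info['joint_weights'][:5])   # [1.0, 1.0, 1.0, 1.0, 1.0]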
0.062, 0.107, 0.107, 0.087, 0.087, - 0.089, 0.089, 0.079, 0.079 - ]) +dataset_info = dict( + dataset_name='crowdpose', + paper_info=dict( + author='Li, Jiefeng and Wang, Can and Zhu, Hao and ' + 'Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu', + title='CrowdPose: Efficient Crowded Scenes Pose Estimation ' + 'and A New Benchmark', + container='Proceedings of IEEE Conference on Computer ' + 'Vision and Pattern Recognition (CVPR)', + year='2019', + homepage='https://github.com/Jeff-sjtu/CrowdPose', + ), + keypoint_info={ + 0: + dict( + name='left_shoulder', + id=0, + color=[51, 153, 255], + type='upper', + swap='right_shoulder'), + 1: + dict( + name='right_shoulder', + id=1, + color=[51, 153, 255], + type='upper', + swap='left_shoulder'), + 2: + dict( + name='left_elbow', + id=2, + color=[51, 153, 255], + type='upper', + swap='right_elbow'), + 3: + dict( + name='right_elbow', + id=3, + color=[51, 153, 255], + type='upper', + swap='left_elbow'), + 4: + dict( + name='left_wrist', + id=4, + color=[51, 153, 255], + type='upper', + swap='right_wrist'), + 5: + dict( + name='right_wrist', + id=5, + color=[0, 255, 0], + type='upper', + swap='left_wrist'), + 6: + dict( + name='left_hip', + id=6, + color=[255, 128, 0], + type='lower', + swap='right_hip'), + 7: + dict( + name='right_hip', + id=7, + color=[0, 255, 0], + type='lower', + swap='left_hip'), + 8: + dict( + name='left_knee', + id=8, + color=[255, 128, 0], + type='lower', + swap='right_knee'), + 9: + dict( + name='right_knee', + id=9, + color=[0, 255, 0], + type='lower', + swap='left_knee'), + 10: + dict( + name='left_ankle', + id=10, + color=[255, 128, 0], + type='lower', + swap='right_ankle'), + 11: + dict( + name='right_ankle', + id=11, + color=[0, 255, 0], + type='lower', + swap='left_ankle'), + 12: + dict( + name='top_head', id=12, color=[255, 128, 0], type='upper', + swap=''), + 13: + dict(name='neck', id=13, color=[0, 255, 0], type='upper', swap='') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'right_shoulder'), + id=7, + color=[51, 153, 255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('top_head', 'neck'), id=12, color=[51, 153, 255]), + 13: + dict(link=('right_shoulder', 'neck'), id=13, color=[51, 153, 255]), + 14: + dict(link=('left_shoulder', 'neck'), id=14, color=[51, 153, 255]) + }, + joint_weights=[ + 0.2, 0.2, 0.2, 1.3, 1.5, 0.2, 1.3, 1.5, 0.2, 0.2, 0.5, 0.2, 0.2, 0.5 + ], + sigmas=[ + 0.079, 0.079, 0.072, 0.072, 0.062, 0.062, 0.107, 0.107, 0.087, 0.087, + 0.089, 0.089, 0.079, 0.079 + ]) diff --git a/configs/_base_/datasets/deepfashion2.py b/configs/_base_/datasets/deepfashion2.py index f65d1bb591..de7004e434 100644 --- a/configs/_base_/datasets/deepfashion2.py +++ b/configs/_base_/datasets/deepfashion2.py @@ -1,2660 
+1,2660 @@ -colors = dict( - sss=[255, 128, 0], # short_sleeve_shirt - lss=[255, 0, 128], # long_sleeved_shirt - sso=[128, 0, 255], # short_sleeved_outwear - lso=[0, 128, 255], # long_sleeved_outwear - vest=[0, 128, 128], # vest - sling=[0, 0, 128], # sling - shorts=[128, 128, 128], # shorts - trousers=[128, 0, 128], # trousers - skirt=[64, 128, 128], # skirt - ssd=[64, 64, 128], # short_sleeved_dress - lsd=[128, 64, 0], # long_sleeved_dress - vd=[128, 64, 255], # vest_dress - sd=[128, 64, 0], # sling_dress -) -dataset_info = dict( - dataset_name='deepfashion2', - paper_info=dict( - author='Yuying Ge and Ruimao Zhang and Lingyun Wu ' - 'and Xiaogang Wang and Xiaoou Tang and Ping Luo', - title='DeepFashion2: A Versatile Benchmark for ' - 'Detection, Pose Estimation, Segmentation and ' - 'Re-Identification of Clothing Images', - container='Proceedings of IEEE Conference on Computer ' - 'Vision and Pattern Recognition (CVPR)', - year='2019', - homepage='https://github.com/switchablenorms/DeepFashion2', - ), - keypoint_info={ - # short_sleeved_shirt - 0: - dict(name='sss_kpt1', id=0, color=colors['sss'], type='', swap=''), - 1: - dict( - name='sss_kpt2', - id=1, - color=colors['sss'], - type='', - swap='sss_kpt6'), - 2: - dict( - name='sss_kpt3', - id=2, - color=colors['sss'], - type='', - swap='sss_kpt5'), - 3: - dict(name='sss_kpt4', id=3, color=colors['sss'], type='', swap=''), - 4: - dict( - name='sss_kpt5', - id=4, - color=colors['sss'], - type='', - swap='sss_kpt3'), - 5: - dict( - name='sss_kpt6', - id=5, - color=colors['sss'], - type='', - swap='sss_kpt2'), - 6: - dict( - name='sss_kpt7', - id=6, - color=colors['sss'], - type='', - swap='sss_kpt25'), - 7: - dict( - name='sss_kpt8', - id=7, - color=colors['sss'], - type='', - swap='sss_kpt24'), - 8: - dict( - name='sss_kpt9', - id=8, - color=colors['sss'], - type='', - swap='sss_kpt23'), - 9: - dict( - name='sss_kpt10', - id=9, - color=colors['sss'], - type='', - swap='sss_kpt22'), - 10: - dict( - name='sss_kpt11', - id=10, - color=colors['sss'], - type='', - swap='sss_kpt21'), - 11: - dict( - name='sss_kpt12', - id=11, - color=colors['sss'], - type='', - swap='sss_kpt20'), - 12: - dict( - name='sss_kpt13', - id=12, - color=colors['sss'], - type='', - swap='sss_kpt19'), - 13: - dict( - name='sss_kpt14', - id=13, - color=colors['sss'], - type='', - swap='sss_kpt18'), - 14: - dict( - name='sss_kpt15', - id=14, - color=colors['sss'], - type='', - swap='sss_kpt17'), - 15: - dict(name='sss_kpt16', id=15, color=colors['sss'], type='', swap=''), - 16: - dict( - name='sss_kpt17', - id=16, - color=colors['sss'], - type='', - swap='sss_kpt15'), - 17: - dict( - name='sss_kpt18', - id=17, - color=colors['sss'], - type='', - swap='sss_kpt14'), - 18: - dict( - name='sss_kpt19', - id=18, - color=colors['sss'], - type='', - swap='sss_kpt13'), - 19: - dict( - name='sss_kpt20', - id=19, - color=colors['sss'], - type='', - swap='sss_kpt12'), - 20: - dict( - name='sss_kpt21', - id=20, - color=colors['sss'], - type='', - swap='sss_kpt11'), - 21: - dict( - name='sss_kpt22', - id=21, - color=colors['sss'], - type='', - swap='sss_kpt10'), - 22: - dict( - name='sss_kpt23', - id=22, - color=colors['sss'], - type='', - swap='sss_kpt9'), - 23: - dict( - name='sss_kpt24', - id=23, - color=colors['sss'], - type='', - swap='sss_kpt8'), - 24: - dict( - name='sss_kpt25', - id=24, - color=colors['sss'], - type='', - swap='sss_kpt7'), - # long_sleeved_shirt - 25: - dict(name='lss_kpt1', id=25, color=colors['lss'], type='', swap=''), - 26: - dict( - name='lss_kpt2', - 
id=26, - color=colors['lss'], - type='', - swap='lss_kpt6'), - 27: - dict( - name='lss_kpt3', - id=27, - color=colors['lss'], - type='', - swap='lss_kpt5'), - 28: - dict(name='lss_kpt4', id=28, color=colors['lss'], type='', swap=''), - 29: - dict( - name='lss_kpt5', - id=29, - color=colors['lss'], - type='', - swap='lss_kpt3'), - 30: - dict( - name='lss_kpt6', - id=30, - color=colors['lss'], - type='', - swap='lss_kpt2'), - 31: - dict( - name='lss_kpt7', - id=31, - color=colors['lss'], - type='', - swap='lss_kpt33'), - 32: - dict( - name='lss_kpt8', - id=32, - color=colors['lss'], - type='', - swap='lss_kpt32'), - 33: - dict( - name='lss_kpt9', - id=33, - color=colors['lss'], - type='', - swap='lss_kpt31'), - 34: - dict( - name='lss_kpt10', - id=34, - color=colors['lss'], - type='', - swap='lss_kpt30'), - 35: - dict( - name='lss_kpt11', - id=35, - color=colors['lss'], - type='', - swap='lss_kpt29'), - 36: - dict( - name='lss_kpt12', - id=36, - color=colors['lss'], - type='', - swap='lss_kpt28'), - 37: - dict( - name='lss_kpt13', - id=37, - color=colors['lss'], - type='', - swap='lss_kpt27'), - 38: - dict( - name='lss_kpt14', - id=38, - color=colors['lss'], - type='', - swap='lss_kpt26'), - 39: - dict( - name='lss_kpt15', - id=39, - color=colors['lss'], - type='', - swap='lss_kpt25'), - 40: - dict( - name='lss_kpt16', - id=40, - color=colors['lss'], - type='', - swap='lss_kpt24'), - 41: - dict( - name='lss_kpt17', - id=41, - color=colors['lss'], - type='', - swap='lss_kpt23'), - 42: - dict( - name='lss_kpt18', - id=42, - color=colors['lss'], - type='', - swap='lss_kpt22'), - 43: - dict( - name='lss_kpt19', - id=43, - color=colors['lss'], - type='', - swap='lss_kpt21'), - 44: - dict(name='lss_kpt20', id=44, color=colors['lss'], type='', swap=''), - 45: - dict( - name='lss_kpt21', - id=45, - color=colors['lss'], - type='', - swap='lss_kpt19'), - 46: - dict( - name='lss_kpt22', - id=46, - color=colors['lss'], - type='', - swap='lss_kpt18'), - 47: - dict( - name='lss_kpt23', - id=47, - color=colors['lss'], - type='', - swap='lss_kpt17'), - 48: - dict( - name='lss_kpt24', - id=48, - color=colors['lss'], - type='', - swap='lss_kpt16'), - 49: - dict( - name='lss_kpt25', - id=49, - color=colors['lss'], - type='', - swap='lss_kpt15'), - 50: - dict( - name='lss_kpt26', - id=50, - color=colors['lss'], - type='', - swap='lss_kpt14'), - 51: - dict( - name='lss_kpt27', - id=51, - color=colors['lss'], - type='', - swap='lss_kpt13'), - 52: - dict( - name='lss_kpt28', - id=52, - color=colors['lss'], - type='', - swap='lss_kpt12'), - 53: - dict( - name='lss_kpt29', - id=53, - color=colors['lss'], - type='', - swap='lss_kpt11'), - 54: - dict( - name='lss_kpt30', - id=54, - color=colors['lss'], - type='', - swap='lss_kpt10'), - 55: - dict( - name='lss_kpt31', - id=55, - color=colors['lss'], - type='', - swap='lss_kpt9'), - 56: - dict( - name='lss_kpt32', - id=56, - color=colors['lss'], - type='', - swap='lss_kpt8'), - 57: - dict( - name='lss_kpt33', - id=57, - color=colors['lss'], - type='', - swap='lss_kpt7'), - # short_sleeved_outwear - 58: - dict(name='sso_kpt1', id=58, color=colors['sso'], type='', swap=''), - 59: - dict( - name='sso_kpt2', - id=59, - color=colors['sso'], - type='', - swap='sso_kpt26'), - 60: - dict( - name='sso_kpt3', - id=60, - color=colors['sso'], - type='', - swap='sso_kpt5'), - 61: - dict( - name='sso_kpt4', - id=61, - color=colors['sso'], - type='', - swap='sso_kpt6'), - 62: - dict( - name='sso_kpt5', - id=62, - color=colors['sso'], - type='', - swap='sso_kpt3'), - 63: - dict( - 
name='sso_kpt6', - id=63, - color=colors['sso'], - type='', - swap='sso_kpt4'), - 64: - dict( - name='sso_kpt7', - id=64, - color=colors['sso'], - type='', - swap='sso_kpt25'), - 65: - dict( - name='sso_kpt8', - id=65, - color=colors['sso'], - type='', - swap='sso_kpt24'), - 66: - dict( - name='sso_kpt9', - id=66, - color=colors['sso'], - type='', - swap='sso_kpt23'), - 67: - dict( - name='sso_kpt10', - id=67, - color=colors['sso'], - type='', - swap='sso_kpt22'), - 68: - dict( - name='sso_kpt11', - id=68, - color=colors['sso'], - type='', - swap='sso_kpt21'), - 69: - dict( - name='sso_kpt12', - id=69, - color=colors['sso'], - type='', - swap='sso_kpt20'), - 70: - dict( - name='sso_kpt13', - id=70, - color=colors['sso'], - type='', - swap='sso_kpt19'), - 71: - dict( - name='sso_kpt14', - id=71, - color=colors['sso'], - type='', - swap='sso_kpt18'), - 72: - dict( - name='sso_kpt15', - id=72, - color=colors['sso'], - type='', - swap='sso_kpt17'), - 73: - dict( - name='sso_kpt16', - id=73, - color=colors['sso'], - type='', - swap='sso_kpt29'), - 74: - dict( - name='sso_kpt17', - id=74, - color=colors['sso'], - type='', - swap='sso_kpt15'), - 75: - dict( - name='sso_kpt18', - id=75, - color=colors['sso'], - type='', - swap='sso_kpt14'), - 76: - dict( - name='sso_kpt19', - id=76, - color=colors['sso'], - type='', - swap='sso_kpt13'), - 77: - dict( - name='sso_kpt20', - id=77, - color=colors['sso'], - type='', - swap='sso_kpt12'), - 78: - dict( - name='sso_kpt21', - id=78, - color=colors['sso'], - type='', - swap='sso_kpt11'), - 79: - dict( - name='sso_kpt22', - id=79, - color=colors['sso'], - type='', - swap='sso_kpt10'), - 80: - dict( - name='sso_kpt23', - id=80, - color=colors['sso'], - type='', - swap='sso_kpt9'), - 81: - dict( - name='sso_kpt24', - id=81, - color=colors['sso'], - type='', - swap='sso_kpt8'), - 82: - dict( - name='sso_kpt25', - id=82, - color=colors['sso'], - type='', - swap='sso_kpt7'), - 83: - dict( - name='sso_kpt26', - id=83, - color=colors['sso'], - type='', - swap='sso_kpt2'), - 84: - dict( - name='sso_kpt27', - id=84, - color=colors['sso'], - type='', - swap='sso_kpt30'), - 85: - dict( - name='sso_kpt28', - id=85, - color=colors['sso'], - type='', - swap='sso_kpt31'), - 86: - dict( - name='sso_kpt29', - id=86, - color=colors['sso'], - type='', - swap='sso_kpt16'), - 87: - dict( - name='sso_kpt30', - id=87, - color=colors['sso'], - type='', - swap='sso_kpt27'), - 88: - dict( - name='sso_kpt31', - id=88, - color=colors['sso'], - type='', - swap='sso_kpt28'), - # long_sleeved_outwear - 89: - dict(name='lso_kpt1', id=89, color=colors['lso'], type='', swap=''), - 90: - dict( - name='lso_kpt2', - id=90, - color=colors['lso'], - type='', - swap='lso_kpt6'), - 91: - dict( - name='lso_kpt3', - id=91, - color=colors['lso'], - type='', - swap='lso_kpt5'), - 92: - dict( - name='lso_kpt4', - id=92, - color=colors['lso'], - type='', - swap='lso_kpt34'), - 93: - dict( - name='lso_kpt5', - id=93, - color=colors['lso'], - type='', - swap='lso_kpt3'), - 94: - dict( - name='lso_kpt6', - id=94, - color=colors['lso'], - type='', - swap='lso_kpt2'), - 95: - dict( - name='lso_kpt7', - id=95, - color=colors['lso'], - type='', - swap='lso_kpt33'), - 96: - dict( - name='lso_kpt8', - id=96, - color=colors['lso'], - type='', - swap='lso_kpt32'), - 97: - dict( - name='lso_kpt9', - id=97, - color=colors['lso'], - type='', - swap='lso_kpt31'), - 98: - dict( - name='lso_kpt10', - id=98, - color=colors['lso'], - type='', - swap='lso_kpt30'), - 99: - dict( - name='lso_kpt11', - id=99, - 
color=colors['lso'], - type='', - swap='lso_kpt29'), - 100: - dict( - name='lso_kpt12', - id=100, - color=colors['lso'], - type='', - swap='lso_kpt28'), - 101: - dict( - name='lso_kpt13', - id=101, - color=colors['lso'], - type='', - swap='lso_kpt27'), - 102: - dict( - name='lso_kpt14', - id=102, - color=colors['lso'], - type='', - swap='lso_kpt26'), - 103: - dict( - name='lso_kpt15', - id=103, - color=colors['lso'], - type='', - swap='lso_kpt25'), - 104: - dict( - name='lso_kpt16', - id=104, - color=colors['lso'], - type='', - swap='lso_kpt24'), - 105: - dict( - name='lso_kpt17', - id=105, - color=colors['lso'], - type='', - swap='lso_kpt23'), - 106: - dict( - name='lso_kpt18', - id=106, - color=colors['lso'], - type='', - swap='lso_kpt22'), - 107: - dict( - name='lso_kpt19', - id=107, - color=colors['lso'], - type='', - swap='lso_kpt21'), - 108: - dict( - name='lso_kpt20', - id=108, - color=colors['lso'], - type='', - swap='lso_kpt37'), - 109: - dict( - name='lso_kpt21', - id=109, - color=colors['lso'], - type='', - swap='lso_kpt19'), - 110: - dict( - name='lso_kpt22', - id=110, - color=colors['lso'], - type='', - swap='lso_kpt18'), - 111: - dict( - name='lso_kpt23', - id=111, - color=colors['lso'], - type='', - swap='lso_kpt17'), - 112: - dict( - name='lso_kpt24', - id=112, - color=colors['lso'], - type='', - swap='lso_kpt16'), - 113: - dict( - name='lso_kpt25', - id=113, - color=colors['lso'], - type='', - swap='lso_kpt15'), - 114: - dict( - name='lso_kpt26', - id=114, - color=colors['lso'], - type='', - swap='lso_kpt14'), - 115: - dict( - name='lso_kpt27', - id=115, - color=colors['lso'], - type='', - swap='lso_kpt13'), - 116: - dict( - name='lso_kpt28', - id=116, - color=colors['lso'], - type='', - swap='lso_kpt12'), - 117: - dict( - name='lso_kpt29', - id=117, - color=colors['lso'], - type='', - swap='lso_kpt11'), - 118: - dict( - name='lso_kpt30', - id=118, - color=colors['lso'], - type='', - swap='lso_kpt10'), - 119: - dict( - name='lso_kpt31', - id=119, - color=colors['lso'], - type='', - swap='lso_kpt9'), - 120: - dict( - name='lso_kpt32', - id=120, - color=colors['lso'], - type='', - swap='lso_kpt8'), - 121: - dict( - name='lso_kpt33', - id=121, - color=colors['lso'], - type='', - swap='lso_kpt7'), - 122: - dict( - name='lso_kpt34', - id=122, - color=colors['lso'], - type='', - swap='lso_kpt4'), - 123: - dict( - name='lso_kpt35', - id=123, - color=colors['lso'], - type='', - swap='lso_kpt38'), - 124: - dict( - name='lso_kpt36', - id=124, - color=colors['lso'], - type='', - swap='lso_kpt39'), - 125: - dict( - name='lso_kpt37', - id=125, - color=colors['lso'], - type='', - swap='lso_kpt20'), - 126: - dict( - name='lso_kpt38', - id=126, - color=colors['lso'], - type='', - swap='lso_kpt35'), - 127: - dict( - name='lso_kpt39', - id=127, - color=colors['lso'], - type='', - swap='lso_kpt36'), - # vest - 128: - dict(name='vest_kpt1', id=128, color=colors['vest'], type='', swap=''), - 129: - dict( - name='vest_kpt2', - id=129, - color=colors['vest'], - type='', - swap='vest_kpt6'), - 130: - dict( - name='vest_kpt3', - id=130, - color=colors['vest'], - type='', - swap='vest_kpt5'), - 131: - dict(name='vest_kpt4', id=131, color=colors['vest'], type='', swap=''), - 132: - dict( - name='vest_kpt5', - id=132, - color=colors['vest'], - type='', - swap='vest_kpt3'), - 133: - dict( - name='vest_kpt6', - id=133, - color=colors['vest'], - type='', - swap='vest_kpt2'), - 134: - dict( - name='vest_kpt7', - id=134, - color=colors['vest'], - type='', - swap='vest_kpt15'), - 135: - dict( - 
name='vest_kpt8', - id=135, - color=colors['vest'], - type='', - swap='vest_kpt14'), - 136: - dict( - name='vest_kpt9', - id=136, - color=colors['vest'], - type='', - swap='vest_kpt13'), - 137: - dict( - name='vest_kpt10', - id=137, - color=colors['vest'], - type='', - swap='vest_kpt12'), - 138: - dict( - name='vest_kpt11', id=138, color=colors['vest'], type='', swap=''), - 139: - dict( - name='vest_kpt12', - id=139, - color=colors['vest'], - type='', - swap='vest_kpt10'), - 140: - dict( - name='vest_kpt13', id=140, color=colors['vest'], type='', swap=''), - 141: - dict( - name='vest_kpt14', - id=141, - color=colors['vest'], - type='', - swap='vest_kpt8'), - 142: - dict( - name='vest_kpt15', - id=142, - color=colors['vest'], - type='', - swap='vest_kpt7'), - # sling - 143: - dict( - name='sling_kpt1', id=143, color=colors['sling'], type='', - swap=''), - 144: - dict( - name='sling_kpt2', - id=144, - color=colors['sling'], - type='', - swap='sling_kpt6'), - 145: - dict( - name='sling_kpt3', - id=145, - color=colors['sling'], - type='', - swap='sling_kpt5'), - 146: - dict( - name='sling_kpt4', id=146, color=colors['sling'], type='', - swap=''), - 147: - dict( - name='sling_kpt5', - id=147, - color=colors['sling'], - type='', - swap='sling_kpt3'), - 148: - dict( - name='sling_kpt6', - id=148, - color=colors['sling'], - type='', - swap='sling_kpt2'), - 149: - dict( - name='sling_kpt7', - id=149, - color=colors['sling'], - type='', - swap='sling_kpt15'), - 150: - dict( - name='sling_kpt8', - id=150, - color=colors['sling'], - type='', - swap='sling_kpt14'), - 151: - dict( - name='sling_kpt9', - id=151, - color=colors['sling'], - type='', - swap='sling_kpt13'), - 152: - dict( - name='sling_kpt10', - id=152, - color=colors['sling'], - type='', - swap='sling_kpt12'), - 153: - dict( - name='sling_kpt11', - id=153, - color=colors['sling'], - type='', - swap=''), - 154: - dict( - name='sling_kpt12', - id=154, - color=colors['sling'], - type='', - swap='sling_kpt10'), - 155: - dict( - name='sling_kpt13', - id=155, - color=colors['sling'], - type='', - swap='sling_kpt9'), - 156: - dict( - name='sling_kpt14', - id=156, - color=colors['sling'], - type='', - swap='sling_kpt8'), - 157: - dict( - name='sling_kpt15', - id=157, - color=colors['sling'], - type='', - swap='sling_kpt7'), - # shorts - 158: - dict( - name='shorts_kpt1', - id=158, - color=colors['shorts'], - type='', - swap='shorts_kpt3'), - 159: - dict( - name='shorts_kpt2', - id=159, - color=colors['shorts'], - type='', - swap=''), - 160: - dict( - name='shorts_kpt3', - id=160, - color=colors['shorts'], - type='', - swap='shorts_kpt1'), - 161: - dict( - name='shorts_kpt4', - id=161, - color=colors['shorts'], - type='', - swap='shorts_kpt10'), - 162: - dict( - name='shorts_kpt5', - id=162, - color=colors['shorts'], - type='', - swap='shorts_kpt9'), - 163: - dict( - name='shorts_kpt6', - id=163, - color=colors['shorts'], - type='', - swap='shorts_kpt8'), - 164: - dict( - name='shorts_kpt7', - id=164, - color=colors['shorts'], - type='', - swap=''), - 165: - dict( - name='shorts_kpt8', - id=165, - color=colors['shorts'], - type='', - swap='shorts_kpt6'), - 166: - dict( - name='shorts_kpt9', - id=166, - color=colors['shorts'], - type='', - swap='shorts_kpt5'), - 167: - dict( - name='shorts_kpt10', - id=167, - color=colors['shorts'], - type='', - swap='shorts_kpt4'), - # trousers - 168: - dict( - name='trousers_kpt1', - id=168, - color=colors['trousers'], - type='', - swap='trousers_kpt3'), - 169: - dict( - name='trousers_kpt2', - id=169, - 
color=colors['trousers'], - type='', - swap=''), - 170: - dict( - name='trousers_kpt3', - id=170, - color=colors['trousers'], - type='', - swap='trousers_kpt1'), - 171: - dict( - name='trousers_kpt4', - id=171, - color=colors['trousers'], - type='', - swap='trousers_kpt14'), - 172: - dict( - name='trousers_kpt5', - id=172, - color=colors['trousers'], - type='', - swap='trousers_kpt13'), - 173: - dict( - name='trousers_kpt6', - id=173, - color=colors['trousers'], - type='', - swap='trousers_kpt12'), - 174: - dict( - name='trousers_kpt7', - id=174, - color=colors['trousers'], - type='', - swap='trousers_kpt11'), - 175: - dict( - name='trousers_kpt8', - id=175, - color=colors['trousers'], - type='', - swap='trousers_kpt10'), - 176: - dict( - name='trousers_kpt9', - id=176, - color=colors['trousers'], - type='', - swap=''), - 177: - dict( - name='trousers_kpt10', - id=177, - color=colors['trousers'], - type='', - swap='trousers_kpt8'), - 178: - dict( - name='trousers_kpt11', - id=178, - color=colors['trousers'], - type='', - swap='trousers_kpt7'), - 179: - dict( - name='trousers_kpt12', - id=179, - color=colors['trousers'], - type='', - swap='trousers_kpt6'), - 180: - dict( - name='trousers_kpt13', - id=180, - color=colors['trousers'], - type='', - swap='trousers_kpt5'), - 181: - dict( - name='trousers_kpt14', - id=181, - color=colors['trousers'], - type='', - swap='trousers_kpt4'), - # skirt - 182: - dict( - name='skirt_kpt1', - id=182, - color=colors['skirt'], - type='', - swap='skirt_kpt3'), - 183: - dict( - name='skirt_kpt2', id=183, color=colors['skirt'], type='', - swap=''), - 184: - dict( - name='skirt_kpt3', - id=184, - color=colors['skirt'], - type='', - swap='skirt_kpt1'), - 185: - dict( - name='skirt_kpt4', - id=185, - color=colors['skirt'], - type='', - swap='skirt_kpt8'), - 186: - dict( - name='skirt_kpt5', - id=186, - color=colors['skirt'], - type='', - swap='skirt_kpt7'), - 187: - dict( - name='skirt_kpt6', id=187, color=colors['skirt'], type='', - swap=''), - 188: - dict( - name='skirt_kpt7', - id=188, - color=colors['skirt'], - type='', - swap='skirt_kpt5'), - 189: - dict( - name='skirt_kpt8', - id=189, - color=colors['skirt'], - type='', - swap='skirt_kpt4'), - # short_sleeved_dress - 190: - dict(name='ssd_kpt1', id=190, color=colors['ssd'], type='', swap=''), - 191: - dict( - name='ssd_kpt2', - id=191, - color=colors['ssd'], - type='', - swap='ssd_kpt6'), - 192: - dict( - name='ssd_kpt3', - id=192, - color=colors['ssd'], - type='', - swap='ssd_kpt5'), - 193: - dict(name='ssd_kpt4', id=193, color=colors['ssd'], type='', swap=''), - 194: - dict( - name='ssd_kpt5', - id=194, - color=colors['ssd'], - type='', - swap='ssd_kpt3'), - 195: - dict( - name='ssd_kpt6', - id=195, - color=colors['ssd'], - type='', - swap='ssd_kpt2'), - 196: - dict( - name='ssd_kpt7', - id=196, - color=colors['ssd'], - type='', - swap='ssd_kpt29'), - 197: - dict( - name='ssd_kpt8', - id=197, - color=colors['ssd'], - type='', - swap='ssd_kpt28'), - 198: - dict( - name='ssd_kpt9', - id=198, - color=colors['ssd'], - type='', - swap='ssd_kpt27'), - 199: - dict( - name='ssd_kpt10', - id=199, - color=colors['ssd'], - type='', - swap='ssd_kpt26'), - 200: - dict( - name='ssd_kpt11', - id=200, - color=colors['ssd'], - type='', - swap='ssd_kpt25'), - 201: - dict( - name='ssd_kpt12', - id=201, - color=colors['ssd'], - type='', - swap='ssd_kpt24'), - 202: - dict( - name='ssd_kpt13', - id=202, - color=colors['ssd'], - type='', - swap='ssd_kpt23'), - 203: - dict( - name='ssd_kpt14', - id=203, - color=colors['ssd'], - 
type='', - swap='ssd_kpt22'), - 204: - dict( - name='ssd_kpt15', - id=204, - color=colors['ssd'], - type='', - swap='ssd_kpt21'), - 205: - dict( - name='ssd_kpt16', - id=205, - color=colors['ssd'], - type='', - swap='ssd_kpt20'), - 206: - dict( - name='ssd_kpt17', - id=206, - color=colors['ssd'], - type='', - swap='ssd_kpt19'), - 207: - dict(name='ssd_kpt18', id=207, color=colors['ssd'], type='', swap=''), - 208: - dict( - name='ssd_kpt19', - id=208, - color=colors['ssd'], - type='', - swap='ssd_kpt17'), - 209: - dict( - name='ssd_kpt20', - id=209, - color=colors['ssd'], - type='', - swap='ssd_kpt16'), - 210: - dict( - name='ssd_kpt21', - id=210, - color=colors['ssd'], - type='', - swap='ssd_kpt15'), - 211: - dict( - name='ssd_kpt22', - id=211, - color=colors['ssd'], - type='', - swap='ssd_kpt14'), - 212: - dict( - name='ssd_kpt23', - id=212, - color=colors['ssd'], - type='', - swap='ssd_kpt13'), - 213: - dict( - name='ssd_kpt24', - id=213, - color=colors['ssd'], - type='', - swap='ssd_kpt12'), - 214: - dict( - name='ssd_kpt25', - id=214, - color=colors['ssd'], - type='', - swap='ssd_kpt11'), - 215: - dict( - name='ssd_kpt26', - id=215, - color=colors['ssd'], - type='', - swap='ssd_kpt10'), - 216: - dict( - name='ssd_kpt27', - id=216, - color=colors['ssd'], - type='', - swap='ssd_kpt9'), - 217: - dict( - name='ssd_kpt28', - id=217, - color=colors['ssd'], - type='', - swap='ssd_kpt8'), - 218: - dict( - name='ssd_kpt29', - id=218, - color=colors['ssd'], - type='', - swap='ssd_kpt7'), - # long_sleeved_dress - 219: - dict(name='lsd_kpt1', id=219, color=colors['lsd'], type='', swap=''), - 220: - dict( - name='lsd_kpt2', - id=220, - color=colors['lsd'], - type='', - swap='lsd_kpt6'), - 221: - dict( - name='lsd_kpt3', - id=221, - color=colors['lsd'], - type='', - swap='lsd_kpt5'), - 222: - dict(name='lsd_kpt4', id=222, color=colors['lsd'], type='', swap=''), - 223: - dict( - name='lsd_kpt5', - id=223, - color=colors['lsd'], - type='', - swap='lsd_kpt3'), - 224: - dict( - name='lsd_kpt6', - id=224, - color=colors['lsd'], - type='', - swap='lsd_kpt2'), - 225: - dict( - name='lsd_kpt7', - id=225, - color=colors['lsd'], - type='', - swap='lsd_kpt37'), - 226: - dict( - name='lsd_kpt8', - id=226, - color=colors['lsd'], - type='', - swap='lsd_kpt36'), - 227: - dict( - name='lsd_kpt9', - id=227, - color=colors['lsd'], - type='', - swap='lsd_kpt35'), - 228: - dict( - name='lsd_kpt10', - id=228, - color=colors['lsd'], - type='', - swap='lsd_kpt34'), - 229: - dict( - name='lsd_kpt11', - id=229, - color=colors['lsd'], - type='', - swap='lsd_kpt33'), - 230: - dict( - name='lsd_kpt12', - id=230, - color=colors['lsd'], - type='', - swap='lsd_kpt32'), - 231: - dict( - name='lsd_kpt13', - id=231, - color=colors['lsd'], - type='', - swap='lsd_kpt31'), - 232: - dict( - name='lsd_kpt14', - id=232, - color=colors['lsd'], - type='', - swap='lsd_kpt30'), - 233: - dict( - name='lsd_kpt15', - id=233, - color=colors['lsd'], - type='', - swap='lsd_kpt29'), - 234: - dict( - name='lsd_kpt16', - id=234, - color=colors['lsd'], - type='', - swap='lsd_kpt28'), - 235: - dict( - name='lsd_kpt17', - id=235, - color=colors['lsd'], - type='', - swap='lsd_kpt27'), - 236: - dict( - name='lsd_kpt18', - id=236, - color=colors['lsd'], - type='', - swap='lsd_kpt26'), - 237: - dict( - name='lsd_kpt19', - id=237, - color=colors['lsd'], - type='', - swap='lsd_kpt25'), - 238: - dict( - name='lsd_kpt20', - id=238, - color=colors['lsd'], - type='', - swap='lsd_kpt24'), - 239: - dict( - name='lsd_kpt21', - id=239, - color=colors['lsd'], - 
type='', - swap='lsd_kpt23'), - 240: - dict(name='lsd_kpt22', id=240, color=colors['lsd'], type='', swap=''), - 241: - dict( - name='lsd_kpt23', - id=241, - color=colors['lsd'], - type='', - swap='lsd_kpt21'), - 242: - dict( - name='lsd_kpt24', - id=242, - color=colors['lsd'], - type='', - swap='lsd_kpt20'), - 243: - dict( - name='lsd_kpt25', - id=243, - color=colors['lsd'], - type='', - swap='lsd_kpt19'), - 244: - dict( - name='lsd_kpt26', - id=244, - color=colors['lsd'], - type='', - swap='lsd_kpt18'), - 245: - dict( - name='lsd_kpt27', - id=245, - color=colors['lsd'], - type='', - swap='lsd_kpt17'), - 246: - dict( - name='lsd_kpt28', - id=246, - color=colors['lsd'], - type='', - swap='lsd_kpt16'), - 247: - dict( - name='lsd_kpt29', - id=247, - color=colors['lsd'], - type='', - swap='lsd_kpt15'), - 248: - dict( - name='lsd_kpt30', - id=248, - color=colors['lsd'], - type='', - swap='lsd_kpt14'), - 249: - dict( - name='lsd_kpt31', - id=249, - color=colors['lsd'], - type='', - swap='lsd_kpt13'), - 250: - dict( - name='lsd_kpt32', - id=250, - color=colors['lsd'], - type='', - swap='lsd_kpt12'), - 251: - dict( - name='lsd_kpt33', - id=251, - color=colors['lsd'], - type='', - swap='lsd_kpt11'), - 252: - dict( - name='lsd_kpt34', - id=252, - color=colors['lsd'], - type='', - swap='lsd_kpt10'), - 253: - dict( - name='lsd_kpt35', - id=253, - color=colors['lsd'], - type='', - swap='lsd_kpt9'), - 254: - dict( - name='lsd_kpt36', - id=254, - color=colors['lsd'], - type='', - swap='lsd_kpt8'), - 255: - dict( - name='lsd_kpt37', - id=255, - color=colors['lsd'], - type='', - swap='lsd_kpt7'), - # vest_dress - 256: - dict(name='vd_kpt1', id=256, color=colors['vd'], type='', swap=''), - 257: - dict( - name='vd_kpt2', - id=257, - color=colors['vd'], - type='', - swap='vd_kpt6'), - 258: - dict( - name='vd_kpt3', - id=258, - color=colors['vd'], - type='', - swap='vd_kpt5'), - 259: - dict(name='vd_kpt4', id=259, color=colors['vd'], type='', swap=''), - 260: - dict( - name='vd_kpt5', - id=260, - color=colors['vd'], - type='', - swap='vd_kpt3'), - 261: - dict( - name='vd_kpt6', - id=261, - color=colors['vd'], - type='', - swap='vd_kpt2'), - 262: - dict( - name='vd_kpt7', - id=262, - color=colors['vd'], - type='', - swap='vd_kpt19'), - 263: - dict( - name='vd_kpt8', - id=263, - color=colors['vd'], - type='', - swap='vd_kpt18'), - 264: - dict( - name='vd_kpt9', - id=264, - color=colors['vd'], - type='', - swap='vd_kpt17'), - 265: - dict( - name='vd_kpt10', - id=265, - color=colors['vd'], - type='', - swap='vd_kpt16'), - 266: - dict( - name='vd_kpt11', - id=266, - color=colors['vd'], - type='', - swap='vd_kpt15'), - 267: - dict( - name='vd_kpt12', - id=267, - color=colors['vd'], - type='', - swap='vd_kpt14'), - 268: - dict(name='vd_kpt13', id=268, color=colors['vd'], type='', swap=''), - 269: - dict( - name='vd_kpt14', - id=269, - color=colors['vd'], - type='', - swap='vd_kpt12'), - 270: - dict( - name='vd_kpt15', - id=270, - color=colors['vd'], - type='', - swap='vd_kpt11'), - 271: - dict( - name='vd_kpt16', - id=271, - color=colors['vd'], - type='', - swap='vd_kpt10'), - 272: - dict( - name='vd_kpt17', - id=272, - color=colors['vd'], - type='', - swap='vd_kpt9'), - 273: - dict( - name='vd_kpt18', - id=273, - color=colors['vd'], - type='', - swap='vd_kpt8'), - 274: - dict( - name='vd_kpt19', - id=274, - color=colors['vd'], - type='', - swap='vd_kpt7'), - # sling_dress - 275: - dict(name='sd_kpt1', id=275, color=colors['sd'], type='', swap=''), - 276: - dict( - name='sd_kpt2', - id=276, - color=colors['sd'], - 
type='', - swap='sd_kpt6'), - 277: - dict( - name='sd_kpt3', - id=277, - color=colors['sd'], - type='', - swap='sd_kpt5'), - 278: - dict(name='sd_kpt4', id=278, color=colors['sd'], type='', swap=''), - 279: - dict( - name='sd_kpt5', - id=279, - color=colors['sd'], - type='', - swap='sd_kpt3'), - 280: - dict( - name='sd_kpt6', - id=280, - color=colors['sd'], - type='', - swap='sd_kpt2'), - 281: - dict( - name='sd_kpt7', - id=281, - color=colors['sd'], - type='', - swap='sd_kpt19'), - 282: - dict( - name='sd_kpt8', - id=282, - color=colors['sd'], - type='', - swap='sd_kpt18'), - 283: - dict( - name='sd_kpt9', - id=283, - color=colors['sd'], - type='', - swap='sd_kpt17'), - 284: - dict( - name='sd_kpt10', - id=284, - color=colors['sd'], - type='', - swap='sd_kpt16'), - 285: - dict( - name='sd_kpt11', - id=285, - color=colors['sd'], - type='', - swap='sd_kpt15'), - 286: - dict( - name='sd_kpt12', - id=286, - color=colors['sd'], - type='', - swap='sd_kpt14'), - 287: - dict(name='sd_kpt13', id=287, color=colors['sd'], type='', swap=''), - 288: - dict( - name='sd_kpt14', - id=288, - color=colors['sd'], - type='', - swap='sd_kpt12'), - 289: - dict( - name='sd_kpt15', - id=289, - color=colors['sd'], - type='', - swap='sd_kpt11'), - 290: - dict( - name='sd_kpt16', - id=290, - color=colors['sd'], - type='', - swap='sd_kpt10'), - 291: - dict( - name='sd_kpt17', - id=291, - color=colors['sd'], - type='', - swap='sd_kpt9'), - 292: - dict( - name='sd_kpt18', - id=292, - color=colors['sd'], - type='', - swap='sd_kpt8'), - 293: - dict( - name='sd_kpt19', - id=293, - color=colors['sd'], - type='', - swap='sd_kpt7'), - }, - skeleton_info={ - # short_sleeved_shirt - 0: - dict(link=('sss_kpt1', 'sss_kpt2'), id=0, color=[255, 128, 0]), - 1: - dict(link=('sss_kpt2', 'sss_kpt7'), id=1, color=[255, 128, 0]), - 2: - dict(link=('sss_kpt7', 'sss_kpt8'), id=2, color=[255, 128, 0]), - 3: - dict(link=('sss_kpt8', 'sss_kpt9'), id=3, color=[255, 128, 0]), - 4: - dict(link=('sss_kpt9', 'sss_kpt10'), id=4, color=[255, 128, 0]), - 5: - dict(link=('sss_kpt10', 'sss_kpt11'), id=5, color=[255, 128, 0]), - 6: - dict(link=('sss_kpt11', 'sss_kpt12'), id=6, color=[255, 128, 0]), - 7: - dict(link=('sss_kpt12', 'sss_kpt13'), id=7, color=[255, 128, 0]), - 8: - dict(link=('sss_kpt13', 'sss_kpt14'), id=8, color=[255, 128, 0]), - 9: - dict(link=('sss_kpt14', 'sss_kpt15'), id=9, color=[255, 128, 0]), - 10: - dict(link=('sss_kpt15', 'sss_kpt16'), id=10, color=[255, 128, 0]), - 11: - dict(link=('sss_kpt16', 'sss_kpt17'), id=11, color=[255, 128, 0]), - 12: - dict(link=('sss_kpt17', 'sss_kpt18'), id=12, color=[255, 128, 0]), - 13: - dict(link=('sss_kpt18', 'sss_kpt19'), id=13, color=[255, 128, 0]), - 14: - dict(link=('sss_kpt19', 'sss_kpt20'), id=14, color=[255, 128, 0]), - 15: - dict(link=('sss_kpt20', 'sss_kpt21'), id=15, color=[255, 128, 0]), - 16: - dict(link=('sss_kpt21', 'sss_kpt22'), id=16, color=[255, 128, 0]), - 17: - dict(link=('sss_kpt22', 'sss_kpt23'), id=17, color=[255, 128, 0]), - 18: - dict(link=('sss_kpt23', 'sss_kpt24'), id=18, color=[255, 128, 0]), - 19: - dict(link=('sss_kpt24', 'sss_kpt25'), id=19, color=[255, 128, 0]), - 20: - dict(link=('sss_kpt25', 'sss_kpt6'), id=20, color=[255, 128, 0]), - 21: - dict(link=('sss_kpt6', 'sss_kpt1'), id=21, color=[255, 128, 0]), - 22: - dict(link=('sss_kpt2', 'sss_kpt3'), id=22, color=[255, 128, 0]), - 23: - dict(link=('sss_kpt3', 'sss_kpt4'), id=23, color=[255, 128, 0]), - 24: - dict(link=('sss_kpt4', 'sss_kpt5'), id=24, color=[255, 128, 0]), - 25: - dict(link=('sss_kpt5', 'sss_kpt6'), 
id=25, color=[255, 128, 0]), - # long_sleeve_shirt - 26: - dict(link=('lss_kpt1', 'lss_kpt2'), id=26, color=[255, 0, 128]), - 27: - dict(link=('lss_kpt2', 'lss_kpt7'), id=27, color=[255, 0, 128]), - 28: - dict(link=('lss_kpt7', 'lss_kpt8'), id=28, color=[255, 0, 128]), - 29: - dict(link=('lss_kpt8', 'lss_kpt9'), id=29, color=[255, 0, 128]), - 30: - dict(link=('lss_kpt9', 'lss_kpt10'), id=30, color=[255, 0, 128]), - 31: - dict(link=('lss_kpt10', 'lss_kpt11'), id=31, color=[255, 0, 128]), - 32: - dict(link=('lss_kpt11', 'lss_kpt12'), id=32, color=[255, 0, 128]), - 33: - dict(link=('lss_kpt12', 'lss_kpt13'), id=33, color=[255, 0, 128]), - 34: - dict(link=('lss_kpt13', 'lss_kpt14'), id=34, color=[255, 0, 128]), - 35: - dict(link=('lss_kpt14', 'lss_kpt15'), id=35, color=[255, 0, 128]), - 36: - dict(link=('lss_kpt15', 'lss_kpt16'), id=36, color=[255, 0, 128]), - 37: - dict(link=('lss_kpt16', 'lss_kpt17'), id=37, color=[255, 0, 128]), - 38: - dict(link=('lss_kpt17', 'lss_kpt18'), id=38, color=[255, 0, 128]), - 39: - dict(link=('lss_kpt18', 'lss_kpt19'), id=39, color=[255, 0, 128]), - 40: - dict(link=('lss_kpt19', 'lss_kpt20'), id=40, color=[255, 0, 128]), - 41: - dict(link=('lss_kpt20', 'lss_kpt21'), id=41, color=[255, 0, 128]), - 42: - dict(link=('lss_kpt21', 'lss_kpt22'), id=42, color=[255, 0, 128]), - 43: - dict(link=('lss_kpt22', 'lss_kpt23'), id=43, color=[255, 0, 128]), - 44: - dict(link=('lss_kpt23', 'lss_kpt24'), id=44, color=[255, 0, 128]), - 45: - dict(link=('lss_kpt24', 'lss_kpt25'), id=45, color=[255, 0, 128]), - 46: - dict(link=('lss_kpt25', 'lss_kpt26'), id=46, color=[255, 0, 128]), - 47: - dict(link=('lss_kpt26', 'lss_kpt27'), id=47, color=[255, 0, 128]), - 48: - dict(link=('lss_kpt27', 'lss_kpt28'), id=48, color=[255, 0, 128]), - 49: - dict(link=('lss_kpt28', 'lss_kpt29'), id=49, color=[255, 0, 128]), - 50: - dict(link=('lss_kpt29', 'lss_kpt30'), id=50, color=[255, 0, 128]), - 51: - dict(link=('lss_kpt30', 'lss_kpt31'), id=51, color=[255, 0, 128]), - 52: - dict(link=('lss_kpt31', 'lss_kpt32'), id=52, color=[255, 0, 128]), - 53: - dict(link=('lss_kpt32', 'lss_kpt33'), id=53, color=[255, 0, 128]), - 54: - dict(link=('lss_kpt33', 'lss_kpt6'), id=54, color=[255, 0, 128]), - 55: - dict(link=('lss_kpt6', 'lss_kpt5'), id=55, color=[255, 0, 128]), - 56: - dict(link=('lss_kpt5', 'lss_kpt4'), id=56, color=[255, 0, 128]), - 57: - dict(link=('lss_kpt4', 'lss_kpt3'), id=57, color=[255, 0, 128]), - 58: - dict(link=('lss_kpt3', 'lss_kpt2'), id=58, color=[255, 0, 128]), - 59: - dict(link=('lss_kpt6', 'lss_kpt1'), id=59, color=[255, 0, 128]), - # short_sleeved_outwear - 60: - dict(link=('sso_kpt1', 'sso_kpt4'), id=60, color=[128, 0, 255]), - 61: - dict(link=('sso_kpt4', 'sso_kpt7'), id=61, color=[128, 0, 255]), - 62: - dict(link=('sso_kpt7', 'sso_kpt8'), id=62, color=[128, 0, 255]), - 63: - dict(link=('sso_kpt8', 'sso_kpt9'), id=63, color=[128, 0, 255]), - 64: - dict(link=('sso_kpt9', 'sso_kpt10'), id=64, color=[128, 0, 255]), - 65: - dict(link=('sso_kpt10', 'sso_kpt11'), id=65, color=[128, 0, 255]), - 66: - dict(link=('sso_kpt11', 'sso_kpt12'), id=66, color=[128, 0, 255]), - 67: - dict(link=('sso_kpt12', 'sso_kpt13'), id=67, color=[128, 0, 255]), - 68: - dict(link=('sso_kpt13', 'sso_kpt14'), id=68, color=[128, 0, 255]), - 69: - dict(link=('sso_kpt14', 'sso_kpt15'), id=69, color=[128, 0, 255]), - 70: - dict(link=('sso_kpt15', 'sso_kpt16'), id=70, color=[128, 0, 255]), - 71: - dict(link=('sso_kpt16', 'sso_kpt31'), id=71, color=[128, 0, 255]), - 72: - dict(link=('sso_kpt31', 'sso_kpt30'), id=72, 
color=[128, 0, 255]), - 73: - dict(link=('sso_kpt30', 'sso_kpt2'), id=73, color=[128, 0, 255]), - 74: - dict(link=('sso_kpt2', 'sso_kpt3'), id=74, color=[128, 0, 255]), - 75: - dict(link=('sso_kpt3', 'sso_kpt4'), id=75, color=[128, 0, 255]), - 76: - dict(link=('sso_kpt1', 'sso_kpt6'), id=76, color=[128, 0, 255]), - 77: - dict(link=('sso_kpt6', 'sso_kpt25'), id=77, color=[128, 0, 255]), - 78: - dict(link=('sso_kpt25', 'sso_kpt24'), id=78, color=[128, 0, 255]), - 79: - dict(link=('sso_kpt24', 'sso_kpt23'), id=79, color=[128, 0, 255]), - 80: - dict(link=('sso_kpt23', 'sso_kpt22'), id=80, color=[128, 0, 255]), - 81: - dict(link=('sso_kpt22', 'sso_kpt21'), id=81, color=[128, 0, 255]), - 82: - dict(link=('sso_kpt21', 'sso_kpt20'), id=82, color=[128, 0, 255]), - 83: - dict(link=('sso_kpt20', 'sso_kpt19'), id=83, color=[128, 0, 255]), - 84: - dict(link=('sso_kpt19', 'sso_kpt18'), id=84, color=[128, 0, 255]), - 85: - dict(link=('sso_kpt18', 'sso_kpt17'), id=85, color=[128, 0, 255]), - 86: - dict(link=('sso_kpt17', 'sso_kpt29'), id=86, color=[128, 0, 255]), - 87: - dict(link=('sso_kpt29', 'sso_kpt28'), id=87, color=[128, 0, 255]), - 88: - dict(link=('sso_kpt28', 'sso_kpt27'), id=88, color=[128, 0, 255]), - 89: - dict(link=('sso_kpt27', 'sso_kpt26'), id=89, color=[128, 0, 255]), - 90: - dict(link=('sso_kpt26', 'sso_kpt5'), id=90, color=[128, 0, 255]), - 91: - dict(link=('sso_kpt5', 'sso_kpt6'), id=91, color=[128, 0, 255]), - # long_sleeved_outwear - 92: - dict(link=('lso_kpt1', 'lso_kpt2'), id=92, color=[0, 128, 255]), - 93: - dict(link=('lso_kpt2', 'lso_kpt7'), id=93, color=[0, 128, 255]), - 94: - dict(link=('lso_kpt7', 'lso_kpt8'), id=94, color=[0, 128, 255]), - 95: - dict(link=('lso_kpt8', 'lso_kpt9'), id=95, color=[0, 128, 255]), - 96: - dict(link=('lso_kpt9', 'lso_kpt10'), id=96, color=[0, 128, 255]), - 97: - dict(link=('lso_kpt10', 'lso_kpt11'), id=97, color=[0, 128, 255]), - 98: - dict(link=('lso_kpt11', 'lso_kpt12'), id=98, color=[0, 128, 255]), - 99: - dict(link=('lso_kpt12', 'lso_kpt13'), id=99, color=[0, 128, 255]), - 100: - dict(link=('lso_kpt13', 'lso_kpt14'), id=100, color=[0, 128, 255]), - 101: - dict(link=('lso_kpt14', 'lso_kpt15'), id=101, color=[0, 128, 255]), - 102: - dict(link=('lso_kpt15', 'lso_kpt16'), id=102, color=[0, 128, 255]), - 103: - dict(link=('lso_kpt16', 'lso_kpt17'), id=103, color=[0, 128, 255]), - 104: - dict(link=('lso_kpt17', 'lso_kpt18'), id=104, color=[0, 128, 255]), - 105: - dict(link=('lso_kpt18', 'lso_kpt19'), id=105, color=[0, 128, 255]), - 106: - dict(link=('lso_kpt19', 'lso_kpt20'), id=106, color=[0, 128, 255]), - 107: - dict(link=('lso_kpt20', 'lso_kpt39'), id=107, color=[0, 128, 255]), - 108: - dict(link=('lso_kpt39', 'lso_kpt38'), id=108, color=[0, 128, 255]), - 109: - dict(link=('lso_kpt38', 'lso_kpt4'), id=109, color=[0, 128, 255]), - 110: - dict(link=('lso_kpt4', 'lso_kpt3'), id=110, color=[0, 128, 255]), - 111: - dict(link=('lso_kpt3', 'lso_kpt2'), id=111, color=[0, 128, 255]), - 112: - dict(link=('lso_kpt1', 'lso_kpt6'), id=112, color=[0, 128, 255]), - 113: - dict(link=('lso_kpt6', 'lso_kpt33'), id=113, color=[0, 128, 255]), - 114: - dict(link=('lso_kpt33', 'lso_kpt32'), id=114, color=[0, 128, 255]), - 115: - dict(link=('lso_kpt32', 'lso_kpt31'), id=115, color=[0, 128, 255]), - 116: - dict(link=('lso_kpt31', 'lso_kpt30'), id=116, color=[0, 128, 255]), - 117: - dict(link=('lso_kpt30', 'lso_kpt29'), id=117, color=[0, 128, 255]), - 118: - dict(link=('lso_kpt29', 'lso_kpt28'), id=118, color=[0, 128, 255]), - 119: - dict(link=('lso_kpt28', 'lso_kpt27'), 
id=119, color=[0, 128, 255]), - 120: - dict(link=('lso_kpt27', 'lso_kpt26'), id=120, color=[0, 128, 255]), - 121: - dict(link=('lso_kpt26', 'lso_kpt25'), id=121, color=[0, 128, 255]), - 122: - dict(link=('lso_kpt25', 'lso_kpt24'), id=122, color=[0, 128, 255]), - 123: - dict(link=('lso_kpt24', 'lso_kpt23'), id=123, color=[0, 128, 255]), - 124: - dict(link=('lso_kpt23', 'lso_kpt22'), id=124, color=[0, 128, 255]), - 125: - dict(link=('lso_kpt22', 'lso_kpt21'), id=125, color=[0, 128, 255]), - 126: - dict(link=('lso_kpt21', 'lso_kpt37'), id=126, color=[0, 128, 255]), - 127: - dict(link=('lso_kpt37', 'lso_kpt36'), id=127, color=[0, 128, 255]), - 128: - dict(link=('lso_kpt36', 'lso_kpt35'), id=128, color=[0, 128, 255]), - 129: - dict(link=('lso_kpt35', 'lso_kpt34'), id=129, color=[0, 128, 255]), - 130: - dict(link=('lso_kpt34', 'lso_kpt5'), id=130, color=[0, 128, 255]), - 131: - dict(link=('lso_kpt5', 'lso_kpt6'), id=131, color=[0, 128, 255]), - # vest - 132: - dict(link=('vest_kpt1', 'vest_kpt2'), id=132, color=[0, 128, 128]), - 133: - dict(link=('vest_kpt2', 'vest_kpt7'), id=133, color=[0, 128, 128]), - 134: - dict(link=('vest_kpt7', 'vest_kpt8'), id=134, color=[0, 128, 128]), - 135: - dict(link=('vest_kpt8', 'vest_kpt9'), id=135, color=[0, 128, 128]), - 136: - dict(link=('vest_kpt9', 'vest_kpt10'), id=136, color=[0, 128, 128]), - 137: - dict(link=('vest_kpt10', 'vest_kpt11'), id=137, color=[0, 128, 128]), - 138: - dict(link=('vest_kpt11', 'vest_kpt12'), id=138, color=[0, 128, 128]), - 139: - dict(link=('vest_kpt12', 'vest_kpt13'), id=139, color=[0, 128, 128]), - 140: - dict(link=('vest_kpt13', 'vest_kpt14'), id=140, color=[0, 128, 128]), - 141: - dict(link=('vest_kpt14', 'vest_kpt15'), id=141, color=[0, 128, 128]), - 142: - dict(link=('vest_kpt15', 'vest_kpt6'), id=142, color=[0, 128, 128]), - 143: - dict(link=('vest_kpt6', 'vest_kpt1'), id=143, color=[0, 128, 128]), - 144: - dict(link=('vest_kpt2', 'vest_kpt3'), id=144, color=[0, 128, 128]), - 145: - dict(link=('vest_kpt3', 'vest_kpt4'), id=145, color=[0, 128, 128]), - 146: - dict(link=('vest_kpt4', 'vest_kpt5'), id=146, color=[0, 128, 128]), - 147: - dict(link=('vest_kpt5', 'vest_kpt6'), id=147, color=[0, 128, 128]), - # sling - 148: - dict(link=('sling_kpt1', 'sling_kpt2'), id=148, color=[0, 0, 128]), - 149: - dict(link=('sling_kpt2', 'sling_kpt8'), id=149, color=[0, 0, 128]), - 150: - dict(link=('sling_kpt8', 'sling_kpt9'), id=150, color=[0, 0, 128]), - 151: - dict(link=('sling_kpt9', 'sling_kpt10'), id=151, color=[0, 0, 128]), - 152: - dict(link=('sling_kpt10', 'sling_kpt11'), id=152, color=[0, 0, 128]), - 153: - dict(link=('sling_kpt11', 'sling_kpt12'), id=153, color=[0, 0, 128]), - 154: - dict(link=('sling_kpt12', 'sling_kpt13'), id=154, color=[0, 0, 128]), - 155: - dict(link=('sling_kpt13', 'sling_kpt14'), id=155, color=[0, 0, 128]), - 156: - dict(link=('sling_kpt14', 'sling_kpt6'), id=156, color=[0, 0, 128]), - 157: - dict(link=('sling_kpt2', 'sling_kpt7'), id=157, color=[0, 0, 128]), - 158: - dict(link=('sling_kpt6', 'sling_kpt15'), id=158, color=[0, 0, 128]), - 159: - dict(link=('sling_kpt2', 'sling_kpt3'), id=159, color=[0, 0, 128]), - 160: - dict(link=('sling_kpt3', 'sling_kpt4'), id=160, color=[0, 0, 128]), - 161: - dict(link=('sling_kpt4', 'sling_kpt5'), id=161, color=[0, 0, 128]), - 162: - dict(link=('sling_kpt5', 'sling_kpt6'), id=162, color=[0, 0, 128]), - 163: - dict(link=('sling_kpt1', 'sling_kpt6'), id=163, color=[0, 0, 128]), - # shorts - 164: - dict( - link=('shorts_kpt1', 'shorts_kpt4'), id=164, color=[128, 128, - 
128]), - 165: - dict( - link=('shorts_kpt4', 'shorts_kpt5'), id=165, color=[128, 128, - 128]), - 166: - dict( - link=('shorts_kpt5', 'shorts_kpt6'), id=166, color=[128, 128, - 128]), - 167: - dict( - link=('shorts_kpt6', 'shorts_kpt7'), id=167, color=[128, 128, - 128]), - 168: - dict( - link=('shorts_kpt7', 'shorts_kpt8'), id=168, color=[128, 128, - 128]), - 169: - dict( - link=('shorts_kpt8', 'shorts_kpt9'), id=169, color=[128, 128, - 128]), - 170: - dict( - link=('shorts_kpt9', 'shorts_kpt10'), - id=170, - color=[128, 128, 128]), - 171: - dict( - link=('shorts_kpt10', 'shorts_kpt3'), - id=171, - color=[128, 128, 128]), - 172: - dict( - link=('shorts_kpt3', 'shorts_kpt2'), id=172, color=[128, 128, - 128]), - 173: - dict( - link=('shorts_kpt2', 'shorts_kpt1'), id=173, color=[128, 128, - 128]), - # trousers - 174: - dict( - link=('trousers_kpt1', 'trousers_kpt4'), - id=174, - color=[128, 0, 128]), - 175: - dict( - link=('trousers_kpt4', 'trousers_kpt5'), - id=175, - color=[128, 0, 128]), - 176: - dict( - link=('trousers_kpt5', 'trousers_kpt6'), - id=176, - color=[128, 0, 128]), - 177: - dict( - link=('trousers_kpt6', 'trousers_kpt7'), - id=177, - color=[128, 0, 128]), - 178: - dict( - link=('trousers_kpt7', 'trousers_kpt8'), - id=178, - color=[128, 0, 128]), - 179: - dict( - link=('trousers_kpt8', 'trousers_kpt9'), - id=179, - color=[128, 0, 128]), - 180: - dict( - link=('trousers_kpt9', 'trousers_kpt10'), - id=180, - color=[128, 0, 128]), - 181: - dict( - link=('trousers_kpt10', 'trousers_kpt11'), - id=181, - color=[128, 0, 128]), - 182: - dict( - link=('trousers_kpt11', 'trousers_kpt12'), - id=182, - color=[128, 0, 128]), - 183: - dict( - link=('trousers_kpt12', 'trousers_kpt13'), - id=183, - color=[128, 0, 128]), - 184: - dict( - link=('trousers_kpt13', 'trousers_kpt14'), - id=184, - color=[128, 0, 128]), - 185: - dict( - link=('trousers_kpt14', 'trousers_kpt3'), - id=185, - color=[128, 0, 128]), - 186: - dict( - link=('trousers_kpt3', 'trousers_kpt2'), - id=186, - color=[128, 0, 128]), - 187: - dict( - link=('trousers_kpt2', 'trousers_kpt1'), - id=187, - color=[128, 0, 128]), - # skirt - 188: - dict(link=('skirt_kpt1', 'skirt_kpt4'), id=188, color=[64, 128, 128]), - 189: - dict(link=('skirt_kpt4', 'skirt_kpt5'), id=189, color=[64, 128, 128]), - 190: - dict(link=('skirt_kpt5', 'skirt_kpt6'), id=190, color=[64, 128, 128]), - 191: - dict(link=('skirt_kpt6', 'skirt_kpt7'), id=191, color=[64, 128, 128]), - 192: - dict(link=('skirt_kpt7', 'skirt_kpt8'), id=192, color=[64, 128, 128]), - 193: - dict(link=('skirt_kpt8', 'skirt_kpt3'), id=193, color=[64, 128, 128]), - 194: - dict(link=('skirt_kpt3', 'skirt_kpt2'), id=194, color=[64, 128, 128]), - 195: - dict(link=('skirt_kpt2', 'skirt_kpt1'), id=195, color=[64, 128, 128]), - # short_sleeved_dress - 196: - dict(link=('ssd_kpt1', 'ssd_kpt2'), id=196, color=[64, 64, 128]), - 197: - dict(link=('ssd_kpt2', 'ssd_kpt7'), id=197, color=[64, 64, 128]), - 198: - dict(link=('ssd_kpt7', 'ssd_kpt8'), id=198, color=[64, 64, 128]), - 199: - dict(link=('ssd_kpt8', 'ssd_kpt9'), id=199, color=[64, 64, 128]), - 200: - dict(link=('ssd_kpt9', 'ssd_kpt10'), id=200, color=[64, 64, 128]), - 201: - dict(link=('ssd_kpt10', 'ssd_kpt11'), id=201, color=[64, 64, 128]), - 202: - dict(link=('ssd_kpt11', 'ssd_kpt12'), id=202, color=[64, 64, 128]), - 203: - dict(link=('ssd_kpt12', 'ssd_kpt13'), id=203, color=[64, 64, 128]), - 204: - dict(link=('ssd_kpt13', 'ssd_kpt14'), id=204, color=[64, 64, 128]), - 205: - dict(link=('ssd_kpt14', 'ssd_kpt15'), id=205, color=[64, 64, 128]), - 
206: - dict(link=('ssd_kpt15', 'ssd_kpt16'), id=206, color=[64, 64, 128]), - 207: - dict(link=('ssd_kpt16', 'ssd_kpt17'), id=207, color=[64, 64, 128]), - 208: - dict(link=('ssd_kpt17', 'ssd_kpt18'), id=208, color=[64, 64, 128]), - 209: - dict(link=('ssd_kpt18', 'ssd_kpt19'), id=209, color=[64, 64, 128]), - 210: - dict(link=('ssd_kpt19', 'ssd_kpt20'), id=210, color=[64, 64, 128]), - 211: - dict(link=('ssd_kpt20', 'ssd_kpt21'), id=211, color=[64, 64, 128]), - 212: - dict(link=('ssd_kpt21', 'ssd_kpt22'), id=212, color=[64, 64, 128]), - 213: - dict(link=('ssd_kpt22', 'ssd_kpt23'), id=213, color=[64, 64, 128]), - 214: - dict(link=('ssd_kpt23', 'ssd_kpt24'), id=214, color=[64, 64, 128]), - 215: - dict(link=('ssd_kpt24', 'ssd_kpt25'), id=215, color=[64, 64, 128]), - 216: - dict(link=('ssd_kpt25', 'ssd_kpt26'), id=216, color=[64, 64, 128]), - 217: - dict(link=('ssd_kpt26', 'ssd_kpt27'), id=217, color=[64, 64, 128]), - 218: - dict(link=('ssd_kpt27', 'ssd_kpt28'), id=218, color=[64, 64, 128]), - 219: - dict(link=('ssd_kpt28', 'ssd_kpt29'), id=219, color=[64, 64, 128]), - 220: - dict(link=('ssd_kpt29', 'ssd_kpt6'), id=220, color=[64, 64, 128]), - 221: - dict(link=('ssd_kpt6', 'ssd_kpt5'), id=221, color=[64, 64, 128]), - 222: - dict(link=('ssd_kpt5', 'ssd_kpt4'), id=222, color=[64, 64, 128]), - 223: - dict(link=('ssd_kpt4', 'ssd_kpt3'), id=223, color=[64, 64, 128]), - 224: - dict(link=('ssd_kpt3', 'ssd_kpt2'), id=224, color=[64, 64, 128]), - 225: - dict(link=('ssd_kpt6', 'ssd_kpt1'), id=225, color=[64, 64, 128]), - # long_sleeved_dress - 226: - dict(link=('lsd_kpt1', 'lsd_kpt2'), id=226, color=[128, 64, 0]), - 227: - dict(link=('lsd_kpt2', 'lsd_kpt7'), id=228, color=[128, 64, 0]), - 228: - dict(link=('lsd_kpt7', 'lsd_kpt8'), id=228, color=[128, 64, 0]), - 229: - dict(link=('lsd_kpt8', 'lsd_kpt9'), id=229, color=[128, 64, 0]), - 230: - dict(link=('lsd_kpt9', 'lsd_kpt10'), id=230, color=[128, 64, 0]), - 231: - dict(link=('lsd_kpt10', 'lsd_kpt11'), id=231, color=[128, 64, 0]), - 232: - dict(link=('lsd_kpt11', 'lsd_kpt12'), id=232, color=[128, 64, 0]), - 233: - dict(link=('lsd_kpt12', 'lsd_kpt13'), id=233, color=[128, 64, 0]), - 234: - dict(link=('lsd_kpt13', 'lsd_kpt14'), id=234, color=[128, 64, 0]), - 235: - dict(link=('lsd_kpt14', 'lsd_kpt15'), id=235, color=[128, 64, 0]), - 236: - dict(link=('lsd_kpt15', 'lsd_kpt16'), id=236, color=[128, 64, 0]), - 237: - dict(link=('lsd_kpt16', 'lsd_kpt17'), id=237, color=[128, 64, 0]), - 238: - dict(link=('lsd_kpt17', 'lsd_kpt18'), id=238, color=[128, 64, 0]), - 239: - dict(link=('lsd_kpt18', 'lsd_kpt19'), id=239, color=[128, 64, 0]), - 240: - dict(link=('lsd_kpt19', 'lsd_kpt20'), id=240, color=[128, 64, 0]), - 241: - dict(link=('lsd_kpt20', 'lsd_kpt21'), id=241, color=[128, 64, 0]), - 242: - dict(link=('lsd_kpt21', 'lsd_kpt22'), id=242, color=[128, 64, 0]), - 243: - dict(link=('lsd_kpt22', 'lsd_kpt23'), id=243, color=[128, 64, 0]), - 244: - dict(link=('lsd_kpt23', 'lsd_kpt24'), id=244, color=[128, 64, 0]), - 245: - dict(link=('lsd_kpt24', 'lsd_kpt25'), id=245, color=[128, 64, 0]), - 246: - dict(link=('lsd_kpt25', 'lsd_kpt26'), id=246, color=[128, 64, 0]), - 247: - dict(link=('lsd_kpt26', 'lsd_kpt27'), id=247, color=[128, 64, 0]), - 248: - dict(link=('lsd_kpt27', 'lsd_kpt28'), id=248, color=[128, 64, 0]), - 249: - dict(link=('lsd_kpt28', 'lsd_kpt29'), id=249, color=[128, 64, 0]), - 250: - dict(link=('lsd_kpt29', 'lsd_kpt30'), id=250, color=[128, 64, 0]), - 251: - dict(link=('lsd_kpt30', 'lsd_kpt31'), id=251, color=[128, 64, 0]), - 252: - dict(link=('lsd_kpt31', 
'lsd_kpt32'), id=252, color=[128, 64, 0]), - 253: - dict(link=('lsd_kpt32', 'lsd_kpt33'), id=253, color=[128, 64, 0]), - 254: - dict(link=('lsd_kpt33', 'lsd_kpt34'), id=254, color=[128, 64, 0]), - 255: - dict(link=('lsd_kpt34', 'lsd_kpt35'), id=255, color=[128, 64, 0]), - 256: - dict(link=('lsd_kpt35', 'lsd_kpt36'), id=256, color=[128, 64, 0]), - 257: - dict(link=('lsd_kpt36', 'lsd_kpt37'), id=257, color=[128, 64, 0]), - 258: - dict(link=('lsd_kpt37', 'lsd_kpt6'), id=258, color=[128, 64, 0]), - 259: - dict(link=('lsd_kpt6', 'lsd_kpt5'), id=259, color=[128, 64, 0]), - 260: - dict(link=('lsd_kpt5', 'lsd_kpt4'), id=260, color=[128, 64, 0]), - 261: - dict(link=('lsd_kpt4', 'lsd_kpt3'), id=261, color=[128, 64, 0]), - 262: - dict(link=('lsd_kpt3', 'lsd_kpt2'), id=262, color=[128, 64, 0]), - 263: - dict(link=('lsd_kpt6', 'lsd_kpt1'), id=263, color=[128, 64, 0]), - # vest_dress - 264: - dict(link=('vd_kpt1', 'vd_kpt2'), id=264, color=[128, 64, 255]), - 265: - dict(link=('vd_kpt2', 'vd_kpt7'), id=265, color=[128, 64, 255]), - 266: - dict(link=('vd_kpt7', 'vd_kpt8'), id=266, color=[128, 64, 255]), - 267: - dict(link=('vd_kpt8', 'vd_kpt9'), id=267, color=[128, 64, 255]), - 268: - dict(link=('vd_kpt9', 'vd_kpt10'), id=268, color=[128, 64, 255]), - 269: - dict(link=('vd_kpt10', 'vd_kpt11'), id=269, color=[128, 64, 255]), - 270: - dict(link=('vd_kpt11', 'vd_kpt12'), id=270, color=[128, 64, 255]), - 271: - dict(link=('vd_kpt12', 'vd_kpt13'), id=271, color=[128, 64, 255]), - 272: - dict(link=('vd_kpt13', 'vd_kpt14'), id=272, color=[128, 64, 255]), - 273: - dict(link=('vd_kpt14', 'vd_kpt15'), id=273, color=[128, 64, 255]), - 274: - dict(link=('vd_kpt15', 'vd_kpt16'), id=274, color=[128, 64, 255]), - 275: - dict(link=('vd_kpt16', 'vd_kpt17'), id=275, color=[128, 64, 255]), - 276: - dict(link=('vd_kpt17', 'vd_kpt18'), id=276, color=[128, 64, 255]), - 277: - dict(link=('vd_kpt18', 'vd_kpt19'), id=277, color=[128, 64, 255]), - 278: - dict(link=('vd_kpt19', 'vd_kpt6'), id=278, color=[128, 64, 255]), - 279: - dict(link=('vd_kpt6', 'vd_kpt5'), id=279, color=[128, 64, 255]), - 280: - dict(link=('vd_kpt5', 'vd_kpt4'), id=280, color=[128, 64, 255]), - 281: - dict(link=('vd_kpt4', 'vd_kpt3'), id=281, color=[128, 64, 255]), - 282: - dict(link=('vd_kpt3', 'vd_kpt2'), id=282, color=[128, 64, 255]), - 283: - dict(link=('vd_kpt6', 'vd_kpt1'), id=283, color=[128, 64, 255]), - # sling_dress - 284: - dict(link=('sd_kpt1', 'sd_kpt2'), id=284, color=[128, 64, 0]), - 285: - dict(link=('sd_kpt2', 'sd_kpt8'), id=285, color=[128, 64, 0]), - 286: - dict(link=('sd_kpt8', 'sd_kpt9'), id=286, color=[128, 64, 0]), - 287: - dict(link=('sd_kpt9', 'sd_kpt10'), id=287, color=[128, 64, 0]), - 288: - dict(link=('sd_kpt10', 'sd_kpt11'), id=288, color=[128, 64, 0]), - 289: - dict(link=('sd_kpt11', 'sd_kpt12'), id=289, color=[128, 64, 0]), - 290: - dict(link=('sd_kpt12', 'sd_kpt13'), id=290, color=[128, 64, 0]), - 291: - dict(link=('sd_kpt13', 'sd_kpt14'), id=291, color=[128, 64, 0]), - 292: - dict(link=('sd_kpt14', 'sd_kpt15'), id=292, color=[128, 64, 0]), - 293: - dict(link=('sd_kpt15', 'sd_kpt16'), id=293, color=[128, 64, 0]), - 294: - dict(link=('sd_kpt16', 'sd_kpt17'), id=294, color=[128, 64, 0]), - 295: - dict(link=('sd_kpt17', 'sd_kpt18'), id=295, color=[128, 64, 0]), - 296: - dict(link=('sd_kpt18', 'sd_kpt6'), id=296, color=[128, 64, 0]), - 297: - dict(link=('sd_kpt6', 'sd_kpt5'), id=297, color=[128, 64, 0]), - 298: - dict(link=('sd_kpt5', 'sd_kpt4'), id=298, color=[128, 64, 0]), - 299: - dict(link=('sd_kpt4', 'sd_kpt3'), id=299, 
color=[128, 64, 0]), - 300: - dict(link=('sd_kpt3', 'sd_kpt2'), id=300, color=[128, 64, 0]), - 301: - dict(link=('sd_kpt2', 'sd_kpt7'), id=301, color=[128, 64, 0]), - 302: - dict(link=('sd_kpt6', 'sd_kpt19'), id=302, color=[128, 64, 0]), - 303: - dict(link=('sd_kpt6', 'sd_kpt1'), id=303, color=[128, 64, 0]), - }, - joint_weights=[1.] * 294, - sigmas=[]) +colors = dict( + sss=[255, 128, 0], # short_sleeve_shirt + lss=[255, 0, 128], # long_sleeved_shirt + sso=[128, 0, 255], # short_sleeved_outwear + lso=[0, 128, 255], # long_sleeved_outwear + vest=[0, 128, 128], # vest + sling=[0, 0, 128], # sling + shorts=[128, 128, 128], # shorts + trousers=[128, 0, 128], # trousers + skirt=[64, 128, 128], # skirt + ssd=[64, 64, 128], # short_sleeved_dress + lsd=[128, 64, 0], # long_sleeved_dress + vd=[128, 64, 255], # vest_dress + sd=[128, 64, 0], # sling_dress +) +dataset_info = dict( + dataset_name='deepfashion2', + paper_info=dict( + author='Yuying Ge and Ruimao Zhang and Lingyun Wu ' + 'and Xiaogang Wang and Xiaoou Tang and Ping Luo', + title='DeepFashion2: A Versatile Benchmark for ' + 'Detection, Pose Estimation, Segmentation and ' + 'Re-Identification of Clothing Images', + container='Proceedings of IEEE Conference on Computer ' + 'Vision and Pattern Recognition (CVPR)', + year='2019', + homepage='https://github.com/switchablenorms/DeepFashion2', + ), + keypoint_info={ + # short_sleeved_shirt + 0: + dict(name='sss_kpt1', id=0, color=colors['sss'], type='', swap=''), + 1: + dict( + name='sss_kpt2', + id=1, + color=colors['sss'], + type='', + swap='sss_kpt6'), + 2: + dict( + name='sss_kpt3', + id=2, + color=colors['sss'], + type='', + swap='sss_kpt5'), + 3: + dict(name='sss_kpt4', id=3, color=colors['sss'], type='', swap=''), + 4: + dict( + name='sss_kpt5', + id=4, + color=colors['sss'], + type='', + swap='sss_kpt3'), + 5: + dict( + name='sss_kpt6', + id=5, + color=colors['sss'], + type='', + swap='sss_kpt2'), + 6: + dict( + name='sss_kpt7', + id=6, + color=colors['sss'], + type='', + swap='sss_kpt25'), + 7: + dict( + name='sss_kpt8', + id=7, + color=colors['sss'], + type='', + swap='sss_kpt24'), + 8: + dict( + name='sss_kpt9', + id=8, + color=colors['sss'], + type='', + swap='sss_kpt23'), + 9: + dict( + name='sss_kpt10', + id=9, + color=colors['sss'], + type='', + swap='sss_kpt22'), + 10: + dict( + name='sss_kpt11', + id=10, + color=colors['sss'], + type='', + swap='sss_kpt21'), + 11: + dict( + name='sss_kpt12', + id=11, + color=colors['sss'], + type='', + swap='sss_kpt20'), + 12: + dict( + name='sss_kpt13', + id=12, + color=colors['sss'], + type='', + swap='sss_kpt19'), + 13: + dict( + name='sss_kpt14', + id=13, + color=colors['sss'], + type='', + swap='sss_kpt18'), + 14: + dict( + name='sss_kpt15', + id=14, + color=colors['sss'], + type='', + swap='sss_kpt17'), + 15: + dict(name='sss_kpt16', id=15, color=colors['sss'], type='', swap=''), + 16: + dict( + name='sss_kpt17', + id=16, + color=colors['sss'], + type='', + swap='sss_kpt15'), + 17: + dict( + name='sss_kpt18', + id=17, + color=colors['sss'], + type='', + swap='sss_kpt14'), + 18: + dict( + name='sss_kpt19', + id=18, + color=colors['sss'], + type='', + swap='sss_kpt13'), + 19: + dict( + name='sss_kpt20', + id=19, + color=colors['sss'], + type='', + swap='sss_kpt12'), + 20: + dict( + name='sss_kpt21', + id=20, + color=colors['sss'], + type='', + swap='sss_kpt11'), + 21: + dict( + name='sss_kpt22', + id=21, + color=colors['sss'], + type='', + swap='sss_kpt10'), + 22: + dict( + name='sss_kpt23', + id=22, + color=colors['sss'], + type='', + 
swap='sss_kpt9'), + 23: + dict( + name='sss_kpt24', + id=23, + color=colors['sss'], + type='', + swap='sss_kpt8'), + 24: + dict( + name='sss_kpt25', + id=24, + color=colors['sss'], + type='', + swap='sss_kpt7'), + # long_sleeved_shirt + 25: + dict(name='lss_kpt1', id=25, color=colors['lss'], type='', swap=''), + 26: + dict( + name='lss_kpt2', + id=26, + color=colors['lss'], + type='', + swap='lss_kpt6'), + 27: + dict( + name='lss_kpt3', + id=27, + color=colors['lss'], + type='', + swap='lss_kpt5'), + 28: + dict(name='lss_kpt4', id=28, color=colors['lss'], type='', swap=''), + 29: + dict( + name='lss_kpt5', + id=29, + color=colors['lss'], + type='', + swap='lss_kpt3'), + 30: + dict( + name='lss_kpt6', + id=30, + color=colors['lss'], + type='', + swap='lss_kpt2'), + 31: + dict( + name='lss_kpt7', + id=31, + color=colors['lss'], + type='', + swap='lss_kpt33'), + 32: + dict( + name='lss_kpt8', + id=32, + color=colors['lss'], + type='', + swap='lss_kpt32'), + 33: + dict( + name='lss_kpt9', + id=33, + color=colors['lss'], + type='', + swap='lss_kpt31'), + 34: + dict( + name='lss_kpt10', + id=34, + color=colors['lss'], + type='', + swap='lss_kpt30'), + 35: + dict( + name='lss_kpt11', + id=35, + color=colors['lss'], + type='', + swap='lss_kpt29'), + 36: + dict( + name='lss_kpt12', + id=36, + color=colors['lss'], + type='', + swap='lss_kpt28'), + 37: + dict( + name='lss_kpt13', + id=37, + color=colors['lss'], + type='', + swap='lss_kpt27'), + 38: + dict( + name='lss_kpt14', + id=38, + color=colors['lss'], + type='', + swap='lss_kpt26'), + 39: + dict( + name='lss_kpt15', + id=39, + color=colors['lss'], + type='', + swap='lss_kpt25'), + 40: + dict( + name='lss_kpt16', + id=40, + color=colors['lss'], + type='', + swap='lss_kpt24'), + 41: + dict( + name='lss_kpt17', + id=41, + color=colors['lss'], + type='', + swap='lss_kpt23'), + 42: + dict( + name='lss_kpt18', + id=42, + color=colors['lss'], + type='', + swap='lss_kpt22'), + 43: + dict( + name='lss_kpt19', + id=43, + color=colors['lss'], + type='', + swap='lss_kpt21'), + 44: + dict(name='lss_kpt20', id=44, color=colors['lss'], type='', swap=''), + 45: + dict( + name='lss_kpt21', + id=45, + color=colors['lss'], + type='', + swap='lss_kpt19'), + 46: + dict( + name='lss_kpt22', + id=46, + color=colors['lss'], + type='', + swap='lss_kpt18'), + 47: + dict( + name='lss_kpt23', + id=47, + color=colors['lss'], + type='', + swap='lss_kpt17'), + 48: + dict( + name='lss_kpt24', + id=48, + color=colors['lss'], + type='', + swap='lss_kpt16'), + 49: + dict( + name='lss_kpt25', + id=49, + color=colors['lss'], + type='', + swap='lss_kpt15'), + 50: + dict( + name='lss_kpt26', + id=50, + color=colors['lss'], + type='', + swap='lss_kpt14'), + 51: + dict( + name='lss_kpt27', + id=51, + color=colors['lss'], + type='', + swap='lss_kpt13'), + 52: + dict( + name='lss_kpt28', + id=52, + color=colors['lss'], + type='', + swap='lss_kpt12'), + 53: + dict( + name='lss_kpt29', + id=53, + color=colors['lss'], + type='', + swap='lss_kpt11'), + 54: + dict( + name='lss_kpt30', + id=54, + color=colors['lss'], + type='', + swap='lss_kpt10'), + 55: + dict( + name='lss_kpt31', + id=55, + color=colors['lss'], + type='', + swap='lss_kpt9'), + 56: + dict( + name='lss_kpt32', + id=56, + color=colors['lss'], + type='', + swap='lss_kpt8'), + 57: + dict( + name='lss_kpt33', + id=57, + color=colors['lss'], + type='', + swap='lss_kpt7'), + # short_sleeved_outwear + 58: + dict(name='sso_kpt1', id=58, color=colors['sso'], type='', swap=''), + 59: + dict( + name='sso_kpt2', + id=59, + 
color=colors['sso'], + type='', + swap='sso_kpt26'), + 60: + dict( + name='sso_kpt3', + id=60, + color=colors['sso'], + type='', + swap='sso_kpt5'), + 61: + dict( + name='sso_kpt4', + id=61, + color=colors['sso'], + type='', + swap='sso_kpt6'), + 62: + dict( + name='sso_kpt5', + id=62, + color=colors['sso'], + type='', + swap='sso_kpt3'), + 63: + dict( + name='sso_kpt6', + id=63, + color=colors['sso'], + type='', + swap='sso_kpt4'), + 64: + dict( + name='sso_kpt7', + id=64, + color=colors['sso'], + type='', + swap='sso_kpt25'), + 65: + dict( + name='sso_kpt8', + id=65, + color=colors['sso'], + type='', + swap='sso_kpt24'), + 66: + dict( + name='sso_kpt9', + id=66, + color=colors['sso'], + type='', + swap='sso_kpt23'), + 67: + dict( + name='sso_kpt10', + id=67, + color=colors['sso'], + type='', + swap='sso_kpt22'), + 68: + dict( + name='sso_kpt11', + id=68, + color=colors['sso'], + type='', + swap='sso_kpt21'), + 69: + dict( + name='sso_kpt12', + id=69, + color=colors['sso'], + type='', + swap='sso_kpt20'), + 70: + dict( + name='sso_kpt13', + id=70, + color=colors['sso'], + type='', + swap='sso_kpt19'), + 71: + dict( + name='sso_kpt14', + id=71, + color=colors['sso'], + type='', + swap='sso_kpt18'), + 72: + dict( + name='sso_kpt15', + id=72, + color=colors['sso'], + type='', + swap='sso_kpt17'), + 73: + dict( + name='sso_kpt16', + id=73, + color=colors['sso'], + type='', + swap='sso_kpt29'), + 74: + dict( + name='sso_kpt17', + id=74, + color=colors['sso'], + type='', + swap='sso_kpt15'), + 75: + dict( + name='sso_kpt18', + id=75, + color=colors['sso'], + type='', + swap='sso_kpt14'), + 76: + dict( + name='sso_kpt19', + id=76, + color=colors['sso'], + type='', + swap='sso_kpt13'), + 77: + dict( + name='sso_kpt20', + id=77, + color=colors['sso'], + type='', + swap='sso_kpt12'), + 78: + dict( + name='sso_kpt21', + id=78, + color=colors['sso'], + type='', + swap='sso_kpt11'), + 79: + dict( + name='sso_kpt22', + id=79, + color=colors['sso'], + type='', + swap='sso_kpt10'), + 80: + dict( + name='sso_kpt23', + id=80, + color=colors['sso'], + type='', + swap='sso_kpt9'), + 81: + dict( + name='sso_kpt24', + id=81, + color=colors['sso'], + type='', + swap='sso_kpt8'), + 82: + dict( + name='sso_kpt25', + id=82, + color=colors['sso'], + type='', + swap='sso_kpt7'), + 83: + dict( + name='sso_kpt26', + id=83, + color=colors['sso'], + type='', + swap='sso_kpt2'), + 84: + dict( + name='sso_kpt27', + id=84, + color=colors['sso'], + type='', + swap='sso_kpt30'), + 85: + dict( + name='sso_kpt28', + id=85, + color=colors['sso'], + type='', + swap='sso_kpt31'), + 86: + dict( + name='sso_kpt29', + id=86, + color=colors['sso'], + type='', + swap='sso_kpt16'), + 87: + dict( + name='sso_kpt30', + id=87, + color=colors['sso'], + type='', + swap='sso_kpt27'), + 88: + dict( + name='sso_kpt31', + id=88, + color=colors['sso'], + type='', + swap='sso_kpt28'), + # long_sleeved_outwear + 89: + dict(name='lso_kpt1', id=89, color=colors['lso'], type='', swap=''), + 90: + dict( + name='lso_kpt2', + id=90, + color=colors['lso'], + type='', + swap='lso_kpt6'), + 91: + dict( + name='lso_kpt3', + id=91, + color=colors['lso'], + type='', + swap='lso_kpt5'), + 92: + dict( + name='lso_kpt4', + id=92, + color=colors['lso'], + type='', + swap='lso_kpt34'), + 93: + dict( + name='lso_kpt5', + id=93, + color=colors['lso'], + type='', + swap='lso_kpt3'), + 94: + dict( + name='lso_kpt6', + id=94, + color=colors['lso'], + type='', + swap='lso_kpt2'), + 95: + dict( + name='lso_kpt7', + id=95, + color=colors['lso'], + type='', + 
swap='lso_kpt33'), + 96: + dict( + name='lso_kpt8', + id=96, + color=colors['lso'], + type='', + swap='lso_kpt32'), + 97: + dict( + name='lso_kpt9', + id=97, + color=colors['lso'], + type='', + swap='lso_kpt31'), + 98: + dict( + name='lso_kpt10', + id=98, + color=colors['lso'], + type='', + swap='lso_kpt30'), + 99: + dict( + name='lso_kpt11', + id=99, + color=colors['lso'], + type='', + swap='lso_kpt29'), + 100: + dict( + name='lso_kpt12', + id=100, + color=colors['lso'], + type='', + swap='lso_kpt28'), + 101: + dict( + name='lso_kpt13', + id=101, + color=colors['lso'], + type='', + swap='lso_kpt27'), + 102: + dict( + name='lso_kpt14', + id=102, + color=colors['lso'], + type='', + swap='lso_kpt26'), + 103: + dict( + name='lso_kpt15', + id=103, + color=colors['lso'], + type='', + swap='lso_kpt25'), + 104: + dict( + name='lso_kpt16', + id=104, + color=colors['lso'], + type='', + swap='lso_kpt24'), + 105: + dict( + name='lso_kpt17', + id=105, + color=colors['lso'], + type='', + swap='lso_kpt23'), + 106: + dict( + name='lso_kpt18', + id=106, + color=colors['lso'], + type='', + swap='lso_kpt22'), + 107: + dict( + name='lso_kpt19', + id=107, + color=colors['lso'], + type='', + swap='lso_kpt21'), + 108: + dict( + name='lso_kpt20', + id=108, + color=colors['lso'], + type='', + swap='lso_kpt37'), + 109: + dict( + name='lso_kpt21', + id=109, + color=colors['lso'], + type='', + swap='lso_kpt19'), + 110: + dict( + name='lso_kpt22', + id=110, + color=colors['lso'], + type='', + swap='lso_kpt18'), + 111: + dict( + name='lso_kpt23', + id=111, + color=colors['lso'], + type='', + swap='lso_kpt17'), + 112: + dict( + name='lso_kpt24', + id=112, + color=colors['lso'], + type='', + swap='lso_kpt16'), + 113: + dict( + name='lso_kpt25', + id=113, + color=colors['lso'], + type='', + swap='lso_kpt15'), + 114: + dict( + name='lso_kpt26', + id=114, + color=colors['lso'], + type='', + swap='lso_kpt14'), + 115: + dict( + name='lso_kpt27', + id=115, + color=colors['lso'], + type='', + swap='lso_kpt13'), + 116: + dict( + name='lso_kpt28', + id=116, + color=colors['lso'], + type='', + swap='lso_kpt12'), + 117: + dict( + name='lso_kpt29', + id=117, + color=colors['lso'], + type='', + swap='lso_kpt11'), + 118: + dict( + name='lso_kpt30', + id=118, + color=colors['lso'], + type='', + swap='lso_kpt10'), + 119: + dict( + name='lso_kpt31', + id=119, + color=colors['lso'], + type='', + swap='lso_kpt9'), + 120: + dict( + name='lso_kpt32', + id=120, + color=colors['lso'], + type='', + swap='lso_kpt8'), + 121: + dict( + name='lso_kpt33', + id=121, + color=colors['lso'], + type='', + swap='lso_kpt7'), + 122: + dict( + name='lso_kpt34', + id=122, + color=colors['lso'], + type='', + swap='lso_kpt4'), + 123: + dict( + name='lso_kpt35', + id=123, + color=colors['lso'], + type='', + swap='lso_kpt38'), + 124: + dict( + name='lso_kpt36', + id=124, + color=colors['lso'], + type='', + swap='lso_kpt39'), + 125: + dict( + name='lso_kpt37', + id=125, + color=colors['lso'], + type='', + swap='lso_kpt20'), + 126: + dict( + name='lso_kpt38', + id=126, + color=colors['lso'], + type='', + swap='lso_kpt35'), + 127: + dict( + name='lso_kpt39', + id=127, + color=colors['lso'], + type='', + swap='lso_kpt36'), + # vest + 128: + dict(name='vest_kpt1', id=128, color=colors['vest'], type='', swap=''), + 129: + dict( + name='vest_kpt2', + id=129, + color=colors['vest'], + type='', + swap='vest_kpt6'), + 130: + dict( + name='vest_kpt3', + id=130, + color=colors['vest'], + type='', + swap='vest_kpt5'), + 131: + dict(name='vest_kpt4', id=131, 
color=colors['vest'], type='', swap=''), + 132: + dict( + name='vest_kpt5', + id=132, + color=colors['vest'], + type='', + swap='vest_kpt3'), + 133: + dict( + name='vest_kpt6', + id=133, + color=colors['vest'], + type='', + swap='vest_kpt2'), + 134: + dict( + name='vest_kpt7', + id=134, + color=colors['vest'], + type='', + swap='vest_kpt15'), + 135: + dict( + name='vest_kpt8', + id=135, + color=colors['vest'], + type='', + swap='vest_kpt14'), + 136: + dict( + name='vest_kpt9', + id=136, + color=colors['vest'], + type='', + swap='vest_kpt13'), + 137: + dict( + name='vest_kpt10', + id=137, + color=colors['vest'], + type='', + swap='vest_kpt12'), + 138: + dict( + name='vest_kpt11', id=138, color=colors['vest'], type='', swap=''), + 139: + dict( + name='vest_kpt12', + id=139, + color=colors['vest'], + type='', + swap='vest_kpt10'), + 140: + dict( + name='vest_kpt13', id=140, color=colors['vest'], type='', swap=''), + 141: + dict( + name='vest_kpt14', + id=141, + color=colors['vest'], + type='', + swap='vest_kpt8'), + 142: + dict( + name='vest_kpt15', + id=142, + color=colors['vest'], + type='', + swap='vest_kpt7'), + # sling + 143: + dict( + name='sling_kpt1', id=143, color=colors['sling'], type='', + swap=''), + 144: + dict( + name='sling_kpt2', + id=144, + color=colors['sling'], + type='', + swap='sling_kpt6'), + 145: + dict( + name='sling_kpt3', + id=145, + color=colors['sling'], + type='', + swap='sling_kpt5'), + 146: + dict( + name='sling_kpt4', id=146, color=colors['sling'], type='', + swap=''), + 147: + dict( + name='sling_kpt5', + id=147, + color=colors['sling'], + type='', + swap='sling_kpt3'), + 148: + dict( + name='sling_kpt6', + id=148, + color=colors['sling'], + type='', + swap='sling_kpt2'), + 149: + dict( + name='sling_kpt7', + id=149, + color=colors['sling'], + type='', + swap='sling_kpt15'), + 150: + dict( + name='sling_kpt8', + id=150, + color=colors['sling'], + type='', + swap='sling_kpt14'), + 151: + dict( + name='sling_kpt9', + id=151, + color=colors['sling'], + type='', + swap='sling_kpt13'), + 152: + dict( + name='sling_kpt10', + id=152, + color=colors['sling'], + type='', + swap='sling_kpt12'), + 153: + dict( + name='sling_kpt11', + id=153, + color=colors['sling'], + type='', + swap=''), + 154: + dict( + name='sling_kpt12', + id=154, + color=colors['sling'], + type='', + swap='sling_kpt10'), + 155: + dict( + name='sling_kpt13', + id=155, + color=colors['sling'], + type='', + swap='sling_kpt9'), + 156: + dict( + name='sling_kpt14', + id=156, + color=colors['sling'], + type='', + swap='sling_kpt8'), + 157: + dict( + name='sling_kpt15', + id=157, + color=colors['sling'], + type='', + swap='sling_kpt7'), + # shorts + 158: + dict( + name='shorts_kpt1', + id=158, + color=colors['shorts'], + type='', + swap='shorts_kpt3'), + 159: + dict( + name='shorts_kpt2', + id=159, + color=colors['shorts'], + type='', + swap=''), + 160: + dict( + name='shorts_kpt3', + id=160, + color=colors['shorts'], + type='', + swap='shorts_kpt1'), + 161: + dict( + name='shorts_kpt4', + id=161, + color=colors['shorts'], + type='', + swap='shorts_kpt10'), + 162: + dict( + name='shorts_kpt5', + id=162, + color=colors['shorts'], + type='', + swap='shorts_kpt9'), + 163: + dict( + name='shorts_kpt6', + id=163, + color=colors['shorts'], + type='', + swap='shorts_kpt8'), + 164: + dict( + name='shorts_kpt7', + id=164, + color=colors['shorts'], + type='', + swap=''), + 165: + dict( + name='shorts_kpt8', + id=165, + color=colors['shorts'], + type='', + swap='shorts_kpt6'), + 166: + dict( + name='shorts_kpt9', + 
id=166, + color=colors['shorts'], + type='', + swap='shorts_kpt5'), + 167: + dict( + name='shorts_kpt10', + id=167, + color=colors['shorts'], + type='', + swap='shorts_kpt4'), + # trousers + 168: + dict( + name='trousers_kpt1', + id=168, + color=colors['trousers'], + type='', + swap='trousers_kpt3'), + 169: + dict( + name='trousers_kpt2', + id=169, + color=colors['trousers'], + type='', + swap=''), + 170: + dict( + name='trousers_kpt3', + id=170, + color=colors['trousers'], + type='', + swap='trousers_kpt1'), + 171: + dict( + name='trousers_kpt4', + id=171, + color=colors['trousers'], + type='', + swap='trousers_kpt14'), + 172: + dict( + name='trousers_kpt5', + id=172, + color=colors['trousers'], + type='', + swap='trousers_kpt13'), + 173: + dict( + name='trousers_kpt6', + id=173, + color=colors['trousers'], + type='', + swap='trousers_kpt12'), + 174: + dict( + name='trousers_kpt7', + id=174, + color=colors['trousers'], + type='', + swap='trousers_kpt11'), + 175: + dict( + name='trousers_kpt8', + id=175, + color=colors['trousers'], + type='', + swap='trousers_kpt10'), + 176: + dict( + name='trousers_kpt9', + id=176, + color=colors['trousers'], + type='', + swap=''), + 177: + dict( + name='trousers_kpt10', + id=177, + color=colors['trousers'], + type='', + swap='trousers_kpt8'), + 178: + dict( + name='trousers_kpt11', + id=178, + color=colors['trousers'], + type='', + swap='trousers_kpt7'), + 179: + dict( + name='trousers_kpt12', + id=179, + color=colors['trousers'], + type='', + swap='trousers_kpt6'), + 180: + dict( + name='trousers_kpt13', + id=180, + color=colors['trousers'], + type='', + swap='trousers_kpt5'), + 181: + dict( + name='trousers_kpt14', + id=181, + color=colors['trousers'], + type='', + swap='trousers_kpt4'), + # skirt + 182: + dict( + name='skirt_kpt1', + id=182, + color=colors['skirt'], + type='', + swap='skirt_kpt3'), + 183: + dict( + name='skirt_kpt2', id=183, color=colors['skirt'], type='', + swap=''), + 184: + dict( + name='skirt_kpt3', + id=184, + color=colors['skirt'], + type='', + swap='skirt_kpt1'), + 185: + dict( + name='skirt_kpt4', + id=185, + color=colors['skirt'], + type='', + swap='skirt_kpt8'), + 186: + dict( + name='skirt_kpt5', + id=186, + color=colors['skirt'], + type='', + swap='skirt_kpt7'), + 187: + dict( + name='skirt_kpt6', id=187, color=colors['skirt'], type='', + swap=''), + 188: + dict( + name='skirt_kpt7', + id=188, + color=colors['skirt'], + type='', + swap='skirt_kpt5'), + 189: + dict( + name='skirt_kpt8', + id=189, + color=colors['skirt'], + type='', + swap='skirt_kpt4'), + # short_sleeved_dress + 190: + dict(name='ssd_kpt1', id=190, color=colors['ssd'], type='', swap=''), + 191: + dict( + name='ssd_kpt2', + id=191, + color=colors['ssd'], + type='', + swap='ssd_kpt6'), + 192: + dict( + name='ssd_kpt3', + id=192, + color=colors['ssd'], + type='', + swap='ssd_kpt5'), + 193: + dict(name='ssd_kpt4', id=193, color=colors['ssd'], type='', swap=''), + 194: + dict( + name='ssd_kpt5', + id=194, + color=colors['ssd'], + type='', + swap='ssd_kpt3'), + 195: + dict( + name='ssd_kpt6', + id=195, + color=colors['ssd'], + type='', + swap='ssd_kpt2'), + 196: + dict( + name='ssd_kpt7', + id=196, + color=colors['ssd'], + type='', + swap='ssd_kpt29'), + 197: + dict( + name='ssd_kpt8', + id=197, + color=colors['ssd'], + type='', + swap='ssd_kpt28'), + 198: + dict( + name='ssd_kpt9', + id=198, + color=colors['ssd'], + type='', + swap='ssd_kpt27'), + 199: + dict( + name='ssd_kpt10', + id=199, + color=colors['ssd'], + type='', + swap='ssd_kpt26'), + 200: + dict( + 
name='ssd_kpt11', + id=200, + color=colors['ssd'], + type='', + swap='ssd_kpt25'), + 201: + dict( + name='ssd_kpt12', + id=201, + color=colors['ssd'], + type='', + swap='ssd_kpt24'), + 202: + dict( + name='ssd_kpt13', + id=202, + color=colors['ssd'], + type='', + swap='ssd_kpt23'), + 203: + dict( + name='ssd_kpt14', + id=203, + color=colors['ssd'], + type='', + swap='ssd_kpt22'), + 204: + dict( + name='ssd_kpt15', + id=204, + color=colors['ssd'], + type='', + swap='ssd_kpt21'), + 205: + dict( + name='ssd_kpt16', + id=205, + color=colors['ssd'], + type='', + swap='ssd_kpt20'), + 206: + dict( + name='ssd_kpt17', + id=206, + color=colors['ssd'], + type='', + swap='ssd_kpt19'), + 207: + dict(name='ssd_kpt18', id=207, color=colors['ssd'], type='', swap=''), + 208: + dict( + name='ssd_kpt19', + id=208, + color=colors['ssd'], + type='', + swap='ssd_kpt17'), + 209: + dict( + name='ssd_kpt20', + id=209, + color=colors['ssd'], + type='', + swap='ssd_kpt16'), + 210: + dict( + name='ssd_kpt21', + id=210, + color=colors['ssd'], + type='', + swap='ssd_kpt15'), + 211: + dict( + name='ssd_kpt22', + id=211, + color=colors['ssd'], + type='', + swap='ssd_kpt14'), + 212: + dict( + name='ssd_kpt23', + id=212, + color=colors['ssd'], + type='', + swap='ssd_kpt13'), + 213: + dict( + name='ssd_kpt24', + id=213, + color=colors['ssd'], + type='', + swap='ssd_kpt12'), + 214: + dict( + name='ssd_kpt25', + id=214, + color=colors['ssd'], + type='', + swap='ssd_kpt11'), + 215: + dict( + name='ssd_kpt26', + id=215, + color=colors['ssd'], + type='', + swap='ssd_kpt10'), + 216: + dict( + name='ssd_kpt27', + id=216, + color=colors['ssd'], + type='', + swap='ssd_kpt9'), + 217: + dict( + name='ssd_kpt28', + id=217, + color=colors['ssd'], + type='', + swap='ssd_kpt8'), + 218: + dict( + name='ssd_kpt29', + id=218, + color=colors['ssd'], + type='', + swap='ssd_kpt7'), + # long_sleeved_dress + 219: + dict(name='lsd_kpt1', id=219, color=colors['lsd'], type='', swap=''), + 220: + dict( + name='lsd_kpt2', + id=220, + color=colors['lsd'], + type='', + swap='lsd_kpt6'), + 221: + dict( + name='lsd_kpt3', + id=221, + color=colors['lsd'], + type='', + swap='lsd_kpt5'), + 222: + dict(name='lsd_kpt4', id=222, color=colors['lsd'], type='', swap=''), + 223: + dict( + name='lsd_kpt5', + id=223, + color=colors['lsd'], + type='', + swap='lsd_kpt3'), + 224: + dict( + name='lsd_kpt6', + id=224, + color=colors['lsd'], + type='', + swap='lsd_kpt2'), + 225: + dict( + name='lsd_kpt7', + id=225, + color=colors['lsd'], + type='', + swap='lsd_kpt37'), + 226: + dict( + name='lsd_kpt8', + id=226, + color=colors['lsd'], + type='', + swap='lsd_kpt36'), + 227: + dict( + name='lsd_kpt9', + id=227, + color=colors['lsd'], + type='', + swap='lsd_kpt35'), + 228: + dict( + name='lsd_kpt10', + id=228, + color=colors['lsd'], + type='', + swap='lsd_kpt34'), + 229: + dict( + name='lsd_kpt11', + id=229, + color=colors['lsd'], + type='', + swap='lsd_kpt33'), + 230: + dict( + name='lsd_kpt12', + id=230, + color=colors['lsd'], + type='', + swap='lsd_kpt32'), + 231: + dict( + name='lsd_kpt13', + id=231, + color=colors['lsd'], + type='', + swap='lsd_kpt31'), + 232: + dict( + name='lsd_kpt14', + id=232, + color=colors['lsd'], + type='', + swap='lsd_kpt30'), + 233: + dict( + name='lsd_kpt15', + id=233, + color=colors['lsd'], + type='', + swap='lsd_kpt29'), + 234: + dict( + name='lsd_kpt16', + id=234, + color=colors['lsd'], + type='', + swap='lsd_kpt28'), + 235: + dict( + name='lsd_kpt17', + id=235, + color=colors['lsd'], + type='', + swap='lsd_kpt27'), + 236: + dict( + 
name='lsd_kpt18', + id=236, + color=colors['lsd'], + type='', + swap='lsd_kpt26'), + 237: + dict( + name='lsd_kpt19', + id=237, + color=colors['lsd'], + type='', + swap='lsd_kpt25'), + 238: + dict( + name='lsd_kpt20', + id=238, + color=colors['lsd'], + type='', + swap='lsd_kpt24'), + 239: + dict( + name='lsd_kpt21', + id=239, + color=colors['lsd'], + type='', + swap='lsd_kpt23'), + 240: + dict(name='lsd_kpt22', id=240, color=colors['lsd'], type='', swap=''), + 241: + dict( + name='lsd_kpt23', + id=241, + color=colors['lsd'], + type='', + swap='lsd_kpt21'), + 242: + dict( + name='lsd_kpt24', + id=242, + color=colors['lsd'], + type='', + swap='lsd_kpt20'), + 243: + dict( + name='lsd_kpt25', + id=243, + color=colors['lsd'], + type='', + swap='lsd_kpt19'), + 244: + dict( + name='lsd_kpt26', + id=244, + color=colors['lsd'], + type='', + swap='lsd_kpt18'), + 245: + dict( + name='lsd_kpt27', + id=245, + color=colors['lsd'], + type='', + swap='lsd_kpt17'), + 246: + dict( + name='lsd_kpt28', + id=246, + color=colors['lsd'], + type='', + swap='lsd_kpt16'), + 247: + dict( + name='lsd_kpt29', + id=247, + color=colors['lsd'], + type='', + swap='lsd_kpt15'), + 248: + dict( + name='lsd_kpt30', + id=248, + color=colors['lsd'], + type='', + swap='lsd_kpt14'), + 249: + dict( + name='lsd_kpt31', + id=249, + color=colors['lsd'], + type='', + swap='lsd_kpt13'), + 250: + dict( + name='lsd_kpt32', + id=250, + color=colors['lsd'], + type='', + swap='lsd_kpt12'), + 251: + dict( + name='lsd_kpt33', + id=251, + color=colors['lsd'], + type='', + swap='lsd_kpt11'), + 252: + dict( + name='lsd_kpt34', + id=252, + color=colors['lsd'], + type='', + swap='lsd_kpt10'), + 253: + dict( + name='lsd_kpt35', + id=253, + color=colors['lsd'], + type='', + swap='lsd_kpt9'), + 254: + dict( + name='lsd_kpt36', + id=254, + color=colors['lsd'], + type='', + swap='lsd_kpt8'), + 255: + dict( + name='lsd_kpt37', + id=255, + color=colors['lsd'], + type='', + swap='lsd_kpt7'), + # vest_dress + 256: + dict(name='vd_kpt1', id=256, color=colors['vd'], type='', swap=''), + 257: + dict( + name='vd_kpt2', + id=257, + color=colors['vd'], + type='', + swap='vd_kpt6'), + 258: + dict( + name='vd_kpt3', + id=258, + color=colors['vd'], + type='', + swap='vd_kpt5'), + 259: + dict(name='vd_kpt4', id=259, color=colors['vd'], type='', swap=''), + 260: + dict( + name='vd_kpt5', + id=260, + color=colors['vd'], + type='', + swap='vd_kpt3'), + 261: + dict( + name='vd_kpt6', + id=261, + color=colors['vd'], + type='', + swap='vd_kpt2'), + 262: + dict( + name='vd_kpt7', + id=262, + color=colors['vd'], + type='', + swap='vd_kpt19'), + 263: + dict( + name='vd_kpt8', + id=263, + color=colors['vd'], + type='', + swap='vd_kpt18'), + 264: + dict( + name='vd_kpt9', + id=264, + color=colors['vd'], + type='', + swap='vd_kpt17'), + 265: + dict( + name='vd_kpt10', + id=265, + color=colors['vd'], + type='', + swap='vd_kpt16'), + 266: + dict( + name='vd_kpt11', + id=266, + color=colors['vd'], + type='', + swap='vd_kpt15'), + 267: + dict( + name='vd_kpt12', + id=267, + color=colors['vd'], + type='', + swap='vd_kpt14'), + 268: + dict(name='vd_kpt13', id=268, color=colors['vd'], type='', swap=''), + 269: + dict( + name='vd_kpt14', + id=269, + color=colors['vd'], + type='', + swap='vd_kpt12'), + 270: + dict( + name='vd_kpt15', + id=270, + color=colors['vd'], + type='', + swap='vd_kpt11'), + 271: + dict( + name='vd_kpt16', + id=271, + color=colors['vd'], + type='', + swap='vd_kpt10'), + 272: + dict( + name='vd_kpt17', + id=272, + color=colors['vd'], + type='', + swap='vd_kpt9'), + 
273: + dict( + name='vd_kpt18', + id=273, + color=colors['vd'], + type='', + swap='vd_kpt8'), + 274: + dict( + name='vd_kpt19', + id=274, + color=colors['vd'], + type='', + swap='vd_kpt7'), + # sling_dress + 275: + dict(name='sd_kpt1', id=275, color=colors['sd'], type='', swap=''), + 276: + dict( + name='sd_kpt2', + id=276, + color=colors['sd'], + type='', + swap='sd_kpt6'), + 277: + dict( + name='sd_kpt3', + id=277, + color=colors['sd'], + type='', + swap='sd_kpt5'), + 278: + dict(name='sd_kpt4', id=278, color=colors['sd'], type='', swap=''), + 279: + dict( + name='sd_kpt5', + id=279, + color=colors['sd'], + type='', + swap='sd_kpt3'), + 280: + dict( + name='sd_kpt6', + id=280, + color=colors['sd'], + type='', + swap='sd_kpt2'), + 281: + dict( + name='sd_kpt7', + id=281, + color=colors['sd'], + type='', + swap='sd_kpt19'), + 282: + dict( + name='sd_kpt8', + id=282, + color=colors['sd'], + type='', + swap='sd_kpt18'), + 283: + dict( + name='sd_kpt9', + id=283, + color=colors['sd'], + type='', + swap='sd_kpt17'), + 284: + dict( + name='sd_kpt10', + id=284, + color=colors['sd'], + type='', + swap='sd_kpt16'), + 285: + dict( + name='sd_kpt11', + id=285, + color=colors['sd'], + type='', + swap='sd_kpt15'), + 286: + dict( + name='sd_kpt12', + id=286, + color=colors['sd'], + type='', + swap='sd_kpt14'), + 287: + dict(name='sd_kpt13', id=287, color=colors['sd'], type='', swap=''), + 288: + dict( + name='sd_kpt14', + id=288, + color=colors['sd'], + type='', + swap='sd_kpt12'), + 289: + dict( + name='sd_kpt15', + id=289, + color=colors['sd'], + type='', + swap='sd_kpt11'), + 290: + dict( + name='sd_kpt16', + id=290, + color=colors['sd'], + type='', + swap='sd_kpt10'), + 291: + dict( + name='sd_kpt17', + id=291, + color=colors['sd'], + type='', + swap='sd_kpt9'), + 292: + dict( + name='sd_kpt18', + id=292, + color=colors['sd'], + type='', + swap='sd_kpt8'), + 293: + dict( + name='sd_kpt19', + id=293, + color=colors['sd'], + type='', + swap='sd_kpt7'), + }, + skeleton_info={ + # short_sleeved_shirt + 0: + dict(link=('sss_kpt1', 'sss_kpt2'), id=0, color=[255, 128, 0]), + 1: + dict(link=('sss_kpt2', 'sss_kpt7'), id=1, color=[255, 128, 0]), + 2: + dict(link=('sss_kpt7', 'sss_kpt8'), id=2, color=[255, 128, 0]), + 3: + dict(link=('sss_kpt8', 'sss_kpt9'), id=3, color=[255, 128, 0]), + 4: + dict(link=('sss_kpt9', 'sss_kpt10'), id=4, color=[255, 128, 0]), + 5: + dict(link=('sss_kpt10', 'sss_kpt11'), id=5, color=[255, 128, 0]), + 6: + dict(link=('sss_kpt11', 'sss_kpt12'), id=6, color=[255, 128, 0]), + 7: + dict(link=('sss_kpt12', 'sss_kpt13'), id=7, color=[255, 128, 0]), + 8: + dict(link=('sss_kpt13', 'sss_kpt14'), id=8, color=[255, 128, 0]), + 9: + dict(link=('sss_kpt14', 'sss_kpt15'), id=9, color=[255, 128, 0]), + 10: + dict(link=('sss_kpt15', 'sss_kpt16'), id=10, color=[255, 128, 0]), + 11: + dict(link=('sss_kpt16', 'sss_kpt17'), id=11, color=[255, 128, 0]), + 12: + dict(link=('sss_kpt17', 'sss_kpt18'), id=12, color=[255, 128, 0]), + 13: + dict(link=('sss_kpt18', 'sss_kpt19'), id=13, color=[255, 128, 0]), + 14: + dict(link=('sss_kpt19', 'sss_kpt20'), id=14, color=[255, 128, 0]), + 15: + dict(link=('sss_kpt20', 'sss_kpt21'), id=15, color=[255, 128, 0]), + 16: + dict(link=('sss_kpt21', 'sss_kpt22'), id=16, color=[255, 128, 0]), + 17: + dict(link=('sss_kpt22', 'sss_kpt23'), id=17, color=[255, 128, 0]), + 18: + dict(link=('sss_kpt23', 'sss_kpt24'), id=18, color=[255, 128, 0]), + 19: + dict(link=('sss_kpt24', 'sss_kpt25'), id=19, color=[255, 128, 0]), + 20: + dict(link=('sss_kpt25', 'sss_kpt6'), id=20, 
color=[255, 128, 0]), + 21: + dict(link=('sss_kpt6', 'sss_kpt1'), id=21, color=[255, 128, 0]), + 22: + dict(link=('sss_kpt2', 'sss_kpt3'), id=22, color=[255, 128, 0]), + 23: + dict(link=('sss_kpt3', 'sss_kpt4'), id=23, color=[255, 128, 0]), + 24: + dict(link=('sss_kpt4', 'sss_kpt5'), id=24, color=[255, 128, 0]), + 25: + dict(link=('sss_kpt5', 'sss_kpt6'), id=25, color=[255, 128, 0]), + # long_sleeve_shirt + 26: + dict(link=('lss_kpt1', 'lss_kpt2'), id=26, color=[255, 0, 128]), + 27: + dict(link=('lss_kpt2', 'lss_kpt7'), id=27, color=[255, 0, 128]), + 28: + dict(link=('lss_kpt7', 'lss_kpt8'), id=28, color=[255, 0, 128]), + 29: + dict(link=('lss_kpt8', 'lss_kpt9'), id=29, color=[255, 0, 128]), + 30: + dict(link=('lss_kpt9', 'lss_kpt10'), id=30, color=[255, 0, 128]), + 31: + dict(link=('lss_kpt10', 'lss_kpt11'), id=31, color=[255, 0, 128]), + 32: + dict(link=('lss_kpt11', 'lss_kpt12'), id=32, color=[255, 0, 128]), + 33: + dict(link=('lss_kpt12', 'lss_kpt13'), id=33, color=[255, 0, 128]), + 34: + dict(link=('lss_kpt13', 'lss_kpt14'), id=34, color=[255, 0, 128]), + 35: + dict(link=('lss_kpt14', 'lss_kpt15'), id=35, color=[255, 0, 128]), + 36: + dict(link=('lss_kpt15', 'lss_kpt16'), id=36, color=[255, 0, 128]), + 37: + dict(link=('lss_kpt16', 'lss_kpt17'), id=37, color=[255, 0, 128]), + 38: + dict(link=('lss_kpt17', 'lss_kpt18'), id=38, color=[255, 0, 128]), + 39: + dict(link=('lss_kpt18', 'lss_kpt19'), id=39, color=[255, 0, 128]), + 40: + dict(link=('lss_kpt19', 'lss_kpt20'), id=40, color=[255, 0, 128]), + 41: + dict(link=('lss_kpt20', 'lss_kpt21'), id=41, color=[255, 0, 128]), + 42: + dict(link=('lss_kpt21', 'lss_kpt22'), id=42, color=[255, 0, 128]), + 43: + dict(link=('lss_kpt22', 'lss_kpt23'), id=43, color=[255, 0, 128]), + 44: + dict(link=('lss_kpt23', 'lss_kpt24'), id=44, color=[255, 0, 128]), + 45: + dict(link=('lss_kpt24', 'lss_kpt25'), id=45, color=[255, 0, 128]), + 46: + dict(link=('lss_kpt25', 'lss_kpt26'), id=46, color=[255, 0, 128]), + 47: + dict(link=('lss_kpt26', 'lss_kpt27'), id=47, color=[255, 0, 128]), + 48: + dict(link=('lss_kpt27', 'lss_kpt28'), id=48, color=[255, 0, 128]), + 49: + dict(link=('lss_kpt28', 'lss_kpt29'), id=49, color=[255, 0, 128]), + 50: + dict(link=('lss_kpt29', 'lss_kpt30'), id=50, color=[255, 0, 128]), + 51: + dict(link=('lss_kpt30', 'lss_kpt31'), id=51, color=[255, 0, 128]), + 52: + dict(link=('lss_kpt31', 'lss_kpt32'), id=52, color=[255, 0, 128]), + 53: + dict(link=('lss_kpt32', 'lss_kpt33'), id=53, color=[255, 0, 128]), + 54: + dict(link=('lss_kpt33', 'lss_kpt6'), id=54, color=[255, 0, 128]), + 55: + dict(link=('lss_kpt6', 'lss_kpt5'), id=55, color=[255, 0, 128]), + 56: + dict(link=('lss_kpt5', 'lss_kpt4'), id=56, color=[255, 0, 128]), + 57: + dict(link=('lss_kpt4', 'lss_kpt3'), id=57, color=[255, 0, 128]), + 58: + dict(link=('lss_kpt3', 'lss_kpt2'), id=58, color=[255, 0, 128]), + 59: + dict(link=('lss_kpt6', 'lss_kpt1'), id=59, color=[255, 0, 128]), + # short_sleeved_outwear + 60: + dict(link=('sso_kpt1', 'sso_kpt4'), id=60, color=[128, 0, 255]), + 61: + dict(link=('sso_kpt4', 'sso_kpt7'), id=61, color=[128, 0, 255]), + 62: + dict(link=('sso_kpt7', 'sso_kpt8'), id=62, color=[128, 0, 255]), + 63: + dict(link=('sso_kpt8', 'sso_kpt9'), id=63, color=[128, 0, 255]), + 64: + dict(link=('sso_kpt9', 'sso_kpt10'), id=64, color=[128, 0, 255]), + 65: + dict(link=('sso_kpt10', 'sso_kpt11'), id=65, color=[128, 0, 255]), + 66: + dict(link=('sso_kpt11', 'sso_kpt12'), id=66, color=[128, 0, 255]), + 67: + dict(link=('sso_kpt12', 'sso_kpt13'), id=67, color=[128, 0, 
255]), + 68: + dict(link=('sso_kpt13', 'sso_kpt14'), id=68, color=[128, 0, 255]), + 69: + dict(link=('sso_kpt14', 'sso_kpt15'), id=69, color=[128, 0, 255]), + 70: + dict(link=('sso_kpt15', 'sso_kpt16'), id=70, color=[128, 0, 255]), + 71: + dict(link=('sso_kpt16', 'sso_kpt31'), id=71, color=[128, 0, 255]), + 72: + dict(link=('sso_kpt31', 'sso_kpt30'), id=72, color=[128, 0, 255]), + 73: + dict(link=('sso_kpt30', 'sso_kpt2'), id=73, color=[128, 0, 255]), + 74: + dict(link=('sso_kpt2', 'sso_kpt3'), id=74, color=[128, 0, 255]), + 75: + dict(link=('sso_kpt3', 'sso_kpt4'), id=75, color=[128, 0, 255]), + 76: + dict(link=('sso_kpt1', 'sso_kpt6'), id=76, color=[128, 0, 255]), + 77: + dict(link=('sso_kpt6', 'sso_kpt25'), id=77, color=[128, 0, 255]), + 78: + dict(link=('sso_kpt25', 'sso_kpt24'), id=78, color=[128, 0, 255]), + 79: + dict(link=('sso_kpt24', 'sso_kpt23'), id=79, color=[128, 0, 255]), + 80: + dict(link=('sso_kpt23', 'sso_kpt22'), id=80, color=[128, 0, 255]), + 81: + dict(link=('sso_kpt22', 'sso_kpt21'), id=81, color=[128, 0, 255]), + 82: + dict(link=('sso_kpt21', 'sso_kpt20'), id=82, color=[128, 0, 255]), + 83: + dict(link=('sso_kpt20', 'sso_kpt19'), id=83, color=[128, 0, 255]), + 84: + dict(link=('sso_kpt19', 'sso_kpt18'), id=84, color=[128, 0, 255]), + 85: + dict(link=('sso_kpt18', 'sso_kpt17'), id=85, color=[128, 0, 255]), + 86: + dict(link=('sso_kpt17', 'sso_kpt29'), id=86, color=[128, 0, 255]), + 87: + dict(link=('sso_kpt29', 'sso_kpt28'), id=87, color=[128, 0, 255]), + 88: + dict(link=('sso_kpt28', 'sso_kpt27'), id=88, color=[128, 0, 255]), + 89: + dict(link=('sso_kpt27', 'sso_kpt26'), id=89, color=[128, 0, 255]), + 90: + dict(link=('sso_kpt26', 'sso_kpt5'), id=90, color=[128, 0, 255]), + 91: + dict(link=('sso_kpt5', 'sso_kpt6'), id=91, color=[128, 0, 255]), + # long_sleeved_outwear + 92: + dict(link=('lso_kpt1', 'lso_kpt2'), id=92, color=[0, 128, 255]), + 93: + dict(link=('lso_kpt2', 'lso_kpt7'), id=93, color=[0, 128, 255]), + 94: + dict(link=('lso_kpt7', 'lso_kpt8'), id=94, color=[0, 128, 255]), + 95: + dict(link=('lso_kpt8', 'lso_kpt9'), id=95, color=[0, 128, 255]), + 96: + dict(link=('lso_kpt9', 'lso_kpt10'), id=96, color=[0, 128, 255]), + 97: + dict(link=('lso_kpt10', 'lso_kpt11'), id=97, color=[0, 128, 255]), + 98: + dict(link=('lso_kpt11', 'lso_kpt12'), id=98, color=[0, 128, 255]), + 99: + dict(link=('lso_kpt12', 'lso_kpt13'), id=99, color=[0, 128, 255]), + 100: + dict(link=('lso_kpt13', 'lso_kpt14'), id=100, color=[0, 128, 255]), + 101: + dict(link=('lso_kpt14', 'lso_kpt15'), id=101, color=[0, 128, 255]), + 102: + dict(link=('lso_kpt15', 'lso_kpt16'), id=102, color=[0, 128, 255]), + 103: + dict(link=('lso_kpt16', 'lso_kpt17'), id=103, color=[0, 128, 255]), + 104: + dict(link=('lso_kpt17', 'lso_kpt18'), id=104, color=[0, 128, 255]), + 105: + dict(link=('lso_kpt18', 'lso_kpt19'), id=105, color=[0, 128, 255]), + 106: + dict(link=('lso_kpt19', 'lso_kpt20'), id=106, color=[0, 128, 255]), + 107: + dict(link=('lso_kpt20', 'lso_kpt39'), id=107, color=[0, 128, 255]), + 108: + dict(link=('lso_kpt39', 'lso_kpt38'), id=108, color=[0, 128, 255]), + 109: + dict(link=('lso_kpt38', 'lso_kpt4'), id=109, color=[0, 128, 255]), + 110: + dict(link=('lso_kpt4', 'lso_kpt3'), id=110, color=[0, 128, 255]), + 111: + dict(link=('lso_kpt3', 'lso_kpt2'), id=111, color=[0, 128, 255]), + 112: + dict(link=('lso_kpt1', 'lso_kpt6'), id=112, color=[0, 128, 255]), + 113: + dict(link=('lso_kpt6', 'lso_kpt33'), id=113, color=[0, 128, 255]), + 114: + dict(link=('lso_kpt33', 'lso_kpt32'), id=114, color=[0, 128, 
255]), + 115: + dict(link=('lso_kpt32', 'lso_kpt31'), id=115, color=[0, 128, 255]), + 116: + dict(link=('lso_kpt31', 'lso_kpt30'), id=116, color=[0, 128, 255]), + 117: + dict(link=('lso_kpt30', 'lso_kpt29'), id=117, color=[0, 128, 255]), + 118: + dict(link=('lso_kpt29', 'lso_kpt28'), id=118, color=[0, 128, 255]), + 119: + dict(link=('lso_kpt28', 'lso_kpt27'), id=119, color=[0, 128, 255]), + 120: + dict(link=('lso_kpt27', 'lso_kpt26'), id=120, color=[0, 128, 255]), + 121: + dict(link=('lso_kpt26', 'lso_kpt25'), id=121, color=[0, 128, 255]), + 122: + dict(link=('lso_kpt25', 'lso_kpt24'), id=122, color=[0, 128, 255]), + 123: + dict(link=('lso_kpt24', 'lso_kpt23'), id=123, color=[0, 128, 255]), + 124: + dict(link=('lso_kpt23', 'lso_kpt22'), id=124, color=[0, 128, 255]), + 125: + dict(link=('lso_kpt22', 'lso_kpt21'), id=125, color=[0, 128, 255]), + 126: + dict(link=('lso_kpt21', 'lso_kpt37'), id=126, color=[0, 128, 255]), + 127: + dict(link=('lso_kpt37', 'lso_kpt36'), id=127, color=[0, 128, 255]), + 128: + dict(link=('lso_kpt36', 'lso_kpt35'), id=128, color=[0, 128, 255]), + 129: + dict(link=('lso_kpt35', 'lso_kpt34'), id=129, color=[0, 128, 255]), + 130: + dict(link=('lso_kpt34', 'lso_kpt5'), id=130, color=[0, 128, 255]), + 131: + dict(link=('lso_kpt5', 'lso_kpt6'), id=131, color=[0, 128, 255]), + # vest + 132: + dict(link=('vest_kpt1', 'vest_kpt2'), id=132, color=[0, 128, 128]), + 133: + dict(link=('vest_kpt2', 'vest_kpt7'), id=133, color=[0, 128, 128]), + 134: + dict(link=('vest_kpt7', 'vest_kpt8'), id=134, color=[0, 128, 128]), + 135: + dict(link=('vest_kpt8', 'vest_kpt9'), id=135, color=[0, 128, 128]), + 136: + dict(link=('vest_kpt9', 'vest_kpt10'), id=136, color=[0, 128, 128]), + 137: + dict(link=('vest_kpt10', 'vest_kpt11'), id=137, color=[0, 128, 128]), + 138: + dict(link=('vest_kpt11', 'vest_kpt12'), id=138, color=[0, 128, 128]), + 139: + dict(link=('vest_kpt12', 'vest_kpt13'), id=139, color=[0, 128, 128]), + 140: + dict(link=('vest_kpt13', 'vest_kpt14'), id=140, color=[0, 128, 128]), + 141: + dict(link=('vest_kpt14', 'vest_kpt15'), id=141, color=[0, 128, 128]), + 142: + dict(link=('vest_kpt15', 'vest_kpt6'), id=142, color=[0, 128, 128]), + 143: + dict(link=('vest_kpt6', 'vest_kpt1'), id=143, color=[0, 128, 128]), + 144: + dict(link=('vest_kpt2', 'vest_kpt3'), id=144, color=[0, 128, 128]), + 145: + dict(link=('vest_kpt3', 'vest_kpt4'), id=145, color=[0, 128, 128]), + 146: + dict(link=('vest_kpt4', 'vest_kpt5'), id=146, color=[0, 128, 128]), + 147: + dict(link=('vest_kpt5', 'vest_kpt6'), id=147, color=[0, 128, 128]), + # sling + 148: + dict(link=('sling_kpt1', 'sling_kpt2'), id=148, color=[0, 0, 128]), + 149: + dict(link=('sling_kpt2', 'sling_kpt8'), id=149, color=[0, 0, 128]), + 150: + dict(link=('sling_kpt8', 'sling_kpt9'), id=150, color=[0, 0, 128]), + 151: + dict(link=('sling_kpt9', 'sling_kpt10'), id=151, color=[0, 0, 128]), + 152: + dict(link=('sling_kpt10', 'sling_kpt11'), id=152, color=[0, 0, 128]), + 153: + dict(link=('sling_kpt11', 'sling_kpt12'), id=153, color=[0, 0, 128]), + 154: + dict(link=('sling_kpt12', 'sling_kpt13'), id=154, color=[0, 0, 128]), + 155: + dict(link=('sling_kpt13', 'sling_kpt14'), id=155, color=[0, 0, 128]), + 156: + dict(link=('sling_kpt14', 'sling_kpt6'), id=156, color=[0, 0, 128]), + 157: + dict(link=('sling_kpt2', 'sling_kpt7'), id=157, color=[0, 0, 128]), + 158: + dict(link=('sling_kpt6', 'sling_kpt15'), id=158, color=[0, 0, 128]), + 159: + dict(link=('sling_kpt2', 'sling_kpt3'), id=159, color=[0, 0, 128]), + 160: + dict(link=('sling_kpt3', 
'sling_kpt4'), id=160, color=[0, 0, 128]), + 161: + dict(link=('sling_kpt4', 'sling_kpt5'), id=161, color=[0, 0, 128]), + 162: + dict(link=('sling_kpt5', 'sling_kpt6'), id=162, color=[0, 0, 128]), + 163: + dict(link=('sling_kpt1', 'sling_kpt6'), id=163, color=[0, 0, 128]), + # shorts + 164: + dict( + link=('shorts_kpt1', 'shorts_kpt4'), id=164, color=[128, 128, + 128]), + 165: + dict( + link=('shorts_kpt4', 'shorts_kpt5'), id=165, color=[128, 128, + 128]), + 166: + dict( + link=('shorts_kpt5', 'shorts_kpt6'), id=166, color=[128, 128, + 128]), + 167: + dict( + link=('shorts_kpt6', 'shorts_kpt7'), id=167, color=[128, 128, + 128]), + 168: + dict( + link=('shorts_kpt7', 'shorts_kpt8'), id=168, color=[128, 128, + 128]), + 169: + dict( + link=('shorts_kpt8', 'shorts_kpt9'), id=169, color=[128, 128, + 128]), + 170: + dict( + link=('shorts_kpt9', 'shorts_kpt10'), + id=170, + color=[128, 128, 128]), + 171: + dict( + link=('shorts_kpt10', 'shorts_kpt3'), + id=171, + color=[128, 128, 128]), + 172: + dict( + link=('shorts_kpt3', 'shorts_kpt2'), id=172, color=[128, 128, + 128]), + 173: + dict( + link=('shorts_kpt2', 'shorts_kpt1'), id=173, color=[128, 128, + 128]), + # trousers + 174: + dict( + link=('trousers_kpt1', 'trousers_kpt4'), + id=174, + color=[128, 0, 128]), + 175: + dict( + link=('trousers_kpt4', 'trousers_kpt5'), + id=175, + color=[128, 0, 128]), + 176: + dict( + link=('trousers_kpt5', 'trousers_kpt6'), + id=176, + color=[128, 0, 128]), + 177: + dict( + link=('trousers_kpt6', 'trousers_kpt7'), + id=177, + color=[128, 0, 128]), + 178: + dict( + link=('trousers_kpt7', 'trousers_kpt8'), + id=178, + color=[128, 0, 128]), + 179: + dict( + link=('trousers_kpt8', 'trousers_kpt9'), + id=179, + color=[128, 0, 128]), + 180: + dict( + link=('trousers_kpt9', 'trousers_kpt10'), + id=180, + color=[128, 0, 128]), + 181: + dict( + link=('trousers_kpt10', 'trousers_kpt11'), + id=181, + color=[128, 0, 128]), + 182: + dict( + link=('trousers_kpt11', 'trousers_kpt12'), + id=182, + color=[128, 0, 128]), + 183: + dict( + link=('trousers_kpt12', 'trousers_kpt13'), + id=183, + color=[128, 0, 128]), + 184: + dict( + link=('trousers_kpt13', 'trousers_kpt14'), + id=184, + color=[128, 0, 128]), + 185: + dict( + link=('trousers_kpt14', 'trousers_kpt3'), + id=185, + color=[128, 0, 128]), + 186: + dict( + link=('trousers_kpt3', 'trousers_kpt2'), + id=186, + color=[128, 0, 128]), + 187: + dict( + link=('trousers_kpt2', 'trousers_kpt1'), + id=187, + color=[128, 0, 128]), + # skirt + 188: + dict(link=('skirt_kpt1', 'skirt_kpt4'), id=188, color=[64, 128, 128]), + 189: + dict(link=('skirt_kpt4', 'skirt_kpt5'), id=189, color=[64, 128, 128]), + 190: + dict(link=('skirt_kpt5', 'skirt_kpt6'), id=190, color=[64, 128, 128]), + 191: + dict(link=('skirt_kpt6', 'skirt_kpt7'), id=191, color=[64, 128, 128]), + 192: + dict(link=('skirt_kpt7', 'skirt_kpt8'), id=192, color=[64, 128, 128]), + 193: + dict(link=('skirt_kpt8', 'skirt_kpt3'), id=193, color=[64, 128, 128]), + 194: + dict(link=('skirt_kpt3', 'skirt_kpt2'), id=194, color=[64, 128, 128]), + 195: + dict(link=('skirt_kpt2', 'skirt_kpt1'), id=195, color=[64, 128, 128]), + # short_sleeved_dress + 196: + dict(link=('ssd_kpt1', 'ssd_kpt2'), id=196, color=[64, 64, 128]), + 197: + dict(link=('ssd_kpt2', 'ssd_kpt7'), id=197, color=[64, 64, 128]), + 198: + dict(link=('ssd_kpt7', 'ssd_kpt8'), id=198, color=[64, 64, 128]), + 199: + dict(link=('ssd_kpt8', 'ssd_kpt9'), id=199, color=[64, 64, 128]), + 200: + dict(link=('ssd_kpt9', 'ssd_kpt10'), id=200, color=[64, 64, 128]), + 201: + 
dict(link=('ssd_kpt10', 'ssd_kpt11'), id=201, color=[64, 64, 128]), + 202: + dict(link=('ssd_kpt11', 'ssd_kpt12'), id=202, color=[64, 64, 128]), + 203: + dict(link=('ssd_kpt12', 'ssd_kpt13'), id=203, color=[64, 64, 128]), + 204: + dict(link=('ssd_kpt13', 'ssd_kpt14'), id=204, color=[64, 64, 128]), + 205: + dict(link=('ssd_kpt14', 'ssd_kpt15'), id=205, color=[64, 64, 128]), + 206: + dict(link=('ssd_kpt15', 'ssd_kpt16'), id=206, color=[64, 64, 128]), + 207: + dict(link=('ssd_kpt16', 'ssd_kpt17'), id=207, color=[64, 64, 128]), + 208: + dict(link=('ssd_kpt17', 'ssd_kpt18'), id=208, color=[64, 64, 128]), + 209: + dict(link=('ssd_kpt18', 'ssd_kpt19'), id=209, color=[64, 64, 128]), + 210: + dict(link=('ssd_kpt19', 'ssd_kpt20'), id=210, color=[64, 64, 128]), + 211: + dict(link=('ssd_kpt20', 'ssd_kpt21'), id=211, color=[64, 64, 128]), + 212: + dict(link=('ssd_kpt21', 'ssd_kpt22'), id=212, color=[64, 64, 128]), + 213: + dict(link=('ssd_kpt22', 'ssd_kpt23'), id=213, color=[64, 64, 128]), + 214: + dict(link=('ssd_kpt23', 'ssd_kpt24'), id=214, color=[64, 64, 128]), + 215: + dict(link=('ssd_kpt24', 'ssd_kpt25'), id=215, color=[64, 64, 128]), + 216: + dict(link=('ssd_kpt25', 'ssd_kpt26'), id=216, color=[64, 64, 128]), + 217: + dict(link=('ssd_kpt26', 'ssd_kpt27'), id=217, color=[64, 64, 128]), + 218: + dict(link=('ssd_kpt27', 'ssd_kpt28'), id=218, color=[64, 64, 128]), + 219: + dict(link=('ssd_kpt28', 'ssd_kpt29'), id=219, color=[64, 64, 128]), + 220: + dict(link=('ssd_kpt29', 'ssd_kpt6'), id=220, color=[64, 64, 128]), + 221: + dict(link=('ssd_kpt6', 'ssd_kpt5'), id=221, color=[64, 64, 128]), + 222: + dict(link=('ssd_kpt5', 'ssd_kpt4'), id=222, color=[64, 64, 128]), + 223: + dict(link=('ssd_kpt4', 'ssd_kpt3'), id=223, color=[64, 64, 128]), + 224: + dict(link=('ssd_kpt3', 'ssd_kpt2'), id=224, color=[64, 64, 128]), + 225: + dict(link=('ssd_kpt6', 'ssd_kpt1'), id=225, color=[64, 64, 128]), + # long_sleeved_dress + 226: + dict(link=('lsd_kpt1', 'lsd_kpt2'), id=226, color=[128, 64, 0]), + 227: + dict(link=('lsd_kpt2', 'lsd_kpt7'), id=227, color=[128, 64, 0]), + 228: + dict(link=('lsd_kpt7', 'lsd_kpt8'), id=228, color=[128, 64, 0]), + 229: + dict(link=('lsd_kpt8', 'lsd_kpt9'), id=229, color=[128, 64, 0]), + 230: + dict(link=('lsd_kpt9', 'lsd_kpt10'), id=230, color=[128, 64, 0]), + 231: + dict(link=('lsd_kpt10', 'lsd_kpt11'), id=231, color=[128, 64, 0]), + 232: + dict(link=('lsd_kpt11', 'lsd_kpt12'), id=232, color=[128, 64, 0]), + 233: + dict(link=('lsd_kpt12', 'lsd_kpt13'), id=233, color=[128, 64, 0]), + 234: + dict(link=('lsd_kpt13', 'lsd_kpt14'), id=234, color=[128, 64, 0]), + 235: + dict(link=('lsd_kpt14', 'lsd_kpt15'), id=235, color=[128, 64, 0]), + 236: + dict(link=('lsd_kpt15', 'lsd_kpt16'), id=236, color=[128, 64, 0]), + 237: + dict(link=('lsd_kpt16', 'lsd_kpt17'), id=237, color=[128, 64, 0]), + 238: + dict(link=('lsd_kpt17', 'lsd_kpt18'), id=238, color=[128, 64, 0]), + 239: + dict(link=('lsd_kpt18', 'lsd_kpt19'), id=239, color=[128, 64, 0]), + 240: + dict(link=('lsd_kpt19', 'lsd_kpt20'), id=240, color=[128, 64, 0]), + 241: + dict(link=('lsd_kpt20', 'lsd_kpt21'), id=241, color=[128, 64, 0]), + 242: + dict(link=('lsd_kpt21', 'lsd_kpt22'), id=242, color=[128, 64, 0]), + 243: + dict(link=('lsd_kpt22', 'lsd_kpt23'), id=243, color=[128, 64, 0]), + 244: + dict(link=('lsd_kpt23', 'lsd_kpt24'), id=244, color=[128, 64, 0]), + 245: + dict(link=('lsd_kpt24', 'lsd_kpt25'), id=245, color=[128, 64, 0]), + 246: + dict(link=('lsd_kpt25', 'lsd_kpt26'), id=246, color=[128, 64, 0]), + 247: + dict(link=('lsd_kpt26', 
'lsd_kpt27'), id=247, color=[128, 64, 0]), + 248: + dict(link=('lsd_kpt27', 'lsd_kpt28'), id=248, color=[128, 64, 0]), + 249: + dict(link=('lsd_kpt28', 'lsd_kpt29'), id=249, color=[128, 64, 0]), + 250: + dict(link=('lsd_kpt29', 'lsd_kpt30'), id=250, color=[128, 64, 0]), + 251: + dict(link=('lsd_kpt30', 'lsd_kpt31'), id=251, color=[128, 64, 0]), + 252: + dict(link=('lsd_kpt31', 'lsd_kpt32'), id=252, color=[128, 64, 0]), + 253: + dict(link=('lsd_kpt32', 'lsd_kpt33'), id=253, color=[128, 64, 0]), + 254: + dict(link=('lsd_kpt33', 'lsd_kpt34'), id=254, color=[128, 64, 0]), + 255: + dict(link=('lsd_kpt34', 'lsd_kpt35'), id=255, color=[128, 64, 0]), + 256: + dict(link=('lsd_kpt35', 'lsd_kpt36'), id=256, color=[128, 64, 0]), + 257: + dict(link=('lsd_kpt36', 'lsd_kpt37'), id=257, color=[128, 64, 0]), + 258: + dict(link=('lsd_kpt37', 'lsd_kpt6'), id=258, color=[128, 64, 0]), + 259: + dict(link=('lsd_kpt6', 'lsd_kpt5'), id=259, color=[128, 64, 0]), + 260: + dict(link=('lsd_kpt5', 'lsd_kpt4'), id=260, color=[128, 64, 0]), + 261: + dict(link=('lsd_kpt4', 'lsd_kpt3'), id=261, color=[128, 64, 0]), + 262: + dict(link=('lsd_kpt3', 'lsd_kpt2'), id=262, color=[128, 64, 0]), + 263: + dict(link=('lsd_kpt6', 'lsd_kpt1'), id=263, color=[128, 64, 0]), + # vest_dress + 264: + dict(link=('vd_kpt1', 'vd_kpt2'), id=264, color=[128, 64, 255]), + 265: + dict(link=('vd_kpt2', 'vd_kpt7'), id=265, color=[128, 64, 255]), + 266: + dict(link=('vd_kpt7', 'vd_kpt8'), id=266, color=[128, 64, 255]), + 267: + dict(link=('vd_kpt8', 'vd_kpt9'), id=267, color=[128, 64, 255]), + 268: + dict(link=('vd_kpt9', 'vd_kpt10'), id=268, color=[128, 64, 255]), + 269: + dict(link=('vd_kpt10', 'vd_kpt11'), id=269, color=[128, 64, 255]), + 270: + dict(link=('vd_kpt11', 'vd_kpt12'), id=270, color=[128, 64, 255]), + 271: + dict(link=('vd_kpt12', 'vd_kpt13'), id=271, color=[128, 64, 255]), + 272: + dict(link=('vd_kpt13', 'vd_kpt14'), id=272, color=[128, 64, 255]), + 273: + dict(link=('vd_kpt14', 'vd_kpt15'), id=273, color=[128, 64, 255]), + 274: + dict(link=('vd_kpt15', 'vd_kpt16'), id=274, color=[128, 64, 255]), + 275: + dict(link=('vd_kpt16', 'vd_kpt17'), id=275, color=[128, 64, 255]), + 276: + dict(link=('vd_kpt17', 'vd_kpt18'), id=276, color=[128, 64, 255]), + 277: + dict(link=('vd_kpt18', 'vd_kpt19'), id=277, color=[128, 64, 255]), + 278: + dict(link=('vd_kpt19', 'vd_kpt6'), id=278, color=[128, 64, 255]), + 279: + dict(link=('vd_kpt6', 'vd_kpt5'), id=279, color=[128, 64, 255]), + 280: + dict(link=('vd_kpt5', 'vd_kpt4'), id=280, color=[128, 64, 255]), + 281: + dict(link=('vd_kpt4', 'vd_kpt3'), id=281, color=[128, 64, 255]), + 282: + dict(link=('vd_kpt3', 'vd_kpt2'), id=282, color=[128, 64, 255]), + 283: + dict(link=('vd_kpt6', 'vd_kpt1'), id=283, color=[128, 64, 255]), + # sling_dress + 284: + dict(link=('sd_kpt1', 'sd_kpt2'), id=284, color=[128, 64, 0]), + 285: + dict(link=('sd_kpt2', 'sd_kpt8'), id=285, color=[128, 64, 0]), + 286: + dict(link=('sd_kpt8', 'sd_kpt9'), id=286, color=[128, 64, 0]), + 287: + dict(link=('sd_kpt9', 'sd_kpt10'), id=287, color=[128, 64, 0]), + 288: + dict(link=('sd_kpt10', 'sd_kpt11'), id=288, color=[128, 64, 0]), + 289: + dict(link=('sd_kpt11', 'sd_kpt12'), id=289, color=[128, 64, 0]), + 290: + dict(link=('sd_kpt12', 'sd_kpt13'), id=290, color=[128, 64, 0]), + 291: + dict(link=('sd_kpt13', 'sd_kpt14'), id=291, color=[128, 64, 0]), + 292: + dict(link=('sd_kpt14', 'sd_kpt15'), id=292, color=[128, 64, 0]), + 293: + dict(link=('sd_kpt15', 'sd_kpt16'), id=293, color=[128, 64, 0]), + 294: + dict(link=('sd_kpt16', 
'sd_kpt17'), id=294, color=[128, 64, 0]), + 295: + dict(link=('sd_kpt17', 'sd_kpt18'), id=295, color=[128, 64, 0]), + 296: + dict(link=('sd_kpt18', 'sd_kpt6'), id=296, color=[128, 64, 0]), + 297: + dict(link=('sd_kpt6', 'sd_kpt5'), id=297, color=[128, 64, 0]), + 298: + dict(link=('sd_kpt5', 'sd_kpt4'), id=298, color=[128, 64, 0]), + 299: + dict(link=('sd_kpt4', 'sd_kpt3'), id=299, color=[128, 64, 0]), + 300: + dict(link=('sd_kpt3', 'sd_kpt2'), id=300, color=[128, 64, 0]), + 301: + dict(link=('sd_kpt2', 'sd_kpt7'), id=301, color=[128, 64, 0]), + 302: + dict(link=('sd_kpt6', 'sd_kpt19'), id=302, color=[128, 64, 0]), + 303: + dict(link=('sd_kpt6', 'sd_kpt1'), id=303, color=[128, 64, 0]), + }, + joint_weights=[1.] * 294, + sigmas=[]) diff --git a/configs/_base_/datasets/deepfashion_full.py b/configs/_base_/datasets/deepfashion_full.py index 4d989069ee..97691273af 100644 --- a/configs/_base_/datasets/deepfashion_full.py +++ b/configs/_base_/datasets/deepfashion_full.py @@ -1,74 +1,74 @@ -dataset_info = dict( - dataset_name='deepfashion_full', - paper_info=dict( - author='Liu, Ziwei and Luo, Ping and Qiu, Shi ' - 'and Wang, Xiaogang and Tang, Xiaoou', - title='DeepFashion: Powering Robust Clothes Recognition ' - 'and Retrieval with Rich Annotations', - container='Proceedings of IEEE Conference on Computer ' - 'Vision and Pattern Recognition (CVPR)', - year='2016', - homepage='http://mmlab.ie.cuhk.edu.hk/projects/' - 'DeepFashion/LandmarkDetection.html', - ), - keypoint_info={ - 0: - dict( - name='left collar', - id=0, - color=[255, 255, 255], - type='', - swap='right collar'), - 1: - dict( - name='right collar', - id=1, - color=[255, 255, 255], - type='', - swap='left collar'), - 2: - dict( - name='left sleeve', - id=2, - color=[255, 255, 255], - type='', - swap='right sleeve'), - 3: - dict( - name='right sleeve', - id=3, - color=[255, 255, 255], - type='', - swap='left sleeve'), - 4: - dict( - name='left waistline', - id=0, - color=[255, 255, 255], - type='', - swap='right waistline'), - 5: - dict( - name='right waistline', - id=1, - color=[255, 255, 255], - type='', - swap='left waistline'), - 6: - dict( - name='left hem', - id=2, - color=[255, 255, 255], - type='', - swap='right hem'), - 7: - dict( - name='right hem', - id=3, - color=[255, 255, 255], - type='', - swap='left hem'), - }, - skeleton_info={}, - joint_weights=[1.] 
* 8, - sigmas=[]) +dataset_info = dict( + dataset_name='deepfashion_full', + paper_info=dict( + author='Liu, Ziwei and Luo, Ping and Qiu, Shi ' + 'and Wang, Xiaogang and Tang, Xiaoou', + title='DeepFashion: Powering Robust Clothes Recognition ' + 'and Retrieval with Rich Annotations', + container='Proceedings of IEEE Conference on Computer ' + 'Vision and Pattern Recognition (CVPR)', + year='2016', + homepage='http://mmlab.ie.cuhk.edu.hk/projects/' + 'DeepFashion/LandmarkDetection.html', + ), + keypoint_info={ + 0: + dict( + name='left collar', + id=0, + color=[255, 255, 255], + type='', + swap='right collar'), + 1: + dict( + name='right collar', + id=1, + color=[255, 255, 255], + type='', + swap='left collar'), + 2: + dict( + name='left sleeve', + id=2, + color=[255, 255, 255], + type='', + swap='right sleeve'), + 3: + dict( + name='right sleeve', + id=3, + color=[255, 255, 255], + type='', + swap='left sleeve'), + 4: + dict( + name='left waistline', + id=0, + color=[255, 255, 255], + type='', + swap='right waistline'), + 5: + dict( + name='right waistline', + id=1, + color=[255, 255, 255], + type='', + swap='left waistline'), + 6: + dict( + name='left hem', + id=2, + color=[255, 255, 255], + type='', + swap='right hem'), + 7: + dict( + name='right hem', + id=3, + color=[255, 255, 255], + type='', + swap='left hem'), + }, + skeleton_info={}, + joint_weights=[1.] * 8, + sigmas=[]) diff --git a/configs/_base_/datasets/deepfashion_lower.py b/configs/_base_/datasets/deepfashion_lower.py index db014a1747..65995e1e80 100644 --- a/configs/_base_/datasets/deepfashion_lower.py +++ b/configs/_base_/datasets/deepfashion_lower.py @@ -1,46 +1,46 @@ -dataset_info = dict( - dataset_name='deepfashion_lower', - paper_info=dict( - author='Liu, Ziwei and Luo, Ping and Qiu, Shi ' - 'and Wang, Xiaogang and Tang, Xiaoou', - title='DeepFashion: Powering Robust Clothes Recognition ' - 'and Retrieval with Rich Annotations', - container='Proceedings of IEEE Conference on Computer ' - 'Vision and Pattern Recognition (CVPR)', - year='2016', - homepage='http://mmlab.ie.cuhk.edu.hk/projects/' - 'DeepFashion/LandmarkDetection.html', - ), - keypoint_info={ - 0: - dict( - name='left waistline', - id=0, - color=[255, 255, 255], - type='', - swap='right waistline'), - 1: - dict( - name='right waistline', - id=1, - color=[255, 255, 255], - type='', - swap='left waistline'), - 2: - dict( - name='left hem', - id=2, - color=[255, 255, 255], - type='', - swap='right hem'), - 3: - dict( - name='right hem', - id=3, - color=[255, 255, 255], - type='', - swap='left hem'), - }, - skeleton_info={}, - joint_weights=[1.] 
* 4, - sigmas=[]) +dataset_info = dict( + dataset_name='deepfashion_lower', + paper_info=dict( + author='Liu, Ziwei and Luo, Ping and Qiu, Shi ' + 'and Wang, Xiaogang and Tang, Xiaoou', + title='DeepFashion: Powering Robust Clothes Recognition ' + 'and Retrieval with Rich Annotations', + container='Proceedings of IEEE Conference on Computer ' + 'Vision and Pattern Recognition (CVPR)', + year='2016', + homepage='http://mmlab.ie.cuhk.edu.hk/projects/' + 'DeepFashion/LandmarkDetection.html', + ), + keypoint_info={ + 0: + dict( + name='left waistline', + id=0, + color=[255, 255, 255], + type='', + swap='right waistline'), + 1: + dict( + name='right waistline', + id=1, + color=[255, 255, 255], + type='', + swap='left waistline'), + 2: + dict( + name='left hem', + id=2, + color=[255, 255, 255], + type='', + swap='right hem'), + 3: + dict( + name='right hem', + id=3, + color=[255, 255, 255], + type='', + swap='left hem'), + }, + skeleton_info={}, + joint_weights=[1.] * 4, + sigmas=[]) diff --git a/configs/_base_/datasets/deepfashion_upper.py b/configs/_base_/datasets/deepfashion_upper.py index f0b012fd37..4f34e2aeb7 100644 --- a/configs/_base_/datasets/deepfashion_upper.py +++ b/configs/_base_/datasets/deepfashion_upper.py @@ -1,60 +1,60 @@ -dataset_info = dict( - dataset_name='deepfashion_upper', - paper_info=dict( - author='Liu, Ziwei and Luo, Ping and Qiu, Shi ' - 'and Wang, Xiaogang and Tang, Xiaoou', - title='DeepFashion: Powering Robust Clothes Recognition ' - 'and Retrieval with Rich Annotations', - container='Proceedings of IEEE Conference on Computer ' - 'Vision and Pattern Recognition (CVPR)', - year='2016', - homepage='http://mmlab.ie.cuhk.edu.hk/projects/' - 'DeepFashion/LandmarkDetection.html', - ), - keypoint_info={ - 0: - dict( - name='left collar', - id=0, - color=[255, 255, 255], - type='', - swap='right collar'), - 1: - dict( - name='right collar', - id=1, - color=[255, 255, 255], - type='', - swap='left collar'), - 2: - dict( - name='left sleeve', - id=2, - color=[255, 255, 255], - type='', - swap='right sleeve'), - 3: - dict( - name='right sleeve', - id=3, - color=[255, 255, 255], - type='', - swap='left sleeve'), - 4: - dict( - name='left hem', - id=4, - color=[255, 255, 255], - type='', - swap='right hem'), - 5: - dict( - name='right hem', - id=5, - color=[255, 255, 255], - type='', - swap='left hem'), - }, - skeleton_info={}, - joint_weights=[1.] 
* 6, - sigmas=[]) +dataset_info = dict( + dataset_name='deepfashion_upper', + paper_info=dict( + author='Liu, Ziwei and Luo, Ping and Qiu, Shi ' + 'and Wang, Xiaogang and Tang, Xiaoou', + title='DeepFashion: Powering Robust Clothes Recognition ' + 'and Retrieval with Rich Annotations', + container='Proceedings of IEEE Conference on Computer ' + 'Vision and Pattern Recognition (CVPR)', + year='2016', + homepage='http://mmlab.ie.cuhk.edu.hk/projects/' + 'DeepFashion/LandmarkDetection.html', + ), + keypoint_info={ + 0: + dict( + name='left collar', + id=0, + color=[255, 255, 255], + type='', + swap='right collar'), + 1: + dict( + name='right collar', + id=1, + color=[255, 255, 255], + type='', + swap='left collar'), + 2: + dict( + name='left sleeve', + id=2, + color=[255, 255, 255], + type='', + swap='right sleeve'), + 3: + dict( + name='right sleeve', + id=3, + color=[255, 255, 255], + type='', + swap='left sleeve'), + 4: + dict( + name='left hem', + id=4, + color=[255, 255, 255], + type='', + swap='right hem'), + 5: + dict( + name='right hem', + id=5, + color=[255, 255, 255], + type='', + swap='left hem'), + }, + skeleton_info={}, + joint_weights=[1.] * 6, + sigmas=[]) diff --git a/configs/_base_/datasets/fly.py b/configs/_base_/datasets/fly.py index 5f94ff57ca..46386b630a 100644 --- a/configs/_base_/datasets/fly.py +++ b/configs/_base_/datasets/fly.py @@ -1,237 +1,237 @@ -dataset_info = dict( - dataset_name='fly', - paper_info=dict( - author='Pereira, Talmo D and Aldarondo, Diego E and ' - 'Willmore, Lindsay and Kislin, Mikhail and ' - 'Wang, Samuel S-H and Murthy, Mala and Shaevitz, Joshua W', - title='Fast animal pose estimation using deep neural networks', - container='Nature methods', - year='2019', - homepage='https://github.com/jgraving/DeepPoseKit-Data', - ), - keypoint_info={ - 0: - dict(name='head', id=0, color=[255, 255, 255], type='', swap=''), - 1: - dict(name='eyeL', id=1, color=[255, 255, 255], type='', swap='eyeR'), - 2: - dict(name='eyeR', id=2, color=[255, 255, 255], type='', swap='eyeL'), - 3: - dict(name='neck', id=3, color=[255, 255, 255], type='', swap=''), - 4: - dict(name='thorax', id=4, color=[255, 255, 255], type='', swap=''), - 5: - dict(name='abdomen', id=5, color=[255, 255, 255], type='', swap=''), - 6: - dict( - name='forelegR1', - id=6, - color=[255, 255, 255], - type='', - swap='forelegL1'), - 7: - dict( - name='forelegR2', - id=7, - color=[255, 255, 255], - type='', - swap='forelegL2'), - 8: - dict( - name='forelegR3', - id=8, - color=[255, 255, 255], - type='', - swap='forelegL3'), - 9: - dict( - name='forelegR4', - id=9, - color=[255, 255, 255], - type='', - swap='forelegL4'), - 10: - dict( - name='midlegR1', - id=10, - color=[255, 255, 255], - type='', - swap='midlegL1'), - 11: - dict( - name='midlegR2', - id=11, - color=[255, 255, 255], - type='', - swap='midlegL2'), - 12: - dict( - name='midlegR3', - id=12, - color=[255, 255, 255], - type='', - swap='midlegL3'), - 13: - dict( - name='midlegR4', - id=13, - color=[255, 255, 255], - type='', - swap='midlegL4'), - 14: - dict( - name='hindlegR1', - id=14, - color=[255, 255, 255], - type='', - swap='hindlegL1'), - 15: - dict( - name='hindlegR2', - id=15, - color=[255, 255, 255], - type='', - swap='hindlegL2'), - 16: - dict( - name='hindlegR3', - id=16, - color=[255, 255, 255], - type='', - swap='hindlegL3'), - 17: - dict( - name='hindlegR4', - id=17, - color=[255, 255, 255], - type='', - swap='hindlegL4'), - 18: - dict( - name='forelegL1', - id=18, - color=[255, 255, 255], - type='', - swap='forelegR1'), - 
19: - dict( - name='forelegL2', - id=19, - color=[255, 255, 255], - type='', - swap='forelegR2'), - 20: - dict( - name='forelegL3', - id=20, - color=[255, 255, 255], - type='', - swap='forelegR3'), - 21: - dict( - name='forelegL4', - id=21, - color=[255, 255, 255], - type='', - swap='forelegR4'), - 22: - dict( - name='midlegL1', - id=22, - color=[255, 255, 255], - type='', - swap='midlegR1'), - 23: - dict( - name='midlegL2', - id=23, - color=[255, 255, 255], - type='', - swap='midlegR2'), - 24: - dict( - name='midlegL3', - id=24, - color=[255, 255, 255], - type='', - swap='midlegR3'), - 25: - dict( - name='midlegL4', - id=25, - color=[255, 255, 255], - type='', - swap='midlegR4'), - 26: - dict( - name='hindlegL1', - id=26, - color=[255, 255, 255], - type='', - swap='hindlegR1'), - 27: - dict( - name='hindlegL2', - id=27, - color=[255, 255, 255], - type='', - swap='hindlegR2'), - 28: - dict( - name='hindlegL3', - id=28, - color=[255, 255, 255], - type='', - swap='hindlegR3'), - 29: - dict( - name='hindlegL4', - id=29, - color=[255, 255, 255], - type='', - swap='hindlegR4'), - 30: - dict( - name='wingL', id=30, color=[255, 255, 255], type='', swap='wingR'), - 31: - dict( - name='wingR', id=31, color=[255, 255, 255], type='', swap='wingL'), - }, - skeleton_info={ - 0: dict(link=('eyeL', 'head'), id=0, color=[255, 255, 255]), - 1: dict(link=('eyeR', 'head'), id=1, color=[255, 255, 255]), - 2: dict(link=('neck', 'head'), id=2, color=[255, 255, 255]), - 3: dict(link=('thorax', 'neck'), id=3, color=[255, 255, 255]), - 4: dict(link=('abdomen', 'thorax'), id=4, color=[255, 255, 255]), - 5: dict(link=('forelegR2', 'forelegR1'), id=5, color=[255, 255, 255]), - 6: dict(link=('forelegR3', 'forelegR2'), id=6, color=[255, 255, 255]), - 7: dict(link=('forelegR4', 'forelegR3'), id=7, color=[255, 255, 255]), - 8: dict(link=('midlegR2', 'midlegR1'), id=8, color=[255, 255, 255]), - 9: dict(link=('midlegR3', 'midlegR2'), id=9, color=[255, 255, 255]), - 10: dict(link=('midlegR4', 'midlegR3'), id=10, color=[255, 255, 255]), - 11: - dict(link=('hindlegR2', 'hindlegR1'), id=11, color=[255, 255, 255]), - 12: - dict(link=('hindlegR3', 'hindlegR2'), id=12, color=[255, 255, 255]), - 13: - dict(link=('hindlegR4', 'hindlegR3'), id=13, color=[255, 255, 255]), - 14: - dict(link=('forelegL2', 'forelegL1'), id=14, color=[255, 255, 255]), - 15: - dict(link=('forelegL3', 'forelegL2'), id=15, color=[255, 255, 255]), - 16: - dict(link=('forelegL4', 'forelegL3'), id=16, color=[255, 255, 255]), - 17: dict(link=('midlegL2', 'midlegL1'), id=17, color=[255, 255, 255]), - 18: dict(link=('midlegL3', 'midlegL2'), id=18, color=[255, 255, 255]), - 19: dict(link=('midlegL4', 'midlegL3'), id=19, color=[255, 255, 255]), - 20: - dict(link=('hindlegL2', 'hindlegL1'), id=20, color=[255, 255, 255]), - 21: - dict(link=('hindlegL3', 'hindlegL2'), id=21, color=[255, 255, 255]), - 22: - dict(link=('hindlegL4', 'hindlegL3'), id=22, color=[255, 255, 255]), - 23: dict(link=('wingL', 'neck'), id=23, color=[255, 255, 255]), - 24: dict(link=('wingR', 'neck'), id=24, color=[255, 255, 255]) - }, - joint_weights=[1.] 
* 32, - sigmas=[]) +dataset_info = dict( + dataset_name='fly', + paper_info=dict( + author='Pereira, Talmo D and Aldarondo, Diego E and ' + 'Willmore, Lindsay and Kislin, Mikhail and ' + 'Wang, Samuel S-H and Murthy, Mala and Shaevitz, Joshua W', + title='Fast animal pose estimation using deep neural networks', + container='Nature methods', + year='2019', + homepage='https://github.com/jgraving/DeepPoseKit-Data', + ), + keypoint_info={ + 0: + dict(name='head', id=0, color=[255, 255, 255], type='', swap=''), + 1: + dict(name='eyeL', id=1, color=[255, 255, 255], type='', swap='eyeR'), + 2: + dict(name='eyeR', id=2, color=[255, 255, 255], type='', swap='eyeL'), + 3: + dict(name='neck', id=3, color=[255, 255, 255], type='', swap=''), + 4: + dict(name='thorax', id=4, color=[255, 255, 255], type='', swap=''), + 5: + dict(name='abdomen', id=5, color=[255, 255, 255], type='', swap=''), + 6: + dict( + name='forelegR1', + id=6, + color=[255, 255, 255], + type='', + swap='forelegL1'), + 7: + dict( + name='forelegR2', + id=7, + color=[255, 255, 255], + type='', + swap='forelegL2'), + 8: + dict( + name='forelegR3', + id=8, + color=[255, 255, 255], + type='', + swap='forelegL3'), + 9: + dict( + name='forelegR4', + id=9, + color=[255, 255, 255], + type='', + swap='forelegL4'), + 10: + dict( + name='midlegR1', + id=10, + color=[255, 255, 255], + type='', + swap='midlegL1'), + 11: + dict( + name='midlegR2', + id=11, + color=[255, 255, 255], + type='', + swap='midlegL2'), + 12: + dict( + name='midlegR3', + id=12, + color=[255, 255, 255], + type='', + swap='midlegL3'), + 13: + dict( + name='midlegR4', + id=13, + color=[255, 255, 255], + type='', + swap='midlegL4'), + 14: + dict( + name='hindlegR1', + id=14, + color=[255, 255, 255], + type='', + swap='hindlegL1'), + 15: + dict( + name='hindlegR2', + id=15, + color=[255, 255, 255], + type='', + swap='hindlegL2'), + 16: + dict( + name='hindlegR3', + id=16, + color=[255, 255, 255], + type='', + swap='hindlegL3'), + 17: + dict( + name='hindlegR4', + id=17, + color=[255, 255, 255], + type='', + swap='hindlegL4'), + 18: + dict( + name='forelegL1', + id=18, + color=[255, 255, 255], + type='', + swap='forelegR1'), + 19: + dict( + name='forelegL2', + id=19, + color=[255, 255, 255], + type='', + swap='forelegR2'), + 20: + dict( + name='forelegL3', + id=20, + color=[255, 255, 255], + type='', + swap='forelegR3'), + 21: + dict( + name='forelegL4', + id=21, + color=[255, 255, 255], + type='', + swap='forelegR4'), + 22: + dict( + name='midlegL1', + id=22, + color=[255, 255, 255], + type='', + swap='midlegR1'), + 23: + dict( + name='midlegL2', + id=23, + color=[255, 255, 255], + type='', + swap='midlegR2'), + 24: + dict( + name='midlegL3', + id=24, + color=[255, 255, 255], + type='', + swap='midlegR3'), + 25: + dict( + name='midlegL4', + id=25, + color=[255, 255, 255], + type='', + swap='midlegR4'), + 26: + dict( + name='hindlegL1', + id=26, + color=[255, 255, 255], + type='', + swap='hindlegR1'), + 27: + dict( + name='hindlegL2', + id=27, + color=[255, 255, 255], + type='', + swap='hindlegR2'), + 28: + dict( + name='hindlegL3', + id=28, + color=[255, 255, 255], + type='', + swap='hindlegR3'), + 29: + dict( + name='hindlegL4', + id=29, + color=[255, 255, 255], + type='', + swap='hindlegR4'), + 30: + dict( + name='wingL', id=30, color=[255, 255, 255], type='', swap='wingR'), + 31: + dict( + name='wingR', id=31, color=[255, 255, 255], type='', swap='wingL'), + }, + skeleton_info={ + 0: dict(link=('eyeL', 'head'), id=0, color=[255, 255, 255]), + 1: dict(link=('eyeR', 'head'), 
id=1, color=[255, 255, 255]), + 2: dict(link=('neck', 'head'), id=2, color=[255, 255, 255]), + 3: dict(link=('thorax', 'neck'), id=3, color=[255, 255, 255]), + 4: dict(link=('abdomen', 'thorax'), id=4, color=[255, 255, 255]), + 5: dict(link=('forelegR2', 'forelegR1'), id=5, color=[255, 255, 255]), + 6: dict(link=('forelegR3', 'forelegR2'), id=6, color=[255, 255, 255]), + 7: dict(link=('forelegR4', 'forelegR3'), id=7, color=[255, 255, 255]), + 8: dict(link=('midlegR2', 'midlegR1'), id=8, color=[255, 255, 255]), + 9: dict(link=('midlegR3', 'midlegR2'), id=9, color=[255, 255, 255]), + 10: dict(link=('midlegR4', 'midlegR3'), id=10, color=[255, 255, 255]), + 11: + dict(link=('hindlegR2', 'hindlegR1'), id=11, color=[255, 255, 255]), + 12: + dict(link=('hindlegR3', 'hindlegR2'), id=12, color=[255, 255, 255]), + 13: + dict(link=('hindlegR4', 'hindlegR3'), id=13, color=[255, 255, 255]), + 14: + dict(link=('forelegL2', 'forelegL1'), id=14, color=[255, 255, 255]), + 15: + dict(link=('forelegL3', 'forelegL2'), id=15, color=[255, 255, 255]), + 16: + dict(link=('forelegL4', 'forelegL3'), id=16, color=[255, 255, 255]), + 17: dict(link=('midlegL2', 'midlegL1'), id=17, color=[255, 255, 255]), + 18: dict(link=('midlegL3', 'midlegL2'), id=18, color=[255, 255, 255]), + 19: dict(link=('midlegL4', 'midlegL3'), id=19, color=[255, 255, 255]), + 20: + dict(link=('hindlegL2', 'hindlegL1'), id=20, color=[255, 255, 255]), + 21: + dict(link=('hindlegL3', 'hindlegL2'), id=21, color=[255, 255, 255]), + 22: + dict(link=('hindlegL4', 'hindlegL3'), id=22, color=[255, 255, 255]), + 23: dict(link=('wingL', 'neck'), id=23, color=[255, 255, 255]), + 24: dict(link=('wingR', 'neck'), id=24, color=[255, 255, 255]) + }, + joint_weights=[1.] * 32, + sigmas=[]) diff --git a/configs/_base_/datasets/freihand2d.py b/configs/_base_/datasets/freihand2d.py index 8b960d10f3..ae04742632 100644 --- a/configs/_base_/datasets/freihand2d.py +++ b/configs/_base_/datasets/freihand2d.py @@ -1,144 +1,144 @@ -dataset_info = dict( - dataset_name='freihand', - paper_info=dict( - author='Zimmermann, Christian and Ceylan, Duygu and ' - 'Yang, Jimei and Russell, Bryan and ' - 'Argus, Max and Brox, Thomas', - title='Freihand: A dataset for markerless capture of hand pose ' - 'and shape from single rgb images', - container='Proceedings of the IEEE International ' - 'Conference on Computer Vision', - year='2019', - homepage='https://lmb.informatik.uni-freiburg.de/projects/freihand/', - ), - keypoint_info={ - 0: - dict(name='wrist', id=0, color=[255, 255, 255], type='', swap=''), - 1: - dict(name='thumb1', id=1, color=[255, 128, 0], type='', swap=''), - 2: - dict(name='thumb2', id=2, color=[255, 128, 0], type='', swap=''), - 3: - dict(name='thumb3', id=3, color=[255, 128, 0], type='', swap=''), - 4: - dict(name='thumb4', id=4, color=[255, 128, 0], type='', swap=''), - 5: - dict( - name='forefinger1', id=5, color=[255, 153, 255], type='', swap=''), - 6: - dict( - name='forefinger2', id=6, color=[255, 153, 255], type='', swap=''), - 7: - dict( - name='forefinger3', id=7, color=[255, 153, 255], type='', swap=''), - 8: - dict( - name='forefinger4', id=8, color=[255, 153, 255], type='', swap=''), - 9: - dict( - name='middle_finger1', - id=9, - color=[102, 178, 255], - type='', - swap=''), - 10: - dict( - name='middle_finger2', - id=10, - color=[102, 178, 255], - type='', - swap=''), - 11: - dict( - name='middle_finger3', - id=11, - color=[102, 178, 255], - type='', - swap=''), - 12: - dict( - name='middle_finger4', - id=12, - color=[102, 178, 255], - type='', - 
swap=''), - 13: - dict( - name='ring_finger1', id=13, color=[255, 51, 51], type='', swap=''), - 14: - dict( - name='ring_finger2', id=14, color=[255, 51, 51], type='', swap=''), - 15: - dict( - name='ring_finger3', id=15, color=[255, 51, 51], type='', swap=''), - 16: - dict( - name='ring_finger4', id=16, color=[255, 51, 51], type='', swap=''), - 17: - dict(name='pinky_finger1', id=17, color=[0, 255, 0], type='', swap=''), - 18: - dict(name='pinky_finger2', id=18, color=[0, 255, 0], type='', swap=''), - 19: - dict(name='pinky_finger3', id=19, color=[0, 255, 0], type='', swap=''), - 20: - dict(name='pinky_finger4', id=20, color=[0, 255, 0], type='', swap='') - }, - skeleton_info={ - 0: - dict(link=('wrist', 'thumb1'), id=0, color=[255, 128, 0]), - 1: - dict(link=('thumb1', 'thumb2'), id=1, color=[255, 128, 0]), - 2: - dict(link=('thumb2', 'thumb3'), id=2, color=[255, 128, 0]), - 3: - dict(link=('thumb3', 'thumb4'), id=3, color=[255, 128, 0]), - 4: - dict(link=('wrist', 'forefinger1'), id=4, color=[255, 153, 255]), - 5: - dict(link=('forefinger1', 'forefinger2'), id=5, color=[255, 153, 255]), - 6: - dict(link=('forefinger2', 'forefinger3'), id=6, color=[255, 153, 255]), - 7: - dict(link=('forefinger3', 'forefinger4'), id=7, color=[255, 153, 255]), - 8: - dict(link=('wrist', 'middle_finger1'), id=8, color=[102, 178, 255]), - 9: - dict( - link=('middle_finger1', 'middle_finger2'), - id=9, - color=[102, 178, 255]), - 10: - dict( - link=('middle_finger2', 'middle_finger3'), - id=10, - color=[102, 178, 255]), - 11: - dict( - link=('middle_finger3', 'middle_finger4'), - id=11, - color=[102, 178, 255]), - 12: - dict(link=('wrist', 'ring_finger1'), id=12, color=[255, 51, 51]), - 13: - dict( - link=('ring_finger1', 'ring_finger2'), id=13, color=[255, 51, 51]), - 14: - dict( - link=('ring_finger2', 'ring_finger3'), id=14, color=[255, 51, 51]), - 15: - dict( - link=('ring_finger3', 'ring_finger4'), id=15, color=[255, 51, 51]), - 16: - dict(link=('wrist', 'pinky_finger1'), id=16, color=[0, 255, 0]), - 17: - dict( - link=('pinky_finger1', 'pinky_finger2'), id=17, color=[0, 255, 0]), - 18: - dict( - link=('pinky_finger2', 'pinky_finger3'), id=18, color=[0, 255, 0]), - 19: - dict( - link=('pinky_finger3', 'pinky_finger4'), id=19, color=[0, 255, 0]) - }, - joint_weights=[1.] 
* 21, - sigmas=[]) +dataset_info = dict( + dataset_name='freihand', + paper_info=dict( + author='Zimmermann, Christian and Ceylan, Duygu and ' + 'Yang, Jimei and Russell, Bryan and ' + 'Argus, Max and Brox, Thomas', + title='Freihand: A dataset for markerless capture of hand pose ' + 'and shape from single rgb images', + container='Proceedings of the IEEE International ' + 'Conference on Computer Vision', + year='2019', + homepage='https://lmb.informatik.uni-freiburg.de/projects/freihand/', + ), + keypoint_info={ + 0: + dict(name='wrist', id=0, color=[255, 255, 255], type='', swap=''), + 1: + dict(name='thumb1', id=1, color=[255, 128, 0], type='', swap=''), + 2: + dict(name='thumb2', id=2, color=[255, 128, 0], type='', swap=''), + 3: + dict(name='thumb3', id=3, color=[255, 128, 0], type='', swap=''), + 4: + dict(name='thumb4', id=4, color=[255, 128, 0], type='', swap=''), + 5: + dict( + name='forefinger1', id=5, color=[255, 153, 255], type='', swap=''), + 6: + dict( + name='forefinger2', id=6, color=[255, 153, 255], type='', swap=''), + 7: + dict( + name='forefinger3', id=7, color=[255, 153, 255], type='', swap=''), + 8: + dict( + name='forefinger4', id=8, color=[255, 153, 255], type='', swap=''), + 9: + dict( + name='middle_finger1', + id=9, + color=[102, 178, 255], + type='', + swap=''), + 10: + dict( + name='middle_finger2', + id=10, + color=[102, 178, 255], + type='', + swap=''), + 11: + dict( + name='middle_finger3', + id=11, + color=[102, 178, 255], + type='', + swap=''), + 12: + dict( + name='middle_finger4', + id=12, + color=[102, 178, 255], + type='', + swap=''), + 13: + dict( + name='ring_finger1', id=13, color=[255, 51, 51], type='', swap=''), + 14: + dict( + name='ring_finger2', id=14, color=[255, 51, 51], type='', swap=''), + 15: + dict( + name='ring_finger3', id=15, color=[255, 51, 51], type='', swap=''), + 16: + dict( + name='ring_finger4', id=16, color=[255, 51, 51], type='', swap=''), + 17: + dict(name='pinky_finger1', id=17, color=[0, 255, 0], type='', swap=''), + 18: + dict(name='pinky_finger2', id=18, color=[0, 255, 0], type='', swap=''), + 19: + dict(name='pinky_finger3', id=19, color=[0, 255, 0], type='', swap=''), + 20: + dict(name='pinky_finger4', id=20, color=[0, 255, 0], type='', swap='') + }, + skeleton_info={ + 0: + dict(link=('wrist', 'thumb1'), id=0, color=[255, 128, 0]), + 1: + dict(link=('thumb1', 'thumb2'), id=1, color=[255, 128, 0]), + 2: + dict(link=('thumb2', 'thumb3'), id=2, color=[255, 128, 0]), + 3: + dict(link=('thumb3', 'thumb4'), id=3, color=[255, 128, 0]), + 4: + dict(link=('wrist', 'forefinger1'), id=4, color=[255, 153, 255]), + 5: + dict(link=('forefinger1', 'forefinger2'), id=5, color=[255, 153, 255]), + 6: + dict(link=('forefinger2', 'forefinger3'), id=6, color=[255, 153, 255]), + 7: + dict(link=('forefinger3', 'forefinger4'), id=7, color=[255, 153, 255]), + 8: + dict(link=('wrist', 'middle_finger1'), id=8, color=[102, 178, 255]), + 9: + dict( + link=('middle_finger1', 'middle_finger2'), + id=9, + color=[102, 178, 255]), + 10: + dict( + link=('middle_finger2', 'middle_finger3'), + id=10, + color=[102, 178, 255]), + 11: + dict( + link=('middle_finger3', 'middle_finger4'), + id=11, + color=[102, 178, 255]), + 12: + dict(link=('wrist', 'ring_finger1'), id=12, color=[255, 51, 51]), + 13: + dict( + link=('ring_finger1', 'ring_finger2'), id=13, color=[255, 51, 51]), + 14: + dict( + link=('ring_finger2', 'ring_finger3'), id=14, color=[255, 51, 51]), + 15: + dict( + link=('ring_finger3', 'ring_finger4'), id=15, color=[255, 51, 51]), + 16: + 
dict(link=('wrist', 'pinky_finger1'), id=16, color=[0, 255, 0]), + 17: + dict( + link=('pinky_finger1', 'pinky_finger2'), id=17, color=[0, 255, 0]), + 18: + dict( + link=('pinky_finger2', 'pinky_finger3'), id=18, color=[0, 255, 0]), + 19: + dict( + link=('pinky_finger3', 'pinky_finger4'), id=19, color=[0, 255, 0]) + }, + joint_weights=[1.] * 21, + sigmas=[]) diff --git a/configs/_base_/datasets/h36m.py b/configs/_base_/datasets/h36m.py index 00a719d8b1..f6be31f404 100644 --- a/configs/_base_/datasets/h36m.py +++ b/configs/_base_/datasets/h36m.py @@ -1,152 +1,152 @@ -dataset_info = dict( - dataset_name='h36m', - paper_info=dict( - author='Ionescu, Catalin and Papava, Dragos and ' - 'Olaru, Vlad and Sminchisescu, Cristian', - title='Human3.6M: Large Scale Datasets and Predictive ' - 'Methods for 3D Human Sensing in Natural Environments', - container='IEEE Transactions on Pattern Analysis and ' - 'Machine Intelligence', - year='2014', - homepage='http://vision.imar.ro/human3.6m/description.php', - ), - keypoint_info={ - 0: - dict(name='root', id=0, color=[51, 153, 255], type='lower', swap=''), - 1: - dict( - name='right_hip', - id=1, - color=[255, 128, 0], - type='lower', - swap='left_hip'), - 2: - dict( - name='right_knee', - id=2, - color=[255, 128, 0], - type='lower', - swap='left_knee'), - 3: - dict( - name='right_foot', - id=3, - color=[255, 128, 0], - type='lower', - swap='left_foot'), - 4: - dict( - name='left_hip', - id=4, - color=[0, 255, 0], - type='lower', - swap='right_hip'), - 5: - dict( - name='left_knee', - id=5, - color=[0, 255, 0], - type='lower', - swap='right_knee'), - 6: - dict( - name='left_foot', - id=6, - color=[0, 255, 0], - type='lower', - swap='right_foot'), - 7: - dict(name='spine', id=7, color=[51, 153, 255], type='upper', swap=''), - 8: - dict(name='thorax', id=8, color=[51, 153, 255], type='upper', swap=''), - 9: - dict( - name='neck_base', - id=9, - color=[51, 153, 255], - type='upper', - swap=''), - 10: - dict(name='head', id=10, color=[51, 153, 255], type='upper', swap=''), - 11: - dict( - name='left_shoulder', - id=11, - color=[0, 255, 0], - type='upper', - swap='right_shoulder'), - 12: - dict( - name='left_elbow', - id=12, - color=[0, 255, 0], - type='upper', - swap='right_elbow'), - 13: - dict( - name='left_wrist', - id=13, - color=[0, 255, 0], - type='upper', - swap='right_wrist'), - 14: - dict( - name='right_shoulder', - id=14, - color=[255, 128, 0], - type='upper', - swap='left_shoulder'), - 15: - dict( - name='right_elbow', - id=15, - color=[255, 128, 0], - type='upper', - swap='left_elbow'), - 16: - dict( - name='right_wrist', - id=16, - color=[255, 128, 0], - type='upper', - swap='left_wrist') - }, - skeleton_info={ - 0: - dict(link=('root', 'left_hip'), id=0, color=[0, 255, 0]), - 1: - dict(link=('left_hip', 'left_knee'), id=1, color=[0, 255, 0]), - 2: - dict(link=('left_knee', 'left_foot'), id=2, color=[0, 255, 0]), - 3: - dict(link=('root', 'right_hip'), id=3, color=[255, 128, 0]), - 4: - dict(link=('right_hip', 'right_knee'), id=4, color=[255, 128, 0]), - 5: - dict(link=('right_knee', 'right_foot'), id=5, color=[255, 128, 0]), - 6: - dict(link=('root', 'spine'), id=6, color=[51, 153, 255]), - 7: - dict(link=('spine', 'thorax'), id=7, color=[51, 153, 255]), - 8: - dict(link=('thorax', 'neck_base'), id=8, color=[51, 153, 255]), - 9: - dict(link=('neck_base', 'head'), id=9, color=[51, 153, 255]), - 10: - dict(link=('thorax', 'left_shoulder'), id=10, color=[0, 255, 0]), - 11: - dict(link=('left_shoulder', 'left_elbow'), id=11, color=[0, 255, 0]), - 
12: - dict(link=('left_elbow', 'left_wrist'), id=12, color=[0, 255, 0]), - 13: - dict(link=('thorax', 'right_shoulder'), id=13, color=[255, 128, 0]), - 14: - dict( - link=('right_shoulder', 'right_elbow'), id=14, color=[255, 128, - 0]), - 15: - dict(link=('right_elbow', 'right_wrist'), id=15, color=[255, 128, 0]) - }, - joint_weights=[1.] * 17, - sigmas=[], - stats_info=dict(bbox_center=(528., 427.), bbox_scale=400.)) +dataset_info = dict( + dataset_name='h36m', + paper_info=dict( + author='Ionescu, Catalin and Papava, Dragos and ' + 'Olaru, Vlad and Sminchisescu, Cristian', + title='Human3.6M: Large Scale Datasets and Predictive ' + 'Methods for 3D Human Sensing in Natural Environments', + container='IEEE Transactions on Pattern Analysis and ' + 'Machine Intelligence', + year='2014', + homepage='http://vision.imar.ro/human3.6m/description.php', + ), + keypoint_info={ + 0: + dict(name='root', id=0, color=[51, 153, 255], type='lower', swap=''), + 1: + dict( + name='right_hip', + id=1, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 2: + dict( + name='right_knee', + id=2, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 3: + dict( + name='right_foot', + id=3, + color=[255, 128, 0], + type='lower', + swap='left_foot'), + 4: + dict( + name='left_hip', + id=4, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 5: + dict( + name='left_knee', + id=5, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 6: + dict( + name='left_foot', + id=6, + color=[0, 255, 0], + type='lower', + swap='right_foot'), + 7: + dict(name='spine', id=7, color=[51, 153, 255], type='upper', swap=''), + 8: + dict(name='thorax', id=8, color=[51, 153, 255], type='upper', swap=''), + 9: + dict( + name='neck_base', + id=9, + color=[51, 153, 255], + type='upper', + swap=''), + 10: + dict(name='head', id=10, color=[51, 153, 255], type='upper', swap=''), + 11: + dict( + name='left_shoulder', + id=11, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 12: + dict( + name='left_elbow', + id=12, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 13: + dict( + name='left_wrist', + id=13, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 14: + dict( + name='right_shoulder', + id=14, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 15: + dict( + name='right_elbow', + id=15, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 16: + dict( + name='right_wrist', + id=16, + color=[255, 128, 0], + type='upper', + swap='left_wrist') + }, + skeleton_info={ + 0: + dict(link=('root', 'left_hip'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_hip', 'left_knee'), id=1, color=[0, 255, 0]), + 2: + dict(link=('left_knee', 'left_foot'), id=2, color=[0, 255, 0]), + 3: + dict(link=('root', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('right_hip', 'right_knee'), id=4, color=[255, 128, 0]), + 5: + dict(link=('right_knee', 'right_foot'), id=5, color=[255, 128, 0]), + 6: + dict(link=('root', 'spine'), id=6, color=[51, 153, 255]), + 7: + dict(link=('spine', 'thorax'), id=7, color=[51, 153, 255]), + 8: + dict(link=('thorax', 'neck_base'), id=8, color=[51, 153, 255]), + 9: + dict(link=('neck_base', 'head'), id=9, color=[51, 153, 255]), + 10: + dict(link=('thorax', 'left_shoulder'), id=10, color=[0, 255, 0]), + 11: + dict(link=('left_shoulder', 'left_elbow'), id=11, color=[0, 255, 0]), + 12: + dict(link=('left_elbow', 'left_wrist'), id=12, color=[0, 255, 0]), + 13: + dict(link=('thorax', 'right_shoulder'), id=13, color=[255, 128, 0]), + 
14: + dict( + link=('right_shoulder', 'right_elbow'), id=14, color=[255, 128, + 0]), + 15: + dict(link=('right_elbow', 'right_wrist'), id=15, color=[255, 128, 0]) + }, + joint_weights=[1.] * 17, + sigmas=[], + stats_info=dict(bbox_center=(528., 427.), bbox_scale=400.)) diff --git a/configs/_base_/datasets/halpe.py b/configs/_base_/datasets/halpe.py index 1385fe81dc..cccf9f4c60 100644 --- a/configs/_base_/datasets/halpe.py +++ b/configs/_base_/datasets/halpe.py @@ -1,1157 +1,1157 @@ -dataset_info = dict( - dataset_name='halpe', - paper_info=dict( - author='Li, Yong-Lu and Xu, Liang and Liu, Xinpeng and Huang, Xijie' - ' and Xu, Yue and Wang, Shiyi and Fang, Hao-Shu' - ' and Ma, Ze and Chen, Mingyang and Lu, Cewu', - title='PaStaNet: Toward Human Activity Knowledge Engine', - container='CVPR', - year='2020', - homepage='https://github.com/Fang-Haoshu/Halpe-FullBody/', - ), - keypoint_info={ - 0: - dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), - 1: - dict( - name='left_eye', - id=1, - color=[51, 153, 255], - type='upper', - swap='right_eye'), - 2: - dict( - name='right_eye', - id=2, - color=[51, 153, 255], - type='upper', - swap='left_eye'), - 3: - dict( - name='left_ear', - id=3, - color=[51, 153, 255], - type='upper', - swap='right_ear'), - 4: - dict( - name='right_ear', - id=4, - color=[51, 153, 255], - type='upper', - swap='left_ear'), - 5: - dict( - name='left_shoulder', - id=5, - color=[0, 255, 0], - type='upper', - swap='right_shoulder'), - 6: - dict( - name='right_shoulder', - id=6, - color=[255, 128, 0], - type='upper', - swap='left_shoulder'), - 7: - dict( - name='left_elbow', - id=7, - color=[0, 255, 0], - type='upper', - swap='right_elbow'), - 8: - dict( - name='right_elbow', - id=8, - color=[255, 128, 0], - type='upper', - swap='left_elbow'), - 9: - dict( - name='left_wrist', - id=9, - color=[0, 255, 0], - type='upper', - swap='right_wrist'), - 10: - dict( - name='right_wrist', - id=10, - color=[255, 128, 0], - type='upper', - swap='left_wrist'), - 11: - dict( - name='left_hip', - id=11, - color=[0, 255, 0], - type='lower', - swap='right_hip'), - 12: - dict( - name='right_hip', - id=12, - color=[255, 128, 0], - type='lower', - swap='left_hip'), - 13: - dict( - name='left_knee', - id=13, - color=[0, 255, 0], - type='lower', - swap='right_knee'), - 14: - dict( - name='right_knee', - id=14, - color=[255, 128, 0], - type='lower', - swap='left_knee'), - 15: - dict( - name='left_ankle', - id=15, - color=[0, 255, 0], - type='lower', - swap='right_ankle'), - 16: - dict( - name='right_ankle', - id=16, - color=[255, 128, 0], - type='lower', - swap='left_ankle'), - 17: - dict(name='head', id=17, color=[255, 128, 0], type='upper', swap=''), - 18: - dict(name='neck', id=18, color=[255, 128, 0], type='upper', swap=''), - 19: - dict(name='hip', id=19, color=[255, 128, 0], type='lower', swap=''), - 20: - dict( - name='left_big_toe', - id=20, - color=[255, 128, 0], - type='lower', - swap='right_big_toe'), - 21: - dict( - name='right_big_toe', - id=21, - color=[255, 128, 0], - type='lower', - swap='left_big_toe'), - 22: - dict( - name='left_small_toe', - id=22, - color=[255, 128, 0], - type='lower', - swap='right_small_toe'), - 23: - dict( - name='right_small_toe', - id=23, - color=[255, 128, 0], - type='lower', - swap='left_small_toe'), - 24: - dict( - name='left_heel', - id=24, - color=[255, 128, 0], - type='lower', - swap='right_heel'), - 25: - dict( - name='right_heel', - id=25, - color=[255, 128, 0], - type='lower', - swap='left_heel'), - 26: - dict( - name='face-0', - 
id=26, - color=[255, 255, 255], - type='', - swap='face-16'), - 27: - dict( - name='face-1', - id=27, - color=[255, 255, 255], - type='', - swap='face-15'), - 28: - dict( - name='face-2', - id=28, - color=[255, 255, 255], - type='', - swap='face-14'), - 29: - dict( - name='face-3', - id=29, - color=[255, 255, 255], - type='', - swap='face-13'), - 30: - dict( - name='face-4', - id=30, - color=[255, 255, 255], - type='', - swap='face-12'), - 31: - dict( - name='face-5', - id=31, - color=[255, 255, 255], - type='', - swap='face-11'), - 32: - dict( - name='face-6', - id=32, - color=[255, 255, 255], - type='', - swap='face-10'), - 33: - dict( - name='face-7', - id=33, - color=[255, 255, 255], - type='', - swap='face-9'), - 34: - dict(name='face-8', id=34, color=[255, 255, 255], type='', swap=''), - 35: - dict( - name='face-9', - id=35, - color=[255, 255, 255], - type='', - swap='face-7'), - 36: - dict( - name='face-10', - id=36, - color=[255, 255, 255], - type='', - swap='face-6'), - 37: - dict( - name='face-11', - id=37, - color=[255, 255, 255], - type='', - swap='face-5'), - 38: - dict( - name='face-12', - id=38, - color=[255, 255, 255], - type='', - swap='face-4'), - 39: - dict( - name='face-13', - id=39, - color=[255, 255, 255], - type='', - swap='face-3'), - 40: - dict( - name='face-14', - id=40, - color=[255, 255, 255], - type='', - swap='face-2'), - 41: - dict( - name='face-15', - id=41, - color=[255, 255, 255], - type='', - swap='face-1'), - 42: - dict( - name='face-16', - id=42, - color=[255, 255, 255], - type='', - swap='face-0'), - 43: - dict( - name='face-17', - id=43, - color=[255, 255, 255], - type='', - swap='face-26'), - 44: - dict( - name='face-18', - id=44, - color=[255, 255, 255], - type='', - swap='face-25'), - 45: - dict( - name='face-19', - id=45, - color=[255, 255, 255], - type='', - swap='face-24'), - 46: - dict( - name='face-20', - id=46, - color=[255, 255, 255], - type='', - swap='face-23'), - 47: - dict( - name='face-21', - id=47, - color=[255, 255, 255], - type='', - swap='face-22'), - 48: - dict( - name='face-22', - id=48, - color=[255, 255, 255], - type='', - swap='face-21'), - 49: - dict( - name='face-23', - id=49, - color=[255, 255, 255], - type='', - swap='face-20'), - 50: - dict( - name='face-24', - id=50, - color=[255, 255, 255], - type='', - swap='face-19'), - 51: - dict( - name='face-25', - id=51, - color=[255, 255, 255], - type='', - swap='face-18'), - 52: - dict( - name='face-26', - id=52, - color=[255, 255, 255], - type='', - swap='face-17'), - 53: - dict(name='face-27', id=53, color=[255, 255, 255], type='', swap=''), - 54: - dict(name='face-28', id=54, color=[255, 255, 255], type='', swap=''), - 55: - dict(name='face-29', id=55, color=[255, 255, 255], type='', swap=''), - 56: - dict(name='face-30', id=56, color=[255, 255, 255], type='', swap=''), - 57: - dict( - name='face-31', - id=57, - color=[255, 255, 255], - type='', - swap='face-35'), - 58: - dict( - name='face-32', - id=58, - color=[255, 255, 255], - type='', - swap='face-34'), - 59: - dict(name='face-33', id=59, color=[255, 255, 255], type='', swap=''), - 60: - dict( - name='face-34', - id=60, - color=[255, 255, 255], - type='', - swap='face-32'), - 61: - dict( - name='face-35', - id=61, - color=[255, 255, 255], - type='', - swap='face-31'), - 62: - dict( - name='face-36', - id=62, - color=[255, 255, 255], - type='', - swap='face-45'), - 63: - dict( - name='face-37', - id=63, - color=[255, 255, 255], - type='', - swap='face-44'), - 64: - dict( - name='face-38', - id=64, - color=[255, 255, 255], 
- type='', - swap='face-43'), - 65: - dict( - name='face-39', - id=65, - color=[255, 255, 255], - type='', - swap='face-42'), - 66: - dict( - name='face-40', - id=66, - color=[255, 255, 255], - type='', - swap='face-47'), - 67: - dict( - name='face-41', - id=67, - color=[255, 255, 255], - type='', - swap='face-46'), - 68: - dict( - name='face-42', - id=68, - color=[255, 255, 255], - type='', - swap='face-39'), - 69: - dict( - name='face-43', - id=69, - color=[255, 255, 255], - type='', - swap='face-38'), - 70: - dict( - name='face-44', - id=70, - color=[255, 255, 255], - type='', - swap='face-37'), - 71: - dict( - name='face-45', - id=71, - color=[255, 255, 255], - type='', - swap='face-36'), - 72: - dict( - name='face-46', - id=72, - color=[255, 255, 255], - type='', - swap='face-41'), - 73: - dict( - name='face-47', - id=73, - color=[255, 255, 255], - type='', - swap='face-40'), - 74: - dict( - name='face-48', - id=74, - color=[255, 255, 255], - type='', - swap='face-54'), - 75: - dict( - name='face-49', - id=75, - color=[255, 255, 255], - type='', - swap='face-53'), - 76: - dict( - name='face-50', - id=76, - color=[255, 255, 255], - type='', - swap='face-52'), - 77: - dict(name='face-51', id=77, color=[255, 255, 255], type='', swap=''), - 78: - dict( - name='face-52', - id=78, - color=[255, 255, 255], - type='', - swap='face-50'), - 79: - dict( - name='face-53', - id=79, - color=[255, 255, 255], - type='', - swap='face-49'), - 80: - dict( - name='face-54', - id=80, - color=[255, 255, 255], - type='', - swap='face-48'), - 81: - dict( - name='face-55', - id=81, - color=[255, 255, 255], - type='', - swap='face-59'), - 82: - dict( - name='face-56', - id=82, - color=[255, 255, 255], - type='', - swap='face-58'), - 83: - dict(name='face-57', id=83, color=[255, 255, 255], type='', swap=''), - 84: - dict( - name='face-58', - id=84, - color=[255, 255, 255], - type='', - swap='face-56'), - 85: - dict( - name='face-59', - id=85, - color=[255, 255, 255], - type='', - swap='face-55'), - 86: - dict( - name='face-60', - id=86, - color=[255, 255, 255], - type='', - swap='face-64'), - 87: - dict( - name='face-61', - id=87, - color=[255, 255, 255], - type='', - swap='face-63'), - 88: - dict(name='face-62', id=88, color=[255, 255, 255], type='', swap=''), - 89: - dict( - name='face-63', - id=89, - color=[255, 255, 255], - type='', - swap='face-61'), - 90: - dict( - name='face-64', - id=90, - color=[255, 255, 255], - type='', - swap='face-60'), - 91: - dict( - name='face-65', - id=91, - color=[255, 255, 255], - type='', - swap='face-67'), - 92: - dict(name='face-66', id=92, color=[255, 255, 255], type='', swap=''), - 93: - dict( - name='face-67', - id=93, - color=[255, 255, 255], - type='', - swap='face-65'), - 94: - dict( - name='left_hand_root', - id=94, - color=[255, 255, 255], - type='', - swap='right_hand_root'), - 95: - dict( - name='left_thumb1', - id=95, - color=[255, 128, 0], - type='', - swap='right_thumb1'), - 96: - dict( - name='left_thumb2', - id=96, - color=[255, 128, 0], - type='', - swap='right_thumb2'), - 97: - dict( - name='left_thumb3', - id=97, - color=[255, 128, 0], - type='', - swap='right_thumb3'), - 98: - dict( - name='left_thumb4', - id=98, - color=[255, 128, 0], - type='', - swap='right_thumb4'), - 99: - dict( - name='left_forefinger1', - id=99, - color=[255, 153, 255], - type='', - swap='right_forefinger1'), - 100: - dict( - name='left_forefinger2', - id=100, - color=[255, 153, 255], - type='', - swap='right_forefinger2'), - 101: - dict( - name='left_forefinger3', - id=101, - 
color=[255, 153, 255], - type='', - swap='right_forefinger3'), - 102: - dict( - name='left_forefinger4', - id=102, - color=[255, 153, 255], - type='', - swap='right_forefinger4'), - 103: - dict( - name='left_middle_finger1', - id=103, - color=[102, 178, 255], - type='', - swap='right_middle_finger1'), - 104: - dict( - name='left_middle_finger2', - id=104, - color=[102, 178, 255], - type='', - swap='right_middle_finger2'), - 105: - dict( - name='left_middle_finger3', - id=105, - color=[102, 178, 255], - type='', - swap='right_middle_finger3'), - 106: - dict( - name='left_middle_finger4', - id=106, - color=[102, 178, 255], - type='', - swap='right_middle_finger4'), - 107: - dict( - name='left_ring_finger1', - id=107, - color=[255, 51, 51], - type='', - swap='right_ring_finger1'), - 108: - dict( - name='left_ring_finger2', - id=108, - color=[255, 51, 51], - type='', - swap='right_ring_finger2'), - 109: - dict( - name='left_ring_finger3', - id=109, - color=[255, 51, 51], - type='', - swap='right_ring_finger3'), - 110: - dict( - name='left_ring_finger4', - id=110, - color=[255, 51, 51], - type='', - swap='right_ring_finger4'), - 111: - dict( - name='left_pinky_finger1', - id=111, - color=[0, 255, 0], - type='', - swap='right_pinky_finger1'), - 112: - dict( - name='left_pinky_finger2', - id=112, - color=[0, 255, 0], - type='', - swap='right_pinky_finger2'), - 113: - dict( - name='left_pinky_finger3', - id=113, - color=[0, 255, 0], - type='', - swap='right_pinky_finger3'), - 114: - dict( - name='left_pinky_finger4', - id=114, - color=[0, 255, 0], - type='', - swap='right_pinky_finger4'), - 115: - dict( - name='right_hand_root', - id=115, - color=[255, 255, 255], - type='', - swap='left_hand_root'), - 116: - dict( - name='right_thumb1', - id=116, - color=[255, 128, 0], - type='', - swap='left_thumb1'), - 117: - dict( - name='right_thumb2', - id=117, - color=[255, 128, 0], - type='', - swap='left_thumb2'), - 118: - dict( - name='right_thumb3', - id=118, - color=[255, 128, 0], - type='', - swap='left_thumb3'), - 119: - dict( - name='right_thumb4', - id=119, - color=[255, 128, 0], - type='', - swap='left_thumb4'), - 120: - dict( - name='right_forefinger1', - id=120, - color=[255, 153, 255], - type='', - swap='left_forefinger1'), - 121: - dict( - name='right_forefinger2', - id=121, - color=[255, 153, 255], - type='', - swap='left_forefinger2'), - 122: - dict( - name='right_forefinger3', - id=122, - color=[255, 153, 255], - type='', - swap='left_forefinger3'), - 123: - dict( - name='right_forefinger4', - id=123, - color=[255, 153, 255], - type='', - swap='left_forefinger4'), - 124: - dict( - name='right_middle_finger1', - id=124, - color=[102, 178, 255], - type='', - swap='left_middle_finger1'), - 125: - dict( - name='right_middle_finger2', - id=125, - color=[102, 178, 255], - type='', - swap='left_middle_finger2'), - 126: - dict( - name='right_middle_finger3', - id=126, - color=[102, 178, 255], - type='', - swap='left_middle_finger3'), - 127: - dict( - name='right_middle_finger4', - id=127, - color=[102, 178, 255], - type='', - swap='left_middle_finger4'), - 128: - dict( - name='right_ring_finger1', - id=128, - color=[255, 51, 51], - type='', - swap='left_ring_finger1'), - 129: - dict( - name='right_ring_finger2', - id=129, - color=[255, 51, 51], - type='', - swap='left_ring_finger2'), - 130: - dict( - name='right_ring_finger3', - id=130, - color=[255, 51, 51], - type='', - swap='left_ring_finger3'), - 131: - dict( - name='right_ring_finger4', - id=131, - color=[255, 51, 51], - type='', - 
swap='left_ring_finger4'), - 132: - dict( - name='right_pinky_finger1', - id=132, - color=[0, 255, 0], - type='', - swap='left_pinky_finger1'), - 133: - dict( - name='right_pinky_finger2', - id=133, - color=[0, 255, 0], - type='', - swap='left_pinky_finger2'), - 134: - dict( - name='right_pinky_finger3', - id=134, - color=[0, 255, 0], - type='', - swap='left_pinky_finger3'), - 135: - dict( - name='right_pinky_finger4', - id=135, - color=[0, 255, 0], - type='', - swap='left_pinky_finger4') - }, - skeleton_info={ - 0: - dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), - 1: - dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), - 2: - dict(link=('left_hip', 'hip'), id=2, color=[0, 255, 0]), - 3: - dict(link=('right_ankle', 'right_knee'), id=3, color=[255, 128, 0]), - 4: - dict(link=('right_knee', 'right_hip'), id=4, color=[255, 128, 0]), - 5: - dict(link=('right_hip', 'hip'), id=5, color=[255, 128, 0]), - 6: - dict(link=('head', 'neck'), id=6, color=[51, 153, 255]), - 7: - dict(link=('neck', 'hip'), id=7, color=[51, 153, 255]), - 8: - dict(link=('neck', 'left_shoulder'), id=8, color=[0, 255, 0]), - 9: - dict(link=('left_shoulder', 'left_elbow'), id=9, color=[0, 255, 0]), - 10: - dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), - 11: - dict(link=('neck', 'right_shoulder'), id=11, color=[255, 128, 0]), - 12: - dict( - link=('right_shoulder', 'right_elbow'), id=12, color=[255, 128, - 0]), - 13: - dict(link=('right_elbow', 'right_wrist'), id=13, color=[255, 128, 0]), - 14: - dict(link=('left_eye', 'right_eye'), id=14, color=[51, 153, 255]), - 15: - dict(link=('nose', 'left_eye'), id=15, color=[51, 153, 255]), - 16: - dict(link=('nose', 'right_eye'), id=16, color=[51, 153, 255]), - 17: - dict(link=('left_eye', 'left_ear'), id=17, color=[51, 153, 255]), - 18: - dict(link=('right_eye', 'right_ear'), id=18, color=[51, 153, 255]), - 19: - dict(link=('left_ear', 'left_shoulder'), id=19, color=[51, 153, 255]), - 20: - dict( - link=('right_ear', 'right_shoulder'), id=20, color=[51, 153, 255]), - 21: - dict(link=('left_ankle', 'left_big_toe'), id=21, color=[0, 255, 0]), - 22: - dict(link=('left_ankle', 'left_small_toe'), id=22, color=[0, 255, 0]), - 23: - dict(link=('left_ankle', 'left_heel'), id=23, color=[0, 255, 0]), - 24: - dict( - link=('right_ankle', 'right_big_toe'), id=24, color=[255, 128, 0]), - 25: - dict( - link=('right_ankle', 'right_small_toe'), - id=25, - color=[255, 128, 0]), - 26: - dict(link=('right_ankle', 'right_heel'), id=26, color=[255, 128, 0]), - 27: - dict(link=('left_wrist', 'left_thumb1'), id=27, color=[255, 128, 0]), - 28: - dict(link=('left_thumb1', 'left_thumb2'), id=28, color=[255, 128, 0]), - 29: - dict(link=('left_thumb2', 'left_thumb3'), id=29, color=[255, 128, 0]), - 30: - dict(link=('left_thumb3', 'left_thumb4'), id=30, color=[255, 128, 0]), - 31: - dict( - link=('left_wrist', 'left_forefinger1'), - id=31, - color=[255, 153, 255]), - 32: - dict( - link=('left_forefinger1', 'left_forefinger2'), - id=32, - color=[255, 153, 255]), - 33: - dict( - link=('left_forefinger2', 'left_forefinger3'), - id=33, - color=[255, 153, 255]), - 34: - dict( - link=('left_forefinger3', 'left_forefinger4'), - id=34, - color=[255, 153, 255]), - 35: - dict( - link=('left_wrist', 'left_middle_finger1'), - id=35, - color=[102, 178, 255]), - 36: - dict( - link=('left_middle_finger1', 'left_middle_finger2'), - id=36, - color=[102, 178, 255]), - 37: - dict( - link=('left_middle_finger2', 'left_middle_finger3'), - id=37, - color=[102, 178, 255]), - 38: - 
dict( - link=('left_middle_finger3', 'left_middle_finger4'), - id=38, - color=[102, 178, 255]), - 39: - dict( - link=('left_wrist', 'left_ring_finger1'), - id=39, - color=[255, 51, 51]), - 40: - dict( - link=('left_ring_finger1', 'left_ring_finger2'), - id=40, - color=[255, 51, 51]), - 41: - dict( - link=('left_ring_finger2', 'left_ring_finger3'), - id=41, - color=[255, 51, 51]), - 42: - dict( - link=('left_ring_finger3', 'left_ring_finger4'), - id=42, - color=[255, 51, 51]), - 43: - dict( - link=('left_wrist', 'left_pinky_finger1'), - id=43, - color=[0, 255, 0]), - 44: - dict( - link=('left_pinky_finger1', 'left_pinky_finger2'), - id=44, - color=[0, 255, 0]), - 45: - dict( - link=('left_pinky_finger2', 'left_pinky_finger3'), - id=45, - color=[0, 255, 0]), - 46: - dict( - link=('left_pinky_finger3', 'left_pinky_finger4'), - id=46, - color=[0, 255, 0]), - 47: - dict(link=('right_wrist', 'right_thumb1'), id=47, color=[255, 128, 0]), - 48: - dict( - link=('right_thumb1', 'right_thumb2'), id=48, color=[255, 128, 0]), - 49: - dict( - link=('right_thumb2', 'right_thumb3'), id=49, color=[255, 128, 0]), - 50: - dict( - link=('right_thumb3', 'right_thumb4'), id=50, color=[255, 128, 0]), - 51: - dict( - link=('right_wrist', 'right_forefinger1'), - id=51, - color=[255, 153, 255]), - 52: - dict( - link=('right_forefinger1', 'right_forefinger2'), - id=52, - color=[255, 153, 255]), - 53: - dict( - link=('right_forefinger2', 'right_forefinger3'), - id=53, - color=[255, 153, 255]), - 54: - dict( - link=('right_forefinger3', 'right_forefinger4'), - id=54, - color=[255, 153, 255]), - 55: - dict( - link=('right_wrist', 'right_middle_finger1'), - id=55, - color=[102, 178, 255]), - 56: - dict( - link=('right_middle_finger1', 'right_middle_finger2'), - id=56, - color=[102, 178, 255]), - 57: - dict( - link=('right_middle_finger2', 'right_middle_finger3'), - id=57, - color=[102, 178, 255]), - 58: - dict( - link=('right_middle_finger3', 'right_middle_finger4'), - id=58, - color=[102, 178, 255]), - 59: - dict( - link=('right_wrist', 'right_ring_finger1'), - id=59, - color=[255, 51, 51]), - 60: - dict( - link=('right_ring_finger1', 'right_ring_finger2'), - id=60, - color=[255, 51, 51]), - 61: - dict( - link=('right_ring_finger2', 'right_ring_finger3'), - id=61, - color=[255, 51, 51]), - 62: - dict( - link=('right_ring_finger3', 'right_ring_finger4'), - id=62, - color=[255, 51, 51]), - 63: - dict( - link=('right_wrist', 'right_pinky_finger1'), - id=63, - color=[0, 255, 0]), - 64: - dict( - link=('right_pinky_finger1', 'right_pinky_finger2'), - id=64, - color=[0, 255, 0]), - 65: - dict( - link=('right_pinky_finger2', 'right_pinky_finger3'), - id=65, - color=[0, 255, 0]), - 66: - dict( - link=('right_pinky_finger3', 'right_pinky_finger4'), - id=66, - color=[0, 255, 0]) - }, - joint_weights=[1.] 
* 136, - - # 'https://github.com/Fang-Haoshu/Halpe-FullBody/blob/master/' - # 'HalpeCOCOAPI/PythonAPI/halpecocotools/cocoeval.py#L245' - sigmas=[ - 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, - 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089, 0.08, 0.08, 0.08, - 0.089, 0.089, 0.089, 0.089, 0.089, 0.089, 0.015, 0.015, 0.015, 0.015, - 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, - 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, - 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, - 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, - 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, - 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, - 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, - 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, - 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, - 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, - 0.015, 0.015, 0.015, 0.015, 0.015, 0.015 - ]) +dataset_info = dict( + dataset_name='halpe', + paper_info=dict( + author='Li, Yong-Lu and Xu, Liang and Liu, Xinpeng and Huang, Xijie' + ' and Xu, Yue and Wang, Shiyi and Fang, Hao-Shu' + ' and Ma, Ze and Chen, Mingyang and Lu, Cewu', + title='PaStaNet: Toward Human Activity Knowledge Engine', + container='CVPR', + year='2020', + homepage='https://github.com/Fang-Haoshu/Halpe-FullBody/', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 17: + dict(name='head', id=17, color=[255, 128, 0], type='upper', swap=''), + 18: + dict(name='neck', id=18, color=[255, 128, 0], type='upper', swap=''), + 19: + dict(name='hip', id=19, color=[255, 128, 0], type='lower', swap=''), + 20: + dict( + name='left_big_toe', + id=20, + 
color=[255, 128, 0], + type='lower', + swap='right_big_toe'), + 21: + dict( + name='right_big_toe', + id=21, + color=[255, 128, 0], + type='lower', + swap='left_big_toe'), + 22: + dict( + name='left_small_toe', + id=22, + color=[255, 128, 0], + type='lower', + swap='right_small_toe'), + 23: + dict( + name='right_small_toe', + id=23, + color=[255, 128, 0], + type='lower', + swap='left_small_toe'), + 24: + dict( + name='left_heel', + id=24, + color=[255, 128, 0], + type='lower', + swap='right_heel'), + 25: + dict( + name='right_heel', + id=25, + color=[255, 128, 0], + type='lower', + swap='left_heel'), + 26: + dict( + name='face-0', + id=26, + color=[255, 255, 255], + type='', + swap='face-16'), + 27: + dict( + name='face-1', + id=27, + color=[255, 255, 255], + type='', + swap='face-15'), + 28: + dict( + name='face-2', + id=28, + color=[255, 255, 255], + type='', + swap='face-14'), + 29: + dict( + name='face-3', + id=29, + color=[255, 255, 255], + type='', + swap='face-13'), + 30: + dict( + name='face-4', + id=30, + color=[255, 255, 255], + type='', + swap='face-12'), + 31: + dict( + name='face-5', + id=31, + color=[255, 255, 255], + type='', + swap='face-11'), + 32: + dict( + name='face-6', + id=32, + color=[255, 255, 255], + type='', + swap='face-10'), + 33: + dict( + name='face-7', + id=33, + color=[255, 255, 255], + type='', + swap='face-9'), + 34: + dict(name='face-8', id=34, color=[255, 255, 255], type='', swap=''), + 35: + dict( + name='face-9', + id=35, + color=[255, 255, 255], + type='', + swap='face-7'), + 36: + dict( + name='face-10', + id=36, + color=[255, 255, 255], + type='', + swap='face-6'), + 37: + dict( + name='face-11', + id=37, + color=[255, 255, 255], + type='', + swap='face-5'), + 38: + dict( + name='face-12', + id=38, + color=[255, 255, 255], + type='', + swap='face-4'), + 39: + dict( + name='face-13', + id=39, + color=[255, 255, 255], + type='', + swap='face-3'), + 40: + dict( + name='face-14', + id=40, + color=[255, 255, 255], + type='', + swap='face-2'), + 41: + dict( + name='face-15', + id=41, + color=[255, 255, 255], + type='', + swap='face-1'), + 42: + dict( + name='face-16', + id=42, + color=[255, 255, 255], + type='', + swap='face-0'), + 43: + dict( + name='face-17', + id=43, + color=[255, 255, 255], + type='', + swap='face-26'), + 44: + dict( + name='face-18', + id=44, + color=[255, 255, 255], + type='', + swap='face-25'), + 45: + dict( + name='face-19', + id=45, + color=[255, 255, 255], + type='', + swap='face-24'), + 46: + dict( + name='face-20', + id=46, + color=[255, 255, 255], + type='', + swap='face-23'), + 47: + dict( + name='face-21', + id=47, + color=[255, 255, 255], + type='', + swap='face-22'), + 48: + dict( + name='face-22', + id=48, + color=[255, 255, 255], + type='', + swap='face-21'), + 49: + dict( + name='face-23', + id=49, + color=[255, 255, 255], + type='', + swap='face-20'), + 50: + dict( + name='face-24', + id=50, + color=[255, 255, 255], + type='', + swap='face-19'), + 51: + dict( + name='face-25', + id=51, + color=[255, 255, 255], + type='', + swap='face-18'), + 52: + dict( + name='face-26', + id=52, + color=[255, 255, 255], + type='', + swap='face-17'), + 53: + dict(name='face-27', id=53, color=[255, 255, 255], type='', swap=''), + 54: + dict(name='face-28', id=54, color=[255, 255, 255], type='', swap=''), + 55: + dict(name='face-29', id=55, color=[255, 255, 255], type='', swap=''), + 56: + dict(name='face-30', id=56, color=[255, 255, 255], type='', swap=''), + 57: + dict( + name='face-31', + id=57, + color=[255, 255, 255], + type='', + 
swap='face-35'), + 58: + dict( + name='face-32', + id=58, + color=[255, 255, 255], + type='', + swap='face-34'), + 59: + dict(name='face-33', id=59, color=[255, 255, 255], type='', swap=''), + 60: + dict( + name='face-34', + id=60, + color=[255, 255, 255], + type='', + swap='face-32'), + 61: + dict( + name='face-35', + id=61, + color=[255, 255, 255], + type='', + swap='face-31'), + 62: + dict( + name='face-36', + id=62, + color=[255, 255, 255], + type='', + swap='face-45'), + 63: + dict( + name='face-37', + id=63, + color=[255, 255, 255], + type='', + swap='face-44'), + 64: + dict( + name='face-38', + id=64, + color=[255, 255, 255], + type='', + swap='face-43'), + 65: + dict( + name='face-39', + id=65, + color=[255, 255, 255], + type='', + swap='face-42'), + 66: + dict( + name='face-40', + id=66, + color=[255, 255, 255], + type='', + swap='face-47'), + 67: + dict( + name='face-41', + id=67, + color=[255, 255, 255], + type='', + swap='face-46'), + 68: + dict( + name='face-42', + id=68, + color=[255, 255, 255], + type='', + swap='face-39'), + 69: + dict( + name='face-43', + id=69, + color=[255, 255, 255], + type='', + swap='face-38'), + 70: + dict( + name='face-44', + id=70, + color=[255, 255, 255], + type='', + swap='face-37'), + 71: + dict( + name='face-45', + id=71, + color=[255, 255, 255], + type='', + swap='face-36'), + 72: + dict( + name='face-46', + id=72, + color=[255, 255, 255], + type='', + swap='face-41'), + 73: + dict( + name='face-47', + id=73, + color=[255, 255, 255], + type='', + swap='face-40'), + 74: + dict( + name='face-48', + id=74, + color=[255, 255, 255], + type='', + swap='face-54'), + 75: + dict( + name='face-49', + id=75, + color=[255, 255, 255], + type='', + swap='face-53'), + 76: + dict( + name='face-50', + id=76, + color=[255, 255, 255], + type='', + swap='face-52'), + 77: + dict(name='face-51', id=77, color=[255, 255, 255], type='', swap=''), + 78: + dict( + name='face-52', + id=78, + color=[255, 255, 255], + type='', + swap='face-50'), + 79: + dict( + name='face-53', + id=79, + color=[255, 255, 255], + type='', + swap='face-49'), + 80: + dict( + name='face-54', + id=80, + color=[255, 255, 255], + type='', + swap='face-48'), + 81: + dict( + name='face-55', + id=81, + color=[255, 255, 255], + type='', + swap='face-59'), + 82: + dict( + name='face-56', + id=82, + color=[255, 255, 255], + type='', + swap='face-58'), + 83: + dict(name='face-57', id=83, color=[255, 255, 255], type='', swap=''), + 84: + dict( + name='face-58', + id=84, + color=[255, 255, 255], + type='', + swap='face-56'), + 85: + dict( + name='face-59', + id=85, + color=[255, 255, 255], + type='', + swap='face-55'), + 86: + dict( + name='face-60', + id=86, + color=[255, 255, 255], + type='', + swap='face-64'), + 87: + dict( + name='face-61', + id=87, + color=[255, 255, 255], + type='', + swap='face-63'), + 88: + dict(name='face-62', id=88, color=[255, 255, 255], type='', swap=''), + 89: + dict( + name='face-63', + id=89, + color=[255, 255, 255], + type='', + swap='face-61'), + 90: + dict( + name='face-64', + id=90, + color=[255, 255, 255], + type='', + swap='face-60'), + 91: + dict( + name='face-65', + id=91, + color=[255, 255, 255], + type='', + swap='face-67'), + 92: + dict(name='face-66', id=92, color=[255, 255, 255], type='', swap=''), + 93: + dict( + name='face-67', + id=93, + color=[255, 255, 255], + type='', + swap='face-65'), + 94: + dict( + name='left_hand_root', + id=94, + color=[255, 255, 255], + type='', + swap='right_hand_root'), + 95: + dict( + name='left_thumb1', + id=95, + color=[255, 
128, 0], + type='', + swap='right_thumb1'), + 96: + dict( + name='left_thumb2', + id=96, + color=[255, 128, 0], + type='', + swap='right_thumb2'), + 97: + dict( + name='left_thumb3', + id=97, + color=[255, 128, 0], + type='', + swap='right_thumb3'), + 98: + dict( + name='left_thumb4', + id=98, + color=[255, 128, 0], + type='', + swap='right_thumb4'), + 99: + dict( + name='left_forefinger1', + id=99, + color=[255, 153, 255], + type='', + swap='right_forefinger1'), + 100: + dict( + name='left_forefinger2', + id=100, + color=[255, 153, 255], + type='', + swap='right_forefinger2'), + 101: + dict( + name='left_forefinger3', + id=101, + color=[255, 153, 255], + type='', + swap='right_forefinger3'), + 102: + dict( + name='left_forefinger4', + id=102, + color=[255, 153, 255], + type='', + swap='right_forefinger4'), + 103: + dict( + name='left_middle_finger1', + id=103, + color=[102, 178, 255], + type='', + swap='right_middle_finger1'), + 104: + dict( + name='left_middle_finger2', + id=104, + color=[102, 178, 255], + type='', + swap='right_middle_finger2'), + 105: + dict( + name='left_middle_finger3', + id=105, + color=[102, 178, 255], + type='', + swap='right_middle_finger3'), + 106: + dict( + name='left_middle_finger4', + id=106, + color=[102, 178, 255], + type='', + swap='right_middle_finger4'), + 107: + dict( + name='left_ring_finger1', + id=107, + color=[255, 51, 51], + type='', + swap='right_ring_finger1'), + 108: + dict( + name='left_ring_finger2', + id=108, + color=[255, 51, 51], + type='', + swap='right_ring_finger2'), + 109: + dict( + name='left_ring_finger3', + id=109, + color=[255, 51, 51], + type='', + swap='right_ring_finger3'), + 110: + dict( + name='left_ring_finger4', + id=110, + color=[255, 51, 51], + type='', + swap='right_ring_finger4'), + 111: + dict( + name='left_pinky_finger1', + id=111, + color=[0, 255, 0], + type='', + swap='right_pinky_finger1'), + 112: + dict( + name='left_pinky_finger2', + id=112, + color=[0, 255, 0], + type='', + swap='right_pinky_finger2'), + 113: + dict( + name='left_pinky_finger3', + id=113, + color=[0, 255, 0], + type='', + swap='right_pinky_finger3'), + 114: + dict( + name='left_pinky_finger4', + id=114, + color=[0, 255, 0], + type='', + swap='right_pinky_finger4'), + 115: + dict( + name='right_hand_root', + id=115, + color=[255, 255, 255], + type='', + swap='left_hand_root'), + 116: + dict( + name='right_thumb1', + id=116, + color=[255, 128, 0], + type='', + swap='left_thumb1'), + 117: + dict( + name='right_thumb2', + id=117, + color=[255, 128, 0], + type='', + swap='left_thumb2'), + 118: + dict( + name='right_thumb3', + id=118, + color=[255, 128, 0], + type='', + swap='left_thumb3'), + 119: + dict( + name='right_thumb4', + id=119, + color=[255, 128, 0], + type='', + swap='left_thumb4'), + 120: + dict( + name='right_forefinger1', + id=120, + color=[255, 153, 255], + type='', + swap='left_forefinger1'), + 121: + dict( + name='right_forefinger2', + id=121, + color=[255, 153, 255], + type='', + swap='left_forefinger2'), + 122: + dict( + name='right_forefinger3', + id=122, + color=[255, 153, 255], + type='', + swap='left_forefinger3'), + 123: + dict( + name='right_forefinger4', + id=123, + color=[255, 153, 255], + type='', + swap='left_forefinger4'), + 124: + dict( + name='right_middle_finger1', + id=124, + color=[102, 178, 255], + type='', + swap='left_middle_finger1'), + 125: + dict( + name='right_middle_finger2', + id=125, + color=[102, 178, 255], + type='', + swap='left_middle_finger2'), + 126: + dict( + name='right_middle_finger3', + id=126, + 
color=[102, 178, 255], + type='', + swap='left_middle_finger3'), + 127: + dict( + name='right_middle_finger4', + id=127, + color=[102, 178, 255], + type='', + swap='left_middle_finger4'), + 128: + dict( + name='right_ring_finger1', + id=128, + color=[255, 51, 51], + type='', + swap='left_ring_finger1'), + 129: + dict( + name='right_ring_finger2', + id=129, + color=[255, 51, 51], + type='', + swap='left_ring_finger2'), + 130: + dict( + name='right_ring_finger3', + id=130, + color=[255, 51, 51], + type='', + swap='left_ring_finger3'), + 131: + dict( + name='right_ring_finger4', + id=131, + color=[255, 51, 51], + type='', + swap='left_ring_finger4'), + 132: + dict( + name='right_pinky_finger1', + id=132, + color=[0, 255, 0], + type='', + swap='left_pinky_finger1'), + 133: + dict( + name='right_pinky_finger2', + id=133, + color=[0, 255, 0], + type='', + swap='left_pinky_finger2'), + 134: + dict( + name='right_pinky_finger3', + id=134, + color=[0, 255, 0], + type='', + swap='left_pinky_finger3'), + 135: + dict( + name='right_pinky_finger4', + id=135, + color=[0, 255, 0], + type='', + swap='left_pinky_finger4') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('left_hip', 'hip'), id=2, color=[0, 255, 0]), + 3: + dict(link=('right_ankle', 'right_knee'), id=3, color=[255, 128, 0]), + 4: + dict(link=('right_knee', 'right_hip'), id=4, color=[255, 128, 0]), + 5: + dict(link=('right_hip', 'hip'), id=5, color=[255, 128, 0]), + 6: + dict(link=('head', 'neck'), id=6, color=[51, 153, 255]), + 7: + dict(link=('neck', 'hip'), id=7, color=[51, 153, 255]), + 8: + dict(link=('neck', 'left_shoulder'), id=8, color=[0, 255, 0]), + 9: + dict(link=('left_shoulder', 'left_elbow'), id=9, color=[0, 255, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('neck', 'right_shoulder'), id=11, color=[255, 128, 0]), + 12: + dict( + link=('right_shoulder', 'right_elbow'), id=12, color=[255, 128, + 0]), + 13: + dict(link=('right_elbow', 'right_wrist'), id=13, color=[255, 128, 0]), + 14: + dict(link=('left_eye', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('nose', 'left_eye'), id=15, color=[51, 153, 255]), + 16: + dict(link=('nose', 'right_eye'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_eye', 'left_ear'), id=17, color=[51, 153, 255]), + 18: + dict(link=('right_eye', 'right_ear'), id=18, color=[51, 153, 255]), + 19: + dict(link=('left_ear', 'left_shoulder'), id=19, color=[51, 153, 255]), + 20: + dict( + link=('right_ear', 'right_shoulder'), id=20, color=[51, 153, 255]), + 21: + dict(link=('left_ankle', 'left_big_toe'), id=21, color=[0, 255, 0]), + 22: + dict(link=('left_ankle', 'left_small_toe'), id=22, color=[0, 255, 0]), + 23: + dict(link=('left_ankle', 'left_heel'), id=23, color=[0, 255, 0]), + 24: + dict( + link=('right_ankle', 'right_big_toe'), id=24, color=[255, 128, 0]), + 25: + dict( + link=('right_ankle', 'right_small_toe'), + id=25, + color=[255, 128, 0]), + 26: + dict(link=('right_ankle', 'right_heel'), id=26, color=[255, 128, 0]), + 27: + dict(link=('left_wrist', 'left_thumb1'), id=27, color=[255, 128, 0]), + 28: + dict(link=('left_thumb1', 'left_thumb2'), id=28, color=[255, 128, 0]), + 29: + dict(link=('left_thumb2', 'left_thumb3'), id=29, color=[255, 128, 0]), + 30: + dict(link=('left_thumb3', 'left_thumb4'), id=30, color=[255, 128, 0]), + 31: + dict( + link=('left_wrist', 'left_forefinger1'), + id=31, + 
color=[255, 153, 255]), + 32: + dict( + link=('left_forefinger1', 'left_forefinger2'), + id=32, + color=[255, 153, 255]), + 33: + dict( + link=('left_forefinger2', 'left_forefinger3'), + id=33, + color=[255, 153, 255]), + 34: + dict( + link=('left_forefinger3', 'left_forefinger4'), + id=34, + color=[255, 153, 255]), + 35: + dict( + link=('left_wrist', 'left_middle_finger1'), + id=35, + color=[102, 178, 255]), + 36: + dict( + link=('left_middle_finger1', 'left_middle_finger2'), + id=36, + color=[102, 178, 255]), + 37: + dict( + link=('left_middle_finger2', 'left_middle_finger3'), + id=37, + color=[102, 178, 255]), + 38: + dict( + link=('left_middle_finger3', 'left_middle_finger4'), + id=38, + color=[102, 178, 255]), + 39: + dict( + link=('left_wrist', 'left_ring_finger1'), + id=39, + color=[255, 51, 51]), + 40: + dict( + link=('left_ring_finger1', 'left_ring_finger2'), + id=40, + color=[255, 51, 51]), + 41: + dict( + link=('left_ring_finger2', 'left_ring_finger3'), + id=41, + color=[255, 51, 51]), + 42: + dict( + link=('left_ring_finger3', 'left_ring_finger4'), + id=42, + color=[255, 51, 51]), + 43: + dict( + link=('left_wrist', 'left_pinky_finger1'), + id=43, + color=[0, 255, 0]), + 44: + dict( + link=('left_pinky_finger1', 'left_pinky_finger2'), + id=44, + color=[0, 255, 0]), + 45: + dict( + link=('left_pinky_finger2', 'left_pinky_finger3'), + id=45, + color=[0, 255, 0]), + 46: + dict( + link=('left_pinky_finger3', 'left_pinky_finger4'), + id=46, + color=[0, 255, 0]), + 47: + dict(link=('right_wrist', 'right_thumb1'), id=47, color=[255, 128, 0]), + 48: + dict( + link=('right_thumb1', 'right_thumb2'), id=48, color=[255, 128, 0]), + 49: + dict( + link=('right_thumb2', 'right_thumb3'), id=49, color=[255, 128, 0]), + 50: + dict( + link=('right_thumb3', 'right_thumb4'), id=50, color=[255, 128, 0]), + 51: + dict( + link=('right_wrist', 'right_forefinger1'), + id=51, + color=[255, 153, 255]), + 52: + dict( + link=('right_forefinger1', 'right_forefinger2'), + id=52, + color=[255, 153, 255]), + 53: + dict( + link=('right_forefinger2', 'right_forefinger3'), + id=53, + color=[255, 153, 255]), + 54: + dict( + link=('right_forefinger3', 'right_forefinger4'), + id=54, + color=[255, 153, 255]), + 55: + dict( + link=('right_wrist', 'right_middle_finger1'), + id=55, + color=[102, 178, 255]), + 56: + dict( + link=('right_middle_finger1', 'right_middle_finger2'), + id=56, + color=[102, 178, 255]), + 57: + dict( + link=('right_middle_finger2', 'right_middle_finger3'), + id=57, + color=[102, 178, 255]), + 58: + dict( + link=('right_middle_finger3', 'right_middle_finger4'), + id=58, + color=[102, 178, 255]), + 59: + dict( + link=('right_wrist', 'right_ring_finger1'), + id=59, + color=[255, 51, 51]), + 60: + dict( + link=('right_ring_finger1', 'right_ring_finger2'), + id=60, + color=[255, 51, 51]), + 61: + dict( + link=('right_ring_finger2', 'right_ring_finger3'), + id=61, + color=[255, 51, 51]), + 62: + dict( + link=('right_ring_finger3', 'right_ring_finger4'), + id=62, + color=[255, 51, 51]), + 63: + dict( + link=('right_wrist', 'right_pinky_finger1'), + id=63, + color=[0, 255, 0]), + 64: + dict( + link=('right_pinky_finger1', 'right_pinky_finger2'), + id=64, + color=[0, 255, 0]), + 65: + dict( + link=('right_pinky_finger2', 'right_pinky_finger3'), + id=65, + color=[0, 255, 0]), + 66: + dict( + link=('right_pinky_finger3', 'right_pinky_finger4'), + id=66, + color=[0, 255, 0]) + }, + joint_weights=[1.] 
* 136, + + # 'https://github.com/Fang-Haoshu/Halpe-FullBody/blob/master/' + # 'HalpeCOCOAPI/PythonAPI/halpecocotools/cocoeval.py#L245' + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089, 0.08, 0.08, 0.08, + 0.089, 0.089, 0.089, 0.089, 0.089, 0.089, 0.015, 0.015, 0.015, 0.015, + 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, + 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, + 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, + 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, + 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, + 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, + 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, + 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, + 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, + 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, + 0.015, 0.015, 0.015, 0.015, 0.015, 0.015 + ]) diff --git a/configs/_base_/datasets/halpe26.py b/configs/_base_/datasets/halpe26.py index cb4df83874..7f4d54992d 100644 --- a/configs/_base_/datasets/halpe26.py +++ b/configs/_base_/datasets/halpe26.py @@ -1,274 +1,274 @@ -dataset_info = dict( - dataset_name='halpe26', - paper_info=dict( - author='Li, Yong-Lu and Xu, Liang and Liu, Xinpeng and Huang, Xijie' - ' and Xu, Yue and Wang, Shiyi and Fang, Hao-Shu' - ' and Ma, Ze and Chen, Mingyang and Lu, Cewu', - title='PaStaNet: Toward Human Activity Knowledge Engine', - container='CVPR', - year='2020', - homepage='https://github.com/Fang-Haoshu/Halpe-FullBody/', - ), - keypoint_info={ - 0: - dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), - 1: - dict( - name='left_eye', - id=1, - color=[51, 153, 255], - type='upper', - swap='right_eye'), - 2: - dict( - name='right_eye', - id=2, - color=[51, 153, 255], - type='upper', - swap='left_eye'), - 3: - dict( - name='left_ear', - id=3, - color=[51, 153, 255], - type='upper', - swap='right_ear'), - 4: - dict( - name='right_ear', - id=4, - color=[51, 153, 255], - type='upper', - swap='left_ear'), - 5: - dict( - name='left_shoulder', - id=5, - color=[0, 255, 0], - type='upper', - swap='right_shoulder'), - 6: - dict( - name='right_shoulder', - id=6, - color=[255, 128, 0], - type='upper', - swap='left_shoulder'), - 7: - dict( - name='left_elbow', - id=7, - color=[0, 255, 0], - type='upper', - swap='right_elbow'), - 8: - dict( - name='right_elbow', - id=8, - color=[255, 128, 0], - type='upper', - swap='left_elbow'), - 9: - dict( - name='left_wrist', - id=9, - color=[0, 255, 0], - type='upper', - swap='right_wrist'), - 10: - dict( - name='right_wrist', - id=10, - color=[255, 128, 0], - type='upper', - swap='left_wrist'), - 11: - dict( - name='left_hip', - id=11, - color=[0, 255, 0], - type='lower', - swap='right_hip'), - 12: - dict( - name='right_hip', - id=12, - color=[255, 128, 0], - type='lower', - swap='left_hip'), - 13: - dict( - name='left_knee', - id=13, - color=[0, 255, 0], - type='lower', - swap='right_knee'), - 14: - dict( - name='right_knee', - id=14, - color=[255, 128, 0], - type='lower', - swap='left_knee'), - 15: - dict( - name='left_ankle', - id=15, - color=[0, 255, 0], - type='lower', - swap='right_ankle'), - 16: - dict( - name='right_ankle', - id=16, - color=[255, 128, 0], - type='lower', - swap='left_ankle'), - 17: - dict(name='head', id=17, color=[255, 128, 0], 
type='upper', swap=''), - 18: - dict(name='neck', id=18, color=[255, 128, 0], type='upper', swap=''), - 19: - dict(name='hip', id=19, color=[255, 128, 0], type='lower', swap=''), - 20: - dict( - name='left_big_toe', - id=20, - color=[255, 128, 0], - type='lower', - swap='right_big_toe'), - 21: - dict( - name='right_big_toe', - id=21, - color=[255, 128, 0], - type='lower', - swap='left_big_toe'), - 22: - dict( - name='left_small_toe', - id=22, - color=[255, 128, 0], - type='lower', - swap='right_small_toe'), - 23: - dict( - name='right_small_toe', - id=23, - color=[255, 128, 0], - type='lower', - swap='left_small_toe'), - 24: - dict( - name='left_heel', - id=24, - color=[255, 128, 0], - type='lower', - swap='right_heel'), - 25: - dict( - name='right_heel', - id=25, - color=[255, 128, 0], - type='lower', - swap='left_heel') - }, - skeleton_info={ - 0: - dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), - 1: - dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), - 2: - dict(link=('left_hip', 'hip'), id=2, color=[0, 255, 0]), - 3: - dict(link=('right_ankle', 'right_knee'), id=3, color=[255, 128, 0]), - 4: - dict(link=('right_knee', 'right_hip'), id=4, color=[255, 128, 0]), - 5: - dict(link=('right_hip', 'hip'), id=5, color=[255, 128, 0]), - 6: - dict(link=('head', 'neck'), id=6, color=[51, 153, 255]), - 7: - dict(link=('neck', 'hip'), id=7, color=[51, 153, 255]), - 8: - dict(link=('neck', 'left_shoulder'), id=8, color=[0, 255, 0]), - 9: - dict(link=('left_shoulder', 'left_elbow'), id=9, color=[0, 255, 0]), - 10: - dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), - 11: - dict(link=('neck', 'right_shoulder'), id=11, color=[255, 128, 0]), - 12: - dict( - link=('right_shoulder', 'right_elbow'), id=12, color=[255, 128, - 0]), - 13: - dict(link=('right_elbow', 'right_wrist'), id=13, color=[255, 128, 0]), - 14: - dict(link=('left_eye', 'right_eye'), id=14, color=[51, 153, 255]), - 15: - dict(link=('nose', 'left_eye'), id=15, color=[51, 153, 255]), - 16: - dict(link=('nose', 'right_eye'), id=16, color=[51, 153, 255]), - 17: - dict(link=('left_eye', 'left_ear'), id=17, color=[51, 153, 255]), - 18: - dict(link=('right_eye', 'right_ear'), id=18, color=[51, 153, 255]), - 19: - dict(link=('left_ear', 'left_shoulder'), id=19, color=[51, 153, 255]), - 20: - dict( - link=('right_ear', 'right_shoulder'), id=20, color=[51, 153, 255]), - 21: - dict(link=('left_ankle', 'left_big_toe'), id=21, color=[0, 255, 0]), - 22: - dict(link=('left_ankle', 'left_small_toe'), id=22, color=[0, 255, 0]), - 23: - dict(link=('left_ankle', 'left_heel'), id=23, color=[0, 255, 0]), - 24: - dict( - link=('right_ankle', 'right_big_toe'), id=24, color=[255, 128, 0]), - 25: - dict( - link=('right_ankle', 'right_small_toe'), - id=25, - color=[255, 128, 0]), - 26: - dict(link=('right_ankle', 'right_heel'), id=26, color=[255, 128, 0]), - }, - # the joint_weights is modified by MMPose Team - joint_weights=[ - 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, - 1.5 - ] + [1., 1., 1.2] + [1.5] * 6, - - # 'https://github.com/Fang-Haoshu/Halpe-FullBody/blob/master/' - # 'HalpeCOCOAPI/PythonAPI/halpecocotools/cocoeval.py#L245' - sigmas=[ - 0.026, - 0.025, - 0.025, - 0.035, - 0.035, - 0.079, - 0.079, - 0.072, - 0.072, - 0.062, - 0.062, - 0.107, - 0.107, - 0.087, - 0.087, - 0.089, - 0.089, - 0.026, - 0.026, - 0.066, - 0.079, - 0.079, - 0.079, - 0.079, - 0.079, - 0.079, - ]) +dataset_info = dict( + dataset_name='halpe26', + paper_info=dict( + author='Li, Yong-Lu and Xu, Liang and Liu, 
Xinpeng and Huang, Xijie' + ' and Xu, Yue and Wang, Shiyi and Fang, Hao-Shu' + ' and Ma, Ze and Chen, Mingyang and Lu, Cewu', + title='PaStaNet: Toward Human Activity Knowledge Engine', + container='CVPR', + year='2020', + homepage='https://github.com/Fang-Haoshu/Halpe-FullBody/', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 17: + dict(name='head', id=17, color=[255, 128, 0], type='upper', swap=''), + 18: + dict(name='neck', id=18, color=[255, 128, 0], type='upper', swap=''), + 19: + dict(name='hip', id=19, color=[255, 128, 0], type='lower', swap=''), + 20: + dict( + name='left_big_toe', + id=20, + color=[255, 128, 0], + type='lower', + swap='right_big_toe'), + 21: + dict( + name='right_big_toe', + id=21, + color=[255, 128, 0], + type='lower', + swap='left_big_toe'), + 22: + dict( + name='left_small_toe', + id=22, + color=[255, 128, 0], + type='lower', + swap='right_small_toe'), + 23: + dict( + name='right_small_toe', + id=23, + color=[255, 128, 0], + type='lower', + swap='left_small_toe'), + 24: + dict( + name='left_heel', + id=24, + color=[255, 128, 0], + type='lower', + swap='right_heel'), + 25: + dict( + name='right_heel', + id=25, + color=[255, 128, 0], + type='lower', + swap='left_heel') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('left_hip', 'hip'), id=2, color=[0, 255, 0]), + 3: + dict(link=('right_ankle', 'right_knee'), id=3, color=[255, 128, 0]), + 4: + dict(link=('right_knee', 'right_hip'), id=4, color=[255, 128, 0]), + 5: + dict(link=('right_hip', 'hip'), id=5, color=[255, 128, 0]), + 6: + dict(link=('head', 'neck'), id=6, color=[51, 153, 255]), + 7: + dict(link=('neck', 'hip'), id=7, color=[51, 153, 255]), + 8: + dict(link=('neck', 'left_shoulder'), id=8, 
color=[0, 255, 0]), + 9: + dict(link=('left_shoulder', 'left_elbow'), id=9, color=[0, 255, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('neck', 'right_shoulder'), id=11, color=[255, 128, 0]), + 12: + dict( + link=('right_shoulder', 'right_elbow'), id=12, color=[255, 128, + 0]), + 13: + dict(link=('right_elbow', 'right_wrist'), id=13, color=[255, 128, 0]), + 14: + dict(link=('left_eye', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('nose', 'left_eye'), id=15, color=[51, 153, 255]), + 16: + dict(link=('nose', 'right_eye'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_eye', 'left_ear'), id=17, color=[51, 153, 255]), + 18: + dict(link=('right_eye', 'right_ear'), id=18, color=[51, 153, 255]), + 19: + dict(link=('left_ear', 'left_shoulder'), id=19, color=[51, 153, 255]), + 20: + dict( + link=('right_ear', 'right_shoulder'), id=20, color=[51, 153, 255]), + 21: + dict(link=('left_ankle', 'left_big_toe'), id=21, color=[0, 255, 0]), + 22: + dict(link=('left_ankle', 'left_small_toe'), id=22, color=[0, 255, 0]), + 23: + dict(link=('left_ankle', 'left_heel'), id=23, color=[0, 255, 0]), + 24: + dict( + link=('right_ankle', 'right_big_toe'), id=24, color=[255, 128, 0]), + 25: + dict( + link=('right_ankle', 'right_small_toe'), + id=25, + color=[255, 128, 0]), + 26: + dict(link=('right_ankle', 'right_heel'), id=26, color=[255, 128, 0]), + }, + # the joint_weights is modified by MMPose Team + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5 + ] + [1., 1., 1.2] + [1.5] * 6, + + # 'https://github.com/Fang-Haoshu/Halpe-FullBody/blob/master/' + # 'HalpeCOCOAPI/PythonAPI/halpecocotools/cocoeval.py#L245' + sigmas=[ + 0.026, + 0.025, + 0.025, + 0.035, + 0.035, + 0.079, + 0.079, + 0.072, + 0.072, + 0.062, + 0.062, + 0.107, + 0.107, + 0.087, + 0.087, + 0.089, + 0.089, + 0.026, + 0.026, + 0.066, + 0.079, + 0.079, + 0.079, + 0.079, + 0.079, + 0.079, + ]) diff --git a/configs/_base_/datasets/horse10.py b/configs/_base_/datasets/horse10.py index a485bf191b..60cec1fa50 100644 --- a/configs/_base_/datasets/horse10.py +++ b/configs/_base_/datasets/horse10.py @@ -1,201 +1,201 @@ -dataset_info = dict( - dataset_name='horse10', - paper_info=dict( - author='Mathis, Alexander and Biasi, Thomas and ' - 'Schneider, Steffen and ' - 'Yuksekgonul, Mert and Rogers, Byron and ' - 'Bethge, Matthias and ' - 'Mathis, Mackenzie W', - title='Pretraining boosts out-of-domain robustness ' - 'for pose estimation', - container='Proceedings of the IEEE/CVF Winter Conference on ' - 'Applications of Computer Vision', - year='2021', - homepage='http://www.mackenziemathislab.org/horse10', - ), - keypoint_info={ - 0: - dict(name='Nose', id=0, color=[255, 153, 255], type='upper', swap=''), - 1: - dict(name='Eye', id=1, color=[255, 153, 255], type='upper', swap=''), - 2: - dict( - name='Nearknee', - id=2, - color=[255, 102, 255], - type='upper', - swap=''), - 3: - dict( - name='Nearfrontfetlock', - id=3, - color=[255, 102, 255], - type='upper', - swap=''), - 4: - dict( - name='Nearfrontfoot', - id=4, - color=[255, 102, 255], - type='upper', - swap=''), - 5: - dict( - name='Offknee', id=5, color=[255, 102, 255], type='upper', - swap=''), - 6: - dict( - name='Offfrontfetlock', - id=6, - color=[255, 102, 255], - type='upper', - swap=''), - 7: - dict( - name='Offfrontfoot', - id=7, - color=[255, 102, 255], - type='upper', - swap=''), - 8: - dict( - name='Shoulder', - id=8, - color=[255, 153, 255], - type='upper', - swap=''), - 9: 
- dict( - name='Midshoulder', - id=9, - color=[255, 153, 255], - type='upper', - swap=''), - 10: - dict( - name='Elbow', id=10, color=[255, 153, 255], type='upper', swap=''), - 11: - dict( - name='Girth', id=11, color=[255, 153, 255], type='upper', swap=''), - 12: - dict( - name='Wither', id=12, color=[255, 153, 255], type='upper', - swap=''), - 13: - dict( - name='Nearhindhock', - id=13, - color=[255, 51, 255], - type='lower', - swap=''), - 14: - dict( - name='Nearhindfetlock', - id=14, - color=[255, 51, 255], - type='lower', - swap=''), - 15: - dict( - name='Nearhindfoot', - id=15, - color=[255, 51, 255], - type='lower', - swap=''), - 16: - dict(name='Hip', id=16, color=[255, 153, 255], type='lower', swap=''), - 17: - dict( - name='Stifle', id=17, color=[255, 153, 255], type='lower', - swap=''), - 18: - dict( - name='Offhindhock', - id=18, - color=[255, 51, 255], - type='lower', - swap=''), - 19: - dict( - name='Offhindfetlock', - id=19, - color=[255, 51, 255], - type='lower', - swap=''), - 20: - dict( - name='Offhindfoot', - id=20, - color=[255, 51, 255], - type='lower', - swap=''), - 21: - dict( - name='Ischium', - id=21, - color=[255, 153, 255], - type='lower', - swap='') - }, - skeleton_info={ - 0: - dict(link=('Nose', 'Eye'), id=0, color=[255, 153, 255]), - 1: - dict(link=('Eye', 'Wither'), id=1, color=[255, 153, 255]), - 2: - dict(link=('Wither', 'Hip'), id=2, color=[255, 153, 255]), - 3: - dict(link=('Hip', 'Ischium'), id=3, color=[255, 153, 255]), - 4: - dict(link=('Ischium', 'Stifle'), id=4, color=[255, 153, 255]), - 5: - dict(link=('Stifle', 'Girth'), id=5, color=[255, 153, 255]), - 6: - dict(link=('Girth', 'Elbow'), id=6, color=[255, 153, 255]), - 7: - dict(link=('Elbow', 'Shoulder'), id=7, color=[255, 153, 255]), - 8: - dict(link=('Shoulder', 'Midshoulder'), id=8, color=[255, 153, 255]), - 9: - dict(link=('Midshoulder', 'Wither'), id=9, color=[255, 153, 255]), - 10: - dict( - link=('Nearknee', 'Nearfrontfetlock'), - id=10, - color=[255, 102, 255]), - 11: - dict( - link=('Nearfrontfetlock', 'Nearfrontfoot'), - id=11, - color=[255, 102, 255]), - 12: - dict( - link=('Offknee', 'Offfrontfetlock'), id=12, color=[255, 102, 255]), - 13: - dict( - link=('Offfrontfetlock', 'Offfrontfoot'), - id=13, - color=[255, 102, 255]), - 14: - dict( - link=('Nearhindhock', 'Nearhindfetlock'), - id=14, - color=[255, 51, 255]), - 15: - dict( - link=('Nearhindfetlock', 'Nearhindfoot'), - id=15, - color=[255, 51, 255]), - 16: - dict( - link=('Offhindhock', 'Offhindfetlock'), - id=16, - color=[255, 51, 255]), - 17: - dict( - link=('Offhindfetlock', 'Offhindfoot'), - id=17, - color=[255, 51, 255]) - }, - joint_weights=[1.] 
* 22, - sigmas=[]) +dataset_info = dict( + dataset_name='horse10', + paper_info=dict( + author='Mathis, Alexander and Biasi, Thomas and ' + 'Schneider, Steffen and ' + 'Yuksekgonul, Mert and Rogers, Byron and ' + 'Bethge, Matthias and ' + 'Mathis, Mackenzie W', + title='Pretraining boosts out-of-domain robustness ' + 'for pose estimation', + container='Proceedings of the IEEE/CVF Winter Conference on ' + 'Applications of Computer Vision', + year='2021', + homepage='http://www.mackenziemathislab.org/horse10', + ), + keypoint_info={ + 0: + dict(name='Nose', id=0, color=[255, 153, 255], type='upper', swap=''), + 1: + dict(name='Eye', id=1, color=[255, 153, 255], type='upper', swap=''), + 2: + dict( + name='Nearknee', + id=2, + color=[255, 102, 255], + type='upper', + swap=''), + 3: + dict( + name='Nearfrontfetlock', + id=3, + color=[255, 102, 255], + type='upper', + swap=''), + 4: + dict( + name='Nearfrontfoot', + id=4, + color=[255, 102, 255], + type='upper', + swap=''), + 5: + dict( + name='Offknee', id=5, color=[255, 102, 255], type='upper', + swap=''), + 6: + dict( + name='Offfrontfetlock', + id=6, + color=[255, 102, 255], + type='upper', + swap=''), + 7: + dict( + name='Offfrontfoot', + id=7, + color=[255, 102, 255], + type='upper', + swap=''), + 8: + dict( + name='Shoulder', + id=8, + color=[255, 153, 255], + type='upper', + swap=''), + 9: + dict( + name='Midshoulder', + id=9, + color=[255, 153, 255], + type='upper', + swap=''), + 10: + dict( + name='Elbow', id=10, color=[255, 153, 255], type='upper', swap=''), + 11: + dict( + name='Girth', id=11, color=[255, 153, 255], type='upper', swap=''), + 12: + dict( + name='Wither', id=12, color=[255, 153, 255], type='upper', + swap=''), + 13: + dict( + name='Nearhindhock', + id=13, + color=[255, 51, 255], + type='lower', + swap=''), + 14: + dict( + name='Nearhindfetlock', + id=14, + color=[255, 51, 255], + type='lower', + swap=''), + 15: + dict( + name='Nearhindfoot', + id=15, + color=[255, 51, 255], + type='lower', + swap=''), + 16: + dict(name='Hip', id=16, color=[255, 153, 255], type='lower', swap=''), + 17: + dict( + name='Stifle', id=17, color=[255, 153, 255], type='lower', + swap=''), + 18: + dict( + name='Offhindhock', + id=18, + color=[255, 51, 255], + type='lower', + swap=''), + 19: + dict( + name='Offhindfetlock', + id=19, + color=[255, 51, 255], + type='lower', + swap=''), + 20: + dict( + name='Offhindfoot', + id=20, + color=[255, 51, 255], + type='lower', + swap=''), + 21: + dict( + name='Ischium', + id=21, + color=[255, 153, 255], + type='lower', + swap='') + }, + skeleton_info={ + 0: + dict(link=('Nose', 'Eye'), id=0, color=[255, 153, 255]), + 1: + dict(link=('Eye', 'Wither'), id=1, color=[255, 153, 255]), + 2: + dict(link=('Wither', 'Hip'), id=2, color=[255, 153, 255]), + 3: + dict(link=('Hip', 'Ischium'), id=3, color=[255, 153, 255]), + 4: + dict(link=('Ischium', 'Stifle'), id=4, color=[255, 153, 255]), + 5: + dict(link=('Stifle', 'Girth'), id=5, color=[255, 153, 255]), + 6: + dict(link=('Girth', 'Elbow'), id=6, color=[255, 153, 255]), + 7: + dict(link=('Elbow', 'Shoulder'), id=7, color=[255, 153, 255]), + 8: + dict(link=('Shoulder', 'Midshoulder'), id=8, color=[255, 153, 255]), + 9: + dict(link=('Midshoulder', 'Wither'), id=9, color=[255, 153, 255]), + 10: + dict( + link=('Nearknee', 'Nearfrontfetlock'), + id=10, + color=[255, 102, 255]), + 11: + dict( + link=('Nearfrontfetlock', 'Nearfrontfoot'), + id=11, + color=[255, 102, 255]), + 12: + dict( + link=('Offknee', 'Offfrontfetlock'), id=12, color=[255, 102, 255]), + 13: + dict( 
+ link=('Offfrontfetlock', 'Offfrontfoot'), + id=13, + color=[255, 102, 255]), + 14: + dict( + link=('Nearhindhock', 'Nearhindfetlock'), + id=14, + color=[255, 51, 255]), + 15: + dict( + link=('Nearhindfetlock', 'Nearhindfoot'), + id=15, + color=[255, 51, 255]), + 16: + dict( + link=('Offhindhock', 'Offhindfetlock'), + id=16, + color=[255, 51, 255]), + 17: + dict( + link=('Offhindfetlock', 'Offhindfoot'), + id=17, + color=[255, 51, 255]) + }, + joint_weights=[1.] * 22, + sigmas=[]) diff --git a/configs/_base_/datasets/humanart.py b/configs/_base_/datasets/humanart.py index b549269b69..8054926de0 100644 --- a/configs/_base_/datasets/humanart.py +++ b/configs/_base_/datasets/humanart.py @@ -1,181 +1,181 @@ -dataset_info = dict( - dataset_name='Human-Art', - paper_info=dict( - author='Ju, Xuan and Zeng, Ailing and ' - 'Wang, Jianan and Xu, Qiang and Zhang, Lei', - title='Human-Art: A Versatile Human-Centric Dataset ' - 'Bridging Natural and Artificial Scenes', - container='Proceedings of the IEEE/CVF Conference on ' - 'Computer Vision and Pattern Recognition', - year='2023', - homepage='https://idea-research.github.io/HumanArt/', - ), - keypoint_info={ - 0: - dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), - 1: - dict( - name='left_eye', - id=1, - color=[51, 153, 255], - type='upper', - swap='right_eye'), - 2: - dict( - name='right_eye', - id=2, - color=[51, 153, 255], - type='upper', - swap='left_eye'), - 3: - dict( - name='left_ear', - id=3, - color=[51, 153, 255], - type='upper', - swap='right_ear'), - 4: - dict( - name='right_ear', - id=4, - color=[51, 153, 255], - type='upper', - swap='left_ear'), - 5: - dict( - name='left_shoulder', - id=5, - color=[0, 255, 0], - type='upper', - swap='right_shoulder'), - 6: - dict( - name='right_shoulder', - id=6, - color=[255, 128, 0], - type='upper', - swap='left_shoulder'), - 7: - dict( - name='left_elbow', - id=7, - color=[0, 255, 0], - type='upper', - swap='right_elbow'), - 8: - dict( - name='right_elbow', - id=8, - color=[255, 128, 0], - type='upper', - swap='left_elbow'), - 9: - dict( - name='left_wrist', - id=9, - color=[0, 255, 0], - type='upper', - swap='right_wrist'), - 10: - dict( - name='right_wrist', - id=10, - color=[255, 128, 0], - type='upper', - swap='left_wrist'), - 11: - dict( - name='left_hip', - id=11, - color=[0, 255, 0], - type='lower', - swap='right_hip'), - 12: - dict( - name='right_hip', - id=12, - color=[255, 128, 0], - type='lower', - swap='left_hip'), - 13: - dict( - name='left_knee', - id=13, - color=[0, 255, 0], - type='lower', - swap='right_knee'), - 14: - dict( - name='right_knee', - id=14, - color=[255, 128, 0], - type='lower', - swap='left_knee'), - 15: - dict( - name='left_ankle', - id=15, - color=[0, 255, 0], - type='lower', - swap='right_ankle'), - 16: - dict( - name='right_ankle', - id=16, - color=[255, 128, 0], - type='lower', - swap='left_ankle') - }, - skeleton_info={ - 0: - dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), - 1: - dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), - 2: - dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), - 3: - dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), - 4: - dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), - 5: - dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), - 6: - dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), - 7: - dict( - link=('left_shoulder', 'right_shoulder'), - id=7, - color=[51, 153, 255]), - 8: - 
dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), - 9: - dict( - link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), - 10: - dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), - 11: - dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), - 12: - dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), - 13: - dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), - 14: - dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), - 15: - dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), - 16: - dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), - 17: - dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), - 18: - dict( - link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]) - }, - joint_weights=[ - 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, - 1.5 - ], - sigmas=[ - 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, - 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089 - ]) +dataset_info = dict( + dataset_name='Human-Art', + paper_info=dict( + author='Ju, Xuan and Zeng, Ailing and ' + 'Wang, Jianan and Xu, Qiang and Zhang, Lei', + title='Human-Art: A Versatile Human-Centric Dataset ' + 'Bridging Natural and Artificial Scenes', + container='Proceedings of the IEEE/CVF Conference on ' + 'Computer Vision and Pattern Recognition', + year='2023', + homepage='https://idea-research.github.io/HumanArt/', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), 
id=3, color=[255, 128, 0]), + 4: + dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'right_shoulder'), + id=7, + color=[51, 153, 255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), + 13: + dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), + 14: + dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), + 16: + dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), + 18: + dict( + link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]) + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5 + ], + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089 + ]) diff --git a/configs/_base_/datasets/humanart_aic.py b/configs/_base_/datasets/humanart_aic.py index e999427536..573e9a05f8 100644 --- a/configs/_base_/datasets/humanart_aic.py +++ b/configs/_base_/datasets/humanart_aic.py @@ -1,205 +1,205 @@ -dataset_info = dict( - dataset_name='humanart', - paper_info=[ - dict( - author='Ju, Xuan and Zeng, Ailing and ' - 'Wang, Jianan and Xu, Qiang and Zhang, ' - 'Lei', - title='Human-Art: A Versatile Human-Centric Dataset ' - 'Bridging Natural and Artificial Scenes', - container='CVPR', - year='2023', - homepage='https://idea-research.github.io/HumanArt/', - ), - dict( - author='Wu, Jiahong and Zheng, He and Zhao, Bo and ' - 'Li, Yixin and Yan, Baoming and Liang, Rui and ' - 'Wang, Wenjia and Zhou, Shipei and Lin, Guosen and ' - 'Fu, Yanwei and others', - title='Ai challenger: A large-scale dataset for going ' - 'deeper in image understanding', - container='arXiv', - year='2017', - homepage='https://github.com/AIChallenger/AI_Challenger_2017', - ), - ], - keypoint_info={ - 0: - dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), - 1: - dict( - name='left_eye', - id=1, - color=[51, 153, 255], - type='upper', - swap='right_eye'), - 2: - dict( - name='right_eye', - id=2, - color=[51, 153, 255], - type='upper', - swap='left_eye'), - 3: - dict( - name='left_ear', - id=3, - color=[51, 153, 255], - type='upper', - swap='right_ear'), - 4: - dict( - name='right_ear', - id=4, - color=[51, 153, 255], - type='upper', - swap='left_ear'), - 5: - dict( - name='left_shoulder', - id=5, - color=[0, 255, 0], - type='upper', - swap='right_shoulder'), - 6: - dict( - name='right_shoulder', - id=6, - color=[255, 128, 0], - type='upper', - swap='left_shoulder'), - 7: - dict( - name='left_elbow', - id=7, - color=[0, 255, 0], - type='upper', - swap='right_elbow'), - 8: - dict( - name='right_elbow', - id=8, - color=[255, 128, 0], - type='upper', - swap='left_elbow'), - 9: - dict( - name='left_wrist', - id=9, - color=[0, 255, 0], - type='upper', - swap='right_wrist'), - 10: - dict( - name='right_wrist', - id=10, - color=[255, 128, 0], - type='upper', - 
swap='left_wrist'), - 11: - dict( - name='left_hip', - id=11, - color=[0, 255, 0], - type='lower', - swap='right_hip'), - 12: - dict( - name='right_hip', - id=12, - color=[255, 128, 0], - type='lower', - swap='left_hip'), - 13: - dict( - name='left_knee', - id=13, - color=[0, 255, 0], - type='lower', - swap='right_knee'), - 14: - dict( - name='right_knee', - id=14, - color=[255, 128, 0], - type='lower', - swap='left_knee'), - 15: - dict( - name='left_ankle', - id=15, - color=[0, 255, 0], - type='lower', - swap='right_ankle'), - 16: - dict( - name='right_ankle', - id=16, - color=[255, 128, 0], - type='lower', - swap='left_ankle'), - 17: - dict( - name='head_top', - id=17, - color=[51, 153, 255], - type='upper', - swap=''), - 18: - dict(name='neck', id=18, color=[51, 153, 255], type='upper', swap='') - }, - skeleton_info={ - 0: - dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), - 1: - dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), - 2: - dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), - 3: - dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), - 4: - dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), - 5: - dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), - 6: - dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), - 7: - dict( - link=('left_shoulder', 'right_shoulder'), - id=7, - color=[51, 153, 255]), - 8: - dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), - 9: - dict( - link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), - 10: - dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), - 11: - dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), - 12: - dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), - 13: - dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), - 14: - dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), - 15: - dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), - 16: - dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), - 17: - dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), - 18: - dict( - link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]), - 19: - dict(link=('head_top', 'neck'), id=11, color=[51, 153, 255]), - }, - joint_weights=[ - 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, - 1.5, 1.5 - ], - sigmas=[ - 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, - 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089, 0.026, 0.026 - ]) +dataset_info = dict( + dataset_name='humanart', + paper_info=[ + dict( + author='Ju, Xuan and Zeng, Ailing and ' + 'Wang, Jianan and Xu, Qiang and Zhang, ' + 'Lei', + title='Human-Art: A Versatile Human-Centric Dataset ' + 'Bridging Natural and Artificial Scenes', + container='CVPR', + year='2023', + homepage='https://idea-research.github.io/HumanArt/', + ), + dict( + author='Wu, Jiahong and Zheng, He and Zhao, Bo and ' + 'Li, Yixin and Yan, Baoming and Liang, Rui and ' + 'Wang, Wenjia and Zhou, Shipei and Lin, Guosen and ' + 'Fu, Yanwei and others', + title='Ai challenger: A large-scale dataset for going ' + 'deeper in image understanding', + container='arXiv', + year='2017', + homepage='https://github.com/AIChallenger/AI_Challenger_2017', + ), + ], + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + 
type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 17: + dict( + name='head_top', + id=17, + color=[51, 153, 255], + type='upper', + swap=''), + 18: + dict(name='neck', id=18, color=[51, 153, 255], type='upper', swap='') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'right_shoulder'), + id=7, + color=[51, 153, 255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), + 13: + dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), + 14: + dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), + 16: + dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), + 18: + dict( + link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]), + 19: + dict(link=('head_top', 'neck'), id=11, color=[51, 153, 255]), + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5, 1.5 + ], + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089, 
0.026, 0.026 + ]) diff --git a/configs/_base_/datasets/interhand2d.py b/configs/_base_/datasets/interhand2d.py index 0134f07de5..e60dfc795a 100644 --- a/configs/_base_/datasets/interhand2d.py +++ b/configs/_base_/datasets/interhand2d.py @@ -1,142 +1,142 @@ -dataset_info = dict( - dataset_name='interhand2d', - paper_info=dict( - author='Moon, Gyeongsik and Yu, Shoou-I and Wen, He and ' - 'Shiratori, Takaaki and Lee, Kyoung Mu', - title='InterHand2.6M: A dataset and baseline for 3D ' - 'interacting hand pose estimation from a single RGB image', - container='arXiv', - year='2020', - homepage='https://mks0601.github.io/InterHand2.6M/', - ), - keypoint_info={ - 0: - dict(name='thumb4', id=0, color=[255, 128, 0], type='', swap=''), - 1: - dict(name='thumb3', id=1, color=[255, 128, 0], type='', swap=''), - 2: - dict(name='thumb2', id=2, color=[255, 128, 0], type='', swap=''), - 3: - dict(name='thumb1', id=3, color=[255, 128, 0], type='', swap=''), - 4: - dict( - name='forefinger4', id=4, color=[255, 153, 255], type='', swap=''), - 5: - dict( - name='forefinger3', id=5, color=[255, 153, 255], type='', swap=''), - 6: - dict( - name='forefinger2', id=6, color=[255, 153, 255], type='', swap=''), - 7: - dict( - name='forefinger1', id=7, color=[255, 153, 255], type='', swap=''), - 8: - dict( - name='middle_finger4', - id=8, - color=[102, 178, 255], - type='', - swap=''), - 9: - dict( - name='middle_finger3', - id=9, - color=[102, 178, 255], - type='', - swap=''), - 10: - dict( - name='middle_finger2', - id=10, - color=[102, 178, 255], - type='', - swap=''), - 11: - dict( - name='middle_finger1', - id=11, - color=[102, 178, 255], - type='', - swap=''), - 12: - dict( - name='ring_finger4', id=12, color=[255, 51, 51], type='', swap=''), - 13: - dict( - name='ring_finger3', id=13, color=[255, 51, 51], type='', swap=''), - 14: - dict( - name='ring_finger2', id=14, color=[255, 51, 51], type='', swap=''), - 15: - dict( - name='ring_finger1', id=15, color=[255, 51, 51], type='', swap=''), - 16: - dict(name='pinky_finger4', id=16, color=[0, 255, 0], type='', swap=''), - 17: - dict(name='pinky_finger3', id=17, color=[0, 255, 0], type='', swap=''), - 18: - dict(name='pinky_finger2', id=18, color=[0, 255, 0], type='', swap=''), - 19: - dict(name='pinky_finger1', id=19, color=[0, 255, 0], type='', swap=''), - 20: - dict(name='wrist', id=20, color=[255, 255, 255], type='', swap='') - }, - skeleton_info={ - 0: - dict(link=('wrist', 'thumb1'), id=0, color=[255, 128, 0]), - 1: - dict(link=('thumb1', 'thumb2'), id=1, color=[255, 128, 0]), - 2: - dict(link=('thumb2', 'thumb3'), id=2, color=[255, 128, 0]), - 3: - dict(link=('thumb3', 'thumb4'), id=3, color=[255, 128, 0]), - 4: - dict(link=('wrist', 'forefinger1'), id=4, color=[255, 153, 255]), - 5: - dict(link=('forefinger1', 'forefinger2'), id=5, color=[255, 153, 255]), - 6: - dict(link=('forefinger2', 'forefinger3'), id=6, color=[255, 153, 255]), - 7: - dict(link=('forefinger3', 'forefinger4'), id=7, color=[255, 153, 255]), - 8: - dict(link=('wrist', 'middle_finger1'), id=8, color=[102, 178, 255]), - 9: - dict( - link=('middle_finger1', 'middle_finger2'), - id=9, - color=[102, 178, 255]), - 10: - dict( - link=('middle_finger2', 'middle_finger3'), - id=10, - color=[102, 178, 255]), - 11: - dict( - link=('middle_finger3', 'middle_finger4'), - id=11, - color=[102, 178, 255]), - 12: - dict(link=('wrist', 'ring_finger1'), id=12, color=[255, 51, 51]), - 13: - dict( - link=('ring_finger1', 'ring_finger2'), id=13, color=[255, 51, 51]), - 14: - dict( - link=('ring_finger2', 
'ring_finger3'), id=14, color=[255, 51, 51]), - 15: - dict( - link=('ring_finger3', 'ring_finger4'), id=15, color=[255, 51, 51]), - 16: - dict(link=('wrist', 'pinky_finger1'), id=16, color=[0, 255, 0]), - 17: - dict( - link=('pinky_finger1', 'pinky_finger2'), id=17, color=[0, 255, 0]), - 18: - dict( - link=('pinky_finger2', 'pinky_finger3'), id=18, color=[0, 255, 0]), - 19: - dict( - link=('pinky_finger3', 'pinky_finger4'), id=19, color=[0, 255, 0]) - }, - joint_weights=[1.] * 21, - sigmas=[]) +dataset_info = dict( + dataset_name='interhand2d', + paper_info=dict( + author='Moon, Gyeongsik and Yu, Shoou-I and Wen, He and ' + 'Shiratori, Takaaki and Lee, Kyoung Mu', + title='InterHand2.6M: A dataset and baseline for 3D ' + 'interacting hand pose estimation from a single RGB image', + container='arXiv', + year='2020', + homepage='https://mks0601.github.io/InterHand2.6M/', + ), + keypoint_info={ + 0: + dict(name='thumb4', id=0, color=[255, 128, 0], type='', swap=''), + 1: + dict(name='thumb3', id=1, color=[255, 128, 0], type='', swap=''), + 2: + dict(name='thumb2', id=2, color=[255, 128, 0], type='', swap=''), + 3: + dict(name='thumb1', id=3, color=[255, 128, 0], type='', swap=''), + 4: + dict( + name='forefinger4', id=4, color=[255, 153, 255], type='', swap=''), + 5: + dict( + name='forefinger3', id=5, color=[255, 153, 255], type='', swap=''), + 6: + dict( + name='forefinger2', id=6, color=[255, 153, 255], type='', swap=''), + 7: + dict( + name='forefinger1', id=7, color=[255, 153, 255], type='', swap=''), + 8: + dict( + name='middle_finger4', + id=8, + color=[102, 178, 255], + type='', + swap=''), + 9: + dict( + name='middle_finger3', + id=9, + color=[102, 178, 255], + type='', + swap=''), + 10: + dict( + name='middle_finger2', + id=10, + color=[102, 178, 255], + type='', + swap=''), + 11: + dict( + name='middle_finger1', + id=11, + color=[102, 178, 255], + type='', + swap=''), + 12: + dict( + name='ring_finger4', id=12, color=[255, 51, 51], type='', swap=''), + 13: + dict( + name='ring_finger3', id=13, color=[255, 51, 51], type='', swap=''), + 14: + dict( + name='ring_finger2', id=14, color=[255, 51, 51], type='', swap=''), + 15: + dict( + name='ring_finger1', id=15, color=[255, 51, 51], type='', swap=''), + 16: + dict(name='pinky_finger4', id=16, color=[0, 255, 0], type='', swap=''), + 17: + dict(name='pinky_finger3', id=17, color=[0, 255, 0], type='', swap=''), + 18: + dict(name='pinky_finger2', id=18, color=[0, 255, 0], type='', swap=''), + 19: + dict(name='pinky_finger1', id=19, color=[0, 255, 0], type='', swap=''), + 20: + dict(name='wrist', id=20, color=[255, 255, 255], type='', swap='') + }, + skeleton_info={ + 0: + dict(link=('wrist', 'thumb1'), id=0, color=[255, 128, 0]), + 1: + dict(link=('thumb1', 'thumb2'), id=1, color=[255, 128, 0]), + 2: + dict(link=('thumb2', 'thumb3'), id=2, color=[255, 128, 0]), + 3: + dict(link=('thumb3', 'thumb4'), id=3, color=[255, 128, 0]), + 4: + dict(link=('wrist', 'forefinger1'), id=4, color=[255, 153, 255]), + 5: + dict(link=('forefinger1', 'forefinger2'), id=5, color=[255, 153, 255]), + 6: + dict(link=('forefinger2', 'forefinger3'), id=6, color=[255, 153, 255]), + 7: + dict(link=('forefinger3', 'forefinger4'), id=7, color=[255, 153, 255]), + 8: + dict(link=('wrist', 'middle_finger1'), id=8, color=[102, 178, 255]), + 9: + dict( + link=('middle_finger1', 'middle_finger2'), + id=9, + color=[102, 178, 255]), + 10: + dict( + link=('middle_finger2', 'middle_finger3'), + id=10, + color=[102, 178, 255]), + 11: + dict( + link=('middle_finger3', 
'middle_finger4'), + id=11, + color=[102, 178, 255]), + 12: + dict(link=('wrist', 'ring_finger1'), id=12, color=[255, 51, 51]), + 13: + dict( + link=('ring_finger1', 'ring_finger2'), id=13, color=[255, 51, 51]), + 14: + dict( + link=('ring_finger2', 'ring_finger3'), id=14, color=[255, 51, 51]), + 15: + dict( + link=('ring_finger3', 'ring_finger4'), id=15, color=[255, 51, 51]), + 16: + dict(link=('wrist', 'pinky_finger1'), id=16, color=[0, 255, 0]), + 17: + dict( + link=('pinky_finger1', 'pinky_finger2'), id=17, color=[0, 255, 0]), + 18: + dict( + link=('pinky_finger2', 'pinky_finger3'), id=18, color=[0, 255, 0]), + 19: + dict( + link=('pinky_finger3', 'pinky_finger4'), id=19, color=[0, 255, 0]) + }, + joint_weights=[1.] * 21, + sigmas=[]) diff --git a/configs/_base_/datasets/interhand3d.py b/configs/_base_/datasets/interhand3d.py index e2bd8121c2..26b7ccf71a 100644 --- a/configs/_base_/datasets/interhand3d.py +++ b/configs/_base_/datasets/interhand3d.py @@ -1,487 +1,487 @@ -dataset_info = dict( - dataset_name='interhand3d', - paper_info=dict( - author='Moon, Gyeongsik and Yu, Shoou-I and Wen, He and ' - 'Shiratori, Takaaki and Lee, Kyoung Mu', - title='InterHand2.6M: A dataset and baseline for 3D ' - 'interacting hand pose estimation from a single RGB image', - container='arXiv', - year='2020', - homepage='https://mks0601.github.io/InterHand2.6M/', - ), - keypoint_info={ - 0: - dict( - name='right_thumb4', - id=0, - color=[255, 128, 0], - type='', - swap='left_thumb4'), - 1: - dict( - name='right_thumb3', - id=1, - color=[255, 128, 0], - type='', - swap='left_thumb3'), - 2: - dict( - name='right_thumb2', - id=2, - color=[255, 128, 0], - type='', - swap='left_thumb2'), - 3: - dict( - name='right_thumb1', - id=3, - color=[255, 128, 0], - type='', - swap='left_thumb1'), - 4: - dict( - name='right_forefinger4', - id=4, - color=[255, 153, 255], - type='', - swap='left_forefinger4'), - 5: - dict( - name='right_forefinger3', - id=5, - color=[255, 153, 255], - type='', - swap='left_forefinger3'), - 6: - dict( - name='right_forefinger2', - id=6, - color=[255, 153, 255], - type='', - swap='left_forefinger2'), - 7: - dict( - name='right_forefinger1', - id=7, - color=[255, 153, 255], - type='', - swap='left_forefinger1'), - 8: - dict( - name='right_middle_finger4', - id=8, - color=[102, 178, 255], - type='', - swap='left_middle_finger4'), - 9: - dict( - name='right_middle_finger3', - id=9, - color=[102, 178, 255], - type='', - swap='left_middle_finger3'), - 10: - dict( - name='right_middle_finger2', - id=10, - color=[102, 178, 255], - type='', - swap='left_middle_finger2'), - 11: - dict( - name='right_middle_finger1', - id=11, - color=[102, 178, 255], - type='', - swap='left_middle_finger1'), - 12: - dict( - name='right_ring_finger4', - id=12, - color=[255, 51, 51], - type='', - swap='left_ring_finger4'), - 13: - dict( - name='right_ring_finger3', - id=13, - color=[255, 51, 51], - type='', - swap='left_ring_finger3'), - 14: - dict( - name='right_ring_finger2', - id=14, - color=[255, 51, 51], - type='', - swap='left_ring_finger2'), - 15: - dict( - name='right_ring_finger1', - id=15, - color=[255, 51, 51], - type='', - swap='left_ring_finger1'), - 16: - dict( - name='right_pinky_finger4', - id=16, - color=[0, 255, 0], - type='', - swap='left_pinky_finger4'), - 17: - dict( - name='right_pinky_finger3', - id=17, - color=[0, 255, 0], - type='', - swap='left_pinky_finger3'), - 18: - dict( - name='right_pinky_finger2', - id=18, - color=[0, 255, 0], - type='', - swap='left_pinky_finger2'), - 19: - dict( - 
name='right_pinky_finger1', - id=19, - color=[0, 255, 0], - type='', - swap='left_pinky_finger1'), - 20: - dict( - name='right_wrist', - id=20, - color=[255, 255, 255], - type='', - swap='left_wrist'), - 21: - dict( - name='left_thumb4', - id=21, - color=[255, 128, 0], - type='', - swap='right_thumb4'), - 22: - dict( - name='left_thumb3', - id=22, - color=[255, 128, 0], - type='', - swap='right_thumb3'), - 23: - dict( - name='left_thumb2', - id=23, - color=[255, 128, 0], - type='', - swap='right_thumb2'), - 24: - dict( - name='left_thumb1', - id=24, - color=[255, 128, 0], - type='', - swap='right_thumb1'), - 25: - dict( - name='left_forefinger4', - id=25, - color=[255, 153, 255], - type='', - swap='right_forefinger4'), - 26: - dict( - name='left_forefinger3', - id=26, - color=[255, 153, 255], - type='', - swap='right_forefinger3'), - 27: - dict( - name='left_forefinger2', - id=27, - color=[255, 153, 255], - type='', - swap='right_forefinger2'), - 28: - dict( - name='left_forefinger1', - id=28, - color=[255, 153, 255], - type='', - swap='right_forefinger1'), - 29: - dict( - name='left_middle_finger4', - id=29, - color=[102, 178, 255], - type='', - swap='right_middle_finger4'), - 30: - dict( - name='left_middle_finger3', - id=30, - color=[102, 178, 255], - type='', - swap='right_middle_finger3'), - 31: - dict( - name='left_middle_finger2', - id=31, - color=[102, 178, 255], - type='', - swap='right_middle_finger2'), - 32: - dict( - name='left_middle_finger1', - id=32, - color=[102, 178, 255], - type='', - swap='right_middle_finger1'), - 33: - dict( - name='left_ring_finger4', - id=33, - color=[255, 51, 51], - type='', - swap='right_ring_finger4'), - 34: - dict( - name='left_ring_finger3', - id=34, - color=[255, 51, 51], - type='', - swap='right_ring_finger3'), - 35: - dict( - name='left_ring_finger2', - id=35, - color=[255, 51, 51], - type='', - swap='right_ring_finger2'), - 36: - dict( - name='left_ring_finger1', - id=36, - color=[255, 51, 51], - type='', - swap='right_ring_finger1'), - 37: - dict( - name='left_pinky_finger4', - id=37, - color=[0, 255, 0], - type='', - swap='right_pinky_finger4'), - 38: - dict( - name='left_pinky_finger3', - id=38, - color=[0, 255, 0], - type='', - swap='right_pinky_finger3'), - 39: - dict( - name='left_pinky_finger2', - id=39, - color=[0, 255, 0], - type='', - swap='right_pinky_finger2'), - 40: - dict( - name='left_pinky_finger1', - id=40, - color=[0, 255, 0], - type='', - swap='right_pinky_finger1'), - 41: - dict( - name='left_wrist', - id=41, - color=[255, 255, 255], - type='', - swap='right_wrist'), - }, - skeleton_info={ - 0: - dict(link=('right_wrist', 'right_thumb1'), id=0, color=[255, 128, 0]), - 1: - dict(link=('right_thumb1', 'right_thumb2'), id=1, color=[255, 128, 0]), - 2: - dict(link=('right_thumb2', 'right_thumb3'), id=2, color=[255, 128, 0]), - 3: - dict(link=('right_thumb3', 'right_thumb4'), id=3, color=[255, 128, 0]), - 4: - dict( - link=('right_wrist', 'right_forefinger1'), - id=4, - color=[255, 153, 255]), - 5: - dict( - link=('right_forefinger1', 'right_forefinger2'), - id=5, - color=[255, 153, 255]), - 6: - dict( - link=('right_forefinger2', 'right_forefinger3'), - id=6, - color=[255, 153, 255]), - 7: - dict( - link=('right_forefinger3', 'right_forefinger4'), - id=7, - color=[255, 153, 255]), - 8: - dict( - link=('right_wrist', 'right_middle_finger1'), - id=8, - color=[102, 178, 255]), - 9: - dict( - link=('right_middle_finger1', 'right_middle_finger2'), - id=9, - color=[102, 178, 255]), - 10: - dict( - link=('right_middle_finger2', 
'right_middle_finger3'), - id=10, - color=[102, 178, 255]), - 11: - dict( - link=('right_middle_finger3', 'right_middle_finger4'), - id=11, - color=[102, 178, 255]), - 12: - dict( - link=('right_wrist', 'right_ring_finger1'), - id=12, - color=[255, 51, 51]), - 13: - dict( - link=('right_ring_finger1', 'right_ring_finger2'), - id=13, - color=[255, 51, 51]), - 14: - dict( - link=('right_ring_finger2', 'right_ring_finger3'), - id=14, - color=[255, 51, 51]), - 15: - dict( - link=('right_ring_finger3', 'right_ring_finger4'), - id=15, - color=[255, 51, 51]), - 16: - dict( - link=('right_wrist', 'right_pinky_finger1'), - id=16, - color=[0, 255, 0]), - 17: - dict( - link=('right_pinky_finger1', 'right_pinky_finger2'), - id=17, - color=[0, 255, 0]), - 18: - dict( - link=('right_pinky_finger2', 'right_pinky_finger3'), - id=18, - color=[0, 255, 0]), - 19: - dict( - link=('right_pinky_finger3', 'right_pinky_finger4'), - id=19, - color=[0, 255, 0]), - 20: - dict(link=('left_wrist', 'left_thumb1'), id=20, color=[255, 128, 0]), - 21: - dict(link=('left_thumb1', 'left_thumb2'), id=21, color=[255, 128, 0]), - 22: - dict(link=('left_thumb2', 'left_thumb3'), id=22, color=[255, 128, 0]), - 23: - dict(link=('left_thumb3', 'left_thumb4'), id=23, color=[255, 128, 0]), - 24: - dict( - link=('left_wrist', 'left_forefinger1'), - id=24, - color=[255, 153, 255]), - 25: - dict( - link=('left_forefinger1', 'left_forefinger2'), - id=25, - color=[255, 153, 255]), - 26: - dict( - link=('left_forefinger2', 'left_forefinger3'), - id=26, - color=[255, 153, 255]), - 27: - dict( - link=('left_forefinger3', 'left_forefinger4'), - id=27, - color=[255, 153, 255]), - 28: - dict( - link=('left_wrist', 'left_middle_finger1'), - id=28, - color=[102, 178, 255]), - 29: - dict( - link=('left_middle_finger1', 'left_middle_finger2'), - id=29, - color=[102, 178, 255]), - 30: - dict( - link=('left_middle_finger2', 'left_middle_finger3'), - id=30, - color=[102, 178, 255]), - 31: - dict( - link=('left_middle_finger3', 'left_middle_finger4'), - id=31, - color=[102, 178, 255]), - 32: - dict( - link=('left_wrist', 'left_ring_finger1'), - id=32, - color=[255, 51, 51]), - 33: - dict( - link=('left_ring_finger1', 'left_ring_finger2'), - id=33, - color=[255, 51, 51]), - 34: - dict( - link=('left_ring_finger2', 'left_ring_finger3'), - id=34, - color=[255, 51, 51]), - 35: - dict( - link=('left_ring_finger3', 'left_ring_finger4'), - id=35, - color=[255, 51, 51]), - 36: - dict( - link=('left_wrist', 'left_pinky_finger1'), - id=36, - color=[0, 255, 0]), - 37: - dict( - link=('left_pinky_finger1', 'left_pinky_finger2'), - id=37, - color=[0, 255, 0]), - 38: - dict( - link=('left_pinky_finger2', 'left_pinky_finger3'), - id=38, - color=[0, 255, 0]), - 39: - dict( - link=('left_pinky_finger3', 'left_pinky_finger4'), - id=39, - color=[0, 255, 0]), - }, - joint_weights=[1.] 
* 42, - sigmas=[]) +dataset_info = dict( + dataset_name='interhand3d', + paper_info=dict( + author='Moon, Gyeongsik and Yu, Shoou-I and Wen, He and ' + 'Shiratori, Takaaki and Lee, Kyoung Mu', + title='InterHand2.6M: A dataset and baseline for 3D ' + 'interacting hand pose estimation from a single RGB image', + container='arXiv', + year='2020', + homepage='https://mks0601.github.io/InterHand2.6M/', + ), + keypoint_info={ + 0: + dict( + name='right_thumb4', + id=0, + color=[255, 128, 0], + type='', + swap='left_thumb4'), + 1: + dict( + name='right_thumb3', + id=1, + color=[255, 128, 0], + type='', + swap='left_thumb3'), + 2: + dict( + name='right_thumb2', + id=2, + color=[255, 128, 0], + type='', + swap='left_thumb2'), + 3: + dict( + name='right_thumb1', + id=3, + color=[255, 128, 0], + type='', + swap='left_thumb1'), + 4: + dict( + name='right_forefinger4', + id=4, + color=[255, 153, 255], + type='', + swap='left_forefinger4'), + 5: + dict( + name='right_forefinger3', + id=5, + color=[255, 153, 255], + type='', + swap='left_forefinger3'), + 6: + dict( + name='right_forefinger2', + id=6, + color=[255, 153, 255], + type='', + swap='left_forefinger2'), + 7: + dict( + name='right_forefinger1', + id=7, + color=[255, 153, 255], + type='', + swap='left_forefinger1'), + 8: + dict( + name='right_middle_finger4', + id=8, + color=[102, 178, 255], + type='', + swap='left_middle_finger4'), + 9: + dict( + name='right_middle_finger3', + id=9, + color=[102, 178, 255], + type='', + swap='left_middle_finger3'), + 10: + dict( + name='right_middle_finger2', + id=10, + color=[102, 178, 255], + type='', + swap='left_middle_finger2'), + 11: + dict( + name='right_middle_finger1', + id=11, + color=[102, 178, 255], + type='', + swap='left_middle_finger1'), + 12: + dict( + name='right_ring_finger4', + id=12, + color=[255, 51, 51], + type='', + swap='left_ring_finger4'), + 13: + dict( + name='right_ring_finger3', + id=13, + color=[255, 51, 51], + type='', + swap='left_ring_finger3'), + 14: + dict( + name='right_ring_finger2', + id=14, + color=[255, 51, 51], + type='', + swap='left_ring_finger2'), + 15: + dict( + name='right_ring_finger1', + id=15, + color=[255, 51, 51], + type='', + swap='left_ring_finger1'), + 16: + dict( + name='right_pinky_finger4', + id=16, + color=[0, 255, 0], + type='', + swap='left_pinky_finger4'), + 17: + dict( + name='right_pinky_finger3', + id=17, + color=[0, 255, 0], + type='', + swap='left_pinky_finger3'), + 18: + dict( + name='right_pinky_finger2', + id=18, + color=[0, 255, 0], + type='', + swap='left_pinky_finger2'), + 19: + dict( + name='right_pinky_finger1', + id=19, + color=[0, 255, 0], + type='', + swap='left_pinky_finger1'), + 20: + dict( + name='right_wrist', + id=20, + color=[255, 255, 255], + type='', + swap='left_wrist'), + 21: + dict( + name='left_thumb4', + id=21, + color=[255, 128, 0], + type='', + swap='right_thumb4'), + 22: + dict( + name='left_thumb3', + id=22, + color=[255, 128, 0], + type='', + swap='right_thumb3'), + 23: + dict( + name='left_thumb2', + id=23, + color=[255, 128, 0], + type='', + swap='right_thumb2'), + 24: + dict( + name='left_thumb1', + id=24, + color=[255, 128, 0], + type='', + swap='right_thumb1'), + 25: + dict( + name='left_forefinger4', + id=25, + color=[255, 153, 255], + type='', + swap='right_forefinger4'), + 26: + dict( + name='left_forefinger3', + id=26, + color=[255, 153, 255], + type='', + swap='right_forefinger3'), + 27: + dict( + name='left_forefinger2', + id=27, + color=[255, 153, 255], + type='', + swap='right_forefinger2'), + 28: + dict( 
+ name='left_forefinger1', + id=28, + color=[255, 153, 255], + type='', + swap='right_forefinger1'), + 29: + dict( + name='left_middle_finger4', + id=29, + color=[102, 178, 255], + type='', + swap='right_middle_finger4'), + 30: + dict( + name='left_middle_finger3', + id=30, + color=[102, 178, 255], + type='', + swap='right_middle_finger3'), + 31: + dict( + name='left_middle_finger2', + id=31, + color=[102, 178, 255], + type='', + swap='right_middle_finger2'), + 32: + dict( + name='left_middle_finger1', + id=32, + color=[102, 178, 255], + type='', + swap='right_middle_finger1'), + 33: + dict( + name='left_ring_finger4', + id=33, + color=[255, 51, 51], + type='', + swap='right_ring_finger4'), + 34: + dict( + name='left_ring_finger3', + id=34, + color=[255, 51, 51], + type='', + swap='right_ring_finger3'), + 35: + dict( + name='left_ring_finger2', + id=35, + color=[255, 51, 51], + type='', + swap='right_ring_finger2'), + 36: + dict( + name='left_ring_finger1', + id=36, + color=[255, 51, 51], + type='', + swap='right_ring_finger1'), + 37: + dict( + name='left_pinky_finger4', + id=37, + color=[0, 255, 0], + type='', + swap='right_pinky_finger4'), + 38: + dict( + name='left_pinky_finger3', + id=38, + color=[0, 255, 0], + type='', + swap='right_pinky_finger3'), + 39: + dict( + name='left_pinky_finger2', + id=39, + color=[0, 255, 0], + type='', + swap='right_pinky_finger2'), + 40: + dict( + name='left_pinky_finger1', + id=40, + color=[0, 255, 0], + type='', + swap='right_pinky_finger1'), + 41: + dict( + name='left_wrist', + id=41, + color=[255, 255, 255], + type='', + swap='right_wrist'), + }, + skeleton_info={ + 0: + dict(link=('right_wrist', 'right_thumb1'), id=0, color=[255, 128, 0]), + 1: + dict(link=('right_thumb1', 'right_thumb2'), id=1, color=[255, 128, 0]), + 2: + dict(link=('right_thumb2', 'right_thumb3'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_thumb3', 'right_thumb4'), id=3, color=[255, 128, 0]), + 4: + dict( + link=('right_wrist', 'right_forefinger1'), + id=4, + color=[255, 153, 255]), + 5: + dict( + link=('right_forefinger1', 'right_forefinger2'), + id=5, + color=[255, 153, 255]), + 6: + dict( + link=('right_forefinger2', 'right_forefinger3'), + id=6, + color=[255, 153, 255]), + 7: + dict( + link=('right_forefinger3', 'right_forefinger4'), + id=7, + color=[255, 153, 255]), + 8: + dict( + link=('right_wrist', 'right_middle_finger1'), + id=8, + color=[102, 178, 255]), + 9: + dict( + link=('right_middle_finger1', 'right_middle_finger2'), + id=9, + color=[102, 178, 255]), + 10: + dict( + link=('right_middle_finger2', 'right_middle_finger3'), + id=10, + color=[102, 178, 255]), + 11: + dict( + link=('right_middle_finger3', 'right_middle_finger4'), + id=11, + color=[102, 178, 255]), + 12: + dict( + link=('right_wrist', 'right_ring_finger1'), + id=12, + color=[255, 51, 51]), + 13: + dict( + link=('right_ring_finger1', 'right_ring_finger2'), + id=13, + color=[255, 51, 51]), + 14: + dict( + link=('right_ring_finger2', 'right_ring_finger3'), + id=14, + color=[255, 51, 51]), + 15: + dict( + link=('right_ring_finger3', 'right_ring_finger4'), + id=15, + color=[255, 51, 51]), + 16: + dict( + link=('right_wrist', 'right_pinky_finger1'), + id=16, + color=[0, 255, 0]), + 17: + dict( + link=('right_pinky_finger1', 'right_pinky_finger2'), + id=17, + color=[0, 255, 0]), + 18: + dict( + link=('right_pinky_finger2', 'right_pinky_finger3'), + id=18, + color=[0, 255, 0]), + 19: + dict( + link=('right_pinky_finger3', 'right_pinky_finger4'), + id=19, + color=[0, 255, 0]), + 20: + 
dict(link=('left_wrist', 'left_thumb1'), id=20, color=[255, 128, 0]), + 21: + dict(link=('left_thumb1', 'left_thumb2'), id=21, color=[255, 128, 0]), + 22: + dict(link=('left_thumb2', 'left_thumb3'), id=22, color=[255, 128, 0]), + 23: + dict(link=('left_thumb3', 'left_thumb4'), id=23, color=[255, 128, 0]), + 24: + dict( + link=('left_wrist', 'left_forefinger1'), + id=24, + color=[255, 153, 255]), + 25: + dict( + link=('left_forefinger1', 'left_forefinger2'), + id=25, + color=[255, 153, 255]), + 26: + dict( + link=('left_forefinger2', 'left_forefinger3'), + id=26, + color=[255, 153, 255]), + 27: + dict( + link=('left_forefinger3', 'left_forefinger4'), + id=27, + color=[255, 153, 255]), + 28: + dict( + link=('left_wrist', 'left_middle_finger1'), + id=28, + color=[102, 178, 255]), + 29: + dict( + link=('left_middle_finger1', 'left_middle_finger2'), + id=29, + color=[102, 178, 255]), + 30: + dict( + link=('left_middle_finger2', 'left_middle_finger3'), + id=30, + color=[102, 178, 255]), + 31: + dict( + link=('left_middle_finger3', 'left_middle_finger4'), + id=31, + color=[102, 178, 255]), + 32: + dict( + link=('left_wrist', 'left_ring_finger1'), + id=32, + color=[255, 51, 51]), + 33: + dict( + link=('left_ring_finger1', 'left_ring_finger2'), + id=33, + color=[255, 51, 51]), + 34: + dict( + link=('left_ring_finger2', 'left_ring_finger3'), + id=34, + color=[255, 51, 51]), + 35: + dict( + link=('left_ring_finger3', 'left_ring_finger4'), + id=35, + color=[255, 51, 51]), + 36: + dict( + link=('left_wrist', 'left_pinky_finger1'), + id=36, + color=[0, 255, 0]), + 37: + dict( + link=('left_pinky_finger1', 'left_pinky_finger2'), + id=37, + color=[0, 255, 0]), + 38: + dict( + link=('left_pinky_finger2', 'left_pinky_finger3'), + id=38, + color=[0, 255, 0]), + 39: + dict( + link=('left_pinky_finger3', 'left_pinky_finger4'), + id=39, + color=[0, 255, 0]), + }, + joint_weights=[1.] * 42, + sigmas=[]) diff --git a/configs/_base_/datasets/jhmdb.py b/configs/_base_/datasets/jhmdb.py index 1b37488498..1f931fc9f1 100644 --- a/configs/_base_/datasets/jhmdb.py +++ b/configs/_base_/datasets/jhmdb.py @@ -1,129 +1,129 @@ -dataset_info = dict( - dataset_name='jhmdb', - paper_info=dict( - author='H. Jhuang and J. Gall and S. Zuffi and ' - 'C. Schmid and M. J. Black', - title='Towards understanding action recognition', - container='International Conf. 
on Computer Vision (ICCV)', - year='2013', - homepage='http://jhmdb.is.tue.mpg.de/dataset', - ), - keypoint_info={ - 0: - dict(name='neck', id=0, color=[255, 128, 0], type='upper', swap=''), - 1: - dict(name='belly', id=1, color=[255, 128, 0], type='upper', swap=''), - 2: - dict(name='head', id=2, color=[255, 128, 0], type='upper', swap=''), - 3: - dict( - name='right_shoulder', - id=3, - color=[0, 255, 0], - type='upper', - swap='left_shoulder'), - 4: - dict( - name='left_shoulder', - id=4, - color=[0, 255, 0], - type='upper', - swap='right_shoulder'), - 5: - dict( - name='right_hip', - id=5, - color=[0, 255, 0], - type='lower', - swap='left_hip'), - 6: - dict( - name='left_hip', - id=6, - color=[51, 153, 255], - type='lower', - swap='right_hip'), - 7: - dict( - name='right_elbow', - id=7, - color=[51, 153, 255], - type='upper', - swap='left_elbow'), - 8: - dict( - name='left_elbow', - id=8, - color=[51, 153, 255], - type='upper', - swap='right_elbow'), - 9: - dict( - name='right_knee', - id=9, - color=[51, 153, 255], - type='lower', - swap='left_knee'), - 10: - dict( - name='left_knee', - id=10, - color=[255, 128, 0], - type='lower', - swap='right_knee'), - 11: - dict( - name='right_wrist', - id=11, - color=[255, 128, 0], - type='upper', - swap='left_wrist'), - 12: - dict( - name='left_wrist', - id=12, - color=[255, 128, 0], - type='upper', - swap='right_wrist'), - 13: - dict( - name='right_ankle', - id=13, - color=[0, 255, 0], - type='lower', - swap='left_ankle'), - 14: - dict( - name='left_ankle', - id=14, - color=[0, 255, 0], - type='lower', - swap='right_ankle') - }, - skeleton_info={ - 0: dict(link=('right_ankle', 'right_knee'), id=0, color=[255, 128, 0]), - 1: dict(link=('right_knee', 'right_hip'), id=1, color=[255, 128, 0]), - 2: dict(link=('right_hip', 'belly'), id=2, color=[255, 128, 0]), - 3: dict(link=('belly', 'left_hip'), id=3, color=[0, 255, 0]), - 4: dict(link=('left_hip', 'left_knee'), id=4, color=[0, 255, 0]), - 5: dict(link=('left_knee', 'left_ankle'), id=5, color=[0, 255, 0]), - 6: dict(link=('belly', 'neck'), id=6, color=[51, 153, 255]), - 7: dict(link=('neck', 'head'), id=7, color=[51, 153, 255]), - 8: dict(link=('neck', 'right_shoulder'), id=8, color=[255, 128, 0]), - 9: dict( - link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), - 10: - dict(link=('right_elbow', 'right_wrist'), id=10, color=[255, 128, 0]), - 11: dict(link=('neck', 'left_shoulder'), id=11, color=[0, 255, 0]), - 12: - dict(link=('left_shoulder', 'left_elbow'), id=12, color=[0, 255, 0]), - 13: dict(link=('left_elbow', 'left_wrist'), id=13, color=[0, 255, 0]) - }, - joint_weights=[ - 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.2, 1.2, 1.5, 1.5, 1.5, 1.5 - ], - # Adapted from COCO dataset. - sigmas=[ - 0.025, 0.107, 0.025, 0.079, 0.079, 0.107, 0.107, 0.072, 0.072, 0.087, - 0.087, 0.062, 0.062, 0.089, 0.089 - ]) +dataset_info = dict( + dataset_name='jhmdb', + paper_info=dict( + author='H. Jhuang and J. Gall and S. Zuffi and ' + 'C. Schmid and M. J. Black', + title='Towards understanding action recognition', + container='International Conf. 
on Computer Vision (ICCV)', + year='2013', + homepage='http://jhmdb.is.tue.mpg.de/dataset', + ), + keypoint_info={ + 0: + dict(name='neck', id=0, color=[255, 128, 0], type='upper', swap=''), + 1: + dict(name='belly', id=1, color=[255, 128, 0], type='upper', swap=''), + 2: + dict(name='head', id=2, color=[255, 128, 0], type='upper', swap=''), + 3: + dict( + name='right_shoulder', + id=3, + color=[0, 255, 0], + type='upper', + swap='left_shoulder'), + 4: + dict( + name='left_shoulder', + id=4, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 5: + dict( + name='right_hip', + id=5, + color=[0, 255, 0], + type='lower', + swap='left_hip'), + 6: + dict( + name='left_hip', + id=6, + color=[51, 153, 255], + type='lower', + swap='right_hip'), + 7: + dict( + name='right_elbow', + id=7, + color=[51, 153, 255], + type='upper', + swap='left_elbow'), + 8: + dict( + name='left_elbow', + id=8, + color=[51, 153, 255], + type='upper', + swap='right_elbow'), + 9: + dict( + name='right_knee', + id=9, + color=[51, 153, 255], + type='lower', + swap='left_knee'), + 10: + dict( + name='left_knee', + id=10, + color=[255, 128, 0], + type='lower', + swap='right_knee'), + 11: + dict( + name='right_wrist', + id=11, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 12: + dict( + name='left_wrist', + id=12, + color=[255, 128, 0], + type='upper', + swap='right_wrist'), + 13: + dict( + name='right_ankle', + id=13, + color=[0, 255, 0], + type='lower', + swap='left_ankle'), + 14: + dict( + name='left_ankle', + id=14, + color=[0, 255, 0], + type='lower', + swap='right_ankle') + }, + skeleton_info={ + 0: dict(link=('right_ankle', 'right_knee'), id=0, color=[255, 128, 0]), + 1: dict(link=('right_knee', 'right_hip'), id=1, color=[255, 128, 0]), + 2: dict(link=('right_hip', 'belly'), id=2, color=[255, 128, 0]), + 3: dict(link=('belly', 'left_hip'), id=3, color=[0, 255, 0]), + 4: dict(link=('left_hip', 'left_knee'), id=4, color=[0, 255, 0]), + 5: dict(link=('left_knee', 'left_ankle'), id=5, color=[0, 255, 0]), + 6: dict(link=('belly', 'neck'), id=6, color=[51, 153, 255]), + 7: dict(link=('neck', 'head'), id=7, color=[51, 153, 255]), + 8: dict(link=('neck', 'right_shoulder'), id=8, color=[255, 128, 0]), + 9: dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('right_elbow', 'right_wrist'), id=10, color=[255, 128, 0]), + 11: dict(link=('neck', 'left_shoulder'), id=11, color=[0, 255, 0]), + 12: + dict(link=('left_shoulder', 'left_elbow'), id=12, color=[0, 255, 0]), + 13: dict(link=('left_elbow', 'left_wrist'), id=13, color=[0, 255, 0]) + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.2, 1.2, 1.5, 1.5, 1.5, 1.5 + ], + # Adapted from COCO dataset. 
+ sigmas=[ + 0.025, 0.107, 0.025, 0.079, 0.079, 0.107, 0.107, 0.072, 0.072, 0.087, + 0.087, 0.062, 0.062, 0.089, 0.089 + ]) diff --git a/configs/_base_/datasets/lapa.py b/configs/_base_/datasets/lapa.py index 26a0843404..88edf3d575 100644 --- a/configs/_base_/datasets/lapa.py +++ b/configs/_base_/datasets/lapa.py @@ -1,688 +1,688 @@ -dataset_info = dict( - dataset_name='lapa', - paper_info=dict( - author='Liu, Yinglu and Shi, Hailin and Shen, Hao and Si, ' - 'Yue and Wang, Xiaobo and Mei, Tao', - title='A New Dataset and Boundary-Attention Semantic ' - 'Segmentation for Face Parsing.', - container='Proceedings of the AAAI Conference on ' - 'Artificial Intelligence 2020', - year='2020', - homepage='https://github.com/JDAI-CV/lapa-dataset', - ), - keypoint_info={ - 0: - dict( - name='kpt-0', id=0, color=[255, 0, 0], type='upper', - swap='kpt-32'), - 1: - dict( - name='kpt-1', id=1, color=[255, 0, 0], type='upper', - swap='kpt-31'), - 2: - dict( - name='kpt-2', id=2, color=[255, 0, 0], type='upper', - swap='kpt-30'), - 3: - dict( - name='kpt-3', id=3, color=[255, 0, 0], type='lower', - swap='kpt-29'), - 4: - dict( - name='kpt-4', id=4, color=[255, 0, 0], type='lower', - swap='kpt-28'), - 5: - dict( - name='kpt-5', id=5, color=[255, 0, 0], type='lower', - swap='kpt-27'), - 6: - dict( - name='kpt-6', id=6, color=[255, 0, 0], type='lower', - swap='kpt-26'), - 7: - dict( - name='kpt-7', id=7, color=[255, 0, 0], type='lower', - swap='kpt-25'), - 8: - dict( - name='kpt-8', id=8, color=[255, 0, 0], type='lower', - swap='kpt-24'), - 9: - dict( - name='kpt-9', id=9, color=[255, 0, 0], type='lower', - swap='kpt-23'), - 10: - dict( - name='kpt-10', - id=10, - color=[255, 0, 0], - type='lower', - swap='kpt-22'), - 11: - dict( - name='kpt-11', - id=11, - color=[255, 0, 0], - type='lower', - swap='kpt-21'), - 12: - dict( - name='kpt-12', - id=12, - color=[255, 0, 0], - type='lower', - swap='kpt-20'), - 13: - dict( - name='kpt-13', - id=13, - color=[255, 0, 0], - type='lower', - swap='kpt-19'), - 14: - dict( - name='kpt-14', - id=14, - color=[255, 0, 0], - type='lower', - swap='kpt-18'), - 15: - dict( - name='kpt-15', - id=15, - color=[255, 0, 0], - type='lower', - swap='kpt-17'), - 16: - dict(name='kpt-16', id=16, color=[255, 0, 0], type='lower', swap=''), - 17: - dict( - name='kpt-17', - id=17, - color=[255, 0, 0], - type='lower', - swap='kpt-15'), - 18: - dict( - name='kpt-18', - id=18, - color=[255, 0, 0], - type='lower', - swap='kpt-14'), - 19: - dict( - name='kpt-19', - id=19, - color=[255, 0, 0], - type='lower', - swap='kpt-13'), - 20: - dict( - name='kpt-20', - id=20, - color=[255, 0, 0], - type='lower', - swap='kpt-12'), - 21: - dict( - name='kpt-21', - id=21, - color=[255, 0, 0], - type='lower', - swap='kpt-11'), - 22: - dict( - name='kpt-22', - id=22, - color=[255, 0, 0], - type='lower', - swap='kpt-10'), - 23: - dict( - name='kpt-23', - id=23, - color=[255, 0, 0], - type='lower', - swap='kpt-9'), - 24: - dict( - name='kpt-24', - id=24, - color=[255, 0, 0], - type='lower', - swap='kpt-8'), - 25: - dict( - name='kpt-25', - id=25, - color=[255, 0, 0], - type='lower', - swap='kpt-7'), - 26: - dict( - name='kpt-26', - id=26, - color=[255, 0, 0], - type='lower', - swap='kpt-6'), - 27: - dict( - name='kpt-27', - id=27, - color=[255, 0, 0], - type='lower', - swap='kpt-5'), - 28: - dict( - name='kpt-28', - id=28, - color=[255, 0, 0], - type='lower', - swap='kpt-4'), - 29: - dict( - name='kpt-29', - id=29, - color=[255, 0, 0], - type='lower', - swap='kpt-3'), - 30: - dict( - name='kpt-30', - id=30, - 
color=[255, 0, 0], - type='upper', - swap='kpt-2'), - 31: - dict( - name='kpt-31', - id=31, - color=[255, 0, 0], - type='upper', - swap='kpt-1'), - 32: - dict( - name='kpt-32', - id=32, - color=[255, 0, 0], - type='upper', - swap='kpt-0'), - 33: - dict( - name='kpt-33', - id=33, - color=[255, 0, 0], - type='upper', - swap='kpt-46'), - 34: - dict( - name='kpt-34', - id=34, - color=[255, 0, 0], - type='upper', - swap='kpt-45'), - 35: - dict( - name='kpt-35', - id=35, - color=[255, 0, 0], - type='upper', - swap='kpt-44'), - 36: - dict( - name='kpt-36', - id=36, - color=[255, 0, 0], - type='upper', - swap='kpt-43'), - 37: - dict( - name='kpt-37', - id=37, - color=[255, 0, 0], - type='upper', - swap='kpt-42'), - 38: - dict( - name='kpt-38', - id=38, - color=[255, 0, 0], - type='upper', - swap='kpt-50'), - 39: - dict( - name='kpt-39', - id=39, - color=[255, 0, 0], - type='upper', - swap='kpt-49'), - 40: - dict( - name='kpt-40', - id=40, - color=[255, 0, 0], - type='upper', - swap='kpt-48'), - 41: - dict( - name='kpt-41', - id=41, - color=[255, 0, 0], - type='upper', - swap='kpt-47'), - 42: - dict( - name='kpt-42', - id=42, - color=[255, 0, 0], - type='upper', - swap='kpt-37'), - 43: - dict( - name='kpt-43', - id=43, - color=[255, 0, 0], - type='upper', - swap='kpt-36'), - 44: - dict( - name='kpt-44', - id=44, - color=[255, 0, 0], - type='upper', - swap='kpt-35'), - 45: - dict( - name='kpt-45', - id=45, - color=[255, 0, 0], - type='upper', - swap='kpt-34'), - 46: - dict( - name='kpt-46', - id=46, - color=[255, 0, 0], - type='upper', - swap='kpt-33'), - 47: - dict( - name='kpt-47', - id=47, - color=[255, 0, 0], - type='upper', - swap='kpt-41'), - 48: - dict( - name='kpt-48', - id=48, - color=[255, 0, 0], - type='upper', - swap='kpt-40'), - 49: - dict( - name='kpt-49', - id=49, - color=[255, 0, 0], - type='upper', - swap='kpt-39'), - 50: - dict( - name='kpt-50', - id=50, - color=[255, 0, 0], - type='upper', - swap='kpt-38'), - 51: - dict(name='kpt-51', id=51, color=[255, 0, 0], type='upper', swap=''), - 52: - dict(name='kpt-52', id=52, color=[255, 0, 0], type='upper', swap=''), - 53: - dict(name='kpt-53', id=53, color=[255, 0, 0], type='lower', swap=''), - 54: - dict(name='kpt-54', id=54, color=[255, 0, 0], type='lower', swap=''), - 55: - dict( - name='kpt-55', - id=55, - color=[255, 0, 0], - type='upper', - swap='kpt-65'), - 56: - dict( - name='kpt-56', - id=56, - color=[255, 0, 0], - type='lower', - swap='kpt-64'), - 57: - dict( - name='kpt-57', - id=57, - color=[255, 0, 0], - type='lower', - swap='kpt-63'), - 58: - dict( - name='kpt-58', - id=58, - color=[255, 0, 0], - type='lower', - swap='kpt-62'), - 59: - dict( - name='kpt-59', - id=59, - color=[255, 0, 0], - type='lower', - swap='kpt-61'), - 60: - dict(name='kpt-60', id=60, color=[255, 0, 0], type='lower', swap=''), - 61: - dict( - name='kpt-61', - id=61, - color=[255, 0, 0], - type='lower', - swap='kpt-59'), - 62: - dict( - name='kpt-62', - id=62, - color=[255, 0, 0], - type='lower', - swap='kpt-58'), - 63: - dict( - name='kpt-63', - id=63, - color=[255, 0, 0], - type='lower', - swap='kpt-57'), - 64: - dict( - name='kpt-64', - id=64, - color=[255, 0, 0], - type='lower', - swap='kpt-56'), - 65: - dict( - name='kpt-65', - id=65, - color=[255, 0, 0], - type='upper', - swap='kpt-55'), - 66: - dict( - name='kpt-66', - id=66, - color=[255, 0, 0], - type='upper', - swap='kpt-79'), - 67: - dict( - name='kpt-67', - id=67, - color=[255, 0, 0], - type='upper', - swap='kpt-78'), - 68: - dict( - name='kpt-68', - id=68, - color=[255, 0, 0], - 
type='upper', - swap='kpt-77'), - 69: - dict( - name='kpt-69', - id=69, - color=[255, 0, 0], - type='upper', - swap='kpt-76'), - 70: - dict( - name='kpt-70', - id=70, - color=[255, 0, 0], - type='upper', - swap='kpt-75'), - 71: - dict( - name='kpt-71', - id=71, - color=[255, 0, 0], - type='upper', - swap='kpt-82'), - 72: - dict( - name='kpt-72', - id=72, - color=[255, 0, 0], - type='upper', - swap='kpt-81'), - 73: - dict( - name='kpt-73', - id=73, - color=[255, 0, 0], - type='upper', - swap='kpt-80'), - 74: - dict( - name='kpt-74', - id=74, - color=[255, 0, 0], - type='upper', - swap='kpt-83'), - 75: - dict( - name='kpt-75', - id=75, - color=[255, 0, 0], - type='upper', - swap='kpt-70'), - 76: - dict( - name='kpt-76', - id=76, - color=[255, 0, 0], - type='upper', - swap='kpt-69'), - 77: - dict( - name='kpt-77', - id=77, - color=[255, 0, 0], - type='upper', - swap='kpt-68'), - 78: - dict( - name='kpt-78', - id=78, - color=[255, 0, 0], - type='upper', - swap='kpt-67'), - 79: - dict( - name='kpt-79', - id=79, - color=[255, 0, 0], - type='upper', - swap='kpt-66'), - 80: - dict( - name='kpt-80', - id=80, - color=[255, 0, 0], - type='upper', - swap='kpt-73'), - 81: - dict( - name='kpt-81', - id=81, - color=[255, 0, 0], - type='upper', - swap='kpt-72'), - 82: - dict( - name='kpt-82', - id=82, - color=[255, 0, 0], - type='upper', - swap='kpt-71'), - 83: - dict( - name='kpt-83', - id=83, - color=[255, 0, 0], - type='upper', - swap='kpt-74'), - 84: - dict( - name='kpt-84', - id=84, - color=[255, 0, 0], - type='lower', - swap='kpt-90'), - 85: - dict( - name='kpt-85', - id=85, - color=[255, 0, 0], - type='lower', - swap='kpt-89'), - 86: - dict( - name='kpt-86', - id=86, - color=[255, 0, 0], - type='lower', - swap='kpt-88'), - 87: - dict(name='kpt-87', id=87, color=[255, 0, 0], type='lower', swap=''), - 88: - dict( - name='kpt-88', - id=88, - color=[255, 0, 0], - type='lower', - swap='kpt-86'), - 89: - dict( - name='kpt-89', - id=89, - color=[255, 0, 0], - type='lower', - swap='kpt-85'), - 90: - dict( - name='kpt-90', - id=90, - color=[255, 0, 0], - type='lower', - swap='kpt-84'), - 91: - dict( - name='kpt-91', - id=91, - color=[255, 0, 0], - type='lower', - swap='kpt-95'), - 92: - dict( - name='kpt-92', - id=92, - color=[255, 0, 0], - type='lower', - swap='kpt-94'), - 93: - dict(name='kpt-93', id=93, color=[255, 0, 0], type='lower', swap=''), - 94: - dict( - name='kpt-94', - id=94, - color=[255, 0, 0], - type='lower', - swap='kpt-92'), - 95: - dict( - name='kpt-95', - id=95, - color=[255, 0, 0], - type='lower', - swap='kpt-91'), - 96: - dict( - name='kpt-96', - id=96, - color=[255, 0, 0], - type='lower', - swap='kpt-100'), - 97: - dict( - name='kpt-97', - id=97, - color=[255, 0, 0], - type='lower', - swap='kpt-99'), - 98: - dict(name='kpt-98', id=98, color=[255, 0, 0], type='lower', swap=''), - 99: - dict( - name='kpt-99', - id=99, - color=[255, 0, 0], - type='lower', - swap='kpt-97'), - 100: - dict( - name='kpt-100', - id=100, - color=[255, 0, 0], - type='lower', - swap='kpt-96'), - 101: - dict( - name='kpt-101', - id=101, - color=[255, 0, 0], - type='lower', - swap='kpt-103'), - 102: - dict(name='kpt-102', id=102, color=[255, 0, 0], type='lower', swap=''), - 103: - dict( - name='kpt-103', - id=103, - color=[255, 0, 0], - type='lower', - swap='kpt-101'), - 104: - dict( - name='kpt-104', - id=104, - color=[255, 0, 0], - type='upper', - swap='kpt-105'), - 105: - dict( - name='kpt-105', - id=105, - color=[255, 0, 0], - type='upper', - swap='kpt-104') - }, - skeleton_info={}, - joint_weights=[ - 0.8, 
0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, - 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, - 0.8, 0.8, 0.8, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, - 2.0, 2.0, 2.0, 2.0, 1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 1.0, - 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, - 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.0, 1.0 - ], - sigmas=[]) +dataset_info = dict( + dataset_name='lapa', + paper_info=dict( + author='Liu, Yinglu and Shi, Hailin and Shen, Hao and Si, ' + 'Yue and Wang, Xiaobo and Mei, Tao', + title='A New Dataset and Boundary-Attention Semantic ' + 'Segmentation for Face Parsing.', + container='Proceedings of the AAAI Conference on ' + 'Artificial Intelligence 2020', + year='2020', + homepage='https://github.com/JDAI-CV/lapa-dataset', + ), + keypoint_info={ + 0: + dict( + name='kpt-0', id=0, color=[255, 0, 0], type='upper', + swap='kpt-32'), + 1: + dict( + name='kpt-1', id=1, color=[255, 0, 0], type='upper', + swap='kpt-31'), + 2: + dict( + name='kpt-2', id=2, color=[255, 0, 0], type='upper', + swap='kpt-30'), + 3: + dict( + name='kpt-3', id=3, color=[255, 0, 0], type='lower', + swap='kpt-29'), + 4: + dict( + name='kpt-4', id=4, color=[255, 0, 0], type='lower', + swap='kpt-28'), + 5: + dict( + name='kpt-5', id=5, color=[255, 0, 0], type='lower', + swap='kpt-27'), + 6: + dict( + name='kpt-6', id=6, color=[255, 0, 0], type='lower', + swap='kpt-26'), + 7: + dict( + name='kpt-7', id=7, color=[255, 0, 0], type='lower', + swap='kpt-25'), + 8: + dict( + name='kpt-8', id=8, color=[255, 0, 0], type='lower', + swap='kpt-24'), + 9: + dict( + name='kpt-9', id=9, color=[255, 0, 0], type='lower', + swap='kpt-23'), + 10: + dict( + name='kpt-10', + id=10, + color=[255, 0, 0], + type='lower', + swap='kpt-22'), + 11: + dict( + name='kpt-11', + id=11, + color=[255, 0, 0], + type='lower', + swap='kpt-21'), + 12: + dict( + name='kpt-12', + id=12, + color=[255, 0, 0], + type='lower', + swap='kpt-20'), + 13: + dict( + name='kpt-13', + id=13, + color=[255, 0, 0], + type='lower', + swap='kpt-19'), + 14: + dict( + name='kpt-14', + id=14, + color=[255, 0, 0], + type='lower', + swap='kpt-18'), + 15: + dict( + name='kpt-15', + id=15, + color=[255, 0, 0], + type='lower', + swap='kpt-17'), + 16: + dict(name='kpt-16', id=16, color=[255, 0, 0], type='lower', swap=''), + 17: + dict( + name='kpt-17', + id=17, + color=[255, 0, 0], + type='lower', + swap='kpt-15'), + 18: + dict( + name='kpt-18', + id=18, + color=[255, 0, 0], + type='lower', + swap='kpt-14'), + 19: + dict( + name='kpt-19', + id=19, + color=[255, 0, 0], + type='lower', + swap='kpt-13'), + 20: + dict( + name='kpt-20', + id=20, + color=[255, 0, 0], + type='lower', + swap='kpt-12'), + 21: + dict( + name='kpt-21', + id=21, + color=[255, 0, 0], + type='lower', + swap='kpt-11'), + 22: + dict( + name='kpt-22', + id=22, + color=[255, 0, 0], + type='lower', + swap='kpt-10'), + 23: + dict( + name='kpt-23', + id=23, + color=[255, 0, 0], + type='lower', + swap='kpt-9'), + 24: + dict( + name='kpt-24', + id=24, + color=[255, 0, 0], + type='lower', + swap='kpt-8'), + 25: + dict( + name='kpt-25', + id=25, + color=[255, 0, 0], + type='lower', + swap='kpt-7'), + 26: + dict( + name='kpt-26', + id=26, + color=[255, 0, 0], + type='lower', + swap='kpt-6'), + 27: + dict( + name='kpt-27', + id=27, + color=[255, 0, 0], + type='lower', + swap='kpt-5'), + 28: + dict( 
+ name='kpt-28', + id=28, + color=[255, 0, 0], + type='lower', + swap='kpt-4'), + 29: + dict( + name='kpt-29', + id=29, + color=[255, 0, 0], + type='lower', + swap='kpt-3'), + 30: + dict( + name='kpt-30', + id=30, + color=[255, 0, 0], + type='upper', + swap='kpt-2'), + 31: + dict( + name='kpt-31', + id=31, + color=[255, 0, 0], + type='upper', + swap='kpt-1'), + 32: + dict( + name='kpt-32', + id=32, + color=[255, 0, 0], + type='upper', + swap='kpt-0'), + 33: + dict( + name='kpt-33', + id=33, + color=[255, 0, 0], + type='upper', + swap='kpt-46'), + 34: + dict( + name='kpt-34', + id=34, + color=[255, 0, 0], + type='upper', + swap='kpt-45'), + 35: + dict( + name='kpt-35', + id=35, + color=[255, 0, 0], + type='upper', + swap='kpt-44'), + 36: + dict( + name='kpt-36', + id=36, + color=[255, 0, 0], + type='upper', + swap='kpt-43'), + 37: + dict( + name='kpt-37', + id=37, + color=[255, 0, 0], + type='upper', + swap='kpt-42'), + 38: + dict( + name='kpt-38', + id=38, + color=[255, 0, 0], + type='upper', + swap='kpt-50'), + 39: + dict( + name='kpt-39', + id=39, + color=[255, 0, 0], + type='upper', + swap='kpt-49'), + 40: + dict( + name='kpt-40', + id=40, + color=[255, 0, 0], + type='upper', + swap='kpt-48'), + 41: + dict( + name='kpt-41', + id=41, + color=[255, 0, 0], + type='upper', + swap='kpt-47'), + 42: + dict( + name='kpt-42', + id=42, + color=[255, 0, 0], + type='upper', + swap='kpt-37'), + 43: + dict( + name='kpt-43', + id=43, + color=[255, 0, 0], + type='upper', + swap='kpt-36'), + 44: + dict( + name='kpt-44', + id=44, + color=[255, 0, 0], + type='upper', + swap='kpt-35'), + 45: + dict( + name='kpt-45', + id=45, + color=[255, 0, 0], + type='upper', + swap='kpt-34'), + 46: + dict( + name='kpt-46', + id=46, + color=[255, 0, 0], + type='upper', + swap='kpt-33'), + 47: + dict( + name='kpt-47', + id=47, + color=[255, 0, 0], + type='upper', + swap='kpt-41'), + 48: + dict( + name='kpt-48', + id=48, + color=[255, 0, 0], + type='upper', + swap='kpt-40'), + 49: + dict( + name='kpt-49', + id=49, + color=[255, 0, 0], + type='upper', + swap='kpt-39'), + 50: + dict( + name='kpt-50', + id=50, + color=[255, 0, 0], + type='upper', + swap='kpt-38'), + 51: + dict(name='kpt-51', id=51, color=[255, 0, 0], type='upper', swap=''), + 52: + dict(name='kpt-52', id=52, color=[255, 0, 0], type='upper', swap=''), + 53: + dict(name='kpt-53', id=53, color=[255, 0, 0], type='lower', swap=''), + 54: + dict(name='kpt-54', id=54, color=[255, 0, 0], type='lower', swap=''), + 55: + dict( + name='kpt-55', + id=55, + color=[255, 0, 0], + type='upper', + swap='kpt-65'), + 56: + dict( + name='kpt-56', + id=56, + color=[255, 0, 0], + type='lower', + swap='kpt-64'), + 57: + dict( + name='kpt-57', + id=57, + color=[255, 0, 0], + type='lower', + swap='kpt-63'), + 58: + dict( + name='kpt-58', + id=58, + color=[255, 0, 0], + type='lower', + swap='kpt-62'), + 59: + dict( + name='kpt-59', + id=59, + color=[255, 0, 0], + type='lower', + swap='kpt-61'), + 60: + dict(name='kpt-60', id=60, color=[255, 0, 0], type='lower', swap=''), + 61: + dict( + name='kpt-61', + id=61, + color=[255, 0, 0], + type='lower', + swap='kpt-59'), + 62: + dict( + name='kpt-62', + id=62, + color=[255, 0, 0], + type='lower', + swap='kpt-58'), + 63: + dict( + name='kpt-63', + id=63, + color=[255, 0, 0], + type='lower', + swap='kpt-57'), + 64: + dict( + name='kpt-64', + id=64, + color=[255, 0, 0], + type='lower', + swap='kpt-56'), + 65: + dict( + name='kpt-65', + id=65, + color=[255, 0, 0], + type='upper', + swap='kpt-55'), + 66: + dict( + name='kpt-66', + id=66, + 
color=[255, 0, 0], + type='upper', + swap='kpt-79'), + 67: + dict( + name='kpt-67', + id=67, + color=[255, 0, 0], + type='upper', + swap='kpt-78'), + 68: + dict( + name='kpt-68', + id=68, + color=[255, 0, 0], + type='upper', + swap='kpt-77'), + 69: + dict( + name='kpt-69', + id=69, + color=[255, 0, 0], + type='upper', + swap='kpt-76'), + 70: + dict( + name='kpt-70', + id=70, + color=[255, 0, 0], + type='upper', + swap='kpt-75'), + 71: + dict( + name='kpt-71', + id=71, + color=[255, 0, 0], + type='upper', + swap='kpt-82'), + 72: + dict( + name='kpt-72', + id=72, + color=[255, 0, 0], + type='upper', + swap='kpt-81'), + 73: + dict( + name='kpt-73', + id=73, + color=[255, 0, 0], + type='upper', + swap='kpt-80'), + 74: + dict( + name='kpt-74', + id=74, + color=[255, 0, 0], + type='upper', + swap='kpt-83'), + 75: + dict( + name='kpt-75', + id=75, + color=[255, 0, 0], + type='upper', + swap='kpt-70'), + 76: + dict( + name='kpt-76', + id=76, + color=[255, 0, 0], + type='upper', + swap='kpt-69'), + 77: + dict( + name='kpt-77', + id=77, + color=[255, 0, 0], + type='upper', + swap='kpt-68'), + 78: + dict( + name='kpt-78', + id=78, + color=[255, 0, 0], + type='upper', + swap='kpt-67'), + 79: + dict( + name='kpt-79', + id=79, + color=[255, 0, 0], + type='upper', + swap='kpt-66'), + 80: + dict( + name='kpt-80', + id=80, + color=[255, 0, 0], + type='upper', + swap='kpt-73'), + 81: + dict( + name='kpt-81', + id=81, + color=[255, 0, 0], + type='upper', + swap='kpt-72'), + 82: + dict( + name='kpt-82', + id=82, + color=[255, 0, 0], + type='upper', + swap='kpt-71'), + 83: + dict( + name='kpt-83', + id=83, + color=[255, 0, 0], + type='upper', + swap='kpt-74'), + 84: + dict( + name='kpt-84', + id=84, + color=[255, 0, 0], + type='lower', + swap='kpt-90'), + 85: + dict( + name='kpt-85', + id=85, + color=[255, 0, 0], + type='lower', + swap='kpt-89'), + 86: + dict( + name='kpt-86', + id=86, + color=[255, 0, 0], + type='lower', + swap='kpt-88'), + 87: + dict(name='kpt-87', id=87, color=[255, 0, 0], type='lower', swap=''), + 88: + dict( + name='kpt-88', + id=88, + color=[255, 0, 0], + type='lower', + swap='kpt-86'), + 89: + dict( + name='kpt-89', + id=89, + color=[255, 0, 0], + type='lower', + swap='kpt-85'), + 90: + dict( + name='kpt-90', + id=90, + color=[255, 0, 0], + type='lower', + swap='kpt-84'), + 91: + dict( + name='kpt-91', + id=91, + color=[255, 0, 0], + type='lower', + swap='kpt-95'), + 92: + dict( + name='kpt-92', + id=92, + color=[255, 0, 0], + type='lower', + swap='kpt-94'), + 93: + dict(name='kpt-93', id=93, color=[255, 0, 0], type='lower', swap=''), + 94: + dict( + name='kpt-94', + id=94, + color=[255, 0, 0], + type='lower', + swap='kpt-92'), + 95: + dict( + name='kpt-95', + id=95, + color=[255, 0, 0], + type='lower', + swap='kpt-91'), + 96: + dict( + name='kpt-96', + id=96, + color=[255, 0, 0], + type='lower', + swap='kpt-100'), + 97: + dict( + name='kpt-97', + id=97, + color=[255, 0, 0], + type='lower', + swap='kpt-99'), + 98: + dict(name='kpt-98', id=98, color=[255, 0, 0], type='lower', swap=''), + 99: + dict( + name='kpt-99', + id=99, + color=[255, 0, 0], + type='lower', + swap='kpt-97'), + 100: + dict( + name='kpt-100', + id=100, + color=[255, 0, 0], + type='lower', + swap='kpt-96'), + 101: + dict( + name='kpt-101', + id=101, + color=[255, 0, 0], + type='lower', + swap='kpt-103'), + 102: + dict(name='kpt-102', id=102, color=[255, 0, 0], type='lower', swap=''), + 103: + dict( + name='kpt-103', + id=103, + color=[255, 0, 0], + type='lower', + swap='kpt-101'), + 104: + dict( + name='kpt-104', + 
id=104, + color=[255, 0, 0], + type='upper', + swap='kpt-105'), + 105: + dict( + name='kpt-105', + id=105, + color=[255, 0, 0], + type='upper', + swap='kpt-104') + }, + skeleton_info={}, + joint_weights=[ + 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, + 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, + 0.8, 0.8, 0.8, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, + 2.0, 2.0, 2.0, 2.0, 1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 1.0, + 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, + 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.0, 1.0 + ], + sigmas=[]) diff --git a/configs/_base_/datasets/locust.py b/configs/_base_/datasets/locust.py index db3fa15aa0..3a6fafddfa 100644 --- a/configs/_base_/datasets/locust.py +++ b/configs/_base_/datasets/locust.py @@ -1,263 +1,263 @@ -dataset_info = dict( - dataset_name='locust', - paper_info=dict( - author='Graving, Jacob M and Chae, Daniel and Naik, Hemal and ' - 'Li, Liang and Koger, Benjamin and Costelloe, Blair R and ' - 'Couzin, Iain D', - title='DeepPoseKit, a software toolkit for fast and robust ' - 'animal pose estimation using deep learning', - container='Elife', - year='2019', - homepage='https://github.com/jgraving/DeepPoseKit-Data', - ), - keypoint_info={ - 0: - dict(name='head', id=0, color=[255, 255, 255], type='', swap=''), - 1: - dict(name='neck', id=1, color=[255, 255, 255], type='', swap=''), - 2: - dict(name='thorax', id=2, color=[255, 255, 255], type='', swap=''), - 3: - dict(name='abdomen1', id=3, color=[255, 255, 255], type='', swap=''), - 4: - dict(name='abdomen2', id=4, color=[255, 255, 255], type='', swap=''), - 5: - dict( - name='anttipL', - id=5, - color=[255, 255, 255], - type='', - swap='anttipR'), - 6: - dict( - name='antbaseL', - id=6, - color=[255, 255, 255], - type='', - swap='antbaseR'), - 7: - dict(name='eyeL', id=7, color=[255, 255, 255], type='', swap='eyeR'), - 8: - dict( - name='forelegL1', - id=8, - color=[255, 255, 255], - type='', - swap='forelegR1'), - 9: - dict( - name='forelegL2', - id=9, - color=[255, 255, 255], - type='', - swap='forelegR2'), - 10: - dict( - name='forelegL3', - id=10, - color=[255, 255, 255], - type='', - swap='forelegR3'), - 11: - dict( - name='forelegL4', - id=11, - color=[255, 255, 255], - type='', - swap='forelegR4'), - 12: - dict( - name='midlegL1', - id=12, - color=[255, 255, 255], - type='', - swap='midlegR1'), - 13: - dict( - name='midlegL2', - id=13, - color=[255, 255, 255], - type='', - swap='midlegR2'), - 14: - dict( - name='midlegL3', - id=14, - color=[255, 255, 255], - type='', - swap='midlegR3'), - 15: - dict( - name='midlegL4', - id=15, - color=[255, 255, 255], - type='', - swap='midlegR4'), - 16: - dict( - name='hindlegL1', - id=16, - color=[255, 255, 255], - type='', - swap='hindlegR1'), - 17: - dict( - name='hindlegL2', - id=17, - color=[255, 255, 255], - type='', - swap='hindlegR2'), - 18: - dict( - name='hindlegL3', - id=18, - color=[255, 255, 255], - type='', - swap='hindlegR3'), - 19: - dict( - name='hindlegL4', - id=19, - color=[255, 255, 255], - type='', - swap='hindlegR4'), - 20: - dict( - name='anttipR', - id=20, - color=[255, 255, 255], - type='', - swap='anttipL'), - 21: - dict( - name='antbaseR', - id=21, - color=[255, 255, 255], - type='', - swap='antbaseL'), - 22: - dict(name='eyeR', id=22, color=[255, 255, 255], type='', swap='eyeL'), - 23: - dict( - 
name='forelegR1', - id=23, - color=[255, 255, 255], - type='', - swap='forelegL1'), - 24: - dict( - name='forelegR2', - id=24, - color=[255, 255, 255], - type='', - swap='forelegL2'), - 25: - dict( - name='forelegR3', - id=25, - color=[255, 255, 255], - type='', - swap='forelegL3'), - 26: - dict( - name='forelegR4', - id=26, - color=[255, 255, 255], - type='', - swap='forelegL4'), - 27: - dict( - name='midlegR1', - id=27, - color=[255, 255, 255], - type='', - swap='midlegL1'), - 28: - dict( - name='midlegR2', - id=28, - color=[255, 255, 255], - type='', - swap='midlegL2'), - 29: - dict( - name='midlegR3', - id=29, - color=[255, 255, 255], - type='', - swap='midlegL3'), - 30: - dict( - name='midlegR4', - id=30, - color=[255, 255, 255], - type='', - swap='midlegL4'), - 31: - dict( - name='hindlegR1', - id=31, - color=[255, 255, 255], - type='', - swap='hindlegL1'), - 32: - dict( - name='hindlegR2', - id=32, - color=[255, 255, 255], - type='', - swap='hindlegL2'), - 33: - dict( - name='hindlegR3', - id=33, - color=[255, 255, 255], - type='', - swap='hindlegL3'), - 34: - dict( - name='hindlegR4', - id=34, - color=[255, 255, 255], - type='', - swap='hindlegL4') - }, - skeleton_info={ - 0: dict(link=('neck', 'head'), id=0, color=[255, 255, 255]), - 1: dict(link=('thorax', 'neck'), id=1, color=[255, 255, 255]), - 2: dict(link=('abdomen1', 'thorax'), id=2, color=[255, 255, 255]), - 3: dict(link=('abdomen2', 'abdomen1'), id=3, color=[255, 255, 255]), - 4: dict(link=('antbaseL', 'anttipL'), id=4, color=[255, 255, 255]), - 5: dict(link=('eyeL', 'antbaseL'), id=5, color=[255, 255, 255]), - 6: dict(link=('forelegL2', 'forelegL1'), id=6, color=[255, 255, 255]), - 7: dict(link=('forelegL3', 'forelegL2'), id=7, color=[255, 255, 255]), - 8: dict(link=('forelegL4', 'forelegL3'), id=8, color=[255, 255, 255]), - 9: dict(link=('midlegL2', 'midlegL1'), id=9, color=[255, 255, 255]), - 10: dict(link=('midlegL3', 'midlegL2'), id=10, color=[255, 255, 255]), - 11: dict(link=('midlegL4', 'midlegL3'), id=11, color=[255, 255, 255]), - 12: - dict(link=('hindlegL2', 'hindlegL1'), id=12, color=[255, 255, 255]), - 13: - dict(link=('hindlegL3', 'hindlegL2'), id=13, color=[255, 255, 255]), - 14: - dict(link=('hindlegL4', 'hindlegL3'), id=14, color=[255, 255, 255]), - 15: dict(link=('antbaseR', 'anttipR'), id=15, color=[255, 255, 255]), - 16: dict(link=('eyeR', 'antbaseR'), id=16, color=[255, 255, 255]), - 17: - dict(link=('forelegR2', 'forelegR1'), id=17, color=[255, 255, 255]), - 18: - dict(link=('forelegR3', 'forelegR2'), id=18, color=[255, 255, 255]), - 19: - dict(link=('forelegR4', 'forelegR3'), id=19, color=[255, 255, 255]), - 20: dict(link=('midlegR2', 'midlegR1'), id=20, color=[255, 255, 255]), - 21: dict(link=('midlegR3', 'midlegR2'), id=21, color=[255, 255, 255]), - 22: dict(link=('midlegR4', 'midlegR3'), id=22, color=[255, 255, 255]), - 23: - dict(link=('hindlegR2', 'hindlegR1'), id=23, color=[255, 255, 255]), - 24: - dict(link=('hindlegR3', 'hindlegR2'), id=24, color=[255, 255, 255]), - 25: - dict(link=('hindlegR4', 'hindlegR3'), id=25, color=[255, 255, 255]) - }, - joint_weights=[1.] 
* 35, - sigmas=[]) +dataset_info = dict( + dataset_name='locust', + paper_info=dict( + author='Graving, Jacob M and Chae, Daniel and Naik, Hemal and ' + 'Li, Liang and Koger, Benjamin and Costelloe, Blair R and ' + 'Couzin, Iain D', + title='DeepPoseKit, a software toolkit for fast and robust ' + 'animal pose estimation using deep learning', + container='Elife', + year='2019', + homepage='https://github.com/jgraving/DeepPoseKit-Data', + ), + keypoint_info={ + 0: + dict(name='head', id=0, color=[255, 255, 255], type='', swap=''), + 1: + dict(name='neck', id=1, color=[255, 255, 255], type='', swap=''), + 2: + dict(name='thorax', id=2, color=[255, 255, 255], type='', swap=''), + 3: + dict(name='abdomen1', id=3, color=[255, 255, 255], type='', swap=''), + 4: + dict(name='abdomen2', id=4, color=[255, 255, 255], type='', swap=''), + 5: + dict( + name='anttipL', + id=5, + color=[255, 255, 255], + type='', + swap='anttipR'), + 6: + dict( + name='antbaseL', + id=6, + color=[255, 255, 255], + type='', + swap='antbaseR'), + 7: + dict(name='eyeL', id=7, color=[255, 255, 255], type='', swap='eyeR'), + 8: + dict( + name='forelegL1', + id=8, + color=[255, 255, 255], + type='', + swap='forelegR1'), + 9: + dict( + name='forelegL2', + id=9, + color=[255, 255, 255], + type='', + swap='forelegR2'), + 10: + dict( + name='forelegL3', + id=10, + color=[255, 255, 255], + type='', + swap='forelegR3'), + 11: + dict( + name='forelegL4', + id=11, + color=[255, 255, 255], + type='', + swap='forelegR4'), + 12: + dict( + name='midlegL1', + id=12, + color=[255, 255, 255], + type='', + swap='midlegR1'), + 13: + dict( + name='midlegL2', + id=13, + color=[255, 255, 255], + type='', + swap='midlegR2'), + 14: + dict( + name='midlegL3', + id=14, + color=[255, 255, 255], + type='', + swap='midlegR3'), + 15: + dict( + name='midlegL4', + id=15, + color=[255, 255, 255], + type='', + swap='midlegR4'), + 16: + dict( + name='hindlegL1', + id=16, + color=[255, 255, 255], + type='', + swap='hindlegR1'), + 17: + dict( + name='hindlegL2', + id=17, + color=[255, 255, 255], + type='', + swap='hindlegR2'), + 18: + dict( + name='hindlegL3', + id=18, + color=[255, 255, 255], + type='', + swap='hindlegR3'), + 19: + dict( + name='hindlegL4', + id=19, + color=[255, 255, 255], + type='', + swap='hindlegR4'), + 20: + dict( + name='anttipR', + id=20, + color=[255, 255, 255], + type='', + swap='anttipL'), + 21: + dict( + name='antbaseR', + id=21, + color=[255, 255, 255], + type='', + swap='antbaseL'), + 22: + dict(name='eyeR', id=22, color=[255, 255, 255], type='', swap='eyeL'), + 23: + dict( + name='forelegR1', + id=23, + color=[255, 255, 255], + type='', + swap='forelegL1'), + 24: + dict( + name='forelegR2', + id=24, + color=[255, 255, 255], + type='', + swap='forelegL2'), + 25: + dict( + name='forelegR3', + id=25, + color=[255, 255, 255], + type='', + swap='forelegL3'), + 26: + dict( + name='forelegR4', + id=26, + color=[255, 255, 255], + type='', + swap='forelegL4'), + 27: + dict( + name='midlegR1', + id=27, + color=[255, 255, 255], + type='', + swap='midlegL1'), + 28: + dict( + name='midlegR2', + id=28, + color=[255, 255, 255], + type='', + swap='midlegL2'), + 29: + dict( + name='midlegR3', + id=29, + color=[255, 255, 255], + type='', + swap='midlegL3'), + 30: + dict( + name='midlegR4', + id=30, + color=[255, 255, 255], + type='', + swap='midlegL4'), + 31: + dict( + name='hindlegR1', + id=31, + color=[255, 255, 255], + type='', + swap='hindlegL1'), + 32: + dict( + name='hindlegR2', + id=32, + color=[255, 255, 255], + type='', + 
swap='hindlegL2'), + 33: + dict( + name='hindlegR3', + id=33, + color=[255, 255, 255], + type='', + swap='hindlegL3'), + 34: + dict( + name='hindlegR4', + id=34, + color=[255, 255, 255], + type='', + swap='hindlegL4') + }, + skeleton_info={ + 0: dict(link=('neck', 'head'), id=0, color=[255, 255, 255]), + 1: dict(link=('thorax', 'neck'), id=1, color=[255, 255, 255]), + 2: dict(link=('abdomen1', 'thorax'), id=2, color=[255, 255, 255]), + 3: dict(link=('abdomen2', 'abdomen1'), id=3, color=[255, 255, 255]), + 4: dict(link=('antbaseL', 'anttipL'), id=4, color=[255, 255, 255]), + 5: dict(link=('eyeL', 'antbaseL'), id=5, color=[255, 255, 255]), + 6: dict(link=('forelegL2', 'forelegL1'), id=6, color=[255, 255, 255]), + 7: dict(link=('forelegL3', 'forelegL2'), id=7, color=[255, 255, 255]), + 8: dict(link=('forelegL4', 'forelegL3'), id=8, color=[255, 255, 255]), + 9: dict(link=('midlegL2', 'midlegL1'), id=9, color=[255, 255, 255]), + 10: dict(link=('midlegL3', 'midlegL2'), id=10, color=[255, 255, 255]), + 11: dict(link=('midlegL4', 'midlegL3'), id=11, color=[255, 255, 255]), + 12: + dict(link=('hindlegL2', 'hindlegL1'), id=12, color=[255, 255, 255]), + 13: + dict(link=('hindlegL3', 'hindlegL2'), id=13, color=[255, 255, 255]), + 14: + dict(link=('hindlegL4', 'hindlegL3'), id=14, color=[255, 255, 255]), + 15: dict(link=('antbaseR', 'anttipR'), id=15, color=[255, 255, 255]), + 16: dict(link=('eyeR', 'antbaseR'), id=16, color=[255, 255, 255]), + 17: + dict(link=('forelegR2', 'forelegR1'), id=17, color=[255, 255, 255]), + 18: + dict(link=('forelegR3', 'forelegR2'), id=18, color=[255, 255, 255]), + 19: + dict(link=('forelegR4', 'forelegR3'), id=19, color=[255, 255, 255]), + 20: dict(link=('midlegR2', 'midlegR1'), id=20, color=[255, 255, 255]), + 21: dict(link=('midlegR3', 'midlegR2'), id=21, color=[255, 255, 255]), + 22: dict(link=('midlegR4', 'midlegR3'), id=22, color=[255, 255, 255]), + 23: + dict(link=('hindlegR2', 'hindlegR1'), id=23, color=[255, 255, 255]), + 24: + dict(link=('hindlegR3', 'hindlegR2'), id=24, color=[255, 255, 255]), + 25: + dict(link=('hindlegR4', 'hindlegR3'), id=25, color=[255, 255, 255]) + }, + joint_weights=[1.] 
* 35, + sigmas=[]) diff --git a/configs/_base_/datasets/macaque.py b/configs/_base_/datasets/macaque.py index ea8dac297e..926ca30d3c 100644 --- a/configs/_base_/datasets/macaque.py +++ b/configs/_base_/datasets/macaque.py @@ -1,183 +1,183 @@ -dataset_info = dict( - dataset_name='macaque', - paper_info=dict( - author='Labuguen, Rollyn and Matsumoto, Jumpei and ' - 'Negrete, Salvador and Nishimaru, Hiroshi and ' - 'Nishijo, Hisao and Takada, Masahiko and ' - 'Go, Yasuhiro and Inoue, Ken-ichi and Shibata, Tomohiro', - title='MacaquePose: A novel "in the wild" macaque monkey pose dataset ' - 'for markerless motion capture', - container='bioRxiv', - year='2020', - homepage='http://www.pri.kyoto-u.ac.jp/datasets/' - 'macaquepose/index.html', - ), - keypoint_info={ - 0: - dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), - 1: - dict( - name='left_eye', - id=1, - color=[51, 153, 255], - type='upper', - swap='right_eye'), - 2: - dict( - name='right_eye', - id=2, - color=[51, 153, 255], - type='upper', - swap='left_eye'), - 3: - dict( - name='left_ear', - id=3, - color=[51, 153, 255], - type='upper', - swap='right_ear'), - 4: - dict( - name='right_ear', - id=4, - color=[51, 153, 255], - type='upper', - swap='left_ear'), - 5: - dict( - name='left_shoulder', - id=5, - color=[0, 255, 0], - type='upper', - swap='right_shoulder'), - 6: - dict( - name='right_shoulder', - id=6, - color=[255, 128, 0], - type='upper', - swap='left_shoulder'), - 7: - dict( - name='left_elbow', - id=7, - color=[0, 255, 0], - type='upper', - swap='right_elbow'), - 8: - dict( - name='right_elbow', - id=8, - color=[255, 128, 0], - type='upper', - swap='left_elbow'), - 9: - dict( - name='left_wrist', - id=9, - color=[0, 255, 0], - type='upper', - swap='right_wrist'), - 10: - dict( - name='right_wrist', - id=10, - color=[255, 128, 0], - type='upper', - swap='left_wrist'), - 11: - dict( - name='left_hip', - id=11, - color=[0, 255, 0], - type='lower', - swap='right_hip'), - 12: - dict( - name='right_hip', - id=12, - color=[255, 128, 0], - type='lower', - swap='left_hip'), - 13: - dict( - name='left_knee', - id=13, - color=[0, 255, 0], - type='lower', - swap='right_knee'), - 14: - dict( - name='right_knee', - id=14, - color=[255, 128, 0], - type='lower', - swap='left_knee'), - 15: - dict( - name='left_ankle', - id=15, - color=[0, 255, 0], - type='lower', - swap='right_ankle'), - 16: - dict( - name='right_ankle', - id=16, - color=[255, 128, 0], - type='lower', - swap='left_ankle') - }, - skeleton_info={ - 0: - dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), - 1: - dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), - 2: - dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), - 3: - dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), - 4: - dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), - 5: - dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), - 6: - dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), - 7: - dict( - link=('left_shoulder', 'right_shoulder'), - id=7, - color=[51, 153, 255]), - 8: - dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), - 9: - dict( - link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), - 10: - dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), - 11: - dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), - 12: - dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), - 13: - dict(link=('nose', 
'left_eye'), id=13, color=[51, 153, 255]), - 14: - dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), - 15: - dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), - 16: - dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), - 17: - dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), - 18: - dict( - link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]) - }, - joint_weights=[ - 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, - 1.5 - ], - sigmas=[ - 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, - 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089 - ]) +dataset_info = dict( + dataset_name='macaque', + paper_info=dict( + author='Labuguen, Rollyn and Matsumoto, Jumpei and ' + 'Negrete, Salvador and Nishimaru, Hiroshi and ' + 'Nishijo, Hisao and Takada, Masahiko and ' + 'Go, Yasuhiro and Inoue, Ken-ichi and Shibata, Tomohiro', + title='MacaquePose: A novel "in the wild" macaque monkey pose dataset ' + 'for markerless motion capture', + container='bioRxiv', + year='2020', + homepage='http://www.pri.kyoto-u.ac.jp/datasets/' + 'macaquepose/index.html', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'right_shoulder'), + id=7, + color=[51, 153, 
255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), + 13: + dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), + 14: + dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), + 16: + dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), + 18: + dict( + link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]) + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5 + ], + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089 + ]) diff --git a/configs/_base_/datasets/mhp.py b/configs/_base_/datasets/mhp.py index e16e37c79c..9c8c03c2f5 100644 --- a/configs/_base_/datasets/mhp.py +++ b/configs/_base_/datasets/mhp.py @@ -1,156 +1,156 @@ -dataset_info = dict( - dataset_name='mhp', - paper_info=dict( - author='Zhao, Jian and Li, Jianshu and Cheng, Yu and ' - 'Sim, Terence and Yan, Shuicheng and Feng, Jiashi', - title='Understanding humans in crowded scenes: ' - 'Deep nested adversarial learning and a ' - 'new benchmark for multi-human parsing', - container='Proceedings of the 26th ACM ' - 'international conference on Multimedia', - year='2018', - homepage='https://lv-mhp.github.io/dataset', - ), - keypoint_info={ - 0: - dict( - name='right_ankle', - id=0, - color=[255, 128, 0], - type='lower', - swap='left_ankle'), - 1: - dict( - name='right_knee', - id=1, - color=[255, 128, 0], - type='lower', - swap='left_knee'), - 2: - dict( - name='right_hip', - id=2, - color=[255, 128, 0], - type='lower', - swap='left_hip'), - 3: - dict( - name='left_hip', - id=3, - color=[0, 255, 0], - type='lower', - swap='right_hip'), - 4: - dict( - name='left_knee', - id=4, - color=[0, 255, 0], - type='lower', - swap='right_knee'), - 5: - dict( - name='left_ankle', - id=5, - color=[0, 255, 0], - type='lower', - swap='right_ankle'), - 6: - dict(name='pelvis', id=6, color=[51, 153, 255], type='lower', swap=''), - 7: - dict(name='thorax', id=7, color=[51, 153, 255], type='upper', swap=''), - 8: - dict( - name='upper_neck', - id=8, - color=[51, 153, 255], - type='upper', - swap=''), - 9: - dict( - name='head_top', id=9, color=[51, 153, 255], type='upper', - swap=''), - 10: - dict( - name='right_wrist', - id=10, - color=[255, 128, 0], - type='upper', - swap='left_wrist'), - 11: - dict( - name='right_elbow', - id=11, - color=[255, 128, 0], - type='upper', - swap='left_elbow'), - 12: - dict( - name='right_shoulder', - id=12, - color=[255, 128, 0], - type='upper', - swap='left_shoulder'), - 13: - dict( - name='left_shoulder', - id=13, - color=[0, 255, 0], - type='upper', - swap='right_shoulder'), - 14: - dict( - name='left_elbow', - id=14, - color=[0, 255, 0], - type='upper', - swap='right_elbow'), - 15: - dict( - name='left_wrist', - id=15, - color=[0, 255, 0], - type='upper', - swap='right_wrist') - }, - skeleton_info={ - 0: - dict(link=('right_ankle', 'right_knee'), id=0, color=[255, 128, 0]), - 1: - dict(link=('right_knee', 'right_hip'), id=1, color=[255, 128, 0]), - 2: - 
dict(link=('right_hip', 'pelvis'), id=2, color=[255, 128, 0]), - 3: - dict(link=('pelvis', 'left_hip'), id=3, color=[0, 255, 0]), - 4: - dict(link=('left_hip', 'left_knee'), id=4, color=[0, 255, 0]), - 5: - dict(link=('left_knee', 'left_ankle'), id=5, color=[0, 255, 0]), - 6: - dict(link=('pelvis', 'thorax'), id=6, color=[51, 153, 255]), - 7: - dict(link=('thorax', 'upper_neck'), id=7, color=[51, 153, 255]), - 8: - dict(link=('upper_neck', 'head_top'), id=8, color=[51, 153, 255]), - 9: - dict(link=('upper_neck', 'right_shoulder'), id=9, color=[255, 128, 0]), - 10: - dict( - link=('right_shoulder', 'right_elbow'), id=10, color=[255, 128, - 0]), - 11: - dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), - 12: - dict(link=('upper_neck', 'left_shoulder'), id=12, color=[0, 255, 0]), - 13: - dict(link=('left_shoulder', 'left_elbow'), id=13, color=[0, 255, 0]), - 14: - dict(link=('left_elbow', 'left_wrist'), id=14, color=[0, 255, 0]) - }, - joint_weights=[ - 1.5, 1.2, 1., 1., 1.2, 1.5, 1., 1., 1., 1., 1.5, 1.2, 1., 1., 1.2, 1.5 - ], - # Adapted from COCO dataset. - sigmas=[ - 0.089, 0.083, 0.107, 0.107, 0.083, 0.089, 0.026, 0.026, 0.026, 0.026, - 0.062, 0.072, 0.179, 0.179, 0.072, 0.062 - ]) +dataset_info = dict( + dataset_name='mhp', + paper_info=dict( + author='Zhao, Jian and Li, Jianshu and Cheng, Yu and ' + 'Sim, Terence and Yan, Shuicheng and Feng, Jiashi', + title='Understanding humans in crowded scenes: ' + 'Deep nested adversarial learning and a ' + 'new benchmark for multi-human parsing', + container='Proceedings of the 26th ACM ' + 'international conference on Multimedia', + year='2018', + homepage='https://lv-mhp.github.io/dataset', + ), + keypoint_info={ + 0: + dict( + name='right_ankle', + id=0, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 1: + dict( + name='right_knee', + id=1, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 2: + dict( + name='right_hip', + id=2, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 3: + dict( + name='left_hip', + id=3, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 4: + dict( + name='left_knee', + id=4, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 5: + dict( + name='left_ankle', + id=5, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 6: + dict(name='pelvis', id=6, color=[51, 153, 255], type='lower', swap=''), + 7: + dict(name='thorax', id=7, color=[51, 153, 255], type='upper', swap=''), + 8: + dict( + name='upper_neck', + id=8, + color=[51, 153, 255], + type='upper', + swap=''), + 9: + dict( + name='head_top', id=9, color=[51, 153, 255], type='upper', + swap=''), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='right_elbow', + id=11, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 12: + dict( + name='right_shoulder', + id=12, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 13: + dict( + name='left_shoulder', + id=13, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 14: + dict( + name='left_elbow', + id=14, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 15: + dict( + name='left_wrist', + id=15, + color=[0, 255, 0], + type='upper', + swap='right_wrist') + }, + skeleton_info={ + 0: + dict(link=('right_ankle', 'right_knee'), id=0, color=[255, 128, 0]), + 1: + dict(link=('right_knee', 'right_hip'), id=1, color=[255, 128, 0]), + 2: + dict(link=('right_hip', 'pelvis'), id=2, color=[255, 128, 0]), + 3: + 
dict(link=('pelvis', 'left_hip'), id=3, color=[0, 255, 0]), + 4: + dict(link=('left_hip', 'left_knee'), id=4, color=[0, 255, 0]), + 5: + dict(link=('left_knee', 'left_ankle'), id=5, color=[0, 255, 0]), + 6: + dict(link=('pelvis', 'thorax'), id=6, color=[51, 153, 255]), + 7: + dict(link=('thorax', 'upper_neck'), id=7, color=[51, 153, 255]), + 8: + dict(link=('upper_neck', 'head_top'), id=8, color=[51, 153, 255]), + 9: + dict(link=('upper_neck', 'right_shoulder'), id=9, color=[255, 128, 0]), + 10: + dict( + link=('right_shoulder', 'right_elbow'), id=10, color=[255, 128, + 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('upper_neck', 'left_shoulder'), id=12, color=[0, 255, 0]), + 13: + dict(link=('left_shoulder', 'left_elbow'), id=13, color=[0, 255, 0]), + 14: + dict(link=('left_elbow', 'left_wrist'), id=14, color=[0, 255, 0]) + }, + joint_weights=[ + 1.5, 1.2, 1., 1., 1.2, 1.5, 1., 1., 1., 1., 1.5, 1.2, 1., 1., 1.2, 1.5 + ], + # Adapted from COCO dataset. + sigmas=[ + 0.089, 0.083, 0.107, 0.107, 0.083, 0.089, 0.026, 0.026, 0.026, 0.026, + 0.062, 0.072, 0.179, 0.179, 0.072, 0.062 + ]) diff --git a/configs/_base_/datasets/mpi_inf_3dhp.py b/configs/_base_/datasets/mpi_inf_3dhp.py index ffd0a70297..ed088c2df0 100644 --- a/configs/_base_/datasets/mpi_inf_3dhp.py +++ b/configs/_base_/datasets/mpi_inf_3dhp.py @@ -1,132 +1,132 @@ -dataset_info = dict( - dataset_name='mpi_inf_3dhp', - paper_info=dict( - author='ehta, Dushyant and Rhodin, Helge and Casas, Dan and ' - 'Fua, Pascal and Sotnychenko, Oleksandr and Xu, Weipeng and ' - 'Theobalt, Christian', - title='Monocular 3D Human Pose Estimation In The Wild Using Improved ' - 'CNN Supervision', - container='2017 international conference on 3D vision (3DV)', - year='2017', - homepage='http://gvv.mpi-inf.mpg.de/3dhp-dataset', - ), - keypoint_info={ - 0: - dict( - name='head_top', id=0, color=[51, 153, 255], type='upper', - swap=''), - 1: - dict(name='neck', id=1, color=[51, 153, 255], type='upper', swap=''), - 2: - dict( - name='right_shoulder', - id=2, - color=[255, 128, 0], - type='upper', - swap='left_shoulder'), - 3: - dict( - name='right_elbow', - id=3, - color=[255, 128, 0], - type='upper', - swap='left_elbow'), - 4: - dict( - name='right_wrist', - id=4, - color=[255, 128, 0], - type='upper', - swap='left_wrist'), - 5: - dict( - name='left_shoulder', - id=5, - color=[0, 255, 0], - type='upper', - swap='right_shoulder'), - 6: - dict( - name='left_elbow', - id=6, - color=[0, 255, 0], - type='upper', - swap='right_elbow'), - 7: - dict( - name='left_wrist', - id=7, - color=[0, 255, 0], - type='upper', - swap='right_wrist'), - 8: - dict( - name='right_hip', - id=8, - color=[255, 128, 0], - type='lower', - swap='left_hip'), - 9: - dict( - name='right_knee', - id=9, - color=[255, 128, 0], - type='lower', - swap='left_knee'), - 10: - dict( - name='right_ankle', - id=10, - color=[255, 128, 0], - type='lower', - swap='left_ankle'), - 11: - dict( - name='left_hip', - id=11, - color=[0, 255, 0], - type='lower', - swap='right_hip'), - 12: - dict( - name='left_knee', - id=12, - color=[0, 255, 0], - type='lower', - swap='right_knee'), - 13: - dict( - name='left_ankle', - id=13, - color=[0, 255, 0], - type='lower', - swap='right_ankle'), - 14: - dict(name='root', id=14, color=[51, 153, 255], type='lower', swap=''), - 15: - dict(name='spine', id=15, color=[51, 153, 255], type='upper', swap=''), - 16: - dict(name='head', id=16, color=[51, 153, 255], type='upper', swap='') - }, - skeleton_info={ - 0: 
dict(link=('neck', 'right_shoulder'), id=0, color=[255, 128, 0]), - 1: dict( - link=('right_shoulder', 'right_elbow'), id=1, color=[255, 128, 0]), - 2: - dict(link=('right_elbow', 'right_wrist'), id=2, color=[255, 128, 0]), - 3: dict(link=('neck', 'left_shoulder'), id=3, color=[0, 255, 0]), - 4: dict(link=('left_shoulder', 'left_elbow'), id=4, color=[0, 255, 0]), - 5: dict(link=('left_elbow', 'left_wrist'), id=5, color=[0, 255, 0]), - 6: dict(link=('root', 'right_hip'), id=6, color=[255, 128, 0]), - 7: dict(link=('right_hip', 'right_knee'), id=7, color=[255, 128, 0]), - 8: dict(link=('right_knee', 'right_ankle'), id=8, color=[255, 128, 0]), - 9: dict(link=('root', 'left_hip'), id=9, color=[0, 255, 0]), - 10: dict(link=('left_hip', 'left_knee'), id=10, color=[0, 255, 0]), - 11: dict(link=('left_knee', 'left_ankle'), id=11, color=[0, 255, 0]), - 12: dict(link=('head_top', 'head'), id=12, color=[51, 153, 255]), - 13: dict(link=('head', 'neck'), id=13, color=[51, 153, 255]), - 14: dict(link=('neck', 'spine'), id=14, color=[51, 153, 255]), - 15: dict(link=('spine', 'root'), id=15, color=[51, 153, 255]) - }, - joint_weights=[1.] * 17, - sigmas=[]) +dataset_info = dict( + dataset_name='mpi_inf_3dhp', + paper_info=dict( + author='ehta, Dushyant and Rhodin, Helge and Casas, Dan and ' + 'Fua, Pascal and Sotnychenko, Oleksandr and Xu, Weipeng and ' + 'Theobalt, Christian', + title='Monocular 3D Human Pose Estimation In The Wild Using Improved ' + 'CNN Supervision', + container='2017 international conference on 3D vision (3DV)', + year='2017', + homepage='http://gvv.mpi-inf.mpg.de/3dhp-dataset', + ), + keypoint_info={ + 0: + dict( + name='head_top', id=0, color=[51, 153, 255], type='upper', + swap=''), + 1: + dict(name='neck', id=1, color=[51, 153, 255], type='upper', swap=''), + 2: + dict( + name='right_shoulder', + id=2, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 3: + dict( + name='right_elbow', + id=3, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 4: + dict( + name='right_wrist', + id=4, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='left_elbow', + id=6, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 7: + dict( + name='left_wrist', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 8: + dict( + name='right_hip', + id=8, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 9: + dict( + name='right_knee', + id=9, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 10: + dict( + name='right_ankle', + id=10, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='left_knee', + id=12, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 13: + dict( + name='left_ankle', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 14: + dict(name='root', id=14, color=[51, 153, 255], type='lower', swap=''), + 15: + dict(name='spine', id=15, color=[51, 153, 255], type='upper', swap=''), + 16: + dict(name='head', id=16, color=[51, 153, 255], type='upper', swap='') + }, + skeleton_info={ + 0: dict(link=('neck', 'right_shoulder'), id=0, color=[255, 128, 0]), + 1: dict( + link=('right_shoulder', 'right_elbow'), id=1, color=[255, 128, 0]), + 2: + dict(link=('right_elbow', 'right_wrist'), id=2, color=[255, 128, 0]), + 3: dict(link=('neck', 
'left_shoulder'), id=3, color=[0, 255, 0]), + 4: dict(link=('left_shoulder', 'left_elbow'), id=4, color=[0, 255, 0]), + 5: dict(link=('left_elbow', 'left_wrist'), id=5, color=[0, 255, 0]), + 6: dict(link=('root', 'right_hip'), id=6, color=[255, 128, 0]), + 7: dict(link=('right_hip', 'right_knee'), id=7, color=[255, 128, 0]), + 8: dict(link=('right_knee', 'right_ankle'), id=8, color=[255, 128, 0]), + 9: dict(link=('root', 'left_hip'), id=9, color=[0, 255, 0]), + 10: dict(link=('left_hip', 'left_knee'), id=10, color=[0, 255, 0]), + 11: dict(link=('left_knee', 'left_ankle'), id=11, color=[0, 255, 0]), + 12: dict(link=('head_top', 'head'), id=12, color=[51, 153, 255]), + 13: dict(link=('head', 'neck'), id=13, color=[51, 153, 255]), + 14: dict(link=('neck', 'spine'), id=14, color=[51, 153, 255]), + 15: dict(link=('spine', 'root'), id=15, color=[51, 153, 255]) + }, + joint_weights=[1.] * 17, + sigmas=[]) diff --git a/configs/_base_/datasets/mpii.py b/configs/_base_/datasets/mpii.py index 6c2a491c7b..2723baead1 100644 --- a/configs/_base_/datasets/mpii.py +++ b/configs/_base_/datasets/mpii.py @@ -1,155 +1,155 @@ -dataset_info = dict( - dataset_name='mpii', - paper_info=dict( - author='Mykhaylo Andriluka and Leonid Pishchulin and ' - 'Peter Gehler and Schiele, Bernt', - title='2D Human Pose Estimation: New Benchmark and ' - 'State of the Art Analysis', - container='IEEE Conference on Computer Vision and ' - 'Pattern Recognition (CVPR)', - year='2014', - homepage='http://human-pose.mpi-inf.mpg.de/', - ), - keypoint_info={ - 0: - dict( - name='right_ankle', - id=0, - color=[255, 128, 0], - type='lower', - swap='left_ankle'), - 1: - dict( - name='right_knee', - id=1, - color=[255, 128, 0], - type='lower', - swap='left_knee'), - 2: - dict( - name='right_hip', - id=2, - color=[255, 128, 0], - type='lower', - swap='left_hip'), - 3: - dict( - name='left_hip', - id=3, - color=[0, 255, 0], - type='lower', - swap='right_hip'), - 4: - dict( - name='left_knee', - id=4, - color=[0, 255, 0], - type='lower', - swap='right_knee'), - 5: - dict( - name='left_ankle', - id=5, - color=[0, 255, 0], - type='lower', - swap='right_ankle'), - 6: - dict(name='pelvis', id=6, color=[51, 153, 255], type='lower', swap=''), - 7: - dict(name='thorax', id=7, color=[51, 153, 255], type='upper', swap=''), - 8: - dict( - name='upper_neck', - id=8, - color=[51, 153, 255], - type='upper', - swap=''), - 9: - dict( - name='head_top', id=9, color=[51, 153, 255], type='upper', - swap=''), - 10: - dict( - name='right_wrist', - id=10, - color=[255, 128, 0], - type='upper', - swap='left_wrist'), - 11: - dict( - name='right_elbow', - id=11, - color=[255, 128, 0], - type='upper', - swap='left_elbow'), - 12: - dict( - name='right_shoulder', - id=12, - color=[255, 128, 0], - type='upper', - swap='left_shoulder'), - 13: - dict( - name='left_shoulder', - id=13, - color=[0, 255, 0], - type='upper', - swap='right_shoulder'), - 14: - dict( - name='left_elbow', - id=14, - color=[0, 255, 0], - type='upper', - swap='right_elbow'), - 15: - dict( - name='left_wrist', - id=15, - color=[0, 255, 0], - type='upper', - swap='right_wrist') - }, - skeleton_info={ - 0: - dict(link=('right_ankle', 'right_knee'), id=0, color=[255, 128, 0]), - 1: - dict(link=('right_knee', 'right_hip'), id=1, color=[255, 128, 0]), - 2: - dict(link=('right_hip', 'pelvis'), id=2, color=[255, 128, 0]), - 3: - dict(link=('pelvis', 'left_hip'), id=3, color=[0, 255, 0]), - 4: - dict(link=('left_hip', 'left_knee'), id=4, color=[0, 255, 0]), - 5: - dict(link=('left_knee', 'left_ankle'), 
id=5, color=[0, 255, 0]), - 6: - dict(link=('pelvis', 'thorax'), id=6, color=[51, 153, 255]), - 7: - dict(link=('thorax', 'upper_neck'), id=7, color=[51, 153, 255]), - 8: - dict(link=('upper_neck', 'head_top'), id=8, color=[51, 153, 255]), - 9: - dict(link=('upper_neck', 'right_shoulder'), id=9, color=[255, 128, 0]), - 10: - dict( - link=('right_shoulder', 'right_elbow'), id=10, color=[255, 128, - 0]), - 11: - dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), - 12: - dict(link=('upper_neck', 'left_shoulder'), id=12, color=[0, 255, 0]), - 13: - dict(link=('left_shoulder', 'left_elbow'), id=13, color=[0, 255, 0]), - 14: - dict(link=('left_elbow', 'left_wrist'), id=14, color=[0, 255, 0]) - }, - joint_weights=[ - 1.5, 1.2, 1., 1., 1.2, 1.5, 1., 1., 1., 1., 1.5, 1.2, 1., 1., 1.2, 1.5 - ], - # Adapted from COCO dataset. - sigmas=[ - 0.089, 0.083, 0.107, 0.107, 0.083, 0.089, 0.026, 0.026, 0.026, 0.026, - 0.062, 0.072, 0.179, 0.179, 0.072, 0.062 - ]) +dataset_info = dict( + dataset_name='mpii', + paper_info=dict( + author='Mykhaylo Andriluka and Leonid Pishchulin and ' + 'Peter Gehler and Schiele, Bernt', + title='2D Human Pose Estimation: New Benchmark and ' + 'State of the Art Analysis', + container='IEEE Conference on Computer Vision and ' + 'Pattern Recognition (CVPR)', + year='2014', + homepage='http://human-pose.mpi-inf.mpg.de/', + ), + keypoint_info={ + 0: + dict( + name='right_ankle', + id=0, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 1: + dict( + name='right_knee', + id=1, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 2: + dict( + name='right_hip', + id=2, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 3: + dict( + name='left_hip', + id=3, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 4: + dict( + name='left_knee', + id=4, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 5: + dict( + name='left_ankle', + id=5, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 6: + dict(name='pelvis', id=6, color=[51, 153, 255], type='lower', swap=''), + 7: + dict(name='thorax', id=7, color=[51, 153, 255], type='upper', swap=''), + 8: + dict( + name='upper_neck', + id=8, + color=[51, 153, 255], + type='upper', + swap=''), + 9: + dict( + name='head_top', id=9, color=[51, 153, 255], type='upper', + swap=''), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='right_elbow', + id=11, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 12: + dict( + name='right_shoulder', + id=12, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 13: + dict( + name='left_shoulder', + id=13, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 14: + dict( + name='left_elbow', + id=14, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 15: + dict( + name='left_wrist', + id=15, + color=[0, 255, 0], + type='upper', + swap='right_wrist') + }, + skeleton_info={ + 0: + dict(link=('right_ankle', 'right_knee'), id=0, color=[255, 128, 0]), + 1: + dict(link=('right_knee', 'right_hip'), id=1, color=[255, 128, 0]), + 2: + dict(link=('right_hip', 'pelvis'), id=2, color=[255, 128, 0]), + 3: + dict(link=('pelvis', 'left_hip'), id=3, color=[0, 255, 0]), + 4: + dict(link=('left_hip', 'left_knee'), id=4, color=[0, 255, 0]), + 5: + dict(link=('left_knee', 'left_ankle'), id=5, color=[0, 255, 0]), + 6: + dict(link=('pelvis', 'thorax'), id=6, color=[51, 153, 255]), + 7: + dict(link=('thorax', 'upper_neck'), id=7, 
color=[51, 153, 255]), + 8: + dict(link=('upper_neck', 'head_top'), id=8, color=[51, 153, 255]), + 9: + dict(link=('upper_neck', 'right_shoulder'), id=9, color=[255, 128, 0]), + 10: + dict( + link=('right_shoulder', 'right_elbow'), id=10, color=[255, 128, + 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('upper_neck', 'left_shoulder'), id=12, color=[0, 255, 0]), + 13: + dict(link=('left_shoulder', 'left_elbow'), id=13, color=[0, 255, 0]), + 14: + dict(link=('left_elbow', 'left_wrist'), id=14, color=[0, 255, 0]) + }, + joint_weights=[ + 1.5, 1.2, 1., 1., 1.2, 1.5, 1., 1., 1., 1., 1.5, 1.2, 1., 1., 1.2, 1.5 + ], + # Adapted from COCO dataset. + sigmas=[ + 0.089, 0.083, 0.107, 0.107, 0.083, 0.089, 0.026, 0.026, 0.026, 0.026, + 0.062, 0.072, 0.179, 0.179, 0.072, 0.062 + ]) diff --git a/configs/_base_/datasets/mpii_trb.py b/configs/_base_/datasets/mpii_trb.py index 73940d4b48..ddb7e9e53a 100644 --- a/configs/_base_/datasets/mpii_trb.py +++ b/configs/_base_/datasets/mpii_trb.py @@ -1,380 +1,380 @@ -dataset_info = dict( - dataset_name='mpii_trb', - paper_info=dict( - author='Duan, Haodong and Lin, Kwan-Yee and Jin, Sheng and ' - 'Liu, Wentao and Qian, Chen and Ouyang, Wanli', - title='TRB: A Novel Triplet Representation for ' - 'Understanding 2D Human Body', - container='Proceedings of the IEEE International ' - 'Conference on Computer Vision', - year='2019', - homepage='https://github.com/kennymckormick/' - 'Triplet-Representation-of-human-Body', - ), - keypoint_info={ - 0: - dict( - name='left_shoulder', - id=0, - color=[0, 255, 0], - type='upper', - swap='right_shoulder'), - 1: - dict( - name='right_shoulder', - id=1, - color=[255, 128, 0], - type='upper', - swap='left_shoulder'), - 2: - dict( - name='left_elbow', - id=2, - color=[0, 255, 0], - type='upper', - swap='right_elbow'), - 3: - dict( - name='right_elbow', - id=3, - color=[255, 128, 0], - type='upper', - swap='left_elbow'), - 4: - dict( - name='left_wrist', - id=4, - color=[0, 255, 0], - type='upper', - swap='right_wrist'), - 5: - dict( - name='right_wrist', - id=5, - color=[255, 128, 0], - type='upper', - swap='left_wrist'), - 6: - dict( - name='left_hip', - id=6, - color=[0, 255, 0], - type='lower', - swap='right_hip'), - 7: - dict( - name='right_hip', - id=7, - color=[255, 128, 0], - type='lower', - swap='left_hip'), - 8: - dict( - name='left_knee', - id=8, - color=[0, 255, 0], - type='lower', - swap='right_knee'), - 9: - dict( - name='right_knee', - id=9, - color=[255, 128, 0], - type='lower', - swap='left_knee'), - 10: - dict( - name='left_ankle', - id=10, - color=[0, 255, 0], - type='lower', - swap='right_ankle'), - 11: - dict( - name='right_ankle', - id=11, - color=[255, 128, 0], - type='lower', - swap='left_ankle'), - 12: - dict(name='head', id=12, color=[51, 153, 255], type='upper', swap=''), - 13: - dict(name='neck', id=13, color=[51, 153, 255], type='upper', swap=''), - 14: - dict( - name='right_neck', - id=14, - color=[255, 255, 255], - type='upper', - swap='left_neck'), - 15: - dict( - name='left_neck', - id=15, - color=[255, 255, 255], - type='upper', - swap='right_neck'), - 16: - dict( - name='medial_right_shoulder', - id=16, - color=[255, 255, 255], - type='upper', - swap='medial_left_shoulder'), - 17: - dict( - name='lateral_right_shoulder', - id=17, - color=[255, 255, 255], - type='upper', - swap='lateral_left_shoulder'), - 18: - dict( - name='medial_right_bow', - id=18, - color=[255, 255, 255], - type='upper', - swap='medial_left_bow'), - 19: - dict( - 
name='lateral_right_bow', - id=19, - color=[255, 255, 255], - type='upper', - swap='lateral_left_bow'), - 20: - dict( - name='medial_right_wrist', - id=20, - color=[255, 255, 255], - type='upper', - swap='medial_left_wrist'), - 21: - dict( - name='lateral_right_wrist', - id=21, - color=[255, 255, 255], - type='upper', - swap='lateral_left_wrist'), - 22: - dict( - name='medial_left_shoulder', - id=22, - color=[255, 255, 255], - type='upper', - swap='medial_right_shoulder'), - 23: - dict( - name='lateral_left_shoulder', - id=23, - color=[255, 255, 255], - type='upper', - swap='lateral_right_shoulder'), - 24: - dict( - name='medial_left_bow', - id=24, - color=[255, 255, 255], - type='upper', - swap='medial_right_bow'), - 25: - dict( - name='lateral_left_bow', - id=25, - color=[255, 255, 255], - type='upper', - swap='lateral_right_bow'), - 26: - dict( - name='medial_left_wrist', - id=26, - color=[255, 255, 255], - type='upper', - swap='medial_right_wrist'), - 27: - dict( - name='lateral_left_wrist', - id=27, - color=[255, 255, 255], - type='upper', - swap='lateral_right_wrist'), - 28: - dict( - name='medial_right_hip', - id=28, - color=[255, 255, 255], - type='lower', - swap='medial_left_hip'), - 29: - dict( - name='lateral_right_hip', - id=29, - color=[255, 255, 255], - type='lower', - swap='lateral_left_hip'), - 30: - dict( - name='medial_right_knee', - id=30, - color=[255, 255, 255], - type='lower', - swap='medial_left_knee'), - 31: - dict( - name='lateral_right_knee', - id=31, - color=[255, 255, 255], - type='lower', - swap='lateral_left_knee'), - 32: - dict( - name='medial_right_ankle', - id=32, - color=[255, 255, 255], - type='lower', - swap='medial_left_ankle'), - 33: - dict( - name='lateral_right_ankle', - id=33, - color=[255, 255, 255], - type='lower', - swap='lateral_left_ankle'), - 34: - dict( - name='medial_left_hip', - id=34, - color=[255, 255, 255], - type='lower', - swap='medial_right_hip'), - 35: - dict( - name='lateral_left_hip', - id=35, - color=[255, 255, 255], - type='lower', - swap='lateral_right_hip'), - 36: - dict( - name='medial_left_knee', - id=36, - color=[255, 255, 255], - type='lower', - swap='medial_right_knee'), - 37: - dict( - name='lateral_left_knee', - id=37, - color=[255, 255, 255], - type='lower', - swap='lateral_right_knee'), - 38: - dict( - name='medial_left_ankle', - id=38, - color=[255, 255, 255], - type='lower', - swap='medial_right_ankle'), - 39: - dict( - name='lateral_left_ankle', - id=39, - color=[255, 255, 255], - type='lower', - swap='lateral_right_ankle'), - }, - skeleton_info={ - 0: - dict(link=('head', 'neck'), id=0, color=[51, 153, 255]), - 1: - dict(link=('neck', 'left_shoulder'), id=1, color=[51, 153, 255]), - 2: - dict(link=('neck', 'right_shoulder'), id=2, color=[51, 153, 255]), - 3: - dict(link=('left_shoulder', 'left_elbow'), id=3, color=[0, 255, 0]), - 4: - dict( - link=('right_shoulder', 'right_elbow'), id=4, color=[255, 128, 0]), - 5: - dict(link=('left_elbow', 'left_wrist'), id=5, color=[0, 255, 0]), - 6: - dict(link=('right_elbow', 'right_wrist'), id=6, color=[255, 128, 0]), - 7: - dict(link=('left_shoulder', 'left_hip'), id=7, color=[51, 153, 255]), - 8: - dict(link=('right_shoulder', 'right_hip'), id=8, color=[51, 153, 255]), - 9: - dict(link=('left_hip', 'right_hip'), id=9, color=[51, 153, 255]), - 10: - dict(link=('left_hip', 'left_knee'), id=10, color=[0, 255, 0]), - 11: - dict(link=('right_hip', 'right_knee'), id=11, color=[255, 128, 0]), - 12: - dict(link=('left_knee', 'left_ankle'), id=12, color=[0, 255, 0]), - 13: - 
dict(link=('right_knee', 'right_ankle'), id=13, color=[255, 128, 0]), - 14: - dict(link=('right_neck', 'left_neck'), id=14, color=[255, 255, 255]), - 15: - dict( - link=('medial_right_shoulder', 'lateral_right_shoulder'), - id=15, - color=[255, 255, 255]), - 16: - dict( - link=('medial_right_bow', 'lateral_right_bow'), - id=16, - color=[255, 255, 255]), - 17: - dict( - link=('medial_right_wrist', 'lateral_right_wrist'), - id=17, - color=[255, 255, 255]), - 18: - dict( - link=('medial_left_shoulder', 'lateral_left_shoulder'), - id=18, - color=[255, 255, 255]), - 19: - dict( - link=('medial_left_bow', 'lateral_left_bow'), - id=19, - color=[255, 255, 255]), - 20: - dict( - link=('medial_left_wrist', 'lateral_left_wrist'), - id=20, - color=[255, 255, 255]), - 21: - dict( - link=('medial_right_hip', 'lateral_right_hip'), - id=21, - color=[255, 255, 255]), - 22: - dict( - link=('medial_right_knee', 'lateral_right_knee'), - id=22, - color=[255, 255, 255]), - 23: - dict( - link=('medial_right_ankle', 'lateral_right_ankle'), - id=23, - color=[255, 255, 255]), - 24: - dict( - link=('medial_left_hip', 'lateral_left_hip'), - id=24, - color=[255, 255, 255]), - 25: - dict( - link=('medial_left_knee', 'lateral_left_knee'), - id=25, - color=[255, 255, 255]), - 26: - dict( - link=('medial_left_ankle', 'lateral_left_ankle'), - id=26, - color=[255, 255, 255]) - }, - joint_weights=[1.] * 40, - sigmas=[]) +dataset_info = dict( + dataset_name='mpii_trb', + paper_info=dict( + author='Duan, Haodong and Lin, Kwan-Yee and Jin, Sheng and ' + 'Liu, Wentao and Qian, Chen and Ouyang, Wanli', + title='TRB: A Novel Triplet Representation for ' + 'Understanding 2D Human Body', + container='Proceedings of the IEEE International ' + 'Conference on Computer Vision', + year='2019', + homepage='https://github.com/kennymckormick/' + 'Triplet-Representation-of-human-Body', + ), + keypoint_info={ + 0: + dict( + name='left_shoulder', + id=0, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 1: + dict( + name='right_shoulder', + id=1, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 2: + dict( + name='left_elbow', + id=2, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 3: + dict( + name='right_elbow', + id=3, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 4: + dict( + name='left_wrist', + id=4, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 5: + dict( + name='right_wrist', + id=5, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 6: + dict( + name='left_hip', + id=6, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 7: + dict( + name='right_hip', + id=7, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 8: + dict( + name='left_knee', + id=8, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 9: + dict( + name='right_knee', + id=9, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 10: + dict( + name='left_ankle', + id=10, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 11: + dict( + name='right_ankle', + id=11, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 12: + dict(name='head', id=12, color=[51, 153, 255], type='upper', swap=''), + 13: + dict(name='neck', id=13, color=[51, 153, 255], type='upper', swap=''), + 14: + dict( + name='right_neck', + id=14, + color=[255, 255, 255], + type='upper', + swap='left_neck'), + 15: + dict( + name='left_neck', + id=15, + color=[255, 255, 255], + type='upper', + swap='right_neck'), + 16: + dict( + name='medial_right_shoulder', + id=16, 
+ color=[255, 255, 255], + type='upper', + swap='medial_left_shoulder'), + 17: + dict( + name='lateral_right_shoulder', + id=17, + color=[255, 255, 255], + type='upper', + swap='lateral_left_shoulder'), + 18: + dict( + name='medial_right_bow', + id=18, + color=[255, 255, 255], + type='upper', + swap='medial_left_bow'), + 19: + dict( + name='lateral_right_bow', + id=19, + color=[255, 255, 255], + type='upper', + swap='lateral_left_bow'), + 20: + dict( + name='medial_right_wrist', + id=20, + color=[255, 255, 255], + type='upper', + swap='medial_left_wrist'), + 21: + dict( + name='lateral_right_wrist', + id=21, + color=[255, 255, 255], + type='upper', + swap='lateral_left_wrist'), + 22: + dict( + name='medial_left_shoulder', + id=22, + color=[255, 255, 255], + type='upper', + swap='medial_right_shoulder'), + 23: + dict( + name='lateral_left_shoulder', + id=23, + color=[255, 255, 255], + type='upper', + swap='lateral_right_shoulder'), + 24: + dict( + name='medial_left_bow', + id=24, + color=[255, 255, 255], + type='upper', + swap='medial_right_bow'), + 25: + dict( + name='lateral_left_bow', + id=25, + color=[255, 255, 255], + type='upper', + swap='lateral_right_bow'), + 26: + dict( + name='medial_left_wrist', + id=26, + color=[255, 255, 255], + type='upper', + swap='medial_right_wrist'), + 27: + dict( + name='lateral_left_wrist', + id=27, + color=[255, 255, 255], + type='upper', + swap='lateral_right_wrist'), + 28: + dict( + name='medial_right_hip', + id=28, + color=[255, 255, 255], + type='lower', + swap='medial_left_hip'), + 29: + dict( + name='lateral_right_hip', + id=29, + color=[255, 255, 255], + type='lower', + swap='lateral_left_hip'), + 30: + dict( + name='medial_right_knee', + id=30, + color=[255, 255, 255], + type='lower', + swap='medial_left_knee'), + 31: + dict( + name='lateral_right_knee', + id=31, + color=[255, 255, 255], + type='lower', + swap='lateral_left_knee'), + 32: + dict( + name='medial_right_ankle', + id=32, + color=[255, 255, 255], + type='lower', + swap='medial_left_ankle'), + 33: + dict( + name='lateral_right_ankle', + id=33, + color=[255, 255, 255], + type='lower', + swap='lateral_left_ankle'), + 34: + dict( + name='medial_left_hip', + id=34, + color=[255, 255, 255], + type='lower', + swap='medial_right_hip'), + 35: + dict( + name='lateral_left_hip', + id=35, + color=[255, 255, 255], + type='lower', + swap='lateral_right_hip'), + 36: + dict( + name='medial_left_knee', + id=36, + color=[255, 255, 255], + type='lower', + swap='medial_right_knee'), + 37: + dict( + name='lateral_left_knee', + id=37, + color=[255, 255, 255], + type='lower', + swap='lateral_right_knee'), + 38: + dict( + name='medial_left_ankle', + id=38, + color=[255, 255, 255], + type='lower', + swap='medial_right_ankle'), + 39: + dict( + name='lateral_left_ankle', + id=39, + color=[255, 255, 255], + type='lower', + swap='lateral_right_ankle'), + }, + skeleton_info={ + 0: + dict(link=('head', 'neck'), id=0, color=[51, 153, 255]), + 1: + dict(link=('neck', 'left_shoulder'), id=1, color=[51, 153, 255]), + 2: + dict(link=('neck', 'right_shoulder'), id=2, color=[51, 153, 255]), + 3: + dict(link=('left_shoulder', 'left_elbow'), id=3, color=[0, 255, 0]), + 4: + dict( + link=('right_shoulder', 'right_elbow'), id=4, color=[255, 128, 0]), + 5: + dict(link=('left_elbow', 'left_wrist'), id=5, color=[0, 255, 0]), + 6: + dict(link=('right_elbow', 'right_wrist'), id=6, color=[255, 128, 0]), + 7: + dict(link=('left_shoulder', 'left_hip'), id=7, color=[51, 153, 255]), + 8: + dict(link=('right_shoulder', 'right_hip'), 
id=8, color=[51, 153, 255]), + 9: + dict(link=('left_hip', 'right_hip'), id=9, color=[51, 153, 255]), + 10: + dict(link=('left_hip', 'left_knee'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_hip', 'right_knee'), id=11, color=[255, 128, 0]), + 12: + dict(link=('left_knee', 'left_ankle'), id=12, color=[0, 255, 0]), + 13: + dict(link=('right_knee', 'right_ankle'), id=13, color=[255, 128, 0]), + 14: + dict(link=('right_neck', 'left_neck'), id=14, color=[255, 255, 255]), + 15: + dict( + link=('medial_right_shoulder', 'lateral_right_shoulder'), + id=15, + color=[255, 255, 255]), + 16: + dict( + link=('medial_right_bow', 'lateral_right_bow'), + id=16, + color=[255, 255, 255]), + 17: + dict( + link=('medial_right_wrist', 'lateral_right_wrist'), + id=17, + color=[255, 255, 255]), + 18: + dict( + link=('medial_left_shoulder', 'lateral_left_shoulder'), + id=18, + color=[255, 255, 255]), + 19: + dict( + link=('medial_left_bow', 'lateral_left_bow'), + id=19, + color=[255, 255, 255]), + 20: + dict( + link=('medial_left_wrist', 'lateral_left_wrist'), + id=20, + color=[255, 255, 255]), + 21: + dict( + link=('medial_right_hip', 'lateral_right_hip'), + id=21, + color=[255, 255, 255]), + 22: + dict( + link=('medial_right_knee', 'lateral_right_knee'), + id=22, + color=[255, 255, 255]), + 23: + dict( + link=('medial_right_ankle', 'lateral_right_ankle'), + id=23, + color=[255, 255, 255]), + 24: + dict( + link=('medial_left_hip', 'lateral_left_hip'), + id=24, + color=[255, 255, 255]), + 25: + dict( + link=('medial_left_knee', 'lateral_left_knee'), + id=25, + color=[255, 255, 255]), + 26: + dict( + link=('medial_left_ankle', 'lateral_left_ankle'), + id=26, + color=[255, 255, 255]) + }, + joint_weights=[1.] * 40, + sigmas=[]) diff --git a/configs/_base_/datasets/ochuman.py b/configs/_base_/datasets/ochuman.py index 2ef20838fe..e6e86ba48d 100644 --- a/configs/_base_/datasets/ochuman.py +++ b/configs/_base_/datasets/ochuman.py @@ -1,181 +1,181 @@ -dataset_info = dict( - dataset_name='ochuman', - paper_info=dict( - author='Zhang, Song-Hai and Li, Ruilong and Dong, Xin and ' - 'Rosin, Paul and Cai, Zixi and Han, Xi and ' - 'Yang, Dingcheng and Huang, Haozhi and Hu, Shi-Min', - title='Pose2seg: Detection free human instance segmentation', - container='Proceedings of the IEEE conference on computer ' - 'vision and pattern recognition', - year='2019', - homepage='https://github.com/liruilong940607/OCHumanApi', - ), - keypoint_info={ - 0: - dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), - 1: - dict( - name='left_eye', - id=1, - color=[51, 153, 255], - type='upper', - swap='right_eye'), - 2: - dict( - name='right_eye', - id=2, - color=[51, 153, 255], - type='upper', - swap='left_eye'), - 3: - dict( - name='left_ear', - id=3, - color=[51, 153, 255], - type='upper', - swap='right_ear'), - 4: - dict( - name='right_ear', - id=4, - color=[51, 153, 255], - type='upper', - swap='left_ear'), - 5: - dict( - name='left_shoulder', - id=5, - color=[0, 255, 0], - type='upper', - swap='right_shoulder'), - 6: - dict( - name='right_shoulder', - id=6, - color=[255, 128, 0], - type='upper', - swap='left_shoulder'), - 7: - dict( - name='left_elbow', - id=7, - color=[0, 255, 0], - type='upper', - swap='right_elbow'), - 8: - dict( - name='right_elbow', - id=8, - color=[255, 128, 0], - type='upper', - swap='left_elbow'), - 9: - dict( - name='left_wrist', - id=9, - color=[0, 255, 0], - type='upper', - swap='right_wrist'), - 10: - dict( - name='right_wrist', - id=10, - color=[255, 128, 0], - type='upper', - 
swap='left_wrist'), - 11: - dict( - name='left_hip', - id=11, - color=[0, 255, 0], - type='lower', - swap='right_hip'), - 12: - dict( - name='right_hip', - id=12, - color=[255, 128, 0], - type='lower', - swap='left_hip'), - 13: - dict( - name='left_knee', - id=13, - color=[0, 255, 0], - type='lower', - swap='right_knee'), - 14: - dict( - name='right_knee', - id=14, - color=[255, 128, 0], - type='lower', - swap='left_knee'), - 15: - dict( - name='left_ankle', - id=15, - color=[0, 255, 0], - type='lower', - swap='right_ankle'), - 16: - dict( - name='right_ankle', - id=16, - color=[255, 128, 0], - type='lower', - swap='left_ankle') - }, - skeleton_info={ - 0: - dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), - 1: - dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), - 2: - dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), - 3: - dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), - 4: - dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), - 5: - dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), - 6: - dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), - 7: - dict( - link=('left_shoulder', 'right_shoulder'), - id=7, - color=[51, 153, 255]), - 8: - dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), - 9: - dict( - link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), - 10: - dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), - 11: - dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), - 12: - dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), - 13: - dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), - 14: - dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), - 15: - dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), - 16: - dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), - 17: - dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), - 18: - dict( - link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]) - }, - joint_weights=[ - 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, - 1.5 - ], - sigmas=[ - 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, - 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089 - ]) +dataset_info = dict( + dataset_name='ochuman', + paper_info=dict( + author='Zhang, Song-Hai and Li, Ruilong and Dong, Xin and ' + 'Rosin, Paul and Cai, Zixi and Han, Xi and ' + 'Yang, Dingcheng and Huang, Haozhi and Hu, Shi-Min', + title='Pose2seg: Detection free human instance segmentation', + container='Proceedings of the IEEE conference on computer ' + 'vision and pattern recognition', + year='2019', + homepage='https://github.com/liruilong940607/OCHumanApi', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + 
dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'right_shoulder'), + id=7, + color=[51, 153, 255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), + 13: + dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), + 14: + dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), + 16: + dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), + 18: + dict( + link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]) + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5 + ], + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089 + ]) diff --git a/configs/_base_/datasets/octseg.py b/configs/_base_/datasets/octseg.py index 64a2c3ab9a..40d2538524 100644 --- a/configs/_base_/datasets/octseg.py +++ b/configs/_base_/datasets/octseg.py @@ -1,28 +1,28 @@ -dataset_info = dict( - dataset_name='octseg', - keypoint_info={ - 0: - dict( - name='ostium_point_start', - id=0, - color=[51, 153, 255], - type='', - swap='ostium_point_end'), - 1: - dict( - name='ostium_point_end', - id=1, - color=[51, 153, 255], - type='', - swap='ostium_point_start') - }, - skeleton_info={ - 0: - dict(link=('ostium_point_start', 'ostium_point_end'), id=0, color=[0, 255, 0]) - }, - joint_weights=[ - 1., 1. - ], - sigmas=[ - 1., 1. 
+dataset_info = dict( + dataset_name='octseg', + keypoint_info={ + 0: + dict( + name='ostium_point_start', + id=0, + color=[51, 153, 255], + type='', + swap='ostium_point_end'), + 1: + dict( + name='ostium_point_end', + id=1, + color=[51, 153, 255], + type='', + swap='ostium_point_start') + }, + skeleton_info={ + 0: + dict(link=('ostium_point_start', 'ostium_point_end'), id=0, color=[0, 255, 0]) + }, + joint_weights=[ + 1., 1. + ], + sigmas=[ + 1., 1. ]) \ No newline at end of file diff --git a/configs/_base_/datasets/onehand10k.py b/configs/_base_/datasets/onehand10k.py index 016770f14f..833f1863d5 100644 --- a/configs/_base_/datasets/onehand10k.py +++ b/configs/_base_/datasets/onehand10k.py @@ -1,142 +1,142 @@ -dataset_info = dict( - dataset_name='onehand10k', - paper_info=dict( - author='Wang, Yangang and Peng, Cong and Liu, Yebin', - title='Mask-pose cascaded cnn for 2d hand pose estimation ' - 'from single color image', - container='IEEE Transactions on Circuits and Systems ' - 'for Video Technology', - year='2018', - homepage='https://www.yangangwang.com/papers/WANG-MCC-2018-10.html', - ), - keypoint_info={ - 0: - dict(name='wrist', id=0, color=[255, 255, 255], type='', swap=''), - 1: - dict(name='thumb1', id=1, color=[255, 128, 0], type='', swap=''), - 2: - dict(name='thumb2', id=2, color=[255, 128, 0], type='', swap=''), - 3: - dict(name='thumb3', id=3, color=[255, 128, 0], type='', swap=''), - 4: - dict(name='thumb4', id=4, color=[255, 128, 0], type='', swap=''), - 5: - dict( - name='forefinger1', id=5, color=[255, 153, 255], type='', swap=''), - 6: - dict( - name='forefinger2', id=6, color=[255, 153, 255], type='', swap=''), - 7: - dict( - name='forefinger3', id=7, color=[255, 153, 255], type='', swap=''), - 8: - dict( - name='forefinger4', id=8, color=[255, 153, 255], type='', swap=''), - 9: - dict( - name='middle_finger1', - id=9, - color=[102, 178, 255], - type='', - swap=''), - 10: - dict( - name='middle_finger2', - id=10, - color=[102, 178, 255], - type='', - swap=''), - 11: - dict( - name='middle_finger3', - id=11, - color=[102, 178, 255], - type='', - swap=''), - 12: - dict( - name='middle_finger4', - id=12, - color=[102, 178, 255], - type='', - swap=''), - 13: - dict( - name='ring_finger1', id=13, color=[255, 51, 51], type='', swap=''), - 14: - dict( - name='ring_finger2', id=14, color=[255, 51, 51], type='', swap=''), - 15: - dict( - name='ring_finger3', id=15, color=[255, 51, 51], type='', swap=''), - 16: - dict( - name='ring_finger4', id=16, color=[255, 51, 51], type='', swap=''), - 17: - dict(name='pinky_finger1', id=17, color=[0, 255, 0], type='', swap=''), - 18: - dict(name='pinky_finger2', id=18, color=[0, 255, 0], type='', swap=''), - 19: - dict(name='pinky_finger3', id=19, color=[0, 255, 0], type='', swap=''), - 20: - dict(name='pinky_finger4', id=20, color=[0, 255, 0], type='', swap='') - }, - skeleton_info={ - 0: - dict(link=('wrist', 'thumb1'), id=0, color=[255, 128, 0]), - 1: - dict(link=('thumb1', 'thumb2'), id=1, color=[255, 128, 0]), - 2: - dict(link=('thumb2', 'thumb3'), id=2, color=[255, 128, 0]), - 3: - dict(link=('thumb3', 'thumb4'), id=3, color=[255, 128, 0]), - 4: - dict(link=('wrist', 'forefinger1'), id=4, color=[255, 153, 255]), - 5: - dict(link=('forefinger1', 'forefinger2'), id=5, color=[255, 153, 255]), - 6: - dict(link=('forefinger2', 'forefinger3'), id=6, color=[255, 153, 255]), - 7: - dict(link=('forefinger3', 'forefinger4'), id=7, color=[255, 153, 255]), - 8: - dict(link=('wrist', 'middle_finger1'), id=8, color=[102, 178, 255]), - 9: - dict( 
- link=('middle_finger1', 'middle_finger2'), - id=9, - color=[102, 178, 255]), - 10: - dict( - link=('middle_finger2', 'middle_finger3'), - id=10, - color=[102, 178, 255]), - 11: - dict( - link=('middle_finger3', 'middle_finger4'), - id=11, - color=[102, 178, 255]), - 12: - dict(link=('wrist', 'ring_finger1'), id=12, color=[255, 51, 51]), - 13: - dict( - link=('ring_finger1', 'ring_finger2'), id=13, color=[255, 51, 51]), - 14: - dict( - link=('ring_finger2', 'ring_finger3'), id=14, color=[255, 51, 51]), - 15: - dict( - link=('ring_finger3', 'ring_finger4'), id=15, color=[255, 51, 51]), - 16: - dict(link=('wrist', 'pinky_finger1'), id=16, color=[0, 255, 0]), - 17: - dict( - link=('pinky_finger1', 'pinky_finger2'), id=17, color=[0, 255, 0]), - 18: - dict( - link=('pinky_finger2', 'pinky_finger3'), id=18, color=[0, 255, 0]), - 19: - dict( - link=('pinky_finger3', 'pinky_finger4'), id=19, color=[0, 255, 0]) - }, - joint_weights=[1.] * 21, - sigmas=[]) +dataset_info = dict( + dataset_name='onehand10k', + paper_info=dict( + author='Wang, Yangang and Peng, Cong and Liu, Yebin', + title='Mask-pose cascaded cnn for 2d hand pose estimation ' + 'from single color image', + container='IEEE Transactions on Circuits and Systems ' + 'for Video Technology', + year='2018', + homepage='https://www.yangangwang.com/papers/WANG-MCC-2018-10.html', + ), + keypoint_info={ + 0: + dict(name='wrist', id=0, color=[255, 255, 255], type='', swap=''), + 1: + dict(name='thumb1', id=1, color=[255, 128, 0], type='', swap=''), + 2: + dict(name='thumb2', id=2, color=[255, 128, 0], type='', swap=''), + 3: + dict(name='thumb3', id=3, color=[255, 128, 0], type='', swap=''), + 4: + dict(name='thumb4', id=4, color=[255, 128, 0], type='', swap=''), + 5: + dict( + name='forefinger1', id=5, color=[255, 153, 255], type='', swap=''), + 6: + dict( + name='forefinger2', id=6, color=[255, 153, 255], type='', swap=''), + 7: + dict( + name='forefinger3', id=7, color=[255, 153, 255], type='', swap=''), + 8: + dict( + name='forefinger4', id=8, color=[255, 153, 255], type='', swap=''), + 9: + dict( + name='middle_finger1', + id=9, + color=[102, 178, 255], + type='', + swap=''), + 10: + dict( + name='middle_finger2', + id=10, + color=[102, 178, 255], + type='', + swap=''), + 11: + dict( + name='middle_finger3', + id=11, + color=[102, 178, 255], + type='', + swap=''), + 12: + dict( + name='middle_finger4', + id=12, + color=[102, 178, 255], + type='', + swap=''), + 13: + dict( + name='ring_finger1', id=13, color=[255, 51, 51], type='', swap=''), + 14: + dict( + name='ring_finger2', id=14, color=[255, 51, 51], type='', swap=''), + 15: + dict( + name='ring_finger3', id=15, color=[255, 51, 51], type='', swap=''), + 16: + dict( + name='ring_finger4', id=16, color=[255, 51, 51], type='', swap=''), + 17: + dict(name='pinky_finger1', id=17, color=[0, 255, 0], type='', swap=''), + 18: + dict(name='pinky_finger2', id=18, color=[0, 255, 0], type='', swap=''), + 19: + dict(name='pinky_finger3', id=19, color=[0, 255, 0], type='', swap=''), + 20: + dict(name='pinky_finger4', id=20, color=[0, 255, 0], type='', swap='') + }, + skeleton_info={ + 0: + dict(link=('wrist', 'thumb1'), id=0, color=[255, 128, 0]), + 1: + dict(link=('thumb1', 'thumb2'), id=1, color=[255, 128, 0]), + 2: + dict(link=('thumb2', 'thumb3'), id=2, color=[255, 128, 0]), + 3: + dict(link=('thumb3', 'thumb4'), id=3, color=[255, 128, 0]), + 4: + dict(link=('wrist', 'forefinger1'), id=4, color=[255, 153, 255]), + 5: + dict(link=('forefinger1', 'forefinger2'), id=5, color=[255, 153, 255]), + 6: + 
dict(link=('forefinger2', 'forefinger3'), id=6, color=[255, 153, 255]), + 7: + dict(link=('forefinger3', 'forefinger4'), id=7, color=[255, 153, 255]), + 8: + dict(link=('wrist', 'middle_finger1'), id=8, color=[102, 178, 255]), + 9: + dict( + link=('middle_finger1', 'middle_finger2'), + id=9, + color=[102, 178, 255]), + 10: + dict( + link=('middle_finger2', 'middle_finger3'), + id=10, + color=[102, 178, 255]), + 11: + dict( + link=('middle_finger3', 'middle_finger4'), + id=11, + color=[102, 178, 255]), + 12: + dict(link=('wrist', 'ring_finger1'), id=12, color=[255, 51, 51]), + 13: + dict( + link=('ring_finger1', 'ring_finger2'), id=13, color=[255, 51, 51]), + 14: + dict( + link=('ring_finger2', 'ring_finger3'), id=14, color=[255, 51, 51]), + 15: + dict( + link=('ring_finger3', 'ring_finger4'), id=15, color=[255, 51, 51]), + 16: + dict(link=('wrist', 'pinky_finger1'), id=16, color=[0, 255, 0]), + 17: + dict( + link=('pinky_finger1', 'pinky_finger2'), id=17, color=[0, 255, 0]), + 18: + dict( + link=('pinky_finger2', 'pinky_finger3'), id=18, color=[0, 255, 0]), + 19: + dict( + link=('pinky_finger3', 'pinky_finger4'), id=19, color=[0, 255, 0]) + }, + joint_weights=[1.] * 21, + sigmas=[]) diff --git a/configs/_base_/datasets/panoptic_body3d.py b/configs/_base_/datasets/panoptic_body3d.py index e3b19ac462..662340951d 100644 --- a/configs/_base_/datasets/panoptic_body3d.py +++ b/configs/_base_/datasets/panoptic_body3d.py @@ -1,160 +1,160 @@ -dataset_info = dict( - dataset_name='panoptic_pose_3d', - paper_info=dict( - author='Joo, Hanbyul and Simon, Tomas and Li, Xulong' - 'and Liu, Hao and Tan, Lei and Gui, Lin and Banerjee, Sean' - 'and Godisart, Timothy and Nabbe, Bart and Matthews, Iain' - 'and Kanade, Takeo and Nobuhara, Shohei and Sheikh, Yaser', - title='Panoptic Studio: A Massively Multiview System ' - 'for Interaction Motion Capture', - container='IEEE Transactions on Pattern Analysis' - ' and Machine Intelligence', - year='2017', - homepage='http://domedb.perception.cs.cmu.edu', - ), - keypoint_info={ - 0: - dict(name='neck', id=0, color=[51, 153, 255], type='upper', swap=''), - 1: - dict(name='nose', id=1, color=[51, 153, 255], type='upper', swap=''), - 2: - dict(name='mid_hip', id=2, color=[0, 255, 0], type='lower', swap=''), - 3: - dict( - name='left_shoulder', - id=3, - color=[0, 255, 0], - type='upper', - swap='right_shoulder'), - 4: - dict( - name='left_elbow', - id=4, - color=[0, 255, 0], - type='upper', - swap='right_elbow'), - 5: - dict( - name='left_wrist', - id=5, - color=[0, 255, 0], - type='upper', - swap='right_wrist'), - 6: - dict( - name='left_hip', - id=6, - color=[0, 255, 0], - type='lower', - swap='right_hip'), - 7: - dict( - name='left_knee', - id=7, - color=[0, 255, 0], - type='lower', - swap='right_knee'), - 8: - dict( - name='left_ankle', - id=8, - color=[0, 255, 0], - type='lower', - swap='right_ankle'), - 9: - dict( - name='right_shoulder', - id=9, - color=[255, 128, 0], - type='upper', - swap='left_shoulder'), - 10: - dict( - name='right_elbow', - id=10, - color=[255, 128, 0], - type='upper', - swap='left_elbow'), - 11: - dict( - name='right_wrist', - id=11, - color=[255, 128, 0], - type='upper', - swap='left_wrist'), - 12: - dict( - name='right_hip', - id=12, - color=[255, 128, 0], - type='lower', - swap='left_hip'), - 13: - dict( - name='right_knee', - id=13, - color=[255, 128, 0], - type='lower', - swap='left_knee'), - 14: - dict( - name='right_ankle', - id=14, - color=[255, 128, 0], - type='lower', - swap='left_ankle'), - 15: - dict( - name='left_eye', - 
id=15, - color=[51, 153, 255], - type='upper', - swap='right_eye'), - 16: - dict( - name='left_ear', - id=16, - color=[51, 153, 255], - type='upper', - swap='right_ear'), - 17: - dict( - name='right_eye', - id=17, - color=[51, 153, 255], - type='upper', - swap='left_eye'), - 18: - dict( - name='right_ear', - id=18, - color=[51, 153, 255], - type='upper', - swap='left_ear') - }, - skeleton_info={ - 0: dict(link=('nose', 'neck'), id=0, color=[51, 153, 255]), - 1: dict(link=('neck', 'left_shoulder'), id=1, color=[0, 255, 0]), - 2: dict(link=('neck', 'right_shoulder'), id=2, color=[255, 128, 0]), - 3: dict(link=('left_shoulder', 'left_elbow'), id=3, color=[0, 255, 0]), - 4: dict( - link=('right_shoulder', 'right_elbow'), id=4, color=[255, 128, 0]), - 5: dict(link=('left_elbow', 'left_wrist'), id=5, color=[0, 255, 0]), - 6: - dict(link=('right_elbow', 'right_wrist'), id=6, color=[255, 128, 0]), - 7: dict(link=('left_ankle', 'left_knee'), id=7, color=[0, 255, 0]), - 8: dict(link=('left_knee', 'left_hip'), id=8, color=[0, 255, 0]), - 9: dict(link=('right_ankle', 'right_knee'), id=9, color=[255, 128, 0]), - 10: dict(link=('right_knee', 'right_hip'), id=10, color=[255, 128, 0]), - 11: dict(link=('mid_hip', 'left_hip'), id=11, color=[0, 255, 0]), - 12: dict(link=('mid_hip', 'right_hip'), id=12, color=[255, 128, 0]), - 13: dict(link=('mid_hip', 'neck'), id=13, color=[51, 153, 255]), - }, - joint_weights=[ - 1.0, 1.0, 1.0, 1.0, 1.2, 1.5, 1.0, 1.2, 1.5, 1.0, 1.2, 1.5, 1.0, 1.2, - 1.5, 1.0, 1.0, 1.0, 1.0 - ], - sigmas=[ - 0.026, 0.026, 0.107, 0.079, 0.072, 0.062, 0.107, 0.087, 0.089, 0.079, - 0.072, 0.062, 0.107, 0.087, 0.089, 0.025, 0.035, 0.025, 0.035 - ]) +dataset_info = dict( + dataset_name='panoptic_pose_3d', + paper_info=dict( + author='Joo, Hanbyul and Simon, Tomas and Li, Xulong' + 'and Liu, Hao and Tan, Lei and Gui, Lin and Banerjee, Sean' + 'and Godisart, Timothy and Nabbe, Bart and Matthews, Iain' + 'and Kanade, Takeo and Nobuhara, Shohei and Sheikh, Yaser', + title='Panoptic Studio: A Massively Multiview System ' + 'for Interaction Motion Capture', + container='IEEE Transactions on Pattern Analysis' + ' and Machine Intelligence', + year='2017', + homepage='http://domedb.perception.cs.cmu.edu', + ), + keypoint_info={ + 0: + dict(name='neck', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict(name='nose', id=1, color=[51, 153, 255], type='upper', swap=''), + 2: + dict(name='mid_hip', id=2, color=[0, 255, 0], type='lower', swap=''), + 3: + dict( + name='left_shoulder', + id=3, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 4: + dict( + name='left_elbow', + id=4, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 5: + dict( + name='left_wrist', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 6: + dict( + name='left_hip', + id=6, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 7: + dict( + name='left_knee', + id=7, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 8: + dict( + name='left_ankle', + id=8, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 9: + dict( + name='right_shoulder', + id=9, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 10: + dict( + name='right_elbow', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 11: + dict( + name='right_wrist', + id=11, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + 
name='right_knee', + id=13, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 14: + dict( + name='right_ankle', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 15: + dict( + name='left_eye', + id=15, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 16: + dict( + name='left_ear', + id=16, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 17: + dict( + name='right_eye', + id=17, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 18: + dict( + name='right_ear', + id=18, + color=[51, 153, 255], + type='upper', + swap='left_ear') + }, + skeleton_info={ + 0: dict(link=('nose', 'neck'), id=0, color=[51, 153, 255]), + 1: dict(link=('neck', 'left_shoulder'), id=1, color=[0, 255, 0]), + 2: dict(link=('neck', 'right_shoulder'), id=2, color=[255, 128, 0]), + 3: dict(link=('left_shoulder', 'left_elbow'), id=3, color=[0, 255, 0]), + 4: dict( + link=('right_shoulder', 'right_elbow'), id=4, color=[255, 128, 0]), + 5: dict(link=('left_elbow', 'left_wrist'), id=5, color=[0, 255, 0]), + 6: + dict(link=('right_elbow', 'right_wrist'), id=6, color=[255, 128, 0]), + 7: dict(link=('left_ankle', 'left_knee'), id=7, color=[0, 255, 0]), + 8: dict(link=('left_knee', 'left_hip'), id=8, color=[0, 255, 0]), + 9: dict(link=('right_ankle', 'right_knee'), id=9, color=[255, 128, 0]), + 10: dict(link=('right_knee', 'right_hip'), id=10, color=[255, 128, 0]), + 11: dict(link=('mid_hip', 'left_hip'), id=11, color=[0, 255, 0]), + 12: dict(link=('mid_hip', 'right_hip'), id=12, color=[255, 128, 0]), + 13: dict(link=('mid_hip', 'neck'), id=13, color=[51, 153, 255]), + }, + joint_weights=[ + 1.0, 1.0, 1.0, 1.0, 1.2, 1.5, 1.0, 1.2, 1.5, 1.0, 1.2, 1.5, 1.0, 1.2, + 1.5, 1.0, 1.0, 1.0, 1.0 + ], + sigmas=[ + 0.026, 0.026, 0.107, 0.079, 0.072, 0.062, 0.107, 0.087, 0.089, 0.079, + 0.072, 0.062, 0.107, 0.087, 0.089, 0.025, 0.035, 0.025, 0.035 + ]) diff --git a/configs/_base_/datasets/panoptic_hand2d.py b/configs/_base_/datasets/panoptic_hand2d.py index 7a65731ba8..5d01b9a024 100644 --- a/configs/_base_/datasets/panoptic_hand2d.py +++ b/configs/_base_/datasets/panoptic_hand2d.py @@ -1,143 +1,143 @@ -dataset_info = dict( - dataset_name='panoptic_hand2d', - paper_info=dict( - author='Simon, Tomas and Joo, Hanbyul and ' - 'Matthews, Iain and Sheikh, Yaser', - title='Hand keypoint detection in single images using ' - 'multiview bootstrapping', - container='Proceedings of the IEEE conference on ' - 'Computer Vision and Pattern Recognition', - year='2017', - homepage='http://domedb.perception.cs.cmu.edu/handdb.html', - ), - keypoint_info={ - 0: - dict(name='wrist', id=0, color=[255, 255, 255], type='', swap=''), - 1: - dict(name='thumb1', id=1, color=[255, 128, 0], type='', swap=''), - 2: - dict(name='thumb2', id=2, color=[255, 128, 0], type='', swap=''), - 3: - dict(name='thumb3', id=3, color=[255, 128, 0], type='', swap=''), - 4: - dict(name='thumb4', id=4, color=[255, 128, 0], type='', swap=''), - 5: - dict( - name='forefinger1', id=5, color=[255, 153, 255], type='', swap=''), - 6: - dict( - name='forefinger2', id=6, color=[255, 153, 255], type='', swap=''), - 7: - dict( - name='forefinger3', id=7, color=[255, 153, 255], type='', swap=''), - 8: - dict( - name='forefinger4', id=8, color=[255, 153, 255], type='', swap=''), - 9: - dict( - name='middle_finger1', - id=9, - color=[102, 178, 255], - type='', - swap=''), - 10: - dict( - name='middle_finger2', - id=10, - color=[102, 178, 255], - type='', - swap=''), - 11: - dict( - name='middle_finger3', - id=11, - color=[102, 
178, 255], - type='', - swap=''), - 12: - dict( - name='middle_finger4', - id=12, - color=[102, 178, 255], - type='', - swap=''), - 13: - dict( - name='ring_finger1', id=13, color=[255, 51, 51], type='', swap=''), - 14: - dict( - name='ring_finger2', id=14, color=[255, 51, 51], type='', swap=''), - 15: - dict( - name='ring_finger3', id=15, color=[255, 51, 51], type='', swap=''), - 16: - dict( - name='ring_finger4', id=16, color=[255, 51, 51], type='', swap=''), - 17: - dict(name='pinky_finger1', id=17, color=[0, 255, 0], type='', swap=''), - 18: - dict(name='pinky_finger2', id=18, color=[0, 255, 0], type='', swap=''), - 19: - dict(name='pinky_finger3', id=19, color=[0, 255, 0], type='', swap=''), - 20: - dict(name='pinky_finger4', id=20, color=[0, 255, 0], type='', swap='') - }, - skeleton_info={ - 0: - dict(link=('wrist', 'thumb1'), id=0, color=[255, 128, 0]), - 1: - dict(link=('thumb1', 'thumb2'), id=1, color=[255, 128, 0]), - 2: - dict(link=('thumb2', 'thumb3'), id=2, color=[255, 128, 0]), - 3: - dict(link=('thumb3', 'thumb4'), id=3, color=[255, 128, 0]), - 4: - dict(link=('wrist', 'forefinger1'), id=4, color=[255, 153, 255]), - 5: - dict(link=('forefinger1', 'forefinger2'), id=5, color=[255, 153, 255]), - 6: - dict(link=('forefinger2', 'forefinger3'), id=6, color=[255, 153, 255]), - 7: - dict(link=('forefinger3', 'forefinger4'), id=7, color=[255, 153, 255]), - 8: - dict(link=('wrist', 'middle_finger1'), id=8, color=[102, 178, 255]), - 9: - dict( - link=('middle_finger1', 'middle_finger2'), - id=9, - color=[102, 178, 255]), - 10: - dict( - link=('middle_finger2', 'middle_finger3'), - id=10, - color=[102, 178, 255]), - 11: - dict( - link=('middle_finger3', 'middle_finger4'), - id=11, - color=[102, 178, 255]), - 12: - dict(link=('wrist', 'ring_finger1'), id=12, color=[255, 51, 51]), - 13: - dict( - link=('ring_finger1', 'ring_finger2'), id=13, color=[255, 51, 51]), - 14: - dict( - link=('ring_finger2', 'ring_finger3'), id=14, color=[255, 51, 51]), - 15: - dict( - link=('ring_finger3', 'ring_finger4'), id=15, color=[255, 51, 51]), - 16: - dict(link=('wrist', 'pinky_finger1'), id=16, color=[0, 255, 0]), - 17: - dict( - link=('pinky_finger1', 'pinky_finger2'), id=17, color=[0, 255, 0]), - 18: - dict( - link=('pinky_finger2', 'pinky_finger3'), id=18, color=[0, 255, 0]), - 19: - dict( - link=('pinky_finger3', 'pinky_finger4'), id=19, color=[0, 255, 0]) - }, - joint_weights=[1.] 
* 21, - sigmas=[]) +dataset_info = dict( + dataset_name='panoptic_hand2d', + paper_info=dict( + author='Simon, Tomas and Joo, Hanbyul and ' + 'Matthews, Iain and Sheikh, Yaser', + title='Hand keypoint detection in single images using ' + 'multiview bootstrapping', + container='Proceedings of the IEEE conference on ' + 'Computer Vision and Pattern Recognition', + year='2017', + homepage='http://domedb.perception.cs.cmu.edu/handdb.html', + ), + keypoint_info={ + 0: + dict(name='wrist', id=0, color=[255, 255, 255], type='', swap=''), + 1: + dict(name='thumb1', id=1, color=[255, 128, 0], type='', swap=''), + 2: + dict(name='thumb2', id=2, color=[255, 128, 0], type='', swap=''), + 3: + dict(name='thumb3', id=3, color=[255, 128, 0], type='', swap=''), + 4: + dict(name='thumb4', id=4, color=[255, 128, 0], type='', swap=''), + 5: + dict( + name='forefinger1', id=5, color=[255, 153, 255], type='', swap=''), + 6: + dict( + name='forefinger2', id=6, color=[255, 153, 255], type='', swap=''), + 7: + dict( + name='forefinger3', id=7, color=[255, 153, 255], type='', swap=''), + 8: + dict( + name='forefinger4', id=8, color=[255, 153, 255], type='', swap=''), + 9: + dict( + name='middle_finger1', + id=9, + color=[102, 178, 255], + type='', + swap=''), + 10: + dict( + name='middle_finger2', + id=10, + color=[102, 178, 255], + type='', + swap=''), + 11: + dict( + name='middle_finger3', + id=11, + color=[102, 178, 255], + type='', + swap=''), + 12: + dict( + name='middle_finger4', + id=12, + color=[102, 178, 255], + type='', + swap=''), + 13: + dict( + name='ring_finger1', id=13, color=[255, 51, 51], type='', swap=''), + 14: + dict( + name='ring_finger2', id=14, color=[255, 51, 51], type='', swap=''), + 15: + dict( + name='ring_finger3', id=15, color=[255, 51, 51], type='', swap=''), + 16: + dict( + name='ring_finger4', id=16, color=[255, 51, 51], type='', swap=''), + 17: + dict(name='pinky_finger1', id=17, color=[0, 255, 0], type='', swap=''), + 18: + dict(name='pinky_finger2', id=18, color=[0, 255, 0], type='', swap=''), + 19: + dict(name='pinky_finger3', id=19, color=[0, 255, 0], type='', swap=''), + 20: + dict(name='pinky_finger4', id=20, color=[0, 255, 0], type='', swap='') + }, + skeleton_info={ + 0: + dict(link=('wrist', 'thumb1'), id=0, color=[255, 128, 0]), + 1: + dict(link=('thumb1', 'thumb2'), id=1, color=[255, 128, 0]), + 2: + dict(link=('thumb2', 'thumb3'), id=2, color=[255, 128, 0]), + 3: + dict(link=('thumb3', 'thumb4'), id=3, color=[255, 128, 0]), + 4: + dict(link=('wrist', 'forefinger1'), id=4, color=[255, 153, 255]), + 5: + dict(link=('forefinger1', 'forefinger2'), id=5, color=[255, 153, 255]), + 6: + dict(link=('forefinger2', 'forefinger3'), id=6, color=[255, 153, 255]), + 7: + dict(link=('forefinger3', 'forefinger4'), id=7, color=[255, 153, 255]), + 8: + dict(link=('wrist', 'middle_finger1'), id=8, color=[102, 178, 255]), + 9: + dict( + link=('middle_finger1', 'middle_finger2'), + id=9, + color=[102, 178, 255]), + 10: + dict( + link=('middle_finger2', 'middle_finger3'), + id=10, + color=[102, 178, 255]), + 11: + dict( + link=('middle_finger3', 'middle_finger4'), + id=11, + color=[102, 178, 255]), + 12: + dict(link=('wrist', 'ring_finger1'), id=12, color=[255, 51, 51]), + 13: + dict( + link=('ring_finger1', 'ring_finger2'), id=13, color=[255, 51, 51]), + 14: + dict( + link=('ring_finger2', 'ring_finger3'), id=14, color=[255, 51, 51]), + 15: + dict( + link=('ring_finger3', 'ring_finger4'), id=15, color=[255, 51, 51]), + 16: + dict(link=('wrist', 'pinky_finger1'), id=16, color=[0, 255, 0]), + 
17: + dict( + link=('pinky_finger1', 'pinky_finger2'), id=17, color=[0, 255, 0]), + 18: + dict( + link=('pinky_finger2', 'pinky_finger3'), id=18, color=[0, 255, 0]), + 19: + dict( + link=('pinky_finger3', 'pinky_finger4'), id=19, color=[0, 255, 0]) + }, + joint_weights=[1.] * 21, + sigmas=[]) diff --git a/configs/_base_/datasets/posetrack18.py b/configs/_base_/datasets/posetrack18.py index 5aefd1c97f..18e18911bc 100644 --- a/configs/_base_/datasets/posetrack18.py +++ b/configs/_base_/datasets/posetrack18.py @@ -1,176 +1,176 @@ -dataset_info = dict( - dataset_name='posetrack18', - paper_info=dict( - author='Andriluka, Mykhaylo and Iqbal, Umar and ' - 'Insafutdinov, Eldar and Pishchulin, Leonid and ' - 'Milan, Anton and Gall, Juergen and Schiele, Bernt', - title='Posetrack: A benchmark for human pose estimation and tracking', - container='Proceedings of the IEEE Conference on ' - 'Computer Vision and Pattern Recognition', - year='2018', - homepage='https://posetrack.net/users/download.php', - ), - keypoint_info={ - 0: - dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), - 1: - dict( - name='head_bottom', - id=1, - color=[51, 153, 255], - type='upper', - swap=''), - 2: - dict( - name='head_top', id=2, color=[51, 153, 255], type='upper', - swap=''), - 3: - dict( - name='left_ear', - id=3, - color=[51, 153, 255], - type='upper', - swap='right_ear'), - 4: - dict( - name='right_ear', - id=4, - color=[51, 153, 255], - type='upper', - swap='left_ear'), - 5: - dict( - name='left_shoulder', - id=5, - color=[0, 255, 0], - type='upper', - swap='right_shoulder'), - 6: - dict( - name='right_shoulder', - id=6, - color=[255, 128, 0], - type='upper', - swap='left_shoulder'), - 7: - dict( - name='left_elbow', - id=7, - color=[0, 255, 0], - type='upper', - swap='right_elbow'), - 8: - dict( - name='right_elbow', - id=8, - color=[255, 128, 0], - type='upper', - swap='left_elbow'), - 9: - dict( - name='left_wrist', - id=9, - color=[0, 255, 0], - type='upper', - swap='right_wrist'), - 10: - dict( - name='right_wrist', - id=10, - color=[255, 128, 0], - type='upper', - swap='left_wrist'), - 11: - dict( - name='left_hip', - id=11, - color=[0, 255, 0], - type='lower', - swap='right_hip'), - 12: - dict( - name='right_hip', - id=12, - color=[255, 128, 0], - type='lower', - swap='left_hip'), - 13: - dict( - name='left_knee', - id=13, - color=[0, 255, 0], - type='lower', - swap='right_knee'), - 14: - dict( - name='right_knee', - id=14, - color=[255, 128, 0], - type='lower', - swap='left_knee'), - 15: - dict( - name='left_ankle', - id=15, - color=[0, 255, 0], - type='lower', - swap='right_ankle'), - 16: - dict( - name='right_ankle', - id=16, - color=[255, 128, 0], - type='lower', - swap='left_ankle') - }, - skeleton_info={ - 0: - dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), - 1: - dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), - 2: - dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), - 3: - dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), - 4: - dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), - 5: - dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), - 6: - dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), - 7: - dict( - link=('left_shoulder', 'right_shoulder'), - id=7, - color=[51, 153, 255]), - 8: - dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), - 9: - dict( - link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), - 10: - 
dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), - 11: - dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), - 12: - dict(link=('nose', 'head_bottom'), id=12, color=[51, 153, 255]), - 13: - dict(link=('nose', 'head_top'), id=13, color=[51, 153, 255]), - 14: - dict( - link=('head_bottom', 'left_shoulder'), id=14, color=[51, 153, - 255]), - 15: - dict( - link=('head_bottom', 'right_shoulder'), - id=15, - color=[51, 153, 255]) - }, - joint_weights=[ - 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, - 1.5 - ], - sigmas=[ - 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, - 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089 - ]) +dataset_info = dict( + dataset_name='posetrack18', + paper_info=dict( + author='Andriluka, Mykhaylo and Iqbal, Umar and ' + 'Insafutdinov, Eldar and Pishchulin, Leonid and ' + 'Milan, Anton and Gall, Juergen and Schiele, Bernt', + title='Posetrack: A benchmark for human pose estimation and tracking', + container='Proceedings of the IEEE Conference on ' + 'Computer Vision and Pattern Recognition', + year='2018', + homepage='https://posetrack.net/users/download.php', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='head_bottom', + id=1, + color=[51, 153, 255], + type='upper', + swap=''), + 2: + dict( + name='head_top', id=2, color=[51, 153, 255], type='upper', + swap=''), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'right_shoulder'), + id=7, + color=[51, 153, 255]), + 8: + 
dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('nose', 'head_bottom'), id=12, color=[51, 153, 255]), + 13: + dict(link=('nose', 'head_top'), id=13, color=[51, 153, 255]), + 14: + dict( + link=('head_bottom', 'left_shoulder'), id=14, color=[51, 153, + 255]), + 15: + dict( + link=('head_bottom', 'right_shoulder'), + id=15, + color=[51, 153, 255]) + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5 + ], + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089 + ]) diff --git a/configs/_base_/datasets/rhd2d.py b/configs/_base_/datasets/rhd2d.py index 4631ccd038..8829b15938 100644 --- a/configs/_base_/datasets/rhd2d.py +++ b/configs/_base_/datasets/rhd2d.py @@ -1,151 +1,151 @@ -dataset_info = dict( - dataset_name='rhd2d', - paper_info=dict( - author='Christian Zimmermann and Thomas Brox', - title='Learning to Estimate 3D Hand Pose from Single RGB Images', - container='arXiv', - year='2017', - homepage='https://lmb.informatik.uni-freiburg.de/resources/' - 'datasets/RenderedHandposeDataset.en.html', - ), - # In RHD, 1-4: left thumb [tip to palm], which means the finger is from - # tip to palm, so as other fingers. Please refer to - # `https://lmb.informatik.uni-freiburg.de/resources/datasets/ - # RenderedHandpose/README` for details of keypoint definition. - # But in COCO-WholeBody-Hand, FreiHand, CMU Panoptic HandDB, it is in - # inverse order. Pay attention to this if you want to combine RHD with - # other hand datasets to train a single model. - # Also, note that 'keypoint_info' will not directly affect the order of - # the keypoint in the dataset. It is mostly for visualization & storing - # information about flip_pairs. 
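The comment above makes two practical points: `keypoint_info` mainly drives visualization and the flip pairs derived from the `swap` fields, and RHD orders each finger tip-to-palm while CMU Panoptic HandDB, FreiHand and COCO-WholeBody-Hand use the opposite order. A minimal sketch of both ideas in plain Python; the helper names are made up for illustration and are not MMPose APIs:

```python
def build_flip_pairs(keypoint_info):
    """Collect (id, swapped_id) pairs from non-empty 'swap' entries."""
    name_to_id = {kpt['name']: kpt['id'] for kpt in keypoint_info.values()}
    pairs = {
        tuple(sorted((kpt['id'], name_to_id[kpt['swap']])))
        for kpt in keypoint_info.values() if kpt['swap']
    }
    return sorted(pairs)


def reorder_indices(src_info, dst_info):
    """For each target slot, the source index holding the same-named keypoint."""
    src_ids = {kpt['name']: kpt['id'] for kpt in src_info.values()}
    return [src_ids[dst_info[i]['name']] for i in sorted(dst_info)]


# Toy 3-point example of the reversed thumb order described in the comment.
rhd_like = {
    0: dict(name='wrist', id=0, swap=''),
    1: dict(name='thumb4', id=1, swap=''),
    2: dict(name='thumb3', id=2, swap=''),
}
panoptic_like = {
    0: dict(name='wrist', id=0, swap=''),
    1: dict(name='thumb3', id=1, swap=''),
    2: dict(name='thumb4', id=2, swap=''),
}
print(reorder_indices(rhd_like, panoptic_like))  # -> [0, 2, 1]

# Body-style datasets (e.g. posetrack18 below) do fill 'swap':
body_like = {
    0: dict(name='left_hip', id=0, swap='right_hip'),
    1: dict(name='right_hip', id=1, swap='left_hip'),
}
print(build_flip_pairs(body_like))  # -> [(0, 1)]
```

In practice MMPose parses these metainfo files itself; the sketch only shows what combining RHD with the other hand datasets would require.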
- keypoint_info={ - 0: - dict(name='wrist', id=0, color=[255, 255, 255], type='', swap=''), - 1: - dict(name='thumb4', id=1, color=[255, 128, 0], type='', swap=''), - 2: - dict(name='thumb3', id=2, color=[255, 128, 0], type='', swap=''), - 3: - dict(name='thumb2', id=3, color=[255, 128, 0], type='', swap=''), - 4: - dict(name='thumb1', id=4, color=[255, 128, 0], type='', swap=''), - 5: - dict( - name='forefinger4', id=5, color=[255, 153, 255], type='', swap=''), - 6: - dict( - name='forefinger3', id=6, color=[255, 153, 255], type='', swap=''), - 7: - dict( - name='forefinger2', id=7, color=[255, 153, 255], type='', swap=''), - 8: - dict( - name='forefinger1', id=8, color=[255, 153, 255], type='', swap=''), - 9: - dict( - name='middle_finger4', - id=9, - color=[102, 178, 255], - type='', - swap=''), - 10: - dict( - name='middle_finger3', - id=10, - color=[102, 178, 255], - type='', - swap=''), - 11: - dict( - name='middle_finger2', - id=11, - color=[102, 178, 255], - type='', - swap=''), - 12: - dict( - name='middle_finger1', - id=12, - color=[102, 178, 255], - type='', - swap=''), - 13: - dict( - name='ring_finger4', id=13, color=[255, 51, 51], type='', swap=''), - 14: - dict( - name='ring_finger3', id=14, color=[255, 51, 51], type='', swap=''), - 15: - dict( - name='ring_finger2', id=15, color=[255, 51, 51], type='', swap=''), - 16: - dict( - name='ring_finger1', id=16, color=[255, 51, 51], type='', swap=''), - 17: - dict(name='pinky_finger4', id=17, color=[0, 255, 0], type='', swap=''), - 18: - dict(name='pinky_finger3', id=18, color=[0, 255, 0], type='', swap=''), - 19: - dict(name='pinky_finger2', id=19, color=[0, 255, 0], type='', swap=''), - 20: - dict(name='pinky_finger1', id=20, color=[0, 255, 0], type='', swap='') - }, - skeleton_info={ - 0: - dict(link=('wrist', 'thumb1'), id=0, color=[255, 128, 0]), - 1: - dict(link=('thumb1', 'thumb2'), id=1, color=[255, 128, 0]), - 2: - dict(link=('thumb2', 'thumb3'), id=2, color=[255, 128, 0]), - 3: - dict(link=('thumb3', 'thumb4'), id=3, color=[255, 128, 0]), - 4: - dict(link=('wrist', 'forefinger1'), id=4, color=[255, 153, 255]), - 5: - dict(link=('forefinger1', 'forefinger2'), id=5, color=[255, 153, 255]), - 6: - dict(link=('forefinger2', 'forefinger3'), id=6, color=[255, 153, 255]), - 7: - dict(link=('forefinger3', 'forefinger4'), id=7, color=[255, 153, 255]), - 8: - dict(link=('wrist', 'middle_finger1'), id=8, color=[102, 178, 255]), - 9: - dict( - link=('middle_finger1', 'middle_finger2'), - id=9, - color=[102, 178, 255]), - 10: - dict( - link=('middle_finger2', 'middle_finger3'), - id=10, - color=[102, 178, 255]), - 11: - dict( - link=('middle_finger3', 'middle_finger4'), - id=11, - color=[102, 178, 255]), - 12: - dict(link=('wrist', 'ring_finger1'), id=12, color=[255, 51, 51]), - 13: - dict( - link=('ring_finger1', 'ring_finger2'), id=13, color=[255, 51, 51]), - 14: - dict( - link=('ring_finger2', 'ring_finger3'), id=14, color=[255, 51, 51]), - 15: - dict( - link=('ring_finger3', 'ring_finger4'), id=15, color=[255, 51, 51]), - 16: - dict(link=('wrist', 'pinky_finger1'), id=16, color=[0, 255, 0]), - 17: - dict( - link=('pinky_finger1', 'pinky_finger2'), id=17, color=[0, 255, 0]), - 18: - dict( - link=('pinky_finger2', 'pinky_finger3'), id=18, color=[0, 255, 0]), - 19: - dict( - link=('pinky_finger3', 'pinky_finger4'), id=19, color=[0, 255, 0]) - }, - joint_weights=[1.] 
* 21, - sigmas=[]) +dataset_info = dict( + dataset_name='rhd2d', + paper_info=dict( + author='Christian Zimmermann and Thomas Brox', + title='Learning to Estimate 3D Hand Pose from Single RGB Images', + container='arXiv', + year='2017', + homepage='https://lmb.informatik.uni-freiburg.de/resources/' + 'datasets/RenderedHandposeDataset.en.html', + ), + # In RHD, 1-4: left thumb [tip to palm], which means the finger is from + # tip to palm, so as other fingers. Please refer to + # `https://lmb.informatik.uni-freiburg.de/resources/datasets/ + # RenderedHandpose/README` for details of keypoint definition. + # But in COCO-WholeBody-Hand, FreiHand, CMU Panoptic HandDB, it is in + # inverse order. Pay attention to this if you want to combine RHD with + # other hand datasets to train a single model. + # Also, note that 'keypoint_info' will not directly affect the order of + # the keypoint in the dataset. It is mostly for visualization & storing + # information about flip_pairs. + keypoint_info={ + 0: + dict(name='wrist', id=0, color=[255, 255, 255], type='', swap=''), + 1: + dict(name='thumb4', id=1, color=[255, 128, 0], type='', swap=''), + 2: + dict(name='thumb3', id=2, color=[255, 128, 0], type='', swap=''), + 3: + dict(name='thumb2', id=3, color=[255, 128, 0], type='', swap=''), + 4: + dict(name='thumb1', id=4, color=[255, 128, 0], type='', swap=''), + 5: + dict( + name='forefinger4', id=5, color=[255, 153, 255], type='', swap=''), + 6: + dict( + name='forefinger3', id=6, color=[255, 153, 255], type='', swap=''), + 7: + dict( + name='forefinger2', id=7, color=[255, 153, 255], type='', swap=''), + 8: + dict( + name='forefinger1', id=8, color=[255, 153, 255], type='', swap=''), + 9: + dict( + name='middle_finger4', + id=9, + color=[102, 178, 255], + type='', + swap=''), + 10: + dict( + name='middle_finger3', + id=10, + color=[102, 178, 255], + type='', + swap=''), + 11: + dict( + name='middle_finger2', + id=11, + color=[102, 178, 255], + type='', + swap=''), + 12: + dict( + name='middle_finger1', + id=12, + color=[102, 178, 255], + type='', + swap=''), + 13: + dict( + name='ring_finger4', id=13, color=[255, 51, 51], type='', swap=''), + 14: + dict( + name='ring_finger3', id=14, color=[255, 51, 51], type='', swap=''), + 15: + dict( + name='ring_finger2', id=15, color=[255, 51, 51], type='', swap=''), + 16: + dict( + name='ring_finger1', id=16, color=[255, 51, 51], type='', swap=''), + 17: + dict(name='pinky_finger4', id=17, color=[0, 255, 0], type='', swap=''), + 18: + dict(name='pinky_finger3', id=18, color=[0, 255, 0], type='', swap=''), + 19: + dict(name='pinky_finger2', id=19, color=[0, 255, 0], type='', swap=''), + 20: + dict(name='pinky_finger1', id=20, color=[0, 255, 0], type='', swap='') + }, + skeleton_info={ + 0: + dict(link=('wrist', 'thumb1'), id=0, color=[255, 128, 0]), + 1: + dict(link=('thumb1', 'thumb2'), id=1, color=[255, 128, 0]), + 2: + dict(link=('thumb2', 'thumb3'), id=2, color=[255, 128, 0]), + 3: + dict(link=('thumb3', 'thumb4'), id=3, color=[255, 128, 0]), + 4: + dict(link=('wrist', 'forefinger1'), id=4, color=[255, 153, 255]), + 5: + dict(link=('forefinger1', 'forefinger2'), id=5, color=[255, 153, 255]), + 6: + dict(link=('forefinger2', 'forefinger3'), id=6, color=[255, 153, 255]), + 7: + dict(link=('forefinger3', 'forefinger4'), id=7, color=[255, 153, 255]), + 8: + dict(link=('wrist', 'middle_finger1'), id=8, color=[102, 178, 255]), + 9: + dict( + link=('middle_finger1', 'middle_finger2'), + id=9, + color=[102, 178, 255]), + 10: + dict( + link=('middle_finger2', 
'middle_finger3'), + id=10, + color=[102, 178, 255]), + 11: + dict( + link=('middle_finger3', 'middle_finger4'), + id=11, + color=[102, 178, 255]), + 12: + dict(link=('wrist', 'ring_finger1'), id=12, color=[255, 51, 51]), + 13: + dict( + link=('ring_finger1', 'ring_finger2'), id=13, color=[255, 51, 51]), + 14: + dict( + link=('ring_finger2', 'ring_finger3'), id=14, color=[255, 51, 51]), + 15: + dict( + link=('ring_finger3', 'ring_finger4'), id=15, color=[255, 51, 51]), + 16: + dict(link=('wrist', 'pinky_finger1'), id=16, color=[0, 255, 0]), + 17: + dict( + link=('pinky_finger1', 'pinky_finger2'), id=17, color=[0, 255, 0]), + 18: + dict( + link=('pinky_finger2', 'pinky_finger3'), id=18, color=[0, 255, 0]), + 19: + dict( + link=('pinky_finger3', 'pinky_finger4'), id=19, color=[0, 255, 0]) + }, + joint_weights=[1.] * 21, + sigmas=[]) diff --git a/configs/_base_/datasets/shelf.py b/configs/_base_/datasets/shelf.py index 5fe6e42b3b..6a7984b1bc 100644 --- a/configs/_base_/datasets/shelf.py +++ b/configs/_base_/datasets/shelf.py @@ -1,151 +1,151 @@ -dataset_info = dict( - dataset_name='shelf', - paper_info=dict( - author='Belagiannis, Vasileios and Amin, Sikandar and Andriluka, ' - 'Mykhaylo and Schiele, Bernt and Navab, Nassir and Ilic, Slobodan', - title='3D Pictorial Structures for Multiple Human Pose Estimation', - container='IEEE Computer Society Conference on Computer Vision and ' - 'Pattern Recognition (CVPR)', - year='2014', - homepage='http://campar.in.tum.de/Chair/MultiHumanPose', - ), - keypoint_info={ - 0: - dict( - name='right_ankle', - id=0, - color=[255, 128, 0], - type='lower', - swap='left_ankle'), - 1: - dict( - name='right_knee', - id=1, - color=[255, 128, 0], - type='lower', - swap='left_knee'), - 2: - dict( - name='right_hip', - id=2, - color=[255, 128, 0], - type='lower', - swap='left_hip'), - 3: - dict( - name='left_hip', - id=3, - color=[0, 255, 0], - type='lower', - swap='right_hip'), - 4: - dict( - name='left_knee', - id=4, - color=[0, 255, 0], - type='lower', - swap='right_knee'), - 5: - dict( - name='left_ankle', - id=5, - color=[0, 255, 0], - type='lower', - swap='right_ankle'), - 6: - dict( - name='right_wrist', - id=6, - color=[255, 128, 0], - type='upper', - swap='left_wrist'), - 7: - dict( - name='right_elbow', - id=7, - color=[255, 128, 0], - type='upper', - swap='left_elbow'), - 8: - dict( - name='right_shoulder', - id=8, - color=[255, 128, 0], - type='upper', - swap='left_shoulder'), - 9: - dict( - name='left_shoulder', - id=9, - color=[0, 255, 0], - type='upper', - swap='right_shoulder'), - 10: - dict( - name='left_elbow', - id=10, - color=[0, 255, 0], - type='upper', - swap='right_elbow'), - 11: - dict( - name='left_wrist', - id=11, - color=[0, 255, 0], - type='upper', - swap='right_wrist'), - 12: - dict( - name='bottom_head', - id=12, - color=[51, 153, 255], - type='upper', - swap=''), - 13: - dict( - name='top_head', - id=13, - color=[51, 153, 255], - type='upper', - swap=''), - }, - skeleton_info={ - 0: - dict(link=('right_ankle', 'right_knee'), id=0, color=[255, 128, 0]), - 1: - dict(link=('right_knee', 'right_hip'), id=1, color=[255, 128, 0]), - 2: - dict(link=('left_hip', 'left_knee'), id=2, color=[0, 255, 0]), - 3: - dict(link=('left_knee', 'left_ankle'), id=3, color=[0, 255, 0]), - 4: - dict(link=('right_hip', 'left_hip'), id=4, color=[51, 153, 255]), - 5: - dict(link=('right_wrist', 'right_elbow'), id=5, color=[255, 128, 0]), - 6: - dict( - link=('right_elbow', 'right_shoulder'), id=6, color=[255, 128, 0]), - 7: - dict(link=('left_shoulder', 
'left_elbow'), id=7, color=[0, 255, 0]), - 8: - dict(link=('left_elbow', 'left_wrist'), id=8, color=[0, 255, 0]), - 9: - dict(link=('right_hip', 'right_shoulder'), id=9, color=[255, 128, 0]), - 10: - dict(link=('left_hip', 'left_shoulder'), id=10, color=[0, 255, 0]), - 11: - dict( - link=('right_shoulder', 'bottom_head'), id=11, color=[255, 128, - 0]), - 12: - dict(link=('left_shoulder', 'bottom_head'), id=12, color=[0, 255, 0]), - 13: - dict(link=('bottom_head', 'top_head'), id=13, color=[51, 153, 255]), - }, - joint_weights=[ - 1.5, 1.2, 1.0, 1.0, 1.2, 1.5, 1.5, 1.2, 1.0, 1.0, 1.2, 1.5, 1.0, 1.0 - ], - sigmas=[ - 0.089, 0.087, 0.107, 0.107, 0.087, 0.089, 0.062, 0.072, 0.079, 0.079, - 0.072, 0.062, 0.026, 0.026 - ]) +dataset_info = dict( + dataset_name='shelf', + paper_info=dict( + author='Belagiannis, Vasileios and Amin, Sikandar and Andriluka, ' + 'Mykhaylo and Schiele, Bernt and Navab, Nassir and Ilic, Slobodan', + title='3D Pictorial Structures for Multiple Human Pose Estimation', + container='IEEE Computer Society Conference on Computer Vision and ' + 'Pattern Recognition (CVPR)', + year='2014', + homepage='http://campar.in.tum.de/Chair/MultiHumanPose', + ), + keypoint_info={ + 0: + dict( + name='right_ankle', + id=0, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + 1: + dict( + name='right_knee', + id=1, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 2: + dict( + name='right_hip', + id=2, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 3: + dict( + name='left_hip', + id=3, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 4: + dict( + name='left_knee', + id=4, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 5: + dict( + name='left_ankle', + id=5, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 6: + dict( + name='right_wrist', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 7: + dict( + name='right_elbow', + id=7, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 8: + dict( + name='right_shoulder', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 9: + dict( + name='left_shoulder', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 10: + dict( + name='left_elbow', + id=10, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 11: + dict( + name='left_wrist', + id=11, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 12: + dict( + name='bottom_head', + id=12, + color=[51, 153, 255], + type='upper', + swap=''), + 13: + dict( + name='top_head', + id=13, + color=[51, 153, 255], + type='upper', + swap=''), + }, + skeleton_info={ + 0: + dict(link=('right_ankle', 'right_knee'), id=0, color=[255, 128, 0]), + 1: + dict(link=('right_knee', 'right_hip'), id=1, color=[255, 128, 0]), + 2: + dict(link=('left_hip', 'left_knee'), id=2, color=[0, 255, 0]), + 3: + dict(link=('left_knee', 'left_ankle'), id=3, color=[0, 255, 0]), + 4: + dict(link=('right_hip', 'left_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('right_wrist', 'right_elbow'), id=5, color=[255, 128, 0]), + 6: + dict( + link=('right_elbow', 'right_shoulder'), id=6, color=[255, 128, 0]), + 7: + dict(link=('left_shoulder', 'left_elbow'), id=7, color=[0, 255, 0]), + 8: + dict(link=('left_elbow', 'left_wrist'), id=8, color=[0, 255, 0]), + 9: + dict(link=('right_hip', 'right_shoulder'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_hip', 'left_shoulder'), id=10, color=[0, 255, 0]), + 11: + dict( + link=('right_shoulder', 'bottom_head'), id=11, 
color=[255, 128, + 0]), + 12: + dict(link=('left_shoulder', 'bottom_head'), id=12, color=[0, 255, 0]), + 13: + dict(link=('bottom_head', 'top_head'), id=13, color=[51, 153, 255]), + }, + joint_weights=[ + 1.5, 1.2, 1.0, 1.0, 1.2, 1.5, 1.5, 1.2, 1.0, 1.0, 1.2, 1.5, 1.0, 1.0 + ], + sigmas=[ + 0.089, 0.087, 0.107, 0.107, 0.087, 0.089, 0.062, 0.072, 0.079, 0.079, + 0.072, 0.062, 0.026, 0.026 + ]) diff --git a/configs/_base_/datasets/wflw.py b/configs/_base_/datasets/wflw.py index 80c29b696c..c31750bb1d 100644 --- a/configs/_base_/datasets/wflw.py +++ b/configs/_base_/datasets/wflw.py @@ -1,192 +1,192 @@ -dataset_info = dict( - dataset_name='wflw', - paper_info=dict( - author='Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, ' - 'Quan and Cai, Yici and Zhou, Qiang', - title='Look at boundary: A boundary-aware face alignment algorithm', - container='Proceedings of the IEEE conference on computer ' - 'vision and pattern recognition', - year='2018', - homepage='https://wywu.github.io/projects/LAB/WFLW.html', - ), - keypoint_info={ - 0: dict(name='kpt-0', id=0, color=[255, 0, 0], type='', swap='kpt-32'), - 1: dict(name='kpt-1', id=1, color=[255, 0, 0], type='', swap='kpt-31'), - 2: dict(name='kpt-2', id=2, color=[255, 0, 0], type='', swap='kpt-30'), - 3: dict(name='kpt-3', id=3, color=[255, 0, 0], type='', swap='kpt-29'), - 4: dict(name='kpt-4', id=4, color=[255, 0, 0], type='', swap='kpt-28'), - 5: dict(name='kpt-5', id=5, color=[255, 0, 0], type='', swap='kpt-27'), - 6: dict(name='kpt-6', id=6, color=[255, 0, 0], type='', swap='kpt-26'), - 7: dict(name='kpt-7', id=7, color=[255, 0, 0], type='', swap='kpt-25'), - 8: dict(name='kpt-8', id=8, color=[255, 0, 0], type='', swap='kpt-24'), - 9: dict(name='kpt-9', id=9, color=[255, 0, 0], type='', swap='kpt-23'), - 10: - dict(name='kpt-10', id=10, color=[255, 0, 0], type='', swap='kpt-22'), - 11: - dict(name='kpt-11', id=11, color=[255, 0, 0], type='', swap='kpt-21'), - 12: - dict(name='kpt-12', id=12, color=[255, 0, 0], type='', swap='kpt-20'), - 13: - dict(name='kpt-13', id=13, color=[255, 0, 0], type='', swap='kpt-19'), - 14: - dict(name='kpt-14', id=14, color=[255, 0, 0], type='', swap='kpt-18'), - 15: - dict(name='kpt-15', id=15, color=[255, 0, 0], type='', swap='kpt-17'), - 16: dict(name='kpt-16', id=16, color=[255, 0, 0], type='', swap=''), - 17: - dict(name='kpt-17', id=17, color=[255, 0, 0], type='', swap='kpt-15'), - 18: - dict(name='kpt-18', id=18, color=[255, 0, 0], type='', swap='kpt-14'), - 19: - dict(name='kpt-19', id=19, color=[255, 0, 0], type='', swap='kpt-13'), - 20: - dict(name='kpt-20', id=20, color=[255, 0, 0], type='', swap='kpt-12'), - 21: - dict(name='kpt-21', id=21, color=[255, 0, 0], type='', swap='kpt-11'), - 22: - dict(name='kpt-22', id=22, color=[255, 0, 0], type='', swap='kpt-10'), - 23: - dict(name='kpt-23', id=23, color=[255, 0, 0], type='', swap='kpt-9'), - 24: - dict(name='kpt-24', id=24, color=[255, 0, 0], type='', swap='kpt-8'), - 25: - dict(name='kpt-25', id=25, color=[255, 0, 0], type='', swap='kpt-7'), - 26: - dict(name='kpt-26', id=26, color=[255, 0, 0], type='', swap='kpt-6'), - 27: - dict(name='kpt-27', id=27, color=[255, 0, 0], type='', swap='kpt-5'), - 28: - dict(name='kpt-28', id=28, color=[255, 0, 0], type='', swap='kpt-4'), - 29: - dict(name='kpt-29', id=29, color=[255, 0, 0], type='', swap='kpt-3'), - 30: - dict(name='kpt-30', id=30, color=[255, 0, 0], type='', swap='kpt-2'), - 31: - dict(name='kpt-31', id=31, color=[255, 0, 0], type='', swap='kpt-1'), - 32: - dict(name='kpt-32', id=32, color=[255, 0, 0], 
type='', swap='kpt-0'), - 33: - dict(name='kpt-33', id=33, color=[255, 0, 0], type='', swap='kpt-46'), - 34: - dict(name='kpt-34', id=34, color=[255, 0, 0], type='', swap='kpt-45'), - 35: - dict(name='kpt-35', id=35, color=[255, 0, 0], type='', swap='kpt-44'), - 36: - dict(name='kpt-36', id=36, color=[255, 0, 0], type='', swap='kpt-43'), - 37: dict( - name='kpt-37', id=37, color=[255, 0, 0], type='', swap='kpt-42'), - 38: dict( - name='kpt-38', id=38, color=[255, 0, 0], type='', swap='kpt-50'), - 39: dict( - name='kpt-39', id=39, color=[255, 0, 0], type='', swap='kpt-49'), - 40: dict( - name='kpt-40', id=40, color=[255, 0, 0], type='', swap='kpt-48'), - 41: dict( - name='kpt-41', id=41, color=[255, 0, 0], type='', swap='kpt-47'), - 42: dict( - name='kpt-42', id=42, color=[255, 0, 0], type='', swap='kpt-37'), - 43: dict( - name='kpt-43', id=43, color=[255, 0, 0], type='', swap='kpt-36'), - 44: dict( - name='kpt-44', id=44, color=[255, 0, 0], type='', swap='kpt-35'), - 45: dict( - name='kpt-45', id=45, color=[255, 0, 0], type='', swap='kpt-34'), - 46: dict( - name='kpt-46', id=46, color=[255, 0, 0], type='', swap='kpt-33'), - 47: dict( - name='kpt-47', id=47, color=[255, 0, 0], type='', swap='kpt-41'), - 48: dict( - name='kpt-48', id=48, color=[255, 0, 0], type='', swap='kpt-40'), - 49: dict( - name='kpt-49', id=49, color=[255, 0, 0], type='', swap='kpt-39'), - 50: dict( - name='kpt-50', id=50, color=[255, 0, 0], type='', swap='kpt-38'), - 51: dict(name='kpt-51', id=51, color=[255, 0, 0], type='', swap=''), - 52: dict(name='kpt-52', id=52, color=[255, 0, 0], type='', swap=''), - 53: dict(name='kpt-53', id=53, color=[255, 0, 0], type='', swap=''), - 54: dict(name='kpt-54', id=54, color=[255, 0, 0], type='', swap=''), - 55: dict( - name='kpt-55', id=55, color=[255, 0, 0], type='', swap='kpt-59'), - 56: dict( - name='kpt-56', id=56, color=[255, 0, 0], type='', swap='kpt-58'), - 57: dict(name='kpt-57', id=57, color=[255, 0, 0], type='', swap=''), - 58: dict( - name='kpt-58', id=58, color=[255, 0, 0], type='', swap='kpt-56'), - 59: dict( - name='kpt-59', id=59, color=[255, 0, 0], type='', swap='kpt-55'), - 60: dict( - name='kpt-60', id=60, color=[255, 0, 0], type='', swap='kpt-72'), - 61: dict( - name='kpt-61', id=61, color=[255, 0, 0], type='', swap='kpt-71'), - 62: dict( - name='kpt-62', id=62, color=[255, 0, 0], type='', swap='kpt-70'), - 63: dict( - name='kpt-63', id=63, color=[255, 0, 0], type='', swap='kpt-69'), - 64: dict( - name='kpt-64', id=64, color=[255, 0, 0], type='', swap='kpt-68'), - 65: dict( - name='kpt-65', id=65, color=[255, 0, 0], type='', swap='kpt-75'), - 66: dict( - name='kpt-66', id=66, color=[255, 0, 0], type='', swap='kpt-74'), - 67: dict( - name='kpt-67', id=67, color=[255, 0, 0], type='', swap='kpt-73'), - 68: dict( - name='kpt-68', id=68, color=[255, 0, 0], type='', swap='kpt-64'), - 69: dict( - name='kpt-69', id=69, color=[255, 0, 0], type='', swap='kpt-63'), - 70: dict( - name='kpt-70', id=70, color=[255, 0, 0], type='', swap='kpt-62'), - 71: dict( - name='kpt-71', id=71, color=[255, 0, 0], type='', swap='kpt-61'), - 72: dict( - name='kpt-72', id=72, color=[255, 0, 0], type='', swap='kpt-60'), - 73: dict( - name='kpt-73', id=73, color=[255, 0, 0], type='', swap='kpt-67'), - 74: dict( - name='kpt-74', id=74, color=[255, 0, 0], type='', swap='kpt-66'), - 75: dict( - name='kpt-75', id=75, color=[255, 0, 0], type='', swap='kpt-65'), - 76: dict( - name='kpt-76', id=76, color=[255, 0, 0], type='', swap='kpt-82'), - 77: dict( - name='kpt-77', id=77, color=[255, 0, 0], 
type='', swap='kpt-81'), - 78: dict( - name='kpt-78', id=78, color=[255, 0, 0], type='', swap='kpt-80'), - 79: dict(name='kpt-79', id=79, color=[255, 0, 0], type='', swap=''), - 80: dict( - name='kpt-80', id=80, color=[255, 0, 0], type='', swap='kpt-78'), - 81: dict( - name='kpt-81', id=81, color=[255, 0, 0], type='', swap='kpt-77'), - 82: dict( - name='kpt-82', id=82, color=[255, 0, 0], type='', swap='kpt-76'), - 83: dict( - name='kpt-83', id=83, color=[255, 0, 0], type='', swap='kpt-87'), - 84: dict( - name='kpt-84', id=84, color=[255, 0, 0], type='', swap='kpt-86'), - 85: dict(name='kpt-85', id=85, color=[255, 0, 0], type='', swap=''), - 86: dict( - name='kpt-86', id=86, color=[255, 0, 0], type='', swap='kpt-84'), - 87: dict( - name='kpt-87', id=87, color=[255, 0, 0], type='', swap='kpt-83'), - 88: dict( - name='kpt-88', id=88, color=[255, 0, 0], type='', swap='kpt-92'), - 89: dict( - name='kpt-89', id=89, color=[255, 0, 0], type='', swap='kpt-91'), - 90: dict(name='kpt-90', id=90, color=[255, 0, 0], type='', swap=''), - 91: dict( - name='kpt-91', id=91, color=[255, 0, 0], type='', swap='kpt-89'), - 92: dict( - name='kpt-92', id=92, color=[255, 0, 0], type='', swap='kpt-88'), - 93: dict( - name='kpt-93', id=93, color=[255, 0, 0], type='', swap='kpt-95'), - 94: dict(name='kpt-94', id=94, color=[255, 0, 0], type='', swap=''), - 95: dict( - name='kpt-95', id=95, color=[255, 0, 0], type='', swap='kpt-93'), - 96: dict( - name='kpt-96', id=96, color=[255, 0, 0], type='', swap='kpt-97'), - 97: dict( - name='kpt-97', id=97, color=[255, 0, 0], type='', swap='kpt-96') - }, - skeleton_info={}, - joint_weights=[1.] * 98, - sigmas=[]) +dataset_info = dict( + dataset_name='wflw', + paper_info=dict( + author='Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, ' + 'Quan and Cai, Yici and Zhou, Qiang', + title='Look at boundary: A boundary-aware face alignment algorithm', + container='Proceedings of the IEEE conference on computer ' + 'vision and pattern recognition', + year='2018', + homepage='https://wywu.github.io/projects/LAB/WFLW.html', + ), + keypoint_info={ + 0: dict(name='kpt-0', id=0, color=[255, 0, 0], type='', swap='kpt-32'), + 1: dict(name='kpt-1', id=1, color=[255, 0, 0], type='', swap='kpt-31'), + 2: dict(name='kpt-2', id=2, color=[255, 0, 0], type='', swap='kpt-30'), + 3: dict(name='kpt-3', id=3, color=[255, 0, 0], type='', swap='kpt-29'), + 4: dict(name='kpt-4', id=4, color=[255, 0, 0], type='', swap='kpt-28'), + 5: dict(name='kpt-5', id=5, color=[255, 0, 0], type='', swap='kpt-27'), + 6: dict(name='kpt-6', id=6, color=[255, 0, 0], type='', swap='kpt-26'), + 7: dict(name='kpt-7', id=7, color=[255, 0, 0], type='', swap='kpt-25'), + 8: dict(name='kpt-8', id=8, color=[255, 0, 0], type='', swap='kpt-24'), + 9: dict(name='kpt-9', id=9, color=[255, 0, 0], type='', swap='kpt-23'), + 10: + dict(name='kpt-10', id=10, color=[255, 0, 0], type='', swap='kpt-22'), + 11: + dict(name='kpt-11', id=11, color=[255, 0, 0], type='', swap='kpt-21'), + 12: + dict(name='kpt-12', id=12, color=[255, 0, 0], type='', swap='kpt-20'), + 13: + dict(name='kpt-13', id=13, color=[255, 0, 0], type='', swap='kpt-19'), + 14: + dict(name='kpt-14', id=14, color=[255, 0, 0], type='', swap='kpt-18'), + 15: + dict(name='kpt-15', id=15, color=[255, 0, 0], type='', swap='kpt-17'), + 16: dict(name='kpt-16', id=16, color=[255, 0, 0], type='', swap=''), + 17: + dict(name='kpt-17', id=17, color=[255, 0, 0], type='', swap='kpt-15'), + 18: + dict(name='kpt-18', id=18, color=[255, 0, 0], type='', swap='kpt-14'), + 19: + 
dict(name='kpt-19', id=19, color=[255, 0, 0], type='', swap='kpt-13'), + 20: + dict(name='kpt-20', id=20, color=[255, 0, 0], type='', swap='kpt-12'), + 21: + dict(name='kpt-21', id=21, color=[255, 0, 0], type='', swap='kpt-11'), + 22: + dict(name='kpt-22', id=22, color=[255, 0, 0], type='', swap='kpt-10'), + 23: + dict(name='kpt-23', id=23, color=[255, 0, 0], type='', swap='kpt-9'), + 24: + dict(name='kpt-24', id=24, color=[255, 0, 0], type='', swap='kpt-8'), + 25: + dict(name='kpt-25', id=25, color=[255, 0, 0], type='', swap='kpt-7'), + 26: + dict(name='kpt-26', id=26, color=[255, 0, 0], type='', swap='kpt-6'), + 27: + dict(name='kpt-27', id=27, color=[255, 0, 0], type='', swap='kpt-5'), + 28: + dict(name='kpt-28', id=28, color=[255, 0, 0], type='', swap='kpt-4'), + 29: + dict(name='kpt-29', id=29, color=[255, 0, 0], type='', swap='kpt-3'), + 30: + dict(name='kpt-30', id=30, color=[255, 0, 0], type='', swap='kpt-2'), + 31: + dict(name='kpt-31', id=31, color=[255, 0, 0], type='', swap='kpt-1'), + 32: + dict(name='kpt-32', id=32, color=[255, 0, 0], type='', swap='kpt-0'), + 33: + dict(name='kpt-33', id=33, color=[255, 0, 0], type='', swap='kpt-46'), + 34: + dict(name='kpt-34', id=34, color=[255, 0, 0], type='', swap='kpt-45'), + 35: + dict(name='kpt-35', id=35, color=[255, 0, 0], type='', swap='kpt-44'), + 36: + dict(name='kpt-36', id=36, color=[255, 0, 0], type='', swap='kpt-43'), + 37: dict( + name='kpt-37', id=37, color=[255, 0, 0], type='', swap='kpt-42'), + 38: dict( + name='kpt-38', id=38, color=[255, 0, 0], type='', swap='kpt-50'), + 39: dict( + name='kpt-39', id=39, color=[255, 0, 0], type='', swap='kpt-49'), + 40: dict( + name='kpt-40', id=40, color=[255, 0, 0], type='', swap='kpt-48'), + 41: dict( + name='kpt-41', id=41, color=[255, 0, 0], type='', swap='kpt-47'), + 42: dict( + name='kpt-42', id=42, color=[255, 0, 0], type='', swap='kpt-37'), + 43: dict( + name='kpt-43', id=43, color=[255, 0, 0], type='', swap='kpt-36'), + 44: dict( + name='kpt-44', id=44, color=[255, 0, 0], type='', swap='kpt-35'), + 45: dict( + name='kpt-45', id=45, color=[255, 0, 0], type='', swap='kpt-34'), + 46: dict( + name='kpt-46', id=46, color=[255, 0, 0], type='', swap='kpt-33'), + 47: dict( + name='kpt-47', id=47, color=[255, 0, 0], type='', swap='kpt-41'), + 48: dict( + name='kpt-48', id=48, color=[255, 0, 0], type='', swap='kpt-40'), + 49: dict( + name='kpt-49', id=49, color=[255, 0, 0], type='', swap='kpt-39'), + 50: dict( + name='kpt-50', id=50, color=[255, 0, 0], type='', swap='kpt-38'), + 51: dict(name='kpt-51', id=51, color=[255, 0, 0], type='', swap=''), + 52: dict(name='kpt-52', id=52, color=[255, 0, 0], type='', swap=''), + 53: dict(name='kpt-53', id=53, color=[255, 0, 0], type='', swap=''), + 54: dict(name='kpt-54', id=54, color=[255, 0, 0], type='', swap=''), + 55: dict( + name='kpt-55', id=55, color=[255, 0, 0], type='', swap='kpt-59'), + 56: dict( + name='kpt-56', id=56, color=[255, 0, 0], type='', swap='kpt-58'), + 57: dict(name='kpt-57', id=57, color=[255, 0, 0], type='', swap=''), + 58: dict( + name='kpt-58', id=58, color=[255, 0, 0], type='', swap='kpt-56'), + 59: dict( + name='kpt-59', id=59, color=[255, 0, 0], type='', swap='kpt-55'), + 60: dict( + name='kpt-60', id=60, color=[255, 0, 0], type='', swap='kpt-72'), + 61: dict( + name='kpt-61', id=61, color=[255, 0, 0], type='', swap='kpt-71'), + 62: dict( + name='kpt-62', id=62, color=[255, 0, 0], type='', swap='kpt-70'), + 63: dict( + name='kpt-63', id=63, color=[255, 0, 0], type='', swap='kpt-69'), + 64: dict( + name='kpt-64', id=64, 
color=[255, 0, 0], type='', swap='kpt-68'), + 65: dict( + name='kpt-65', id=65, color=[255, 0, 0], type='', swap='kpt-75'), + 66: dict( + name='kpt-66', id=66, color=[255, 0, 0], type='', swap='kpt-74'), + 67: dict( + name='kpt-67', id=67, color=[255, 0, 0], type='', swap='kpt-73'), + 68: dict( + name='kpt-68', id=68, color=[255, 0, 0], type='', swap='kpt-64'), + 69: dict( + name='kpt-69', id=69, color=[255, 0, 0], type='', swap='kpt-63'), + 70: dict( + name='kpt-70', id=70, color=[255, 0, 0], type='', swap='kpt-62'), + 71: dict( + name='kpt-71', id=71, color=[255, 0, 0], type='', swap='kpt-61'), + 72: dict( + name='kpt-72', id=72, color=[255, 0, 0], type='', swap='kpt-60'), + 73: dict( + name='kpt-73', id=73, color=[255, 0, 0], type='', swap='kpt-67'), + 74: dict( + name='kpt-74', id=74, color=[255, 0, 0], type='', swap='kpt-66'), + 75: dict( + name='kpt-75', id=75, color=[255, 0, 0], type='', swap='kpt-65'), + 76: dict( + name='kpt-76', id=76, color=[255, 0, 0], type='', swap='kpt-82'), + 77: dict( + name='kpt-77', id=77, color=[255, 0, 0], type='', swap='kpt-81'), + 78: dict( + name='kpt-78', id=78, color=[255, 0, 0], type='', swap='kpt-80'), + 79: dict(name='kpt-79', id=79, color=[255, 0, 0], type='', swap=''), + 80: dict( + name='kpt-80', id=80, color=[255, 0, 0], type='', swap='kpt-78'), + 81: dict( + name='kpt-81', id=81, color=[255, 0, 0], type='', swap='kpt-77'), + 82: dict( + name='kpt-82', id=82, color=[255, 0, 0], type='', swap='kpt-76'), + 83: dict( + name='kpt-83', id=83, color=[255, 0, 0], type='', swap='kpt-87'), + 84: dict( + name='kpt-84', id=84, color=[255, 0, 0], type='', swap='kpt-86'), + 85: dict(name='kpt-85', id=85, color=[255, 0, 0], type='', swap=''), + 86: dict( + name='kpt-86', id=86, color=[255, 0, 0], type='', swap='kpt-84'), + 87: dict( + name='kpt-87', id=87, color=[255, 0, 0], type='', swap='kpt-83'), + 88: dict( + name='kpt-88', id=88, color=[255, 0, 0], type='', swap='kpt-92'), + 89: dict( + name='kpt-89', id=89, color=[255, 0, 0], type='', swap='kpt-91'), + 90: dict(name='kpt-90', id=90, color=[255, 0, 0], type='', swap=''), + 91: dict( + name='kpt-91', id=91, color=[255, 0, 0], type='', swap='kpt-89'), + 92: dict( + name='kpt-92', id=92, color=[255, 0, 0], type='', swap='kpt-88'), + 93: dict( + name='kpt-93', id=93, color=[255, 0, 0], type='', swap='kpt-95'), + 94: dict(name='kpt-94', id=94, color=[255, 0, 0], type='', swap=''), + 95: dict( + name='kpt-95', id=95, color=[255, 0, 0], type='', swap='kpt-93'), + 96: dict( + name='kpt-96', id=96, color=[255, 0, 0], type='', swap='kpt-97'), + 97: dict( + name='kpt-97', id=97, color=[255, 0, 0], type='', swap='kpt-96') + }, + skeleton_info={}, + joint_weights=[1.] 
* 98, + sigmas=[]) diff --git a/configs/_base_/datasets/zebra.py b/configs/_base_/datasets/zebra.py index eac71f796a..bc4f9ecd75 100644 --- a/configs/_base_/datasets/zebra.py +++ b/configs/_base_/datasets/zebra.py @@ -1,64 +1,64 @@ -dataset_info = dict( - dataset_name='zebra', - paper_info=dict( - author='Graving, Jacob M and Chae, Daniel and Naik, Hemal and ' - 'Li, Liang and Koger, Benjamin and Costelloe, Blair R and ' - 'Couzin, Iain D', - title='DeepPoseKit, a software toolkit for fast and robust ' - 'animal pose estimation using deep learning', - container='Elife', - year='2019', - homepage='https://github.com/jgraving/DeepPoseKit-Data', - ), - keypoint_info={ - 0: - dict(name='snout', id=0, color=[255, 255, 255], type='', swap=''), - 1: - dict(name='head', id=1, color=[255, 255, 255], type='', swap=''), - 2: - dict(name='neck', id=2, color=[255, 255, 255], type='', swap=''), - 3: - dict( - name='forelegL1', - id=3, - color=[255, 255, 255], - type='', - swap='forelegR1'), - 4: - dict( - name='forelegR1', - id=4, - color=[255, 255, 255], - type='', - swap='forelegL1'), - 5: - dict( - name='hindlegL1', - id=5, - color=[255, 255, 255], - type='', - swap='hindlegR1'), - 6: - dict( - name='hindlegR1', - id=6, - color=[255, 255, 255], - type='', - swap='hindlegL1'), - 7: - dict(name='tailbase', id=7, color=[255, 255, 255], type='', swap=''), - 8: - dict(name='tailtip', id=8, color=[255, 255, 255], type='', swap='') - }, - skeleton_info={ - 0: dict(link=('head', 'snout'), id=0, color=[255, 255, 255]), - 1: dict(link=('neck', 'head'), id=1, color=[255, 255, 255]), - 2: dict(link=('forelegL1', 'neck'), id=2, color=[255, 255, 255]), - 3: dict(link=('forelegR1', 'neck'), id=3, color=[255, 255, 255]), - 4: dict(link=('hindlegL1', 'tailbase'), id=4, color=[255, 255, 255]), - 5: dict(link=('hindlegR1', 'tailbase'), id=5, color=[255, 255, 255]), - 6: dict(link=('tailbase', 'neck'), id=6, color=[255, 255, 255]), - 7: dict(link=('tailtip', 'tailbase'), id=7, color=[255, 255, 255]) - }, - joint_weights=[1.] 
* 9, - sigmas=[]) +dataset_info = dict( + dataset_name='zebra', + paper_info=dict( + author='Graving, Jacob M and Chae, Daniel and Naik, Hemal and ' + 'Li, Liang and Koger, Benjamin and Costelloe, Blair R and ' + 'Couzin, Iain D', + title='DeepPoseKit, a software toolkit for fast and robust ' + 'animal pose estimation using deep learning', + container='Elife', + year='2019', + homepage='https://github.com/jgraving/DeepPoseKit-Data', + ), + keypoint_info={ + 0: + dict(name='snout', id=0, color=[255, 255, 255], type='', swap=''), + 1: + dict(name='head', id=1, color=[255, 255, 255], type='', swap=''), + 2: + dict(name='neck', id=2, color=[255, 255, 255], type='', swap=''), + 3: + dict( + name='forelegL1', + id=3, + color=[255, 255, 255], + type='', + swap='forelegR1'), + 4: + dict( + name='forelegR1', + id=4, + color=[255, 255, 255], + type='', + swap='forelegL1'), + 5: + dict( + name='hindlegL1', + id=5, + color=[255, 255, 255], + type='', + swap='hindlegR1'), + 6: + dict( + name='hindlegR1', + id=6, + color=[255, 255, 255], + type='', + swap='hindlegL1'), + 7: + dict(name='tailbase', id=7, color=[255, 255, 255], type='', swap=''), + 8: + dict(name='tailtip', id=8, color=[255, 255, 255], type='', swap='') + }, + skeleton_info={ + 0: dict(link=('head', 'snout'), id=0, color=[255, 255, 255]), + 1: dict(link=('neck', 'head'), id=1, color=[255, 255, 255]), + 2: dict(link=('forelegL1', 'neck'), id=2, color=[255, 255, 255]), + 3: dict(link=('forelegR1', 'neck'), id=3, color=[255, 255, 255]), + 4: dict(link=('hindlegL1', 'tailbase'), id=4, color=[255, 255, 255]), + 5: dict(link=('hindlegR1', 'tailbase'), id=5, color=[255, 255, 255]), + 6: dict(link=('tailbase', 'neck'), id=6, color=[255, 255, 255]), + 7: dict(link=('tailtip', 'tailbase'), id=7, color=[255, 255, 255]) + }, + joint_weights=[1.] 
* 9, + sigmas=[]) diff --git a/configs/_base_/default_runtime.py b/configs/_base_/default_runtime.py index 561d574fa7..1235dfafb6 100644 --- a/configs/_base_/default_runtime.py +++ b/configs/_base_/default_runtime.py @@ -1,49 +1,49 @@ -default_scope = 'mmpose' - -# hooks -default_hooks = dict( - timer=dict(type='IterTimerHook'), - logger=dict(type='LoggerHook', interval=50), - param_scheduler=dict(type='ParamSchedulerHook'), - checkpoint=dict(type='CheckpointHook', interval=10), - sampler_seed=dict(type='DistSamplerSeedHook'), - visualization=dict(type='PoseVisualizationHook', enable=False), -) - -# custom hooks -custom_hooks = [ - # Synchronize model buffers such as running_mean and running_var in BN - # at the end of each epoch - dict(type='SyncBuffersHook') -] - -# multi-processing backend -env_cfg = dict( - cudnn_benchmark=False, - mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), - dist_cfg=dict(backend='nccl'), -) - -# visualizer -vis_backends = [ - dict(type='LocalVisBackend'), - # dict(type='TensorboardVisBackend'), - # dict(type='WandbVisBackend'), -] -visualizer = dict( - type='PoseLocalVisualizer', vis_backends=vis_backends, name='visualizer') - -# logger -log_processor = dict( - type='LogProcessor', window_size=50, by_epoch=True, num_digits=6) -log_level = 'INFO' -load_from = None -resume = False - -# file I/O backend -backend_args = dict(backend='local') - -# training/validation/testing progress -train_cfg = dict(by_epoch=True) -val_cfg = dict() -test_cfg = dict() +default_scope = 'mmpose' + +# hooks +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', interval=10), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='PoseVisualizationHook', enable=False), +) + +# custom hooks +custom_hooks = [ + # Synchronize model buffers such as running_mean and running_var in BN + # at the end of each epoch + dict(type='SyncBuffersHook') +] + +# multi-processing backend +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl'), +) + +# visualizer +vis_backends = [ + dict(type='LocalVisBackend'), + # dict(type='TensorboardVisBackend'), + # dict(type='WandbVisBackend'), +] +visualizer = dict( + type='PoseLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# logger +log_processor = dict( + type='LogProcessor', window_size=50, by_epoch=True, num_digits=6) +log_level = 'INFO' +load_from = None +resume = False + +# file I/O backend +backend_args = dict(backend='local') + +# training/validation/testing progress +train_cfg = dict(by_epoch=True) +val_cfg = dict() +test_cfg = dict() diff --git a/configs/animal_2d_keypoint/README.md b/configs/animal_2d_keypoint/README.md index efcc3841a5..1ee3b404d8 100644 --- a/configs/animal_2d_keypoint/README.md +++ b/configs/animal_2d_keypoint/README.md @@ -1,20 +1,20 @@ -# 2D Animal Keypoint Detection - -2D animal keypoint detection (animal pose estimation) aims to detect the key-point of different species, including rats, -dogs, macaques, and cheetah. It provides detailed behavioral analysis for neuroscience, medical and ecology applications. - -## Data preparation - -Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_animal_keypoint.md) to prepare data. - -## Demo - -Please follow [DEMO](/demo/docs/en/2d_animal_demo.md) to generate fancy demos. - -
+# 2D Animal Keypoint Detection + +2D animal keypoint detection (animal pose estimation) aims to detect the keypoints of different species, including rats, +dogs, macaques, and cheetahs. It provides detailed behavioral analysis for neuroscience, medical, and ecology applications. + +## Data preparation + +Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_animal_keypoint.md) to prepare data. + +## Demo + +Please follow [DEMO](/demo/docs/en/2d_animal_demo.md) to generate fancy demos. + +
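As a quick complement to the linked demo docs, a prepared dataset and checkpoint can also be smoke-tested from Python. A minimal sketch, assuming an MMPose 1.x install with its `init_model`/`inference_topdown` API; the checkpoint filename and test image below are placeholders, not files shipped with the repo:

```python
from mmpose.apis import inference_topdown, init_model

# The config ships with this repo; the checkpoint path is a placeholder.
cfg = ('configs/animal_2d_keypoint/rtmpose/ap10k/'
      'rtmpose-m_8xb64-210e_ap10k-256x256.py')
ckpt = 'rtmpose-m_ap10k.pth'  # hypothetical local file

model = init_model(cfg, ckpt, device='cpu')

# With no bounding boxes given, the whole image is treated as one instance.
results = inference_topdown(model, 'demo_animal.jpg')
keypoints = results[0].pred_instances.keypoints  # (num_instances, 17, 2)
print(keypoints.shape)
```

For batch demos with a detector in the loop, the scripts referenced in the demo docs above remain the intended entry point.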
diff --git a/configs/animal_2d_keypoint/rtmpose/README.md b/configs/animal_2d_keypoint/rtmpose/README.md index fbb103e36c..b722d40364 100644 --- a/configs/animal_2d_keypoint/rtmpose/README.md +++ b/configs/animal_2d_keypoint/rtmpose/README.md @@ -1,16 +1,16 @@ -# RTMPose - -Recent studies on 2D pose estimation have achieved excellent performance on public benchmarks, yet its application in the industrial community still suffers from heavy model parameters and high latency. -In order to bridge this gap, we empirically study five aspects that affect the performance of multi-person pose estimation algorithms: paradigm, backbone network, localization algorithm, training strategy, and deployment inference, and present a high-performance real-time multi-person pose estimation framework, **RTMPose**, based on MMPose. -Our RTMPose-m achieves **75.8% AP** on COCO with **90+ FPS** on an Intel i7-11700 CPU and **430+ FPS** on an NVIDIA GTX 1660 Ti GPU, and RTMPose-l achieves **67.0% AP** on COCO-WholeBody with **130+ FPS**, outperforming existing open-source libraries. -To further evaluate RTMPose's capability in critical real-time applications, we also report the performance after deploying on the mobile device. - -## Results and Models - -### AP-10K Dataset - -Results on AP-10K validation set - -| Model | Input Size | AP | Details and Download | -| :-------: | :--------: | :---: | :------------------------------------------: | -| RTMPose-m | 256x256 | 0.722 | [rtmpose_cp10k.md](./ap10k/rtmpose_ap10k.md) | +# RTMPose + +Recent studies on 2D pose estimation have achieved excellent performance on public benchmarks, yet its application in the industrial community still suffers from heavy model parameters and high latency. +In order to bridge this gap, we empirically study five aspects that affect the performance of multi-person pose estimation algorithms: paradigm, backbone network, localization algorithm, training strategy, and deployment inference, and present a high-performance real-time multi-person pose estimation framework, **RTMPose**, based on MMPose. +Our RTMPose-m achieves **75.8% AP** on COCO with **90+ FPS** on an Intel i7-11700 CPU and **430+ FPS** on an NVIDIA GTX 1660 Ti GPU, and RTMPose-l achieves **67.0% AP** on COCO-WholeBody with **130+ FPS**, outperforming existing open-source libraries. +To further evaluate RTMPose's capability in critical real-time applications, we also report the performance after deploying on the mobile device. 
+ +## Results and Models + +### AP-10K Dataset + +Results on AP-10K validation set + +| Model | Input Size | AP | Details and Download | +| :-------: | :--------: | :---: | :------------------------------------------: | +| RTMPose-m | 256x256 | 0.722 | [rtmpose_cp10k.md](./ap10k/rtmpose_ap10k.md) | diff --git a/configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose-m_8xb64-210e_ap10k-256x256.py b/configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose-m_8xb64-210e_ap10k-256x256.py index 0e8c007b31..576b71ff08 100644 --- a/configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose-m_8xb64-210e_ap10k-256x256.py +++ b/configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose-m_8xb64-210e_ap10k-256x256.py @@ -1,245 +1,245 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 210 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(256, 256), - sigma=(5.66, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=768, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'AP10KDataset' -data_mode = 'topdown' -data_root = 'data/ap10k/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/pose/ap10k/', -# f'{data_root}': 's3://openmmlab/datasets/pose/ap10k/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - 
dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ap10k-train-split1.json', - data_prefix=dict(img='data/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ap10k-val-split1.json', - data_prefix=dict(img='data/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ap10k-test-split1.json', - data_prefix=dict(img='data/'), - test_mode=True, - pipeline=val_pipeline, - )) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/ap10k-val-split1.json') -test_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/ap10k-test-split1.json') +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate 
+param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(256, 256), + sigma=(5.66, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'AP10KDataset' +data_mode = 'topdown' +data_root = 'data/ap10k/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/pose/ap10k/', +# f'{data_root}': 's3://openmmlab/datasets/pose/ap10k/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + 
dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-train-split1.json', + data_prefix=dict(img='data/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-val-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-test-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-val-split1.json') +test_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-test-split1.json') diff --git a/configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose_ap10k.md b/configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose_ap10k.md index 4d035a3725..e72cefb385 100644 --- a/configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose_ap10k.md +++ b/configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose_ap10k.md @@ -1,25 +1,25 @@ - - - - -
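A few quantities in the RTMPose-m AP-10K config above are derived rather than spelled out. The following is a minimal sketch in plain Python (not part of the diff itself) that just mirrors expressions already present in the config, so the head geometry and the stage-switch point are visible at a glance:

```python
# Mirrors derived quantities from the rtmpose-m AP-10K config above.
input_size = (256, 256)          # codec['input_size']
simcc_split_ratio = 2.0          # codec['simcc_split_ratio']

# CSPNeXt with out_indices=(4, ) feeds the head a stride-32 feature map.
in_featuremap_size = tuple(s // 32 for s in input_size)
print(in_featuremap_size)        # (8, 8)

# SimCC classifies each keypoint over split_ratio * input_size bins per axis.
simcc_bins = tuple(int(s * simcc_split_ratio) for s in input_size)
print(simcc_bins)                # (512, 512)

# PipelineSwitchHook swaps in the weaker stage-2 augmentations at this epoch.
max_epochs, stage2_num_epochs = 210, 30
print(max_epochs - stage2_num_epochs)  # 180
```

Training would normally be launched through the standard OpenMMLab entry point, e.g. `python tools/train.py configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose-m_8xb64-210e_ap10k-256x256.py` (assuming the usual repository layout); the `8xb64` in the config name corresponds to `auto_scale_lr.base_batch_size=512`, i.e. 8 GPUs with 64 samples each.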
-AP-10K (NeurIPS'2021) - -```bibtex -@misc{yu2021ap10k, - title={AP-10K: A Benchmark for Animal Pose Estimation in the Wild}, - author={Hang Yu and Yufei Xu and Jing Zhang and Wei Zhao and Ziyu Guan and Dacheng Tao}, - year={2021}, - eprint={2108.12617}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -
- -Results on AP-10K validation set - -| Arch | Input Size | AP | AP50 | AP75 | APM | APL | ckpt | log | -| :----------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :------------: | :------------: | :-----------------------------------------: | :----------------------------------------: | -| [rtmpose-m](/configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose-m_8xb64-210e_ap10k-256x256.py) | 256x256 | 0.722 | 0.939 | 0.788 | 0.569 | 0.728 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-ap10k_pt-aic-coco_210e-256x256-7a041aa1_20230206.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-ap10k_pt-aic-coco_210e-256x256-7a041aa1_20230206.json) | + + + + +
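For reference, the checkpoint listed in the table above can be exercised directly. The snippet below is a minimal sketch, assuming MMPose 1.x's `init_model`/`inference_topdown` API, a local clone that provides the config path, and a hypothetical input image `animal.jpg`; it is not part of the diff:

```python
from mmpose.apis import inference_topdown, init_model

# Config path and checkpoint URL as listed in the table above.
config = ('configs/animal_2d_keypoint/rtmpose/ap10k/'
          'rtmpose-m_8xb64-210e_ap10k-256x256.py')
checkpoint = ('https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/'
              'rtmpose-m_simcc-ap10k_pt-aic-coco_210e-256x256-7a041aa1_20230206.pth')

model = init_model(config, checkpoint, device='cpu')  # device is an assumption

# Top-down inference: with no bboxes given, the whole image is treated as one
# instance; in practice an animal detector would supply the boxes.
results = inference_topdown(model, 'animal.jpg')
print(results[0].pred_instances.keypoints.shape)  # (1, 17, 2) AP-10K keypoints
```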
+AP-10K (NeurIPS'2021) + +```bibtex +@misc{yu2021ap10k, + title={AP-10K: A Benchmark for Animal Pose Estimation in the Wild}, + author={Hang Yu and Yufei Xu and Jing Zhang and Wei Zhao and Ziyu Guan and Dacheng Tao}, + year={2021}, + eprint={2108.12617}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ +Results on AP-10K validation set + +| Arch | Input Size | AP | AP50 | AP75 | APM | APL | ckpt | log | +| :----------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :------------: | :------------: | :-----------------------------------------: | :----------------------------------------: | +| [rtmpose-m](/configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose-m_8xb64-210e_ap10k-256x256.py) | 256x256 | 0.722 | 0.939 | 0.788 | 0.569 | 0.728 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-ap10k_pt-aic-coco_210e-256x256-7a041aa1_20230206.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-ap10k_pt-aic-coco_210e-256x256-7a041aa1_20230206.json) | diff --git a/configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose_ap10k.yml b/configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose_ap10k.yml index 0441d9e65f..1f125760a4 100644 --- a/configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose_ap10k.yml +++ b/configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose_ap10k.yml @@ -1,19 +1,19 @@ -Models: -- Config: configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose-m_8xb64-210e_ap10k-256x256.py - In Collection: RTMPose - Alias: animal - Metadata: - Architecture: - - RTMPose - Training Data: AP-10K - Name: rtmpose-m_8xb64-210e_ap10k-256x256 - Results: - - Dataset: AP-10K - Metrics: - AP: 0.722 - AP@0.5: 0.939 - AP@0.75: 0.788 - AP (L): 0.728 - AP (M): 0.569 - Task: Animal 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-ap10k_pt-aic-coco_210e-256x256-7a041aa1_20230206.pth +Models: +- Config: configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose-m_8xb64-210e_ap10k-256x256.py + In Collection: RTMPose + Alias: animal + Metadata: + Architecture: + - RTMPose + Training Data: AP-10K + Name: rtmpose-m_8xb64-210e_ap10k-256x256 + Results: + - Dataset: AP-10K + Metrics: + AP: 0.722 + AP@0.5: 0.939 + AP@0.75: 0.788 + AP (L): 0.728 + AP (M): 0.569 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-ap10k_pt-aic-coco_210e-256x256-7a041aa1_20230206.pth diff --git a/configs/animal_2d_keypoint/topdown_heatmap/README.md b/configs/animal_2d_keypoint/topdown_heatmap/README.md index 90a440dc28..bf13310fff 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/README.md +++ b/configs/animal_2d_keypoint/topdown_heatmap/README.md @@ -1,68 +1,68 @@ -# Top-down heatmap-based pose estimation - -Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes Instead of estimating keypoint coordinates directly, the pose estimator will produce heatmaps which represent the -likelihood of being a keypoint, following the paradigm introduced in [Simple Baselines for Human Pose Estimation and Tracking](http://openaccess.thecvf.com/content_ECCV_2018/html/Bin_Xiao_Simple_Baselines_for_ECCV_2018_paper.html). - -
- -
- -## Results and Models - -### Animal-Pose Dataset - -Results on AnimalPose validation set (1117 instances) - -| Model | Input Size | AP | AR | Details and Download | -| :--------: | :--------: | :---: | :---: | :-------------------------------------------------------: | -| HRNet-w32 | 256x256 | 0.740 | 0.780 | [hrnet_animalpose.md](./animalpose/hrnet_animalpose.md) | -| HRNet-w48 | 256x256 | 0.738 | 0.778 | [hrnet_animalpose.md](./animalpose/hrnet_animalpose.md) | -| ResNet-152 | 256x256 | 0.704 | 0.748 | [resnet_animalpose.md](./animalpose/resnet_animalpose.md) | -| ResNet-101 | 256x256 | 0.696 | 0.736 | [resnet_animalpose.md](./animalpose/resnet_animalpose.md) | -| ResNet-50 | 256x256 | 0.691 | 0.736 | [resnet_animalpose.md](./animalpose/resnet_animalpose.md) | - -### AP-10K Dataset - -Results on AP-10K validation set - -| Model | Input Size | AP | Details and Download | -| :--------: | :--------: | :---: | :--------------------------------------------------: | -| HRNet-w48 | 256x256 | 0.728 | [hrnet_ap10k.md](./ap10k/hrnet_ap10k.md) | -| HRNet-w32 | 256x256 | 0.722 | [hrnet_ap10k.md](./ap10k/hrnet_ap10k.md) | -| ResNet-101 | 256x256 | 0.681 | [resnet_ap10k.md](./ap10k/resnet_ap10k.md) | -| ResNet-50 | 256x256 | 0.680 | [resnet_ap10k.md](./ap10k/resnet_ap10k.md) | -| CSPNeXt-m | 256x256 | 0.703 | [cspnext_udp_ap10k.md](./ap10k/cspnext_udp_ap10k.md) | - -### Desert Locust Dataset - -Results on Desert Locust test set - -| Model | Input Size | AUC | EPE | Details and Download | -| :--------: | :--------: | :---: | :--: | :-------------------------------------------: | -| ResNet-152 | 160x160 | 0.925 | 1.49 | [resnet_locust.md](./locust/resnet_locust.md) | -| ResNet-101 | 160x160 | 0.907 | 2.03 | [resnet_locust.md](./locust/resnet_locust.md) | -| ResNet-50 | 160x160 | 0.900 | 2.27 | [resnet_locust.md](./locust/resnet_locust.md) | - -### Grévy’s Zebra Dataset - -Results on Grévy’s Zebra test set - -| Model | Input Size | AUC | EPE | Details and Download | -| :--------: | :--------: | :---: | :--: | :----------------------------------------: | -| ResNet-152 | 160x160 | 0.921 | 1.67 | [resnet_zebra.md](./zebra/resnet_zebra.md) | -| ResNet-101 | 160x160 | 0.915 | 1.83 | [resnet_zebra.md](./zebra/resnet_zebra.md) | -| ResNet-50 | 160x160 | 0.914 | 1.87 | [resnet_zebra.md](./zebra/resnet_zebra.md) | - -### Animal-Kingdom Dataset - -Results on AnimalKingdom test set - -| Model | Input Size | class | PCK(0.05) | Details and Download | -| :-------: | :--------: | :-----------: | :-------: | :---------------------------------------------------: | -| HRNet-w32 | 256x256 | P1 | 0.6323 | [hrnet_animalkingdom.md](./ak/hrnet_animalkingdom.md) | -| HRNet-w32 | 256x256 | P2 | 0.3741 | [hrnet_animalkingdom.md](./ak/hrnet_animalkingdom.md) | -| HRNet-w32 | 256x256 | P3_mammals | 0.571 | [hrnet_animalkingdom.md](./ak/hrnet_animalkingdom.md) | -| HRNet-w32 | 256x256 | P3_amphibians | 0.5358 | [hrnet_animalkingdom.md](./ak/hrnet_animalkingdom.md) | -| HRNet-w32 | 256x256 | P3_reptiles | 0.51 | [hrnet_animalkingdom.md](./ak/hrnet_animalkingdom.md) | -| HRNet-w32 | 256x256 | P3_birds | 0.7671 | [hrnet_animalkingdom.md](./ak/hrnet_animalkingdom.md) | -| HRNet-w32 | 256x256 | P3_fishes | 0.6406 | [hrnet_animalkingdom.md](./ak/hrnet_animalkingdom.md) | +# Top-down heatmap-based pose estimation + +Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes Instead of estimating keypoint coordinates directly, the pose estimator will produce 
heatmaps which represent the +likelihood of being a keypoint, following the paradigm introduced in [Simple Baselines for Human Pose Estimation and Tracking](http://openaccess.thecvf.com/content_ECCV_2018/html/Bin_Xiao_Simple_Baselines_for_ECCV_2018_paper.html). + +
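Since the paragraph above is the only place the heatmap representation is described, a short NumPy sketch of the usual decoding step may help. It is illustrative only, not the `MSRAHeatmap` codec that the configs later in this diff actually use:

```python
import numpy as np

def decode_heatmaps(heatmaps, input_size):
    """Recover keypoint coordinates from per-keypoint likelihood maps.

    heatmaps: (K, H, W) array, one map per keypoint.
    input_size: (width, height) of the cropped input image.
    Returns (K, 2) coordinates in the crop and (K,) confidence scores.
    """
    num_kpts, heat_h, heat_w = heatmaps.shape
    flat = heatmaps.reshape(num_kpts, -1)
    ys, xs = np.unravel_index(flat.argmax(axis=1), (heat_h, heat_w))
    scores = flat.max(axis=1)
    # Scale from heatmap resolution (e.g. 64x64) back to the crop (e.g. 256x256).
    coords = np.stack([xs * input_size[0] / heat_w,
                       ys * input_size[1] / heat_h], axis=1)
    return coords.astype(np.float32), scores

# 23 keypoints on 64x64 heatmaps for a 256x256 crop, matching the
# AnimalKingdom configs later in this diff.
coords, scores = decode_heatmaps(np.random.rand(23, 64, 64), (256, 256))
```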
+ +
+ +## Results and Models + +### Animal-Pose Dataset + +Results on AnimalPose validation set (1117 instances) + +| Model | Input Size | AP | AR | Details and Download | +| :--------: | :--------: | :---: | :---: | :-------------------------------------------------------: | +| HRNet-w32 | 256x256 | 0.740 | 0.780 | [hrnet_animalpose.md](./animalpose/hrnet_animalpose.md) | +| HRNet-w48 | 256x256 | 0.738 | 0.778 | [hrnet_animalpose.md](./animalpose/hrnet_animalpose.md) | +| ResNet-152 | 256x256 | 0.704 | 0.748 | [resnet_animalpose.md](./animalpose/resnet_animalpose.md) | +| ResNet-101 | 256x256 | 0.696 | 0.736 | [resnet_animalpose.md](./animalpose/resnet_animalpose.md) | +| ResNet-50 | 256x256 | 0.691 | 0.736 | [resnet_animalpose.md](./animalpose/resnet_animalpose.md) | + +### AP-10K Dataset + +Results on AP-10K validation set + +| Model | Input Size | AP | Details and Download | +| :--------: | :--------: | :---: | :--------------------------------------------------: | +| HRNet-w48 | 256x256 | 0.728 | [hrnet_ap10k.md](./ap10k/hrnet_ap10k.md) | +| HRNet-w32 | 256x256 | 0.722 | [hrnet_ap10k.md](./ap10k/hrnet_ap10k.md) | +| ResNet-101 | 256x256 | 0.681 | [resnet_ap10k.md](./ap10k/resnet_ap10k.md) | +| ResNet-50 | 256x256 | 0.680 | [resnet_ap10k.md](./ap10k/resnet_ap10k.md) | +| CSPNeXt-m | 256x256 | 0.703 | [cspnext_udp_ap10k.md](./ap10k/cspnext_udp_ap10k.md) | + +### Desert Locust Dataset + +Results on Desert Locust test set + +| Model | Input Size | AUC | EPE | Details and Download | +| :--------: | :--------: | :---: | :--: | :-------------------------------------------: | +| ResNet-152 | 160x160 | 0.925 | 1.49 | [resnet_locust.md](./locust/resnet_locust.md) | +| ResNet-101 | 160x160 | 0.907 | 2.03 | [resnet_locust.md](./locust/resnet_locust.md) | +| ResNet-50 | 160x160 | 0.900 | 2.27 | [resnet_locust.md](./locust/resnet_locust.md) | + +### Grévy’s Zebra Dataset + +Results on Grévy’s Zebra test set + +| Model | Input Size | AUC | EPE | Details and Download | +| :--------: | :--------: | :---: | :--: | :----------------------------------------: | +| ResNet-152 | 160x160 | 0.921 | 1.67 | [resnet_zebra.md](./zebra/resnet_zebra.md) | +| ResNet-101 | 160x160 | 0.915 | 1.83 | [resnet_zebra.md](./zebra/resnet_zebra.md) | +| ResNet-50 | 160x160 | 0.914 | 1.87 | [resnet_zebra.md](./zebra/resnet_zebra.md) | + +### Animal-Kingdom Dataset + +Results on AnimalKingdom test set + +| Model | Input Size | class | PCK(0.05) | Details and Download | +| :-------: | :--------: | :-----------: | :-------: | :---------------------------------------------------: | +| HRNet-w32 | 256x256 | P1 | 0.6323 | [hrnet_animalkingdom.md](./ak/hrnet_animalkingdom.md) | +| HRNet-w32 | 256x256 | P2 | 0.3741 | [hrnet_animalkingdom.md](./ak/hrnet_animalkingdom.md) | +| HRNet-w32 | 256x256 | P3_mammals | 0.571 | [hrnet_animalkingdom.md](./ak/hrnet_animalkingdom.md) | +| HRNet-w32 | 256x256 | P3_amphibians | 0.5358 | [hrnet_animalkingdom.md](./ak/hrnet_animalkingdom.md) | +| HRNet-w32 | 256x256 | P3_reptiles | 0.51 | [hrnet_animalkingdom.md](./ak/hrnet_animalkingdom.md) | +| HRNet-w32 | 256x256 | P3_birds | 0.7671 | [hrnet_animalkingdom.md](./ak/hrnet_animalkingdom.md) | +| HRNet-w32 | 256x256 | P3_fishes | 0.6406 | [hrnet_animalkingdom.md](./ak/hrnet_animalkingdom.md) | diff --git a/configs/animal_2d_keypoint/topdown_heatmap/ak/hrnet_animalkingdom.md b/configs/animal_2d_keypoint/topdown_heatmap/ak/hrnet_animalkingdom.md index f32fb49d90..a2ba7cb646 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/ak/hrnet_animalkingdom.md +++ 
b/configs/animal_2d_keypoint/topdown_heatmap/ak/hrnet_animalkingdom.md @@ -1,47 +1,47 @@ - - -
-HRNet (CVPR'2019) - -```bibtex -@inproceedings{sun2019deep, - title={Deep high-resolution representation learning for human pose estimation}, - author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={5693--5703}, - year={2019} -} -``` - -
- - - -
-AnimalKingdom (CVPR'2022) - -```bibtex -@InProceedings{ - Ng_2022_CVPR, - author = {Ng, Xun Long and Ong, Kian Eng and Zheng, Qichen and Ni, Yun and Yeo, Si Yong and Liu, Jun}, - title = {Animal Kingdom: A Large and Diverse Dataset for Animal Behavior Understanding}, - booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, - month = {June}, - year = {2022}, - pages = {19023-19034} - } -``` - -
- -Results on AnimalKingdom validation set - -| Arch | Input Size | PCK(0.05) | Official Repo | Paper | ckpt | log | -| ------------------------------------------------------ | ---------- | --------- | ------------- | ------ | ------------------------------------------------------ | ------------------------------------------------------ | -| [P1_hrnet_w32](configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256.py) | 256x256 | 0.6323 | 0.6342 | 0.6606 | [ckpt](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256-08bf96cb_20230519.pth) | [log](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256-08bf96cb_20230519.json) | -| [P2_hrnet_w32](configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256.py) | 256x256 | 0.3741 | 0.3726 | 0.393 | [ckpt](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256-2396cc58_20230519.pth) | [log](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256-2396cc58_20230519.json) | -| [P3_mammals_hrnet_w32](configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256.py) | 256x256 | 0.571 | 0.5719 | 0.6159 | [ckpt](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256-e8aadf02_20230519.pth) | [log](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256-e8aadf02_20230519.json) | -| [P3_amphibians_hrnet_w32](configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256.py) | 256x256 | 0.5358 | 0.5432 | 0.5674 | [ckpt](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256-845085f9_20230519.pth) | [log](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256-845085f9_20230519.json) | -| [P3_reptiles_hrnet_w32](configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256.py) | 256x256 | 0.51 | 0.5 | 0.5606 | [ckpt](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256-e8440c16_20230519.pth) | [log](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256-e8440c16_20230519.json) | -| [P3_birds_hrnet_w32](configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256.py) | 256x256 | 0.7671 | 0.7636 | 0.7735 | [ckpt](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256-566feff5_20230519.pth) | [log](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256-566feff5_20230519.json) | -| 
[P3_fishes_hrnet_w32](configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256.py) | 256x256 | 0.6406 | 0.636 | 0.6825 | [ckpt](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256-76c3999f_20230519.pth) | [log](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256-76c3999f_20230519.json) | + + +
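The PCK(0.05) column above counts a keypoint as correct when its prediction falls within 5% of a normalization length of the ground truth; for the `PCKAccuracy` evaluator configured in the AnimalKingdom configs below, that length is by default the bounding-box size, and the threshold matches `thr=0.05`. Below is a minimal NumPy sketch of that definition, not the MMPose implementation itself:

```python
import numpy as np

def pck(pred, gt, visible, bbox_size, thr=0.05):
    """Percentage of Correct Keypoints.

    pred, gt: (N, K, 2) predicted / ground-truth coordinates.
    visible: (N, K) boolean mask of annotated keypoints.
    bbox_size: (N, 2) per-instance normalization, e.g. bounding-box (w, h).
    """
    # Normalize the per-axis error by the box size before taking the distance.
    dist = np.linalg.norm((pred - gt) / bbox_size[:, None, :], axis=-1)
    correct = (dist < thr) & visible
    return correct.sum() / max(visible.sum(), 1)

# Toy example: two instances with 23 keypoints each inside 200x100 boxes.
rng = np.random.default_rng(0)
gt = rng.uniform(0, 100, size=(2, 23, 2))
pred = gt + rng.normal(0, 2, size=gt.shape)
print(pck(pred, gt, np.ones((2, 23), dtype=bool), np.full((2, 2), (200., 100.))))
```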
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+AnimalKingdom (CVPR'2022) + +```bibtex +@InProceedings{ + Ng_2022_CVPR, + author = {Ng, Xun Long and Ong, Kian Eng and Zheng, Qichen and Ni, Yun and Yeo, Si Yong and Liu, Jun}, + title = {Animal Kingdom: A Large and Diverse Dataset for Animal Behavior Understanding}, + booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2022}, + pages = {19023-19034} + } +``` + +
+ +Results on AnimalKingdom validation set + +| Arch | Input Size | PCK(0.05) | Official Repo | Paper | ckpt | log | +| ------------------------------------------------------ | ---------- | --------- | ------------- | ------ | ------------------------------------------------------ | ------------------------------------------------------ | +| [P1_hrnet_w32](configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256.py) | 256x256 | 0.6323 | 0.6342 | 0.6606 | [ckpt](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256-08bf96cb_20230519.pth) | [log](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256-08bf96cb_20230519.json) | +| [P2_hrnet_w32](configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256.py) | 256x256 | 0.3741 | 0.3726 | 0.393 | [ckpt](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256-2396cc58_20230519.pth) | [log](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256-2396cc58_20230519.json) | +| [P3_mammals_hrnet_w32](configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256.py) | 256x256 | 0.571 | 0.5719 | 0.6159 | [ckpt](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256-e8aadf02_20230519.pth) | [log](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256-e8aadf02_20230519.json) | +| [P3_amphibians_hrnet_w32](configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256.py) | 256x256 | 0.5358 | 0.5432 | 0.5674 | [ckpt](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256-845085f9_20230519.pth) | [log](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256-845085f9_20230519.json) | +| [P3_reptiles_hrnet_w32](configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256.py) | 256x256 | 0.51 | 0.5 | 0.5606 | [ckpt](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256-e8440c16_20230519.pth) | [log](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256-e8440c16_20230519.json) | +| [P3_birds_hrnet_w32](configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256.py) | 256x256 | 0.7671 | 0.7636 | 0.7735 | [ckpt](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256-566feff5_20230519.pth) | [log](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256-566feff5_20230519.json) | +| 
[P3_fishes_hrnet_w32](configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256.py) | 256x256 | 0.6406 | 0.636 | 0.6825 | [ckpt](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256-76c3999f_20230519.pth) | [log](https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256-76c3999f_20230519.json) | diff --git a/configs/animal_2d_keypoint/topdown_heatmap/ak/hrnet_animalkingdom.yml b/configs/animal_2d_keypoint/topdown_heatmap/ak/hrnet_animalkingdom.yml index 12f208a10b..b560cd82b0 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/ak/hrnet_animalkingdom.yml +++ b/configs/animal_2d_keypoint/topdown_heatmap/ak/hrnet_animalkingdom.yml @@ -1,86 +1,86 @@ -Models: -- Config: configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256.py - In Collection: HRNet - Metadata: - Architecture: &id001 - - HRNet - Training Data: AnimalKingdom_P1 - Name: td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256 - Results: - - Dataset: AnimalKingdom - Metrics: - PCK: 0.6323 - Task: Animal 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256-08bf96cb_20230519.pth -- Config: configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256.py - In Collection: HRNet - Metadata: - Architecture: *id001 - Training Data: AnimalKingdom_P2 - Name: td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256 - Results: - - Dataset: AnimalKingdom - Metrics: - PCK: 0.3741 - Task: Animal 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256-2396cc58_20230519.pth -- Config: configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256.py - In Collection: HRNet - Metadata: - Architecture: *id001 - Training Data: AnimalKingdom_P3_amphibian - Name: td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256 - Results: - - Dataset: AnimalKingdom - Metrics: - PCK: 0.5358 - Task: Animal 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256-845085f9_20230519.pth -- Config: configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256.py - In Collection: HRNet - Metadata: - Architecture: *id001 - Training Data: AnimalKingdom_P3_bird - Name: td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256 - Results: - - Dataset: AnimalKingdom - Metrics: - PCK: 0.7671 - Task: Animal 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256-566feff5_20230519.pth -- Config: configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256.py - In Collection: HRNet - Metadata: - Architecture: *id001 - Training Data: AnimalKingdom_P3_fish - Name: td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256 - Results: - - Dataset: AnimalKingdom - Metrics: - PCK: 0.6406 - Task: Animal 2D Keypoint - Weights: 
https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256-76c3999f_20230519.pth -- Config: configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256.py - In Collection: HRNet - Metadata: - Architecture: *id001 - Training Data: AnimalKingdom_P3_mammal - Name: td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256 - Results: - - Dataset: AnimalKingdom - Metrics: - PCK: 0.571 - Task: Animal 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256-e8aadf02_20230519.pth -- Config: configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256.py - In Collection: HRNet - Metadata: - Architecture: *id001 - Training Data: AnimalKingdom_P3_reptile - Name: td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256 - Results: - - Dataset: AnimalKingdom - Metrics: - PCK: 0.51 - Task: Animal 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256-e8440c16_20230519.pth +Models: +- Config: configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256.py + In Collection: HRNet + Metadata: + Architecture: &id001 + - HRNet + Training Data: AnimalKingdom_P1 + Name: td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256 + Results: + - Dataset: AnimalKingdom + Metrics: + PCK: 0.6323 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256-08bf96cb_20230519.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: AnimalKingdom_P2 + Name: td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256 + Results: + - Dataset: AnimalKingdom + Metrics: + PCK: 0.3741 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256-2396cc58_20230519.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: AnimalKingdom_P3_amphibian + Name: td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256 + Results: + - Dataset: AnimalKingdom + Metrics: + PCK: 0.5358 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256-845085f9_20230519.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: AnimalKingdom_P3_bird + Name: td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256 + Results: + - Dataset: AnimalKingdom + Metrics: + PCK: 0.7671 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256-566feff5_20230519.pth +- Config: 
configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: AnimalKingdom_P3_fish + Name: td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256 + Results: + - Dataset: AnimalKingdom + Metrics: + PCK: 0.6406 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256-76c3999f_20230519.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: AnimalKingdom_P3_mammal + Name: td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256 + Results: + - Dataset: AnimalKingdom + Metrics: + PCK: 0.571 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256-e8aadf02_20230519.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: AnimalKingdom_P3_reptile + Name: td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256 + Results: + - Dataset: AnimalKingdom + Metrics: + PCK: 0.51 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/animal_2d_keypoint/topdown_heatmap/animal_kingdom/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256-e8440c16_20230519.pth diff --git a/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256.py b/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256.py index 0e7eb0136e..9af6952e03 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256.py +++ b/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256.py @@ -1,146 +1,146 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=300, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='AdamW', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - 
num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=23, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'AnimalKingdomDataset' -data_mode = 'topdown' -data_root = 'data/ak/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ak_P1/train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=24, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ak_P1/test.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [dict(type='PCKAccuracy', thr=0.05), dict(type='AUC')] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=300, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='AdamW', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( 
+ type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=23, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AnimalKingdomDataset' +data_mode = 'topdown' +data_root = 'data/ak/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P1/train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=24, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P1/test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [dict(type='PCKAccuracy', thr=0.05), dict(type='AUC')] +test_evaluator = val_evaluator diff --git a/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256.py b/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256.py index f42057f8aa..d7f4238324 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256.py +++ b/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256.py @@ -1,146 +1,146 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=300, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='AdamW', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - 
stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=23, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'AnimalKingdomDataset' -data_mode = 'topdown' -data_root = 'data/ak/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ak_P2/train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=24, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ak_P2/test.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [dict(type='PCKAccuracy', thr=0.05), dict(type='AUC')] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=300, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='AdamW', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + 
stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=23, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AnimalKingdomDataset' +data_mode = 'topdown' +data_root = 'data/ak/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P2/train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=24, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P2/test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [dict(type='PCKAccuracy', thr=0.05), dict(type='AUC')] +test_evaluator = val_evaluator diff --git a/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256.py b/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256.py index 5a83e7a97b..1b54bb7524 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256.py +++ b/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256.py @@ -1,146 +1,146 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=300, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='AdamW', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - 
data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=23, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'AnimalKingdomDataset' -data_mode = 'topdown' -data_root = 'data/ak/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ak_P3_amphibian/train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=24, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ak_P3_amphibian/test.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [dict(type='PCKAccuracy', thr=0.05), dict(type='AUC')] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=300, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='AdamW', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + 
std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=23, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AnimalKingdomDataset' +data_mode = 'topdown' +data_root = 'data/ak/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P3_amphibian/train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=24, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P3_amphibian/test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [dict(type='PCKAccuracy', thr=0.05), dict(type='AUC')] +test_evaluator = val_evaluator diff --git a/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256.py b/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256.py index ca3c91af61..a3e8d9e193 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256.py +++ b/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256.py @@ -1,146 +1,146 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=300, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='AdamW', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual 
training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=23, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'AnimalKingdomDataset' -data_mode = 'topdown' -data_root = 'data/ak/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ak_P3_bird/train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=24, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ak_P3_bird/test.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [dict(type='PCKAccuracy', thr=0.05), dict(type='AUC')] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=300, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='AdamW', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = 
dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=23, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AnimalKingdomDataset' +data_mode = 'topdown' +data_root = 'data/ak/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P3_bird/train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=24, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P3_bird/test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [dict(type='PCKAccuracy', thr=0.05), dict(type='AUC')] +test_evaluator = val_evaluator diff --git a/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256.py b/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256.py index 3923f30d10..839e7f9785 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256.py +++ b/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256.py @@ -1,146 +1,146 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=300, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - 
type='AdamW', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=23, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'AnimalKingdomDataset' -data_mode = 'topdown' -data_root = 'data/ak/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ak_P3_fish/train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=24, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ak_P3_fish/test.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [dict(type='PCKAccuracy', thr=0.05), dict(type='AUC')] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=300, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='AdamW', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', 
begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=23, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AnimalKingdomDataset' +data_mode = 'topdown' +data_root = 'data/ak/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P3_fish/train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=24, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P3_fish/test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [dict(type='PCKAccuracy', thr=0.05), dict(type='AUC')] +test_evaluator = val_evaluator diff --git a/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256.py b/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256.py index d061c4b6fb..a367693182 100644 --- 
a/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256.py +++ b/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256.py @@ -1,146 +1,146 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=300, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='AdamW', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=23, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'AnimalKingdomDataset' -data_mode = 'topdown' -data_root = 'data/ak/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ak_P3_mammal/train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=24, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ak_P3_mammal/test.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) 
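Editor's note: all of the configs in this family encode training targets with the `MSRAHeatmap` codec shown above: each keypoint of the 256x256 crop becomes a 2D Gaussian with `sigma=2` on a 64x64 heatmap, which `HeatmapHead` then regresses with `KeypointMSELoss`. The numpy snippet below is a minimal sketch of that encoding for a single keypoint, assuming plain coordinate scaling between crop and heatmap; the real codec additionally handles target weights, invisible joints, and quantization details, so treat this as illustration only.

```python
import numpy as np

def gaussian_heatmap(kpt_xy, input_size=(256, 256), heatmap_size=(64, 64), sigma=2.0):
    """Minimal sketch of an MSRA-style heatmap target for one keypoint.
    `kpt_xy` is (x, y) in input-crop pixels; returns a (64, 64) map."""
    sx = heatmap_size[0] / input_size[0]          # 0.25 downscale, as in these configs
    sy = heatmap_size[1] / input_size[1]
    cx, cy = kpt_xy[0] * sx, kpt_xy[1] * sy       # keypoint centre on the heatmap grid
    xs = np.arange(heatmap_size[0])
    ys = np.arange(heatmap_size[1])[:, None]
    return np.exp(-((xs - cx) ** 2 + (ys - cy) ** 2) / (2 * sigma ** 2))

hm = gaussian_heatmap((128.0, 96.0))
print(hm.shape, np.unravel_index(hm.argmax(), hm.shape))  # (64, 64), peak at row 24, col 32
```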
-test_dataloader = val_dataloader - -# evaluators -val_evaluator = [dict(type='PCKAccuracy', thr=0.05), dict(type='AUC')] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=300, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='AdamW', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=23, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AnimalKingdomDataset' +data_mode = 'topdown' +data_root = 'data/ak/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P3_mammal/train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=24, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P3_mammal/test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [dict(type='PCKAccuracy', 
thr=0.05), dict(type='AUC')] +test_evaluator = val_evaluator diff --git a/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256.py b/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256.py index b06a49936b..8d2c0d7ce5 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256.py +++ b/configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256.py @@ -1,146 +1,146 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=300, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='AdamW', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=23, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'AnimalKingdomDataset' -data_mode = 'topdown' -data_root = 'data/ak/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ak_P3_reptile/train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=24, - num_workers=2, - 
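Editor's note: the `PCKAccuracy`/`AUC` evaluators configured for these Animal Kingdom splits count a predicted keypoint as correct when its distance to the ground truth is below `thr` (0.05 above) times a per-instance normalization length, and `AUC` integrates that accuracy over a range of thresholds. The sketch below spells out the core PCK computation with numpy; normalizing by the bounding-box size is an assumption for illustration, since the exact normalizer and masking logic live inside MMPose's metric implementation.

```python
import numpy as np

def pck(pred, gt, visible, bbox_size, thr=0.05):
    """Illustrative PCK: fraction of visible keypoints whose error is
    below `thr * bbox_size`. `pred` and `gt` are (K, 2) arrays in pixels."""
    dist = np.linalg.norm(pred - gt, axis=-1)    # per-keypoint pixel error
    correct = (dist / bbox_size) < thr           # normalized threshold test
    return correct[visible].mean() if visible.any() else 0.0

# Toy example: 3 keypoints, 200 px box, thr=0.05 -> 10 px tolerance.
gt = np.array([[50.0, 60.0], [120.0, 80.0], [90.0, 140.0]])
pred = gt + np.array([[4.0, 3.0], [15.0, 0.0], [-2.0, 6.0]])
vis = np.array([True, True, True])
print(pck(pred, gt, vis, bbox_size=200.0))       # 2 of 3 within 10 px -> ~0.667
```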
persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ak_P3_reptile/test.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [dict(type='PCKAccuracy', thr=0.05), dict(type='AUC')] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=300, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='AdamW', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=23, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AnimalKingdomDataset' +data_mode = 'topdown' +data_root = 'data/ak/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P3_reptile/train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=24, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, 
round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ak_P3_reptile/test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [dict(type='PCKAccuracy', thr=0.05), dict(type='AUC')] +test_evaluator = val_evaluator diff --git a/configs/animal_2d_keypoint/topdown_heatmap/animalpose/hrnet_animalpose.md b/configs/animal_2d_keypoint/topdown_heatmap/animalpose/hrnet_animalpose.md index 58b971313f..bd17ee5019 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/animalpose/hrnet_animalpose.md +++ b/configs/animal_2d_keypoint/topdown_heatmap/animalpose/hrnet_animalpose.md @@ -1,40 +1,40 @@ - - -
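Editor's note: with the reptile file above, the five Animal Kingdom P3 configs (amphibian, bird, fish, mammal, reptile) are complete, and they differ only in which `annotations/ak_P3_<class>/` split they read. If you maintain similar per-class variants in a fork, one way to avoid the duplication is a shared base config plus a tiny override per class; the sketch below shows that pattern under the assumption of a hypothetical base file and is not how the files in this diff are laid out.

```python
# Hypothetical per-class override on top of a shared base config
# (illustrative only; the configs in this diff spell everything out instead).
_base_ = ['./td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_base-256x256.py']  # hypothetical file

animal_class = 'fish'  # one of: amphibian, bird, fish, mammal, reptile

train_dataloader = dict(
    dataset=dict(ann_file=f'annotations/ak_P3_{animal_class}/train.json'))
val_dataloader = dict(
    dataset=dict(ann_file=f'annotations/ak_P3_{animal_class}/test.json'))
test_dataloader = val_dataloader
```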
-HRNet (CVPR'2019) - -```bibtex -@inproceedings{sun2019deep, - title={Deep high-resolution representation learning for human pose estimation}, - author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={5693--5703}, - year={2019} -} -``` - -
- - - -
-Animal-Pose (ICCV'2019) - -```bibtex -@InProceedings{Cao_2019_ICCV, - author = {Cao, Jinkun and Tang, Hongyang and Fang, Hao-Shu and Shen, Xiaoyong and Lu, Cewu and Tai, Yu-Wing}, - title = {Cross-Domain Adaptation for Animal Pose Estimation}, - booktitle = {The IEEE International Conference on Computer Vision (ICCV)}, - month = {October}, - year = {2019} -} -``` - -
- -Results on AnimalPose validation set (1117 instances) - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [pose_hrnet_w32](/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py) | 256x256 | 0.740 | 0.959 | 0.833 | 0.780 | 0.965 | [ckpt](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth) | [log](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256_20210426.log.json) | -| [pose_hrnet_w48](/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w48_8xb64-210e_animalpose-256x256.py) | 256x256 | 0.738 | 0.958 | 0.831 | 0.778 | 0.962 | [ckpt](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w48_animalpose_256x256-34644726_20210426.pth) | [log](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w48_animalpose_256x256_20210426.log.json) | + + +
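Editor's note: each row of this results file ties a config to its released checkpoint and training log. Before launching or evaluating a run it can help to load the config with `mmengine` and check what it resolves to; the sketch below assumes an MMPose 1.x checkout run from the repo root (so the relative `_base_` file resolves) and uses the HRNet-w32 AnimalPose config listed here.

```python
# Sketch: resolve and inspect a config with mmengine's config loader.
from mmengine.config import Config

cfg = Config.fromfile(
    'configs/animal_2d_keypoint/topdown_heatmap/animalpose/'
    'td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py')

print(cfg.model['head']['out_channels'])                   # 20 AnimalPose keypoints
print(cfg.train_dataloader['dataset']['ann_file'])         # annotations/animalpose_train.json
print(cfg.codec['input_size'], cfg.codec['heatmap_size'])  # (256, 256) (64, 64)
```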
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
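Editor's note: the `extra` block in these configs describes HRNet's parallel branches. Stage 4 of the W32 variant keeps four branches with `num_channels=(32, 64, 128, 256)`; each additional branch halves the spatial resolution and doubles the width, and only the highest-resolution branch feeds the heatmap head, hence `in_channels=32` (or 48 for W48). The helper below just makes that arithmetic explicit, assuming the standard HRNet choice of running the first branch at 1/4 of the input crop.

```python
def hrnet_branches(widths, input_size=256):
    """(resolution, channels) of each HRNet branch, assuming the
    highest-resolution branch runs at 1/4 of the input crop."""
    return [(input_size // 4 // (2 ** i), w) for i, w in enumerate(widths)]

# Stage-4 branch widths from the W32 and W48 configs in this directory.
print(hrnet_branches((32, 64, 128, 256)))   # [(64, 32), (32, 64), (16, 128), (8, 256)]
print(hrnet_branches((48, 96, 192, 384)))   # [(64, 48), (32, 96), (16, 192), (8, 384)]
```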
+ + + +
+Animal-Pose (ICCV'2019) + +```bibtex +@InProceedings{Cao_2019_ICCV, + author = {Cao, Jinkun and Tang, Hongyang and Fang, Hao-Shu and Shen, Xiaoyong and Lu, Cewu and Tai, Yu-Wing}, + title = {Cross-Domain Adaptation for Animal Pose Estimation}, + booktitle = {The IEEE International Conference on Computer Vision (ICCV)}, + month = {October}, + year = {2019} +} +``` + +
+ +Results on AnimalPose validation set (1117 instances) + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_hrnet_w32](/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py) | 256x256 | 0.740 | 0.959 | 0.833 | 0.780 | 0.965 | [ckpt](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth) | [log](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256_20210426.log.json) | +| [pose_hrnet_w48](/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w48_8xb64-210e_animalpose-256x256.py) | 256x256 | 0.738 | 0.958 | 0.831 | 0.778 | 0.962 | [ckpt](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w48_animalpose_256x256-34644726_20210426.pth) | [log](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w48_animalpose_256x256_20210426.log.json) | diff --git a/configs/animal_2d_keypoint/topdown_heatmap/animalpose/hrnet_animalpose.yml b/configs/animal_2d_keypoint/topdown_heatmap/animalpose/hrnet_animalpose.yml index caba133370..cb03cec6aa 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/animalpose/hrnet_animalpose.yml +++ b/configs/animal_2d_keypoint/topdown_heatmap/animalpose/hrnet_animalpose.yml @@ -1,34 +1,34 @@ -Models: -- Config: configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py - In Collection: HRNet - Metadata: - Architecture: &id001 - - HRNet - Training Data: Animal-Pose - Name: td-hm_hrnet-w32_8xb64-210e_animalpose-256x256 - Results: - - Dataset: Animal-Pose - Metrics: - AP: 0.740 - AP@0.5: 0.959 - AP@0.75: 0.833 - AR: 0.780 - AR@0.5: 0.965 - Task: Animal 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth -- Config: configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w48_8xb64-210e_animalpose-256x256.py - In Collection: HRNet - Metadata: - Architecture: *id001 - Training Data: Animal-Pose - Name: td-hm_hrnet-w48_8xb64-210e_animalpose-256x256 - Results: - - Dataset: Animal-Pose - Metrics: - AP: 0.738 - AP@0.5: 0.958 - AP@0.75: 0.831 - AR: 0.778 - AR@0.5: 0.962 - Task: Animal 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w48_animalpose_256x256-34644726_20210426.pth +Models: +- Config: configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py + In Collection: HRNet + Metadata: + Architecture: &id001 + - HRNet + Training Data: Animal-Pose + Name: td-hm_hrnet-w32_8xb64-210e_animalpose-256x256 + Results: + - Dataset: Animal-Pose + Metrics: + AP: 0.740 + AP@0.5: 0.959 + AP@0.75: 0.833 + AR: 0.780 + AR@0.5: 0.965 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w48_8xb64-210e_animalpose-256x256.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: Animal-Pose + Name: td-hm_hrnet-w48_8xb64-210e_animalpose-256x256 + Results: + - Dataset: Animal-Pose + Metrics: + AP: 0.738 + AP@0.5: 0.958 + AP@0.75: 0.831 + AR: 0.778 + AR@0.5: 0.962 + Task: Animal 2D Keypoint + Weights: 
https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w48_animalpose_256x256-34644726_20210426.pth diff --git a/configs/animal_2d_keypoint/topdown_heatmap/animalpose/resnet_animalpose.md b/configs/animal_2d_keypoint/topdown_heatmap/animalpose/resnet_animalpose.md index 20ddf54031..85d391214c 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/animalpose/resnet_animalpose.md +++ b/configs/animal_2d_keypoint/topdown_heatmap/animalpose/resnet_animalpose.md @@ -1,41 +1,41 @@ - - -
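Editor's note: the checkpoints referenced in the table and metadata above can be used directly for inference. The sketch below assumes MMPose 1.x's high-level Python APIs (`init_model` and `inference_topdown`) and a placeholder image path; it loads the released HRNet-w32 AnimalPose weights and prints the predicted keypoint array shape.

```python
# Sketch: run a released AnimalPose checkpoint on one image (paths and the
# demo image name are placeholders; adjust to your checkout and data).
from mmpose.apis import init_model, inference_topdown

config = ('configs/animal_2d_keypoint/topdown_heatmap/animalpose/'
          'td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py')
checkpoint = ('https://download.openmmlab.com/mmpose/animal/hrnet/'
              'hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth')

model = init_model(config, checkpoint, device='cpu')    # or 'cuda:0'
results = inference_topdown(model, 'demo_animal.jpg')   # whole image used as one bbox
print(results[0].pred_instances.keypoints.shape)        # (1, 20, 2) for AnimalPose
```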
-SimpleBaseline2D (ECCV'2018) - -```bibtex -@inproceedings{xiao2018simple, - title={Simple baselines for human pose estimation and tracking}, - author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, - booktitle={Proceedings of the European conference on computer vision (ECCV)}, - pages={466--481}, - year={2018} -} -``` - -
- - - -
-Animal-Pose (ICCV'2019) - -```bibtex -@InProceedings{Cao_2019_ICCV, - author = {Cao, Jinkun and Tang, Hongyang and Fang, Hao-Shu and Shen, Xiaoyong and Lu, Cewu and Tai, Yu-Wing}, - title = {Cross-Domain Adaptation for Animal Pose Estimation}, - booktitle = {The IEEE International Conference on Computer Vision (ICCV)}, - month = {October}, - year = {2019} -} -``` - -
- -Results on AnimalPose validation set (1117 instances) - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [pose_resnet_50](/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res50_8xb64-210e_animalpose-256x256.py) | 256x256 | 0.691 | 0.947 | 0.770 | 0.736 | 0.955 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res50_animalpose_256x256-e1f30bff_20210426.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res50_animalpose_256x256_20210426.log.json) | -| [pose_resnet_101](/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res101_8xb64-210e_animalpose-256x256.py) | 256x256 | 0.696 | 0.948 | 0.774 | 0.736 | 0.951 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res101_animalpose_256x256-85563f4a_20210426.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res101_animalpose_256x256_20210426.log.json) | -| [pose_resnet_152](/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res152_8xb32-210e_animalpose-256x256.py) | 256x256 | 0.704 | 0.938 | 0.786 | 0.748 | 0.946 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res152_animalpose_256x256-a0a7506c_20210426.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res152_animalpose_256x256_20210426.log.json) | + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
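Editor's note: the SimpleBaseline2D models documented in this file (the ResNet-50/101/152 configs) rely on the default deconvolutional head: unlike the HRNet configs, they leave `deconv_out_channels` at its default, so the 2048-channel ResNet feature map is upsampled by transposed convolutions before a 1x1 convolution predicts one heatmap per keypoint. The PyTorch sketch below illustrates that shape; the three 256-channel deconv stages are an assumption based on the common SimpleBaseline setup, not a copy of MMPose's `HeatmapHead`.

```python
import torch
from torch import nn

class SimpleDeconvHead(nn.Module):
    """Illustrative SimpleBaseline-style head: 3 deconv stages (x2 upsample
    each) followed by a 1x1 conv emitting one heatmap per keypoint."""

    def __init__(self, in_channels=2048, deconv_channels=256, num_keypoints=20):
        super().__init__()
        layers, c = [], in_channels
        for _ in range(3):  # 8x8 -> 64x64 for a 256x256 crop
            layers += [
                nn.ConvTranspose2d(c, deconv_channels, 4, stride=2, padding=1),
                nn.BatchNorm2d(deconv_channels),
                nn.ReLU(inplace=True),
            ]
            c = deconv_channels
        self.deconv = nn.Sequential(*layers)
        self.final = nn.Conv2d(deconv_channels, num_keypoints, kernel_size=1)

    def forward(self, feats):
        return self.final(self.deconv(feats))

# ResNet on a 256x256 crop yields an 8x8 feature map; the head returns 64x64 heatmaps.
print(SimpleDeconvHead()(torch.randn(1, 2048, 8, 8)).shape)  # torch.Size([1, 20, 64, 64])
```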
+ + + +
+Animal-Pose (ICCV'2019) + +```bibtex +@InProceedings{Cao_2019_ICCV, + author = {Cao, Jinkun and Tang, Hongyang and Fang, Hao-Shu and Shen, Xiaoyong and Lu, Cewu and Tai, Yu-Wing}, + title = {Cross-Domain Adaptation for Animal Pose Estimation}, + booktitle = {The IEEE International Conference on Computer Vision (ICCV)}, + month = {October}, + year = {2019} +} +``` + +
+ +Results on AnimalPose validation set (1117 instances) + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_resnet_50](/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res50_8xb64-210e_animalpose-256x256.py) | 256x256 | 0.691 | 0.947 | 0.770 | 0.736 | 0.955 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res50_animalpose_256x256-e1f30bff_20210426.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res50_animalpose_256x256_20210426.log.json) | +| [pose_resnet_101](/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res101_8xb64-210e_animalpose-256x256.py) | 256x256 | 0.696 | 0.948 | 0.774 | 0.736 | 0.951 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res101_animalpose_256x256-85563f4a_20210426.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res101_animalpose_256x256_20210426.log.json) | +| [pose_resnet_152](/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res152_8xb32-210e_animalpose-256x256.py) | 256x256 | 0.704 | 0.938 | 0.786 | 0.748 | 0.946 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res152_animalpose_256x256-a0a7506c_20210426.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res152_animalpose_256x256_20210426.log.json) | diff --git a/configs/animal_2d_keypoint/topdown_heatmap/animalpose/resnet_animalpose.yml b/configs/animal_2d_keypoint/topdown_heatmap/animalpose/resnet_animalpose.yml index 345c13c138..2888981fd3 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/animalpose/resnet_animalpose.yml +++ b/configs/animal_2d_keypoint/topdown_heatmap/animalpose/resnet_animalpose.yml @@ -1,51 +1,51 @@ -Models: -- Config: configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res50_8xb64-210e_animalpose-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: &id001 - - SimpleBaseline2D - - ResNet - Training Data: Animal-Pose - Name: td-hm_res50_8xb64-210e_animalpose-256x256 - Results: - - Dataset: Animal-Pose - Metrics: - AP: 0.691 - AP@0.5: 0.947 - AP@0.75: 0.770 - AR: 0.736 - AR@0.5: 0.955 - Task: Animal 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/animal/resnet/res50_animalpose_256x256-e1f30bff_20210426.pth -- Config: configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res101_8xb64-210e_animalpose-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: Animal-Pose - Name: td-hm_res101_8xb64-210e_animalpose-256x256 - Results: - - Dataset: Animal-Pose - Metrics: - AP: 0.696 - AP@0.5: 0.948 - AP@0.75: 0.774 - AR: 0.736 - AR@0.5: 0.951 - Task: Animal 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/animal/resnet/res101_animalpose_256x256-85563f4a_20210426.pth -- Config: configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res152_8xb32-210e_animalpose-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: Animal-Pose - Name: td-hm_res152_8xb32-210e_animalpose-256x256 - Results: - - Dataset: Animal-Pose - Metrics: - AP: 0.704 - AP@0.5: 0.938 - AP@0.75: 0.786 - AR: 0.748 - AR@0.5: 0.946 - Task: Animal 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/animal/resnet/res152_animalpose_256x256-a0a7506c_20210426.pth +Models: +- Config: 
configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res50_8xb64-210e_animalpose-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNet + Training Data: Animal-Pose + Name: td-hm_res50_8xb64-210e_animalpose-256x256 + Results: + - Dataset: Animal-Pose + Metrics: + AP: 0.691 + AP@0.5: 0.947 + AP@0.75: 0.770 + AR: 0.736 + AR@0.5: 0.955 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/resnet/res50_animalpose_256x256-e1f30bff_20210426.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res101_8xb64-210e_animalpose-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: Animal-Pose + Name: td-hm_res101_8xb64-210e_animalpose-256x256 + Results: + - Dataset: Animal-Pose + Metrics: + AP: 0.696 + AP@0.5: 0.948 + AP@0.75: 0.774 + AR: 0.736 + AR@0.5: 0.951 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/resnet/res101_animalpose_256x256-85563f4a_20210426.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res152_8xb32-210e_animalpose-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: Animal-Pose + Name: td-hm_res152_8xb32-210e_animalpose-256x256 + Results: + - Dataset: Animal-Pose + Metrics: + AP: 0.704 + AP@0.5: 0.938 + AP@0.75: 0.786 + AR: 0.748 + AR@0.5: 0.946 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/resnet/res152_animalpose_256x256-a0a7506c_20210426.pth diff --git a/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py b/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py index 2680fe8956..9dc501dd50 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py +++ b/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py @@ -1,147 +1,147 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - 
num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=20, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'AnimalPoseDataset' -data_mode = 'topdown' -data_root = 'data/animalpose/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/animalpose_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/animalpose_val.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', ann_file=data_root + 'annotations/animalpose_val.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + 
checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=20, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AnimalPoseDataset' +data_mode = 'topdown' +data_root = 'data/animalpose/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/animalpose_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/animalpose_val.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', ann_file=data_root + 'annotations/animalpose_val.json') +test_evaluator = val_evaluator diff --git a/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w48_8xb64-210e_animalpose-256x256.py b/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w48_8xb64-210e_animalpose-256x256.py index 3d4a76d8f5..d671f61851 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w48_8xb64-210e_animalpose-256x256.py +++ b/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w48_8xb64-210e_animalpose-256x256.py @@ -1,147 +1,147 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - 
num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(48, 96)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(48, 96, 192)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(48, 96, 192, 384))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w48-8ef0771d.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=48, - out_channels=20, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'AnimalPoseDataset' -data_mode = 'topdown' -data_root = 'data/animalpose/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/animalpose_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/animalpose_val.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', ann_file=data_root + 'annotations/animalpose_val.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + 
block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=20, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AnimalPoseDataset' +data_mode = 'topdown' +data_root = 'data/animalpose/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/animalpose_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/animalpose_val.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', ann_file=data_root + 'annotations/animalpose_val.json') +test_evaluator = val_evaluator diff --git a/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res101_8xb64-210e_animalpose-256x256.py b/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res101_8xb64-210e_animalpose-256x256.py index 8ffaabb06f..abc4127be5 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res101_8xb64-210e_animalpose-256x256.py +++ b/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res101_8xb64-210e_animalpose-256x256.py @@ -1,118 +1,118 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - 
type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=101, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=20, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'AnimalPoseDataset' -data_mode = 'topdown' -data_root = 'data/animalpose/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/animalpose_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/animalpose_val.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', ann_file=data_root + 'annotations/animalpose_val.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=20, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AnimalPoseDataset' +data_mode = 'topdown' +data_root = 'data/animalpose/' + +# pipelines 
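Editor's note: every `test_cfg` in these configs turns on flip-test: the network also sees the horizontally flipped crop, the flipped heatmaps are mapped back by swapping left/right keypoint channels (plus the one-pixel `shift_heatmap` correction), and the two predictions are averaged. The numpy sketch below shows only the channel-swap-and-average idea on toy data; the flip pairs are hypothetical and the shift correction is omitted.

```python
import numpy as np

def flip_test_average(heatmaps, flipped_heatmaps, flip_pairs):
    """Toy flip-test fusion: un-flip the mirrored prediction and average.
    `heatmaps` are (K, H, W); `flip_pairs` lists (left, right) channel indices."""
    restored = flipped_heatmaps[:, :, ::-1].copy()  # undo the horizontal flip
    for left, right in flip_pairs:                  # swap mirrored keypoint channels
        restored[[left, right]] = restored[[right, left]]
    return 0.5 * (heatmaps + restored)

K, H, W = 4, 64, 64
hm = np.random.rand(K, H, W)
hm_flip = np.random.rand(K, H, W)
fused = flip_test_average(hm, hm_flip, flip_pairs=[(0, 1), (2, 3)])  # hypothetical pairs
print(fused.shape)  # (4, 64, 64)
```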
+train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/animalpose_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/animalpose_val.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', ann_file=data_root + 'annotations/animalpose_val.json') +test_evaluator = val_evaluator diff --git a/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res152_8xb32-210e_animalpose-256x256.py b/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res152_8xb32-210e_animalpose-256x256.py index 8ed92929c9..9e8b4bfa9a 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res152_8xb32-210e_animalpose-256x256.py +++ b/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res152_8xb32-210e_animalpose-256x256.py @@ -1,118 +1,118 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=152, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=20, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'AnimalPoseDataset' -data_mode = 'topdown' -data_root = 'data/animalpose/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - 
dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/animalpose_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/animalpose_val.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', ann_file=data_root + 'annotations/animalpose_val.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=20, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AnimalPoseDataset' +data_mode = 'topdown' +data_root = 'data/animalpose/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/animalpose_train.json', + data_prefix=dict(img=''), + 
pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/animalpose_val.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', ann_file=data_root + 'annotations/animalpose_val.json') +test_evaluator = val_evaluator diff --git a/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res50_8xb64-210e_animalpose-256x256.py b/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res50_8xb64-210e_animalpose-256x256.py index c053c88814..953b9e9d2f 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res50_8xb64-210e_animalpose-256x256.py +++ b/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_res50_8xb64-210e_animalpose-256x256.py @@ -1,118 +1,118 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=20, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'AnimalPoseDataset' -data_mode = 'topdown' -data_root = 'data/animalpose/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/animalpose_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, 
round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/animalpose_val.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', ann_file=data_root + 'annotations/animalpose_val.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=20, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AnimalPoseDataset' +data_mode = 'topdown' +data_root = 'data/animalpose/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/animalpose_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/animalpose_val.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', ann_file=data_root + 'annotations/animalpose_val.json') +test_evaluator = val_evaluator diff --git a/configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext-m_udp_8xb64-210e_ap10k-256x256.py b/configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext-m_udp_8xb64-210e_ap10k-256x256.py index 844d17df4e..66391d2b8e 100644 --- 
a/configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext-m_udp_8xb64-210e_ap10k-256x256.py +++ b/configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext-m_udp_8xb64-210e_ap10k-256x256.py @@ -1,220 +1,220 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 210 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 105 to 210 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' - 'rtmdet/cspnext_rsb_pretrain/' - 'cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth')), - head=dict( - type='HeatmapHead', - in_channels=768, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'AP10KDataset' -data_mode = 'topdown' -data_root = 'data/ap10k/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - 
dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ap10k-train-split1.json', - data_prefix=dict(img='data/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ap10k-val-split1.json', - data_prefix=dict(img='data/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ap10k-test-split1.json', - data_prefix=dict(img='data/'), - test_mode=True, - pipeline=val_pipeline, - )) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/ap10k-val-split1.json') -test_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/ap10k-test-split1.json') +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 105 to 210 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + 
checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth')), + head=dict( + type='HeatmapHead', + in_channels=768, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'AP10KDataset' +data_mode = 'topdown' +data_root = 'data/ap10k/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-train-split1.json', + data_prefix=dict(img='data/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-val-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-test-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + 
switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-val-split1.json') +test_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-test-split1.json') diff --git a/configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext_udp_ap10k.md b/configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext_udp_ap10k.md index fb10359685..8d1c8d2a36 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext_udp_ap10k.md +++ b/configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext_udp_ap10k.md @@ -1,58 +1,58 @@ - - -
-RTMDet (ArXiv 2022)
-
-```bibtex
-@misc{lyu2022rtmdet,
- title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors},
- author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen},
- year={2022},
- eprint={2212.07784},
- archivePrefix={arXiv},
- primaryClass={cs.CV}
-}
-```
-
-
-
-
-
-
-UDP (CVPR'2020)
-
-```bibtex
-@InProceedings{Huang_2020_CVPR,
- author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan},
- title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation},
- booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
- month = {June},
- year = {2020}
-}
-```
-
-
-
-
-
-
-AP-10K (NeurIPS'2021)
-
-```bibtex
-@misc{yu2021ap10k,
- title={AP-10K: A Benchmark for Animal Pose Estimation in the Wild},
- author={Hang Yu and Yufei Xu and Jing Zhang and Wei Zhao and Ziyu Guan and Dacheng Tao},
- year={2021},
- eprint={2108.12617},
- archivePrefix={arXiv},
- primaryClass={cs.CV}
-}
-```
-
-
-
-Results on AP-10K validation set
-
-| Arch | Input Size | AP | AP50 | AP75 | APM | APL | ckpt | log |
-| :----------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :------------: | :------------: | :-----------------------------------------: | :----------------------------------------: |
-| [pose_cspnext_m](/configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext-m_udp_8xb64-210e_ap10k-256x256.py) | 256x256 | 0.703 | 0.944 | 0.776 | 0.513 | 0.710 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-ap10k_pt-in1k_210e-256x256-1f2d947a_20230123.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-ap10k_pt-in1k_210e-256x256-1f2d947a_20230123.json) |
+
+
+RTMDet (ArXiv 2022)
+
+```bibtex
+@misc{lyu2022rtmdet,
+ title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors},
+ author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen},
+ year={2022},
+ eprint={2212.07784},
+ archivePrefix={arXiv},
+ primaryClass={cs.CV}
+}
+```
+
+
+
+
+
+
+UDP (CVPR'2020)
+
+```bibtex
+@InProceedings{Huang_2020_CVPR,
+ author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan},
+ title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation},
+ booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
+ month = {June},
+ year = {2020}
+}
+```
+
+
+
+
+
+
+AP-10K (NeurIPS'2021)
+
+```bibtex
+@misc{yu2021ap10k,
+ title={AP-10K: A Benchmark for Animal Pose Estimation in the Wild},
+ author={Hang Yu and Yufei Xu and Jing Zhang and Wei Zhao and Ziyu Guan and Dacheng Tao},
+ year={2021},
+ eprint={2108.12617},
+ archivePrefix={arXiv},
+ primaryClass={cs.CV}
+}
+```
+
+
+ +Results on AP-10K validation set + +| Arch | Input Size | AP | AP50 | AP75 | APM | APL | ckpt | log | +| :----------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :------------: | :------------: | :-----------------------------------------: | :----------------------------------------: | +| [pose_cspnext_m](/configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext-m_udp_8xb64-210e_ap10k-256x256.py) | 256x256 | 0.703 | 0.944 | 0.776 | 0.513 | 0.710 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-ap10k_pt-in1k_210e-256x256-1f2d947a_20230123.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-ap10k_pt-in1k_210e-256x256-1f2d947a_20230123.json) | diff --git a/configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext_udp_ap10k.yml b/configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext_udp_ap10k.yml index 8fedc88374..da5785cdd4 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext_udp_ap10k.yml +++ b/configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext_udp_ap10k.yml @@ -1,19 +1,19 @@ -Models: -- Config: configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext-m_udp_8xb64-210e_ap10k-256x256.py - In Collection: UDP - Metadata: - Architecture: &id001 - - UDP - - HRNet - Training Data: AP-10K - Name: cspnext-m_udp_8xb64-210e_ap10k-256x256 - Results: - - Dataset: AP-10K - Metrics: - AP: 0.703 - AP@0.5: 0.944 - AP@0.75: 0.776 - AP (L): 0.71 - AP (M): 0.513 - Task: Animal 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-ap10k_pt-in1k_210e-256x256-1f2d947a_20230123.pth +Models: +- Config: configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext-m_udp_8xb64-210e_ap10k-256x256.py + In Collection: UDP + Metadata: + Architecture: &id001 + - UDP + - HRNet + Training Data: AP-10K + Name: cspnext-m_udp_8xb64-210e_ap10k-256x256 + Results: + - Dataset: AP-10K + Metrics: + AP: 0.703 + AP@0.5: 0.944 + AP@0.75: 0.776 + AP (L): 0.71 + AP (M): 0.513 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-ap10k_pt-in1k_210e-256x256-1f2d947a_20230123.pth diff --git a/configs/animal_2d_keypoint/topdown_heatmap/ap10k/hrnet_ap10k.md b/configs/animal_2d_keypoint/topdown_heatmap/ap10k/hrnet_ap10k.md index fbdd2cbf9f..639509d16a 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/ap10k/hrnet_ap10k.md +++ b/configs/animal_2d_keypoint/topdown_heatmap/ap10k/hrnet_ap10k.md @@ -1,41 +1,41 @@ - - -
-HRNet (CVPR'2019)
-
-```bibtex
-@inproceedings{sun2019deep,
- title={Deep high-resolution representation learning for human pose estimation},
- author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong},
- booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
- pages={5693--5703},
- year={2019}
-}
-```
-
-
-
-
-
-
-AP-10K (NeurIPS'2021)
-
-```bibtex
-@misc{yu2021ap10k,
- title={AP-10K: A Benchmark for Animal Pose Estimation in the Wild},
- author={Hang Yu and Yufei Xu and Jing Zhang and Wei Zhao and Ziyu Guan and Dacheng Tao},
- year={2021},
- eprint={2108.12617},
- archivePrefix={arXiv},
- primaryClass={cs.CV}
-}
-```
-
-
-
-Results on AP-10K validation set
-
-| Arch | Input Size | AP | AP50 | AP75 | APM | APL | ckpt | log |
-| :----------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :------------: | :------------: | :-----------------------------------------: | :----------------------------------------: |
-| [pose_hrnet_w32](/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w32_8xb64-210e_ap10k-256x256.py) | 256x256 | 0.722 | 0.935 | 0.789 | 0.557 | 0.729 | [ckpt](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_ap10k_256x256-18aac840_20211029.pth) | [log](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_ap10k_256x256-18aac840_20211029.log.json) |
-| [pose_hrnet_w48](/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w48_8xb64-210e_ap10k-256x256.py) | 256x256 | 0.728 | 0.936 | 0.802 | 0.577 | 0.735 | [ckpt](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w48_ap10k_256x256-d95ab412_20211029.pth) | [log](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w48_ap10k_256x256-d95ab412_20211029.log.json) |
+
+
+HRNet (CVPR'2019)
+
+```bibtex
+@inproceedings{sun2019deep,
+ title={Deep high-resolution representation learning for human pose estimation},
+ author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong},
+ booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
+ pages={5693--5703},
+ year={2019}
+}
+```
+
+
+
+
+
+
+AP-10K (NeurIPS'2021)
+
+```bibtex
+@misc{yu2021ap10k,
+ title={AP-10K: A Benchmark for Animal Pose Estimation in the Wild},
+ author={Hang Yu and Yufei Xu and Jing Zhang and Wei Zhao and Ziyu Guan and Dacheng Tao},
+ year={2021},
+ eprint={2108.12617},
+ archivePrefix={arXiv},
+ primaryClass={cs.CV}
+}
+```
+
+
+ +Results on AP-10K validation set + +| Arch | Input Size | AP | AP50 | AP75 | APM | APL | ckpt | log | +| :----------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :------------: | :------------: | :-----------------------------------------: | :----------------------------------------: | +| [pose_hrnet_w32](/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w32_8xb64-210e_ap10k-256x256.py) | 256x256 | 0.722 | 0.935 | 0.789 | 0.557 | 0.729 | [ckpt](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_ap10k_256x256-18aac840_20211029.pth) | [log](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_ap10k_256x256-18aac840_20211029.log.json) | +| [pose_hrnet_w48](/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w48_8xb64-210e_ap10k-256x256.py) | 256x256 | 0.728 | 0.936 | 0.802 | 0.577 | 0.735 | [ckpt](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w48_ap10k_256x256-d95ab412_20211029.pth) | [log](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w48_ap10k_256x256-d95ab412_20211029.log.json) | diff --git a/configs/animal_2d_keypoint/topdown_heatmap/ap10k/hrnet_ap10k.yml b/configs/animal_2d_keypoint/topdown_heatmap/ap10k/hrnet_ap10k.yml index 38aade8349..f485dcb194 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/ap10k/hrnet_ap10k.yml +++ b/configs/animal_2d_keypoint/topdown_heatmap/ap10k/hrnet_ap10k.yml @@ -1,34 +1,34 @@ -Models: -- Config: configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w32_8xb64-210e_ap10k-256x256.py - In Collection: HRNet - Metadata: - Architecture: &id001 - - HRNet - Training Data: AP-10K - Name: td-hm_hrnet-w32_8xb64-210e_ap10k-256x256 - Results: - - Dataset: AP-10K - Metrics: - AP: 0.722 - AP@0.5: 0.935 - AP@0.75: 0.789 - AP (L): 0.729 - AP (M): 0.557 - Task: Animal 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_ap10k_256x256-18aac840_20211029.pth -- Config: configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w48_8xb64-210e_ap10k-256x256.py - In Collection: HRNet - Metadata: - Architecture: *id001 - Training Data: AP-10K - Name: td-hm_hrnet-w48_8xb64-210e_ap10k-256x256 - Results: - - Dataset: AP-10K - Metrics: - AP: 0.728 - AP@0.5: 0.936 - AP@0.75: 0.802 - AP (L): 0.735 - AP (M): 0.577 - Task: Animal 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w48_ap10k_256x256-d95ab412_20211029.pth +Models: +- Config: configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w32_8xb64-210e_ap10k-256x256.py + In Collection: HRNet + Metadata: + Architecture: &id001 + - HRNet + Training Data: AP-10K + Name: td-hm_hrnet-w32_8xb64-210e_ap10k-256x256 + Results: + - Dataset: AP-10K + Metrics: + AP: 0.722 + AP@0.5: 0.935 + AP@0.75: 0.789 + AP (L): 0.729 + AP (M): 0.557 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_ap10k_256x256-18aac840_20211029.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w48_8xb64-210e_ap10k-256x256.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: AP-10K + Name: td-hm_hrnet-w48_8xb64-210e_ap10k-256x256 + Results: + - Dataset: AP-10K + Metrics: + AP: 0.728 + AP@0.5: 0.936 + AP@0.75: 0.802 + AP (L): 0.735 + AP (M): 0.577 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w48_ap10k_256x256-d95ab412_20211029.pth diff --git a/configs/animal_2d_keypoint/topdown_heatmap/ap10k/resnet_ap10k.md 
b/configs/animal_2d_keypoint/topdown_heatmap/ap10k/resnet_ap10k.md
index 11ad6ed033..7dcd2e32a8 100644
--- a/configs/animal_2d_keypoint/topdown_heatmap/ap10k/resnet_ap10k.md
+++ b/configs/animal_2d_keypoint/topdown_heatmap/ap10k/resnet_ap10k.md
@@ -1,41 +1,41 @@
-
-
-SimpleBaseline2D (ECCV'2018)
-
-```bibtex
-@inproceedings{xiao2018simple,
- title={Simple baselines for human pose estimation and tracking},
- author={Xiao, Bin and Wu, Haiping and Wei, Yichen},
- booktitle={Proceedings of the European conference on computer vision (ECCV)},
- pages={466--481},
- year={2018}
-}
-```
-
-
-
-
-
-
-AP-10K (NeurIPS'2021)
-
-```bibtex
-@misc{yu2021ap10k,
- title={AP-10K: A Benchmark for Animal Pose Estimation in the Wild},
- author={Hang Yu and Yufei Xu and Jing Zhang and Wei Zhao and Ziyu Guan and Dacheng Tao},
- year={2021},
- eprint={2108.12617},
- archivePrefix={arXiv},
- primaryClass={cs.CV}
-}
-```
-
-
-
-Results on AP-10K validation set
-
-| Arch | Input Size | AP | AP50 | AP75 | APM | APL | ckpt | log |
-| :----------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :------------: | :------------: | :-----------------------------------------: | :----------------------------------------: |
-| [pose_resnet_50](/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res50_8xb64-210e_ap10k-256x256.py) | 256x256 | 0.680 | 0.926 | 0.738 | 0.552 | 0.687 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res50_ap10k_256x256-35760eb8_20211029.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res50_ap10k_256x256-35760eb8_20211029.log.json) |
-| [pose_resnet_101](/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res101_8xb64-210e_ap10k-256x256.py) | 256x256 | 0.681 | 0.921 | 0.751 | 0.545 | 0.690 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res101_ap10k_256x256-9edfafb9_20211029.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res101_ap10k_256x256-9edfafb9_20211029.log.json) |
+
+
+SimpleBaseline2D (ECCV'2018)
+
+```bibtex
+@inproceedings{xiao2018simple,
+ title={Simple baselines for human pose estimation and tracking},
+ author={Xiao, Bin and Wu, Haiping and Wei, Yichen},
+ booktitle={Proceedings of the European conference on computer vision (ECCV)},
+ pages={466--481},
+ year={2018}
+}
+```
+
+
+
+
+
+
+AP-10K (NeurIPS'2021)
+
+```bibtex
+@misc{yu2021ap10k,
+ title={AP-10K: A Benchmark for Animal Pose Estimation in the Wild},
+ author={Hang Yu and Yufei Xu and Jing Zhang and Wei Zhao and Ziyu Guan and Dacheng Tao},
+ year={2021},
+ eprint={2108.12617},
+ archivePrefix={arXiv},
+ primaryClass={cs.CV}
+}
+```
+
+
+ +Results on AP-10K validation set + +| Arch | Input Size | AP | AP50 | AP75 | APM | APL | ckpt | log | +| :----------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :------------: | :------------: | :-----------------------------------------: | :----------------------------------------: | +| [pose_resnet_50](/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res50_8xb64-210e_ap10k-256x256.py) | 256x256 | 0.680 | 0.926 | 0.738 | 0.552 | 0.687 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res50_ap10k_256x256-35760eb8_20211029.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res50_ap10k_256x256-35760eb8_20211029.log.json) | +| [pose_resnet_101](/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res101_8xb64-210e_ap10k-256x256.py) | 256x256 | 0.681 | 0.921 | 0.751 | 0.545 | 0.690 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res101_ap10k_256x256-9edfafb9_20211029.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res101_ap10k_256x256-9edfafb9_20211029.log.json) | diff --git a/configs/animal_2d_keypoint/topdown_heatmap/ap10k/resnet_ap10k.yml b/configs/animal_2d_keypoint/topdown_heatmap/ap10k/resnet_ap10k.yml index 84cc4156b9..29d5b6e4a3 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/ap10k/resnet_ap10k.yml +++ b/configs/animal_2d_keypoint/topdown_heatmap/ap10k/resnet_ap10k.yml @@ -1,35 +1,35 @@ -Models: -- Config: configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res50_8xb64-210e_ap10k-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: &id001 - - SimpleBaseline2D - - ResNet - Training Data: AP-10K - Name: td-hm_res50_8xb64-210e_ap10k-256x256 - Results: - - Dataset: AP-10K - Metrics: - AP: 0.680 - AP@0.5: 0.926 - AP@0.75: 0.738 - AP (L): 0.687 - AP (M): 0.552 - Task: Animal 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/animal/resnet/res50_ap10k_256x256-35760eb8_20211029.pth -- Config: configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res101_8xb64-210e_ap10k-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: AP-10K - Name: td-hm_res101_8xb64-210e_ap10k-256x256 - Results: - - Dataset: AP-10K - Metrics: - AP: 0.681 - AP@0.5: 0.921 - AP@0.75: 0.751 - AP (L): 0.690 - AP (M): 0.545 - Task: Animal 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/animal/resnet/res101_ap10k_256x256-9edfafb9_20211029.pth +Models: +- Config: configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res50_8xb64-210e_ap10k-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNet + Training Data: AP-10K + Name: td-hm_res50_8xb64-210e_ap10k-256x256 + Results: + - Dataset: AP-10K + Metrics: + AP: 0.680 + AP@0.5: 0.926 + AP@0.75: 0.738 + AP (L): 0.687 + AP (M): 0.552 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/resnet/res50_ap10k_256x256-35760eb8_20211029.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res101_8xb64-210e_ap10k-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: AP-10K + Name: td-hm_res101_8xb64-210e_ap10k-256x256 + Results: + - Dataset: AP-10K + Metrics: + AP: 0.681 + AP@0.5: 0.921 + AP@0.75: 0.751 + AP (L): 0.690 + AP (M): 0.545 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/resnet/res101_ap10k_256x256-9edfafb9_20211029.pth diff --git 
a/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w32_8xb64-210e_ap10k-256x256.py b/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w32_8xb64-210e_ap10k-256x256.py index c61e6384ae..9d661ccb41 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w32_8xb64-210e_ap10k-256x256.py +++ b/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w32_8xb64-210e_ap10k-256x256.py @@ -1,164 +1,164 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'AP10KDataset' -data_mode = 'topdown' -data_root = 'data/ap10k/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ap10k-train-split1.json', - data_prefix=dict(img='data/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - 
type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ap10k-val-split1.json', - data_prefix=dict(img='data/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ap10k-test-split1.json', - data_prefix=dict(img='data/'), - test_mode=True, - pipeline=val_pipeline, - )) - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/ap10k-val-split1.json') -test_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/ap10k-test-split1.json') +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AP10KDataset' +data_mode = 'topdown' +data_root = 'data/ap10k/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, 
+ data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-train-split1.json', + data_prefix=dict(img='data/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-val-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-test-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-val-split1.json') +test_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-test-split1.json') diff --git a/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w48_8xb64-210e_ap10k-256x256.py b/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w48_8xb64-210e_ap10k-256x256.py index 146114a887..fe280735a6 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w48_8xb64-210e_ap10k-256x256.py +++ b/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_hrnet-w48_8xb64-210e_ap10k-256x256.py @@ -1,164 +1,164 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(48, 96)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(48, 96, 192)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(48, 96, 192, 384))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w48-8ef0771d.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=48, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset 
settings -dataset_type = 'AP10KDataset' -data_mode = 'topdown' -data_root = 'data/ap10k/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ap10k-train-split1.json', - data_prefix=dict(img='data/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ap10k-val-split1.json', - data_prefix=dict(img='data/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ap10k-test-split1.json', - data_prefix=dict(img='data/'), - test_mode=True, - pipeline=val_pipeline, - )) - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/ap10k-val-split1.json') -test_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/ap10k-test-split1.json') +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + 
checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AP10KDataset' +data_mode = 'topdown' +data_root = 'data/ap10k/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-train-split1.json', + data_prefix=dict(img='data/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-val-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-test-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-val-split1.json') +test_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-test-split1.json') diff --git a/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res101_8xb64-210e_ap10k-256x256.py b/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res101_8xb64-210e_ap10k-256x256.py index be49577511..7c1739d1d3 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res101_8xb64-210e_ap10k-256x256.py +++ b/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res101_8xb64-210e_ap10k-256x256.py @@ -1,135 +1,135 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), 
sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=101, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'AP10KDataset' -data_mode = 'topdown' -data_root = 'data/ap10k/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ap10k-train-split1.json', - data_prefix=dict(img='data/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ap10k-val-split1.json', - data_prefix=dict(img='data/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ap10k-test-split1.json', - data_prefix=dict(img='data/'), - test_mode=True, - pipeline=val_pipeline, - )) - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/ap10k-val-split1.json') -test_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/ap10k-test-split1.json') +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + 
type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AP10KDataset' +data_mode = 'topdown' +data_root = 'data/ap10k/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-train-split1.json', + data_prefix=dict(img='data/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-val-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-test-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-val-split1.json') +test_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-test-split1.json') diff --git a/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res50_8xb64-210e_ap10k-256x256.py b/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res50_8xb64-210e_ap10k-256x256.py index 2172cbe938..703470fb2e 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res50_8xb64-210e_ap10k-256x256.py +++ b/configs/animal_2d_keypoint/topdown_heatmap/ap10k/td-hm_res50_8xb64-210e_ap10k-256x256.py @@ -1,135 +1,135 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings 
-model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'AP10KDataset' -data_mode = 'topdown' -data_root = 'data/ap10k/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ap10k-train-split1.json', - data_prefix=dict(img='data/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ap10k-val-split1.json', - data_prefix=dict(img='data/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ap10k-test-split1.json', - data_prefix=dict(img='data/'), - test_mode=True, - pipeline=val_pipeline, - )) - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/ap10k-val-split1.json') -test_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/ap10k-test-split1.json') +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + 
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AP10KDataset' +data_mode = 'topdown' +data_root = 'data/ap10k/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-train-split1.json', + data_prefix=dict(img='data/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-val-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-test-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-val-split1.json') +test_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-test-split1.json') diff --git a/configs/animal_2d_keypoint/topdown_heatmap/locust/resnet_locust.md b/configs/animal_2d_keypoint/topdown_heatmap/locust/resnet_locust.md index bb7c837492..ac07e5a884 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/locust/resnet_locust.md +++ b/configs/animal_2d_keypoint/topdown_heatmap/locust/resnet_locust.md @@ -1,43 +1,43 @@ - - -
-SimpleBaseline2D (ECCV'2018) - -```bibtex -@inproceedings{xiao2018simple, - title={Simple baselines for human pose estimation and tracking}, - author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, - booktitle={Proceedings of the European conference on computer vision (ECCV)}, - pages={466--481}, - year={2018} -} -``` - -
- - - -
-Desert Locust (Elife'2019) - -```bibtex -@article{graving2019deepposekit, - title={DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning}, - author={Graving, Jacob M and Chae, Daniel and Naik, Hemal and Li, Liang and Koger, Benjamin and Costelloe, Blair R and Couzin, Iain D}, - journal={Elife}, - volume={8}, - pages={e47994}, - year={2019}, - publisher={eLife Sciences Publications Limited} -} -``` - -
- -Results on Desert Locust test set - -| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | -| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | -| [pose_resnet_50](/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res50_8xb64-210e_locust-160x160.py) | 160x160 | 1.000 | 0.900 | 2.27 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res50_locust_160x160-9efca22b_20210407.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res50_locust_160x160_20210407.log.json) | -| [pose_resnet_101](/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res101_8xb64-210e_locust-160x160.py) | 160x160 | 1.000 | 0.907 | 2.03 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res101_locust_160x160-d77986b3_20210407.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res101_locust_160x160_20210407.log.json) | -| [pose_resnet_152](/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res152_8xb32-210e_locust-160x160.py) | 160x160 | 1.000 | 0.925 | 1.49 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res152_locust_160x160-4ea9b372_20210407.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res152_locust_160x160_20210407.log.json) | + + +
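
> **Editor's note (not part of the diff):** for readers who want to try one of the Desert Locust checkpoints listed in the table above, the snippet below is a minimal sketch of how such a model could be loaded with the MMPose 1.x Python API (`init_model` / `inference_topdown`). The config path and checkpoint URL are copied from the table; the `demo.jpg` image and the CPU device are placeholders, and the exact output layout may differ between MMPose versions.

```python
# Minimal sketch assuming an MMPose 1.x installation; illustrative only.
from mmpose.apis import init_model, inference_topdown

config = ('configs/animal_2d_keypoint/topdown_heatmap/locust/'
          'td-hm_res50_8xb64-210e_locust-160x160.py')
checkpoint = ('https://download.openmmlab.com/mmpose/animal/resnet/'
              'res50_locust_160x160-9efca22b_20210407.pth')

# Build the TopdownPoseEstimator defined by the config and load the weights.
model = init_model(config, checkpoint, device='cpu')

# Run top-down inference on a cropped locust image; when no bounding boxes
# are passed, the whole image is treated as a single instance.
results = inference_topdown(model, 'demo.jpg')  # 'demo.jpg' is a placeholder
print(results[0].pred_instances.keypoints.shape)  # expected (1, 35, 2)
```
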
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+Desert Locust (Elife'2019) + +```bibtex +@article{graving2019deepposekit, + title={DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning}, + author={Graving, Jacob M and Chae, Daniel and Naik, Hemal and Li, Liang and Koger, Benjamin and Costelloe, Blair R and Couzin, Iain D}, + journal={Elife}, + volume={8}, + pages={e47994}, + year={2019}, + publisher={eLife Sciences Publications Limited} +} +``` + +
+ +Results on Desert Locust test set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [pose_resnet_50](/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res50_8xb64-210e_locust-160x160.py) | 160x160 | 1.000 | 0.900 | 2.27 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res50_locust_160x160-9efca22b_20210407.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res50_locust_160x160_20210407.log.json) | +| [pose_resnet_101](/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res101_8xb64-210e_locust-160x160.py) | 160x160 | 1.000 | 0.907 | 2.03 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res101_locust_160x160-d77986b3_20210407.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res101_locust_160x160_20210407.log.json) | +| [pose_resnet_152](/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res152_8xb32-210e_locust-160x160.py) | 160x160 | 1.000 | 0.925 | 1.49 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res152_locust_160x160-4ea9b372_20210407.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res152_locust_160x160_20210407.log.json) | diff --git a/configs/animal_2d_keypoint/topdown_heatmap/locust/resnet_locust.yml b/configs/animal_2d_keypoint/topdown_heatmap/locust/resnet_locust.yml index c7d174fafc..e05b37d0e5 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/locust/resnet_locust.yml +++ b/configs/animal_2d_keypoint/topdown_heatmap/locust/resnet_locust.yml @@ -1,45 +1,45 @@ -Models: -- Config: configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res50_8xb64-210e_locust-160x160.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: &id001 - - SimpleBaseline2D - - ResNet - Training Data: Desert Locust - Name: td-hm_res50_8xb64-210e_locust-160x160 - Results: - - Dataset: Desert Locust - Metrics: - AUC: 0.9 - EPE: 2.27 - PCK@0.2: 1 - Task: Animal 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/animal/resnet/res50_locust_160x160-9efca22b_20210407.pth -- Config: configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res101_8xb64-210e_locust-160x160.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: Desert Locust - Name: td-hm_res101_8xb64-210e_locust-160x160 - Results: - - Dataset: Desert Locust - Metrics: - AUC: 0.907 - EPE: 2.03 - PCK@0.2: 1 - Task: Animal 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/animal/resnet/res101_locust_160x160-d77986b3_20210407.pth -- Config: configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res152_8xb32-210e_locust-160x160.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: Desert Locust - Name: td-hm_res152_8xb32-210e_locust-160x160 - Results: - - Dataset: Desert Locust - Metrics: - AUC: 0.925 - EPE: 1.49 - PCK@0.2: 1.0 - Task: Animal 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/animal/resnet/res152_locust_160x160-4ea9b372_20210407.pth +Models: +- Config: configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res50_8xb64-210e_locust-160x160.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNet + Training Data: Desert Locust + Name: td-hm_res50_8xb64-210e_locust-160x160 + Results: + - Dataset: Desert Locust + Metrics: + AUC: 0.9 + EPE: 2.27 + 
PCK@0.2: 1 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/resnet/res50_locust_160x160-9efca22b_20210407.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res101_8xb64-210e_locust-160x160.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: Desert Locust + Name: td-hm_res101_8xb64-210e_locust-160x160 + Results: + - Dataset: Desert Locust + Metrics: + AUC: 0.907 + EPE: 2.03 + PCK@0.2: 1 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/resnet/res101_locust_160x160-d77986b3_20210407.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res152_8xb32-210e_locust-160x160.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: Desert Locust + Name: td-hm_res152_8xb32-210e_locust-160x160 + Results: + - Dataset: Desert Locust + Metrics: + AUC: 0.925 + EPE: 1.49 + PCK@0.2: 1.0 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/resnet/res152_locust_160x160-4ea9b372_20210407.pth diff --git a/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res101_8xb64-210e_locust-160x160.py b/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res101_8xb64-210e_locust-160x160.py index f6e6c2e39b..78816488b7 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res101_8xb64-210e_locust-160x160.py +++ b/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res101_8xb64-210e_locust-160x160.py @@ -1,124 +1,124 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(160, 160), heatmap_size=(40, 40), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=101, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=35, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'LocustDataset' -data_mode = 'topdown' -data_root = 'data/locust/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale', padding=0.8), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - shift_factor=0.25, - rotate_factor=180, - scale_factor=(0.7, 1.3)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale', padding=0.8), - dict(type='TopdownAffine', input_size=codec['input_size']), - 
dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/locust_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/locust_test.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(160, 160), heatmap_size=(40, 40), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=35, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'LocustDataset' +data_mode = 'topdown' +data_root = 'data/locust/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_factor=0.25, + rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/locust_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + 
data_root=data_root, + data_mode=data_mode, + ann_file='annotations/locust_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res152_8xb32-210e_locust-160x160.py b/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res152_8xb32-210e_locust-160x160.py index 8f0a58bc88..c7bdca70c7 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res152_8xb32-210e_locust-160x160.py +++ b/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res152_8xb32-210e_locust-160x160.py @@ -1,124 +1,124 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(160, 160), heatmap_size=(40, 40), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=152, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=35, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'LocustDataset' -data_mode = 'topdown' -data_root = 'data/locust/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale', padding=0.8), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - shift_factor=0.25, - rotate_factor=180, - scale_factor=(0.7, 1.3)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale', padding=0.8), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/locust_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/locust_test.json', - data_prefix=dict(img='images/'), - test_mode=True, - 
pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(160, 160), heatmap_size=(40, 40), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=35, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'LocustDataset' +data_mode = 'topdown' +data_root = 'data/locust/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_factor=0.25, + rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/locust_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/locust_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res50_8xb64-210e_locust-160x160.py b/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res50_8xb64-210e_locust-160x160.py index adbb89ee5b..309af143e4 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res50_8xb64-210e_locust-160x160.py +++ 
b/configs/animal_2d_keypoint/topdown_heatmap/locust/td-hm_res50_8xb64-210e_locust-160x160.py @@ -1,124 +1,124 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(160, 160), heatmap_size=(40, 40), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=35, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'LocustDataset' -data_mode = 'topdown' -data_root = 'data/locust/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale', padding=0.8), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - shift_factor=0.25, - rotate_factor=180, - scale_factor=(0.7, 1.3)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale', padding=0.8), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/locust_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/locust_test.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual 
training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(160, 160), heatmap_size=(40, 40), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=35, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'LocustDataset' +data_mode = 'topdown' +data_root = 'data/locust/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_factor=0.25, + rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/locust_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/locust_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/animal_2d_keypoint/topdown_heatmap/zebra/resnet_zebra.md b/configs/animal_2d_keypoint/topdown_heatmap/zebra/resnet_zebra.md index 0c12aed0f3..c49c11edf2 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/zebra/resnet_zebra.md +++ b/configs/animal_2d_keypoint/topdown_heatmap/zebra/resnet_zebra.md @@ -1,43 +1,43 @@ - - -
-SimpleBaseline2D (ECCV'2018) - -```bibtex -@inproceedings{xiao2018simple, - title={Simple baselines for human pose estimation and tracking}, - author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, - booktitle={Proceedings of the European conference on computer vision (ECCV)}, - pages={466--481}, - year={2018} -} -``` - -
- - - -
-Grévy’s Zebra (Elife'2019) - -```bibtex -@article{graving2019deepposekit, - title={DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning}, - author={Graving, Jacob M and Chae, Daniel and Naik, Hemal and Li, Liang and Koger, Benjamin and Costelloe, Blair R and Couzin, Iain D}, - journal={Elife}, - volume={8}, - pages={e47994}, - year={2019}, - publisher={eLife Sciences Publications Limited} -} -``` - -
- -Results on Grévy’s Zebra test set - -| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | -| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | -| [pose_resnet_50](/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res50_8xb64-210e_zebra-160x160.py) | 160x160 | 1.000 | 0.914 | 1.87 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res50_zebra_160x160-5a104833_20210407.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res50_zebra_160x160_20210407.log.json) | -| [pose_resnet_101](/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res101_8xb64-210e_zebra-160x160.py) | 160x160 | 1.000 | 0.915 | 1.83 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res101_zebra_160x160-e8cb2010_20210407.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res101_zebra_160x160_20210407.log.json) | -| [pose_resnet_152](/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res152_8xb32-210e_zebra-160x160.py) | 160x160 | 1.000 | 0.921 | 1.67 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res152_zebra_160x160-05de71dd_20210407.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res152_zebra_160x160_20210407.log.json) | + + +
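
> **Editor's note (not part of the diff):** the zebra configs follow the same layout as the locust ones above. As a sketch, assuming `mmengine` is installed and the repository paths shown in this diff, a config can be loaded and lightly overridden in Python instead of being edited on disk; the `data/my_zebra/` directory and the dumped filename below are hypothetical.

```python
# Minimal sketch assuming an mmengine installation; illustrative only.
from mmengine.config import Config

cfg = Config.fromfile(
    'configs/animal_2d_keypoint/topdown_heatmap/zebra/'
    'td-hm_res50_8xb64-210e_zebra-160x160.py')

# Fields defined in the config and inherited from _base_/default_runtime.py
# are available as attributes after parsing.
print(cfg.model.head.out_channels)      # 9 keypoints for Grévy's zebra
print(cfg.train_dataloader.batch_size)  # 64

# Point the dataset at a custom location without touching the file itself.
cfg.train_dataloader.dataset.data_root = 'data/my_zebra/'  # hypothetical path
cfg.val_dataloader.dataset.data_root = 'data/my_zebra/'
cfg.dump('td-hm_res50_zebra_custom.py')  # write the merged config back out
```
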
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+Grévy’s Zebra (Elife'2019) + +```bibtex +@article{graving2019deepposekit, + title={DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning}, + author={Graving, Jacob M and Chae, Daniel and Naik, Hemal and Li, Liang and Koger, Benjamin and Costelloe, Blair R and Couzin, Iain D}, + journal={Elife}, + volume={8}, + pages={e47994}, + year={2019}, + publisher={eLife Sciences Publications Limited} +} +``` + +
+ +Results on Grévy’s Zebra test set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [pose_resnet_50](/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res50_8xb64-210e_zebra-160x160.py) | 160x160 | 1.000 | 0.914 | 1.87 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res50_zebra_160x160-5a104833_20210407.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res50_zebra_160x160_20210407.log.json) | +| [pose_resnet_101](/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res101_8xb64-210e_zebra-160x160.py) | 160x160 | 1.000 | 0.915 | 1.83 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res101_zebra_160x160-e8cb2010_20210407.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res101_zebra_160x160_20210407.log.json) | +| [pose_resnet_152](/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res152_8xb32-210e_zebra-160x160.py) | 160x160 | 1.000 | 0.921 | 1.67 | [ckpt](https://download.openmmlab.com/mmpose/animal/resnet/res152_zebra_160x160-05de71dd_20210407.pth) | [log](https://download.openmmlab.com/mmpose/animal/resnet/res152_zebra_160x160_20210407.log.json) | diff --git a/configs/animal_2d_keypoint/topdown_heatmap/zebra/resnet_zebra.yml b/configs/animal_2d_keypoint/topdown_heatmap/zebra/resnet_zebra.yml index 3ecedc9700..68ffbe7f62 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/zebra/resnet_zebra.yml +++ b/configs/animal_2d_keypoint/topdown_heatmap/zebra/resnet_zebra.yml @@ -1,45 +1,45 @@ -Models: -- Config: configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res50_8xb64-210e_zebra-160x160.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: &id001 - - SimpleBaseline2D - - ResNet - Training Data: "Gr\xE9vy\u2019s Zebra" - Name: td-hm_res50_8xb64-210e_zebra-160x160 - Results: - - Dataset: "Gr\xE9vy\u2019s Zebra" - Metrics: - AUC: 0.914 - EPE: 1.87 - PCK@0.2: 1.0 - Task: Animal 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/animal/resnet/res50_zebra_160x160-5a104833_20210407.pth -- Config: configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res101_8xb64-210e_zebra-160x160.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: "Gr\xE9vy\u2019s Zebra" - Name: td-hm_res101_8xb64-210e_zebra-160x160 - Results: - - Dataset: "Gr\xE9vy\u2019s Zebra" - Metrics: - AUC: 0.915 - EPE: 1.83 - PCK@0.2: 1.0 - Task: Animal 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/animal/resnet/res101_zebra_160x160-e8cb2010_20210407.pth -- Config: configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res152_8xb32-210e_zebra-160x160.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: "Gr\xE9vy\u2019s Zebra" - Name: td-hm_res152_8xb32-210e_zebra-160x160 - Results: - - Dataset: "Gr\xE9vy\u2019s Zebra" - Metrics: - AUC: 0.921 - EPE: 1.67 - PCK@0.2: 1.0 - Task: Animal 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/animal/resnet/res152_zebra_160x160-05de71dd_20210407.pth +Models: +- Config: configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res50_8xb64-210e_zebra-160x160.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNet + Training Data: "Gr\xE9vy\u2019s Zebra" + Name: td-hm_res50_8xb64-210e_zebra-160x160 + Results: + - Dataset: 
"Gr\xE9vy\u2019s Zebra" + Metrics: + AUC: 0.914 + EPE: 1.87 + PCK@0.2: 1.0 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/resnet/res50_zebra_160x160-5a104833_20210407.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res101_8xb64-210e_zebra-160x160.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: "Gr\xE9vy\u2019s Zebra" + Name: td-hm_res101_8xb64-210e_zebra-160x160 + Results: + - Dataset: "Gr\xE9vy\u2019s Zebra" + Metrics: + AUC: 0.915 + EPE: 1.83 + PCK@0.2: 1.0 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/resnet/res101_zebra_160x160-e8cb2010_20210407.pth +- Config: configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res152_8xb32-210e_zebra-160x160.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: "Gr\xE9vy\u2019s Zebra" + Name: td-hm_res152_8xb32-210e_zebra-160x160 + Results: + - Dataset: "Gr\xE9vy\u2019s Zebra" + Metrics: + AUC: 0.921 + EPE: 1.67 + PCK@0.2: 1.0 + Task: Animal 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/animal/resnet/res152_zebra_160x160-05de71dd_20210407.pth diff --git a/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res101_8xb64-210e_zebra-160x160.py b/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res101_8xb64-210e_zebra-160x160.py index 68c56d80fb..9a22a339aa 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res101_8xb64-210e_zebra-160x160.py +++ b/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res101_8xb64-210e_zebra-160x160.py @@ -1,124 +1,124 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(160, 160), heatmap_size=(40, 40), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=101, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=9, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'ZebraDataset' -data_mode = 'topdown' -data_root = 'data/zebra/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale', padding=0.8), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - shift_factor=0.25, - rotate_factor=180, - scale_factor=(0.7, 1.3)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale', 
padding=0.8), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/zebra_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/zebra_test.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(160, 160), heatmap_size=(40, 40), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=9, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'ZebraDataset' +data_mode = 'topdown' +data_root = 'data/zebra/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_factor=0.25, + rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/zebra_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', 
shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/zebra_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res152_8xb32-210e_zebra-160x160.py b/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res152_8xb32-210e_zebra-160x160.py index abb14eefb8..d1840b818f 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res152_8xb32-210e_zebra-160x160.py +++ b/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res152_8xb32-210e_zebra-160x160.py @@ -1,124 +1,124 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(160, 160), heatmap_size=(40, 40), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=152, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=9, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'ZebraDataset' -data_mode = 'topdown' -data_root = 'data/zebra/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale', padding=0.8), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - shift_factor=0.25, - rotate_factor=180, - scale_factor=(0.7, 1.3)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale', padding=0.8), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/zebra_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/zebra_test.json', - 
data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(160, 160), heatmap_size=(40, 40), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=9, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'ZebraDataset' +data_mode = 'topdown' +data_root = 'data/zebra/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_factor=0.25, + rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/zebra_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/zebra_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res50_8xb64-210e_zebra-160x160.py b/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res50_8xb64-210e_zebra-160x160.py index e4d2777751..f9dc0e35ad 100644 --- a/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res50_8xb64-210e_zebra-160x160.py +++ 
b/configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res50_8xb64-210e_zebra-160x160.py @@ -1,124 +1,124 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(160, 160), heatmap_size=(40, 40), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=9, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'ZebraDataset' -data_mode = 'topdown' -data_root = 'data/zebra/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale', padding=0.8), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - shift_factor=0.25, - rotate_factor=180, - scale_factor=(0.7, 1.3)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale', padding=0.8), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/zebra_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/zebra_test.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch 
size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(160, 160), heatmap_size=(40, 40), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=9, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'ZebraDataset' +data_mode = 'topdown' +data_root = 'data/zebra/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_factor=0.25, + rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/zebra_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/zebra_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/README.md b/configs/body_2d_keypoint/README.md index d005d3fed7..15f244a460 100644 --- a/configs/body_2d_keypoint/README.md +++ b/configs/body_2d_keypoint/README.md @@ -1,21 +1,21 @@ -# Human Body 2D Pose Estimation - -Multi-person human pose estimation is defined as the task of detecting the poses (or keypoints) of all people from an input image. - -Existing approaches can be categorized into top-down and bottom-up approaches. - -Top-down methods (e.g. DeepPose) divide the task into two stages: human detection and pose estimation. They perform human detection first, followed by single-person pose estimation given human bounding boxes. - -Bottom-up approaches (e.g. Associative Embedding) first detect all the keypoints and then group/associate them into person instances. - -## Data preparation - -Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_body_keypoint.md) to prepare data. - -## Demo - -Please follow [Demo](/demo/docs/en/2d_human_pose_demo.md#2d-human-pose-demo) to run demos. - -
-
-
+# Human Body 2D Pose Estimation + +Multi-person human pose estimation is defined as the task of detecting the poses (or keypoints) of all people from an input image. + +Existing approaches can be categorized into top-down and bottom-up approaches. + +Top-down methods (e.g. DeepPose) divide the task into two stages: human detection and pose estimation. They perform human detection first, followed by single-person pose estimation given human bounding boxes. + +Bottom-up approaches (e.g. Associative Embedding) first detect all the keypoints and then group/associate them into person instances. + +## Data preparation + +Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_body_keypoint.md) to prepare data. + +## Demo + +Please follow [Demo](/demo/docs/en/2d_human_pose_demo.md#2d-human-pose-demo) to run demos. + +
+
+
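The two-stage top-down pipeline described in the README above maps directly onto MMPose's inference helpers. The snippet below is a minimal sketch, assuming the MMPose 1.x `init_model`/`inference_topdown` API; it borrows the ResNet-50 zebra config and checkpoint listed earlier in this diff as a concrete top-down model, and the image path and bounding box are placeholders rather than files from this PR.

```python
# Minimal top-down inference sketch (assumes the MMPose 1.x Python API).
# Stage 1 (detection) is assumed to have already produced a bounding box;
# stage 2 estimates keypoints inside that box.
import numpy as np
from mmpose.apis import init_model, inference_topdown

cfg = 'configs/animal_2d_keypoint/topdown_heatmap/zebra/td-hm_res50_8xb64-210e_zebra-160x160.py'
ckpt = ('https://download.openmmlab.com/mmpose/animal/resnet/'
        'res50_zebra_160x160-5a104833_20210407.pth')

model = init_model(cfg, ckpt, device='cpu')

bboxes = np.array([[10, 20, 200, 180]], dtype=np.float32)  # placeholder box in xyxy format
results = inference_topdown(model, 'demo.jpg', bboxes=bboxes)  # 'demo.jpg' is a placeholder image
keypoints = results[0].pred_instances.keypoints  # (num_instances, num_keypoints, 2)
print(keypoints.shape)
```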
diff --git a/configs/body_2d_keypoint/associative_embedding/README.md b/configs/body_2d_keypoint/associative_embedding/README.md index 7f5fa8ea17..5592374d2f 100644 --- a/configs/body_2d_keypoint/associative_embedding/README.md +++ b/configs/body_2d_keypoint/associative_embedding/README.md @@ -1,9 +1,9 @@ -# Associative embedding: End-to-end learning for joint detection and grouping (AE) - -Associative Embedding is one of the most popular 2D bottom-up pose estimation approaches, that first detect all the keypoints and then group/associate them into person instances. - -In order to group all the predicted keypoints to individuals, a tag is also predicted for each detected keypoint. Tags of the same person are similar, while tags of different people are different. Thus the keypoints can be grouped according to the tags. - -
- -
+# Associative embedding: End-to-end learning for joint detection and grouping (AE) + +Associative Embedding is one of the most popular 2D bottom-up pose estimation approaches: it first detects all the keypoints and then groups/associates them into person instances. + +In order to group all the predicted keypoints into individuals, a tag is also predicted for each detected keypoint. Tags of the same person are similar, while tags of different people are different. Thus the keypoints can be grouped according to the tags. + +
+ +
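The tag-based grouping described above can be illustrated with a toy sketch: every detected keypoint carries a scalar embedding, and keypoints whose embeddings are close are assigned to the same person. This is only an illustration of the idea under simplified assumptions (greedy matching on 1-D tags), not MMPose's actual associative-embedding decoder.

```python
# Toy greedy grouping of keypoints by tag similarity (illustrative only).
import numpy as np

def group_by_tags(keypoints, tags, thr=1.0):
    """keypoints: iterable of (x, y, keypoint_id); tags: matching 1-D embeddings."""
    groups = []       # one dict per instance: keypoint_id -> (x, y)
    group_tags = []   # running mean tag of each instance
    for (x, y, kpt_id), tag in zip(keypoints, tags):
        dists = [abs(tag - t) for t in group_tags]
        if dists and min(dists) < thr:
            i = int(np.argmin(dists))          # join the closest existing instance
            groups[i][int(kpt_id)] = (x, y)
            group_tags[i] = 0.5 * (group_tags[i] + tag)
        else:                                  # tag far from all instances: start a new person
            groups.append({int(kpt_id): (x, y)})
            group_tags.append(tag)
    return groups

# Two keypoint types detected twice each, with well-separated tags -> 2 instances.
kpts = [(10, 10, 0), (12, 14, 1), (90, 40, 0), (92, 44, 1)]
tags = [0.10, 0.15, 2.00, 2.05]
print(len(group_by_tags(kpts, tags)))  # 2
```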
diff --git a/configs/body_2d_keypoint/associative_embedding/coco/ae_hrnet-w32_8xb24-300e_coco-512x512.py b/configs/body_2d_keypoint/associative_embedding/coco/ae_hrnet-w32_8xb24-300e_coco-512x512.py index 5adc1aac1a..dc76b5ae2f 100644 --- a/configs/body_2d_keypoint/associative_embedding/coco/ae_hrnet-w32_8xb24-300e_coco-512x512.py +++ b/configs/body_2d_keypoint/associative_embedding/coco/ae_hrnet-w32_8xb24-300e_coco-512x512.py @@ -1,159 +1,159 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=300, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=1.5e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=300, - milestones=[200, 260], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=192) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', interval=50)) - -# codec settings -codec = dict( - type='AssociativeEmbedding', - input_size=(512, 512), - heatmap_size=(128, 128), - sigma=2, - decode_keypoint_order=[ - 0, 1, 2, 3, 4, 5, 6, 11, 12, 7, 8, 9, 10, 13, 14, 15, 16 - ], - decode_max_instances=30) - -# model settings -model = dict( - type='BottomupPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='AssociativeEmbeddingHead', - in_channels=32, - num_keypoints=17, - tag_dim=1, - tag_per_keypoint=True, - deconv_out_channels=None, - keypoint_loss=dict(type='KeypointMSELoss', use_target_weight=True), - tag_loss=dict(type='AssociativeEmbeddingLoss', loss_weight=0.001), - # The heatmap will be resized to the input size before decoding - # if ``restore_heatmap_size==True`` - decoder=dict(codec, heatmap_size=codec['input_size'])), - test_cfg=dict( - multiscale_test=False, - flip_test=True, - shift_heatmap=True, - restore_heatmap_size=True, - align_corners=False)) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'bottomup' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [] -val_pipeline = [ - dict(type='LoadImage'), - dict( - type='BottomupResize', - input_size=codec['input_size'], - size_factor=32, - resize_mode='expand'), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=24, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) 
-val_dataloader = dict( - batch_size=1, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json', - nms_mode='none', - score_mode='keypoint', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=300, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1.5e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=300, + milestones=[200, 260], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=192) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', interval=50)) + +# codec settings +codec = dict( + type='AssociativeEmbedding', + input_size=(512, 512), + heatmap_size=(128, 128), + sigma=2, + decode_keypoint_order=[ + 0, 1, 2, 3, 4, 5, 6, 11, 12, 7, 8, 9, 10, 13, 14, 15, 16 + ], + decode_max_instances=30) + +# model settings +model = dict( + type='BottomupPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='AssociativeEmbeddingHead', + in_channels=32, + num_keypoints=17, + tag_dim=1, + tag_per_keypoint=True, + deconv_out_channels=None, + keypoint_loss=dict(type='KeypointMSELoss', use_target_weight=True), + tag_loss=dict(type='AssociativeEmbeddingLoss', loss_weight=0.001), + # The heatmap will be resized to the input size before decoding + # if ``restore_heatmap_size==True`` + decoder=dict(codec, heatmap_size=codec['input_size'])), + test_cfg=dict( + multiscale_test=False, + flip_test=True, + shift_heatmap=True, + restore_heatmap_size=True, + align_corners=False)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'bottomup' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [] +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', + input_size=codec['input_size'], + size_factor=32, + resize_mode='expand'), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=24, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, 
+ data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_mode='none', + score_mode='keypoint', +) +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/cid/coco/cid_hrnet-w32_8xb20-140e_coco-512x512.py b/configs/body_2d_keypoint/cid/coco/cid_hrnet-w32_8xb20-140e_coco-512x512.py index 955293dcb1..9c6c4ceea4 100644 --- a/configs/body_2d_keypoint/cid/coco/cid_hrnet-w32_8xb20-140e_coco-512x512.py +++ b/configs/body_2d_keypoint/cid/coco/cid_hrnet-w32_8xb20-140e_coco-512x512.py @@ -1,164 +1,164 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=140, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=1e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='MultiStepLR', - begin=0, - end=140, - milestones=[90, 120], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=160) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='DecoupledHeatmap', input_size=(512, 512), heatmap_size=(128, 128)) - -# model settings -model = dict( - type='BottomupPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256), - multiscale_output=True)), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='CIDHead', - in_channels=480, - num_keypoints=17, - gfd_channels=32, - coupled_heatmap_loss=dict(type='FocalHeatmapLoss', loss_weight=1.0), - decoupled_heatmap_loss=dict(type='FocalHeatmapLoss', loss_weight=4.0), - contrastive_loss=dict( - type='InfoNCELoss', temperature=0.05, loss_weight=1.0), - decoder=codec, - ), - train_cfg=dict(max_train_instances=200), - test_cfg=dict( - multiscale_test=False, - flip_test=True, - shift_heatmap=False, - align_corners=False)) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'bottomup' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='BottomupRandomAffine', 
input_size=codec['input_size']), - dict(type='RandomFlip', direction='horizontal'), - dict(type='GenerateTarget', encoder=codec), - dict(type='BottomupGetHeatmapMask'), - dict(type='PackPoseInputs'), -] -val_pipeline = [ - dict(type='LoadImage'), - dict( - type='BottomupResize', - input_size=codec['input_size'], - size_factor=64, - resize_mode='expand'), - dict( - type='PackPoseInputs', - meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', - 'img_shape', 'input_size', 'input_center', 'input_scale', - 'flip', 'flip_direction', 'flip_indices', 'raw_ann_info', - 'skeleton_links')) -] - -# data loaders -train_dataloader = dict( - batch_size=20, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=1, - num_workers=1, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json', - nms_thr=0.8, - score_mode='keypoint', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=140, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=140, + milestones=[90, 120], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=160) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='DecoupledHeatmap', input_size=(512, 512), heatmap_size=(128, 128)) + +# model settings +model = dict( + type='BottomupPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256), + multiscale_output=True)), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='CIDHead', + in_channels=480, + num_keypoints=17, + gfd_channels=32, + coupled_heatmap_loss=dict(type='FocalHeatmapLoss', loss_weight=1.0), + decoupled_heatmap_loss=dict(type='FocalHeatmapLoss', loss_weight=4.0), + contrastive_loss=dict( + type='InfoNCELoss', temperature=0.05, 
loss_weight=1.0), + decoder=codec, + ), + train_cfg=dict(max_train_instances=200), + test_cfg=dict( + multiscale_test=False, + flip_test=True, + shift_heatmap=False, + align_corners=False)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'bottomup' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='BottomupRandomAffine', input_size=codec['input_size']), + dict(type='RandomFlip', direction='horizontal'), + dict(type='GenerateTarget', encoder=codec), + dict(type='BottomupGetHeatmapMask'), + dict(type='PackPoseInputs'), +] +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', + input_size=codec['input_size'], + size_factor=64, + resize_mode='expand'), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', + 'img_shape', 'input_size', 'input_center', 'input_scale', + 'flip', 'flip_direction', 'flip_indices', 'raw_ann_info', + 'skeleton_links')) +] + +# data loaders +train_dataloader = dict( + batch_size=20, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_thr=0.8, + score_mode='keypoint', +) +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/cid/coco/cid_hrnet-w48_8xb20-140e_coco-512x512.py b/configs/body_2d_keypoint/cid/coco/cid_hrnet-w48_8xb20-140e_coco-512x512.py index a114088ae2..f8042b635c 100644 --- a/configs/body_2d_keypoint/cid/coco/cid_hrnet-w48_8xb20-140e_coco-512x512.py +++ b/configs/body_2d_keypoint/cid/coco/cid_hrnet-w48_8xb20-140e_coco-512x512.py @@ -1,164 +1,164 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=140, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=1e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='MultiStepLR', - begin=0, - end=140, - milestones=[90, 120], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=160) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='DecoupledHeatmap', input_size=(512, 512), heatmap_size=(128, 128)) - -# model settings -model = dict( - type='BottomupPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(48, 96)), - 
stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(48, 96, 192)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(48, 96, 192, 384), - multiscale_output=True)), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w48-8ef0771d.pth'), - ), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='CIDHead', - in_channels=720, - num_keypoints=17, - gfd_channels=48, - coupled_heatmap_loss=dict(type='FocalHeatmapLoss', loss_weight=1.0), - decoupled_heatmap_loss=dict(type='FocalHeatmapLoss', loss_weight=4.0), - contrastive_loss=dict( - type='InfoNCELoss', temperature=0.05, loss_weight=1.0), - decoder=codec, - ), - train_cfg=dict(max_train_instances=200), - test_cfg=dict( - multiscale_test=False, - flip_test=True, - shift_heatmap=False, - align_corners=False)) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'bottomup' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='BottomupRandomAffine', input_size=codec['input_size']), - dict(type='RandomFlip', direction='horizontal'), - dict(type='GenerateTarget', encoder=codec), - dict(type='BottomupGetHeatmapMask'), - dict(type='PackPoseInputs'), -] -val_pipeline = [ - dict(type='LoadImage'), - dict( - type='BottomupResize', - input_size=codec['input_size'], - size_factor=64, - resize_mode='expand'), - dict( - type='PackPoseInputs', - meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', - 'img_shape', 'input_size', 'input_center', 'input_scale', - 'flip', 'flip_direction', 'flip_indices', 'raw_ann_info', - 'skeleton_links')) -] - -# data loaders -train_dataloader = dict( - batch_size=20, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=1, - num_workers=1, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json', - nms_thr=0.8, - score_mode='keypoint', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=140, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=140, + milestones=[90, 120], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=160) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='DecoupledHeatmap', input_size=(512, 512), heatmap_size=(128, 128)) + +# model settings +model = dict( + type='BottomupPoseEstimator', + data_preprocessor=dict( + 
type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384), + multiscale_output=True)), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='CIDHead', + in_channels=720, + num_keypoints=17, + gfd_channels=48, + coupled_heatmap_loss=dict(type='FocalHeatmapLoss', loss_weight=1.0), + decoupled_heatmap_loss=dict(type='FocalHeatmapLoss', loss_weight=4.0), + contrastive_loss=dict( + type='InfoNCELoss', temperature=0.05, loss_weight=1.0), + decoder=codec, + ), + train_cfg=dict(max_train_instances=200), + test_cfg=dict( + multiscale_test=False, + flip_test=True, + shift_heatmap=False, + align_corners=False)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'bottomup' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='BottomupRandomAffine', input_size=codec['input_size']), + dict(type='RandomFlip', direction='horizontal'), + dict(type='GenerateTarget', encoder=codec), + dict(type='BottomupGetHeatmapMask'), + dict(type='PackPoseInputs'), +] +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', + input_size=codec['input_size'], + size_factor=64, + resize_mode='expand'), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', + 'img_shape', 'input_size', 'input_center', 'input_scale', + 'flip', 'flip_direction', 'flip_indices', 'raw_ann_info', + 'skeleton_links')) +] + +# data loaders +train_dataloader = dict( + batch_size=20, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_thr=0.8, + score_mode='keypoint', +) +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/cid/coco/hrnet_coco.md b/configs/body_2d_keypoint/cid/coco/hrnet_coco.md index f82cb04db0..97d83e2929 100644 --- a/configs/body_2d_keypoint/cid/coco/hrnet_coco.md +++ b/configs/body_2d_keypoint/cid/coco/hrnet_coco.md @@ -1,42 +1,42 @@ - - -
-CID (CVPR'2022) - -```bibtex -@InProceedings{Wang_2022_CVPR, - author = {Wang, Dongkai and Zhang, Shiliang}, - title = {Contextual Instance Decoupling for Robust Multi-Person Pose Estimation}, - booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, - month = {June}, - year = {2022}, - pages = {11060-11068} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 without multi-scale test - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [CID](/configs/body_2d_keypoint/cid/coco/cid_hrnet-w32_8xb20-140e_coco-512x512.py) | 512x512 | 0.704 | 0.894 | 0.775 | 0.753 | 0.928 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/cid/coco/cid_hrnet-w32_8xb20-140e_coco-512x512_42b7e6e6-20230207.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/cid/coco/cid_hrnet-w32_8xb20-140e_coco-512x512_20230207.json) | -| [CID](/configs/body_2d_keypoint/cid/coco/cid_hrnet-w48_8xb20-140e_coco-512x512.py) | 512x512 | 0.715 | 0.900 | 0.782 | 0.765 | 0.935 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/cid/coco/cid_hrnet-w48_8xb20-140e_coco-512x512_a36c3ecf-20230207.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/cid/coco/cid_hrnet-w48_8xb20-140e_coco-512x512_20230207.json) | + + +
+CID (CVPR'2022) + +```bibtex +@InProceedings{Wang_2022_CVPR, + author = {Wang, Dongkai and Zhang, Shiliang}, + title = {Contextual Instance Decoupling for Robust Multi-Person Pose Estimation}, + booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2022}, + pages = {11060-11068} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 without multi-scale test + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [CID](/configs/body_2d_keypoint/cid/coco/cid_hrnet-w32_8xb20-140e_coco-512x512.py) | 512x512 | 0.704 | 0.894 | 0.775 | 0.753 | 0.928 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/cid/coco/cid_hrnet-w32_8xb20-140e_coco-512x512_42b7e6e6-20230207.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/cid/coco/cid_hrnet-w32_8xb20-140e_coco-512x512_20230207.json) | +| [CID](/configs/body_2d_keypoint/cid/coco/cid_hrnet-w48_8xb20-140e_coco-512x512.py) | 512x512 | 0.715 | 0.900 | 0.782 | 0.765 | 0.935 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/cid/coco/cid_hrnet-w48_8xb20-140e_coco-512x512_a36c3ecf-20230207.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/cid/coco/cid_hrnet-w48_8xb20-140e_coco-512x512_20230207.json) | diff --git a/configs/body_2d_keypoint/cid/coco/hrnet_coco.yml b/configs/body_2d_keypoint/cid/coco/hrnet_coco.yml index b230d20e24..efd5ee67cc 100644 --- a/configs/body_2d_keypoint/cid/coco/hrnet_coco.yml +++ b/configs/body_2d_keypoint/cid/coco/hrnet_coco.yml @@ -1,41 +1,41 @@ -Collections: -- Name: CID - Paper: - Title: Contextual Instance Decoupling for Robust Multi-Person Pose Estimation - URL: https://openaccess.thecvf.com/content/CVPR2022/html/Wang_Contextual_Instance_Decoupling_for_Robust_Multi-Person_Pose_Estimation_CVPR_2022_paper.html - README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/cid.md -Models: -- Config: configs/body_2d_keypoint/cid/coco/cid_hrnet-w32_8xb20-140e_coco-512x512.py - In Collection: CID - Metadata: - Architecture: &id001 - - CID - - HRNet - Training Data: COCO - Name: cid_hrnet-w32_8xb20-140e_coco-512x512 - Results: - - Dataset: COCO - Metrics: - AP: 0.704 - AP@0.5: 0.894 - AP@0.75: 0.775 - AR: 0.753 - AR@0.5: 0.928 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/cid/coco/cid_hrnet-w32_8xb20-140e_coco-512x512_42b7e6e6-20230207.pth -- Config: configs/body_2d_keypoint/cid/coco/cid_hrnet-w48_8xb20-140e_coco-512x512.py - In Collection: CID - Metadata: - Architecture: *id001 - Training Data: COCO - Name: cid_hrnet-w48_8xb20-140e_coco-512x512 - Results: - - Dataset: COCO - Metrics: - AP: 0.715 - AP@0.5: 0.9 - AP@0.75: 0.782 - AR: 0.765 - AR@0.5: 0.935 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/cid/coco/cid_hrnet-w48_8xb20-140e_coco-512x512_a36c3ecf-20230207.pth +Collections: +- Name: CID + Paper: + Title: Contextual Instance Decoupling for Robust Multi-Person Pose Estimation + URL: https://openaccess.thecvf.com/content/CVPR2022/html/Wang_Contextual_Instance_Decoupling_for_Robust_Multi-Person_Pose_Estimation_CVPR_2022_paper.html + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/cid.md +Models: +- Config: configs/body_2d_keypoint/cid/coco/cid_hrnet-w32_8xb20-140e_coco-512x512.py + In Collection: CID + Metadata: + Architecture: &id001 + - CID + - HRNet + Training Data: COCO + Name: cid_hrnet-w32_8xb20-140e_coco-512x512 + Results: + - Dataset: COCO + Metrics: + AP: 0.704 + AP@0.5: 0.894 + AP@0.75: 0.775 + AR: 0.753 + AR@0.5: 0.928 + Task: Body 2D Keypoint + Weights: 
https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/cid/coco/cid_hrnet-w32_8xb20-140e_coco-512x512_42b7e6e6-20230207.pth +- Config: configs/body_2d_keypoint/cid/coco/cid_hrnet-w48_8xb20-140e_coco-512x512.py + In Collection: CID + Metadata: + Architecture: *id001 + Training Data: COCO + Name: cid_hrnet-w48_8xb20-140e_coco-512x512 + Results: + - Dataset: COCO + Metrics: + AP: 0.715 + AP@0.5: 0.9 + AP@0.75: 0.782 + AR: 0.765 + AR@0.5: 0.935 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/cid/coco/cid_hrnet-w48_8xb20-140e_coco-512x512_a36c3ecf-20230207.pth diff --git a/configs/body_2d_keypoint/dekr/README.md b/configs/body_2d_keypoint/dekr/README.md index 04726421c0..e30a9e91dd 100644 --- a/configs/body_2d_keypoint/dekr/README.md +++ b/configs/body_2d_keypoint/dekr/README.md @@ -1,22 +1,22 @@ -# Bottom-up Human Pose Estimation via Disentangled Keypoint Regression (DEKR) - - - -
-DEKR (CVPR'2021) - -```bibtex -@inproceedings{geng2021bottom, - title={Bottom-up human pose estimation via disentangled keypoint regression}, - author={Geng, Zigang and Sun, Ke and Xiao, Bin and Zhang, Zhaoxiang and Wang, Jingdong}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={14676--14686}, - year={2021} -} -``` - -
- -DEKR is a popular 2D bottom-up pose estimation approach that simultaneously detects all the instances and regresses the offsets from the instance centers to joints. - -In order to predict the offsets more accurately, the offsets of different joints are regressed using separated branches with deformable convolutional layers. Thus convolution kernels with different shapes are adopted to extract features for the corresponding joint. +# Bottom-up Human Pose Estimation via Disentangled Keypoint Regression (DEKR) + + + +
+DEKR (CVPR'2021) + +```bibtex +@inproceedings{geng2021bottom, + title={Bottom-up human pose estimation via disentangled keypoint regression}, + author={Geng, Zigang and Sun, Ke and Xiao, Bin and Zhang, Zhaoxiang and Wang, Jingdong}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={14676--14686}, + year={2021} +} +``` + +
+ +DEKR is a popular 2D bottom-up pose estimation approach that simultaneously detects all the instances and regresses the offsets from the instance centers to joints. + +In order to predict the offsets more accurately, the offsets of different joints are regressed using separated branches with deformable convolutional layers. Thus convolution kernels with different shapes are adopted to extract features for the corresponding joint. diff --git a/configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512.py b/configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512.py index 6f2d03a82f..37a49f5115 100644 --- a/configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512.py +++ b/configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512.py @@ -1,186 +1,186 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=140, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=1e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=140, - milestones=[90, 120], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=80) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='SPR', - input_size=(512, 512), - heatmap_size=(128, 128), - sigma=(4, 2), - minimal_diagonal_length=32**0.5, - generate_keypoint_heatmaps=True, - decode_max_instances=30) - -# model settings -model = dict( - type='BottomupPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256), - multiscale_output=True)), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='DEKRHead', - in_channels=480, - num_keypoints=17, - heatmap_loss=dict(type='KeypointMSELoss', use_target_weight=True), - displacement_loss=dict( - type='SoftWeightSmoothL1Loss', - use_target_weight=True, - supervise_empty=False, - beta=1 / 9, - loss_weight=0.002, - ), - decoder=codec, - rescore_cfg=dict( - in_channels=74, - norm_indexes=(5, 6), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/kpt_rescore_coco-33d58c5c.pth')), - ), - test_cfg=dict( - multiscale_test=False, - flip_test=True, - nms_dist_thr=0.05, - shift_heatmap=True, - align_corners=False)) - -# enable DDP training when rescore net is used -find_unused_parameters = True - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'bottomup' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - 
dict(type='LoadImage'), - dict(type='BottomupRandomAffine', input_size=codec['input_size']), - dict(type='RandomFlip', direction='horizontal'), - dict(type='GenerateTarget', encoder=codec), - dict(type='BottomupGetHeatmapMask'), - dict(type='PackPoseInputs'), -] -val_pipeline = [ - dict(type='LoadImage'), - dict( - type='BottomupResize', - input_size=codec['input_size'], - size_factor=32, - resize_mode='expand'), - dict( - type='PackPoseInputs', - meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', - 'img_shape', 'input_size', 'input_center', 'input_scale', - 'flip', 'flip_direction', 'flip_indices', 'raw_ann_info', - 'skeleton_links')) -] - -# data loaders -train_dataloader = dict( - batch_size=10, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=1, - num_workers=1, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json', - nms_mode='none', - score_mode='keypoint', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=140, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=140, + milestones=[90, 120], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=80) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='SPR', + input_size=(512, 512), + heatmap_size=(128, 128), + sigma=(4, 2), + minimal_diagonal_length=32**0.5, + generate_keypoint_heatmaps=True, + decode_max_instances=30) + +# model settings +model = dict( + type='BottomupPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256), + multiscale_output=True)), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='DEKRHead', + 
in_channels=480, + num_keypoints=17, + heatmap_loss=dict(type='KeypointMSELoss', use_target_weight=True), + displacement_loss=dict( + type='SoftWeightSmoothL1Loss', + use_target_weight=True, + supervise_empty=False, + beta=1 / 9, + loss_weight=0.002, + ), + decoder=codec, + rescore_cfg=dict( + in_channels=74, + norm_indexes=(5, 6), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/kpt_rescore_coco-33d58c5c.pth')), + ), + test_cfg=dict( + multiscale_test=False, + flip_test=True, + nms_dist_thr=0.05, + shift_heatmap=True, + align_corners=False)) + +# enable DDP training when rescore net is used +find_unused_parameters = True + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'bottomup' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='BottomupRandomAffine', input_size=codec['input_size']), + dict(type='RandomFlip', direction='horizontal'), + dict(type='GenerateTarget', encoder=codec), + dict(type='BottomupGetHeatmapMask'), + dict(type='PackPoseInputs'), +] +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', + input_size=codec['input_size'], + size_factor=32, + resize_mode='expand'), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', + 'img_shape', 'input_size', 'input_center', 'input_scale', + 'flip', 'flip_direction', 'flip_indices', 'raw_ann_info', + 'skeleton_links')) +] + +# data loaders +train_dataloader = dict( + batch_size=10, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_mode='none', + score_mode='keypoint', +) +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w48_8xb10-140e_coco-640x640.py b/configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w48_8xb10-140e_coco-640x640.py index 776a6bb039..1594dabf06 100644 --- a/configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w48_8xb10-140e_coco-640x640.py +++ b/configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w48_8xb10-140e_coco-640x640.py @@ -1,187 +1,187 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=140, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=1e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=140, - milestones=[90, 120], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=80) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec 
settings -codec = dict( - type='SPR', - input_size=(640, 640), - heatmap_size=(160, 160), - sigma=(4, 2), - minimal_diagonal_length=32**0.5, - generate_keypoint_heatmaps=True, - decode_max_instances=30) - -# model settings -model = dict( - type='BottomupPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(48, 96)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(48, 96, 192)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(48, 96, 192, 384), - multiscale_output=True)), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w48-8ef0771d.pth'), - ), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='DEKRHead', - in_channels=720, - num_keypoints=17, - num_heatmap_filters=48, - heatmap_loss=dict(type='KeypointMSELoss', use_target_weight=True), - displacement_loss=dict( - type='SoftWeightSmoothL1Loss', - use_target_weight=True, - supervise_empty=False, - beta=1 / 9, - loss_weight=0.002, - ), - decoder=codec, - rescore_cfg=dict( - in_channels=74, - norm_indexes=(5, 6), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/kpt_rescore_coco-33d58c5c.pth')), - ), - test_cfg=dict( - multiscale_test=False, - flip_test=True, - nms_dist_thr=0.05, - shift_heatmap=True, - align_corners=False)) - -# enable DDP training when rescore net is used -find_unused_parameters = True - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'bottomup' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='BottomupRandomAffine', input_size=codec['input_size']), - dict(type='RandomFlip', direction='horizontal'), - dict(type='GenerateTarget', encoder=codec), - dict(type='BottomupGetHeatmapMask'), - dict(type='PackPoseInputs'), -] -val_pipeline = [ - dict(type='LoadImage'), - dict( - type='BottomupResize', - input_size=codec['input_size'], - size_factor=32, - resize_mode='expand'), - dict( - type='PackPoseInputs', - meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', - 'img_shape', 'input_size', 'input_center', 'input_scale', - 'flip', 'flip_direction', 'flip_indices', 'raw_ann_info', - 'skeleton_links')) -] - -# data loaders -train_dataloader = dict( - batch_size=10, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=1, - num_workers=1, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# 
evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json', - nms_mode='none', - score_mode='keypoint', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=140, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=140, + milestones=[90, 120], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=80) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='SPR', + input_size=(640, 640), + heatmap_size=(160, 160), + sigma=(4, 2), + minimal_diagonal_length=32**0.5, + generate_keypoint_heatmaps=True, + decode_max_instances=30) + +# model settings +model = dict( + type='BottomupPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384), + multiscale_output=True)), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='DEKRHead', + in_channels=720, + num_keypoints=17, + num_heatmap_filters=48, + heatmap_loss=dict(type='KeypointMSELoss', use_target_weight=True), + displacement_loss=dict( + type='SoftWeightSmoothL1Loss', + use_target_weight=True, + supervise_empty=False, + beta=1 / 9, + loss_weight=0.002, + ), + decoder=codec, + rescore_cfg=dict( + in_channels=74, + norm_indexes=(5, 6), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/kpt_rescore_coco-33d58c5c.pth')), + ), + test_cfg=dict( + multiscale_test=False, + flip_test=True, + nms_dist_thr=0.05, + shift_heatmap=True, + align_corners=False)) + +# enable DDP training when rescore net is used +find_unused_parameters = True + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'bottomup' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='BottomupRandomAffine', input_size=codec['input_size']), + dict(type='RandomFlip', direction='horizontal'), + dict(type='GenerateTarget', encoder=codec), + dict(type='BottomupGetHeatmapMask'), + dict(type='PackPoseInputs'), +] +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', + input_size=codec['input_size'], + size_factor=32, + resize_mode='expand'), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', + 'img_shape', 'input_size', 'input_center', 'input_scale', + 'flip', 'flip_direction', 
'flip_indices', 'raw_ann_info', + 'skeleton_links')) +] + +# data loaders +train_dataloader = dict( + batch_size=10, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_mode='none', + score_mode='keypoint', +) +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/dekr/coco/hrnet_coco.md b/configs/body_2d_keypoint/dekr/coco/hrnet_coco.md index 648b9bc735..bb2e279a21 100644 --- a/configs/body_2d_keypoint/dekr/coco/hrnet_coco.md +++ b/configs/body_2d_keypoint/dekr/coco/hrnet_coco.md @@ -1,58 +1,58 @@ - - -
-DEKR (CVPR'2021) - -```bibtex -@inproceedings{geng2021bottom, - title={Bottom-up human pose estimation via disentangled keypoint regression}, - author={Geng, Zigang and Sun, Ke and Xiao, Bin and Zhang, Zhaoxiang and Wang, Jingdong}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={14676--14686}, - year={2021} -} -``` - -
- - - -
-HRNet (CVPR'2019) - -```bibtex -@inproceedings{sun2019deep, - title={Deep high-resolution representation learning for human pose estimation}, - author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={5693--5703}, - year={2019} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 without multi-scale test - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [HRNet-w32](/configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512.py) | 512x512 | 0.686 | 0.868 | 0.750 | 0.735 | 0.898 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512_ac7c17bf-20221228.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512_20221228.json) | -| [HRNet-w48](/configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w48_8xb10-140e_coco-640x640.py) | 640x640 | 0.714 | 0.883 | 0.777 | 0.762 | 0.915 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/dekr_hrnet-w48_8xb10-140e_coco-640x640_74796c32-20230124.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/dekr_hrnet-w48_8xb10-140e_coco-640x640_20230124.json) | + + +
+DEKR (CVPR'2021) + +```bibtex +@inproceedings{geng2021bottom, + title={Bottom-up human pose estimation via disentangled keypoint regression}, + author={Geng, Zigang and Sun, Ke and Xiao, Bin and Zhang, Zhaoxiang and Wang, Jingdong}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={14676--14686}, + year={2021} +} +``` + +
+ + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 without multi-scale test + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [HRNet-w32](/configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512.py) | 512x512 | 0.686 | 0.868 | 0.750 | 0.735 | 0.898 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512_ac7c17bf-20221228.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512_20221228.json) | +| [HRNet-w48](/configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w48_8xb10-140e_coco-640x640.py) | 640x640 | 0.714 | 0.883 | 0.777 | 0.762 | 0.915 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/dekr_hrnet-w48_8xb10-140e_coco-640x640_74796c32-20230124.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/dekr_hrnet-w48_8xb10-140e_coco-640x640_20230124.json) | diff --git a/configs/body_2d_keypoint/dekr/coco/hrnet_coco.yml b/configs/body_2d_keypoint/dekr/coco/hrnet_coco.yml index 0246b0723b..f34a91d63a 100644 --- a/configs/body_2d_keypoint/dekr/coco/hrnet_coco.yml +++ b/configs/body_2d_keypoint/dekr/coco/hrnet_coco.yml @@ -1,41 +1,41 @@ -Collections: -- Name: DEKR - Paper: - Title: Bottom-up human pose estimation via disentangled keypoint regression - URL: https://arxiv.org/abs/2104.02300 - README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/dekr.md -Models: -- Config: configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512.py - In Collection: DEKR - Metadata: - Architecture: &id001 - - DEKR - - HRNet - Training Data: COCO - Name: dekr_hrnet-w32_8xb10-140e_coco-512x512 - Results: - - Dataset: COCO - Metrics: - AP: 0.686 - AP@0.5: 0.868 - AP@0.75: 0.750 - AR: 0.735 - AR@0.5: 0.898 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512_ac7c17bf-20221228.pth -- Config: configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w48_8xb10-140e_coco-640x640.py - In Collection: DEKR - Metadata: - Architecture: *id001 - Training Data: COCO - Name: dekr_hrnet-w48_8xb10-140e_coco-640x640 - Results: - - Dataset: COCO - Metrics: - AP: 0.714 - AP@0.5: 0.883 - AP@0.75: 0.777 - AR: 0.762 - AR@0.5: 0.915 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/dekr_hrnet-w48_8xb10-140e_coco-640x640_74796c32-20230124.pth +Collections: +- Name: DEKR + Paper: + Title: Bottom-up human pose estimation via disentangled keypoint regression + URL: https://arxiv.org/abs/2104.02300 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/dekr.md +Models: +- Config: configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512.py + In Collection: DEKR + Metadata: + Architecture: &id001 + - DEKR + - HRNet + Training Data: COCO + Name: dekr_hrnet-w32_8xb10-140e_coco-512x512 + Results: + - Dataset: COCO + Metrics: + AP: 0.686 + AP@0.5: 0.868 + AP@0.75: 0.750 + AR: 0.735 + AR@0.5: 0.898 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512_ac7c17bf-20221228.pth +- Config: 
configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w48_8xb10-140e_coco-640x640.py + In Collection: DEKR + Metadata: + Architecture: *id001 + Training Data: COCO + Name: dekr_hrnet-w48_8xb10-140e_coco-640x640 + Results: + - Dataset: COCO + Metrics: + AP: 0.714 + AP@0.5: 0.883 + AP@0.75: 0.777 + AR: 0.762 + AR@0.5: 0.915 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/dekr_hrnet-w48_8xb10-140e_coco-640x640_74796c32-20230124.pth diff --git a/configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w32_8xb10-300e_crowdpose-512x512.py b/configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w32_8xb10-300e_crowdpose-512x512.py index c00f0459de..545a174080 100644 --- a/configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w32_8xb10-300e_crowdpose-512x512.py +++ b/configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w32_8xb10-300e_crowdpose-512x512.py @@ -1,187 +1,187 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=300, val_interval=20) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=1e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=300, - milestones=[200, 260], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=80) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='crowdpose/AP', rule='greater')) - -# codec settings -codec = dict( - type='SPR', - input_size=(512, 512), - heatmap_size=(128, 128), - sigma=(4, 2), - minimal_diagonal_length=32**0.5, - generate_keypoint_heatmaps=True, - decode_max_instances=30) - -# model settings -model = dict( - type='BottomupPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256), - multiscale_output=True)), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='DEKRHead', - in_channels=480, - num_keypoints=14, - heatmap_loss=dict(type='KeypointMSELoss', use_target_weight=True), - displacement_loss=dict( - type='SoftWeightSmoothL1Loss', - use_target_weight=True, - supervise_empty=False, - beta=1 / 9, - loss_weight=0.004, - ), - decoder=codec, - rescore_cfg=dict( - in_channels=59, - norm_indexes=(0, 1), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/kpt_rescore_crowdpose-300c7efe.pth')), - ), - test_cfg=dict( - multiscale_test=False, - flip_test=True, - nms_dist_thr=0.05, - shift_heatmap=True, - align_corners=False)) - -# enable DDP training when rescore net is used -find_unused_parameters = True - -# base dataset settings -dataset_type = 'CrowdPoseDataset' 
-data_mode = 'bottomup' -data_root = 'data/crowdpose/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='BottomupRandomAffine', input_size=codec['input_size']), - dict(type='RandomFlip', direction='horizontal'), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs'), -] -val_pipeline = [ - dict(type='LoadImage'), - dict( - type='BottomupResize', - input_size=codec['input_size'], - size_factor=32, - resize_mode='expand'), - dict( - type='PackPoseInputs', - meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', - 'img_shape', 'input_size', 'input_center', 'input_scale', - 'flip', 'flip_direction', 'flip_indices', 'raw_ann_info', - 'skeleton_links')) -] - -# data loaders -train_dataloader = dict( - batch_size=10, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=1, - num_workers=1, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mmpose_crowdpose_test.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/mmpose_crowdpose_test.json', - nms_mode='none', - score_mode='keypoint', - use_area=False, - iou_type='keypoints_crowd', - prefix='crowdpose') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=300, val_interval=20) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=300, + milestones=[200, 260], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=80) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='crowdpose/AP', rule='greater')) + +# codec settings +codec = dict( + type='SPR', + input_size=(512, 512), + heatmap_size=(128, 128), + sigma=(4, 2), + minimal_diagonal_length=32**0.5, + generate_keypoint_heatmaps=True, + decode_max_instances=30) + +# model settings +model = dict( + type='BottomupPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256), + multiscale_output=True)), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 
'pretrain_models/hrnet_w32-36af842e.pth'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='DEKRHead', + in_channels=480, + num_keypoints=14, + heatmap_loss=dict(type='KeypointMSELoss', use_target_weight=True), + displacement_loss=dict( + type='SoftWeightSmoothL1Loss', + use_target_weight=True, + supervise_empty=False, + beta=1 / 9, + loss_weight=0.004, + ), + decoder=codec, + rescore_cfg=dict( + in_channels=59, + norm_indexes=(0, 1), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/kpt_rescore_crowdpose-300c7efe.pth')), + ), + test_cfg=dict( + multiscale_test=False, + flip_test=True, + nms_dist_thr=0.05, + shift_heatmap=True, + align_corners=False)) + +# enable DDP training when rescore net is used +find_unused_parameters = True + +# base dataset settings +dataset_type = 'CrowdPoseDataset' +data_mode = 'bottomup' +data_root = 'data/crowdpose/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='BottomupRandomAffine', input_size=codec['input_size']), + dict(type='RandomFlip', direction='horizontal'), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', + input_size=codec['input_size'], + size_factor=32, + resize_mode='expand'), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', + 'img_shape', 'input_size', 'input_center', 'input_scale', + 'flip', 'flip_direction', 'flip_indices', 'raw_ann_info', + 'skeleton_links')) +] + +# data loaders +train_dataloader = dict( + batch_size=10, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/mmpose_crowdpose_test.json', + nms_mode='none', + score_mode='keypoint', + use_area=False, + iou_type='keypoints_crowd', + prefix='crowdpose') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w48_8xb5-300e_crowdpose-640x640.py b/configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w48_8xb5-300e_crowdpose-640x640.py index 31d637299a..0952478f62 100644 --- a/configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w48_8xb5-300e_crowdpose-640x640.py +++ b/configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w48_8xb5-300e_crowdpose-640x640.py @@ -1,188 +1,188 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=300, val_interval=20) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=1e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=300, - milestones=[200, 260], - gamma=0.1, - by_epoch=True) -] - 
-# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=40) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='crowdpose/AP', rule='greater')) - -# codec settings -codec = dict( - type='SPR', - input_size=(640, 640), - heatmap_size=(160, 160), - sigma=(4, 2), - minimal_diagonal_length=32**0.5, - generate_keypoint_heatmaps=True, - decode_max_instances=30) - -# model settings -model = dict( - type='BottomupPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(48, 96)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(48, 96, 192)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(48, 96, 192, 384), - multiscale_output=True)), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w48-8ef0771d.pth'), - ), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='DEKRHead', - in_channels=720, - num_keypoints=14, - num_heatmap_filters=48, - heatmap_loss=dict(type='KeypointMSELoss', use_target_weight=True), - displacement_loss=dict( - type='SoftWeightSmoothL1Loss', - use_target_weight=True, - supervise_empty=False, - beta=1 / 9, - loss_weight=0.004, - ), - decoder=codec, - rescore_cfg=dict( - in_channels=59, - norm_indexes=(0, 1), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/kpt_rescore_crowdpose-300c7efe.pth')), - ), - test_cfg=dict( - multiscale_test=False, - flip_test=True, - nms_dist_thr=0.05, - shift_heatmap=True, - align_corners=False)) - -# enable DDP training when rescore net is used -find_unused_parameters = True - -# base dataset settings -dataset_type = 'CrowdPoseDataset' -data_mode = 'bottomup' -data_root = 'data/crowdpose/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='BottomupRandomAffine', input_size=codec['input_size']), - dict(type='RandomFlip', direction='horizontal'), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs'), -] -val_pipeline = [ - dict(type='LoadImage'), - dict( - type='BottomupResize', - input_size=codec['input_size'], - size_factor=32, - resize_mode='expand'), - dict( - type='PackPoseInputs', - meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', - 'img_shape', 'input_size', 'input_center', 'input_scale', - 'flip', 'flip_direction', 'flip_indices', 'raw_ann_info', - 'skeleton_links')) -] - -# data loaders -train_dataloader = dict( - batch_size=5, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=1, - num_workers=1, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - 
data_mode=data_mode, - ann_file='annotations/mmpose_crowdpose_test.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/mmpose_crowdpose_test.json', - nms_mode='none', - score_mode='keypoint', - use_area=False, - iou_type='keypoints_crowd', - prefix='crowdpose') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=300, val_interval=20) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=300, + milestones=[200, 260], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=40) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='crowdpose/AP', rule='greater')) + +# codec settings +codec = dict( + type='SPR', + input_size=(640, 640), + heatmap_size=(160, 160), + sigma=(4, 2), + minimal_diagonal_length=32**0.5, + generate_keypoint_heatmaps=True, + decode_max_instances=30) + +# model settings +model = dict( + type='BottomupPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384), + multiscale_output=True)), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='DEKRHead', + in_channels=720, + num_keypoints=14, + num_heatmap_filters=48, + heatmap_loss=dict(type='KeypointMSELoss', use_target_weight=True), + displacement_loss=dict( + type='SoftWeightSmoothL1Loss', + use_target_weight=True, + supervise_empty=False, + beta=1 / 9, + loss_weight=0.004, + ), + decoder=codec, + rescore_cfg=dict( + in_channels=59, + norm_indexes=(0, 1), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/kpt_rescore_crowdpose-300c7efe.pth')), + ), + test_cfg=dict( + multiscale_test=False, + flip_test=True, + nms_dist_thr=0.05, + shift_heatmap=True, + align_corners=False)) + +# enable DDP training when rescore net is used +find_unused_parameters = True + +# base dataset settings +dataset_type = 'CrowdPoseDataset' +data_mode = 'bottomup' +data_root = 'data/crowdpose/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='BottomupRandomAffine', input_size=codec['input_size']), + dict(type='RandomFlip', direction='horizontal'), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs'), +] +val_pipeline = [ + dict(type='LoadImage'), + dict( + type='BottomupResize', + input_size=codec['input_size'], 
+ size_factor=32, + resize_mode='expand'), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', + 'img_shape', 'input_size', 'input_center', 'input_scale', + 'flip', 'flip_direction', 'flip_indices', 'raw_ann_info', + 'skeleton_links')) +] + +# data loaders +train_dataloader = dict( + batch_size=5, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/mmpose_crowdpose_test.json', + nms_mode='none', + score_mode='keypoint', + use_area=False, + iou_type='keypoints_crowd', + prefix='crowdpose') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/dekr/crowdpose/hrnet_crowdpose.md b/configs/body_2d_keypoint/dekr/crowdpose/hrnet_crowdpose.md index 0bbedbe696..9b1d251ba8 100644 --- a/configs/body_2d_keypoint/dekr/crowdpose/hrnet_crowdpose.md +++ b/configs/body_2d_keypoint/dekr/crowdpose/hrnet_crowdpose.md @@ -1,56 +1,56 @@ - - -
-DEKR (CVPR'2021) - -```bibtex -@inproceedings{geng2021bottom, - title={Bottom-up human pose estimation via disentangled keypoint regression}, - author={Geng, Zigang and Sun, Ke and Xiao, Bin and Zhang, Zhaoxiang and Wang, Jingdong}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={14676--14686}, - year={2021} -} -``` - -
- - - -
-HRNet (CVPR'2019) - -```bibtex -@inproceedings{sun2019deep, - title={Deep high-resolution representation learning for human pose estimation}, - author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={5693--5703}, - year={2019} -} -``` - -
- - - -
-CrowdPose (CVPR'2019) - -```bibtex -@article{li2018crowdpose, - title={CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark}, - author={Li, Jiefeng and Wang, Can and Zhu, Hao and Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu}, - journal={arXiv preprint arXiv:1812.00324}, - year={2018} -} -``` - -
- -Results on CrowdPose test without multi-scale test - -| Arch | Input Size | AP | AP50 | AP75 | AP (E) | AP (M) | AP (H) | ckpt | log | -| :--------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :----: | :----: | :----: | :--------------------------------------------: | :-------------------------------------------: | -| [HRNet-w32](/configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w32_8xb10-300e_crowdpose-512x512.py) | 512x512 | 0.663 | 0.857 | 0.714 | 0.740 | 0.671 | 0.576 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w32_8xb10-300e_crowdpose-512x512_147bae97-20221228.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w32_8xb10-300e_crowdpose-512x512_20221228.json) | -| [HRNet-w48](/configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w48_8xb5-300e_crowdpose-640x640.py) | 640x640 | 0.679 | 0.869 | 0.731 | 0.753 | 0.688 | 0.593 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w48_8xb5-300e_crowdpose-640x640_4ea6031e-20230128.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w48_8xb5-300e_crowdpose-640x640_20230128.json) | + + +
+DEKR (CVPR'2021) + +```bibtex +@inproceedings{geng2021bottom, + title={Bottom-up human pose estimation via disentangled keypoint regression}, + author={Geng, Zigang and Sun, Ke and Xiao, Bin and Zhang, Zhaoxiang and Wang, Jingdong}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={14676--14686}, + year={2021} +} +``` + +
+ + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+CrowdPose (CVPR'2019) + +```bibtex +@article{li2018crowdpose, + title={CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark}, + author={Li, Jiefeng and Wang, Can and Zhu, Hao and Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu}, + journal={arXiv preprint arXiv:1812.00324}, + year={2018} +} +``` + +
+ +Results on CrowdPose test without multi-scale test + +| Arch | Input Size | AP | AP50 | AP75 | AP (E) | AP (M) | AP (H) | ckpt | log | +| :--------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :----: | :----: | :----: | :--------------------------------------------: | :-------------------------------------------: | +| [HRNet-w32](/configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w32_8xb10-300e_crowdpose-512x512.py) | 512x512 | 0.663 | 0.857 | 0.714 | 0.740 | 0.671 | 0.576 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w32_8xb10-300e_crowdpose-512x512_147bae97-20221228.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w32_8xb10-300e_crowdpose-512x512_20221228.json) | +| [HRNet-w48](/configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w48_8xb5-300e_crowdpose-640x640.py) | 640x640 | 0.679 | 0.869 | 0.731 | 0.753 | 0.688 | 0.593 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w48_8xb5-300e_crowdpose-640x640_4ea6031e-20230128.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w48_8xb5-300e_crowdpose-640x640_20230128.json) | diff --git a/configs/body_2d_keypoint/dekr/crowdpose/hrnet_crowdpose.yml b/configs/body_2d_keypoint/dekr/crowdpose/hrnet_crowdpose.yml index 5bbb7f4b25..c65d5a9da6 100644 --- a/configs/body_2d_keypoint/dekr/crowdpose/hrnet_crowdpose.yml +++ b/configs/body_2d_keypoint/dekr/crowdpose/hrnet_crowdpose.yml @@ -1,37 +1,37 @@ -Models: -- Config: configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w32_8xb10-300e_crowdpose-512x512.py - In Collection: DEKR - Metadata: - Architecture: &id001 - - DEKR - - HRNet - Training Data: CrowdPose - Name: dekr_hrnet-w32_8xb10-300e_crowdpose-512x512 - Results: - - Dataset: CrowdPose - Metrics: - AP: 0.663 - AP@0.5: 0.857 - AP@0.75: 0.714 - AP (E): 0.74 - AP (M): 0.671 - AP (L): 0.576 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w32_8xb10-300e_crowdpose-512x512_147bae97-20221228.pth -- Config: configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w48_8xb5-300e_crowdpose-640x640.py - In Collection: DEKR - Metadata: - Architecture: *id001 - Training Data: CrowdPose - Name: dekr_hrnet-w48_8xb5-300e_crowdpose-640x640 - Results: - - Dataset: CrowdPose - Metrics: - AP: 0.679 - AP@0.5: 0.869 - AP@0.75: 0.731 - AP (E): 0.753 - AP (M): 0.688 - AP (L): 0.593 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w48_8xb5-300e_crowdpose-640x640_4ea6031e-20230128.pth +Models: +- Config: configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w32_8xb10-300e_crowdpose-512x512.py + In Collection: DEKR + Metadata: + Architecture: &id001 + - DEKR + - HRNet + Training Data: CrowdPose + Name: dekr_hrnet-w32_8xb10-300e_crowdpose-512x512 + Results: + - Dataset: CrowdPose + Metrics: + AP: 0.663 + AP@0.5: 0.857 + AP@0.75: 0.714 + AP (E): 0.74 + AP (M): 0.671 + AP (L): 0.576 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w32_8xb10-300e_crowdpose-512x512_147bae97-20221228.pth +- Config: configs/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w48_8xb5-300e_crowdpose-640x640.py + In Collection: DEKR + Metadata: + Architecture: *id001 + Training Data: CrowdPose + Name: dekr_hrnet-w48_8xb5-300e_crowdpose-640x640 + Results: + - Dataset: CrowdPose + 
Metrics: + AP: 0.679 + AP@0.5: 0.869 + AP@0.75: 0.731 + AP (E): 0.753 + AP (M): 0.688 + AP (L): 0.593 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/crowdpose/dekr_hrnet-w48_8xb5-300e_crowdpose-640x640_4ea6031e-20230128.pth diff --git a/configs/body_2d_keypoint/integral_regression/README.md b/configs/body_2d_keypoint/integral_regression/README.md index d60eaa1a57..967b98e101 100644 --- a/configs/body_2d_keypoint/integral_regression/README.md +++ b/configs/body_2d_keypoint/integral_regression/README.md @@ -1,15 +1,15 @@ -# Top-down integral-regression-based pose estimation - -Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. At the 2nd stage, integral regression based methods use a simple integral operation relates and unifies the heatmap and joint regression differentiably, thus obtain the keypoint coordinates given the features extracted from the bounding box area, following the paradigm introduced in [Integral Human Pose Regression](https://arxiv.org/abs/1711.08229). - -## Results and Models - -### COCO Dataset - -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Model | Input Size | AP | AR | Details and Download | -| :------------------: | :--------: | :---: | :---: | :---------------------------------------------------: | -| ResNet-50+Debias-IPR | 256x256 | 0.675 | 0.765 | [resnet_debias_coco.md](./coco/resnet_debias_coco.md) | -| ResNet-50+DSNT | 256x256 | 0.674 | 0.764 | [resnet_dsnt_coco.md](./coco/resnet_dsnt_coco.md) | -| ResNet-50+IPR | 256x256 | 0.633 | 0.730 | [resnet_ipr_coco.md](./coco/resnet_ipr_coco.md) | +# Top-down integral-regression-based pose estimation + +Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. At the 2nd stage, integral regression based methods use a simple integral operation relates and unifies the heatmap and joint regression differentiably, thus obtain the keypoint coordinates given the features extracted from the bounding box area, following the paradigm introduced in [Integral Human Pose Regression](https://arxiv.org/abs/1711.08229). 
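The integral operation mentioned above is compact enough to sketch directly. Below is a minimal, illustrative soft-argmax in plain PyTorch; it is not MMPose's `DSNTHead` implementation, and the function name and the `(B, K, H, W)` heatmap layout are assumptions made for the example.

```python
# Minimal soft-argmax (integral regression) sketch -- illustrative only,
# not the DSNTHead code shipped with MMPose.
import torch
import torch.nn.functional as F


def soft_argmax(heatmaps: torch.Tensor) -> torch.Tensor:
    """Turn heatmaps of shape (B, K, H, W) into continuous (x, y) coords (B, K, 2).

    The heatmap is normalized with a softmax and the expected pixel position
    is taken, so the heatmap-to-coordinate mapping stays differentiable.
    """
    b, k, h, w = heatmaps.shape
    probs = F.softmax(heatmaps.flatten(2), dim=-1).reshape(b, k, h, w)

    # coordinate grids in pixel units
    xs = torch.arange(w, dtype=probs.dtype, device=probs.device)
    ys = torch.arange(h, dtype=probs.dtype, device=probs.device)

    # expectation of x over columns and of y over rows
    x = (probs.sum(dim=2) * xs).sum(dim=-1)  # (B, K)
    y = (probs.sum(dim=3) * ys).sum(dim=-1)  # (B, K)
    return torch.stack([x, y], dim=-1)


if __name__ == '__main__':
    coords = soft_argmax(torch.randn(1, 17, 64, 64))
    print(coords.shape)  # torch.Size([1, 17, 2])
```

Taking the expectation instead of a hard argmax avoids the quantization error of picking a single heatmap cell and lets the regression loss backpropagate through the coordinates, which is the property the heatmap-plus-regression losses in the configs below rely on.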
+ +## Results and Models + +### COCO Dataset + +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Model | Input Size | AP | AR | Details and Download | +| :------------------: | :--------: | :---: | :---: | :---------------------------------------------------: | +| ResNet-50+Debias-IPR | 256x256 | 0.675 | 0.765 | [resnet_debias_coco.md](./coco/resnet_debias_coco.md) | +| ResNet-50+DSNT | 256x256 | 0.674 | 0.764 | [resnet_dsnt_coco.md](./coco/resnet_dsnt_coco.md) | +| ResNet-50+IPR | 256x256 | 0.633 | 0.730 | [resnet_ipr_coco.md](./coco/resnet_ipr_coco.md) | diff --git a/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_8xb64-210e_coco-256x256.py b/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_8xb64-210e_coco-256x256.py index 3dfaeeda8b..eb60eec15b 100644 --- a/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_8xb64-210e_coco-256x256.py +++ b/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_8xb64-210e_coco-256x256.py @@ -1,134 +1,134 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=train_cfg['max_epochs'], - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='IntegralRegressionLabel', - input_size=(256, 256), - heatmap_size=(64, 64), - sigma=2.0, - normalize=True) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - ), - head=dict( - type='DSNTHead', - in_channels=2048, - in_featuremap_size=(8, 8), - num_joints=17, - loss=dict( - type='MultipleLossWrapper', - losses=[ - dict(type='SmoothL1Loss', use_target_weight=True), - dict(type='KeypointMSELoss', use_target_weight=True) - ]), - decoder=codec), - test_cfg=dict( - flip_test=True, - shift_coords=True, - shift_heatmap=True, - ), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/td-hm_res50_8xb64-210e_coco-256x192.pth')) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -test_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) 
-val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file=f'{data_root}person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=test_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=f'{data_root}annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='IntegralRegressionLabel', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=2.0, + normalize=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + ), + head=dict( + type='DSNTHead', + in_channels=2048, + in_featuremap_size=(8, 8), + num_joints=17, + loss=dict( + type='MultipleLossWrapper', + losses=[ + dict(type='SmoothL1Loss', use_target_weight=True), + dict(type='KeypointMSELoss', use_target_weight=True) + ]), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + shift_heatmap=True, + ), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/td-hm_res50_8xb64-210e_coco-256x192.pth')) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +test_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + 
ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=f'{data_root}annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_debias-8xb64-210e_coco-256x256.py b/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_debias-8xb64-210e_coco-256x256.py index 9618c810ea..9a9cce5475 100644 --- a/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_debias-8xb64-210e_coco-256x256.py +++ b/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_debias-8xb64-210e_coco-256x256.py @@ -1,136 +1,136 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=train_cfg['max_epochs'], - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='IntegralRegressionLabel', - input_size=(256, 256), - heatmap_size=(64, 64), - sigma=2.0, - normalize=True) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - ), - head=dict( - type='DSNTHead', - in_channels=2048, - in_featuremap_size=(8, 8), - num_joints=17, - debias=True, - beta=10., - loss=dict( - type='MultipleLossWrapper', - losses=[ - dict(type='SmoothL1Loss', use_target_weight=True), - dict(type='JSDiscretLoss', use_target_weight=True) - ]), - decoder=codec), - test_cfg=dict( - flip_test=True, - shift_coords=True, - shift_heatmap=True, - ), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/td-hm_res50_8xb64-210e_coco-256x192.pth')) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -test_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - 
batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file=f'{data_root}person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=test_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=f'{data_root}annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='IntegralRegressionLabel', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=2.0, + normalize=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + ), + head=dict( + type='DSNTHead', + in_channels=2048, + in_featuremap_size=(8, 8), + num_joints=17, + debias=True, + beta=10., + loss=dict( + type='MultipleLossWrapper', + losses=[ + dict(type='SmoothL1Loss', use_target_weight=True), + dict(type='JSDiscretLoss', use_target_weight=True) + ]), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + shift_heatmap=True, + ), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/td-hm_res50_8xb64-210e_coco-256x192.pth')) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +test_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + 
ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=f'{data_root}annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_dsnt-8xb64-210e_coco-256x256.py b/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_dsnt-8xb64-210e_coco-256x256.py index 8c3897fce1..7b262d904e 100644 --- a/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_dsnt-8xb64-210e_coco-256x256.py +++ b/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_dsnt-8xb64-210e_coco-256x256.py @@ -1,134 +1,134 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=train_cfg['max_epochs'], - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='IntegralRegressionLabel', - input_size=(256, 256), - heatmap_size=(64, 64), - sigma=2.0, - normalize=True) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - ), - head=dict( - type='DSNTHead', - in_channels=2048, - in_featuremap_size=(8, 8), - num_joints=17, - loss=dict( - type='MultipleLossWrapper', - losses=[ - dict(type='SmoothL1Loss', use_target_weight=True), - dict(type='JSDiscretLoss', use_target_weight=True) - ]), - decoder=codec), - test_cfg=dict( - flip_test=True, - shift_coords=True, - shift_heatmap=True, - ), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/td-hm_res50_8xb64-210e_coco-256x192.pth')) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -test_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - 
persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file=f'{data_root}person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=test_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=f'{data_root}annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='IntegralRegressionLabel', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=2.0, + normalize=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + ), + head=dict( + type='DSNTHead', + in_channels=2048, + in_featuremap_size=(8, 8), + num_joints=17, + loss=dict( + type='MultipleLossWrapper', + losses=[ + dict(type='SmoothL1Loss', use_target_weight=True), + dict(type='JSDiscretLoss', use_target_weight=True) + ]), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + shift_heatmap=True, + ), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/td-hm_res50_8xb64-210e_coco-256x192.pth')) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +test_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + 
bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=f'{data_root}annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/integral_regression/coco/resnet_debias_coco.md b/configs/body_2d_keypoint/integral_regression/coco/resnet_debias_coco.md index 40e3660e4f..406d2b6777 100644 --- a/configs/body_2d_keypoint/integral_regression/coco/resnet_debias_coco.md +++ b/configs/body_2d_keypoint/integral_regression/coco/resnet_debias_coco.md @@ -1,57 +1,57 @@ - - -
-Debias IPR (ICCV'2021) - -```bibtex -@inproceedings{gu2021removing, - title={Removing the Bias of Integral Pose Regression}, - author={Gu, Kerui and Yang, Linlin and Yao, Angela}, - booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, - pages={11067--11076}, - year={2021} - } -``` - -
- - - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [debias-ipr_resnet_50](/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_debias-8xb64-210e_coco-256x256.py) | 256x256 | 0.675 | 0.872 | 0.740 | 0.765 | 0.928 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_debias-8xb64-210e_coco-256x256-055a7699_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_debias-8xb64-210e_coco-256x256-055a7699_20220913.log.json) | + + +
+Debias IPR (ICCV'2021) + +```bibtex +@inproceedings{gu2021removing, + title={Removing the Bias of Integral Pose Regression}, + author={Gu, Kerui and Yang, Linlin and Yao, Angela}, + booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, + pages={11067--11076}, + year={2021} + } +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
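+
+A minimal Python sketch for trying this model with the config and checkpoint listed in the table below (it assumes an MMPose 1.x installation; `person.jpg` is a placeholder image path):
+
+```python
+from mmpose.apis import inference_topdown, init_model
+
+cfg = ('configs/body_2d_keypoint/integral_regression/coco/'
+       'ipr_res50_debias-8xb64-210e_coco-256x256.py')
+ckpt = ('https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/'
+        'integral_regression/coco/'
+        'ipr_res50_debias-8xb64-210e_coco-256x256-055a7699_20220913.pth')
+
+# build the model and load the released weights
+model = init_model(cfg, ckpt, device='cpu')
+
+# without explicit bounding boxes the whole image is treated as one person box
+results = inference_topdown(model, 'person.jpg')
+print(results[0].pred_instances.keypoints.shape)  # (1, 17, 2)
+```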
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [debias-ipr_resnet_50](/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_debias-8xb64-210e_coco-256x256.py) | 256x256 | 0.675 | 0.872 | 0.740 | 0.765 | 0.928 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_debias-8xb64-210e_coco-256x256-055a7699_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_debias-8xb64-210e_coco-256x256-055a7699_20220913.log.json) | diff --git a/configs/body_2d_keypoint/integral_regression/coco/resnet_debias_coco.yml b/configs/body_2d_keypoint/integral_regression/coco/resnet_debias_coco.yml index b965238a5d..155cdbf998 100644 --- a/configs/body_2d_keypoint/integral_regression/coco/resnet_debias_coco.yml +++ b/configs/body_2d_keypoint/integral_regression/coco/resnet_debias_coco.yml @@ -1,25 +1,25 @@ -Collections: -- Name: DebiasIPR - Paper: - Title: Removing the Bias of Integral Pose Regression - URL: https://openaccess.thecvf.com/content/ICCV2021/papers/Gu_Removing_the_Bias_of_Integral_Pose_Regression_ICCV_2021_paper.pdf - README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/debias_ipr.md -Models: -- Config: configs/body_2d_keypoint/integral_regression/coco/ipr_res50_debias--8xb64-210e_coco-256x256.py - In Collection: DebiasIPR - Metadata: - Architecture: &id001 - - Debias - - ResNet - Training Data: COCO - Name: ipr_res50_debias--8xb64-210e_coco-256x256 - Results: - - Dataset: COCO - Metrics: - AP: 0.675 - AP@0.5: 0.872 - AP@0.75: 0.74 - AR: 0.765 - AR@0.5: 0.928 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_debias-8xb64-210e_coco-256x256-055a7699_20220913.pth +Collections: +- Name: DebiasIPR + Paper: + Title: Removing the Bias of Integral Pose Regression + URL: https://openaccess.thecvf.com/content/ICCV2021/papers/Gu_Removing_the_Bias_of_Integral_Pose_Regression_ICCV_2021_paper.pdf + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/debias_ipr.md +Models: +- Config: configs/body_2d_keypoint/integral_regression/coco/ipr_res50_debias--8xb64-210e_coco-256x256.py + In Collection: DebiasIPR + Metadata: + Architecture: &id001 + - Debias + - ResNet + Training Data: COCO + Name: ipr_res50_debias--8xb64-210e_coco-256x256 + Results: + - Dataset: COCO + Metrics: + AP: 0.675 + AP@0.5: 0.872 + AP@0.75: 0.74 + AR: 0.765 + AR@0.5: 0.928 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_debias-8xb64-210e_coco-256x256-055a7699_20220913.pth diff --git a/configs/body_2d_keypoint/integral_regression/coco/resnet_dsnt_coco.md b/configs/body_2d_keypoint/integral_regression/coco/resnet_dsnt_coco.md index 608974ae82..d59266b5c7 100644 --- a/configs/body_2d_keypoint/integral_regression/coco/resnet_dsnt_coco.md +++ b/configs/body_2d_keypoint/integral_regression/coco/resnet_dsnt_coco.md @@ -1,56 +1,56 @@ - - -
-DSNT (2018) - -```bibtex -@article{nibali2018numerical, - title={Numerical Coordinate Regression with Convolutional Neural Networks}, - author={Nibali, Aiden and He, Zhen and Morgan, Stuart and Prendergast, Luke}, - journal={arXiv preprint arXiv:1801.07372}, - year={2018} -} -``` - -
- - - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [ipr_resnet_50_dsnt](/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_dsnt-8xb64-210e_coco-256x256.py) | 256x256 | 0.674 | 0.870 | 0.744 | 0.764 | 0.928 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_dsnt-8xb64-210e_coco-256x256-441eedc0_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_dsnt-8xb64-210e_coco-256x256-441eedc0_20220913.log.json) | + + +
+DSNT (2018) + +```bibtex +@article{nibali2018numerical, + title={Numerical Coordinate Regression with Convolutional Neural Networks}, + author={Nibali, Aiden and He, Zhen and Morgan, Stuart and Prendergast, Luke}, + journal={arXiv preprint arXiv:1801.07372}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
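+
+The DSNT config linked in the table below supervises the head with two terms: a coordinate loss (SmoothL1 on the regressed keypoints) and a distribution loss (a Jensen-Shannon divergence between predicted and target heat-maps). The snippet below is a generic PyTorch sketch of such a two-term objective, not MMPose's `JSDiscretLoss` implementation; the tensor names are hypothetical and details such as per-keypoint target weights are omitted:
+
+```python
+import torch
+import torch.nn.functional as F
+
+def two_term_loss(pred_xy, gt_xy, pred_hm, gt_hm, eps=1e-12):
+    """SmoothL1 on coordinates plus JS divergence on flattened heat-maps."""
+    coord_loss = F.smooth_l1_loss(pred_xy, gt_xy)
+    p = pred_hm.flatten(-2).clamp_min(eps)
+    q = gt_hm.flatten(-2).clamp_min(eps)
+    p = p / p.sum(-1, keepdim=True)
+    q = q / q.sum(-1, keepdim=True)
+    m = 0.5 * (p + q)
+    js = 0.5 * (p * (p / m).log()).sum(-1) + 0.5 * (q * (q / m).log()).sum(-1)
+    return coord_loss + js.mean()
+
+# toy shapes: batch of 2, 17 keypoints, 64x64 heat-maps
+loss = two_term_loss(torch.rand(2, 17, 2), torch.rand(2, 17, 2),
+                     torch.rand(2, 17, 64, 64), torch.rand(2, 17, 64, 64))
+print(loss)
+```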
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [ipr_resnet_50_dsnt](/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_dsnt-8xb64-210e_coco-256x256.py) | 256x256 | 0.674 | 0.870 | 0.744 | 0.764 | 0.928 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_dsnt-8xb64-210e_coco-256x256-441eedc0_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_dsnt-8xb64-210e_coco-256x256-441eedc0_20220913.log.json) | diff --git a/configs/body_2d_keypoint/integral_regression/coco/resnet_dsnt_coco.yml b/configs/body_2d_keypoint/integral_regression/coco/resnet_dsnt_coco.yml index f34e839c10..fa772e8d69 100644 --- a/configs/body_2d_keypoint/integral_regression/coco/resnet_dsnt_coco.yml +++ b/configs/body_2d_keypoint/integral_regression/coco/resnet_dsnt_coco.yml @@ -1,25 +1,25 @@ -Collections: -- Name: DSNT - Paper: - Title: Numerical Coordinate Regression with Convolutional Neural Networks - URL: https://arxiv.org/abs/1801.07372v2 - README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/dsnt.md -Models: -- Config: configs/body_2d_keypoint/integral_regression/coco/ipr_res50_dsnt-8xb64-210e_coco-256x256.py - In Collection: DSNT - Metadata: - Architecture: &id001 - - DSNT - - ResNet - Training Data: COCO - Name: ipr_res50_dsnt-8xb64-210e_coco-256x256 - Results: - - Dataset: COCO - Metrics: - AP: 0.674 - AP@0.5: 0.87 - AP@0.75: 0.744 - AR: 0.764 - AR@0.5: 0.928 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_dsnt-8xb64-210e_coco-256x256-441eedc0_20220913.pth +Collections: +- Name: DSNT + Paper: + Title: Numerical Coordinate Regression with Convolutional Neural Networks + URL: https://arxiv.org/abs/1801.07372v2 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/dsnt.md +Models: +- Config: configs/body_2d_keypoint/integral_regression/coco/ipr_res50_dsnt-8xb64-210e_coco-256x256.py + In Collection: DSNT + Metadata: + Architecture: &id001 + - DSNT + - ResNet + Training Data: COCO + Name: ipr_res50_dsnt-8xb64-210e_coco-256x256 + Results: + - Dataset: COCO + Metrics: + AP: 0.674 + AP@0.5: 0.87 + AP@0.75: 0.744 + AR: 0.764 + AR@0.5: 0.928 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_dsnt-8xb64-210e_coco-256x256-441eedc0_20220913.pth diff --git a/configs/body_2d_keypoint/integral_regression/coco/resnet_ipr_coco.md b/configs/body_2d_keypoint/integral_regression/coco/resnet_ipr_coco.md index ce4fbae501..d51e5dfb43 100644 --- a/configs/body_2d_keypoint/integral_regression/coco/resnet_ipr_coco.md +++ b/configs/body_2d_keypoint/integral_regression/coco/resnet_ipr_coco.md @@ -1,57 +1,57 @@ - - -
-IPR (ECCV'2018) - -```bibtex -@inproceedings{sun2018integral, - title={Integral human pose regression}, - author={Sun, Xiao and Xiao, Bin and Wei, Fangyin and Liang, Shuang and Wei, Yichen}, - booktitle={Proceedings of the European conference on computer vision (ECCV)}, - pages={529--545}, - year={2018} -} -``` - -
- - - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [ipr_resnet_50](/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_8xb64-210e_coco-256x256.py) | 256x256 | 0.633 | 0.860 | 0.703 | 0.730 | 0.919 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_8xb64-210e_coco-256x256-a3898a33_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_8xb64-210e_coco-256x256-a3898a33_20220913.log.json) | + + +
+IPR (ECCV'2018) + +```bibtex +@inproceedings{sun2018integral, + title={Integral human pose regression}, + author={Sun, Xiao and Xiao, Bin and Wei, Fangyin and Liang, Shuang and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={529--545}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
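+
+The decoding idea behind IPR is compact enough to show directly: the predicted heat-map is normalized into a probability map and the keypoint location is taken as its expectation (soft-argmax), which is differentiable and yields sub-pixel coordinates. Below is a minimal NumPy sketch on a synthetic 64x64 map (the heat-map resolution used by the integral-regression configs in this folder); it illustrates the principle only, not the MMPose codec itself:
+
+```python
+import numpy as np
+
+def soft_argmax(heatmap):
+    """Expected (x, y) location of a single-keypoint heat-map."""
+    prob = heatmap / heatmap.sum()
+    ys, xs = np.indices(prob.shape)
+    return float((prob * xs).sum()), float((prob * ys).sum())
+
+# synthetic Gaussian response centred at (x=40.3, y=17.8) with sigma=2
+ys, xs = np.indices((64, 64))
+heatmap = np.exp(-((xs - 40.3) ** 2 + (ys - 17.8) ** 2) / (2 * 2.0 ** 2))
+
+print(soft_argmax(heatmap))  # approximately (40.3, 17.8)
+```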
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [ipr_resnet_50](/configs/body_2d_keypoint/integral_regression/coco/ipr_res50_8xb64-210e_coco-256x256.py) | 256x256 | 0.633 | 0.860 | 0.703 | 0.730 | 0.919 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_8xb64-210e_coco-256x256-a3898a33_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_8xb64-210e_coco-256x256-a3898a33_20220913.log.json) | diff --git a/configs/body_2d_keypoint/integral_regression/coco/resnet_ipr_coco.yml b/configs/body_2d_keypoint/integral_regression/coco/resnet_ipr_coco.yml index fa22133f3e..d40d19098e 100644 --- a/configs/body_2d_keypoint/integral_regression/coco/resnet_ipr_coco.yml +++ b/configs/body_2d_keypoint/integral_regression/coco/resnet_ipr_coco.yml @@ -1,25 +1,25 @@ -Collections: -- Name: IPR - Paper: - Title: Integral human pose regression - URL: https://arxiv.org/abs/1711.08229 - README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/ipr.md -Models: -- Config: configs/body_2d_keypoint/integral_regression/coco/ipr_res50_8xb64-210e_coco-256x256.py - In Collection: IPR - Metadata: - Architecture: &id001 - - IPR - - ResNet - Training Data: COCO - Name: ipr_res50_8xb64-210e_coco-256x256 - Results: - - Dataset: COCO - Metrics: - AP: 0.633 - AP@0.5: 0.86 - AP@0.75: 0.703 - AR: 0.73 - AR@0.5: 0.919 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_8xb64-210e_coco-256x256-a3898a33_20220913.pth +Collections: +- Name: IPR + Paper: + Title: Integral human pose regression + URL: https://arxiv.org/abs/1711.08229 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/ipr.md +Models: +- Config: configs/body_2d_keypoint/integral_regression/coco/ipr_res50_8xb64-210e_coco-256x256.py + In Collection: IPR + Metadata: + Architecture: &id001 + - IPR + - ResNet + Training Data: COCO + Name: ipr_res50_8xb64-210e_coco-256x256 + Results: + - Dataset: COCO + Metrics: + AP: 0.633 + AP@0.5: 0.86 + AP@0.75: 0.703 + AR: 0.73 + AR@0.5: 0.919 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/integral_regression/coco/ipr_res50_8xb64-210e_coco-256x256-a3898a33_20220913.pth diff --git a/configs/body_2d_keypoint/rtmpose/README.md b/configs/body_2d_keypoint/rtmpose/README.md index 38fd938376..19e4c680bd 100644 --- a/configs/body_2d_keypoint/rtmpose/README.md +++ b/configs/body_2d_keypoint/rtmpose/README.md @@ -1,57 +1,57 @@ -# RTMPose - -Recent studies on 2D pose estimation have achieved excellent performance on public benchmarks, yet its application in the industrial community still suffers from heavy model parameters and high latency. -In order to bridge this gap, we empirically study five aspects that affect the performance of multi-person pose estimation algorithms: paradigm, backbone network, localization algorithm, training strategy, and deployment inference, and present a high-performance real-time multi-person pose estimation framework, **RTMPose**, based on MMPose. 
-Our RTMPose-m achieves **75.8% AP** on COCO with **90+ FPS** on an Intel i7-11700 CPU and **430+ FPS** on an NVIDIA GTX 1660 Ti GPU, and RTMPose-l achieves **67.0% AP** on COCO-WholeBody with **130+ FPS**, outperforming existing open-source libraries. -To further evaluate RTMPose's capability in critical real-time applications, we also report the performance after deploying on the mobile device. - -## Results and Models - -### COCO Dataset - -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Model | Input Size | AP | AR | Details and Download | -| :----------------: | :--------: | :---: | :---: | :---------------------------------------: | -| RTMPose-t | 256x192 | 0.682 | 0.736 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | -| RTMPose-s | 256x192 | 0.716 | 0.768 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | -| RTMPose-m | 256x192 | 0.746 | 0.795 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | -| RTMPose-l | 256x192 | 0.758 | 0.806 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | -| RTMPose-t-aic-coco | 256x192 | 0.685 | 0.738 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | -| RTMPose-s-aic-coco | 256x192 | 0.722 | 0.772 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | -| RTMPose-m-aic-coco | 256x192 | 0.758 | 0.806 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | -| RTMPose-l-aic-coco | 256x192 | 0.765 | 0.813 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | -| RTMPose-m-aic-coco | 384x288 | 0.770 | 0.816 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | -| RTMPose-l-aic-coco | 384x288 | 0.773 | 0.819 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | - -### MPII Dataset - -| Model | Input Size | PCKh@0.5 | PCKh@0.1 | Details and Download | -| :-------: | :--------: | :------: | :------: | :---------------------------------------: | -| RTMPose-m | 256x256 | 0.907 | 0.348 | [rtmpose_mpii.md](./mpii/rtmpose_mpii.md) | - -### CrowdPose Dataset - -Results on CrowdPose test with [YOLOv3](https://github.com/eriklindernoren/PyTorch-YOLOv3) human detector - -| Model | Input Size | AP | AR | Details and Download | -| :-------: | :--------: | :---: | :---: | :------------------------------------------------------: | -| RTMPose-m | 256x192 | 0.706 | 0.788 | [rtmpose_crowdpose.md](./crowdpose/rtmpose_crowdpose.md) | - -### Human-Art Dataset - -Results on Human-Art validation dataset with detector having human AP of 56.2 on Human-Art validation dataset - -| Model | Input Size | AP | AR | Details and Download | -| :-------: | :--------: | :---: | :---: | :---------------------------------------------------: | -| RTMPose-s | 256x192 | 0.311 | 0.381 | [rtmpose_humanart.md](./humanart/rtmpose_humanart.md) | -| RTMPose-m | 256x192 | 0.355 | 0.417 | [rtmpose_humanart.md](./humanart/rtmpose_humanart.md) | -| RTMPose-l | 256x192 | 0.378 | 0.442 | [rtmpose_humanart.md](./humanart/rtmpose_humanart.md) | - -Results on Human-Art validation dataset with ground-truth bounding-box - -| Model | Input Size | AP | AR | Details and Download | -| :-------: | :--------: | :---: | :---: | :---------------------------------------------------: | -| RTMPose-s | 256x192 | 0.698 | 0.732 | [rtmpose_humanart.md](./humanart/rtmpose_humanart.md) | -| RTMPose-m | 256x192 | 0.728 | 0.759 | [rtmpose_humanart.md](./humanart/rtmpose_humanart.md) | -| RTMPose-l | 256x192 | 0.753 | 0.783 | [rtmpose_humanart.md](./humanart/rtmpose_humanart.md) | +# RTMPose + +Recent studies on 2D pose estimation have achieved excellent performance on public benchmarks, yet its application in the industrial community still suffers from heavy 
model parameters and high latency. +In order to bridge this gap, we empirically study five aspects that affect the performance of multi-person pose estimation algorithms: paradigm, backbone network, localization algorithm, training strategy, and deployment inference, and present a high-performance real-time multi-person pose estimation framework, **RTMPose**, based on MMPose. +Our RTMPose-m achieves **75.8% AP** on COCO with **90+ FPS** on an Intel i7-11700 CPU and **430+ FPS** on an NVIDIA GTX 1660 Ti GPU, and RTMPose-l achieves **67.0% AP** on COCO-WholeBody with **130+ FPS**, outperforming existing open-source libraries. +To further evaluate RTMPose's capability in critical real-time applications, we also report the performance after deploying on the mobile device. + +## Results and Models + +### COCO Dataset + +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Model | Input Size | AP | AR | Details and Download | +| :----------------: | :--------: | :---: | :---: | :---------------------------------------: | +| RTMPose-t | 256x192 | 0.682 | 0.736 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | +| RTMPose-s | 256x192 | 0.716 | 0.768 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | +| RTMPose-m | 256x192 | 0.746 | 0.795 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | +| RTMPose-l | 256x192 | 0.758 | 0.806 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | +| RTMPose-t-aic-coco | 256x192 | 0.685 | 0.738 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | +| RTMPose-s-aic-coco | 256x192 | 0.722 | 0.772 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | +| RTMPose-m-aic-coco | 256x192 | 0.758 | 0.806 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | +| RTMPose-l-aic-coco | 256x192 | 0.765 | 0.813 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | +| RTMPose-m-aic-coco | 384x288 | 0.770 | 0.816 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | +| RTMPose-l-aic-coco | 384x288 | 0.773 | 0.819 | [rtmpose_coco.md](./coco/rtmpose_coco.md) | + +### MPII Dataset + +| Model | Input Size | PCKh@0.5 | PCKh@0.1 | Details and Download | +| :-------: | :--------: | :------: | :------: | :---------------------------------------: | +| RTMPose-m | 256x256 | 0.907 | 0.348 | [rtmpose_mpii.md](./mpii/rtmpose_mpii.md) | + +### CrowdPose Dataset + +Results on CrowdPose test with [YOLOv3](https://github.com/eriklindernoren/PyTorch-YOLOv3) human detector + +| Model | Input Size | AP | AR | Details and Download | +| :-------: | :--------: | :---: | :---: | :------------------------------------------------------: | +| RTMPose-m | 256x192 | 0.706 | 0.788 | [rtmpose_crowdpose.md](./crowdpose/rtmpose_crowdpose.md) | + +### Human-Art Dataset + +Results on Human-Art validation dataset with detector having human AP of 56.2 on Human-Art validation dataset + +| Model | Input Size | AP | AR | Details and Download | +| :-------: | :--------: | :---: | :---: | :---------------------------------------------------: | +| RTMPose-s | 256x192 | 0.311 | 0.381 | [rtmpose_humanart.md](./humanart/rtmpose_humanart.md) | +| RTMPose-m | 256x192 | 0.355 | 0.417 | [rtmpose_humanart.md](./humanart/rtmpose_humanart.md) | +| RTMPose-l | 256x192 | 0.378 | 0.442 | [rtmpose_humanart.md](./humanart/rtmpose_humanart.md) | + +Results on Human-Art validation dataset with ground-truth bounding-box + +| Model | Input Size | AP | AR | Details and Download | +| :-------: | :--------: | :---: | :---: | :---------------------------------------------------: | +| RTMPose-s | 256x192 | 0.698 | 0.732 | [rtmpose_humanart.md](./humanart/rtmpose_humanart.md) 
| +| RTMPose-m | 256x192 | 0.728 | 0.759 | [rtmpose_humanart.md](./humanart/rtmpose_humanart.md) | +| RTMPose-l | 256x192 | 0.753 | 0.783 | [rtmpose_humanart.md](./humanart/rtmpose_humanart.md) | diff --git a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-256x192.py b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-256x192.py index 1cf3380435..ea72ca7dc3 100644 --- a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-256x192.py +++ b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-256x192.py @@ -1,553 +1,553 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 420 -stage2_num_epochs = 20 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 210 to 420 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(192, 256), - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=1., - widen_factor=1., - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-l_udp-body7_210e-256x192-5e9558ef_20230504.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=1024, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict(type='PhotometricDistortion'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - 
type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.5, 1.5], - rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# mapping -aic_coco = [ - (0, 6), - (1, 8), - (2, 10), - (3, 5), - (4, 7), - (5, 9), - (6, 12), - (7, 14), - (8, 16), - (9, 11), - (10, 13), - (11, 15), -] - -crowdpose_coco = [ - (0, 5), - (1, 6), - (2, 7), - (3, 8), - (4, 9), - (5, 10), - (6, 11), - (7, 12), - (8, 13), - (9, 14), - (10, 15), - (11, 16), -] - -mpii_coco = [ - (0, 16), - (1, 14), - (2, 12), - (3, 11), - (4, 13), - (5, 15), - (10, 10), - (11, 8), - (12, 6), - (13, 5), - (14, 7), - (15, 9), -] - -jhmdb_coco = [ - (3, 6), - (4, 5), - (5, 12), - (6, 11), - (7, 8), - (8, 7), - (9, 14), - (10, 13), - (11, 10), - (12, 9), - (13, 16), - (14, 15), -] - -halpe_coco = [ - (0, 0), - (1, 1), - (2, 2), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -ochuman_coco = [ - (0, 0), - (1, 1), - (2, 2), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -posetrack_coco = [ - (0, 0), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -# train datasets -dataset_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_train2017.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[], -) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) - ], -) - -dataset_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) - ], -) - -dataset_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_train.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) - 
], -) - -dataset_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_train.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) - ], -) - -dataset_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_train_v1.json', - data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) - ], -) - -dataset_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_train.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/coco.py'), - datasets=[ - dataset_coco, - dataset_aic, - dataset_crowdpose, - dataset_mpii, - dataset_jhmdb, - dataset_halpe, - dataset_posetrack, - ], - pipeline=train_pipeline, - test_mode=False, - )) - -# val datasets -val_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_val2017.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[], -) - -val_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_val.json', - data_prefix=dict( - img='pose/ai_challenge/ai_challenger_keypoint' - '_validation_20170911/keypoint_validation_images_20170911/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) - ], -) - -val_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) - ], -) - -val_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_val.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) - ], -) - -val_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_test.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) - ], -) - -val_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_val_v1.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) - ], -) - -val_ochuman = dict( - type='OCHumanDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='ochuman/annotations/' - 'ochuman_coco_format_val_range_0.00_1.00.json', - data_prefix=dict(img='pose/OCHuman/images/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=ochuman_coco) - ], -) - -val_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_val.json', 
- data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) - ], -) - -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_val2017.json', - bbox_file=f'{data_root}coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='detection/coco/val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) - -test_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/coco.py'), - datasets=[ - val_coco, - val_aic, - val_crowdpose, - val_mpii, - val_jhmdb, - val_halpe, - val_ochuman, - val_posetrack, - ], - pipeline=val_pipeline, - test_mode=True, - )) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) -# default_hooks = dict( -# checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') -test_evaluator = [ - dict(type='PCKAccuracy', thr=0.1), - dict(type='AUC'), - dict(type='EPE'), -] +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 20 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-l_udp-body7_210e-256x192-5e9558ef_20230504.pth' # noqa + )), + head=dict( + 
type='RTMCCHead', + in_channels=1024, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# mapping +aic_coco = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), +] + +crowdpose_coco = [ + (0, 5), + (1, 6), + (2, 7), + (3, 8), + (4, 9), + (5, 10), + (6, 11), + (7, 12), + (8, 13), + (9, 14), + (10, 15), + (11, 16), +] + +mpii_coco = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_coco = [ + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +ochuman_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +posetrack_coco = [ + (0, 0), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 
12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + 
data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=ochuman_coco) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) + +test_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) +# default_hooks = dict( +# checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = [ + dict(type='PCKAccuracy', thr=0.1), + dict(type='AUC'), + dict(type='EPE'), +] diff --git a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-384x288.py b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-384x288.py index 19b3c8afb6..6ffcd6eba8 100644 --- a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-384x288.py +++ b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-384x288.py @@ -1,553 +1,553 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 420 
-stage2_num_epochs = 20 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 210 to 420 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(288, 384), - sigma=(6., 6.93), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=1., - widen_factor=1., - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-l_udp-body7_210e-384x288-b15bc30d_20230504.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=1024, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict(type='PhotometricDistortion'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - 
scale_factor=[0.5, 1.5], - rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# mapping -aic_coco = [ - (0, 6), - (1, 8), - (2, 10), - (3, 5), - (4, 7), - (5, 9), - (6, 12), - (7, 14), - (8, 16), - (9, 11), - (10, 13), - (11, 15), -] - -crowdpose_coco = [ - (0, 5), - (1, 6), - (2, 7), - (3, 8), - (4, 9), - (5, 10), - (6, 11), - (7, 12), - (8, 13), - (9, 14), - (10, 15), - (11, 16), -] - -mpii_coco = [ - (0, 16), - (1, 14), - (2, 12), - (3, 11), - (4, 13), - (5, 15), - (10, 10), - (11, 8), - (12, 6), - (13, 5), - (14, 7), - (15, 9), -] - -jhmdb_coco = [ - (3, 6), - (4, 5), - (5, 12), - (6, 11), - (7, 8), - (8, 7), - (9, 14), - (10, 13), - (11, 10), - (12, 9), - (13, 16), - (14, 15), -] - -halpe_coco = [ - (0, 0), - (1, 1), - (2, 2), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -ochuman_coco = [ - (0, 0), - (1, 1), - (2, 2), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -posetrack_coco = [ - (0, 0), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -# train datasets -dataset_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_train2017.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[], -) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) - ], -) - -dataset_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) - ], -) - -dataset_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_train.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) - ], -) - -dataset_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_train.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) - ], -) - -dataset_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_train_v1.json', - data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) - ], -) - -dataset_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - 
data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_train.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/coco.py'), - datasets=[ - dataset_coco, - dataset_aic, - dataset_crowdpose, - dataset_mpii, - dataset_jhmdb, - dataset_halpe, - dataset_posetrack, - ], - pipeline=train_pipeline, - test_mode=False, - )) - -# val datasets -val_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_val2017.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[], -) - -val_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_val.json', - data_prefix=dict( - img='pose/ai_challenge/ai_challenger_keypoint' - '_validation_20170911/keypoint_validation_images_20170911/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) - ], -) - -val_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) - ], -) - -val_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_val.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) - ], -) - -val_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_test.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) - ], -) - -val_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_val_v1.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) - ], -) - -val_ochuman = dict( - type='OCHumanDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='ochuman/annotations/' - 'ochuman_coco_format_val_range_0.00_1.00.json', - data_prefix=dict(img='pose/OCHuman/images/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=ochuman_coco) - ], -) - -val_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_val.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) - ], -) - -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_val2017.json', - bbox_file=f'{data_root}coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='detection/coco/val2017/'), - test_mode=True, - pipeline=val_pipeline, - 
)) - -test_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/coco.py'), - datasets=[ - val_coco, - val_aic, - val_crowdpose, - val_mpii, - val_jhmdb, - val_halpe, - val_ochuman, - val_posetrack, - ], - pipeline=val_pipeline, - test_mode=True, - )) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) -# default_hooks = dict( -# checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') -test_evaluator = [ - dict(type='PCKAccuracy', thr=0.1), - dict(type='AUC'), - dict(type='EPE'), -] +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 20 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(288, 384), + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-l_udp-body7_210e-384x288-b15bc30d_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = 
dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# mapping +aic_coco = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), +] + +crowdpose_coco = [ + (0, 5), + (1, 6), + (2, 7), + (3, 8), + (4, 9), + (5, 10), + (6, 11), + (7, 12), + (8, 13), + (9, 14), + (10, 15), + (11, 16), +] + +mpii_coco = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_coco = [ + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +ochuman_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +posetrack_coco = [ + (0, 0), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + 
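The `KeypointConverter` transforms used in these per-dataset pipelines remap each source dataset's keypoint indices onto the 17-keypoint COCO layout via (source_index, target_index) pairs such as `aic_coco` above. As a rough illustration of that remapping only (a sketch, not the library's actual implementation: the array shapes, the helper name `remap_keypoints`, and the zero-filling of unmapped target slots are assumptions made for this example):

# Illustrative sketch of KeypointConverter-style index remapping.
import numpy as np

# Example pairs in the same (source_index, target_index) form as `aic_coco`.
example_mapping = [(0, 6), (1, 8), (2, 10)]

def remap_keypoints(src_kpts, src_visible, mapping, num_keypoints=17):
    """Scatter source keypoints into a fixed-size target layout.

    src_kpts:    (N, K_src, 2) array of x/y coordinates.
    src_visible: (N, K_src) array of visibility flags.
    Target slots that receive no source keypoint stay zero, i.e. they are
    treated as missing annotations.
    """
    n = src_kpts.shape[0]
    dst_kpts = np.zeros((n, num_keypoints, 2), dtype=src_kpts.dtype)
    dst_visible = np.zeros((n, num_keypoints), dtype=src_visible.dtype)
    for src_idx, dst_idx in mapping:
        dst_kpts[:, dst_idx] = src_kpts[:, src_idx]
        dst_visible[:, dst_idx] = src_visible[:, src_idx]
    return dst_kpts, dst_visible

# Usage: one instance with 14 AIC-style keypoints scattered into 17 COCO slots.
kpts = np.random.rand(1, 14, 2)
vis = np.ones((1, 14), dtype=np.int64)
coco_kpts, coco_vis = remap_keypoints(kpts, vis, example_mapping)
assert coco_kpts.shape == (1, 17, 2)

This is why every converted dataset can share the single COCO metainfo file (`configs/_base_/datasets/coco.py`) inside the `CombinedDataset` wrappers below.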
+dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + 
data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=ochuman_coco) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) + +test_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) +# default_hooks = dict( +# checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = [ + dict(type='PCKAccuracy', thr=0.1), + dict(type='AUC'), + dict(type='EPE'), +] diff --git a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-256x192.py b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-256x192.py index 293a5f07ea..2a069baa26 100644 --- a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-256x192.py +++ b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-256x192.py @@ -1,535 +1,535 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# common setting -num_keypoints = 26 -input_size = (192, 256) - -# runtime -max_epochs = 700 -stage2_num_epochs = 30 -base_lr = 4e-3 -train_batch_size = 512 -val_batch_size = 64 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - 
by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=1., - widen_factor=1., - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-256x192-4dba18fc_20230504.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=1024, - out_channels=num_keypoints, - input_size=input_size, - in_featuremap_size=tuple([s // 32 for s in input_size]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PhotometricDistortion'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.5, 1.5], - rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - 
dict(type='PackPoseInputs') -] - -# mapping -coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), - (20, 21), (21, 23), (22, 25)] - -aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), - (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), - (12, 17), (13, 18)] - -crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), - (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), - (13, 18)] - -mpii_halpe26 = [ - (0, 16), - (1, 14), - (2, 12), - (3, 11), - (4, 13), - (5, 15), - (8, 18), - (9, 17), - (10, 10), - (11, 8), - (12, 6), - (13, 5), - (14, 7), - (15, 9), -] - -jhmdb_halpe26 = [ - (0, 18), - (2, 17), - (3, 6), - (4, 5), - (5, 12), - (6, 11), - (7, 8), - (8, 7), - (9, 14), - (10, 13), - (11, 10), - (12, 9), - (13, 16), - (14, 15), -] - -halpe_halpe26 = [(i, i) for i in range(26)] - -ochuman_halpe26 = [(i, i) for i in range(17)] - -posetrack_halpe26 = [ - (0, 0), - (2, 17), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -# train datasets -dataset_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -dataset_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -dataset_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_train.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -dataset_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_train.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -dataset_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_train_v1.json', - data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -dataset_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_train.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=5, - pin_memory=True, - persistent_workers=True, - 
sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - dataset_coco, - dataset_aic, - dataset_crowdpose, - dataset_mpii, - dataset_jhmdb, - dataset_halpe, - dataset_posetrack, - ], - pipeline=train_pipeline, - test_mode=False, - )) - -# val datasets -val_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -val_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_val.json', - data_prefix=dict( - img='pose/ai_challenge/ai_challenger_keypoint' - '_validation_20170911/keypoint_validation_images_20170911/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -val_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -val_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_val.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -val_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_test.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -val_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_val_v1.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -val_ochuman = dict( - type='OCHumanDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='ochuman/annotations/' - 'ochuman_coco_format_val_range_0.00_1.00.json', - data_prefix=dict(img='pose/OCHuman/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=ochuman_halpe26) - ], -) - -val_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_val.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=5, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - val_coco, - val_aic, - val_crowdpose, - val_mpii, - val_jhmdb, - val_halpe, - val_ochuman, - val_posetrack, - ], - pipeline=val_pipeline, - test_mode=True, - )) - -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) - -custom_hooks 
= [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] -val_evaluator = test_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# common setting +num_keypoints = 26 +input_size = (192, 256) + +# runtime +max_epochs = 700 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 512 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-256x192-4dba18fc_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict( + 
type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] + +# mapping +coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), + (20, 21), (21, 23), (22, 25)] + +aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), + (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), + (12, 17), (13, 18)] + +crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), + (13, 18)] + +mpii_halpe26 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (8, 18), + (9, 17), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_halpe26 = [ + (0, 18), + (2, 17), + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_halpe26 = [(i, i) for i in range(26)] + +ochuman_halpe26 = [(i, i) for i in range(17)] + +posetrack_halpe26 = [ + (0, 0), + (2, 17), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + 
data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=5, + pin_memory=True, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict( + 
type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=ochuman_halpe26) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=5, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] +val_evaluator = test_evaluator diff --git a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-384x288.py b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-384x288.py index 0aa16f3db4..ae75a5e466 100644 --- a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-384x288.py +++ b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-384x288.py @@ -1,535 +1,535 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# common setting -num_keypoints = 26 -input_size = (288, 384) - -# runtime -max_epochs = 700 -stage2_num_epochs = 30 -base_lr = 4e-3 -train_batch_size = 512 -val_batch_size = 64 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(6., 6.93), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=1., - widen_factor=1., - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - 
checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-384x288-3f5a1437_20230504.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=1024, - out_channels=num_keypoints, - input_size=input_size, - in_featuremap_size=tuple([s // 32 for s in input_size]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PhotometricDistortion'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.5, 1.5], - rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] - -# mapping -coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), - (20, 21), (21, 23), (22, 25)] - -aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), - (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), - (12, 17), (13, 18)] - -crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), - (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), - (13, 18)] - -mpii_halpe26 = [ - (0, 16), - (1, 14), - (2, 12), - (3, 11), - (4, 13), - (5, 15), - (8, 18), - (9, 17), - (10, 10), - (11, 8), - (12, 6), - (13, 5), - (14, 7), - (15, 9), -] - -jhmdb_halpe26 = [ - (0, 18), - (2, 17), - (3, 6), - (4, 5), - (5, 12), - (6, 11), - (7, 8), - (8, 7), - (9, 14), - (10, 13), - (11, 10), - (12, 9), - (13, 16), - (14, 15), -] - -halpe_halpe26 = [(i, i) for i in range(26)] - -ochuman_halpe26 = [(i, i) for i in range(17)] - -posetrack_halpe26 = [ - (0, 0), - (2, 17), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 
9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -# train datasets -dataset_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -dataset_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -dataset_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_train.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -dataset_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_train.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -dataset_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_train_v1.json', - data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -dataset_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_train.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - pin_memory=True, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - dataset_coco, - dataset_aic, - dataset_crowdpose, - dataset_mpii, - dataset_jhmdb, - dataset_halpe, - dataset_posetrack, - ], - pipeline=train_pipeline, - test_mode=False, - )) - -# val datasets -val_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -val_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_val.json', - data_prefix=dict( - img='pose/ai_challenge/ai_challenger_keypoint' - '_validation_20170911/keypoint_validation_images_20170911/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - 
mapping=aic_halpe26) - ], -) - -val_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -val_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_val.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -val_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_test.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -val_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_val_v1.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -val_ochuman = dict( - type='OCHumanDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='ochuman/annotations/' - 'ochuman_coco_format_val_range_0.00_1.00.json', - data_prefix=dict(img='pose/OCHuman/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=ochuman_halpe26) - ], -) - -val_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_val.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - val_coco, - val_aic, - val_crowdpose, - val_mpii, - val_jhmdb, - val_halpe, - val_ochuman, - val_posetrack, - ], - pipeline=val_pipeline, - test_mode=True, - )) - -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] -val_evaluator = test_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# common setting +num_keypoints = 26 +input_size = (288, 384) + +# runtime +max_epochs = 700 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 512 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + 
start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-384x288-3f5a1437_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + 
use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] + +# mapping +coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), + (20, 21), (21, 23), (22, 25)] + +aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), + (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), + (12, 17), (13, 18)] + +crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), + (13, 18)] + +mpii_halpe26 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (8, 18), + (9, 17), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_halpe26 = [ + (0, 18), + (2, 17), + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_halpe26 = [(i, i) for i in range(26)] + +ochuman_halpe26 = [(i, i) for i in range(17)] + +posetrack_halpe26 = [ + (0, 0), + (2, 17), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + pin_memory=True, + 
persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=ochuman_halpe26) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', 
max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] +val_evaluator = test_evaluator diff --git a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-256x192.py b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-256x192.py index be462bfddf..c96ba39ff1 100644 --- a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-256x192.py +++ b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-256x192.py @@ -1,553 +1,553 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 420 -stage2_num_epochs = 20 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 210 to 420 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(192, 256), - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-m_udp-body7_210e-256x192-e0c9327b_20230504.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=768, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0.0, - drop_path=0.0, - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), 
- dict(type='mmdet.YOLOXHSVRandomAug'), - dict(type='PhotometricDistortion'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.5, 1.5], - rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# mapping -aic_coco = [ - (0, 6), - (1, 8), - (2, 10), - (3, 5), - (4, 7), - (5, 9), - (6, 12), - (7, 14), - (8, 16), - (9, 11), - (10, 13), - (11, 15), -] - -crowdpose_coco = [ - (0, 5), - (1, 6), - (2, 7), - (3, 8), - (4, 9), - (5, 10), - (6, 11), - (7, 12), - (8, 13), - (9, 14), - (10, 15), - (11, 16), -] - -mpii_coco = [ - (0, 16), - (1, 14), - (2, 12), - (3, 11), - (4, 13), - (5, 15), - (10, 10), - (11, 8), - (12, 6), - (13, 5), - (14, 7), - (15, 9), -] - -jhmdb_coco = [ - (3, 6), - (4, 5), - (5, 12), - (6, 11), - (7, 8), - (8, 7), - (9, 14), - (10, 13), - (11, 10), - (12, 9), - (13, 16), - (14, 15), -] - -halpe_coco = [ - (0, 0), - (1, 1), - (2, 2), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -ochuman_coco = [ - (0, 0), - (1, 1), - (2, 2), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -posetrack_coco = [ - (0, 0), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -# train datasets -dataset_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_train2017.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[], -) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) - ], -) - -dataset_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) - ], -) - -dataset_mpii = dict( - type='MpiiDataset', - data_root=data_root, - 
data_mode=data_mode, - ann_file='mpii/annotations/mpii_train.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) - ], -) - -dataset_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_train.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) - ], -) - -dataset_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_train_v1.json', - data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) - ], -) - -dataset_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_train.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/coco.py'), - datasets=[ - dataset_coco, - dataset_aic, - dataset_crowdpose, - dataset_mpii, - dataset_jhmdb, - dataset_halpe, - dataset_posetrack, - ], - pipeline=train_pipeline, - test_mode=False, - )) - -# val datasets -val_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_val2017.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[], -) - -val_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_val.json', - data_prefix=dict( - img='pose/ai_challenge/ai_challenger_keypoint' - '_validation_20170911/keypoint_validation_images_20170911/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) - ], -) - -val_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) - ], -) - -val_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_val.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) - ], -) - -val_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_test.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) - ], -) - -val_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_val_v1.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) - ], -) - -val_ochuman = dict( - type='OCHumanDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='ochuman/annotations/' - 'ochuman_coco_format_val_range_0.00_1.00.json', - data_prefix=dict(img='pose/OCHuman/images/'), - pipeline=[ - dict(type='KeypointConverter', 
num_keypoints=17, mapping=ochuman_coco) - ], -) - -val_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_val.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) - ], -) - -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_val2017.json', - bbox_file=f'{data_root}coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='detection/coco/val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) - -test_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/coco.py'), - datasets=[ - val_coco, - val_aic, - val_crowdpose, - val_mpii, - val_jhmdb, - val_halpe, - val_ochuman, - val_posetrack, - ], - pipeline=val_pipeline, - test_mode=True, - )) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) -# default_hooks = dict( -# checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') -test_evaluator = [ - dict(type='PCKAccuracy', thr=0.1), - dict(type='AUC'), - dict(type='EPE') -] +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 20 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + 
init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-body7_210e-256x192-e0c9327b_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0.0, + drop_path=0.0, + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# mapping +aic_coco = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), +] + +crowdpose_coco = [ + (0, 5), + (1, 6), + (2, 7), + (3, 8), + (4, 9), + (5, 10), + (6, 11), + (7, 12), + (8, 13), + (9, 14), + (10, 15), + (11, 16), +] + +mpii_coco = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_coco = [ + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +ochuman_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 
9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +posetrack_coco = [ + (0, 0), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + 
ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=ochuman_coco) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) + +test_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) +# default_hooks = dict( +# checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = [ + dict(type='PCKAccuracy', thr=0.1), + dict(type='AUC'), + dict(type='EPE') +] diff --git a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-384x288.py b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-384x288.py index 64cfc8a604..4118d7dbce 100644 
--- a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-384x288.py +++ b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-384x288.py @@ -1,553 +1,553 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 420 -stage2_num_epochs = 20 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 210 to 420 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(288, 384), - sigma=(6., 6.93), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-m_udp-body7_210e-384x288-b9bc2b57_20230504.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=768, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0.0, - drop_path=0.0, - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict(type='PhotometricDistortion'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') 
-] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.5, 1.5], - rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# mapping -aic_coco = [ - (0, 6), - (1, 8), - (2, 10), - (3, 5), - (4, 7), - (5, 9), - (6, 12), - (7, 14), - (8, 16), - (9, 11), - (10, 13), - (11, 15), -] - -crowdpose_coco = [ - (0, 5), - (1, 6), - (2, 7), - (3, 8), - (4, 9), - (5, 10), - (6, 11), - (7, 12), - (8, 13), - (9, 14), - (10, 15), - (11, 16), -] - -mpii_coco = [ - (0, 16), - (1, 14), - (2, 12), - (3, 11), - (4, 13), - (5, 15), - (10, 10), - (11, 8), - (12, 6), - (13, 5), - (14, 7), - (15, 9), -] - -jhmdb_coco = [ - (3, 6), - (4, 5), - (5, 12), - (6, 11), - (7, 8), - (8, 7), - (9, 14), - (10, 13), - (11, 10), - (12, 9), - (13, 16), - (14, 15), -] - -halpe_coco = [ - (0, 0), - (1, 1), - (2, 2), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -ochuman_coco = [ - (0, 0), - (1, 1), - (2, 2), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -posetrack_coco = [ - (0, 0), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -# train datasets -dataset_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_train2017.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[], -) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) - ], -) - -dataset_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) - ], -) - -dataset_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_train.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) - ], -) - -dataset_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_train.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) - ], -) - -dataset_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - 
ann_file='halpe/annotations/halpe_train_v1.json', - data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) - ], -) - -dataset_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_train.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/coco.py'), - datasets=[ - dataset_coco, - dataset_aic, - dataset_crowdpose, - dataset_mpii, - dataset_jhmdb, - dataset_halpe, - dataset_posetrack, - ], - pipeline=train_pipeline, - test_mode=False, - )) - -# val datasets -val_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_val2017.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[], -) - -val_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_val.json', - data_prefix=dict( - img='pose/ai_challenge/ai_challenger_keypoint' - '_validation_20170911/keypoint_validation_images_20170911/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) - ], -) - -val_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) - ], -) - -val_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_val.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) - ], -) - -val_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_test.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) - ], -) - -val_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_val_v1.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) - ], -) - -val_ochuman = dict( - type='OCHumanDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='ochuman/annotations/' - 'ochuman_coco_format_val_range_0.00_1.00.json', - data_prefix=dict(img='pose/OCHuman/images/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=ochuman_coco) - ], -) - -val_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_val.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) - ], -) - -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - 
data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_val2017.json', - bbox_file=f'{data_root}coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='detection/coco/val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) - -test_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/coco.py'), - datasets=[ - val_coco, - val_aic, - val_crowdpose, - val_mpii, - val_jhmdb, - val_halpe, - val_ochuman, - val_posetrack, - ], - pipeline=val_pipeline, - test_mode=True, - )) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) -# default_hooks = dict( -# checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') -test_evaluator = [ - dict(type='PCKAccuracy', thr=0.1), - dict(type='AUC'), - dict(type='EPE') -] +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 20 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(288, 384), + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-body7_210e-384x288-b9bc2b57_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0.0, + drop_path=0.0, + act_fn='SiLU', + 
use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# mapping +aic_coco = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), +] + +crowdpose_coco = [ + (0, 5), + (1, 6), + (2, 7), + (3, 8), + (4, 9), + (5, 10), + (6, 11), + (7, 12), + (8, 13), + (9, 14), + (10, 15), + (11, 16), +] + +mpii_coco = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_coco = [ + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +ochuman_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +posetrack_coco = [ + (0, 0), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], +) + +dataset_aic = dict( + type='AicDataset', + 
data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + 
data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=ochuman_coco) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) + +test_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) +# default_hooks = dict( +# checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = [ + dict(type='PCKAccuracy', thr=0.1), + dict(type='AUC'), + dict(type='EPE') +] diff --git a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-256x192.py b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-256x192.py index e694dd27d9..cad89e4276 100644 --- a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-256x192.py +++ b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-256x192.py @@ -1,529 +1,529 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# common setting -num_keypoints = 26 -input_size = (192, 256) - -# runtime -max_epochs = 700 -stage2_num_epochs = 30 -base_lr = 4e-3 -train_batch_size = 512 -val_batch_size = 64 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - 
optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-256x192-e48f03d0_20230504.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=768, - out_channels=num_keypoints, - input_size=input_size, - in_featuremap_size=tuple([s // 32 for s in input_size]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PhotometricDistortion'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.5, 1.5], - rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( 
- type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# mapping -coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), - (20, 21), (21, 23), (22, 25)] - -aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), - (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), - (12, 17), (13, 18)] - -crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), - (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), - (13, 18)] - -mpii_halpe26 = [ - (0, 16), - (1, 14), - (2, 12), - (3, 11), - (4, 13), - (5, 15), - (8, 18), - (9, 17), - (10, 10), - (11, 8), - (12, 6), - (13, 5), - (14, 7), - (15, 9), -] - -jhmdb_halpe26 = [ - (0, 18), - (2, 17), - (3, 6), - (4, 5), - (5, 12), - (6, 11), - (7, 8), - (8, 7), - (9, 14), - (10, 13), - (11, 10), - (12, 9), - (13, 16), - (14, 15), -] - -halpe_halpe26 = [(i, i) for i in range(26)] - -ochuman_halpe26 = [(i, i) for i in range(17)] - -posetrack_halpe26 = [ - (0, 0), - (2, 17), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -# train datasets -dataset_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -dataset_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -dataset_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_train.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -dataset_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_train.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -dataset_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_train_v1.json', - data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -dataset_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_train.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - 
mapping=posetrack_halpe26) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - pin_memory=True, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - dataset_coco, - dataset_aic, - dataset_crowdpose, - dataset_mpii, - dataset_jhmdb, - dataset_halpe, - dataset_posetrack, - ], - pipeline=train_pipeline, - test_mode=False, - )) - -# val datasets -val_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -val_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_val.json', - data_prefix=dict( - img='pose/ai_challenge/ai_challenger_keypoint' - '_validation_20170911/keypoint_validation_images_20170911/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -val_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -val_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_val.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -val_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_test.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -val_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_val_v1.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -val_ochuman = dict( - type='OCHumanDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='ochuman/annotations/' - 'ochuman_coco_format_val_range_0.00_1.00.json', - data_prefix=dict(img='pose/OCHuman/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=ochuman_halpe26) - ], -) - -val_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_val.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - val_coco, - val_aic, - val_crowdpose, - val_mpii, - val_jhmdb, - val_halpe, - val_ochuman, - val_posetrack, - ], - pipeline=val_pipeline, - 
test_mode=True, - )) - -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] -val_evaluator = test_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# common setting +num_keypoints = 26 +input_size = (192, 256) + +# runtime +max_epochs = 700 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 512 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-256x192-e48f03d0_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + 
dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# mapping +coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), + (20, 21), (21, 23), (22, 25)] + +aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), + (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), + (12, 17), (13, 18)] + +crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), + (13, 18)] + +mpii_halpe26 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (8, 18), + (9, 17), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_halpe26 = [ + (0, 18), + (2, 17), + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_halpe26 = [(i, i) for i in range(26)] + +ochuman_halpe26 = [(i, i) for i in range(17)] + +posetrack_halpe26 = [ + (0, 0), + (2, 17), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + 
mapping=mpii_halpe26) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + pin_memory=True, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 
'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=ochuman_halpe26) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] +val_evaluator = test_evaluator diff --git a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-384x288.py b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-384x288.py index 5ee967a309..5c3aff2691 100644 --- a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-384x288.py +++ b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-384x288.py @@ -1,542 +1,542 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# common setting -num_keypoints = 26 -input_size = (288, 384) - -# runtime -max_epochs = 700 -stage2_num_epochs = 30 -base_lr = 4e-3 -train_batch_size = 512 -val_batch_size = 64 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(6., 6.93), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - 
act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-384x288-65e718c4_20230504.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=768, - out_channels=num_keypoints, - input_size=input_size, - in_featuremap_size=tuple([s // 32 for s in input_size]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/' - -# backend_args = dict(backend='local') -backend_args = dict( - backend='petrel', - path_mapping=dict({ - f'{data_root}': 's3://openmmlab/datasets/', - f'{data_root}': 's3://openmmlab/datasets/' - })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PhotometricDistortion'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.5, 1.5], - rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] - -# mapping -coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), - (20, 21), (21, 23), (22, 25)] - -aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), - (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), - (12, 17), (13, 18)] - -crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), - (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), - (13, 18)] - -mpii_halpe26 = [ - (0, 16), - (1, 14), - (2, 12), - (3, 11), - (4, 13), - (5, 15), - (8, 18), - (9, 17), - (10, 10), - (11, 8), - (12, 6), - (13, 5), - (14, 7), - (15, 9), -] - -jhmdb_halpe26 = [ - (0, 18), - (2, 17), - (3, 6), - (4, 5), - (5, 12), - (6, 11), - (7, 8), - (8, 7), - (9, 14), - (10, 13), - (11, 
10), - (12, 9), - (13, 16), - (14, 15), -] - -halpe_halpe26 = [(i, i) for i in range(26)] - -ochuman_halpe26 = [(i, i) for i in range(17)] - -posetrack_halpe26 = [ - (0, 0), - (2, 17), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -# train datasets -dataset_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -dataset_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -dataset_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_train.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -dataset_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_train.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -dataset_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_train_v1.json', - data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -dataset_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_train.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - pin_memory=True, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - dataset_coco, - dataset_aic, - dataset_crowdpose, - dataset_mpii, - dataset_jhmdb, - dataset_halpe, - dataset_posetrack, - ], - pipeline=train_pipeline, - test_mode=False, - )) - -# val datasets -val_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -val_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - 
ann_file='aic/annotations/aic_val.json', - data_prefix=dict( - img='pose/ai_challenge/ai_challenger_keypoint' - '_validation_20170911/keypoint_validation_images_20170911/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -val_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -val_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_val.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -val_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_test.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -val_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_val_v1.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -val_ochuman = dict( - type='OCHumanDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='ochuman/annotations/' - 'ochuman_coco_format_val_range_0.00_1.00.json', - data_prefix=dict(img='pose/OCHuman/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=ochuman_halpe26) - ], -) - -val_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_val.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - val_coco, - val_aic, - val_crowdpose, - val_mpii, - val_jhmdb, - val_halpe, - val_ochuman, - val_posetrack, - ], - pipeline=val_pipeline, - test_mode=True, - )) - -test_dataloader = val_dataloader - -# hooks -# default_hooks = dict( -default_hooks = dict( - checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] -val_evaluator = test_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# common setting +num_keypoints = 26 +input_size = (288, 384) + +# runtime +max_epochs = 700 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 512 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + 
type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-384x288-65e718c4_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +# backend_args = dict(backend='local') +backend_args = dict( + backend='petrel', + path_mapping=dict({ + f'{data_root}': 's3://openmmlab/datasets/', + f'{data_root}': 's3://openmmlab/datasets/' + })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., 
+ scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] + +# mapping +coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), + (20, 21), (21, 23), (22, 25)] + +aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), + (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), + (12, 17), (13, 18)] + +crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), + (13, 18)] + +mpii_halpe26 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (8, 18), + (9, 17), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_halpe26 = [ + (0, 18), + (2, 17), + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_halpe26 = [(i, i) for i in range(26)] + +ochuman_halpe26 = [(i, i) for i in range(17)] + +posetrack_halpe26 = [ + (0, 0), + (2, 17), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +dataset_posetrack = dict( + 
type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + pin_memory=True, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=ochuman_halpe26) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, 
round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +test_dataloader = val_dataloader + +# hooks +# default_hooks = dict( +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] +val_evaluator = test_evaluator diff --git a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb1024-700e_body8-halpe26-256x192.py b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb1024-700e_body8-halpe26-256x192.py index 05e6ec0980..7890c58e6b 100644 --- a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb1024-700e_body8-halpe26-256x192.py +++ b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb1024-700e_body8-halpe26-256x192.py @@ -1,535 +1,535 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# common setting -num_keypoints = 26 -input_size = (192, 256) - -# runtime -max_epochs = 700 -stage2_num_epochs = 30 -base_lr = 4e-3 -train_batch_size = 1024 -val_batch_size = 64 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.0), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.33, - widen_factor=0.5, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/rtmpose-s_simcc-body7_pt-body7_420e-256x192-acd4a1ef_20230504.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=512, - out_channels=num_keypoints, - input_size=input_size, - in_featuremap_size=tuple([s // 32 for s in input_size]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - 
use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PhotometricDistortion'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.6, 1.4], - rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] - -# mapping -coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), - (20, 21), (21, 23), (22, 25)] - -aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), - (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), - (12, 17), (13, 18)] - -crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), - (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), - (13, 18)] - -mpii_halpe26 = [ - (0, 16), - (1, 14), - (2, 12), - (3, 11), - (4, 13), - (5, 15), - (8, 18), - (9, 17), - (10, 10), - (11, 8), - (12, 6), - (13, 5), - (14, 7), - (15, 9), -] - -jhmdb_halpe26 = [ - (0, 18), - (2, 17), - (3, 6), - (4, 5), - (5, 12), - (6, 11), - (7, 8), - (8, 7), - (9, 14), - (10, 13), - (11, 10), - (12, 9), - (13, 16), - (14, 15), -] - -halpe_halpe26 = [(i, i) for i in range(26)] - -ochuman_halpe26 = [(i, i) for i in range(17)] - -posetrack_halpe26 = [ - (0, 0), - (2, 17), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -# train datasets -dataset_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - 
data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -dataset_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -dataset_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_train.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -dataset_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_train.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -dataset_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_train_v1.json', - data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -dataset_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_train.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - pin_memory=True, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - dataset_coco, - dataset_aic, - dataset_crowdpose, - dataset_mpii, - dataset_jhmdb, - dataset_halpe, - dataset_posetrack, - ], - pipeline=train_pipeline, - test_mode=False, - )) - -# val datasets -val_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -val_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_val.json', - data_prefix=dict( - img='pose/ai_challenge/ai_challenger_keypoint' - '_validation_20170911/keypoint_validation_images_20170911/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -val_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -val_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_val.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - 
type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -val_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_test.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -val_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_val_v1.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -val_ochuman = dict( - type='OCHumanDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='ochuman/annotations/' - 'ochuman_coco_format_val_range_0.00_1.00.json', - data_prefix=dict(img='pose/OCHuman/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=ochuman_halpe26) - ], -) - -val_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_val.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - val_coco, - val_aic, - val_crowdpose, - val_mpii, - val_jhmdb, - val_halpe, - val_ochuman, - val_posetrack, - ], - pipeline=val_pipeline, - test_mode=True, - )) - -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] -val_evaluator = test_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# common setting +num_keypoints = 26 +input_size = (192, 256) + +# runtime +max_epochs = 700 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 1024 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.0), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + 
type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/rtmpose-s_simcc-body7_pt-body7_420e-256x192-acd4a1ef_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=512, + out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.6, 1.4], + rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] + +# mapping +coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), + (20, 21), (21, 23), (22, 25)] + +aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), + (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), + (12, 17), (13, 18)] + +crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), + (13, 18)] + +mpii_halpe26 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (8, 
18), + (9, 17), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_halpe26 = [ + (0, 18), + (2, 17), + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_halpe26 = [(i, i) for i in range(26)] + +ochuman_halpe26 = [(i, i) for i in range(17)] + +posetrack_halpe26 = [ + (0, 0), + (2, 17), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + pin_memory=True, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + 
data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=ochuman_halpe26) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] +val_evaluator = test_evaluator diff --git a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb256-420e_body8-256x192.py 
b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb256-420e_body8-256x192.py index 7d0a697751..a229d05e8a 100644 --- a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb256-420e_body8-256x192.py +++ b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb256-420e_body8-256x192.py @@ -1,553 +1,553 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 420 -stage2_num_epochs = 20 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.0), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 210 to 420 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(192, 256), - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.33, - widen_factor=0.5, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-s_udp-body7_210e-256x192-8c9ccbdb_20230504.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=512, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict(type='PhotometricDistortion'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - 
dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# mapping -aic_coco = [ - (0, 6), - (1, 8), - (2, 10), - (3, 5), - (4, 7), - (5, 9), - (6, 12), - (7, 14), - (8, 16), - (9, 11), - (10, 13), - (11, 15), -] - -crowdpose_coco = [ - (0, 5), - (1, 6), - (2, 7), - (3, 8), - (4, 9), - (5, 10), - (6, 11), - (7, 12), - (8, 13), - (9, 14), - (10, 15), - (11, 16), -] - -mpii_coco = [ - (0, 16), - (1, 14), - (2, 12), - (3, 11), - (4, 13), - (5, 15), - (10, 10), - (11, 8), - (12, 6), - (13, 5), - (14, 7), - (15, 9), -] - -jhmdb_coco = [ - (3, 6), - (4, 5), - (5, 12), - (6, 11), - (7, 8), - (8, 7), - (9, 14), - (10, 13), - (11, 10), - (12, 9), - (13, 16), - (14, 15), -] - -halpe_coco = [ - (0, 0), - (1, 1), - (2, 2), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -ochuman_coco = [ - (0, 0), - (1, 1), - (2, 2), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -posetrack_coco = [ - (0, 0), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -# train datasets -dataset_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_train2017.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[], -) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) - ], -) - -dataset_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) - ], -) - -dataset_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_train.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) - ], -) - -dataset_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_train.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) - ], -) - 
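The KeypointConverter entries above pair a source-dataset keypoint index with its target slot in the 17-point COCO layout. A minimal sketch of that remapping idea follows (illustrative only, not MMPose's actual KeypointConverter implementation; the helper name, array shapes, and zero-fill behaviour are assumptions):

import numpy as np

def remap_keypoints(src_kpts, src_vis, mapping, num_target=17):
    # src_kpts: (K_src, 2) coordinates, src_vis: (K_src,) visibility flags.
    dst_kpts = np.zeros((num_target, 2), dtype=np.float32)
    dst_vis = np.zeros(num_target, dtype=np.float32)
    for src_idx, dst_idx in mapping:
        # Copy each annotated source joint into its COCO slot; target slots
        # with no source counterpart keep zero visibility and so contribute
        # no supervision signal.
        dst_kpts[dst_idx] = src_kpts[src_idx]
        dst_vis[dst_idx] = src_vis[src_idx]
    return dst_kpts, dst_vis

# e.g. the JHMDB -> COCO mapping used by dataset_jhmdb above (15 source joints):
jhmdb_coco = [(3, 6), (4, 5), (5, 12), (6, 11), (7, 8), (8, 7),
              (9, 14), (10, 13), (11, 10), (12, 9), (13, 16), (14, 15)]
coco_kpts, coco_vis = remap_keypoints(np.random.rand(15, 2), np.ones(15), jhmdb_coco)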
-dataset_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_train_v1.json', - data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) - ], -) - -dataset_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_train.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/coco.py'), - datasets=[ - dataset_coco, - dataset_aic, - dataset_crowdpose, - dataset_mpii, - dataset_jhmdb, - dataset_halpe, - dataset_posetrack, - ], - pipeline=train_pipeline, - test_mode=False, - )) - -# val datasets -val_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_val2017.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[], -) - -val_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_val.json', - data_prefix=dict( - img='pose/ai_challenge/ai_challenger_keypoint' - '_validation_20170911/keypoint_validation_images_20170911/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) - ], -) - -val_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) - ], -) - -val_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_val.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) - ], -) - -val_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_test.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) - ], -) - -val_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_val_v1.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) - ], -) - -val_ochuman = dict( - type='OCHumanDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='ochuman/annotations/' - 'ochuman_coco_format_val_range_0.00_1.00.json', - data_prefix=dict(img='pose/OCHuman/images/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=ochuman_coco) - ], -) - -val_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_val.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) - ], -) - -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - 
sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_val2017.json', - bbox_file=f'{data_root}coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='detection/coco/val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) - -test_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/coco.py'), - datasets=[ - val_coco, - val_aic, - val_crowdpose, - val_mpii, - val_jhmdb, - val_halpe, - val_ochuman, - val_posetrack, - ], - pipeline=val_pipeline, - test_mode=True, - )) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) -# default_hooks = dict( -# checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') -test_evaluator = [ - dict(type='PCKAccuracy', thr=0.1), - dict(type='AUC'), - dict(type='EPE') -] +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 20 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.0), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-s_udp-body7_210e-256x192-8c9ccbdb_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=512, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + 
hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# mapping +aic_coco = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), +] + +crowdpose_coco = [ + (0, 5), + (1, 6), + (2, 7), + (3, 8), + (4, 9), + (5, 10), + (6, 11), + (7, 12), + (8, 13), + (9, 14), + (10, 15), + (11, 16), +] + +mpii_coco = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_coco = [ + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +ochuman_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +posetrack_coco = [ + (0, 0), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + 
data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +val_jhmdb = dict( + 
type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=ochuman_coco) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) + +test_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) +# default_hooks = dict( +# checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = [ + dict(type='PCKAccuracy', thr=0.1), + dict(type='AUC'), + dict(type='EPE') +] diff --git a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb1024-700e_body8-halpe26-256x192.py b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb1024-700e_body8-halpe26-256x192.py index 8d70bd27ae..3404d52fd9 100644 --- a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb1024-700e_body8-halpe26-256x192.py +++ b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb1024-700e_body8-halpe26-256x192.py @@ -1,536 +1,536 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# common setting -num_keypoints = 26 -input_size = (192, 256) - -# runtime -max_epochs = 700 -stage2_num_epochs = 30 -base_lr = 4e-3 -train_batch_size = 1024 -val_batch_size = 64 - -train_cfg = dict(max_epochs=max_epochs, 
val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.167, - widen_factor=0.375, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-tiny_udp-body7_210e-256x192-a3775292_20230504.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=384, - out_channels=num_keypoints, - input_size=input_size, - in_featuremap_size=tuple([s // 32 for s in input_size]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PhotometricDistortion'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.6, 1.4], - rotate_factor=80), - dict(type='TopdownAffine', 
input_size=codec['input_size']), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] - -# mapping -coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), - (20, 21), (21, 23), (22, 25)] - -aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), - (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), - (12, 17), (13, 18)] - -crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), - (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), - (13, 18)] - -mpii_halpe26 = [ - (0, 16), - (1, 14), - (2, 12), - (3, 11), - (4, 13), - (5, 15), - (8, 18), - (9, 17), - (10, 10), - (11, 8), - (12, 6), - (13, 5), - (14, 7), - (15, 9), -] - -jhmdb_halpe26 = [ - (0, 18), - (2, 17), - (3, 6), - (4, 5), - (5, 12), - (6, 11), - (7, 8), - (8, 7), - (9, 14), - (10, 13), - (11, 10), - (12, 9), - (13, 16), - (14, 15), -] - -halpe_halpe26 = [(i, i) for i in range(26)] - -ochuman_halpe26 = [(i, i) for i in range(17)] - -posetrack_halpe26 = [ - (0, 0), - (2, 17), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -# train datasets -dataset_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -dataset_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -dataset_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_train.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -dataset_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_train.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -dataset_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_train_v1.json', - data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -dataset_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - 
ann_file='posetrack18/annotations/posetrack18_train.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - pin_memory=True, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - dataset_coco, - dataset_aic, - dataset_crowdpose, - dataset_mpii, - dataset_jhmdb, - dataset_halpe, - dataset_posetrack, - ], - pipeline=train_pipeline, - test_mode=False, - )) - -# val datasets -val_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -val_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_val.json', - data_prefix=dict( - img='pose/ai_challenge/ai_challenger_keypoint' - '_validation_20170911/keypoint_validation_images_20170911/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -val_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -val_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_val.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -val_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_test.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -val_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_val_v1.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -val_ochuman = dict( - type='OCHumanDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='ochuman/annotations/' - 'ochuman_coco_format_val_range_0.00_1.00.json', - data_prefix=dict(img='pose/OCHuman/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=ochuman_halpe26) - ], -) - -val_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_val.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - pin_memory=True, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type='CombinedDataset', - 
metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - val_coco, - val_aic, - val_crowdpose, - val_mpii, - val_jhmdb, - val_halpe, - val_ochuman, - val_posetrack, - ], - pipeline=val_pipeline, - test_mode=True, - )) - -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - # dict( - # type='EMAHook', - # ema_type='ExpMomentumEMA', - # momentum=0.0002, - # update_buffers=True, - # priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] -val_evaluator = test_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# common setting +num_keypoints = 26 +input_size = (192, 256) + +# runtime +max_epochs = 700 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 1024 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.167, + widen_factor=0.375, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-tiny_udp-body7_210e-256x192-a3775292_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=384, + out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', 
scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.6, 1.4], + rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] + +# mapping +coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), + (20, 21), (21, 23), (22, 25)] + +aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), + (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), + (12, 17), (13, 18)] + +crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), + (13, 18)] + +mpii_halpe26 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (8, 18), + (9, 17), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_halpe26 = [ + (0, 18), + (2, 17), + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_halpe26 = [(i, i) for i in range(26)] + +ochuman_halpe26 = [(i, i) for i in range(17)] + +posetrack_halpe26 = [ + (0, 0), + (2, 17), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + 
mapping=crowdpose_halpe26) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + pin_memory=True, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + 
data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=ochuman_halpe26) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + pin_memory=True, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + # dict( + # type='EMAHook', + # ema_type='ExpMomentumEMA', + # momentum=0.0002, + # update_buffers=True, + # priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] +val_evaluator = test_evaluator diff --git a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb256-420e_body8-256x192.py b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb256-420e_body8-256x192.py index bdc7f80a2b..966c545c98 100644 --- a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb256-420e_body8-256x192.py +++ b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb256-420e_body8-256x192.py @@ -1,554 +1,554 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 420 -stage2_num_epochs = 20 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 210 to 420 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(192, 256), - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - 
type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.167, - widen_factor=0.375, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-tiny_udp-body7_210e-256x192-a3775292_20230504.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=384, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict(type='PhotometricDistortion'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# mapping -aic_coco = [ - (0, 6), - (1, 8), - (2, 10), - (3, 5), - (4, 7), - (5, 9), - (6, 12), - (7, 14), - (8, 16), - (9, 11), - (10, 13), - (11, 15), -] - -crowdpose_coco = [ - (0, 5), - (1, 6), - (2, 7), - (3, 8), - (4, 9), - (5, 10), - (6, 11), - (7, 12), - (8, 13), - (9, 14), - (10, 15), - (11, 16), -] - -mpii_coco = [ - (0, 16), - (1, 14), - (2, 12), - (3, 11), - (4, 13), - (5, 15), - (10, 10), - (11, 8), - (12, 6), - (13, 5), - (14, 7), - (15, 9), -] - -jhmdb_coco = [ - (3, 6), - (4, 5), - (5, 12), - (6, 11), - (7, 8), - (8, 7), - (9, 14), - (10, 13), - (11, 10), - (12, 9), - (13, 16), - (14, 15), -] - -halpe_coco = [ - (0, 0), - (1, 1), - (2, 2), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 
9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -ochuman_coco = [ - (0, 0), - (1, 1), - (2, 2), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -posetrack_coco = [ - (0, 0), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -# train datasets -dataset_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_train2017.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[], -) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) - ], -) - -dataset_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) - ], -) - -dataset_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_train.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) - ], -) - -dataset_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_train.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) - ], -) - -dataset_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_train_v1.json', - data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) - ], -) - -dataset_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_train.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/coco.py'), - datasets=[ - dataset_coco, - dataset_aic, - dataset_crowdpose, - dataset_mpii, - dataset_jhmdb, - dataset_halpe, - dataset_posetrack, - ], - pipeline=train_pipeline, - test_mode=False, - )) - -# val datasets -val_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_val2017.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[], -) - -val_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_val.json', - data_prefix=dict( - img='pose/ai_challenge/ai_challenger_keypoint' - '_validation_20170911/keypoint_validation_images_20170911/'), - pipeline=[ - 
dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) - ], -) - -val_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) - ], -) - -val_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_val.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) - ], -) - -val_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_test.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) - ], -) - -val_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_val_v1.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) - ], -) - -val_ochuman = dict( - type='OCHumanDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='ochuman/annotations/' - 'ochuman_coco_format_val_range_0.00_1.00.json', - data_prefix=dict(img='pose/OCHuman/images/'), - pipeline=[ - dict(type='KeypointConverter', num_keypoints=17, mapping=ochuman_coco) - ], -) - -val_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_val.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) - ], -) - -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_val2017.json', - bbox_file=f'{data_root}coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='detection/coco/val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) - -test_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/coco.py'), - datasets=[ - val_coco, - val_aic, - val_crowdpose, - val_mpii, - val_jhmdb, - val_halpe, - val_ochuman, - val_posetrack, - ], - pipeline=val_pipeline, - test_mode=True, - )) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) -# default_hooks = dict( -# checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - # dict( - # type='EMAHook', - # ema_type='ExpMomentumEMA', - # momentum=0.0002, - # update_buffers=True, - # priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') - -test_evaluator = [ - dict(type='PCKAccuracy', thr=0.1), - dict(type='AUC'), - dict(type='EPE') -] +_base_ = 
['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 20 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.167, + widen_factor=0.375, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-tiny_udp-body7_210e-256x192-a3775292_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=384, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + 
dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# mapping +aic_coco = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), +] + +crowdpose_coco = [ + (0, 5), + (1, 6), + (2, 7), + (3, 8), + (4, 9), + (5, 10), + (6, 11), + (7, 12), + (8, 13), + (9, 14), + (10, 15), + (11, 16), +] + +mpii_coco = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_coco = [ + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +ochuman_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +posetrack_coco = [ + (0, 0), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], 
+) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=aic_coco) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=crowdpose_coco) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=mpii_coco) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=jhmdb_coco) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=halpe_coco) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict(type='KeypointConverter', num_keypoints=17, mapping=ochuman_coco) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=17, mapping=posetrack_coco) + ], +) + +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + 
data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) + +test_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) +# default_hooks = dict( +# checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + # dict( + # type='EMAHook', + # ema_type='ExpMomentumEMA', + # momentum=0.0002, + # update_buffers=True, + # priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') + +test_evaluator = [ + dict(type='PCKAccuracy', thr=0.1), + dict(type='AUC'), + dict(type='EPE') +] diff --git a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-x_8xb256-700e_body8-halpe26-384x288.py b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-x_8xb256-700e_body8-halpe26-384x288.py index e50aa42f0e..f0c68e7af4 100644 --- a/configs/body_2d_keypoint/rtmpose/body8/rtmpose-x_8xb256-700e_body8-halpe26-384x288.py +++ b/configs/body_2d_keypoint/rtmpose/body8/rtmpose-x_8xb256-700e_body8-halpe26-384x288.py @@ -1,535 +1,535 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# common setting -num_keypoints = 26 -input_size = (288, 384) - -# runtime -max_epochs = 700 -stage2_num_epochs = 20 -base_lr = 4e-3 -train_batch_size = 256 -val_batch_size = 64 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(6., 6.93), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=1.33, - widen_factor=1.25, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-x_udp-body7_210e-384x288-d28b58e6_20230529.pth' # noqa - )), - head=dict( - type='RTMCCHead', - 
in_channels=1280, - out_channels=num_keypoints, - input_size=input_size, - in_featuremap_size=tuple([s // 32 for s in input_size]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PhotometricDistortion'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.5, 1.5], - rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] - -# mapping -coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), - (20, 21), (21, 23), (22, 25)] - -aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), - (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), - (12, 17), (13, 18)] - -crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), - (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), - (13, 18)] - -mpii_halpe26 = [ - (0, 16), - (1, 14), - (2, 12), - (3, 11), - (4, 13), - (5, 15), - (8, 18), - (9, 17), - (10, 10), - (11, 8), - (12, 6), - (13, 5), - (14, 7), - (15, 9), -] - -jhmdb_halpe26 = [ - (0, 18), - (2, 17), - (3, 6), - (4, 5), - (5, 12), - (6, 11), - (7, 8), - (8, 7), - (9, 14), - (10, 13), - (11, 10), - (12, 9), - (13, 16), - (14, 15), -] - -halpe_halpe26 = [(i, i) for i in range(26)] - -ochuman_halpe26 = [(i, i) for i in range(17)] - -posetrack_halpe26 = [ - (0, 0), - (2, 17), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -# train datasets -dataset_coco = dict( - type=dataset_type, - data_root=data_root, - 
data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -dataset_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -dataset_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_train.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -dataset_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_train.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -dataset_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_train_v1.json', - data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -dataset_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_train.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - pin_memory=True, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - dataset_coco, - dataset_aic, - dataset_crowdpose, - dataset_mpii, - dataset_jhmdb, - dataset_halpe, - dataset_posetrack, - ], - pipeline=train_pipeline, - test_mode=False, - )) - -# val datasets -val_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -val_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_val.json', - data_prefix=dict( - img='pose/ai_challenge/ai_challenger_keypoint' - '_validation_20170911/keypoint_validation_images_20170911/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -val_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', 
- data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -val_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_val.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -val_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_test.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -val_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_val_v1.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -val_ochuman = dict( - type='OCHumanDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='ochuman/annotations/' - 'ochuman_coco_format_val_range_0.00_1.00.json', - data_prefix=dict(img='pose/OCHuman/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=ochuman_halpe26) - ], -) - -val_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_val.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - val_coco, - val_aic, - val_crowdpose, - val_mpii, - val_jhmdb, - val_halpe, - val_ochuman, - val_posetrack, - ], - pipeline=val_pipeline, - test_mode=True, - )) - -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] -val_evaluator = test_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# common setting +num_keypoints = 26 +input_size = (288, 384) + +# runtime +max_epochs = 700 +stage2_num_epochs = 20 +base_lr = 4e-3 +train_batch_size = 256 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + 
by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1.33, + widen_factor=1.25, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-x_udp-body7_210e-384x288-d28b58e6_20230529.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1280, + out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] + +# mapping +coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), + (20, 21), (21, 23), (22, 25)] + +aic_halpe26 = [(0, 6), (1, 8), (2, 
10), (3, 5), (4, 7), + (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), + (12, 17), (13, 18)] + +crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), + (13, 18)] + +mpii_halpe26 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (8, 18), + (9, 17), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_halpe26 = [ + (0, 18), + (2, 17), + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_halpe26 = [(i, i) for i in range(26)] + +ochuman_halpe26 = [(i, i) for i in range(17)] + +posetrack_halpe26 = [ + (0, 0), + (2, 17), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + pin_memory=True, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + dataset_coco, + dataset_aic, + 
dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=ochuman_halpe26) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - 
stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] +val_evaluator = test_evaluator diff --git a/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-coco.md b/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-coco.md index 5355a7f35b..261949c327 100644 --- a/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-coco.md +++ b/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-coco.md @@ -1,76 +1,76 @@ - - -
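Both RTMPose configs above follow the same three-phase recipe: a 1000-iteration linear warmup, cosine decay over the second half of training, and a switch to the lighter stage-2 augmentation pipeline via `mmdet.PipelineSwitchHook` shortly before the end. The sketch below only illustrates how those phase boundaries fall out of the constants in the configs; the helper functions are hypothetical and not part of MMPose.

```python
import math

def schedule_summary(max_epochs, stage2_num_epochs, base_lr, warmup_iters=1000):
    """Illustrative only: derive the phase boundaries used by the configs above."""
    cosine_begin = max_epochs // 2                  # CosineAnnealingLR `begin`
    switch_epoch = max_epochs - stage2_num_epochs   # PipelineSwitchHook `switch_epoch`
    eta_min = base_lr * 0.05                        # floor of the cosine schedule
    return dict(
        warmup_iters=warmup_iters,                  # LinearLR covers the first 1000 iterations
        cosine_window=(cosine_begin, max_epochs),
        switch_epoch=switch_epoch,
        lr_range=(base_lr, eta_min),
    )

def cosine_lr_at(epoch, max_epochs, base_lr):
    """Cosine learning rate once the CosineAnnealingLR phase is active."""
    t_max = max_epochs // 2
    eta_min = base_lr * 0.05
    t = min(max(epoch - t_max, 0), t_max)
    return eta_min + 0.5 * (base_lr - eta_min) * (1 + math.cos(math.pi * t / t_max))

# rtmpose-x body8-halpe26: max_epochs=700, stage2_num_epochs=20, base_lr=4e-3
print(schedule_summary(700, 20, 4e-3))  # cosine over epochs 350-700, pipeline switch at 680
print(cosine_lr_at(700, 700, 4e-3))     # 2e-4 (eta_min) at the final epoch
```

The same arithmetic applies to the tiny model above, where `max_epochs=420` puts the cosine window at epochs 210-420 and the pipeline switch at epoch 400.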
-RTMPose (arXiv'2023) - -```bibtex -@misc{https://doi.org/10.48550/arxiv.2303.07399, - doi = {10.48550/ARXIV.2303.07399}, - url = {https://arxiv.org/abs/2303.07399}, - author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, - keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, - title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, - publisher = {arXiv}, - year = {2023}, - copyright = {Creative Commons Attribution 4.0 International} -} - -``` - -
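The per-dataset pipelines in the configs above each wrap a `KeypointConverter` with a mapping such as `aic_halpe26` or `coco_halpe26`, so every source skeleton lands on a common 17- or 26-keypoint layout before the shared training pipeline runs. The snippet below sketches only the index-remapping idea; it is not MMPose's `KeypointConverter` implementation, and the array shapes are assumptions.

```python
import numpy as np

def remap_keypoints(src_kpts, src_visible, mapping, num_keypoints):
    """Illustrative remapping: copy source keypoints into the target layout.

    src_kpts:    (N, K_src, 2) array of x/y coordinates
    src_visible: (N, K_src) visibility flags
    mapping:     list of (source_index, target_index) pairs, e.g. aic_halpe26
    Target joints that no source index maps to stay at zero with visibility 0.
    """
    n = src_kpts.shape[0]
    dst_kpts = np.zeros((n, num_keypoints, 2), dtype=src_kpts.dtype)
    dst_visible = np.zeros((n, num_keypoints), dtype=src_visible.dtype)
    for src_idx, dst_idx in mapping:
        dst_kpts[:, dst_idx] = src_kpts[:, src_idx]
        dst_visible[:, dst_idx] = src_visible[:, src_idx]
    return dst_kpts, dst_visible

# e.g. AIC's 14 joints mapped into the 26-keypoint Halpe layout
aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), (5, 9), (6, 12),
               (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), (12, 17), (13, 18)]
kpts = np.random.rand(2, 14, 2).astype(np.float32)
vis = np.ones((2, 14), dtype=np.float32)
halpe_kpts, halpe_vis = remap_keypoints(kpts, vis, aic_halpe26, num_keypoints=26)
print(halpe_kpts.shape, halpe_vis.sum(axis=1))  # (2, 26, 2), 14 visible joints per instance
```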
- - - -
-RTMDet (arXiv'2022) - -```bibtex -@misc{lyu2022rtmdet, - title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, - author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, - year={2022}, - eprint={2212.07784}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -- Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset. -- `*` denotes model trained on 7 public datasets: - - [AI Challenger](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#aic) - - [MS COCO](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#coco) - - [CrowdPose](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#crowdpose) - - [MPII](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#mpii) - - [sub-JHMDB](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#sub-jhmdb-dataset) - - [Halpe](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_wholebody_keypoint.html#halpe) - - [PoseTrack18](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#posetrack18) -- `Body8` denotes the addition of the [OCHuman](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#ochuman) dataset, in addition to the 7 datasets mentioned above, for evaluation. - -| Config | Input Size | AP
(COCO) | PCK@0.1<br>(Body8) | AUC<br>(Body8) | EPE<br>
(Body8) | Params(M) | FLOPS(G) | Download | -| :--------------------------------------------: | :--------: | :---------------: | :---------------------: | :-----------------: | :-----------------: | :-------: | :------: | :-----------------------------------------------: | -| [RTMPose-t\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb256-420e_body8-256x192.py) | 256x192 | 65.9 | 91.44 | 63.18 | 19.45 | 3.34 | 0.36 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-body7_pt-body7_420e-256x192-026a1439_20230504.pth) | -| [RTMPose-s\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb256-420e_body8-256x192.py) | 256x192 | 69.7 | 92.45 | 65.15 | 17.85 | 5.47 | 0.68 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-body7_pt-body7_420e-256x192-acd4a1ef_20230504.pth) | -| [RTMPose-m\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-256x192.py) | 256x192 | 74.9 | 94.25 | 68.59 | 15.12 | 13.59 | 1.93 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-256x192-e48f03d0_20230504.pth) | -| [RTMPose-l\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-256x192.py) | 256x192 | 76.7 | 95.08 | 70.14 | 13.79 | 27.66 | 4.16 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-256x192-4dba18fc_20230504.pth) | -| [RTMPose-m\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-384x288.py) | 384x288 | 76.6 | 94.64 | 70.38 | 13.98 | 13.72 | 4.33 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-384x288-65e718c4_20230504.pth) | -| [RTMPose-l\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-384x288.py) | 384x288 | 78.3 | 95.36 | 71.58 | 13.08 | 27.79 | 9.35 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-384x288-3f5a1437_20230504.pth) | + + +
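The `SimCCLabel` codec configured in the RTMPose models earlier in this diff represents each keypoint coordinate as two 1-D classification targets of length `input_size * simcc_split_ratio`, smoothed by a Gaussian of width `sigma`. A rough sketch of that encoding is shown below, using the 192x256 input and split ratio 2 of the tiny model; it illustrates the idea only and is not the MMPose codec.

```python
import numpy as np

def simcc_encode(x, y, input_size=(192, 256), split_ratio=2.0, sigma=(4.9, 5.66)):
    """Illustrative SimCC-style encoding of one keypoint.

    Returns two 1-D Gaussian label vectors over sub-pixel bins along x and y.
    """
    w, h = input_size
    bins_x = int(w * split_ratio)   # 384 bins along x
    bins_y = int(h * split_ratio)   # 512 bins along y
    grid_x = np.arange(bins_x, dtype=np.float32)
    grid_y = np.arange(bins_y, dtype=np.float32)
    # place a Gaussian centred on the up-scaled target coordinate
    target_x = np.exp(-((grid_x - x * split_ratio) ** 2) / (2 * sigma[0] ** 2))
    target_y = np.exp(-((grid_y - y * split_ratio) ** 2) / (2 * sigma[1] ** 2))
    return target_x, target_y

tx, ty = simcc_encode(100.0, 128.0)
print(tx.shape, ty.shape, tx.argmax() / 2.0, ty.argmax() / 2.0)  # (384,) (512,) 100.0 128.0
```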
+RTMPose (arXiv'2023) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2303.07399, + doi = {10.48550/ARXIV.2303.07399}, + url = {https://arxiv.org/abs/2303.07399}, + author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, + publisher = {arXiv}, + year = {2023}, + copyright = {Creative Commons Attribution 4.0 International} +} + +``` + +
+ + + +
+RTMDet (arXiv'2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +- Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset. +- `*` denotes model trained on 7 public datasets: + - [AI Challenger](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#aic) + - [MS COCO](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#coco) + - [CrowdPose](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#crowdpose) + - [MPII](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#mpii) + - [sub-JHMDB](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#sub-jhmdb-dataset) + - [Halpe](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_wholebody_keypoint.html#halpe) + - [PoseTrack18](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#posetrack18) +- `Body8` denotes the addition of the [OCHuman](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#ochuman) dataset, in addition to the 7 datasets mentioned above, for evaluation. + +| Config | Input Size | AP
(COCO) | PCK@0.1<br>(Body8) | AUC<br>(Body8) | EPE<br>
(Body8) | Params(M) | FLOPS(G) | Download | +| :--------------------------------------------: | :--------: | :---------------: | :---------------------: | :-----------------: | :-----------------: | :-------: | :------: | :-----------------------------------------------: | +| [RTMPose-t\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb256-420e_body8-256x192.py) | 256x192 | 65.9 | 91.44 | 63.18 | 19.45 | 3.34 | 0.36 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-body7_pt-body7_420e-256x192-026a1439_20230504.pth) | +| [RTMPose-s\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb256-420e_body8-256x192.py) | 256x192 | 69.7 | 92.45 | 65.15 | 17.85 | 5.47 | 0.68 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-body7_pt-body7_420e-256x192-acd4a1ef_20230504.pth) | +| [RTMPose-m\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-256x192.py) | 256x192 | 74.9 | 94.25 | 68.59 | 15.12 | 13.59 | 1.93 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-256x192-e48f03d0_20230504.pth) | +| [RTMPose-l\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-256x192.py) | 256x192 | 76.7 | 95.08 | 70.14 | 13.79 | 27.66 | 4.16 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-256x192-4dba18fc_20230504.pth) | +| [RTMPose-m\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-384x288.py) | 384x288 | 76.6 | 94.64 | 70.38 | 13.98 | 13.72 | 4.33 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-384x288-65e718c4_20230504.pth) | +| [RTMPose-l\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-384x288.py) | 384x288 | 78.3 | 95.36 | 71.58 | 13.08 | 27.79 | 9.35 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-384x288-3f5a1437_20230504.pth) | diff --git a/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-coco.yml b/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-coco.yml index 9299eccb77..9c05bfe20b 100644 --- a/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-coco.yml +++ b/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-coco.yml @@ -1,93 +1,93 @@ -Collections: -- Name: RTMPose - Paper: - Title: "RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose" - URL: https://arxiv.org/abs/2303.07399 - README: https://github.com/open-mmlab/mmpose/blob/main/projects/rtmpose/README.md -Models: -- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb256-420e_body8-256x192.py - In Collection: RTMPose - Metadata: - Architecture: &id001 - - RTMPose - Training Data: &id002 - - AI Challenger - - COCO - - CrowdPose - - MPII - - sub-JHMDB - - Halpe - - PoseTrack18 - Name: rtmpose-t_8xb256-420e_body8-256x192 - Results: - - Dataset: Body8 - Metrics: - AP: 0.659 - Mean@0.1: 0.914 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-body7_pt-body7_420e-256x192-026a1439_20230504.pth -- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb256-420e_body8-256x192.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: *id002 - Name: rtmpose-s_8xb256-420e_body8-256x192 - Results: - - Dataset: Body8 - Metrics: - AP: 0.697 - Mean@0.1: 0.925 - Task: Body 2D Keypoint - Weights: 
https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-body7_pt-body7_420e-256x192-acd4a1ef_20230504.pth -- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-256x192.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: *id002 - Name: rtmpose-m_8xb256-420e_body8-256x192 - Results: - - Dataset: Body8 - Metrics: - AP: 0.749 - Mean@0.1: 0.943 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-256x192-e48f03d0_20230504.pth -- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-256x192.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: *id002 - Name: rtmpose-l_8xb256-420e_body8-256x192 - Results: - - Dataset: Body8 - Metrics: - AP: 0.767 - Mean@0.1: 0.951 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-256x192-4dba18fc_20230504.pth -- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-384x288.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: *id002 - Name: rtmpose-m_8xb256-420e_body8-384x288 - Results: - - Dataset: Body8 - Metrics: - AP: 0.766 - Mean@0.1: 0.946 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-384x288-65e718c4_20230504.pth -- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-384x288.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: *id002 - Name: rtmpose-l_8xb256-420e_body8-384x288 - Results: - - Dataset: Body8 - Metrics: - AP: 0.783 - Mean@0.1: 0.964 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-384x288-3f5a1437_20230504.pth +Collections: +- Name: RTMPose + Paper: + Title: "RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose" + URL: https://arxiv.org/abs/2303.07399 + README: https://github.com/open-mmlab/mmpose/blob/main/projects/rtmpose/README.md +Models: +- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb256-420e_body8-256x192.py + In Collection: RTMPose + Metadata: + Architecture: &id001 + - RTMPose + Training Data: &id002 + - AI Challenger + - COCO + - CrowdPose + - MPII + - sub-JHMDB + - Halpe + - PoseTrack18 + Name: rtmpose-t_8xb256-420e_body8-256x192 + Results: + - Dataset: Body8 + Metrics: + AP: 0.659 + Mean@0.1: 0.914 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-body7_pt-body7_420e-256x192-026a1439_20230504.pth +- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb256-420e_body8-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-s_8xb256-420e_body8-256x192 + Results: + - Dataset: Body8 + Metrics: + AP: 0.697 + Mean@0.1: 0.925 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-body7_pt-body7_420e-256x192-acd4a1ef_20230504.pth +- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-m_8xb256-420e_body8-256x192 + Results: + - Dataset: Body8 + Metrics: + AP: 0.749 + Mean@0.1: 0.943 + Task: Body 2D Keypoint + Weights: 
https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-256x192-e48f03d0_20230504.pth +- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-l_8xb256-420e_body8-256x192 + Results: + - Dataset: Body8 + Metrics: + AP: 0.767 + Mean@0.1: 0.951 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-256x192-4dba18fc_20230504.pth +- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-384x288.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-m_8xb256-420e_body8-384x288 + Results: + - Dataset: Body8 + Metrics: + AP: 0.766 + Mean@0.1: 0.946 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-384x288-65e718c4_20230504.pth +- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb256-420e_body8-384x288.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-l_8xb256-420e_body8-384x288 + Results: + - Dataset: Body8 + Metrics: + AP: 0.783 + Mean@0.1: 0.964 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-384x288-3f5a1437_20230504.pth diff --git a/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-halpe26.md b/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-halpe26.md index 153b71c663..c6ab08d172 100644 --- a/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-halpe26.md +++ b/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-halpe26.md @@ -1,74 +1,74 @@ - - -
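The `rtmpose_body8-coco.yml` metafile above (and `rtmpose_body8-halpe26.yml` further down in this patch) follow the OpenMMLab model-index convention: a `Collections` entry describing the paper, and a `Models` list whose items pair a config path with its checkpoint URL and reported metrics. As a quick way to inspect such a metafile, here is a minimal sketch, assuming PyYAML is installed and the script is run from the repository root; the snippet is illustrative only and is not part of the patch:

```python
# List the models registered in an MMPose model-index metafile together with
# their reported metrics and checkpoint URLs. YAML anchors (&id001 / *id001)
# are resolved automatically by safe_load.
import yaml

with open('configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-coco.yml') as f:
    metafile = yaml.safe_load(f)

for model in metafile['Models']:
    metrics = model['Results'][0]['Metrics']
    print(f"{model['Name']}: {metrics}")
    print(f"  weights: {model['Weights']}")
```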
-RTMPose (arXiv'2023) - -```bibtex -@misc{https://doi.org/10.48550/arxiv.2303.07399, - doi = {10.48550/ARXIV.2303.07399}, - url = {https://arxiv.org/abs/2303.07399}, - author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, - keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, - title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, - publisher = {arXiv}, - year = {2023}, - copyright = {Creative Commons Attribution 4.0 International} -} - -``` - -
- - - -
-RTMDet (arXiv'2022) - -```bibtex -@misc{lyu2022rtmdet, - title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, - author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, - year={2022}, - eprint={2212.07784}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -
- - - -
-AlphaPose (TPAMI'2022) - -```bibtex -@article{alphapose, - author = {Fang, Hao-Shu and Li, Jiefeng and Tang, Hongyang and Xu, Chao and Zhu, Haoyi and Xiu, Yuliang and Li, Yong-Lu and Lu, Cewu}, - journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, - title = {AlphaPose: Whole-Body Regional Multi-Person Pose Estimation and Tracking in Real-Time}, - year = {2022} -} -``` - -
- -- `*` denotes model trained on 7 public datasets: - - [AI Challenger](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#aic) - - [MS COCO](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#coco) - - [CrowdPose](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#crowdpose) - - [MPII](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#mpii) - - [sub-JHMDB](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#sub-jhmdb-dataset) - - [Halpe](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_wholebody_keypoint.html#halpe) - - [PoseTrack18](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#posetrack18) -- `Body8` denotes the addition of the [OCHuman](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#ochuman) dataset, in addition to the 7 datasets mentioned above, for evaluation. - -| Config | Input Size | PCK@0.1
(Body8) | AUC
(Body8) | Params(M) | FLOPS(G) | Download | -| :--------------------------------------------------------------: | :--------: | :---------------------: | :-----------------: | :-------: | :------: | :-----------------------------------------------------------------: | -| [RTMPose-t\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb1024-700e_body8-halpe26-256x192.py) | 256x192 | 91.89 | 66.35 | 3.51 | 0.37 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-body7_pt-body7-halpe26_700e-256x192-6020f8a6_20230605.pth) | -| [RTMPose-s\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb1024-700e_body8-halpe26-256x192.py) | 256x192 | 93.01 | 68.62 | 5.70 | 0.70 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-body7_pt-body7-halpe26_700e-256x192-7f134165_20230605.pth) | -| [RTMPose-m\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-256x192.py) | 256x192 | 94.75 | 71.91 | 13.93 | 1.95 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.pth) | -| [RTMPose-l\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-256x192.py) | 256x192 | 95.37 | 73.19 | 28.11 | 4.19 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7-halpe26_700e-256x192-2abb7558_20230605.pth) | -| [RTMPose-m\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-384x288.py) | 384x288 | 95.15 | 73.56 | 14.06 | 4.37 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-384x288-89e6428b_20230605.pth) | -| [RTMPose-l\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-384x288.py) | 384x288 | 95.56 | 74.38 | 28.24 | 9.40 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7-halpe26_700e-384x288-734182ce_20230605.pth) | -| [RTMPose-x\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-x_8xb256-700e_body8-halpe26-384x288.py) | 384x288 | 95.74 | 74.82 | 50.00 | 17.29 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-x_simcc-body7_pt-body7-halpe26_700e-384x288-7fb6e239_20230606.pth) | + + +
+RTMPose (arXiv'2023) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2303.07399, + doi = {10.48550/ARXIV.2303.07399}, + url = {https://arxiv.org/abs/2303.07399}, + author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, + publisher = {arXiv}, + year = {2023}, + copyright = {Creative Commons Attribution 4.0 International} +} + +``` + +
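All of the RTMPose configs touched by this patch use the `SimCCLabel` codec with `simcc_split_ratio=2.0`: each keypoint is represented as two 1-D classification vectors, one per image axis, at twice the input resolution, and decoding essentially reduces to an argmax divided by the split ratio. The numpy sketch below illustrates that idea only; it is a simplification, not MMPose's actual decoder.

```python
# Simplified SimCC decoding: recover (x, y) keypoint coordinates in
# input-image space from per-axis classification logits.
import numpy as np

def simcc_decode(simcc_x, simcc_y, simcc_split_ratio=2.0):
    """simcc_x: (K, W * ratio) logits, simcc_y: (K, H * ratio) logits."""
    x = np.argmax(simcc_x, axis=1) / simcc_split_ratio
    y = np.argmax(simcc_y, axis=1) / simcc_split_ratio
    # a crude per-keypoint confidence: the weaker of the two axis peaks
    scores = np.minimum(simcc_x.max(axis=1), simcc_y.max(axis=1))
    return np.stack([x, y], axis=1), scores

# For a 256x192 (H x W) input with split ratio 2.0 there are 384 x-bins and
# 512 y-bins per keypoint; the COCO-format models predict 17 keypoints.
kpts, scores = simcc_decode(np.random.rand(17, 384), np.random.rand(17, 512))
```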
+ + + +
+RTMDet (arXiv'2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+AlphaPose (TPAMI'2022) + +```bibtex +@article{alphapose, + author = {Fang, Hao-Shu and Li, Jiefeng and Tang, Hongyang and Xu, Chao and Zhu, Haoyi and Xiu, Yuliang and Li, Yong-Lu and Lu, Cewu}, + journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, + title = {AlphaPose: Whole-Body Regional Multi-Person Pose Estimation and Tracking in Real-Time}, + year = {2022} +} +``` + +
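The results tabulated after this note follow the standard two-stage top-down protocol: a person detector proposes boxes (the COCO val2017 numbers earlier in this patch are reported with detector boxes of 56.4 human AP), and the RTMPose model predicts keypoints inside each box. Below is a hedged sketch of that flow with one checkpoint from the table; the detector config/weights, the test image, and the device are placeholders, the pose config and checkpoint URL are taken from the table, and the calls assume mmdet >= 3.0 and mmpose >= 1.0 style APIs. MMPose's own `MMPoseInferencer` and demo scripts wrap the same steps.

```python
# Sketch of top-down inference with an RTMPose Body8-Halpe26 checkpoint.
# 'path/to/person_det_config.py', 'path/to/person_det_ckpt.pth' and 'demo.jpg'
# are placeholders; the pose config and checkpoint come from the table below.
from mmdet.apis import inference_detector, init_detector
from mmpose.apis import inference_topdown, init_model

det_model = init_detector(
    'path/to/person_det_config.py', 'path/to/person_det_ckpt.pth',
    device='cuda:0')
pose_model = init_model(
    'configs/body_2d_keypoint/rtmpose/body8/'
    'rtmpose-m_8xb512-700e_body8-halpe26-256x192.py',
    'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/'
    'rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.pth',
    device='cuda:0')

img = 'demo.jpg'
det = inference_detector(det_model, img).pred_instances.cpu().numpy()
# keep confident person detections (COCO class id 0)
bboxes = det.bboxes[(det.labels == 0) & (det.scores > 0.3)]

pose_samples = inference_topdown(pose_model, img, bboxes, bbox_format='xyxy')
# one PoseDataSample per box; keypoints has shape (1, 26, 2) for Halpe-26 models
keypoints = pose_samples[0].pred_instances.keypoints
```

Each path in the `Config` column of the table is also a complete training recipe that can be passed to MMPose's standard `tools/train.py` entry point.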
+ +- `*` denotes model trained on 7 public datasets: + - [AI Challenger](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#aic) + - [MS COCO](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#coco) + - [CrowdPose](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#crowdpose) + - [MPII](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#mpii) + - [sub-JHMDB](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#sub-jhmdb-dataset) + - [Halpe](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_wholebody_keypoint.html#halpe) + - [PoseTrack18](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#posetrack18) +- `Body8` denotes the addition of the [OCHuman](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#ochuman) dataset, in addition to the 7 datasets mentioned above, for evaluation. + +| Config | Input Size | PCK@0.1
(Body8) | AUC
(Body8) | Params(M) | FLOPS(G) | Download | +| :--------------------------------------------------------------: | :--------: | :---------------------: | :-----------------: | :-------: | :------: | :-----------------------------------------------------------------: | +| [RTMPose-t\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb1024-700e_body8-halpe26-256x192.py) | 256x192 | 91.89 | 66.35 | 3.51 | 0.37 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-body7_pt-body7-halpe26_700e-256x192-6020f8a6_20230605.pth) | +| [RTMPose-s\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb1024-700e_body8-halpe26-256x192.py) | 256x192 | 93.01 | 68.62 | 5.70 | 0.70 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-body7_pt-body7-halpe26_700e-256x192-7f134165_20230605.pth) | +| [RTMPose-m\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-256x192.py) | 256x192 | 94.75 | 71.91 | 13.93 | 1.95 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.pth) | +| [RTMPose-l\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-256x192.py) | 256x192 | 95.37 | 73.19 | 28.11 | 4.19 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7-halpe26_700e-256x192-2abb7558_20230605.pth) | +| [RTMPose-m\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-384x288.py) | 384x288 | 95.15 | 73.56 | 14.06 | 4.37 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-384x288-89e6428b_20230605.pth) | +| [RTMPose-l\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-384x288.py) | 384x288 | 95.56 | 74.38 | 28.24 | 9.40 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7-halpe26_700e-384x288-734182ce_20230605.pth) | +| [RTMPose-x\*](/configs/body_2d_keypoint/rtmpose/body8/rtmpose-x_8xb256-700e_body8-halpe26-384x288.py) | 384x288 | 95.74 | 74.82 | 50.00 | 17.29 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-x_simcc-body7_pt-body7-halpe26_700e-384x288-7fb6e239_20230606.pth) | diff --git a/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-halpe26.yml b/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-halpe26.yml index ceef6f9998..2b554fd66c 100644 --- a/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-halpe26.yml +++ b/configs/body_2d_keypoint/rtmpose/body8/rtmpose_body8-halpe26.yml @@ -1,106 +1,106 @@ -Collections: -- Name: RTMPose - Paper: - Title: "RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose" - URL: https://arxiv.org/abs/2303.07399 - README: https://github.com/open-mmlab/mmpose/blob/main/projects/rtmpose/README.md -Models: -- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb1024-700e_body8-halpe26-256x192.py - In Collection: RTMPose - Metadata: - Architecture: &id001 - - RTMPose - Training Data: &id002 - - AI Challenger - - COCO - - CrowdPose - - MPII - - sub-JHMDB - - Halpe - - PoseTrack18 - Name: rtmpose-t_8xb1024-700e_body8-halpe26-256x192 - Results: - - Dataset: Body8 - Metrics: - Mean@0.1: 0.919 - AUC: 0.664 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-body7_pt-body7-halpe26_700e-256x192-6020f8a6_20230605.pth -- Config: 
configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb1024-700e_body8-halpe26-256x192.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: *id002 - Name: rtmpose-s_8xb1024-700e_body8-halpe26-256x192 - Results: - - Dataset: Body8 - Metrics: - Mean@0.1: 0.930 - AUC: 0.682 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-body7_pt-body7-halpe26_700e-256x192-7f134165_20230605.pth -- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-256x192.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: *id002 - Name: rtmpose-m_8xb512-700e_body8-halpe26-256x192 - Results: - - Dataset: Body8 - Metrics: - Mean@0.1: 0.947 - AUC: 0.719 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.pth -- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-256x192.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: *id002 - Name: rtmpose-l_8xb512-700e_body8-halpe26-256x192 - Results: - - Dataset: Body8 - Metrics: - Mean@0.1: 0.954 - AUC: 0.732 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7-halpe26_700e-256x192-2abb7558_20230605.pth -- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-384x288.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: *id002 - Name: rtmpose-m_8xb512-700e_body8-halpe26-384x288 - Results: - - Dataset: Body8 - Metrics: - Mean@0.1: 0.952 - AUC: 0.736 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-384x288-89e6428b_20230605.pth -- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-384x288.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: *id002 - Name: rtmpose-l_8xb512-700e_body8-halpe26-384x288 - Results: - - Dataset: Body8 - Metrics: - Mean@0.1: 0.956 - AUC: 0.744 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7-halpe26_700e-384x288-734182ce_20230605.pth -- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-x_8xb256-700e_body8-halpe26-384x288.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: *id002 - Name: rtmpose-x_8xb256-700e_body8-halpe26-384x288 - Results: - - Dataset: Body8 - Metrics: - Mean@0.1: 0.957 - AUC: 0.748 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-x_simcc-body7_pt-body7-halpe26_700e-384x288-7fb6e239_20230606.pth +Collections: +- Name: RTMPose + Paper: + Title: "RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose" + URL: https://arxiv.org/abs/2303.07399 + README: https://github.com/open-mmlab/mmpose/blob/main/projects/rtmpose/README.md +Models: +- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-t_8xb1024-700e_body8-halpe26-256x192.py + In Collection: RTMPose + Metadata: + Architecture: &id001 + - RTMPose + Training Data: &id002 + - AI Challenger + - COCO + - CrowdPose + - MPII + - sub-JHMDB + - Halpe + - PoseTrack18 + Name: rtmpose-t_8xb1024-700e_body8-halpe26-256x192 + Results: + - Dataset: Body8 + Metrics: + Mean@0.1: 0.919 + AUC: 0.664 + Task: Body 2D Keypoint + Weights: 
https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-body7_pt-body7-halpe26_700e-256x192-6020f8a6_20230605.pth +- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-s_8xb1024-700e_body8-halpe26-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-s_8xb1024-700e_body8-halpe26-256x192 + Results: + - Dataset: Body8 + Metrics: + Mean@0.1: 0.930 + AUC: 0.682 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-body7_pt-body7-halpe26_700e-256x192-7f134165_20230605.pth +- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-m_8xb512-700e_body8-halpe26-256x192 + Results: + - Dataset: Body8 + Metrics: + Mean@0.1: 0.947 + AUC: 0.719 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.pth +- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-l_8xb512-700e_body8-halpe26-256x192 + Results: + - Dataset: Body8 + Metrics: + Mean@0.1: 0.954 + AUC: 0.732 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7-halpe26_700e-256x192-2abb7558_20230605.pth +- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb512-700e_body8-halpe26-384x288.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-m_8xb512-700e_body8-halpe26-384x288 + Results: + - Dataset: Body8 + Metrics: + Mean@0.1: 0.952 + AUC: 0.736 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-384x288-89e6428b_20230605.pth +- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-l_8xb512-700e_body8-halpe26-384x288.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-l_8xb512-700e_body8-halpe26-384x288 + Results: + - Dataset: Body8 + Metrics: + Mean@0.1: 0.956 + AUC: 0.744 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7-halpe26_700e-384x288-734182ce_20230605.pth +- Config: configs/body_2d_keypoint/rtmpose/body8/rtmpose-x_8xb256-700e_body8-halpe26-384x288.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-x_8xb256-700e_body8-halpe26-384x288 + Results: + - Dataset: Body8 + Metrics: + Mean@0.1: 0.957 + AUC: 0.748 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-x_simcc-body7_pt-body7-halpe26_700e-384x288-7fb6e239_20230606.pth diff --git a/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-256x192.py b/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-256x192.py index 662bd72924..e1fda25716 100644 --- a/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-256x192.py +++ b/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-256x192.py @@ -1,272 +1,272 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 420 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, 
val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 210 to 420 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(192, 256), - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=1., - widen_factor=1., - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=1024, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/', -# f'{data_root}': 's3://openmmlab/datasets/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - 
type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# train datasets -dataset_coco = dict( - type='RepeatDataset', - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_train2017.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[], - ), - times=3) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=17, - mapping=[ - (0, 6), - (1, 8), - (2, 10), - (3, 5), - (4, 7), - (5, 9), - (6, 12), - (7, 14), - (8, 16), - (9, 11), - (10, 13), - (11, 15), - ]) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/coco.py'), - datasets=[dataset_coco, dataset_aic], - pipeline=train_pipeline, - test_mode=False, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_val2017.json', - # bbox_file='data/coco/person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='detection/coco/val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically 
scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/', +# f'{data_root}': 's3://openmmlab/datasets/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type='RepeatDataset', + dataset=dict( + type=dataset_type, 
+ data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], + ), + times=3) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=17, + mapping=[ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + ]) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[dataset_coco, dataset_aic], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-384x288.py b/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-384x288.py index 7b5895962b..96be86d81f 100644 --- a/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-384x288.py +++ b/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-384x288.py @@ -1,272 +1,272 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 420 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 210 to 420 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(288, 384), - sigma=(6., 6.93), - simcc_split_ratio=2.0, - normalize=False, 
- use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=1., - widen_factor=1., - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=1024, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/', -# f'{data_root}': 's3://openmmlab/datasets/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# train datasets -dataset_coco = dict( - type='RepeatDataset', - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_train2017.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[], - ), - times=3) - -dataset_aic = dict( - type='AicDataset', - 
data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=17, - mapping=[ - (0, 6), - (1, 8), - (2, 10), - (3, 5), - (4, 7), - (5, 9), - (6, 12), - (7, 14), - (8, 16), - (9, 11), - (10, 13), - (11, 15), - ]) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/coco.py'), - datasets=[dataset_coco, dataset_aic], - pipeline=train_pipeline, - test_mode=False, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_val2017.json', - # bbox_file='data/coco/person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='detection/coco/val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(288, 384), + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 
'rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/', +# f'{data_root}': 's3://openmmlab/datasets/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type='RepeatDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], + ), + times=3) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=17, + mapping=[ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + ]) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', 
shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[dataset_coco, dataset_aic], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_coco-256x192.py b/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_coco-256x192.py index 7d77b88fde..0d354d3cb7 100644 --- a/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_coco-256x192.py +++ b/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_coco-256x192.py @@ -1,232 +1,232 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 420 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 210 to 420 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(192, 256), - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=1., - widen_factor=1., - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=1024, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in 
codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - # bbox_file=f'{data_root}person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - 
priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + 
]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + # bbox_file=f'{data_root}person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-256x192.py b/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-256x192.py index c7840f6c46..24b70dd98b 100644 --- a/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-256x192.py +++ b/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-256x192.py @@ -1,272 +1,272 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 420 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 210 to 420 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - 
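# --- Editor's note (illustrative Python, not part of the config diff) -----
# Rough shape of the learning-rate schedule defined here: a short
# iteration-level LinearLR warmup, a constant base_lr until epoch 210, then
# cosine annealing down to base_lr * 0.05 at epoch 420. Ignoring the warmup
# and the per-iteration conversion, the epoch-level curve is approximately:
import math

base_lr, max_epochs = 4e-3, 420

def approx_lr(epoch):
    start = max_epochs // 2                      # cosine phase begins at 210
    if epoch < start:
        return base_lr
    eta_min = base_lr * 0.05
    t = (epoch - start) / (max_epochs - start)   # 0 -> 1 over epochs 210..420
    return eta_min + 0.5 * (base_lr - eta_min) * (1 + math.cos(math.pi * t))

# approx_lr(0) == approx_lr(209) == 0.004; approx_lr(420) == 0.0002
# --------------------------------------------------------------------------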
end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(192, 256), - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=768, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/', -# f'{data_root}': 's3://openmmlab/datasets/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - 
dict(type='PackPoseInputs') -] - -# train datasets -dataset_coco = dict( - type='RepeatDataset', - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_train2017.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[], - ), - times=3) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=17, - mapping=[ - (0, 6), - (1, 8), - (2, 10), - (3, 5), - (4, 7), - (5, 9), - (6, 12), - (7, 14), - (8, 16), - (9, 11), - (10, 13), - (11, 15), - ]) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=128 * 2, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/coco.py'), - datasets=[dataset_coco, dataset_aic], - pipeline=train_pipeline, - test_mode=False, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_val2017.json', - # bbox_file='data/coco/person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='detection/coco/val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + 
type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/', +# f'{data_root}': 's3://openmmlab/datasets/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type='RepeatDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], + ), + times=3) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + 
num_keypoints=17, + mapping=[ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + ]) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=128 * 2, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[dataset_coco, dataset_aic], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-384x288.py b/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-384x288.py index 1293a1ae1c..7cb0e23039 100644 --- a/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-384x288.py +++ b/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-384x288.py @@ -1,272 +1,272 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 420 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 210 to 420 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(288, 384), - sigma=(6., 6.93), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - 
type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=768, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/', -# f'{data_root}': 's3://openmmlab/datasets/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# train datasets -dataset_coco = dict( - type='RepeatDataset', - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_train2017.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[], - ), - times=3) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=17, - mapping=[ - (0, 6), - (1, 8), - (2, 10), - (3, 5), - (4, 7), - (5, 9), - (6, 12), - (7, 14), - (8, 16), - (9, 11), - (10, 13), - (11, 15), - ]) - ], -) - -# data loaders -train_dataloader = dict( - 
batch_size=128 * 2, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/coco.py'), - datasets=[dataset_coco, dataset_aic], - pipeline=train_pipeline, - test_mode=False, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_val2017.json', - # bbox_file='data/coco/person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='detection/coco/val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(288, 384), + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + 
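# --- Editor's note (illustrative Python, not part of the config diff) -----
# Simplified sketch of the KLDiscretLoss idea used below (see MMPose for the
# exact implementation): predicted and encoded SimCC vectors are treated as
# discrete distributions along each axis, sharpened by `beta`, and compared
# with KL divergence; `use_target_weight` masks out invisible keypoints.
import torch.nn.functional as F

def simcc_kl_loss(pred, target, beta=10.0, label_softmax=True):
    # pred, target: (N, K, num_bins) SimCC vectors for one axis
    log_p = F.log_softmax(pred * beta, dim=-1)
    if label_softmax:
        target = F.softmax(target * beta, dim=-1)
    return F.kl_div(log_p, target, reduction='none').sum(-1).mean()
# --------------------------------------------------------------------------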
type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/', +# f'{data_root}': 's3://openmmlab/datasets/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type='RepeatDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], + ), + times=3) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=17, + mapping=[ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + ]) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=128 * 2, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[dataset_coco, dataset_aic], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + 
ann_file='coco/annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_coco-256x192.py b/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_coco-256x192.py index f21d0e18c6..d0b23251aa 100644 --- a/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_coco-256x192.py +++ b/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_coco-256x192.py @@ -1,232 +1,232 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 420 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 210 to 420 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(192, 256), - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=768, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 
'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - # bbox_file=f'{data_root}person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, 
val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + 
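# --- Editor's note (illustrative Python, not part of the config diff) -----
# What auto_scale_lr = dict(base_batch_size=1024) above means: if LR
# auto-scaling is enabled at launch, MMEngine rescales the optimizer LR
# linearly by the ratio of the actual total batch size to base_batch_size.
# For the "8xb256" setting referenced in these file names:
base_lr, base_batch_size = 4e-3, 1024
num_gpus, batch_per_gpu = 8, 256
actual_batch = num_gpus * batch_per_gpu                # 2048
scaled_lr = base_lr * actual_batch / base_batch_size   # 8e-3
# --------------------------------------------------------------------------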
dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + # bbox_file=f'{data_root}person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_aic-coco-256x192.py b/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_aic-coco-256x192.py index 6c9e9fdc55..635f8c90f7 100644 --- a/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_aic-coco-256x192.py +++ b/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_aic-coco-256x192.py @@ -1,272 +1,272 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 420 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.0), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 210 to 420 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(192, 256), - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - 
data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.33, - widen_factor=0.5, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=512, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/', -# f'{data_root}': 's3://openmmlab/datasets/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# train datasets -dataset_coco = dict( - type='RepeatDataset', - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_train2017.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[], - ), - times=3) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - 
ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=17, - mapping=[ - (0, 6), - (1, 8), - (2, 10), - (3, 5), - (4, 7), - (5, 9), - (6, 12), - (7, 14), - (8, 16), - (9, 11), - (10, 13), - (11, 15), - ]) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=128 * 2, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/coco.py'), - datasets=[dataset_coco, dataset_aic], - pipeline=train_pipeline, - test_mode=False, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_val2017.json', - # bbox_file='data/coco/person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='detection/coco/val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.0), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth' # noqa + )), + 
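# --- Editor's note (illustrative Python, not part of the config diff) -----
# Effect of the KeypointConverter used for the AIC dataset in these configs:
# each (src, dst) pair copies AIC keypoint `src` into slot `dst` of a
# 17-keypoint COCO-style array, and COCO joints with no AIC counterpart
# (nose, eyes, ears) keep zero visibility. A simplified sketch:
import numpy as np

AIC_TO_COCO = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), (5, 9),
               (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15)]

def aic_to_coco(kpts, vis):
    """kpts: (14, 2) AIC keypoints, vis: (14,) visibility flags."""
    out_kpts = np.zeros((17, 2), dtype=np.float32)
    out_vis = np.zeros(17, dtype=np.float32)
    for src, dst in AIC_TO_COCO:
        out_kpts[dst] = kpts[src]
        out_vis[dst] = vis[src]
    return out_kpts, out_vis
# --------------------------------------------------------------------------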
head=dict( + type='RTMCCHead', + in_channels=512, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/', +# f'{data_root}': 's3://openmmlab/datasets/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type='RepeatDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], + ), + times=3) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=17, + mapping=[ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + ]) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=128 * 2, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + 
metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[dataset_coco, dataset_aic], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py b/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py index c0abcbb1dd..ee4f99014f 100644 --- a/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py +++ b/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py @@ -1,232 +1,232 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 420 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 210 to 420 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(192, 256), - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.33, - widen_factor=0.5, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=512, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - 
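# --- Editor's note (illustrative Python, not part of the config diff) -----
# Why the `// 32` in in_featuremap_size above: the CSPNeXt backbone is
# tapped at its P5 stage (out_indices=(4,)), whose overall stride is 32, so
# the head sees a feature map of input_size / 32. The SimCC codec then
# discretises each axis at simcc_split_ratio bins per input pixel:
input_size = (192, 256)                                   # (w, h) from the codec
in_featuremap_size = tuple(s // 32 for s in input_size)   # (6, 8)
simcc_split_ratio = 2.0
x_bins, y_bins = int(192 * simcc_split_ratio), int(256 * simcc_split_ratio)
# per-keypoint SimCC targets: a length-384 x-vector and a length-512 y-vector
# --------------------------------------------------------------------------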
simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - # bbox_file=f'{data_root}person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - 
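# --- Editor's note (illustrative Python, not part of the config diff) -----
# Timing of the pipeline switch configured below: training uses the heavier
# augmentations (train_pipeline, CoarseDropout p=1.0) until the final
# stage2_num_epochs epochs, then switches to the milder
# train_pipeline_stage2 (CoarseDropout p=0.5, smaller rotation/scale jitter).
max_epochs, stage2_num_epochs = 420, 30
switch_epoch = max_epochs - stage2_num_epochs   # pipeline switches at epoch 390
# --------------------------------------------------------------------------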
type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=512, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + 
dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + # bbox_file=f'{data_root}person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_aic-coco-256x192.py b/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_aic-coco-256x192.py index 215a297944..dde95b45c0 100644 --- a/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_aic-coco-256x192.py +++ b/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_aic-coco-256x192.py @@ -1,273 +1,273 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 420 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 210 to 420 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - 
end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(192, 256), - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.167, - widen_factor=0.375, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-tiny_udp-aic-coco_210e-256x192-cbed682d_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=384, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/', -# f'{data_root}': 's3://openmmlab/datasets/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - 
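Editor's note: the `param_scheduler` blocks repeated in these 420-epoch configs chain a 1000-iteration `LinearLR` warm-up with `CosineAnnealingLR` over the second half of training (epochs 210 to 420, floor `eta_min = base_lr * 0.05`). The sketch below is only an approximation of the resulting curve in plain Python: it evaluates the schedule per epoch rather than per iteration (the configs set `convert_to_iter_based=True`), ignores `auto_scale_lr`, and uses a made-up `iters_per_epoch` value that is not part of the configs.

```python
import math

# values taken from the 420-epoch configs above
max_epochs = 420
stage2_num_epochs = 30
base_lr = 4e-3
eta_min = base_lr * 0.05   # floor of the cosine schedule
warmup_iters = 1000        # LinearLR runs by iteration, not by epoch
start_factor = 1.0e-5


def lr_at(epoch: int, iters_per_epoch: int = 500) -> float:
    """Approximate learning rate at the start of a given epoch.

    A sketch only: iters_per_epoch is a placeholder, and the real
    schedule is stepped every iteration by MMEngine.
    """
    cur_iter = epoch * iters_per_epoch
    if cur_iter < warmup_iters:
        # LinearLR: factor ramps from start_factor to 1.0 over warmup_iters
        factor = start_factor + (1.0 - start_factor) * cur_iter / warmup_iters
        return base_lr * factor
    if epoch < max_epochs // 2:
        # between warm-up and epoch 210 the LR stays at base_lr
        return base_lr
    # CosineAnnealingLR over the second half (T_max = 210 epochs)
    t = epoch - max_epochs // 2
    t_max = max_epochs // 2
    return eta_min + 0.5 * (base_lr - eta_min) * (1 + math.cos(math.pi * t / t_max))


if __name__ == '__main__':
    for e in (0, 100, 210, 300, 390, 419):
        print(e, f'{lr_at(e):.6f}')
    # Independently of the LR schedule, the stage-2 pipeline switch
    # happens at epoch max_epochs - stage2_num_epochs = 390.
```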
dict(type='PackPoseInputs') -] - -# train datasets -dataset_coco = dict( - type='RepeatDataset', - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_train2017.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[], - ), - times=3) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=17, - mapping=[ - (0, 6), - (1, 8), - (2, 10), - (3, 5), - (4, 7), - (5, 9), - (6, 12), - (7, 14), - (8, 16), - (9, 11), - (10, 13), - (11, 15), - ]) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/coco.py'), - datasets=[dataset_coco, dataset_aic], - pipeline=train_pipeline, - test_mode=False, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_val2017.json', - # bbox_file='data/coco/person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='detection/coco/val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - # Turn off EMA while training the tiny model - # dict( - # type='EMAHook', - # ema_type='ExpMomentumEMA', - # momentum=0.0002, - # update_buffers=True, - # priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + 
bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.167, + widen_factor=0.375, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-tiny_udp-aic-coco_210e-256x192-cbed682d_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=384, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/', +# f'{data_root}': 's3://openmmlab/datasets/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type='RepeatDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], + ), + times=3) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + 
'_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=17, + mapping=[ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + ]) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[dataset_coco, dataset_aic], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + # Turn off EMA while training the tiny model + # dict( + # type='EMAHook', + # ema_type='ExpMomentumEMA', + # momentum=0.0002, + # update_buffers=True, + # priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_coco-256x192.py b/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_coco-256x192.py index cbe0978b2b..d4d8180a93 100644 --- a/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_coco-256x192.py +++ b/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_coco-256x192.py @@ -1,233 +1,233 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 420 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 210 to 420 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(192, 256), - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.167, - 
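Editor's note: the `KeypointConverter` entry in the AIC branch of the combined dataset above remaps the 14 AI Challenger joints into COCO's 17-keypoint order through `(source_index, target_index)` pairs; AIC indices 12 and 13 (head top and neck) have no COCO counterpart, and COCO's five face keypoints receive no annotation. The NumPy snippet below is only an illustration of that index table, not the MMPose transform itself.

```python
import numpy as np

# (aic_index, coco_index) pairs, copied from the config above
AIC_TO_COCO = [
    (0, 6), (1, 8), (2, 10), (3, 5), (4, 7), (5, 9),
    (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15),
]


def convert_aic_to_coco(aic_kpts: np.ndarray, aic_vis: np.ndarray):
    """Scatter 14 AIC keypoints into a 17-slot COCO-ordered array.

    aic_kpts: (14, 2) x/y coordinates; aic_vis: (14,) visibility flags.
    Unmapped COCO joints (the face keypoints 0-4) keep zero coordinates
    and zero visibility, so target-weighted losses ignore them.
    """
    coco_kpts = np.zeros((17, 2), dtype=aic_kpts.dtype)
    coco_vis = np.zeros(17, dtype=aic_vis.dtype)
    for src, dst in AIC_TO_COCO:
        coco_kpts[dst] = aic_kpts[src]
        coco_vis[dst] = aic_vis[src]
    return coco_kpts, coco_vis


# toy usage with random coordinates
aic_kpts = np.random.rand(14, 2).astype(np.float32)
aic_vis = np.ones(14, dtype=np.float32)
coco_kpts, coco_vis = convert_aic_to_coco(aic_kpts, aic_vis)
assert coco_vis[:5].sum() == 0  # face keypoints stay unannotated
```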
widen_factor=0.375, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-tiny_udp-aic-coco_210e-256x192-cbed682d_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=384, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - 
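Editor's note: in the RTMCC heads above, `in_featuremap_size` is derived from the codec input size and the overall stride of 32 at the backbone's `out_indices=(4, )`, while `simcc_split_ratio` sets how many SimCC classification bins the head predicts per axis. A small sketch of that arithmetic for the 256x192 configs, just re-evaluating the expressions that appear in the config:

```python
# codec values shared by the 256x192 configs above
input_size = (192, 256)      # (width, height)
simcc_split_ratio = 2.0

# backbone output stride at out_indices=(4, ) is 32
in_featuremap_size = tuple(s // 32 for s in input_size)
print(in_featuremap_size)    # (6, 8)

# number of SimCC bins per axis predicted by the head
simcc_bins = tuple(int(s * simcc_split_ratio) for s in input_size)
print(simcc_bins)            # (384, 512): x-axis bins, y-axis bins
```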
ann_file='annotations/person_keypoints_val2017.json', - # bbox_file=f'{data_root}person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - # Turn off EMA while training the tiny model - # dict( - # type='EMAHook', - # ema_type='ExpMomentumEMA', - # momentum=0.0002, - # update_buffers=True, - # priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.167, + widen_factor=0.375, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-tiny_udp-aic-coco_210e-256x192-cbed682d_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=384, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', 
backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + # bbox_file=f'{data_root}person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + # Turn off EMA while training the tiny model + # dict( + # type='EMAHook', + # ema_type='ExpMomentumEMA', + # momentum=0.0002, + # update_buffers=True, + # priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/rtmpose/coco/rtmpose_coco.md b/configs/body_2d_keypoint/rtmpose/coco/rtmpose_coco.md index d3cc9298df..312a36bbff 100644 --- a/configs/body_2d_keypoint/rtmpose/coco/rtmpose_coco.md +++ b/configs/body_2d_keypoint/rtmpose/coco/rtmpose_coco.md @@ -1,71 +1,71 @@ - - -
-RTMPose (arXiv'2023) - -```bibtex -@misc{https://doi.org/10.48550/arxiv.2303.07399, - doi = {10.48550/ARXIV.2303.07399}, - url = {https://arxiv.org/abs/2303.07399}, - author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, - keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, - title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, - publisher = {arXiv}, - year = {2023}, - copyright = {Creative Commons Attribution 4.0 International} -} - -``` - -
- - - -
-RTMDet (arXiv'2022) - -```bibtex -@misc{lyu2022rtmdet, - title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, - author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, - year={2022}, - eprint={2212.07784}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [rtmpose-t](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_coco-256x192.py) | 256x192 | 0.682 | 0.883 | 0.759 | 0.736 | 0.920 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.json) | -| [rtmpose-s](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 0.716 | 0.892 | 0.789 | 0.768 | 0.929 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.json) | -| [rtmpose-m](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 0.746 | 0.899 | 0.817 | 0.795 | 0.935 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.json) | -| [rtmpose-l](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 0.758 | 0.906 | 0.826 | 0.806 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.json) | -| [rtmpose-t-aic-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_aic-coco-256x192.py) | 256x192 | 0.685 | 0.880 | 0.761 | 0.738 | 0.918 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-aic-coco_pt-aic-coco_420e-256x192-cfc8f33d_20230126.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-aic-coco_pt-aic-coco_420e-256x192-cfc8f33d_20230126.json) | -| [rtmpose-s-aic-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_aic-coco-256x192.py) | 256x192 | 0.722 | 0.892 | 0.794 | 0.772 | 0.929 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-aic-coco_pt-aic-coco_420e-256x192-fcb2599b_20230126.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-aic-coco_pt-aic-coco_420e-256x192-fcb2599b_20230126.json) | -| [rtmpose-m-aic-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-256x192.py) | 256x192 | 0.758 | 0.903 | 0.826 | 0.806 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.json) | -| [rtmpose-l-aic-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-256x192.py) | 256x192 | 0.765 | 0.906 | 0.835 | 
0.813 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-256x192-f016ffe0_20230126.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-256x192-f016ffe0_20230126.json) | -| [rtmpose-m-aic-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-384x288.py) | 384x288 | 0.770 | 0.908 | 0.833 | 0.816 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-384x288-a62a0b32_20230228.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-384x288-a62a0b32_20230228.json) | -| [rtmpose-l-aic-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-384x288.py) | 384x288 | 0.773 | 0.907 | 0.835 | 0.819 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-384x288-97d6cb0f_20230228.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-384x288-97d6cb0f_20230228.json) | + + +
+RTMPose (arXiv'2023) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2303.07399, + doi = {10.48550/ARXIV.2303.07399}, + url = {https://arxiv.org/abs/2303.07399}, + author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, + publisher = {arXiv}, + year = {2023}, + copyright = {Creative Commons Attribution 4.0 International} +} + +``` + +
+ + + +
+RTMDet (arXiv'2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [rtmpose-t](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_coco-256x192.py) | 256x192 | 0.682 | 0.883 | 0.759 | 0.736 | 0.920 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.json) | +| [rtmpose-s](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 0.716 | 0.892 | 0.789 | 0.768 | 0.929 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.json) | +| [rtmpose-m](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 0.746 | 0.899 | 0.817 | 0.795 | 0.935 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.json) | +| [rtmpose-l](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 0.758 | 0.906 | 0.826 | 0.806 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.json) | +| [rtmpose-t-aic-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_aic-coco-256x192.py) | 256x192 | 0.685 | 0.880 | 0.761 | 0.738 | 0.918 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-aic-coco_pt-aic-coco_420e-256x192-cfc8f33d_20230126.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-aic-coco_pt-aic-coco_420e-256x192-cfc8f33d_20230126.json) | +| [rtmpose-s-aic-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_aic-coco-256x192.py) | 256x192 | 0.722 | 0.892 | 0.794 | 0.772 | 0.929 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-aic-coco_pt-aic-coco_420e-256x192-fcb2599b_20230126.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-aic-coco_pt-aic-coco_420e-256x192-fcb2599b_20230126.json) | +| [rtmpose-m-aic-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-256x192.py) | 256x192 | 0.758 | 0.903 | 0.826 | 0.806 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.json) | +| [rtmpose-l-aic-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-256x192.py) | 256x192 | 0.765 | 0.906 | 0.835 | 
0.813 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-256x192-f016ffe0_20230126.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-256x192-f016ffe0_20230126.json) | +| [rtmpose-m-aic-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-384x288.py) | 384x288 | 0.770 | 0.908 | 0.833 | 0.816 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-384x288-a62a0b32_20230228.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-384x288-a62a0b32_20230228.json) | +| [rtmpose-l-aic-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-384x288.py) | 384x288 | 0.773 | 0.907 | 0.835 | 0.819 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-384x288-97d6cb0f_20230228.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-384x288-97d6cb0f_20230228.json) | diff --git a/configs/body_2d_keypoint/rtmpose/coco/rtmpose_coco.yml b/configs/body_2d_keypoint/rtmpose/coco/rtmpose_coco.yml index bebe64b3b7..46e900d8a4 100644 --- a/configs/body_2d_keypoint/rtmpose/coco/rtmpose_coco.yml +++ b/configs/body_2d_keypoint/rtmpose/coco/rtmpose_coco.yml @@ -1,171 +1,171 @@ -Collections: -- Name: RTMPose - Paper: - Title: "RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose" - URL: https://arxiv.org/abs/2303.07399 - README: https://github.com/open-mmlab/mmpose/blob/main/projects/rtmpose/README.md -Models: -- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_coco-256x192.py - In Collection: RTMPose - Metadata: - Architecture: &id001 - - RTMPose - Training Data: COCO - Name: rtmpose-t_8xb256-420e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.682 - AP@0.5: 0.883 - AP@0.75: 0.759 - AR: 0.736 - AR@0.5: 0.92 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.pth -- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: COCO - Name: rtmpose-s_8xb256-420e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.716 - AP@0.5: 0.892 - AP@0.75: 0.789 - AR: 0.768 - AR@0.5: 0.929 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.pth -- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_coco-256x192.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: COCO - Name: rtmpose-m_8xb256-420e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.746 - AP@0.5: 0.899 - AP@0.75: 0.817 - AR: 0.795 - AR@0.5: 0.935 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.pth -- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_coco-256x192.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: COCO - Name: rtmpose-l_8xb256-420e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.758 - AP@0.5: 0.906 - AP@0.75: 0.826 - AR: 0.806 - AR@0.5: 0.942 - Task: Body 
2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.pth -- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_aic-coco-256x192.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: &id002 - - COCO - - AI Challenger - Name: rtmpose-t_8xb256-420e_aic-coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.685 - AP@0.5: 0.88 - AP@0.75: 0.761 - AR: 0.738 - AR@0.5: 0.918 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-aic-coco_pt-aic-coco_420e-256x192-cfc8f33d_20230126.pth -- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_aic-coco-256x192.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: *id002 - Name: rtmpose-s_8xb256-420e_aic-coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.722 - AP@0.5: 0.892 - AP@0.75: 0.794 - AR: 0.772 - AR@0.5: 0.929 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-aic-coco_pt-aic-coco_420e-256x192-fcb2599b_20230126.pth -- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-256x192.py - In Collection: RTMPose - Alias: human - Metadata: - Architecture: *id001 - Training Data: *id002 - Name: rtmpose-m_8xb256-420e_aic-coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.758 - AP@0.5: 0.903 - AP@0.75: 0.826 - AR: 0.806 - AR@0.5: 0.94 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth -- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-256x192.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: *id002 - Name: rtmpose-l_8xb256-420e_aic-coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.765 - AP@0.5: 0.906 - AP@0.75: 0.835 - AR: 0.813 - AR@0.5: 0.942 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-256x192-f016ffe0_20230126.pth -- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-384x288.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: *id002 - Name: rtmpose-m_8xb256-420e_aic-coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.770 - AP@0.5: 0.908 - AP@0.75: 0.833 - AR: 0.816 - AR@0.5: 0.943 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-384x288-a62a0b32_20230228.pth -- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-384x288.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: *id002 - Name: rtmpose-l_8xb256-420e_aic-coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.773 - AP@0.5: 0.907 - AP@0.75: 0.835 - AR: 0.819 - AR@0.5: 0.942 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-384x288-97d6cb0f_20230228.pth +Collections: +- Name: RTMPose + Paper: + Title: "RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose" + URL: https://arxiv.org/abs/2303.07399 + README: https://github.com/open-mmlab/mmpose/blob/main/projects/rtmpose/README.md +Models: +- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_coco-256x192.py + In 
Collection: RTMPose + Metadata: + Architecture: &id001 + - RTMPose + Training Data: COCO + Name: rtmpose-t_8xb256-420e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.682 + AP@0.5: 0.883 + AP@0.75: 0.759 + AR: 0.736 + AR@0.5: 0.92 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.pth +- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: rtmpose-s_8xb256-420e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.716 + AP@0.5: 0.892 + AP@0.75: 0.789 + AR: 0.768 + AR@0.5: 0.929 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.pth +- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_coco-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: rtmpose-m_8xb256-420e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.746 + AP@0.5: 0.899 + AP@0.75: 0.817 + AR: 0.795 + AR@0.5: 0.935 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.pth +- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_coco-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: rtmpose-l_8xb256-420e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.758 + AP@0.5: 0.906 + AP@0.75: 0.826 + AR: 0.806 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.pth +- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_aic-coco-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: &id002 + - COCO + - AI Challenger + Name: rtmpose-t_8xb256-420e_aic-coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.685 + AP@0.5: 0.88 + AP@0.75: 0.761 + AR: 0.738 + AR@0.5: 0.918 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-aic-coco_pt-aic-coco_420e-256x192-cfc8f33d_20230126.pth +- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_aic-coco-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-s_8xb256-420e_aic-coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.722 + AP@0.5: 0.892 + AP@0.75: 0.794 + AR: 0.772 + AR@0.5: 0.929 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-aic-coco_pt-aic-coco_420e-256x192-fcb2599b_20230126.pth +- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-256x192.py + In Collection: RTMPose + Alias: human + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-m_8xb256-420e_aic-coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.758 + AP@0.5: 0.903 + AP@0.75: 0.826 + AR: 0.806 + AR@0.5: 0.94 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth +- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-256x192.py + In Collection: RTMPose + 
Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-l_8xb256-420e_aic-coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.765 + AP@0.5: 0.906 + AP@0.75: 0.835 + AR: 0.813 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-256x192-f016ffe0_20230126.pth +- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_aic-coco-384x288.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-m_8xb256-420e_aic-coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.770 + AP@0.5: 0.908 + AP@0.75: 0.833 + AR: 0.816 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-384x288-a62a0b32_20230228.pth +- Config: configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_aic-coco-384x288.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-l_8xb256-420e_aic-coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.773 + AP@0.5: 0.907 + AP@0.75: 0.835 + AR: 0.819 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-384x288-97d6cb0f_20230228.pth diff --git a/configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose-m_8xb64-210e_crowdpose-256x192.py b/configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose-m_8xb64-210e_crowdpose-256x192.py index e93a2f1099..9ff68f5e4c 100644 --- a/configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose-m_8xb64-210e_crowdpose-256x192.py +++ b/configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose-m_8xb64-210e_crowdpose-256x192.py @@ -1,234 +1,234 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 210 -stage2_num_epochs = 30 -base_lr = 5e-4 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(192, 256), - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=768, - out_channels=14, - 
input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'CrowdPoseDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/', -# f'{data_root}': 's3://openmmlab/datasets/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', - bbox_file='data/crowdpose/annotations/det_for_crowd_test_0.1_0.5.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict( - save_best='crowdpose/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - 
ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'crowdpose/annotations/mmpose_crowdpose_test.json', - use_area=False, - iou_type='keypoints_crowd', - prefix='crowdpose') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 5e-4 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=14, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CrowdPoseDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/', +# f'{data_root}': 's3://openmmlab/datasets/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + 
max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + bbox_file='data/crowdpose/annotations/det_for_crowd_test_0.1_0.5.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='crowdpose/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'crowdpose/annotations/mmpose_crowdpose_test.json', + use_area=False, + iou_type='keypoints_crowd', + prefix='crowdpose') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose_crowdpose.md b/configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose_crowdpose.md index 42bcf0f65f..9314142f8c 100644 --- a/configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose_crowdpose.md +++ b/configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose_crowdpose.md @@ -1,60 +1,60 @@ - - -
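As an aside on the CrowdPose training config reproduced above: such a file is a complete MMEngine config, and outside of `tools/train.py` it can be consumed directly through MMEngine's `Config`/`Runner` API. The sketch below is illustrative only and is not part of this diff; the `work_dirs/...` path is an assumption.

```python
# Minimal sketch, assuming MMPose 1.x with MMEngine and MMDetection installed
# (the CSPNeXt backbone in the config is registered under the 'mmdet' scope).
from mmengine.config import Config
from mmengine.runner import Runner

cfg = Config.fromfile(
    'configs/body_2d_keypoint/rtmpose/crowdpose/'
    'rtmpose-m_8xb64-210e_crowdpose-256x192.py')
cfg.work_dir = 'work_dirs/rtmpose-m_crowdpose'  # assumed output directory

runner = Runner.from_cfg(cfg)  # builds the model, dataloaders and hooks from the config
runner.train()                 # roughly what `python tools/train.py <config>` does
```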
-RTMPose (arXiv'2023) - -```bibtex -@misc{https://doi.org/10.48550/arxiv.2303.07399, - doi = {10.48550/ARXIV.2303.07399}, - url = {https://arxiv.org/abs/2303.07399}, - author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, - keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, - title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, - publisher = {arXiv}, - year = {2023}, - copyright = {Creative Commons Attribution 4.0 International} -} - -``` - -
- - - -
-RTMDet (arXiv'2022) - -```bibtex -@misc{lyu2022rtmdet, - title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, - author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, - year={2022}, - eprint={2212.07784}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -
- - - -
-CrowdPose (CVPR'2019) - -```bibtex -@article{li2018crowdpose, - title={CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark}, - author={Li, Jiefeng and Wang, Can and Zhu, Hao and Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu}, - journal={arXiv preprint arXiv:1812.00324}, - year={2018} -} -``` - -
- -Results on CrowdPose test with [YOLOv3](https://github.com/eriklindernoren/PyTorch-YOLOv3) human detector - -| Arch | Input Size | AP | AP50 | AP75 | AP (E) | AP (M) | AP (H) | ckpt | log | -| :--------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :----: | :----: | :----: | :--------------------------------------------: | :-------------------------------------------: | -| [rtmpose-m](/configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose-m_8xb64-210e_crowdpose-256x192.py) | 256x192 | 0.706 | 0.841 | 0.765 | 0.799 | 0.719 | 0.582 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-crowdpose_pt-aic-coco_210e-256x192-e6192cac_20230224.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-crowdpose_pt-aic-coco_210e-256x192-e6192cac_20230224.json) | + + +
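For reference, the checkpoint linked in the CrowdPose results table above can be exercised through MMPose's high-level `MMPoseInferencer`. This is only a hedged usage sketch, not part of this diff: `person.jpg` is an assumed local image, a person detector from MMDetection may be resolved automatically or supplied via `det_model`, and the exact result fields can vary across MMPose 1.x versions.

```python
# Hypothetical inference sketch with the RTMPose-m CrowdPose checkpoint above.
from mmpose.apis import MMPoseInferencer

inferencer = MMPoseInferencer(
    pose2d='configs/body_2d_keypoint/rtmpose/crowdpose/'
    'rtmpose-m_8xb64-210e_crowdpose-256x192.py',
    pose2d_weights='https://download.openmmlab.com/mmpose/v1/projects/'
    'rtmposev1/rtmpose-m_simcc-crowdpose_pt-aic-coco_210e-256x192-e6192cac_20230224.pth')

result = next(inferencer('person.jpg'))          # results are yielded per image
print(result['predictions'][0][0]['keypoints'])  # 14 CrowdPose keypoints of one instance
```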
+RTMPose (arXiv'2023) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2303.07399, + doi = {10.48550/ARXIV.2303.07399}, + url = {https://arxiv.org/abs/2303.07399}, + author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, + publisher = {arXiv}, + year = {2023}, + copyright = {Creative Commons Attribution 4.0 International} +} + +``` + +
+ + + +
+RTMDet (arXiv'2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+CrowdPose (CVPR'2019) + +```bibtex +@article{li2018crowdpose, + title={CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark}, + author={Li, Jiefeng and Wang, Can and Zhu, Hao and Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu}, + journal={arXiv preprint arXiv:1812.00324}, + year={2018} +} +``` + +
+ +Results on CrowdPose test with [YOLOv3](https://github.com/eriklindernoren/PyTorch-YOLOv3) human detector + +| Arch | Input Size | AP | AP50 | AP75 | AP (E) | AP (M) | AP (H) | ckpt | log | +| :--------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :----: | :----: | :----: | :--------------------------------------------: | :-------------------------------------------: | +| [rtmpose-m](/configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose-m_8xb64-210e_crowdpose-256x192.py) | 256x192 | 0.706 | 0.841 | 0.765 | 0.799 | 0.719 | 0.582 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-crowdpose_pt-aic-coco_210e-256x192-e6192cac_20230224.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-crowdpose_pt-aic-coco_210e-256x192-e6192cac_20230224.json) | diff --git a/configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose_crowdpose.yml b/configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose_crowdpose.yml index 5fb842f563..ddfe25fd73 100644 --- a/configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose_crowdpose.yml +++ b/configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose_crowdpose.yml @@ -1,19 +1,19 @@ -Models: -- Config: configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose-m_8xb64-210e_crowdpose-256x192.py - In Collection: RTMPose - Metadata: - Architecture: - - RTMPose - Training Data: CrowdPose - Name: rtmpose-t_8xb256-420e_coco-256x192 - Results: - - Dataset: CrowdPose - Metrics: - AP: 0.706 - AP@0.5: 0.841 - AP@0.75: 0.765 - AP (E): 0.799 - AP (M): 0.719 - AP (L): 0.582 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-crowdpose_pt-aic-coco_210e-256x192-e6192cac_20230224.pth +Models: +- Config: configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose-m_8xb64-210e_crowdpose-256x192.py + In Collection: RTMPose + Metadata: + Architecture: + - RTMPose + Training Data: CrowdPose + Name: rtmpose-t_8xb256-420e_coco-256x192 + Results: + - Dataset: CrowdPose + Metrics: + AP: 0.706 + AP@0.5: 0.841 + AP@0.75: 0.765 + AP (E): 0.799 + AP (M): 0.719 + AP (L): 0.582 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-crowdpose_pt-aic-coco_210e-256x192-e6192cac_20230224.pth diff --git a/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-l_8xb256-420e_humanart-256x192.py b/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-l_8xb256-420e_humanart-256x192.py index 384a712d95..8ac425afa5 100644 --- a/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-l_8xb256-420e_humanart-256x192.py +++ b/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-l_8xb256-420e_humanart-256x192.py @@ -1,232 +1,232 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 420 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 210 to 420 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual 
training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(192, 256), - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=1., - widen_factor=1., - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmpose/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=1024, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=(6, 8), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'HumanArtDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', 
shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='HumanArt/annotations/training_humanart_coco.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='HumanArt/annotations/validation_humanart.json', - # bbox_file=f'{data_root}HumanArt/person_detection_results/' - # 'HumanArt_validation_detections_AP_H_56_person.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmpose/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=(6, 8), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings 
+dataset_type = 'HumanArtDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/training_humanart_coco.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/validation_humanart.json', + # bbox_file=f'{data_root}HumanArt/person_detection_results/' + # 'HumanArt_validation_detections_AP_H_56_person.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-m_8xb256-420e_humanart-256x192.py 
b/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-m_8xb256-420e_humanart-256x192.py index 30178cbb6d..83a2c44c6a 100644 --- a/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-m_8xb256-420e_humanart-256x192.py +++ b/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-m_8xb256-420e_humanart-256x192.py @@ -1,232 +1,232 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 420 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 210 to 420 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(192, 256), - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmpose/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=768, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=(6, 8), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'HumanArtDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - 
dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='HumanArt/annotations/training_humanart_coco.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='HumanArt/annotations/validation_humanart.json', - # bbox_file=f'{data_root}HumanArt/person_detection_results/' - # 'HumanArt_validation_detections_AP_H_56_person.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = 
dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmpose/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=(6, 8), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'HumanArtDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/training_humanart_coco.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + 
batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/validation_humanart.json', + # bbox_file=f'{data_root}HumanArt/person_detection_results/' + # 'HumanArt_validation_detections_AP_H_56_person.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-s_8xb256-420e_humanart-256x192.py b/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-s_8xb256-420e_humanart-256x192.py index b4263f25e7..87bd833b96 100644 --- a/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-s_8xb256-420e_humanart-256x192.py +++ b/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-s_8xb256-420e_humanart-256x192.py @@ -1,232 +1,232 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 420 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 210 to 420 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(192, 256), - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.33, - widen_factor=0.5, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmpose/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=512, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=(6, 8), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - 
loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'HumanArtDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='HumanArt/annotations/training_humanart_coco.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='HumanArt/annotations/validation_humanart.json', - # bbox_file=f'{data_root}HumanArt/person_detection_results/' - # 'HumanArt_validation_detections_AP_H_56_person.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 
'HumanArt/annotations/validation_humanart.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmpose/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=512, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=(6, 8), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'HumanArtDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + 
dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/training_humanart_coco.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/validation_humanart.json', + # bbox_file=f'{data_root}HumanArt/person_detection_results/' + # 'HumanArt_validation_detections_AP_H_56_person.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-t_8xb256-420e_humanart-256x192.py b/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-t_8xb256-420e_humanart-256x192.py index 869f04217d..e5a8092e6a 100644 --- a/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-t_8xb256-420e_humanart-256x192.py +++ b/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-t_8xb256-420e_humanart-256x192.py @@ -1,233 +1,233 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 420 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 210 to 420 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings 
-codec = dict( - type='SimCCLabel', - input_size=(192, 256), - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.167, - widen_factor=0.375, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmpose/cspnext-tiny_udp-aic-coco_210e-256x192-cbed682d_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=384, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=(6, 8), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'HumanArtDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, 
- data_mode=data_mode, - ann_file='HumanArt/annotations/training_humanart_coco.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='HumanArt/annotations/validation_humanart.json', - # bbox_file=f'{data_root}HumanArt/person_detection_results/' - # 'HumanArt_validation_detections_AP_H_56_person.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - # Turn off EMA while training the tiny model - # dict( - # type='EMAHook', - # ema_type='ExpMomentumEMA', - # momentum=0.0002, - # update_buffers=True, - # priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.167, + widen_factor=0.375, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmpose/cspnext-tiny_udp-aic-coco_210e-256x192-cbed682d_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=384, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=(6, 8), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings 
+dataset_type = 'HumanArtDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/training_humanart_coco.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/validation_humanart.json', + # bbox_file=f'{data_root}HumanArt/person_detection_results/' + # 'HumanArt_validation_detections_AP_H_56_person.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + # Turn off EMA while training the tiny model + # dict( + # type='EMAHook', + # ema_type='ExpMomentumEMA', + # momentum=0.0002, + # update_buffers=True, + # priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') +test_evaluator = val_evaluator diff --git 
a/configs/body_2d_keypoint/rtmpose/humanart/rtmpose_humanart.md b/configs/body_2d_keypoint/rtmpose/humanart/rtmpose_humanart.md index 385ce0612a..adc2bbdf72 100644 --- a/configs/body_2d_keypoint/rtmpose/humanart/rtmpose_humanart.md +++ b/configs/body_2d_keypoint/rtmpose/humanart/rtmpose_humanart.md @@ -1,117 +1,117 @@ - - -
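As a quick aside on the training config in the hunk above: it can be loaded and inspected with MMEngine before launching a run. The sketch below is illustrative only; it assumes an MMPose checkout in which this file is the `rtmpose-t_8xb256-420e_humanart-256x192.py` config referenced in the tables further down, and that MMEngine's `Config` API is available.

```python
# Illustrative only (not part of the diff): load the HumanArt RTMPose config
# and read back a few of the settings added in the hunk above. The file path
# is assumed to be the rtmpose-t HumanArt config listed in the tables below.
from mmengine.config import Config

cfg = Config.fromfile(
    'configs/body_2d_keypoint/rtmpose/humanart/'
    'rtmpose-t_8xb256-420e_humanart-256x192.py')

print(cfg.codec['input_size'])            # (192, 256) SimCC input resolution
print(cfg.model['head']['out_channels'])  # 17 COCO-style keypoints
print(cfg.train_cfg['max_epochs'])        # 420 epochs, stage 2 in the last 30
```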
-RTMPose (arXiv'2023) - -```bibtex -@misc{https://doi.org/10.48550/arxiv.2303.07399, - doi = {10.48550/ARXIV.2303.07399}, - url = {https://arxiv.org/abs/2303.07399}, - author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, - keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, - title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, - publisher = {arXiv}, - year = {2023}, - copyright = {Creative Commons Attribution 4.0 International} -} - -``` - -
- - - -
-RTMDet (arXiv'2022) - -```bibtex -@misc{lyu2022rtmdet, - title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, - author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, - year={2022}, - eprint={2212.07784}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -
-Human-Art (CVPR'2023)
-
-```bibtex
-@inproceedings{ju2023humanart,
-    title={Human-Art: A Versatile Human-Centric Dataset Bridging Natural and Artificial Scenes},
-    author={Ju, Xuan and Zeng, Ailing and Wang, Jianan and Xu, Qiang and Zhang, Lei},
-    booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
-    year={2023}}
-```
-
-
- -Results on Human-Art validation dataset with detector having human AP of 56.2 on Human-Art validation dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [rtmpose-t-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_coco-256x192.py) | 256x192 | 0.161 | 0.283 | 0.154 | 0.221 | 0.373 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.json) | -| [rtmpose-t-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-t_8xb256-420e_humanart-256x192.py) | 256x192 | 0.249 | 0.395 | 0.256 | 0.323 | 0.485 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.json) | -| [rtmpose-s-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 0.199 | 0.328 | 0.198 | 0.261 | 0.418 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.json) | -| [rtmpose-s-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-s_8xb256-420e_humanart-256x192.py) | 256x192 | 0.311 | 0.462 | 0.323 | 0.381 | 0.540 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.json) | -| [rtmpose-m-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 0.239 | 0.372 | 0.243 | 0.302 | 0.455 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.json) | -| [rtmpose-m-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-m_8xb256-420e_humanart-256x192.py) | 256x192 | 0.355 | 0.503 | 0.377 | 0.417 | 0.568 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.json) | -| [rtmpose-l-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 0.260 | 0.393 | 0.267 | 0.323 | 0.472 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.json) | -| [rtmpose-l-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-l_8xb256-420e_humanart-256x192.py) | 256x192 | 0.378 | 0.521 | 
0.399 | 0.442 | 0.584 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.json) | - -Results on Human-Art validation dataset with ground-truth bounding-box - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [rtmpose-t-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_coco-256x192.py) | 256x192 | 0.444 | 0.725 | 0.453 | 0.488 | 0.750 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.json) | -| [rtmpose-t-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-t_8xb256-420e_humanart-256x192.py) | 256x192 | 0.655 | 0.872 | 0.720 | 0.693 | 0.890 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.json) | -| [rtmpose-s-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 0.480 | 0.739 | 0.498 | 0.521 | 0.763 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.json) | -| [rtmpose-s-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-s_8xb256-420e_humanart-256x192.py) | 256x192 | 0.698 | 0.893 | 0.768 | 0.732 | 0.903 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.json) | -| [rtmpose-m-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 0.532 | 0.765 | 0.563 | 0.571 | 0.789 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.json) | -| [rtmpose-m-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-m_8xb256-420e_humanart-256x192.py) | 256x192 | 0.728 | 0.895 | 0.791 | 0.759 | 0.906 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.json) | -| [rtmpose-l-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 0.564 | 0.789 | 0.602 | 0.599 | 0.808 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.pth) | 
[log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.json) | -| [rtmpose-l-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-l_8xb256-420e_humanart-256x192.py) | 256x192 | 0.753 | 0.905 | 0.812 | 0.783 | 0.915 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.json) | - -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [rtmpose-t-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_coco-256x192.py) | 256x192 | 0.682 | 0.883 | 0.759 | 0.736 | 0.920 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.json) | -| [rtmpose-t-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-t_8xb256-420e_humanart-256x192.py) | 256x192 | 0.665 | 0.875 | 0.739 | 0.721 | 0.916 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.json) | -| [rtmpose-s-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 0.716 | 0.892 | 0.789 | 0.768 | 0.929 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.json) | -| [rtmpose-s-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-s_8xb256-420e_humanart-256x192.py) | 256x192 | 0.706 | 0.888 | 0.780 | 0.759 | 0.928 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.json) | -| [rtmpose-m-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 0.746 | 0.899 | 0.817 | 0.795 | 0.935 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.json) | -| [rtmpose-m-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-m_8xb256-420e_humanart-256x192.py) | 256x192 | 0.725 | 0.892 | 0.795 | 0.775 | 0.929 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.json) | -| 
[rtmpose-l-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 0.758 | 0.906 | 0.826 | 0.806 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.json) | -| [rtmpose-l-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-l_8xb256-420e_humanart-256x192.py) | 256x192 | 0.748 | 0.901 | 0.816 | 0.796 | 0.938 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.json) | - -Results on COCO val2017 with ground-truth bounding box - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [rtmpose-t-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-t_8xb256-420e_humanart-256x192.py) | 256x192 | 0.679 | 0.895 | 0.755 | 0.710 | 0.907 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.json) | -| [rtmpose-s-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-s_8xb256-420e_humanart-256x192.py) | 256x192 | 0.725 | 0.916 | 0.798 | 0.753 | 0.925 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.json) | -| [rtmpose-m-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-m_8xb256-420e_humanart-256x192.py) | 256x192 | 0.744 | 0.916 | 0.818 | 0.770 | 0.930 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.json) | -| [rtmpose-l-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-l_8xb256-420e_humanart-256x192.py) | 256x192 | 0.770 | 0.927 | 0.840 | 0.794 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.json) | + + +
+RTMPose (arXiv'2023) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2303.07399, + doi = {10.48550/ARXIV.2303.07399}, + url = {https://arxiv.org/abs/2303.07399}, + author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, + publisher = {arXiv}, + year = {2023}, + copyright = {Creative Commons Attribution 4.0 International} +} + +``` + +
+ + + +
+RTMDet (arXiv'2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +
+Human-Art (CVPR'2023)
+
+```bibtex
+@inproceedings{ju2023humanart,
+    title={Human-Art: A Versatile Human-Centric Dataset Bridging Natural and Artificial Scenes},
+    author={Ju, Xuan and Zeng, Ailing and Wang, Jianan and Xu, Qiang and Zhang, Lei},
+    booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
+    year={2023}}
+```
+
+
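Before the benchmark tables, a short usage sketch may be helpful. It is illustrative only: the config path and checkpoint URL are copied verbatim from the rtmpose-m row below, and the `MMPoseInferencer` interface (argument names and the layout of its output) is assumed to follow the current MMPose high-level API.

```python
# Illustrative only (not part of the diff): run one of the Human-Art RTMPose
# checkpoints listed below on a single image via MMPose's high-level
# inferencer. Argument names and result layout are assumed from the MMPose
# 1.x API; 'demo.jpg' is a placeholder image path.
from mmpose.apis import MMPoseInferencer

inferencer = MMPoseInferencer(
    pose2d='configs/body_2d_keypoint/rtmpose/humanart/'
    'rtmpose-m_8xb256-420e_humanart-256x192.py',
    pose2d_weights='https://download.openmmlab.com/mmpose/v1/projects/'
    'rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.pth')

# The call returns a generator; each item covers one input image.
result = next(inferencer('demo.jpg'))
person = result['predictions'][0][0]      # first detected person instance
print(len(person['keypoints']))           # 17 (x, y) keypoints
```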
+ +Results on Human-Art validation dataset with detector having human AP of 56.2 on Human-Art validation dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [rtmpose-t-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_coco-256x192.py) | 256x192 | 0.161 | 0.283 | 0.154 | 0.221 | 0.373 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.json) | +| [rtmpose-t-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-t_8xb256-420e_humanart-256x192.py) | 256x192 | 0.249 | 0.395 | 0.256 | 0.323 | 0.485 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.json) | +| [rtmpose-s-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 0.199 | 0.328 | 0.198 | 0.261 | 0.418 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.json) | +| [rtmpose-s-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-s_8xb256-420e_humanart-256x192.py) | 256x192 | 0.311 | 0.462 | 0.323 | 0.381 | 0.540 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.json) | +| [rtmpose-m-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 0.239 | 0.372 | 0.243 | 0.302 | 0.455 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.json) | +| [rtmpose-m-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-m_8xb256-420e_humanart-256x192.py) | 256x192 | 0.355 | 0.503 | 0.377 | 0.417 | 0.568 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.json) | +| [rtmpose-l-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 0.260 | 0.393 | 0.267 | 0.323 | 0.472 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.json) | +| [rtmpose-l-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-l_8xb256-420e_humanart-256x192.py) | 256x192 | 0.378 | 0.521 | 
0.399 | 0.442 | 0.584 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.json) | + +Results on Human-Art validation dataset with ground-truth bounding-box + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [rtmpose-t-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_coco-256x192.py) | 256x192 | 0.444 | 0.725 | 0.453 | 0.488 | 0.750 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.json) | +| [rtmpose-t-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-t_8xb256-420e_humanart-256x192.py) | 256x192 | 0.655 | 0.872 | 0.720 | 0.693 | 0.890 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.json) | +| [rtmpose-s-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 0.480 | 0.739 | 0.498 | 0.521 | 0.763 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.json) | +| [rtmpose-s-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-s_8xb256-420e_humanart-256x192.py) | 256x192 | 0.698 | 0.893 | 0.768 | 0.732 | 0.903 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.json) | +| [rtmpose-m-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 0.532 | 0.765 | 0.563 | 0.571 | 0.789 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.json) | +| [rtmpose-m-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-m_8xb256-420e_humanart-256x192.py) | 256x192 | 0.728 | 0.895 | 0.791 | 0.759 | 0.906 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.json) | +| [rtmpose-l-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 0.564 | 0.789 | 0.602 | 0.599 | 0.808 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.pth) | 
[log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.json) | +| [rtmpose-l-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-l_8xb256-420e_humanart-256x192.py) | 256x192 | 0.753 | 0.905 | 0.812 | 0.783 | 0.915 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.json) | + +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [rtmpose-t-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-t_8xb256-420e_coco-256x192.py) | 256x192 | 0.682 | 0.883 | 0.759 | 0.736 | 0.920 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-coco_pt-aic-coco_420e-256x192-e613ba3f_20230127.json) | +| [rtmpose-t-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-t_8xb256-420e_humanart-256x192.py) | 256x192 | 0.665 | 0.875 | 0.739 | 0.721 | 0.916 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.json) | +| [rtmpose-s-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 0.716 | 0.892 | 0.789 | 0.768 | 0.929 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.json) | +| [rtmpose-s-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-s_8xb256-420e_humanart-256x192.py) | 256x192 | 0.706 | 0.888 | 0.780 | 0.759 | 0.928 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.json) | +| [rtmpose-m-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 0.746 | 0.899 | 0.817 | 0.795 | 0.935 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco_pt-aic-coco_420e-256x192-d8dd5ca4_20230127.json) | +| [rtmpose-m-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-m_8xb256-420e_humanart-256x192.py) | 256x192 | 0.725 | 0.892 | 0.795 | 0.775 | 0.929 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.json) | +| 
[rtmpose-l-coco](/configs/body_2d_keypoint/rtmpose/coco/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 0.758 | 0.906 | 0.826 | 0.806 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco_pt-aic-coco_420e-256x192-1352a4d2_20230127.json) | +| [rtmpose-l-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-l_8xb256-420e_humanart-256x192.py) | 256x192 | 0.748 | 0.901 | 0.816 | 0.796 | 0.938 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.json) | + +Results on COCO val2017 with ground-truth bounding box + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [rtmpose-t-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-t_8xb256-420e_humanart-256x192.py) | 256x192 | 0.679 | 0.895 | 0.755 | 0.710 | 0.907 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.json) | +| [rtmpose-s-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-s_8xb256-420e_humanart-256x192.py) | 256x192 | 0.725 | 0.916 | 0.798 | 0.753 | 0.925 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.json) | +| [rtmpose-m-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-m_8xb256-420e_humanart-256x192.py) | 256x192 | 0.744 | 0.916 | 0.818 | 0.770 | 0.930 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.json) | +| [rtmpose-l-humanart-coco](/configs/body_2d_keypoint/rtmpose/humanart/rtmpose-l_8xb256-420e_humanart-256x192.py) | 256x192 | 0.770 | 0.927 | 0.840 | 0.794 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.json) | diff --git a/configs/body_2d_keypoint/rtmpose/humanart/rtmpose_humanart.yml b/configs/body_2d_keypoint/rtmpose/humanart/rtmpose_humanart.yml index 2d6cf6ff26..aaabbcd5af 100644 --- a/configs/body_2d_keypoint/rtmpose/humanart/rtmpose_humanart.yml +++ b/configs/body_2d_keypoint/rtmpose/humanart/rtmpose_humanart.yml @@ -1,138 +1,138 @@ -Collections: -- Name: RTMPose - Paper: - Title: "RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose" - URL: https://arxiv.org/abs/2303.07399 - README: https://github.com/open-mmlab/mmpose/blob/main/projects/rtmpose/README.md -Models: -- Config: 
configs/body_2d_keypoint/rtmpose/humanart/rtmpose-l_8xb256-420e_humanart-256x192.py - In Collection: RTMPose - Metadata: - Architecture: &id001 - - RTMPose - Training Data: &id002 - - COCO - - Human-Art - Name: rtmpose-l_8xb256-420e_humanart-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.748 - AP@0.5: 0.901 - AP@0.75: 0.816 - AR: 0.796 - AR@0.5: 0.938 - Task: Body 2D Keypoint - - Dataset: Human-Art - Metrics: - AP: 0.378 - AP@0.5: 0.521 - AP@0.75: 0.399 - AR: 0.442 - AR@0.5: 0.584 - Task: Body 2D Keypoint - - Dataset: Human-Art(GT) - Metrics: - AP: 0.753 - AP@0.5: 0.905 - AP@0.75: 0.812 - AR: 0.783 - AR@0.5: 0.915 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.pth -- Config: configs/body_2d_keypoint/rtmpose/humanart/rtmpose-m_8xb256-420e_humanart-256x192.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: *id002 - Name: rtmpose-m_8xb256-420e_humanart-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.725 - AP@0.5: 0.892 - AP@0.75: 0.795 - AR: 0.775 - AR@0.5: 0.929 - Task: Body 2D Keypoint - - Dataset: Human-Art - Metrics: - AP: 0.355 - AP@0.5: 0.503 - AP@0.75: 0.377 - AR: 0.417 - AR@0.5: 0.568 - Task: Body 2D Keypoint - - Dataset: Human-Art(GT) - Metrics: - AP: 0.728 - AP@0.5: 0.895 - AP@0.75: 0.791 - AR: 0.759 - AR@0.5: 0.906 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.pth -- Config: configs/body_2d_keypoint/rtmpose/humanart/rtmpose-s_8xb256-420e_humanart-256x192.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: *id002 - Name: rtmpose-s_8xb256-420e_humanart-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.706 - AP@0.5: 0.888 - AP@0.75: 0.780 - AR: 0.759 - AR@0.5: 0.928 - Task: Body 2D Keypoint - - Dataset: Human-Art - Metrics: - AP: 0.311 - AP@0.5: 0.462 - AP@0.75: 0.323 - AR: 0.381 - AR@0.5: 0.540 - Task: Body 2D Keypoint - - Dataset: Human-Art(GT) - Metrics: - AP: 0.698 - AP@0.5: 0.893 - AP@0.75: 0.768 - AR: 0.732 - AR@0.5: 0.903 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.pth -- Config: configs/body_2d_keypoint/rtmpose/humanart/rtmpose-t_8xb256-420e_humanart-256x192.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: *id002 - Name: rtmpose-t_8xb256-420e_humanart-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.665 - AP@0.5: 0.875 - AP@0.75: 0.739 - AR: 0.721 - AR@0.5: 0.916 - Task: Body 2D Keypoint - - Dataset: Human-Art - Metrics: - AP: 0.249 - AP@0.5: 0.395 - AP@0.75: 0.256 - AR: 0.323 - AR@0.5: 0.485 - Task: Body 2D Keypoint - - Dataset: Human-Art(GT) - Metrics: - AP: 0.655 - AP@0.5: 0.872 - AP@0.75: 0.720 - AR: 0.693 - AR@0.5: 0.890 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.pth +Collections: +- Name: RTMPose + Paper: + Title: "RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose" + URL: https://arxiv.org/abs/2303.07399 + README: https://github.com/open-mmlab/mmpose/blob/main/projects/rtmpose/README.md +Models: +- Config: configs/body_2d_keypoint/rtmpose/humanart/rtmpose-l_8xb256-420e_humanart-256x192.py + In Collection: RTMPose + Metadata: + Architecture: &id001 + - RTMPose + Training Data: &id002 + - COCO + - Human-Art + Name: 
rtmpose-l_8xb256-420e_humanart-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.748 + AP@0.5: 0.901 + AP@0.75: 0.816 + AR: 0.796 + AR@0.5: 0.938 + Task: Body 2D Keypoint + - Dataset: Human-Art + Metrics: + AP: 0.378 + AP@0.5: 0.521 + AP@0.75: 0.399 + AR: 0.442 + AR@0.5: 0.584 + Task: Body 2D Keypoint + - Dataset: Human-Art(GT) + Metrics: + AP: 0.753 + AP@0.5: 0.905 + AP@0.75: 0.812 + AR: 0.783 + AR@0.5: 0.915 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_8xb256-420e_humanart-256x192-389f2cb0_20230611.pth +- Config: configs/body_2d_keypoint/rtmpose/humanart/rtmpose-m_8xb256-420e_humanart-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-m_8xb256-420e_humanart-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.725 + AP@0.5: 0.892 + AP@0.75: 0.795 + AR: 0.775 + AR@0.5: 0.929 + Task: Body 2D Keypoint + - Dataset: Human-Art + Metrics: + AP: 0.355 + AP@0.5: 0.503 + AP@0.75: 0.377 + AR: 0.417 + AR@0.5: 0.568 + Task: Body 2D Keypoint + - Dataset: Human-Art(GT) + Metrics: + AP: 0.728 + AP@0.5: 0.895 + AP@0.75: 0.791 + AR: 0.759 + AR@0.5: 0.906 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_8xb256-420e_humanart-256x192-8430627b_20230611.pth +- Config: configs/body_2d_keypoint/rtmpose/humanart/rtmpose-s_8xb256-420e_humanart-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-s_8xb256-420e_humanart-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.706 + AP@0.5: 0.888 + AP@0.75: 0.780 + AR: 0.759 + AR@0.5: 0.928 + Task: Body 2D Keypoint + - Dataset: Human-Art + Metrics: + AP: 0.311 + AP@0.5: 0.462 + AP@0.75: 0.323 + AR: 0.381 + AR@0.5: 0.540 + Task: Body 2D Keypoint + - Dataset: Human-Art(GT) + Metrics: + AP: 0.698 + AP@0.5: 0.893 + AP@0.75: 0.768 + AR: 0.732 + AR@0.5: 0.903 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_8xb256-420e_humanart-256x192-5a3ac943_20230611.pth +- Config: configs/body_2d_keypoint/rtmpose/humanart/rtmpose-t_8xb256-420e_humanart-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-t_8xb256-420e_humanart-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.665 + AP@0.5: 0.875 + AP@0.75: 0.739 + AR: 0.721 + AR@0.5: 0.916 + Task: Body 2D Keypoint + - Dataset: Human-Art + Metrics: + AP: 0.249 + AP@0.5: 0.395 + AP@0.75: 0.256 + AR: 0.323 + AR@0.5: 0.485 + Task: Body 2D Keypoint + - Dataset: Human-Art(GT) + Metrics: + AP: 0.655 + AP@0.5: 0.872 + AP@0.75: 0.720 + AR: 0.693 + AR@0.5: 0.890 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_8xb256-420e_humanart-256x192-60b68c98_20230612.pth diff --git a/configs/body_2d_keypoint/rtmpose/mpii/rtmpose-m_8xb64-210e_mpii-256x256.py b/configs/body_2d_keypoint/rtmpose/mpii/rtmpose-m_8xb64-210e_mpii-256x256.py index ca67020f51..3f5c6afdb1 100644 --- a/configs/body_2d_keypoint/rtmpose/mpii/rtmpose-m_8xb64-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/rtmpose/mpii/rtmpose-m_8xb64-210e_mpii-256x256.py @@ -1,228 +1,228 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 210 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, 
weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 210 to 420 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(256, 256), - sigma=(5.66, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=768, - out_channels=16, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/pose/MPI/', -# f'{data_root}': 's3://openmmlab/datasets/pose/MPI/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', 
input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file=f'{data_root}/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='PCK', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(256, 256), + sigma=(5.66, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=16, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + 
simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/pose/MPI/', +# f'{data_root}': 's3://openmmlab/datasets/pose/MPI/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file=f'{data_root}/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='PCK', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + 
switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/rtmpose/mpii/rtmpose_mpii.md b/configs/body_2d_keypoint/rtmpose/mpii/rtmpose_mpii.md index 990edb45eb..b8ffd1243e 100644 --- a/configs/body_2d_keypoint/rtmpose/mpii/rtmpose_mpii.md +++ b/configs/body_2d_keypoint/rtmpose/mpii/rtmpose_mpii.md @@ -1,43 +1,43 @@ - - -
-RTMPose (arXiv'2023) - -```bibtex -@misc{https://doi.org/10.48550/arxiv.2303.07399, - doi = {10.48550/ARXIV.2303.07399}, - url = {https://arxiv.org/abs/2303.07399}, - author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, - keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, - title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, - publisher = {arXiv}, - year = {2023}, - copyright = {Creative Commons Attribution 4.0 International} -} - -``` - -
- - - -
-MPII (CVPR'2014) - -```bibtex -@inproceedings{andriluka14cvpr, - author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, - title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, - booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2014}, - month = {June} -} -``` - -
- -Results on MPII val set - -| Arch | Input Size | Mean / w. flip | Mean@0.1 | ckpt | log | -| :------------------------------------------------------- | :--------: | :------------: | :------: | :------------------------------------------------------: | :------------------------------------------------------: | -| [rtmpose-m](/configs/body_2d_keypoint/rtmpose/mpii/rtmpose-m_8xb64-210e_mpii-256x256.py) | 256x256 | 0.907 | 0.348 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-mpii_pt-aic-coco_210e-256x256-ec4dbec8_20230206.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-mpii_pt-aic-coco_210e-256x256-ec4dbec8_20230206.json) | + + +
+RTMPose (arXiv'2023) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2303.07399, + doi = {10.48550/ARXIV.2303.07399}, + url = {https://arxiv.org/abs/2303.07399}, + author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, + publisher = {arXiv}, + year = {2023}, + copyright = {Creative Commons Attribution 4.0 International} +} + +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +Results on MPII val set + +| Arch | Input Size | Mean / w. flip | Mean@0.1 | ckpt | log | +| :------------------------------------------------------- | :--------: | :------------: | :------: | :------------------------------------------------------: | :------------------------------------------------------: | +| [rtmpose-m](/configs/body_2d_keypoint/rtmpose/mpii/rtmpose-m_8xb64-210e_mpii-256x256.py) | 256x256 | 0.907 | 0.348 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-mpii_pt-aic-coco_210e-256x256-ec4dbec8_20230206.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-mpii_pt-aic-coco_210e-256x256-ec4dbec8_20230206.json) | diff --git a/configs/body_2d_keypoint/rtmpose/mpii/rtmpose_mpii.yml b/configs/body_2d_keypoint/rtmpose/mpii/rtmpose_mpii.yml index 2e1eb28659..7ff95d0e64 100644 --- a/configs/body_2d_keypoint/rtmpose/mpii/rtmpose_mpii.yml +++ b/configs/body_2d_keypoint/rtmpose/mpii/rtmpose_mpii.yml @@ -1,15 +1,15 @@ -Models: -- Config: configs/body_2d_keypoint/rtmpose/mpii/rtmpose-m_8xb64-210e_mpii-256x256.py - In Collection: RTMPose - Metadata: - Architecture: - - RTMPose - Training Data: MPII - Name: rtmpose-m_8xb64-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.907 - Mean@0.1: 0.348 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-mpii_pt-aic-coco_210e-256x256-ec4dbec8_20230206.pth +Models: +- Config: configs/body_2d_keypoint/rtmpose/mpii/rtmpose-m_8xb64-210e_mpii-256x256.py + In Collection: RTMPose + Metadata: + Architecture: + - RTMPose + Training Data: MPII + Name: rtmpose-m_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.907 + Mean@0.1: 0.348 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-mpii_pt-aic-coco_210e-256x256-ec4dbec8_20230206.pth diff --git a/configs/body_2d_keypoint/simcc/README.md b/configs/body_2d_keypoint/simcc/README.md index 6148c18bf5..6d377d027c 100644 --- a/configs/body_2d_keypoint/simcc/README.md +++ b/configs/body_2d_keypoint/simcc/README.md @@ -1,20 +1,20 @@ -# Top-down SimCC-based pose estimation - -Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. At the 2nd stage, SimCC based methods reformulate human pose estimation as two classification tasks for horizontal and vertical coordinates, and uniformly divide each pixel into several bins, thus obtain the keypoint coordinates given the features extracted from the bounding box area, following the paradigm introduced in [SimCC: a Simple Coordinate Classification Perspective for Human Pose Estimation](https://arxiv.org/abs/2107.03332). - -
- -
-
-## Results and Models
-
-### COCO Dataset
-
-Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset
-
-| Model | Input Size | AP | AR | Details and Download |
-| :---------------------------: | :--------: | :---: | :---: | :-----------------------------------------------: |
-| ResNet-50+SimCC | 384x288 | 0.735 | 0.790 | [resnet_coco.md](./coco/resnet_coco.md) |
-| ResNet-50+SimCC | 256x192 | 0.721 | 0.781 | [resnet_coco.md](./coco/resnet_coco.md) |
-| S-ViPNAS-MobileNet-V3+SimCC | 256x192 | 0.695 | 0.755 | [vipnas_coco.md](./coco/vipnas_coco.md) |
-| MobileNet-V2+SimCC(wo/deconv) | 256x192 | 0.620 | 0.678 | [mobilenetv2_coco.md](./coco/mobilenetv2_coco.md) |
+# Top-down SimCC-based pose estimation
+
+Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. At the second stage, SimCC-based methods reformulate human pose estimation as two classification tasks over the horizontal and vertical coordinates: each pixel is uniformly divided into several bins, and the keypoint coordinates are obtained by classifying the features extracted from the bounding box area into these bins, following the paradigm introduced in [SimCC: a Simple Coordinate Classification Perspective for Human Pose Estimation](https://arxiv.org/abs/2107.03332).
+
+

+ +
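To make the coordinate-classification idea above concrete, the following is a minimal decoding sketch in plain NumPy. It is illustrative only, not the MMPose `SimCCLabel` codec; the helper name `decode_simcc`, the toy shapes, and the simple confidence rule are assumptions made for this sketch.

```python
# Illustrative SimCC-style decoding, assuming plain NumPy arrays.
# simcc_x: (K, W * split_ratio) horizontal classification scores per keypoint
# simcc_y: (K, H * split_ratio) vertical classification scores per keypoint
import numpy as np


def decode_simcc(simcc_x, simcc_y, split_ratio=2.0):
    x_bins = simcc_x.argmax(axis=1)  # best horizontal bin per keypoint
    y_bins = simcc_y.argmax(axis=1)  # best vertical bin per keypoint
    # bins -> input-image pixels: divide by the bins-per-pixel split ratio
    keypoints = np.stack([x_bins, y_bins], axis=1) / split_ratio
    # one simple per-keypoint confidence: the weaker of the two axis maxima
    scores = np.minimum(simcc_x.max(axis=1), simcc_y.max(axis=1))
    return keypoints, scores


# toy example: 17 COCO keypoints, 192x256 input, split ratio 2 -> 384/512 bins
rng = np.random.default_rng(0)
kpts, conf = decode_simcc(rng.random((17, 384)), rng.random((17, 512)))
print(kpts.shape, conf.shape)  # (17, 2) (17,)
```

The `simcc_split_ratio=2.0` in the SimCC configs plays exactly this role: with a 192x256 input it yields 384 horizontal and 512 vertical bins, i.e. half-pixel localization granularity.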
+ +## Results and Models + +### COCO Dataset + +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Model | Input Size | AP | AR | Details and Download | +| :---------------------------: | :--------: | :---: | :---: | :-----------------------------------------------: | +| ResNet-50+SimCC | 384x288 | 0.735 | 0.790 | [resnet_coco.md](./coco/resnet_coco.md) | +| ResNet-50+SimCC | 256x192 | 0.721 | 0.781 | [resnet_coco.md](./coco/resnet_coco.md) | +| S-ViPNAS-MobileNet-V3+SimCC | 256x192 | 0.695 | 0.755 | [vipnas_coco.md](./coco/vipnas_coco.md) | +| MobileNet-V2+SimCC(wo/deconv) | 256x192 | 0.620 | 0.678 | [mobilenetv2_coco.md](./coco/mobilenetv2_coco.md) | diff --git a/configs/body_2d_keypoint/simcc/coco/mobilenetv2_coco.md b/configs/body_2d_keypoint/simcc/coco/mobilenetv2_coco.md index 42438774ba..e6d5b72c01 100644 --- a/configs/body_2d_keypoint/simcc/coco/mobilenetv2_coco.md +++ b/configs/body_2d_keypoint/simcc/coco/mobilenetv2_coco.md @@ -1,55 +1,55 @@ - - -
-SimCC (ECCV'2022) - -```bibtex -@misc{https://doi.org/10.48550/arxiv.2107.03332, - title={SimCC: a Simple Coordinate Classification Perspective for Human Pose Estimation}, - author={Li, Yanjie and Yang, Sen and Liu, Peidong and Zhang, Shoukui and Wang, Yunxiao and Wang, Zhicheng and Yang, Wankou and Xia, Shu-Tao}, - year={2021} -} -``` - -
- - - -
-MobilenetV2 (CVPR'2018) - -```bibtex -@inproceedings{sandler2018mobilenetv2, - title={Mobilenetv2: Inverted residuals and linear bottlenecks}, - author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={4510--4520}, - year={2018} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [simcc_mobilenetv2_wo_deconv](/configs/body_2d_keypoint/simcc/coco/simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192.py) | 256x192 | 0.620 | 0.855 | 0.697 | 0.678 | 0.902 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192-4b0703bb_20221010.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192-4b0703bb_20221010.log.json) | + + +
+SimCC (ECCV'2022) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2107.03332, + title={SimCC: a Simple Coordinate Classification Perspective for Human Pose Estimation}, + author={Li, Yanjie and Yang, Sen and Liu, Peidong and Zhang, Shoukui and Wang, Yunxiao and Wang, Zhicheng and Yang, Wankou and Xia, Shu-Tao}, + year={2021} +} +``` + +
+ + + +
+MobilenetV2 (CVPR'2018) + +```bibtex +@inproceedings{sandler2018mobilenetv2, + title={Mobilenetv2: Inverted residuals and linear bottlenecks}, + author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={4510--4520}, + year={2018} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [simcc_mobilenetv2_wo_deconv](/configs/body_2d_keypoint/simcc/coco/simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192.py) | 256x192 | 0.620 | 0.855 | 0.697 | 0.678 | 0.902 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192-4b0703bb_20221010.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192-4b0703bb_20221010.log.json) | diff --git a/configs/body_2d_keypoint/simcc/coco/mobilenetv2_coco.yml b/configs/body_2d_keypoint/simcc/coco/mobilenetv2_coco.yml index 00ef5aaecd..a72c85f134 100644 --- a/configs/body_2d_keypoint/simcc/coco/mobilenetv2_coco.yml +++ b/configs/body_2d_keypoint/simcc/coco/mobilenetv2_coco.yml @@ -1,19 +1,19 @@ -Models: -- Config: configs/body_2d_keypoint/simcc/coco/simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192.py - In Collection: SimCC - Metadata: - Architecture: &id001 - - SimCC - - MobilenetV2 - Training Data: COCO - Name: simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.62 - AP@0.5: 0.855 - AP@0.75: 0.697 - AR: 0.678 - AR@0.5: 0.902 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192-4b0703bb_20221010.pth +Models: +- Config: configs/body_2d_keypoint/simcc/coco/simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192.py + In Collection: SimCC + Metadata: + Architecture: &id001 + - SimCC + - MobilenetV2 + Training Data: COCO + Name: simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.62 + AP@0.5: 0.855 + AP@0.75: 0.697 + AR: 0.678 + AR@0.5: 0.902 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192-4b0703bb_20221010.pth diff --git a/configs/body_2d_keypoint/simcc/coco/resnet_coco.md b/configs/body_2d_keypoint/simcc/coco/resnet_coco.md index 80592b4db3..16b5eb0a73 100644 --- a/configs/body_2d_keypoint/simcc/coco/resnet_coco.md +++ b/configs/body_2d_keypoint/simcc/coco/resnet_coco.md @@ -1,56 +1,56 @@ - - -
-SimCC (ECCV'2022) - -```bibtex -@misc{https://doi.org/10.48550/arxiv.2107.03332, - title={SimCC: a Simple Coordinate Classification Perspective for Human Pose Estimation}, - author={Li, Yanjie and Yang, Sen and Liu, Peidong and Zhang, Shoukui and Wang, Yunxiao and Wang, Zhicheng and Yang, Wankou and Xia, Shu-Tao}, - year={2021} -} -``` - -
- - - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [simcc_resnet_50](/configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb64-210e_coco-256x192.py) | 256x192 | 0.721 | 0.897 | 0.798 | 0.781 | 0.937 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_res50_8xb64-210e_coco-256x192-8e0f5b59_20220919.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_res50_8xb64-210e_coco-256x192-8e0f5b59_20220919.log.json) | -| [simcc_resnet_50](/configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb32-140e_coco-384x288.py) | 384x288 | 0.735 | 0.899 | 0.800 | 0.790 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_res50_8xb32-140e_coco-384x288-45c3ba34_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_res50_8xb32-140e_coco-384x288-45c3ba34_20220913.log.json) | + + +
+SimCC (ECCV'2022) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2107.03332, + title={SimCC: a Simple Coordinate Classification Perspective for Human Pose Estimation}, + author={Li, Yanjie and Yang, Sen and Liu, Peidong and Zhang, Shoukui and Wang, Yunxiao and Wang, Zhicheng and Yang, Wankou and Xia, Shu-Tao}, + year={2021} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [simcc_resnet_50](/configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb64-210e_coco-256x192.py) | 256x192 | 0.721 | 0.897 | 0.798 | 0.781 | 0.937 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_res50_8xb64-210e_coco-256x192-8e0f5b59_20220919.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_res50_8xb64-210e_coco-256x192-8e0f5b59_20220919.log.json) | +| [simcc_resnet_50](/configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb32-140e_coco-384x288.py) | 384x288 | 0.735 | 0.899 | 0.800 | 0.790 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_res50_8xb32-140e_coco-384x288-45c3ba34_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_res50_8xb32-140e_coco-384x288-45c3ba34_20220913.log.json) | diff --git a/configs/body_2d_keypoint/simcc/coco/resnet_coco.yml b/configs/body_2d_keypoint/simcc/coco/resnet_coco.yml index 1e56c9e477..04c0acaec2 100644 --- a/configs/body_2d_keypoint/simcc/coco/resnet_coco.yml +++ b/configs/body_2d_keypoint/simcc/coco/resnet_coco.yml @@ -1,41 +1,41 @@ -Collections: -- Name: SimCC - Paper: - Title: A Simple Coordinate Classification Perspective for Human Pose Estimation - URL: https://arxiv.org/abs/2107.03332 - README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/simcc.md -Models: -- Config: configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb64-210e_coco-256x192.py - In Collection: SimCC - Metadata: - Architecture: &id001 - - SimCC - - ResNet - Training Data: COCO - Name: simcc_res50_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.721 - AP@0.5: 0.900 - AP@0.75: 0.798 - AR: 0.781 - AR@0.5: 0.937 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_res50_8xb64-210e_coco-256x192-8e0f5b59_20220919.pth -- Config: configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb32-140e_coco-384x288.py - In Collection: SimCC - Metadata: - Architecture: *id001 - Training Data: COCO - Name: simcc_res50_8xb32-140e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.735 - AP@0.5: 0.899 - AP@0.75: 0.800 - AR: 0.790 - AR@0.5: 0.939 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_res50_8xb32-140e_coco-384x288-45c3ba34_20220913.pth +Collections: +- Name: SimCC + Paper: + Title: A Simple Coordinate Classification Perspective for Human Pose Estimation + URL: https://arxiv.org/abs/2107.03332 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/simcc.md +Models: +- Config: configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb64-210e_coco-256x192.py + In Collection: SimCC + Metadata: + Architecture: &id001 + - SimCC + - ResNet + Training Data: COCO + Name: simcc_res50_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.721 + AP@0.5: 0.900 + AP@0.75: 0.798 + AR: 0.781 + AR@0.5: 0.937 + Task: Body 2D Keypoint + Weights: 
https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_res50_8xb64-210e_coco-256x192-8e0f5b59_20220919.pth +- Config: configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb32-140e_coco-384x288.py + In Collection: SimCC + Metadata: + Architecture: *id001 + Training Data: COCO + Name: simcc_res50_8xb32-140e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.735 + AP@0.5: 0.899 + AP@0.75: 0.800 + AR: 0.790 + AR@0.5: 0.939 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_res50_8xb32-140e_coco-384x288-45c3ba34_20220913.pth diff --git a/configs/body_2d_keypoint/simcc/coco/simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/simcc/coco/simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192.py index 800803d190..1e5f62a4c0 100644 --- a/configs/body_2d_keypoint/simcc/coco/simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/simcc/coco/simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192.py @@ -1,124 +1,124 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=train_cfg['max_epochs'], - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='SimCCLabel', input_size=(192, 256), sigma=6.0, simcc_split_ratio=2.0) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='MobileNetV2', - widen_factor=1., - out_indices=(7, ), - init_cfg=dict( - type='Pretrained', - checkpoint='mmcls://mobilenet_v2', - )), - head=dict( - type='SimCCHead', - in_channels=1280, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - deconv_out_channels=None, - loss=dict(type='KLDiscretLoss', use_target_weight=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - 
persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file=f'{data_root}person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', input_size=(192, 256), sigma=6.0, simcc_split_ratio=2.0) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MobileNetV2', + widen_factor=1., + out_indices=(7, ), + init_cfg=dict( + type='Pretrained', + checkpoint='mmcls://mobilenet_v2', + )), + head=dict( + type='SimCCHead', + in_channels=1280, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + deconv_out_channels=None, + loss=dict(type='KLDiscretLoss', use_target_weight=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', 
+ data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb32-140e_coco-384x288.py b/configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb32-140e_coco-384x288.py index c04358299f..c724f831a5 100644 --- a/configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb32-140e_coco-384x288.py +++ b/configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb32-140e_coco-384x288.py @@ -1,120 +1,120 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=140, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=1e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=train_cfg['max_epochs'], - milestones=[90, 120], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='SimCCLabel', input_size=(288, 384), sigma=6.0, simcc_split_ratio=2.0) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='SimCCHead', - in_channels=2048, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - loss=dict(type='KLDiscretLoss', use_target_weight=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -test_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file=f'{data_root}person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), 
- test_mode=True, - pipeline=test_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=140, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[90, 120], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', input_size=(288, 384), sigma=6.0, simcc_split_ratio=2.0) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='SimCCHead', + in_channels=2048, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + loss=dict(type='KLDiscretLoss', use_target_weight=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +test_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb64-210e_coco-256x192.py 
b/configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb64-210e_coco-256x192.py index 33232a4463..2a08ff9138 100644 --- a/configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/simcc/coco/simcc_res50_8xb64-210e_coco-256x192.py @@ -1,114 +1,114 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=1e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict(type='MultiStepLR', milestones=[170, 200], gamma=0.1, by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='SimCCLabel', input_size=(192, 256), sigma=6.0, simcc_split_ratio=2.0) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='SimCCHead', - in_channels=2048, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - loss=dict(type='KLDiscretLoss', use_target_weight=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -test_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file=f'{data_root}person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=test_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + 
type='Adam', + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict(type='MultiStepLR', milestones=[170, 200], gamma=0.1, by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', input_size=(192, 256), sigma=6.0, simcc_split_ratio=2.0) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='SimCCHead', + in_channels=2048, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + loss=dict(type='KLDiscretLoss', use_target_weight=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +test_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/simcc/coco/simcc_vipnas-mbv3_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/simcc/coco/simcc_vipnas-mbv3_8xb64-210e_coco-256x192.py index ba8ba040cb..cd2fe16462 100644 --- a/configs/body_2d_keypoint/simcc/coco/simcc_vipnas-mbv3_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/simcc/coco/simcc_vipnas-mbv3_8xb64-210e_coco-256x192.py @@ -1,119 +1,119 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy 
-param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=train_cfg['max_epochs'], - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='SimCCLabel', input_size=(192, 256), sigma=6.0, simcc_split_ratio=2.0) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict(type='ViPNAS_MobileNetV3'), - head=dict( - type='SimCCHead', - in_channels=160, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - deconv_type='vipnas', - deconv_out_channels=(160, 160, 160), - deconv_num_groups=(160, 160, 160), - loss=dict(type='KLDiscretLoss', use_target_weight=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file=data_root + 'person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + 
type='SimCCLabel', input_size=(192, 256), sigma=6.0, simcc_split_ratio=2.0) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict(type='ViPNAS_MobileNetV3'), + head=dict( + type='SimCCHead', + in_channels=160, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + deconv_type='vipnas', + deconv_out_channels=(160, 160, 160), + deconv_num_groups=(160, 160, 160), + loss=dict(type='KLDiscretLoss', use_target_weight=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=data_root + 'person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/simcc/coco/vipnas_coco.md b/configs/body_2d_keypoint/simcc/coco/vipnas_coco.md index a9d8b98fc3..4d36b73008 100644 --- a/configs/body_2d_keypoint/simcc/coco/vipnas_coco.md +++ b/configs/body_2d_keypoint/simcc/coco/vipnas_coco.md @@ -1,54 +1,54 @@ - - -
-SimCC (ECCV'2022) - -```bibtex -@misc{https://doi.org/10.48550/arxiv.2107.03332, - title={SimCC: a Simple Coordinate Classification Perspective for Human Pose Estimation}, - author={Li, Yanjie and Yang, Sen and Liu, Peidong and Zhang, Shoukui and Wang, Yunxiao and Wang, Zhicheng and Yang, Wankou and Xia, Shu-Tao}, - year={2021} -} -``` - -
- - - -
-ViPNAS (CVPR'2021) - -```bibtex -@article{xu2021vipnas, - title={ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search}, - author={Xu, Lumin and Guan, Yingda and Jin, Sheng and Liu, Wentao and Qian, Chen and Luo, Ping and Ouyang, Wanli and Wang, Xiaogang}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - year={2021} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [simcc_S-ViPNAS-MobileNetV3](/configs/body_2d_keypoint/simcc/coco/simcc_vipnas-mbv3_8xb64-210e_coco-256x192.py) | 256x192 | 0.695 | 0.883 | 0.772 | 0.755 | 0.927 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_vipnas-mbv3_8xb64-210e_coco-256x192-719f3489_20220922.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_vipnas-mbv3_8xb64-210e_coco-256x192-719f3489_20220922.log.json) | + + +
+SimCC (ECCV'2022) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2107.03332, + title={SimCC: a Simple Coordinate Classification Perspective for Human Pose Estimation}, + author={Li, Yanjie and Yang, Sen and Liu, Peidong and Zhang, Shoukui and Wang, Yunxiao and Wang, Zhicheng and Yang, Wankou and Xia, Shu-Tao}, + year={2021} +} +``` + +
+ + + +
+ViPNAS (CVPR'2021)
+
+```bibtex
+@inproceedings{xu2021vipnas,
+  title={ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search},
+  author={Xu, Lumin and Guan, Yingda and Jin, Sheng and Liu, Wentao and Qian, Chen and Luo, Ping and Ouyang, Wanli and Wang, Xiaogang},
+  booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
+  year={2021}
+}
+```
+
+

+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [simcc_S-ViPNAS-MobileNetV3](/configs/body_2d_keypoint/simcc/coco/simcc_vipnas-mbv3_8xb64-210e_coco-256x192.py) | 256x192 | 0.695 | 0.883 | 0.772 | 0.755 | 0.927 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_vipnas-mbv3_8xb64-210e_coco-256x192-719f3489_20220922.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_vipnas-mbv3_8xb64-210e_coco-256x192-719f3489_20220922.log.json) | diff --git a/configs/body_2d_keypoint/simcc/coco/vipnas_coco.yml b/configs/body_2d_keypoint/simcc/coco/vipnas_coco.yml index 95077c05c6..3790a10869 100644 --- a/configs/body_2d_keypoint/simcc/coco/vipnas_coco.yml +++ b/configs/body_2d_keypoint/simcc/coco/vipnas_coco.yml @@ -1,19 +1,19 @@ -Models: -- Config: configs/body_2d_keypoint/simcc/coco/simcc_vipnas-mbv3_8xb64-210e_coco-256x192.py - In Collection: SimCC - Metadata: - Architecture: &id001 - - SimCC - - ViPNAS - Training Data: COCO - Name: simcc_vipnas-mbv3_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.695 - AP@0.5: 0.883 - AP@0.75: 0.772 - AR: 0.755 - AR@0.5: 0.927 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_vipnas-mbv3_8xb64-210e_coco-256x192-719f3489_20220922.pth +Models: +- Config: configs/body_2d_keypoint/simcc/coco/simcc_vipnas-mbv3_8xb64-210e_coco-256x192.py + In Collection: SimCC + Metadata: + Architecture: &id001 + - SimCC + - ViPNAS + Training Data: COCO + Name: simcc_vipnas-mbv3_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.695 + AP@0.5: 0.883 + AP@0.75: 0.772 + AR: 0.755 + AR@0.5: 0.927 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_vipnas-mbv3_8xb64-210e_coco-256x192-719f3489_20220922.pth diff --git a/configs/body_2d_keypoint/simcc/mpii/simcc_res50_wo-deconv-8xb64-210e_mpii-256x256.py b/configs/body_2d_keypoint/simcc/mpii/simcc_res50_wo-deconv-8xb64-210e_mpii-256x256.py index ef8b47959e..74a43d5ba0 100644 --- a/configs/body_2d_keypoint/simcc/mpii/simcc_res50_wo-deconv-8xb64-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/simcc/mpii/simcc_res50_wo-deconv-8xb64-210e_mpii-256x256.py @@ -1,120 +1,120 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=train_cfg['max_epochs'], - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='SimCCLabel', input_size=(256, 256), sigma=6.0, simcc_split_ratio=2.0) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', 
- depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='SimCCHead', - in_channels=2048, - out_channels=16, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - deconv_out_channels=None, - loss=dict(type='KLDiscretLoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - shift_coords=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file=f'{data_root}/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', input_size=(256, 256), sigma=6.0, simcc_split_ratio=2.0) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='SimCCHead', + in_channels=2048, + out_channels=16, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + deconv_out_channels=None, + loss=dict(type='KLDiscretLoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 
'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file=f'{data_root}/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/README.md b/configs/body_2d_keypoint/topdown_heatmap/README.md index 47aae219e4..f0b54aa9e3 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/README.md +++ b/configs/body_2d_keypoint/topdown_heatmap/README.md @@ -1,133 +1,133 @@ -# Top-down heatmap-based pose estimation - -Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. Instead of estimating keypoint coordinates directly, the pose estimator will produce heatmaps which represent the likelihood of being a keypoint, following the paradigm introduced in [Simple Baselines for Human Pose Estimation and Tracking](http://openaccess.thecvf.com/content_ECCV_2018/html/Bin_Xiao_Simple_Baselines_for_ECCV_2018_paper.html). - -
- -
- -## Results and Models - -### COCO Dataset - -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Model | Input Size | AP | AR | Details and Download | -| :-------------: | :--------: | :---: | :---: | :-------------------------------------------------: | -| ViTPose-h | 256x192 | 0.790 | 0.840 | [vitpose_coco.md](./coco/vitpose_coco.md) | -| HRNet-w48+UDP | 256x192 | 0.768 | 0.817 | [hrnet_udp_coco.md](./coco/hrnet_udp_coco.md) | -| MSPN 4-stg | 256x192 | 0.765 | 0.826 | [mspn_coco.md](./coco/mspn_coco.md) | -| HRNet-w48+Dark | 256x192 | 0.764 | 0.814 | [hrnet_dark_coco.md](./coco/hrnet_dark_coco.md) | -| HRNet-w48 | 256x192 | 0.756 | 0.809 | [hrnet_coco.md](./coco/hrnet_coco.md) | -| HRFormer-B | 256x192 | 0.754 | 0.807 | [hrformer_coco.md](./coco/hrformer_coco.md) | -| RSN-50-3x | 256x192 | 0.750 | 0.814 | [rsn_coco.md](./coco/rsn_coco.md) | -| CSPNeXt-l | 256x192 | 0.750 | 0.800 | [cspnext_udp_coco.md](./coco/cspnext_udp_coco.md) | -| HRNet-w32 | 256x192 | 0.749 | 0.804 | [hrnet_coco.md](./coco/hrnet_coco.md) | -| Swin-L | 256x192 | 0.743 | 0.798 | [swin_coco.md](./coco/swin_coco.md) | -| ViTPose-s | 256x192 | 0.739 | 0.792 | [vitpose_coco.md](./coco/vitpose_coco.md) | -| HRFormer-S | 256x192 | 0.738 | 0.793 | [hrformer_coco.md](./coco/hrformer_coco.md) | -| Swin-B | 256x192 | 0.737 | 0.794 | [swin_coco.md](./coco/swin_coco.md) | -| SEResNet-101 | 256x192 | 0.734 | 0.790 | [seresnet_coco.md](./coco/seresnet_coco.md) | -| SCNet-101 | 256x192 | 0.733 | 0.789 | [scnet_coco.md](./coco/scnet_coco.md) | -| ResNet-101+Dark | 256x192 | 0.733 | 0.786 | [resnet_dark_coco.md](./coco/resnet_dark_coco.md) | -| CSPNeXt-m | 256x192 | 0.732 | 0.785 | [cspnext_udp_coco.md](./coco/cspnext_udp_coco.md) | -| ResNetV1d-101 | 256x192 | 0.732 | 0.785 | [resnetv1d_coco.md](./coco/resnetv1d_coco.md) | -| SEResNet-50 | 256x192 | 0.729 | 0.784 | [seresnet_coco.md](./coco/seresnet_coco.md) | -| SCNet-50 | 256x192 | 0.728 | 0.784 | [scnet_coco.md](./coco/scnet_coco.md) | -| ResNet-101 | 256x192 | 0.726 | 0.783 | [resnet_coco.md](./coco/resnet_coco.md) | -| ResNeXt-101 | 256x192 | 0.726 | 0.781 | [resnext_coco.md](./coco/resnext_coco.md) | -| HourglassNet | 256x256 | 0.726 | 0.780 | [hourglass_coco.md](./coco/hourglass_coco.md) | -| ResNeSt-101 | 256x192 | 0.725 | 0.781 | [resnest_coco.md](./coco/resnest_coco.md) | -| RSN-50 | 256x192 | 0.724 | 0.790 | [rsn_coco.md](./coco/rsn_coco.md) | -| Swin-T | 256x192 | 0.724 | 0.782 | [swin_coco.md](./coco/swin_coco.md) | -| MSPN 1-stg | 256x192 | 0.723 | 0.788 | [mspn_coco.md](./coco/mspn_coco.md) | -| ResNetV1d-50 | 256x192 | 0.722 | 0.777 | [resnetv1d_coco.md](./coco/resnetv1d_coco.md) | -| ResNeSt-50 | 256x192 | 0.720 | 0.775 | [resnest_coco.md](./coco/resnest_coco.md) | -| ResNet-50 | 256x192 | 0.718 | 0.774 | [resnet_coco.md](./coco/resnet_coco.md) | -| ResNeXt-50 | 256x192 | 0.715 | 0.771 | [resnext_coco.md](./coco/resnext_coco.md) | -| PVT-S | 256x192 | 0.714 | 0.773 | [pvt_coco.md](./coco/pvt_coco.md) | -| CSPNeXt-s | 256x192 | 0.697 | 0.753 | [cspnext_udp_coco.md](./coco/cspnext_udp_coco.md) | -| LiteHRNet-30 | 256x192 | 0.676 | 0.736 | [litehrnet_coco.md](./coco/litehrnet_coco.md) | -| CSPNeXt-tiny | 256x192 | 0.665 | 0.723 | [cspnext_udp_coco.md](./coco/cspnext_udp_coco.md) | -| MobileNet-v2 | 256x192 | 0.648 | 0.709 | [mobilenetv2_coco.md](./coco/mobilenetv2_coco.md) | -| LiteHRNet-18 | 256x192 | 0.642 | 0.705 | [litehrnet_coco.md](./coco/litehrnet_coco.md) | -| CPM | 256x192 | 0.627 | 0.689 | 
[cpm_coco.md](./coco/cpm_coco.md) | -| ShuffleNet-v2 | 256x192 | 0.602 | 0.668 | [shufflenetv2_coco.md](./coco/shufflenetv2_coco.md) | -| ShuffleNet-v1 | 256x192 | 0.587 | 0.654 | [shufflenetv1_coco.md](./coco/shufflenetv1_coco.md) | -| AlexNet | 256x192 | 0.448 | 0.521 | [alexnet_coco.md](./coco/alexnet_coco.md) | - -### MPII Dataset - -| Model | Input Size | PCKh@0.5 | PCKh@0.1 | Details and Download | -| :------------: | :--------: | :------: | :------: | :-------------------------------------------------: | -| HRNet-w48+Dark | 256x256 | 0.905 | 0.360 | [hrnet_dark_mpii.md](./mpii/hrnet_dark_mpii.md) | -| HRNet-w48 | 256x256 | 0.902 | 0.303 | [hrnet_mpii.md](./mpii/cspnext_udp_mpii.md) | -| HRNet-w48 | 256x256 | 0.901 | 0.337 | [hrnet_mpii.md](./mpii/hrnet_mpii.md) | -| HRNet-w32 | 256x256 | 0.900 | 0.334 | [hrnet_mpii.md](./mpii/hrnet_mpii.md) | -| HourglassNet | 256x256 | 0.889 | 0.317 | [hourglass_mpii.md](./mpii/hourglass_mpii.md) | -| ResNet-152 | 256x256 | 0.889 | 0.303 | [resnet_mpii.md](./mpii/resnet_mpii.md) | -| ResNetV1d-152 | 256x256 | 0.888 | 0.300 | [resnetv1d_mpii.md](./mpii/resnetv1d_mpii.md) | -| SCNet-50 | 256x256 | 0.888 | 0.290 | [scnet_mpii.md](./mpii/scnet_mpii.md) | -| ResNeXt-152 | 256x256 | 0.887 | 0.294 | [resnext_mpii.md](./mpii/resnext_mpii.md) | -| SEResNet-50 | 256x256 | 0.884 | 0.292 | [seresnet_mpii.md](./mpii/seresnet_mpii.md) | -| ResNet-50 | 256x256 | 0.882 | 0.286 | [resnet_mpii.md](./mpii/resnet_mpii.md) | -| ResNetV1d-50 | 256x256 | 0.881 | 0.290 | [resnetv1d_mpii.md](./mpii/resnetv1d_mpii.md) | -| CPM | 368x368\* | 0.876 | 0.285 | [cpm_mpii.md](./mpii/cpm_mpii.md) | -| LiteHRNet-30 | 256x256 | 0.869 | 0.271 | [litehrnet_mpii.md](./mpii/litehrnet_mpii.md) | -| LiteHRNet-18 | 256x256 | 0.859 | 0.260 | [litehrnet_mpii.md](./mpii/litehrnet_mpii.md) | -| MobileNet-v2 | 256x256 | 0.854 | 0.234 | [mobilenetv2_mpii.md](./mpii/mobilenetv2_mpii.md) | -| ShuffleNet-v2 | 256x256 | 0.828 | 0.205 | [shufflenetv2_mpii.md](./mpii/shufflenetv2_mpii.md) | -| ShuffleNet-v1 | 256x256 | 0.824 | 0.195 | [shufflenetv1_mpii.md](./mpii/shufflenetv1_mpii.md) | - -### CrowdPose Dataset - -Results on CrowdPose test with [YOLOv3](https://github.com/eriklindernoren/PyTorch-YOLOv3) human detector - -| Model | Input Size | AP | AR | Details and Download | -| :--------: | :--------: | :---: | :---: | :--------------------------------------------------------: | -| HRNet-w32 | 256x192 | 0.675 | 0.816 | [hrnet_crowdpose.md](./crowdpose/hrnet_crowdpose.md) | -| CSPNeXt-m | 256x192 | 0.662 | 0.755 | [hrnet_crowdpose.md](./crowdpose/cspnext_udp_crowdpose.md) | -| ResNet-101 | 256x192 | 0.647 | 0.800 | [resnet_crowdpose.md](./crowdpose/resnet_crowdpose.md) | -| HRNet-w32 | 256x192 | 0.637 | 0.785 | [resnet_crowdpose.md](./crowdpose/resnet_crowdpose.md) | - -### AIC Dataset - -Results on AIC val set with ground-truth bounding boxes. - -| Model | Input Size | AP | AR | Details and Download | -| :--------: | :--------: | :---: | :---: | :----------------------------------: | -| HRNet-w32 | 256x192 | 0.323 | 0.366 | [hrnet_aic.md](./aic/hrnet_aic.md) | -| ResNet-101 | 256x192 | 0.294 | 0.337 | [resnet_aic.md](./aic/resnet_aic.md) | - -### JHMDB Dataset - -| Model | Input Size | PCK(norm. by person size) | PCK (norm. 
by torso size) | Details and Download | -| :-------: | :--------: | :-----------------------: | :-----------------------: | :----------------------------------------: | -| ResNet-50 | 256x256 | 96.0 | 80.1 | [resnet_jhmdb.md](./jhmdb/resnet_jhmdb.md) | -| CPM | 368x368 | 89.8 | 65.7 | [cpm_jhmdb.md](./jhmdb/cpm_jhmdb.md) | - -### PoseTrack2018 Dataset - -Results on PoseTrack2018 val with ground-truth bounding boxes. - -| Model | Input Size | AP | Details and Download | -| :-------: | :--------: | :--: | :----------------------------------------------------------: | -| HRNet-w48 | 256x192 | 84.6 | [hrnet_posetrack18.md](./posetrack18/hrnet_posetrack18.md) | -| HRNet-w32 | 256x192 | 83.4 | [hrnet_posetrack18.md](./posetrack18/hrnet_posetrack18.md) | -| ResNet-50 | 256x192 | 81.2 | [resnet_posetrack18.md](./posetrack18/resnet_posetrack18.md) | - -### Human-Art Dataset - -Results on Human-Art validation dataset with detector having human AP of 56.2 on Human-Art validation dataset - -| Model | Input Size | AP | AR | Details and Download | -| :-------: | :--------: | :---: | :---: | :---------------------------------------------------: | -| ViTPose-s | 256x192 | 0.381 | 0.448 | [vitpose_humanart.md](./humanart/vitpose_humanart.md) | -| ViTPose-b | 256x192 | 0.410 | 0.475 | [vitpose_humanart.md](./humanart/vitpose_humanart.md) | - -Results on Human-Art validation dataset with ground-truth bounding-box - -| Model | Input Size | AP | AR | Details and Download | -| :-------: | :--------: | :---: | :---: | :---------------------------------------------------: | -| ViTPose-s | 256x192 | 0.738 | 0.768 | [vitpose_humanart.md](./humanart/vitpose_humanart.md) | -| ViTPose-b | 256x192 | 0.759 | 0.790 | [vitpose_humanart.md](./humanart/vitpose_humanart.md) | +# Top-down heatmap-based pose estimation + +Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. Instead of estimating keypoint coordinates directly, the pose estimator will produce heatmaps which represent the likelihood of being a keypoint, following the paradigm introduced in [Simple Baselines for Human Pose Estimation and Tracking](http://openaccess.thecvf.com/content_ECCV_2018/html/Bin_Xiao_Simple_Baselines_for_ECCV_2018_paper.html). + +
+ +
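Concretely, under the heatmap paradigm described above the head outputs one heatmap per keypoint for each person crop, and coordinates are only recovered at decode time. Below is a simplified sketch of the usual MSRA-style decoding used by these configs' `MSRAHeatmap` codec: take the argmax, apply a quarter-pixel shift toward the stronger neighbouring response, then rescale by the heatmap stride. It is illustrative only, not MMPose's codec implementation.

```python
import numpy as np

def decode_heatmaps(heatmaps, input_size):
    """Decode (K, H, W) heatmaps into keypoints in input-image coordinates.

    input_size is (width, height) of the network input, e.g. (192, 256) with
    48x64 heatmaps, giving a stride of 4 in both directions.
    """
    K, H, W = heatmaps.shape
    keypoints = np.zeros((K, 2), dtype=np.float32)
    scores = np.zeros(K, dtype=np.float32)
    stride_x = input_size[0] / W
    stride_y = input_size[1] / H
    for k, hm in enumerate(heatmaps):
        y, x = np.unravel_index(np.argmax(hm), hm.shape)
        # sub-pixel refinement: nudge 0.25 px toward the larger neighbour
        dx = 0.25 * np.sign(hm[y, min(x + 1, W - 1)] - hm[y, max(x - 1, 0)])
        dy = 0.25 * np.sign(hm[min(y + 1, H - 1), x] - hm[max(y - 1, 0), x])
        keypoints[k] = ((x + dx) * stride_x, (y + dy) * stride_y)
        scores[k] = hm[y, x]
    return keypoints, scores
```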
+ +## Results and Models + +### COCO Dataset + +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Model | Input Size | AP | AR | Details and Download | +| :-------------: | :--------: | :---: | :---: | :-------------------------------------------------: | +| ViTPose-h | 256x192 | 0.790 | 0.840 | [vitpose_coco.md](./coco/vitpose_coco.md) | +| HRNet-w48+UDP | 256x192 | 0.768 | 0.817 | [hrnet_udp_coco.md](./coco/hrnet_udp_coco.md) | +| MSPN 4-stg | 256x192 | 0.765 | 0.826 | [mspn_coco.md](./coco/mspn_coco.md) | +| HRNet-w48+Dark | 256x192 | 0.764 | 0.814 | [hrnet_dark_coco.md](./coco/hrnet_dark_coco.md) | +| HRNet-w48 | 256x192 | 0.756 | 0.809 | [hrnet_coco.md](./coco/hrnet_coco.md) | +| HRFormer-B | 256x192 | 0.754 | 0.807 | [hrformer_coco.md](./coco/hrformer_coco.md) | +| RSN-50-3x | 256x192 | 0.750 | 0.814 | [rsn_coco.md](./coco/rsn_coco.md) | +| CSPNeXt-l | 256x192 | 0.750 | 0.800 | [cspnext_udp_coco.md](./coco/cspnext_udp_coco.md) | +| HRNet-w32 | 256x192 | 0.749 | 0.804 | [hrnet_coco.md](./coco/hrnet_coco.md) | +| Swin-L | 256x192 | 0.743 | 0.798 | [swin_coco.md](./coco/swin_coco.md) | +| ViTPose-s | 256x192 | 0.739 | 0.792 | [vitpose_coco.md](./coco/vitpose_coco.md) | +| HRFormer-S | 256x192 | 0.738 | 0.793 | [hrformer_coco.md](./coco/hrformer_coco.md) | +| Swin-B | 256x192 | 0.737 | 0.794 | [swin_coco.md](./coco/swin_coco.md) | +| SEResNet-101 | 256x192 | 0.734 | 0.790 | [seresnet_coco.md](./coco/seresnet_coco.md) | +| SCNet-101 | 256x192 | 0.733 | 0.789 | [scnet_coco.md](./coco/scnet_coco.md) | +| ResNet-101+Dark | 256x192 | 0.733 | 0.786 | [resnet_dark_coco.md](./coco/resnet_dark_coco.md) | +| CSPNeXt-m | 256x192 | 0.732 | 0.785 | [cspnext_udp_coco.md](./coco/cspnext_udp_coco.md) | +| ResNetV1d-101 | 256x192 | 0.732 | 0.785 | [resnetv1d_coco.md](./coco/resnetv1d_coco.md) | +| SEResNet-50 | 256x192 | 0.729 | 0.784 | [seresnet_coco.md](./coco/seresnet_coco.md) | +| SCNet-50 | 256x192 | 0.728 | 0.784 | [scnet_coco.md](./coco/scnet_coco.md) | +| ResNet-101 | 256x192 | 0.726 | 0.783 | [resnet_coco.md](./coco/resnet_coco.md) | +| ResNeXt-101 | 256x192 | 0.726 | 0.781 | [resnext_coco.md](./coco/resnext_coco.md) | +| HourglassNet | 256x256 | 0.726 | 0.780 | [hourglass_coco.md](./coco/hourglass_coco.md) | +| ResNeSt-101 | 256x192 | 0.725 | 0.781 | [resnest_coco.md](./coco/resnest_coco.md) | +| RSN-50 | 256x192 | 0.724 | 0.790 | [rsn_coco.md](./coco/rsn_coco.md) | +| Swin-T | 256x192 | 0.724 | 0.782 | [swin_coco.md](./coco/swin_coco.md) | +| MSPN 1-stg | 256x192 | 0.723 | 0.788 | [mspn_coco.md](./coco/mspn_coco.md) | +| ResNetV1d-50 | 256x192 | 0.722 | 0.777 | [resnetv1d_coco.md](./coco/resnetv1d_coco.md) | +| ResNeSt-50 | 256x192 | 0.720 | 0.775 | [resnest_coco.md](./coco/resnest_coco.md) | +| ResNet-50 | 256x192 | 0.718 | 0.774 | [resnet_coco.md](./coco/resnet_coco.md) | +| ResNeXt-50 | 256x192 | 0.715 | 0.771 | [resnext_coco.md](./coco/resnext_coco.md) | +| PVT-S | 256x192 | 0.714 | 0.773 | [pvt_coco.md](./coco/pvt_coco.md) | +| CSPNeXt-s | 256x192 | 0.697 | 0.753 | [cspnext_udp_coco.md](./coco/cspnext_udp_coco.md) | +| LiteHRNet-30 | 256x192 | 0.676 | 0.736 | [litehrnet_coco.md](./coco/litehrnet_coco.md) | +| CSPNeXt-tiny | 256x192 | 0.665 | 0.723 | [cspnext_udp_coco.md](./coco/cspnext_udp_coco.md) | +| MobileNet-v2 | 256x192 | 0.648 | 0.709 | [mobilenetv2_coco.md](./coco/mobilenetv2_coco.md) | +| LiteHRNet-18 | 256x192 | 0.642 | 0.705 | [litehrnet_coco.md](./coco/litehrnet_coco.md) | +| CPM | 256x192 | 0.627 | 0.689 | 
[cpm_coco.md](./coco/cpm_coco.md) | +| ShuffleNet-v2 | 256x192 | 0.602 | 0.668 | [shufflenetv2_coco.md](./coco/shufflenetv2_coco.md) | +| ShuffleNet-v1 | 256x192 | 0.587 | 0.654 | [shufflenetv1_coco.md](./coco/shufflenetv1_coco.md) | +| AlexNet | 256x192 | 0.448 | 0.521 | [alexnet_coco.md](./coco/alexnet_coco.md) | + +### MPII Dataset + +| Model | Input Size | PCKh@0.5 | PCKh@0.1 | Details and Download | +| :------------: | :--------: | :------: | :------: | :-------------------------------------------------: | +| HRNet-w48+Dark | 256x256 | 0.905 | 0.360 | [hrnet_dark_mpii.md](./mpii/hrnet_dark_mpii.md) | +| HRNet-w48 | 256x256 | 0.902 | 0.303 | [hrnet_mpii.md](./mpii/cspnext_udp_mpii.md) | +| HRNet-w48 | 256x256 | 0.901 | 0.337 | [hrnet_mpii.md](./mpii/hrnet_mpii.md) | +| HRNet-w32 | 256x256 | 0.900 | 0.334 | [hrnet_mpii.md](./mpii/hrnet_mpii.md) | +| HourglassNet | 256x256 | 0.889 | 0.317 | [hourglass_mpii.md](./mpii/hourglass_mpii.md) | +| ResNet-152 | 256x256 | 0.889 | 0.303 | [resnet_mpii.md](./mpii/resnet_mpii.md) | +| ResNetV1d-152 | 256x256 | 0.888 | 0.300 | [resnetv1d_mpii.md](./mpii/resnetv1d_mpii.md) | +| SCNet-50 | 256x256 | 0.888 | 0.290 | [scnet_mpii.md](./mpii/scnet_mpii.md) | +| ResNeXt-152 | 256x256 | 0.887 | 0.294 | [resnext_mpii.md](./mpii/resnext_mpii.md) | +| SEResNet-50 | 256x256 | 0.884 | 0.292 | [seresnet_mpii.md](./mpii/seresnet_mpii.md) | +| ResNet-50 | 256x256 | 0.882 | 0.286 | [resnet_mpii.md](./mpii/resnet_mpii.md) | +| ResNetV1d-50 | 256x256 | 0.881 | 0.290 | [resnetv1d_mpii.md](./mpii/resnetv1d_mpii.md) | +| CPM | 368x368\* | 0.876 | 0.285 | [cpm_mpii.md](./mpii/cpm_mpii.md) | +| LiteHRNet-30 | 256x256 | 0.869 | 0.271 | [litehrnet_mpii.md](./mpii/litehrnet_mpii.md) | +| LiteHRNet-18 | 256x256 | 0.859 | 0.260 | [litehrnet_mpii.md](./mpii/litehrnet_mpii.md) | +| MobileNet-v2 | 256x256 | 0.854 | 0.234 | [mobilenetv2_mpii.md](./mpii/mobilenetv2_mpii.md) | +| ShuffleNet-v2 | 256x256 | 0.828 | 0.205 | [shufflenetv2_mpii.md](./mpii/shufflenetv2_mpii.md) | +| ShuffleNet-v1 | 256x256 | 0.824 | 0.195 | [shufflenetv1_mpii.md](./mpii/shufflenetv1_mpii.md) | + +### CrowdPose Dataset + +Results on CrowdPose test with [YOLOv3](https://github.com/eriklindernoren/PyTorch-YOLOv3) human detector + +| Model | Input Size | AP | AR | Details and Download | +| :--------: | :--------: | :---: | :---: | :--------------------------------------------------------: | +| HRNet-w32 | 256x192 | 0.675 | 0.816 | [hrnet_crowdpose.md](./crowdpose/hrnet_crowdpose.md) | +| CSPNeXt-m | 256x192 | 0.662 | 0.755 | [hrnet_crowdpose.md](./crowdpose/cspnext_udp_crowdpose.md) | +| ResNet-101 | 256x192 | 0.647 | 0.800 | [resnet_crowdpose.md](./crowdpose/resnet_crowdpose.md) | +| HRNet-w32 | 256x192 | 0.637 | 0.785 | [resnet_crowdpose.md](./crowdpose/resnet_crowdpose.md) | + +### AIC Dataset + +Results on AIC val set with ground-truth bounding boxes. + +| Model | Input Size | AP | AR | Details and Download | +| :--------: | :--------: | :---: | :---: | :----------------------------------: | +| HRNet-w32 | 256x192 | 0.323 | 0.366 | [hrnet_aic.md](./aic/hrnet_aic.md) | +| ResNet-101 | 256x192 | 0.294 | 0.337 | [resnet_aic.md](./aic/resnet_aic.md) | + +### JHMDB Dataset + +| Model | Input Size | PCK(norm. by person size) | PCK (norm. 
by torso size) | Details and Download | +| :-------: | :--------: | :-----------------------: | :-----------------------: | :----------------------------------------: | +| ResNet-50 | 256x256 | 96.0 | 80.1 | [resnet_jhmdb.md](./jhmdb/resnet_jhmdb.md) | +| CPM | 368x368 | 89.8 | 65.7 | [cpm_jhmdb.md](./jhmdb/cpm_jhmdb.md) | + +### PoseTrack2018 Dataset + +Results on PoseTrack2018 val with ground-truth bounding boxes. + +| Model | Input Size | AP | Details and Download | +| :-------: | :--------: | :--: | :----------------------------------------------------------: | +| HRNet-w48 | 256x192 | 84.6 | [hrnet_posetrack18.md](./posetrack18/hrnet_posetrack18.md) | +| HRNet-w32 | 256x192 | 83.4 | [hrnet_posetrack18.md](./posetrack18/hrnet_posetrack18.md) | +| ResNet-50 | 256x192 | 81.2 | [resnet_posetrack18.md](./posetrack18/resnet_posetrack18.md) | + +### Human-Art Dataset + +Results on Human-Art validation dataset with detector having human AP of 56.2 on Human-Art validation dataset + +| Model | Input Size | AP | AR | Details and Download | +| :-------: | :--------: | :---: | :---: | :---------------------------------------------------: | +| ViTPose-s | 256x192 | 0.381 | 0.448 | [vitpose_humanart.md](./humanart/vitpose_humanart.md) | +| ViTPose-b | 256x192 | 0.410 | 0.475 | [vitpose_humanart.md](./humanart/vitpose_humanart.md) | + +Results on Human-Art validation dataset with ground-truth bounding-box + +| Model | Input Size | AP | AR | Details and Download | +| :-------: | :--------: | :---: | :---: | :---------------------------------------------------: | +| ViTPose-s | 256x192 | 0.738 | 0.768 | [vitpose_humanart.md](./humanart/vitpose_humanart.md) | +| ViTPose-b | 256x192 | 0.759 | 0.790 | [vitpose_humanart.md](./humanart/vitpose_humanart.md) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/aic/hrnet_aic.md b/configs/body_2d_keypoint/topdown_heatmap/aic/hrnet_aic.md index 4b2cefcdcb..282bac8071 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/aic/hrnet_aic.md +++ b/configs/body_2d_keypoint/topdown_heatmap/aic/hrnet_aic.md @@ -1,38 +1,38 @@ - - -
-HRNet (CVPR'2019)
-
-```bibtex
-@inproceedings{sun2019deep,
-  title={Deep high-resolution representation learning for human pose estimation},
-  author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong},
-  booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
-  pages={5693--5703},
-  year={2019}
-}
-```
-
-
- - - -
-AI Challenger (ArXiv'2017)
-
-```bibtex
-@article{wu2017ai,
-  title={Ai challenger: A large-scale dataset for going deeper in image understanding},
-  author={Wu, Jiahong and Zheng, He and Zhao, Bo and Li, Yixin and Yan, Baoming and Liang, Rui and Wang, Wenjia and Zhou, Shipei and Lin, Guosen and Fu, Yanwei and others},
-  journal={arXiv preprint arXiv:1711.06475},
-  year={2017}
-}
-```
-
-
-
-Results on AIC val set with ground-truth bounding boxes
-
-| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log |
-| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: |
-| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_hrnet-w32_8xb64-210e_aic-256x192.py) | 256x192 | 0.323 | 0.761 | 0.218 | 0.366 | 0.789 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_aic_256x192-30a4e465_20200826.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_aic_256x192_20200826.log.json) |
+
+
+
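The AP/AR values reported for AIC follow COCO-style keypoint evaluation, which scores each prediction against the ground truth with Object Keypoint Similarity (OKS); the AIC configs in this patch evaluate with `CocoMetric(use_area=False)`. A rough sketch of the OKS formula is given below, with hypothetical inputs and per-keypoint falloff constants, as a reading aid rather than the evaluator's actual code.

```python
import numpy as np

def oks(pred, gt, visible, scale, k):
    """Object Keypoint Similarity for one instance.

    pred, gt: (K, 2) keypoint coordinates; visible: (K,) boolean mask of
    labeled keypoints; scale: object scale (e.g. sqrt of box area);
    k: (K,) dataset-specific per-keypoint falloff constants.
    """
    d2 = np.sum((pred - gt) ** 2, axis=-1)
    e = d2 / (2 * scale ** 2 * k ** 2 + np.finfo(float).eps)
    return np.exp(-e)[visible].mean() if visible.any() else 0.0

# AP is then the precision of matches with OKS above thresholds 0.50:0.05:0.95,
# averaged over thresholds, mirroring COCO box/mask AP.
```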
+HRNet (CVPR'2019)
+
+```bibtex
+@inproceedings{sun2019deep,
+  title={Deep high-resolution representation learning for human pose estimation},
+  author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong},
+  booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
+  pages={5693--5703},
+  year={2019}
+}
+```
+
+
+ + + +
+AI Challenger (ArXiv'2017)
+
+```bibtex
+@article{wu2017ai,
+  title={Ai challenger: A large-scale dataset for going deeper in image understanding},
+  author={Wu, Jiahong and Zheng, He and Zhao, Bo and Li, Yixin and Yan, Baoming and Liang, Rui and Wang, Wenjia and Zhou, Shipei and Lin, Guosen and Fu, Yanwei and others},
+  journal={arXiv preprint arXiv:1711.06475},
+  year={2017}
+}
+```
+
+
+ +Results on AIC val set with ground-truth bounding boxes + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_hrnet-w32_8xb64-210e_aic-256x192.py) | 256x192 | 0.323 | 0.761 | 0.218 | 0.366 | 0.789 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_aic_256x192-30a4e465_20200826.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_aic_256x192_20200826.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/aic/hrnet_aic.yml b/configs/body_2d_keypoint/topdown_heatmap/aic/hrnet_aic.yml index 0bbc52ccb8..9550a4f216 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/aic/hrnet_aic.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/aic/hrnet_aic.yml @@ -1,18 +1,18 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_hrnet-w32_8xb64-210e_aic-256x192.py - In Collection: HRNet - Metadata: - Architecture: - - HRNet - Training Data: AI Challenger - Name: td-hm_hrnet-w32_8xb64-210e_aic-256x192 - Results: - - Dataset: AI Challenger - Metrics: - AP: 0.323 - AP@0.5: 0.761 - AP@0.75: 0.218 - AR: 0.366 - AR@0.5: 0.789 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_aic_256x192-30a4e465_20200826.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_hrnet-w32_8xb64-210e_aic-256x192.py + In Collection: HRNet + Metadata: + Architecture: + - HRNet + Training Data: AI Challenger + Name: td-hm_hrnet-w32_8xb64-210e_aic-256x192 + Results: + - Dataset: AI Challenger + Metrics: + AP: 0.323 + AP@0.5: 0.761 + AP@0.75: 0.218 + AR: 0.366 + AR@0.5: 0.789 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_aic_256x192-30a4e465_20200826.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/aic/resnet_aic.md b/configs/body_2d_keypoint/topdown_heatmap/aic/resnet_aic.md index 1cb0f57eb3..f4a457df86 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/aic/resnet_aic.md +++ b/configs/body_2d_keypoint/topdown_heatmap/aic/resnet_aic.md @@ -1,55 +1,55 @@ - - -
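The `*.yml` files in this patch, such as `hrnet_aic.yml` just above, are model-index metadata: each entry under `Models` ties a config file to its training data, reported metrics, and checkpoint URL. A small sketch of reading such a file with PyYAML and listing the checkpoints, assuming the layout shown above:

```python
import yaml

# Path taken from this patch; any of the *.yml model-index files works the same way.
with open('configs/body_2d_keypoint/topdown_heatmap/aic/hrnet_aic.yml') as f:
    model_index = yaml.safe_load(f)

for entry in model_index['Models']:
    metrics = entry['Results'][0]['Metrics']
    print(f"{entry['Name']}: AP={metrics['AP']} -> {entry['Weights']}")
```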
-SimpleBaseline2D (ECCV'2018)
-
-```bibtex
-@inproceedings{xiao2018simple,
-  title={Simple baselines for human pose estimation and tracking},
-  author={Xiao, Bin and Wu, Haiping and Wei, Yichen},
-  booktitle={Proceedings of the European conference on computer vision (ECCV)},
-  pages={466--481},
-  year={2018}
-}
-```
-
-
- - - -
-ResNet (CVPR'2016)
-
-```bibtex
-@inproceedings{he2016deep,
-  title={Deep residual learning for image recognition},
-  author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian},
-  booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
-  pages={770--778},
-  year={2016}
-}
-```
-
-
- - - -
-AI Challenger (ArXiv'2017)
-
-```bibtex
-@article{wu2017ai,
-  title={Ai challenger: A large-scale dataset for going deeper in image understanding},
-  author={Wu, Jiahong and Zheng, He and Zhao, Bo and Li, Yixin and Yan, Baoming and Liang, Rui and Wang, Wenjia and Zhou, Shipei and Lin, Guosen and Fu, Yanwei and others},
-  journal={arXiv preprint arXiv:1711.06475},
-  year={2017}
-}
-```
-
-
-
-Results on AIC val set with ground-truth bounding boxes
-
-| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log |
-| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: |
-| [pose_resnet_101](/configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_res101_8xb64-210e_aic-256x192.py) | 256x192 | 0.294 | 0.736 | 0.172 | 0.337 | 0.762 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res101_aic_256x192-79b35445_20200826.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res101_aic_256x192_20200826.log.json) |
+
+
+
+SimpleBaseline2D (ECCV'2018)
+
+```bibtex
+@inproceedings{xiao2018simple,
+  title={Simple baselines for human pose estimation and tracking},
+  author={Xiao, Bin and Wu, Haiping and Wei, Yichen},
+  booktitle={Proceedings of the European conference on computer vision (ECCV)},
+  pages={466--481},
+  year={2018}
+}
+```
+
+
+ + + +
+ResNet (CVPR'2016)
+
+```bibtex
+@inproceedings{he2016deep,
+  title={Deep residual learning for image recognition},
+  author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian},
+  booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
+  pages={770--778},
+  year={2016}
+}
+```
+
+
+ + + +
+AI Challenger (ArXiv'2017)
+
+```bibtex
+@article{wu2017ai,
+  title={Ai challenger: A large-scale dataset for going deeper in image understanding},
+  author={Wu, Jiahong and Zheng, He and Zhao, Bo and Li, Yixin and Yan, Baoming and Liang, Rui and Wang, Wenjia and Zhou, Shipei and Lin, Guosen and Fu, Yanwei and others},
+  journal={arXiv preprint arXiv:1711.06475},
+  year={2017}
+}
+```
+
+
+ +Results on AIC val set with ground-truth bounding boxes + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_resnet_101](/configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_res101_8xb64-210e_aic-256x192.py) | 256x192 | 0.294 | 0.736 | 0.172 | 0.337 | 0.762 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res101_aic_256x192-79b35445_20200826.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res101_aic_256x192_20200826.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/aic/resnet_aic.yml b/configs/body_2d_keypoint/topdown_heatmap/aic/resnet_aic.yml index e320056858..9c89bc3bf9 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/aic/resnet_aic.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/aic/resnet_aic.yml @@ -1,19 +1,19 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_res101_8xb64-210e_aic-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: - - SimpleBaseline2D - - ResNet - Training Data: AI Challenger - Name: td-hm_res101_8xb64-210e_aic-256x192 - Results: - - Dataset: AI Challenger - Metrics: - AP: 0.294 - AP@0.5: 0.736 - AP@0.75: 0.172 - AR: 0.337 - AR@0.5: 0.762 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res101_aic_256x192-79b35445_20200826.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_res101_8xb64-210e_aic-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - ResNet + Training Data: AI Challenger + Name: td-hm_res101_8xb64-210e_aic-256x192 + Results: + - Dataset: AI Challenger + Metrics: + AP: 0.294 + AP@0.5: 0.736 + AP@0.75: 0.172 + AR: 0.337 + AR@0.5: 0.762 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res101_aic_256x192-79b35445_20200826.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_hrnet-w32_8xb64-210e_aic-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_hrnet-w32_8xb64-210e_aic-256x192.py index 4d4c504d38..1c6d8c57e9 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_hrnet-w32_8xb64-210e_aic-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_hrnet-w32_8xb64-210e_aic-256x192.py @@ -1,151 +1,151 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - 
stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=14, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'AicDataset' -data_mode = 'topdown' -data_root = 'data/aic/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/aic_train.json', - data_prefix=dict(img='ai_challenger_keypoint_train_20170902/' - 'keypoint_train_images_20170902/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/aic_val.json', - data_prefix=dict(img='ai_challenger_keypoint_validation_20170911/' - 'keypoint_validation_images_20170911/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/aic_val.json', - use_area=False) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + 
backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=14, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AicDataset' +data_mode = 'topdown' +data_root = 'data/aic/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/aic_train.json', + data_prefix=dict(img='ai_challenger_keypoint_train_20170902/' + 'keypoint_train_images_20170902/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/aic_val.json', + data_prefix=dict(img='ai_challenger_keypoint_validation_20170911/' + 'keypoint_validation_images_20170911/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/aic_val.json', + use_area=False) +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_res101_8xb64-210e_aic-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_res101_8xb64-210e_aic-256x192.py index e61da3a5c4..d368730fd3 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_res101_8xb64-210e_aic-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_res101_8xb64-210e_aic-256x192.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the 
actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=101, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=14, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'AicDataset' -data_mode = 'topdown' -data_root = 'data/aic/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/aic_train.json', - data_prefix=dict(img='ai_challenger_keypoint_train_20170902/' - 'keypoint_train_images_20170902/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/aic_val.json', - data_prefix=dict(img='ai_challenger_keypoint_validation_20170911/' - 'keypoint_validation_images_20170911/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/aic_val.json', - use_area=False) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + 
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=14, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AicDataset' +data_mode = 'topdown' +data_root = 'data/aic/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/aic_train.json', + data_prefix=dict(img='ai_challenger_keypoint_train_20170902/' + 'keypoint_train_images_20170902/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/aic_val.json', + data_prefix=dict(img='ai_challenger_keypoint_validation_20170911/' + 'keypoint_validation_images_20170911/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/aic_val.json', + use_area=False) +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/alexnet_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/alexnet_coco.md index 6f82685ba8..4cff144198 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/alexnet_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/alexnet_coco.md @@ -1,40 +1,40 @@ - - -
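The AIC configs earlier in this patch (`td-hm_hrnet-w32_8xb64-210e_aic-256x192.py` and `td-hm_res101_8xb64-210e_aic-256x192.py`) each assemble a complete `TopdownPoseEstimator`: a backbone, a `HeatmapHead` with 14 output channels, and the MSRA heatmap codec. For reference, such a config plus its released checkpoint can usually be driven through the MMPose 1.x high-level API roughly as sketched below; the function names are assumed from `mmpose.apis`, and the image path and bounding box are placeholders, not part of this patch.

```python
from mmpose.apis import init_model, inference_topdown

# Config from this patch and its released checkpoint (see the table above).
config = 'configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_res101_8xb64-210e_aic-256x192.py'
checkpoint = ('https://download.openmmlab.com/mmpose/top_down/resnet/'
              'res101_aic_256x192-79b35445_20200826.pth')

model = init_model(config, checkpoint, device='cpu')

# One person box in xyxy format; in a full top-down pipeline these boxes
# would come from the human detector stage rather than being hard-coded.
bboxes = [[50, 50, 250, 400]]
results = inference_topdown(model, 'demo.jpg', bboxes)
print(results[0].pred_instances.keypoints.shape)  # expected (1, 14, 2): 14 AIC keypoints
```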
-AlexNet (NeurIPS'2012)
-
-```bibtex
-@inproceedings{krizhevsky2012imagenet,
-  title={Imagenet classification with deep convolutional neural networks},
-  author={Krizhevsky, Alex and Sutskever, Ilya and Hinton, Geoffrey E},
-  booktitle={Advances in neural information processing systems},
-  pages={1097--1105},
-  year={2012}
-}
-```
-
-
- - - -
-COCO (ECCV'2014)
-
-```bibtex
-@inproceedings{lin2014microsoft,
-  title={Microsoft coco: Common objects in context},
-  author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence},
-  booktitle={European conference on computer vision},
-  pages={740--755},
-  year={2014},
-  organization={Springer}
-}
-```
-
-
-
-Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset
-
-| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log |
-| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: |
-| [pose_alexnet](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_alexnet_8xb64-210e_coco-256x192.py) | 256x192 | 0.448 | 0.767 | 0.461 | 0.521 | 0.829 | [ckpt](https://download.openmmlab.com/mmpose/top_down/alexnet/alexnet_coco_256x192-a7b1fd15_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/alexnet/alexnet_coco_256x192_20200727.log.json) |
+
+
+
+AlexNet (NeurIPS'2012)
+
+```bibtex
+@inproceedings{krizhevsky2012imagenet,
+  title={Imagenet classification with deep convolutional neural networks},
+  author={Krizhevsky, Alex and Sutskever, Ilya and Hinton, Geoffrey E},
+  booktitle={Advances in neural information processing systems},
+  pages={1097--1105},
+  year={2012}
+}
+```
+
+
+ + + +
+COCO (ECCV'2014)
+
+```bibtex
+@inproceedings{lin2014microsoft,
+  title={Microsoft coco: Common objects in context},
+  author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence},
+  booktitle={European conference on computer vision},
+  pages={740--755},
+  year={2014},
+  organization={Springer}
+}
+```
+
+
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_alexnet](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_alexnet_8xb64-210e_coco-256x192.py) | 256x192 | 0.448 | 0.767 | 0.461 | 0.521 | 0.829 | [ckpt](https://download.openmmlab.com/mmpose/top_down/alexnet/alexnet_coco_256x192-a7b1fd15_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/alexnet/alexnet_coco_256x192_20200727.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/alexnet_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/alexnet_coco.yml index 0c851c3c79..0451088663 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/alexnet_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/alexnet_coco.yml @@ -1,19 +1,19 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_alexnet_8xb64-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: - - SimpleBaseline2D - - AlexNet - Training Data: COCO - Name: td-hm_alexnet_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.448 - AP@0.5: 0.767 - AP@0.75: 0.461 - AR: 0.521 - AR@0.5: 0.829 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/alexnet/alexnet_coco_256x192-a7b1fd15_20200727.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_alexnet_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - AlexNet + Training Data: COCO + Name: td-hm_alexnet_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.448 + AP@0.5: 0.767 + AP@0.75: 0.461 + AR: 0.521 + AR@0.5: 0.829 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/alexnet/alexnet_coco_256x192-a7b1fd15_20200727.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/cpm_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/cpm_coco.md index 3d4453a369..c0ecaad379 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/cpm_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/cpm_coco.md @@ -1,41 +1,41 @@ - - -
-CPM (CVPR'2016)
-
-```bibtex
-@inproceedings{wei2016convolutional,
-  title={Convolutional pose machines},
-  author={Wei, Shih-En and Ramakrishna, Varun and Kanade, Takeo and Sheikh, Yaser},
-  booktitle={Proceedings of the IEEE conference on Computer Vision and Pattern Recognition},
-  pages={4724--4732},
-  year={2016}
-}
-```
-
-
- - - -
-COCO (ECCV'2014)
-
-```bibtex
-@inproceedings{lin2014microsoft,
-  title={Microsoft coco: Common objects in context},
-  author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence},
-  booktitle={European conference on computer vision},
-  pages={740--755},
-  year={2014},
-  organization={Springer}
-}
-```
-
-
-
-Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset
-
-| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log |
-| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: |
-| [cpm](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb64-210e_coco-256x192.py) | 256x192 | 0.627 | 0.862 | 0.709 | 0.689 | 0.906 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb64-210e_coco-256x192-0e978875_20220920.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb64-210e_coco-256x192_20220920.log) |
-| [cpm](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb32-210e_coco-384x288.py) | 384x288 | 0.652 | 0.865 | 0.730 | 0.710 | 0.907 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb32-210e_coco-384x288-165487b8_20221011.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb32-210e_coco-384x288_20221011.log) |
+
+
+
+CPM (CVPR'2016)
+
+```bibtex
+@inproceedings{wei2016convolutional,
+  title={Convolutional pose machines},
+  author={Wei, Shih-En and Ramakrishna, Varun and Kanade, Takeo and Sheikh, Yaser},
+  booktitle={Proceedings of the IEEE conference on Computer Vision and Pattern Recognition},
+  pages={4724--4732},
+  year={2016}
+}
+```
+
+
+ + + +
+COCO (ECCV'2014)
+
+```bibtex
+@inproceedings{lin2014microsoft,
+  title={Microsoft coco: Common objects in context},
+  author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence},
+  booktitle={European conference on computer vision},
+  pages={740--755},
+  year={2014},
+  organization={Springer}
+}
+```
+
+
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [cpm](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb64-210e_coco-256x192.py) | 256x192 | 0.627 | 0.862 | 0.709 | 0.689 | 0.906 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb64-210e_coco-256x192-0e978875_20220920.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb64-210e_coco-256x192_20220920.log) | +| [cpm](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb32-210e_coco-384x288.py) | 384x288 | 0.652 | 0.865 | 0.730 | 0.710 | 0.907 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb32-210e_coco-384x288-165487b8_20221011.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb32-210e_coco-384x288_20221011.log) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/cpm_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/cpm_coco.yml index 2c1cad9713..aee822bc92 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/cpm_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/cpm_coco.yml @@ -1,40 +1,40 @@ -Collections: -- Name: CPM - Paper: - Title: Convolutional pose machines - URL: http://openaccess.thecvf.com/content_cvpr_2016/html/Wei_Convolutional_Pose_Machines_CVPR_2016_paper.html - README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/cpm.md -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb64-210e_coco-256x192.py - In Collection: CPM - Metadata: - Architecture: &id001 - - CPM - Training Data: COCO - Name: td-hm_cpm_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.627 - AP@0.5: 0.862 - AP@0.75: 0.709 - AR: 0.689 - AR@0.5: 0.906 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb64-210e_coco-256x192-0e978875_20220920.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb32-210e_coco-384x288.py - In Collection: CPM - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_cpm_8xb32-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.652 - AP@0.5: 0.865 - AP@0.75: 0.730 - AR: 0.710 - AR@0.5: 0.907 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb32-210e_coco-384x288-165487b8_20221011.pth +Collections: +- Name: CPM + Paper: + Title: Convolutional pose machines + URL: http://openaccess.thecvf.com/content_cvpr_2016/html/Wei_Convolutional_Pose_Machines_CVPR_2016_paper.html + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/cpm.md +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb64-210e_coco-256x192.py + In Collection: CPM + Metadata: + Architecture: &id001 + - CPM + Training Data: COCO + Name: td-hm_cpm_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.627 + AP@0.5: 0.862 + AP@0.75: 0.709 + AR: 0.689 + AR@0.5: 0.906 + Task: Body 2D Keypoint + Weights: 
https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb64-210e_coco-256x192-0e978875_20220920.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb32-210e_coco-384x288.py + In Collection: CPM + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_cpm_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.652 + AP@0.5: 0.865 + AP@0.75: 0.730 + AR: 0.710 + AR@0.5: 0.907 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb32-210e_coco-384x288-165487b8_20221011.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_aic-coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_aic-coco-256x192.py index fc1eb0d36c..db92dacaff 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_aic-coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_aic-coco-256x192.py @@ -1,284 +1,284 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 210 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 105 to 210 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# keypoint mappings -keypoint_mapping_coco = [ - (0, 0), - (1, 1), - (2, 2), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -keypoint_mapping_aic = [ - (0, 6), - (1, 8), - (2, 10), - (3, 5), - (4, 7), - (5, 9), - (6, 12), - (7, 14), - (8, 16), - (9, 11), - (10, 13), - (11, 15), - (12, 17), - (13, 18), -] - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=1., - widen_factor=1., - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' - 'rtmdet/cspnext_rsb_pretrain/' - 'cspnext-l_8xb256-rsb-a1-600e_in1k-6a760974.pth')), - head=dict( - type='HeatmapHead', - in_channels=1024, - out_channels=19, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=False, - output_keypoint_indices=[ - target for _, target in keypoint_mapping_coco - ])) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = 
dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/', -# f'{data_root}': 's3://openmmlab/datasets/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# train datasets -dataset_coco = dict( - type='RepeatDataset', - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_train2017.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=19, - mapping=keypoint_mapping_coco) - ], - ), - times=3) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=19, - mapping=keypoint_mapping_aic) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/coco_aic.py'), - datasets=[dataset_coco, dataset_aic], - pipeline=train_pipeline, - test_mode=False, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_val2017.json', - # bbox_file='data/coco/person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='detection/coco/val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = 
val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 105 to 210 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# keypoint mappings +keypoint_mapping_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +keypoint_mapping_aic = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + (12, 17), + (13, 18), +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-l_8xb256-rsb-a1-600e_in1k-6a760974.pth')), + head=dict( + type='HeatmapHead', + in_channels=1024, + out_channels=19, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=False, + output_keypoint_indices=[ + target for _, target in keypoint_mapping_coco + ])) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/', +# f'{data_root}': 's3://openmmlab/datasets/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size'], 
use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type='RepeatDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=19, + mapping=keypoint_mapping_coco) + ], + ), + times=3) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=19, + mapping=keypoint_mapping_aic) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_aic.py'), + datasets=[dataset_coco, dataset_aic], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = 
val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_coco-256x192.py index 6cce193544..a6f62396dd 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_coco-256x192.py @@ -1,214 +1,214 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 210 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 105 to 210 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=1., - widen_factor=1., - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' - 'rtmdet/cspnext_rsb_pretrain/' - 'cspnext-l_8xb256-rsb-a1-600e_in1k-6a760974.pth')), - head=dict( - type='HeatmapHead', - in_channels=1024, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', 
input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - # bbox_file='data/coco/person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 105 to 210 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + 
expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-l_8xb256-rsb-a1-600e_in1k-6a760974.pth')), + head=dict( + type='HeatmapHead', + in_channels=1024, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) 
+test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_aic-coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_aic-coco-256x192.py index 096bf30785..d51d467894 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_aic-coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_aic-coco-256x192.py @@ -1,284 +1,284 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 210 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 105 to 210 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# keypoint mappings -keypoint_mapping_coco = [ - (0, 0), - (1, 1), - (2, 2), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -keypoint_mapping_aic = [ - (0, 6), - (1, 8), - (2, 10), - (3, 5), - (4, 7), - (5, 9), - (6, 12), - (7, 14), - (8, 16), - (9, 11), - (10, 13), - (11, 15), - (12, 17), - (13, 18), -] - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' - 'rtmdet/cspnext_rsb_pretrain/' - 'cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth')), - head=dict( - type='HeatmapHead', - in_channels=768, - out_channels=19, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=False, - output_keypoint_indices=[ - target for _, target in keypoint_mapping_coco - ])) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# 
path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/', -# f'{data_root}': 's3://openmmlab/datasets/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# train datasets -dataset_coco = dict( - type='RepeatDataset', - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_train2017.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=19, - mapping=keypoint_mapping_coco) - ], - ), - times=3) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=19, - mapping=keypoint_mapping_aic) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/coco_aic.py'), - datasets=[dataset_coco, dataset_aic], - pipeline=train_pipeline, - test_mode=False, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_val2017.json', - # bbox_file='data/coco/person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='detection/coco/val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - 
checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 105 to 210 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# keypoint mappings +keypoint_mapping_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +keypoint_mapping_aic = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + (12, 17), + (13, 18), +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth')), + head=dict( + type='HeatmapHead', + in_channels=768, + out_channels=19, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=False, + output_keypoint_indices=[ + target for _, target in keypoint_mapping_coco + ])) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/', +# f'{data_root}': 's3://openmmlab/datasets/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + 
dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type='RepeatDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=19, + mapping=keypoint_mapping_coco) + ], + ), + times=3) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=19, + mapping=keypoint_mapping_aic) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_aic.py'), + datasets=[dataset_coco, dataset_aic], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator 
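The aic-coco configs above merge COCO (17 keypoints) and AIC (14 keypoints) into a single 19-keypoint target space via the (source_index, target_index) pairs in keypoint_mapping_coco and keypoint_mapping_aic, and then report only the COCO subset at test time through output_keypoint_indices. Below is a minimal NumPy sketch of what those mapping lists express — an illustration only, not MMPose's actual KeypointConverter transform; the convert_keypoints helper and the array names are hypothetical, assuming keypoints arrive as (x, y) arrays with per-point visibility flags.

import numpy as np

keypoint_mapping_aic = [
    (0, 6), (1, 8), (2, 10), (3, 5), (4, 7), (5, 9), (6, 12),
    (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), (12, 17), (13, 18),
]
keypoint_mapping_coco = [(i, i) for i in range(17)]


def convert_keypoints(kpts, visible, mapping, num_keypoints=19):
    # Scatter source keypoints into the combined 19-keypoint layout;
    # unmapped target slots stay at zero with visibility 0.
    out = np.zeros((num_keypoints, 2), dtype=kpts.dtype)
    out_vis = np.zeros(num_keypoints, dtype=visible.dtype)
    src, dst = zip(*mapping)
    out[list(dst)] = kpts[list(src)]
    out_vis[list(dst)] = visible[list(src)]
    return out, out_vis


# An AIC sample carries 14 keypoints; remap it into the combined layout.
aic_kpts = np.random.rand(14, 2)
aic_vis = np.ones(14, dtype=np.int64)
combined, combined_vis = convert_keypoints(aic_kpts, aic_vis, keypoint_mapping_aic)

# At test time the config keeps only the COCO subset, mirroring
# output_keypoint_indices=[target for _, target in keypoint_mapping_coco].
output_keypoint_indices = [target for _, target in keypoint_mapping_coco]
coco_only = combined[output_keypoint_indices]  # shape (17, 2)

Note that these combined aic-coco configs set flip_test=False and rely on output_keypoint_indices so that only the 17 COCO keypoints reach the CocoMetric evaluator, whereas the COCO-only configs in this patch keep flip_test=True with heatmap-mode flipping.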
diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_coco-256x192.py index f86e9a8d60..a1dd5f68f0 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_coco-256x192.py @@ -1,214 +1,214 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 210 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 105 to 210 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' - 'rtmdet/cspnext_rsb_pretrain/' - 'cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth')), - head=dict( - type='HeatmapHead', - in_channels=768, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', 
input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - # bbox_file='data/coco/person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 105 to 210 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + 
expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth')), + head=dict( + type='HeatmapHead', + in_channels=768, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) 
+test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_aic-coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_aic-coco-256x192.py index 94cc7d02d2..323a9e803c 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_aic-coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_aic-coco-256x192.py @@ -1,284 +1,284 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 210 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.0), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 105 to 210 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# keypoint mappings -keypoint_mapping_coco = [ - (0, 0), - (1, 1), - (2, 2), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -keypoint_mapping_aic = [ - (0, 6), - (1, 8), - (2, 10), - (3, 5), - (4, 7), - (5, 9), - (6, 12), - (7, 14), - (8, 16), - (9, 11), - (10, 13), - (11, 15), - (12, 17), - (13, 18), -] - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.33, - widen_factor=0.5, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' - 'rtmdet/cspnext_rsb_pretrain/' - 'cspnext-s_imagenet_600e-ea671761.pth')), - head=dict( - type='HeatmapHead', - in_channels=512, - out_channels=19, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=False, - output_keypoint_indices=[ - target for _, target in keypoint_mapping_coco - ])) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# 
path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/', -# f'{data_root}': 's3://openmmlab/datasets/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# train datasets -dataset_coco = dict( - type='RepeatDataset', - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_train2017.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=19, - mapping=keypoint_mapping_coco) - ], - ), - times=3) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=19, - mapping=keypoint_mapping_aic) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/coco_aic.py'), - datasets=[dataset_coco, dataset_aic], - pipeline=train_pipeline, - test_mode=False, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_val2017.json', - # bbox_file='data/coco/person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='detection/coco/val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - 
checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.0), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 105 to 210 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# keypoint mappings +keypoint_mapping_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +keypoint_mapping_aic = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + (12, 17), + (13, 18), +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-s_imagenet_600e-ea671761.pth')), + head=dict( + type='HeatmapHead', + in_channels=512, + out_channels=19, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=False, + output_keypoint_indices=[ + target for _, target in keypoint_mapping_coco + ])) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/', +# f'{data_root}': 's3://openmmlab/datasets/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + 
dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type='RepeatDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=19, + mapping=keypoint_mapping_coco) + ], + ), + times=3) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=19, + mapping=keypoint_mapping_aic) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_aic.py'), + datasets=[dataset_coco, dataset_aic], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_coco-256x192.py index 6f50542e5b..918b2fa0ff 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_coco-256x192.py @@ -1,214 +1,214 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 210 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 105 to 210 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.33, - widen_factor=0.5, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' - 'rtmdet/cspnext_rsb_pretrain/' - 'cspnext-s_imagenet_600e-ea671761.pth')), - head=dict( - type='HeatmapHead', - in_channels=512, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], 
use_udp=True), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - # bbox_file='data/coco/person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 105 to 210 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + 
deepen_factor=0.33, + widen_factor=0.5, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-s_imagenet_600e-ea671761.pth')), + head=dict( + type='HeatmapHead', + in_channels=512, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# 
hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_aic-coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_aic-coco-256x192.py index cef1b20450..e25d29b8f5 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_aic-coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_aic-coco-256x192.py @@ -1,284 +1,284 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 210 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.0), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 105 to 210 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# keypoint mappings -keypoint_mapping_coco = [ - (0, 0), - (1, 1), - (2, 2), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -keypoint_mapping_aic = [ - (0, 6), - (1, 8), - (2, 10), - (3, 5), - (4, 7), - (5, 9), - (6, 12), - (7, 14), - (8, 16), - (9, 11), - (10, 13), - (11, 15), - (12, 17), - (13, 18), -] - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.167, - widen_factor=0.375, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' - 'rtmdet/cspnext_rsb_pretrain/' - 'cspnext-tiny_imagenet_600e-3a2dd350.pth')), - head=dict( - type='HeatmapHead', - in_channels=384, - out_channels=19, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=False, - output_keypoint_indices=[ - target for _, target in keypoint_mapping_coco - ])) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# 
f'{data_root}': 's3://openmmlab/datasets/', -# f'{data_root}': 's3://openmmlab/datasets/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# train datasets -dataset_coco = dict( - type='RepeatDataset', - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_train2017.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=19, - mapping=keypoint_mapping_coco) - ], - ), - times=3) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=19, - mapping=keypoint_mapping_aic) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/coco_aic.py'), - datasets=[dataset_coco, dataset_aic], - pipeline=train_pipeline, - test_mode=False, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_val2017.json', - # bbox_file='data/coco/person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='detection/coco/val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', 
rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - # dict( - # type='EMAHook', - # ema_type='ExpMomentumEMA', - # momentum=0.0002, - # update_buffers=True, - # priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.0), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 105 to 210 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# keypoint mappings +keypoint_mapping_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +keypoint_mapping_aic = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + (12, 17), + (13, 18), +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.167, + widen_factor=0.375, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-tiny_imagenet_600e-3a2dd350.pth')), + head=dict( + type='HeatmapHead', + in_channels=384, + out_channels=19, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=False, + output_keypoint_indices=[ + target for _, target in keypoint_mapping_coco + ])) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/', +# f'{data_root}': 's3://openmmlab/datasets/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + 
type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type='RepeatDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=19, + mapping=keypoint_mapping_coco) + ], + ), + times=3) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=19, + mapping=keypoint_mapping_aic) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_aic.py'), + datasets=[dataset_coco, dataset_aic], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + # dict( + # type='EMAHook', + # ema_type='ExpMomentumEMA', + # momentum=0.0002, + # update_buffers=True, + # priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_coco-256x192.py index 7ec0bb2be7..576c3be140 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_coco-256x192.py @@ -1,214 +1,214 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 210 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 105 to 210 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.167, - widen_factor=0.375, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' - 'rtmdet/cspnext_rsb_pretrain/' - 'cspnext-tiny_imagenet_600e-3a2dd350.pth')), - head=dict( - type='HeatmapHead', - in_channels=384, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', 
input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - # bbox_file='data/coco/person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - # dict( - # type='EMAHook', - # ema_type='ExpMomentumEMA', - # momentum=0.0002, - # update_buffers=True, - # priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 105 to 210 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + 
arch='P5', + expand_ratio=0.5, + deepen_factor=0.167, + widen_factor=0.375, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-tiny_imagenet_600e-3a2dd350.pth')), + head=dict( + type='HeatmapHead', + in_channels=384, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) 
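+# NOTE: a minimal usage sketch, assuming the standard MMPose entry points
+# (tools/train.py, tools/test.py, tools/dist_train.sh); it is not part of the
+# upstream config. The "8xb256" in the file name follows the OpenMMLab naming
+# convention of 8 GPUs x 256 samples per GPU.
+#
+#   # distributed training on 8 GPUs
+#   bash tools/dist_train.sh \
+#       configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_coco-256x192.py 8
+#
+#   # evaluation with a trained checkpoint (placeholder path)
+#   python tools/test.py \
+#       configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_coco-256x192.py \
+#       ${CHECKPOINT_FILE}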
+test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + # dict( + # type='EMAHook', + # ema_type='ExpMomentumEMA', + # momentum=0.0002, + # update_buffers=True, + # priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext_udp_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext_udp_coco.md index 7aad2bf6b3..29fd080569 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext_udp_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext_udp_coco.md @@ -1,69 +1,69 @@ - - -
-RTMDet (ArXiv 2022) - -```bibtex -@misc{lyu2022rtmdet, - title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, - author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, - year={2022}, - eprint={2212.07784}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -
- - - -
-UDP (CVPR'2020) - -```bibtex -@InProceedings{Huang_2020_CVPR, - author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, - title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, - booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, - month = {June}, - year = {2020} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [pose_cspnext_t_udp](/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_coco-256x192.py) | 256x192 | 0.665 | 0.874 | 0.723 | 0.723 | 0.917 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-tiny_udp-coco_pt-in1k_210e-256x192-0908dd2d_20230123.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-tiny_udp-coco_pt-in1k_210e-256x192-0908dd2d_20230123.json) | -| [pose_cspnext_s_udp](/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_coco-256x192.py) | 256x192 | 0.697 | 0.886 | 0.776 | 0.753 | 0.929 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-s_udp-coco_pt-in1k_210e-256x192-92dbfc1d_20230123.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-s_udp-coco_pt-in1k_210e-256x192-92dbfc1d_20230123.json) | -| [pose_cspnext_m_udp](/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_coco-256x192.py) | 256x192 | 0.732 | 0.896 | 0.806 | 0.785 | 0.937 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-coco_pt-in1k_210e-256x192-95f5967e_20230123.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-coco_pt-in1k_210e-256x192-95f5967e_20230123.json) | -| [pose_cspnext_l_udp](/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_coco-256x192.py) | 256x192 | 0.750 | 0.904 | 0.822 | 0.800 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-coco_pt-in1k_210e-256x192-661cdd8c_20230123.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-coco_pt-in1k_210e-256x192-661cdd8c_20230123.json) | -| [pose_cspnext_t_udp_aic_coco](/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_aic-coco-256x192.py) | 256x192 | 0.655 | 0.884 | 0.731 | 0.689 | 0.890 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-tiny_udp-aic-coco_210e-256x192-cbed682d_20230130.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-tiny_udp-aic-coco_210e-256x192-cbed682d_20230130.json) | -| [pose_cspnext_s_udp_aic_coco](/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_aic-coco-256x192.py) | 256x192 | 0.700 | 0.905 | 0.783 | 0.733 | 0.918 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.json) | -| [pose_cspnext_m_udp_aic_coco](/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_aic-coco-256x192.py) | 256x192 | 0.748 | 0.925 | 0.818 | 0.777 | 0.933 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.json) | -| 
[pose_cspnext_l_udp_aic_coco](/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_aic-coco-256x192.py) | 256x192 | 0.772 | 0.936 | 0.839 | 0.799 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.json) | - -Note that, UDP also adopts the unbiased encoding/decoding algorithm of [DARK](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/techniques.html#darkpose-cvpr-2020). - -Flip test and detector is not used in the result of aic-coco training. + + +
+RTMDet (ArXiv 2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+UDP (CVPR'2020) + +```bibtex +@InProceedings{Huang_2020_CVPR, + author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, + title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, + booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2020} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_cspnext_t_udp](/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_coco-256x192.py) | 256x192 | 0.665 | 0.874 | 0.723 | 0.723 | 0.917 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-tiny_udp-coco_pt-in1k_210e-256x192-0908dd2d_20230123.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-tiny_udp-coco_pt-in1k_210e-256x192-0908dd2d_20230123.json) | +| [pose_cspnext_s_udp](/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_coco-256x192.py) | 256x192 | 0.697 | 0.886 | 0.776 | 0.753 | 0.929 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-s_udp-coco_pt-in1k_210e-256x192-92dbfc1d_20230123.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-s_udp-coco_pt-in1k_210e-256x192-92dbfc1d_20230123.json) | +| [pose_cspnext_m_udp](/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_coco-256x192.py) | 256x192 | 0.732 | 0.896 | 0.806 | 0.785 | 0.937 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-coco_pt-in1k_210e-256x192-95f5967e_20230123.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-coco_pt-in1k_210e-256x192-95f5967e_20230123.json) | +| [pose_cspnext_l_udp](/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_coco-256x192.py) | 256x192 | 0.750 | 0.904 | 0.822 | 0.800 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-coco_pt-in1k_210e-256x192-661cdd8c_20230123.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-coco_pt-in1k_210e-256x192-661cdd8c_20230123.json) | +| [pose_cspnext_t_udp_aic_coco](/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_aic-coco-256x192.py) | 256x192 | 0.655 | 0.884 | 0.731 | 0.689 | 0.890 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-tiny_udp-aic-coco_210e-256x192-cbed682d_20230130.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-tiny_udp-aic-coco_210e-256x192-cbed682d_20230130.json) | +| [pose_cspnext_s_udp_aic_coco](/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_aic-coco-256x192.py) | 256x192 | 0.700 | 0.905 | 0.783 | 0.733 | 0.918 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.json) | +| [pose_cspnext_m_udp_aic_coco](/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_aic-coco-256x192.py) | 256x192 | 0.748 | 0.925 | 0.818 | 0.777 | 0.933 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.json) | +| 
[pose_cspnext_l_udp_aic_coco](/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_aic-coco-256x192.py) | 256x192 | 0.772 | 0.936 | 0.839 | 0.799 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.json) | + +Note that, UDP also adopts the unbiased encoding/decoding algorithm of [DARK](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/techniques.html#darkpose-cvpr-2020). + +Flip test and detector is not used in the result of aic-coco training. diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext_udp_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext_udp_coco.yml index aab5c44e1b..b1d9cd85cb 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext_udp_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/cspnext_udp_coco.yml @@ -1,139 +1,139 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_coco-256x192.py - In Collection: UDP - Metadata: - Architecture: &id001 - - CSPNeXt - - UDP - Training Data: COCO - Name: cspnext-tiny_udp_8xb256-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.665 - AP@0.5: 0.874 - AP@0.75: 0.723 - AR: 0.723 - AR@0.5: 0.917 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-tiny_udp-coco_pt-in1k_210e-256x192-0908dd2d_20230123.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_coco-256x192.py - In Collection: UDP - Metadata: - Architecture: *id001 - Training Data: COCO - Name: cspnext-s_udp_8xb256-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.697 - AP@0.5: 0.886 - AP@0.75: 0.776 - AR: 0.753 - AR@0.5: 0.929 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-s_udp-coco_pt-in1k_210e-256x192-92dbfc1d_20230123.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_coco-256x192.py - In Collection: UDP - Metadata: - Architecture: *id001 - Training Data: COCO - Name: cspnext-m_udp_8xb256-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.732 - AP@0.5: 0.896 - AP@0.75: 0.806 - AR: 0.785 - AR@0.5: 0.937 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-coco_pt-in1k_210e-256x192-95f5967e_20230123.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_coco-256x192.py - In Collection: UDP - Metadata: - Architecture: *id001 - Training Data: COCO - Name: cspnext-l_udp_8xb256-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.750 - AP@0.5: 0.904 - AP@0.75: 0.822 - AR: 0.8 - AR@0.5: 0.941 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-coco_pt-in1k_210e-256x192-661cdd8c_20230123.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_aic-coco-256x192.py - In Collection: UDP - Metadata: - Architecture: *id001 - Training Data: - - COCO - - AIC - Name: cspnext-tiny_udp_8xb256-210e_aic-coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.655 - AP@0.5: 0.884 - AP@0.75: 0.731 - AR: 0.689 - AR@0.5: 0.89 - Task: Body 2D Keypoint - Weights: 
https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-tiny_udp-aic-coco_210e-256x192-cbed682d_20230130.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_aic-coco-256x192.py - In Collection: UDP - Metadata: - Architecture: *id001 - Training Data: - - COCO - - AIC - Name: cspnext-s_udp_8xb256-210e_aic-coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.7 - AP@0.5: 0.905 - AP@0.75: 0.783 - AR: 0.733 - AR@0.5: 0.918 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_aic-coco-256x192.py - In Collection: UDP - Metadata: - Architecture: *id001 - Training Data: - - COCO - - AIC - Name: cspnext-m_udp_8xb256-210e_aic-coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.748 - AP@0.5: 0.925 - AP@0.75: 0.818 - AR: 0.777 - AR@0.5: 0.933 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_aic-coco-256x192.py - In Collection: UDP - Metadata: - Architecture: *id001 - Training Data: - - COCO - - AIC - Name: cspnext-l_udp_8xb256-210e_aic-coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.772 - AP@0.5: 0.936 - AP@0.75: 0.839 - AR: 0.799 - AR@0.5: 0.943 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_coco-256x192.py + In Collection: UDP + Metadata: + Architecture: &id001 + - CSPNeXt + - UDP + Training Data: COCO + Name: cspnext-tiny_udp_8xb256-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.665 + AP@0.5: 0.874 + AP@0.75: 0.723 + AR: 0.723 + AR@0.5: 0.917 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-tiny_udp-coco_pt-in1k_210e-256x192-0908dd2d_20230123.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_coco-256x192.py + In Collection: UDP + Metadata: + Architecture: *id001 + Training Data: COCO + Name: cspnext-s_udp_8xb256-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.697 + AP@0.5: 0.886 + AP@0.75: 0.776 + AR: 0.753 + AR@0.5: 0.929 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-s_udp-coco_pt-in1k_210e-256x192-92dbfc1d_20230123.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_coco-256x192.py + In Collection: UDP + Metadata: + Architecture: *id001 + Training Data: COCO + Name: cspnext-m_udp_8xb256-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.732 + AP@0.5: 0.896 + AP@0.75: 0.806 + AR: 0.785 + AR@0.5: 0.937 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-coco_pt-in1k_210e-256x192-95f5967e_20230123.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_coco-256x192.py + In Collection: UDP + Metadata: + Architecture: *id001 + Training Data: COCO + Name: cspnext-l_udp_8xb256-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.750 + AP@0.5: 0.904 + AP@0.75: 0.822 + AR: 0.8 + AR@0.5: 0.941 + Task: Body 2D Keypoint + Weights: 
https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-coco_pt-in1k_210e-256x192-661cdd8c_20230123.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-tiny_udp_8xb256-210e_aic-coco-256x192.py + In Collection: UDP + Metadata: + Architecture: *id001 + Training Data: + - COCO + - AIC + Name: cspnext-tiny_udp_8xb256-210e_aic-coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.655 + AP@0.5: 0.884 + AP@0.75: 0.731 + AR: 0.689 + AR@0.5: 0.89 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-tiny_udp-aic-coco_210e-256x192-cbed682d_20230130.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-s_udp_8xb256-210e_aic-coco-256x192.py + In Collection: UDP + Metadata: + Architecture: *id001 + Training Data: + - COCO + - AIC + Name: cspnext-s_udp_8xb256-210e_aic-coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.7 + AP@0.5: 0.905 + AP@0.75: 0.783 + AR: 0.733 + AR@0.5: 0.918 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-m_udp_8xb256-210e_aic-coco-256x192.py + In Collection: UDP + Metadata: + Architecture: *id001 + Training Data: + - COCO + - AIC + Name: cspnext-m_udp_8xb256-210e_aic-coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.748 + AP@0.5: 0.925 + AP@0.75: 0.818 + AR: 0.777 + AR@0.5: 0.933 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/cspnext-l_udp_8xb256-210e_aic-coco-256x192.py + In Collection: UDP + Metadata: + Architecture: *id001 + Training Data: + - COCO + - AIC + Name: cspnext-l_udp_8xb256-210e_aic-coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.772 + AP@0.5: 0.936 + AP@0.75: 0.839 + AR: 0.799 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/hourglass_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/hourglass_coco.md index dc7dee47c3..e66d13e8ca 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/hourglass_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/hourglass_coco.md @@ -1,42 +1,42 @@ - - -
-Hourglass (ECCV'2016) - -```bibtex -@inproceedings{newell2016stacked, - title={Stacked hourglass networks for human pose estimation}, - author={Newell, Alejandro and Yang, Kaiyu and Deng, Jia}, - booktitle={European conference on computer vision}, - pages={483--499}, - year={2016}, - organization={Springer} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [pose_hourglass_52](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-256x256.py) | 256x256 | 0.726 | 0.896 | 0.799 | 0.780 | 0.934 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_coco_256x256-4ec713ba_20200709.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_coco_256x256_20200709.log.json) | -| [pose_hourglass_52](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-384x384.py) | 384x384 | 0.746 | 0.900 | 0.812 | 0.797 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_coco_384x384-be91ba2b_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_coco_384x384_20200812.log.json) | + + +
+Hourglass (ECCV'2016) + +```bibtex +@inproceedings{newell2016stacked, + title={Stacked hourglass networks for human pose estimation}, + author={Newell, Alejandro and Yang, Kaiyu and Deng, Jia}, + booktitle={European conference on computer vision}, + pages={483--499}, + year={2016}, + organization={Springer} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_hourglass_52](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-256x256.py) | 256x256 | 0.726 | 0.896 | 0.799 | 0.780 | 0.934 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_coco_256x256-4ec713ba_20200709.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_coco_256x256_20200709.log.json) | +| [pose_hourglass_52](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-384x384.py) | 384x384 | 0.746 | 0.900 | 0.812 | 0.797 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_coco_384x384-be91ba2b_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_coco_384x384_20200812.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/hourglass_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/hourglass_coco.yml index 6d9cfd91e9..23d2a9b3bd 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/hourglass_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/hourglass_coco.yml @@ -1,40 +1,40 @@ -Collections: -- Name: Hourglass - Paper: - Title: Stacked hourglass networks for human pose estimation - URL: https://link.springer.com/chapter/10.1007/978-3-319-46484-8_29 - README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/hourglass.md -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-256x256.py - In Collection: Hourglass - Metadata: - Architecture: &id001 - - Hourglass - Training Data: COCO - Name: td-hm_hourglass52_8xb32-210e_coco-256x256 - Results: - - Dataset: COCO - Metrics: - AP: 0.726 - AP@0.5: 0.896 - AP@0.75: 0.799 - AR: 0.780 - AR@0.5: 0.934 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_coco_256x256-4ec713ba_20200709.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-384x384.py - In Collection: Hourglass - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_hourglass52_8xb32-210e_coco-384x384 - Results: - - Dataset: COCO - Metrics: - AP: 0.746 - AP@0.5: 0.900 - AP@0.75: 0.812 - AR: 0.797 - AR@0.5: 0.939 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_coco_384x384-be91ba2b_20200812.pth +Collections: +- Name: Hourglass + Paper: + Title: Stacked hourglass networks for human pose estimation + URL: https://link.springer.com/chapter/10.1007/978-3-319-46484-8_29 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/hourglass.md +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-256x256.py + In Collection: Hourglass + Metadata: + Architecture: &id001 + - Hourglass + Training Data: COCO + Name: td-hm_hourglass52_8xb32-210e_coco-256x256 + Results: + - Dataset: COCO + Metrics: + AP: 0.726 + AP@0.5: 0.896 + AP@0.75: 0.799 + AR: 0.780 + AR@0.5: 0.934 + Task: Body 2D Keypoint + Weights: 
https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_coco_256x256-4ec713ba_20200709.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-384x384.py + In Collection: Hourglass + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hourglass52_8xb32-210e_coco-384x384 + Results: + - Dataset: COCO + Metrics: + AP: 0.746 + AP@0.5: 0.900 + AP@0.75: 0.812 + AR: 0.797 + AR@0.5: 0.939 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_coco_384x384-be91ba2b_20200812.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/hrformer_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/hrformer_coco.md index 87309d2e7c..c7ab3ee9ed 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/hrformer_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/hrformer_coco.md @@ -1,43 +1,43 @@ - - -
-HRFormer (NIPS'2021) - -```bibtex -@article{yuan2021hrformer, - title={HRFormer: High-Resolution Vision Transformer for Dense Predict}, - author={Yuan, Yuhui and Fu, Rao and Huang, Lang and Lin, Weihong and Zhang, Chao and Chen, Xilin and Wang, Jingdong}, - journal={Advances in Neural Information Processing Systems}, - volume={34}, - year={2021} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [pose_hrformer_small](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-256x192.py) | 256x192 | 0.738 | 0.904 | 0.812 | 0.793 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_small_coco_256x192-5310d898_20220316.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_small_coco_256x192_20220316.log.json) | -| [pose_hrformer_small](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-384x288.py) | 384x288 | 0.757 | 0.905 | 0.824 | 0.807 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_small_coco_384x288-98d237ed_20220316.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_small_coco_384x288_20220316.log.json) | -| [pose_hrformer_base](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-256x192.py) | 256x192 | 0.754 | 0.906 | 0.827 | 0.807 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_base_coco_256x192-6f5f1169_20220316.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_base_coco_256x192_20220316.log.json) | -| [pose_hrformer_base](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-384x288.py) | 384x288 | 0.774 | 0.909 | 0.842 | 0.823 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_base_coco_384x288-ecf0758d_20220316.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_base_coco_256x192_20220316.log.json) | + + +
+HRFormer (NIPS'2021) + +```bibtex +@article{yuan2021hrformer, + title={HRFormer: High-Resolution Vision Transformer for Dense Predict}, + author={Yuan, Yuhui and Fu, Rao and Huang, Lang and Lin, Weihong and Zhang, Chao and Chen, Xilin and Wang, Jingdong}, + journal={Advances in Neural Information Processing Systems}, + volume={34}, + year={2021} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_hrformer_small](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-256x192.py) | 256x192 | 0.738 | 0.904 | 0.812 | 0.793 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_small_coco_256x192-5310d898_20220316.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_small_coco_256x192_20220316.log.json) | +| [pose_hrformer_small](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-384x288.py) | 384x288 | 0.757 | 0.905 | 0.824 | 0.807 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_small_coco_384x288-98d237ed_20220316.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_small_coco_384x288_20220316.log.json) | +| [pose_hrformer_base](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-256x192.py) | 256x192 | 0.754 | 0.906 | 0.827 | 0.807 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_base_coco_256x192-6f5f1169_20220316.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_base_coco_256x192_20220316.log.json) | +| [pose_hrformer_base](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-384x288.py) | 384x288 | 0.774 | 0.909 | 0.842 | 0.823 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_base_coco_384x288-ecf0758d_20220316.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_base_coco_256x192_20220316.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/hrformer_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/hrformer_coco.yml index 5ac7dc3636..81e8d2b7b3 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/hrformer_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/hrformer_coco.yml @@ -1,72 +1,72 @@ -Collections: -- Name: HRFormer - Paper: - Title: 'HRFormer: High-Resolution Vision Transformer for Dense Predict' - URL: https://proceedings.neurips.cc/paper/2021/hash/3bbfdde8842a5c44a0323518eec97cbe-Abstract.html - README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/hrformer.md -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-256x192.py - In Collection: HRFormer - Metadata: - Architecture: &id001 - - HRFormer - Training Data: COCO - Name: td-hm_hrformer-small_8xb32-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.738 - AP@0.5: 0.904 - AP@0.75: 0.812 - AR: 0.793 - AR@0.5: 0.941 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_small_coco_256x192-5310d898_20220316.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-384x288.py - In Collection: HRFormer - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_hrformer-small_8xb32-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.757 - AP@0.5: 0.905 - AP@0.75: 0.824 - AR: 0.807 - AR@0.5: 0.941 - Task: Body 2D Keypoint - Weights: 
https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_small_coco_384x288-98d237ed_20220316.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-256x192.py - In Collection: HRFormer - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_hrformer-base_8xb32-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.754 - AP@0.5: 0.906 - AP@0.75: 0.827 - AR: 0.807 - AR@0.5: 0.943 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_base_coco_256x192-6f5f1169_20220316.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-384x288.py - In Collection: HRFormer - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_hrformer-base_8xb32-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.774 - AP@0.5: 0.909 - AP@0.75: 0.842 - AR: 0.823 - AR@0.5: 0.945 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_base_coco_384x288-ecf0758d_20220316.pth +Collections: +- Name: HRFormer + Paper: + Title: 'HRFormer: High-Resolution Vision Transformer for Dense Predict' + URL: https://proceedings.neurips.cc/paper/2021/hash/3bbfdde8842a5c44a0323518eec97cbe-Abstract.html + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/hrformer.md +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-256x192.py + In Collection: HRFormer + Metadata: + Architecture: &id001 + - HRFormer + Training Data: COCO + Name: td-hm_hrformer-small_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.738 + AP@0.5: 0.904 + AP@0.75: 0.812 + AR: 0.793 + AR@0.5: 0.941 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_small_coco_256x192-5310d898_20220316.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-384x288.py + In Collection: HRFormer + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrformer-small_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.757 + AP@0.5: 0.905 + AP@0.75: 0.824 + AR: 0.807 + AR@0.5: 0.941 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_small_coco_384x288-98d237ed_20220316.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-256x192.py + In Collection: HRFormer + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrformer-base_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.754 + AP@0.5: 0.906 + AP@0.75: 0.827 + AR: 0.807 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_base_coco_256x192-6f5f1169_20220316.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-384x288.py + In Collection: HRFormer + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrformer-base_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.774 + AP@0.5: 0.909 + AP@0.75: 0.842 + AR: 0.823 + AR@0.5: 0.945 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrformer/hrformer_base_coco_384x288-ecf0758d_20220316.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_augmentation_coco.md 
b/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_augmentation_coco.md index efe9cd27b9..010ecdb9d3 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_augmentation_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_augmentation_coco.md @@ -1,62 +1,62 @@ - - -
-HRNet (CVPR'2019) - -```bibtex -@inproceedings{sun2019deep, - title={Deep high-resolution representation learning for human pose estimation}, - author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={5693--5703}, - year={2019} -} -``` - -
- - - -
-Albumentations (Information'2020) - -```bibtex -@article{buslaev2020albumentations, - title={Albumentations: fast and flexible image augmentations}, - author={Buslaev, Alexander and Iglovikov, Vladimir I and Khvedchenya, Eugene and Parinov, Alex and Druzhinin, Mikhail and Kalinin, Alexandr A}, - journal={Information}, - volume={11}, - number={2}, - pages={125}, - year={2020}, - publisher={Multidisciplinary Digital Publishing Institute} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [coarsedropout](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_coarsedropout-8xb64-210e_coco-256x192.py) | 256x192 | 0.753 | 0.908 | 0.822 | 0.805 | 0.944 | [ckpt](https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_coarsedropout-0f16a0ce_20210320.pth) | [log](https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_coarsedropout_20210320.log.json) | -| [gridmask](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_gridmask-8xb64-210e_coco-256x192.py) | 256x192 | 0.752 | 0.906 | 0.825 | 0.804 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_gridmask-868180df_20210320.pth) | [log](https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_gridmask_20210320.log.json) | -| [photometric](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_photometric-8xb64-210e_coco-256x192.py) | 256x192 | 0.754 | 0.908 | 0.825 | 0.805 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_photometric-308cf591_20210320.pth) | [log](https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_photometric_20210320.log.json) | + + +
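The three rows above compare augmentation recipes (coarse dropout, GridMask-style dropout, and photometric distortion) that the corresponding configs wire into the training pipeline; the metafile groups them under the Albumentations collection. The snippet below is a standalone illustration of the same three augmentation families using the Albumentations library directly, with default parameters rather than the exact values used in the configs:

```python
# Standalone illustration (not the MMPose pipeline itself) of the three
# augmentation families compared above, using the Albumentations library.
# Default parameters are used; the real configs set their own hole sizes etc.
import albumentations as A
import numpy as np

augmentations = {
    "coarsedropout": A.Compose([A.CoarseDropout(p=1.0)]),
    "gridmask": A.Compose([A.GridDropout(p=1.0)]),
    "photometric": A.Compose([A.RandomBrightnessContrast(p=1.0),
                              A.HueSaturationValue(p=1.0)]),
}

img = np.random.randint(0, 256, (256, 192, 3), dtype=np.uint8)  # dummy crop
for name, aug in augmentations.items():
    out = aug(image=img)["image"]
    print(name, out.shape, out.dtype)
```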
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+Albumentations (Information'2020) + +```bibtex +@article{buslaev2020albumentations, + title={Albumentations: fast and flexible image augmentations}, + author={Buslaev, Alexander and Iglovikov, Vladimir I and Khvedchenya, Eugene and Parinov, Alex and Druzhinin, Mikhail and Kalinin, Alexandr A}, + journal={Information}, + volume={11}, + number={2}, + pages={125}, + year={2020}, + publisher={Multidisciplinary Digital Publishing Institute} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [coarsedropout](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_coarsedropout-8xb64-210e_coco-256x192.py) | 256x192 | 0.753 | 0.908 | 0.822 | 0.805 | 0.944 | [ckpt](https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_coarsedropout-0f16a0ce_20210320.pth) | [log](https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_coarsedropout_20210320.log.json) | +| [gridmask](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_gridmask-8xb64-210e_coco-256x192.py) | 256x192 | 0.752 | 0.906 | 0.825 | 0.804 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_gridmask-868180df_20210320.pth) | [log](https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_gridmask_20210320.log.json) | +| [photometric](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_photometric-8xb64-210e_coco-256x192.py) | 256x192 | 0.754 | 0.908 | 0.825 | 0.805 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_photometric-308cf591_20210320.pth) | [log](https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_photometric_20210320.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_augmentation_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_augmentation_coco.yml index 7a29de4f64..b31ef805d9 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_augmentation_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_augmentation_coco.yml @@ -1,56 +1,56 @@ -Collections: -- Name: Albumentations - Paper: - Title: 'Albumentations: fast and flexible image augmentations' - URL: https://www.mdpi.com/649002 - README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/techniques/albumentations.md -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_coarsedropout-8xb64-210e_coco-256x192.py - In Collection: Albumentations - Metadata: - Architecture: &id001 - - HRNet - Training Data: COCO - Name: td-hm_hrnet-w32_coarsedropout-8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.753 - AP@0.5: 0.908 - AP@0.75: 0.822 - AR: 0.805 - AR@0.5: 0.944 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_coarsedropout-0f16a0ce_20210320.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_gridmask-8xb64-210e_coco-256x192.py - In Collection: Albumentations - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_hrnet-w32_gridmask-8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.752 - AP@0.5: 0.906 - AP@0.75: 0.825 - AR: 0.804 - AR@0.5: 0.943 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_gridmask-868180df_20210320.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_photometric-8xb64-210e_coco-256x192.py - In Collection: Albumentations - Metadata: - Architecture: *id001 - Training Data: COCO - Name: 
td-hm_hrnet-w32_photometric-8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.754 - AP@0.5: 0.908 - AP@0.75: 0.825 - AR: 0.805 - AR@0.5: 0.943 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_photometric-308cf591_20210320.pth +Collections: +- Name: Albumentations + Paper: + Title: 'Albumentations: fast and flexible image augmentations' + URL: https://www.mdpi.com/649002 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/techniques/albumentations.md +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_coarsedropout-8xb64-210e_coco-256x192.py + In Collection: Albumentations + Metadata: + Architecture: &id001 + - HRNet + Training Data: COCO + Name: td-hm_hrnet-w32_coarsedropout-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.753 + AP@0.5: 0.908 + AP@0.75: 0.822 + AR: 0.805 + AR@0.5: 0.944 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_coarsedropout-0f16a0ce_20210320.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_gridmask-8xb64-210e_coco-256x192.py + In Collection: Albumentations + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrnet-w32_gridmask-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.752 + AP@0.5: 0.906 + AP@0.75: 0.825 + AR: 0.804 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_gridmask-868180df_20210320.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_photometric-8xb64-210e_coco-256x192.py + In Collection: Albumentations + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrnet-w32_photometric-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.754 + AP@0.5: 0.908 + AP@0.75: 0.825 + AR: 0.805 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/augmentation/hrnet_w32_coco_256x192_photometric-308cf591_20210320.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco.md index 51fbf1322e..f8c09f3809 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco.md @@ -1,43 +1,43 @@ - - -
-HRNet (CVPR'2019) - -```bibtex -@inproceedings{sun2019deep, - title={Deep high-resolution representation learning for human pose estimation}, - author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={5693--5703}, - year={2019} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py) | 256x192 | 0.749 | 0.906 | 0.821 | 0.804 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220909.log) | -| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-384x288.py) | 384x288 | 0.761 | 0.908 | 0.826 | 0.811 | 0.944 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-384x288-ca5956af_20220909.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-384x288_20220909.log) | -| [pose_hrnet_w48](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py) | 256x192 | 0.756 | 0.908 | 0.826 | 0.809 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192-0e67c616_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192_20220913.log) | -| [pose_hrnet_w48](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-384x288.py) | 384x288 | 0.767 | 0.911 | 0.832 | 0.817 | 0.947 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-384x288-c161b7de_20220915.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-384x288_20220915.log) | + + +
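Top-down models such as the HRNet entries above take person boxes as input; the detector mentioned in the table ("human AP of 56.4") supplies them during evaluation. A hedged sketch of single-image inference, assuming MMPose 1.x is installed (the `init_model` / `inference_topdown` helpers and the `pred_instances` field follow its Python API and may differ across versions; `person.jpg` and the box are placeholders that would normally come from a person detector):

```python
# Hedged sketch of top-down inference with one of the checkpoints above.
# Assumes MMPose 1.x; paths/URLs are the HRNet-W32 256x192 entry from the table.
import numpy as np
from mmpose.apis import inference_topdown, init_model

config = ('configs/body_2d_keypoint/topdown_heatmap/coco/'
          'td-hm_hrnet-w32_8xb64-210e_coco-256x192.py')
checkpoint = ('https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/'
              'topdown_heatmap/coco/'
              'td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth')

model = init_model(config, checkpoint, device='cpu')

bboxes = np.array([[50.0, 30.0, 250.0, 420.0]])     # one xyxy person box
results = inference_topdown(model, 'person.jpg', bboxes=bboxes)

keypoints = results[0].pred_instances.keypoints      # (1, 17, 2) for COCO
scores = results[0].pred_instances.keypoint_scores   # (1, 17)
print(keypoints.shape, scores.shape)
```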
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py) | 256x192 | 0.749 | 0.906 | 0.821 | 0.804 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220909.log) | +| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-384x288.py) | 384x288 | 0.761 | 0.908 | 0.826 | 0.811 | 0.944 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-384x288-ca5956af_20220909.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-384x288_20220909.log) | +| [pose_hrnet_w48](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py) | 256x192 | 0.756 | 0.908 | 0.826 | 0.809 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192-0e67c616_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192_20220913.log) | +| [pose_hrnet_w48](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-384x288.py) | 384x288 | 0.767 | 0.911 | 0.832 | 0.817 | 0.947 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-384x288-c161b7de_20220915.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-384x288_20220915.log) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco.yml index a0e5debe85..525a4964fd 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco.yml @@ -1,124 +1,124 @@ -Collections: -- Name: HRNet - Paper: - Title: Deep high-resolution representation learning for human pose estimation - URL: http://openaccess.thecvf.com/content_CVPR_2019/html/Sun_Deep_High-Resolution_Representation_Learning_for_Human_Pose_Estimation_CVPR_2019_paper.html - README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/hrnet.md -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py - In Collection: HRNet - Metadata: - Architecture: &id001 - - HRNet - Training Data: COCO - Name: td-hm_hrnet-w32_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.746 - AP@0.5: 0.904 - AP@0.75: 0.819 - AR: 0.799 - AR@0.5: 0.942 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-384x288.py - In Collection: 
HRNet - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_hrnet-w32_8xb64-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.76 - AP@0.5: 0.906 - AP@0.75: 0.83 - AR: 0.81 - AR@0.5: 0.943 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-384x288-ca5956af_20220909.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py - In Collection: HRNet - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_hrnet-w48_8xb32-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.756 - AP@0.5: 0.907 - AP@0.75: 0.825 - AR: 0.806 - AR@0.5: 0.942 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192-0e67c616_20220913.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-384x288.py - In Collection: HRNet - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_hrnet-w48_8xb32-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.767 - AP@0.5: 0.91 - AP@0.75: 0.831 - AR: 0.816 - AR@0.5: 0.946 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-384x288-c161b7de_20220915.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge.py - In Collection: HRNet - Metadata: - Architecture: *id001 - Training Data: - - COCO - - AI Challenger - Name: td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge - Results: - - Dataset: COCO - Metrics: - AP: 0.757 - AP@0.5: 0.907 - AP@0.75: 0.829 - AR: 0.809 - AR@0.5: 0.944 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge-b05435b9_20221025.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine.py - In Collection: HRNet - Metadata: - Architecture: *id001 - Training Data: - - COCO - - AI Challenger - Name: td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine - Results: - - Dataset: COCO - Metrics: - AP: 0.756 - AP@0.5: 0.906 - AP@0.75: 0.826 - AR: 0.807 - AR@0.5: 0.943 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine-4ce66880_20221026.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192.py - In Collection: HRNet - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.749 - AP@0.5: 0.907 - AP@0.75: 0.822 - AR: 0.802 - AR@0.5: 0.946 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192-f1e84e3b_20220914.pth +Collections: +- Name: HRNet + Paper: + Title: Deep high-resolution representation learning for human pose estimation + URL: http://openaccess.thecvf.com/content_CVPR_2019/html/Sun_Deep_High-Resolution_Representation_Learning_for_Human_Pose_Estimation_CVPR_2019_paper.html + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/hrnet.md +Models: +- Config: 
configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py + In Collection: HRNet + Metadata: + Architecture: &id001 + - HRNet + Training Data: COCO + Name: td-hm_hrnet-w32_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.746 + AP@0.5: 0.904 + AP@0.75: 0.819 + AR: 0.799 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-384x288.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrnet-w32_8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.76 + AP@0.5: 0.906 + AP@0.75: 0.83 + AR: 0.81 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-384x288-ca5956af_20220909.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrnet-w48_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.756 + AP@0.5: 0.907 + AP@0.75: 0.825 + AR: 0.806 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192-0e67c616_20220913.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-384x288.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrnet-w48_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.767 + AP@0.5: 0.91 + AP@0.75: 0.831 + AR: 0.816 + AR@0.5: 0.946 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-384x288-c161b7de_20220915.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: + - COCO + - AI Challenger + Name: td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge + Results: + - Dataset: COCO + Metrics: + AP: 0.757 + AP@0.5: 0.907 + AP@0.75: 0.829 + AR: 0.809 + AR@0.5: 0.944 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge-b05435b9_20221025.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: + - COCO + - AI Challenger + Name: td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine + Results: + - Dataset: COCO + Metrics: + AP: 0.756 + AP@0.5: 0.906 + AP@0.75: 0.826 + AR: 0.807 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine-4ce66880_20221026.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.749 + AP@0.5: 0.907 + AP@0.75: 0.822 + AR: 0.802 + 
AR@0.5: 0.946 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192-f1e84e3b_20220914.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco_aic.md b/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco_aic.md index fd88e25e64..43023b1a9c 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco_aic.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco_aic.md @@ -1,61 +1,61 @@ - - -
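The `.yml` metafiles such as `hrnet_coco.yml` above are machine-readable indexes of the model zoo: each entry ties a config to its training data, reported metrics, and weight URL. A small sketch of consuming one with PyYAML, assuming it is run from an MMPose checkout so the relative path resolves:

```python
# Minimal sketch: read a model-zoo metafile and list its models by COCO AP.
# Assumes PyYAML is installed and the path points into an MMPose checkout.
import yaml

METAFILE = 'configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco.yml'
with open(METAFILE) as f:
    metafile = yaml.safe_load(f)  # YAML anchors (&id001/*id001) resolve here

models = metafile['Models']
for m in sorted(models, key=lambda m: m['Results'][0]['Metrics']['AP'],
                reverse=True):
    metrics = m['Results'][0]['Metrics']
    print(f"{m['Name']}: AP={metrics['AP']}  weights={m['Weights']}")
```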
-HRNet (CVPR'2019) - -```bibtex -@inproceedings{sun2019deep, - title={Deep high-resolution representation learning for human pose estimation}, - author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={5693--5703}, - year={2019} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -
-AI Challenger (ArXiv'2017) - -```bibtex -@article{wu2017ai, - title={Ai challenger: A large-scale dataset for going deeper in image understanding}, - author={Wu, Jiahong and Zheng, He and Zhao, Bo and Li, Yixin and Yan, Baoming and Liang, Rui and Wang, Wenjia and Zhou, Shipei and Lin, Guosen and Fu, Yanwei and others}, - journal={arXiv preprint arXiv:1711.06475}, - year={2017} -} -``` - -
- -MMPose supports training model with combined datasets. [coco-aic-merge](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge.py) and [coco-aic-combine](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine.py) are two examples. - -- [coco-aic-merge](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge.py) leverages AIC data with partial keypoints as auxiliary data to train a COCO model -- [coco-aic-combine](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine.py) constructs a combined dataset whose keypoints are the union of COCO and AIC keypoints to train a model that predicts keypoints of both datasets. - -Evaluation results on COCO val2017 of models trained with solely COCO dataset and combined dataset as shown below. These models are evaluated with detector having human AP of 56.4 on COCO val2017 dataset. - -| Train Set | Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :------------------------------------------- | :------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------: | :------------------------------------: | -| [coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py) | pose_hrnet_w32 | 256x192 | 0.749 | 0.906 | 0.821 | 0.804 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220909.log) | -| [coco-aic-merge](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge.py) | pose_hrnet_w32 | 256x192 | 0.757 | 0.907 | 0.829 | 0.809 | 0.944 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge-b05435b9_20221025.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge_20221025.log) | -| [coco-aic-combine](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine.py) | pose_hrnet_w32 | 256x192 | 0.756 | 0.906 | 0.826 | 0.807 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine-4ce66880_20221026.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine_20221026.log) | + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +
+AI Challenger (ArXiv'2017) + +```bibtex +@article{wu2017ai, + title={Ai challenger: A large-scale dataset for going deeper in image understanding}, + author={Wu, Jiahong and Zheng, He and Zhao, Bo and Li, Yixin and Yan, Baoming and Liang, Rui and Wang, Wenjia and Zhou, Shipei and Lin, Guosen and Fu, Yanwei and others}, + journal={arXiv preprint arXiv:1711.06475}, + year={2017} +} +``` + +
+MMPose supports training models with combined datasets. [coco-aic-merge](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge.py) and [coco-aic-combine](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine.py) are two examples.
+
+- [coco-aic-merge](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge.py) leverages AIC data with partial keypoints as auxiliary data to train a COCO model
+- [coco-aic-combine](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine.py) constructs a combined dataset whose keypoints are the union of COCO and AIC keypoints to train a model that predicts keypoints of both datasets (see the configuration sketch after the results table below)
+
+Evaluation results on COCO val2017 of models trained with the COCO dataset alone and with the combined datasets are shown below. These models are evaluated with a detector having human AP of 56.4 on the COCO val2017 dataset.
+
+| Train Set | Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log |
+| :------------------------------------------- | :------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------: | :------------------------------------: |
+| [coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py) | pose_hrnet_w32 | 256x192 | 0.749 | 0.906 | 0.821 | 0.804 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220909.log) |
+| [coco-aic-merge](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge.py) | pose_hrnet_w32 | 256x192 | 0.757 | 0.907 | 0.829 | 0.809 | 0.944 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge-b05435b9_20221025.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge_20221025.log) |
+| [coco-aic-combine](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine.py) | pose_hrnet_w32 | 256x192 | 0.756 | 0.906 | 0.826 | 0.807 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine-4ce66880_20221026.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine_20221026.log) |
diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_dark_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_dark_coco.md
index c18382ec68..89fa3718e1 100644
--- a/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_dark_coco.md
+++ b/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_dark_coco.md
@@ -1,60 +1,60 @@
-
-
-
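Returning to the merge/combine distinction above: the difference boils down to how the dataset section of the config is assembled. The sketch below shows the general shape of a combined-dataset config, assuming MMPose 1.x's `CombinedDataset` wrapper and `KeypointConverter` transform; all paths and the keypoint mapping are placeholders, and the coco-aic configs referenced above remain the authoritative versions.

```python
# Schematic only: structure of a combined COCO + AIC dataset config in the
# style of MMPose 1.x. Paths, file names and the keypoint mapping below are
# placeholders; see the coco-aic-merge / coco-aic-combine configs for the
# real values.
dataset_coco = dict(
    type='CocoDataset',
    data_root='data/coco/',                       # placeholder path
    data_mode='topdown',
    ann_file='annotations/person_keypoints_train2017.json',
    data_prefix=dict(img='train2017/'),
    pipeline=[],                                  # per-dataset conversion only
)

dataset_aic = dict(
    type='AicDataset',
    data_root='data/aic/',                        # placeholder path
    data_mode='topdown',
    ann_file='annotations/aic_train.json',        # placeholder file name
    data_prefix=dict(img='train_images/'),        # placeholder directory
    # Map AIC keypoint indices onto the 17 COCO keypoints so both label
    # spaces agree; the (source, target) pairs here are illustrative only.
    pipeline=[
        dict(type='KeypointConverter',
             num_keypoints=17,
             mapping=[(0, 6), (1, 8), (2, 10)]),
    ],
)

# The wrapper exposes both sources as one dataset with COCO metainfo.
train_dataset = dict(
    type='CombinedDataset',
    metainfo=dict(from_file='configs/_base_/datasets/coco.py'),
    datasets=[dataset_coco, dataset_aic],
    pipeline=[],                                  # usual train pipeline goes here
)
```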
-HRNet (CVPR'2019) - -```bibtex -@inproceedings{sun2019deep, - title={Deep high-resolution representation learning for human pose estimation}, - author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={5693--5703}, - year={2019} -} -``` - -
- - - -
-DarkPose (CVPR'2020) - -```bibtex -@inproceedings{zhang2020distribution, - title={Distribution-aware coordinate representation for human pose estimation}, - author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={7093--7102}, - year={2020} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [pose_hrnet_w32_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192.py) | 256x192 | 0.757 | 0.907 | 0.825 | 0.807 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192-0e00bf12_20220914.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192_20220914.log) | -| [pose_hrnet_w32_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288.py) | 384x288 | 0.766 | 0.907 | 0.829 | 0.815 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288-9bab4c9b_20220917.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288_20220917.log) | -| [pose_hrnet_w48_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192.py) | 256x192 | 0.764 | 0.907 | 0.831 | 0.814 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192-e1ebdd6f_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192_20220913.log) | -| [pose_hrnet_w48_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288.py) | 384x288 | 0.772 | 0.911 | 0.833 | 0.821 | 0.948 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288-39c3c381_20220916.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288_20220916.log) | + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+DarkPose (CVPR'2020) + +```bibtex +@inproceedings{zhang2020distribution, + title={Distribution-aware coordinate representation for human pose estimation}, + author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={7093--7102}, + year={2020} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_hrnet_w32_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192.py) | 256x192 | 0.757 | 0.907 | 0.825 | 0.807 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192-0e00bf12_20220914.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192_20220914.log) | +| [pose_hrnet_w32_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288.py) | 384x288 | 0.766 | 0.907 | 0.829 | 0.815 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288-9bab4c9b_20220917.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288_20220917.log) | +| [pose_hrnet_w48_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192.py) | 256x192 | 0.764 | 0.907 | 0.831 | 0.814 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192-e1ebdd6f_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192_20220913.log) | +| [pose_hrnet_w48_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288.py) | 384x288 | 0.772 | 0.911 | 0.833 | 0.821 | 0.948 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288-39c3c381_20220916.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288_20220916.log) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_dark_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_dark_coco.yml index 9f14e9ffad..ae3d2df69c 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_dark_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_dark_coco.yml @@ -1,73 +1,73 @@ -Collections: -- Name: DarkPose - Paper: - Title: Distribution-aware coordinate representation for human pose estimation - URL: http://openaccess.thecvf.com/content_CVPR_2020/html/Zhang_Distribution-Aware_Coordinate_Representation_for_Human_Pose_Estimation_CVPR_2020_paper.html - README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/techniques/dark.md -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192.py - In Collection: DarkPose - Metadata: - Architecture: &id001 - - HRNet - - DarkPose - Training Data: COCO - Name: td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.757 - AP@0.5: 0.907 - AP@0.75: 0.825 - AR: 0.807 - AR@0.5: 0.943 - Task: Body 2D Keypoint - Weights: 
https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192-0e00bf12_20220914.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288.py - In Collection: DarkPose - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.766 - AP@0.5: 0.907 - AP@0.75: 0.829 - AR: 0.815 - AR@0.5: 0.942 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288-9bab4c9b_20220917.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192.py - In Collection: DarkPose - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.764 - AP@0.5: 0.907 - AP@0.75: 0.831 - AR: 0.814 - AR@0.5: 0.942 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192-e1ebdd6f_20220913.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288.py - In Collection: DarkPose - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.772 - AP@0.5: 0.911 - AP@0.75: 0.833 - AR: 0.821 - AR@0.5: 0.948 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288-39c3c381_20220916.pth +Collections: +- Name: DarkPose + Paper: + Title: Distribution-aware coordinate representation for human pose estimation + URL: http://openaccess.thecvf.com/content_CVPR_2020/html/Zhang_Distribution-Aware_Coordinate_Representation_for_Human_Pose_Estimation_CVPR_2020_paper.html + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/techniques/dark.md +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192.py + In Collection: DarkPose + Metadata: + Architecture: &id001 + - HRNet + - DarkPose + Training Data: COCO + Name: td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.757 + AP@0.5: 0.907 + AP@0.75: 0.825 + AR: 0.807 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192-0e00bf12_20220914.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288.py + In Collection: DarkPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.766 + AP@0.5: 0.907 + AP@0.75: 0.829 + AR: 0.815 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288-9bab4c9b_20220917.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192.py + In Collection: DarkPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.764 + AP@0.5: 0.907 + AP@0.75: 0.831 
+ AR: 0.814 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192-e1ebdd6f_20220913.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288.py + In Collection: DarkPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.772 + AP@0.5: 0.911 + AP@0.75: 0.833 + AR: 0.821 + AR@0.5: 0.948 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288-39c3c381_20220916.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_fp16_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_fp16_coco.md index 3e52624dc7..79aa6115de 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_fp16_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_fp16_coco.md @@ -1,56 +1,56 @@ - - -
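DarkPose, used by the entries above, changes how coordinates are decoded from heatmaps rather than the network itself: the integer arg-max is refined with a second-order Taylor expansion of the log-heatmap around the peak. Below is a rough NumPy sketch of that refinement idea; it is a simplification for illustration, not MMPose's actual decoder (which also smooths the heatmap before refining):

```python
# Rough NumPy sketch of the DarkPose idea: refine the integer arg-max of a
# heatmap with a second-order Taylor expansion of its logarithm.
import numpy as np

def dark_refine(heatmap, eps=1e-10):
    """Return (x, y) with sub-pixel refinement of the heatmap peak."""
    h, w = heatmap.shape
    y, x = np.unravel_index(np.argmax(heatmap), heatmap.shape)
    if not (1 <= x < w - 1 and 1 <= y < h - 1):
        return float(x), float(y)           # peak on the border: skip refinement
    logh = np.log(np.maximum(heatmap, eps))
    # Gradient (central differences) and Hessian of log-heatmap at the peak.
    dx = 0.5 * (logh[y, x + 1] - logh[y, x - 1])
    dy = 0.5 * (logh[y + 1, x] - logh[y - 1, x])
    dxx = logh[y, x + 1] - 2 * logh[y, x] + logh[y, x - 1]
    dyy = logh[y + 1, x] - 2 * logh[y, x] + logh[y - 1, x]
    dxy = 0.25 * (logh[y + 1, x + 1] - logh[y + 1, x - 1]
                  - logh[y - 1, x + 1] + logh[y - 1, x - 1])
    hess = np.array([[dxx, dxy], [dxy, dyy]])
    grad = np.array([dx, dy])
    if abs(np.linalg.det(hess)) < eps:
        return float(x), float(y)
    offset = -np.linalg.solve(hess, grad)    # Newton step on the log-heatmap
    return float(x + offset[0]), float(y + offset[1])

# Toy heatmap: Gaussian blob centred off-grid at (12.3, 7.6).
ys, xs = np.mgrid[0:48, 0:64]
hm = np.exp(-((xs - 12.3) ** 2 + (ys - 7.6) ** 2) / (2 * 2.0 ** 2))
print(dark_refine(hm))  # close to (12.3, 7.6)
```

On the toy Gaussian the refinement recovers the off-grid peak that a plain arg-max would round to (12, 8), which is the effect the DARK rows in the tables above are measuring.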
-HRNet (CVPR'2019) - -```bibtex -@inproceedings{sun2019deep, - title={Deep high-resolution representation learning for human pose estimation}, - author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={5693--5703}, - year={2019} -} -``` - -
- - - -
-FP16 (ArXiv'2017) - -```bibtex -@article{micikevicius2017mixed, - title={Mixed precision training}, - author={Micikevicius, Paulius and Narang, Sharan and Alben, Jonah and Diamos, Gregory and Elsen, Erich and Garcia, David and Ginsburg, Boris and Houston, Michael and Kuchaiev, Oleksii and Venkatesh, Ganesh and others}, - journal={arXiv preprint arXiv:1710.03740}, - year={2017} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [pose_hrnet_w32_fp16](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192.py) | 256x192 | 0.749 | 0.907 | 0.822 | 0.802 | 0.946 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192-f1e84e3b_20220914.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192_20220914.log) | + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+FP16 (ArXiv'2017) + +```bibtex +@article{micikevicius2017mixed, + title={Mixed precision training}, + author={Micikevicius, Paulius and Narang, Sharan and Alben, Jonah and Diamos, Gregory and Elsen, Erich and Garcia, David and Ginsburg, Boris and Houston, Michael and Kuchaiev, Oleksii and Venkatesh, Ganesh and others}, + journal={arXiv preprint arXiv:1710.03740}, + year={2017} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_hrnet_w32_fp16](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192.py) | 256x192 | 0.749 | 0.907 | 0.822 | 0.802 | 0.946 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192-f1e84e3b_20220914.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192_20220914.log) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_udp_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_udp_coco.md index 2b85d85a25..988df0fb00 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_udp_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_udp_coco.md @@ -1,63 +1,63 @@ - - -
-HRNet (CVPR'2019) - -```bibtex -@inproceedings{sun2019deep, - title={Deep high-resolution representation learning for human pose estimation}, - author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={5693--5703}, - year={2019} -} -``` - -
- - - -
-UDP (CVPR'2020) - -```bibtex -@InProceedings{Huang_2020_CVPR, - author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, - title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, - booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, - month = {June}, - year = {2020} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [pose_hrnet_w32_udp](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192.py) | 256x192 | 0.762 | 0.907 | 0.829 | 0.810 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192-73ede547_20220914.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192_20220914.log) | -| [pose_hrnet_w32_udp](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288.py) | 384x288 | 0.768 | 0.909 | 0.832 | 0.815 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288-9a3f7c85_20220914.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288_20220914.log) | -| [pose_hrnet_w48_udp](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192.py) | 256x192 | 0.768 | 0.908 | 0.833 | 0.817 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192-3feaef8f_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192_20220913.log) | -| [pose_hrnet_w48_udp](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288.py) | 384x288 | 0.773 | 0.911 | 0.836 | 0.821 | 0.946 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288-70d7ab01_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288_20220913.log) | -| [pose_hrnet_w32_udp_regress](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192.py) | 256x192 | 0.759 | 0.907 | 0.827 | 0.813 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192-9c0b77b4_20220926.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192_20220226.log) | - -Note that, UDP also adopts the unbiased encoding/decoding algorithm of [DARK](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/techniques.html#darkpose-cvpr-2020). + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+UDP (CVPR'2020) + +```bibtex +@InProceedings{Huang_2020_CVPR, + author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, + title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, + booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2020} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_hrnet_w32_udp](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192.py) | 256x192 | 0.762 | 0.907 | 0.829 | 0.810 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192-73ede547_20220914.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192_20220914.log) | +| [pose_hrnet_w32_udp](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288.py) | 384x288 | 0.768 | 0.909 | 0.832 | 0.815 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288-9a3f7c85_20220914.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288_20220914.log) | +| [pose_hrnet_w48_udp](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192.py) | 256x192 | 0.768 | 0.908 | 0.833 | 0.817 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192-3feaef8f_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192_20220913.log) | +| [pose_hrnet_w48_udp](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288.py) | 384x288 | 0.773 | 0.911 | 0.836 | 0.821 | 0.946 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288-70d7ab01_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288_20220913.log) | +| [pose_hrnet_w32_udp_regress](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192.py) | 256x192 | 0.759 | 0.907 | 0.827 | 0.813 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192-9c0b77b4_20220926.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192_20220226.log) | + +Note that, UDP also adopts the unbiased encoding/decoding algorithm of [DARK](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/techniques.html#darkpose-cvpr-2020). 
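A note on how the UDP rows above are typically wired up: in MMPose 1.x, unbiased data processing lives in the keypoint codec that both renders training heatmaps and decodes predictions, which is also why the DARK-style unbiased decoding mentioned in the last line comes along with it. The following is a hedged sketch under assumed argument names and channel counts; the linked configs are the authoritative source for the exact settings.

```python
# Hedged sketch: declaring a UDP-style heatmap codec in an MMPose 1.x config.
# The sizes below simply mirror the 256x192 input listed in the table.
codec = dict(
    type='UDPHeatmap',
    input_size=(192, 256),    # (w, h) of the cropped person image
    heatmap_size=(48, 64),    # 1/4-resolution heatmaps, as in HRNet
    sigma=2,                  # Gaussian radius used to render target heatmaps
)

# The head references the same codec so that encoding (training targets) and
# decoding (keypoint recovery at test time) stay consistent, which is the bias
# the UDP and DARK papers address.
head = dict(
    type='HeatmapHead',
    in_channels=32,           # HRNet-W32 output channels (assumed)
    out_channels=17,          # COCO keypoints
    decoder=codec,
)
```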
diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_udp_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_udp_coco.yml index 01cba761ec..3971f52f68 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_udp_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_udp_coco.yml @@ -1,90 +1,90 @@ -Collections: -- Name: UDP - Paper: - Title: 'The Devil Is in the Details: Delving Into Unbiased Data Processing for - Human Pose Estimation' - URL: http://openaccess.thecvf.com/content_CVPR_2020/html/Huang_The_Devil_Is_in_the_Details_Delving_Into_Unbiased_Data_CVPR_2020_paper.html - README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/techniques/udp.md -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192.py - In Collection: UDP - Metadata: - Architecture: &id001 - - HRNet - - UDP - Training Data: COCO - Name: td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.762 - AP@0.5: 0.907 - AP@0.75: 0.829 - AR: 0.810 - AR@0.5: 0.942 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192-73ede547_20220914.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288.py - In Collection: UDP - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.768 - AP@0.5: 0.909 - AP@0.75: 0.832 - AR: 0.815 - AR@0.5: 0.945 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288-9a3f7c85_20220914.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192.py - In Collection: UDP - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.768 - AP@0.5: 0.908 - AP@0.75: 0.833 - AR: 0.817 - AR@0.5: 0.945 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192-3feaef8f_20220913.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288.py - In Collection: UDP - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.773 - AP@0.5: 0.911 - AP@0.75: 0.836 - AR: 0.821 - AR@0.5: 0.946 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288-70d7ab01_20220913.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192.py - In Collection: UDP - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.759 - AP@0.5: 0.907 - AP@0.75: 0.827 - AR: 0.813 - AR@0.5: 0.943 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192-9c0b77b4_20220926.pth +Collections: +- Name: UDP + Paper: + Title: 'The Devil Is in the Details: Delving Into Unbiased Data Processing for + Human Pose Estimation' + URL: 
http://openaccess.thecvf.com/content_CVPR_2020/html/Huang_The_Devil_Is_in_the_Details_Delving_Into_Unbiased_Data_CVPR_2020_paper.html + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/techniques/udp.md +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192.py + In Collection: UDP + Metadata: + Architecture: &id001 + - HRNet + - UDP + Training Data: COCO + Name: td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.762 + AP@0.5: 0.907 + AP@0.75: 0.829 + AR: 0.810 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192-73ede547_20220914.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288.py + In Collection: UDP + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.768 + AP@0.5: 0.909 + AP@0.75: 0.832 + AR: 0.815 + AR@0.5: 0.945 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288-9a3f7c85_20220914.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192.py + In Collection: UDP + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.768 + AP@0.5: 0.908 + AP@0.75: 0.833 + AR: 0.817 + AR@0.5: 0.945 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192-3feaef8f_20220913.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288.py + In Collection: UDP + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.773 + AP@0.5: 0.911 + AP@0.75: 0.836 + AR: 0.821 + AR@0.5: 0.946 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288-70d7ab01_20220913.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192.py + In Collection: UDP + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.759 + AP@0.5: 0.907 + AP@0.75: 0.827 + AR: 0.813 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192-9c0b77b4_20220926.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/litehrnet_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/litehrnet_coco.md index 28f608d54a..2bdb62d4cd 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/litehrnet_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/litehrnet_coco.md @@ -1,42 +1,42 @@ - - -
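The `.yml` files in this diff (such as `hrnet_udp_coco.yml` above) follow the OpenMMLab model-index layout: a `Collections` entry with paper metadata plus a `Models` list, where `&id001` and `*id001` are ordinary YAML anchors and aliases that let every model reuse the same `Architecture` list. A minimal sketch of consuming such a file, assuming it has been saved locally under the name used below:

```python
# Minimal sketch: look up each model's metrics and weights URL from an
# OpenMMLab model-index YAML file. Standard PyYAML resolves the &id001/*id001
# anchor/alias pairs automatically, so every model sees the full
# ['HRNet', 'UDP'] architecture list.
import yaml

with open('hrnet_udp_coco.yml') as f:      # assumed local path
    index = yaml.safe_load(f)

for model in index['Models']:
    metrics = model['Results'][0]['Metrics']
    print(model['Name'],
          model['Metadata']['Architecture'],
          'AP=%.3f' % metrics['AP'],
          model['Weights'])
```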
-LiteHRNet (CVPR'2021) - -```bibtex -@inproceedings{Yulitehrnet21, - title={Lite-HRNet: A Lightweight High-Resolution Network}, - author={Yu, Changqian and Xiao, Bin and Gao, Changxin and Yuan, Lu and Zhang, Lei and Sang, Nong and Wang, Jingdong}, - booktitle={CVPR}, - year={2021} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [LiteHRNet-18](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb64-210e_coco-256x192.py) | 256x192 | 0.642 | 0.867 | 0.719 | 0.705 | 0.911 | [ckpt](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_coco_256x192-6bace359_20211230.pth) | [log](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_coco_256x192_20211230.log.json) | -| [LiteHRNet-18](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb32-210e_coco-384x288.py) | 384x288 | 0.676 | 0.876 | 0.746 | 0.735 | 0.919 | [ckpt](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_coco_384x288-8d4dac48_20211230.pth) | [log](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_coco_384x288_20211230.log.json) | -| [LiteHRNet-30](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb64-210e_coco-256x192.py) | 256x192 | 0.676 | 0.880 | 0.756 | 0.736 | 0.922 | [ckpt](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_coco_256x192-4176555b_20210626.pth) | [log](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_coco_256x192_20210626.log.json) | -| [LiteHRNet-30](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb32-210e_coco-384x288.py) | 384x288 | 0.700 | 0.883 | 0.776 | 0.758 | 0.926 | [ckpt](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_coco_384x288-a3aef5c4_20210626.pth) | [log](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_coco_384x288_20210626.log.json) | + + +
+LiteHRNet (CVPR'2021) + +```bibtex +@inproceedings{Yulitehrnet21, + title={Lite-HRNet: A Lightweight High-Resolution Network}, + author={Yu, Changqian and Xiao, Bin and Gao, Changxin and Yuan, Lu and Zhang, Lei and Sang, Nong and Wang, Jingdong}, + booktitle={CVPR}, + year={2021} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [LiteHRNet-18](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb64-210e_coco-256x192.py) | 256x192 | 0.642 | 0.867 | 0.719 | 0.705 | 0.911 | [ckpt](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_coco_256x192-6bace359_20211230.pth) | [log](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_coco_256x192_20211230.log.json) | +| [LiteHRNet-18](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb32-210e_coco-384x288.py) | 384x288 | 0.676 | 0.876 | 0.746 | 0.735 | 0.919 | [ckpt](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_coco_384x288-8d4dac48_20211230.pth) | [log](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_coco_384x288_20211230.log.json) | +| [LiteHRNet-30](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb64-210e_coco-256x192.py) | 256x192 | 0.676 | 0.880 | 0.756 | 0.736 | 0.922 | [ckpt](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_coco_256x192-4176555b_20210626.pth) | [log](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_coco_256x192_20210626.log.json) | +| [LiteHRNet-30](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb32-210e_coco-384x288.py) | 384x288 | 0.700 | 0.883 | 0.776 | 0.758 | 0.926 | [ckpt](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_coco_384x288-a3aef5c4_20210626.pth) | [log](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_coco_384x288_20210626.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/litehrnet_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/litehrnet_coco.yml index f923605872..11ecf9219d 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/litehrnet_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/litehrnet_coco.yml @@ -1,72 +1,72 @@ -Collections: -- Name: LiteHRNet - Paper: - Title: 'Lite-HRNet: A Lightweight High-Resolution Network' - URL: https://arxiv.org/abs/2104.06403 - README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/litehrnet.md -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb64-210e_coco-256x192.py - In Collection: LiteHRNet - Metadata: - Architecture: &id001 - - LiteHRNet - Training Data: COCO - Name: td-hm_litehrnet-18_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.642 - AP@0.5: 0.867 - AP@0.75: 0.719 - AR: 0.705 - AR@0.5: 0.911 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_coco_256x192-6bace359_20211230.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb32-210e_coco-384x288.py - In Collection: LiteHRNet - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_litehrnet-18_8xb32-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.676 - AP@0.5: 0.876 - AP@0.75: 0.746 - AR: 0.735 - AR@0.5: 0.919 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_coco_384x288-8d4dac48_20211230.pth -- Config: 
configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb64-210e_coco-256x192.py - In Collection: LiteHRNet - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_litehrnet-30_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.676 - AP@0.5: 0.88 - AP@0.75: 0.756 - AR: 0.736 - AR@0.5: 0.922 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_coco_256x192-4176555b_20210626.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb32-210e_coco-384x288.py - In Collection: LiteHRNet - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_litehrnet-30_8xb32-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.7 - AP@0.5: 0.883 - AP@0.75: 0.776 - AR: 0.758 - AR@0.5: 0.926 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_coco_384x288-a3aef5c4_20210626.pth +Collections: +- Name: LiteHRNet + Paper: + Title: 'Lite-HRNet: A Lightweight High-Resolution Network' + URL: https://arxiv.org/abs/2104.06403 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/litehrnet.md +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb64-210e_coco-256x192.py + In Collection: LiteHRNet + Metadata: + Architecture: &id001 + - LiteHRNet + Training Data: COCO + Name: td-hm_litehrnet-18_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.642 + AP@0.5: 0.867 + AP@0.75: 0.719 + AR: 0.705 + AR@0.5: 0.911 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_coco_256x192-6bace359_20211230.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb32-210e_coco-384x288.py + In Collection: LiteHRNet + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_litehrnet-18_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.676 + AP@0.5: 0.876 + AP@0.75: 0.746 + AR: 0.735 + AR@0.5: 0.919 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_coco_384x288-8d4dac48_20211230.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb64-210e_coco-256x192.py + In Collection: LiteHRNet + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_litehrnet-30_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.676 + AP@0.5: 0.88 + AP@0.75: 0.756 + AR: 0.736 + AR@0.5: 0.922 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_coco_256x192-4176555b_20210626.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb32-210e_coco-384x288.py + In Collection: LiteHRNet + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_litehrnet-30_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.7 + AP@0.5: 0.883 + AP@0.75: 0.776 + AR: 0.758 + AR@0.5: 0.926 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_coco_384x288-a3aef5c4_20210626.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/mobilenetv2_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/mobilenetv2_coco.md index aed9fd0246..7df4a4209c 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/mobilenetv2_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/mobilenetv2_coco.md @@ -1,41 +1,41 @@ - - -
-MobilenetV2 (CVPR'2018) - -```bibtex -@inproceedings{sandler2018mobilenetv2, - title={Mobilenetv2: Inverted residuals and linear bottlenecks}, - author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={4510--4520}, - year={2018} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [pose_mobilenetv2](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-256x192.py) | 256x192 | 0.648 | 0.874 | 0.725 | 0.709 | 0.918 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-256x192-55a04c35_20221016.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-256x192_20221016.log) | -| [pose_mobilenetv2](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-384x288.py) | 384x288 | 0.677 | 0.882 | 0.746 | 0.734 | 0.920 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-384x288-d3ab1457_20221013.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-384x288_20221013.log) | + + +
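For the MobileNetV2 rows, the accompanying model-index entries list the architecture as `SimpleBaseline2D` plus `MobilenetV2`, that is, a SimpleBaseline-style deconvolution head on a lightweight backbone. The sketch below illustrates, under assumed field names and a hypothetical pretrained-weight source, how such a backbone swap is commonly expressed in an MMPose 1.x config; it is a fragment for illustration, not the shipped config.

```python
# Hedged sketch (config fragment): a lightweight backbone under a
# SimpleBaseline-style heatmap head in the MMPose 1.x config style.
# Field names and the pretrained source are assumptions.
model = dict(
    type='TopdownPoseEstimator',
    backbone=dict(
        type='MobileNetV2',
        widen_factor=1.0,
        init_cfg=dict(type='Pretrained',
                      checkpoint='mmcls://mobilenet_v2'),  # assumed pretrain source
    ),
    head=dict(
        type='HeatmapHead',   # SimpleBaseline2D-style deconv head
        in_channels=1280,     # MobileNetV2 final feature channels
        out_channels=17,      # COCO keypoints
    ),
)
```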
+MobilenetV2 (CVPR'2018) + +```bibtex +@inproceedings{sandler2018mobilenetv2, + title={Mobilenetv2: Inverted residuals and linear bottlenecks}, + author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={4510--4520}, + year={2018} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_mobilenetv2](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-256x192.py) | 256x192 | 0.648 | 0.874 | 0.725 | 0.709 | 0.918 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-256x192-55a04c35_20221016.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-256x192_20221016.log) | +| [pose_mobilenetv2](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-384x288.py) | 384x288 | 0.677 | 0.882 | 0.746 | 0.734 | 0.920 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-384x288-d3ab1457_20221013.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-384x288_20221013.log) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/mobilenetv2_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/mobilenetv2_coco.yml index c7993fe516..644a6b6171 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/mobilenetv2_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/mobilenetv2_coco.yml @@ -1,35 +1,35 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: &id001 - - SimpleBaseline2D - - MobilenetV2 - Training Data: COCO - Name: td-hm_mobilenetv2_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.648 - AP@0.5: 0.874 - AP@0.75: 0.725 - AR: 0.709 - AR@0.5: 0.918 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-256x192-55a04c35_20221016.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_mobilenetv2_8xb64-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.677 - AP@0.5: 0.882 - AP@0.75: 0.746 - AR: 0.734 - AR@0.5: 0.920 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-384x288-d3ab1457_20221013.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - MobilenetV2 + Training Data: COCO + Name: td-hm_mobilenetv2_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.648 + AP@0.5: 0.874 + AP@0.75: 0.725 + AR: 0.709 + AR@0.5: 0.918 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-256x192-55a04c35_20221016.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + 
Name: td-hm_mobilenetv2_8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.677 + AP@0.5: 0.882 + AP@0.75: 0.746 + AR: 0.734 + AR@0.5: 0.920 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-384x288-d3ab1457_20221013.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/mspn_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/mspn_coco.md index d86bc2c2ad..a67cd63e96 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/mspn_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/mspn_coco.md @@ -1,42 +1,42 @@ - - -
-MSPN (ArXiv'2019) - -```bibtex -@article{li2019rethinking, - title={Rethinking on Multi-Stage Networks for Human Pose Estimation}, - author={Li, Wenbo and Wang, Zhicheng and Yin, Binyi and Peng, Qixiang and Du, Yuming and Xiao, Tianzi and Yu, Gang and Lu, Hongtao and Wei, Yichen and Sun, Jian}, - journal={arXiv preprint arXiv:1901.00148}, - year={2019} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [mspn_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mspn50_8xb32-210e_coco-256x192.py) | 256x192 | 0.723 | 0.895 | 0.794 | 0.788 | 0.934 | [ckpt](https://download.openmmlab.com/mmpose/top_down/mspn/mspn50_coco_256x192-8fbfb5d0_20201123.pth) | [log](https://download.openmmlab.com/mmpose/top_down/mspn/mspn50_coco_256x192_20201123.log.json) | -| [2xmspn_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xmspn50_8xb32-210e_coco-256x192.py) | 256x192 | 0.754 | 0.903 | 0.826 | 0.816 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/top_down/mspn/2xmspn50_coco_256x192-c8765a5c_20201123.pth) | [log](https://download.openmmlab.com/mmpose/top_down/mspn/2xmspn50_coco_256x192_20201123.log.json) | -| [3xmspn_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xmspn50_8xb32-210e_coco-256x192.py) | 256x192 | 0.758 | 0.904 | 0.830 | 0.821 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/mspn/3xmspn50_coco_256x192-e348f18e_20201123.pth) | [log](https://download.openmmlab.com/mmpose/top_down/mspn/3xmspn50_coco_256x192_20201123.log.json) | -| [4xmspn_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_4xmspn50_8xb32-210e_coco-256x192.py) | 256x192 | 0.765 | 0.906 | 0.835 | 0.826 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/mspn/4xmspn50_coco_256x192-7b837afb_20201123.pth) | [log](https://download.openmmlab.com/mmpose/top_down/mspn/4xmspn50_coco_256x192_20201123.log.json) | + + +
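Every row in these result tables pairs a config path with a downloadable checkpoint, so the models can be loaded directly for inference. Below is a hedged sketch using the high-level Python API of MMPose 1.x (`init_model` / `inference_topdown`), with the MSPN-50 config and checkpoint taken from the table above; the image path is a placeholder, and in a full top-down pipeline the person boxes would come from the 56.4-AP detector mentioned in the caption rather than from the whole image.

```python
# Hedged sketch: run top-down inference with one of the config/checkpoint
# pairs listed above. Assumes an MMPose 1.x installation.
from mmpose.apis import init_model, inference_topdown

config = 'configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mspn50_8xb32-210e_coco-256x192.py'
checkpoint = 'https://download.openmmlab.com/mmpose/top_down/mspn/mspn50_coco_256x192-8fbfb5d0_20201123.pth'

model = init_model(config, checkpoint, device='cuda:0')

# Without explicit bboxes, the whole image is treated as a single instance;
# pass detector boxes here for the standard top-down evaluation protocol.
results = inference_topdown(model, 'person.jpg')      # placeholder image path
print(results[0].pred_instances.keypoints.shape)      # e.g. (1, 17, 2)
```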
+MSPN (ArXiv'2019) + +```bibtex +@article{li2019rethinking, + title={Rethinking on Multi-Stage Networks for Human Pose Estimation}, + author={Li, Wenbo and Wang, Zhicheng and Yin, Binyi and Peng, Qixiang and Du, Yuming and Xiao, Tianzi and Yu, Gang and Lu, Hongtao and Wei, Yichen and Sun, Jian}, + journal={arXiv preprint arXiv:1901.00148}, + year={2019} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [mspn_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mspn50_8xb32-210e_coco-256x192.py) | 256x192 | 0.723 | 0.895 | 0.794 | 0.788 | 0.934 | [ckpt](https://download.openmmlab.com/mmpose/top_down/mspn/mspn50_coco_256x192-8fbfb5d0_20201123.pth) | [log](https://download.openmmlab.com/mmpose/top_down/mspn/mspn50_coco_256x192_20201123.log.json) | +| [2xmspn_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xmspn50_8xb32-210e_coco-256x192.py) | 256x192 | 0.754 | 0.903 | 0.826 | 0.816 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/top_down/mspn/2xmspn50_coco_256x192-c8765a5c_20201123.pth) | [log](https://download.openmmlab.com/mmpose/top_down/mspn/2xmspn50_coco_256x192_20201123.log.json) | +| [3xmspn_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xmspn50_8xb32-210e_coco-256x192.py) | 256x192 | 0.758 | 0.904 | 0.830 | 0.821 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/mspn/3xmspn50_coco_256x192-e348f18e_20201123.pth) | [log](https://download.openmmlab.com/mmpose/top_down/mspn/3xmspn50_coco_256x192_20201123.log.json) | +| [4xmspn_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_4xmspn50_8xb32-210e_coco-256x192.py) | 256x192 | 0.765 | 0.906 | 0.835 | 0.826 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/mspn/4xmspn50_coco_256x192-7b837afb_20201123.pth) | [log](https://download.openmmlab.com/mmpose/top_down/mspn/4xmspn50_coco_256x192_20201123.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/mspn_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/mspn_coco.yml index 77eca18b6f..1165bbc62f 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/mspn_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/mspn_coco.yml @@ -1,72 +1,72 @@ -Collections: -- Name: MSPN - Paper: - Title: Rethinking on Multi-Stage Networks for Human Pose Estimation - URL: https://arxiv.org/abs/1901.00148 - README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/mspn.md -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mspn50_8xb32-210e_coco-256x192.py - In Collection: MSPN - Metadata: - Architecture: &id001 - - MSPN - Training Data: COCO - Name: td-hm_mspn50_8xb32-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.723 - AP@0.5: 0.895 - AP@0.75: 0.794 - AR: 0.788 - AR@0.5: 0.934 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/mspn/mspn50_coco_256x192-8fbfb5d0_20201123.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xmspn50_8xb32-210e_coco-256x192.py - In Collection: MSPN - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_2xmspn50_8xb32-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.754 - AP@0.5: 0.903 - AP@0.75: 0.826 - AR: 0.816 - AR@0.5: 0.942 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/mspn/2xmspn50_coco_256x192-c8765a5c_20201123.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xmspn50_8xb32-210e_coco-256x192.py - In Collection: MSPN - Metadata: - Architecture: *id001 - Training Data: COCO - Name: 
td-hm_3xmspn50_8xb32-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.758 - AP@0.5: 0.904 - AP@0.75: 0.83 - AR: 0.821 - AR@0.5: 0.943 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/mspn/3xmspn50_coco_256x192-e348f18e_20201123.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_4xmspn50_8xb32-210e_coco-256x192.py - In Collection: MSPN - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_4xmspn50_8xb32-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.765 - AP@0.5: 0.906 - AP@0.75: 0.835 - AR: 0.826 - AR@0.5: 0.943 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/mspn/4xmspn50_coco_256x192-7b837afb_20201123.pth +Collections: +- Name: MSPN + Paper: + Title: Rethinking on Multi-Stage Networks for Human Pose Estimation + URL: https://arxiv.org/abs/1901.00148 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/mspn.md +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mspn50_8xb32-210e_coco-256x192.py + In Collection: MSPN + Metadata: + Architecture: &id001 + - MSPN + Training Data: COCO + Name: td-hm_mspn50_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.723 + AP@0.5: 0.895 + AP@0.75: 0.794 + AR: 0.788 + AR@0.5: 0.934 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/mspn/mspn50_coco_256x192-8fbfb5d0_20201123.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xmspn50_8xb32-210e_coco-256x192.py + In Collection: MSPN + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_2xmspn50_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.754 + AP@0.5: 0.903 + AP@0.75: 0.826 + AR: 0.816 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/mspn/2xmspn50_coco_256x192-c8765a5c_20201123.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xmspn50_8xb32-210e_coco-256x192.py + In Collection: MSPN + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_3xmspn50_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.758 + AP@0.5: 0.904 + AP@0.75: 0.83 + AR: 0.821 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/mspn/3xmspn50_coco_256x192-e348f18e_20201123.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_4xmspn50_8xb32-210e_coco-256x192.py + In Collection: MSPN + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_4xmspn50_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.765 + AP@0.5: 0.906 + AP@0.75: 0.835 + AR: 0.826 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/mspn/4xmspn50_coco_256x192-7b837afb_20201123.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/pvt_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/pvt_coco.md index 8a375a4c20..74a189d772 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/pvt_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/pvt_coco.md @@ -1,57 +1,57 @@ - - -
-PVT (ICCV'2021) - -```bibtex -@inproceedings{wang2021pyramid, - title={Pyramid vision transformer: A versatile backbone for dense prediction without convolutions}, - author={Wang, Wenhai and Xie, Enze and Li, Xiang and Fan, Deng-Ping and Song, Kaitao and Liang, Ding and Lu, Tong and Luo, Ping and Shao, Ling}, - booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, - pages={568--578}, - year={2021} -} -``` - -
- -
-PVTV2 (CVMJ'2022) - -```bibtex -@article{wang2022pvt, - title={PVT v2: Improved baselines with Pyramid Vision Transformer}, - author={Wang, Wenhai and Xie, Enze and Li, Xiang and Fan, Deng-Ping and Song, Kaitao and Liang, Ding and Lu, Tong and Luo, Ping and Shao, Ling}, - journal={Computational Visual Media}, - pages={1--10}, - year={2022}, - publisher={Springer} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [pose_pvt-s](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvt-s_8xb64-210e_coco-256x192.py) | 256x192 | 0.714 | 0.896 | 0.794 | 0.773 | 0.936 | [ckpt](https://download.openmmlab.com/mmpose/top_down/pvt/pvt_small_coco_256x192-4324a49d_20220501.pth) | [log](https://download.openmmlab.com/mmpose/top_down/pvt/pvt_small_coco_256x192_20220501.log.json) | -| [pose_pvtv2-b2](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvtv2-b2_8xb64-210e_coco-256x192.py) | 256x192 | 0.737 | 0.905 | 0.812 | 0.791 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/top_down/pvt/pvtv2_b2_coco_256x192-b4212737_20220501.pth) | [log](https://download.openmmlab.com/mmpose/top_down/pvt/pvtv2_b2_coco_256x192_20220501.log.json) | + + +
+PVT (ICCV'2021) + +```bibtex +@inproceedings{wang2021pyramid, + title={Pyramid vision transformer: A versatile backbone for dense prediction without convolutions}, + author={Wang, Wenhai and Xie, Enze and Li, Xiang and Fan, Deng-Ping and Song, Kaitao and Liang, Ding and Lu, Tong and Luo, Ping and Shao, Ling}, + booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, + pages={568--578}, + year={2021} +} +``` + +
+ +
+PVTV2 (CVMJ'2022) + +```bibtex +@article{wang2022pvt, + title={PVT v2: Improved baselines with Pyramid Vision Transformer}, + author={Wang, Wenhai and Xie, Enze and Li, Xiang and Fan, Deng-Ping and Song, Kaitao and Liang, Ding and Lu, Tong and Luo, Ping and Shao, Ling}, + journal={Computational Visual Media}, + pages={1--10}, + year={2022}, + publisher={Springer} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_pvt-s](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvt-s_8xb64-210e_coco-256x192.py) | 256x192 | 0.714 | 0.896 | 0.794 | 0.773 | 0.936 | [ckpt](https://download.openmmlab.com/mmpose/top_down/pvt/pvt_small_coco_256x192-4324a49d_20220501.pth) | [log](https://download.openmmlab.com/mmpose/top_down/pvt/pvt_small_coco_256x192_20220501.log.json) | +| [pose_pvtv2-b2](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvtv2-b2_8xb64-210e_coco-256x192.py) | 256x192 | 0.737 | 0.905 | 0.812 | 0.791 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/top_down/pvt/pvtv2_b2_coco_256x192-b4212737_20220501.pth) | [log](https://download.openmmlab.com/mmpose/top_down/pvt/pvtv2_b2_coco_256x192_20220501.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/pvt_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/pvt_coco.yml index 2b4303d704..202ec81b93 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/pvt_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/pvt_coco.yml @@ -1,35 +1,35 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvt-s_8xb64-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: &id001 - - SimpleBaseline2D - - PVT - Training Data: COCO - Name: td-hm_pvt-s_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.714 - AP@0.5: 0.896 - AP@0.75: 0.794 - AR: 0.773 - AR@0.5: 0.936 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/pvt/pvt_small_coco_256x192-4324a49d_20220501.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvtv2-b2_8xb64-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_pvtv2-b2_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.737 - AP@0.5: 0.905 - AP@0.75: 0.812 - AR: 0.791 - AR@0.5: 0.942 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/pvt/pvtv2_b2_coco_256x192-b4212737_20220501.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvt-s_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - PVT + Training Data: COCO + Name: td-hm_pvt-s_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.714 + AP@0.5: 0.896 + AP@0.75: 0.794 + AR: 0.773 + AR@0.5: 0.936 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/pvt/pvt_small_coco_256x192-4324a49d_20220501.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvtv2-b2_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_pvtv2-b2_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.737 + AP@0.5: 0.905 + AP@0.75: 0.812 + AR: 0.791 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/pvt/pvtv2_b2_coco_256x192-b4212737_20220501.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/resnest_coco.md 
b/configs/body_2d_keypoint/topdown_heatmap/coco/resnest_coco.md index cb7ada4d6b..8bee32c56e 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/resnest_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/resnest_coco.md @@ -1,46 +1,46 @@ - - -
-ResNeSt (ArXiv'2020) - -```bibtex -@article{zhang2020resnest, - title={ResNeSt: Split-Attention Networks}, - author={Zhang, Hang and Wu, Chongruo and Zhang, Zhongyue and Zhu, Yi and Zhang, Zhi and Lin, Haibin and Sun, Yue and He, Tong and Muller, Jonas and Manmatha, R. and Li, Mu and Smola, Alexander}, - journal={arXiv preprint arXiv:2004.08955}, - year={2020} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [pose_resnest_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-256x192.py) | 256x192 | 0.720 | 0.899 | 0.800 | 0.775 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnest/resnest50_coco_256x192-6e65eece_20210320.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnest/resnest50_coco_256x192_20210320.log.json) | -| [pose_resnest_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-384x288.py) | 384x288 | 0.737 | 0.900 | 0.811 | 0.789 | 0.937 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnest/resnest50_coco_384x288-dcd20436_20210320.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnest/resnest50_coco_384x288_20210320.log.json) | -| [pose_resnest_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb64-210e_coco-256x192.py) | 256x192 | 0.725 | 0.900 | 0.807 | 0.781 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnest/resnest101_coco_256x192-2ffcdc9d_20210320.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnest/resnest101_coco_256x192_20210320.log.json) | -| [pose_resnest_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb32-210e_coco-384x288.py) | 384x288 | 0.745 | 0.905 | 0.818 | 0.798 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnest/resnest101_coco_384x288-80660658_20210320.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnest/resnest101_coco_384x288_20210320.log.json) | -| [pose_resnest_200](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb64-210e_coco-256x192.py) | 256x192 | 0.731 | 0.905 | 0.812 | 0.787 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnest/resnest200_coco_256x192-db007a48_20210517.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnest/resnest200_coco_256x192_20210517.log.json) | -| [pose_resnest_200](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb16-210e_coco-384x288.py) | 384x288 | 0.753 | 0.907 | 0.827 | 0.805 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnest/resnest200_coco_384x288-b5bb76cb_20210517.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnest/resnest200_coco_384x288_20210517.log.json) | -| [pose_resnest_269](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb32-210e_coco-256x192.py) | 256x192 | 0.737 | 0.907 | 0.819 | 0.792 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnest/resnest269_coco_256x192-2a7882ac_20210517.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnest/resnest269_coco_256x192_20210517.log.json) | -| [pose_resnest_269](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb16-210e_coco-384x288.py) | 384x288 | 0.754 | 0.908 | 0.828 | 0.805 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnest/resnest269_coco_384x288-b142b9fb_20210517.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnest/resnest269_coco_384x288_20210517.log.json) | + + +
+ResNeSt (ArXiv'2020) + +```bibtex +@article{zhang2020resnest, + title={ResNeSt: Split-Attention Networks}, + author={Zhang, Hang and Wu, Chongruo and Zhang, Zhongyue and Zhu, Yi and Zhang, Zhi and Lin, Haibin and Sun, Yue and He, Tong and Muller, Jonas and Manmatha, R. and Li, Mu and Smola, Alexander}, + journal={arXiv preprint arXiv:2004.08955}, + year={2020} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_resnest_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-256x192.py) | 256x192 | 0.720 | 0.899 | 0.800 | 0.775 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnest/resnest50_coco_256x192-6e65eece_20210320.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnest/resnest50_coco_256x192_20210320.log.json) | +| [pose_resnest_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-384x288.py) | 384x288 | 0.737 | 0.900 | 0.811 | 0.789 | 0.937 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnest/resnest50_coco_384x288-dcd20436_20210320.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnest/resnest50_coco_384x288_20210320.log.json) | +| [pose_resnest_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb64-210e_coco-256x192.py) | 256x192 | 0.725 | 0.900 | 0.807 | 0.781 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnest/resnest101_coco_256x192-2ffcdc9d_20210320.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnest/resnest101_coco_256x192_20210320.log.json) | +| [pose_resnest_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb32-210e_coco-384x288.py) | 384x288 | 0.745 | 0.905 | 0.818 | 0.798 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnest/resnest101_coco_384x288-80660658_20210320.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnest/resnest101_coco_384x288_20210320.log.json) | +| [pose_resnest_200](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb64-210e_coco-256x192.py) | 256x192 | 0.731 | 0.905 | 0.812 | 0.787 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnest/resnest200_coco_256x192-db007a48_20210517.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnest/resnest200_coco_256x192_20210517.log.json) | +| [pose_resnest_200](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb16-210e_coco-384x288.py) | 384x288 | 0.753 | 0.907 | 0.827 | 0.805 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnest/resnest200_coco_384x288-b5bb76cb_20210517.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnest/resnest200_coco_384x288_20210517.log.json) | +| [pose_resnest_269](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb32-210e_coco-256x192.py) | 256x192 | 0.737 | 0.907 | 0.819 | 0.792 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnest/resnest269_coco_256x192-2a7882ac_20210517.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnest/resnest269_coco_256x192_20210517.log.json) | +| [pose_resnest_269](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb16-210e_coco-384x288.py) | 384x288 | 0.754 | 0.908 | 0.828 | 0.805 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnest/resnest269_coco_384x288-b142b9fb_20210517.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnest/resnest269_coco_384x288_20210517.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/resnest_coco.yml 
b/configs/body_2d_keypoint/topdown_heatmap/coco/resnest_coco.yml index 082c6a0aa2..d039829bf0 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/resnest_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/resnest_coco.yml @@ -1,131 +1,131 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: &id001 - - SimpleBaseline2D - - ResNeSt - Training Data: COCO - Name: td-hm_resnest50_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.720 - AP@0.5: 0.899 - AP@0.75: 0.8 - AR: 0.775 - AR@0.5: 0.939 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnest/resnest50_coco_256x192-6e65eece_20210320.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_resnest50_8xb64-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.737 - AP@0.5: 0.9 - AP@0.75: 0.811 - AR: 0.789 - AR@0.5: 0.937 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnest/resnest50_coco_384x288-dcd20436_20210320.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb64-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_resnest101_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.725 - AP@0.5: 0.9 - AP@0.75: 0.807 - AR: 0.781 - AR@0.5: 0.939 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnest/resnest101_coco_256x192-2ffcdc9d_20210320.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb32-210e_coco-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_resnest101_8xb32-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.745 - AP@0.5: 0.905 - AP@0.75: 0.818 - AR: 0.798 - AR@0.5: 0.942 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnest/resnest101_coco_384x288-80660658_20210320.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb64-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_resnest200_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.731 - AP@0.5: 0.905 - AP@0.75: 0.812 - AR: 0.787 - AR@0.5: 0.943 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnest/resnest200_coco_256x192-db007a48_20210517.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb16-210e_coco-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_resnest200_8xb16-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.753 - AP@0.5: 0.907 - AP@0.75: 0.827 - AR: 0.805 - AR@0.5: 0.943 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnest/resnest200_coco_384x288-b5bb76cb_20210517.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb32-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_resnest269_8xb32-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.737 - AP@0.5: 0.907 - AP@0.75: 
0.819 - AR: 0.792 - AR@0.5: 0.943 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnest/resnest269_coco_256x192-2a7882ac_20210517.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb16-210e_coco-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_resnest269_8xb16-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.754 - AP@0.5: 0.908 - AP@0.75: 0.828 - AR: 0.805 - AR@0.5: 0.943 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnest/resnest269_coco_384x288-b142b9fb_20210517.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNeSt + Training Data: COCO + Name: td-hm_resnest50_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.720 + AP@0.5: 0.899 + AP@0.75: 0.8 + AR: 0.775 + AR@0.5: 0.939 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnest/resnest50_coco_256x192-6e65eece_20210320.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnest50_8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.737 + AP@0.5: 0.9 + AP@0.75: 0.811 + AR: 0.789 + AR@0.5: 0.937 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnest/resnest50_coco_384x288-dcd20436_20210320.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnest101_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.725 + AP@0.5: 0.9 + AP@0.75: 0.807 + AR: 0.781 + AR@0.5: 0.939 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnest/resnest101_coco_256x192-2ffcdc9d_20210320.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb32-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnest101_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.745 + AP@0.5: 0.905 + AP@0.75: 0.818 + AR: 0.798 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnest/resnest101_coco_384x288-80660658_20210320.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnest200_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.731 + AP@0.5: 0.905 + AP@0.75: 0.812 + AR: 0.787 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnest/resnest200_coco_256x192-db007a48_20210517.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb16-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnest200_8xb16-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.753 + AP@0.5: 0.907 + AP@0.75: 0.827 + AR: 0.805 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: 
https://download.openmmlab.com/mmpose/top_down/resnest/resnest200_coco_384x288-b5bb76cb_20210517.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb32-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnest269_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.737 + AP@0.5: 0.907 + AP@0.75: 0.819 + AR: 0.792 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnest/resnest269_coco_256x192-2a7882ac_20210517.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb16-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnest269_8xb16-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.754 + AP@0.5: 0.908 + AP@0.75: 0.828 + AR: 0.805 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnest/resnest269_coco_384x288-b142b9fb_20210517.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_coco.md index 4ce6da38c6..e7b2e8c0fd 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_coco.md @@ -1,62 +1,62 @@ - - -
-SimpleBaseline2D (ECCV'2018) - -```bibtex -@inproceedings{xiao2018simple, - title={Simple baselines for human pose estimation and tracking}, - author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, - booktitle={Proceedings of the European conference on computer vision (ECCV)}, - pages={466--481}, - year={2018} -} -``` - -
- - - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py) | 256x192 | 0.718 | 0.898 | 0.796 | 0.774 | 0.934 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192-04af38ce_20220923.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192_20220923.log) | -| [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-384x288.py) | 384x288 | 0.731 | 0.900 | 0.799 | 0.782 | 0.937 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-384x288-7b8db90e_20220923.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-384x288_20220923.log) | -| [pose_resnet_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192.py) | 256x192 | 0.728 | 0.904 | 0.809 | 0.783 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192-065d3625_20220926.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192_20220926.log) | -| [pose_resnet_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb32-210e_coco-384x288.py) | 384x288 | 0.749 | 0.906 | 0.817 | 0.799 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192-065d3625_20220926.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192_20220926.log) | -| [pose_resnet_152](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-256x192.py) | 256x192 | 0.736 | 0.904 | 0.818 | 0.791 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-256x192-0345f330_20220928.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-256x192_20220928.log) | -| [pose_resnet_152](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-384x288.py) | 384x288 | 0.750 | 0.908 | 0.821 | 0.800 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-384x288-7fbb906f_20220927.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-384x288_20220927.log) | + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py) | 256x192 | 0.718 | 0.898 | 0.796 | 0.774 | 0.934 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192-04af38ce_20220923.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192_20220923.log) | +| [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-384x288.py) | 384x288 | 0.731 | 0.900 | 0.799 | 0.782 | 0.937 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-384x288-7b8db90e_20220923.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-384x288_20220923.log) | +| [pose_resnet_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192.py) | 256x192 | 0.728 | 0.904 | 0.809 | 0.783 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192-065d3625_20220926.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192_20220926.log) | +| [pose_resnet_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb32-210e_coco-384x288.py) | 384x288 | 0.749 | 0.906 | 0.817 | 0.799 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192-065d3625_20220926.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192_20220926.log) | +| [pose_resnet_152](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-256x192.py) | 256x192 | 0.736 | 0.904 | 0.818 | 0.791 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-256x192-0345f330_20220928.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-256x192_20220928.log) | +| [pose_resnet_152](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-384x288.py) | 384x288 | 0.750 | 0.908 | 0.821 | 0.800 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-384x288-7fbb906f_20220927.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-384x288_20220927.log) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_coco.yml index 296be8898b..ad6dce9f23 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_coco.yml @@ -1,121 +1,121 @@ -Collections: -- Name: SimpleBaseline2D - Paper: - Title: Simple baselines for human pose estimation and tracking - URL: 
http://openaccess.thecvf.com/content_ECCV_2018/html/Bin_Xiao_Simple_Baselines_for_ECCV_2018_paper.html - README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/simplebaseline2d.md -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: &id001 - - SimpleBaseline2D - - ResNet - Training Data: COCO - Name: td-hm_res50_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.718 - AP@0.5: 0.898 - AP@0.75: 0.796 - AR: 0.774 - AR@0.5: 0.934 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192-04af38ce_20220923.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_res50_8xb64-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.731 - AP@0.5: 0.9 - AP@0.75: 0.799 - AR: 0.782 - AR@0.5: 0.937 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-384x288-7b8db90e_20220923.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_res101_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.728 - AP@0.5: 0.904 - AP@0.75: 0.809 - AR: 0.783 - AR@0.5: 0.942 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192-065d3625_20220926.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb32-210e_coco-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_res101_8xb32-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.749 - AP@0.5: 0.906 - AP@0.75: 0.817 - AR: 0.799 - AR@0.5: 0.941 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192-065d3625_20220926.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_res152_8xb32-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.736 - AP@0.5: 0.904 - AP@0.75: 0.818 - AR: 0.791 - AR@0.5: 0.942 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-256x192-0345f330_20220928.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_res152_8xb32-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.75 - AP@0.5: 0.908 - AP@0.75: 0.821 - AR: 0.8 - AR@0.5: 0.942 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-384x288-7fbb906f_20220927.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_fp16-8xb64-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - 
Name: td-hm_res50_fp16-8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.716 - AP@0.5: 0.898 - AP@0.75: 0.798 - AR: 0.772 - AR@0.5: 0.937 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_fp16-8xb64-210e_coco-256x192-463da051_20220927.pth +Collections: +- Name: SimpleBaseline2D + Paper: + Title: Simple baselines for human pose estimation and tracking + URL: http://openaccess.thecvf.com/content_ECCV_2018/html/Bin_Xiao_Simple_Baselines_for_ECCV_2018_paper.html + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/simplebaseline2d.md +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNet + Training Data: COCO + Name: td-hm_res50_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.718 + AP@0.5: 0.898 + AP@0.75: 0.796 + AR: 0.774 + AR@0.5: 0.934 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192-04af38ce_20220923.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_res50_8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.731 + AP@0.5: 0.9 + AP@0.75: 0.799 + AR: 0.782 + AR@0.5: 0.937 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-384x288-7b8db90e_20220923.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_res101_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.728 + AP@0.5: 0.904 + AP@0.75: 0.809 + AR: 0.783 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192-065d3625_20220926.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb32-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_res101_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.749 + AP@0.5: 0.906 + AP@0.75: 0.817 + AR: 0.799 + AR@0.5: 0.941 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192-065d3625_20220926.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_res152_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.736 + AP@0.5: 0.904 + AP@0.75: 0.818 + AR: 0.791 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-256x192-0345f330_20220928.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_res152_8xb32-210e_coco-384x288 + Results: + - 
Dataset: COCO + Metrics: + AP: 0.75 + AP@0.5: 0.908 + AP@0.75: 0.821 + AR: 0.8 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-384x288-7fbb906f_20220927.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_fp16-8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_res50_fp16-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.716 + AP@0.5: 0.898 + AP@0.75: 0.798 + AR: 0.772 + AR@0.5: 0.937 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_fp16-8xb64-210e_coco-256x192-463da051_20220927.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_dark_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_dark_coco.md index 6f1b0107f3..9156fb4feb 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_dark_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_dark_coco.md @@ -1,79 +1,79 @@ - - -
-SimpleBaseline2D (ECCV'2018) - -```bibtex -@inproceedings{xiao2018simple, - title={Simple baselines for human pose estimation and tracking}, - author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, - booktitle={Proceedings of the European conference on computer vision (ECCV)}, - pages={466--481}, - year={2018} -} -``` - -
- - - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
- - - -
-DarkPose (CVPR'2020) - -```bibtex -@inproceedings{zhang2020distribution, - title={Distribution-aware coordinate representation for human pose estimation}, - author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={7093--7102}, - year={2020} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [pose_resnet_50_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-256x192.py) | 256x192 | 0.724 | 0.897 | 0.797 | 0.777 | 0.934 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-256x192-c129dcb6_20220926.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-256x192_20220926.log) | -| [pose_resnet_50_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-384x288.py) | 384x288 | 0.735 | 0.902 | 0.801 | 0.786 | 0.938 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-384x288-8b90b538_20220926.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-384x288_20220926.log) | -| [pose_resnet_101_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-256x192.py) | 256x192 | 0.733 | 0.900 | 0.810 | 0.786 | 0.938 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-256x192-528ec248_20220926.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-256x192_20220926.log) | -| [pose_resnet_101_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-384x288.py) | 384x288 | 0.749 | 0.905 | 0.818 | 0.799 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-384x288-487d40a4_20220926.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-384x288_20220926.log) | -| [pose_resnet_152_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-256x192.py) | 256x192 | 0.743 | 0.906 | 0.819 | 0.796 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-256x192-f754df5f_20221031.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-256x192_20221031.log) | -| [pose_resnet_152_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-384x288.py) | 384x288 | 0.755 | 0.907 | 0.825 | 0.805 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-384x288-329f8454_20221031.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-384x288_20221031.log) | + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+DarkPose (CVPR'2020) + +```bibtex +@inproceedings{zhang2020distribution, + title={Distribution-aware coordinate representation for human pose estimation}, + author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={7093--7102}, + year={2020} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_resnet_50_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-256x192.py) | 256x192 | 0.724 | 0.897 | 0.797 | 0.777 | 0.934 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-256x192-c129dcb6_20220926.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-256x192_20220926.log) | +| [pose_resnet_50_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-384x288.py) | 384x288 | 0.735 | 0.902 | 0.801 | 0.786 | 0.938 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-384x288-8b90b538_20220926.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-384x288_20220926.log) | +| [pose_resnet_101_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-256x192.py) | 256x192 | 0.733 | 0.900 | 0.810 | 0.786 | 0.938 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-256x192-528ec248_20220926.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-256x192_20220926.log) | +| [pose_resnet_101_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-384x288.py) | 384x288 | 0.749 | 0.905 | 0.818 | 0.799 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-384x288-487d40a4_20220926.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-384x288_20220926.log) | +| [pose_resnet_152_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-256x192.py) | 256x192 | 0.743 | 0.906 | 0.819 | 0.796 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-256x192-f754df5f_20221031.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-256x192_20221031.log) | +| [pose_resnet_152_dark](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-384x288.py) | 384x288 | 0.755 | 0.907 | 0.825 | 0.805 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-384x288-329f8454_20221031.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-384x288_20221031.log) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_dark_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_dark_coco.yml index 02e4a7f43f..c5e156fe94 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_dark_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_dark_coco.yml @@ -1,100 +1,100 @@ -Models: -- Config: 
configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-256x192.py - In Collection: DarkPose - Metadata: - Architecture: &id001 - - SimpleBaseline2D - - ResNet - - DarkPose - Training Data: COCO - Name: td-hm_res50_dark-8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.724 - AP@0.5: 0.897 - AP@0.75: 0.797 - AR: 0.777 - AR@0.5: 0.934 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-256x192-c129dcb6_20220926.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-384x288.py - In Collection: DarkPose - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_res50_dark-8xb64-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.735 - AP@0.5: 0.902 - AP@0.75: 0.801 - AR: 0.786 - AR@0.5: 0.938 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-384x288-8b90b538_20220926.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-256x192.py - In Collection: DarkPose - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_res101_dark-8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.733 - AP@0.5: 0.9 - AP@0.75: 0.81 - AR: 0.786 - AR@0.5: 0.938 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-256x192-528ec248_20220926.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-384x288.py - In Collection: DarkPose - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_res101_dark-8xb64-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.749 - AP@0.5: 0.905 - AP@0.75: 0.818 - AR: 0.799 - AR@0.5: 0.94 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-384x288-487d40a4_20220926.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-256x192.py - In Collection: DarkPose - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_res152_dark-8xb32-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.743 - AP@0.5: 0.906 - AP@0.75: 0.819 - AR: 0.796 - AR@0.5: 0.943 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-256x192-f754df5f_20221031.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-384x288.py - In Collection: DarkPose - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_res152_dark-8xb32-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.757 - AP@0.5: 0.907 - AP@0.75: 0.825 - AR: 0.805 - AR@0.5: 0.943 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-384x288-329f8454_20221031.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-256x192.py + In Collection: DarkPose + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNet + - DarkPose + Training Data: COCO + Name: td-hm_res50_dark-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.724 + AP@0.5: 0.897 + AP@0.75: 
0.797 + AR: 0.777 + AR@0.5: 0.934 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-256x192-c129dcb6_20220926.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-384x288.py + In Collection: DarkPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_res50_dark-8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.735 + AP@0.5: 0.902 + AP@0.75: 0.801 + AR: 0.786 + AR@0.5: 0.938 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-384x288-8b90b538_20220926.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-256x192.py + In Collection: DarkPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_res101_dark-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.733 + AP@0.5: 0.9 + AP@0.75: 0.81 + AR: 0.786 + AR@0.5: 0.938 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-256x192-528ec248_20220926.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-384x288.py + In Collection: DarkPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_res101_dark-8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.749 + AP@0.5: 0.905 + AP@0.75: 0.818 + AR: 0.799 + AR@0.5: 0.94 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-384x288-487d40a4_20220926.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-256x192.py + In Collection: DarkPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_res152_dark-8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.743 + AP@0.5: 0.906 + AP@0.75: 0.819 + AR: 0.796 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-256x192-f754df5f_20221031.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-384x288.py + In Collection: DarkPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_res152_dark-8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.757 + AP@0.5: 0.907 + AP@0.75: 0.825 + AR: 0.805 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-384x288-329f8454_20221031.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_fp16_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_fp16_coco.md index 2731ca8534..8785e3baeb 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_fp16_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/resnet_fp16_coco.md @@ -1,73 +1,73 @@ - - -
-SimpleBaseline2D (ECCV'2018) - -```bibtex -@inproceedings{xiao2018simple, - title={Simple baselines for human pose estimation and tracking}, - author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, - booktitle={Proceedings of the European conference on computer vision (ECCV)}, - pages={466--481}, - year={2018} -} -``` - -
- - - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
- - - -
-FP16 (ArXiv'2017) - -```bibtex -@article{micikevicius2017mixed, - title={Mixed precision training}, - author={Micikevicius, Paulius and Narang, Sharan and Alben, Jonah and Diamos, Gregory and Elsen, Erich and Garcia, David and Ginsburg, Boris and Houston, Michael and Kuchaiev, Oleksii and Venkatesh, Ganesh and others}, - journal={arXiv preprint arXiv:1710.03740}, - year={2017} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [pose_resnet_50_fp16](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_fp16-8xb64-210e_coco-256x192.py) | 256x192 | 0.716 | 0.898 | 0.798 | 0.772 | 0.937 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_fp16-8xb64-210e_coco-256x192-463da051_20220927.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_fp16-8xb64-210e_coco-256x192_20220927.log) | + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+FP16 (ArXiv'2017) + +```bibtex +@article{micikevicius2017mixed, + title={Mixed precision training}, + author={Micikevicius, Paulius and Narang, Sharan and Alben, Jonah and Diamos, Gregory and Elsen, Erich and Garcia, David and Ginsburg, Boris and Houston, Michael and Kuchaiev, Oleksii and Venkatesh, Ganesh and others}, + journal={arXiv preprint arXiv:1710.03740}, + year={2017} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_resnet_50_fp16](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_fp16-8xb64-210e_coco-256x192.py) | 256x192 | 0.716 | 0.898 | 0.798 | 0.772 | 0.937 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_fp16-8xb64-210e_coco-256x192-463da051_20220927.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_fp16-8xb64-210e_coco-256x192_20220927.log) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/resnetv1d_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/resnetv1d_coco.md index 1067201532..59ac34f4b5 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/resnetv1d_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/resnetv1d_coco.md @@ -1,45 +1,45 @@ - - -
-ResNetV1D (CVPR'2019) - -```bibtex -@inproceedings{he2019bag, - title={Bag of tricks for image classification with convolutional neural networks}, - author={He, Tong and Zhang, Zhi and Zhang, Hang and Zhang, Zhongyue and Xie, Junyuan and Li, Mu}, - booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, - pages={558--567}, - year={2019} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [pose_resnetv1d_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-256x192.py) | 256x192 | 0.722 | 0.897 | 0.796 | 0.777 | 0.936 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-256x192-27545d63_20221020.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-256x192_20221020.log) | -| [pose_resnetv1d_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-384x288.py) | 384x288 | 0.730 | 0.899 | 0.800 | 0.782 | 0.935 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-384x288-0646b46e_20221020.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-384x288_20221020.log) | -| [pose_resnetv1d_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb64-210e_coco-256x192.py) | 256x192 | 0.732 | 0.901 | 0.808 | 0.785 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb64-210e_coco-256x192-ee9e7212_20221021.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb64-210e_coco-256x192_20221021.log) | -| [pose_resnetv1d_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb32-210e_coco-384x288.py) | 384x288 | 0.748 | 0.906 | 0.817 | 0.798 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb32-210e_coco-384x288-d0b5875f_20221028.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb32-210e_coco-384x288_20221028.log) | -| [pose_resnetv1d_152](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb32-210e_coco-256x192.py) | 256x192 | 0.737 | 0.904 | 0.814 | 0.790 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb32-210e_coco-256x192-fd49f947_20221021.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb32-210e_coco-256x192_20221021.log) | -| [pose_resnetv1d_152](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb48-210e_coco-384x288.py) | 384x288 | 0.751 | 0.907 | 0.821 | 0.801 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb48-210e_coco-384x288-b9a99602_20221022.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb48-210e_coco-384x288_20221022.log) | + + +
+ResNetV1D (CVPR'2019) + +```bibtex +@inproceedings{he2019bag, + title={Bag of tricks for image classification with convolutional neural networks}, + author={He, Tong and Zhang, Zhi and Zhang, Hang and Zhang, Zhongyue and Xie, Junyuan and Li, Mu}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + pages={558--567}, + year={2019} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_resnetv1d_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-256x192.py) | 256x192 | 0.722 | 0.897 | 0.796 | 0.777 | 0.936 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-256x192-27545d63_20221020.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-256x192_20221020.log) | +| [pose_resnetv1d_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-384x288.py) | 384x288 | 0.730 | 0.899 | 0.800 | 0.782 | 0.935 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-384x288-0646b46e_20221020.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-384x288_20221020.log) | +| [pose_resnetv1d_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb64-210e_coco-256x192.py) | 256x192 | 0.732 | 0.901 | 0.808 | 0.785 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb64-210e_coco-256x192-ee9e7212_20221021.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb64-210e_coco-256x192_20221021.log) | +| [pose_resnetv1d_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb32-210e_coco-384x288.py) | 384x288 | 0.748 | 0.906 | 0.817 | 0.798 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb32-210e_coco-384x288-d0b5875f_20221028.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb32-210e_coco-384x288_20221028.log) | +| [pose_resnetv1d_152](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb32-210e_coco-256x192.py) | 256x192 | 0.737 | 0.904 | 0.814 | 0.790 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb32-210e_coco-256x192-fd49f947_20221021.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb32-210e_coco-256x192_20221021.log) | +| [pose_resnetv1d_152](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb48-210e_coco-384x288.py) | 384x288 | 0.751 | 0.907 | 0.821 | 0.801 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb48-210e_coco-384x288-b9a99602_20221022.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb48-210e_coco-384x288_20221022.log) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/resnetv1d_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/resnetv1d_coco.yml index 765c8aaabc..4acdfe41bf 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/resnetv1d_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/resnetv1d_coco.yml @@ -1,99 +1,99 @@ -Models: -- Config: 
configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: &id001 - - SimpleBaseline2D - - ResNetV1D - Training Data: COCO - Name: td-hm_resnetv1d50_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.722 - AP@0.5: 0.897 - AP@0.75: 0.796 - AR: 0.777 - AR@0.5: 0.936 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-256x192-27545d63_20221020.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_resnetv1d50_8xb64-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.73 - AP@0.5: 0.899 - AP@0.75: 0.8 - AR: 0.782 - AR@0.5: 0.935 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-384x288-0646b46e_20221020.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb64-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_resnetv1d101_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.732 - AP@0.5: 0.901 - AP@0.75: 0.808 - AR: 0.785 - AR@0.5: 0.940 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb64-210e_coco-256x192-ee9e7212_20221021.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb32-210e_coco-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_resnetv1d101_8xb32-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.748 - AP@0.5: 0.906 - AP@0.75: 0.817 - AR: 0.798 - AR@0.5: 0.941 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb32-210e_coco-384x288-d0b5875f_20221028.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb32-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_resnetv1d152_8xb32-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.737 - AP@0.5: 0.904 - AP@0.75: 0.814 - AR: 0.790 - AR@0.5: 0.94 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb32-210e_coco-256x192-fd49f947_20221021.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb48-210e_coco-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_resnetv1d152_8xb48-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.751 - AP@0.5: 0.907 - AP@0.75: 0.821 - AR: 0.801 - AR@0.5: 0.942 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb48-210e_coco-384x288-b9a99602_20221022.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNetV1D + Training Data: COCO + Name: td-hm_resnetv1d50_8xb64-210e_coco-256x192 + Results: + - Dataset: 
COCO + Metrics: + AP: 0.722 + AP@0.5: 0.897 + AP@0.75: 0.796 + AR: 0.777 + AR@0.5: 0.936 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-256x192-27545d63_20221020.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnetv1d50_8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.73 + AP@0.5: 0.899 + AP@0.75: 0.8 + AR: 0.782 + AR@0.5: 0.935 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-384x288-0646b46e_20221020.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnetv1d101_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.732 + AP@0.5: 0.901 + AP@0.75: 0.808 + AR: 0.785 + AR@0.5: 0.940 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb64-210e_coco-256x192-ee9e7212_20221021.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb32-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnetv1d101_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.748 + AP@0.5: 0.906 + AP@0.75: 0.817 + AR: 0.798 + AR@0.5: 0.941 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb32-210e_coco-384x288-d0b5875f_20221028.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb32-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnetv1d152_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.737 + AP@0.5: 0.904 + AP@0.75: 0.814 + AR: 0.790 + AR@0.5: 0.94 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb32-210e_coco-256x192-fd49f947_20221021.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb48-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnetv1d152_8xb48-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.751 + AP@0.5: 0.907 + AP@0.75: 0.821 + AR: 0.801 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb48-210e_coco-384x288-b9a99602_20221022.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/resnext_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/resnext_coco.md index 8862fddf6c..ca7c1b5a62 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/resnext_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/resnext_coco.md @@ -1,45 +1,45 @@ - - -
-ResNext (CVPR'2017) - -```bibtex -@inproceedings{xie2017aggregated, - title={Aggregated residual transformations for deep neural networks}, - author={Xie, Saining and Girshick, Ross and Doll{\'a}r, Piotr and Tu, Zhuowen and He, Kaiming}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={1492--1500}, - year={2017} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [pose_resnext_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-256x192.py) | 256x192 | 0.715 | 0.897 | 0.791 | 0.771 | 0.935 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnext/resnext50_coco_256x192-dcff15f6_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnext/resnext50_coco_256x192_20200727.log.json) | -| [pose_resnext_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-384x288.py) | 384x288 | 0.724 | 0.899 | 0.794 | 0.777 | 0.936 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnext/resnext50_coco_384x288-412c848f_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnext/resnext50_coco_384x288_20200727.log.json) | -| [pose_resnext_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb64-210e_coco-256x192.py) | 256x192 | 0.726 | 0.900 | 0.801 | 0.781 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnext/resnext101_coco_256x192-c7eba365_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnext/resnext101_coco_256x192_20200727.log.json) | -| [pose_resnext_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb32-210e_coco-384x288.py) | 384x288 | 0.744 | 0.903 | 0.815 | 0.794 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnext/resnext101_coco_384x288-f5eabcd6_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnext/resnext101_coco_384x288_20200727.log.json) | -| [pose_resnext_152](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb32-210e_coco-256x192.py) | 256x192 | 0.730 | 0.903 | 0.808 | 0.785 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_coco_256x192-102449aa_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_coco_256x192_20200727.log.json) | -| [pose_resnext_152](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb48-210e_coco-384x288.py) | 384x288 | 0.742 | 0.904 | 0.810 | 0.794 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_coco_384x288-806176df_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_coco_384x288_20200727.log.json) | + + +
+ResNext (CVPR'2017) + +```bibtex +@inproceedings{xie2017aggregated, + title={Aggregated residual transformations for deep neural networks}, + author={Xie, Saining and Girshick, Ross and Doll{\'a}r, Piotr and Tu, Zhuowen and He, Kaiming}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1492--1500}, + year={2017} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_resnext_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-256x192.py) | 256x192 | 0.715 | 0.897 | 0.791 | 0.771 | 0.935 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnext/resnext50_coco_256x192-dcff15f6_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnext/resnext50_coco_256x192_20200727.log.json) | +| [pose_resnext_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-384x288.py) | 384x288 | 0.724 | 0.899 | 0.794 | 0.777 | 0.936 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnext/resnext50_coco_384x288-412c848f_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnext/resnext50_coco_384x288_20200727.log.json) | +| [pose_resnext_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb64-210e_coco-256x192.py) | 256x192 | 0.726 | 0.900 | 0.801 | 0.781 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnext/resnext101_coco_256x192-c7eba365_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnext/resnext101_coco_256x192_20200727.log.json) | +| [pose_resnext_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb32-210e_coco-384x288.py) | 384x288 | 0.744 | 0.903 | 0.815 | 0.794 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnext/resnext101_coco_384x288-f5eabcd6_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnext/resnext101_coco_384x288_20200727.log.json) | +| [pose_resnext_152](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb32-210e_coco-256x192.py) | 256x192 | 0.730 | 0.903 | 0.808 | 0.785 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_coco_256x192-102449aa_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_coco_256x192_20200727.log.json) | +| [pose_resnext_152](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb48-210e_coco-384x288.py) | 384x288 | 0.742 | 0.904 | 0.810 | 0.794 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_coco_384x288-806176df_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_coco_384x288_20200727.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/resnext_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/resnext_coco.yml index 1ebb616ecd..29b02d2288 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/resnext_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/resnext_coco.yml @@ -1,99 +1,99 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: &id001 - - SimpleBaseline2D - - ResNext - Training Data: COCO - Name: td-hm_resnext50_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.715 - AP@0.5: 0.897 - AP@0.75: 0.791 - AR: 0.771 - AR@0.5: 0.935 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnext/resnext50_coco_256x192-dcff15f6_20200727.pth -- Config: 
configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_resnext50_8xb64-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.724 - AP@0.5: 0.899 - AP@0.75: 0.794 - AR: 0.777 - AR@0.5: 0.936 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnext/resnext50_coco_384x288-412c848f_20200727.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb64-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_resnext101_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.726 - AP@0.5: 0.9 - AP@0.75: 0.801 - AR: 0.781 - AR@0.5: 0.939 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnext/resnext101_coco_256x192-c7eba365_20200727.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb32-210e_coco-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_resnext101_8xb32-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.744 - AP@0.5: 0.903 - AP@0.75: 0.815 - AR: 0.794 - AR@0.5: 0.939 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnext/resnext101_coco_384x288-f5eabcd6_20200727.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb32-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_resnext152_8xb32-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.73 - AP@0.5: 0.903 - AP@0.75: 0.808 - AR: 0.785 - AR@0.5: 0.94 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_coco_256x192-102449aa_20200727.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb48-210e_coco-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_resnext152_8xb48-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.742 - AP@0.5: 0.904 - AP@0.75: 0.81 - AR: 0.794 - AR@0.5: 0.94 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_coco_384x288-806176df_20200727.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNext + Training Data: COCO + Name: td-hm_resnext50_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.715 + AP@0.5: 0.897 + AP@0.75: 0.791 + AR: 0.771 + AR@0.5: 0.935 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnext/resnext50_coco_256x192-dcff15f6_20200727.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnext50_8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.724 + AP@0.5: 0.899 + AP@0.75: 0.794 + AR: 0.777 + AR@0.5: 0.936 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnext/resnext50_coco_384x288-412c848f_20200727.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb64-210e_coco-256x192.py 
+ In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnext101_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.726 + AP@0.5: 0.9 + AP@0.75: 0.801 + AR: 0.781 + AR@0.5: 0.939 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnext/resnext101_coco_256x192-c7eba365_20200727.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb32-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnext101_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.744 + AP@0.5: 0.903 + AP@0.75: 0.815 + AR: 0.794 + AR@0.5: 0.939 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnext/resnext101_coco_384x288-f5eabcd6_20200727.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb32-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnext152_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.73 + AP@0.5: 0.903 + AP@0.75: 0.808 + AR: 0.785 + AR@0.5: 0.94 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_coco_256x192-102449aa_20200727.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb48-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_resnext152_8xb48-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.742 + AP@0.5: 0.904 + AP@0.75: 0.81 + AR: 0.794 + AR@0.5: 0.94 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_coco_384x288-806176df_20200727.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/rsn_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/rsn_coco.md index 40f570c3c1..b5470d1ef0 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/rsn_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/rsn_coco.md @@ -1,44 +1,44 @@ - - -
-RSN (ECCV'2020) - -```bibtex -@misc{cai2020learning, - title={Learning Delicate Local Representations for Multi-Person Pose Estimation}, - author={Yuanhao Cai and Zhicheng Wang and Zhengxiong Luo and Binyi Yin and Angang Du and Haoqian Wang and Xinyu Zhou and Erjin Zhou and Xiangyu Zhang and Jian Sun}, - year={2020}, - eprint={2003.04030}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [rsn_18](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn18_8xb32-210e_coco-256x192.py) | 256x192 | 0.704 | 0.887 | 0.781 | 0.773 | 0.927 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn18_8xb32-210e_coco-256x192-9049ed09_20221013.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn18_8xb32-210e_coco-256x192_20221013.log) | -| [rsn_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn50_8xb32-210e_coco-256x192.py) | 256x192 | 0.724 | 0.894 | 0.799 | 0.790 | 0.935 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn50_8xb32-210e_coco-256x192-c35901d5_20221013.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn50_8xb32-210e_coco-256x192_20221013.log) | -| [2xrsn_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xrsn50_8xb32-210e_coco-256x192.py) | 256x192 | 0.748 | 0.900 | 0.821 | 0.810 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xrsn50_8xb32-210e_coco-256x192-9ede341e_20221013.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xrsn50_8xb32-210e_coco-256x192_20221013.log) | -| [3xrsn_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xrsn50_8xb32-210e_coco-256x192.py) | 256x192 | 0.750 | 0.900 | 0.824 | 0.814 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xrsn50_8xb32-210e_coco-256x192-c3e3c4fe_20221013.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xrsn50_8xb32-210e_coco-256x192_20221013.log) | + + +
+RSN (ECCV'2020) + +```bibtex +@misc{cai2020learning, + title={Learning Delicate Local Representations for Multi-Person Pose Estimation}, + author={Yuanhao Cai and Zhicheng Wang and Zhengxiong Luo and Binyi Yin and Angang Du and Haoqian Wang and Xinyu Zhou and Erjin Zhou and Xiangyu Zhang and Jian Sun}, + year={2020}, + eprint={2003.04030}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [rsn_18](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn18_8xb32-210e_coco-256x192.py) | 256x192 | 0.704 | 0.887 | 0.781 | 0.773 | 0.927 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn18_8xb32-210e_coco-256x192-9049ed09_20221013.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn18_8xb32-210e_coco-256x192_20221013.log) | +| [rsn_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn50_8xb32-210e_coco-256x192.py) | 256x192 | 0.724 | 0.894 | 0.799 | 0.790 | 0.935 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn50_8xb32-210e_coco-256x192-c35901d5_20221013.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn50_8xb32-210e_coco-256x192_20221013.log) | +| [2xrsn_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xrsn50_8xb32-210e_coco-256x192.py) | 256x192 | 0.748 | 0.900 | 0.821 | 0.810 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xrsn50_8xb32-210e_coco-256x192-9ede341e_20221013.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xrsn50_8xb32-210e_coco-256x192_20221013.log) | +| [3xrsn_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xrsn50_8xb32-210e_coco-256x192.py) | 256x192 | 0.750 | 0.900 | 0.824 | 0.814 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xrsn50_8xb32-210e_coco-256x192-c3e3c4fe_20221013.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xrsn50_8xb32-210e_coco-256x192_20221013.log) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/rsn_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/rsn_coco.yml index 2974aaf2c0..9ef71e189a 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/rsn_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/rsn_coco.yml @@ -1,72 +1,72 @@ -Collections: -- Name: RSN - Paper: - Title: Learning Delicate Local Representations for Multi-Person Pose Estimation - URL: https://link.springer.com/chapter/10.1007/978-3-030-58580-8_27 - README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/rsn.md -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn18_8xb32-210e_coco-256x192.py - In Collection: RSN - Metadata: - Architecture: &id001 - - RSN - Training Data: COCO - Name: td-hm_rsn18_8xb32-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.704 - AP@0.5: 0.887 - AP@0.75: 0.781 - AR: 0.773 - AR@0.5: 0.927 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn18_8xb32-210e_coco-256x192-9049ed09_20221013.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn50_8xb32-210e_coco-256x192.py - In Collection: RSN - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_rsn50_8xb32-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.724 - AP@0.5: 0.894 - 
AP@0.75: 0.799 - AR: 0.79 - AR@0.5: 0.935 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn50_8xb32-210e_coco-256x192-c35901d5_20221013.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xrsn50_8xb32-210e_coco-256x192.py - In Collection: RSN - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_2xrsn50_8xb32-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.748 - AP@0.5: 0.9 - AP@0.75: 0.821 - AR: 0.81 - AR@0.5: 0.939 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xrsn50_8xb32-210e_coco-256x192-9ede341e_20221013.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xrsn50_8xb32-210e_coco-256x192.py - In Collection: RSN - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_3xrsn50_8xb32-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.75 - AP@0.5: 0.9 - AP@0.75: 0.824 - AR: 0.814 - AR@0.5: 0.941 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xrsn50_8xb32-210e_coco-256x192-c3e3c4fe_20221013.pth +Collections: +- Name: RSN + Paper: + Title: Learning Delicate Local Representations for Multi-Person Pose Estimation + URL: https://link.springer.com/chapter/10.1007/978-3-030-58580-8_27 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/rsn.md +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn18_8xb32-210e_coco-256x192.py + In Collection: RSN + Metadata: + Architecture: &id001 + - RSN + Training Data: COCO + Name: td-hm_rsn18_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.704 + AP@0.5: 0.887 + AP@0.75: 0.781 + AR: 0.773 + AR@0.5: 0.927 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn18_8xb32-210e_coco-256x192-9049ed09_20221013.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn50_8xb32-210e_coco-256x192.py + In Collection: RSN + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_rsn50_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.724 + AP@0.5: 0.894 + AP@0.75: 0.799 + AR: 0.79 + AR@0.5: 0.935 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn50_8xb32-210e_coco-256x192-c35901d5_20221013.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xrsn50_8xb32-210e_coco-256x192.py + In Collection: RSN + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_2xrsn50_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.748 + AP@0.5: 0.9 + AP@0.75: 0.821 + AR: 0.81 + AR@0.5: 0.939 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xrsn50_8xb32-210e_coco-256x192-9ede341e_20221013.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xrsn50_8xb32-210e_coco-256x192.py + In Collection: RSN + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_3xrsn50_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.75 + AP@0.5: 0.9 + AP@0.75: 0.824 + AR: 0.814 + AR@0.5: 0.941 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xrsn50_8xb32-210e_coco-256x192-c3e3c4fe_20221013.pth diff 
--git a/configs/body_2d_keypoint/topdown_heatmap/coco/scnet_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/scnet_coco.md index 5fb5833e23..c02ef7da77 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/scnet_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/scnet_coco.md @@ -1,43 +1,43 @@ - - -
-SCNet (CVPR'2020) - -```bibtex -@inproceedings{liu2020improving, - title={Improving Convolutional Networks with Self-Calibrated Convolutions}, - author={Liu, Jiang-Jiang and Hou, Qibin and Cheng, Ming-Ming and Wang, Changhu and Feng, Jiashi}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={10096--10105}, - year={2020} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [pose_scnet_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb64-210e_coco-256x192.py) | 256x192 | 0.728 | 0.899 | 0.807 | 0.784 | 0.938 | [ckpt](https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_coco_256x192-6920f829_20200709.pth) | [log](https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_coco_256x192_20200709.log.json) | -| [pose_scnet_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb32-210e_coco-384x288.py) | 384x288 | 0.751 | 0.906 | 0.818 | 0.802 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_coco_384x288-9cacd0ea_20200709.pth) | [log](https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_coco_384x288_20200709.log.json) | -| [pose_scnet_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb32-210e_coco-256x192.py) | 256x192 | 0.733 | 0.902 | 0.811 | 0.789 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_coco_256x192-6d348ef9_20200709.pth) | [log](https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_coco_256x192_20200709.log.json) | -| [pose_scnet_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb48-210e_coco-384x288.py) | 384x288 | 0.752 | 0.906 | 0.823 | 0.804 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_coco_384x288-0b6e631b_20200709.pth) | [log](https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_coco_384x288_20200709.log.json) | + + +
+SCNet (CVPR'2020) + +```bibtex +@inproceedings{liu2020improving, + title={Improving Convolutional Networks with Self-Calibrated Convolutions}, + author={Liu, Jiang-Jiang and Hou, Qibin and Cheng, Ming-Ming and Wang, Changhu and Feng, Jiashi}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={10096--10105}, + year={2020} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_scnet_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb64-210e_coco-256x192.py) | 256x192 | 0.728 | 0.899 | 0.807 | 0.784 | 0.938 | [ckpt](https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_coco_256x192-6920f829_20200709.pth) | [log](https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_coco_256x192_20200709.log.json) | +| [pose_scnet_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb32-210e_coco-384x288.py) | 384x288 | 0.751 | 0.906 | 0.818 | 0.802 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_coco_384x288-9cacd0ea_20200709.pth) | [log](https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_coco_384x288_20200709.log.json) | +| [pose_scnet_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb32-210e_coco-256x192.py) | 256x192 | 0.733 | 0.902 | 0.811 | 0.789 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_coco_256x192-6d348ef9_20200709.pth) | [log](https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_coco_256x192_20200709.log.json) | +| [pose_scnet_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb48-210e_coco-384x288.py) | 384x288 | 0.752 | 0.906 | 0.823 | 0.804 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_coco_384x288-0b6e631b_20200709.pth) | [log](https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_coco_384x288_20200709.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/scnet_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/scnet_coco.yml index cf68c67f90..33d1f99d03 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/scnet_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/scnet_coco.yml @@ -1,66 +1,66 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb64-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: &id001 - - SCNet - Training Data: COCO - Name: td-hm_scnet50_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.728 - AP@0.5: 0.899 - AP@0.75: 0.807 - AR: 0.784 - AR@0.5: 0.938 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_coco_256x192-6920f829_20200709.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb32-210e_coco-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: topdown_heatmap_scnet50_coco_384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.751 - AP@0.5: 0.906 - AP@0.75: 0.818 - AR: 0.802 - AR@0.5: 0.942 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_coco_384x288-9cacd0ea_20200709.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb32-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_scnet101_8xb32-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.733 - AP@0.5: 0.902 - AP@0.75: 0.811 - AR: 0.789 - AR@0.5: 0.94 - Task: Body 2D Keypoint - Weights: 
https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_coco_256x192-6d348ef9_20200709.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb48-210e_coco-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_scnet101_8xb48-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.752 - AP@0.5: 0.906 - AP@0.75: 0.823 - AR: 0.804 - AR@0.5: 0.943 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_coco_384x288-0b6e631b_20200709.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SCNet + Training Data: COCO + Name: td-hm_scnet50_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.728 + AP@0.5: 0.899 + AP@0.75: 0.807 + AR: 0.784 + AR@0.5: 0.938 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_coco_256x192-6920f829_20200709.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb32-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: topdown_heatmap_scnet50_coco_384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.751 + AP@0.5: 0.906 + AP@0.75: 0.818 + AR: 0.802 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_coco_384x288-9cacd0ea_20200709.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb32-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_scnet101_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.733 + AP@0.5: 0.902 + AP@0.75: 0.811 + AR: 0.789 + AR@0.5: 0.94 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_coco_256x192-6d348ef9_20200709.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb48-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_scnet101_8xb48-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.752 + AP@0.5: 0.906 + AP@0.75: 0.823 + AR: 0.804 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_coco_384x288-0b6e631b_20200709.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/seresnet_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/seresnet_coco.md index b704d9d190..f08f1f38bf 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/seresnet_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/seresnet_coco.md @@ -1,47 +1,47 @@ - - -
-SEResNet (CVPR'2018) - -```bibtex -@inproceedings{hu2018squeeze, - title={Squeeze-and-excitation networks}, - author={Hu, Jie and Shen, Li and Sun, Gang}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={7132--7141}, - year={2018} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [pose_seresnet_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-256x192.py) | 256x192 | 0.729 | 0.903 | 0.807 | 0.784 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_coco_256x192-25058b66_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_coco_256x192_20200727.log.json) | -| [pose_seresnet_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-384x288.py) | 384x288 | 0.748 | 0.904 | 0.819 | 0.799 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_coco_384x288-bc0b7680_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_coco_384x288_20200727.log.json) | -| [pose_seresnet_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb64-210e_coco-256x192.py) | 256x192 | 0.734 | 0.905 | 0.814 | 0.790 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_coco_256x192-83f29c4d_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_coco_256x192_20200727.log.json) | -| [pose_seresnet_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb32-210e_coco-384x288.py) | 384x288 | 0.754 | 0.907 | 0.823 | 0.805 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_coco_384x288-48de1709_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_coco_384x288_20200727.log.json) | -| [pose_seresnet_152\*](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb32-210e_coco-256x192.py) | 256x192 | 0.730 | 0.899 | 0.810 | 0.787 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_coco_256x192-1c628d79_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_coco_256x192_20200727.log.json) | -| [pose_seresnet_152\*](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb48-210e_coco-384x288.py) | 384x288 | 0.753 | 0.906 | 0.824 | 0.806 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_coco_384x288-58b23ee8_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_coco_384x288_20200727.log.json) | - -Note that * means without imagenet pre-training. + + +
+SEResNet (CVPR'2018) + +```bibtex +@inproceedings{hu2018squeeze, + title={Squeeze-and-excitation networks}, + author={Hu, Jie and Shen, Li and Sun, Gang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={7132--7141}, + year={2018} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_seresnet_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-256x192.py) | 256x192 | 0.729 | 0.903 | 0.807 | 0.784 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_coco_256x192-25058b66_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_coco_256x192_20200727.log.json) | +| [pose_seresnet_50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-384x288.py) | 384x288 | 0.748 | 0.904 | 0.819 | 0.799 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_coco_384x288-bc0b7680_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_coco_384x288_20200727.log.json) | +| [pose_seresnet_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb64-210e_coco-256x192.py) | 256x192 | 0.734 | 0.905 | 0.814 | 0.790 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_coco_256x192-83f29c4d_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_coco_256x192_20200727.log.json) | +| [pose_seresnet_101](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb32-210e_coco-384x288.py) | 384x288 | 0.754 | 0.907 | 0.823 | 0.805 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_coco_384x288-48de1709_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_coco_384x288_20200727.log.json) | +| [pose_seresnet_152\*](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb32-210e_coco-256x192.py) | 256x192 | 0.730 | 0.899 | 0.810 | 0.787 | 0.939 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_coco_256x192-1c628d79_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_coco_256x192_20200727.log.json) | +| [pose_seresnet_152\*](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb48-210e_coco-384x288.py) | 384x288 | 0.753 | 0.906 | 0.824 | 0.806 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_coco_384x288-58b23ee8_20200727.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_coco_384x288_20200727.log.json) | + +Note that * means without imagenet pre-training. 
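The config and checkpoint pairs listed in the tables above can be used directly for inference. The snippet below is a minimal sketch rather than part of the patched files: it assumes an MMPose 1.x installation, assumes it is run from the repository root so the relative config path resolves, and uses a hypothetical image path; the config path and weight URL are taken from the SEResNet-50 256x192 row of the table above.

```python
# Minimal inference sketch (assumes MMPose 1.x is installed; the image path is hypothetical).
from mmpose.apis import MMPoseInferencer

config = ('configs/body_2d_keypoint/topdown_heatmap/coco/'
          'td-hm_seresnet50_8xb64-210e_coco-256x192.py')
checkpoint = ('https://download.openmmlab.com/mmpose/top_down/seresnet/'
              'seresnet50_coco_256x192-25058b66_20200727.pth')

# Build a top-down 2D pose inferencer from the config and the released weights.
inferencer = MMPoseInferencer(pose2d=config, pose2d_weights=checkpoint)

# Run inference on a single image; the inferencer yields one result dict per input.
result = next(inferencer('demo/example_person.jpg', show=False))

# 'predictions' holds per-instance keypoints and scores; see the MMPose inferencer
# documentation for the exact structure of the returned dict.
print(result['predictions'])
```

Any other entry in this section can be swapped in by replacing the config path and the checkpoint URL with the corresponding "ckpt" link from its table.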
diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/seresnet_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/seresnet_coco.yml index 945e84e223..3a4f04ae6c 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/seresnet_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/seresnet_coco.yml @@ -1,98 +1,98 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: &id001 - - SEResNet - Training Data: COCO - Name: td-hm_seresnet50_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.729 - AP@0.5: 0.903 - AP@0.75: 0.807 - AR: 0.784 - AR@0.5: 0.941 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_coco_256x192-25058b66_20200727.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_seresnet50_8xb64-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.748 - AP@0.5: 0.904 - AP@0.75: 0.819 - AR: 0.799 - AR@0.5: 0.941 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_coco_384x288-bc0b7680_20200727.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb64-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_seresnet101_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.734 - AP@0.5: 0.905 - AP@0.75: 0.814 - AR: 0.79 - AR@0.5: 0.941 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_coco_256x192-83f29c4d_20200727.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb32-210e_coco-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_seresnet101_8xb32-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.754 - AP@0.5: 0.907 - AP@0.75: 0.823 - AR: 0.805 - AR@0.5: 0.943 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_coco_384x288-48de1709_20200727.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb32-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_seresnet152_8xb32-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.73 - AP@0.5: 0.899 - AP@0.75: 0.81 - AR: 0.787 - AR@0.5: 0.939 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_coco_256x192-1c628d79_20200727.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb48-210e_coco-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_seresnet152_8xb48-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.753 - AP@0.5: 0.906 - AP@0.75: 0.824 - AR: 0.806 - AR@0.5: 0.945 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_coco_384x288-58b23ee8_20200727.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SEResNet + Training Data: COCO + Name: 
td-hm_seresnet50_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.729 + AP@0.5: 0.903 + AP@0.75: 0.807 + AR: 0.784 + AR@0.5: 0.941 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_coco_256x192-25058b66_20200727.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_seresnet50_8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.748 + AP@0.5: 0.904 + AP@0.75: 0.819 + AR: 0.799 + AR@0.5: 0.941 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_coco_384x288-bc0b7680_20200727.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_seresnet101_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.734 + AP@0.5: 0.905 + AP@0.75: 0.814 + AR: 0.79 + AR@0.5: 0.941 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_coco_256x192-83f29c4d_20200727.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb32-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_seresnet101_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.754 + AP@0.5: 0.907 + AP@0.75: 0.823 + AR: 0.805 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_coco_384x288-48de1709_20200727.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb32-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_seresnet152_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.73 + AP@0.5: 0.899 + AP@0.75: 0.81 + AR: 0.787 + AR@0.5: 0.939 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_coco_256x192-1c628d79_20200727.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb48-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_seresnet152_8xb48-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.753 + AP@0.5: 0.906 + AP@0.75: 0.824 + AR: 0.806 + AR@0.5: 0.945 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_coco_384x288-58b23ee8_20200727.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv1_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv1_coco.md index 0c8be860ab..d33188929e 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv1_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv1_coco.md @@ -1,41 +1,41 @@ - - -
-ShufflenetV1 (CVPR'2018) - -```bibtex -@inproceedings{zhang2018shufflenet, - title={Shufflenet: An extremely efficient convolutional neural network for mobile devices}, - author={Zhang, Xiangyu and Zhou, Xinyu and Lin, Mengxiao and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={6848--6856}, - year={2018} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [pose_shufflenetv1](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-256x192.py) | 256x192 | 0.587 | 0.849 | 0.654 | 0.654 | 0.896 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-256x192-7a7ea4f4_20221013.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-256x192_20221013.log) | -| [pose_shufflenetv1](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-384x288.py) | 384x288 | 0.626 | 0.862 | 0.696 | 0.687 | 0.903 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-384x288-8342f8ba_20221013.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-384x288_20221013.log) | + + +
+ShufflenetV1 (CVPR'2018) + +```bibtex +@inproceedings{zhang2018shufflenet, + title={Shufflenet: An extremely efficient convolutional neural network for mobile devices}, + author={Zhang, Xiangyu and Zhou, Xinyu and Lin, Mengxiao and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={6848--6856}, + year={2018} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_shufflenetv1](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-256x192.py) | 256x192 | 0.587 | 0.849 | 0.654 | 0.654 | 0.896 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-256x192-7a7ea4f4_20221013.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-256x192_20221013.log) | +| [pose_shufflenetv1](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-384x288.py) | 384x288 | 0.626 | 0.862 | 0.696 | 0.687 | 0.903 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-384x288-8342f8ba_20221013.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-384x288_20221013.log) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv1_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv1_coco.yml index fbdc89936d..c20a130f8b 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv1_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv1_coco.yml @@ -1,35 +1,35 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: &id001 - - SimpleBaseline2D - - ShufflenetV1 - Training Data: COCO - Name: td-hm_shufflenetv1_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.587 - AP@0.5: 0.849 - AP@0.75: 0.654 - AR: 0.654 - AR@0.5: 0.896 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-256x192-7a7ea4f4_20221013.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_shufflenetv1_8xb64-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.626 - AP@0.5: 0.862 - AP@0.75: 0.696 - AR: 0.687 - AR@0.5: 0.903 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-384x288-8342f8ba_20221013.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ShufflenetV1 + Training Data: COCO + Name: td-hm_shufflenetv1_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.587 + AP@0.5: 0.849 + AP@0.75: 0.654 + AR: 0.654 + AR@0.5: 0.896 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-256x192-7a7ea4f4_20221013.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 
+ Training Data: COCO + Name: td-hm_shufflenetv1_8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.626 + AP@0.5: 0.862 + AP@0.75: 0.696 + AR: 0.687 + AR@0.5: 0.903 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-384x288-8342f8ba_20221013.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv2_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv2_coco.md index f613f4fef1..3c80e76e0b 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv2_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv2_coco.md @@ -1,41 +1,41 @@ - - -
-ShufflenetV2 (ECCV'2018) - -```bibtex -@inproceedings{ma2018shufflenet, - title={Shufflenet v2: Practical guidelines for efficient cnn architecture design}, - author={Ma, Ningning and Zhang, Xiangyu and Zheng, Hai-Tao and Sun, Jian}, - booktitle={Proceedings of the European conference on computer vision (ECCV)}, - pages={116--131}, - year={2018} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [pose_shufflenetv2](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-256x192.py) | 256x192 | 0.602 | 0.857 | 0.672 | 0.668 | 0.902 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-256x192-51fb931e_20221014.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-256x192_20221014.log) | -| [pose_shufflenetv2](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-384x288.py) | 384x288 | 0.638 | 0.866 | 0.707 | 0.699 | 0.910 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-384x288-d30ab55c_20221014.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-384x288_20221014.log) | + + +
+ShufflenetV2 (ECCV'2018) + +```bibtex +@inproceedings{ma2018shufflenet, + title={Shufflenet v2: Practical guidelines for efficient cnn architecture design}, + author={Ma, Ningning and Zhang, Xiangyu and Zheng, Hai-Tao and Sun, Jian}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={116--131}, + year={2018} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
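
The table that follows pairs each config with its released checkpoint and training log. As a minimal usage sketch (assuming the MMPose 1.x `mmpose.apis` interface; the image path and `device` are placeholders, while the config path and checkpoint URL are the ShuffleNetV2 256x192 entry listed below):

```python
from mmpose.apis import inference_topdown, init_model

# Config and checkpoint taken from the table below (ShuffleNetV2, 256x192).
config = ('configs/body_2d_keypoint/topdown_heatmap/coco/'
          'td-hm_shufflenetv2_8xb64-210e_coco-256x192.py')
checkpoint = ('https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/'
              'topdown_heatmap/coco/'
              'td-hm_shufflenetv2_8xb64-210e_coco-256x192-51fb931e_20221014.pth')

# Build the top-down estimator and load the pretrained weights.
model = init_model(config, checkpoint, device='cpu')

# Without explicit person boxes, the whole image is treated as a single instance.
results = inference_topdown(model, 'demo.jpg')
print(results[0].pred_instances.keypoints.shape)  # (1, 17, 2): 17 COCO keypoints
```
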
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_shufflenetv2](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-256x192.py) | 256x192 | 0.602 | 0.857 | 0.672 | 0.668 | 0.902 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-256x192-51fb931e_20221014.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-256x192_20221014.log) | +| [pose_shufflenetv2](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-384x288.py) | 384x288 | 0.638 | 0.866 | 0.707 | 0.699 | 0.910 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-384x288-d30ab55c_20221014.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-384x288_20221014.log) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv2_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv2_coco.yml index cdda3a8667..3c87873434 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv2_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv2_coco.yml @@ -1,35 +1,35 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: &id001 - - SimpleBaseline2D - - ShufflenetV2 - Training Data: COCO - Name: td-hm_shufflenetv2_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.602 - AP@0.5: 0.857 - AP@0.75: 0.672 - AR: 0.668 - AR@0.5: 0.902 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-256x192-51fb931e_20221014.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_shufflenetv2_8xb64-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.638 - AP@0.5: 0.866 - AP@0.75: 0.707 - AR: 0.699 - AR@0.5: 0.91 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-384x288-d30ab55c_20221014.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ShufflenetV2 + Training Data: COCO + Name: td-hm_shufflenetv2_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.602 + AP@0.5: 0.857 + AP@0.75: 0.672 + AR: 0.668 + AR@0.5: 0.902 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-256x192-51fb931e_20221014.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 
+ Training Data: COCO + Name: td-hm_shufflenetv2_8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.638 + AP@0.5: 0.866 + AP@0.75: 0.707 + AR: 0.699 + AR@0.5: 0.91 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-384x288-d30ab55c_20221014.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/swin_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/swin_coco.md index 5bcc5bd187..0d142cefac 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/swin_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/swin_coco.md @@ -1,78 +1,78 @@ - - -
-SimpleBaseline2D (ECCV'2018) - -```bibtex -@inproceedings{xiao2018simple, - title={Simple baselines for human pose estimation and tracking}, - author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, - booktitle={Proceedings of the European conference on computer vision (ECCV)}, - pages={466--481}, - year={2018} -} -``` - -
- - - -
-Swin (ICCV'2021) - -```bibtex -@inproceedings{liu2021swin, - title={Swin transformer: Hierarchical vision transformer using shifted windows}, - author={Liu, Ze and Lin, Yutong and Cao, Yue and Hu, Han and Wei, Yixuan and Zhang, Zheng and Lin, Stephen and Guo, Baining}, - booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, - pages={10012--10022}, - year={2021} -} -``` - -
- - - -
-FPN (CVPR'2017) - -```bibtex -@inproceedings{lin2017feature, - title={Feature pyramid networks for object detection}, - author={Lin, Tsung-Yi and Doll{\'a}r, Piotr and Girshick, Ross and He, Kaiming and Hariharan, Bharath and Belongie, Serge}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={2117--2125}, - year={2017} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [pose_swin_t](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-t-p4-w7_8xb32-210e_coco-256x192.py) | 256x192 | 0.724 | 0.901 | 0.806 | 0.782 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/top_down/swin/swin_t_p4_w7_coco_256x192-eaefe010_20220503.pth) | [log](https://download.openmmlab.com/mmpose/top_down/swin/swin_t_p4_w7_coco_256x192_20220503.log.json) | -| [pose_swin_b](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-256x192.py) | 256x192 | 0.737 | 0.904 | 0.820 | 0.794 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/top_down/swin/swin_b_p4_w7_coco_256x192-7432be9e_20220705.pth) | [log](https://download.openmmlab.com/mmpose/top_down/swin/swin_b_p4_w7_coco_256x192_20220705.log.json) | -| [pose_swin_b](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-384x288.py) | 384x288 | 0.759 | 0.910 | 0.832 | 0.811 | 0.946 | [ckpt](https://download.openmmlab.com/mmpose/top_down/swin/swin_b_p4_w7_coco_384x288-3abf54f9_20220705.pth) | [log](https://download.openmmlab.com/mmpose/top_down/swin/swin_b_p4_w7_coco_384x288_20220705.log.json) | -| [pose_swin_l](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-256x192.py) | 256x192 | 0.743 | 0.906 | 0.821 | 0.798 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/swin/swin_l_p4_w7_coco_256x192-642a89db_20220705.pth) | [log](https://download.openmmlab.com/mmpose/top_down/swin/swin_l_p4_w7_coco_256x192_20220705.log.json) | -| [pose_swin_l](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-384x288.py) | 384x288 | 0.763 | 0.912 | 0.830 | 0.814 | 0.949 | [ckpt](https://download.openmmlab.com/mmpose/top_down/swin/swin_l_p4_w7_coco_384x288-c36b7845_20220705.pth) | [log](https://download.openmmlab.com/mmpose/top_down/swin/swin_l_p4_w7_coco_384x288_20220705.log.json) | + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+Swin (ICCV'2021) + +```bibtex +@inproceedings{liu2021swin, + title={Swin transformer: Hierarchical vision transformer using shifted windows}, + author={Liu, Ze and Lin, Yutong and Cao, Yue and Hu, Han and Wei, Yixuan and Zhang, Zheng and Lin, Stephen and Guo, Baining}, + booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, + pages={10012--10022}, + year={2021} +} +``` + +
+ + + +
+FPN (CVPR'2017) + +```bibtex +@inproceedings{lin2017feature, + title={Feature pyramid networks for object detection}, + author={Lin, Tsung-Yi and Doll{\'a}r, Piotr and Girshick, Ross and He, Kaiming and Hariharan, Bharath and Belongie, Serge}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={2117--2125}, + year={2017} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
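
The numbers in the table that follows were obtained with person boxes from a detector reaching 56.4 human AP, i.e. the top-down models consume externally detected boxes rather than detecting people themselves. A minimal sketch of that flow (assuming the MMPose 1.x `mmpose.apis` interface; the image and the box coordinates are placeholder values, and the config/checkpoint are the Swin-B 256x192 entry listed below):

```python
import numpy as np

from mmpose.apis import inference_topdown, init_model

# Config and checkpoint taken from the table below (Swin-B, 256x192).
config = ('configs/body_2d_keypoint/topdown_heatmap/coco/'
          'td-hm_swin-b-p4-w7_8xb32-210e_coco-256x192.py')
checkpoint = ('https://download.openmmlab.com/mmpose/top_down/swin/'
              'swin_b_p4_w7_coco_256x192-7432be9e_20220705.pth')
model = init_model(config, checkpoint, device='cpu')

# One (x1, y1, x2, y2) box per detected person, e.g. produced by an MMDetection model.
person_bboxes = np.array([[50., 40., 210., 400.],
                          [230., 60., 380., 410.]], dtype=np.float32)

# Each box yields one PoseDataSample with per-keypoint coordinates and scores.
results = inference_topdown(model, 'demo.jpg', bboxes=person_bboxes, bbox_format='xyxy')
print(len(results), results[0].pred_instances.keypoint_scores.shape)  # 2 (1, 17)
```
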
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_swin_t](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-t-p4-w7_8xb32-210e_coco-256x192.py) | 256x192 | 0.724 | 0.901 | 0.806 | 0.782 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/top_down/swin/swin_t_p4_w7_coco_256x192-eaefe010_20220503.pth) | [log](https://download.openmmlab.com/mmpose/top_down/swin/swin_t_p4_w7_coco_256x192_20220503.log.json) | +| [pose_swin_b](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-256x192.py) | 256x192 | 0.737 | 0.904 | 0.820 | 0.794 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/top_down/swin/swin_b_p4_w7_coco_256x192-7432be9e_20220705.pth) | [log](https://download.openmmlab.com/mmpose/top_down/swin/swin_b_p4_w7_coco_256x192_20220705.log.json) | +| [pose_swin_b](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-384x288.py) | 384x288 | 0.759 | 0.910 | 0.832 | 0.811 | 0.946 | [ckpt](https://download.openmmlab.com/mmpose/top_down/swin/swin_b_p4_w7_coco_384x288-3abf54f9_20220705.pth) | [log](https://download.openmmlab.com/mmpose/top_down/swin/swin_b_p4_w7_coco_384x288_20220705.log.json) | +| [pose_swin_l](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-256x192.py) | 256x192 | 0.743 | 0.906 | 0.821 | 0.798 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/top_down/swin/swin_l_p4_w7_coco_256x192-642a89db_20220705.pth) | [log](https://download.openmmlab.com/mmpose/top_down/swin/swin_l_p4_w7_coco_256x192_20220705.log.json) | +| [pose_swin_l](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-384x288.py) | 384x288 | 0.763 | 0.912 | 0.830 | 0.814 | 0.949 | [ckpt](https://download.openmmlab.com/mmpose/top_down/swin/swin_l_p4_w7_coco_384x288-c36b7845_20220705.pth) | [log](https://download.openmmlab.com/mmpose/top_down/swin/swin_l_p4_w7_coco_384x288_20220705.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/swin_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/swin_coco.yml index 09ede5fa5c..569993ed43 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/swin_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/swin_coco.yml @@ -1,99 +1,99 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-t-p4-w7_8xb32-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: &id001 - - SimpleBaseline2D - - Swin - Training Data: COCO - Name: td-hm_swin-t-p4-w7_8xb32-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.724 - AP@0.5: 0.901 - AP@0.75: 0.806 - AR: 0.782 - AR@0.5: 0.94 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/swin/swin_t_p4_w7_coco_256x192-eaefe010_20220503.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_swin-b-p4-w7_8xb32-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.737 - AP@0.5: 0.904 - AP@0.75: 0.82 - AR: 0.794 - AR@0.5: 0.942 - Task: Body 2D Keypoint - Weights: 
https://download.openmmlab.com/mmpose/top_down/swin/swin_b_p4_w7_coco_256x192-7432be9e_20220705.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_swin-b-p4-w7_8xb32-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.759 - AP@0.5: 0.91 - AP@0.75: 0.832 - AR: 0.811 - AR@0.5: 0.946 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/swin/swin_b_p4_w7_coco_384x288-3abf54f9_20220705.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_swin-l-p4-w7_8xb32-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.743 - AP@0.5: 0.906 - AP@0.75: 0.821 - AR: 0.798 - AR@0.5: 0.943 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/swin/swin_l_p4_w7_coco_256x192-642a89db_20220705.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_swin-l-p4-w7_8xb32-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.763 - AP@0.5: 0.912 - AP@0.75: 0.83 - AR: 0.814 - AR@0.5: 0.949 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/swin/swin_l_p4_w7_coco_384x288-c36b7845_20220705.pth -- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/swin_b_p4_w7_fpn_coco_256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO - Name: topdown_heatmap_swin_b_p4_w7_fpn_coco_256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.741 - AP@0.5: 0.907 - AP@0.75: 0.821 - AR: 0.798 - AR@0.5: 0.946 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/swin/swin_b_p4_w7_fpn_coco_256x192-a3b91c45_20220705.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-t-p4-w7_8xb32-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - Swin + Training Data: COCO + Name: td-hm_swin-t-p4-w7_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.724 + AP@0.5: 0.901 + AP@0.75: 0.806 + AR: 0.782 + AR@0.5: 0.94 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/swin/swin_t_p4_w7_coco_256x192-eaefe010_20220503.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_swin-b-p4-w7_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.737 + AP@0.5: 0.904 + AP@0.75: 0.82 + AR: 0.794 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/swin/swin_b_p4_w7_coco_256x192-7432be9e_20220705.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_swin-b-p4-w7_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.759 + AP@0.5: 0.91 + AP@0.75: 0.832 + AR: 0.811 + AR@0.5: 0.946 + Task: Body 2D Keypoint + Weights: 
https://download.openmmlab.com/mmpose/top_down/swin/swin_b_p4_w7_coco_384x288-3abf54f9_20220705.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_swin-l-p4-w7_8xb32-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.743 + AP@0.5: 0.906 + AP@0.75: 0.821 + AR: 0.798 + AR@0.5: 0.943 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/swin/swin_l_p4_w7_coco_256x192-642a89db_20220705.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_swin-l-p4-w7_8xb32-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.763 + AP@0.5: 0.912 + AP@0.75: 0.83 + AR: 0.814 + AR@0.5: 0.949 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/swin/swin_l_p4_w7_coco_384x288-c36b7845_20220705.pth +- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/swin_b_p4_w7_fpn_coco_256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO + Name: topdown_heatmap_swin_b_p4_w7_fpn_coco_256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.741 + AP@0.5: 0.907 + AP@0.75: 0.821 + AR: 0.798 + AR@0.5: 0.946 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/swin/swin_b_p4_w7_fpn_coco_256x192-a3b91c45_20220705.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xmspn50_8xb32-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xmspn50_8xb32-210e_coco-256x192.py index 7af125c24d..131a4feb98 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xmspn50_8xb32-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xmspn50_8xb32-210e_coco-256x192.py @@ -1,152 +1,152 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -# multiple kernel_sizes of heatmap gaussian for 'Megvii' approach. 
-kernel_sizes = [15, 11, 9, 7, 5] -codec = [ - dict( - type='MegviiHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - kernel_size=kernel_size) for kernel_size in kernel_sizes -] - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='MSPN', - unit_channels=256, - num_stages=2, - num_units=4, - num_blocks=[3, 4, 6, 3], - norm_cfg=dict(type='BN'), - init_cfg=dict( - type='Pretrained', - checkpoint='torchvision://resnet50', - )), - head=dict( - type='MSPNHead', - out_shape=(64, 48), - unit_channels=256, - out_channels=17, - num_stages=2, - num_units=4, - norm_cfg=dict(type='BN'), - # each sub list is for a stage - # and each element in each list is for a unit - level_indices=[0, 1, 2, 3] + [1, 2, 3, 4], - loss=([ - dict( - type='KeypointMSELoss', - use_target_weight=True, - loss_weight=0.25) - ] * 3 + [ - dict( - type='KeypointOHKMMSELoss', - use_target_weight=True, - loss_weight=1.) - ]) * 2, - decoder=codec[-1]), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec[0]['input_size']), - dict(type='GenerateTarget', multilevel=True, encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec[0]['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json', - nms_mode='none') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = 
dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +# multiple kernel_sizes of heatmap gaussian for 'Megvii' approach. +kernel_sizes = [15, 11, 9, 7, 5] +codec = [ + dict( + type='MegviiHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + kernel_size=kernel_size) for kernel_size in kernel_sizes +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MSPN', + unit_channels=256, + num_stages=2, + num_units=4, + num_blocks=[3, 4, 6, 3], + norm_cfg=dict(type='BN'), + init_cfg=dict( + type='Pretrained', + checkpoint='torchvision://resnet50', + )), + head=dict( + type='MSPNHead', + out_shape=(64, 48), + unit_channels=256, + out_channels=17, + num_stages=2, + num_units=4, + norm_cfg=dict(type='BN'), + # each sub list is for a stage + # and each element in each list is for a unit + level_indices=[0, 1, 2, 3] + [1, 2, 3, 4], + loss=([ + dict( + type='KeypointMSELoss', + use_target_weight=True, + loss_weight=0.25) + ] * 3 + [ + dict( + type='KeypointOHKMMSELoss', + use_target_weight=True, + loss_weight=1.) + ]) * 2, + decoder=codec[-1]), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='GenerateTarget', multilevel=True, encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_mode='none') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xrsn50_8xb32-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xrsn50_8xb32-210e_coco-256x192.py index 0680f6995e..0eb4a7165a 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xrsn50_8xb32-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_2xrsn50_8xb32-210e_coco-256x192.py @@ -1,154 +1,154 @@ -_base_ = 
['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -# multiple kernel_sizes of heatmap gaussian for 'Megvii' approach. -kernel_sizes = [15, 11, 9, 7, 5] -codec = [ - dict( - type='MegviiHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - kernel_size=kernel_size) for kernel_size in kernel_sizes -] - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='RSN', - unit_channels=256, - num_stages=2, - num_units=4, - num_blocks=[3, 4, 6, 3], - num_steps=4, - norm_cfg=dict(type='BN'), - ), - head=dict( - type='MSPNHead', - out_shape=(64, 48), - unit_channels=256, - out_channels=17, - num_stages=2, - num_units=4, - norm_cfg=dict(type='BN'), - # each sub list is for a stage - # and each element in each list is for a unit - level_indices=[0, 1, 2, 3] + [1, 2, 3, 4], - loss=([ - dict( - type='KeypointMSELoss', - use_target_weight=True, - loss_weight=0.25) - ] * 3 + [ - dict( - type='KeypointOHKMMSELoss', - use_target_weight=True, - loss_weight=1.) - ]) * 2, - decoder=codec[-1]), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec[0]['input_size']), - dict(type='GenerateTarget', multilevel=True, encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec[0]['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json', - 
nms_mode='none') -test_evaluator = val_evaluator - -# fp16 settings -fp16 = dict(loss_scale='dynamic') +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +# multiple kernel_sizes of heatmap gaussian for 'Megvii' approach. +kernel_sizes = [15, 11, 9, 7, 5] +codec = [ + dict( + type='MegviiHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + kernel_size=kernel_size) for kernel_size in kernel_sizes +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='RSN', + unit_channels=256, + num_stages=2, + num_units=4, + num_blocks=[3, 4, 6, 3], + num_steps=4, + norm_cfg=dict(type='BN'), + ), + head=dict( + type='MSPNHead', + out_shape=(64, 48), + unit_channels=256, + out_channels=17, + num_stages=2, + num_units=4, + norm_cfg=dict(type='BN'), + # each sub list is for a stage + # and each element in each list is for a unit + level_indices=[0, 1, 2, 3] + [1, 2, 3, 4], + loss=([ + dict( + type='KeypointMSELoss', + use_target_weight=True, + loss_weight=0.25) + ] * 3 + [ + dict( + type='KeypointOHKMMSELoss', + use_target_weight=True, + loss_weight=1.) 
+ ]) * 2, + decoder=codec[-1]), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='GenerateTarget', multilevel=True, encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_mode='none') +test_evaluator = val_evaluator + +# fp16 settings +fp16 = dict(loss_scale='dynamic') diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xmspn50_8xb32-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xmspn50_8xb32-210e_coco-256x192.py index 41162f01e5..0d3020dacb 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xmspn50_8xb32-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xmspn50_8xb32-210e_coco-256x192.py @@ -1,152 +1,152 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -# multiple kernel_sizes of heatmap gaussian for 'Megvii' approach. 
-kernel_sizes = [15, 11, 9, 7, 5] -codec = [ - dict( - type='MegviiHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - kernel_size=kernel_size) for kernel_size in kernel_sizes -] - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='MSPN', - unit_channels=256, - num_stages=3, - num_units=4, - num_blocks=[3, 4, 6, 3], - norm_cfg=dict(type='BN'), - init_cfg=dict( - type='Pretrained', - checkpoint='torchvision://resnet50', - )), - head=dict( - type='MSPNHead', - out_shape=(64, 48), - unit_channels=256, - out_channels=17, - num_stages=3, - num_units=4, - norm_cfg=dict(type='BN'), - # each sub list is for a stage - # and each element in each list is for a unit - level_indices=[0, 1, 2, 3] * 2 + [1, 2, 3, 4], - loss=([ - dict( - type='KeypointMSELoss', - use_target_weight=True, - loss_weight=0.25) - ] * 3 + [ - dict( - type='KeypointOHKMMSELoss', - use_target_weight=True, - loss_weight=1.) - ]) * 3, - decoder=codec[-1]), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec[0]['input_size']), - dict(type='GenerateTarget', multilevel=True, encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec[0]['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json', - nms_mode='none') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = 
dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +# multiple kernel_sizes of heatmap gaussian for 'Megvii' approach. +kernel_sizes = [15, 11, 9, 7, 5] +codec = [ + dict( + type='MegviiHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + kernel_size=kernel_size) for kernel_size in kernel_sizes +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MSPN', + unit_channels=256, + num_stages=3, + num_units=4, + num_blocks=[3, 4, 6, 3], + norm_cfg=dict(type='BN'), + init_cfg=dict( + type='Pretrained', + checkpoint='torchvision://resnet50', + )), + head=dict( + type='MSPNHead', + out_shape=(64, 48), + unit_channels=256, + out_channels=17, + num_stages=3, + num_units=4, + norm_cfg=dict(type='BN'), + # each sub list is for a stage + # and each element in each list is for a unit + level_indices=[0, 1, 2, 3] * 2 + [1, 2, 3, 4], + loss=([ + dict( + type='KeypointMSELoss', + use_target_weight=True, + loss_weight=0.25) + ] * 3 + [ + dict( + type='KeypointOHKMMSELoss', + use_target_weight=True, + loss_weight=1.) + ]) * 3, + decoder=codec[-1]), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='GenerateTarget', multilevel=True, encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_mode='none') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xrsn50_8xb32-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xrsn50_8xb32-210e_coco-256x192.py index 99326451c6..afc35be7b5 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xrsn50_8xb32-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_3xrsn50_8xb32-210e_coco-256x192.py @@ -1,154 +1,154 @@ -_base_ = 
['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -# multiple kernel_sizes of heatmap gaussian for 'Megvii' approach. -kernel_sizes = [15, 11, 9, 7, 5] -codec = [ - dict( - type='MegviiHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - kernel_size=kernel_size) for kernel_size in kernel_sizes -] - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='RSN', - unit_channels=256, - num_stages=3, - num_units=4, - num_blocks=[3, 4, 6, 3], - num_steps=4, - norm_cfg=dict(type='BN'), - ), - head=dict( - type='MSPNHead', - out_shape=(64, 48), - unit_channels=256, - out_channels=17, - num_stages=3, - num_units=4, - norm_cfg=dict(type='BN'), - # each sub list is for a stage - # and each element in each list is for a unit - level_indices=[0, 1, 2, 3] * 2 + [1, 2, 3, 4], - loss=([ - dict( - type='KeypointMSELoss', - use_target_weight=True, - loss_weight=0.25) - ] * 3 + [ - dict( - type='KeypointOHKMMSELoss', - use_target_weight=True, - loss_weight=1.) - ]) * 3, - decoder=codec[-1]), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec[0]['input_size']), - dict(type='GenerateTarget', multilevel=True, encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec[0]['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json', - 
nms_mode='none') -test_evaluator = val_evaluator - -# fp16 settings -fp16 = dict(loss_scale='dynamic') +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +# multiple kernel_sizes of heatmap gaussian for 'Megvii' approach. +kernel_sizes = [15, 11, 9, 7, 5] +codec = [ + dict( + type='MegviiHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + kernel_size=kernel_size) for kernel_size in kernel_sizes +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='RSN', + unit_channels=256, + num_stages=3, + num_units=4, + num_blocks=[3, 4, 6, 3], + num_steps=4, + norm_cfg=dict(type='BN'), + ), + head=dict( + type='MSPNHead', + out_shape=(64, 48), + unit_channels=256, + out_channels=17, + num_stages=3, + num_units=4, + norm_cfg=dict(type='BN'), + # each sub list is for a stage + # and each element in each list is for a unit + level_indices=[0, 1, 2, 3] * 2 + [1, 2, 3, 4], + loss=([ + dict( + type='KeypointMSELoss', + use_target_weight=True, + loss_weight=0.25) + ] * 3 + [ + dict( + type='KeypointOHKMMSELoss', + use_target_weight=True, + loss_weight=1.) 
+ ]) * 3, + decoder=codec[-1]), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='GenerateTarget', multilevel=True, encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_mode='none') +test_evaluator = val_evaluator + +# fp16 settings +fp16 = dict(loss_scale='dynamic') diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_4xmspn50_8xb32-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_4xmspn50_8xb32-210e_coco-256x192.py index 999245e74d..a3870f4828 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_4xmspn50_8xb32-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_4xmspn50_8xb32-210e_coco-256x192.py @@ -1,152 +1,152 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -# multiple kernel_sizes of heatmap gaussian for 'Megvii' approach. 
-kernel_sizes = [15, 11, 9, 7, 5] -codec = [ - dict( - type='MegviiHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - kernel_size=kernel_size) for kernel_size in kernel_sizes -] - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='MSPN', - unit_channels=256, - num_stages=4, - num_units=4, - num_blocks=[3, 4, 6, 3], - norm_cfg=dict(type='BN'), - init_cfg=dict( - type='Pretrained', - checkpoint='torchvision://resnet50', - )), - head=dict( - type='MSPNHead', - out_shape=(64, 48), - unit_channels=256, - out_channels=17, - num_stages=4, - num_units=4, - norm_cfg=dict(type='BN'), - # each sub list is for a stage - # and each element in each list is for a unit - level_indices=[0, 1, 2, 3] * 3 + [1, 2, 3, 4], - loss=([ - dict( - type='KeypointMSELoss', - use_target_weight=True, - loss_weight=0.25) - ] * 3 + [ - dict( - type='KeypointOHKMMSELoss', - use_target_weight=True, - loss_weight=1.) - ]) * 4, - decoder=codec[-1]), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec[0]['input_size']), - dict(type='GenerateTarget', multilevel=True, encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec[0]['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json', - nms_mode='none') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = 
dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +# multiple kernel_sizes of heatmap gaussian for 'Megvii' approach. +kernel_sizes = [15, 11, 9, 7, 5] +codec = [ + dict( + type='MegviiHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + kernel_size=kernel_size) for kernel_size in kernel_sizes +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MSPN', + unit_channels=256, + num_stages=4, + num_units=4, + num_blocks=[3, 4, 6, 3], + norm_cfg=dict(type='BN'), + init_cfg=dict( + type='Pretrained', + checkpoint='torchvision://resnet50', + )), + head=dict( + type='MSPNHead', + out_shape=(64, 48), + unit_channels=256, + out_channels=17, + num_stages=4, + num_units=4, + norm_cfg=dict(type='BN'), + # each sub list is for a stage + # and each element in each list is for a unit + level_indices=[0, 1, 2, 3] * 3 + [1, 2, 3, 4], + loss=([ + dict( + type='KeypointMSELoss', + use_target_weight=True, + loss_weight=0.25) + ] * 3 + [ + dict( + type='KeypointOHKMMSELoss', + use_target_weight=True, + loss_weight=1.) + ]) * 4, + decoder=codec[-1]), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='GenerateTarget', multilevel=True, encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_mode='none') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192.py index 9732371787..775ba6954f 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192.py @@ 
-1,153 +1,153 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -custom_imports = dict( - imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], - allow_failed_imports=False) - -optim_wrapper = dict( - optimizer=dict( - type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), - paramwise_cfg=dict( - num_layers=12, - layer_decay_rate=0.75, - custom_keys={ - 'bias': dict(decay_multi=0.0), - 'pos_embed': dict(decay_mult=0.0), - 'relative_position_bias_table': dict(decay_mult=0.0), - 'norm': dict(decay_mult=0.0), - }, - ), - constructor='LayerDecayOptimWrapperConstructor', - clip_grad=dict(max_norm=1., norm_type=2), -) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='mmpretrain.VisionTransformer', - arch='base', - img_size=(256, 192), - patch_size=16, - qkv_bias=True, - drop_path_rate=0.3, - with_cls_token=False, - out_type='featmap', - patch_cfg=dict(padding=2), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'v1/pretrained_models/mae_pretrain_vit_base.pth'), - ), - neck=dict(type='FeatureMapProcessor', scale_factor=4.0, apply_relu=True), - head=dict( - type='HeatmapHead', - in_channels=768, - out_channels=17, - deconv_out_channels=[], - deconv_kernel_sizes=[], - final_layer=dict(kernel_size=3, padding=1), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec, - ), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -data_root = 'data/coco/' -dataset_type = 'CocoDataset' -data_mode = 'topdown' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - 
data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=12, + layer_decay_rate=0.75, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmpretrain.VisionTransformer', + arch='base', + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.3, + with_cls_token=False, + out_type='featmap', + patch_cfg=dict(padding=2), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'v1/pretrained_models/mae_pretrain_vit_base.pth'), + ), + neck=dict(type='FeatureMapProcessor', scale_factor=4.0, apply_relu=True), + head=dict( + type='HeatmapHead', + in_channels=768, + out_channels=17, + deconv_out_channels=[], + deconv_kernel_sizes=[], + final_layer=dict(kernel_size=3, padding=1), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec, + ), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +data_root = 'data/coco/' +dataset_type = 'CocoDataset' +data_mode = 'topdown' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + 
sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py index fc08c61dff..8740b40e7e 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py @@ -1,150 +1,150 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -custom_imports = dict( - imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], - allow_failed_imports=False) - -optim_wrapper = dict( - optimizer=dict( - type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), - paramwise_cfg=dict( - num_layers=12, - layer_decay_rate=0.75, - custom_keys={ - 'bias': dict(decay_multi=0.0), - 'pos_embed': dict(decay_mult=0.0), - 'relative_position_bias_table': dict(decay_mult=0.0), - 'norm': dict(decay_mult=0.0), - }, - ), - constructor='LayerDecayOptimWrapperConstructor', - clip_grad=dict(max_norm=1., norm_type=2), -) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='mmpretrain.VisionTransformer', - arch='base', - img_size=(256, 192), - patch_size=16, - qkv_bias=True, - drop_path_rate=0.3, - with_cls_token=False, - out_type='featmap', - patch_cfg=dict(padding=2), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'v1/pretrained_models/mae_pretrain_vit_base.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=768, - out_channels=17, - deconv_out_channels=(256, 256), - deconv_kernel_sizes=(4, 4), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - 
shift_heatmap=False, - )) - -# base dataset settings -data_root = 'data/coco/' -dataset_type = 'CocoDataset' -data_mode = 'topdown' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=12, + layer_decay_rate=0.75, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmpretrain.VisionTransformer', + arch='base', + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.3, + with_cls_token=False, + out_type='featmap', + patch_cfg=dict(padding=2), + init_cfg=dict( + type='Pretrained', + 
checkpoint='https://download.openmmlab.com/mmpose/' + 'v1/pretrained_models/mae_pretrain_vit_base.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=768, + out_channels=17, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +data_root = 'data/coco/' +dataset_type = 'CocoDataset' +data_mode = 'topdown' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192.py index 7d94f97c1b..455580a8d4 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192.py @@ -1,153 +1,153 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -custom_imports = dict( - imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], - allow_failed_imports=False) - -optim_wrapper = dict( - optimizer=dict( - type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), - paramwise_cfg=dict( - num_layers=32, - layer_decay_rate=0.85, - custom_keys={ - 'bias': dict(decay_multi=0.0), - 'pos_embed': dict(decay_mult=0.0), - 'relative_position_bias_table': dict(decay_mult=0.0), - 'norm': dict(decay_mult=0.0), - }, - ), - constructor='LayerDecayOptimWrapperConstructor', - clip_grad=dict(max_norm=1., norm_type=2), -) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - 
by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='mmpretrain.VisionTransformer', - arch='huge', - img_size=(256, 192), - patch_size=16, - qkv_bias=True, - drop_path_rate=0.55, - with_cls_token=False, - out_type='featmap', - patch_cfg=dict(padding=2), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'v1/pretrained_models/mae_pretrain_vit_huge.pth'), - ), - neck=dict(type='FeatureMapProcessor', scale_factor=4.0, apply_relu=True), - head=dict( - type='HeatmapHead', - in_channels=1280, - out_channels=17, - deconv_out_channels=[], - deconv_kernel_sizes=[], - final_layer=dict(kernel_size=3, padding=1), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec, - ), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -data_root = 'data/coco/' -dataset_type = 'CocoDataset' -data_mode = 'topdown' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=32, + layer_decay_rate=0.85, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 
'pos_embed': dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmpretrain.VisionTransformer', + arch='huge', + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.55, + with_cls_token=False, + out_type='featmap', + patch_cfg=dict(padding=2), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'v1/pretrained_models/mae_pretrain_vit_huge.pth'), + ), + neck=dict(type='FeatureMapProcessor', scale_factor=4.0, apply_relu=True), + head=dict( + type='HeatmapHead', + in_channels=1280, + out_channels=17, + deconv_out_channels=[], + deconv_kernel_sizes=[], + final_layer=dict(kernel_size=3, padding=1), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec, + ), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +data_root = 'data/coco/' +dataset_type = 'CocoDataset' +data_mode = 'topdown' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py index 4aa2c21c1f..27b4e6f450 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py @@ -1,150 +1,150 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -custom_imports = dict( - imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], - allow_failed_imports=False) - -optim_wrapper = dict( - optimizer=dict( - type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), - paramwise_cfg=dict( - num_layers=32, - layer_decay_rate=0.85, - custom_keys={ - 'bias': dict(decay_multi=0.0), - 'pos_embed': dict(decay_mult=0.0), - 'relative_position_bias_table': dict(decay_mult=0.0), - 'norm': dict(decay_mult=0.0), - }, - ), - constructor='LayerDecayOptimWrapperConstructor', - clip_grad=dict(max_norm=1., norm_type=2), -) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='mmpretrain.VisionTransformer', - arch='huge', - img_size=(256, 192), - patch_size=16, - qkv_bias=True, - drop_path_rate=0.55, - with_cls_token=False, - out_type='featmap', - patch_cfg=dict(padding=2), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'v1/pretrained_models/mae_pretrain_vit_huge.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=1280, - out_channels=17, - deconv_out_channels=(256, 256), - deconv_kernel_sizes=(4, 4), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -data_root = 'data/coco/' -dataset_type = 'CocoDataset' -data_mode = 'topdown' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - 
ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=32, + layer_decay_rate=0.85, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmpretrain.VisionTransformer', + arch='huge', + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.55, + with_cls_token=False, + out_type='featmap', + patch_cfg=dict(padding=2), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'v1/pretrained_models/mae_pretrain_vit_huge.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=1280, + out_channels=17, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +data_root = 'data/coco/' +dataset_type = 'CocoDataset' +data_mode = 'topdown' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + 
dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192.py index cf875d5167..0bd6652d71 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192.py @@ -1,153 +1,153 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -custom_imports = dict( - imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], - allow_failed_imports=False) - -optim_wrapper = dict( - optimizer=dict( - type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), - paramwise_cfg=dict( - num_layers=24, - layer_decay_rate=0.8, - custom_keys={ - 'bias': dict(decay_multi=0.0), - 'pos_embed': dict(decay_mult=0.0), - 'relative_position_bias_table': dict(decay_mult=0.0), - 'norm': dict(decay_mult=0.0), - }, - ), - constructor='LayerDecayOptimWrapperConstructor', - clip_grad=dict(max_norm=1., norm_type=2), -) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='mmpretrain.VisionTransformer', - arch='large', - img_size=(256, 192), - patch_size=16, - qkv_bias=True, - drop_path_rate=0.5, - with_cls_token=False, - out_type='featmap', - patch_cfg=dict(padding=2), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'v1/pretrained_models/mae_pretrain_vit_large.pth'), - ), - 
neck=dict(type='FeatureMapProcessor', scale_factor=4.0, apply_relu=True), - head=dict( - type='HeatmapHead', - in_channels=1024, - out_channels=17, - deconv_out_channels=[], - deconv_kernel_sizes=[], - final_layer=dict(kernel_size=3, padding=1), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec, - ), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -data_root = 'data/coco/' -dataset_type = 'CocoDataset' -data_mode = 'topdown' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=24, + layer_decay_rate=0.8, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + 
mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmpretrain.VisionTransformer', + arch='large', + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.5, + with_cls_token=False, + out_type='featmap', + patch_cfg=dict(padding=2), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'v1/pretrained_models/mae_pretrain_vit_large.pth'), + ), + neck=dict(type='FeatureMapProcessor', scale_factor=4.0, apply_relu=True), + head=dict( + type='HeatmapHead', + in_channels=1024, + out_channels=17, + deconv_out_channels=[], + deconv_kernel_sizes=[], + final_layer=dict(kernel_size=3, padding=1), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec, + ), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +data_root = 'data/coco/' +dataset_type = 'CocoDataset' +data_mode = 'topdown' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192.py index 5ba6eafb4b..f80f746dc1 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192.py @@ -1,150 +1,150 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -custom_imports = dict( - imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], - allow_failed_imports=False) - -optim_wrapper = dict( - optimizer=dict( - type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), - paramwise_cfg=dict( - num_layers=24, - layer_decay_rate=0.8, - custom_keys={ - 'bias': dict(decay_multi=0.0), - 'pos_embed': 
dict(decay_mult=0.0), - 'relative_position_bias_table': dict(decay_mult=0.0), - 'norm': dict(decay_mult=0.0), - }, - ), - constructor='LayerDecayOptimWrapperConstructor', - clip_grad=dict(max_norm=1., norm_type=2), -) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='mmpretrain.VisionTransformer', - arch='large', - img_size=(256, 192), - patch_size=16, - qkv_bias=True, - drop_path_rate=0.5, - with_cls_token=False, - out_type='featmap', - patch_cfg=dict(padding=2), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'v1/pretrained_models/mae_pretrain_vit_large.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=1024, - out_channels=17, - deconv_out_channels=(256, 256), - deconv_kernel_sizes=(4, 4), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -data_root = 'data/coco/' -dataset_type = 'CocoDataset' -data_mode = 'topdown' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = 
dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=24, + layer_decay_rate=0.8, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmpretrain.VisionTransformer', + arch='large', + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.5, + with_cls_token=False, + out_type='featmap', + patch_cfg=dict(padding=2), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'v1/pretrained_models/mae_pretrain_vit_large.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=1024, + out_channels=17, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +data_root = 'data/coco/' +dataset_type = 'CocoDataset' +data_mode = 'topdown' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) 
+test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192.py index 88bd3e43e3..62281c0561 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192.py @@ -1,158 +1,158 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -custom_imports = dict( - imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], - allow_failed_imports=False) - -optim_wrapper = dict( - optimizer=dict( - type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), - paramwise_cfg=dict( - num_layers=12, - layer_decay_rate=0.8, - custom_keys={ - 'bias': dict(decay_multi=0.0), - 'pos_embed': dict(decay_mult=0.0), - 'relative_position_bias_table': dict(decay_mult=0.0), - 'norm': dict(decay_mult=0.0), - }, - ), - constructor='LayerDecayOptimWrapperConstructor', - clip_grad=dict(max_norm=1., norm_type=2), -) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='mmpretrain.VisionTransformer', - arch={ - 'embed_dims': 384, - 'num_layers': 12, - 'num_heads': 12, - 'feedforward_channels': 384 * 4 - }, - img_size=(256, 192), - patch_size=16, - qkv_bias=True, - drop_path_rate=0.1, - with_cls_token=False, - out_type='featmap', - patch_cfg=dict(padding=2), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'v1/pretrained_models/mae_pretrain_vit_small.pth'), - ), - neck=dict(type='FeatureMapProcessor', scale_factor=4.0, apply_relu=True), - head=dict( - type='HeatmapHead', - in_channels=384, - out_channels=17, - deconv_out_channels=[], - deconv_kernel_sizes=[], - final_layer=dict(kernel_size=3, padding=1), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec, - ), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -data_root = 'data/coco/' -dataset_type = 'CocoDataset' -data_mode = 'topdown' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - 
dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=12, + layer_decay_rate=0.8, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmpretrain.VisionTransformer', + arch={ + 'embed_dims': 384, + 'num_layers': 12, + 'num_heads': 12, + 'feedforward_channels': 384 * 4 + }, + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.1, + with_cls_token=False, + out_type='featmap', + patch_cfg=dict(padding=2), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'v1/pretrained_models/mae_pretrain_vit_small.pth'), + ), + neck=dict(type='FeatureMapProcessor', scale_factor=4.0, apply_relu=True), + head=dict( + type='HeatmapHead', + in_channels=384, + out_channels=17, + deconv_out_channels=[], + deconv_kernel_sizes=[], + final_layer=dict(kernel_size=3, padding=1), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec, + ), + test_cfg=dict( + flip_test=True, + 
flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +data_root = 'data/coco/' +dataset_type = 'CocoDataset' +data_mode = 'topdown' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192.py index 791f9b5945..527057458f 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192.py @@ -1,155 +1,155 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -custom_imports = dict( - imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], - allow_failed_imports=False) - -optim_wrapper = dict( - optimizer=dict( - type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), - paramwise_cfg=dict( - num_layers=12, - layer_decay_rate=0.8, - custom_keys={ - 'bias': dict(decay_multi=0.0), - 'pos_embed': dict(decay_mult=0.0), - 'relative_position_bias_table': dict(decay_mult=0.0), - 'norm': dict(decay_mult=0.0), - }, - ), - constructor='LayerDecayOptimWrapperConstructor', - clip_grad=dict(max_norm=1., norm_type=2), -) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - 
type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='mmpretrain.VisionTransformer', - arch={ - 'embed_dims': 384, - 'num_layers': 12, - 'num_heads': 12, - 'feedforward_channels': 384 * 4 - }, - img_size=(256, 192), - patch_size=16, - qkv_bias=True, - drop_path_rate=0.1, - with_cls_token=False, - out_type='featmap', - patch_cfg=dict(padding=2), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'v1/pretrained_models/mae_pretrain_vit_small.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=384, - out_channels=17, - deconv_out_channels=(256, 256), - deconv_kernel_sizes=(4, 4), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -data_root = 'data/coco/' -dataset_type = 'CocoDataset' -data_mode = 'topdown' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=12, + layer_decay_rate=0.8, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + 
end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmpretrain.VisionTransformer', + arch={ + 'embed_dims': 384, + 'num_layers': 12, + 'num_heads': 12, + 'feedforward_channels': 384 * 4 + }, + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.1, + with_cls_token=False, + out_type='featmap', + patch_cfg=dict(padding=2), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'v1/pretrained_models/mae_pretrain_vit_small.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=384, + out_channels=17, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +data_root = 'data/coco/' +dataset_type = 'CocoDataset' +data_mode = 'topdown' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_alexnet_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_alexnet_8xb64-210e_coco-256x192.py index 4051f4c5ec..dcd903fa73 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_alexnet_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_alexnet_8xb64-210e_coco-256x192.py @@ -1,117 +1,117 @@ -_base_ = 
['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(40, 56), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict(type='AlexNet', num_classes=-1), - head=dict( - type='HeatmapHead', - in_channels=256, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = 
dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(40, 56), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict(type='AlexNet', num_classes=-1), + head=dict( + type='HeatmapHead', + in_channels=256, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb32-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb32-210e_coco-384x288.py index 38b23cf718..5d719396bc 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb32-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb32-210e_coco-384x288.py @@ -1,125 +1,125 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(36, 48), sigma=3) - -# model settings -model 
= dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='CPM', - in_channels=3, - out_channels=17, - feat_channels=128, - num_stages=6), - head=dict( - type='CPMHead', - in_channels=17, - out_channels=17, - num_stages=6, - deconv_out_channels=None, - final_layer=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(36, 48), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='CPM', + in_channels=3, + out_channels=17, + feat_channels=128, + num_stages=6), + head=dict( + type='CPMHead', + in_channels=17, + out_channels=17, + num_stages=6, + deconv_out_channels=None, + final_layer=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), 
+ test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb64-210e_coco-256x192.py index 17f7eb9677..662a0fe29c 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_cpm_8xb64-210e_coco-256x192.py @@ -1,125 +1,125 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(24, 32), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='CPM', - in_channels=3, - out_channels=17, - feat_channels=128, - num_stages=6), - head=dict( - type='CPMHead', - in_channels=17, - out_channels=17, - num_stages=6, - deconv_out_channels=None, - final_layer=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - 
)) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(24, 32), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='CPM', + in_channels=3, + out_channels=17, + feat_channels=128, + num_stages=6), + head=dict( + type='CPMHead', + in_channels=17, + out_channels=17, + num_stages=6, + deconv_out_channels=None, + final_layer=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + 
dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-256x256.py index b9d49c8e6a..b83f3ce0d5 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-256x256.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HourglassNet', - num_stacks=1, - ), - head=dict( - type='CPMHead', - in_channels=256, - out_channels=17, - num_stages=1, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', 
input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HourglassNet', + num_stacks=1, + ), + head=dict( + type='CPMHead', + in_channels=256, + out_channels=17, + num_stages=1, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + 
sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-384x384.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-384x384.py index d9932ff9e3..86e35f863b 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-384x384.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-384x384.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(384, 384), heatmap_size=(96, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HourglassNet', - num_stacks=1, - ), - head=dict( - type='CPMHead', - in_channels=256, - out_channels=17, - num_stages=1, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - 
data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(384, 384), heatmap_size=(96, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HourglassNet', + num_stacks=1, + ), + head=dict( + type='CPMHead', + in_channels=256, + out_channels=17, + num_stages=1, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-256x192.py index 8b81dbdaac..6537ad02be 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-256x192.py @@ -1,174 +1,174 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict( - optimizer=dict( - type='AdamW', - lr=5e-4, - betas=(0.9, 0.999), - weight_decay=0.01, - ), - paramwise_cfg=dict( - custom_keys={'relative_position_bias_table': dict(decay_mult=0.)})) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRFormer', - in_channels=3, - norm_cfg=norm_cfg, - extra=dict( - drop_path_rate=0.2, - with_rpe=True, - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(2, ), - num_channels=(64, ), - num_heads=[2], - mlp_ratios=[4]), - stage2=dict( - num_modules=1, - num_branches=2, - block='HRFORMERBLOCK', - num_blocks=(2, 2), - num_channels=(78, 156), - num_heads=[2, 4], - mlp_ratios=[4, 4], - window_sizes=[7, 7]), - stage3=dict( - num_modules=4, - num_branches=3, - block='HRFORMERBLOCK', - num_blocks=(2, 2, 2), - num_channels=(78, 156, 312), - num_heads=[2, 4, 8], - mlp_ratios=[4, 4, 4], - window_sizes=[7, 7, 7]), - stage4=dict( - num_modules=2, - num_branches=4, - block='HRFORMERBLOCK', - num_blocks=(2, 2, 2, 2), - num_channels=(78, 156, 312, 624), - num_heads=[2, 4, 8, 16], - mlp_ratios=[4, 4, 4, 4], - window_sizes=[7, 7, 7, 7])), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrformer_base-32815020_20220226.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=78, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# 
data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator - -# fp16 settings -fp16 = dict(loss_scale='dynamic') +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict( + optimizer=dict( + type='AdamW', + lr=5e-4, + betas=(0.9, 0.999), + weight_decay=0.01, + ), + paramwise_cfg=dict( + custom_keys={'relative_position_bias_table': dict(decay_mult=0.)})) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRFormer', + in_channels=3, + norm_cfg=norm_cfg, + extra=dict( + drop_path_rate=0.2, + with_rpe=True, + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(2, ), + num_channels=(64, ), + num_heads=[2], + mlp_ratios=[4]), + stage2=dict( + num_modules=1, + num_branches=2, + block='HRFORMERBLOCK', + num_blocks=(2, 2), + num_channels=(78, 156), + num_heads=[2, 4], + mlp_ratios=[4, 4], + window_sizes=[7, 7]), + stage3=dict( + num_modules=4, + num_branches=3, + block='HRFORMERBLOCK', + num_blocks=(2, 2, 2), + num_channels=(78, 156, 312), + num_heads=[2, 4, 8], + mlp_ratios=[4, 4, 4], + window_sizes=[7, 7, 7]), + stage4=dict( + num_modules=2, + num_branches=4, + block='HRFORMERBLOCK', + num_blocks=(2, 2, 2, 2), + num_channels=(78, 156, 312, 624), + num_heads=[2, 4, 8, 16], + mlp_ratios=[4, 4, 4, 4], + window_sizes=[7, 7, 7, 7])), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrformer_base-32815020_20220226.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=78, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + 
shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator + +# fp16 settings +fp16 = dict(loss_scale='dynamic') diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-384x288.py index 351685464c..b055be5f9d 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-base_8xb32-210e_coco-384x288.py @@ -1,174 +1,174 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict( - optimizer=dict( - type='AdamW', - lr=5e-4, - betas=(0.9, 0.999), - weight_decay=0.01, - ), - paramwise_cfg=dict( - custom_keys={'relative_position_bias_table': dict(decay_mult=0.)})) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRFormer', - in_channels=3, - norm_cfg=norm_cfg, - extra=dict( - drop_path_rate=0.2, - with_rpe=True, - stage1=dict( - num_modules=1, - 
num_branches=1, - block='BOTTLENECK', - num_blocks=(2, ), - num_channels=(64, ), - num_heads=[2], - mlp_ratios=[4]), - stage2=dict( - num_modules=1, - num_branches=2, - block='HRFORMERBLOCK', - num_blocks=(2, 2), - num_channels=(78, 156), - num_heads=[2, 4], - mlp_ratios=[4, 4], - window_sizes=[7, 7]), - stage3=dict( - num_modules=4, - num_branches=3, - block='HRFORMERBLOCK', - num_blocks=(2, 2, 2), - num_channels=(78, 156, 312), - num_heads=[2, 4, 8], - mlp_ratios=[4, 4, 4], - window_sizes=[7, 7, 7]), - stage4=dict( - num_modules=2, - num_branches=4, - block='HRFORMERBLOCK', - num_blocks=(2, 2, 2, 2), - num_channels=(78, 156, 312, 624), - num_heads=[2, 4, 8, 16], - mlp_ratios=[4, 4, 4, 4], - window_sizes=[7, 7, 7, 7])), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrformer_base-32815020_20220226.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=78, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator - -# fp16 settings -fp16 = dict(loss_scale='dynamic') +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict( + optimizer=dict( + type='AdamW', + lr=5e-4, + betas=(0.9, 0.999), + weight_decay=0.01, + ), + paramwise_cfg=dict( + custom_keys={'relative_position_bias_table': dict(decay_mult=0.)})) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size 
+auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRFormer', + in_channels=3, + norm_cfg=norm_cfg, + extra=dict( + drop_path_rate=0.2, + with_rpe=True, + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(2, ), + num_channels=(64, ), + num_heads=[2], + mlp_ratios=[4]), + stage2=dict( + num_modules=1, + num_branches=2, + block='HRFORMERBLOCK', + num_blocks=(2, 2), + num_channels=(78, 156), + num_heads=[2, 4], + mlp_ratios=[4, 4], + window_sizes=[7, 7]), + stage3=dict( + num_modules=4, + num_branches=3, + block='HRFORMERBLOCK', + num_blocks=(2, 2, 2), + num_channels=(78, 156, 312), + num_heads=[2, 4, 8], + mlp_ratios=[4, 4, 4], + window_sizes=[7, 7, 7]), + stage4=dict( + num_modules=2, + num_branches=4, + block='HRFORMERBLOCK', + num_blocks=(2, 2, 2, 2), + num_channels=(78, 156, 312, 624), + num_heads=[2, 4, 8, 16], + mlp_ratios=[4, 4, 4, 4], + window_sizes=[7, 7, 7, 7])), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrformer_base-32815020_20220226.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=78, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator + +# fp16 settings +fp16 = 
dict(loss_scale='dynamic') diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-256x192.py index 6c59395c8a..e283ae3ac5 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-256x192.py @@ -1,174 +1,174 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict( - optimizer=dict( - type='AdamW', - lr=5e-4, - betas=(0.9, 0.999), - weight_decay=0.01, - ), - paramwise_cfg=dict( - custom_keys={'relative_position_bias_table': dict(decay_mult=0.)})) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRFormer', - in_channels=3, - norm_cfg=norm_cfg, - extra=dict( - drop_path_rate=0.1, - with_rpe=True, - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(2, ), - num_channels=(64, ), - num_heads=[2], - num_mlp_ratios=[4]), - stage2=dict( - num_modules=1, - num_branches=2, - block='HRFORMERBLOCK', - num_blocks=(2, 2), - num_channels=(32, 64), - num_heads=[1, 2], - mlp_ratios=[4, 4], - window_sizes=[7, 7]), - stage3=dict( - num_modules=4, - num_branches=3, - block='HRFORMERBLOCK', - num_blocks=(2, 2, 2), - num_channels=(32, 64, 128), - num_heads=[1, 2, 4], - mlp_ratios=[4, 4, 4], - window_sizes=[7, 7, 7]), - stage4=dict( - num_modules=2, - num_branches=4, - block='HRFORMERBLOCK', - num_blocks=(2, 2, 2, 2), - num_channels=(32, 64, 128, 256), - num_heads=[1, 2, 4, 8], - mlp_ratios=[4, 4, 4, 4], - window_sizes=[7, 7, 7, 7])), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrformer_small-09516375_20220226.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', 
input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator - -# fp16 settings -fp16 = dict(loss_scale='dynamic') +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict( + optimizer=dict( + type='AdamW', + lr=5e-4, + betas=(0.9, 0.999), + weight_decay=0.01, + ), + paramwise_cfg=dict( + custom_keys={'relative_position_bias_table': dict(decay_mult=0.)})) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRFormer', + in_channels=3, + norm_cfg=norm_cfg, + extra=dict( + drop_path_rate=0.1, + with_rpe=True, + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(2, ), + num_channels=(64, ), + num_heads=[2], + num_mlp_ratios=[4]), + stage2=dict( + num_modules=1, + num_branches=2, + block='HRFORMERBLOCK', + num_blocks=(2, 2), + num_channels=(32, 64), + num_heads=[1, 2], + mlp_ratios=[4, 4], + window_sizes=[7, 7]), + stage3=dict( + num_modules=4, + num_branches=3, + block='HRFORMERBLOCK', + num_blocks=(2, 2, 2), + num_channels=(32, 64, 128), + num_heads=[1, 2, 4], + mlp_ratios=[4, 4, 4], + window_sizes=[7, 7, 7]), + stage4=dict( + num_modules=2, + num_branches=4, + block='HRFORMERBLOCK', + num_blocks=(2, 2, 2, 2), + num_channels=(32, 64, 128, 256), + num_heads=[1, 2, 4, 8], + mlp_ratios=[4, 4, 4, 4], + window_sizes=[7, 7, 7, 7])), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrformer_small-09516375_20220226.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + 
decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator + +# fp16 settings +fp16 = dict(loss_scale='dynamic') diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-384x288.py index eee3521a7c..323a1681b5 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrformer-small_8xb32-210e_coco-384x288.py @@ -1,174 +1,174 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict( - optimizer=dict( - type='AdamW', - lr=5e-4, - betas=(0.9, 0.999), - weight_decay=0.01, - ), - paramwise_cfg=dict( - custom_keys={'relative_position_bias_table': dict(decay_mult=0.)})) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRFormer', - in_channels=3, - norm_cfg=norm_cfg, - extra=dict( - 
drop_path_rate=0.1, - with_rpe=True, - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(2, ), - num_channels=(64, ), - num_heads=[2], - num_mlp_ratios=[4]), - stage2=dict( - num_modules=1, - num_branches=2, - block='HRFORMERBLOCK', - num_blocks=(2, 2), - num_channels=(32, 64), - num_heads=[1, 2], - mlp_ratios=[4, 4], - window_sizes=[7, 7]), - stage3=dict( - num_modules=4, - num_branches=3, - block='HRFORMERBLOCK', - num_blocks=(2, 2, 2), - num_channels=(32, 64, 128), - num_heads=[1, 2, 4], - mlp_ratios=[4, 4, 4], - window_sizes=[7, 7, 7]), - stage4=dict( - num_modules=2, - num_branches=4, - block='HRFORMERBLOCK', - num_blocks=(2, 2, 2, 2), - num_channels=(32, 64, 128, 256), - num_heads=[1, 2, 4, 8], - mlp_ratios=[4, 4, 4, 4], - window_sizes=[7, 7, 7, 7])), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrformer_small-09516375_20220226.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator - -# fp16 settings -fp16 = dict(loss_scale='dynamic') +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict( + optimizer=dict( + type='AdamW', + lr=5e-4, + betas=(0.9, 0.999), + weight_decay=0.01, + ), + paramwise_cfg=dict( + custom_keys={'relative_position_bias_table': dict(decay_mult=0.)})) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] 
+ +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRFormer', + in_channels=3, + norm_cfg=norm_cfg, + extra=dict( + drop_path_rate=0.1, + with_rpe=True, + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(2, ), + num_channels=(64, ), + num_heads=[2], + num_mlp_ratios=[4]), + stage2=dict( + num_modules=1, + num_branches=2, + block='HRFORMERBLOCK', + num_blocks=(2, 2), + num_channels=(32, 64), + num_heads=[1, 2], + mlp_ratios=[4, 4], + window_sizes=[7, 7]), + stage3=dict( + num_modules=4, + num_branches=3, + block='HRFORMERBLOCK', + num_blocks=(2, 2, 2), + num_channels=(32, 64, 128), + num_heads=[1, 2, 4], + mlp_ratios=[4, 4, 4], + window_sizes=[7, 7, 7]), + stage4=dict( + num_modules=2, + num_branches=4, + block='HRFORMERBLOCK', + num_blocks=(2, 2, 2, 2), + num_channels=(32, 64, 128, 256), + num_heads=[1, 2, 4, 8], + mlp_ratios=[4, 4, 4, 4], + window_sizes=[7, 7, 7, 7])), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrformer_small-09516375_20220226.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') 
+test_evaluator = val_evaluator + +# fp16 settings +fp16 = dict(loss_scale='dynamic') diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py index ea486d830a..7d89ef5eae 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py @@ -1,150 +1,150 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - 
sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + 
num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-384x288.py index ae15d35ee1..fdb2bae35c 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-384x288.py @@ -1,150 +1,150 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - 
dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + 
dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine.py index f5d2ed0bfd..8499f40ff6 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine.py @@ -1,221 +1,221 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=3)) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# keypoint mappings -keypoint_mapping_coco = [ - (0, 0), - (1, 1), - (2, 2), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -keypoint_mapping_aic = [ - (0, 6), - (1, 8), - (2, 10), - (3, 5), - (4, 7), - (5, 9), - (6, 12), - (7, 14), - (8, 16), - (9, 11), - (10, 13), - (11, 15), - (12, 17), - (13, 18), -] - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - metainfo=dict(from_file='configs/_base_/datasets/coco_aic.py'), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), 
- num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=19, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - output_keypoint_indices=[ - target for _, target in keypoint_mapping_coco - ])) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# train datasets -dataset_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=19, - mapping=keypoint_mapping_coco) - ], -) - -dataset_aic = dict( - type='AicDataset', - data_root='data/aic/', - data_mode=data_mode, - ann_file='annotations/aic_train.json', - data_prefix=dict(img='ai_challenger_keypoint_train_20170902/' - 'keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=19, - mapping=keypoint_mapping_aic) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/coco_aic.py'), - datasets=[dataset_coco, dataset_aic], - pipeline=train_pipeline, - test_mode=False, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks 
+default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=3)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# keypoint mappings +keypoint_mapping_coco = [ + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +keypoint_mapping_aic = [ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + (12, 17), + (13, 18), +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + metainfo=dict(from_file='configs/_base_/datasets/coco_aic.py'), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=19, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + output_keypoint_indices=[ + target for _, target in keypoint_mapping_coco + ])) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=19, + mapping=keypoint_mapping_coco) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root='data/aic/', + data_mode=data_mode, + ann_file='annotations/aic_train.json', + data_prefix=dict(img='ai_challenger_keypoint_train_20170902/' + 'keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=19, + mapping=keypoint_mapping_aic) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco_aic.py'), + datasets=[dataset_coco, dataset_aic], + 
pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge.py index 847a40da2f..5ac097ca77 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge.py @@ -1,187 +1,187 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', 
encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# train datasets -dataset_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=[], -) - -dataset_aic = dict( - type='AicDataset', - data_root='data/aic/', - data_mode=data_mode, - ann_file='annotations/aic_train.json', - data_prefix=dict(img='ai_challenger_keypoint_train_20170902/' - 'keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=17, - mapping=[ - (0, 6), - (1, 8), - (2, 10), - (3, 5), - (4, 7), - (5, 9), - (6, 12), - (7, 14), - (8, 16), - (9, 11), - (10, 13), - (11, 15), - ]) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/coco.py'), - datasets=[dataset_coco, dataset_aic], - pipeline=train_pipeline, - test_mode=False, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 
'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=[], +) + +dataset_aic = dict( + type='AicDataset', + data_root='data/aic/', + data_mode=data_mode, + ann_file='annotations/aic_train.json', + data_prefix=dict(img='ai_challenger_keypoint_train_20170902/' + 'keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=17, + mapping=[ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + ]) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[dataset_coco, dataset_aic], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_coarsedropout-8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_coarsedropout-8xb64-210e_coco-256x192.py index a3ac0bd589..22a962701e 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_coarsedropout-8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_coarsedropout-8xb64-210e_coco-256x192.py @@ -1,165 +1,165 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - 
milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/' - 'body_2d_keypoint/topdown_heatmap/coco/' - 'td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict( - type='Albumentation', - transforms=[ - dict( - type='CoarseDropout', - max_holes=8, - max_height=40, - max_width=40, - min_holes=1, - min_height=10, - min_width=10, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] 
+ +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/' + 'body_2d_keypoint/topdown_heatmap/coco/' + 'td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict( + type='CoarseDropout', + max_holes=8, + max_height=40, + max_width=40, + min_holes=1, + min_height=10, + min_width=10, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 
'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192.py index 7273a0503b..a11db9eafd 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192.py @@ -1,154 +1,154 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2, - unbiased=True) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - 
data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + 
sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288.py index 67b13b8bab..b2bb6e469f 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288.py @@ -1,154 +1,154 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', - input_size=(288, 384), - heatmap_size=(72, 96), - sigma=3, - unbiased=True) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - 
dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(288, 384), + heatmap_size=(72, 96), + sigma=3, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines 
+train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192.py index 306d0aeb44..50188c5f18 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192.py @@ -1,7 +1,7 @@ -_base_ = ['./td-hm_hrnet-w32_8xb64-210e_coco-256x192.py'] - -# fp16 settings -optim_wrapper = dict( - type='AmpOptimWrapper', - loss_scale='dynamic', -) +_base_ = ['./td-hm_hrnet-w32_8xb64-210e_coco-256x192.py'] + +# fp16 settings +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', +) diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_gridmask-8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_gridmask-8xb64-210e_coco-256x192.py index d380ad243d..5157242fb9 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_gridmask-8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_gridmask-8xb64-210e_coco-256x192.py @@ -1,162 +1,162 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 
64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/' - 'body_2d_keypoint/topdown_heatmap/coco/' - 'td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict( - type='Albumentation', - transforms=[ - dict( - type='GridDropout', - unit_size_min=10, - unit_size_max=40, - random_offset=True, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) 
+] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/' + 'body_2d_keypoint/topdown_heatmap/coco/' + 'td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict( + type='GridDropout', + unit_size_min=10, + unit_size_max=40, + random_offset=True, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_photometric-8xb64-210e_coco-256x192.py 
b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_photometric-8xb64-210e_coco-256x192.py index f0bc7486ca..ea82efc8d5 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_photometric-8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_photometric-8xb64-210e_coco-256x192.py @@ -1,153 +1,153 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/' - 'body_2d_keypoint/topdown_heatmap/coco/' - 'td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PhotometricDistortion'), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - 
sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/' + 'body_2d_keypoint/topdown_heatmap/coco/' + 'td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + 
ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192.py index 143a686ef7..54ae3d93c0 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192.py @@ -1,150 +1,150 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', 
input_size=codec['input_size'], use_udp=True), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + 
dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288.py index 113a91e18c..3344529344 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288.py @@ -1,150 +1,150 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 
'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + 
init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192.py index d147de838a..f29c8a29d8 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192.py @@ -1,155 +1,155 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='UDPHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2, - heatmap_type='combined') - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 
103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=3 * 17, - deconv_out_channels=None, - loss=dict(type='CombinedTargetMSELoss', use_target_weight=True), - decoder=codec), - train_cfg=dict(compute_acc=False), - test_cfg=dict( - flip_test=True, - flip_mode='udp_combined', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='UDPHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2, + 
heatmap_type='combined') + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=3 * 17, + deconv_out_channels=None, + loss=dict(type='CombinedTargetMSELoss', use_target_weight=True), + decoder=codec), + train_cfg=dict(compute_acc=False), + test_cfg=dict( + flip_test=True, + flip_mode='udp_combined', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py index 1c5ff70ab4..5ddd42160e 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py @@ -1,150 +1,150 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = 
dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(48, 96)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(48, 96, 192)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(48, 96, 192, 384))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w48-8ef0771d.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=48, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = 
dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-384x288.py index f83b7d31a4..755f236336 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-384x288.py @@ -1,150 +1,150 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(48, 96)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(48, 96, 192)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(48, 96, 192, 384))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w48-8ef0771d.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=48, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - 
type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + 
sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192.py index daf3cbaddc..80ffe8a711 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192.py @@ -1,154 +1,154 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2, - unbiased=True) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(48, 96)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(48, 96, 192)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(48, 96, 192, 384))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w48-8ef0771d.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=48, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', 
input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + 
dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288.py index eec52999c9..04cd41c010 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288.py @@ -1,154 +1,154 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', - input_size=(288, 384), - heatmap_size=(72, 96), - sigma=3, - unbiased=True) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(48, 96)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(48, 96, 192)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(48, 96, 192, 384))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w48-8ef0771d.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=48, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - 
flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(288, 384), + heatmap_size=(72, 96), + sigma=3, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=17, + deconv_out_channels=None, + 
loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192.py index b705cb7fb3..6cd31a794b 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192.py @@ -1,150 +1,150 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - 
num_channels=(48, 96)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(48, 96, 192)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(48, 96, 192, 384))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w48-8ef0771d.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=48, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + 
stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288.py index cfa17ef098..f9edf38143 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288.py @@ -1,150 +1,150 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = 
dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(48, 96)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(48, 96, 192)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(48, 96, 192, 384))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w48-8ef0771d.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=48, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training 
batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb32-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb32-210e_coco-384x288.py index caa7c267a0..f801f7a7db 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb32-210e_coco-384x288.py +++ 
b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb32-210e_coco-384x288.py @@ -1,140 +1,140 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='LiteHRNet', - in_channels=3, - extra=dict( - stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), - num_stages=3, - stages_spec=dict( - num_modules=(2, 4, 2), - num_branches=(2, 3, 4), - num_blocks=(2, 2, 2), - module_type=('LITE', 'LITE', 'LITE'), - with_fuse=(True, True, True), - reduce_ratios=(8, 8, 8), - num_channels=( - (40, 80), - (40, 80, 160), - (40, 80, 160, 320), - )), - with_head=True, - )), - head=dict( - type='HeatmapHead', - in_channels=40, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = 
['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='LiteHRNet', + in_channels=3, + extra=dict( + stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), + num_stages=3, + stages_spec=dict( + num_modules=(2, 4, 2), + num_branches=(2, 3, 4), + num_blocks=(2, 2, 2), + module_type=('LITE', 'LITE', 'LITE'), + with_fuse=(True, True, True), + reduce_ratios=(8, 8, 8), + num_channels=( + (40, 80), + (40, 80, 160), + (40, 80, 160, 320), + )), + with_head=True, + )), + head=dict( + type='HeatmapHead', + in_channels=40, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb64-210e_coco-256x192.py 
b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb64-210e_coco-256x192.py index 6f5a564d11..dd59f593fd 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-18_8xb64-210e_coco-256x192.py @@ -1,140 +1,140 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='LiteHRNet', - in_channels=3, - extra=dict( - stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), - num_stages=3, - stages_spec=dict( - num_modules=(2, 4, 2), - num_branches=(2, 3, 4), - num_blocks=(2, 2, 2), - module_type=('LITE', 'LITE', 'LITE'), - with_fuse=(True, True, True), - reduce_ratios=(8, 8, 8), - num_channels=( - (40, 80), - (40, 80, 160), - (40, 80, 160, 320), - )), - with_head=True, - )), - head=dict( - type='HeatmapHead', - in_channels=40, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - 
pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='LiteHRNet', + in_channels=3, + extra=dict( + stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), + num_stages=3, + stages_spec=dict( + num_modules=(2, 4, 2), + num_branches=(2, 3, 4), + num_blocks=(2, 2, 2), + module_type=('LITE', 'LITE', 'LITE'), + with_fuse=(True, True, True), + reduce_ratios=(8, 8, 8), + num_channels=( + (40, 80), + (40, 80, 160), + (40, 80, 160, 320), + )), + with_head=True, + )), + head=dict( + type='HeatmapHead', + in_channels=40, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root 
+ 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb32-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb32-210e_coco-384x288.py index 6635935525..8b69bbcbbe 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb32-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb32-210e_coco-384x288.py @@ -1,140 +1,140 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='LiteHRNet', - in_channels=3, - extra=dict( - stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), - num_stages=3, - stages_spec=dict( - num_modules=(3, 8, 3), - num_branches=(2, 3, 4), - num_blocks=(2, 2, 2), - module_type=('LITE', 'LITE', 'LITE'), - with_fuse=(True, True, True), - reduce_ratios=(8, 8, 8), - num_channels=( - (40, 80), - (40, 80, 160), - (40, 80, 160, 320), - )), - with_head=True, - )), - head=dict( - type='HeatmapHead', - in_channels=40, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - 
ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='LiteHRNet', + in_channels=3, + extra=dict( + stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), + num_stages=3, + stages_spec=dict( + num_modules=(3, 8, 3), + num_branches=(2, 3, 4), + num_blocks=(2, 2, 2), + module_type=('LITE', 'LITE', 'LITE'), + with_fuse=(True, True, True), + reduce_ratios=(8, 8, 8), + num_channels=( + (40, 80), + (40, 80, 160), + (40, 80, 160, 320), + )), + with_head=True, + )), + head=dict( + type='HeatmapHead', + in_channels=40, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 
'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb64-210e_coco-256x192.py index 6b5d347cd9..2aa7f3c33e 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb64-210e_coco-256x192.py @@ -1,140 +1,140 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='LiteHRNet', - in_channels=3, - extra=dict( - stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), - num_stages=3, - stages_spec=dict( - num_modules=(3, 8, 3), - num_branches=(2, 3, 4), - num_blocks=(2, 2, 2), - module_type=('LITE', 'LITE', 'LITE'), - with_fuse=(True, True, True), - reduce_ratios=(8, 8, 8), - num_channels=( - (40, 80), - (40, 80, 160), - (40, 80, 160, 320), - )), - with_head=True, - )), - head=dict( - type='HeatmapHead', - in_channels=40, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - 
num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='LiteHRNet', + in_channels=3, + extra=dict( + stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), + num_stages=3, + stages_spec=dict( + num_modules=(3, 8, 3), + num_branches=(2, 3, 4), + num_blocks=(2, 2, 2), + module_type=('LITE', 'LITE', 'LITE'), + with_fuse=(True, True, True), + reduce_ratios=(8, 8, 8), + num_channels=( + (40, 80), + (40, 80, 160), + (40, 80, 160, 320), + )), + with_head=True, + )), + head=dict( + type='HeatmapHead', + in_channels=40, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + 
dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-256x192.py index ff8eaccb7e..16018190b7 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-256x192.py @@ -1,124 +1,124 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='MobileNetV2', - widen_factor=1., - out_indices=(7, ), - init_cfg=dict( - type='Pretrained', - checkpoint='mmcls://mobilenet_v2', - )), - head=dict( - type='HeatmapHead', - in_channels=1280, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - 
data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MobileNetV2', + widen_factor=1., + out_indices=(7, ), + init_cfg=dict( + type='Pretrained', + checkpoint='mmcls://mobilenet_v2', + )), + head=dict( + type='HeatmapHead', + in_channels=1280, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-384x288.py index d01e4c6c3d..fcf4ee7d02 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mobilenetv2_8xb64-210e_coco-384x288.py @@ -1,124 +1,124 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='MobileNetV2', - widen_factor=1., - out_indices=(7, ), - init_cfg=dict( - type='Pretrained', - checkpoint='mmcls://mobilenet_v2', - )), - head=dict( - type='HeatmapHead', - in_channels=1280, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime 
+train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MobileNetV2', + widen_factor=1., + out_indices=(7, ), + init_cfg=dict( + type='Pretrained', + checkpoint='mmcls://mobilenet_v2', + )), + head=dict( + type='HeatmapHead', + in_channels=1280, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mspn50_8xb32-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mspn50_8xb32-210e_coco-256x192.py index d0e2e9893c..b1dbb18209 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mspn50_8xb32-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_mspn50_8xb32-210e_coco-256x192.py @@ -1,152 +1,152 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer 
-optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -# multiple kernel_sizes of heatmap gaussian for 'Megvii' approach. -kernel_sizes = [11, 9, 7, 5] -codec = [ - dict( - type='MegviiHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - kernel_size=kernel_size) for kernel_size in kernel_sizes -] - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='MSPN', - unit_channels=256, - num_stages=1, - num_units=4, - num_blocks=[3, 4, 6, 3], - norm_cfg=dict(type='BN'), - init_cfg=dict( - type='Pretrained', - checkpoint='torchvision://resnet50', - )), - head=dict( - type='MSPNHead', - out_shape=(64, 48), - unit_channels=256, - out_channels=17, - num_stages=1, - num_units=4, - norm_cfg=dict(type='BN'), - # each sub list is for a stage - # and each element in each list is for a unit - level_indices=[0, 1, 2, 3], - loss=[ - dict( - type='KeypointMSELoss', - use_target_weight=True, - loss_weight=0.25) - ] * 3 + [ - dict( - type='KeypointOHKMMSELoss', - use_target_weight=True, - loss_weight=1.) - ], - decoder=codec[-1]), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec[0]['input_size']), - dict(type='GenerateTarget', multilevel=True, encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec[0]['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json', - nms_mode='none') -test_evaluator = val_evaluator +_base_ = 
['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +# multiple kernel_sizes of heatmap gaussian for 'Megvii' approach. +kernel_sizes = [11, 9, 7, 5] +codec = [ + dict( + type='MegviiHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + kernel_size=kernel_size) for kernel_size in kernel_sizes +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MSPN', + unit_channels=256, + num_stages=1, + num_units=4, + num_blocks=[3, 4, 6, 3], + norm_cfg=dict(type='BN'), + init_cfg=dict( + type='Pretrained', + checkpoint='torchvision://resnet50', + )), + head=dict( + type='MSPNHead', + out_shape=(64, 48), + unit_channels=256, + out_channels=17, + num_stages=1, + num_units=4, + norm_cfg=dict(type='BN'), + # each sub list is for a stage + # and each element in each list is for a unit + level_indices=[0, 1, 2, 3], + loss=[ + dict( + type='KeypointMSELoss', + use_target_weight=True, + loss_weight=0.25) + ] * 3 + [ + dict( + type='KeypointOHKMMSELoss', + use_target_weight=True, + loss_weight=1.) 
+ ], + decoder=codec[-1]), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='GenerateTarget', multilevel=True, encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_mode='none') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvt-s_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvt-s_8xb64-210e_coco-256x192.py index 1b474b3f2f..4a8704a5f2 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvt-s_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvt-s_8xb64-210e_coco-256x192.py @@ -1,127 +1,127 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='PyramidVisionTransformer', - num_layers=[3, 4, 6, 3], - init_cfg=dict( - type='Pretrained', - checkpoint='https://github.com/whai362/PVT/' - 'releases/download/v2/pvt_small.pth'), - ), - neck=dict(type='FeatureMapProcessor', select_index=3), - 
head=dict( - type='HeatmapHead', - in_channels=512, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='PyramidVisionTransformer', + num_layers=[3, 4, 6, 3], + init_cfg=dict( + type='Pretrained', + checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_small.pth'), + ), + neck=dict(type='FeatureMapProcessor', select_index=3), + head=dict( + type='HeatmapHead', + in_channels=512, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' 
+ +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvtv2-b2_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvtv2-b2_8xb64-210e_coco-256x192.py index e8921e6803..dd7b5d9db2 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvtv2-b2_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_pvtv2-b2_8xb64-210e_coco-256x192.py @@ -1,128 +1,128 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='PyramidVisionTransformerV2', - embed_dims=64, - num_layers=[3, 4, 6, 3], - init_cfg=dict( - type='Pretrained', - checkpoint='https://github.com/whai362/PVT/' - 'releases/download/v2/pvt_v2_b2.pth'), - ), - neck=dict(type='FeatureMapProcessor', select_index=3), - head=dict( - type='HeatmapHead', - in_channels=512, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - 
shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='PyramidVisionTransformerV2', + embed_dims=64, + num_layers=[3, 4, 6, 3], + init_cfg=dict( + type='Pretrained', + checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_v2_b2.pth'), + ), + neck=dict(type='FeatureMapProcessor', select_index=3), + head=dict( + type='HeatmapHead', + in_channels=512, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + 
dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb32-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb32-210e_coco-384x288.py index cd13e4a422..25ebf01854 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb32-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb32-210e_coco-384x288.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=101, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - 
dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + 
ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192.py index 5486548481..29f6555bc1 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_8xb64-210e_coco-256x192.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=101, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - 
pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) 
+test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-256x192.py index 39b7b3220d..3ef9880957 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-256x192.py @@ -1,125 +1,125 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2, - unbiased=True) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=101, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator 
= dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-384x288.py index f7c99503d4..c8ce6c1641 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-384x288.py +++ 
b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res101_dark-8xb64-210e_coco-384x288.py @@ -1,125 +1,125 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', - input_size=(288, 384), - heatmap_size=(72, 96), - sigma=3, - unbiased=True) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=101, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + 
milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(288, 384), + heatmap_size=(72, 96), + sigma=3, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-256x192.py index beccab1bd1..b2307b28e3 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-256x192.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling 
LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=152, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + 
depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-384x288.py index 25d5039f05..eae41ac7a7 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_8xb32-210e_coco-384x288.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=152, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), - ), - head=dict( - 
type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + 
dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-256x192.py index acd9119244..4d5525345e 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-256x192.py @@ -1,125 +1,125 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2, - unbiased=True) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=152, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', 
input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + 
data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-384x288.py index 49bd2b224b..524d9995a3 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res152_dark-8xb32-210e_coco-384x288.py @@ -1,126 +1,126 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', - input_size=(288, 384), - heatmap_size=(72, 96), - sigma=3, - unbiased=True, - blur_kernel_size=17) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=152, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - 
ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(288, 384), + heatmap_size=(72, 96), + sigma=3, + unbiased=True, + blur_kernel_size=17) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' 
+ 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py index 7dbe1b43f7..e00887fe3d 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - 
pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-384x288.py index d74cc1392d..91e8ef2a9f 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-384x288.py 
+++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-384x288.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + 
by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-256x192.py index fdec305b10..07b60315ca 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-256x192.py @@ -1,125 +1,125 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size 
-auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2, - unbiased=True) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + 
depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-384x288.py index b34ad210f3..7678941c16 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_dark-8xb64-210e_coco-384x288.py @@ -1,125 +1,125 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', - input_size=(288, 384), - heatmap_size=(72, 96), - sigma=3, - unbiased=True) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', 
checkpoint='torchvision://resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(288, 384), + heatmap_size=(72, 96), + sigma=3, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', 
direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_fp16-8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_fp16-8xb64-210e_coco-256x192.py index 66a6a27822..57c8374a81 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_fp16-8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_fp16-8xb64-210e_coco-256x192.py @@ -1,7 +1,7 @@ -_base_ = ['./td-hm_res50_8xb64-210e_coco-256x192.py'] - -# fp16 settings -optim_wrapper = dict( - type='AmpOptimWrapper', - loss_scale='dynamic', -) +_base_ = ['./td-hm_res50_8xb64-210e_coco-256x192.py'] + +# fp16 settings +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', +) diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb32-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb32-210e_coco-384x288.py index 5bfbace9f6..05be08cf63 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb32-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb32-210e_coco-384x288.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 
103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNeSt', - depth=101, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnest101'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeSt', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnest101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# 
pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb64-210e_coco-256x192.py index 030ae95d63..fb08555b3d 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest101_8xb64-210e_coco-256x192.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNeSt', - depth=101, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnest101'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - 
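# A hypothetical helper (not part of MMPose) illustrating how the val_pipeline
# in these configs relates to the train_pipeline: evaluation uses the same
# sequence minus the random augmentations and the target-encoding step, since
# decoding happens in the head at test time.
_TRAIN_ONLY_STEPS = {
    'RandomFlip', 'RandomHalfBody', 'RandomBBoxTransform', 'GenerateTarget'
}

def derive_val_pipeline(train_pipeline):
    """Drop augmentation/encoding transforms from a top-down train pipeline."""
    return [step for step in train_pipeline if step['type'] not in _TRAIN_ONLY_STEPS]

# Applied to the train_pipeline of any config in this patch, this yields
# LoadImage, GetBBoxCenterScale, TopdownAffine and PackPoseInputs,
# i.e. exactly the val_pipeline.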
dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeSt', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnest101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + 
sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb16-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb16-210e_coco-384x288.py index bdcdb6c75f..48e6992c87 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb16-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb16-210e_coco-384x288.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=128) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNeSt', - depth=200, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnest200'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=16, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - 
data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=16, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=128) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeSt', + depth=200, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnest200'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 
'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb64-210e_coco-256x192.py index 1a5e1e8e4a..85466b78cf 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest200_8xb64-210e_coco-256x192.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNeSt', - depth=200, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnest200'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - 
test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeSt', + depth=200, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnest200'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb16-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb16-210e_coco-384x288.py index b519e9d2ef..5279a43e88 100644 --- 
a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb16-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb16-210e_coco-384x288.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=128) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNeSt', - depth=269, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnest269'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=16, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=16, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # 
warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=128) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeSt', + depth=269, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnest269'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb32-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb32-210e_coco-256x192.py index b3588d1fa3..84eb68b5d3 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb32-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest269_8xb32-210e_coco-256x192.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - 
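# A minimal sketch (epoch-level only; the 500-iteration LinearLR warm-up is
# ignored) of the step schedule shared by all configs in this patch: Adam
# starts at lr=5e-4 and is multiplied by gamma=0.1 at epochs 170 and 200 of
# the 210-epoch run.
import math

def lr_at_epoch(epoch, base_lr=5e-4, milestones=(170, 200), gamma=0.1):
    """Post-warm-up learning rate implied by the MultiStepLR settings."""
    decays = sum(epoch >= m for m in milestones)
    return base_lr * gamma ** decays

assert math.isclose(lr_at_epoch(100), 5e-4)
assert math.isclose(lr_at_epoch(180), 5e-5)
assert math.isclose(lr_at_epoch(205), 5e-6)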
gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNeSt', - depth=269, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnest269'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + 
bgr_to_rgb=True), + backbone=dict( + type='ResNeSt', + depth=269, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnest269'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-256x192.py index 43295bb41f..a9fb575a7d 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-256x192.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNeSt', - depth=50, - init_cfg=dict(type='Pretrained', 
checkpoint='mmcls://resnest50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeSt', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnest50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + 
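# A hypothetical derived config (a sketch only, not a file added by this
# patch), following the same pattern as td-hm_res50_fp16-8xb64-210e_coco-256x192.py
# above: inherit a complete config through `_base_` and override only the
# optimizer wrapper to train with automatic mixed precision.
_base_ = ['./td-hm_resnest50_8xb64-210e_coco-256x192.py']

# fp16 settings
optim_wrapper = dict(
    type='AmpOptimWrapper',
    loss_scale='dynamic',
)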
dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-384x288.py index e45320b036..b6e3dcb3cc 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnest50_8xb64-210e_coco-384x288.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNeSt', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnest50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', 
input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeSt', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnest50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + 
ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb32-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb32-210e_coco-384x288.py index 4fc55228fa..5d90ab7dd0 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb32-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb32-210e_coco-384x288.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNetV1d', - depth=101, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet101_v1d'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - 
data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNetV1d', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet101_v1d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, 
+ pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb64-210e_coco-256x192.py index 6c8cc4e808..f2a66df4eb 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d101_8xb64-210e_coco-256x192.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNetV1d', - depth=101, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet101_v1d'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators 
-val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNetV1d', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet101_v1d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb32-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb32-210e_coco-256x192.py index a85a7f80c4..4821267382 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb32-210e_coco-256x192.py +++ 
b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb32-210e_coco-256x192.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNetV1d', - depth=152, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet152_v1d'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + 
gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNetV1d', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet152_v1d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb48-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb48-210e_coco-384x288.py index 7a728ce806..27281dfb94 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb48-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d152_8xb48-210e_coco-384x288.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual 
training batch size -auto_scale_lr = dict(base_batch_size=384) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNetV1d', - depth=152, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet152_v1d'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=48, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=384) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNetV1d', + depth=152, + 
init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet152_v1d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=48, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-256x192.py index c241cdd3dd..e4183693ad 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-256x192.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNetV1d', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet50_v1d'), - ), - head=dict( 
- type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNetV1d', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet50_v1d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + 
dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-384x288.py index 4d1cea135b..59d3ba68bb 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnetv1d50_8xb64-210e_coco-384x288.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNetV1d', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet50_v1d'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', 
input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNetV1d', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet50_v1d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + 
ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb32-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb32-210e_coco-384x288.py index 508233371b..eedb64cb25 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb32-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb32-210e_coco-384x288.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNeXt', - depth=101, - init_cfg=dict( - type='Pretrained', checkpoint='mmcls://resnext101_32x4d'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - 
data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeXt', + depth=101, + init_cfg=dict( + type='Pretrained', checkpoint='mmcls://resnext101_32x4d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + 
test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb64-210e_coco-256x192.py index eafed7f075..42487c09ae 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext101_8xb64-210e_coco-256x192.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNeXt', - depth=101, - init_cfg=dict( - type='Pretrained', checkpoint='mmcls://resnext101_32x4d'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# 
evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeXt', + depth=101, + init_cfg=dict( + type='Pretrained', checkpoint='mmcls://resnext101_32x4d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb32-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb32-210e_coco-256x192.py index 27c2c263b0..82cfeae761 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb32-210e_coco-256x192.py +++ 
b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb32-210e_coco-256x192.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNeXt', - depth=152, - init_cfg=dict( - type='Pretrained', checkpoint='mmcls://resnext152_32x4d'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + 
gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeXt', + depth=152, + init_cfg=dict( + type='Pretrained', checkpoint='mmcls://resnext152_32x4d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb48-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb48-210e_coco-384x288.py index c02caeb746..2503796e97 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb48-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext152_8xb48-210e_coco-384x288.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual 
training batch size -auto_scale_lr = dict(base_batch_size=384) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNeXt', - depth=152, - init_cfg=dict( - type='Pretrained', checkpoint='mmcls://resnext152_32x4d'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=48, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=384) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeXt', + depth=152, + 
init_cfg=dict( + type='Pretrained', checkpoint='mmcls://resnext152_32x4d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=48, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-256x192.py index b088a44ca6..2513248073 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-256x192.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNeXt', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnext50_32x4d'), - ), - head=dict( - 
type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeXt', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnext50_32x4d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + 
dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-384x288.py index 9f97235218..756010c80d 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_resnext50_8xb64-210e_coco-384x288.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNeXt', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnext50_32x4d'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), 
- dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeXt', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnext50_32x4d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + 
ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn18_8xb32-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn18_8xb32-210e_coco-256x192.py index 18d16bd267..7641846651 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn18_8xb32-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn18_8xb32-210e_coco-256x192.py @@ -1,154 +1,154 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=2e-2, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 190, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -# multiple kernel_sizes of heatmap gaussian for 'Megvii' approach. -kernel_sizes = [11, 9, 7, 5] -codec = [ - dict( - type='MegviiHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - kernel_size=kernel_size) for kernel_size in kernel_sizes -] - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='RSN', - unit_channels=256, - num_stages=1, - num_units=4, - num_blocks=[2, 2, 2, 2], - num_steps=4, - norm_cfg=dict(type='BN'), - ), - head=dict( - type='MSPNHead', - out_shape=(64, 48), - unit_channels=256, - out_channels=17, - num_stages=1, - num_units=4, - norm_cfg=dict(type='BN'), - # each sub list is for a stage - # and each element in each list is for a unit - level_indices=[0, 1, 2, 3], - loss=[ - dict( - type='KeypointMSELoss', - use_target_weight=True, - loss_weight=0.25) - ] * 3 + [ - dict( - type='KeypointOHKMMSELoss', - use_target_weight=True, - loss_weight=1.) 
- ], - decoder=codec[-1]), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec[0]['input_size']), - dict(type='GenerateTarget', multilevel=True, encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec[0]['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json', - nms_mode='none') -test_evaluator = val_evaluator - -# fp16 settings -fp16 = dict(loss_scale='dynamic') +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-2, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 190, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +# multiple kernel_sizes of heatmap gaussian for 'Megvii' approach. 
+kernel_sizes = [11, 9, 7, 5] +codec = [ + dict( + type='MegviiHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + kernel_size=kernel_size) for kernel_size in kernel_sizes +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='RSN', + unit_channels=256, + num_stages=1, + num_units=4, + num_blocks=[2, 2, 2, 2], + num_steps=4, + norm_cfg=dict(type='BN'), + ), + head=dict( + type='MSPNHead', + out_shape=(64, 48), + unit_channels=256, + out_channels=17, + num_stages=1, + num_units=4, + norm_cfg=dict(type='BN'), + # each sub list is for a stage + # and each element in each list is for a unit + level_indices=[0, 1, 2, 3], + loss=[ + dict( + type='KeypointMSELoss', + use_target_weight=True, + loss_weight=0.25) + ] * 3 + [ + dict( + type='KeypointOHKMMSELoss', + use_target_weight=True, + loss_weight=1.) + ], + decoder=codec[-1]), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='GenerateTarget', multilevel=True, encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_mode='none') +test_evaluator = val_evaluator + +# fp16 settings +fp16 = dict(loss_scale='dynamic') diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn50_8xb32-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn50_8xb32-210e_coco-256x192.py index 069cb41312..b144cf670e 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn50_8xb32-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_rsn50_8xb32-210e_coco-256x192.py @@ -1,154 +1,154 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-3, -)) - -# learning policy 
-param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -# multiple kernel_sizes of heatmap gaussian for 'Megvii' approach. -kernel_sizes = [11, 9, 7, 5] -codec = [ - dict( - type='MegviiHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - kernel_size=kernel_size) for kernel_size in kernel_sizes -] - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='RSN', - unit_channels=256, - num_stages=1, - num_units=4, - num_blocks=[3, 4, 6, 3], - num_steps=4, - norm_cfg=dict(type='BN'), - ), - head=dict( - type='MSPNHead', - out_shape=(64, 48), - unit_channels=256, - out_channels=17, - num_stages=1, - num_units=4, - norm_cfg=dict(type='BN'), - # each sub list is for a stage - # and each element in each list is for a unit - level_indices=[0, 1, 2, 3], - loss=[ - dict( - type='KeypointMSELoss', - use_target_weight=True, - loss_weight=0.25) - ] * 3 + [ - dict( - type='KeypointOHKMMSELoss', - use_target_weight=True, - loss_weight=1.) - ], - decoder=codec[-1]), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec[0]['input_size']), - dict(type='GenerateTarget', multilevel=True, encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec[0]['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json', - nms_mode='none') -test_evaluator = val_evaluator - -# fp16 settings -fp16 = dict(loss_scale='dynamic') +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer 
+optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +# multiple kernel_sizes of heatmap gaussian for 'Megvii' approach. +kernel_sizes = [11, 9, 7, 5] +codec = [ + dict( + type='MegviiHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + kernel_size=kernel_size) for kernel_size in kernel_sizes +] + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='RSN', + unit_channels=256, + num_stages=1, + num_units=4, + num_blocks=[3, 4, 6, 3], + num_steps=4, + norm_cfg=dict(type='BN'), + ), + head=dict( + type='MSPNHead', + out_shape=(64, 48), + unit_channels=256, + out_channels=17, + num_stages=1, + num_units=4, + norm_cfg=dict(type='BN'), + # each sub list is for a stage + # and each element in each list is for a unit + level_indices=[0, 1, 2, 3], + loss=[ + dict( + type='KeypointMSELoss', + use_target_weight=True, + loss_weight=0.25) + ] * 3 + [ + dict( + type='KeypointOHKMMSELoss', + use_target_weight=True, + loss_weight=1.) + ], + decoder=codec[-1]), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='GenerateTarget', multilevel=True, encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec[0]['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + nms_mode='none') +test_evaluator = val_evaluator + +# fp16 settings +fp16 = dict(loss_scale='dynamic') diff --git 
a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb32-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb32-210e_coco-256x192.py index 544c87242f..d8e49fe943 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb32-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb32-210e_coco-256x192.py @@ -1,124 +1,124 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='SCNet', - depth=101, - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/scnet101-94250a77.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=1, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=1, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] 
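Every config touched by this patch begins with _base_ = ['../../../_base_/default_runtime.py'], so runtime defaults are inherited and only the keys defined in the file override them. A minimal sketch of loading one of these configs with MMEngine's Config API follows (the path is taken from this diff; the snippet assumes mmengine is installed alongside mmpose):

    from mmengine.config import Config

    # _base_ files are resolved and merged automatically on load.
    cfg = Config.fromfile(
        'configs/body_2d_keypoint/topdown_heatmap/coco/'
        'td-hm_scnet101_8xb32-210e_coco-256x192.py')

    print(cfg.train_cfg.max_epochs)   # 210, as set in the config below
    print(cfg.auto_scale_lr)          # {'base_batch_size': 512}
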
+ +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SCNet', + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/scnet101-94250a77.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=1, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb48-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb48-210e_coco-384x288.py index 1af2e44ef0..3281e4a76b 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb48-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet101_8xb48-210e_coco-384x288.py @@ -1,124 +1,124 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = 
dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='SCNet', - depth=101, - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/scnet101-94250a77.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=48, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = 
dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SCNet', + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/scnet101-94250a77.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=48, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb32-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb32-210e_coco-384x288.py index efa1ad924c..41071b6ce8 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb32-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb32-210e_coco-384x288.py @@ -1,124 +1,124 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks 
-default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='SCNet', - depth=50, - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/scnet50-7ef0a199.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=1, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=1, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SCNet', + depth=50, + init_cfg=dict( + 
type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/scnet50-7ef0a199.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=1, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb64-210e_coco-256x192.py index 9d784d8029..73553330e8 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_scnet50_8xb64-210e_coco-256x192.py @@ -1,124 +1,124 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='SCNet', - depth=50, - init_cfg=dict( - type='Pretrained', - 
checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/scnet50-7ef0a199.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SCNet', + depth=50, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/scnet50-7ef0a199.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline 
= [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb32-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb32-210e_coco-384x288.py index b515b744c4..f1bb265fce 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb32-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb32-210e_coco-384x288.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='SEResNet', - depth=101, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://se-resnet101'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - 
dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SEResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://se-resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, 
+ sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb64-210e_coco-256x192.py index f6d9fab2ed..d679fc9301 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet101_8xb64-210e_coco-256x192.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='SEResNet', - depth=101, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://se-resnet101'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - 
type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SEResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://se-resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + 
bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb32-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb32-210e_coco-256x192.py index a0ef9bf571..721d4b8966 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb32-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb32-210e_coco-256x192.py @@ -1,120 +1,120 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='SEResNet', - depth=152, - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - 
pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SEResNet', + depth=152, + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb48-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb48-210e_coco-384x288.py index 13524c1217..94ee1e9e66 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb48-210e_coco-384x288.py +++ 
b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet152_8xb48-210e_coco-384x288.py @@ -1,120 +1,120 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=384) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='SEResNet', - depth=152, - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=48, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual 
training batch size +auto_scale_lr = dict(base_batch_size=384) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SEResNet', + depth=152, + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=48, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-256x192.py index 93fb78fac5..6ac46fdc8f 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-256x192.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec 
settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='SEResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://se-resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SEResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://se-resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + 
loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-384x288.py index fa2002a70a..8860772677 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_seresnet50_8xb64-210e_coco-384x288.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='SEResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://se-resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( 
- flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SEResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://se-resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] 
+val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-256x192.py index 029f48d3d9..ec7d34b5a2 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-256x192.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ShuffleNetV1', - groups=3, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://shufflenet_v1'), - ), - head=dict( - type='HeatmapHead', - in_channels=960, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - 
dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ShuffleNetV1', + groups=3, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://shufflenet_v1'), + ), + head=dict( + type='HeatmapHead', + in_channels=960, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + 
persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-384x288.py index f06c325bd1..cff10f4307 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv1_8xb64-210e_coco-384x288.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ShuffleNetV1', - groups=3, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://shufflenet_v1'), - ), - head=dict( - type='HeatmapHead', - in_channels=960, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', 
shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ShuffleNetV1', + groups=3, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://shufflenet_v1'), + ), + head=dict( + type='HeatmapHead', + in_channels=960, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator 
= val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-256x192.py index 333998490e..59c8109156 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-256x192.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ShuffleNetV2', - widen_factor=1.0, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://shufflenet_v2'), - ), - head=dict( - type='HeatmapHead', - in_channels=1024, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# 
runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ShuffleNetV2', + widen_factor=1.0, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://shufflenet_v2'), + ), + head=dict( + type='HeatmapHead', + in_channels=1024, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-384x288.py index e7be5484e8..d65aa54789 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_shufflenetv2_8xb64-210e_coco-384x288.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# 
optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ShuffleNetV2', - widen_factor=1.0, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://shufflenet_v2'), - ), - head=dict( - type='HeatmapHead', - in_channels=1024, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) 
+ +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ShuffleNetV2', + widen_factor=1.0, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://shufflenet_v2'), + ), + head=dict( + type='HeatmapHead', + in_channels=1024, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-256x192.py index 81877b893f..c29257b91e 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-256x192.py @@ -1,139 +1,139 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', 
input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='SwinTransformer', - embed_dims=128, - depths=[2, 2, 18, 2], - num_heads=[4, 8, 16, 32], - window_size=7, - mlp_ratio=4, - qkv_bias=True, - qk_scale=None, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.3, - patch_norm=True, - out_indices=(3, ), - with_cp=False, - convert_weights=True, - init_cfg=dict( - type='Pretrained', - checkpoint='https://github.com/SwinTransformer/storage/releases/' - 'download/v1.0.0/swin_base_patch4_window7_224_22k.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=1024, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +norm_cfg = dict(type='SyncBN', 
requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SwinTransformer', + embed_dims=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + patch_norm=True, + out_indices=(3, ), + with_cp=False, + convert_weights=True, + init_cfg=dict( + type='Pretrained', + checkpoint='https://github.com/SwinTransformer/storage/releases/' + 'download/v1.0.0/swin_base_patch4_window7_224_22k.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=1024, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-384x288.py index 0c1d5fa12f..4bc632ac61 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-b-p4-w7_8xb32-210e_coco-384x288.py @@ -1,139 +1,139 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR 
based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=2) - -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='SwinTransformer', - embed_dims=128, - depths=[2, 2, 18, 2], - num_heads=[4, 8, 16, 32], - window_size=12, - mlp_ratio=4, - qkv_bias=True, - qk_scale=None, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.3, - patch_norm=True, - out_indices=(3, ), - with_cp=False, - convert_weights=True, - init_cfg=dict( - type='Pretrained', - checkpoint='https://github.com/SwinTransformer/storage/releases/' - 'download/v1.0.0/swin_base_patch4_window12_384_22k.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=1024, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = 
dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=2) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SwinTransformer', + embed_dims=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=12, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + patch_norm=True, + out_indices=(3, ), + with_cp=False, + convert_weights=True, + init_cfg=dict( + type='Pretrained', + checkpoint='https://github.com/SwinTransformer/storage/releases/' + 'download/v1.0.0/swin_base_patch4_window12_384_22k.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=1024, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-256x192.py index 14d08a49f8..3294263564 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-256x192.py @@ -1,148 +1,148 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict( - optimizer=dict( - type='AdamW', - lr=5e-4, - betas=(0.9, 0.999), - weight_decay=0.01, - ), - 
paramwise_cfg=dict( - custom_keys={ - 'absolute_pos_embed': dict(decay_mult=0.), - 'relative_position_bias_table': dict(decay_mult=0.), - 'norm': dict(decay_mult=0.) - })) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='SwinTransformer', - embed_dims=192, - depths=[2, 2, 18, 2], - num_heads=[6, 12, 24, 48], - window_size=7, - mlp_ratio=4, - qkv_bias=True, - qk_scale=None, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.5, - patch_norm=True, - out_indices=(3, ), - with_cp=False, - convert_weights=True, - init_cfg=dict( - type='Pretrained', - checkpoint='https://github.com/SwinTransformer/storage/releases/' - 'download/v1.0.0/swin_base_patch4_window7_224_22k.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=1536, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer 
+optim_wrapper = dict( + optimizer=dict( + type='AdamW', + lr=5e-4, + betas=(0.9, 0.999), + weight_decay=0.01, + ), + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'relative_position_bias_table': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) + })) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SwinTransformer', + embed_dims=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.5, + patch_norm=True, + out_indices=(3, ), + with_cp=False, + convert_weights=True, + init_cfg=dict( + type='Pretrained', + checkpoint='https://github.com/SwinTransformer/storage/releases/' + 'download/v1.0.0/swin_base_patch4_window7_224_22k.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=1536, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git 
a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-384x288.py index 692c8df1a6..643cbc24be 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-l-p4-w7_8xb32-210e_coco-384x288.py @@ -1,148 +1,148 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict( - optimizer=dict( - type='AdamW', - lr=5e-4, - betas=(0.9, 0.999), - weight_decay=0.01, - ), - paramwise_cfg=dict( - custom_keys={ - 'absolute_pos_embed': dict(decay_mult=0.), - 'relative_position_bias_table': dict(decay_mult=0.), - 'norm': dict(decay_mult=0.) - })) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=2) - -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='SwinTransformer', - embed_dims=192, - depths=[2, 2, 18, 2], - num_heads=[6, 12, 24, 48], - window_size=7, - mlp_ratio=4, - qkv_bias=True, - qk_scale=None, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.5, - patch_norm=True, - out_indices=(3, ), - with_cp=False, - convert_weights=True, - init_cfg=dict( - type='Pretrained', - checkpoint='https://github.com/SwinTransformer/storage/releases/' - 'download/v1.0.0/swin_base_patch4_window12_384_22k.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=1536, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', 
shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict( + optimizer=dict( + type='AdamW', + lr=5e-4, + betas=(0.9, 0.999), + weight_decay=0.01, + ), + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'relative_position_bias_table': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) + })) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=2) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SwinTransformer', + embed_dims=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.5, + patch_norm=True, + out_indices=(3, ), + with_cp=False, + convert_weights=True, + init_cfg=dict( + type='Pretrained', + checkpoint='https://github.com/SwinTransformer/storage/releases/' + 'download/v1.0.0/swin_base_patch4_window12_384_22k.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=1536, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + 
batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-t-p4-w7_8xb32-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-t-p4-w7_8xb32-210e_coco-256x192.py index 068ee0649f..9c4ab23409 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-t-p4-w7_8xb32-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_swin-t-p4-w7_8xb32-210e_coco-256x192.py @@ -1,139 +1,139 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='SwinTransformer', - embed_dims=96, - depths=[2, 2, 6, 2], - num_heads=[3, 6, 12, 24], - window_size=7, - mlp_ratio=4, - qkv_bias=True, - qk_scale=None, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.2, - patch_norm=True, - out_indices=(3, ), - with_cp=False, - convert_weights=True, - init_cfg=dict( - type='Pretrained', - checkpoint='https://github.com/SwinTransformer/storage/releases/' - 'download/v1.0.0/swin_tiny_patch4_window7_224.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=768, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - 
num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SwinTransformer', + embed_dims=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + patch_norm=True, + out_indices=(3, ), + with_cp=False, + convert_weights=True, + init_cfg=dict( + type='Pretrained', + checkpoint='https://github.com/SwinTransformer/storage/releases/' + 'download/v1.0.0/swin_tiny_patch4_window7_224.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=768, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + 
type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vgg16-bn_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vgg16-bn_8xb64-210e_coco-256x192.py index b85adb998b..f50c2b48af 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vgg16-bn_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vgg16-bn_8xb64-210e_coco-256x192.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='VGG', - depth=16, - norm_cfg=dict(type='BN'), - init_cfg=dict(type='Pretrained', checkpoint='mmcls://vgg16_bn'), - ), - head=dict( - type='HeatmapHead', - in_channels=512, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - 
ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='VGG', + depth=16, + norm_cfg=dict(type='BN'), + init_cfg=dict(type='Pretrained', checkpoint='mmcls://vgg16_bn'), + ), + head=dict( + type='HeatmapHead', + in_channels=512, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 
'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-mbv3_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-mbv3_8xb64-210e_coco-256x192.py index 04fcc1ad2e..7be5676386 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-mbv3_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-mbv3_8xb64-210e_coco-256x192.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict(type='ViPNAS_MobileNetV3'), - head=dict( - type='ViPNASHead', - in_channels=160, - out_channels=17, - deconv_out_channels=(160, 160, 160), - deconv_num_groups=(160, 160, 160), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 
'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict(type='ViPNAS_MobileNetV3'), + head=dict( + type='ViPNASHead', + in_channels=160, + out_channels=17, + deconv_out_channels=(160, 160, 160), + deconv_num_groups=(160, 160, 160), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-res50_8xb64-210e_coco-256x192.py 
b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-res50_8xb64-210e_coco-256x192.py index 8190d7ffd2..947753242b 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-res50_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-res50_8xb64-210e_coco-256x192.py @@ -1,120 +1,120 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict(type='ViPNAS_ResNet', depth=50), - head=dict( - type='ViPNASHead', - in_channels=608, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning 
policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict(type='ViPNAS_ResNet', depth=50), + head=dict( + type='ViPNASHead', + in_channels=608, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/vgg_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/vgg_coco.md index a2c19453f3..a03c8fce96 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/vgg_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/vgg_coco.md @@ -1,39 +1,39 @@ - - -
-VGG (ICLR'2015) - -```bibtex -@article{simonyan2014very, - title={Very deep convolutional networks for large-scale image recognition}, - author={Simonyan, Karen and Zisserman, Andrew}, - journal={arXiv preprint arXiv:1409.1556}, - year={2014} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [vgg](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vgg16-bn_8xb64-210e_coco-256x192.py) | 256x192 | 0.699 | 0.890 | 0.769 | 0.754 | 0.927 | [ckpt](https://download.openmmlab.com/mmpose/top_down/vgg/vgg16_bn_coco_256x192-7e7c58d6_20210517.pth) | [log](https://download.openmmlab.com/mmpose/top_down/vgg/vgg16_bn_coco_256x192_20210517.log.json) | + + +
+VGG (ICLR'2015) + +```bibtex +@article{simonyan2014very, + title={Very deep convolutional networks for large-scale image recognition}, + author={Simonyan, Karen and Zisserman, Andrew}, + journal={arXiv preprint arXiv:1409.1556}, + year={2014} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [vgg](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vgg16-bn_8xb64-210e_coco-256x192.py) | 256x192 | 0.699 | 0.890 | 0.769 | 0.754 | 0.927 | [ckpt](https://download.openmmlab.com/mmpose/top_down/vgg/vgg16_bn_coco_256x192-7e7c58d6_20210517.pth) | [log](https://download.openmmlab.com/mmpose/top_down/vgg/vgg16_bn_coco_256x192_20210517.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/vgg_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/vgg_coco.yml index 166fa05fcd..6de18307eb 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/vgg_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/vgg_coco.yml @@ -1,19 +1,19 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vgg16-bn_8xb64-210e_coco-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: - - SimpleBaseline2D - - VGG - Training Data: COCO - Name: td-hm_vgg16-bn_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.699 - AP@0.5: 0.89 - AP@0.75: 0.769 - AR: 0.754 - AR@0.5: 0.927 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/vgg/vgg16_bn_coco_256x192-7e7c58d6_20210517.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vgg16-bn_8xb64-210e_coco-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - VGG + Training Data: COCO + Name: td-hm_vgg16-bn_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.699 + AP@0.5: 0.89 + AP@0.75: 0.769 + AR: 0.754 + AR@0.5: 0.927 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/vgg/vgg16_bn_coco_256x192-7e7c58d6_20210517.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/vipnas_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/vipnas_coco.md index b6a178865b..e138d218ca 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/vipnas_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/vipnas_coco.md @@ -1,40 +1,40 @@ - - -
-ViPNAS (CVPR'2021) - -```bibtex -@article{xu2021vipnas, - title={ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search}, - author={Xu, Lumin and Guan, Yingda and Jin, Sheng and Liu, Wentao and Qian, Chen and Luo, Ping and Ouyang, Wanli and Wang, Xiaogang}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - year={2021} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [S-ViPNAS-MobileNetV3](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-mbv3_8xb64-210e_coco-256x192.py) | 256x192 | 0.700 | 0.887 | 0.783 | 0.758 | 0.929 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-mbv3_8xb64-210e_coco-256x192-e0987441_20221010.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-mbv3_8xb64-210e_coco-256x192_20221010.log) | -| [S-ViPNAS-Res50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-res50_8xb64-210e_coco-256x192.py) | 256x192 | 0.711 | 0.894 | 0.787 | 0.769 | 0.934 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-res50_8xb64-210e_coco-256x192-35d4bff9_20220917.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-res50_8xb64-210e_coco-256x192_20220917.log) | + + +
+ViPNAS (CVPR'2021) + +```bibtex +@inproceedings{xu2021vipnas, + title={ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search}, + author={Xu, Lumin and Guan, Yingda and Jin, Sheng and Liu, Wentao and Qian, Chen and Luo, Ping and Ouyang, Wanli and Wang, Xiaogang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + year={2021} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [S-ViPNAS-MobileNetV3](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-mbv3_8xb64-210e_coco-256x192.py) | 256x192 | 0.700 | 0.887 | 0.783 | 0.758 | 0.929 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-mbv3_8xb64-210e_coco-256x192-e0987441_20221010.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-mbv3_8xb64-210e_coco-256x192_20221010.log) | +| [S-ViPNAS-Res50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-res50_8xb64-210e_coco-256x192.py) | 256x192 | 0.711 | 0.894 | 0.787 | 0.769 | 0.934 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-res50_8xb64-210e_coco-256x192-35d4bff9_20220917.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-res50_8xb64-210e_coco-256x192_20220917.log) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/vipnas_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/vipnas_coco.yml index cbdaa5bcab..66f181970d 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/vipnas_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/vipnas_coco.yml @@ -1,40 +1,40 @@ -Collections: -- Name: ViPNAS - Paper: - Title: 'ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search' - URL: https://arxiv.org/abs/2105.10154 - README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/vipnas.md -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-mbv3_8xb64-210e_coco-256x192.py - In Collection: ViPNAS - Metadata: - Architecture: &id001 - - ViPNAS - Training Data: COCO - Name: td-hm_vipnas-mbv3_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.7 - AP@0.5: 0.887 - AP@0.75: 0.783 - AR: 0.758 - AR@0.5: 0.929 - Task: Body 2D Keypoint - Weights: (https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-mbv3_8xb64-210e_coco-256x192-e0987441_20221010.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-res50_8xb64-210e_coco-256x192.py - In Collection: ViPNAS - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-hm_vipnas-res50_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.711 - AP@0.5: 0.894 - AP@0.75: 0.787 - AR: 0.769 - AR@0.5: 0.934 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-res50_8xb64-210e_coco-256x192-35d4bff9_20220917.pth +Collections: +- Name: ViPNAS + Paper: + Title: 'ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search' + URL: https://arxiv.org/abs/2105.10154 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/vipnas.md +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-mbv3_8xb64-210e_coco-256x192.py + In Collection: ViPNAS + Metadata: + Architecture: &id001 + - ViPNAS + Training Data: COCO + Name: td-hm_vipnas-mbv3_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.7 + AP@0.5: 0.887 + 
AP@0.75: 0.783 + AR: 0.758 + AR@0.5: 0.929 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-mbv3_8xb64-210e_coco-256x192-e0987441_20221010.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-res50_8xb64-210e_coco-256x192.py + In Collection: ViPNAS + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-hm_vipnas-res50_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.711 + AP@0.5: 0.894 + AP@0.75: 0.787 + AR: 0.769 + AR@0.5: 0.934 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_vipnas-res50_8xb64-210e_coco-256x192-35d4bff9_20220917.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/vitpose_coco.md b/configs/body_2d_keypoint/topdown_heatmap/coco/vitpose_coco.md index 68baf35aec..ea4ba6fdc8 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/vitpose_coco.md +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/vitpose_coco.md @@ -1,61 +1,61 @@ -To utilize ViTPose, you'll need to have [MMPreTrain](https://github.com/open-mmlab/mmpretrain). To install the required version, run the following command: - -```shell -mim install 'mmpretrain>=1.0.0' -``` - - - -
-ViTPose (NeurIPS'2022) - -```bibtex -@inproceedings{ - xu2022vitpose, - title={Vi{TP}ose: Simple Vision Transformer Baselines for Human Pose Estimation}, - author={Yufei Xu and Jing Zhang and Qiming Zhang and Dacheng Tao}, - booktitle={Advances in Neural Information Processing Systems}, - year={2022}, -} -``` - -
- - - -
-COCO-WholeBody (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -> With classic decoder - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [ViTPose-S](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192.py) | 256x192 | 0.739 | 0.903 | 0.816 | 0.792 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.json) | -| [ViTPose-B](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py) | 256x192 | 0.757 | 0.905 | 0.829 | 0.810 | 0.946 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.json) | -| [ViTPose-L](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192.py) | 256x192 | 0.782 | 0.914 | 0.850 | 0.834 | 0.952 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.json) | -| [ViTPose-H](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py) | 256x192 | 0.788 | 0.917 | 0.855 | 0.839 | 0.954 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.json) | -| [ViTPose-H\*](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py) | 256x192 | 0.790 | 0.916 | 0.857 | 0.840 | 0.953 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_3rdparty_coco-256x192-5b738c8e_20230314) | - | - -*Models with * are converted from the [official repo](https://github.com/ViTAE-Transformer/ViTPose). 
The config files of these models are only for validation.* - -> With simple decoder - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [ViTPose-S](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192.py) | 256x192 | 0.736 | 0.900 | 0.811 | 0.790 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192-4c101a76_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192-4c101a76_20230314.json) | -| [ViTPose-B](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192.py) | 256x192 | 0.756 | 0.906 | 0.826 | 0.809 | 0.946 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192-0b8234ea_20230407.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192-0b8234ea_20230407.json) | -| [ViTPose-L](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192.py) | 256x192 | 0.780 | 0.914 | 0.851 | 0.833 | 0.952 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192-3a7ee9e1_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192-3a7ee9e1_20230314.json) | -| [ViTPose-H](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192.py) | 256x192 | 0.789 | 0.916 | 0.856 | 0.839 | 0.953 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192-ffd48c05_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192-ffd48c05_20230314.json) | +To utilize ViTPose, you'll need to have [MMPreTrain](https://github.com/open-mmlab/mmpretrain). To install the required version, run the following command: + +```shell +mim install 'mmpretrain>=1.0.0' +``` + + + +
+ViTPose (NeurIPS'2022) + +```bibtex +@inproceedings{ + xu2022vitpose, + title={Vi{TP}ose: Simple Vision Transformer Baselines for Human Pose Estimation}, + author={Yufei Xu and Jing Zhang and Qiming Zhang and Dacheng Tao}, + booktitle={Advances in Neural Information Processing Systems}, + year={2022}, +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +> With classic decoder + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [ViTPose-S](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192.py) | 256x192 | 0.739 | 0.903 | 0.816 | 0.792 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.json) | +| [ViTPose-B](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py) | 256x192 | 0.757 | 0.905 | 0.829 | 0.810 | 0.946 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.json) | +| [ViTPose-L](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192.py) | 256x192 | 0.782 | 0.914 | 0.850 | 0.834 | 0.952 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.json) | +| [ViTPose-H](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py) | 256x192 | 0.788 | 0.917 | 0.855 | 0.839 | 0.954 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.json) | +| [ViTPose-H\*](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py) | 256x192 | 0.790 | 0.916 | 0.857 | 0.840 | 0.953 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_3rdparty_coco-256x192-5b738c8e_20230314) | - | + +*Models with * are converted from the [official repo](https://github.com/ViTAE-Transformer/ViTPose). 
The config files of these models are only for validation.* + +> With simple decoder + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [ViTPose-S](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192.py) | 256x192 | 0.736 | 0.900 | 0.811 | 0.790 | 0.940 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192-4c101a76_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192-4c101a76_20230314.json) | +| [ViTPose-B](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192.py) | 256x192 | 0.756 | 0.906 | 0.826 | 0.809 | 0.946 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192-0b8234ea_20230407.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192-0b8234ea_20230407.json) | +| [ViTPose-L](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192.py) | 256x192 | 0.780 | 0.914 | 0.851 | 0.833 | 0.952 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192-3a7ee9e1_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192-3a7ee9e1_20230314.json) | +| [ViTPose-H](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192.py) | 256x192 | 0.789 | 0.916 | 0.856 | 0.839 | 0.953 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192-ffd48c05_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192-ffd48c05_20230314.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/coco/vitpose_coco.yml b/configs/body_2d_keypoint/topdown_heatmap/coco/vitpose_coco.yml index 10cc7bf972..fd70420dbe 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/coco/vitpose_coco.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/coco/vitpose_coco.yml @@ -1,155 +1,155 @@ -Collections: -- Name: ViTPose - Paper: - Title: 'ViTPose: Simple Vision Transformer Baselines for Human Pose Estimation' - URL: https://arxiv.org/abs/2204.12484 - README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/vitpose.md - Metadata: - Training Resources: 8x A100 GPUs -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192.py - In Collection: ViTPose - Metadata: - Architecture: &id001 - - ViTPose - - Classic Head - Model Size: Small - Training Data: COCO - Name: td-hm_ViTPose-small_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.739 - AP@0.5: 0.903 - AP@0.75: 0.816 - AR: 0.792 - AR@0.5: 0.942 - Task: Body 2D Keypoint - Weights: 
https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py - In Collection: ViTPose - Metadata: - Architecture: *id001 - Model Size: Base - Training Data: COCO - Name: td-hm_ViTPose-base_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.757 - AP@0.5: 0.905 - AP@0.75: 0.829 - AR: 0.81 - AR@0.5: 0.946 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192.py - In Collection: ViTPose - Metadata: - Architecture: *id001 - Model Size: Large - Training Data: COCO - Name: td-hm_ViTPose-large_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.782 - AP@0.5: 0.914 - AP@0.75: 0.850 - AR: 0.834 - AR@0.5: 0.952 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py - In Collection: ViTPose - Metadata: - Architecture: *id001 - Model Size: Huge - Training Data: COCO - Name: td-hm_ViTPose-huge_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.788 - AP@0.5: 0.917 - AP@0.75: 0.855 - AR: 0.839 - AR@0.5: 0.954 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192.py - In Collection: ViTPose - Alias: vitpose-s - Metadata: - Architecture: &id002 - - ViTPose - - Simple Head - Model Size: Small - Training Data: COCO - Name: td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.736 - AP@0.5: 0.900 - AP@0.75: 0.811 - AR: 0.790 - AR@0.5: 0.940 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192-4c101a76_20230314.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192.py - In Collection: ViTPose - Alias: - - vitpose - - vitpose-b - Metadata: - Architecture: *id002 - Model Size: Base - Training Data: COCO - Name: td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.756 - AP@0.5: 0.906 - AP@0.75: 0.826 - AR: 0.809 - AR@0.5: 0.946 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192-0b8234ea_20230407.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192.py - In Collection: ViTPose - Alias: vitpose-l - Metadata: - Architecture: *id002 - Model Size: Large - Training Data: COCO - Name: td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.780 - AP@0.5: 0.914 - AP@0.75: 0.851 - AR: 0.833 - AR@0.5: 0.952 - Task: Body 2D Keypoint - Weights: 
https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192-3a7ee9e1_20230314.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192.py - In Collection: ViTPose - Alias: vitpose-h - Metadata: - Architecture: *id002 - Model Size: Huge - Training Data: COCO - Name: td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.789 - AP@0.5: 0.916 - AP@0.75: 0.856 - AR: 0.839 - AR@0.5: 0.953 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192-ffd48c05_20230314.pth +Collections: +- Name: ViTPose + Paper: + Title: 'ViTPose: Simple Vision Transformer Baselines for Human Pose Estimation' + URL: https://arxiv.org/abs/2204.12484 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/vitpose.md + Metadata: + Training Resources: 8x A100 GPUs +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192.py + In Collection: ViTPose + Metadata: + Architecture: &id001 + - ViTPose + - Classic Head + Model Size: Small + Training Data: COCO + Name: td-hm_ViTPose-small_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.739 + AP@0.5: 0.903 + AP@0.75: 0.816 + AR: 0.792 + AR@0.5: 0.942 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py + In Collection: ViTPose + Metadata: + Architecture: *id001 + Model Size: Base + Training Data: COCO + Name: td-hm_ViTPose-base_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.757 + AP@0.5: 0.905 + AP@0.75: 0.829 + AR: 0.81 + AR@0.5: 0.946 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192.py + In Collection: ViTPose + Metadata: + Architecture: *id001 + Model Size: Large + Training Data: COCO + Name: td-hm_ViTPose-large_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.782 + AP@0.5: 0.914 + AP@0.75: 0.850 + AR: 0.834 + AR@0.5: 0.952 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py + In Collection: ViTPose + Metadata: + Architecture: *id001 + Model Size: Huge + Training Data: COCO + Name: td-hm_ViTPose-huge_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.788 + AP@0.5: 0.917 + AP@0.75: 0.855 + AR: 0.839 + AR@0.5: 0.954 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192.py + In Collection: ViTPose + Alias: vitpose-s + Metadata: + Architecture: &id002 + - ViTPose + - Simple Head + Model Size: Small + Training Data: COCO + Name: 
td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.736 + AP@0.5: 0.900 + AP@0.75: 0.811 + AR: 0.790 + AR@0.5: 0.940 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192-4c101a76_20230314.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192.py + In Collection: ViTPose + Alias: + - vitpose + - vitpose-b + Metadata: + Architecture: *id002 + Model Size: Base + Training Data: COCO + Name: td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.756 + AP@0.5: 0.906 + AP@0.75: 0.826 + AR: 0.809 + AR@0.5: 0.946 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192-0b8234ea_20230407.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192.py + In Collection: ViTPose + Alias: vitpose-l + Metadata: + Architecture: *id002 + Model Size: Large + Training Data: COCO + Name: td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.780 + AP@0.5: 0.914 + AP@0.75: 0.851 + AR: 0.833 + AR@0.5: 0.952 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192-3a7ee9e1_20230314.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192.py + In Collection: ViTPose + Alias: vitpose-h + Metadata: + Architecture: *id002 + Model Size: Huge + Training Data: COCO + Name: td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.789 + AP@0.5: 0.916 + AP@0.75: 0.856 + AR: 0.839 + AR@0.5: 0.953 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192-ffd48c05_20230314.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext-m_udp_8xb64-210e_crowpose-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext-m_udp_8xb64-210e_crowpose-256x192.py index b083719303..1edee28029 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext-m_udp_8xb64-210e_crowpose-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext-m_udp_8xb64-210e_crowpose-256x192.py @@ -1,216 +1,216 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 210 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), 
sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' - 'rtmdet/cspnext_rsb_pretrain/' - 'cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth')), - head=dict( - type='HeatmapHead', - in_channels=768, - out_channels=14, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'CrowdPoseDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/', -# f'{data_root}': 's3://openmmlab/datasets/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - 
ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', - bbox_file='data/crowdpose/annotations/det_for_crowd_test_0.1_0.5.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict( - save_best='crowdpose/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'crowdpose/annotations/mmpose_crowdpose_test.json', - use_area=False, - iou_type='keypoints_crowd', - prefix='crowdpose') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth')), + head=dict( + type='HeatmapHead', + in_channels=768, + out_channels=14, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CrowdPoseDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/', +# f'{data_root}': 's3://openmmlab/datasets/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + 
dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + bbox_file='data/crowdpose/annotations/det_for_crowd_test_0.1_0.5.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='crowdpose/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'crowdpose/annotations/mmpose_crowdpose_test.json', + use_area=False, + iou_type='keypoints_crowd', + prefix='crowdpose') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext_udp_crowdpose.md b/configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext_udp_crowdpose.md index 24c3534838..734e2108e2 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext_udp_crowdpose.md +++ b/configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext_udp_crowdpose.md @@ -1,56 +1,56 @@ - - -
-RTMDet (ArXiv 2022) - -```bibtex -@misc{lyu2022rtmdet, - title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, - author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, - year={2022}, - eprint={2212.07784}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -
- - - -
-UDP (CVPR'2020) - -```bibtex -@InProceedings{Huang_2020_CVPR, - author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, - title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, - booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, - month = {June}, - year = {2020} -} -``` - -
- - - -
-CrowdPose (CVPR'2019) - -```bibtex -@article{li2018crowdpose, - title={CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark}, - author={Li, Jiefeng and Wang, Can and Zhu, Hao and Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu}, - journal={arXiv preprint arXiv:1812.00324}, - year={2018} -} -``` - -
- -Results on CrowdPose test with [YOLOv3](https://github.com/eriklindernoren/PyTorch-YOLOv3) human detector - -| Arch | Input Size | AP | AP50 | AP75 | AP (E) | AP (M) | AP (H) | ckpt | log | -| :--------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :----: | :----: | :----: | :--------------------------------------------: | :-------------------------------------------: | -| [pose_cspnext_m](/configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext-m_udp_8xb64-210e_crowpose-256x192.py) | 256x192 | 0.662 | 0.821 | 0.723 | 0.759 | 0.675 | 0.539 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-crowdpose_pt-in1k_210e-256x192-f591079f_20230123.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-crowdpose_pt-in1k_210e-256x192-f591079f_20230123.json) | + + +
+RTMDet (arXiv'2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+UDP (CVPR'2020) + +```bibtex +@InProceedings{Huang_2020_CVPR, + author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, + title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, + booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2020} +} +``` + +
+ + + +
+CrowdPose (CVPR'2019) + +```bibtex +@article{li2018crowdpose, + title={CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark}, + author={Li, Jiefeng and Wang, Can and Zhu, Hao and Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu}, + journal={arXiv preprint arXiv:1812.00324}, + year={2018} +} +``` + +
+ +Results on CrowdPose test with [YOLOv3](https://github.com/eriklindernoren/PyTorch-YOLOv3) human detector + +| Arch | Input Size | AP | AP50 | AP75 | AP (E) | AP (M) | AP (H) | ckpt | log | +| :--------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :----: | :----: | :----: | :--------------------------------------------: | :-------------------------------------------: | +| [pose_cspnext_m](/configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext-m_udp_8xb64-210e_crowpose-256x192.py) | 256x192 | 0.662 | 0.821 | 0.723 | 0.759 | 0.675 | 0.539 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-crowdpose_pt-in1k_210e-256x192-f591079f_20230123.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-crowdpose_pt-in1k_210e-256x192-f591079f_20230123.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext_udp_crowdpose.yml b/configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext_udp_crowdpose.yml index 6e5b4cd691..6201813fda 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext_udp_crowdpose.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext_udp_crowdpose.yml @@ -1,20 +1,20 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext-m_udp_8xb64-210e_crowpose-256x192.py - In Collection: UDP - Metadata: - Architecture: - - UDP - - CSPNeXt - Training Data: CrowdPose - Name: cspnext-m_udp_8xb64-210e_crowpose-256x192 - Results: - - Dataset: CrowdPose - Metrics: - AP: 0.662 - AP (E): 0.759 - AP (H): 0.539 - AP (M): 0.675 - AP@0.5: 0.821 - AP@0.75: 0.723 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-crowdpose_pt-in1k_210e-256x192-f591079f_20230123.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext-m_udp_8xb64-210e_crowpose-256x192.py + In Collection: UDP + Metadata: + Architecture: + - UDP + - CSPNeXt + Training Data: CrowdPose + Name: cspnext-m_udp_8xb64-210e_crowpose-256x192 + Results: + - Dataset: CrowdPose + Metrics: + AP: 0.662 + AP (E): 0.759 + AP (H): 0.539 + AP (M): 0.675 + AP@0.5: 0.821 + AP@0.75: 0.723 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-crowdpose_pt-in1k_210e-256x192-f591079f_20230123.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/crowdpose/hrnet_crowdpose.md b/configs/body_2d_keypoint/topdown_heatmap/crowdpose/hrnet_crowdpose.md index c0d24d4717..5fdb1aa7bb 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/crowdpose/hrnet_crowdpose.md +++ b/configs/body_2d_keypoint/topdown_heatmap/crowdpose/hrnet_crowdpose.md @@ -1,38 +1,38 @@ - - -
-HRNet (CVPR'2019) - -```bibtex -@inproceedings{sun2019deep, - title={Deep high-resolution representation learning for human pose estimation}, - author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={5693--5703}, - year={2019} -} -``` - -
- - - -
-CrowdPose (CVPR'2019) - -```bibtex -@article{li2018crowdpose, - title={CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark}, - author={Li, Jiefeng and Wang, Can and Zhu, Hao and Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu}, - journal={arXiv preprint arXiv:1812.00324}, - year={2018} -} -``` - -
- -Results on CrowdPose test with [YOLOv3](https://github.com/eriklindernoren/PyTorch-YOLOv3) human detector - -| Arch | Input Size | AP | AP50 | AP75 | AP (E) | AP (M) | AP (H) | ckpt | log | -| :--------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :----: | :----: | :----: | :--------------------------------------------: | :-------------------------------------------: | -| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_hrnet-w32_8xb64-210e_crowdpose-256x192.py) | 256x192 | 0.675 | 0.825 | 0.729 | 0.770 | 0.687 | 0.553 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_crowdpose_256x192-960be101_20201227.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_crowdpose_256x192_20201227.log.json) | + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+CrowdPose (CVPR'2019) + +```bibtex +@article{li2018crowdpose, + title={CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark}, + author={Li, Jiefeng and Wang, Can and Zhu, Hao and Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu}, + journal={arXiv preprint arXiv:1812.00324}, + year={2018} +} +``` + +
+ +Results on CrowdPose test with [YOLOv3](https://github.com/eriklindernoren/PyTorch-YOLOv3) human detector + +| Arch | Input Size | AP | AP50 | AP75 | AP (E) | AP (M) | AP (H) | ckpt | log | +| :--------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :----: | :----: | :----: | :--------------------------------------------: | :-------------------------------------------: | +| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_hrnet-w32_8xb64-210e_crowdpose-256x192.py) | 256x192 | 0.675 | 0.825 | 0.729 | 0.770 | 0.687 | 0.553 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_crowdpose_256x192-960be101_20201227.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_crowdpose_256x192_20201227.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/crowdpose/hrnet_crowdpose.yml b/configs/body_2d_keypoint/topdown_heatmap/crowdpose/hrnet_crowdpose.yml index c37fa9154f..f090812278 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/crowdpose/hrnet_crowdpose.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/crowdpose/hrnet_crowdpose.yml @@ -1,19 +1,19 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_hrnet-w32_8xb64-210e_crowdpose-256x192.py - In Collection: HRNet - Metadata: - Architecture: - - HRNet - Training Data: CrowdPose - Name: td-hm_hrnet-w32_8xb64-210e_crowdpose-256x192 - Results: - - Dataset: CrowdPose - Metrics: - AP: 0.675 - AP (E): 0.77 - AP (H): 0.553 - AP (M): 0.687 - AP@0.5: 0.825 - AP@0.75: 0.729 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_crowdpose_256x192-960be101_20201227.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_hrnet-w32_8xb64-210e_crowdpose-256x192.py + In Collection: HRNet + Metadata: + Architecture: + - HRNet + Training Data: CrowdPose + Name: td-hm_hrnet-w32_8xb64-210e_crowdpose-256x192 + Results: + - Dataset: CrowdPose + Metrics: + AP: 0.675 + AP (E): 0.77 + AP (H): 0.553 + AP (M): 0.687 + AP@0.5: 0.825 + AP@0.75: 0.729 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_crowdpose_256x192-960be101_20201227.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/crowdpose/resnet_crowdpose.md b/configs/body_2d_keypoint/topdown_heatmap/crowdpose/resnet_crowdpose.md index 56a771806d..d987f26c50 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/crowdpose/resnet_crowdpose.md +++ b/configs/body_2d_keypoint/topdown_heatmap/crowdpose/resnet_crowdpose.md @@ -1,58 +1,58 @@ - - -
-SimpleBaseline2D (ECCV'2018) - -```bibtex -@inproceedings{xiao2018simple, - title={Simple baselines for human pose estimation and tracking}, - author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, - booktitle={Proceedings of the European conference on computer vision (ECCV)}, - pages={466--481}, - year={2018} -} -``` - -
- - - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
- - - -
-CrowdPose (CVPR'2019) - -```bibtex -@article{li2018crowdpose, - title={CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark}, - author={Li, Jiefeng and Wang, Can and Zhu, Hao and Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu}, - journal={arXiv preprint arXiv:1812.00324}, - year={2018} -} -``` - -
- -Results on CrowdPose test with [YOLOv3](https://github.com/eriklindernoren/PyTorch-YOLOv3) human detector - -| Arch | Input Size | AP | AP50 | AP75 | AP (E) | AP (M) | AP (H) | ckpt | log | -| :--------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :----: | :----: | :----: | :--------------------------------------------: | :-------------------------------------------: | -| [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res50_8xb64-210e_crowdpose-256x192.py) | 256x192 | 0.637 | 0.808 | 0.692 | 0.738 | 0.650 | 0.506 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_crowdpose_256x192-c6a526b6_20201227.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_crowdpose_256x192_20201227.log.json) | -| [pose_resnet_101](/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-256x192.py) | 256x192 | 0.647 | 0.810 | 0.703 | 0.745 | 0.658 | 0.521 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res101_crowdpose_256x192-8f5870f4_20201227.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res101_crowdpose_256x192_20201227.log.json) | -| [pose_resnet_101](/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-320x256.py) | 320x256 | 0.661 | 0.821 | 0.714 | 0.759 | 0.672 | 0.534 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res101_crowdpose_320x256-c88c512a_20201227.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res101_crowdpose_320x256_20201227.log.json) | -| [pose_resnet_152](/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res152_8xb64-210e_crowdpose-256x192.py) | 256x192 | 0.656 | 0.818 | 0.712 | 0.754 | 0.666 | 0.533 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res152_crowdpose_256x192-dbd49aba_20201227.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res152_crowdpose_256x192_20201227.log.json) | + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+CrowdPose (CVPR'2019) + +```bibtex +@article{li2018crowdpose, + title={CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark}, + author={Li, Jiefeng and Wang, Can and Zhu, Hao and Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu}, + journal={arXiv preprint arXiv:1812.00324}, + year={2018} +} +``` + +
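The `8xb64` fragment in the config names below encodes the reference training setup: 8 GPUs with 64 samples each, i.e. an effective batch size of 512, which matches `auto_scale_lr = dict(base_batch_size=512)` in the training configs further down in this diff. As a quick arithmetic sketch of the linear learning-rate scaling this declares (automatic scaling is typically switched on via a launch flag, so treat this as illustrative):

```python
# Linear LR scaling implied by auto_scale_lr in the CrowdPose configs:
# lr=5e-4 is tuned for the reference effective batch of 8 GPUs x 64 = 512.
base_lr = 5e-4
base_batch_size = 512        # auto_scale_lr.base_batch_size in the configs
actual_batch_size = 4 * 64   # hypothetical run: 4 GPUs, 64 samples per GPU

scaled_lr = base_lr * actual_batch_size / base_batch_size
print(scaled_lr)             # 0.00025 -> half the LR for half the batch
```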
+ +Results on CrowdPose test with [YOLOv3](https://github.com/eriklindernoren/PyTorch-YOLOv3) human detector + +| Arch | Input Size | AP | AP50 | AP75 | AP (E) | AP (M) | AP (H) | ckpt | log | +| :--------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :----: | :----: | :----: | :--------------------------------------------: | :-------------------------------------------: | +| [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res50_8xb64-210e_crowdpose-256x192.py) | 256x192 | 0.637 | 0.808 | 0.692 | 0.738 | 0.650 | 0.506 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_crowdpose_256x192-c6a526b6_20201227.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_crowdpose_256x192_20201227.log.json) | +| [pose_resnet_101](/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-256x192.py) | 256x192 | 0.647 | 0.810 | 0.703 | 0.745 | 0.658 | 0.521 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res101_crowdpose_256x192-8f5870f4_20201227.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res101_crowdpose_256x192_20201227.log.json) | +| [pose_resnet_101](/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-320x256.py) | 320x256 | 0.661 | 0.821 | 0.714 | 0.759 | 0.672 | 0.534 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res101_crowdpose_320x256-c88c512a_20201227.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res101_crowdpose_320x256_20201227.log.json) | +| [pose_resnet_152](/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res152_8xb64-210e_crowdpose-256x192.py) | 256x192 | 0.656 | 0.818 | 0.712 | 0.754 | 0.666 | 0.533 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res152_crowdpose_256x192-dbd49aba_20201227.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res152_crowdpose_256x192_20201227.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/crowdpose/resnet_crowdpose.yml b/configs/body_2d_keypoint/topdown_heatmap/crowdpose/resnet_crowdpose.yml index 1477c28deb..15802eb87c 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/crowdpose/resnet_crowdpose.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/crowdpose/resnet_crowdpose.yml @@ -1,71 +1,71 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res50_8xb64-210e_crowdpose-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: &id001 - - SimpleBaseline2D - - ResNet - Training Data: CrowdPose - Name: td-hm_res50_8xb64-210e_crowdpose-256x192 - Results: - - Dataset: CrowdPose - Metrics: - AP: 0.637 - AP (E): 0.738 - AP (H): 0.506 - AP (M): 0.65 - AP@0.5: 0.808 - AP@0.75: 0.692 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_crowdpose_256x192-c6a526b6_20201227.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: CrowdPose - Name: td-hm_res101_8xb64-210e_crowdpose-256x192 - Results: - - Dataset: CrowdPose - Metrics: - AP: 0.647 - AP (E): 0.745 - AP (H): 0.521 - AP (M): 0.658 - AP@0.5: 0.81 - AP@0.75: 0.703 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res101_crowdpose_256x192-8f5870f4_20201227.pth -- Config: 
configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-320x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: CrowdPose - Name: td-hm_res101_8xb64-210e_crowdpose-320x256 - Results: - - Dataset: CrowdPose - Metrics: - AP: 0.661 - AP (E): 0.759 - AP (H): 0.534 - AP (M): 0.672 - AP@0.5: 0.821 - AP@0.75: 0.714 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res101_crowdpose_320x256-c88c512a_20201227.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res152_8xb64-210e_crowdpose-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: CrowdPose - Name: td-hm_res152_8xb64-210e_crowdpose-256x192 - Results: - - Dataset: CrowdPose - Metrics: - AP: 0.656 - AP (E): 0.754 - AP (H): 0.533 - AP (M): 0.666 - AP@0.5: 0.818 - AP@0.75: 0.712 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res152_crowdpose_256x192-dbd49aba_20201227.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res50_8xb64-210e_crowdpose-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNet + Training Data: CrowdPose + Name: td-hm_res50_8xb64-210e_crowdpose-256x192 + Results: + - Dataset: CrowdPose + Metrics: + AP: 0.637 + AP (E): 0.738 + AP (H): 0.506 + AP (M): 0.65 + AP@0.5: 0.808 + AP@0.75: 0.692 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_crowdpose_256x192-c6a526b6_20201227.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: CrowdPose + Name: td-hm_res101_8xb64-210e_crowdpose-256x192 + Results: + - Dataset: CrowdPose + Metrics: + AP: 0.647 + AP (E): 0.745 + AP (H): 0.521 + AP (M): 0.658 + AP@0.5: 0.81 + AP@0.75: 0.703 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res101_crowdpose_256x192-8f5870f4_20201227.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-320x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: CrowdPose + Name: td-hm_res101_8xb64-210e_crowdpose-320x256 + Results: + - Dataset: CrowdPose + Metrics: + AP: 0.661 + AP (E): 0.759 + AP (H): 0.534 + AP (M): 0.672 + AP@0.5: 0.821 + AP@0.75: 0.714 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res101_crowdpose_320x256-c88c512a_20201227.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res152_8xb64-210e_crowdpose-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: CrowdPose + Name: td-hm_res152_8xb64-210e_crowdpose-256x192 + Results: + - Dataset: CrowdPose + Metrics: + AP: 0.656 + AP (E): 0.754 + AP (H): 0.533 + AP (M): 0.666 + AP@0.5: 0.818 + AP@0.75: 0.712 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res152_crowdpose_256x192-dbd49aba_20201227.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_hrnet-w32_8xb64-210e_crowdpose-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_hrnet-w32_8xb64-210e_crowdpose-256x192.py index 3117314a43..c5ec67a489 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_hrnet-w32_8xb64-210e_crowdpose-256x192.py +++ 
b/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_hrnet-w32_8xb64-210e_crowdpose-256x192.py @@ -1,152 +1,152 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='crowdpose/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=14, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CrowdPoseDataset' -data_mode = 'topdown' -data_root = 'data/crowdpose/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mmpose_crowdpose_test.json', - bbox_file='data/crowdpose/annotations/det_for_crowd_test_0.1_0.5.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - 
-# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/mmpose_crowdpose_test.json', - use_area=False, - iou_type='keypoints_crowd', - prefix='crowdpose') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='crowdpose/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=14, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CrowdPoseDataset' +data_mode = 'topdown' +data_root = 'data/crowdpose/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_test.json', + bbox_file='data/crowdpose/annotations/det_for_crowd_test_0.1_0.5.json', + 
data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/mmpose_crowdpose_test.json', + use_area=False, + iou_type='keypoints_crowd', + prefix='crowdpose') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-256x192.py index 79cae1d130..ef78bbcdd2 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-256x192.py @@ -1,123 +1,123 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='crowdpose/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=101, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=14, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CrowdPoseDataset' -data_mode = 'topdown' -data_root = 'data/crowdpose/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mmpose_crowdpose_test.json', - bbox_file='data/crowdpose/annotations/det_for_crowd_test_0.1_0.5.json', - 
data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/mmpose_crowdpose_test.json', - use_area=False, - iou_type='keypoints_crowd', - prefix='crowdpose') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='crowdpose/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=14, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CrowdPoseDataset' +data_mode = 'topdown' +data_root = 'data/crowdpose/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_test.json', + bbox_file='data/crowdpose/annotations/det_for_crowd_test_0.1_0.5.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/mmpose_crowdpose_test.json', + use_area=False, + iou_type='keypoints_crowd', + prefix='crowdpose') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-320x256.py 
b/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-320x256.py index eac5caf859..4ffb602ff8 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-320x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res101_8xb64-210e_crowdpose-320x256.py @@ -1,123 +1,123 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='crowdpose/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 320), heatmap_size=(64, 80), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=101, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=14, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CrowdPoseDataset' -data_mode = 'topdown' -data_root = 'data/crowdpose/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mmpose_crowdpose_test.json', - bbox_file='data/crowdpose/annotations/det_for_crowd_test_0.1_0.5.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/mmpose_crowdpose_test.json', - use_area=False, - iou_type='keypoints_crowd', - prefix='crowdpose') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer 
+optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='crowdpose/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 320), heatmap_size=(64, 80), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=14, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CrowdPoseDataset' +data_mode = 'topdown' +data_root = 'data/crowdpose/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_test.json', + bbox_file='data/crowdpose/annotations/det_for_crowd_test_0.1_0.5.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/mmpose_crowdpose_test.json', + use_area=False, + iou_type='keypoints_crowd', + prefix='crowdpose') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res152_8xb64-210e_crowdpose-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res152_8xb64-210e_crowdpose-256x192.py index 5b99439535..d53e2d192f 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res152_8xb64-210e_crowdpose-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res152_8xb64-210e_crowdpose-256x192.py @@ -1,123 +1,123 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = 
dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='crowdpose/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=152, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=14, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CrowdPoseDataset' -data_mode = 'topdown' -data_root = 'data/crowdpose/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mmpose_crowdpose_test.json', - bbox_file='data/crowdpose/annotations/det_for_crowd_test_0.1_0.5.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/mmpose_crowdpose_test.json', - use_area=False, - iou_type='keypoints_crowd', - prefix='crowdpose') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='crowdpose/AP', 
rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=14, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CrowdPoseDataset' +data_mode = 'topdown' +data_root = 'data/crowdpose/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_test.json', + bbox_file='data/crowdpose/annotations/det_for_crowd_test_0.1_0.5.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/mmpose_crowdpose_test.json', + use_area=False, + iou_type='keypoints_crowd', + prefix='crowdpose') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res50_8xb64-210e_crowdpose-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res50_8xb64-210e_crowdpose-256x192.py index d669b2e267..2ae99ceeca 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res50_8xb64-210e_crowdpose-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_res50_8xb64-210e_crowdpose-256x192.py @@ -1,123 +1,123 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='crowdpose/AP', rule='greater')) - -# 
codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=14, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CrowdPoseDataset' -data_mode = 'topdown' -data_root = 'data/crowdpose/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mmpose_crowdpose_test.json', - bbox_file='data/crowdpose/annotations/det_for_crowd_test_0.1_0.5.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/mmpose_crowdpose_test.json', - use_area=False, - iou_type='keypoints_crowd', - prefix='crowdpose') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='crowdpose/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + 
in_channels=2048, + out_channels=14, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CrowdPoseDataset' +data_mode = 'topdown' +data_root = 'data/crowdpose/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mmpose_crowdpose_test.json', + bbox_file='data/crowdpose/annotations/det_for_crowd_test_0.1_0.5.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/mmpose_crowdpose_test.json', + use_area=False, + iou_type='keypoints_crowd', + prefix='crowdpose') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/humanart/hrnet_humanart.md b/configs/body_2d_keypoint/topdown_heatmap/humanart/hrnet_humanart.md index 6e5f3476cb..71b825ed39 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/humanart/hrnet_humanart.md +++ b/configs/body_2d_keypoint/topdown_heatmap/humanart/hrnet_humanart.md @@ -1,80 +1,80 @@ - - -
-HRNet (CVPR'2019) - -```bibtex -@inproceedings{sun2019deep, - title={Deep high-resolution representation learning for human pose estimation}, - author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={5693--5703}, - year={2019} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -
-Human-Art (CVPR'2023) - -```bibtex -@inproceedings{ju2023humanart, - title={Human-Art: A Versatile Human-Centric Dataset Bridging Natural and Artificial Scenes}, - author={Ju, Xuan and Zeng, Ailing and Jianan, Wang and Qiang, Xu and Lei, Zhang}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), - year={2023}} -``` - -
- -Results on Human-Art validation dataset with detector having human AP of 56.2 on Human-Art validation dataset - -> With classic decoder - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [pose_hrnet_w32-coco](configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py) | 256x192 | 0.252 | 0.397 | 0.255 | 0.321 | 0.485 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220909.log) | -| [pose_hrnet_w32-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w32_8xb64-210e_humanart-256x192.py) | 256x192 | 0.399 | 0.545 | 0.420 | 0.466 | 0.613 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w32_8xb64-210e_humanart-256x192-0773ef0b_20230614.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w32_8xb64-210e_humanart-256x192-0773ef0b_20230614.json) | -| [pose_hrnet_w48-coco](configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py) | 256x192 | 0.271 | 0.413 | 0.277 | 0.339 | 0.499 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192-0e67c616_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192_20220913.log) | -| [pose_hrnet_w48-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w48_8xb32-210e_humanart-256x192.py) | 256x192 | 0.417 | 0.553 | 0.442 | 0.481 | 0.617 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w48_8xb32-210e_humanart-256x192-05178983_20230614.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w48_8xb32-210e_humanart-256x192-05178983_20230614.json) | - -Results on Human-Art validation dataset with ground-truth bounding-box - -> With classic decoder - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [pose_hrnet_w32-coco](configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py) | 256x192 | 0.533 | 0.771 | 0.562 | 0.574 | 0.792 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220909.log) | -| [pose_hrnet_w32-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w32_8xb64-210e_humanart-256x192.py) | 256x192 | 0.754 | 0.906 | 0.812 | 0.783 | 0.916 | 
[ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w32_8xb64-210e_humanart-256x192-0773ef0b_20230614.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w32_8xb64-210e_humanart-256x192-0773ef0b_20230614.json) | -| [pose_hrnet_w48-coco](configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py) | 256x192 | 0.557 | 0.782 | 0.593 | 0.595 | 0.804 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192-0e67c616_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192_20220913.log) | -| [pose_hrnet_w48-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w48_8xb32-210e_humanart-256x192.py) | 256x192 | 0.769 | 0.906 | 0.825 | 0.796 | 0.919 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w48_8xb32-210e_humanart-256x192-05178983_20230614.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w48_8xb32-210e_humanart-256x192-05178983_20230614.json) | - -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -> With classic decoder - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [pose_hrnet_w32-coco](configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py) | 256x192 | 0.749 | 0.906 | 0.821 | 0.804 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220909.log) | -| [pose_hrnet_w32-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w32_8xb64-210e_humanart-256x192.py) | 256x192 | 0.741 | 0.902 | 0.814 | 0.795 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w32_8xb64-210e_humanart-256x192-0773ef0b_20230614.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w32_8xb64-210e_humanart-256x192-0773ef0b_20230614.json) | -| [pose_hrnet_w48-coco](configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py) | 256x192 | 0.756 | 0.908 | 0.826 | 0.809 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192-0e67c616_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192_20220913.log) | -| [pose_hrnet_w48-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w48_8xb32-210e_humanart-256x192.py) | 256x192 | 0.751 | 0.905 | 0.822 | 0.805 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w48_8xb32-210e_humanart-256x192-05178983_20230614.pth) | 
[log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w48_8xb32-210e_humanart-256x192-05178983_20230614.json) | + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +
+Human-Art (CVPR'2023)
+
+```bibtex
+@inproceedings{ju2023humanart,
+  title={Human-Art: A Versatile Human-Centric Dataset Bridging Natural and Artificial Scenes},
+  author={Ju, Xuan and Zeng, Ailing and Jianan, Wang and Qiang, Xu and Lei, Zhang},
+  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
+  year={2023}
+}
+```
+
+
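The tables below report the same checkpoints under two box sources: boxes from a person detector (human AP 56.2 on Human-Art, 56.4 on COCO val2017) and ground-truth boxes. As a minimal sketch of what the ground-truth-box setting means for a top-down model, assuming an MMPose 1.x installation; the config path and checkpoint URL come from the table, while the image path and box coordinates are placeholders:

```python
# Sketch of the "ground-truth bounding-box" setting: a top-down model is run
# on externally supplied person boxes instead of detector output.
# Assumes an MMPose 1.x install; image path and box values are placeholders.
import numpy as np
from mmpose.apis import init_model, inference_topdown

config = ('configs/body_2d_keypoint/topdown_heatmap/humanart/'
          'td-hm_hrnet-w32_8xb64-210e_humanart-256x192.py')
checkpoint = ('https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/'
              'topdown_heatmap/human_art/'
              'td-hm_hrnet-w32_8xb64-210e_humanart-256x192-0773ef0b_20230614.pth')

model = init_model(config, checkpoint, device='cpu')

# Feed annotated boxes directly (xyxy format); with bboxes=None the whole
# image would be used as a single box instead.
gt_bboxes = np.array([[12.0, 30.0, 220.0, 480.0]])  # placeholder person box
samples = inference_topdown(model, 'demo.jpg', bboxes=gt_bboxes)
print(samples[0].pred_instances.keypoints.shape)  # e.g. (1, 17, 2): 17 COCO-style keypoints
```

Swapping detector boxes for ground-truth boxes is the only difference between the first two tables; the checkpoints themselves are identical.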
+ +Results on Human-Art validation dataset with detector having human AP of 56.2 on Human-Art validation dataset + +> With classic decoder + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_hrnet_w32-coco](configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py) | 256x192 | 0.252 | 0.397 | 0.255 | 0.321 | 0.485 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220909.log) | +| [pose_hrnet_w32-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w32_8xb64-210e_humanart-256x192.py) | 256x192 | 0.399 | 0.545 | 0.420 | 0.466 | 0.613 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w32_8xb64-210e_humanart-256x192-0773ef0b_20230614.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w32_8xb64-210e_humanart-256x192-0773ef0b_20230614.json) | +| [pose_hrnet_w48-coco](configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py) | 256x192 | 0.271 | 0.413 | 0.277 | 0.339 | 0.499 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192-0e67c616_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192_20220913.log) | +| [pose_hrnet_w48-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w48_8xb32-210e_humanart-256x192.py) | 256x192 | 0.417 | 0.553 | 0.442 | 0.481 | 0.617 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w48_8xb32-210e_humanart-256x192-05178983_20230614.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w48_8xb32-210e_humanart-256x192-05178983_20230614.json) | + +Results on Human-Art validation dataset with ground-truth bounding-box + +> With classic decoder + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_hrnet_w32-coco](configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py) | 256x192 | 0.533 | 0.771 | 0.562 | 0.574 | 0.792 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220909.log) | +| [pose_hrnet_w32-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w32_8xb64-210e_humanart-256x192.py) | 256x192 | 0.754 | 0.906 | 0.812 | 0.783 | 0.916 | 
[ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w32_8xb64-210e_humanart-256x192-0773ef0b_20230614.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w32_8xb64-210e_humanart-256x192-0773ef0b_20230614.json) | +| [pose_hrnet_w48-coco](configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py) | 256x192 | 0.557 | 0.782 | 0.593 | 0.595 | 0.804 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192-0e67c616_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192_20220913.log) | +| [pose_hrnet_w48-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w48_8xb32-210e_humanart-256x192.py) | 256x192 | 0.769 | 0.906 | 0.825 | 0.796 | 0.919 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w48_8xb32-210e_humanart-256x192-05178983_20230614.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w48_8xb32-210e_humanart-256x192-05178983_20230614.json) | + +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +> With classic decoder + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [pose_hrnet_w32-coco](configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py) | 256x192 | 0.749 | 0.906 | 0.821 | 0.804 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220909.log) | +| [pose_hrnet_w32-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w32_8xb64-210e_humanart-256x192.py) | 256x192 | 0.741 | 0.902 | 0.814 | 0.795 | 0.941 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w32_8xb64-210e_humanart-256x192-0773ef0b_20230614.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w32_8xb64-210e_humanart-256x192-0773ef0b_20230614.json) | +| [pose_hrnet_w48-coco](configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py) | 256x192 | 0.756 | 0.908 | 0.826 | 0.809 | 0.945 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192-0e67c616_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192_20220913.log) | +| [pose_hrnet_w48-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w48_8xb32-210e_humanart-256x192.py) | 256x192 | 0.751 | 0.905 | 0.822 | 0.805 | 0.943 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w48_8xb32-210e_humanart-256x192-05178983_20230614.pth) | 
[log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w48_8xb32-210e_humanart-256x192-05178983_20230614.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/humanart/hrnet_humanart.yml b/configs/body_2d_keypoint/topdown_heatmap/humanart/hrnet_humanart.yml index 08aa3f1f47..d49a662ad8 100755 --- a/configs/body_2d_keypoint/topdown_heatmap/humanart/hrnet_humanart.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/humanart/hrnet_humanart.yml @@ -1,74 +1,74 @@ -Collections: -- Name: HRNet - Paper: - Title: Deep high-resolution representation learning for human pose estimation - URL: http://openaccess.thecvf.com/content_CVPR_2019/html/Sun_Deep_High-Resolution_Representation_Learning_for_Human_Pose_Estimation_CVPR_2019_paper.html - README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/hrnet.md -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w32_8xb64-210e_humanart-256x192.py - In Collection: HRNet - Metadata: - Architecture: &id001 - - HRNet - Training Data: &id002 - - COCO - - Human-Art - Name: td-hm_hrnet-w32_8xb64-210e_humanart-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.741 - AP@0.5: 0.902 - AP@0.75: 0.814 - AR: 0.795 - AR@0.5: 0.941 - Task: Body 2D Keypoint - - Dataset: Human-Art - Metrics: - AP: 0.399 - AP@0.5: 0.545 - AP@0.75: 0.420 - AR: 0.466 - AR@0.5: 0.613 - Task: Body 2D Keypoint - - Dataset: Human-Art(GT) - Metrics: - AP: 0.754 - AP@0.5: 0.906 - AP@0.75: 0.812 - AR: 0.783 - AR@0.5: 0.916 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w32_8xb64-210e_humanart-256x192-0773ef0b_20230614.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w48_8xb32-210e_humanart-256x192.py - In Collection: HRNet - Metadata: - Architecture: *id001 - Training Data: *id002 - Name: td-hm_hrnet-w48_8xb32-210e_humanart-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.751 - AP@0.5: 0.905 - AP@0.75: 0.822 - AR: 0.805 - AR@0.5: 0.943 - Task: Body 2D Keypoint - - Dataset: Human-Art - Metrics: - AP: 0.417 - AP@0.5: 0.553 - AP@0.75: 0.442 - AR: 0.481 - AR@0.5: 0.617 - Task: Body 2D Keypoint - - Dataset: Human-Art(GT) - Metrics: - AP: 0.769 - AP@0.5: 0.906 - AP@0.75: 0.825 - AR: 0.796 - AR@0.5: 0.919 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w48_8xb32-210e_humanart-256x192-05178983_20230614.pth +Collections: +- Name: HRNet + Paper: + Title: Deep high-resolution representation learning for human pose estimation + URL: http://openaccess.thecvf.com/content_CVPR_2019/html/Sun_Deep_High-Resolution_Representation_Learning_for_Human_Pose_Estimation_CVPR_2019_paper.html + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/hrnet.md +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w32_8xb64-210e_humanart-256x192.py + In Collection: HRNet + Metadata: + Architecture: &id001 + - HRNet + Training Data: &id002 + - COCO + - Human-Art + Name: td-hm_hrnet-w32_8xb64-210e_humanart-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.741 + AP@0.5: 0.902 + AP@0.75: 0.814 + AR: 0.795 + AR@0.5: 0.941 + Task: Body 2D Keypoint + - Dataset: Human-Art + Metrics: + AP: 0.399 + AP@0.5: 0.545 + AP@0.75: 0.420 + AR: 0.466 + AR@0.5: 0.613 + Task: Body 2D Keypoint + - Dataset: Human-Art(GT) + Metrics: + AP: 0.754 + AP@0.5: 0.906 + AP@0.75: 0.812 + AR: 
0.783 + AR@0.5: 0.916 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w32_8xb64-210e_humanart-256x192-0773ef0b_20230614.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w48_8xb32-210e_humanart-256x192.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: td-hm_hrnet-w48_8xb32-210e_humanart-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.751 + AP@0.5: 0.905 + AP@0.75: 0.822 + AR: 0.805 + AR@0.5: 0.943 + Task: Body 2D Keypoint + - Dataset: Human-Art + Metrics: + AP: 0.417 + AP@0.5: 0.553 + AP@0.75: 0.442 + AR: 0.481 + AR@0.5: 0.617 + Task: Body 2D Keypoint + - Dataset: Human-Art(GT) + Metrics: + AP: 0.769 + AP@0.5: 0.906 + AP@0.75: 0.825 + AR: 0.796 + AR@0.5: 0.919 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_hrnet-w48_8xb32-210e_humanart-256x192-05178983_20230614.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py index 4aa431e044..c28de59489 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py @@ -1,150 +1,150 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -custom_imports = dict( - imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], - allow_failed_imports=False) - -optim_wrapper = dict( - optimizer=dict( - type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), - paramwise_cfg=dict( - num_layers=12, - layer_decay_rate=0.75, - custom_keys={ - 'bias': dict(decay_multi=0.0), - 'pos_embed': dict(decay_mult=0.0), - 'relative_position_bias_table': dict(decay_mult=0.0), - 'norm': dict(decay_mult=0.0), - }, - ), - constructor='LayerDecayOptimWrapperConstructor', - clip_grad=dict(max_norm=1., norm_type=2), -) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='mmpretrain.VisionTransformer', - arch='base', - img_size=(256, 192), - patch_size=16, - qkv_bias=True, - drop_path_rate=0.3, - with_cls_token=False, - out_type='featmap', - patch_cfg=dict(padding=2), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'v1/pretrained_models/mae_pretrain_vit_base.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=768, - out_channels=17, - deconv_out_channels=(256, 256), - deconv_kernel_sizes=(4, 4), - loss=dict(type='KeypointMSELoss', 
use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -data_root = 'data/' -dataset_type = 'HumanArtDataset' -data_mode = 'topdown' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='HumanArt/annotations/training_humanart_coco.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='HumanArt/annotations/validation_humanart.json', - bbox_file=f'{data_root}HumanArt/person_detection_results/' - 'HumanArt_validation_detections_AP_H_56_person.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=12, + layer_decay_rate=0.75, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmpretrain.VisionTransformer', + arch='base', + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.3, + with_cls_token=False, + out_type='featmap', + 
patch_cfg=dict(padding=2), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'v1/pretrained_models/mae_pretrain_vit_base.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=768, + out_channels=17, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +data_root = 'data/' +dataset_type = 'HumanArtDataset' +data_mode = 'topdown' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/training_humanart_coco.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/validation_humanart.json', + bbox_file=f'{data_root}HumanArt/person_detection_results/' + 'HumanArt_validation_detections_AP_H_56_person.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192.py index 925f68e3d1..92a51d1f4c 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192.py @@ -1,150 +1,150 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -custom_imports = dict( - imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], - allow_failed_imports=False) - -optim_wrapper = dict( - optimizer=dict( - type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), - paramwise_cfg=dict( - num_layers=32, - layer_decay_rate=0.85, - custom_keys={ - 'bias': dict(decay_multi=0.0), - 'pos_embed': dict(decay_mult=0.0), - 'relative_position_bias_table': dict(decay_mult=0.0), - 'norm': dict(decay_mult=0.0), - }, - ), - constructor='LayerDecayOptimWrapperConstructor', - clip_grad=dict(max_norm=1., norm_type=2), -) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - 
type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='mmcls.VisionTransformer', - arch='huge', - img_size=(256, 192), - patch_size=16, - qkv_bias=True, - drop_path_rate=0.55, - with_cls_token=False, - output_cls_token=False, - patch_cfg=dict(padding=2), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'v1/pretrained_models/mae_pretrain_vit_huge.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=1280, - out_channels=17, - deconv_out_channels=(256, 256), - deconv_kernel_sizes=(4, 4), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -data_root = 'data/' -dataset_type = 'HumanArtDataset' -data_mode = 'topdown' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='HumanArt/annotations/training_humanart_coco.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='HumanArt/annotations/validation_humanart.json', - bbox_file=f'{data_root}HumanArt/person_detection_results/' - 'HumanArt_validation_detections_AP_H_56_person.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=32, + layer_decay_rate=0.85, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': 
dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmcls.VisionTransformer', + arch='huge', + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.55, + with_cls_token=False, + output_cls_token=False, + patch_cfg=dict(padding=2), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'v1/pretrained_models/mae_pretrain_vit_huge.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=1280, + out_channels=17, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +data_root = 'data/' +dataset_type = 'HumanArtDataset' +data_mode = 'topdown' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/training_humanart_coco.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/validation_humanart.json', + bbox_file=f'{data_root}HumanArt/person_detection_results/' + 'HumanArt_validation_detections_AP_H_56_person.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-large_8xb64-210e_humanart-256x192.py 
b/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-large_8xb64-210e_humanart-256x192.py index 7ea9dbf395..ec7edd29dc 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-large_8xb64-210e_humanart-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-large_8xb64-210e_humanart-256x192.py @@ -1,150 +1,150 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -custom_imports = dict( - imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], - allow_failed_imports=False) - -optim_wrapper = dict( - optimizer=dict( - type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), - paramwise_cfg=dict( - num_layers=24, - layer_decay_rate=0.8, - custom_keys={ - 'bias': dict(decay_multi=0.0), - 'pos_embed': dict(decay_mult=0.0), - 'relative_position_bias_table': dict(decay_mult=0.0), - 'norm': dict(decay_mult=0.0), - }, - ), - constructor='LayerDecayOptimWrapperConstructor', - clip_grad=dict(max_norm=1., norm_type=2), -) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='mmcls.VisionTransformer', - arch='large', - img_size=(256, 192), - patch_size=16, - qkv_bias=True, - drop_path_rate=0.5, - with_cls_token=False, - output_cls_token=False, - patch_cfg=dict(padding=2), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'v1/pretrained_models/mae_pretrain_vit_large.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=1024, - out_channels=17, - deconv_out_channels=(256, 256), - deconv_kernel_sizes=(4, 4), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -data_root = 'data/' -dataset_type = 'HumanArtDataset' -data_mode = 'topdown' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='HumanArt/annotations/training_humanart_coco.json', - data_prefix=dict(img=''), - 
pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='HumanArt/annotations/validation_humanart.json', - bbox_file=f'{data_root}HumanArt/person_detection_results/' - 'HumanArt_validation_detections_AP_H_56_person.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=24, + layer_decay_rate=0.8, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmcls.VisionTransformer', + arch='large', + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.5, + with_cls_token=False, + output_cls_token=False, + patch_cfg=dict(padding=2), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'v1/pretrained_models/mae_pretrain_vit_large.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=1024, + out_channels=17, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +data_root = 'data/' +dataset_type = 'HumanArtDataset' +data_mode = 'topdown' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + 
dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/training_humanart_coco.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/validation_humanart.json', + bbox_file=f'{data_root}HumanArt/person_detection_results/' + 'HumanArt_validation_detections_AP_H_56_person.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-small_8xb64-210e_humanart-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-small_8xb64-210e_humanart-256x192.py index ed7817d2fe..ce27e97f26 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-small_8xb64-210e_humanart-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-small_8xb64-210e_humanart-256x192.py @@ -1,155 +1,155 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -custom_imports = dict( - imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], - allow_failed_imports=False) - -optim_wrapper = dict( - optimizer=dict( - type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), - paramwise_cfg=dict( - num_layers=12, - layer_decay_rate=0.8, - custom_keys={ - 'bias': dict(decay_multi=0.0), - 'pos_embed': dict(decay_mult=0.0), - 'relative_position_bias_table': dict(decay_mult=0.0), - 'norm': dict(decay_mult=0.0), - }, - ), - constructor='LayerDecayOptimWrapperConstructor', - clip_grad=dict(max_norm=1., norm_type=2), -) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='mmpretrain.VisionTransformer', - arch={ - 'embed_dims': 384, - 'num_layers': 12, - 'num_heads': 12, - 'feedforward_channels': 384 * 4 - }, - img_size=(256, 192), - patch_size=16, - qkv_bias=True, - drop_path_rate=0.1, - with_cls_token=False, - out_type='featmap', - patch_cfg=dict(padding=2), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'v1/pretrained_models/mae_pretrain_vit_small.pth'), - ), - 
head=dict( - type='HeatmapHead', - in_channels=384, - out_channels=17, - deconv_out_channels=(256, 256), - deconv_kernel_sizes=(4, 4), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -data_root = 'data/' -dataset_type = 'HumanArtDataset' -data_mode = 'topdown' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='HumanArt/annotations/training_humanart_coco.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='HumanArt/annotations/validation_humanart.json', - bbox_file=f'{data_root}HumanArt/person_detection_results/' - 'HumanArt_validation_detections_AP_H_56_person.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +custom_imports = dict( + imports=['mmpose.engine.optim_wrappers.layer_decay_optim_wrapper'], + allow_failed_imports=False) + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4, betas=(0.9, 0.999), weight_decay=0.1), + paramwise_cfg=dict( + num_layers=12, + layer_decay_rate=0.8, + custom_keys={ + 'bias': dict(decay_multi=0.0), + 'pos_embed': dict(decay_mult=0.0), + 'relative_position_bias_table': dict(decay_mult=0.0), + 'norm': dict(decay_mult=0.0), + }, + ), + constructor='LayerDecayOptimWrapperConstructor', + clip_grad=dict(max_norm=1., norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + 
type='mmpretrain.VisionTransformer', + arch={ + 'embed_dims': 384, + 'num_layers': 12, + 'num_heads': 12, + 'feedforward_channels': 384 * 4 + }, + img_size=(256, 192), + patch_size=16, + qkv_bias=True, + drop_path_rate=0.1, + with_cls_token=False, + out_type='featmap', + patch_cfg=dict(padding=2), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'v1/pretrained_models/mae_pretrain_vit_small.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=384, + out_channels=17, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +data_root = 'data/' +dataset_type = 'HumanArtDataset' +data_mode = 'topdown' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/training_humanart_coco.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/validation_humanart.json', + bbox_file=f'{data_root}HumanArt/person_detection_results/' + 'HumanArt_validation_detections_AP_H_56_person.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w32_8xb64-210e_humanart-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w32_8xb64-210e_humanart-256x192.py index bf9fa25beb..00bfd372d9 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w32_8xb64-210e_humanart-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w32_8xb64-210e_humanart-256x192.py @@ -1,150 +1,150 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = 
dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'HumanArtDataset' -data_mode = 'topdown' -data_root = 'data/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='HumanArt/annotations/training_humanart_coco.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='HumanArt/annotations/validation_humanart.json', - bbox_file=f'{data_root}HumanArt/person_detection_results/' - 'HumanArt_validation_detections_AP_H_56_person.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size 
+auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'HumanArtDataset' +data_mode = 'topdown' +data_root = 'data/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/training_humanart_coco.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/validation_humanart.json', + bbox_file=f'{data_root}HumanArt/person_detection_results/' + 'HumanArt_validation_detections_AP_H_56_person.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w48_8xb32-210e_humanart-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w48_8xb32-210e_humanart-256x192.py index 6a5ae0707c..21269e4c19 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w48_8xb32-210e_humanart-256x192.py +++ 
b/configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_hrnet-w48_8xb32-210e_humanart-256x192.py @@ -1,150 +1,150 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(48, 96)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(48, 96, 192)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(48, 96, 192, 384))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w48-8ef0771d.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=48, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'HumanArtDataset' -data_mode = 'topdown' -data_root = 'data/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='HumanArt/annotations/training_humanart_coco.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='HumanArt/annotations/validation_humanart.json', - bbox_file=f'{data_root}HumanArt/person_detection_results/' - 'HumanArt_validation_detections_AP_H_56_person.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) 
-test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'HumanArtDataset' +data_mode = 'topdown' +data_root = 'data/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/training_humanart_coco.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='HumanArt/annotations/validation_humanart.json', + bbox_file=f'{data_root}HumanArt/person_detection_results/' + 'HumanArt_validation_detections_AP_H_56_person.json', + 
data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'HumanArt/annotations/validation_humanart.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/humanart/vitpose_humanart.md b/configs/body_2d_keypoint/topdown_heatmap/humanart/vitpose_humanart.md index a4d2dd6c50..dc0e52f0bb 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/humanart/vitpose_humanart.md +++ b/configs/body_2d_keypoint/topdown_heatmap/humanart/vitpose_humanart.md @@ -1,97 +1,97 @@ -To utilize ViTPose, you'll need to have [MMPreTrain](https://github.com/open-mmlab/mmpretrain). To install the required version, run the following command: - -```shell -mim install 'mmpretrain>=1.0.0' -``` - - - -
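With MMPreTrain installed, the ViTPose configs in this folder run like any other MMPose 1.x config. Below is a minimal single-GPU training sketch using MMEngine's `Runner`; the config path is the ViTPose-S Human-Art config from this directory, while the work directory name and the `auto_scale_lr` toggle are illustrative assumptions (the reported results were trained with 8 GPUs x 64 images per GPU, as the `8xb64` in the file name indicates).

```python
# Hedged sketch: train a ViTPose Human-Art config on a single GPU.
# Assumes mmpose, mmengine and mmpretrain>=1.0.0 are installed and the
# Human-Art annotations are prepared under data/HumanArt.
from mmengine.config import Config
from mmengine.runner import Runner

cfg = Config.fromfile(
    'configs/body_2d_keypoint/topdown_heatmap/humanart/'
    'td-hm_ViTPose-small_8xb64-210e_humanart-256x192.py')
cfg.work_dir = 'work_dirs/vitpose-s_humanart'  # illustrative output dir

# The config assumes 8 GPUs x 64 images (base_batch_size=512); enabling
# auto_scale_lr lets MMEngine rescale the learning rate for a smaller setup.
cfg.auto_scale_lr.enable = True

runner = Runner.from_cfg(cfg)
runner.train()
```

Evaluating a released checkpoint works the same way: point `cfg.load_from` at one of the checkpoints listed in the tables below and call `runner.test()` instead of `runner.train()`.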
- -ViTPose (NeurIPS'2022) - -```bibtex -@inproceedings{ - xu2022vitpose, - title={Vi{TP}ose: Simple Vision Transformer Baselines for Human Pose Estimation}, - author={Yufei Xu and Jing Zhang and Qiming Zhang and Dacheng Tao}, - booktitle={Advances in Neural Information Processing Systems}, - year={2022}, -} -``` - -
- - - -
-COCO-WholeBody (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -
-
-Human-Art (CVPR'2023)
-
-```bibtex
-@inproceedings{ju2023humanart,
-  title={Human-Art: A Versatile Human-Centric Dataset Bridging Natural and Artificial Scenes},
-  author={Ju, Xuan and Zeng, Ailing and Wang, Jianan and Xu, Qiang and Zhang, Lei},
-  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
-  year={2023}
-}
-```
-
-
- -Results on Human-Art validation dataset with detector having human AP of 56.2 on Human-Art validation dataset - -> With classic decoder - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [ViTPose-S-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192.py) | 256x192 | 0.228 | 0.371 | 0.229 | 0.298 | 0.467 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.json) | -| [ViTPose-S-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-small_8xb64-210e_humanart-256x192.py) | 256x192 | 0.381 | 0.532 | 0.405 | 0.448 | 0.602 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-small_8xb64-210e_humanart-256x192-5cbe2bfc_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-small_8xb64-210e_humanart-256x192-5cbe2bfc_20230611.json) | -| [ViTPose-B-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py) | 256x192 | 0.270 | 0.423 | 0.272 | 0.340 | 0.510 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.json) | -| [ViTPose-B-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py) | 256x192 | 0.410 | 0.549 | 0.434 | 0.475 | 0.615 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-base_8xb64-210e_humanart-256x192-b417f546_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-base_8xb64-210e_humanart-256x192-b417f546_20230611.json) | -| [ViTPose-L-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py) | 256x192 | 0.342 | 0.498 | 0.357 | 0.413 | 0.577 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.json) | -| [ViTPose-L-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py) | 256x192 | 0.459 | 0.592 | 0.487 | 0.525 | 0.656 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-large_8xb64-210e_humanart-256x192-9aba9345_20230614.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-large_8xb64-210e_humanart-256x192-9aba9345_20230614.json) | -| [ViTPose-H-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py) | 256x192 | 0.377 | 0.541 | 0.391 | 0.447 | 
0.615 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.json) | -| [ViTPose-H-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192.py) | 256x192 | 0.468 | 0.594 | 0.498 | 0.534 | 0.655 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192-603bb573_20230612.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192-603bb573_20230612.json) | - -Results on Human-Art validation dataset with ground-truth bounding-box - -> With classic decoder - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [ViTPose-S-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192.py) | 256x192 | 0.507 | 0.758 | 0.531 | 0.551 | 0.780 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.json) | -| [ViTPose-S-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-small_8xb64-210e_humanart-256x192.py) | 256x192 | 0.738 | 0.905 | 0.802 | 0.768 | 0.911 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-small_8xb64-210e_humanart-256x192-5cbe2bfc_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-small_8xb64-210e_humanart-256x192-5cbe2bfc_20230611.json) | -| [ViTPose-B-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py) | 256x192 | 0.555 | 0.782 | 0.590 | 0.599 | 0.809 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.json) | -| [ViTPose-B-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py) | 256x192 | 0.759 | 0.905 | 0.823 | 0.790 | 0.917 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-base_8xb64-210e_humanart-256x192-b417f546_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-base_8xb64-210e_humanart-256x192-b417f546_20230611.json) | -| [ViTPose-L-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py) | 256x192 | 0.637 | 0.838 | 0.689 | 0.677 | 0.859 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.pth) | 
[log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.json) | -| [ViTPose-L-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py) | 256x192 | 0.789 | 0.916 | 0.845 | 0.819 | 0.929 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-large_8xb64-210e_humanart-256x192-9aba9345_20230614.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-large_8xb64-210e_humanart-256x192-9aba9345_20230614.json) | -| [ViTPose-H-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py) | 256x192 | 0.665 | 0.860 | 0.715 | 0.701 | 0.871 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.json) | -| [ViTPose-H-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192.py) | 256x192 | 0.800 | 0.926 | 0.855 | 0.828 | 0.933 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192-603bb573_20230612.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192-603bb573_20230612.json) | - -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -> With classic decoder - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [ViTPose-S-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192.py) | 256x192 | 0.739 | 0.903 | 0.816 | 0.792 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.json) | -| [ViTPose-S-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-small_8xb64-210e_humanart-256x192.py) | 256x192 | 0.737 | 0.902 | 0.811 | 0.792 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-small_8xb64-210e_humanart-256x192-5cbe2bfc_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-small_8xb64-210e_humanart-256x192-5cbe2bfc_20230611.json) | -| [ViTPose-B-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py) | 256x192 | 0.757 | 0.905 | 0.829 | 0.810 | 0.946 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.json) | -| 
[ViTPose-B-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py) | 256x192 | 0.758 | 0.906 | 0.829 | 0.812 | 0.946 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-base_8xb64-210e_humanart-256x192-b417f546_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-base_8xb64-210e_humanart-256x192-b417f546_20230611.json) | -| [ViTPose-L-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192.py) | 256x192 | 0.782 | 0.914 | 0.850 | 0.834 | 0.952 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.json) | -| [ViTPose-L-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py) | 256x192 | 0.782 | 0.914 | 0.849 | 0.835 | 0.953 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-large_8xb64-210e_humanart-256x192-9aba9345_20230614.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-large_8xb64-210e_humanart-256x192-9aba9345_20230614.json) | -| [ViTPose-H-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py) | 256x192 | 0.788 | 0.917 | 0.855 | 0.839 | 0.954 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.json) | -| [ViTPose-H-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192.py) | 256x192 | 0.788 | 0.914 | 0.853 | 0.841 | 0.956 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192-603bb573_20230612.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192-603bb573_20230612.json) | +To utilize ViTPose, you'll need to have [MMPreTrain](https://github.com/open-mmlab/mmpretrain). To install the required version, run the following command: + +```shell +mim install 'mmpretrain>=1.0.0' +``` + + + +
+ +ViTPose (NeurIPS'2022) + +```bibtex +@inproceedings{ + xu2022vitpose, + title={Vi{TP}ose: Simple Vision Transformer Baselines for Human Pose Estimation}, + author={Yufei Xu and Jing Zhang and Qiming Zhang and Dacheng Tao}, + booktitle={Advances in Neural Information Processing Systems}, + year={2022}, +} +``` + +
+ + + +
+COCO-WholeBody (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +
+
+Human-Art (CVPR'2023)
+
+```bibtex
+@inproceedings{ju2023humanart,
+  title={Human-Art: A Versatile Human-Centric Dataset Bridging Natural and Artificial Scenes},
+  author={Ju, Xuan and Zeng, Ailing and Wang, Jianan and Xu, Qiang and Zhang, Lei},
+  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
+  year={2023}
+}
+```
+
+
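+The tables below can be reproduced with MMPose's standard test script. A minimal sketch (assuming the repository root as the working directory and the checkpoint linked in the table already downloaded locally):
+
+```shell
+# Evaluate the HumanArt-trained ViTPose-S checkpoint on the Human-Art validation set.
+# If the validation dataloader in the config specifies a bbox_file of detection results,
+# detected person boxes are used; without it, ground-truth boxes are used instead.
+python tools/test.py \
+    configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-small_8xb64-210e_humanart-256x192.py \
+    td-hm_ViTPose-small_8xb64-210e_humanart-256x192-5cbe2bfc_20230611.pth
+```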
+ +Results on Human-Art validation dataset with detector having human AP of 56.2 on Human-Art validation dataset + +> With classic decoder + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [ViTPose-S-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192.py) | 256x192 | 0.228 | 0.371 | 0.229 | 0.298 | 0.467 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.json) | +| [ViTPose-S-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-small_8xb64-210e_humanart-256x192.py) | 256x192 | 0.381 | 0.532 | 0.405 | 0.448 | 0.602 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-small_8xb64-210e_humanart-256x192-5cbe2bfc_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-small_8xb64-210e_humanart-256x192-5cbe2bfc_20230611.json) | +| [ViTPose-B-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py) | 256x192 | 0.270 | 0.423 | 0.272 | 0.340 | 0.510 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.json) | +| [ViTPose-B-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py) | 256x192 | 0.410 | 0.549 | 0.434 | 0.475 | 0.615 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-base_8xb64-210e_humanart-256x192-b417f546_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-base_8xb64-210e_humanart-256x192-b417f546_20230611.json) | +| [ViTPose-L-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py) | 256x192 | 0.342 | 0.498 | 0.357 | 0.413 | 0.577 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.json) | +| [ViTPose-L-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py) | 256x192 | 0.459 | 0.592 | 0.487 | 0.525 | 0.656 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-large_8xb64-210e_humanart-256x192-9aba9345_20230614.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-large_8xb64-210e_humanart-256x192-9aba9345_20230614.json) | +| [ViTPose-H-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py) | 256x192 | 0.377 | 0.541 | 0.391 | 0.447 | 
0.615 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.json) | +| [ViTPose-H-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192.py) | 256x192 | 0.468 | 0.594 | 0.498 | 0.534 | 0.655 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192-603bb573_20230612.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192-603bb573_20230612.json) | + +Results on Human-Art validation dataset with ground-truth bounding-box + +> With classic decoder + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [ViTPose-S-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192.py) | 256x192 | 0.507 | 0.758 | 0.531 | 0.551 | 0.780 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.json) | +| [ViTPose-S-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-small_8xb64-210e_humanart-256x192.py) | 256x192 | 0.738 | 0.905 | 0.802 | 0.768 | 0.911 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-small_8xb64-210e_humanart-256x192-5cbe2bfc_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-small_8xb64-210e_humanart-256x192-5cbe2bfc_20230611.json) | +| [ViTPose-B-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py) | 256x192 | 0.555 | 0.782 | 0.590 | 0.599 | 0.809 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.json) | +| [ViTPose-B-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py) | 256x192 | 0.759 | 0.905 | 0.823 | 0.790 | 0.917 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-base_8xb64-210e_humanart-256x192-b417f546_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-base_8xb64-210e_humanart-256x192-b417f546_20230611.json) | +| [ViTPose-L-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py) | 256x192 | 0.637 | 0.838 | 0.689 | 0.677 | 0.859 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.pth) | 
[log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.json) | +| [ViTPose-L-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py) | 256x192 | 0.789 | 0.916 | 0.845 | 0.819 | 0.929 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-large_8xb64-210e_humanart-256x192-9aba9345_20230614.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-large_8xb64-210e_humanart-256x192-9aba9345_20230614.json) | +| [ViTPose-H-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py) | 256x192 | 0.665 | 0.860 | 0.715 | 0.701 | 0.871 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.json) | +| [ViTPose-H-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192.py) | 256x192 | 0.800 | 0.926 | 0.855 | 0.828 | 0.933 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192-603bb573_20230612.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192-603bb573_20230612.json) | + +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +> With classic decoder + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [ViTPose-S-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192.py) | 256x192 | 0.739 | 0.903 | 0.816 | 0.792 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-small_8xb64-210e_coco-256x192-62d7a712_20230314.json) | +| [ViTPose-S-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-small_8xb64-210e_humanart-256x192.py) | 256x192 | 0.737 | 0.902 | 0.811 | 0.792 | 0.942 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-small_8xb64-210e_humanart-256x192-5cbe2bfc_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-small_8xb64-210e_humanart-256x192-5cbe2bfc_20230611.json) | +| [ViTPose-B-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192.py) | 256x192 | 0.757 | 0.905 | 0.829 | 0.810 | 0.946 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-base_8xb64-210e_coco-256x192-216eae50_20230314.json) | +| 
[ViTPose-B-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py) | 256x192 | 0.758 | 0.906 | 0.829 | 0.812 | 0.946 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-base_8xb64-210e_humanart-256x192-b417f546_20230611.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-base_8xb64-210e_humanart-256x192-b417f546_20230611.json) | +| [ViTPose-L-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192.py) | 256x192 | 0.782 | 0.914 | 0.850 | 0.834 | 0.952 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-large_8xb64-210e_coco-256x192-53609f55_20230314.json) | +| [ViTPose-L-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py) | 256x192 | 0.782 | 0.914 | 0.849 | 0.835 | 0.953 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-large_8xb64-210e_humanart-256x192-9aba9345_20230614.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-large_8xb64-210e_humanart-256x192-9aba9345_20230614.json) | +| [ViTPose-H-coco](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py) | 256x192 | 0.788 | 0.917 | 0.855 | 0.839 | 0.954 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_ViTPose-huge_8xb64-210e_coco-256x192-e32adcd4_20230314.json) | +| [ViTPose-H-humanart-coco](configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192.py) | 256x192 | 0.788 | 0.914 | 0.853 | 0.841 | 0.956 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192-603bb573_20230612.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192-603bb573_20230612.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/humanart/vitpose_humanart.yml b/configs/body_2d_keypoint/topdown_heatmap/humanart/vitpose_humanart.yml index cbbe965c2d..2d2ba30fb2 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/humanart/vitpose_humanart.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/humanart/vitpose_humanart.yml @@ -1,145 +1,145 @@ -Collections: -- Name: ViTPose - Paper: - Title: 'ViTPose: Simple Vision Transformer Baselines for Human Pose Estimation' - URL: https://arxiv.org/abs/2204.12484 - README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/vitpose.md - Metadata: - Training Resources: 8x A100 GPUs -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-small_8xb64-210e_humanart-256x192.py - In Collection: ViTPose - Metadata: - Architecture: &id001 - - ViTPose - - Classic Head - Model Size: Small - Training Data: &id002 - - COCO - - Human-Art - Name: td-hm_ViTPose-small_8xb64-210e_humanart-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.737 - AP@0.5: 0.902 - 
AP@0.75: 0.811 - AR: 0.792 - AR@0.5: 0.942 - Task: Body 2D Keypoint - - Dataset: Human-Art - Metrics: - AP: 0.381 - AP@0.5: 0.532 - AP@0.75: 0.405 - AR: 0.448 - AR@0.5: 0.602 - Task: Body 2D Keypoint - - Dataset: Human-Art(GT) - Metrics: - AP: 0.738 - AP@0.5: 0.905 - AP@0.75: 0.802 - AR: 0.768 - AR@0.5: 0.911 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-small_8xb64-210e_humanart-256x192-5cbe2bfc_20230611.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py - In Collection: ViTPose - Metadata: - Architecture: *id001 - Model Size: Base - Training Data: *id002 - Name: td-hm_ViTPose-base_8xb64-210e_humanart-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.758 - AP@0.5: 0.906 - AP@0.75: 0.829 - AR: 0.812 - AR@0.5: 0.946 - Task: Body 2D Keypoint - - Dataset: Human-Art - Metrics: - AP: 0.410 - AP@0.5: 0.549 - AP@0.75: 0.434 - AR: 0.475 - AR@0.5: 0.615 - Task: Body 2D Keypoint - - Dataset: Human-Art(GT) - Metrics: - AP: 0.759 - AP@0.5: 0.905 - AP@0.75: 0.823 - AR: 0.790 - AR@0.5: 0.917 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-base_8xb64-210e_humanart-256x192-b417f546_20230611.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-large_8xb64-210e_humanart-256x192.py - In Collection: ViTPose - Metadata: - Architecture: *id001 - Model Size: Large - Training Data: *id002 - Name: td-hm_ViTPose-large_8xb64-210e_humanart-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.782 - AP@0.5: 0.914 - AP@0.75: 0.849 - AR: 0.835 - AR@0.5: 0.953 - Task: Body 2D Keypoint - - Dataset: Human-Art - Metrics: - AP: 0.459 - AP@0.5: 0.592 - AP@0.75: 0.487 - AR: 0.525 - AR@0.5: 0.656 - Task: Body 2D Keypoint - - Dataset: Human-Art(GT) - Metrics: - AP: 0.789 - AP@0.5: 0.916 - AP@0.75: 0.845 - AR: 0.819 - AR@0.5: 0.929 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-large_8xb64-210e_humanart-256x192-9aba9345_20230614.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192.py - In Collection: ViTPose - Metadata: - Architecture: *id001 - Model Size: Huge - Training Data: *id002 - Name: td-hm_ViTPose-huge_8xb64-210e_humanart-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.788 - AP@0.5: 0.914 - AP@0.75: 0.853 - AR: 0.841 - AR@0.5: 0.956 - Task: Body 2D Keypoint - - Dataset: Human-Art - Metrics: - AP: 0.468 - AP@0.5: 0.594 - AP@0.75: 0.498 - AR: 0.534 - AR@0.5: 0.655 - Task: Body 2D Keypoint - - Dataset: Human-Art(GT) - Metrics: - AP: 0.800 - AP@0.5: 0.926 - AP@0.75: 0.855 - AR: 0.828 - AR@0.5: 0.933 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192-603bb573_20230612.pth +Collections: +- Name: ViTPose + Paper: + Title: 'ViTPose: Simple Vision Transformer Baselines for Human Pose Estimation' + URL: https://arxiv.org/abs/2204.12484 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/vitpose.md + Metadata: + Training Resources: 8x A100 GPUs +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-small_8xb64-210e_humanart-256x192.py + In Collection: ViTPose + Metadata: + Architecture: &id001 + - ViTPose + - Classic Head + Model Size: 
Small + Training Data: &id002 + - COCO + - Human-Art + Name: td-hm_ViTPose-small_8xb64-210e_humanart-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.737 + AP@0.5: 0.902 + AP@0.75: 0.811 + AR: 0.792 + AR@0.5: 0.942 + Task: Body 2D Keypoint + - Dataset: Human-Art + Metrics: + AP: 0.381 + AP@0.5: 0.532 + AP@0.75: 0.405 + AR: 0.448 + AR@0.5: 0.602 + Task: Body 2D Keypoint + - Dataset: Human-Art(GT) + Metrics: + AP: 0.738 + AP@0.5: 0.905 + AP@0.75: 0.802 + AR: 0.768 + AR@0.5: 0.911 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-small_8xb64-210e_humanart-256x192-5cbe2bfc_20230611.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py + In Collection: ViTPose + Metadata: + Architecture: *id001 + Model Size: Base + Training Data: *id002 + Name: td-hm_ViTPose-base_8xb64-210e_humanart-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.758 + AP@0.5: 0.906 + AP@0.75: 0.829 + AR: 0.812 + AR@0.5: 0.946 + Task: Body 2D Keypoint + - Dataset: Human-Art + Metrics: + AP: 0.410 + AP@0.5: 0.549 + AP@0.75: 0.434 + AR: 0.475 + AR@0.5: 0.615 + Task: Body 2D Keypoint + - Dataset: Human-Art(GT) + Metrics: + AP: 0.759 + AP@0.5: 0.905 + AP@0.75: 0.823 + AR: 0.790 + AR@0.5: 0.917 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-base_8xb64-210e_humanart-256x192-b417f546_20230611.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-large_8xb64-210e_humanart-256x192.py + In Collection: ViTPose + Metadata: + Architecture: *id001 + Model Size: Large + Training Data: *id002 + Name: td-hm_ViTPose-large_8xb64-210e_humanart-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.782 + AP@0.5: 0.914 + AP@0.75: 0.849 + AR: 0.835 + AR@0.5: 0.953 + Task: Body 2D Keypoint + - Dataset: Human-Art + Metrics: + AP: 0.459 + AP@0.5: 0.592 + AP@0.75: 0.487 + AR: 0.525 + AR@0.5: 0.656 + Task: Body 2D Keypoint + - Dataset: Human-Art(GT) + Metrics: + AP: 0.789 + AP@0.5: 0.916 + AP@0.75: 0.845 + AR: 0.819 + AR@0.5: 0.929 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-large_8xb64-210e_humanart-256x192-9aba9345_20230614.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/humanart/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192.py + In Collection: ViTPose + Metadata: + Architecture: *id001 + Model Size: Huge + Training Data: *id002 + Name: td-hm_ViTPose-huge_8xb64-210e_humanart-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.788 + AP@0.5: 0.914 + AP@0.75: 0.853 + AR: 0.841 + AR@0.5: 0.956 + Task: Body 2D Keypoint + - Dataset: Human-Art + Metrics: + AP: 0.468 + AP@0.5: 0.594 + AP@0.75: 0.498 + AR: 0.534 + AR@0.5: 0.655 + Task: Body 2D Keypoint + - Dataset: Human-Art(GT) + Metrics: + AP: 0.800 + AP@0.5: 0.926 + AP@0.75: 0.855 + AR: 0.828 + AR@0.5: 0.933 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/human_art/td-hm_ViTPose-huge_8xb64-210e_humanart-256x192-603bb573_20230612.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/cpm_jhmdb.md b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/cpm_jhmdb.md index 29df027e3f..bb19451383 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/cpm_jhmdb.md +++ b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/cpm_jhmdb.md @@ -1,56 +1,56 @@ - - -
-CPM (CVPR'2016) - -```bibtex -@inproceedings{wei2016convolutional, - title={Convolutional pose machines}, - author={Wei, Shih-En and Ramakrishna, Varun and Kanade, Takeo and Sheikh, Yaser}, - booktitle={Proceedings of the IEEE conference on Computer Vision and Pattern Recognition}, - pages={4724--4732}, - year={2016} -} -``` - -
- - - -
-JHMDB (ICCV'2013) - -```bibtex -@inproceedings{Jhuang:ICCV:2013, - title = {Towards understanding action recognition}, - author = {H. Jhuang and J. Gall and S. Zuffi and C. Schmid and M. J. Black}, - booktitle = {International Conf. on Computer Vision (ICCV)}, - month = Dec, - pages = {3192-3199}, - year = {2013} -} -``` - -
- -Results on Sub-JHMDB dataset - -The models are pre-trained on MPII dataset only. NO test-time augmentation (multi-scale /rotation testing) is used. - -- Normalized by Person Size - -| Split | Arch | Input Size | Head | Sho | Elb | Wri | Hip | Knee | Ank | Mean | ckpt | log | -| :------ | :------------------------------------------------: | :--------: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :-------------------------------------------------: | :------------------------------------------------: | -| Sub1 | [cpm](/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368.py) | 368x368 | 96.1 | 91.9 | 81.0 | 78.9 | 96.6 | 90.8 | 87.3 | 89.5 | [ckpt](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub1_368x368-2d2585c9_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub1_368x368_20201122.log.json) | -| Sub2 | [cpm](/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368.py) | 368x368 | 98.1 | 93.6 | 77.1 | 70.9 | 94.0 | 89.1 | 84.7 | 87.4 | [ckpt](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub2_368x368-fc742f1f_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub2_368x368_20201122.log.json) | -| Sub3 | [cpm](/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368.py) | 368x368 | 97.9 | 94.9 | 87.3 | 84.0 | 98.6 | 94.4 | 86.2 | 92.4 | [ckpt](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub3_368x368-49337155_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub3_368x368_20201122.log.json) | -| Average | cpm | 368x368 | 97.4 | 93.5 | 81.5 | 77.9 | 96.4 | 91.4 | 86.1 | 89.8 | - | - | - -- Normalized by Torso Size - -| Split | Arch | Input Size | Head | Sho | Elb | Wri | Hip | Knee | Ank | Mean | ckpt | log | -| :------ | :------------------------------------------------: | :--------: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :-------------------------------------------------: | :------------------------------------------------: | -| Sub1 | [cpm](/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368.py) | 368x368 | 89.0 | 63.0 | 54.0 | 54.9 | 68.2 | 63.1 | 61.2 | 66.0 | [ckpt](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub1_368x368-2d2585c9_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub1_368x368_20201122.log.json) | -| Sub2 | [cpm](/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368.py) | 368x368 | 90.3 | 57.9 | 46.8 | 44.3 | 60.8 | 58.2 | 62.4 | 61.1 | [ckpt](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub2_368x368-fc742f1f_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub2_368x368_20201122.log.json) | -| Sub3 | [cpm](/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368.py) | 368x368 | 91.0 | 72.6 | 59.9 | 54.0 | 73.2 | 68.5 | 65.8 | 70.3 | [ckpt](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub3_368x368-49337155_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub3_368x368_20201122.log.json) | -| Average | cpm | 368x368 | 90.1 | 64.5 | 53.6 | 51.1 | 67.4 | 63.3 | 63.1 | 65.7 | - | - | + + +
+CPM (CVPR'2016) + +```bibtex +@inproceedings{wei2016convolutional, + title={Convolutional pose machines}, + author={Wei, Shih-En and Ramakrishna, Varun and Kanade, Takeo and Sheikh, Yaser}, + booktitle={Proceedings of the IEEE conference on Computer Vision and Pattern Recognition}, + pages={4724--4732}, + year={2016} +} +``` + +
+ + + +
+JHMDB (ICCV'2013) + +```bibtex +@inproceedings{Jhuang:ICCV:2013, + title = {Towards understanding action recognition}, + author = {H. Jhuang and J. Gall and S. Zuffi and C. Schmid and M. J. Black}, + booktitle = {International Conf. on Computer Vision (ICCV)}, + month = Dec, + pages = {3192-3199}, + year = {2013} +} +``` + +
+ +Results on Sub-JHMDB dataset + +The models are pre-trained on MPII dataset only. NO test-time augmentation (multi-scale /rotation testing) is used. + +- Normalized by Person Size + +| Split | Arch | Input Size | Head | Sho | Elb | Wri | Hip | Knee | Ank | Mean | ckpt | log | +| :------ | :------------------------------------------------: | :--------: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :-------------------------------------------------: | :------------------------------------------------: | +| Sub1 | [cpm](/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368.py) | 368x368 | 96.1 | 91.9 | 81.0 | 78.9 | 96.6 | 90.8 | 87.3 | 89.5 | [ckpt](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub1_368x368-2d2585c9_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub1_368x368_20201122.log.json) | +| Sub2 | [cpm](/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368.py) | 368x368 | 98.1 | 93.6 | 77.1 | 70.9 | 94.0 | 89.1 | 84.7 | 87.4 | [ckpt](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub2_368x368-fc742f1f_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub2_368x368_20201122.log.json) | +| Sub3 | [cpm](/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368.py) | 368x368 | 97.9 | 94.9 | 87.3 | 84.0 | 98.6 | 94.4 | 86.2 | 92.4 | [ckpt](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub3_368x368-49337155_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub3_368x368_20201122.log.json) | +| Average | cpm | 368x368 | 97.4 | 93.5 | 81.5 | 77.9 | 96.4 | 91.4 | 86.1 | 89.8 | - | - | + +- Normalized by Torso Size + +| Split | Arch | Input Size | Head | Sho | Elb | Wri | Hip | Knee | Ank | Mean | ckpt | log | +| :------ | :------------------------------------------------: | :--------: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :-------------------------------------------------: | :------------------------------------------------: | +| Sub1 | [cpm](/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368.py) | 368x368 | 89.0 | 63.0 | 54.0 | 54.9 | 68.2 | 63.1 | 61.2 | 66.0 | [ckpt](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub1_368x368-2d2585c9_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub1_368x368_20201122.log.json) | +| Sub2 | [cpm](/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368.py) | 368x368 | 90.3 | 57.9 | 46.8 | 44.3 | 60.8 | 58.2 | 62.4 | 61.1 | [ckpt](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub2_368x368-fc742f1f_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub2_368x368_20201122.log.json) | +| Sub3 | [cpm](/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368.py) | 368x368 | 91.0 | 72.6 | 59.9 | 54.0 | 73.2 | 68.5 | 65.8 | 70.3 | [ckpt](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub3_368x368-49337155_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub3_368x368_20201122.log.json) | +| Average | cpm | 368x368 | 90.1 | 64.5 | 53.6 | 51.1 | 67.4 | 63.3 | 63.1 | 65.7 | - | - | diff --git a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/cpm_jhmdb.yml 
b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/cpm_jhmdb.yml index f9f6d7568b..f923d5b4de 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/cpm_jhmdb.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/cpm_jhmdb.yml @@ -1,116 +1,116 @@ -Models: -- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368.py - In Collection: CPM - Metadata: - Architecture: &id001 - - CPM - Training Data: JHMDB - Name: td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368 - Results: - - Dataset: JHMDB - Metrics: - Ank: 87.3 - Elb: 81 - Head: 96.1 - Hip: 96.6 - Knee: 90.8 - Mean: 89.5 - Sho: 91.9 - Wri: 78.9 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub1_368x368-2d2585c9_20201122.pth -- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368.py - In Collection: CPM - Metadata: - Architecture: *id001 - Training Data: JHMDB - Name: td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368 - Results: - - Dataset: JHMDB - Metrics: - Ank: 84.7 - Elb: 77.1 - Head: 98.1 - Hip: 94.0 - Knee: 89.1 - Mean: 87.4 - Sho: 93.6 - Wri: 70.9 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub2_368x368-fc742f1f_20201122.pth -- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368.py - In Collection: CPM - Metadata: - Architecture: *id001 - Training Data: JHMDB - Name: td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368 - Results: - - Dataset: JHMDB - Metrics: - Ank: 86.2 - Elb: 87.3 - Head: 97.9 - Hip: 98.6 - Knee: 94.4 - Mean: 92.4 - Sho: 94.9 - Wri: 84.0 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub3_368x368-49337155_20201122.pth -- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368.py - In Collection: CPM - Metadata: - Architecture: *id001 - Training Data: JHMDB - Name: td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368 - Results: - - Dataset: JHMDB - Metrics: - Ank: 61.2 - Elb: 54.0 - Head: 89.0 - Hip: 68.2 - Knee: 63.1 - Mean: 66.0 - Sho: 63.0 - Wri: 54.9 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub1_368x368-2d2585c9_20201122.pth -- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368.py - In Collection: CPM - Metadata: - Architecture: *id001 - Training Data: JHMDB - Name: td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368 - Results: - - Dataset: JHMDB - Metrics: - Ank: 62.4 - Elb: 46.8 - Head: 90.3 - Hip: 60.8 - Knee: 58.2 - Mean: 61.1 - Sho: 57.9 - Wri: 44.3 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub2_368x368-fc742f1f_20201122.pth -- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368.py - In Collection: CPM - Metadata: - Architecture: *id001 - Training Data: JHMDB - Name: td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368 - Results: - - Dataset: JHMDB - Metrics: - Ank: 65.8 - Elb: 59.9 - Head: 91.0 - Hip: 73.2 - Knee: 68.5 - Mean: 70.3 - Sho: 72.6 - Wri: 54.0 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub3_368x368-49337155_20201122.pth +Models: +- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368.py + In Collection: CPM + Metadata: + Architecture: &id001 + - CPM + Training Data: JHMDB + Name: 
td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368 + Results: + - Dataset: JHMDB + Metrics: + Ank: 87.3 + Elb: 81 + Head: 96.1 + Hip: 96.6 + Knee: 90.8 + Mean: 89.5 + Sho: 91.9 + Wri: 78.9 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub1_368x368-2d2585c9_20201122.pth +- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368.py + In Collection: CPM + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368 + Results: + - Dataset: JHMDB + Metrics: + Ank: 84.7 + Elb: 77.1 + Head: 98.1 + Hip: 94.0 + Knee: 89.1 + Mean: 87.4 + Sho: 93.6 + Wri: 70.9 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub2_368x368-fc742f1f_20201122.pth +- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368.py + In Collection: CPM + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368 + Results: + - Dataset: JHMDB + Metrics: + Ank: 86.2 + Elb: 87.3 + Head: 97.9 + Hip: 98.6 + Knee: 94.4 + Mean: 92.4 + Sho: 94.9 + Wri: 84.0 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub3_368x368-49337155_20201122.pth +- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368.py + In Collection: CPM + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368 + Results: + - Dataset: JHMDB + Metrics: + Ank: 61.2 + Elb: 54.0 + Head: 89.0 + Hip: 68.2 + Knee: 63.1 + Mean: 66.0 + Sho: 63.0 + Wri: 54.9 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub1_368x368-2d2585c9_20201122.pth +- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368.py + In Collection: CPM + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368 + Results: + - Dataset: JHMDB + Metrics: + Ank: 62.4 + Elb: 46.8 + Head: 90.3 + Hip: 60.8 + Knee: 58.2 + Mean: 61.1 + Sho: 57.9 + Wri: 44.3 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub2_368x368-fc742f1f_20201122.pth +- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368.py + In Collection: CPM + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368 + Results: + - Dataset: JHMDB + Metrics: + Ank: 65.8 + Elb: 59.9 + Head: 91.0 + Hip: 73.2 + Knee: 68.5 + Mean: 70.3 + Sho: 72.6 + Wri: 54.0 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/cpm/cpm_jhmdb_sub3_368x368-49337155_20201122.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/resnet_jhmdb.md b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/resnet_jhmdb.md index 22422e7316..d82672fe0b 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/resnet_jhmdb.md +++ b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/resnet_jhmdb.md @@ -1,81 +1,81 @@ - - -
-SimpleBaseline2D (ECCV'2018) - -```bibtex -@inproceedings{xiao2018simple, - title={Simple baselines for human pose estimation and tracking}, - author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, - booktitle={Proceedings of the European conference on computer vision (ECCV)}, - pages={466--481}, - year={2018} -} -``` - -
- - - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
- - - -
-JHMDB (ICCV'2013) - -```bibtex -@inproceedings{Jhuang:ICCV:2013, - title = {Towards understanding action recognition}, - author = {H. Jhuang and J. Gall and S. Zuffi and C. Schmid and M. J. Black}, - booktitle = {International Conf. on Computer Vision (ICCV)}, - month = Dec, - pages = {3192-3199}, - year = {2013} -} -``` - -
- -Results on Sub-JHMDB dataset - -The models are pre-trained on MPII dataset only. *NO* test-time augmentation (multi-scale /rotation testing) is used. - -- Normalized by Person Size - -| Split | Arch | Input Size | Head | Sho | Elb | Wri | Hip | Knee | Ank | Mean | ckpt | log | -| :------ | :------------------------------------------------: | :--------: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :-------------------------------------------------: | :------------------------------------------------: | -| Sub1 | [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub1-256x256.py) | 256x256 | 99.1 | 98.0 | 93.8 | 91.3 | 99.4 | 96.5 | 92.8 | 96.1 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub1_256x256-932cb3b4_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub1_256x256_20201122.log.json) | -| Sub2 | [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub2-256x256.py) | 256x256 | 99.3 | 97.1 | 90.6 | 87.0 | 98.9 | 96.3 | 94.1 | 95.0 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub2_256x256-83d606f7_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub2_256x256_20201122.log.json) | -| Sub3 | [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub3-256x256.py) | 256x256 | 99.0 | 97.9 | 94.0 | 91.6 | 99.7 | 98.0 | 94.7 | 96.7 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub3_256x256-c4ec1a0b_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub3_256x256_20201122.log.json) | -| Average | pose_resnet_50 | 256x256 | 99.2 | 97.7 | 92.8 | 90.0 | 99.3 | 96.9 | 93.9 | 96.0 | - | - | -| Sub1 | [pose_resnet_50 (2 Deconv.)](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256.py) | 256x256 | 99.1 | 98.5 | 94.6 | 92.0 | 99.4 | 94.6 | 92.5 | 96.1 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub1_256x256-f0574a52_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub1_256x256_20201122.log.json) | -| Sub2 | [pose_resnet_50 (2 Deconv.)](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256.py) | 256x256 | 99.3 | 97.8 | 91.0 | 87.0 | 99.1 | 96.5 | 93.8 | 95.2 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub2_256x256-f63af0ff_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub2_256x256_20201122.log.json) | -| Sub3 | [pose_resnet_50 (2 Deconv.)](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256.py) | 256x256 | 98.8 | 98.4 | 94.3 | 92.1 | 99.8 | 97.5 | 93.8 | 96.7 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub3_256x256-c4bc2ddb_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub3_256x256_20201122.log.json) | -| Average | pose_resnet_50 (2 Deconv.) 
| 256x256 | 99.1 | 98.2 | 93.3 | 90.4 | 99.4 | 96.2 | 93.4 | 96.0 | - | - | - -- Normalized by Torso Size - -| Split | Arch | Input Size | Head | Sho | Elb | Wri | Hip | Knee | Ank | Mean | ckpt | log | -| :------ | :------------------------------------------------: | :--------: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :-------------------------------------------------: | :------------------------------------------------: | -| Sub1 | [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub1-256x256.py) | 256x256 | 93.3 | 83.2 | 74.4 | 72.7 | 85.0 | 81.2 | 78.9 | 81.9 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub1_256x256-932cb3b4_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub1_256x256_20201122.log.json) | -| Sub2 | [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub2-256x256.py) | 256x256 | 94.1 | 74.9 | 64.5 | 62.5 | 77.9 | 71.9 | 78.6 | 75.5 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub2_256x256-83d606f7_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub2_256x256_20201122.log.json) | -| Sub3 | [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub3-256x256.py) | 256x256 | 97.0 | 82.2 | 74.9 | 70.7 | 84.7 | 83.7 | 84.2 | 82.9 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub3_256x256-c4ec1a0b_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub3_256x256_20201122.log.json) | -| Average | pose_resnet_50 | 256x256 | 94.8 | 80.1 | 71.3 | 68.6 | 82.5 | 78.9 | 80.6 | 80.1 | - | - | -| Sub1 | [pose_resnet_50 (2 Deconv.)](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256.py) | 256x256 | 92.4 | 80.6 | 73.2 | 70.5 | 82.3 | 75.4 | 75.0 | 79.2 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub1_256x256-f0574a52_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub1_256x256_20201122.log.json) | -| Sub2 | [pose_resnet_50 (2 Deconv.)](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256.py) | 256x256 | 93.4 | 73.6 | 63.8 | 60.5 | 75.1 | 68.4 | 75.5 | 73.7 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub2_256x256-f63af0ff_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub2_256x256_20201122.log.json) | -| Sub3 | [pose_resnet_50 (2 Deconv.)](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256.py) | 256x256 | 96.1 | 81.2 | 72.6 | 67.9 | 83.6 | 80.9 | 81.5 | 81.2 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub3_256x256-c4bc2ddb_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub3_256x256_20201122.log.json) | -| Average | pose_resnet_50 (2 Deconv.) | 256x256 | 94.0 | 78.5 | 69.9 | 66.3 | 80.3 | 74.9 | 77.3 | 78.0 | - | - | + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
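SimpleBaseline2D keeps the architecture deliberately small: a standard classification backbone followed by a few stride-2 deconvolution layers that upsample the last feature map into one heatmap per joint. The configs touched in this patch build it with MMPose's `TopdownPoseEstimator`, a depth-50 `ResNet` backbone and a `HeatmapHead`. The "(2 Deconv.)" rows in the tables further down use a head with only two deconvolution layers instead of the default three, which is why those configs pair it with a 32x32 heatmap codec rather than 64x64. Below is a minimal sketch of the two head variants, with values copied from the config files in this patch; it is an illustrative fragment, not a complete MMPose config.

```python
# Head variants used by the Sub-JHMDB ResNet-50 configs in this patch.
# Values are copied from the config files below; this fragment is only
# illustrative and is not a runnable MMPose config on its own.

# Default SimpleBaseline2D head: HeatmapHead left at its defaults
# (three 4x4 stride-2 deconv layers in MMPose 1.x), decoding a
# 256x256 crop into a 64x64 heatmap.
head_default = dict(
    type='HeatmapHead',
    in_channels=2048,  # ResNet-50 final feature channels
    out_channels=15,   # JHMDB keypoints
)

# "2 Deconv." variant: only two deconv layers are specified, so the
# heatmap resolution stays at 32x32 for the same 256x256 input.
head_2deconv = dict(
    type='HeatmapHead',
    in_channels=2048,
    out_channels=15,
    deconv_out_channels=(256, 256),
    deconv_kernel_sizes=(4, 4),
)
```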
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
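The heatmap resolutions used in these configs follow directly from the ResNet output stride and the number of deconvolution layers: ResNet-50 reduces a 256x256 crop by a factor of 32 to an 8x8 feature map, and each 4x4 stride-2 deconv doubles the spatial size again. That gives 64x64 with the default three deconvs and 32x32 with two, matching the `MSRAHeatmap` codec settings (`heatmap_size=(64, 64)` vs `(32, 32)`) in the config files of this patch. A quick sanity check of that arithmetic (the helper name is ours, purely for illustration):

```python
def heatmap_side(input_side: int, backbone_stride: int = 32,
                 num_deconv_layers: int = 3) -> int:
    """Heatmap side length for a SimpleBaseline2D-style head.

    The backbone shrinks the crop by its output stride, then every
    stride-2 deconvolution layer doubles the spatial size again.
    """
    return (input_side // backbone_stride) * (2 ** num_deconv_layers)


assert heatmap_side(256, num_deconv_layers=3) == 64  # default head -> 64x64 codec
assert heatmap_side(256, num_deconv_layers=2) == 32  # "2 Deconv."  -> 32x32 codec
```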
+ + + +
+JHMDB (ICCV'2013) + +```bibtex +@inproceedings{Jhuang:ICCV:2013, + title = {Towards understanding action recognition}, + author = {H. Jhuang and J. Gall and S. Zuffi and C. Schmid and M. J. Black}, + booktitle = {International Conf. on Computer Vision (ICCV)}, + month = Dec, + pages = {3192-3199}, + year = {2013} +} +``` + +
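The two result tables below ("Normalized by Person Size" and "Normalized by Torso Size") correspond to the two normalization items of the evaluator that every config in this patch registers, `dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso'])`: a predicted joint counts as correct when its error is within 0.2 of the person bounding-box size or of the torso size, respectively. That is our reading of the metric; the authoritative definition lives in the `JhmdbPCKAccuracy` implementation. The MPII pre-training mentioned in the note below enters through the `load_from` checkpoint (`res50_mpii_256x256-418ffc88_20200812.pth`) set in the ResNet-50 configs. A toy sketch of the PCK idea, not the actual evaluator code:

```python
import numpy as np


def toy_pck(pred: np.ndarray, gt: np.ndarray, norm_size: float,
            thr: float = 0.2) -> float:
    """Fraction of joints whose error is below thr * norm_size.

    pred, gt: (num_joints, 2) keypoint arrays; norm_size: the person
    bounding-box size or torso size used as the normalizer. This only
    mirrors the spirit of JhmdbPCKAccuracy's per-joint check, not its
    exact implementation.
    """
    dist = np.linalg.norm(pred - gt, axis=-1) / norm_size
    return float((dist < thr).mean())
```

Checkpoints from the tables can be evaluated with the usual MMPose entry point, e.g. `python tools/test.py <config> <checkpoint>`; the exact flags depend on the installed MMPose version.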
+ +Results on Sub-JHMDB dataset + +The models are pre-trained on MPII dataset only. *NO* test-time augmentation (multi-scale /rotation testing) is used. + +- Normalized by Person Size + +| Split | Arch | Input Size | Head | Sho | Elb | Wri | Hip | Knee | Ank | Mean | ckpt | log | +| :------ | :------------------------------------------------: | :--------: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :-------------------------------------------------: | :------------------------------------------------: | +| Sub1 | [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub1-256x256.py) | 256x256 | 99.1 | 98.0 | 93.8 | 91.3 | 99.4 | 96.5 | 92.8 | 96.1 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub1_256x256-932cb3b4_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub1_256x256_20201122.log.json) | +| Sub2 | [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub2-256x256.py) | 256x256 | 99.3 | 97.1 | 90.6 | 87.0 | 98.9 | 96.3 | 94.1 | 95.0 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub2_256x256-83d606f7_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub2_256x256_20201122.log.json) | +| Sub3 | [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub3-256x256.py) | 256x256 | 99.0 | 97.9 | 94.0 | 91.6 | 99.7 | 98.0 | 94.7 | 96.7 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub3_256x256-c4ec1a0b_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub3_256x256_20201122.log.json) | +| Average | pose_resnet_50 | 256x256 | 99.2 | 97.7 | 92.8 | 90.0 | 99.3 | 96.9 | 93.9 | 96.0 | - | - | +| Sub1 | [pose_resnet_50 (2 Deconv.)](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256.py) | 256x256 | 99.1 | 98.5 | 94.6 | 92.0 | 99.4 | 94.6 | 92.5 | 96.1 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub1_256x256-f0574a52_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub1_256x256_20201122.log.json) | +| Sub2 | [pose_resnet_50 (2 Deconv.)](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256.py) | 256x256 | 99.3 | 97.8 | 91.0 | 87.0 | 99.1 | 96.5 | 93.8 | 95.2 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub2_256x256-f63af0ff_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub2_256x256_20201122.log.json) | +| Sub3 | [pose_resnet_50 (2 Deconv.)](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256.py) | 256x256 | 98.8 | 98.4 | 94.3 | 92.1 | 99.8 | 97.5 | 93.8 | 96.7 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub3_256x256-c4bc2ddb_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub3_256x256_20201122.log.json) | +| Average | pose_resnet_50 (2 Deconv.) 
| 256x256 | 99.1 | 98.2 | 93.3 | 90.4 | 99.4 | 96.2 | 93.4 | 96.0 | - | - | + +- Normalized by Torso Size + +| Split | Arch | Input Size | Head | Sho | Elb | Wri | Hip | Knee | Ank | Mean | ckpt | log | +| :------ | :------------------------------------------------: | :--------: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :-------------------------------------------------: | :------------------------------------------------: | +| Sub1 | [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub1-256x256.py) | 256x256 | 93.3 | 83.2 | 74.4 | 72.7 | 85.0 | 81.2 | 78.9 | 81.9 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub1_256x256-932cb3b4_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub1_256x256_20201122.log.json) | +| Sub2 | [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub2-256x256.py) | 256x256 | 94.1 | 74.9 | 64.5 | 62.5 | 77.9 | 71.9 | 78.6 | 75.5 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub2_256x256-83d606f7_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub2_256x256_20201122.log.json) | +| Sub3 | [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub3-256x256.py) | 256x256 | 97.0 | 82.2 | 74.9 | 70.7 | 84.7 | 83.7 | 84.2 | 82.9 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub3_256x256-c4ec1a0b_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub3_256x256_20201122.log.json) | +| Average | pose_resnet_50 | 256x256 | 94.8 | 80.1 | 71.3 | 68.6 | 82.5 | 78.9 | 80.6 | 80.1 | - | - | +| Sub1 | [pose_resnet_50 (2 Deconv.)](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256.py) | 256x256 | 92.4 | 80.6 | 73.2 | 70.5 | 82.3 | 75.4 | 75.0 | 79.2 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub1_256x256-f0574a52_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub1_256x256_20201122.log.json) | +| Sub2 | [pose_resnet_50 (2 Deconv.)](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256.py) | 256x256 | 93.4 | 73.6 | 63.8 | 60.5 | 75.1 | 68.4 | 75.5 | 73.7 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub2_256x256-f63af0ff_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub2_256x256_20201122.log.json) | +| Sub3 | [pose_resnet_50 (2 Deconv.)](/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256.py) | 256x256 | 96.1 | 81.2 | 72.6 | 67.9 | 83.6 | 80.9 | 81.5 | 81.2 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub3_256x256-c4bc2ddb_20201122.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub3_256x256_20201122.log.json) | +| Average | pose_resnet_50 (2 Deconv.) 
| 256x256 | 94.0 | 78.5 | 69.9 | 66.3 | 80.3 | 74.9 | 77.3 | 78.0 | - | - | diff --git a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/resnet_jhmdb.yml b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/resnet_jhmdb.yml index d7480d12a0..a4a9de3789 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/resnet_jhmdb.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/resnet_jhmdb.yml @@ -1,231 +1,231 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub1-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: &id001 - - SimpleBaseline2D - - ResNet - Training Data: JHMDB - Name: td-hm_res50_8xb64-20e_jhmdb-sub1-256x256 - Results: - - Dataset: JHMDB - Metrics: - Ank: 92.8 - Elb: 93.8 - Head: 99.1 - Hip: 99.4 - Knee: 96.5 - Mean: 96.1 - Sho: 98.0 - Wri: 91.3 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub1_256x256-932cb3b4_20201122.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub2-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: JHMDB - Name: td-hm_res50_8xb64-20e_jhmdb-sub2-256x256 - Results: - - Dataset: JHMDB - Metrics: - Ank: 94.1 - Elb: 90.6 - Head: 99.3 - Hip: 98.9 - Knee: 96.3 - Mean: 95.0 - Sho: 97.1 - Wri: 87.0 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub2_256x256-83d606f7_20201122.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub3-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: JHMDB - Name: td-hm_res50_8xb64-20e_jhmdb-sub3-256x256 - Results: - - Dataset: JHMDB - Metrics: - Ank: 94.7 - Elb: 94.0 - Head: 99.0 - Hip: 99.7 - Knee: 98.0 - Mean: 96.7 - Sho: 97.9 - Wri: 91.6 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub3_256x256-c4ec1a0b_20201122.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: JHMDB - Name: td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256 - Results: - - Dataset: JHMDB - Metrics: - Ank: 92.5 - Elb: 94.6 - Head: 99.1 - Hip: 99.4 - Knee: 94.6 - Mean: 96.1 - Sho: 98.5 - Wri: 92.0 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub1_256x256-f0574a52_20201122.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: JHMDB - Name: td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256 - Results: - - Dataset: JHMDB - Metrics: - Ank: 93.8 - Elb: 91.0 - Head: 99.3 - Hip: 99.1 - Knee: 96.5 - Mean: 95.2 - Sho: 97.8 - Wri: 87.0 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub2_256x256-f63af0ff_20201122.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: JHMDB - Name: td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256 - Results: - - Dataset: JHMDB - Metrics: - Ank: 93.8 - Elb: 94.3 - Head: 98.8 - Hip: 99.8 - Knee: 97.5 - Mean: 96.7 - Sho: 98.4 - Wri: 92.1 - Task: Body 2D Keypoint - Weights: 
https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub3_256x256-c4bc2ddb_20201122.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub1-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: JHMDB - Name: td-hm_res50_8xb64-20e_jhmdb-sub1-256x256 - Results: - - Dataset: JHMDB - Metrics: - Ank: 78.9 - Elb: 74.4 - Head: 93.3 - Hip: 85.0 - Knee: 81.2 - Mean: 81.9 - Sho: 83.2 - Wri: 72.7 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub1_256x256-932cb3b4_20201122.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub2-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: JHMDB - Name: td-hm_res50_8xb64-20e_jhmdb-sub2-256x256 - Results: - - Dataset: JHMDB - Metrics: - Ank: 78.6 - Elb: 64.5 - Head: 94.1 - Hip: 77.9 - Knee: 71.9 - Mean: 75.5 - Sho: 74.9 - Wri: 62.5 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub2_256x256-83d606f7_20201122.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub3-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: JHMDB - Name: td-hm_res50_8xb64-20e_jhmdb-sub3-256x256 - Results: - - Dataset: JHMDB - Metrics: - Ank: 84.2 - Elb: 74.9 - Head: 97.0 - Hip: 84.7 - Knee: 83.7 - Mean: 82.9 - Sho: 82.2 - Wri: 70.7 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub3_256x256-c4ec1a0b_20201122.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: JHMDB - Name: td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256 - Results: - - Dataset: JHMDB - Metrics: - Ank: 75.0 - Elb: 73.2 - Head: 92.4 - Hip: 82.3 - Knee: 75.4 - Mean: 79.2 - Sho: 80.6 - Wri: 70.5 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub1_256x256-f0574a52_20201122.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: JHMDB - Name: td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256 - Results: - - Dataset: JHMDB - Metrics: - Ank: 75.5 - Elb: 63.8 - Head: 93.4 - Hip: 75.1 - Knee: 68.4 - Mean: 73.7 - Sho: 73.6 - Wri: 60.5 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub2_256x256-f63af0ff_20201122.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: JHMDB - Name: td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256 - Results: - - Dataset: JHMDB - Metrics: - Ank: 81.5 - Elb: 72.6 - Head: 96.1 - Hip: 83.6 - Knee: 80.9 - Mean: 81.2 - Sho: 81.2 - Wri: 67.9 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub3_256x256-c4bc2ddb_20201122.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub1-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNet + Training Data: JHMDB + Name: 
td-hm_res50_8xb64-20e_jhmdb-sub1-256x256 + Results: + - Dataset: JHMDB + Metrics: + Ank: 92.8 + Elb: 93.8 + Head: 99.1 + Hip: 99.4 + Knee: 96.5 + Mean: 96.1 + Sho: 98.0 + Wri: 91.3 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub1_256x256-932cb3b4_20201122.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub2-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_res50_8xb64-20e_jhmdb-sub2-256x256 + Results: + - Dataset: JHMDB + Metrics: + Ank: 94.1 + Elb: 90.6 + Head: 99.3 + Hip: 98.9 + Knee: 96.3 + Mean: 95.0 + Sho: 97.1 + Wri: 87.0 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub2_256x256-83d606f7_20201122.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub3-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_res50_8xb64-20e_jhmdb-sub3-256x256 + Results: + - Dataset: JHMDB + Metrics: + Ank: 94.7 + Elb: 94.0 + Head: 99.0 + Hip: 99.7 + Knee: 98.0 + Mean: 96.7 + Sho: 97.9 + Wri: 91.6 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub3_256x256-c4ec1a0b_20201122.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256 + Results: + - Dataset: JHMDB + Metrics: + Ank: 92.5 + Elb: 94.6 + Head: 99.1 + Hip: 99.4 + Knee: 94.6 + Mean: 96.1 + Sho: 98.5 + Wri: 92.0 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub1_256x256-f0574a52_20201122.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256 + Results: + - Dataset: JHMDB + Metrics: + Ank: 93.8 + Elb: 91.0 + Head: 99.3 + Hip: 99.1 + Knee: 96.5 + Mean: 95.2 + Sho: 97.8 + Wri: 87.0 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub2_256x256-f63af0ff_20201122.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256 + Results: + - Dataset: JHMDB + Metrics: + Ank: 93.8 + Elb: 94.3 + Head: 98.8 + Hip: 99.8 + Knee: 97.5 + Mean: 96.7 + Sho: 98.4 + Wri: 92.1 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub3_256x256-c4bc2ddb_20201122.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub1-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_res50_8xb64-20e_jhmdb-sub1-256x256 + Results: + - Dataset: JHMDB + Metrics: + Ank: 78.9 + Elb: 74.4 + Head: 93.3 + Hip: 85.0 + Knee: 81.2 + Mean: 81.9 + Sho: 83.2 + Wri: 72.7 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub1_256x256-932cb3b4_20201122.pth +- Config: 
configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub2-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_res50_8xb64-20e_jhmdb-sub2-256x256 + Results: + - Dataset: JHMDB + Metrics: + Ank: 78.6 + Elb: 64.5 + Head: 94.1 + Hip: 77.9 + Knee: 71.9 + Mean: 75.5 + Sho: 74.9 + Wri: 62.5 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub2_256x256-83d606f7_20201122.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub3-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_res50_8xb64-20e_jhmdb-sub3-256x256 + Results: + - Dataset: JHMDB + Metrics: + Ank: 84.2 + Elb: 74.9 + Head: 97.0 + Hip: 84.7 + Knee: 83.7 + Mean: 82.9 + Sho: 82.2 + Wri: 70.7 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_jhmdb_sub3_256x256-c4ec1a0b_20201122.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256 + Results: + - Dataset: JHMDB + Metrics: + Ank: 75.0 + Elb: 73.2 + Head: 92.4 + Hip: 82.3 + Knee: 75.4 + Mean: 79.2 + Sho: 80.6 + Wri: 70.5 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub1_256x256-f0574a52_20201122.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256 + Results: + - Dataset: JHMDB + Metrics: + Ank: 75.5 + Elb: 63.8 + Head: 93.4 + Hip: 75.1 + Knee: 68.4 + Mean: 73.7 + Sho: 73.6 + Wri: 60.5 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub2_256x256-f63af0ff_20201122.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: JHMDB + Name: td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256 + Results: + - Dataset: JHMDB + Metrics: + Ank: 81.5 + Elb: 72.6 + Head: 96.1 + Hip: 83.6 + Knee: 80.9 + Mean: 81.2 + Sho: 81.2 + Wri: 67.9 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_2deconv_jhmdb_sub3_256x256-c4bc2ddb_20201122.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368.py b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368.py index 479039f542..fb59f0a9c2 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368.py +++ b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368.py @@ -1,127 +1,127 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=40, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=40, - milestones=[20, 30], - gamma=0.1, - by_epoch=True) -] - -# 
automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='PCK', rule='greater', interval=1)) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(368, 368), heatmap_size=(46, 46), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='CPM', - in_channels=3, - out_channels=15, - feat_channels=128, - num_stages=6), - head=dict( - type='CPMHead', - in_channels=15, - out_channels=15, - num_stages=6, - deconv_out_channels=None, - final_layer=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'JhmdbDataset' -data_mode = 'topdown' -data_root = 'data/jhmdb/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/Sub1_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/Sub1_test.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=40, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=40, + milestones=[20, 30], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='PCK', rule='greater', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(368, 368), heatmap_size=(46, 46), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='CPM', + in_channels=3, + out_channels=15, + feat_channels=128, + num_stages=6), + 
head=dict( + type='CPMHead', + in_channels=15, + out_channels=15, + num_stages=6, + deconv_out_channels=None, + final_layer=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'JhmdbDataset' +data_mode = 'topdown' +data_root = 'data/jhmdb/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub1_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub1_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), +] +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368.py b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368.py index 88b60e9f87..84875cac8a 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368.py +++ b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368.py @@ -1,127 +1,127 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=40, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=40, - milestones=[20, 30], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='PCK', rule='greater', interval=1)) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(368, 368), heatmap_size=(46, 46), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='CPM', - in_channels=3, - out_channels=15, - feat_channels=128, - num_stages=6), - head=dict( - type='CPMHead', - in_channels=15, - out_channels=15, - num_stages=6, - deconv_out_channels=None, - final_layer=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - 
decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'JhmdbDataset' -data_mode = 'topdown' -data_root = 'data/jhmdb/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/Sub2_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/Sub2_test.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=40, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=40, + milestones=[20, 30], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='PCK', rule='greater', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(368, 368), heatmap_size=(46, 46), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='CPM', + in_channels=3, + out_channels=15, + feat_channels=128, + num_stages=6), + head=dict( + type='CPMHead', + in_channels=15, + out_channels=15, + num_stages=6, + deconv_out_channels=None, + final_layer=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'JhmdbDataset' +data_mode = 'topdown' +data_root = 'data/jhmdb/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + 
dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub2_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub2_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), +] +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368.py b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368.py index 602b2bcfd6..9995cbf584 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368.py +++ b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368.py @@ -1,127 +1,127 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=40, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=40, - milestones=[20, 30], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='PCK', rule='greater', interval=1)) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(368, 368), heatmap_size=(46, 46), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='CPM', - in_channels=3, - out_channels=15, - feat_channels=128, - num_stages=6), - head=dict( - type='CPMHead', - in_channels=15, - out_channels=15, - num_stages=6, - deconv_out_channels=None, - final_layer=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'JhmdbDataset' -data_mode = 'topdown' -data_root = 'data/jhmdb/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - 
num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/Sub3_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/Sub3_test.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=40, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=40, + milestones=[20, 30], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='PCK', rule='greater', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(368, 368), heatmap_size=(46, 46), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='CPM', + in_channels=3, + out_channels=15, + feat_channels=128, + num_stages=6), + head=dict( + type='CPMHead', + in_channels=15, + out_channels=15, + num_stages=6, + deconv_out_channels=None, + final_layer=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'JhmdbDataset' +data_mode = 'topdown' +data_root = 'data/jhmdb/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub3_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub3_test.json', + data_prefix=dict(img=''), + test_mode=True, + 
pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), +] +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256.py index 8d104e1e86..8eba9a4e6b 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=40, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=40, - milestones=[20, 30], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='PCK', rule='greater', interval=1)) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(32, 32), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict(type='ResNet', depth=50), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=15, - deconv_out_channels=(256, 256), - deconv_kernel_sizes=(4, 4), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) -load_from = 'https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth' # noqa: E501 - -# base dataset settings -dataset_type = 'JhmdbDataset' -data_mode = 'topdown' -data_root = 'data/jhmdb/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/Sub1_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/Sub1_test.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators 
-val_evaluator = [ - dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=40, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=40, + milestones=[20, 30], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='PCK', rule='greater', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(32, 32), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict(type='ResNet', depth=50), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=15, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) +load_from = 'https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth' # noqa: E501 + +# base dataset settings +dataset_type = 'JhmdbDataset' +data_mode = 'topdown' +data_root = 'data/jhmdb/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub1_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub1_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), +] +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256.py index 6135ce29ab..627f74ea22 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256.py +++ 
b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=40, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=40, - milestones=[20, 30], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='PCK', rule='greater', interval=1)) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(32, 32), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict(type='ResNet', depth=50), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=15, - deconv_out_channels=(256, 256), - deconv_kernel_sizes=(4, 4), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) -load_from = 'https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth' # noqa: E501 - -# base dataset settings -dataset_type = 'JhmdbDataset' -data_mode = 'topdown' -data_root = 'data/jhmdb/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/Sub2_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/Sub2_test.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=40, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=40, + milestones=[20, 30], + gamma=0.1, + by_epoch=True) +] + +# 
automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='PCK', rule='greater', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(32, 32), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict(type='ResNet', depth=50), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=15, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) +load_from = 'https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth' # noqa: E501 + +# base dataset settings +dataset_type = 'JhmdbDataset' +data_mode = 'topdown' +data_root = 'data/jhmdb/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub2_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub2_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), +] +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256.py index 44d95b15b2..c61e18be17 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=40, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=40, - milestones=[20, 30], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = 
dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='PCK', rule='greater', interval=1)) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(32, 32), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict(type='ResNet', depth=50), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=15, - deconv_out_channels=(256, 256), - deconv_kernel_sizes=(4, 4), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) -load_from = 'https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth' # noqa: E501 - -# base dataset settings -dataset_type = 'JhmdbDataset' -data_mode = 'topdown' -data_root = 'data/jhmdb/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/Sub3_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/Sub3_test.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=40, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=40, + milestones=[20, 30], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='PCK', rule='greater', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(32, 32), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict(type='ResNet', depth=50), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=15, + 
deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) +load_from = 'https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth' # noqa: E501 + +# base dataset settings +dataset_type = 'JhmdbDataset' +data_mode = 'topdown' +data_root = 'data/jhmdb/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub3_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub3_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), +] +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub1-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub1-256x256.py index 9578a66c18..2bb50688b7 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub1-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub1-256x256.py @@ -1,120 +1,120 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=20, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=20, - milestones=[8, 15], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='PCK', rule='greater', interval=1)) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict(type='ResNet', depth=50), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=15, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - 
flip_mode='heatmap', - shift_heatmap=True, - )) -load_from = 'https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth' # noqa: E501 - -# base dataset settings -dataset_type = 'JhmdbDataset' -data_mode = 'topdown' -data_root = 'data/jhmdb/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/Sub1_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/Sub1_test.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=20, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=20, + milestones=[8, 15], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='PCK', rule='greater', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict(type='ResNet', depth=50), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=15, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) +load_from = 'https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth' # noqa: E501 + +# base dataset settings +dataset_type = 'JhmdbDataset' +data_mode = 'topdown' +data_root = 'data/jhmdb/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + 
dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub1_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub1_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), +] +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub2-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub2-256x256.py index 856c89e660..3cdcaffb76 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub2-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub2-256x256.py @@ -1,120 +1,120 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=20, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=20, - milestones=[8, 15], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='PCK', rule='greater', interval=1)) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict(type='ResNet', depth=50), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=15, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) -load_from = 'https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth' # noqa: E501 - -# base dataset settings -dataset_type = 'JhmdbDataset' -data_mode = 'topdown' -data_root = 'data/jhmdb/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - 
dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/Sub2_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/Sub2_test.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=20, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=20, + milestones=[8, 15], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='PCK', rule='greater', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict(type='ResNet', depth=50), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=15, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) +load_from = 'https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth' # noqa: E501 + +# base dataset settings +dataset_type = 'JhmdbDataset' +data_mode = 'topdown' +data_root = 'data/jhmdb/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub2_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + 
ann_file='annotations/Sub2_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), +] +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub3-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub3-256x256.py index 7306596884..151a2a3f97 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub3-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/jhmdb/td-hm_res50_8xb64-20e_jhmdb-sub3-256x256.py @@ -1,120 +1,120 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=20, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=20, - milestones=[8, 15], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='PCK', rule='greater', interval=1)) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict(type='ResNet', depth=50), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=15, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) -load_from = 'https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth' # noqa: E501 - -# base dataset settings -dataset_type = 'JhmdbDataset' -data_mode = 'topdown' -data_root = 'data/jhmdb/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/Sub3_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/Sub3_test.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - 
dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=20, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=20, + milestones=[8, 15], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='PCK', rule='greater', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict(type='ResNet', depth=50), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=15, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) +load_from = 'https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth' # noqa: E501 + +# base dataset settings +dataset_type = 'JhmdbDataset' +data_mode = 'topdown' +data_root = 'data/jhmdb/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub3_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/Sub3_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='JhmdbPCKAccuracy', thr=0.2, norm_item=['bbox', 'torso']), +] +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/cpm_mpii.md b/configs/body_2d_keypoint/topdown_heatmap/mpii/cpm_mpii.md index 0c2888bb88..ac25f9c384 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/cpm_mpii.md +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/cpm_mpii.md @@ -1,39 +1,39 @@ - - -
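The JHMDB configs above are evaluated with PCK at a threshold of 0.2, normalized by either the bounding-box or the torso size (`norm_item=['bbox', 'torso']`); the MPII READMEs further down report the analogous PCKh numbers (`Mean` and `Mean@0.1`), normalized by head size. A minimal sketch of the underlying metric, assuming predictions, ground truth and a per-instance normalization length are already at hand — an illustration of the formula, not MMPose's `JhmdbPCKAccuracy` implementation:

```python
import numpy as np

def pck(pred, gt, visible, norm_size, thr=0.2):
    """Fraction of visible keypoints whose error is within thr * norm_size.

    pred, gt:  (N, K, 2) keypoint coordinates
    visible:   (N, K) boolean visibility mask
    norm_size: (N,) per-instance normalization length (bbox, torso or head size)
    """
    dist = np.linalg.norm(pred - gt, axis=-1)      # (N, K) pixel errors
    norm_dist = dist / norm_size[:, None]          # errors relative to the instance size
    correct = (norm_dist <= thr) & visible
    return correct.sum() / max(visible.sum(), 1)

# Toy example: one instance, three keypoints, torso length 100 px -> 20 px budget at thr=0.2.
gt = np.array([[[10., 10.], [50., 50.], [90., 90.]]])
pred = gt + np.array([[[5., 0.], [30., 0.], [0., 0.]]])   # errors of 5, 30 and 0 px
print(pck(pred, gt, np.ones((1, 3), dtype=bool), np.array([100.])))  # -> 2/3
```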
-CPM (CVPR'2016) - -```bibtex -@inproceedings{wei2016convolutional, - title={Convolutional pose machines}, - author={Wei, Shih-En and Ramakrishna, Varun and Kanade, Takeo and Sheikh, Yaser}, - booktitle={Proceedings of the IEEE conference on Computer Vision and Pattern Recognition}, - pages={4724--4732}, - year={2016} -} -``` - -
-MPII (CVPR'2014) - -```bibtex -@inproceedings{andriluka14cvpr, - author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, - title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, - booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2014}, - month = {June} -} -``` - -
- -Results on MPII val set - -| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | -| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | -| [cpm](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_cpm_8xb64-210e_mpii-368x368.py) | 368x368 | 0.876 | 0.285 | [ckpt](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_mpii_368x368-116e62b8_20200822.pth) | [log](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_mpii_368x368_20200822.log.json) | + + +
+CPM (CVPR'2016) + +```bibtex +@inproceedings{wei2016convolutional, + title={Convolutional pose machines}, + author={Wei, Shih-En and Ramakrishna, Varun and Kanade, Takeo and Sheikh, Yaser}, + booktitle={Proceedings of the IEEE conference on Computer Vision and Pattern Recognition}, + pages={4724--4732}, + year={2016} +} +``` + +
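All of the configs in this diff encode their intended batch layout in the file name (`8xb64` = 8 GPUs × 64 samples per GPU) and declare an `auto_scale_lr` base batch size (512 for the JHMDB ResNet configs above, 1024 for the CSPNeXt-UDP config further down). When LR auto-scaling is enabled and the actual total batch size differs from that base, MMEngine rescales the learning rate linearly. A minimal sketch of that rule, for orientation only, not MMEngine's code:

```python
def scale_lr(base_lr, base_batch_size, num_gpus, samples_per_gpu):
    """Linear LR scaling: multiply the base LR by actual / base batch size."""
    return base_lr * (num_gpus * samples_per_gpu) / base_batch_size

# 8 GPUs x 64 samples = 512 matches base_batch_size=512, so the LR stays at 5e-4.
print(scale_lr(5e-4, 512, num_gpus=8, samples_per_gpu=64))   # 0.0005
# The same layout against base_batch_size=1024 would halve a 4e-3 base rate.
print(scale_lr(4e-3, 1024, num_gpus=8, samples_per_gpu=64))  # 0.002
```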
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [cpm](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_cpm_8xb64-210e_mpii-368x368.py) | 368x368 | 0.876 | 0.285 | [ckpt](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_mpii_368x368-116e62b8_20200822.pth) | [log](https://download.openmmlab.com/mmpose/top_down/cpm/cpm_mpii_368x368_20200822.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/cpm_mpii.yml b/configs/body_2d_keypoint/topdown_heatmap/mpii/cpm_mpii.yml index 3e2e439253..077e0cbe44 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/cpm_mpii.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/cpm_mpii.yml @@ -1,15 +1,15 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_cpm_8xb64-210e_mpii-368x368.py - In Collection: CPM - Metadata: - Architecture: - - CPM - Training Data: MPII - Name: td-hm_cpm_8xb64-210e_mpii-368x368 - Results: - - Dataset: MPII - Metrics: - Mean: 0.876 - Mean@0.1: 0.285 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/cpm/cpm_mpii_368x368-116e62b8_20200822.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_cpm_8xb64-210e_mpii-368x368.py + In Collection: CPM + Metadata: + Architecture: + - CPM + Training Data: MPII + Name: td-hm_cpm_8xb64-210e_mpii-368x368 + Results: + - Dataset: MPII + Metrics: + Mean: 0.876 + Mean@0.1: 0.285 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/cpm/cpm_mpii_368x368-116e62b8_20200822.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext-m_udp_8xb64-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext-m_udp_8xb64-210e_mpii-256x256.py index fc8d6fdcea..d9c4552dc7 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext-m_udp_8xb64-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext-m_udp_8xb64-210e_mpii-256x256.py @@ -1,210 +1,210 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 210 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 210 to 420 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - 
channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' - 'rtmdet/cspnext_rsb_pretrain/' - 'cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth')), - head=dict( - type='HeatmapHead', - in_channels=768, - out_channels=16, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=False, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/pose/MPI/', -# f'{data_root}': 's3://openmmlab/datasets/pose/MPI/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file=f'{data_root}/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='PCK', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - 
momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth')), + head=dict( + type='HeatmapHead', + in_channels=768, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=False, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/pose/MPI/', +# f'{data_root}': 's3://openmmlab/datasets/pose/MPI/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', 
direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file=f'{data_root}/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='PCK', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext_udp_mpii.md b/configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext_udp_mpii.md index 80aec4c28e..1256ae9859 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext_udp_mpii.md +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext_udp_mpii.md @@ -1,57 +1,57 @@ - - -
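The CSPNeXt-UDP schedule above runs for 210 epochs: a 1000-iteration linear warm-up, a flat phase at `base_lr = 4e-3`, then cosine annealing from epoch `max_epochs // 2 = 105` to 210 down to `eta_min = base_lr * 0.05`, while `PipelineSwitchHook` swaps in the milder stage-2 augmentations for the final 30 epochs. Below is a per-epoch approximation of the cosine segment (the config actually steps it per iteration via `convert_to_iter_based=True`); the helper is a sketch of the standard cosine-annealing formula, not MMEngine's scheduler:

```python
import math

base_lr, max_epochs = 4e-3, 210
begin, end = max_epochs // 2, max_epochs      # cosine segment: epochs 105..210
eta_min = base_lr * 0.05                      # 2e-4

def lr_at(epoch):
    """Approximate learning rate at a given epoch (ignoring the short warm-up)."""
    if epoch < begin:
        return base_lr
    t = (epoch - begin) / (end - begin)       # progress through the cosine segment
    return eta_min + 0.5 * (base_lr - eta_min) * (1 + math.cos(math.pi * t))

for e in (0, 105, 150, 180, 210):             # 180 is where the stage-2 pipeline kicks in
    print(e, round(lr_at(e), 6))
```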
-RTMDet (arXiv'2022) - -```bibtex -@misc{lyu2022rtmdet, - title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, - author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, - year={2022}, - eprint={2212.07784}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -
-UDP (CVPR'2020) - -```bibtex -@InProceedings{Huang_2020_CVPR, - author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, - title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, - booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, - month = {June}, - year = {2020} -} -``` - -
-MPII (CVPR'2014) - -```bibtex -@inproceedings{andriluka14cvpr, - author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, - title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, - booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2014}, - month = {June} -} -``` - -
- -Results on MPII val set - -| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | -| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | -| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext-m_udp_8xb64-210e_mpii-256x256.py) | 256x256 | 0.902 | 0.303 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-mpii_pt-in1k_210e-256x256-68d0402f_20230208.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-mpii_pt-in1k_210e-256x256-68d0402f_20230208.json) | + + +
+RTMDet (arXiv'2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+UDP (CVPR'2020) + +```bibtex +@InProceedings{Huang_2020_CVPR, + author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, + title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, + booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2020} +} +``` + +
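The UDP paper cited here argues that coordinate transforms between the image, the network input and the heatmap should use a unit length of `size - 1` (the span of the pixel grid) rather than `size`, otherwise resizing and flip-testing introduce a small systematic shift. A toy 1-D comparison of the two conventions — this is only meant to show the bias the paper describes, not MMPose's `UDPHeatmap` codec:

```python
def biased_map(x, src_size, dst_size):
    """'Pixel count' convention: scale by the ratio of sizes."""
    return x * dst_size / src_size

def unbiased_map(x, src_size, dst_size):
    """UDP-style 'unit length' convention: scale by the ratio of (size - 1)."""
    return x * (dst_size - 1) / (src_size - 1)

# Map the last pixel of a 256-wide input onto a 64-wide heatmap grid (valid indices 0..63).
print(biased_map(255.0, 256, 64))    # 63.75 -> lands past the last heatmap pixel (63)
print(unbiased_map(255.0, 256, 64))  # 63.0  -> endpoints map exactly onto endpoints
```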
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext-m_udp_8xb64-210e_mpii-256x256.py) | 256x256 | 0.902 | 0.303 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-mpii_pt-in1k_210e-256x256-68d0402f_20230208.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-mpii_pt-in1k_210e-256x256-68d0402f_20230208.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext_udp_mpii.yml b/configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext_udp_mpii.yml index 7256f3b154..e1c738caaf 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext_udp_mpii.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext_udp_mpii.yml @@ -1,16 +1,16 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext-m_udp_8xb64-210e_mpii-256x256.py - In Collection: UDP - Metadata: - Architecture: - - UDP - - CSPNeXt - Training Data: MPII - Name: cspnext-m_udp_8xb64-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.902 - Mean@0.1: 0.303 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-mpii_pt-in1k_210e-256x256-68d0402f_20230208.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext-m_udp_8xb64-210e_mpii-256x256.py + In Collection: UDP + Metadata: + Architecture: + - UDP + - CSPNeXt + Training Data: MPII + Name: cspnext-m_udp_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.902 + Mean@0.1: 0.303 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-mpii_pt-in1k_210e-256x256-68d0402f_20230208.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/hourglass_mpii.md b/configs/body_2d_keypoint/topdown_heatmap/mpii/hourglass_mpii.md index 98e795de4f..ca29dc2a0b 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/hourglass_mpii.md +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/hourglass_mpii.md @@ -1,41 +1,41 @@ - - -
-Hourglass (ECCV'2016) - -```bibtex -@inproceedings{newell2016stacked, - title={Stacked hourglass networks for human pose estimation}, - author={Newell, Alejandro and Yang, Kaiyu and Deng, Jia}, - booktitle={European conference on computer vision}, - pages={483--499}, - year={2016}, - organization={Springer} -} -``` - -
-MPII (CVPR'2014) - -```bibtex -@inproceedings{andriluka14cvpr, - author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, - title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, - booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2014}, - month = {June} -} -``` - -
- -Results on MPII val set - -| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | -| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | -| [pose_hourglass_52](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb64-210e_mpii-256x256.py) | 256x256 | 0.889 | 0.317 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_mpii_256x256-ae358435_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_mpii_256x256_20200812.log.json) | -| [pose_hourglass_52](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb32-210e_mpii-384x384.py) | 384x384 | 0.894 | 0.367 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_mpii_384x384-04090bc3_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_mpii_384x384_20200812.log.json) | + + +
+Hourglass (ECCV'2016) + +```bibtex +@inproceedings{newell2016stacked, + title={Stacked hourglass networks for human pose estimation}, + author={Newell, Alejandro and Yang, Kaiyu and Deng, Jia}, + booktitle={European conference on computer vision}, + pages={483--499}, + year={2016}, + organization={Springer} +} +``` + +
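Hourglass, like every model in this batch of configs, is supervised with Gaussian heatmaps: the codec (`MSRAHeatmap` or `UDPHeatmap` with `sigma=2`) renders each keypoint as a small Gaussian on a downsampled grid (64×64 for a 256×256 input here, 32×32 for the 2-deconv JHMDB heads), and the head is trained against those maps with `KeypointMSELoss`. A stripped-down sketch of the encoding step, ignoring the UDP-specific offsets — the generic idea only, not the exact MMPose codec:

```python
import numpy as np

def encode_keypoint(kpt_xy, input_size=(256, 256), heatmap_size=(64, 64), sigma=2.0):
    """Render one keypoint as a Gaussian blob on the heatmap grid."""
    (w, h), (hw, hh) = input_size, heatmap_size
    cx, cy = kpt_xy[0] * hw / w, kpt_xy[1] * hh / h   # image coords -> heatmap coords
    xs = np.arange(hw)[None, :]
    ys = np.arange(hh)[:, None]
    return np.exp(-((xs - cx) ** 2 + (ys - cy) ** 2) / (2 * sigma ** 2)).astype(np.float32)

heatmap = encode_keypoint((128.0, 64.0))    # keypoint at image position (128, 64)
print(heatmap.shape, np.unravel_index(heatmap.argmax(), heatmap.shape))  # (64, 64), peak at (16, 32)
```

At test time the head's decoder reverses this: it takes the (refined) argmax of each channel, optionally averages it with the prediction on the flipped image when `flip_test=True`, and maps the peak back into the original image frame.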
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [pose_hourglass_52](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb64-210e_mpii-256x256.py) | 256x256 | 0.889 | 0.317 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_mpii_256x256-ae358435_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_mpii_256x256_20200812.log.json) | +| [pose_hourglass_52](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb32-210e_mpii-384x384.py) | 384x384 | 0.894 | 0.367 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_mpii_384x384-04090bc3_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_mpii_384x384_20200812.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/hourglass_mpii.yml b/configs/body_2d_keypoint/topdown_heatmap/mpii/hourglass_mpii.yml index eb22cd98ce..17a5c3c488 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/hourglass_mpii.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/hourglass_mpii.yml @@ -1,28 +1,28 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb64-210e_mpii-256x256.py - In Collection: Hourglass - Metadata: - Architecture: &id001 - - Hourglass - Training Data: MPII - Name: td-hm_hourglass52_8xb64-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.889 - Mean@0.1: 0.317 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_mpii_256x256-ae358435_20200812.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb32-210e_mpii-384x384.py - In Collection: Hourglass - Metadata: - Architecture: *id001 - Training Data: MPII - Name: td-hm_hourglass52_8xb32-210e_mpii-384x384 - Results: - - Dataset: MPII - Metrics: - Mean: 0.894 - Mean@0.1: 0.367 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_mpii_384x384-04090bc3_20200812.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb64-210e_mpii-256x256.py + In Collection: Hourglass + Metadata: + Architecture: &id001 + - Hourglass + Training Data: MPII + Name: td-hm_hourglass52_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.889 + Mean@0.1: 0.317 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_mpii_256x256-ae358435_20200812.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb32-210e_mpii-384x384.py + In Collection: Hourglass + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-hm_hourglass52_8xb32-210e_mpii-384x384 + Results: + - Dataset: MPII + Metrics: + Mean: 0.894 + Mean@0.1: 0.367 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hourglass/hourglass52_mpii_384x384-04090bc3_20200812.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_dark_mpii.md b/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_dark_mpii.md index a03a96ba2e..5a089f2220 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_dark_mpii.md +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_dark_mpii.md @@ -1,57 +1,57 
@@
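Each README here is paired with a `*_mpii.yml` model-index file that records the config path, collection, metrics and checkpoint URL in machine-readable form. A short sketch of listing the released weights from one of them with PyYAML, assuming a local checkout of the repo (the path below is illustrative):

```python
import yaml  # PyYAML

with open('configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_mpii.yml') as f:
    model_index = yaml.safe_load(f)

for model in model_index['Models']:
    metrics = model['Results'][0]['Metrics']
    print(f"{model['Name']}: Mean={metrics['Mean']}, weights={model['Weights']}")
```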
-HRNet (CVPR'2019) - -```bibtex -@inproceedings{sun2019deep, - title={Deep high-resolution representation learning for human pose estimation}, - author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={5693--5703}, - year={2019} -} -``` - -
-DarkPose (CVPR'2020) - -```bibtex -@inproceedings{zhang2020distribution, - title={Distribution-aware coordinate representation for human pose estimation}, - author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={7093--7102}, - year={2020} -} -``` - -
-MPII (CVPR'2014) - -```bibtex -@inproceedings{andriluka14cvpr, - author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, - title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, - booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2014}, - month = {June} -} -``` - -
- -Results on MPII val set - -| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | -| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | -| [pose_hrnet_w32_dark](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_dark-8xb64-210e_mpii-256x256.py) | 256x256 | 0.904 | 0.354 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_mpii_256x256_dark-f1601c5b_20200927.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_mpii_256x256_dark_20200927.log.json) | -| [pose_hrnet_w48_dark](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_dark-8xb64-210e_mpii-256x256.py) | 256x256 | 0.905 | 0.360 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_mpii_256x256_dark-0decd39f_20200927.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_mpii_256x256_dark_20200927.log.json) | + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+DarkPose (CVPR'2020) + +```bibtex +@inproceedings{zhang2020distribution, + title={Distribution-aware coordinate representation for human pose estimation}, + author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={7093--7102}, + year={2020} +} +``` + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [pose_hrnet_w32_dark](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_dark-8xb64-210e_mpii-256x256.py) | 256x256 | 0.904 | 0.354 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_mpii_256x256_dark-f1601c5b_20200927.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_mpii_256x256_dark_20200927.log.json) | +| [pose_hrnet_w48_dark](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_dark-8xb64-210e_mpii-256x256.py) | 256x256 | 0.905 | 0.360 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_mpii_256x256_dark-0decd39f_20200927.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_mpii_256x256_dark_20200927.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_dark_mpii.yml b/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_dark_mpii.yml index 0283b5c827..1f19ecf817 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_dark_mpii.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_dark_mpii.yml @@ -1,29 +1,29 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_dark-8xb64-210e_mpii-256x256.py - In Collection: DarkPose - Metadata: - Architecture: &id001 - - HRNet - - DarkPose - Training Data: MPII - Name: td-hm_hrnet-w32_dark-8xb64-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.904 - Mean@0.1: 0.354 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_mpii_256x256_dark-f1601c5b_20200927.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_dark-8xb64-210e_mpii-256x256.py - In Collection: DarkPose - Metadata: - Architecture: *id001 - Training Data: MPII - Name: td-hm_hrnet-w48_dark-8xb64-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.905 - Mean@0.1: 0.36 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_mpii_256x256_dark-0decd39f_20200927.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_dark-8xb64-210e_mpii-256x256.py + In Collection: DarkPose + Metadata: + Architecture: &id001 + - HRNet + - DarkPose + Training Data: MPII + Name: td-hm_hrnet-w32_dark-8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.904 + Mean@0.1: 0.354 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_mpii_256x256_dark-f1601c5b_20200927.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_dark-8xb64-210e_mpii-256x256.py + In Collection: DarkPose + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-hm_hrnet-w48_dark-8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.905 + Mean@0.1: 0.36 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_mpii_256x256_dark-0decd39f_20200927.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_mpii.md b/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_mpii.md index 7e8a69f64f..c8ea9e35f1 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_mpii.md +++ 
b/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_mpii.md @@ -1,40 +1,40 @@ - - -
-HRNet (CVPR'2019) - -```bibtex -@inproceedings{sun2019deep, - title={Deep high-resolution representation learning for human pose estimation}, - author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={5693--5703}, - year={2019} -} -``` - -
-MPII (CVPR'2014) - -```bibtex -@inproceedings{andriluka14cvpr, - author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, - title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, - booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2014}, - month = {June} -} -``` - -
- -Results on MPII val set - -| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | -| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | -| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_8xb64-210e_mpii-256x256.py) | 256x256 | 0.900 | 0.334 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_mpii_256x256-6c4f923f_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_mpii_256x256_20200812.log.json) | -| [pose_hrnet_w48](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_8xb64-210e_mpii-256x256.py) | 256x256 | 0.901 | 0.337 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_mpii_256x256-92cab7bd_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_mpii_256x256_20200812.log.json) | + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
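The checkpoints linked in these tables can be dropped straight into MMPose's top-down inference API. A hedged sketch, assuming an MMPose 1.x install where `init_model` / `inference_topdown` behave as in the project's demos, and using the HRNet-W32 MPII entry from the table below; the test image and the person box are placeholders that would normally come from your data and a separate detector:

```python
import numpy as np
from mmpose.apis import inference_topdown, init_model

config = 'configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_8xb64-210e_mpii-256x256.py'
checkpoint = ('https://download.openmmlab.com/mmpose/top_down/hrnet/'
              'hrnet_w32_mpii_256x256-6c4f923f_20200812.pth')

model = init_model(config, checkpoint, device='cpu')

# One person box in xyxy format; top-down models expect boxes from a detector.
bboxes = np.array([[50, 50, 250, 400]], dtype=np.float32)
results = inference_topdown(model, 'person.jpg', bboxes=bboxes, bbox_format='xyxy')
print(results[0].pred_instances.keypoints.shape)  # (1, 16, 2): the 16 MPII joints
```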
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_8xb64-210e_mpii-256x256.py) | 256x256 | 0.900 | 0.334 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_mpii_256x256-6c4f923f_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_mpii_256x256_20200812.log.json) | +| [pose_hrnet_w48](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_8xb64-210e_mpii-256x256.py) | 256x256 | 0.901 | 0.337 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_mpii_256x256-92cab7bd_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_mpii_256x256_20200812.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_mpii.yml b/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_mpii.yml index f32129742d..b2ead58437 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_mpii.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_mpii.yml @@ -1,28 +1,28 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_8xb64-210e_mpii-256x256.py - In Collection: HRNet - Metadata: - Architecture: &id001 - - HRNet - Training Data: MPII - Name: td-hm_hrnet-w32_8xb64-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.9 - Mean@0.1: 0.334 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_mpii_256x256-6c4f923f_20200812.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_8xb64-210e_mpii-256x256.py - In Collection: HRNet - Metadata: - Architecture: *id001 - Training Data: MPII - Name: td-hm_hrnet-w48_8xb64-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.901 - Mean@0.1: 0.337 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_mpii_256x256-92cab7bd_20200812.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_8xb64-210e_mpii-256x256.py + In Collection: HRNet + Metadata: + Architecture: &id001 + - HRNet + Training Data: MPII + Name: td-hm_hrnet-w32_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.9 + Mean@0.1: 0.334 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_mpii_256x256-6c4f923f_20200812.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_8xb64-210e_mpii-256x256.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-hm_hrnet-w48_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.901 + Mean@0.1: 0.337 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_mpii_256x256-92cab7bd_20200812.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/litehrnet_mpii.md b/configs/body_2d_keypoint/topdown_heatmap/mpii/litehrnet_mpii.md index e664756950..21211e6302 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/litehrnet_mpii.md +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/litehrnet_mpii.md @@ -1,39 +1,39 @@ - - -
-LiteHRNet (CVPR'2021) - -```bibtex -@inproceedings{Yulitehrnet21, - title={Lite-HRNet: A Lightweight High-Resolution Network}, - author={Yu, Changqian and Xiao, Bin and Gao, Changxin and Yuan, Lu and Zhang, Lei and Sang, Nong and Wang, Jingdong}, - booktitle={CVPR}, - year={2021} -} -``` - -
- - - -
-MPII (CVPR'2014) - -```bibtex -@inproceedings{andriluka14cvpr, - author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, - title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, - booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2014}, - month = {June} -} -``` - -
- -Results on MPII val set - -| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | -| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | -| [LiteHRNet-18](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-18_8xb64-210e_mpii-256x256.py) | 256x256 | 0.859 | 0.260 | [ckpt](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_mpii_256x256-cabd7984_20210623.pth) | [log](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_mpii_256x256_20210623.log.json) | -| [LiteHRNet-30](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-30_8xb64-210e_mpii-256x256.py) | 256x256 | 0.869 | 0.271 | [ckpt](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_mpii_256x256-faae8bd8_20210622.pth) | [log](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_mpii_256x256_20210622.log.json) | + + +
+LiteHRNet (CVPR'2021) + +```bibtex +@inproceedings{Yulitehrnet21, + title={Lite-HRNet: A Lightweight High-Resolution Network}, + author={Yu, Changqian and Xiao, Bin and Gao, Changxin and Yuan, Lu and Zhang, Lei and Sang, Nong and Wang, Jingdong}, + booktitle={CVPR}, + year={2021} +} +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [LiteHRNet-18](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-18_8xb64-210e_mpii-256x256.py) | 256x256 | 0.859 | 0.260 | [ckpt](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_mpii_256x256-cabd7984_20210623.pth) | [log](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_mpii_256x256_20210623.log.json) | +| [LiteHRNet-30](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-30_8xb64-210e_mpii-256x256.py) | 256x256 | 0.869 | 0.271 | [ckpt](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_mpii_256x256-faae8bd8_20210622.pth) | [log](https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_mpii_256x256_20210622.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/litehrnet_mpii.yml b/configs/body_2d_keypoint/topdown_heatmap/mpii/litehrnet_mpii.yml index c4314b7a74..940eaf69ce 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/litehrnet_mpii.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/litehrnet_mpii.yml @@ -1,28 +1,28 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-18_8xb64-210e_mpii-256x256.py - In Collection: LiteHRNet - Metadata: - Architecture: &id001 - - LiteHRNet - Training Data: MPII - Name: td-hm_litehrnet-18_8xb64-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.859 - Mean@0.1: 0.26 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_mpii_256x256-cabd7984_20210623.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-30_8xb64-210e_mpii-256x256.py - In Collection: LiteHRNet - Metadata: - Architecture: *id001 - Training Data: MPII - Name: td-hm_litehrnet-30_8xb64-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.869 - Mean@0.1: 0.271 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_mpii_256x256-faae8bd8_20210622.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-18_8xb64-210e_mpii-256x256.py + In Collection: LiteHRNet + Metadata: + Architecture: &id001 + - LiteHRNet + Training Data: MPII + Name: td-hm_litehrnet-18_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.859 + Mean@0.1: 0.26 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet18_mpii_256x256-cabd7984_20210623.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-30_8xb64-210e_mpii-256x256.py + In Collection: LiteHRNet + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-hm_litehrnet-30_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.869 + Mean@0.1: 0.271 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/litehrnet/litehrnet30_mpii_256x256-faae8bd8_20210622.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/mobilenetv2_mpii.md b/configs/body_2d_keypoint/topdown_heatmap/mpii/mobilenetv2_mpii.md index 8bb280a821..6343855907 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/mobilenetv2_mpii.md +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/mobilenetv2_mpii.md @@ -1,39 
+1,39 @@ - - -
-MobilenetV2 (CVPR'2018) - -```bibtex -@inproceedings{sandler2018mobilenetv2, - title={Mobilenetv2: Inverted residuals and linear bottlenecks}, - author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={4510--4520}, - year={2018} -} -``` - -
- - - -
-MPII (CVPR'2014) - -```bibtex -@inproceedings{andriluka14cvpr, - author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, - title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, - booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2014}, - month = {June} -} -``` - -
- -Results on MPII val set - -| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | -| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | -| [pose_mobilenetv2](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_mobilenetv2_8xb64-210e_mpii-256x256.py) | 256x256 | 0.854 | 0.234 | [ckpt](https://download.openmmlab.com/mmpose/top_down/mobilenetv2/mobilenetv2_mpii_256x256-e068afa7_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/mobilenetv2/mobilenetv2_mpii_256x256_20200812.log.json) | + + +
+MobilenetV2 (CVPR'2018) + +```bibtex +@inproceedings{sandler2018mobilenetv2, + title={Mobilenetv2: Inverted residuals and linear bottlenecks}, + author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={4510--4520}, + year={2018} +} +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [pose_mobilenetv2](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_mobilenetv2_8xb64-210e_mpii-256x256.py) | 256x256 | 0.854 | 0.234 | [ckpt](https://download.openmmlab.com/mmpose/top_down/mobilenetv2/mobilenetv2_mpii_256x256-e068afa7_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/mobilenetv2/mobilenetv2_mpii_256x256_20200812.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/mobilenetv2_mpii.yml b/configs/body_2d_keypoint/topdown_heatmap/mpii/mobilenetv2_mpii.yml index afc54f7934..09d65dd7da 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/mobilenetv2_mpii.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/mobilenetv2_mpii.yml @@ -1,16 +1,16 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_mobilenetv2_8xb64-210e_mpii-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: - - SimpleBaseline2D - - MobilenetV2 - Training Data: MPII - Name: td-hm_mobilenetv2_8xb64-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.854 - Mean@0.1: 0.234 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/mobilenetv2/mobilenetv2_mpii_256x256-e068afa7_20200812.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_mobilenetv2_8xb64-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - MobilenetV2 + Training Data: MPII + Name: td-hm_mobilenetv2_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.854 + Mean@0.1: 0.234 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/mobilenetv2/mobilenetv2_mpii_256x256-e068afa7_20200812.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/resnet_mpii.md b/configs/body_2d_keypoint/topdown_heatmap/mpii/resnet_mpii.md index b8d98c4d6e..790746bd9a 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/resnet_mpii.md +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/resnet_mpii.md @@ -1,58 +1,58 @@ - - -
-SimpleBaseline2D (ECCV'2018) - -```bibtex -@inproceedings{xiao2018simple, - title={Simple baselines for human pose estimation and tracking}, - author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, - booktitle={Proceedings of the European conference on computer vision (ECCV)}, - pages={466--481}, - year={2018} -} -``` - -
- - - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
- - - -
-MPII (CVPR'2014) - -```bibtex -@inproceedings{andriluka14cvpr, - author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, - title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, - booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2014}, - month = {June} -} -``` - -
- -Results on MPII val set - -| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | -| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | -| [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res50_8xb64-210e_mpii-256x256.py) | 256x256 | 0.882 | 0.286 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256_20200812.log.json) | -| [pose_resnet_101](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res101_8xb64-210e_mpii-256x256.py) | 256x256 | 0.888 | 0.290 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res101_mpii_256x256-416f5d71_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res101_mpii_256x256_20200812.log.json) | -| [pose_resnet_152](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res152_8xb32-210e_mpii-256x256.py) | 256x256 | 0.889 | 0.303 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res152_mpii_256x256-3ecba29d_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res152_mpii_256x256_20200812.log.json) | + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res50_8xb64-210e_mpii-256x256.py) | 256x256 | 0.882 | 0.286 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256_20200812.log.json) | +| [pose_resnet_101](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res101_8xb64-210e_mpii-256x256.py) | 256x256 | 0.888 | 0.290 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res101_mpii_256x256-416f5d71_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res101_mpii_256x256_20200812.log.json) | +| [pose_resnet_152](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res152_8xb32-210e_mpii-256x256.py) | 256x256 | 0.889 | 0.303 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res152_mpii_256x256-3ecba29d_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res152_mpii_256x256_20200812.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/resnet_mpii.yml b/configs/body_2d_keypoint/topdown_heatmap/mpii/resnet_mpii.yml index ff92c4f7ce..14ae9106b5 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/resnet_mpii.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/resnet_mpii.yml @@ -1,42 +1,42 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res50_8xb64-210e_mpii-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: &id001 - - SimpleBaseline2D - - ResNet - Training Data: MPII - Name: td-hm_res50_8xb64-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.882 - Mean@0.1: 0.286 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res101_8xb64-210e_mpii-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: MPII - Name: td-hm_res101_8xb64-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.888 - Mean@0.1: 0.29 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res101_mpii_256x256-416f5d71_20200812.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res152_8xb32-210e_mpii-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: MPII - Name: td-hm_res152_8xb32-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.889 - Mean@0.1: 0.303 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res152_mpii_256x256-3ecba29d_20200812.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res50_8xb64-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNet + Training Data: MPII + Name: td-hm_res50_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.882 + Mean@0.1: 0.286 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth +- Config: 
configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res101_8xb64-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-hm_res101_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.888 + Mean@0.1: 0.29 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res101_mpii_256x256-416f5d71_20200812.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res152_8xb32-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-hm_res152_8xb32-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.889 + Mean@0.1: 0.303 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res152_mpii_256x256-3ecba29d_20200812.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/resnetv1d_mpii.md b/configs/body_2d_keypoint/topdown_heatmap/mpii/resnetv1d_mpii.md index 2336265098..09ffe4266a 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/resnetv1d_mpii.md +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/resnetv1d_mpii.md @@ -1,41 +1,41 @@ - - -
-ResNetV1D (CVPR'2019) - -```bibtex -@inproceedings{he2019bag, - title={Bag of tricks for image classification with convolutional neural networks}, - author={He, Tong and Zhang, Zhi and Zhang, Hang and Zhang, Zhongyue and Xie, Junyuan and Li, Mu}, - booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, - pages={558--567}, - year={2019} -} -``` - -
- - - -
-MPII (CVPR'2014) - -```bibtex -@inproceedings{andriluka14cvpr, - author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, - title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, - booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2014}, - month = {June} -} -``` - -
- -Results on MPII val set - -| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | -| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | -| [pose_resnetv1d_50](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d50_8xb64-210e_mpii-256x256.py) | 256x256 | 0.881 | 0.290 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d50_mpii_256x256-2337a92e_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d50_mpii_256x256_20200812.log.json) | -| [pose_resnetv1d_101](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d101_8xb64-210e_mpii-256x256.py) | 256x256 | 0.883 | 0.295 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d101_mpii_256x256-2851d710_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d101_mpii_256x256_20200812.log.json) | -| [pose_resnetv1d_152](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d152_8xb64-210e_mpii-256x256.py) | 256x256 | 0.888 | 0.300 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d152_mpii_256x256-8b10a87c_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d152_mpii_256x256_20200812.log.json) | + + +
+ResNetV1D (CVPR'2019) + +```bibtex +@inproceedings{he2019bag, + title={Bag of tricks for image classification with convolutional neural networks}, + author={He, Tong and Zhang, Zhi and Zhang, Hang and Zhang, Zhongyue and Xie, Junyuan and Li, Mu}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + pages={558--567}, + year={2019} +} +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [pose_resnetv1d_50](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d50_8xb64-210e_mpii-256x256.py) | 256x256 | 0.881 | 0.290 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d50_mpii_256x256-2337a92e_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d50_mpii_256x256_20200812.log.json) | +| [pose_resnetv1d_101](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d101_8xb64-210e_mpii-256x256.py) | 256x256 | 0.883 | 0.295 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d101_mpii_256x256-2851d710_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d101_mpii_256x256_20200812.log.json) | +| [pose_resnetv1d_152](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d152_8xb64-210e_mpii-256x256.py) | 256x256 | 0.888 | 0.300 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d152_mpii_256x256-8b10a87c_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d152_mpii_256x256_20200812.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/resnetv1d_mpii.yml b/configs/body_2d_keypoint/topdown_heatmap/mpii/resnetv1d_mpii.yml index e98e722db1..b6c902d592 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/resnetv1d_mpii.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/resnetv1d_mpii.yml @@ -1,42 +1,42 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d50_8xb64-210e_mpii-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: &id001 - - SimpleBaseline2D - - ResNetV1D - Training Data: MPII - Name: td-hm_resnetv1d50_8xb64-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.881 - Mean@0.1: 0.29 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d50_mpii_256x256-2337a92e_20200812.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d101_8xb64-210e_mpii-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: MPII - Name: td-hm_resnetv1d101_8xb64-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.883 - Mean@0.1: 0.295 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d101_mpii_256x256-2851d710_20200812.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d152_8xb64-210e_mpii-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: MPII - Name: td-hm_resnetv1d152_8xb64-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.888 - Mean@0.1: 0.3 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d152_mpii_256x256-8b10a87c_20200812.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d50_8xb64-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNetV1D + Training Data: MPII + Name: td-hm_resnetv1d50_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.881 + Mean@0.1: 0.29 + Task: Body 2D Keypoint + 
Weights: https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d50_mpii_256x256-2337a92e_20200812.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d101_8xb64-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-hm_resnetv1d101_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.883 + Mean@0.1: 0.295 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d101_mpii_256x256-2851d710_20200812.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d152_8xb64-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-hm_resnetv1d152_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.888 + Mean@0.1: 0.3 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnetv1d/resnetv1d152_mpii_256x256-8b10a87c_20200812.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/resnext_mpii.md b/configs/body_2d_keypoint/topdown_heatmap/mpii/resnext_mpii.md index bf9d5acf8c..64eb48369c 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/resnext_mpii.md +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/resnext_mpii.md @@ -1,39 +1,39 @@ - - -
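The `ckpt` links in these tables point to ordinary PyTorch checkpoint files. The sketch below assumes the typical OpenMMLab layout with a `state_dict` entry plus optional `meta`, and a local copy of one of the files linked above downloaded beforehand:

```python
import torch

# Hypothetical local copy of one of the linked checkpoints.
ckpt = torch.load('resnetv1d50_mpii_256x256-2337a92e_20200812.pth',
                  map_location='cpu')
print(sorted(ckpt.keys()))  # usually includes 'meta' and 'state_dict'

state_dict = ckpt.get('state_dict', ckpt)
for name, tensor in list(state_dict.items())[:5]:
    print(name, tuple(tensor.shape))
```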
-ResNext (CVPR'2017) - -```bibtex -@inproceedings{xie2017aggregated, - title={Aggregated residual transformations for deep neural networks}, - author={Xie, Saining and Girshick, Ross and Doll{\'a}r, Piotr and Tu, Zhuowen and He, Kaiming}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={1492--1500}, - year={2017} -} -``` - -
- - - -
-MPII (CVPR'2014) - -```bibtex -@inproceedings{andriluka14cvpr, - author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, - title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, - booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2014}, - month = {June} -} -``` - -
- -Results on MPII val set - -| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | -| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | -| [pose_resnext_152](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnext152_8xb32-210e_mpii-256x256.py) | 256x256 | 0.887 | 0.294 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_mpii_256x256-df302719_20200927.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_mpii_256x256_20200927.log.json) | + + +
+ResNext (CVPR'2017) + +```bibtex +@inproceedings{xie2017aggregated, + title={Aggregated residual transformations for deep neural networks}, + author={Xie, Saining and Girshick, Ross and Doll{\'a}r, Piotr and Tu, Zhuowen and He, Kaiming}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1492--1500}, + year={2017} +} +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [pose_resnext_152](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnext152_8xb32-210e_mpii-256x256.py) | 256x256 | 0.887 | 0.294 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_mpii_256x256-df302719_20200927.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_mpii_256x256_20200927.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/resnext_mpii.yml b/configs/body_2d_keypoint/topdown_heatmap/mpii/resnext_mpii.yml index 580dda77b0..feb338efd3 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/resnext_mpii.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/resnext_mpii.yml @@ -1,16 +1,16 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnext152_8xb32-210e_mpii-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: - - SimpleBaseline2D - - ResNext - Training Data: MPII - Name: td-hm_resnext152_8xb32-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.887 - Mean@0.1: 0.294 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_mpii_256x256-df302719_20200927.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnext152_8xb32-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - ResNext + Training Data: MPII + Name: td-hm_resnext152_8xb32-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.887 + Mean@0.1: 0.294 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_mpii_256x256-df302719_20200927.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/scnet_mpii.md b/configs/body_2d_keypoint/topdown_heatmap/mpii/scnet_mpii.md index cf0e4befff..eaa8a642af 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/scnet_mpii.md +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/scnet_mpii.md @@ -1,40 +1,40 @@ - - -
-SCNet (CVPR'2020) - -```bibtex -@inproceedings{liu2020improving, - title={Improving Convolutional Networks with Self-Calibrated Convolutions}, - author={Liu, Jiang-Jiang and Hou, Qibin and Cheng, Ming-Ming and Wang, Changhu and Feng, Jiashi}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={10096--10105}, - year={2020} -} -``` - -
- - - -
-MPII (CVPR'2014) - -```bibtex -@inproceedings{andriluka14cvpr, - author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, - title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, - booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2014}, - month = {June} -} -``` - -
- -Results on MPII val set - -| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | -| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | -| [pose_scnet_50](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet50_8xb64-210e_mpii-256x256.py) | 256x256 | 0.888 | 0.290 | [ckpt](https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_mpii_256x256-a54b6af5_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_mpii_256x256_20200812.log.json) | -| [pose_scnet_101](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet101_8xb64-210e_mpii-256x256.py) | 256x256 | 0.887 | 0.293 | [ckpt](https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_mpii_256x256-b4c2d184_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_mpii_256x256_20200812.log.json) | + + +
+SCNet (CVPR'2020) + +```bibtex +@inproceedings{liu2020improving, + title={Improving Convolutional Networks with Self-Calibrated Convolutions}, + author={Liu, Jiang-Jiang and Hou, Qibin and Cheng, Ming-Ming and Wang, Changhu and Feng, Jiashi}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={10096--10105}, + year={2020} +} +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [pose_scnet_50](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet50_8xb64-210e_mpii-256x256.py) | 256x256 | 0.888 | 0.290 | [ckpt](https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_mpii_256x256-a54b6af5_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_mpii_256x256_20200812.log.json) | +| [pose_scnet_101](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet101_8xb64-210e_mpii-256x256.py) | 256x256 | 0.887 | 0.293 | [ckpt](https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_mpii_256x256-b4c2d184_20200812.pth) | [log](https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_mpii_256x256_20200812.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/scnet_mpii.yml b/configs/body_2d_keypoint/topdown_heatmap/mpii/scnet_mpii.yml index b1ec80fd80..d132448d63 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/scnet_mpii.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/scnet_mpii.yml @@ -1,29 +1,29 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet50_8xb64-210e_mpii-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: &id001 - - SimpleBaseline2D - - SCNet - Training Data: MPII - Name: td-hm_scnet50_8xb64-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.888 - Mean@0.1: 0.29 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_mpii_256x256-a54b6af5_20200812.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet101_8xb64-210e_mpii-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: MPII - Name: td-hm_scnet101_8xb64-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.887 - Mean@0.1: 0.293 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_mpii_256x256-b4c2d184_20200812.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet50_8xb64-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - SCNet + Training Data: MPII + Name: td-hm_scnet50_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.888 + Mean@0.1: 0.29 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/scnet/scnet50_mpii_256x256-a54b6af5_20200812.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet101_8xb64-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-hm_scnet101_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.887 + Mean@0.1: 0.293 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/scnet/scnet101_mpii_256x256-b4c2d184_20200812.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/seresnet_mpii.md b/configs/body_2d_keypoint/topdown_heatmap/mpii/seresnet_mpii.md index 1c92ecf9ea..812fd70a4b 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/seresnet_mpii.md +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/seresnet_mpii.md @@ -1,43 +1,43 @@ - - -
-SEResNet (CVPR'2018) - -```bibtex -@inproceedings{hu2018squeeze, - title={Squeeze-and-excitation networks}, - author={Hu, Jie and Shen, Li and Sun, Gang}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={7132--7141}, - year={2018} -} -``` - -
- - - -
-MPII (CVPR'2014) - -```bibtex -@inproceedings{andriluka14cvpr, - author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, - title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, - booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2014}, - month = {June} -} -``` - -
- -Results on MPII val set - -| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | -| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | -| [pose_seresnet_50](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet50_8xb64-210e_mpii-256x256.py) | 256x256 | 0.884 | 0.292 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_mpii_256x256-1bb21f79_20200927.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_mpii_256x256_20200927.log.json) | -| [pose_seresnet_101](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet101_8xb64-210e_mpii-256x256.py) | 256x256 | 0.884 | 0.295 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_mpii_256x256-0ba14ff5_20200927.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_mpii_256x256_20200927.log.json) | -| [pose_seresnet_152\*](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet152_8xb32-210e_mpii-256x256.py) | 256x256 | 0.884 | 0.287 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_mpii_256x256-6ea1e774_20200927.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_mpii_256x256_20200927.log.json) | - -Note that * means without imagenet pre-training. + + +
+SEResNet (CVPR'2018) + +```bibtex +@inproceedings{hu2018squeeze, + title={Squeeze-and-excitation networks}, + author={Hu, Jie and Shen, Li and Sun, Gang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={7132--7141}, + year={2018} +} +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [pose_seresnet_50](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet50_8xb64-210e_mpii-256x256.py) | 256x256 | 0.884 | 0.292 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_mpii_256x256-1bb21f79_20200927.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_mpii_256x256_20200927.log.json) | +| [pose_seresnet_101](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet101_8xb64-210e_mpii-256x256.py) | 256x256 | 0.884 | 0.295 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_mpii_256x256-0ba14ff5_20200927.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_mpii_256x256_20200927.log.json) | +| [pose_seresnet_152\*](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet152_8xb32-210e_mpii-256x256.py) | 256x256 | 0.884 | 0.287 | [ckpt](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_mpii_256x256-6ea1e774_20200927.pth) | [log](https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_mpii_256x256_20200927.log.json) | + +Note that * means without imagenet pre-training. diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/seresnet_mpii.yml b/configs/body_2d_keypoint/topdown_heatmap/mpii/seresnet_mpii.yml index e71050811a..8d6a3e434a 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/seresnet_mpii.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/seresnet_mpii.yml @@ -1,42 +1,42 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet50_8xb64-210e_mpii-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: &id001 - - SimpleBaseline2D - - SEResNet - Training Data: MPII - Name: td-hm_seresnet50_8xb64-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.884 - Mean@0.1: 0.292 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_mpii_256x256-1bb21f79_20200927.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet101_8xb64-210e_mpii-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: MPII - Name: td-hm_seresnet101_8xb64-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.884 - Mean@0.1: 0.295 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_mpii_256x256-0ba14ff5_20200927.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet152_8xb32-210e_mpii-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: MPII - Name: td-hm_seresnet152_8xb32-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.884 - Mean@0.1: 0.287 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_mpii_256x256-6ea1e774_20200927.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet50_8xb64-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - SEResNet + Training Data: MPII + Name: td-hm_seresnet50_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.884 + Mean@0.1: 0.292 + Task: 
Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet50_mpii_256x256-1bb21f79_20200927.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet101_8xb64-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-hm_seresnet101_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.884 + Mean@0.1: 0.295 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet101_mpii_256x256-0ba14ff5_20200927.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet152_8xb32-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-hm_seresnet152_8xb32-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.884 + Mean@0.1: 0.287 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/seresnet/seresnet152_mpii_256x256-6ea1e774_20200927.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv1_mpii.md b/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv1_mpii.md index 3cdaaaf5ea..b8ccb8c566 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv1_mpii.md +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv1_mpii.md @@ -1,39 +1,39 @@ - - -
-ShufflenetV1 (CVPR'2018) - -```bibtex -@inproceedings{zhang2018shufflenet, - title={Shufflenet: An extremely efficient convolutional neural network for mobile devices}, - author={Zhang, Xiangyu and Zhou, Xinyu and Lin, Mengxiao and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={6848--6856}, - year={2018} -} -``` - -
- - - -
-MPII (CVPR'2014) - -```bibtex -@inproceedings{andriluka14cvpr, - author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, - title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, - booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2014}, - month = {June} -} -``` - -
- -Results on MPII val set - -| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | -| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | -| [pose_shufflenetv1](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv1_8xb64-210e_mpii-256x256.py) | 256x256 | 0.824 | 0.195 | [ckpt](https://download.openmmlab.com/mmpose/top_down/shufflenetv1/shufflenetv1_mpii_256x256-dcc1c896_20200925.pth) | [log](https://download.openmmlab.com/mmpose/top_down/shufflenetv1/shufflenetv1_mpii_256x256_20200925.log.json) | + + +
+ShufflenetV1 (CVPR'2018) + +```bibtex +@inproceedings{zhang2018shufflenet, + title={Shufflenet: An extremely efficient convolutional neural network for mobile devices}, + author={Zhang, Xiangyu and Zhou, Xinyu and Lin, Mengxiao and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={6848--6856}, + year={2018} +} +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [pose_shufflenetv1](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv1_8xb64-210e_mpii-256x256.py) | 256x256 | 0.824 | 0.195 | [ckpt](https://download.openmmlab.com/mmpose/top_down/shufflenetv1/shufflenetv1_mpii_256x256-dcc1c896_20200925.pth) | [log](https://download.openmmlab.com/mmpose/top_down/shufflenetv1/shufflenetv1_mpii_256x256_20200925.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv1_mpii.yml b/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv1_mpii.yml index b9edecc428..66d6e4efdf 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv1_mpii.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv1_mpii.yml @@ -1,16 +1,16 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv1_8xb64-210e_mpii-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: - - SimpleBaseline2D - - ShufflenetV1 - Training Data: MPII - Name: td-hm_shufflenetv1_8xb64-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.824 - Mean@0.1: 0.195 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/shufflenetv1/shufflenetv1_mpii_256x256-dcc1c896_20200925.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv1_8xb64-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - ShufflenetV1 + Training Data: MPII + Name: td-hm_shufflenetv1_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.824 + Mean@0.1: 0.195 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/shufflenetv1/shufflenetv1_mpii_256x256-dcc1c896_20200925.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv2_mpii.md b/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv2_mpii.md index 8ab7b026ba..7f1362349e 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv2_mpii.md +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv2_mpii.md @@ -1,39 +1,39 @@ - - -
-ShufflenetV2 (ECCV'2018) - -```bibtex -@inproceedings{ma2018shufflenet, - title={Shufflenet v2: Practical guidelines for efficient cnn architecture design}, - author={Ma, Ningning and Zhang, Xiangyu and Zheng, Hai-Tao and Sun, Jian}, - booktitle={Proceedings of the European conference on computer vision (ECCV)}, - pages={116--131}, - year={2018} -} -``` - -
- - - -
-MPII (CVPR'2014) - -```bibtex -@inproceedings{andriluka14cvpr, - author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, - title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, - booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2014}, - month = {June} -} -``` - -
- -Results on MPII val set - -| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | -| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | -| [pose_shufflenetv2](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv2_8xb64-210e_mpii-256x256.py) | 256x256 | 0.828 | 0.205 | [ckpt](https://download.openmmlab.com/mmpose/top_down/shufflenetv2/shufflenetv2_mpii_256x256-4fb9df2d_20200925.pth) | [log](https://download.openmmlab.com/mmpose/top_down/shufflenetv2/shufflenetv2_mpii_256x256_20200925.log.json) | + + +
+ShufflenetV2 (ECCV'2018) + +```bibtex +@inproceedings{ma2018shufflenet, + title={Shufflenet v2: Practical guidelines for efficient cnn architecture design}, + author={Ma, Ningning and Zhang, Xiangyu and Zheng, Hai-Tao and Sun, Jian}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={116--131}, + year={2018} +} +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [pose_shufflenetv2](/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv2_8xb64-210e_mpii-256x256.py) | 256x256 | 0.828 | 0.205 | [ckpt](https://download.openmmlab.com/mmpose/top_down/shufflenetv2/shufflenetv2_mpii_256x256-4fb9df2d_20200925.pth) | [log](https://download.openmmlab.com/mmpose/top_down/shufflenetv2/shufflenetv2_mpii_256x256_20200925.log.json) | diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv2_mpii.yml b/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv2_mpii.yml index efa6e14f51..71ff431e48 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv2_mpii.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv2_mpii.yml @@ -1,16 +1,16 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv2_8xb64-210e_mpii-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: - - SimpleBaseline2D - - ShufflenetV2 - Training Data: MPII - Name: td-hm_shufflenetv2_8xb64-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.828 - Mean@0.1: 0.205 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/shufflenetv2/shufflenetv2_mpii_256x256-4fb9df2d_20200925.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv2_8xb64-210e_mpii-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - ShufflenetV2 + Training Data: MPII + Name: td-hm_shufflenetv2_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.828 + Mean@0.1: 0.205 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/shufflenetv2/shufflenetv2_mpii_256x256-4fb9df2d_20200925.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_cpm_8xb64-210e_mpii-368x368.py b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_cpm_8xb64-210e_mpii-368x368.py index 794c49420a..cf47ecdfce 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_cpm_8xb64-210e_mpii-368x368.py +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_cpm_8xb64-210e_mpii-368x368.py @@ -1,125 +1,125 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(368, 368), heatmap_size=(46, 46), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='CPM', - in_channels=3, - out_channels=16, - feat_channels=128, - num_stages=6), - head=dict( - type='CPMHead', - 
in_channels=16, - out_channels=16, - num_stages=6, - deconv_out_channels=None, - final_layer=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - shift_prob=0, - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file='data/mpii/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(368, 368), heatmap_size=(46, 46), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='CPM', + in_channels=3, + out_channels=16, + feat_channels=128, + num_stages=6), + head=dict( + type='CPMHead', + in_channels=16, + out_channels=16, + num_stages=6, + deconv_out_channels=None, + final_layer=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_prob=0, + rotate_factor=60, + 
scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb32-210e_mpii-384x384.py b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb32-210e_mpii-384x384.py index e9546504e0..17540658b3 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb32-210e_mpii-384x384.py +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb32-210e_mpii-384x384.py @@ -1,118 +1,118 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(384, 384), heatmap_size=(96, 96), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HourglassNet', - num_stacks=1, - ), - head=dict( - type='CPMHead', - in_channels=256, - out_channels=16, - num_stages=1, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', 
input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file='data/mpii/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(384, 384), heatmap_size=(96, 96), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HourglassNet', + num_stacks=1, + ), + head=dict( + type='CPMHead', + in_channels=256, + out_channels=16, + num_stages=1, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + 
headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb64-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb64-210e_mpii-256x256.py index cd854a40a3..07f13ce399 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb64-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hourglass52_8xb64-210e_mpii-256x256.py @@ -1,118 +1,118 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HourglassNet', - num_stacks=1, - ), - head=dict( - type='CPMHead', - in_channels=256, - out_channels=16, - num_stages=1, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file='data/mpii/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = 
['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HourglassNet', + num_stacks=1, + ), + head=dict( + type='CPMHead', + in_channels=256, + out_channels=16, + num_stages=1, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_8xb64-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_8xb64-210e_mpii-256x256.py index 459f24f3bd..7ee018d471 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_8xb64-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_8xb64-210e_mpii-256x256.py @@ -1,146 +1,146 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, 
start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=16, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=16, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=16, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file='data/mpii/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + 
milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=16, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_dark-8xb64-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_dark-8xb64-210e_mpii-256x256.py index 5d47ed6fdc..f22c0f8cd6 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_dark-8xb64-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w32_dark-8xb64-210e_mpii-256x256.py @@ -1,150 +1,150 @@ -_base_ = 
['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', - input_size=(256, 256), - heatmap_size=(64, 64), - sigma=2, - unbiased=True) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=16, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file='data/mpii/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, 
val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=16, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_8xb64-210e_mpii-256x256.py 
b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_8xb64-210e_mpii-256x256.py index 4e3fce9600..3101359a24 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_8xb64-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_8xb64-210e_mpii-256x256.py @@ -1,146 +1,146 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(48, 96)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(48, 96, 192)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(48, 96, 192, 384))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w48-8ef0771d.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=48, - out_channels=16, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - 
headbox_file='data/mpii/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=16, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + 
pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_dark-8xb64-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_dark-8xb64-210e_mpii-256x256.py index 18b31539a3..9435d790df 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_dark-8xb64-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_hrnet-w48_dark-8xb64-210e_mpii-256x256.py @@ -1,150 +1,150 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', - input_size=(256, 256), - heatmap_size=(64, 64), - sigma=2, - unbiased=True) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(48, 96)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(48, 96, 192)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(48, 96, 192, 384))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w48-8ef0771d.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=48, - out_channels=16, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - 
batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file='data/mpii/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=16, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + 
sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-18_8xb64-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-18_8xb64-210e_mpii-256x256.py index bdab446f50..a95e33dda1 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-18_8xb64-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-18_8xb64-210e_mpii-256x256.py @@ -1,137 +1,137 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='LiteHRNet', - in_channels=3, - extra=dict( - stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), - num_stages=3, - stages_spec=dict( - num_modules=(2, 4, 2), - num_branches=(2, 3, 4), - num_blocks=(2, 2, 2), - module_type=('LITE', 'LITE', 'LITE'), - with_fuse=(True, True, True), - reduce_ratios=(8, 8, 8), - num_channels=( - (40, 80), - (40, 80, 160), - (40, 80, 160, 320), - )), - with_head=True, - )), - head=dict( - type='HeatmapHead', - in_channels=40, - out_channels=16, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - shift_prob=0, - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - 
data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file='data/mpii/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='LiteHRNet', + in_channels=3, + extra=dict( + stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), + num_stages=3, + stages_spec=dict( + num_modules=(2, 4, 2), + num_branches=(2, 3, 4), + num_blocks=(2, 2, 2), + module_type=('LITE', 'LITE', 'LITE'), + with_fuse=(True, True, True), + reduce_ratios=(8, 8, 8), + num_channels=( + (40, 80), + (40, 80, 160), + (40, 80, 160, 320), + )), + with_head=True, + )), + head=dict( + type='HeatmapHead', + in_channels=40, + out_channels=16, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_prob=0, + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + 
data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-30_8xb64-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-30_8xb64-210e_mpii-256x256.py index 84089add2a..a7b440089a 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-30_8xb64-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_litehrnet-30_8xb64-210e_mpii-256x256.py @@ -1,137 +1,137 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='LiteHRNet', - in_channels=3, - extra=dict( - stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), - num_stages=3, - stages_spec=dict( - num_modules=(3, 8, 3), - num_branches=(2, 3, 4), - num_blocks=(2, 2, 2), - module_type=('LITE', 'LITE', 'LITE'), - with_fuse=(True, True, True), - reduce_ratios=(8, 8, 8), - num_channels=( - (40, 80), - (40, 80, 160), - (40, 80, 160, 320), - )), - with_head=True, - )), - head=dict( - type='HeatmapHead', - in_channels=40, - out_channels=16, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - shift_prob=0, - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - 
drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file='data/mpii/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='LiteHRNet', + in_channels=3, + extra=dict( + stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), + num_stages=3, + stages_spec=dict( + num_modules=(3, 8, 3), + num_branches=(2, 3, 4), + num_blocks=(2, 2, 2), + module_type=('LITE', 'LITE', 'LITE'), + with_fuse=(True, True, True), + reduce_ratios=(8, 8, 8), + num_channels=( + (40, 80), + (40, 80, 160), + (40, 80, 160, 320), + )), + with_head=True, + )), + head=dict( + type='HeatmapHead', + in_channels=40, + out_channels=16, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_prob=0, + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + 
test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_mobilenetv2_8xb64-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_mobilenetv2_8xb64-210e_mpii-256x256.py index 41b9d3ba9b..6b40e1976e 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_mobilenetv2_8xb64-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_mobilenetv2_8xb64-210e_mpii-256x256.py @@ -1,118 +1,118 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='MobileNetV2', - widen_factor=1., - out_indices=(7, ), - init_cfg=dict(type='Pretrained', checkpoint='mmcls://mobilenet_v2'), - ), - head=dict( - type='HeatmapHead', - in_channels=1280, - out_channels=16, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file='data/mpii/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + 
+# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MobileNetV2', + widen_factor=1., + out_indices=(7, ), + init_cfg=dict(type='Pretrained', checkpoint='mmcls://mobilenet_v2'), + ), + head=dict( + type='HeatmapHead', + in_channels=1280, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res101_8xb64-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res101_8xb64-210e_mpii-256x256.py index def5d2fd16..0bd5fd894d 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res101_8xb64-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res101_8xb64-210e_mpii-256x256.py @@ -1,117 +1,117 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, 
start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=101, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=16, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file='data/mpii/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + 
type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res152_8xb32-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res152_8xb32-210e_mpii-256x256.py index bf515d0d21..a2d86f8ba5 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res152_8xb32-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res152_8xb32-210e_mpii-256x256.py @@ -1,117 +1,117 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=152, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=16, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - 
test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file='data/mpii/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] 
+ +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res50_8xb64-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res50_8xb64-210e_mpii-256x256.py index dee56ae77b..d22c4055f1 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res50_8xb64-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_res50_8xb64-210e_mpii-256x256.py @@ -1,117 +1,117 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=16, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - 
ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file='data/mpii/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git 
a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d101_8xb64-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d101_8xb64-210e_mpii-256x256.py index 0cbf684e38..25c40875b0 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d101_8xb64-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d101_8xb64-210e_mpii-256x256.py @@ -1,117 +1,117 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNetV1d', - depth=101, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet101_v1d'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=16, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file='data/mpii/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + 
type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNetV1d', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet101_v1d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d152_8xb64-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d152_8xb64-210e_mpii-256x256.py index 24653a9e56..ce43cf3f26 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d152_8xb64-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d152_8xb64-210e_mpii-256x256.py @@ -1,117 +1,117 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual 
training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNetV1d', - depth=152, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet152_v1d'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=16, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file='data/mpii/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNetV1d', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet152_v1d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=16, + 
loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d50_8xb64-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d50_8xb64-210e_mpii-256x256.py index 48bcfec5eb..a2853f887c 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d50_8xb64-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnetv1d50_8xb64-210e_mpii-256x256.py @@ -1,117 +1,117 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNetV1d', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet50_v1d'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=16, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 
'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file='data/mpii/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNetV1d', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://resnet50_v1d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + 
type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnext152_8xb32-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnext152_8xb32-210e_mpii-256x256.py index 30afb10103..8bfe3eff4d 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnext152_8xb32-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnext152_8xb32-210e_mpii-256x256.py @@ -1,118 +1,118 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNeXt', - depth=152, - init_cfg=dict( - type='Pretrained', checkpoint='mmcls://resnext152_32x4d'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=16, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - 
persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file='data/mpii/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNeXt', + depth=152, + init_cfg=dict( + type='Pretrained', checkpoint='mmcls://resnext152_32x4d'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet101_8xb64-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet101_8xb64-210e_mpii-256x256.py 
index fb5c6b702c..9ae0c8d75c 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet101_8xb64-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet101_8xb64-210e_mpii-256x256.py @@ -1,120 +1,120 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='SCNet', - depth=101, - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/scnet101-94250a77.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=16, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file='data/mpii/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + 
milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SCNet', + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/scnet101-94250a77.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet50_8xb64-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet50_8xb64-210e_mpii-256x256.py index c2f7723724..6e2206ab02 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet50_8xb64-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_scnet50_8xb64-210e_mpii-256x256.py @@ -1,120 +1,120 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = 
dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='SCNet', - depth=50, - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/scnet50-7ef0a199.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=16, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file='data/mpii/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SCNet', + depth=50, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/scnet50-7ef0a199.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=16, + 
loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet101_8xb64-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet101_8xb64-210e_mpii-256x256.py index 56b7fccb2e..7ead8483e2 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet101_8xb64-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet101_8xb64-210e_mpii-256x256.py @@ -1,117 +1,117 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='SEResNet', - depth=101, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://se-resnet101'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=16, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 
'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file='data/mpii/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SEResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://se-resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + 
type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet152_8xb32-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet152_8xb32-210e_mpii-256x256.py index 79bb29e4b3..7c2486dfa6 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet152_8xb32-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet152_8xb32-210e_mpii-256x256.py @@ -1,116 +1,116 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='SEResNet', - depth=152, - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=16, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', 
shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file='data/mpii/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SEResNet', + depth=152, + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet50_8xb64-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet50_8xb64-210e_mpii-256x256.py index 257dc360ad..c14ba34f30 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet50_8xb64-210e_mpii-256x256.py +++ 
b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_seresnet50_8xb64-210e_mpii-256x256.py @@ -1,117 +1,117 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='SEResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://se-resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=16, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file='data/mpii/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = 
dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SEResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://se-resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv1_8xb64-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv1_8xb64-210e_mpii-256x256.py index 83eaca208f..1b8ac628aa 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv1_8xb64-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv1_8xb64-210e_mpii-256x256.py @@ -1,117 +1,117 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - 
data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ShuffleNetV1', - groups=3, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://shufflenet_v1'), - ), - head=dict( - type='HeatmapHead', - in_channels=960, - out_channels=16, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file='data/mpii/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ShuffleNetV1', + groups=3, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://shufflenet_v1'), + ), + head=dict( + type='HeatmapHead', + in_channels=960, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + 
dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv2_8xb64-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv2_8xb64-210e_mpii-256x256.py index cd05c23596..e39aff8abc 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv2_8xb64-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv2_8xb64-210e_mpii-256x256.py @@ -1,117 +1,117 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ShuffleNetV2', - widen_factor=1.0, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://shufflenet_v2'), - ), - head=dict( - type='HeatmapHead', - in_channels=1024, - out_channels=16, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', 
encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file='data/mpii/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ShuffleNetV2', + widen_factor=1.0, + init_cfg=dict(type='Pretrained', checkpoint='mmcls://shufflenet_v2'), + ), + head=dict( + type='HeatmapHead', + in_channels=1024, + out_channels=16, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + 
sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file='data/mpii/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/posetrack18/hrnet_posetrack18.md b/configs/body_2d_keypoint/topdown_heatmap/posetrack18/hrnet_posetrack18.md index 5d26a103db..a6c58993fd 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/posetrack18/hrnet_posetrack18.md +++ b/configs/body_2d_keypoint/topdown_heatmap/posetrack18/hrnet_posetrack18.md @@ -1,55 +1,55 @@ - - -
-HRNet (CVPR'2019) - -```bibtex -@inproceedings{sun2019deep, - title={Deep high-resolution representation learning for human pose estimation}, - author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={5693--5703}, - year={2019} -} -``` - -
- - - -
-PoseTrack18 (CVPR'2018) - -```bibtex -@inproceedings{andriluka2018posetrack, - title={Posetrack: A benchmark for human pose estimation and tracking}, - author={Andriluka, Mykhaylo and Iqbal, Umar and Insafutdinov, Eldar and Pishchulin, Leonid and Milan, Anton and Gall, Juergen and Schiele, Bernt}, - booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, - pages={5167--5176}, - year={2018} -} -``` - -
- -Results on PoseTrack2018 val with ground-truth bounding boxes - -| Arch | Input Size | Head | Shou | Elb | Wri | Hip | Knee | Ankl | Total | ckpt | log | -| :--------------------------------------------------- | :--------: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :---: | :---------------------------------------------------: | :--------------------------------------------------: | -| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192.py) | 256x192 | 86.2 | 89.0 | 84.5 | 79.2 | 82.3 | 82.5 | 78.7 | 83.4 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_256x192-1ee951c4_20201028.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_256x192_20201028.log.json) | -| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288.py) | 384x288 | 87.1 | 89.0 | 85.1 | 80.2 | 80.6 | 82.8 | 79.6 | 83.7 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_384x288-806f00a3_20211130.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_384x288_20211130.log.json) | -| [pose_hrnet_w48](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192.py) | 256x192 | 88.3 | 90.2 | 86.0 | 81.0 | 80.7 | 83.3 | 80.6 | 84.6 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_256x192-b5d9b3f1_20211130.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_256x192_20211130.log.json) | -| [pose_hrnet_w48](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288.py) | 384x288 | 87.8 | 90.0 | 86.2 | 81.3 | 81.0 | 83.4 | 80.9 | 84.6 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_384x288-5fd6d3ff_20211130.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_384x288_20211130.log.json) | - -The models are first pre-trained on COCO dataset, and then fine-tuned on PoseTrack18. 
- -Results on PoseTrack2018 val with [MMDetection](https://github.com/open-mmlab/mmdetection) pre-trained [Cascade R-CNN](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco/cascade_rcnn_x101_64x4d_fpn_20e_coco_20200509_224357-051557b1.pth) (X-101-64x4d-FPN) human detector - -| Arch | Input Size | Head | Shou | Elb | Wri | Hip | Knee | Ankl | Total | ckpt | log | -| :--------------------------------------------------- | :--------: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :---: | :---------------------------------------------------: | :--------------------------------------------------: | -| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192.py) | 256x192 | 78.0 | 82.9 | 79.5 | 73.8 | 76.9 | 76.6 | 70.2 | 76.9 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_256x192-1ee951c4_20201028.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_256x192_20201028.log.json) | -| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288.py) | 384x288 | 79.9 | 83.6 | 80.4 | 74.5 | 74.8 | 76.1 | 70.5 | 77.3 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_384x288-806f00a3_20211130.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_384x288_20211130.log.json) | -| [pose_hrnet_w48](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192.py) | 256x192 | 80.1 | 83.4 | 80.6 | 74.8 | 74.3 | 76.8 | 70.5 | 77.4 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_256x192-b5d9b3f1_20211130.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_256x192_20211130.log.json) | -| [pose_hrnet_w48](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288.py) | 384x288 | 80.2 | 83.8 | 80.9 | 75.2 | 74.7 | 76.7 | 71.7 | 77.8 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_384x288-5fd6d3ff_20211130.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_384x288_20211130.log.json) | - -The models are first pre-trained on COCO dataset, and then fine-tuned on PoseTrack18. + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+PoseTrack18 (CVPR'2018) + +```bibtex +@inproceedings{andriluka2018posetrack, + title={Posetrack: A benchmark for human pose estimation and tracking}, + author={Andriluka, Mykhaylo and Iqbal, Umar and Insafutdinov, Eldar and Pishchulin, Leonid and Milan, Anton and Gall, Juergen and Schiele, Bernt}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + pages={5167--5176}, + year={2018} +} +``` + +
+ +Results on PoseTrack2018 val with ground-truth bounding boxes + +| Arch | Input Size | Head | Shou | Elb | Wri | Hip | Knee | Ankl | Total | ckpt | log | +| :--------------------------------------------------- | :--------: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :---: | :---------------------------------------------------: | :--------------------------------------------------: | +| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192.py) | 256x192 | 86.2 | 89.0 | 84.5 | 79.2 | 82.3 | 82.5 | 78.7 | 83.4 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_256x192-1ee951c4_20201028.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_256x192_20201028.log.json) | +| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288.py) | 384x288 | 87.1 | 89.0 | 85.1 | 80.2 | 80.6 | 82.8 | 79.6 | 83.7 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_384x288-806f00a3_20211130.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_384x288_20211130.log.json) | +| [pose_hrnet_w48](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192.py) | 256x192 | 88.3 | 90.2 | 86.0 | 81.0 | 80.7 | 83.3 | 80.6 | 84.6 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_256x192-b5d9b3f1_20211130.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_256x192_20211130.log.json) | +| [pose_hrnet_w48](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288.py) | 384x288 | 87.8 | 90.0 | 86.2 | 81.3 | 81.0 | 83.4 | 80.9 | 84.6 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_384x288-5fd6d3ff_20211130.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_384x288_20211130.log.json) | + +The models are first pre-trained on COCO dataset, and then fine-tuned on PoseTrack18. 
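The "ground-truth bounding boxes" setting above corresponds to evaluating without a pre-computed human-detection file. A minimal sketch of that variant is shown below, assuming the `val_dataloader` from `td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192.py` further down in this diff, whose in-config comment notes that `bbox_file` and `filter_cfg` are simply commented out for ground-truth-box evaluation:

```python
# Sketch of the ground-truth-bbox evaluation variant; values are copied from
# td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192.py in this diff, this is not
# a file added by the diff itself.
val_pipeline = [
    dict(type='LoadImage'),
    dict(type='GetBBoxCenterScale'),
    dict(type='TopdownAffine', input_size=(192, 256)),
    dict(type='PackPoseInputs')
]

val_dataloader = dict(
    batch_size=32,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),
    dataset=dict(
        type='PoseTrack18Dataset',
        data_root='data/posetrack18/',
        data_mode='topdown',
        ann_file='annotations/posetrack18_val.json',
        # with `bbox_file` and `filter_cfg` commented out, the dataset falls
        # back to the ground-truth person boxes from the annotation file
        # bbox_file='data/posetrack18/annotations/'
        # 'posetrack18_val_human_detections.json',
        # filter_cfg=dict(bbox_score_thr=0.4),
        data_prefix=dict(img=''),
        test_mode=True,
        pipeline=val_pipeline,
    ))
test_dataloader = val_dataloader
```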
+ +Results on PoseTrack2018 val with [MMDetection](https://github.com/open-mmlab/mmdetection) pre-trained [Cascade R-CNN](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco/cascade_rcnn_x101_64x4d_fpn_20e_coco_20200509_224357-051557b1.pth) (X-101-64x4d-FPN) human detector + +| Arch | Input Size | Head | Shou | Elb | Wri | Hip | Knee | Ankl | Total | ckpt | log | +| :--------------------------------------------------- | :--------: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :---: | :---------------------------------------------------: | :--------------------------------------------------: | +| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192.py) | 256x192 | 78.0 | 82.9 | 79.5 | 73.8 | 76.9 | 76.6 | 70.2 | 76.9 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_256x192-1ee951c4_20201028.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_256x192_20201028.log.json) | +| [pose_hrnet_w32](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288.py) | 384x288 | 79.9 | 83.6 | 80.4 | 74.5 | 74.8 | 76.1 | 70.5 | 77.3 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_384x288-806f00a3_20211130.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_384x288_20211130.log.json) | +| [pose_hrnet_w48](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192.py) | 256x192 | 80.1 | 83.4 | 80.6 | 74.8 | 74.3 | 76.8 | 70.5 | 77.4 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_256x192-b5d9b3f1_20211130.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_256x192_20211130.log.json) | +| [pose_hrnet_w48](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288.py) | 384x288 | 80.2 | 83.8 | 80.9 | 75.2 | 74.7 | 76.7 | 71.7 | 77.8 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_384x288-5fd6d3ff_20211130.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_384x288_20211130.log.json) | + +The models are first pre-trained on COCO dataset, and then fine-tuned on PoseTrack18. 
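The note that the models are first pre-trained on COCO and then fine-tuned on PoseTrack18 is expressed inside the configs rather than in a separate script: each PoseTrack18 config points `load_from` at a COCO-trained checkpoint and runs a short 20-epoch schedule. A brief sketch with values taken from `td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192.py` below (the other PoseTrack18 configs follow the same pattern):

```python
# Fine-tuning fragment shared by the PoseTrack18 configs (sketch; values copied
# from td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192.py in this diff).
train_cfg = dict(max_epochs=20, val_interval=1)

# start from the COCO-trained HRNet-W32 weights and fine-tune on PoseTrack18
load_from = 'https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth'  # noqa: E501

# 500-iteration linear warm-up, then the LR drops at epochs 10 and 15 of 20
param_scheduler = [
    dict(
        type='LinearLR', begin=0, end=500, start_factor=0.001,
        by_epoch=False),
    dict(
        type='MultiStepLR',
        begin=0,
        end=20,
        milestones=[10, 15],
        gamma=0.1,
        by_epoch=True)
]

# checkpointing keeps the model with the best PoseTrack18 Total AP
default_hooks = dict(
    checkpoint=dict(
        save_best='posetrack18/Total AP', rule='greater', interval=1))
```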
diff --git a/configs/body_2d_keypoint/topdown_heatmap/posetrack18/hrnet_posetrack18.yml b/configs/body_2d_keypoint/topdown_heatmap/posetrack18/hrnet_posetrack18.yml index a0dcc78f7c..c2a078718f 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/posetrack18/hrnet_posetrack18.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/posetrack18/hrnet_posetrack18.yml @@ -1,154 +1,154 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192.py - In Collection: HRNet - Metadata: - Architecture: &id001 - - HRNet - Training Data: PoseTrack18 - Name: td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192 - Results: - - Dataset: PoseTrack18 - Metrics: - Ankl: 78.7 - Elb: 84.5 - Head: 86.2 - Hip: 82.3 - Knee: 82.5 - Shou: 89 - Total: 83.4 - Wri: 79.2 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_256x192-1ee951c4_20201028.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288.py - In Collection: HRNet - Metadata: - Architecture: *id001 - Training Data: PoseTrack18 - Name: td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288 - Results: - - Dataset: PoseTrack18 - Metrics: - Ankl: 79.6 - Elb: 84.5 - Head: 87.1 - Hip: 80.6 - Knee: 82.8 - Shou: 89 - Total: 83.7 - Wri: 80.2 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_384x288-806f00a3_20211130.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192.py - In Collection: HRNet - Metadata: - Architecture: *id001 - Training Data: PoseTrack18 - Name: td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192 - Results: - - Dataset: PoseTrack18 - Metrics: - Ankl: 79.6 - Elb: 85.1 - Head: 88.3 - Hip: 80.6 - Knee: 82.8 - Shou: 90.2 - Total: 84.6 - Wri: 81 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_256x192-b5d9b3f1_20211130.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288.py - In Collection: HRNet - Metadata: - Architecture: *id001 - Training Data: PoseTrack18 - Name: td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288 - Results: - - Dataset: PoseTrack18 - Metrics: - Ankl: 80.6 - Elb: 86.2 - Head: 87.8 - Hip: 81 - Knee: 83.4 - Shou: 90 - Total: 84.6 - Wri: 81.3 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_384x288-5fd6d3ff_20211130.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192.py - In Collection: HRNet - Metadata: - Architecture: *id001 - Training Data: PoseTrack18 - Name: td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192 - Results: - - Dataset: PoseTrack18 - Metrics: - Ankl: 70.2 - Elb: 79.5 - Head: 78.0 - Hip: 76.9 - Knee: 76.6 - Shou: 82.9 - Total: 76.9 - Wri: 73.8 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_256x192-1ee951c4_20201028.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288.py - In Collection: HRNet - Metadata: - Architecture: *id001 - Training Data: PoseTrack18 - Name: td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288 - Results: - - Dataset: PoseTrack18 - Metrics: - Ankl: 70.5 - Elb: 80.4 - Head: 79.9 - Hip: 74.8 - Knee: 76.1 - Shou: 83.6 - Total: 77.3 - Wri: 74.5 - Task: Body 2D Keypoint - Weights: 
https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_384x288-806f00a3_20211130.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192.py - In Collection: HRNet - Metadata: - Architecture: *id001 - Training Data: PoseTrack18 - Name: td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192 - Results: - - Dataset: PoseTrack18 - Metrics: - Ankl: 70.4 - Elb: 80.6 - Head: 80.1 - Hip: 74.3 - Knee: 76.8 - Shou: 83.4 - Total: 77.4 - Wri: 74.8 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_256x192-b5d9b3f1_20211130.pth -- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288.py - In Collection: HRNet - Metadata: - Architecture: *id001 - Training Data: PoseTrack18 - Name: td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288 - Results: - - Dataset: PoseTrack18 - Metrics: - Ankl: 71.7 - Elb: 80.9 - Head: 80.2 - Hip: 74.7 - Knee: 76.7 - Shou: 83.8 - Total: 77.8 - Wri: 75.2 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_384x288-5fd6d3ff_20211130.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192.py + In Collection: HRNet + Metadata: + Architecture: &id001 + - HRNet + Training Data: PoseTrack18 + Name: td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192 + Results: + - Dataset: PoseTrack18 + Metrics: + Ankl: 78.7 + Elb: 84.5 + Head: 86.2 + Hip: 82.3 + Knee: 82.5 + Shou: 89 + Total: 83.4 + Wri: 79.2 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_256x192-1ee951c4_20201028.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: PoseTrack18 + Name: td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288 + Results: + - Dataset: PoseTrack18 + Metrics: + Ankl: 79.6 + Elb: 84.5 + Head: 87.1 + Hip: 80.6 + Knee: 82.8 + Shou: 89 + Total: 83.7 + Wri: 80.2 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_384x288-806f00a3_20211130.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: PoseTrack18 + Name: td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192 + Results: + - Dataset: PoseTrack18 + Metrics: + Ankl: 79.6 + Elb: 85.1 + Head: 88.3 + Hip: 80.6 + Knee: 82.8 + Shou: 90.2 + Total: 84.6 + Wri: 81 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_256x192-b5d9b3f1_20211130.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: PoseTrack18 + Name: td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288 + Results: + - Dataset: PoseTrack18 + Metrics: + Ankl: 80.6 + Elb: 86.2 + Head: 87.8 + Hip: 81 + Knee: 83.4 + Shou: 90 + Total: 84.6 + Wri: 81.3 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_384x288-5fd6d3ff_20211130.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192.py + In Collection: HRNet + Metadata: + Architecture: *id001 + 
Training Data: PoseTrack18 + Name: td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192 + Results: + - Dataset: PoseTrack18 + Metrics: + Ankl: 70.2 + Elb: 79.5 + Head: 78.0 + Hip: 76.9 + Knee: 76.6 + Shou: 82.9 + Total: 76.9 + Wri: 73.8 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_256x192-1ee951c4_20201028.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: PoseTrack18 + Name: td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288 + Results: + - Dataset: PoseTrack18 + Metrics: + Ankl: 70.5 + Elb: 80.4 + Head: 79.9 + Hip: 74.8 + Knee: 76.1 + Shou: 83.6 + Total: 77.3 + Wri: 74.5 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_384x288-806f00a3_20211130.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: PoseTrack18 + Name: td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192 + Results: + - Dataset: PoseTrack18 + Metrics: + Ankl: 70.4 + Elb: 80.6 + Head: 80.1 + Hip: 74.3 + Knee: 76.8 + Shou: 83.4 + Total: 77.4 + Wri: 74.8 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_256x192-b5d9b3f1_20211130.pth +- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: PoseTrack18 + Name: td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288 + Results: + - Dataset: PoseTrack18 + Metrics: + Ankl: 71.7 + Elb: 80.9 + Head: 80.2 + Hip: 74.7 + Knee: 76.7 + Shou: 83.8 + Total: 77.8 + Wri: 75.2 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_384x288-5fd6d3ff_20211130.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/posetrack18/resnet_posetrack18.md b/configs/body_2d_keypoint/topdown_heatmap/posetrack18/resnet_posetrack18.md index 86f476e5b7..e172780940 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/posetrack18/resnet_posetrack18.md +++ b/configs/body_2d_keypoint/topdown_heatmap/posetrack18/resnet_posetrack18.md @@ -1,58 +1,58 @@ - - -
-SimpleBaseline2D (ECCV'2018) - -```bibtex -@inproceedings{xiao2018simple, - title={Simple baselines for human pose estimation and tracking}, - author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, - booktitle={Proceedings of the European conference on computer vision (ECCV)}, - pages={466--481}, - year={2018} -} -``` - -
- - - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
- - - -
-PoseTrack18 (CVPR'2018) - -```bibtex -@inproceedings{andriluka2018posetrack, - title={Posetrack: A benchmark for human pose estimation and tracking}, - author={Andriluka, Mykhaylo and Iqbal, Umar and Insafutdinov, Eldar and Pishchulin, Leonid and Milan, Anton and Gall, Juergen and Schiele, Bernt}, - booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, - pages={5167--5176}, - year={2018} -} -``` - -
- -Results on PoseTrack2018 val with ground-truth bounding boxes - -| Arch | Input Size | Head | Shou | Elb | Wri | Hip | Knee | Ankl | Total | ckpt | log | -| :--------------------------------------------------- | :--------: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :---: | :---------------------------------------------------: | :--------------------------------------------------: | -| [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_res50_8xb64-20e_posetrack18-256x192.py) | 256x192 | 86.5 | 87.7 | 82.5 | 75.8 | 80.1 | 78.8 | 74.2 | 81.2 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_posetrack18_256x192-a62807c7_20201028.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_posetrack18_256x192_20201028.log.json) | - -The models are first pre-trained on COCO dataset, and then fine-tuned on PoseTrack18. + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+PoseTrack18 (CVPR'2018) + +```bibtex +@inproceedings{andriluka2018posetrack, + title={Posetrack: A benchmark for human pose estimation and tracking}, + author={Andriluka, Mykhaylo and Iqbal, Umar and Insafutdinov, Eldar and Pishchulin, Leonid and Milan, Anton and Gall, Juergen and Schiele, Bernt}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + pages={5167--5176}, + year={2018} +} +``` + +
+ +Results on PoseTrack2018 val with ground-truth bounding boxes + +| Arch | Input Size | Head | Shou | Elb | Wri | Hip | Knee | Ankl | Total | ckpt | log | +| :--------------------------------------------------- | :--------: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :---: | :---------------------------------------------------: | :--------------------------------------------------: | +| [pose_resnet_50](/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_res50_8xb64-20e_posetrack18-256x192.py) | 256x192 | 86.5 | 87.7 | 82.5 | 75.8 | 80.1 | 78.8 | 74.2 | 81.2 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_posetrack18_256x192-a62807c7_20201028.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_posetrack18_256x192_20201028.log.json) | + +The models are first pre-trained on COCO dataset, and then fine-tuned on PoseTrack18. diff --git a/configs/body_2d_keypoint/topdown_heatmap/posetrack18/resnet_posetrack18.yml b/configs/body_2d_keypoint/topdown_heatmap/posetrack18/resnet_posetrack18.yml index 478ffa247e..a15fa9fb0a 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/posetrack18/resnet_posetrack18.yml +++ b/configs/body_2d_keypoint/topdown_heatmap/posetrack18/resnet_posetrack18.yml @@ -1,22 +1,22 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_res50_8xb64-20e_posetrack18-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: &id001 - - SimpleBaseline2D - - ResNet - Training Data: PoseTrack18 - Name: td-hm_res50_8xb64-20e_posetrack18-256x192 - Results: - - Dataset: PoseTrack18 - Metrics: - Ankl: 74.2 - Elb: 82.5 - Head: 86.5 - Hip: 80.1 - Knee: 78.8 - Shou: 87.7 - Total: 81.2 - Wri: 75.8 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_posetrack18_256x192-a62807c7_20201028.pth +Models: +- Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_res50_8xb64-20e_posetrack18-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNet + Training Data: PoseTrack18 + Name: td-hm_res50_8xb64-20e_posetrack18-256x192 + Results: + - Dataset: PoseTrack18 + Metrics: + Ankl: 74.2 + Elb: 82.5 + Head: 86.5 + Hip: 80.1 + Knee: 78.8 + Shou: 87.7 + Total: 81.2 + Wri: 75.8 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_posetrack18_256x192-a62807c7_20201028.pth diff --git a/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192.py index fe8e385f1d..63e35a4171 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192.py @@ -1,155 +1,155 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=20, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=20, - milestones=[10, 15], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict( - 
save_best='posetrack18/Total AP', rule='greater', interval=1)) - -# load from the pretrained model -load_from = 'https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth' # noqa: E501 - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'PoseTrack18Dataset' -data_mode = 'topdown' -data_root = 'data/posetrack18/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/posetrack18_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/posetrack18_val.json', - # comment `bbox_file` and '`filter_cfg` if use gt bbox for evaluation - bbox_file='data/posetrack18/annotations/' - 'posetrack18_val_human_detections.json', - filter_cfg=dict(bbox_score_thr=0.4), - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = dict( - type='PoseTrack18Metric', - ann_file=data_root + 'annotations/posetrack18_val.json', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=20, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( 
+ type='MultiStepLR', + begin=0, + end=20, + milestones=[10, 15], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='posetrack18/Total AP', rule='greater', interval=1)) + +# load from the pretrained model +load_from = 'https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth' # noqa: E501 + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'PoseTrack18Dataset' +data_mode = 'topdown' +data_root = 'data/posetrack18/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/posetrack18_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/posetrack18_val.json', + # comment `bbox_file` and '`filter_cfg` if use gt bbox for evaluation + bbox_file='data/posetrack18/annotations/' + 'posetrack18_val_human_detections.json', + filter_cfg=dict(bbox_score_thr=0.4), + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='PoseTrack18Metric', + ann_file=data_root + 'annotations/posetrack18_val.json', +) +test_evaluator = val_evaluator diff --git 
a/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288.py index 5132074410..04a4522f00 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288.py @@ -1,155 +1,155 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=20, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=20, - milestones=[10, 15], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict( - save_best='posetrack18/Total AP', rule='greater', interval=1)) - -# load from the pretrained model -load_from = 'https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-384x288-ca5956af_20220909.pth' # noqa: E501 - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'PoseTrack18Dataset' -data_mode = 'topdown' -data_root = 'data/posetrack18/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/posetrack18_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) 
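For context: each of the PoseTrack18 config files in this diff is a complete MMPose 1.x (MMEngine-style) recipe, covering the runtime, the Adam optimizer, a warm-up plus MultiStepLR schedule, an HRNet or ResNet backbone with a heatmap head, the PoseTrack18 data pipelines and dataloaders, and the `PoseTrack18Metric` evaluator. Below is a minimal sketch of how such a file is consumed, assuming a standard MMPose 1.x install with PoseTrack18 prepared under `data/posetrack18/`; the config path is taken from the diff header above, and the `work_dir` is an arbitrary assumption:

```python
# Illustrative sketch only: launching one of these PoseTrack18 configs with
# MMEngine. Assumes MMPose 1.x is installed and the dataset is prepared as in
# the config; `work_dir` is an assumed output directory.
from mmengine.config import Config
from mmengine.runner import Runner

cfg = Config.fromfile(
    'configs/body_2d_keypoint/topdown_heatmap/posetrack18/'
    'td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288.py')
cfg.work_dir = 'work_dirs/td-hm_hrnet-w32_posetrack18-384x288'  # assumed

# The '8xb64' in the file name matches auto_scale_lr.base_batch_size
# (8 GPUs x 64 samples = 512); with a different effective batch size the
# learning rate is scaled linearly by actual_batch_size / 512.
assert cfg.auto_scale_lr['base_batch_size'] == 512

# Schedule defined in the config: a 500-iteration linear warm-up from
# 5e-7 (0.001 * 5e-4) up to 5e-4, then MultiStepLR drops the rate to 5e-5
# at epoch 10 and 5e-6 at epoch 15, for 20 epochs in total.
runner = Runner.from_cfg(cfg)
runner.train()
```

For validation these configs score pre-computed human detections (`bbox_file`) filtered at `bbox_score_thr=0.4`; as the inline comment in the config notes, commenting out `bbox_file` and `filter_cfg` switches evaluation to ground-truth boxes. The `MSRAHeatmap` codec keeps a fixed output stride of 4, which is why `heatmap_size` is always a quarter of `input_size` (192x256 -> 48x64 in the 256x192 variants, 288x384 -> 72x96 here).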
-val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/posetrack18_val.json', - # comment `bbox_file` and '`filter_cfg` if use gt bbox for evaluation - bbox_file='data/posetrack18/annotations/' - 'posetrack18_val_human_detections.json', - filter_cfg=dict(bbox_score_thr=0.4), - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = dict( - type='PoseTrack18Metric', - ann_file=data_root + 'annotations/posetrack18_val.json', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=20, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=20, + milestones=[10, 15], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='posetrack18/Total AP', rule='greater', interval=1)) + +# load from the pretrained model +load_from = 'https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-384x288-ca5956af_20220909.pth' # noqa: E501 + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'PoseTrack18Dataset' +data_mode = 'topdown' +data_root = 'data/posetrack18/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + 
persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/posetrack18_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/posetrack18_val.json', + # comment `bbox_file` and '`filter_cfg` if use gt bbox for evaluation + bbox_file='data/posetrack18/annotations/' + 'posetrack18_val_human_detections.json', + filter_cfg=dict(bbox_score_thr=0.4), + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='PoseTrack18Metric', + ann_file=data_root + 'annotations/posetrack18_val.json', +) +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192.py index cac23f14e4..90e81d0b30 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192.py +++ b/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192.py @@ -1,155 +1,155 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=20, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=20, - milestones=[10, 15], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict( - save_best='posetrack18/Total AP', rule='greater', interval=1)) - -# load from the pretrained model -load_from = 'https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192-0e67c616_20220913.pth' # noqa: E501 - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(48, 96)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(48, 96, 192)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(48, 96, 192, 384))), - ), - head=dict( - type='HeatmapHead', - in_channels=48, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', 
- shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'PoseTrack18Dataset' -data_mode = 'topdown' -data_root = 'data/posetrack18/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/posetrack18_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/posetrack18_val.json', - # comment `bbox_file` and '`filter_cfg` if use gt bbox for evaluation - bbox_file='data/posetrack18/annotations/' - 'posetrack18_val_human_detections.json', - filter_cfg=dict(bbox_score_thr=0.4), - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = dict( - type='PoseTrack18Metric', - ann_file=data_root + 'annotations/posetrack18_val.json', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=20, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=20, + milestones=[10, 15], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='posetrack18/Total AP', rule='greater', interval=1)) + +# load from the pretrained model +load_from = 'https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192-0e67c616_20220913.pth' # noqa: E501 + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + 
num_channels=(48, 96, 192, 384))), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'PoseTrack18Dataset' +data_mode = 'topdown' +data_root = 'data/posetrack18/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/posetrack18_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/posetrack18_val.json', + # comment `bbox_file` and '`filter_cfg` if use gt bbox for evaluation + bbox_file='data/posetrack18/annotations/' + 'posetrack18_val_human_detections.json', + filter_cfg=dict(bbox_score_thr=0.4), + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='PoseTrack18Metric', + ann_file=data_root + 'annotations/posetrack18_val.json', +) +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288.py b/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288.py index 7ee99469fe..32189ff213 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288.py +++ b/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288.py @@ -1,155 +1,155 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=20, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=20, - milestones=[10, 15], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict( - save_best='posetrack18/Total AP', rule='greater', interval=1)) - -# load from the pretrained model -load_from = 'https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-384x288-c161b7de_20220915.pth' # noqa: E501 - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model 
settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(48, 96)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(48, 96, 192)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(48, 96, 192, 384))), - ), - head=dict( - type='HeatmapHead', - in_channels=48, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'PoseTrack18Dataset' -data_mode = 'topdown' -data_root = 'data/posetrack18/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/posetrack18_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/posetrack18_val.json', - # comment `bbox_file` and '`filter_cfg` if use gt bbox for evaluation - bbox_file='data/posetrack18/annotations/' - 'posetrack18_val_human_detections.json', - filter_cfg=dict(bbox_score_thr=0.4), - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = dict( - type='PoseTrack18Metric', - ann_file=data_root + 'annotations/posetrack18_val.json', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=20, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=20, + milestones=[10, 15], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='posetrack18/Total AP', rule='greater', interval=1)) + +# load from the pretrained model +load_from = 
'https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-384x288-c161b7de_20220915.pth' # noqa: E501 + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'PoseTrack18Dataset' +data_mode = 'topdown' +data_root = 'data/posetrack18/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/posetrack18_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/posetrack18_val.json', + # comment `bbox_file` and '`filter_cfg` if use gt bbox for evaluation + bbox_file='data/posetrack18/annotations/' + 'posetrack18_val_human_detections.json', + filter_cfg=dict(bbox_score_thr=0.4), + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='PoseTrack18Metric', + ann_file=data_root + 'annotations/posetrack18_val.json', +) +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_res50_8xb64-20e_posetrack18-256x192.py b/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_res50_8xb64-20e_posetrack18-256x192.py index f8e529d120..22c7c11917 100644 --- a/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_res50_8xb64-20e_posetrack18-256x192.py +++ 
b/configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_res50_8xb64-20e_posetrack18-256x192.py @@ -1,126 +1,126 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=20, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=20, - milestones=[10, 15], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict( - save_best='posetrack18/Total AP', rule='greater', interval=1)) - -# load from the pretrained model -load_from = 'https://download.openmmlab.com/mmpose/top_down/resnet/res50_coco_256x192-ec54d7f3_20200709.pth' # noqa: E501 - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'PoseTrack18Dataset' -data_mode = 'topdown' -data_root = 'data/posetrack18/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/posetrack18_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/posetrack18_val.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = dict( - type='PoseTrack18Metric', - ann_file=data_root + 'annotations/posetrack18_val.json', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=20, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + 
by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=20, + milestones=[10, 15], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='posetrack18/Total AP', rule='greater', interval=1)) + +# load from the pretrained model +load_from = 'https://download.openmmlab.com/mmpose/top_down/resnet/res50_coco_256x192-ec54d7f3_20200709.pth' # noqa: E501 + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'PoseTrack18Dataset' +data_mode = 'topdown' +data_root = 'data/posetrack18/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/posetrack18_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/posetrack18_val.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='PoseTrack18Metric', + ann_file=data_root + 'annotations/posetrack18_val.json', +) +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_regression/README.md b/configs/body_2d_keypoint/topdown_regression/README.md index adc278ce0e..ed247c3347 100644 --- a/configs/body_2d_keypoint/topdown_regression/README.md +++ b/configs/body_2d_keypoint/topdown_regression/README.md @@ -1,32 +1,32 @@ -# Top-down regression-based pose estimation - -Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. 
At the 2nd stage, regression based methods directly regress the keypoint coordinates given the features extracted from the bounding box area, following the paradigm introduced in [Deeppose: Human pose estimation via deep neural networks](http://openaccess.thecvf.com/content_cvpr_2014/html/Toshev_DeepPose_Human_Pose_2014_CVPR_paper.html). - -
- -## Results and Models - -### COCO Dataset - -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Model | Input Size | AP | AR | Details and Download | -| :--------------: | :--------: | :---: | :---: | :-------------------------------------------------------: | -| ResNet-152+RLE | 256x192 | 0.731 | 0.805 | [resnet_rle_coco.md](./coco/resnet_rle_coco.md) | -| ResNet-101+RLE | 256x192 | 0.722 | 0.768 | [resnet_rle_coco.md](./coco/resnet_rle_coco.md) | -| ResNet-50+RLE | 256x192 | 0.706 | 0.768 | [resnet_rle_coco.md](./coco/resnet_rle_coco.md) | -| MobileNet-v2+RLE | 256x192 | 0.593 | 0.644 | [mobilenetv2_rle_coco.md](./coco/mobilenetv2_rle_coco.md) | -| ResNet-152 | 256x192 | 0.584 | 0.688 | [resnet_coco.md](./coco/resnet_coco.md) | -| ResNet-101 | 256x192 | 0.562 | 0.670 | [resnet_coco.md](./coco/resnet_coco.md) | -| ResNet-50 | 256x192 | 0.528 | 0.639 | [resnet_coco.md](./coco/resnet_coco.md) | - -### MPII Dataset - -| Model | Input Size | PCKh@0.5 | PCKh@0.1 | Details and Download | -| :-----------: | :--------: | :------: | :------: | :---------------------------------------------: | -| ResNet-50+RLE | 256x256 | 0.861 | 0.277 | [resnet_rle_mpii.md](./mpii/resnet_rle_mpii.md) | -| ResNet-152 | 256x256 | 0.850 | 0.208 | [resnet_mpii.md](./mpii/resnet_mpii.md) | -| ResNet-101 | 256x256 | 0.841 | 0.200 | [resnet_mpii.md](./mpii/resnet_mpii.md) | -| ResNet-50 | 256x256 | 0.826 | 0.180 | [resnet_mpii.md](./mpii/resnet_mpii.md) | +# Top-down regression-based pose estimation + +Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. At the 2nd stage, regression based methods directly regress the keypoint coordinates given the features extracted from the bounding box area, following the paradigm introduced in [Deeppose: Human pose estimation via deep neural networks](http://openaccess.thecvf.com/content_cvpr_2014/html/Toshev_DeepPose_Human_Pose_2014_CVPR_paper.html). + +
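The two-stage flow described above maps directly onto MMPose's top-down inference API. A minimal sketch, assuming an MMPose 1.x environment; the config and checkpoint paths are placeholders for any entry in the tables below, and a hard-coded box stands in for the first-stage person detector:

```python
# Sketch of the two-stage top-down pipeline: stage 1 detects person boxes,
# stage 2 estimates keypoints inside each box. Paths are placeholders.
import numpy as np
from mmpose.apis import init_model, inference_topdown

pose_model = init_model(
    'path/to/topdown_regression_config.py',  # placeholder config
    'path/to/checkpoint.pth',                # placeholder checkpoint
    device='cpu')

# Stage 1 (assumed): a person detector produces xyxy boxes for the image.
bboxes = np.array([[50, 40, 210, 380]], dtype=np.float32)

# Stage 2: the pose model predicts keypoint coordinates per box.
results = inference_topdown(pose_model, 'path/to/image.jpg', bboxes)
print(results[0].pred_instances.keypoints.shape)  # (1, num_keypoints, 2)
```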
+ +## Results and Models + +### COCO Dataset + +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Model | Input Size | AP | AR | Details and Download | +| :--------------: | :--------: | :---: | :---: | :-------------------------------------------------------: | +| ResNet-152+RLE | 256x192 | 0.731 | 0.805 | [resnet_rle_coco.md](./coco/resnet_rle_coco.md) | +| ResNet-101+RLE | 256x192 | 0.722 | 0.768 | [resnet_rle_coco.md](./coco/resnet_rle_coco.md) | +| ResNet-50+RLE | 256x192 | 0.706 | 0.768 | [resnet_rle_coco.md](./coco/resnet_rle_coco.md) | +| MobileNet-v2+RLE | 256x192 | 0.593 | 0.644 | [mobilenetv2_rle_coco.md](./coco/mobilenetv2_rle_coco.md) | +| ResNet-152 | 256x192 | 0.584 | 0.688 | [resnet_coco.md](./coco/resnet_coco.md) | +| ResNet-101 | 256x192 | 0.562 | 0.670 | [resnet_coco.md](./coco/resnet_coco.md) | +| ResNet-50 | 256x192 | 0.528 | 0.639 | [resnet_coco.md](./coco/resnet_coco.md) | + +### MPII Dataset + +| Model | Input Size | PCKh@0.5 | PCKh@0.1 | Details and Download | +| :-----------: | :--------: | :------: | :------: | :---------------------------------------------: | +| ResNet-50+RLE | 256x256 | 0.861 | 0.277 | [resnet_rle_mpii.md](./mpii/resnet_rle_mpii.md) | +| ResNet-152 | 256x256 | 0.850 | 0.208 | [resnet_mpii.md](./mpii/resnet_mpii.md) | +| ResNet-101 | 256x256 | 0.841 | 0.200 | [resnet_mpii.md](./mpii/resnet_mpii.md) | +| ResNet-50 | 256x256 | 0.826 | 0.180 | [resnet_mpii.md](./mpii/resnet_mpii.md) | diff --git a/configs/body_2d_keypoint/topdown_regression/coco/mobilenetv2_rle_coco.md b/configs/body_2d_keypoint/topdown_regression/coco/mobilenetv2_rle_coco.md index eddf5a79d3..825c40c37f 100644 --- a/configs/body_2d_keypoint/topdown_regression/coco/mobilenetv2_rle_coco.md +++ b/configs/body_2d_keypoint/topdown_regression/coco/mobilenetv2_rle_coco.md @@ -1,74 +1,74 @@ - - -
-DeepPose (CVPR'2014) - -```bibtex -@inproceedings{toshev2014deeppose, - title={Deeppose: Human pose estimation via deep neural networks}, - author={Toshev, Alexander and Szegedy, Christian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={1653--1660}, - year={2014} -} -``` - -
-RLE (ICCV'2021) - -```bibtex -@inproceedings{li2021human, - title={Human pose regression with residual log-likelihood estimation}, - author={Li, Jiefeng and Bian, Siyuan and Zeng, Ailing and Wang, Can and Pang, Bo and Liu, Wentao and Lu, Cewu}, - booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, - pages={11025--11034}, - year={2021} -} -``` - -
-MobilenetV2 (CVPR'2018) - -```bibtex -@inproceedings{sandler2018mobilenetv2, - title={Mobilenetv2: Inverted residuals and linear bottlenecks}, - author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={4510--4520}, - year={2018} -} -``` - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [deeppose_mobilenetv2_rle_pretrained](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192.py) | 256x192 | 0.593 | 0.836 | 0.660 | 0.644 | 0.877 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192-39b73bd5_20220922.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192-39b73bd5_20220922.log.json) | + + +
+DeepPose (CVPR'2014) + +```bibtex +@inproceedings{toshev2014deeppose, + title={Deeppose: Human pose estimation via deep neural networks}, + author={Toshev, Alexander and Szegedy, Christian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1653--1660}, + year={2014} +} +``` + +
+RLE (ICCV'2021) + +```bibtex +@inproceedings{li2021human, + title={Human pose regression with residual log-likelihood estimation}, + author={Li, Jiefeng and Bian, Siyuan and Zeng, Ailing and Wang, Can and Pang, Bo and Liu, Wentao and Lu, Cewu}, + booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, + pages={11025--11034}, + year={2021} +} +``` + +
+MobilenetV2 (CVPR'2018) + +```bibtex +@inproceedings{sandler2018mobilenetv2, + title={Mobilenetv2: Inverted residuals and linear bottlenecks}, + author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={4510--4520}, + year={2018} +} +``` + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [deeppose_mobilenetv2_rle_pretrained](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192.py) | 256x192 | 0.593 | 0.836 | 0.660 | 0.644 | 0.877 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192-39b73bd5_20220922.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192-39b73bd5_20220922.log.json) | diff --git a/configs/body_2d_keypoint/topdown_regression/coco/mobilenetv2_rle_coco.yml b/configs/body_2d_keypoint/topdown_regression/coco/mobilenetv2_rle_coco.yml index c0f470432b..1dda49e220 100644 --- a/configs/body_2d_keypoint/topdown_regression/coco/mobilenetv2_rle_coco.yml +++ b/configs/body_2d_keypoint/topdown_regression/coco/mobilenetv2_rle_coco.yml @@ -1,20 +1,20 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192.py - In Collection: RLE - Metadata: - Architecture: &id001 - - DeepPose - - RLE - - MobileNet - Training Data: COCO - Name: td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.593 - AP@0.5: 0.836 - AP@0.75: 0.66 - AR: 0.644 - AR@0.5: 0.877 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192-39b73bd5_20220922.pth +Models: +- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192.py + In Collection: RLE + Metadata: + Architecture: &id001 + - DeepPose + - RLE + - MobileNet + Training Data: COCO + Name: td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.593 + AP@0.5: 0.836 + AP@0.75: 0.66 + AR: 0.644 + AR@0.5: 0.877 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192-39b73bd5_20220922.pth diff --git a/configs/body_2d_keypoint/topdown_regression/coco/resnet_coco.md b/configs/body_2d_keypoint/topdown_regression/coco/resnet_coco.md index 77ed459aed..fd9a8c8941 100644 --- a/configs/body_2d_keypoint/topdown_regression/coco/resnet_coco.md +++ b/configs/body_2d_keypoint/topdown_regression/coco/resnet_coco.md @@ -1,59 +1,59 @@ - - -
-DeepPose (CVPR'2014) - -```bibtex -@inproceedings{toshev2014deeppose, - title={Deeppose: Human pose estimation via deep neural networks}, - author={Toshev, Alexander and Szegedy, Christian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={1653--1660}, - year={2014} -} -``` - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [deeppose_resnet_50](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192.py) | 256x192 | 0.541 | 0.824 | 0.601 | 0.649 | 0.893 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192-72ef04f3_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192-72ef04f3_20220913.log.json) | -| [deeppose_resnet_101](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_8xb64-210e_coco-256x192.py) | 256x192 | 0.562 | 0.831 | 0.629 | 0.670 | 0.900 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_coco_256x192-2f247111_20210205.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_coco_256x192_20210205.log.json) | -| [deeppose_resnet_152](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_8xb64-210e_coco-256x192.py) | 256x192 | 0.584 | 0.842 | 0.659 | 0.688 | 0.907 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_256x192-7df89a88_20210205.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_256x192_20210205.log.json) | + + +
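Each row above pairs a config with a released checkpoint, so the reported numbers can be re-checked directly. A short sketch, assuming an MMPose 1.x install with COCO val2017 prepared under the path the config expects; this mirrors roughly what `tools/test.py` does, with the config path and checkpoint URL copied from the deeppose_resnet_50 row and an assumed `work_dir`:

```python
# Hedged sketch: evaluating the deeppose_resnet_50 entry on COCO val2017.
from mmengine.config import Config
from mmengine.runner import Runner

cfg = Config.fromfile(
    'configs/body_2d_keypoint/topdown_regression/coco/'
    'td-reg_res50_8xb64-210e_coco-256x192.py')
cfg.load_from = (
    'https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/'
    'topdown_regression/coco/'
    'td-reg_res50_8xb64-210e_coco-256x192-72ef04f3_20220913.pth')
cfg.work_dir = 'work_dirs/deeppose_res50_eval'  # assumed output folder
Runner.from_cfg(cfg).test()  # reports the AP/AR metrics shown in the table
```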
+DeepPose (CVPR'2014) + +```bibtex +@inproceedings{toshev2014deeppose, + title={Deeppose: Human pose estimation via deep neural networks}, + author={Toshev, Alexander and Szegedy, Christian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1653--1660}, + year={2014} +} +``` + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [deeppose_resnet_50](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192.py) | 256x192 | 0.541 | 0.824 | 0.601 | 0.649 | 0.893 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192-72ef04f3_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192-72ef04f3_20220913.log.json) | +| [deeppose_resnet_101](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_8xb64-210e_coco-256x192.py) | 256x192 | 0.562 | 0.831 | 0.629 | 0.670 | 0.900 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_coco_256x192-2f247111_20210205.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_coco_256x192_20210205.log.json) | +| [deeppose_resnet_152](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_8xb64-210e_coco-256x192.py) | 256x192 | 0.584 | 0.842 | 0.659 | 0.688 | 0.907 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_256x192-7df89a88_20210205.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_256x192_20210205.log.json) | diff --git a/configs/body_2d_keypoint/topdown_regression/coco/resnet_coco.yml b/configs/body_2d_keypoint/topdown_regression/coco/resnet_coco.yml index e66b3043c6..07fae5e343 100644 --- a/configs/body_2d_keypoint/topdown_regression/coco/resnet_coco.yml +++ b/configs/body_2d_keypoint/topdown_regression/coco/resnet_coco.yml @@ -1,57 +1,57 @@ -Collections: -- Name: DeepPose - Paper: - Title: "Deeppose: Human pose estimation via deep neural networks" - URL: http://openaccess.thecvf.com/content_cvpr_2014/html/Toshev_DeepPose_Human_Pose_2014_CVPR_paper.html - README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/deeppose.md -Models: -- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192.py - In Collection: DeepPose - Metadata: - Architecture: &id001 - - DeepPose - - ResNet - Training Data: COCO - Name: td-reg_res50_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.541 - AP@0.5: 0.824 - AP@0.75: 0.601 - AR: 0.649 - AR@0.5: 0.893 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192-72ef04f3_20220913.pth -- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_8xb64-210e_coco-256x192.py - In Collection: DeepPose - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-reg_res101_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.562 - AP@0.5: 0.831 - AP@0.75: 0.629 - AR: 0.67 - AR@0.5: 0.9 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_coco_256x192-2f247111_20210205.pth -- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_8xb64-210e_coco-256x192.py - In Collection: DeepPose - Metadata: - Architecture: *id001 - Training Data: COCO - Name: 
td-reg_res152_8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.584 - AP@0.5: 0.842 - AP@0.75: 0.659 - AR: 0.688 - AR@0.5: 0.907 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_256x192-7df89a88_20210205.pth +Collections: +- Name: DeepPose + Paper: + Title: "Deeppose: Human pose estimation via deep neural networks" + URL: http://openaccess.thecvf.com/content_cvpr_2014/html/Toshev_DeepPose_Human_Pose_2014_CVPR_paper.html + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/algorithms/deeppose.md +Models: +- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192.py + In Collection: DeepPose + Metadata: + Architecture: &id001 + - DeepPose + - ResNet + Training Data: COCO + Name: td-reg_res50_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.541 + AP@0.5: 0.824 + AP@0.75: 0.601 + AR: 0.649 + AR@0.5: 0.893 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192-72ef04f3_20220913.pth +- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_8xb64-210e_coco-256x192.py + In Collection: DeepPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-reg_res101_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.562 + AP@0.5: 0.831 + AP@0.75: 0.629 + AR: 0.67 + AR@0.5: 0.9 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_coco_256x192-2f247111_20210205.pth +- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_8xb64-210e_coco-256x192.py + In Collection: DeepPose + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-reg_res152_8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.584 + AP@0.5: 0.842 + AP@0.75: 0.659 + AR: 0.688 + AR@0.5: 0.907 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_256x192-7df89a88_20210205.pth diff --git a/configs/body_2d_keypoint/topdown_regression/coco/resnet_rle_coco.md b/configs/body_2d_keypoint/topdown_regression/coco/resnet_rle_coco.md index d3f4f5a288..365d244ff6 100644 --- a/configs/body_2d_keypoint/topdown_regression/coco/resnet_rle_coco.md +++ b/configs/body_2d_keypoint/topdown_regression/coco/resnet_rle_coco.md @@ -1,78 +1,78 @@ - - -
-DeepPose (CVPR'2014) - -```bibtex -@inproceedings{toshev2014deeppose, - title={Deeppose: Human pose estimation via deep neural networks}, - author={Toshev, Alexander and Szegedy, Christian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={1653--1660}, - year={2014} -} -``` - -
-RLE (ICCV'2021) - -```bibtex -@inproceedings{li2021human, - title={Human pose regression with residual log-likelihood estimation}, - author={Li, Jiefeng and Bian, Siyuan and Zeng, Ailing and Wang, Can and Pang, Bo and Liu, Wentao and Lu, Cewu}, - booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, - pages={11025--11034}, - year={2021} -} -``` - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
- - - -
-COCO (ECCV'2014)
-
-```bibtex
-@inproceedings{lin2014microsoft,
-  title={Microsoft coco: Common objects in context},
-  author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence},
-  booktitle={European conference on computer vision},
-  pages={740--755},
-  year={2014},
-  organization={Springer}
-}
-```
-
-
- -Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | -| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | -| [deeppose_resnet_50_rle](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py) | 256x192 | 0.706 | 0.888 | 0.776 | 0.753 | 0.924 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192-d37efd64_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192-d37efd64_20220913.log.json) | -| [deeppose_resnet_50_rle_pretrained](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192.py) | 256x192 | 0.719 | 0.891 | 0.788 | 0.764 | 0.925 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192-2cb494ee_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192-2cb494ee_20220913.log.json) | -| [deeppose_resnet_101_rle](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_rle-8xb64-210e_coco-256x192.py) | 256x192 | 0.722 | 0.894 | 0.794 | 0.768 | 0.930 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_coco_256x192_rle-16c3d461_20220615.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_coco_256x192_rle_20220615.log.json) | -| [deeppose_resnet_152_rle](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-256x192.py) | 256x192 | 0.731 | 0.897 | 0.805 | 0.777 | 0.933 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_256x192_rle-c05bdccf_20220615.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_256x192_rle_20220615.log.json) | -| [deeppose_resnet_152_rle](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-384x288.py) | 384x288 | 0.749 | 0.901 | 0.815 | 0.793 | 0.935 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_384x288_rle-b77c4c37_20220624.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_384x288_rle_20220624.log.json) | + + +
+DeepPose (CVPR'2014)
+
+```bibtex
+@inproceedings{toshev2014deeppose,
+  title={Deeppose: Human pose estimation via deep neural networks},
+  author={Toshev, Alexander and Szegedy, Christian},
+  booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
+  pages={1653--1660},
+  year={2014}
+}
+```
+
+
+ + + +
+RLE (ICCV'2021)
+
+```bibtex
+@inproceedings{li2021human,
+  title={Human pose regression with residual log-likelihood estimation},
+  author={Li, Jiefeng and Bian, Siyuan and Zeng, Ailing and Wang, Can and Pang, Bo and Liu, Wentao and Lu, Cewu},
+  booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
+  pages={11025--11034},
+  year={2021}
+}
+```
+
+
+ + + +
+ResNet (CVPR'2016)
+
+```bibtex
+@inproceedings{he2016deep,
+  title={Deep residual learning for image recognition},
+  author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian},
+  booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
+  pages={770--778},
+  year={2016}
+}
+```
+
+
+ + + +
+COCO (ECCV'2014)
+
+```bibtex
+@inproceedings{lin2014microsoft,
+  title={Microsoft coco: Common objects in context},
+  author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence},
+  booktitle={European conference on computer vision},
+  pages={740--755},
+  year={2014},
+  organization={Springer}
+}
+```
+
+
+ +Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | ckpt | log | +| :-------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :-------------------------------------------: | :-------------------------------------------: | +| [deeppose_resnet_50_rle](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py) | 256x192 | 0.706 | 0.888 | 0.776 | 0.753 | 0.924 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192-d37efd64_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192-d37efd64_20220913.log.json) | +| [deeppose_resnet_50_rle_pretrained](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192.py) | 256x192 | 0.719 | 0.891 | 0.788 | 0.764 | 0.925 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192-2cb494ee_20220913.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192-2cb494ee_20220913.log.json) | +| [deeppose_resnet_101_rle](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_rle-8xb64-210e_coco-256x192.py) | 256x192 | 0.722 | 0.894 | 0.794 | 0.768 | 0.930 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_coco_256x192_rle-16c3d461_20220615.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_coco_256x192_rle_20220615.log.json) | +| [deeppose_resnet_152_rle](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-256x192.py) | 256x192 | 0.731 | 0.897 | 0.805 | 0.777 | 0.933 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_256x192_rle-c05bdccf_20220615.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_256x192_rle_20220615.log.json) | +| [deeppose_resnet_152_rle](/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-384x288.py) | 384x288 | 0.749 | 0.901 | 0.815 | 0.793 | 0.935 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_384x288_rle-b77c4c37_20220624.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_384x288_rle_20220624.log.json) | diff --git a/configs/body_2d_keypoint/topdown_regression/coco/resnet_rle_coco.yml b/configs/body_2d_keypoint/topdown_regression/coco/resnet_rle_coco.yml index 97ae41b8f2..dd910e6a38 100644 --- a/configs/body_2d_keypoint/topdown_regression/coco/resnet_rle_coco.yml +++ b/configs/body_2d_keypoint/topdown_regression/coco/resnet_rle_coco.yml @@ -1,90 +1,90 @@ -Collections: -- Name: RLE - Paper: - Title: Human pose regression with residual log-likelihood estimation - URL: https://arxiv.org/abs/2107.11291 - README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/techniques/rle.md -Models: -- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py - In Collection: RLE - Metadata: - Architecture: &id001 - - DeepPose - - RLE - - ResNet - Training Data: COCO - Name: td-reg_res50_rle-8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - 
Metrics: - AP: 0.706 - AP@0.5: 0.888 - AP@0.75: 0.776 - AR: 0.753 - AR@0.5: 0.924 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192-d37efd64_20220913.pth -- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192.py - In Collection: RLE - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.719 - AP@0.5: 0.891 - AP@0.75: 0.788 - AR: 0.764 - AR@0.5: 0.925 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192-2cb494ee_20220913.pth -- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_rle-8xb64-210e_coco-256x192.py - In Collection: RLE - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-reg_res101_rle-8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.722 - AP@0.5: 0.894 - AP@0.75: 0.794 - AR: 0.768 - AR@0.5: 0.93 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_coco_256x192_rle-16c3d461_20220615.pth -- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-256x192.py - In Collection: RLE - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-reg_res152_rle-8xb64-210e_coco-256x192 - Results: - - Dataset: COCO - Metrics: - AP: 0.731 - AP@0.5: 0.897 - AP@0.75: 0.805 - AR: 0.777 - AR@0.5: 0.933 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_256x192_rle-c05bdccf_20220615.pth -- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-384x288.py - In Collection: RLE - Metadata: - Architecture: *id001 - Training Data: COCO - Name: td-reg_res152_rle-8xb64-210e_coco-384x288 - Results: - - Dataset: COCO - Metrics: - AP: 0.749 - AP@0.5: 0.901 - AP@0.75: 0.815 - AR: 0.793 - AR@0.5: 0.935 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_384x288_rle-b77c4c37_20220624.pth +Collections: +- Name: RLE + Paper: + Title: Human pose regression with residual log-likelihood estimation + URL: https://arxiv.org/abs/2107.11291 + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/techniques/rle.md +Models: +- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py + In Collection: RLE + Metadata: + Architecture: &id001 + - DeepPose + - RLE + - ResNet + Training Data: COCO + Name: td-reg_res50_rle-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.706 + AP@0.5: 0.888 + AP@0.75: 0.776 + AR: 0.753 + AR@0.5: 0.924 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192-d37efd64_20220913.pth +- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192.py + In Collection: RLE + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.719 + AP@0.5: 0.891 + AP@0.75: 0.788 + AR: 0.764 + AR@0.5: 0.925 + Task: Body 2D Keypoint + Weights: 
https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192-2cb494ee_20220913.pth +- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_rle-8xb64-210e_coco-256x192.py + In Collection: RLE + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-reg_res101_rle-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.722 + AP@0.5: 0.894 + AP@0.75: 0.794 + AR: 0.768 + AR@0.5: 0.93 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_coco_256x192_rle-16c3d461_20220615.pth +- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-256x192.py + In Collection: RLE + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-reg_res152_rle-8xb64-210e_coco-256x192 + Results: + - Dataset: COCO + Metrics: + AP: 0.731 + AP@0.5: 0.897 + AP@0.75: 0.805 + AR: 0.777 + AR@0.5: 0.933 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_256x192_rle-c05bdccf_20220615.pth +- Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-384x288.py + In Collection: RLE + Metadata: + Architecture: *id001 + Training Data: COCO + Name: td-reg_res152_rle-8xb64-210e_coco-384x288 + Results: + - Dataset: COCO + Metrics: + AP: 0.749 + AP@0.5: 0.901 + AP@0.75: 0.815 + AR: 0.793 + AR@0.5: 0.935 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_coco_384x288_rle-b77c4c37_20220624.pth diff --git a/configs/body_2d_keypoint/topdown_regression/coco/td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_regression/coco/td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192.py index 97f5d926c6..c1a2232dac 100644 --- a/configs/body_2d_keypoint/topdown_regression/coco/td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_regression/coco/td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192.py @@ -1,126 +1,126 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=1e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=train_cfg['max_epochs'], - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict(type='RegressionLabel', input_size=(192, 256)) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='MobileNetV2', - widen_factor=1., - out_indices=(7, ), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/top_down/' - 'mobilenetv2/mobilenetv2_coco_256x192-d1e58e7b_20200727.pth')), - neck=dict(type='GlobalAveragePooling'), - head=dict( - type='RLEHead', - in_channels=1280, - num_joints=17, - loss=dict(type='RLELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - shift_coords=True, - ), -) - -# base dataset 
settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file=f'{data_root}person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=f'{data_root}annotations/person_keypoints_val2017.json', - score_mode='bbox_rle') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(192, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MobileNetV2', + widen_factor=1., + out_indices=(7, ), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/top_down/' + 'mobilenetv2/mobilenetv2_coco_256x192-d1e58e7b_20200727.pth')), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RLEHead', + in_channels=1280, + num_joints=17, + loss=dict(type='RLELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + ), +) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + 
dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=f'{data_root}annotations/person_keypoints_val2017.json', + score_mode='bbox_rle') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_8xb64-210e_coco-256x192.py index 94f35d0fc3..e55f676dc0 100644 --- a/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_8xb64-210e_coco-256x192.py @@ -1,120 +1,120 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=train_cfg['max_epochs'], - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict(type='RegressionLabel', input_size=(192, 256)) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=101, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), - ), - neck=dict(type='GlobalAveragePooling'), - head=dict( - type='RegressionHead', - in_channels=2048, - num_joints=17, - loss=dict(type='SmoothL1Loss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - shift_coords=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - 
dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file=f'{data_root}person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=f'{data_root}annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(192, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RegressionHead', + in_channels=2048, + num_joints=17, + loss=dict(type='SmoothL1Loss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + 
data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=f'{data_root}annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_rle-8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_rle-8xb64-210e_coco-256x192.py index 21b4a3cdcb..b18ea037cb 100644 --- a/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_rle-8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res101_rle-8xb64-210e_coco-256x192.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=train_cfg['max_epochs'], - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict(type='RegressionLabel', input_size=(192, 256)) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=101, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), - ), - neck=dict(type='GlobalAveragePooling'), - head=dict( - type='RLEHead', - in_channels=2048, - num_joints=17, - loss=dict(type='RLELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - shift_coords=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) 
-val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file=f'{data_root}person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=f'{data_root}annotations/person_keypoints_val2017.json', - score_mode='bbox_rle') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(192, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RLEHead', + in_channels=2048, + num_joints=17, + loss=dict(type='RLELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader 
= val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=f'{data_root}annotations/person_keypoints_val2017.json', + score_mode='bbox_rle') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_8xb64-210e_coco-256x192.py index fa56fba498..64c621f6e5 100644 --- a/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_8xb64-210e_coco-256x192.py @@ -1,120 +1,120 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=train_cfg['max_epochs'], - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict(type='RegressionLabel', input_size=(192, 256)) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=152, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), - ), - neck=dict(type='GlobalAveragePooling'), - head=dict( - type='RegressionHead', - in_channels=2048, - num_joints=17, - loss=dict(type='SmoothL1Loss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - shift_coords=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file=f'{data_root}person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = 
dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=f'{data_root}annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(192, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RegressionHead', + in_channels=2048, + num_joints=17, + loss=dict(type='SmoothL1Loss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=f'{data_root}annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-256x192.py index e2a832b652..fa35cfa5c5 100644 --- 
a/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-256x192.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=train_cfg['max_epochs'], - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict(type='RegressionLabel', input_size=(192, 256)) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=152, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), - ), - neck=dict(type='GlobalAveragePooling'), - head=dict( - type='RLEHead', - in_channels=2048, - num_joints=17, - loss=dict(type='RLELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - shift_coords=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file=f'{data_root}person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=f'{data_root}annotations/person_keypoints_val2017.json', - score_mode='bbox_rle') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, 
start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(192, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RLEHead', + in_channels=2048, + num_joints=17, + loss=dict(type='RLELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=f'{data_root}annotations/person_keypoints_val2017.json', + score_mode='bbox_rle') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-384x288.py b/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-384x288.py index 6d319e927e..06fa1c7944 100644 --- a/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-384x288.py +++ b/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res152_rle-8xb64-210e_coco-384x288.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - 
dict( - type='MultiStepLR', - begin=0, - end=train_cfg['max_epochs'], - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict(type='RegressionLabel', input_size=(288, 384)) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=152, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), - ), - neck=dict(type='GlobalAveragePooling'), - head=dict( - type='RLEHead', - in_channels=2048, - num_joints=17, - loss=dict(type='RLELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - shift_coords=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file=f'{data_root}person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=f'{data_root}annotations/person_keypoints_val2017.json', - score_mode='bbox_rle') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(288, 384)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + 
bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RLEHead', + in_channels=2048, + num_joints=17, + loss=dict(type='RLELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=f'{data_root}annotations/person_keypoints_val2017.json', + score_mode='bbox_rle') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192.py index fa7e487acf..09016f661d 100644 --- a/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192.py @@ -1,120 +1,120 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=train_cfg['max_epochs'], - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict(type='RegressionLabel', input_size=(192, 256)) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - 
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - neck=dict(type='GlobalAveragePooling'), - head=dict( - type='RegressionHead', - in_channels=2048, - num_joints=17, - loss=dict(type='SmoothL1Loss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - shift_coords=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file=f'{data_root}person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=f'{data_root}annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(192, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RegressionHead', + in_channels=2048, + num_joints=17, + loss=dict(type='SmoothL1Loss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + 
dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=f'{data_root}annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py index db530f6ec4..ceccb7af97 100644 --- a/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=1e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=train_cfg['max_epochs'], - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict(type='RegressionLabel', input_size=(192, 256)) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - neck=dict(type='GlobalAveragePooling'), - head=dict( - type='RLEHead', - in_channels=2048, - num_joints=17, - loss=dict(type='RLELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - shift_coords=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - 
dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file=f'{data_root}person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=f'{data_root}annotations/person_keypoints_val2017.json', - score_mode='bbox_rle') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(192, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RLEHead', + in_channels=2048, + num_joints=17, + loss=dict(type='RLELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + 
sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=f'{data_root}annotations/person_keypoints_val2017.json', + score_mode='bbox_rle') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192.py b/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192.py index 6b74aba7f3..a1d485c7f5 100644 --- a/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192.py +++ b/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192.py @@ -1,125 +1,125 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=1e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=train_cfg['max_epochs'], - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict(type='RegressionLabel', input_size=(192, 256)) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/td-hm_res50_8xb64-210e_coco-256x192.pth'), - ), - neck=dict(type='GlobalAveragePooling'), - head=dict( - type='RLEHead', - in_channels=2048, - num_joints=17, - loss=dict(type='RLELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - shift_coords=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -test_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders 
-train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file=f'{data_root}person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=test_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=f'{data_root}annotations/person_keypoints_val2017.json', - score_mode='bbox_rle') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=train_cfg['max_epochs'], + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(192, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/td-hm_res50_8xb64-210e_coco-256x192.pth'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RLEHead', + in_channels=2048, + num_joints=17, + loss=dict(type='RLELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +test_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + 
persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=f'{data_root}annotations/person_keypoints_val2017.json', + score_mode='bbox_rle') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_regression/mpii/resnet_mpii.md b/configs/body_2d_keypoint/topdown_regression/mpii/resnet_mpii.md index 150fd48020..af6df37061 100644 --- a/configs/body_2d_keypoint/topdown_regression/mpii/resnet_mpii.md +++ b/configs/body_2d_keypoint/topdown_regression/mpii/resnet_mpii.md @@ -1,58 +1,58 @@ - - -
-DeepPose (CVPR'2014) - -```bibtex -@inproceedings{toshev2014deeppose, - title={Deeppose: Human pose estimation via deep neural networks}, - author={Toshev, Alexander and Szegedy, Christian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={1653--1660}, - year={2014} -} -``` - -
- - - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
- - - -
-MPII (CVPR'2014) - -```bibtex -@inproceedings{andriluka14cvpr, - author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, - title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, - booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2014}, - month = {June} -} -``` - -
- -Results on MPII val set - -| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | -| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | -| [deeppose_resnet_50](/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_8xb64-210e_mpii-256x256.py) | 256x256 | 0.826 | 0.180 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res50_mpii_256x256-c63cd0b6_20210203.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res50_mpii_256x256_20210203.log.json) | -| [deeppose_resnet_101](/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res101_8xb64-210e_mpii-256x256.py) | 256x256 | 0.841 | 0.200 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_mpii_256x256-87516a90_20210205.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_mpii_256x256_20210205.log.json) | -| [deeppose_resnet_152](/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res152_8xb64-210e_mpii-256x256.py) | 256x256 | 0.850 | 0.208 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_mpii_256x256-15f5e6f9_20210205.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_mpii_256x256_20210205.log.json) | + + +
+DeepPose (CVPR'2014) + +```bibtex +@inproceedings{toshev2014deeppose, + title={Deeppose: Human pose estimation via deep neural networks}, + author={Toshev, Alexander and Szegedy, Christian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1653--1660}, + year={2014} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [deeppose_resnet_50](/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_8xb64-210e_mpii-256x256.py) | 256x256 | 0.826 | 0.180 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res50_mpii_256x256-c63cd0b6_20210203.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res50_mpii_256x256_20210203.log.json) | +| [deeppose_resnet_101](/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res101_8xb64-210e_mpii-256x256.py) | 256x256 | 0.841 | 0.200 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_mpii_256x256-87516a90_20210205.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_mpii_256x256_20210205.log.json) | +| [deeppose_resnet_152](/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res152_8xb64-210e_mpii-256x256.py) | 256x256 | 0.850 | 0.208 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_mpii_256x256-15f5e6f9_20210205.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_mpii_256x256_20210205.log.json) | diff --git a/configs/body_2d_keypoint/topdown_regression/mpii/resnet_mpii.yml b/configs/body_2d_keypoint/topdown_regression/mpii/resnet_mpii.yml index a744083e97..95484bc001 100644 --- a/configs/body_2d_keypoint/topdown_regression/mpii/resnet_mpii.yml +++ b/configs/body_2d_keypoint/topdown_regression/mpii/resnet_mpii.yml @@ -1,42 +1,42 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_8xb64-210e_mpii-256x256.py - In Collection: DeepPose - Metadata: - Architecture: &id001 - - DeepPose - - ResNet - Training Data: MPII - Name: td-reg_res50_8xb64-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.826 - Mean@0.1: 0.18 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res50_mpii_256x256-c63cd0b6_20210203.pth -- Config: configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res101_8xb64-210e_mpii-256x256.py - In Collection: DeepPose - Metadata: - Architecture: *id001 - Training Data: MPII - Name: td-reg_res101_8xb64-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.841 - Mean@0.1: 0.2 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_mpii_256x256-87516a90_20210205.pth -- Config: configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res152_8xb64-210e_mpii-256x256.py - In Collection: DeepPose - Metadata: - Architecture: *id001 - Training Data: MPII - Name: td-reg_res152_8xb64-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.85 - Mean@0.1: 0.208 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_mpii_256x256-15f5e6f9_20210205.pth +Models: +- Config: configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_8xb64-210e_mpii-256x256.py + In Collection: DeepPose + Metadata: + Architecture: &id001 + - DeepPose + - ResNet + Training Data: MPII + Name: td-reg_res50_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.826 + Mean@0.1: 0.18 + Task: Body 2D Keypoint + Weights: 
https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res50_mpii_256x256-c63cd0b6_20210203.pth +- Config: configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res101_8xb64-210e_mpii-256x256.py + In Collection: DeepPose + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-reg_res101_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.841 + Mean@0.1: 0.2 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res101_mpii_256x256-87516a90_20210205.pth +- Config: configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res152_8xb64-210e_mpii-256x256.py + In Collection: DeepPose + Metadata: + Architecture: *id001 + Training Data: MPII + Name: td-reg_res152_8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.85 + Mean@0.1: 0.208 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res152_mpii_256x256-15f5e6f9_20210205.pth diff --git a/configs/body_2d_keypoint/topdown_regression/mpii/resnet_rle_mpii.md b/configs/body_2d_keypoint/topdown_regression/mpii/resnet_rle_mpii.md index bf3a67a49a..9e88cfae61 100644 --- a/configs/body_2d_keypoint/topdown_regression/mpii/resnet_rle_mpii.md +++ b/configs/body_2d_keypoint/topdown_regression/mpii/resnet_rle_mpii.md @@ -1,73 +1,73 @@ - - -
-DeepPose (CVPR'2014) - -```bibtex -@inproceedings{toshev2014deeppose, - title={Deeppose: Human pose estimation via deep neural networks}, - author={Toshev, Alexander and Szegedy, Christian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={1653--1660}, - year={2014} -} -``` - -
- - - -
-RLE (ICCV'2021) - -```bibtex -@inproceedings{li2021human, - title={Human pose regression with residual log-likelihood estimation}, - author={Li, Jiefeng and Bian, Siyuan and Zeng, Ailing and Wang, Can and Pang, Bo and Liu, Wentao and Lu, Cewu}, - booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, - pages={11025--11034}, - year={2021} -} -``` - -
- - - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
- - - -
-MPII (CVPR'2014) - -```bibtex -@inproceedings{andriluka14cvpr, - author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, - title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, - booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2014}, - month = {June} -} -``` - -
- -Results on MPII val set - -| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | -| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | -| [deeppose_resnet_50_rle](/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_rle-8xb64-210e_mpii-256x256.py) | 256x256 | 0.861 | 0.277 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res50_mpii_256x256_rle-5f92a619_20220504.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res50_mpii_256x256_rle_20220504.log.json) | + + +
+DeepPose (CVPR'2014) + +```bibtex +@inproceedings{toshev2014deeppose, + title={Deeppose: Human pose estimation via deep neural networks}, + author={Toshev, Alexander and Szegedy, Christian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1653--1660}, + year={2014} +} +``` + +
+ + + +
+RLE (ICCV'2021) + +```bibtex +@inproceedings{li2021human, + title={Human pose regression with residual log-likelihood estimation}, + author={Li, Jiefeng and Bian, Siyuan and Zeng, Ailing and Wang, Can and Pang, Bo and Liu, Wentao and Lu, Cewu}, + booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, + pages={11025--11034}, + year={2021} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +Results on MPII val set + +| Arch | Input Size | Mean | Mean@0.1 | ckpt | log | +| :---------------------------------------------------------- | :--------: | :---: | :------: | :---------------------------------------------------------: | :---------------------------------------------------------: | +| [deeppose_resnet_50_rle](/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_rle-8xb64-210e_mpii-256x256.py) | 256x256 | 0.861 | 0.277 | [ckpt](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res50_mpii_256x256_rle-5f92a619_20220504.pth) | [log](https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res50_mpii_256x256_rle_20220504.log.json) | diff --git a/configs/body_2d_keypoint/topdown_regression/mpii/resnet_rle_mpii.yml b/configs/body_2d_keypoint/topdown_regression/mpii/resnet_rle_mpii.yml index a03586d42c..c2d3237333 100644 --- a/configs/body_2d_keypoint/topdown_regression/mpii/resnet_rle_mpii.yml +++ b/configs/body_2d_keypoint/topdown_regression/mpii/resnet_rle_mpii.yml @@ -1,17 +1,17 @@ -Models: -- Config: configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_rle-8xb64-210e_mpii-256x256.py - In Collection: RLE - Metadata: - Architecture: - - DeepPose - - RLE - - ResNet - Training Data: MPII - Name: td-reg_res50_rle-8xb64-210e_mpii-256x256 - Results: - - Dataset: MPII - Metrics: - Mean: 0.861 - Mean@0.1: 0.277 - Task: Body 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res50_mpii_256x256_rle-5f92a619_20220504.pth +Models: +- Config: configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_rle-8xb64-210e_mpii-256x256.py + In Collection: RLE + Metadata: + Architecture: + - DeepPose + - RLE + - ResNet + Training Data: MPII + Name: td-reg_res50_rle-8xb64-210e_mpii-256x256 + Results: + - Dataset: MPII + Metrics: + Mean: 0.861 + Mean@0.1: 0.277 + Task: Body 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res50_mpii_256x256_rle-5f92a619_20220504.pth diff --git a/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res101_8xb64-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res101_8xb64-210e_mpii-256x256.py index 6c7821f91b..157667081b 100644 --- a/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res101_8xb64-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res101_8xb64-210e_mpii-256x256.py @@ -1,116 +1,116 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict(type='RegressionLabel', input_size=(256, 256)) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=101, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), - ), - neck=dict(type='GlobalAveragePooling'), - head=dict( - type='RegressionHead', - in_channels=2048, - num_joints=16, - 
loss=dict(type='SmoothL1Loss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - shift_coords=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file=f'{data_root}/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(256, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RegressionHead', + in_channels=2048, + num_joints=16, + loss=dict(type='SmoothL1Loss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', 
input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file=f'{data_root}/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res152_8xb64-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res152_8xb64-210e_mpii-256x256.py index c1a19b0d6e..f814b67d97 100644 --- a/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res152_8xb64-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res152_8xb64-210e_mpii-256x256.py @@ -1,118 +1,118 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict(type='RegressionLabel', input_size=(256, 256)) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=152, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), - ), - neck=dict(type='GlobalAveragePooling'), - head=dict( - type='RegressionHead', - in_channels=2048, - num_joints=16, - loss=dict(type='SmoothL1Loss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - shift_coords=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -file_client_args = dict(backend='disk') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', file_client_args=file_client_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', file_client_args=file_client_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - 
persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file=f'{data_root}/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(256, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RegressionHead', + in_channels=2048, + num_joints=16, + loss=dict(type='SmoothL1Loss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +file_client_args = dict(backend='disk') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', file_client_args=file_client_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', file_client_args=file_client_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + 
headbox_file=f'{data_root}/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_8xb64-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_8xb64-210e_mpii-256x256.py index 901fd4b8d6..a2ab46c74f 100644 --- a/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_8xb64-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_8xb64-210e_mpii-256x256.py @@ -1,116 +1,116 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict(type='RegressionLabel', input_size=(256, 256)) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - neck=dict(type='GlobalAveragePooling'), - head=dict( - type='RegressionHead', - in_channels=2048, - num_joints=16, - loss=dict(type='SmoothL1Loss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - shift_coords=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file=f'{data_root}/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# evaluators 
-val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(256, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RegressionHead', + in_channels=2048, + num_joints=16, + loss=dict(type='SmoothL1Loss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file=f'{data_root}/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_rle-8xb64-210e_mpii-256x256.py b/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_rle-8xb64-210e_mpii-256x256.py index 9d46484755..922cee26de 100644 --- a/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_rle-8xb64-210e_mpii-256x256.py +++ b/configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_rle-8xb64-210e_mpii-256x256.py @@ -1,116 +1,116 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', 
- lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict(type='RegressionLabel', input_size=(256, 256)) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - neck=dict(type='GlobalAveragePooling'), - head=dict( - type='RLEHead', - in_channels=2048, - num_joints=16, - loss=dict(type='RLELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - shift_coords=True, - )) - -# base dataset settings -dataset_type = 'MpiiDataset' -data_mode = 'topdown' -data_root = 'data/mpii/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform', shift_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/mpii_val.json', - headbox_file=f'{data_root}/annotations/mpii_gt_val.mat', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) - -# evaluators -val_evaluator = dict(type='MpiiPCKAccuracy') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(256, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + 
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RLEHead', + in_channels=2048, + num_joints=16, + loss=dict(type='RLELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'MpiiDataset' +data_mode = 'topdown' +data_root = 'data/mpii/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform', shift_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/mpii_val.json', + headbox_file=f'{data_root}/annotations/mpii_gt_val.mat', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater')) + +# evaluators +val_evaluator = dict(type='MpiiPCKAccuracy') +test_evaluator = val_evaluator diff --git a/configs/body_3d_keypoint/README.md b/configs/body_3d_keypoint/README.md index b67f7ce7ac..757cedf41f 100644 --- a/configs/body_3d_keypoint/README.md +++ b/configs/body_3d_keypoint/README.md @@ -1,13 +1,13 @@ -# Human Body 3D Pose Estimation - -3D human body pose estimation aims at predicting the X, Y, Z coordinates of human body joints. Based on the camera number to capture the images or videos, existing works can be further divided into multi-view methods and single-view (monocular) methods. - -## Data preparation - -Please follow [DATA Preparation](/docs/en/dataset_zoo/3d_body_keypoint.md) to prepare data. - -## Demo - -Please follow [Demo](/demo/docs/en/3d_human_pose_demo.md) to run demos. - -
+# Human Body 3D Pose Estimation + +3D human body pose estimation aims at predicting the X, Y, Z coordinates of human body joints. Based on the camera number to capture the images or videos, existing works can be further divided into multi-view methods and single-view (monocular) methods. + +## Data preparation + +Please follow [DATA Preparation](/docs/en/dataset_zoo/3d_body_keypoint.md) to prepare data. + +## Demo + +Please follow [Demo](/demo/docs/en/3d_human_pose_demo.md) to run demos. + +
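The top-down regression configs earlier in this diff are plain MMEngine config files, so they can be exercised directly through MMPose's Python inference helpers rather than only through the training scripts. Below is a minimal sketch, assuming an MMPose 1.x environment; the checkpoint path is a placeholder and is not a file touched by this diff.

```python
# Minimal, illustrative sketch: single-image inference with one of the
# top-down regression configs above. Assumes MMPose 1.x is installed and a
# matching checkpoint has been downloaded; the paths are placeholders.
from mmpose.apis import inference_topdown, init_model

config_file = ('configs/body_2d_keypoint/topdown_regression/coco/'
               'td-reg_res50_rle-8xb64-210e_coco-256x192.py')
checkpoint_file = 'path/to/td-reg_res50_rle_coco.pth'  # placeholder

model = init_model(config_file, checkpoint_file, device='cpu')

# With no detector bboxes given, the whole image is treated as one instance;
# benchmark evaluation instead uses the `bbox_file` set in `val_dataloader`.
results = inference_topdown(model, 'demo.jpg')
keypoints = results[0].pred_instances.keypoints  # array of shape (1, 17, 2)
```

For full COCO evaluation with the detector boxes configured in `val_dataloader`, the usual entry point is `tools/test.py` with the same config and checkpoint.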
diff --git a/configs/body_3d_keypoint/pose_lift/README.md b/configs/body_3d_keypoint/pose_lift/README.md index 7e5f9f7e2a..2111277204 100644 --- a/configs/body_3d_keypoint/pose_lift/README.md +++ b/configs/body_3d_keypoint/pose_lift/README.md @@ -1,51 +1,51 @@ -# Single-view 3D Human Body Pose Estimation - -## Video-based Single-view 3D Human Body Pose Estimation - -Video-based 3D pose estimation is the detection and analysis of X, Y, Z coordinates of human body joints from a sequence of RGB images. - -For single-person 3D pose estimation from a monocular camera, existing works can be classified into three categories: - -(1) from 2D poses to 3D poses (2D-to-3D pose lifting) - -(2) jointly learning 2D and 3D poses, and - -(3) directly regressing 3D poses from images. - -### Results and Models - -#### Human3.6m Dataset - -| Arch | Receptive Field | MPJPE | P-MPJPE | N-MPJPE | ckpt | log | - -| :------------------------------------------------------ | :-------------: | :---: | :-----: | :-----: | :------------------------------------------------------: | :-----------------------------------------------------: | - -| [VideoPose3D-supervised](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-supv_8xb128-80e_h36m.py) | 27 | 40.1 | 30.1 | / | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_supervised-fe8fbba9_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_supervised_20210527.log.json) | - -| [VideoPose3D-supervised](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-81frm-supv_8xb128-80e_h36m.py) | 81 | 39.1 | 29.3 | / | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_81frames_fullconv_supervised-1f2d1104_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_81frames_fullconv_supervised_20210527.log.json) | - -| [VideoPose3D-supervised](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv_8xb128-80e_h36m.py) | 243 | | | / | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised-880bea25_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_20210527.log.json) | - -| [VideoPose3D-supervised-CPN](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-1frm-supv-cpn-ft_8xb128-80e_h36m.py) | 1 | 53.0 | 41.3 | / | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_1frame_fullconv_supervised_cpn_ft-5c3afaed_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_1frame_fullconv_supervised_cpn_ft_20210527.log.json) | - -| [VideoPose3D-supervised-CPN](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv-cpn-ft_8xb128-200e_h36m.py) | 243 | | | / | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_cpn_ft-88f5abbb_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_cpn_ft_20210527.log.json) | - -| [VideoPose3D-semi-supervised](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-semi-supv_8xb64-200e_h36m.py) | 27 | 57.2 | 42.4 | 54.2 | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised-54aef83b_20210527.pth) | 
[log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised_20210527.log.json) | - -| [VideoPose3D-semi-supervised-CPN](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-semi-supv-cpn-ft_8xb64-200e_h36m.py) | 27 | 67.3 | 50.4 | 63.6 | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised_cpn_ft-71be9cde_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised_cpn_ft_20210527.log.json) | - -## Image-based Single-view 3D Human Body Pose Estimation - -3D pose estimation is the detection and analysis of X, Y, Z coordinates of human body joints from an RGB image. -For single-person 3D pose estimation from a monocular camera, existing works can be classified into three categories: -(1) from 2D poses to 3D poses (2D-to-3D pose lifting) -(2) jointly learning 2D and 3D poses, and -(3) directly regressing 3D poses from images. - -### Results and Models - -#### Human3.6m Dataset - -| Arch | MPJPE | P-MPJPE | N-MPJPE | ckpt | log | -| :------------------------------------------------------ | :-------------: | :---: | :-----: | :-----: | :------------------------------------------------------: | :-----------------------------------------------------: | -| [SimpleBaseline3D-tcn](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_simplebaseline3d_8xb64-200e_h36m.py) | 43.4 | 34.3 | /|[ckpt](https://download.openmmlab.com/mmpose/body3d/simple_baseline/simple3Dbaseline_h36m-f0ad73a4_20210419.pth) | [log](https://download.openmmlab.com/mmpose/body3d/simple_baseline/20210415_065056.log.json) | +# Single-view 3D Human Body Pose Estimation + +## Video-based Single-view 3D Human Body Pose Estimation + +Video-based 3D pose estimation is the detection and analysis of X, Y, Z coordinates of human body joints from a sequence of RGB images. + +For single-person 3D pose estimation from a monocular camera, existing works can be classified into three categories: + +(1) from 2D poses to 3D poses (2D-to-3D pose lifting) + +(2) jointly learning 2D and 3D poses, and + +(3) directly regressing 3D poses from images. 
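The `Receptive Field` column in the results table below follows directly from the temporal convolution settings of the corresponding configs: for these stride-convolution TCNs, the receptive field works out to the product of the `kernel_sizes`. A quick check in plain Python, using the `kernel_sizes` values from the configs in this directory:

```python
from functools import reduce
from operator import mul

# kernel_sizes copied from the VideoPose3D configs in this directory.
kernel_sizes_per_config = {
    'pose-lift_videopose3d-27frm-supv': (3, 3, 3),
    'pose-lift_videopose3d-81frm-supv': (3, 3, 3, 3),
    'pose-lift_videopose3d-243frm-supv': (3, 3, 3, 3, 3),
    'pose-lift_videopose3d-1frm-supv-cpn-ft': (1, 1, 1, 1, 1),
}

for name, kernel_sizes in kernel_sizes_per_config.items():
    receptive_field = reduce(mul, kernel_sizes)  # 27, 81, 243, 1
    print(f'{name}: receptive field = {receptive_field} frames')
```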
+ +### Results and Models + +#### Human3.6m Dataset + +| Arch | Receptive Field | MPJPE | P-MPJPE | N-MPJPE | ckpt | log | + +| :------------------------------------------------------ | :-------------: | :---: | :-----: | :-----: | :------------------------------------------------------: | :-----------------------------------------------------: | + +| [VideoPose3D-supervised](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-supv_8xb128-80e_h36m.py) | 27 | 40.1 | 30.1 | / | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_supervised-fe8fbba9_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_supervised_20210527.log.json) | + +| [VideoPose3D-supervised](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-81frm-supv_8xb128-80e_h36m.py) | 81 | 39.1 | 29.3 | / | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_81frames_fullconv_supervised-1f2d1104_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_81frames_fullconv_supervised_20210527.log.json) | + +| [VideoPose3D-supervised](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv_8xb128-80e_h36m.py) | 243 | | | / | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised-880bea25_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_20210527.log.json) | + +| [VideoPose3D-supervised-CPN](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-1frm-supv-cpn-ft_8xb128-80e_h36m.py) | 1 | 53.0 | 41.3 | / | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_1frame_fullconv_supervised_cpn_ft-5c3afaed_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_1frame_fullconv_supervised_cpn_ft_20210527.log.json) | + +| [VideoPose3D-supervised-CPN](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv-cpn-ft_8xb128-200e_h36m.py) | 243 | | | / | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_cpn_ft-88f5abbb_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_cpn_ft_20210527.log.json) | + +| [VideoPose3D-semi-supervised](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-semi-supv_8xb64-200e_h36m.py) | 27 | 57.2 | 42.4 | 54.2 | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised-54aef83b_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised_20210527.log.json) | + +| [VideoPose3D-semi-supervised-CPN](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-semi-supv-cpn-ft_8xb64-200e_h36m.py) | 27 | 67.3 | 50.4 | 63.6 | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised_cpn_ft-71be9cde_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised_cpn_ft_20210527.log.json) | + +## Image-based Single-view 3D Human Body Pose Estimation + +3D pose estimation is the detection and analysis of X, Y, Z coordinates of human body joints from an RGB image. 
+For single-person 3D pose estimation from a monocular camera, existing works can be classified into three categories: +(1) from 2D poses to 3D poses (2D-to-3D pose lifting) +(2) jointly learning 2D and 3D poses, and +(3) directly regressing 3D poses from images. + +### Results and Models + +#### Human3.6m Dataset + +| Arch | MPJPE | P-MPJPE | N-MPJPE | ckpt | log | +| :------------------------------------------------------ | :-------------: | :---: | :-----: | :-----: | :------------------------------------------------------: | :-----------------------------------------------------: | +| [SimpleBaseline3D-tcn](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_simplebaseline3d_8xb64-200e_h36m.py) | 43.4 | 34.3 | /|[ckpt](https://download.openmmlab.com/mmpose/body3d/simple_baseline/simple3Dbaseline_h36m-f0ad73a4_20210419.pth) | [log](https://download.openmmlab.com/mmpose/body3d/simple_baseline/20210415_065056.log.json) | diff --git a/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_simplebaseline3d_8xb64-200e_h36m.py b/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_simplebaseline3d_8xb64-200e_h36m.py index b3c1c2db80..15af0f5b05 100644 --- a/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_simplebaseline3d_8xb64-200e_h36m.py +++ b/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_simplebaseline3d_8xb64-200e_h36m.py @@ -1,168 +1,168 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -vis_backends = [ - dict(type='LocalVisBackend'), -] -visualizer = dict( - type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') - -# runtime -train_cfg = dict(max_epochs=200, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict(type='Adam', lr=1e-3)) - -# learning policy -param_scheduler = [ - dict(type='StepLR', step_size=100000, gamma=0.96, end=80, by_epoch=False) -] - -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict( - type='CheckpointHook', - save_best='MPJPE', - rule='less', - max_keep_ckpts=1)) - -# codec settings -# 3D keypoint normalization parameters -# From file: '{data_root}/annotation_body3d/fps50/joint3d_rel_stats.pkl' -target_mean = [[-2.55652589e-04, -7.11960570e-03, -9.81433052e-04], - [-5.65463051e-03, 3.19636009e-01, 7.19329269e-02], - [-1.01705840e-02, 6.91147892e-01, 1.55352986e-01], - [2.55651315e-04, 7.11954606e-03, 9.81423866e-04], - [-5.09729780e-03, 3.27040413e-01, 7.22258095e-02], - [-9.99656606e-03, 7.08277383e-01, 1.58016408e-01], - [2.90583676e-03, -2.11363307e-01, -4.74210915e-02], - [5.67537804e-03, -4.35088906e-01, -9.76974016e-02], - [5.93884964e-03, -4.91891970e-01, -1.10666618e-01], - [7.37352083e-03, -5.83948619e-01, -1.31171400e-01], - [5.41920653e-03, -3.83931702e-01, -8.68145417e-02], - [2.95964662e-03, -1.87567488e-01, -4.34536934e-02], - [1.26585822e-03, -1.20170579e-01, -2.82526049e-02], - [4.67186639e-03, -3.83644089e-01, -8.55125784e-02], - [1.67648571e-03, -1.97007177e-01, -4.31368364e-02], - [8.70569015e-04, -1.68664569e-01, -3.73902498e-02]], -target_std = [[0.11072244, 0.02238818, 0.07246294], - [0.15856311, 0.18933832, 0.20880479], - [0.19179935, 0.24320062, 0.24756193], - [0.11072181, 0.02238805, 0.07246253], - [0.15880454, 0.19977188, 0.2147063], - [0.18001944, 0.25052739, 0.24853247], - [0.05210694, 0.05211406, 0.06908241], - [0.09515367, 0.10133032, 0.12899733], - [0.11742458, 0.12648469, 0.16465091], - [0.12360297, 0.13085539, 0.16433336], - [0.14602232, 0.09707956, 0.13952731], - [0.24347532, 0.12982249, 0.20230181], - [0.2446877, 0.21501816, 0.23938235], - 
[0.13876084, 0.1008926, 0.1424411], - [0.23687529, 0.14491219, 0.20980829], - [0.24400695, 0.23975028, 0.25520584]] -# 2D keypoint normalization parameters -# From file: '{data_root}/annotation_body3d/fps50/joint2d_stats.pkl' -keypoints_mean = [[532.08351635, 419.74137558], [531.80953144, 418.2607141], - [530.68456967, 493.54259285], [529.36968722, 575.96448516], - [532.29767646, 421.28483336], [531.93946631, 494.72186795], - [529.71984447, 578.96110365], [532.93699382, 370.65225054], - [534.1101856, 317.90342311], [534.55416813, 304.24143901], - [534.86955004, 282.31030885], [534.11308566, 330.11296796], - [533.53637525, 376.2742511], [533.49380107, 391.72324565], - [533.52579142, 330.09494668], [532.50804964, 374.190479], - [532.72786934, 380.61615716]], -keypoints_std = [[107.73640054, 63.35908715], [119.00836213, 64.1215443], - [119.12412107, 50.53806215], [120.61688045, 56.38444891], - [101.95735275, 62.89636486], [106.24832897, 48.41178119], - [108.46734966, 54.58177071], [109.07369806, 68.70443672], - [111.20130351, 74.87287863], [111.63203838, 77.80542514], - [113.22330788, 79.90670556], [105.7145833, 73.27049436], - [107.05804267, 73.93175781], [107.97449418, 83.30391802], - [121.60675105, 74.25691526], [134.34378973, 77.48125087], - [131.79990652, 89.86721124]] -codec = dict( - type='ImagePoseLifting', - num_keypoints=17, - root_index=0, - remove_root=True, - target_mean=target_mean, - target_std=target_std, - keypoints_mean=keypoints_mean, - keypoints_std=keypoints_std) - -# model settings -model = dict( - type='PoseLifter', - backbone=dict( - type='TCN', - in_channels=2 * 17, - stem_channels=1024, - num_blocks=2, - kernel_sizes=(1, 1, 1), - dropout=0.5, - ), - head=dict( - type='TemporalRegressionHead', - in_channels=1024, - num_joints=16, - loss=dict(type='MSELoss'), - decoder=codec, - )) - -# base dataset settings -dataset_type = 'Human36mDataset' -data_root = 'data/h36m/' - -# pipelines -train_pipeline = [ - dict(type='GenerateTarget', encoder=codec), - dict( - type='PackPoseInputs', - meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', - 'target_root', 'target_root_index', 'target_mean', - 'target_std')) -] -val_pipeline = train_pipeline - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - ann_file='annotation_body3d/fps50/h36m_train.npz', - seq_len=1, - causal=True, - keypoint_2d_src='gt', - data_root=data_root, - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - ann_file='annotation_body3d/fps50/h36m_test.npz', - seq_len=1, - causal=True, - keypoint_2d_src='gt', - data_root=data_root, - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='MPJPE', mode='mpjpe'), - dict(type='MPJPE', mode='p-mpjpe') -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# runtime +train_cfg = dict(max_epochs=200, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict(type='Adam', lr=1e-3)) + +# learning policy 
+param_scheduler = [ + dict(type='StepLR', step_size=100000, gamma=0.96, end=80, by_epoch=False) +] + +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + save_best='MPJPE', + rule='less', + max_keep_ckpts=1)) + +# codec settings +# 3D keypoint normalization parameters +# From file: '{data_root}/annotation_body3d/fps50/joint3d_rel_stats.pkl' +target_mean = [[-2.55652589e-04, -7.11960570e-03, -9.81433052e-04], + [-5.65463051e-03, 3.19636009e-01, 7.19329269e-02], + [-1.01705840e-02, 6.91147892e-01, 1.55352986e-01], + [2.55651315e-04, 7.11954606e-03, 9.81423866e-04], + [-5.09729780e-03, 3.27040413e-01, 7.22258095e-02], + [-9.99656606e-03, 7.08277383e-01, 1.58016408e-01], + [2.90583676e-03, -2.11363307e-01, -4.74210915e-02], + [5.67537804e-03, -4.35088906e-01, -9.76974016e-02], + [5.93884964e-03, -4.91891970e-01, -1.10666618e-01], + [7.37352083e-03, -5.83948619e-01, -1.31171400e-01], + [5.41920653e-03, -3.83931702e-01, -8.68145417e-02], + [2.95964662e-03, -1.87567488e-01, -4.34536934e-02], + [1.26585822e-03, -1.20170579e-01, -2.82526049e-02], + [4.67186639e-03, -3.83644089e-01, -8.55125784e-02], + [1.67648571e-03, -1.97007177e-01, -4.31368364e-02], + [8.70569015e-04, -1.68664569e-01, -3.73902498e-02]], +target_std = [[0.11072244, 0.02238818, 0.07246294], + [0.15856311, 0.18933832, 0.20880479], + [0.19179935, 0.24320062, 0.24756193], + [0.11072181, 0.02238805, 0.07246253], + [0.15880454, 0.19977188, 0.2147063], + [0.18001944, 0.25052739, 0.24853247], + [0.05210694, 0.05211406, 0.06908241], + [0.09515367, 0.10133032, 0.12899733], + [0.11742458, 0.12648469, 0.16465091], + [0.12360297, 0.13085539, 0.16433336], + [0.14602232, 0.09707956, 0.13952731], + [0.24347532, 0.12982249, 0.20230181], + [0.2446877, 0.21501816, 0.23938235], + [0.13876084, 0.1008926, 0.1424411], + [0.23687529, 0.14491219, 0.20980829], + [0.24400695, 0.23975028, 0.25520584]] +# 2D keypoint normalization parameters +# From file: '{data_root}/annotation_body3d/fps50/joint2d_stats.pkl' +keypoints_mean = [[532.08351635, 419.74137558], [531.80953144, 418.2607141], + [530.68456967, 493.54259285], [529.36968722, 575.96448516], + [532.29767646, 421.28483336], [531.93946631, 494.72186795], + [529.71984447, 578.96110365], [532.93699382, 370.65225054], + [534.1101856, 317.90342311], [534.55416813, 304.24143901], + [534.86955004, 282.31030885], [534.11308566, 330.11296796], + [533.53637525, 376.2742511], [533.49380107, 391.72324565], + [533.52579142, 330.09494668], [532.50804964, 374.190479], + [532.72786934, 380.61615716]], +keypoints_std = [[107.73640054, 63.35908715], [119.00836213, 64.1215443], + [119.12412107, 50.53806215], [120.61688045, 56.38444891], + [101.95735275, 62.89636486], [106.24832897, 48.41178119], + [108.46734966, 54.58177071], [109.07369806, 68.70443672], + [111.20130351, 74.87287863], [111.63203838, 77.80542514], + [113.22330788, 79.90670556], [105.7145833, 73.27049436], + [107.05804267, 73.93175781], [107.97449418, 83.30391802], + [121.60675105, 74.25691526], [134.34378973, 77.48125087], + [131.79990652, 89.86721124]] +codec = dict( + type='ImagePoseLifting', + num_keypoints=17, + root_index=0, + remove_root=True, + target_mean=target_mean, + target_std=target_std, + keypoints_mean=keypoints_mean, + keypoints_std=keypoints_std) + +# model settings +model = dict( + type='PoseLifter', + backbone=dict( + type='TCN', + in_channels=2 * 17, + stem_channels=1024, + num_blocks=2, + kernel_sizes=(1, 1, 1), + dropout=0.5, + ), + head=dict( + 
type='TemporalRegressionHead', + in_channels=1024, + num_joints=16, + loss=dict(type='MSELoss'), + decoder=codec, + )) + +# base dataset settings +dataset_type = 'Human36mDataset' +data_root = 'data/h36m/' + +# pipelines +train_pipeline = [ + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root', 'target_root_index', 'target_mean', + 'target_std')) +] +val_pipeline = train_pipeline + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_train.npz', + seq_len=1, + causal=True, + keypoint_2d_src='gt', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_test.npz', + seq_len=1, + causal=True, + keypoint_2d_src='gt', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='MPJPE', mode='mpjpe'), + dict(type='MPJPE', mode='p-mpjpe') +] +test_evaluator = val_evaluator diff --git a/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-1frm-supv-cpn-ft_8xb128-80e_h36m.py b/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-1frm-supv-cpn-ft_8xb128-80e_h36m.py index 0cbf89142d..56c5f9f8be 100644 --- a/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-1frm-supv-cpn-ft_8xb128-80e_h36m.py +++ b/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-1frm-supv-cpn-ft_8xb128-80e_h36m.py @@ -1,132 +1,132 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -vis_backends = [ - dict(type='LocalVisBackend'), -] -visualizer = dict( - type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') - -# runtime -train_cfg = dict(max_epochs=80, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict(type='Adam', lr=1e-4)) - -# learning policy -param_scheduler = [ - dict(type='ExponentialLR', gamma=0.98, end=80, by_epoch=True) -] - -auto_scale_lr = dict(base_batch_size=1024) - -# hooks -default_hooks = dict( - checkpoint=dict( - type='CheckpointHook', - save_best='MPJPE', - rule='less', - max_keep_ckpts=1), - logger=dict(type='LoggerHook', interval=20), -) - -# codec settings -codec = dict( - type='VideoPoseLifting', - num_keypoints=17, - zero_center=True, - root_index=0, - remove_root=False) - -# model settings -model = dict( - type='PoseLifter', - backbone=dict( - type='TCN', - in_channels=2 * 17, - stem_channels=1024, - num_blocks=4, - kernel_sizes=(1, 1, 1, 1, 1), - dropout=0.25, - use_stride_conv=True, - ), - head=dict( - type='TemporalRegressionHead', - in_channels=1024, - num_joints=17, - loss=dict(type='MPJPELoss'), - decoder=codec, - )) - -# base dataset settings -dataset_type = 'Human36mDataset' -data_root = 'data/h36m/' - -# pipelines -train_pipeline = [ - dict( - type='RandomFlipAroundRoot', - keypoints_flip_cfg=dict(), - target_flip_cfg=dict(), - ), - dict(type='GenerateTarget', encoder=codec), - dict( - type='PackPoseInputs', - meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', - 'target_root')) -] -val_pipeline = [ - 
dict(type='GenerateTarget', encoder=codec), - dict( - type='PackPoseInputs', - meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', - 'target_root')) -] - -# data loaders -train_dataloader = dict( - batch_size=128, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - ann_file='annotation_body3d/fps50/h36m_train.npz', - seq_len=1, - causal=False, - pad_video_seq=False, - keypoint_2d_src='detection', - keypoint_2d_det_file='joint_2d_det_files/cpn_ft_h36m_dbb_train.npy', - camera_param_file='annotation_body3d/cameras.pkl', - data_root=data_root, - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - ), -) -val_dataloader = dict( - batch_size=128, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - ann_file='annotation_body3d/fps50/h36m_test.npz', - seq_len=1, - causal=False, - pad_video_seq=False, - keypoint_2d_src='detection', - keypoint_2d_det_file='joint_2d_det_files/cpn_ft_h36m_dbb_test.npy', - camera_param_file='annotation_body3d/cameras.pkl', - data_root=data_root, - data_prefix=dict(img='images/'), - pipeline=val_pipeline, - test_mode=True, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='MPJPE', mode='mpjpe'), - dict(type='MPJPE', mode='p-mpjpe') -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# runtime +train_cfg = dict(max_epochs=80, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict(type='Adam', lr=1e-4)) + +# learning policy +param_scheduler = [ + dict(type='ExponentialLR', gamma=0.98, end=80, by_epoch=True) +] + +auto_scale_lr = dict(base_batch_size=1024) + +# hooks +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + save_best='MPJPE', + rule='less', + max_keep_ckpts=1), + logger=dict(type='LoggerHook', interval=20), +) + +# codec settings +codec = dict( + type='VideoPoseLifting', + num_keypoints=17, + zero_center=True, + root_index=0, + remove_root=False) + +# model settings +model = dict( + type='PoseLifter', + backbone=dict( + type='TCN', + in_channels=2 * 17, + stem_channels=1024, + num_blocks=4, + kernel_sizes=(1, 1, 1, 1, 1), + dropout=0.25, + use_stride_conv=True, + ), + head=dict( + type='TemporalRegressionHead', + in_channels=1024, + num_joints=17, + loss=dict(type='MPJPELoss'), + decoder=codec, + )) + +# base dataset settings +dataset_type = 'Human36mDataset' +data_root = 'data/h36m/' + +# pipelines +train_pipeline = [ + dict( + type='RandomFlipAroundRoot', + keypoints_flip_cfg=dict(), + target_flip_cfg=dict(), + ), + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root')) +] +val_pipeline = [ + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root')) +] + +# data loaders +train_dataloader = dict( + batch_size=128, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_train.npz', + seq_len=1, + causal=False, + pad_video_seq=False, + 
keypoint_2d_src='detection', + keypoint_2d_det_file='joint_2d_det_files/cpn_ft_h36m_dbb_train.npy', + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + ), +) +val_dataloader = dict( + batch_size=128, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_test.npz', + seq_len=1, + causal=False, + pad_video_seq=False, + keypoint_2d_src='detection', + keypoint_2d_det_file='joint_2d_det_files/cpn_ft_h36m_dbb_test.npy', + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=val_pipeline, + test_mode=True, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='MPJPE', mode='mpjpe'), + dict(type='MPJPE', mode='p-mpjpe') +] +test_evaluator = val_evaluator diff --git a/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv-cpn-ft_8xb128-200e_h36m.py b/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv-cpn-ft_8xb128-200e_h36m.py index 3ef3df570b..592eac9240 100644 --- a/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv-cpn-ft_8xb128-200e_h36m.py +++ b/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv-cpn-ft_8xb128-200e_h36m.py @@ -1,132 +1,132 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -vis_backends = [ - dict(type='LocalVisBackend'), -] -visualizer = dict( - type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') - -# runtime -train_cfg = dict(max_epochs=200, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict(type='Adam', lr=1e-4)) - -# learning policy -param_scheduler = [ - dict(type='ExponentialLR', gamma=0.98, end=200, by_epoch=True) -] - -auto_scale_lr = dict(base_batch_size=1024) - -# hooks -default_hooks = dict( - checkpoint=dict( - type='CheckpointHook', - save_best='MPJPE', - rule='less', - max_keep_ckpts=1), - logger=dict(type='LoggerHook', interval=20), -) - -# codec settings -codec = dict( - type='VideoPoseLifting', - num_keypoints=17, - zero_center=True, - root_index=0, - remove_root=False) - -# model settings -model = dict( - type='PoseLifter', - backbone=dict( - type='TCN', - in_channels=2 * 17, - stem_channels=1024, - num_blocks=4, - kernel_sizes=(3, 3, 3, 3, 3), - dropout=0.25, - use_stride_conv=True, - ), - head=dict( - type='TemporalRegressionHead', - in_channels=1024, - num_joints=17, - loss=dict(type='MPJPELoss'), - decoder=codec, - )) - -# base dataset settings -dataset_type = 'Human36mDataset' -data_root = 'data/h36m/' - -# pipelines -train_pipeline = [ - dict( - type='RandomFlipAroundRoot', - keypoints_flip_cfg=dict(), - target_flip_cfg=dict(), - ), - dict(type='GenerateTarget', encoder=codec), - dict( - type='PackPoseInputs', - meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', - 'target_root')) -] -val_pipeline = [ - dict(type='GenerateTarget', encoder=codec), - dict( - type='PackPoseInputs', - meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', - 'target_root')) -] - -# data loaders -train_dataloader = dict( - batch_size=128, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - ann_file='annotation_body3d/fps50/h36m_train.npz', - seq_len=243, - causal=False, - pad_video_seq=True, - 
keypoint_2d_src='detection', - keypoint_2d_det_file='joint_2d_det_files/cpn_ft_h36m_dbb_train.npy', - camera_param_file='annotation_body3d/cameras.pkl', - data_root=data_root, - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - ), -) -val_dataloader = dict( - batch_size=128, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - ann_file='annotation_body3d/fps50/h36m_test.npz', - seq_len=243, - causal=False, - pad_video_seq=True, - keypoint_2d_src='detection', - keypoint_2d_det_file='joint_2d_det_files/cpn_ft_h36m_dbb_test.npy', - camera_param_file='annotation_body3d/cameras.pkl', - data_root=data_root, - data_prefix=dict(img='images/'), - pipeline=val_pipeline, - test_mode=True, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='MPJPE', mode='mpjpe'), - dict(type='MPJPE', mode='p-mpjpe') -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# runtime +train_cfg = dict(max_epochs=200, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict(type='Adam', lr=1e-4)) + +# learning policy +param_scheduler = [ + dict(type='ExponentialLR', gamma=0.98, end=200, by_epoch=True) +] + +auto_scale_lr = dict(base_batch_size=1024) + +# hooks +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + save_best='MPJPE', + rule='less', + max_keep_ckpts=1), + logger=dict(type='LoggerHook', interval=20), +) + +# codec settings +codec = dict( + type='VideoPoseLifting', + num_keypoints=17, + zero_center=True, + root_index=0, + remove_root=False) + +# model settings +model = dict( + type='PoseLifter', + backbone=dict( + type='TCN', + in_channels=2 * 17, + stem_channels=1024, + num_blocks=4, + kernel_sizes=(3, 3, 3, 3, 3), + dropout=0.25, + use_stride_conv=True, + ), + head=dict( + type='TemporalRegressionHead', + in_channels=1024, + num_joints=17, + loss=dict(type='MPJPELoss'), + decoder=codec, + )) + +# base dataset settings +dataset_type = 'Human36mDataset' +data_root = 'data/h36m/' + +# pipelines +train_pipeline = [ + dict( + type='RandomFlipAroundRoot', + keypoints_flip_cfg=dict(), + target_flip_cfg=dict(), + ), + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root')) +] +val_pipeline = [ + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root')) +] + +# data loaders +train_dataloader = dict( + batch_size=128, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_train.npz', + seq_len=243, + causal=False, + pad_video_seq=True, + keypoint_2d_src='detection', + keypoint_2d_det_file='joint_2d_det_files/cpn_ft_h36m_dbb_train.npy', + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + ), +) +val_dataloader = dict( + batch_size=128, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + 
ann_file='annotation_body3d/fps50/h36m_test.npz', + seq_len=243, + causal=False, + pad_video_seq=True, + keypoint_2d_src='detection', + keypoint_2d_det_file='joint_2d_det_files/cpn_ft_h36m_dbb_test.npy', + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=val_pipeline, + test_mode=True, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='MPJPE', mode='mpjpe'), + dict(type='MPJPE', mode='p-mpjpe') +] +test_evaluator = val_evaluator diff --git a/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv_8xb128-80e_h36m.py b/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv_8xb128-80e_h36m.py index 0f311ac5cf..8063bd51dd 100644 --- a/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv_8xb128-80e_h36m.py +++ b/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv_8xb128-80e_h36m.py @@ -1,128 +1,128 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -vis_backends = [ - dict(type='LocalVisBackend'), -] -visualizer = dict( - type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') - -# runtime -train_cfg = dict(max_epochs=80, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict(type='Adam', lr=1e-3)) - -# learning policy -param_scheduler = [ - dict(type='ExponentialLR', gamma=0.975, end=80, by_epoch=True) -] - -auto_scale_lr = dict(base_batch_size=1024) - -# hooks -default_hooks = dict( - checkpoint=dict( - type='CheckpointHook', - save_best='MPJPE', - rule='less', - max_keep_ckpts=1), - logger=dict(type='LoggerHook', interval=20), -) - -# codec settings -codec = dict( - type='VideoPoseLifting', - num_keypoints=17, - zero_center=True, - root_index=0, - remove_root=False) - -# model settings -model = dict( - type='PoseLifter', - backbone=dict( - type='TCN', - in_channels=2 * 17, - stem_channels=1024, - num_blocks=4, - kernel_sizes=(3, 3, 3, 3, 3), - dropout=0.25, - use_stride_conv=True, - ), - head=dict( - type='TemporalRegressionHead', - in_channels=1024, - num_joints=17, - loss=dict(type='MPJPELoss'), - decoder=codec, - )) - -# base dataset settings -dataset_type = 'Human36mDataset' -data_root = 'data/h36m/' - -# pipelines -train_pipeline = [ - dict( - type='RandomFlipAroundRoot', - keypoints_flip_cfg=dict(), - target_flip_cfg=dict(), - ), - dict(type='GenerateTarget', encoder=codec), - dict( - type='PackPoseInputs', - meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', - 'target_root')) -] -val_pipeline = [ - dict(type='GenerateTarget', encoder=codec), - dict( - type='PackPoseInputs', - meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', - 'target_root')) -] - -# data loaders -train_dataloader = dict( - batch_size=128, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - ann_file='annotation_body3d/fps50/h36m_train.npz', - seq_len=243, - causal=False, - pad_video_seq=True, - camera_param_file='annotation_body3d/cameras.pkl', - data_root=data_root, - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - ), -) -val_dataloader = dict( - batch_size=128, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - ann_file='annotation_body3d/fps50/h36m_test.npz', - seq_len=243, - causal=False, - pad_video_seq=True, - 
camera_param_file='annotation_body3d/cameras.pkl', - data_root=data_root, - data_prefix=dict(img='images/'), - pipeline=val_pipeline, - test_mode=True, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='MPJPE', mode='mpjpe'), - dict(type='MPJPE', mode='p-mpjpe') -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# runtime +train_cfg = dict(max_epochs=80, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict(type='Adam', lr=1e-3)) + +# learning policy +param_scheduler = [ + dict(type='ExponentialLR', gamma=0.975, end=80, by_epoch=True) +] + +auto_scale_lr = dict(base_batch_size=1024) + +# hooks +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + save_best='MPJPE', + rule='less', + max_keep_ckpts=1), + logger=dict(type='LoggerHook', interval=20), +) + +# codec settings +codec = dict( + type='VideoPoseLifting', + num_keypoints=17, + zero_center=True, + root_index=0, + remove_root=False) + +# model settings +model = dict( + type='PoseLifter', + backbone=dict( + type='TCN', + in_channels=2 * 17, + stem_channels=1024, + num_blocks=4, + kernel_sizes=(3, 3, 3, 3, 3), + dropout=0.25, + use_stride_conv=True, + ), + head=dict( + type='TemporalRegressionHead', + in_channels=1024, + num_joints=17, + loss=dict(type='MPJPELoss'), + decoder=codec, + )) + +# base dataset settings +dataset_type = 'Human36mDataset' +data_root = 'data/h36m/' + +# pipelines +train_pipeline = [ + dict( + type='RandomFlipAroundRoot', + keypoints_flip_cfg=dict(), + target_flip_cfg=dict(), + ), + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root')) +] +val_pipeline = [ + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root')) +] + +# data loaders +train_dataloader = dict( + batch_size=128, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_train.npz', + seq_len=243, + causal=False, + pad_video_seq=True, + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + ), +) +val_dataloader = dict( + batch_size=128, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_test.npz', + seq_len=243, + causal=False, + pad_video_seq=True, + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=val_pipeline, + test_mode=True, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='MPJPE', mode='mpjpe'), + dict(type='MPJPE', mode='p-mpjpe') +] +test_evaluator = val_evaluator diff --git a/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-semi-supv-cpn-ft_8xb64-200e_h36m.py b/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-semi-supv-cpn-ft_8xb64-200e_h36m.py index 08bcda8ed7..842bee6163 100644 --- 
a/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-semi-supv-cpn-ft_8xb64-200e_h36m.py +++ b/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-semi-supv-cpn-ft_8xb64-200e_h36m.py @@ -1,119 +1,119 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -vis_backends = [ - dict(type='LocalVisBackend'), -] -visualizer = dict( - type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') - -# runtime -train_cfg = None - -# optimizer - -# learning policy - -auto_scale_lr = dict(base_batch_size=1024) - -# hooks -default_hooks = dict( - checkpoint=dict( - type='CheckpointHook', - save_best='MPJPE', - rule='less', - max_keep_ckpts=1), - logger=dict(type='LoggerHook', interval=20), -) - -# codec settings -codec = dict( - type='VideoPoseLifting', - num_keypoints=17, - zero_center=True, - root_index=0, - remove_root=False) - -# model settings -model = dict( - type='PoseLifter', - backbone=dict( - type='TCN', - in_channels=2 * 17, - stem_channels=1024, - num_blocks=2, - kernel_sizes=(3, 3, 3), - dropout=0.25, - use_stride_conv=True, - ), - head=dict( - type='TemporalRegressionHead', - in_channels=1024, - num_joints=17, - loss=dict(type='MPJPELoss'), - decoder=codec, - ), - traj_backbone=dict( - type='TCN', - in_channels=2 * 17, - stem_channels=1024, - num_blocks=2, - kernel_sizes=(3, 3, 3), - dropout=0.25, - use_stride_conv=True, - ), - traj_head=dict( - type='TrajectoryRegressionHead', - in_channels=1024, - num_joints=1, - loss=dict(type='MPJPELoss', use_target_weight=True), - decoder=codec, - ), - semi_loss=dict( - type='SemiSupervisionLoss', - joint_parents=[0, 0, 1, 2, 0, 4, 5, 0, 7, 8, 9, 8, 11, 12, 8, 14, 15], - warmup_iterations=1311376 // 64 // 8 * 5), -) - -# base dataset settings -dataset_type = 'Human36mDataset' -data_root = 'data/h36m/' - -# pipelines -val_pipeline = [ - dict(type='GenerateTarget', encoder=codec), - dict( - type='PackPoseInputs', - meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', - 'target_root')) -] - -# data loaders -val_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - ann_file='annotation_body3d/fps50/h36m_test.npz', - seq_len=27, - causal=False, - pad_video_seq=True, - keypoint_2d_src='detection', - keypoint_2d_det_file='joint_2d_det_files/cpn_ft_h36m_dbb_test.npy', - camera_param_file='annotation_body3d/cameras.pkl', - data_root=data_root, - data_prefix=dict(img='images/'), - pipeline=val_pipeline, - test_mode=True, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='MPJPE', mode='mpjpe'), - dict(type='MPJPE', mode='p-mpjpe'), - dict(type='MPJPE', mode='n-mpjpe') -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# runtime +train_cfg = None + +# optimizer + +# learning policy + +auto_scale_lr = dict(base_batch_size=1024) + +# hooks +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + save_best='MPJPE', + rule='less', + max_keep_ckpts=1), + logger=dict(type='LoggerHook', interval=20), +) + +# codec settings +codec = dict( + type='VideoPoseLifting', + num_keypoints=17, + zero_center=True, + root_index=0, + remove_root=False) + +# model settings +model = dict( + type='PoseLifter', + backbone=dict( + type='TCN', 
+ in_channels=2 * 17, + stem_channels=1024, + num_blocks=2, + kernel_sizes=(3, 3, 3), + dropout=0.25, + use_stride_conv=True, + ), + head=dict( + type='TemporalRegressionHead', + in_channels=1024, + num_joints=17, + loss=dict(type='MPJPELoss'), + decoder=codec, + ), + traj_backbone=dict( + type='TCN', + in_channels=2 * 17, + stem_channels=1024, + num_blocks=2, + kernel_sizes=(3, 3, 3), + dropout=0.25, + use_stride_conv=True, + ), + traj_head=dict( + type='TrajectoryRegressionHead', + in_channels=1024, + num_joints=1, + loss=dict(type='MPJPELoss', use_target_weight=True), + decoder=codec, + ), + semi_loss=dict( + type='SemiSupervisionLoss', + joint_parents=[0, 0, 1, 2, 0, 4, 5, 0, 7, 8, 9, 8, 11, 12, 8, 14, 15], + warmup_iterations=1311376 // 64 // 8 * 5), +) + +# base dataset settings +dataset_type = 'Human36mDataset' +data_root = 'data/h36m/' + +# pipelines +val_pipeline = [ + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root')) +] + +# data loaders +val_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_test.npz', + seq_len=27, + causal=False, + pad_video_seq=True, + keypoint_2d_src='detection', + keypoint_2d_det_file='joint_2d_det_files/cpn_ft_h36m_dbb_test.npy', + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=val_pipeline, + test_mode=True, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='MPJPE', mode='mpjpe'), + dict(type='MPJPE', mode='p-mpjpe'), + dict(type='MPJPE', mode='n-mpjpe') +] +test_evaluator = val_evaluator diff --git a/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-semi-supv_8xb64-200e_h36m.py b/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-semi-supv_8xb64-200e_h36m.py index d145f05b17..7f0e68ca32 100644 --- a/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-semi-supv_8xb64-200e_h36m.py +++ b/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-semi-supv_8xb64-200e_h36m.py @@ -1,117 +1,117 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -vis_backends = [ - dict(type='LocalVisBackend'), -] -visualizer = dict( - type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') - -# runtime -train_cfg = None - -# optimizer - -# learning policy - -auto_scale_lr = dict(base_batch_size=1024) - -# hooks -default_hooks = dict( - checkpoint=dict( - type='CheckpointHook', - save_best='MPJPE', - rule='less', - max_keep_ckpts=1), - logger=dict(type='LoggerHook', interval=20), -) - -# codec settings -codec = dict( - type='VideoPoseLifting', - num_keypoints=17, - zero_center=True, - root_index=0, - remove_root=False) - -# model settings -model = dict( - type='PoseLifter', - backbone=dict( - type='TCN', - in_channels=2 * 17, - stem_channels=1024, - num_blocks=2, - kernel_sizes=(3, 3, 3), - dropout=0.25, - use_stride_conv=True, - ), - head=dict( - type='TemporalRegressionHead', - in_channels=1024, - num_joints=17, - loss=dict(type='MPJPELoss'), - decoder=codec, - ), - traj_backbone=dict( - type='TCN', - in_channels=2 * 17, - stem_channels=1024, - num_blocks=2, - kernel_sizes=(3, 3, 3), - dropout=0.25, - use_stride_conv=True, - ), - traj_head=dict( - type='TrajectoryRegressionHead', - 
in_channels=1024, - num_joints=1, - loss=dict(type='MPJPELoss', use_target_weight=True), - decoder=codec, - ), - semi_loss=dict( - type='SemiSupervisionLoss', - joint_parents=[0, 0, 1, 2, 0, 4, 5, 0, 7, 8, 9, 8, 11, 12, 8, 14, 15], - warmup_iterations=1311376 // 64 // 8 * 5), -) - -# base dataset settings -dataset_type = 'Human36mDataset' -data_root = 'data/h36m/' - -# pipelines -val_pipeline = [ - dict(type='GenerateTarget', encoder=codec), - dict( - type='PackPoseInputs', - meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', - 'target_root')) -] - -# data loaders -val_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - ann_file='annotation_body3d/fps50/h36m_test.npz', - seq_len=27, - causal=False, - pad_video_seq=True, - camera_param_file='annotation_body3d/cameras.pkl', - data_root=data_root, - data_prefix=dict(img='images/'), - pipeline=val_pipeline, - test_mode=True, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='MPJPE', mode='mpjpe'), - dict(type='MPJPE', mode='p-mpjpe'), - dict(type='MPJPE', mode='n-mpjpe') -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# runtime +train_cfg = None + +# optimizer + +# learning policy + +auto_scale_lr = dict(base_batch_size=1024) + +# hooks +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + save_best='MPJPE', + rule='less', + max_keep_ckpts=1), + logger=dict(type='LoggerHook', interval=20), +) + +# codec settings +codec = dict( + type='VideoPoseLifting', + num_keypoints=17, + zero_center=True, + root_index=0, + remove_root=False) + +# model settings +model = dict( + type='PoseLifter', + backbone=dict( + type='TCN', + in_channels=2 * 17, + stem_channels=1024, + num_blocks=2, + kernel_sizes=(3, 3, 3), + dropout=0.25, + use_stride_conv=True, + ), + head=dict( + type='TemporalRegressionHead', + in_channels=1024, + num_joints=17, + loss=dict(type='MPJPELoss'), + decoder=codec, + ), + traj_backbone=dict( + type='TCN', + in_channels=2 * 17, + stem_channels=1024, + num_blocks=2, + kernel_sizes=(3, 3, 3), + dropout=0.25, + use_stride_conv=True, + ), + traj_head=dict( + type='TrajectoryRegressionHead', + in_channels=1024, + num_joints=1, + loss=dict(type='MPJPELoss', use_target_weight=True), + decoder=codec, + ), + semi_loss=dict( + type='SemiSupervisionLoss', + joint_parents=[0, 0, 1, 2, 0, 4, 5, 0, 7, 8, 9, 8, 11, 12, 8, 14, 15], + warmup_iterations=1311376 // 64 // 8 * 5), +) + +# base dataset settings +dataset_type = 'Human36mDataset' +data_root = 'data/h36m/' + +# pipelines +val_pipeline = [ + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root')) +] + +# data loaders +val_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_test.npz', + seq_len=27, + causal=False, + pad_video_seq=True, + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=val_pipeline, + test_mode=True, + )) 
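# Note on the semi-supervised configs above: warmup_iterations is set to
# 1311376 // 64 // 8 * 5 = 12805. Assuming 1311376 is the number of training
# samples and 64 samples x 8 GPUs the global batch size, this is roughly
# 2561 iterations per epoch, i.e. about five epochs of supervised-only warm-up
# before the unsupervised terms of `SemiSupervisionLoss` presumably take effect.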
+test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='MPJPE', mode='mpjpe'), + dict(type='MPJPE', mode='p-mpjpe'), + dict(type='MPJPE', mode='n-mpjpe') +] +test_evaluator = val_evaluator diff --git a/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-supv_8xb128-80e_h36m.py b/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-supv_8xb128-80e_h36m.py index 2589b493a6..6b68321e12 100644 --- a/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-supv_8xb128-80e_h36m.py +++ b/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-supv_8xb128-80e_h36m.py @@ -1,128 +1,128 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -vis_backends = [ - dict(type='LocalVisBackend'), -] -visualizer = dict( - type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') - -# runtime -train_cfg = dict(max_epochs=80, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict(type='Adam', lr=1e-3)) - -# learning policy -param_scheduler = [ - dict(type='ExponentialLR', gamma=0.975, end=80, by_epoch=True) -] - -auto_scale_lr = dict(base_batch_size=1024) - -# hooks -default_hooks = dict( - checkpoint=dict( - type='CheckpointHook', - save_best='MPJPE', - rule='less', - max_keep_ckpts=1), - logger=dict(type='LoggerHook', interval=20), -) - -# codec settings -codec = dict( - type='VideoPoseLifting', - num_keypoints=17, - zero_center=True, - root_index=0, - remove_root=False) - -# model settings -model = dict( - type='PoseLifter', - backbone=dict( - type='TCN', - in_channels=2 * 17, - stem_channels=1024, - num_blocks=2, - kernel_sizes=(3, 3, 3), - dropout=0.25, - use_stride_conv=True, - ), - head=dict( - type='TemporalRegressionHead', - in_channels=1024, - num_joints=17, - loss=dict(type='MPJPELoss'), - decoder=codec, - )) - -# base dataset settings -dataset_type = 'Human36mDataset' -data_root = 'data/h36m/' - -# pipelines -train_pipeline = [ - dict( - type='RandomFlipAroundRoot', - keypoints_flip_cfg=dict(), - target_flip_cfg=dict(), - ), - dict(type='GenerateTarget', encoder=codec), - dict( - type='PackPoseInputs', - meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', - 'target_root')) -] -val_pipeline = [ - dict(type='GenerateTarget', encoder=codec), - dict( - type='PackPoseInputs', - meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', - 'target_root')) -] - -# data loaders -train_dataloader = dict( - batch_size=128, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - ann_file='annotation_body3d/fps50/h36m_train.npz', - seq_len=27, - causal=False, - pad_video_seq=True, - camera_param_file='annotation_body3d/cameras.pkl', - data_root=data_root, - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - ), -) -val_dataloader = dict( - batch_size=128, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - ann_file='annotation_body3d/fps50/h36m_test.npz', - seq_len=27, - causal=False, - pad_video_seq=True, - camera_param_file='annotation_body3d/cameras.pkl', - data_root=data_root, - data_prefix=dict(img='images/'), - pipeline=val_pipeline, - test_mode=True, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='MPJPE', mode='mpjpe'), - dict(type='MPJPE', mode='p-mpjpe') -] -test_evaluator = val_evaluator +_base_ = 
['../../../_base_/default_runtime.py'] + +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# runtime +train_cfg = dict(max_epochs=80, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict(type='Adam', lr=1e-3)) + +# learning policy +param_scheduler = [ + dict(type='ExponentialLR', gamma=0.975, end=80, by_epoch=True) +] + +auto_scale_lr = dict(base_batch_size=1024) + +# hooks +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + save_best='MPJPE', + rule='less', + max_keep_ckpts=1), + logger=dict(type='LoggerHook', interval=20), +) + +# codec settings +codec = dict( + type='VideoPoseLifting', + num_keypoints=17, + zero_center=True, + root_index=0, + remove_root=False) + +# model settings +model = dict( + type='PoseLifter', + backbone=dict( + type='TCN', + in_channels=2 * 17, + stem_channels=1024, + num_blocks=2, + kernel_sizes=(3, 3, 3), + dropout=0.25, + use_stride_conv=True, + ), + head=dict( + type='TemporalRegressionHead', + in_channels=1024, + num_joints=17, + loss=dict(type='MPJPELoss'), + decoder=codec, + )) + +# base dataset settings +dataset_type = 'Human36mDataset' +data_root = 'data/h36m/' + +# pipelines +train_pipeline = [ + dict( + type='RandomFlipAroundRoot', + keypoints_flip_cfg=dict(), + target_flip_cfg=dict(), + ), + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root')) +] +val_pipeline = [ + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root')) +] + +# data loaders +train_dataloader = dict( + batch_size=128, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_train.npz', + seq_len=27, + causal=False, + pad_video_seq=True, + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + ), +) +val_dataloader = dict( + batch_size=128, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_test.npz', + seq_len=27, + causal=False, + pad_video_seq=True, + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=val_pipeline, + test_mode=True, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='MPJPE', mode='mpjpe'), + dict(type='MPJPE', mode='p-mpjpe') +] +test_evaluator = val_evaluator diff --git a/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-81frm-supv_8xb128-80e_h36m.py b/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-81frm-supv_8xb128-80e_h36m.py index f2c27e423d..b759536684 100644 --- a/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-81frm-supv_8xb128-80e_h36m.py +++ b/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-81frm-supv_8xb128-80e_h36m.py @@ -1,128 +1,128 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -vis_backends = [ - dict(type='LocalVisBackend'), -] -visualizer = dict( - type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') - -# runtime -train_cfg = dict(max_epochs=80, 
val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict(type='Adam', lr=1e-3)) - -# learning policy -param_scheduler = [ - dict(type='ExponentialLR', gamma=0.975, end=80, by_epoch=True) -] - -auto_scale_lr = dict(base_batch_size=1024) - -# hooks -default_hooks = dict( - checkpoint=dict( - type='CheckpointHook', - save_best='MPJPE', - rule='less', - max_keep_ckpts=1), - logger=dict(type='LoggerHook', interval=20), -) - -# codec settings -codec = dict( - type='VideoPoseLifting', - num_keypoints=17, - zero_center=True, - root_index=0, - remove_root=False) - -# model settings -model = dict( - type='PoseLifter', - backbone=dict( - type='TCN', - in_channels=2 * 17, - stem_channels=1024, - num_blocks=3, - kernel_sizes=(3, 3, 3, 3), - dropout=0.25, - use_stride_conv=True, - ), - head=dict( - type='TemporalRegressionHead', - in_channels=1024, - num_joints=17, - loss=dict(type='MPJPELoss'), - decoder=codec, - )) - -# base dataset settings -dataset_type = 'Human36mDataset' -data_root = 'data/h36m/' - -# pipelines -train_pipeline = [ - dict( - type='RandomFlipAroundRoot', - keypoints_flip_cfg=dict(), - target_flip_cfg=dict(), - ), - dict(type='GenerateTarget', encoder=codec), - dict( - type='PackPoseInputs', - meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', - 'target_root')) -] -val_pipeline = [ - dict(type='GenerateTarget', encoder=codec), - dict( - type='PackPoseInputs', - meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', - 'target_root')) -] - -# data loaders -train_dataloader = dict( - batch_size=128, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - ann_file='annotation_body3d/fps50/h36m_train.npz', - seq_len=81, - causal=False, - pad_video_seq=True, - camera_param_file='annotation_body3d/cameras.pkl', - data_root=data_root, - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - ), -) -val_dataloader = dict( - batch_size=128, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - ann_file='annotation_body3d/fps50/h36m_test.npz', - seq_len=81, - causal=False, - pad_video_seq=True, - camera_param_file='annotation_body3d/cameras.pkl', - data_root=data_root, - data_prefix=dict(img='images/'), - pipeline=val_pipeline, - test_mode=True, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='MPJPE', mode='mpjpe'), - dict(type='MPJPE', mode='p-mpjpe') -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + type='Pose3dLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# runtime +train_cfg = dict(max_epochs=80, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict(type='Adam', lr=1e-3)) + +# learning policy +param_scheduler = [ + dict(type='ExponentialLR', gamma=0.975, end=80, by_epoch=True) +] + +auto_scale_lr = dict(base_batch_size=1024) + +# hooks +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + save_best='MPJPE', + rule='less', + max_keep_ckpts=1), + logger=dict(type='LoggerHook', interval=20), +) + +# codec settings +codec = dict( + type='VideoPoseLifting', + num_keypoints=17, + zero_center=True, + root_index=0, + remove_root=False) + +# model settings +model = dict( + type='PoseLifter', + backbone=dict( + type='TCN', + in_channels=2 * 17, + 
stem_channels=1024, + num_blocks=3, + kernel_sizes=(3, 3, 3, 3), + dropout=0.25, + use_stride_conv=True, + ), + head=dict( + type='TemporalRegressionHead', + in_channels=1024, + num_joints=17, + loss=dict(type='MPJPELoss'), + decoder=codec, + )) + +# base dataset settings +dataset_type = 'Human36mDataset' +data_root = 'data/h36m/' + +# pipelines +train_pipeline = [ + dict( + type='RandomFlipAroundRoot', + keypoints_flip_cfg=dict(), + target_flip_cfg=dict(), + ), + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root')) +] +val_pipeline = [ + dict(type='GenerateTarget', encoder=codec), + dict( + type='PackPoseInputs', + meta_keys=('id', 'category_id', 'target_img_path', 'flip_indices', + 'target_root')) +] + +# data loaders +train_dataloader = dict( + batch_size=128, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_train.npz', + seq_len=81, + causal=False, + pad_video_seq=True, + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + ), +) +val_dataloader = dict( + batch_size=128, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + ann_file='annotation_body3d/fps50/h36m_test.npz', + seq_len=81, + causal=False, + pad_video_seq=True, + camera_param_file='annotation_body3d/cameras.pkl', + data_root=data_root, + data_prefix=dict(img='images/'), + pipeline=val_pipeline, + test_mode=True, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='MPJPE', mode='mpjpe'), + dict(type='MPJPE', mode='p-mpjpe') +] +test_evaluator = val_evaluator diff --git a/configs/body_3d_keypoint/pose_lift/h36m/simplebaseline3d_h36m.md b/configs/body_3d_keypoint/pose_lift/h36m/simplebaseline3d_h36m.md index 9bc1876315..901a3ec763 100644 --- a/configs/body_3d_keypoint/pose_lift/h36m/simplebaseline3d_h36m.md +++ b/configs/body_3d_keypoint/pose_lift/h36m/simplebaseline3d_h36m.md @@ -1,44 +1,44 @@ - - -
-SimpleBaseline3D (ICCV'2017) - -```bibtex -@inproceedings{martinez_2017_3dbaseline, - title={A simple yet effective baseline for 3d human pose estimation}, - author={Martinez, Julieta and Hossain, Rayat and Romero, Javier and Little, James J.}, - booktitle={ICCV}, - year={2017} -} -``` - -
- - - -
-Human3.6M (TPAMI'2014) - -```bibtex -@article{h36m_pami, - author = {Ionescu, Catalin and Papava, Dragos and Olaru, Vlad and Sminchisescu, Cristian}, - title = {Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments}, - journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, - publisher = {IEEE Computer Society}, - volume = {36}, - number = {7}, - pages = {1325-1339}, - month = {jul}, - year = {2014} -} -``` - -
- -Results on Human3.6M dataset with ground truth 2D detections - -| Arch | MPJPE | P-MPJPE | ckpt | log | -| :-------------------------------------------------------------- | :---: | :-----: | :-------------------------------------------------------------: | :------------------------------------------------------------: | -| [SimpleBaseline3D-tcn1](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_simplebaseline3d_8xb64-200e_h36m.py) | 43.4 | 34.3 | [ckpt](https://download.openmmlab.com/mmpose/body3d/simple_baseline/simple3Dbaseline_h36m-f0ad73a4_20210419.pth) | [log](https://download.openmmlab.com/mmpose/body3d/simple_baseline/20210415_065056.log.json) | - -1 Differing from the original paper, we didn't apply the `max-norm constraint` because we found this led to a better convergence and performance. + + +
+SimpleBaseline3D (ICCV'2017) + +```bibtex +@inproceedings{martinez_2017_3dbaseline, + title={A simple yet effective baseline for 3d human pose estimation}, + author={Martinez, Julieta and Hossain, Rayat and Romero, Javier and Little, James J.}, + booktitle={ICCV}, + year={2017} +} +``` + +
+ + + +
+Human3.6M (TPAMI'2014) + +```bibtex +@article{h36m_pami, + author = {Ionescu, Catalin and Papava, Dragos and Olaru, Vlad and Sminchisescu, Cristian}, + title = {Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments}, + journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, + publisher = {IEEE Computer Society}, + volume = {36}, + number = {7}, + pages = {1325-1339}, + month = {jul}, + year = {2014} +} +``` + +
+ +Results on Human3.6M dataset with ground truth 2D detections + +| Arch | MPJPE | P-MPJPE | ckpt | log | +| :-------------------------------------------------------------- | :---: | :-----: | :-------------------------------------------------------------: | :------------------------------------------------------------: | +| [SimpleBaseline3D-tcn1](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_simplebaseline3d_8xb64-200e_h36m.py) | 43.4 | 34.3 | [ckpt](https://download.openmmlab.com/mmpose/body3d/simple_baseline/simple3Dbaseline_h36m-f0ad73a4_20210419.pth) | [log](https://download.openmmlab.com/mmpose/body3d/simple_baseline/20210415_065056.log.json) | + +1 Differing from the original paper, we didn't apply the `max-norm constraint` because we found this led to a better convergence and performance. diff --git a/configs/body_3d_keypoint/pose_lift/h36m/simplebaseline3d_h36m.yml b/configs/body_3d_keypoint/pose_lift/h36m/simplebaseline3d_h36m.yml index 1a8f32f82c..d97e6c1c75 100644 --- a/configs/body_3d_keypoint/pose_lift/h36m/simplebaseline3d_h36m.yml +++ b/configs/body_3d_keypoint/pose_lift/h36m/simplebaseline3d_h36m.yml @@ -1,21 +1,21 @@ -Collections: -- Name: SimpleBaseline3D - Paper: - Title: A simple yet effective baseline for 3d human pose estimation - URL: http://openaccess.thecvf.com/content_iccv_2017/html/Martinez_A_Simple_yet_ICCV_2017_paper.html - README: https://github.com/open-mmlab/mmpose/blob/main/docs/en/papers/algorithms/simplebaseline3d.md -Models: -- Config: configs/body_3d_keypoint/pose_lift/h36m/pose-lift_simplebaseline3d_8xb64-200e_h36m.py - In Collection: SimpleBaseline3D - Metadata: - Architecture: &id001 - - SimpleBaseline3D - Training Data: Human3.6M - Name: pose-lift_simplebaseline3d_8xb64-200e_h36m - Results: - - Dataset: Human3.6M - Metrics: - MPJPE: 43.4 - P-MPJPE: 34.3 - Task: Body 3D Keypoint - Weights: https://download.openmmlab.com/mmpose/body3d/simple_baseline/simple3Dbaseline_h36m-f0ad73a4_20210419.pth +Collections: +- Name: SimpleBaseline3D + Paper: + Title: A simple yet effective baseline for 3d human pose estimation + URL: http://openaccess.thecvf.com/content_iccv_2017/html/Martinez_A_Simple_yet_ICCV_2017_paper.html + README: https://github.com/open-mmlab/mmpose/blob/main/docs/en/papers/algorithms/simplebaseline3d.md +Models: +- Config: configs/body_3d_keypoint/pose_lift/h36m/pose-lift_simplebaseline3d_8xb64-200e_h36m.py + In Collection: SimpleBaseline3D + Metadata: + Architecture: &id001 + - SimpleBaseline3D + Training Data: Human3.6M + Name: pose-lift_simplebaseline3d_8xb64-200e_h36m + Results: + - Dataset: Human3.6M + Metrics: + MPJPE: 43.4 + P-MPJPE: 34.3 + Task: Body 3D Keypoint + Weights: https://download.openmmlab.com/mmpose/body3d/simple_baseline/simple3Dbaseline_h36m-f0ad73a4_20210419.pth diff --git a/configs/body_3d_keypoint/pose_lift/h36m/videopose3d_h36m.md b/configs/body_3d_keypoint/pose_lift/h36m/videopose3d_h36m.md index f1c75d786a..3875cc496c 100644 --- a/configs/body_3d_keypoint/pose_lift/h36m/videopose3d_h36m.md +++ b/configs/body_3d_keypoint/pose_lift/h36m/videopose3d_h36m.md @@ -1,67 +1,67 @@ - - -
- -VideoPose3D (CVPR'2019) - -```bibtex -@inproceedings{pavllo20193d, -title={3d human pose estimation in video with temporal convolutions and semi-supervised training}, -author={Pavllo, Dario and Feichtenhofer, Christoph and Grangier, David and Auli, Michael}, -booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, -pages={7753--7762}, -year={2019} -} -``` - -
- - - -
-Human3.6M (TPAMI'2014) - -```bibtex -@article{h36m_pami, -author = {Ionescu, Catalin and Papava, Dragos and Olaru, Vlad and Sminchisescu, Cristian}, -title = {Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments}, -journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, -publisher = {IEEE Computer Society}, -volume = {36}, -number = {7}, -pages = {1325-1339}, -month = {jul}, -year = {2014} -} -``` - -
- -Testing results on Human3.6M dataset with ground truth 2D detections, supervised training - -| Arch | Receptive Field | MPJPE | P-MPJPE | ckpt | log | -| :--------------------------------------------------------- | :-------------: | :---: | :-----: | :--------------------------------------------------------: | :-------------------------------------------------------: | -| [VideoPose3D](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-supv_8xb128-80e_h36m.py) | 27 | 40.1 | 30.1 | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_supervised-fe8fbba9_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_supervised_20210527.log.json) | -| [VideoPose3D](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-81frm-supv_8xb128-80e_h36m.py) | 81 | 39.1 | 29.3 | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_81frames_fullconv_supervised-1f2d1104_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_81frames_fullconv_supervised_20210527.log.json) | -| [VideoPose3D](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv_8xb128-80e_h36m.py) | 243 | | | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised-880bea25_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_20210527.log.json) | - -Testing results on Human3.6M dataset with CPN 2D detections1, supervised training - -| Arch | Receptive Field | MPJPE | P-MPJPE | ckpt | log | -| :--------------------------------------------------------- | :-------------: | :---: | :-----: | :--------------------------------------------------------: | :-------------------------------------------------------: | -| [VideoPose3D](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-1frm-supv-cpn-ft_8xb128-80e_h36m.py) | 1 | 53.0 | 41.3 | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_1frame_fullconv_supervised_cpn_ft-5c3afaed_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_1frame_fullconv_supervised_cpn_ft_20210527.log.json) | -| [VideoPose3D](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv-cpn-ft_8xb128-200e_h36m.py) | 243 | | | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_cpn_ft-88f5abbb_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_cpn_ft_20210527.log.json) | - -Testing results on Human3.6M dataset with ground truth 2D detections, semi-supervised training - -| Training Data | Arch | Receptive Field | MPJPE | P-MPJPE | N-MPJPE | ckpt | log | -| :------------ | :-------------------------------------------------: | :-------------: | :---: | :-----: | :-----: | :-------------------------------------------------: | :-------------------------------------------------: | -| 10% S1 | [VideoPose3D](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-semi-supv_8xb64-200e_h36m.py) | 27 | 57.2 | 42.4 | 54.2 | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised-54aef83b_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised_20210527.log.json) | - -Testing 
results on Human3.6M dataset with CPN 2D detections1, semi-supervised training - -| Training Data | Arch | Receptive Field | MPJPE | P-MPJPE | N-MPJPE | ckpt | log | -| :------------ | :----------------------------: | :-------------: | :---: | :-----: | :-----: | :------------------------------------------------------------: | :-----------------------------------------------------------: | -| 10% S1 | [VideoPose3D](/configs/xxx.py) | 27 | 67.3 | 50.4 | 63.6 | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised_cpn_ft-71be9cde_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised_cpn_ft_20210527.log.json) | - -1 CPN 2D detections are provided by [official repo](https://github.com/facebookresearch/VideoPose3D/blob/master/DATASETS.md). The reformatted version used in this repository can be downloaded from [train_detection](https://download.openmmlab.com/mmpose/body3d/videopose/cpn_ft_h36m_dbb_train.npy) and [test_detection](https://download.openmmlab.com/mmpose/body3d/videopose/cpn_ft_h36m_dbb_test.npy). + + +
+ +VideoPose3D (CVPR'2019) + +```bibtex +@inproceedings{pavllo20193d, +title={3d human pose estimation in video with temporal convolutions and semi-supervised training}, +author={Pavllo, Dario and Feichtenhofer, Christoph and Grangier, David and Auli, Michael}, +booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, +pages={7753--7762}, +year={2019} +} +``` + +
+ + + +
+Human3.6M (TPAMI'2014) + +```bibtex +@article{h36m_pami, +author = {Ionescu, Catalin and Papava, Dragos and Olaru, Vlad and Sminchisescu, Cristian}, +title = {Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments}, +journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, +publisher = {IEEE Computer Society}, +volume = {36}, +number = {7}, +pages = {1325-1339}, +month = {jul}, +year = {2014} +} +``` + +
+ +Testing results on Human3.6M dataset with ground truth 2D detections, supervised training + +| Arch | Receptive Field | MPJPE | P-MPJPE | ckpt | log | +| :--------------------------------------------------------- | :-------------: | :---: | :-----: | :--------------------------------------------------------: | :-------------------------------------------------------: | +| [VideoPose3D](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-supv_8xb128-80e_h36m.py) | 27 | 40.1 | 30.1 | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_supervised-fe8fbba9_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_supervised_20210527.log.json) | +| [VideoPose3D](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-81frm-supv_8xb128-80e_h36m.py) | 81 | 39.1 | 29.3 | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_81frames_fullconv_supervised-1f2d1104_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_81frames_fullconv_supervised_20210527.log.json) | +| [VideoPose3D](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv_8xb128-80e_h36m.py) | 243 | | | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised-880bea25_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_20210527.log.json) | + +Testing results on Human3.6M dataset with CPN 2D detections1, supervised training + +| Arch | Receptive Field | MPJPE | P-MPJPE | ckpt | log | +| :--------------------------------------------------------- | :-------------: | :---: | :-----: | :--------------------------------------------------------: | :-------------------------------------------------------: | +| [VideoPose3D](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-1frm-supv-cpn-ft_8xb128-80e_h36m.py) | 1 | 53.0 | 41.3 | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_1frame_fullconv_supervised_cpn_ft-5c3afaed_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_1frame_fullconv_supervised_cpn_ft_20210527.log.json) | +| [VideoPose3D](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv-cpn-ft_8xb128-200e_h36m.py) | 243 | | | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_cpn_ft-88f5abbb_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_cpn_ft_20210527.log.json) | + +Testing results on Human3.6M dataset with ground truth 2D detections, semi-supervised training + +| Training Data | Arch | Receptive Field | MPJPE | P-MPJPE | N-MPJPE | ckpt | log | +| :------------ | :-------------------------------------------------: | :-------------: | :---: | :-----: | :-----: | :-------------------------------------------------: | :-------------------------------------------------: | +| 10% S1 | [VideoPose3D](/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-semi-supv_8xb64-200e_h36m.py) | 27 | 57.2 | 42.4 | 54.2 | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised-54aef83b_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised_20210527.log.json) | + +Testing 
results on Human3.6M dataset with CPN 2D detections1, semi-supervised training + +| Training Data | Arch | Receptive Field | MPJPE | P-MPJPE | N-MPJPE | ckpt | log | +| :------------ | :----------------------------: | :-------------: | :---: | :-----: | :-----: | :------------------------------------------------------------: | :-----------------------------------------------------------: | +| 10% S1 | [VideoPose3D](/configs/xxx.py) | 27 | 67.3 | 50.4 | 63.6 | [ckpt](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised_cpn_ft-71be9cde_20210527.pth) | [log](https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised_cpn_ft_20210527.log.json) | + +1 CPN 2D detections are provided by [official repo](https://github.com/facebookresearch/VideoPose3D/blob/master/DATASETS.md). The reformatted version used in this repository can be downloaded from [train_detection](https://download.openmmlab.com/mmpose/body3d/videopose/cpn_ft_h36m_dbb_train.npy) and [test_detection](https://download.openmmlab.com/mmpose/body3d/videopose/cpn_ft_h36m_dbb_test.npy). diff --git a/configs/body_3d_keypoint/pose_lift/h36m/videopose3d_h36m.yml b/configs/body_3d_keypoint/pose_lift/h36m/videopose3d_h36m.yml index 6b9d92c115..76a15e0d4a 100644 --- a/configs/body_3d_keypoint/pose_lift/h36m/videopose3d_h36m.yml +++ b/configs/body_3d_keypoint/pose_lift/h36m/videopose3d_h36m.yml @@ -1,103 +1,103 @@ -Collections: -- Name: VideoPose3D - Paper: - Title: 3d human pose estimation in video with temporal convolutions and semi-supervised - training - URL: http://openaccess.thecvf.com/content_CVPR_2019/html/Pavllo_3D_Human_Pose_Estimation_in_Video_With_Temporal_Convolutions_and_CVPR_2019_paper.html - README: https://github.com/open-mmlab/mmpose/blob/main/docs/en/papers/algorithms/videopose3d.md -Models: -- Config: configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv_8xb128-80e_h36m.py - In Collection: VideoPose3D - Metadata: - Architecture: &id001 - - VideoPose3D - Training Data: Human3.6M - Name: pose-lift_videopose3d-243frm-supv_8xb128-80e_h36m - Results: - - Dataset: Human3.6M - Metrics: - MPJPE: 40.0 - P-MPJPE: 30.1 - Task: Body 3D Keypoint - Weights: https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_supervised-fe8fbba9_20210527.pth -- Config: configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-81frm-supv_8xb128-80e_h36m.py - In Collection: VideoPose3D - Metadata: - Architecture: *id001 - Training Data: Human3.6M - Name: pose-lift_videopose3d-81frm-supv_8xb128-80e_h36m - Results: - - Dataset: Human3.6M - Metrics: - MPJPE: 38.9 - P-MPJPE: 29.2 - Task: Body 3D Keypoint - Weights: https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_81frames_fullconv_supervised-1f2d1104_20210527.pth -- Config: configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv_8xb128-80e_h36m.py - In Collection: VideoPose3D - Metadata: - Architecture: *id001 - Training Data: Human3.6M - Name: pose-lift_videopose3d-243frm-supv_8xb128-80e_h36m - Results: - - Dataset: Human3.6M - Metrics: - MPJPE: 37.6 - P-MPJPE: 28.3 - Task: Body 3D Keypoint - Weights: https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised-880bea25_20210527.pth -- Config: configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-1frm-supv-cpn-ft_8xb128-80e_h36m.py - In Collection: VideoPose3D - Metadata: - Architecture: *id001 - Training Data: Human3.6M - 
Name: pose-lift_videopose3d-1frm-supv-cpn-ft_8xb128-80e_h36m - Results: - - Dataset: Human3.6M - Metrics: - MPJPE: 52.9 - P-MPJPE: 41.3 - Task: Body 3D Keypoint - Weights: https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_1frame_fullconv_supervised_cpn_ft-5c3afaed_20210527.pth -- Config: configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv-cpn-ft_8xb128-200e_h36m.py - In Collection: VideoPose3D - Alias: human3d - Metadata: - Architecture: *id001 - Training Data: Human3.6M - Name: pose-lift_videopose3d-243frm-supv-cpn-ft_8xb128-200e_h36m - Results: - - Dataset: Human3.6M - Metrics: - MPJPE: 47.9 - P-MPJPE: 38.0 - Task: Body 3D Keypoint - Weights: https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_cpn_ft-88f5abbb_20210527.pth -- Config: configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-semi-supv_8xb64-200e_h36m.py - In Collection: VideoPose3D - Metadata: - Architecture: *id001 - Training Data: Human3.6M - Name: pose-lift_videopose3d-27frm-semi-supv_8xb64-200e_h36m - Results: - - Dataset: Human3.6M - Metrics: - MPJPE: 58.1 - N-MPJPE: 54.7 - P-MPJPE: 42.8 - Task: Body 3D Keypoint - Weights: https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised-54aef83b_20210527.pth -- Config: configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-semi-supv-cpn-ft_8xb64-200e_h36m.py - In Collection: VideoPose3D - Metadata: - Architecture: *id001 - Training Data: Human3.6M - Name: pose-lift_videopose3d-27frm-semi-supv-cpn-ft_8xb64-200e_h36m - Results: - - Dataset: Human3.6M - Metrics: - MPJPE: 67.4 - N-MPJPE: 63.2 - P-MPJPE: 50.1 - Task: Body 3D Keypoint - Weights: https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised_cpn_ft-71be9cde_20210527.pth +Collections: +- Name: VideoPose3D + Paper: + Title: 3d human pose estimation in video with temporal convolutions and semi-supervised + training + URL: http://openaccess.thecvf.com/content_CVPR_2019/html/Pavllo_3D_Human_Pose_Estimation_in_Video_With_Temporal_Convolutions_and_CVPR_2019_paper.html + README: https://github.com/open-mmlab/mmpose/blob/main/docs/en/papers/algorithms/videopose3d.md +Models: +- Config: configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv_8xb128-80e_h36m.py + In Collection: VideoPose3D + Metadata: + Architecture: &id001 + - VideoPose3D + Training Data: Human3.6M + Name: pose-lift_videopose3d-243frm-supv_8xb128-80e_h36m + Results: + - Dataset: Human3.6M + Metrics: + MPJPE: 40.0 + P-MPJPE: 30.1 + Task: Body 3D Keypoint + Weights: https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_supervised-fe8fbba9_20210527.pth +- Config: configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-81frm-supv_8xb128-80e_h36m.py + In Collection: VideoPose3D + Metadata: + Architecture: *id001 + Training Data: Human3.6M + Name: pose-lift_videopose3d-81frm-supv_8xb128-80e_h36m + Results: + - Dataset: Human3.6M + Metrics: + MPJPE: 38.9 + P-MPJPE: 29.2 + Task: Body 3D Keypoint + Weights: https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_81frames_fullconv_supervised-1f2d1104_20210527.pth +- Config: configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv_8xb128-80e_h36m.py + In Collection: VideoPose3D + Metadata: + Architecture: *id001 + Training Data: Human3.6M + Name: pose-lift_videopose3d-243frm-supv_8xb128-80e_h36m + Results: + - Dataset: Human3.6M + 
Metrics: + MPJPE: 37.6 + P-MPJPE: 28.3 + Task: Body 3D Keypoint + Weights: https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised-880bea25_20210527.pth +- Config: configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-1frm-supv-cpn-ft_8xb128-80e_h36m.py + In Collection: VideoPose3D + Metadata: + Architecture: *id001 + Training Data: Human3.6M + Name: pose-lift_videopose3d-1frm-supv-cpn-ft_8xb128-80e_h36m + Results: + - Dataset: Human3.6M + Metrics: + MPJPE: 52.9 + P-MPJPE: 41.3 + Task: Body 3D Keypoint + Weights: https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_1frame_fullconv_supervised_cpn_ft-5c3afaed_20210527.pth +- Config: configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv-cpn-ft_8xb128-200e_h36m.py + In Collection: VideoPose3D + Alias: human3d + Metadata: + Architecture: *id001 + Training Data: Human3.6M + Name: pose-lift_videopose3d-243frm-supv-cpn-ft_8xb128-200e_h36m + Results: + - Dataset: Human3.6M + Metrics: + MPJPE: 47.9 + P-MPJPE: 38.0 + Task: Body 3D Keypoint + Weights: https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_cpn_ft-88f5abbb_20210527.pth +- Config: configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-semi-supv_8xb64-200e_h36m.py + In Collection: VideoPose3D + Metadata: + Architecture: *id001 + Training Data: Human3.6M + Name: pose-lift_videopose3d-27frm-semi-supv_8xb64-200e_h36m + Results: + - Dataset: Human3.6M + Metrics: + MPJPE: 58.1 + N-MPJPE: 54.7 + P-MPJPE: 42.8 + Task: Body 3D Keypoint + Weights: https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised-54aef83b_20210527.pth +- Config: configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-27frm-semi-supv-cpn-ft_8xb64-200e_h36m.py + In Collection: VideoPose3D + Metadata: + Architecture: *id001 + Training Data: Human3.6M + Name: pose-lift_videopose3d-27frm-semi-supv-cpn-ft_8xb64-200e_h36m + Results: + - Dataset: Human3.6M + Metrics: + MPJPE: 67.4 + N-MPJPE: 63.2 + P-MPJPE: 50.1 + Task: Body 3D Keypoint + Weights: https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_27frames_fullconv_semi-supervised_cpn_ft-71be9cde_20210527.pth diff --git a/configs/face_2d_keypoint/README.md b/configs/face_2d_keypoint/README.md index 9f9370a754..058b9b5afb 100644 --- a/configs/face_2d_keypoint/README.md +++ b/configs/face_2d_keypoint/README.md @@ -1,16 +1,16 @@ -# 2D Face Landmark Detection - -2D face landmark detection (also referred to as face alignment) is defined as the task of detecting the face keypoints from an input image. - -Normally, the input images are cropped face images, where the face locates at the center; -or the rough location (or the bounding box) of the hand is provided. - -## Data preparation - -Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_face_keypoint.md) to prepare data. - -## Demo - -Please follow [Demo](/demo/docs/en/2d_face_demo.md) to run demos. - -
+# 2D Face Landmark Detection
+
+2D face landmark detection (also referred to as face alignment) is defined as the task of detecting face keypoints from an input image.
+
+Normally, the input images are cropped face images, where the face is located at the center;
+or the rough location (or the bounding box) of the face is provided.
+
+## Data preparation
+
+Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_face_keypoint.md) to prepare data.
+
+## Demo
+
+Please follow [Demo](/demo/docs/en/2d_face_demo.md) to run demos.
+
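The README above expects either a cropped face image or a rough face bounding box as input. As a minimal sketch, assuming the MMPose 1.x top-down Python API (`init_model` / `inference_topdown` from `mmpose.apis`) and reusing the RTMPose COCO-WholeBody-Face config and checkpoint that appear later in this patch, a bounding-box-driven call could look like the following; the result field names are assumptions to check against the demo doc linked above.

```python
# Minimal sketch, assuming the MMPose 1.x top-down inference API; verify the exact
# entry points and result fields against /demo/docs/en/2d_face_demo.md.
import numpy as np
from mmpose.apis import inference_topdown, init_model

config = ('configs/face_2d_keypoint/rtmpose/coco_wholebody_face/'
          'rtmpose-m_8xb32-60e_coco-wholebody-face-256x256.py')
checkpoint = ('https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/'
              'rtmpose-m_simcc-coco-wholebody-face_pt-aic-coco_60e-256x256-62026ef2_20230228.pth')

model = init_model(config, checkpoint, device='cpu')

# A rough face bounding box in xyxy format (values here are made up for illustration).
face_bbox = np.array([[120.0, 80.0, 360.0, 340.0]], dtype=np.float32)

# 'portrait.jpg' is a placeholder image path.
results = inference_topdown(model, 'portrait.jpg', bboxes=face_bbox, bbox_format='xyxy')
keypoints = results[0].pred_instances.keypoints  # assumed shape: (num_instances, 68, 2)
print(keypoints.shape)
```

If only a pre-cropped face image is available, the bbox argument can presumably be omitted so the whole image is treated as the face region; again, confirm against the demo documentation.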
diff --git a/configs/face_2d_keypoint/rtmpose/README.md b/configs/face_2d_keypoint/rtmpose/README.md index d0c7f55fb4..c04d5bcb7a 100644 --- a/configs/face_2d_keypoint/rtmpose/README.md +++ b/configs/face_2d_keypoint/rtmpose/README.md @@ -1,32 +1,32 @@ -# RTMPose - -Recent studies on 2D pose estimation have achieved excellent performance on public benchmarks, yet its application in the industrial community still suffers from heavy model parameters and high latency. -In order to bridge this gap, we empirically study five aspects that affect the performance of multi-person pose estimation algorithms: paradigm, backbone network, localization algorithm, training strategy, and deployment inference, and present a high-performance real-time multi-person pose estimation framework, **RTMPose**, based on MMPose. -Our RTMPose-m achieves **75.8% AP** on COCO with **90+ FPS** on an Intel i7-11700 CPU and **430+ FPS** on an NVIDIA GTX 1660 Ti GPU, and RTMPose-l achieves **67.0% AP** on COCO-WholeBody with **130+ FPS**, outperforming existing open-source libraries. -To further evaluate RTMPose's capability in critical real-time applications, we also report the performance after deploying on the mobile device. - -## Results and Models - -### COCO-WholeBody-Face Dataset - -Results on COCO-WholeBody-Face val set - -| Model | Input Size | NME | Details and Download | -| :-------: | :--------: | :----: | :------------------------------------------------------------------------------------: | -| RTMPose-m | 256x256 | 0.0466 | [rtmpose_coco_wholebody_face.md](./coco_wholebody_face/rtmpose_coco_wholebody_face.md) | - -### WFLW Dataset - -Results on WFLW dataset - -| Model | Input Size | NME | Details and Download | -| :-------: | :--------: | :--: | :---------------------------------------: | -| RTMPose-m | 256x256 | 4.01 | [rtmpose_wflw.md](./wflw/rtmpose_wflw.md) | - -### LaPa Dataset - -Results on LaPa dataset - -| Model | Input Size | NME | Details and Download | -| :-------: | :--------: | :--: | :---------------------------------------: | -| RTMPose-m | 256x256 | 1.29 | [rtmpose_lapa.md](./lapa/rtmpose_lapa.md) | +# RTMPose + +Recent studies on 2D pose estimation have achieved excellent performance on public benchmarks, yet its application in the industrial community still suffers from heavy model parameters and high latency. +In order to bridge this gap, we empirically study five aspects that affect the performance of multi-person pose estimation algorithms: paradigm, backbone network, localization algorithm, training strategy, and deployment inference, and present a high-performance real-time multi-person pose estimation framework, **RTMPose**, based on MMPose. +Our RTMPose-m achieves **75.8% AP** on COCO with **90+ FPS** on an Intel i7-11700 CPU and **430+ FPS** on an NVIDIA GTX 1660 Ti GPU, and RTMPose-l achieves **67.0% AP** on COCO-WholeBody with **130+ FPS**, outperforming existing open-source libraries. +To further evaluate RTMPose's capability in critical real-time applications, we also report the performance after deploying on the mobile device. 
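The RTMPose configs later in this patch describe the head with a `SimCCLabel` codec (`input_size=(256, 256)`, `simcc_split_ratio=2.0`), i.e. each keypoint coordinate is predicted by classifying it into 256 × 2 = 512 sub-pixel bins per axis. The snippet below is only a simplified illustration of that coordinate-classification idea (hard one-hot labels and a plain argmax decode, without the Gaussian smoothing of the configured `sigma` or DARK refinement), not the library's actual codec.

```python
import numpy as np

# Simplified SimCC-style coordinate classification, mirroring the numbers used in
# the configs below: input_size=256 and simcc_split_ratio=2.0 give 512 bins per axis.
input_size = 256
split_ratio = 2.0
num_bins = int(input_size * split_ratio)

def encode(coord):
    """Hard one-hot label over sub-pixel bins for one coordinate (the real codec
    replaces this with a Gaussian centered on the same bin)."""
    label = np.zeros(num_bins, dtype=np.float32)
    label[int(round(coord * split_ratio))] = 1.0
    return label

def decode(bin_scores):
    """Argmax over bins, mapped back to pixel units."""
    return np.argmax(bin_scores) / split_ratio

x_label = encode(100.25)   # keypoint x = 100.25 px lands in bin 200 (or 201)
print(decode(x_label))     # 100.0 -- off by at most half a bin, i.e. 0.25 px here
```

Raising the split ratio shrinks the quantization step, which is how these configs keep sub-pixel accuracy at a 256×256 input.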
+ +## Results and Models + +### COCO-WholeBody-Face Dataset + +Results on COCO-WholeBody-Face val set + +| Model | Input Size | NME | Details and Download | +| :-------: | :--------: | :----: | :------------------------------------------------------------------------------------: | +| RTMPose-m | 256x256 | 0.0466 | [rtmpose_coco_wholebody_face.md](./coco_wholebody_face/rtmpose_coco_wholebody_face.md) | + +### WFLW Dataset + +Results on WFLW dataset + +| Model | Input Size | NME | Details and Download | +| :-------: | :--------: | :--: | :---------------------------------------: | +| RTMPose-m | 256x256 | 4.01 | [rtmpose_wflw.md](./wflw/rtmpose_wflw.md) | + +### LaPa Dataset + +Results on LaPa dataset + +| Model | Input Size | NME | Details and Download | +| :-------: | :--------: | :--: | :---------------------------------------: | +| RTMPose-m | 256x256 | 1.29 | [rtmpose_lapa.md](./lapa/rtmpose_lapa.md) | diff --git a/configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose-m_8xb32-60e_coco-wholebody-face-256x256.py b/configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose-m_8xb32-60e_coco-wholebody-face-256x256.py index 958a361c07..07db40c8bb 100644 --- a/configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose-m_8xb32-60e_coco-wholebody-face-256x256.py +++ b/configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose-m_8xb32-60e_coco-wholebody-face-256x256.py @@ -1,231 +1,231 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 60 -stage2_num_epochs = 10 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=1) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(256, 256), - sigma=(5.66, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=768, - out_channels=68, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, 
- beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'CocoWholeBodyFaceDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - # dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - # dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict( - save_best='NME', rule='less', max_keep_ckpts=1, interval=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='NME', - norm_mode='keypoint_distance', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 60 +stage2_num_epochs = 10 +base_lr = 4e-3 + +train_cfg 
= dict(max_epochs=max_epochs, val_interval=1) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(256, 256), + sigma=(5.66, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=68, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoWholeBodyFaceDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + # dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + # 
dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='NME', rule='less', max_keep_ckpts=1, interval=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose_coco_wholebody_face.md b/configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose_coco_wholebody_face.md index 77d99bc63f..fb09265da4 100644 --- a/configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose_coco_wholebody_face.md +++ b/configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose_coco_wholebody_face.md @@ -1,39 +1,39 @@ - - -
-RTMDet (ArXiv 2022) - -```bibtex -@misc{lyu2022rtmdet, - title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, - author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, - year={2022}, - eprint={2212.07784}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -
- - - -
-COCO-WholeBody-Face (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -Results on COCO-WholeBody-Face val set - -| Arch | Input Size | NME | ckpt | log | -| :------------------------------------------------------------ | :--------: | :----: | :------------------------------------------------------------: | :-----------------------------------------------------------: | -| [pose_rtmpose_m](/configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose-m_8xb32-60e_coco-wholebody-face-256x256.py) | 256x256 | 0.0466 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody-face_pt-aic-coco_60e-256x256-62026ef2_20230228.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody-face_pt-aic-coco_60e-256x256-62026ef2_20230228.json) | + + +
+RTMDet (ArXiv 2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+COCO-WholeBody-Face (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Face val set + +| Arch | Input Size | NME | ckpt | log | +| :------------------------------------------------------------ | :--------: | :----: | :------------------------------------------------------------: | :-----------------------------------------------------------: | +| [pose_rtmpose_m](/configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose-m_8xb32-60e_coco-wholebody-face-256x256.py) | 256x256 | 0.0466 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody-face_pt-aic-coco_60e-256x256-62026ef2_20230228.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody-face_pt-aic-coco_60e-256x256-62026ef2_20230228.json) | diff --git a/configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose_coco_wholebody_face.yml b/configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose_coco_wholebody_face.yml index fdc2599e71..00b090637c 100644 --- a/configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose_coco_wholebody_face.yml +++ b/configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose_coco_wholebody_face.yml @@ -1,14 +1,14 @@ -Models: -- Config: configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose-m_8xb32-60e_coco-wholebody-face-256x256.py - In Collection: RTMPose - Metadata: - Architecture: - - RTMPose - Training Data: COCO-WholeBody-Face - Name: rtmpose-m_8xb32-60e_coco-wholebody-face-256x256 - Results: - - Dataset: COCO-WholeBody-Face - Metrics: - NME: 0.0466 - Task: Face 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody-face_pt-aic-coco_60e-256x256-62026ef2_20230228.pth +Models: +- Config: configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose-m_8xb32-60e_coco-wholebody-face-256x256.py + In Collection: RTMPose + Metadata: + Architecture: + - RTMPose + Training Data: COCO-WholeBody-Face + Name: rtmpose-m_8xb32-60e_coco-wholebody-face-256x256 + Results: + - Dataset: COCO-WholeBody-Face + Metrics: + NME: 0.0466 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody-face_pt-aic-coco_60e-256x256-62026ef2_20230228.pth diff --git a/configs/face_2d_keypoint/rtmpose/face6/rtmpose-m_8xb256-120e_face6-256x256.py b/configs/face_2d_keypoint/rtmpose/face6/rtmpose-m_8xb256-120e_face6-256x256.py index abbb2ce985..22d28dd58c 100644 --- a/configs/face_2d_keypoint/rtmpose/face6/rtmpose-m_8xb256-120e_face6-256x256.py +++ b/configs/face_2d_keypoint/rtmpose/face6/rtmpose-m_8xb256-120e_face6-256x256.py @@ -1,690 +1,690 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# lapa coco wflw 300w cofw halpe - -# runtime -max_epochs = 120 -stage2_num_epochs = 10 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=1) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.005, - begin=30, - end=max_epochs, - T_max=max_epochs - 30, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec 
settings -codec = dict( - type='SimCCLabel', - input_size=(256, 256), - sigma=(5.66, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' - 'rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=768, - out_channels=106, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'LapaDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.2), - dict(type='MedianBlur', p=0.2), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.5, 1.5], - rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] - -# train dataset -dataset_lapa = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='LaPa/annotations/lapa_trainval.json', - data_prefix=dict(img='pose/LaPa/'), - pipeline=[], -) - -kpt_68_to_106 = [ - # - (0, 0), - (1, 2), - (2, 4), - (3, 6), - (4, 
8), - (5, 10), - (6, 12), - (7, 14), - (8, 16), - (9, 18), - (10, 20), - (11, 22), - (12, 24), - (13, 26), - (14, 28), - (15, 30), - (16, 32), - # - (17, 33), - (18, 34), - (19, 35), - (20, 36), - (21, 37), - # - (22, 42), - (23, 43), - (24, 44), - (25, 45), - (26, 46), - # - (27, 51), - (28, 52), - (29, 53), - (30, 54), - # - (31, 58), - (32, 59), - (33, 60), - (34, 61), - (35, 62), - # - (36, 66), - (39, 70), - # - ((37, 38), 68), - ((40, 41), 72), - # - (42, 75), - (45, 79), - # - ((43, 44), 77), - ((46, 47), 81), - # - (48, 84), - (49, 85), - (50, 86), - (51, 87), - (52, 88), - (53, 89), - (54, 90), - (55, 91), - (56, 92), - (57, 93), - (58, 94), - (59, 95), - (60, 96), - (61, 97), - (62, 98), - (63, 99), - (64, 100), - (65, 101), - (66, 102), - (67, 103) -] - -mapping_halpe = [ - # - (26, 0), - (27, 2), - (28, 4), - (29, 6), - (30, 8), - (31, 10), - (32, 12), - (33, 14), - (34, 16), - (35, 18), - (36, 20), - (37, 22), - (38, 24), - (39, 26), - (40, 28), - (41, 30), - (42, 32), - # - (43, 33), - (44, 34), - (45, 35), - (46, 36), - (47, 37), - # - (48, 42), - (49, 43), - (50, 44), - (51, 45), - (52, 46), - # - (53, 51), - (54, 52), - (55, 53), - (56, 54), - # - (57, 58), - (58, 59), - (59, 60), - (60, 61), - (61, 62), - # - (62, 66), - (65, 70), - # - ((63, 64), 68), - ((66, 67), 72), - # - (68, 75), - (71, 79), - # - ((69, 70), 77), - ((72, 73), 81), - # - (74, 84), - (75, 85), - (76, 86), - (77, 87), - (78, 88), - (79, 89), - (80, 90), - (81, 91), - (82, 92), - (83, 93), - (84, 94), - (85, 95), - (86, 96), - (87, 97), - (88, 98), - (89, 99), - (90, 100), - (91, 101), - (92, 102), - (93, 103) -] - -mapping_wflw = [ - # - (0, 0), - (1, 1), - (2, 2), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), - (17, 17), - (18, 18), - (19, 19), - (20, 20), - (21, 21), - (22, 22), - (23, 23), - (24, 24), - (25, 25), - (26, 26), - (27, 27), - (28, 28), - (29, 29), - (30, 30), - (31, 31), - (32, 32), - # - (33, 33), - (34, 34), - (35, 35), - (36, 36), - (37, 37), - (38, 38), - (39, 39), - (40, 40), - (41, 41), - # - (42, 42), - (43, 43), - (44, 44), - (45, 45), - (46, 46), - (47, 47), - (48, 48), - (49, 49), - (50, 50), - # - (51, 51), - (52, 52), - (53, 53), - (54, 54), - # - (55, 58), - (56, 59), - (57, 60), - (58, 61), - (59, 62), - # - (60, 66), - (61, 67), - (62, 68), - (63, 69), - (64, 70), - (65, 71), - (66, 72), - (67, 73), - # - (68, 75), - (69, 76), - (70, 77), - (71, 78), - (72, 79), - (73, 80), - (74, 81), - (75, 82), - # - (76, 84), - (77, 85), - (78, 86), - (79, 87), - (80, 88), - (81, 89), - (82, 90), - (83, 91), - (84, 92), - (85, 93), - (86, 94), - (87, 95), - (88, 96), - (89, 97), - (90, 98), - (91, 99), - (92, 100), - (93, 101), - (94, 102), - (95, 103), - # - (96, 104), - # - (97, 105) -] - -mapping_cofw = [ - # - (0, 33), - (2, 38), - (4, 35), - (5, 40), - # - (1, 46), - (3, 50), - (6, 44), - (7, 48), - # - (8, 60), - (10, 64), - (12, 62), - (13, 66), - # - (9, 72), - (11, 68), - (14, 70), - (15, 74), - # - (18, 57), - (19, 63), - (20, 54), - (21, 60), - # - (22, 84), - (23, 90), - (24, 87), - (25, 98), - (26, 102), - (27, 93), - # - (28, 16) -] -dataset_coco = dict( - type='CocoWholeBodyFaceDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) - ], -) - -dataset_wflw = 
dict( - type='WFLWDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='wflw/annotations/face_landmarks_wflw_train.json', - data_prefix=dict(img='pose/WFLW/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=mapping_wflw) - ], -) - -dataset_300w = dict( - type='Face300WDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='300w/annotations/face_landmarks_300w_train.json', - data_prefix=dict(img='pose/300w/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) - ], -) - -dataset_cofw = dict( - type='COFWDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='cofw/annotations/cofw_train.json', - data_prefix=dict(img='pose/COFW/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=mapping_cofw) - ], -) - -dataset_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_train_133kpt.json', - data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=mapping_halpe) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/lapa.py'), - datasets=[ - dataset_lapa, dataset_coco, dataset_wflw, dataset_300w, - dataset_cofw, dataset_halpe - ], - pipeline=train_pipeline, - test_mode=False, - )) -val_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='LaPa/annotations/lapa_test.json', - data_prefix=dict(img='pose/LaPa/'), - test_mode=True, - pipeline=val_pipeline, - )) - -# test dataset -val_lapa = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='LaPa/annotations/lapa_test.json', - data_prefix=dict(img='pose/LaPa/'), - pipeline=[], -) - -val_coco = dict( - type='CocoWholeBodyFaceDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) - ], -) - -val_wflw = dict( - type='WFLWDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='wflw/annotations/face_landmarks_wflw_test.json', - data_prefix=dict(img='pose/WFLW/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=mapping_wflw) - ], -) - -val_300w = dict( - type='Face300WDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='300w/annotations/face_landmarks_300w_test.json', - data_prefix=dict(img='pose/300w/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) - ], -) - -val_cofw = dict( - type='COFWDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='cofw/annotations/cofw_test.json', - data_prefix=dict(img='pose/COFW/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=mapping_cofw) - ], -) - -val_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_val_v1.json', - 
data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=mapping_halpe) - ], -) - -test_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/lapa.py'), - datasets=[val_lapa, val_coco, val_wflw, val_300w, val_cofw, val_halpe], - pipeline=val_pipeline, - test_mode=True, - )) - -# hooks -default_hooks = dict( - checkpoint=dict( - save_best='NME', rule='less', max_keep_ckpts=1, interval=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='NME', - norm_mode='keypoint_distance', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# lapa coco wflw 300w cofw halpe + +# runtime +max_epochs = 120 +stage2_num_epochs = 10 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=1) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.005, + begin=30, + end=max_epochs, + T_max=max_epochs - 30, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(256, 256), + sigma=(5.66, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=106, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'LapaDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', 
backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.2), + dict(type='MedianBlur', p=0.2), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] + +# train dataset +dataset_lapa = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='LaPa/annotations/lapa_trainval.json', + data_prefix=dict(img='pose/LaPa/'), + pipeline=[], +) + +kpt_68_to_106 = [ + # + (0, 0), + (1, 2), + (2, 4), + (3, 6), + (4, 8), + (5, 10), + (6, 12), + (7, 14), + (8, 16), + (9, 18), + (10, 20), + (11, 22), + (12, 24), + (13, 26), + (14, 28), + (15, 30), + (16, 32), + # + (17, 33), + (18, 34), + (19, 35), + (20, 36), + (21, 37), + # + (22, 42), + (23, 43), + (24, 44), + (25, 45), + (26, 46), + # + (27, 51), + (28, 52), + (29, 53), + (30, 54), + # + (31, 58), + (32, 59), + (33, 60), + (34, 61), + (35, 62), + # + (36, 66), + (39, 70), + # + ((37, 38), 68), + ((40, 41), 72), + # + (42, 75), + (45, 79), + # + ((43, 44), 77), + ((46, 47), 81), + # + (48, 84), + (49, 85), + (50, 86), + (51, 87), + (52, 88), + (53, 89), + (54, 90), + (55, 91), + (56, 92), + (57, 93), + (58, 94), + (59, 95), + (60, 96), + (61, 97), + (62, 98), + (63, 99), + (64, 100), + (65, 101), + (66, 102), + (67, 103) +] + +mapping_halpe = [ + # + (26, 0), + (27, 2), + (28, 4), + (29, 6), + (30, 8), + (31, 10), + (32, 12), + (33, 14), + (34, 16), + (35, 18), + (36, 20), + (37, 22), + (38, 24), + (39, 26), + (40, 28), + (41, 30), + (42, 32), + # + (43, 33), + (44, 34), + (45, 35), + (46, 36), + (47, 37), + # + (48, 42), + (49, 43), + (50, 44), + (51, 45), + (52, 46), + # + (53, 51), + (54, 52), + (55, 53), + (56, 54), + # + (57, 58), + (58, 59), + (59, 60), + (60, 61), + (61, 62), + # + (62, 66), + (65, 70), + # + ((63, 64), 68), + ((66, 67), 72), + # + (68, 75), + (71, 79), + # + ((69, 70), 77), + ((72, 73), 81), + # + (74, 84), + (75, 85), + (76, 86), + (77, 87), + (78, 88), + (79, 89), + (80, 90), + (81, 91), + (82, 92), + (83, 93), + (84, 94), + (85, 95), + (86, 96), + (87, 97), + (88, 98), + (89, 99), + 
(90, 100), + (91, 101), + (92, 102), + (93, 103) +] + +mapping_wflw = [ + # + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), + (17, 17), + (18, 18), + (19, 19), + (20, 20), + (21, 21), + (22, 22), + (23, 23), + (24, 24), + (25, 25), + (26, 26), + (27, 27), + (28, 28), + (29, 29), + (30, 30), + (31, 31), + (32, 32), + # + (33, 33), + (34, 34), + (35, 35), + (36, 36), + (37, 37), + (38, 38), + (39, 39), + (40, 40), + (41, 41), + # + (42, 42), + (43, 43), + (44, 44), + (45, 45), + (46, 46), + (47, 47), + (48, 48), + (49, 49), + (50, 50), + # + (51, 51), + (52, 52), + (53, 53), + (54, 54), + # + (55, 58), + (56, 59), + (57, 60), + (58, 61), + (59, 62), + # + (60, 66), + (61, 67), + (62, 68), + (63, 69), + (64, 70), + (65, 71), + (66, 72), + (67, 73), + # + (68, 75), + (69, 76), + (70, 77), + (71, 78), + (72, 79), + (73, 80), + (74, 81), + (75, 82), + # + (76, 84), + (77, 85), + (78, 86), + (79, 87), + (80, 88), + (81, 89), + (82, 90), + (83, 91), + (84, 92), + (85, 93), + (86, 94), + (87, 95), + (88, 96), + (89, 97), + (90, 98), + (91, 99), + (92, 100), + (93, 101), + (94, 102), + (95, 103), + # + (96, 104), + # + (97, 105) +] + +mapping_cofw = [ + # + (0, 33), + (2, 38), + (4, 35), + (5, 40), + # + (1, 46), + (3, 50), + (6, 44), + (7, 48), + # + (8, 60), + (10, 64), + (12, 62), + (13, 66), + # + (9, 72), + (11, 68), + (14, 70), + (15, 74), + # + (18, 57), + (19, 63), + (20, 54), + (21, 60), + # + (22, 84), + (23, 90), + (24, 87), + (25, 98), + (26, 102), + (27, 93), + # + (28, 16) +] +dataset_coco = dict( + type='CocoWholeBodyFaceDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) + ], +) + +dataset_wflw = dict( + type='WFLWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='wflw/annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='pose/WFLW/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_wflw) + ], +) + +dataset_300w = dict( + type='Face300WDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='300w/annotations/face_landmarks_300w_train.json', + data_prefix=dict(img='pose/300w/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) + ], +) + +dataset_cofw = dict( + type='COFWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='cofw/annotations/cofw_train.json', + data_prefix=dict(img='pose/COFW/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_cofw) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_133kpt.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_halpe) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/lapa.py'), + datasets=[ + dataset_lapa, dataset_coco, dataset_wflw, dataset_300w, + dataset_cofw, dataset_halpe + ], + 
pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='LaPa/annotations/lapa_test.json', + data_prefix=dict(img='pose/LaPa/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# test dataset +val_lapa = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='LaPa/annotations/lapa_test.json', + data_prefix=dict(img='pose/LaPa/'), + pipeline=[], +) + +val_coco = dict( + type='CocoWholeBodyFaceDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) + ], +) + +val_wflw = dict( + type='WFLWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='wflw/annotations/face_landmarks_wflw_test.json', + data_prefix=dict(img='pose/WFLW/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_wflw) + ], +) + +val_300w = dict( + type='Face300WDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='300w/annotations/face_landmarks_300w_test.json', + data_prefix=dict(img='pose/300w/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) + ], +) + +val_cofw = dict( + type='COFWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='cofw/annotations/cofw_test.json', + data_prefix=dict(img='pose/COFW/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_cofw) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_halpe) + ], +) + +test_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/lapa.py'), + datasets=[val_lapa, val_coco, val_wflw, val_300w, val_cofw, val_halpe], + pipeline=val_pipeline, + test_mode=True, + )) + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='NME', rule='less', max_keep_ckpts=1, interval=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/configs/face_2d_keypoint/rtmpose/face6/rtmpose-s_8xb256-120e_face6-256x256.py b/configs/face_2d_keypoint/rtmpose/face6/rtmpose-s_8xb256-120e_face6-256x256.py index 62fa305115..b18d19dd85 100644 --- a/configs/face_2d_keypoint/rtmpose/face6/rtmpose-s_8xb256-120e_face6-256x256.py +++ b/configs/face_2d_keypoint/rtmpose/face6/rtmpose-s_8xb256-120e_face6-256x256.py @@ -1,691 +1,691 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# lapa coco wflw 300w cofw halpe - -# runtime -max_epochs = 120 -stage2_num_epochs = 10 
-base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=1) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.005, - begin=30, - end=max_epochs, - T_max=max_epochs - 30, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(256, 256), - sigma=(5.66, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.33, - widen_factor=0.5, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' - 'rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e-ea671761.pth') - ), - head=dict( - type='RTMCCHead', - in_channels=512, - out_channels=106, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'LapaDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.2), - dict(type='MedianBlur', p=0.2), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - 
dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] -# train dataset -dataset_lapa = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='LaPa/annotations/lapa_trainval.json', - data_prefix=dict(img='pose/LaPa/'), - pipeline=[], -) - -kpt_68_to_106 = [ - # - (0, 0), - (1, 2), - (2, 4), - (3, 6), - (4, 8), - (5, 10), - (6, 12), - (7, 14), - (8, 16), - (9, 18), - (10, 20), - (11, 22), - (12, 24), - (13, 26), - (14, 28), - (15, 30), - (16, 32), - # - (17, 33), - (18, 34), - (19, 35), - (20, 36), - (21, 37), - # - (22, 42), - (23, 43), - (24, 44), - (25, 45), - (26, 46), - # - (27, 51), - (28, 52), - (29, 53), - (30, 54), - # - (31, 58), - (32, 59), - (33, 60), - (34, 61), - (35, 62), - # - (36, 66), - (39, 70), - # - ((37, 38), 68), - ((40, 41), 72), - # - (42, 75), - (45, 79), - # - ((43, 44), 77), - ((46, 47), 81), - # - (48, 84), - (49, 85), - (50, 86), - (51, 87), - (52, 88), - (53, 89), - (54, 90), - (55, 91), - (56, 92), - (57, 93), - (58, 94), - (59, 95), - (60, 96), - (61, 97), - (62, 98), - (63, 99), - (64, 100), - (65, 101), - (66, 102), - (67, 103) -] - -mapping_halpe = [ - # - (26, 0), - (27, 2), - (28, 4), - (29, 6), - (30, 8), - (31, 10), - (32, 12), - (33, 14), - (34, 16), - (35, 18), - (36, 20), - (37, 22), - (38, 24), - (39, 26), - (40, 28), - (41, 30), - (42, 32), - # - (43, 33), - (44, 34), - (45, 35), - (46, 36), - (47, 37), - # - (48, 42), - (49, 43), - (50, 44), - (51, 45), - (52, 46), - # - (53, 51), - (54, 52), - (55, 53), - (56, 54), - # - (57, 58), - (58, 59), - (59, 60), - (60, 61), - (61, 62), - # - (62, 66), - (65, 70), - # - ((63, 64), 68), - ((66, 67), 72), - # - (68, 75), - (71, 79), - # - ((69, 70), 77), - ((72, 73), 81), - # - (74, 84), - (75, 85), - (76, 86), - (77, 87), - (78, 88), - (79, 89), - (80, 90), - (81, 91), - (82, 92), - (83, 93), - (84, 94), - (85, 95), - (86, 96), - (87, 97), - (88, 98), - (89, 99), - (90, 100), - (91, 101), - (92, 102), - (93, 103) -] - -mapping_wflw = [ - # - (0, 0), - (1, 1), - (2, 2), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), - (17, 17), - (18, 18), - (19, 19), - (20, 20), - (21, 21), - (22, 22), - (23, 23), - (24, 24), - (25, 25), - (26, 26), - (27, 27), - (28, 28), - (29, 29), - (30, 30), - (31, 31), - (32, 32), - # - (33, 33), - (34, 34), - (35, 35), - (36, 36), - (37, 37), - (38, 38), - (39, 39), - (40, 40), - (41, 41), - # - (42, 42), - (43, 43), - (44, 44), - (45, 45), - (46, 46), - (47, 47), - (48, 48), - (49, 49), - (50, 50), - # - (51, 51), - (52, 52), - (53, 53), - (54, 54), - # - (55, 58), - (56, 59), - (57, 60), - (58, 61), - (59, 62), - # - (60, 66), - (61, 67), - (62, 68), - (63, 69), - (64, 70), - (65, 71), - (66, 72), - (67, 73), - # - (68, 75), - (69, 76), - (70, 77), - (71, 78), - (72, 79), - (73, 80), - (74, 81), - (75, 82), - # - (76, 84), - (77, 85), - (78, 86), - (79, 87), - (80, 88), - (81, 89), - (82, 90), - (83, 91), - (84, 92), - (85, 93), - (86, 94), - (87, 95), - (88, 96), - (89, 97), - (90, 98), - (91, 99), - (92, 100), - (93, 101), - 
(94, 102), - (95, 103), - # - (96, 104), - # - (97, 105) -] - -mapping_cofw = [ - # - (0, 33), - (2, 38), - (4, 35), - (5, 40), - # - (1, 46), - (3, 50), - (6, 44), - (7, 48), - # - (8, 60), - (10, 64), - (12, 62), - (13, 66), - # - (9, 72), - (11, 68), - (14, 70), - (15, 74), - # - (18, 57), - (19, 63), - (20, 54), - (21, 60), - # - (22, 84), - (23, 90), - (24, 87), - (25, 98), - (26, 102), - (27, 93), - # - (28, 16) -] -dataset_coco = dict( - type='CocoWholeBodyFaceDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) - ], -) - -dataset_wflw = dict( - type='WFLWDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='wflw/annotations/face_landmarks_wflw_train.json', - data_prefix=dict(img='pose/WFLW/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=mapping_wflw) - ], -) - -dataset_300w = dict( - type='Face300WDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='300w/annotations/face_landmarks_300w_train.json', - data_prefix=dict(img='pose/300w/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) - ], -) - -dataset_cofw = dict( - type='COFWDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='cofw/annotations/cofw_train.json', - data_prefix=dict(img='pose/COFW/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=mapping_cofw) - ], -) - -dataset_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_train_133kpt.json', - data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=mapping_halpe) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - pin_memory=True, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/lapa.py'), - datasets=[ - dataset_lapa, dataset_coco, dataset_wflw, dataset_300w, - dataset_cofw, dataset_halpe - ], - pipeline=train_pipeline, - test_mode=False, - )) -val_dataloader = dict( - batch_size=32, - num_workers=10, - pin_memory=True, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='LaPa/annotations/lapa_test.json', - data_prefix=dict(img='pose/LaPa/'), - test_mode=True, - pipeline=val_pipeline, - )) - -# test dataset -val_lapa = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='LaPa/annotations/lapa_test.json', - data_prefix=dict(img='pose/LaPa/'), - pipeline=[], -) - -val_coco = dict( - type='CocoWholeBodyFaceDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) - ], -) - -val_wflw = dict( - type='WFLWDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='wflw/annotations/face_landmarks_wflw_test.json', - data_prefix=dict(img='pose/WFLW/images/'), - pipeline=[ - dict( - type='KeypointConverter', 
num_keypoints=106, mapping=mapping_wflw) - ], -) - -val_300w = dict( - type='Face300WDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='300w/annotations/face_landmarks_300w_test.json', - data_prefix=dict(img='pose/300w/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) - ], -) - -val_cofw = dict( - type='COFWDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='cofw/annotations/cofw_test.json', - data_prefix=dict(img='pose/COFW/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=mapping_cofw) - ], -) - -val_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_val_v1.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=mapping_halpe) - ], -) - -test_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/lapa.py'), - datasets=[val_lapa, val_coco, val_wflw, val_300w, val_cofw, val_halpe], - pipeline=val_pipeline, - test_mode=True, - )) - -# hooks -default_hooks = dict( - checkpoint=dict( - save_best='NME', rule='less', max_keep_ckpts=1, interval=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='NME', - norm_mode='keypoint_distance', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# lapa coco wflw 300w cofw halpe + +# runtime +max_epochs = 120 +stage2_num_epochs = 10 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=1) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.005, + begin=30, + end=max_epochs, + T_max=max_epochs - 30, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(256, 256), + sigma=(5.66, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e-ea671761.pth') + ), + head=dict( + 
type='RTMCCHead', + in_channels=512, + out_channels=106, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'LapaDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.2), + dict(type='MedianBlur', p=0.2), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +# train dataset +dataset_lapa = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='LaPa/annotations/lapa_trainval.json', + data_prefix=dict(img='pose/LaPa/'), + pipeline=[], +) + +kpt_68_to_106 = [ + # + (0, 0), + (1, 2), + (2, 4), + (3, 6), + (4, 8), + (5, 10), + (6, 12), + (7, 14), + (8, 16), + (9, 18), + (10, 20), + (11, 22), + (12, 24), + (13, 26), + (14, 28), + (15, 30), + (16, 32), + # + (17, 33), + (18, 34), + (19, 35), + (20, 36), + (21, 37), + # + (22, 42), + (23, 43), + (24, 44), + (25, 45), + (26, 46), + # + (27, 51), + (28, 52), + (29, 53), + (30, 54), + # + (31, 58), + (32, 59), + (33, 60), + (34, 61), + (35, 62), + # + (36, 66), + (39, 70), + # + ((37, 38), 68), + ((40, 41), 72), + # + (42, 75), + (45, 79), + # + ((43, 44), 77), + ((46, 47), 81), + # + (48, 84), + (49, 85), + (50, 86), + (51, 87), + (52, 88), + (53, 89), + (54, 90), + (55, 91), + (56, 92), + (57, 93), + (58, 94), + (59, 95), + (60, 96), + (61, 97), + (62, 98), + (63, 99), + (64, 100), + (65, 101), + (66, 102), + (67, 103) +] + +mapping_halpe = [ + # + (26, 0), + (27, 2), + (28, 4), 
+ (29, 6), + (30, 8), + (31, 10), + (32, 12), + (33, 14), + (34, 16), + (35, 18), + (36, 20), + (37, 22), + (38, 24), + (39, 26), + (40, 28), + (41, 30), + (42, 32), + # + (43, 33), + (44, 34), + (45, 35), + (46, 36), + (47, 37), + # + (48, 42), + (49, 43), + (50, 44), + (51, 45), + (52, 46), + # + (53, 51), + (54, 52), + (55, 53), + (56, 54), + # + (57, 58), + (58, 59), + (59, 60), + (60, 61), + (61, 62), + # + (62, 66), + (65, 70), + # + ((63, 64), 68), + ((66, 67), 72), + # + (68, 75), + (71, 79), + # + ((69, 70), 77), + ((72, 73), 81), + # + (74, 84), + (75, 85), + (76, 86), + (77, 87), + (78, 88), + (79, 89), + (80, 90), + (81, 91), + (82, 92), + (83, 93), + (84, 94), + (85, 95), + (86, 96), + (87, 97), + (88, 98), + (89, 99), + (90, 100), + (91, 101), + (92, 102), + (93, 103) +] + +mapping_wflw = [ + # + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), + (17, 17), + (18, 18), + (19, 19), + (20, 20), + (21, 21), + (22, 22), + (23, 23), + (24, 24), + (25, 25), + (26, 26), + (27, 27), + (28, 28), + (29, 29), + (30, 30), + (31, 31), + (32, 32), + # + (33, 33), + (34, 34), + (35, 35), + (36, 36), + (37, 37), + (38, 38), + (39, 39), + (40, 40), + (41, 41), + # + (42, 42), + (43, 43), + (44, 44), + (45, 45), + (46, 46), + (47, 47), + (48, 48), + (49, 49), + (50, 50), + # + (51, 51), + (52, 52), + (53, 53), + (54, 54), + # + (55, 58), + (56, 59), + (57, 60), + (58, 61), + (59, 62), + # + (60, 66), + (61, 67), + (62, 68), + (63, 69), + (64, 70), + (65, 71), + (66, 72), + (67, 73), + # + (68, 75), + (69, 76), + (70, 77), + (71, 78), + (72, 79), + (73, 80), + (74, 81), + (75, 82), + # + (76, 84), + (77, 85), + (78, 86), + (79, 87), + (80, 88), + (81, 89), + (82, 90), + (83, 91), + (84, 92), + (85, 93), + (86, 94), + (87, 95), + (88, 96), + (89, 97), + (90, 98), + (91, 99), + (92, 100), + (93, 101), + (94, 102), + (95, 103), + # + (96, 104), + # + (97, 105) +] + +mapping_cofw = [ + # + (0, 33), + (2, 38), + (4, 35), + (5, 40), + # + (1, 46), + (3, 50), + (6, 44), + (7, 48), + # + (8, 60), + (10, 64), + (12, 62), + (13, 66), + # + (9, 72), + (11, 68), + (14, 70), + (15, 74), + # + (18, 57), + (19, 63), + (20, 54), + (21, 60), + # + (22, 84), + (23, 90), + (24, 87), + (25, 98), + (26, 102), + (27, 93), + # + (28, 16) +] +dataset_coco = dict( + type='CocoWholeBodyFaceDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) + ], +) + +dataset_wflw = dict( + type='WFLWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='wflw/annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='pose/WFLW/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_wflw) + ], +) + +dataset_300w = dict( + type='Face300WDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='300w/annotations/face_landmarks_300w_train.json', + data_prefix=dict(img='pose/300w/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) + ], +) + +dataset_cofw = dict( + type='COFWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='cofw/annotations/cofw_train.json', + data_prefix=dict(img='pose/COFW/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, 
mapping=mapping_cofw) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_133kpt.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_halpe) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + pin_memory=True, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/lapa.py'), + datasets=[ + dataset_lapa, dataset_coco, dataset_wflw, dataset_300w, + dataset_cofw, dataset_halpe + ], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + pin_memory=True, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='LaPa/annotations/lapa_test.json', + data_prefix=dict(img='pose/LaPa/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# test dataset +val_lapa = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='LaPa/annotations/lapa_test.json', + data_prefix=dict(img='pose/LaPa/'), + pipeline=[], +) + +val_coco = dict( + type='CocoWholeBodyFaceDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) + ], +) + +val_wflw = dict( + type='WFLWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='wflw/annotations/face_landmarks_wflw_test.json', + data_prefix=dict(img='pose/WFLW/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_wflw) + ], +) + +val_300w = dict( + type='Face300WDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='300w/annotations/face_landmarks_300w_test.json', + data_prefix=dict(img='pose/300w/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) + ], +) + +val_cofw = dict( + type='COFWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='cofw/annotations/cofw_test.json', + data_prefix=dict(img='pose/COFW/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_cofw) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_halpe) + ], +) + +test_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/lapa.py'), + datasets=[val_lapa, val_coco, val_wflw, val_300w, val_cofw, val_halpe], + pipeline=val_pipeline, + test_mode=True, + )) + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='NME', rule='less', max_keep_ckpts=1, interval=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + 
type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/configs/face_2d_keypoint/rtmpose/face6/rtmpose-t_8xb256-120e_face6-256x256.py b/configs/face_2d_keypoint/rtmpose/face6/rtmpose-t_8xb256-120e_face6-256x256.py index 751bedffe7..88e95173a8 100644 --- a/configs/face_2d_keypoint/rtmpose/face6/rtmpose-t_8xb256-120e_face6-256x256.py +++ b/configs/face_2d_keypoint/rtmpose/face6/rtmpose-t_8xb256-120e_face6-256x256.py @@ -1,689 +1,689 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# lapa coco wflw 300w cofw halpe - -# runtime -max_epochs = 120 -stage2_num_epochs = 10 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=1) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.005, - begin=30, - end=max_epochs, - T_max=90, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(256, 256), - sigma=(5.66, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.167, - widen_factor=0.375, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' - 'rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e-3a2dd350.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=384, - out_channels=106, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'LapaDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.2), - dict(type='MedianBlur', 
p=0.2), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] -# train dataset -dataset_lapa = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='LaPa/annotations/lapa_trainval.json', - data_prefix=dict(img='pose/LaPa/'), - pipeline=[], -) - -kpt_68_to_106 = [ - # - (0, 0), - (1, 2), - (2, 4), - (3, 6), - (4, 8), - (5, 10), - (6, 12), - (7, 14), - (8, 16), - (9, 18), - (10, 20), - (11, 22), - (12, 24), - (13, 26), - (14, 28), - (15, 30), - (16, 32), - # - (17, 33), - (18, 34), - (19, 35), - (20, 36), - (21, 37), - # - (22, 42), - (23, 43), - (24, 44), - (25, 45), - (26, 46), - # - (27, 51), - (28, 52), - (29, 53), - (30, 54), - # - (31, 58), - (32, 59), - (33, 60), - (34, 61), - (35, 62), - # - (36, 66), - (39, 70), - # - ((37, 38), 68), - ((40, 41), 72), - # - (42, 75), - (45, 79), - # - ((43, 44), 77), - ((46, 47), 81), - # - (48, 84), - (49, 85), - (50, 86), - (51, 87), - (52, 88), - (53, 89), - (54, 90), - (55, 91), - (56, 92), - (57, 93), - (58, 94), - (59, 95), - (60, 96), - (61, 97), - (62, 98), - (63, 99), - (64, 100), - (65, 101), - (66, 102), - (67, 103) -] - -mapping_halpe = [ - # - (26, 0), - (27, 2), - (28, 4), - (29, 6), - (30, 8), - (31, 10), - (32, 12), - (33, 14), - (34, 16), - (35, 18), - (36, 20), - (37, 22), - (38, 24), - (39, 26), - (40, 28), - (41, 30), - (42, 32), - # - (43, 33), - (44, 34), - (45, 35), - (46, 36), - (47, 37), - # - (48, 42), - (49, 43), - (50, 44), - (51, 45), - (52, 46), - # - (53, 51), - (54, 52), - (55, 53), - (56, 54), - # - (57, 58), - (58, 59), - (59, 60), - (60, 61), - (61, 62), - # - (62, 66), - (65, 70), - # - ((63, 64), 68), - ((66, 67), 72), - # - (68, 75), - (71, 79), - # - ((69, 70), 77), - ((72, 73), 81), - # - (74, 84), - (75, 85), - (76, 86), - (77, 87), - (78, 88), - (79, 89), - (80, 90), - (81, 91), - (82, 92), - (83, 93), - (84, 94), - (85, 95), - (86, 96), - (87, 97), - (88, 98), - (89, 99), - (90, 100), - (91, 101), - (92, 102), - (93, 103) -] - -mapping_wflw = [ - # - (0, 0), - (1, 1), - (2, 2), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), - (17, 17), - (18, 18), - (19, 19), - (20, 20), - (21, 21), - (22, 22), - (23, 23), - (24, 24), - (25, 25), - (26, 26), - (27, 27), - (28, 28), - (29, 29), - (30, 30), - 
(31, 31), - (32, 32), - # - (33, 33), - (34, 34), - (35, 35), - (36, 36), - (37, 37), - (38, 38), - (39, 39), - (40, 40), - (41, 41), - # - (42, 42), - (43, 43), - (44, 44), - (45, 45), - (46, 46), - (47, 47), - (48, 48), - (49, 49), - (50, 50), - # - (51, 51), - (52, 52), - (53, 53), - (54, 54), - # - (55, 58), - (56, 59), - (57, 60), - (58, 61), - (59, 62), - # - (60, 66), - (61, 67), - (62, 68), - (63, 69), - (64, 70), - (65, 71), - (66, 72), - (67, 73), - # - (68, 75), - (69, 76), - (70, 77), - (71, 78), - (72, 79), - (73, 80), - (74, 81), - (75, 82), - # - (76, 84), - (77, 85), - (78, 86), - (79, 87), - (80, 88), - (81, 89), - (82, 90), - (83, 91), - (84, 92), - (85, 93), - (86, 94), - (87, 95), - (88, 96), - (89, 97), - (90, 98), - (91, 99), - (92, 100), - (93, 101), - (94, 102), - (95, 103), - # - (96, 104), - # - (97, 105) -] - -mapping_cofw = [ - # - (0, 33), - (2, 38), - (4, 35), - (5, 40), - # - (1, 46), - (3, 50), - (6, 44), - (7, 48), - # - (8, 60), - (10, 64), - (12, 62), - (13, 66), - # - (9, 72), - (11, 68), - (14, 70), - (15, 74), - # - (18, 57), - (19, 63), - (20, 54), - (21, 60), - # - (22, 84), - (23, 90), - (24, 87), - (25, 98), - (26, 102), - (27, 93), - # - (28, 16) -] -dataset_coco = dict( - type='CocoWholeBodyFaceDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) - ], -) - -dataset_wflw = dict( - type='WFLWDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='wflw/annotations/face_landmarks_wflw_train.json', - data_prefix=dict(img='pose/WFLW/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=mapping_wflw) - ], -) - -dataset_300w = dict( - type='Face300WDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='300w/annotations/face_landmarks_300w_train.json', - data_prefix=dict(img='pose/300w/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) - ], -) - -dataset_cofw = dict( - type='COFWDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='cofw/annotations/cofw_train.json', - data_prefix=dict(img='pose/COFW/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=mapping_cofw) - ], -) - -dataset_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_train_133kpt.json', - data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=mapping_halpe) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/lapa.py'), - datasets=[ - dataset_lapa, dataset_coco, dataset_wflw, dataset_300w, - dataset_cofw, dataset_halpe - ], - pipeline=train_pipeline, - test_mode=False, - )) -val_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='LaPa/annotations/lapa_test.json', - data_prefix=dict(img='pose/LaPa/'), - test_mode=True, - pipeline=val_pipeline, - )) - -# test 
dataset -val_lapa = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='LaPa/annotations/lapa_test.json', - data_prefix=dict(img='pose/LaPa/'), - pipeline=[], -) - -val_coco = dict( - type='CocoWholeBodyFaceDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) - ], -) - -val_wflw = dict( - type='WFLWDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='wflw/annotations/face_landmarks_wflw_test.json', - data_prefix=dict(img='pose/WFLW/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=mapping_wflw) - ], -) - -val_300w = dict( - type='Face300WDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='300w/annotations/face_landmarks_300w_test.json', - data_prefix=dict(img='pose/300w/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) - ], -) - -val_cofw = dict( - type='COFWDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='cofw/annotations/cofw_test.json', - data_prefix=dict(img='pose/COFW/images/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=mapping_cofw) - ], -) - -val_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_val_v1.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', num_keypoints=106, mapping=mapping_halpe) - ], -) - -test_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/lapa.py'), - datasets=[val_lapa, val_coco, val_wflw, val_300w, val_cofw, val_halpe], - pipeline=val_pipeline, - test_mode=True, - )) - -# hooks -default_hooks = dict( - checkpoint=dict( - save_best='NME', rule='less', max_keep_ckpts=1, interval=1)) - -custom_hooks = [ - # dict( - # type='EMAHook', - # ema_type='ExpMomentumEMA', - # momentum=0.0002, - # update_buffers=True, - # priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='NME', - norm_mode='keypoint_distance', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# lapa coco wflw 300w cofw halpe + +# runtime +max_epochs = 120 +stage2_num_epochs = 10 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=1) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.005, + begin=30, + end=max_epochs, + T_max=90, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(256, 
256), + sigma=(5.66, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.167, + widen_factor=0.375, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e-3a2dd350.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=384, + out_channels=106, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'LapaDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.2), + dict(type='MedianBlur', p=0.2), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +# train dataset +dataset_lapa = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='LaPa/annotations/lapa_trainval.json', + data_prefix=dict(img='pose/LaPa/'), + pipeline=[], +) + +kpt_68_to_106 = [ + # + (0, 0), + (1, 2), + (2, 4), + (3, 6), + (4, 8), + (5, 10), + (6, 12), + (7, 14), + (8, 16), + (9, 18), + (10, 
20), + (11, 22), + (12, 24), + (13, 26), + (14, 28), + (15, 30), + (16, 32), + # + (17, 33), + (18, 34), + (19, 35), + (20, 36), + (21, 37), + # + (22, 42), + (23, 43), + (24, 44), + (25, 45), + (26, 46), + # + (27, 51), + (28, 52), + (29, 53), + (30, 54), + # + (31, 58), + (32, 59), + (33, 60), + (34, 61), + (35, 62), + # + (36, 66), + (39, 70), + # + ((37, 38), 68), + ((40, 41), 72), + # + (42, 75), + (45, 79), + # + ((43, 44), 77), + ((46, 47), 81), + # + (48, 84), + (49, 85), + (50, 86), + (51, 87), + (52, 88), + (53, 89), + (54, 90), + (55, 91), + (56, 92), + (57, 93), + (58, 94), + (59, 95), + (60, 96), + (61, 97), + (62, 98), + (63, 99), + (64, 100), + (65, 101), + (66, 102), + (67, 103) +] + +mapping_halpe = [ + # + (26, 0), + (27, 2), + (28, 4), + (29, 6), + (30, 8), + (31, 10), + (32, 12), + (33, 14), + (34, 16), + (35, 18), + (36, 20), + (37, 22), + (38, 24), + (39, 26), + (40, 28), + (41, 30), + (42, 32), + # + (43, 33), + (44, 34), + (45, 35), + (46, 36), + (47, 37), + # + (48, 42), + (49, 43), + (50, 44), + (51, 45), + (52, 46), + # + (53, 51), + (54, 52), + (55, 53), + (56, 54), + # + (57, 58), + (58, 59), + (59, 60), + (60, 61), + (61, 62), + # + (62, 66), + (65, 70), + # + ((63, 64), 68), + ((66, 67), 72), + # + (68, 75), + (71, 79), + # + ((69, 70), 77), + ((72, 73), 81), + # + (74, 84), + (75, 85), + (76, 86), + (77, 87), + (78, 88), + (79, 89), + (80, 90), + (81, 91), + (82, 92), + (83, 93), + (84, 94), + (85, 95), + (86, 96), + (87, 97), + (88, 98), + (89, 99), + (90, 100), + (91, 101), + (92, 102), + (93, 103) +] + +mapping_wflw = [ + # + (0, 0), + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), + (17, 17), + (18, 18), + (19, 19), + (20, 20), + (21, 21), + (22, 22), + (23, 23), + (24, 24), + (25, 25), + (26, 26), + (27, 27), + (28, 28), + (29, 29), + (30, 30), + (31, 31), + (32, 32), + # + (33, 33), + (34, 34), + (35, 35), + (36, 36), + (37, 37), + (38, 38), + (39, 39), + (40, 40), + (41, 41), + # + (42, 42), + (43, 43), + (44, 44), + (45, 45), + (46, 46), + (47, 47), + (48, 48), + (49, 49), + (50, 50), + # + (51, 51), + (52, 52), + (53, 53), + (54, 54), + # + (55, 58), + (56, 59), + (57, 60), + (58, 61), + (59, 62), + # + (60, 66), + (61, 67), + (62, 68), + (63, 69), + (64, 70), + (65, 71), + (66, 72), + (67, 73), + # + (68, 75), + (69, 76), + (70, 77), + (71, 78), + (72, 79), + (73, 80), + (74, 81), + (75, 82), + # + (76, 84), + (77, 85), + (78, 86), + (79, 87), + (80, 88), + (81, 89), + (82, 90), + (83, 91), + (84, 92), + (85, 93), + (86, 94), + (87, 95), + (88, 96), + (89, 97), + (90, 98), + (91, 99), + (92, 100), + (93, 101), + (94, 102), + (95, 103), + # + (96, 104), + # + (97, 105) +] + +mapping_cofw = [ + # + (0, 33), + (2, 38), + (4, 35), + (5, 40), + # + (1, 46), + (3, 50), + (6, 44), + (7, 48), + # + (8, 60), + (10, 64), + (12, 62), + (13, 66), + # + (9, 72), + (11, 68), + (14, 70), + (15, 74), + # + (18, 57), + (19, 63), + (20, 54), + (21, 60), + # + (22, 84), + (23, 90), + (24, 87), + (25, 98), + (26, 102), + (27, 93), + # + (28, 16) +] +dataset_coco = dict( + type='CocoWholeBodyFaceDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) + ], +) + +dataset_wflw = dict( + type='WFLWDataset', + data_root=data_root, + 
data_mode=data_mode, + ann_file='wflw/annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='pose/WFLW/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_wflw) + ], +) + +dataset_300w = dict( + type='Face300WDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='300w/annotations/face_landmarks_300w_train.json', + data_prefix=dict(img='pose/300w/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) + ], +) + +dataset_cofw = dict( + type='COFWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='cofw/annotations/cofw_train.json', + data_prefix=dict(img='pose/COFW/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_cofw) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_133kpt.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_halpe) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/lapa.py'), + datasets=[ + dataset_lapa, dataset_coco, dataset_wflw, dataset_300w, + dataset_cofw, dataset_halpe + ], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='LaPa/annotations/lapa_test.json', + data_prefix=dict(img='pose/LaPa/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# test dataset +val_lapa = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='LaPa/annotations/lapa_test.json', + data_prefix=dict(img='pose/LaPa/'), + pipeline=[], +) + +val_coco = dict( + type='CocoWholeBodyFaceDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) + ], +) + +val_wflw = dict( + type='WFLWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='wflw/annotations/face_landmarks_wflw_test.json', + data_prefix=dict(img='pose/WFLW/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_wflw) + ], +) + +val_300w = dict( + type='Face300WDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='300w/annotations/face_landmarks_300w_test.json', + data_prefix=dict(img='pose/300w/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106) + ], +) + +val_cofw = dict( + type='COFWDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='cofw/annotations/cofw_test.json', + data_prefix=dict(img='pose/COFW/images/'), + pipeline=[ + dict( + type='KeypointConverter', num_keypoints=106, mapping=mapping_cofw) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + 
type='KeypointConverter', num_keypoints=106, mapping=mapping_halpe) + ], +) + +test_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/lapa.py'), + datasets=[val_lapa, val_coco, val_wflw, val_300w, val_cofw, val_halpe], + pipeline=val_pipeline, + test_mode=True, + )) + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='NME', rule='less', max_keep_ckpts=1, interval=1)) + +custom_hooks = [ + # dict( + # type='EMAHook', + # ema_type='ExpMomentumEMA', + # momentum=0.0002, + # update_buffers=True, + # priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/configs/face_2d_keypoint/rtmpose/face6/rtmpose_face6.md b/configs/face_2d_keypoint/rtmpose/face6/rtmpose_face6.md index 254633e42c..64113ec643 100644 --- a/configs/face_2d_keypoint/rtmpose/face6/rtmpose_face6.md +++ b/configs/face_2d_keypoint/rtmpose/face6/rtmpose_face6.md @@ -1,71 +1,71 @@ - - -
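The mapping lists in the config above (`kpt_68_to_106`, `mapping_wflw`, `mapping_cofw`, `mapping_halpe`) pair a source keypoint index, or a pair of indices, with a target index in the 106-point layout consumed by `KeypointConverter`. A minimal NumPy sketch of that conversion, assuming paired source indices are averaged into a single target point; the actual converter in MMPose also handles keypoint weighting and batched annotations.

```python
import numpy as np

def convert_keypoints(src_kpts, src_vis, mapping, num_target=106):
    """src_kpts: (N_src, 2) keypoints, src_vis: (N_src,) visibility flags."""
    dst_kpts = np.zeros((num_target, 2), dtype=np.float32)
    dst_vis = np.zeros(num_target, dtype=np.float32)
    for src, dst in mapping:
        if isinstance(src, tuple):
            # e.g. ((37, 38), 68): the target point is the midpoint of two sources
            dst_kpts[dst] = src_kpts[list(src)].mean(axis=0)
            dst_vis[dst] = min(src_vis[src[0]], src_vis[src[1]])
        else:
            # e.g. (0, 0): direct copy from source index to target index
            dst_kpts[dst] = src_kpts[src]
            dst_vis[dst] = src_vis[src]
    return dst_kpts, dst_vis  # unmapped target slots stay at zero / invisible
```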
-RTMPose (arXiv'2023) - -```bibtex -@misc{https://doi.org/10.48550/arxiv.2303.07399, - doi = {10.48550/ARXIV.2303.07399}, - url = {https://arxiv.org/abs/2303.07399}, - author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, - keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, - title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, - publisher = {arXiv}, - year = {2023}, - copyright = {Creative Commons Attribution 4.0 International} -} - -``` - -
- - - -
-RTMDet (arXiv'2022) - -```bibtex -@misc{lyu2022rtmdet, - title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, - author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, - year={2022}, - eprint={2212.07784}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -- Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset. -- `Face6` and `*` denote model trained on 6 public datasets: - - [COCO-Wholebody-Face](https://github.com/jin-s13/COCO-WholeBody/) - - [WFLW](https://wywu.github.io/projects/LAB/WFLW.html) - - [300W](https://ibug.doc.ic.ac.uk/resources/300-W/) - - [COFW](http://www.vision.caltech.edu/xpburgos/ICCV13/) - - [Halpe](https://github.com/Fang-Haoshu/Halpe-FullBody/) - - [LaPa](https://github.com/JDAI-CV/lapa-dataset) - -| Config | Input Size | NME
(LaPa) | FLOPS
(G) | Download | -| :--------------------------------------------------------------------------: | :--------: | :----------------: | :---------------: | :-----------------------------------------------------------------------------: | -| [RTMPose-t\*](./rtmpose/face_2d_keypoint/rtmpose-t_8xb256-120e_lapa-256x256.py) | 256x256 | 1.67 | 0.652 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-face6_pt-in1k_120e-256x256-df79d9a5_20230529.pth) | -| [RTMPose-s\*](./rtmpose/face_2d_keypoint/rtmpose-m_8xb256-120e_lapa-256x256.py) | 256x256 | 1.59 | 1.119 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-face6_pt-in1k_120e-256x256-d779fdef_20230529.pth) | -| [RTMPose-m\*](./rtmpose/face_2d_keypoint/rtmpose-m_8xb256-120e_lapa-256x256.py) | 256x256 | 1.44 | 2.852 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-face6_pt-in1k_120e-256x256-72a37400_20230529.pth) | + + +
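For reference, a hedged sketch of how one of the checkpoints in the table above is typically run with MMPose 1.x's top-down inference helpers. The image path and bounding box are placeholders, and the config path follows the metafile naming used elsewhere in this patch.

```python
from mmpose.apis import inference_topdown, init_model

config = 'configs/face_2d_keypoint/rtmpose/face6/rtmpose-m_8xb256-120e_face6-256x256.py'
checkpoint = ('https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/'
              'rtmpose-m_simcc-face6_pt-in1k_120e-256x256-72a37400_20230529.pth')

model = init_model(config, checkpoint, device='cpu')
# Top-down models expect face bounding boxes in xyxy format; a single
# hypothetical box covering a 256x256 crop is used here.
results = inference_topdown(model, 'face.jpg', bboxes=[[0, 0, 256, 256]])
keypoints = results[0].pred_instances.keypoints  # (num_instances, 106, 2)
```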
+RTMPose (arXiv'2023) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2303.07399, + doi = {10.48550/ARXIV.2303.07399}, + url = {https://arxiv.org/abs/2303.07399}, + author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, + publisher = {arXiv}, + year = {2023}, + copyright = {Creative Commons Attribution 4.0 International} +} + +``` + +
+ + + +
+RTMDet (arXiv'2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +- Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset. +- `Face6` and `*` denote model trained on 6 public datasets: + - [COCO-Wholebody-Face](https://github.com/jin-s13/COCO-WholeBody/) + - [WFLW](https://wywu.github.io/projects/LAB/WFLW.html) + - [300W](https://ibug.doc.ic.ac.uk/resources/300-W/) + - [COFW](http://www.vision.caltech.edu/xpburgos/ICCV13/) + - [Halpe](https://github.com/Fang-Haoshu/Halpe-FullBody/) + - [LaPa](https://github.com/JDAI-CV/lapa-dataset) + +| Config | Input Size | NME
(LaPa) | FLOPS
(G) | Download | +| :--------------------------------------------------------------------------: | :--------: | :----------------: | :---------------: | :-----------------------------------------------------------------------------: | +| [RTMPose-t\*](./rtmpose/face_2d_keypoint/rtmpose-t_8xb256-120e_lapa-256x256.py) | 256x256 | 1.67 | 0.652 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-face6_pt-in1k_120e-256x256-df79d9a5_20230529.pth) | +| [RTMPose-s\*](./rtmpose/face_2d_keypoint/rtmpose-m_8xb256-120e_lapa-256x256.py) | 256x256 | 1.59 | 1.119 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-face6_pt-in1k_120e-256x256-d779fdef_20230529.pth) | +| [RTMPose-m\*](./rtmpose/face_2d_keypoint/rtmpose-m_8xb256-120e_lapa-256x256.py) | 256x256 | 1.44 | 2.852 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-face6_pt-in1k_120e-256x256-72a37400_20230529.pth) | diff --git a/configs/face_2d_keypoint/rtmpose/face6/rtmpose_face6.yml b/configs/face_2d_keypoint/rtmpose/face6/rtmpose_face6.yml index 2cd822a337..fc9b65295b 100644 --- a/configs/face_2d_keypoint/rtmpose/face6/rtmpose_face6.yml +++ b/configs/face_2d_keypoint/rtmpose/face6/rtmpose_face6.yml @@ -1,50 +1,50 @@ -Collections: -- Name: RTMPose - Paper: - Title: "RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose" - URL: https://arxiv.org/abs/2303.07399 - README: https://github.com/open-mmlab/mmpose/blob/main/projects/rtmpose/README.md -Models: -- Config: configs/face_2d_keypoint/rtmpose/face6/rtmpose-t_8xb256-120e_face6-256x256.py - In Collection: RTMPose - Metadata: - Architecture: &id001 - - RTMPose - Training Data: &id002 - - COCO-Wholebody-Face - - WFLW - - 300W - - COFW - - Halpe - - LaPa - Name: rtmpose-t_8xb256-120e_face6-256x256 - Results: - - Dataset: Face6 - Metrics: - NME: 1.67 - Task: Face 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-face6_pt-in1k_120e-256x256-df79d9a5_20230529.pth -- Config: configs/face_2d_keypoint/rtmpose/face6/rtmpose-s_8xb256-120e_face6-256x256.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: *id002 - Name: rtmpose-s_8xb256-120e_face6-256x256 - Results: - - Dataset: Face6 - Metrics: - NME: 1.59 - Task: Face 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-face6_pt-in1k_120e-256x256-d779fdef_20230529.pth -- Config: configs/face_2d_keypoint/rtmpose/face6/rtmpose-m_8xb256-120e_face6-256x256.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: *id002 - Name: rtmpose-m_8xb256-120e_face6-256x256 - Results: - - Dataset: Face6 - Metrics: - NME: 1.44 - Task: Face 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-face6_pt-in1k_120e-256x256-72a37400_20230529.pth +Collections: +- Name: RTMPose + Paper: + Title: "RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose" + URL: https://arxiv.org/abs/2303.07399 + README: https://github.com/open-mmlab/mmpose/blob/main/projects/rtmpose/README.md +Models: +- Config: configs/face_2d_keypoint/rtmpose/face6/rtmpose-t_8xb256-120e_face6-256x256.py + In Collection: RTMPose + Metadata: + Architecture: &id001 + - RTMPose + Training Data: &id002 + - COCO-Wholebody-Face + - WFLW + - 300W + - COFW + - Halpe + - LaPa + Name: rtmpose-t_8xb256-120e_face6-256x256 + Results: + - Dataset: Face6 + Metrics: + NME: 1.67 + Task: Face 2D Keypoint + Weights: 
https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-face6_pt-in1k_120e-256x256-df79d9a5_20230529.pth +- Config: configs/face_2d_keypoint/rtmpose/face6/rtmpose-s_8xb256-120e_face6-256x256.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-s_8xb256-120e_face6-256x256 + Results: + - Dataset: Face6 + Metrics: + NME: 1.59 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-face6_pt-in1k_120e-256x256-d779fdef_20230529.pth +- Config: configs/face_2d_keypoint/rtmpose/face6/rtmpose-m_8xb256-120e_face6-256x256.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: *id002 + Name: rtmpose-m_8xb256-120e_face6-256x256 + Results: + - Dataset: Face6 + Metrics: + NME: 1.44 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-face6_pt-in1k_120e-256x256-72a37400_20230529.pth diff --git a/configs/face_2d_keypoint/rtmpose/lapa/rtmpose-m_8xb64-120e_lapa-256x256.py b/configs/face_2d_keypoint/rtmpose/lapa/rtmpose-m_8xb64-120e_lapa-256x256.py index fee1201db1..8e43b73175 100644 --- a/configs/face_2d_keypoint/rtmpose/lapa/rtmpose-m_8xb64-120e_lapa-256x256.py +++ b/configs/face_2d_keypoint/rtmpose/lapa/rtmpose-m_8xb64-120e_lapa-256x256.py @@ -1,246 +1,246 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 120 -stage2_num_epochs = 10 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=1) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(256, 256), - sigma=(5.66, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=768, - out_channels=106, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - 
decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'LapaDataset' -data_mode = 'topdown' -data_root = 'data/LaPa/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/pose/LaPa/', -# f'{data_root}': 's3://openmmlab/datasets/pose/LaPa/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict(type='PhotometricDistortion'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.2), - dict(type='MedianBlur', p=0.2), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - # dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/lapa_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/lapa_val.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/lapa_test.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) - -# hooks -default_hooks = dict( - checkpoint=dict( - save_best='NME', rule='less', max_keep_ckpts=1, interval=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - 
switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='NME', - norm_mode='keypoint_distance', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 120 +stage2_num_epochs = 10 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=1) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(256, 256), + sigma=(5.66, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=106, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'LapaDataset' +data_mode = 'topdown' +data_root = 'data/LaPa/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/pose/LaPa/', +# f'{data_root}': 's3://openmmlab/datasets/pose/LaPa/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.2), + dict(type='MedianBlur', p=0.2), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', 
backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + # dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/lapa_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/lapa_val.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/lapa_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='NME', rule='less', max_keep_ckpts=1, interval=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/configs/face_2d_keypoint/rtmpose/lapa/rtmpose_lapa.md b/configs/face_2d_keypoint/rtmpose/lapa/rtmpose_lapa.md index 9638de7551..837d3fd6c9 100644 --- a/configs/face_2d_keypoint/rtmpose/lapa/rtmpose_lapa.md +++ b/configs/face_2d_keypoint/rtmpose/lapa/rtmpose_lapa.md @@ -1,40 +1,40 @@ - - -
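As background on how a file like the LaPa config above is consumed, MMEngine first merges everything listed in `_base_` (here only `default_runtime.py`) and then overlays the keys defined in the file itself. A short sketch, assuming the config is loaded from the repository root:

```python
from mmengine.config import Config

cfg = Config.fromfile(
    'configs/face_2d_keypoint/rtmpose/lapa/rtmpose-m_8xb64-120e_lapa-256x256.py')

print(cfg.train_cfg.max_epochs)     # 120, as set in the config
print(cfg.model.head.out_channels)  # 106 facial keypoints
print(cfg.val_evaluator.type)       # 'NME'
```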
-RTMDet (ArXiv 2022) - -```bibtex -@misc{lyu2022rtmdet, - title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, - author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, - year={2022}, - eprint={2212.07784}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -
- - - -
-LaPa (AAAI'2020) - -```bibtex -@inproceedings{liu2020new, - title={A New Dataset and Boundary-Attention Semantic Segmentation for Face Parsing.}, - author={Liu, Yinglu and Shi, Hailin and Shen, Hao and Si, Yue and Wang, Xiaobo and Mei, Tao}, - booktitle={AAAI}, - pages={11637--11644}, - year={2020} -} -``` - -
- -Results on LaPa val set - -| Arch | Input Size | NME | ckpt | log | -| :------------------------------------------------------------- | :--------: | :--: | :------------------------------------------------------------: | :------------------------------------------------------------: | -| [pose_rtmpose_m](/configs/face_2d_keypoint/rtmpose/lapa/rtmpose-m_8xb64-120e_lapa-256x256.py) | 256x256 | 1.29 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-lapa_pt-aic-coco_120e-256x256-762b1ae2_20230422.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-lapa_pt-aic-coco_120e-256x256-762b1ae2_20230422.json) | + + +
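The NME values reported above come from an evaluator configured with `norm_mode='keypoint_distance'`: the mean per-keypoint Euclidean error divided by a reference distance between two dataset-defined keypoints (for faces, typically the inter-ocular distance). A rough stand-alone version; the normalization indices below are placeholders, whereas MMPose reads them from the dataset meta info.

```python
import numpy as np

def nme(pred, gt, norm_idx=(0, 1)):
    """pred, gt: (K, 2) keypoint arrays; norm_idx: the two normalization keypoints."""
    norm = np.linalg.norm(gt[norm_idx[0]] - gt[norm_idx[1]])
    per_kpt_error = np.linalg.norm(pred - gt, axis=1)
    return float(per_kpt_error.mean() / norm)
```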
+RTMDet (ArXiv 2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+LaPa (AAAI'2020) + +```bibtex +@inproceedings{liu2020new, + title={A New Dataset and Boundary-Attention Semantic Segmentation for Face Parsing.}, + author={Liu, Yinglu and Shi, Hailin and Shen, Hao and Si, Yue and Wang, Xiaobo and Mei, Tao}, + booktitle={AAAI}, + pages={11637--11644}, + year={2020} +} +``` + +
+ +Results on LaPa val set + +| Arch | Input Size | NME | ckpt | log | +| :------------------------------------------------------------- | :--------: | :--: | :------------------------------------------------------------: | :------------------------------------------------------------: | +| [pose_rtmpose_m](/configs/face_2d_keypoint/rtmpose/lapa/rtmpose-m_8xb64-120e_lapa-256x256.py) | 256x256 | 1.29 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-lapa_pt-aic-coco_120e-256x256-762b1ae2_20230422.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-lapa_pt-aic-coco_120e-256x256-762b1ae2_20230422.json) | diff --git a/configs/face_2d_keypoint/rtmpose/lapa/rtmpose_lapa.yml b/configs/face_2d_keypoint/rtmpose/lapa/rtmpose_lapa.yml index 96acff8de6..9f4cf040a9 100644 --- a/configs/face_2d_keypoint/rtmpose/lapa/rtmpose_lapa.yml +++ b/configs/face_2d_keypoint/rtmpose/lapa/rtmpose_lapa.yml @@ -1,15 +1,15 @@ -Models: -- Config: configs/face_2d_keypoint/rtmpose/lapa/rtmpose-m_8xb64-120e_lapa-256x256.py - In Collection: RTMPose - Alias: face - Metadata: - Architecture: - - RTMPose - Training Data: LaPa - Name: rtmpose-m_8xb64-120e_lapa-256x256 - Results: - - Dataset: WFLW - Metrics: - NME: 1.29 - Task: Face 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-lapa_pt-aic-coco_120e-256x256-762b1ae2_20230422.pth +Models: +- Config: configs/face_2d_keypoint/rtmpose/lapa/rtmpose-m_8xb64-120e_lapa-256x256.py + In Collection: RTMPose + Alias: face + Metadata: + Architecture: + - RTMPose + Training Data: LaPa + Name: rtmpose-m_8xb64-120e_lapa-256x256 + Results: + - Dataset: WFLW + Metrics: + NME: 1.29 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-lapa_pt-aic-coco_120e-256x256-762b1ae2_20230422.pth diff --git a/configs/face_2d_keypoint/rtmpose/wflw/rtmpose-m_8xb64-60e_wflw-256x256.py b/configs/face_2d_keypoint/rtmpose/wflw/rtmpose-m_8xb64-60e_wflw-256x256.py index cbfd788d60..833235de09 100644 --- a/configs/face_2d_keypoint/rtmpose/wflw/rtmpose-m_8xb64-60e_wflw-256x256.py +++ b/configs/face_2d_keypoint/rtmpose/wflw/rtmpose-m_8xb64-60e_wflw-256x256.py @@ -1,231 +1,231 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 60 -stage2_num_epochs = 10 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=1) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(256, 256), - sigma=(5.66, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - 
expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=768, - out_channels=98, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'WFLWDataset' -data_mode = 'topdown' -data_root = 'data/wflw/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/pose/WFLW/', -# f'{data_root}': 's3://openmmlab/datasets/pose/WFLW/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - # dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - # dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/face_landmarks_wflw_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - 
data_mode=data_mode, - ann_file='annotations/face_landmarks_wflw_test.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict( - save_best='NME', rule='less', max_keep_ckpts=1, interval=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='NME', - norm_mode='keypoint_distance', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 60 +stage2_num_epochs = 10 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=1) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(256, 256), + sigma=(5.66, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=98, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'WFLWDataset' +data_mode = 'topdown' +data_root = 'data/wflw/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/pose/WFLW/', +# f'{data_root}': 's3://openmmlab/datasets/pose/WFLW/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + # dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + 
dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + # dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='NME', rule='less', max_keep_ckpts=1, interval=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/configs/face_2d_keypoint/rtmpose/wflw/rtmpose_wflw.md b/configs/face_2d_keypoint/rtmpose/wflw/rtmpose_wflw.md index b0070258da..30554f7df1 100644 --- a/configs/face_2d_keypoint/rtmpose/wflw/rtmpose_wflw.md +++ b/configs/face_2d_keypoint/rtmpose/wflw/rtmpose_wflw.md @@ -1,42 +1,42 @@ - - -
-RTMDet (ArXiv 2022) - -```bibtex -@misc{lyu2022rtmdet, - title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, - author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, - year={2022}, - eprint={2212.07784}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -
- - - -
-WFLW (CVPR'2018) - -```bibtex -@inproceedings{wu2018look, - title={Look at boundary: A boundary-aware face alignment algorithm}, - author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={2129--2138}, - year={2018} -} -``` - -
- -Results on WFLW dataset - -The model is trained on WFLW train. - -| Arch | Input Size | NME | ckpt | log | -| :------------------------------------------------------------- | :--------: | :--: | :------------------------------------------------------------: | :------------------------------------------------------------: | -| [pose_rtmpose_m](/configs/face_2d_keypoint/rtmpose/wflw/rtmpose-m_8xb64-60e_wflw-256x256.py) | 256x256 | 4.01 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-wflw_pt-aic-coco_60e-256x256-dc1dcdcf_20230228.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-wflw_pt-aic-coco_60e-256x256-dc1dcdcf_20230228.json) | + + +
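These RTMPose configs all share the `SimCCLabel` codec (input_size 256, simcc_split_ratio 2.0, sigma 5.66), which turns each ground-truth coordinate into a 1-D classification target instead of a 2-D heatmap. A simplified sketch of that encoding; visibility handling, border clipping and the `normalize` option follow the MMPose implementation and are left out here.

```python
import numpy as np

def simcc_label(coord, input_size=256, split_ratio=2.0, sigma=5.66):
    """Gaussian label over input_size * split_ratio bins for one coordinate."""
    bins = int(input_size * split_ratio)          # 512 bins per axis here
    centers = np.arange(bins, dtype=np.float32)
    mu = coord * split_ratio                      # coordinate in the up-scaled axis
    return np.exp(-((centers - mu) ** 2) / (2 * sigma ** 2))

x_label = simcc_label(100.0)   # 512-dim target for an x-coordinate of 100 px
y_label = simcc_label(37.5)    # the same encoding is applied per axis
```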
+RTMDet (ArXiv 2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+WFLW (CVPR'2018) + +```bibtex +@inproceedings{wu2018look, + title={Look at boundary: A boundary-aware face alignment algorithm}, + author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={2129--2138}, + year={2018} +} +``` + +
+ +Results on WFLW dataset + +The model is trained on WFLW train. + +| Arch | Input Size | NME | ckpt | log | +| :------------------------------------------------------------- | :--------: | :--: | :------------------------------------------------------------: | :------------------------------------------------------------: | +| [pose_rtmpose_m](/configs/face_2d_keypoint/rtmpose/wflw/rtmpose-m_8xb64-60e_wflw-256x256.py) | 256x256 | 4.01 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-wflw_pt-aic-coco_60e-256x256-dc1dcdcf_20230228.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-wflw_pt-aic-coco_60e-256x256-dc1dcdcf_20230228.json) | diff --git a/configs/face_2d_keypoint/rtmpose/wflw/rtmpose_wflw.yml b/configs/face_2d_keypoint/rtmpose/wflw/rtmpose_wflw.yml index deee03a7dd..06c96e9883 100644 --- a/configs/face_2d_keypoint/rtmpose/wflw/rtmpose_wflw.yml +++ b/configs/face_2d_keypoint/rtmpose/wflw/rtmpose_wflw.yml @@ -1,15 +1,15 @@ -Models: -- Config: configs/face_2d_keypoint/rtmpose/wflw/rtmpose-m_8xb64-60e_wflw-256x256.py - In Collection: RTMPose - Alias: face - Metadata: - Architecture: - - RTMPose - Training Data: WFLW - Name: rtmpose-m_8xb64-60e_wflw-256x256 - Results: - - Dataset: WFLW - Metrics: - NME: 4.01 - Task: Face 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-wflw_pt-aic-coco_60e-256x256-dc1dcdcf_20230228.pth +Models: +- Config: configs/face_2d_keypoint/rtmpose/wflw/rtmpose-m_8xb64-60e_wflw-256x256.py + In Collection: RTMPose + Alias: face + Metadata: + Architecture: + - RTMPose + Training Data: WFLW + Name: rtmpose-m_8xb64-60e_wflw-256x256 + Results: + - Dataset: WFLW + Metrics: + NME: 4.01 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-wflw_pt-aic-coco_60e-256x256-dc1dcdcf_20230228.pth diff --git a/configs/face_2d_keypoint/topdown_heatmap/300w/hrnetv2_300w.md b/configs/face_2d_keypoint/topdown_heatmap/300w/hrnetv2_300w.md index ace8776c4e..8da54765e4 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/300w/hrnetv2_300w.md +++ b/configs/face_2d_keypoint/topdown_heatmap/300w/hrnetv2_300w.md @@ -1,44 +1,44 @@ - - -
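The `Alias: face` entries in the metafiles above are what allow the high-level `MMPoseInferencer` to resolve a face model by name instead of an explicit config path. A brief, assumed-typical usage; it requires an MMPose 1.x install and network access to fetch the weights.

```python
from mmpose.apis import MMPoseInferencer

inferencer = MMPoseInferencer(pose2d='face')  # resolved through the metafile alias
result = next(inferencer('face.jpg'))         # lazily yields one result dict per image
```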
-HRNetv2 (TPAMI'2019) - -```bibtex -@article{WangSCJDZLMTWLX19, - title={Deep High-Resolution Representation Learning for Visual Recognition}, - author={Jingdong Wang and Ke Sun and Tianheng Cheng and - Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and - Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, - journal={TPAMI}, - year={2019} -} -``` - -
- - - -
-300W (IMAVIS'2016) - -```bibtex -@article{sagonas2016300, - title={300 faces in-the-wild challenge: Database and results}, - author={Sagonas, Christos and Antonakos, Epameinondas and Tzimiropoulos, Georgios and Zafeiriou, Stefanos and Pantic, Maja}, - journal={Image and vision computing}, - volume={47}, - pages={3--18}, - year={2016}, - publisher={Elsevier} -} -``` - -
- -Results on 300W dataset - -The model is trained on 300W train. - -| Arch | Input Size | NME*common* | NME*challenge* | NME*full* | NME*test* | ckpt | log | -| :--------------------------------- | :--------: | :--------------------: | :-----------------------: | :------------------: | :------------------: | :---------------------------------: | :--------------------------------: | -| [pose_hrnetv2_w18](/configs/face_2d_keypoint/topdown_heatmap/300w/td-hm_hrnetv2-w18_8xb64-60e_300w-256x256.py) | 256x256 | 2.92 | 5.64 | 3.45 | 4.10 | [ckpt](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_300w_256x256-eea53406_20211019.pth) | [log](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_300w_256x256_20211019.log.json) | + + +
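The HRNetv2 results above come from the heatmap-based pipeline, whose `MSRAHeatmap` codec (heatmap_size 64x64, sigma 1.5) appears in the accompanying config later in this patch. An illustrative NumPy version of such a Gaussian target; border truncation and visibility handling are omitted.

```python
import numpy as np

def gaussian_heatmap(x, y, size=64, sigma=1.5):
    """2-D Gaussian peaked at heatmap cell (x, y)."""
    xs, ys = np.meshgrid(np.arange(size), np.arange(size))
    return np.exp(-((xs - x) ** 2 + (ys - y) ** 2) / (2 * sigma ** 2))

target = gaussian_heatmap(20, 31)  # (64, 64) map with a peak of 1.0 at (20, 31)
```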
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+300W (IMAVIS'2016) + +```bibtex +@article{sagonas2016300, + title={300 faces in-the-wild challenge: Database and results}, + author={Sagonas, Christos and Antonakos, Epameinondas and Tzimiropoulos, Georgios and Zafeiriou, Stefanos and Pantic, Maja}, + journal={Image and vision computing}, + volume={47}, + pages={3--18}, + year={2016}, + publisher={Elsevier} +} +``` + +
+ +Results on 300W dataset + +The model is trained on 300W train. + +| Arch | Input Size | NME*common* | NME*challenge* | NME*full* | NME*test* | ckpt | log | +| :--------------------------------- | :--------: | :--------------------: | :-----------------------: | :------------------: | :------------------: | :---------------------------------: | :--------------------------------: | +| [pose_hrnetv2_w18](/configs/face_2d_keypoint/topdown_heatmap/300w/td-hm_hrnetv2-w18_8xb64-60e_300w-256x256.py) | 256x256 | 2.92 | 5.64 | 3.45 | 4.10 | [ckpt](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_300w_256x256-eea53406_20211019.pth) | [log](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_300w_256x256_20211019.log.json) | diff --git a/configs/face_2d_keypoint/topdown_heatmap/300w/hrnetv2_300w.yml b/configs/face_2d_keypoint/topdown_heatmap/300w/hrnetv2_300w.yml index 58dcb4832a..4a813d17f5 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/300w/hrnetv2_300w.yml +++ b/configs/face_2d_keypoint/topdown_heatmap/300w/hrnetv2_300w.yml @@ -1,23 +1,23 @@ -Collections: -- Name: HRNetv2 - Paper: - Title: Deep High-Resolution Representation Learning for Visual Recognition - URL: https://ieeexplore.ieee.org/abstract/document/9052469/ - README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/hrnetv2.md -Models: -- Config: configs/face_2d_keypoint/topdown_heatmap/300w/td-hm_hrnetv2-w18_8xb64-60e_300w-256x256.py - In Collection: HRNetv2 - Metadata: - Architecture: - - HRNetv2 - Training Data: 300W - Name: td-hm_hrnetv2-w18_8xb64-60e_300w-256x256 - Results: - - Dataset: 300W - Metrics: - NME challenge: 5.64 - NME common: 2.92 - NME full: 3.45 - NME test: 4.1 - Task: Face 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_300w_256x256-eea53406_20211019.pth +Collections: +- Name: HRNetv2 + Paper: + Title: Deep High-Resolution Representation Learning for Visual Recognition + URL: https://ieeexplore.ieee.org/abstract/document/9052469/ + README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/hrnetv2.md +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/300w/td-hm_hrnetv2-w18_8xb64-60e_300w-256x256.py + In Collection: HRNetv2 + Metadata: + Architecture: + - HRNetv2 + Training Data: 300W + Name: td-hm_hrnetv2-w18_8xb64-60e_300w-256x256 + Results: + - Dataset: 300W + Metrics: + NME challenge: 5.64 + NME common: 2.92 + NME full: 3.45 + NME test: 4.1 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_300w_256x256-eea53406_20211019.pth diff --git a/configs/face_2d_keypoint/topdown_heatmap/300w/td-hm_hrnetv2-w18_8xb64-60e_300w-256x256.py b/configs/face_2d_keypoint/topdown_heatmap/300w/td-hm_hrnetv2-w18_8xb64-60e_300w-256x256.py index 52473a4664..7f279f06aa 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/300w/td-hm_hrnetv2-w18_8xb64-60e_300w-256x256.py +++ b/configs/face_2d_keypoint/topdown_heatmap/300w/td-hm_hrnetv2-w18_8xb64-60e_300w-256x256.py @@ -1,161 +1,161 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=60, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=2e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=60, - milestones=[40, 55], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual 
training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) - -# codec settings -codec = dict( - type='MSRAHeatmap', - input_size=(256, 256), - heatmap_size=(64, 64), - sigma=1.5) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(18, 36)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(18, 36, 72)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(18, 36, 72, 144), - multiscale_output=True), - upsample=dict(mode='bilinear', align_corners=False)), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), - ), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='HeatmapHead', - in_channels=270, - out_channels=68, - deconv_out_channels=None, - conv_out_channels=(270, ), - conv_kernel_sizes=(1, ), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'Face300WDataset' -data_mode = 'topdown' -data_root = 'data/300w/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - shift_prob=0, - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/face_landmarks_300w_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/face_landmarks_300w_valid.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='NME', - norm_mode='keypoint_distance', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=60, 
+ milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=1.5) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=68, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'Face300WDataset' +data_mode = 'topdown' +data_root = 'data/300w/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_prob=0, + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_300w_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_300w_valid.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/configs/face_2d_keypoint/topdown_heatmap/README.md b/configs/face_2d_keypoint/topdown_heatmap/README.md index a8b7cf98fa..53fd36d58f 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/README.md +++ b/configs/face_2d_keypoint/topdown_heatmap/README.md @@ 
-1,57 +1,57 @@ -# Top-down heatmap-based pose estimation - -Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. Instead of estimating keypoint coordinates directly, the pose estimator will produce heatmaps which represent the likelihood of being a keypoint, following the paradigm introduced in [Simple Baselines for Human Pose Estimation and Tracking](http://openaccess.thecvf.com/content_ECCV_2018/html/Bin_Xiao_Simple_Baselines_for_ECCV_2018_paper.html). - -
- -
- -## Results and Models - -### 300W Dataset - -Results on 300W dataset - -| Model | Input Size | NME*common* | NME*challenge* | NME*full* | NME*test* | Details and Download | -| :---------: | :--------: | :--------------------: | :-----------------------: | :------------------: | :------------------: | :---------------------------------------: | -| HRNetv2-w18 | 256x256 | 2.92 | 5.64 | 3.45 | 4.10 | [hrnetv2_300w.md](./300w/hrnetv2_300w.md) | - -### AFLW Dataset - -Results on AFLW dataset - -| Model | Input Size | NME*full* | NME*frontal* | Details and Download | -| :--------------: | :--------: | :------------------: | :---------------------: | :-------------------------------------------------: | -| HRNetv2-w18+Dark | 256x256 | 1.35 | 1.19 | [hrnetv2_dark_aflw.md](./aflw/hrnetv2_dark_aflw.md) | -| HRNetv2-w18 | 256x256 | 1.41 | 1.27 | [hrnetv2_aflw.md](./aflw/hrnetv2_aflw.md) | - -### COCO-WholeBody-Face Dataset - -Results on COCO-WholeBody-Face val set - -| Model | Input Size | NME | Details and Download | -| :--------------: | :--------: | :----: | :----------------------------------------------------------------------------------------------: | -| HRNetv2-w18+Dark | 256x256 | 0.0513 | [hrnetv2_dark_coco_wholebody_face.md](./coco_wholebody_face/hrnetv2_dark_coco_wholebody_face.md) | -| SCNet-50 | 256x256 | 0.0567 | [scnet_coco_wholebody_face.md](./coco_wholebody_face/scnet_coco_wholebody_face.md) | -| HRNetv2-w18 | 256x256 | 0.0569 | [hrnetv2_coco_wholebody_face.md](./coco_wholebody_face/hrnetv2_coco_wholebody_face.md) | -| ResNet-50 | 256x256 | 0.0582 | [resnet_coco_wholebody_face.md](./coco_wholebody_face/resnet_coco_wholebody_face.md) | -| HourglassNet | 256x256 | 0.0587 | [hourglass_coco_wholebody_face.md](./coco_wholebody_face/hourglass_coco_wholebody_face.md) | -| MobileNet-v2 | 256x256 | 0.0611 | [mobilenetv2_coco_wholebody_face.md](./coco_wholebody_face/mobilenetv2_coco_wholebody_face.md) | - -### COFW Dataset - -Results on COFW dataset - -| Model | Input Size | NME | Details and Download | -| :---------: | :--------: | :--: | :---------------------------------------: | -| HRNetv2-w18 | 256x256 | 3.48 | [hrnetv2_cofw.md](./cofw/hrnetv2_cofw.md) | - -### WFLW Dataset - -Results on WFLW dataset - -| Model | Input Size | NME*test* | NME*pose* | NME*illumination* | NME*occlusion* | NME*blur* | NME*makeup* | NME*expression* | Details and Download | -| :-----: | :--------: | :------------------: | :------------------: | :--------------------------: | :-----------------------: | :------------------: | :--------------------: | :------------------------: | :--------------------: | -| HRNetv2-w18+Dark | 256x256 | 3.98 | 6.98 | 3.96 | 4.78 | 4.56 | 3.89 | 4.29 | [hrnetv2_dark_wflw.md](./wflw/hrnetv2_dark_wflw.md) | -| HRNetv2-w18+AWing | 256x256 | 4.02 | 6.94 | 3.97 | 4.78 | 4.59 | 3.87 | 4.28 | [hrnetv2_awing_wflw.md](./wflw/hrnetv2_awing_wflw.md) | -| HRNetv2-w18 | 256x256 | 4.06 | 6.97 | 3.99 | 4.83 | 4.58 | 3.94 | 4.33 | [hrnetv2_wflw.md](./wflw/hrnetv2_wflw.md) | +# Top-down heatmap-based pose estimation + +Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. Instead of estimating keypoint coordinates directly, the pose estimator will produce heatmaps which represent the likelihood of being a keypoint, following the paradigm introduced in [Simple Baselines for Human Pose Estimation and Tracking](http://openaccess.thecvf.com/content_ECCV_2018/html/Bin_Xiao_Simple_Baselines_for_ECCV_2018_paper.html). 
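Each of the K landmarks gets its own low-resolution likelihood map; the predicted coordinate is read off at the map's peak and rescaled back to the input resolution. A minimal NumPy sketch of that argmax-style decoding (this is not MMPose's actual `MSRAHeatmap` decoder; the shapes, 68 landmarks on a 64x64 grid, follow the 300W config earlier in this diff):

```python
import numpy as np

def decode_heatmaps(heatmaps, input_size=(256, 256)):
    """Toy decoder: `heatmaps` is a (K, H, W) stack of per-landmark likelihood maps."""
    num_kpts, h, w = heatmaps.shape
    flat = heatmaps.reshape(num_kpts, -1)
    idx = flat.argmax(axis=1)                 # most likely cell per landmark
    scores = flat.max(axis=1)                 # peak value doubles as a confidence score
    ys, xs = np.unravel_index(idx, (h, w))
    # rescale from heatmap resolution (64x64) back to the network input (256x256)
    keypoints = np.stack([xs * input_size[0] / w, ys * input_size[1] / h], axis=1)
    return keypoints, scores

kpts, conf = decode_heatmaps(np.random.rand(68, 64, 64))
print(kpts.shape, conf.shape)  # (68, 2) (68,)
```

Real decoders add sub-pixel refinement on top of the raw argmax (e.g. the quarter-offset trick or DarkPose's distribution-aware refinement), which is where the "+Dark" variants in the tables below come from.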
+ +
+ +
+ +## Results and Models + +### 300W Dataset + +Results on 300W dataset + +| Model | Input Size | NME*common* | NME*challenge* | NME*full* | NME*test* | Details and Download | +| :---------: | :--------: | :--------------------: | :-----------------------: | :------------------: | :------------------: | :---------------------------------------: | +| HRNetv2-w18 | 256x256 | 2.92 | 5.64 | 3.45 | 4.10 | [hrnetv2_300w.md](./300w/hrnetv2_300w.md) | + +### AFLW Dataset + +Results on AFLW dataset + +| Model | Input Size | NME*full* | NME*frontal* | Details and Download | +| :--------------: | :--------: | :------------------: | :---------------------: | :-------------------------------------------------: | +| HRNetv2-w18+Dark | 256x256 | 1.35 | 1.19 | [hrnetv2_dark_aflw.md](./aflw/hrnetv2_dark_aflw.md) | +| HRNetv2-w18 | 256x256 | 1.41 | 1.27 | [hrnetv2_aflw.md](./aflw/hrnetv2_aflw.md) | + +### COCO-WholeBody-Face Dataset + +Results on COCO-WholeBody-Face val set + +| Model | Input Size | NME | Details and Download | +| :--------------: | :--------: | :----: | :----------------------------------------------------------------------------------------------: | +| HRNetv2-w18+Dark | 256x256 | 0.0513 | [hrnetv2_dark_coco_wholebody_face.md](./coco_wholebody_face/hrnetv2_dark_coco_wholebody_face.md) | +| SCNet-50 | 256x256 | 0.0567 | [scnet_coco_wholebody_face.md](./coco_wholebody_face/scnet_coco_wholebody_face.md) | +| HRNetv2-w18 | 256x256 | 0.0569 | [hrnetv2_coco_wholebody_face.md](./coco_wholebody_face/hrnetv2_coco_wholebody_face.md) | +| ResNet-50 | 256x256 | 0.0582 | [resnet_coco_wholebody_face.md](./coco_wholebody_face/resnet_coco_wholebody_face.md) | +| HourglassNet | 256x256 | 0.0587 | [hourglass_coco_wholebody_face.md](./coco_wholebody_face/hourglass_coco_wholebody_face.md) | +| MobileNet-v2 | 256x256 | 0.0611 | [mobilenetv2_coco_wholebody_face.md](./coco_wholebody_face/mobilenetv2_coco_wholebody_face.md) | + +### COFW Dataset + +Results on COFW dataset + +| Model | Input Size | NME | Details and Download | +| :---------: | :--------: | :--: | :---------------------------------------: | +| HRNetv2-w18 | 256x256 | 3.48 | [hrnetv2_cofw.md](./cofw/hrnetv2_cofw.md) | + +### WFLW Dataset + +Results on WFLW dataset + +| Model | Input Size | NME*test* | NME*pose* | NME*illumination* | NME*occlusion* | NME*blur* | NME*makeup* | NME*expression* | Details and Download | +| :-----: | :--------: | :------------------: | :------------------: | :--------------------------: | :-----------------------: | :------------------: | :--------------------: | :------------------------: | :--------------------: | +| HRNetv2-w18+Dark | 256x256 | 3.98 | 6.98 | 3.96 | 4.78 | 4.56 | 3.89 | 4.29 | [hrnetv2_dark_wflw.md](./wflw/hrnetv2_dark_wflw.md) | +| HRNetv2-w18+AWing | 256x256 | 4.02 | 6.94 | 3.97 | 4.78 | 4.59 | 3.87 | 4.28 | [hrnetv2_awing_wflw.md](./wflw/hrnetv2_awing_wflw.md) | +| HRNetv2-w18 | 256x256 | 4.06 | 6.97 | 3.99 | 4.83 | 4.58 | 3.94 | 4.33 | [hrnetv2_wflw.md](./wflw/hrnetv2_wflw.md) | diff --git a/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_aflw.md b/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_aflw.md index 70c59ac2e4..36aade26fd 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_aflw.md +++ b/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_aflw.md @@ -1,43 +1,43 @@ - - -
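The encoding side is the mirror image of the decoding sketch above: the `MSRAHeatmap` codec in these configs drops a small Gaussian (width set by `sigma`, 1.5 for 300W and 2 for AFLW) on each landmark's location on the 64x64 grid, and the head regresses those maps with `KeypointMSELoss`. A toy version of that target encoding, not the real codec:

```python
import numpy as np

def gaussian_target(center_xy, heatmap_size=(64, 64), sigma=2.0):
    """Toy target encoder: one Gaussian peak per landmark on the heatmap grid."""
    w, h = heatmap_size
    xs = np.arange(w)[None, :]        # (1, W)
    ys = np.arange(h)[:, None]        # (H, 1)
    x0, y0 = center_xy
    return np.exp(-((xs - x0) ** 2 + (ys - y0) ** 2) / (2 * sigma ** 2))

# a landmark at (128, 96) in the 256x256 input maps to (32, 24) on the 4x-smaller grid
target = gaussian_target((128 / 4, 96 / 4))
print(target.shape, np.unravel_index(target.argmax(), target.shape))  # (64, 64) (24, 32)
```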
-HRNetv2 (TPAMI'2019) - -```bibtex -@article{WangSCJDZLMTWLX19, - title={Deep High-Resolution Representation Learning for Visual Recognition}, - author={Jingdong Wang and Ke Sun and Tianheng Cheng and - Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and - Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, - journal={TPAMI}, - year={2019} -} -``` - -
- - - -
-AFLW (ICCVW'2011) - -```bibtex -@inproceedings{koestinger2011annotated, - title={Annotated facial landmarks in the wild: A large-scale, real-world database for facial landmark localization}, - author={Koestinger, Martin and Wohlhart, Paul and Roth, Peter M and Bischof, Horst}, - booktitle={2011 IEEE international conference on computer vision workshops (ICCV workshops)}, - pages={2144--2151}, - year={2011}, - organization={IEEE} -} -``` - -
- -Results on AFLW dataset - -The model is trained on AFLW train and evaluated on AFLW full and frontal. - -| Arch | Input Size | NME*full* | NME*frontal* | ckpt | log | -| :------------------------------------------------ | :--------: | :------------------: | :---------------------: | :-----------------------------------------------: | :-----------------------------------------------: | -| [pose_hrnetv2_w18](/configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py) | 256x256 | 1.41 | 1.27 | [ckpt](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth) | [log](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256_20210125.log.json) | + + +
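NME in these tables is the normalized mean error: the mean Euclidean distance between predicted and ground-truth landmarks, divided by a per-face normalization length. As the evaluators in the configs of this diff show, AFLW is normalized by the face bounding-box size (`norm_mode='use_norm_item', norm_item='bbox_size'`), while 300W uses a keypoint distance (the distance between the outer eye corners, via `norm_mode='keypoint_distance'`). A rough sketch of the metric, not MMPose's `NME` implementation; taking the bbox size as sqrt(w*h) here is an assumption for illustration:

```python
import numpy as np

def nme(pred, gt, norm_lengths):
    """pred, gt: (N, K, 2) landmarks; norm_lengths: (N,) normalization length per sample."""
    per_landmark_err = np.linalg.norm(pred - gt, axis=-1)        # (N, K) pixel errors
    return float((per_landmark_err / norm_lengths[:, None]).mean())

pred = np.random.rand(4, 19, 2) * 256                            # 19 AFLW landmarks
gt = pred + np.random.randn(4, 19, 2) * 3                        # perturbed "ground truth"
bbox_wh = np.full((4, 2), 200.0)                                 # per-face bbox width/height
print(nme(pred, gt, np.sqrt(bbox_wh.prod(axis=1))))              # e.g. ~0.02
```

Because the normalization lengths differ from dataset to dataset, the AFLW, 300W and COCO-WholeBody-Face columns are on different scales and are not directly comparable across datasets.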
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+AFLW (ICCVW'2011) + +```bibtex +@inproceedings{koestinger2011annotated, + title={Annotated facial landmarks in the wild: A large-scale, real-world database for facial landmark localization}, + author={Koestinger, Martin and Wohlhart, Paul and Roth, Peter M and Bischof, Horst}, + booktitle={2011 IEEE international conference on computer vision workshops (ICCV workshops)}, + pages={2144--2151}, + year={2011}, + organization={IEEE} +} +``` + +
+ +Results on AFLW dataset + +The model is trained on AFLW train and evaluated on AFLW full and frontal. + +| Arch | Input Size | NME*full* | NME*frontal* | ckpt | log | +| :------------------------------------------------ | :--------: | :------------------: | :---------------------: | :-----------------------------------------------: | :-----------------------------------------------: | +| [pose_hrnetv2_w18](/configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py) | 256x256 | 1.41 | 1.27 | [ckpt](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth) | [log](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256_20210125.log.json) | diff --git a/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_aflw.yml b/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_aflw.yml index 06d2d43b9c..ce0bdcc403 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_aflw.yml +++ b/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_aflw.yml @@ -1,15 +1,15 @@ -Models: -- Config: configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py - In Collection: HRNetv2 - Metadata: - Architecture: - - HRNetv2 - Training Data: AFLW - Name: td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256 - Results: - - Dataset: AFLW - Metrics: - NME frontal: 1.27 - NME full: 1.41 - Task: Face 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py + In Collection: HRNetv2 + Metadata: + Architecture: + - HRNetv2 + Training Data: AFLW + Name: td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256 + Results: + - Dataset: AFLW + Metrics: + NME frontal: 1.27 + NME full: 1.41 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth diff --git a/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_dark_aflw.md b/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_dark_aflw.md index a51c473d3b..fc4f25e428 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_dark_aflw.md +++ b/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_dark_aflw.md @@ -1,60 +1,60 @@ - - -
-HRNetv2 (TPAMI'2019) - -```bibtex -@article{WangSCJDZLMTWLX19, - title={Deep High-Resolution Representation Learning for Visual Recognition}, - author={Jingdong Wang and Ke Sun and Tianheng Cheng and - Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and - Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, - journal={TPAMI}, - year={2019} -} -``` - -
- - - -
-DarkPose (CVPR'2020) - -```bibtex -@inproceedings{zhang2020distribution, - title={Distribution-aware coordinate representation for human pose estimation}, - author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={7093--7102}, - year={2020} -} -``` - -
- - - -
-AFLW (ICCVW'2011) - -```bibtex -@inproceedings{koestinger2011annotated, - title={Annotated facial landmarks in the wild: A large-scale, real-world database for facial landmark localization}, - author={Koestinger, Martin and Wohlhart, Paul and Roth, Peter M and Bischof, Horst}, - booktitle={2011 IEEE international conference on computer vision workshops (ICCV workshops)}, - pages={2144--2151}, - year={2011}, - organization={IEEE} -} -``` - -
- -Results on AFLW dataset - -The model is trained on AFLW train and evaluated on AFLW full and frontal. - -| Arch | Input Size | NME*full* | NME*frontal* | ckpt | log | -| :------------------------------------------------ | :--------: | :------------------: | :---------------------: | :-----------------------------------------------: | :-----------------------------------------------: | -| [pose_hrnetv2_w18_dark](/configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_dark-8xb64-60e_aflw-256x256.py) | 256x256 | 1.35 | 1.19 | [ckpt](https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_aflw_256x256_dark-219606c0_20210125.pth) | [log](https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_aflw_256x256_dark_20210125.log.json) | + + +
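All of these models are evaluated with heatmap flip testing (`test_cfg=dict(flip_test=True, flip_mode='heatmap', shift_heatmap=True)` in the configs): the image is run through the network a second time horizontally mirrored, the resulting heatmaps are flipped back with left/right landmark channels swapped, optionally shifted by one pixel, and averaged with the original prediction. A simplified sketch of that idea, with a made-up flip-pair list:

```python
import numpy as np

def flip_test_average(heatmaps, flipped_heatmaps, flip_pairs, shift=True):
    """heatmaps, flipped_heatmaps: (K, H, W); flip_pairs: (left_idx, right_idx) tuples."""
    restored = flipped_heatmaps[:, :, ::-1].copy()     # undo the horizontal mirror
    for left, right in flip_pairs:                     # swap mirrored landmark channels
        restored[[left, right]] = restored[[right, left]]
    if shift:                                          # crude stand-in for shift_heatmap
        restored[:, :, 1:] = restored[:, :, :-1].copy()
    return 0.5 * (heatmaps + restored)

# toy example: 4 landmarks where (0, 1) and (2, 3) are mirror pairs
avg = flip_test_average(np.random.rand(4, 64, 64),
                        np.random.rand(4, 64, 64),
                        flip_pairs=[(0, 1), (2, 3)])
print(avg.shape)  # (4, 64, 64)
```

Flip testing roughly doubles inference cost but typically buys a small, consistent NME improvement, which is why the configs shown in this diff enable it at test time.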
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+DarkPose (CVPR'2020) + +```bibtex +@inproceedings{zhang2020distribution, + title={Distribution-aware coordinate representation for human pose estimation}, + author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={7093--7102}, + year={2020} +} +``` + +
+ + + +
+AFLW (ICCVW'2011) + +```bibtex +@inproceedings{koestinger2011annotated, + title={Annotated facial landmarks in the wild: A large-scale, real-world database for facial landmark localization}, + author={Koestinger, Martin and Wohlhart, Paul and Roth, Peter M and Bischof, Horst}, + booktitle={2011 IEEE international conference on computer vision workshops (ICCV workshops)}, + pages={2144--2151}, + year={2011}, + organization={IEEE} +} +``` + +
+ +Results on AFLW dataset + +The model is trained on AFLW train and evaluated on AFLW full and frontal. + +| Arch | Input Size | NME*full* | NME*frontal* | ckpt | log | +| :------------------------------------------------ | :--------: | :------------------: | :---------------------: | :-----------------------------------------------: | :-----------------------------------------------: | +| [pose_hrnetv2_w18_dark](/configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_dark-8xb64-60e_aflw-256x256.py) | 256x256 | 1.35 | 1.19 | [ckpt](https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_aflw_256x256_dark-219606c0_20210125.pth) | [log](https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_aflw_256x256_dark_20210125.log.json) | diff --git a/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_dark_aflw.yml b/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_dark_aflw.yml index 54c0953897..955adb6d75 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_dark_aflw.yml +++ b/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_dark_aflw.yml @@ -1,16 +1,16 @@ -Models: -- Config: configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_dark-8xb64-60e_aflw-256x256.py - In Collection: DarkPose - Metadata: - Architecture: - - HRNetv2 - - DarkPose - Training Data: AFLW - Name: td-hm_hrnetv2-w18_dark-8xb64-60e_aflw-256x256 - Results: - - Dataset: AFLW - Metrics: - NME frontal: 1.19 - NME full: 1.34 - Task: Face 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_aflw_256x256_dark-219606c0_20210125.pth +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_dark-8xb64-60e_aflw-256x256.py + In Collection: DarkPose + Metadata: + Architecture: + - HRNetv2 + - DarkPose + Training Data: AFLW + Name: td-hm_hrnetv2-w18_dark-8xb64-60e_aflw-256x256 + Results: + - Dataset: AFLW + Metrics: + NME frontal: 1.19 + NME full: 1.34 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_aflw_256x256_dark-219606c0_20210125.pth diff --git a/configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py b/configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py index a157a01442..50d197bdef 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py +++ b/configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py @@ -1,156 +1,156 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=60, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=2e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=60, - milestones=[40, 55], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - 
extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(18, 36)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(18, 36, 72)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(18, 36, 72, 144), - multiscale_output=True), - upsample=dict(mode='bilinear', align_corners=False)), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), - ), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='HeatmapHead', - in_channels=270, - out_channels=19, - deconv_out_channels=None, - conv_out_channels=(270, ), - conv_kernel_sizes=(1, ), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'AFLWDataset' -data_mode = 'topdown' -data_root = 'data/aflw/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - shift_prob=0, - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/face_landmarks_aflw_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/face_landmarks_aflw_test.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='NME', norm_mode='use_norm_item', norm_item='bbox_size') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=60, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 
103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=19, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AFLWDataset' +data_mode = 'topdown' +data_root = 'data/aflw/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_prob=0, + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_aflw_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_aflw_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', norm_mode='use_norm_item', norm_item='bbox_size') +test_evaluator = val_evaluator diff --git a/configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_dark-8xb64-60e_aflw-256x256.py b/configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_dark-8xb64-60e_aflw-256x256.py index 44100cebe6..335cd34de1 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_dark-8xb64-60e_aflw-256x256.py +++ b/configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_dark-8xb64-60e_aflw-256x256.py @@ -1,160 +1,160 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=60, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=2e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - 
dict( - type='MultiStepLR', - begin=0, - end=60, - milestones=[40, 55], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) - -# codec settings -codec = dict( - type='MSRAHeatmap', - input_size=(256, 256), - heatmap_size=(64, 64), - sigma=2, - unbiased=True) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(18, 36)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(18, 36, 72)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(18, 36, 72, 144), - multiscale_output=True), - upsample=dict(mode='bilinear', align_corners=False)), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), - ), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='HeatmapHead', - in_channels=270, - out_channels=19, - deconv_out_channels=None, - conv_out_channels=(270, ), - conv_kernel_sizes=(1, ), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'AFLWDataset' -data_mode = 'topdown' -data_root = 'data/aflw/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - shift_prob=0, - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/face_landmarks_aflw_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/face_landmarks_aflw_test.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='NME', norm_mode='use_norm_item', norm_item='bbox_size') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# 
learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=60, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=19, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'AFLWDataset' +data_mode = 'topdown' +data_root = 'data/aflw/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_prob=0, + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_aflw_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_aflw_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', norm_mode='use_norm_item', norm_item='bbox_size') +test_evaluator = val_evaluator diff --git 
a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hourglass_coco_wholebody_face.md b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hourglass_coco_wholebody_face.md index 6099dcf06d..26f08dada7 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hourglass_coco_wholebody_face.md +++ b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hourglass_coco_wholebody_face.md @@ -1,39 +1,39 @@ - - -
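One more note on the two AFLW configs above: they set `auto_scale_lr = dict(base_batch_size=512)`, and the `8xb64` in their file names means 8 GPUs x 64 samples, i.e. exactly the reference batch size. If training runs with a different effective batch size, the runner can rescale the learning rate linearly; a back-of-the-envelope version of that rule (assuming MMEngine-style linear scaling) is:

```python
def scaled_lr(base_lr, num_gpus, samples_per_gpu, base_batch_size=512):
    """Linear scaling rule: the LR grows or shrinks with the effective batch size."""
    return base_lr * (num_gpus * samples_per_gpu) / base_batch_size

print(scaled_lr(2e-3, num_gpus=8, samples_per_gpu=64))  # 0.002  (512 == base, unchanged)
print(scaled_lr(2e-3, num_gpus=4, samples_per_gpu=64))  # 0.001  (half the batch, half the LR)
```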
-Hourglass (ECCV'2016) - -```bibtex -@inproceedings{newell2016stacked, - title={Stacked hourglass networks for human pose estimation}, - author={Newell, Alejandro and Yang, Kaiyu and Deng, Jia}, - booktitle={European conference on computer vision}, - pages={483--499}, - year={2016}, - organization={Springer} -} -``` - -
- - - -
-COCO-WholeBody-Face (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -Results on COCO-WholeBody-Face val set - -| Arch | Input Size | NME | ckpt | log | -| :------------------------------------------------------------ | :--------: | :----: | :------------------------------------------------------------: | :-----------------------------------------------------------: | -| [pose_hourglass_52](/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hourglass52_8xb32-60e_coco-wholebody-face-256x256.py) | 256x256 | 0.0587 | [ckpt](https://download.openmmlab.com/mmpose/face/hourglass/hourglass52_coco_wholebody_face_256x256-6994cf2e_20210909.pth) | [log](https://download.openmmlab.com/mmpose/face/hourglass/hourglass52_coco_wholebody_face_256x256_20210909.log.json) | + + +
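To try one of the checkpoints listed in these tables from Python, the MMPose 1.x high-level API can be used roughly as below. The function names and the bounding-box argument reflect the 1.x API as I understand it, and `face.jpg` is a placeholder image; treat this as a sketch and check the documentation of the installed version.

```python
# Sketch only: assumes an MMPose 1.x installation and a local face image.
import numpy as np
from mmpose.apis import inference_topdown, init_model

config = ('configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/'
          'td-hm_hourglass52_8xb32-60e_coco-wholebody-face-256x256.py')
checkpoint = ('https://download.openmmlab.com/mmpose/face/hourglass/'
              'hourglass52_coco_wholebody_face_256x256-6994cf2e_20210909.pth')

model = init_model(config, checkpoint, device='cpu')

# top-down models expect per-face bounding boxes; one dummy xyxy box here
bboxes = np.array([[0, 0, 256, 256]], dtype=np.float32)
results = inference_topdown(model, 'face.jpg', bboxes)
print(results[0].pred_instances.keypoints.shape)  # expected: (1, 68, 2)
```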
+Hourglass (ECCV'2016) + +```bibtex +@inproceedings{newell2016stacked, + title={Stacked hourglass networks for human pose estimation}, + author={Newell, Alejandro and Yang, Kaiyu and Deng, Jia}, + booktitle={European conference on computer vision}, + pages={483--499}, + year={2016}, + organization={Springer} +} +``` + +
+ + + +
+COCO-WholeBody-Face (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Face val set + +| Arch | Input Size | NME | ckpt | log | +| :------------------------------------------------------------ | :--------: | :----: | :------------------------------------------------------------: | :-----------------------------------------------------------: | +| [pose_hourglass_52](/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hourglass52_8xb32-60e_coco-wholebody-face-256x256.py) | 256x256 | 0.0587 | [ckpt](https://download.openmmlab.com/mmpose/face/hourglass/hourglass52_coco_wholebody_face_256x256-6994cf2e_20210909.pth) | [log](https://download.openmmlab.com/mmpose/face/hourglass/hourglass52_coco_wholebody_face_256x256_20210909.log.json) | diff --git a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hourglass_coco_wholebody_face.yml b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hourglass_coco_wholebody_face.yml index 704c01983e..185474fcb1 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hourglass_coco_wholebody_face.yml +++ b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hourglass_coco_wholebody_face.yml @@ -1,14 +1,14 @@ -Models: -- Config: configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hourglass52_8xb32-60e_coco-wholebody-face-256x256.py - In Collection: Hourglass - Metadata: - Architecture: - - Hourglass - Training Data: COCO-WholeBody-Face - Name: td-hm_hourglass52_8xb32-60e_coco-wholebody-face-256x256 - Results: - - Dataset: COCO-WholeBody-Face - Metrics: - NME: 0.0587 - Task: Face 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/face/hourglass/hourglass52_coco_wholebody_face_256x256-6994cf2e_20210909.pth +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hourglass52_8xb32-60e_coco-wholebody-face-256x256.py + In Collection: Hourglass + Metadata: + Architecture: + - Hourglass + Training Data: COCO-WholeBody-Face + Name: td-hm_hourglass52_8xb32-60e_coco-wholebody-face-256x256 + Results: + - Dataset: COCO-WholeBody-Face + Metrics: + NME: 0.0587 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/hourglass/hourglass52_coco_wholebody_face_256x256-6994cf2e_20210909.pth diff --git a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_coco_wholebody_face.md b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_coco_wholebody_face.md index d16ea2bc7f..3cf9109f6e 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_coco_wholebody_face.md +++ b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_coco_wholebody_face.md @@ -1,39 +1,39 @@ - - -
-HRNetv2 (TPAMI'2019) - -```bibtex -@article{WangSCJDZLMTWLX19, - title={Deep High-Resolution Representation Learning for Visual Recognition}, - author={Jingdong Wang and Ke Sun and Tianheng Cheng and - Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and - Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, - journal={TPAMI}, - year={2019} -} -``` - -
- - - -
-COCO-WholeBody-Face (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -Results on COCO-WholeBody-Face val set - -| Arch | Input Size | NME | ckpt | log | -| :------------------------------------------------------------ | :--------: | :----: | :------------------------------------------------------------: | :-----------------------------------------------------------: | -| [pose_hrnetv2_w18](/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_8xb32-60e_coco-wholebody-face-256x256.py) | 256x256 | 0.0569 | [ckpt](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_coco_wholebody_face_256x256-c1ca469b_20210909.pth) | [log](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_coco_wholebody_face_256x256_20210909.log.json) | + + +
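The 300W and AFLW configs in this diff share one training schedule: 60 epochs of Adam at lr=2e-3, a 500-iteration linear warm-up, and step decay by gamma=0.1 at epochs 40 and 55. Ignoring the iteration-based warm-up, the per-epoch learning rate can be sketched as:

```python
def lr_at_epoch(epoch, base_lr=2e-3, milestones=(40, 55), gamma=0.1):
    """MultiStepLR-style decay: multiply by `gamma` at every milestone already passed."""
    passed = sum(epoch >= m for m in milestones)
    return base_lr * gamma ** passed

for e in (0, 39, 40, 54, 55, 59):
    print(e, lr_at_epoch(e))
# lr stays at 2e-3 until epoch 40, drops to ~2e-4, then to ~2e-5 at epoch 55
```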
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+COCO-WholeBody-Face (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Face val set + +| Arch | Input Size | NME | ckpt | log | +| :------------------------------------------------------------ | :--------: | :----: | :------------------------------------------------------------: | :-----------------------------------------------------------: | +| [pose_hrnetv2_w18](/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_8xb32-60e_coco-wholebody-face-256x256.py) | 256x256 | 0.0569 | [ckpt](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_coco_wholebody_face_256x256-c1ca469b_20210909.pth) | [log](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_coco_wholebody_face_256x256_20210909.log.json) | diff --git a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_coco_wholebody_face.yml b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_coco_wholebody_face.yml index 0a4a38d5b7..e7e526d742 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_coco_wholebody_face.yml +++ b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_coco_wholebody_face.yml @@ -1,14 +1,14 @@ -Models: -- Config: configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_8xb32-60e_coco-wholebody-face-256x256.py - In Collection: HRNetv2 - Metadata: - Architecture: - - HRNetv2 - Training Data: COCO-WholeBody-Face - Name: td-hm_hrnetv2-w18_8xb32-60e_coco-wholebody-face-256x256 - Results: - - Dataset: COCO-WholeBody-Face - Metrics: - NME: 0.0569 - Task: Face 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_coco_wholebody_face_256x256-c1ca469b_20210909.pth +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_8xb32-60e_coco-wholebody-face-256x256.py + In Collection: HRNetv2 + Metadata: + Architecture: + - HRNetv2 + Training Data: COCO-WholeBody-Face + Name: td-hm_hrnetv2-w18_8xb32-60e_coco-wholebody-face-256x256 + Results: + - Dataset: COCO-WholeBody-Face + Metrics: + NME: 0.0569 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_coco_wholebody_face_256x256-c1ca469b_20210909.pth diff --git a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_dark_coco_wholebody_face.md b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_dark_coco_wholebody_face.md index fd059ee23c..60914dbc5e 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_dark_coco_wholebody_face.md +++ b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_dark_coco_wholebody_face.md @@ -1,56 +1,56 @@ - - -
-HRNetv2 (TPAMI'2019) - -```bibtex -@article{WangSCJDZLMTWLX19, - title={Deep High-Resolution Representation Learning for Visual Recognition}, - author={Jingdong Wang and Ke Sun and Tianheng Cheng and - Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and - Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, - journal={TPAMI}, - year={2019} -} -``` - -
- - - -
-DarkPose (CVPR'2020) - -```bibtex -@inproceedings{zhang2020distribution, - title={Distribution-aware coordinate representation for human pose estimation}, - author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={7093--7102}, - year={2020} -} -``` - -
- - - -
-COCO-WholeBody-Face (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -Results on COCO-WholeBody-Face val set - -| Arch | Input Size | NME | ckpt | log | -| :------------------------------------------------------------ | :--------: | :----: | :------------------------------------------------------------: | :-----------------------------------------------------------: | -| [pose_hrnetv2_w18_dark](/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_dark-8xb32-60e_coco-wholebody-face-256x256.py) | 256x256 | 0.0513 | [ckpt](https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_coco_wholebody_face_256x256_dark-3d9a334e_20210909.pth) | [log](https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_coco_wholebody_face_256x256_dark_20210909.log.json) | + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+DarkPose (CVPR'2020) + +```bibtex +@inproceedings{zhang2020distribution, + title={Distribution-aware coordinate representation for human pose estimation}, + author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={7093--7102}, + year={2020} +} +``` + +
+ + + +
+COCO-WholeBody-Face (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Face val set + +| Arch | Input Size | NME | ckpt | log | +| :------------------------------------------------------------ | :--------: | :----: | :------------------------------------------------------------: | :-----------------------------------------------------------: | +| [pose_hrnetv2_w18_dark](/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_dark-8xb32-60e_coco-wholebody-face-256x256.py) | 256x256 | 0.0513 | [ckpt](https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_coco_wholebody_face_256x256_dark-3d9a334e_20210909.pth) | [log](https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_coco_wholebody_face_256x256_dark_20210909.log.json) | diff --git a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_dark_coco_wholebody_face.yml b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_dark_coco_wholebody_face.yml index cedc4950f9..ca0cefd98f 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_dark_coco_wholebody_face.yml +++ b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_dark_coco_wholebody_face.yml @@ -1,15 +1,15 @@ -Models: -- Config: configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_dark-8xb32-60e_coco-wholebody-face-256x256.py - In Collection: DarkPose - Metadata: - Architecture: - - HRNetv2 - - DarkPose - Training Data: COCO-WholeBody-Face - Name: td-hm_hrnetv2-w18_dark-8xb32-60e_coco-wholebody-face-256x256 - Results: - - Dataset: COCO-WholeBody-Face - Metrics: - NME: 0.0513 - Task: Face 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_coco_wholebody_face_256x256_dark-3d9a334e_20210909.pth +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_dark-8xb32-60e_coco-wholebody-face-256x256.py + In Collection: DarkPose + Metadata: + Architecture: + - HRNetv2 + - DarkPose + Training Data: COCO-WholeBody-Face + Name: td-hm_hrnetv2-w18_dark-8xb32-60e_coco-wholebody-face-256x256 + Results: + - Dataset: COCO-WholeBody-Face + Metrics: + NME: 0.0513 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_coco_wholebody_face_256x256_dark-3d9a334e_20210909.pth diff --git a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/mobilenetv2_coco_wholebody_face.md b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/mobilenetv2_coco_wholebody_face.md index d551a6c9ab..a52040785f 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/mobilenetv2_coco_wholebody_face.md +++ b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/mobilenetv2_coco_wholebody_face.md @@ -1,38 +1,38 @@ - - -
-MobilenetV2 (CVPR'2018) - -```bibtex -@inproceedings{sandler2018mobilenetv2, - title={Mobilenetv2: Inverted residuals and linear bottlenecks}, - author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={4510--4520}, - year={2018} -} -``` - -
- - - -
-COCO-WholeBody-Face (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -Results on COCO-WholeBody-Face val set - -| Arch | Input Size | NME | ckpt | log | -| :------------------------------------------------------------ | :--------: | :----: | :------------------------------------------------------------: | :-----------------------------------------------------------: | -| [pose_mobilenetv2](/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_mobilenetv2_8xb32-60e_coco-wholebody-face-256x256.py) | 256x256 | 0.0611 | [ckpt](https://download.openmmlab.com/mmpose/face/mobilenetv2/mobilenetv2_coco_wholebody_face_256x256-4a3f096e_20210909.pth) | [log](https://download.openmmlab.com/mmpose/face/mobilenetv2/mobilenetv2_coco_wholebody_face_256x256_20210909.log.json) | + + +
+MobilenetV2 (CVPR'2018) + +```bibtex +@inproceedings{sandler2018mobilenetv2, + title={Mobilenetv2: Inverted residuals and linear bottlenecks}, + author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={4510--4520}, + year={2018} +} +``` + +
+ + + +
+COCO-WholeBody-Face (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Face val set + +| Arch | Input Size | NME | ckpt | log | +| :------------------------------------------------------------ | :--------: | :----: | :------------------------------------------------------------: | :-----------------------------------------------------------: | +| [pose_mobilenetv2](/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_mobilenetv2_8xb32-60e_coco-wholebody-face-256x256.py) | 256x256 | 0.0611 | [ckpt](https://download.openmmlab.com/mmpose/face/mobilenetv2/mobilenetv2_coco_wholebody_face_256x256-4a3f096e_20210909.pth) | [log](https://download.openmmlab.com/mmpose/face/mobilenetv2/mobilenetv2_coco_wholebody_face_256x256_20210909.log.json) | diff --git a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/mobilenetv2_coco_wholebody_face.yml b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/mobilenetv2_coco_wholebody_face.yml index 2bd4352119..6d8072989f 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/mobilenetv2_coco_wholebody_face.yml +++ b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/mobilenetv2_coco_wholebody_face.yml @@ -1,15 +1,15 @@ -Models: -- Config: configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_mobilenetv2_8xb32-60e_coco-wholebody-face-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: - - SimpleBaseline2D - - MobilenetV2 - Training Data: COCO-WholeBody-Face - Name: td-hm_mobilenetv2_8xb32-60e_coco-wholebody-face-256x256 - Results: - - Dataset: COCO-WholeBody-Face - Metrics: - NME: 0.0611 - Task: Face 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/face/mobilenetv2/mobilenetv2_coco_wholebody_face_256x256-4a3f096e_20210909.pth +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_mobilenetv2_8xb32-60e_coco-wholebody-face-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - MobilenetV2 + Training Data: COCO-WholeBody-Face + Name: td-hm_mobilenetv2_8xb32-60e_coco-wholebody-face-256x256 + Results: + - Dataset: COCO-WholeBody-Face + Metrics: + NME: 0.0611 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/mobilenetv2/mobilenetv2_coco_wholebody_face_256x256-4a3f096e_20210909.pth diff --git a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/resnet_coco_wholebody_face.md b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/resnet_coco_wholebody_face.md index e4609385bd..296588c498 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/resnet_coco_wholebody_face.md +++ b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/resnet_coco_wholebody_face.md @@ -1,55 +1,55 @@ - - -
-SimpleBaseline2D (ECCV'2018) - -```bibtex -@inproceedings{xiao2018simple, - title={Simple baselines for human pose estimation and tracking}, - author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, - booktitle={Proceedings of the European conference on computer vision (ECCV)}, - pages={466--481}, - year={2018} -} -``` - -
- - - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
- - - -
-COCO-WholeBody-Face (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -Results on COCO-WholeBody-Face val set - -| Arch | Input Size | NME | ckpt | log | -| :------------------------------------------------------------ | :--------: | :----: | :------------------------------------------------------------: | :-----------------------------------------------------------: | -| [pose_res50](/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_res50_8xb32-60e_coco-wholebody-face-256x256.py) | 256x256 | 0.0582 | [ckpt](https://download.openmmlab.com/mmpose/face/resnet/res50_coco_wholebody_face_256x256-5128edf5_20210909.pth) | [log](https://download.openmmlab.com/mmpose/face/resnet/res50_coco_wholebody_face_256x256_20210909.log.json) | + + +
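The NME column in these tables is produced by the `NME` evaluator that the linked configs register with `norm_mode='keypoint_distance'`: the mean per-keypoint Euclidean error divided by the distance between a reference pair of ground-truth keypoints. A rough NumPy sketch follows; the indices 36/45 (outer eye corners in the 68-point layout) are only an illustrative choice, since the actual pair is resolved from the dataset metadata.

```python
import numpy as np

def nme(pred, gt, norm_pair=(36, 45)):
    """Normalized Mean Error for one face.

    pred, gt: (K, 2) keypoint arrays in pixel coordinates.
    norm_pair: indices of the two ground-truth keypoints whose distance
        normalizes the error (36/45 are the outer eye corners in the
        68-point layout; the real pair comes from the dataset meta).
    """
    norm = np.linalg.norm(gt[norm_pair[0]] - gt[norm_pair[1]])
    return float(np.mean(np.linalg.norm(pred - gt, axis=1)) / norm)

# Toy check: predictions within ~1 px on a 256x256 face give a small NME.
gt = np.random.rand(68, 2) * 256
pred = gt + np.random.randn(68, 2)
print(nme(pred, gt))
```

Because the reference pair is looked up from the dataset rather than hard-coded, the same `val_evaluator` block can presumably be reused unchanged across the 68-point and 29-point configs in this patch.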
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+COCO-WholeBody-Face (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Face val set + +| Arch | Input Size | NME | ckpt | log | +| :------------------------------------------------------------ | :--------: | :----: | :------------------------------------------------------------: | :-----------------------------------------------------------: | +| [pose_res50](/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_res50_8xb32-60e_coco-wholebody-face-256x256.py) | 256x256 | 0.0582 | [ckpt](https://download.openmmlab.com/mmpose/face/resnet/res50_coco_wholebody_face_256x256-5128edf5_20210909.pth) | [log](https://download.openmmlab.com/mmpose/face/resnet/res50_coco_wholebody_face_256x256_20210909.log.json) | diff --git a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/resnet_coco_wholebody_face.yml b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/resnet_coco_wholebody_face.yml index ef91a3da21..c63e04bbd2 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/resnet_coco_wholebody_face.yml +++ b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/resnet_coco_wholebody_face.yml @@ -1,15 +1,15 @@ -Models: -- Config: configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_res50_8xb32-60e_coco-wholebody-face-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: - - SimpleBaseline2D - - ResNet - Training Data: COCO-WholeBody-Face - Name: td-hm_res50_8xb32-60e_coco-wholebody-face-256x256 - Results: - - Dataset: COCO-WholeBody-Face - Metrics: - NME: 0.0582 - Task: Face 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/face/resnet/res50_coco_wholebody_face_256x256-5128edf5_20210909.pth +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_res50_8xb32-60e_coco-wholebody-face-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - ResNet + Training Data: COCO-WholeBody-Face + Name: td-hm_res50_8xb32-60e_coco-wholebody-face-256x256 + Results: + - Dataset: COCO-WholeBody-Face + Metrics: + NME: 0.0582 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/resnet/res50_coco_wholebody_face_256x256-5128edf5_20210909.pth diff --git a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/scnet_coco_wholebody_face.md b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/scnet_coco_wholebody_face.md index 2710c2ff39..368b16bcd1 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/scnet_coco_wholebody_face.md +++ b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/scnet_coco_wholebody_face.md @@ -1,38 +1,38 @@ - - -
-SCNet (CVPR'2020) - -```bibtex -@inproceedings{liu2020improving, - title={Improving Convolutional Networks with Self-Calibrated Convolutions}, - author={Liu, Jiang-Jiang and Hou, Qibin and Cheng, Ming-Ming and Wang, Changhu and Feng, Jiashi}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={10096--10105}, - year={2020} -} -``` - -
- - - -
-COCO-WholeBody-Face (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -Results on COCO-WholeBody-Face val set - -| Arch | Input Size | NME | ckpt | log | -| :------------------------------------------------------------ | :--------: | :----: | :------------------------------------------------------------: | :-----------------------------------------------------------: | -| [pose_scnet_50](/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_scnet50_8xb32-60e_coco-wholebody-face-256x256.py) | 256x256 | 0.0567 | [ckpt](https://download.openmmlab.com/mmpose/face/scnet/scnet50_coco_wholebody_face_256x256-a0183f5f_20210909.pth) | [log](https://download.openmmlab.com/mmpose/face/scnet/scnet50_coco_wholebody_face_256x256_20210909.log.json) | + + +
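All of the configs linked from these tables share the same `MSRAHeatmap` codec: 256x256 inputs, 64x64 heatmaps (a 4x downsample) and a Gaussian of `sigma=2` placed at each keypoint. A small plain-NumPy sketch of what that encoding produces for a single keypoint:

```python
import numpy as np

def encode_keypoint(xy, input_size=(256, 256), heatmap_size=(64, 64), sigma=2.0):
    """Render one keypoint as a Gaussian heatmap target (MSRA-style sketch).

    xy: (x, y) keypoint location in input-image coordinates.
    """
    stride = input_size[0] / heatmap_size[0]      # 256 / 64 = 4
    cx, cy = xy[0] / stride, xy[1] / stride       # map to heatmap coordinates
    xs = np.arange(heatmap_size[0])
    ys = np.arange(heatmap_size[1])[:, None]
    return np.exp(-((xs - cx) ** 2 + (ys - cy) ** 2) / (2 * sigma ** 2))

heatmap = encode_keypoint((128.0, 96.0))
print(heatmap.shape, heatmap.max())  # (64, 64), peak of ~1.0 near (32, 24)
```

For simplicity the sub-pixel centre is kept here; the plain codec quantizes it to the heatmap grid, while the `_dark` HRNetv2 config in this patch sets `unbiased=True` to keep the exact centre.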
+SCNet (CVPR'2020) + +```bibtex +@inproceedings{liu2020improving, + title={Improving Convolutional Networks with Self-Calibrated Convolutions}, + author={Liu, Jiang-Jiang and Hou, Qibin and Cheng, Ming-Ming and Wang, Changhu and Feng, Jiashi}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={10096--10105}, + year={2020} +} +``` + +
+ + + +
+COCO-WholeBody-Face (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Face val set + +| Arch | Input Size | NME | ckpt | log | +| :------------------------------------------------------------ | :--------: | :----: | :------------------------------------------------------------: | :-----------------------------------------------------------: | +| [pose_scnet_50](/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_scnet50_8xb32-60e_coco-wholebody-face-256x256.py) | 256x256 | 0.0567 | [ckpt](https://download.openmmlab.com/mmpose/face/scnet/scnet50_coco_wholebody_face_256x256-a0183f5f_20210909.pth) | [log](https://download.openmmlab.com/mmpose/face/scnet/scnet50_coco_wholebody_face_256x256_20210909.log.json) | diff --git a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/scnet_coco_wholebody_face.yml b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/scnet_coco_wholebody_face.yml index d3b052ffc5..d0fde1e850 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/scnet_coco_wholebody_face.yml +++ b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/scnet_coco_wholebody_face.yml @@ -1,15 +1,15 @@ -Models: -- Config: configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_scnet50_8xb32-60e_coco-wholebody-face-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: - - SimpleBaseline2D - - SCNet - Training Data: COCO-WholeBody-Face - Name: td-hm_scnet50_8xb32-60e_coco-wholebody-face-256x256 - Results: - - Dataset: COCO-WholeBody-Face - Metrics: - NME: 0.0567 - Task: Face 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/face/scnet/scnet50_coco_wholebody_face_256x256-a0183f5f_20210909.pth +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_scnet50_8xb32-60e_coco-wholebody-face-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - SCNet + Training Data: COCO-WholeBody-Face + Name: td-hm_scnet50_8xb32-60e_coco-wholebody-face-256x256 + Results: + - Dataset: COCO-WholeBody-Face + Metrics: + NME: 0.0567 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/scnet/scnet50_coco_wholebody_face_256x256-a0183f5f_20210909.pth diff --git a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hourglass52_8xb32-60e_coco-wholebody-face-256x256.py b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hourglass52_8xb32-60e_coco-wholebody-face-256x256.py index 0e6f5c5c90..135a45f970 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hourglass52_8xb32-60e_coco-wholebody-face-256x256.py +++ b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hourglass52_8xb32-60e_coco-wholebody-face-256x256.py @@ -1,123 +1,123 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=60, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=2e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[40, 55], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings 
-model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HourglassNet', - num_stacks=1, - ), - head=dict( - type='CPMHead', - in_channels=256, - out_channels=68, - num_stages=1, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyFaceDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='NME', - norm_mode='keypoint_distance', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HourglassNet', + num_stacks=1, + ), + head=dict( + type='CPMHead', + in_channels=256, + out_channels=68, + num_stages=1, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyFaceDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines 
+train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_8xb32-60e_coco-wholebody-face-256x256.py b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_8xb32-60e_coco-wholebody-face-256x256.py index dfeac90ced..b751fae470 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_8xb32-60e_coco-wholebody-face-256x256.py +++ b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_8xb32-60e_coco-wholebody-face-256x256.py @@ -1,156 +1,156 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=60, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=2e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[40, 55], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(18, 36)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(18, 36, 72)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(18, 36, 72, 144), - multiscale_output=True), - 
upsample=dict(mode='bilinear', align_corners=False)), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='HeatmapHead', - in_channels=270, - out_channels=68, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - conv_out_channels=(270, ), - conv_kernel_sizes=(1, ), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyFaceDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='NME', - norm_mode='keypoint_distance', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', 
+ num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=68, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyFaceDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_dark-8xb32-60e_coco-wholebody-face-256x256.py b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_dark-8xb32-60e_coco-wholebody-face-256x256.py index 3c34f9aa5d..a31e5999aa 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_dark-8xb32-60e_coco-wholebody-face-256x256.py +++ b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_dark-8xb32-60e_coco-wholebody-face-256x256.py @@ -1,160 +1,160 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=60, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=2e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[40, 55], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) - -# codec settings -codec = dict( - type='MSRAHeatmap', - input_size=(256, 256), - heatmap_size=(64, 64), - sigma=2, - 
unbiased=True) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(18, 36)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(18, 36, 72)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(18, 36, 72, 144), - multiscale_output=True), - upsample=dict(mode='bilinear', align_corners=False)), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='HeatmapHead', - in_channels=270, - out_channels=68, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - conv_out_channels=(270, ), - conv_kernel_sizes=(1, ), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyFaceDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='NME', - norm_mode='keypoint_distance', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# 
codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=68, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyFaceDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_mobilenetv2_8xb32-60e_coco-wholebody-face-256x256.py b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_mobilenetv2_8xb32-60e_coco-wholebody-face-256x256.py index 6f1a8629fc..c4a314dd61 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_mobilenetv2_8xb32-60e_coco-wholebody-face-256x256.py +++ 
b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_mobilenetv2_8xb32-60e_coco-wholebody-face-256x256.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=60, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=2e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[40, 55], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='MobileNetV2', - widen_factor=1., - out_indices=(7, ), - init_cfg=dict(type='Pretrained', checkpoint='mmcls://mobilenet_v2')), - head=dict( - type='HeatmapHead', - in_channels=1280, - out_channels=68, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyFaceDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='NME', - norm_mode='keypoint_distance', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR 
based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MobileNetV2', + widen_factor=1., + out_indices=(7, ), + init_cfg=dict(type='Pretrained', checkpoint='mmcls://mobilenet_v2')), + head=dict( + type='HeatmapHead', + in_channels=1280, + out_channels=68, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyFaceDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_res50_8xb32-60e_coco-wholebody-face-256x256.py b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_res50_8xb32-60e_coco-wholebody-face-256x256.py index 0070e55d69..7b4dcad303 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_res50_8xb32-60e_coco-wholebody-face-256x256.py +++ b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_res50_8xb32-60e_coco-wholebody-face-256x256.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=60, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=2e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[40, 55], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = 
dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=68, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyFaceDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='NME', - norm_mode='keypoint_distance', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=68, + 
loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyFaceDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_scnet50_8xb32-60e_coco-wholebody-face-256x256.py b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_scnet50_8xb32-60e_coco-wholebody-face-256x256.py index 8f79f4b1d3..62b7885d06 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_scnet50_8xb32-60e_coco-wholebody-face-256x256.py +++ b/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_scnet50_8xb32-60e_coco-wholebody-face-256x256.py @@ -1,124 +1,124 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=60, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=2e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[40, 55], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='SCNet', - depth=50, - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/scnet50-7ef0a199.pth')), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=68, - loss=dict(type='KeypointMSELoss', 
use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyFaceDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='NME', - norm_mode='keypoint_distance', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SCNet', + depth=50, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/scnet50-7ef0a199.pth')), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=68, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyFaceDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', 
encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/configs/face_2d_keypoint/topdown_heatmap/cofw/hrnetv2_cofw.md b/configs/face_2d_keypoint/topdown_heatmap/cofw/hrnetv2_cofw.md index b99f91f3d1..4828f2cbfb 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/cofw/hrnetv2_cofw.md +++ b/configs/face_2d_keypoint/topdown_heatmap/cofw/hrnetv2_cofw.md @@ -1,42 +1,42 @@ - - -
-HRNetv2 (TPAMI'2019) - -```bibtex -@article{WangSCJDZLMTWLX19, - title={Deep High-Resolution Representation Learning for Visual Recognition}, - author={Jingdong Wang and Ke Sun and Tianheng Cheng and - Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and - Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, - journal={TPAMI}, - year={2019} -} -``` - -
- - - -
-COFW (ICCV'2013) - -```bibtex -@inproceedings{burgos2013robust, - title={Robust face landmark estimation under occlusion}, - author={Burgos-Artizzu, Xavier P and Perona, Pietro and Doll{\'a}r, Piotr}, - booktitle={Proceedings of the IEEE international conference on computer vision}, - pages={1513--1520}, - year={2013} -} -``` - -
- -Results on COFW dataset - -The model is trained on COFW train. - -| Arch | Input Size | NME | ckpt | log | -| :------------------------------------------------------------- | :--------: | :--: | :------------------------------------------------------------: | :------------------------------------------------------------: | -| [pose_hrnetv2_w18](/configs/face_2d_keypoint/topdown_heatmap/cofw/td-hm_hrnetv2-w18_8xb64-60e_cofw-256x256.py) | 256x256 | 3.48 | [ckpt](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_cofw_256x256-49243ab8_20211019.pth) | [log](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_cofw_256x256_20211019.log.json) | + + +
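All of these models are evaluated with flip testing (`flip_test=True`, `flip_mode='heatmap'`, `shift_heatmap=True` in the configs): the horizontally flipped image is also run through the network, its heatmaps are flipped back with left/right keypoint channels swapped and shifted by one pixel, and the result is averaged with the original prediction. A rough NumPy sketch of that averaging step, assuming the `flip_pairs` index pairs are supplied by the dataset:

```python
import numpy as np

def average_flip_heatmaps(heatmaps, flipped_heatmaps, flip_pairs, shift=True):
    """Combine heatmaps predicted from the original and flipped input.

    heatmaps, flipped_heatmaps: (K, H, W) arrays of predicted heatmaps.
    flip_pairs: (left, right) keypoint index pairs to swap after un-flipping.
    """
    restored = flipped_heatmaps[:, :, ::-1].copy()   # undo the horizontal flip
    for left, right in flip_pairs:
        restored[[left, right]] = restored[[right, left]]
    if shift:                                        # mirrors shift_heatmap=True
        restored[:, :, 1:] = restored[:, :, :-1]     # one-pixel shift along x
    return 0.5 * (heatmaps + restored)
```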
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+COFW (ICCV'2013) + +```bibtex +@inproceedings{burgos2013robust, + title={Robust face landmark estimation under occlusion}, + author={Burgos-Artizzu, Xavier P and Perona, Pietro and Doll{\'a}r, Piotr}, + booktitle={Proceedings of the IEEE international conference on computer vision}, + pages={1513--1520}, + year={2013} +} +``` + +
+ +Results on COFW dataset + +The model is trained on COFW train. + +| Arch | Input Size | NME | ckpt | log | +| :------------------------------------------------------------- | :--------: | :--: | :------------------------------------------------------------: | :------------------------------------------------------------: | +| [pose_hrnetv2_w18](/configs/face_2d_keypoint/topdown_heatmap/cofw/td-hm_hrnetv2-w18_8xb64-60e_cofw-256x256.py) | 256x256 | 3.48 | [ckpt](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_cofw_256x256-49243ab8_20211019.pth) | [log](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_cofw_256x256_20211019.log.json) | diff --git a/configs/face_2d_keypoint/topdown_heatmap/cofw/hrnetv2_cofw.yml b/configs/face_2d_keypoint/topdown_heatmap/cofw/hrnetv2_cofw.yml index 733e275685..749e348047 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/cofw/hrnetv2_cofw.yml +++ b/configs/face_2d_keypoint/topdown_heatmap/cofw/hrnetv2_cofw.yml @@ -1,14 +1,14 @@ -Models: -- Config: configs/face_2d_keypoint/topdown_heatmap/cofw/td-hm_hrnetv2-w18_8xb64-60e_cofw-256x256.py - In Collection: HRNetv2 - Metadata: - Architecture: - - HRNetv2 - Training Data: COFW - Name: td-hm_hrnetv2-w18_8xb64-60e_cofw-256x256 - Results: - - Dataset: COFW - Metrics: - NME: 3.48 - Task: Face 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_cofw_256x256-49243ab8_20211019.pth +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/cofw/td-hm_hrnetv2-w18_8xb64-60e_cofw-256x256.py + In Collection: HRNetv2 + Metadata: + Architecture: + - HRNetv2 + Training Data: COFW + Name: td-hm_hrnetv2-w18_8xb64-60e_cofw-256x256 + Results: + - Dataset: COFW + Metrics: + NME: 3.48 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_cofw_256x256-49243ab8_20211019.pth diff --git a/configs/face_2d_keypoint/topdown_heatmap/cofw/td-hm_hrnetv2-w18_8xb64-60e_cofw-256x256.py b/configs/face_2d_keypoint/topdown_heatmap/cofw/td-hm_hrnetv2-w18_8xb64-60e_cofw-256x256.py index 7c52342e95..ee59f3d71d 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/cofw/td-hm_hrnetv2-w18_8xb64-60e_cofw-256x256.py +++ b/configs/face_2d_keypoint/topdown_heatmap/cofw/td-hm_hrnetv2-w18_8xb64-60e_cofw-256x256.py @@ -1,161 +1,161 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=60, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=2e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=50, - milestones=[40, 55], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) - -# codec settings -codec = dict( - type='MSRAHeatmap', - input_size=(256, 256), - heatmap_size=(64, 64), - sigma=1.5) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - 
num_channels=(18, 36)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(18, 36, 72)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(18, 36, 72, 144), - multiscale_output=True), - upsample=dict(mode='bilinear', align_corners=False)), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), - ), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='HeatmapHead', - in_channels=270, - out_channels=29, - deconv_out_channels=None, - conv_out_channels=(270, ), - conv_kernel_sizes=(1, ), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'COFWDataset' -data_mode = 'topdown' -data_root = 'data/cofw/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - shift_prob=0, - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/cofw_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/cofw_test.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='NME', - norm_mode='keypoint_distance', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=50, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=1.5) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + 
num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=29, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'COFWDataset' +data_mode = 'topdown' +data_root = 'data/cofw/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_prob=0, + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/cofw_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/cofw_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_awing_wflw.md b/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_awing_wflw.md index 53d5c3b36d..4df239a9b1 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_awing_wflw.md +++ b/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_awing_wflw.md @@ -1,59 +1,59 @@ - - -
-HRNetv2 (TPAMI'2019) - -```bibtex -@article{WangSCJDZLMTWLX19, - title={Deep High-Resolution Representation Learning for Visual Recognition}, - author={Jingdong Wang and Ke Sun and Tianheng Cheng and - Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and - Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, - journal={TPAMI}, - year={2019} -} -``` - -
- - - -
-AdaptiveWingloss (ICCV'2019) - -```bibtex -@inproceedings{wang2019adaptive, - title={Adaptive wing loss for robust face alignment via heatmap regression}, - author={Wang, Xinyao and Bo, Liefeng and Fuxin, Li}, - booktitle={Proceedings of the IEEE/CVF international conference on computer vision}, - pages={6971--6981}, - year={2019} -} -``` - -
- - - -
-WFLW (CVPR'2018) - -```bibtex -@inproceedings{wu2018look, - title={Look at boundary: A boundary-aware face alignment algorithm}, - author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={2129--2138}, - year={2018} -} -``` - -
- -Results on WFLW dataset - -The model is trained on WFLW train. - -| Arch | Input Size | NME*test* | NME*pose* | NME*illumination* | NME*occlusion* | NME*blur* | NME*makeup* | NME*expression* | ckpt | log | -| :--------- | :--------: | :------------------: | :------------------: | :--------------------------: | :-----------------------: | :------------------: | :--------------------: | :------------------------: | :--------: | :-------: | -| [pose_hrnetv2_w18_awing](/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_awing-8xb64-60e_wflw-256x256.py) | 256x256 | 4.02 | 6.94 | 3.97 | 4.78 | 4.59 | 3.87 | 4.28 | [ckpt](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_wflw_256x256_awing-5af5055c_20211212.pth) | [log](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_wflw_256x256_awing_20211212.log.json) | + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+AdaptiveWingloss (ICCV'2019) + +```bibtex +@inproceedings{wang2019adaptive, + title={Adaptive wing loss for robust face alignment via heatmap regression}, + author={Wang, Xinyao and Bo, Liefeng and Fuxin, Li}, + booktitle={Proceedings of the IEEE/CVF international conference on computer vision}, + pages={6971--6981}, + year={2019} +} +``` + +
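For context, the adaptive wing loss cited above is what the `*_awing*` config selects via `loss=dict(type='AdaptiveWingLoss', use_target_weight=True)`. A sketch of its piecewise form on the per-pixel heatmap residual, with ω, θ, ε, α the loss hyper-parameters and y the target heatmap value (so the exponent adapts between foreground and background pixels):

```latex
\mathrm{AWing}(y, \hat{y}) =
\begin{cases}
  \omega \ln\!\left(1 + \left|\frac{y - \hat{y}}{\epsilon}\right|^{\alpha - y}\right) & \text{if } |y - \hat{y}| < \theta \\[4pt]
  A\,|y - \hat{y}| - C & \text{otherwise}
\end{cases}
```

Here A and C are constants chosen so the two branches join continuously and smoothly at |y − ŷ| = θ; see the paper for their exact expressions.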
+ + + +
+WFLW (CVPR'2018) + +```bibtex +@inproceedings{wu2018look, + title={Look at boundary: A boundary-aware face alignment algorithm}, + author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={2129--2138}, + year={2018} +} +``` + +
+ +Results on WFLW dataset + +The model is trained on WFLW train. + +| Arch | Input Size | NME*test* | NME*pose* | NME*illumination* | NME*occlusion* | NME*blur* | NME*makeup* | NME*expression* | ckpt | log | +| :--------- | :--------: | :------------------: | :------------------: | :--------------------------: | :-----------------------: | :------------------: | :--------------------: | :------------------------: | :--------: | :-------: | +| [pose_hrnetv2_w18_awing](/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_awing-8xb64-60e_wflw-256x256.py) | 256x256 | 4.02 | 6.94 | 3.97 | 4.78 | 4.59 | 3.87 | 4.28 | [ckpt](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_wflw_256x256_awing-5af5055c_20211212.pth) | [log](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_wflw_256x256_awing_20211212.log.json) | diff --git a/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_awing_wflw.yml b/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_awing_wflw.yml index 6ba45c82b7..6a6d46ae11 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_awing_wflw.yml +++ b/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_awing_wflw.yml @@ -1,21 +1,21 @@ -Models: -- Config: configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_awing-8xb64-60e_wflw-256x256.py - In Collection: HRNetv2 - Metadata: - Architecture: - - HRNetv2 - - AdaptiveWingloss - Training Data: WFLW - Name: td-hm_hrnetv2-w18_awing-8xb64-60e_wflw-256x256 - Results: - - Dataset: WFLW - Metrics: - NME blur: 4.59 - NME expression: 4.28 - NME illumination: 3.97 - NME makeup: 3.87 - NME occlusion: 4.78 - NME pose: 6.94 - NME test: 4.02 - Task: Face 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_wflw_256x256_awing-5af5055c_20211212.pth +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_awing-8xb64-60e_wflw-256x256.py + In Collection: HRNetv2 + Metadata: + Architecture: + - HRNetv2 + - AdaptiveWingloss + Training Data: WFLW + Name: td-hm_hrnetv2-w18_awing-8xb64-60e_wflw-256x256 + Results: + - Dataset: WFLW + Metrics: + NME blur: 4.59 + NME expression: 4.28 + NME illumination: 3.97 + NME makeup: 3.87 + NME occlusion: 4.78 + NME pose: 6.94 + NME test: 4.02 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_wflw_256x256_awing-5af5055c_20211212.pth diff --git a/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_dark_wflw.md b/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_dark_wflw.md index 476afb6c01..b36477b2c2 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_dark_wflw.md +++ b/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_dark_wflw.md @@ -1,59 +1,59 @@ - - -
-HRNetv2 (TPAMI'2019) - -```bibtex -@article{WangSCJDZLMTWLX19, - title={Deep High-Resolution Representation Learning for Visual Recognition}, - author={Jingdong Wang and Ke Sun and Tianheng Cheng and - Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and - Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, - journal={TPAMI}, - year={2019} -} -``` - -
- - - -
-DarkPose (CVPR'2020) - -```bibtex -@inproceedings{zhang2020distribution, - title={Distribution-aware coordinate representation for human pose estimation}, - author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={7093--7102}, - year={2020} -} -``` - -
- - - -
-WFLW (CVPR'2018) - -```bibtex -@inproceedings{wu2018look, - title={Look at boundary: A boundary-aware face alignment algorithm}, - author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={2129--2138}, - year={2018} -} -``` - -
- -Results on WFLW dataset - -The model is trained on WFLW train. - -| Arch | Input Size | NME*test* | NME*pose* | NME*illumination* | NME*occlusion* | NME*blur* | NME*makeup* | NME*expression* | ckpt | log | -| :--------- | :--------: | :------------------: | :------------------: | :--------------------------: | :-----------------------: | :------------------: | :--------------------: | :------------------------: | :--------: | :-------: | -| [pose_hrnetv2_w18_dark](/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_dark-8xb64-60e_wflw-256x256.py) | 256x256 | 3.98 | 6.98 | 3.96 | 4.78 | 4.56 | 3.89 | 4.29 | [ckpt](https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_wflw_256x256_dark-3f8e0c2c_20210125.pth) | [log](https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_wflw_256x256_dark_20210125.log.json) | + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+DarkPose (CVPR'2020) + +```bibtex +@inproceedings{zhang2020distribution, + title={Distribution-aware coordinate representation for human pose estimation}, + author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={7093--7102}, + year={2020} +} +``` + +
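For context, DarkPose (enabled in the `*_dark*` config through `unbiased=True` in the `MSRAHeatmap` codec) refines the integer heatmap argmax m with a distribution-aware, sub-pixel correction. Assuming the predicted heatmap P is approximately Gaussian around its mode, a second-order Taylor expansion of log P gives the refined location:

```latex
\hat{\mu} = m - \left(\nabla^{2}\log P(m)\right)^{-1}\nabla \log P(m)
```

together with a smoothing (modulation) of the predicted heatmap before decoding.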
+ + + +
+WFLW (CVPR'2018) + +```bibtex +@inproceedings{wu2018look, + title={Look at boundary: A boundary-aware face alignment algorithm}, + author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={2129--2138}, + year={2018} +} +``` + +
+ +Results on WFLW dataset + +The model is trained on WFLW train. + +| Arch | Input Size | NME*test* | NME*pose* | NME*illumination* | NME*occlusion* | NME*blur* | NME*makeup* | NME*expression* | ckpt | log | +| :--------- | :--------: | :------------------: | :------------------: | :--------------------------: | :-----------------------: | :------------------: | :--------------------: | :------------------------: | :--------: | :-------: | +| [pose_hrnetv2_w18_dark](/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_dark-8xb64-60e_wflw-256x256.py) | 256x256 | 3.98 | 6.98 | 3.96 | 4.78 | 4.56 | 3.89 | 4.29 | [ckpt](https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_wflw_256x256_dark-3f8e0c2c_20210125.pth) | [log](https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_wflw_256x256_dark_20210125.log.json) | diff --git a/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_dark_wflw.yml b/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_dark_wflw.yml index bbb82185cf..303be334c2 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_dark_wflw.yml +++ b/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_dark_wflw.yml @@ -1,21 +1,21 @@ -Models: -- Config: configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_dark-8xb64-60e_wflw-256x256.py - In Collection: DarkPose - Metadata: - Architecture: - - HRNetv2 - - DarkPose - Training Data: WFLW - Name: td-hm_hrnetv2-w18_dark-8xb64-60e_wflw-256x256 - Results: - - Dataset: WFLW - Metrics: - NME blur: 4.56 - NME expression: 4.29 - NME illumination: 3.96 - NME makeup: 3.89 - NME occlusion: 4.78 - NME pose: 6.98 - NME test: 3.98 - Task: Face 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_wflw_256x256_dark-3f8e0c2c_20210125.pth +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_dark-8xb64-60e_wflw-256x256.py + In Collection: DarkPose + Metadata: + Architecture: + - HRNetv2 + - DarkPose + Training Data: WFLW + Name: td-hm_hrnetv2-w18_dark-8xb64-60e_wflw-256x256 + Results: + - Dataset: WFLW + Metrics: + NME blur: 4.56 + NME expression: 4.29 + NME illumination: 3.96 + NME makeup: 3.89 + NME occlusion: 4.78 + NME pose: 6.98 + NME test: 3.98 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_wflw_256x256_dark-3f8e0c2c_20210125.pth diff --git a/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_wflw.md b/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_wflw.md index c9b8eec066..121f993353 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_wflw.md +++ b/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_wflw.md @@ -1,42 +1,42 @@ - - -
-HRNetv2 (TPAMI'2019) - -```bibtex -@article{WangSCJDZLMTWLX19, - title={Deep High-Resolution Representation Learning for Visual Recognition}, - author={Jingdong Wang and Ke Sun and Tianheng Cheng and - Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and - Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, - journal={TPAMI}, - year={2019} -} -``` - -
- - - -
-WFLW (CVPR'2018) - -```bibtex -@inproceedings{wu2018look, - title={Look at boundary: A boundary-aware face alignment algorithm}, - author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={2129--2138}, - year={2018} -} -``` - -
- -Results on WFLW dataset - -The model is trained on WFLW train. - -| Arch | Input Size | NME*test* | NME*pose* | NME*illumination* | NME*occlusion* | NME*blur* | NME*makeup* | NME*expression* | ckpt | log | -| :--------- | :--------: | :------------------: | :------------------: | :--------------------------: | :-----------------------: | :------------------: | :--------------------: | :------------------------: | :--------: | :-------: | -| [pose_hrnetv2_w18](/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_8xb64-60e_wflw-256x256.py) | 256x256 | 4.06 | 6.97 | 3.99 | 4.83 | 4.58 | 3.94 | 4.33 | [ckpt](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_wflw_256x256-2bf032a6_20210125.pth) | [log](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_wflw_256x256_20210125.log.json) | + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
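One detail worth calling out in the HRNetv2 configs in this diff: the neck is a `FeatureMapProcessor` with `concat=True`, which brings the four HRNetv2-W18 branch outputs to a common resolution and concatenates them, so the heatmap head's `in_channels=270` is simply the sum of the branch widths:

```latex
18 + 36 + 72 + 144 = 270
```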
+ + + +
+WFLW (CVPR'2018) + +```bibtex +@inproceedings{wu2018look, + title={Look at boundary: A boundary-aware face alignment algorithm}, + author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={2129--2138}, + year={2018} +} +``` + +
+ +Results on WFLW dataset + +The model is trained on WFLW train. + +| Arch | Input Size | NME*test* | NME*pose* | NME*illumination* | NME*occlusion* | NME*blur* | NME*makeup* | NME*expression* | ckpt | log | +| :--------- | :--------: | :------------------: | :------------------: | :--------------------------: | :-----------------------: | :------------------: | :--------------------: | :------------------------: | :--------: | :-------: | +| [pose_hrnetv2_w18](/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_8xb64-60e_wflw-256x256.py) | 256x256 | 4.06 | 6.97 | 3.99 | 4.83 | 4.58 | 3.94 | 4.33 | [ckpt](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_wflw_256x256-2bf032a6_20210125.pth) | [log](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_wflw_256x256_20210125.log.json) | diff --git a/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_wflw.yml b/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_wflw.yml index 9124324f8b..2d188c3af7 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_wflw.yml +++ b/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_wflw.yml @@ -1,20 +1,20 @@ -Models: -- Config: configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_8xb64-60e_wflw-256x256.py - In Collection: HRNetv2 - Metadata: - Architecture: - - HRNetv2 - Training Data: WFLW - Name: td-hm_hrnetv2-w18_8xb64-60e_wflw-256x256 - Results: - - Dataset: WFLW - Metrics: - NME blur: 4.58 - NME expression: 4.33 - NME illumination: 3.99 - NME makeup: 3.94 - NME occlusion: 4.83 - NME pose: 6.97 - NME test: 4.06 - Task: Face 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_wflw_256x256-2bf032a6_20210125.pth +Models: +- Config: configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_8xb64-60e_wflw-256x256.py + In Collection: HRNetv2 + Metadata: + Architecture: + - HRNetv2 + Training Data: WFLW + Name: td-hm_hrnetv2-w18_8xb64-60e_wflw-256x256 + Results: + - Dataset: WFLW + Metrics: + NME blur: 4.58 + NME expression: 4.33 + NME illumination: 3.99 + NME makeup: 3.94 + NME occlusion: 4.83 + NME pose: 6.97 + NME test: 4.06 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_wflw_256x256-2bf032a6_20210125.pth diff --git a/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_8xb64-60e_wflw-256x256.py b/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_8xb64-60e_wflw-256x256.py index ae373c816a..507035c5a8 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_8xb64-60e_wflw-256x256.py +++ b/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_8xb64-60e_wflw-256x256.py @@ -1,158 +1,158 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=60, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=2e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=60, - milestones=[40, 55], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - 
data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(18, 36)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(18, 36, 72)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(18, 36, 72, 144), - multiscale_output=True), - upsample=dict(mode='bilinear', align_corners=False)), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), - ), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='HeatmapHead', - in_channels=270, - out_channels=98, - deconv_out_channels=None, - conv_out_channels=(270, ), - conv_kernel_sizes=(1, ), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'WFLWDataset' -data_mode = 'topdown' -data_root = 'data/wflw/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - shift_prob=0, - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/face_landmarks_wflw_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/face_landmarks_wflw_test.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='NME', - norm_mode='keypoint_distance', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=60, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), 
heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=98, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'WFLWDataset' +data_mode = 'topdown' +data_root = 'data/wflw/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_prob=0, + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_awing-8xb64-60e_wflw-256x256.py b/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_awing-8xb64-60e_wflw-256x256.py index ada24a97bb..f6885dabc4 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_awing-8xb64-60e_wflw-256x256.py +++ b/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_awing-8xb64-60e_wflw-256x256.py @@ -1,158 +1,158 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=60, val_interval=1) - -# optimizer -optim_wrapper = 
dict(optimizer=dict( - type='Adam', - lr=2e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=60, - milestones=[40, 55], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(18, 36)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(18, 36, 72)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(18, 36, 72, 144), - multiscale_output=True), - upsample=dict(mode='bilinear', align_corners=False)), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), - ), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='HeatmapHead', - in_channels=270, - out_channels=98, - deconv_out_channels=None, - conv_out_channels=(270, ), - conv_kernel_sizes=(1, ), - loss=dict(type='AdaptiveWingLoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'WFLWDataset' -data_mode = 'topdown' -data_root = 'data/wflw/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - shift_prob=0, - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/face_landmarks_wflw_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/face_landmarks_wflw_test.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='NME', - norm_mode='keypoint_distance', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] 
+ +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=60, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=98, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='AdaptiveWingLoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'WFLWDataset' +data_mode = 'topdown' +data_root = 'data/wflw/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_prob=0, + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', + 
norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_dark-8xb64-60e_wflw-256x256.py b/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_dark-8xb64-60e_wflw-256x256.py index 973a850f3f..e1a47c72ea 100644 --- a/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_dark-8xb64-60e_wflw-256x256.py +++ b/configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_dark-8xb64-60e_wflw-256x256.py @@ -1,162 +1,162 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=60, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=2e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=60, - milestones=[40, 55], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) - -# codec settings -codec = dict( - type='MSRAHeatmap', - input_size=(256, 256), - heatmap_size=(64, 64), - sigma=2, - unbiased=True) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(18, 36)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(18, 36, 72)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(18, 36, 72, 144), - multiscale_output=True), - upsample=dict(mode='bilinear', align_corners=False)), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), - ), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='HeatmapHead', - in_channels=270, - out_channels=98, - deconv_out_channels=None, - conv_out_channels=(270, ), - conv_kernel_sizes=(1, ), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'WFLWDataset' -data_mode = 'topdown' -data_root = 'data/wflw/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - shift_prob=0, - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - 
ann_file='annotations/face_landmarks_wflw_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/face_landmarks_wflw_test.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='NME', - norm_mode='keypoint_distance', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=60, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=2e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=60, + milestones=[40, 55], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=98, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'WFLWDataset' +data_mode = 'topdown' +data_root = 'data/wflw/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_prob=0, + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + 
sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/configs/face_2d_keypoint/topdown_regression/README.md b/configs/face_2d_keypoint/topdown_regression/README.md index 5d20cb9a31..c4b1cb4a87 100644 --- a/configs/face_2d_keypoint/topdown_regression/README.md +++ b/configs/face_2d_keypoint/topdown_regression/README.md @@ -1,19 +1,19 @@ -# Top-down regression-based pose estimation - -Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. At the 2nd stage, regression based methods directly regress the keypoint coordinates given the features extracted from the bounding box area, following the paradigm introduced in [Deeppose: Human pose estimation via deep neural networks](http://openaccess.thecvf.com/content_cvpr_2014/html/Toshev_DeepPose_Human_Pose_2014_CVPR_paper.html). - -
- -
- -## Results and Models - -### WFLW Dataset - -Result on WFLW test set - -| Model | Input Size | NME | ckpt | log | -| :-------------------------------------------------------------- | :--------: | :--: | :------------------------------------------------------------: | :-----------------------------------------------------------: | -| [ResNet-50](/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_8xb64-210e_wflw-256x256.py) | 256x256 | 4.88 | [ckpt](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256-92d0ba7f_20210303.pth) | [log](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_20210303.log.json) | -| [ResNet-50+WingLoss](/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_wingloss_8xb64-210e_wflw-256x256.py) | 256x256 | 4.67 | [ckpt](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_wingloss-f82a5e53_20210303.pth) | [log](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_wingloss_20210303.log.json) | -| [ResNet-50+SoftWingLoss](/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_softwingloss_8xb64-210e_wflw-256x256.py) | 256x256 | 4.44 | [ckpt](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_softwingloss-4d34f22a_20211212.pth) | [log](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_softwingloss_20211212.log.json) | +# Top-down regression-based pose estimation + +Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. At the 2nd stage, regression based methods directly regress the keypoint coordinates given the features extracted from the bounding box area, following the paradigm introduced in [Deeppose: Human pose estimation via deep neural networks](http://openaccess.thecvf.com/content_cvpr_2014/html/Toshev_DeepPose_Human_Pose_2014_CVPR_paper.html). + +
+ +
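To make the two-stage flow concrete, below is a minimal inference sketch, assuming the MMPose 1.x `init_model`/`inference_topdown` API and using the ResNet-50 config and checkpoint from the table that follows. The face bounding box would normally come from a stage-1 detector, so the image path and the hard-coded box here are placeholders.

```python
import numpy as np

from mmpose.apis import inference_topdown, init_model

# Stage-2 model: regress keypoint coordinates inside a given bounding box.
config = 'configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_8xb64-210e_wflw-256x256.py'
checkpoint = 'https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256-92d0ba7f_20210303.pth'
model = init_model(config, checkpoint, device='cpu')

# Stage 1 would be a face detector; a single hypothetical box is used here.
bboxes = np.array([[80, 60, 320, 300]])  # xyxy format, placeholder values
results = inference_topdown(model, 'face.jpg', bboxes=bboxes, bbox_format='xyxy')

# Each element is a PoseDataSample; WFLW models predict 98 landmarks.
keypoints = results[0].pred_instances.keypoints  # array of shape (1, 98, 2)
print(keypoints.shape)
```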
+
+## Results and Models
+
+### WFLW Dataset
+
+Results on WFLW test set
+
+| Model | Input Size | NME | ckpt | log |
+| :-------------------------------------------------------------- | :--------: | :--: | :------------------------------------------------------------: | :-----------------------------------------------------------: |
+| [ResNet-50](/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_8xb64-210e_wflw-256x256.py) | 256x256 | 4.88 | [ckpt](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256-92d0ba7f_20210303.pth) | [log](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_20210303.log.json) |
+| [ResNet-50+WingLoss](/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_wingloss_8xb64-210e_wflw-256x256.py) | 256x256 | 4.67 | [ckpt](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_wingloss-f82a5e53_20210303.pth) | [log](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_wingloss_20210303.log.json) |
+| [ResNet-50+SoftWingLoss](/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_softwingloss_8xb64-210e_wflw-256x256.py) | 256x256 | 4.44 | [ckpt](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_softwingloss-4d34f22a_20211212.pth) | [log](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_softwingloss_20211212.log.json) |
diff --git a/configs/face_2d_keypoint/topdown_regression/wflw/resnet_softwingloss_wflw.md b/configs/face_2d_keypoint/topdown_regression/wflw/resnet_softwingloss_wflw.md
index f1d9629d0a..f36b9392f8 100644
--- a/configs/face_2d_keypoint/topdown_regression/wflw/resnet_softwingloss_wflw.md
+++ b/configs/face_2d_keypoint/topdown_regression/wflw/resnet_softwingloss_wflw.md
@@ -1,75 +1,75 @@
-
-
-DeepPose (CVPR'2014) - -```bibtex -@inproceedings{toshev2014deeppose, - title={Deeppose: Human pose estimation via deep neural networks}, - author={Toshev, Alexander and Szegedy, Christian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={1653--1660}, - year={2014} -} -``` - -
- - - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
- - - -
-SoftWingloss (TIP'2021) - -```bibtex -@article{lin2021structure, - title={Structure-Coherent Deep Feature Learning for Robust Face Alignment}, - author={Lin, Chunze and Zhu, Beier and Wang, Quan and Liao, Renjie and Qian, Chen and Lu, Jiwen and Zhou, Jie}, - journal={IEEE Transactions on Image Processing}, - year={2021}, - publisher={IEEE} -} -``` - -
- - - -
-WFLW (CVPR'2018) - -```bibtex -@inproceedings{wu2018look, - title={Look at boundary: A boundary-aware face alignment algorithm}, - author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={2129--2138}, - year={2018} -} -``` - -
- -Results on WFLW dataset - -The model is trained on WFLW train set. - -| Model | Input Size | NME | ckpt | log | -| :-------------------------------------------------------------- | :--------: | :--: | :------------------------------------------------------------: | :-----------------------------------------------------------: | -| [ResNet-50+SoftWingLoss](/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_softwingloss_8xb64-210e_wflw-256x256.py) | 256x256 | 4.44 | [ckpt](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_softwingloss-4d34f22a_20211212.pth) | [log](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_softwingloss_20211212.log.json) | + + +
+DeepPose (CVPR'2014) + +```bibtex +@inproceedings{toshev2014deeppose, + title={Deeppose: Human pose estimation via deep neural networks}, + author={Toshev, Alexander and Szegedy, Christian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1653--1660}, + year={2014} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+SoftWingloss (TIP'2021) + +```bibtex +@article{lin2021structure, + title={Structure-Coherent Deep Feature Learning for Robust Face Alignment}, + author={Lin, Chunze and Zhu, Beier and Wang, Quan and Liao, Renjie and Qian, Chen and Lu, Jiwen and Zhou, Jie}, + journal={IEEE Transactions on Image Processing}, + year={2021}, + publisher={IEEE} +} +``` + +
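The soft wing loss cited above is what distinguishes the `*_softwingloss*` regression model documented below: it behaves like an L1 loss for small residuals x and switches to a logarithmic penalty for large ones. A sketch of its form, with t, ω, ε the loss hyper-parameters and B a constant chosen so the two branches meet at |x| = t:

```latex
\mathrm{SoftWing}(x) =
\begin{cases}
  |x| & \text{if } |x| < t \\[4pt]
  \omega \ln\!\left(1 + \frac{|x|}{\epsilon}\right) + B & \text{otherwise}
\end{cases}
```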
+ + + +
+WFLW (CVPR'2018) + +```bibtex +@inproceedings{wu2018look, + title={Look at boundary: A boundary-aware face alignment algorithm}, + author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={2129--2138}, + year={2018} +} +``` + +
+ +Results on WFLW dataset + +The model is trained on WFLW train set. + +| Model | Input Size | NME | ckpt | log | +| :-------------------------------------------------------------- | :--------: | :--: | :------------------------------------------------------------: | :-----------------------------------------------------------: | +| [ResNet-50+SoftWingLoss](/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_softwingloss_8xb64-210e_wflw-256x256.py) | 256x256 | 4.44 | [ckpt](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_softwingloss-4d34f22a_20211212.pth) | [log](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_softwingloss_20211212.log.json) | diff --git a/configs/face_2d_keypoint/topdown_regression/wflw/resnet_softwingloss_wflw.yml b/configs/face_2d_keypoint/topdown_regression/wflw/resnet_softwingloss_wflw.yml index 7c65215ccc..72338458f7 100644 --- a/configs/face_2d_keypoint/topdown_regression/wflw/resnet_softwingloss_wflw.yml +++ b/configs/face_2d_keypoint/topdown_regression/wflw/resnet_softwingloss_wflw.yml @@ -1,16 +1,16 @@ -Models: -- Config: configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_softwingloss_8xb64-210e_wflw-256x256.py - In Collection: ResNet - Metadata: - Architecture: - - DeepPose - - ResNet - - SoftWingloss - Training Data: WFLW - Name: td-reg_res50_softwingloss_8xb64-210e_wflw-256x256 - Results: - - Dataset: WFLW - Metrics: - NME: 4.44 - Task: Face 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_softwingloss-4d34f22a_20211212.pth +Models: +- Config: configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_softwingloss_8xb64-210e_wflw-256x256.py + In Collection: ResNet + Metadata: + Architecture: + - DeepPose + - ResNet + - SoftWingloss + Training Data: WFLW + Name: td-reg_res50_softwingloss_8xb64-210e_wflw-256x256 + Results: + - Dataset: WFLW + Metrics: + NME: 4.44 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_softwingloss-4d34f22a_20211212.pth diff --git a/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wflw.md b/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wflw.md index 1ec3e76dba..f60568861a 100644 --- a/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wflw.md +++ b/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wflw.md @@ -1,58 +1,58 @@ - - -
-DeepPose (CVPR'2014) - -```bibtex -@inproceedings{toshev2014deeppose, - title={Deeppose: Human pose estimation via deep neural networks}, - author={Toshev, Alexander and Szegedy, Christian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={1653--1660}, - year={2014} -} -``` - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
-WFLW (CVPR'2018) - -```bibtex -@inproceedings{wu2018look, - title={Look at boundary: A boundary-aware face alignment algorithm}, - author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={2129--2138}, - year={2018} -} -``` - -
- -Results on WFLW dataset - -The model is trained on WFLW train set. - -| Model | Input Size | NME | ckpt | log | -| :-------------------------------------------------------------- | :--------: | :--: | :------------------------------------------------------------: | :-----------------------------------------------------------: | -| [ResNet-50](/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_8xb64-210e_wflw-256x256.py) | 256x256 | 4.88 | [ckpt](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256-92d0ba7f_20210303.pth) | [log](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_20210303.log.json) | + + +
+DeepPose (CVPR'2014) + +```bibtex +@inproceedings{toshev2014deeppose, + title={Deeppose: Human pose estimation via deep neural networks}, + author={Toshev, Alexander and Szegedy, Christian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1653--1660}, + year={2014} +} +``` + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+WFLW (CVPR'2018) + +```bibtex +@inproceedings{wu2018look, + title={Look at boundary: A boundary-aware face alignment algorithm}, + author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={2129--2138}, + year={2018} +} +``` + +
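The checkpoint listed in the table below can be used directly for inference. A minimal sketch, assuming the MMPose 1.x Python API (`init_model` / `inference_topdown`) and the config/checkpoint paths that appear in this diff; `face.jpg` is a placeholder input image:

```python
from mmpose.apis import inference_topdown, init_model

config = ('configs/face_2d_keypoint/topdown_regression/wflw/'
          'td-reg_res50_8xb64-210e_wflw-256x256.py')
checkpoint = ('https://download.openmmlab.com/mmpose/face/deeppose/'
              'deeppose_res50_wflw_256x256-92d0ba7f_20210303.pth')

# Build the top-down regressor and load the released weights.
model = init_model(config, checkpoint, device='cpu')

# Run on a single cropped face image; with no bboxes given, the whole
# image is treated as one instance.
results = inference_topdown(model, 'face.jpg')
print(results[0].pred_instances.keypoints.shape)  # expected: (1, 98, 2)
```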
+ +Results on WFLW dataset + +The model is trained on WFLW train set. + +| Model | Input Size | NME | ckpt | log | +| :-------------------------------------------------------------- | :--------: | :--: | :------------------------------------------------------------: | :-----------------------------------------------------------: | +| [ResNet-50](/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_8xb64-210e_wflw-256x256.py) | 256x256 | 4.88 | [ckpt](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256-92d0ba7f_20210303.pth) | [log](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_20210303.log.json) | diff --git a/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wflw.yml b/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wflw.yml index 81c7b79a7e..8681de7201 100644 --- a/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wflw.yml +++ b/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wflw.yml @@ -1,15 +1,15 @@ -Models: -- Config: configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_8xb64-210e_wflw-256x256.py - In Collection: ResNet - Metadata: - Architecture: - - DeepPose - - ResNet - Training Data: WFLW - Name: td-reg_res50_8x64e-210e_wflw-256x256 - Results: - - Dataset: WFLW - Metrics: - NME: 4.88 - Task: Face 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256-92d0ba7f_20210303.pth +Models: +- Config: configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_8xb64-210e_wflw-256x256.py + In Collection: ResNet + Metadata: + Architecture: + - DeepPose + - ResNet + Training Data: WFLW + Name: td-reg_res50_8x64e-210e_wflw-256x256 + Results: + - Dataset: WFLW + Metrics: + NME: 4.88 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256-92d0ba7f_20210303.pth diff --git a/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wingloss_wflw.md b/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wingloss_wflw.md index 51477143d1..5dc9adcd97 100644 --- a/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wingloss_wflw.md +++ b/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wingloss_wflw.md @@ -1,76 +1,76 @@ - - -
-DeepPose (CVPR'2014) - -```bibtex -@inproceedings{toshev2014deeppose, - title={Deeppose: Human pose estimation via deep neural networks}, - author={Toshev, Alexander and Szegedy, Christian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={1653--1660}, - year={2014} -} -``` - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
-Wingloss (CVPR'2018) - -```bibtex -@inproceedings{feng2018wing, - title={Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural Networks}, - author={Feng, Zhen-Hua and Kittler, Josef and Awais, Muhammad and Huber, Patrik and Wu, Xiao-Jun}, - booktitle={Computer Vision and Pattern Recognition (CVPR), 2018 IEEE Conference on}, - year={2018}, - pages ={2235-2245}, - organization={IEEE} -} -``` - -
-WFLW (CVPR'2018) - -```bibtex -@inproceedings{wu2018look, - title={Look at boundary: A boundary-aware face alignment algorithm}, - author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={2129--2138}, - year={2018} -} -``` - -
- -Results on WFLW dataset - -The model is trained on WFLW train set. - -| Model | Input Size | NME | ckpt | log | -| :-------------------------------------------------------------- | :--------: | :--: | :------------------------------------------------------------: | :-----------------------------------------------------------: | -| [ResNet-50+WingLoss](/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_wingloss_8xb64-210e_wflw-256x256.py) | 256x256 | 4.67 | [ckpt](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_wingloss-f82a5e53_20210303.pth) | [log](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_wingloss_20210303.log.json) | + + +
+DeepPose (CVPR'2014) + +```bibtex +@inproceedings{toshev2014deeppose, + title={Deeppose: Human pose estimation via deep neural networks}, + author={Toshev, Alexander and Szegedy, Christian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1653--1660}, + year={2014} +} +``` + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+Wingloss (CVPR'2018) + +```bibtex +@inproceedings{feng2018wing, + title={Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural Networks}, + author={Feng, Zhen-Hua and Kittler, Josef and Awais, Muhammad and Huber, Patrik and Wu, Xiao-Jun}, + booktitle={Computer Vision and Pattern Recognition (CVPR), 2018 IEEE Conference on}, + year={2018}, + pages ={2235-2245}, + organization={IEEE} +} +``` + +
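The Wing loss cited above amplifies the contribution of small and medium localization errors via a logarithmic region around zero. A sketch of the element-wise form; omega/epsilon are assumed defaults, since the config below only selects `type='WingLoss'`:

```python
import math

import torch


def wing_loss(pred, target, omega=10.0, epsilon=2.0):
    """Element-wise Wing loss (sketch).

    loss(x) = omega * ln(1 + |x| / epsilon)  if |x| < omega
            = |x| - C                        otherwise,
    with C = omega - omega * ln(1 + omega / epsilon) so the two pieces
    meet at |x| = omega. The constants are assumptions, not values set
    in this PR.
    """
    delta = (target - pred).abs()
    c = omega - omega * math.log(1.0 + omega / epsilon)
    losses = torch.where(
        delta < omega,
        omega * torch.log(1.0 + delta / epsilon),
        delta - c,
    )
    return losses.mean()


print(wing_loss(torch.zeros(1, 98, 2), torch.full((1, 98, 2), 0.5)))
```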
+WFLW (CVPR'2018) + +```bibtex +@inproceedings{wu2018look, + title={Look at boundary: A boundary-aware face alignment algorithm}, + author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={2129--2138}, + year={2018} +} +``` + +
+ +Results on WFLW dataset + +The model is trained on WFLW train set. + +| Model | Input Size | NME | ckpt | log | +| :-------------------------------------------------------------- | :--------: | :--: | :------------------------------------------------------------: | :-----------------------------------------------------------: | +| [ResNet-50+WingLoss](/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_wingloss_8xb64-210e_wflw-256x256.py) | 256x256 | 4.67 | [ckpt](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_wingloss-f82a5e53_20210303.pth) | [log](https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_wingloss_20210303.log.json) | diff --git a/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wingloss_wflw.yml b/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wingloss_wflw.yml index 49b409121a..5f1d83e200 100644 --- a/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wingloss_wflw.yml +++ b/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wingloss_wflw.yml @@ -1,16 +1,16 @@ -Models: -- Config: configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_wingloss_8xb64-210e_wflw-256x256.py - In Collection: ResNet - Metadata: - Architecture: - - DeepPose - - ResNet - - WingLoss - Training Data: WFLW - Name: td-reg_res50_wingloss_8xb64-210e_wflw-256x256 - Results: - - Dataset: WFLW - Metrics: - NME: 4.67 - Task: Face 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_wingloss-f82a5e53_20210303.pth +Models: +- Config: configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_wingloss_8xb64-210e_wflw-256x256.py + In Collection: ResNet + Metadata: + Architecture: + - DeepPose + - ResNet + - WingLoss + Training Data: WFLW + Name: td-reg_res50_wingloss_8xb64-210e_wflw-256x256 + Results: + - Dataset: WFLW + Metrics: + NME: 4.67 + Task: Face 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256_wingloss-f82a5e53_20210303.pth diff --git a/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_8xb64-210e_wflw-256x256.py b/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_8xb64-210e_wflw-256x256.py index 2742f497b8..dd9ade768b 100644 --- a/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_8xb64-210e_wflw-256x256.py +++ b/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_8xb64-210e_wflw-256x256.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict(type='RegressionLabel', input_size=(256, 256)) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - neck=dict(type='GlobalAveragePooling'), - head=dict( - type='RegressionHead', - 
in_channels=2048, - num_joints=98, - loss=dict(type='SmoothL1Loss', use_target_weight=True), - decoder=codec), - train_cfg=dict(), - test_cfg=dict( - flip_test=True, - shift_coords=True, - )) - -# base dataset settings -dataset_type = 'WFLWDataset' -data_mode = 'topdown' -data_root = 'data/wflw/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# dataloaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/face_landmarks_wflw_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/face_landmarks_wflw_test.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict(checkpoint=dict(save_best='NME', rule='less')) - -# evaluators -val_evaluator = dict( - type='NME', - norm_mode='keypoint_distance', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(256, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RegressionHead', + in_channels=2048, + num_joints=98, + loss=dict(type='SmoothL1Loss', use_target_weight=True), + decoder=codec), + train_cfg=dict(), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'WFLWDataset' +data_mode = 'topdown' +data_root = 'data/wflw/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + 
dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# dataloaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less')) + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_softwingloss_8xb64-210e_wflw-256x256.py b/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_softwingloss_8xb64-210e_wflw-256x256.py index eb4199073d..beae1bf487 100644 --- a/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_softwingloss_8xb64-210e_wflw-256x256.py +++ b/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_softwingloss_8xb64-210e_wflw-256x256.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict(type='RegressionLabel', input_size=(256, 256)) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - neck=dict(type='GlobalAveragePooling'), - head=dict( - type='RegressionHead', - in_channels=2048, - num_joints=98, - loss=dict(type='SoftWingLoss', use_target_weight=True), - decoder=codec), - train_cfg=dict(), - test_cfg=dict( - flip_test=True, - shift_coords=True, - )) - -# base dataset settings -dataset_type = 'WFLWDataset' -data_mode = 'topdown' -data_root = 'data/wflw/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', 
input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# dataloaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/face_landmarks_wflw_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/face_landmarks_wflw_test.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict(checkpoint=dict(save_best='NME', rule='less')) - -# evaluators -val_evaluator = dict( - type='NME', - norm_mode='keypoint_distance', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(256, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RegressionHead', + in_channels=2048, + num_joints=98, + loss=dict(type='SoftWingLoss', use_target_weight=True), + decoder=codec), + train_cfg=dict(), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'WFLWDataset' +data_mode = 'topdown' +data_root = 'data/wflw/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# dataloaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + 
data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less')) + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_wingloss_8xb64-210e_wflw-256x256.py b/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_wingloss_8xb64-210e_wflw-256x256.py index ab519cd401..2f625e6859 100644 --- a/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_wingloss_8xb64-210e_wflw-256x256.py +++ b/configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_wingloss_8xb64-210e_wflw-256x256.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict(type='RegressionLabel', input_size=(256, 256)) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - neck=dict(type='GlobalAveragePooling'), - head=dict( - type='RegressionHead', - in_channels=2048, - num_joints=98, - loss=dict(type='WingLoss', use_target_weight=True), - decoder=codec), - train_cfg=dict(), - test_cfg=dict( - flip_test=True, - shift_coords=True, - )) - -# base dataset settings -dataset_type = 'WFLWDataset' -data_mode = 'topdown' -data_root = 'data/wflw/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# dataloaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/face_landmarks_wflw_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/face_landmarks_wflw_test.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) 
-test_dataloader = val_dataloader - -# hooks -default_hooks = dict(checkpoint=dict(save_best='NME', rule='less')) - -# evaluators -val_evaluator = dict( - type='NME', - norm_mode='keypoint_distance', -) -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(256, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RegressionHead', + in_channels=2048, + num_joints=98, + loss=dict(type='WingLoss', use_target_weight=True), + decoder=codec), + train_cfg=dict(), + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'WFLWDataset' +data_mode = 'topdown' +data_root = 'data/wflw/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# dataloaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less')) + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/configs/fashion_2d_keypoint/README.md b/configs/fashion_2d_keypoint/README.md index e7d761067a..f4ec40fff4 100644 --- a/configs/fashion_2d_keypoint/README.md +++ b/configs/fashion_2d_keypoint/README.md @@ -1,7 +1,7 @@ -# 2D Fashion Landmark Detection - -2D fashion landmark detection (also referred to as fashion alignment) aims to detect the key-point located at the functional region of clothes, for example the 
neckline and the cuff. - -## Data preparation - -Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_fashion_landmark.md) to prepare data. +# 2D Fashion Landmark Detection + +2D fashion landmark detection (also referred to as fashion alignment) aims to detect the key points located at the functional regions of clothes, for example, the neckline and the cuff. + +## Data preparation + +Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_fashion_landmark.md) to prepare data. diff --git a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/res50_deepfashion2.md b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/res50_deepfashion2.md index 1dcfd59313..c19eced188 100644 --- a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/res50_deepfashion2.md +++ b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/res50_deepfashion2.md @@ -1,67 +1,67 @@ - - 
-SimpleBaseline2D (ECCV'2018) - -```bibtex -@inproceedings{xiao2018simple, - title={Simple baselines for human pose estimation and tracking}, - author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, - booktitle={Proceedings of the European conference on computer vision (ECCV)}, - pages={466--481}, - year={2018} -} -``` - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
-DeepFashion2 (CVPR'2019) - -```bibtex -@article{DeepFashion2, - author = {Yuying Ge and Ruimao Zhang and Lingyun Wu and Xiaogang Wang and Xiaoou Tang and Ping Luo}, - title={A Versatile Benchmark for Detection, Pose Estimation, Segmentation and Re-Identification of Clothing Images}, - journal={CVPR}, - year={2019} -} -``` - -
- -Results on DeepFashion2 val set - -| Set | Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | -| :-------------------- | :-------------------------------------------------: | :--------: | :-----: | :---: | :--: | :-------------------------------------------------: | :-------------------------------------------------: | -| short_sleeved_shirt | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_6xb64-210e_deepfasion2-short-sleeved-shirt-256x192.py) | 256x192 | 0.988 | 0.703 | 10.2 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_shirt_256x192-21e1c5da_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_shirt_256x192_20221208.log.json) | -| long_sleeved_shirt | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-shirt-256x192.py) | 256x192 | 0.973 | 0.587 | 16.6 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_shirt_256x192-8679e7e3_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_shirt_256x192_20221208.log.json) | -| short_sleeved_outwear | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-short-sleeved-outwear-256x192.py) | 256x192 | 0.966 | 0.408 | 24.0 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_outwear_256x192-a04c1298_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_outwear_256x192_20221208.log.json) | -| long_sleeved_outwear | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-outwear-256x192.py) | 256x192 | 0.987 | 0.517 | 18.1 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_outwear_256x192-31fbaecf_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_outwear_256x192_20221208.log.json) | -| vest | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-vest-256x192.py) | 256x192 | 0.981 | 0.643 | 12.7 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_vest_256x192-4c48d05c_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_vest_256x192_20221208.log.json) | -| sling | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-256x192.py) | 256x192 | 0.940 | 0.557 | 21.6 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_sling_256x192-ebb2b736_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_sling_256x192_20221208.log.json) | -| shorts | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_3xb64-210e_deepfasion2-shorts-256x192.py) | 256x192 | 0.975 | 0.682 | 12.4 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_shorts_256x192-9ab23592_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_shorts_256x192_20221208.log.json) | -| trousers | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_2xb64-210e_deepfasion2-trousers-256x192.py) | 256x192 | 
0.973 | 0.625 | 14.8 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_trousers_256x192-3e632257_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_trousers_256x192_20221208.log.json) | -| skirt | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-skirt-256x192.py) | 256x192 | 0.952 | 0.653 | 16.6 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_skirt_256x192-09573469_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_skirt_256x192_20221208.log.json) | -| short_sleeved_dress | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-short-sleeved-dress-256x192.py) | 256x192 | 0.980 | 0.603 | 15.6 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_dress_256x192-1345b07a_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_dress_256x192_20221208.log.json) | -| long_sleeved_dress | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-long-sleeved-dress-256x192.py) | 256x192 | 0.976 | 0.518 | 20.1 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_dress_256x192-87bac74e_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_dress_256x192_20221208.log.json) | -| vest_dress | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-vest-dress-256x192.py) | 256x192 | 0.980 | 0.600 | 16.0 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_vest_dress_256x192-fb3fbd6f_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_vest_dress_256x192_20221208.log.json) | -| sling_dress | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-dress-256x192.py) | 256x192 | 0.967 | 0.544 | 19.5 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_sling_dress_256x192-8ebae0eb_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_sling_dress_256x192_20221208.log.json) | + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
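The DeepFashion2 configs below follow the SimpleBaseline recipe: a ResNet-50 feature map is upsampled by a stack of transposed convolutions and mapped to one heatmap per keypoint (294 channels here, covering all DeepFashion2 landmark definitions). A toy sketch of that head; the three 256-channel deconv stages are the usual SimpleBaseline choice and are assumed rather than read from this PR:

```python
import torch
from torch import nn


def simple_baseline_head(in_channels=2048, out_channels=294):
    """Toy SimpleBaseline-style head: 3 deconv stages + 1x1 conv.

    The 3 x 256-channel deconv layout is the common SimpleBaseline
    setting and is assumed here; MMPose's `HeatmapHead` is configurable.
    """
    layers = []
    channels = in_channels
    for _ in range(3):
        layers += [
            nn.ConvTranspose2d(channels, 256, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
        ]
        channels = 256
    layers.append(nn.Conv2d(256, out_channels, kernel_size=1))
    return nn.Sequential(*layers)


# A 192x256 (w x h) crop gives an 8x6 (h x w) ResNet-50 feature map; three
# 2x upsamplings recover the 48x64 heatmap size used by the codec below.
feats = torch.randn(1, 2048, 8, 6)
print(simple_baseline_head()(feats).shape)  # torch.Size([1, 294, 64, 48])
```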
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
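Supervision targets in these configs are encoded with the `MSRAHeatmap` codec (192x256 input, 48x64 heatmaps, sigma=2), i.e. each visible keypoint becomes a small 2D Gaussian on its own channel. A numpy sketch of that encoding, for illustration rather than MMPose's actual codec implementation:

```python
import numpy as np


def gaussian_heatmap(kpt_xy, heatmap_size=(48, 64), input_size=(192, 256), sigma=2.0):
    """Render one keypoint as a 2D Gaussian target (sketch).

    kpt_xy: (x, y) in input-image pixels. Sizes follow the codec settings
    in these configs: 192x256 input, 48x64 heatmap, sigma=2.
    """
    w, h = heatmap_size
    stride_x = input_size[0] / w                 # 192 / 48 = 4
    stride_y = input_size[1] / h                 # 256 / 64 = 4
    cx, cy = kpt_xy[0] / stride_x, kpt_xy[1] / stride_y
    xs = np.arange(w, dtype=np.float32)
    ys = np.arange(h, dtype=np.float32)[:, None]
    return np.exp(-((xs - cx) ** 2 + (ys - cy) ** 2) / (2 * sigma ** 2))


hm = gaussian_heatmap((96.0, 128.0))
print(hm.shape, np.unravel_index(hm.argmax(), hm.shape))  # (64, 48), peak near (32, 24)
```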
+DeepFashion2 (CVPR'2019) + +```bibtex +@article{DeepFashion2, + author = {Yuying Ge and Ruimao Zhang and Lingyun Wu and Xiaogang Wang and Xiaoou Tang and Ping Luo}, + title={A Versatile Benchmark for Detection, Pose Estimation, Segmentation and Re-Identification of Clothing Images}, + journal={CVPR}, + year={2019} +} +``` + +
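The table below reports PCK@0.2, AUC and EPE, matching the evaluators in these configs (`PCKAccuracy(thr=0.2)`, `AUC`, `EPE`). A rough sketch of PCK@0.2 and EPE; normalizing by a single bounding-box scale per instance is an assumption, and MMPose's own implementation differs in detail:

```python
import numpy as np


def pck_and_epe(pred, gt, bbox_size, thr=0.2):
    """PCK@thr and EPE for one batch (sketch).

    pred, gt: (N, K, 2) keypoints in pixels; bbox_size: (N,) per-instance
    normalization (assumed here to be a bounding-box scale). A keypoint
    counts as correct if its error is below thr * bbox_size; EPE is the
    mean pixel error.
    """
    err = np.linalg.norm(pred - gt, axis=-1)            # (N, K)
    pck = (err / bbox_size[:, None] < thr).mean()
    epe = err.mean()
    return pck, epe


pred = np.random.rand(8, 294, 2) * 192
gt = pred + np.random.randn(8, 294, 2) * 5
print(pck_and_epe(pred, gt, bbox_size=np.full(8, 200.0)))
```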
+ +Results on DeepFashion2 val set + +| Set | Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :-------------------- | :-------------------------------------------------: | :--------: | :-----: | :---: | :--: | :-------------------------------------------------: | :-------------------------------------------------: | +| short_sleeved_shirt | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_6xb64-210e_deepfasion2-short-sleeved-shirt-256x192.py) | 256x192 | 0.988 | 0.703 | 10.2 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_shirt_256x192-21e1c5da_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_shirt_256x192_20221208.log.json) | +| long_sleeved_shirt | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-shirt-256x192.py) | 256x192 | 0.973 | 0.587 | 16.6 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_shirt_256x192-8679e7e3_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_shirt_256x192_20221208.log.json) | +| short_sleeved_outwear | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-short-sleeved-outwear-256x192.py) | 256x192 | 0.966 | 0.408 | 24.0 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_outwear_256x192-a04c1298_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_outwear_256x192_20221208.log.json) | +| long_sleeved_outwear | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-outwear-256x192.py) | 256x192 | 0.987 | 0.517 | 18.1 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_outwear_256x192-31fbaecf_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_outwear_256x192_20221208.log.json) | +| vest | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-vest-256x192.py) | 256x192 | 0.981 | 0.643 | 12.7 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_vest_256x192-4c48d05c_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_vest_256x192_20221208.log.json) | +| sling | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-256x192.py) | 256x192 | 0.940 | 0.557 | 21.6 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_sling_256x192-ebb2b736_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_sling_256x192_20221208.log.json) | +| shorts | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_3xb64-210e_deepfasion2-shorts-256x192.py) | 256x192 | 0.975 | 0.682 | 12.4 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_shorts_256x192-9ab23592_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_shorts_256x192_20221208.log.json) | +| trousers | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_2xb64-210e_deepfasion2-trousers-256x192.py) | 256x192 | 
0.973 | 0.625 | 14.8 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_trousers_256x192-3e632257_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_trousers_256x192_20221208.log.json) | +| skirt | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-skirt-256x192.py) | 256x192 | 0.952 | 0.653 | 16.6 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_skirt_256x192-09573469_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_skirt_256x192_20221208.log.json) | +| short_sleeved_dress | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-short-sleeved-dress-256x192.py) | 256x192 | 0.980 | 0.603 | 15.6 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_dress_256x192-1345b07a_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_dress_256x192_20221208.log.json) | +| long_sleeved_dress | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-long-sleeved-dress-256x192.py) | 256x192 | 0.976 | 0.518 | 20.1 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_dress_256x192-87bac74e_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_dress_256x192_20221208.log.json) | +| vest_dress | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-vest-dress-256x192.py) | 256x192 | 0.980 | 0.600 | 16.0 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_vest_dress_256x192-fb3fbd6f_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_vest_dress_256x192_20221208.log.json) | +| sling_dress | [pose_resnet_50](/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-dress-256x192.py) | 256x192 | 0.967 | 0.544 | 19.5 | [ckpt](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_sling_dress_256x192-8ebae0eb_20221208.pth) | [log](https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_sling_dress_256x192_20221208.log.json) | diff --git a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/res50_deepfasion2.yml b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/res50_deepfasion2.yml index 28825fa011..61b8a652cb 100644 --- a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/res50_deepfasion2.yml +++ b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/res50_deepfasion2.yml @@ -1,185 +1,185 @@ -Models: -- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_6xb64-210e_deepfasion2-short-sleeved-shirt-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: &id001 - - SimpleBaseline2D - - ResNet - Training Data: DeepFashion2 - Name: td-hm_res50_6xb64-210e_deepfasion2-short-sleeved-shirt-256x192 - Results: - - Dataset: DeepFashion2 - Metrics: - AUC: 0.703 - EPE: 10.2 - PCK@0.2: 0.988 - Task: Fashion 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_shirt_256x192-21e1c5da_20221208.pth -- Config: 
configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-shirt-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: DeepFashion2 - Name: td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-shirt-256x192 - Results: - - Dataset: DeepFashion2 - Metrics: - AUC: 0.587 - EPE: 16.5 - PCK@0.2: 0.973 - Task: Fashion 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_shirt_256x192-8679e7e3_20221208.pth -- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-short-sleeved-outwear-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: DeepFashion2 - Name: td-hm_res50_8xb64-210e_deepfasion2-short-sleeved-outwear-256x192 - Results: - - Dataset: DeepFashion2 - Metrics: - AUC: 0.408 - EPE: 24.0 - PCK@0.2: 0.966 - Task: Fashion 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_outwear_256x192-a04c1298_20221208.pth -- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-outwear-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: DeepFashion2 - Name: td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-outwear-256x192 - Results: - - Dataset: DeepFashion2 - Metrics: - AUC: 0.517 - EPE: 18.1 - PCK@0.2: 0.987 - Task: Fashion 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_outwear_256x192-31fbaecf_20221208.pth -- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-vest-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: DeepFashion2 - Name: td-hm_res50_4xb64-210e_deepfasion2-vest-256x192 - Results: - - Dataset: DeepFashion2 - Metrics: - AUC: 0.643 - EPE: 12.7 - PCK@0.2: 0.981 - Task: Fashion 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_vest_256x192-4c48d05c_20221208.pth -- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: DeepFashion2 - Name: td-hm_res50_4xb64-210e_deepfasion2-sling-256x192 - Results: - - Dataset: DeepFashion2 - Metrics: - AUC: 0.557 - EPE: 21.6 - PCK@0.2: 0.94 - Task: Fashion 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_sling_256x192-ebb2b736_20221208.pth -- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_3xb64-210e_deepfasion2-shorts-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: DeepFashion2 - Name: td-hm_res50_3xb64-210e_deepfasion2-shorts-256x192 - Results: - - Dataset: DeepFashion2 - Metrics: - AUC: 0.682 - EPE: 12.4 - PCK@0.2: 0.975 - Task: Fashion 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_shorts_256x192-9ab23592_20221208.pth -- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_2xb64-210e_deepfasion2-trousers-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: DeepFashion2 - Name: td-hm_res50_2xb64-210e_deepfasion2-trousers-256x192 - Results: - - Dataset: DeepFashion2 - Metrics: - AUC: 0.625 - EPE: 14.8 - 
PCK@0.2: 0.973 - Task: Fashion 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_trousers_256x192-3e632257_20221208.pth -- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-skirt-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: DeepFashion2 - Name: td-hm_res50_1xb64-210e_deepfasion2-skirt-256x192 - Results: - - Dataset: DeepFashion2 - Metrics: - AUC: 0.653 - EPE: 16.6 - PCK@0.2: 0.952 - Task: Fashion 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_skirt_256x192-09573469_20221208.pth -- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-short-sleeved-dress-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: DeepFashion2 - Name: td-hm_res50_4xb64-210e_deepfasion2-short-sleeved-dress-256x192 - Results: - - Dataset: DeepFashion2 - Metrics: - AUC: 0.603 - EPE: 15.6 - PCK@0.2: 0.98 - Task: Fashion 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_dress_256x192-1345b07a_20221208.pth -- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-long-sleeved-dress-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: DeepFashion2 - Name: td-hm_res50_1xb64-210e_deepfasion2-long-sleeved-dress-256x192 - Results: - - Dataset: DeepFashion2 - Metrics: - AUC: 0.518 - EPE: 20.1 - PCK@0.2: 0.976 - Task: Fashion 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_dress_256x192-87bac74e_20221208.pth -- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-vest-dress-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: DeepFashion2 - Name: td-hm_res50_1xb64-210e_deepfasion2-vest-dress-256x192 - Results: - - Dataset: DeepFashion2 - Metrics: - AUC: 0.6 - EPE: 16.0 - PCK@0.2: 0.98 - Task: Fashion 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_vest_dress_256x192-fb3fbd6f_20221208.pth -- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-dress-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: DeepFashion2 - Name: td-hm_res50_4xb64-210e_deepfasion2-sling-dress-256x192 - Results: - - Dataset: DeepFashion2 - Metrics: - AUC: 0.544 - EPE: 19.5 - PCK@0.2: 0.967 - Task: Fashion 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_sling_dress_256x192-8ebae0eb_20221208.pth +Models: +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_6xb64-210e_deepfasion2-short-sleeved-shirt-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + - ResNet + Training Data: DeepFashion2 + Name: td-hm_res50_6xb64-210e_deepfasion2-short-sleeved-shirt-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.703 + EPE: 10.2 + PCK@0.2: 0.988 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_shirt_256x192-21e1c5da_20221208.pth +- Config: 
configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-shirt-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion2 + Name: td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-shirt-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.587 + EPE: 16.5 + PCK@0.2: 0.973 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_shirt_256x192-8679e7e3_20221208.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-short-sleeved-outwear-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion2 + Name: td-hm_res50_8xb64-210e_deepfasion2-short-sleeved-outwear-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.408 + EPE: 24.0 + PCK@0.2: 0.966 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_outwear_256x192-a04c1298_20221208.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-outwear-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion2 + Name: td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-outwear-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.517 + EPE: 18.1 + PCK@0.2: 0.987 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_outwear_256x192-31fbaecf_20221208.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-vest-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion2 + Name: td-hm_res50_4xb64-210e_deepfasion2-vest-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.643 + EPE: 12.7 + PCK@0.2: 0.981 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_vest_256x192-4c48d05c_20221208.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion2 + Name: td-hm_res50_4xb64-210e_deepfasion2-sling-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.557 + EPE: 21.6 + PCK@0.2: 0.94 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_sling_256x192-ebb2b736_20221208.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_3xb64-210e_deepfasion2-shorts-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion2 + Name: td-hm_res50_3xb64-210e_deepfasion2-shorts-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.682 + EPE: 12.4 + PCK@0.2: 0.975 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_shorts_256x192-9ab23592_20221208.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_2xb64-210e_deepfasion2-trousers-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion2 + Name: td-hm_res50_2xb64-210e_deepfasion2-trousers-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.625 + EPE: 14.8 + 
PCK@0.2: 0.973 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_trousers_256x192-3e632257_20221208.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-skirt-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion2 + Name: td-hm_res50_1xb64-210e_deepfasion2-skirt-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.653 + EPE: 16.6 + PCK@0.2: 0.952 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_skirt_256x192-09573469_20221208.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-short-sleeved-dress-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion2 + Name: td-hm_res50_4xb64-210e_deepfasion2-short-sleeved-dress-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.603 + EPE: 15.6 + PCK@0.2: 0.98 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_short_sleeved_dress_256x192-1345b07a_20221208.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-long-sleeved-dress-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion2 + Name: td-hm_res50_1xb64-210e_deepfasion2-long-sleeved-dress-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.518 + EPE: 20.1 + PCK@0.2: 0.976 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_long_sleeved_dress_256x192-87bac74e_20221208.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-vest-dress-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion2 + Name: td-hm_res50_1xb64-210e_deepfasion2-vest-dress-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.6 + EPE: 16.0 + PCK@0.2: 0.98 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_vest_dress_256x192-fb3fbd6f_20221208.pth +- Config: configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-dress-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: DeepFashion2 + Name: td-hm_res50_4xb64-210e_deepfasion2-sling-dress-256x192 + Results: + - Dataset: DeepFashion2 + Metrics: + AUC: 0.544 + EPE: 19.5 + PCK@0.2: 0.967 + Task: Fashion 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/fashion/resnet/res50_deepfashion2_sling_dress_256x192-8ebae0eb_20221208.pth diff --git a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-long-sleeved-dress-256x192.py b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-long-sleeved-dress-256x192.py index 09dfaaa390..437b9aa971 100644 --- a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-long-sleeved-dress-256x192.py +++ b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-long-sleeved-dress-256x192.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper 
= dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=64) - -# hooks -default_hooks = dict( - logger=dict(type='LoggerHook', interval=10), - checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=294, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'DeepFashion2Dataset' -data_mode = 'topdown' -data_root = 'data/deepfasion2/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='train/deepfashion2_long_sleeved_dress_train.json', - data_prefix=dict(img='train/image/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='validation/deepfashion2_long_sleeved_dress_validation.json', - data_prefix=dict(img='validation/image/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=64) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + 
type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_long_sleeved_dress_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/deepfashion2_long_sleeved_dress_validation.json', + data_prefix=dict(img='validation/image/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-skirt-256x192.py b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-skirt-256x192.py index f0e6f0c632..3b8ec62817 100644 --- a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-skirt-256x192.py +++ b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-skirt-256x192.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=64) - -# hooks -default_hooks = dict( - logger=dict(type='LoggerHook', interval=10), - checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), 
sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=294, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'DeepFashion2Dataset' -data_mode = 'topdown' -data_root = 'data/deepfasion2/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='train/deepfashion2_skirt_train.json', - data_prefix=dict(img='train/image/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='validation/deepfashion2_skirt_validation.json', - data_prefix=dict(img='validation/image/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=64) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings 
+dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_skirt_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/deepfashion2_skirt_validation.json', + data_prefix=dict(img='validation/image/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-vest-dress-256x192.py b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-vest-dress-256x192.py index 9bed742199..1883314764 100644 --- a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-vest-dress-256x192.py +++ b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_1xb64-210e_deepfasion2-vest-dress-256x192.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=64) - -# hooks -default_hooks = dict( - logger=dict(type='LoggerHook', interval=10), - checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=294, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'DeepFashion2Dataset' -data_mode = 'topdown' -data_root 
= 'data/deepfasion2/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='train/deepfashion2_vest_dress_train.json', - data_prefix=dict(img='train/image/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='validation/deepfashion2_vest_dress_validation.json', - data_prefix=dict(img='validation/image/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=64) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + 
persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_vest_dress_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/deepfashion2_vest_dress_validation.json', + data_prefix=dict(img='validation/image/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_2xb64-210e_deepfasion2-trousers-256x192.py b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_2xb64-210e_deepfasion2-trousers-256x192.py index 617e59ae74..a5f66377f4 100644 --- a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_2xb64-210e_deepfasion2-trousers-256x192.py +++ b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_2xb64-210e_deepfasion2-trousers-256x192.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=128) - -# hooks -default_hooks = dict( - logger=dict(type='LoggerHook', interval=10), - checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=294, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'DeepFashion2Dataset' -data_mode = 'topdown' -data_root = 'data/deepfasion2/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', 
shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='train/deepfashion2_trousers_train.json', - data_prefix=dict(img='train/image/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='validation/deepfashion2_trousers_validation.json', - data_prefix=dict(img='validation/image/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=128) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_trousers_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/deepfashion2_trousers_validation.json', + data_prefix=dict(img='validation/image/'), + 
test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_3xb64-210e_deepfasion2-shorts-256x192.py b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_3xb64-210e_deepfasion2-shorts-256x192.py index aa3b2774fc..0a00361fd7 100644 --- a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_3xb64-210e_deepfasion2-shorts-256x192.py +++ b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_3xb64-210e_deepfasion2-shorts-256x192.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=192) - -# hooks -default_hooks = dict( - logger=dict(type='LoggerHook', interval=10), - checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=294, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'DeepFashion2Dataset' -data_mode = 'topdown' -data_root = 'data/deepfasion2/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='train/deepfashion2_shorts_train.json', - data_prefix=dict(img='train/image/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='validation/deepfashion2_shorts_validation.json', - data_prefix=dict(img='validation/image/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# 
evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=192) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_shorts_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/deepfashion2_shorts_validation.json', + data_prefix=dict(img='validation/image/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-short-sleeved-dress-256x192.py b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-short-sleeved-dress-256x192.py index 0bfcabaa54..d865565f0d 100644 --- a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-short-sleeved-dress-256x192.py +++ 
b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-short-sleeved-dress-256x192.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict( - logger=dict(type='LoggerHook', interval=10), - checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=294, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'DeepFashion2Dataset' -data_mode = 'topdown' -data_root = 'data/deepfasion2/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='train/deepfashion2_short_sleeved_dress_train.json', - data_prefix=dict(img='train/image/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='validation/deepfashion2_short_sleeved_dress_validation.json', - data_prefix=dict(img='validation/image/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + 
by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_short_sleeved_dress_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/deepfashion2_short_sleeved_dress_validation.json', + data_prefix=dict(img='validation/image/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-256x192.py b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-256x192.py index f627eb182c..eb42c72412 100644 --- a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-256x192.py +++ b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-256x192.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the 
actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict( - logger=dict(type='LoggerHook', interval=10), - checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=294, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'DeepFashion2Dataset' -data_mode = 'topdown' -data_root = 'data/deepfasion2/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='train/deepfashion2_sling_train.json', - data_prefix=dict(img='train/image/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='validation/deepfashion2_sling_validation.json', - data_prefix=dict(img='validation/image/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', 
checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_sling_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/deepfashion2_sling_validation.json', + data_prefix=dict(img='validation/image/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-dress-256x192.py b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-dress-256x192.py index 8b59607060..8d206f32f8 100644 --- a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-dress-256x192.py +++ b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-sling-dress-256x192.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict( - logger=dict(type='LoggerHook', interval=10), - checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - 
type='HeatmapHead', - in_channels=2048, - out_channels=294, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'DeepFashion2Dataset' -data_mode = 'topdown' -data_root = 'data/deepfasion2/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='train/deepfashion2_sling_dress_train.json', - data_prefix=dict(img='train/image/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='validation/deepfashion2_sling_dress_validation.json', - data_prefix=dict(img='validation/image/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + 
dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_sling_dress_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/deepfashion2_sling_dress_validation.json', + data_prefix=dict(img='validation/image/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-vest-256x192.py b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-vest-256x192.py index 4249d5a897..c0ed06dc85 100644 --- a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-vest-256x192.py +++ b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_4xb64-210e_deepfasion2-vest-256x192.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict( - logger=dict(type='LoggerHook', interval=10), - checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=294, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'DeepFashion2Dataset' -data_mode = 'topdown' -data_root = 'data/deepfasion2/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] 
-val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='train/deepfashion2_vest_train.json', - data_prefix=dict(img='train/image/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='validation/deepfashion2_vest_validation.json', - data_prefix=dict(img='validation/image/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_vest_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + 
persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/deepfashion2_vest_validation.json', + data_prefix=dict(img='validation/image/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_6xb64-210e_deepfasion2-short-sleeved-shirt-256x192.py b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_6xb64-210e_deepfasion2-short-sleeved-shirt-256x192.py index 4161952dcf..e1bbbe290f 100644 --- a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_6xb64-210e_deepfasion2-short-sleeved-shirt-256x192.py +++ b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_6xb64-210e_deepfasion2-short-sleeved-shirt-256x192.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=384) - -# hooks -default_hooks = dict( - logger=dict(type='LoggerHook', interval=10), - checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=294, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'DeepFashion2Dataset' -data_mode = 'topdown' -data_root = 'data/deepfasion2/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='train/deepfashion2_short_sleeved_shirt_train.json', - data_prefix=dict(img='train/image/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - 
persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='validation/deepfashion2_short_sleeved_shirt_validation.json', - data_prefix=dict(img='validation/image/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=384) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_short_sleeved_shirt_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/deepfashion2_short_sleeved_shirt_validation.json', + data_prefix=dict(img='validation/image/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git 
a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-outwear-256x192.py b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-outwear-256x192.py index 36e0318bf7..2b36f62dec 100644 --- a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-outwear-256x192.py +++ b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-outwear-256x192.py @@ -1,123 +1,123 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - logger=dict(type='LoggerHook', interval=10), - checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=294, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'DeepFashion2Dataset' -data_mode = 'topdown' -data_root = 'data/deepfasion2/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='train/deepfashion2_long_sleeved_outwear_train.json', - data_prefix=dict(img='train/image/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='validation/' - 'deepfashion2_long_sleeved_outwear_validation.json', - data_prefix=dict(img='validation/image/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ 
= ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_long_sleeved_outwear_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/' + 'deepfashion2_long_sleeved_outwear_validation.json', + data_prefix=dict(img='validation/image/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-shirt-256x192.py b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-shirt-256x192.py index f82e3cb5fb..8d25b31fc5 100644 --- a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-shirt-256x192.py +++ b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-shirt-256x192.py @@ -1,122 +1,122 @@ -_base_ = 
['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - logger=dict(type='LoggerHook', interval=10), - checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=294, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'DeepFashion2Dataset' -data_mode = 'topdown' -data_root = 'data/deepfasion2/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='train/deepfashion2_long_sleeved_shirt_train.json', - data_prefix=dict(img='train/image/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='validation/deepfashion2_long_sleeved_shirt_validation.json', - data_prefix=dict(img='validation/image/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + 
logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_long_sleeved_shirt_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/deepfashion2_long_sleeved_shirt_validation.json', + data_prefix=dict(img='validation/image/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-short-sleeved-outwear-256x192.py b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-short-sleeved-outwear-256x192.py index 30db99de9e..9e381dfe00 100644 --- a/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-short-sleeved-outwear-256x192.py +++ b/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td-hm_res50_8xb64-210e_deepfasion2-short-sleeved-outwear-256x192.py @@ -1,123 +1,123 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - 
logger=dict(type='LoggerHook', interval=10), - checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=294, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'DeepFashion2Dataset' -data_mode = 'topdown' -data_root = 'data/deepfasion2/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='train/deepfashion2_short_sleeved_outwear_train.json', - data_prefix=dict(img='train/image/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='validation/' - 'deepfashion2_short_sleeved_outwear_validation.json', - data_prefix=dict(img='validation/image/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + logger=dict(type='LoggerHook', interval=10), + checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + 
type='HeatmapHead', + in_channels=2048, + out_channels=294, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'DeepFashion2Dataset' +data_mode = 'topdown' +data_root = 'data/deepfasion2/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='train/deepfashion2_short_sleeved_outwear_train.json', + data_prefix=dict(img='train/image/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='validation/' + 'deepfashion2_short_sleeved_outwear_validation.json', + data_prefix=dict(img='validation/image/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/hand_2d_keypoint/README.md b/configs/hand_2d_keypoint/README.md index 6f7758290e..29b9066fb2 100644 --- a/configs/hand_2d_keypoint/README.md +++ b/configs/hand_2d_keypoint/README.md @@ -1,18 +1,18 @@ -# 2D Hand Pose Estimation - -2D hand pose estimation is defined as the task of detecting the poses (or keypoints) of the hand from an input image. - -Normally, the input images are cropped hand images, where the hand locates at the center; -or the rough location (or the bounding box) of the hand is provided. - -## Data preparation - -Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_hand_keypoint.md) to prepare data. - -## Demo - -Please follow [Demo](/demo/docs/en/2d_hand_demo.md) to run demos. - -
- -
+# 2D Hand Pose Estimation + +2D hand pose estimation is defined as the task of detecting the poses (or keypoints) of the hand from an input image. + +Normally, the input images are cropped hand images, where the hand locates at the center; +or the rough location (or the bounding box) of the hand is provided. + +## Data preparation + +Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_hand_keypoint.md) to prepare data. + +## Demo + +Please follow [Demo](/demo/docs/en/2d_hand_demo.md) to run demos. + +
+ +
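For a quick programmatic check, the hand demo referenced above can also be driven from Python. The sketch below is illustrative only: it assumes MMPose 1.x's `MMPoseInferencer`, the `hand` model alias registered in the RTMPose hand metafile, and a placeholder image path; the exact structure of the returned predictions may differ between versions.

```python
# Minimal sketch (not part of this patch) of 2D hand keypoint inference with
# MMPose's high-level API. 'hand' is the model alias from the RTMPose hand
# metafile; 'demo/hand.jpg' is a placeholder path.
from mmpose.apis import MMPoseInferencer

inferencer = MMPoseInferencer('hand')              # loads an RTMPose hand model
result_generator = inferencer('demo/hand.jpg', show=False)
result = next(result_generator)                    # one result per input image
print(result['predictions'])                       # per-instance keypoints and scores
```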
diff --git a/configs/hand_2d_keypoint/rtmpose/README.md b/configs/hand_2d_keypoint/rtmpose/README.md index 9687b7e72c..2da6481eb6 100644 --- a/configs/hand_2d_keypoint/rtmpose/README.md +++ b/configs/hand_2d_keypoint/rtmpose/README.md @@ -1,16 +1,16 @@ -# RTMPose - -Recent studies on 2D pose estimation have achieved excellent performance on public benchmarks, yet its application in the industrial community still suffers from heavy model parameters and high latency. -In order to bridge this gap, we empirically study five aspects that affect the performance of multi-person pose estimation algorithms: paradigm, backbone network, localization algorithm, training strategy, and deployment inference, and present a high-performance real-time multi-person pose estimation framework, **RTMPose**, based on MMPose. -Our RTMPose-m achieves **75.8% AP** on COCO with **90+ FPS** on an Intel i7-11700 CPU and **430+ FPS** on an NVIDIA GTX 1660 Ti GPU, and RTMPose-l achieves **67.0% AP** on COCO-WholeBody with **130+ FPS**, outperforming existing open-source libraries. -To further evaluate RTMPose's capability in critical real-time applications, we also report the performance after deploying on the mobile device. - -## Results and Models - -### COCO-WholeBody-Hand Dataset - -Results on COCO-WholeBody-Hand val set - -| Model | Input Size | PCK@0.2 | AUC | EPE | Details and Download | -| :-------: | :--------: | :-----: | :---: | :--: | :------------------------------------------------------------------------------------: | -| RTMPose-m | 256x256 | 0.815 | 0.837 | 4.51 | [rtmpose_coco_wholebody_hand.md](./coco_wholebody_hand/rtmpose_coco_wholebody_hand.md) | +# RTMPose + +Recent studies on 2D pose estimation have achieved excellent performance on public benchmarks, yet its application in the industrial community still suffers from heavy model parameters and high latency. +In order to bridge this gap, we empirically study five aspects that affect the performance of multi-person pose estimation algorithms: paradigm, backbone network, localization algorithm, training strategy, and deployment inference, and present a high-performance real-time multi-person pose estimation framework, **RTMPose**, based on MMPose. +Our RTMPose-m achieves **75.8% AP** on COCO with **90+ FPS** on an Intel i7-11700 CPU and **430+ FPS** on an NVIDIA GTX 1660 Ti GPU, and RTMPose-l achieves **67.0% AP** on COCO-WholeBody with **130+ FPS**, outperforming existing open-source libraries. +To further evaluate RTMPose's capability in critical real-time applications, we also report the performance after deploying on the mobile device. 
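The RTMPose configs referenced below localize keypoints with SimCC-style coordinate classification: each x and y coordinate becomes a 1-D classification target over sub-pixel bins. The following is a rough sketch of that idea under the `input_size=(256, 256)` and `simcc_split_ratio=2.0` settings used in these configs, not the library implementation.

```python
# Rough sketch of SimCC-style coordinate classification (illustrative only):
# a keypoint coordinate is encoded as a classification target over
# input_size * simcc_split_ratio bins, giving sub-pixel resolution.
input_size = (256, 256)
simcc_split_ratio = 2.0

num_bins = int(input_size[0] * simcc_split_ratio)   # 512 bins along x (and y)
x = 123.4                                           # keypoint x in input pixels
target_bin = int(round(x * simcc_split_ratio))      # -> bin 247
decoded_x = target_bin / simcc_split_ratio          # -> 123.5 px after decoding
print(num_bins, target_bin, decoded_x)
```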
+ +## Results and Models + +### COCO-WholeBody-Hand Dataset + +Results on COCO-WholeBody-Hand val set + +| Model | Input Size | PCK@0.2 | AUC | EPE | Details and Download | +| :-------: | :--------: | :-----: | :---: | :--: | :------------------------------------------------------------------------------------: | +| RTMPose-m | 256x256 | 0.815 | 0.837 | 4.51 | [rtmpose_coco_wholebody_hand.md](./coco_wholebody_hand/rtmpose_coco_wholebody_hand.md) | diff --git a/configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py b/configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py index 48c7193394..2199e09d2b 100644 --- a/configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py +++ b/configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py @@ -1,232 +1,232 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 210 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(256, 256), - sigma=(5.66, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=768, - out_channels=21, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'CocoWholeBodyHandDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines 
-train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - # dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], - rotate_factor=180), - dict(type='RandomFlip', direction='horizontal'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - # dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=180), - dict(type='RandomFlip', direction='horizontal'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE') -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + 
type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(256, 256), + sigma=(5.66, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=21, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoWholeBodyHandDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + # dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], + rotate_factor=180), + dict(type='RandomFlip', direction='horizontal'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + # dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=180), + dict(type='RandomFlip', direction='horizontal'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, 
+ min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE') +] +test_evaluator = val_evaluator diff --git a/configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose_coco_wholebody_hand.md b/configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose_coco_wholebody_hand.md index b2a5957e6e..edf081972e 100644 --- a/configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose_coco_wholebody_hand.md +++ b/configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose_coco_wholebody_hand.md @@ -1,39 +1,39 @@ - - -
-RTMDet (ArXiv 2022) - -```bibtex -@misc{lyu2022rtmdet, - title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, - author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, - year={2022}, - eprint={2212.07784}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -
- - - -
-COCO-WholeBody-Hand (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -Results on COCO-WholeBody-Hand val set - -| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | -| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | -| [rtmpose_m](/configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 0.815 | 0.837 | 4.51 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody-hand_pt-aic-coco_210e-256x256-99477206_20230228.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody-hand_pt-aic-coco_210e-256x256-99477206_20230228.json) | + + +
+RTMDet (ArXiv 2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+COCO-WholeBody-Hand (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
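The PCK@0.2, AUC and EPE columns below come from the evaluators configured in these configs. As a rough illustration of PCK@0.2 (a sketch, not the exact `PCKAccuracy` implementation), a keypoint counts as correct when its error is below 0.2 times a normalization length derived from the instance bounding box:

```python
import numpy as np

# Illustrative PCK@0.2 for a single hand instance (a sketch, not MMPose's
# exact PCKAccuracy code): a keypoint is "correct" if its distance to the
# ground truth is below thr * norm_len, with norm_len taken from the bbox.
def pck(pred, gt, mask, norm_len, thr=0.2):
    dists = np.linalg.norm(pred - gt, axis=-1) / norm_len
    return float((dists[mask] < thr).mean())

pred = np.array([[10.0, 12.0], [40.0, 41.0], [70.0, 68.0]])
gt = np.array([[11.0, 12.0], [42.0, 40.0], [90.0, 70.0]])
mask = np.array([True, True, True])        # only visible keypoints are scored
print(pck(pred, gt, mask, norm_len=64.0))  # 2 of 3 within 0.2 * 64 px -> 0.667
```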
+ +Results on COCO-WholeBody-Hand val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [rtmpose_m](/configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 0.815 | 0.837 | 4.51 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody-hand_pt-aic-coco_210e-256x256-99477206_20230228.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody-hand_pt-aic-coco_210e-256x256-99477206_20230228.json) | diff --git a/configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose_coco_wholebody_hand.yml b/configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose_coco_wholebody_hand.yml index 2f87733605..e0ce8586f4 100644 --- a/configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose_coco_wholebody_hand.yml +++ b/configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose_coco_wholebody_hand.yml @@ -1,17 +1,17 @@ -Models: -- Config: configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py - In Collection: RTMPose - Alias: hand - Metadata: - Architecture: - - RTMPose - Training Data: COCO-WholeBody-Hand - Name: rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256 - Results: - - Dataset: COCO-WholeBody-Hand - Metrics: - AUC: 0.815 - EPE: 4.51 - PCK@0.2: 0.837 - Task: Hand 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody-hand_pt-aic-coco_210e-256x256-99477206_20230228.pth +Models: +- Config: configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py + In Collection: RTMPose + Alias: hand + Metadata: + Architecture: + - RTMPose + Training Data: COCO-WholeBody-Hand + Name: rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256 + Results: + - Dataset: COCO-WholeBody-Hand + Metrics: + AUC: 0.815 + EPE: 4.51 + PCK@0.2: 0.837 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody-hand_pt-aic-coco_210e-256x256-99477206_20230228.pth diff --git a/configs/hand_2d_keypoint/rtmpose/hand5/rtmpose-m_8xb256-210e_hand5-256x256.py b/configs/hand_2d_keypoint/rtmpose/hand5/rtmpose-m_8xb256-210e_hand5-256x256.py index f329f1cb1d..96b839f3c8 100644 --- a/configs/hand_2d_keypoint/rtmpose/hand5/rtmpose-m_8xb256-210e_hand5-256x256.py +++ b/configs/hand_2d_keypoint/rtmpose/hand5/rtmpose-m_8xb256-210e_hand5-256x256.py @@ -1,380 +1,380 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# coco-hand onehand10k freihand2d rhd2d halpehand - -# runtime -max_epochs = 210 -stage2_num_epochs = 10 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# 
automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(256, 256), - sigma=(5.66, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmpose/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=768, - out_channels=21, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'CocoWholeBodyHandDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - # dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], - rotate_factor=180), - dict(type='RandomFlip', direction='horizontal'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - # dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=180), - dict(type='RandomFlip', direction='horizontal'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.2), - dict(type='MedianBlur', p=0.2), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# train datasets -dataset_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='detection/coco/train2017/'), - 
pipeline=[], -) - -dataset_onehand10k = dict( - type='OneHand10KDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='onehand10k/annotations/onehand10k_train.json', - data_prefix=dict(img='pose/OneHand10K/'), - pipeline=[], -) - -dataset_freihand = dict( - type='FreiHandDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='freihand/annotations/freihand_train.json', - data_prefix=dict(img='pose/FreiHand/'), - pipeline=[], -) - -dataset_rhd = dict( - type='Rhd2DDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='rhd/annotations/rhd_train.json', - data_prefix=dict(img='pose/RHD/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=21, - mapping=[ - (0, 0), - (1, 4), - (2, 3), - (3, 2), - (4, 1), - (5, 8), - (6, 7), - (7, 6), - (8, 5), - (9, 12), - (10, 11), - (11, 10), - (12, 9), - (13, 16), - (14, 15), - (15, 14), - (16, 13), - (17, 20), - (18, 19), - (19, 18), - (20, 17), - ]) - ], -) - -dataset_halpehand = dict( - type='HalpeHandDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_train_v1.json', - data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015/'), - pipeline=[], -) - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict( - from_file='configs/_base_/datasets/coco_wholebody_hand.py'), - datasets=[ - dataset_coco, dataset_onehand10k, dataset_freihand, dataset_rhd, - dataset_halpehand - ], - pipeline=train_pipeline, - test_mode=False, - )) - -# test datasets -val_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[], -) - -val_onehand10k = dict( - type='OneHand10KDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='onehand10k/annotations/onehand10k_test.json', - data_prefix=dict(img='pose/OneHand10K/'), - pipeline=[], -) - -val_freihand = dict( - type='FreiHandDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='freihand/annotations/freihand_test.json', - data_prefix=dict(img='pose/FreiHand/'), - pipeline=[], -) - -val_rhd = dict( - type='Rhd2DDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='rhd/annotations/rhd_test.json', - data_prefix=dict(img='pose/RHD/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=21, - mapping=[ - (0, 0), - (1, 4), - (2, 3), - (3, 2), - (4, 1), - (5, 8), - (6, 7), - (7, 6), - (8, 5), - (9, 12), - (10, 11), - (11, 10), - (12, 9), - (13, 16), - (14, 15), - (15, 14), - (16, 13), - (17, 20), - (18, 19), - (19, 18), - (20, 17), - ]) - ], -) - -val_halpehand = dict( - type='HalpeHandDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_val_v1.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[], -) - -test_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type='CombinedDataset', - metainfo=dict( - from_file='configs/_base_/datasets/coco_wholebody_hand.py'), - datasets=[ - val_coco, val_onehand10k, val_freihand, val_rhd, val_halpehand - ], - pipeline=val_pipeline, - test_mode=True, - )) - -val_dataloader = test_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='AUC', 
rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE') -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# coco-hand onehand10k freihand2d rhd2d halpehand + +# runtime +max_epochs = 210 +stage2_num_epochs = 10 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(256, 256), + sigma=(5.66, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmpose/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=21, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoWholeBodyHandDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + # dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], + rotate_factor=180), + dict(type='RandomFlip', direction='horizontal'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + 
dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + # dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=180), + dict(type='RandomFlip', direction='horizontal'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.2), + dict(type='MedianBlur', p=0.2), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], +) + +dataset_onehand10k = dict( + type='OneHand10KDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='onehand10k/annotations/onehand10k_train.json', + data_prefix=dict(img='pose/OneHand10K/'), + pipeline=[], +) + +dataset_freihand = dict( + type='FreiHandDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='freihand/annotations/freihand_train.json', + data_prefix=dict(img='pose/FreiHand/'), + pipeline=[], +) + +dataset_rhd = dict( + type='Rhd2DDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='rhd/annotations/rhd_train.json', + data_prefix=dict(img='pose/RHD/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=21, + mapping=[ + (0, 0), + (1, 4), + (2, 3), + (3, 2), + (4, 1), + (5, 8), + (6, 7), + (7, 6), + (8, 5), + (9, 12), + (10, 11), + (11, 10), + (12, 9), + (13, 16), + (14, 15), + (15, 14), + (16, 13), + (17, 20), + (18, 19), + (19, 18), + (20, 17), + ]) + ], +) + +dataset_halpehand = dict( + type='HalpeHandDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015/'), + pipeline=[], +) + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict( + from_file='configs/_base_/datasets/coco_wholebody_hand.py'), + datasets=[ + dataset_coco, dataset_onehand10k, dataset_freihand, dataset_rhd, + dataset_halpehand + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# test datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[], +) + +val_onehand10k = dict( + type='OneHand10KDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='onehand10k/annotations/onehand10k_test.json', + data_prefix=dict(img='pose/OneHand10K/'), + pipeline=[], +) + +val_freihand = dict( + type='FreiHandDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='freihand/annotations/freihand_test.json', + data_prefix=dict(img='pose/FreiHand/'), + pipeline=[], +) 
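The `KeypointConverter` pipelines attached to the RHD annotations (`dataset_rhd` above, `val_rhd` below) remap source keypoint indices into the COCO-WholeBody-Hand order. A standalone sketch of what such a `(source, target)` mapping does to a keypoint array, assuming NumPy and showing only the first five pairs from the config:

```python
import numpy as np

# Sketch of index remapping as performed by KeypointConverter (illustrative):
# each (src, dst) pair copies keypoints[:, src] into slot dst of the output.
mapping = [(0, 0), (1, 4), (2, 3), (3, 2), (4, 1)]   # first five pairs only
src = np.array([m[0] for m in mapping])
dst = np.array([m[1] for m in mapping])

keypoints = np.arange(21 * 2, dtype=float).reshape(1, 21, 2)  # (instances, K, xy)
converted = np.zeros_like(keypoints)
converted[:, dst] = keypoints[:, src]   # unmapped target slots stay zero
print(converted[0, :5])
```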
+ +val_rhd = dict( + type='Rhd2DDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='rhd/annotations/rhd_test.json', + data_prefix=dict(img='pose/RHD/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=21, + mapping=[ + (0, 0), + (1, 4), + (2, 3), + (3, 2), + (4, 1), + (5, 8), + (6, 7), + (7, 6), + (8, 5), + (9, 12), + (10, 11), + (11, 10), + (12, 9), + (13, 16), + (14, 15), + (15, 14), + (16, 13), + (17, 20), + (18, 19), + (19, 18), + (20, 17), + ]) + ], +) + +val_halpehand = dict( + type='HalpeHandDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[], +) + +test_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict( + from_file='configs/_base_/datasets/coco_wholebody_hand.py'), + datasets=[ + val_coco, val_onehand10k, val_freihand, val_rhd, val_halpehand + ], + pipeline=val_pipeline, + test_mode=True, + )) + +val_dataloader = test_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE') +] +test_evaluator = val_evaluator diff --git a/configs/hand_2d_keypoint/rtmpose/hand5/rtmpose_hand5.md b/configs/hand_2d_keypoint/rtmpose/hand5/rtmpose_hand5.md index 361770dad2..5da110eec1 100644 --- a/configs/hand_2d_keypoint/rtmpose/hand5/rtmpose_hand5.md +++ b/configs/hand_2d_keypoint/rtmpose/hand5/rtmpose_hand5.md @@ -1,67 +1,67 @@ - - -
-RTMPose (arXiv'2023) - -```bibtex -@misc{https://doi.org/10.48550/arxiv.2303.07399, - doi = {10.48550/ARXIV.2303.07399}, - url = {https://arxiv.org/abs/2303.07399}, - author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, - keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, - title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, - publisher = {arXiv}, - year = {2023}, - copyright = {Creative Commons Attribution 4.0 International} -} - -``` - -
- - - -
-RTMDet (arXiv'2022) - -```bibtex -@misc{lyu2022rtmdet, - title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, - author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, - year={2022}, - eprint={2212.07784}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -
- - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -- `Hand5` and `*` denote model trained on 5 public datasets: - - [COCO-Wholebody-Hand](https://github.com/jin-s13/COCO-WholeBody/) - - [OneHand10K](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html) - - [FreiHand2d](https://lmb.informatik.uni-freiburg.de/projects/freihand/) - - [RHD2d](https://lmb.informatik.uni-freiburg.de/resources/datasets/RenderedHandposeDataset.en.html) - - [Halpe](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_wholebody_keypoint.html#halpe) - -| Config | Input Size | PCK@0.2
(COCO-Wholebody-Hand) | PCK@0.2 (Hand5) | AUC (Hand5) | EPE (Hand5) | FLOPS(G) | Download | -| :---------------------------------------: | :--------: | :-----------------------------------: | :---------------------: | :-----------------: | :-----------------: | :------: | :-----------------------------------------: | -| [RTMPose-m\*
(alpha version)](./rtmpose/hand_2d_keypoint/rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 81.5 | 96.4 | 83.9 | 5.06 | 2.581 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-hand5_pt-aic-coco_210e-256x256-74fb594_20230320.pth) | + + +
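A note on the metric columns used in these tables and in the `val_evaluator` configs: PCK@0.2 is the fraction of keypoints predicted within 0.2 of a normalization length (the bounding-box size for these hand benchmarks) of the ground truth, AUC is the area under the PCK curve as that threshold is swept, and EPE is the mean end-point error in pixels. A rough sketch of PCK and EPE, assuming `(N, K, 2)` prediction and ground-truth arrays and a per-sample normalization length; this is illustrative, not the MMPose evaluator implementation:

```python
import numpy as np


def pck(pred: np.ndarray, gt: np.ndarray, norm: np.ndarray, thr: float = 0.2) -> float:
    """Fraction of keypoints whose error is below thr * normalization length.

    pred, gt: (N, K, 2) keypoint arrays; norm: (N,) per-sample normalization lengths.
    """
    dist = np.linalg.norm(pred - gt, axis=-1)          # (N, K) pixel errors
    return float((dist / norm[:, None] < thr).mean())


def epe(pred: np.ndarray, gt: np.ndarray) -> float:
    """Mean end-point error in pixels."""
    return float(np.linalg.norm(pred - gt, axis=-1).mean())
```

A real evaluator would additionally mask out keypoints that are not annotated or not visible before averaging.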
+RTMPose (arXiv'2023) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2303.07399, + doi = {10.48550/ARXIV.2303.07399}, + url = {https://arxiv.org/abs/2303.07399}, + author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, + publisher = {arXiv}, + year = {2023}, + copyright = {Creative Commons Attribution 4.0 International} +} + +``` + +
+ + + +
+RTMDet (arXiv'2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +- `Hand5` and `*` denote model trained on 5 public datasets: + - [COCO-Wholebody-Hand](https://github.com/jin-s13/COCO-WholeBody/) + - [OneHand10K](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html) + - [FreiHand2d](https://lmb.informatik.uni-freiburg.de/projects/freihand/) + - [RHD2d](https://lmb.informatik.uni-freiburg.de/resources/datasets/RenderedHandposeDataset.en.html) + - [Halpe](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_wholebody_keypoint.html#halpe) + +| Config | Input Size | PCK@0.2
(COCO-Wholebody-Hand) | PCK@0.2 (Hand5) | AUC (Hand5) | EPE (Hand5) | FLOPS(G) | Download | +| :---------------------------------------: | :--------: | :-----------------------------------: | :---------------------: | :-----------------: | :-----------------: | :------: | :-----------------------------------------: | +| [RTMPose-m\*
(alpha version)](./rtmpose/hand_2d_keypoint/rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 81.5 | 96.4 | 83.9 | 5.06 | 2.581 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-hand5_pt-aic-coco_210e-256x256-74fb594_20230320.pth) | diff --git a/configs/hand_2d_keypoint/rtmpose/hand5/rtmpose_hand5.yml b/configs/hand_2d_keypoint/rtmpose/hand5/rtmpose_hand5.yml index a8dfd42e39..6c570849b9 100644 --- a/configs/hand_2d_keypoint/rtmpose/hand5/rtmpose_hand5.yml +++ b/configs/hand_2d_keypoint/rtmpose/hand5/rtmpose_hand5.yml @@ -1,27 +1,27 @@ -Collections: -- Name: RTMPose - Paper: - Title: "RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose" - URL: https://arxiv.org/abs/2303.07399 - README: https://github.com/open-mmlab/mmpose/blob/main/projects/rtmpose/README.md -Models: -- Config: configs/hand_2d_keypoint/rtmpose/hand5/rtmpose-m_8xb256-210e_hand5-256x256.py - In Collection: RTMPose - Metadata: - Architecture: &id001 - - RTMPose - Training Data: &id002 - - COCO-Wholebody-Hand - - OneHand10K - - FreiHand2d - - RHD2d - - Halpe - Name: rtmpose-m_8xb256-210e_hand5-256x256 - Results: - - Dataset: Hand5 - Metrics: - PCK@0.2: 0.964 - AUC: 0.839 - EPE: 5.06 - Task: Hand 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-hand5_pt-aic-coco_210e-256x256-74fb594_20230320.pth +Collections: +- Name: RTMPose + Paper: + Title: "RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose" + URL: https://arxiv.org/abs/2303.07399 + README: https://github.com/open-mmlab/mmpose/blob/main/projects/rtmpose/README.md +Models: +- Config: configs/hand_2d_keypoint/rtmpose/hand5/rtmpose-m_8xb256-210e_hand5-256x256.py + In Collection: RTMPose + Metadata: + Architecture: &id001 + - RTMPose + Training Data: &id002 + - COCO-Wholebody-Hand + - OneHand10K + - FreiHand2d + - RHD2d + - Halpe + Name: rtmpose-m_8xb256-210e_hand5-256x256 + Results: + - Dataset: Hand5 + Metrics: + PCK@0.2: 0.964 + AUC: 0.839 + EPE: 5.06 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-hand5_pt-aic-coco_210e-256x256-74fb594_20230320.pth diff --git a/configs/hand_2d_keypoint/topdown_heatmap/README.md b/configs/hand_2d_keypoint/topdown_heatmap/README.md index 7f63f1f825..969482a98f 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/README.md +++ b/configs/hand_2d_keypoint/topdown_heatmap/README.md @@ -1,55 +1,55 @@ -# Top-down heatmap-based pose estimation - -Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. Instead of estimating keypoint coordinates directly, the pose estimator will produce heatmaps which represent the likelihood of being a keypoint, following the paradigm introduced in [Simple Baselines for Human Pose Estimation and Tracking](http://openaccess.thecvf.com/content_ECCV_2018/html/Bin_Xiao_Simple_Baselines_for_ECCV_2018_paper.html). - -
- -
- -## Results and Models - -### COCO-WholeBody-Hand Dataset - -Results on COCO-WholeBody-Hand val set - -| Model | Input Size | PCK@0.2 | AUC | EPE | Details and Download | -| :--------------: | :--------: | :-----: | :---: | :--: | :----------------------------------------------------------------------------------------------: | -| HRNetv2-w18+Dark | 256x256 | 0.814 | 0.840 | 4.37 | [hrnetv2_dark_coco_wholebody_hand.md](./coco_wholebody_hand/hrnetv2_dark_coco_wholebody_hand.md) | -| HRNetv2-w18 | 256x256 | 0.813 | 0.840 | 4.39 | [hrnetv2_coco_wholebody_hand.md](./coco_wholebody_hand/hrnetv2_coco_wholebody_hand.md) | -| HourglassNet | 256x256 | 0.804 | 0.835 | 4.54 | [hourglass_coco_wholebody_hand.md](./coco_wholebody_hand/hourglass_coco_wholebody_hand.md) | -| SCNet-50 | 256x256 | 0.803 | 0.834 | 4.55 | [scnet_coco_wholebody_hand.md](./coco_wholebody_hand/scnet_coco_wholebody_hand.md) | -| ResNet-50 | 256x256 | 0.800 | 0.833 | 4.64 | [resnet_coco_wholebody_hand.md](./coco_wholebody_hand/resnet_coco_wholebody_hand.md) | -| LiteHRNet-18 | 256x256 | 0.795 | 0.830 | 4.77 | [litehrnet_coco_wholebody_hand.md](./coco_wholebody_hand/litehrnet_coco_wholebody_hand.md) | -| MobileNet-v2 | 256x256 | 0.795 | 0.829 | 4.77 | [mobilenetv2_coco_wholebody_hand.md](./coco_wholebody_hand/mobilenetv2_coco_wholebody_hand.md) | - -### FreiHand Dataset - -Results on FreiHand val & test set - -| Model | Input Size | PCK@0.2 | AUC | EPE | Details and Download | -| :-------: | :--------: | :-----: | :---: | :--: | :-------------------------------------------------------: | -| ResNet-50 | 224x224 | 0.999 | 0.868 | 3.27 | [resnet_freihand2d.md](./freihand2d/resnet_freihand2d.md) | - -### OneHand10K Dataset - -Results on OneHand10K val set - -| Model | Input Size | PCK@0.2 | AUC | EPE | Details and Download | -| :--------------: | :--------: | :-----: | :---: | :---: | :-------------------------------------------------------------------: | -| HRNetv2-w18+Dark | 256x256 | 0.990 | 0.572 | 23.96 | [hrnetv2_dark_onehand10k.md](./onehand10k/hrnetv2_dark_onehand10k.md) | -| HRNetv2-w18+UDP | 256x256 | 0.990 | 0.571 | 23.88 | [hrnetv2_udp_onehand10k.md](./onehand10k/hrnetv2_udp_onehand10k.md) | -| HRNetv2-w18 | 256x256 | 0.990 | 0.567 | 24.26 | [hrnetv2_onehand10k.md](./onehand10k/hrnetv2_onehand10k.md) | -| ResNet-50 | 256x256 | 0.989 | 0.555 | 25.16 | [resnet_onehand10k.md](./onehand10k/resnet_onehand10k.md) | -| MobileNet-v2 | 256x256 | 0.986 | 0.537 | 28.56 | [mobilenetv2_onehand10k.md](./onehand10k/mobilenetv2_onehand10k.md) | - -### RHD Dataset - -Results on RHD test set - -| Model | Input Size | PCK@0.2 | AUC | EPE | Details and Download | -| :--------------: | :--------: | :-----: | :---: | :--: | :----------------------------------------------------: | -| HRNetv2-w18+Dark | 256x256 | 0.992 | 0.903 | 2.18 | [hrnetv2_dark_rhd2d.md](./rhd2d/hrnetv2_dark_rhd2d.md) | -| HRNetv2-w18+UDP | 256x256 | 0.992 | 0.902 | 2.19 | [hrnetv2_udp_rhd2d.md](./rhd2d/hrnetv2_udp_rhd2d.md) | -| HRNetv2-w18 | 256x256 | 0.992 | 0.902 | 2.21 | [hrnetv2_rhd2d.md](./rhd2d/hrnetv2_rhd2d.md) | -| ResNet-50 | 256x256 | 0.991 | 0.898 | 2.32 | [resnet_rhd2d.md](./rhd2d/resnet_rhd2d.md) | -| MobileNet-v2 | 256x256 | 0.985 | 0.883 | 2.79 | [mobilenetv2_rhd2d.md](./rhd2d/mobilenetv2_rhd2d.md) | +# Top-down heatmap-based pose estimation + +Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. 
Instead of estimating keypoint coordinates directly, the pose estimator will produce heatmaps which represent the likelihood of being a keypoint, following the paradigm introduced in [Simple Baselines for Human Pose Estimation and Tracking](http://openaccess.thecvf.com/content_ECCV_2018/html/Bin_Xiao_Simple_Baselines_for_ECCV_2018_paper.html). + +
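To make the heatmap paradigm described above concrete: the head predicts one low-resolution score map per keypoint, and the coordinate is read off from the peak of each map. A minimal decoding sketch, assuming a `(K, H, W)` NumPy array of heatmaps; it is illustrative only and omits the sub-pixel refinements used by the Dark/UDP variants listed in the tables below:

```python
import numpy as np


def decode_heatmaps(heatmaps: np.ndarray, input_size=(256, 256)):
    """Take the argmax of each (H, W) map and rescale it to input-image coords.

    heatmaps: (K, H, W) array, one map per keypoint.
    Returns (K, 2) xy coordinates and (K,) peak scores.
    """
    num_kpts, h, w = heatmaps.shape
    flat = heatmaps.reshape(num_kpts, -1)
    idx = flat.argmax(axis=1)
    scores = flat.max(axis=1)
    xs, ys = idx % w, idx // w
    # e.g. 64x64 heatmaps for a 256x256 crop -> scale peak locations up by 4.
    coords = np.stack([xs * input_size[0] / w, ys * input_size[1] / h], axis=1)
    return coords.astype(np.float32), scores
```

The configs later in this diff pair a 256x256 input with 64x64 `MSRAHeatmap` targets (`sigma=2`), which is where that 4x rescaling comes from.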
+ +
+ +## Results and Models + +### COCO-WholeBody-Hand Dataset + +Results on COCO-WholeBody-Hand val set + +| Model | Input Size | PCK@0.2 | AUC | EPE | Details and Download | +| :--------------: | :--------: | :-----: | :---: | :--: | :----------------------------------------------------------------------------------------------: | +| HRNetv2-w18+Dark | 256x256 | 0.814 | 0.840 | 4.37 | [hrnetv2_dark_coco_wholebody_hand.md](./coco_wholebody_hand/hrnetv2_dark_coco_wholebody_hand.md) | +| HRNetv2-w18 | 256x256 | 0.813 | 0.840 | 4.39 | [hrnetv2_coco_wholebody_hand.md](./coco_wholebody_hand/hrnetv2_coco_wholebody_hand.md) | +| HourglassNet | 256x256 | 0.804 | 0.835 | 4.54 | [hourglass_coco_wholebody_hand.md](./coco_wholebody_hand/hourglass_coco_wholebody_hand.md) | +| SCNet-50 | 256x256 | 0.803 | 0.834 | 4.55 | [scnet_coco_wholebody_hand.md](./coco_wholebody_hand/scnet_coco_wholebody_hand.md) | +| ResNet-50 | 256x256 | 0.800 | 0.833 | 4.64 | [resnet_coco_wholebody_hand.md](./coco_wholebody_hand/resnet_coco_wholebody_hand.md) | +| LiteHRNet-18 | 256x256 | 0.795 | 0.830 | 4.77 | [litehrnet_coco_wholebody_hand.md](./coco_wholebody_hand/litehrnet_coco_wholebody_hand.md) | +| MobileNet-v2 | 256x256 | 0.795 | 0.829 | 4.77 | [mobilenetv2_coco_wholebody_hand.md](./coco_wholebody_hand/mobilenetv2_coco_wholebody_hand.md) | + +### FreiHand Dataset + +Results on FreiHand val & test set + +| Model | Input Size | PCK@0.2 | AUC | EPE | Details and Download | +| :-------: | :--------: | :-----: | :---: | :--: | :-------------------------------------------------------: | +| ResNet-50 | 224x224 | 0.999 | 0.868 | 3.27 | [resnet_freihand2d.md](./freihand2d/resnet_freihand2d.md) | + +### OneHand10K Dataset + +Results on OneHand10K val set + +| Model | Input Size | PCK@0.2 | AUC | EPE | Details and Download | +| :--------------: | :--------: | :-----: | :---: | :---: | :-------------------------------------------------------------------: | +| HRNetv2-w18+Dark | 256x256 | 0.990 | 0.572 | 23.96 | [hrnetv2_dark_onehand10k.md](./onehand10k/hrnetv2_dark_onehand10k.md) | +| HRNetv2-w18+UDP | 256x256 | 0.990 | 0.571 | 23.88 | [hrnetv2_udp_onehand10k.md](./onehand10k/hrnetv2_udp_onehand10k.md) | +| HRNetv2-w18 | 256x256 | 0.990 | 0.567 | 24.26 | [hrnetv2_onehand10k.md](./onehand10k/hrnetv2_onehand10k.md) | +| ResNet-50 | 256x256 | 0.989 | 0.555 | 25.16 | [resnet_onehand10k.md](./onehand10k/resnet_onehand10k.md) | +| MobileNet-v2 | 256x256 | 0.986 | 0.537 | 28.56 | [mobilenetv2_onehand10k.md](./onehand10k/mobilenetv2_onehand10k.md) | + +### RHD Dataset + +Results on RHD test set + +| Model | Input Size | PCK@0.2 | AUC | EPE | Details and Download | +| :--------------: | :--------: | :-----: | :---: | :--: | :----------------------------------------------------: | +| HRNetv2-w18+Dark | 256x256 | 0.992 | 0.903 | 2.18 | [hrnetv2_dark_rhd2d.md](./rhd2d/hrnetv2_dark_rhd2d.md) | +| HRNetv2-w18+UDP | 256x256 | 0.992 | 0.902 | 2.19 | [hrnetv2_udp_rhd2d.md](./rhd2d/hrnetv2_udp_rhd2d.md) | +| HRNetv2-w18 | 256x256 | 0.992 | 0.902 | 2.21 | [hrnetv2_rhd2d.md](./rhd2d/hrnetv2_rhd2d.md) | +| ResNet-50 | 256x256 | 0.991 | 0.898 | 2.32 | [resnet_rhd2d.md](./rhd2d/resnet_rhd2d.md) | +| MobileNet-v2 | 256x256 | 0.985 | 0.883 | 2.79 | [mobilenetv2_rhd2d.md](./rhd2d/mobilenetv2_rhd2d.md) | diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hourglass_coco_wholebody_hand.md b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hourglass_coco_wholebody_hand.md index 4728baaba2..2926593dec 100644 --- 
a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hourglass_coco_wholebody_hand.md +++ b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hourglass_coco_wholebody_hand.md @@ -1,39 +1,39 @@ - - -
-Hourglass (ECCV'2016) - -```bibtex -@inproceedings{newell2016stacked, - title={Stacked hourglass networks for human pose estimation}, - author={Newell, Alejandro and Yang, Kaiyu and Deng, Jia}, - booktitle={European conference on computer vision}, - pages={483--499}, - year={2016}, - organization={Springer} -} -``` - -
- - - -
-COCO-WholeBody-Hand (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -Results on COCO-WholeBody-Hand val set - -| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | -| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | -| [pose_hourglass_52](/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hourglass52_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 0.804 | 0.835 | 4.54 | [ckpt](https://download.openmmlab.com/mmpose/hand/hourglass/hourglass52_coco_wholebody_hand_256x256-7b05c6db_20210909.pth) | [log](https://download.openmmlab.com/mmpose/hand/hourglass/hourglass52_coco_wholebody_hand_256x256_20210909.log.json) | + + +
+Hourglass (ECCV'2016) + +```bibtex +@inproceedings{newell2016stacked, + title={Stacked hourglass networks for human pose estimation}, + author={Newell, Alejandro and Yang, Kaiyu and Deng, Jia}, + booktitle={European conference on computer vision}, + pages={483--499}, + year={2016}, + organization={Springer} +} +``` + +
+ + + +
+COCO-WholeBody-Hand (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Hand val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [pose_hourglass_52](/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hourglass52_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 0.804 | 0.835 | 4.54 | [ckpt](https://download.openmmlab.com/mmpose/hand/hourglass/hourglass52_coco_wholebody_hand_256x256-7b05c6db_20210909.pth) | [log](https://download.openmmlab.com/mmpose/hand/hourglass/hourglass52_coco_wholebody_hand_256x256_20210909.log.json) | diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hourglass_coco_wholebody_hand.yml b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hourglass_coco_wholebody_hand.yml index f6247504e2..21ff3f0122 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hourglass_coco_wholebody_hand.yml +++ b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hourglass_coco_wholebody_hand.yml @@ -1,16 +1,16 @@ -Models: -- Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hourglass52_8xb32-210e_coco-wholebody-hand-256x256.py - In Collection: Hourglass - Metadata: - Architecture: - - Hourglass - Training Data: COCO-WholeBody-Hand - Name: td-hm_hourglass52_8xb32-210e_coco-wholebody-hand-256x256 - Results: - - Dataset: COCO-WholeBody-Hand - Metrics: - AUC: 0.835 - EPE: 4.54 - PCK@0.2: 0.804 - Task: Hand 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/hand/hourglass/hourglass52_coco_wholebody_hand_256x256-7b05c6db_20210909.pth +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hourglass52_8xb32-210e_coco-wholebody-hand-256x256.py + In Collection: Hourglass + Metadata: + Architecture: + - Hourglass + Training Data: COCO-WholeBody-Hand + Name: td-hm_hourglass52_8xb32-210e_coco-wholebody-hand-256x256 + Results: + - Dataset: COCO-WholeBody-Hand + Metrics: + AUC: 0.835 + EPE: 4.54 + PCK@0.2: 0.804 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/hourglass/hourglass52_coco_wholebody_hand_256x256-7b05c6db_20210909.pth diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_coco_wholebody_hand.md b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_coco_wholebody_hand.md index d944ff43a2..eae4dce1fd 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_coco_wholebody_hand.md +++ b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_coco_wholebody_hand.md @@ -1,39 +1,39 @@ - - -
-HRNetv2 (TPAMI'2019) - -```bibtex -@article{WangSCJDZLMTWLX19, - title={Deep High-Resolution Representation Learning for Visual Recognition}, - author={Jingdong Wang and Ke Sun and Tianheng Cheng and - Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and - Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, - journal={TPAMI}, - year={2019} -} -``` - -
- - - -
-COCO-WholeBody-Hand (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -Results on COCO-WholeBody-Hand val set - -| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | -| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | -| [pose_hrnetv2_w18](/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 0.813 | 0.840 | 4.39 | [ckpt](https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_coco_wholebody_hand_256x256-1c028db7_20210908.pth) | [log](https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_coco_wholebody_hand_256x256_20210908.log.json) | + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+COCO-WholeBody-Hand (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Hand val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [pose_hrnetv2_w18](/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 0.813 | 0.840 | 4.39 | [ckpt](https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_coco_wholebody_hand_256x256-1c028db7_20210908.pth) | [log](https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_coco_wholebody_hand_256x256_20210908.log.json) | diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_coco_wholebody_hand.yml b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_coco_wholebody_hand.yml index f6c0046f66..0190ac9a50 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_coco_wholebody_hand.yml +++ b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_coco_wholebody_hand.yml @@ -1,16 +1,16 @@ -Models: -- Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_8xb32-210e_coco-wholebody-hand-256x256.py - In Collection: HRNetv2 - Metadata: - Architecture: - - HRNetv2 - Training Data: COCO-WholeBody-Hand - Name: td-hm_hrnetv2-w18_8xb32-210e_coco-wholebody-hand-256x256 - Results: - - Dataset: COCO-WholeBody-Hand - Metrics: - AUC: 0.84 - EPE: 4.39 - PCK@0.2: 0.813 - Task: Hand 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_coco_wholebody_hand_256x256-1c028db7_20210908.pth +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_8xb32-210e_coco-wholebody-hand-256x256.py + In Collection: HRNetv2 + Metadata: + Architecture: + - HRNetv2 + Training Data: COCO-WholeBody-Hand + Name: td-hm_hrnetv2-w18_8xb32-210e_coco-wholebody-hand-256x256 + Results: + - Dataset: COCO-WholeBody-Hand + Metrics: + AUC: 0.84 + EPE: 4.39 + PCK@0.2: 0.813 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_coco_wholebody_hand_256x256-1c028db7_20210908.pth diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_dark_coco_wholebody_hand.md b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_dark_coco_wholebody_hand.md index 7389636186..718baa78eb 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_dark_coco_wholebody_hand.md +++ b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_dark_coco_wholebody_hand.md @@ -1,56 +1,56 @@ - - -
-HRNetv2 (TPAMI'2019) - -```bibtex -@article{WangSCJDZLMTWLX19, - title={Deep High-Resolution Representation Learning for Visual Recognition}, - author={Jingdong Wang and Ke Sun and Tianheng Cheng and - Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and - Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, - journal={TPAMI}, - year={2019} -} -``` - -
- - - -
-DarkPose (CVPR'2020) - -```bibtex -@inproceedings{zhang2020distribution, - title={Distribution-aware coordinate representation for human pose estimation}, - author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={7093--7102}, - year={2020} -} -``` - -
- - - -
-COCO-WholeBody-Hand (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -Results on COCO-WholeBody-Hand val set - -| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | -| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | -| [pose_hrnetv2_w18_dark](/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_dark-8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 0.814 | 0.840 | 4.37 | [ckpt](https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_coco_wholebody_hand_256x256_dark-a9228c9c_20210908.pth) | [log](https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_coco_wholebody_hand_256x256_dark_20210908.log.json) | + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+DarkPose (CVPR'2020) + +```bibtex +@inproceedings{zhang2020distribution, + title={Distribution-aware coordinate representation for human pose estimation}, + author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={7093--7102}, + year={2020} +} +``` + +
+ + + +
+COCO-WholeBody-Hand (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Hand val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [pose_hrnetv2_w18_dark](/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_dark-8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 0.814 | 0.840 | 4.37 | [ckpt](https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_coco_wholebody_hand_256x256_dark-a9228c9c_20210908.pth) | [log](https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_coco_wholebody_hand_256x256_dark_20210908.log.json) | diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_dark_coco_wholebody_hand.yml b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_dark_coco_wholebody_hand.yml index af1d607d10..f5b275ae0c 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_dark_coco_wholebody_hand.yml +++ b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_dark_coco_wholebody_hand.yml @@ -1,17 +1,17 @@ -Models: -- Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_dark-8xb32-210e_coco-wholebody-hand-256x256.py - In Collection: DarkPose - Metadata: - Architecture: - - HRNetv2 - - DarkPose - Training Data: COCO-WholeBody-Hand - Name: td-hm_hrnetv2-w18_dark-8xb32-210e_coco-wholebody-hand-256x256 - Results: - - Dataset: COCO-WholeBody-Hand - Metrics: - AUC: 0.84 - EPE: 4.37 - PCK@0.2: 0.814 - Task: Hand 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_coco_wholebody_hand_256x256_dark-a9228c9c_20210908.pth +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_dark-8xb32-210e_coco-wholebody-hand-256x256.py + In Collection: DarkPose + Metadata: + Architecture: + - HRNetv2 + - DarkPose + Training Data: COCO-WholeBody-Hand + Name: td-hm_hrnetv2-w18_dark-8xb32-210e_coco-wholebody-hand-256x256 + Results: + - Dataset: COCO-WholeBody-Hand + Metrics: + AUC: 0.84 + EPE: 4.37 + PCK@0.2: 0.814 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_coco_wholebody_hand_256x256_dark-a9228c9c_20210908.pth diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/litehrnet_coco_wholebody_hand.md b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/litehrnet_coco_wholebody_hand.md index 7c084b79e1..1508d8698c 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/litehrnet_coco_wholebody_hand.md +++ b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/litehrnet_coco_wholebody_hand.md @@ -1,37 +1,37 @@ - - -
-LiteHRNet (CVPR'2021) - -```bibtex -@inproceedings{Yulitehrnet21, - title={Lite-HRNet: A Lightweight High-Resolution Network}, - author={Yu, Changqian and Xiao, Bin and Gao, Changxin and Yuan, Lu and Zhang, Lei and Sang, Nong and Wang, Jingdong}, - booktitle={CVPR}, - year={2021} -} -``` - -
- - - -
-COCO-WholeBody-Hand (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -Results on COCO-WholeBody-Hand val set - -| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | -| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | -| [LiteHRNet-18](/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_litehrnet-w18_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 0.795 | 0.830 | 4.77 | [ckpt](https://download.openmmlab.com/mmpose/hand/litehrnet/litehrnet_w18_coco_wholebody_hand_256x256-d6945e6a_20210908.pth) | [log](https://download.openmmlab.com/mmpose/hand/litehrnet/litehrnet_w18_coco_wholebody_hand_256x256_20210908.log.json) | + + +
+LiteHRNet (CVPR'2021) + +```bibtex +@inproceedings{Yulitehrnet21, + title={Lite-HRNet: A Lightweight High-Resolution Network}, + author={Yu, Changqian and Xiao, Bin and Gao, Changxin and Yuan, Lu and Zhang, Lei and Sang, Nong and Wang, Jingdong}, + booktitle={CVPR}, + year={2021} +} +``` + +
+ + + +
+COCO-WholeBody-Hand (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Hand val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [LiteHRNet-18](/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_litehrnet-w18_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 0.795 | 0.830 | 4.77 | [ckpt](https://download.openmmlab.com/mmpose/hand/litehrnet/litehrnet_w18_coco_wholebody_hand_256x256-d6945e6a_20210908.pth) | [log](https://download.openmmlab.com/mmpose/hand/litehrnet/litehrnet_w18_coco_wholebody_hand_256x256_20210908.log.json) | diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/litehrnet_coco_wholebody_hand.yml b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/litehrnet_coco_wholebody_hand.yml index eeecbfe7e2..66c5713872 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/litehrnet_coco_wholebody_hand.yml +++ b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/litehrnet_coco_wholebody_hand.yml @@ -1,16 +1,16 @@ -Models: -- Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_litehrnet-w18_8xb32-210e_coco-wholebody-hand-256x256.py - In Collection: LiteHRNet - Metadata: - Architecture: - - LiteHRNet - Training Data: COCO-WholeBody-Hand - Name: td-hm_litehrnet-w18_8xb32-210e_coco-wholebody-hand-256x256 - Results: - - Dataset: COCO-WholeBody-Hand - Metrics: - AUC: 0.83 - EPE: 4.77 - PCK@0.2: 0.795 - Task: Hand 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/hand/litehrnet/litehrnet_w18_coco_wholebody_hand_256x256-d6945e6a_20210908.pth +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_litehrnet-w18_8xb32-210e_coco-wholebody-hand-256x256.py + In Collection: LiteHRNet + Metadata: + Architecture: + - LiteHRNet + Training Data: COCO-WholeBody-Hand + Name: td-hm_litehrnet-w18_8xb32-210e_coco-wholebody-hand-256x256 + Results: + - Dataset: COCO-WholeBody-Hand + Metrics: + AUC: 0.83 + EPE: 4.77 + PCK@0.2: 0.795 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/litehrnet/litehrnet_w18_coco_wholebody_hand_256x256-d6945e6a_20210908.pth diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/mobilenetv2_coco_wholebody_hand.md b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/mobilenetv2_coco_wholebody_hand.md index cc76358a8f..6b65bd0f77 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/mobilenetv2_coco_wholebody_hand.md +++ b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/mobilenetv2_coco_wholebody_hand.md @@ -1,38 +1,38 @@ - - -
-MobilenetV2 (CVPR'2018) - -```bibtex -@inproceedings{sandler2018mobilenetv2, - title={Mobilenetv2: Inverted residuals and linear bottlenecks}, - author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={4510--4520}, - year={2018} -} -``` - -
- - - -
-COCO-WholeBody-Hand (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -Results on COCO-WholeBody-Hand val set - -| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | -| :--------------------------------------------------------: | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | -| [pose_mobilenetv2](/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 0.795 | 0.829 | 4.77 | [ckpt](https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_coco_wholebody_hand_256x256-06b8c877_20210909.pth) | [log](https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_coco_wholebody_hand_256x256_20210909.log.json) | + + +
+MobilenetV2 (CVPR'2018) + +```bibtex +@inproceedings{sandler2018mobilenetv2, + title={Mobilenetv2: Inverted residuals and linear bottlenecks}, + author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={4510--4520}, + year={2018} +} +``` + +
+ + + +
+COCO-WholeBody-Hand (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Hand val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------: | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [pose_mobilenetv2](/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 0.795 | 0.829 | 4.77 | [ckpt](https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_coco_wholebody_hand_256x256-06b8c877_20210909.pth) | [log](https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_coco_wholebody_hand_256x256_20210909.log.json) | diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/mobilenetv2_coco_wholebody_hand.yml b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/mobilenetv2_coco_wholebody_hand.yml index a9d0101ce7..cc8c5a26a5 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/mobilenetv2_coco_wholebody_hand.yml +++ b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/mobilenetv2_coco_wholebody_hand.yml @@ -1,17 +1,17 @@ -Models: -- Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: - - SimpleBaseline2D - - MobilenetV2 - Training Data: COCO-WholeBody-Hand - Name: td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256 - Results: - - Dataset: COCO-WholeBody-Hand - Metrics: - AUC: 0.829 - EPE: 4.77 - PCK@0.2: 0.795 - Task: Hand 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_coco_wholebody_hand_256x256-06b8c877_20210909.pth +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - MobilenetV2 + Training Data: COCO-WholeBody-Hand + Name: td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256 + Results: + - Dataset: COCO-WholeBody-Hand + Metrics: + AUC: 0.829 + EPE: 4.77 + PCK@0.2: 0.795 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_coco_wholebody_hand_256x256-06b8c877_20210909.pth diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/resnet_coco_wholebody_hand.md b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/resnet_coco_wholebody_hand.md index ae7f287e3d..21693f1bfe 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/resnet_coco_wholebody_hand.md +++ b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/resnet_coco_wholebody_hand.md @@ -1,55 +1,55 @@ - - -
-SimpleBaseline2D (ECCV'2018) - -```bibtex -@inproceedings{xiao2018simple, - title={Simple baselines for human pose estimation and tracking}, - author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, - booktitle={Proceedings of the European conference on computer vision (ECCV)}, - pages={466--481}, - year={2018} -} -``` - -
- - - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
- - - -
-COCO-WholeBody-Hand (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -Results on COCO-WholeBody-Hand val set - -| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | -| :--------------------------------------------------------: | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | -| [pose_resnet_50](/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_res50_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 0.800 | 0.833 | 4.64 | [ckpt](https://download.openmmlab.com/mmpose/hand/resnet/res50_coco_wholebody_hand_256x256-8dbc750c_20210908.pth) | [log](https://download.openmmlab.com/mmpose/hand/resnet/res50_coco_wholebody_hand_256x256_20210908.log.json) | + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+COCO-WholeBody-Hand (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Hand val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------: | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [pose_resnet_50](/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_res50_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 0.800 | 0.833 | 4.64 | [ckpt](https://download.openmmlab.com/mmpose/hand/resnet/res50_coco_wholebody_hand_256x256-8dbc750c_20210908.pth) | [log](https://download.openmmlab.com/mmpose/hand/resnet/res50_coco_wholebody_hand_256x256_20210908.log.json) | diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/resnet_coco_wholebody_hand.yml b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/resnet_coco_wholebody_hand.yml index 78d16a6e45..b663c5dd2e 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/resnet_coco_wholebody_hand.yml +++ b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/resnet_coco_wholebody_hand.yml @@ -1,17 +1,17 @@ -Models: -- Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_res50_8xb32-210e_coco-wholebody-hand-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: - - SimpleBaseline2D - - ResNet - Training Data: COCO-WholeBody-Hand - Name: td-hm_res50_8xb32-210e_coco-wholebody-hand-256x256 - Results: - - Dataset: COCO-WholeBody-Hand - Metrics: - AUC: 0.833 - EPE: 4.64 - PCK@0.2: 0.8 - Task: Hand 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/hand/resnet/res50_coco_wholebody_hand_256x256-8dbc750c_20210908.pth +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_res50_8xb32-210e_coco-wholebody-hand-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - ResNet + Training Data: COCO-WholeBody-Hand + Name: td-hm_res50_8xb32-210e_coco-wholebody-hand-256x256 + Results: + - Dataset: COCO-WholeBody-Hand + Metrics: + AUC: 0.833 + EPE: 4.64 + PCK@0.2: 0.8 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/resnet/res50_coco_wholebody_hand_256x256-8dbc750c_20210908.pth diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/scnet_coco_wholebody_hand.md b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/scnet_coco_wholebody_hand.md index 06c6fda74c..1cf44e2ddc 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/scnet_coco_wholebody_hand.md +++ b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/scnet_coco_wholebody_hand.md @@ -1,38 +1,38 @@ - - -
-SCNet (CVPR'2020) - -```bibtex -@inproceedings{liu2020improving, - title={Improving Convolutional Networks with Self-Calibrated Convolutions}, - author={Liu, Jiang-Jiang and Hou, Qibin and Cheng, Ming-Ming and Wang, Changhu and Feng, Jiashi}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={10096--10105}, - year={2020} -} -``` - -
- - - -
-COCO-WholeBody-Hand (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -Results on COCO-WholeBody-Hand val set - -| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | -| :--------------------------------------------------------: | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | -| [pose_scnet_50](/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_scnet50_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 0.803 | 0.834 | 4.55 | [ckpt](https://download.openmmlab.com/mmpose/hand/scnet/scnet50_coco_wholebody_hand_256x256-e73414c7_20210909.pth) | [log](https://download.openmmlab.com/mmpose/hand/scnet/scnet50_coco_wholebody_hand_256x256_20210909.log.json) | + + +
+SCNet (CVPR'2020) + +```bibtex +@inproceedings{liu2020improving, + title={Improving Convolutional Networks with Self-Calibrated Convolutions}, + author={Liu, Jiang-Jiang and Hou, Qibin and Cheng, Ming-Ming and Wang, Changhu and Feng, Jiashi}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={10096--10105}, + year={2020} +} +``` + +
+ + + +
+COCO-WholeBody-Hand (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody-Hand val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------: | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [pose_scnet_50](/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_scnet50_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 0.803 | 0.834 | 4.55 | [ckpt](https://download.openmmlab.com/mmpose/hand/scnet/scnet50_coco_wholebody_hand_256x256-e73414c7_20210909.pth) | [log](https://download.openmmlab.com/mmpose/hand/scnet/scnet50_coco_wholebody_hand_256x256_20210909.log.json) | diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/scnet_coco_wholebody_hand.yml b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/scnet_coco_wholebody_hand.yml index a8887b3c8e..0fd05eb85d 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/scnet_coco_wholebody_hand.yml +++ b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/scnet_coco_wholebody_hand.yml @@ -1,16 +1,16 @@ -Models: -- Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_scnet50_8xb32-210e_coco-wholebody-hand-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: - - SCNet - Training Data: COCO-WholeBody-Hand - Name: td-hm_scnet50_8xb32-210e_coco-wholebody-hand-256x256 - Results: - - Dataset: COCO-WholeBody-Hand - Metrics: - AUC: 0.834 - EPE: 4.55 - PCK@0.2: 0.803 - Task: Hand 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/hand/scnet/scnet50_coco_wholebody_hand_256x256-e73414c7_20210909.pth +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_scnet50_8xb32-210e_coco-wholebody-hand-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SCNet + Training Data: COCO-WholeBody-Hand + Name: td-hm_scnet50_8xb32-210e_coco-wholebody-hand-256x256 + Results: + - Dataset: COCO-WholeBody-Hand + Metrics: + AUC: 0.834 + EPE: 4.55 + PCK@0.2: 0.803 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/scnet/scnet50_coco_wholebody_hand_256x256-e73414c7_20210909.pth diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hourglass52_8xb32-210e_coco-wholebody-hand-256x256.py b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hourglass52_8xb32-210e_coco-wholebody-hand-256x256.py index e0bc1c8739..05b1ad1a6b 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hourglass52_8xb32-210e_coco-wholebody-hand-256x256.py +++ b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hourglass52_8xb32-210e_coco-wholebody-hand-256x256.py @@ -1,123 +1,123 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), 
heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HourglassNet', - num_stacks=1, - ), - head=dict( - type='CPMHead', - in_channels=256, - out_channels=21, - num_stages=1, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyHandDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - rotate_factor=180.0, - scale_factor=(0.7, 1.3)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE') -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HourglassNet', + num_stacks=1, + ), + head=dict( + type='CPMHead', + in_channels=256, + out_channels=21, + num_stages=1, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyHandDataset' +data_mode 
= 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + rotate_factor=180.0, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE') +] +test_evaluator = val_evaluator diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_8xb32-210e_coco-wholebody-hand-256x256.py b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_8xb32-210e_coco-wholebody-hand-256x256.py index a9b9f0f281..be8d278ebf 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_8xb32-210e_coco-wholebody-hand-256x256.py +++ b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_8xb32-210e_coco-wholebody-hand-256x256.py @@ -1,154 +1,154 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(18, 36)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(18, 36, 72)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - 
num_channels=(18, 36, 72, 144), - multiscale_output=True), - upsample=dict(mode='bilinear', align_corners=False)), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='HeatmapHead', - in_channels=270, - out_channels=21, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - conv_out_channels=(270, ), - conv_kernel_sizes=(1, ), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyHandDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', rotate_factor=180, - scale_factor=(0.7, 1.3)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE') -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + 
stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=21, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyHandDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE') +] +test_evaluator = val_evaluator diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_dark-8xb32-210e_coco-wholebody-hand-256x256.py b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_dark-8xb32-210e_coco-wholebody-hand-256x256.py index 5d67f393f6..1c0f1c3355 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_dark-8xb32-210e_coco-wholebody-hand-256x256.py +++ b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_dark-8xb32-210e_coco-wholebody-hand-256x256.py @@ -1,158 +1,158 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) -# codec settings -codec = dict( - type='MSRAHeatmap', - 
input_size=(256, 256), - heatmap_size=(64, 64), - sigma=2, - unbiased=True) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(18, 36)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(18, 36, 72)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(18, 36, 72, 144), - multiscale_output=True), - upsample=dict(mode='bilinear', align_corners=False)), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='HeatmapHead', - in_channels=270, - out_channels=21, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - conv_out_channels=(270, ), - conv_kernel_sizes=(1, ), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyHandDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict( - type='RandomBBoxTransform', rotate_factor=180, - scale_factor=(0.7, 1.3)), - dict(type='RandomFlip', direction='horizontal'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE') -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks 
= dict(checkpoint=dict(save_best='AUC', rule='greater')) +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=21, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyHandDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='RandomFlip', direction='horizontal'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE') +] +test_evaluator = val_evaluator diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_litehrnet-w18_8xb32-210e_coco-wholebody-hand-256x256.py b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_litehrnet-w18_8xb32-210e_coco-wholebody-hand-256x256.py index f3a6150e49..c160687b20 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_litehrnet-w18_8xb32-210e_coco-wholebody-hand-256x256.py +++ 
b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_litehrnet-w18_8xb32-210e_coco-wholebody-hand-256x256.py @@ -1,136 +1,136 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='LiteHRNet', - in_channels=3, - extra=dict( - stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), - num_stages=3, - stages_spec=dict( - num_modules=(2, 4, 2), - num_branches=(2, 3, 4), - num_blocks=(2, 2, 2), - module_type=('LITE', 'LITE', 'LITE'), - with_fuse=(True, True, True), - reduce_ratios=(8, 8, 8), - num_channels=( - (40, 80), - (40, 80, 160), - (40, 80, 160, 320), - )), - with_head=True, - )), - head=dict( - type='HeatmapHead', - in_channels=40, - out_channels=21, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyHandDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict( - type='RandomBBoxTransform', rotate_factor=180, - scale_factor=(0.7, 1.3)), - dict(type='RandomFlip', direction='horizontal'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE') -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = 
dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='LiteHRNet', + in_channels=3, + extra=dict( + stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), + num_stages=3, + stages_spec=dict( + num_modules=(2, 4, 2), + num_branches=(2, 3, 4), + num_blocks=(2, 2, 2), + module_type=('LITE', 'LITE', 'LITE'), + with_fuse=(True, True, True), + reduce_ratios=(8, 8, 8), + num_channels=( + (40, 80), + (40, 80, 160), + (40, 80, 160, 320), + )), + with_head=True, + )), + head=dict( + type='HeatmapHead', + in_channels=40, + out_channels=21, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyHandDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='RandomFlip', direction='horizontal'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE') +] +test_evaluator = val_evaluator diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py index dba8538a5f..e68449fa4b 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py +++ 
b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py @@ -1,120 +1,120 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='MobileNetV2', - widen_factor=1., - out_indices=(7, ), - init_cfg=dict(type='Pretrained', checkpoint='mmcls://mobilenet_v2')), - head=dict( - type='HeatmapHead', - in_channels=1280, - out_channels=21, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyHandDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict( - type='RandomBBoxTransform', rotate_factor=180, - scale_factor=(0.7, 1.3)), - dict(type='RandomFlip', direction='horizontal'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE') -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling 
LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MobileNetV2', + widen_factor=1., + out_indices=(7, ), + init_cfg=dict(type='Pretrained', checkpoint='mmcls://mobilenet_v2')), + head=dict( + type='HeatmapHead', + in_channels=1280, + out_channels=21, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyHandDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='RandomFlip', direction='horizontal'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE') +] +test_evaluator = val_evaluator diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_res50_8xb32-210e_coco-wholebody-hand-256x256.py b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_res50_8xb32-210e_coco-wholebody-hand-256x256.py index c04950bfaa..e7b9ff624a 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_res50_8xb32-210e_coco-wholebody-hand-256x256.py +++ b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_res50_8xb32-210e_coco-wholebody-hand-256x256.py @@ -1,119 +1,119 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = 
dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=21, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyHandDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict( - type='RandomBBoxTransform', rotate_factor=180, - scale_factor=(0.7, 1.3)), - dict(type='RandomFlip', direction='horizontal'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE') -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=21, + 
loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyHandDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='RandomFlip', direction='horizontal'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE') +] +test_evaluator = val_evaluator diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_scnet50_8xb32-210e_coco-wholebody-hand-256x256.py b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_scnet50_8xb32-210e_coco-wholebody-hand-256x256.py index f596227c5c..f65ea47139 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_scnet50_8xb32-210e_coco-wholebody-hand-256x256.py +++ b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_scnet50_8xb32-210e_coco-wholebody-hand-256x256.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='SCNet', - depth=50, - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/scnet50-7ef0a199.pth')), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=21, - 
loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyHandDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict( - type='RandomBBoxTransform', rotate_factor=180, - scale_factor=(0.7, 1.3)), - dict(type='RandomFlip', direction='horizontal'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE') -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='SCNet', + depth=50, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/scnet50-7ef0a199.pth')), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=21, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyHandDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='RandomFlip', direction='horizontal'), + dict(type='TopdownAffine', input_size=codec['input_size']), + 
dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE') +] +test_evaluator = val_evaluator diff --git a/configs/hand_2d_keypoint/topdown_heatmap/freihand2d/resnet_freihand2d.md b/configs/hand_2d_keypoint/topdown_heatmap/freihand2d/resnet_freihand2d.md index f1a6c80132..33a57aa5a0 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/freihand2d/resnet_freihand2d.md +++ b/configs/hand_2d_keypoint/topdown_heatmap/freihand2d/resnet_freihand2d.md @@ -1,56 +1,56 @@ - - -
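Aside: every config in this patch shares the optimization recipe shown above (Adam at lr=5e-4, a 500-iteration linear warm-up with start_factor=0.001, then MultiStepLR decay by 0.1 at epochs 170 and 200 over 210 epochs). The stand-alone sketch below only recomputes that schedule so the decay points are easy to check by eye; it is not MMEngine's scheduler code, and `iters_per_epoch` is an arbitrary assumption for illustration.

```python
# Recompute the LR schedule described by `param_scheduler` above
# (LinearLR warm-up for 500 iters, then MultiStepLR at epochs 170/200).
# Not MMEngine code; `iters_per_epoch` is a made-up value for the example.
base_lr = 5e-4
warmup_iters = 500
start_factor = 0.001
milestones = (170, 200)
gamma = 0.1
iters_per_epoch = 1000  # assumption; depends on dataset size and batch size


def lr_at(epoch: int, it: int = 0) -> float:
    """Learning rate at a given epoch and within-epoch iteration."""
    # Step decay: multiply by gamma once per milestone already passed.
    lr = base_lr * gamma ** sum(epoch >= m for m in milestones)
    # Linear warm-up over the first `warmup_iters` training iterations.
    global_iter = epoch * iters_per_epoch + it
    if global_iter < warmup_iters:
        lr *= start_factor + (1 - start_factor) * global_iter / warmup_iters
    return lr


for epoch in (0, 1, 169, 170, 199, 200, 209):
    print(f'epoch {epoch:3d}: lr = {lr_at(epoch):.2e}')
```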
-SimpleBaseline2D (ECCV'2018) - -```bibtex -@inproceedings{xiao2018simple, - title={Simple baselines for human pose estimation and tracking}, - author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, - booktitle={Proceedings of the European conference on computer vision (ECCV)}, - pages={466--481}, - year={2018} -} -``` - -
- - - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
- - - -
-FreiHand (ICCV'2019) - -```bibtex -@inproceedings{zimmermann2019freihand, - title={Freihand: A dataset for markerless capture of hand pose and shape from single rgb images}, - author={Zimmermann, Christian and Ceylan, Duygu and Yang, Jimei and Russell, Bryan and Argus, Max and Brox, Thomas}, - booktitle={Proceedings of the IEEE International Conference on Computer Vision}, - pages={813--822}, - year={2019} -} -``` - -
- -Results on FreiHand val & test set - -| Set | Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | -| :--- | :-------------------------------------------------------: | :--------: | :-----: | :---: | :--: | :-------------------------------------------------------: | :------------------------------------------------------: | -| test | [pose_resnet_50](/configs/hand_2d_keypoint/topdown_heatmap/freihand2d/td-hm_res50_8xb64-100e_freihand2d-224x224.py) | 224x224 | 0.999 | 0.868 | 3.27 | [ckpt](https://download.openmmlab.com/mmpose/hand/resnet/res50_freihand_224x224-ff0799bc_20200914.pth) | [log](https://download.openmmlab.com/mmpose/hand/resnet/res50_freihand_224x224_20200914.log.json) | + + +
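Aside: the FreiHand table above pairs `td-hm_res50_8xb64-100e_freihand2d-224x224.py` with a released checkpoint URL. A minimal inference sketch using that pair is shown below; it assumes MMPose 1.x's high-level `init_model`/`inference_topdown` helpers, and the input image path is a placeholder.

```python
# Hedged sketch: run the config/checkpoint pair listed in the table above on a
# single hand crop. Assumes MMPose 1.x APIs; 'demo/hand.jpg' is a placeholder.
from mmpose.apis import inference_topdown, init_model

config = ('configs/hand_2d_keypoint/topdown_heatmap/freihand2d/'
          'td-hm_res50_8xb64-100e_freihand2d-224x224.py')
checkpoint = ('https://download.openmmlab.com/mmpose/hand/resnet/'
              'res50_freihand_224x224-ff0799bc_20200914.pth')

model = init_model(config, checkpoint, device='cpu')
results = inference_topdown(model, 'demo/hand.jpg')
print(results[0].pred_instances.keypoints.shape)  # expected: (1, 21, 2)
```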
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+FreiHand (ICCV'2019) + +```bibtex +@inproceedings{zimmermann2019freihand, + title={Freihand: A dataset for markerless capture of hand pose and shape from single rgb images}, + author={Zimmermann, Christian and Ceylan, Duygu and Yang, Jimei and Russell, Bryan and Argus, Max and Brox, Thomas}, + booktitle={Proceedings of the IEEE International Conference on Computer Vision}, + pages={813--822}, + year={2019} +} +``` + +
+ +Results on FreiHand val & test set + +| Set | Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--- | :-------------------------------------------------------: | :--------: | :-----: | :---: | :--: | :-------------------------------------------------------: | :------------------------------------------------------: | +| test | [pose_resnet_50](/configs/hand_2d_keypoint/topdown_heatmap/freihand2d/td-hm_res50_8xb64-100e_freihand2d-224x224.py) | 224x224 | 0.999 | 0.868 | 3.27 | [ckpt](https://download.openmmlab.com/mmpose/hand/resnet/res50_freihand_224x224-ff0799bc_20200914.pth) | [log](https://download.openmmlab.com/mmpose/hand/resnet/res50_freihand_224x224_20200914.log.json) | diff --git a/configs/hand_2d_keypoint/topdown_heatmap/freihand2d/resnet_freihand2d.yml b/configs/hand_2d_keypoint/topdown_heatmap/freihand2d/resnet_freihand2d.yml index 9937b50be6..925f440f89 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/freihand2d/resnet_freihand2d.yml +++ b/configs/hand_2d_keypoint/topdown_heatmap/freihand2d/resnet_freihand2d.yml @@ -1,17 +1,17 @@ -Models: -- Config: configs/hand_2d_keypoint/topdown_heatmap/freihand2d/td-hm_res50_8xb64-100e_freihand2d-224x224.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: - - SimpleBaseline2D - - ResNet - Training Data: FreiHand - Name: td-hm_res50_8xb64-100e_freihand2d-224x224 - Results: - - Dataset: FreiHand - Metrics: - AUC: 0.868 - EPE: 3.27 - PCK@0.2: 0.999 - Task: Hand 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/hand/resnet/res50_freihand_224x224-ff0799bc_20200914.pth +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/freihand2d/td-hm_res50_8xb64-100e_freihand2d-224x224.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - ResNet + Training Data: FreiHand + Name: td-hm_res50_8xb64-100e_freihand2d-224x224 + Results: + - Dataset: FreiHand + Metrics: + AUC: 0.868 + EPE: 3.27 + PCK@0.2: 0.999 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/resnet/res50_freihand_224x224-ff0799bc_20200914.pth diff --git a/configs/hand_2d_keypoint/topdown_heatmap/freihand2d/td-hm_res50_8xb64-100e_freihand2d-224x224.py b/configs/hand_2d_keypoint/topdown_heatmap/freihand2d/td-hm_res50_8xb64-100e_freihand2d-224x224.py index cd1750cdeb..677ca31669 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/freihand2d/td-hm_res50_8xb64-100e_freihand2d-224x224.py +++ b/configs/hand_2d_keypoint/topdown_heatmap/freihand2d/td-hm_res50_8xb64-100e_freihand2d-224x224.py @@ -1,138 +1,138 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=100, val_interval=1) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=100, - milestones=[50, 70], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='AUC', rule='greater', interval=1)) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(224, 224), heatmap_size=(56, 56), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - 
type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=21, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'FreiHandDataset' -data_mode = 'topdown' -data_root = 'data/freihand/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale', padding=0.8), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - shift_factor=0.25, - rotate_factor=180, - scale_factor=(0.7, 1.3)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale', padding=0.8), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/freihand_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/freihand_val.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/freihand_test.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=100, val_interval=1) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=100, + milestones=[50, 70], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', interval=1)) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(224, 224), heatmap_size=(56, 56), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=21, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + 
test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'FreiHandDataset' +data_mode = 'topdown' +data_root = 'data/freihand/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', + shift_factor=0.25, + rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=0.8), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/freihand_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/freihand_val.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/freihand_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_dark_onehand10k.md b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_dark_onehand10k.md index 59d70fc597..88fb8e4200 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_dark_onehand10k.md +++ b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_dark_onehand10k.md @@ -1,60 +1,60 @@ - - -
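Aside: each file touched here is a plain Python config that pulls in `../../../_base_/default_runtime.py` via `_base_`. Below is a small sketch of loading and inspecting the FreiHand config just above with MMEngine's `Config` utility; the attribute access pattern is an assumption, and the expected values are simply the ones written in the config.

```python
# Hedged sketch: load one config from this patch and check a few fields.
# Assumes MMEngine is installed and the working directory is the repo root.
from mmengine.config import Config

cfg = Config.fromfile(
    'configs/hand_2d_keypoint/topdown_heatmap/freihand2d/'
    'td-hm_res50_8xb64-100e_freihand2d-224x224.py')

print(cfg.train_cfg.max_epochs)      # 100
print(cfg.codec.input_size)          # (224, 224)
print(cfg.model.head.out_channels)   # 21 hand keypoints
print(cfg.default_hooks.checkpoint)  # merged from _base_ plus save_best='AUC'
```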
-HRNetv2 (TPAMI'2019) - -```bibtex -@article{WangSCJDZLMTWLX19, - title={Deep High-Resolution Representation Learning for Visual Recognition}, - author={Jingdong Wang and Ke Sun and Tianheng Cheng and - Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and - Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, - journal={TPAMI}, - year={2019} -} -``` - -
- - - -
-DarkPose (CVPR'2020) - -```bibtex -@inproceedings{zhang2020distribution, - title={Distribution-aware coordinate representation for human pose estimation}, - author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={7093--7102}, - year={2020} -} -``` - -
- - - -
-OneHand10K (TCSVT'2019) - -```bibtex -@article{wang2018mask, - title={Mask-pose cascaded cnn for 2d hand pose estimation from single color image}, - author={Wang, Yangang and Peng, Cong and Liu, Yebin}, - journal={IEEE Transactions on Circuits and Systems for Video Technology}, - volume={29}, - number={11}, - pages={3258--3268}, - year={2018}, - publisher={IEEE} -} -``` - -
- -Results on OneHand10K val set - -| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | -| :--------------------------------------------------------- | :--------: | :-----: | :---: | :---: | :--------------------------------------------------------: | :-------------------------------------------------------: | -| [pose_hrnetv2_w18_dark](/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_dark-8xb64-210e_onehand10k-256x256.py) | 256x256 | 0.990 | 0.572 | 23.96 | [ckpt](https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_onehand10k_256x256_dark-a2f80c64_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_onehand10k_256x256_dark_20210330.log.json) | + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+DarkPose (CVPR'2020) + +```bibtex +@inproceedings{zhang2020distribution, + title={Distribution-aware coordinate representation for human pose estimation}, + author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={7093--7102}, + year={2020} +} +``` + +
+ + + +
+OneHand10K (TCSVT'2019) + +```bibtex +@article{wang2018mask, + title={Mask-pose cascaded cnn for 2d hand pose estimation from single color image}, + author={Wang, Yangang and Peng, Cong and Liu, Yebin}, + journal={IEEE Transactions on Circuits and Systems for Video Technology}, + volume={29}, + number={11}, + pages={3258--3268}, + year={2018}, + publisher={IEEE} +} +``` + +
+ +Results on OneHand10K val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :---: | :--------------------------------------------------------: | :-------------------------------------------------------: | +| [pose_hrnetv2_w18_dark](/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_dark-8xb64-210e_onehand10k-256x256.py) | 256x256 | 0.990 | 0.572 | 23.96 | [ckpt](https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_onehand10k_256x256_dark-a2f80c64_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_onehand10k_256x256_dark_20210330.log.json) | diff --git a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_dark_onehand10k.yml b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_dark_onehand10k.yml index 7fc64b75c7..d02795cd57 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_dark_onehand10k.yml +++ b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_dark_onehand10k.yml @@ -1,17 +1,17 @@ -Models: -- Config: configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_dark-8xb64-210e_onehand10k-256x256.py - In Collection: DarkPose - Metadata: - Architecture: - - HRNetv2 - - DarkPose - Training Data: OneHand10K - Name: td-hm_hrnetv2-w18_dark-8xb64-210e_onehand10k-256x256 - Results: - - Dataset: OneHand10K - Metrics: - AUC: 0.572 - EPE: 23.96 - PCK@0.2: 0.99 - Task: Hand 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_onehand10k_256x256_dark-a2f80c64_20210330.pth +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_dark-8xb64-210e_onehand10k-256x256.py + In Collection: DarkPose + Metadata: + Architecture: + - HRNetv2 + - DarkPose + Training Data: OneHand10K + Name: td-hm_hrnetv2-w18_dark-8xb64-210e_onehand10k-256x256 + Results: + - Dataset: OneHand10K + Metrics: + AUC: 0.572 + EPE: 23.96 + PCK@0.2: 0.99 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_onehand10k_256x256_dark-a2f80c64_20210330.pth diff --git a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_onehand10k.md b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_onehand10k.md index 262bf32253..41bed705fe 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_onehand10k.md +++ b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_onehand10k.md @@ -1,43 +1,43 @@ - - -
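For readers skimming the result tables: PCK@0.2, AUC and EPE are the metrics produced by the `PCKAccuracy`, `AUC` and `EPE` evaluators in these configs. EPE is the mean end-point error in pixels, AUC integrates the PCK curve over a range of error thresholds, and PCK@0.2 counts a keypoint as correct when its error is within 0.2 of the normalization size (the bounding-box size for these top-down hand configs). A schematic NumPy sketch of PCK, not MMPose's exact implementation:

```python
import numpy as np

def pck(pred, gt, norm_size, thr=0.2, visible=None):
    """Schematic PCK: fraction of keypoints whose error is within thr * norm_size.
    pred/gt: (N, K, 2) arrays, norm_size: (N,) per-sample normalization
    (e.g. bbox size). Not MMPose's exact implementation."""
    dist = np.linalg.norm(pred - gt, axis=-1)       # (N, K) pixel errors
    ok = dist <= thr * norm_size[:, None]           # threshold per sample
    if visible is not None:                         # optionally ignore invisible joints
        return ok[visible].mean()
    return ok.mean()

# toy example: one hand, 3 keypoints, 256-pixel bbox -> 2 of 3 within 51.2 px
pred = np.array([[[10., 10.], [50., 52.], [200., 140.]]])
gt   = np.array([[[12., 11.], [48., 50.], [120., 140.]]])
print(pck(pred, gt, norm_size=np.array([256.])))    # 0.666...
```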
-HRNetv2 (TPAMI'2019) - -```bibtex -@article{WangSCJDZLMTWLX19, - title={Deep High-Resolution Representation Learning for Visual Recognition}, - author={Jingdong Wang and Ke Sun and Tianheng Cheng and - Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and - Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, - journal={TPAMI}, - year={2019} -} -``` - -
- - - -
-OneHand10K (TCSVT'2019) - -```bibtex -@article{wang2018mask, - title={Mask-pose cascaded cnn for 2d hand pose estimation from single color image}, - author={Wang, Yangang and Peng, Cong and Liu, Yebin}, - journal={IEEE Transactions on Circuits and Systems for Video Technology}, - volume={29}, - number={11}, - pages={3258--3268}, - year={2018}, - publisher={IEEE} -} -``` - -
- -Results on OneHand10K val set - -| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | -| :--------------------------------------------------------- | :--------: | :-----: | :---: | :---: | :--------------------------------------------------------: | :-------------------------------------------------------: | -| [pose_hrnetv2_w18](/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py) | 256x256 | 0.990 | 0.567 | 24.26 | [ckpt](https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256_20210330.log.json) | + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+OneHand10K (TCSVT'2019) + +```bibtex +@article{wang2018mask, + title={Mask-pose cascaded cnn for 2d hand pose estimation from single color image}, + author={Wang, Yangang and Peng, Cong and Liu, Yebin}, + journal={IEEE Transactions on Circuits and Systems for Video Technology}, + volume={29}, + number={11}, + pages={3258--3268}, + year={2018}, + publisher={IEEE} +} +``` + +
+ +Results on OneHand10K val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :---: | :--------------------------------------------------------: | :-------------------------------------------------------: | +| [pose_hrnetv2_w18](/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py) | 256x256 | 0.990 | 0.567 | 24.26 | [ckpt](https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256_20210330.log.json) | diff --git a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_onehand10k.yml b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_onehand10k.yml index fd0c755876..f5ee14cfde 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_onehand10k.yml +++ b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_onehand10k.yml @@ -1,16 +1,16 @@ -Models: -- Config: configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py - In Collection: HRNetv2 - Metadata: - Architecture: - - HRNetv2 - Training Data: OneHand10K - Name: td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256 - Results: - - Dataset: OneHand10K - Metrics: - AUC: 0.567 - EPE: 24.26 - PCK@0.2: 0.99 - Task: Hand 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py + In Collection: HRNetv2 + Metadata: + Architecture: + - HRNetv2 + Training Data: OneHand10K + Name: td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256 + Results: + - Dataset: OneHand10K + Metrics: + AUC: 0.567 + EPE: 24.26 + PCK@0.2: 0.99 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth diff --git a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_udp_onehand10k.md b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_udp_onehand10k.md index ca1599c116..050703595e 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_udp_onehand10k.md +++ b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_udp_onehand10k.md @@ -1,60 +1,60 @@ - - -
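One detail worth calling out in the HRNetv2-W18 config files further down in this diff: the `FeatureMapProcessor` neck with `concat=True` gathers the four HRNet branch outputs at a common resolution and concatenates them channel-wise, so the head's `in_channels=270` is simply the sum of the stage-4 branch widths, 18 + 36 + 72 + 144. A trivial check (the real neck first resizes the lower-resolution branches; here they are assumed to share the 1/4-scale size already):

```python
import numpy as np

# Branch widths of the HRNetv2-W18 stage-4 outputs (num_channels in the configs below).
branch_channels = (18, 36, 72, 144)
feats = [np.zeros((c, 64, 64), dtype=np.float32) for c in branch_channels]
fused = np.concatenate(feats, axis=0)   # channel-wise concat at 1/4 resolution
print(fused.shape[0])                   # 270 == head in_channels
```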
-HRNetv2 (TPAMI'2019) - -```bibtex -@article{WangSCJDZLMTWLX19, - title={Deep High-Resolution Representation Learning for Visual Recognition}, - author={Jingdong Wang and Ke Sun and Tianheng Cheng and - Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and - Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, - journal={TPAMI}, - year={2019} -} -``` - -
- - - -
-UDP (CVPR'2020) - -```bibtex -@InProceedings{Huang_2020_CVPR, - author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, - title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, - booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, - month = {June}, - year = {2020} -} -``` - -
- - - -
-OneHand10K (TCSVT'2019) - -```bibtex -@article{wang2018mask, - title={Mask-pose cascaded cnn for 2d hand pose estimation from single color image}, - author={Wang, Yangang and Peng, Cong and Liu, Yebin}, - journal={IEEE Transactions on Circuits and Systems for Video Technology}, - volume={29}, - number={11}, - pages={3258--3268}, - year={2018}, - publisher={IEEE} -} -``` - -
- -Results on OneHand10K val set - -| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | -| :--------------------------------------------------------- | :--------: | :-----: | :---: | :---: | :--------------------------------------------------------: | :-------------------------------------------------------: | -| [pose_hrnetv2_w18_udp](/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_udp-8xb64-210e_onehand10k-256x256.py) | 256x256 | 0.990 | 0.571 | 23.88 | [ckpt](https://download.openmmlab.com/mmpose/hand/udp/hrnetv2_w18_onehand10k_256x256_udp-0d1b515d_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/udp/hrnetv2_w18_onehand10k_256x256_udp_20210330.log.json) | + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+UDP (CVPR'2020) + +```bibtex +@InProceedings{Huang_2020_CVPR, + author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, + title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, + booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2020} +} +``` + +
+ + + +
+OneHand10K (TCSVT'2019) + +```bibtex +@article{wang2018mask, + title={Mask-pose cascaded cnn for 2d hand pose estimation from single color image}, + author={Wang, Yangang and Peng, Cong and Liu, Yebin}, + journal={IEEE Transactions on Circuits and Systems for Video Technology}, + volume={29}, + number={11}, + pages={3258--3268}, + year={2018}, + publisher={IEEE} +} +``` + +
+ +Results on OneHand10K val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :---: | :--------------------------------------------------------: | :-------------------------------------------------------: | +| [pose_hrnetv2_w18_udp](/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_udp-8xb64-210e_onehand10k-256x256.py) | 256x256 | 0.990 | 0.571 | 23.88 | [ckpt](https://download.openmmlab.com/mmpose/hand/udp/hrnetv2_w18_onehand10k_256x256_udp-0d1b515d_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/udp/hrnetv2_w18_onehand10k_256x256_udp_20210330.log.json) | diff --git a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_udp_onehand10k.yml b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_udp_onehand10k.yml index 32d5dd6db5..903f047d73 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_udp_onehand10k.yml +++ b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_udp_onehand10k.yml @@ -1,17 +1,17 @@ -Models: -- Config: configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_udp-8xb64-210e_onehand10k-256x256.py - In Collection: UDP - Metadata: - Architecture: - - HRNetv2 - - UDP - Training Data: OneHand10K - Name: td-hm_hrnetv2-w18_udp-8xb64-210e_onehand10k-256x256 - Results: - - Dataset: OneHand10K - Metrics: - AUC: 0.571 - EPE: 23.88 - PCK@0.2: 0.99 - Task: Hand 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/hand/udp/hrnetv2_w18_onehand10k_256x256_udp-0d1b515d_20210330.pth +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_udp-8xb64-210e_onehand10k-256x256.py + In Collection: UDP + Metadata: + Architecture: + - HRNetv2 + - UDP + Training Data: OneHand10K + Name: td-hm_hrnetv2-w18_udp-8xb64-210e_onehand10k-256x256 + Results: + - Dataset: OneHand10K + Metrics: + AUC: 0.571 + EPE: 23.88 + PCK@0.2: 0.99 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/udp/hrnetv2_w18_onehand10k_256x256_udp-0d1b515d_20210330.pth diff --git a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/mobilenetv2_onehand10k.md b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/mobilenetv2_onehand10k.md index 3f0bf9d1b7..b89b1d1065 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/mobilenetv2_onehand10k.md +++ b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/mobilenetv2_onehand10k.md @@ -1,42 +1,42 @@ - - -
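All of these top-down configs enable horizontal flip testing (`flip_test=True`, `flip_mode='heatmap'`): the model also runs on the mirrored image, the resulting heatmaps are flipped back and averaged with the originals, and with the MSRA-style heatmaps a one-pixel shift (`shift_heatmap=True`) compensates for the quantization offset introduced by the flip; the UDP variant sets `shift_heatmap=False` since its unbiased processing removes the need for that empirical shift. A rough NumPy sketch of the averaging step, assuming hand keypoints with no left/right pairs to swap (so flipping back is just a mirror):

```python
import numpy as np

def flip_test_average(heatmaps, heatmaps_flipped, shift=True):
    """Rough sketch of heatmap flip-test aggregation (not MMPose's exact code).
    heatmaps, heatmaps_flipped: (K, H, W) outputs for the original image and the
    horizontally flipped image."""
    restored = heatmaps_flipped[..., ::-1]          # undo the horizontal flip
    if shift:
        # MSRA-style convention: shift right by one pixel (first column kept)
        restored = np.concatenate(
            [restored[..., :1], restored[..., :-1]], axis=-1)
    return 0.5 * (heatmaps + restored)

hm = np.random.rand(21, 64, 64).astype(np.float32)
hm_flip = hm[..., ::-1].copy()                      # pretend the model is flip-consistent
print(flip_test_average(hm, hm_flip, shift=False).shape)   # (21, 64, 64)
```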
-MobilenetV2 (CVPR'2018) - -```bibtex -@inproceedings{sandler2018mobilenetv2, - title={Mobilenetv2: Inverted residuals and linear bottlenecks}, - author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={4510--4520}, - year={2018} -} -``` - -
- - - -
-OneHand10K (TCSVT'2019) - -```bibtex -@article{wang2018mask, - title={Mask-pose cascaded cnn for 2d hand pose estimation from single color image}, - author={Wang, Yangang and Peng, Cong and Liu, Yebin}, - journal={IEEE Transactions on Circuits and Systems for Video Technology}, - volume={29}, - number={11}, - pages={3258--3268}, - year={2018}, - publisher={IEEE} -} -``` - -
- -Results on OneHand10K val set - -| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | -| :--------------------------------------------------------- | :--------: | :-----: | :---: | :---: | :--------------------------------------------------------: | :-------------------------------------------------------: | -| [pose_mobilenet_v2](/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_mobilenetv2_8xb64-210e_onehand10k-256x256.py) | 256x256 | 0.986 | 0.537 | 28.56 | [ckpt](https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_onehand10k_256x256-f3a3d90e_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_onehand10k_256x256_20210330.log.json) | + + +
+MobilenetV2 (CVPR'2018) + +```bibtex +@inproceedings{sandler2018mobilenetv2, + title={Mobilenetv2: Inverted residuals and linear bottlenecks}, + author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={4510--4520}, + year={2018} +} +``` + +
+ + + +
+OneHand10K (TCSVT'2019) + +```bibtex +@article{wang2018mask, + title={Mask-pose cascaded cnn for 2d hand pose estimation from single color image}, + author={Wang, Yangang and Peng, Cong and Liu, Yebin}, + journal={IEEE Transactions on Circuits and Systems for Video Technology}, + volume={29}, + number={11}, + pages={3258--3268}, + year={2018}, + publisher={IEEE} +} +``` + +
+ +Results on OneHand10K val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :---: | :--------------------------------------------------------: | :-------------------------------------------------------: | +| [pose_mobilenet_v2](/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_mobilenetv2_8xb64-210e_onehand10k-256x256.py) | 256x256 | 0.986 | 0.537 | 28.56 | [ckpt](https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_onehand10k_256x256-f3a3d90e_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_onehand10k_256x256_20210330.log.json) | diff --git a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/mobilenetv2_onehand10k.yml b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/mobilenetv2_onehand10k.yml index ade1f054f1..409018965e 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/mobilenetv2_onehand10k.yml +++ b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/mobilenetv2_onehand10k.yml @@ -1,17 +1,17 @@ -Models: -- Config: configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_mobilenetv2_8xb64-210e_onehand10k-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: - - SimpleBaseline2D - - MobilenetV2 - Training Data: OneHand10K - Name: td-hm_mobilenetv2_8xb64-210e_onehand10k-256x256 - Results: - - Dataset: OneHand10K - Metrics: - AUC: 0.537 - EPE: 28.56 - PCK@0.2: 0.986 - Task: Hand 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_onehand10k_256x256-f3a3d90e_20210330.pth +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_mobilenetv2_8xb64-210e_onehand10k-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - MobilenetV2 + Training Data: OneHand10K + Name: td-hm_mobilenetv2_8xb64-210e_onehand10k-256x256 + Results: + - Dataset: OneHand10K + Metrics: + AUC: 0.537 + EPE: 28.56 + PCK@0.2: 0.986 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_onehand10k_256x256-f3a3d90e_20210330.pth diff --git a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/resnet_onehand10k.md b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/resnet_onehand10k.md index c07817d68e..24985360d7 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/resnet_onehand10k.md +++ b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/resnet_onehand10k.md @@ -1,59 +1,59 @@ - - -
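A note on the `8xb64` / `8xb32` parts of the config names together with `auto_scale_lr = dict(base_batch_size=512)`: the names encode the reference setup (8 GPUs times 64 or 32 samples per GPU), and when automatic LR scaling is switched on, MMEngine rescales the learning rate linearly by the actual total batch size relative to `base_batch_size`. A quick arithmetic illustration (the 2-GPU case is hypothetical):

```python
# Linear LR scaling as applied by MMEngine's auto_scale_lr when enabled:
#   scaled_lr = base_lr * actual_batch_size / base_batch_size
base_lr = 5e-4
base_batch_size = 512            # auto_scale_lr['base_batch_size'] in these configs

for num_gpus, per_gpu in [(8, 64), (2, 64)]:
    actual = num_gpus * per_gpu
    print(f'{num_gpus} GPUs x {per_gpu}: lr = {base_lr * actual / base_batch_size}')
# 8 GPUs x 64: lr = 0.0005   (the reference setting, factor 1.0)
# 2 GPUs x 64: lr = 0.000125
```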
-SimpleBaseline2D (ECCV'2018) - -```bibtex -@inproceedings{xiao2018simple, - title={Simple baselines for human pose estimation and tracking}, - author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, - booktitle={Proceedings of the European conference on computer vision (ECCV)}, - pages={466--481}, - year={2018} -} -``` - -
- - - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
- - - -
-OneHand10K (TCSVT'2019) - -```bibtex -@article{wang2018mask, - title={Mask-pose cascaded cnn for 2d hand pose estimation from single color image}, - author={Wang, Yangang and Peng, Cong and Liu, Yebin}, - journal={IEEE Transactions on Circuits and Systems for Video Technology}, - volume={29}, - number={11}, - pages={3258--3268}, - year={2018}, - publisher={IEEE} -} -``` - -
- -Results on OneHand10K val set - -| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | -| :--------------------------------------------------------- | :--------: | :-----: | :---: | :---: | :--------------------------------------------------------: | :-------------------------------------------------------: | -| [pose_resnet_50](/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_res50_8xb32-210e_onehand10k-256x256.py) | 256x256 | 0.989 | 0.555 | 25.16 | [ckpt](https://download.openmmlab.com/mmpose/hand/resnet/res50_onehand10k_256x256-739c8639_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/resnet/res50_onehand10k_256x256_20210330.log.json) | + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+OneHand10K (TCSVT'2019) + +```bibtex +@article{wang2018mask, + title={Mask-pose cascaded cnn for 2d hand pose estimation from single color image}, + author={Wang, Yangang and Peng, Cong and Liu, Yebin}, + journal={IEEE Transactions on Circuits and Systems for Video Technology}, + volume={29}, + number={11}, + pages={3258--3268}, + year={2018}, + publisher={IEEE} +} +``` + +
+ +Results on OneHand10K val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :---: | :--------------------------------------------------------: | :-------------------------------------------------------: | +| [pose_resnet_50](/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_res50_8xb32-210e_onehand10k-256x256.py) | 256x256 | 0.989 | 0.555 | 25.16 | [ckpt](https://download.openmmlab.com/mmpose/hand/resnet/res50_onehand10k_256x256-739c8639_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/resnet/res50_onehand10k_256x256_20210330.log.json) | diff --git a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/resnet_onehand10k.yml b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/resnet_onehand10k.yml index 59dc7f523f..f30171df9d 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/resnet_onehand10k.yml +++ b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/resnet_onehand10k.yml @@ -1,17 +1,17 @@ -Models: -- Config: configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_res50_8xb32-210e_onehand10k-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: - - SimpleBaseline2D - - ResNet - Training Data: OneHand10K - Name: td-hm_res50_8xb32-210e_onehand10k-256x256 - Results: - - Dataset: OneHand10K - Metrics: - AUC: 0.555 - EPE: 25.16 - PCK@0.2: 0.989 - Task: Hand 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/hand/resnet/res50_onehand10k_256x256-739c8639_20210330.pth +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_res50_8xb32-210e_onehand10k-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - ResNet + Training Data: OneHand10K + Name: td-hm_res50_8xb32-210e_onehand10k-256x256 + Results: + - Dataset: OneHand10K + Metrics: + AUC: 0.555 + EPE: 25.16 + PCK@0.2: 0.989 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/resnet/res50_onehand10k_256x256-739c8639_20210330.pth diff --git a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py index 99419065aa..499a11aca1 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py +++ b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py @@ -1,158 +1,158 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - 
type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(18, 36)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(18, 36, 72)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(18, 36, 72, 144), - multiscale_output=True), - upsample=dict(mode='bilinear', align_corners=False)), - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://msra/hrnetv2_w18', - )), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='HeatmapHead', - in_channels=270, - out_channels=21, - deconv_out_channels=None, - conv_out_channels=(270, ), - conv_kernel_sizes=(1, ), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'OneHand10KDataset' -data_mode = 'topdown' -data_root = 'data/onehand10k/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', rotate_factor=180, - scale_factor=(0.7, 1.3)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/onehand10k_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/onehand10k_test.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 
103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://msra/hrnetv2_w18', + )), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=21, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'OneHand10KDataset' +data_mode = 'topdown' +data_root = 'data/onehand10k/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/onehand10k_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/onehand10k_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_dark-8xb64-210e_onehand10k-256x256.py b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_dark-8xb64-210e_onehand10k-256x256.py index 610e9d149b..08b6588d34 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_dark-8xb64-210e_onehand10k-256x256.py +++ b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_dark-8xb64-210e_onehand10k-256x256.py @@ -1,162 +1,162 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - 
by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', - input_size=(256, 256), - heatmap_size=(64, 64), - sigma=2, - unbiased=True) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(18, 36)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(18, 36, 72)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(18, 36, 72, 144), - multiscale_output=True), - upsample=dict(mode='bilinear', align_corners=False)), - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://msra/hrnetv2_w18', - )), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='HeatmapHead', - in_channels=270, - out_channels=21, - deconv_out_channels=None, - conv_out_channels=(270, ), - conv_kernel_sizes=(1, ), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'OneHand10KDataset' -data_mode = 'topdown' -data_root = 'data/onehand10k/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', rotate_factor=180, - scale_factor=(0.7, 1.3)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/onehand10k_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/onehand10k_test.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# 
learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://msra/hrnetv2_w18', + )), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=21, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'OneHand10KDataset' +data_mode = 'topdown' +data_root = 'data/onehand10k/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/onehand10k_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/onehand10k_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git 
a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_udp-8xb64-210e_onehand10k-256x256.py b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_udp-8xb64-210e_onehand10k-256x256.py index 54e2220d63..0dd9402c78 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_udp-8xb64-210e_onehand10k-256x256.py +++ b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_udp-8xb64-210e_onehand10k-256x256.py @@ -1,158 +1,158 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(18, 36)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(18, 36, 72)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(18, 36, 72, 144), - multiscale_output=True), - upsample=dict(mode='bilinear', align_corners=False)), - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://msra/hrnetv2_w18', - )), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='HeatmapHead', - in_channels=270, - out_channels=21, - deconv_out_channels=None, - conv_out_channels=(270, ), - conv_kernel_sizes=(1, ), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'OneHand10KDataset' -data_mode = 'topdown' -data_root = 'data/onehand10k/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', rotate_factor=180, - scale_factor=(0.7, 1.3)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/onehand10k_train.json', - data_prefix=dict(img=''), - 
pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/onehand10k_test.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://msra/hrnetv2_w18', + )), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=21, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'OneHand10KDataset' +data_mode = 'topdown' +data_root = 'data/onehand10k/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, 
+ ann_file='annotations/onehand10k_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/onehand10k_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_mobilenetv2_8xb64-210e_onehand10k-256x256.py b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_mobilenetv2_8xb64-210e_onehand10k-256x256.py index 1f4e61c37c..63f6af3178 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_mobilenetv2_8xb64-210e_onehand10k-256x256.py +++ b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_mobilenetv2_8xb64-210e_onehand10k-256x256.py @@ -1,125 +1,125 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='MobileNetV2', - widen_factor=1., - out_indices=(7, ), - init_cfg=dict( - type='Pretrained', - checkpoint='mmcls://mobilenet_v2', - )), - head=dict( - type='HeatmapHead', - in_channels=1280, - out_channels=21, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'OneHand10KDataset' -data_mode = 'topdown' -data_root = 'data/onehand10k/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', rotate_factor=180, - scale_factor=(0.7, 1.3)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/onehand10k_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = 
dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/onehand10k_test.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MobileNetV2', + widen_factor=1., + out_indices=(7, ), + init_cfg=dict( + type='Pretrained', + checkpoint='mmcls://mobilenet_v2', + )), + head=dict( + type='HeatmapHead', + in_channels=1280, + out_channels=21, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'OneHand10KDataset' +data_mode = 'topdown' +data_root = 'data/onehand10k/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/onehand10k_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/onehand10k_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git 
a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_res50_8xb32-210e_onehand10k-256x256.py b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_res50_8xb32-210e_onehand10k-256x256.py index 36589d899d..11b549c18a 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_res50_8xb32-210e_onehand10k-256x256.py +++ b/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_res50_8xb32-210e_onehand10k-256x256.py @@ -1,124 +1,124 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict( - type='Pretrained', - checkpoint='torchvision://resnet50', - )), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=21, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'OneHand10KDataset' -data_mode = 'topdown' -data_root = 'data/onehand10k/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', rotate_factor=180, - scale_factor=(0.7, 1.3)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/onehand10k_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/onehand10k_test.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, 
+)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict( + type='Pretrained', + checkpoint='torchvision://resnet50', + )), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=21, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'OneHand10KDataset' +data_mode = 'topdown' +data_root = 'data/onehand10k/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/onehand10k_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/onehand10k_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_dark_rhd2d.md b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_dark_rhd2d.md index 334d97978c..2fc7d85502 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_dark_rhd2d.md +++ b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_dark_rhd2d.md @@ -1,58 +1,58 @@ - - -
-HRNetv2 (TPAMI'2019) - -```bibtex -@article{WangSCJDZLMTWLX19, - title={Deep High-Resolution Representation Learning for Visual Recognition}, - author={Jingdong Wang and Ke Sun and Tianheng Cheng and - Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and - Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, - journal={TPAMI}, - year={2019} -} -``` - -
- - - -
-DarkPose (CVPR'2020) - -```bibtex -@inproceedings{zhang2020distribution, - title={Distribution-aware coordinate representation for human pose estimation}, - author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={7093--7102}, - year={2020} -} -``` - -
- - - -
-RHD (ICCV'2017) - -```bibtex -@TechReport{zb2017hand, - author={Christian Zimmermann and Thomas Brox}, - title={Learning to Estimate 3D Hand Pose from Single RGB Images}, - institution={arXiv:1705.01389}, - year={2017}, - note="https://arxiv.org/abs/1705.01389", - url="https://lmb.informatik.uni-freiburg.de/projects/hand3d/" -} -``` - -
- -Results on RHD test set - -| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | -| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | -| [pose_hrnetv2_w18_dark](/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_dark-8xb64-210e_rhd2d-256x256.py) | 256x256 | 0.992 | 0.903 | 2.18 | [ckpt](https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_rhd2d_256x256_dark-4df3a347_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_rhd2d_256x256_dark_20210330.log.json) | + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+DarkPose (CVPR'2020) + +```bibtex +@inproceedings{zhang2020distribution, + title={Distribution-aware coordinate representation for human pose estimation}, + author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={7093--7102}, + year={2020} +} +``` + +
+ + + +
+RHD (ICCV'2017) + +```bibtex +@TechReport{zb2017hand, + author={Christian Zimmermann and Thomas Brox}, + title={Learning to Estimate 3D Hand Pose from Single RGB Images}, + institution={arXiv:1705.01389}, + year={2017}, + note="https://arxiv.org/abs/1705.01389", + url="https://lmb.informatik.uni-freiburg.de/projects/hand3d/" +} +``` + +
+ +Results on RHD test set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [pose_hrnetv2_w18_dark](/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_dark-8xb64-210e_rhd2d-256x256.py) | 256x256 | 0.992 | 0.903 | 2.18 | [ckpt](https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_rhd2d_256x256_dark-4df3a347_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_rhd2d_256x256_dark_20210330.log.json) | diff --git a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_dark_rhd2d.yml b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_dark_rhd2d.yml index 7400dc19e0..9dde35d576 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_dark_rhd2d.yml +++ b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_dark_rhd2d.yml @@ -1,17 +1,17 @@ -Models: -- Config: configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_dark-8xb64-210e_rhd2d-256x256.py - In Collection: DarkPose - Metadata: - Architecture: - - HRNetv2 - - DarkPose - Training Data: RHD - Name: td-hm_hrnetv2-w18_dark-8xb64-210e_rhd2d-256x256 - Results: - - Dataset: RHD - Metrics: - AUC: 0.903 - EPE: 2.18 - PCK@0.2: 0.992 - Task: Hand 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_rhd2d_256x256_dark-4df3a347_20210330.pth +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_dark-8xb64-210e_rhd2d-256x256.py + In Collection: DarkPose + Metadata: + Architecture: + - HRNetv2 + - DarkPose + Training Data: RHD + Name: td-hm_hrnetv2-w18_dark-8xb64-210e_rhd2d-256x256 + Results: + - Dataset: RHD + Metrics: + AUC: 0.903 + EPE: 2.18 + PCK@0.2: 0.992 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_rhd2d_256x256_dark-4df3a347_20210330.pth diff --git a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_rhd2d.md b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_rhd2d.md index 6fe91fe17b..1703e8c676 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_rhd2d.md +++ b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_rhd2d.md @@ -1,41 +1,41 @@ - - -
-HRNetv2 (TPAMI'2019) - -```bibtex -@article{WangSCJDZLMTWLX19, - title={Deep High-Resolution Representation Learning for Visual Recognition}, - author={Jingdong Wang and Ke Sun and Tianheng Cheng and - Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and - Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, - journal={TPAMI}, - year={2019} -} -``` - -
- - - -
-RHD (ICCV'2017) - -```bibtex -@TechReport{zb2017hand, - author={Christian Zimmermann and Thomas Brox}, - title={Learning to Estimate 3D Hand Pose from Single RGB Images}, - institution={arXiv:1705.01389}, - year={2017}, - note="https://arxiv.org/abs/1705.01389", - url="https://lmb.informatik.uni-freiburg.de/projects/hand3d/" -} -``` - -
- -Results on RHD test set - -| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | -| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | -| [pose_hrnetv2_w18](/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_8xb64-210e_rhd2d-256x256.py) | 256x256 | 0.992 | 0.902 | 2.21 | [ckpt](https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_rhd2d_256x256-95b20dd8_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_rhd2d_256x256_20210330.log.json) | + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+RHD (ICCV'2017) + +```bibtex +@TechReport{zb2017hand, + author={Christian Zimmermann and Thomas Brox}, + title={Learning to Estimate 3D Hand Pose from Single RGB Images}, + institution={arXiv:1705.01389}, + year={2017}, + note="https://arxiv.org/abs/1705.01389", + url="https://lmb.informatik.uni-freiburg.de/projects/hand3d/" +} +``` + +
+ +Results on RHD test set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [pose_hrnetv2_w18](/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_8xb64-210e_rhd2d-256x256.py) | 256x256 | 0.992 | 0.902 | 2.21 | [ckpt](https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_rhd2d_256x256-95b20dd8_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_rhd2d_256x256_20210330.log.json) | diff --git a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_rhd2d.yml b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_rhd2d.yml index f5292da770..8415f3cef7 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_rhd2d.yml +++ b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_rhd2d.yml @@ -1,16 +1,16 @@ -Models: -- Config: configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_8xb64-210e_rhd2d-256x256.py - In Collection: HRNetv2 - Metadata: - Architecture: - - HRNetv2 - Training Data: RHD - Name: td-hm_hrnetv2-w18_8xb64-210e_rhd2d-256x256 - Results: - - Dataset: RHD - Metrics: - AUC: 0.902 - EPE: 2.21 - PCK@0.2: 0.992 - Task: Hand 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_rhd2d_256x256-95b20dd8_20210330.pth +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_8xb64-210e_rhd2d-256x256.py + In Collection: HRNetv2 + Metadata: + Architecture: + - HRNetv2 + Training Data: RHD + Name: td-hm_hrnetv2-w18_8xb64-210e_rhd2d-256x256 + Results: + - Dataset: RHD + Metrics: + AUC: 0.902 + EPE: 2.21 + PCK@0.2: 0.992 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_rhd2d_256x256-95b20dd8_20210330.pth diff --git a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_udp_rhd2d.md b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_udp_rhd2d.md index c494eb8fc6..da766c4ba5 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_udp_rhd2d.md +++ b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_udp_rhd2d.md @@ -1,58 +1,58 @@ - - -
-HRNetv2 (TPAMI'2019) - -```bibtex -@article{WangSCJDZLMTWLX19, - title={Deep High-Resolution Representation Learning for Visual Recognition}, - author={Jingdong Wang and Ke Sun and Tianheng Cheng and - Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and - Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, - journal={TPAMI}, - year={2019} -} -``` - -
- - - -
-UDP (CVPR'2020) - -```bibtex -@InProceedings{Huang_2020_CVPR, - author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, - title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, - booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, - month = {June}, - year = {2020} -} -``` - -
- - - -
-RHD (ICCV'2017) - -```bibtex -@TechReport{zb2017hand, - author={Christian Zimmermann and Thomas Brox}, - title={Learning to Estimate 3D Hand Pose from Single RGB Images}, - institution={arXiv:1705.01389}, - year={2017}, - note="https://arxiv.org/abs/1705.01389", - url="https://lmb.informatik.uni-freiburg.de/projects/hand3d/" -} -``` - -
- -Results on RHD test set - -| Arch | Input Size | PCKh@0.7 | AUC | EPE | ckpt | log | -| :--------------------------------------------------------- | :--------: | :------: | :---: | :--: | :--------------------------------------------------------: | :-------------------------------------------------------: | -| [pose_hrnetv2_w18_udp](/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_udp-8xb64-210e_rhd2d-256x256.py) | 256x256 | 0.992 | 0.902 | 2.19 | [ckpt](https://download.openmmlab.com/mmpose/hand/udp/hrnetv2_w18_rhd2d_256x256_udp-63ba6007_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/udp/hrnetv2_w18_rhd2d_256x256_udp_20210330.log.json) | + + +
+HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
+ + + +
+UDP (CVPR'2020) + +```bibtex +@InProceedings{Huang_2020_CVPR, + author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, + title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, + booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2020} +} +``` + +
+ + + +
+RHD (ICCV'2017) + +```bibtex +@TechReport{zb2017hand, + author={Christian Zimmermann and Thomas Brox}, + title={Learning to Estimate 3D Hand Pose from Single RGB Images}, + institution={arXiv:1705.01389}, + year={2017}, + note="https://arxiv.org/abs/1705.01389", + url="https://lmb.informatik.uni-freiburg.de/projects/hand3d/" +} +``` + +
+ +Results on RHD test set + +| Arch | Input Size | PCKh@0.7 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :------: | :---: | :--: | :--------------------------------------------------------: | :-------------------------------------------------------: | +| [pose_hrnetv2_w18_udp](/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_udp-8xb64-210e_rhd2d-256x256.py) | 256x256 | 0.992 | 0.902 | 2.19 | [ckpt](https://download.openmmlab.com/mmpose/hand/udp/hrnetv2_w18_rhd2d_256x256_udp-63ba6007_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/udp/hrnetv2_w18_rhd2d_256x256_udp_20210330.log.json) | diff --git a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_udp_rhd2d.yml b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_udp_rhd2d.yml index db63b682e2..148da23ddb 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_udp_rhd2d.yml +++ b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_udp_rhd2d.yml @@ -1,17 +1,17 @@ -Models: -- Config: configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_udp-8xb64-210e_rhd2d-256x256.py - In Collection: UDP - Metadata: - Architecture: - - HRNetv2 - - UDP - Training Data: RHD - Name: td-hm_hrnetv2-w18_udp-8xb64-210e_rhd2d-256x256 - Results: - - Dataset: RHD - Metrics: - AUC: 0.902 - EPE: 2.19 - PCKh@0.7: 0.992 - Task: Hand 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/hand/udp/hrnetv2_w18_rhd2d_256x256_udp-63ba6007_20210330.pth +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_udp-8xb64-210e_rhd2d-256x256.py + In Collection: UDP + Metadata: + Architecture: + - HRNetv2 + - UDP + Training Data: RHD + Name: td-hm_hrnetv2-w18_udp-8xb64-210e_rhd2d-256x256 + Results: + - Dataset: RHD + Metrics: + AUC: 0.902 + EPE: 2.19 + PCKh@0.7: 0.992 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/udp/hrnetv2_w18_rhd2d_256x256_udp-63ba6007_20210330.pth diff --git a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/mobilenetv2_rhd2d.md b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/mobilenetv2_rhd2d.md index 877247fe86..19506c5af8 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/mobilenetv2_rhd2d.md +++ b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/mobilenetv2_rhd2d.md @@ -1,40 +1,40 @@ - - -
-MobilenetV2 (CVPR'2018) - -```bibtex -@inproceedings{sandler2018mobilenetv2, - title={Mobilenetv2: Inverted residuals and linear bottlenecks}, - author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={4510--4520}, - year={2018} -} -``` - -
- - - -
-RHD (ICCV'2017) - -```bibtex -@TechReport{zb2017hand, - author={Christian Zimmermann and Thomas Brox}, - title={Learning to Estimate 3D Hand Pose from Single RGB Images}, - institution={arXiv:1705.01389}, - year={2017}, - note="https://arxiv.org/abs/1705.01389", - url="https://lmb.informatik.uni-freiburg.de/projects/hand3d/" -} -``` - -
- -Results on RHD test set - -| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | -| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | -| [pose_mobilenet_v2](/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_mobilenetv2_8xb64-210e_rhd2d-256x256.py) | 256x256 | 0.985 | 0.883 | 2.79 | [ckpt](https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_rhd2d_256x256-85fa02db_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_rhd2d_256x256_20210330.log.json) | + + +
+MobilenetV2 (CVPR'2018) + +```bibtex +@inproceedings{sandler2018mobilenetv2, + title={Mobilenetv2: Inverted residuals and linear bottlenecks}, + author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={4510--4520}, + year={2018} +} +``` + +
+ + + +
+RHD (ICCV'2017) + +```bibtex +@TechReport{zb2017hand, + author={Christian Zimmermann and Thomas Brox}, + title={Learning to Estimate 3D Hand Pose from Single RGB Images}, + institution={arXiv:1705.01389}, + year={2017}, + note="https://arxiv.org/abs/1705.01389", + url="https://lmb.informatik.uni-freiburg.de/projects/hand3d/" +} +``` + +
+ +Results on RHD test set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [pose_mobilenet_v2](/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_mobilenetv2_8xb64-210e_rhd2d-256x256.py) | 256x256 | 0.985 | 0.883 | 2.79 | [ckpt](https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_rhd2d_256x256-85fa02db_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_rhd2d_256x256_20210330.log.json) | diff --git a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/mobilenetv2_rhd2d.yml b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/mobilenetv2_rhd2d.yml index 202a636fbe..0d1bd76afd 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/mobilenetv2_rhd2d.yml +++ b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/mobilenetv2_rhd2d.yml @@ -1,17 +1,17 @@ -Models: -- Config: configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_mobilenetv2_8xb64-210e_rhd2d-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: - - SimpleBaseline2D - - MobilenetV2 - Training Data: RHD - Name: td-hm_mobilenetv2_8xb64-210e_rhd2d-256x256 - Results: - - Dataset: RHD - Metrics: - AUC: 0.883 - EPE: 2.79 - PCK@0.2: 0.985 - Task: Hand 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_rhd2d_256x256-85fa02db_20210330.pth +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_mobilenetv2_8xb64-210e_rhd2d-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - MobilenetV2 + Training Data: RHD + Name: td-hm_mobilenetv2_8xb64-210e_rhd2d-256x256 + Results: + - Dataset: RHD + Metrics: + AUC: 0.883 + EPE: 2.79 + PCK@0.2: 0.985 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_rhd2d_256x256-85fa02db_20210330.pth diff --git a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/resnet_rhd2d.md b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/resnet_rhd2d.md index f103a0df40..843bd75fae 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/resnet_rhd2d.md +++ b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/resnet_rhd2d.md @@ -1,57 +1,57 @@ - - -
-SimpleBaseline2D (ECCV'2018) - -```bibtex -@inproceedings{xiao2018simple, - title={Simple baselines for human pose estimation and tracking}, - author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, - booktitle={Proceedings of the European conference on computer vision (ECCV)}, - pages={466--481}, - year={2018} -} -``` - -
- - - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
- - - -
-RHD (ICCV'2017) - -```bibtex -@TechReport{zb2017hand, - author={Christian Zimmermann and Thomas Brox}, - title={Learning to Estimate 3D Hand Pose from Single RGB Images}, - institution={arXiv:1705.01389}, - year={2017}, - note="https://arxiv.org/abs/1705.01389", - url="https://lmb.informatik.uni-freiburg.de/projects/hand3d/" -} -``` - -
- -Results on RHD test set - -| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | -| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | -| [pose_resnet50](/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_res50_8xb64-210e_rhd2d-256x256.py) | 256x256 | 0.991 | 0.898 | 2.32 | [ckpt](https://download.openmmlab.com/mmpose/hand/resnet/res50_rhd2d_256x256-5dc7e4cc_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/resnet/res50_rhd2d_256x256_20210330.log.json) | + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
+ + + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ + + +
+RHD (ICCV'2017) + +```bibtex +@TechReport{zb2017hand, + author={Christian Zimmermann and Thomas Brox}, + title={Learning to Estimate 3D Hand Pose from Single RGB Images}, + institution={arXiv:1705.01389}, + year={2017}, + note="https://arxiv.org/abs/1705.01389", + url="https://lmb.informatik.uni-freiburg.de/projects/hand3d/" +} +``` + +
+ +Results on RHD test set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [pose_resnet50](/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_res50_8xb64-210e_rhd2d-256x256.py) | 256x256 | 0.991 | 0.898 | 2.32 | [ckpt](https://download.openmmlab.com/mmpose/hand/resnet/res50_rhd2d_256x256-5dc7e4cc_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/resnet/res50_rhd2d_256x256_20210330.log.json) | diff --git a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/resnet_rhd2d.yml b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/resnet_rhd2d.yml index d09f8ba268..30cf36b0a9 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/resnet_rhd2d.yml +++ b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/resnet_rhd2d.yml @@ -1,17 +1,17 @@ -Models: -- Config: configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_res50_8xb64-210e_rhd2d-256x256.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: - - SimpleBaseline2D - - ResNet - Training Data: RHD - Name: td-hm_res50_8xb64-210e_rhd2d-256x256 - Results: - - Dataset: RHD - Metrics: - AUC: 0.898 - EPE: 2.32 - PCK@0.2: 0.991 - Task: Hand 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/hand/resnet/res50_rhd2d_256x256-5dc7e4cc_20210330.pth +Models: +- Config: configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_res50_8xb64-210e_rhd2d-256x256.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: + - SimpleBaseline2D + - ResNet + Training Data: RHD + Name: td-hm_res50_8xb64-210e_rhd2d-256x256 + Results: + - Dataset: RHD + Metrics: + AUC: 0.898 + EPE: 2.32 + PCK@0.2: 0.991 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/resnet/res50_rhd2d_256x256-5dc7e4cc_20210330.pth diff --git a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_8xb64-210e_rhd2d-256x256.py b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_8xb64-210e_rhd2d-256x256.py index 4a9bcc9b89..e9cb89b136 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_8xb64-210e_rhd2d-256x256.py +++ b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_8xb64-210e_rhd2d-256x256.py @@ -1,158 +1,158 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - 
num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(18, 36)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(18, 36, 72)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(18, 36, 72, 144), - multiscale_output=True), - upsample=dict(mode='bilinear', align_corners=False)), - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://msra/hrnetv2_w18', - )), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='HeatmapHead', - in_channels=270, - out_channels=21, - deconv_out_channels=None, - conv_out_channels=(270, ), - conv_kernel_sizes=(1, ), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'Rhd2DDataset' -data_mode = 'topdown' -data_root = 'data/rhd/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', rotate_factor=180, - scale_factor=(0.7, 1.3)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/rhd_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/rhd_test.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + 
num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://msra/hrnetv2_w18', + )), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=21, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'Rhd2DDataset' +data_mode = 'topdown' +data_root = 'data/rhd/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/rhd_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/rhd_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_dark-8xb64-210e_rhd2d-256x256.py b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_dark-8xb64-210e_rhd2d-256x256.py index 44b8dc0f5a..eac55958d1 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_dark-8xb64-210e_rhd2d-256x256.py +++ b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_dark-8xb64-210e_rhd2d-256x256.py @@ -1,162 +1,162 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks 
-default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', - input_size=(256, 256), - heatmap_size=(64, 64), - sigma=2, - unbiased=True) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(18, 36)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(18, 36, 72)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(18, 36, 72, 144), - multiscale_output=True), - upsample=dict(mode='bilinear', align_corners=False)), - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://msra/hrnetv2_w18', - )), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='HeatmapHead', - in_channels=270, - out_channels=21, - deconv_out_channels=None, - conv_out_channels=(270, ), - conv_kernel_sizes=(1, ), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'Rhd2DDataset' -data_mode = 'topdown' -data_root = 'data/rhd/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', rotate_factor=180, - scale_factor=(0.7, 1.3)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/rhd_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/rhd_test.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the 
actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(256, 256), + heatmap_size=(64, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://msra/hrnetv2_w18', + )), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=21, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'Rhd2DDataset' +data_mode = 'topdown' +data_root = 'data/rhd/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/rhd_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/rhd_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_udp-8xb64-210e_rhd2d-256x256.py b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_udp-8xb64-210e_rhd2d-256x256.py index d1c796234d..c2a672b645 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_udp-8xb64-210e_rhd2d-256x256.py +++ 
b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_udp-8xb64-210e_rhd2d-256x256.py @@ -1,158 +1,158 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(18, 36)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(18, 36, 72)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(18, 36, 72, 144), - multiscale_output=True), - upsample=dict(mode='bilinear', align_corners=False)), - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://msra/hrnetv2_w18', - )), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='HeatmapHead', - in_channels=270, - out_channels=21, - deconv_out_channels=None, - conv_out_channels=(270, ), - conv_kernel_sizes=(1, ), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'Rhd2DDataset' -data_mode = 'topdown' -data_root = 'data/rhd/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', rotate_factor=180, - scale_factor=(0.7, 1.3)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/rhd_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/rhd_test.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) 
-test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://msra/hrnetv2_w18', + )), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='HeatmapHead', + in_channels=270, + out_channels=21, + deconv_out_channels=None, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'Rhd2DDataset' +data_mode = 'topdown' +data_root = 'data/rhd/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/rhd_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/rhd_test.json', + 
data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_mobilenetv2_8xb64-210e_rhd2d-256x256.py b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_mobilenetv2_8xb64-210e_rhd2d-256x256.py index d7176bacd7..68ee85736d 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_mobilenetv2_8xb64-210e_rhd2d-256x256.py +++ b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_mobilenetv2_8xb64-210e_rhd2d-256x256.py @@ -1,125 +1,125 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='MobileNetV2', - widen_factor=1., - out_indices=(7, ), - init_cfg=dict( - type='Pretrained', - checkpoint='mmcls://mobilenet_v2', - )), - head=dict( - type='HeatmapHead', - in_channels=1280, - out_channels=21, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'Rhd2DDataset' -data_mode = 'topdown' -data_root = 'data/rhd/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', rotate_factor=180, - scale_factor=(0.7, 1.3)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/rhd_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/rhd_test.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - 
dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='MobileNetV2', + widen_factor=1., + out_indices=(7, ), + init_cfg=dict( + type='Pretrained', + checkpoint='mmcls://mobilenet_v2', + )), + head=dict( + type='HeatmapHead', + in_channels=1280, + out_channels=21, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'Rhd2DDataset' +data_mode = 'topdown' +data_root = 'data/rhd/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/rhd_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/rhd_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_res50_8xb64-210e_rhd2d-256x256.py b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_res50_8xb64-210e_rhd2d-256x256.py index da55568028..07d04dc34a 100644 --- a/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_res50_8xb64-210e_rhd2d-256x256.py +++ b/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_res50_8xb64-210e_rhd2d-256x256.py @@ -1,124 +1,124 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = 
dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict( - type='Pretrained', - checkpoint='torchvision://resnet50', - )), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=21, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'Rhd2DDataset' -data_mode = 'topdown' -data_root = 'data/rhd/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', rotate_factor=180, - scale_factor=(0.7, 1.3)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/rhd_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/rhd_test.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + 
data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict( + type='Pretrained', + checkpoint='torchvision://resnet50', + )), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=21, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'Rhd2DDataset' +data_mode = 'topdown' +data_root = 'data/rhd/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/rhd_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/rhd_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/hand_2d_keypoint/topdown_regression/README.md b/configs/hand_2d_keypoint/topdown_regression/README.md index 0210a89c2d..2fe838b4ba 100644 --- a/configs/hand_2d_keypoint/topdown_regression/README.md +++ b/configs/hand_2d_keypoint/topdown_regression/README.md @@ -1,25 +1,25 @@ -# Top-down regression-based pose estimation - -Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. At the 2nd stage, regression based methods directly regress the keypoint coordinates given the features extracted from the bounding box area, following the paradigm introduced in [Deeppose: Human pose estimation via deep neural networks](http://openaccess.thecvf.com/content_cvpr_2014/html/Toshev_DeepPose_Human_Pose_2014_CVPR_paper.html). - -
- -## Results and Models - -### OneHand10K Dataset - -Results on OneHand10K val set - -| Model | Input Size | PCK@0.2 | AUC | EPE | Details and Download | -| :-------: | :--------: | :-----: | :---: | :---: | :-------------------------------------------------------: | -| ResNet-50 | 256x256 | 0.990 | 0.485 | 34.21 | [resnet_onehand10k.md](./onehand10k/resnet_onehand10k.md) | - -### RHD Dataset - -Results on RHD test set - -| Model | Input Size | PCK@0.2 | AUC | EPE | Details and Download | -| :-------: | :--------: | :-----: | :---: | :--: | :----------------------------------------: | -| ResNet-50 | 256x256 | 0.988 | 0.865 | 3.32 | [resnet_rhd2d.md](./rhd2d/resnet_rhd2d.md) | +# Top-down regression-based pose estimation + +Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. At the 2nd stage, regression based methods directly regress the keypoint coordinates given the features extracted from the bounding box area, following the paradigm introduced in [Deeppose: Human pose estimation via deep neural networks](http://openaccess.thecvf.com/content_cvpr_2014/html/Toshev_DeepPose_Human_Pose_2014_CVPR_paper.html). + +
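To make the regression paradigm above concrete, here is a minimal PyTorch sketch of such a head: backbone features are global-average-pooled and a single fully connected layer predicts the K keypoint coordinates. It is an illustration only, not MMPose's `RegressionHead`; the 2048-channel input and 21 joints simply mirror the ResNet-50 hand configs in this patch (`neck=dict(type='GlobalAveragePooling')`, `head=dict(type='RegressionHead', in_channels=2048, num_joints=21, ...)`), and the feature-map size in the toy usage is made up.

```python
import torch
from torch import nn


class TinyRegressionHead(nn.Module):
    """Conceptual stand-in for a regression head: GAP + one FC layer."""

    def __init__(self, in_channels: int = 2048, num_joints: int = 21):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2d(1)          # global average pooling
        self.fc = nn.Linear(in_channels, num_joints * 2)
        self.num_joints = num_joints

    def forward(self, feats: torch.Tensor) -> torch.Tensor:
        x = self.pool(feats).flatten(1)              # (N, C)
        coords = self.fc(x)                          # (N, K * 2)
        return coords.reshape(-1, self.num_joints, 2)


# toy usage with a fake 8x8 feature map from a 256x256 crop
print(TinyRegressionHead()(torch.randn(2, 2048, 8, 8)).shape)  # torch.Size([2, 21, 2])
```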
+ +## Results and Models + +### OneHand10K Dataset + +Results on OneHand10K val set + +| Model | Input Size | PCK@0.2 | AUC | EPE | Details and Download | +| :-------: | :--------: | :-----: | :---: | :---: | :-------------------------------------------------------: | +| ResNet-50 | 256x256 | 0.990 | 0.485 | 34.21 | [resnet_onehand10k.md](./onehand10k/resnet_onehand10k.md) | + +### RHD Dataset + +Results on RHD test set + +| Model | Input Size | PCK@0.2 | AUC | EPE | Details and Download | +| :-------: | :--------: | :-----: | :---: | :--: | :----------------------------------------: | +| ResNet-50 | 256x256 | 0.988 | 0.865 | 3.32 | [resnet_rhd2d.md](./rhd2d/resnet_rhd2d.md) | diff --git a/configs/hand_2d_keypoint/topdown_regression/onehand10k/resnet_onehand10k.md b/configs/hand_2d_keypoint/topdown_regression/onehand10k/resnet_onehand10k.md index 40c0c18495..9e9e60372a 100644 --- a/configs/hand_2d_keypoint/topdown_regression/onehand10k/resnet_onehand10k.md +++ b/configs/hand_2d_keypoint/topdown_regression/onehand10k/resnet_onehand10k.md @@ -1,59 +1,59 @@ - - -
-DeepPose (CVPR'2014) - -```bibtex -@inproceedings{toshev2014deeppose, - title={Deeppose: Human pose estimation via deep neural networks}, - author={Toshev, Alexander and Szegedy, Christian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={1653--1660}, - year={2014} -} -``` - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
-OneHand10K (TCSVT'2019) - -```bibtex -@article{wang2018mask, - title={Mask-pose cascaded cnn for 2d hand pose estimation from single color image}, - author={Wang, Yangang and Peng, Cong and Liu, Yebin}, - journal={IEEE Transactions on Circuits and Systems for Video Technology}, - volume={29}, - number={11}, - pages={3258--3268}, - year={2018}, - publisher={IEEE} -} -``` - -
- -Results on OneHand10K val set - -| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | -| :--------------------------------------------------------- | :--------: | :-----: | :---: | :---: | :--------------------------------------------------------: | :-------------------------------------------------------: | -| [deeppose_resnet_50](/configs/hand_2d_keypoint/topdown_regression/onehand10k/td-reg_res50_8xb64-210e_onehand10k-256x256.py) | 256x256 | 0.990 | 0.485 | 34.21 | [ckpt](https://download.openmmlab.com/mmpose/hand/deeppose/deeppose_res50_onehand10k_256x256-cbddf43a_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/deeppose/deeppose_res50_onehand10k_256x256_20210330.log.json) | + + +
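For reference, a minimal inference sketch using the config and checkpoint listed in the table above. It assumes an MMPose 1.x installation, assumes the released checkpoint loads with this 1.x config, and uses a placeholder image path (`hand.jpg`); when no bounding boxes are supplied, `inference_topdown` treats the whole image as one hand instance.

```python
from mmpose.apis import inference_topdown, init_model

config = ('configs/hand_2d_keypoint/topdown_regression/onehand10k/'
          'td-reg_res50_8xb64-210e_onehand10k-256x256.py')
checkpoint = ('https://download.openmmlab.com/mmpose/hand/deeppose/'
              'deeppose_res50_onehand10k_256x256-cbddf43a_20210330.pth')

model = init_model(config, checkpoint, device='cpu')
# 'hand.jpg' is a placeholder; pass any image with a roughly centered hand.
results = inference_topdown(model, 'hand.jpg')
print(results[0].pred_instances.keypoints.shape)  # (1, 21, 2)
```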
+DeepPose (CVPR'2014) + +```bibtex +@inproceedings{toshev2014deeppose, + title={Deeppose: Human pose estimation via deep neural networks}, + author={Toshev, Alexander and Szegedy, Christian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1653--1660}, + year={2014} +} +``` + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+OneHand10K (TCSVT'2019) + +```bibtex +@article{wang2018mask, + title={Mask-pose cascaded cnn for 2d hand pose estimation from single color image}, + author={Wang, Yangang and Peng, Cong and Liu, Yebin}, + journal={IEEE Transactions on Circuits and Systems for Video Technology}, + volume={29}, + number={11}, + pages={3258--3268}, + year={2018}, + publisher={IEEE} +} +``` + +
+ +Results on OneHand10K val set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :---: | :--------------------------------------------------------: | :-------------------------------------------------------: | +| [deeppose_resnet_50](/configs/hand_2d_keypoint/topdown_regression/onehand10k/td-reg_res50_8xb64-210e_onehand10k-256x256.py) | 256x256 | 0.990 | 0.485 | 34.21 | [ckpt](https://download.openmmlab.com/mmpose/hand/deeppose/deeppose_res50_onehand10k_256x256-cbddf43a_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/deeppose/deeppose_res50_onehand10k_256x256_20210330.log.json) | diff --git a/configs/hand_2d_keypoint/topdown_regression/onehand10k/resnet_onehand10k.yml b/configs/hand_2d_keypoint/topdown_regression/onehand10k/resnet_onehand10k.yml index d5e9d8122e..6b92014f1d 100644 --- a/configs/hand_2d_keypoint/topdown_regression/onehand10k/resnet_onehand10k.yml +++ b/configs/hand_2d_keypoint/topdown_regression/onehand10k/resnet_onehand10k.yml @@ -1,17 +1,17 @@ -Models: -- Config: configs/hand_2d_keypoint/topdown_regression/onehand10k/td-reg_res50_8xb64-210e_onehand10k-256x256.py - In Collection: DeepPose - Metadata: - Architecture: - - DeepPose - - ResNet - Training Data: OneHand10K - Name: td-reg_res50_8xb64-210e_onehand10k-256x256 - Results: - - Dataset: OneHand10K - Metrics: - AUC: 0.485 - EPE: 34.21 - PCK@0.2: 0.99 - Task: Hand 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/hand/deeppose/deeppose_res50_onehand10k_256x256-cbddf43a_20210330.pth +Models: +- Config: configs/hand_2d_keypoint/topdown_regression/onehand10k/td-reg_res50_8xb64-210e_onehand10k-256x256.py + In Collection: DeepPose + Metadata: + Architecture: + - DeepPose + - ResNet + Training Data: OneHand10K + Name: td-reg_res50_8xb64-210e_onehand10k-256x256 + Results: + - Dataset: OneHand10K + Metrics: + AUC: 0.485 + EPE: 34.21 + PCK@0.2: 0.99 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/deeppose/deeppose_res50_onehand10k_256x256-cbddf43a_20210330.pth diff --git a/configs/hand_2d_keypoint/topdown_regression/onehand10k/td-reg_res50_8xb64-210e_onehand10k-256x256.py b/configs/hand_2d_keypoint/topdown_regression/onehand10k/td-reg_res50_8xb64-210e_onehand10k-256x256.py index ee1556d45e..4d33893530 100644 --- a/configs/hand_2d_keypoint/topdown_regression/onehand10k/td-reg_res50_8xb64-210e_onehand10k-256x256.py +++ b/configs/hand_2d_keypoint/topdown_regression/onehand10k/td-reg_res50_8xb64-210e_onehand10k-256x256.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict(type='RegressionLabel', input_size=(256, 256)) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - 
type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - neck=dict(type='GlobalAveragePooling'), - head=dict( - type='RegressionHead', - in_channels=2048, - num_joints=21, - loss=dict(type='SmoothL1Loss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'OneHand10KDataset' -data_mode = 'topdown' -data_root = 'data/onehand10k/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', rotate_factor=180, - scale_factor=(0.7, 1.3)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/onehand10k_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/onehand10k_test.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(256, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RegressionHead', + in_channels=2048, + num_joints=21, + loss=dict(type='SmoothL1Loss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'OneHand10KDataset' +data_mode = 'topdown' +data_root = 'data/onehand10k/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + 
dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/onehand10k_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/onehand10k_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/hand_2d_keypoint/topdown_regression/rhd2d/resnet_rhd2d.md b/configs/hand_2d_keypoint/topdown_regression/rhd2d/resnet_rhd2d.md index 6cca5580ba..25ae62b935 100644 --- a/configs/hand_2d_keypoint/topdown_regression/rhd2d/resnet_rhd2d.md +++ b/configs/hand_2d_keypoint/topdown_regression/rhd2d/resnet_rhd2d.md @@ -1,57 +1,57 @@ - - -
-DeepPose (CVPR'2014) - -```bibtex -@inproceedings{toshev2014deeppose, - title={Deeppose: Human pose estimation via deep neural networks}, - author={Toshev, Alexander and Szegedy, Christian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={1653--1660}, - year={2014} -} -``` - -
-ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
-RHD (ICCV'2017) - -```bibtex -@TechReport{zb2017hand, - author={Christian Zimmermann and Thomas Brox}, - title={Learning to Estimate 3D Hand Pose from Single RGB Images}, - institution={arXiv:1705.01389}, - year={2017}, - note="https://arxiv.org/abs/1705.01389", - url="https://lmb.informatik.uni-freiburg.de/projects/hand3d/" -} -``` - -
- -Results on RHD test set - -| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | -| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | -| [deeppose_resnet_50](/configs/hand_2d_keypoint/topdown_regression/rhd2d/td-reg_res50_8xb64-210e_rhd2d-256x256.py) | 256x256 | 0.988 | 0.865 | 3.32 | [ckpt](https://download.openmmlab.com/mmpose/hand/deeppose/deeppose_res50_rhd2d_256x256-37f1c4d3_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/deeppose/deeppose_res50_rhd2d_256x256_20210330.log.json) | + + +
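The PCK@0.2 column above can be read as the fraction of keypoints whose prediction error falls within a threshold (0.2 in these configs) times a normalization length such as the bounding-box size. Below is a simplified, self-contained sketch of such a score; it is not MMPose's `PCKAccuracy` metric (which also handles the choice of normalization and per-keypoint masks), and the array shapes and toy numbers are assumptions for illustration.

```python
import numpy as np


def pck(pred, gt, norm_size, mask, thr=0.2):
    """pred, gt: (N, K, 2); norm_size: (N,); mask: (N, K) visibility flags."""
    dist = np.linalg.norm(pred - gt, axis=-1)        # (N, K) pixel errors
    correct = (dist / norm_size[:, None] <= thr) & mask
    return correct.sum() / max(mask.sum(), 1)        # ratio over visible joints


# toy usage: 4 hand instances with 21 keypoints in a 256x256 crop
rng = np.random.default_rng(0)
gt = rng.uniform(0, 256, size=(4, 21, 2))
pred = gt + rng.normal(scale=5.0, size=gt.shape)
print(pck(pred, gt, np.full(4, 256.0), np.ones((4, 21), dtype=bool)))
```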
+DeepPose (CVPR'2014) + +```bibtex +@inproceedings{toshev2014deeppose, + title={Deeppose: Human pose estimation via deep neural networks}, + author={Toshev, Alexander and Szegedy, Christian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1653--1660}, + year={2014} +} +``` + +
+ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+RHD (ICCV'2017) + +```bibtex +@TechReport{zb2017hand, + author={Christian Zimmermann and Thomas Brox}, + title={Learning to Estimate 3D Hand Pose from Single RGB Images}, + institution={arXiv:1705.01389}, + year={2017}, + note="https://arxiv.org/abs/1705.01389", + url="https://lmb.informatik.uni-freiburg.de/projects/hand3d/" +} +``` + +
+ +Results on RHD test set + +| Arch | Input Size | PCK@0.2 | AUC | EPE | ckpt | log | +| :--------------------------------------------------------- | :--------: | :-----: | :---: | :--: | :--------------------------------------------------------: | :--------------------------------------------------------: | +| [deeppose_resnet_50](/configs/hand_2d_keypoint/topdown_regression/rhd2d/td-reg_res50_8xb64-210e_rhd2d-256x256.py) | 256x256 | 0.988 | 0.865 | 3.32 | [ckpt](https://download.openmmlab.com/mmpose/hand/deeppose/deeppose_res50_rhd2d_256x256-37f1c4d3_20210330.pth) | [log](https://download.openmmlab.com/mmpose/hand/deeppose/deeppose_res50_rhd2d_256x256_20210330.log.json) | diff --git a/configs/hand_2d_keypoint/topdown_regression/rhd2d/resnet_rhd2d.yml b/configs/hand_2d_keypoint/topdown_regression/rhd2d/resnet_rhd2d.yml index 3d0a920c5d..705329db16 100644 --- a/configs/hand_2d_keypoint/topdown_regression/rhd2d/resnet_rhd2d.yml +++ b/configs/hand_2d_keypoint/topdown_regression/rhd2d/resnet_rhd2d.yml @@ -1,17 +1,17 @@ -Models: -- Config: configs/hand_2d_keypoint/topdown_regression/rhd2d/td-reg_res50_8xb64-210e_rhd2d-256x256.py - In Collection: DeepPose - Metadata: - Architecture: - - DeepPose - - ResNet - Training Data: RHD - Name: td-reg_res50_8xb64-210e_rhd2d-256x256 - Results: - - Dataset: RHD - Metrics: - AUC: 0.865 - EPE: 3.32 - PCK@0.2: 0.988 - Task: Hand 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/hand/deeppose/deeppose_res50_rhd2d_256x256-37f1c4d3_20210330.pth +Models: +- Config: configs/hand_2d_keypoint/topdown_regression/rhd2d/td-reg_res50_8xb64-210e_rhd2d-256x256.py + In Collection: DeepPose + Metadata: + Architecture: + - DeepPose + - ResNet + Training Data: RHD + Name: td-reg_res50_8xb64-210e_rhd2d-256x256 + Results: + - Dataset: RHD + Metrics: + AUC: 0.865 + EPE: 3.32 + PCK@0.2: 0.988 + Task: Hand 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/hand/deeppose/deeppose_res50_rhd2d_256x256-37f1c4d3_20210330.pth diff --git a/configs/hand_2d_keypoint/topdown_regression/rhd2d/td-reg_res50_8xb64-210e_rhd2d-256x256.py b/configs/hand_2d_keypoint/topdown_regression/rhd2d/td-reg_res50_8xb64-210e_rhd2d-256x256.py index a350c24bfe..7591892601 100644 --- a/configs/hand_2d_keypoint/topdown_regression/rhd2d/td-reg_res50_8xb64-210e_rhd2d-256x256.py +++ b/configs/hand_2d_keypoint/topdown_regression/rhd2d/td-reg_res50_8xb64-210e_rhd2d-256x256.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) - -# codec settings -codec = dict(type='RegressionLabel', input_size=(256, 256)) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - neck=dict(type='GlobalAveragePooling'), - head=dict( - 
type='RegressionHead', - in_channels=2048, - num_joints=21, - loss=dict(type='SmoothL1Loss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'Rhd2DDataset' -data_mode = 'topdown' -data_root = 'data/rhd/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', rotate_factor=180, - scale_factor=(0.7, 1.3)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/rhd_train.json', - data_prefix=dict(img=''), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/rhd_test.json', - data_prefix=dict(img=''), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE'), -] -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater')) + +# codec settings +codec = dict(type='RegressionLabel', input_size=(256, 256)) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RegressionHead', + in_channels=2048, + num_joints=21, + loss=dict(type='SmoothL1Loss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'Rhd2DDataset' +data_mode = 'topdown' +data_root = 'data/rhd/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='RandomBBoxTransform', rotate_factor=180, + scale_factor=(0.7, 1.3)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + 
dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/rhd_train.json', + data_prefix=dict(img=''), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/rhd_test.json', + data_prefix=dict(img=''), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE'), +] +test_evaluator = val_evaluator diff --git a/configs/hand_3d_keypoint/README.md b/configs/hand_3d_keypoint/README.md index 330319f42b..752fd92928 100644 --- a/configs/hand_3d_keypoint/README.md +++ b/configs/hand_3d_keypoint/README.md @@ -1,7 +1,7 @@ -# 3D Hand Pose Estimation - -3D hand pose estimation is defined as the task of detecting the poses (or keypoints) of the hand from an input image. - -## Data preparation - -Please follow [DATA Preparation](/docs/en/dataset_zoo/3d_hand_keypoint.md) to prepare data. +# 3D Hand Pose Estimation + +3D hand pose estimation is defined as the task of detecting the poses (or keypoints) of the hand from an input image. + +## Data preparation + +Please follow [DATA Preparation](/docs/en/dataset_zoo/3d_hand_keypoint.md) to prepare data. diff --git a/configs/hand_gesture/README.md b/configs/hand_gesture/README.md index 7cc5bb323b..aaf4235442 100644 --- a/configs/hand_gesture/README.md +++ b/configs/hand_gesture/README.md @@ -1,13 +1,13 @@ -# Gesture Recognition - -Gesture recognition aims to recognize the hand gestures in the video, such as thumbs up. - -## Data preparation - -Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_hand_gesture.md) to prepare data. - -## Demo - -Please follow [Demo](/demo/docs/en/gesture_recognition_demo.md) to run the demo. - - +# Gesture Recognition + +Gesture recognition aims to recognize the hand gestures in the video, such as thumbs up. + +## Data preparation + +Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_hand_gesture.md) to prepare data. + +## Demo + +Please follow [Demo](/demo/docs/en/gesture_recognition_demo.md) to run the demo. + + diff --git a/configs/wholebody_2d_keypoint/README.md b/configs/wholebody_2d_keypoint/README.md index 362a6a8976..305dd96b31 100644 --- a/configs/wholebody_2d_keypoint/README.md +++ b/configs/wholebody_2d_keypoint/README.md @@ -1,19 +1,19 @@ -# 2D Human Whole-Body Pose Estimation - -2D human whole-body pose estimation aims to localize dense landmarks on the entire human body including face, hands, body, and feet. - -Existing approaches can be categorized into top-down and bottom-up approaches. - -Top-down methods divide the task into two stages: human detection and whole-body pose estimation. They perform human detection first, followed by single-person whole-body pose estimation given human bounding boxes. - -Bottom-up approaches (e.g. 
AE) first detect all the whole-body keypoints and then group/associate them into person instances. - -## Data preparation - -Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_wholebody_keypoint.md) to prepare data. - -## Demo - -Please follow [Demo](/demo/docs/en/2d_wholebody_pose_demo.md) to run demos. - -
+# 2D Human Whole-Body Pose Estimation + +2D human whole-body pose estimation aims to localize dense landmarks on the entire human body including face, hands, body, and feet. + +Existing approaches can be categorized into top-down and bottom-up approaches. + +Top-down methods divide the task into two stages: human detection and whole-body pose estimation. They perform human detection first, followed by single-person whole-body pose estimation given human bounding boxes. + +Bottom-up approaches (e.g. AE) first detect all the whole-body keypoints and then group/associate them into person instances. + +## Data preparation + +Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_wholebody_keypoint.md) to prepare data. + +## Demo + +Please follow [Demo](/demo/docs/en/2d_wholebody_pose_demo.md) to run demos. + +
diff --git a/configs/wholebody_2d_keypoint/rtmpose/README.md b/configs/wholebody_2d_keypoint/rtmpose/README.md index ac40c016aa..bddca2c8ee 100644 --- a/configs/wholebody_2d_keypoint/rtmpose/README.md +++ b/configs/wholebody_2d_keypoint/rtmpose/README.md @@ -1,18 +1,18 @@ -# RTMPose - -Recent studies on 2D pose estimation have achieved excellent performance on public benchmarks, yet its application in the industrial community still suffers from heavy model parameters and high latency. -In order to bridge this gap, we empirically study five aspects that affect the performance of multi-person pose estimation algorithms: paradigm, backbone network, localization algorithm, training strategy, and deployment inference, and present a high-performance real-time multi-person pose estimation framework, **RTMPose**, based on MMPose. -Our RTMPose-m achieves **75.8% AP** on COCO with **90+ FPS** on an Intel i7-11700 CPU and **430+ FPS** on an NVIDIA GTX 1660 Ti GPU, and RTMPose-l achieves **67.0% AP** on COCO-WholeBody with **130+ FPS**, outperforming existing open-source libraries. -To further evaluate RTMPose's capability in critical real-time applications, we also report the performance after deploying on the mobile device. - -## Results and Models - -### COCO-WholeBody Dataset - -Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset - -| Model | Input Size | Whole AP | Whole AR | Details and Download | -| :-------: | :--------: | :------: | :------: | :---------------------------------------------------------------------: | -| RTMPose-m | 256x192 | 0.582 | 0.674 | [rtmpose_coco-wholebody.md](./coco-wholebody/rtmpose_coco-wholebody.md) | -| RTMPose-l | 256x192 | 0.611 | 0.700 | [rtmpose_coco-wholebody.md](./coco-wholebody/rtmpose_coco-wholebody.md) | -| RTMPose-l | 384x288 | 0.648 | 0.730 | [rtmpose_coco-wholebody.md](./coco-wholebody/rtmpose_coco-wholebody.md) | +# RTMPose + +Recent studies on 2D pose estimation have achieved excellent performance on public benchmarks, yet its application in the industrial community still suffers from heavy model parameters and high latency. +In order to bridge this gap, we empirically study five aspects that affect the performance of multi-person pose estimation algorithms: paradigm, backbone network, localization algorithm, training strategy, and deployment inference, and present a high-performance real-time multi-person pose estimation framework, **RTMPose**, based on MMPose. +Our RTMPose-m achieves **75.8% AP** on COCO with **90+ FPS** on an Intel i7-11700 CPU and **430+ FPS** on an NVIDIA GTX 1660 Ti GPU, and RTMPose-l achieves **67.0% AP** on COCO-WholeBody with **130+ FPS**, outperforming existing open-source libraries. +To further evaluate RTMPose's capability in critical real-time applications, we also report the performance after deploying on the mobile device. 
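The RTMPose configs that follow replace dense heatmaps with a SimCC-style codec (`type='SimCCLabel'`, `simcc_split_ratio=2.0`): each keypoint is predicted as two 1-D classification vectors over sub-pixel x and y bins and decoded by taking the per-axis argmax divided by the split ratio. The sketch below illustrates that decoding step only; it is not the `mmpose.codecs.SimCCLabel` implementation, and the score rule (the smaller of the two per-axis maxima) and the random inputs are assumptions for illustration.

```python
import numpy as np


def simcc_decode(simcc_x, simcc_y, split_ratio=2.0):
    """simcc_x: (K, Wx bins), simcc_y: (K, Wy bins) -> keypoints (K, 2), scores (K,)."""
    x = simcc_x.argmax(axis=1) / split_ratio          # sub-pixel x in input coords
    y = simcc_y.argmax(axis=1) / split_ratio          # sub-pixel y in input coords
    scores = np.minimum(simcc_x.max(axis=1), simcc_y.max(axis=1))
    return np.stack([x, y], axis=1), scores


# toy usage matching the 133-keypoint, 288x384-input whole-body config below
num_kpts, in_w, in_h, ratio = 133, 288, 384, 2.0
rng = np.random.default_rng(0)
kpts, scores = simcc_decode(rng.random((num_kpts, int(in_w * ratio))),
                            rng.random((num_kpts, int(in_h * ratio))), ratio)
print(kpts.shape, scores.shape)  # (133, 2) (133,)
```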
+ +## Results and Models + +### COCO-WholeBody Dataset + +Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset + +| Model | Input Size | Whole AP | Whole AR | Details and Download | +| :-------: | :--------: | :------: | :------: | :---------------------------------------------------------------------: | +| RTMPose-m | 256x192 | 0.582 | 0.674 | [rtmpose_coco-wholebody.md](./coco-wholebody/rtmpose_coco-wholebody.md) | +| RTMPose-l | 256x192 | 0.611 | 0.700 | [rtmpose_coco-wholebody.md](./coco-wholebody/rtmpose_coco-wholebody.md) | +| RTMPose-l | 384x288 | 0.648 | 0.730 | [rtmpose_coco-wholebody.md](./coco-wholebody/rtmpose_coco-wholebody.md) | diff --git a/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py b/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py index af2c133f22..9b390fd033 100644 --- a/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py +++ b/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py @@ -1,232 +1,232 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 270 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(288, 384), - sigma=(6., 6.93), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=1., - widen_factor=1., - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=1024, - out_channels=133, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') 
-# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict( - save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoWholeBodyMetric', - ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 270 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer 
+optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(288, 384), + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=133, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 
1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py b/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py index 7765c9ec44..f02f58f831 100644 --- a/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py +++ b/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py @@ -1,232 +1,232 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 270 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(192, 256), - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 
116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=1., - widen_factor=1., - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=1024, - out_channels=133, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - 
drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict( - save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoWholeBodyMetric', - ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 270 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=133, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 
's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py b/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py index 1e2afc518d..d403b05dc8 100644 --- a/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py +++ 
b/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py @@ -1,232 +1,232 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 270 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=(192, 256), - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=768, - out_channels=133, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - 
dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict( - save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoWholeBodyMetric', - ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 270 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + 
type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=133, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( 
+ type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose_coco-wholebody.md b/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose_coco-wholebody.md index e43c0b3750..93c8434c83 100644 --- a/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose_coco-wholebody.md +++ b/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose_coco-wholebody.md @@ -1,62 +1,62 @@ - - -
-RTMPose (arXiv'2023) - -```bibtex -@misc{https://doi.org/10.48550/arxiv.2303.07399, - doi = {10.48550/ARXIV.2303.07399}, - url = {https://arxiv.org/abs/2303.07399}, - author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, - keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, - title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, - publisher = {arXiv}, - year = {2023}, - copyright = {Creative Commons Attribution 4.0 International} -} - -``` - -
- - - -
-RTMDet (arXiv'2022) - -```bibtex -@misc{lyu2022rtmdet, - title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, - author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, - year={2022}, - eprint={2212.07784}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -
- - - -
-COCO-WholeBody (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | Body AP | Body AR | Foot AP | Foot AR | Face AP | Face AR | Hand AP | Hand AR | Whole AP | Whole AR | ckpt | log | -| :-------------------------------------- | :--------: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :------: | :------: | :--------------------------------------: | :-------------------------------------: | -| [rtmpose-m](/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 0.673 | 0.750 | 0.615 | 0.752 | 0.813 | 0.871 | 0.475 | 0.589 | 0.582 | 0.674 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody_pt-aic-coco_270e-256x192-cd5e845c_20230123.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody_pt-aic-coco_270e-256x192-cd5e845c_20230123.json) | -| [rtmpose-l](/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 0.695 | 0.769 | 0.658 | 0.785 | 0.833 | 0.887 | 0.519 | 0.628 | 0.611 | 0.700 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-256x192-6f206314_20230124.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-256x192-6f206314_20230124.json) | -| [rtmpose-l](/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py) | 384x288 | 0.712 | 0.781 | 0.693 | 0.811 | 0.882 | 0.919 | 0.579 | 0.677 | 0.648 | 0.730 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-384x288-eaeb96c8_20230125.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-384x288-eaeb96c8_20230125.json) | + + +
+RTMPose (arXiv'2023) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2303.07399, + doi = {10.48550/ARXIV.2303.07399}, + url = {https://arxiv.org/abs/2303.07399}, + author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, + publisher = {arXiv}, + year = {2023}, + copyright = {Creative Commons Attribution 4.0 International} +} + +``` + +
+ + + +
+RTMDet (arXiv'2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+COCO-WholeBody (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | Body AP | Body AR | Foot AP | Foot AR | Face AP | Face AR | Hand AP | Hand AR | Whole AP | Whole AR | ckpt | log | +| :-------------------------------------- | :--------: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :------: | :------: | :--------------------------------------: | :-------------------------------------: | +| [rtmpose-m](/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 0.673 | 0.750 | 0.615 | 0.752 | 0.813 | 0.871 | 0.475 | 0.589 | 0.582 | 0.674 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody_pt-aic-coco_270e-256x192-cd5e845c_20230123.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody_pt-aic-coco_270e-256x192-cd5e845c_20230123.json) | +| [rtmpose-l](/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 0.695 | 0.769 | 0.658 | 0.785 | 0.833 | 0.887 | 0.519 | 0.628 | 0.611 | 0.700 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-256x192-6f206314_20230124.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-256x192-6f206314_20230124.json) | +| [rtmpose-l](/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py) | 384x288 | 0.712 | 0.781 | 0.693 | 0.811 | 0.882 | 0.919 | 0.579 | 0.677 | 0.648 | 0.730 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-384x288-eaeb96c8_20230125.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-384x288-eaeb96c8_20230125.json) | diff --git a/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose_coco-wholebody.yml b/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose_coco-wholebody.yml index 049f348899..723ba2bdb3 100644 --- a/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose_coco-wholebody.yml +++ b/configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose_coco-wholebody.yml @@ -1,66 +1,66 @@ -Models: -- Config: configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py - In Collection: RTMPose - Alias: wholebody - Metadata: - Architecture: &id001 - - HRNet - Training Data: COCO-WholeBody - Name: rtmpose-m_8xb64-270e_coco-wholebody-256x192 - Results: - - Dataset: COCO-WholeBody - Metrics: - Body AP: 0.673 - Body AR: 0.750 - Face AP: 0.813 - Face AR: 0.871 - Foot AP: 0.615 - Foot AR: 0.752 - Hand AP: 0.475 - Hand AR: 0.589 - Whole AP: 0.582 - Whole AR: 0.674 - Task: Wholebody 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody_pt-aic-coco_270e-256x192-cd5e845c_20230123.pth -- Config: configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: COCO-WholeBody - Name: rtmpose-l_8xb64-270e_coco-wholebody-256x192 - Results: - - Dataset: COCO-WholeBody - Metrics: - Body AP: 0.695 - Body AR: 0.769 - Face AP: 0.833 - Face AR: 0.887 - Foot AP: 0.658 - Foot AR: 0.785 - Hand AP: 0.519 - Hand AR: 0.628 - Whole AP: 0.611 
- Whole AR: 0.700 - Task: Wholebody 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-256x192-6f206314_20230124.pth -- Config: configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py - In Collection: RTMPose - Metadata: - Architecture: *id001 - Training Data: COCO-WholeBody - Name: rtmpose-l_8xb32-270e_coco-wholebody-384x288.py - Results: - - Dataset: COCO-WholeBody - Metrics: - Body AP: 0.712 - Body AR: 0.781 - Face AP: 0.882 - Face AR: 0.919 - Foot AP: 0.693 - Foot AR: 0.811 - Hand AP: 0.579 - Hand AR: 0.677 - Whole AP: 0.648 - Whole AR: 0.730 - Task: Wholebody 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-384x288-eaeb96c8_20230125.pth +Models: +- Config: configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py + In Collection: RTMPose + Alias: wholebody + Metadata: + Architecture: &id001 + - HRNet + Training Data: COCO-WholeBody + Name: rtmpose-m_8xb64-270e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.673 + Body AR: 0.750 + Face AP: 0.813 + Face AR: 0.871 + Foot AP: 0.615 + Foot AR: 0.752 + Hand AP: 0.475 + Hand AR: 0.589 + Whole AP: 0.582 + Whole AR: 0.674 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody_pt-aic-coco_270e-256x192-cd5e845c_20230123.pth +- Config: configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: rtmpose-l_8xb64-270e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.695 + Body AR: 0.769 + Face AP: 0.833 + Face AR: 0.887 + Foot AP: 0.658 + Foot AR: 0.785 + Hand AP: 0.519 + Hand AR: 0.628 + Whole AP: 0.611 + Whole AR: 0.700 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-256x192-6f206314_20230124.pth +- Config: configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py + In Collection: RTMPose + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: rtmpose-l_8xb32-270e_coco-wholebody-384x288.py + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.712 + Body AR: 0.781 + Face AP: 0.882 + Face AR: 0.919 + Foot AP: 0.693 + Foot AR: 0.811 + Hand AP: 0.579 + Hand AR: 0.677 + Whole AP: 0.648 + Whole AR: 0.730 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-384x288-eaeb96c8_20230125.pth diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/README.md b/configs/wholebody_2d_keypoint/topdown_heatmap/README.md index 23ee1ed315..4d06177ab5 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/README.md +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/README.md @@ -1,26 +1,26 @@ -# Top-down heatmap-based pose estimation - -Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. 
Instead of estimating keypoint coordinates directly, the pose estimator will produce heatmaps which represent the likelihood of being a keypoint, following the paradigm introduced in [Simple Baselines for Human Pose Estimation and Tracking](http://openaccess.thecvf.com/content_ECCV_2018/html/Bin_Xiao_Simple_Baselines_for_ECCV_2018_paper.html). - -
- -
- -## Results and Models - -### COCO-WholeBody Dataset - -Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset - -| Model | Input Size | Whole AP | Whole AR | Details and Download | -| :-----------------: | :--------: | :------: | :------: | :-----------------------------------------------------------------------------: | -| HRNet-w48+Dark+ | 384x288 | 0.661 | 0.743 | [hrnet_dark_coco-wholebody.md](./coco-wholebody/hrnet_dark_coco-wholebody.md) | -| HRNet-w32+Dark | 256x192 | 0.582 | 0.671 | [hrnet_dark_coco-wholebody.md](./coco-wholebody/hrnet_dark_coco-wholebody.md) | -| HRNet-w48 | 256x192 | 0.579 | 0.681 | [hrnet_coco-wholebody.md](./coco-wholebody/hrnet_coco-wholebody.md) | -| CSPNeXt-m | 256x192 | 0.567 | 0.641 | [cspnext_udp_coco-wholebody.md](./coco-wholebody/cspnext_udp_coco-wholebody.md) | -| ResNet-152 | 256x192 | 0.548 | 0.661 | [resnet_coco-wholebody.md](./coco-wholebody/resnet_coco-wholebody.md) | -| HRNet-w32 | 256x192 | 0.536 | 0.636 | [hrnet_coco-wholebody.md](./coco-wholebody/hrnet_coco-wholebody.md) | -| ResNet-101 | 256x192 | 0.531 | 0.645 | [resnet_coco-wholebody.md](./coco-wholebody/resnet_coco-wholebody.md) | -| S-ViPNAS-Res50+Dark | 256x192 | 0.528 | 0.632 | [vipnas_dark_coco-wholebody.md](./coco-wholebody/vipnas_dark_coco-wholebody.md) | -| ResNet-50 | 256x192 | 0.521 | 0.633 | [resnet_coco-wholebody.md](./coco-wholebody/resnet_coco-wholebody.md) | -| S-ViPNAS-Res50 | 256x192 | 0.495 | 0.607 | [vipnas_coco-wholebody.md](./coco-wholebody/vipnas_coco-wholebody.md) | +# Top-down heatmap-based pose estimation + +Top-down methods divide the task into two stages: object detection, followed by single-object pose estimation given object bounding boxes. Instead of estimating keypoint coordinates directly, the pose estimator will produce heatmaps which represent the likelihood of being a keypoint, following the paradigm introduced in [Simple Baselines for Human Pose Estimation and Tracking](http://openaccess.thecvf.com/content_ECCV_2018/html/Bin_Xiao_Simple_Baselines_for_ECCV_2018_paper.html). + +
+ +
+ +## Results and Models + +### COCO-WholeBody Dataset + +Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset + +| Model | Input Size | Whole AP | Whole AR | Details and Download | +| :-----------------: | :--------: | :------: | :------: | :-----------------------------------------------------------------------------: | +| HRNet-w48+Dark+ | 384x288 | 0.661 | 0.743 | [hrnet_dark_coco-wholebody.md](./coco-wholebody/hrnet_dark_coco-wholebody.md) | +| HRNet-w32+Dark | 256x192 | 0.582 | 0.671 | [hrnet_dark_coco-wholebody.md](./coco-wholebody/hrnet_dark_coco-wholebody.md) | +| HRNet-w48 | 256x192 | 0.579 | 0.681 | [hrnet_coco-wholebody.md](./coco-wholebody/hrnet_coco-wholebody.md) | +| CSPNeXt-m | 256x192 | 0.567 | 0.641 | [cspnext_udp_coco-wholebody.md](./coco-wholebody/cspnext_udp_coco-wholebody.md) | +| ResNet-152 | 256x192 | 0.548 | 0.661 | [resnet_coco-wholebody.md](./coco-wholebody/resnet_coco-wholebody.md) | +| HRNet-w32 | 256x192 | 0.536 | 0.636 | [hrnet_coco-wholebody.md](./coco-wholebody/hrnet_coco-wholebody.md) | +| ResNet-101 | 256x192 | 0.531 | 0.645 | [resnet_coco-wholebody.md](./coco-wholebody/resnet_coco-wholebody.md) | +| S-ViPNAS-Res50+Dark | 256x192 | 0.528 | 0.632 | [vipnas_dark_coco-wholebody.md](./coco-wholebody/vipnas_dark_coco-wholebody.md) | +| ResNet-50 | 256x192 | 0.521 | 0.633 | [resnet_coco-wholebody.md](./coco-wholebody/resnet_coco-wholebody.md) | +| S-ViPNAS-Res50 | 256x192 | 0.495 | 0.607 | [vipnas_coco-wholebody.md](./coco-wholebody/vipnas_coco-wholebody.md) | diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext-l_udp_8xb64-210e_coco-wholebody-256x192.py b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext-l_udp_8xb64-210e_coco-wholebody-256x192.py index 7182e7a3ed..aa98e5afc3 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext-l_udp_8xb64-210e_coco-wholebody-256x192.py +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext-l_udp_8xb64-210e_coco-wholebody-256x192.py @@ -1,212 +1,212 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 210 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=1., - widen_factor=1., - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - 
checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' - 'rtmdet/cspnext_rsb_pretrain/' - 'cspnext-l_8xb256-rsb-a1-600e_in1k-6a760974.pth')), - head=dict( - type='HeatmapHead', - in_channels=1024, - out_channels=133, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=False, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict( - save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - 
stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoWholeBodyMetric', - ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-l_8xb256-rsb-a1-600e_in1k-6a760974.pth')), + head=dict( + type='HeatmapHead', + in_channels=1024, + out_channels=133, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=False, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + 
dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext-m_udp_8xb64-210e_coco-wholebody-256x192.py b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext-m_udp_8xb64-210e_coco-wholebody-256x192.py index 05fae649b8..d6d1c2f678 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext-m_udp_8xb64-210e_coco-wholebody-256x192.py +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext-m_udp_8xb64-210e_coco-wholebody-256x192.py @@ -1,212 +1,212 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -max_epochs = 210 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - 
mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' - 'rtmdet/cspnext_rsb_pretrain/' - 'cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth')), - head=dict( - type='HeatmapHead', - in_channels=768, - out_channels=133, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=False, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - 
pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict( - save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoWholeBodyMetric', - ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth')), + head=dict( + type='HeatmapHead', + in_channels=768, + out_channels=133, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=False, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] 
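Since these configs are plain Python modules built from nested dicts, they can be loaded and sanity-checked with MMEngine before launching a run. The snippet below is a minimal inspection sketch, assuming an MMPose checkout with `mmengine` installed; it loads the cspnext-m UDP config from this file and prints the derived quantities it defines: the epoch at which `PipelineSwitchHook` swaps in the stage-2 pipeline, the heatmap geometry, and the reference batch size used for LR auto-scaling.

```python
# Minimal inspection sketch (assumes an MMPose checkout with mmengine installed).
from mmengine.config import Config

CFG = ('configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/'
       'cspnext-m_udp_8xb64-210e_coco-wholebody-256x192.py')

cfg = Config.fromfile(CFG)

# Stage 1 uses the strong augmentations defined above (CoarseDropout with p=1.0);
# PipelineSwitchHook swaps in the milder stage-2 pipeline for the final
# `stage2_num_epochs` epochs.
switch_epoch = cfg.max_epochs - cfg.stage2_num_epochs
print('pipeline switches at epoch', switch_epoch)  # 210 - 30 = 180

# UDPHeatmap codec: 192x256 input with a 4x-downsampled 48x64 heatmap.
print('input size:', cfg.codec.input_size, 'heatmap size:', cfg.codec.heatmap_size)

# base_lr is tuned for 8 GPUs x 64 samples/GPU = 512; auto_scale_lr rescales it
# when the actual total batch size differs.
print('base lr:', cfg.optim_wrapper.optimizer.lr,
      'reference batch size:', cfg.auto_scale_lr.base_batch_size)
```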
+val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext_udp_coco-wholebody.md b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext_udp_coco-wholebody.md index 1fc4a78dfb..7f8e000157 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext_udp_coco-wholebody.md +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext_udp_coco-wholebody.md @@ -1,56 +1,56 @@ - - -
-RTMDet (ArXiv 2022) - -```bibtex -@misc{lyu2022rtmdet, - title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, - author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, - year={2022}, - eprint={2212.07784}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -
- - - -
-UDP (CVPR'2020) - -```bibtex -@InProceedings{Huang_2020_CVPR, - author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, - title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, - booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, - month = {June}, - year = {2020} -} -``` - -
- - - -
-COCO-WholeBody (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | Body AP | Body AR | Foot AP | Foot AR | Face AP | Face AR | Hand AP | Hand AR | Whole AP | Whole AR | ckpt | log | -| :-------------------------------------- | :--------: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :------: | :------: | :--------------------------------------: | :-------------------------------------: | -| [pose_cspnext_m_udp](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext-m_udp_8xb64-210e_coco-wholebody-256x192.py) | 256x192 | 0.687 | 0.735 | 0.680 | 0.763 | 0.697 | 0.755 | 0.460 | 0.543 | 0.567 | 0.641 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-coco-wholebody_pt-in1k_210e-256x192-320fa258_20230123.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-coco-wholebody_pt-in1k_210e-256x192-320fa258_20230123.json) | + + +
+RTMDet (ArXiv 2022) + +```bibtex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ + + +
+UDP (CVPR'2020) + +```bibtex +@InProceedings{Huang_2020_CVPR, + author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, + title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, + booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2020} +} +``` + +
+ + + +
+COCO-WholeBody (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | Body AP | Body AR | Foot AP | Foot AR | Face AP | Face AR | Hand AP | Hand AR | Whole AP | Whole AR | ckpt | log | +| :-------------------------------------- | :--------: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :------: | :------: | :--------------------------------------: | :-------------------------------------: | +| [pose_cspnext_m_udp](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext-m_udp_8xb64-210e_coco-wholebody-256x192.py) | 256x192 | 0.687 | 0.735 | 0.680 | 0.763 | 0.697 | 0.755 | 0.460 | 0.543 | 0.567 | 0.641 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-coco-wholebody_pt-in1k_210e-256x192-320fa258_20230123.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-coco-wholebody_pt-in1k_210e-256x192-320fa258_20230123.json) | diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext_udp_coco-wholebody.yml b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext_udp_coco-wholebody.yml index ebdcc7146e..bdcb4c5250 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext_udp_coco-wholebody.yml +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext_udp_coco-wholebody.yml @@ -1,24 +1,24 @@ -Models: -- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext-m_udp_8xb64-210e_coco-wholebody-256x192.py - In Collection: UDP - Metadata: - Architecture: &id001 - - UDP - - CSPNeXt - Training Data: COCO-WholeBody - Name: cspnext-m_udp_8xb64-210e_coco-wholebody-256x192 - Results: - - Dataset: COCO-WholeBody - Metrics: - Body AP: 0.687 - Body AR: 0.735 - Face AP: 0.697 - Face AR: 0.755 - Foot AP: 0.680 - Foot AR: 0.763 - Hand AP: 0.46 - Hand AR: 0.567 - Whole AP: 0.567 - Whole AR: 0.641 - Task: Wholebody 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-coco-wholebody_pt-in1k_210e-256x192-320fa258_20230123.pth +Models: +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext-m_udp_8xb64-210e_coco-wholebody-256x192.py + In Collection: UDP + Metadata: + Architecture: &id001 + - UDP + - CSPNeXt + Training Data: COCO-WholeBody + Name: cspnext-m_udp_8xb64-210e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.687 + Body AR: 0.735 + Face AP: 0.697 + Face AR: 0.755 + Foot AP: 0.680 + Foot AR: 0.763 + Hand AP: 0.46 + Hand AR: 0.567 + Whole AP: 0.567 + Whole AR: 0.641 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-coco-wholebody_pt-in1k_210e-256x192-320fa258_20230123.pth diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_coco-wholebody.md b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_coco-wholebody.md index 53f240bc52..8dd01d5fbe 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_coco-wholebody.md +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_coco-wholebody.md @@ -1,41 +1,41 @@ - - -
-HRNet (CVPR'2019) - -```bibtex -@inproceedings{sun2019deep, - title={Deep high-resolution representation learning for human pose estimation}, - author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={5693--5703}, - year={2019} -} -``` - -
- - - -
-COCO-WholeBody (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | Body AP | Body AR | Foot AP | Foot AR | Face AP | Face AR | Hand AP | Hand AR | Whole AP | Whole AR | ckpt | log | -| :-------------------------------------- | :--------: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :------: | :------: | :--------------------------------------: | :-------------------------------------: | -| [pose_hrnet_w32](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-256x192.py) | 256x192 | 0.678 | 0.755 | 0.543 | 0.661 | 0.630 | 0.708 | 0.467 | 0.566 | 0.536 | 0.636 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_256x192-853765cd_20200918.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_256x192_20200918.log.json) | -| [pose_hrnet_w32](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-384x288.py) | 384x288 | 0.700 | 0.772 | 0.585 | 0.691 | 0.726 | 0.783 | 0.515 | 0.603 | 0.586 | 0.673 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_384x288-78cacac3_20200922.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_384x288_20200922.log.json) | -| [pose_hrnet_w48](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-256x192.py) | 256x192 | 0.701 | 0.776 | 0.675 | 0.787 | 0.656 | 0.743 | 0.535 | 0.639 | 0.579 | 0.681 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_256x192-643e18cb_20200922.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_256x192_20200922.log.json) | -| [pose_hrnet_w48](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-384x288.py) | 384x288 | 0.722 | 0.791 | 0.696 | 0.801 | 0.776 | 0.834 | 0.587 | 0.678 | 0.632 | 0.717 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288-6e061c6a_20200922.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_20200922.log.json) | + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+COCO-WholeBody (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
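The metrics below are reported on COCO-WholeBody v1.0 val using shared person detections with 56.4 human AP. A hedged sketch of re-running that evaluation from a released checkpoint with MMEngine's `Runner` follows; it assumes the dataset, annotations, and detection-result file referenced by the config are already in place under `data/coco/`, and is not prescribed by this patch.

```python
# Sketch: re-evaluate a released HRNet-w32 whole-body checkpoint with the
# config's own test_dataloader and CocoWholeBodyMetric. Config path and
# checkpoint URL are taken from the table below; work_dir is arbitrary.
from mmengine.config import Config
from mmengine.runner import Runner

cfg = Config.fromfile(
    'configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/'
    'td-hm_hrnet-w32_8xb64-210e_coco-wholebody-256x192.py')
cfg.load_from = ('https://download.openmmlab.com/mmpose/top_down/hrnet/'
                 'hrnet_w32_coco_wholebody_256x192-853765cd_20200918.pth')
cfg.work_dir = 'work_dirs/eval_hrnet_w32_wholebody'

runner = Runner.from_cfg(cfg)
print(runner.test())  # dict of coco-wholebody AP/AR values
```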
+ +Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | Body AP | Body AR | Foot AP | Foot AR | Face AP | Face AR | Hand AP | Hand AR | Whole AP | Whole AR | ckpt | log | +| :-------------------------------------- | :--------: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :------: | :------: | :--------------------------------------: | :-------------------------------------: | +| [pose_hrnet_w32](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-256x192.py) | 256x192 | 0.678 | 0.755 | 0.543 | 0.661 | 0.630 | 0.708 | 0.467 | 0.566 | 0.536 | 0.636 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_256x192-853765cd_20200918.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_256x192_20200918.log.json) | +| [pose_hrnet_w32](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-384x288.py) | 384x288 | 0.700 | 0.772 | 0.585 | 0.691 | 0.726 | 0.783 | 0.515 | 0.603 | 0.586 | 0.673 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_384x288-78cacac3_20200922.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_384x288_20200922.log.json) | +| [pose_hrnet_w48](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-256x192.py) | 256x192 | 0.701 | 0.776 | 0.675 | 0.787 | 0.656 | 0.743 | 0.535 | 0.639 | 0.579 | 0.681 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_256x192-643e18cb_20200922.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_256x192_20200922.log.json) | +| [pose_hrnet_w48](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-384x288.py) | 384x288 | 0.722 | 0.791 | 0.696 | 0.801 | 0.776 | 0.834 | 0.587 | 0.678 | 0.632 | 0.717 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288-6e061c6a_20200922.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_20200922.log.json) | diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_coco-wholebody.yml b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_coco-wholebody.yml index 929bd05356..2cee2ac559 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_coco-wholebody.yml +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_coco-wholebody.yml @@ -1,86 +1,86 @@ -Models: -- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-256x192.py - In Collection: HRNet - Metadata: - Architecture: &id001 - - HRNet - Training Data: COCO-WholeBody - Name: td-hm_hrnet-w32_8xb64-210e_coco-wholebody-256x192 - Results: - - Dataset: COCO-WholeBody - Metrics: - Body AP: 0.678 - Body AR: 0.755 - Face AP: 0.630 - Face AR: 0.708 - Foot AP: 0.543 - Foot AR: 0.661 - Hand AP: 0.467 - Hand AR: 0.566 - Whole AP: 0.536 - Whole AR: 0.636 - Task: Wholebody 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_256x192-853765cd_20200918.pth -- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-384x288.py - In Collection: 
HRNet - Metadata: - Architecture: *id001 - Training Data: COCO-WholeBody - Name: td-hm_hrnet-w32_8xb64-210e_coco-wholebody-384x288 - Results: - - Dataset: COCO-WholeBody - Metrics: - Body AP: 0.700 - Body AR: 0.772 - Face AP: 0.726 - Face AR: 0.783 - Foot AP: 0.585 - Foot AR: 0.691 - Hand AP: 0.515 - Hand AR: 0.603 - Whole AP: 0.586 - Whole AR: 0.673 - Task: Wholebody 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_384x288-78cacac3_20200922.pth -- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-256x192.py - In Collection: HRNet - Metadata: - Architecture: *id001 - Training Data: COCO-WholeBody - Name: td-hm_hrnet-w48_8xb32-210e_coco-wholebody-256x192 - Results: - - Dataset: COCO-WholeBody - Metrics: - Body AP: 0.701 - Body AR: 0.776 - Face AP: 0.656 - Face AR: 0.743 - Foot AP: 0.675 - Foot AR: 0.787 - Hand AP: 0.535 - Hand AR: 0.639 - Whole AP: 0.579 - Whole AR: 0.681 - Task: Wholebody 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_256x192-643e18cb_20200922.pth -- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-384x288.py - In Collection: HRNet - Metadata: - Architecture: *id001 - Training Data: COCO-WholeBody - Name: td-hm_hrnet-w48_8xb32-210e_coco-wholebody-384x288 - Results: - - Dataset: COCO-WholeBody - Metrics: - Body AP: 0.722 - Body AR: 0.791 - Face AP: 0.776 - Face AR: 0.834 - Foot AP: 0.696 - Foot AR: 0.801 - Hand AP: 0.587 - Hand AR: 0.678 - Whole AP: 0.632 - Whole AR: 0.717 - Task: Wholebody 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288-6e061c6a_20200922.pth +Models: +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-256x192.py + In Collection: HRNet + Metadata: + Architecture: &id001 + - HRNet + Training Data: COCO-WholeBody + Name: td-hm_hrnet-w32_8xb64-210e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.678 + Body AR: 0.755 + Face AP: 0.630 + Face AR: 0.708 + Foot AP: 0.543 + Foot AR: 0.661 + Hand AP: 0.467 + Hand AR: 0.566 + Whole AP: 0.536 + Whole AR: 0.636 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_256x192-853765cd_20200918.pth +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-384x288.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: td-hm_hrnet-w32_8xb64-210e_coco-wholebody-384x288 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.700 + Body AR: 0.772 + Face AP: 0.726 + Face AR: 0.783 + Foot AP: 0.585 + Foot AR: 0.691 + Hand AP: 0.515 + Hand AR: 0.603 + Whole AP: 0.586 + Whole AR: 0.673 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_384x288-78cacac3_20200922.pth +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-256x192.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: td-hm_hrnet-w48_8xb32-210e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.701 + Body AR: 0.776 + Face AP: 0.656 + Face AR: 0.743 + Foot AP: 0.675 + Foot AR: 0.787 + Hand AP: 0.535 + Hand AR: 
0.639 + Whole AP: 0.579 + Whole AR: 0.681 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_256x192-643e18cb_20200922.pth +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-384x288.py + In Collection: HRNet + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: td-hm_hrnet-w48_8xb32-210e_coco-wholebody-384x288 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.722 + Body AR: 0.791 + Face AP: 0.776 + Face AR: 0.834 + Foot AP: 0.696 + Foot AR: 0.801 + Hand AP: 0.587 + Hand AR: 0.678 + Whole AP: 0.632 + Whole AR: 0.717 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288-6e061c6a_20200922.pth diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_dark_coco-wholebody.md b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_dark_coco-wholebody.md index b215b3c5f2..fa4bc27d01 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_dark_coco-wholebody.md +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_dark_coco-wholebody.md @@ -1,58 +1,58 @@ - - -
-HRNet (CVPR'2019) - -```bibtex -@inproceedings{sun2019deep, - title={Deep high-resolution representation learning for human pose estimation}, - author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={5693--5703}, - year={2019} -} -``` - -
- - - -
-DarkPose (CVPR'2020) - -```bibtex -@inproceedings{zhang2020distribution, - title={Distribution-aware coordinate representation for human pose estimation}, - author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={7093--7102}, - year={2020} -} -``` - -
- - - -
-COCO-WholeBody (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | Body AP | Body AR | Foot AP | Foot AR | Face AP | Face AR | Hand AP | Hand AR | Whole AP | Whole AR | ckpt | log | -| :-------------------------------------- | :--------: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :------: | :------: | :--------------------------------------: | :-------------------------------------: | -| [pose_hrnet_w32_dark](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_dark-8xb64-210e_coco-wholebody-256x192.py) | 256x192 | 0.693 | 0.764 | 0.564 | 0.674 | 0.737 | 0.809 | 0.503 | 0.602 | 0.582 | 0.671 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_256x192_dark-469327ef_20200922.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_256x192_dark_20200922.log.json) | -| [pose_hrnet_w48_dark+](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py) | 384x288 | 0.742 | 0.807 | 0.707 | 0.806 | 0.841 | 0.892 | 0.602 | 0.694 | 0.661 | 0.743 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_dark_20200918.log.json) | - -Note: `+` means the model is first pre-trained on original COCO dataset, and then fine-tuned on COCO-WholeBody dataset. We find this will lead to better performance. + + +
+HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ + + +
+DarkPose (CVPR'2020) + +```bibtex +@inproceedings{zhang2020distribution, + title={Distribution-aware coordinate representation for human pose estimation}, + author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={7093--7102}, + year={2020} +} +``` + +
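In these configs, DarkPose is not a separate head or backbone: it enters only through the heatmap codec. The DARK variants later in this patch differ from the plain HRNet baselines solely in the `unbiased=True` flag of the `MSRAHeatmap` codec, as the excerpt below (taken from the two 256x192 configs) shows.

```python
# Codec settings as used in this patch: the only difference between the plain
# and the DARK configs is the distribution-aware (unbiased) encoding/decoding.
codec_baseline = dict(
    type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)

codec_dark = dict(
    type='MSRAHeatmap',
    input_size=(192, 256),
    heatmap_size=(48, 64),
    sigma=2,
    unbiased=True)  # DarkPose-style coordinate representation
```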
+ + + +
+COCO-WholeBody (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
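The note after the table below marks `+` models as pre-trained on body-only COCO and then fine-tuned on COCO-WholeBody. One hedged way to reproduce that workflow is to point `load_from` at a COCO checkpoint before training, as in the hypothetical config sketch below; the checkpoint path is a placeholder and is not part of this change.

```python
# Hypothetical fine-tuning config (sketch only). Inherits the DARK w48 384x288
# config from this directory and warm-starts from a body-only COCO checkpoint.
_base_ = ['./td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py']

# The 17-keypoint COCO head does not match the 133-keypoint whole-body head;
# checkpoint loading is non-strict, so in practice only the backbone weights
# are reused and the head is trained from scratch.
load_from = 'checkpoints/hrnet_w48_coco_384x288_dark.pth'  # placeholder path
```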
+ +Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | Body AP | Body AR | Foot AP | Foot AR | Face AP | Face AR | Hand AP | Hand AR | Whole AP | Whole AR | ckpt | log | +| :-------------------------------------- | :--------: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :------: | :------: | :--------------------------------------: | :-------------------------------------: | +| [pose_hrnet_w32_dark](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_dark-8xb64-210e_coco-wholebody-256x192.py) | 256x192 | 0.693 | 0.764 | 0.564 | 0.674 | 0.737 | 0.809 | 0.503 | 0.602 | 0.582 | 0.671 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_256x192_dark-469327ef_20200922.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_256x192_dark_20200922.log.json) | +| [pose_hrnet_w48_dark+](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py) | 384x288 | 0.742 | 0.807 | 0.707 | 0.806 | 0.841 | 0.892 | 0.602 | 0.694 | 0.661 | 0.743 | [ckpt](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth) | [log](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_dark_20200918.log.json) | + +Note: `+` means the model is first pre-trained on original COCO dataset, and then fine-tuned on COCO-WholeBody dataset. We find this will lead to better performance. diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_dark_coco-wholebody.yml b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_dark_coco-wholebody.yml index d0e2bd6954..25a22ccb3a 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_dark_coco-wholebody.yml +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_dark_coco-wholebody.yml @@ -1,45 +1,45 @@ -Models: -- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_dark-8xb64-210e_coco-wholebody-256x192.py - In Collection: DarkPose - Metadata: - Architecture: &id001 - - HRNet - - DarkPose - Training Data: COCO-WholeBody - Name: td-hm_hrnet-w32_dark-8xb64-210e_coco-wholebody-256x192 - Results: - - Dataset: COCO-WholeBody - Metrics: - Body AP: 0.693 - Body AR: 0.764 - Face AP: 0.737 - Face AR: 0.809 - Foot AP: 0.564 - Foot AR: 0.674 - Hand AP: 0.503 - Hand AR: 0.602 - Whole AP: 0.582 - Whole AR: 0.671 - Task: Wholebody 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_256x192_dark-469327ef_20200922.pth -- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py - In Collection: DarkPose - Metadata: - Architecture: *id001 - Training Data: COCO-WholeBody - Name: td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288 - Results: - - Dataset: COCO-WholeBody - Metrics: - Body AP: 0.742 - Body AR: 0.807 - Face AP: 0.841 - Face AR: 0.892 - Foot AP: 0.707 - Foot AR: 0.806 - Hand AP: 0.602 - Hand AR: 0.694 - Whole AP: 0.661 - Whole AR: 0.743 - Task: Wholebody 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth +Models: +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_dark-8xb64-210e_coco-wholebody-256x192.py + 
In Collection: DarkPose + Metadata: + Architecture: &id001 + - HRNet + - DarkPose + Training Data: COCO-WholeBody + Name: td-hm_hrnet-w32_dark-8xb64-210e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.693 + Body AR: 0.764 + Face AP: 0.737 + Face AR: 0.809 + Foot AP: 0.564 + Foot AR: 0.674 + Hand AP: 0.503 + Hand AR: 0.602 + Whole AP: 0.582 + Whole AR: 0.671 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_wholebody_256x192_dark-469327ef_20200922.pth +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py + In Collection: DarkPose + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.742 + Body AR: 0.807 + Face AP: 0.841 + Face AR: 0.892 + Foot AP: 0.707 + Foot AR: 0.806 + Hand AP: 0.602 + Hand AR: 0.694 + Whole AP: 0.661 + Whole AR: 0.743 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/resnet_coco-wholebody.md b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/resnet_coco-wholebody.md index e4a189833b..187e5d31fb 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/resnet_coco-wholebody.md +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/resnet_coco-wholebody.md @@ -1,43 +1,43 @@ - - -
-SimpleBaseline2D (ECCV'2018) - -```bibtex -@inproceedings{xiao2018simple, - title={Simple baselines for human pose estimation and tracking}, - author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, - booktitle={Proceedings of the European conference on computer vision (ECCV)}, - pages={466--481}, - year={2018} -} -``` - -
- - - -
-COCO-WholeBody (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | Body AP | Body AR | Foot AP | Foot AR | Face AP | Face AR | Hand AP | Hand AR | Whole AP | Whole AR | ckpt | log | -| :-------------------------------------- | :--------: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :------: | :------: | :--------------------------------------: | :-------------------------------------: | -| [pose_resnet_50](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-256x192.py) | 256x192 | 0.652 | 0.738 | 0.615 | 0.749 | 0.606 | 0.715 | 0.460 | 0.584 | 0.521 | 0.633 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_coco_wholebody_256x192-9e37ed88_20201004.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_coco_wholebody_256x192_20201004.log.json) | -| [pose_resnet_50](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-384x288.py) | 384x288 | 0.666 | 0.747 | 0.634 | 0.763 | 0.731 | 0.811 | 0.536 | 0.646 | 0.574 | 0.670 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_coco_wholebody_384x288-ce11e294_20201004.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_coco_wholebody_384x288_20201004.log.json) | -| [pose_resnet_101](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-256x192.py) | 256x192 | 0.669 | 0.753 | 0.637 | 0.766 | 0.611 | 0.722 | 0.463 | 0.589 | 0.531 | 0.645 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res101_coco_wholebody_256x192-7325f982_20201004.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res101_coco_wholebody_256x192_20201004.log.json) | -| [pose_resnet_101](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-384x288.py) | 384x288 | 0.692 | 0.770 | 0.680 | 0.799 | 0.746 | 0.820 | 0.548 | 0.657 | 0.597 | 0.693 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res101_coco_wholebody_384x288-6c137b9a_20201004.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res101_coco_wholebody_384x288_20201004.log.json) | -| [pose_resnet_152](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-256x192.py) | 256x192 | 0.682 | 0.764 | 0.661 | 0.787 | 0.623 | 0.728 | 0.481 | 0.607 | 0.548 | 0.661 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res152_coco_wholebody_256x192-5de8ae23_20201004.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res152_coco_wholebody_256x192_20201004.log.json) | -| [pose_resnet_152](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-384x288.py) | 384x288 | 0.704 | 0.780 | 0.693 | 0.813 | 0.751 | 0.824 | 0.559 | 0.666 | 0.610 | 0.705 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res152_coco_wholebody_384x288-eab8caa8_20201004.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res152_coco_wholebody_384x288_20201004.log.json) | + + +
+SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
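Each row's `log` link in the table below points to a `*.log.json` training log. The layout is assumed here to be the usual one-JSON-record-per-line format written by the MMCV text logger; the sketch below only counts records and prints the keys of the last one, so it stays agnostic to the exact field names.

```python
# Peek into a downloaded *.log.json training log (filename is a placeholder).
# Assumes one JSON object per non-empty line; tolerates stray non-JSON lines.
import json

records = []
with open('res50_coco_wholebody_256x192_20201004.log.json') as f:
    for line in f:
        line = line.strip()
        if not line:
            continue
        try:
            records.append(json.loads(line))
        except json.JSONDecodeError:
            continue  # e.g. an environment-info header line

print(f'{len(records)} records; last record keys: {sorted(records[-1])}')
```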
+ + + +
+COCO-WholeBody (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
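The `*_coco-wholebody.yml` metafiles changed alongside these READMEs carry the same numbers in machine-readable, model-index form. A small PyYAML sketch for listing models and their whole-body metrics follows; the file path matches the ResNet metafile in this patch, and the field names (`Models`, `Results`, `Metrics`) are taken directly from its contents.

```python
# List every model in a metafile together with its whole-body AP/AR.
import yaml  # PyYAML

with open('configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/'
          'resnet_coco-wholebody.yml') as f:
    metafile = yaml.safe_load(f)  # YAML anchors (&id001/*id001) resolve here

for model in metafile['Models']:
    metrics = model['Results'][0]['Metrics']
    print(f"{model['Name']}: "
          f"Whole AP={metrics['Whole AP']}, Whole AR={metrics['Whole AR']}")
```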
+ +Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | Body AP | Body AR | Foot AP | Foot AR | Face AP | Face AR | Hand AP | Hand AR | Whole AP | Whole AR | ckpt | log | +| :-------------------------------------- | :--------: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :------: | :------: | :--------------------------------------: | :-------------------------------------: | +| [pose_resnet_50](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-256x192.py) | 256x192 | 0.652 | 0.738 | 0.615 | 0.749 | 0.606 | 0.715 | 0.460 | 0.584 | 0.521 | 0.633 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_coco_wholebody_256x192-9e37ed88_20201004.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_coco_wholebody_256x192_20201004.log.json) | +| [pose_resnet_50](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-384x288.py) | 384x288 | 0.666 | 0.747 | 0.634 | 0.763 | 0.731 | 0.811 | 0.536 | 0.646 | 0.574 | 0.670 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res50_coco_wholebody_384x288-ce11e294_20201004.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res50_coco_wholebody_384x288_20201004.log.json) | +| [pose_resnet_101](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-256x192.py) | 256x192 | 0.669 | 0.753 | 0.637 | 0.766 | 0.611 | 0.722 | 0.463 | 0.589 | 0.531 | 0.645 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res101_coco_wholebody_256x192-7325f982_20201004.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res101_coco_wholebody_256x192_20201004.log.json) | +| [pose_resnet_101](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-384x288.py) | 384x288 | 0.692 | 0.770 | 0.680 | 0.799 | 0.746 | 0.820 | 0.548 | 0.657 | 0.597 | 0.693 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res101_coco_wholebody_384x288-6c137b9a_20201004.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res101_coco_wholebody_384x288_20201004.log.json) | +| [pose_resnet_152](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-256x192.py) | 256x192 | 0.682 | 0.764 | 0.661 | 0.787 | 0.623 | 0.728 | 0.481 | 0.607 | 0.548 | 0.661 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res152_coco_wholebody_256x192-5de8ae23_20201004.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res152_coco_wholebody_256x192_20201004.log.json) | +| [pose_resnet_152](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-384x288.py) | 384x288 | 0.704 | 0.780 | 0.693 | 0.813 | 0.751 | 0.824 | 0.559 | 0.666 | 0.610 | 0.705 | [ckpt](https://download.openmmlab.com/mmpose/top_down/resnet/res152_coco_wholebody_384x288-eab8caa8_20201004.pth) | [log](https://download.openmmlab.com/mmpose/top_down/resnet/res152_coco_wholebody_384x288_20201004.log.json) | diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/resnet_coco-wholebody.yml b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/resnet_coco-wholebody.yml index 0e8db24f6a..c4c148a2f0 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/resnet_coco-wholebody.yml +++ 
b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/resnet_coco-wholebody.yml @@ -1,128 +1,128 @@ -Models: -- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: &id001 - - SimpleBaseline2D - Training Data: COCO-WholeBody - Name: td-hm_res50_8xb64-210e_coco-wholebody-256x192 - Results: - - Dataset: COCO-WholeBody - Metrics: - Body AP: 0.652 - Body AR: 0.738 - Face AP: 0.606 - Face AR: 0.715 - Foot AP: 0.615 - Foot AR: 0.749 - Hand AP: 0.46 - Hand AR: 0.584 - Whole AP: 0.521 - Whole AR: 0.633 - Task: Wholebody 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_coco_wholebody_256x192-9e37ed88_20201004.pth -- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO-WholeBody - Name: td-hm_res50_8xb64-210e_coco-wholebody-384x288 - Results: - - Dataset: COCO-WholeBody - Metrics: - Body AP: 0.666 - Body AR: 0.747 - Face AP: 0.731 - Face AR: 0.811 - Foot AP: 0.634 - Foot AR: 0.763 - Hand AP: 0.536 - Hand AR: 0.646 - Whole AP: 0.574 - Whole AR: 0.67 - Task: Wholebody 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_coco_wholebody_384x288-ce11e294_20201004.pth -- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO-WholeBody - Name: td-hm_res101_8xb32-210e_coco-wholebody-256x192 - Results: - - Dataset: COCO-WholeBody - Metrics: - Body AP: 0.669 - Body AR: 0.753 - Face AP: 0.611 - Face AR: 0.722 - Foot AP: 0.637 - Foot AR: 0.766 - Hand AP: 0.463 - Hand AR: 0.589 - Whole AP: 0.531 - Whole AR: 0.645 - Task: Wholebody 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res101_coco_wholebody_256x192-7325f982_20201004.pth -- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO-WholeBody - Name: td-hm_res101_8xb32-210e_coco-wholebody-384x288 - Results: - - Dataset: COCO-WholeBody - Metrics: - Body AP: 0.692 - Body AR: 0.77 - Face AP: 0.746 - Face AR: 0.82 - Foot AP: 0.68 - Foot AR: 0.799 - Hand AP: 0.548 - Hand AR: 0.657 - Whole AP: 0.598 - Whole AR: 0.691 - Task: Wholebody 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res101_coco_wholebody_384x288-6c137b9a_20201004.pth -- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-256x192.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 - Training Data: COCO-WholeBody - Name: td-hm_res152_8xb32-210e_coco-wholebody-256x192 - Results: - - Dataset: COCO-WholeBody - Metrics: - Body AP: 0.682 - Body AR: 0.764 - Face AP: 0.623 - Face AR: 0.728 - Foot AP: 0.661 - Foot AR: 0.787 - Hand AP: 0.481 - Hand AR: 0.607 - Whole AP: 0.548 - Whole AR: 0.661 - Task: Wholebody 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res152_coco_wholebody_256x192-5de8ae23_20201004.pth -- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-384x288.py - In Collection: SimpleBaseline2D - Metadata: - Architecture: *id001 
- Training Data: COCO-WholeBody - Name: td-hm_res152_8xb32-210e_coco-wholebody-384x288 - Results: - - Dataset: COCO-WholeBody - Metrics: - Body AP: 0.704 - Body AR: 0.78 - Face AP: 0.751 - Face AR: 0.824 - Foot AP: 0.693 - Foot AR: 0.813 - Hand AP: 0.559 - Hand AR: 0.666 - Whole AP: 0.61 - Whole AR: 0.705 - Task: Wholebody 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res152_coco_wholebody_384x288-eab8caa8_20201004.pth +Models: +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: &id001 + - SimpleBaseline2D + Training Data: COCO-WholeBody + Name: td-hm_res50_8xb64-210e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.652 + Body AR: 0.738 + Face AP: 0.606 + Face AR: 0.715 + Foot AP: 0.615 + Foot AR: 0.749 + Hand AP: 0.46 + Hand AR: 0.584 + Whole AP: 0.521 + Whole AR: 0.633 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_coco_wholebody_256x192-9e37ed88_20201004.pth +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: td-hm_res50_8xb64-210e_coco-wholebody-384x288 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.666 + Body AR: 0.747 + Face AP: 0.731 + Face AR: 0.811 + Foot AP: 0.634 + Foot AR: 0.763 + Hand AP: 0.536 + Hand AR: 0.646 + Whole AP: 0.574 + Whole AR: 0.67 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_coco_wholebody_384x288-ce11e294_20201004.pth +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: td-hm_res101_8xb32-210e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.669 + Body AR: 0.753 + Face AP: 0.611 + Face AR: 0.722 + Foot AP: 0.637 + Foot AR: 0.766 + Hand AP: 0.463 + Hand AR: 0.589 + Whole AP: 0.531 + Whole AR: 0.645 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res101_coco_wholebody_256x192-7325f982_20201004.pth +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: td-hm_res101_8xb32-210e_coco-wholebody-384x288 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.692 + Body AR: 0.77 + Face AP: 0.746 + Face AR: 0.82 + Foot AP: 0.68 + Foot AR: 0.799 + Hand AP: 0.548 + Hand AR: 0.657 + Whole AP: 0.598 + Whole AR: 0.691 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res101_coco_wholebody_384x288-6c137b9a_20201004.pth +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-256x192.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: td-hm_res152_8xb32-210e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.682 + Body AR: 0.764 + Face AP: 0.623 + Face AR: 0.728 + Foot AP: 0.661 + Foot AR: 0.787 + Hand AP: 0.481 + Hand AR: 0.607 + Whole AP: 0.548 + Whole AR: 
0.661 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res152_coco_wholebody_256x192-5de8ae23_20201004.pth +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-384x288.py + In Collection: SimpleBaseline2D + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: td-hm_res152_8xb32-210e_coco-wholebody-384x288 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.704 + Body AR: 0.78 + Face AP: 0.751 + Face AR: 0.824 + Foot AP: 0.693 + Foot AR: 0.813 + Hand AP: 0.559 + Hand AR: 0.666 + Whole AP: 0.61 + Whole AR: 0.705 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res152_coco_wholebody_384x288-eab8caa8_20201004.pth diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-256x192.py b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-256x192.py index 2595e3fc13..339581eca4 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-256x192.py +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-256x192.py @@ -1,150 +1,150 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=133, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - 
dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = dict( - type='CocoWholeBodyMetric', - ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=133, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + 
dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-384x288.py b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-384x288.py index 727fa9472e..677fdc70c9 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-384x288.py +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_8xb64-210e_coco-wholebody-384x288.py @@ -1,150 +1,150 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - 
num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=133, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = dict( - type='CocoWholeBodyMetric', - ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + 
num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=133, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_dark-8xb64-210e_coco-wholebody-256x192.py b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_dark-8xb64-210e_coco-wholebody-256x192.py index ffee1d1383..371783162e 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_dark-8xb64-210e_coco-wholebody-256x192.py +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w32_dark-8xb64-210e_coco-wholebody-256x192.py @@ -1,154 +1,154 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2, - 
unbiased=True) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=133, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = dict( - type='CocoWholeBodyMetric', - ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec 
= dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=133, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-256x192.py b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-256x192.py index 892b4b7936..5c53b7cb71 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-256x192.py +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-256x192.py @@ -1,150 +1,150 @@ -_base_ = ['../../../_base_/default_runtime.py'] - 
-# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(48, 96)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(48, 96, 192)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(48, 96, 192, 384))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w48-8ef0771d.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=48, - out_channels=133, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = dict( - type='CocoWholeBodyMetric', - ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') 
-test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=133, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + 
type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-384x288.py b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-384x288.py index d587dbc45b..ef25d2ac27 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-384x288.py +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_8xb32-210e_coco-wholebody-384x288.py @@ -1,150 +1,150 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(48, 96)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(48, 96, 192)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(48, 96, 192, 384))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w48-8ef0771d.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=48, - out_channels=133, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - 
data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = dict( - type='CocoWholeBodyMetric', - ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=133, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + 
data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py index 63175b99ea..d77872c39e 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py @@ -1,154 +1,154 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', - input_size=(288, 384), - heatmap_size=(72, 96), - sigma=3, - unbiased=True) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(48, 96)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(48, 96, 192)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(48, 96, 192, 384))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w48-8ef0771d.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=48, - out_channels=133, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - 
dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = dict( - type='CocoWholeBodyMetric', - ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(288, 384), + heatmap_size=(72, 96), + sigma=3, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=133, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# 
pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-256x192.py b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-256x192.py index c0d8187ab4..87c273c6fb 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-256x192.py +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-256x192.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=101, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=133, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 
'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = dict( - type='CocoWholeBodyMetric', - ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=133, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + 
dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-384x288.py b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-384x288.py index 42e98575fb..5e58a1644e 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-384x288.py +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res101_8xb32-210e_coco-wholebody-384x288.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=101, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=133, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', 
input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = dict( - type='CocoWholeBodyMetric', - ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=101, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=133, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, 
+ drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-256x192.py b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-256x192.py index 10c16eb71f..3ce49366ae 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-256x192.py +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-256x192.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=152, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=133, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - 
persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = dict( - type='CocoWholeBodyMetric', - ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=133, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + 
type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-384x288.py b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-384x288.py index 43ec5fb67c..a92c4d25af 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-384x288.py +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res152_8xb32-210e_coco-wholebody-384x288.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=152, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=133, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = 
dict( - type='CocoWholeBodyMetric', - ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=152, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet152'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=133, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-256x192.py b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-256x192.py index e568c78b17..127c32225c 100644 --- 
a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-256x192.py +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-256x192.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=133, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = dict( - type='CocoWholeBodyMetric', - ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, 
end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=133, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-384x288.py b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-384x288.py index 6869d17ba9..88a88e22ef 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-384x288.py +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_res50_8xb64-210e_coco-wholebody-384x288.py @@ -1,121 +1,121 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', 
begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=133, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = dict( - type='CocoWholeBodyMetric', - ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(288, 384), heatmap_size=(72, 96), sigma=3) + +# model settings 
+model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + head=dict( + type='HeatmapHead', + in_channels=2048, + out_channels=133, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_8xb64-210e_coco-wholebody-256x192.py b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_8xb64-210e_coco-wholebody-256x192.py index cad9c539be..b39adf9d15 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_8xb64-210e_coco-wholebody-256x192.py +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_8xb64-210e_coco-wholebody-256x192.py @@ -1,122 +1,122 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), 
sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict(type='ViPNAS_MobileNetV3'), - head=dict( - type='ViPNASHead', - in_channels=160, - out_channels=133, - deconv_out_channels=(160, 160, 160), - deconv_num_groups=(160, 160, 160), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = dict( - type='CocoWholeBodyMetric', - ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict(type='ViPNAS_MobileNetV3'), + head=dict( + type='ViPNASHead', + in_channels=160, + out_channels=133, + deconv_out_channels=(160, 160, 160), + deconv_num_groups=(160, 160, 160), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + 
decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_dark-8xb64-210e_coco-wholebody-256x192.py b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_dark-8xb64-210e_coco-wholebody-256x192.py index d34ea50db6..851c04a61d 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_dark-8xb64-210e_coco-wholebody-256x192.py +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_dark-8xb64-210e_coco-wholebody-256x192.py @@ -1,126 +1,126 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2, - unbiased=True) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict(type='ViPNAS_MobileNetV3'), - head=dict( - type='ViPNASHead', - in_channels=160, - out_channels=133, - 
deconv_out_channels=(160, 160, 160), - deconv_num_groups=(160, 160, 160), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = dict( - type='CocoWholeBodyMetric', - ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict(type='ViPNAS_MobileNetV3'), + head=dict( + type='ViPNASHead', + in_channels=160, + out_channels=133, + deconv_out_channels=(160, 160, 160), + deconv_num_groups=(160, 160, 160), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + 
dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_8xb64-210e_coco-wholebody-256x192.py b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_8xb64-210e_coco-wholebody-256x192.py index 822e4c698a..24c7578f18 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_8xb64-210e_coco-wholebody-256x192.py +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_8xb64-210e_coco-wholebody-256x192.py @@ -1,123 +1,123 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ViPNAS_ResNet', - depth=50, - ), - head=dict( - type='ViPNASHead', - in_channels=608, - out_channels=133, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - 
dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = dict( - type='CocoWholeBodyMetric', - ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ViPNAS_ResNet', + depth=50, + ), + head=dict( + type='ViPNASHead', + in_channels=608, + out_channels=133, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders 
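Each of these configs is self-contained, so once a matching checkpoint is available it can be loaded through the standard MMPose 1.x Python API. A minimal sketch, assuming MMPose 1.x and its dependencies are installed, that the config path below exists as in this diff, and that the checkpoint path and image name are placeholders to be substituted:

```python
from mmpose.apis import inference_topdown, init_model

# Placeholder paths: point them at a real config, checkpoint and image.
config_file = ('configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/'
               'td-hm_vipnas-res50_8xb64-210e_coco-wholebody-256x192.py')
checkpoint_file = 'CHECKPOINT_PATH'  # e.g. a .pth file from the model zoo

# Build the model from the config and load the weights.
model = init_model(config_file, checkpoint_file, device='cpu')

# Top-down inference on one image; with no bboxes given, the whole image is
# treated as a single person instance.
results = inference_topdown(model, 'person.jpg')
print(results[0].pred_instances.keypoints.shape)  # expected: (1, 133, 2)
```

In a full pipeline the bounding boxes would come from a person detector (as in the `bbox_file` used for evaluation above), and each box would be passed to `inference_topdown` via its `bboxes` argument.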
+train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py index 15b152fe96..585e3dcff2 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py @@ -1,127 +1,127 @@ -_base_ = ['../../../_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2, - unbiased=True) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ViPNAS_ResNet', - depth=50, - ), - head=dict( - type='ViPNASHead', - in_channels=608, - out_channels=133, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', 
input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -val_evaluator = dict( - type='CocoWholeBodyMetric', - ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') -test_evaluator = val_evaluator +_base_ = ['../../../_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco-wholebody/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2, + unbiased=True) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ViPNAS_ResNet', + depth=50, + ), + head=dict( + type='ViPNASHead', + in_channels=608, + out_channels=133, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + rotate_factor=60, + scale_factor=(0.75, 1.25)), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + 
persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_coco-wholebody.md b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_coco-wholebody.md index 63fc0aed8a..13b0321693 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_coco-wholebody.md +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_coco-wholebody.md @@ -1,38 +1,38 @@ - - -
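The next several files in this diff are model-zoo documentation for the ViPNAS whole-body models: two markdown summaries and two metafiles (`vipnas_coco-wholebody.yml`, `vipnas_dark_coco-wholebody.yml`) that record, for each config, its architecture, training data, metrics, and checkpoint URL. A small sketch of how such a metafile could be inspected programmatically, assuming PyYAML is available and the path matches the repository layout shown below:

```python
import yaml

# Metafile path as it appears later in this diff; adjust if the layout differs.
metafile = ('configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/'
            'vipnas_coco-wholebody.yml')

with open(metafile) as f:
    meta = yaml.safe_load(f)

# List each model with its whole-body AP and checkpoint URL.
for model in meta['Models']:
    metrics = model['Results'][0]['Metrics']
    print(model['Name'], metrics['Whole AP'], model['Weights'])
```

These metafiles are what tools such as MIM and the model-zoo index consume, which is why their metric values mirror the tables in the markdown files.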
-ViPNAS (CVPR'2021) - -```bibtex -@article{xu2021vipnas, - title={ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search}, - author={Xu, Lumin and Guan, Yingda and Jin, Sheng and Liu, Wentao and Qian, Chen and Luo, Ping and Ouyang, Wanli and Wang, Xiaogang}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - year={2021} -} -``` - -
- - - -
-COCO-WholeBody (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | Body AP | Body AR | Foot AP | Foot AR | Face AP | Face AR | Hand AP | Hand AR | Whole AP | Whole AR | ckpt | log | -| :-------------------------------------- | :--------: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :------: | :------: | :--------------------------------------: | :-------------------------------------: | -| [S-ViPNAS-MobileNetV3](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_8xb64-210e_coco-wholebody-256x192.py) | 256x192 | 0.619 | 0.700 | 0.477 | 0.608 | 0.585 | 0.689 | 0.386 | 0.505 | 0.473 | 0.578 | [ckpt](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_mbv3_coco_wholebody_256x192-0fee581a_20211205.pth) | [log](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_mbv3_coco_wholebody_256x192_20211205.log.json) | -| [S-ViPNAS-Res50](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_8xb64-210e_coco-wholebody-256x192.py) | 256x192 | 0.643 | 0.726 | 0.553 | 0.694 | 0.587 | 0.698 | 0.410 | 0.529 | 0.495 | 0.607 | [ckpt](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192-49e1c3a4_20211112.pth) | [log](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_20211112.log.json) | + + +
+ViPNAS (CVPR'2021) + +```bibtex +@article{xu2021vipnas, + title={ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search}, + author={Xu, Lumin and Guan, Yingda and Jin, Sheng and Liu, Wentao and Qian, Chen and Luo, Ping and Ouyang, Wanli and Wang, Xiaogang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + year={2021} +} +``` + +
+ + + +
+COCO-WholeBody (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | Body AP | Body AR | Foot AP | Foot AR | Face AP | Face AR | Hand AP | Hand AR | Whole AP | Whole AR | ckpt | log | +| :-------------------------------------- | :--------: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :------: | :------: | :--------------------------------------: | :-------------------------------------: | +| [S-ViPNAS-MobileNetV3](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_8xb64-210e_coco-wholebody-256x192.py) | 256x192 | 0.619 | 0.700 | 0.477 | 0.608 | 0.585 | 0.689 | 0.386 | 0.505 | 0.473 | 0.578 | [ckpt](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_mbv3_coco_wholebody_256x192-0fee581a_20211205.pth) | [log](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_mbv3_coco_wholebody_256x192_20211205.log.json) | +| [S-ViPNAS-Res50](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_8xb64-210e_coco-wholebody-256x192.py) | 256x192 | 0.643 | 0.726 | 0.553 | 0.694 | 0.587 | 0.698 | 0.410 | 0.529 | 0.495 | 0.607 | [ckpt](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192-49e1c3a4_20211112.pth) | [log](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_20211112.log.json) | diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_coco-wholebody.yml b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_coco-wholebody.yml index 2814836407..cae2a9ac3c 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_coco-wholebody.yml +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_coco-wholebody.yml @@ -1,44 +1,44 @@ -Models: -- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_8xb64-210e_coco-wholebody-256x192.py - In Collection: ViPNAS - Metadata: - Architecture: &id001 - - ViPNAS - Training Data: COCO-WholeBody - Name: td-hm_vipnas-mbv3_8xb64-210e_coco-wholebody-256x192 - Results: - - Dataset: COCO-WholeBody - Metrics: - Body AP: 0.619 - Body AR: 0.7 - Face AP: 0.585 - Face AR: 0.689 - Foot AP: 0.477 - Foot AR: 0.608 - Hand AP: 0.386 - Hand AR: 0.505 - Whole AP: 0.473 - Whole AR: 0.578 - Task: Wholebody 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_mbv3_coco_wholebody_256x192-0fee581a_20211205.pth -- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_8xb64-210e_coco-wholebody-256x192.py - In Collection: ViPNAS - Metadata: - Architecture: *id001 - Training Data: COCO-WholeBody - Name: td-hm_vipnas-res50_8xb64-210e_coco-wholebody-256x192 - Results: - - Dataset: COCO-WholeBody - Metrics: - Body AP: 0.643 - Body AR: 0.726 - Face AP: 0.587 - Face AR: 0.698 - Foot AP: 0.553 - Foot AR: 0.694 - Hand AP: 0.41 - Hand AR: 0.529 - Whole AP: 0.495 - Whole AR: 0.607 - Task: Wholebody 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192-49e1c3a4_20211112.pth +Models: +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_8xb64-210e_coco-wholebody-256x192.py + In Collection: ViPNAS + Metadata: + Architecture: &id001 + - ViPNAS + Training Data: COCO-WholeBody + Name: td-hm_vipnas-mbv3_8xb64-210e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.619 + Body AR: 0.7 + Face 
AP: 0.585 + Face AR: 0.689 + Foot AP: 0.477 + Foot AR: 0.608 + Hand AP: 0.386 + Hand AR: 0.505 + Whole AP: 0.473 + Whole AR: 0.578 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_mbv3_coco_wholebody_256x192-0fee581a_20211205.pth +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_8xb64-210e_coco-wholebody-256x192.py + In Collection: ViPNAS + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: td-hm_vipnas-res50_8xb64-210e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.643 + Body AR: 0.726 + Face AP: 0.587 + Face AR: 0.698 + Foot AP: 0.553 + Foot AR: 0.694 + Hand AP: 0.41 + Hand AR: 0.529 + Whole AP: 0.495 + Whole AR: 0.607 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192-49e1c3a4_20211112.pth diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_dark_coco-wholebody.md b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_dark_coco-wholebody.md index e39c66e913..6bc5624072 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_dark_coco-wholebody.md +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_dark_coco-wholebody.md @@ -1,55 +1,55 @@ - - -
-ViPNAS (CVPR'2021) - -```bibtex -@article{xu2021vipnas, - title={ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search}, - author={Xu, Lumin and Guan, Yingda and Jin, Sheng and Liu, Wentao and Qian, Chen and Luo, Ping and Ouyang, Wanli and Wang, Xiaogang}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - year={2021} -} -``` - -
- - - -
-DarkPose (CVPR'2020) - -```bibtex -@inproceedings{zhang2020distribution, - title={Distribution-aware coordinate representation for human pose estimation}, - author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={7093--7102}, - year={2020} -} -``` - -
- - - -
-COCO-WholeBody (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset - -| Arch | Input Size | Body AP | Body AR | Foot AP | Foot AR | Face AP | Face AR | Hand AP | Hand AR | Whole AP | Whole AR | ckpt | log | -| :-------------------------------------- | :--------: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :------: | :------: | :--------------------------------------: | :-------------------------------------: | -| [S-ViPNAS-MobileNetV3_dark](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_dark-8xb64-210e_coco-wholebody-256x192.py) | 256x192 | 0.632 | 0.710 | 0.530 | 0.660 | 0.672 | 0.771 | 0.404 | 0.519 | 0.508 | 0.607 | [ckpt](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_mbv3_coco_wholebody_256x192_dark-e2158108_20211205.pth) | [log](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_mbv3_coco_wholebody_256x192_dark_20211205.log.json) | -| [S-ViPNAS-Res50_dark](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py) | 256x192 | 0.650 | 0.732 | 0.550 | 0.686 | 0.684 | 0.783 | 0.437 | 0.554 | 0.528 | 0.632 | [ckpt](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth) | [log](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark_20211112.log.json) | + + +
+ViPNAS (CVPR'2021) + +```bibtex +@article{xu2021vipnas, + title={ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search}, + author={Xu, Lumin and Guan, Yingda and Jin, Sheng and Liu, Wentao and Qian, Chen and Luo, Ping and Ouyang, Wanli and Wang, Xiaogang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + year={2021} +} +``` + +
+ + + +
+DarkPose (CVPR'2020) + +```bibtex +@inproceedings{zhang2020distribution, + title={Distribution-aware coordinate representation for human pose estimation}, + author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={7093--7102}, + year={2020} +} +``` + +
+ + + +
+COCO-WholeBody (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +Results on COCO-WholeBody v1.0 val with detector having human AP of 56.4 on COCO val2017 dataset + +| Arch | Input Size | Body AP | Body AR | Foot AP | Foot AR | Face AP | Face AR | Hand AP | Hand AR | Whole AP | Whole AR | ckpt | log | +| :-------------------------------------- | :--------: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :------: | :------: | :--------------------------------------: | :-------------------------------------: | +| [S-ViPNAS-MobileNetV3_dark](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_dark-8xb64-210e_coco-wholebody-256x192.py) | 256x192 | 0.632 | 0.710 | 0.530 | 0.660 | 0.672 | 0.771 | 0.404 | 0.519 | 0.508 | 0.607 | [ckpt](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_mbv3_coco_wholebody_256x192_dark-e2158108_20211205.pth) | [log](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_mbv3_coco_wholebody_256x192_dark_20211205.log.json) | +| [S-ViPNAS-Res50_dark](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py) | 256x192 | 0.650 | 0.732 | 0.550 | 0.686 | 0.684 | 0.783 | 0.437 | 0.554 | 0.528 | 0.632 | [ckpt](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth) | [log](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark_20211112.log.json) | diff --git a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_dark_coco-wholebody.yml b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_dark_coco-wholebody.yml index 5449af0ccd..0f10316fff 100644 --- a/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_dark_coco-wholebody.yml +++ b/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_dark_coco-wholebody.yml @@ -1,45 +1,45 @@ -Models: -- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_dark-8xb64-210e_coco-wholebody-256x192.py - In Collection: ViPNAS - Metadata: - Architecture: &id001 - - ViPNAS - - DarkPose - Training Data: COCO-WholeBody - Name: td-hm_vipnas-mbv3_dark-8xb64-210e_coco-wholebody-256x192 - Results: - - Dataset: COCO-WholeBody - Metrics: - Body AP: 0.632 - Body AR: 0.71 - Face AP: 0.672 - Face AR: 0.771 - Foot AP: 0.53 - Foot AR: 0.66 - Hand AP: 0.404 - Hand AR: 0.519 - Whole AP: 0.508 - Whole AR: 0.607 - Task: Wholebody 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_mbv3_coco_wholebody_256x192_dark-e2158108_20211205.pth -- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py - In Collection: ViPNAS - Metadata: - Architecture: *id001 - Training Data: COCO-WholeBody - Name: td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192 - Results: - - Dataset: COCO-WholeBody - Metrics: - Body AP: 0.65 - Body AR: 0.732 - Face AP: 0.684 - Face AR: 0.783 - Foot AP: 0.55 - Foot AR: 0.686 - Hand AP: 0.437 - Hand AR: 0.554 - Whole AP: 0.528 - Whole AR: 0.632 - Task: Wholebody 2D Keypoint - Weights: https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth +Models: +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-mbv3_dark-8xb64-210e_coco-wholebody-256x192.py + In Collection: ViPNAS + Metadata: + Architecture: &id001 + - ViPNAS + - DarkPose + Training Data: COCO-WholeBody + Name: 
td-hm_vipnas-mbv3_dark-8xb64-210e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.632 + Body AR: 0.71 + Face AP: 0.672 + Face AR: 0.771 + Foot AP: 0.53 + Foot AR: 0.66 + Hand AP: 0.404 + Hand AR: 0.519 + Whole AP: 0.508 + Whole AR: 0.607 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_mbv3_coco_wholebody_256x192_dark-e2158108_20211205.pth +- Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py + In Collection: ViPNAS + Metadata: + Architecture: *id001 + Training Data: COCO-WholeBody + Name: td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192 + Results: + - Dataset: COCO-WholeBody + Metrics: + Body AP: 0.65 + Body AR: 0.732 + Face AP: 0.684 + Face AR: 0.783 + Foot AP: 0.55 + Foot AR: 0.686 + Hand AP: 0.437 + Hand AR: 0.554 + Whole AP: 0.528 + Whole AR: 0.632 + Task: Wholebody 2D Keypoint + Weights: https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth diff --git a/dataset-index.yml b/dataset-index.yml index a6acc57cc4..b361c95ece 100644 --- a/dataset-index.yml +++ b/dataset-index.yml @@ -1,71 +1,71 @@ -coco2017: - dataset: COCO_2017 - download_root: data - data_root: data/pose - script: tools/dataset_converters/scripts/preprocess_coco2017.sh - -mpii: - dataset: MPII_Human_Pose - download_root: data - data_root: data/pose - script: tools/dataset_converters/scripts/preprocess_mpii.sh - -aic: - dataset: AI_Challenger - download_root: data - data_root: data/pose - script: tools/dataset_converters/scripts/preprocess_aic.sh - -crowdpose: - dataset: CrowdPose - download_root: data - data_root: data/pose - script: tools/dataset_converters/scripts/preprocess_crowdpose.sh - -halpe: - dataset: Halpe - download_root: data - data_root: data/pose - script: tools/dataset_converters/scripts/preprocess_halpe.sh - -lapa: - dataset: LaPa - download_root: data - data_root: data/pose - script: tools/dataset_converters/scripts/preprocess_lapa.sh - -300w: - dataset: 300w - download_root: data - data_root: data/pose - script: tools/dataset_converters/scripts/preprocess_300w.sh - -wflw: - dataset: WFLW - download_root: data - data_root: data/pose - script: tools/dataset_converters/scripts/preprocess_wflw.sh - -onehand10k: - dataset: OneHand10K - download_root: data - data_root: data/pose - script: tools/dataset_converters/scripts/preprocess_onehand10k.sh - -freihand: - dataset: FreiHAND - download_root: data - data_root: data/pose - script: tools/dataset_converters/scripts/preprocess_freihand.sh - -ap10k: - dataset: AP-10K - download_root: data - data_root: data/pose - script: tools/dataset_converters/scripts/preprocess_ap10k.sh - -hagrid: - dataset: HaGRID - download_root: data - data_root: data/pose - script: tools/dataset_converters/scripts/preprocess_hagrid.sh +coco2017: + dataset: COCO_2017 + download_root: data + data_root: data/pose + script: tools/dataset_converters/scripts/preprocess_coco2017.sh + +mpii: + dataset: MPII_Human_Pose + download_root: data + data_root: data/pose + script: tools/dataset_converters/scripts/preprocess_mpii.sh + +aic: + dataset: AI_Challenger + download_root: data + data_root: data/pose + script: tools/dataset_converters/scripts/preprocess_aic.sh + +crowdpose: + dataset: CrowdPose + download_root: data + data_root: data/pose + script: tools/dataset_converters/scripts/preprocess_crowdpose.sh + +halpe: + dataset: Halpe + download_root: data + data_root: 
data/pose + script: tools/dataset_converters/scripts/preprocess_halpe.sh + +lapa: + dataset: LaPa + download_root: data + data_root: data/pose + script: tools/dataset_converters/scripts/preprocess_lapa.sh + +300w: + dataset: 300w + download_root: data + data_root: data/pose + script: tools/dataset_converters/scripts/preprocess_300w.sh + +wflw: + dataset: WFLW + download_root: data + data_root: data/pose + script: tools/dataset_converters/scripts/preprocess_wflw.sh + +onehand10k: + dataset: OneHand10K + download_root: data + data_root: data/pose + script: tools/dataset_converters/scripts/preprocess_onehand10k.sh + +freihand: + dataset: FreiHAND + download_root: data + data_root: data/pose + script: tools/dataset_converters/scripts/preprocess_freihand.sh + +ap10k: + dataset: AP-10K + download_root: data + data_root: data/pose + script: tools/dataset_converters/scripts/preprocess_ap10k.sh + +hagrid: + dataset: HaGRID + download_root: data + data_root: data/pose + script: tools/dataset_converters/scripts/preprocess_hagrid.sh diff --git a/demo/MMPose_Tutorial.ipynb b/demo/MMPose_Tutorial.ipynb index 0e9ff9b57f..e3002a4993 100644 --- a/demo/MMPose_Tutorial.ipynb +++ b/demo/MMPose_Tutorial.ipynb @@ -1,3944 +1,3944 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "id": "F77yOqgkX8p4" - }, - "source": [ - "\"Open" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "id": "8xX3YewOtqV0" - }, - "source": [ - "# MMPose Tutorial\n", - "\n", - "Welcome to MMPose colab tutorial! In this tutorial, we will show you how to\n", - "\n", - "- install MMPose 1.x\n", - "- perform inference with an MMPose model\n", - "- train a new mmpose model with your own datasets\n", - "\n", - "Let's start!" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "id": "bkw-kUD8t3t8" - }, - "source": [ - "## Install MMPose\n", - "\n", - "We recommend to use a conda environment to install mmpose and its dependencies. And compilers `nvcc` and `gcc` are required." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "0f_Ebb2otWtd", - "outputId": "8c16b8ae-b927-41d5-c49e-d61ba6798a2d" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "nvcc: NVIDIA (R) Cuda compiler driver\n", - "Copyright (c) 2005-2022 NVIDIA Corporation\n", - "Built on Wed_Sep_21_10:33:58_PDT_2022\n", - "Cuda compilation tools, release 11.8, V11.8.89\n", - "Build cuda_11.8.r11.8/compiler.31833905_0\n", - "gcc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0\n", - "Copyright (C) 2019 Free Software Foundation, Inc.\n", - "This is free software; see the source for copying conditions. 
There is NO\n", - "warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n", - "\n", - "/usr/local/bin/python\n" - ] - } - ], - "source": [ - "# check NVCC version\n", - "!nvcc -V\n", - "\n", - "# check GCC version\n", - "!gcc --version\n", - "\n", - "# check python in conda environment\n", - "!which python" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "igSm4jhihE2M", - "outputId": "0d521640-a4d7-4264-889c-df862e9c332f" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Looking in indexes: https://download.pytorch.org/whl/cu118, https://us-python.pkg.dev/colab-wheels/public/simple/\n", - "Requirement already satisfied: torch in /usr/local/lib/python3.9/dist-packages (2.0.0+cu118)\n", - "Requirement already satisfied: torchvision in /usr/local/lib/python3.9/dist-packages (0.15.1+cu118)\n", - "Requirement already satisfied: torchaudio in /usr/local/lib/python3.9/dist-packages (2.0.1+cu118)\n", - "Requirement already satisfied: networkx in /usr/local/lib/python3.9/dist-packages (from torch) (3.1)\n", - "Requirement already satisfied: filelock in /usr/local/lib/python3.9/dist-packages (from torch) (3.11.0)\n", - "Requirement already satisfied: sympy in /usr/local/lib/python3.9/dist-packages (from torch) (1.11.1)\n", - "Requirement already satisfied: triton==2.0.0 in /usr/local/lib/python3.9/dist-packages (from torch) (2.0.0)\n", - "Requirement already satisfied: jinja2 in /usr/local/lib/python3.9/dist-packages (from torch) (3.1.2)\n", - "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.9/dist-packages (from torch) (4.5.0)\n", - "Requirement already satisfied: cmake in /usr/local/lib/python3.9/dist-packages (from triton==2.0.0->torch) (3.25.2)\n", - "Requirement already satisfied: lit in /usr/local/lib/python3.9/dist-packages (from triton==2.0.0->torch) (16.0.1)\n", - "Requirement already satisfied: numpy in /usr/local/lib/python3.9/dist-packages (from torchvision) (1.22.4)\n", - "Requirement already satisfied: requests in /usr/local/lib/python3.9/dist-packages (from torchvision) (2.27.1)\n", - "Requirement already satisfied: pillow!=8.3.*,>=5.3.0 in /usr/local/lib/python3.9/dist-packages (from torchvision) (8.4.0)\n", - "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.9/dist-packages (from jinja2->torch) (2.1.2)\n", - "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.9/dist-packages (from requests->torchvision) (3.4)\n", - "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.9/dist-packages (from requests->torchvision) (1.26.15)\n", - "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.9/dist-packages (from requests->torchvision) (2.0.12)\n", - "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.9/dist-packages (from requests->torchvision) (2022.12.7)\n", - "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.9/dist-packages (from sympy->torch) (1.3.0)\n" - ] - } - ], - "source": [ - "# install dependencies: (if your colab has CUDA 11.8)\n", - "%pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "MLcoZr3ot9iw", - "outputId": "70e5d18e-746c-41a3-a761-6303b79eaf02" - }, - "outputs": [ - { - 
"name": "stdout", - "output_type": "stream", - "text": [ - "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", - "Collecting openmim\n", - " Downloading openmim-0.3.7-py2.py3-none-any.whl (51 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m51.3/51.3 kB\u001b[0m \u001b[31m1.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: tabulate in /usr/local/lib/python3.9/dist-packages (from openmim) (0.8.10)\n", - "Requirement already satisfied: rich in /usr/local/lib/python3.9/dist-packages (from openmim) (13.3.3)\n", - "Requirement already satisfied: pip>=19.3 in /usr/local/lib/python3.9/dist-packages (from openmim) (23.0.1)\n", - "Collecting colorama\n", - " Downloading colorama-0.4.6-py2.py3-none-any.whl (25 kB)\n", - "Collecting model-index\n", - " Downloading model_index-0.1.11-py3-none-any.whl (34 kB)\n", - "Requirement already satisfied: pandas in /usr/local/lib/python3.9/dist-packages (from openmim) (1.5.3)\n", - "Requirement already satisfied: requests in /usr/local/lib/python3.9/dist-packages (from openmim) (2.27.1)\n", - "Requirement already satisfied: Click in /usr/local/lib/python3.9/dist-packages (from openmim) (8.1.3)\n", - "Requirement already satisfied: markdown in /usr/local/lib/python3.9/dist-packages (from model-index->openmim) (3.4.3)\n", - "Collecting ordered-set\n", - " Downloading ordered_set-4.1.0-py3-none-any.whl (7.6 kB)\n", - "Requirement already satisfied: pyyaml in /usr/local/lib/python3.9/dist-packages (from model-index->openmim) (6.0)\n", - "Requirement already satisfied: numpy>=1.20.3 in /usr/local/lib/python3.9/dist-packages (from pandas->openmim) (1.22.4)\n", - "Requirement already satisfied: python-dateutil>=2.8.1 in /usr/local/lib/python3.9/dist-packages (from pandas->openmim) (2.8.2)\n", - "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.9/dist-packages (from pandas->openmim) (2022.7.1)\n", - "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.9/dist-packages (from requests->openmim) (2.0.12)\n", - "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.9/dist-packages (from requests->openmim) (2022.12.7)\n", - "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.9/dist-packages (from requests->openmim) (3.4)\n", - "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.9/dist-packages (from requests->openmim) (1.26.15)\n", - "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.9/dist-packages (from rich->openmim) (2.14.0)\n", - "Requirement already satisfied: markdown-it-py<3.0.0,>=2.2.0 in /usr/local/lib/python3.9/dist-packages (from rich->openmim) (2.2.0)\n", - "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.9/dist-packages (from markdown-it-py<3.0.0,>=2.2.0->rich->openmim) (0.1.2)\n", - "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.9/dist-packages (from python-dateutil>=2.8.1->pandas->openmim) (1.16.0)\n", - "Requirement already satisfied: importlib-metadata>=4.4 in /usr/local/lib/python3.9/dist-packages (from markdown->model-index->openmim) (6.2.0)\n", - "Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.9/dist-packages (from importlib-metadata>=4.4->markdown->model-index->openmim) (3.15.0)\n", - "Installing collected packages: ordered-set, colorama, model-index, openmim\n", - "Successfully installed colorama-0.4.6 
model-index-0.1.11 openmim-0.3.7 ordered-set-4.1.0\n", - "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", - " warnings.warn(\n", - "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", - "Looking in links: https://download.openmmlab.com/mmcv/dist/cu118/torch2.0.0/index.html\n", - "Collecting mmengine\n", - " Downloading mmengine-0.7.2-py3-none-any.whl (366 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m366.9/366.9 kB\u001b[0m \u001b[31m14.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: rich in /usr/local/lib/python3.9/dist-packages (from mmengine) (13.3.3)\n", - "Requirement already satisfied: matplotlib in /usr/local/lib/python3.9/dist-packages (from mmengine) (3.7.1)\n", - "Requirement already satisfied: pyyaml in /usr/local/lib/python3.9/dist-packages (from mmengine) (6.0)\n", - "Requirement already satisfied: opencv-python>=3 in /usr/local/lib/python3.9/dist-packages (from mmengine) (4.7.0.72)\n", - "Collecting yapf\n", - " Downloading yapf-0.32.0-py2.py3-none-any.whl (190 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m190.2/190.2 kB\u001b[0m \u001b[31m17.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: termcolor in /usr/local/lib/python3.9/dist-packages (from mmengine) (2.2.0)\n", - "Requirement already satisfied: numpy in /usr/local/lib/python3.9/dist-packages (from mmengine) (1.22.4)\n", - "Collecting addict\n", - " Downloading addict-2.4.0-py3-none-any.whl (3.8 kB)\n", - "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (1.4.4)\n", - "Requirement already satisfied: importlib-resources>=3.2.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (5.12.0)\n", - "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (23.0)\n", - "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (0.11.0)\n", - "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (2.8.2)\n", - "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (4.39.3)\n", - "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (3.0.9)\n", - "Requirement already satisfied: pillow>=6.2.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (8.4.0)\n", - "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (1.0.7)\n", - "Requirement already satisfied: markdown-it-py<3.0.0,>=2.2.0 in /usr/local/lib/python3.9/dist-packages (from rich->mmengine) (2.2.0)\n", - "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.9/dist-packages (from rich->mmengine) (2.14.0)\n", - "Requirement already satisfied: zipp>=3.1.0 in /usr/local/lib/python3.9/dist-packages (from importlib-resources>=3.2.0->matplotlib->mmengine) (3.15.0)\n", - "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.9/dist-packages (from markdown-it-py<3.0.0,>=2.2.0->rich->mmengine) 
(0.1.2)\n", - "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.9/dist-packages (from python-dateutil>=2.7->matplotlib->mmengine) (1.16.0)\n", - "Installing collected packages: yapf, addict, mmengine\n", - "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", - " warnings.warn(\n", - "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", - " warnings.warn(\n", - "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", - " warnings.warn(\n", - "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", - " warnings.warn(\n", - "Successfully installed addict-2.4.0 mmengine-0.7.2 yapf-0.32.0\n", - "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", - " warnings.warn(\n", - "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", - "Looking in links: https://download.openmmlab.com/mmcv/dist/cu118/torch2.0.0/index.html\n", - "Collecting mmcv>=2.0.0rc1\n", - " Downloading https://download.openmmlab.com/mmcv/dist/cu118/torch2.0.0/mmcv-2.0.0-cp39-cp39-manylinux1_x86_64.whl (74.4 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m74.4/74.4 MB\u001b[0m \u001b[31m12.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: mmengine>=0.2.0 in /usr/local/lib/python3.9/dist-packages (from mmcv>=2.0.0rc1) (0.7.2)\n", - "Requirement already satisfied: yapf in /usr/local/lib/python3.9/dist-packages (from mmcv>=2.0.0rc1) (0.32.0)\n", - "Requirement already satisfied: packaging in /usr/local/lib/python3.9/dist-packages (from mmcv>=2.0.0rc1) (23.0)\n", - "Requirement already satisfied: addict in /usr/local/lib/python3.9/dist-packages (from mmcv>=2.0.0rc1) (2.4.0)\n", - "Requirement already satisfied: numpy in /usr/local/lib/python3.9/dist-packages (from mmcv>=2.0.0rc1) (1.22.4)\n", - "Requirement already satisfied: pyyaml in /usr/local/lib/python3.9/dist-packages (from mmcv>=2.0.0rc1) (6.0)\n", - "Requirement already satisfied: opencv-python>=3 in /usr/local/lib/python3.9/dist-packages (from mmcv>=2.0.0rc1) (4.7.0.72)\n", - "Requirement already satisfied: Pillow in /usr/local/lib/python3.9/dist-packages (from mmcv>=2.0.0rc1) (8.4.0)\n", - "Requirement already satisfied: matplotlib in /usr/local/lib/python3.9/dist-packages (from mmengine>=0.2.0->mmcv>=2.0.0rc1) (3.7.1)\n", - "Requirement already satisfied: rich in /usr/local/lib/python3.9/dist-packages (from mmengine>=0.2.0->mmcv>=2.0.0rc1) (13.3.3)\n", - "Requirement already satisfied: termcolor in /usr/local/lib/python3.9/dist-packages (from mmengine>=0.2.0->mmcv>=2.0.0rc1) (2.2.0)\n", - "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (2.8.2)\n", - "Requirement already satisfied: importlib-resources>=3.2.0 in /usr/local/lib/python3.9/dist-packages (from 
matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (5.12.0)\n", - "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (0.11.0)\n", - "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (4.39.3)\n", - "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (3.0.9)\n", - "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (1.4.4)\n", - "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (1.0.7)\n", - "Requirement already satisfied: markdown-it-py<3.0.0,>=2.2.0 in /usr/local/lib/python3.9/dist-packages (from rich->mmengine>=0.2.0->mmcv>=2.0.0rc1) (2.2.0)\n", - "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.9/dist-packages (from rich->mmengine>=0.2.0->mmcv>=2.0.0rc1) (2.14.0)\n", - "Requirement already satisfied: zipp>=3.1.0 in /usr/local/lib/python3.9/dist-packages (from importlib-resources>=3.2.0->matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (3.15.0)\n", - "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.9/dist-packages (from markdown-it-py<3.0.0,>=2.2.0->rich->mmengine>=0.2.0->mmcv>=2.0.0rc1) (0.1.2)\n", - "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.9/dist-packages (from python-dateutil>=2.7->matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (1.16.0)\n", - "Installing collected packages: mmcv\n", - "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", - " warnings.warn(\n", - "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", - " warnings.warn(\n", - "Successfully installed mmcv-2.0.0\n", - "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. 
Use build and pip and other standards-based tools.\n", - " warnings.warn(\n", - "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", - "Looking in links: https://download.openmmlab.com/mmcv/dist/cu118/torch2.0.0/index.html\n", - "Collecting mmdet>=3.0.0rc0\n", - " Downloading mmdet-3.0.0-py3-none-any.whl (1.7 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.7/1.7 MB\u001b[0m \u001b[31m71.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: six in /usr/local/lib/python3.9/dist-packages (from mmdet>=3.0.0rc0) (1.16.0)\n", - "Collecting terminaltables\n", - " Downloading terminaltables-3.1.10-py2.py3-none-any.whl (15 kB)\n", - "Requirement already satisfied: pycocotools in /usr/local/lib/python3.9/dist-packages (from mmdet>=3.0.0rc0) (2.0.6)\n", - "Requirement already satisfied: scipy in /usr/local/lib/python3.9/dist-packages (from mmdet>=3.0.0rc0) (1.10.1)\n", - "Requirement already satisfied: numpy in /usr/local/lib/python3.9/dist-packages (from mmdet>=3.0.0rc0) (1.22.4)\n", - "Requirement already satisfied: matplotlib in /usr/local/lib/python3.9/dist-packages (from mmdet>=3.0.0rc0) (3.7.1)\n", - "Requirement already satisfied: shapely in /usr/local/lib/python3.9/dist-packages (from mmdet>=3.0.0rc0) (2.0.1)\n", - "Requirement already satisfied: mmengine<1.0.0,>=0.7.1 in /usr/local/lib/python3.9/dist-packages (from mmdet>=3.0.0rc0) (0.7.2)\n", - "Requirement already satisfied: mmcv<2.1.0,>=2.0.0rc4 in /usr/local/lib/python3.9/dist-packages (from mmdet>=3.0.0rc0) (2.0.0)\n", - "Requirement already satisfied: pyyaml in /usr/local/lib/python3.9/dist-packages (from mmcv<2.1.0,>=2.0.0rc4->mmdet>=3.0.0rc0) (6.0)\n", - "Requirement already satisfied: packaging in /usr/local/lib/python3.9/dist-packages (from mmcv<2.1.0,>=2.0.0rc4->mmdet>=3.0.0rc0) (23.0)\n", - "Requirement already satisfied: opencv-python>=3 in /usr/local/lib/python3.9/dist-packages (from mmcv<2.1.0,>=2.0.0rc4->mmdet>=3.0.0rc0) (4.7.0.72)\n", - "Requirement already satisfied: addict in /usr/local/lib/python3.9/dist-packages (from mmcv<2.1.0,>=2.0.0rc4->mmdet>=3.0.0rc0) (2.4.0)\n", - "Requirement already satisfied: Pillow in /usr/local/lib/python3.9/dist-packages (from mmcv<2.1.0,>=2.0.0rc4->mmdet>=3.0.0rc0) (8.4.0)\n", - "Requirement already satisfied: yapf in /usr/local/lib/python3.9/dist-packages (from mmcv<2.1.0,>=2.0.0rc4->mmdet>=3.0.0rc0) (0.32.0)\n", - "Requirement already satisfied: termcolor in /usr/local/lib/python3.9/dist-packages (from mmengine<1.0.0,>=0.7.1->mmdet>=3.0.0rc0) (2.2.0)\n", - "Requirement already satisfied: rich in /usr/local/lib/python3.9/dist-packages (from mmengine<1.0.0,>=0.7.1->mmdet>=3.0.0rc0) (13.3.3)\n", - "Requirement already satisfied: importlib-resources>=3.2.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmdet>=3.0.0rc0) (5.12.0)\n", - "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmdet>=3.0.0rc0) (2.8.2)\n", - "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmdet>=3.0.0rc0) (4.39.3)\n", - "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmdet>=3.0.0rc0) (1.4.4)\n", - "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmdet>=3.0.0rc0) (0.11.0)\n", - "Requirement already satisfied: contourpy>=1.0.1 in 
/usr/local/lib/python3.9/dist-packages (from matplotlib->mmdet>=3.0.0rc0) (1.0.7)\n", - "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmdet>=3.0.0rc0) (3.0.9)\n", - "Requirement already satisfied: zipp>=3.1.0 in /usr/local/lib/python3.9/dist-packages (from importlib-resources>=3.2.0->matplotlib->mmdet>=3.0.0rc0) (3.15.0)\n", - "Requirement already satisfied: markdown-it-py<3.0.0,>=2.2.0 in /usr/local/lib/python3.9/dist-packages (from rich->mmengine<1.0.0,>=0.7.1->mmdet>=3.0.0rc0) (2.2.0)\n", - "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.9/dist-packages (from rich->mmengine<1.0.0,>=0.7.1->mmdet>=3.0.0rc0) (2.14.0)\n", - "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.9/dist-packages (from markdown-it-py<3.0.0,>=2.2.0->rich->mmengine<1.0.0,>=0.7.1->mmdet>=3.0.0rc0) (0.1.2)\n", - "Installing collected packages: terminaltables, mmdet\n", - "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", - " warnings.warn(\n", - "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", - " warnings.warn(\n", - "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", - " warnings.warn(\n", - "Successfully installed mmdet-3.0.0 terminaltables-3.1.10\n" - ] - } - ], - "source": [ - "# install MMEngine, MMCV and MMDetection using MIM\n", - "%pip install -U openmim\n", - "!mim install mmengine\n", - "!mim install \"mmcv>=2.0.0\"\n", - "!mim install \"mmdet>=3.0.0\"" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "42hRcloJhE2N", - "outputId": "9175e011-82c0-438d-f378-264e8467eb09" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", - "Collecting git+https://github.com/jin-s13/xtcocoapi\n", - " Cloning https://github.com/jin-s13/xtcocoapi to /tmp/pip-req-build-6ts8xw10\n", - " Running command git clone --filter=blob:none --quiet https://github.com/jin-s13/xtcocoapi /tmp/pip-req-build-6ts8xw10\n", - " Resolved https://github.com/jin-s13/xtcocoapi to commit 86a60cab276e619dac5d22834a36dceaf7aa0a38\n", - " Preparing metadata (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", - "Requirement already satisfied: setuptools>=18.0 in /usr/local/lib/python3.9/dist-packages (from xtcocotools==1.13) (67.6.1)\n", - "Requirement already satisfied: cython>=0.27.3 in /usr/local/lib/python3.9/dist-packages (from xtcocotools==1.13) (0.29.34)\n", - "Requirement already satisfied: matplotlib>=2.1.0 in /usr/local/lib/python3.9/dist-packages (from xtcocotools==1.13) (3.7.1)\n", - "Requirement already satisfied: numpy>=1.20.0 in /usr/local/lib/python3.9/dist-packages (from xtcocotools==1.13) (1.22.4)\n", - "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (1.4.4)\n", - "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (4.39.3)\n", - "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (1.0.7)\n", - "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (0.11.0)\n", - "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (23.0)\n", - "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (3.0.9)\n", - "Requirement already satisfied: importlib-resources>=3.2.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (5.12.0)\n", - "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (2.8.2)\n", - "Requirement already satisfied: pillow>=6.2.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (8.4.0)\n", - "Requirement already satisfied: zipp>=3.1.0 in /usr/local/lib/python3.9/dist-packages (from importlib-resources>=3.2.0->matplotlib>=2.1.0->xtcocotools==1.13) (3.15.0)\n", - "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.9/dist-packages (from python-dateutil>=2.7->matplotlib>=2.1.0->xtcocotools==1.13) (1.16.0)\n", - "Building wheels for collected packages: xtcocotools\n", - " Building wheel for xtcocotools (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", - " Created wheel for xtcocotools: filename=xtcocotools-1.13-cp39-cp39-linux_x86_64.whl size=402078 sha256=e6a1d4ea868ca2cbd8151f85509641b20b24745a9b8b353348ba8386c35ee6c6\n", - " Stored in directory: /tmp/pip-ephem-wheel-cache-a15wpqzs/wheels/3f/df/8b/d3eff2ded4b03a665d977a0baa328d9efa2f9ac9971929a222\n", - "Successfully built xtcocotools\n", - "Installing collected packages: xtcocotools\n", - "Successfully installed xtcocotools-1.13\n" - ] - } - ], - "source": [ - "# for better Colab compatibility, install xtcocotools from source\n", - "%pip install git+https://github.com/jin-s13/xtcocoapi" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "lzuSKOjMvJZu", - "outputId": "d6a7a3f8-2d96-40a6-a7c4-65697e18ffc9" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Cloning into 'mmpose'...\n", - "remote: Enumerating objects: 26225, done.\u001b[K\n", - "remote: Counting objects: 100% (97/97), done.\u001b[K\n", - "remote: Compressing objects: 100% (67/67), done.\u001b[K\n", - "remote: Total 26225 (delta 33), reused 67 (delta 28), pack-reused 26128\u001b[K\n", - "Receiving objects: 100% (26225/26225), 28.06 MiB | 13.36 MiB/s, done.\n", - "Resolving deltas: 100% (18673/18673), done.\n", - "/content/mmpose\n", - "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", - "Requirement already satisfied: numpy in /usr/local/lib/python3.9/dist-packages (from -r requirements/build.txt (line 2)) (1.22.4)\n", - "Requirement already satisfied: torch>=1.6 in /usr/local/lib/python3.9/dist-packages (from -r requirements/build.txt (line 3)) (2.0.0+cu118)\n", - "Collecting chumpy\n", - " Downloading chumpy-0.70.tar.gz (50 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m50.6/50.6 kB\u001b[0m \u001b[31m2.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Preparing metadata (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", - "Collecting json_tricks\n", - " Downloading json_tricks-3.16.1-py2.py3-none-any.whl (27 kB)\n", - "Requirement already satisfied: matplotlib in /usr/local/lib/python3.9/dist-packages (from -r requirements/runtime.txt (line 3)) (3.7.1)\n", - "Collecting munkres\n", - " Downloading munkres-1.1.4-py2.py3-none-any.whl (7.0 kB)\n", - "Requirement already satisfied: opencv-python in /usr/local/lib/python3.9/dist-packages (from -r requirements/runtime.txt (line 6)) (4.7.0.72)\n", - "Requirement already satisfied: pillow in /usr/local/lib/python3.9/dist-packages (from -r requirements/runtime.txt (line 7)) (8.4.0)\n", - "Requirement already satisfied: scipy in /usr/local/lib/python3.9/dist-packages (from -r requirements/runtime.txt (line 8)) (1.10.1)\n", - "Requirement already satisfied: torchvision in /usr/local/lib/python3.9/dist-packages (from -r requirements/runtime.txt (line 9)) (0.15.1+cu118)\n", - "Requirement already satisfied: xtcocotools>=1.12 in /usr/local/lib/python3.9/dist-packages (from -r requirements/runtime.txt (line 10)) (1.13)\n", - "Collecting coverage\n", - " Downloading coverage-7.2.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (227 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m227.5/227.5 kB\u001b[0m \u001b[31m27.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting flake8\n", - " Downloading flake8-6.0.0-py2.py3-none-any.whl (57 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m57.8/57.8 kB\u001b[0m \u001b[31m6.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting interrogate\n", - " Downloading interrogate-1.5.0-py3-none-any.whl (45 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m45.3/45.3 kB\u001b[0m \u001b[31m5.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting isort==4.3.21\n", - " Downloading isort-4.3.21-py2.py3-none-any.whl (42 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m42.3/42.3 kB\u001b[0m \u001b[31m5.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting parameterized\n", - " Downloading parameterized-0.9.0-py2.py3-none-any.whl (20 kB)\n", - "Requirement already satisfied: pytest in /usr/local/lib/python3.9/dist-packages (from -r requirements/tests.txt (line 6)) (7.2.2)\n", - "Collecting pytest-runner\n", - " Downloading pytest_runner-6.0.0-py3-none-any.whl (7.2 kB)\n", - "Collecting xdoctest>=0.10.0\n", - " Downloading xdoctest-1.1.1-py3-none-any.whl (137 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m137.6/137.6 kB\u001b[0m \u001b[31m14.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: yapf in /usr/local/lib/python3.9/dist-packages (from -r requirements/tests.txt (line 9)) (0.32.0)\n", - "Requirement already satisfied: requests in /usr/local/lib/python3.9/dist-packages (from -r requirements/optional.txt (line 1)) (2.27.1)\n", - "Requirement already satisfied: filelock in /usr/local/lib/python3.9/dist-packages (from torch>=1.6->-r requirements/build.txt (line 3)) (3.11.0)\n", - "Requirement already satisfied: networkx in /usr/local/lib/python3.9/dist-packages (from torch>=1.6->-r requirements/build.txt (line 3)) (3.1)\n", - "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.9/dist-packages (from torch>=1.6->-r 
requirements/build.txt (line 3)) (4.5.0)\n", - "Requirement already satisfied: jinja2 in /usr/local/lib/python3.9/dist-packages (from torch>=1.6->-r requirements/build.txt (line 3)) (3.1.2)\n", - "Requirement already satisfied: triton==2.0.0 in /usr/local/lib/python3.9/dist-packages (from torch>=1.6->-r requirements/build.txt (line 3)) (2.0.0)\n", - "Requirement already satisfied: sympy in /usr/local/lib/python3.9/dist-packages (from torch>=1.6->-r requirements/build.txt (line 3)) (1.11.1)\n", - "Requirement already satisfied: cmake in /usr/local/lib/python3.9/dist-packages (from triton==2.0.0->torch>=1.6->-r requirements/build.txt (line 3)) (3.25.2)\n", - "Requirement already satisfied: lit in /usr/local/lib/python3.9/dist-packages (from triton==2.0.0->torch>=1.6->-r requirements/build.txt (line 3)) (16.0.1)\n", - "Requirement already satisfied: six>=1.11.0 in /usr/local/lib/python3.9/dist-packages (from chumpy->-r requirements/runtime.txt (line 1)) (1.16.0)\n", - "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->-r requirements/runtime.txt (line 3)) (4.39.3)\n", - "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->-r requirements/runtime.txt (line 3)) (3.0.9)\n", - "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->-r requirements/runtime.txt (line 3)) (1.4.4)\n", - "Requirement already satisfied: importlib-resources>=3.2.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->-r requirements/runtime.txt (line 3)) (5.12.0)\n", - "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.9/dist-packages (from matplotlib->-r requirements/runtime.txt (line 3)) (2.8.2)\n", - "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->-r requirements/runtime.txt (line 3)) (23.0)\n", - "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->-r requirements/runtime.txt (line 3)) (1.0.7)\n", - "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.9/dist-packages (from matplotlib->-r requirements/runtime.txt (line 3)) (0.11.0)\n", - "Requirement already satisfied: setuptools>=18.0 in /usr/local/lib/python3.9/dist-packages (from xtcocotools>=1.12->-r requirements/runtime.txt (line 10)) (67.6.1)\n", - "Requirement already satisfied: cython>=0.27.3 in /usr/local/lib/python3.9/dist-packages (from xtcocotools>=1.12->-r requirements/runtime.txt (line 10)) (0.29.34)\n", - "Collecting pyflakes<3.1.0,>=3.0.0\n", - " Downloading pyflakes-3.0.1-py2.py3-none-any.whl (62 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.8/62.8 kB\u001b[0m \u001b[31m5.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting pycodestyle<2.11.0,>=2.10.0\n", - " Downloading pycodestyle-2.10.0-py2.py3-none-any.whl (41 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m41.3/41.3 kB\u001b[0m \u001b[31m4.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting mccabe<0.8.0,>=0.7.0\n", - " Downloading mccabe-0.7.0-py2.py3-none-any.whl (7.3 kB)\n", - "Collecting py\n", - " Downloading py-1.11.0-py2.py3-none-any.whl (98 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m98.7/98.7 kB\u001b[0m \u001b[31m11.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - 
"\u001b[?25hRequirement already satisfied: colorama in /usr/local/lib/python3.9/dist-packages (from interrogate->-r requirements/tests.txt (line 3)) (0.4.6)\n", - "Requirement already satisfied: toml in /usr/local/lib/python3.9/dist-packages (from interrogate->-r requirements/tests.txt (line 3)) (0.10.2)\n", - "Requirement already satisfied: attrs in /usr/local/lib/python3.9/dist-packages (from interrogate->-r requirements/tests.txt (line 3)) (22.2.0)\n", - "Requirement already satisfied: tabulate in /usr/local/lib/python3.9/dist-packages (from interrogate->-r requirements/tests.txt (line 3)) (0.8.10)\n", - "Requirement already satisfied: click>=7.1 in /usr/local/lib/python3.9/dist-packages (from interrogate->-r requirements/tests.txt (line 3)) (8.1.3)\n", - "Requirement already satisfied: tomli>=1.0.0 in /usr/local/lib/python3.9/dist-packages (from pytest->-r requirements/tests.txt (line 6)) (2.0.1)\n", - "Requirement already satisfied: pluggy<2.0,>=0.12 in /usr/local/lib/python3.9/dist-packages (from pytest->-r requirements/tests.txt (line 6)) (1.0.0)\n", - "Requirement already satisfied: iniconfig in /usr/local/lib/python3.9/dist-packages (from pytest->-r requirements/tests.txt (line 6)) (2.0.0)\n", - "Requirement already satisfied: exceptiongroup>=1.0.0rc8 in /usr/local/lib/python3.9/dist-packages (from pytest->-r requirements/tests.txt (line 6)) (1.1.1)\n", - "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.9/dist-packages (from requests->-r requirements/optional.txt (line 1)) (1.26.15)\n", - "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.9/dist-packages (from requests->-r requirements/optional.txt (line 1)) (2022.12.7)\n", - "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.9/dist-packages (from requests->-r requirements/optional.txt (line 1)) (2.0.12)\n", - "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.9/dist-packages (from requests->-r requirements/optional.txt (line 1)) (3.4)\n", - "Requirement already satisfied: zipp>=3.1.0 in /usr/local/lib/python3.9/dist-packages (from importlib-resources>=3.2.0->matplotlib->-r requirements/runtime.txt (line 3)) (3.15.0)\n", - "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.9/dist-packages (from jinja2->torch>=1.6->-r requirements/build.txt (line 3)) (2.1.2)\n", - "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.9/dist-packages (from sympy->torch>=1.6->-r requirements/build.txt (line 3)) (1.3.0)\n", - "Building wheels for collected packages: chumpy\n", - " Building wheel for chumpy (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", - " Created wheel for chumpy: filename=chumpy-0.70-py3-none-any.whl size=58282 sha256=ccde33ce99f135241a3f9ed380871cf8e4a569053d21b0ceba97809ddf3b26c8\n", - " Stored in directory: /root/.cache/pip/wheels/71/b5/d3/bbff0d638d797944856371a4ee326f9ffb1829083a383bba77\n", - "Successfully built chumpy\n", - "Installing collected packages: munkres, json_tricks, xdoctest, pytest-runner, pyflakes, pycodestyle, py, parameterized, mccabe, isort, coverage, interrogate, flake8, chumpy\n", - "Successfully installed chumpy-0.70 coverage-7.2.3 flake8-6.0.0 interrogate-1.5.0 isort-4.3.21 json_tricks-3.16.1 mccabe-0.7.0 munkres-1.1.4 parameterized-0.9.0 py-1.11.0 pycodestyle-2.10.0 pyflakes-3.0.1 pytest-runner-6.0.0 xdoctest-1.1.1\n", - "Using pip 23.0.1 from /usr/local/lib/python3.9/dist-packages/pip (python 3.9)\n", - "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", - "Obtaining file:///content/mmpose\n", - " Running command python setup.py egg_info\n", - " running egg_info\n", - " creating /tmp/pip-pip-egg-info-tatkegdw/mmpose.egg-info\n", - " writing /tmp/pip-pip-egg-info-tatkegdw/mmpose.egg-info/PKG-INFO\n", - " writing dependency_links to /tmp/pip-pip-egg-info-tatkegdw/mmpose.egg-info/dependency_links.txt\n", - " writing requirements to /tmp/pip-pip-egg-info-tatkegdw/mmpose.egg-info/requires.txt\n", - " writing top-level names to /tmp/pip-pip-egg-info-tatkegdw/mmpose.egg-info/top_level.txt\n", - " writing manifest file '/tmp/pip-pip-egg-info-tatkegdw/mmpose.egg-info/SOURCES.txt'\n", - " reading manifest file '/tmp/pip-pip-egg-info-tatkegdw/mmpose.egg-info/SOURCES.txt'\n", - " reading manifest template 'MANIFEST.in'\n", - " warning: no files found matching 'mmpose/.mim/model-index.yml'\n", - " warning: no files found matching '*.py' under directory 'mmpose/.mim/configs'\n", - " warning: no files found matching '*.yml' under directory 'mmpose/.mim/configs'\n", - " warning: no files found matching '*.py' under directory 'mmpose/.mim/tools'\n", - " warning: no files found matching '*.sh' under directory 'mmpose/.mim/tools'\n", - " warning: no files found matching '*.py' under directory 'mmpose/.mim/demo'\n", - " adding license file 'LICENSE'\n", - " writing manifest file '/tmp/pip-pip-egg-info-tatkegdw/mmpose.egg-info/SOURCES.txt'\n", - " Preparing metadata (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", - "Requirement already satisfied: chumpy in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (0.70)\n", - "Requirement already satisfied: json_tricks in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (3.16.1)\n", - "Requirement already satisfied: matplotlib in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (3.7.1)\n", - "Requirement already satisfied: munkres in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (1.1.4)\n", - "Requirement already satisfied: numpy in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (1.22.4)\n", - "Requirement already satisfied: opencv-python in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (4.7.0.72)\n", - "Requirement already satisfied: pillow in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (8.4.0)\n", - "Requirement already satisfied: scipy in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (1.10.1)\n", - "Requirement already satisfied: torchvision in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (0.15.1+cu118)\n", - "Requirement already satisfied: xtcocotools>=1.12 in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (1.13)\n", - "Requirement already satisfied: cython>=0.27.3 in /usr/local/lib/python3.9/dist-packages (from xtcocotools>=1.12->mmpose==1.0.0) (0.29.34)\n", - "Requirement already satisfied: setuptools>=18.0 in /usr/local/lib/python3.9/dist-packages (from xtcocotools>=1.12->mmpose==1.0.0) (67.6.1)\n", - "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmpose==1.0.0) (1.0.7)\n", - "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmpose==1.0.0) (0.11.0)\n", - "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmpose==1.0.0) (2.8.2)\n", - "Requirement already satisfied: importlib-resources>=3.2.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmpose==1.0.0) (5.12.0)\n", - "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmpose==1.0.0) (23.0)\n", - "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmpose==1.0.0) (4.39.3)\n", - "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmpose==1.0.0) (1.4.4)\n", - "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmpose==1.0.0) (3.0.9)\n", - "Requirement already satisfied: six>=1.11.0 in /usr/local/lib/python3.9/dist-packages (from chumpy->mmpose==1.0.0) (1.16.0)\n", - "Requirement already satisfied: requests in /usr/local/lib/python3.9/dist-packages (from torchvision->mmpose==1.0.0) (2.27.1)\n", - "Requirement already satisfied: torch==2.0.0 in /usr/local/lib/python3.9/dist-packages (from torchvision->mmpose==1.0.0) (2.0.0+cu118)\n", - "Requirement already satisfied: filelock in /usr/local/lib/python3.9/dist-packages (from torch==2.0.0->torchvision->mmpose==1.0.0) (3.11.0)\n", - "Requirement already satisfied: jinja2 in /usr/local/lib/python3.9/dist-packages (from torch==2.0.0->torchvision->mmpose==1.0.0) (3.1.2)\n", - "Requirement already satisfied: networkx in /usr/local/lib/python3.9/dist-packages (from torch==2.0.0->torchvision->mmpose==1.0.0) (3.1)\n", - "Requirement already satisfied: typing-extensions in 
/usr/local/lib/python3.9/dist-packages (from torch==2.0.0->torchvision->mmpose==1.0.0) (4.5.0)\n", - "Requirement already satisfied: triton==2.0.0 in /usr/local/lib/python3.9/dist-packages (from torch==2.0.0->torchvision->mmpose==1.0.0) (2.0.0)\n", - "Requirement already satisfied: sympy in /usr/local/lib/python3.9/dist-packages (from torch==2.0.0->torchvision->mmpose==1.0.0) (1.11.1)\n", - "Requirement already satisfied: cmake in /usr/local/lib/python3.9/dist-packages (from triton==2.0.0->torch==2.0.0->torchvision->mmpose==1.0.0) (3.25.2)\n", - "Requirement already satisfied: lit in /usr/local/lib/python3.9/dist-packages (from triton==2.0.0->torch==2.0.0->torchvision->mmpose==1.0.0) (16.0.1)\n", - "Requirement already satisfied: zipp>=3.1.0 in /usr/local/lib/python3.9/dist-packages (from importlib-resources>=3.2.0->matplotlib->mmpose==1.0.0) (3.15.0)\n", - "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.9/dist-packages (from requests->torchvision->mmpose==1.0.0) (2022.12.7)\n", - "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.9/dist-packages (from requests->torchvision->mmpose==1.0.0) (3.4)\n", - "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.9/dist-packages (from requests->torchvision->mmpose==1.0.0) (1.26.15)\n", - "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.9/dist-packages (from requests->torchvision->mmpose==1.0.0) (2.0.12)\n", - "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.9/dist-packages (from jinja2->torch==2.0.0->torchvision->mmpose==1.0.0) (2.1.2)\n", - "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.9/dist-packages (from sympy->torch==2.0.0->torchvision->mmpose==1.0.0) (1.3.0)\n", - "Installing collected packages: mmpose\n", - " Running setup.py develop for mmpose\n", - " Running command python setup.py develop\n", - " running develop\n", - " /usr/local/lib/python3.9/dist-packages/setuptools/command/easy_install.py:144: EasyInstallDeprecationWarning: easy_install command is deprecated. Use build and pip and other standards-based tools.\n", - " warnings.warn(\n", - " /usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. 
Use build and pip and other standards-based tools.\n", - " warnings.warn(\n", - " running egg_info\n", - " creating mmpose.egg-info\n", - " writing mmpose.egg-info/PKG-INFO\n", - " writing dependency_links to mmpose.egg-info/dependency_links.txt\n", - " writing requirements to mmpose.egg-info/requires.txt\n", - " writing top-level names to mmpose.egg-info/top_level.txt\n", - " writing manifest file 'mmpose.egg-info/SOURCES.txt'\n", - " reading manifest file 'mmpose.egg-info/SOURCES.txt'\n", - " reading manifest template 'MANIFEST.in'\n", - " adding license file 'LICENSE'\n", - " writing manifest file 'mmpose.egg-info/SOURCES.txt'\n", - " running build_ext\n", - " Creating /usr/local/lib/python3.9/dist-packages/mmpose.egg-link (link to .)\n", - " Adding mmpose 1.0.0 to easy-install.pth file\n", - "\n", - " Installed /content/mmpose\n", - "Successfully installed mmpose-1.0.0\n" - ] - } - ], - "source": [ - "!git clone https://github.com/open-mmlab/mmpose.git\n", - "# The master branch is version 1.x \n", - "%cd mmpose\n", - "%pip install -r requirements.txt\n", - "%pip install -v -e .\n", - "# \"-v\" means verbose, or more output\n", - "# \"-e\" means installing a project in editable mode,\n", - "# thus any local modifications made to the code will take effect without reinstallation." - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "Miy2zVRcw6kL", - "outputId": "1cbae5a0-249a-4cb2-980a-7db592c759da" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "torch version: 2.0.0+cu118 True\n", - "torchvision version: 0.15.1+cu118\n", - "mmpose version: 1.0.0\n", - "cuda version: 11.8\n", - "compiler information: GCC 9.3\n" - ] - } - ], - "source": [ - "# Check Pytorch installation\n", - "import torch, torchvision\n", - "\n", - "print('torch version:', torch.__version__, torch.cuda.is_available())\n", - "print('torchvision version:', torchvision.__version__)\n", - "\n", - "# Check MMPose installation\n", - "import mmpose\n", - "\n", - "print('mmpose version:', mmpose.__version__)\n", - "\n", - "# Check mmcv installation\n", - "from mmcv.ops import get_compiling_cuda_version, get_compiler_version\n", - "\n", - "print('cuda version:', get_compiling_cuda_version())\n", - "print('compiler information:', get_compiler_version())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "id": "r2bf94XpyFnk" - }, - "source": [ - "## Inference with an MMPose model\n", - "\n", - "MMPose provides high-level APIs for model inference and training." 
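Besides the explicit detector + pose-estimator pipeline built in the next cells, MMPose 1.x also ships a high-level `MMPoseInferencer` wrapper that bundles detection, pose estimation and visualization behind a single call. The following is a minimal sketch under stated assumptions: the `'human'` alias, the `vis_out_dir` keyword and the output keys are assumptions about the 1.x inferencer API and may need adjusting to the installed version.

```python
# Minimal sketch of the high-level inferencer, assuming the MMPose 1.x API.
from mmpose.apis import MMPoseInferencer

# 'human' is an assumed model alias that bundles a person detector with a
# body-keypoint estimator; pass an explicit config/checkpoint pair instead
# if the alias is unavailable in your installed version.
inferencer = MMPoseInferencer('human')

# The inferencer yields results lazily, one item per input image.
result_generator = inferencer(
    'tests/data/coco/000000197388.jpg',  # same test image used below
    vis_out_dir='vis_results')           # assumed keyword for saving visualizations
result = next(result_generator)
print(result.keys())  # typically 'visualization' and 'predictions'
```

The explicit pipeline in the following cells remains useful when finer control is needed, for example custom bbox score thresholds, NMS settings, or heatmap visualization.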
- ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "JjTt4LZAx_lK", - "outputId": "485b62c4-226b-45fb-a864-99c2a029353c" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Loads checkpoint by http backend from path: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Downloading: \"https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth\" to /root/.cache/torch/hub/checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Loads checkpoint by http backend from path: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Downloading: \"https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth\" to /root/.cache/torch/hub/checkpoints/hrnet_w32_coco_256x192-c78dce93_20200708.pth\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "04/13 16:14:37 - mmengine - WARNING - `Visualizer` backend is not initialized because save_dir is None.\n" - ] - } - ], - "source": [ - "import mmcv\n", - "from mmcv import imread\n", - "import mmengine\n", - "from mmengine.registry import init_default_scope\n", - "import numpy as np\n", - "\n", - "from mmpose.apis import inference_topdown\n", - "from mmpose.apis import init_model as init_pose_estimator\n", - "from mmpose.evaluation.functional import nms\n", - "from mmpose.registry import VISUALIZERS\n", - "from mmpose.structures import merge_data_samples\n", - "\n", - "try:\n", - " from mmdet.apis import inference_detector, init_detector\n", - " has_mmdet = True\n", - "except (ImportError, ModuleNotFoundError):\n", - " has_mmdet = False\n", - "\n", - "local_runtime = False\n", - "\n", - "try:\n", - " from google.colab.patches import cv2_imshow # for image visualization in colab\n", - "except:\n", - " local_runtime = True\n", - "\n", - "img = 'tests/data/coco/000000197388.jpg'\n", - "pose_config = 'configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py'\n", - "pose_checkpoint = 'https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth'\n", - "det_config = 'demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py'\n", - "det_checkpoint = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth'\n", - "\n", - "device = 'cuda:0'\n", - "cfg_options = dict(model=dict(test_cfg=dict(output_heatmaps=True)))\n", - "\n", - "\n", - "# build detector\n", - "detector = init_detector(\n", - " det_config,\n", - " det_checkpoint,\n", - " device=device\n", - ")\n", - "\n", - "\n", - "# build pose estimator\n", - "pose_estimator = init_pose_estimator(\n", - " pose_config,\n", - " pose_checkpoint,\n", - " device=device,\n", - " cfg_options=cfg_options\n", - ")\n", - "\n", - "# init visualizer\n", - "pose_estimator.cfg.visualizer.radius = 3\n", - "pose_estimator.cfg.visualizer.line_width = 1\n", - "visualizer = VISUALIZERS.build(pose_estimator.cfg.visualizer)\n", - "# the dataset_meta is loaded from the checkpoint 
and\n", - "# then pass to the model in init_pose_estimator\n", - "visualizer.set_dataset_meta(pose_estimator.dataset_meta)" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": { - "id": "tsSM0NRPEG1Z" - }, - "outputs": [], - "source": [ - "\n", - "def visualize_img(img_path, detector, pose_estimator, visualizer,\n", - " show_interval, out_file):\n", - " \"\"\"Visualize predicted keypoints (and heatmaps) of one image.\"\"\"\n", - "\n", - " # predict bbox\n", - " scope = detector.cfg.get('default_scope', 'mmdet')\n", - " if scope is not None:\n", - " init_default_scope(scope)\n", - " detect_result = inference_detector(detector, img_path)\n", - " pred_instance = detect_result.pred_instances.cpu().numpy()\n", - " bboxes = np.concatenate(\n", - " (pred_instance.bboxes, pred_instance.scores[:, None]), axis=1)\n", - " bboxes = bboxes[np.logical_and(pred_instance.labels == 0,\n", - " pred_instance.scores > 0.3)]\n", - " bboxes = bboxes[nms(bboxes, 0.3)][:, :4]\n", - "\n", - " # predict keypoints\n", - " pose_results = inference_topdown(pose_estimator, img_path, bboxes)\n", - " data_samples = merge_data_samples(pose_results)\n", - "\n", - " # show the results\n", - " img = mmcv.imread(img_path, channel_order='rgb')\n", - "\n", - " visualizer.add_datasample(\n", - " 'result',\n", - " img,\n", - " data_sample=data_samples,\n", - " draw_gt=False,\n", - " draw_heatmap=True,\n", - " draw_bbox=True,\n", - " show=False,\n", - " wait_time=show_interval,\n", - " out_file=out_file,\n", - " kpt_thr=0.3)" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "ogj5h9x-HiMA", - "outputId": "71452169-c16a-4a61-b558-f7518fcefaa0" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "04/13 16:15:22 - mmengine - WARNING - The current default scope \"mmpose\" is not \"mmdet\", `init_default_scope` will force set the currentdefault scope to \"mmdet\".\n", - "04/13 16:15:29 - mmengine - WARNING - The current default scope \"mmdet\" is not \"mmpose\", `init_default_scope` will force set the currentdefault scope to \"mmpose\".\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/usr/local/lib/python3.9/dist-packages/mmengine/visualization/visualizer.py:664: UserWarning: Warning: The circle is out of bounds, the drawn circle may not be in the image\n", - " warnings.warn(\n", - "/usr/local/lib/python3.9/dist-packages/mmengine/visualization/visualizer.py:741: UserWarning: Warning: The bbox is out of bounds, the drawn bbox may not be in the image\n", - " warnings.warn(\n", - "/usr/local/lib/python3.9/dist-packages/mmengine/visualization/visualizer.py:812: UserWarning: Warning: The polygon is out of bounds, the drawn polygon may not be in the image\n", - " warnings.warn(\n" - ] - } - ], - "source": [ - "visualize_img(\n", - " img,\n", - " detector,\n", - " pose_estimator,\n", - " visualizer,\n", - " show_interval=0,\n", - " out_file=None)\n", - "\n", - "vis_result = visualizer.get_image()" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 801 - }, - "id": "CEYxupWT3aJY", - "outputId": "05acd979-25b1-4b18-8738-d6b9edc6bfe1" - }, - "outputs": [ - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAoAAAAMQCAIAAAA4vkODAAEAAElEQVR4nGT9TcitW7ctBrXW+3ie+b5r7/2dv3s9Cf6gci8oJqIBTSCgFgLWBAuCBAtBJDGKBUGwYkGwFhAJRANqzUKCJBVBJQoWggkxSAzGGDT+BGIM995zzve313rnfMborVno43nXPnGx+djs713znfOZY/Sf1ltrnf+D/+k/TRw2hYgEacoACAFAEOh/AwBQiZREIzOBKElUDHKWiSUAkZkjULZRRxzXdZEkKQlAcGSEbAC2l/fLS+j/QjLh/nnbBds8gnPOzByPsdZaS8zIzLxmZhbcL26CBiBj9N8FAkAYQEjiMEn8//2ZS0EmaDsikABQVUccdhEAWaDs/jHismkEmWRCZVQYzzDsZESEiaqyfeQYVSJsgkRGf9IwSPp+FP3GGAZAR/9f/bn6/5L0NA9mWpdLRJYJVXKYERFkZl7XM48hKTMDadtARAAwimQEMo8jR2YOYozxeDtHHg5+fukAAkk6QQCfX9M+CPczvL8vijDhfiiG+pUM3n9hWZIMSULJuv9u/4kA6YT7x4EEA7FgAP29fP5e2+J+dH1c+z9+vr1l9fc4GABUy2C/pbIA0EDJ0qRdotxHsaokVZXNqmmbGbarCkAyzFhrLYkZJCFTRB+REkmGcb8lynGMpYoIRFTV4LD0/v5e16tgwcwMoKrgEBFQZpJ8PackoyLCNjNd9ThOSWTKhj0YLyyS0PrFV5NwAIgIUSHaxURE2IyIzCRNh+05Z0Q8Ho/ruj4+PoAwolARkXRmRoTWjIgVIaEfBckxoqqqisYYo7+XIw5JY4w159sxrut6rRdzxHgzUFXl9cZ9IDsmOCiUiVFjzlcOAlprRYyRp01jRkSHgjyO8zwhvT6eC6QcEV/e3799+wZgrpWZkSDZT6CvDFwkVwByIscYQUrqm6UOVAAcn3EhIqTVTz5GSiKyqqLjY/+5j2Lsy8IFH3EEOS3bA76gY7kYDiYIkbLTRWRmHzAANs/zzWuSJKBCRETEaz5zMDggTqzMJHOt/V0nrKoIyCSgESmICAMZYFrriFSH6AisSvCiOxJGjIiwSxIggZlZdZ830qXzPK/1conM4zgEV1UEXOqzFIGIfRKAGIzV8bcwMvfrrCKZSUsdKCYNMkjXsk0mEBmHjXLZFSOllWBatlfARDrDKNTb8fanf/qn/8a/8W+MMRwkWdaPb4/n85mZ11JmQisi+liutfrYV5VWRQySJTloCgg6woDLLvAgLangPnhLlcmhALk+Dy40Aq5VDP4iNNVyB7rM/PxfAI/HY72ux3m+6spMTXVsH1VlIfMolYExYlmZGQwA5f5liJ2FWcsMkKwqwyJASDoMAQEQDINGAAD7O/iMmxFhVJXvfIDBILnWkvo/igIiCAUNIjkKLimOwcCc01VH5sijqmbEtHdU7axAReRwSMo7tQGwC6BgAiQtguL3oN1xX5kha63VuUHV554FA+b9WkrKtEGAFJIwBR2ijSRdIpnMVQsuRZDsJ0b3TaeDAZMEO6LKtnWnlv6uIzpgdcggMiiUVOvxwxeioCVi8ChNR7xer8xYa53naXtxgiDSYSBQgfsGiiLtSAAwRcCOJEEEgWBXYtgZvB/lfu++c2EiQRn9HffPkSi6n23n/k5MZdUqr+pT11/ZjmbHQUCfLwuIgOwAvZPrL+umBGF85lyS7sRsR0Qw+q8oHCIZABIo+LPiIbnfNO+DA+6Ya6wdBdSpt49rCUiaHOAQSRZQCURw2bkLD8G2AxT8Wj7GGJGU0xpnCpi4GCSJrrr8/bmutdZamdm/dNUieRzHkvrmIwhakquLHdi/eBD9Xf6lQ61MgrYdkXCogGQSJZkQ/JrXqmX0Q6skR2RVLa+OsZmJ8loLiEAEiCWXAmRGRvR7lrSsDBbxWpMZ53hfwn6GZMaYFggaSRGkMAzbgoMMEBGZKbBguc6RVQU5Mqrq559/Zh+KvofwnC9pkTzO7GQAwNIuZ0mAEUF3ZEBX0Z3Rw9DdX+Qdoz7jVT9KSXC4K0My45AkyxB3EeFdPfPO+vi84UBGEEXKiDACkh2uQkQcxwFpTQ2iIkBB7LjP8EhG1+PB4QiwvDuTiFgqEzSDLCKWVvJAKIBVyB2rGIZBW7aiGyF39P9lK9IhHehYFWk+3t6fz+c4h2CUvCy4T+yOs7bE8pJkIWEkhrCqRgTp13xFBDM6vhCIiMgol2yhDB8RMG2s6xpjZIAMQUGyDHb0sCxJwZGUUf/6/+P/9sMPP9g2HJFVijG6xg0TqyKSqzJzWUfujFtVOdKMudYR6SAibUP3TQTlIkMMuPrJBEhHRJQUkZ83TdIxxnUVum6NPm9Ik5mJHV8+Q1zBC6attYjo2D4yjyXleWjOCIwxul5YNTtukuzskAyAJiNjt7MiCUslhYQcgBkmtMN37JJqJ+xuLh2lAgoA7K5HAISVoGnv43v3TuHso2OtVQE8Hg9J1XVNpCQKRgAMmhwZeSRr7WsmdW3Qp0skibT1ywRMRrg7F4EeOfpOFgvIjpKAOjvB7Kwlgx1HyM7FyxoMkFXFked5dDiAQ/33dzsOkoiwrojoDLHW+mzjOr7EXW1//vczmMIzsYw/RC6sZ5I20ufxUNVxHNf1PMeRmWOMr9dXmODGBhwkTEPCrosBMm2utRhgDHLnvP6VC+oE3FnC+ksRCuxEaGrfTO5wFl2ZATAMu7RUC6WqWnO3Hfcb4ISCcXgnRBEG12CUgV3X+xc5mJ2UP2Nf/y1bhO5mPSIIOqANK6BvGD7vEBlBdQfxiwTMzPrM+vf9AYKE5koyxvAul5wRGXmFbOMOarYd9B3ByxhEMuZ8VdjC6YMR/WFhB4hw2Hkc6G8ey3ZwWLxeK89khGAASxKMsCwqGdY+3gBQdt5VHfYtDdgwmLRRVYmcLNuZ2X1wVQEUMcDPaq8zAcA5MS3tTiHKhmAG6ccYx3FUlcVIwlhrmbgsGCjbDEf2/XIpAkBAvmESA7SDGuOYml7OcfSROh5nWOw0SUYX0WRw2AtkZn68nhEB8jzP7s7JbgMbSYI3OnJjJxJUnZxEZKRtIuMGnEqiNDLKKhNwQqrqX9evHxEds3crTErsQ+m7orMAxIISHNIqrxERMDxAR9jlqsx8PI6IACVRdiaWis6+HV13HyoDqkIOysGGEx1Iygo+8ryGMGtZRz9fqO4bsy+v7wC480J1IiVDwMhRVQxGRM31er3qmuN4B+rzvnc7ZldZwR1gbUREgxwimDGr/Frvb+daUikiFm3giXXqGApokbEsDpcuIBgeR9hVsGv1SexrF6AVAEQNDhV+/PHH67qO4xikgWT89re/zczBgYSlM4cakGNGkBkuZBwCug5YdNBhpojlBvAq6VJH8jAo3+coZBf8i2ILADIPaUYEMmD00wAwGCGf45DkSNt1zQTrmvtNBRIsYthGRtUEZHutC4BXBZXMajjRlsAQICBdElHuRJ/koOt6vgjrsxchboRVuOHBz/+1PUsjQtb+ABTDDMOUJDrIqi5AILk0397e3s
s+hIqh/PrNlT4tAQoYjQYiRsYeIcoWUaEgRicAQYEsuEBDEjIlyveRSDNAwhCFrAQCihrDaO/d6f+FN7//gf3vye/33xh35QD1faKS+DhBnYnsFusKgIWQREixonurY0IOEUROOIiEHgHkQQwYRtUzH9joiNpIlN4Q4IGOhQj0IAdwsjYnO3MAgioDroqF9AIBoCRnAAMQW4QkQpWL09zG6OiKWUra2tiDg4OKhPPwoLNqvVSt1SYnJajx7rXx0AzaOmHtZ9dH0q0GFTmsOVjm1DsILdi2df9rIbf+/9pz79qa8899pH9IqmZF7myr0bQUlFMgU6hVVzO0YYGkMgRziCh7vVgQ0BUOjT3pKaqDdVhIhg4OGkVYxYfZaEgATqgfVbnsoIYusLAOhgHp4Im8wGXtgjEEYEgGDEMrapCwQtox4uN9rWQCzapukIIhd3c7IAIBittNy6AQCDlFLqIGdSjQCTyw6d8rW+ERHNrTJ+kIJxsqNOBQYy1izKp4WyCkBgmkQIQLhme5CFMyUzq0xTRCcmAnYqlfxZ/ziqyBEkm3BeUBEllBppO4/12hUZkQGcYsrJq+W3g9VPOBDDq78J61VUSqnxcyklAFB1s0KG4NWJBCSMWDNxQ5CPFuERYUXdvQpax3FsulZEeDIROAEGWSBBxFjyYrGonvZx7JsuBZC5Nsxj0SBUdxJEjPplRERxkzo7rlkNNSbPfDogAoSwLzWlDasEnonNLGupKkXTmqoCROQQZpZA3AIAmRsAqNp7gxAwD496xz9DCiNIxas1kfHIS+RhbAjVSl3fKHdEswAO1+LqWJyYw0HJDVyQI6JuoOvv7esFvJu5K0ljEUxUs5/Nx7k0HNT7mPH4l8/TqVWvtNEl2rx++54rq37vojU9y4KAox951uZlL4YUAdm6JjXIB6uRY5Vmm/Ny5Q++uPGsrRPPn5/dP8Shy5WAVt3NU+RuWAQQ4dXLV2azWSsshE3TlDFL2/SRa8k8BfFG4DQomsQHtdqrhycGam1lEczMwAQRmKupqwWoLk9mxgZoDFMIUhcUa+oWyq47tfPYPfkzFy7+wz/90u/6Zx/Szc0YXWTBFjLjA9OHL1x64O3ve/i+T1x3yw3v+e0Pfes3f+0dzz39xNlLL3n5qxLHsa308Y99enWwu3nzs16ZLn3q3JMrfOlidhUsDm3YgBSIAuTkuObsANRR+vQjrvv92toGYmUO1PITjsAa9bUkwkohqCnQ5rYmeCAeBV+AuQVATaFimDIx6t8TkdQ/EwGZAtnCPQAlQZT6GzISeG1Y6z9wqwQToAgMqkeqq0ZN1IMwkAQA5iXcRYQipIojCbwyoNQASJBq5VizPQAAMdI06/a1aaWy91Ew6gTWoq6365YbBs9VBtJ2nZmpWd1dubtwfVe43jDIJMRlb3/31udefM3rn/OxD97y0hff8/wX6MHlDZmZWW18j87N+ngxp9q1mTkzT0F/MfkV62dNGAyEEU4AR0nXQjBdqMEsGM6VFhgT/iamtxqomAqhORLlADR3AgOaLmGoiTcQyJXaQiLFXET6YVllKX3fFzMSGUpOKSFAMW3bNgGj2yRHj3jm92YQAJS1si2nS1Fdl1pmkcaiRbgbVnjrLcuXvap89aO//PAnRhFYxTRImaTvR/7cyVmIky/w6f+Nycs43f84GQin0utojhfrvIX6lK9lgYhT7fIMK2LAMxgUk3ERpt8Bmdjr3DscBwSgcAdwViylcJMiQEuRJCKN1RSXgBieNm3GOllyKlwDAuJPbv748XRqci0yhak7EgTU0WRAFXZHOGJtPX29Za3HlhLX+NQpzKpWohpeGASwAXbTEZSIIKIF6t0soIbbrEd87mv7by2UK3yjmtdxOkmDkGogx3S6+BqhYFblJ4FcwpEQmHBiVDFRIBYAIIbar9cFEyEVHQHAp8bRzJyIUNiLl1KweKVu5ZwZiYndghAjyTgUEkHAoWQAX8zaqP8aY1TdbzEiMisWkx4bJkW7a0XYAIgDENo6KbVAnX9R4tSwLJcH28d2MOfVaoBp6usknIiKmapbeN1klRzFlYimxwPCwwPAwGEtPq3axpQSC00XZzisV+cIWP36GIFQs5JwigOiaUJQiiGihRus5XUatZA4evmQGMAtD/B/SIsZgtxixp5thZBaTE8Oi8ditqC2szCDc97PsLO8BxvHbFAnnjEPPgi7lrFfjeEyrg4AnLstiFKgt4Sbwr/1+f3r795sOGcaQRs40vkSAgB6AEADyKmpfpVipnkMgr4MDRBT4+BW116CznVrLQShtdhSY6oJymG1jqmTFjchpEU3215srFuLGq2DjEhgQcjq4FAwUBiKbc22vvO1m//t7V/41b/1DX/zG5/zk+++rzk2n41N9iuz6PI1tw9aLl58cHd3f/PssVe9/Flf/MI9yva9f/yHfu6tb1UbS/hf/et/7Td+5be/8U1v7uiRj559Qg/7xtMhbaWa2InALCiVgeAAUIWIrFwDPj2IACbyCAI4ua9jjD0CARPXcBwWZJbaUJqZugF4DR+3yUFRD6iAgOJuEIJEGFVTLijC7BRMdUCK9RavY25nsaIAkIjdvebGJCQVaIlrx4QVZonoSISJKk0/CJnq7jgi3JSZm2Cb8AJAgMhgDkh19XhE2YUaYRIerSQjMDOc4uTQGMEcI5ogAKx0EUNSHTC1ABE5Z1PEqcIgD+XgQKpPQUAKtsAN2DhcHVz9ru89+6WP3PJbb3vqWWcu4qKEctSwQ9Q6BEOEOtA2RwBmsXpEOzBwVGfrusqpQk0EQI8cKpOszd0n9ggCCDMEFlecXNoAhEDkGgyoaziGAyRER7DJMEqAaBQQSAgEDEHDkM1t9J4bFpTqZkopMfPEpASourmUEhyxKQAQKMIAyCa7insFJh9tZRFGKF1HNx0/df7SQV92udiTL3rh5hfex0t58+0/unN10XZ5IAZPJYCwMDAAsDRmRTUTpzBomtY9HzHDw6Zb1iHAsY5zOCEAuHntNesRZV5Dv1mEp7l4GBEfZeQAQCmFCAnEvPKVMNBRXYzcoemaQXN4mTcwLJcYYTjkYSXmFy9ene8cy2XX9vtVX665+bYTtz6ndxCvdQIGBDG7WUJGmIJhL+WL71v+aomxlkxHHWSYg/mEIvYaPesRHqiVXQVrAwJW6B5TXwoTuPu8m9VsJSvGdf6EMalwa6ojGk+2wCoM9mIaEQTojvXDcVdOVT0IzKkmxNVB6PSxTwN/pKiEZgtGrNxL1Vg3iPG0hwIqcnmxWIzjWKlArrY9m5vqMtcQi/rbToWmBcyInAzACYIByaPObK0mqEZISrWEyqpdIz4WCCYIK8pRjcjsph4+JVGuHWJ1tjR4ERRkYGSop8H0erqHtm27v7/vAG3b1i0MYmjOPt0xTEiuViyvE6e4gkeQpZbzCHyUlbuuBa0UI55+dhgWHhAO0484IAIRPILpaLYEbOEA6hUbEACBDi1xwQmu4kFH9B6WidqrqsXqu66AAlVShZTNZ7PZU+eGwTd3dk6Me5cQSK/qfHuctYuS57N00B+
OuQlR8azIwCkhdsyK6NmCaPtg6FXbnZYtrvvcxeWrrz8rhx4N1VO3Uo+Omv6cNaXkpkjUirgDILacRrUICAipIxlwdASAcEMmUBVmr/bN9S8PjYAkRJyEEGezbjabqWo4JJEwdzcT3AQkIlN3hFTTf2e4d5DvvvPkfbvwPz54/gff9OIn+oP/9v5L5eSsl1NoY7Sbpaf7/+jj7fFrHxn7P/893/OT/+BfvuCOF5w+duKOu176/LvuOtx79H/9/M8/94W3f+jdv3jh/ntjftd88TrI3DW+OmDmhBgikibym2s4uzsEEiiGVcwFINSvMjwTmCoApjWduN7LUMHLzLQuZ8BDA3JWAKAqGEAqR90HIRHVaUq9YBiBwoM5NU0Sqpq2VL1zat32ou/7ccj1FV2I1E+/d0NEmUJTA6qPhBCx4rUqLgASgKcUVUY9hckXM7OYXGKpjvgwtHqhKrsBfDRNgV3TFHBVIEMDAwTKagROYIgRjgEE0ERI16lqmHlQnXu4V9gyBYUHEAUzshOah6OCLnJc3d4ev+XbTv/Sb976O78zftv35sPDmiJw1I1FBGMAg5sBgEhCnIwWwBRRQRlrB0at3CPWzY1DTBZVQUJEtTqAnrrGhthiGv4ysjEDhAICBEaVcNU7Kaa2v8pVDSUiECmgaSTnnEjUshu07cygMKfcD5VE42bMZCUzMxDWBFqECdUCUzw5CNX/i1TLQeZNWazKcOHqRVam1On+8vD259j1N/Pqq2cOWty8ZV6WYyOzkRUZeCRsVLVeQ5FMmlaLEzC1ceSVCnIGBCb1Es4R69E9IhLUcUAtwmoWKQCsn3GycAKkZ6igscOsjshBAUEEkG1IRCc2Ng/2l2MppzZF81L3Lst+fuTBe68eXlEtttJWZkNztdj+dcdO0TJ2rts4M7vlsFdDT1gBa0HMIFGD1dzdInI1JkBMcm6isCJEDqlWMxGIRGEQCEA+TYammTBG1ChsVDUmQgZGrOw2QEwiPhatOyiIhCQQBcAwhCRcgzCrxjrrwt0Fm9Cqnis7W8f29/dLcXNFprqlIiJmnppvAHernEqoXx0AWEAEcJUBFAQGQJGmfvhZywyimCJixZ1UVAtRAHplpFdsi5khyVBGCw/E2WxWWRYVxMHAmNjyWHPMEDHnjBHSzWIKiTdHcANA17BKRAOUiHBbewjATSLQ0IAAhJhrYo102XJWrW8fMZcyikioqVYfFBNRBJgZBcAUJ+4BUF28buvRHXjdjCADVRENgJn1PtSjD7B+lR4QRXMlpgRXBxdYOIG5GUMy4Grjd6/0AwhCQQ4Pda2VZb2fSgkm8YhiAWCBhOEMJpxK6Rvu3CUsvnTfQ7y9efVghIVsQ3vtdtpdMOBoy71DHDe6nUPtJUfLMwh1K4ZGlgPUo/XRb9rAre7imPjy6sSXrzZvvB4vN7O5OUIITikUsN5oaMtjKDMlrOxSc/OUWmN0dwhPjBgQOul1RnAwq1TgiCiqiGgQnUjxmPI7wgQJMTG3DSZBJCFGD0JYhc1RyGIlSm0KtWrUarePveVPfftf/XNvee5L/8xnnrj0lm967WLx6X/7m/fQ1tyuOUFAD9z3yPd/25seunzFcO//fstb/sbf+Ov/+31v+/7v+qevetUbPvGx9++NuMzDZx66fKG54Sp//fNue+XWzubelX3g+dZWx4xVQCxICGDupLk4TOwJBCcmIkYIM80jalBwwajuoDp0MowITzy93hpBALDeKxBRLZkxIIBjkl5i9VYyPe0Prg/6HBmjSKTUJp6lWDverGhi4bnU3zClBBGqpWk23BVjyudxCENw4oSAVjvgWjFCHfiDeSR0CAkWBwBQosG1caw0BjN3EMEa3qICLQeEuZAIMXkQzlZl9KaR6cxGj5r6GoiYgBkxROpuYxyL1uGccANo7AbQaFWQcetszaHrrLt6yb/uOx740Kfv/P2Pnn3Zay/fcEL6KBU7N+3JNYgIMDVcSjEvajrpBomylhS0pmpTxToHVhk3FLV26kdDszJzpRBAVZ64VvtjIEgrEJS1iLlN2hUoGBzBwqHGAA5oEObOCBRYwpk5WxEhdyWk7e2tw34QZMsF11pcQgQKAC5esEK0cK3xQwZECrKIND0twUwawQQ8QOJF9tym3mGWMujOzu5dd1L+4vbD56++/Hl+ZUzQGLuDGVgbwUR1KFrcwUOYIQjAJk+5GnpwkqAAnS7agIm1yyhH0+WpjnyGpB8RE4mOmdPTO2Azw6jBHMncGsSunR3a8Fh/ZbHRLYzG809eufjE2UsPPXH/Q5G9L27IY79CH7d35gDdld1Hztz0rM2Tp/cuXZ3LZmko+9hSNx2dtfGpWeSAUJn/KQmKTBNlSCkNo01atghAcjCIgKPeF/iIYAZgANAhB2FxbSRZ0YbFigWBCZFBAsquI6gCWrggj1qk5vysFZqIaOCoHoRAJsR7V6+aGaMwp8H6IAJHIwJAd6uaeYio4iZZB0tgOAH3ZiKTcLsUZyB1Q8Q2yf7uVeYETJgEAy7uXmHmpul8gnwGIjGzutWjuZ3N1E1VGZFFimkwgRp4CHHFZnVN2847VeXKDTEVaWrVbAEcHBjMzMj1NCciJgbziAKQqubFwQ0BggG8Dg4Jqu5JmdndVJWYzGxq0r0GAwMAJEoR4eoptR6+0kxE4QASpoaAAhQR5lOoV32RTDMAChOjIGIIHhEmq5cMPRg5UI0JgxgCDCEwAh19cBVXqKUYVZ9HzRYBTk24I+ZArusDDwT2knBEDIgt72+9bvEHH/vd133NZjQZx93Hzu+eH28rJZ1cpMXW8Yf3z89oq9DYDyt35WbmioQphai1uSlnrrl493VXL18oZ/3c/QcvetJlVg6R26kHq5OoOihg7jypKlOkRF4sEVHTjlqkKgvWHFZkAWQPBCyxhvq5QKhVknfJAwlz1XgACiGlpmlSSgCI06xPAglDoHjwLFJiUsw0W5RsJzb97m/6kR/98f/0b3769Ou+8U33nH/yT3z93Xdsbf6dX/747rlucfKG//aB93/yc585e9n6lJRe8J3/7wdkfqs++28/9Fh02AzHTt5yenux2djuBi+fOnnjTUvvIDWLZmOOqxUQm4NDQ9AjR14WbjYAVjCCM4qEgAiBmjkEMDUyZmVmQBeCCLSqaCdKbWtWAhEQHMBMK4xd3dQwggwQTZvEAIYQm5LmzBqeTcNUkCACkQpaojQNSs2TCNIkuYKY/PVtQB1QEzMSUGqJaNp7rE8HejrS42nacJ0j1emWr5lECWDOHRSr/xAAPGztepfsLsTTdg1JzQr41mIbSiAGT44f0rraxFD1NjWIWErhJCyYVUspgOaAVZ5mCIzhnlduc0vAeqjDFox73/09q3/308953/sOfvhPHvZlNk829o13CPkAteVtg7Euc1EhAdVGNtCJE6C7K9VYdKJSTIRmbRqGEkQWUeUGmIIS1qOvZK36Z6/pBABREMBIHYiYYsxZmDsSzyOgMyAhT9oTJjfvXU8eW+z3vQ3ednMWynkoXiR5FA7CEQoDSMRGO1P1UsoMkwKUSi
BUl0pDQjQdBZGYHDE1AOALSjWtZQfiwKNQtxM0pDH63Yt3v0S/+psbX/wk3P0NI2TKCUJSk3lMSgWIRJqccytNcYMpLz5apOwWTIJgEKmZkULx0pCwwqguXTuqMRIEOSkR2ZSzyeYO6KbeIBNRLnZ0AUvb2LAKdqICFL2lmedWaZZ2/PDCuUc+e/6BL439/tkDfOD8xc354sRi63B1MELZ3tzYWw1Y0NLq3JUkn/vI+fPnn33nS57/stfnEYk9GUGAgwWgOTE3bj0wrS8tCBQiHsvKcHQzYigZ25mU0iMQoJJJCApNkul6ewahuWeIBDVzPojIIIygWKaGIFzDhKCRJiIgDB0b99ozhU98XQ9PQgOWxFKr7lI0pYSBpkPnXE3VlgslaFNSVUYwpBoGXIelYSZJci4oVEoRIgSU8FJGqJ2yBaXG1Vri0ZwDEnfGGOZ1H8qYuEnDOBIRCpF2qe10XAUCiShEsJg7hrXtLK+WCJza5B5N08y6brW3THMZLavnRprSKzdJzYSmwMRxHAHRSZXrFd4wkoUnEXVnShX2xuiIYUURseVUTIu6hXeVeE2kPqG+zAsnlllbVkOTUsk5l4JM6JbAVY3ciYiDMAiYGBg0KAUCIDdgAAH9MLpH7t3EwQLJIgyRomjGiJQwF1UNwgo5RWRXTCyFMoAyMeewVoI4HKUvJDGOuRSLsFmbMDDHEGCostXGoGMP0M/vxmvbTz3y4YaufRJf8sToTKe6DvvE43JoRkCzNoZmsbG3XHnRBhSDV4ibeEl969KyfedXrhUS3JIhl9c+tfz46R2PJXAHAFRWDWyYH6oxRRnNoAYbRWEiC3U1QCY1w5oHX7fegWCMyChIgR4iUs0LgsiI883FcLBskYBgZSpA2HEzkyaEELFxiAhnnJvsic8jqzTNIbSLdiy600K2jTteffjKw+/5Fz/7o+dJ3vTqb7z0xP5z73z2f/2/r/t//sPvfHW35xf8hS9D0VMpUZvbw1N88sKl++ebkObHbv3y710d4pHD7XTp4vV3Xnf9ydf41uYvPx5Nuu7O2eXnb63asSM5JNzoU6aBLKQlX/qygS3FHGDhQZ4AAZEMIywkEUulYk1otGlAClxgWpIBYACquWtBYoFQcEKkmhMUDIhqVhWPdWWbTcG8aRqMmvfiYU40NWru3nRtbUQEyS0mbjuAUwgSETEJEcXaH3KkET06KNdf5zpDZpKPrH/VeKP1v1brVAxoarHvdYKDAtFGADFIIBpSJe2BOTg6IoZOyK3MFIQYTUqsiYtFVNnR0UKXiIhWnqIYdNuriwfLl9z1+Ctf9pyP/uH1d9/11EtfubxwbiYnvPSQyiwvIMLJFU2qkxWQglIl4TtmoJQ6wECP0GBgrOL2ahMGNAv3iYEAlfnsjljHCVxKQSL39d4OgGCyCQJApXRDHaBjrV00whlxKLmSp3IZajTeAA7oswoBZQIAIDByEwgkdnQKCq8ejlYSMDlhmnX1A081mK9OulMUDGjkmM7JsFBYRo9IxzfLbD47+9XZYw8/cv1NslRcDD4Kigvw9vb21d39us9LSJOqIzBDEAmZFzBX81VfVCuepbiJCCKjKwnqNLKiidswmeMmXUwQVrLSUQc8my0q8XQcx7bw2DHNnPefeOLzH7104fHifuUQyPKcwoex98OxX7WzbjzMy4Plzsk5yvzJx5/qLy23Nja/+JlPHr/uphtuu7OUgiDuXjRLkzzQ1VhYdFqaqASEhuO87UzzSnWRWkGagYTwsgyQuARRQN2wMvNRdgUzSwUzuVU8eO1HE5KWYBamtckHOMitznIjfMrMKdXshBHJ0MOq86GZOExRPESQCJklAUaEllIhblpDWuoEpb7/lTmJFOgWwQBV5+XuWYt5IHJCsjF3i9k4jp6SEoAGMyOAqpJCy5SwKaWYeyklsUw/OICKgiZEtRwRxOhWapB2BgOyrCZMHpBzlrYx041Fl3N2tSrbdDD3cI+6IhlLFqGch6bpsCbIgpdRu6679vip8+fPg/k1O8fPX7o8n81Xw9IBopi7N00jNZHJI+chQnMuNUkQhcswEqIyu1bviYng2io/ffjqjl6/oensUjMmUlUgLGOeNa2qF/OCUblmBh6BGB6B4C7eMGEgCamZi5uBEcc4mLmnxMxNKSUl2t4+vuxXiVocM9Hc0R7dO+t83QN7d+yqIt6Mp1qIC5B91DbnYSMSp3JYWmEtoF2IEYUebsywzQtBeOj8yQFWaZ6SUbdz7Ivv+dff+W2v/IXrXnxm1VPXrBJhWYFwEeeC6D7lLk65sRHhVXcznccwrUfXMwA3cwsv6ormUvtpEFVKYkX7fpSulQqvA6yCvtr2ARIqlHnhBFvg/ThHNqYuqy0ImoMHhze/5qGf3/ue9/7iL9x447W3nbprdXChEfgPP/qGv/U/PvJHXz7XbWwGnnOft4d2QR7fuKDjxz7HLz59y6VPPvv0Cx6DZ5+5+zXNYoMQsG33+2F7GL4cJwo1r906X0bwZLq/u9icm7X9sJ90sce5CWNzCHRUCpiSWChowtB7Iq7PTb3tyMM8eViu5FAHilI8CMIZQVGqba5uAAEisUJYKVY0IkILREBKYz9UXmA1MwBADXCthtqaGBoSqtXYRwqGRIwkLMwMTJN8ccKmT03zM6/h+leOoLW41d2pgSqhXUexrO1SiLVQqGkS9Z9HBLREsSYJ19YcAQmi+No5KhFRl+vFrZTqv9cjh3j9etpQTexjTy13Szr/7d+9/MKn2w9+ZOPM87vFdj+W0kBg8g67MhK2GfWoa43K2QhECPagwHAH5IyOCUczqusRRFsruWoIZjhiZa1oJOIwq4tAqCYXnDLk6jdXtVsYoQEUDpPfEACcmFeroZhySkQUFDUfkIgSwFFgMyIIMSFGcBfUIDggM5MFM9fbOAFFTMyBSgjiJBHRmY8CrQJnGwnFvSz7xSLZYv7ogW588UtbZ247iNw4kRMIIYMTozACJ0nDMKBwILDDGNYgEgAyMaIAYCukCoyODCQM2LA0XbdcHlAwPCNKxSMmPT8LPVMCDVATXkuukj7WNjs1PBzc/5nff/iBL/dJWLpLZy94k9Lm8SsXL636PEAZDq82hJubi1XfH5Sh6xoXuro82Dqxc/7sQ9dfdwtwM2WQIAJ4eAg36E7hCFPWdYWZEyWNMpsthmElODtYrhCjhndU16jgBGWbphcAFt44GIY7gAQgWGDN36XQmibmAOvBaRVSIAUguqqbKQAIUzETYQoiJHd3BAtvWDqiohoURFx9XIjohL0rcIXt1OV0YECtWc2sUqVq4A8iR4SrSV0tmRGAlZG5qotImgRHjjV3QAcEYgiAMEVhDLBSuGnQHcMD3VQxrKoEJjVlqLMLUCJRVWQupo0whpkVwonE3o+5UqjaxEMuzPXqh5yHyg/pOkmLRZ/zxf1dZXT3q6vDgjaOfSUFAD5ddidu3L2UEQGa1Ax5nG9sDcNggG4huXa0QSCBrhSGHjXcov4ODBE1YLRayrx6EoDAzKQRQDVTDYtqN6rmJa/5FwwtAVDRkViIa
MFNbyWTCsR8Y7Hsx5yziADgctmbByXfx9Zst/X5Fs73Z91w/ctfdu4L/3LrZ37ywet/N31be2rW7j2aYgapqArb4eoyb0kiPLjqbZuPHbR04NzymNKlY9DFMOYCqZu975abX/+ZD9z25jt3BWe6YkJsaCO1lM3QKnDcg9wACRgmLzxUMdo6eDYmQXhVQETdIjISIdSf8mroE9Lm5sZ2c/L8pYsCAMZoiSSwJgoAAFsUSpTAOKcRx1mmfuZl89SGf/RzV/7Lx2+d3/RNWy+8ZvG1899byds/+sj3vvTYzSk9uYJ//udf/+9+49Pv/INH4tQpg4MZS+PbB9chXf/N9NhHr8qdV+54ySnZSotNJmk35r0hYnsp5PiFy1+lE3fCk6eY9oa9OHbj/hP3lbFZNWMCSrCZvSRkAfZsHvUFxCTiCAzInJqmqeFudWxLHu6mbmKiqsXXjaYFIxIDC1VQZxOCCJoHo8GLVlVRKWUoebQaxqgAzszuXd1NJggzq8D9qV9lrqpZIqbJFw9EhMx1D18v13Wcw9P9SuD60lxfwDgZSADqPsT/j765xtwG4RSPWLUkasAE8DSfNSZ8vINgqNU1xuQwQ0yqJqaqpVBxU1XUKXUcyR0aAeeZ2OFVuO6Wx77vB794/3uO733h6xav3cXzG0ZLmiGuei6JEzuFlynyYF36ASCksJiOCSGKiFST62qoXEyGMMQJ2eNu9YxjwVKcGSOQWQqsTS811qn29cgEVBPloMI+MFBEmCMCmVKXmJkB591M1ZEnWkAQgjkhVPg5AGD1SiIk4qp2cwSjaIOFua4VYjYLhOJWBTwzNZIwKlAKNs0hqBLgbPaJF96a+aHX7p6F+YYbyZyhkTa792MnCcxDTQDRwhGCBWuKggcIwdqDTJWLwugaagoeZsXBAKQ+mXV3Pj07R1v/Z5R0iJxzrhupWtNsg332Ux/6ypc+BUYeTTPDkcqyH7Q/iIBhVOnaY1ubaDr0ucjh1vwYku3t7YKDpHjovi+cOn3rDc9+QX84VudAKSWi6iegEHgERxVLIyD2eXSMZEDSBMkQQ9u2YTDmFUFqKFUO3VTRMkWEO9r6Ma9JChGBREDIkSZbGlWolpqZIPna/CpEpdQHe0rBIkDysElkD44u8Iz9uXsAUBIEQrNK/Z12MWY1iwoZXZURs6q7p5T6fsz90DQNKhQbiSiDNdiGGRHBUAzN3VmkEQpE1QDC4CltlwBrBEX9STFiXwoDi0jbtMZSq1LzCVsxGS+ZIwoi5jwgEwRZ0aIajiQNBvRjrof+hL1cl++qObAd+wEkIYWrZncBVFcLtHCpG2QznUzPLtJUoXjTNHt7e23btm0ax9EtgkCYoDreHISIELV4RV1OiEDwCLBwpMnP5hYAtBqH2jNMQCl1qNveMCRGt15X86Z1hkJuEZ2FI2VkinKw7GMdwCNIHpBEBiuduUM78KzIYrTLQYt7muf/vl55z6s++l+/8E9/4r7XDdffsTGnftQ5lcTzS4w+m8dyvjMenpT7brKzx9PFm2K/86sRHOhQ5EN/oGd+7Ecuvff+1z92zy+eeXE7joFpyGE2FqeEROSVfAB1El1JyWvFLqy3jVUBXeX6jEQiqsoA4VEMEKdCrR+yOZiZQJV4IiIgeujRW+1IMJJCCZFL3Xwz5s35j9+3+bYPXbzmha9JmwsEuNoHom0eu+YXPjV88w3jK1+0uPjQ3t/85tdec3zx8+/4I5jdyAvfS1lQACyfeNYf3HX8WTKbX3s9RKR5C0QOSPkwuvlBpO2HH7vnum6Lz89k+/F7P7i443mX26XuLgU2OM5xtwFE1S6gmiMipbYhEUIR6bquaZqqyqsNItWtuCoKH/2XVQOBAL0o1aM2nBQggLMLu6oXVQsftKibW54zZy0w9G1qSDjMY/CcqU1NmHpqaXIITbNiwqf/PmKyYCJOgYBANVRl6mXrBfx0AeVT9YTVWrOWZU5lNdSJ9+QKmNyNlUxCHDwdyGsYWBhEOCBTfa/YzMzYLCKgEKvVGWAyy6KllKnSdxRHTovsI3eUr16+9kWv/c1b7j9/y73bezfdGDsFSsJwE3RULgnY1mRQh5qQU2fFEDVSzWuYqCECIhOCutf4cER014ioDTQAIFIdILu7SEJYt1wBWA3iWL8r5EnPU43g0EhCDGZu21mgA1MrCQNaSd4Cc6o1D61zR8yMK9eappkBM0MEiwRPFlcza0laSapTpK6qesPIPoDlBLFXEGABcniYEUhffudnT95zonvsWzZeu7e66gxZi02e79CwPIyz2WIsVd7iDXLxdT0XFqkBNWTS8AAKdBQK9ZwHEg5FX9sxATyqbyqI1i/s0QXs7hHetq0g9X2faPPC1fsfefgrMlq07VLw3NVLNGCvuc9jy7OGG5Fm0S22Z7PLh3tpLHOT4sjbO6axWg4Bu4/d/+ANz7lTNbepw2BEE2HV0jRJgut16e5UAzBdmVMxQ4Scc91/E/DOzo7msHGoTmKaSoeIQBGpTwLHRAnXCn8UIgsLR4SaHFwpBrhGHKjqfGOzgpSz5pSSWACjIjKJACTiEt5rkbYx96l3cY+iNctr1IxYRzXT+xgRTCTB7t62rar2fT+bzTbm3eXLlwkbN5W2SU0DHkIYHmJRolRKvJkDTQnGUxkEIETqvlgsDvaXVAPCqQUAV8vjWBPCgYmlyVFKLoAIQeom0qhaYrZ1yEQpFoQMFBSWnZkRkFhMvWk5Z2UWdxjK0Laz+mYRWSkFKyXUEan+sIS5FstATcrZU0oOqKZdI+FaIgO6MXqo1+s3CAAMORGTazhGhLrHtIBbu+nWYzliUjUUdgQkBA+tCC+IIHCMIGdMCEyEzEgRWgYnCFCDZsxj0zRd21ge3a0mlHg0i0W7Wl3p+yeXuycRtppxn66b/b/nv+2XvnLrf7n9U/fd+Dt/6yv3/fLZl8GJa4bZibCrN+SLt1y97672/MkzPe1fwqHpmq4I5rKA7ePDctzZKG9cHMJ88/qXvKrbP0CGcCipnQcaZWBMxccJTIA17B2riz0MHatf2dcPT72Ga4AQBpb12iiAAsjMXE0RR7XFYiGAkBwbR8UAAnHEACdorR/aLctZTm5tYj68CJ+9r33fvVfmN85uLLt7SrTYccCDAHPfOUHvuffR3bP73/31t9x/5ZHvfs1dpxbdT7/74/vDZpOS0qEPDMdPb+0fPLXrp29IVItBwI596LYQrDRt2h/nG9dtvOA5x/r+4U9+7sLP/uVr/sT/tbzt5lU5EHJvPMzUw3KpBJZ67rdtEpEqI6s1oCOYa7VJEFXfvIdDScXMkIks3MzcNTxrqYm9NGvdqajnPLp7Imo58bSgNC0jgsMK6igypQQepRQRrX8urH84wnRU0zhV4dt0vx5dpc/8NV3bML386JMJJp5uaWp/ty7SnzGL5ro8qRKwCaPJMCWd1hsZapdfp+i1P6iHQpDV6tXM0Lhm9bi7WbSuuSAhpZxWvJqxvP7k1/7MLR96e/+lv/6V13hsBY/gKNKAoUepFhpkmqT1SICQFBtugGUcxwC0CGQKx46d
4GlrzWTbx0CSanV1IHdFRLdgsArgOUp49GoKB4xwjyAGRExMXRJmRuGURESQqW0SAyJQTWydipsKbSGqhij0CAYKQKjWlMC0bkbXXHsAkGhqwFXXdZaLQoniVGLlXtv3/Tx4+JmdG37v9nO/2z36mgdfTTw3IgkXMxQ2U4FoZ3OAUAcCMICmMvFrQ0BEIg1yjgIBwuiIQgRCXiplpG6fAmuwLESlwDDV5ONnaAvQEdDMmNHMZB5nP3dP3l9qs3O43KcotCp9Qonu1ptvXu7ub2xshMDV8+d9MT9x+mSuOiMrs8WcHU/OTiz3D648ft/F8y/e2b7Wyzr1aFq+GgYgkCEYoyMwesuCBgaQUsLi4gZI6jgcLgMTgSNSw6k61hjRtdTXFoImiiXWa9gjwHHNkA6ICEH26jykAIiaVRCEBlYf79oxETgATd6k0Aibonk9qlkKa+iWVa6vg6MDIFbGcIEKYoRw1bqJGIYB3EVkWFv/G2rysqdZN4YzcRACcTChe0UtgAM6GEJ9ywglqwOTqQmzZZsWWADMOGphZFVDkggspk3TMUK4IwKQWMlVBlEVaqq5FtBuJsymCgBaKjQj1L3pulLGlFLRXNy4SWZGTbJBG5ZRi5m17dxda52xbp2r0ZxUMyASkaipVsodEkEQR4Q6tIhBFVWAaw5MXSlhBaVNePUkdVbTBCpEAtKACVMJgIhtEsIQj5m0RkDqDtEGqVnbtqUUJmibpuQBkQgCadgbhig+W5weCuHJ7prTtzZtd91dXg6PfccXbv2evdt+4Rs++Vfu/a2fuv+6r5TLX7Nx9dnXxzVb286zg8V2GrfPy94h0aUb3nju9Nf2i2us2QY3GHfPNPzkxU9euuZaplXC1jw8ymg6E4JEUOoZ7u6BRBMUCkKQoAqPjtaFUwdE7l4bkfqxBBIFIKBFpNRSgGUTiHA3U11LdqdebUyFeWxOpvFd73lgSf/TzlybD7W3sydfvNmU1GxAqISOjiO2Kfj0Hc/+xG//0n1PfPkv/9A37R2U173ozhtPnPyHv/g753qx2Q7BUnvtabHY2Sl74/WnTyqCR+jhLmGybhOBRx38d35z78Y/e2EsL/+Jv/jeL31m61/+2Ilv/+FrfuSvPPHIFxcsQixIQUZQvbZAGPUhPrL3cUVnOCXiOq0dhzyOIyUySWam4cRBgOqmEOza9jNiki6hCoW0OEM3Qao57CUcq1MtF82FOYmINY03QUQped0V1Rlv5Q/Uv0FhogkUhwFHM4pn/Iygzkvrk1hHkFNeIYTFUVNYlyswtcuTsyPAnHjyWSHh+taIihk66ohqlzxd80S1kyMiYHd3SmJmpKqqNVRuFUNkQHQpvErUSLcbqxf0z3nFlac+8twn//Dqg2944AWXwZvIe6lpNFF4APL6+5qmJxC5ZhUDIkuEJaSwIMcgJGCidUQtAAAQcL1Y3IOQqu/ZPRSmZE1zC7AK7E6MQlwpWpSoEWpJarQDN0kIU0rIkohlYlISEApx1Y07RDAlXHeSazIak7g78iRGn1FSNyCckj0QEWAcRxQcAca1Vl6Leylg7manyuaLhjs+8eyvfvr8Y1/X35ExdzY4o4NTYveQJi0Pe2AyIA6vEZ/kFszhEyAfkBNTI4Lh6uZAjaRSDCkIiCp9jFIQKjhNaOb/o6ojInfNORszMYy2/9DDnz1/9rHl2EHnx2eLDe62O2xOtH1/sDNLAPboY0+1JGG0tz/gKrTsRdaB2n5cPcjando+fXyey7Jt+HAoyODuxbxNjYbVo3/istX2D8ncpW3HcdWmOYa7R2q4lFyJMiLChGZOQSSsqmoFgQKi8mWr2xQRa849TaEqWAMTHadsbEQEiHHsq69XRCyc2oYDkkVxK+hVs9pJwmLEhAEGQcLMEqqefcKxQbi7iDytuQDmdRx3DYastS85UAAQ7h8eLqSptv6ohSFERGpSSilpLqoqKIpaA48AYbVaIYm6MXPbthEBDGYFGEC9nc28711NhJ3DORKLFw2iYsoIYWoewkEQ7hCuFZFYJwH1IuxmjbpDBAxDwxwlJyIPEIdjWzvjOF4ZDwwq1aRunYBqnaEBhE3Xah6PHTt+6cJFV89uFW5DiAkFAcGD3QlMyaNaaSZYdABEbW0AINSCq3C06gSBURwxiZDVnLdpA511aJqZEKaIYlbcg6VBkuQWlfIZplr9g1k1AY0Ag3Wn89VrztxZjp0WQBCGEN7YPv2KrXc+mG783Jf//vbl3/y61ZO7xz69ar44jPecw8NudWofFrMTG8733vWDV3fu6rsTpVl4/VHPT612r/zbZ//AzVtXz+SHDLAtpXRTRsBheItS8zgCrG5AYYp0hxpLEkdHenUSVcBTVHEzmlnlw1ixLjWmVo9jCY++jMvcsyMjmUyG9G5+6vKjF576xz92y0c/uHXmFTf+X/+UTu6Uz311OBYZZ8wpSMBVSCB8dNPAn/ixb/ntX/n1X/k3//11r3jueOtzDjeOf/+f/7bf/eSTDz72cLNrx+A8hN86frbrb7mq3zZCAqTCM0QgHbVdHG5fd8t//ul8TXf1ujvLwxePvfG1+3/4gb13/tKLvvHPbL/425567BNdSkmEGcXF3WcpNZKarqv3X71aDOuk1o8dO9l1Xcl6eHCwv78Pa+2GuaNHIh6sFAwwFU1ItMHtFs0sNQ5Rygjo6g5mM1Ui8slYG4gQauqjZkspuVopRUSqdFBVw5yTNITMVC/DSnT2o841pglnLYv0CP68xlzUL5XWG1Ve++psAlwiQA10mQqt8KMhJALwlGQZgUAU6FDFTRYBhFhPMhExNAZG52lNK6KqqtoUPkzKNO6zHzsYeuA9cXD61ief/5FbLr//9ifuvnjDYtUddtRqSpDHSj10q5gvYQZCV2N0tOwOSBSggG4RzkiGRHWwUP1+VO2h7lATVdWsaZtax6g6ek26mHSzDTMjtZJAkoGllBohIa71lszajpiECRkA2qYhImZyCBPgILApcgTXaWEuhABugUxQ+aeIAKAWyAkZzY0BQQ0tksYqj5FzDIOvBsi5P1z2VhQDEHBbXrF35mN6/+ef9fAf++qtyxml7AxtUWXkcRxXeweVsQaA6FFCuQaeMRMymruQI6BBAui62SqPBXyWFuiolt3dipZS6sQ01Bwg3FJK2fLTFzCTO1QxDhMJNy/85lfsba3snNqVvb29YdEcvzSeb3ISaYRgNax8hhs729I26Xg3m6VhU3hz3jab186lObmYd+2zr7vmZroBFIhBUgqLLjVmNus698r7IKkbMopKJQgryJRrb8CIpkSYWCJMVQGrNjAmYAUTAym4Vv83BSEmRnZWAtVAQkKYAAi1VQ6IiJzzbDYrpTBzMe26ecLI5gGhbgLoxYKjd2MnRCYCRKybdVTrSA50NDdBmS5Qd4ypyK13CWDUEVcdt7JFm2QEdyHsmq7rbBi5QGICcFDL7ohYwh3JCcFBVTfnm8vlMlhMFUnMAsjNlIgCrPqkm0b63iUEADhRgJuVra2NK7u7yDXkLSCciMaK1nFPTGrBzBsb8+Vy6aE5Azf
ctG3Ont0AHD2QeSjl4ML52hgUN2aWlEopibjr2lIKCxTX1WpILJcuXI5AQmEmHy0CI7DGzkdUVYcJNVVK6e6AU2YcV8x21DjrwABQQ3dhWoVWD3H9mGvvzyJgC0idWvbA7IU7tsgxlHBwiMRCNecbUS2AZITC2jZN/4Bv+vEbmZmbZi1NxGZ54YY7Nh9c2a81d3/36vwf3ntw3xv+whOnXrTb0nw88Cc+Kg+88+z8xefa2wrOtd2qIhAACIhz3amdRf+V2DpxeE439GQuWBhiGHl74WSY6y0TYYBwdOcWrKq+6RBmJMRgRKAK4VdEcDd1bRoCgBijVh6llK5N4mZ5ZYe03Bxwf97RYZ4DHC5OfOaPzj65F3/tv//25R//a/O3/8qZT73r3m/6gXMXz+uZPnXzIOGymo+XVmnb0kah5vxAP//Fxbf/ybc8f/mln/2tD9375GY7OxDh7e3m5c+/49qr9778gd/Yjnzf8Vd+6pavyeqR2K0AEnresYMxNpc7N3z+VX/mFf/+b39meKots604sdicO23e995ffeGrX3vNiefm4bJh4Qa7CAvH1HQtUJNYpNoyIiJKEBGhXLh01aNHZFPoWobg4gGsbiQOgRaGZLKgYS5EgNJK185KKejRJeokRUSuiAl3VTVX04oTcNcILLkMteScd204q7ZNM1PPGOFNysMwTx2gAzOYYytlzI0wANSkI4/glMgmtsf6QsVgwNoSIrq7whQyDzUcau1ZpICaoEeMHk6AHlbfT0Rx14AIciRWM6QpuQhioutxEndrm8ZyCWFVzUQppZCCVgbleSmFyXOelTTK3snhxLc/eOoDd114x+0P/qnPvUR1nCEqY1IqZkYMAEhkAayWkLKHgQlRuAogBCNCaiRnHUwBGZAZ0ENBcABrHAQRa6RmmaIOE0dUhjQRETCjEIk0bWoUyrxtmSQRt20rTESUUkKmWk/UD80RamBk6wQAyBPOPgACHepxgQAJA+qIDd0qzDlq1VNje4qp5aKqvQ2lH60vy2V/MBwO4xiBDaEwMcvL48wNV2/40vHHzm2tnrM6dW5OPBYM4SET83xjgaP24QCGTOSerbAwqhJKAEhxJpzN5w7Wm21ubiJizpkbHkYUk2U6ryPL2DH10qQhBTsUP0x48ugCDhyUkyHOuzLPSduuO3Pzq577+ojZ8vDQd+MQl80wAMRhink0CLZxbHFQhmHVH4Rtdc0GHO/cDtkddCirPvGl4fBMt9miHNuU/Vw6ItPOorcAD0fEBpzcpk2EBpsXCTeIGOqoH8NbFkL34CygBKn6JBO1PLMxK7mbsQQihqlFjAYAIIwBhKkppThWcTKFFU6N5tJK24jUrUqbGg7HYHIr6kLi4YagWZumAYuGeDGbuetYMgIHU3FgYGbMOQeFAjg4Tpm45EJogUEFwzRqK+simQShT57Ucj8IeC4AKVJpWAI7xEELEQkEeQiEzLpBVyioZThx7FjfL4urGzFwqDbAmE0AL1++TMLYNlhGg3pX4f7hipvWS+6hDpU6AFAKYOBovCDYAOa5X0WEUAoH0xi8ZydBrrpM9wADQWFkxDCoZ4OlJgXiYKODYzQsSSzXIUbd94pDTwooJSgRI2RzbaU1JzQzqKBsDEQGxoAZshA6eHELj83F1uHh4ZqXJy2JWt6Yz2aLzbPnLszn86zaingeu8SK0UliSn3fS1oMq9EZrc7AYgpiD4aI+UjLjRb+aLjLzdpWMAzBvAwoHcw2MWe94bUPPf6r//rR8ut/7O/65nHy1Hg3T+3FW9984tRLtmw8Np+f3Tq1npzH9B/Ew2g3vTwxu/6mw/svJd5xTCTBGWkOYRjRWCBgDnAgoTAEdocgMycEImyY3LRJNFIkYi9oZu7E0hUNwgChw7FvmiYoDF1K2HL/yX25cdyZ46NfvubG24bVXZ/8wuU7b7vp279p84l3/vr42d8/ueXH3/9fZmdeZZSoaanb8HHJLKvZqQAQ7bvxYphTA//187eBvySefYceXo22A5mhK/d7q8WtV6//3q2Hfu/xM9/BssDZxpE0idMMZu1LNg8vvveXPvqt/0+S5uZ3/ZOn7uoeu23P/eDmTwU9+NjBuSujpsXGrbk8W709OwABAABJREFUBXF5GLJQ1wirBpESMmEdwlQeUFU/UqLWzbuuG5YDwhAR1M6bUlLbrPqhI5cGlgOLbCAAN81Mug6bQMhR6n3VBCkKmTEXVQ6y6u6ot3ANMFot3dXatk0NAbACWMMwjC3LYT5opGGk0hAejhHhNQrOg3lyOBztQqbhLUUlA3PVO8AajbQe8NI6A9UBCKlmlx79qq1VRKnaLiIMc/ApHrr21DFx92p69IS3ZebAEhHrCArOyI6SgQuW4kGm3/DkCz93/Yc+etO5Fz711PPPnTk/P79ZTjiNRDR5rSoYkoASz6wdhoHalJo0ltznUVKa9PXARTWxuJmIFC+CuN4iOUxw0GlGLFR3ugAAjMTMraSmaUC6lBICJeKu61gIA5jZGmYkJEIgDOcAckTEmuN0VOg8PViu4ZEWiBX+URmUT3vGEFGteNGcs+YyrpY569D34zh40ZYldW3GOY2cZnKy3Xnz7nN+7sSFD1771Refu2lbc0i38iHm7YaDmucNmQNYLu6g7rwOz8EJV4odCUekeVejbziJzFo1WyQ1zQvdbA2pjcEoWbdAL7K9Sdinp+f5Du2xZc6OPZYr43LnrD41u/TU1hMzb2aMsMkGZVzMBm2S+SBXfYYP3fcox/YLr33p183uuP7Ys7c3btrf+8Wt2SsO9cSXL3/5Y5f+5+5mOX4ysS96zS21oEuEQYOGwTurEjvwxO7upRCRdMKr4fjxY4eHh9nDAymlQlQsL5AZw9yLesOoqjlrtc77pM5ba12m1EtHCi+KiJYLUa1fIedcJdnjUJIwMtf4vHEcAUASuYHBZPkrpcxSU2fUNXnW6rnp6uoeWqlNVgozd83M3YuXKA5eu2GMQEMKD9fiEIxIEG6gOhJGSqlGtQBWPV893NwiiCR7uFW8JB7sL9VyxZIjI7ezrCMAEnXQD2VUoGJoLXBC1prKoKHu2+2Ga3jxiGhREBNEOFrumuKuCk3TmFnXdaWMlEQtaqiDuzoCSk0jKjzJ5QiBEZgADYgJ0NTDDY1TapGrQ7FgdJKyQmIppXgoCw55JG4DMgBU8XmEAxpAmJUS02+uWpb9WAF8Hs7EGgYAs9kMALo2qWViECRiOjIBqDsQGcRsMe/7XseCSVDEwc1NABYUA7AV6RKc7J9azm8BSRQE0gGiRbeX5gM07bj66qkbaPMUbp6w+XYPsEKKMlyY30DtDJ8BqQbEylRCBAt08mIyxxxChRid0K24KmNyNMEKuXJE5cBSDIgZbRrHB6FxIwbIAQSE4R7EMKllibgTHoGymzMOmgU8Lo1b1zz01Z0Pf3RrfssDf+zrH+0feeO3nOm+/KVLf/qfyKc/cmrr5EVtj+Vd+sq72jf+g7x1Ml94FG3Y2NlxbtAK6UoQnvfA/5R7PvDmN/3AWw9eMLvxxc3JWyIiygDSWJofLC
8tXvhm/5o3bOP21ZGyYoCjK5LMQEvf7+x+4Vr83OK37v2D7/zH+ObVYy/5xwAuAR8u/ty3f/qO/uLgkiNtNsdUD7klHyMPYySOXsPBixqnuojV4AgjRigJMfby/rXHTjfUS6KzF1dti4fj7uZip1+tDg6XSk3xAIKEzCxO4e5NFWgwaYdsFqDuzhBOIZX3KK6jE5Kp5pyLjsMwtF1pW+OEvvRM1EibWPqiIFKWpWEBjDFCiAEgcCSceEB1eF6PnFjbgoWkbrxonQNBASJSqoGSJzqnuVa/jeMkJjq6ZpCgKCAQ1YejOv9ijXGfzLjkNAmvpdK6GkEDIhBFJwrCEHL1JfGJXfrmB2/5z19z33vvfOTOC9sn+m4pZQ6kCLG2XVIAI6ZAhZw6Uh+HZZ+61M5EXUf1UGfAuUhN7VKfBMY4Tc4B1nsUxCDiNqUjeZ2INCzT1r/hlBIhM3OXGmI8WkNERA0DAkBDMIz19ADW73n9m6e16BRAEehTuNJR9JaZYfi46l1t6Ps8jDquxqIH/aGrtUIppW5j48AOAGneJD/ZfmN+4S/vfvoD25//gcNXHc+zXfRG0BhbA82FCVrHglIQ3b2YIaKH1sSqWmGklNquYWZBYuQqGVPKecnzZrOkUnToYj6bb3lzcHi57O3uTS5MAABYdH0czBYz34FTp05de83mlS98dtXcvT0eDlC0hwI+Bx2U93ST9i/pg++5/Jav/bPXzu88lm6+8Zruvst7n/jo+645devZJx84e/bTt91w/V/95v/wb774o1869+WX3voNtCqtNaONjqtkvPDuAMgBVmYDa0JqRLjGIDW0GpZZS9fOSymlOAgJthBWSl1YcvGA6jYi8CDGmrjtdYpuEVwjSAE8lJAiDFHqiNg8EKC62wFCiJi5lHE+75bLXqc/Ety9ndZDxu42eIXGeASjBxgRE6VSimpxAFN3GBCZBCv93AIFEACVUIJqcisCswTJHMArDM4NFpSCKYehASA4EgIMeUxt51U0yZyHIo1U9itSMjfhJufcr/okIsIQilUqCEwEEpgIiMRQAIujBiBzCiBHdYhutjEMgxAgShnHUgo6NsgrCoMAhCB2K+6KFKlNno2ImOo7xRFBUxKr6ZRFbR5Y3fYBYEXRQTUgrBGZvnhEjOQQgAIBphoVEA4k3DiCIFYgdlCNoiFVSw0Vw4uXL7vDYrFYA7MNkcyNpIa4RCCYZoVMCJ2wY4WvgRAIcQGZNQSrcbEzP1BephbC2/4CMx3KDkaQZ5XFl577w0F1Dxu12AZEkMbHpZdhZ3XxYOsGYAF3gKhMmwAkCDLCyGNqIoOSztW545nmZXHqWtJiFt40ydGHMUQcIufctu049kI8pf0QAhMAAyGoQ9QEenTE4gYRCQgcglhK0dMXHrv7yny865WfecG3r5J9y7fecfknf+rqv/rbNyyuGRIfrB437vabzS/f/RbbOvXYb/zUTv8AvfQHY8ZzgezQ6Hjzg7++df4zi2Pd2Y/+9uxlL7e9J3HnxgCgZjbtpZqbLitc1s3pkCDA8aDJyzI/2RfDfnz3F/ZevXXXnZ/7O5/4QUp7/3Bxyc6/4B+SYbeLX/kTe2+j//TG+98S/dXt2el7PvvEzbfvdKlBS6UfOZHlYlAQx+kWSxIRSE4FLHzr+PEP/sH//pl/9vcOD/d//bc+8Ovvfc+/+1f//Du+/ft+7G/9/SsHK+FstrLqbjIzAgevMeVGAIg6BVvWIWgDocXN1ADgafqVwajFowegBkjdvW1WVroQRITErGHMURW8NQslfPIsIiPikS/+6EpgpGogRsR6rQqST0upaQVRmwY0sAgPq/fr0QUG6xsX1z7RI7sUETGRuQNTgDMzVLx7lYsEcMD01CMZkpBvlLyf7GufeNbHb9j7wg1n3/PsR7/3vtuXkc3IwoPDqfLzA2o9JAERwkQkphmY2DxUg1oKFyTEoHmrZmEQRVGk7pMqUocZRUSE29SmlCRxFe+IiNTzIlG9mIkIq9oNkZlronIdF/jUSKMj8DNU5QCTJi6qSK1qFNfeMazbd3dTNTMrOvaDFR3HVd/3pV+NpuDRte2sSW3btvPFoswEaWPWaqPPS2e+uX/B23c+8vnZE98hLxxK33Cr4FWJKmZgDrPEowKAxcQiRUSqMBChOj9viBtJjBQBDcKhptNbaa9fdhzN/PQewpNf/II9kZtr/OT11922/fyj72zzYJ+uPsAPnrdHPlvu+XDs93aTrF78rNvbkxfLrqW+005mvVO7e+7gvrc99Y/+yj997Qvf/Pf/+c/sPvFfT8nsgm8eOzb76sNyZmf72LUAx/oTduwlx183lvMLaosWgrBu1h9kVwPsCzgCJGYKMlUmrolG1LXDMDKliXsFWOVaRAYeQUJM4R4RjYj6hIuBOulAQnR3P8qgr59SYoYjoZ9b27TVIVYd2+PYz9qOiCooKhBhXd0CAAsGhTtURwCRmKsDhIS75zAPIOaIGNWYoYm6ZiaYZDduEBQI4cIgLEgajsQh2BC7YYCHhRaKFjkhKQQSijRlzADgaiLSzWdjxWmR5VHRY2OxSNL4kDHAHJJACgpkYxYHxFCGJppdK0wQQlAl3WCG6NKMyx7BZ22XR00pafHU8FBy4BQEvvZJirtqNgZ+hnPS1gdOQMLGOAUBoQO6EBkQQFApxVi4TutaaYfDg9QwIGOEaUBYXQWvZaShpYg0RFQjqtC9YdGI1WoQIZHEDDnniEAGA5ucmVXsNumaHAWZOCIwgoCQISEhoDZ2mIss0vXN5fu7uwCAQ8f2GCJSwCxfTfnw9s/8DHr60st/bOx2VOY1QhKWu5E6OLgg3cay26x7XFiTbSAQI+aiHnGanso6NMPihp3NQ97ftVhG3/HMNAdQ4QBVZsaOoWSBhOA69F1KRNRKalgisMJTgcmLAkS4URC4YytRgeoA6Chbly7c/vZ/fuzut3z2h/+Mb156JRye+/6/1P3226+75fmr5RUsMZw8fen84a/9+HuWJ265+QNvvffKQwcX7rv0mR+57nVvfP7zb19eePLY6oGUB43Z0vMm7G8JHdSwuX4/2gU1izCdHAY0KdOx9Bv9JW22CkQAMPPz7jxzdUj3/uB2f+0f5PTu44/+5OXn/v7y5IfGHdx8LD51zXte/Nh3b1/dWczlwx/95Gc/N/zwW/7sQw8+emyxcJRcxqPnDBGxMACAuSAUjc3trZ//uZ8+d/6xMupvve2X3v6OD1x//PrPf+pj//Fn/ulf/et/87HHHrVcwHEANXR2okAXNECISBFjoEONr8QpbAu8oi8BLMAAJ0ydmw3DCgrkrk1eGm4PoWy0M91fjkKgNYerMnudOXGSUsqRgjXwaBBdR9AMhA7hCITMgMysVW7GzMzVSFMLN/dJEW3qNUm7GlsqphERwwF9rSUBJ6I6f3bwSkKoUi8CqnTKCoiomjV31/DA0Tn10L7+kZvvOfn4793+1Nc8de3xfnuQEhbkLlPUDFd5uUgXWTU7M0NgQ2Ku4A6MLbcEkVpxQhHRMWMkI6sM9HraikjbpJQSsTRN0zSJu
Qqk6sUsR7r36T71qOvbyddbXV1TRCJU3jdURVuVBdVBA0KsI5jWifExrRjMKvxvHMc89uM46phzP6zGlaBszubSNqmVpmnapt2QGTB17cZCZjZrfuDqK97Vf/Ztm1/83oOXbkAgYqu2EgsAtsjgiTCg1GY9CCeMO0EilhoWSSTCzEzCgZTDtubzfHCp29nM2zed/dQnHv7IR2954Ute8+o7/PFl/PbbL33yJ48u4McvP77JzcHlq4tbrtPnPvelP/xPrnnPL3/+bR9+66/97tv+1//+T+f/B23tHZbZ5gY+ee/w6le87mu/7s2f+dC9L3nZGXzxDZ3xsTOn92080LPzizfd+/hDn/zMw1/7/CtMDpSvPXZs9/ApMEOLVmlldtDkmk4lxVMCbJp21iGi9gMpzWSulkspkpIjWClIFBGzthuzTh4h1SowrplIlch2JEhkZveo9Sb45Nar9hh3zzkjQISVPqeUEHEYhmEY1LReukfaflWVVsCIqtZ9vV8goggcxxzutSAgFJQp0WhCtdaHwqdKXESIjtLj4eiVRSL3CIBEjBZmJTgkJQ+btWnIWRrWYsuhlyZBooa5X64akeXyAIBmXVdUpWEMoxrhQgGAGu5qFiARiByWwjNxAcQIokloCMPQm066WjULsHaWEnX1U6r2JKEGMTQ8IAABQxHqW1WHT0g8bZMsEIMM1MyQpZvLashMGO7Lw8NETIhApVZJCMBMlCPAkU29qCoBIE3pMBXRQQxdapBBs6GjiACTuWPD1f6g6muJnDOSQ2QvlVcvYaQYBCEMZo3NFsev+eKD9+rpu2H38QVoSRtBwdaDlp1LX24v/lHs3CU+uh4ENzweKIpvnEB32jiB0oJrtW48sybf3D+/4ctjp+nZ3QnVdJ77y3u7QSPCYiZioZ20vWZTb9sZIpaSE4JV4xswIpZSTp28ZrlcBoAQpsRYlTdmiIyOpmo6ISjaWTeOo6jGheM797/jX93wrn9747V3XDp3/41EeuKYmVnGPd+7tLv9P/7mh3K79S3/+s2r5bmv3PZd9+y3lPjhL9zz+p3zs4PlELNIlprlIrb6Vb4mX0nzxV6Yz7ciD4CAksAyVfMXJ3AHTsPGaaMGILq2tbZ5qpOBntfzvVtfBadxdexz+zd/qNkFS9GfwE2kr1zziW84+M6zZ594zWu/9jMf/3Qpi1VZxf4h9AsMj4h6Lk8vLYJ4M5bVzs7xz37qj84/+eSxnWtK8V/4L/+J0sbm1uk8DO9+12/d/dJX3/LcO8MZwqeoevBwgAicAr+AmcPNItQttHgoRgiRgjVCiOwO7iBIgYEUY3isemjAQYlk1DjoV4omweuT5aihrj5Fefo1xmesHoMmeR4CIdfLpl5LnERCAqmeICU8Ijy7u4epmYX50bIz1vanow4YYGr3RKSYdl1XRaTuzsgFHatY2iHcDbyAqxlqu1XypabccX7zGx+5/X13fvn3bnnsz33xJYOUQAggA2QSJGIE8MiHh/O2a5sUCCVAQ7PlWWqIUFoRrELWcDVGajoZdCT8PxxlTZtEhFhSqhpzOfJQiQgdUd9q0AVgrFOhj35NthYHRIxwniiZAACTzToCBMHDwat3mwDNzM1KKWUcVLVfrcZxHJYrVc05U9M00mzMFiSMbeoWXcMNjy0iHCz4We3OoeU7t2571vKGr+w89unmwoubG/dtFKI5iJmRUBKCodg8wZoQfnQl0ETyRAAgnFYPCg6EdnA4O30TnL988V/8o+tvuvlrv/2P77/zHQ/97L93zQ0NN3//nz36rs+dv3Lqjpef/p7XNTN86sNfOl+euvn0zfqZD//497/h7/yDt977+Os/+NivHY9ZX/DG24Y/eveHP/3xT73sRS+9/typx6889pWDBx556h7w4WTaSPP07BuuffGZW7544ZNfbD94/7u+tHn2+q/52lddeur8aCOEoSkb5okkRRSQhzKWLMRCVMKYORdLKUUEgrdMRASOFEAYFfONiERYbJ0DfZQhXeVD5lU6CxG1zUXEoh4Rs9msDKO7N41sbm4sl0t3WywWeSgUU5bU9KgAMIpZVleRGuNr0xYWgwISsa8buQCrjGhz0zXeTSE8ggCZiZvkGoDOLMiMSBhoFigBiQiRCN2UE2OE5iLV7GsGIpKo4w6ZxpwtlAShEZ0IdBEe6EoOhZAjqFhJEGFkOGJIaET1JzODBSIAmkeEWfVkY9nY2FoOIzeSFRhRLZdRSykQRETqCgDwDGx4jfWsVSwZjAgghAFNAEAaAjSckKvRow7J6rApJS5WiAgBBKVpRFwAavolSCImDiSpEUmmaI4eTuYWxIwBZuFmIFBHXlP4NlAlGtQcOeYEFTcctUmH7JYO8Lbn3vJv/91/7L/jX9sX3nnxkU9uvuZPJjQLTwQ7Fz95x4PvtO74eOnhZnXJ4CSmLZUZAKCOQRKz7bCClbviMSlOIgBpdvXxu//oN255/D3/YnvzuTc8+4Uv+pp0+/OuPnV2Q6CUA/F25SsJniMV14al5WawUdEqQ7BhMoQnLpzDgLZtkcEZKaB40cq9RjPwRIII6harPiJkuX3iwt/+2c1dGd/39vR77z597HRubXV4iLu7A9qT17zg1/7Cr4H5t/zL1+ClR3Fn58y5z9xz3anmsfOP7z76lYubL7p28+DiEptN9Pac7W40euOTv3f5rh/aKlf79ph2C6ttPjceDsTgDuFBVLir48EVALmVS+dumy8/+4pf22/ee/V6fs57P3v8qy+88pwvoIEPEQ6HdPlwGBfB48qe/+JXPf7k/i23vvBLn/94SlaFw8xMjJUsGBGJmzGCm4P/9cs/d8M1156/sr8c9hui7RPH1TS7LTa2f/6//cJdL3rp7a+9LRC0H0YaqlVfADHAqgVVw92RiYAKgqmZWUWpwVoSVTO4RIgTOyAWY8DDftU6H8aBSjCSMVT23qTYPxoUx4SYRnp6XOYRWDGi7lHXwMSUJKWUBqqe1ya1tX9yd6iaxMot8HCdngkCVC/rCL7JeIcBAQY1NT3CtUyVPgICoTsJw1QuIXqIe2hghiVgSH+V4XUP3/WpG5/88J2Pv/TiDTdd2vIgZ3CIHFYR0IR04ppjh1f3LTIxq6tBzNrUNtJQCkJHsKLztjMzIACMrutoslKvPUWJRSSYEwsL1cN0+nw8KqMg1gQuiHUIR6X0I9ThgUcAgmFwgEHwesccE2IMIWt1eEwjUHMrauY59+M45nHs+34cxzL2jkCJmlnXSifdLKXUdKmbzVJqKRpCbBXCYMByg2z+ELzy7+nbfoM+cXecaYOCkYmQzBmREFpsAp55AU+ZQohTdKNPS6NwCAhk2bnxxstvf//5n/v7r/zhv3awcdtnf/QvnJ5t3vza7+q+9RV84qTedvPTh+pPvvWpv/unt9/zHWevPHX8s5+8alcvPvsFr/vLf++XfvJvPm7/6OWvfdVD54fm5mYxLPwGOvMt8Xfe8Zde+Zmv+8bnffdtG7dyavw6bghX1i/b3f54/1S+/wuPvW/z1u39R6786j0/+/I3vmZYln1bllERvY22
Tmx60EIgqcG6mg3Syg5tKBB1LE1KSGi5UJJSCgtXh2jHHMiIay/ANF6uXBlX0ymFej0a8UANd3fVjAAiOOQxDiEQU2oPD1eJpaa405o8E45ARtPDU5HS62veIrtWGHTN+KubJWYMM2VCQAhwNEdPACQYEUxJEhA2xTNiIlJmJnCFCLWWJRDbWWdFV6sVABUbF5ubfd9H4OZ8fnBw0Eor5MKwykNNLg+PlFIUNUFECg2LKaMLERnZ3KLmrmIyJKE6MgBCQYzxcL/r5vv7+9J241hEaLlcIjAhJmmj2kYAUaYCNSLqzKvOfYmQ2pZKmYaUFgyRGJ0Ey4TmrXDyiOAmFcvuxMjkEohayI0B0Aphwyk1HGTugQLoQslKJkIDMMuASIHVsyXUaClCbOtxgk14A0DgCmWB6mWAMDMtdtMdZ/7Tb/z7K3f/habf+8p//0uad7/Dv4o71wtTN1yead+0MajExtZ1D7374ef/4Ly/UGTTWKCMRWaR5sASjUA4ag+c0rDf9MvDEze/6Pf/47Me+sNnaXzfOf97n/5f73jPr3/vt3zPa/7Un1l+9fxBO2cYZyEjhSO2JEYwoCcgChKR2Wx2dXeXag0N4O4pqL69jVENGs+uATiUzESMFKopJdk+feoFd9/SrU4tvuUN+siPPPJjf/umz93TnNg5d/XCU6ef/z//0jvS8tIbfv4729d8/eY3/vHjW2fu/Yk/ff2V9vypO66z4SOP8410duv4yau7/eaihzzsc3vN5S/d/Pj7H7/hDd3q3Jg2vDkWlABpGsQTVesnunXDXhu61V9608f/zamhv9QfPPaKx67O799+8kdNDrV7oDmgvOm6AD+MY4cnV9Zfv3XL5z/72Re95kUG13R88823yFfu+UDTSKJJeVgfVzNjPCiyuPrIU/fc82lU0rQ4fs3J/YtPHhzE6eNpmYcmdf1q99Mf/f2x2YVbadn3GUcEQOARARjMjBE011sTISbBVGIR4WEYs2ZzRSRmqe+zh/EISlBKSZQKhlBiU0M42sfUyVsNx3ZzX2M10PHoAq5nTcmmqlNbQMRNamddIZzQH5VqhqBuIlJ3KhCOHla0IoqIyEPdfe27DWZOjO5eMMAjiayWy6Zp+iGntnELrtE6TBFR9cO1YyiRh4iu4LzZafavvukrd/yPl37hXXd94a98+OsYoGX29RLICIBpvy/OSQCL2azbBICGsCFBjkiczVLbAVCXWF0hccOCQEeLXiJiIWa2gKZpGpYAhyrRAcTp+HjaNr1uc2MEq95oAKDwuj5ngBKOAKWe5l4J2+gIAlMomyOEmhV1dfToc19K6VerUoqOQxAK82w2a7q2aWcptfNullgwEJgiEQC1AQFZRC748o/xC//96j2/s/3Fv7H6tpbICAGQOYFZyuFt4+AYSOvtZn0wAKABWvYrwilWGVPiWTeaPvr//8vbH3r/69/6y5/93T/Q3/oPr/zFt1775lf8/k+/ffjmbzhxchueEUc45xvS5sI/9o5rFW3nho1v/YtPnnzwq+9951/8oR/9J//2x979vl/fgJNf/rpjz3rTGcbtneuPHTu+dcMGvu3sf+hX/oJTt1Ps3HJsa9Ucp/GpXIaPPvau2295/tkLV208df7wqT/86sdO+qn+YLRsXXIYjChXFbTSBNAwd0WfObgrMWYt3DVBNI4qTQPhiCxIJTKzENBqHEMIqUqF6zPkQRPVtc45GTHnnFJSNUQEZCJjJIxomqaS07OqiNSbTJqmznWmWUINKSICMyCKiHpuanjBCDNkjKBsJiJB1GuecTKmaehNgAFNICYCaigIcCwWAdbIRpPcNcis5xDihbSXD/f2loccQMIRQNJkNU5NmK8Oe0ZBh6y5Sd1WszAzDAyIcAvhMaIBdALzaDWCsBCwEYUoGGIlcAASMiOQWbiVLG1TvFTiWyK2YiIdIrpa0VJTUhzBzYRSncM5CUVUbAaRFAwLbwwRsYQXmEjFAYyhbdvmrBaBdS9GmKaJHUS4qk0lLTgQEgk5BXkguAMzo7g7QhihqGqDCTHc1DxSsCCpKjA5RDEVaeoRKECtCDEPrsVKO+uOzxd/+Id/eN/erde99hW7v/Adx3fK5cfx4MpTL/Qrl5tufurk4dkDSIu+aCtw/WN/gB4P3fV9jXmgq4c1m83q8jA7xl4W/XkF6jeuh8DZ8mI0s7N3vObOL77roo5/7MabPpBu/mSUd/7Gb5xFu/WbX/viw243qIQ30CRAVUUnxihugliGvD9mCkhIDaaiY5tSdgQPNK2+fyMwQ6VoTRDAIkKodxUI/PnfuPf9H7mw2Iq/+APPfeMvv/uev/j9t376Q0/e9upf+ZFf2zg4+/rDt535L7+9Zdfu60VAvvVNb3rkV351dewNLmPMj73j3MbL8yM3NsxxzUw2gnYfO34jBc/6S5e3b8/dMaiooXDOvZRD49aka4dDTM1tj3zszO4jL8bd4epTLgkzvPTd8OiftI1Hv3V5+v39iVUApD3Qzbi4/fiZ83dYyYZiZf9//ud/9f/7ibde2Lt4bOfkHc9/9ZOf+1AsupF4IwFoipI1DvaWZfPk1rnPPTT0NjtNi329eu7CSPL6Hbjt9HD/6Zc8eHaW8J7hsDx+8X682dFhmXOXNBe2JFIGsbQkbwG1Ti+1JHRwCAElJm5drYakeYxSFx5GlJzcI5wJCcFCo46p1JgxwttGahoMNEyQ1DIAFEVJKcxTSqCFkUqACzpR8Sga6N6Gm5YkbR4LM4+WTT0C2QG0YBgFbi42L1w4nzoS8nEwcmZQQq8kcVOwCAt2t/CMHqDs7rlfoaRxVRDY6vygbcMRmzSoB2E2D3Pycpg6KVe42Xr1U/LRG7764HWrT9zy0Bseu0PRD6kkQTJB3U5+ZQCctalifYihm8+jZDMDkGQ0r4plRmBquUmEBIKILAIA3Eyy5wDv2q5OI56eK0PUM8jMgBCFixYighp0YXyEhPOpeLB1cW1HQ+aj9KcBTFDAw9Xr0H307K7jquQ85LGvZJU2tU0jnGTWbTaJ27aVVig1xCxBrWMAuGBhkawr8lu746+5ePuvnfz0O/DDf7V585OrfWzdiswt7c1W4kI6UewRkYXcvaLXSlZvhcZxCCUX04PxEw/4v/jJ65YXTv3cez/373/hum+/69k/97mv/Py77nnjW9qz99xw440ZFNIzxortqthWOr6Vt58Ff/fHl7e/Yv7Bn7l6+O6f+68fbry5/adePZ4//OpPfVK/fOX6N9524uUnvTRBvLGd02bzA3d+0/bm6S/977eRXnz2q7/p0dj/2JXZo6tXDTi77rt7mO3e1+2nx/d3dXU8cBgl5hY5MGAmMkui6hDUEIOpUgRScSNJUCzQCf8/st47zK6ruv9eZe99zr13qkajLlmWi9yNC2AMphdjICT0BEISUkkhvzRIQt60XxJSgUAob0ICCQQIgZBQbIOxjQHb2Mbdsq1iSVbXSFNvO+fsvdZ6/9h3RJ7nnUePH3tmPHM1c85Ze631/X6+oqCuCLGJjZhDb2oWnFrtDaMCEgKCqhITEpmoiiZ
S731sKmTsVj3vCgYUiWhGjhRRzVhVkgXvQWhISklR0szEFHlaXFzM0JXKYLIs+92VolUmIEOOYOCgjFRbzlhh5xAAHGVnDBFxSk0gRiPxJGCBGFRUIzIFpqhlEytRBDJHCE0C8stxGMp2XdcZQcdOidGTr+taUkJiEcnbkZQSM6sZZ4g9ICEVqbKEZmyACQQ8ZpAUMLGZpGgALhQCms9zTBZCIQnrlNmtGrUhsmRAYs7Q0BKYAIOBtyJZ7VwIodCY8kw+CohZqDEAWjAx46RGCMwmElHEzCNbqokcgpGYR1RiVGFCAKwlJZIMHCrFUJu8UU5qgFBJYkcczVQRidipgYIWRSulhrgYpn5HfQMEisnTcMil7zHRhJtZxJ4tL0APZws3eXzvZQ+e7Fmz8ac+N/etDyw9/EgqFtGVDxw9+syzrxisLMHBwy3ftpjazuqlBiSUR7/97FP3La+/vGslrMw9fdUvJ9+BcgokRZWEDgwwNUODtUcf3X/xKy7+9M9Vk5v9wvIv8rq7ml2t9tjj372VLz1r7uCjr7zoqoXWhn4zR57GdTJq6vgyggwVMr7NAIRgmBpGrmpxHgTR2ExjuyybOrEIACoIGakpI3tid+D04MmjcXJD0U3wvr974t4Xnn7zb33wjr/5iy+//i/X9Q7/7I9fUD/1G033+OH6ifGVU3Fy6/YXvWHrTf++1D94bO0OmHu6tWXj3Z2f4E53y/aLcPOVK61NgISaOiuHxo9+nwB6ay+OvqNhLEifUy2hw5rGFw5Nw/Al9/8ZnFqpKmBsuoadztjODz1+7ob182PXNGt/Yc1RrqP5ymSiVW8NH3nVO99x44eTnT02MV6OTW7aumFu8eTp04ONm3fghWnf0bs6rmRoo0vQIPqp2dnWoi1994nbzXtdhiH6GlYu2rLukkvWHtErdj3pl7AaT5e/+Dnj1SzM28FaEho1fQVASTWTiYGANkkcMJlSduUygqgziTFJI1Q4xxzrKgF65GjKnKHiBnn/qiMHKvpMVzdAVDTFHJWrBGxmjtBzyHxmxxxC0CSaUAWiiaKYIkrSaGDMYIOqdkmS5qA9YgJlYMDl7gryCLPiHZkBqTNUMDDTHKauIKrqEDOzAgBEBFUUySAliUVoKagLblAPXCiIaaLsNMNGhQwECg9x2IqdHzt+xd9Ofee28+avWlycrDdPRu5rN2CRilhiGYInMGQjIiHSlFB0NEYwoewmcgR5b0yUNcChKMwsZ0oiIpM/o+C3//WmqvEMX9O73NyoKqpJ0jNtcbZ15ZAJXo2ezanmmgnhZgRYnVFlAUiKMdaq2iSRmDM/vPc+hLIoy1C44Fves/eevdPsEMvTMwAwLUyNiyE3K1q9rrzyP3qPfNU//rOD68pQxNhzrjSEUjyKwP+a2OSjA+axHBekQ8YJaJYN+iat+qufnR0ubf2Dz973sQ+Xj9wOLzlvz39+r/tHf7h2YVdrcrobxjy2Eeoz9TcNtfC9Xj2R/s//M3zJ1Z/4l8eO0I+0nvWGzectwI2fSxcuTf7a1snWSv+j+x79t3vXPL1j5wu23g8PNRzX8MZqkU7efc/KnuNbJiYOfufeOzZeWZcvl2jWeNfy/uw1J6qS42ObYXc3UVlKNEkIlnM8OUEyA8tx5AQECGRooqMViFqK0hB673IWAhKtRt5KVkuZKKwawMgAiUrHTd2gWUpx4/qNJ0+dSiIhhKpRE3POIWrKYemootGieOeJ3eLKInsnpswuNjEHFJZlScxg2KRMulDNzmPQzJiDEacOlDAAkjF7V8fYckFEoqnDETsppSSWjbCoIoronAMwUGiahohGpgIzEDWULBMTEVQUWLUwrHrNnXPZsQDClUdKhElqUohKio1nEMkdJxGBWh56F2U5rOs8QgsODDEpMQbnnEeqmyaZGZLLGC+xxMDgyQg0ISUDEfCgbMqJFFUh5TzabKBGU/WGrdZYr99vtTrDpkZGIFRTl3DkYARwREE5O6eSAZghgidylBX+QERARkCi2TlCIJrn6w0ulzpbuUUFJhnO1G4Oezw5FU3l9Jx3Mxec95rx9a2J47t637rx9FP7v/873xw/9OADt/7RVMsNlmR8YurJuVNPLi+c44tjgyphfxI6A+Uw3h66qjUIprF14C5M0q3r9tbr+huuFKnEl0O3Na/424FsZv0Op0+XW8oP3Rk29Hf9whufZ/iyYvOtKM2JQ+9+9ksPXMyH/uYjm179vFOz500vr9TFEpdrF6QvKj4/ONTATDQBADpHzknKhRmD46qqTBEAmFEVgcnjaCTp4vKpNbJch84F1Z0XbDp01mHbd+6Pf+PHP3jOY7e88pM/Hltf6u08q11b2V9JwCdPx22XXjn2uj886+kjxTNe56c28sQMAFiK8yf3bTn5g3PSwuSJ3WX1ZH/YBwlNKB5f8xccZejbtR9HPwFg7ZWTarL5zk+Vjz1pg2GJmAAlKgBP++KSO3/iO6+gyd7NzQqgKzbdpxf8145zfunvPv7in/vYj/78z938D9e96NXXXveS+YUlYmeJlo+dWL/uvKHVc8ceDtxExdDmYdO02hsf+Oa3Dz6+ryy8s4gAsYlhuNJvvfiru4PTfWmlvPYF1206K/zL3XdNrjfTVA+GGKMYJtZEBpJSUCJWQBNBNENFQpUUo6SoRigKCRRcQLUIGopAq0bevFWFkY3CkoqaCgAg+dz9ZRGKEeV4CFMiJELnfJMFDjTaewGAgsWUxAQSe0coyl6SAgCwAYNx6Tw7qesQHABkKkV+hFn2Eyka2yq4xjwHQKW88sER7V8RWkUgB0CqqL7w6BARkjSu47AKE1qbc1VplRbPWjr3RUcO3r7jwE0Xrf/Vh7d2S/YwWfnOWOrW7dCJTjWxRwMEsJTEEzmmBHAmMAoARswpNVcGyK8bLKvqVEEBjCFvBM/8VDNdIW8YRYSa0VQ/11cVOyNAAzRVXX2sU2YNnpE+SnaFJUj5h8+MBJpEkzCiWQKA4HxZliGU3vtQFGVZulCMoN80coPBqqSaACIJaVGktIzDq8cvvmx5y6MbDtwUn3oDXnGcKwBASVawi1FcRtjBGc0RIgKQ4IABBuO9cCp1Y+usTefs+PKnujcfvO+3f+eav343vOof73/WG9Y/+ocbxvzi7A5JEWNfCuf/d3YHIHc1/ejrB6+5+oOfW5jrQ3vLhAi1p8OWl70jnZgHeWDn77+9+8v7Dv/OHfOfPvDgvcc3XbadL+vIlu6Jzad1e9s2nvfUxul+vf7x45t1GI3XUpsoENbQcnF50zPLA09v8AOKRY2F0HL2WZNajpgEBkVjy/EgLJIQEFCJCM04KhNEpsaMmWKUFgeUZLy6P4PRD0UAGLCpajPzPngOp06dIgB0LqkiURIhZ84xEAbPiKiNtsFLTIo4NtGJmm0BMj45Ucccv6LaKDnviJFJJK5aG0a50nndY6Ae8tSKAYARU0rOBRklHIqqWnYlAWaycb4IiYg8mWRflQKAyz8Q1YyRjyKKesYcaGfyvDNVFg0dcQRBEAKHJGCJIOAIIIIIWaxX+qASGYGBkQ
1Nk6a8mFGlplb2pKpCkLNMyEBQFQBJAK2pEcky5Z5ZFM2BQ4CkCpRl0JgUcAQzkQgKlv2KKTBLEmJTZMsLAkW0EcDV/Ei6ZmZgIiIaNVmOmzIDgeyoJLAkAgLawvZyWFHhfmy55ak1a2qc/u4DHBcmXv5TMy++Mh16ove3f9T/ymc3TG/4+Ov/brEY/90v/O7TAzlK7MtOUy8FdV+48/HfecVz277uDuoBli32jdeipw0au6KWFFzY2JkeP3j7kxuu5u5cbE1J6KDGIEM3Pu36R6rDc3ye7Z2+eGcL7IXXLv7Pd9+09fKb6oexnPjL9/3JX//lx85718/e84mP4fPr05sualdB6m4gInTNSEygo1x5wvzMUWVfejOLMaHaCARtho5XRZdGRA4lTTzwj3987b9esf30dAfuHHv9+7sXXaX3XXXnR0K9NLzxc3rBXz/h9BjoCZg6ieOyh+Dyd7hzTk+cfGpx7nj10G10fN+adsewM2x2jcfbwa9bKbhF5aDbPfnoXqv/uXrOT1E8BG4M2Y8Nuq7uthaPXnjf13HocbK1HPsT45smxid6Rw5M1v2Ta5+14eB9b3rnUntqbM/M1qePHWttXL+1v+EXv/AP//pj7/34q3/up2/+4CWnn9Pv98tO6dkv+0Hv2PHt264syE4ffqw9PlGlQVlJ3T1RdY8j1B03vVTNi50an1pzzjNuuP8kprl51zn7tW8+a0330Kc+//2Ji84bVHU9rE51jwVf1mYYwAFjg74AIULH3nuH1vIeNIHjRo0ECblSbWIqnHOAapJD6RGRVjs2YEAlNVWifNQ2QEViXO25HCXVJAkdO8cGQg6JvIgxEiNyZrBZUjU1i/XAOUJEDp7ZmxmDIShpU/gQMuFF1XuPZkx0xsVhZC7jGJEMjQkwW5CZGUZyeTYiNCZ2ZSmGQPnDDGaByYoWFCzIU+S9SNnwj/af84OlA9+d2v+CzTsumJ8uWzyIiK3xlpoyIhoSqIhkfC95QyAksOzAzxRbdsTkcsTsqF6eQe+CIaCsMnpVRDSXzaw7J8oGzfz0HL0BZuOpEaKa2qjrzUpzVRVJ+UuJ5Ux4AlQDoFwA1IgYnR+ZcT0XRcv5IidusXf/e0+fB+JgMDLPKDYGXqMXS2h1y70xXbGr2vUFuveGtNOATJoBWUiojlZf7KrlBkba1Da6FaNyaf7EfHPWhTvbm8vT//Vw/MRHLn3Pz+x/bK/9yfun99xLk9PddROTR04MtJ4oZuvZtelg90z51bHF5Rpodvbjn9q9stjjmbOG1ZghJEU/4dOK2YPbn9j9tztfeu3sJ9+27g8eP/7Htx65+2n8RNobxo+d81DaOGnTluZ6k5f9Ls3M8MQm8myhi23Pg7FkxXgHy3Oet7jny+s6ISh2UcEgJomcmDzTKO9FJOd6rb4qAWJwRA4opYSORSKSEUGMNYJlmxwRpZTOSPdlFIGFahnpN4rUJfbksOTQLlsAOhgMMrZirNWuBsMQQlIbVM0oogC0Hg6Qgtrqt1BjIJHESIkMcXTmA6Ocr25AjrmfKgIqhYMrakmGZkmESVUpX09EJmoGwOB5RKRPKTGamNAIWmd5HkM00lkjoqpEwxyvNNJCZlwkgDIQYgOSQNrOOQ4xl1E1511G2mdsCxDmXAcky+lpCQ0BxVAFmlQbYWbhZWgJAIAJExCSgDEViDZiXoLVSTxCFGFAEHWhAAMilyxWsfGhjHVThGCmbIDo9AxaTnOSiObZVBQLzMCQ7y/PLAASU07UzhTfLEbJvwunThar3mzLpQ6u9K94+Osbdl5UbT93ccul668/b+VP3ysf+5uNbdedXHPrZa/YdfWbX/bJdxQLT719Yv37lhY09c++4MJOe/z+B+/54Ldued2znlm6Vh0xtmEN+H6r1Ni01qzt2emFhcVDTz6p7hGOOnjWOy1VZORLL64zPPTExbLr1Hwa33DsoaZ+zP6h/fo1/SPNdceXzm3KA07uuuuua6+56m3veNMv/e5v7/rofxzn6TS7rjOIfS6dI7MEoiISvM9ybjETU2/YDKui8AiIhN65siyXe11YlfjkG98h4f959uGLsTvrJ77Vfsfft/7sufVNV8m9T/zETz566vdPb7ksHeoAQEdDsTy/rjpx7eWb23PHer//HGqaGy/6SbnsJUuukVpLOB1mrpgbjrWam9bguiHWE5Nrpi6c6R2+84Dp/GU/5mZD0MpXvc1777zitg9D6lbOij6wptScYLJBKxWoBy5+2bXf/BAYJbHO8QXqr6w995w60Lpq/a/d/Kl/evE7/+lVv/yWb/zJVcdf5kjBoofWxJqO+fqcC58NvcHKyh4X2hXQ2Jr2htl1DoolS+rgR178k5dedh1Mtk/de+KcnRtf/cyZR+/97lfu+MEFz3vR6epYU2u3VqcJUl2BQa2kHhP5JEzmQ2EwCg7SFI3JABUhSkoAaiaiwXuHzpKAJxoxH1fjjxgAMpRlFB04UiSDIIIgGKESeiIBc9nKzMCIoJiUGMzByGZjCq4IZGAmKWkWWDGBI0BzGhOWXiQyu5Cjv1WzC0NEVEfpvLnz1NWbf7UjzGZhCy6EoiD2RtyIOucCeSJiwqb0noAiEBbNRFpO+tx03msXrvxM+64vzT7y1/a6FVmcLLTftMCiYWTmBGKElsw5Z4QKmiNccj5jrp1ogAZ1zHw+a5oql1QcRVbkigtnCnB+WiWQLH2PMWbha37epZSSiukIQ8irMY5Z6AQAuVHOnwYACcgRMiATBufZO+c9eBeYveccc+lccD744JjRiAxRVlnRkM1OYAogBhwxQSWEReKl4eJ1xcVrB2seXbt/16lD22lLV6sAMNAU0I2aegBbpaFZMtFkwBQMBuvPO3/D2PqwctNDvd/4pU2zsPDn9w/2P6Br1kyGKamW7cipgbSmzrkkzm6YPuc5Rzffd6YANzd+o7Vhy2M337b82uvi+HopJ0ETDhZxbFpgElYWqlu2nf/vg/NvffTYTlx6xs51H//NRT6udx+o9hx66jtP2r3H6MHQ2nrl5IZr3fRZ5MYQiKDYZGlRrZakLR6fPW+Hf8nT+77linUeAMBS0oolOAJEFmAkWZ1DOCQjlJQsAQA0JUpMZcJSRgiSShP5QGb0v5xIP/wnk6gpYJNiuywkNibGBElqUNdA1kWydy6lpASh6FSxUTJTbbfHhv2uz8GUzCbiXM7hMUPIWmhYxSSNbkoiJFKzJkZybGbkWEQNoW6afAFlILtb3XRkFRU551cBF4iYzVdN0xABAJlZzHajVU+gmeiINQsEI5YMACTVSIaITiEheSIvGiFRVFd4IJKRQrNBT4JQFBSjILL3gVQbSwCJWJMBGrCAqgkYMgFSgaRiyIhs7MRyuTAEdWYpqSopADGypMwOEA+5nQB22KTaOTeURARIjmTEmVrlzYEDHOGDbGQrGNmc1NhxPeibGcAPYXyIqE3TzE5sOX5i+113TO979AI7Pfzyf37/hne1n/38Ez/y4sn7vj89tbam6QNjUzf96PsvuPcLV3//c0vFxEta7f9pwcOpfejJQ6//y
Z88fvrEsSNP/8/3H3rFRZcUbZi2chn7oUSyYvnIiWEzJMCZqelBb2j33RLuvj1cer2ee0k54VxDy8dP7G27eunk0Redmqx+/dC13/Lt6qGd+r39+2f/7+RTT1i5dla09Zl/+fwddz784V/+zd6e/ac8dscntB4kakNKOeo0zzMAcygnerZkSUWJXCsUZhqbxiE1krKhNMZaRNx4qLdgmnG22138950/Q5M7i1feZa/YLAc26K4dX/vjrTe8agnCOfz4gd7WtevLzTx1eqIl7cn19cJVe7/wyLqw+dLX28IRdqZ1dXrdudMHz7XFB9VtGeqpcR7y+qnzcXd57weentl69uLC9qceL0/s75K0JifTSrc20pIJGj12aMZN7Nv5wqacuHTXNxwIadEy9GCxPd6GzmLq8cC9/ca/+tKL/vqzr3xvfWfveXvf6Ah8JWHCnzx6tChbV137qscfLJ868nBncgpqfeiJ3cYOBgsveOELw+aX/PmnH5mcrl74jLNt4dFPf/5Y5M4b3/SWfYf31L3TSXRuBTbOTshgWKCr66GBU9KUmpnxsYKdDiMDCdQKgDE5g5QxjZLAjMk5QjYkzHD6vIYcPYDQkAmSGJgSEqIyoHeUUxUE1RHlGHMycs6DatIU0I0aBzUVA0VGBgRNElW9Z02iRgbKxIzoiPMrYgQaIQjyLm7UqBHQGU4WESPkDebqdQNoZrnt0yRMRoQTrY73IdP+auTAKTSc2gZNKpui5HSq3X9HeuldKwcen9nzzZX735iuOdKcDEEXlVqKCSQnNXliAgTL062R2ntk+ReJdUOAAoAEWc6NpmaYQ75oFJ0wsm+emdkOqgEiFj6oqni/ugZOVdVkP1UjSVWDY8Y8nRZmBsXcXYlITBEAEhOi8+w9c1EURavE4JCoQA4hhODIeSbnfWY9Gdlq+liOpQMQ1JSigaW6UW4qXwcr+klVhmtl8nl66Zf4/n8PD/1xf1LVKdZofgmbcvUkpiODCGYkGRetvQ/fff7ml42fJftvfqj82AfXTy72DleNzPn154REQbpoiO3Nk9e9otl0bnPs+N5nPxuLF5wpwAvf+e7izuu+efVblzZfAuzAFJBschNZArXO2HSprRc+/4ZO2b34SZy/e+2Tn7nGn7+jPsvjBb2tL3+kaT9cHdwZDj0fh1PQtEAJDACo25VEnqo+d8Z8I5df+sxTi/sH3cPMCJlLgYxMmVjhXCgKl7fvZ6YFisSAKsDoklggGuGlyDGzpZFcbvTJq+y2JkVR8N674AeDQUEUgk+i3lEI3jkHiqqqSUDNTJOYiMnqI57JI7KYFAQxiQKBKHOeMFmjmvXwOdvWVseJgNCYBmUArFImS2COPc5qSh4xKEZGxJxGmC/REEJKyRHVdc3AGWZjiLD6N8rlJ++8R8dKxMCUTVi5HhKB5oDt7NlXdZ5y2iMTOeaUEgJk9T4AIDLYqssfTTQ5F2KdQLMLFNWM8odMwAyN02jbbmoJQInQogKRKrgQqrpOOTCbgJhUkiNOamiQsa8je1iePRGoqQHw6g8kH7IBIfs4ACiqOOeAWFOOmaIqNtrYmvUz/u4fnDxw8CjAiU7x9Cl+yeyOn//el47e/PGTvWZ8bN1CgsJ3b/6JfysGyzd88V1Dr6aDucHgF8rWr4RUx8WpqfD21779rz/6NwvLSzc9/tA1O87hgGOEWFuI9TJEZHJM23acnZZ6ZTQA8HSgOX5UHzs1/cbfvq9fT3em7/+125t1T+K33+3iZUvjN7XW0kGs9APefjz5OKFV7SY3H9v9xC/98/v+8f0feujz37jyhTc8fXxpJnGfRroaW2W05aELOlJRIvKe67pmZoWsA2VVHbFKEN1YEEiwptVskO/8yvK7+37dcwZf2yFP1GH25Anc9c3HVmbx/N/5E3hqBma140J7ek1nvc6PT9ni8asbdHd+5gfUOu+Zry1I6zryGK855517v/2R/u473OT0oNECh2losmmNHNl72d5+5GolSBHNutZGl6RyiYcqNO6h7u++7CWTi0enTzyoncInwGgd6Gw754p6obe01Fsz027jxNu/89dfqia++Ly/WA7zL3/o5930xJGTh9sxlO3wyL79G866KnT8vu/eMrbjAjc2PbFuA0g6eqp/fPjQmrNnrBp847Y9m9aXV132DGP3+IE9J47Y5GVrBoB79q1cNLVJUxJAknwoBTR2zhFSjsJARE9IgCAZZ2dmIqaezTQlTSGETLNFPLO7zbcEFeQxqSc0M0YlQANVoCITVZghH6EB1cy7oD88LxuqZbMvIjpiYBwfH+/3hgqmWcFESKZs5JCYAG20RkMCFUBEtWzNyXcNII2ww6tz29VLBzh/oCgKYBeKApHYOVVtgYskhs5QGUwVvOMV7m/Hbb+wcN17J/77MzN3XXf8vDGYGNLytFKDyIaGKCKMqCIAhkzeMwHmMVQSaZqmqRozEwWRyJTPAYTIuc7lMCIYpYWnlLLOR4HQzMSnrDgzEyBU1dg0ZohMYpBSAlHvWFW9y2mMCmZqlNKIxOQQvWPvXfBFKIsQAntCRO9K55nyiQQhqqQkAAoC2eCHuopIAq2q2kTrQbXCXWnUJDWgCZVl8RXpgq9MPfJdt+vhdNb63lkJu1pMDNMgo9oxzx0yd5FsfHz8ie/eu3Kk2vCGybn7npr/0ld3/vQvH//P1pqVm6b85mLtcGzDKTJItGN+/Jq63DS/fkf94tc/4aeO1KOH7/6eO/KXB4T9zOmnQ/cUmdXtyTL1nec+jxv5prUBtF5eeyn3Hx7MzDxx8RtqPxuV3YKz5fX0+Dnbw+vmKBrgMDpbnVyCWY8LTk17+bh0tpyztRz0lq++4qUP3vrluphDREeOAVkBENQwiVHA7IIDQzEFZAQgdJgaYo6gdY7wEGRAJ9bkyM5Vdi9kRRJgy7sqmaqCYrssTJJzDj3lJEMzdMQjzbxJ0zTEjCpqQkj1oFbVqlZXuKoa5L8NMqlqjMlw5JMZ2Y4FDFRWI0+YOcWoqpoD1piZsR9rBjYzNQEAZgQjI2Ty3o8myWeEjWbmvR82CUbvpDMnXUTOEIzR/Yi4OhBScMSVKLEFcLUZgjIGQHYsCIYK5BGxcF7R1DBFAEVCM0gAyoxADiIQIKIJwqi9BzDVRkf8YyI2dQDGpARKBHVMOYxETGOT7zpFRs2PC2Ay8N6pqndeVWtRI4Jcz8VM8oROzQwBGQkYgDCp5nMrquUwDNVGcTQB8IXv7dl/+uZvTp21c3B07/29uacgfefEwie8/4fNW3c2bve+x1ybbnvhe46c9cw3feB6R63leFIwHZqduW4Ar1b+eoKHf/DQjguufutP/OS9j9677+EnvvrEE88YLj1r2/kToW3QG0+um2oMvhcH5URrWMcEpi2oW3zRAw86ixu2XH5oy5P9sbP84g7xKzx/UTr7P3sblMDGXc2vxeV/68bCWpV2pjccfuToH7/3T5pB99TK6de/7h379j0JRSsLApIkZmZDIpKUuhJd8GIQYwbymahhcCSqCtJEZkYGR6YEwaEpcAf625bvuIQfRIsR
N+i4Y4axx+4Pp6GanN26Zfqphx/c/6G/Tfffsu3QXLtcF6H3zFi2b/3MD7C38/p3jvfnyXs/u+HHfvbv7/vK3849dNOayalhC1vUWtPy2+97ypZq8rGTTD07XeoTEAUzXwJjd0VcOHjJa3Y8eUuVZNo59ckMmq07jq5fVxRw9tlnrawsFYVfWl75se//ytRw3def9Q/D8fk33PpeDqZtODq/NOFtXgebt11+sLhv+XMfXhvWzRXj0+uLxRtO2w3/XY13edDZuufS2RvP+d6dh5uVfpp0a6ZxeIplB5443B1eIJ5xGFPBDtQsxTbxoN8tyjYVWJOUxqlJLvgIygIxRc9UlAxqAErOGWJWGCGO9ChomW5FiOBGkBBhBGYSADYyTY653R7rDwZsmLEbAKBAQESOUSBjIkyJ2GJTu6zOEEkqRIQOARCBDYTJ5YmfCz7GRlUdlYiGqGcm4oCZOLPKyM00H8QmimoOL4FGmrFWiWhFqwBDDj6mqqihNqwbA0KDGpknsFyR0y8ur3ru0Ue/c86+j/Zu/79zr4wVNRwlqpIZmDTRcUCDHGpUhEJEYowpaZNiVVV11YCZGsYYCc1MmVFVDYmZKR9hVlPHc+KyiI5NjGsSTcqekqTV4Bd1zsUoAOBcHiFYbpSRhJnMcNWu7Nm7oijQNITgi9K74EKGIKhHRqKRWURgpNPJjNqoCGS5lc9rL7KmqU21qquBWyIpKqrLJAOlWA63DqYvWVr7yOYT35vf/4b+WYdb/YlFNBr2ISu9KeviYmoQsfTh1GLzvLe+dLgCT/3Bn2/eeb4/9/If1IPnQ/+8F9euPUQmaI8DL03AbXfsuOGuzS9/5KSdjD80IXWa7rP86cvf97bx+X2f/rkvVp3pVHSiH2uIwbRoVlhrALxj2xvJXhdin5WVnTmOo4MiPi3qxTeAZ6RsuQInv7Ky/f1LO5a2L521ee3PL57obx4f33r5VfNP34yRgvMuGaJy8IKggHU95FVZiollGZaBAbJm07YoIgGamIGIAhI5yxN9gFzGVJUNAnIURIckQt4758wAkXNhyyENquaCyxwVNGsVoWkSojH7RgUMOt6L6bCpAnvNiGJTM2OgHE6vI7P+KjI9pgTmCCaK9kq/lwoEogIZ2KmOTm+jdQgCM0cRZg7e5zY3r58lpQzLY/5hoFmu97XUjgOvvjlHAJk9gZUTB+DNVV4ckkcwj3lzQ47RUGMKoYimjAgQxGoDJbYUUxIRJVVKVT8BgiNC55A8sfBIwKimiKpYMwISgxKYc2StoqySSJN6/Z4LPop45wpypGcW2SamMTVi6oWASBg1GZk4MERQQgDw+cZVXUUVYUrC7KM0koQcM7ms+ZiYmNj7vTvK2Wnon3wiLh/opJlqgltxheUt+5/65LrZ87Zsvn3q/Nte/u7n3fSXW/bctoS6fMVLirf85I7r3/CNt73k5/cduKUVvvPQPY8+vuvnf+4XXnrlyy8867I777vjgSePLpwcXnb25i0bZ6msxXs2qua7WiiWYanX8NqLptpnPfnyly+0rptfs76hF8ze+4vK/eHE3qPX/IGvyQy6a2BqUcdfwMNPu3Yi1wEqsWz8Qw8+Ggp+cM9TG2a3XHb1844cP5YnLSqSCb4+J/aQbxKUjgEZUOvYGJOqoWLpg5CklFI0VyspWD/ylJdlmtki9xM2DbXB9FR5rpcH4kK3PVGdeGLXnk9+Sv7ry+uHS210jn3SfgmFI9zpcfqe/3rAe3nh29eBdYZVdyxd8dr33H/la4984z/Om39yw/yhHUPsLvWaonJ1KYUPKUIoWhr7UdsuKWjVGltYf97i2rNe+pVb2jMz873ejFHA+MCBxzf80VtnL31muvK1577qlQNoUdn0ZfjCXb8wWU1/7rl/vuwW3n7bn1ZLAGPQp7HlU83YlPD0psWHfnDR2ZctXrFl9zu+reeEhgthhbLb3Xnn3tkHZ5ZefqGdv3vf4tSYP2GnLemY7xqCIbh8RGU1wcTBQzIzRnbEoOaQMZN7DLz3hgBiJTqmVTElGgHm+CFkYqa8k2fiYDnN1BlCDlggp43mVJw6FKxJaTWtiFTRjNEhKhIAmYE1SRKSAB2bO8VIgIpEsRJl5KJwLlRJgtOSCxwpjxxaUoM8oGMiZDWjLBMb+WiZIImaOUAl8MSBnS88kPnARDTCRYCrOdbWgIoYOiSrrW6aSCk28FNw3fe7x2+dffRFKzuv7O88RU8VbhxTDdF5dVgOArSYyoDDYYoOKRAKJLUmC7VSFaOCjQQtQORUAVlTEk+MiKrGzCHTFbxD1qoZEFHhA6CWjk2TAyJyDSihiZilpiBywefTP5FTFWam0fOdmFkNkX27PSYSHZlJQnaGvskr2UqiGhBmZSOqsWEF4hSQaagpEFuTyPGw11fVbtVvpYqowaQVEXhemh8iTz7v+I4Ht3W/se7gNU/tnxj6bliAirFESrFGVzgMyS/pysb2tKyEdefu3LR9w6GPfq63/0F9128/eceX3AP3bb2h7yfVOaQy7R6/4Lbwo3e0X/s0nMdHpdObv7izMnn0qVwnvTbjF1y69IxLZ2/atfOef33khb86Pn+oak9aZ4wklrHLGK989COO3Z7z37R/+pkV+f+dy1YDAHAN+aCGASIrdsuVla03Tx14w4kLbm5Vh/am6o823/yW4Xsm+1dtXrdj76kNVGGrjaU4IyOCpOiDSmJYVcAX7FJKZkkAqGA0KHOIL7MYpCaamWFCcpaMiPORxzFbkpocASCNqikzIzs2IxAkNFVRASBRHPYbVUpJHXI1GBCREcVYMRLX3HiGpJ3QGlS1ApEjNPJJGp8IFJREDcCFvFdgQOe5agoKiZlbrQBkIkLo0RQIfWkAYpSVZSISzAhpRODKdnNFA2M0ZG6aKhNmRCzPqAONAWgOnURESaYC5Fy0hjKenMwHAxAwAjEuvYmamJi5IvQlAgApc2qSJQPAqKTEampKjr3rNE2TNOddYlQjdGTGaOCSoqgZYFBjQxgf70x02kcPHQVwqkJtLyAoQILJmvUbNp08NQdGBIxmUlXsTIlTrLKYI6kmNTAgBTRoRJMSOUYQFVFVRC8i7IKZWWyS9Mi7wI4FewcPrltZOUXyQMCxOFPzQqWxrFrOwU8cOvzpC555y9s+tuHwQy+98xNPP/c5+mM/e8k1rzxxZGVsvgev+cn0t3/2pnL801W/16++duuNP/rqHz9y7PBzrrq6e0Hv9ttuH0rct3j84s3nlGvP749t12dceHxsc2/ynKazHgBQ0vTJPWOLe7fsvvnYM+4+cu3DC7PD87/6+JZHX3L8otuE1CEAWavQXkSesKqSVHfNUMsWhKKD8dOf+qff2rimM7ttsLDcNMk550wTSL9ufBFakZx3qW4sSO7AWCFGRaJqOHTOkXcxqVusnRmeGISZ1mCZZlqcauE6zALQQbtmhr7kek/tefuL/e6D26w3gdgNbRRFpmQKAVOdMKULNm9dc/d/73vkrqOXXDe35pzeRDM8dbw5ePKKAw/uXD4+E9rDalgEW+G167ACqY3KGEVLGjO
NsYa2tiU8fOlrXTPY/vBtCn3vPVZL9at/b0uvV9zyj3bvzYv33fitr543c/mLZ8+/cvLc83Fqw/OO/nTrvu3/evU7PzHxnnd+5yM4dIXFYVE9/v2vH/nnv79KJu86Mf/ULy/o2ZDG+jqznNtQ329xx+TtD+3/q8me+N7e5WKmGm+HX3jbq5vlbrcRbhcWxTMKqFFdlh2zMzkt4L2vYsPMwsBmrKCqyYERoUEAFFXLB3uzDAZCRIdMSMaIiMwspgAjKJWpAJEm0SxdzrL+PBwWTWqgFtglFMeOi6I/jA7JTAFVzZIKEI+Qe2YAIy8jjMjP6JjyfDnGNBLumpoiOa//i3pvlusymigAenKOPKHDvJxUIzXO8XBqKlKbIRAwMUK3dXrHysSrj1x44zkPfWL2nv+7PDU9XNv4rhpBSUUIIp4Ni4Ia9C0sTJNE8+bBUfDmua6RUYUM8gxZk4TAZpZxPzQKGURVLYJDRBUa2VoAsiKUmAFRsx0agDm3UD/ct6GKGROSjIDb2XOZjHhlZWms00ki3nsgbGJNiRMI5pk9QhRVE4bR3H4ZEtXGUfsOY0oBqK4qUamqWjtGAN1+j4gEzIdwujd/bmqtnfen18YfTB97xrH1i34wEbnpIwDFDhRNW1MCtNmztk6s2b5eZDAHh278irN45AO/m+694+rzys5UMgeLE7Ov37i3pra3ep0cuXz4HUoynLq86Rew49r8SxcwXTcxd9Hr+vMlEq/Z8535C1/iJGozAIIg1dknbyf0Kkth6eB4ub1XrE3kYHVECaMhSTYlmwMOGHc/+90xnFrz1E8Uva29DffJwBGd+OR5f/Tbj3x0HZ+/aeM2WwBENzG9ZjjsKqA3BEgK5rwHUSJSEUIkQO89OMSMXwYywhH6Xy06ZyO1riIy5faXGUAjGRsWgODZERfOR4cQoyRNllRBQVSyFlU0phqkDEUdmyiSUpqenKwGddUbeGLPARHBIKXESMzARgYwmp2qKVrugAmYQ1lb0mo41iqDYdOoMaEg0Wg2nz18o74WDEw1aca5wUjmYFFFmiaE0Gq1EHEwGACwjsJj8g5IzNCHQOTzXY/AWSxm2bme58tJGCmJsHNN02RfOyLKqu8RYXWaDWgAkgyBAyOscvrEakSM0ZiRRg+kDBtJw15/0F0BADMhorqpAKBVlAysTPOnTntDyUoMAg9MIlXdeEMsvDliEFAyREFAEwQwFBUjAyRz5IjYQDXGpKYcQGm2M3ni1FxrSwd6S1XRvl1PlX2FgEMVHqQI1cYZPues2fdd+wsTrck//Ohr903i+f92Y3ffSjUv3ZN7ynjeWc9+5X2tP/3F1L4bF/aZe2rX43/x8Huuf/nrJ2Z2PtI7ufU1f+A3XhS2XrJ/5hxgDwCuNzc1PLJl7h47ufv799z463N7t5xeOrLz/OKl1923547DL16YOvR2w9hbf397EXrTkEo4vUHPuq08PeadqGOqhsOibA3rWsxIjcz+8aMfec/v/XlyrkikSStKgWHT2vUnTs5LqWap4KAxEbMR9qshuWBNA2aqSswG5hpx/75vyx+ee4RRl3HmaL8odHy25R5a8zPd/sYppVAN2k/tbnVa0SYW6wFHQWZBZHJNjEVwky4sPrWnKCauag5d8o1/GJQzg3LNTG9p7VTBY1PLTXFIU2vMQg1j0psvkm8UWAownpoZHl8sxze4tvSX+3t3vnD7k7cW29ev9NK6IwdPXHjl/Asv3qale8Yrjn71L6on71x74ige+fTTN31e2q0ocXxqZqycuv6Ktd/857v/7wXPfuVbNroTMKz7oX9qHaw5WuA9frF7TeWdT2ujqbplhAlft4e4HAY7j8E4XDK95ZVXvZhmj31y4Xu7Dv7g/OlzJ6Aj4lyH6xTbhYuxLyJEGZusnbFOPRgWzkcVQuDs2ee8Y8l+AzTEjHvGVaw8Z1jxqvnPMOfOIjkg54KAmCKREclIN4higqbM7CBFUEYIjiVJTFL6ggDJaX7EG+YCw2UovHdEuipWytoljBqR8yl7VJWZCQgh39YIAGAjSVQGaSMjEboRkRLyMH0ETGeFFCXGqCNxBzkkr7AY+28cXHlv/8DxyZVvTj7+o/UVNWrb+0nkbuBWDIwoLMgB1bIH2jIG20kIgXM4XEoSE5kXjYFdSqkIXjQSGpNLKSFYK3jVUaRrLsw8ikZ2BoLIoEbMYKvBwExIRjQKfwQAJg+Q8+oM0ciUfEgZ3IQsVplZSkNFgJGzYrQCABU0EFNFIOZALI0MYzMcLaGhrqsBDgBATb2hmg76fUFck6YuOzp5x8yJezcfO+fwjK/TctEAuraGNBgES12y9skjYd1F1ZiOr6OTX7gp7dk73Qybh+6eXbN57eZ5IvRFs4ZO72weqFJo+zqh7/F0LMZ6OKHj0yfiiCiejOLegW56ob7tFfD/e+uPbTkxc8n///0wggPjqiCe8gN9ANhzce2uXzEaKA2LxSu62768UiTBE2uQv7fxKz964leKMMHs261xIkJkGomNuCSUqgEA5zCqtjpt730UcQRoufaCqAKh98zMmio1ysG6MvKdKQLW2nhDZZMQWoCIkBw4M+dDtCQiuXbKqk2cGCbHp1dWVhBRRYqiWFpa8hyMLCXtDfre+xA8IqaUkigYKoIiOTNBNIKE5pQqb8GgNB81gkBiA89sgug8koApWLbYImQ3rjPEnI2Zj3jEYEqMHEIws+FwmI03eXiWL0EAzR9NqpAF1ZiNUM4MyFBREQ2YGAARg3OqidGBKBloTNnsi/9L9UkAKqqYYfJGJgCaBRAKwMAiMjIzGznvCucRNOUtdTKNEQ3a7XZKqiDsfJTUDp5UdXW3DQDeF4mAwShppUkkoZmqCURmIEMABcXMm8lZC2LGLkgSi9Bd7oXCLa/Me9C7uT49tJJUquWWLy47S3/pkpVz18g90y//5+1vfc69b1/THm56YuHA7//m2b/+4ZPHn9b12x54avcrX/mqx17yqmN33f3T217/ocmzx7ddXK698MSmC0+V4zPPARkuV8d22dFH4q7/6izudif37nl8VxdqK7laSbNs54/tWC5WWocOdHZvunY48703HW4feslw5q7B2CIAtLrgK1har+76Kn5Kx/Z1ujIIRRjWQ2NOGhlpYnpSAv/HZz/5U7/wawcPzqkjMkyajg2WU+GZrCi9NZLVpMAI5BQMaNWkqkoZzv31Q+t1eeJ/rr97hdb0UvHd4+MTZ//e8sbny5G7BRyLtamsajKIgQzAcREASJrIjmsTGDTrxiaWUjMvMDa1dQ22N8CyrJ9Z7C3Glf3k4nRYMyjbVcfRqWXRRDsuilr7pw9WJ08X3nVXDo6thP5Z24+c+5yrP/+urX//Lw/99y3Ln/jd/o/cMBdTssfXbrzs7J/5yOnDt8zf/uW4+/vjqSm7BID18MQAD194oFMeGLvxq92vfe3YK181PfVE3QmzVVzYPb3j1X/0G3/nf60paiODFsAchMrJxsTjqbDW81/efsaheOzkLQePHcBtun7dpEVA1woKFmusxcrgyhKThbJIKXlHVdWkHK
c8mra52Fq7/1wd13/O1wv3+0pUM3EK5XpzTXGCog/beZq2rJ9nW1g8PHf7CevMFGH3tSmzBARad6NgDbVp6i2AZqCmDSYjhkj2F10dLWLy2f9RUQj7184zfK4fArxch5XJ8CDbVqOslGV8JwTGQWd3xOsAyNOXZdEFLizjqxSHt39V//Ire+WNk/nKnazKVBKzNAV62BsIDgp0cDg3sUb0WcGSP+FEidYp86ssVgsPTiL1KUn4yd59PC0ABCJMiRpUcilgeTZmb2mObWez9Sa/R7BWe1brcvRkoOntRzQGUIUmKFBnLjwkA3JtbeEYYpAOCcQSJmDswFx51Eq1Aa45jZKxhjrDNkLbOKqjU4MjLU6XQwqi4DAAMKWpES2CMmrBagb7hjsOGRFsJ58xue89SWUyO9Bzae2Lf+1IGRI/vGi9sm9qwPG86eX3/23Mj2g2HLoityx0a7IJBiT7niMk0ckuEQkAAxy6D0llOtIXMwha2qyoFh13KpL8IAe+GroeE6sICoN0qKFpGjQp1AAI1yRhH7gqg6YFSAkENQUjFYWSIEq5AwoIn4KyJjBIiMRq1QFucJ8qXF9uHZbj8Zh3I2CE00jKhJ0FeIFdSQesu4qeouJk0cGhr62q3feef/nAEaNgolrQadygUSNqSYWj/RWl+b1HYZ2qu9pf5i6Vf73Xmz5C1Red6FT7z8pSc/+fHrXnpLM09SluFO++wCJh/ff2K5k196Wb5169k3v2RpZNe51573xF7ODvXfdc0FW6/Y9upX//zZndq+sJBwrmI61K1hI5hcOm2mBKMEtwgBRt9qEcgCOjKKwiw9CJlSQlg6Xh6C5iJvOD5jTx1rzZ40vihNi1zStz4tV32vldTH3dZdYupU9ceGxvooplZCo+HTnNZZi878qHXh3RftuCHMAsA42vVeVq0uUo5lMdQY7a8YS20cyZrrtp/86/eOAiT06PTtt2ZZKPvdrc97aTKVnfjSP+Id35w6/KRm44s5Di0V+fK8qdliauJEX44tzx8+6+oxXd480jSeRiwUbLZsOV+yjIukK95BXkez5IACAoToz2MAZUDllUix1aiDE0mEzIajl6LGNTyQSmZmH2IJyCKDAaRADEJE5KxVr6igLFHKRAMDAosMRMhFQghJkhgYoP+TJIl1grVkrbXOZGlKgC5JIjAkbjEmCAknFKEig32EiERUAcyak9qgc37aXGEw5JJoeSISkAwBpC6JMOS1Hq4igrUWvMcsyVwCoitFLyHMrGv7PpH10fyNEBDFS1n2LRkAAdCo9ux9MEaDcEwPYs2BiAoUg6bRyNOJPB/11UARBWQAWkHEEII1BkBVOFQ+Nmrj8rNRLxBAhQURbYRCKeAAa4MDU/ZBX1fXLpSqalJWQlvTyXdU1723+NHe8cWvHL/j9SM3zxazkDuouCa1ru3X1DMjoAhHi8rTe/XpSAwAkZJ8RgAAsCloVQUWInJkITbV0JigFaiVkIuWpB7BeFJxjaZMpYkjWu6X3TbOLiffnfV7TiwcmnUnQg9sM0FGU01MJFrJUj9IJgxsAAykwqxYGVBVKD2LyAiu/kLti89P78qxKOvuB8XVn1595ZzfdtfO5z75mv956JznNXozFz3yyUuP3TvfOdHeceHi8T3S7k9/XbevtyOj2Uc2v+rh2vWHOuNvaD0yjsctuqzQLnUU0zQ1XFU2bT0bzqCv3qyD3lEkGyqmLO1uZlweOfL6fPHcoPtrs5c30tV+c1TqEwhCyKa1rldpNBoDgKVt3wALnHRN1UpWJkmrYujQcjNMtPXSX7xifrRZVVRTA1pVDmNxixLlYAefAREJ1lyqYmNfB3j2SAk3gLz28gFcfgDnGLC/4hRfIoh9jUE2CM+AiOisjStnEDJp0KMONoBoYh0B2obxPpBCCAFLtQQhoIIaMmxMqHwZqrCGFVhThTTRu8mQAoEqGWPEh8H4OcrUKwqCRUQwIgKgxhCJrqwsISKiiR1cVbXGlFYlIIIyiAFroyeDgjGmi72+DY0FvGFh/bVPTC0PV89squ5dd2hheOH20UN3rK9v3L3u/Pa6rTOt3UfTqZXakrS5JkndiQh4SiTXwJ761hKh1qHsKavNgfO8CpXzZb8gQAMgRpPEteoNFS5W+t4hEBkFEDECCFo6NNXaPkaKA//mSMwLIspIighoEiJScKocZ/mIoBRfowAMEoRBymartrra/eTd/f9+pT28xBta0qo5FiFkY9MHO0Oj63YUT0yrxzpkKyeOf+ZTf/O23/6rZx6fDpJ5BSueVZVwuTNz8In7H7rnh7svu3zrWdsmGtsqaTQnxldqzRkWeObBqY//5e9lY2H66JBiZ3xzff1Uc3TX8A0vw8vObWVJb3pl3+GDW3ds+pP/8UvHLvnj+sKTND76J7//V4++83eesSPdKy6FudWGCZlrUImifU5T8IqIBh0ox4oufuPKlJJ+FTBwYoxJmWxQxrLK20vDM/Mjx5+CTjfYlEFtkD6wCwL9YJDCoSOpa1RVxSKBKMsbXVih4WFWdI3MBtVmPux06w8mdjUBPn/WK6/TfOfikR1+oaKi9PP1sNCxTpZ0cemZtD5EVAC4jeDBNcGa1TvuDHfcltVcZpKl8fWNlbmJFe25Fl/7qkVPT/7ke0Om3s2HlzddfnmtWxsZ534PfL+q9Irf/l+wcpgDBBeysl8Eb5whtV5lIAiJwDzgEenaygzCz/YbjSJQpUpExlrFNa86Zu99xVUYwHApinsMOEuioBrXZOAq1rjGGw4hsg+99xqXPaFGC1gUawYeCUhgrWXghFJLxlgrOPDeUaREjUlQRAhB1lwTAAQRfQinPRVEOZbFqkhkEQMhsEr8d1BVoFAWatAZcpaCFy+eRZTY1pxhLUOJiU0rNElSEaaUBEBCjbSQEAKAhIpdHotgBQA0lBjLPoTKgyVBZRVFBDKASoAEIMxR3UOjUZKqM8YZ671nhMRaa20l3opB4xTBcyA0xhLwwG6IFFFU1jBZeNrhcbAxnynDDAy6Bs6iYAwV+bRduSW/6dvTjz46evhjtdtv6u1o1KaW+6suM+LJsCqqpYyhBwz6LPga4q3WyFmEM/wYEKIaNoVKidiaoBpQowOACJOxCRth9CGk1rqhzDrNy+rwjPn+fO/uE9X+U7q0Qv0CxBIk9ZrBWsaGOsgQKuyU/cgAVAURBvEqBlQdYeULAusMDenih4feM2kWHPgUCwfJ87N7aHTT5xrP+1bt9zcevOfG+//xlX/2q7Orbx3Vdxxd0KYU3D3SLvsLT947857fl5X6jxsXdVstMGZP0bqBVOyyykRRLVIImg0EP09fVQlVkmTrFve40RslAaY0mFwBkMusfQHyOZnMqR3SfBSADFSo2veOQwDDYByBKjKYgT24Mhb9VWlpMzXrR+pz51+6Ugw3RDhXLCQ7/b4KGuUmY/Nfnr3rg/+vccMY1AhGANZpKRVBUIrwWiVBAOAz7DRwIJkySLBO2xjGppQCKAzcGqy1Jk29rwyAL8rEpQKau1rlPTgyChykU/arIFbUEWnNdnpFXO9R0EY
jfw7RGGAFkbBGSiQlCd6vwTiMIHjhiLY3hjwLGpu5NPaxiIDIsIBDg4QYpDJQkjgvTrSbQE3IBMfqZrPK2Ir63s2lV802rmw0inF7cGz+rol9hycOHW0eMK3WunOHL1zZuvtIvvlgDiccp2gzKZJOURNgcmBrYqskUCJNtb2kqKBHwSbWkagPwaaWVU7Oncptgi5JomeAag/FG0UGxwBoYl5tLQHDAH6JhqlSRVKwAZ0iWFXSioAYYU0xnn2c8ikgliUa9Nbp1m3bPv/oY5tbjdee0zm8JNSWzNmzW5zn+fvv8DfeODG2bqRThBVYdUOTH/+bv76iyjf/+h8u9ntwHEtrSm5PjJ/17f/zD5/71PsKB+M/3HjB9q0vf+1vV7Ws8+g3Lr/4xbv/416/dIxCPTGhbrUYnzh361VuMiz35Nie/U9+/v92Z5/59L983gxVn/mb/7z1+7eff/M/70wfe+uv/dHhX33HurO2rdx8ZffAiZFsKAiBL3rGCpNV0UEFhdZRCBLYA4Cx1BXp1yETytWytVZN1u1Luz156PjQ8Sdy6RANKwgnSca40l8SyOw1N7qt5y6vLKfHnnbjw54w2MQDi0WQRGbmQlixValzIOvLY3lnPQDsXbfpsaFXTnFmZg+eXcxsSHWq6CbdY2OLi7V+t7d6IlFnpV0knHSLet7ESYV2zlQHhgmWYuN5s9fcXD7n1bLrqiOf/zDd81UcGj689RpGe/kQLy8tZHZ4YWGuF7LJyS0jea29cjztJxX3JXdcSSVd4IigQQUMwoOdRdQrwxnG6Wv9roHgMgJIYBGB6CAVxe0QfQiiEqd98VdcmnSLfhk8OSsqJNAuuz5UYjWEAEAiYq313huD1hhjLTMzcERRxUo6coFiCCEkiqE6MiBBSTF2ntf08CpRSdM0KlUBACKRIysiIsYaEQsqA0taFhUUEC8BwETt4kH2TUREylIQ5MZCGYK1GgIF8aiA5nQ5GDcmAjTGqBcAMMaGEIxxiOicAxCIHjIGAWQwecMBuskYCwBJllki730IAUVNYrxwFbxRComwr7Ay1trEAPPAsjRugmd2BZ7df8/sFf90bGYAJEp92ofSIzcweZe+5M373seXJh+fuf29tV9eMitpMBV0CVU5sSaUZAyYMzOzeJEskqCepnGffqACm3oUyU4InQT1goiASbvX5TRpNqmRusrzkRPtn+wLd5+sji7UBMklrbIokszYNGTOSuXbCsUqO0WbmGCCKCtDaTwiVAXXjVH0ABw0DYLWhSS4d9Y+M0HzY7Q4SisdbP2g/uYv1t+1aNdvbz9wwae/eMlPPrT/JW8pjr89W146slIaosrY4GV08qL2Ju6HbplvDOK00vG6f2K1ddKN1hMz73veaU2l05PU2aBntKClXvQrUy5e+OQXHz7/rVk4qWTFWG8bVUJqkn5tEpEUyYS+IFnlGi9qMP1sOIAdtt4d2VDuPEJVIq5XDR3VZo8Qmj3Trk1Mwi3QYUA17DvkmgGZQKP9Fwx4d6dj6pl3H0SjxTIOMHmnA/fgy7FG5ic83bImQKIYiAflMiIRRt65njZviHIoRIPhMWJmjARO6paZydnKe+tcnmVahSp4cBQCS+nVJiZLQCmGzLhaASQWuiKS5VlWq3W7/bJX8hqDH9ZKAubB1EaV2QdEV3oWLgGlVqsNDTUXFxeNS63XyhAipqIkEhCQwCmWDgm8ABowDWhwwlVLSwLx5Obx3LnN5z+5eXWiv2fdkT1jM4dac9PDR787VJ88f+Li1Y0XTY+sP+ZqK9iirG99aXucJGRzVO1VXW+xKSOldCHBTqdnrPHMRDbJcla1hEZAQAMICRIgg1jWAGgMJYiGkAcCtwAgmWQBWQwHEjCaoCU1qbiATESxi3Y6x0JEUEOI3V55w80vevKJJ/7+gfybT1W/eH3r3A1D9z729OXnbX8pHbxy58VFUVxwwQXf/873pqa2KK1Cvn7uHz/Id3259lufBLi+mSWun7RnV9/2jnd8//v/vLQsWV7LxtZv3tL8wJ9+YHbuIL555aW/9j958aoTvshDAbW84cPXv/3vT939/cw1/u0//vayCy56wYtfvlrO/Prr/9tdD/54y03vQNCXmmT1j98+vnP70ht+uTvbnsybfRQNIdLhbBGqHAwLs1ciYxBRaQDU14zVBSNoPFpXcVJ10rnjtcNHh08drnVDmWcGexTUBPJljxp12LBdnn9zvzGKRw4kG7epYHniyebwqviaIGKSpkemV390my1tMk6ZZuLcKAA0qrJelqdMiVNbQuOyR2yRYBrEv/HGSxqmZ4OX+Xl/at8Q8/HD0+XxQ+uT2urC0uTWTXM0enjd+pUtu1obt7qlxYmDT46u27lfJOu3/UW35POHFY7Vm+uWpg+u+jAx4adoZM9td17+tpdUVVBy0hEwIhrp7mqQFJCZAdUgiUQ/Hqmqgct3nKQOyq8B3FLX+lEKAwgrqiqKkjNxcWrgvFZb7XUVgVU4REIRIRErswqiBgkODagacqfT6hg70zQFVGZ2znkOQgPrbEGM1CAlHEgKI0hkQA2oL1r5EiMOK35+sOoMM8vAE0J1ze5eRAHBOccEHEWKY5WPaJACcoNc1qyvrKxkACFwIEzAlBIAIQoGWWvXTH71tDqH5wEMyloLHMBaBAVSoeh3xmjQkgHC6FGqzABgkNAiGVBLIgEgFgcGAFRAWCsIp/0QB2hVQ6eDwen9d+AtCD/VpYxnHWOxR8iSUkzaK1auGrv0VYs3fHv/PV+bOnTL/MPXrT//cHcpB/DGWAZGdDLQo5S14wAqRctBoNO7+hqQJ57+KqIhcJHBIuLRAhpz1lQdPR44VXz5md5dh+hgJ2fIcwgh62RsEpZKq26FGIQrH0LFkDeMcGbmS59WaYpQciEWNVNrMIQqYvd6vZ5zhgOAn79u6N4Wtido4bDd/dbJBxToBb1/f/7sP4+Ux/cvbqoQ09HU1EVOUtpYZ9qLveV7kmSbobTzxBNTakT7UquXBWdQtDZuf3zunJvkkYXeiqgOpGkR+9UZZgy+PzE6kbuMFp+5+oG/Orr1hafGL6g0L23T9U5VtUkwTgHI9wAp8atiM4M5OPEmed66YvHY40/ffWVn+0/M0ojUT0raA1W7RAHXD3e3XX70bB2vtF0rDNRIqzP41s+2HM5MfdaEJwcxGAaaKlGtBc4gL8WiOcp069rvRm4DwwBPYKPamgIRiYOBKFrsCA8Ei8kqGCQhIHIBAyI4MiIigfvGu2CccyBa5lyWJbA06nkIIYInYgJvkBSViKoqBKm8DwPcBrNIiPKWP3VesW5mNZaqqkoz572fmZ0jIsWgoAgolkg08QAEAlAX7DI0ORWjDCpQKUm9RE5MDo0gvV6tVwOqzejzZnbdmOw4PtJ5bOvsvsbM9Ojh740+fetwfWrX1gtXtuycqe+eaY52rYgrHbIjRZeGqpQOZmmapktFn9nnJs1N1jC5S0xZ9hm1jyIqdUaj2BUshBERxRBIYIjUTTCkqoV4UDAMVk0ChNagoUBAQRlERAygEsWCmEVUOIgsLc
u2nReedfY5Jw7uO8lj/+Mbi6955fNf9xt/89TRk1f33vMXV868+N9Wf/Htv3bnd7+der/IeScLd+Std+/df88fvgX+9EBNlvId62qh9o2v//2Rw8etwl/8yUef+/zf+NfPvO+Jx+/GRuOsS3bc+dXPfuyv379py2ivD4f27f2XT3/+/b/3cze88PrzzrkYxLz59e86ujB9y4te+qLX/472F2Z2bpg9+viBv3rFJW9+z8yd3+20Z23WUk8mUzWAmrerojmcur50uVjzXBvsTlFDiTWUCiiWhE1nZXT6yMTh/W5+Go1wo2lNTfxqpSERDRvG0m1X6YYNnohPHrehnwC1D55yM/NVc6EaVfF98P1wZH9y/Kj17Fl9mk946RqAzDCmyZY8qwJIZ7bltOiXL7nx8gmsekVe5TXcuRG3X66uNnvi+CP79hkztGFTc+GkNzi7qTY+0aSRk6unsGvO2fjEh94+ZhvqhhfPf+HI/V9Y7j2YvOm9y4Kt5lg9TTijyR2jS912EfpYUUaIpBi4slYDGyRAivQYAyLMxlpRjgE4PgxZipIxLFGILTKAJQyowCAxFDEBMg3y3LIsi6KoijJ1SZQyDpVXfHZFIaKAJsbEuamxNj6Jx1RVlkBEZNSCCkJMjQ2gKgCrUhxJCQdhZlSJH8BzMMY4dNaYqDll1BAaBh9x0VEAgMiyBmMcAwuiAUNRqXhQCEhqqFsVYSkYVXWkoSJBcWjQAEAVPK5Z1RrjVDXNM+hDt1+0UmttAiwcAkWtooirVo16ewAiotamiEiAykLMhkySJMxcFIW11sYOsw9gaKCkk1gxMshvTpekiLGOiY0HjIgsRUSKLuuRU7uWNCMA9A0OcU1VS2NWsfuHG97wo/0PdnaUf7vyvWtXt5Y1qlWaBvRWBEzCylEBG3CgszHgPUe/RsQzOt0DC3cdFvSB+6xlveaGmk0OuLTS+fxdvR8fDY8et6LDrYaMuI73nUIsd3gJJUhwajAIJqbSAKRN8adY3EqYSNPC9nveD+WtpSKW8cShMuoQzQY7e5l94hJ67LrmvZe4pwyKAiBhLp22GftB7XWztGF3+0c7h+7pItvJCXRp0cz90X2m+FEYeuHJZbwgrYq9947YZmYTDmDyHEuF/updKxdePPZgk9PVEApkl7kQQj1vnD7fQL1Kg1aluO64QuuZzyazerCTPnLVBzmpBwm2XEm5A4rd2hQaN9I7dcm3P3HH5T+LW/I31575/bs+dfTzn6w9mlXvLhQELTWbY8ND40OrjTd/78U4dsAm63pZRhVWaVnrrWnCxgx4IDypQiDABObMGLzWq4A1T41nbxIJSoQpwrOvjDTeQZyOWmeqg4QWIbITB2ncGcHernGfZM3UObGJqgRkSByAhqIMRWUTl6ROvdSUfRWYuV95730VfISIiYiABlZrLTAAiCrXs7xXVrEXJVEZWQaATeHKlz5N036/n2UZKJG1ZSWJUackDsWQxmwd1BtseKwSsEIlhQJ8rqCIKYtPmIxtWmvZLubFKvQa/Wpsjp5/ZNNN+dbZ8fLprQtPTp04lh24dejpH6yvj/Y2XLQ8deHixNZpai5gH1w/dUVauW7ZK0p1xhhSVYcAyqESjto5noG5F10SWawH76IN8UA6UCkqygr5kok8GgZUYxNF4kDR7gJQlRHWuEmqopI4o8Gzmk4Rnnvjzf93756h4fGh0dEvffk/+4W+4md+4Vb7a2/0v//8MVy3Yev6TduOzczWsmQyaXyT9cVubKdbBwCn/uztzfO3D7/+nS95+zuvvPGGH9761ZGtN88t23vve8BjtaFOGzac/Zu/9kLfmbv2+je2TyzsuOlFX/naVzc08re99S8rwn950Ss3XHTdldrMnrj/kb/7y/aDP37q1++8SmfO+fqPT33jR0+u7DtreLxYDf20QI9NqJWhYxHKLqsNKLi2LZFq0EEzgCygU1Xt15fnxw4dGT5+LOsvo6XKNEx/wQII1BWWOM3oouthy9mVFegXWci6h0+srq4wNhvDI6WnMHti9MSJ5f1Pw+GjFisrQosH72sv7dt43s0GoOdJl1bNqQWuFX6l6rJesHNrE4vp5VmTDdlVcquLqfPHO7V7fvSdnMY376jDwQPNxG+cWp96XFnoTCf+rNEpe2qFZhcyrRbHt5VD63Y99qPjMt/99iemahvTa64b3rQp69H6q194fHGPq2pii06OrnRCNhkMvgXJMMQBK2gIvDbfHdRYhpRQ10gLsfwNIXhfhlCpsvhKRKwza50lQAWXWBWxSNZaYwwpBPEYBWxpEO8j+lcQRNVGnXHmOCr2RVkGb4yxxnnxilBxiOV45hJCg4jCa04sQUIIChKNhM89f/f09Ey33cmy3DnHIt57jVaDAOzZmKCCRIqCRBSqEtAAUpQMgOh7bwhAcuNE1SWJBEZrggpwMEJoDQG6LI12UqDAzFWoYtvZGOO9d2SMMdFj1yIJgkiwREliAcCXlYQQEaGZSwShqgIzl8GTgiWTJgkRWTIiQAiOzBkbK8Ka7JQCk9pnuxFAOuAFrUkjyUBLMur0ikjNekYnCCnRarG8a2jnG0du+bh97L51Jz5/5L7X1F48ryeRUIUclRXYJOp3IupAZzgGXhoM/s8UmwQBxZ6sNnKaaGSI9SNz4TuP9+7cz0+eksWgrczVh0GgW3otS8fBSNBEc5E+kgdkBwg9AmNLkyxTaIol1XbZt4kjNt1+mQiB8ubeQy/JV69I91zqHtuWnASA2TD2RHXObBhlxf8cefeXG7/x18eeZxOzN7/hjvQVXx7//fC2tHnLvt1Tob1n2R367tbi0faW6++44/svu+kWQrN6/AGn2gY7lBWnIHinKeQL63fdemL9NcljXQ9C+bBHcq7TLk8H4MqTMWVie428BdJOk1p7pRpFZxFIuURE46iEMmvE7Lbqds89fPftr/nzXqd9+x0/PPTwD0eHRyYennr6TU9u/+3zJm/YPCTDG7+aXnHfrqHn37Jw2z+c++Z3LbWkVSgCdpIEC4XokBELwYEUWhymMugZ4OUzxv//z2MAy4oqKGsyozqw91jT96YoP3N6kDFw+jKAz7ZVwAAJrpXEseImhdh8Qh+QwNTIOYesrFLkAqW3NnAQIiqtpZIEVJXL0ltrgFlVOXhQFQ2AViSompjzrSnGh4HWLGrwpTHGoA2i3U6ZJGlAzw4tggFkA0bRMTCBN5Un9AQ5GyNWcocsUHkjoIlV5qrfaWVJEaiyDmsuUFV1u9khfM70hhtqm+Yn2ns2n3piZG66+fRtIwdum6q3zpu89NTktTMT4zPZ+lPNAoq2VGwsJYgKhVYBUAylXgMBqkoIlQKKJhwNN0UFDBkEY4wNyADCKoYy0KDApILMAo6BRNQhqxk0sUSEB7NyIwyIwVi3tLy8+/zLLrjwqmeefty0cGS49Z1vfWFpafW3//A9Pzl+3gde8MTdk9kFV14//Z1/G3FT00uzaujpjEdTAYDNK73mZ/5h5t8/OXPtTaOveesrX/Gqwm6bT/wff/TfZmefWZkpTLbrj/7wIz+88xtua
NPV5z7nhte8ua+rv/Ohvz92qIeze+SZk0//3dv9T25b7XdGr75m+eoX+3zo3FddPPNPHxn9p4/seP7Ll0SV+3kgAVYohK3P1FRlVhkGCiGIQJoRDeaEoIpiIe32msdOtA7ury9Mp8BoEh+SintNHPe6qrlmmy/tBK2NTIXJ4WShz3mOE1Pp04+bvofrLuwvLur88fTo7MrDD2DnlCu6ziTWuWzL9puWj/RPLR3bDHDs4I/8/CmraY+LRl7bvWXL1uHWoYMnMMPMzSceKXcuqR+fmR4b27Zp21bvS0yGa4FPLXUdFMOtsfVDTW8KyusjV79Ov/dxfttfuFA+tf8bhn36zE8QQ/m5oZWrXnb2W35reOLsrtZCp584xo5QnWq+7DsLos6gQUg0QlUxWCOBQwhgSAGEuaoqQoOiQYHSAZE3ep+VImUZxJMKlBIMkWEMCOLS0kuj2VxaXtWAIQCAKAIay4G5qCyAcQZLyeu5FwZDNk8yMZWyqgaVONbyLL7Xr1trRNAQKkjF4JisISKjGEAqDgawLPpRky8IP/3MYVW21ngugQZYDwZvmDxIgUJZQlo6jvIrpUUbe+AigVUILSIyApF11nVXV9I0RcQgAtZVwacIwGwMYggJYsxXrCUXEBGdInl1CgoCBGCjQS/HPUtFQ6VElJiMCcgmqtoXUUUliNEXLApw3PwCD2oLVTUMAYMSgiETjEFRQTCkoGgQDABE6V0gg0BiGUFUVAlRaaB8QkQIDiQFkSrrDklyvCjeNfHS7z5098wNk5849p0Xdc4zQ6O9Xq8BPdGGcR1Vp14VjHNWkVkFnCt9AIO1sihqLZESKqq4cvV8POE8aRxY7N3xk5XvHaoemTU+JJSaemonnLgCq1AUQVNlRae+rDhj6fbJTwTXDUmZshVR9ImAAvUTcaAlhLN47hr7yGW1B0ftw+93B/5t43vO0uSB3q72xpd/qb/1Y49u++Dvvfzf/+uB9677l2MHDn2x8d/e0PnoBfZJFNjRefrCxW8trdSfevjcO3fesG/dL9y/kNvmazYNv3Rpzx07Mp5fnU11Yz4zbbmbhyIJrCZHNhxWbT17iK7fVTxja+q7BTtbR1+QPR3MRDoEGaWNwrczGZ73q/0SsV3Y6UdWJs6npBlcrdNcB4iknBRL6dLcwbMu8UmyMB/+EV5km4f7T32sGp98z+v/9oYdr3zmvx45dNt3L3/li/UtL7z/z3/21S3XmUyobQXLSgEsyEDrmQwgAkZWEgkkSoPAGQvWNch7zJAQ0GiUlBEwpIaUAxACKgGAQV2bHSCAjZFbAIAYdc3wahDZ4+RhMMQxqEQoakSJkNdgYBGeEVtZFlXJsopVtczeWMvsvVcDFEKaWV8FzwHRhBAsErNGaTl0yUq/ayGL3LqYi4sKK5BSKEtnEgGwlLT7PWMMARBYpwwBFFnIkhIpFhVbawNSilZZQrRbrRiMgVo9t+hZjEtpNC/LksueMYZLBRZoWuODULlScf1Y+qLpHS9rXHBo6+yeibk9zdl5e/iO0WfunJrYumvzBcX4pmfMuuN5bY4FRTIXMmhYoqAdSq2ycMEQTJZIxRUHQ0Y5ENrRfGil28lV+hF9aSoxiGqR1JFRFkQUVDDkhUjBKGhgBkVnwQMBOi0C1QKXzXqtLPiml79k75P3rtPRpao3Nr6u1zvxrl9+nSxNH/htO3X/e8aHn3PeOVcuzMxtGB/vBvlSu3tOswUAXd+baAxvKoO/49beHbftzZvljh3pxg3ddefUr7psbPzcxelTl7/6NTe+4ec7Xa+dpSP3316/4zvfPnQiXTnR2fdgHmj9a39p9frnThw7denn7vjMPfvTxV717l9t/PArrdEN3xzJrnZm3veStJkag1ZDt0rFATlR50S0RTUDRbckmxiDaKlq92rLC82Z6clD+83KnLEovgLnRV2NJed+kaWsIhfuwvqmIrRtv9AqmFW0qyf71qVn7ZAk607Ppg/dHxb2JXOnlDJfz0LPW0XmvFp/zi6TNhlgx/brdXyW2W9bN3z9dVc16zZ4CoGdYwiJS6wnLPqy1INMoNPpGFQJfqTZ0sy1cpdZI2k3CxgELn7T67/03Y9X+dbRcrYzuv2Cc3YPHzjamn1ixaxfao61jQwbsFy6Wt+EfscM1Q20rWRo0EKUFCdAVhFWYgYyhpBVBgsYEVVUKQKjEJF9iD6+UrH3XmIIWnMYFRQkjeUsQ5RtEsWI8tBYuilBUAGFMvi11hYGVCIDKOpFAwOLKChy33vDgawhQEZiZmPJGIOKZVVRYiqRqvLO2qLfN9a2221HyM7kaSYiVemjIKJyEIKBqR/AANaBcUImIqwxDMpg3kWogRWMLdgP+mwKyYBvKxGfAiZqmVLsCsbNKvZmzUC+l5w1ETalRuObBi8AUCcDACWH2OWLU25VtQKGKP6Ha4wRDoGchbUBsKHBGDjiv3mtXllTLQMGJIJobhA/8ulBGgNa6adpWlZUtEg781pv/G7+hl859G/h7ImPPvG1DyS/eZwWNAwnVovKkabGCpNWIAHAmTT4fmqJ2FQ00g3txNOEg2RirNdvf++A/897lw7M0WqZS2rXNWxaei++1ysryj1g6dGgeuEiUAJJ0CXDjQRghntOASDxrrYqq2cp7aK7r032Xdncc2X26LhZAoD9YfNXix2zrvPANe99+b+ctfWc1ld/6apPfuy2N7x6J+eN2dXyxHPffVv7CdJw88pn21ov1S1JjQH3Pjh6/UOfLx/+2gVveOG+rL5Qjv3kmF89+5aHgI4ZWf/YkYnhS7vLe1rSNVVg8swefD7G2amd1/zghz968dTTAZoBq0pyX/ZOB+B6YwSUMHjPrf3zR7vtHhQFs3P3/V+85YPUPeXyYbEpl30HFRX+4ts/8sCL/ufGI48dXOFFWR66+T2TWy459PVfWypW2t3Sjk8+7zd+86HD8/e+64XvmTj2+HR543yvk4G1Sb3ybfNTep8xsg406UAB6DTOmU+bZ/z09DT2MBQGppBwBqJnkOHRYI3HEQchyhmWG2ceZ/DnYJ4zkE4cvFKViAZzCohmroP2OHi0xlgkAxhM8N5H1iI6IsB2v8tImDoU4bJoYFoCVr5w4FLriqIgIuei9o4haxBNVYUIqDYmERE2RkUFgJQJGBWQxKYOwcQUw1orqizMzAaQEeOYpqqqKIZT9CsiMg6NGHJZURXDI42i2+sZ64vOWU+tP+fJsZ9pXXh8U/fh9Qf3ZkenzVOnmqZ3TX2rH9q2PHH+oaFNhyropVi6Xr3Iw0oPUI2rhdSUZqXqMgkZhrLWs/2Oq3pJAItsAkmimisWiaALNDw83OGqWxapdcTqrdWIu0woISQPiOCsS6jGIkao7Pc7K70tW3decuV1j99/dz42NXdq8dKLJnbv3r28qv++cOjtU/f8zMW37HnMzswdP/uCzf3D5SnRb5U1AMjQdLvBDOed575Cfvi1DR7C3sdWHn+E0h+c/HSvl2OrrwcAEko8VU225WXXn5jatv2aly6cOrb9d95/fOb4jte/7j8/9I/P++WLHv7oXx7e
+dapp78//MOvbJnc/GVu3PfYvhf9bF65jAX6BqzHFGulr4SCCHDZzamejTZXVvuNAArWl6ujnfbooUONI0/kvZXSWUsZuVx8lWZlRxMJASEYdhllNLmRl05wr8cuYZr39Xp6zo6qYeHYk42j+3HfM9521Fgy1paA1loAS5oqF2haAFAb3dTTYsfW4ddee12S4nKXTV0y9mkwrZH6/XsPHjs5KwKJSaKjQKOWmYjaR4cBOLP9ynsvzZZ7+Lbbm9t37GusP2/u7p//6O1uV23PFz77/W98b/NFV9zw+rfUm41lXW1k6Vy/M0rrskbFHhw1M4d27REDjPdeWIG5IvQcSERwzWdemZkJgUVCCCGEqqqiIUEkNqiFgWa5iJAq8sAcjWLHVAAQCBVADKohUopGLlHxQ1kqVHNGoxURVUJghahLxw4RPaqpKhNh04BKyD1vrWXQ3uoqWmdYrHgwWPlSWVQhMKdpSolRBWQgGKCdjTES/Q1RFeW06aKqAhAKlFaQ2RrDRSBjUSVEZyQ8PQPFGFCj9LFBC4Mmu8UBspEAwAAigXNucF5mMDOrJCAM6LwRWRYNqNRZRfQqorFjH1vNalVRwQCaNfmEOAMmIkVRQFDl2CcUEBBCUgOqA63lgdAjgPGaOlOG0KR8tezUE1wQ/8KpG57zxHfu2er/s3Xo59qPbJk6p91ut32SWicsBJa1y2pBScAjZAjch0KVN+fiRuonTurXf3jk9n35gRlZNzJac33rCmGzslzZzDj2GaadsgeU2rLXt5KE3AmC9hNHIVToXA2Lq/jEufTw5Y0nrq3t2WaOEGoHmg+Xu/+58/qH+mcftJeuwshS+cyC+7ODne11lG3D+bG50Kyt48SHDjYpO24v+0Zy8Y3zn6oYT8I4KCz44X/xb3vZ/OfnOR+95hJ39lTrGTpLZyeai5NNeryUE3Zs78z4Y+/+9o+K9tlP3ZH1azUdovJE05mFkzPa2nbk7Nfd+cS7b9y4rlOQG4LM5qdDkRcfyEmfe74H6PIkTZtZDZN271T9Jx88cdFbRYSVHRmScvM33z+1cuLErufd9O//64dbXu4Cr87eg+uuf+4v/SfP/rha5V27L/rMd+667WOv/dHN62pnj3cfWSir5d5IC8u+BwVs4lrUFECz1gtGABnQanHQlMY4BooEeVVQ0TXYLKqIRjKwrhHY1qYKICJARGfkagaerY8HMTsaHw1Ux2M8JgQ1sRk+8HXQ079yGiqICC4qh7AhQD+gGpAn0ysrRUjrGZdV1S0NITrX8R7ZDw0Ndbvdkn2E54AoV56c9awGWVXreY6IDFp6FjSARAaATMzzUQktWiYwIGiYgx+0lpCZKxU0pGiNMXmettttQBHRxKTGCBAouX4RKMkr4DQzp2Q5I8kx236odsnhy8vWpXtH5/eOnNjbmj6eHzjSOnr3ZWPNy+tXdCcveHJ43amhkc5QzuVcWi3mRZ2FuKoA1aMJnboZ7iz0OQlqaxRIuSKQBMlbYEszxWoClFlXhkCJJQ+INgK1YvYNAAhSBTEOnXMWEJS7Pbn5Ba94cs+DQTv1Wut73/vBi176kt/9/b/+t8/+/cUn7rnB/euuc1/w8IN3HX1mTpgaDXhouLUZYGPZ1szM9qsL//CPnnjlqw+84w1br7+h+bwXzX3qH2oXX0R799kJ19o0XlTbRjel5WMPXvauv53fNrV821eCzg+fu2l8fPKhb3733B1jh277r5XPfuHUX//eCx7+7rps+DGyn+xO677V2VMnEmurMu49REpxVxSt+omRbr/f7RkHnIV61Wse2986caQxPW0dSC2vu6zf7klTbVlQVYO6a9ewoWiX+9XMdNh9vviaOblCuc1sEya2qC2r0K5NTvB1E8X0qWT/Q+TIiPXNRJfb1nPVXp0vV6lhhhzAwuLMc3fteN4V51a+fejI6p0/fur5L7pyrI5p3jh6au7E8kpjdLLo9etJliRJWXWLqnLG1LIUnSXAlY5vZGlWs8J9WFmeX3c1CE8euveB/bff/Xt/t3Di5HlDk/U79u85cPfU2Zdc9ht/7rCDx7mVZ32uU8M7blDKMfSCIWOMBgYAMeJLISJSYtAIcY5Qo6LsK6RVVSGiD77ol7EEpCLgmgQxCaigY7EhOENxekSAAyMjUQQQFiUVFEsWFQgQWLjyYDFOmIFlAPSV6OZjUVBQgBAFYjONQMnYsqwUoVf0VZVB1FcOXMViLaEoh0iww4rFlJgai0RhDc0f34JEjUWAgQEhg8aTRUQNKsBAVCkHQANgGCwRpea0x4MxBggNgAhbIUR01rm1VkF8WASrhJZOB3hniAhBMCJROV4bBIEoYJIQEVmDREAY62Naoy9F41UAEFVDBIhRC2kNkDU4JqyZ7DIARG9rWGOIgnoxSgSBE5NUiMH3lxr2vevf8vq7P9K5Yf2f/fhr/zb+20tGjGVhVeMFBNg6BSQBATTYrzDFfGrSPXTCfPEns3eedL1itAa98VwgnChWGyu223B1ohAYqoVqlTv10XrPlw2TGOOM7xPlTW1faR64fP3TF8PDl2dPNl1fgJ4pz+pOXfn2n7zWb3re6Oadjy6t3nuiGE6bE4l1HARs4NAnaTaSo4tlPbHIPVfUmtb4tPrWCbGEn3xo2/2jf7LRHDtZ1e+mK7fNH/jl7uw0+OFrfuZU1W+O5zI0tC2dgL5c2E2uGavyv331zkNLB65+w7EdN7qLrjiHqN8+e3n6yetqP5wayz9/ZOM+e8Xlxd7GyFlL/VNbR4ZPR6Nep99RzZO04L7z0uvJyVPtWol5mp0Fczt++IHF9eeUY1tqoZwcLTf/7v/65l0n6kvTY4cfq8ZfkjFBbbi7dOxR2LTzvJdjmL317unbPvPHX3nlBRs2lCtLoTG5c9U1nXdOgjEYqt7pmfsZICzgtfAJAITP6jsP/DnOqHSfTdpOmy3T4MWDmDTAcAyAe1FrOoL84P/zofisV+UZHwPOQPzJ2jhaAdBYlEh/t1GHI65BJVNVBXvqG8hHm+12WzxnjGxMv9+PRbmigIGqqtAassgcRMgSGALmYJzNa7Wi34/vG0JAFTCoqtLrG0PRby0ICsTNDUQ4+CqvN4qi8KGczMc3bdq0b99+4xwa41GBuZbl/aASuG5sv+o1OU3qeZpnVconQycJdufx8XOeGX3ZyK6nh1cOjRx7aN3xOZj98Z577tm0fvP4+M7ark1zrQ377fp5F6gC55zKUm7qlGnZIdIEEwqhkaU9BbGUeUSVKoh1CbNACDVriqIUdLRG/Yp22gAArElqvQYNxKhpQvPz87vPOf+a59x8x61f37H14m4Tv/Pdr6+2q4vOP/uO5lsu95/5+R1LXyNXy4a7VY96whAA4Mft8jzxEIp9//7FrW//lYMXPP/qr9z62If+pLfjoiu//o3/vOGSG3/5D7rXPkefPDzzw6+O3LTjyIEHFu/bs/z0wcnNk3M/vqf3yKPy+EM8c3h09vjSc38DiK5+6rvH0+b7ux304Fspl51+PkxgnAKlpmAwxiIzKeXB9KndGm7aopJTM63Zo+sPHtb
eCgzn3U41hFRxMC5FDpTXqomNlDdXV2dtWaYh0LFjRgqaHOXjJxKoir7HU4s0O12rpzq6sbuurps25vseC1Z6KFmnT0DWIFlI0iQ5NX98I8CNz73opu1T0F093On++9fu2nrWulpiNJQ+18eemm9OjJRtHxcMi0+SDImy1BljOtzVUodbzQL6oS/Lnd7ya5srcy/R3gN7qn2tY6vZvH/1Za+aefq+xAo/dOvcQ985uO3ckde+xK4erRKoYeISSiXzliMuSTDq/g8EGskagxBQMSgNtmxQ1aKqYrs55o+D1jMgGwQGiWgQgxznTIgIBCyxXIs+WwOER6zhAA1GQYnBG6CAKHMQFkXRaLeghkiir+z/64rnhfM8j1ygsiyjkB77oAih8gRYcRS8TIT7xhgxIc5TI8GfjIncR7NGrPIEIqIIKiqgIoKoHLwiiAZDxqA1ZMiYwRGioiSBqKqaLDhCzJzLTRq790SxtTyQ4nJk4qUDVBFxaRqEq8DKrAPFQGOQEsDEJpEMzczAagAMEpioHRQ7B4gEQko0MBQCRBrAkhERkIyyAACqyto+GdvUbMCzOgM+BGvTgiUHXi3md2265E33XPp3i4fu37b6tSN3vH7nS/cvztVTU4kx1iM3AAtERswZe87WhxqdT34rfPrJnFtJ5pMh6dbssQ14qOThQ3Zz5htQLYFr9U92/+odYxua5mc+uG/L9qnd5cPnVQ89Z/LJK+zerW4GAGb8yBG46Cv4pnW7XvS47vydvzv0p79xybeLo632+CtG0no7aQZNE5pZCP1C6hMGgY6t6rqx+qHFxa6BfCyf76+EBESTB+fwxrHVp1Ufpy33VWelajvSuOnJe1yYx8bEwt99uPrkeyt0+fCU33qWO+d8OueKdnN554OPbChx5Ft/93r58Mcv/o3vXPXzQyP19TsuXtp9VUF82dTMwZH33/ODX7y6cZK8K7vPsgMqAfKs/YVgHDocnxpu5thd9idPdg8tzvV9t7lv38bzzpaJYbz47UvuvCM7dj3n23+p2eaqWUtWV9sKLmA3Xfr8wSEze/SHt/3d5OJj2xvnLC0ttje+qj9xXUlSq5ZLqAVuU9aCXoxtZzSBIx9IMLZOeMAQWnOlPA1dXks3ow3zafEWWAvP8TmhiVylQcxbq4MHPZ8zI+8aOyiOnHFN8HIt/K9V3ACigmtGTKKMCESEzpExxCaiO1MkUoFUyCCS8WVSiUdAD1JxAAAkYtaqLKxzzIwhYrMxSZwxSGQUFVAMaGCvghHtiJRYZ4kIgL0PRBQn1LHHToguTaLaQeKypaWlXq9nDKZpao3ngKmrsw+pAZOSL3wjHephVRa9ggtr0wTqINg33aJZ+FV79tHaedl5zxvbNT22si/sPfjU4b164sDoAd29buyidefOT5x3ZHLdfmotlMOlTEsvXZeEQqkvvbzgShzWA3FPqizLQBgFTGJDCEHFORcYB/xpBEQMPoAqkAApCiK6ei0fHslkbuHQsaMvf+Ub9j2xx4deVfmRsckHH7hr35OPvOGNb9478dIX9L65YTI5tbSMmPYAG0AA8LGcr+4O/SrD0S9+prr0ohd/7Zt7P/Z3vf/9vvP+9H2dE6v+4FPrrrz+rvseGDs20zo1PXPkmZ2X7JqYvPrs85+/98Pvzu6+i/c90wIjm7bJ0OSBHVeMTT/2UKf3KerNSdWqtVSTYVdfJOMUjFVNVSsmRRSogDLEbqg2jg2v3vvw2InpodnptNOr6rXEJ9BftnljVTmTqp6Mtsc3mp07NGm6B1dMuch1gMXFbKFDo5sXQ6BuCe0e3HufP3IIztpsgqm3mmZ4tD8y1OI+IEtRQZpZY8zU5Fg1S+du3rIKkAnsO3J4fNPI97+5B126a9dmkLbY5PYfP7TcTdt+VTykxhiVsoI0qwH4wFWoyrSZJnne6a6mRuc2H/rKZX/fSbvjX/+txV0fmfm572+8f/fNT33ihi3Xnpo/VdXLo3d8vfrMJw595F3rzv2P+rrRqkRO+zUYUiuJTaKBgaoqSFgb3Hph0cgwGhjOnab8R9CvSHS1p35ZYQT4gGENAJEjSoKR1wBrhWJsO/hYuhlEAjRk3GA4ZCLFUYLEFRuElYVVlDAxCVUgA/rQs/m1AURjFMQgJGlKhEmShMqTghjs9/vMUdabAquIuMyBaCKAqLGdFZMPp0gEEn0VGREZGKsQVERILRkI7AiNoCVCi+CIiCzGKGwirymoqGqaJISYpS53afyXWMhqBFIFjsiU4D0RhRDUETElURBbBm721tq4ScUzJSIgIEMaU5A1BjBGz64IEB1UtgBwBu1kTexXKQp9S+SXkIJF9gReuGGpKMuciK21Vf1JmP7183/u2/f+wfEXj/3NzPdfuHJ9K615rhJINEjArjFOgQx4LWF0nL54Z+dT+2rj65JqAUbC0Xdmn7oyfQQAVKXbGPrX7qu+0r2hU/afOzF/E+xpHLn3yLV3T5X7UvIB0/s7O77de84LXvKSl3+KLrj80r9+49nfuv3g8MaNG0QkHH3LTRv/6weHni7bnZ5urLlH+gsma+ZOLLQTCGTo6JHOyy7eefL2k0UBFuRUqCUGlkd2pQRXjbc/AYGCG+m154bGdk8f+7nZb50iJFpcl9RC4bMA3fZBO/MEH3q886m/MmBHCaxzIRQWzOaZx5eWZssDpR0173je8ALSg62NZ121Sa+8c9/Jx8YXHp5/4gfxUienntw4VlNfLoujBNaZetHTvFWbHB87d9Nwd/lwr1c0Nm3sjJsqmWxmO+9+ei/UL7j83s/cM/XKtB3KWpUF6kg4K0jZLL45v3HumcfyofqKOCtnh9Zm3b09kdqilC5UmU2hrCJVd9AGiZqjODB5HAxcT09/RWVt/eoa8IJ04BiNoISRCT8Q/B8gPWKzea0yjof9f8S/AGCtnTKgIwvgadB1/FqiKEYHiHg8ATJoiAZ4BlEUiCmsxERdveaQWky86612G9YVNdOpqpzy0G6bxBVFwSKJy1iDqqIqWQMQ+2WhVqsJqNcwMtzqFX3PzAykxKwAogaT1CorALEEEAWEitlAFJIja61IKPvdTqezYcOmuYWlPKUG2F5VqKEGJrWhoYOdY3XAPmsGzqKiCYFKJWX2ECiYsjtk+kG5Zy5enrhk+HW9evd4euj+1T1Hbp85avefGK8dOv8ie+HElvbQxceN3TNUO+g9FdU4ElMzyZb9igUqXFqVZd1aleABKXGCa8M/FVKQwMKsLAogQKrqvW/VmyIyv7hkLYQQDOWvesNb/+Gv/nx8ZKSqXFpLO0X/H/7xY/8xTHt/mf/qBeZnP19ADZU1c0MAoAsnH27ki3nv/Olj9739rTPbzoGD+9YhHXt4r7p/GC/9of3TrbExW8u6rTy5/kW9s69bfXL/obnHzcFnSgCfJTNBmssnUg4Hzn9p777/+HOzYiVx2XBwhVkoTvU6o8Nj3aItBgIXFiyWlYIImHZYaSIs3frDbdMnsuUZI5UY7fa6tSqVRtIte5lJyryZ7DrH7Nhla1Ne+i2AVgg9Z7iz4men0x27sNBwaI
+t+rT/mVq/UwwlhXYJbSCfVdyXChKDqQvsbVFW08dPvuKqV+48f+vnlmHKPw5m/ef+44GZ4/0Ld23cNDpUy3XPk0ePHFtNsqToFIrYEUaQVqu1tLxqDGapWT81kVqzuLhYr9f9WOdzV36woG6YuYC4vnrut5g7xy/Zc2Dz7aMPrLNEYWH18tf95vRVV+3/wO8e+eI/7Pq9Dy50jhutY5qg7QnYoOLQAICynpaWjDKTkWpijEFQiH4MA54SR9suFCVAjfgqVlKCqG4DQhWjZzAUx8MsMFiUcWWLouiaCzEFFVBBFQMoCB6EB3k8GiSDZBwhrnnQIhpAY9AYg5YcGZe6qqqyNFXVtNks+2UhQQIzsQERgcAsIqFSsDbWmE7XHP1igeoGmlzMbEOg4FGlUrWKBoiNDGplMqmxzrg4vTLGWENEhIYcgKo6dkhonUtSZ2BACiIiwOCcgwTQkK5tPcZaYBZjwKK3rGsPl1gFQCIRiD26eCFUFa1FY093tk9HXSKKDoox9mrcojS6QTwrxgEDKBYaIQfkkVXEoCGW4AzYjLBjh5r/PXnJ7z717RNnD3340c+/7+LfPDA/k1tWtmC8QqKKooVzjXan+38ecWkzXTy10vDLf976s0maq2m3jn1BGNLVPxv68H9r/N9JMzdpl+F+ONiZGr7gxvffd8OPFi/+xz9++a/978f2zbXu/PVLqvJLN184/rmvPPm+Dx869+KFj//Zeaj9nzy9es4Gc9+PGa5N0tQmI80FXc7QVgKlVxWxZBX56ErZIDhrUzp0yiyyLLgNv3ieHDx8atmPTXZ9Z2SoWOr/8X0f9X7W9OsNJysEhq0m2HVa13Hb17Haxn7V1Q1jYaldL3W57jfrTKaWGtUzvf4ffaF9/rGvzakPF9xCGy5qmRU557V8wS/GK+qK5bkjPxybvad16IkJ6i73etbalX5vYX5JzAjWi42bxqAOxXz38ZGdd3z7q82X/cZZD32j1Tu50JoIqfGVggaieh/D8aML15+9+5wX3PTkrd/ae2h1/JId2dbd7Kt+Q5ANct8rhSRjCboWZTXSkAZ/lzgHhbXyN6h4DhhUVQXWZGIp5niD1xiIuEjUnxLzBsGoVx55bj8Ve/EMVNfpsKwIqkhr0vG6VqCvwaIHtTLpwAoiokFiepqgJSIDmAbTqQK7JB01UJay3B6zLiRZu90OZZXneVEUELiR18qyH/rekk1qeZo6R2iRxKAIn14OMT8VERYJoFAookmMYVYDHLXgBdEqKWFVBUR1LlXVubk5NK4soNlM+90Vk6b9Slf6p8CZsiqyzFBIvGBSgbEi6iX4BBskecGL9dz1fHE0qYru8tRifWuy4YoN5x9rrBw3s4cWnzx069MH9CdHt47dv2lq5NUbt62On33IbD1g/GpNEklNVVKTADM0odNP6mmaZ6u9borOKZYqABDFkU5TPSE6xxmupGMlVaSAZaOenppd2H3etc+5+cV33f7t+tBQ8EqErebwStV7923Fp1/ee8HZ2Q+Oi+EcbSKh3HnhOQf2Hvqz5tQvm+oG9uUzezpJM5x/fvcb/0Ff+/Joku/7peenWcNi5cemtvzhB+a+++Wx9qnsm/9Vp0ZwsK4o0A5NM/zLpit8a+rI09+q+yQk3iFQD6FG3/nm13/51/974QgN2GBIKKAhhLSCMVE6dXJi+nB9ccYhB1WPkhCXY7Wq1xWSen3EX3RlcdZZtbM3FZ7w5AyyDxULpokUdOpwmD3qlueKpx+vJXlAUGtxbGJ45yXiceXE3ej7YCAxtbJY9Q7s+onhP3r+G25q3zl//JOfa/zbdeUX68ceALr4a8Mv2Do5kpTVwVNLjz19TLRGXCkHTyZxJrVJUZat1vDIyFCSJ57sfOFxaH0H3cPZfvvEOzNxtcWry8bh9viDgFjrbbzH3Xpe9oKp9kYYrp08/MTWi2/kN/7Rwb96y9DrfiGdXK/cKbAvVc1gZZDAWFCNOog++H5V+qIUBGEVEbDR0ixal2hiXafTievUh5BaV1VVz6hBIDIgioKBtEysdwYohmmtOMRhcFSXjqir02m1DKTwIh4KRFBRkciCQWFUQWOIMBqnEJIz1hi01gpxq9W4+sqr7rnv3n5VJkmiQet5jsGnxvgqlP2KFUwIYCgy7eLugNaQQTTGGWvQMokzRp0JlY94aEFiZCVS1YwsKVo0hGScMZkzSogYcciDOpgIER1miGTTxGWpO2PbctbhQPgPwBIABBVktkhRb8SIgahD5L1VEktJMuA6M7OImCiAlaaGDBmLhuJ+GsuWM3a6iL2JEBQVBaWBeAKeNvpFrQATTFCKyigba5SysmRTJExHq4WXXnTjl35478O76avu4Z87eN+6DZcsFidcQg5GNDrVSD0bTh/bc2LRQxPaBswb8m9NmblxXBqlhQyquG2zUh17/zR/S3f02le+7oa/+7q+43nn3Xhh772/cOsj8+U129Y9+v2n7nx88vkX7Np7/8m3v2nL5+48ef756QXnNM6bsvfsXdq9e5JuX9xztJORK4Oy16ikbQQQYa7DU+NpURt/y0fveukFU7NLq585MiP5xL13Pf7lH8yNjQz53KwuJn9z91+e17/zGcqG8nY3pFnotIiWyzBMo1j2u1Wn5TKQFZ3YLZ64d8zQyNbFlZovAqYbatxvTj6+9VdSn8rKCbt8am7uxOYvvf2aK54Tb+vKpud0t954fNer4Dlcm9vbOnzH8OE7cLbX41LL2ZHxsdUkHD701Parf+GCF55zX/1o+1irlX6oNzV6sLUBAtQw7SgPZ3gM9bwR+75fGHrfP71gk7u1ZjurQxeMLR3tiimTjR1dVJMY0TSUsuZLFt1zZDD14WhGeLpJrAAswiIUi6do2QmDL4KqEiiRRQWwA0slXUt5Y/OZ19bamhTHT8VgAMCf/jsiRBIgD8Rc448AEGx0fwAQFRWOoEwYlNsap1FKpAnWWs1QVraq8tT4sWRhdbnqVcPDw/1+n1kTNAAS+mXuHDZqhYa17hoiorUWlEIlKkjGiAiYQWEuyiKEEryoeA4cGAENNRqN7nInEotVwUZaARgvzAInV9tAOYEpoQRDTkDQYWAHRCgVBbDOgsUglUDNFTZrza4urUtGFotq0mGn1m2bamm+K6VuhJFzWs9fHu/M1hYOrj5z+PZnDtYPnEr84xefC9sbZ8PYrsezzSfHh9qgCZYJ9IeSVa5qfXYBRHyPCJ3xoYLAEfXOwoBgnS36Ia1b0VLJgKTWOpYydfXO6tKNL3jBvQ/+KERPuyoIAkP65X30zmP9v31huOTzTXJBbE19sX5kasmemK8W/kayCvWStMW+75++dx00EsACdGtNiqJP0AtLtPKuNw0heK6G0PUTa2nqySZ/E6qHy/7ojmvXl13dd48Yh6Ysur16uq7S5dmTh0PVV2Blk4srGTRJuOqaftmcXxx55lA2e6SsGYaUnAsJ5T3vegVWyBdf44e3+q1T2Gp11NVHR0K7gsSKE86TpO31qUP99MEw84SpVitZNWlOHZWEqlPzxbETdPARj33n8tAPBtwwq
l1dWg63/qZrzGPzQgDg3lwKvZcNP7h9HKZ33bIidGym161tTBojy2nianVOa2yyDrpgkmmyAcwgH10jIsrCVL66EGrzvn7y8PPeUuuM9YeW2nZ52E8dm3q8vljPi2aa2/lnDmy78YXHv/zq2kpncaiol0HKxJiOIKq1VdTWACWiXllUVVVUpa+Cc2kUlICYMIKqsO8FjV6ZZLywklFLaVAg5LLMXCKgKtJQJBErir5yxBbIe48ARhkVPCtbtYTe+7Hx0aIoyspH0isTSvAmoHG2QmENOVgR8WWZJAkRKSoDG5eqIcu4utK954GHiiCJyxHQkLrUoe+L2MqUzhoRKYqBlI+qOmsZgVWcTa011hgitNZaJPYysImP3iMAIQRjLRMY54JqkiSUJJUPzlhDBokSlxg7EO801trKIKIlcqd5F4pERDhQgI5wUBFBEauGGKwxABCYBUFETJ4iYgpgkKy1iuCFRNUCOuMCikECQ6xArNEuhqOGqgIpKaAQCuoaT1QhiBIimMjTVCJFMCqipQVEtQSsGioXv1eu1gbXxN/b/vrX/eSv+8/d8dHbvvlPGy/oiCOxIaksqkFXaNFCONZVI81mj1bLzo31u5zKCC1bZUbbllqG/WVuLcr4nlO7L7r+lpO+3hhb/tO/euD33rrrw3947qe/c/x/v+s5u7bbq8+fnLD0lj/fe9516z/zl8//0cPH3vXXD/XXbfz0nYsmrfKtzQf3lZbaSaE6sq4s55voSrAIcGCm+KN/mnFD+WOH4d4npmvD9d3P3bB48Phnn+xMtlpO0+nZxQ/e9/4Xd+++3ydjlqu+E1eOqF3OhhPyPi1N0MR5g2rzIXnkMYumk6T9AOOwvH3pxANnnZv9/6h673Dbzqref4zxlllW2f3s02t6rySEQAISitQLShexIcpV9F71ioIiiuXHFbxeC4iKIIogqLQAgdDSICG9nySnn93barO8ZYzfH3Pvo3c968lzsk/WPuvsrDnfUb7fz7fw5IetEcZyTuUZVJUe33Ni27sW7/zk1uHz6DWTf9hpzbTPeUFx6IVLl//MwjW/bIuV8ZPfmzl9tzn5aDF3JNs5e8//PPzk2Ccm7/oLkz/w6M/e/btvTfu3nGw9eohEWYbleoineu/8jXNPPLn27eWrf/KNf+6e+ZW9xZHkBK1fe9nIj5Qf9I3NKLcEkaMIsASGZrbMAiABILL8l9YTRDRgZHHiGBgBgwvoUWvdKLbQauCgABQAQFSASpMCEoRNbBo3KGnQSCLiG5qMIDQb3zPDZdriUcetzEvCJjClkXHxFqRWgiCSaioFAs8RCBQQCkoEq22M0WPANGEE9iHTahrGeqZwLhDaEELtpKgLo1TazdfW1pIkURSsTogIlEaBTNsYHZIWjkluNwZ9BhMZUMNqvdGSjonGpMo6XwUfQYsLaAl8QERrbfMmWUSCRPaglFKqqiqlFAcmoiQxSlkfAodgdcoRahG01pWjhNrOVRRxsegZY/qoMCIh25iUhlexXlsvuqtqH03tbu981vQNp7Pjy6b//fd9avbGy6996zV3ZSfv0Yvb5n1yOlx9esf2xbxsU0SoDZIFqh2wUxgi0oiRItfIKFCV9RR5dKhRmYSilhC0wpRVrIa9zuyObTMHy6VFIVciA7EiTVH+x7fSO36i9z8u2PjgDxIyifjyvgce4YyMbyuRz4K+p9PuFrTdtK12iqOGpGa1QY70mHYxT8dUkpZB962at3Ck6q1VpacIL4tjL7lpqL/bf19f3ZzInTjWnarqngYU7z/1sY/85M//cn+jHECFaUxcp7U6bC09YZ58Il8euB2zkqdx7nSagPGpA8Pbujysefu+5Irn5MP1WPTTOAmdMTVRBDVReaIwAqNMsR5++L2sNqQsR3ahhhzViYUIt9qnDtu547E9xlXpTZpDSQPSEEbb4clQkjaLAPCn0x+tt+U9mnKYwioAAEzvgWlwwhSc8zXGKtZFigzVSNUl+UpHN1hbRpGxVjZcXfrhe35XjcV6doOc9u1R1+0rREAJMPfL1Sx0EyWFuDFKRemxH3/2iVPHWztn512vFUdWa2SDiJtGWEQXgwveBd/4EBp5glLUNGHGmKJ2zeCLttB0zMzCWhsgyfPUF1VTobvIQMSaHLAXRkOoLQmwR/GiBTVS4CiE8ysrBglYcpvUjfapYchHFmRmcEq4LjKbhBCMMUTqzJy8gVcWRZFlGTNbpVGjNQYS9C6C0qrBZGpd1zUqSpVmESRMjLXGaK2tNgCgrCEBgoiIpCIAaK2bG1EE0dqIiCZlUXHtldFN4a0QWGKTztu0qs2xrdUmUBNgkwoEwo14qnFVNkIwZraJaX62FGPzxabWQUMKUJMSBGDVjO+UUkYbEFRb/o1N0ZuiBovfdDmNkDUiNrP62ETT0ebQshlfbo7ftxom3ho0Rl+nne4zYeXig5f/1NfO/7tqdOvu9c8/8M2XXPGjC6O1LLiaTApMSjlFUPsCYglBox6jgaWgUABNYDhWz56XnrQYWLglp2wOH/nHR79yfwIdPvKhIzc8b+w7D6z/6ofu23+g9RcfeFgZ1zlr5n/976e6SbkSKaiJtG3ShvhEZQtVqVLKYGywUInptzZC7EfhpLvo2lgWMctie6qb77Sj+uFe77HxyWoD61Dk/+vpj16YPHKzRkvlsvdpdxxUfqzeyDtFrJxC0ZkNENDEKvpElGSdqQvPefSBe88BOrjxtftV2+mlgF1tSumowdxiruJUR1/24glJfvdM+3f//R9TxcZY2T/rgXebu2uavWQ4fsnS7stPHHobcMg3DvuddyydukUKwrnZuYt+J9hoKlh+2SfzWtZO7rO+/KkX7+KRcmNLz5woU73wxe0XLWd//qdrH7jj0hcfk55aP6YYnOI8SY7rrFedBJAYJHpGxOhjg2CLgUERAsZN8sampkFAvI/sAzMr/E/MHDsmItDQ4EsjIqCQwkjIIIJARkmIDa6OFCo4sxNGQdjcI291ubK50Pl/OuMz82nYTDdEAGBNwEIstokfBQ4goImYgUgprQAAxRFAZCDM0ywh70i7GEwS0szWdd1b37AmFQZQjWSSEDFGBggFRmMNRfDeG21DxcToh/X2drtGPHLq6bWNlUsuvDg37aoYFfWamKx5e+w3OYjNzUcBQuTKe6VUM6NCFseFTVNu/A9bdgYistY65yLHxFhmJiRgIaVQYKiCDtzxiEoXKvalkv7I9mQvjB3Ipy786d9LutY8oH/MXmi76tHkxORlk0evLu8d9s97HHcft3lle7BOZmIjxVYdKwnjwGRVFRL2wBAr5JQShTpwjORIKc0oNVA7XVtYGvQ3bGpLV5nESmTxEQjvPlb+7cPd99ww+scHS2qNsStFokb2wNJKV5kXR32JHgIkxkby7CvQScwozVM/Kom9H1bQDpntoFBwTneQ/8zhoVb+4HWndv6aXBrjCwv1JVP+6YiQYowhxPvuufPQxTuec+1PnFg91WWS3nz3+DPJk0cm6n61fVvx/BeM1cat3dwbLhmoQouTi1/OfoMzQxMp0TiUQxgW
blSSNdTNM5sFJzrr1qGyCXkpYmWAXJuyUQy0NO8WjgGwb00WVE5SN8ZBMHboV3U3idpn08mgiycO+YcPhcd2hWfa3KuL4ZG11peeObiyPqfK9cqva89Gp5ntKKPTblenSYyilc3GxjAxkz5ZO3VkaenE2PI2Hu+72I/Gg0Cvs4CAuk5Y4o5wdtUqU9OasdujL1qZOXTgBY9+/g/x3AviGvtWNTIYEbXWVmkOEQDqug4hRA42SRgEmOu6BtgkMRVFpZE8Sgxh84BstjsgjcRqY2PDohJSItKclAoVMKAgRJYm96C5Eq0eloVSylprtOYYlTIBUBwHBVEkEIAEjrw1ruUG+lgHjzEYYzhE7z1rY0AwojFGEwqw1oY0Eesk0dYY731duUZotknACEEjpUmqSCkiYwwAIGyeV80kQIVNqbHW2hrduIBM81MS2Yw2YuKtu5LaEkU3g0HY8mAg4paBiBp01xmwSdMHg6BSRCI68pm5HTOLIhFRpESkCQEkAUVEumnNm0gqINm0miit8Ez+Y+PvbDQ5/8+sEBq3MTczf4DwXwALm1slY4tQKVSLw/Lnn/vzN3/mV5d/Yvt3ZPFFrm5RUpPT0USshQVBR0EdhgCtEmFFpmdpSEgYa4t4IFkSkZJtYLXQ3v83H7wzmTk0cbaqoVgchU/evDgxvfvOY9X3Dg+TMV2FyYlQ7r+sM7/RyUdVJyEZRenG0kMmqajSqLFelvJKMaV7L9q94/DG+hOkyvQvRwg0ph0wII5UO6xWPOGiIIHeNzz5pecOP4ukG/cLIFMfBAhRuN94VUXqZvXZzOcFCoBF3C6kiPlfpH3HYNJq9qWAr+PUbp0a6lX8PW38f2Gkp7tPTie6H3/4YPRBGa+eiuYLskayqiBpeZOFhRwWEsE4gDdXjz4ECCAFxNPV+J+x22YyvNNlwwD//oO6nStzrutF+PpEcjfomeE3yke+BcLYrExxS3pHaNmwi0KbcZ8hcB28EoWKmjgQQgBmEfEhRudjbHJGuBE9KQ6bxHXnWelGbM9BPACikkYzpREASGGjBqHNLndLF71lZ6It5zGc8RyLiIhiiFv/0iw/ZAuwRZvjZ5FNfAcCYmh0C4iwKUJEEQFFpvKBlDXRBx5VpUKy2uRpUnoOIViTIGIzkGuM9WM6HZYOkWL0EoRAsaZawyCoOg4++w9/VfbW5p9948te9nqbtAoofO2s0gK8OaVHbN6SMfbQoUPf//73W92O2pyvh1Qlm7PurRQyFCEAoxQzIrDVGGMkQhDSSgMyxai0FoPCbIUsIyvyMbLKhvWo87SMfLE+aWa6xcJILjPbXu6u/7nio199+ptLY1fEN07sXWidf3hyZk6m19yIvDZpqWLbBgAkVlbA2JxUSqhEWKACBUIcVGwn+ejkBoeIbWNNpkltLK8a1ENXTk5O/vVj3VceevqvXk5/P4t1a/Spt6w9sw5/9UD7tqOFRsqNiWgEtABh9FprHyOEYLW2ANGFTpZiZXwZAlEna6+/exHPx1b9PBLbv/RrYFkWMb7S8+OSf7VtjFlZWekm6b9/8gvPvuRF09mkPvzw1MIJPX+4HUMvyfXV13aueGF9/ERIVC4TvO8coIDnnOdoKIefjqz9rl2+Wsb+IKwu6Lzlc1O4um1TRgoh6oFnCqln1lSU0Uxud6O1sVCVyGy625yt2Gd5p+6dWrn6Eq0wEsaU4pgsfXLtuX2zr+OPsqueGozlxb7/WIcd3Ux1u6BmxrMZkFYNG2meDV1FRBr0eGecrF3a2Di1dDKG4aGzD9bHzUOHPpf3psrxtSg+ap/2u0mR29XOiU+fuPpV1+KKf/yhf/7oF770tnf99+dc8CKz7kerKzw2uzZcyUZkNDGRb4wxACySJEmat3uDvtaWIWpSSZI456qqUsp4jpV3pJUxBjUMh0MiJqLRaICK0jRVTJ4jkvIx+BgaIpZVWisdY3MjiCJQe99WGhEkxLLyaZoGCc5HRmbPqtFqRgYCJqyBQWB9fX3TfrP1UEqVsarrOkkSAmlokYgCwFanzBwjK0ZLSkTAWBFhRQZNY4tq8O5NugMCISBgg+WRZo9FRMjcrMQIkAR8jKRUjDHyZvHbHOpNFUxGMwiAxMgcRalGogrM3Lg4mPnMcbi5bOOtX28pWRFRKdXcj4Qac3XzdjfvYJt2wCgAoDQ1eRQgIMLCglsnLzfDQpImzab54hkb11ax3wSwI+Pml9QoQk5GkpHbaKed39r+E799/JbvTj39/aV7nzV+lQ8qAqYMtQ8qMhiFyhikUSmHdz7/PPfxFZiexTmUkKt65PUGjxVR30tX5DszKgKS5IPUWl0mnlRMxvJuK8/celFEMPXjJ1uUVBmopYqiH8BijX6ElE5NtqbnHr98/vtxe+clv/Mb3eHIwc61cv/ScPD4I6ce+cHxY2ddMr3vQJ4nzzzxAOHYpdXiT9//11ODwWndzr11apR4W6Q61URWiwsqsa6qvKtyYoSUXRRNxhjHTpAhoGp3x6h+hsY/cv7PLO49h1dXX3H52Nn7k//7uZGPKwdO3bW3vu3Mh/BKdf65zzwRVVHZ1HhKmdZ0oIkEyhIjH3l9+wuvPoGrPwtx+8Jlfzi5KGGc+i0F6zNY2dZfvxeFpyH/+evOYq7Gu2P/+B+3fX2tvX8Kj83Rcy+nmw4tzZVJGgVQOxl2bGZ1q511J2jaOUdE3nsW8b6uqkppalSEAJtS5xijr+rm9D3jHhQEoqhDaEYogTZdeQCgWGtHRCQITWaRUopjRIGozrh3+IwouvkUbarzYVMRLZu1OZ3ZG29es4KIyHUA0+xemGFrbBOFtcLI0Ai1AKmZVRNpJIcYY9RGyJBzLsbIbLjymw5ABBHxMRAgM/SjkyBGwGpTKOdjJODpTiuy/uq//9vUWPecKy+tIRnVLoemNqMzNeiZS4MRhmXxwMMPpa08z/Ner5doQ4TOORTYjOuJEZv8qOblRhkyIVZEwABKKxFRpJufCRGNylIpBczaWgXgKaYmLTgmrY7V1i7GTi+Wuv/Ria9vS9Vb7x4/cfnSHXc/5r9PD79l384Ld+4YTBw8mnTmgUZcsCqkBuCcxlOWSHU0ViNlPjVMrKJLIiXmofvuQ+fStDPoVaDYpgkF1loPBoPDxeiDd9kP31T10se/TNceGo+HJuCmA72P3W9/7ZsaiJQ1AbhypUWNSkVmQh29l8iUZNDZxumEysaTsVnaOb790Jg+Od0unlu3jtTqKUgBdgs8g/gmrr9Qg0Cr1cq2bS9XBx/9xEd/81WvlyeeGNs4olhKHyUbp+2z6bAoTz1jjIG9+8pnX58c38h4oMYmqqwDvaKVj5fKEES9ui5zSy1mZxRpqcLQaIxViCSSmEb/q+IaYV2AQZ2KsAOOmsz6RjB28affpNO8M5tMa0/g17wvfXVKdF2LtXmn5/ekO3bvGMtPzp9ImJf6yzGsjOdjo7o0iUmSRBlT+0Airj8qwnCq3d6xfe/K99s7d1+5eP4DemVHObFWZBu2122ryfAb6u4HPoP9UQvbPzx2x/j2ydXDp8dfYMJ51xRSxfV
5IYnBqLJUuBkCKBAFIMaYiRhjmkMGWJyrAAAVRWFiBoBGrqWQmrq4WRIzc5a1yv4QCZljlmWEWmcJK4xEmlSDceIAEsUISGQxehSdUWZUlVZpAqUwSIyR0CtkZM1oQUnkoNFz1KKam0Jd1w3lgrQSiXVdQ6vNzEQUglhrYwDYFFMxIjZ3KxF2IRilowh4T0TC6MBrLYgRAWIMUdjF0LxKa40CrnbaGOdcBCSiyjtmRuEGYKKUan4RQTTH6IOweO+dc2eUWUQkSjWzvv/q6GjwWM3gWoiaqp8jIyLQfyYck2xqahBRNsv0M9EI3Pw3wTkEIdkMaRdscJjUuEo2q4S4qVAFQVFb6YFNO4LQlB25SkK/jIGhZZd78zdccMNPLA8/NP7tv89/cMFwbxRL5DxxFaOUJRJxJB0V+NHJfa87+uS3LmydHrqiHdfm48xanQDwXzzzyo2el8kOGt5YrDgAj3qzmBc4RPFLJkdKIYxijiauiAjG0Z6W2jWR799H1529/dkXXfrKt73wpvXV15968uETBk69ak7vYQ4x7p/O07e/+nn3PPWOp25+5OH3/pt54L6Dx8Kr+l85/+hnN6qNSsYuDsoD1ASdmXHXrygfdwO2GGMVq6G10SbipOmIx7I2zajFjWjrQEILZTY1cygcu/bpP/733c99cPs5zz2488u/ff9bRgvP6z1yOSw9IFNn7tdDXtrLtg6mjBFzndRhh/MwNyjBmNQeO/FM27+wNf/7a2f/LW5b6c9Ke13BJGmkWFA37E+ovP1bKy8/d+zAORd2ZWF5fccOfU65tDpr47fual+rd+6fLft+YL3oyb1JjEaU5aQsCyAkotpVIuxc7agEAGttkxcEvOloCJG99z5yCKG5ipsKz7tojFGAjFG2LMI6Gmr0jlo1L282TQQY1Oahvrn53Tqz4lby95bxTUSaEG7V1LKbuxIGQNFai48MzLxJ/GhIqyQQHQCLOuNqAmxsF6AoyZImQElZY9OkLqu6rlsZ1QQxxi2LI0ThWNcmSB2hRFAomjCzcHpp7tHDD83a7lte/+NXXnfd2OSY26i/devt/aqXWCVBSbNKF0EAYfYcBaj5u9feYTFKshQFog+kgL3fKkSCUspq3YSpuOC12pSwMQOD+BAyokCAnpNIBjUY42KwpFNUXBajXCELSmWihHbCWgPHbYt6lg7JSy7fToNLRqcXtj11W7U4//ETfDC77dUzM7575Yk9O47bseUWMGr0IRMQNswabdNB+cASATQeOXIksSZUZTUcBa210c4VzNzpdAjk7ZcNAsNL6n+9LXt1boAFaoY3XdV6Mrnwi8cmOelgaxLTMduZUe0p1Zoy7WnVnlatSZV2/2u9wrEOK8shW/Gt+aPPfws8BTCFMCXQEt7Nqos0pCzLCl/qJDxx34MnO53rARa1SqJNOAizcnHDrYqh9PLnVnv25Lt2xRJ7vkqjDRMd6K+o3GBm635ljx7zTz2Oiws6Qmwpq8hXFVjdbXV7YSiVtAh8MVTKBLKZTsNwXRDJurhQLLz7Db1zd2kxE8JuhBNdWUsxkBqw4FrsAsgDcv322T1U+P27r8iVLTQzB4bIIRokrj0iMnJvYTmsb7S77aw9Oeq7WTO5/Wtveurp/cvPfmguPgb71s/fuLz/22riZPWWn/zvH//Wzfcf++ZLr7vhZ97wC6XNC4Eds1Nz8wt21+7hcJiQVAYUAoeoIiukLMus0ojK1VWa5s2qlQiYOdQcgxB71AoJkyTxjQM4slIqSW1Z+eGgkMDKKoFIBIiYGNscCRwisKDQZs6BSJqmVVUpgBD8+Ph4Xdc+hkbKiVEUI4koQhHxKMGHNE2D88zQaiVN1+i9r+u6+RRorQPEEEKapszspU6SBFia+07giATee0Il1KT2NscYxBCaQVgMIcYAhEHYOcc+aNokRzKzxv+EWFWuBhYjDACGVJN9RCFUIiUXLFzXdYWVnNFIa7LRNAfhZn7RVuuJsHXKatWU3s0960yXDLDJWGhsYRogCjfn+uYF0GziAZFAIzWWwUYU1miGYRMGQhQ39azNcI+2iCjNH9TcLp2MQFvgyOtiU//02pHXpM+5b+n4d7bPf+f4Q9e7S0eJY+8KhRvFMBVyAYPy7QQPwODv8994Od581fAz3VzEtB/p7fv84Mfu8OOOoxkuQemvnah2HTh4wVnpNQdngoYk4EkuF+cJIVeaCcLUuNo5NZvlNJGWZMc2wlkF8Buf+5z9//Glx3C2N1gxX/zY2Fs/yOsbHoC9f3qJ0p/7tY3tR8bXTvzix66f0KoXcYHJQJon4nU91KjTsWJ1w0xOB+c72tZeqIopO00+kInBQ113iiz219lICFWetoqpdjVY39gztX/Xrp9cvFV631m5p3d9knbasqrsYyHtdv9zBL17DlctazRGh3akVQWt7lQEJ+wghtnVq1uHP11P3DJ/5S9RlLoFGzNRXApV0p7bd/NvXTXsUXcyFgDzTxZv+di9vbEdk/kKBuwX1FOjv37S/K/uoB5FpYn6PbFZBU6QiXRzlDrnmMUHH5o5cwwhhDMMDRHxkUdVGXxERFSEW4BI4QgAsQGvb1GxlA9ExBJRq2Z0rIgaIUhzISihM7vf5jPqYzwD3ICtpQcAE+mtbTQ17wcbHI0iqnzTLYuIRxBCbArYBuu29YjMQhhisEorpZgZiTQhG6MAS1dDklRVdWab0zz6IAYNxgAoTqJG+tw//MPiySMf+9y/Xn/Fc9cHw/5q/9jjT69uLNmxdhCPm0ChAAAk0GiwNotvRIvW1QEAFJIxpiiKJEmQt2xdELnxJggASoyMsQGPNT8QFogGSRQ7jkgkkTWgc85HDkmqXEy1QVBQO5/6tLJMrSIZaKfK+RO7h2nZ6e5r33De467efezb20/Ndaul/sqDZ62m59l9C+y+J2WLd/bbghRBeQqsPRCCJIntFv1B4cs0NUV/mBrrYogugFYWSUR+7BK7bxwjS5t6e+NTb5h5aJ1mejQdUcNu2Hnmf0GxHoarYbQqxZpffLJ4+o5Yrte9hfP3zT7zyN3rC8dCuVFfuwZ/4GGbwBRACjC79WJBRJQgMcbV1VWtNdikM9P9yN13XXjR2ZlPq13bsLeQVREpTffu90keRalWmrqwlmqtU8q72JnQIx9Gw1CV/MRh8/hDYfWUjSQmKZxocEogQqiLUS4oSucqGamsqEYpu1jWip2Yjj25fPLV1x9/zfMOBKMD2LXxq+eeuSVLsRa77vNKMma8df2cZ4qMYUFjIjWNBD2KzhTGqKx1Htj5SACWRuCnds7YzowisEmirO6P+ufOvbD9FxckJ0899e8fOv7kQvVA/MnX/tyf/cvf9Fr8W7/8BxcdOH9ubRnyctt02z34JTOxpzh0MCtEEqdqba0GTVEYibywiy66sp1mzjlg0VrH6JlZGSPIUEdjksrV/X7fKD0zORVj3NjYqFGcC1lqjDEIYpVeX10LvpYQ2QdkoYYSywKRQQAUueCt0o1HphoNPQAQ6dCY/NETCERkwciEZMzm0dLE7jZztigcnAdCpbCoyhBCNI
1eWovRTVZxCIEUNtSLuq5zm0XnIwgRUQwkoJVCRGNMCCHGIAhBuK5dqANwxRKU1jH45sLz3jOCRHYS2QkABIybdzdEllhSyZGrqhrFUWMTag7gerMT2MTl/2dUnPDmcUvYHKXN5RtCEMImwxEFmikd42YXcuaEbm6yuDlYbOybLLFZLgMTaUXNAay3YF2CELfSA+m/Rt0giohTkBV1rcE7SFzUENcJfzF/7rHqln+ZfPLqE2etxZjUXKm4EjQG7Bra8FliYnsKv/NVes6bf/bTX05/J//Am07/xsNrh1oZFckws+XGk8u//obWG3782lHfa8wWe1Iji9Hjxh6YkCiBgdLYdqHu+WptEY74jfGpKS/Q3U75Dx9ITj7Rs+NTnan+5z/df+27bDYp7DkUXA7V2Pknr7jkuk/+wtoF5/UfOwI5tYJY32JyNBrFG2+68Ou3fONNP7P/K39PMV8lGut0jBhfU0UKnIJEZTEpRIx1dli1JscHKNqHCof5enZCHvXcaWc2nVVLJZ4cxKxaz4yDxf9kQffTKo1pYTBV2jsZCwmgi7GwoPrju75502eVPFHte3NnjYeTqJ2ERGCjwwITD73oyUNzE5PT//rFh3ftPviyZ3U64xNrqHePw6pL1mW+nYzPLYR7FsevnTi67IzFrC/rHcoFiSg23qGqrkU4hFCjCyEwSDN6QcSGqNoMY5pxKLKwNMonIaLIYVOhwJGAkAVEkMhLMMY4v8leZpaGpwZb9JumZNzclYTA/2WCS7I5iD4zo2bAxuaLjRyaNtfJzeCtAYIgYqYTZg74n07iGDfrAPYBN1OtcTOgJUZA0YSamhX1ZjlORC2GOvoIFCSkSMeefqbf7z/7ec+ZnNix0RsYUmRh5sBsa3m+KFxLJU5i3IJ0NqQLRBJmEVYmcSEk1nrndWpEZHbbtuWVFYAmfwWJtI+img4k0a4oNRIjAQES2EQrlACiCJAFUBjJaA0sSEzAKakQ6kEWiNByqthzshq9VVxjmq3rwL5qDVU+4snJs15fHXrBN1bv3zl3eHLjeFY/qU9ANvy7645cHPW+ldb5cxOz6xadHmJV0cDmbrTUCxDEahusi0ECchSBoEiHEG7YWQOAJiDxI2xfXn1nUpYneLkT11ph5Uc/NvAbCxl5TTAqi/bk2MbGhiUNoiNCiNXK2oG1UyeCi76qJ54a68N67EWYEIgA2wFKAQ8yFHlUMm937Ntx4tQprVIeudF6sTxmvhzozTr0j823aSD5ZNlfz9cGCrHCKk9zb1SSpRllcUwpmRC36p95yp04oY4cLnrHrMYYJUbWSoMXJI0SFDIHI9GNqA4GLLCmCKRLSE2oRlO7F199PWUt+etbtDbqA3cdvGj14v9zxUIZ9DBmPe7c4Z538/AaTIxRWEWXCnsOBjUyGmRXlyRaA6bGeg6T7fZEqxUpjmc6S30NRT6mTh19Znlu+dWvfMX/vfvTvYufes2N7/z87V/cvmfHz73m5zut6ZPLS9P796qF3r+/+zem5x7LrjnbORMYeQhioXK1UZqIDFAoa6VUYkz0QSnFyE071aCUXPAJYl3XgNDIoObn5xv9UV3XWichsPcuSYwoTtPUGBM1OZQieh+aKxi8AomgBTAwWMOw6atoGO0RraLm6o0gJAIRkZGAQ2iygbX23qdp6mOItd/qAgEgljF2OjPlcLT54QLQpIio9r65wLqdzmAwjMygyBgTY4TI1hjabAI2FVhCKAzee1d5MKKFUVEsCvYhhlDVdZIkYshzbMwWAYKE2Jy1FZeReTQaDfWQGgYeISJo2QxFjlui0ObAayhaTT0hIlrrJqileSEZDZEbXVUUjghKNtvZZnDddAYAgIJRYgRpwsxQSBAiohLhJnH0/z2AG82ZiGwGKG215hBQkhj6WlMcqRS9FHhs53DHC9O9n5957IHx8uynXH885aKqKzWsMAQ1yWyzkMV28LhY6hfftB8ehOO9PVmWyMhLVMDpmKku3HP2kw/NgWDpbdSrLON9GSbOrSoJmBmoPUSUtLYxI2FndnPHqVLA9ONGBCI2I19IKM23PgmveI/tL6nYZlt/b063oZw9fduOC5419+jxyYFIuy50ZkuNVNY297pSvWJYw+4P/lmVZsfe82uZ6aelWBdTrhHaoDSH4XqozIEDdUz56DOTWKStCahHKQDFuUw6Q69iGGCKoBNRusl93GoTEgGXx1SVKm3hSlKOF8pwvtHpfOoXPqN99cY/e+O//HHBCqdXYbU1vbx3BOPL9rZXnPzms9/97Uf+6Y9fedXBmUvOMd+448iJ4XDP9PYsqfz6qdv+17Wv/6M7TrZ2fPuYuWK6m1uOI8BMl1QH4Wa4AQBlXUbmqnZVqJmZULkQm4xCH4NzjrYSihhEGpU+i2CovQAhaoUcRYQEFQOIcIikiQRGo6GIECqSzdVvQ6tFVMwceHMFyyHK1qnZnJEAACjNFQEAgTfjvxSCiGgg0ko0NX2zJQUsEtmbSjbhVoAoQhhjVEEhIwpobRDRNettIiAFBMwcFTkRo5W2ifdekQH2CWlQeuSqoCnrdMqyft6zn3vBvoO9uo7ALLxj154XvWDme7d+u+TCcIKIQhaA2QdQgADsw9jYROVdU+u30oyZEam3tq4IkETRJlAohBBZtNZ5aoAlTfMo7Fgix8QYkJCSdt6zRWJIgILzYJQkilhcYACbxw5I0MEHZuVaFk1BvcwF0paNDTBClNUYsqI10TevnL+gTs2p6aWHJlrfiffEuPrQxL33TyTfOjh2aLD9ksWpPcfT8X4+IZPfP/4IKFCJqkfB+dgdG9PC671VAWDm4XB4hpj2vvW3eSFLjACOMQjddOVLi3J8baN3/PTcefvPPnLqRJIaHZV3EtglqTn21OE073ryQRytk/6K5pdFmtNxKsAkQAQ4DtbY7lcmS6hW19eJKPR7VSs550euTCd23AK9V4zy7pNH5Nzz8dA54Fz95OOybSqahLgbNQCtOiJYW02GvugvJvfcqRfnO62kECVMQGwgYuSAxEoRmQpC1KNMk9PUBhsrrimmEdAEqrV6+ZVw4LJD3z869bHP6/X+8MSjT5yavKHi79y89qzPrr/g5LCVZ5SQ19qkSiOBSEwDBiLNIIA7pmfWVlcjxKCU0SZrT3mR1KsBDgHHEsqOH3uqv1Zeds2zn376oUNm393Pfezux7/Og9ZPvO5XU2mRH7R2nLV48+cWPvme6QN78t27Rw98N152SRmdpnGUSkQiQ3NPb1KRADiiiiyGLARHoMrhIEmSlCAYoEgQg4rakAmmDogBo0EbyrKdQdJO+8Oh0Uk1qjyLJiLRmkiBAWBAzjCnSAJRaxLghmaxNdJpHPuKmQ1qJKEmJwc9RyXCRBTZSfC1OGCkGD2SAYTAdXRg1NLaeqjqhHRNqJEQmAQQODKT0Q6QizoQaK3rYRVQVGKH3psglBgnHhFi5ROyAFRzKEKZIjnnmv2KSPTsp2enl1dWUsqZmSUyiwL0MaCxzFxwYJHAsSgKZkiSBACUUrUBqVjCllFBac+RAS2AUsqHYIzx3otIm
qaKqF8MG9SXAIdGJobkvTeIQAoIY4wo0AwMBQBRY+Bmgm2AlCGvAAMTB1RaKxNBvPdaa6UphIBGx2E11un2feVi0ACpsXWoQVRmLDM7jgB9EkiS5Mjw9Nv1c/VY8uudj71j6tnPXT142o8c+VFhcrNnNa7dODM+kAgAnZjaZAwAtpnefLZtJE7A+KHZMbsteLU0mNdiMQAHSey6Z98HUUppdI4p+pCYXlUk7W2zR/7uI2e/4931jrPjCLZf96aFu++ZQjHRsFJLX/7qjhe9HSkZar8Wxp4p8KW7qNq2bf5r/5iodu/QdHp0rpP4qlOX6zyx7wCN0n7SuvLOe9XVVzz8jl+eed2PHfyt999z5RV7rjp/VSbcrTcbvZ7LRPfD70+v/lEaqnjsB/Nf/2r7859ppXvY98kkfQkZBETyLGQrDBb+y4MMV4FzRGjp0lctULXFCPKZn/nHsjPztg/dNLO69us/ae56lXnyptaxAxfh3IQ8+2bedkwS/+43XBqK5UeP+n+9/8S3nlhLxw5N7nELx8qXPnfPfLnsQbtAp0ejHx7feem2x2owEANKKyrhYoSpEUWVr1nE126U1MwcKyEB7wMieucUEQlwZEFk5CisGsdOHY0xHBiDV0o1NR8DeO9L1KoWGYyi28zfbKo6G8kBo9XRB1TkOGqtlYAVapDpERAArNVNaehcZa0lgTTN66rGLS1CIDFIRhkvHAm01hTFAoGiZmXjnOvkLUZwwQshiYBIppNmC2N0AiKh9l4LM6PSECIAjEYjpVSUKFoAPHNtFKKP+3bu/qVf/pVsfOrwiWMinGfJwX37n3js8P33PTgzPc09X2uAyJm1VVUbS00mfABeHq3maZYmxntQCpVVgpAnbT8qUVEARAQSNlobY5yP/WGvlaYueDIWOACAcy5VJkQhMkSU5EmoakvkogOJCtEmttMZW1ycT5LEIwnqIJHYpzppt7P1tQ0EDSoXYCuCSVmL6pHvOt51otM6MvOY5C9/5HmnDrijE4On8sX7O4/dn+r0wt37l7vPXxvMPbROp9d9q+2M7Nwxu9wbeF+rVIcyAJpbT7feckk/SsNJAtuEeiAobfT0eR//zS8NC88IG6PBXbd8768++uGHH74nQWTRSCpEMDbrD4cxxryVV64OfwJ6zMbnBhgQMMM4gIPWJ9ozT2xf7C7LSj9J9NDqK15749KRU8u337zG5tPTu/77+Qfmr7oWL7pYry6TnfBp1h5thHohmCRppeVa7Z96ys6fToqREk5Dwb11z7khUSCRtSQOFQEbRM4pGwFFHdNRrLnn0pbxyTAZxVFhDpy9c/bgxDcfm/v4p2cz1r1+OTYxrhylFObd2PxoPBkLUotHiXVtMDYrBEu29JXN2hTrwXo/NalSqqxrRZCQ9t6PlFeDuF4W5WChHLqde84LdXXkh98+Fy67m75avAjePf3+qk2YdYYb60c/8Lr2E0/s7O50J09vPHn4ond9YH5q59GlORkvg2elFCBH5s3E7ghKIUuwyjKHGP3k5PgLb3r+LbfcUleulbYdlMZ2qlCE4Ehnvlxv5V1xYazbtalVSeJHA9JIgApEaWZxxpDWyCxKKYwMXpSxickYJATmGHHLF5sYABDmTTsNMwOpZvAFACE0tbYGNiLCURRwJAGtAI3W1OyCAkQfm0h6jm4zqRAhuF6vAtZEVDIGJqJYV6R14Tn1AQxaq4dl6bgGEReD7WYDVwOjRUEALULAq3OLVpvSu1aSEmOQMKwr0kYJi2Mvnpl94E1DV4xaa+eczSwAKMC6GkLjNSRk5qBVCMHatN8fNlO7fr+vtRatt9yGcEZ5JSI1CyOhokYf15QFqMhHUYHJ6CisI7BCJjRIzEEjIW5JuqjJagchVJ5Ho1FEAKMoyoiHyhpmiCqISBErpZRiKMsyoNxZPjA91s4vmLnre/dsOzVSew/0+qcG5RUhZRnitjQCyVKZ/9MP+6fc8lXjcNH0+mNPnZpIWzG4/jDoFg3L42v1yICyaDjIECBg1Jq01uxDSXFCd9eGzs901W3fmVm4R11xoHy8EjJTz3v+038G0YAvQGdJNv/g2ve/MHXTf++snP5umYyTv7Bt5t7wC6v3fPv8mV3pWRcUqxkPjmnVBajjcKPcGL3gE387vOWuU6+cMaFz1SOP3/2pT5PxZ3/pq4/dfNveX/jpyPT9d73zVT/+U09+8dGVe/5latv506btolFJLSrPjKrKwSD6Nqgxj7XRSfh/G2DXSTqWqrUQEUCHyE7JV9/6D6f3XfnTf/3q7tpjUk0g1m/8Yve3nv493nt5G5x77Pz6nR+Ar113YqOzdpf+k387Nn7O/vZEJzOr7VHn0LYwFeubbzl92baxmYmV+59sfWt9/MD2nSE/ORF2oZR+5MAozQLeKwEUqb1j9FqZcjhK01Rrba0tyzIIE0sjn7dZ6qtSaxVjFEEXHBEBBwLWpKIAENqsE9aHLtRBGp8hGlLiPArXpDyKCaiRWEAi++gii28yPZk3TfCYxhgDR00kghGgdHUzakJEohhQfGCjPROSVqX3FAWUCcJC2Ol0RGRUNRT3iIqyJGUfC1cYY1ikjsEqzcS8Je9oFsxak1LKcyRRW0UDGGO8qw8eOkuYH7v/ER/qycmJdtY+dvz477zvvb/6a//zkksvPn7idCuxzNzOW7V3IURgyWxi0HLtQSDTmrRyMWitWyZbgSpXNhHQiS2CC0AYRUVpJUkIAZWBUAPLVHcsa2en5+dSm0QBFnFVJZEbN3+zdI9RNjY2muva1xXpVCmDAnVdgoQYIyIohZEZFXAFRpNiCVyOUlgWloG0T9c/snCWHsO5meH9rSOPuMNL9r4nxtRTu7r5b+6ZfO2Pl//xZHbLqfmnT7WU+BSiSaBjktJ/+Qk5eR3uGxezuVkAAGQ0gDR39i8efWyhlbZAJE3br3rNqy+98vIP/MF7vvalzxsDIQZNCccAAEmS1HXdbrd9EfDdGC8E9ULhfSxvD/qLduMve3X3mFJKQhiG8vyXvnj98PETdz+STowrZf7j9DNv+OmfHLv0Wb3+aMRmvN0qhqv10sm8k6pOty43bH+teuBRvfgwEvnCKm1rJiUFKyscrY6hjqQUJXUVooBKNIVKKM2dTXSvnu644IsV39129TWy7fz1r30jW5wbTY5pwNhiGvVWEgrDMvRHq9PY9RAMUG4NIEel0jQdrPayLI1lHRJ0dZlpq0WLpkhQRR84oAJkXZ86NQCvs9yNjlxyzuWHnv+qO773tfGTO3e8bdfCV/w4jx2/687wT38wPjxO286LS4v9PN/3y7/Ref3PLt3/RDTWK7aRlFICIBBRaQFwHDVpo1QzINVo6ro+efJkZG51WmVZpmmOWmId81bOPuTQ8d6TNqPajYoixtjqtCvnnaKooZFBNaKhEEIDjgAEAQiAIhC29MbNcasUMDMRACki4rhpZ3RupLUFQhQTAgcXtNYqSQW8C4FEiCVjzSKOsCQmDwyMJKHJAQJAgRADKkQfwUVqbLu1U0xKq9GwTFLT3+jliQXY9D/URZGyJmWabKag
QBkdgKrIFlVVVdiUFEoHZhAiBkQAkbIsWtzSWofg6ro0Jqkq17TCzrlmj2ttSkj9UdFut1vdzqAYhRCaGXtd+yZ7XIECaMKCWSNorUNgz15pi4gcI3JkBIqiEhU8i3fMDKiCSABxLIhIoJA9CjTrbUEmrROdxCi+dqxEHGgyhlRdOAUSpNbW1DFqQQgeIgeUOvrlx05Nh2r438797u2LP76wc4lMKWvK7A1RpR0ZejPbon7g21emYRzaCQWTV1LnzArrklXhjPahDlEUEJHnKBJ9FOVIgiiFa9BXQRlftT76Z9t/+71ulOmwsT7qpzsOxWdfF37wQOya1MdxnFj7978fu/71j5rseD95cXtYbQz2X/OyR6956eD7t6ytHFOqaNG4iV4mxstPf+L0o4+tdMfat9+aZPll3/3i6WL1of/19pd95J+W7h/qex+cN+ng4dsvff0bb33f3+2Q+Qte+wvzh5987Pd/afu+7XhqaGwxGoqOoLNpn0JkUnWota+kPnMAt1O0kDqwqCollCj/3Rd/4PGLX/mqT75t+sgDicqqtNwn9KW9P/aNPVdtw2XrLHz3JfNX34q/+eH9P7zpTz8/nDq0B1sqbDBtH1up+m++fqLbmVrw23u3PHXTlM124neeWvju2OSPHaiZCw+aQDwCRZbA3Fi+ERRjjN7a1LN4V1fedcbHnHPReSKMMVZVldrEBR9ZtFYNvoqBAMEDBxZgieLEqAikTVrXNQBVIba6GbBIkAZ9JZEFxDQKf+FGPIgBlVLGmCzLXKi99wCb4LnAUWkFWjWGBCWsrT7D17NKA4oPQSnlmXuDPiIaEYjc7rSrqnKlU0oFH5z3yhoQcMEZY2TLQNjskqzSymipaw0YgdJsM0aMfUDE2nuVJ1alg7K84/Y7x8fGfvc97/nnT39qdqx94/Nu/OHd9wQfRIRI57nmEBUgGShj7HS73vtRWaZpCgCD0XAszYMwE/gYMIAiEGA0ysdglCJmRAwkB8452N/opWoLs6OVRDCkRERIovew5StsCog8zbyPBoEVCKP3fgvLwyyBGGICEhmdAFGLVZ8JBKoUHEk1iJPr6cvg4hfm5z41tf4wzx1ZO7UxduTEjnb+7suy//GsqW+dWL7lUfzOUbU8ZBCenJYkf/VXyu+9HjPthSwjNXnRK/vfvL7/Z7YZ7WvfCFaOnVyZ3Lbvnb/0mz/8/h3z88db3QlA7VwNDS8IsdfrGWO88/phIw+JJnQTRfhpN/bXE1Pj00efecaadM855+zvtr/7lQfSzqQaYtaKA/R/9OXP/eX1128s9cyJ47iymFiEsrbOry+dzhYWh3NPJ8fvjb1CzWznpISIRCoDqKmxovugU0FOfF95y4lS3jNiEdwuTC+bmWG/IuXYiTe86uk95+Rrc+7w/em4dVgTs4xCvPisvQBQU75t9/gAvEgevO9Vo7V+r1haG8wvF77ql8PBYKMYjTSp4HysnRXUEdB7I6K4UCO9PhwN6zCj8JJrzn9m7uQnPvfB57/uLRctvfDxHd9FtvP/8Q/40f+x27eT9FC59ER1xTUX/fm/TbziF4ZrIZ/dlhqbcbvhRyjEzVTgLYJE8/NtdpOVqx9+5JEkSUVAJSmQ1gA6SLUxAuHIJIrAqMhApNMkiS5abVBR7Zy1KQrFCMJKqxRBbyFnSUSYAVEBqUbBiBI9cwTQWjcDWKWRFIhIq51qg8HVITqlUClkDmU1UEwKkZkDSC2RQbSgZVQEIBF40+DYfH+LCn1MlWGQofhSQzI+hqkto09SE4JLUxtjBETXiK2IfGJK4goZtdJI4EIupsWGfGQQSEwNrJAMU0O4bNpWDZQkSQjBWqsTy8jNIi1Gb4wC1eyDMTG23W7X3i0sLaIibQ0zT01NkdFKY57aNLPWWmNMg0oQYNQKCJGEFBABERk6wwwhYxQqtanzItJaazKICIpEN5HPaIyxmuqqQsSslVprE2NJAFEZUkQaYNPjWHvXrKURMVi7a3Xiui9SOVg4Nl3dG5ZybonlJBQ6mrYdL9fwTc9Kb9zr1xwCwNzJ3nieMqaDPMe2RSdTkzC7Y2+ijdaajCallNFEpLXNsixotqT43LP2fv6fthdrrjU9clRGDwFR8rGrX1Bx2cEO+BGEovfUw6cP33nvaGZChbNbjoGI8x0//z9PAE13u4ndWZvoiWxBU+2p1uMPnf3923e1x0N3rP9g9dSrXnT5a9+04+0/fuqeL6Em99Qj1Zc/m59z6ILzW+bKF+LBc0898b2J9/3BNcdOH/+Fd61cfpX8yq8tX3gxgfTWl8NwkYh8mk9uP3jmKRHKYoV97WquanX79b9854ve9eLP/dYVP/xSklZ15F1J+sjuF/5p9zXTw/li0Z8qRwMTt338PcL0zvQ3njhlE+NotW+sW19aWx3aD3197rf+6nFVbSSz8q3DcuNZ8A/vGB834SsnJyrDBUaVGwJgBG+pBBYAAuUDa209RwZBRc31q5E8QIybqr12u6211loLokGCEJWAAY0BDFKqDYQYJGiC2YmpHHUmMJ61JLK2how2iTXG2EQnWmWJTYy2Sjc54tbq1GgSjq6WyFprTWgUWW2MMVrrJEmaz6e1afMZU0ppUhIZeTOBraG3aq2ZOYQAkSFyjLEsS1KwJfluynEnIZIACRhFpFAbRQiKkBAJBWMIdRXqqhmJt9ttC6CiZDYxxmz0env273/jW978z5//7D998uPjY52J8TFDikMMzmMjOvOxZa0vqlg5gxRrp6OkqA2pGKO2NoSgFWqExqsiiK50VukYPAPf9+D9Tzz5eJNh06yqJPgYfQiOGUSwoaPAJsg7GEUQA9d19A4RhaEZKsToUZhD9N557yVEUGTJQNyEB4/8cKSGq1n/uF5bG7gdz7Rf9cglv/bMy1//2GXPeXTirKMuGQ2Gr9krf/6C8ltvrP70enXjjiKtJpfL40eT9z/8PABY5fENmVwYu/rpGz67ct1fMXMx6hdlb1CubQyW6hiPHV/cu/e8a5793EaN0/gVm09UnuetVqvxfzbKFRFR/5+BA+Be68tR1W516+DOveLSu+5+iDFRFB3WsfDd7bu/f89Df/vZjx245MoKOTiX7jpQHzxUri23Hron3vFN9a2vmtkpuvE5MKrzWgBYRx+xwwi+lZeYZzHJmCqgmKSKUq0s+TrFeE63DetL/tAhf+0LfGdsPzD/2YdbajU1SV453crS887ddlbWBgD2sN7bQMOaUzbZuGkn7aRZ7KcSWq2sjqGoSkUaKBKqGGNmdYziXcXenq4enyvWn2cGV/pC//O96PjqK159bG647b4DwyvXnnj8T/bd8qWJqUvWR8fqsXzvOz/WueElMgjrx4+urD5z1eUvf9LuiHFO66SROhttGsSaUUoRxdAMpDcVja1W2/sYA5BWw/5qtz3Z7rQIcH1jhFqATaKwFhCFHgFADKEWnGp1q6rarJUgKIXcSHYBUDaTbUNDjxJWiIDIrLQmY7bsjABEigFcUTcCEBCKMQJylmfWtubXeynphAFBRReCUayQARJQzbVqjQ0hYIP10NQ
2KZfusgsu6lfFo4efGJ9I6tLZJsBeIwAwgjKb6CmjFJfeKsVIiOiCV0aX7HWatk3WL0cueE0qtUldlI2GuYlMShJrTFIHf0ZvrAiRBZRqev0tN1AEhYiYZVnT/lZQLSws5HlukBTpzVmxUgAcomcOCtCgUk0gkoDW0OzGYmSNJIo0gmJCRUxILCZCM9xrigOrjCaFLFPbJnvDHqPY5q6XGDI21C7GqI31MVijDETFYEghaK/Zkrnqymf/oL5t9Vz7xPG5K5cOjkmbVcbQN2EEOJKUlk8W2/IcAF58efzq1wfpJHLty1UHXRk5iD4gNjEYiiEqpQmMRo2IGUzLNn3l5/6lc9e35kHTkQfzF7wlVOuStcu1xT0vf8eTf/uRid7puazbftFLz3/dT87nl55exxdPBGFhZTY2Bjuufn7/V98/9+H3TE3OssuARJv1mjM0Uz4MI8Dsar348z+yl5WP2e2vfPm2i58/84a3PfCnH9j1jj9arXujOaX695z+9IfUwvz1n//Gye+P9px/6e6f/ensov3fWzwpTz6w/T0fwO37+n/w3nxwYhn/0y2DASaAK+87kNx91Y987TV/8Oxv//Xlt37I27ZsuG6rux6SX5FnveWNs6/ZnT+8VBz7wYM3+7Fjc6n+s1+p3/u76Y/eLN+/0egOJZKR7feKXpZ1p+zCfO+yPdPIQ5tAKe1LdpsP37z66kPji27VA2iRSAoRlGgBCAGCQpEYYlCgQBGCOOeQJc1zV1UcgzGm3+9Ls79gVg3fpWnERLTWxhhmIGFEtbS8aIhQGSKCCDFGpTRDFBFDOkIEwAhCiIYIGAgACZvkbolCiFGESAFKBAjBKTIooJBkCwtzRslPRJtGJGbaEjckSTIY9kXEJCkRiIhWCiLUZU1EVukIsVFIMPNmqiBHBcghaKUYhBQam9R1HTkWZUCBNE0RsXY+SbLV1dUdO3a9/nVv/PfPfObo0eMvefGPgkIiCMHBJt6AfIyKSGtSmzctQESPMdEqRWQAIaklEoqKERjA6H5VKE3iA0UypArnSICZJcRmBBiEwXlm1ggA2EjWRWKv6hFQ5MjITevJW5nrzYTfBh2MeKhdUTigAZYCoCsiDIhGRZP6CBRBSz+4nPPs/vLRz33l7CvP33/Z+Op12D9Ii7ryb9iP/+0KPjro3/6o//wT7FYA4PjV/5hdfH3KeajqqgpeuKxKBRJrR4Ct8Y4gepZnjh5VSiNLMRxqo0Q209v8lsR1c8aJmDyRxptD+csj+BS5ym0/+9CoqMoTTyftjiC1MoheYll2utnf/eNnn3fjy7vbxlhn9cSYHOnpux8xc0fsyvKgpyZ//92Ld95Gch9oUKrtlLJqaAplOrYC34cRKtLSTVkgepXqAGpSYaJqqQbW2SMHZrMa93zi48OuWnL1WBSf5iQIF1502cLGUQDIx2Z7I21w2yjUBj37YW+43KvX2NYCRW4j4oi0ChyB0EMs6zqCeF+H4Hq9o0sr7gqtnjUozerqjsXDk7rctvvcGGX7yvNUSQtTXx5Pz+utHtbXvfac9982fsWL1o4+kObwlc9+6r2//fof3Pu5VAiBG/0zbUX6WG0U6ib3qDHsiohEjp5jEK0t1PWuHbtIY+Fcryx0ojObKeGirpRVEPmM67eqqm3bZ42VvJ20WqnSwhIiOxYvIMyBABufAwBoIqXQKN0UyHKG09S8NxRr2oIJiBZGEUQw0bOrooqiAYGQNXmNoJUmlYNCFCJAFCI68wcx80jLWqzmVpYQMVWmqiqValTA2OyoaGpqqqpcQ35xzpHRESTP2syQZS0R6Xa7En2/LlObdHSiBCpXshJEMZu3FXExlGXZDMeY+UyTarUSYKVJaTLGAIlSaO0m0MN7n6fZ5OS4UmiVJhBk0QiNo3qrjdCJscYoIrCKjDHaKG2U1Sax1mqTGpsanVpjlCZArUkr1Jq0IaVRa51aowjrqkizRBtDRImxpLAsR4hijCEFaWKyxGyf2WYUKU1pmqjUANaxbL3m72fxvtUj58Unuksz4+TBe+1cGgvr55f4135s32UHtwFAFjaUUomyWmvTHs9bmOuq5wetLFcKAVk1OVSaAJiI7PbW1Z++dfvnPumTGQfcXzpZLK7VMc3VWCxrnJ7sv/TV1Y++dv/n7tj9vk+V259z53DbpAoH7EBnkKSUKFl+cvX8n/v10Rvfsba+2MqSSDzknJFAfJK0EgkmSbvtHTEbx6cemP3yN+Qvf/+Hl810Yu+C975rkFzR2dvdd8m+ctS78Q//9603f4/i6anrnr/yTDjy2ZsHn/rns//s49VzXjHz8jf3zrmoHKndf/uFM8/44Q8eZmi94+cevO5Hb37bP5x7/NYbP/NOfe75xoIdmyjr8unzn/PuP3zDa9vusS98Uf7x/Rd94lfe+tm3X7/DtI68yH7nJdWbP1RkS1GCBIOkk0zlFAHd7319OGq3Lz6/vViOPvEX909MjI0c33oq6+jEObd5HviYgW6C/VihC56IBGKoXWMcIqK6LBufjtY2/pfDjxEoMaLBQxAEH0Pl6shslLLWotFOQU1SctA2aWV5Zk1zqQoCg3jeFOErAdyCfgBi0/ACiyIE4RACsiCLAtSEwFGCby7Mxl/UMOkic+M2Bmjcs8wSAcCkiQ/OB1fXVVNoksBEd0w3Aakojbew+WdVVU1gTO1d82h+CxE5SiQog+uXI8d+NBpoMuvLa7nN3/gTby2K4g//6A/u+cHdidGNF8vHEAm6E+N1DJV3jByJwahIoJHSNC2rihGCd+wcgQQOLoa6KEPtXFHXPozKqqxdURRFXdV17Z2LIdR1Xdd1WZZVVTnnyrIMIVRVVdd1VdZVXTvvg3d1XUcXvY/MHGrnq9qVblQOfe28CEeMEbQXEKmkdtFAVDqIBhSt0aQARDYxeTfJuofvuL93y2M7/3H5go/ycw+fe9nqrunYM4ekevPB+NkX1M9SANDZf26xCksrq6PRoB71MQYSElFJ1jp4cI8hlxh+9NEHXc2KEgCwiWlaXmttURQxBgBploxKKUMmMMgfK7hQ4st95Hj2hRceeeJw7WXIOOhvDNaXR2XPra+G1A2L6k//9I92bz9H2ym3tBru+QE8eHcxOBHTItnfOf57v0pf/6KembHOxnpF1+tFadALmGgUZ76aTlqJ7RR1oVTd7/djUNtsQv1epavw8A9ad9wy9b3vuv7CotVTVSsopQZeR88PPPzEC/fuAICZ2V0X9pLO7FQYpf0hc5Igc8cpdmGUJkfm1jQgYrBKA1EMHoBjWUpd+dFw7tjK9ce+999e/KOrneeUD965MH7g+A2v2WCYcqX/2P/ddzmeeJEe/d+lsXe9d8eNP98f9jeWF7ZPH1xbHdz/8Ddb3ck//5Pfet5Nr73s4itHPFJEmyA6pUQkRBERQAbkECMKWGtDCFmauToAcFl6paD2orWN4OsIY5OdlX7fBZ8ZHVy0JgkxBoSjJ09G5uFw6FwtDaRZWYwgIppUs97ghg/VmAYRiEhCjMAN/QMRRZAZSFOsa0FmYWEB0FUZYhSdmdgEHo
D1y/IH74kfmLXq3yq0P5x/vP3YthAEmSNRv50J1lgqfmTh7+9N/k2Th7GpQlpZJB+WeDWxGwrNtPwuUP0DXf4Ot8etnkxTvIGABg76pe166tmf5QrSyjG9resrOpk3robZJkgoaI/PJyWF0R6Nmy54Rxum1vPF1u8jDcsnf8VajN6S98Fq1U5YpWSdGEquLN+y+Y3pOAptlt29tFwySJ14AsTdBrrkaoUJpFnp89f/QzH78/VPu/8NSZyemFq27acs0lN2484Z/+8InDRxemp6b6vbqVtE8P5l7y0he++Ud/6rGV7m033rrvyOX36Sc/c+rzb7jke1QyOX9qcWVttbN7a9nzkiaudkRagptsN/5y/vfcrorGiSlAWyBjqAFP0/ANg6XXzl139bXbt+0ZrPWisVDEM5AahYFYCI2FT1IKAQmj+kSBsOcQJCilYjU3JgfREDAOAeL6BAsiBiBEidAL2bAz8pjmRVwpI3I5Gr0JghIgBE8UAw+RQhTPrJ8xL8dnr4kMcQgfBEkihxGA10Wq8qxjVBmGkbDZkNooKjKASTREBAdHdkSIOwiKchsGCDFDRhaAOLS3fhqjEnE0KsFv+2JY/7MSCM9sIOCZVylScewYAAAISRAYwuiKPSuQIzIjjmrsIoCoUIlEWid3/VAn2Rt/5Zc3TW9+3+/8zqc/+PdLy2d/722/O7V783ChHxKVUYViXLDxQ5FGSjaAs6wDaOXIMUig1AfQWIshQR2cYWCnkaoyI4PObxvfFPxgrJn3Btnb8+Xy1LIy6sZ0+CsAX1iqzBWvnNh1dbFSnrTDocHV5QVaCkjn+6GamBn7vr1Hv3XEfuGBKzcXmxEt+JpJh0DNYEqduOAIAEjHeKQgSuC0tBabYW7v9Vf6m272mABVXNlNg4Xl82tXXnrxS77zpV+552vtIl8b9EkrIFb3KI8oDgFExhhaAAg4RKyRvmkgeBaERBNIs1l0u0yko+MR8IiohEQiIWrrBEa1w3ifCIBWlBjDIlHfrhdXeotzZdUxX7z/uFfsbT9rtS1LgTZPMucpI0KPNTASo+eo79Oa1q17RGlEog0gyOjTZREgBNAWSw1tC2d7S83tMw2nnCbUCk4uvyF73+9veTnAk5OHJIeGAKL1L/kdHPvEHfnVt5258MLBydM3Pf/qwbmV03OkC+M5aG2QJS0ytDbYkJq021tNRbfyYnFl+UOf+cRUZ9IIdtHmYHq2TBJtV/uPLjw0tmWmZp9pkxGlSl9x6WWPHzqc54USr6MVn8KAQImpnS3rOk1TFgwuFGmikNiHWImNWxsGDonSSvvaEmDUNyVZVgdfss2yrD/oxufJ1nWSpkQE1rtUcWoyIqwtKKq9BREMHFuzG64SoxaQGpF3EDHScWNHoQ6W2aekGaUWywypNipOFyESUnR9QVBEpBVgGH0unoOsz58xQsi0EtDOpxhIgQPwCD6IOI8aSQKg+Nr2Q+T+qNrbVBtCjFgaIgIQ7/2wdtHRLH720UAtvl10d2kWjW63G7w3xiwvL4NWW7Zs6a6srqwuZ1mKiL1eT0S01jObNy0sLxFRqk2/22u1Wvv27Ws2m87btbW1cjC03pVlmabp+NRkqNxgOHTOaa1VYkIIEhgAyBWVX3XoU8DMVGfWyo/dO3zTy8LUvplf/sML/+1d33ru91zRD+D7fnDyJW6Bbz537siZw/d99UvDyYsyf5iYmXC87gHAOKyRXZ3sP3T36ZUTK4sgA5VPzG6dqdMMi3bemUi3bp9YD8l1t8sra00eHHvyyeHiajh/NlUNzIOjJm3amd74aPnq20NmQWjvo39v1orj5/6I7WbRaAS51018snp+Zdv+q6+95fr5xUErN+zFiUIAb8tgQGnI0ulut59k8Od//pciWFa9+aW1uTPy6BMr1z1n58a6vHnbRQ89+dCYMkk795Qtnzj0mj999TCBuvY9BT99wXd/c/nJR/XRu8889rJLn3vPN5cnWpON6WZtgxajjXipwOUp0h3m854DTDIwYIVJbXzhaRJZwsIlS5dV7b6tkolWWKsFMY42ChFESdG6k8dGjzCapGzIQGNki1EZEDb0zIAiwApwxKhAEor9V/bBxWFcRNSkREbyq0i0gPVBoBD8SBEUo04ktQGP2FHflhGOxmbUqHIYkTQjR7KNpGUkk1yfMxEkQSR5FtOLUEAg8MiIcBT4hFkAMcoYcZ0XsR5oRQGGdT7Seo/2mZaqAoyJ8jNnK6DW6wPfdv7rL4f1hT7aSa3zu0aGpKPdBjMqimJdWddOR7NRZ6wfSqjK1/zoa3Zu2fz3v/TGb3z6w6/v1r/9P3/34PW7uwvOgMU0mJA6JE5E9TymKSU5CI8unogONUgVBIW1gJD32gdWhlWmy9pZGt++xUPuaq3WBuNF2Wm3VlbmNqcaAA5pffXlV7agPsMrSaqRQufaq6rhzrqbpBOrJd01K3Mvv/bo7FT90c9fVtWbxsdT9miMHdb9NCSKNaBE3/iNxAMFladB3eirJkMrkyXUtW3vsAo05V0rv/bWt33hlht9xUQUndpwXukvpvbHSqgB+ggaYACyRBKE3mNE2JhsaWn54MEDL3nJS86vDp+9WWRmkQCxNU4bOyKCZ1UynA+R8YCAqEgvnl2cbO4oA6E/dvnUxUW79diZk8JT3jjIWJADO2TpQ+hojaKdH8bAEOexWDyyYuQER8n4qGuCwoICorO8KFpudanRMNq5DqUp6aoh42wmTrRua//IP8rbDvSe0z/+rW0P4LW3d5pn+qG5/XyjEexwPE+/+o1v4TBNJzPL4oS1NpCY1X4PEY1Jh8yJgNPiq2EOymT5WjlIjNFinAt5I9cE01u3dsYnjs2d0lon2hw5fHIwGCwunjMJO+7PTG9KmySrwMzaUFnXUTswNTV19PixIm/GClU8ABKtIcYYz0IMQTAAGFHWh6DF154DKA0jQTgqESnLUmudKo1OMgQiCAB18BKYKvYKkEWLWGavMIRQKJOicgBJkigEpUgE2+32cDi01mOQEryLzKm4WAQXCBIyzByn5mMsBglKJcSEgAZ0EESlIXgBtAh5AKWUU1BL4BCE0aDW1oE2sdkrAEppAlSoOUhmkrquNSkFaOualEqLPFXKhRoAYoUAQtBpEgVl4AOJAMjU+MTJkyedcxMTE6fn5qLb68LCgnV1o9GYmpoqy3J1dXV8cmLz5s1k9GAwqIdlo9natWvXxMRElMxsmskBJYRQVVUAaTQa9aBqNJvMTEbH2ZK4eDnbrd2YCvnacHXPzNaP3nnqUG/WrC0O2seoCoeP3f89u3eN7ywcHFp9qJ0efR4Nzm6zFxy4/Ll/+6l//IHvQRZYo7ShgYOUZJKMzqylX1i4ttGwwbMbVmePnkClnQ3AoBMjaaIzPb1lNhRZ3pn2nW27910KAOy96/aH/RW7Mqg63+q98qHQsNDq7njkj9qrtxy5/Ef6N30tWbw1+dIByQKNt33SgDL55JeeuvGWKwWy2rkERziyZtoacqnQdHsr4+Pjd33161/8wn9ecvCy5YUe43IVqgPbL7vnvrueWZS
leNFLvgtBu+Bve8FLVhbPXnDRwcVl1qmx3fDcS5+z5TPbz2058dcPfOhVV968cOZcd7U31WgjabaOGYIAJrBWq9XminQDEpkVkyxnZT2QCxgSQQHcWdRHE18Px0uzGu2CAACBAQQRAEes+FGAGiV7Gy0DQhSFwhK3aHEmh+OfARFICGLjNnCIg6Qx1gIBCcXs0wmvYwfg2QeDCHN06gUACIyIqdJhnU+wsRoKxjmZ0VdABIBhPVzxhrh3PW8erbBECCAsbhT1ZTSJGmJWGamZuI6mRCdMiEYpFGCRIMwERGrE0x95WkeMDIsIyzP7A5J4PWGjNr6R1Er0cgdhlLj1VErRiNUMUZItEl/9DLJjY0uBpOLGQgFGNggqYzQEwJWl7vNf8oLJsfe+/5ff/oW7/vPnfvX029/2h7e+8PrBqrLkIAQGTpzhluHakiUEZpI8S5RW1Eg6nXGHogQcO+ykmpR1Igr7WIaUJidTSOqGoV5LL/R7HJTtyyWzZRB8x8/ceLetjpQd1lMICmw1oDNe0roRnuzDrrU9F5juoRPDq7fMj/2Pj3zoc9eeW7ho09SmqiqCWbZU5C6AUEAIBADiQYgFUKmApaqH6Le6hRtb76vC2D1rr0kUZ1m21h3u3X/gN9/6m29605uyRst6JgKtdfhDhJ8ooYt0eqSugiHA72byOJJWyuiJ6ckDBy9MG227OMjSCG6T9fmraMc3GjMjUhstDFIEAJ7FjbifKCwaAFLyFSv4v+NYMec2212v+8grz733cGPVpagc/Z+/6r+P/z7+Lz9yze20OtXTaiaVdu4qz567efs3n35RkP+PT4cvh+XSWmPQXa6Wc+g4dtCcyscmGmNN3Z5IO+32lm3KXAL3/nBQA1vM5b39x2/68e7Oj9LctHvFPebL+/7fkGT//+NouzZYQAHOQ8i9NAUA0CIA4PJ/P6X/ffxfcHRS/o1b7MsPsgDkjf319G2bZPkszgQk0LmiXSKLYM/9//q0WgKTkvzPAiqSpsAJlM8pWCNA+T9/7f+Xx38/SP99/Pfx38d/H/99/Pfxf8OhZ2ZbL2g1t7nmJ95w8Xz7hjW1Y1Db5X735InlQ4cOrQy7hRo3ZBpZ4coVTQmNSivMEs1txDOjYCBBCKjWuVwoxCggArbfHxh2U20jrdzlEKTXqXMNaWiYOTy21e284GffOPuaV/XvuOP8o3ftvuaG7vGl8998rNXKuisDgaTTxoEbEqaZSSQEW/H09DQG7veHrBBMmju2pPuhtqXPVKqQnARllGExpCg1T8+dTpXWDLWzrYkGpWql363ZN4r2ucWFxdXzDOKr2vu6kWfVYGiUccNqrNHy3lc2sBeIw71EiMp7RgwiimuHiWIEIGKlHAgoTeJDcEmi13s3KtaykjyzZVUjOwhGaRxWgGqQBHSimDVRqtAbMklSpEnDmOV+XyEQUWaSWCxi5sAsrJA1BAIvpBJRIhw0ktbKhwASgKL3i+LRVDEhwGg+gjCapAmqQBAkIKEBBdEk3DC1Utuz2pDSurbO8ajrpnRSl6UxhgSazeb49u1lWZ4/f95aS0aj8NjY2LYts0VRPPnkk+x8o5F3Gm3nXH84OHPmzNatW8fHxycnJ733vW631+v5EJqtxq5duxrtFjP3+31woWg1txU5s3jrDKksy7ywD15ENI74QXkz+gErVRREFCWjUXwIAN77pNi6mereoG5t3jxcG3z04bW0tX2lm7c7meTVgRt2p61ieWE1N5tbLVNTL21uaob0qpva311j38LHFr7/c5/895feap7/ikuedM2T5eSLXt588j1r3oxZbzNJe1ATc5YR+Fpr8bVrpg2sNaiplWCBQ9E9318+u6zRB99I83KmMm97f+a3p7w97e86sedHy+QJUEEaQ5wBurzi41N2kDQQvHKHjnInz07wkCXxwVspm1p5V3NSGOA8M9rAZ//zw3ZQ3fXlO9vtZq9rtmzauWf73u7a4sYj/Su//DuN1lhtfZZSb7DIYarTnu6RFNYs2e4FU5tf1rzxb+j8/frQg3Mn1lbOT26eGksnBmKV0siuSLNuz89ux6kvTcE0yZLwJPNUiYiZaZBN+oPVibv1pluK1lhjWQCYiRSMlB8jHUpEdscWb2zuIo7uQAVaCBmiTkQ0kmAECI3ETbhu7Rz/7EJQIwEXxWEPYGFmpShAEBEe2QwhAEtYrzPTupkPAEQN9rOKsesl2Q1F9HoL+VljQlF7sXH+GwJXimongmfq67ECqcwGhBdHU0UCgCpKzIDjHDMw0GiKF5/1viASEcHrVWIQCQEAGEdsgI1v3vilGEEBEgAjksRJl5FhQwgBlcZnEyLju4zAdqO1SUSYUFgQEUtvdfTraz49v3bRNVf87HvfPfYLb7rnzs/+0q/81G+/6e3f98pXrJTpqhpMg+Ehee9dwmbcNFTqVsQtlFxaPnJ8fq5UIUuH7GzpQqUdYJ6nm1qtzVv9GlRJroOzvQrA17b8s5cMr9/bbDecB/588WOMakXNKEgDalaArk5oOtHL1dr44Sfwxlee2j08caKb79ydv/kHH/jjDw2PnLp631RjhZsZQ00BQCAytUGERYEyQEr1jE0wxQGuzjvXBX0uX903PkY6JEnz5NzSL77xF//9Qx+895v3F2PjzaIxWOvKTQEA6GMZn0ISMUCEWGKIUvo8T5aXzj7v1ludJ9AmYr822sC0fgTxMVISjEbH4+Ng0kSpEYOFiPQ2vTw2XIVgO0+8e0q9u7vpxU9f9PYJou1XNq6/cNs99z2gcBkRsQqkiBEUmPh+CHEOU5AIAAOzQoUKgCh6FAIIokLnRcAxdEWorNpJy6SZBKqaNfj8fHZiz9pVgxMDajSnXvZD4y9/7exm/sfX/diBK3cf63eFnTHETowCz1YAs0YxKIe7t+9YPHNuqX9eNzLtlc+Tsru8a2aGm8Xho0faXAgyi/IhGMLFxcXAkCbaWzsx3tEa19bWfvS1P/KxT32yLv1kpz0/mFOK0GhITB18JcEozI0yRcbOiQ02VFmaKqXW1npKKZMkw6qkgEopDggAPq4WAJYdIaVp1uutBeuazXYdJ//Fc60DBHSYIgn6GlgB5lZqib7NShMRQAhcVZW3dZ4mSZIYRVmWWmvrugQAk6i6Lo0KHCxz8KQF0SRKbTyT7ImifnOkrSBCQWDyOo4EEgVmjcQg4iVVSrTyRCLBMKRBJEEgKKsBMyskh4LOB/GzU9M7d+/atGlTlmVGJ4PB4NSpUwvnz4ui6enpycnJLTObrKtJoVF6enq61WqHENa63RDCxMRE1JEBAG/eLFEdTZimqYiIhKmxSWEOAMYopRQVKCLWWmQkbQiQ2QNiVLTGQIsMXliBiu0WYY4QHpK6X4YKswtmijf/w9cXm1NFb22lTtuUjzWnF4/35g8tjV9VrJQDM/GkUnvYNhS6Gaxu7Tz2ji/De5+851w/+/w/2gsfPvxTf/5KINhvV19606GPf+XKZLJD7FJjfF3lJuPgOs1WP6tLlhXPZMstRpmsOD+0rmgnaDJiT77RXOqZp+3E4zLRUy4NzQ
GcmoIOgmJgARggj+s8OJJ8pvjqo+e/+PlHrnj+pUuL1gJlJlGgAnjk4IO0xuTBbz307+//wPXX7+AwsLVkzWzfhZfWa+Od9p6NALxrzz4HQwb98MMP/sd//OOerTtf/4afWjvTJch0ZpY8/NTB2/71oc8MOmv/dvQrbVuPdSYoMzy0BkkSVQ2rvdvb//nJD33+J/5Tf8CwYhywFIEZEiqmJ2f23/uiT8//x/n542Np03YDaBYUBBYQZhlJgYRFJO7AR66oPIpzSiAEZhBgIQEZBW7W62Kr0Z0cVw8iHSd0o7uqxICEUebKOFrYYqyLgZk4CqZGIRMQEIFDEK1jgxjl28IwrHeLR13eDR9D+TYJ8QgvBYBBojwKRjFWkEUEIg1bEGgEcAVEEBA9+g1BSIhAAQoLCIsasSol9ptlFIOBEDcU0iIowPBtQqpnnTkwgiKllBERCCwioKIZomBUdD9rxkkwAkNGMp0olnxmx5OgK2tIofahDY2zKytj2ze/6r1/P/nWtzfe90+/9Vu/cW7h3Gt/5ie2+ry35mQSOoUeVtK95/Gnb/9E/yufO+kO2xcsn/3bn5nEKwY7tyht0rwlu/atzGyuTt6n7vi8quYnbnrJwR0Hdu1+/lA56p+9bRffcmE30z6V/oLaioCrapOFXEGdiHeYizaANsfJ5tTyyaXGV+7d9cK91bh5KlmFnbP+l1/1+N9+cvjUqZtbjWZQaASBIcYdBmbmIEHQsHeW0nSYDGnyoewHmCYxNHbMNBqtpN/tUZIPLNz43Fvuv/c+RPTeW+DwXEeHVXqaAIGBHLALPjWpSJ0lBSKC2GuueQ57YPEMBhVFmP+zt0qw3tMP0awCRzoDFI4fcPwgdF1VThiIce34eVfI8u3u9PLKlX+QNvPSDa+6+OIT579ZV4OcE49qaFlJCCEIRwyb9p6REbUSlojkjTpAEQ4gCOBTVHa45+AFJ+ZCI/FZriVom1hi0yI4l5+8YfmVSRpqoP7JRZPi0bmePPXU5tf+zPHDRyg3ULdWpZ8iBbJK6V45TIrsG/ffl6Had+CCI3MnCTOo2ShdBgdl3yAwMhEYUJioQV2ljBw8aKVSM1gcnj+7unfHnk/c/hGVqERCb3nVQSBUO7fvePqBM1NTUwnh7u07+71e2Q8Gqfb1aP7VC6H2wfuyHF1oZK79RLvlQUJdJ6iYCMF0u2vXXX9tPSzv/ea30qKRpWlZD1cHPWOMp2DSvB6WCjWLWOZEG8/OcdAimoxWhBoxIRKM5irRl9R7j6iE2QiKSSuwWqcYkAOg0pULGn1gDhC0oRCQEY1KveMgXkAqb1lZZAwAIYgJuiIXwWl+UCqGNDNDNyzrcmpqKk0zIRkfH28VjSLLW51W1sh2Ts0WzQYQWmtBUWes3W5dBBdd5HA0XOGdJ6L9F+yLsFzrvBNutFtx8YrRtypLnaUCQEbH/UHUbAfvCXVgp0iHEGwIRmljjPUORSIFTCMgonNe6XWxTpTGbdDqEQIzsBKjJkg9+ODjH7w/37I/mV/tPvTk7I9c8fiS79Km5Mx5e4GfcatDaZIfX/XLm5HTi+b+qpL0L79ZVnplvDPRN4Mnv1G/89Xv+fX3fGffZy986dqxc4cOHdsmDWOcprzoBlG6EUq2NphEpUqXRiA0vHGuoZWEpiu5r4aJlZMt5VNy6LAfTA0kMLUGIjBMWDAsbVVpC6qSh0ZPZC6pP/XZB6+7bU9t66wxQYGDS1HbFJ1DzvL0Ix/5yE/+5E8//NB/tDsIkIkKpw6fWDh92qhqY3V+1zt/NcnH9h+4aGZTUmSrL7rplcaIU2lDeU/50qC7b+sF1z+x+w66/0vVY7csjF148KK6CNInjbzK5dapsW888PUf+7nXj+vZ9H/qsz96Gm7BLOS1c6vnFvv/NPjpm3/6Ne/57snpXTWRD31DJjDHUR4SoRjARCIlFAIH7+VZprUiQACklODIWAARVWLEhWfiCogIBmECIhAWwZELLsSIzQIKRQFGmocXRhZgQRYS8M4hkZBSSgfxSIhKU8xFRSIG7hktMWHkJ8T0dePPMffdCHsRSwkAGJntQeJMBG1EzJgJISgcbYGjj61HQUIGEGFgUIgQ07SoU4Znpb8xsmIcZhit34jICCKCUV8pG5dxA4epR+ZIhFEIJ8hK6w3R1vpBGx8BkeLgI5gzblNExCNIoxBrC5BaKx0adoW3tNrf9+4/mJjaOv7Xf/TX/+utT50/+T//57vau9Lls+eP/ePnlj92e/j6F5owmE7SsKWTqHTy4FXFzueaK67vze5cbbQG1OhhogeDfNsl7X//+/5H3nNw896fazTerRtpdvHz9hwty4WJbICpPEaXn1E7ejQFAAFIQAkIogbhgIWjuWbR+NbjuzG4W26cy3zvsUP1Nfsm3/EjT33om/rOO65Zls3jYwkFJUEi+IIwEJNRJslaqSsaecFi67UJLezwfEYdxs25znuOncD3vuL7/uwP/8h7W/XKNDXuZodf1ZV30WscWHLStXN5guy4V/bzZtppdrprIcsyAolOrCEEtU4VZeZ1Lf0oJ1539oDgHQEaRS54L15rDCkxATaoLrL6dE3TS1/8o/f9y4ELr7j28ktq68o6CHSQFslqRU4cOhuINCKFEILzaSMjEQ9W66K2nhQ08szVA6OMMBjUlLVOnziuafeho8trZdg1syUR9E66rfmBXttR7iYiBUhaJ1NjC1/6Vtv2GrPTq0/cN84trWvnBVVlOPVOTGrssEq0NkmaFC3FOpBLGTkx3WHp54etouEQsyCsPLnkkgsPHj9yeLw5M9ZpPTq8pzk57oGYvTbmubc89/Dhw6fPnhMWL27YW7v84oMLC4utsenl5VUbfFqk/X4/TbT3Hok4QLQjFcHgGCiQ0gLU65bNZrPylUdJAJiAWD999LTWlDUyY3QIrpk2hvUwPmHeVoTogxVFrMGHoUZdNAohLJ3VWiutNREDeBRf2TRLjDGhqq2vEFXtaoxTu+K11qQB0KUJjmXNRqtJREtLS1mW5nk+OTlZ1tWjcwtU43jRbCWd4XAodd0q0larEcdBlpaW9h/Y1Sia3W7XGNPpdDrtdpqmSZLkeZ4kSTyfEILjUIfAQWQkzgQkVAgKKWYeEbvgRGpbiwj7gOvG44EoAQ0ASmsGCRwi3RoAUJFnFgQRr5GEBQA2GAiJNg5CCFYhiEgE8rEQADiyRVDsfQnKqMxr3wjClHj0VvyeifDr//voYHrvZG0nOsXtn+vu3LHjBTef/t6XXnRizi3X/TqMdXB1MN3VZzGRUzvPf+oT6vk9++WxVuOKS2471zs+/+TDg6f1v731vtf/3a3Dk+5Fz6sfP4xAORjOOLCmPntUSUKGAAc6kDVDtKFfFxJAgkfyqUdlQBp0z5X+pm/CYgsmugAAmnG5Db5QDx/A7kSWmIqGwdS0gq0dU+/5/NG3PF2NjU34snKGU3S9oDVJMy+OPvXEi5730tXF83/+v/946+yO+eWTqNJOY1jVK0jPkLAuv/zSsZb+/Gc/ojL+03f/2bVXv3hhyeWGayTFLsWkl
8kvbP7+O07f8+CewyrVL+w8j71C4qCGSlr9cv6VL/2eyvZCIhc0rrr4Mwc+/7+/jHuhwR19NHVrw1/78C/9wwf+9abnPffsXB/SRJCQR3kV0ogXgYjKjaaPTMxYEYGUl2fmaxFRCEREQojNqvUcFElAQAiYgosWJYioYj4iIYZPR7HsDBQkRY2EISL/kAkUIoIwi4tBlxHQeQBAXI9zAIIBAGh0EoiRmTCKUxjUqIYetxQQRoa7rEfpMgGID34d4sEwCpACI5daAiIASyEWinE0Z8gSqwKsKUbl0djKiONMRFprQAggAXjdCAHXe3/fZjvIDCiViDgORkfr30BEXDtZLwwIghCSBAgCLKLQW4eI1tqNLQgAGE8hCaIJPBAwcKi1gl5NWj3vHW9q7Zya+l9/9vg/vf+zZ7o3bZ1d/OBHZP6hTENrcptpXo5bZ2B3hztfPnvjSycu/t6VqfbAZofPLKV2KfHCJOPPv3XTy1/s/urPyo/944tKc6bq/2WausFyaNr51u6/6Pzqp4sfEcBEKoQw4c/1aaynJgFRIKmgZWWCUhaXH1q8rP1UuOMrngh+9ycfnBybe/0tT10ye+KDn7viyRM7i/b+dMxLXZDY4CloTk2zpF3UqAtnLDe7Ml8fOQELq8cm7l+57vVT+rkGiRA++7Hbp2a29gdrjUYDpzwfDPkf5hFpAIhpaqqq2joz060GtrJp0J3WhCmoCg4kMBiGoJUiEOYggjEeew5KAAIDCwjgSOgOMYdBREUaGDSh9F36WDnRNG42XZtOq/M+u5Ce2LLt8ot3Nx554JRwv5mG4cCx7oD0AztEFAkhhDzLjC4AgJ3N8sx5j4ASwFaV1jreM8HjoF5JFU12ZipCBcN+b7mZzzQb6lg2DwCTwx1DUJZZqWy86Z745L/uPHBZ6LTGVZEGXvP9glINGkmhxtpZo7DTalrrH33koaLRCiCMwTt+7nNvPnn8xMmTpwFZtELgENyhQ4fY15ub+fTMeJqqc2fOaE29Xm9mcuquu+4KIZgkVV6J45OnT22/cE+v39+xc+fRE0+3283FxUUiQkXBsVZaE4gY771GUgBBpBz2G40GsQyGPa11CE6b1DIXjWxu7pQxanrzpnPnzuV5Q0TyJB3WFeBolt4YY51TmnKVlmW5d+/F55cWpddlBgxMggLSHwz3771g955dSZKsLK8ePXp0rT+YHB9r5gURZVkxMTEWza6VpqnOhEkSRKyqShultc7zvHI2nzMfv+e9z7n2ul1j+621zltDKk3TLMuI6PTp081Oe9PM5hCCtbbRaCCP2EDxOa+DH9l/j1ZYXO+E4Sit4WdyhVHeAMgoQCPAdXzhqF9FJIBIKibEG7amSinxIY5RjoZA1i1OkUZtto0AIxAQlY6iA5MRV4hdw2lXbEO0BJgdb97+xcc+fHZ860xS28U6b6acvOt/X/2JzzS+/zvHHr3n3150647xnXJyKZPx47Jp33Me+GSti4Xv3l386b1r5YrL86t2Xf0VnQ4ePT539Fzl0yrArsnGli1quASu4Ax0yiSAlnBgoJGaXJQrAgOQMVAjg+mJQGBVC/lz5gPXyq4Tsh2lavLuUzBoYK8D8y34x9tCvbpmLUJTUmy0pFUUZzH70j0Pvvhlz1/up5T0manRyIfdnmSl+OaX7/z39//bn2zfOVGX1dRk23o3OaGI9i8sntm4PqsrC3Onzr7h//Hzd9/zFUWgFAwH5ciXngGBhv3yyiuvuu7Tm3urzWo5b+ySLEDX6rPn5669svMrv/iu5d7Spsn9q/XisQcPHbzh4tc976e+dsenjp0702zbzdt2dpqdn/6hH940MXPJlc8rLTL6uGcaZXwi0X4AtRIRZInzOcLM7EHQyYhGgc+aypXwLPtYGVV3YQPwAQCxLRq7rQIgwgJhPU8FEGFGRK11JEUQYMRWQvypfiO1hZhZAkB8n/WxH3rGpybeZswsgdYL3RKJlRt/KxJPKYDE4R/RmtYthKNlQixoJ7xu2CAxuCPH4R/xRCNoiay7JG10E0dPHCAKRHOIDSoA4jM1c46CDlKwPu4y+jmEcbca68zRFp2IFKEDfvZk6ugsoxubRUVUawRENmRYhKWubR7Ka37+R5uPH/+Bf/pr9Yl/LmvK2pS1tgZtQGm1c6v5jpeXu9v13IOrl7/g625q5YmlcVnZPzZmp7LmZtpdUDi5cP6/HpAnH4fhGmK4waqv+sGDsP/Izp+6b+yVY7zwyu6f3JveVqvmgppd1rMMSOy0L71KWWW52Wvq89zthmHvS0/sdNgokoWTg8Xj3Y5ao4v3nv2t1993z+MLX/jc2ZOPzowl052Ogowqu1KeOmOLQcD9VdJzsFIkm9qXT3fPrg30F7w+DS5hWEHKT5w8dn7h5MT4rDJFecsKAJiv5wwEpBFABIn0YFDaYImo2WldccUFs7PbTs8LIYKwc8F7P0K+rQ+Ox+0hUrSDGoFiAMHoJNLyR37SAOJEAWoxRrDLwQfBlrGvuu1qq9RAOQkQHJukqK1nrtrNMa21c25tbS0Ep7Uu0qwoJheW1rzzSqFS6G1lEg0QRAFCQgCN9pbAmGjfaTUSwNXF5dUentz7CADM2j0NplRsYsL5cxLu+Nj2X/n9Rw4f6Q767U5WTDQ3NyY14Om5s8CRN8J1XSNLkiTOVmi0aEAFd999t06MxyAirEh5QQlTU5O7d+6oK79zx9abbnjOv/zLe6cnJn/yx358eXl5cXFRa31y7vTJuWPa6IsvvviJx55qNBqPP/VkkiRzc3OXXXbZ8vLymXNnkySxVe2cM2SyxMSCU6LThqTWWofiapsoLSIrw5KMJoEk0WmaLi0tHTx4UASPHznqvUdCACYkZCzLEhCdswCstX7owQebrZZ1ttVqkUA5HJoku/Siiy86eLDZbIQQ2s32zPQ0C3bazY3HzxgDAIGZiJxzoCgIJ3kmIqQQiBJKx8YntNHj4+MzE1PMHGBkfxTr27sv2Euo6rpGxCRJqqraWBYRERUhROAoIAuuK54AQAAVIBCtMxWe0Xows0SIIFE81Vi+ExEURIEQRpQDgpFYhiEOZQI8q78FACTAPFo1FWBsAaIAIFMgJvYhRIqvuKATZEm8DMXaP/v06fbEdqgWSCW6H5SGscm5w4/OvuuR7c0tl//kWx753uelt373YtfMq7GPHhx85p5rb5k+sGPXiyePfvB82Ts0c8V3XVvWd/obmxMPz9Q9vcfc/cDKcr11bIwqC9agACjGlJFQ5Uku1gIaYWYtnkIClDnGROqCwVwgIupv3uxu/KK/5j7ePA/M9PEris/fjFbpJNPppqEblErKWnUaK42ZzZ/4r+Ovfs0LV1eBnVLGlK5UYFqN7OjRY//0T//w/Oe+9InHHyjdYjNtN/Ls3Oluo4mJ0hsXrZm3p8azb933zXvuvu8tb/1V70DrRGtNCp31iIp9aBXty+n6Ux98Ym7+ax8R/Qd/fvO850sP7n/k6w+dWzr0S3/8o2vHNj/wma8+efbQV7/2td2bn37Z6374zLGnP3z7e572J7ds3n7R/gON
NFOJCVVJIBuTjvCsQ2u90X3A0aZLjRIuYcQR321jy7VxFwEwkdn4yoZ4KobtjW9TcfIbJAgDcMyQXfARayMjJRdpUoKj+3Y9+V7/gQDC65VtYBHayCxFRCkEgWdP5SIiEMYmN627Cm50UiMPcvT4RH0WC4l44Fgxkuh5MpJoxelpJGEUFIGRoRhhAkpEPPC6c50gC0Wp2nrlYOOCCIKwoFZ6vVG9caqkFbCw80REasT0GHFy1rncER4Sn3eJ/jECQmQcWwVESjd0WPLH//kLyUdv33rv1+302IpN2kICIXMNzgYhmRkeflJOn+y+4uVhCs7Mnb9A8otnitae8W0TqRsM+l/+xsmPfAwe/Jo++5AGI3m2iGp19lrzvNc/fsmtE/XJ1535pZ+mvxdf/9749FG6DD33qVMHk/lS+6rZP33xkQ8c3/aS4zte5Gdmx5YeOjj4YrtafXz7i//jP384sCkHOPaVlefv/9yV1z5x5c/d8eBXTz/wb2X5lfEt+Q5o9o9ufVFv7AVscp230uwSYp+iN9uDbfx6z9abyCilROCFL3rxBz54u5CqqsreVNERHc4qwFiZE+FAWvUHA9GSp/mZcyd/5LLvS7ICoFTKMDMHLyyCikjLOoMlKq82BAfRhAEBHI9ykpi0aBFsarsZVhLwIGAxB+AFdcGhY91V7galdu3atWVqR54jOIYUE5M3Go3VXvexxx7r9XpT4xOzs7NTU1Nrg9VTp04dP/60rZ1G7R2K0oBKgVWUp2a2PygNOqjbpEFrQHSn81OZbyRr+aBBanU4fvHsN3//XQBlcfNt4dyhCw9esH/XzuZke3xiquoOtp0++60H72eE8YnxZl4sLy+nqU6LxmDQ61XDTqu5Y8vWBx57ZHxsTAkFQ3tmt4+PT85smpyamhz0y80z+Ynj07Yuk9R0xtrtTmvnrh0gWLSaTy08yp6zLCuK7Morr5yemgnWnT59moiCdalJsiyb3byp0+wcP368211NkiRN06uuvWa6M370+LG+rdI0Jc8iEhCWl5dJYMvs5na73RrrTIxPlmU53u48+OC3qqpi5gDUaDQ2bdoKigSh02rGNxoM+1u2bNm2bVs1LIf9gcmKnTu2AcBgMEySRCnV7nSItLBn5kjFCsHH8hwAMXtginDd6CNECgHReicsla3LskREUKNOFUW9qKLIykFFIeqj4wM8svV8FpaW1ptVzBDpuvjMPnod+xe/gswBiZEw4iqBoyEaigCwCyEo0RsbeUGIjt8bK90onPMoqqtvzzyeWYBAGxOcx+AboH0uuDIM+zd3/vHj37i/n+7vuAFrppAjrAYlqo1TdeJ4uFCt2V1/9O7uv31i+KZfv+LVJ//WNhqHDzwHV/o/+BM3vu2DH+wOuSp0u7354uYTD9U//Bu//43nX9H/wjd3D9wk4hABvHDQqAR1AEOaBCpEVjZECm9IGFWgoIIo66v+Gtcl1VYfv9T8xxX1L33Y3/DY+GdeBl5brGwIw/6a1gRIlSg3b61Jv/F49+ihI2ZsTwhpVdeqSDw7Rj729P1pBg8//DCC77RmB73eoNub2aL3HWwXjemNS3T08GnqyeNPPTm7dfv+Cy49s9DtdNqusiGwQuTAHEQY9k027rrvwwcvOvDFj370yBt/btvOS+aePPQ77/7l7/9fF4qqF04/jf7qtf8MK8uHe/3Vf/ibv33xbS9+/c++8ct3f/mGm67/w9/9A895d+izFISfFSw3VEvMlXUAjDJCsHEI7IOIkI5OIoLf5iIPG2FMRETCsz7lZ5YwWP+irKuiaATHQDXyYQVNZuNWEUGREV8KEGSUT6uISnh2xIoxONKJ4n8okFH+Cev7AIy8vwDP0iSPXgkiChkgGpPIKOkRDqCBYKT6BhaOmisAAFKyzkSKNXbRiIAMsN6Kjk7AI20tI0X41/rJxKQcIgoRAET8xllRvDQCo+w8qt4ISauNkYEo2iIgIgIi4RAUIiCyUJAEkBq6v7Dyqbf87ky/O5Fp2b230T+3R/OwkhVpzndgq9+W0GrRNGsrpzof/bv8VbJvT3Xw0i3jnWTw1Jn+332x/6n38rFvpCZUGkxnvFZ0fPtz7rz1F48dfGGxcPzcf/zG5tUPvfiGE9mkPdIz31++5WMzv/V4dqOzgYNBlPHVwwePv1/L8iXH/+XquU8c33TDE9tefP+lb6ZQmXoN+gBJQyVuENSnH3/1icf+68ILnrrkiuqSv37ksfvvf+Ljp49038pbbxMkKCZ9MQbMrDIffJItcHHhe78x/fpL/LhJAPwHP/AJQw32lYD4G2v8qgbkkZbFWgks0SWTrVKKUCanp3wAVEBEwIGUlohY+XZ4uIx8P9b/k9dLQSAkFAVy2oMKQk1VG6MG0uhW+aKffNhe+rW//PSrb7ryB19+Q6lWULJME5CqvdLgAEAl5rLLLiOiVtGIVgFpIxkbG0vT9PATR6z1AuKcI8US6tbYVpCGYBhrJBSUQkpNkkm+XJyeGWy3a0ur549t2XflA5/+8qm/e9uuG18i23ad/dxHrvu+5+4/cIEG4w0lm/JGo0Eaa+/279+fmWRpaSlNU50mS/Pnj8+dHKyseeuuuvLKLTObGmmOaTLe7iBiHexKt2tQ+yA+1IgogaO7p4gI0uzs7A033vjFb30oOH/dNdeNj49PTUwy8+zsbFWWO3fscN53Op0sy7z3U1MToKhoNgBkvNkp0mxyctITAIBBQsRBXVlXF2mWpikieuHgudVqXXzxwYnJztmzZ3du33Hu/PyOHTs6nc6gKhExLxreupiVRnM9V9UE6HyISxEpFUtG1lqVCAkAYuw8pWkqDLV3EQYrIpqUiCitEJFHqQONhh4QibRE5D0he5fkmfc+6pOtc8ycZRn7ABG4E9j79X05kedA676qG6U2jiUwxGcvZ6OqIK6H0iAMIoERFUYT03XaphfGIECKCGF0Q44WYuQR6O/ZYlRez7ZR0AbRWntXG0nBkAazWPX2TI2dOfX0uz+1NjGzZ8UO8izxNvQwiBLvJbNDHYo6F24Nx1q4Ot/8+185+sZb7/lz/cIJ0+nkFq7d+st/8/w7bz918qFvfvyD759qbMPveOnRpZuOfrKe2Qp7kuH8cmaaPmHJMKmJQ0pikprEE2S+cJG+Kd7ZAUoQV6NzTTQhpdAugCQ4H45t4e//rwVebnDOug3odQba6QScJ0tUqKw4s8Dnzw4v3EZLy5UyZIdoUhd8fvedDwc/DAFAzOL5pQsv0S/+rn1VWDp5bKHTaW6EkX2pmZ9zh4+u/PTP/SQlXkQGg0ojkUJmJjJaJ3UFL3jpd/7R772lRhJv//Fv/+bd7/6TX3r3b17/+tmVFR8qnJytx64+t+vQzOnzR7VTkg8/+5//uf/gRc20+JHXvM6qfLXvEkUhIOF6zW295oGASmsiQFQooLRGwHgHCvCo7yCw8ZJ4MygY3bGxHLpebVWGRuHWrw/VbMTgEAKqUSbhvY03nsRs+FnjPaNordWI0Sgh5t//r+F/FEzj/wkTqjhTENcKAAgiMbA9u+oT3yVhiuvraNmNsyG
EmhmRWCSqYZFjVBSHAjE6xlxWBAIwiB8RuBRi7F8DCyPghpMSrHeaR4sYjC5XfN3GuwdhHNXfZQSRRiTAaLKEHAvbo30zIiaoIEt0oOBra9AA6UQ9/tn/KubK6Vfc3Fuq/+rMZ9Ta6l6VnIbmkWa2oLCp8ufU2ct5Yc/EjrOyIovHN33k3/Le+PJXvlp9+n2w/HCilR9rVZhKTffve849z/+lk/tunp57/LXv+/nkvs/8Tqq+nuX/emjsd244Pz8wWVK+5OQfTa7d8WjjKgU0MTjVHJxnxaLHCLRyg90nP7HlxBe+ccWbzk9cXOczAADiEVNFIeXqiLvB3Zc+/C3X2rx360Uv3/Oqau7x7yoHWmQzkAZEIAMgoE2g2WEdckg+/zT/xCXpqePHH3n0oVSn3lV2PPBBb/4gVQjB2bqutdZpYmJLLn7Waab27TswLBkRFRIIIKIoQsb1ogUKPgMvYwCFzyBF8VlKaRHRzHC6ajeFJoqmZ7U4xH8Ir/V2ODu29YW33aATT5wEjUPnAYQ1iw/ee6VpemZqtGN1zMzsU0K1Y/veZtYKIZxfmD89P9ceb6ahEyQfuIpIo0dgbxo5ae0EFhonZ3rbMuDiggPnDn396d/+6UmCy1/98/fefdfmidaDjz2kHF948LIGpzaRPE337t+XJIn3vrJ2ZmYmruDbd+xodzqtZlNCMHkaK+6B2TknIh5C8KwUY/R+0jqOGURHZ/4/yHrvOEuO6l78nFNV3X3j5JnNeaXdVVrlLIIAgQgSORljg21sYz9jY/thg/2e43O237MNNhiDMcFEAUIgkhAIIZTTSqvNOU28Mzd1d1Wd8/ujuu+M/Lt/8IHlzr19u6vqpG/wrlqtrlm9Oo7jnTt2bWpuy9Lcex9FUa/XM8Y0m83Qm82yzDq3efNmMtoLCwGmzrFXSjF7RMxszsxAWK1WScA5x15QK2bO87xeqa5au2bt2rXAMjk5CYROuFqtivPWWlKkkJQm65zkOQJ473UcWWu1NmGLaq3DLhKFwIIq6C0XYRsRhNE6Fw4jpQoHCA+B3QPhJ7M4ZGRmEgJFwSkhtAojbYKv6sA9YpDNiQh4BiypGmVTxYsEL47wWnkeIYpGHYyHQ75ORftRtDKMZQ5e/KFfDrHMKiBokAfH8f9ffkZEDCnLFk2Mqc19X3NcXzt8+qm9/+tLz+YTF9ddx4q2PkO2hno61xa0MmN9SHML2hqf9ZNa9Y82f/d0NvzBL71y5D77K79Buy+D62+5+oFjL/zeHY83Ko1e4sySHxnOsObTXDlJqk3lCFFEa+0xTB2VWKcZPOYsjtGLImKsciSkbCK57dleli3m3nvSSPtWeZLRbTP54YtS161pI1rGRtR0q8uq6ZhQsePk5NnuRcZnOdeMUgha606n8+gjj1fiZKi6+uTZY7e/bfOui/UjP8wP7I3bHTTRzHIgATM2Nj40srB+3SZEZLFEFUH03hljEFTu3WKnO7Vu00W7rj925LmRNUPPPLTnYx/9cnJee6y+Y6E1P1SrOJziyulqY0Pe/dY73/FLj+559NiJM8dOHM27CxPjazoZZZAbJOC6h7QQNQx8XwAAUERFSVpOFijEWgne8oEDLEFqcVDsakSlkFc8eg7UpvCZK4SIhCUsV/FFCFNIDOKcQwEABlIDVjERGVJOnAxmqwL8fGLPYF0NXkqHzjGwLyQnUWkFEET2STB8DoR5kEiGLCAKMRxBDgFAfAl4ZoRwUoMKMThU2MC6NJNg5vI2sgiz5wHUC4QEnMvKpKRsaYKE7S3iy/Z+ee4TishAkZuIio50SZWhsg8vhB5EWDJd/KJEdE95RgDnzzy8Z8sNl+ep/uq37/j+8SfTSq0SjyWqppPEc2/R079h+jQmv7i4VFlvSGH/p19Y+ubd2F1sIp1tNi03tIU9W69/4FW/e3rD7tVHH33tR9+88anvDsVxN1JTEO+DPPGykKpXfHnb1tHoHTs3WUo3zD8ISRU4taAZQStHxOIRIea43k9G47zlUAOA6EQAHBhHSVcPz1U2Fw/y2fKJFneFByBwBQ5EpXlktTw3J8Oj5ouf+ebZs8fHm5M5m8rLIgu96KdxnudRFEVRhCLOBYKZsPN5P5+amrriquvSDBGR2QU2B3JY/xSAL4iC2oR8KHxx0ZMAQCgCd1hv+kzePNBbP2Hah47io/bC73VuWrduy02XqC3jm46e3Ds+ud21RYhQabQZcOZFR0ksIi4YyKMCRS7NAIFIhsfqo2NDSmh9e+P6uemJqYkho7959+PWZlFijFIsPnMZIwHns7WjFy29pDq6cc+d/3X2o39anTtjbnxRZ+flX/mD9/3Ob7y7umaqGccxRYioHXvxWmvHnkHq9Xq32wWASqWS2bw5MszMoCjPbFhSkSqcueI4jiOTpj2tUSmV9q33nowW7xklqVbSXj/rp8LS7fd9A8jo3FrwhFo5CdRDydN+pHQoagvoIKEScuwciEYCgUyYFIl1ufiKSdgzBZNUxCiK8jxnhchCiKR07h0qxZ4JKNJKKeWyHJQiIO88EUVR7L2PlHZlTWmtDQvIWiuCkTaI6JxD5MAER0UkBITgJRDACql34eUmXnmQBa2SsO0HlHBgJiJZVhPAMKgIVB8qZAS8wPNK0sCnKMrSwKpg8d6HgL3SvLw4C4wCB+XhgkUO55nFBz8aH/w8AhAWCn6LrOSNlC9WHEkCJjcqrsa1B7797e9//alHai8isEimqXsdx0K6qepeZ2dsKpZj8WK9oIAyG8yzbxx78H1H3gkb1enZxoc+1Llg1B9r1Vy8sTa+uX/4Tr/Yjs480h+5TXy/4fqOI0sZiu4rYPFKRAOCc95ZsD4XtM5q7zVznqfz6MQ5lTqKclJRc6SqVQUh9rMTs4zdHW19zA7FVUx9irLYY6UqEIFyQuR1VD15boE8A2kiJJMyxvOzixs3nLe0ND939tQb3r5peEL95Yce8Xlcr1f6qa3UK4PbsnHTuvEpGB69cM+TJ1/3OqU1iXilIg/YTftaxToy1npE2H31i7906NENeuy5h5/93hUfv+kXL2ydma1WKk07ssh28axRGQC4NTsuPX5m7qk9eyqV2ite88pVGzedmUkrlUqW9xMDzDQovAq0HoBlDygrH/8gmUMiYBGSYJEqyxoUOBisKkQfUFBQWAAh0UA4I5CXLDAiBnAyEmmtGcF6F7qsIt5DAA+HI2Fw/iKUFaeUk46iXYRChCAUgpMvS8wBApHACZIHVqEY9QVQUKTwnw+nrVYoJRIaRdwKfBpw0UMGAEVYrO3CxBA1FmWqBywmw8WsV6M8r1+/Mtkl4YIVzTywhAr/3WhFgIGnx8wKKVI6pN0+/LkiLA0NHQDlDKAjpU3OUaMyt/8o9/ocRZ+742t3HzhcGdmwJvImwVRlxB6FjVGA9Gg6cSha+sXuEpJGHI05VyPjS33Lvey5y2++9yXvn1l7wfpDP37dv75hzdN3J8TAmOcq1nZ7Hh2sR+uG+WQ7IuK+tV1vTZyxQ/ZQ9ejjvudIZYnR5LDnNHcqDQASNAQQ9ebypOlNHd
gb7hvJx058a6yS6WpNCx3CS9qja1OuE1gAZtBhqgCARpGAWBat8PTs0n/8x78ntZoX8Yz2upQOKDiNxsR5bp3Lgx+dsGPh0dHxzmKnVqvVG8Mnz+Wmqm2eGWPCSUslrLAwu2RXrDEuOj2hxaeC4sPAVmQxNX/x7AvE16v1VRXHGtSmbfGVl18mbnGq0SQvoNh4Yg+kyXjjyqF9AWS1BQGZVMj8mIh87pMkWbtq9fD48PF9j0+fXZjcsn6pO505VU2aXqNJpDm0ZqZ2pvkA3P8/X8NPPjc+RCnAxT/7F489fXR1LNe+4PrMVKxLhakvXLPSR9aESCgizrlQ+Xlh0Mp7L5o8c0TECBbF51msY6XAMwt7ZrCWnTAQehBjTG4dIoYOQ4lpUs4xkOjIMAiDKKW0IkbQSCgAnpkhMZFzDsNHK0MayYsAREnMzBrQSuDsonOMSgVlDK21BQCF4tg5BiREZaJInEcQ8KyI2DrSChFJyPsC2GJQ+VIZIGAoSJFSBliYoYTesNYqd560UkoxMbCgVuiEy1gZsCqEZEghCCFqHTG7AjHvHCIWhhOEpEhr7XK73CfR2jmG0uYFyyMMsRwpESIoEV92Wgop+2D5IstHCYYJdHC8GLSsQ1aPPhRQoVcZFq8MypL/dgA5z6DY9tPKaGTblQ//1yemf/DNpfU/r/V402V709z10vVJI1X93CsEiaxXrFLb5UizS8W5P9h614l89LPT1zEqGs6RayfdJIyboWoK3srErkY20zt718iWF4BEuVE96lW91kxWS+5cFZRWyhrF4rX1oh3nuQVvwUeRaqooSRIHyHnixVmf9SVjblMX8MyE37Bf8qsiRskyTuJcDIvqtvvYT5NmXeu8Y3NFRqjjnAKXVYdqDz300ImTB4dq+oW3DKNuffhvjtQr66pNlfWXbn/DlS+6+YWDm5OnmlCfnn3uxKl7HnvspouuuHKx1Q+bNIoiAXLOVeq1c2c7rhKvXrVu37NHr/7Fm17+q7uP7T2VjI9Gw7TIqcWWdIeffPB7wyOrP/7hP1voz5+3bdveZ555w5tej4aVtypPwGjrWwSVQXD1wgFMR4jiPAZbAi7Q8uI8M6OERR5GujgA8YVqDQAEmFBJwP+VJFsW4WAJXGCYAitJEFCY2fnUWtAqRMGy7EMszLvYgwgJFR6qBIP0FILZiZCIL22IJFRLCgdhOySpodEHRkko6EuFLxEhZlHE3guLYw8sXPiKkvKMGDA4gIXGBhY9ZQy6ICCegVkRIaB3jpQJH84l0UBEYm0AgPF5u0YpQ1J0qkIdhjrQBooLc4V5GYY6gZd/CIfLQ0VBwUZbj0JdA7mBeNEmqnb8oaeyfv9z93zpyyceXbtuHUsHSWVqRFzUpl7MaK3Vikb7rU5c/zi10OdRRWzVZD3/4NVv+MmLfnNhctuWZ79303/9xobjT0SSLXlE0an3JBDFuI51BWF9Mz/R1h6yjjTmWG23cRrHfclZIib0IoQpERIyWVXrtkRYuQwicHHT5EteV4CU2Cy3nWF7zk8fPnbsaK9r1Wv/MuW6wp6IElFhqgYIiMoLIkpEqAiaceXg0b0gIyAGVG6vS6MHEmZwLhcE0sYLsHNRFLk8ZWZr/a233gpCXkDyvBJp61xRD7BHDPIYKjDWEFdQA6QwDwm+bYMjVA8P13Zun8jmahUVb9zaeMFVl+5Yvf7gmYVVk/muCyfnpjtxFKdoIUIizc4GxTeFFPy3iZQPPQ3HHhRahhgw0tqRVT4Zgj0//un8f3y1+Uu/bRoTzYrpKYkWZ1vnZg4/d499cRZ/5N+Hn66o4alzS8cm3/A78SWX5F/5zC++9CqOvO/2IkicFkEnisgzW4daGaXD2JKI2LMm8MxoRSNaFkSMFCGhBYcCPs214igkIDZmn7IX63yBzkViZ4VEQMBbAu8FAUCTUpqKyQoIEgYCByjIvSNF1lqFSAJggYwJAEtEYgWG0YP4IH3HXmEoKplElFJeA7PHgi8PXAJRCvcU7wFA2BORX2HvOMh5vTAoQmECYJJAEgQm8QyeCVG8K1CeAqENoIBEAHyhhCfOe2ZQKldOYXHEBGdTrY0HdM6JsOPg5oTBrwYRAZiCnh4QIlpg7cijhAIYAFBASZkhASoyIgLAhpQHGXQpFSAw+zTXWhOAE1ZKMUMBuwZBBBGGclYcJnBYgloFGBGd8wYlRxqZqB/cd/hTH/mYas28872/+CvfWbvYbbf62Rt39W/dver3vnLc+mZV5S4XwlpPlmoqTq3PBXZXTr9h5OH3HH9XW1CRR/AaxZtMAfT72phKDFP+kvfaH72fD3w5u+TX1dIMVb32NeSMbIRRz6GpovF2KWWTKAXsqkoJJt5LxNjPsl6aS7ePEQE6QVsZGvKWuF9RxzbgtjONqOd1QyqT0Gl307lGhNdvGn/h5bt3rGMLtXUbh3ptiVkk8t2sOYGwZkh1u6dHR0dXr6t8/pNnk2TI+kWXm3f/5oVpuvh3f/2lQQCemtxw8uTJStJQcY/cJ/7+yisXLXvjFJAGw04sZwBmYe5cRZnrbrz9kYnvvfhXLjt69DCMj1ZUP++YaCOcO7h4z78/d+mF2+984PuTtY2ve/W7k1VrNozdef01LzjVyiBWWoCAxcdaIQcNJkQFy21iBGIAAfRSQEAZgUmQmYg8gGc/CCQEKMIBpS8iRKJRIyCzY2BFugCvMIaWDwmgUR4YQICQEZiFPDtwUWyEhdkDBwEPUUSktQcSTYBomb2IQlECCODQF0W1EKIwAbEwM3hgM8jOCxlaIsl8rpRmZuHgpCseipGQ1grYD05YEUQgRiZCEfCeQST4JYP3SkhYMExhESnSZdMYnMuJabBBmEEEXAH2LmbOjEJEqCR3XgEpQDK6iKyh0y1YRHABCHLBiICkBMhoxAJcVqS4zITojYoIMcu4EWepHHr8yUf3PnRvvzNZX0c2hkrNRRHlvg8d4kjYp5BdOZSdv+mchuhHWfIM6hlfOX7V2++64ReWRtZuf/yO13/mZyaPPjfr+garuUVEcVpAKFNArEbVUrWLa+v5PSdrGtD3cKnT8WNDPu15VXWEHkgZFJv3M1FxRBGatD2xeGC6sd3YnjU11mMkjgGdbiSnHz/w9BNtWk3Vi6oXXNkbvTTqzfp+S0bWFwKcAUEnoFHqRpDgutXw1bu+tOUlt5749v0sPdqgZaenv4ssOIUSY+SZWAA1pz5PKO4L99OZF77kZo4x1hYi1XdOFQu+KHDFIyISautzKl+hX4KOAyYwpHQFCnpqbOh3b7912J2HRrMl23MZZwvTpzetG8kycMLOWVGIgnlmCVAcEIla2WAUQERLDNooFCYUxwZUrDSnfOS+h8aO3J/+4R6Oaiea0HeAC21yrVM3IABMntviosQunaquPn/qHT8z38o3nJqPx7ukta5om3MfXVUw18oQ5t5BqaMWCq9y5DlYll6ESRQgKqWdy1VkAAqdMA9CRChMZWOMiEyUABSAXu8YIw0A3kuoXEVEvA84KCn8laGsRHXIOoOqInCJLEJQAqhUARv27L0HEQcSl
V6BoSOkkUiWM/HyJxSPh6R8eCv8JjWqcr+IiICiQWc2FJdYAmEGdYDWKkgAlOdIAG4EIAhj8BUPyBcQRGWM4QJoXaA5wqfpyKBjh8EELfCyUUCc8+GXUvE1hAiaFCF5b5fjbukHF27p4GqLmlgKxV1g/9++uhDfCO1xERHQRmcur9eHtJGvfP6O733nrjVDE7/9v/7HPc/ggVPtW65pvnKb31Ad3bJ+6LJVeO8xATIQ237KibKLYAmkkpsPrPvc4Wzyv05fN1Sv2l5OjhyKAyb2gsQV0yU1NPN4fMMfL/zgd8c3v6Cttuos7auONlEt4kwQUVLJ+5mrOBRQLNTnqsuWjOaco6hCBnPdHPZa56pX96N5mnpDkZPuoTXyyh+hmewt9jGf3zYV33jTtqsvXT0x1Eg7rqXAZbgwnS5tbxtGboNqz/LqsUozHm5sYV4crl/1O7/9nt/5wK+PT46+4xcnDx/Z983PZ0PN0UEAZv/MYruzZvXVaRrfdecP3vTmh6669uq5Vkdh3O2nUUTIpIBaS2dMdfjS3RfkNxxfPNOqueZoI2lsrFAUH5058KU/ePiiaNuoZBvWb3zL635heNfmtPHE+172e0ml2WrnFdIA3SzjSEeDQSMEqJR1IsIFwApDYF6erhaIpv+uMyUicRQx80C+eLAGBtM7IsISCYwlDjEsf2NMoAMVjevw7aCQMHTFnfeiVMlQAhUETYXBM0UKoLhUKobTAABCZU/o+XuTBAqgImIIn0FGJhghAwIRsQ5ZdcGVXwmZHvwu7wYmp6C1RtRECokcOyiRaGEGRCTOOa0Ulx2t5c4BMxba2FyeP+F4sbJiaoPLICCPqIsRwIr3A4BVkDDlzA5xNI5PPfzY/Xd+6xHfiuoj1vWQtNHa2gwyBSQM3dGEP3jZwfOGMwBA4ctN8r7G8Eff/R+mcdUlT3318o/8Te3MnmELogmhmlqCxFvXa+TcYSBOG8noEKoKdNbV85n2EGqx/exsp6N4BCFR3jtBj8JWovLpsPWRMptPfb113nsxnY98z6uIfC5su/X1vOpSfMH6mjA3JrLqRKVzavWZJ2ZWXyB2sYcVjhoKPBEZgpFYtJZhQy87j9/0/v/TjWvX/vrP/viv/g6vqwNAfk+/Kk0L3EULwEqMZlWPEwST5/n6NevWrds0v5ARKvaQRBXxYThIxaMp9WQCXGnwfElrrRUgIiEQYjmd0SLUWlImhsz162Qksnf96LmZU89csPPqzPnAWkEZoOlAmWXG4aA3KMCAyOzCQY4CXpzSujU923vkie0bLm+dm0l8W9KsxjXWsYrWH97RBzi5+kBf6dacru3+w48oSJyec/2ZyvatppG0TncSlQxp7bJcEQmJDoP3MEFRiogCnplBQCQ4gEqJxbV5jsJRNen1U+c9gxBRlmW9TteQyiUPqocSOkUIxkTBG5RIswtSX8jsENFbF0YmQQFAEcVxTAJF2837cPeLQIUldacsBQKJT2tCRYVsRaAAInrmcEaEKlMKU0kYBCERQS47UQNWRtmbEpEAkaeVUnnP31ch71Xl57tiYCsoAenEufMAQEYTomOnWSlERSqsHhFBpUABK1VUwgLI4lGUUEBeBtoElFiS4DVtbVY0XkqkZbDpcMJCEpALIoIUOg2hmyeolXDh80pl1RvOEe99HMd5nvd6vWazOT+z9MlP/euZU2de+pIXv/xlLx2q6Y/881PKNH/xonhkcmrv3mPjI41uKj3jJnS1l/sRSNN+NdFd9uri2p7bhh79lSPvUXFitAbviCoMngg0kxWxDmo737Pw+F+en9j0gpsXnviiesEfqiWlISIVpb5d1cMCtk8Zo9aK+5Y9+ApYGysHukpgnfRVovoZeKlA3qJTFY5Q+1xJ4+SuxdGvz/LZa7euev1123dsHrGSLHbx1OmeUT1xUb0WpX1OF1HXGx6sjlCh2XvgXKXWqFf1qtUXnjreQoKbX6U67fg7X4zHx4ec7w525eJiUkmaJ0/v0waTKn/6s5+79rqr2XLGvaQae6cAIFbQWsxgurdn7N6J9RXOZO21WxfOHf30b96/NC2L5w5f0V47vqn6/eMn3/OBPxyNG9FWm6wb3jl0bbudRRQJa48dodi63GAEAyowIRECFqwjZBJCED8YJEDpOT8Y9xYLWxUiaPj/Q9Qjoi/3LyEEgFZYiAopVMZChEgcCHISZqsiFBqCGASKwDsRKRxKVChniRAYimga+AUCQdKNmDCM8hAHCGQPQDFFzA5UsSyRQ2YujOGADKdx2KniBfSKBFpKgLdSihEYiwGwEAohIwgAahV0C8MdCIyGIrVfTkQQQAoCQpnHYIkkX7npBn/inHMlJMUJh140lGluOHYsOy1GCCXv/+gjH3967sT82qnY5hqH4wrmPkr7adVkNlXVCv7RNYe21PKaySlpfq3x3s/Ubuksvfe83r07P/6hm6f3n0wXDTsGgljFrBTOZ6l4hHR0e37ZCz1kjYfuGRG3Aer1SA4tRXGfMsye7S9d4SwkemwxXTRh0OY8oTYaBLxnTVSxc1fu+fDeda+YH9+lfcbCzblnhpaOnlr7Aq0TRmJT1Xlb2d788JbVJ37cmri439ysOG8mpJXUtSiCHaP2tRfpM888cOCh5zIj509uuObtb/vB5Z+lA2p8aawLXXA+iSpOWzReoWZIgKWi+abrrt2y9bzjs1mMRrFLu2kUhw4lAwAUAgcQ8qOQMiJiAZQJOs3hVFcEiERKt5Y6R45PV0bW9F3n8VOnH3r4EPf6n/7nt0FSm13oeOEojiR3SmvLXhktIhjqMAABLtVVJSGdeyYBUAQKLftmPTly+KQ9O51POYJM5XEXvFNsqBIpNb32ZG1aNX3lVGdm14f+X3/1hWNRJ7IEkMUbN2Q9AC9WOWQQRVrQr1jBIQDDAL4RiLAAFCR4WJDAqEhr6vf7kY7YCRF1l9qXXnzJ+PhogCFAMM9RUQiWYed7ZqUKfQDvPRCZsl4OyItC2gmWufBO2KBazkUGSb3n4H9RxEUoZjnh+YSzoARqDCrDkPPqEJ5DJQElTCPsq4AiCQHMlfNmIWLhUFIEtmXRoi8D+SBhAgBRgQmJyMJF8gxGBBWhIHsO25ZLrZZiw3tGBtQ4qGcY/ADbWfR3gti4BKzrsiV7iQxEVagxFPdkoJOFSN7bcM20wl0k3OQASo+iyFqrlKrX6/v37/+3D38UYv+mt7zpkot3N8bwjq8+tzetbRrTo8NTszP9WlVZNVttkD4O/aFOimzZqHhe5RXW9ncnvnQwXf1f3RsMiLUWCDVpE1p3hASY2owiU735j/c/fQec3BtBJzn8IK+5HEwOidb9qjHK93PlolRjmndiT058u9dTBDHpLM10HDWxgqhcNQZoJWpVSqIRWPXTpyYA4Fd+Q99avzrt947P9DSKJmtqZPVwgxIN/fmF/PjBsxdfFaUtzl1OUFvqdKemkiy13//h90+dnLnm6u0bN/DnP3F2fLLufM+56uApDw03TIyCabslQ8ND3/jmnZf+2+Xv+aV3nD0naT+lZIlsLBYWFhYPzz1y
0bZKNRsb3pl84+Pfv+PvHuSe+Hzx2mtvWXfJ1r/99Ic/9DcfvmDNptOnexCdGPNbfbdqOWXjAA2qRHFktGePAEqAiUiVcPcy3HIYu+EgsiKEVuygsizuOVKBvS/Nfwbxpli3/w3NF5raRYtq+aPCFsBlUa2SRMusSu02KWpEDwAKB4WLACGzIEMQNx+UqkVNWZjeo8+9FyZmUFSwB1EJB0TZisinlqtPKWETgzvDzICiNGnQyz9KJGwHUIi+QE5BWQeHPv9yMA6nEGKARSIisgQzAimFa1Ag9y58ZgjAWuvM2YDUGRwpxS91zEZhZpuj1ZM/efxbd941O1XVnpCsMj73SSpLWoPLVLMa37SmtbmaDpl8ZuiSX5u6DwAuav9zJ3/2za0/fnSouXh2VS1SNl3ICbm75EV14lF93Qvg+lfhpS8YG2t2nvhR/8GHK9HcxfXTAHCkNzQbQaS6c/P6yPGFizevPxdl5JwmUEjM3rEgCiJa4FiUuOlL9/8HHWz04rpyC1bggSv/IOrN5rVJAEBvmUxWmaDuTBolFz725/e+5D9vGV9494snZxdtN5VGnKGYtZP0rY/9OAM/Ob7h+1/+6q2/8e7kNc3sm+3mxMjskcVEx5VE9bJMrBIQhlTXajNnjm3avEEUWgdxTGzZmKgEWyGDrORMki76gkQacfkRe2ZmRxQiKepz062//O6na3xvxEPa8FBTv/sNV1LDnD3V0pVYoYayuoqNZpBBG6TM9MK0g733KEETFYghY44NnD58rOKMn++aWBTpGlYiVuz8Qjrfvmho7Gx6eOng1vf+df2VL9ezs/PnsmQobrUWJtZuTftc8ShOnGblVUZOE7LIIHFzzoXTnJkJCUihALMbLHp03Ov3TKRDVAtz6/GxsdmZmQsv3NXPszRNk6QKngOtzoeeNgXYVAG7MNrYLA3B3uZWStKOZW9IgaJwXmBJZwxJUBCmGYAni8PIMylFZcwOMTJYmYRrZmZxnpE9oAc0Ax4tlFNdEfAsZVYLYXIlIrTMKpMSdkHyvFMscP60Li0QAAjAsgWAKIp4hdIyEoj4Qb4cmnuIofRACiRGQkACCQiS8oZjIVfLwTbO+xC/B6lAsdWFTKm9F65WqQKzAOVNGxypIR7neR66HY1qDQC+c/e3P/vZz16w87y3/+K7psYme10LafvLD843J9dz71RLxjrOekOVOEk7tG407sk8WFORc303rpS7Inri1uYjv3D0fQwVJ1YxcoTIXjIG1h5QNAJnaSoM7colbwOpdp77q2E82VE3TR87R7V4cuOqKKPFvJ0ZrmTgxWWZreU21sKgFzu9ofGhjKmVYxSjhy7Fq+MMDWUGoqUl+Z1XX/o3Pu5sPDv7ZJcE6lFDNImP2EsS6Yee/uld950+8Gzv5hsmv3LT9iXMvXEpwdmzZ5e6x9esGXrisX1XXjMxNjKcJNPrNmenj9dq1aEt29cPVsXR40+JrSql4grUqs1NG5MjR/btf/ZkfbRaqzX7/Wa1lnc7pxfOPLv2mjW2ayvbss997L67/ubRNaOTad0B1p/Z88ShE/saZqjb7i0KS3NRN9Nxe1G1odtIohjYKW+896wZSBc90CDrP7DhVEoIMUSjwQZBAK2ozK7CE0dBZqGyrl1Z+4Z0n0gH2Y6Vf0UlNj7EkpCcFfO254t1hP/pXWkAh+TDBmdkZFYCAApBQMLIlIUVFF6EEK6OWQTCzEiIwnL13rP4SGlRAY0cIN3l3hQQBCqHZYPNOLhCEQmpx2DnlhERADjkLd57sA6MVkp5EYLl0ZIsI28RkdSA71Ww/4vNSOVRg4ha6yiK8nz5kBzcUgYhBvDg0GsFT3/vJ0/4lKNhXGpjpVqtYcoxco65U5Iozq+eaCOgqtbPg/3XpHc/EN/yUPUVx7PP/NOmP5oY7p1YdfDGYw/Vjz6m8hatvUZfcwve9MKhVVtdFFc57y62cx+TY5be1mYE0N1wcnGs1Z2r6MeMevjMyQ1TQ6ri8gxQMEli59gLg5dAxQTrGAAjFdkedJe8gXMTV3BA/rnMpItRupjVJ71ORJn20I72+usZtY5dd8lN1vKuEkAGsLJI//rvHzVVba019ep37vj3/B+yXf4Fz+79KVQNVXW32ybSmrSIeOXytFuN9Stec9tix6EC63MFIOAL40kp+w3locgF4p28L/A0A6wMSiF3LyC6WjNrVq0xMh4xDg2PSZ7u2DqVp4Ziw8yxiZxzTjhS2lpLRITIUrRAQ+0SIrEDV4yBHEdhyOKgdewk4YKRDR3IWaUOkBUqYIXx9JqF4b121ZW3117z8wv7jwwliWkm7YV82+WXVLdtXWrZmlYIynj2WjvOxEJYRmH1rEwYmTlQaJYbuQKMEBkDIq3WvFaxMbVuv4+kp6amnGMQNCaWICnDjFDQHlCR9z6JYuasVGqlwXqlku0Qgx6wdwYpdtEpBVaDAFluvOD0OUhgKeTdAUIlxWGhEERhoQkVZjahKB/ANQGlnMg6WW4xIaKUYgVSNLEJEBkL0vfgOIMVL0YILXceTMTLBhwzMAJqpZQipYKRS2gAsGMQAgVBgd+DGKUGtx0LorBY55x3vqQgSzk7BwBEYMDBoTBoXCtNUOob4IDDzhzAtApxbLR+4sT0Jz/5ycNHDr3q1a985e23gkraS73x4cb+Z/c9MOPWboI0jTYPNboAdz87fbx14qET+fAQJVmdpP+377zw1/7v00vV5AOTX96frfly+3rTz1ysSJAJmV3R0UckQKVUVVTXKz9zanjny9LuwZa/dFPr0Pt/7wLfz373k08tVlY3K1q63X4fwcZa0SJ0X3795U/tPcmilVLAijGJvGWjoe+UEnSxMj0r0dmlyrr+eUeTZ6BGOXrPDFa0z2KtXY+vvuiKG6+UA8c7H/vEffc+cvya89YttZpzra5kenR0pDnWveyCl8TmlK7u+cG30vO37zh84IB15tln5gZP1jldqRgltajaSXvnzkyfcVdc0sv6s0dm643RdWtXf/POz77vvb+5buO6//mP723xGYLKd//9odG165LJ5vQzzzQb4yhgOyQ223LNkU7rwihpaFL1ZGIx62U2TZKE2ApLoiMbBKaBB60OLi2BhYuGHDOHsDqYzsJyHxVWZGbFYGbwQ8ot7gEK1SdYFvaTQko3jgY65FKQ18M4A/D5UlseJOShAICoNJV2TLKcpyJiGGUBYUhhRQQch8FTETUVhrJVSWGkyz74/vnBrysqIQEQLsfhVIhpURFo1YqBGqxIOABQhAr37qCmTkVx78EzS+Cf04C/hEIrJDMH29kNwnyZ/YT7bOKIA7F1BZcJWEgryZ2uaO707nnske7YSC1HRRzHdeTESeZ9DC7auFmmJl0EbQe8r3rDi/Kvv+3Ye99hF4+MrvufamZz/9HZsdsOv/w1e1UMAEMzR9YOx2u2rGm0Z1QPGn4+tTM9bGIUS6wrqR0fUpmFDe0zqzfXH5jOHsuWTi6pR6Znb9o+6RLvM8fMpBWVjQ0EAEUVItRKDMYpZeJzjDBIXYNUXJcUOrEeYg+o0Bxce6v2C18
8Mp/a/LYLxqZGGu18adNE4zMf/9ixYydGR6ayfjfGOH8RAGRDx9bf8oErH/r4p3oz57A2zGC8WIxUHaOZ1txtt7zssqtvPHB6KYmrWd5HVMwu8MJ5hZt1CLYFFl2W5ymB0S4rngsAaKMVuzxPq9WGcoszt730wmuv23TiRKpiFZPO+ykRRUmcOSvCpWA5kwymyBQE5zDS4BCKrimSRgaoGCO66pp5nvbqPGSMoMqdRJXh2tyaU+ueWrvuA3/Xa814M3yme3a0siVbenZiZCiPSYG0IQVdqYnui8SCmbOFLzcXqi4IaIVJwK7oQdFAZwQR2API6Oholro89/Vm83Of/+LNr7iNNBGRMlo8O+EiNxGx3iEXgAWNJCgBAxwQazqOEJGl+MeV85UVLQEImo5Qas0P3sNKPIjzToEqH0A48kHEI0sIrxiSXBDPrLUOirclgKJIAix7Cc2NUDIAIBWqGlKC1JYj2Yp5asgSvDAiDq4wfLghhcWwDaH0Cl15DjIhhVG/CHpkZCNQpgTBkVdCp5DFO2+VVoUQPLNjpjLPgODmG44QwBV9cmTxxKjK7KHoEwACQBzH99//yL9+9F+SJPnlX/7lSy65JPPO9yygalTh0989i9VVztpurr/61Pxzp1ufu3dh9fjwZHOK1Fw7kfnTnYqe+sNfiT/9r5+4ufbYe07/ntF1SVhl4k1HMbInRlFEhtAyi9F9oxz0arXk3KIb2fzmP33rhre+dXdGonx88yWTL/ngvUemJybrta50CebbYAxE37j7KYqgUktmuh7QVSs2d8YjqKpC7y23YxlZNdL5wg/273rHtlOj+1NLlahaQQvGUdLIbO4hn+1wtRVvW1/5i9+9ffbcdLYt5wTrpjY2Hs/9JN5xwQ4anlk4N9FaGKqOwPTZ9Pxd1Yfv6zSHaoPntXbt2sm1tto8t3ZDY9OW6rfvkvvvfe5Nb5R6bWxu5thC66Fjh2fSfnby7MzXvnjHTbdf86PPPhvR6HnrLxufaq4Z2XJw78OLrbSB3Y0vnhy9XLcef4ZWmbH6RkJyFuKoKplnBMeWLAspQY/FhlxW/wZBJyDsJZgSBcqZ50BRHRxTEGi4hZtW2FDLUXmQ2gIEJVReqQblvS8WklZhOxtjvPfAg4CqgqxVaBRprZm4UJPm4t8liOaXHSYM5oQQmMOh3HQrN7KI2NC7BiSFipR3hS7sYOsN3gkQuA5QZgYFfjm8IXgGS7kBg0oDlN4PiMTgBzvUOadMWQ+U/psEWNgdP0+Pvfj28gaGoVHA6LL3nsoe9eBqC+cV1IgSRdHJfUfuPXSgaip51qvEEcTk8o6mqlAvx3ius/jCF9f8KepCbSMfPKx2bjb7IRLy+xoob/YfPoQH/33xzZd/+4cbsHNk4qLZq19z8CTnMgEAQ9gYw5GxWI8lZqQ2NpodGhvOzy6qE2OVyrUbs4dm4wOtNq06eDzdMC6jwwY9Z87GsUGtFLP3jhShSCk3T4xic2/mj/r1QrbndaVTGVc+86ZOwgZc4ttnRy8He6wJ/p5jh7TK3rZ7q4HMZfyFL30Cpdpj50k5Ar6+j/vpgb/59K4bb7z5t371wMOP7bvrnpRTYV3t+8Woj9698fVvyQCElet2K7VEPHqfB54RkPIhwyozHo1GCktsUIEvioOUrGg3AoDu9PyQS5q0KDJaiWove9numQUVI9vQ5iVSWmfeIZFOtHbCxRG9fDSHUQR5DvJoihQzeEKvYGzNBFuswaSzx7t2LgMzDLEXzmp6aZ1s3/K6Xlu5NDfaGqg1FB08c+KGW3fnOassr5H0QUBjzIrTNmkTVmpYuKG5FGvj2SkoNqFzxZyGiKx3SMgsSsQYg4py56fWren2eohAuihTkySBpYImEcJZFEXW5rGKStw/DYJ6GMaEs2OAUQpSFYNiN4xboKxEi6wfMHyj+OLuM7MBo7RmBCm+HZRShAGdBADgg/cpLmf9EP5WWBAYi5Ml7ECAokMwCL2DlAAgzOqxUP0Lp1gQ9IFCwCio4oFIKKYH8T7IWBKiE1ZBGMKxiCALAhqkAJAREfEMCIo0lvNuo3HAREatAjzNQ5HBBLonaVUi8tl7zx7KOXHxiqKk3+9++ctf/ta3vrXrgp3vfOc7x8ZGlpZaGmIFgkbNTh//3qF4dGw89UvVav3/fvX0ptHsjg/s/MJ3j33zWTs6VGfpgG786JnDr7h049deeO+hmQ2fOLrT1BdAUaVOZEl7hWK6kgNxrLXNMAcTI4xH8cLp9Io105/725eOTm48PtNSjA781Pjmn/5j7RXv/fyzixsqtV6KtaQf6cSNVOoZpRn6WCoOrUdnAKtQSYUy0xt24/M6rziD1fHpx0Zmbvvuhokq98VmkAp64arBkajCcZNhYX4GUtVSYk7P51PDkeT89rfd/uBDj9z59R814njbeeefPjI0urojurVuYuT0hr5IfzkAb98/MhYvzft9T50YHW288XWvWJy+AUlMhDu27/qTP/nHz/7nV8bGx/u9/DuffPKHX3q43cJmvVYfjXpRtPP8y5pT6+696zMLrezXfv6WhgzJtuPe2BF7ixCKjYAcKMoAQGv2pAZ6igKIqJEUBoGJMPElAA5mHiXQBxwVtWxxehSgCAzDHR+CXjmw1FoDaGY3yHCL5c0SR5GU415ewZVHVLCcEAtyAaQI2i9BtHXQKA5OHggoAhyIxgBQ9g8BAFGt8HIFREUK2XnPToAUkrWWmY0xoGhg51VUOaHFXP7SMvQWbYDcu0HraEW8JE9Q8kRLvLSIcy6w5wnJi4grQBVIwP55A+agxYFKmwLLtox6K0hT3gfJLY2l7IlSWmtmQIJaVX3xJ49ML3RGGkmVFFSqFtBwDZiBQEXR0WONB+9p73zLmiNzu290n5jG8ZrxAGiV9iQWa3e4N8/Xts1vOv3SO/9u2/wn23u/Pvn7f3XMDJ/Oo/nFpRkz/lS/mZlR+IMf1lund7jHp3qPnVu7d+upoxsrC1ubuMf50+3Fxw/l1+7enBA4xwYBmQkAnNdGxZ4ycMTgnSz1+1nfxfPP6k0nXWMNYeSTBiiD4ivZgiGqddowwU7S3LUorn/niF1fnb39xlX7n3v6iaeOXHLJ1sf3HmxGNcn6vZv8+iMbe/Xu/vsePvHUvktufemrPvj+E0893T15stvt5g5v2rXz+utuml9gQ3Gi0DqnKDFKY6lqHAQ4PLMq0qbA/ggMNB+m/oiAWqGiILsvIlppAMqHh3zN6ze8ZseGtcNnTvaSKiqvGAEUevEoxY7xhAzBgQ6wrKMFhQWhwEYZBOXFK2bJuL7zAjFRmp5Nosain4trzTQTSdoLekYUrB69MT02HderHp0ovdjvbxgZHt9xUW+hL7HqojZEIMS+j0kFQraLmIdkUGtAYhYmEg1KKxEZiJGEeECAyMQKc5cpVem0W/32UiCyO+eSJLLMzuVh2wTPAGRhcMDixIXJqFKkSC1TaFwRb9I0LQSfnQv3wZBi7zPnoyiKTCETT0guwClRitrds0ZiLMbYUIRADDuUkX2g5hHCwHpICgloRPQKlaiydQ
YSRHxETJis8QCVVmQDIboiQgBX+1KyRweg2eBQIwIvSGQAPEPY3lSk5QKKjBeHwfQNCEALeiQBYLZUdJ5DGibeM6ESAO8CO1nQKELFzjvnUCsVmXDUsnfhsSqtGQkEdWzEee+cACBhUq105uf+5V/+5cCBA29885te+tJbQLDbyeKo7kEW8s628bHvfGvuMMimarvRs/MdvnJL/bdfVDOsd22Lvv5UB53rOzXUUP/03dlHv3/Xdy/+8dIL/vPPLtyVtdv/9JVzbqRiEsasB5hQFFEvMzW9qmpPzSo1lp893X/7JdHf/fmrumrqyMlerWZy9BWgM2lrJBn5+j+84bqf/8y07AY+oQQhp3m1BEZVsMnQRk1aJeIwxTxGU8PhHNMqIGiTRHpuz5be6zq/f/fX5PSqmZY723HU5loz3jSkkgZetHnkxiuHN1TWnumm02daqybXzM9n69dteN9v/+wXvjC9eDaePpEawuPP0tTq9ds2+be/u7nYXhgE4LPTs2vG1l510XjUsPuOyOPP1l98/QX3fPfrm7adv5T3b3nNax564CcHDpxpjA3VSXf7SWPCpd3Ouen5DZsai/PTazevGWmMX/jqkStft+XImfYUrd8gL63j9sx3SLEggoABAB9Ad8B+uf7zII69FOJKroiOwgLAVEh5F5ZEUACFBrriTpiCBEZo6SiNQemCrQqDWS6Ic4QUzLziKCoDpiCitRYRxC9H6xCXBmcCEgU0V+jeiRfvWQuAAtJKGNgPWHbIgdzIwgwoAIo8s3cZloKOAhDk5AJIgssB0wDWFERCPIjWOlwGO0+kQuVtFYYhcTEOLIcyIp4INSIFSSzmILZFtjD0BGApkW4gEMZ9VHYOgAUQWWy4vQX+B0S8C1rFRjRoct4SobM+czZJEmY2wD6qdHN44MH9WlAj51HU1IkHv+ilqslwYiC94FKd97JnjwzfMjY/nU2y6s54s9SmM9WIa9nXKj87T2sm8rkntp134cbrrlw4tLjvsaU9j6266cVjC3NaH1rwU23Q4N3c5z7Xnljjr7n43lW/3H3PxF0AJpurnHx67ZHHu+cOtlqH9p/rbRuNgLwBrGkjrHKCCrM1xjoC0E44z5mti4zavucT+658H2YS2SUhg5yBSSZPPzGbbBpeOHq2mUsaDedM+fRnDnZvuHDk1HP7uJt3F1sV5SPC3iT7853/d1jsLY2MjWZZ/9HPfQNGo/MvvmzkovM3Nkbnzx69bmRqYt3Gs9NpNcIuWe2NiM/YgmWtdRCvV0RaB9myYgLDVrwXYwwG304BdIyOEQE4BC7AoUYyPrpp3wMPb1q3y2VaNFgXKVUSXQS5MAAJkxUsjRwAih7LMj6eg9MOkbXc66UTE1PZeENYvDQS11NCfV6qqcb8Fg/Qqp8bS+Jh4H5XeCyitH1u1XqiKGLvSBAQvffBsY6xWKZFQcmsEBgLRVMpB0JKKfFMivI8FxFR5FlirfPcpWm6ds0655y1WbUKM3PWKksEkY7CQYIQqKilH2fYooGWGA6O0NrlMPSCSqUCAIPaV8JcFovm7QA/DCUamABQEYaNGaZNzllri2avIhDx5Rh1cJiWeXe5jaAQOhAqQBY4UJoFQKDgJLychYhAcEMPNyfgm7h4DxFJWTqUTcRiG/83JCry8mWQKvQqPYvzPgoKfERYXqYQalIBExj61l7Esw9cqUJpHtCz5xJV7pwzSayUcs4FrpcxRmn99FNP33nHHXmefvAPPnT++ed1Ov1etx+G96nCOo2wze76yZGJoYuqHddXUUb9ONGVNWsfeezAnn2gVN2jr+qu62NzHP732NceX1z/b49P3Xz1lniTbd6598BpVK5br9eNtpHVUK3Mp5nXNDKsZs/1f+a85B//8paT3arz00O1ocwqMhkLV6GysNgbW7Xmnz5ww+ve/1Bt5wU2PSGVRsVrFdsOz7M3dd/wYrlSNdzzgUJBpTaYOHNiIwDsiU/6Y2tHKv7idcO7tybH56fv3Q9Jqp45dvLbj81tHD70jtsv3lRVactGUZr1KpLXxyZzUmd0NT9xJLOu533tmQNNPtjod5eHIMqveqo1t9g+o2V9u90eH7vnR/fcf/ZsZ2H6zHVXvOzm11x57S3XOHz86NF+hucinDBWC3C3k3Zz5aPe6JnOjpdd8Z4Pbp6Zxonkwp3+5cauytUsYlIWuyvWDECsjZQeWUUjRIAgjIqCF0wxZQwMBSmQiGE7L1OSAMCDKEAk0lQ0S7z3VK7VFbMeBih4wFJ0boEgqGuJLxd/WEiyAoqBEmrbsqs0oOSKDEwGFRaAwYJ5WLTWQ9guOsBBP64YeJf9XjKF/eLK5rn3XgEqAil5eqF8LTZmIaU+2N+hP18g03wY5ykKgpHOW2RQwdEkIMFDXyFcgC90ESB06gGstSs9Hz2ElJ08hJEBQUG+JxHx7IgRIjp7ZvHgwf1Ki7PeVCpEus8qQuhBrxJpn4m19mfftuooXH/j3J9/aWZy/URlbQXPTfufnNUnNql74rVjkUTqsZ21/MyObW7P2Ig9u/CTB+mSF83PHsL24pleMrFxfOzMMf3TT13I/TdszX78SPymvZsv23zF1I3XnKudP3zlW4aHVgPAbLrUaR0c6xxe7U/3e4ebS6d1TDbzGDlASdNet9WbPzeXdfpEWkWzF7X/dGHnq86M7wQdV6W7RtVr861nr7v0xif/c+GiCzKt2iIX1J+YOTL/+Mmpb3zpS0t23rXqgo0e9OGFAgAnPj87bJo5giUV10i6dv+9P546tP7E0UPvXbf52vff1kdIEzMknIgG1MJIpAZA1AEICRgDzhwLh4wipnhhAci9y71DVIjA6DV735o59cMHj33ot9542RWXnzq3qKMKsRbJy0O8zBhRBCT4bBf7QHiAvZMScIGKwtc750ZHR+q7NvW+/0NpbFL1WLl+3VR7nbNzV61FOTs523SSegUVH1dGhpZmz60+b8oCeAYAUIgeiMETkRAqQyLC1jEV6R54xwCoNTMH1nNYy0UwNoWYYu4sg0RR0u12u50+KcxSQAIiYOdT56XMVZVSRKo4EXTAdgWqTJCXAIUEZX2Z91NjDKllIdyw5UxAzSGJeBZW4daxt+w1KaICZFI+KglTIkQULlBghT3ZinFXOEb+W0iWEq4SpDq4JAKF000G/OzQu4DlV4EIkEJDIPzecFIQUe5duKTBPK/4QGYIYkY8oCWzKk0/igOlBE8hosZCSgxBOe8ABLUOLevyNGZrbYC1e4A8z6uNOjMrUrVazVp75513/vin9+++4KLbX/fakZHm3FzLCVdqVQBi56tso2p15tTcN08PNdc1++lR44dGI/WTp6Yf2REfnc5MTaWV9ryuJb26S/rXyHPXVx7/8rq/f80VF56bbjuDH/rZ6w6cbW9bO/HJbz7ykwPVqALsqIZJijC9aK+uLP3t3738bDsG6VR8PReHxC7LUzQ1ZSu6OnN6/qYbL3vfW4//zVfPjK0e7XZmKB5CrMQuXbsuWZjtOG8w73KkGEDKDakQAcQsjWG7PnXp0bcMv/rybVNDTT2ho
1NnVz/27BPxRBPj+myezsylD/31j37u1ov+6Fc3zi/ZVmt+5wW7Vq/69GLrrIqWFuZn77zzjtNnz60e39zt92b7BwbPN0kWx0fXL6X99VtaV+4eOnb6RNx61/iaC/7pw3/xiY985ET7zOl6c9Nrh3on9elnjz30vb3dmSyddeIyTbRqzUQ12bjD0cG5k+ePvvIS9YaOZBTPRrbpyZYYu+XVJCJ5nhutVVhCRJExAMDes/dIREAYsMEsIY0bzIBhoLc8MMFEHLCBwzsGvdnSVwaKEhWAVggSqCKphWVHIBzUBcV1EqAKKhxhV2AITygiyx1nAFCFhV9gR3BxMYWBAQeebkHMCyJzZZ7qGUp9hXDgFmHYutBNKsiECKgIAQhK3r8spwgSxEAGUFMK14heRARDbwxLXGf4NG9dcQ89F4EWIHSVB9M6GTAkRYLafEC9KKVMUJeDQJVUp08unTp1Ik6U9ypJKiKIXcvKGSBmjZHd+4z72hC9+QU9S1F/W31fPZ422f/8jDunTG/y2Pz3v5+8aOvEMG/Kn9i//uLZjdclB79CJ/eoxVPS6k5D59wcrr547dL+bp1BGR3X+vGiGp9fPL74g6umv7HxupFzT/UfWhid23hDvGZnY80uXnfD6foqANCuP9w7Otw6OLp0tN46oNqHgGWk0UxV1F7qtlvt2bknm4ePX7ZlvLH5QnXRS55af/bASF3ZpaGDP6rsOI8h82ktl/Gda5781j3//GhLR0kz9W2tQZDc9Yz7VbxocoC81Rlpjlhc6iFUK7WTs9NXTax6ydrVwy+8ppNaBt0zgF5pIctSAeUUEFFgp9PySB4ECYPuU5k4sngQQVKo9GALaCCp6OidP//iX/7lVx455Shh5dlol7sB56wgdGKxFNxgrayo1RgIvTAQBcsxRQqQUGD7bW9/6vsPbuLT7FYvVnuVLCGrOtvceH81g1QSFhd7hLksH0q71fW7e/2MQ+7MiM+DCIJG8hqwpPE49goQmY0xzjmFBCyhpvTeB9eEOI6VUh4ZAJZarSRJsqwfKGXhzcVCB2EebG0QBSWaDITK8W3YVOV+DkscoMAKFXeKyAcJ3JBiB4koAUSy3jKgLtq2BYaZiCwzlpoeHEYdUKAqBmcCIq7Aai6DO0K2JQIoGLpzg+MGIBTLK1TyB9YohSateO9DkaG1Zi6m3SGPFgpkyBUDKgJEUIJhzgEAqCh0s5e/cVB2D3A0ZXZPJfE3zJ98IDrZIDeNAJBUKuFKGrXqmdNn//M/PgUEP/PWt11x+WVZ5loLnSipVJSy1gJ4UNQXV0/0T/aemBobWkynq6qZs89s+69+fe2YH7/t+g0PPXXkKw+cmxirudhrpz84+m8zyYUTV/zCvoUDf/8fe268cfNLN63esnnkqq1rvvDdZ8SnpIyBjuNhImwsHP4/H3lhj2tZ3q7qob7psaio19exGFSeMgRSSh07hR989w3f+f6nnupfMBTVslxZx8bU5ha5L1AbNYRx2u5TwcMZTPcRkeToxuS802/edv6+g9htzZ/L7Njaqe0b4KmZfhLlFV3NuFJZLx/73H237Iivf8nOxW5aH4kbY/H2aFXak3pdvfzWN5w80ca0O75p1ee+9oXBamGYefLBn9xywUW9/knm3v79EwvnDuvjh7dee9Hb/sfP/fzPvg0aYxs2bD626tnd2zZd8+ZL56YXZo4feu4nebQQPfPTJw8f/Pw1t1719qnf3GCuWbIzTKOc13rSNhgFhOCg7VTUYQHBAxAMZMoIAVSiJUKLbFCfIXPRcSr/ZXmBAQmhrCC8CgIozSIDjYsQ4gbLDFf+fwAIoFbgB1euTOQikZZBbEYggYC7JizoBiJCxYXJQExnAKEypIJCVrEr9TJYIexiAnBlF634gQP4oRdBwAGEZjlUL19q6L2FxpQs48AFWUghoHh2hEoppVEFEwUoU38iFCoaC8VeQy5TkyKQI6JTYVeqQfnu2UNwAbYwfbabpb2oASiRAGS5q2m/aJQS4D7rCCqjdCh+5au7f316fGztmrinjE3T0WZncv3k0w1oPfVYDb917hUv7QItVUeODY1tE7CyQBwPn3d+Ldez7ZOjGme54tAMjacAoJfMOu9/jHj4BOw4I8O7R2/a33n8qa88d192JIm2bF7zwu07krUXt4c2t6ubzo5feXDDqwGAXNZoHRppHRprHRmb3jd5Zr9Nl0ylMnr+rsM37rj3uv+70Di1+rtf5+SOT/+fr0bHV9OxV+vYHutla8YFlvarSRPXzHCOXa6lboFvSPX9kWZrqnEtGp7vdOuxjq1XXWvrdMv6NWbHFrtjU3YurWAlQ08Q6NssJfIGRXx4XoXYgwSTLkFQFFD6BWEplGGDBayVJha45OLzbO6d91FUU8zWZgIDT03G0lJ4EA/Ca5DMrogWJZGNGQnS1F/+0tvO/HHjxEf/58TJE7E0OCOV6JmhmcmlTQqrzuXdtDPSaEI2X63Z+tDIXKtvlEYurjiQBxSg815K+sry1hroL0IB2UfELMuUUiQQxzEACIsCZOerSWW02Zifn9e6MA8J8cYXQ1yw7MsoGMZUEDqr8rwCEgBAKWVMxMyBxoqIoZUqCCVuq9hooV4UkWDvHHrpzztBwmd7BhEFy/26QX6DJaUvvF+VSW6xM4WYhQAZhEq4R2GtGvLy4qwrPsrQslUqAAzC9uA5Ds4+WAaPSDiVggp+me6VFCwBRBqQH7BsYodTA9VyK9t777Jcl0Tkol0WKg0RYwwzg4LDh4/++8c/Pjo88q5feNfQyHC3mwIAauWcC/cwcBm1d8MVufuJ3nw+IY2UUXSujNZ+rmIarSvf/PnP/J9Xrm6qOYmHuvOXNB68pvrMcxd9cebk8V428ku3X7B+bKQ+FZ18YuEPfvDEdw/2q9WqJxTXqFXM7PFTv/3GrZfs3nHkcHe4TrnpiNcmz3P0GiqiM8o0p+2oOdrP+zS89nd++dJ3/PER2LaGwaJGpUzWt0pVXJ8jRSBBGUwLIIsAU1gG+tTWY7seP/hsl8j5Siw2T1x6zebVB4/PxNXhRoPyvu1JrFdt+tr9z7z69Rce2L/wvW/tAcwf+MljP/P2N509e3pm7uRb3vTW+kT1f/3ah+7+wh2DxXnxxbvvf/jJY5d4pWVmpmtpwdKJi+jCDTL1o6OPfOyv/2L1lmvf9DO333jxTY8fvKcbd3pLrcsuu8KfOf4vn/jXZtN84P2/8bZf+R/ox6fTVkMPR1neMb7KNQc2PNNBEqGRmICMyfNcnAdEz5w7KyCklVAxLWFmAWCQlbUvlB5EgwwPRIikSFrLF0kg2/CgBi33lBRwqpJHu/LTBq/BwobBYCX0ocvgh6Wec/jwghAPQAKklk9JKPdFeQ04wFsNziIdZHCYizeUGyqwVwcbsxSuAS5J/IPsofDQJhSBMGkORkahkA0t/XDCqzAvQ/LskXTAmgHi8vyLMOgGltezAhetgjMBYeA8AoOgVlEv8wLQWcpy2zfi4ygOStqLyF6kzzAkABnaBr7pitpoOnMfbEmiBBazej3Slf54jZMIsmix
9eQPzNRWe/FVqemfaybXQcU7Hxk08Tqfzl6+baNvJmk+P+RsrUYA0Omq8xXdn0Q/hUjum992PQ1ti146kmzr5g8/tbA029pHh1ctnFu/ZvhiE/Vz33XJYmPDYnPLUmPzzMTFR7e+EpDQ22b7xIg9c6771AObv9K1z5r2LtPbenbnr0ClN3P1F4bi2sjeW2zevWD1BSf0c/tPdus7ru888qRSvebmZGHXEv6N7rNbPdIEocV2l7zKIuqA28y0a26x9qKbG1nUchlGkuQeNAp4Ek6VxL4gc+KKgx0xMMBDKlliiYxBCvr7y8aaWqOqjta++u0nrrx0e11X08ywsqS1eLt8TENwbCUQUMFH4L8VW1BMSYt1ppT3HpwQ43xvZmjjBav+4hvHPvUn8J3PDEcx6MbcmkM75i7nupH5bGK0JqNDrbu+s/2GCxko/DmzAxbUBVoSPCNiIZkRuL8gSlRIQAhwaWmpUqnEwaRPqSzLKlE8OLLzPI+j+uzszImTx3ZsP6/fzxERUcVaBzEKhMLbhEp7cJHQQiDy4pwPg20JvTWE0KpdGXLCf4pnrVRxEARhFIAw6AJVdF+FSCh4bYMHCRFxJQojtIzd4PgYwDgRAEAHq08s4mqRWBXfWJwpRQ0iKBjK4MKNNcTRQYgVKVpcUBbZ3nthDlO6cC4s8ypIDXReKEgaSbG9EVEFZd3y0ATPqIhKVPbgRhGRNsZaCyhaKV1gVQpJbUT0uX3sscd27959662vEJHFxUX2EkVR0A3s9/tElGWZiCBMzpyafezkUj48PMzDKKnSec8N/c4nDv7Hn13ymT+/vSo9rzVJ3o3k9xqff6i7/b7FHZfuOi8+PW84XlL4n3c+8vkvnHWVeGzVkEaZ93m1GXV7fP1G/YFfvX76aKdZr6XpXK8DtaSi2OYUZ56rHlmDr1cqJBLZk6dat9xy7fVfOL53Tobr2Heptyauk2PJrHfcN2EOB6E7qhCVCHvH1RNbll7yjSPfXbh666qTrX5UtT1buXjbVH7PsbQDM9NZ7OqmUpGh9MFT/G8f+atv/+i7p87uXb922/Dw6FfveibPWwcO7Hv8yf/IsK6PPPvGS5d7qLV8zxWXDaM/MUwjM1l72BHx9siM3PXt7184tbbRrZx36fB4deriTTuu2nndmaOt9WvGnnnm6X+985eB+n//z//ytre8/egx9rgQa3H5gpGRiJc81osIUYLkwxIK6aBSCrVGDghbEQHrC5tLAPBFR0ScMPHzDo3BSyGCYAE1+e+6UYWFH5UF6SCgLodzKGyCwpIeLLaVZ9TyBy4rBgICOOdCtyaMukI1aZm1iPz/SMkIwCJa6zAqduyppNjyypZ3gE0oJCIGkNLSWAiRl/vtxQ8p7UYEMYDJizIclQ6ymFC8j0rt2wIjCQCFsXIJsnXAgMwcHOGL5AaRCAeaXyKMpANzCjE05QnB9HNOYgAA763RVRMby4wgaCJyUANhYuuy2o6fe4v79++dGr9jb/ONm1sEkenqG6+qR5vzB85xBBWdzy888NXG1OpsYtWsanSw5tHnNo2oYydM/+Ri+ys/Sr5zR8X06w1hhm5HdqloLclRUBkm1ad6q+bS2praTnC1anTfTOeZbrb3XO1ih43NU9osmSyrnX24evrB9aLYSSbxwvDW1shWWXfBwsh5RyeuHtr7K010PlqwybmZ8+4hQN3n+Yv+a/WBm05Mzzz+1MPrN0STa6fq237hafW12fs/Vt2NAEA/iQnl2PFT9chIpNKeq1eTVNLrolE1OTpx80ttx2mtbSKUowG0KApRswQsrZAKiIdBaC3Udj3bokGjB899JV1NW2s9e6UwiqJex5mYNJhcclR68GED5RpEBBYqi8KwIIsCdMXkI2THHjwRNaTeb+/VjZHt7/uHEzt3H//kH27uSWuzxA/AqFadhFLJqt3W/LEDjV9+Q97zZULKQbADhVBAWEARFe0iQCIRJq1QwFobaVOr1XSpZgcAlUqFmS17l3PwVbbeMfgXv+RFa9asKfzRPINSGgmJYIWgVbGfmQl1MK4omvgsDEHCTpiXgRiBh4BlP82KKyc1AEH8U7FCtOxXHhmDnjMTEJH4gsVPRJY9UgA3h6IJZEU1vLy9CRFAAnRFAFFKUZSybwbLZO1BOytIYoVyU0QCr3tASgvcD0IaHGdcSPyIIIQEyEMJiodgBVc88ZDdh4VCRIV1UTFMI0FRSCqKsNAYKcQyMZy8wcJdayG8/PLLkzheWlpyzpk4AgClSalo4MoQTsPGkDvyTOtwe3j1eLOdtkfELDlIKi5ateovPvvor113/lcffO7QGVy/3l0TPXRZ5enXn/nrOz919Irx/a971dap2ojN3fH9rZffsvX6LaO//42nKyYalSQVXZ07+ZF/vnkmi3JtG4Z9q1ozzoGkHmISInGOSGUKY23T79//2GLn6GWXXf6uV61/90fnJyujACk1qW9VzHHVpD2ACLUABtAcAQp7YCCQ5PTGJc3fPnf/Tee/woMhqPouDY/SSy6fGGqOuu5CZzE/OZ09fe7sYbP66VnZuePiay6/rdddJOWXFjNTf+zGG15t00rHp+PbXwbV7w62dK/yWIwLis3ek63ZmbypR0aGj1Hcwmu6pzo4gkPS7J587MmZxZnRVZM3v/jKb9z1+Xf+3LvSdu+Vt77ytte8fv+RBQ9SUYpyJcpz1Cff9OKEHWChN75yp3OJZ2aQEMxwhR4WlABdFcQRCQeEdShbo8WSXuEgAuWYRkRggF0OSAfEAFCwwgBCXExbkMsLgxV6jWX0DaI0xXm2opYNSuyDNw/+kIhAipp4sO+E2Xnvy/4NM4v3XhWytSTLf17cH8+lq+DyznXCqvRoCqmGrMhImFlLKealqGgHCIhnUIRKQRgSB7+WFT+QiACLoVIY6A4yA1kWgmAIxgDB+TP0TQVBVC/N+rmYCHSMpBWZJJCGtXDkLKpR7rdyUlSr3Hjtlgv6j37w8c1nUHXOpc1J0+r3rr82mReRM7Jhe3vIbXz8ibPTzz5df91OjI0Fi73c61p/uJ4/sX/2Hz7QPPTjCd0EXdfNhbQDXTKbQF+e9040kwUauruzeOnxZP2+RUnpAYT2+KYXXf/i6c7i4X37h/ziJZuTTsQ5K069sS7yoF1nYnrPxMK+eufB0bT3T3/0gOBO132lWdp17EXvjLve1lSeLBGo/uqnZh9oHRyKzqMLmwr69eT81/2y7SwsXftFPKDcSYlZ6SRm8kO6MjRVO3z6xJr1ky+0dXXBhcOj49NzrqJMjsAagcEJI5IGZVWBepMVqANEpBUChaoAuoMM2opl3qi11q5NP/PrV8dRhY1DaLOLvQYMXnzFSwZDhKIzU+AdGGAAFCyMa8IYwgSxYsd7T57RNKxJ+RP7d73yjRaHTn3s/b1xnpweooY3MVfqo+np/evO39TYPDVzYikxSWF4opUoDHYfzntxXiulUIcGVNlQImQfalxQOkx/EdFaq4wmrYAFFeU+B6pEsf7SF7947Q0vfNcv/GK7Gyx7ndZ6IG9ZhKhQp0pxjHCemSiCcqM
aY6hUywvHjda6aJ+GNR3uRjDHCC1tBChHBYVnS4mdBgAvjIIexAtrKtpoqKj4hEHFSRhGaqpEiBBgIB2VZyEACnFx/MiKXwQCYeS6PAYeWBxisUuVUrjiT1YWu6Ez5pgJsDCGA/EioQMoskIvesUIzcpgEkZEhZ5fOLC5hImCImHw7J3nWGsWYZaxsTHrcmCJkjhM90UE2A9OEyLK87yu4keOnYuixPhckW6hrUqsfWrzru7A1Prkf134skZ07+eesX+48T8f6l9w//xVl25NX3PhqsmhVTH0vNcf/OVbmlXcc7QDve7Q8LhtyuKh05/9gyvGzt9y8sSRanNtu5vaIR8zxT7GqCrc5pQ7edY6OdNsjGRjzcs2n39iSS/a6RuuXLP9q6c4tpEf73VcpZqmyIhxg7Ul58UHEykNyN4CACGoo1MA8Gz92QX36ooSIsyRnYvef/tl/V4UJZkz1OvbvN365N2Hzh6p8/BSZ9GePHVs1wXbd+y65PCRptjmzOzZETPco5kjz2WDgCdufNXUxPTc8ZFRJrTH5s9lkT999LS1eOH22r5nn/TJU2v0N37w02Trhvfdf+83/vav/qReH9924YYvfPrr8x3fJNWXfle4qox33pMYyEAESyNwEZFSpo2IkBA8M7LR2jsXVpEqFJ9CcBQIRnvMSBQ2adgCuhxDeO8pJKCIEHJTKXyx0JMQKR3QEQXcF4SsYgpi9MA6eHAhDawIBoE8uCaEs9GHJACLIjOs2HA9xWb03heWpDAQ9PBlfGTvw4kRPnyQQYaZ6oBNNIjB4XcVNAii4CfBIKiISIktqoUV07twzVIMxTyxwsIhBoFIcxh5r2B/BKJgocgVWlxQKM0tZzArIDskQWSXvBckj4XGgHJZLmAcgzaYJBEIsTgMcqIqcb6VGtTW1S647a3yn0fatbv3yvDIqU5ng2r0f/rI4oXb61B3yuDF58VHn+0IMh25b+He6vz+RyVOhpby3rf/o/2JaffIPePp6SGY8rWMfdoYVr0liVPIY3edw/19OBgnWV3/kLDuat0o3XzRtTftuunIsWePPPlTFPuj46e0n9y8fk1EOkVgyRlBtHhtVYXifn9kZhaibj9+bvzZT/r4VNxf6I6CskDkSKSreuSWNoxODq2tLB3qS+qpgete9Ka9L7lD/8hFUUSUVlT1XHexkeNC1ieQTXFlSHjo1pdBzIkjrBC5vGIhZfYoBnwvNhXSgsAMUpJHBmspDOYVqMHYDgBC8zKEGgDQjVplok5EzTZ7ghy4ZpXV2oiItVYXRHLAwugAwiAWVcADi/dMmth7B6gICZDFqkgyD2BiSmA0bhykc9LOVg2t91l/9SUX/WBXBADmwbOt62alrqJ5222OTtz89tZ0t+rrbHIWz8DEopURZgTSSGGS5lAYUTwTELAAeKN01utXk8QzexAwKs9yEmAGQgDS3jpmZkv9XgaYrN+8NfdFRHTOOZejojCjEgQGIYUAgB58nhEiaOWD6wShZwZSwICoSARLMeqBinKBhJSC5igk4DnQf0mIkMBzgfwMXWIotMY1kUkKOgcRERZxlAt3NBHPAEACbmC4DUV3G0L1QBRYjMFZBQCIQQkQKQEwJgYgn2eImNSqxc4XABEgjKKImT1AFCUULIPDKgkC9KQYwRR0BQsQ0CsB1yeEwsIipaYVgtLKChYHpSJkwYA+EfHCGskYI8zsmBhABJg1gmC4QSAIBs0gQhvBDHLFROI8InhCsWiqrN1P96VpMtVjARRjc4WEEC30Ft5y+3nfvuvJH+/pvv+9l/UO/OuF8b6fPfW3s2n/Fy5Ze+MNq/c9Mz+9mI2urT/01LHmaPw3n3ukEo9jAu3Trf/xmuGXveLiZw/1k2hY+j1CqIoBBJ0g2Nwy5eAJfGSSpFrB1GWxbFh3PgI3V0+9/qrjn3i420jsnBlmTxo1SKevxpXtJ3Ec/JVFwJg4yx0hLS01aWZyfuLoidn+2qFa2s8BsSZybN7nckZ6kyLdRNNQHP/ee15599f/7dDRUzvOH4mS7UNDQ/sOfbs136/XNs/PnxzdvCaOlnZc+Oxg58dqZKq++9TSnqHGJGB9pLE6jitRbcG53HO2ftvwULIFAOSCnRuHd992+wvf/SvXP/bgwuVXvvj+576Z9tdOja0i5UeaEzZziBh5cR6EgHyfsIJKRIAVEJIXxZDrVDmPJgHrM0JtjAEQKrTMAYSggOKRADjvkYAJgvJpbm3oJA1qXywNFcSLVto5VzS1gZRSIB4QHDsda52JZ4dEGkmYbSHOqhUVDEYf1CcgsIZZPAaqwSDmMYEHUW6ATyoGd8WYVhWQfiVARM57UsoQccHj5cLYPWxAEVgxcAlpaBC+8cwYhDAJwyQLWKQgNCM/v2xSSoEQKfEgQEAA4sKURzHbcAJgmC4GJBUgEAEZz058AWoJkDdShQ5BKIgLSoIiBbrrlhqUiCevhWynza6/bxY2bjAKIq9U3mSVO0amKM8E0ApYcCgm2nLdq27u/cGf71t7y6t/5u7vff2xvdPVo+rD/5Te9urGjhsWGzGNjvfueg69arrZ2d43/0mnfWVUrvr6Sx+rs1U6moeqVb2RlDKE2ojvLJlM5RrlfIyu7mf7KpUcjYZ+W/ial7z2kguu/8pnPjF3+hCMIER6c6onT51Zt5j1tkwujdQka1IGGQJGFGsce/oQHXm6es5VFj6u8qGjN78gaft+A3zkKY0AdbKUVLOFHx2YyXFJkisWUdI+VHdN8nmp+uvRqq63lrpeZzXBNNEuz3xc33rstLri0o03v2R+OiejmT2CZAQKsQooKNpn1hMzax2BiEbM81zr0NgDo4wTJk2OvXNBPwkIIOgTEWoA0EabF95wQ2QUFrMRNpHqdXugSCMJiCIFpXFNqEiYGZkDNjiMQ6z4SGlk0joCiDKbKtKRodOnjn3//odXbb1ibMjYbpYt9usjprszAgC8c6/ZPt3cfE222LPt4WYUjQ/XZl2LJBYG75iJkajA8UpZggOiotBblxJAVK1WQ86oiBz74LvoAQmQSFmb1ZKKtfaqq666dPfu0eGRtN/37MSrKIqYnSYFAmC9FgQizy4IeiECsGhEWFZUJhw4WFORsnLZbSOgMCPCQJUGACm8CAepKEDRRhdfzIIECRQwgATzA1LMnGe5tTauVgbpNgAgCwuTLydwgF7RAN0ozFKEMwmTpJA5caAAiUf2RaXLArLsmiIu0L5AKQVUJP5KkQLgoIBPCCLB+bGsdMtLQoVFmV+6QbBwkKEPxYF1ubUAYIwJ5AcEDNJXBVAAUWvtnGPA0PQTAYTgaYfgRIJYIKDWBiAIHSlkgZ4/dqo1VN3gXB8JQJvMea0gxqFHD3bfftOu3Rf72dPt35367IPppd/l62vq3JNHj7/18m1qRO7+1vcv3rVh+/qRX/37+3RjsjlkOh139ar8T3/rtfvP+kqDLSO4UDeEZJbDBUdxnPXBeUFEpXVSraBwr9s9fvzkW2/d8c933780NhZRRxkib73UDaeglBd2Aug59BWCE1nkJTu+yW45cfKhpa1jjY
551SSAoFmL0PE/OAx5GSxA8AGkr/OGNMnqdFj5j1IR3EUcvbrOdSE404gUG/A4Raw3KnP+jmpGCpM7Nx07pmLZmZPd7vZa3WcD87bQdqaHjKLOXNkanNm1Q6kEsvu6jX4R3bz0uS5/Z7feF2v99b7vvO0msHvXRx+XRvEQ8f2bv71mO9Xmdh/qBOsubQqCRHLr5izXnnTXY7Q3vbP37ui+n49D1+5rIf/uBbM+nN26Zec801Nx46vK+mm3sO70ezNNQa3bzpgqGRqajh04GxKRNRr5sKCRL2+/35hbmzzjprkGeMYZkQQicG0eZqBB3uiyot6sJECFWA4AU1HJaCaRMoLQXelVeCGFRZClalcwgdFQISSiiAg7Bz2UxT4CcIuyyLkyjsqguhIFRFaGWRok+D1ee2ECdcFQ3Lg42IqLSqDnYRecsYtPJdq36gL3pcCd0kIlUmE6sHA0QkCEQqmJEpQeIiUWmjBpkjxeSjZp2PnDz1T99xI2sbS6lr1nHfdO/Lf3V+o3bff9yuzxqWxX62GdpfX/vXh+263zj+F4BGA/ddrgwlzrAeqCw7Noiff0ny4mdfcOBUV6GkNldagbD1Lqi5BR7I6nEiAfoCEQyZd8UwAwLSCgERCuw6MpZiaKUzoyrbIQEJmqPhz770iWMUVT59EFCaECS8LSEBhwW/DLwxBkGly3Pfv79P9bqzVmmtPNicObKkW7LhqN53dieLPSliAOR00N05cm7DDC/wgjKgAFkpKN3rAo0/aLFJYRyyctVVmqkAfYgroD8padZBlAoAkKSo3gCqeZhSSiltSjBXYeRHKy5GICKKVr+BK686AgCweBQUwqC1zt4ji6AUl1/WgtU76UECVrEgFocRgyl2rr60PSgeokAQSw8L1HBaLfssyyCOAQB5xQBOSjBxdcDD3CJkxZg0Bzo1lnwhJCzp+FVB6diTgNbagwzyLMyrnLXGGNQqtMsYzGCck+AmV4vChiAU66gUM6c2DxL0ofythlirRxTVUcXSCQRBYtLWWm20UvXcZoAURXp+fvH2O++Mk8TlNo50rd7Mc7e0tJREMbOH0CwoZEYRP3b9r4nL5m79VKKASIdRlWMg4OA3VdSrQACsilpNfebxLU/OxZ+4Yf9PX777Td899+7ZBkhkdHQyW9g398Rlj989OlrLPv/JvD27qNTC7Xetf8lNi3vuWv+M50f1Rv++70VL23DDBSqKBx9403I2ezjLfva8d5oLnt098iP75NF0bDjOoJYntkFEkMSSu34Nan3dj7UWIgUIYCTEF4xrRntjnVVaacwbqNI0zdsz7XWbhn3X16KadalJIi/K5WnWbo+ZLVomZk8vb9m0dW569srLn/3YnmNs3et+4ddPHj350r9/5RXnnfPrv/66wXH99r9938c+9A8mrscb+OKzho4tzf38sehlv/Dcr3/y61/+6W1XXHJ2ffLSNSPJ9EJqQLfRGgXNw5e2HnvuzDP+c/Toc2qL628Y+fYE3ProvdfVm3ObdrTUkmN2SmvvvTAopUKRzCwB/k9EIKIB4ETvqCDwoJCFMtYgolhATnPbEXIiWg90fzCwGWoFkQYe4kF/dlDLzjg310Fm6dLg+JnD2qBz+cHjT9ZmW625YWcpzRcbtdGJsfEkJu+FIDYR+NxhDcyaWgtHpmoNZnU5nQOUgaDL9eL8mXSQPfjAntMnZj95z11nn6suvOysj3x6ZmpjMqIGa87etFb9v8PHHt595L7TswfGxtzk2Zv6yyPT7eU9935OqebIyJp1G9ZOjEz0+31m7qW9OIojo+bk9PbRzU+c2eO9856jKGL2SHR8+RA89avKMQBAjgvuqAgFb0JAESFV8Iwr7KWU5NHVk73VZ6wgxRIJAgpxcCoqA3qlwgGeCcVnhSeoJw8OIUj+kggV0gtVnK0ScIiu1d9jMXDWgWIRIm/12YKcqgTLvdCmhIlihaetiL0StIdWJpCyWkYbFTOAoEZdFiICgN7xkIlyFuv6G8eTV7znyV5rw0bd6boEEfoRXfg7D26crE/WOM24ifTpTX9OwK89+rcdqUecK4FYIfhBjsYrZ2yNFw793d/dNL0wUICKtENhkCr6Vx+WtGLnhSWMXsELixhtcm+x7DkCFMuQMsZk4otmv3xwITpX+oiASCxc3pbCrEI8FCUZVok5iKMRaQAK1p+AElno2bTVaB7Yv3jXvmz4/ChPC5FSoyNGnQ0vQpwnS5C6HHykEgaG3PmdU5eCxAoRIlBaGAzqlW1uMcYQQQEhVKsScDWxxABCLjvFYKVV9cpcdsZFgRcGACARxtWYZMWIOiCTAQRXKLNhJi2r2kQIDReACmKNhUZLUQ1orZHAWV/Qa4JqViC/ITJhYXfIK4o0K3scKA9R5QaWZSKFc3BokbVRDdPwnsPSpNoKB8XQMDqSSpSmLCjDmL2aEIQ9URCBB4BiC44Fp8vmmS4R477stsNvEWYAMsFiHDFN016vlyQJCFQK7QKCjCJivVMMoFccyaryaPUjLq83DGAlqkV57sALAg0GvVqtNjLcCv51IaREUWStD1hxz0wognL+eHbTts7I2Oh/X/Pa+u4P+0FngCquJdrEllmAVaAsQlGDIiJysPkSRLEyuPNM6xnfvOBzzz7wjVc++va7t33hkTEFOCD3w8MHk1pz56GHkkNPjCqdff+LysDcobuaebc/d/r4gQdo+aACOKbQxCP3msbd8ejs2Hlnv+SP2qceOPrBP4VJuW0xuvKxo5dMTfqOG+hBpuqcN0RnaEZ73V6e5ySstRbwIl48a0NiLVI916cAmLiWdWl5dq4ezTtvFiQHtkm9+ZOf//fPfn7L+Wddve/gbUvzJ269O9+68dztl52zsHDr+Vfy2NptG7dPn5i9q7Nc/8r3vzIydOqS4/57v/RLG1Py3fgQPbhm4vxn/fWfjebtm2/+xtmbtu5a7E58+556/1h0w/Pdlu1u0BEGTKKEs+0/+cNHt//S8ef8y4XT4z+Z/OZtfqx3+12f/6OfvPMd7zjn8iv63Y4UO76ydhcAKeq88CZrEfno4+8FCE1BKMEK/x3vw3SIAtJToDAlAVyBd+FxpMocTYr/k0WYvT8sImwKoVo0OkJCIsXssZBjEnYeAupNytFWeH8JqU54DvBOvh/uuf/0Pb0kzU/1rPuTKKohqEajPni8Z3QyONPOH11IovHYDBuj7CDLF1z+ZKpUVKslxhgiCg3h/Pz8l3/aSGpJMWFFLC9FELARNatoHr64hI2UK8+VFVEIDdV/VsU4AODC3QypVJUranDrMIyvi41pcXSjUCKtcixQmuI4ydK8mIsSiDAVQKiVBlsC3rliWsr/cfAuEkOeWxHhVWm+CIgB4B8WuAhBHBg8u7CmYiiMBQEA0INw7qQcj2P5JSLMDgUUIBRoLQna6IiYi88sT0w2Pvm1+368b2jjudanMajM9HTa4D5MHl4wcbTYA/nC+r/ZER999cl/XZYJDoxrIFRapy4zuU6a06dm3/6i9RddsGHfibTViJk9ebBZHhR3vfeESFqJiHXOKE0EQX87pA3
vfBLFUnojBkZ/MWgFCSqSIoXURuhoXXnHClnsclLgvX1Kt1fcfAl8FgAqfXapYplbm4009L//4Ak1toZIe43Ks9fkvbBo3rAPAJK5esrLDWpkPvdEzvLmoZ3ddIAechbjscRGFbmzGFQIq1Wm91Wqk9J5Agk1FsK5wbpbELTWxpgVe8pwCcyoyDmHSkvALTGX6Y2CD1LIaqgK4C6UM2GogOUACgkBNWDAUgXob3gjASVQZgMHoWrQSYITSWmRWzKAMSgfBfDwUwcw4TKxLAqZ2ToPBFEUKaWK63KevXfERISKNKkgFsMILByESZgolAyVQkgYdjNz4AGHTxt4/EEYBB3rKA4rgABvDuKykdLe+8xmUS0BAB1HzjqHOaEq6wlRgCr4wZQWxcWyAMGxh1Lx4yklgi9esHKJjKAjn6dxHI+MDB8+eGZmZiaKEq2jPM/n5hZYBJAFWesIxL35otOvOntZAL667k8N2Hcm/3LwRvvnP19nWQBZaWAn4hkEw3QvyNhKQewWRIhRjNDSQvzKmy9453WHPnDdoctHlv78zg0tX5ubO/15NK8Z2XpVZ5Yhb/LscAb2yDxLLKcfR2NOJduODo/MbL/8sV66fPCRRZNf8KYPcNad2f3NhpuJcO2xO/fdOX5s7Y3fM9l8v99ysKQlmfVLrJC9YmYQUsqAKKUMeEBEh9bRQpaO+oUX+uT7taSrR9TBxZ5jL4IaR9Olw4v90Zuu/0OtU8zT5OJrL9l1w+6HfrJt67lo5u+646EX/+bivkcevvjyi5GW+r39r3/byxcWTm694EHjO3Y5Zt64MPPzxWhoU/3XbnzVa+/56Tcves6zzx+a/NmDH9O9W9fC5FEtSCqlpA9RozO08clnHb30lgOTpuY5t435Z/5oMDX/hW9N/O1lVxWaN0Srh4uBLh9M5hhBv+uqjyz1FmtJgoilzhyySCDEgIhzXuu41+6wt0qpHBkYCEhpYBFSSqNyWe4Bs0Hfe0giozR6HGS9SMVWgfGeSUlcbwiriakGUmOQ9npLvTwfIDpgndR1rFq1ulJUyzkDZITIe+elh5CgJEpLq9GIa9Zb3e0uPblvd943arTjPF9wznMf2r2v0VAHT3+yJudcesFLMrswPjE8v9Q5fPgwCALT1m3bNm3a/MXPf/GqC5+2fft2X5IuwvlRSmmI1jU2ru4pi6AB4BUCoCdkRFAhBHj2nliKpqFcAAdpVhN4FKGOLBBYAABGKS48eUE8Y4CY4koIC/+ijRKRLBtoSoAEEVm8BwFmFWwMS2GmqlwFAFD4f+DZUk6htTaAK+2voWKfF3CkVa4uKjIkEU9ECBJQSyHks3AUmzBLCR7hKx2wCAZMUPibQuOT0PklVGNDODcz+8+3LJ215byl9lw/TxoKBypNcuWpL9LL7dA/TH3ohsZdv3L0HenEZWPcXuqkulbrOUgZRnQ8DLi85M4ZN3/ypqcfnu7V44bpD9IImbHoRawLOGf2jmjFL5YEKitipVSQoQiXqpQihRwklBVV3SFTmU0BXKmJiFK6Z4cRtaYyOVTb0DAtBAjkb+fKzQU457xSiqKs17l937Ia2qhS7XwaQYTsjESoYLDhCbRKLU9keqCY8oGK6wQAI7VRC6kh8ojeaiLnvXcsRACl95QEGbenvrcVyC68CQXMGFEBeu89+6J1C5ZEvrAhClfjvRfPFft5tThzJfUVCuSVyqNU9KzqEgDIra1ewuqreifJaEPkhZFBKRV2ukqArUNEKPQpufDJCE8hCL+thmIF+l1o9I0Rj9bbzNmYjASYFVHI8Y7ZOctAOohBYiF+B55ZGBVW0PdQW0sJHlud7ImIC3ILA4AiiqLYsheRWJssyxh8eCERcZClwJIkicszZzNjTKgMJOx0CINhapi3r2iSEwXNECqnEcUuXMKrZfI8Awh6nIYQdz/42J/+yV+fOHFiamI8TzOtIgYR55CA2QHgC7ctv+rs5VhJ1By5beK3Xtl+/1paaEyot1428667pxw4k8REyjkfoYZgaRoeKBTucIKCoKWmfEJ53//x7efuXjj1L9ccPn/c/vr3dy5HsVh1l24OgWlEyMAWhnxCM9Ho6aF1g1Zrfs265fpIZ2hNdtdXcziz9cXvMlPbj//v2/WpU/OQNvpnoic+852F8+65eXHDGjuxKUswdp1B3ygWj2wRyeZ5p7O41O4BkLPgva/jWDftN8fjXVsn7nqgHZll7wdK1ZAgVkOZPamj1u/9xifvvO1gr39qdHTXWWsu+59PfvGcHRd89dN7h0dql1/xW9/79DeedsXzU2lF4tzikw/fLFvPe/2//Okbd24E14rXbJjttMEv/CyZSs89703XXf3SO/fcOvW085571RvuvuerR9UTjq7q2m5il89Kjiebf/7TDT9KLJ5pWe2J7bxPPWzC215/Z7vbBmEjRALeeyVl+QxBTrbog/WUWTOUDDcajaiWZFmWZVlreCjPc5SycI4BAOyIZWcRkYAIteOiKkREQ9hX/VoSp2naG/Qb9WagoneTbr/fJ6NJQBHVa43U57qvN2/adPLY0VjIUz2KIgTlna9HzUiiZq1JgMysjC7fQgxWCsoBdFEIJ1rrL3j6FYqhn6XWu+X+4mU7t3Ta8yMXprq9PTumFnvdH3zpJ7/w8hc8d9fzGDxLdmj/qfse+eEkqg20YUdrJ1I0yFLLmQATmqTWsK6PXiEJKnEuB9QMkQB7TCOnjTHeCQmisPPWe0sKHESE7KxHiqJY5bZnvAYfWW+xJBohgSJSChjB+TC/xmD9q0hVEoAQdHGLXI6MyIDABREVEQ1pRvACDBwRMYoXCJ72GJb7zleqftXoLMQ8LUEbnwqCU2jcLWecxXFsSLEEA1QiJCdOrGd0SikmBMDQxIMrWFJE5EHEs0YKYtQJkAcQ7V0mxkTLYFuimVMbC/eTqab/nffvnh9aF2ftDa2hC3YuUCf/9t68PjHcn10exEO/0frSm0e/8sdn3vKDwY1yfNELDcUNHuCGGs3ZPmMjTRRMH/nYe27o6mjAVPfLPW0UiEaFpb0MsDfGEClmDl68yFKv1cq9vAAiKAp4ceEwOwYi8prY+TD2x4qZoxEANAN4LoyjFGEpp1wM86EYgGClfBSSAvsgDS4CzByW0yqh3Y8+ed9ifctkLfUDbchb8WAV1QaDmmx8NFmsq7zZApOhjaOey3wc14dr47mNKO7V8yYYYHZUjpoLfqkUfvHMzHaFCcyViRCIiGhUAKDCwroq9RSFAiKKgi6PDy0jIWhViGoVG2IuPSec10oJiGPnAYTBsScixajKSpSZHQeJEHTOIZAhFSlNiFop0gYAGJxzLixoC16cKkMNQGhPw2xJBXHKCkvNIgQMAkgIKDqkQnTsSQQRExWx96nLQhqLtQmVkDGGPDlkXjGBCOogACiKyXs2SIgq9LJAKMBBZTuMlUmCZCl48cpoKbwHy40yQq3ZCKnaW4E0I5Y8z8GzMQbQ9Qc9AEiSJNhXhN0zIYEiDVQQH0iBZ++sUkoAhFBAlFKOgUFUZCgfKDRRlLjBQCu32G3/1u++df
703NTEhHhGJBREYY3kvEfRKPZ1Zy8SwMam/c7QyxyaTW5fs9Wwy/3nbO58/LE185kRJ4ReIwX5OQiyncIKUaF2ziGSQUQHhJyB8wiffnT08ZnoM887+KNXPfIbP9x557Q/ZO1nprbH51zGY+vdAHri45onXRupj+a2L2jgsXvy/Q+Pn3fT2PW/MX3LX6V3fYkmdiRrdtQW251ue/Nm2TF82b598/sfhZMzx2Tgxzasaxg1t7AIAEbpKy97/nUX7Dh89GhSrw0NDZ2ZOQGnz6i2e/DuW9qnqTuIuz3OnU3qDYTFqL503rYXHD9xx/iWgxvoii9/7ePznSM3Pf95s7NHl9OfXbD9hmhk7zOe/op+Ontm6eD9j+177atef+/dP4eJP3v5qzkfxF6c8a0RhdFFLeUfWDp+eP3Fb3n52IvcsWOzXa+ObkjoMxev+cbZw2MbknnxR7943hkNOTGwBs/k2cMEo9c4hPeffuyczsWtCRXBEjnraCr1HkGAMpQIsOe8AGY6y1PPbrm9VMvrqFWtVisUicuzGpZeABAgEt12FzDXOlI6vK9s6nVjzCBLrbPGGCQAFKUJCeIkQmMGvX4cxyG1p9ng+JGjKKzIsAcQUlqFHiJNU+99pA0RRQiFXiOpqggd2JxBvPfdbjfAHxrN5sjQhqmxDQqjNH/a3Ozymdmj2ycur4/F7/u3j77+Db966ZXXzMyc3rbjadu2XnX46BMHzzx2rHPi7C07105MNuM6SjKw2aDfBiBHWQxIGQNq1iTUB2cTVcsdiDilUMBnuUMgRYmzbBQBk2bvXNdzFJsWsPcycFy4foZJ5kofwIWQFa4iAWPQbHVemCUY6wKWbXExiAugHmYGVZgwVtPPsAcOKCGEpywyi54jcFFWjdAhGP0qYF4RAQ6+ExyEok1A2HIhf20dEcUm8lDISIeOMBgAA8qy4JDEKSInTLmvmZi8thDBkt2+hr9392Nf3jO2YWtjebBgs8HjR8U58dJodzIrcGN82z9OfPCji7/w2aXnOMmiejLR8x3ntGoeSTvNOjXYHNt74v2/tfW8XWOz852GrvVgqOmz3BvWLrijV61e1WYREQinWRZUmUo4KJT3U6Do1VGBKlIaSzEkIAyNcxAWJq1ZVkBYUJkLlRNULvnfEKQNpVR3Kn6f2J5dt6n1zf3z4BUZLeycOKXIYAOxCznitqPx3LCJOU8T51GDR4SEqJ60BpmLjFIahDzJStMp1cUCBI2GoF9RPd/qYhHRWY+IATIWREKql6r8DoFVL0/IjtX0tdrRFuYNRifK2NzlLgcWAAEUD55KopQmFZBp3hVyMVmeM4u1tmC6IwmhsASabLETkcJCuAIxhg9TDKsR2Toi4mK5m9fjuvMBdF7yhgWYBYnisPYurWSVUr4oSVXQOJcyDYcttY4qO2QxuhAJAQzHcUVhLBw5JMSn3t5qFVUNAAQxCHcX0pgKG40Ge7HWoi7cCUXEsw8KMMWWxPvAjwiYfBExWufOEpHSqt8fGKMU8iBtm0Szo3/+2490l5am1k+mWR7MyBiAXbURkOGYNw3ZhpFY8YvyL302/9N/nvj0e8TvmHzg7OUfb79gz9KeoyI5ESky1rpqqIHMQYI2KHVaa5GDeLhH0hpp91zzuf973ieee/AbL33iz27f+OnHxvtzhweD3tilN+gdZ8d9Bz4TwcVeP/P9hko0s0kaa1/7L70nb1u+8yuUjCZnPa2+6Wzc9wTvu/XQvodvfP71b/3HPz188GB7aXl5fmZ2qT9/pnf6zP4sy5bmF07O7s1hIc2y8WR8Yt3U1c96WWyibqc91KjvPGtro9VstYaX2t3jR07009O1Wm1+1h47+fDE6K6lRf/aX/xNINn96P7164af+fQ/mJ55oq9/PHdkz6UXPRuYkM3Xb/7chedece8998dGAfUR0WgYHtLdxci01NTk8vyRf1s7/vJmt5Y9+tAVlz1D1Yfcmh+IfxzmodfyBybzjFROYjrKHQJULNshmUoaWL8fbt/kzrOz8fjIxNRk1B50tCCLOCfMzHlUTyLnGjrLCok76/Kh1khk4hDCJCiiR5ExplhroXiQ4ZERZg7mHlongZZuXU7aNGv1tN8LIEZE1HE0yDOX+aSWkNGBJRIp7cF7K41GK4CkRDCKTFCWWVmH5HmtVmNmnxde1lDukgWAtKpHERFlPk97C0SRIdQ0MjEZjU6NZ7m97umbL7vgukefePDxPY8fPXn7+Fmf3rL2nCi6QtpbTx6aP31wYWR4bNPWsQ3rx8dHtzTjcQftzLMVdIgGfQyAKXipW6eV6XoGwMD+J/EsbI0mUeCyvJ7Uxlsji+3l1PZ98FFZITQFm+5ij6rLXa/3K5K8WmvvnYfAqSgGblh6BVRHuii0WcR50USiBBiomJFWxoKrJ36VRCUoAFhxESGpHOaVSCE9DYxh40sAohVzcHKT0FqhUkgKSHlmzz4MbAVDjyegwYNv5pR5ByiZzxTnKOibiU0H//jZdPO5yUgvs6jmu/kix2TySFOWd3fVjn583d//sPe0v5v/XYKsruI068/aWtSwyvdUwszxoWPH//EN69/4i9efOp0prkHMwZCbVYZSagWXsJci37hC8bwqLzxIlmW1OBIsKCtFsC4poVBINMrKUxOx3tkg7qFIKUVaBf/aagoKq76KGelqo+UAX2Vio6xLf7L7TLO10focWSLU4NiKeGqY+Fhvcr710A72fQuJ0o4zxZjVVT0yjX5voElpw875FWqrFAoYxbEtPQCqD7OirRjehHJh7ECEC5uEEOWrRBLuAzMzB5g3+/LnhzkHi+TOAoABgx5Dkg6cHI3/1xk3FAFBGw4AzMBAGPeKaK2LOXP5waoLKSCHq+D9oQfIwUeEoIKeDFrr2DrWNvxZIRqlSxJ2Acrz3hf8gmDHEm6RYPhggBg678ohsfr8mggAw40q/n51Ei7qvBWgZTjFwe5JSi41EIpnJaZwNyEIkyrLXnOhjocEGnV4R0NdgoiiCAHDFqMo0AGAxbOrJ3EKnq0kNawPNf793V/69re/tm7tZKedUqwIhSlstoUgoCoLTc1ISaSA8/a7nrxgUN94fOxZd0Y3/mz8t3ovHd/w/P7gyP354Xuzw/erheMVGrTqExhBCAPhnpmJALmQQ5nu0qu+c/bfPO34e59x4rKp/ltvm7DpfHrgQQLwazeTTrDdJeJESZZ2qTG+6fUfAJATX3tLPz2ZeMzOHDdrtrXOunB8/Wj7+MzBbCqurT/vwjXkcu18J+XMzaYZ9zrdznL7zJkznU5naWlp+syp2+6457a7HtZaj7SG2u2lQwf3X3bZZeedd96uCy6qxa3G0LptWy7YeY56evxs55ywyjN00BNL7XbfxM7mL1mcX44jHPTnuu1tl8kl06ePr53Y5N1/zUyfjmLV6Sx7P9g7Pat6+fxit9PvZb1Oa+qnmxqNy6PW0pc/Nf70G7v2zTk/TPCkLNjB4FBXvG8sSWYVKC9MSkcU5+3ukIlRs0maH/3oJ0+deOhtb//tep2MNrGZMvUodfPtvM+uppVSaZrWa
jVEzAepOB9FkQhHUWytjeM4CN4qpdJsICK1Wl1EeunACwcogdIUxa04qYXCOdKKmTNnjTJxkjS0RsHhZiuO45mZmW63W6sn4+PjeWbZS5ZlRketVouI0jQVwjDY8Y6DmTuV2vFFK6kKAedMPAmJSBKP5rYDyqRuLu01jIl15OwAh1tDz7z+OS6HSzo7u71dncHPhtfenY3dPLxly6B7wfLCxkcfmX30webo2NG1G1trN46vnVzX0A0g8pB3Bn3SJopEQZ+9VqRByFlRKlEaPGdKUbufbVzfmp0+et89+6+86pk5MGMuhJo0hP1UoQmwUtJTlUpLwQ0RYe9RhFYZEVa+91hKJVQLZmH2XkABlPK2IiIIAlTCulaCoIeCVhl2+WH+VmQaAKUj5pW4WVXxiAWyN5BVVNkJhdlDEZTDZwMAgBqhg9zmnMakUs/tfGjdeNcN1g2rD3xh9wEHG/u6neXaQwQeJHMWAfob1OkvbvzzfdnmN596x4B1jMqCqyFuX9M9sDDCDYfdJi+c+PSbN7/uJdfsn+1oIdBKFBnudjUY0Fxh4p7a+kdR5L334IPjmxNWJUXbg0dRBCjCyMCET1lmB7zVijJDCJsAAe/GjIREZbdU3uRQmCJiRcQN/VPIkZ59Uo+77blHT0hjtOEhVQTMkkSmUc8WF6Jk7YO+lUUzQ5wTkENgIjOwdm1jCNAw9pRSyuhB7hXC6s8JUKiu6PIjrSSJAp1X3JNyQh4qD/HCwAKlLGKVL0sodGEkEO5mhUYW5sQk4f0MrkRUjp2xNCisXmwp3/WqO0TEAId03mIp2oyroAkBIw0AUJKFinGCSKS0eCaBLM9UZKy1kdGois4ejQ4Fevgt3loqZaSqIZMiA6XpJ1S+EYhY8hfC5+GSy15M3ZVm5lKHGAEKzGNFa8ZKZT1ce8jXVAyllVKGFCJmPh/kWRRFKGBdDmgQkb1455RSRuuASw+9b+5d4TSjddB1r4o6Ad+sJybC9/z9pz74X++ZWjPqPNUinbMlpVFc4SKKAITgZTGVE1113tggnP9tw9bQoYt7By+c/u+e07/0wAvyjdc1dl47csNbUEeuPZMdum9w+N7BwXu5O4tBvF0AUVApEFEeCUqTcmIAyB3/yZ2bHpyN//2Zp84bz97wgy1HTu2vLy3ri25wE03kjrc1pVScuMama1o7rjn4gVd2Ty2Z0QtdTFBbU7PN3Nf1+NlTG6/Ix5vzM7O1SFDAx55rQzGPNoaiqUlk5gs8i3jH3lo7yLPe8lKWWhGcnj45e8lFneX2Y3se3X3v/WT0+g1b1m14eGhkNE6isbGxNeNT3gvFlogIqNvHbnepWZ9k7o8Nb59cR5nzF132rG536dKrGoBZvTasojgCtqQ1s4bBQp6yR9dPjy7N9/YcWPzhLR1wj+7vD5vL2+nWg9MH59fNdC89KXWGOvitDg0ZY5LlJG/3Jh9sNs+ZfOLIoc999r2aZqfGhnujdy6OHD516quN/rPHx68cGqFa4JbFcZzneThs3vv5+fkkSYIQTRRFeZ6LgmAlRkp3uu2wfQwtHSJqo1UppN5oNDQhM8dYE5FmqxWRGvRT732v12u1WlEUWXGziwsR6qGhoSiKwuFZXFxc7nbq9fr4yGgURWma9vv9fr9fr9cR0VobGcPMYCEXYYQoNoYUIKaZj+IkqC8oMoNsRtk4jlSWDzKrBExtuDU0+mqbvza3vfnentTubjQfHBv/4bpNjW5v08Lihif3jh3etzA8eWJivLZ+zcTadVvHhic8eJcPwBGSzn1OCnVCzLkipRV1up3Ldm34xtdu/o9/f3+e8q+8Yf6Vv/iL0zP9SLWUKpEmsipilup8FbKjysQh7lR/DFESACho0QX6YyWhAMCFiqKwBgBatdgrRar/j0xHCfGo2qTwGZzLV3dOZbhEcV4FwkkwbtIFuQhLPzsiKkBABEopdqgUWwCd52z9xJapo4cf7/d7uH7yoz8+Pta6tN2bS30cJ3kNKLV5ZE2sFj6/5e+8qNcf+6s+iEanleaO37Jh/NyNsmd+Lpvh8cbBW97/wrN2rt8/kydKUFOuRGweaSWKVG5Y+XC9VfrBVeIqImLiiJkNFDi7gqkEvBpAVMxey3p/9Y1j54GoBIcXsZWIVKnKtPK1CrwPBY+lIJNYa8eIjh+fXUrrQ5HqZrkmDUQuksQYkEG88zgARGeaRAopU4Aq1j53a5trCbWAj3XNAQb7FPn/qU7CU+kr1XWtfp2k/L+lyhwASilbNl4ABU1i9fcCAHixqyb8pqTkBoXO8BoX49/qBS5V0li42KeuAuSLCAtTScOoCutw02Sl5y5gdAEwRqDZOkWFwxggUmSclOBGCNZRhWeJc45Y4iQpNsrMjMXwqcr6iChU+O8yUaAhgSuYCLyK/ktEIp65hHwTVs+6OkfVcwnFnJTKWYgkAOw8ezE6QiBtKM/zPM/jOLbWutSGIYEJUOrq9ypARG9ddSsAwDtXN9pa/66//sT/fO4Dk6NN60AYEHpKRQqL/tsRhScgCAZgc9MqlIPL0WjMdcO5x7mBzj398Ghj4fheOL536baPU1Svb7si2f60xtnXNy55EQBkp/dmh+7Njj2Qn3yE2HlhZKKw/ncOQFAIURyTMvz5/eP75uufecGRn75i76/+YNv9p87Yfd/Is8vHz9plaay31G1G40PbntF+4JtbcX7shTvSelNF6xKBhvkO1kZm9PoFXrt/9qzDi3zJGmFfc+J9vxMJd7HQGzGkAhBdaRVj3FyzJs9cnrvJySkibDbrnXa71+t5O0hTs7B8utNpd5Zk9vT0Pnly29ZNLGR0bWgU6vWxpBZZnxHgfOeMXcA4jpeXFwnjpYXZOGpmfpbB1nAIOXORlpxNRCau151Z21wbPXNz/7zzH3xs98lb733g+L1nnry/116GYwAft3TK8BRDk2Ndb3aGzJzdOL9te3axoP3Pf/nntHPw/HMu23vgvsmzNtp4/+c+efMV5xy97JLObbd/5dwt12ooDeSFsJcOIqUBoNfpRXEcRdFgMGDxxDRgVkqxOEO6XIkJsFAAMSIgi9aKmXPnRUQbRUR5nlvxkTGurHBFRKs4IRyKasaYPM9L8yKpRXEjqYU6ILzlcRwnSRKOrjGm3W6H11cbk+e5OG+UJm3ZGaXAc2pd3mwM9zqD1LejetP5jEWcbaAsCHZNs7Zm6JKWv67vstTPd3tPZIMHN204MN95eGauuTS7vtsbf+yJAxOjT06uiXaetSk2Y8NDmwXZsEltnqW5MWYwyBTSjm0bPv6JT3/2k5/4/P/89+vf8Ks7zz83y1S91rK+S9QoOqRVqKjV/1JAaolCqZ7nOZQDN60UGhNmqmGzyIXWIISBGiIqTQWTzEoYLGutkRQ7D09NDGW+LaDJGglU+ZFWIVSrFTsGi3URYS6gwiX3A6hoNAuYUlH5IxKyQudZRy1J28lEvX14/4efcWM36x570b8OTzw7GwyGVGxa6sxy77IdQ7c/kscq/9iGf9psTr/oyH/O86hRXiG6PI+H6SDPPv7QaHsh+4Mrk7/6o5fk
8djRYzOmnngk9iqhSEClnZ6ONCoUX4wNqh4r/EsoJatgWoWwwuStiPrF42BhglJHAldqI0Q0pHxod8p9YUgwRStZrhilIC6thGYpZMcQAYQhNvDIwTNdiEe1p1wJW41mkKU+jQC6vOk4AMTzBo0jj1SQjnH98A4AEGfrtab3Ie+WFsVlmgxtOK/Ko6tTcvUCVAkyTC2qvFVuIgpPreKZIoS+v/SUxOI/ALHWhtQL8JTKssqdoVihVVi/gLSiVRITRkXM7Jil9OWEwCVQKuDg9KqJryAppTxIwC43dH3gcm0MIgVpM2Z2wsHLJIyIiEhRseEWQlAUBus+t6wLjQtUFJrZQAAP38hYgK0qTiBLrsov8Ox9QQ0uzSCxMpMIY6dw2wOkoComvBJDJnxC9hzwNNZa9kWODxs8IlKRqW6U9z5KYvA8GAyC1whpXTfJO/7ivZ//4qc2rpm0uQesUa0nAwLxwrKKKlkQGv/h2unzx9OP7Zm4el2vZ8sphch9Z2r/dt9IzjkiIiixg+7e2zpP3jr33X+LR9Y2dl6TbL+qcdFNQ9e+XlyWn3hkcOT+7Mh97vQhVMx5oQEKAEYZzCWp6Sd7+vlf3vaRF5z+xksP/PXd6z70SKqfeMjMHR3ZumZiZBjO+TO3dLS1+91j649lM+3luRziRr0ejaxJ1o6uH7V7lqLJTv2l1m6jaKgnA8ylgXEHME87SZJESmdZJgJKae9EqSh3LqoPRfUwFLQ5u+bQeJS08kG/2YKJqXEg6ff7eZal6aDdbidxC6V/eH+b1HLSAFRRHFstwzZd6ixzljsTuwjHrZuzjodHmplua92MXCLMiFp8O88d5k2f6EZEuy64LOepb996S031Fp84fvTYUfWeUf+2JTxFa9Zsa9Rqvdn5sZmxXz/8J+OXXf7B//jAYw98bmykefzMwScODDYsbcyelyZrLjl702sOH/q9l71x/9c+29ZEBRkmz3MisrkzxpjEBCqYZ1dgBJiNMcIgINoUOI7wpNl5bQicd5J773UchZJTo2JQtVrcH6QFokpkqNUa2DyuJeg4qJ6GI9psNqutUmism80mlPIUSinn3Nq1awv5Hq1ExOfW5nma2U6/kyQJomYxgz43h42wdhIEjoymLrhYYd11jcf5GVxo1nRL11rJNRg/w7nu2vbJCzfMnlh4NIYx5NZ9Dz/y+H735LGjmzZsbtWeXD+5Zs2aDa16K4rH2Pmx8dFuZ/Dxj37g+PShr3/z22/+3bfuvPCKK57+tBMn50CUiZvgVoZ7q1sTDwHjRAGhpcpNkop1kLUNUhIK0bJnYF2CboqUAcwCyIyBaRmiqGcuhqgrMbFgy1S6WFwky1Jjzq+OvyzeigdBFQSNCZCJmbG0WvLOCWG1L2RmjSQAAVTsWAB83cRoFdfj8aHoQ3/84eF44ngydqR1YT2f7pk8zZQwpnnrgd09ari/n/zQsxsPvO7o3z02WGcUgtcevAdOctXLWq3u0c/+wXkvffElh066LE2HapPEWVdpHfe8tZoaUWTTPBPlDSku7d+pzEaImJgIAIKAfojLhafCU1qKCnpWPCPGcgiLCAAaC1d2RvCOxXkWAUXBSCdMJuH/P2yoRh0ipJUgASmVqANnMhfFjvMoqpPtS8+rRDtWSfNUd/y4WaqhU85oDdp7r7WIh00jZ7G3iqSRKGEk8ey5cpIACM56CIHS9lT9L6ByPRD+pA0ABBESEuAgY4mimRDRgyCVjW/ZZwOAC77OZToBFrZOAEEFEnxp7yEizgMAEwaDP0TA4ObHzMzOuSBLieXetwJDQFn8hao0aIZUp6bKYSrSoAQ9E6FGQlQgoElB+E2lQhmW2G9kCU7AAQlFREHBKhS1VjxxUaBoUFgGMSJSmgCAFIAnkYIJHja4pBCQijz3VK4iEQlKVfuqQhmekQgUklbA6D0jAjMTEJFib4koMjp8jJLkVt4QAtIqxMahoaHBYKAi02hE7/6Hj/z3/3xs46a1gwF7p5gWlB0SnTsWYC9SPEcAAcZfOmfuty+a/eNb13/8santw+lN29obm3k7V7ceb909PSQkyLmwKB0xs/dWKXQuh6VT/uFbenu+a4yJ1+7Um6+Itlw+dN2v0bN+1/cWsyMPdPfdmR66iztnQDwAi9c1HaXCZ9zQG3429ccXH/jHa6cvn5z8k5+Z2ZMn+4PpiRf/NURDe//t+dnJOaCelibJCCfO2e7DSkd114WFKTcvG09dv0Pg3BeDj6PUG8UpuVocOZuDVibSQVSOWFye6ZqxNg3TFxHvnFMJ6ETF8bCXVKsoy7IoVp0ONFtxvxFFsSIhlnTQz3vd1DoGZOTFRCsnkOXOLTnhOfamXm+2O/PaQE4jEwYmJ5IeNKMoQsq9qBrr5Sxt6cYlu7ar2ksWF6+7b/LWE9/96tD3ZfbnfMnfvWjL8zb2Hj65+cc7r4hfsf0FL/zG17//5U/9c2ws0IZLL3zazKljB2cfcZ3+iT2Pza05eOnVf73n1jtfct3rtAeJoziUWoo0W5fneRLHSZKEZqIgy4b9kNaxNtZa0lrrKMsGWmsdxSHuB0Ri5iwHryEPsUkcOx1HgRmSmCgdpCYyeZoZY5z3pCjWGkqhnNCyhKCJJbFBa62NAkWR0iiQW+ed9cLkRWsdiwxSSdM0TiJBilumVhsH0ZymqVBcM+ITZxbSPlnp1eq1GkWe89z30acmN8pgMrTJw85zRp++sHiq1pTrRkamTz+5MHdm/ujuA3NyZvM2kd3Dw8NA6HJ76vipRx99FAWvv/bK3/yN37ryqmt/9/ffMn18QWsFHjAXoMKGpdoVlc0uVTvUol1zjIgeQSlCAvRBdhlFIQO61GmttdJegp0aVjPAKn2KSDC6V6Xta0DzhlDrSxYsPEXHgAJWJZgA8yox/apQ8MKEpMp0ooCMplyKBFZhfMKFaNB9ybXPxyfMD7/y1S/f/5C+6PpH9rt1tVHfnjXW9MFqzYmWbmLePPSlXxv55p+cesv9gytRsWUloHJKG8PJicPy9LWHP/HvL6ttSu4/0WsZIKUyK47y1oB8nGTGWemzBoWRGK+9rhIeIiJj4SBbqlyFzimkYWYGZqikjBGRipE65z5gyKnUTw7OF8wggXVNqCONAlYKAxsAoPK//z8JGBFRkTAX0qGMdQ2nZvpJMsSSLy9lI3U0SZyR2KweNU/ayUGyMGxM1Ldc005pjJRWYCZb6yV3RlNkwIl4lwFqkXICsaI6CqEx8/KU5X+4vuIxFZ5FPqC/iUiVSPvQ2kK5ughmbtWM3Xvvy/aaiHQUMbMSIB2F8QwqkkIZHQXAMYsP0jpQjWqKmkbEl9hMa20ou6uGvvhdRc+9imEMAACZzRWgFhQRrUypmGECkKpqfEWkSO0EQbSkuMCy20YABFEQ7ELCwsV7Zl9WXmG3HdDvzjmttRWXZY6IYm2UUkBQbNDLFr9CqAAAaeWtkzDhJ0IiLnWsAoIpbPrSNA1TPRIojMaD69FgoJSKa4lzFhG0Ueyln6WoqNGI3v/+D/3HRz66ccO6dOAEsTZswI73Bh00sadgVSi
hBFICV63pve+ZJz/z2NgnHp8AgANL8QcfXgMAUGj1aBFnTIyI3gkCGq0FrNYE5TQ0iiJcOpnNHe3d+yWOtFm3K9l2VbL1ismX/yUi2dlD/QN3Dg7c0zlx9/xgWfu61toO7Lvu3PTA6doHnn3k7K1Lr2kNzfJLaPwXZr/815vi9sbr7cGjjUbSIaKjs9maJJocah89uRTxuqxp+ej8Nz/z1Wede3FjfEMHJa2pOigSPTI86qxtt9taa0IipYyO2AF7h060JgA0WucDp1SciZBSmcs1NupNBUp7R/XWWNrv5ANeu3a9d9jpdBh8p50nNZ3aNrhR0e2aBp9nWtW8dCM11ogVgcrywan5Xm47UaOWxJEsL9LIME20ullGqn3+jrW93gZBOPzk0cce+daVO1/yJ5ved+q9u+3iKZiobb76+vsffOyz7/k9zafXrbliIZs5sP/2yy9+Vm9m/hg/efjAQ4vXvPD8S357+67X5emsJgG2TmvNIuIch5GOSB4mwx6ceK11ZKLcOSGHiqKgg8peGa05UCvFEYXtWnDdCroHLgR/osREZT1rnLAxJlZaFU7wxTQmHLncu6LfQiBF2hQ2XowQGkUymjyD8xhm1Ybq9Waguud57nPv1MAkcVTXEQ6JZzHCaQ0xjZWmnJkyEEEh1IoUWefQeYCs6/OhRtNbs2Z4/aapa1CljpcXlk7aPh47enR5qT3oDmrNxhVXXvL8F97YTwcutze++MXbtm85fOgMKm0g8koAETxKAEARKKWjIGVnnU1T732cJEprx4EZAoAQSwEAkVLLHn1hAOALpTAIN7GwEIDC0JcEjDGpzcF7EYmN5uA/oxUjwQqasdQ1Ay8iBFCyolQAZ1HhHIFh9qUBmQVBuFQ5AETrHAKQKXqUQLkVQNDKWh9FCgR6/YV777p3dO15be5sX9fMegvLYmvQbZrWoG0HRl46+rO/n/rEvy+8+uOLL4gVRTnUGi5jEajP75n5tWub7/3XV7XPuMVTcS1eEjFkDYPVaAaxBxbwRIhaGBCJtagCi4uEoQkLazwsB7AFpgExjPgQAHy4zUFbH0EAkbQuNOKq2SwIEJFnFkJA9BD8kdAAGkErhe1VacBcmFigL3cEAJqUeM8kUQRIQDpl1pgObrp4ePfeXk/Q8kCJilonsrFu6+BaAIlRKbIa4k7eW9NYuyHa1Ol3GsOJqWPecbo2LJxba31utdYaddhuklYqKtJhlX9JgFG8MCoipcR7rRQqY13uvVOoEJFEoSbvQ/ZZcZYkDPtmAGAkUEjCUNgIlTNw9E4jMhIEzYaIBFA8kxSVpgCIgCJdYbUAUSGZIP3BAiKeczKamYULL20QcMIBpS+EAdvFIuBZKPhrA6BoUiBBDDLIdvuAOirYEwiJMhGpUCph6JVJgAU0oTK+4okJCq4y1oRSb8QLgyMiAUBPSivvfc4s3hORUdqyzfM8KbQ2g4EpCqHkDgmDvVqovcPjUAoD/CN4dpFAeCFDxYDs89wCSxxFzrm828tdpmPN0TAAKWPHRpsf/9CX3//eT05NDmfMDlkhkSimggehmDSgMDJ7IhpP+p9+/qHdM7W337bGeycQtgYELIAE7EX6RASMHiSUkezFCymKBDx7n/s8vEekDWmD1vGRh/qHH2izt7pe335VfcfVjXOfO3zNr6zxNj/+WP/gnd2DP+ueOiGu9+UpeeDp8dceye7IF/9i+EU/HPvf47/8X5P/Mb5uFM/e0nO5dOfdjVeYyQkB7Y/Nmjvuy0/OC7Qm77/vyFdu/tn/e8vvJcayi0kGGAcHM2406sycO0uRgWBuDwSaWCSKIsveKMUgMaCIQiBFzg5SBYLGa62T+pSIsHfeezSYp5nNcw08NjIeRzXH9UB/BRaAutYEOmrVG5Ex3vvOcrs36Nt+putmNu3q02lcr9WiOI6wEfPFZ235yVRzbOLsN731b3lpkM9Oe9fbfMFVZ+b8+9/z1nZv+pd/48/PPue6L3z2PxNaenjP3RsuW39CHehb+8QTj7cXZnLLRFprZSBUPuKTWpyQISJr7VNEDr0PfW0YO4NSSqlS4ztU1ViJ5VaCD4gYKVXI+peLRq01e1c1LpWBlyoRiRpIIXGpAq+RqLISE2DPoV6NdIQFF5OCoETVloV/SW2ukQDF5pYRVGQipRHABmyIBPGKlTUYaTOwfUR0vNzrgjBFUWN85DKayDZtv8AoAhRFmrSxHjyDAdfpdY8cnY2SGpDK8jwo2foSrxFOtYOi/VJKhZar9LFZgaVUtwsLoxcpDu6q0kREwAsSeSnk65RQqF2K4MtSKFUxo1ZExT636NgqGb/Q0AhoIAeMKNX4uuh1kCJjwkjNg4RC3jmrACOjc2cVktJaBJDIeR8pzdbbPGu2pp54dG+tNuztcNzyc0dvrm9+ubU06C6vH5/Yah/60Lr3frd77QeO/VqCuTI1wU6UN88IyLH9H/qdC171xitPHQfPDVUDZZMYxCJmxsWuaLDKXqrEkHsAZqU0KWXLLqRIS2GtyxKc9ZRSkdKOGKr5fBgSAq3cvaJYKW4OABitWcQxA4SbRh5RKTLlFDr0QEIF6RNLzDQzh7koCIKgG8CaZmJtr6Wb//TGXa9+x22nuNZI6sh9k7Sz0d7YXMuyVbrOIizc6XW3jp3TGBpqz6e1uvZeTKSZRBiMKZgt3vugwaTKjm31GAMDPbfCJPuC/7Oachr4Bavfw9AsAwIBBoYuBi5+IYBVyKUV93nVyoMdV71sMWwAAAAXJjeljAZR0BEp5Kt8Rbzm4ruEV4HLWSoR6QqiHORUBYlQhYFQYBEpIg/irA2XmXsLlUsVAHsu6nwXXu+y81YEKI5ZSiPRUrqm6JuLgTYiGWJmheSd94DgWQEKgo4MM2fOhjW2Knd5wCvqYGEOoVTA3kvwVhDCMEF1zgWxjlCCYJ4zM2bIGSB3lcLxyeGP/9fn/vnd75scG3aaXJpFQt5xl/th426YcwDPElZLMfn/ft5Bx/jG725xYhAFi3kGB0XbUHkxCEsFnRMkUYLMXSRBFOdsv99zdhDX6pFJAFSo8AQY0nb/iR/0H/s+OE+TG5pnXx9vvXboutePPvfNftDun7lz+Rnf2Tf6k6e9Yv/Xbr7sg3Ov+au19fdd4Tb98+C8H9duvbt938ONNdvxjI94Xy5Km7oZG++wbk4N2YU5+d/P//A5z3/hOeevy3p9jCDxIV6iiAChcY4RQFAQFUF4gZk5zLqiQFIVYGZCNMYLQpYNcmvZB0g7JUnSSGo2yxuNFgEKchRFykQi4qwf9HuBkeuFc5s554zWtUa9OdQKenCjjdYgzxaWFpeWF6MoikycNBsvf82r3vTrfza5duNiLx1+/i5JsbXtwn/8yzeeOXjH7/7RB1/+il+59Yd3nD45/Yzrznv00e7MiblkS9yHwf69T7SXl0fHp/pZqsOwFwASpSJjHHvvfRzHzq6g81eHJ3beAzFzYL2Gb8/FaaBwWlajDZVSztnq3Ia/D/HCu6DEuzLMCd9bVM0iIEBSQJmqKVPR3IR4F94eJETMvSOiMDavUE7siw
Lc5vnS0lIzabSazaJLIAoi1QNrLTMRGd1AFTGyiFdRwdBtD+YZfKRjmzOi0lorkw3yQRxHipFImzhGxCzLApzburyiEBUlBQRkowrsAqVURdwM96QIRauQq+EOBKXMKjiGQgQARClEdM55lKCVU8ZQUKgk7Go9gwTX7uLGS3nVWGr3UDDiCLlqFQJWYWE5wSXyGQgVAgpUVu25dcwMSrTWOfuaMbWoPt+efdf7/+p9//aJR27/ydTai9cmg0d/9g9nv+Cfjrbbo/zExzf9+WODbW879PYsyj1xH9JmPLrv1Oxlo51Pf/qF28/esP/gfD0aV6YNYIQk4wHpGH3h/RSeNRGGTRcAgPPV7SpKFioZVIjAhZctlcCWUE8yoi6caSVsKLVSIILMIW95BCciIrHSEu4OIgh4YARkLCQ/gzCFh8pnTjQpoRUzxyqUDwA2jw1hvjSfD1/1J3dq58dj7g56xrR57TQoacwOEXsgzj16Q1mWnTN1fpZzLj5JAJUhdha8wRUxSKVVjDpMLCr+wuoETIhBspuZyRRCLp4LcWxFWiv0uQ0JL3xgKSexUr2fQfoRixQY2D7VL6repWINXwl3YJGqi2K9RJtX9yQcVaBCriYYxYSPbdlXlWh1A8UVp1ihLnNzUbOCIjI67GXD9pcEfGBgVwxAhQgUmN7Oeyr7+BCFJASTyvgBA78dypMBzEwCJBJiEngO/H0QsBxm/QrCIC2s5Fcph4eXczWAkYG1iaqX1hgjWnxuB/00BAdh0KqmI2V9e2Rs4r8/860/f8c7166bBEIaOE3KIevIiPXs/Jo1U6DVmYU5zp0wo8h7rzt4/tjghf+7c2GQAArialkYFvGCjFi0QERUEuBFRJwfICOIZy8g5CinTKOAhxjYUUHBcsKeBBDEzR1dXpzP7voy6Thec0Fr55W1Z12+4dj7Nh7TNjn99sb4q7bteNcjx68dwF/d6M5qbt+y4aIt9cHdB3u3PjqnANDWKKqpmm9qsv1sx1nxJJz8+Q9+vuuC37fmNLlxhi5CYFICCYbtg1LKCrOIUio0PBSsukSK2UlgOyAZoxCR8jzYyCKLTTNWShDq9brWxvpM60hrDYISSZIkzubOOcc+/HAPMuj2hCFSCkQGLo/rtbFID3pdb533vt9ur59c42ESatGIi2xty8iWNR/74Lv37rnjb9/1/YuuvvrQyYMn5w50srmHnrxn+1kXPXjkISBcOzl1aO/BPQ/tfs4LXghZwH0UJTT2+31ENHFkjAHBJElCJhAqaPVElOd5LYoBMZfCWoeI0BfmdFhZepUbtUJVpxz9KaXCGSClq4dfRQGtdaiYmV0oFaXkgxfZgIuJFiCGwkQICo15QSdiPTOL4aIGV168Y2e9996zExQnhe5ghfnKrbXsbdZTWhNJWLW6zCllEEjrGBFNxOH8GKXQxACojHHOGU0ut0EDNsuyAsdR1s7MjCLhTAJ4RPDIgCsBGgAKTm0pGVF9+0qyFEEWJy78JSCFyFtsyim0vi403SHqBWq/BOekVb+rKqeq7ucpn0RABPKi+0EpiR9KKU3KkBnkg6CgqaCwFu512sMTI3awSJH++Od/+8Pv+f5k4wKA7NDhe1/38l+uGTi49z8m17/ow2vflULyhoN/v2gxSgB9bThKjh07/gfPrL3z/71yVjUPHu6aZAy5j1ahkpTSqNbM+4O6Ih/sPWAFs1OUcaVySyXdAKVoYojcJBAQuR7Ei1AQOfIu87Y4vQTgBQiRQSMFaopGDFtwF0o9KHKqF2EULHerq01wRQQErHcKiYNkRIGRFhFJM7nowtGpr3WGGr4/wMZIrTPPBFNxcsBNLQCAXm6xUiAOBCLQSuDcqV39PDOGYsNZ6hQyKi3ClWGRFw7OBgBQEflWnmZIY7bo5MAoCI73FAFAUGcLHDkSqRRMuVzqFxvhsAkvGmMK1W2VFFfGBoToC4J4OHHh6ZgSp7kaLl4RXovXzzNbz2WCp+DTsmo+VL3/q55q8VAQMXNWgy54YkWHHfBnTzlfZa1LCMgswlIEDpYAtgMs1FiL90oRqIJr45wjAee9MQZWEbQQ0Ql7a5FIKU0YEHsoIuLZl7CP6kp5FbVJyv2IIHjrAEDHEVuXpmlx/KMkk/bY5NjPfv7w+/79E5t2nsOS9TtZokkDsvMiLApFqUarqbWaX1xwiID4u7tOvebs+d/84bY9C8NCApKH6FLcuuCcCsoLA6kwt8eSig0ARtcFPACEwohIE0beYc6pUUoIQQqbnUIrQIxzGVKO7PzJ+5dPPHL89/4UhpNmcl1j7qqFsz9/yJ944Gz4xC3wpS/yt5pHbzh4+NKzct6Gj85H//7w+L55iZXzNmI9OLOYnLnf7Tyr7+GnC6duGhpdm9u2MqUvKgIgIIvNc4yiequZpqlLbZZlYaKjVLn2WKV3JAxaGVMzUsa9EBW5dOtiYBHxrjCBTZLEc5xlmXh2HMxCVH2oFeA8eZpl3ncHfaVUXK+RF/GcZVm9Tl/59mda0c6Xv+zG7Zt3/PCW7zz6wL3vfu+3d5y9s790RjJjrd20bor7WWemPTFWO5mfGRkdOQ3L3UE/3Hldq9X6vV64kjiOgx9wP0ubSSMg4AO2JYywwhUaJOd9gBoZVIjIhMGxmleBErHEJoTqskLnhvwR+E5F4QxCXOoQEXj2DKKqgwoQ0KekiKCQPQoHyBPoYM9HRb0ZOvIwtYjj2GW5994Ys37dBm0UW+ecq9frQZ82z/OwSIgUse3m+SDtucjUoihCNOGwBYk4rRWzY2ZPYJT2IOysUQrYE4ECsNbV4wQAHPFqP9Eq7RXm7SwIpNRKdADPVZcG1U1b1U8opcICKYwBrPXBXjvUsNVsXwJKUxgrP3bg3Isvn0gVCEKSRs9QETQBsBz5eSoyGQHqYMTnGQGtz0won0h57wnRumzb5vED+4781V+8+ZffdPHpg8voJ5oNPztb37Bu+Ec/+NrYxIaL121/S/MNG1T+tqX/PJPXBsPj9YW0OaSOHTj0l6/b8M7ffd6eM4uS9Ulb8mfAtMQ3nAxqmLiBRrReIiIHgYVVsVPCokFROJmEFNJtRSAJ99Mz++BXSIVdLhaELGLmPM9JFaUPhsZFwuuHyExIqBWyBPWr8N+E9vr/oGCrG4uIjMX61IOoQulBLWe8Y9tZjeaTfchj1jNzksdSy8g3zuTjA0p13NbEyoNz5AcMI/HI+uHN1maNOjbqUZ55jBVbRuaApAvXwswhJVTJb+UjKdKlOEkoWcWXeplBOwxXZu+qVH8kKagsCtAJO7dirszM4LnC9OGqJliRglX4L+UL1FOWZVwvCuPqU60eeiNi6QlTJIBwXzWSrCIHry5DV+bbYUdgDDNzqU3mmQFQaY1YmF5RlYhDBiJUSEqhcCHxHd55X/kNSannyhCYeuAZSlmPEAeKH8XBKAKBAdgDcqw1V/x+Du5jhbR1RYqD4Bm86oCHlycMlqJaopTq9/tOD6Ymx+6+6+Hvfvee//dHf/q0ay7/n899+tOf+mRjpBGYYNY7JMXsDx87Ap4RiUCetXn5b64+/
v4H13794IQgWGEdOv9ST75cQwELhm0YFBcAROy9R4oK3EhQAPXI5IlIl8HMs0UAY2IpRpJKQUaswAOSdc6BEUk6y5tuaY/dIhNSz9TNY7L3tfL1W+R3T7bnNHjBnOj8Cf8fz5h+24+mnugQ1AxZaNZ91q/vfcLUGqde8uKDzdGWjYE8YoVsR/S+kDKVFJk5SQpZmICyDJJBHNbqWiGArAIDhvehVquFs+994VVaScpIKS+DiEprhdoJO+/D71aEumGGSfXTASCGck0pVachz/y866/++3f+y+nDJy677uqPfuCfX/mi1+7YedbM7InUSW14OAPVs37Tutbx4wc2X77hhEhq7bqNG3bsPKvb7Xp22js7PDwU8m6aZvVGAgCt5pChYrEUKV0eV0ZEVOSdZ+/JKKWUeCFEY2L0zpWzXygzsVIKKo374AFRTqFDaxWiVRjlQzlWqs5zVfMWJkLylNgHhAH3UTEdq5G49z6Ah8sWHAOOjsG1Wq3C+kZEKvUGZlB1HXMjwn4/9S6vN2LnM0YLEhMqAAJPOo4EJWcr4I2KxDsAqMU1X2ra5XkeGuiVBWF5/BAx4B3+P7LeO86Sq7gXr6pzTve9d+LmpM3SKoIikkCZJGMw2M82YDBgP5IBkw02yfAA++FswPgBFhgwJgojQCQhhCQQQQnlrF1t1OYwMzd0n1NVvz/qdM+I33wwHnZ37tzbfbriNyDNT+cAQMAQs3kW1n7wVgUMcksRmgcpX2QbP4pyCCGEIAoMjAqtm5uQ2u82O3R7D+0WkJrFqsVfJbSuyBEoqSTmOtpGARptEOdJEiM5AEqpXr1i0Y0/ue6f//GK1esnf/WLe675n61TyzoHjs36EvbvP1QQ7H7s4Q+dt+upk7MvuHLFg/rlKe15t7rjoux6+LlbVn7wNV+7d9tswE7qShnLGDURuJAoETCiRtcYN+VDtdBLoN21q6DYTVReYG4BLdoZG6dbEeXsL2mh0IrfZi2OSgTtNlQEAhm2RwRMOMkonuZyuLDEBABSwAXHXiyFE3nv68jdMTr5OLptX+z26Lxl5S/2zBwjWl/uP7h4UBwcrzQSOE8UIPZn+xvHN3SLRYfmji5b1kFEIBQ1t3RVESKyilOphX9rWgiBtkOFmdns0ABI0GZTRIzMSRsb4OZLVU3EgdSafspY62ajnJmEzfm0dMiNWHQeojlyzmFr75jvXb6DRCSgXLdiVWBjfBFRBOQmN2sm6aYFGGxasKJiZlXxztMCuSulbDGpIGgEZnpC5lZW5xyrsCQk9M4jorKQ84jKObMiIyhoO7EDAHLOIltiLkJge66JmNl2ZFVdc0xYBnIUFoA2tGHlYkNSmL9BIkWDniMiYBnF2j5mrxv27zr8qY9+8ZKnP3flyhWDWXHcDZg8UCSqhZNKFz0AxsRM6up4wpL6U5c+9OOd0x++eY2IiLPHuOVDLoDrm2ZDswhv5OQcoWMxDUsQTQC5fkkJi6JAJON2OfQuDyBBlECdE0LEmARIwiPd+OShS8lNq4geLhiGsLcDL34ufOfrcNwkDKOKcj/R433/pnNm/vf3hqCLJro94N7EWGfC9++6Y/b6n9/90ped7QZdcjVYY9CMDENRFEXBmjEHIQRTkbILHlPNKauqtQFWEFli6QrVeTsZRNUUfVnkGq6lNhB552JVQwM/8t5LYlV1iEml2+uhd7EapZQ8OfPofNpTL33RS7Z96C/f8NMfLToy7P/nl7Zv37/7+X/wB8OZfjUzmoQIsT8xtcUf5LnBEAGHo7hk8RpOevjosUDO7969e8WqlUuXLo3Ck73ecDjMiIBGp9Dmda1Ee82pCCGEwA4t8SAAOt+mQPPPMY14W3m2YTS3wkREFDqBiJIKtI9Qc0DNp69BmSICqmj7KHkkQ7oaakMTo6rBjlp0SXsDbMgMAAZaKUJZ17WAGBIbOK8SvfesQXWkkACHCigSQIOnMVX1nhA1QWQRYWVW7wttxkoGorMEGTql+dIIgtqcpqkhhJXIqaa2UXOQV3eijPCbCaaNO2LNHyKKxhgpI3ex2ZyhCkjNgqAipk9pL0FA5MjCGer8YBwAiLwDVZOMno/bhAogyQNG0ZhSG0NFJAEDhpSSd0TBx7pmqd/6lj8bjJau3nROLQcXL5rYfmS4dHE8dkC7XQWceM0Z+589vfON16+7fbS66/rV0Z1hbLsSxIF+7MrPPHR4joL4kgpBcVz4rlBIlSAzFUJFkWpwOiNuDJrAnJf+maqb84FtBIHQKhhqdh/YAKHtkgbnk2ZKkl09VXWAteR9mEGQENBm+zUnMtskItMU1cbdL0fYxGq/ountVLMGBylo47dDDsamFj37hMU37+z3lk/vjLp4bKycHbmxw9WSfnlgwjMBR/I+eT+XZtYvPh6140iXTJQpCQjUKmPODxIHRx6zOTcvkBx3iv//HAxZLAIdOURpOb5tMpBW3WnesTenP6sRs9Bp84uqZtmcf2lTWdo3NSejP7U20vbPJA97c4MJ2jySLNF6d0dAiEDez9NyvPcIauGFGpxmWyioKqBKFO+zq5hNNrI+uQKiOp3fIsOCQUUUVpAiFIQkIo4oKUOjrYELugVIrAAMmlREpUW6tU0tilLma2CVIkJ+k42GCVHwwXlzKsNmH7cw1nnvjUbIqGVZJo6ION4d+9u//sT42OJLL7vo0e07jhw59vBDDyLzkKMn0piIXC08SnWBzked6Mjnn/Xg/mF47XWbiHw24XQEycA92N5bVVG0TVMjcNmWBAgqrAoIQojks5ZnSiJJXYmE3lKC8ZTMNVjqhMDkAjj0OAVfD3jWY0ti72g56rNAB3AESyMum9WxAcwIzMLkohIWFZXrTC2fKv/0okuuvuPBxw8eSe4ocJruLhYY3Hvvbk5awNAIbqoKLEiE3vW6BTknzMCiMg8zQkRRxtARERRtB4e2REOzvwzBErD9raoa89a3OCfUDLckBwAdH6wVjOYKKAIOkyTHGspOGVQSpzoCwP7+3uc8+/If/+S5d9/yy6WLlj8+u++73/no7OH9L3/tm1IaLVuxfnLqlDB+3FMuOvugPLgVH66quP6kjWXZTSkN49AvW7bk4MGDR2dn1q1bB4Qm/aiKCNzWvG1B14KhRIQBnPO+4yXpYDgsCNGkbSijc+2wWpUxHxGaFxQR05PLaIgmOmhiJYLcNYJBNfJ8lK1VRbAFkkE9wVpLTZKIyLRmmBkQkICUrCslIgc4HA6f0LgQaUADragMnOpoVEPtyQWXyDmXYoUEaSQZTIhCAKAMUtWJnHOCKpLKskzMsY6+CHlETo2tqbD51WcD4MtIVOMAAQAASURBVGxtBFaWtk+jCIObr5ERnffYUB6zYo7VLsF5MON0RES0MpCZ0TsbwzXDpeydZ98750ofUkqjWBH5TqcDnFp4ljaeowBAos47DMFikjUopldRc2JhF3A4HC5ftnhYH/2z1736He96167HTvntUy5a94qJ933kq/t3ug3rjsPaXbB2z18/5dhntq774kPFBWdM33DXrqnx9UeGh3jmwH997m+WnnDC9l2z
kxPjdV3X3hfk05AZRqHDPKqZO6oEyhyz7IeqZg+jZqTmwMCdzbLQtPmJGNSB2QS13nMAAILivDPaqF1288fNileg2mgFE2AgFyhDhIyOZXdTICuWAEAWJxNBVXN9tzcWyCXrHgz0pLqnX5xz7prhlfc/0l0WIPnUg7C350fVkv7YfSvIFZGHIF7FK/D65ZsTo/e+Exx2OoPBnCooZ5/2KlUAEEIofUgqSZjAmr0FKGjMZ08zR7yZiDQj6DaXaOPmxAsMQtQg4nZJm/JLJT/77cdspnm55G0fKLApfXPl7R9oBiJnZpw2GYCIkMgKRm0MpuxtUEOaoMZSTNHZEM4SBgAwaysso6rg0fsCjcGtOK93bbOlOoIjZ5pdzo2q2jwrY4ptUdJeAREIxkQirGIdikCAiCSthRQLBY9m/FwGh4U2uySzsRERqyECOYmpAYJAO9myD+Wa3O8JQtGdmiq/862fffY//vNN73jHMM7e88ADoP5Xt944OTklLJFZXd4fdcnbR/7k0x9d0a2f+a1T+lyURWDmqq5VFMGbLTmYRvQCl4t2NtTWYdjY3wE4ELOuDQBCprVjYiSOVQSEFQzeNSqLEhiq5KGomaXzq/SUncseXrP3yDbREcEmwR5MPa7v/i4ULlS9VROdJSB9GuzoeijHJ1/8h79/2QuW3XjPL/fsHazduOrBhx/9wXf/55G7b549OOgunYSq9tREUyJLuVWKJRCreh/s0FrzQ0ijuipCsCtgbYUSJuGUouEHHaD3fjQcOu9D4a0ly7bRLpu4FEWBDkyTyp4p9R4RY4xOgRFZjM8JnV7Xs1Yp0jAtWrLo93//Zb/4wTVaziwquuyqn9/0+RVrtvzBS1543KbuxJKJ+x/6yaUXPw+LJUXZ9c53y97k5FRRdkexpjrx1HRn7uihh+67M/aHlKVcnThgAghOPYGj0C3MZLAAr8jYEeeRyNd1Kruy5YTFmzcu6vYK6TjxxEIeyTQqFQHIZNuccyGQM4tQaxmJCMgBuXlFCFQFQYfoUIiiakzc2gYQkQJEFXBEPrhQRJVBNYrCLngBzR6lznnyCJRQsRtGkMg79K7X65nklkfSxGgMfe8SaARgAvIUpQKX0OMojih4QPKhEFUhZIRRimJoDxBQhhSdQhxVpBAoQCUZApBYEkvikrxpsqdYp1gHRE9IwA5FITkP5IMj770P5HxGUKIqS0z2P1NKo9GIJTkETyoOuKnHbZcJBFg4Mst0QjMtUEcJtErRJKtUs5QgAKiySLIKwCk4BSeiyjaAIk+iqo5cWQiCgiAJaO2wDJhKV/RHx5avGH/0wTvf+udvn+mP/vRPL/75jV8mDdo77opP/Ycv3MxgbkPv8Mcv3P+1B8q3Xj0IaXjjz+8cc/29u2591e9d/OCDt138zBfv2dPvdsaGNftQOEkqEZGdINZFGaaKUAZMDhhNZhPQe++dCbGJQyDQpPOAHVVVFhDlmIhVEhNrezGhWXNyHTWxR7LXRERyGcPlzEdPwTLHMNWcJLFYVvCAPoPnmgRACCGX2IbdBRYS1cQpJRNBLCh4peQkzsLKxb0lS7kokkB/JHMFxrnxo9JNnQPTFY/qoHPFqIjYi27L4qfMDPdOLaIw6Yd98UgkOnRlDu6EjDKMI8MSo4KKMCgTtP+JnDBJUHTOqfPCKqwgHBoOVW6IRFvd/6JTtrsJu1YpJY6JOQEBmQxL4SE4F7wLHh2p6V11CiqDC97kxljFBhKREwrmoV/hkFBAJYkkqJUTKqOwJAWTwzOuIVhRPop1zUkQMqqZAIJzRfA+r2Nd8D4U2PXQcZWkYV3ZQzcaVilywEYAxDsl1EZN03tT9PHBBRAgIl8WyXyTmoPEzJFTEk7IdYxm7JZ5laBR2JApIsI2pmYpnHcCyhLIAUsgF0LodDrdbpdESTLYfmGnYaUMMAQvqi6KYoA6aadb3n77I1d8+qpXveHPFi1e+uMf37l3167vf/1rEPtIRQJNInYxUEESg+h7ztrxjOOOvPaGk7Yd7QlCOTFGnSIRIrmaWFAQBBWSdTt5xsaIyApJlBHVOSaIIEikNkdyBQsM62okNTuVogO+AGejEnEegeuqfywNHUHlvR8vOqVAXemZp3fe0e9dfs0l8sPF7lHwtxbShZfd1D2F1vsQujpL9VFfHQSiVIwfnht+/OprP/7Nz23fvn3d+pXPvvwFH/rw3/7t339icvHJex4/MO4KCY7RgxZVXUdgRdCae6xJNKr265ESkh08RElSoIcoqU5RNYJY6xycL4qyLEvX1Mtj4+NFUQjkVtNGoY68Jw8CUiVOKXivqkoYQQTUBR9CQKJerzfe7XkkEZmbm6vrUbcIRYerkV7ytIsvfvZzjw7mBFMtneDLH37303ffftfKdWOXPePZoZo48vhDMQ5D2Vm98Yybfn7DN772hcnJ8bGi4/fs2RPT8MmnnXpwX9r62H1Llq5YvGQNupmAPU6JkYxvJ1HIoapEHJTU474ChkpHq9dMHd6/8y/f/O5Qjr3xre9SJkEsyxJTEnSOvC32rX6ElhbTACaJyJFTJLPPY2Z0viFrZgUoInJEwAwNsRUt6qIo5LpVVSUxEirnLsE3ZGIEIDGDGgDEmhN6V7gCWEzZIO/wPcWYcosvYG1lv9/vdrsppZQSMqO3pSiqZlPelFJG2aE16EgZvzEPpGoqZtXEgETosnGKqqgwZlQntChfwrZlsQbUqrEMOjLCdROecnEr2T9OVRPHLBpqcwijcDY4wMJ5w8FpM6bLtXBmjjJmSWRlVmNeAYBKTS4KjMd6bsPqZbfcfMN//dcnXvW21X/wrP/z0pf/zv964UXve8+7Nhy/6jOf+fy5Zz7r4Narv/i8wa/3hT//8ZinbpUG05Nrq3p0wknLP/LPH9x71B+eGyEQcoJ6VEcsu71RXRVFMDqBtDsLR2jSPLaHaAWZCQx7Ai1+u+nn5kc1C6D4RISU9Z8XDmMAgNA1o9k8EpQG1BZjREfOuVYZyjlH3hFr45ABlNURQREksikfMSgqCmiBDgGUqVdCdQjjDGOHnZCTqaJ7x3DpLAC4Qx1BmKgoFu5Amlk7tXZ6YnF/tr942qcaHQo4KsCDIhB5U29AH6vRcDjsdrvOOY0JEP2CDji1rbl3CtispcHYvUkUEZOwQ9IGdmRCnG6BhrYlDPPbIsROWaoAsMZUtzN851xA4tzciq1yicjAUNruL2zkoJksZ6N7kxoDYVBEAW60vVwjeqoNKzg35c0UmojE1saEXMfCBypKrqPdvrquUT0AaAMgsC9mXgguoYY6paqGCGlNFGzw3miRADUcfWPxB3I1p/bg5ZGVI49omjkLF4qmOmvvXGKqWNBn5oVpK8ZUICkKcnITU8XcXPXRf7ni3AtOedOf/++5uboWlPT0v969/7Gdv66lb9cjZrwLuOBfsPHAm8/c84GbN1zz2CSiIsvhw4dTSoQuTyAUBFQyxyyjnrNBaR4kOABxdnwUnSgY6EDRURa4Za8xjYQrUhaNcZQExDkkcrPVqOtTWRz1bnKslGUrjg7S+k1HX3Led7b
cdOOnYuLxRxd94/KxP/7m0jQaujTb1X2qOlv77Xsfv3s/3Hzz9/Ye2FNLuemR0/ce3n/66RecfvoZo9GR226//bTTj0euCdCFUFDhXIiRWViKAKoWCe1gMwuEEEJIoB4aZQUFTimmhEXu9Iyoog3kzRchjiq7U5noEQJYyE2pKEsAiDGGEIBcPapy5GyKV1s8M/NwOCyLLlA1PT3+tre/86H77j6wf7sPFeJYNdh59be/tOmE/xto8TDyzNFqakUfIa5ZvW6089jnPvf5p5x78dMuvNBv3Lj+0IERJ/Tjj/Zo5dGDo6p6ZPWqDeokpTQ2NpZEmTmUhVlhCHYGw0pjPez3T3nS8bfc/Mu3v/H173jzmz7/pS9+5tOfePtfvnv3vlnW2uy0kkTfqPTaeRXINkH2JSIodX5WvTM7B9Xsw+obMjsiOhda1pq9lGn9hVAIQoyxrut242uXm0RVhJRUUCRZ6LX0k1JygLZ1Z+ZKZDAYWOtcliUnSSquCBLjzLFj1JCbSZWIKsnMZrXkBJIFhAB8mAdttqEkb7C8S8yo4iGLLWujl8MIrlHJR8SM37JZX2PfCwAWFUzZAFhSM6BGRRFuIEUsScc6XUHhJA6J5vdxaLnc+jZmRmzKF2xyEiIoqG3lQFJKdZ1h+h79SIYrly8+uPuRN//Za573h2f8zbtu2LLuKVf8v5988B/+4MjuQ7+9YXLfla99ySZ5xumzx0b+JVdPkutCJZ1u0enh44/t/Mjf/9tIujP94djYGKAMh3Od4DkmTVz4rkAEwlYSElvBjVY6W9S0r50pJcE8zrx9MFoMvFJW5bSmCgWdJ0AUVlEbFbbTfmzngW2ARsRArl2rGnWNxFZfaPcl5wPMlWIb08GRgBp63gECCwrsPdyfrbTrOhXHpMdCOASLBsDojnVVYeTAK0jV37j0Au9KD7OLJksekXNJEEFJuWYFh2QYCGGVlKAwtLavhds6rz3/aPA6ZSIPIK38pENlm7hL/pFskWtgHSJYkF8BQEUNI1g4j4g2dRBQEDUdCQiuZZPCAjy2kjp0AOBcsEqYFERYmnF3UXhrVVXR1hwtIiHfesOfa5OS0eJuLs8RQBL7wnkko/J772PMWFmSTOprntz8shZqWny1LoDNW+o1PB0pSKAsqM4sLAkEWJTYl4WNju3YJM0QFnRmVpYPFjaNQdKsrG5vwwFmbCYkSQAY0ehhKFf9z/fvvOOBcnz61l/tuv3W25avW1QWy2/42fWTU2Oqpfd2WSIAhKI4fWn/Xy965MpHln7i7jXz8AhRB1kjwQEAAdsDQq08uM2irfJ22pQjRCTgwQGiQ1GVCKLm/ovgiRyIilSgRSg6OQ7DiIoQqBfjkX41N+4XHdw/3Ds71Rk/YfHyw6L4lPMuv3Dud//lSa++Z/GSUw+u8mFxHBw7ePTY0f5sBPrm/ictWtqrxM0NZut45OZfXf2Ln39nOBx1iq7zxSWXXnTOU07ct++IoIBBH1QE3YhjEXLy02wll2k16J2hNNS+N1IMYqqjNjoK2pAUNCVvgD6RqqpsDuq9DxQwOVYlo2UmBptPFoFjSpp5jwpqEC1VrSsBVw+q0ZlnnfHe93/kja//E1cM05C5wK3bf/5Xb//Dg/u3ajzccScWa88QUXRu85aTH37oro//69+fcOIWv2Tx6sHs/rvv+8WKjbs7UzsdF3v3Tj/80LanPOW8devWVVXFyuPj4zFGh5RS6s8dHQwGpXenP+nEr33pq2/+89d+61v/88zLL7viC58799yzh/1kAnYsSZXK4KQyUNLCbRyQKhA5cIZBaFYRRhpxRjVDVHREkHsXcKSKqkwNs6Ep7ecd6BZ+b3Ezoxkxg6hNK9VgfNoQ4CzpKjavoOjLYmJiAhHn5ubYOaNgtb8CCbx3SdSaYY/emFqchBq5DGgeOZv8iDCKamJFYlJLz1kCJYNq8mNMRERPyAdtsZLfnkMEVIJM/Ld3xaqq5BCE8kq4QWm13aEx5ADA7E6NYG3GgjYfcEAIgD6YCrCiolcUQRRUYCxLneJ47LnP+e3+7Ozll7zqlOP3fOLj/wYBd373misurQ/vPzpGfvWaGAA+dgvvmp31XAXqDmO1f88Db3zzK1/0opfvPiAd8sKsDsqxnnCUqJxS4UIKC9BgMl+jtfc6N6lIAASkqOgWIE7bzrXNx1ZntJVfnvuhtjAuaUwh252c2QZk0qc9gaKChiNSVEUWtgO8YLiNgAiAnvJoxJHdU9NWVi7C2HDvMM3UkwHmEGPhCypGs8uG/kiXWCmJKjhwjHHDspNSBZOTVIZiUAEFr2qwGObmc7XDTGauqqrT6bTwvfYrqZAAOArkgFXJ2S4IEQWUzF+PEHVBZgIAhdZ4o+2DAQDRi2iVRuScYaZUFQkdEJFTTyBKCpaKcgIVAVRzyyB0gGiiMN6oE6oMikAxxTylaDQ+2/bROYekzBzraM9X67vbHhLnHLDUKQu7kqr33rgJLEkly6GbwXYLfrR43Z4cjyYukAfRxr7Dxp7SkpOqAgsDE1GqaovCzhZnTcXQZvScdw1iZuwjFQcYvAeAqqoMK86s3S6NapdSmpru3vyru77ype8wV6M0WLZ88s5f3/zc9c+rhqOox5yb1DSGOEBEZhKRJWX16YvufPBo760/2yxARsKiBouuqspAbsH5b94kAgplflauH7PYjCP0iAhI4NUxcawUHCJ6DDGNpFZQ5/PrC6sA+64L/eERQC67nqvRrbfKslXxjIsObj5x3ZJlZ7z8lX95Cp/x+ep9H3nGlk9eM50e+/GBAwdA070Hu/926/QDxx4nx1wLoRx6fDuRc0hVVc0SgS//5I9/9x3vfOufvPK1W3fu75adQb9f+NL6dyuwsnt001AhoYKmZA5y6hgRsSgKF4I0CrXWyHnvM8CeHCGScxYtU3PafREkMeWBjaooOgJCKgtp5FNSSlHZVmOuoKrWMoR9hw5e8vTL/ux1b/nYv3xgYsIHLoIMxrujpVu2bNi06cKLnvXo0Yfvxp8pDA8dPTIxNvbAfXcePHzI+w4/5fwTl2wt77j9jri8v2HDphM3rt25e/v41CS4xiwTwAHGmI4dPow+nrBpY3927r3ve9+PfvCt7/3g2xdfevG//Mt/3PvgQ0+98MLDR+ZCGFOoU0qOiljXLVoV8tyjwQYtGDE1h8NGQAgLsFoMStYAGb/OdO5t4IakZs6j2qHCEpjZf9rjhJCt0dHmqoRE6ARyI2gIEc1s0W63K6zMTBiLbqfT6fT7/ZRSCME14uyZCR0cIpbOm1ElubxnAkjA0MJTpQWmEgJg6UObyGtOiKAqLNnEV6GZeokQzNfm9uMmddh2zMapV0eqYlACw3pY5BKR4XDonPMu5FqkZSiauqSoc64FSLOpO0IG7qZR5Qrnva9TzazeOWt9hsP+CRuKP3vN2x565OEvffnzz3j6s9ZuPHHf4bnnPHn8VSfsG8zg2kU6BgMPeoinnr2ljuPH3dM746wLhzt2HaHRuf2ZA7fddtu6U56cOCkooBeFWIsPBSge6x8Zm5xAnZ8KtGm1GS
TkxkUQTK3GU/a6cQvkJNtKpe112iyeOLb56TeKG8PFBHJRG98enY9Z1vA5O5kNFqHR2UYHaDfIOE6qKsySkzwKKBaF68Sjwwq9H1OnqpwC6NHR0kE4MAYJCDChRowkxZYVpwwH/ZWbiv4gJa40egYmzTaSJgCEonYmq6riBcCx9tNhg5xXlaaJyx0jAKCNlFqxEMtlNjk3hGzNZOTpJwK1shoDAlt5wTYJhBTZIQVyhKgND9BUirKMnTCocqZTZzjQwltjUor2I9LAcw0RzcymPC9gZVk+G8xc+kCNAK33ebGCiA7JHHGR8qZGUPIaYoFt1HzJwgKiSdkiRZ5DilARRObLYmkU6wxe69oZVfOe2zi2sC4EhCIUVVXlmtLuReKkqqCiEQB6427/ocOf//w3B1WdZLh5/QnLlo39/T9+KAG8+S3vHu8EUHRhAFEtDnvgT110b0D53z8+WbAkQlZGQAWXmoJGVJ2QAjJmfXLMcSj3xgCZmZ3Pj2FxAOzogiMVD8g+BGUmEleQiogkFkBUJEHx1ewIy1HRWe7VRVclWnr193eov3fN+k1nn3/p6WefQ9r9vT1//oX1H/rLbf/+44999bhlfgbKvUdLiolqVtKiU4DW4+M9YFLm8U4XPc0NB/v27/zaV776R3/8SvNOLkMQlpqTFOAEkUhtsmXKbhYiCDnVzjlAkMQWHaChHWKzDYFmm5Aka6YFCyNWn8VUp1iGIhk5uyisbVNRoLza6RRdRIeozIxEdV0VRYeBC98B0te8/vX333//j6/90kR3dT2nc3J4ev30maeff/yJJxX98sY90xec9NTr9t549PChyy9/5gknbPZjvem50cxxx63esmVzXfOwD+TqJ5+yvpI0MzsbQkEqVVUFcvaenB//4fe+//d/+8F1G1dd97MbZ2YH9z+050Mf/Pv3/98PFZ2xqjpQYExcF0XhFIwP2ca7hV/mLAPNdAiaKtLUZxBRm2WP0SIsUORTbklVTN0NRNU36gQWcESEvMMmYhKgCUA6cgTKzDElm1mZYHJKSRMXRRFCoGD5ZjgajYio9AG9cxLKsvTem8kEM5MvWlk7K+QJnQ9UKyPmCWquXewoSK7rk4pHT94BACpJ4qYPg3YYgIiKmUWjqsqSIDvrKQIzyxPlfrz3BKgCgNlnBoFUtRXmFNCak6hYVrANu3GWsGEu2Qs63wwAwZWhAwCpVmHesHHRl//7C5/5jy+86EWvuOX2e6/4j8uqem79xg0vPeFeVVlVUgEpkNYMHZnx0Hn6kl0/erj74INTy1f1Nj55/3333efGBqAl+BEqaWQCGit6dT0aae3GSompDXZtKsFGSheIUFQQOCURoUyr0BbS3EZD+2pTchsKMyVJDbc0L05iV8kj+U6nhf2jWvepwQoUyBW0Ctsd5JRya+WcGdW1g35StYmuDclTGnVct98f+tDHeknCIXulsWP10n7v7pXAwAGY/KA6ssyvWD+5YTCsJheNVQN0AVAR1SkkIAeSOaPaSAcAQKdbSmNj3H7eHJIoo+hNj72taPO1QoQkgGBO3u2FQgEGZWECKorCOWdTATLGoCIKeluLe8wq0wuupCgkZlWNMZpqc35hUFslCDe1YLNWh4x5gDrG9ia20QAR66puj4RQno274CFlPThwpqGClCUBGrUQRw6Aga024sbXWRfgBhacN5qv2EShwXi3+HBREQRP1Opg4wKZCABgzURzbkzPtIFlWG2BiKRQOGcWjUIaa1HQ8anOt751y733POKLOqX62h/eePKmkwdzo69c+fUHH7l9vFOmiiQMSy2tTv7IhY+duWz2D39wxt5hB0CJkCiYEROQzxuYQKRkrAjIBlDWGbT8nfnyVIC0QUG3SrqqKkIuFEU3DPtapQE2Bs8p1aPhCGDUcZMuTMeKvXfOF+K5OjL42fXXHXfCzksvuJxcZzQYvHTw+i/y335p4h3jqXpo72Lyrih1EEdUJueUGSITAzuM3mFk5ohLl2/sjNLOXft+8fNbnnzOU5KrSCSJlr1uhIhKbeIgndc5ISJBJOeUIIGiIiHGmKSZcmXCUkvLJswbOs6A/KYgSzFGZbaNhqqGTmk1mV2WJAyqrdxCb3wipQTMhL1R1Z+cGn/b2991+x0/nZnd2fMrRoPRw/ffLgLL1yzvre3o47p5w6b1f7zpkbvueOnL/nTR1LRXqEHGorgjM3Mq3heMMDYzPMzJlWXH3m4Ioa7rUBQrV6+894H7f/ij7517/nkv+qOX79zed0X5rne+8Ulnbn7hS172+J5jnbILBKXrsCR0COhBRZt7qwBtl9OEWESPpJgq0TpGtSkZEGX3HwO8q6oZms5HCrUfdyKJiNA5D82Cyla8C9CeLszD2WtNGDy1DVaWFyQh4/7Pe06UZdn2WJbGmFlUSNoUhYSKAFKzegjBEZFHBFFyJCI2kyQkIVu1gTXp1Jj6EaILWXKrxelBbmQz60NBUuT23Dj0gCoqCIDk2m2W/ayqFp1utoVgdkUJQ2wlf9vQlie6RKR5P2TanqCa4V3kvY3QqxEh9MbK7Q/vetWrX7F+w6YP/u1bfuvZv7ft0QOLl0/292/dMjGc6hYeR4cHoJ1xkHoS6h5VsQjlga1XffnkJYtHD99zz2f/63OnnnL+nt3ivCNyPpRcR+HoPPVCj0EhKSeTtBWTurQrX9e15ICRP4LBWc1vccGYNFdm1PA62gOmmUroAOwoNRIWFqtAAJWZY4yeSCiPn8UhMSCAt2G/XSXbTzfpX7OmYSNG1ihOtEWnA6ySeOVjh/vMpRRVjGPDel853JGmRp39PVRUiGO1OzbsP2nLeardqckBgBcSAnLAqCFpTMjeewEG22DZ5EbFk0uppqanyQGEWRbspBnEEHnYtJuASA6hVUYkQEHXVmAhd37z7TURIibbc6hyzEQaZ/AC51Dm6cJ21QGAa0ZvtaiZOiJ6B4oe1abArWkgABJR4bwx8fIIV1Qxu58Zj0CYY0pRhLwjJVBlFUSnxlFq0qFyyiwUVSJvwmaC4J23RzJ7SipYC2WHzdxcstqaQ2zq5vmHBUBVa072cKlZQLaWEqq+CLY6swTcbnzquvbek1mYN0GJEH0RhoPh9OJFWx858t3vXNft+UMH965ateZd73njpo0n3nnXfQ9v3V6UGhzEREDjiSsR+dOTH3/ZiY+/7ecn3X5ksSKnlICEiFiFAJ1DNe1M55zzImxh06B24jCLUxAJsJgYSxOdWdQ5hwAI6JDUgaAQuhRVmJwLjlxdV6PBHJDts1yf61CpR4lyTGix1Dg5PnXo2PYdP9177plnqSBRbxrc5pvOue+VP3WfXB4PjjgWIkheQbt1H3ulFzdyDp0rYl079Kra7/cXL1v5wH33//cXv3Lu0546MxrWdUXoNAm5JpuGIDExsw5HtqY1tKbpFqM3DBkgPGEetnBFhaLgcsGaDzwoetd1rq5rKkIoisFg0ApYhk6JiJ6C/aIYGUCIqK6T8xAoWHCpY3zSmae+7GVv+Ng//SVOJOp0+/XwoW333Xrz7ad0TgaGWA9P3HTSBz7wo
dPPPnM4HHrVMSRwLmpSX4hKOeKjwU27UFtzJqpJ2BdBHFYpnnnak9e+70NKuG/vQRHokX/pS15x4kkbZ471U3TOKRIDeOd94prAG8LSxh8AIKqgapVLdgpqkhwiGRtdEqfI5JzJa6CCqEJD8LermVQdOMSsz2coFGxpiw3fXFUUKIdPVU5JUMkRGWqRxXlHiKBaS6b8+0bmwmBvJiTimlWBDSVQlAElMWCj+9oI1lvnRJqxECklFXFEFSlpBkQREYqyiHOkzXbKqMbzFbpavmdEdJ6QEQAkJgHJUsSIzjnfJpLmZ62rMHZHWZZIZDIFhultFdfyykqy20G+MyIOHSCKsigTQdnVXs9NjvnLX/gSgMmJqQ3PfeYrB31Zt2Hj3GBfVyGl2ix4x6cWP7Ln2FgBk2NQBMKERShqmZkYW/76N/3hc37nwt3bR0VPHZdJNEISryjqAZGlQ77GdhJAYILMwqpaFoWIpBZhK2AKRJxZpPOiSwv7eAP1QIPgIxPH0EZsQ4FsVqnMAEVRcB1NVYeoge8SqbBZyYI1zaKq6hSR0AXvpHFfloyLbqR0ne3ViMiHEGSELKNhLLrLoah4ptiybO7Jz37q5+hXdLiHiCKp8lDXo4s3nNcfyIplhUTyJWBC0hjIIXhRsYex7Qux+ZghBGE1GHD+7I7mtUFYrFbFFoRiWabRzc5qGxmKoGiaBo0eedsZpJTIuzzX0eySGW0IXwTEFoqRK2R7jkz5xLTYIqcYI5EHUjN4sPGSUX2yeKcoZ8UYRM0jaE+5MwOi4L0CCKESAks7IqKF9B4g69c1Z5s8gcotb4Pyc85ZchoMBk4yA5CIrAhglVjFtm3Kt7VRR7fugJDUdE4RUTS1zGZRIswZXcQ5l4QlahabU3CIZVmywMRkT1W//MXv79z5uAuj2ZnB29/2jtXLjt+6bfspZ5ywacuWrQ8dBMdUxFoYES9cfezD5z16xf3H/ffDKxAZCW2QZlwph1RVQ8v9AkrBY1JhwcajRE27Fz0haFO82vsHQgajGqkwI4BD7xwQ0ij1FaJolFSJxqIEERlWgxSH0p0KWPXceE1BiTvkZEDjU4sx1Nddc+35512+6eQ1N11/787/2Ae/1OHv9YvPTYFnwqQ1A2jougHXBRJXFYVCU4oanQ/DuUOLFo+vXHXc9df/7Bc/vfW8C86e67NzIdbJERC6Vuu+qY0UEQXUZ8iQknNICMnoXoLBG8JjwcADgvPMzCyuKBAzRJGCdzEvKEejkR3jFg9BRN4HACiKoqoqydz2hOw0AYZhURQpaR2qV77yjdf+8NoHH75pydTKWHfHis6W9Zs7HRKVmZmDe2YeWbdubWJNMRJoRRgRiFyRGARi6buglYgAizHbPDmOSVgLCsMYu2Pjve7YunXHdcdLKvHCS5++bOXGxIReVVmjQIxaM6lHSKSAopgydA7NvS34hKoILniHQRm996FTMnOKnFhYRERQFJOY/5cHCIgEqo3eGDiQvANaEHZFPFLHBSJC78RhdNLn0VBTDcIIhTjHCpFVlQkyt8+5oui0da5FIgZlwDzoAPRoe2PRpLbUx4KwDBE1dEoDZEEgu2iGhGKR5ofER3GSQU/zChsup8y8xGplJQCKEDqhKCggLDA2N6V4R7pAzzL3hZhTrEdKwjFGSFGVFUFBg/fEWveHWicb3CF0RQCQHdWoyYGrEmsIFUmt7AkDpgKr/bt3fO5Tn738smfce//uc84556yn8aoNUnaLOKq7YfERrd3UevVjLODqI6vG4opeIsIaOjHpY4dRh3PDQXrTm957tMLoSs8omkgF1bTBMQGwQgLN1wFJYjKGpaoCYR0rs4hul75FUYAjH5zVOpFFkZxzAagAdC6QwbbJGcjIqXKKCTWhKqErSvQhp20gR0byyfWTxBo4qbKJk0tDFKZmvM2QPf6a20EUfFEUQEa6BAYFzsrnVYq+pEG/vn1n3UXGwVw98HsG2x8rdwKA37tYVZic1INpv2HFklPQ7SsnOyx1CZ5jEvKVqyNI4YPRxI1GbyghRARFqdkBUvDtf5hZEnNMRk515Ann5UcSaiKICEkzgtc5V/hgmSaEoABq6mop2nzBgfPola3kc1h4KgN4Zx+fa2ZWADJsi8Rk6xGmzMU0KBQnTsKqDJHNTEZVR7G2tqPVBkFUkSSSPKFDCI5KHwCgFo6oGhwWHhWI1SlAYonJ0BWIaM+puY6SWpUh6IA8tg1QLjLM9QGhTrHolOSd4V1qTtbfmBiLnUNDw0pMXsBxrkiMd24VSbIqWwCdVyQqg3qqUjSiYKCydAgqATyqR0RGSaAppaKkH1xz3bU/+snEOAyPjVavWHvilie//S2vu+rKbxzbW+/deW+3LFknY4w+4fru8IpL7vvl3qkP37qRAEFyGaQsDgkVmJl8QOcFUBVHVR9j3U1SRCZWAPAC3QgksXAFJ1UU9BhRxVNyNVJyIFBHVYHCxQJ8p9CkENmDAPdjmmOp67quh1UBvtubnvShpPEKnSsmvRQpOvCYkp8qxu59+Pb/+p8vPPrwo5/93L/O3nWg/G4n/XnVKRFFFb2SA0WHgXxITFqgC77jJwShnw4ORwfi3GDtcauPzVU/ue5aH8Gp5zp23FgiMlyOxYFeb7zsdY2D7skllZRSUAyKACA+e6hrYlIonC+cN05pkhxwsPAjjtZlBSCqkhCGTmlVWkbtELoiWNBmjgAyGg1UGR2RDwU6VXRFAC1RCVWqOk2t6L7+LW/pdlZ7T1jQ+PKlf/CKF55x1rnOucFsFcpJVX7ggQdYxLe5vR1UWufRZgIRicKREwEC5XrQesEWioaIvU7XuoF2vmeh6jc0ehCVwCGiqe3kBoWMbidmBSUi9uJ5BB8CIjoiFrVhrPPBIOAiQg3wLz85ROZXk1BRIQAVmFvkCCyIkcQjYVJgLojADNu9M6v2djLZTCry/zToORqCRAXRIyqwCggRISApCBE5RwTGu7dE3q6I8pTDcHTkctfOT5DFQUS7dg4wNerNxl+kBmOpzRhWRBBdbGwtVNXWOO3IhUV4NIp1BIWYMoZeEIwcIjDrA8WoKiWLqlbdXlnHqOzGxzDFwTe/8Z0De4/88he/uu3WW4Zz/S0nb/G+GBvzp589/qKXr55eNPa+t1+1e3f3qkfDSzalA1W5yA8nSmTRGSmwhz+719PSs84/cfp1r3/lkrXTu3an0g8GI3TeJngIDfXTVJSBbKIAVpcIMxI650QBjGaQazeykh8cilH6yDlyMdbeoAKcCOdVnAxh54IXAEnMmsiERlyjOChGUYBEmlSEk3MOiTim9hGwgZUJCAlBW6Xldk9VVa2gtvuYQDWxB4+IQF4ib73nEZiUyOt603PlqttuHv7CzQY/mo0jVigPHpz5nc2/s7xeciTsdM7VMUpVNyUCenASUwu9xgxrsA4sL3oXYLCg3XW183lTCzdYk/fe4CTUGBtbQdYesMJGrJIndVZGigiQmtKjad34tsMjJ5JaA297nbK0hSVY59GeyRhj4Xx7E9sHRBO3aucLr22+/ka0Nc9gyI8VNWgj
kQyaV4Io7BoAHWKGYuTtTFQiAgRSEARon1B4AurT2l8A8N6hZPYEQytUmt1TCMk1Si82XGFzZIO8xrF2mxBZUgL2rquqCpGwEBaBempq6sGHd3/2iq9V9SAUaTia3Xjq5iVLlrznPe85/qQt//2Vqw4fPbR00STXkcj3Anzhmff1k3v19ScOk6raJUXQBpFKqJnM3Fy0hEqUvCMFIm8eAgnBgx+OZovgBAOKK0khidMOoNQoYXx8rOjGfhVUii70qapdpXUlzEEdq9TK5fRYYg7gOEqUWHZDVQ0BqPQFx6RaJSwWjU38+oaf7Lz/7kceuX2yx/Dx3syPDvcvnfPfKwnUlyUIVVWFJEWgKmEVRwGVlVatfEqsqoPHDq/ZSGPjne9+71uvePkfLVu5EhlAxSG5AM1yBImUJbtVYgON1kb22WYf2Jzw+UkGQKorDMFuuvlRQmNj1TbKbbnWBnDJOuS6YDSixkG17JMPsMjMsWPP+73fvu/uX3/q3/5p1crFSyaX7tn1uC5h56jX6bhxvO6mG/7uwx95yQtf1RotgEOCRsPdHg9s1eDAYyKDM7b+YvaB26cFs95phvhnReh5goQ2y18UVVLwDeDFPpiBAuzqdIqe5Sf0jhAVW0mtzJ3XeXuDJyxp7EsQ1FoWVWodXRBJkTAL7oMnJRBESRxHVXAFdlymwzYn2D4O+qDKsHBrSKikJBkuCwAgoohSs3ghaeaoC7SvXSNV7yFrNuX3PO8XMC9ej5jlDHIWX7DmXBiYsNFBtSG2a9zftFnbOwUhDM54nuiyLbH5mwJgF0F90BSBbFEAgupWLiseffTu//vBj245/oR/++T/Oe3UM1esdM/93Uvvu/eB2286/MgD47NzBwVvfdbzl3zgny76hw9dfcUtxbnr163oP3jEucIhg8Ykxaru7nPWHb3yYOHrzSecsP+Qkh/6OKVdVmVsEB85ASjb1kARPLkFLjaaUlqIt7LnKjfKCOaj4JGURRMn28QkFpMRUQU0mB4qKgGSQXOBYoxJhZA8UquwGEJAo4SBArPX7HqZn8DmDcOC2Zdr9MjsRVoSiGSJAE7C472JPY/ec+TuL0Xf8Z2VFRyu7x7gb4PfMwaD7pQfX7t2w/f3Xn/xuc+Jc3HsBFLIBRk6JwIOBIk8heSEmYHztKPN/IiYVMy4wr6yclODe1dVo8gTUZZps+ZeFUwKUUETExEDWJ298DTaGfNCJhxrgQIhK8wQUSvlCE24AATT6zC8mCkFOu9sTCoiC5E+piuUGnqJ4HxxY1zbuilkSZsyw7KvqW0z15xQ0Nqg/HQYO9ECiyISmqo+5Gya76o0aL72vCEiq2hkcHm8RETkyFMQkZqZmb3Ov3lpgDkAEBrpj6ZSzx+EApbUVS4YBmXhODnn2fsiKn/jyquOHj2k6uf6yYfeo49su/+BHaecuPH711z7X1/83MTkOAMLYHD+X55254aJ4W9f/eSDAzL1TVU13rw2ijQLQjECgHeOHSYAZfHMzioFR8JVCA6AQINzBUvtnCOlsugkkiGPDuzYPRV6x61atffAzr1H9k33ep2yHJAfpREGQsU4qIIrakmFL13hItdAnghSimVRooKEsvAymjuya+exboGaaHSzo5/70RsH498PzOypI+iAVKUWdqHocRzMVHOLVp16+XNfWQ/6V37zk0cOH3jN/34FYDw6c2T5cWt7RTEcDBwQEagyAiFRVdf5AVTxjgwr2ZaSrCICRrqzprmuaxUJITBmQ1VNrMI2uEZENcWjHNSbur9RdLAz1l5qaXiwdtnLsqvKMQqhA4K5obz4pS+789abfvHznxw9NluNBgV0nfPLV62aO1pte2B3Gs5+4T8/4tu324T1nBe9z/AwZm6ETBERIydyuYBtf9ZKTCKyxRuDkohzzjtP86roYiIPoCCipl1gn23exoYQG2BbW+fa30VhZ1thNbWJHP6MhsQLcp79+4KcAETN1qeC4BUJMNZpFGsXvC8L43X47hgpRGhoJJhlFmC+Hp/fvbWtLTOTczleiJgtqyFm2wmza+yeUrJeLcvRtGXKPOx2gRJvfrCbbS7DPEKPGhBBjNEWuqHwiBCcl3peisFacafAmMMlAKAjh8TM3rxeJSkgKDtPiOSpO5zrj4+N3XvHzdf88Cc/uPrqa4vqla969ewxvfYHP73lpn1veOe5l11+6L1vu27ZihXeL7nqi9vuu7n44Mee/+8f//7v/OuBF527+lnHHV3Zq/pcXLe9+P73yv/z0d897cGrVk6ffPyJmx7d1e8VEyx1zAUOYKv/QGiDUG2QirYUtECfUoJWBD8XauiRgEAVyDtlQdKqGnnvBSAJl85Jk5kMUyMimkRDC3+1SlnUzOEdCQAuIDJ5JCDUOrWznFxNGrWfPDXcJxFTUmz4Y08E1gooMy8u08e/euXhujjnKSfdd/92F5I/vPjYoq3u52ODrx7ADemO2285//STDuzcp7j1uaetrpUNma+ckIhRRSFQs7FeqJ0OQM4pKKbfZBm0ySPT3uraZH3yk9XuWRRsq5NTqxF1BLBpc7XZAZtbkc4XJGgABWh0LexPNLFwgkZNmpwL5MqiyEWMqJJqYzKdh0MKiOidE0OutXCqlqBsJBBrUIyjyJJITEjHFWE+ItqHcuQw72JyuWwKJAYNY87+xnlMja2WdfuAswohNQJfjG7+8SQiyeYKkOl9je2bpPmRSWOdSQCYYtUpu3XksuuyqDrEbnfym9+46bof39jthaOHo0KsUn355ZccPbb/Vzfvf/ihRwEkBAfKoVO8+aSHXrDx4J/85NR7j4wLpFZn28K1CZzZXberZzVcrTUqAREFBODcAGU1oi4DuBJriehCp1PENNp9cAeJ27h+0x+96o8uvey800456aH7H7vi01+8+uqvjaiYGF80NjFWpQGAMoIkLYuxqqoKdI4CIiYB73wSRvFVVfmA1O1ERsJJwujibPlvxfBLg3R2LH/tR3UtkFTYY6HiY107p6HoLVmyfmrxqrAcVq89ZfvWm9evO+5Zlz9jVA9nZ/v1cG7R9EThwihVzsKzqDlKoaOCvNS1PTTOObXpTo78qo3VniiLCkDohCKm5L2nQCZcaJE2xtjr9Sz+eKQoiugU0HrodqQ0f6NFyKEtX+t6JDGFEMpQxFRjCSvXrPzXT1zxnW9/fdXKNaeedvo9e+8E0HKi3LzqpPe/55SnX3zZ+z/wFs+Nv0KTUJsg7gkQMedgMVylI2ff5ISan+T5x56IoMh/DWD4ykw0Mi0eJAJRROCY0LnW2LzJz0BEw2oUnDebARFpalsCsF2OM/8EuxzOhzZ0tsUsAJirBSIIKBIF50yAAoMHAGH2Mc+E0YfUeAi1LwXzb+w35Rra+tdoxCKC0oBOEdURLahDERFakciUgBBEXTNDZlCtE1He6baFMwDYUE5VUTR4LwgNxmQ+E1sCoMatmREAgZxDh8a2UYBmPIMO28bDyFGI4CoWJCVRjtWypV3C/s5te/bs2v6MZ5z37ve877vfu/7rX/n84sXT9937yKteeM+fvPH4r1337Pe
+5Wd7HsNTTjv92OGZt73y+r/71KWPb/vxV2+d+e7DONZbPxqNxsfHy870X7/lm/t23XbN9f94+AgE8Bx9DQJQK3rKRlbYFhyWL0kVUW1zhoiBHIb5G2HWMc1ZTZyrxbbgcBYOa8wqgACAmnd46Jz1S0lFJDnn7A4jYsZqmbgE5+8dUO0Isl+bInMemFPunNpf2hZ8iGirh7bXBACjD1x3403LFi96/rMve+yR/45Qaifqlmrsh6vKRYux7493a55x3DM/85F/L8pxry993u/9weHBjDPuECKSisgwsVe0hhVFK83UIMpP04IBdFO5tyfQjkdbsCOiI6pTtFZi4Q8SUXCEJhkn8zbD9gEZxTmXufJE3vvEGmN0PhgIqHA+am0e20RIgM47apymzU5QGopBe91U8xYKoN372C+F3EP7wiqG9u9UFQCT1daCC5+aXAeQEmRXIvtrMRdui8stT8kmTws0goxt7xjQiuD867jt2YPzqQE0ZXhpyya3E9ZcUUQEUkTnvK+qWJRFSomTdy6NT3S3bzv8qU/+57GZI34AmroVj7Zs2fKud71z2Ie5Qb3x+ONv+uX1Bw4eGx/vXbpk5zvPfPTv79j4g50rECOiM0wqIoqAGSoDzPcMAGCyQ0Lo7M8JgYhVgdUJgC8FUEEItFvgaDjYs2v32FjnWU9/1kte9PwLn3bm+Pj4IGp/EM8594zzzzvjBz/67Q+878NbH9y2du3auq5ZkyIqgST1vjAQLSL5bCcv2uFxcVVNozSqq5mx8e6KVcfv33mvfh/xEareMEh/3AUsnWcEQeisWX/iwWP752Z2cwIXOkmqid7U+g0nPfTr6398zQ83Hb9x6coVae4oAh881J+cWLZ46dTYGMzOQooyrCvjpA2HQ5eVd1FVIbEgGM8IkVS55mS6GSICqETOBFsQkVt8AKiIDIdDizZmWICYV8vaoL3mQ3SuYmlUjUIIRVGI9yha17WqpqFITItWLvvzd7yZCA4eGBIFRFIXqJwoe/qcP/ytdcf/jzcJOvOwUlVLnogoolZKOecos+9RBAofRAQQwdPCp1oRQbhti6HpGObnQnbm8141pxBjGVoFTQqKgAq0QKYKcjlv5qyAqICKeQKp1gvalrQd9tpLRQcOEFkcK6oSYWYjCHTKcjAcWjMhKiiJfABhanVxm99r+d4UDBwgIWmzS/DOm4OmNEazudwWIe9KzEQgZgbK4w4L99h411h2b/foVpcAZP2d6FQQOKaFKb+99/aCttgD1VGsOqGw1iajzUWMkNqI/TbHpelCvC+rKha+QNS6Gi1fNv7ow49+9Stf3P7IgeUrF7/+9X/0uc9+7T//67OXXnaZMK9aOX3PPXf++98/sHtH958+ffFfvvaWPdvmFi2Ghx86+I0v7Hn127Z85C8PTi+NB/YfWTy9qtvtDUeHd+x88APvft/ZT7lk96GZTujGVLluhDqQNf2iNodvz4kDTDBv15On9/PNBJgYi8uOsIiG2EQE5hCCEnr0phGRhJWThSRqzrDkZo44s2nyCqNW9kiA6IAQs7+sCUFjO8hSAc6nlFsSsPftSApaVqhBDu0+KghLd3xi2dT4PXf97G8+tGPpspX17IysUOgK3TngQ3urLv9638P9K/ectmXdjoMHHnhg+wt7cOhQohAAIUkkIO8KVQZR16jH2FBEQJkTyHy11+bLdlIioNScFm3LYiKHREiCwqAO0WY5C/NHzu4LXtM5h0CIklGFmtBhCCEZHAmRyBE68t4WrCICJhyWe8Q8ol9oByQiQJm7avVhO+5rWAya+bstAMJK4Qbq3xZD802qIoiySrLA2gRNQXAL/LABIFs1mEa0dRqSZ3H25LZtOioIs6iCc+jzISFA1ywdVdUFDwDa6HUIKDVbI++LmEYA4F2Bro41fepTn9+z54Hp6cWd0KmTzh3Z3+/3P/yhfz7tpNPPv+hp3/7Odx/fs2t6emJdsf//XXT/d7ev+Ohda5veY4Hep9hQXu3/HKIsgIk4V5ACCLKCekJEQgikFTBh6vouV7p/1/7lS7uvefnvvfylf3zS6ZuHc8oMBw8OgYA1HTo6Klxx8bOe/pVTnvSv//SxL//3l6YXL1L1FvICOF8E0Uwkc45mZma6Y+P9mWMR/DCGM8/atEQP1zP940467Sdzxw5sfbz7773hP8z1Tl25Vk7Z9dg9kUe+6J529tPuve/OmSN7ywJ7nQIJRNKqVWsmJ5fdftvND9z/9Cd1O1W/v3jx9NxgeGj/1j17wpe//OUNGza9+CV/FMgP6ypJpuc5csB562cuZ8oCDtDl/WkRgjKbEqCdhJgtXTIDzcaKqkoK1v461+zvQNrnfeHBg0b5dTQaAUBZlhbRSAeuKGdmB4PaIaIDX5YFgDpxgnJ0hntl/5RTN/qFCdIjoULSfEc1S08Jemf0jDZiYkN7R8TsskCoOUlo+85IQdGcZ+dL8tbBGxyZVlMziGZs4F0GVXPOIRoHlxFaMTob4wCi+U5Dm+9txeUAWZWtY2bRxFam2sOGAoBIZWAjZCgiq2O2ulEaJFr7VKvJWyROgN57YFFHqsggSQWzZp4FuJw7VTNiy0K2UTYzAUkBGuWEZJ5hC57hNsvaz7ZrNrvOwXt0zkJDzk8tHVMhxijW+CqIIoiCggBWKYpKvg7NhULEmmvfEa5rYVy+YvxnN/34r/7iPePlmmc/76Tfee4L3/++jzz04LaTTz772OwoVnNLFy+77OnPcyFddeVNpQ/v/MA5b3/lDWXYvPl4ufrr91322xec9dT9t/+8NzneY5Y1q5b++Cc3//nr/+Jt733Xrr1z6MsowXUBUvDkktRop6thpjmTjUqsDceDghcF5jzTc85lq5yUrMoBR55IQAAgCRvlw85GGYqUkvVhzjmxHleEzSVPRCRb0tpdKGw8xTleIyGYiGI7lM4nGpRFGxVGjw4RW49Iu1nee2tYRexWQErsAabGJsbHx5cuWco8dDiTTh0AAD0oI1dTNVLmAwf3TI6N3/mLH7zrXW8fVuCL0oQEUcm0Lq2btKOJ3jUqfAAKor/ZAbcVsEEaG/ZBruHa3Yc0S2Jp5jTtXwGAQ2czCma2ghgdoYH/vWPmGGun1Ol06lxnsi2SnfOGrYQFMrGWLA3eIo0jePuWUvaumZ9vQ44XQIAeUNSufK4QRMQnMI4iZII9SKbeZeHJFoWnDQgLm/GVa3T8rVYz0IZiWyi4LIgdMuQNWvFSBXIYjYWhoCoGIERAYUHzB2zvhSqICkhKEUSd54CdKs5NdMe/fdVPv/Odq8c7YzKimisBneiN79y17Y5bfnbxJc89/5ILzLRt0o/+++n37eh333DDidLOkCEnWYAMS8AmYlg3pg0VO08eEL2NDxUUmRF73aIeDQ7u3bVk0bLXveZP//erXnzSycsPH6kO7DuGVLBS8B1UQUUgnB32Dw/nxnqdv/+7D510wqb3ve/93d54UXjQFOvRbJ9D2UEIqKgInfGxienx9WvXlT0XetN/+PwL5+6/6e4bvr9hwp173oWHjx7S/zoM755zb4XTfnzZ3p
xMtN3pumj0L/Uzn/a+fuOKh1H+vbbhz9s9d/8DV3PvWP3vesf/Tup533RFAR9VqhVKFIGJaLZUrXn3rqzrtu+9f/5kefe++zHn38SZHs7ikP6nSwWM7yauxIKWsJOfooE3pn1i844D9zuxZ1CZkLcYT6WitqQ3KE7LzVZooKgKy96YXJY3ZI4CTUSEtbnzRjYm/cgzk+YOcZxT/Pw3/fbY5DFZF5/lvsBETC2woFhtqBBDyzU4PQJ0YkzHB2UUrxgr/ll38DrovxzDRV6aBOIqqvuoE17X3kLEY6UXMCsbE5nIQZClR192ExRkcayBl3AygYwNRQymiQY0J0pq232MluZaq5/+hoW2hevMWb22oIiq2c9JviIQHXUlbIkmwbvPnz1+t10EDmnieiJKbKk1JqNLzkfZUivZyMzJSXCwBQs1KdKUvHoFojz3tHtxNRNWUnJdda3Qlg67J/TuzkCDWUPuDgcATtVzzHjljcUh9L2mx9xWkvpcm1urG7mjaKJ5OAotsLPpyZWSmekk6tY5vb6FhSKbNQ+EBxoKmZ2eCu1tzCADcYqYCki+fFOx77mGBlBMh+kOZIGsc0D8m0VY5wRwDU3Y1gVa2qEXsjJpK755yr1RqGJ4qqxasiJQjnnKGWc4qH4O5WagUZIXYP1W2gtNls1ut1SikfLgA0LhsRBfLQrIG+O8KlmIVnn5gRSwreJjU4FZLM6yGaf3HweLufaxSZ3G0upNHBoz4M0peqLjCoqgHh2EhEMVefuqB/lI3tjAEig7eLd0dU6sTERhbiDERUSmlu85K82yIFpDnn7IQozMGUlQFYci1VDBBhSeouQwY45GGZuWpB6EgRdCrTag3g8PBwkQd3j7zrTLCwVQEQPGwy99ADt27PLJIACEXI5egC1a0UE8mlTAKXVB2aBzaI2rSankpVkyyVT8kT+QGIUoapbE5ORQ6Ma0qAD6vVyiitTk+Op4cHOUAdF+Oi2BXh4d7Pvjstnnt6uqXirO3EkzOyOV09unbkqzPnbn/t5/2Vhx77ry942lccT5949OgHzox33XXbbfnwupffW+CWd73zU29/23t+Vn/lOc95xtPvfdbq+LHDs3L+9nx6I91y/umQDaGaljQOIVKZ9g6CJ1Oj8AkWTCPQd7Ho3upxTq0MUvWqqQO25/K3HSo04s02MZCb68B5ss04pFvvPrfZ6NH14zFlM6MkDTWNBiNC+ARbPciLpzYn1//lT0zv/80zl69OUo5Wq4F85Vf3DL9zuviRdO7xcxeehrrneM75o3/z+R8S9q9+yws/fHTbuX25fmMiLqQ+LkYA5jUPMqZ8dHT9zPlzP/rv/tWznv2sxx9/Mg1ZiAPvqFaOVxqaaynlaZqmMo0pl6lEFJauujxPblHbW8YsbUxLLEK1ToSo3uDurISYz6euIdjFRGNHG74XaJpIsFIjI+ZxcMW8QqbAkKqq/FHuWWg5VLjWhhdJXTfUqRHMfE4tHfI2Z4E5WzPICAFbMaPZb6OlRocgDUt88hMPvvnNPz+OrUE3ixJF3L2+6iS/93DwJe1JZtqsVlZLrcqcEidjUObqdvbsYci1EYcNA9sOPXre40bmE4t87NL36AonI1GIiJVK0QaYGZOE6WBAWlob3XwKQwbVCV2uqd+HVgXA6vYdFImiYx7yt9i4ZdNVjY1JUjh3Nt6uSlmTqiEZZLvTjhZqbhScKYa0EdnnAqQ5P4PVyRH3qZNurVECs1AsCBHgDYAaqtnQMgdi3wCm6jaCydoUi8JMlLxqTTJIzMEABMZKzcziVMFcZ089lng4McBMKcFs0hrJILEwxYYS1iT3EOyaENMQbgB6Vl4LAAEAAElEQVRjSWJwm8oMNohWkhi1ViahDlyc85WaHV8/muOLmVFI/AQl1iURQxIzHx4eFtPT03XQimIWV9drVF0sFicnJ5SEGJzTOI7Hx8cHBwf7hwethuguHO2nU1NCqRansmF0k2QRKZOJMKXQ8Xa3kMcgi7WwpHlBy3AAQxrm6T0RJWJKJMyUBrLtyfPguxGC5igipdZNLSmlsFuICX/Eo4GaeIJX3dR1QBnBTIxYNJmZe9Ts6n1n3Pye424Hvsw9BI9SSlWrJWb3jFZgCYcSrbkDTFaNiDglMs85T1U3dSMgJhqHodZqqqUUSzQMwyIvtYufJ+IyTTI0NCIRjePoqjaVlNIM+zQiCGchB9hMoaCNWWFK00aAlFKqdTNwkpRLWZt7tcnZGDYMvNFBZC3jXqKahM1MK41JHfvjmFUdajIyD4tst1U9WR2xndzgtJwfAfEZoY0XoXy0kAMfFsd69WUv/7P3Hb3x4q362GOPfN/3PPSnvvJzf+w//djBefrMV350efbDn/dlT/ui/XPXLp27fuWDF277lY++6zN+9hceWx7Ia177hhtPPpiX041jWuxfODxzjpnHvLh4thLR2bPnJYtpQUfJtvkKgWJFF4teYcQWHwTqBtjNTxZzV1Fdo9SGqZl1aXk/PTq9484L683xv/6X//zCxdu+8iu+6vrRWmis1nTZpJPCmy611TOy9/G3/Wz6tf+8GOjI82K9Rl17qVnzb9KdP3x2WQud26CS/dnP+vh3vujT77507lt/83lPrnPmcZoKZ3Uge6qAJyIahPn09NQJP/Svfviepz/n8lNHy72zqmpTZWYYEmdFCe61TpXAOWdwEkrQqZTihBTdvzW2rvfkp27mcHehBEqao4dmYmIP+BY8iA8SwAMREWiTx2ZOBiNiGDKLweId7H7t3MNyow+UKVxe2ja6bTabhrRx+MVFKeMeqBTqWQQ92c6tWtU6SBJwqVGMwXNqM7AIj1ahZAQ2OI37B/SRP/zE5ctP3H7ruXW5QTwywogPk5b6yhvjj992sl7lnG89d+tTx1OplTgb+KRqMV9Jdve9vb248qA1cih/9nqCuk91gxBtAeChFUEknJJQobiN6AvsGGDCWUhmTN+2vtSGH54RWgDcjMNngWiWLubZu6Kzok2jU6hwN0ZI4ZORQJJXNTKipIiI4dGDEHkQlYwRvb6A2UwTuYFSqqWG9mDLlAZnArt5ZVDuOKCiBlOJx29q5tOkHgS0lGZEUmTcaZpYHGB02aO4Blav7CIyTKGdRNVMXU3NyCDMnYja4GNR0wQOkMndN6GYKonZqxsRNrW0iSVETd3cyI1QyR2eJIkx1OC6Wa3DM7GfPHf3nAeFN+h4UTI3Rxpys/TotndKcEDN9s7sExpbyUOpIGUFLSSvi5o7C2lgMOBOZlOt7ptaxuVCFgODwrZsdXqq5jlnrWbqYBvH0UqddBOVV845Saqm5kbC7uHQbYG645xKKdOmhBtE5JUIgVY1oO8SJy7g70ThY1WmiSjwaMLxxrpDQNa2yLwDxxK4DEPUesMwVLf1ej26LxYLMoezEUikukM4MROjmJpa2RQRSWkwQgjPgs2tkoOsMbadyYSsamIh4ViAxbVoqcTAZC6sAgFRTg5iIkOUqaGxzkRt8JRzamL901qiJIEMOZ+WExVrTR3HDJ+zLKJYzizS9QeYmRBrL5DBNOy2hIi
Sc87iTE4ws8HAnEyIPInDzBIv3eqAxESOZGUaknhNzEYiHt20OHDgZpREoO5spW70mJKktHA3x3ATBsdXClB2wz7EteqARV3dyAu5dHnaO7j1//oXP7q3TM94zhu+46//tfe+Uy7ees7p5Nwt689+Zb5wKz/8wNGFZ7zvL7zpZWcXX3j30z7zwQce4TO/9qH3bB64//ELh2cfvP+hV73q81bJplIvX3r4rjvvXgxLZEESciJnNhihuqa8Z7apOtVaAXIldxpSXvlp2awPl2eqk0FD6iCxZCymsrJciYRIoFZrdUrPfsHeW37mV37x5/77k1ev/fa7fuerv/qrk4wmlMhrBVGefDXysKapbmyf0plbD97x+++T//LmWxbLYztaT9d0TQe0ulHzr05nfuZw7wb4riUn3/zAyz7whqdf+mcffPY/fN+zq5mIH0/HLLF3JU7sbGYuMhjo2pXr/+QHv//zX/FZD1++QeTr0xNBQyoFrHYDYSXqDq3qbiHWn5KEYVQfEXHHqbGwqrlB2piaat0ERNWCRNNa4d6iNH+kqE0i3ooCDImQXWCcmCEMFLfw9RUmZRSvBBKR6gVaAQnaGJiMYOZUNaTx3L3reiaAYvIVAgPYyb4ADJ7zQMzBxiZzuJOquGud8mJZXUNIWZycpWJVNofvf88HErGqJ9mbFExmUGPW5678Ql383tnkXDfl8kMPnix4ECal2y8sXuHT1etXDhSLQ3vFM+8w4cQDgXWCEBcpVNDRukCsMIThIBazACeRSNYyiYGCgE60mjbR4psqm4DbTCv0i+YclBw9fhgRITGMVFUgJgj3V85NOKE6iKgSWHJMW2GViAkGuNUY2gmnBFCKBrSiaXBTQzA54DFSQPccdCDk6IjI1Tpfs802RUShs+AGdfFSIenwWzCFGKX3UXGn6rfNRIA4AjGA+WFHbYaOkIprizUJxQJYtcOqGVb7CKLDa3eaM6iVWqKENAZpIJlDjsBcjR2p4XhUCSJMGgKkbn2UyuYewGRhh2upMVkK6cox52kq1JSrm7R3zjkmFewIcHjOmZlKUbLEzMMwgPx0sw7P4MRSSYloXC6JqDZYj4EboE5E6qbG3Yg6LnXm5WaziTQcLfBUpvjuYmYE7koxMxJt3mFw7jqjTcrcoW6dtpRSCvyCSCulI3kbc51KDOpTSh4tJ1PU+4Fpiv8Un1xLcSYioQ4mjyp1yClA3WaotZIweeOPxdzG0Fjg7MzCQ5bVtDHTnHNKGQDMRcjYTd3MjLbae3Hnfa6LezPt7kSBl/ZmLOG1TBNzMjNXK6YxKxuGoWym2AWMqTlJeFCKHaYaqGDufgDqRuZt008MIkaPWuiSC0IAq0qAJohIRDalUKdv0gyNCUFTZs5tAqmqgAF1kGym88ofc5dCjRDCnFxryCznxbgpZbU6uXq13v3MW3/uLf+FyYXH9bEen6iqD8OwPtmsNpcXZ/Vg+ax1vfzsZ7/Q9EV3ve7E0uaJRx/7vFd+wa233r6ZahqHp566ypzSclyv16hVRDRlFs6SslrlE7ZMvGA396p+ymSb6fRwf1mh02mVnMBkrsMw2OTFTykTMZe1w4rQdNed54nwt/637/jIxz/0H/6/P/m1f/qNf/97vt9ZTvXK0m51RlkkwZonOabpwsG5fNay8pt/+X2/+73f+Q37ZaWq5cbZqy6Zp6PxJy7c/fOeD6fFfn3ivqcvf+C+39lP5Rt/7aW/8PgtDAfVGGMJESAwVICRBubE/NhD9/+d7/mur/v6r3r0iWvMOe6wdHmvPhaKhj8SpSvaoDhEcrCDC2md65BDoxHzvjwGyyRofjjufXPMbkZz5ovfaw9aWKgtQZ3a39KG+nGggap5VhwTtKDUzoy2XelyHD3o+ATuaCP0xcKcdMPwIFq96DXQGOCB3Jxn1eGACHePP+Duw97w1FPXf//3fz8icDWL+YEZFDq94ggV6T2HUeVvMmVw0U229BnT0df4Vd6biqTl0z/z6c9/pho2gCeVqhWkxpmIuhVxe7+I0Vt58uLUSDHBLxXKRTWEGaKWGtN2cU59oxHBNrAjAVYIrBcjHPnYglEWCa4RAWLMqXJzyZJyokZnr2bT4APnnIJosfPCK+BEqKrmgWoN17+myBJo10hj/efFzPN/joOPvDlvc+dml6RpOupMECQJr9ZIl0CMvmOUbck5ApyqqhsJs3As/MyMvFtzO4g8CKC9VOxLJnd3ZSNwyNSBmQlsVdnhWYSaqR7UJQkYSubrgn74BOTmprW6kaQ4V6qaGrKfVLVORVX3F0uf5dDC3MJNSzVgHMcmPzkVI4x7WUOcjAO/3uZ4e8tlqXWQNGllkAJmVoCcc2RZCNdal8slmaeUpmlS1WEYQmVoWq03tQzDEATueBZBLECnr7Ft1VwxD6ncZgACt0WPoZtUN/x5Jws5AR2rb33qFaPsSWt0EEQNHNtOf5IYijlR10eCwqnU+Fh3I5HgAFiALeEVJiIQklB0q+osZA44haJLeKkBodPrnYdGFI/Um6B3FA1mZK5ezEwSE0PdMosDktirMWNMIxGJe51K7DWk72I2tWQWzikG6TWOpVvs9YOPiJ7sjb1Z7jo0flsrM6mGlnKcDo7q0MzGcZzfx7ixgQAPZUF0e66QnapqCZZZkPP8xjXQba+xiIeUB5iWUobFMrNgHOC2Wo+rzWR6UusVGLnR/uHBRk/z3t549jaS4caqcJJ1JbcN5+JTeu5zPrOUUosi8VTswvlbiWg9nQ6SrOoiL546upYXI6svOBVyYR7TwNhnyovlhToM7n7jaEqynHRyOqYB5LluKqwOzJtTlqRnz9CFs/vXn5Sf/emf/tF/+wNf+6e+/ru+6ztf+bmf9+V/4ov+0jf+uU8/dJpkv+qNjErICh/q3t65/MDjT/7iD//mb7/tp9/9gU+86dy+XNjjx9a2OeZp/YilH7Ez71jzbboB7C++8Nrfeenv/sGVg296+8s+eXkz5LUkEqapbNg59lARMZfLfduUhz/9yf/ju77jW7/lGx574lra3/epPR1F0wyO/fcgw5yfiEiYU89YbW8WU9BqtoOEaiQU83mfqk3YeUcOSCsx+87e0XfQMwH5nX+zRzxjSKCr0Wl+7uTkKW1xmvG/IW5l4wgiGXLalhQxDPemPB9GZyDiBs6Vjsps6F0GGdC+V2ZmFrZ4Qx2T1nEc3/++D37wgx9c7u+5uymGIQVSBI76imP5wD5OOdRbmIFqQxqguOvkyA+GTdGTlHD3ndOw2JtsLw2TKIHcMVDqfraY71KMl4VIA+cUYgdOGg+ilNW0KSUiq5dSOKckY5ZUSnE16/VUwMJzbvyOCF+qGuOIoGm1HxoLcsRzacQk6k7ADnIQOwLEIMTR7/Fc7GAHmigEWHO2oaD0CXeZ/nnBQCklr+pmVTpBpf8iCklMm0/StoyiaB/bHiJ+p9bq3gx3+7lpPRALrVarRDyOo4Sav3QspRoT+U6/SyQG4x1/YnRIFxFxcN7n88RMAjeHGrOk4FYRhRo2Sk0hjNx3W1FzCFPwieMThLsjT+cmaXc8DE
ndMHNGs9ByZo54WkyrWwB2WKG1hd0kUmst0xS7RkoyN22mBqLQHd3bXzBzuMcwUTUr0zRTjUeAuxoJd//jwLG7u3e64TyEADjKaiGOGrjJpITinU4h0qY6zY+4qiYZRTKzht0T0DRfZiO7KIwCJIW2VYdTVNbk3HEiHhku5CfFQ51SdcjSsALcbjX6zgbC0ivc1MvbyP8iW9ZwbMdLKdRtN/vQhYjIvdHP3JyIhmHgnGLSE8cpSmBVzXlMaahWAk9XpylgpdSlSNopDCHxJDD3qg2lqR2g1JdoCRTjVzCxBA+0CRXNNfhuoFytVimlMeW4qxbyokwB2BbeJuC5uY/oX81gJKC16jStnWmQwUHOKzOTtBjTXt1UI9us67A3Oq3VD7WeDOPgdEHLseRUyzIPfONkHaHc1HMeTCuZB9zXQap6uH+QUtqcro6ObzBNT1z5Tc5PXl7XaXPtxpX3jYeDyPlzd5+dihEfrDbLTakHB2NKvhycjDnD1T70oQ/92i986H3vec+dt9/xQz/wwx//1COv+bxXf/u3/YW//je/6yOfuHa4OHCCplP1A/JptU77d+UH3v6+t77pb77nY7//vozls55xeDDRVT2Va8NgJ3c8+0eu4N28d57W58b6fa/6g6981pM//unnfdc77uFTfel9z37k8avH168Ni7ypJpwcCLDIwd7+6uTGlacu/Z3v+f+86X/95stXrihJOZ1GNHmjOay5u8HD0IWI0J4mDI0OEKBCD1ADLEKy9mGMTiVekPayeAslnaZh4RdHfSuJnRGjmUHNOiSWHUSwYDWZbgOvGUiYydlNjUKqApTahTAzRw8Q/FLWhpqOrTAIBu+uQe5Va9A1u8Vk7FyZo3xphBHvbL2m8SjEhPe9+wOr1er82XOlFAQ4mpqzvb7qxvDmW6zvbjOMcqqal7y69647D//SN1+6cuXcmfOLZz4tnb2zmPO0GihbGmAOnaHakay6tKI1YSUgiJIWkEAAVE1AwzAQuZXGYmVmLU2EuDXQnboZjy7Ov4gk5mguw6E84gz5NuNIj0iNzTmTsoTTOES0NLNE5gKym/1GIme4moHExAhz+TMv9qjPx+IANdXozoduuXbn0MwJsj2zTiBrOk3c2sSo5eKAtVPee2jv7oeYQzkhduDV3auCCX3fFvfi//mj3YiEc+ZAirXBqVZ28qqVKADi8QmjpAonZnJrYF2EwQu7GiNuHcBNdyYiNTNPWqEWmWO2Q2Zmlm4PucNBauqYO/kb7uM4skhYypdSNmWSTuvmnbFVAIPnKULMvdkMRIEMmtlWdPNgc0Y5zx8VtyvRdtdTqS1+nCnTsK2md+cKDftnRDRwYmZt3wS1NsdTnS3WbYtA2T1s8/mhkJgmCsfFlNIsmWlVlTS8S+Ojkrd5rJl5lkRcm6bVFkyLNhfieM3mag9tjdGyonAqWkSSEwUVMiYHZrbIQy1V4aVozpkTpZRYUnUrWq1tzdkpPFKQUorIBfeYxruauc2McHeHugWnv0t6AQgwTDzBLpjaljvoZfjpVERkGIa9cVFMa60yDJtaynqLgxv69p0dYEpmdVOUaMyhXx7tETuG5SibzUZSMptSGph5s1bOzlB1JU9uLkOtm5QHmWoVdq01c4iOKblllqJGKalwcUtOuplqrUhSnW+9+Pp/+A++8/O+/Hkg+/jH33n80CcXS79242j/IC3G82f27j09OXji0Sc+9bE/PL5+XIkvXX70icefOr5Rjo9Pnvf8e4a8+K7v+t7zt138qf/231/44s/46Kcu7R9eqLUQ5QrZW0td4vxF+tjbPvjOb/+rlx9+14WL5+8a77q6Pvu4+Onh5bNHTz60Ofvmp9/90TWdvfHgF3/mub/9ot86n06/5a33vvWJ57md4lzeT7ckPyqbSs5JcnikGmFc5GuXLsngP/Qv/vHXfu2XP/HYkXkK+3erdbbK4UCJUvi99yAfUbuJgrURbgyT1Q1wIsosja1TG/+Q+iyKeyfTszuzA+S6s5WbizOK3Qq8icv206UU66HW4bRimiN/ONhN1anRsZMkEZlqsapaS+ZWsovDQmKBiYQJQgRJTgJ2RPvRjm6f1riDkrD5bmcVsFkn22zsne9892KxMDhxatWzMNzt7rXdNeV3ngW3tXrlYW9/nHTvrpPVcPTwcOdth+cOj689dch7jKxCvDe6aZ2KE1Ji76rwAZxo1xZg2664Z2bkzXJtoBZhVEtKaRzHcbn0Xvj2uNE2dACCAa+qXr12b15mngvr6Cy3uWZbKnHPvgRAHbUaEQ/LvXjTU3B7dqOwuw/DYFUj6Guf4KIjpVNKTtsAx8zVLXggtEOBdWsQvvl4zWcoPk5VY5gZAL8woouc0dMwxc5ksVjEdw19DyYwSEDg4N2agrr6timcb9aD5Lk2kWiYGWqu5iDKQhTLcg/RPHOExw4RtFadHZbMKxzm1FU+0J2IgkAmoNjSId6z3verKgurqhDHBjHa3yDYRAoBU4TOUkqpVRtG31pp3BAfvehRjb/YnB5ynqmrIqJmtVYZ8mzzMfdSTXS1790juNcwrg6MwI7AUIxcwKEo2cdQvP1eTJzTVhYjDig7OXMIccyjNpu5Ew1Z3Vi6QNxCkiETEYc+bQc3WSiNzXpJZtWawBkJE0i6BqyZVQaEUbfjuPiJzIkEtZRgaLRD1UD+8wsTbsakapG/vZkYUuRU1AoJQhbVUqFGSYZhwLwoyol7193XTpjvfpO8nndvasZzJm40hgjc0oX0cPMvEs6UTQxqm82mBc6UAAyz6F3/6VGFxAhRUgi8gsCqhdnVnMxlyGaeMkHLweFyvV5TzlwVOhitx3xovgE72/4wei0i2eA+8FBrdbecUkrJaxM7KqqBhuWUHNhbDGUznT175o1f+7++58HfIzq4+xlvHI7Pvu23f/3jH7r/oQc+Ni7tB//p657//Hte8JzP0nX51V9986133vmaz//csweHFy7eutw/uPTUpTvvvOPb/sq33X77HTfW9tFPniyXB1pPNlXyABRZL+rZxd6DD1x921/7GxcvfXB62ovetrZLciMP+x+aLCn/YT3/L6fhvX/w4J15fOMLnvy+l//up472vuk3X3PZn3a0frh6ecbF5z109crlxy/fcsstm83KzJx1sRg3m9XDjzz6yhff949+4B8874XPfujRp3JakDIUQiCR2D216RQRp6DUd6s17xucNj70NlMGS5BcAhQ+ixeGrQ9zTBZNMQf3dkTdPeTjdvzL5/DIHrY8Tg62dpojCs+LDHdnhldSt5R4rsKjeoC6u6c0OFVVbWJuqs6N4Nfjv0VCiuM6A9B8t5ZV02aH5Rref0zmYMfycHH/px7/wz/8yMHBgbq7MIwgEGGHTa88ApDefZZ68U0Y9pCB4WWc7zy59vg//SfVii2W+esPZXxuWTElVrYhixJxdbT5t6si9GKjBlILAmEkVAKDIU5wVWfPWeKlbhKbapKT9fmuM4nPGOGmD9NKfq2VGj5jG0Z0vhNMag3s0vuNfh6IhGEeSJc0ad3WC9EnwaNwMBA7YkMViGp2b
FzdvWiTSul6p87Mcc52uxsDhXEyEOh2D5fE0KaIjnaQBKCYcpII/GZdHoWIeNaS5BAuzcK9loSqEsMQXsQOazVIcppb/rjsbTIWqLnXCrVZmDA+KrMk4lCFIeJq1aumnKq1eNruY59IxF0jILpbUGsY5+lltH2bzeb09HSxtyCgVgrpqxAlrqZeNWBfGmVsEgmDl2hqhZ0pzAPinkxaTWtKqZhKkk0tg6Rwl6q1hhuxSDNK876B3n1pg9tQS1FphR6j3a6oA9xjbQkysqhngOo6d+09cQJMiqZ93QKOcBzK5ovMHE+Tukm47pjH7RQH5K5xG0WS18qcRaiLdUAkuLcUPXQphYxDeol7g6iqjciEdnVzNQl3YlCSpmU2C5m5DSm7OweMpaq6Aa3ocXcaRacSdzunBGCz2SyGkQWbzWaz2eRxyDkjYT60ZmF8GeMpa+VOKAhtcTQkQy6lhHA6MzMnIpacdv3UdgMuiEk8MbO0A2ZqqhbMy7yzAw6RLwAQZvPJNkMeg2eZCJKYFOrqOlX1PO4ZQZ2Hxb6q5sSllMT7p6fHOedhWJpV1c2w0FKT1cqSxpzMqqpaIhYB3FQHSa6GIa2nyQlWysGwtz5avf51r15+Sn/mN//d/Q+8+72/8YcPP/zwfffd98f/xJfcedutd9393Dwutaav+vNf/+e/9eve+54PJamv+pyXXj+eqgtnsQk3rtpDD1/jRRr39siROdFYAFtvRMYb19b5rW/6Zrly9aEXvpYPS3n0eLp8tKjT4wv798fLt09nDl997yufuPTXnvm2P/PcK/+/j9/+Xb/73PPnbz1/297lp1x8+eCDDw/Teu9w/3h1PGYZF8N6vX74wU/eetv5v/s9f+ub/tJfLKpXr9wgEjJnITNTGFMbys3hxdV0Z6HTH/HctARMtpfs6CVnj0jjOLb/pHB46gF9JgKBiT1U27l3LvGjCZFrgWhPmQnmTiaM1CKpA2BjAMZwZ9uUgPgyc7y5TQPHEDMAMo/nG2XcxkrE9UQpopyZmatVY8fcJIX8bTjWI3yEAiFLFKX32UHe++73PvXk9XO3nClWHGSOISUDs1t91Y30kf10Y+kSzFsqQtem+jQ6foZd34ysn/jQeOaM3vY0OreUMemx2kY9mRFcCJMhpeBGegckcZveCZlGuDZ3Mq9eiCSlFJ29mcW2yIoZoTnAMhsh/DrDv1fLjqNLE/olItKp8CyYFS+7cPx/d7dQEoXHRJqAWidyA7VCPJ2cnIzjGLtVd7dmce29ZQn0VcMHEVOzmbTtzDMR11qNGpGUt+vYEFPjWiu4Fflx8uZoVUqJ7V0pRXJ4a2zHpA7rDTbXohZajyFTbBAiEAos2jkiCgJxZkmSdBYe7yLI7u7BqOqo+iRSVbXWlJInLlUpiCdMygZ2HpIXNWuTajTlGqiphqqteXMacAtnbqiBMENp4i+eOXOmWiWAhUhRStnUIiJpyHErmPthlrbeHqW55tVa0UfW83s7TZOIBOBZ4c1DmxCzWSKSnEJdJPWlaaSE8EykTnTWLi42l9vxJSINoztpMIcpVEcJ9mpAVU3ajdemxs5F63rahPdWFPXRpkfXTl3eyMyCkhugQSeN7tMIVY3cmXOoZEl3U5jTNgCrrm5QpJS4v+2u3hwXaVZ1aEkxiYiAwO6O2LQSUK1qGSQBxIxaFfBanRhuDSDTRM57hlsMo5m5+TAMiUJqo/SIWVvV1RtoNyMPH2GzYPjF0h1k7jnnMeXA7hULg3eSXjDN7ctOmRLB1+cFdxy8ePTzrxDA0S4Uk4YM56owJUk5hlQpDeaFE9U6qXpKTMJOLmlYSJ6m9bCYrCzW6+OUUkqH6+k0E4sM7CCQuqQ8AJi0QiglslIl5UmrLAadClW7TmUY8fi16yyHy+Xy3P7h1Ucf+lNf+CW2v3z2s+58+l3PXJ2wmqfh9OqRXXlquHjr8/7wgx/55bd8+N77nlVR1ROsmp/CMq348DxvprIpSC6OhPHk/N6tD3/qwcff97sPvfKLLj3tOY/+7I9dIr3jzmeenvqT0P+yxrm7zrx6eeNvveI37lie/NXffsFPfvoeFH3i0mNPXn5c3WnEAl7zYGXa31/Wsn7k0UcWi+Ev/sU3vulN3/6cZ9956cqNskHOS3YlVtU1j6PWXfumm+rI+GfbyiG0d4SdLGQSCTGxZADOlGie9FCnk4hI6OoERFw6G7OYxkm+GZPcVmzqIUw3HwImclOb3xaHkRODQOCU1MxDl5EQCv+EaF6EyFMSojzpZAYnCItpkGttRvMHHgNqcA8MvxslYgM0CnrhNgNgiuRRin3gAx+IDYu6E5MIu1uwE6aXXx/ffoGZ0SQbIWW6fmKvomvPTzc2JPn8wXo42Hva0/af8cJsi82iMNOe8bRSGOnIuqk5Z2IK7mUIkQtRdYQ2gTO38YAZkVdtyDczy5JSGlt7A4K56nZL2NaCaKt8AiLqmmo1nWuxefhBDfNFRCyylRgzDSE/YTTVRQanUtR9mk9MnKFQdApJdOMQoA5Z65BNYXAwtz2F7Ab5gpOqggBpQTwEQadah8XoahLzgcCgMjFIqy4WC5BYqYMkFHOYEHfqWZNXk74KVVWH22rNzDJkcweRKJEjXAUjUDqTmrHCO/OjOQcAZpbMKYmLu5MLc2JSY+YB8LQVYwM8boknDIHH7jSkmOJmSIVF+hFirS2VxT0spQSoNXRq2lOsdRiGBAGwHEYzs00pphwIgrBDpiYIiakmZoBYkiUodz2QBvYTJ458Fs8+wL3KTfA97h8cAnINQy5hZmcAVFVbi9wHrc6BO6Bovikg39L7tsxcEaUGei73bnXs3Ms1uDvEweCmt0AkoHh8KaVQdqTAUQoHgpoZAiqSBnIyRzFmqma22QhRMS9qPIOP2MWhasLhdGeTTtyGVgRQ3UwcaiRVo+Ns4ngFzBy1GDPMXAzCyZncHGQAUWY2IwiBaRQm9qlGhRSyvZxEawm5FVHPOe8NY6lV4cSoaKOUmC7mJBUO00rKzEI0iEyu1Fxw2N0VRuIgTxbHRkW4rUJiDweJOVrxqanldOQLpRBTgLvTvCEEAE/EEK5uq2mTaXAyMJjhwbBnUneqDdA5DI37mFmoltLWZmd8cLOkqrUeiQiyOaCxeGK4TUBIMLHFO0WUkayaECPzSGaTD2kgt1L0hfe9+E/+xNfcdvHgkw9cu379+nJ/Ly8GAjOWQwYybCovetFzj46OPv7Rj128ePHw8HA9bdarzdXVk0tw3ZyT5TItls6ndSqjDCd6+pEH3/yhFz3rBp3ZtyPc9tzNIx9dUrVxWNKeJ/+TFz75/c/8+COnyze8+bM+cXzofkLMDqqGnAdymPpiGPIiP/HIw4nqV/2JL/7Lb/p/3XvfvVeOT+5//Ap7IqJJ1ywMJkoLM2WzLNm7A0cMsdQsSRosubtBQ/1KodFmVa+SRFxCuRMUGgFAE1CTvpoRdhBksRyZJNojYo4glnMuWt2MXWZ5RSIHmU0WaOSUmKi32g6P+VbMgTpkFe6V4URCIqAA2LeqkU2ngiwTkbuJpCHMFDyZ
uEJVy6lODZkhCWCS5FpZm+6vdjv2NAyraWWGPA5alVmYsTrB+z/8QR6pAMILkO0txtPT48w4vbjS56zyD50pqgd+sOaVEbOXl7z0ea+7/sj+x69MB4fY+JCujy98+XjXxY3XBSevXt1lLxu8qEJ8Y5M4C3KmBLKeQQFmrb5arRbDyIlDRRgh0FiaBbuzC2SQtEZpQwVusoWYKgE8ZDicyVkcIHMRzo7K1cxSEgGmaTLXREmtROYibkU3gSKIOXgyuJOIgDnt7y+pE0Vqrc1veaeIA5oFCoM638ZhxoD2tS6D1EIn2t0UQGJhR611kbOZF602ZIC8mrsLOAAsTXtT4KGd6+TWUKmqjaQfl0HdViFASWWt0Vsvl0s1o05Hnq/fzL1P7X3mPhEBThayULG+wMwyat/aEbB7dNocEDEwGGSt9myWwLGYYWSW6EXS7GPfF0Lx03NKFIb23heozC3Y90njPHuwyXQqIsI5xX9NofDqzrGrYGrzfvdg8kkSM2OzLdzAgifdWDHc9DY6SC8nd28+SyGg786GlIRBaUcY1kwR9to7X2eekgnIzRiQYevFNOQs/d0exzFaJSJaLBZhrtWeaW+ja8gA3dxVQIiJA80ff0zhktg6fywGMGrttWfmlNJyuYxj7H0s1h8uiqnrFvgdJlHWBPU8GgIiDmsUnYoztwEyswDuFhD9IY9x9jabjdYa5A1zbXAPMyJar9dTscViYZMi1uTVVFWbdpcZxam+qYsKcyxqjL/AwQbSvmO1zNsimeC1BkQgwG7zrYuJFKTB40MvbJ4HqKrCWv6O3bwb+sq81MmJF4sFd2Vs6l46Zu3BRRM223xZrbtfYbcdHBZDKYUkEdF6tTke1kfXbyz2z128eJGZvQeTKE8lJQBnzp5d7i+vXLny0CMPp2E0s/OHtw97gKTptLIcL5aHlAuXxQcf/I8/+VN/+/2fOrj7njuWizuf8cLnj5IefOyxpz/r8MZTl//hax7/uuc9/tOfvu1//51n3Fg3b0m1mlJKBHglyHIxXLt27fjGk6/7gs//G3/jf3v5Kz57vaqPX77OJEs5BGoMIbSWyLUSsnXuXVXYzIxBALlaMRchyRk926l6LdPW8I9a+Y7tMAPoIz4AZrBSDcYSXSkBrYwnIk5BoXCJVS/grgxKi9HMap1qbe9+bDRUS39eNz0UuIVWYOhyBAYoxqSqlsyIjIlYpHi0HCAiSiICDrJcqT45pca2QF/0EBHnlAxlM5nbkBcJDFPXstzf//BHPvXAAw/s7e3FVYzDUrUOw2JTi7ymAsjvvLiP4fp4vL8h57J3261f+pLPfNb7r69yobKZFmcO8sH5FzxnOSwDtgZphb/BEzNJju+l0yaA3SH0VMldjYecOYWbrVFz/hEi460LUfsWriEa3/gaQTMDplA9Y3KvETkjhOVuegEgBPEiosDahgidyk9uBEpEOUs7WdGZ7OaJtjVQi6LJ+6o/jk51m4nYIpKC9haTT2zNtyNOwZyAWkrKmXPSBtVBEmkDTYspTUiWNOdLs5iLNoCDTiWWprMhgYU64zSZcEqJk6C2bV9M0cmciQxeSgloUnSKrVYIlhshXKL7qxIaNgBgMeFEYxKEinec42ayFqfcibj9c9CT0lbDpE0fZuaou5urOLuhlKnlEhEAAeSZG9l4BO6exsHMpk4rGiR5cwak2RVr/kHuLs0VsXEQIQwBdT2Q9mT7qx5ArajygpYfZ7e6Z3PpsTSuPLnDEYPWmeLiHVo1L7RmbJfZdirL3fx1HnGPqWE6dod1ADxo52Fc4Q3T70wSUs2dHR6SwgIytxn+MCcJACVO5jiQeRh4iEiOBrqD+yItxeoiclPok8TtjNFvsDxiEoJ+qe6eIEhzYeqBlfNaQ918BnDlnM11mqaEmS4fVhNRDga3sqIleDSdMkYeF+7ezk1tCHYHQnG+32QBxw1ErDl2a5di6gTqgHBqRBQHYr9ibURm5hSqS22m2rI1iVeFIN4jFjGy2s0qdCohVM7ShAxlB0Eyn2QAtdo2qBE5ZFwclM20nso4jgFjiT+pqiyknIYkxTSPi9vuuHPv4HC9Xu/v74tV3hvV+daDXNb8q7/6X376J9/yVV/zElx4x0f/IK3reOXx9127/OgLXvHqr/lzX/7vf+LH7xyv/8LXPXpHevJvvO05P/XJO6vFC43EFJIoDOQk0zTd/+n777nn7r/73d/3p//cGyXzk9eP1utpkEVOo1Vn6dqW7oLGtkAIXZhZDHZna9QuaywVYLKmkRnk0y32Pk4UUbTA21es2WoBlAjaKWQNQryFMTLIHcwelmVmxixFpyCnxKFUVSeeXXOwQwBpaSbsqztau6GycwY5XMkbmtrMYgsii9TiYiOfzM+3Asjc+HyhAE8AM5EpG0CmWpihCs786U8/cOPajQsXb3WnYlpKyQEgYN289LI8uExPLJDSUKsNe6PQtJo2//HH8vHV9S0j8iC2LhefPT7zuVprnSoRiTQLcIalJC0r78TSYIjQkIgojwvLVkqx0KgjMg0ZRPJYDHiL/eHyF70/Cc8AZy7WUptIcFl0KpNO0K2ZRBc0I2bO1HSF2itcVUs1t1orrFGezKxlX+3K6QJyp6iUzUxA4U4fFUGt1al7RITLLNrMkzsUs50qNYOzyOnJyZ7IIGmCmnsiFuZaaxjgzHnR1bpwTxy4xIzMjcQSMWtOUTnlCHPNrEoYANQBZA43WGfmASmGkDZXkcxR7AvIpaEBpe9OvG+p3d3Q6gxnYpOUEiWRzn+Ppah104xtLuQtwjAy+pxThzSatP54tpCaXw/vKGLsdBLzpHez2RTmJn9IIbW2E9oc7l47/aa66YyWYvbmi9wSj/fme67gWgPYx5mxzp9FRokabyqSq3Rvy/kLzr7ffjObPBI/gWYqMxHVWpMQGVXcjOcKEp7DyMKLkCIBx1kK1K4I4OqmqpQSYunLlDnPCbhtmDopK3OOwxMrc+9Wm+iTGzMLgGmcAWZItyK3HQ5bjXahVW0Bim8ovLj4WoqYzFVF/CBXL2UjucFgox9VDjBaomanpKrq1d1L/EV1i7FEZpGUEGUzEakStlceEq0kNPs47QZZ6Yo3/W1qjx/MiVgZ0R7NvHYgfCR5wQut2zvZKtrwuoBRF+CstZJ2FH2nG8xlWfyaN+gcuyiiakR59GkdEnj9tfH1ej0MQx7yptTWtdR67ty5k5MTEdnL51a22jsYjo78Lb/0X37+l/7t//gfv/XH/+QXT48fXbqGC2eR6t7Zs2m6evlTjx5+1fPr97/oD45s75vf9cd/7WPXgcIipRa4MFniPA4jOZ586tI0nX7T//Ln3/RXvu1pd9197eh00soiOY0gKFVDhQmoTw7mewiIcHWLe7J9oRyUJAiBBI4g64HA36lOdl/A+TT+kfs2s87cEZ1DvCZBNAr7c2ZWN3ZwYvabdtIcDrXuVm17Wojmy0jEpm1Z1mI1MTFTKezwxA42M3Ils8CoujujP2VC5HvBVmdqhl4CULO0GCMCGExECtwTPv6xTwS9FnCRXKuKSJmmcVxc+5y
n8jvPeeI1lGsC0bWc7ltfvbdc2RzUqnlJWKEMz/sMvvWCbhpE2RA8Pg8ZjRYzZ7jP/PJOhZmRmfqgN8TtRYaogAeWqIHiiUA4orYTiNrjsL5uMzOY10gx8YG8HbI6wbx6WG7U7WSOOicq4n4UN97qwi7S3aJtChqYV1chDjUkq2psiUVEduEec/IzOFuXYybE7zjcCEFmFfdBxJ1rFHLCVtvayj0EObYzz5RSW7IQ5ZxjshesxwDWqnvgbNfrtcCYOVymiaiVGF2OgDsWunUh3eYh7oa7k7n2cx+P0AhwJw20tkUrZ4oUM9LAJTE7QvU8CqTGR8LNvWzwoefXLG51oO/adKt57ICI5pK19ZGlIenRaT/VTR3cRxLb0Gl9JNiaTmbmIEMXrVFHS6cDzv+7yENcuVn3c40K2pUAJ1gvw82NzIlb5MU8MGAO/+PWy3bNJmbO45DR5cmjBClh+mQy5GaT7G6w+XSGweU82GSQkXvTPe+xmijE4tOODdFc9ARKAEymDdsySJpT0VwJRaSYydkeQgGuDjeH19i8BruD4k7CKPiA3EHUvbClAJznDouLB8rMARpfLBahIwpgSCMzN0UX9kBszafCe/dctIafabtmV4U7MHKOwELxdsZ03RvsLt6R+Vd8/bgh8eExRKaAhQjPIWM+rk2zPjWX8jjnbR7DJCLmTkwBbEGQVUyhFpDyOdOjl+CANXs+GICUm5R65C106zOAMouApvWKufFodCrjOB7u7a/X6+Pp2v7+/pnD4Tu+97tPrt24+2lnvvlbX/2Sz/7iH/2hXzw9mS7m5Up1unrl+tWH/9wtv/Ktn3PpN688+y/+tzOWT9x1GBLYoSaUmdJiWJSyunz5ic952Uu+87v+5mte+/Kjo/Wjl66KiDslYFws1+u1C1zYNDRdotAkuIfosfWXfs5q7ACBrK3eAaiDzE21eh2Wi7n6t51hw9wVzCk28IPcQ1ngg2zHcSj+aItmMbKqlXMC4HWm33vQTmNm5h1PMP9coPU82nMqzQ1A8IbBIINR20tNExEh3SQ/4EzSy1Dv298/EujcXYjULWC2DzzwEDMnlvWmAJ5ZSlEwl4XoC072/8OzBpMbw7TkVKwsyuqPrS8tUjmdeCle/HTId5x50cuGw2zd+Mu1gsh3KhgiMqKAysYBgzBvJnbUqYAp5wxzs5paxtkW2XNGQ1U0oBy4tkRA7jGQn2eIFnp87g4nEnUDhTKQ9B/tiKU4mq5o+xlV53aXiJKBUrBgrTESY7YuEAbF21jcTNsCzHsy28038cZGlFezYIOUAH+PWdUklgdutZQ0DjSkTDyvnAMfKxB319hINsSWJ7THHFcs3a9QUoqaRUt1bpzT6LGi5WoZXd26bE1Uc1HROHeXkAaabVkkJkvYGZk2OVbaPqF2AjlMNoHIxDvEqjmoKZB6/hCOwsYpyUhtWVi6jmicIJ9pQkTcVwMkje6pbrWG0PfNqx0JVr/ND4WZA15fa2XZEm/mX7Yjrz3fhvji3iwQtiUz2J223W3E0Li92tcNs8QENYiyWidmzH+RG1pdQIjx1sxYIyI4uqwfejR3IoK5MIdLGBESMXXGze6rHnqcRF5mZSv3UgqMg6UTICNVrX08ED83ZCZD48bc1KoaHDLPTna/OJjYmgxkvEbZJYq8mClJpwCMkluAa4JXrVwwVTWtbXLf5gSRRLGDoY1JIACwhCRHSJG7R0WGZqZjpqREfzSOpK5qm1KK/tgdzs1rnSCJEubhU59ymwVtn0SEd/YpMXziJFG5CxppMhplzBK7O/MPACw0SJ4rACISoaJGTMwSAp8AyHy52FPVRBZiZLQj45CHtHdwIBv59V/+2bf+xk/dd+/rz9+yOHNw93/9D5/+uZ977y3Li7Way+rOg8W/ef2TLzy/+sFPv/zdeMPTn/fQxz72kdX6eD1RSmkcl0y8tziz3tw4XV3/h//wu7/+L32dG1++dOKQxeG+lVpXG8fgk41pnEydEpGbmxPIHL59ja1sV+PzQYp0KCIsVGtlODNL928NlEZUndQhmXMBvc3E3MbU3Nil0Xb3rZbHNqy9R4lQUX2HSNIuxsxbkdCCs8R73Q/JHOh8e/0KRU3sFeQY4ICYQGNGqvHX3KzGHrCZAXV6ISXJXQOgDU0DuGAuwgLmlFdXTx9++OG9vT0ickT7mKCaUjp66eNgLN55UEHJ0+B6lOQrN+tXbE6v5+lcusALKaXU2+/ml7xgr+CY4L2P2i1fIh14oM1BjdhZa3C0oVrVjTTAPClxsUJM3HNwQ0G5NSRsVENNSICJKOeh1lrKhnuuCckRY3dY2yrP3ppu0e45duaCsdZPzUOhwd/MTIkigYNgXuOqkqQgg83F1xwvrA+duP8nAI7mhwx3chjCIzYWGwkcKkXeBTe2nSIRRdvkRA5Pqck5pdy0nVv+83aP5rObc2aiVZloZw0Tbjnc5LMZQOkgnVjLhgujV4/MZISYoaWUovdtPy6UqXd0AVt2EYa7mpE3+dZIJNJXBd3Ftse1Zhpm88Wj22JHJvOti2Qbc8U5hjAxibRso6rCzGkr7DyPs+YHlFnCEKKWWidj5iFnNEovsZADQm0pWFW5q6850/zVY3RG1iIOEe1iRuZec46zoe2MPvmULouRbo4vQIO2tUTVd6UAGpEmhKKCPFctPMWCsR7s6BaHwLEfCQhVA1oHqiB8y/s7UGuNvDpNU8QFWSyYmYqGJQkAyaloNH/Mwrn3ygIyayMQ7FScJJyJmEhn3FxDyvTVace1BuRpU0sOtzi0AE1EQhxDXgDaKf/zBKg9UGooBO5qcZq3ZG53dUCtwszXNfym5uzr7gEs8KoppUB18ZCZuSmQq1Wd0jgwKFRU3T3ya0gNci+BI1IToG6BtAg64/zoc07aN+u7RQAR6VSFExGnFidARAznjotsKIudppCZq9t6PQ3DUGvJQzqzv/+BD3/k+7/zR55x7/vOHdx4+FOPveD5L3/s8q//zI+/o7rIgZ5sHv/6z73n777kQ8cTfc3PP/ODK9nbe+dycXDPM+7mNJyc3njyicen9WZvTy499eCNo2v//F/8k2/6pj/z0ENX1lM5OLylFoUagw4PD09PTzdFAZYhM2fyOlfYkQZBZBpmBu0mbKe4wqlrH86mrnEYat/0A1D1cBcDEzTel4xWyhQ3GDnNg4S2fowck4gEWs1gVqPPDkJBrW1biy78IsxOqJuyGxwie2vfv4TWRLyGZF5rVXNXY/YOemEQc2IKE5RazTWQvRB2IHOOj5rFGqPJiVeIma0qzEl4HPNDDz3y4IMP5pxVy5ByqDhE3emvuiKXxsPHDq+NcnaNkzS9cEOv06uboSaX62njaX/YDPufed/T73nWaW0kPWnK2g5YCH4FGCosooNmAHD4rESIMFPVEq9Y7UpK3d83pLIYCoucAGdAKcKSi3CtG3LMCgHbCiDoYaHJHbEWBEfDbjIBDHMnZxECWa+54wil7SQzCm3rhUz0mtYUkeY/07T4OxoY/UUtEffCtdhdQ+gypXixTYiIJQu7J05edfIKJslJRM
LMlZgINE2TJJYkCNUttIKxLeeIUkrTNDmae6uAgts6aZs8R9U/x33MQhYOMxvyEJ8cd8vMNEyZchP/66aPTcNLQk5FtXaNXwI5e91USsJNLjVEngOlNUehjj/HTWAoYprLmkQs8/CwWYZ4tD61m+ySIzfKXXjVs/cxdCCVWixwTHViIhFJIl7dqk7uYTFGREkSM1Pf3NRa1TQOIvf9DYAwTATmyS8FNDjquxB5mPPE3Ap4X5LFkwqPoN1Ji7sz4GmO1/3TOQTuiBQsEpNzn9Fx7ilQV8wECMi2H7hd96IvFJhZe2EuXbMzrpm670FEw+oWGzsNy1VYU1Lp7fiMkggtmhCeJHUrmmIAENN59yQEEgLDbZ6+lFLMLC9GnQqlQHMrMw0pq6qup5xz9OVxQ7R7bM9ZtmdfCT1nT1F4NwyXuwuJM9d1uWmlCoCplCJtcaxODJC6ulZyUJJsbGabzSae+1xiEhFAItxm+H0HaWYOD9yfwkMjNuRplTz3Z4rt8Dm2HrnWOsjQZLhAGrI5HcJpVZlZoU3OOrGBBCIimzItl0sRfvMv/NIv/NxPy8Vf/uo/+7q6vvbLv/helFe/7lC/5dvl6uMnD1+7cduhf+0L3vfWJ2759l8/e7Sy5fL65ctXl8uzz33+fUPew5N89jmHDz/06RtHV7/oy177Td/wv7zyFa944P4j0GJc7K82axERTcOQNrWkcdhM65xFyMinJAPA6tXNQmQ0yOnuIG367egSbyKi8FKn+HbDMKCrc5iqiAzDYO6qk6qDda50VWed9uQxgNOoR33eAc+1b8T3MACOyK5qnAQQq5O7B70jSre8QwBpgIwolPvApl1/qSIyDoMrlI0E1W2qJRyyRSRoc0ScjJiEmRt4ofk3s3XxanY4M5LUWjPgTMXUYUvGk9evnqxO9/b23N1MtVp8kpnWl13ff8+tfnjh/HVdy7GP/IYb6zuWj08TJ9/Pg8hUbiyWh5/7mtPieZp0ECISFu+TAweqaZ3KkPK8QTcLAC1BUPs0DkDsTKc6ASTEfdjQ9KtTSqQFXdIAsbkzd1jKKf6ZdvhB6Pu4fhg6IpWoaiEwhW6YOzvYCYCkJF0ZiYgSw92QOPriQJexyIBJMwty2yYS0MTlRYpbWO5kxN8CDSHR03g9AIKnb2ZKIGEGiSK+GFg1hNOYwSDHYhjLelOx3l/uAYTqtRZmjqwWLoGzWps5lsOImPo4peXo5gLeE5mmSdVkyJykTiUugDzkTlraGMz78ETiH3gxAvCqUynjOEaFEjPblJL23J9DaVndmTLEFxLKvW7mzJO1Hf5uckLDGQJMxLEb4HCpiyBbvfY8zcJMOc1vde7IuMiEUQ81RIC5wZk5BbEx1kVMiDEmEYCcMzp1x8lcdZbiiYpwGBpzupSyqzgRh+vmvl+YuZjCuz5qgPOZwERqiaj2zj7O4mq1yuMgWXoNxGaWupdieBiYWSQwc1N4NmAyESEzYa5Rh1ZVkDBbVXU1xyziQyTqxRiJ2NUUgYyw3PGD8XATE8PJTVJer9fMHExxBqWw+bLaU3gD+pFwdWNKZhoWRmbKzKGZhUSxlCUDyNm9lOLAyEx9ADA3tZvNJhEnp6qacq6qsdonFtlfaKnuOyAydG8GokS82WxoyOpVTUVkALQUY8K85nAytcX+XkAjt/mX2d0D/UsEckUrbin6k8qQnHlzkyROnRoKTLKY+d64cKailZkHYrc2ElF4MYWqEAtIAE0IEGmFK4NIxMHaFvzVqzaftLDhassGMhfCpIVzdjW1ukEdPYMXTnaQ0zjSP/jH//pFL7z7r3z7X7hsz3/skj3jOdefecv9X3zt/3zuRTs83LODgy/cPHEw4K0P7339zy+9LrA8XZ0YD3Vaby5durTanG4mtmJ33v7MH/zHf/lPfOVrV6e4dn2Vh9GIi9boQqpPXjURA0xprGYTGRG5VAEJiUvzKYusaKZOxEDgfufaJboF6TWNhsyoGnPToWDmcczxt8xdFkPcKOp9thvI3GYdFcKmFnePxjGzcOZSzN0ZVKsiQd0lRM8ocR8Xxv022KaWnLPkFJD9UkoSiZ0OutFhHKeoccM9bsbB1cjotVmysES8ARlSzuYttg2RO9xV44aEDStcjYXg5B6WusIaqjNV3cWzqWDhm/uOzvzgM2mqTGWzOPMiv3rfhUfr0ZLdVTKJrlcsL773js++b0yLDZ0WCyyiJmIQFVMwyZAXqAar1ZmTSI7+xN1LNSICuVV1d4EQ0cg5XnndQklSbDJTSsSuqtYhJlGs05AYxHAiKHl1J3MxTszupPCYybl76HBZCbg73EvoZofebynl9PQ05zwMg7s34SRvznEYUiaiUksKdkoUsyLkwV9FVXUCk7hbdROn6paUQa7U5yYNAcg5Z6ulmTQxEdjcq2k1zZKYGdb6JHf3BsElajPSJjrNAUtrntQU8+FoyKLDdHbuTjIRiMJhkKjBo0KVxNypc5pn9yTqP25uBVoX0ne0MeuYZ6eQUI1xAqx3eHPApc4raMu2na0Y9V+zMGn862az8Xk0bToXVsw8SJpzITqzTVUDCB0/y4hm9hd2VvJEBKLctMIZcaSqVY8Nljg3mfJ42bRTmEA2S0j2NOzu3rTIt1OQtvo269vrnZ+eWWAO8hkie5NMbm/043QlClcUn2sRavzqEBJqIBF2JlBkqVqrQEgQEt4CkpScqZrWUrEDcmFJSXLay/GvHkzxEGnlREQuflPCTsm2km2Y66F5Ugr0xQoRO7GwMHEUZ/3Jxg+KlfY8qoqVj3e4e91MQiwpzedBgvgLyiyr01M180rRTtVSN149LL1jPW8dZhKlw81UE+5VlHbi8ZwnWu+OmxY6c0UfBxJqOlDI2ELYGawUE2wAYx7iEcScYO4txAGHkZo5jIrVuQzt99ENDk7CKkxaZTGMm7LKwpn3Bl5DaTNdHcbDZR7/3vf+4HOff+df+LqvevLx46feffiP//bfrOVd/+GN+ZaT1TCV88P1TOoDHjxK58f6ZffmN38KStUsyTSsV4+urj2+GO+5fvLIZ7zwRf/tp3/szNnlY4/dUFBKydyYMOYUYzABQU3Joxli52JqapNO8wsYty7tYAJ0JkD0lYfsML5sdtaat8Vqc7YWEYGE5QscTggZQQXIYrTarJDix81D/uBtRnvAhmaAbaE67pDtBhIxLqJOLWMeJMX4dFNLzDXjNLh7KINoJ/LFlrDNihwuoUdr1v9AyEyycFjn2LYKYc4NPzsX9GWqtVqt1ayacxzpWRV5eukRsvM7zms5PZXFnaurX5OvjcdrU6m+ttGXEE18/mWv2rvl3FQLc8oha6Vaw+YzMt6moEdU72oQEg1cCgI9c2qRPObUjWzZIa5m1SyscVhEEnWJfm+PcnO6ymBhhrmn5stG1nIFzdC5bsvLuXvPMC2GBRHhGHA31VLK/NakGJeFMAd1L8ycUq0VTjGWc2/rBReGbZGrPeEjpNYwk8ollPfa+C4KqEDZxFIRjKBAzJ3WMAwOi53fPDmZA5+7GzDnVKcu9dujYVOL7mPkyJfeo2cs5
wJOJSzzynPOWO7kDM7J3INWmzz0kiz6j/gWk1avLjlFd25oQxjdycRzhLWb14e2wyXlHcRME5PqpptzoE/E3j/H+wzKzChJI/QTqE1qez7ozyXaqXAQbUzwqPAa+NpUp1kJK74C9eCy3qwclnNGh0RVN3dLM520I9KYSFhiz6o7dNJEDGbfCfrzr/nYRR5rxU2UIzs+RfM/UGdfxEYnxu2qmoiFJDiWqmrtYpITp9SXati66jLzMAxt2NsVUuZo2FQ75lzblJwZoWm/84dbfKM2pkbTvaJCyszO2+pK4Tzn3dlRY6c/LlVJ4MHqdw9FwCFn6y+8NwFCQG1gmUxT7Nc7ZtDMMouR8U1ZrpmS9ju+61TTIXLwyKM+T1l2IAWB7VRVIoSfnBE5Q0sREXRQbs6ZLCT72R1MlJjbX2Ewe63tKFrkEm1a4urKgmKa8rher8dFFrjW4pSvH0+333G4t5Qf/Ec/+vx7n/Ht3/ZVH/vY6uve+KUffP/vpZGffZiepuXcheXe5jTDFWwkF5Z6rONXPvPKT3/iQpkq7Bg5j3k5LIbLV979hi/+6u//P38wjYsnnrzC+UwkPKC9jEIQyeSkvQSjJEI8EDnZVEqxMsMaVDVU2YWkF/AwbNfAIRxLFG3LPNLfUga258e8ulXTxJJiW0kIWlG8c7SzI/QuVOLExdy9RtAIqIGrNbiAN6zcHJOr2rxaMrONl8DnLxaLMMLz/osDFV+jS/P5VAR+nXPiDuE229GBcJm/Y/u57ARkTkZW3WLtnIgTcVlvqk7uydScGEhCSRLVV1zja8k+OYxMCf75duNZ5dJJnVajpimZKNXBLpy/7fVvqENWXbOShA8Lb+UrArWqM4Bx9nPTqFJ3EGo7W5L2KnX3Ww/FISCqndSZ1d6xPothyQ7T0gwzkjhIPNQu4FZ9p8tqryGTVoWDvJe9wJiHw71992YI2zgM7q5qgUZm5lqsmoavpRefHZBIOFg07t37sInM9GfQTwzvgEqAti1r3YBwEpnNOlpJKLEfbfaKcXRshz3Sbhy3Ozwnj1jPzLd4TnjtVejlJIRjOY2c6Y/gqgAiqNb52Xh0lszYaRQCHWdmZKaqdXaOCw3Irr+PDoid78Ac6LdvoPtsPtguPXxudzJQ/E0jGKFxD+LQlLbYJqK2RgLInFzmO0AdIufuyG1rSESUJWD7HEDl3hTOVU60+NXNahERpjYV6JDMnV+Nse8QNm3DqHb34kbtlA5zGwFA0PRM0myk1glIM5BtvgHUJgo78Sx0NhK7k1kDhcY9DQaz7Jh0zR+kcCsbxSweT/PVxgKPd7Zl6Ix29xgateKG+0a5hdcY9wAOnumStMPeRn+g1ZThslMeMXNmcXPr8PJaaxIRafB4SYmS+DTFx3IThOeoLqP7aojrrri5fTLRRPWvuhuF26vhcMJW/GH37xJxSKQJu7u5i7a9kjTFvnYD5+aglgoEtpcjIIAJQpmy755wtdhADQO0gjmVumZxrQaEvArddc/B5Qc2f/+Hf+ALvuTVX/knXnvtqdOf+q8/9sEPfPCWWy4O2e679fj69enW0YnpibUcY3l+wJ6c7CW/czjeK0v1ZbHJ6joP505OT9/017/z+7//+566oqcnmuWcg0RSrdP8HEWkGuXuC65wqDpzOI8tU9psNm3DJwwwgi1a1To9VGcsj4abtYc/Wxs/9Ez/R24ygES8sVrcnSUw5wIO0OwcyuZw1G44SIYE4/BcsU5t8kCHzTzd/qLlYZgj56zRVt1stY49znZdWmo1a5zjPsNjB3XPkt2xHFMDVYSUUKauaAv3GhevKWdhCTVYuItgsRiC1ODuwbEnGCDTy6/kd104nNKV5C+yJ/+4PrXarNbDuK9eBhKm1UqXL3vl/vOft16VxGJQjiYkJsxVg1ZBzpD+/qJHkv66zRcPIHTuAHDTyTDr1MRe6AT7dJuDw5GQg3XJSSQxNfpim8jGNBAeAP74fDMNzc4o+szM1EHkHYwZF5Z2ckOptVELzIyTuCFIXg3XxxTNF8+piztIzxE4TOvy4tt8Q83qXEA9cxMU2mertB3puOu2QtkWIH0DamYMNu+KTl1xu9cv1uO1oy9fnSlCCfc/Nrf/nIT6eWVGbD+ts0GY2YgibQOAMHf29xyqonltOn9mcSLnlDZfzByX54wSHwhgbuuiT53Tf+TCZhbWUYlGUZa2pTIAmh2f2OcEMMMrND7QiYg8mLU7Y17pdMz4WSGKJiJ5SK1LrrE/b8ew9QcQ96ZlV93M4NZu1/ywGiNrRqnAzZSoi4xqB/rz9iu3P0mBMoNHiRD3JMau/Zn2HE9GqG7S202NfABSM7ftPMDdKYmIOIx0SwjevdvL5ZJmHlr/X06NCjXXcHMwbW+1eSQeoqZIM4dL7JR3s163EaznPO5iq7E4dve6mTabTeEi4yAi681m5BFMbkgS1lisO7znqFl7DVHNbipDdyuM3SpwvjAnWHfa2ClDiYhCozR+P+ac/QDLnFTaqYtXUppon4V+WMSkXogwN8Vy7hPast4kGROxsjBzmaoSSPyOM+nf/buf/sff/z2nx4/d/8kP/73/93c89tClCcOFO8+N0Gkle4ejyBOnq7LcY+XslYWUISS8XC4vX36caS8d7umpnpSjTb3mZTo9xtHVaRylrmvFtFgsMrcmj0iAVEoxKITD3dLMqqmBZhtKI6gqOkTO3WXWggXCDC7etbgDu9omu88Cto0b8TtLyrVWrcWSSU4QdmaDp9puuHUZh/45LCIxkHN3K5WSaKjCNXFWoE8EEfY2fZI842qZ2Url0M4sbUcw5ExEtc9C4jdTg/vFa95+IeZbsTnKmZmJuVvU9PmK2jRNIY/YfjRhf39/GAYgBAMIsFKKpDK95Pr+//WM6zrtbcqX4niP6xFGcV1rljGL6snZs/d86VfQkNIxbPAmAO9ucIWLJCbyMJbtmr4WsKm5y+oyrn3G0848M2KjFxlBRAQUpnwxoqxuXC1itYiEnQa1OjMFUYBBwf+MImxXyonN4Rq4OMlMzE0wSrjGKoaZmZPkFL1wZonRUDxv6dwy5yZjNNfv8Q/SRTmYWECxvW0/vqpZG84Yw4Mc1heoMcYKjWLemdDGvw5IUVz0XEWRVDa1zNnxpvjST1wsC2PxDqYu9L8tJA0e5h3uTsJNgMO20Wc+7nOcJaIS717j4nsHIrJZI5ijDzHILLEgb9eN2wPRg+BuNJyj8CyMFb9ahb4FWnY9BN0yXNGpH/NzySzF1Hd6jjZyn9pNjKvst0KNtwCcuVoKnGrUlLGDbylTW9XZbk6oIhJA205rvmPo/Jl29VsqhyPSauBTrJ0E7LQIu+VLhBVwb4IjusyMQ2lnIHyE2id4KKoxBabZzMxDJzwRM3mwA+cH4e7Tar02FxEZMnc5z5QSgUAM2a6T44Fyn/XFRakZkc/Tkfl8zp8/DEMUanF35kbWokDvf2ze5kaHnVIqdVIzZtkF3G4DOoJ0Hpe0BQqgzxu8c8qre621rW/6Q4Q7i8zVtvtWGw8dt0/ebJu9d8xxkVGO55yZuNYahHXM
92SWJu2nov1EYUpCauxEbEoVLl48sTtjHA/+3nf/6x/4Z3/r4q3lzLlbf+mX/+Pe3vLC+TtFN5XAlJOkdz1m+689uHZyfGaYbh1N8wYGy8sNhk+uDr/sq/7MmVsOfO/S44+986knjk5u8JNH77l+bbXcHzZ6XARUrJbNuFiQe4Uxi2ooaSvvoDQAuHmFW6k557hj2p0BOaCmf6QQmWd75rbDvgOA1Hfk0oSAWj0HzyxIsV+FVSU1Ek5JAsQ054z5cTOoTmWzWS/ykFNa9yLSAXKob9NneG5an6ygl0EApmkaJWEn4s2xNILA/L2qh5MhWibuXypawyiq5oAFIDu1LifgJiF83mp3cE4551IqYCJZ3WrV6QXHvrTFBy4srH65X30JX79UJk9MOB7ksGJcb3z/FS8+99KXUylC49oUQdSiKHipwhIzCTuTW0UX6vEGPFUzQ68Y3B3Y1qbqZOYEbkvAqhqAmD7KRkw4LE60MzNLajSNOZFbfKYzCGlHdc49Bd/EK8yUlZljP0Mi4XcXnUnyjt0NSuKYsvXxlLs7gRrMJDXKo0/sHGLRDZ8DdyBzb3m7pFX7nhog/pDZi60ep9DUide897jo0CFvo0vvHGByJxaK5aP1ZaG1hwwBwZzmw0QANxnIuJUtu3sD74YzDHof09Kowsxm17lWxXDUPhaWhVEKwFxrJQmSGeKrUdftq6WhSXfD5XwWd/Nxu6rURLexk54BSBfEaL9pzqB0cziev6C7wzGLN0Xeap+jFssFNQu7I6L4Wze9gbFS6nOt7dicHeaqqkLDnP7ZGxADQJUuMTHTSwgAwo6EqKk4M5E7YM4psMdhggDsZO7dYwMA5upO4K3BZRMB2FpvRg1UqQ15oh2f08/8Uaqa0lZ5xcxUG0E7iYTgdtwEr1pKWRwOm1qD6TRnNY4adqqp87wD39S27AbcrAYVP33+cMRwfpbmFgKTmwfYsgdfrNdrMl8uFierdXXLidaljOPIO77O/89iZb7Om6I/YERJkntzlmTvFeoOsiwSTHxBVaVizgHxRjT3tdX4sFKIKAy8rVQw58VYtbhDmystSBsuhZhD3E37xRARkgio+DRtypiXTATn82eX//VnfvGf/tu/+ZJXHWzWuPbo4bnzd+wd7B+cvf2hh+8f00I5IZ186nL9/dPnfsHtTzz8xCO3HdpyoKdOdTWlYVj88LvseV/w9HtffOv1cvW+V7yAaHV67cyrX/odeQ/Xj09YlAepG/NSggQSxEVAExNC8cZ6KQmKTqXJtoSyW85BYjStFGnYsUU5mIcXC88pfKfonENcL8IAIgYVWHgIkDmKuirU2AJyse0K5qcpcAHG1DDz8XoOw+BVtQfd3TMwZ/E51KQhZx5CAp8oXiJS99DMiQQc0cE6n0K6EnUmsVlUDgBRLaXt2vqPJYugjZRSQjKzGts6s3G5EEnr9TpncVcGO2zzyiOc8ua95xZsd5qaTfskxVVLvp59H1wWi6e/7gvywb6fTuoTu6BhaFoAUzfb0g6zu1r3CoMkZqE2uwqElhAZM9jhTLXfxiw5zrO7iggVjel1SyPsgYJmB2eVnJrgsyNck4kkksAMF3UKyVcnojyk1JdxkdfqVIKAF48mEUHNwtvcwkI2Hr82U2MDPBiowrGyNlerCmdOWR1E5uay9dXaUUkkYnPmpvagVs0sCYiZnNqyamdjNweF+df8B0jatF3dVC0WdU4IPxJW7x2ZKTkBKToVb15y8WYxowQniDnCq+SWSMpU53ozxr/BBLAdiYOGYwRSStXUzIRYCVariITG9aR1HMcmizgP2HtfFWPe3dj3R96Z+UVtYIeeaZjIyDisNPsvswbccKBqjT/vMzQ5QjDcBZAE8KQa02AWpK4kR0TzOjClFOwsMxNJOTajAVMPgf5+xXGh1hVN5+ufC4IGUSeCtexVHZDmgNT/HMJ7KlapRGRM84zLrOnwoX/ZeO4xkDBsv6dHrRJjusxmFpQD2pEMC7LsPOGI1X6cMGbOw7DZbOJljsJuOYyqJRqIYFhFTTOkDCY1U1P1tppxwKxBxnYfJfV+Hb2bmXRa5MHd19UjJDDYzIkQNM+Ukji0VFIfRLTPCclc5tY2tVC4RUHvaJJ779Ww4zJi/Uq0zye8Y6niM+cOOBrgnVlmr2gN3Cclc+4PTz2wNwY1QLMEjbfRSyNB9MubbMWyyOO4Or0hhqcuX333Ox+98sQTX/GnXnb/hz999ZE0jLD1eHL9+qZOA2XgpFo6PDy4euOJH/nDW287s3zWwfqTTz2xd7A4WFzcnJYbL37TJ3/25z71S7/4K7+8QjmVvP85r3rxd/zv33fLxWce3biRiJKe19UpxmZsambCDHhOyWql3pu2r00UFV4YrYPaVq5Bf4mmaRrHkYikfzGP2qg3LbvcJKhpH43OUTHuz+BUzQ0W43oDqlvRkmULL9p+TquHiAIe293PrGUKBxN30ibc1Gq1diVNvle1xedYEnF/NdA012jmH/M2c1e3hIZvkpQiVsxxvp2BeZMY0l3E1tnwA6eVqqrmnJMM8d6VugmhMH3Fjfzuw/2TTR3SP8fyd/3Cl9XVnayU9s4lN+J04dZzn/Vidl1Xl1SIxsRZde01jpYw04wligFfwHh3mpMAa851KqDWKicElKTdpRRKyXUbtOcXWVgSsWmFmpGKACSuRvDMogEtauyaCEfC7BPUt2hzSiRDSkRYDDnsjgCYaqqlhPGzEZxDnsxjx2FVHc3Q0LWCmEXq5GQ+DIMRQhvXqiLYxIQtGEGkmMbbKMNgpepUFymBoEVda+hytctzRBXGRETcAE3kWkM1Rtw9GcipmI55CK3z5ETqDK91QylZZnUlJrYuNyLsMfMUbuvPJBJtq5s4ORhd8iGuJM6KldqiW08wQuxq6Oi4SL2xaojuQbWyc6AZA/cfibbpsxNxEnCjphFT4hQ1NQMViJl583FyTFrNPOXMoFqrkfE4sPDpZhMlZ8QCIQI1bHl1kyQDtzCqtYmkg4hj92C+ZLYoqGsQuT3nHEsH6i2sWWkdktvJRlNKDmdmdSezJl7D5AQRoQovZRwGZ5qHmT3WCJFVt5QYTqpKKZsTtM7j3BDXFSfv6oNM7EzMKfA+TETmBBg3nJq7o6h6dZZaK8aBHFY1syjcXFNiVYvmWFXhzkzqRl2ymJs0Y+sClZH2FlqVQu3WNBxznJ1IQh0lgdw8PEwmGHnQpBKMVRVt5RwVRc+FBCaOvcAM2DGzzNmoeckl50yiMGVnEfGG11KgWs0HSwDURYgSc1igoeuQzwMVIso7dVBx9W7qYGYSIrBgZgGMrZV35G7uOhVKWSRLN9HjxRASeOwwZmcRSq4O8ig13B2JBeJVy2qN5TB6w8h6FksMhRiUqjkIrJMC4ODhsIyLw2GwG1eOxOxf/PCPDqN8+1/+hvW0ye9+7sc+fv/5i3tIN2T/ItZP1I0Qcyk2DLy3WL7kxa+4/tSNb/jJ61/64ue/5o4XfPxd70i33/74rV94+a0nhxfTgvYf/tR07dq0PLv32j/2Z+6885lPXrnGJO5A3qj
o3rgsTXul9fvulZnJKElSdm26SpUMAyVL4gDt8BQinEKaInrq3gMAJq1hxwlTFqa2N22BxbRWasDJxJKoOcNLLJsdLR3CAQQv3/pKzqy7fwS8v0zc1pxBGiTSGpET8K5+ysQ0BBvVXaeigEQbxKJu1ri/W+fZOQYyCOZuUVc1AiqZkbOVqO9pkQdTNRYzy0ylTjAPzJc7uAELM5m5UXIR1icuX1+dHmdJtZRE7IxJV+UVR3v/5q6azctGPf9aqQ/ccvEF0/q51y/dm4ZxdXp8z93Lp99TJuSRsMbK14fjmWEYpk0xYpEE90bNIaAq0Q75Tau5D8MgCLlJh7pHmRsu1oyQM3GOUqNGx5Vjv0sunBDTIzcjZSRVtak4a6DQg7vrMcyVHM1ab3ZBkMCnh9uAAkXVgaLaNkEa4VHybs7nkKgmgyHwkN6nqdqk9lsRZ+4ACYl2Qb44FrXWmQAXENNaa2hEzIhTVdVJox2k1ptugbKELkvbmlHvxTUFWyPUmhROzNAYOOyIyzcE9E1IVOz8mrP+PJzxvv0NRsoumG3+u0FsKWisJGr9IsUOf676c87eMZbeYTjMHG9RS1HN56ehQYJVddMlBiKOaOYbbDablNI4jq17ti797FuxazOr1kdJXa3U+/4bfSoV0wioxWI/buYc0IPFOPsQQ40ASQkOV2v3p3fJgQidtIYY9+4URKt6U+Mmamx3dmrInbnGj4YDfboAQDs4KLMArS+coXZz9+yMlJg5lINqtcigLSExx5hK2tZNlYe8KVMpZRzHuXFkZkVzH+KmFsJNoMqnMAKZPQGpT0fiu1OXFHX3sOClQHwEH19N3QCqWueDHbzE6LAXKVPfm0pQqxtOp33TMjVpFyZxkBuqG1HfuTsaj6sBkrcnp/W+3uTTtSoROXl1AyyIjwZrvhDu7N58COMOzyLe8WkeYIQOhwQ1O0MQizBR7RLxwYohSSwMUyaJ0DOOOcKFmQ1Dvnrlxg98/z997gsOLl64/TWv+Zw773ja7//eh9//vt//5KcfHBZlfXpjdR37y+ODxbNX0w2F7o0Xrl6/8oRd/7Y/++2/8daflTP7P/P2T7z91guPPXHbU+/4g3tfurn82MnmaDh7VjY1P+czP+Pvfe/f//zXvPLyU6fjsJhf4XEcve9HrPN11KzUmnNWNWdiNKufLkjwPwkdmDWk1Gqn54aDRZTdDUVMHLoWcSerW5Q7MK8W80SX6IRujksAFouFdx5w/BLinHPRKhR2XdSBI+xq0iNnqBt5H3iOKQPIYSBhZlW1N6bkOxCNPqOOk1zM5uhnDX8X/nvhTdBaDjMj7qi60DRGBjHgmhgABUsBwkRDSr/7tt+5fPnSrbfdAg/moNpnFT+r8jv7PG0mMnM6TMOlE3vA/NfSufNF0tX1N7/8c+XcudXR6dJ445rJix6HN50ZYBqAwWbtRQh0etzGCkf4oqZ+n7mJVMfW3Fqzu52TtfFYOBHCqltMNfqmoRFNravIhUVe5qSqppWIsgikmXD3u7sFCrh7wF+qacwzhtiI9DzE2AJqWHLDaEQGjcScUnLXKBlg5k4KNAEWbi6Zc/MX3V5rEOGY+U5wSuJhpdBtNDrOqy9uGarm8Ibzh5coz/uEDcCMDg9ZM28QOw5nEsVNTiONQ2otSzOzEfEOZDE6lTmTBbYZAc3tgkHwNkzOOc82fHOqiIKjtNK1BWsyD7/r+Otw7IgmzFEuLrqVDESUiDFku3nhp11Dipm5M08ULoATgudKO95wZhbC7u4ekWXnKIQjztaehcwh7IScpVZnONTIGjhcOJUyoW+esFOuWe/4Y9I13xNjdLhPY5KxOzFnaTTB+VC6bdMbADerbhHo5wzn2MKXnBuR2hJTgJCDUUccQVN2CNkQYmu0KBHBtoagmEZyTlUrd0l9RzNKsqkEmAWhXO8ONKm/ltvmiIy2/kcgFqOeiEfQaf6+A+yPbxQzmRgIJycymHkok/ccG74jTAE36wg7atZTYEeZr203lNP2KcQBZu6ny5smFkDoJJnws9MdLUzuLgItATB70ARimBd240SpEQdqc9CKXbtybLXUNA15vig3SymVUiTZt37bn/+p//zjj91//KKX4IN/8J4HP/3Q2972m5euPrS3uOU5zz73uZ9/8Fu//MQH33d9eW5Bi3U5vTKMtnd2+RM//R9e/9rPv+XiM3/gt/+Psn5M0nD+3D0veMGFb/1rT/+tX7py7ZFn3PfZL/0rf+1bzp277fFLN4Y8MnNon3XWkLOwdAUoo2ZWPd9VZqaG06X43d3UGIcx0hL6sHr+r9uv2UeazhxTa6MW0xvrzr3CzUyndiRiscM7OhLtwKjFQB9Ehgp4O/kUSKvmmxLAjziQ2q0volmiJN3pdqebakv69kO5ky3nCjjQNN4bmKo14jnIAkqr7kwNMoLEQx6JYSC1lt5AIFdJ1C1Z5OOf+PSwFIO6VWI3s/rKY0zE71569Zy82gSpZTrh6sNi+ciNq694xR974zd9qx2vRMQMJcvhMJJyMeXQs1QdyFNKlBLAzjqL+Vg3/mJmNfPmOdslJTpGek4l/T7E4D2MwrlW9ZlxQ+Q3q/ZWt2Lqm+o+hXYHNbBeDV6teuRg6hMxapKsgflpQY/TZrOJtz3Mkdxb5RXZFz2hujtggAQKZuZ9dj5rfCBijmcE3dEHjoNV0eZzqhrbUO6gFTOLRrbn0Z1uuIu/Gzxx0w3vl0Rm5uwtVEM4MYGhpqYzdC3O8dxc7Bq3Wfse7T1srW28av0Let9Ms4O4kfDmAx3vFXWolJlJFgDoDvaZZeCtdUG8c+hws/m1pJvZIMycUwrgVXwLAYXGLzlExPqBaKfBmkPLfElzctWboRn9SihKInfHDFXrfyDqIS82U2jinqPHlGAmRB6IysA6ewd98ckMIo4RDFp1aeRtbt/rPACAIIp67wOD1Dem3H1OrNSpUxUp1rFqFBuvLnBBRNr1Bb2PQ+baVq2wkJkHhzKOHzNb9zSdb6CwgGBapmmKwkK6FIOZoY+yd6NzBHQEBCkeYq+EpNNvWt88v2zVnBB0ICdQV9tmaz5xKXGthjmCSpJeVRA5mbcNSANd7zzgqqH/CgCBloDP9JhozYkxP3d3Z4d2rGxkYgHZDtCXnIRD2aXpM5hbBYkTd7HunDO5uXtggc3JqpZS1hFhctPuPzzYm45XGD547kK9fnT2p/7/P/vQQx86f/7Wu255NhFfewqg9C1/6/yH3zv+t5986PjqbWcO946nKycnJ17tHW/7Hxdu+ejZw0F4EN5j0K/89/tNb3nJKy68/akrX/SGLz08uHjlqZPFuDSzaZoWi0UxlZ35CvVRR5wiIkxWAAzcVPHjuU52E0m6v5ut4o8qMODQtUu4Mwlj+xzcPYBpPk+eumOpdSRte1xubAi1wjneppT+b/b+Pdj2NbsKw8aY81tr73Pu7du3u+lWowcICUkIJCTEQ+ERnoEqkJBDrBhsbCo4wYBtgisVJ+WquFIuUsGJHedRJYKJnTi4cAxFyggMMZJsCDKWkQ3IQlJLLanV6m61+v2499
xz9l6/75szf4w5v7VOK/k3f2l3173n7rP3Wuv3PeZjzDHHDAsAntCz+PlEVtikfMmQQWzHgBsZgMw8Hh5zrgvmJgToa8+1XTfNdfutO/ctZJA0ODLjOC5+GjaGXK+lmUuAKNx9IWOtlXEaJ1ymDxwr0nA639/fjY9+4hM/8IM/9Pa3vz0zL2tKVe345jfHDzyN58ej0TjMzus43MfpHu985ytzvvErvvIdrz3h58Jzxhh+t5gP82RPl+eMMNXjHx8zc9zdrwy3U9w4HbNhZoMW60IiCUe7kpcnl6A4Ou2q5BprPGetp9E4Gr+Uq0GRRdaKtXIfDKC8jZQ3MpOE3lA9BV4KVKErrGKDhESidxFmtmXldzbt7isrdoJyqM0RyIhYwnkUfN2SO/as42gi0pwTNyYyM+dagm3NrFnQrQWfi6CyiYXYeTl+7lfS3AKQnrDiA/U7cS9nJytfsAdjvMR9sD2fWMY0cqd9q+rLZV7tthR3OimniSxuMG744WyDp0tYZ8XMo+LYGoBT0XeFXrsaVAIR/SG3Tdl3hjtXljtxY3OkIiOyI/Ca+FoVgdW7sGIR1NhLj+pDGD4i4rKmn4azxExuaSZ7xfbpr0BqrjSaenDNXIOqc6mdVKb8Okyiod1tSm6jt3qvRM61GiMZtMwryej2Lt2am2yFZ7TV8x7vuA98zcUjMWqqkr6utHAOktSYTloiFV86G+yZc2VUCrWfYgMANzTD/TmLE8BExFo9ChMwJyKDGblWhr7Lyqs3JYc1VKf7yG8sf9PFs4bolZiX6jU3UNg2GbUs/dT1Km4QF7SJJMHiAbn7leu3YpjNnJE5SKOtkqrl8JGIJ6f7u4dzZhpHBJ6e7s42vv/vf+e733v3o+/74Hd95//2Xe96z1d8+dd/xde+EQ/23/zDN5P8v/+5z77znU9//z/72v/y3/jl3/G//tBHPvDK+ek7nr/5xhl44xPHRz70AS6cn7yNZ7x2/9TtxV/9Cz/6/d/zRf/Gv/Vnv+FX/8pPvjGfuB/HoQEPESEIaoxxmYfaefcZ0/1lXk9ONkvgdNNukLvLqL/qO2atW6V7V1t/K2Kls60OhbzJRhTO6Md2ZlwfI2M1cGVgkJVsLezDqaQQEKUob7/2J2RjPK5tjKVa1X4WufaF66PhhjxIwrLm+qU4AaL+pcBNmNvp5AzbeVomFjQ0BKfkistaKx7z+ZvPnt7ff/LheOWVV9ZauaYPn7/++fk/fJ3M5evuYQ6cn8PCnDOZp1ff8e4f+c6//v/+jb/9V//eb/X11kM+2Iw1Tkc+1wChk3kMe3x8PI5jiRLq189vJbKQEXPbkylgychwybvhJsDa14fkTEHE1eJ/+1dLMGYfpNPpZJZia94GOlejhMzoTj4Rr+biDGtSpwL8OlJzzrWOW4rKth06NDcwYCXjm/yxB3fw5a/ri+R1d0+n0630BAVaVUBRKXhE6CLNY2XmsFLzUSunKrIO7rXLm6xdibj12LvtovQD+yPxBvlhz88p46vo0qqpzvvrdDrd39/vUGOttfDSI+vDm9n5fJZ1fpyHJuLtAIqk93XeG6Y3un4nS7lm224mvDvw0H5O4Y6qs7d1o/2wuJkEsiWTUiHPzVLrOxGBZDYxTS8ip3X7+bUOoitrmrqSpx21YA9cOuYOvzZ/rVr7b0R/6uC6qaKsnm8dyMuae2uAG38QVePsELWzhxthzn2G5YCGn+7O97f2sSyU+0ai0hjEJqaRvKxZAw0bIDGzYb5VKnUdzj5O5nU40covG1e/OW/1wTIjoloqh6caOjJzz4jMBNJM/cuxH6QWpkqzGGP41TXUMd6VsH4dWCKOmdJI75FEtyup+xgt13A1xMM1UNn66/ZBzMy80LPb12RXbeKYCtPnvNyf/XRaH/rwD338kz/w1/7ST33v33zlldfe8y/+yX/ha7/2V2cef+Cffe/b3vEpO813/IK3f+6z+Wf+9Ed+/Afv/2d/6pd91Tc8++xnP+k8v3j26Tc/80ZcjjHGwov1aI+PLz71mTe/4Vd/w1/+K9/1Lb/vW956eLw7P2jBnzx5QkCkK+UAo2uWmTWjV+sz/OQ2AtxXZt10c9xeyS+4WduS9GsuRerskgpJs7qtW2i2jI8m2q4oQqXZuPnKFvHeplXkj62J0Yk6IuLs11/0G7m9WEtp4jwOZYqa2CWnpZMPVEJmZoyMY67LEcdkpEM4dEhxYowTYRnYMpYPD88FJpnZ3TjV1UiEc2bA7XS6A8AZX/NLfsnv/Z3/necvns11QazT6XT8ohf5nunf9yRyYq5nvt4cl9M5gIcXePHxT37MPvnWvJzv7MmTE54M2HC7eyXvnj6mtK7iiLWQp/s7O421DvTQs6sLaBu7d7DvDm6v5Bd4XwA3M32vtjSi3vH2FmTnkO4nac2W0buZFydlOGNRndB8qX1+Rhuyqy6EvtQGw5FyltFzLveHVk4mDZi0KwzoNzpb7n4chxoH11oOCMLdZ+XKQ3Enr2lQV3uXzPeZJxsOdc0OFyvLaqIC1o0U37Z0t0Grvn/EWhEjdSUIAeJdD9j50F59/H8r82wEO3gNjafFKkQw1aiX9tLngXFqvu0Xxicyc/v1q8HN3QkemsVtmtPdJSvDfq6rd+n32r6kP3amuNBbmSSxqhKg8jmy8PMgqUHOImeeTufYslC0Zcqjw06DwWx1X9lr7yx/jAFjZA732UMOBi161BKHuZrQKWmu6mRIe8lx7o6nmSEQIrYUhru7X148jDAOp3oqyBUxQBqP5nDGKqgKgLjcas9fa+1pgLMHnu9jIAu4r5ms3pzzRNJokSWqQM5YmSEmQTUrR2pQB9u67ePHSBLZpI/Lccw5709nGx5uIBnwhRVhp9pB92paswrqNzBQZ0CWeB/O61lljdi5HuPdN8L6/SB26uXDVtbdP9+dgOvRYuW+VVCvwnAVjwHSaXY2jcwZN81LPs655hhDZI77JwN4/le+8y//jb/6H/3QD/9Y5iv/xB/8733Dr/p1P/ljH/rb//l3v/HZT/2m3/b6n/yf/qH/23d8+H0f+E9fffLOIP+v3/G+Pxy//o/9yW/+03/qr3/iJ17/yl/6yz/9Mx+Jcff8+cOZr4/EJz7+sT/0h//4/+Jf+1dgpw999NP345VBh9lxHGrcjwjR0zd7sQwO947ctNK5+XCdNGRi7HD8JbitkOcbm+6gjs3+DpbGdg2ogcL0pqmCLrv2hL747Fp+EpwhQi9U+1eUSbPzPSrNSMtIpGdEzMfHStyLKtVISWrGecRxuaAxVYUjty5Hp0WeqpbopnG5qoBmfj4nrGM+AsjH1FRZl8pHpoHmPjOYjMCxptmYucbpDszzCcRBi9PZXvzG5wi88o/esU7BtTIQa4UFja/ev8LT0/HK659/x/3lK9/7wvKNh4fXnr7D0h4m4HfEzExNa1V5CMBQetm9GJUo6vZ1INUewffTvex6X+LmtLBdfStiuQu6T/EE9uWy5nkA1zrgnFMTmWhZ3J5MDgfB4fCKqTOz533egIckafAosPFyuay1/HxClwNJSpWpxsJ4j
drVK6wMN8+uk4lku+2dIUnu0GnXRcraVYnb1K56Ot2Tj+ty5FxzXUTNBbDWOplLbtjdsTEQNla5M/i4WlI92rocIiDR1R9ssn5YUbJzN+AqeRVHxQ47Xkar9p7pZF8eH0VtW12E3tFAZi7COoeT51PkIfKNBh2czAjMtSTeWR1KO2rIcpwb+dkX5rZsv0N4ufBoxg1ZhSo3M3cNvc/MXDFOJwLHDCDWnEFq4xreVHy3LNsRiu60hcnmkv4c3Y7jOI8TWlinoshcGmGYrb4Lgyc0bnLLTOpxjlib3LRPpX5gznm5XFQ2BhIr3N06hZURMWWO5JZUMzMJQu0EMVRauz/P0AC16u4ddmLXeMyMqLJxRDgdAKMS3GJ7QRtTx+PkV/VBa2aKfL+iln52rsvxuOJ8dzeZHH5qhCCiQEjQI7COl+rr+58hcvJaX0CmjQhxrbTp5lVd1tZLgkTHqY6lpRm3tloba2MW7pKqdAKan1rvUl7+pTSCDc/uzSIlXGGf+dTH/+Zf+Mt/8f/x7+LhbR/60Kf+tf/NH/llX/s7fuwnfuT/8B3/OvnxcZ5/8c9/8F/6V7/8d//j7x5/9Vd98IMfOZ8ZtD//77z/n/8TX/dP/9Gv+jN/6mc+8tFXTsw5/cn9q6+99o6f+vCP/5/+9//uH/4j3/7hn372/PE4Pzk/HC8eg7by7u7uchwRcX9/P9fKo1r8RePwnhOlstcY50Qe65KZp9NJ3aRmdrneLyMHq6Ab0h3zlWVpumsWrWh0VCdFEfhLs/AmD9NEk+3Vsza0LQwwNhv0BqXIm3V2d6ASBrbcVa9/16SHzwgD7u7vzUQmSXfPls98KbknjWDjf208I0nQARrHseYqQM7XWjT4sHWsy+WSmeZuwDqmlcw74b7ox7w8xGH34/F442mOFeutZ5fj1z3zH77H55nBRw32PA9Dnpg5X9jgGvfr2d3dHP48eH76cExyDYD3w6fPyDHGzHj++DBoJx+RpVeobiKSjDyaHL4JHwooMjKJ0zjtn0QhQGWs3EmrLpW9a6tnKmdaVuAaSaxjmekHDt4UIDTKU/VckEjFRpTJJekVsxrMHMWlqtb+OWe6m8HcaOCyK00GNHLJCw4/nU62Esjsqok6ya5CoPs1sxVYCD8NfaIoKQWLrLIuI8lwt5mx1uGnYcOP48AKTtELyYU5L0qjH2IaOTTNY62aZeSuuzFjBjUJIm04wWCaicdZYp4qmmsnzj5qduO6ipUoTbwK3gJpTmgeakXPaa6ZYmOMY8455/39vbkdxwHkyf1AWaWVaYbM2GK81bEKqOs/Ee4u8HmEaUA3Ota6OIDQ1ZFZn8dhp+FEkIvm5iHBWOIxcMrEXHTbCZ+ZISJG49iJyMgrhZjwU0TMtWCs2c+ZnOv+dM6RDw8PmTkjzufzk6f3D3M6G9RVO8sBAy5qapbWhDI2PwNIzHlR9HrXYSnhPmKFXUFd9ybcDc/M2SOkSGpa2pXv7Yxcx0wFnrg/OZyRsYKkRgmcWPrJ5Z4Tc86FPD05+/JAzpjmLpCfMyxSw5T0YQYktRGBPPlgorqz3IKZKyxyOozMFQMmZ6x5UyNzGZxGHQDXVOl4MiyGR4Tg5vU4j1hjjBy2iFKXXNPcz25rrXRDA6cqJVhkxkHn7EFD+jrd3RtZvLnhMCeQKxx5zMlM6gwgIxFOzfgdZiKixDHpDnNoHpTEF0vRD2jmtmLdAFZbcwAhN+IWmbnmoKWfFo7H52/8pb/wp/+L7/qp4/mr8+HFP/HP/JO/5Ku++UD8/b/3fc8/++HXX331dHf/0z9xfNdf/sy3/YFXP/Tht97//ocnT17hyucvfub/8n/+zB/9l3/xt/3Bt/37f+YTfHqmDb87fejDP/En/uS//Pt+/7d/4KfeIH2ccr54MewkhGEdERHn81kp4Oq5EUGgLRKtBnAdcWTmSM655nzcLZQn2JorbYm1K0HgyNT4ry161ZrkXGtZJoefWozWCQ5XM3QUiQ9mwnYrRyhEoTJtc46lrgWzze0MIkknsGJFOAwr3MvMHsfhY2y0RoocERiAuvOzOhVd8IRf61R10WTWaBYqWdLWWitXRoB5OjJJMcLINLMZayLcxkqY+2kDh0YDc67DfNgxIk/mPPvpdPrgT/7QevY53L+Sv+b543/3+eO3PeMn7fN/4kNPvvOV00+cnz+uV+4G1vPF07rL+8vp+OhPfvXX/bqvee97DqaUSe4Ou5xoR4QPZnjCE5qlTfBILJpnkv7kyQnGiOXmwieUmVQ5YB0Rgch57o/dZfzTyQDMdYin5SnQT0y3HMchUgvIzZg7+VAEFiG+ZOVF1b44J82EPUDNDsBY6YGgxuKsgcrTXwpj3R0ac5Xq5UMQucfXtJBQgdKRJ9pSJ1lDrztg13E8jmO3qyq9y07eOx15qRiuzxTdb+7uSp2jmrHW5TIzM09+d3c3L8daSyLP4pKoaFd9nNbqwXNFhJ2Hq76uS/By9oAbulYHPlFpKJtLDVT8qJGvN0lAjZ1w34h9dlyMTOXaK1auVhoRSMteruEuqcu16kWqB98B5IrjOHDSmEvEXDSjypYRrpoHqBa3ORfcfNTcaZPmzm0KpYdvXRh0+kKWqngx4LqUoIGmqnBYDw/JgIElomJ28uG9a9KRTBQkmZmMtUC1MmaPTK6VZFXObmbxEkanb32ZvSP156YySodEbb/Zunobk606BYGbwXAakw6xE41MnOi44ShFj5eA6ohiWAyPatBsc8MrJ5F5nT+x8z+Z1rQiZWWmBtidfKxc5/s7JE1tfu4adXLyERG3rJyaHbOWoeB0NlxpTbTZqcw2AXuhjqOHLXq3S7XImqih2ROvIyIdEWF01YkLlyECMR/WGMNOQ4O79Cy3T5ovk9uddxEvBsbPfuwffubzP/t3vvfhbn3xwc980zf/2q/95d/44Z/+yI+87x9993f9x6+//s758OJ89rGe/p3v/tiXf8UX/ZZv4X/9X//sT//wl9298sbwVz77ifk3/tIn/sAfe/INv/atH/7+u1ff9fDs2eO/9D/+V/74n/wX3njjTcWQDy8u7lxZFJmjHcycMzK3nrMMnXdioMt1lupLH7y4KV25V7kuM0vIpcaxJcQIpFKlVFltWxtTZCsVbr8yNKPrxyTXOoTs6CdXZs4FLBhjrQlmpchFcdC4NjsN0hlXIot3052AE/0GAE2OM7Mjlnq+5WxyvdQgsJ83JM2TCVWLfaiP/+RDIOgYw8m5lpmdxzjiQCZoJy/2T73FWIm7GeRxz6Dd483P4TOf+uxXfN1rn/69n3rz21/kKXEPAut3Pnvrt7/19E89ffq95+PF4QP2xNLiFA+v5uXbvuVbn37xOz7xqc/e2Rlux/BT8DEO5YpOU0dEX66E0cBqEtM/Akr3Xa35KXAojQmvvc6N3vdJLo28ZKlJN0KfIyMz1lJYQxIrVuQY57r16q4eV52APOYecmNmSoPDuES3Jyxt1LDV6wg2CRhY7j3LJG03a1Zwd1NIiChpFaAGY+yHcRfDtwa63rpYCeRuY5Gd7G9npm4HRGktlo2Q
JY2lTBHMI3NYpchbdHeM8TiPhcwISSFu1PE4prhtPYaItyjuF9iR+lQ7RQbgzEyCkLBGY3QUAaF1MKS5KLD0bpzu7u5GQA0bmrtJ0GFWdqFCGXejkVZdXLZ7zubMzOr0IJG55owbqggly5bASu2B1ouFWyMzsZJNMdiLnLd0TSlzXTsmriB2iOIZAeBeiBaQmWrUkXS0BHSORmfAugzJ6tAuk13lBnWfV3dHBCuj7U9XTsUMWXV3CWK4u0wempRYO8V6he19tVCyUtGjujpuKD5kRCysyBg0ZjihStgO2vYtjQjOFQYgo9oKO16RH8L1S5AJtgfVtMoqPZhoPDMWaRhF+zKzBCPj1GfPbqZlk7SmmMnY1gcLSRIbX24KiNa9AWDyrcScepcU/1LLSNVDI6TxAeByWTlUf6xDMnNFzJOd07p5uqMTc8vrHNaK7fTZJt+4P7/22Y//2Pve95+89ex4e75+8meffvMzX/nVv+zHf/KDv/hLv+xvffffeO2V8cr56UffeHZ3h3GKN9/6+L/3b//dP/7ar/hDf+xr/nf/6k+/+Px77l/9/KtvP/3IDzz76R/98t/yO/0Hv/+jD8++6D/8i//2b/jNv/4jH3l2Ot2dTiMiXnnllcvlwWzMo4ArTd+QVoZbVRO0nrMZbSQ1ZELn6nw+3/qzPXcSV5Zoid6or8F6trRC5Ig4YjGrrcvM4GaBE73y3K68WEvWADKOQePIgqCwIldWctK4IwAxgMYYpEQX6vKexhlZAgA5c2ZgFaaqcqP3+8pLczhWjbnbFSvlTiywShQbCyKQl2MGYS3t7qSBay64FXM2xVmpgzoTPmY+Ph13j5d8Y744f+wT3/MH/ujxk2/7mj/31f8gHu3Zu44E4l3B+7Sf9bf+52+98oOn1/OL3vzM595m959/441/+o/+D/7YH/+DwXc8u8wnd/d0i+fHY6xYsDtX7pmMpc5aBbXjRA2mkwq+G4MR4fCV10lWu9bangTlCFYJgpI81tSYvzQC5nQyYVzHVDKgEGzQwmDk5XI5n4e5kT3ppNycwW2AspMky5sbzcYsjkgPR8zbL3X6Vgfqtb5Ibz73zu1YJ2lmDHeSs+XN8kY/a1/XrCgPBsh2Z16Vm/TmVjrpIDm67F9e0KTpEiRP5worLnMdK+TRFRycTqfLmvf390pDc3PVQGaefFTNrCtYMyMj1eGqCDBubMoYvnCTUjXBIVezWnr0uso4aaS538QT2WH1JljuQppZ9cUWhLjMUvU3tjZH/Zj+4JrZGEHy7u4OHQ8JF2LJFSm9y1hXGs6Oiqj2Ps1XvMnY0L451lVhOLuGjRXoILG4LTfkEbbHvkaUqpQjEwHQCIq3SVyOR7OiBe7QR1ayoi5eTyO67r4jP5I032sixol2rQZzlsCNqT980MWEWt0VRrKabmsrAYDWImjkZc0MKJVfuM5+ST1Nk8VwG6JthFyiCy+xPHQXpCNpkF+fsZyxlmWx5U11kTSsMIC0EFe5dCvTaYwU+imNTpILy2F79Nv2vvpUyre8qG28Ei9pZrbZ+70OsTKwwsJXTE5wuIZ9M410qR3uA5ytscNI1Q5nYxI6Cecn/vnPf/ZHf/yvfu4zHzgujHc8/+TPvPG1v/Hrn/yiV58E/5P/7D/6THzk9ddf+ezDJ89f4o843I/7dzz97Buf+g++832//3/4rm/81vPf+o8/fv/6q6+87e14M/7uP/rE7/rH3vOOX+bf/u3f/u5f/kX/1U/86MrAcwJ088Bca2qa8owSJ7AHoeJp7pFhNM2bgfbHykzdVQZcMd9OhqILau5GXrkFIQ15GkQH2oB8aHR3BzIrakpgBV64Fgma2xMRWAuEmTWuUC2hmTWSeYtUX9YSTRU3bVTo8z90+Fv0OI0ZYaVgVbIhmcnEjMWd+67rlnH4ELSmCJ6EZDzWopkUbCLifD6fxkjkFO1hrlwNUqqp7+QPlzDnWm/cnd7+Ux/6vu//gX/voz/1xk/8rk/G58d8ZeJN4E3wGfKcy5ctf/yWF2//8dfmxy8PD8/HU3v3r3rvzz556zOf/uR4RpAPXE9RM1LjOd1PiVQzZ+7GXBbAjj2yk93R69U9laEyX/FtUhr1reGIHYjc6BIKPpO4tt3GpFuEoFGusga8ktpkfoHYStQffetDmRkrdN6oKc4/+bHLvqv7Jdx9JAXR6MrNKAnG1Wk1WY1JUodQVCh/Ka98HIeDcLvm1j2jMDN37mI29nGKCA0qKdOgeKDZm9HDhkkmQvpTj8d0DbQxP5/Pc04B9LppJ7sybnDDTro5vjgkquJlwrZavX5M4zZLyjFr/ZOGm3eBxmQqRzd6TxbSezk45xR1NrpTkLttw4CZl8vF3f1czdOWNTD4Fhup3Se08e6uWuYOkE+nU4/TWFBHLHWp+AWPrCvKnlFaJtUqeK9vWuFvZUFGyTryZnhD6a50cnz7LpbQPLPrayaVQmEDLZQ3NGuO0uYka0lJPhwX/Uwnf6nGGH3429mC9WiRUVJB3EtXIU6zl5VYqByeVH8XdmPG4zwEQFUzW6pZxVfEZU0hVCdzuAmDNdysZPcw7H+eaFNatccCQLdcwRUJddjH3d0dUPo4+vzo5D6zo4qIkdxE8b1rkRV13TrgTbyoG30cMNLHjgJJWuLAEl+m9y4va855OWHQEpbuJ9jZ7dx3oeJpvZ0c8O70yExNw9yH7cnT+w9+4Eff//4//cM//YPf8dEfS+B8unvve3/hSnzop38qY2kiAgqfaGg9YM63v/3p295+/thHP395zNPdQLj5fMcv8Kf37331ldeViaALVeUCu0H85aYSoL0ddolAIVffZvbfZJNaAf6c3/6539Fr1E7UP1j/Q39DP05+wSsUI+eGdnvz2hXJ7493m1TdfN08xfW3N+kd6I/xc1ZjE4uwRX73R+J+HG1INr+wn/sKBb70irfPl+A1wcCnPvXxT33iU8ex8pdM3iPvkokRlpnTEwk8km+QHzNAPAP7yq/8pfdPnqwZbH+k0CGKUXl99peWRYkxOuK4WfC8/ffLpSLuPOzlhcV1d+uVc2emWWu1l+W6AL2w+9f2G+oMZOb/8bf++1/82i+2cap45TaY0qsS1X9tPVkwbicUGU0KI52pDHNvgIIv8bPNpF3WIcb+q8wcJeOZt8UMWglI7aRnJ0DYilQVfUfW4Ewvf+A2M9I4bESEiNl5Aw3u9qy94HbDjuYOhXg16BUE9K5cbd/LzmY/4A5s90OxI6JiS0YNcTI3EmtORmbcjMuOEP4crYJ2zdvEQmdaSzHPCCelXhT9pPrAZoa1juPA6WTNfGZHhWYW3ZWUXf+riLuZrtmOTdOQjLWh3IMXM7gW/SWkdK8zdJRYCWJLT2ILzeu/b2nbe0fkIzOvZ2P/jLzLWmsPTdonSh9AiAVubVjmKo1IsUljVzEBi5hOUx+d3uIcUJBhu/lYLTo9K8kUgUdETQ25AQO6jr4PhrsvhOX1VqYRwYxVQho61ekAlHCjnKtU/OoM5LEASjyekZlLNxFd/L7e4pdXrCCzm4HwEFtH87OZkaF6Nsm
TnWIduWbGjDwZj2Vr+Ol8vi81p26EBeAoLohe82R+O9k6jsunP/sPf/rD7/+S1971S//Gl3/ssw///W//p37FL/mmD3z4g//B9/255298+u50yoVxNjN7842HcUck1vJ5vPUrv/l3/p7f96X/+Uf+2t/+rhdv/wWvXC7zyatP/rl/8Xf/5v/WH372bPqZx4Gze0bMtbYku51srlVDgbIL8HLwJGdkxCpXjcomAWUzY4zqsXFn5GpBWY3yhHQOVAwCXNYpU39aERISMncYRYM6FSsqjZ632e96aXxkEPrbjC6K5YLVUUxCfe3rdgh6FhevLLztUDMxl2qiLJZLGTRjJdNCPrLrRNo7aXpnW4OMwmOTFhkqmct8ZV4rXKVc1O0nQuOr8DPnsdaTJ29/43PP/s1//X91+cn3/ebf9tv/3jd9z6df/9mH9zxH8u7zZ38Sbzy55AvYz/rpu87nv3l/RN4/Ob/15uOv/ad+wx/+I//8em5255d5nGc+nIgV4+6cK6qE15okEVFR6jDEOhV6B2XAmY3Hmwkk0BQ7NBKGngHlYiwCbkP04IrRSXMnTbMs0eP4zudTWfvLccxJsoZz7ItPh6ByeTGaGe/H0y++/2JVMDXuYKyVWmtYGInOcuTg89qQfgXcrDs1Z8Q6akx3s+qv+1rtv6WrN3T4bi3vToA61xVVialwbF0bNDMrrTEbpN5oCYdIxPl83qY8MzUNAijSkHwbh6tl0Ttu2i9bVikrwy6tzkz5fiSYG1vomhC+kJNVKbINTUJBF5OwIqXvjz0wEbkCNCLdeByTt1laO6GThsMfEyr8NHEJSJh5w5B1aSM4TAVmFZ/GyYiTxdwLrseRkpaDIE1FlK4YCUiXhBC7vHoyj3HVhApSKjBBqGok/jGbArZQNYWcCyQgdCZ1Y9K4btIXfaqN/WoXtgWXLVDZ28zUx6z+rjlna+gybo5cXUiWqv42Q0AyckYojBaRSh2BTNC4Wehx1KlT+3ufgbwcBwB3lTmU+Fpqiqz0he0l13vr8ySWLj84a5fTzsMS8p/rWHubtmLXF1Q9VgSNSEtkocpgGmMV2oybr6xORM9MusifoQ5Xa5hKua9uXSj1TDPjzHXMS+JCuwB3eUzm3WLASotjH+w2f1d/XyYgEsiPffKzn33j++aciVd/0bse/dmX/aJ3fM14620f+vsf/OyPf+7tr94P2uVxreER+YTn9eyUOCyPU7z6D77nb7/ntfd80zfd/8h3z/Pje77uG37Ft3zL7/5Nv/m3nU9vf7z7PPMVMrlnl1Vxok6UawF77sX1/M9rRX8HtXocHdpt3zbSU8FKEw/NGXMdc51OJzdbKu0LvJGKQPfd6eJUObmE4q/xbt7EmvFzErByD63wgVZo8duEj5l97g/A0EzPjaMmEUcZN/Md+lsioqqEWwpJvyhzvQ1FGnVBTla7fK0Gik7RzeLufmWWGJHh65T2eP/q+d/68//Oj//dn/ryr/myL3316/7W3/6e/Cc5Tqf1+nz27kcqVf4Zw9vSvtcuH3043T3FZyM+/+x93/u9b//n/idf9qVf+4k33rx7dfjKi+VCnlBaimyVuhYeMQcfY8Y87k9nbZl0sNdKP99lJkNZgdhnw8wOzGowWS1d52ZmcwZQ+ZvyPTuNMmidXO2DR3K8fj6OozikEWkU+ddBaZ4IsdOmRIQzQvpR0qUQf2+PhwyNNZwLo7LznSaqc+42J8tIQZQr4v7Jk7UWsrgGOrw9xQHQ69wUF2kEbFUHRWSmQPbz+U77GgjLq57RVaiyuGpnR0aEQMtrh0mnzoMc7tm/q7TW3Y8IjVTqPK89cewpH1fMJ7uGl43T1pCA1vh7CSAyp1muuWNVMxOnCZnHDTdVXP1tW927S+FyMTONyMhMulrF8vHxEbN4ZGe6yIrutntPzYxjxFw2CMrXhCXoHqOKuLf3PDOPy+HuGqqIqC4T9oBYWWoNRdY5uxwHlfsiaKYZfIoAlG8mrglrShZ3FYNfipg0o53IyLSXZHX7C12/PJnb6URZk1FDQbaQlozjWlXKyhtlXZLqTo8WmYDOFmFkrMjGUQljNUHpvIeZOS0thg2DB5YZ6+CVfGvpHO2VrJPc3NF8ORfXD4lhezJ344w15zEzznS4ieU1GnrZ0i5lgvvkVzx3Gnm1mCByRawI1li8l7y1PthQ2dgrKDB5FgETRixoU9KI1mlySr/+OOZbHhg+M0+RM3OO02txIwem6bDo0QKZyRvHBuCNN38G8Tk6PvCTn/66r3/lbePXfcmXvecH/5v3ff/3/Z23v/q2nM9jzCAxT4h1d5czH8iMC8fIh7fiQx/89Df/pm/4lV/3zv/2b/k9/9i3ffvT1+/fems9PMy1Bjnv7lbmnbs/Xh6MOsg+53Q3I9ZauuLRfLG1lsa4Z/fe6M6TXFpKVLSNlttEpsydEKa5jpOd3H2civU6zLyirkSrm5Vvi7jM5ZJ0FkVjzjmnIU+n0/ATKh4oeUi6WUlxqSc7KLXKzETp6nM7vGFuzh5cfRrG9hbyT0mLWJ7JzaAEDGV7V+v4ZvezqF98tbRtaIqxlFeghj5xLxjIGkJj1NDDjJgZlm1Oj5XGNf2V18bf//s/8D3/6f8r8Ln3vPsbP/rJn3njr3zGftsFB/AC/roxbX067IX7D493/eSXzNcuX/KVX/313/irPvyB9yd+6m/+pf/n7/i2/9GXvee9l+Px4R73iweCM4aPRVSDDiGwIZNJDhuPx0UZ12XNO7s7jROLrLYBy+LDH8eRA9JXOJ3PAI5YuVZs/FpVbafRMzJi2WlsDFWc5lQLhayBV0IghYY554mmGeSn85nqJARsuGyOKsCDxt/1e/8sxeQEinKV1NgFfRglfkYr6f6CU2SztQbJlGgjVak1FB4BlAA26DNm0evAzHAiM0Cj9QzVFIa/oCbFLeoSBBAyvgg3y0g3I5girCLVwcgi/SZQtaDusell1W2RD8QVm4dC1KI3t+wQUJ/sWigQPB96oqrR9xsAAE3DyYlkT99TsiqlFqh7xnplhI8VYKs3qCrCzWvDbt5Gbz8BRPjWpkkuL9URRMrZSWAKliJ1Zd3Dygrp5j6MNoxjVMODmc3+YNaPFYAY89diSjMO0DXL6+5l4RI0y/4Z+Y0+DASbrmNEwgrgV0LQ6v+1mlV5iqqGlfqDzB52cHTzO19Q9wqt8f4UkTf/hcxWYovM4ksgMyIDcMlnq/81KoYgVuyHqvCrpLwREb1qAhRAsPj/GezZ2omMTEdNGS70RPoqQMSU3JbKe9o5AKtwmvYThBkj0kCgeswAYx+dffeATLIAsdR1jbUq0JxrQd8MjRm0FdM0q1O3v7o7gka3wuTnnBoMVUcLIKtfobcg8yp5zzodsR1cfTvNEjgj5ww16V3mBKGQ1CBsAY2M9CY3Qyqp2buq/AWKC9WHV43+4q6RVgetBzbrNmKfXrZQlUYQ8DTGMS8BmPE0xvFwCSvbYl0H1IwLJdrVSWlASuPAksoubChWXkGzlTnIlZFy+bAVaXoKC4NJn0/1nnnMoS502UezpeObGD
jETrDiCchGnuJhCr24ReZVPZ+nJz/fwXN88+b+tyfcSzm5t5mnbz9nY5dmizmQ9rM8G5Xt8+56e8ub5+9PjNJ9/89rFXMy+wHEDGbjEa1ECqt9CYxvCzaWEL2SvUUSb2vga2G6bdvJNH5mse/hnMeT8pwIkusQBDPlBZss+W1aTZm7nZYLWQbOiFsaMSXSopHGx1GFCqdzI5gMXyrJ4qAXexMCysu8JI2ejevY27mwB7Ibu7OYpVj248s3+x0PyYkWayUqvaEqWk4D4m1qXUti4GY63uTtZasR7WMrHBhV5YehMLm8K6xQBvrcfDlAUbq6Q2jJYVdVYVCjtAMxRjz+ToNYc5sQvBHBbyYk61L40s6r11gTB4QdieuoVXq2etD8FKcH1rVFZLa8gdrAbBqrsrDnyDgUTvSk6ZsuISiNqDBgFSUCmwyX3DdTJu+9S1kW81XRbct/5a8ye8OD+8jS8v9cWb+KxMFxfnV9vnP7igvcRjtc2Ot+/Xn+957/P12NoFNV/iy4tJWK+2fdlhv8f5tZ0BvvXjm8v14cvl0vYPN/sH7dl/cn3z8cVG3hc7v4eXF7p3g8c73KLq+aPL3oA1dJiyxbB6vy7AbFqFQhbBkf6MMku/jaChaJW7U6BZ3UztuIYGcJWskFBlnYpms41vaTsrW/jOcWHTBddLPvD9I96+tlw/mW+21/v6NfpX82tny9vvfv/3/9F/e/1X+f4bv7p/fnj85qN/+J2Lf/KjD75a1kdv/8rH+Mb2/JuPbD0WbXj59f76fG735odvf/6jw/X58/3GPrmqS8Xt7aP7j19+fawXD3Q4LDOIyrrgOJfzZdva5G1rTb5nfaTacOmobtcFBQTd5DfO2y161N/FSCDcrZshl756zmCUzFsaUYVm0XlFsRa88ezD5MbuDUIp0QBZIAYjj5ocpUZECEeLu+YwoozRLEfIp2pfp6uev5LpNitRnfgKWR9pzGpz8gnAPIxwTrjuaelNMBbHMiYfVr8WbSvM0Ht0qIUepbi/wuVCVgm0Bnpz1M16c1zefPLi7bde++hnl289vnrr7fXmeuLOe0NR7XMAs7H8ONtKD8t1QOgwlprkv9gOEs/ILceZUaePyXa0GwZzdysWAHLu7xvju1F9B1rtQ+edoELUHy1vO3pvACA0CIYaCB5Hg2JhW2OUOe8gk0qGDTKmfOCMHR/xz4S5cuGZpnijHn6559vrJ2++9uWn58+/fn55n4tsbt4IOsHtdns4HGIaWMYHBhBmBAXeIci1toCmEVabzFY4Iqa7q58IpkFMjYbJwUE0lE91CgvDpla7tWo2gYerF5/9fH/zQo79Aniv5t766ktf1zLVvmg5rtvdbJxevrhu+3Uzbz779OnZvfuXD1/v6iYK7urhkOVKNzJOU+9dZnDRMNcqaZWmUmmo4BQK+MII+cPWkBzfFMMn1dGHOCeKyJJqqTHVNQBBCdNpApwEiGh+hZKLmwf+FFu9gFyuyDB3cHXBRbKl2CEneAG1M/1DPbH9WAWhxPxhLGbeW+XU1b1YCGssJqpSESrNvfQud++9l6TgRBRKxp+8I69nByDlotmmHh12V2ctkuY5POTHswqwZ6zxaL2Tpt5YanJAoN5Uaz272F1f3wA43+5u9vtap9ZWIWgDXmpJIzZDby0mYCyFpNF66zQotiUCjjsXPAzGuJAYdFBokHIm5O1rmlgkdPdY3BanOtRMQIE7fTIrbpWocqNTcGNtkG/Ae+TD0i96eWjbt7Zl8/JN++rN/vQRnr2Fp4fjcmmH+rw9vxZQHj92bn5mzud65P1mg8el+K4/v7Qj8fzCly3319q0crb45QM8542E467c2gue1d3hg9//G995+99cvvlGffHwbH/Oy2vdTHx8YJHhxYMHtgAdaIaD4WB+lK2To5hVee5osBiKA0CLGU2Xy4RgK8IpxQKG1jtrgXvrqrCdcQKJiZiBDezMfAdd9Ad9/wDLA39xb3p+drv3K+jFZJ+X5Yft/e3Xf8zv/eS/+cP7f+f8weMn63pTHuB/+/63/39PP/rF888eXW4mPNt56wtvidduav/40+2b1w+ffcLDkxe3j4o261Flz2/cv3j204/ePnuB77+xLFWUboCj1+PVdn6w8Udsx424sPGBcyPM8Coj0BVXspDYbyEUFrKAFSiuxbAWAWgtyc+ykz6IgiaiGDVmOLJqUnet3j2c5BA+bWuzGurAbHoLawjXo8w1BqkvOAgp1LHAZsiMjAA02DY+rBZHIsZIKfbqL5YyIMdf6pstdl+N28z0NXDk9DK+cloiRAaKPpvs7mPrjisChEYmPn2k4hLprVu1acXNd76/fvq0fPjz+cGTOm1blyocREVVA0okICF4cDghLHQMfSTdBFpXzN0jrzk8wtZgd0UyTnvLjK2I0VGEr9PT8RzgKX/xVNk4zJZIYyV87LxOBZ4epKE4Df7tyXC4woJzGdSTCH+jjnCSBvZIPCVsK9SIKgSyyeZaVpvL4a23vvrk6fL555sHDxc0kzEeR3BrSSMK2VobAu7wpSIRWjEnYHWsY83lKFaMpdZlOVqWIKcTMViwr4Baseo5ZKwEVbuzsB2++vTDr5993kiy7l9eq5Yy7273+7X1BrWbQyE287yu61Gt1uK02/W4Pdtev3xx794jmIEFkuXGv/SINoioPQn48TEI1zTNrS20aVlaVoeGoRsl8vif6JBeY2B4N3wdKCdE98BXLdU2GfbNYeaxBhwAc5ctTwMXBwRVctitsNBO98hha8xQDEGE8DjA48dwlco6Nm4IkxigSON5E81YmABtou7xwiEhZVJ3w6lAf0IKJLJGupIckEwEQzVkuTybBpd3sywRWuuxy69UNnWzNHBVb2SROyvLPK+9+/EgGly369ItNhTnj4gkw9zeY6Nr+HVM86a1FvUxe0Ae0cbATVEiVYyZSkScTMOIPYIjNIklnEKCGx1vKoGbvK810naTVUOZuF01y2ZOqPemZdf71HhJXJrPh4f66vXyxT17/sg/el2ff3l1efXl3F5+861PP/tPN//qn3167y8vvjs/ro/PPsamzexnODzQp7vb22mFHdbDTXlt2/rD21scLvbXZT2yVSzorZe5/lQPfuXPPnj43dcPM3Z89trcWKCyu27Xq51fXzzwK/jWvMIm2ARWw1rBaQwxYChphw0wjhJwYqgFALC0Vsw2m3lb6vX+Jggw1XwnEjNZgQrfABvYBn1XduV6Z88f9Jf36jN/SXwx7176Rz++/eM/vlyXR74s7779/n3WP/mffvLdv/7NMy8vj/hPv/Urf7A8/Xe3Tx+cb3i4nasJ5cW9bb93cf/5h7d6fHv55vRsWW8rX7Rf++bjJx0f39r+s492n34w/Wd/53h9qMbDy2P9xkV/+UL9UmUueP4m8Px8t+x2rPQil3tzwErNhQQ8nJGVnICDZ1Qylwk1ODC9xPmT5MWcwVFlBVQqXWptcckHVUFCU+uS5e7GEEtUOlFBFA9bokEIHL7ljrFpxMKeIq0hE4aOVHkSdP7yD7eToHOMNx13+tpx6FML6Zb
DWyRrKzNbfi2PeszD7Ssa/ZivxlMolKTebZC5khoQfAFCrFXLgfcevPjN3/n8q59sD5+/d+/d53Y9i6tVt3W1XgJz9/+wewdC3BRqnZDiaJQiRI5A87NjlAAxxmdB72JI6ow6yRCTJpNuRpbU5UHWtKB0WabbWmJsNof4yjJLpP9yIqD5CYLCygxxcIOAGgCG3MjJqluimjCrctKFXqXGssyioT558uE337rh1986XKPOcGAiCmuXt15js488YDoHnISH4dJQB47n4Ymd5g6mcLKNmSsiW7/inGjAYLHBwNZbenQbINvAP/3kZ19+/hRCePU3almb1iOA1sRat9udSa31zmVbz0KJFh34868+u7h4cO/RG+sSJEfG4vd4gTIo7abzR1N3Q3GQFcbmrZbijt6axUpGgzvdw9ox6rBXxeUBb+TmQTqTKp74jiIL+jgyHAhBPJbkZfvYuwxoKD/y60YbyuKIP3Q3hTlVoNnIFuuSy0vhuvbeWinFBHkD6FBBavrRXBY5keEAFufNc18TYkDN3LQBM1t7Y9ClStEga+oEy+e3zKEPbnEoNboCssKxhmbJ0UMglHEjTUD62sBi9C7BG2Ea4+xAVmK7otG8h4u7G6zUcjweSymlsudUIyJS3vGhvxzj5LEeSmGQAutdrDHLtrW3KJxSGBvlNEIb4gZf1SbSWXtYDImw2jDVGUeuPsN2tHvG+/YQzx/z+QN9/rY+eQNf9k+rnk7t01vcTF9+9uRD3v4f3/z5nzz95//44/eWd157/CvPBL2BZ/al7a/Zluovp+3LZXfvq8v7L8/u7e9/cazrra/07lj50ZUu/85f2//k2a/wi3/7jTdrO148ue4Vi9cL7e5j99LuXZ3f8z0wGyZDdZvNbwM3iUK5AH0MMwIoZaoKg/4bK59rgXxtvTh8GDlUe1C9jl4g8NZc32WUKhpcdc9ZqOX64882P/jxdWtb326l9Y8/2Iv9u6/zT37/g/d/753Xv7u9+eL4t//Kt86/fvrHf/QLHS955suu39veYt73l49+VrcPD5PmFeiXD/13v/NGe9k+//BfX/ze3zz88/8K//XL5e//zbqq+vbqkw+Xx78jHR4cfj7j5iXOtvXxng9fPHptnaq8ozlVzchmFCtqDfA5QMdISrk6i5BAxRNxg0ExIHHA0Qx0b2zu6JIgCr1JkrlpMkpa2lpLDb51aw3WaqlOFVZLcCZyUEJDPv57oKevRJg0gPzlXweAaKWj02F20ohG7i4BhUwqfl0+lqGnBcuAaOMuxl8LeN1g4WUfkUYd4UVqclen1CWT3M2NDpa5oZVq/XZ/8ca7f/7g2c2DL7eHy0vfKr0gaW4qKqcqgOZAUP7IRNZzOjV8NgZS5wMgR7YwyRoLdwgFQzsamqxFHGZg7HkbcMB4iA7zygJzGkupnmByAVBZvNwhmVGGBG8uGHejZrHTDHIUtZB7qawsigqmmpSbqBrUCT/IgBnEsZtVvfeNX+x+dlaff2f+1qHdgmjqCvNIh0xqrU5zTLLdFfM5DzwOHpozY5KNgqXlcvUWPG3P1AzgbrPG3Tg1Tpyj1EKEBnqzP3z1/Osv2eR1asT1Yc+GJrXWCmthJctc522t++XAptnZBW52EtoiYHn+7OW9174hrSwVXg3NSElTqY6mnmoyY471jSXOQO+dLE0ibLvdSq7WDOljZaMQCsOKkTvzKLhFpZlwRXpxDi7FMKjSNG96b2bW1Ush3TAWMtZAvaAmsZSgSseAw7sCLmxx0uR4tSCmUXR5LUWmdW3TVOe53u5vo79nYSkFyQILqkM4LcbVyro7G3kHYYLP83w8LsEiJGtUB5HkgihJlubwFjz8zJSSF9qw7rLe+wm6UQPT+o6xo6JLFgbvvZU6AbFoiz1JVXFvYu4TwD+CRakuFgqQvBY6ssKPBjoKrjhwDGRP8mDzpm1d/m60+vGmYnEqGHCFAWFXzlMMhEcDXg3VWAxVYaoH+MxeG7e1XlLnfZ4Pj/Tl6/ziCb58Z/5KT5f9R+vyk509n+35gj3/h+tf+9NPH/7v3/rk/zL/9L/74Ks/vX3r8tvL7dMdPjvce3nz4Oqr19frXWn29d52rI9q/9L67TyZLTd+xvX9uurrzb36dv1kwQRv6FN59PCg+ep5uTr282e+v9rdw87szLA128CrWxWWCqNZcXSPygmRbQUIKc/J++FI22PBujTN4ayAqkeoFai5KSO0NwKKrSi1dfJ83qgun+IXT+tPn96+Pr14+OUf/2D/QK9/6/Pt2b/8uR599+z7709/+m/+/IV/59f/2oNnL59/j092j+q/+uOPD9yU86LzxasBF/PV8eU1+lG8PvzNv/tdbOteU+tt93j+4rWLdw5f+4P7x28+3L7evvz0y/JH//fv/Obvvnzw5lf6+toePOe6x2Gy9Yvto/3l5Hsv6mioO1K1gjykqZtj6zhA7qzosPTORs/t627uXkNj0aWKEO3MxdvapdYb3Gi1EhabviF175nXYXEDY/smxTL4y4YTEpeZkBETMzfil0bAI08CHu2rhe8rTmOwux+J7kV4GktYI+XlutpcaQbjyNSWS0nNvNAcRjjlysGqW9zwKDQkU+/h44Tqah00ls6VawW/vXv3Xz348Afr53/ji3fdZ7ch4XNz76F/GKkxm/ciMysge2uOyHomR7UhKDmBexGqUokZPCOdGnFkgxtEm5hth7ALpzxcjLUw5bHhOxQrTIAQV8RqtvFiAJiizcqGPPpI3j1l5N/Mn+QHQa1VLdS0bvJ1xKBjX4rVh7sHx9c++8v64t2v3WzuAA18xYi/TjMCnAcAlBgHBMHGPBrE0GnSggANkK3D4BjTjFPZETqYbNMTN8kIGww3buzlp1/1pansluVo6Fx8Jej1wYP7y+E4zzOIw/W1z/PuYtdiECBN88a87KZpPRwPVy9ubvbbzXnYboROlUNcFAYjcdBoKqQJDWAheqoJ5NaWJQpJMzD8sIyFQE4ZIp0Bw6oR8NweOlAjj7cQpMPxdlrvHo4HZOploowKw+pg/bjMy11BnCsNABeHzXbWumbyziGSOVlVtJZbFFsA+65qVWtDrWGO7SUalleKQkfotMjYYzacrUIl1dPYKxJVU6ext3CFRZdKqSlEAzA+JJlMrqhQOXrQgWlFcw8FdtU7S+lq3UUWuVCIlh5b7l5Y4/CWWqKwGLbkGM6RQ8MXoItZmJ9I2XHQM+X6yUolnm5ib6fr48VM7gVFmaLC/xSFNcy4JlS3CqtEqaWoqJzVXqUZ9bJc4Pq+Xb2Gr17D5+3zG/+F6rOL9pTH58vmhbUVFS/31/Z/+/zd333t4T/89U/+Z+0H/+LHF1/89PatZ7ePiF3b+qHusZm5ubl3XG5VPyf2XNrhHi5uDnhW8f73+r0ffv6TN97hs7Wo+OxCv3jy7JFd3pbLXd+XWdqYT2bFvMCLoyDNf/OADsV3BjUMnmzYTdx1W2QxwLvLVWuteCgVoEKRzkfP0dmLLfMO+NEPn+0//vef3D//dNFev/ha33vtyd+4+fDrrz9/d7f755/rB5v3f+
e9t/7e3/7eP/2Tv/gKD//6+fvHL/p76+uXD3f/4198cF3p97eYVs1q+2lu2/b09rfefffJcVq++vInP/33s75sjRO3zw6f1D/47/Wf/efXa//Wd3/nJz/8aPeP/quHf/V33/jbf/ej50/PaS/ZKujmn16+tiyVgDVYJxuw0CaaJrMVrFIttZpVSb2p947YYp6mAiIgbwKaN4O8yPsiN0IVBqyECLlaz5GTq0P9aCwUvcurG1iK00+DvNwDSguJ0B36ZX6HeOZE85Rt+MutMDPLnupye+V3fZST+YITPHslbQwByOlrjuEWfLhYRuBOnwGRWZiJlFyrt9jOa90WWmE9YH2yPnp7f/XR45c/u3327WdP9vCKdmSlMu+fqFBIEN37WP0e+wyKhQjrJGnWq6B17DUGbKzKQEy0B0QY+sexATLGV/FtkpUh1a6M7GsgS5jM0sZ00oxBUjHG8Pk0ID8BqDQGoTQmgRNLlPD5KYsZEC1IA7pc7qKpw7sgrP1wD9s32uNPHn319Ob5t9bH3XpVAzEW1zgLl6Uh6qZB/zGlMdnJnrwEWusQ5LDCNNH1dOKPQTVj9VBQXsdzNLi6uruRbN6//vrLm5c3S6+oflbnmdpUlB3betxWAnrx4roYXTgcG1aTVjQ09tb612Td7S52UPda5qUpkMjuvZYqz7Q0MJ6elikQSxnr9hwYm+3cwgOXhp62EhSk6M9SOIwUDI86IuuT0enLoVRtA0Dra+RtknK3UpPgYN4djm7GmrhC4gPG8BI57VoIFAapjfWc4qXP68lM5gT4O2C2LMvEGm1h6jppjlJZTkUtje3kUGBoa4NR6ERJ96YSnpQGodayNvkrFIRiVBcY1jQxRYzRVdQSY2GYB5/LAKsTY5TO1orReyvhjG3Ybra9tdu+KK+pj249lMYOC8J2Ozs721/fuKOFEZmDNq6bOwOPGHJknMzSc7zuiKeZPjaJPZtVhM4uQO14wlaaWqmVYAGWYSLKQlbXBKvCZKJmLBtbi447HNoB7bZeXN+eHab9XnbkSxZos+MG1B/f9MuPpv/Fr+3/D99Yr744e/p1/fzr9uULXXFFweNpNzcUV7kqum4Hrz86Prvdv/yds/L9P/zXH28315f3N3DBitTDExNcPLVSEU2Vnj1ZpUWA8KTi558aJb0i9cqVzu1SwmmkmVWjpOrnfd24F3AGyfRObai8OL74+It//v9+56N/stnZ5ZP/lc1bff3psxfLH321+97rb53huGB9tDytn13Pl839t//h3//13//pj/7d/+tP3rv32Oqjx237v3vvvX/65599cXW7OeP24ho3/vDw6fn18sYPPvr0z/afX3/20WefXrz19u73/7uLw8flPvnJT/qf//vDvde/mm7O6rcOH39w+PLfvvnGbz/4/hsfXn12RZ85Odm0/fzRa5NKUSle2WlHmMyW4lbgBtSz7a7W2juW43JcjgB9iKANKvDmMQBt7gK1dbZCFfqs3g8wSQ2+TmoWMBNysp58anlhcdE6SZZCOGUqXkSvWY1nSIGd1CwDNMy0M+IOXpkBAq7ByH4lO/u4/6MNusu5Zqe/m73J+L3AVUNql3/GAVqSwwtHPZ1xQ6VzoczaYjg7thV2pMPtOy+ffPTg9oPHV2/tL+e1LpVFLOhhPKhhdxQGky7FepVEmk2BmjpgiikmcRpc5lQg8qHJvZQaYxMpVurIR3leSINVEiyCF7IMQXRM1WrAZCCAWgjLHOGE+Su019N7iazmDiP9TqgnwUpQqOLzyQTK19ah5q350qDWljVxWINty9vH+x/r2acPn//qVw/XakVuqNFHtNbWw2LRT8Dg3kNY7NE4lJAtu0HCBNRaVzUBrNPsUNTMQ25pCGtVCCJL+nKkHGUKu9Rq2zfe/5XDFrqG9sfjsU9le2wv2RsJmlprPmHeblhKOZunWttmts2mcFvnyt1urttHFxf37XVoMhMLuvdqcj9OdW69K1H7Bo8tVxa4pRH91K1LFq8BKaj1tGtQuLGN8Uju2KahEKakTRpyyH06A5EP1VudJvUeVsy1TrQAT4fFRHeYVih23hmDD+Xu3eQFPKq75zboGBFjSM5Gm51MJXUJTqGQHS7SKgMOYUcIYSE1V0WNUa0N9s1mmtd18WxkqcAnPIGPaLZZiJaTqag85b7ZzreHAyzTGVzRlLrckT9nsXmel2VBuGUVK6V0ne6jQDb15ebajOC47aV09WKstfTeSXRXWxrJ/fVtQAk0eE+c6SSZYNqhldQbKo3rYpoVpOogZ5iHqBMsXNOjM5x8SKtwGCuc4CxBoBxW6chF03IvM8MfesZadZytEQceSl3Wm72u99up1RfEy9Z8ua77j783P/v2W+0Z9GflrV9/dvP0Ry92h0e/98I+nl9cd3zYjvv15cWX/eppa/u2qHkt71zWR0/Ot4f27z81/oPfWXjNYz1W26FbJ1rr5KR64qwplsYHrmkW9LmM15YYDgEUhidRuAlJQiUMPnxQeu+1lMA/aru89d3c5rah3e6qLX0Clnn3iw9ffsz6m//5//nwj///VP3rkyTZdR8I/s651z08PCIjIyMflZVVXV1dXWg0GkCj8SBIQiAHgkhK5Oo5K9NqaWv6vrb7r6zZ2uynXdtd08h2bbQajh4zFKWhSIiEQBAEgUaj0Wg0qqurq+uRlZWPyMjICA8P93vP2Q/nelarP1SXVWVlRHq433PO7/weK/7gf9mc3TsdfGHhVlV7tlw8/2XY/FzZjnrF527dHHofLx+efSQX/Rvf+Ppb7UcnP/mzH3/yzgMeDrb6/df6xZcKcqGlHFkgqXVVLS7mOK+bZ21zGQdDPxxd3y1uv1Yfvu2mC/r3f/osXLroCy6zPNOid/r9n107uPVSuT6RZcB8gbLhVeXRjjIfQC1TxZoBLenaqdi+r1hUABqARJx3A6gXZvOGYxGlqKokyNBAG1UNPvPex+hJg2fnWRVR4lo0iLYiIhrUKFlJ89I2MYDIe868VzCLY5eRROdUnJMQcueRWnYlzzGI64S69gu7ZMf8ApxOVpXG8zSuxJXraxLr2O8JafakbghRVTv7DFMCDOUiMdaD8YoVZhtEjqHi2EuIYBUTZjkGC6kEYW85pSJZ5MDrMpSfPR882F3+cnv6xaN9keDpUxKZ1CwY50IcUVCwRTSpWcMTE9iltRyIzKBCVcAIUIcXR4357xCUr6w1jeTGtoVy3jmBZM4RsSNyzpullpGfU9oIpyuUSBGSlCagK/mVIp0WHehvnax0h7whu0apFZEoIhI0SBukjU3bNqEJMULhnc97Huxu6NZodXpcXix67XY7WHiiGEHMITAz5w7RbCoEDqyI1g5b6QE4Khje5wppVfK8R6AogYlDBIMbt5RIHDwjsHeBlRSijaMSNhixU/jMI4uFuMKPm5d2NhS+bYLWoUHtQg2ExrWZKhDyft7EEEJoVHKf5djwyg0zwCGidYMq+JHf9sT9Xqhj8BRENCXOQzwRmw8rE7FTURZImnxC4sZBffJcpMCEpM8jdeQo02i58mKOZMYPtnM4yQack
yhCRk4GBSHnNEbH3lmLRuSIk9GG0VuIbekrIo4doI4pzzJRiRZ8a6biICISCdrp2I0XxmzxyyATaofUKypb9EtwwqYag0YLoo6cFJDmWc3mhwuw90FaMGkM/X6/bVtRUSG2JhVEKgysVitiIseQqMnMjdZNy+xEpEUgMJGHbcu8sHpEk2JrbBsrD6qAaNDAesXJgoj9WJz4UwBUyYyDiIJGgRA8MbFGExebiQcDgQTEUe3yBlVhzkTMBcxGWxjLlAQ5s7CoebQp8rzXNAFJzuU9sUjMszzL+peLVZ73osCz12imfvDMxK5tmfMsaIRH9CCvXoLnNiPJKFLjQ9PmAbNFGS7q/rrqX3yyOVgfHj9ol3X58gHXw8HQn1fzH08v3z1chaOq5bJXFmWZV7ncRLM/dLvsn/fqftZfFeXb04vvPKO72eitN16rq+gHeSiVQ6jAhWHMFIitPxMGHMiMSdlRipIwAIsAsCcSIe84AI69Com5T7C3TQuYmtA679WU+gqo+sVu6/NV3NjIi9CbHfYn2yHsHk7X+9fKm9duH37w79zR6V4+6B+/7W/elInXURUm69Zf5uNx07S7zsFhuZdL83BYyfxo44Mfvvf00emzIOu6pdnZHmeTvDfey3MupBGZh9JNBl+68/yjB8Xpqcv967ffzG4fzG+6cPQwP58Vy3wzFIF6PratD66U9f2L9aM67vDOPq1iveKLy1AOqF6UpSyUvFDG5JFM6pVFeiaNdOxEyPs8NKHDK72Lwo5CWHkCO2k1Yx5IU7PPWDKPoAgRK0UkCawEakgcUytCShzR2jRswJSqtk1apTgGbNbzuQ/BETdxzewZEM9oAvQFZb/LXJCOApTYhkqiwZAaI5R14mO9WnlSB0MZ/0KuOJCfYgu/+La4Wssm9+mu/qdpAuwcLP8mSNIlG0McBOIQonSsl1curx1tfPJ4c3HtcrG3GC2yZS/2lc1bjoBkXKeAOsrEhRDg2TmOIm0I7BjJo5yiiCM2vYdFLbyAzD/djnQr3DSzEpjYMTvnYVbdYMfkvadETmM1VlxXaQlmI5BQyxfoAH1qO9BBj1dOxXixeAZg+jRbZkhoG4kSQggxaBRPzJlzLvMFQ7nU/G69/Xa5/Hhwtr8Y9SQq+6BBMp8rRDU4zgCNUTqbP7a3mVIoyRMT1GXe/EmYmaknqpkTlZBLzyvQQ1Bi8X1oZN9jtNxpkhoOyANkFbhY5JeeLwtkIp4dchYIch8ELHXghWaYnc1Ii2uD69v57kZ/p8h36/rnhX+lkc3j1cWT6n7dy8p8Qpq30ngKkApYCTSEJoMSq1CAc6JiyWzsmdpQ9oumacT0acyROUrI4RNTSZRJRTQGSdpuSkd1B/qklTAoeUlqEGIkODYmEW0M0Vb/NtnGtCs14+sEk0aJGXvDqGGdmArUTOi0yzZIsiLPGSAxCdbR9b2wVB8jDHKCfABEpLik5IaqyayTLNeKmGPqihVK67oVDRZ1TESUOQs6BDFCkGDuhmqbwM4pA1ApXN7N/7aycjDJr3cRSgJ2zhaKEiM5FlVKAUeSCAUGt2gH5yCZzwnSEgMm6HfOExkbSwDPLgjMpDpCmBFCpM6Y82rFe2Wo3S2MOIq0rSYFlhIlqwrOsh7AKRWGmJF1YBgDLEIgr6xZ37cUFEKOPYtH47TOUWeNUE1auXFbffnZ9z44fjba39q5+Rr8wYOPP14ImhYyC7Hy96brcpnPPD+HPqn0vGnq9WKLN/tx9X/ax8FkfF6H/++H5+dBmGY7124thsPmYu1KyZJCQaJjYWfXzXYkyiwOZsMtpJSKa6fEMMGZ9wJlhO6YZv6UPZhnjsauZwoixur1/cGll7B9/uHuo3v9bHT66lsX7dNvv7aF4+nZv/5Jdfhzlw9PZbLXm7vmF8XBK/EjcU2xjlLvRd8wqrgacrnjueD5/Udlv//O278Is3Xo9+dBMl+eUja/XG+Q/72/841e35dup57JX/3wRyfTarPIX335Lc23T54eFz3H+XZzcq+4CIX4OVEDFJGrYWCdymHVRorMW5P1pSw3fXUWqnk+0h6rk7QyMRVgJOJc4ZhdFE+sa8TBeORd4IwuV60nbtqmVw7bOqzbRrTHSuzJSV+pFm4IawdWWYNZiFi9oOl4fmBWQhBWCRFEKhJVYxNdYOfFe2UmNAgEz56JhRswy8rsUQC9IrLClgXa2delU6irkFdG0JaAY2OZWSCZhAMwbwsTC6YhOGkqrG4QotjC1aAu6V6gG5gT5at7N1YJkqNlcr1wIFPut8z9mu5Oxz8+OL2/M9tZ9srWNyw5SMhOyiQDJIJTEojzLBpCA+fZZSwqIUlxkLOVRRIxSmqHnH8KPicoEXnHieZGYGaXNokEJuccETGxdy7xdZCO8k/v7Mxw57+G7dNFIFxdsuRgm6hzXU6OqkA1hqAioQ0hBIltiLIOjYp6Jnbs8xzswD4HNHevyt679eGD4vkXm5f64muAmYjIWSqOXRxiMROoVHSE0sfHxOS6jXZyFLddKRBb55mFo0jwyDNfqF83VVzXrT3muVc0ed97xWQ43Clbff6scgcbIQhEWrRQgVRCS9fzdZWf36++euutQTbpu8mozE8refLo2WB463Iui8XF1sa1r7/xGz+4+I/HRX19uM9N7SsOVQO5cHKZI0RUjVRCYnwH5oQjiEMIjYjYDiiKHbHeDCfsRjc9b+cbQ51GvluH216RDChKrHhKBnC42tMakMTExBRj9Jlvm1ZVo0QFTERkyxUSlk6TpGqLDgEzw4lEUx6qUrCpnROzXzrabjAGPmtas7ASZ3azWiRiTg6MoGr2OeYvE0Jw3l/J3SVE9tzhV05VmDiIxDZ0PM4raV4XqUEg029QtOwhY2+JCFh8locQGCDiGINEgZIDtR0LRDWxq4jUeach7QLMvkBT/CIACd25Y0JlQUJ9SJJozjOz486njrvjKyltTMCRmNVEal0OOSXjcpFjHyHLqoXGLOsrSJRFcmavysy5wqLcRQJkLRzBgdGigV+hWFPRUCklZyNCEd3eZPDawa+ONt+bnjWn87vl6Pr1G9vMyDQWgUu//+rB2dHCfzDbywofV7vgS8eKC9JF3Qzefjp/or3r/fFNNM+a1Z39m0w5DZosz8JGixFrH9KTdSjXvifig+ZCOa+jrlXEswJNUGGFxBC85xDMp927tBd21pcY11MQEpwJgXaMToaZsPjh4qPbsni5mvZ3cbS3t+uevfVaf/Xnf1B//8+38+tT3jwLMedBFobF4uN85yW+lg0LLM7i5TDsMuYVsr5vy4o9D8Sf3X84cZvHGyFAV2EdVXNSHsgytP/2X/67v/33fzcfNe9876/f/fAXd1++sxm0avKLJ09DsVpGfxO75eXGfNYOe+XZahFJgvps7ck34VgkR8zq0cSvji/z7WHpqr40C5dRRuokQMjkC01kl6sT8hHUqNPesP/w6P4P/vI7TWj+ye//s5/f//CHf/EXr9/9/K99479ZzRszTAvBW3a6WJKBOaYSgSDRvHcyspKBGDRtR+TKv0IRgyUzk/ckrYjz
QRtvPldGFTUvmFSASc2YoKuw+BRNLg2mppX79J+AXGfklAIQzQogiTS6TAlA9copia70s1ffpyv2NgSwWtiqXJU+M59LM7cjKJGy5jGsWW/NJ09G9fPR4v7k4o2zSYOrnaTKi/YBEsXyXJiIPYlEpBVQUHh0nFfKnBjd1XAcVe34HWZjwkw+UVuSA6iVJSJ68RX2wwAwYrakgSSdFd2U+yl2c3cJ7M1Kx4y2C5eW1kDauKqIxDZIlBDb0LYxhCgCUe+cd857531GzBnn7DV42dXx3bD3AR49z+afjXtBgmMn0CjCjlkECnjmKOiE4+aPRKCOvccg8kTMzm4gBRrRYa9chwZCPitrwuXzY5k3bsDlcDwuDgD0GqV6wedRZsdy8oFGp7vcvr61zXkltQbnqz5LJsjrxezsF5ff/vrvvLx36z9//7v1/OcD7i1l1O+PpzM/KnaKzRzXqX+j2L9+O2SLbMPHuaAG1z4cD2TmgEWEEsODBbVIw2QyH4H3IYS0sDQmla0kO5eajquvhiGndsnu+k7HewWigqCijo36QEpqIihbJ1twjzTRe2+Wxd1C5IWfMzFMi2oy1C7B0KzUJF4RAJDiIPnTD6N8ijagygzutplMyuRhlmqaaMS+uwdBYHYxmKmAMrPPsyBRRIk1hpagWZYzo7HXEbhk/0JKXYAdw6lvNVJX9QwCUIbChSYA4jMvUZidRGXbEOBqLWBEExYVCUJpajXGUCenB8DkFap8xfJiNValiAoZGxHk2IemYcdX5C0k+z47K5EuIHsyVawKlD2TKLUhMjvmHOAoBPUgr8gEGSMX9aCeae8hkYTIjOMbSHRNVrQoaxRVXvQGC55w1uh0Mtiq6q8VBx8tjsvh5rgoectzVvtJpAJUYevAu3lxOQ/rupYmLClsKr20ufVxlp2J5L1yj/MmLK7f/eyr9w97y+nZb//aenhBm9oOg8/zjVFxoRsXUsyUG851FTRwjIpGOBDAJJHAQC5BPOfE5Dh6M0WEMiLM8jNJRAmicNyd0vYwCBH7b7797yYXT3de/czyy2/63r09hMs/+P7kw1/68d60OSqkel7u5dMTwWTs6yI/5YOsIQQ052XYnYz4dInNmgeQKbch9uL6ZqbzVg/rdQxBiqxuqjLPsFy99NJr8x89+cWT7wzu3n1jd38/1OuNOwsOguDK3q2dreo0zuKg1OPTaq6TEcrRxZOjzdph2cbpQjY3tWizwMePHzw7qkdfuVufnwntgFMOJYkxQIBeAw+wcA/C6I16b3//LxfZXJx88OynH3zyYGN/46h6/KMPvvf1r/2N+ckFeUJ0jRduMg6sqsrOlutONZjKXCOpA2LKYCPLx5NE2bTEZ5EQWgiC907FsW8gufOybgIzWmHHHeSsVjyiyAvrDOrwNyMLqpEQE9Sc6rhVbzZZf/JpBOhqShCBptfQ/7qE23TRSZno6nefYiSl/9uPkmiN9sOLqlJUdi34ldn4pJx/vD2/fjnoh14w2aoSJ/8Ec/dVZg+xykuAc8RKYoorR45MqZI8ciNZxDcxoFdNi3POOZcERc7Z5IsOlDbg8UoplMJXupklnaBy5eL9qQYkwfLpmmunoHhhgZJs9tTQ9xBDiCGGICGGEEJomLiX5eyYPVsVzryH97mxcT1/sb55Lxy933v+xvp6DoDgRMmzAl5UEh8tMWzBFp1inkaU6Lip23DErHAK6rGL69oXZegVl0+fnj9+PL62f+ulXZ23+uGH1dMfALiomh5vrKssH9+Qu7sHv/Nb5dN3j/TR7/3jf/aLn33447OfoVk38zKvBpef8Es3d2/devPw0aPrB9ewv+fFFZt7jQ7W4rJ279SdP5XZrb0Ve0Upw0lRn15iLriECyyr3rqNXkASGC2ByXmXgaAhNCTkORcNVwLT5Gio8N6HIObYJSKdcs0Ssa7KcNrO6hXh6lMNqrEKzO2LVILYjUQhhBCCVWWkSmBhAum5M/PLFzJ9YgJiFLNwT2ws7qr+i23FC6ErM19RNoBPrTASpT25MUtUdJKhzLsQA3uSKOumZe/g4IhDbIi5aRuYtk2EPcNQ4pSSQGbCIgq23YpJ/iw4BUQwAMiF0GrywrTjRZ1nx14EIsHOMUa6wtJRpwGyTgKAKpGtorrjzBJbiNkzNyEwkQrapklsYBaCRQzYt1NoIOaYBmIh8gQBOcvGJSLvchBLZNKMOQdlioy4B8pEM0iOjCgn9IQcwyQpa1BDvMLaFUsqFjyspCgH63xvcPbwg58dvb8j7rO9rYN62HJo9rKmaItcwihmE3YevCm7t7a2L3ljWR3PL/OmvuF7/eFw5UQom4emX9Sbo8lw0dDyaW/abn0YTv/OrxS7mYybxbA9Xvs5ZRWGDY8q5NxyWIu24nJHJcUmuABdOG4KhYIQYxiUw6ZZgALDOUegIDCyG0HJenkDCbz3IUZWBxE/Wh2OBuf9D/7VzXv/PBuMF4vZS8RNf9hK24u56qw5vzzJb8SwUa2f8/GsPNibhXNs8lRis1dzX0I/40FwocnGRXsZtg42v3nc7vdmD9crlowd90OVD3kU6uXxdLe3EZ8939/sVdc+g+H1oa92D3aWQ2maajgsioMbfH40Lgp/2T6ZHvYgHtp4rz4XR7nPLpfzl16+9fRJHiUPsV2tl1hlVKnW4Ia5YSpAJWsOzji4UIyKZ/Xhorzsj8pI+s7Rj7Gd99wwhnBv9sFB/dL4xq5UQaogAVhDK9JVH4pEWYOa9C5oFG2TKZUSkxOIN6qnGmJsW1hEFW1a+EzREDh4bdpWSFJTqd1Nbrhx0iGhWwG/qBbUjW+adpNp4POOCZyeSjU7DpPfRrXFl4qaHOFTR0X6X4d+mzDJ9q+2OkoEWkpDSqevMBqHiiqJ60WpvO4s8juz7Y92Tj4eX7x1vB84efgoyPj6VgFj02TeO5dSgkUlavTsjCrFhBTxK0og79noj5zmQCJig2EpKXvT7+0qWwXu/I66V7ez81MnJF7YiBFw5ZL3qb9/4RKisMEapCpmzisxiEhomhBjaFsRiSHCe2bOfQYm8s5n3pFj9uKkdr6veSOym48nzcZpcXHolvtuVEtggrcf1kMtPCG3DEoFElcOiUPe/Z4sQIsEAHtZRz/cxrJa/sX3NzbHtz771vqXvzj/0Y9VWgdsfuFXASwWbrDz2eEbX3E7fCnHi/35+NqmPMcfv/8vfvNv/t7p4Z2PP3mvPMnacxpdzw/fPTl8cnbj2v5oMbpYzU+a6ezyEqplb4evuclouL8/fj5++nzn47Mnx3kYXn/9perJMp4HtIJKeFrEJjp4Qga0MYQQG2ZlaIQyIwZ1zqwk1XUCODJntmQ+RgREKCfIxrQsCblIcFGa0EyflgJKfOYlBBWw516eN02jkCzLJcQUjWBTNjMBDKcaJbmGp8J89RQwESlrykBU64wlGTYTrlzLTArLTgUgoS5GM/0jUjjqHi9lR7Yv7kSdSkTsyJMnphCjUAATUjueaBMkwmYPrkpRo1NVYUUE2Cw4lTv9HhhkPBSFMHGkmOe9JkTnOEj0hI6xEAHizh/
3066lQCdQBZzFuDJI4RSOXGCYz6VEizK8ajjIObZAEQYY7DzHKBItElmN/KgAk09GYEoEp4AqE+ckXtWJmo9jTpoT55IL96B9IQ/KIR4sLAEUmAPWVK5QLjWv14PxAf/we392+sGDclQ+OTm/sbmZ9bnRpphkWhKK3I25LYNWQXq8LlqpZFTQiLaO1yFyPG7rUPGi8FyMFrJwxdbo0VHjV2ebvdH5R4Mfn/7FzWLn5cn4ldvr7Zdml0XgohLONSyGLQdmZSEh4Ty4EIIEldoDGbuo5OeLCgjeq+FApIiSfENVYyL3AaJqynIQsXc+3N0bfu7N3vFp8/Yvwv17m3kRcnWLS8/Vzohr7DjX+qZ5jFGG7R6aekFne7fKi8drqk62etf2erXEDLkM/WJY51uibt1m/o2LrZsYuwhPfBEuvducnl+MCh9IueeExsVLN0Ybl1Sv2/phgcXy0fNr42v+9hjvURGlVi53x7I7Ch/cL/OJZIhoGuFMqG2wt3/zaL4ebd0+nl64WmgNWoNqZgZyQk4olEsXe+DN5udvvz16dbBYr9u4dhmKopAosZZci7cPf7JXHdy6frvoe5lHJdIWaMF1Bm2VOJBnMa83D2QKEWnSLkkTjVKhph9g4zGCEYWBpm29UtNA2LSvMLlKN5xJYlWlSt/xsF7MYQY2WZopk+lb2QW765m9c51RkCLZyaoVFbPuNyROXjw9HRxna2CiVMMkqlLoRsRugZX8pRPcJYpADQAOqwy3Z7uHo8tHO/ODajSqcu34pxFKmupcf9BvVmtAzDtTYJgtO2IbOkWk84kEoN57dFpqplRtmVlt78v/9bzbaQ47KWYqsZ2qJC0GrmqxmHcikqxXP/13sZNIGeyZ1FiQ2IYYYwhtsBk4ACBH3nvHnn3GzM6zzzLHTsG5kyBQICBucP4Wbn5HfvE+PTnQz3swWAksJEpETHDpTVvuQwcXdp1MgviIKYMau9YXo43VLz5evv3dm1/++jqfPPuPfzLM/OatL/vXrnM5lPF1APjze5ff+U5v9nwxvuyXT+tbdXNn8uv/mzd+/uf/8e37F+OXPz+T4Ld99thrlo9l9zu//PObRzuv7r45zg+IxxiWzDthLzS36nC9vbx+Nsvfv7G13Pz4J4tHj1766j994Plyh8UJArx3cjSUEASRqWHuUSKL2lKCyZm/YDTwQmMkxzFGYtPhqmfqalYCeoioy1pINafbTKQeSxChkGAkIIQQ7NN07JumccQiEkU54cwJbTKLUvu9zSJGQI2peCZrC8ACD4D0JjpdqyHjiSBt96AXRFXHJJ3rs6qoZxbAZV6jNG0LZo2S5b0QWij1cr9eN56dYdQhBmsTYKEvsavYAtGIhG5REsFTWqR0/qwdoEXUNmvvs/V6zc5bNkPbtta+O7bKZ/jKizVLOmNMHk0E55JRO6VHwTEpM6LAOcRoCfKqYM9RQ1LGKoNIIpk+iyKRjX5qq3KjwUJCuoFFwPDQHqEHeEYp0XOftRQUQEmSqxsCGeC6bPYKssByVJzzMIvj6zvuj9//90u5726W0+MaRaYTj2VTOMgY+ZDdCDrkuF9G31ArOTOPfLOU+izwk1XN5DmTkUdgJ03Tz7FclvN5PurVJS1e2phsyBt+/e8ePVk+vXzjtXDti7+Cs+PgY466QHWytVP7whGrQ6PqhBHhm9xLXtcBlLETQFSCS7tF9WKyXjYv2RAjU9I0smOFKJz/xq/eKaKfO5+/9Yrc/Orsj/90dHrih8VCljLGweRZcP0qD6M7X7y48y0tytM//n9frHAy2J5I9f1L+fJ4ForJpH48KGuUYT30HnCLdR3JzyG9pobE07jIp3XBPsBH4iD45b0Cc92p+wcjXxSOlpOhq44ee1yrJru9+Wziy6oN9fGScwllj72HCw3TsD9+9tHjeOvlCOdpczzePP3kAbfsAlEQ6oNKYMhSCPfXMsxXvfkxHxKTDLP+ZrleXzYOw4ybJRx8aOvD5UN/wXdvfbaOtSeHFhQoNEDINUaiYJao5tOkICLnvT38jUgUjVc3t9rAEiGARGFy0W5SlSQIs/BqkYSmJmdmNo1dMoK+KsBEEjrfHxuAHWumUeBFuRugBRBVZpYYrCSRply/hJVDXtBL1fBb83dXKBxz27bOuTZE512nP+5CFT7FRYpAAHwg7wpfr+6ebr93/fkvd45+5dFtIjjuqE+U1MBNa34/FEW97wHwtj8jqCMxLx5LH4CAnUurN7oadylRRWCcpKutrrxAlK/GXrOShiqiGTPZINX94NzRzVLZle5SkTEiEtKgYsOSAGjNFq1tJUYJwZogn3nnvXOenc+9Z1OAMSsRxHsIEJi50uYOX/th++FHveNvhM96IjMsJjiIuqDiXQd0AmTa6DQNe7gmRICZXBQmzskXUfniP/7n3icf3/69f3z04LF88L2b/+gfDj9z/eEPfh7+P/+/sswgGwByeoW3+xo/GA5Ivjj6zFt7eflecf+Hv/3Wznd/8P+YfrR1B689fvnzg9u7pEVRFwXf2MiH718+CCHslSPGcLy/bK71ef8yvBwq/OXfGC/K5Qdt9d5a872zXolXHvjxg9u3fS0IQjXr8VC4AdUWKWd8BG/9JUEkkncgCkHYmWyNmLjVYB1lG4OyrW47CFqsn6W05lEBkQUtiHS2hsabUHXe2X0QDHBWS2Sx5E3Dh4mMTMTW6Vq7bKsCihbayQDSS4CoFcmIr/hhQsKAM6IkeShA0RydHDvHbLSRQGBGzr4K9bppqCMJEzuxnF3RpglETBZU5XzuMhVLroLB4BFXngHqRZFsNUmVryRx5gxpPloWt2SiXkqxYubt7A3hN1q4oQ2ikeFswaswmxeFEDsjUaqRs6OdPiakBosG53yUZNthA3uiab4ALLrDhSyG1WvCzJnIMTtVr8oML+IdFaC+iEfuuSAekhSCIWmpUgiPHHIFEWfEnok4LCTM4vne/kae/dWjH5+8N71RvPbKnSdhz6+OQjVpRsFX3md7eaMLbOar3E95b2/yiIMs+jxcgFdBMxlNs53GTdkthHxPRX0uPD5f5FLxRs+VKq5eboS7+eYXaxzifPnuH1d4Wt798htN9QSXp6g95Hi0X4UeReJG41p4zXIZ1hWA3EEcIBKcz4x9D1FTjimJqBcSLwQSS6UUiSCQitcn/L2/Onr/p3VW42uv79z52u+f/Mc/2Lr/CTxrxbPxzugrt0Zf+OZE7n4sdETji7u/df7uX877L8141fPueDH7bHgy9L3hCNk4R7NYZdLP+uqlQQ2vtOB+4aPwQlFj5dgVVSuxzXubWleR0T8om/m5eAcG5k8GvbotxYGz9aVcLuoNFMNMB96pBARlDtK896O/3P3GVxfrqsh3tvduXj55pOpjgXyLMGIdifTX617s7RSL2Yx3mmIoRb0SnQ5665fGYTDMn+Pg+aKUMA1nYVoe8+gOAhoXPURalsC8iBx8y+zgRG1vYtWDYWHVlAfUXVRwZPOAVSJWUoXG1M4av4bNwIig6h2zOdA6YniRaNtkcmwVEZaFAoChTF1FEK/EIo
5NO0/RXBnSOBlNy9LLe8vFgj0xawypPTebPKM6iYDNGBaBBJEZKrFtiTk2Ma13FOw9TEQfAUZUqAirNM5zrIIvbs35ycbZdCM8HU9fme8ItCEx/wRIwagCyDsHEXhihs+8RlEVBTs1OX+q9J49256b0m6arByDDG9Mh0Wqtd3EZFM+wewFrv6OzOUsVTd9IfUy/wcA6cdPtThY9FFa/KqKRoSE4IUQY9Bk/+s9MznOfM6OvXPsmIyGDWZzaGJRZY7SEG35/q3lznvl4Qfh0dfdZy7DWpwicqa8zlpWhqgJZoiSzptZRTiKqvMUKaiD5AqSpwv9i7/aaNfl3/8/H/3wZ8PP3p78/X98+vYvTv/Ff3GLw9HoZkANKQEgd7Ll+aVe/OLWtX9y9+XJ+ebD78X6Ty+/t/q8LLO/94X7y+ft9xYf97+2sZ70hyWGDpN+zrVr8y9uvdmT4fHz97lZTm7dDTeni2fTm+ufF6e/DMcf9d8aHBQ/XRyvF7JXUXE03sNc+JQp40wL4lykUTgHhTZCCksHY4eoSsoWGO9cjBJVGA6qaiVTTb6Uxi8wiBxUzG/YOY4hEFETGlP0qYXUEpk0iMieLAelYIQXlbJXEGNV14YSC9Dzvl2vXeYFULDFRflIXcBjwlaMYWGrEBHxRog2syPrBlQBckRRfZRg+AsnvQHXoXU+izHaEcBMROrIhRDMhSO9mpKImCkbWUQKzPk1qPFkDTRmFcv3NKmQiALsnECZHDQygZ0ToaBqhmtGLBAFaYLlDGeDwqkXCszesbOAYGYSiET4KAyASVTZLDOZIRIpmXFCQro25k3dGfvYxk0obZi9KlSAwOSMVh3UYis9CRPl4FyRK3p+6CQX3nBh0OZDF4aEAcmQQp982ZLjXq+/4gZhgTkGl9yrphvzozh/9szfFFyElR/mT4sDOsrnN3b223WNfiv58NwP5jI4qTdFwuTak7JsY+3aR5GyGEqt6na1Et8va8e8qq9lef/y9KzQvM+rnZ6483A8ck/WX/Plv9WTXPKz6sNrWxuL5Sev7H3Rex+jVA7N5u24lnztY9W2NbSARiDkSiFIQ8hCaNnB1kYqyDzHaGs6TpHJaiijNzDC/5vvHE0PtSd+Pcd/+aPTp3erL7zxd2aH/7I3O2zeeHPn7/6jGk1sFsfhk956i3vDcPs3l/cfzpryebmXV9PNcmNZbx3Ws18bzl69M1pu7uJsjlnb9qXOWl8rLWIICL436g98BV9gclwVYQW60IkgHjYXh6TSRORF3jw9pSzLhiFkoWyziya4oZcJaIg4at3YXfpek2/XOfPmXrXiMFsMswltyfTsSTbyNHI0EQxAW8VglFF2/uzBD6/zUVGHHACqGyN/c49O5dbpaTVEHV0+eXOXdKMZr0OMDl7Wir5II5yrBC/iIcRwjAiwKhM7iDAoRGhQ8uyYYwiGytgwmpZdBiYSG8/K+EMpKQWdqknVSjgzOfJKBoKRZxeMeqgWTieqFFU0WiusTYicRmo2uYK5CK3XNRjMZEGkikTGuipbCaq23AM2E4gEmBhiLSLOeXPnaUNg74jI51lso6o4CLyDtD5mr1/uf7949PH26npdF2GjkLbRxpMnHz28846sXWYSkIiQWnYqVMXZhJts7ij5bBCz891BiAQtJPdQXAFo1nVH7X4itmQFm5BwFVKoHS/bzIOSO3wH82r6yg717V5BRWIMCkQRjVHVBh3H3nvvXacQcszkku2+XTgza3IKJS8c1xpe9/vvNUf3+OQr4WXPLsaG2avCq0NHUe0YYPZuDbzPSMEoIAo4icPwy7cHoRj95j87/MlP/cURerenj86aH/64nB1732vcBlOZBvtC/Dg0k9z9g2+88voRf/d/mhz/hdCu8rR6e771t+Jb325nfn751w8fPavKarJ9fXS4eyQbUsRh6GP56EnTX4yK3mz59NbJ/Qndu3n5k8vvz/zP4+jv3cyn1c352+vB5y9kcrS3G88VQ2AqVHMy3Yco28bE/FLN+zOxvM2cmZN1ZnLAsJgEY0tdsZ7M6Z+IPJMEIUBiHA6Gy2UVVLzzIUoUtYE19V5Q0QhVxw7ger0iZzIBW+hCRb33xnU285MXCd6G81zxL2ANMzHIMHPvvFW/JBZEsonuksBUKMUXqiJK7Jh+hv1CycIxoaq2EKJkP2q3H7MRH4igFIhIQIJgwJBCnCAleduawl5LMu/bYLYQ6hmG+potLhNCECGFWbxqygIhm4Bh+bUpy5SUxMZwpFdRtUdLnVKW5U3beJ+HGMyvJkJYWDqMjJFixiQJKUxP75gY6kXBlAMZ2EXJoAXQRw9SKIYI47UvB2G8kpHnoi1HvBg2fitXjbE63+Bsb3KggyHPZ8337p8dTW9de+XWsr0HHzOX1X6cu8PmfFiUk3J02OQrGUZsnsugyjdq7tWt39mcDv00FNpkggH7hgfgqayzlctJ8t5o8tWvLUsJb76084VeJSQBJ//j/3jL4Q5vPIDKJxd/43fuzHbo9P0/8699XcrP5/VUfDHdPqiaVit1K6AAGsCzBA945kAcVdjiRLstCQPKJswmZn4hGwbgFz/5Udl7o61HOJnLyi8eLKpJ4W9/PfvRv8fPft68/aXms+OcdcgXFxiEarVzsHPvpb+FX/zF+dYrOe+fHv/I3ZyU5e/+8OIHveHTG9eC+ixuNmEs+ajQ00Cu14fQmTRNNSw2RvWiqBb1UPj8Xj7MUbS+IxSgod6YV9slndR9CtpIMRqTXIR+CNtCI8KWP1zy+JWvvfby7uGKQ8ix4vXpYpBvt5txmT13Q4lDuD3WstkswuzBO8Xp+/tuVWCVwSNevBRo2x88Op3toirq4Y3bn+Fx9t2PFrsHO14kxhZLkQqSQZyCRRrOOFNEkQA4JC6ihSUFZadqtH9ndy07qzpkZJ/EzbdnwFzJACRrUUqnLzkk+FZsqczMQSW5ZkjHzkViJkKiY4LGbsllD5qyZ2bVEM0QlxTJCtkSHux0IenIJcrsbUELCwrodDmZs9w5COB84oWKRM6YgushKnPwGtTfqCe357OHW7P7u4OvPx81zIwicJbLOuY+i6wq7NJKMyXGJB5UYqJ2x55CiH2yaE80UOOYiJGE5YUlQud/gqTfMY6DiI1c+gIW676bmG8KiK4o6KmWQ5Nzkznnk62AFaLd4ATPznvP3jtm57333oqvqVaujiurMqSILCzOi9RoD3p71+rR8+HsvkzfoP1LDmn6ZnJq4ljV1I/Z97KPS1jzJoev8rVsbA5vbv3Tt9ZHF4ff+083//E38Lm/d/jP/9Xwoz8bXuPV7pYsIpatNIXDGgAKsBf52hu3v5S3f/UXs7f/p1t3Xp2/1xzwAaNpf1n3Pqdv/cb27FeuH/0vm9W/mDX+cnR9TDey3rBZTCq9nenlZPpG/0Zvujh/unNydvHD2erDOP7Ky17KW4s9DPhwfbqZz3OKdS+nXNizgpwSKH18SmB1ikhIic4gS3oGRWHPkWG7mBg1cywiKRfnKtMyrTIgIaqqkeGrZWX7CWO1iAgzmEmIvWMAGtWDLeUs7
2VRU7ho3stj1LSIiSaG4e6id/dKYn7ZAiiRL67Mba4I1enzTgQNe6wMC08tGLNLqQbayQuI0oDOCF2iOHUrkm7lb3VciYkiNDXTrOgM0rovNZ2aZ6cSjTUFsldIb06UJUrGbCohujqJoJqyWzQGQ/dNuauKqMmUQ6GUApgVEEQoqxgobSCeA0myTyclUhMbmqpalBgEl3QIdsUjhcRocwoH5NxzUiqGKkPBRha3a9dXbLWyxbrlbsXl9ic/z+SyfPXNjVfGuHiw/P6P6l/e88XOWU73zwaT7E7V8Ec8GvqtaaxzbP3ho/CNuy/NAs3bPGBc86jiXt24Gplw6fOqNwnDto8QKZOQt6M511J4V+W7Ob761lxmy0KQX8cshlmuxc3Vh48+P96/vz5C1vveH/zZb//+35189ov3P/rR9m13MdwYhvXMV76foWDJVXPAKzlC682VRgBSdr5QNVGz3W5BVboYKSKK5lCviJ5PK5x93/k3ctl//QCv7JfrabXcvDMvXimWvwzvvNe/81sxhr6fImyePfyw2H9jfP0l/DmPmufPb3+dDyZnzSfDUaDdb70bf9E2PxmOfLHV+p4Pl+vL6rKdNYiZz7J8FtqiLuqah8K5dwMPz5j0642mV4zyzbx5MutToxcXsu1XMfSJy1KnQHYt92OpN6RyOzPdvMBWnV07b3Kp4da8jm0zuxzfuO5yrfLjbK+gUbVdT7eaJa8/YjzeYUi4yLTeKNzr+y8/Xzzdryhk44MvjELz4fff+8nN8ZuXM78x2ln7xhU+5Eo52JHZJTStEAd2YMA7D2nALmogcUwaFCFGz8wgUXGcyCQW8N1tj5PxPnWtcbKa1HTqiqaIzSR2ZDhlQ0SNOkREnTRGY2yT9pUdsUMXDUQhOnbeFqGi7JzxDq9qDiFxSg305I62yURMzuRD9lLEzN6nwgK2IAFHBO/hScAFsVP1gV9vXzqsZ4+K2e2Nyc6q8J4bAVEvM10UpTlAPiU6tCNFVKFBwaSpnJltm0JVhLp5wuihUESk4CFoyi1+oRjpbPU1zbF27ncCWzW4wY4vmMBb0wyj6SXQfSaw87fjXRt51WXsHDM79pwcLgn/1X+WfAOyrGJEqxLR8efl+kk4eY+e3pUdBZPGFuqEJf2ASbCaTn8A8B6+QeFrXVTZePdWvrlVnT2PT3+49/tvTXWK7/ygGD3FuFjnvd7RIvRi3i9jKOWJA4A81Dl4pzz+kz8e/eX/7eW2J58s3GPF0O/43Uc/et772iQefzS58+rof/eF+PUz9847k8cPdt87bl1/tvWfjjduPOvt3/3he/v8ZGt6L8yPaCSj//21Mr/+xQeTWzuvNnfd8Xz2k9MGuXI0rq9CBCaNNFcjpc4pRq+ujgl8LMyImKJGaw9jDFYhiFiTeFW7OcxYWSRJG94NyqYO9z7zGaBtaO3OyrMsNIE9i6CxMFEAQGwDse0GiMACtUeVQMpKqilsqVMBWuPaSgDghR07k/IYHIwXdzKlkFtKqVYmc6IUhZPkfFc3pk2VBKhK1KQ/sx2EWVYBUDZQVwWaMau9bcCiNXDFzAQluNjIkpQ2x3YXiiJKNI6lyqeMdlRtqBdVJm/12giMUYQJ5tcsKva3RAyNQcSxjzE4581C50qxaD1EwtNgPBe1ZZIooMzk1JFGEiVJzGevWaSSeUg0Zt5iLUNz07txNq7PXv/k3VvbLh8jjPY271bxz/85fvQ99sN5MbrE3MfyWEZVdnM/v/Oz5nkh5fWd4cDj2dFHTx60r9x4NXB/JcM6G3rkK99rxI3KwqOabN+bH59qAb/fa8uoRchmfvPmqN7rnzUPNveLBau++xHP6/Z81HefWS4ebnI9qf15KY9/8fj/9X/9v7/5u5//2m99pfnx87Prx2eDnWGzOvc5e6hTsCopu+QBZJebNQtt9N7ZLe8YxPm6WQGE1HbbAQ4S9Vj5duXax+8cvPXrNw7eODx53ifuCYXrd9uPP9JP7hfn3z5dL/Ld08vjjY3hloSmX241uR/cO1qHh/LGnYs8HoVZi7ZXfsW148340xu0Hg+P8q3+7u1+4xarp1W7qEefuTnywX88l5zzwseNoCPxOTSjtdRFX8O2RERfZGi4Vmly9VIxr8u7W3GXa2zNsDnH4FKHZ03ecs/VwFq4yXrjTLfCZPcmQmjK012cj3B+0F/uDhZHeD7Udc6XX3jltRt7B6CsPDkdbm/eutF+/OTtdz9Z3L311cPmSZiF3sivRgVWIQyAStg5ZMS1cAtmOBA5UqHEiQQrxWjBH4AonCMGa2oDDYbrKEKGrnZuE6mXBcEMzJEmQRuITd/acU1IlDlF2XefrnPGj7JUCU07GLAyROCdaDS/RhMnmRZDTXRJfHUsmIChczrtqjTUsXfO2WbTuC2eHUDMiHCOgQgmHzJZe70l26+vrr+bPX5/cPTb+sZaV4XXJmTQSGbNj8SsYTbys1rxpa48gkWVFdKKGuVKYuiWtqnxl9R8oCvlNmWIqWbN1bITgCYCGmz7JaBuhpCU3959N0n5axaEQwAJeXYWaAjHntiiWZxzzEa4YWYyUPXqWLu6cqZApghBECIvXLf1Lb9btv3j8vxkORvzaC3RM9oojvjKmMtGdmPDiiLAgz1ktL2zn29s1dOj5p0/3HgFq4eHIR7JuCj2C63W2lSBfXFrL9bDfnNzXh4CiGf3s95o8d7bpfuR13a7Evl4PvvB8e3bX3wWAg7l8l88vfZae/7g8WTn4Wg/fu7v9rZovf/4sJ4ujx8uxod7WfWZW/zRa5vv572yeGs83D+4FvY/d/8VNz85nf+0/8YXr1+/u8/7P5uuQT1nTg0xKtRxcvkhOKMGEiUluoga6hY9JIoX9laniIKxgIDOhow6WrvNlAm0EBHvnUaBKgOiUZRjaI0d7zg5fjifBYnKgEie5W27NsW5bTJSy2m9knb9LyUjUJBxnYlUo0QjXzOzYVYhRoOjUq+WPjuJ0Vw+2HVuqQSybIYYRBlmTRKT3Ig6xVu6P5GSugid72z6Q1Wxui4iEIpgR1cGbZKigOGYJQoREZsTbwroStI2SViCbWsd7NrCQhk0LV5grpyiKmQLBE7WOtYP2OfBJBKYuRWxFTl3JAo1pZaqu/IJS+0MiBzgQJnjXmgZyJBDe6Ch6gZok3QUws18283ufvD+9em91/Fo4xeH89de7998U/+H/8vg8GFRFAtsn8h0S/KZlBvYPGrxxWz3l/XmIY9OjulLb71xUo3uzy8/eFLe3rru4YplT2K73N4hDn7eLsNWwMHmKzH6NWZN7tui8P5W4fdy3S3arD7tN/JhPZvFDVwvFsL5TnWRb9WrUeandfDjgUb/7p+/9/D86Jvf/juHZ0e526d8S7JG8hxeTIyl0OQxTID0mAkIKkzsPBPQighTL2pgAjNibFQCMSnYx9DQyuU8fPtHf+WHm9e3R029oGrRbt4o/U48P2pOHsb9YcY8GA2vDTaeh6i9zPtiyKuDj35+VDr+3Odm2Yx5N4TZbDC+MWup/jG4HA7medPSy8Vgazs7pWUIo2qGl3OK
dchD/lIh4ybs4tTvRPhReznazCUDLmOuYFHy7O3cv9VbFntPq+ICk2NsznhnThNckFwoL8Eibs8t+bL22cHnPhPOLvP58/1isRfPn5y8d52O/Gz18sHtonrpv/wvz3su3L493m0fPnr6Mwzyb3/+8/fmj6vV+ZKQrYabo71Fk/s1h0XUQrWFrKTMcqdB24rgFK2AKbaW8qXqVKM9RWzPHhRmi/Fppq4miUNaClOSFVph7bYp6V8xO3vqXEJyruhFHS3TuBjW3oIUcGRW61b5u5w9EdstdYAQIJQ8eqyFRrJsT0knIHT7KGMUE6HIcis8UA3EjsRFlgyI4sWDpMqbL8udx+vzk3L6UX34htycy8J7WQXKhK78eFMwAlLPnCB4M2kWaSUyqDMPYonB3nHyCMAV0fLKYkRV0YYWRN6xCtR1BVglhGgHakh8Q+ZOo8UJJ7ZROHFELSfcs3NMZm1FjonIgc2Di9I87Jg7eXQHXV55XIqqEmkUUg4uOPGNBHAopXcLe+/Ts5/5o281BZRFAtTVFHwHhiSXB3F2/dnl0+fTycat3uZgenrkP/7B4GDV9ELcqPj61hDtUM6aka/yrd7m3Rgn8aPFWXGT9m8DaP7n+3ndYCFHQ/c69oV/Pnj9jRvtqMxo84Jee2mnuD1sXt55lr30pdP89UcfvPTdJ2VB6/yLuC2335iH35n+zOkrbjT88Gv5hz082Jr8h+LlfoXh0eparH/8w8nf/cKTFW/t3eaqCO0Fx75lEgI9kEKDarCGTST5O6VKBctCAhOLwJuHasrHZHTr2PRrQmUpigjgmBgc2uAS5wjMMI04jIPY2WrbNG7gCwgEZxWdExJiuYQMMe8TeyLTh/BiXgUi1CkBFCQmFD1BLUrJIdaeF9PxpRsCADsWEU8cQjCf9tRJJETa/L6JKc2malnBfIW528MCMygxHyUo2EHTDt0uoHRHQtcOaNdGk5oXlcQU44IuqcUmZPPB7nzRNfX/bMlRJArvXAhRGCRdnJ6KsVuAlIlypdWWhNGp3QTCPjUVyiBO3tDIozJzAeqhT1IqSgp90YH2b/WL+aON2QcRx8f5s+ny6a8NLr716D8s7/2r5SIUcFW9mIxOtiQ/wXjb+Se6oyqzZvnN4Ut/sMjjk5oPenfH3/iLe99vm/Y+H90cbpGnvACJzrdGwJZwTVDeGu/1H5VNjQou+Fh7leXGr+ysTi/7z0q/gZEfNdM4Gm4eNroxGF3OT/eK3YfNmZvlAYH7o8XD0++8/cdf+Dv/x3feezS5ffdZIyWFxoZgpxHmeS3Qvh3yKoESF6cl05irALlogBgq6kVUNXq/Se18xY3Pz3HvnbdvfvubvdaHofgyD3t7Xo5W08P93/ybMd/cGG4veML9DR5uhqxAWBxMiX/67iFn49c+i81J3V9m+TJOJo8ftvXp9w8GOkLjfSst5GZfq/Vw3kge1hBPwB6yHT6SyUz2zrXYzKuNUBWDy+1hbFAL+0yExa9kp538ylm197j26/L6lLbOaLdeZJiSTFUk5Pv53F1kpfM7PL98dmc82MqH/pOfTbZQ51UdC6zz+aP2Ekf9MNDL5qMfTod7/vqNfWS8mN53i+39SX7YLOqT42F/MC+9lOA+oQ8KQEGsTK2DeNIAmBexh7aARY2aCogUoiLsfeepkHZIXREmDxfMNxXKEAIb+dETYpTEAE7ifjB7I4lQN2GpSEcTZkB7ea9tWoF2/A0wlPUFocnce0yQ1k2SL7hMyWKY6OrY6+axpLj13oHMmx3MLKLeZKyWpASFgD3V1Ix5/NXVy9/pffBu+fjWYpKjCKj7QiGRwlPfbeM4iNkRIyF/IioxhGBeSVCN5qDJFthnfUGHoumVRLfj6yigkYkRha7gPvNIgg3TJrZmUoEzyaNNvZ0xDa4Ex46dc8477xw5u87ewHEy9yPtzj2BfUN6sYzWpHESpKhUkWCIoMqrsvNBc/wJnxzJeNiMBY34XpC2C+P5lO80odfzJ588X8/7wzfG1dlq9eyX27/1K4vLrJ9/uL3htsPDnpwOYxW3Xr7c2Zzl8XAyCa++cXFYuL4C8DffaM/e9dVkNtxaYaMecl9ONz57c358Vk4Gfj9MXhvfx/gC41u948/6k4N5cfxeXnw8kAFlBxi84W/82pxmN+J3svg2Rs+1n62byWLjG6559rOsOJ7O57OdyazJDkZ3nq1/gT5ownQ8RDIeXStUVB2roc1IfafxzRgSmSlCgxUGIepYUpJmUurWogTAOw7JZQbeO4jwlQGGSFQjEZpkCSFGpCW+gNhsGkMEOw6h7W57IGU2WNPzacWvEQlg1D+RqJ0gyjERUxsjfeohsvsUV3ZsClUll1pkAM5xG8W+CCka4UXZpCuEPk3dCiMchyhGPzNRIROLGuFASY0v4jr6liQHAYAEkA5oQ+rn9QWGBEWkZFtNTNaTkGnwCDEKkQGitghK18raAeON2IumXrzbnRgmZTYmiZQOl5Y97FSI4MFMkjHn6l3MohaQPnQI3nJNM8vu/3g0Xm/M7y2bT4Cjd86mD+bu94rRTsWnx8fsSYbUH6z3BodNuV3VbQ2cl73Xw/Sz5fVfDvH8/aOt0cEXJ28eHj6dPjr5JU72y/rGS9u9wldFjt4eI4I5yGnoofBVUS4Lv2bSydOTemuwUQ2CazzleV5QqKYP742u7eHm7ct3jiZFQ0L1RSOl+kWdTYbV4fR7f/qnp+2NWT2+/sbfPXlyDs6EEmmdlEhLZqhIo8Kcq90J5ICgCODCOjKJkTkHgkgAMk/rmGdlzRj2i+r86PTekztfvBPWs6wYrifDfIF89owbVKOtYnRwcaSHf/Wn50/rzbOl51JCc3Du/Q/fPaybnbe+RrlU/eGsLN/48v+h/eXw9Oj7bnPd374s9mNZu/HDcxQBueSs2qNmgGPsnWJ/qnsLDC+b2YCbocMZzbVdl5wRhxb88Wi7GX5u5ffbrZ3jdT53O+cz5+eBL52CaEuiD7ypugUfjnZ17qvF9VE/urp478+3VuFklpd1hiXtDHk2nyP2i92dqMtHv7gQbuWANw+ey6Jf+OFq6vv7k9z3F0XuBowFdB3znJtq7YXIu8jBgyWCHUUwaxCJTOS9NwcbNmJO4m5c3f3GcyW1FDZTIsLMu21hIkzk87xt2m5VaY8MgdJDk7pzMTc66RxxrIE2H8w095qPMIicY5Eoogzf9eCpHHbHQHr0r/6JRkGXTR4l5rknUpdlUDjvogQfYVn0IAIFEBfkG6le8ddfunz+ydb0R83Dv7m8uwwUKSZVD1RC9OzseCGCc16TxbKKmLmjWDseY2Q7OK4WJVdkJ1ttG76sooo8z1VEJSUimu7TIs0lpeJcMbdMLmS2A9y1IY6Znfekys45712i54BUzUXY/BpsEr9qA8y6S0FQ6QYnA7UBFaFI6gLBiwalpcdm29+ry+PR4lF1/kYznmdNXgMIbUeqVYDIRRGC9w7L2t16841QD6Zv/8noy9vuzf3DR82vzB7fxXQvPBjkzcbNnVV5Omf55M7tsOt/foazJ8/rcwC4+cUv8V/
ex7N5GEw+Gt4tdPZGedqbzFc77UZfsY17DX0w/MzDcPsr8vbNeMnTzYOI2dl52459ljePqtFOsYJbz2MJ9SNZrqrdYq+ZHrnmyeP9G7NHPr+e99Ynt8eb1beuz9859t5DIEcDILAvbGQLoTHzJLUpyYAHBYzC0/3MBsgm2w4bAw07pW6to/BEUYiYWSOc6yikHmqmzaZFFXYMVTWXYe8lmo2Fi6pQZOwU0obgO0hZ0/6d2KRBXcqoYTAaRKBM6Ll83TZCYGXryqzP7izlRBSOnM2x5tPOL3jXQrYndmxNBCHZjAQJzI5ME80mEzDMjAILA6QcWNho8fYjU6eXjuKcj0iu1iqWNJpYDaokQpCmC2EhR2z2dtqBcKaWt8YdyrZbz5wPohLRNI3N8WD2L7J87J/b2kaNA528axLZPRmHmGM71AGO2EGdCBP3VLx65ZIxYOlHHWhvO49H7+4MVjvNY5EneX60N6/5tLe+0D+4nP7DfDCpR6erprxexNk5hgGjZ8PXvjD+wm/w3W+/86//09+cXn40zj85fPr8wclXX/vKnezVnZ1rjx4/fPbkcrVorzWjkS/n46IqD4L25iu/dmXfN7KabfXCzbY6qnW8bMYDptWjvmbzD56Ux7IVNp9/chghxJg0Yafnpj3xDO6D+nB9Pnx2cupHH529vzv4wt7w1vzZJa5Qx74qJVogwUVz+xUC2ii1clAEaPBcKDVWo8HMLL7ZEKrF9wMqX2blg6Nf3nzjJg9Y87bdnnCN2KzzjbCYXz7+yR8tflH1wv6k2WewtK2Pnue0E9B/+/1nNcubXxqyHhfjjVyuv/6P59ffmH30g9vVg4P6yV5Y6E5TK0Isa5er8DnvzXTjSLYr3jnGJPOTPuqhSo9WWV9DUw8JAfirmeA//1CvvRb3v56/9kY775Wr2CyCxMaPtR1EHcRiKOV6Osmnm1QPl2d5oXlfiycf7FZ7ax5XT49zf/PhtF+de6xW+PAcI1/eyDZ3R2fTuhxzU06HVenzMMDNARZVua19pVJRQXJmL1gTwTE8pGWwneSAaWYECk/uyuoh9bxdjWNmSaZ67NXmg6t1kRJrhD090XlWEVISO65SVh2nymGbUxEBK3C5rKxWEJHZwJH37DmIeCZvlNQ0T1iWoEANwureG17ASrZK5kToYM/MnkHp1LGoEwIH0qARKmojdNQQhSjEiC/h5SfrxYPy+PZ653q7XdG545wkIjKDwa1HRuwdQhBhwBNadMsmgoRoGgZJPoJsci2RkHJMFYYCxyayYxUEacmkxgSfgDK2ADixkAMJjpnZdUAdJ/Tb0EImJicKIpdnuaipPQXwIBabTKMKQuK8iBBAimCWBUytiGfWIMQUJRIQDLSgCNFABMf1qgX1bi3GR+P1/cHs5nSaB9e4GoHghSUGYs+OhWsJG3mhdW84GY3GO7Of/6yJR/Jr3zht3r8+e2cfH7/qjm/5p73VrDf6nTnPT2/fqjerk+c/bU+u++sH/acVgAVdayZ3J8/fwyh/MLhdIizCo5evF6xtFdbn/a2H8aUHi+tF2W75Op/XPB3UP28frjDc8NnxfEsW8zs3m4Om55m0jlH9KOeiOf3udzfeWtzU2a0P/7D/hY0JbT5sKz/5zA9fG0FqNCpVhrokVEDG3Kg4a0kh5LsFrQLkGID9SVLhhNiRlRgvlvRgZpJoWaxElqLg0rBpVYSoA2lZhIJEFTLD59g2TKxkZjhEkaIDouZJt0PETAqnEp0gCiWOpBkFAgTyjkPw5IWIve8kRMQ247FHRx1IASKi5E3xf2XVZj2GEnGIwSh9KmqKJsc5oM659JXJo5kFMfmhsbgrKy4F+yu/WcC5tltLsQnsABIlMCWqGjHnUaKkYReCxIB2ZC2QFVVnWsm8l/Wy8nI+N6YIZSwQ8w4QkcGoXC6rNAuDYgjMCvJRQhq9RaFwHUof1eKYTaBg8E6mwuwzFFGLIP2ADXab7IbozR7uNYcFnizcyU7VxtP1+lD81HONP1hc/JPR3l7IT1eL3rB/uTPWN798+9c+w2FQVkej117B9++/Ve7+aIDGh3sP778++QKO5y/1DtbbzcPjh4Fkul7svbTZ3y0ej67vDrdiPHbS7G2EE7SXx7/cB7KzB2526JbRVdvZaOTRyrNmnJcuNgHIOW5CzwhUIEAkNlG9+B58z6H46Ts//vVX+ll/s+VaIrhgHoiQthIcsxdisLk0UeOpGbJIlDVRG0LLTOS8ytruQS8O7CEZkEvR7y/qy8PTw5fuXhOvvszQY8b56b//75uFG2BvGy+fckHCFFmiQCEzQSM7ftR/74Pp8eP5ay8vbm41u6uny09Gs8Nbs7XUHPze05AplxWXOVArNVSeyeaFHy10fC4b5zxyMsk8l7L0Te1jVEY/VOvXvjFrZP3gg/ZJRfe/W3/vvbJ8pcyuF34bt4Zxy7mdQZEvtrHckmokF9s4H7gTfvIR3v6jybT36Hi1qPNisYFqloUwDgd91nw8DJ7r8/p8WTcDbqT2B81br8rkM7c/qObnUubDcTN0riKpVBfRlxmWIJici53zIbbEbORDtqwCMkthuMTHUQLQ5QwaKmxaMLOHSaEwqTiI3fqqmghWBjqJaBRRQNT0uiBm9k2IdiKAbDROxnuWPdLVXSCxFTv7ZBO2pqPByFm+45R0RwZfNbvWyzlK7iJph8RXGx+RaIcmg4G1ryZN77X5zoeTo7cHT769LopQRl6rEjx550QcAc6zgH2KpYNTgEkZHEIkCtJxRKNY0l8ak8yE0ogzqt5dZQcLumwXU29Z28O2YTHmDSzr1EDCZPeQLMc0yZ6VUK/rPM9Nww2iEGP6jDrowe705BbCtIZQUBY0SSJFXY9jUz41TUNM2oKZq7CaSFZWrirjYX+xfzmouc2FpQVAMUMIPUgAYVCO8/72QMtmyfOP7vEwzt/+EykevjxZUGjq9bpfqM6r5oN3mn/4Txvmh+/fO/ev7VwbH36Cvd0DAGW5UckWP2Of+yrivTuv7cne05p9T8rcTWl8pDvzaVbIelnkcSNvhnU1aO/Eg16oL7g6vbZz+uEPxn9x8plf/Vu5FP3pYlhg+vCPh3sLFwo9auTkLHzu8uBWkOKjOh++vP/qJ6fMJ8i38vY5FDkjAKwIzB7aUtLpkjOJDgOAsy2KJYU4B4WYQUWCjMg6oURQICXAgeCZiTw7YROrGwAEc9ASo8iLRKh3LsRoAGBeFKENoWkdkYgzOMp6KTZ5gvnQWZpmF5pFIGIfIAiSZ541xXmRJMoW8MJkzX6RRJ8ye0h062FEiY5dlnkQtU0LG4Ip0ZvU/O+8Kf/sfiPLGYkda8F87agjeEaJKVVCubOb6UDj7heD0p0RfoyuoJGIYtTUw5CZU7KIhKZt12sY5MQUYgCQOW+d92pZ2aoMpEpgECtCDKwEZx8oRZHAHRsJLyZtIiLyIAf2mkc41Z5HIYPN3iJfjouGm+OhW8z0ZK86xzm3p8pHEqcR4ntS/uHx8X+bjydFeHI9n3zz95vQhCdSDu83sX/r5is/y37yG7L1oI9pmZ8fH/
+Xe39yd/x6GcqnT575wi/PJS9o5UMvuHapRxv9Zu9uATmu558cPfjbdHMPD9bni42Xr+PBE3YBWVOsL7lRXB5T1ahH7PMoZ5SBGZxRkJY5j1EChFRI9Ud//dff/My3hdl7lkKCg88wLAaL+UqgDHHitBFqCWtq64imh8Dp8xJSRINRPO94EDjPNJNWW8A9p6ObO/taCsTJJdyojVVV57cu9NpJ3FjIABXJGtRwXIlTLla8enLmJ70Dvdib/7B91G/3+mVR7RerPD84itmpLjbzYS80jfiVZw0+cLHAxqK4Nr3MG5r4ubR1Aw6znufrd5og5WL2dO+z69tv1iHn4TfW734PR4/KZo7ZT2fVezrJ4oPYO+hnG8xyuMZlLWdDOWnkNIsno/WTQejPmQ5R17NYNBNQ5qvjDNP++FePzi6ansYCOuzt3ejffeuV4V1+1NSPzz5B/5U+vBfPeR5YsowjNyLKIGKvWGd5FtuVYxctktqGtbS31bR7udpidSKctGqlq664W3Zxgq3T15iqw0q1SgKHzazSkDGRGMSzT//W9H2kJioybybDbkW140IqJJpG84qxkUQhafOY3lnSDF2RMo2emUocmbElDIMzjzu1EkgMcopVbD4fDp42s0Vvfb93/Hq4HqHecQFaO8rgEv/JFA7K7ACCRgiLT2bBKiIJEEAy1vDeGc6fctQB7xxUlLv3+KLDYMvY6QapNEuA2TA6JuoIaC6NHR05xfwCbW0gGmDi7rRIv5pDpDMasA6FHUiihhgS7VURtRNQQVlJVGKMSlRIcW3e+6RcPN2YTy76HGTtI8CZOmlbB6xB2bLmwa2QU2/Ay59/JOdnxbCV0ye9L4x6fDLIWw6X63WdF5Pw4B7ee2d++/ZOtukPNmKe08sH/PQIwNkf/aH/yT0py/yoOW9QNvnxeOtotBX2x97leRzUH81oqvrrxbwOs/q4x/PetQMKJ4t186DV9oPv33l0//lx8+Tp5Prr13t3qurdvx7lT+VWr37YFsfFuq70nSpIfvP1o7ls3fC7R/tjHAd9ruQ8NCNxgPcsEmoAzIiqWZ45dlGEzXRGiZRNCESOmaiVgC7+Om3TVRgUNDqBMtS5zHou06gzRzWPp443BSiUGL28aNZrG1md93Vd2xMmSk3bOOccOzLDbyt/pKoWE2FvTVkpkDqCV5esp0nBbD6ajszTC3QlcyCz97CClbpGZlUlgmmUtW2DkQmgcOw6+Zt6Z0wFw3e7iq6sCmNBXXXQZMiBFW/bfURR422m8yf13PZtVEjVBIqmNbL23B4yWNvOxJ6NB5rEQypCiizP7OowXFTJUj6XofQAwOyTQFkQVFTEibmdRTYeFj4FWiBTZXGgwiEXzbF2jRuzr4+HmM/pfNROZapyvM7OnEyZjqM0GsPsUos/kNnvb+8MP5zN/tUfj3/7d5dyQXeHy4uPb929+8ErN7MHs98c3/q304Y382Im9x/dw5S5Yt5hJ+KHg4VDWNdU4qyerYdRSwpRxlvDYc61K/TiOc9ivkGr40dy0qDOm+USa83Y64hbkrLH0g9c+MZHLv1l4JZdo9SSz3uFML337jtvHXx9FpZaEHkI66K3loJZ1LHTStFA10AP6DFWwMpj7VU9UUMgSAuC530OPWAALqEKzvLzchauB7fdlxVkzsti45yuHYXdM4zn2GqagmuHimQRqeW4lljLcNyr57ES5Drqz7Mh1rozfCyOeFWybrrJ1Dc+k7gMK4GM9xtQMwuraeEqbmazfMbaL+iV2+ePHt7+2//g6N6D9f0/ae58Zv4EEk7L9d7W3u9VeFB98AuZPumV4gvCAC5OR7PTHVwWfL4jxzeGYaN52p9fZOdlWNdnw/EXf/O3/ux/+J+n1bT0G2Wx2cwvitn9jeGNOPHFuL89zjd2qsvlRydP6+za7dGwfxEA9h7cREEQVWbvCWDvRBrHCEEkinNCBJKu5bMZ1BaGyeKX/uvcEUkMR6XUbhMlA4ZkSqydqE6ZiL1HUgWkXACjxxqvP1l5GBuLE2jL9mddKUVHwOjeRHpuqGuWrRR3bl1QTpMidVNI9zVdX83QlESqKh1xOu2qRVu/8mHYuM9c7Px458mDjdmtMCp1yBrUORc1ldugzqd1ODR5PouocIyBvEiQK38BBjNIxYQsdt4ZmwUkSKmO3K2dTOOoifCtBErV0mZoStsqe88WDmMUVEBJOEpL7OB9aBr7UdMbU9WrQn11FTNPIcbQtJ5Z7HOXBKSasx/B0MXM+bYNBAjTtWrwSXs5H9TzYj2pnAjEK+pAuat1zZqXJ8d0TaigOA/Td/8a1XPk18jn2qxbymrna7cxC2f7vSwflv6TH++/9da0OODRLje99vvfXfzrHwDoP2h8LKC5exjcQmLVYBRltuTX9mgVV8uTrM4iuH1cP7p5a7L+5I3Xj6T45dNFXIVY+otXj3lTXmK3an/23vns/uCbQ7k7rXg0ml54CCiQV15MfT7xQeBJyGfcq5u11BFqcGxM+YM+LSFz9kVeMHNdrxLZCmZFnJhHAqZUiCnG2LG2mKBe2Sqx61pBd8W1s+cGai7FqXqRNk0tKfQwMMFleYzBYkygL27wdHN3/WciKzIBYJCNluIIytZ/g3EVeIbEpE+V7yrlwMBeRpIVd9IClRhT1SQOMTjnmZ1oMO62KYCccwq9ct2wm5btZjczGhUiYmFiMp6Udl1+2qhb92yK3nTSmJO8iZbJJgRj/HURGJI4Z0yiQpxGZ4hKjOy8BPG5ixDHRCKeOTLFGAMsXtmahc63TrQD4FNIsHWwoOgojxSV1IGdp4DWeUIIp9KOpR0HCRK9ICicsnhB3Q439/oxPr8MP2kWvzruz3/+k/mX7uRfem0jTi+eflDufHV86/UHH3x/J8/fdO27a8odMu+VBdka8z4Jz5/MRnErcO/48RH6cHNHfQplPhzLjBDjcIjNJw8+PqBn/RHXzDzVwmXrShrPKnBAb0Ox0WDPy5bGsT/D3lS3ZjyeYbx4dvn69hen7epJ9fHenVvVeu17HD0chOE9wyk3VeCG41JQc6yi9sAFcw2tc6pzcG1X0csOUxG1p36UQRGccMFyLfZ2OFT5/JPrCy6P4+Ycm5c0Psc2LhFnkRbMa0YNEoeWmrk4gQ8KqeroUTOv5r4fpMwvh3tzF4oNJ6zCHmEVZj3fNIu5Q63FYl1+/huznz8fDjZG3/oH4d7T+VHOx4zzzXo1oakUUq6bi8hhNH5972tvzl47nLUf1PokFqc7OBvhdISzIaY7WGwtng4XMRwrz+SEhxuTN3b99QHcIqzquiGOlA33Bmf94WnM+kcX4cFsjbP1zV//3O2XP/Pcb6SICp8jCLzmuQshpGPEJlTRKMFcWdkZ4RIxlSQ1wiyspjLMLubqcbeh6cqwhu3JVIDElBhRIrpkUwnBFmGmOHLOE6iNHYTsXFLSmKWAqRAImnrcVNyuDgVKDhvMSIbu9qxqpxKwo8ycahNxOj1d6FhMGi0fyKjKKRfdDiTjXwaIW0j7anv98fr4pKze709/c7m5chAi70klRIInG21sC
E+nglkkkY9NVLCnENCKU5CIJ4cg8C6VWOJPmVJ2o3t3Yr1oLNiWIwo2X+0kRtLuYEtkH0pb8KZphsNh0zR1XXvvrcOJIRA7+8HROSsYvMyAHXIsEEuYAMXUdsH2fOmlgMx5jSH6sNuOdpYXJ5P1yWgxqTYVLm+kyRhgT+wFWeR8b7I6PAyPjr0GQol6jaYNIWsLF0Me0ZvnE26r0X5RX57Lo0P+wleqd947+/F38GEzanIAMhqGUnX6vM/qj8eXC9koevknMntwPLq2j6WGSvw+5JCPi7175VeFyht7H77yrftLQfmTMns+Oj1faG80KvKdJmzufCX/2//oL1ajXz3+X9t7y/Xbj7PDxx7nVeEbbtfi1uw4MBpzJoQIeedFGmcoKMPsw+rQKoQcO0IbQpSUSWX/iUY24o6os8SRxLnVtmlVAVUJqizJvYbM0tzSklINT4laUIDZIcTAjiUKAQwWVrM4tR5BNdEatfP/7kom2U3PJkkXUUcCUiYW5OCAFwyurlW1Oy492mw85A6UppS2kh4z55k46xRxdlOLuUdlPosS2xDSHJ3Y+aqCaDe7aYOZo3RZJEm8fNVJW6Oc9BhMgMW9aCdyUHuTKtbQpIeFYG/SNi8MgEOIYAaE2KmqaHQ21zp2QAgxSy+YyruClKBEDpxaGgKURIUZRAgSOfUgEiAREhtQjy+JhvWaAB+Vfc9nro4rkehzrhZVpYHV/bRubrCf8OD5X/xp8Su3jqdH+SCfnp6P7nwxjH52djr9ik6O4+VRDD5kCEDbiGcsciJZ+dXls0s/zGITwxq9dQaE23Ou8oJ5Z6rV0K1m4MnOse87+Bi8czUrEBGEQ2+Y+9K5m15yPpOdGSYz3pnLuJaCLsPHTz/aurF7vLNq9OnetQl5jjHoIC+iSpQ6BPQdWpUCtAIVcDXLQiQn8hBHvOyDoQj+STuaDJqyqMIiOOfZcaX1SX760v7u8cNxKzuN9qcyPuXxlMZaDXjZaAVdiGu9NLqugg+q4gCfNwEBaOuwRLakMCrdzMlAer28Pqk1y2O7Es7yupV1s/urv9ccvPb83/7hrfyz/sa4+skno59qeDzb/tpr9XvPVv2XwlMqamkyda2Li4vpetFu7g/fuDW89Qpdb3v0cFw/mMw/9POPJ/P7wyYOn4+rauGXkt361g6PyyDhUVvWvQWCIFs3i0xqwnbbtKFtGo9BMfzm7/7uch9LmTNJo3nue2yoaoQAWZ5rbNP0Cesb2dzngjTMadhVVev/vSZHHEICyq6qb/KKQcLNLCQJiRrVORh3mDXZ+tglGq5EUZBEuao51OVr2ybMtmaf6u1ZO0apqvrMB7GHxcAhWxxfaXORGlV7qEXhbY0jpCISU7cvCXg2GZA9nqpIazNqGATvvNNfq+7+B//+g+Hp7XbyUhy3KnCiAQCELJddkvdtujKGs1EWIEBIkDFElB0A9kS2faOUsaidjwGlqSQdRqkcd2ohghhLAZwESLaCT1OtXTQRyXvFYlnlmWciibGbJdL3vfoQjY8KILQBjsh79+mOKoWMSeb91eQtCgJCxhk0E39zMZoNZ+ejupqVed1vKDATrdfcL+XoaSEZFZj+5O2DzfFZUfYXZ3Hls0rDolkM8opHF2Gu60HlWFCuOJ796Bez9+PZaaa4JU3uWg9Ajxqd1/AhcFvoLMxy4TwH55+cnr+cDzYHVDfRD90hYs4Pbt86hzzkyTi7+SV/b3NX5HQxGpQlgznEsT+Wpazml7tfmx/I1pvLef7T9k8eZzsZj2jBg5b7kQsvzrwlQQK2K5wFbTLOAYEEqDhNDjCkygIkVAEmK2LA5Q4ghRgpInmkiBBzZvFWNnyCxcANEUGHh9pIChAh2v7G1MaxYwEQI8l2bO9PRJyIBp0i4FPFLH3WkUSBnDOIOd2ggToxybGtRDtJOCFKpJQRlHKdbCjsVhWsFvFJRAqJLciDhJ0DGIIo0rSNRDFMCGxiOVGhzkRVoUTOpcPBcLfuzdLVZUDqyQlqulxDjqxA25GQHjvrRp1jx2kMT+lVafhWiCg5RwQ4sAQxHqKKOmZmiiASJgmRFKpOOVJnDZbMhPgKbiMjqRLWJMTM6suieHb4frtqfE7WeTVti2UTSXpw0pCE2rkeUwvGT+v6t8u8N70IJw/Hn92Kxd2qHZGo7r/UHp9Wsv6mz/9jaBaAB7PPEBeMfnPu9zdHrqHqaIVe7A3yehI2vO4v/SoPoRj2eS8PDbNvHI3G014ZdMzU5BQaFo0U+xvs++4k4znvn8r4CHtnunXOW/E0ZqusydfHZw95TUde69fvvvblN0lE1lX0PhPXCHl2YSE6UF2EbO3lIiAHKlVP5KEEqgpC8HHn9aPZ8SivivEi86xc1IGf8nXim89bFWxf6nBOm1PdmWvJ80amgjmo5lBFauADiaxZPS9iLDyVw/XW9ubdzyw++kHxbCHF2p/LvIdeXgDr8tZnBn//vz36s+/rD75TPvexHN382j9qaipW2bK+n7d19cffnf2v/z7zrvkb34rPtVFmL1yD2r54WYYpNUxN34Vsd5DvDG9uTsp9v78td4fhiJ4sxk/D7GHlnymO67zsMfdYhqimPKS8HLWLOjp1G9lkQo9n09FgyxfDNc994SHDgHIN37CPkmci2rIECsQFs/e8bkLuWYIQQUhVNYh0bkvJtceeAUfpP3QwUer0kRQMqhBOR7ZaALil/qqGNtg2F1ANArv72ZwxmIBoJ49ZN6E7P1SEGKJgpc4ujzo6h40LCTFWdI6LVxtQSDfvpiPKiC0xRvvGkZNNTwxi6kh0gwCUBI5Z2feAKFpLPa43X/Z7H20dvzM83J/lcH1qoxBBOSAExJ56088mr0dyEdEW3RolHRJq9DYxDnkaUIxvw2RVmJJQ0QwDrrwFUpW+YlBZhy4kUJK0H1cRAWmMIgGthBBFNZi9AwDvs+7YS3Q6Tgc7FOK8I9UAMz0mgQqBo0SCAxxz07Tk2JyDFPDgSFJz2KkHZV0thquLsr5W5chJ1w0VeWyryf2PeuMbTZCgYT7co6MzcJYREMgXGyuKS0ymRZCAtumvNLi8PKrz03o99y/JeozzqV7mAFpxBWdRF5BlLtUaQcQT5yOfXT5+fklbo3LHHTvKHUHaBU7G148mO/tFfxSPRsN7/WvDYsmLc6KNFd25cTLGIpQ//KjeyBef2zku6qkfkxxs0DDMw+bC9yst0BAC8rzHfW2bPEpNFHuOJfV8LDHYnCSi0rQBqkwhiEnJM08iEiGUPB7sxk5AtQOiIxeZAIsM8xFRDZFNWHSXlEWq8OxU1FwjjP4IM0oDiyg7iyWwPXLaRZhNdXIEJVg7HBgEeJAXBFFhYXYsRr2OXVet0qHPliji2CVan1VfImaEaDgwiGldr51jcpY4bKQqhoNngqjzrDBbMFz1z0ACi2wwFlGz/YLjEKLrusOEJ9n7l06cpZRoh5wo48ZKTE28acCiBZNaRioHhIQcizpPEgM587ERhheFiuberzV4mEOKz6LZn8CJaTHtFdLJlBR8MUAMTPRqhEwJTx49uMlGbaEW
wSsLQ6N4X4SikVWRZVRVwj1+CjnNqWAJj5+Uv/q6zkMxKmZF1n/j87N3/rNv1qVmv079/xRXrH3EwCwSFmXRr2andaVBZO/mwbQ6jj15dVjkS6pJdGfzLA89X3mNAdJq3/XmG+WSVCgqqQc78jxtdo958xzjc4ymPDnH1uqC+BLRRR0iG5fohSILJ6c/57988JUvvD4ZD2fLRpGrd4F781ERCq85OLB4Fh9sMjOxGgVGU/jl+HOrZnK+PNzskWj0nM8byXCT5eazekFy7SLfXMdiqls6pThjt4BWCItIS6IVWDTPNEhNbuj/3j/1meYlLep6fRI81TLcD7Pp5H/7T+qNyeJf/ncbX3/j+fcejfd/bdp7snjn3sbuFx7/mz+5/at/Y3F62vvg8dnH/91YuRfXize+crHIs7DUvMdzkLI04E0JEi9nJ8PtcRbLul5Uq+MRXQSc1+F0gOeyzOIyFDEPiwVFkpjHtdy5/pXpyfHetb0nJ0/8sDwarDcc5nV9kTd9X0VeDHf8YSxmflhLsUKxQi8uBTVkHdGQb2KoSZrG4BzytssVMyJXBItYYJs66SqcO2n0uw6QAGg0oofRGQDz44FoUOrs46znFFWB5swSuuIjlhnOTJysnKz5tl0yE8z3gK6G7qQ6hNFQrzT5XRnukOBuVYVuoauIwaZQOLJNKicFZ+zOkrSLTcszx4B3FJQ8IFktl1+o9x63p9Nsfq93/qWmPHfcC9qKVU+JMELKlUpXbeYQJpVkka1Q71yM0Xzwo1oAmoDJMdufO5dZ86AKOIUoEaLN1CJqSYxk/FgC2LFDF7MjgMSoUQlo2uCcmfunAHMRYXZG8MKn2g0iMDn7sTNAiYwA7RXiUmRb0zTGoRM12g0JwOqVQ9n29hf5B4Pq2WYzOQsScoZGzXl6Orq8qDfGw4Yf5nRjNuvPHiOUWjFNORy1jeRPrx3M1zxDLy8EYVn66zOhJ2HcnOV4OuUz18xqAGWzGfJ1rEPsoagXbjLZuv365cNH1fTJFujRs4v8RjEq+nK6DaBchOVQLm+u6bWbx/TGdOf5OJzVPNIK2NHet97wO8NHza053/rri6MlD7++/SPczujVweP8pUNsn8r1mWzQZevWmVQiobYORsULgrGBjKZEojEEu81CjE0Tm7Zl5oydz1yEkqgjthkLane1MKAgZ8BJkjEhELOKhUrZrhNEDizp7wHDRcDaqcrYM4ONtq9Xpe2qM7ZGjQWglDitkkVWz0FjiI33HqosmjOvbeFs9b5T4jKxJxaNEqNtbh2xAjFJ6pSZvfetGWcyhxjBnMytIBrh2Po3AKxG0pTOLLgLWSCQxGh7a595EQvd7KjXysbr1+7hNws8MoqIJqjeLg5gTC6EGDwMYFJ2LkhkxxJsxNc2tDnnIgLPcF4VrAzmoJKZ3MCeNTJVtEZKln9K2pmTKUFB0YuTwNKqCy5WTUH5s0ePTxb1Xjk4a/MJbYzHl1JFXhDvcX1cc8HwIqEuB4xNCqPmw9HwrbxoisrvF4t868mTcHj/nerxcqPhUPNs0V7b3P+ts+WftWfiyAcIlky+GPncxZOT1eLwoil1p8her4rF8QKBmePsYDcfjsK8t0J/k7cqrTbiRS+sFQiQ3BeBikfizvzmPI4uMH4eJ/N25GbqHNGY3QaQVWOZl3U9KoKvq+c/es8f7N+88xlIHgVBe2MaLPKN+c5otQZ5cMZSKJaAhwAIoFD4v3rMG7SXO5wj5txbVAFl+fr2l0+LG5frY9D4LGyvJMcs87Mas1zmLAvxVY/XbWzZh8tFWHgOfmvPbY6Wjw5ds+ztb639weitb7svv/74r95zej1ctIu531lsD9m1p5e7/80/PPzT744+eNT/6OniR//Ppo19CeqzjJl9Ps933SwwMggCK4sgF1E454X1cnW+vLgYbjfsuY0aWbJCehirtk0QF1C3fv3s0fXXvrBatrev320evDweXx+98vL9D/76oq12vvg5v99/jWXw5H0/qOfD3XqGBZUVNheysdSSl8BCsIY2qi2TGSNKaENN3DBHggAhbU9FOSUKQWw93IX/WfG4mjKJlQQMjhKVAhxLVHYODGJ2GTVtywpVeGYHFyQKILANro0FNqJxCFEodIbvIFXnIIwEzlnZcCxQjiKszN7iSmzmjDGIWkRWEgGbFSDBgl9IVTWKagCnmJQYJPFdFDBjBO8SYMZeQMGrD9KKNJwXdfj8+e23dz55b/j81nTUD5s111Fqdn2OagQyWEBhwr8lWlqaaDo+hbrkmNRFW1MdREIQgESdSmvNQJ77EIL3HgALAoLCCTQ2kT0BcM5HCIl1PAqFAJJeEM6REa2tskLVeU7YhMLIptCkmUq+RB2ondg6IhAhQZRABGGGKBORKDOx81HXFFRc3J9v3J+cn/bCxWC1PUPwWVCMjqdwjmYXXoKwa977L9ewfZkDz8DKyg2v/GLOl+Obj4c3ENQjaF0Mai/P6uak9s+dPPN+BgBSfaJS5b/zW2Fzs/6D//7WG2/ufOMrzw5vz++9Sw8+9Gcn09mT4UFJUDotm4s+D9Dzvir5nb0bkC/dmDx5efyoHOXl7u2mj6NwJ9z5tl/encvk/fYRlh/0kVH42rH/leN646m7qU+Un6I3z0Jb59wIYlQBgkqw7X5QbUVjiHnu66oKojHEKMETQ6iNsW2X3ns48oRMTDhLimj6vbSeBale+atEeAeBhsDElPs2RHNiylwW1MgTRuZnqDpAheAt9TrY4BukRVrFcNoDCZxHG6MqiLklMea08z4qSBlKTdCILtEhLWINLJKQxm1NdH22Dhu2tVbVJgSJClCIwWj8KXEMECRH+Rglcx2Z4SqCTIiBwOmeEwndOkjAHC1wIoIBcmSJiewZISppDKoWV0okJKAUpkDE0q2QrRGPRIjKSHmdzqDnGCIiKyGAWYhZNDjvQ4h0ZQ5HEGZjRye1EljVCUAQp1ElBo1Clxw2ea1SN2GgpeTPHzwNbjSTy5EOZ1T2s6I/rmWNnDP4KFWQDfExx1riduSy92RSf2bIflPvffjo8J0fLJ7yGjeBA9SUrUnEz6fP9rH5LRq9HU6mtGKNRR8l1k3Ra1G3a78H9xulD7O1BulDmoic4vG1vJHtNWdzaTa5KjHhvMnLflu1mUcL9yDL5mG0dBvncbBcl3SiTE62FJsywqIMi7Ff7O/xevZk2FxsYqUf/0I2T167+Zl5FVvtrXS0QH/Im1VvNPeTuszjNGimYOYIqQMC/FO67lfVuNzW9WWeF/1J+eUvfHX98o2f3H/y9GhxO95uGrexaP1c18eeK/As0qxtK8mrQL6cv3an3Mo3Dm6e/PTHzR/98/Kz36h9nEyun8vj6gLFE79/8/Pnv3i2uVM24284v3H6H/5o8PFD3H1z88HD6X/6qw3vMs+ZbzjESCT1crF/a00RqyZGzXwB1II8L4vIMUaLiqPQhunF7MauG5VZWecuINQrqYXgVSTv9ZePPjk5n02++ptVxEuv/8aHP/03X/6nf++bv/OrH3xw34+wd3f/8R/9wWAi7e3yPLgpepe
0OcfGggZ120OtqAkr0BpsNAJ7DsBJwQIme4LU8q0dVEMI9lSg869JQFUyn0K35SJiZzzbTnoE0zAy4Jk16Ruv1osWIKmec0DZOQlmN6QiwWqGqoSYvBPBLA50lUDA5JgljYoSQ6SOuSRmuZiML4wTbObUBvOKJHjXHBU1mXiQiUccs7MSrKpszBMGA41I7XG3Lj9e9c/7i3fL599cjBsIIdeYbG2vWN8G62oCEgEGgkI1SnKN997FoAS2uRaqxBxF29Ays2MnZChEInxaJ6FADLbT6jQYIoEASUpOURIkJSkrbN4lIEbpXFIAWy1qx/BKIw9e1IYrC0l0fkFMHZpgCmoSgKM6doGx1FAGmiyK493mcHu9Pe2pc/1798rnU/U5LZfUTG/vjHNpV2WVBQlgXJReoU0TdqCF0KYHUYNcqjYT5IuMTwNf5Hy+CosLAHXhire+5q9tC/Jmo1+2ix/c//5nxwev/s7vHH9/p/jJ2z+bHZ6Uw/2dm004x3pb2mF81MzqxXSWHU5e2ZP8DoZ3d17aOx+cvVefXufni8v5O9+rXttu7r75o+YHw3wiw28t5G4zF52Cj4Bjly2KwJdNrNhHiqKtMiM0bYgtiNum9Z6bplmt6mBTGSgokk0ptAmBBWCBOGZYsAGZd41jgFTNYywSkcl7tUs6sp0FMamQaGSL3jVFmqYB2allsqYPzu4i0W7Bn/jTlk9v7874zHDMjonMuiIFiapRLJ13Kt3iqSMlJApCArgIRAjBOnG7f5xZ5qkyO72yFScab47n83mrbSTmK3scAjGlkGWiDoVOImMD6olYRMizrXGdd7D3alfRR43oHEvgXRZfjMPJctYaEYVSwrZYyZA2wHmYvbnZh9nuKYi1wkTd46GwodcunBKAQMgBg6NEEJzTUIcsZzScB14czy+ezfhGXkl/jnIu5ZBH+bByNUNUgsgAaDnUQgE6AUptN6LfLuTZB/9/qv79Sa4rvw8EP9/vOffcmzezsrIeKAAFEARBEGSDaDabzWY/1Gq1pJYsWTPWwx7PrNezs+GYjZiI/R/2p/0HdtexP8xu7HrC3gjvjGK0Y8u2xpJbcqvVD4rNZpNsPkC8H4VCoR5ZWZk3b557zve7P5yTxTaC0YFGAVVZWeee7+Pzmj32jC22L3RYR1tJJ7zoeG7Fm8lkcs7a7xp7B/ae7wLms8CFXetLe73qvyQVZnEm0cJ58QyNFFnidGW1HawPMDuRaQ8npSzkpBd6ppwujqvRg0gLHk5iQQ3TvtCU9KzyiAd6NMJ4xLPaP7s8Ort9/bL1hwM/7tf2yBw/+tEff+mNr3c82G+PB3Z1IPOxzkh9qNbDes8wSQzsC+2MBrHy0GK4uecb06+Lrv+rr3+jLft/9oN7H/z0L+1Cbnb99mh6ztOvjFs7oTDugqwxx+rr3xh/+NFwe7jxrW8e3Xo/PH0Q9h6e+41vHftnwRfjt/+U7302uvH7hx99Uq8O5aNbcVSZR8+O/+k/2zg8KBnykx+hpBULcZ0N86kNli3HBkyTF88tFkc9jbEoSFprWVBKJ5J7tDSbUFHwk937w6FfrxqETr04ZxOLJ4gvpmPZu7v77NGlb/+XxbmL1y7+V7d/8OeDezzhcfXNVxc8FLcvg/W23Dya2taunXS9OdVTDNBAWyJPCOAOpmOKZAu2hSMSFZFoFbnSZ3NzIoVaa61hazi72pzCM0umMUMtWRCHHBJJDDLWdiFAIBAjatikXnVpSJtUwEvVDci5om0X8JAc05lrT1o5ceZ5LgU6yDsgkexDmXQ1CaJOA3rmZ7JRRBK1xobTKFOFprTurAEhypkE+ZdhImYrEAOJImA2MBq9oTpWr7abP3DtrXp8OT7bXqw3JhCBl+QQxVKGmQk5DI2qEBEisoZAHGMXfAdiItZsSS1RNCbryqT7YsQgKcIF0KhL3IwZjBgiGCFEqOQLJUPKhOyHjTSyJsScAcMW4CUlNq/HVZdtA38e45g0YKe7TEFe9TOBmBKhnQjC6XbjQsFcPDce7A/3JmU3XTdndtvFvc/c6pnYIYR28cnN+s4OjMbYOVgXWhlXhqtZH7rnbaFyjIQ86oJaLzYU2KcwDrw4kQvnAFTf+pZbHc33dlbXyll/8xPb3Lx3uHllOHv0k/DOT89KfHkwPK5k3DystEJZxijVeHCGHYVBM3Ht1nM7N9YXs8mHjx/58urKs8v8yZOLn0X/aFzcn07uuQkuVQ838Zh5D9J43rNmn08OJ6hmzEGkldgCXdctVDqJMcQAwnTaMCP5NWYx2PItTaANabIaigDYcEpdJlAIIZGIMkCTfaBTE8ggChLTLhhAjOkAAsoCkSwbIk2eiTmCSyWFXUq2oz6laCR2MeWUXJMpiWnaY455qZUeFiaQsSaEgNTBSkwubMm9LWnP8wMpSsxdCEnHb4zRdGhEDRMRqeh0Og0xEJt8gtK9kfvdhJoYSqIuk9Mec/8HTpi0QiBA0IwNKUsUUcUpi4tYcqzJ58v6018GpMSJ7p34p8tIY6uqMfu2gxlRhNksS/iSfb18ojg9BSnoBZKCylmgsTNGZEGxCVXsj/d2w9TXi3ruVqcymdCoJ42zWN/c76InIvagDrpQBKWhom9kKLyKA9k4Kc5Oq+f3TtYntsKksYtKvOE5TIN2MBi3h2Vob3D9Smnb6XQylcL6VWet06ad+iOynQHE+IDAFAwFhNa1TecHfXYrXJ0pvUjlOuEVE+/D7rYzboAG2giNibc59MNAjjbMZEUPR2a6xWN/847fo9rNVKQLRTg4qDp/7/DR9a++vr7xwl47m+l0hQasc4N46EbNaNWKRVSagwZkhSWMp+7M1vmz59pp+Ov/+W+COJ02I5Ql3OJwsXg2f6I0Gb249ui+ldXR3/tvP/vjv3jh6o29R7ea/X0zDXowIWKVcPiT9+wbr9e2dOcudXceN+++i/HRdDyrinmYxSF6HbyrJIYqWmF0AVF8bOFL1AFt6Dx/5VuHJcp2AZZEqAsoIRI7YWEJQkKxjeRpEUBV/2B6WC3GK5v99U0+8TOpgl3hOISOih6qRdvc+/7/eOnXf3fv5OagDoPJkR8s7KWN8uXes7fr1W//w5ty9nHbju3mHjbGujZb1HoMTIEG3DB5aAdDCgoqnq2wIWscMn4bQJxt/dM2l0klLTBBxDkXIU2NmTVColqQM5bBQBQQu8IliEYk6eiFmI1hgGJymuPTDMFkgKxMCDGKaBdC6uUpTXBMuuQDJ7U9QGxYU2QcTu8+XXKQT5/E7MKDZY3MHrUAsQFnzzxeFuDTLAcQhCHpJmMwU6ligLawV7uLu7PDOyuTn/X2Lsp6FUlAaapeWngtGaiJVaXJpscGCaqA5lkBRKLQmC5rTq0HgckaASDETMQ25nV8isTRlEVDbKxlicLGpUFVQIrkRs9QZaa0l+PUXVh7+i4JlmOWfs47TZ7PqdOhRNRasqSzYSbAxpySc9IdFQAyVArPGWfmg34zmY3oqDe/8NGtKRQdI4opBtMfvt1Z2+vVPkaJ3rrSSyOxLp6xqtfKaBFjJ8bABOPbEG
TZdMoj6tLcOfvkLSMUVHqfqFRu7pSvzkf6VLcc20V3oKd8YaekmB90/makIyYlQGI7NQIe9F2NgiRwha+z4gbL33EsVmWQgBIi7LIlpphdiytcayIWKw8hMDHOoYkelbFRFCUIAtsybf2DQEk6bdcAzpJFoAVVWBOQS4nm3bJcOEaJp2CaYQg7VMYqJScmNLaeCGHAkFWWWWW4nnLMHXgyrbtBv7w51z18x5ebY6nt09neqD2Wb/YDTIXxyNm+LSjDenLN5qgGGJZEnJgsQHzozJer71BjCgJqGASiKtiFMNnW6YhL0Jc8/OYsXiWleY5bTaHl+/sfHc/U/vuP1cBkpj6u3leT6TsztjzLfN4wnORloPdVqYMHTD6fTRnYF73YZvwjyeLk54e7Zz8+yDH8QPPgy8/+DP/hVPHZ9EqQejV36rOnx31zyefPTdGyi3zfxGeLzDi8UUb0/zB9OKLWdgcWJsi5rG4/nsw+8/+5Wx5xAor6XX2mK1kVHJdo4iA9pyUM+y1dIQpMtkoNCz0nphda5Pu5fD1qa7NAoRVJZpJFQyrIGXU1lOuC7D6ePM9AQhslC/n092EVEvPkVcgojJRPHCLMZr8PZXP/fitrlkSzN/NJc2tyH/+TsfBKt5bsfIzSJW8XhxYnK1hS5zqcQK52xrDhpHGUZ5Mc6KnGahrYfWWmA6L8Ms3n/nk6Etnr167bw8zTeywaXJfL7KxtZ6Qu7ms+PJ5dcnr4w+/P4P8NIh7DWFDwgSM46BY1p6JGZijCJBYgytduTcCIBSl61JhmtSFEnyqrTMIcTAHZ8KUAgJU2COybqZSRWxg5W7A2/5wte9O6d4whVak5S7PKqU1mVoPWETsSHuChhLz7lrV68+ePiwjcEYC1FnLYmkxN/QRtFEfjboqNbpMZgYSIzkpPSltIFOrUjik3UGV7ZjHFNaJ5HtjNPW1Te5VjKlOLS11d4TLlmnXlz/hNzxOVWFbbfz5nUejUrkRJW2hp6y2OwQfWvT7J26gbVA8wnYQE/Pzt3Xpgte6xqESIm7Ju3h1DCptUGFA2t+Hla3L9/8+dGDR8/T+3L4henD4XjPYwauGX2jkaGs1lpzeFKvAnqiJJR28i6Szwutq7b1RC5UZZYXq8A9YzY3cPv5nRbm739t///x+4ujsr4xHv/sk7P7x4tbu5eODsrXb4/e2S93nNnddbsjfnDkt7cL+iQ8Pm1s34ZGlRQZwRKXsCyLI8n3+yfZ/v/8xuLLu9vZSn76xklVPiM/Dp980t/suyYL1cr91mffl7C6R3s2w4mMKuk1tHkaRpXdncf+fJX1VopFrYNtCpDVnH0+Pm+yWRA2IyetHRyPfsmoxWzOOerhFz6+96fP3coB7AxHdcA58yNf+VCqLFeynasUvdVwmDUis6OTyee+eHvnxUWzooc/q6tqZDfPZAMlbN/4oDnRvMX13H/jVf2DN/e3zcGAy0U+6denlodBN1qt1TCL2mXQmrUmNKrqAQ+KIE/qQUIUtGNgiUJEQnrFO/iqK3gXF7YqMUOeGMUBXTFOx7djL/Oa0/DXa/Bfg1cuCrGknVMn6gf0yfpKO0bmkzWx4iIKgpQ5cz2JIUaxNpPCVHUtQfI8DyGIqAEBJG20xsC50DW3XchncqyR2C2YteMxdpREFRAkarfpEQIIzjlfe5Cm1IY1jEXJ0b5sPCgjUEAEwQBKBt23oQFi2TBYhaMgM4Ftr/SrIffrEAcG3rWe4qryiNhAfqn3bF340q5m/nR2bzrNzioTj/d2MHaX0L90bDfLoucBRrAIPRaNrq1ZWDV4ImITu3tiEiUCJGwotGKyTEOLkKPucW6kjlYzX69u7t96cHZfhuARYUNc3o4xv8KLXZzv0/kGn2zo0p9E8m44HNaPyiqr3rD2K072dqciIZ4uB5jkGDAWfetOB2NaevHUfOff5vso9OEuHo/M2TbV4trvT/nRcSvH3s7BgFhDWQwxZGHYhhrDB6N4vIG41GGto5VU1cZEp544Ol/l8zNbnUc2ioxsX4zJWjbtKkbmyzckH8Zx3/aMV876uTQCZmWRzJpG9GQWzANZHFFsojZsLXlVq1LOw7ykswdCNRsnrSdojsDn9cmtPXv3xx98WuY9Ka7kk9Uq3Ht0Uq5C3s+2Crcz3FJRPzvPvAzdaoZy0N8eOTYyNCDesP2NBjr3kco27G7kfjGbznyowsnDg+GG2+/vndWV3SiGk8Ei1MUoZxt9r7boRZvPT+fDm8+Hd8uskYZC1og0lmovDcFz8AJUioYQ2tC9iXhmy8wSAxImnCynUmSYJgNISSCrSfNwDNaYhBw7EEFZQTEaEkYCqwGwoXTHJiVSiUVRJLcsTQwKIkhMU2fSD1tlIo1ta4xNY6FCyFgQsXLT+AcHj4KIMRYgJmVnEFsohxBS6nMKGkrcKsOd62Qy5+r2SAknjl3TbhMtOiT7aFZOmRAwbMgaSdm8xAo2hokZmoZlZgEISnKh7EDazF7cLtZmeybBwd1uDrKG72ENiEwy6mIGEJVUlUHMRiC87mmw3t5dyI0u6Ffxwm963c5w+j8KBSsxK4DAsNCUlhYCu3RPyzxxi29uvvS/3v9+eH3yRvnh78puq5loLxgNMID1ilEW5gIyzik37HNmFVqEUFWenMt6kDpazl0vuEZvXskrYTey3/nhwS//9uXf/NUr379Tvf7VV/wEdmfDXG3/P395tNsb/oPfevbT98//6HuH7XD0k3sV2SobF49OGzatcVA3jKvKORZnwfHkoP2zWaTt3p02e3h0OrLDLaBv4+NPz3fyjNSvysVvPnxzq52/H3ccm9DyuckjNk7sdhkHc1vUbc+sIpXKTa6PH3NJ0dtQo/D11ml5cGnblpHYZ0IqC/QySOB889z+8k8f/ATA1TjFpmKkfVe4UGy5YS+TQes2WaiidjZ3dmPjxsvnZ1W7CPWdD/fyfvSDFW848RzIEqqVx7L5+t/YW5wsP6muPvPKTpz+aOyn5Qz15c97bVkaH4xFZtKNfwWitqu+6qFeJRAnHXCHP0OVQaIi8Oli1BiTmVpX+QxDRYQ5aeg64kDq5FIiKSUhYQoJkQuVb+I94UK2tK69ctHVrst5QogvYBtJxhiJ3oCOirwGwQ0bVokkZK1S0CjWmiLv1W2QKAQWkSjsQzBM1rlVvTLGMGATn4KZFBkb0UjCCrHW1L4RsCqIsYp1pj1DYGsQRSQqMUTIskYhqLEGytpt3TSZyyZsPNlwIW24CKmCG7adq4GxMbSGXJTAQktpDHNDTAKwWjGtwQqhrltX0Zj7I3dzv7i5sLMl+wff+enw5t61V69+ls0f8nK4iGYhVxcbw2UWHAsQGGyA4JNBkZLx0bKGFtwLCBItiKIlZGxbRSOVYXKiIqPG7W0Mt7fa/lJHIWxhzKe7mF7W4119vBvv43EbS8JUtcJhfSQWTI4ZP+/xQ7h8cx57vGCeaVYiP0b/Xn/gt3JuJBvlW73qTIw1Wpn2IDzGUSmPFcfqZg5ztDGQiQC5fh645gAt5JM3/vzGL//Npp6XOBlaXvCIyGf+xJy
duNqHrYGurC4qa5SjEdgwKNSTDsd2/4rzS/GVVcDl1A/gPAizeBg27UoO7meRUx0K0nJmaDYTfMinZ3ZxKq6noY4md1KTt4tcVq8/bx88OPOnsb/M3z35uLDuS6+/bPdzgniHTz58ZMtePZTdsa1j3NAwWy1RT5er6EJWT8M8VtATR9OqOqntyfWsatq6nM7Gm6PtS9uL45oyk28Naq3yYggOTah7XFiLBhBiYeteujGdz3WwV7a1a1sWpsYki9LEqIzaRAkxya00ITjoiIgCY9hLTMyKtIVJNGGFMhuwGmtTeUaKOQIJU0h1jYnYQgHRoGrTalUVhEVVGQAKa2zylErsEeogcSQ+ZGZM50PJJCqI8cJNxrdtZjOFWGakbEFDEiWlgktn5RjIkE0h9YSL6mvYAGBroACEyEaSrq2+8N5gVoDBhkiDdH701HGvSDUJotK+mQhE/BSPtAP/qLMHgKoaY9ZTMncZEpK+eUXyteVU0LtbWnpGmMmwTcPKRWBUkoWoyAXjunva1oSsNVTYeXUpJRIdA3SBFiYjFIBUgnH9s1Bd3rrypenOj317t1i9d3D39qXP1XIuQg25BllkJ5kHSXAaXOAeI+eB8JVh3N7u3/30/INz9JxDEE8+UGX6ozd/fvwhDG7i8Adz98z2ncP20x8c+a1rP/wvx3ICNy7+0/emvWVbnZGc59YbCwd4oM6oH8AUbG9eBnDjauUgrVrbA2tzJNlQ2smVEgtB0ee6j6zGPPPyuwdvjdvVT2SXWNoozMOSRrMIz1tzybVkWxopRVaIC7GV0ZBv7eyevnd/1MQrDx8eul2NIpwb9hH9+sy7kWzkev25Pdl9FsCDxffYrcxwsit/ST2F4bha9Vzm2DdBaLix7Z3+p/+w9+Lni8t7J7XPlvFksDGlIkdTRzUcvnF7rG4w3MiOp6Enq7vDl07NzV+t/urg2s1Vr0+hgrKIQOOKnBNhkEoLtAl/7gopknyNNZWcDh2FgkWCSkhudCKRuhYxEhEzkrpHAEYSTKauE0ydF9762r0YY7u8nr++1OC/9ue0+Ohwma7uAsIMVdYUj5Em4tR+andqAEs2IKT/56wVkkgcVYyItTaEUNd1OgLohO/roVDEQ41hVohEZiNBSSE+DJ0LTGfz6aquLu9cdsYF3/pYw1isuV5P/0gMQCVETS07AFYK6q21TwECmtpfwyYt7AybjkoqyZEHnoUFPQGIg1GvQevWQDeRb2XF7mu/ZnPDh/yS3TWOjux5sdc/u9oeeL99jNG5yYJpULEpKssumqDIEdmwE16pBzgQWeps8JUaYsvqVMfc59ovG63NkGOOwoaBLvs6H1LZ90dDdld3br13911X92UaaUWcQUjRs5XT0tWIAndybvkR9Q/i/ISLR3a4GA1kHmBl01f7bvasWd5AuRdLrsmsQN6EuZcZsSWwMFMILfWhTqWSgzuf5dfeGl3726hqp62pZ7Zc2ZNZ7n0YFWH72d4Jx9VZ4xcML1ljL7+gcaYZI7dAzm0F72MbyDB6WcZWAjhzQYKxELQSDDg4ZF6Fq3ksZ4BK1msp9KmnUgtzkKr86peakbGsGxsZosi4KF578RWLEM+rCH7v4490x2CKclTZbTlyB4/jtROez42exW27yNkbY9qCm2VPi2LDSr1aHS6Xs83R8JId+6PaZGYw7AX1tlc0WlmrbjMPRbC5c2aska3NxlvP3X/vI75yU+Yaq+ArEt9ywzZApAHaGLxIrfBsVMCsGqMgxjR3+jZy8mNf+y6rJlqtkgGAuq4NkRIr1BhDnW1cou91esTuErfsvScma4xJ5EY2a+vn7jJPIfYEIAhIjbUhhigiKmxYRYNEZmMAUggnuxs13ZKUzbr8hhDZMbUd8CwiDLbWpnUqmy60nVLWWlr1dukpzJySObs/JNtLqCDRKtPkK5LSkCgxpdfeA139BXUjRuKVq9IaOk6RLgRD0ETzXpOwuqeVmXQdVkWKi104rfF6WodLpJ4gOWLJRbHtGoCn7oxrqDDRsBUKJBdWQCMQwdyGQEpL3375+pc/+un/Xv3m8B6VzzWRXc+TEXUtuFWGZcnA7OGyNkPPyQ3TTjb13TuzF25v2Dsnd+fWZb5CThP64x9+Zq9O+s9RHLfnyvc/qrm4/MmMDk/rudsM3vVDO95zi0OX1aEXWGvVnoQIC4Bqpl7DuVSxsM2tS8OZrwLy00+W9QaynQEFDXXVjLKqGBUhI+1v6OV/8PD3B1XzNu/lxD4yQG2WLaUIvY3lysgy2tqEElKCK1BJWkOq5vzRg16FpZjb8w/enzzDo9GGL5dillX12ueL/x9Z//4kR3bld4LnnHv8uodHZGTkA4kECkChUA+iqsDis9kkm6TYLUqipNasVrMjk2k1M7a/rOmH/U/WbH9Ys11b2djKbGbWZsZ2tbbd6pnu3pbEJtlsssRHVbFYBdYDhcIjkUgkMiMjIzw83K+fe87+cD1QLVv8hKpCRSYAv37P4/v9fC9c9J9oqP3G4nwGAJefHn15VMrZx1e6k7GHQRF44gXyUK82Cs6nYaNl2520Z2fHDx/4IIg85W1wwBFqoeu7/ndvjB80cP/RdPfS6NoE3quOPuGXZu4PXx6PYtMCRI4C6kJEjkGNMBIYA4ChI4gKokoKHfXT07QaTHIJjAoawQySnBERDCNBvz2xGC3Vn4iqCJCOezLIEayfRftMtrFGaK21xPRsJbxmavU1ZX+19rIPhP/EWJyGQp+tkhH1mWEPgIiYOf0XFVVURxrVOukQgLzPmDtVVXWOsbf6gqmCo4I4iCR5CaghkBEIQasULbz/zi+lqasr119+5RZxptBFiSmp7G949tP3wNvbWwcHBz7P+987KqPrJwW0zoMCQwBHlLbKjiglf8MaBNY7LRP61oCMgCCqGnETOz+FTjsZuGHeVQXs0/CVeO1Pul99PL27LPbt1mBcZRdOy+ECylXsMJDzQuSdAGrmCpWAzhMpAhiIASGxGalTHmQhNMYGI6Ixj/0yrxcbUPk4vTCIfJrdf3A/q71OI8wQG1QUn7sOOvLoc6cZQEkb2/XS6prCyuqBVDVvOw8WdcBuIMwiBJKzb7vGs++0AyESMFEkBI8qkQNjpHpa5znf/fW7r178+jjbaU5PJ1Wsq9ajthnT1St+80bsztURZwOY7AA2sL0bqbCTYzbWca5ScQi6qogzzVxQ8cSGpGoUREFZzQg7UVeMYlfnGgTRXD6MJNZlGUszry9fkhvPleRYq5A3QA1/7ytfqRciUmhmP/vlhwHafFxYq5t+iKyXN0vO9sj2Gyg12w1OQMkXfuS7jFttptOqzlS29rdHzWh5LN5n+ahooePcz+vTYtOv2rZ6eP+151+X1h0/ee9/+WC+/3UuLnwDVhCWtWXDpq25RmoQWgyNqbYALUDnHJDzbZgTrZOzHEcVkY4Q1UxUkCh5nEMIyRITQouEzLxOCsJoGlVTjnCSG1uv7TIAkCjPusZOlJlVNUY1NIuG6awqGIEBCCgYrFbN2n6TfigiiUqE6NgBBHYOuZ+XOeJ1OiEwohmCc2
CmyWGbRrNqBGaUsM7pptRnVxdiojf3aigEJICYJtJpswQJzaVGpGkivs5MpH5Vjeu31TpyTP8TWQsRfZZ49IwTmVQysDY8f9ZGf/bS6iWc/RJ4/Tbrf8OfadV6DJnBs5CM/z8b12dXcv+PnUJuDjl0zUjzb5df+A+nd+5fmh5Uh6/sTqL6DliNJRI5g5wgJ2qpQ70wgbCID479l69t/cW7x1ev79LxSTYqo1vxyEk5BlT0yMbMuWMWcpTnud8sQ/KQx6dnnqJkQJWAxQCrFmMNqOUgHy8O96tjHY1e+ua3C+6kHFPe1VwczWaHdw5Pn78cR4VrtC2WXXlh3y2+cfSXXbt8113i6DsU0qzhjIAEClhECs5m0C46vzJqWBdGDbrWwSraKlqDnXDp57//4C9+fOmbx9vP6VLfuLm1e43/8qMgk7D74Tvb4zsAcHmnsMPD8fiUXTWpnz5nJxvucFI2RTXHOQ2K5+omSPNYhhcAyAUdcPm0vADBKVNW4MFM/+Rn0y994cL1C5NQbI5oGtSVfv7O7MLZ02Jvq5oZKyIARlli9CAecoCIIBlGjiDp8lCNSQyY0qnTdkgNVfpyUQ3UYlrOkgqtazVCYk4iPFBHFJWQknJJ+8QhQwDF9VW8fub6r9u3vn9zj5yeQXq2JabPqORoMSoR0LPQRERANFNah58YQOLwIDCzoqpIyhojhxKjJUOeqMY1it3WadwGnYkpODMi6siiKgGUuVeljz/4bZn77UsXI7hOIvOz/cx/chzSTRwkHB0/YZ9xxm3TOuoDXUCSPaHnE/UfYP19qya0ZlWnVXRyLSBhCEKEoEbsECiiMbGgOZ87IqrMNybU/mrwyZDxi4+K2aX64aOTeADHb0w2LmxshOFk5vKFYpAuow4aZmX0ZsFMzClB4dQ7LZS8FkCFe3LwGJxwWQYvha5G3I60nlgl09X8YAUnBDPQqbpz1kUE7wJFIIxeoUTKSZdARVUMhgWtBlqXGGYQOnZIqGCaQFYU27oFgbgSXUZoAGsgT+bMWqPMSYhQg9/MOBuJtb/59c+ufW5Sn8CyaaKOxdR8geMRS9fMz8gxTHa6K/tuNmcI5AvJcmg7n3GX6If1CqTypimjRjSksAADSC92IEOrEaMAAXFSqCsBNbUSV196fVnIzr0pf/v3v7aDm2XNJ4+WP/3offYFMLrdjc3RBZ10cTts7PjzbBaMz5sw00o8h1YoEHeMgaY8GsgiNGXQ0cBfvrQLzZO554KGrp22g43B2fJcR3H3ua233vuLdjTVB1rh/juzJowuPT3Viy9sNZs3ZCl2urAVaENUK9YAoSPqmXaqyomibF0yDUgUAE0gxmRciFFVwzMcRNTkXDfOM2kDIkazjDMCoswpglFPfkrZCQbgDKNGcNSZElInHWFyuvY4WCFISsXkhFWCmO5K0ESPS45fpAShFMg8WO/tIfeZ8UANPvNKIaiqQ1LoQR+qCNJvw1L/qmbRtO9TU3yTRHIULUGrMKokWQclGCYikSGhihH17UV6u/QskGT6SJGFfYYbrDMT0i9L61zCZ/ZjSJc/WFKX2lofDWuQ1VoiDtDzjRL4t7/EtX+3GFAyYiYJGJqtsRzOkNaILERQQwWLCuJJZSVqBuyWq8Xzl57/wjL8DD99Gw+uhGJsLEgtcm1s2GEG5pAcgpPxZHS+Wly7ODg+13E5mE7r3PsYKmaSKtq4oAE0jWiE6WypcKHBJlh17hbzzsOs0waoqaECmIfNjjZynmzhte3Rlb2t//GP/vWNJt6az57MSjh+bV6OoVH1job565fGkyfvLn78y9lLr7eTHU/hJv5md3m74eax3yoIG4VonR8ObSHqCqvMBdJGdY5ce6yjteBW1gKTDnnRaBANgM1cSndRj/7RJ//zB09fOhpdvdhsfPTnR7fy6vrseH+7OsISAAI3uEUIGYhZZlFkKGE4X0CjTrg+uO9hVFgp3Fb3Hm4wLUM+H+xuQqhCN/CeNX78yenzVwdbu2OQ2bTVMRfHSxj6+MEDACtL7FoJLnTERTS1XCzp3QKgEAQCZVMxoAjgyHpAkqqBpMWLqqT8IQBASs+VxRidcwZ9iHDS0pM5sn5Is54/96Wd4jq865msIT1l+mzJkm6vtJsCBOopcZhQOAAIRGRRKWW9WO9OT4vh5LHv9VT6N78SuIxB1EzROWKOncQonklAUhr2+ha2KOIUxKBDSKi4jG1RVcfToxEVb9x6/fKVK0VZSCP37j5opHUOrS8A+gbYzBQUAE0NCSVG6DqXMaZN3FpoTQkQ8WwY1ZPkk9gj8WhBVdmhIoAqKzkgIIqgDsiTmUjHadgvZE69AyIAGi5pCFvw0qURtvvdohqe3pfl4u1z287u3yyHcXDpfHNjxkVN0iqhdGwA5AwIGYAVSBWNADyczc94yymLWeO6qnShkGpgyyx4XFmYdTQnrrhd2lzF1TIuR2VeMkCzXOkswsgwg+FgWeiigEFBIYdGihKQ2rAyigTIJi6aimIHZTZYyUoFHAAQoAdTxQAYkS2TVUdjnR0+2vEf7MKVpzQ0x46UwChqAyvLgC9dkzFm41yla2PNWuogh6ZGVmSOGGg209OnsKxIQT0RgooAUe7zRgMIeADtApJT4owYQ2MI5NQqqb59q9kZbS5WxQ9+xlu7pc6m01X28/fehwJrazyDLzItNB8XdGEAeZzsXo58ZUD7pV4G2PHLkfNoVYQKq3LjafW0bQr2ews/fNIuLu2AX4UAq3JYns6f6rB7/vrld+/8dHjRfe13vveDe9Pbs+PJla9889bff8SXz2w0ssHiSYVhHKpAKxAFUlDoSFvCyBk5BESIMTI7U4jaYZq6RjE1UEn6Yec4pQUnNSUzdxJDK5DkxtAHBrDjNH9OY1E07LV8YGmpgwAKvdwx9vkniim4bx0HGtMzzZxygrzLUnWtMabwJTUlIgVKv4yM1CKz+xsFsgL2I19LEUp9MlIfegiQXBsKBGqmMeo6nRR6+cm68zYQFdC0QkphCo4UkVQsIZr7SZth76FCAibX18xrah+sqVSpT088rtS2/qeG6V4LY6bW87bWHM/+k1JSeurTEbQvI9Z2S4P1TIwMzMiQwCKQw+QZ7e1dGYAauMaWQAQdaGXsdTo7ezW7+vh4du9a9WE7K3GYuYJiKTSeUODdJm6jDqLvYHu8WlA2lebtu/Z7b+TzpZ6B87uDWJzHUt2khkG8siu0f+Xa9la2tXdI+6e6d9cmswMHI6K5wUjLgBs6zGosGkHwjZYdNJ+/9tzkg7vHULZB6KM3iy/+A60bVTCIUwC++c3Nj/7bm7d/1JWWl9wyVhmQ52xgkTUAEuXdaeMGpYrmgWRlVAOvIrWqDVmjsBSvbPUqBtNGMoKuJAmLZjyajMdfrD610+P6x821MefbUKN7apznEQDGLdZRC2RE9EqBSHjUQcNQQ9AiZzmba31OTx+PKC81fLB7a5qVQ603C/ov/uByUMi3rRvBoun+p7fnR/nzWgTQzVayJeovj+hbeRtbQ0GsGiMWjmAKHSGvxfoCBgRKZmDOVCOhmiXls
6paEFUN6YJFBYA14CJFCfUPIgIg6ZodvX7msRdE9nPmv2kuor7s7pfE/Ql5djyQ1vXlusVc41oxBQWteV3Q3/1gieT8WSeqhpgcFskb2OsbHCGwqKRXR18H9LgAbdOFnM4zKiHdfuedaj79z/7pP7126dqqDU3dzk6mdVO7wqvFVOz3bsF13OGzapUdaEw3LhI5keAcY0IDJT2IgUKfWKGma+dGOnLaOyQQxAwJTI0Aosaopo5JlckBEEhUjixslHWupUjdYjYO3OV+0z+/+1RlPLs3mi+8LMPp0faKd3m3K/dmm5M5FauBQWbkowFQBASwzGVl6EIHwuxSPhhqBOmIIEO0zspsQOjaNoBAq3GwtX1xc5QRZ+SGPp/qUS21J26sNRCy6DNmTUoBjo1ce/VVPKpDgwFdxwreInQ+H/DARy/GgA7RE2UoPpqzOtbUEijTsHzz0ePP7+1BzGI+wrziaGDMGxPdYY1j8jXHZsVA5NF7kIKCaBdUOj2ZZk+PdDVnI3PciVGvslfpOm9oRIyuw6yTjlVMBS0i5jSfn9+8Nrt5bUvJ/eg3hTasEHg4/I9335nBshyP0DT3mVmn2BjnYkiZa4XOpJuDVAytReg01mALtQJgAMduXIwuh3yrwpNzN8i5KPLjyTWeHiyqvPriF1579+5b5/7k6298/U/fu/fYT2597Z/w1iv3muKEs5xc/PRDN98Moy1uDaLQqo+ATkWrmkYI1nWeKUqAfkzamRqSS4+cc05iDNIQUVmUZto0jYBEUWaXmj5H1KxqjQJr5T/YM1gjAAA4iqouTaEBJIS+eFZI7sAIYGgpxih9YLpaUiuZXgrppgREJAgirGSO1MAJmaNkg0zVa3qbxBiZOPF3AJGSCiR9wz16pj+IEkUlGXM1RSQ5dpg24tgv1UysP589YBbMjNmtQRr9jC35laNJWiF9lkHz2fuqf+M925GnaqD3bq4XuevEYvyb/88zWMJnr7uUv679C+5ZDUHrGAwAUiCADoDXuXUMENPXj4RZV0vMNILLjDJtPPxOcW02v/ve9skLcF20cDIwKgc6ihjyS/Nm7lnM7+GDY/3O6+XFLjz/+eGf/cdH2eWB7lhX+GxDmrj85teyV7754mG7fQLbd9vxFMu5K50UV4amjZogD3zsYrOS1TlMz5uizBU0HwEfHrj5cetGAz9ob7/dvPoNdhum0UJnGkbD3SdXXj298y5v7zazKRSUsTFl2kaSzq7f2Pnn/+Xdf/PHk4/eQeFaqABPNWmNIgg1QSRuuOvASaRaspwCBopBIHDTzexYbeQ554KWDs5bzZom6yLMMwBoLwgrB+cMuYuUaZbMAgwAaIIRPJANui4gMsXy9u4r7KVTKUd8upgXW+X7d47Hl7deeH1IxUaHvigKiEXXRWI/m+ujorgCs2VHTFnLjccMIoIaxv6vmNKVailNKNFdDNdW1xglxmgGKTJs/Vis7wjskWwJG97XoKDUp4b0B9OexZasNc62FjHoOtb22eP7TPUAfYUJPQ8vPbXrh7nXBq7TCzNymtSca72gJmqHmfRlASIApoC0lI77THykAAhRIwJ6AOmTwiIjzabTJjRXnr9WFKOmDQ4InA4nQ79chE49udifnb7qpWc/M0NHquDIxajEDsCG5WhZ131hktbpakkL6RhjkJ5DnQ6dSzo2QABK32M/TkvLNWUg1Ri4A0A2VohKYkpogpytSE3FB+JgRbn9umxdv7s62pifDsI5xyN3cnRhRrvjvdV4Upe7i/FolUHMAqpAw+i6RauiZuQiRYAAeaO+Ae4od35lHiAzZXVML167VikYsPe5hm4+m011al618QQEoH4wbBoI5AS9CliAp1Uolce0MZfB3mBSbNftsq26ioYEHRgbeOpIgDTb9RuXN2Y8p01egT9Z+Pv5RqejCZXxaOHPW+hKWTaMLRkINcygBI45Q7AcyAqLCzubyuyMZk+7ZuYoGckoyXcRCUyJ1NRZjB1GJXCgSS8rwKQSBuPlzWuUefvFJ8ODezze4tGe//EPPjyTpd8pYpA8z9TbsCw7ABiiG6GU0pFvzUX0AllEr61gIApAtYsL1cm4KfIKzsaZLdgBwLjAav7I8fnL33j1V3c/WClcePlrP35w0mxev/nq91q//2CZNZMrYTG8+0f/rnxwwvmOGOkKoQKLILFxGDDZ2buOSB2l1AVUi31tl1IMVB2ixAgARM7UqqpK000RIeIkOWYmA2Nmck4JI0BQdc9uHARAyAyiGrCz9Qg2nVrtr4y0l+ybwmR8BxVEIqKo/c54vfRKqX/aRfV5KaFLomtI3huC2CVqM3ifhxBSLpsjJ6ZgRuQIoO2numkajJbclKLJ5Y+E1qlGNVWJ4hxbP2IyQFRQU0385hAUYY1sTho0hPSd9PrttTS0t1SuF2HpMiVKylbr/8eeP5JekPbspk2yZ0ur47U0JsnREe1Zs67Q/+eeFwIGRoaJP00AAsBgPpn6LbHLFM1FbUvKocsZ5tC52QZv3BiNb49PHuT0wqI4LjZjtyqkCyJPja9sVy7TbNPrBah26Fv/cPyTnx/PJxueDQYKI4IR5yA71y589CQ7geIojo4JT2xwBqTTenUGeso0F50aLFhayxozQW+s2BlIsNoggmmnK1CiT38Br3zP1YHY60D0nDYuvdF+en+C+4vz2aACKGPgjCsyFNnMdCZ0KOEYxt/7vmg2+4u/YAlcmWuMm4DqIZCFsBJx41JM7ex8gCvORiAdA6hWDBRWTq3FEgHYsA+AhOgAhJVdZJdlgSMEYvBAQTECgRITimW2Lc3tvc9P81Eeayz901r//bvH/+QPb17Wcv+Ku/2wmgY3KEfCRVit/stvfe6/+cliTvTpib/kc5+bdgCOBKKioayFyaQAREJgpgoMIOsuUlUkxh6/lmJDkvI98RxihD5JLK030gNhKZMWmdcuf6R036blR1ocm6nFtHkxtc8UVs80DD1RDiHpGIDWOblGQPCMbIu4NhRaTGjY9bE3BDNFJewbSQeA1uMnkZA4CY17lSIRu35YpcpIgBRUlCDzXrp4/eq1vcl2I6maho3x5g0/fHD30w7EqVuXBPYMmQmqeT6Q3khtnjmN6dtmlVLRsR88J4eIARCDBwBmtvUOnMkBKCPFGM0l8CZojEAEjGYoqmCONQdQilHBMGaETrBhVSRn4BQCAKxUs5ANgD5X7UXmebmcjbuH+XKZLY+L6qgo8q3RVru7v9Tx+bho8iIOD86OQYEUu07jymC84W0krRdgYJvVM0Z2OV28ug9nmhFtjje6sJrPp9uj4pXr17tdWe22s2Kxvbt9b67GHo1VUYOw0MnxfL/YCDSqoayhzIcVjXFAZb1agRFmpiTj7QmwrYpVXaxohJo3Z27L3Xh5o3j1Pdj+cj3Oq6kVF+DCDsxirJ4aenVKxpE8oEZEWNUuSNcu+dFDWp7lGXfpLgBwYOlpMHTgnIAqdBlAJPJAKhRJ2QCcUiR6+RJs7W8dnA3e+i2PyhgDv/f0wYP68fDSOKsiW67MXZQ6b5gZJxmPqfGjyjaCjlY0
DFo05kdcNrLSaNqQa1ym2YwGXvEIRZEj+JNZKJrJC1eufDBdtNnuVOcPHlZL2n7h1t+bw+4yDuuN64d3Hs9+9FeljT2Nw937dv2i1JGkgNiAiaEgpETMNIIyBVWLDp1pR4AhBHbMffgtgCkpATkFUQRAI2QV8czsuQnBkZMgakaQnHrr2SsqozdQMyHqb1nCz+Y/CACIpuAwjXBAVRGjGZoBuTRtilEiKJKpYA9ZjjGCo2XdqIgjij03w6CH6hkQRUALUQkIKIZWwZAZYiAFdBQTokLUESUUZacdA2qMhC4V/9FiOSzrumbyts5awkTy6j1EQEhEqF0KUONUNIOLIAaqjthAESla/1rrhdnkVKOZMTMiNV1I02voPZsEgKqR1k2FmqGt4+MAksB6jSeAHp6tiqpIROQUTKMSMRKqAjrWoIUfttpEJQJmykUjgGakBhY7BmhRgTN3BouvFM/TBv/F9q9+p7zypbqstTDItSvnbvuePf7iftNurmAf8oucD6h8qfCHAkXeFa2NKHoi3TuOe4+Mp5ZP1U11MHeubkTnQHOiSmRONlPXtFI7j8Ozd3+2/dWvyGikQUdXb1WP/v0A1CkoUf3RB6MXvwI6CKK48hTno8nlY9io3nnXOd/slnwyz30UHzVocXULn3Azzy79k/897l86/qM/K597bet3f//R//Vfbe5dWLWFfPyxk1VmWf73fo+fewGD2OzB4pOP/O0PMp6ANkCDYMoAEakzQydon901oqDgNMtaBafcMQVVr8aOlIwYgDVXF/3uOxc+RywkBCSQw7e+vK+2fLrQ2x/O3j1nyK/zJp7M7PrzO2fSBvAx0KIOh2FjP3sqhQNQxMwUNHTpPa4QEyNM03pCFKFPGYlRk1gotb2AEC0SmqGBWBIEgcZ0h5lGBYiqXSr32k5jRKQEcUUAZxABEp0YENSsHxoZphsnEVTZ9eAOicLOgQFzFlVgPY5WAgdA5NRMU3WqaWuYdiyoMfrMW9KUUOp3LSNGBDVw5ABMRSNZqlVBIwB0oSOiPpwBVFUIEaNuboy/9rtf52Jwcj4D0yzjrc3Jycnp0eGTsiy11UgAppljkYCOCCnltiy72jMzO9VIa7Oec167DpB6mIgpEznHEi2EJmPWhPoxA4CokZFUDckhoMtYRRxSNEnuauc4z/NquWDHkfrcVrDAxN5zs2oQCFIwJwCwRKAFRK+wMcu3ZvnlYhIKPhrNzwbxNOuO8qMjrvjC3qSeXDdXPVnhdKXexxFs5MPalrPIYy6azo888cjDTLlwWe5G2xubVHjvRxf2tm99/tWbn7N9CDtRr0Kz3d6tju7+6ujuE2lRgyEoaQ3U8BnxSHnbT2a6zImKCTcaYAIKkQunADbWfKsIPgK3bkKnNBm++sbBzD96cPjAnO8GX/Nbi/EVHV+kuITlwEB812hUJeWMpGni9IlbnHNXoxmrWNuoZklRrkBGEbBfZWaYdYRGysHEWuWMIgcOFjrb2tkYbhV3j6u33xuxLsllmvG7T6rieok1yBDBwDllD2AQPehWrP3wLE7msDnDC6dxPM+2aUbhaeCGUbGrImbgBqQQn4zHoQ2hq6dtyMPOxfHlA6lPD0+2tl86srN5YW+88d0TP2l4d9pkD/7qR9mdk9FqpCfz5unp3it/e2EbZ9UCgqi1hB2AqAphZ9CBJZCzEoKZqOmg8DdefP7OnY+iaJZ7jR25XLQDFSBWaXyWW9Qi95S63tBSb+cBIjOIzhERWn+RqKmSc0kmqGpRDfsqXOlZp7weZPXrIwClfrgGQKrO+uK9v1whSaDA+s8EBSQE0xjXeAzVphVQMoJOMa2boxCRRuBI4IAdNdJFQzCIpi7nEKWHNQIQGIGtqqUjJ1Eyx8mQFUSIHIJZTMdPMu9jTEIYJaIY1WWpg0CR8Oy3ZgDc374sEtI6rW0DEQHR+j7tM4x63VXfjpAleWvfwmBMGow0f1FQSuM/MlVa+5LNFDD2yZIYMLoQ5gYMLkOToEJcpt24gYoyEiOBcKcKD/GoHPtsPHx4dLgJW7ubE2pnJs8dM4yCLLlZlGdnG/BubIfHs1dulVXZPJq2xSRrmJ+GMsv8ofAjGZ5DPsNypsXUnM4CzYDOCWbazW3Q5M1cIvnm3r2yekj7vy8njWEYPH9t+qYUTkJHxI6rB6uD3wxe/GZe1yoDDgjMxfWv1p/c293cYLogK7b5jHIPrdhR0x10N77zn4VPHs7/z/9HCv7yv/w/PPrpb6iKO/+7/+3xh/e2X79lKgd//m8/9/qrpx8e1ofvDsrdkjiqEneqnFEuAsHEAzlFAXLSb+gtemY2nIt1EbBTbBCiy0Wb3Gtso9NCqd3M+D/sfLH2jlWigqcMVt1c6uaI/vqTWfH8hAZDdJEC75akRh/cqXbLix7qx9PsUygmgw3N5oNmA6GLjUCBBGAhEgMEFVCMRui7rmIGIscMIq2mujM1DUxRBJK4Aixq4ksoEhJQmkg59tqEqLKeVasjAlEAFSTFfm+RTE19GFq/n0o2IkNgSxroNXFSNKpqT+FASyfdkSamjKiCGpDTKIaY+9wMgnR9GAsiM4Napx2RS+IyR5RkiUnYgQQGfS54sgJq0pEZkHNRZHt729ROHh9HlUFZ+MzPZuc/+OEPvvHNb17c35udzzPHBuozLzGmz8zSqkkiAKQNtKg6JO+yupMMiQGIuVNRIEy2ZseqikjJ9VXmOftsUS2Y2BKcTOSZgEvXu6qmaQDQDGIUIk7GRJEubTHSCC/tnEyACDIw1dAxGTkS8TN9Ybb1YpEtSj3y9bHUFR+e5GfT4Zy/Pi6/+np3fMLNvDqaj3ehYa7cRp1vAs1gKDKU8WBzlO8sHtbfevWlzd29s8XckM5i16xarxkEY/bPv/bFv3N5a/rjD+5+tDQlaBVatlrrslzx5Fxmm35rgXM/PtMguEeUo7FxxguYr6ihDYJczyD3L37+cGoPD1ang+dWuPHBw7NbL30hz55rPw2hcwVn0sybauZ9gd6LKDW1HD2l6hGgxS5ltjJCB+gUzJFqNEREF0XNCB2hCiBn5hy1sfSi2tWaDy8/B6Pd5s5dXi5CkTslg45P3HgcC/KzyUBiVCEoi0JB2Wvtt+awNaPNmreP6+GKtxbdQJcQqi6rHdUEjLYCWUQ1RYTzwVZThQzKUZZZWGzvXB1d3/vo/r3s0peuXrl5NwTR3cODavXLH+Un56i79rhqGj95/Vv+ypfx4xMLTjQ66BDFQDDhdUDFlFBd0kylZjHK+fmZAWR5Jl3HnAGZinrvNWoGPsaI5EJU6BpV9bmXqJEgJRj1NI9kT4LezJoGpEkvhGuVkPVmHVsbBnveE4BJ7IgcIAA4VQVTIkLHBrHHwBqwkoFFACHDCAZma9p8usb6PjUqpAMDCDGmRMQuiGOqm5aTLZiADKTr2AjIgaohKAI6UjAxdeiSgizFEmsaCaQDAxBCQOvXaSLiHIv0gg6JMTVQRIwITei8977IQ9VpjEldKRIF5NnYAPFv9LUJhmq9fBrXDTgxaa+8UUbqST22nlemVy+AQkx/A44
8mK5X2krgHVEXAgIq1OQGYpEaAlaYm6pJx3U2L0nCyzt35vUw7h7hrhqcUlkpnOTNPR0t/Nl9jXXo6rrVXaqpm+Ggi4MpZGajzZhP1Z/qZEEb5zCRWYSZ6czonOzccIn1vKUaicX/6q9H3/7d2BFpWIUlb0zsyiV9dKJ5zhEKoNVv3yyuvbrMXGa1QS7V2WTnpafbL4XDT5rpDF2XUeGCGhXy5juLT47rvPAP7jry+//1P5vP6if/9k9e+cP/VfVoQYf3Kwfhyd2Lt1769Idvbtj8wqtfrk5Pn/74F6PJBOaNo65TIS2JhpHACFE0qiYFkFdPAVorGspbKzrKY1d0ZTE3G1PtEQRkM+CH26/d3b5cQs3BwcCtiuryjY39K/kP7i+K5zeX+XjWjOp8pxL/yrULS78/0En2UXVji7jDe4+q+zh4bSzGnTaEBNqBqYEHU8AOwAMGMlHmQiGoBgnii0GUYKkhNRURJo4ae+64pZsUASD2CyAgiEakQEQkMQKgqGYFp6yHfrajYGAuqYv6VGBT6DUfWZaJRo2xV1mZ9QU3JQkkkgExIWKMPUgOklmI0Mza0KRjB2o+9yKiEhHJokYM6BwgRBFKZkJTQkpbsARkNwNKPBKm5JZMZXPUiN5lyKGThw8eFnn+3e985zfvvTss/PXnrx8+eqRRlQCJMqYklUICUfO516hBJPmSQwgFZ4lLoJoywcEA0GE0ddgnpSjCZGc7NA2nEYJZIvE5IjMwNNBo69jxFB/pmWNU1zcJGGMkpLWmTcHAGEwN1QAwA1I1NRMmgCgtDlb8Mly4kWWnJTzRZraihs9me95/dZ/3nxs057PmePTgpK7dDPyouLB96RSsGxb5yPt/+E/+3uH988fnx7xfbGxsQu6GV0dxEukSypa7d+674dXPfe3GDw5+1B0FrwPoSGqBGpZlsYDx06bK3ASibO+cAwJ6MNLotBjmxXgwk1lNE9u5Rvnu4cdPG7+/CmUmFpr4V7dv/8MvXmtCS2fn0FbOLZ0IRW3qOVfzMD92swNtWhqWwKu0j3MAEaGvAIkNjLVFdYAMogbQaRwD75dD1RokP791czreyeoqnhxx4RQjokVRPgqTCuXK9t50cexyG4yyJ42iFaR6LpvnsaziYOnyUyyfSrE6D3DuuHZaKdbmvIMaIIvOwECwyk7NQzl5zgNcufbB7OnHH73/ha99v4bRx+etFruz396lX78zbja7+VaYneD4pb0vf4f8bpiq55I7ZyhAghgAAiGkZwZBUxqCWkxzTIn65PiYOQMzYk4ZAqQgTSDuLebJh+OQmKlfySBJjESMhtEADYk4ybAQXTqk2rNbe1vOM6t/el7TUAgMNIL3rGoiEVCJnJmpqpp45w0tjcskpRkDYJIggcEa9NgDMgA1RnYsqsGUnMt8YaoiwkyqSfitiBRjmo5hdC7N2DhJVqJ64r7oR0BHKekMDclRtJRRAQRIjmMUZgZCA0VLfAJ1PVQLiICI0VHUWFVVEkyp6nAwWK1WZJETamDNAUx8ASDqk5KfbdOSnAb7JZopfWYtRkxfF9aLKwJIC7wokjFnGcXUAakCgCMDUNNoFgC8dIFXTEy6UmXaeDK4Oj2/P65mqPebcGF4eeW609az+hl1B/XZ7uUNpdWHs+b8qHaglLtFzJe+bIiwy6pBsYQLs4oq2IQ50hxgblYZ1UQdhSawcBxvT978adGdi/cQtVMBNTTKn7sqBw8LzEFrUGqn9fz0E9p7DaQxSn+zg40vf/n8wUcXfR4kjxqAHAN4KPXh8Y4CcHHus+ZhO/3Zv95/7ZXRV148+MWvc2ri6Yl89Jb//j+4oE3g67C1Pf/wl8V3/+DKd//Wx3/6w+LxveLqzeWdw8GsbuuaCTgfCnDhNwAgrpZxXkMGi9Eg140StzJY2Wo1wk3zPIDqwp4c85WfDV8ts0oC1KXmGQ7yQZiEP/4QT2nCZTGvR+fZxtNluXTbn97Brj7+4huyNe4ezAfXr/MXXy0+vNd9vBpc36wkU5+T1skMCxGU1aGhqjLkUQwoIvo0FCakAISGgA5MvS/asAIzQHP9/hIIyFQdAKbCDpQQRoOyXi5NzWVeNDp2Fg0RQA0JSBW4L5+VAAEZ17dzjJC8eUmYj6RgfXpYYtaBSyDjRFBPydupjiSipP8CSxAvS6V5OpKY1kCO0lch6De3zmEES5D1tJZXsJQVrGpJVum9V41gRo4BoGnb8WTy+TfeeO/2e2ezs+euXNWooRNRTaSw9BrKHGmXGNRgIo5cApiIiXMcRZLvKCahFWIUyb0XESA4evJYJXrH1tc/2BstTJHSBjOmqiEJSYk4mmqUJI/TZxv73sQNERQNSBGYGDCopvlUiAFQNGONSmG40eRbvEs8eSLnZ2EVINYDCS9vwv5o5sbFJ5PyI4ITKptQbDeDreH1V2/+9N23yo6G13Y2t7ccZ0rabQfdVtmk43b7Cew8OM/c5PrmlacHd9+DFqw1q4EaqspR7XeXKqciETFYvrsz42EAAmIO3gv7efvcWce7l177zcPqHLaWOKzQ43nMeXxw5/gteOtLV7/z9O4ZR+GtzTairaZ+dhKnhzQ7cuMS9vfg8JgJhIzUjLxCNJ+pmFdnKB0iOiZkgqgxOOLtwkO91IvbkI2jLyZg1Zs/y0bogLELih1xwfneTcZl56ETlNA+rUgdeCvFeaHtxm83MFpq2diEaLcw7WqhhiAYNGhi7EEdaie2cufu6SI2z2+E8TBUM7a4cpe++8FiG1QzP1n96pf+9ruF7q1msyh+87V/5PdegjNtjmf18fTyxisnODB7TBTAhEgdqUE0UEeKZNY/CEm+ZD7zyaQDhKGpc1/63BPAqgnpoDGhrJWNaX5CBmWWp14h3Zc96g4R0NAACB08M7r2wfJmRASOuJ/DAKTjFjvpcRlGagaomc8cZVXTMpIzYACNqo6M0AC434cZOaeqBmhqQOgdg+j+7l4j3dPpSUFORHuRJvXizDXpBogIJFIiNiJGjUTUQSRm71zbBVElJHYudl2fs9aLrQCQYtJJrWF8mC6/Hr6TbtXE20WXOVUlJBGpqirz3kMvXrGevJP6fE2L9P5DUJMCLhUEBCldCtCS9QkRzKVEZQDrF2lIwAhWjvImBANiNAAjdkCkombRMUft2JGBw867lsCRtuCQLm9dPTi7v7rAJ6vF5dXWeblzTGOCk6cqEbHlzeV83vr4nW+P3v9k+vZHjQwGK8kWNfuiOIj5LG4t2ohLpIpsprgkWiG1BA1kUtqALr39Xn7w0QKUzg7ohRsqlWXUNYvNV7548tbPi2a64NK/+Mru61+GYrdZzSH3FhtAbprz0eX95utfn7/5w3KwoYoAgWAllgFmAYNCHNbN8k/+1aZBtPDgf/zXw71rw1tvHP3sYOOr36njPCyI2vvz936CVXXtn/5X5wfN5u7e+Mtf5r39+9Wf2cnJ6Lt/gPuT9q0fZM15HQgAQGDAID5KPprzKtPM+4sajjrkuqFJVoqFf5u/8sbXLtwa05OlzA8OP1Z/VDs99yEfNDQGs4p25m6jwZ
3TpuQsjAaBq3Bzc6MAKfIOyp2LV/I3313dvFBUuIo+eckQAUlQOnOM5kFMtSPUjJwgOI0CBsx5lAjWEbnUXyKSQcS1hn6tcuoZcoQKgNWyckDkegeR9bMiAwTXy/f6+jZB7vqMEDBKAipESwpVSLJNcegSxM0gkd0+cxInjQNhbztOox1mbkNrBs6xQwXoIz5FBCE5/G0NnbO0G7KYBA89SAQQmVM6uHYS0mcCgog6l61W9Wi08fqtz//2vffPZucvvfhSmrupKq1de6kvX9Ne+zpXUZmQ+0gp60ds6XQRtRLSQB4VCTDESACqhqhJo6pgEBXM0mtGui7p46K0SeyWfl+9GhMB+g7EXD9ck9h1Ctihc4auQwMxl7tIHAGgA6ZW6wzK7EF3/JOPdm7uTr5Y1B22hPPnyvzWteLmRGaH+YP3Nm9/sM/hZDJ94Zsvl1nB6DsRUY0Msi22SU/C9hS262JrBZOFDp8ezSkQrKybB8rRltaUfp5tkNYATQTuwIty6WsgH8x1jSEXc8lx+9LTbvT0fL70exVuUfDagFVdjvzWr96/VrxSlAWwi4W3M6FHx7Q4onoeGip+/1vLh/fQDoGUiIWQMbgOwTuB2EBARIKcDcAiMinQAIExmgQX+WxrmEXY/PU7ocBljIVB5EwBWZSf0u4Ll25++PC9r7/x3Y8++OTovB5wqTF48K2OZyGvqGh5fBbLcmky77B2WhnVqI1FUSKnIaqPgc6XppcG9Ny8Q78aUldNNnhyuXtKzOXynZ8Ujz4q2t2mmvLFW5Pr32Z19cOjcb733i/e/ej9N7/79X/M0HXQOQQDSzoATAcHyFQM0u4QUkGaikoip9KNN8ahS2kNRo4ckYgGEU5ZvKkvUxOT4WjkGNgziGlK/zEliMkISMCAhKimPf6GADV1yeuGeC2+FHBe+5VnOg7OokU1VCMHgKi9PARpLYeitDPudV09wKcj7FTm9TLPc0YnIsRsGpNDABDLoqyWKwB0jmKU1G1n7EWE2UeV3OehC21UZkaDaCqxAwRCwx50pVFBLTrX73EdEiKqJqa0pUrf9TEsCK5nc6lqxuyYEugnaVSpx14lrGef3AKYwmSod4ukmx9Bk0NCAYlSwFVaixsmAYMSEROooEjHzMkC7ZANTaR2WBA5AGHKgGJeZE0dMAyYSQJADDotXvvL0VtvTGd7o5NqOdRJE2jlxgfMC/CLhXzztefe/bj+b346v3Zh/xEpYFETV74cZXGGOJ+BrzKtDCrDc3IrhKXBUrF1jv3FNz/OP/plU2YaurA8oeU8Gnl0oathM29ffkm6MPmd7/Fwr1tF1zmfNaqdyxyAkkl9Ul/4ypdPqpPVe78cDTYlxmCKKKiBHSgEx3mOmcUOT++NTkzvf3D4kz8q3vjChb/13YP3D/LNsyHywePVi3/wvU8/vnvhuf3BtSv1VOPTj9p3f73/D/7pcv/y5NrF80/eg7P5+Pv/DADO2+PTv/rT3W99ZTn9RLfHhZ/MDt6NOy/Z4qjDkXSzZv/a69/81n6sb3/4QKcPBg/vPkeD6o3//CAWjfASxjHAyk0qnRj63ezcY+0x/voO/a0vuVd26XC++OTnxy9+9XNxEO9CdmUcQhUzTTkalhXUdWYdGKBEpcyZqEYkIHCMqFGCARoQE0m01F72YUXEYBJNIW0twNLACR1YTEoGAFByLgkdOo0WLW00TdP932dspUNJROQcqK7rbFW1Z7GZRJactpDU0wpgSmuGRnKWWD9f7EtTxy5GSRaAtOpCg8EgF5He2a9p39yDu1JJKhr7ix8pXaiqYASdxvSNadc64qZeZZx9/o03fnv79k/+6sc3Xnzp8pUrphEM1RSQikFRr1YA4MgBWXIaMiI513Wdpb2aKhMl2zVETUOBXqSGqKrJ55DMETHG/qWWduQQnxmsTXV90yegOyWqgammTBdRTbjcpNV26W0MApA5RTJAsEiASXRJkbBgzU8/OhrzaEMm4+lw55u7dmXQZPem27s4uVnf2i7eOfqD6y+XzN00tlojgIKi51AWU9s+4wt+tFM35YqG954udGG0ctCAi05rdQXihjstBgZjBInkg+UB8hJWAiwGXBRB4Bz8xb2bd05WZzo+s83ThvGktjm5KbJyWMrPfvKTf/TG36lkFZaVPnoATx4IN447nvD5j/48U6Bh6VZVpzUBBfBe1Zw5BRTxvuyMu7bymQuNmKNhztg2QkLHB76ee/DSVkuisiNhozYCsSPgg+XG7FAub9385UFVTl5WnXajkYZuEaDm8UKHUYdVW8gSZk9WNAWYqVsQLFErBVEjAi/qwnxVX2se3HztpdX4ajc9qNif799snsCglfjBj8vzB7QadVIVt74z2v9qmDXNrBrR1uq4Pbp/11vx85/8+bUbz+9f3OpghRgRDUARLV0DZpo2Ksnn4ygBLjIVBbCu06TPIHIGKgrFwNdtKxozRxrNJaEjwNn83NRCCBrFrM/RTWeeiaEPxlubECG5FiDFHkIiQZkZoFoaz3Zp1pWIs9KpmlDmEhIvofXSIU/zNDBDeJbEtzbrRCi8r6pqVS0zphT37YjWWCxb1Y33GQK1ofE+A0TpYui6PM9FAgGpChokUEASmLBjjWnMlfyKyUaZfOaZ56zrgqa5ccrzNVhHChr0uavppgREsNg7otNfR/ImppEz2NqngYD2WWAr9UnBzhGqQdQk+jCHaXOUZNOJG0SWvEiJ/0NEierliDOf6iyNbYIPmDaOHGK0leeAuuFp1m2V2+NZc7odTkS3TrQM1ACHQeH2L7z5cFUXebXMLl3de/ekPiwoEsQAXQVZHdxGNlx0sIBuHnWuWAHWBg1ADU7d6N6x+/itedFAaEaTcVU3yqROAsQMvZ6vLn/5D4xJY7Y6PUH2xBwjORV0ZFiKSu7z9qzd+VvfPc20+fVPM3LMm0S0hFOAkhRWeuoRFUmdESGbTbYnAu3DH72Zjzbyy5sP33xnu1mtHh+3v3m73PuHH73548996x+8/5c/v/HP/6vi5VfO374bdkrrxC5f9hcuAcBolZ9fvTl+41sPfvj0C//sP18cf1C9s3Xj1o2zf/OvNn05Ut25eLN+MvvFX/2AYYEOBXe2v/D1125dPXrv7JHs5UqV26q0yGVVwnRAwtSWoMWoeOfde3Wb/eG3bpRNu6oOLjhdtLu0QygxKkBQ8GADI1FQjGrZgCyqBec4Z3Zts8i8Q3QIZMpJ95ewEkhkILDOHExjriS4LH3RNCuTmOe5c1xXNfHaNN9Xlv2PNDvVNfQ8Bf2BJJs+AGB/O/Y0Oks1unOEmN4kvYyjL0bXRb5zjhwnf13XdQTkHMWoIpIszKHrNCboHkZTRFBYA9jNAEk1ppJdoxBiIhWyspoAGhCKqqgSYiMBFF597dYnd+48uH9vZ2fb+qPkoIurrnYAWea7KAm5kc5uBOtMKZmW1USjgqFzpkqAElWp12Rwb1sAMIkASRMKAGCgGtUgGSnTn+yaBWZgBsi9ulIjJK61M+06AzQiJWQwJiIG1MxIQQNhjg4RHJkStJxDW1WXvviCe
2Eiy9ny3pmfnw+uTvTGC3jpueXmlVX5kV0ZfyzXB1oNxmc5b4XQOSJ0+XHcmsIYN6/86v2PaGdH96/W9x6Pwih0G/OTGZ6TU0p0Jgru6Xh3NRyfdGdjWo2gGkBQJCNC8EElDMbR3/jw9C76S2Cj12ljcmV7+9pzcIbF2fJ4Y/7Dv3zvrfLjN1672c0rtzfRWQ4SgmJmUg7H1qnGRrNciS0GnxXQ1F29Yucwz5ex4yhM1GjMM9+pLVU3qWA2JB0wgKyeste2DhmTgI4yJWyXwp/M2FXZ3am+cHFva1B0288FcgdnT8AB2vZCc6sxC8yzyMssBrRatYa4EFgBizMEwTitZnv7myN//eTd+5lXHWw9ubSjD7oihPb2jzbrpkWnZsUbfxv4xe7OyQe3f3n39kd//3v/Yn50hA384//1v/y3/8//03x59MLwQj1b+ZzRpfOQHgglgmj9cpAQLNEfpfM+F7VWVqDIjsDQgI2k7oIjSkpFcNTGiIAOgckBuigKiIjOFA3TQxt5wKqa0MqJWAdruysiOUYi7EKXEhwcZdE6hUiMacODvXBrrUdlUlMyUBVMegW37p9TmW/rNlFNydhnaIoZ6xpGRSnrCACdqaiBEKHrpRXmHLdt6xwZWCeC6yE1MqFaRq5LrT8hmhFTWhADQJTYxlTtptUXsOMYe6btsyFXmp5hMhWBEdFnFMn+NdfX+mmaDYhmz7bbhik1zda8Z0gtBSJSYnCmJXsSz+jafdz/4SMhQhQpSq8CQYWdVwve51V1Pt7YCV3XhEBWQi0k3Cm9erj/ttx9cjmDg4fsdhS5KoSH1NwPW7uj3Xzjp2+t6q7gXK1WDQi1DTzHYtnOEeYGDcASqQZogFrUFYBB+cE7wdUoJLDoVllz977/3BfyCxexni+9lIIxNlorsPeFA2miuswNBMki5RSETKAFouZ8sfvt35tfGK9++OeT5pgG48J5EommzKqCZC1BFNIQFCa7o69+3WOJTz51O5/b4N9SJfO//veXwZ389//d3mjj8Z//L7vZRj09PPy//HjypS+Ot16+U+ul7/+9498+AoCycK98/x9/8tMf7NCwmXHHL+1+5/LhJ29OYLIil413P3r/vWmoSxzwzgt08YXy+Rdl/9rd6dPJLm7Mxh+tJpuxLmjuqSmhLbXZgEYxcCwGeXns7Ce//Hg0Ht7Y237XskVcCRsWJCGSNyhIJJIH6pA9mZgvfNOsCLIoHRKpCqxbrDV/FJDQLLLjTkLK5zKNAMSEMWrVrEzFses66YIQUySTqGw9kl1V7NmdaWZmaeuE/RgYwBJSug8/S+2dKGQZA1gy0Kd9aJSU75LKfQRQ77OEoosaU+1sCl0nyeOXZJxt1yAgWcrV1GdyzrQU78F8/coFJca03WpMmFG1N0ZEESSnKgoUV/W1688z028/+ODKlStlOUyXNAFmzqGj2KkREqEhRTWQyOCkDxY2UGVikU7TNonIoiEhIUqShkAP84FeQAQpAQIBYu/aJlUhx6kOt/5Eg6m6tNpmiqBKqgBFookBGjMQI0Ukh8qOMgVTbZlzcEAEm5PNruke/Nu/9pNcN0X3VN+T8bUxjfKta3TlxVfmGy88HF/KpM6bnawcGQki5MX4uNuoYFCvyv/2T3/58tf2vjPxZeXlqY5kXC3mUBMogBgGhAB6rvMxb1y8URdqzXSEXQRQBScUVCc712fL4dN699p49L0v/d2q2qJ5Pj/pGFfh+OPLw8tDfHD747euXCo3dyZLN4aS4aQeMIhh19ZA0XcWXPABAFjjXMhl2RAlqjpyorYiLCArqG6J8VyikBtznitFGs2aJ3XQAkjUyLlgxmLggPl4gKXWXt+eN1eu7ZYevCPMrixDsCXDKmADbd3hykGd4XnUWYRK41IHlqnGsIiz0/Nia9TOs49wbzwY7509rulSWO4Wxyfw4Q9HmEdk5rH70tfRLk9m+vbb7997/z0P7qd/9kfo9OrlK5NSv/iVr8+XjwYDvwVF2wZVJdIkkiUAjQLU+3BtfZFplMl4fD4/FxUgUuv9eybmnZMYAXOyqCpIzlQdAgEVWYYIISpicIjdKlx57vKF3e3fvPdRXnokJVBSJHSKGiECKBCIIFKvNCQHMUZTJUVVAwQHZAYKKZoJGdFiB4hJxsBEGMVFioSApFEsJcEgqqNo5tKcB8HpWs5hvYWS2PU0LFDOfCvdGmAFLiWnQL/BRSQATSCOTqM56l91BmA9ZwN7OajCWiEVwbQTS0usnkNrzEwAKQ7ZMYGCqjrSTiM7R2k3vEZp9omF1tcUyaYVQQlAVMgIDIgoxohICRCdBC1ELlqSwaJDjJAckJ0xqBEAhdWKXa4hiIuIWV0vAFy1PAfwBDnFoDUrCFW6AaOL1eh+WT95eZN/s/Lgp4cRdnNu9Avb5f/9T5/M3bgYmSwCNsRdlEUHw6gFuZnGQLAQaH1oNAuqlRWcyZMZzGfio3WNN+usZtT5r//y8t/9L1akg86iI6cIHkE1RjAHBKgamEABFZ0aOYjmnOswnM233/hce+PK9Ad/7u58uGvOBgUQkUQTjVQrEyFlZRHufXr63/1rHBTFzuXFT98sQjvcv6wnPsqi8APtuvLgjlpWf3p7n0fn/+H4+Ox4JIBVV73zMwDIvvoq2MXzN995+V/+1+/8xU9vfvHaIUz9f/iZFFefapTFabb1wvbeC/HaNdjeBeeD0WxeH53EbmPvymjGXX2gk6GKt2ZghqxKqzwgAcxjPRCqaPvkOMygFRRrBy2UjhslVeUMgMg4LVsYDayVgOz6cY4CE4mBagTtEugCMFl+1UQkKiEkjgxAYmuYQ6cRHLkOokRxSmSUZVkTVhQBkNKIFhQEIXnueG2QJ8JOBIlClMSm7bou44yQYtS2DUn7J9olcfSz2BAi0vRdheTtJFDVqIB936kYNZ0cSMfPRM17Lyoa1czSDrvrAhilxt4516kQgqOEwlAx6j9MDQGiSGJF5kWxrKr9y8+xz+fzOaFlWZYXZaJhL+uaHPVrOOqlIAkR8Oy1ETSQ9mO7aMnYkWZSaBhRk8KiTwk3BSKOGr3jJnbc04LYOnHsDZ/1DEBIhhCjgmmalTEhELnUrlDCmyAQJWE5RUzWb7Uw8r7cG83uPZ1sjBaduYb8DFaq9ZFQ2cyncnDvbEjivxi//91vV48fbvNeBHC+e/PHP7l48wvF7qtPGzmGvYsP5g/8k6OP5tu0P13e2xs+d/T4wDVoLWFjqDjhyezRrFt2OrAO8zAaqgh5Z5JFq/fh6uHbn8AJTQ+qQzhohGjalLwFgYUvcqtXxvzh0eM7H793q3ydupWAK4kCBVIii6CoGZqwkpl2CMSQQ9eqmtMOoxp4uHSBzuaRnZrlDjpon9RgqH65sNx5cgICYGRYdCgqwMw6E9cQF8OizE8/mFYlX9m/OFCcjLYPjo6pYa0h6wha0lmEM3FLWE1XQxo5FedDtWr2dnIBf/5gtr09kGV5NHnBQ0mfPHb3flUCAyFsXYSXv+FwMxzOf/KbX4Ywv7TzytMnt1f6WOrsxkvfOzl9fOXqlbK8
UVWHly5dfPjoUII4VFMxAIRIlIxAQJQCGPqYsCdPniCaI7feHmGfEm/g2QUVSSQwEwfg82Jez99+79dMPouaey8aofCns9mimlOOhMTAYoJMCgqqDA4BogBigrNTNOuiRFXsNHMce+KqAa41nEhgmhA5aXcSTZ1jTaYj6YjInimsJJ1CcKgOe3lh0lkjEBCICBFlzGCQZWxRowNIib+I2m+UkiEymcHX+u31YG79L/vNcyLRIVgURUQi0pS1kN5HIuCwC9ILO8C6IAjA7IIIgqmaEqioS3xpJpWY+awLktgdAOl7RohAhApKjuN6RmiqxNyF9tq1axf399+//du6WTnO0kxATck5FSWCFOEp0vliqEpg6ggaadffqgvSFFRQO6gkkDafG1w+mx1WeTeszRo/ny340lCO4U///eL3X7zw0w+qp7PIPoOg1ilVfqxKWd3NCVYKrXLdZIE0wEjoQa17jw4NKo5diGLea1QApMNPwuHHbu9G01WEAuoMc+Pgg6pj1RgZUIGYRLpILo/QovLQNIRP/+iPX/ru98v/zT+p7j58+uZfF4d3fTBgx3mOuJ1LE5xXZTegEbLrKDw89IhcbFbz1keJaqKdIxdZHWVDP5R8NJw18tY7Ozmf/uv/6XIZAKA+eXr8zrvXazv+f/3P+8uT6sPfCK/qwYV7xdZob2dyaa/Y26qZRU1XM85GnLv5vSc2HpSus0X45kZ1QsvfnGWIxk3NMWRRnDatBW+FukkGUno+edLmg9EihOU82/UCkIdcY2cEoI7JKQD4wocmqKppFGm8JyCVEAnJ0BloVAETA8VUZ/d8GzIDItRoaRRDzqmaI4oCohFNQ9cxoSBQmrIiEBgqRkBCiKpRlYkMKGqkXo6Q9I/YiRChSEzRgaICfdWpziWaL4hEADPqt6qwhrgamAAS9IYDUkDo71dzFFSiCDOrqWpcc6F7smwnHTmXRmLMHKKY6drmCAqfRRVnmfd5oaq7u7uz2ezs7HxV13sXdidbu2pGnAFAVGNm6q9vVUAjEBERSeFv6fQnR+LfoGmqU1CHikgGrKb9lM/YOYkxY04Ky051XA7rpgH3TKIJEBWSlZ/IASWOQkpEdewQ0dA5dIiuJxkhAaT1aCwm3h00dTX1uv0SSSd6t9Hdhm/VFDf4vZGeiS5Yf3q7vf6Ni364+e7hg+Ojk+/9ve/+1eHPXtorXxvv1E9O7bEuW4mbMHvn8PrG+M7ds/3JS2X3tJ41PKBYQ9ZAqBpy3BwHVyCURAUVxaiq5szs2MWi03viG1/NG/Urdedynm/yIs7mtNIm0+sv7X98eOfw5NHOQ3/j0t6ZNgJixKRdiMEhRnS5I8pcaCIiGghoJKDOJbmpyZMTBlAH1EXsiHKPpefc6+JEVSOhUzSIAVtAJnKAyhehPHhS+bGHusqQZU6fHD66uDuhvN5pR+cnVThtqvN5Dn4I2/NH9Ys7fnB5swndydN5Xc9WjTULzIrJoOQOMWvaZupldnf3+J1RPpLYdJdvZC++4Wk0vXP37vvvzqtHKvH7//CffXC7vvfpB1/73d8tCgrdUjXWq4DEt3/7blGUKbYgJX4oWIzqKEsH1RH3kWfOmVqSDjA5NUxkZgCIIiGjDKkzI2IFAcfVqrm8t/f04eFgY7K1u9upIBgjq+pKokM0NEXIKO9CQw6RSGJEdIl3nE6mA0JFVIgZGYCIOXYYEx5TGShqVFACkhhTeFFajPZZCwamZgSZc7GTVG6nlSkmlqwBaWp8+/GuRgUCNOha0aiOKIIJGFhEAyZS7DsASpyvRAXHtVy5XzX3BzFBf6xvsXWtWjGlSOTM1CKuR8Q90Kp/K6XsYTSNgEjRIpiZEpouqw4IEEgkEAI5J0HYMcQIACGs0tg9GcYYoFNd1stVs+pCwGgK4pAUMYokVKEqO0cACoiqHVFW1y1gIPKqHQCJCFG50hY7tY5WqgPwu7PN88nTAK1Ww0jYzuGlYvDee/XywH7nC+Ons9VPby+YCxaAszAcanUyx6aItRmQLiMEFZMz9XcO+fnlU+MmmBNQGnOoahQbCc5v/8etqzfaFZSUebZgHUQyz6aJewoxUegJcwJRUBH2xcl//IudO+8dHxwOv/nt8stfyv/Fv2jvP2p++xu9c2cwnxuhSkMloyOIhQ3HLYCtKlY0CSBVVMKihAZQldibgYY6NnWJI+JC0DjLBAoA8LM5VZVsE/32fjkptdSz4dj93t/d29vmTOIATuOSm07znEcv+k6mP/qTa5+7LBukzVlGIoPxXt58vlveOSVHlEk3tKBgwMAKBsFpRmpZJqQhVx8kii/UBY8ZRUMHSqBGgDF0nQGQZ5XO5wVBCGFF6AC6Z3b4BIkBjS6NMRUgLeyAABSRknlOJJKC5yyqJQlxSgtWAEVaZxH0o23VuO59EdHFqEQWEycSgB3F+FnSKHESSqOp9EVscjoQETsCVBGBRFNNLt71CP1ZXC/0s28VQXw22u3TNrGn1STsTJr7RBEldtCLSCIQQoQEpxaNJ9MpO05a0+3tHeZsWT04fHTg83xr+8KqDeQoFeJApFEBQUHFNB8Um0UxPZlmzL31Q9dpDGkkTyganREnoQcDATKgKigkDY16zqJEhyQSkz4cwMQMAJgpMeTTFpDJERElfFi/NiJHDEgKREAJXSuKSFYQd2GVF1Gq2ZYf3ZYWC/taXv5MZl7oS5b/cCX5JDtfnN79xYOXPn+rmp/94mc/+fY3/8HFva+t7rc2jnDGcM5tqLa+UJ4cTG9+/nk6l6PF3c9/+Wv3P7l9/OgUyYlJDMobmWZkQSk6XcTgOzav0g63J+3jpjms2RVwTr/96OGt53fOKnw6P7g8vLzKY4jL/d290YgW1dPTamtvNWGiDiWREzKCCApBxGdx0RCnSaVESOHoxGaSOe5Se6LeF6ptbOcuUlepA+LMB1IQh+hMQ56RRiDy/NWrt2D50cPHM97w5tSReiqreZ1vcVzFTR25FqAOXR2auhrHbIAeXch861xdnc/IeeKcWBxAs2hadpsPHm2fP8xw6EJVX7up+1/DoEcP7qymD7/29W/99Cd/ruGEvH75S19/9eatyVZZr6bOMaJIDOVgOBqWUVNPRqZsIKaaca6mmKLuDQCAHFlUcmQa1RQtPTRkaYFKLkrsTICyECV3ZBHYF8eLR1/9xtdOjqehEUeIyMaI4AwjRYwaFSJFIkCIiAToECQFICQKh4U+CREQKHRiCBq1yHzT1EQEaA6JKNP+SIOqUgKpA4qBARRF3olI6NKRKzx3EgEMkc0EIKEjnarheiC25sMqOuqisiOihLZIagng3j0FCXqbtj4O0SEKpdSi/6Q5NoPY+xWAmFQVAFOMBKChGlqfBpHIu0jARhFAknjKmWg07aNUE50HQX3mNUaN5jPfxI4JQXVQ5C++cOPho0dnsxlxpqreuenp9Gw6U1PnODED1BQBssx1XTQNTQzoWMRCEGa/OdlczOvQrpxTRANlAUUgVIzEVnsJ9fO0eTR8Gmnm5ruVuun9zld4bZD/3vPF229VL27758kdHAZl1hr
gRIvxcDaN3JhoKCALK9kbubeegISQQy2hRXIeNExnpIIQIhbd0WN8crA5uVaHioBABSk3EyAEBQZDn8XQMrNTW2nMirw6uQe/+eXAj7NYL3/wx6t3f1V+5Tujm6+M//73dPEHq8Oz9sH77YMHOJvr4rQkR54JWMERoUQix8DaBQJbSXKwgXcADlQtBAQn3i9ECgIALTwTu0WTFbsndZgdrtptd2mGFs5DkcPI82gzFlmn8/Dp/aNf/cmNz1278NyVJ4/e9RmpzstYzBscDSLLmWta9iQFUAwcvEBwGTgB0sL76aKBohh97sbmyZMFOTZDyCx2iKDApmrsnFLi45LPcNXUzFkS2KqCofWZXIZJ3YiAxAQAznGCWBFCVDEi5gwkEjtQ7dbUNgJUMF274AzQAUaN3mcxSqLXIkKWcYxCmEDmiOgQwXvfhrC2NmCSO2ifswTPQh9SbolL1kfrnRTJ4qT9twAKfVODgM451diPttJtbP1OmYgAMaoiMpFLsypL0SzrGzxlJRZFEbVXmdVNMypH165fPz588OToCYAbjkYGFjshR6qaZb4TIYIsw6i6qhtiZ/2VC4hrbgElQqcRZw4QjRTVCB30xU7XG59TTgwiWJDAzGBAjtQUHTkkQmTnABLOkqjXsPZFBhFjDy6BZ7d/QvUJhRy75WpFDJUsX8GsCXLncX02dOPArI4LDcJA7uDNj27s3boML19y7z9+e75HVx49uK0XHVUwCpsnj+fVw7nWMD+Lk0b/0I8+ePDpKy9cPXp4UISJGMROnRiwmkesFMiMkZyTNlzYu9wcLfVYlTQXfzYP+aTxzUanI11NaTK2arpa6Buv3PjJW7+ow/T0WC7Pj1tSB33IlQOLDizGrPAxhKSzNzBCp2agIEmFrsB+IIYammLvctze7uoqHk8JRGNnoEiUUa5dRGKxwM0T+cpzr0z0wZ3DY819UbDFQASnT+Y5Y5aNfO0H7USq+fnT+fbV7dxr07R5IRsjXFSFQKMKVXPEtlFYHD94d0c0Mos8no1vwMVLI1/NHn+6ePLgK1//O0TB08n+K68VLrZNNRjQajV3CBoDOWEC0JZcLnGViseEhMTE4XNkpo5Sa5jSpVVVE3pGoiA5IIyqGXPbtoNBDipN6MhhgqsB2Hxxhl1sV40fDDuLYMbkMCoqdc4gJi00KGjThr3N3c3xxt2797PMq5pZCnJBcmxmJkroFExibGKD7NKg1fUXEjqiLkpfCwNCj6fEtmmAiDNO4uQs8yYBCI0AumTvS+AqQEdR1ukOpoacQgxpLZVKkYIaI6CLDtDAqTlKpD4ANOldv333m37Wv2EQgJxG7TkDiAm29yxVLYEEiNIrMvUAlORmMRqRUzIm14XOUvYDUdeFtAmSKAyIBlE1gpWbo6vuWr1aoREQqkqKOE31RoyRHav1XQ2YpfdLfAbU1K5tmr29HRGcns7MYjQlMfA5dC0DkFgkgxN5Ebdu7y6yuXrQw4+6wQR3WN76iyPYHwcOW1n+oKmdJ6fkCOqm9cwhJxcsRCyYPpoXhwtmH0HzSEULAuQBagUFLP1gqPXZ2Vu/3Pre82RsUZCGmc9CqMiMHUvXkKBnp7FTX+TG2Yif/vsfTkAabQCsKDbo9FH97/6H6heXNm+90V27UTx/dePmxRBimC7k4b3qySHdu0eLp1mXRfZQeFNmVCCALIsdSFDKGofUKYOBy8hARDgGAoAcPdZzvHrlnmg9n+sIRpf3i3yrVXFBwuy4fvA0Vqc6P8j09JWvfWnv5kvtyYM86wYqeZ4XZdGsThS4VDMS19RZnSLVo1ARBBoEBoK40Xqu2D097tiJc4OOY4rRQHIRBV1CU6CpgWHTtmugRMKwOwMzcmCRiACYehoLIRh+hnUB51hiJEJFlCCRjJkTGDECgEJy8gKAUwAyBErK6iQKBgAVeXYZJt+tGQSRRNYAxbTWTcAZ6O286/sjwSoI+/ThZ5udlEhqqexP9W5a9xgRRYngqAe+ijA757DrhIiYMk1KUktMmiRkhD4XGyyBHELdIiECsXNBwrAcXr1+49NPP737yZ3JZHzh4sXxeDMaAHIEI0dd1yGYIxSVzGemEEWwf2lYrwJBQEBSEzRAY6AMMP0pGSo7UgN2qOl9CwbsCNEQiZB7S5dDROov4GSYRqIE+eqb/l6bjsnDbWZKBI6AHY1yRqs90B2Ir3TWmD9ZwWWfTRv+5YnCpodKs9Hw41/c+cIrhy/euHytvHH4m/t7V16oVtv1wRKk2MWt+09Op5/Oro+u5TOeasQ4/fpUPri25YoOoNIVO8y0UZc7IAMyZBch9tyVGbePGz73YIgRqmU4Hp9s7Gfzehm2HUw/AM/gRhcu7XmKT+4duJeenzAMWo3Okwtq6eoEBPDMrYghKDEGIda0maDOhJR9LqGybDB45Vbx+isuH43FwuJU2647etzd/RSbeeDOZVmOYGoMM13N6hvD/c1Low/vHc2Pm2LgHTIChCAriiVk4SzYfJlJDRHQCgTppC5KKQpogkIMLiP0q737t0dFmFUzAN9dvolXXh3lBjK99/Cnb7z2u8D1wcEHNB7ffOVm3S68e8Z7SodINYKxG/gNVVQLAMSOYwxJ3mcp/M/6AU+MkZxLSh9VI2IzExEi14TgiJoQGIgALRoQdQpOusl4MhgMYvKYm2XkQIEcq1PSgOzVQDphhu3t8dHjo5Pjk6wsAEjX2l2NQuAQAJmlDZD0B0Ca5NFE6YQnGSQ8AzSm9bsCRXCO1WGIMWcvIvWqSfsDjdbLqA0YHNB6dgw9VwtMzNAhRTUDIwDXT5YJnWOIaYJmYJSWTzGVpp/BrC1dpQAIiVuvLjl6nUtuCugdR8kCqCkxKdXyiWxL1puOXDItkQGBqjnn1CxjjhLZcXpdmUXmTCXevfPp9vZ2PiiaVQsAncTNzXE5KI+Pn/Q0hlQ8ApopORLpUvk1Hm145+eLajQqr127ev/eASKU5aCqGgPDqITkOOuciZnKanNR7D0nD7erCWUHoF+9yh/cX+7fGqPIAfFRW+XXBiqrXGD3Zvn0ZFYouQazJkMEWfBH07FxiG6z1u0CH3aW5yDkSLRVy5pamMf14aP53duDG78Tl1Ok/Ojg7oUr2wMa102D3oOokCJDWK1G26PpR2/nh3eZfYzkHGJoALPSe52frH78Zx2XUo7q6y/5F27g1avjL94qsze0puZkCp982Bzct7NFMTvrUBgzcnkOrNsjqaoIESgCO4dBbZBT6JoIABZX9edvHu/sre79crjpW1d1h/cO/91/X+vKe6aR0oBxZ3jx1ku7r33fNeft9CTCiimqxHxnRzdL/XTaKbRUDtkErcNVptRgbDkLmrMfBSy6bKelUVPDvMv2ymFolwQZA6iARHWRVVVFo8QMXDKWAqiqIEQAwTSsIQPt0RKOEl1d0TlLgUaaInXNIcYY2TlLpSlRTMho6+NSKNWSZADIzpHDEAK5VJ5DqtcRktk4Ha7eRISIhJgWzGnyjCmYQWMaIUGfvWLWY61MwHolIwIZGBhp/yGQ5uYKRiTJTI/AjkTk4s
WLT5+epsp1rQthSvkN/YVOaeMTYzQzn2UxJtckFUUeQmdA16+/cHR4eHR0WC+ray/cGG9ui/bwSe+9aRSJnrMgYp8lflpvykBESO8scIBExI6dS4LVnvPVB6llLum7iQk1pcekEDXHzqXkt8T7XH8JAOjjxgEAgdflBfTmCFMidBYUhE0i1S/CmLEplT6HiFWx28j71GmIec6wghbl//N/+x9eff2N3YuXpw+rKxPaLfbDofiSttz+vdkdmnXN0yZcar4+Lib18rGn39z7tclS2ACIMA+tPf/cSw/uPTRkRSQ0cARCOKPlwwqXmHkTi6TVB4/vfOfahGgBfrMoJ6HrQntWbk32diePjw9OZqN7Jl/oYJWJRgcGSkoKkbSpV0lraBIQQVQdkRAwJDRshO2d8ubr5eXnO88A4EiKre3IhbtwkTcmzcN7eVPHxblqQCSOtfi8qE6bzWL05etXHj6ZPXqyaE2HOUPIquVqvMOjDSBfVnPa2MjFBMggQsG0PfHHJ03AjvOhOzsI9cHUec9++drflnKXuqWYLo5OVudPeTQ+fnzvgw/e+dY3vqsyZ+vAmDhb63EdQOp7cu93z84b9gxAZkrEnTSOUGPLro/8Y3YJpKlr718Xghl47zXFI5mRohICZ6hiRhlRxtQEeevtt69efaFeNebQCIidM3RRgVzbCRADqvf+jVuv3bt3UDdhNp0SM2NyxIM5EgBVAVVylMjLIFr4vItinVDG0RSJYnKvmxIiJJg7OUKQKIqEhlEEVZVsjQpRcAhqFi3hajUasQPt04cSN0pUyRJJAzoTA2RHjiiaAhoZYR+gaIAY0VKsyXoF3I/7DMw5F9OlC32lkMp/wl6LmiRRoj1N8zONaIInmJqBdtpzuhDRjJlVIxISUQOCSg6ADBez8/l0lkQyhpixWy3rZVWxz5BYRATUIwEpAIqIGWaZHxUlAGo05kFtkzf5AAEAAElEQVS1qH/72ztnZ+fODaQjAK/GSMzZCGnIKQK+RHXxxXa4eF7CeRZ99nTo8Ird+mLxybxersKXL05+/Ms5+YLirB43mFNnhVWhXWkWmwcysYLyXa013mmvf7m638Q2UI3dANmDmoJXpU3i85//vNz/XKTxqPTNcvrH/48//tZ3//ELr7x8OpsVVITYsGfPJnHRvP3mnnJDELF2kgXGiOZD49hxURbgrV6s3v+5vftm3Jgstvf8pcv+2hujy7v8+78zgN9r5oGPTsJsEU+P6scPNbSUqztr2BRVrAtgWY1VcBv+4mUAuHtjvPrS6/mf//WYQ4Qy37lCV/fdBR6NB+X+RR0x7xW0PR5ysMWT8/d+c+lLr02nC2ZUcBqa5ryNCI5CB76LiNG1EiHDgD42RISNuRqhXlZWFgCDvZ3OZtE57lpjkaBE5ihEU5RWSJ0D10jjfUJLYkx5eX1qR3rO0gq0lxqk0Skk92y6mcxiDJ2pYQ+4sNQsq6a8aNdnLPUXTNoBJ0Le+mY1S85FU7KU3dWb0dPwumdPJ3N/ooiqCkSHRI5MDdccqNT6rtM9+wu65032fXPsIZpEGo1UPWdPjo4IeT26056zldzDBtgzvxIfnQiQkMhBQkrHGPPcS1QwuPbCC1nGDw8e3Pn4o+svvHj5uatBuhA6JDRAcq6T6JKMgxAJnBE5p5L4Nwqmxi4zJHKpoHgWyN1zN5N0hRhEMXNk2Cuc17ou6sMhWdcssBQ7s56XISggkK6v+1QaqUUjyQYQpS6w/DDOMANTwUgclt/LL95myZC0kes3X3p4cqd52v3qP/wy6duOPn14fHB6Y+/K9//wD7/x+jee3L5bT2OR42J1rJubDuTd+eEnJ1PPpUhd+BFCDF1z/94HnvMQOzNABRXgzJO0zfxsNCol1Hnmspzmp0eoq/Fk8uDw6NqukJ/gxjZIfe3i+OCpZo3w6y+c3H04XFXGDM6RdurAxWhM2onLUvKvi6pOAYgFIkft0Iavvl6++LKomqAJCBEq+DZApvnVK/7SvoK0Dx6sbn/g64rffPOD37n52mRcns0qdPzixt7FbHz38cl8LgzgyKrVtCwGqtrJKi+cQgAUJBBrfKm+ILUgnY1PHgwsOon1lVcXFHn2Cbtx5/mkOjRo7r77w2j0rW98hzwHCcgjoFQd+CiUAmM1ErEncERjBI0myfrtOBXRZkYIChC67llMN5qSdGH/4v6iqpqmISICTGxIRSAJoWs4HyloVEmF5/l8lhdlxhyTspdQ0DRi8iP6jKtq+dY7v/HsRaKYQh+aBABAgGiaERtR6m3VzAiDCDKhcUocFQVCAlACQqIYlSD5+YAzNqSo4pCMkQiDCBMxUTRDtMwlUxMCwToOAQGAiRQgI5YokcwRopFDUtM6NOkSJwBMUWhgz2zM/QgK+mYkeXvjWiCKCFE18wyAnXRkkHnvyK2aVQoZ70UrquAIkTy4TjqkpDJdK71VnXMhhFRzqEGhpMlqhaAEyCCdEJP1UhYdjUbRtG1C31OsB1lFWYqI52I8Hk+n56PRpmM9n1dNV483L9SrUNXBuRLJo7GTYuWBM8AcY5llA8As7G/EO96VQz1g83vFYhI+OKheeW3H73V6bEyQo9M9BwFiW4NXUM7qURaCIWNFxbI72Jrc2h3StCv2b3YH91QaQ8dITtGYxkGqH//Fxvf/xbyZHh4do7q/+uH/2xf/fP/ajWpWMxWhafy4rB98Wjw9gbyE2AAVqopCnhmZLJBx07I5HuRUIg2GGlb377gnR+E37zai8erN7MLlwfNXsouX/Q0P/gtjEKk9TrumPrPpI5mfc6ftuCzBZ5c+99sHTwDg8VZY+kfXw4k6ERc2b9zafONGw4FHHDaUcw55LKopFlx/+OHw8R15edtMCDFqg5ppi6YQgSLBMvelaa5b2lXQOWGqkQSEoKQiF/Ir4Hm9O6bjuDKKTrVwQagxUdNgBKQhNo0yk2owDQjRenyGGqCaWG/zcdCDk02jOmaRvv+LEiGpDp8BZQEVCQAlmc8VI5hZRERjTjf7Wv3UD42xD43rCane51EjAYhID680c85BKigVHDtLuUmg6DwQaDRK4yJz2C+DMPWWCsBglMSSEgkpweCeUeEMgLNMY4/WInAKBpj8P0SOPPuomoCVMaYYGHDOxxhTzCIgZI5TGNTzN27kZXnv7p2PP/pAu27v8pWMuYsCSICYec6IASxITJC/XiVFCOZcmjyQ6/E7AOQIkNSM1CJixs7ITC0r8whGBgl3RQkI4Ny6fXeoCkCpIOnlcIl/3VfxKegMEq4LTLVjFYEMeUSDBligoHZ7sL2h7nZYxk65y6LTe++8B5lnlxEyOdc0zdnd2fZ49/2f/pqb7NLF57ZhL9RPvWfm7mSD/82nn96VxgGqth5908x9MRyN8tBJpMYpatrCEfuCzVZEQZU8q8QWkATCww/ev3Dri81kCBBdc+xo0sT5C8+/+NZv78yqw9n57nhzsrlYivfMrm2WGM2cU+mIfW9PVWPypsCjUVjVuqrzS7vF7iUQ5eSTyCiaU+6CEkFmBTIAAPBLJZWT5Z3b3IT6R++89
bnn967vb6v4KjTeu5tX96dPpwenx02MYd417dx7tzECnwmYAlmERiTmGe5v6j3Ns+ODvDtBgNV452R7i5qKfekpoiMnDVhTbm68/MpXuthoFKbSqycogsNScyLtiNm8oag6pjJz82gVAAMAgSCEDlpQDVEcYxREIItJl0uGEkXyghyPDh7OB3mZ0g9CB7Fdbm6Uk+39k+kZZT4E8X7wzW98893bd1ChkcBGJgoMYNjnIiCDAnO+rJu5rQDBY5ZGTBKVmNgRAieKlQIQkkMVNSLUtHoRBUfckwZA0CglSSARQkakouSoA1OQNLZy1GeUO0eaTI2oPR4vuTTUkHpyVsbkOevazoDAorlkxiUw9bnvuk5RzcyiEVFKRCBDMTUAUiNGSSM2UQRKLxKHBArOoSIBUQgtITqiqAZIQSKnrhbQTAUBmVSNkMwM2SWABliyaScqIIFLRZX2rA1CcqwaVS1zJADSdV3ogNCjE+wX1DHqoPDeFdWyevLkBJGn0zNVJCq8Y7QMBBx5xAKV1ViHvhiQ5qIeqOxar9lQtgZ1OdhsBhEzmIO80xjc8G/NToslw+WsjW3uHOxH1yg2RBsZ1SZjisFTmHWFZ+cC0nR8YburJJwJej/Z6+ZzJQHVTnVQjMLxrP35/5de/cbh/YecjTyFd3/+Q8/lxu52rGrPPppWv/irXZfVMZSaA7kAjQAxe2XurMmMRVvUgpQ6O+bBOH/jy/dGI6ltl4fl9BG9//PFL3/uBiMpME62y/0XaPtStr1Z7OzC1SucqdUrmbYhhieL1dRVAPB0ty5h1F3ZcyfHS/bFxlazmMIokxAoeMkjW3RoJcMsnBeTjfrBnfLabtRGAfMi19Bi5jCyaRI+Fx0JEEkxFvJGg86NwEZCeUcMmtVQlwEBEESggRjBgKAzVLNIaISYamhFMIWIFskpOLAYU9ingYp2jC6xDOBv/EgyRgWIqs6lIEIAAHZsIqSqjiANTYiT+jBlcZoBcg+QwYwgmgMEAkeuW3t1NCqnVXRKCAUlRxBVzUgVwDw77/OmacAUASjL0hWeqNdpzMtpvQRghrpeEhMgoUsihlT+AhqzpUBxRWByKnL5ytXj46dgyhlCTOg8RFJH/T2XUbauZdOMHaOaGly5ctURfXLnzp1PP2lCc+XadSansQPnRLEoPHTBMRGRigJBNsjMDMFSYmmPc8d+5EAAjC6yZoCUblHG9IvT3U+YZm2UxFVJtSXgCCGicnIFExBA7BMonCkCOXSkahbBDBShjpDBUNRuUTmiUDt3Fpe3mzDIxpmpqBRQrFqXeSyGWbVcKUTibHH3ZOu1C1ks3vrhz5HArPn866/feOnl4yf3h6TvNDWXBYaVYJCUPqmhsw7AUCkFqjsE0booR/XilKSW/x9X//5k2XXdd4Lftfba+5z7yJuPysp6oFAoFEAABEGQoiiKoiialmVZlt3yo3tmuifGHv8wPTF/wPwpE+GZXzwRPdMdPW7HtEMt2zRFqyValiWKoviEQBAoFgqFemRlZd28eR/n7rP2WvPDPlmkphCBQKCybt6bdc7Z6/H9fr5qklLWjKIs7f3549dFN+jDODSYFN1uzhfU+N509mhxhrWu9g7LdO3dMueNUwiBOApCMuRgUKcUg5VsATx/KhLwxifaV17nprVhEksgAyG4EKEQkTkVM1UwRjeuTo4OJet8p23vfHjv+OTJyy9evrS/l/tMfXdwMJ7uHNy7//h82VVV8GjUSYymGottAxOTWZFJutTDnmbHVhG3BzcY61ZEbdsVt+748MbNG7dePNy/tFkvhNogCSEppimIuCiI0ASNRgUhUBGSCEy1LxzZfPupT775wZ33FvPHu7M9Zl0uz5gj0NdIH1hxRjNu793/mJ3GbdtbIXPAGsju4dG6W3KVIWiZjMavfemL6z6HRrIVQwGJO6DWq4pINROTE4BGGmJs+2zuHHi73V462GdguTwnGTjGILvQDQ5yDXeXNvnghWUHAlVrESewm/VW4F7xrRVAXdWZdR9UtDAPo6iq7nD3wIEAllBKYcK260VCO0rdpuNQKysGWdU+UAVSgoi4ajutHm30/LlWLRSDzxeEEMXq0qmoAbE4hVAIqIqbeg3BLnRYdQMUquDCzYpb4AAfgNLVlW/u5p4kAjTMEyt8g5hgbh4Cd10nIbAEVeMUq+MsBDFDTIFQgxAFoCDRLXBoHcFgwi1zayRG6KUvVHYOdlTKNnSyKz6LvKeNaZlSDCWQLvv1NOneeJ26TCwFrpt+h7WfWh5NViTNsvz4oTyZTWO3kJwLzjXtPlrs39h++OysixF5s9BkHFqIcd+WruxND8/f/fG1o2u//Y//29//vX/5v/lv/uk3//g/Lp4tZodX20laBX/ynX8zPn1qEW1pOoDNG5EG7Uo7qAjIC7FTm4qFwC+9cXcyfu/+cb5/Z7lYQuLe/tH09pW3r15LX/t6swSO/8re/1ZvpQNsPKO0Q9nXauW1NzeHt8bp8NbkKoDzH783ux2f/sLV0e8/OLh6W3YmWlYjSZrMyDrhlHgawvrk/vT8STObtoe7fa9W8gjUJDrvMpmTkzKrERuyw503uu2l2fahiHekytJDM2P+WG/uhPOsvVFQUFf/QIRZ6ZU9EFWkRjHLZj2zufVDjBsK4HALIUgMXpiFCVzcihuHin0Ah1DMS7FhsgUnJpZACI76/6tXjivwVaLUVGEqDrg5JIT6agQQR4NRRbQSEYe60h1KWBluSTNvm2Y8mWy33Xg87fsMQCSKBKCCKQf9lF+km1T8HRNfcOLIQiAf5C0Y6PWwoSPn1XIVRYhQjQAVyMUcq/Qp8sCwqPdscSNitmJmWfXajRsxpffe+/H9u3e3XXfj5u1mMtZeo4SuW1MIjUQAlORirk9MsGIpJvqZlmzQZxJRhWoEErqAHYFAg/osXEzPng8VqKYRRpPatrDDwEHF2GqsIzi6k8ELeQFni7YuyjKy9s/zximhZ5h+Me1wxp9yMUA5R05d11vu6qMQUSz3+dlJimZJA1Pf5eX8wUu3f/Xl25f/8A/+jbGYJlUVRE4oKK4ds5BEq2IY0lyKg8bT9OzkoSHPprPl+QL9ljmA08nZ/IM7765zf++nZydPni66pZArmTSzyM3jJ6cPl8vQ6W2ZLqcii3MJjebOcicSHUVYTXsAnEtJk/bNT40+8YqMxqVoVQ9UaR4GCR/F4r2pEUhYS5EQMG7l9tXpxw/mltrTfrv+6enRyezF69dGI9F8ysw3b+x//GCV1Rebs6OdxNgaigeiIeSuALq7l9b3MnxbZger3ZltszK4BaCRR01sYmzXmZs4S3GqHkKYRE1d0TaMuVFuUaggWODkIE6tryR0owDJGr/3vQ+uXT/sNKckQCFbMXunOUWpUkkzywamwMO96MQE5UnbjsfjJ6fHq3XXpJaAYvbOO++8/QufzblrWFg4ZyVmL6VNTVYFU41ooAHZU3tHhUttcF977bV333mnU4UICMKh1MRAJi0GwIvVOXNgAZOX0ucegEQhYitm5AzmUqqWX9kcqAnb7ia1c3YTcK4x
n6AwmHyo7quYqNdei9aNeAjDOpwJxRXV4F/FoVbxTDbIroYT+CIwYfi6encS4OQcCEXARAkoaiwEQjEQU6BhZ+wDnICoclZtcDOz06Ctxs/x3wfIwKBQg5mIlDpXF5EQOAgF7t2YmZlhHFNcLTeT6cwMZuQIRCnG8XqVu24rMlKFbnM7asLYaUqjvdEaKxOfHk110usEcUY7rW6szHQlWI1HvVg38m2hfpZoU/rO9QUuS/jWd6424+/JlZPReFK0K2xb9BZB+dH168vzd4KvwYHJkxOgCkTlsvVuqTu89/ibf7r7m3/vv/yn/9f1uvutv/+P/uPXf+/b3/rGb//O/250+cqTdccQ86YnNE0AQp+JqDRxQk7WbePlq4+vX/3Jw4/54PCjZ+d+fP+zn3p9vti8+6PvAfnk8U9feeVK+90/YWyRSE0g1GCGvo/r7XbT5Zdekzc/T2ncdqxdTqMdALu5XT98IrcvnVyfXX/hZeLCbbP1QgzisDdu1j/5waYBn96Vh+9PXv7Vbpy2eT0mQWINoiQIqQcDnBkBQT0TWOKsQ9pyazzuMIWMnWddHnebcR4tLbvkAHXz4GroMjKxs6ub93CDq5oSw1BQClNxKACqQiSjbrsVZtdayFUGLWOQD3q4mJqiSq6Y6nTX3GuaUrWhGtw9oDgzilrbNLX6DGCrrtQqGveKSqWBzg5U3GolqDORajErDiwWi7p8aZpGVauNCsDFkV81RlXzSBWNWRFRz2+uv3baOYYDmODGxbRpG6ZantalthM8iJgbUwBVfxS5eSBo0fqptVCvenB4+GZK77/37sNHD3qzF2/d3pnOuBiYLwbDCIFDEIeBWJir/wuoxLpaN4CYQLXEHxRWldsz2LZqBY6Lm3g4fwlS5+9s7ObgAiawgC2Zm7sQYEYVCFfMJLspNW2LdveK+lvcftiXI9nZWP8d1iY0XqQvCJ6bJG5MIQRVN4ZDV95O95aLtTFc2p9+/Oxbf/IdIH/3uz8WbqmoSEJRkDhlh3IT1LvcdQk1TBrm1rSmecmiy+WJEyz0bRPUt53hWz/4PqDmFtpJM94hI2IOSBzLdqUxjLcHh7x3Nd6/Cw1baLhxI0K2J49ZtwgkVDKUd9p089boU68jtdZnv1A61J8bXdjHi9WLFXAPIXCKqkXefuvmJ1+/+uOf3P/4eGHSPJovzpbnV67uXzvYd+4067Vrk7v3H5vZzjRB3aneQwpWNqhmHsVwadIefeoML6qqc+cci1m0tEWUNgntAO6a83qV0r6K8V7am+3xNCkyxFNDcHYTAEF4mmcnj5YxRXKNmKxWajZ+ejqXUByxbdP1wxt3776XYgIyMbUcVLWQOSEQGORRzrv1/ON5MxrVTZMDWfv5fC6BS9bp3oxbOV6epNS6+eH+wYOHj6jeRYMp3upyKIVYVFOKy+X5D3/0w7oUwfMNipVaqA5Gw+rDczI3MxTV2y/dUtV79+9pS9HB4GAwJmVrjATkRkSkfc8iFHjbbyWEDC82kKrUigM51zJczI0Kxdi6W6+5adgLAomRqZchhAhkgFtx+HPBZX2D7j6kRFzkO2ruqZblNa3JrTgyLLUpBek2XWAmYVMjZmEGQ0T6XgeF5HPNM0AcHG4EY5ZKoq8BFFQH3XBiRZ1tYBhzOZjRMhdHYAYH7fvK2eEQVWtrLW4UYxOCaB/G4+mlg+uny3lp1VvQCKlNcTflNpdJwQG1h/FGfoQlZtwlO2+tb0oXad56wJamPa/s/JKVgwTn1cf52mq9PEzd+d44EOvTjKOxHOvZfjq5cv2Fxx8+420kyURiEHds2Jn5pRtrk/To3tM//g8Hv74vh0e/+y//xfHxXaD/+r//1//wH/6Tl/7mf3188PXuT/54h9uSLgV1o4WUCQVSzb5/efv2L/zZ976/fnr8mUtHv7J3sJl98vHDu+/9+D0TamdXv/rlt67/6Hv50SnaqEXFI3elt3WY7T177Va++UY3mlG39q5jG8uozU/PADQTHH9mmtv+6POvTOK0Ey9ikhiMME6bxVN/dmKUJxGTGy/jxWv56SNi7okji3u72c4hqRTuiKVQD46cYsCzLBmzjUwzZM3tmuIGaazlzQPo8szXHLeBmZfrpXRgE+sdhd0UsKJboA8SimWqYr0QSp9rBEglr1aYg5mb60BXAvvPwsqqXbjynKt7LlDNiA8h1EMjkJZCxlXmq9xLEi0aWAJL6XMQGZDm7ubOHGpyyQWAsqJYmQEJzEkAELiUIemobVsAgS8GzRfo9eHJ6oM4ETU4/OLM5Z87gCv0HQCIJAS7iG0SYcAlxsDSax5ep86HmWsUipbSpJSzqmo93ovZdGf22iffunfnvUcPH3Rdd/v2a4eXDsyMzEMciBnEqHCMgbrFIIcPvkiqxqd64F58pkEUAoQB7F5NjARHAJwuEB/sbmJsTBacGebZC3sEBMzOyQAHq1l2FTBUdAXddEcN/2mnv9YevN8tf2KhGe+VbXHqPZqxKCyKuBqPqZQC4vny7PDFF4BU5QOc+Aff/eGmW6Q0UjBUwb1EKW5wBiNvO0XGsBRQNZtOp0U32/U8paR5wxJrcI1QHxiBxyGYVU0MWrCQGrvkAHGxhc0P2slnP3O+6drA8vLVcPlI4qRZnbFlaNbVIo0kzXYsMEjQZwsBZfCLD+k+F1F6hS8soDUcWi2A5cfvfGe2c/gLb9761Gv9u+9/+ODpsgPde7RZLufXD2d7swj4jav7x08eNdypkou5O6gIwYMJBzPdfenG+t5Hz86e0f7+pLTZR0khkwNxDfc+omffCedLdXNv+5BCY7HdX77ycvrE9fbVV+Vo19Yr3lovOZIhTFod45yzZ5qybXSxVEds5YCCKi0MoVsPKUBgtqLmAgjB3HIBSMiBOE4NkjBv82DMZ+ZGmur2O5vPNXiSaFYYuPfTuyElN7AwCG5eFc5mBlUCXI2DdEWZmSFspKbEw0C3ig4GZkUIpSZmB9Lett22154poBhVgiyTg50qRZJL1S6IEHOvGaACa1JiveDwVEUKc5Dg1bvP3Gtv5iGKllJZ8FxrB2ZzB5woeKVJBzb3C7MeUUVNDRw61CN5mE9XXiQzuSeO2qubsXAVk1Q2Ci7MG/X2JKIQ2Kx+++FB417tkQgh1CVWZVISOEZR1zoPZGaA64SQQG1Kqr2qEzEHcSNiZ6LJZG+7VS01ZzKaUpvGB0cHK16voXFHSiztpTbtp963zWHUHWvDs6vtCgsn70ayGut27OsRrZq46dys+ITpitBB2NzRaw+enR+xrmUmy/liclh658U5H05UcOfohaPlfZ62nrtoEd73hVJUW+Z05fr0M597+N/9i6N1fvb1/++N//qfXrvx2vGDexyar/6Nv7NNTTrfvvjl3z7euz3/g39zsHhGvBsx8xRL96z50lf/gtNPvvlNy+u3PveVz4+nD7rFN7/7J96t02z2+TfePFg8G//Jn+lm6WmKbUYTxcpaePrFX5/ffHV5vkHuabkoHJgTc0JKerYEEO7dkS99Yu7a3Rxtzp3dpBWIEgvD+uVy/61XJpbT8vH
s4FbWrnCQNuWzdbs3UcPWOEKWhm1JykFK7NmRy5rHLnHDaYux6qQLzVnX3ECen29m2yhZbZV1G8ZplhdrzSolJUjG1tEzk7v5IGM0JmeG8UUbC6u4m3rc1rkrEXFA1kLCQqEGIkgIzAQaMEu14A1ciajVLcfONQ/NRYLDhISJmZmpqS1dXRLVHYwRS+KqjoYZD4ciRAZXPBMzk5u3bauq5sbhwiaMCrfC4F8iZmZDqdrl581vpck+/yO1u2eCG0vAAMOq7K2qyhweU/J85utmFDgxq2o1+3qNTi1eXPcms/a1TzGFR48+fu8nP+z629ev3Qgc+qKJYqhlbAi1/KUBJFkno3xRGVdpdwCAAVcy3ORDesxFFVEZ20MNwaFmzEQjo5iZyagFFyJCMASYoGrBXahAQ7tg6andCS2rEOHjAmrHX+BwXNYP9kPLjQbqexU2tRwSB4muxc3AlBv1MUIQ3chXLh28MT341w/eP+7WomsSlD4omwSAk0EBieYgLyDmaMVSmhYNwmNAApubJZFVp5JCoCYE8dIRRcCttOTB2c04QGTcLBfLH79/9xc//2uXf/U3bbuR/R1zFVc5uGrYEjjZFkzqvfSaVJVhhiGiYrBiDs0JmMRgDpFAEAe0KDPJ0wf3H4UHT+ePLu3uvfna1Rc33d2PHj05XZ6v7c79ZzevX7160ETaXD3aMVqGIJHYYYkJ1m/JCMTuxjHdvHnlaTmdW6EYgm5pgp/8eHb6wRhJQ6uzW5NrL6edkTEY8255TGffX3/3O+370/i5z7Wf+6LIqCwX7MHZsANri4NtC3LmwN6ZK5dSEEbL1flysU6pBZt5TmG07TccmAwcBFz54W7mgQMMkbkmBnAINeKAAhNzkDqvtWImbSrVrGMOK4QBYY6admsQDlY5knAza1ioohmdilbYzWCPqwnEVYXRxvT09KmqSoxsBmKDMXMAAqCosd2AKQBo9a0zmZdOjUFMF0YrMHHpFQAHKaqDZMIDE5n1TYpZHUNi71Dr1jJYYtRSzLQOn9wc1UpUp8huYC7mTKRuzGyBuBjUE4sBRpZiYqe+bIbxVaVWOYg8hFBtCHZR6lW7RS3Aw5CA5iCAKRBVWqcTQpD6EGIQS+j7Hn0GwBzcSEQqJh4UzMBBUkhW2Dxxm3r19+++H6YSx5FHjAk0qbLKbuQ93mnXe766IavMp2uVGZ6OsZlw19pCuLgRUlh1mp5spi9MH5yuroenE203uU8yknx/Mbv+zEFPnjYH+w/06mJ1bX9z76wHrLQOy8G6AAPfur5cLM9Yd3YP22ePHvyr/8/n/w//7QZ0dSoy3flf/vt/fjA9evMXv/zqW5+d3/jEgz/8/fF77+4AAbE0R/n7d37h6uEnfukLz3J8+PTet7a03C4jh46nt2/evrFVe/fHOZJ4EkO4/kJ+eG+ZKL/9WXnp9vLxQ/EE2oGUkbEiqfPY09KWAMZny6sP+/NPT055cTpZH4z2ObgmtAxjO7h2edoUXs/B2aRdLo+LQJRzaGPa6SAdGuboZBuJ0cLWezJRKOIOaLTRSZdG2qQ1T3jtb9++ZKc9Nmc5u+XAKzLtAiSokFpebZ2K+2D8Nc9khdhqS8cX1lsHAg3plhU8BQy8tiChGv/Jq7Kpdmp1ml5VVxx4ON8Ch1CnL4AXq1tYJg7gYiZxIGMMib9ONY8EVigENjaYBKmBobjgR9bZy/M3IyRDFzn0ifVMrU4bZ+bgF7Punx25dZs62Azd7CJrbEjQdqrkRrZitQ6vc6oLzQb5gJTy6v0zc7hZsSHq2LU4vfbmZ1KK9+59cPeD9/Mmv3zrdmyTE9emkM2dqgIyqBmYqji4vjx5lVbRhU8i0EVTfNHD/8yB+PxTGTFz5KKLsul8WZuU4OKBATHzYsFdQAyXrRt6C2hGiNnipfHOvq6exH4R/CmXc4TDqzOFrnIpOQsHcdaaT2popE1tiynCbuCCty/N3kizsj75u+Ojf8ebZ82IXAXerx/nbs62dqq27gSqIgEBNLa7kcZWRCQZGQFZy2h2ULpNiJFErHC3KSFEUIIxS4D6KKbc6TjtrtfLd374o9/+7b9/vIypRHf0ZkFgCEBPxlzxQYLO2VCkqBMbXVRcg9OEwEMsNSoUnyEpmZu8/NrNbLjz/g8/+nDx4Pj2dHr46VdfOzmd3/n4waYvH9y/m7u9q4czom2npt4FZWUTJyOFBQAcWLfZIvYuNR3l8/UoSaKPH98cb8vs1Tkdja59Kh59Qo7GccRhYmjzdJTy6kE+vnN+58/Dh9/cLN47+tJvxxsvlvmGpUgQ3olF3YJKJFs5EG2thBZOKRW4mC1NM3NryMTJoTFUzLBRJDcXlpqLkKJse61g52FHQgxiLSqOING4lHpKENUoslBdv+4hirubFzMwh9xnDsKMvnLe8dzzbjWLyM1ijKWYWqnhucTMIoPTDkg2aEmqrpAMtRQetW3O25qM6PAQqBowOAqIcs4gXBjeCWGAY9SBcpTYay8sdfJcy9dSioPCgMuukUkXlsdKocIF7dnNh5PVQSTFDGSRtcKlHd1mMyQ60M+aYFxkp/OFBhUgqb7n+tku3PnCVN0LAMyUwTGleiFWJ6KWMhqN9ma78/l82xUQuVtMo5TablNKUfOYUlQKbdwRmZzO17FlJFgyaintJ+xCLgvtEbXnh7I+yGdHMsd08eDUDspignXA1lb9eAtlmGVG6R70H4b2UD7u8rOk+ye8usQ7qlPLD2znxrKE8uRJmr7w/Zde++rDB+yRunWHhrsOgTL44Pqs34zf+Gf/J7Sj4//hfz649/5H/+M//9xv/Jej20f/8eu/u93mZVy89+63Hzx69IVf/sorv/m/X37moyff/nb705+MValf4F43vWe70/ELX/rSv/rGH4xhZs3e/rTZv6J90ThtHWmveXSwe6LdjVc+Q59+fUuNPTkZteOAYECfyVIwJYZkVzx7BoCFx3dO6NNpK3p3ena9m60JwqFPsEZTUTfSkwdToRIsu7kJmbtxwQzdWhE75Q14a0kpMDcgKGlnyaL0MlIbZWslR1/n9+6umiVeDIK+cKGglNfoNzrS6IUd6ijmloThQQsHIbdtFW0QXyhUyM2tyu3gF9zWcuHud3JY4IEHUXtQJkp18HNhzyMK9XAtXlBl0qUgMLkHluEwYXv+9RjibYeDkZjZ62/WsfigcqjhIlZKVScMw/CfHa7P9zqoF/uF+BpDyiYuNBG1s3Q4sw8LoqrJqPvZ2lk617kU8VAmE+rtXOCuBcRqGQARRQm9qrl1BifOfb55+9WY0t2f/vT+vQ9L7l954/U2NQBpr5KEaVjkDjveeu9TGN7hUEcE/MysWBlA1WxFPOS3DL/lgLho0flmcX/5cGPbyFIQzAIjEII5MyXihhBBogVjmRUvQXbyWL9HuWtMg+1BbkpZpHJ3eQJmZuNp69YZO3s0MlWLLZa6PGhHmNDU0y/utEvY00u3NvvXr8phkHFQT9RvNsvzB3dkfaLLR5v5A+o7rmGPxPBQEMejXVDKPR9eOlLV+emzgBQIZrASqKQ2JVNWSk
[base64-encoded binary data]
uuR/4pyTGO/e/eWDh58fXN6/8+U7VVU161ZUisIBEFGHyIVrzpa/OnPltOzD3Eg9svrw2enjPTNfeuNsKMKaS+uN2F6Coeb499/bO9jfWi5PxZjf+uru8TLsbm397OOnn79wpmAVOFAgrLr1jeLsD/7gzrLz0M6qRmlAosEHaMFiebRaLF6+efnbb5/+4KPn9WTLL1sKJbSwNmw5u1546ZgKr5aVoaxsGQm9iApBEXW3wsv746vTSQWuA59h8vTDp6YoKbjGh0bah//57rtvzH73vf21j2272tvfnUz+uG0XbNu2Wf7y7i/PlqtJPelD1fQnAKzr6lHVBWzvLK8djOZnC9v+Rr115Ud/+cN//A//3qJbnzk7+zL7hV0+Xzy8d9w3EtZRbeCJTrarwk72JBzPjy/VbxzYNz0C6NRGJ9wDoFxhIlN3VKJ4w2COyZAOLCJBJQAhERWIguqQg1NVO+z03dxjHYYsuT1IHMUBud1QkYeDnQ7UudbnAuExT0+/OI7Nd4dBSpscnf47qwnOUW4alh0inV3d1Ad5BxEnuiVtKlQaSk5cTP+kqRYWkWR/sXlPlGzkkBRSeVa2CQBfuOeEVPEqCfi8nDgHrgk6bFHTtM4hEyuJ2XDWGtKmUCBSkchqVFQjCGTI5E01CgjNl/F40ezZohenVrx2sWcteuNYLGMsRyfyy8/jN94cOR9u3yk5oPL9bCZNVL/6NHwGfPmtupaZe/pictDEl6z/G2oPWc9U+xMUJ03vrlSNX7LaQK4Vmi1pRNXnJW099PTKDrbL7tTP7QQez08szfqyhA/BWqm5b6NrZOTsKlrBSEfTMqjppO+WvvGt89XU1eWla+b6awueuNOHfiXrwxaXxXRBjItSX548+9XPf/T4iLktw6JjAo1QlW2NUXS+C57a5aScdhAv5KRYzJfXeXKr2qoObvgmqmevTEE5UgxaAJKwDTggEDit9YWqkhBZ0qAkQzUYIEByiDyXpYIAS0txat++9dp7t14/eSwQoaBGJHglAcEDXskTAlEPksGOKmE6nI8zQqr2QBawEBAVAAHVpS+99/Te0QwBMm3tuogli5hqanjUQaxlCAvJOnIZ4LYP+sAKmxzXgXB+k0gMUcLAkmtdVE37RYzhRIyCIkZh5qhR+gjAWsODs3/XttbaGPqNFZRIRDIuSUShTcKj8ypXKUuBL7agmiQ9A3g16A7yKHNznXHe3CJGVRbOL6L5ehHLwDlO8DaxpKhyYRaVa/ZcKec3kDHfhD4l8t2FEDV0x8PfEw0q/0DDv2XrPFFmTmuIUqrlIWZkNkpCyQahhw44fPKM3iBfKVCdU1ZVh7EcDT8DJFM7B52TxM3PaYtCRJm5LNzZ2fL9n/0MzO+8/c61a9dCkLYN1joiTtIpMPfQsXUPnzeTUdWGs4LKSBKC//1vTmutvvTS7NHh6YefL+tRITaysJf1735979J0+mJ98l/++ujlly+/Npvu7Iyu70x/ce+JakvEjCBaAXDrk9/7h6/0yiGeOS5641XJ9B3bVBtGgifm+Zn/ra8ffHLvF89CUVkKHUSEq2J9on2BwjKTDW2AAQyBB88JIVK0Da6+Ur61v/fiED6sl12sy8luhWcnvVWx0Ahx2/jJz+/d3vM3b81az66Cq6tdrkIIztnbb7y5OO0Q+no2+eCXHwAQtM8efn57/6rvl6rh+MV4vXzOpy92X9r5l//uX7z77jtwxXS7Pp28ONitb7y91yx9s5gfPQim7Q8fPpofH914/fpXJt/a5pudnCiNIOqxNGpy1smnKwoiNEBjiAoEiZ4NW0uEAAoZwKHk+Cr5B04I8ZBuzj3dNsL7xBPU4RgTMNSy+RYMS7cG7EdV8yaR4eHn5S0u1K+QXAunznN4vBKTyLD+IV1q2jTkw7dKzp6GKRlupDT+hdfSTOk/t0vPGqns1JevxxeFCHquP0CGoi+MkVPKzJqAPFEXzf70JJq1VUNDIEqGNL9ZlSg02E+SVSiTEAlYEUFgDYoA7VWNwpOsoSvMF7oIRe3qJcUKtoje8YS3gjOeOnAPW/Ln8373hd653s9ucb/m2GB03e9Maqr7j+99bhZudf21vuJuVM1ltLtAnLVSl766xbK37qzhyRJmSZOey9j4urfTtT5wdLLCngXvzfbU94f+qI3HVoKrjCupqiqYOrSGdyn0lyYdeaEozhajSTmeij8L8bQ3XVHby/7S7U6nIoooQDChRRfEOGKZd3FrW+FfcG3sylhGDxfCWl3LrBBYxyOunvuolq2AvEig10ZbPN6R6SwchqIrQqcUwMl0CoGIiexQK0ZNBQ2Sj1dMvytoVAXBEqXJPSuyXXBi9dsquvbMH4wuxWOVhVrjSELUHlBFT4hAIATAqyZf9ZDRTKLcGqaNfJrU3RYwgIVakA3BXn3ta2e/u7v4q+/Vi4UdjTWOyHI/F6wLpplI6EMYuQJx7QrnymnTemamRO8iaBYzJJavDPOSTWqhpAgASCExKoFiDERsSPKgKNWjIoW1tXPrtk2eVKKaUR2R5ICR0GVRPV/QMHxMv/aH03qP9NpMDB68JrCZEg25OveLho0O3OiBTHFhUJM92YeKXjZJ/QL+vMHNhpdBhuJTmwsawL0LcHLyCqLN91J27h7C0Pk+hQ1sfbEiP+dj6pCSkQfGuZw/r+UwvL2B8J1dAbB5JyIh0a8UQNqONjhjK3M6vubkZPHTn/xkVFVf+/p75ajyPgAAGxG1BUNg2EYRllAV9PHT2MgYLiiUhRxD18xl83/5F7/4p7//+lYpa2jpfWtQ23BlVi3mR32o3ruzMx2VbiKLp+1/+vThJ8eds5WQqMDZ0CwW33lrcuVgOj9pKifRdCrMMUREhgF3CKyhsdWkDy2qyd9+7/K//PMX2N1RtATHvQunkSorABuCpt6XwJBkliZQQYHibz5sX648daJksBYbww03OV4vbG2dkxh8j8iT+qMHj9/48vTF0fLex4+B9uHnj9/5yp3lslmtF2+/9bYbV3/27/7041/8DYD9KwefP3p2ehCJadWooBU+2eeDbdSfzR/+5D9/b7Lz8lvvvPnyletPjj/zpun98uBgV84WP/5P3y9L892/9Rtf+Y1vQ+tVmJdcmLD2rIWykB9EN2l7oDJUSchoDF4lgKJqiAJVMcnME6LDfAaAilJaiIRBlr8p5vJXztuC88udVxRdKDyhuLDHns7P8YZpmJMZNsgNMITFgcQ8VKKbl6JBGJDnJrlk1+EFQeenOLMrL8j6NlaR+QpvIKi0QvQcHdcBb9/I94enGPr4dB0VA9eKh7IWm7oWhpkGAzxOsQKZt5hWPaY9LmminbCHqJrESAWUEIkShOEVhrhn3yoa+HlsxKwwGhsslRw8URc01NViMvMQkhCh+skJXbuJ8Z7FSXATw/thvCPVDtuzddvfN91uPLgWJmFF9ibb+YRXPG3ttb51W7u7p+VsHnyQaq1j00A9Lp3i8zE/tBXeb3df5XKHX9uqd6vw6LBtj+J9Mt5H2iomZiIhiGwZdDvTIwT0JFyACsvGOJ5wrPmzdfnZ++S
KvauvirUBQn4xmA2w9P3l8f4iHr0I3u295NtnpL5yxUpIGD2krlwDx14hLNZ4lpnS/lnnXr/lGtu2HXq1rcBDe2XEwLBiU5IYxvA9lC84UnDaCpc6NUp7BwCCGaiIIIWtEKTsP/ro02t7IwcTIjH1xKqSs28aMiNvWUqr7NORzIzb4SQakIMStCByqhZiSd3aL8vtW5Pfv3361/8rPvl5ZUpw1fslrVW2p7T245HRkW3vfr57c09QA2CGRg/4jLfk3GYEQsP8Qwb2bupPu9YXhbVskLiYIRbWiIgxrIoYg2HXNM3pYr63u9v3AZl0PIgRsyXyIP4fKss0zMzOWXldZ6rrWfRCp7u52ILBxWoAfzfoLZNKdnvSVM1TAoCSQkc1u3xkNEsx/HKHYJFej3VoHQaS9ab1pFxqKzYxizZeIRk+5gzT5RxLabg1vFD2sqeNv8+FPwPbEsPFB5BG8jRk+iGKIHG0N/zqi0/BbAQRSBk3f3Ka/EfAGuTJk6cHB9def+M2lLvOq8AYJjJKGnqATIyJejdbLZrHiygVVTqBBua+l/o//vT4j3/v4J/+3u1CgzKA0Ntgw9apP3lwfHKwt2fPWtZRx/T+3YcffLCUwtSTEZNvJLrS+D7e3A7f/Y2D1XxZuiKEVe9RWMOIwhSkdyLCEEcWwVpZLBav3d6/+eHR88ZXjnvxEmGdk5ZiFDG9YYJN3QiI0wBJRbQkNIt28chf26kXy95o7AVXdm38dBk8VqvOCJuCUIZHC/nJX/7ZJw/uni2fTrdmVeU++tXjEP3x8dGTZz+JcDx//uaBAnDy9Nq1kuSkovEq9JUQ6xaz+dXTX+1Pxq4p967auij2ZzvXLl9Zzv10Mnn+/Nlf/fLfgpo/+KP/7p2335nPVXBqWCU0jLHBWsnhvBZMZzD5OasgEse8YkDSlg+JssGckxpV0985Q8pZorr5s2mFU9Y5z6nIw5IvzF++MDLefG0z3uXhMV84eBe72mGh0PCec35Nd5CgGlVZBV/opXPFDKQ1CQSoJAORvMPhi3clY1jnWBeQ8fHN+zmHrnOcoGHpS/b1SpaSecGwDDvbktEzBspi1ieqqpL0AgM1w0UMUFYSpJFj/il60jLJVqAgASIhmOCj7YEW62A7nnbWriAV1pa3SukdBJN5aIN4R4z5aXvv2L715ZbUmJ5ffttN9+LRqbfTAuV6vfjImUncnjR96dfuDHUn1Zrc6Xh6uiiO/+Zh+ORxYEutRE/a634stpaYWw6w9hM/2Q2udpeDFME8OPOfW16sOhWzPRvDrIpYUihZq+lkiRb9mYTWh7XUQNUGK6i3SoQ+PH8oiyNBUJ4IGYh3UszXq6ePH02v2fFk7HbfOjz+ZXP6V6U3Uu+A2Kh9cXrWmBpmFHpxzAHhphlRUdW3bslCWFgACAwoGuVgWaJISG0DkYIM5SkAE6sKq4S8dATMNPDo8hHZ5GC1y8XnKAvmztrT1sMYYiBoIFbKrhfY3BkihghhULGD0gMYVsBQAxQgR+QIhWhBVDu44Odcud3v/pPF1VfnP/mzHS/W7J0eNzs7L3nbBhXrsT5t3f4NCY5Yock9RwbHWFKNyV5qgKc0NU2kiFEsc+EKBm18/IvCJlN9iWqNMcaKikJevfXq1tb0IozLYKG8y1N0AGFz45hnpJul9BuKm6Y7wVCBaExBIhXCUS9qDPNnzpzWAaUWiJDScDJJJwKRDut+mSlmICkVAvmGDSypTcOp2WAjx640efoCfnYRkt6UA2ltmjEmf1YMJLvPDBFL9r2ijX9WuupKwxPqBUiAzkUauPjSRJydcHMdwSn3w1gMqg+T2+P8gTMJMSuZq1evWeu6NojA2GTZwUScsUoFYBRaVjg5DPOuntQjH3yl3AVrC+bJ9Ps/f/rNl3Y/enR4vMR06j3ipFoGdn/6s6Nr9dM339gbuzpGzI8Wt29v39wZ/endw8JwDRtUXXPyR3/0ahNi5OiMakuFCQIJEiyZzDTgwChZ2k8fPO788uDajXffGP2bHy/Hdgz0VNpe1PiyiOgTLmqgZtPiCQIYMEbQVw/uLV76CkkgNr36vhr5W1e5rArxwbd+0YTD5eKEq8MmXN67dOPqrb5viELX9eye3bz5uoTCS6h3X4X7BIC3Ty1WDPN80TQrrbiuqpZMQzdWCx9HGKEaL548WnWLejJ+9ZWX7v7qx//qX/3r0PVvvPHGnTduH53MFVoQQSwoKp2RFqJdYucSJa7lEPLzVsGYeWXSA8KWVGJeEwZB7nV1k++Gs4gLKIxiMAYfZj4p4Gg6IBf633wFIhSDa2VCjs9P+AY0GkKWpgS8OZmbHvU839HwD5lqlY54HtUMjo+5NkUmialk+xDVbCxDdD6ghSidz60yyCSabTiYMHiP5LedPlMeClwQwwwfVWI6M5HmKLHxTteNnIWMiAoiZRJYVtIn8+1UUQDMbCkZokSgVzUAm7COgZQn4ABquZlNlyCRvlBvpSt5j3sQx629ObeteKMWz8G+DO4qt03/0oEtKz28F3buiCm3nh4tV/7Q3bgcpDw+mpz144b3ltXe/Gk8/OH325OF8qU1dsPK21a8L2YtX239aW1bLj9edlfndju0GvjzCfy12cvXXmlt8/nRvS2R2zu7anqjp31ACGICEGAjWbaWnVutq6POVogM2d3m44UBonOtUcvE0GbdHrf2kuyXjN7ZS2++F++te3+XbYhRSIlspcQlF5PJ5Oik25rUr6wcXd6vqnp1IkUwAYAjEKRTcsS+Em6zyWK+3IrMt6XBtncjAOfBZ3DjaJ6XZNl6PF4uwzvfuWm4FA6MVoQ0GwNtJiyaGYpIiwc4B+IcXNMVcqqWUBJK0cJQCTiBPWpbHlc8ZgnHe197K06q1Q/+pKpgRhEF2LC1kzA/ml66Vu7srU6X1pSqBERiVvIgQIOIqkTkzV3IjSgTEjdfITGmXh8QIopRyDBxwmQoagBZY/nDD39x4+YrX//6NzovCXxOdhwbL6eL/nOJpaEhGGuQP1NKm8IS30FENLkuG9KBJ6ybAjeBT4PuOo2SB470OScqdXOKpLhPPNG8kmEDlCXycNZbAQM/NPXZw8Bok2gvpt30DJI5lhtPSpJBzLDBxM+ppqpKwzNj426yYWblaHo+S5Zc09Pmm5mYBwFwDnCaBxWb0V/mdUITWAnLLMqA1nWyKoOxho2RzP3KdpjpIEoUR/x43rCtWInItSQFLKsXEfY82Xa/s/+64/sfPJcpm6Adh+LqrtzZn9ZVZdCr0G+993Lp6HDu0S9LM5Mytifz/91vXR1dmiwWx67c8r6RUgzICkVjoF5D8BLbxdqV4zAaXd2ZLTp08eTmte3dyQkMG3W9b4uiC1wquVJcVNEABKNgBqmE3Bra4FA8W71YS2lJmEIgFWn/1p2dEMjYQhh96GI3+tnHL5Zz1gq+7RaLo739nb296yfzErFsmmXFrqfTk6MOAMRNJvWqORvVPZPM1/3E6Nl8HqPZv2RePD/S8GjCH95/aHen335w/4MffP9/dW6yvz/53/zTP259XxF6BA8tSFRETG
QEDBDH0G/FdBxSvoGERKhljtDIsCnup6OdLyrAJMgbe5ExlJSfJO2Pz3ToBLKI5glngnUGxzTN60GVhZUG7kU6n4nbtMnhQ+Ibsq3+WtLNB50HBgOyOiCHu8QuTveGh3/dbJFLbz6tEUuIL51/OOcJO2YL2wE7TgY4nBUiw928cFNzWBNVEOsmQqRhj55jXMPFJjsAYJmzYVLPraIIAEM3BZNQSsYaIF57R72gAAVCIGlE2UgL9rDBBF8sy0owLSk4FpbjzkwRI9C5uisdFKtTBD/ZJtc/nHf7M1dMUN2013f56dwjKvGD02eT/mi9bffW3dbxx/fnP/t0/vhFCEZwdVmMcBZ47dCKbRCi3FzTcYPjykbmzzpyvfNlmO3fuPnKK/PTw5OzJ1uVv784nml/aRsV2QJAzOCIsLAhu+on85ZVfXcmpuDFUXf2lKhqXQUhQ+J7UOi267qc2O5FgFcimr769unjFxqW1jJICzJNLzGGNjaM0cwUJaN6/TasWiFUxBJtRBBVC4gGRzY4IGT74IxKpPCc5hipp0j6zYDcUiVGam7lALIvX3vls88eA7MOFcNDC6GeWZOf4GY/KylUo+YDFgf8l9K1UCkEjmEJpaJgU0ZyqA2NMGrdsVvCxMlsW6t+6439p79yZWHW68euegWWjY/9rB7vfb1dWacTQasQRUHaM1kRSRPspOGQtOg4LcoWAGqIQx+stTo4H4YQE5c59Z4pTapQ7yNgt3d2wqabRxCJxOfUjA1RkgGVmKYreZCTbLBoQMHTM5jUECf9Yuabb0AzTWEod7lpDJj5n9m+RoigEpSZ2NiE6A/kYxBBZaBlqKbCPAxRg4aMmn8PxNkQbegaOBfyLKpsrIio9ABZZ/N4S7M2io1JEcAYm7elDWFB0kJfggFpkmhgU9OnCJXiJW8AcWJOY2AaenjC5vFCYE4blFMtqAQFg3WAEDT1jGSYkrAMEVEldQSU+AAwpbA+fMHBToIIoCxCYELV+uXbd658fPf5g0P/nW8efPTkaTt2pS8WIX79yuTmzcnR83XTxmpaPTp8UVbmBx88LmxJNviz9bfesLdvXzo8WVtjtW8JsAm3twzpJDu2RDawBVHwwch0OiNQNdl689rJzx43zvqGJ6rE6KG+5wlJb00h6hPaYJiCCEHbGJ0NZ8tu0aympQ19D1Kn4bSVqEv4StFZ1tLQb37j1Y/vvjien+5dKtheqqri6ORuuw7OzZrmtNrZMqbb238OwHI1cfuL7rAqx0BVuYm1Jbu1CFRke9eVdgYw9i/Pqr3/8Z//26//xo0nj/zVazc/P/oo9Fvj0YRZq7KWGJTUiIiykrIGRZFtX0hyC4jIQVWJrCQZLhkCAlFaIAcoKyLlLZKiIsSiA44TY0yreNIRVgVp3hYKTZvKRFSJVMTkHEwqKmzBIfk6gZFYWnHIp0kEkZY0ZEIhBoPnoXIfzhEwaNpB+XjnILBJ0gxQWhHGRMRKw44QOc+ZmwJluDGgYdYiWcKIPGXKWHBSzX+BgpWjEQbO4uBZMwSCC55b0KigvGWbQFbzxufh5hOYWQQ0DKtEk3RMAPTSOrbwJE7Je0/SHza4uW068Jp5Wco0Los6srGxVxFBVwssBcO9hnZa6iu33nxw7+eP48o1/Jcfhzu2fGXWvny7fDRfH/7Ehp16KXjxq7s7oV7w7pzr5Ycftyh73lli0lJVrDgulQKkNdIF7vVSb64v4lEtYMO274LeePfOlRs3P/r4Z409xj6c5Wmw07OjWbesZ205skWoySKUYMfWY3R0LIeHdGatGm9HOHrKfSvGtXXVsXVsDc0L3372fBUrr6NrLWnoUe3uOX3dH31UsAudeg6FUrAcRMS4nZMz2j6YvXprfRJpxOoVQcMIbNIGQrAPEll6YraAsHI60sib9KwoyIpIn2HFYVSi57MSKGC36r03bl+yZkRapaKJTdH3LQhMFpB0+EUDFGyMSMgz2UTxZRAKhbFUQgvmCeACgWo2NZ35+afHjycH10ZbHGMM6N3E8OWSpFkvHu6apty/EZY+Lis3svW0bOZCWkJFJShFkEl4Y8IviQJSd6UB+R4QlIrCpqTBJi2/S3rzfOxFpLA2xnjjxvWDg4NRNQp9r5CowmwAAZOECAgzITlLAyAGSW74L0yENoZ4iqEB1XSrktQhpTXS8w7z3GkqPwNy7T98NeXlJJclQ5Qa+hijKQo+Z2CnrlR4E0KQ9NyatBIb1ljerHpRAJXCgEgKTJvRU6Z2SH6Bi+bMGe84xwQ1xjg0w+ffO/TxumFZqw701xQRRGJMKx/MpiXJJ5JZJe17sSLJCIEHcQgTGDBpxAxigmNChBCZlKXR0+lZqGwp0hMDbKNASQ1NHh/rOy9fP7gSmzPfw22Lay0XvHg6X759dY8rfPyre/uXty9Nx//LDz9jNy5L9j5cn/R/5ztfOlqGopQoSUWD/IkKMYEts2EOQcQQhJitY1L1vZ8v+rffmP3o40dtvWO5YS4gvWpt0IJEsnsLAAblcsyIF2WluDhrdkdjzz0ZqxEKX2hF0gAATAhuvuhfvf2VO/T64mxO7GJYz6bTtl2rkLO2WZ/5lk5ObgAoXFytHhJNgmegD/Ki99E6T6zWxp29amdmiWXk9n75/n86uDb6vT+a/J//Tx9sTXfu3797797jr73zHedG42kJuLreEomlcyKCWIA0hpCmvWkpH6DBBk3TG4WiV9FELLLpJKDPx4QywAyR1D0iEaM150MRca7o+x5RiDg1mmwsJ9u3rPKnRGWRMKymzjks0wIx+HAQITE/hpPM2aRgeNzwa9W8HSRd64EUSUTn0G7OjenYn5swY4PDpwHTFzO3ptycTSXPSZS0+f8NZYw2L3LOd94AWMOuhwEYz0+UJgHpQakWZ4VCTAJD80HLdDIgWwoJMQSt4xqIqia0obB1+OzZsl1Mb8z8KrqiqtzMr44YvJqUBiOLNct2WqfMKirranz5lXf+tlQv3T/83ivXz4rLW+8/tgunX/v66MWiOIaD1gI+s1sjni6l8FIvHOocOwABAABJREFU7WwFeyaThsfwTEulFlgF03FcB9uWffCvqp4em/sjw8rbB7NXb7zx4d331+GIDyyUX27lWxpehu40Z9PlfHt8VhZhbSyXXEPK5dqeLcjZWMCGyLKWoKymIBSuLIgNiY/LMIqr3hwuqJ5t+3VbB5FiOr526/jscNkFLmYBUFgJKAszm8ymhydbd94qXNkXnrdYe9GeqANsZhiDGYa4M+gNRCN6hRUFIJnxAQsEYsfomYEMSwiGJZLJP8OGYCVEppqoSnRE79fGjIiVSERC2nLFyRZOIkDGQiVmq3JRIsM8UjEF100rrmY7NT33o5k5ezr/rz/5d+Mbu6N66xvf/K2Dvctr8KXf+b3VD/5dVcvJ049mL+2ilr169/Tx/eNHqzvvvLs69kxqTMJde8kaVlX1qZ48x5wuYj5pbdfmokJZkKAcZm7bth7XT548ffr06en85MrBVR8iDQ50GjRfekmbDDih2irISSKRMnRIgtjoDXVIhQQMy08ye
pS7u7zve6CS5EGRpOs7lM5Dp5j639TvioiRzbBnA6D9WtmcX0jOcyHOnzKpTsFDkYCUZdMISkXSykDQ0GALFBupSR6J5fbkwrvNlMxNnEhGH9gg4pqNwTYVRoZNNPNXZBMfldjmSQlbIYbSMFcwyCvTEjFNoFahgAVZQAtjmlW7aEu7XcZIRilqBCRGGZfV+0+7iruDHXf34WJcVUFgpePJ1r3n/k/lybdv73zza68dN6s//dH9UI9rbn2sbZj/oz9448x3iUPBCrZpfBNTy5PmakwqBFcYEBJqIKIECX2c7GztT+Ozds2WCTYiEpoIC2VCBOXPOn14UaRiDhSY9Xi1vl1U0nlmjRBnuXJWdAIo1Kj4eSMzdmzd9u6IoIWzhkkkNs3yQMUVrNL2AQCMsWXJZTkKsfetF227NqzX0ix906xXzerk8GjdNJHuPX92apj+x//bPe9lb2/n9VffffVmWJy16/Z4Pm/myw+mk/39nW+Jcr1VWMOj0YS4E2VWF0PyABAToiBImtAjfc2m3Y+DKjcV7wMbnhKQrMPMggHVGJm573tjTOIqhmTALmI4w71JuRZC2KgPMthMuTD/4qUgDI/IxuubAvp8GqyUfp0Di2m4m5RKduRLDmYMefYL2TfdqCEla/5XGmYyF8ZAQ97PfycgbmhlOjxZLlUSW+08CQtApElGtWm583hK2BhOw6i0BymhZUypOklXLGTGKgAIabo+sfNalIg9Tv78hzt3bjHgz2R3Z+tg6+CT508YlqCnky0SjnTaAzC0XDZv33n3K19973F/uvf2dz/+sT8p/nO33T97NvVLmR6bk960zjWtERkdYbtGseByqeVCqpbHcy29jsYraCPaCK8IK7Heyip4b8a9/XpHddd8VODWl77y9Mnp4ePHuMYj779yJm/O2AkvQlFRVQWSU1/yvFp6PWX7AmaZ7gqLBAnM0nMACxXVttY741K1atrFodkWU7Hb3wnbo/7F4eK4qV57s7g0leW78sndzk27WIdQt9VW249wdDK7em3nu19ehWgLlShJjase5Jk6QqdoCQ3IE1rIuqJYEnuIV3DilwNelYGQJgsDQJ0aoBQchcBWqoBCeILghKmgSDYYZlEJqhGJiiRBFURBYhLIyBDMITCiBdQwnPcsACYSHbQSO8ODX9wv9rd5ahf6/E/f/9d3bt+5/srtnbenZ7+0e7PZ4tln7S/t/m//0fH9F3/+0x+8dflAa9UFIVREKhqR3gAxYAc2sGxQ0KSpIhAxi4gxRmKUGA2bEGIiVTAxM5WlA0DMVw4ORqM6xXhiZrCKxBgs25Rc0x00IFECy4UpEQ0UDx3a4Fzd02YtRC6wN9koZ9XMX8w3NPM10iyTEkNaMLjJ5uzJxpjN9zP4/A5n45uLuQ1DcX0hz2ZoV5LlSgaG2VrLzMmAk9nQBTXIhT8bcHkocDJV9fxBF/9rgN8pGXki437nug9rLXLgzZUJDRsVB6yPmU3qdCGJxz/8TwGyDBFlVRZEJhal0rlHp6uWpyOwwrKSqGHN+tSa47WdcTUqvvOVl//kLx+1jD6W0sbRKDx4Vnz6+YPf/NrutUsTkVChY+Z+Of/Db+3V29X8rGUCgmMraVCXfSFSs6ZRJIBhDacBokjSZVoRWJZX9ujh/WArK6EzYpXDgElwAiLzZ5bFIwyIMXG+XgttMZi1FBvI0/d+9rkpqrGlakuvXZpe3q6k9Q1akIp2gEqI6/WpKSSEnslWZdnLKYAY4YpRXY8lBmttWZZb2/VsZ2SMYcPeB+dIxATPXdsszg4///zpl+546OjHf/U9tq6s5oXdfuXGN6Znu4eHx2H2+aOj719p/9BW5eHh08mknkx32/WJKaiwzlpAC6YoMqgkFGySVlBS7TvgsQPoOtwPzWSLSKCUvFURYyRiSKZBqUiURHvLp5yZoCISUw89VN4D52LQ+13YypCvoUo2Tx5633zIM7zyhcOfHzZ0vZsiMvm2Sr6eoE0uH7CnZIk5tOQX8u7F7Dv8w4XX+sK9yvrDFBmGJYqbV8inJ/UbeYEbKM+pU4WXXDTJ5Eemjz87FYohCfAWpqCSpFr99Jd4MdfJJJxpNSoWj5eHnzwuqlIWhD4a4ZN6HDn0bDtvb+4fHMruZ2trMH74V5++eNx8Y3q5rfwp7ayaeCNEjKmZumM/jYQjqg+oXnQ819GSSy/jpjfWs1kjNqJrlkZoTbEL5OEa9F6oL95mHIw4PHl4dPbwerG42pgb1kzYLTyYtYBtufRm6uGbGCbVkbZBJsREQoAIRQoKm8hZPvT7uyc3JmrJlTB9WNLZ3v5eebDTT5wJJpTt/KMf8MtfxcvvNPOqmx+2Ui/NyLcU6pntj/f+7pvhlRrHQWZp70HKwZAW1AINdK2wqh2BFYawJgoV1BJY2RI8QIokAmaiOJzK8w4nVWq2HJVRezUCl9xrlAjSM7OhfJqF2CYOIxGzWiAw80A5ZsNjEScoqCyrseokSgWemDDB1beu4+FHvKtlXVk2dw9/cv/4o/G0uvKbtw+fHIXRtoT5j/78/z1zozf+1u2xHdkKGAMda1sSFJzoQCkLimpMq0UotSaZuixpcZCoMBEZG2NgyyrD1nrRKCoK3/so0rZtlJwMBCIiRWGJqI+RQEPeSdMohkgimqtuMNjNvaTc8tGFe0YJZLzgIZ/qT4Vi4HErAJhN2U6sGmNUY/gCs0rTQCvFigsKoOyQoVBK5DslZtIMfA963/zUQzMKhSpxVqNqli5hw+rYEDJzd5KPRzo8SpvtS78WSvIh0gEBTxmLByibL7BGNbNJsjVlknmYKERgJhYhTQZqWa9joZwahqhKsAALCLAAx4jCFCdL6XU8ho1Jp2EYsWMYUGG76qgt1qfdl25Mt7ZGfQOEOK60CUVXtNu1u3al/JM//5XylmOzWPp3X7Zvf3n3cLEeGeol7TbhQW6uF4oepbzMGKoBMEm0klxcBO7Oa9vf/+Q5Yxo0MlOnzPC5+cu8xUytYeYgYIa1YbHmvi8sl0AhyoXjk1CsvbN975/LBx8v9rfDt756UKINfRSF955B1o2sCctlr7LoexekA9C2fjzxRKQabTSiFbEQ1szUdv74aDHZmtbVDCYAYXt25er1axI4BBV86exsyUZXq1XTrsC4dHl3FX6yszdazZcTi/n86IOff26L0dtfeXM2Gy0WK8NuNKptUTArICxWSQCRuFlZnbOvZkBHzDnBXok0SfNSgrHWSIwJRFYghuCcY7YxRmiyFjovhVW/kCCHvL7JVkOm4uE2bbJgEtulBcR6Xnj+N8DSZuT0xWzJPNCXkcvadLOIN1YeFyLBF580A+bDygj9wrMPDlnQC9K9AY4W/PofhuT6jygVpsIgZqtghYBM6vAlyZtIABINgdQRe6md5fWj5fFfvv90ZG3bbq0JBe599Pnx0eFkr4ohsmoQYWA12V23i4PJ1ru/8/u+mT9tj9BWP7/3mTTyZbk558ULsZcr62v7eN4+OJoqu0Dc9XVk1wSs2J1h2qvlVmxv0QRtoA1Ry9oyOkHg0HtaoxAODnsCfHz3
2paY3YqDDdIvfQMPKcrW9K1UjZZrrixq5npUNxQRRNBD1mS1sH2HSCGY2pnn1/fXlwx74a2tnUu3rVA9MrEeL9k2/kXsD4tr+/P5akWP3StfbR8/X99/uHP9cjxdL9sHN3fHr70y6uVYKrMsyr4W6oAe5Ek76AooQZaICUXebqSq1BNaCy0v2L30RCCK6Xc2BJNcFAIMFrvuO0EoxyUAsFhn/dJbU/iuc4VhsiF0hlmhEjOJ17CNGlVgjFW1URgo2ZVSQeuAMdGUsUXdOF79ynX3gZSXZSUd11sV1RA+7V6sxLudsOJ+FIt20VRbNz87eXrt9Xd1F9qIGoIY7itJSzFFIGytEYGSsUYBkrRd2rLECKgmyUpicJgkg1CRyFSIRlUlxmxndz6f13WdTIkS2dIYBtD3PQyLgqEySGlTngppoElUFAXS7moBVDJCiyyWlwFyvrCCBQlASlgVbXgWeWJKFx+WXSE30iGCRDUmAU8iCh5gtNyUSzLDVuZMg0/1d7r0OXWkRaCcLfGGQAgCGTYxCjE4lc8ZHZUUARhpJfAgaFbZnKiLNUSKEYnsIiqiajauYTIA1ch2Y+epF7kD1k3XnjC3jLwlUNqmyJIZbcpEhmFASnBEYHKnTWmohBAQkOzCDSOqwqOi7/+NDy8WZ8d+azb65Wfz0VZZcKysm5/Mf/s3D370/vFh5+pR0fT9rGx/99u3Fo1x1HdCliGQQqwa2RB3EritymwAmOTpSoAxAJJVr1l62b+yfefyo4/n3hXcaWuN8wGWhgXKSS5AWaKiZAVSFJO+iz4UhkdRCb239WjbjCPVbiSu98r88NRvfbj46pvUrL0AIWhhaL30xAHsjLL3XYwAUNpx8P1Kz0AyqcfCoghRgkDatnVlWZY2xFZCYLZNPGvWAWLZhhDIcgWW2fYIIhJhLfr+7wWRtW2Zm9s7t2698spHd3++Xh1x3Ds8ujvZux9kz9A+8yVrdpnH1qIouCiqGNvUBw+/XGZYNhANqsBQlmVcR5WyTW6yf5cY+62tyXK1kuCrqhJVY+yAPyf9PWc0WS8UptnGMlnDpsOe0/M5YpRXlQj9enLEr+fadIEuDIaRTOpSx5tQnqRX0mw9iQvp/7999g2eBZz33PqFh1IagSFvLc0vmrmKUMAg19rpvxOLAiJgNiAeyB8FYBIFg/NmB1ZlUlIUnUhh4b1f/9cfnRpe2XH7+fH2bczn3cfvf1TBYCFUIQbhiSWwb5pie7L35jc+etEfXL7WHFcffPDDJtrJ5OZJdXbqzAuuxeMw6CFmC+u8TE58c0W3JJZPgVZq7dE30ayZ2yiN6Ep5zbwmXZOF6ztxvvIxBu8h7Nm6Ebxn7lWahhyYBYGCHTfBNzxu0ZwFp3DAuKyClUDKCuEANsoeMbINCPXWo1cvY9uVMfpRoHI2Gn9Z5NRvV169nXG9c20xubl4sTr6xT3/+r6985vF6i/2/JPJlI4PX7x37c7re/F48TDYaqplY0o/KsK4bFFpRzgDLUEOVBAaZWK2pIVSB2FgXaoAaFPtlArFYYioGVODKEI6nVYgVeWsMyEIW27O1n3rt+qiXXcqbjSyLGZQ8PEmfWjayAXLXIk4MIUy2ImRmrBFsiU0Jd43Ae1ydLwGX65DQQ+rQGQo1h3Wj3S0617d4+Bw3C6WR1geX7nzOy2TmRluSER0aRAs4DgbUBJxJCQfMC8iRAIha0zfB7akghiCTZOkPIoUYmVmWGbwk0ePDw4Odvf2ut4TQaIMDbSSMSCiND/P9OJcwEOhkDAIsoiTFMkky2QeqmdW+oJhwCYxbxBsbOr7jFyDMlBMQ7N7Tv9SIgMyfAHvvzDcBaW+U9MU8vxmnwNsuc5ikKoBiE0KJnmVoWrqKKIKZfpYgppTOklRLK9bhYCZgkQMPdwmUF1smBOcmD/8wetDkVFuDNtUiYh5Q5qzEBYBc8EgoMjtL1lFQSBVw4Y4WpBRMqRE5AybGMJRU7pyR8mzImVBYwUiQgL1k4l965XpTlXtXy4Otmb/4a8PGx4v58d/+I1X50f+7iM/3SqDeOnj3/nmfjGZdM9BTtkEhXVwIl3sM4OXmY3hvg8h9CzsHDEnAy/RZMIKBqSAD6F6782Dj/78yWiyI13gAGstp72VoCCWyakSUKSfEQIytSCsWrc72Qq9MFfClsupNCJ9JVIBKMu+CZGlQiyYpCpGVcEhVkAAR5CHTME+Rfi+b0R6EZrPu6qW2WyLBVGCZY4afd+WrjJwfWgJViFEPkaRwMEu0ViPyFxYp300ZP3Y2q165uNUtOGievedb7XdEaKtqtsoTAwnQT4J4a9F1fCMcNnyri22J8UeGzbGETNIVKOiFwjUbEY65yrzZGUAMFMIwVq7t7P9/s8/XC6Wnz948JV33rl1+/Xl2ZlNHuAb1Y3mS5EpgprAMN0YRxMNgmLKGNIGxt1k/c2NSwd08/fNNkylROI452AOZzt3skMvnF8xtb1fyK8XQaMEGF8AyLOST/ND000UHqZClF1slQA16WprZrElfIhjfjYDcHL6ZM4jc4JJq5V0cIExQkrMRhYf3Ts+vl9ceuWAhJ/N3enZw2dPm6eHbmYFTEHgrIhQCD11X3v726/tf/WTw3sL4p//+O76ZF6XrzZ01ozrVb08Cq6qzaIyd++2D1ajWDrR3ZdRr/rGe6m8+i5KJ1VLaKIsYsJvueOwForRCrRoDVCwDb2wX4fWaAG/FONEe+LWUqPryjo7WcXlispSRhbBQU7Rz6p5ASExplehENWEYEqPJ7Pp/PLYjghiqIyxCN2WK0ZXxPZ11dlbbz9e8+MPP17JxN68s4661/xy7ys3j99/cKPvivDk3Vvv7raPnfZexp5dy3WrzsO1qFbl+Kwca5EQusxGF1Yw1CSzOCHvBjQwndEwhP00GkhmHTaVnRaEejIiYiWRKK60VTFCj3qy5btWojJZ0TD80pVZRfIegtNFY21RV5VUSjVi5VGzmRpMlS6p7vTPjx/+H/6H37n3wY8PP/75pCLHpotrp5MrB5NnzdFZs2zdHnbte7/znR03K80kHK8xsQqgFQ2M1YhpWB6ASJQuYT9s42aAVKKxnCFZ5NVgkqqMZN0gKkFU+cre3mf3Pj06Orpx42YyvASQ9cRIi4mIcL5oTAfKFYiJ05Nn+S9l2CoZGw/spJy4aJMKE/Caym4emJbIde3Az2Kkadn57RWoamb7pscjc7N56MuJkLKdDAXxxpUeecSfCiziCzwTGvL4BivLCTX7bCiQaOQ5gCSP97SLcbMcVS+07wpoztdJZpWXHFEmfig2TTM21HAaPuAkVbTMvGl8BxPTAnAKIwCoSAbjbAqgVzY0Mj2vj+3aXrok6ZcSwQztxVpbQM+a4tWq+s239+4+bLoVffrs1Jpa2EZbAaOPHy+onkDgg31pdvbl128cz0NZMwSiVkgjOrWmAMvwhw0n3tzw+cTBLizjSFCwLRq/fvnm7Pbu44+XccsZYXBYAhNCCbYEa20lgRQFYJXZSmF0FMQverk8sqg8YMuJnR7w08csFSBkQG1TrispRnU
VDCgCvTEKcSLeWmUOITRMBkBZWOKtrm0UQRCYkWjMRAUzNU0rQb165gAWkFiqAAnBg4IE8dJYC+aaaQREoFj3PZHPrhC2Jbgtd6AUCBzjgVFEjSH2vp8rjhSLrn20bu42KIlGlrcLu1u5mSsrY5xlkIGqSBwM3olEJPmDE5GCxnW9bps//dP/VNf1u+9+9f79X+3v70nobVGkTVmqmrakJLHDOQeCN1k2JzrdHPOsD8b5TCcnu3SJzrvenMnTBU+ZckO9TIgQZxF8Tq8ywNCcr2q6+blpHhApGizqkEdHSRp1nqQzoyIhYLKRMWXTK+ahwVbkUciF9t2aQrMtEjMbgFTYJPQoS4HTqMoQTIxgV/qVPbz3INhq2R3vLheX5u2z/zy5d3xqfAiNQ1CQ0AgUsF40t978St3s/+R7P7tx5+bnH947evh8VO0c+XhtUjQGj705dvXtm+XDtdw9kaWtmGQShJs2tD33hLX4Xqkj1yOsgi7BnrGGWInbkb0qAoTVUwwhBmJvU8NbSAEf1QMuxg7cu4bDmCbz2BJXworggYll1NXScVAYNYoQKIC0fHRlS6+4MvJaWl9GVxoes5YwZUGVbVwhPB1/efdsDjsbXZ2fTU6fmqK8cmPWHX4yrZbv3ZqyPK1JBYtWyiXXrZZeixajmiclpovt7VBYsWkVkYpoEroD0AhVxTrJ9hRQEEQDEDkRI8BAXoLHbC0oVrWFIUSBZe0DEITIFKaiUSJCA5QG+6oSwaYoJbaGbFVNmUeoiCcUSuG6oi0NWz3vUrFfvDj94PBv/vn+9Vu//fZifbU7/PTo6LDbm11eLg+nYVxVs8fzIy2aByft54/cnX/8f2weRYlsIyCkniFAD+2dQpJhpyKALJMQF6pKmvyJbGIpK4SsiapJbsWGlUyUtBMNpHy2bG7denW6tRUlAhJCtNakO0HEnLch5CF5lMhMNDiZpLqSgGFhbiYeG8sxBhCLJB3AhVkUshVFMtJKVweZJnHOKqYoOV2pDvgvUorWgXyRBY4KgaR+NDWvm/SmAoFQXid6XpGzDpOnpOXKJtjId9qem9SpKjMHEQMCksf9ID4GooLV4EKxkTI3M5Fm2ouC0oYlZqtpMKURw4eQP4e0L2so5kGWiQELMiQGZFUdYCEOsCDHbIPCOgeCjBLoolxj3WhTVZMZQt9HZYCtBokOfYhRCuZjy//xo+Of/MXDr33lyueH0tJkIm3N04+O/JGYkkpfdHEZvvH1S+tQGVqHEJldHvcjEjSlBjAxcwhRAVtYZhN6n11QrSEYhXBClWMUjmtf/NZv7H/6vzzX+nLwQTFhKWAKwAFG1QlbFWNROVCoy66MbLktsHW1CEu/1rAaSVUxdsCF5eCDqFjDtt+9NhstfN/bbrWU0BFCadcRAWLHda3qAdgCIm01Tpy5GGK37qK12fwyCdGieMOWiGBtpMYwE8OCReC4NGxURWJHTL33RFw4y6qpAAbFLiyTV1qUACY2piq4rPaIrqiI1hFAG45DWMZ45vsHzeoTLF1lty0qV01cVVuumB2IiRSIxpC1Bgrv/eefP/js/r03Xn99b+/S//P/8X//9t/+7vZs92y5ZLb5VlHWQRAPW4SSeiCFPzl3qsro33AXUm4bwOQ0Xzq3rtzks4FyqMlcJodXKOd3C8rLGBV5uy/pINREYpcNEmEFmXzvLiDPw2VMhXqW8mbciwdX+3zDBotPM8SULMiP+aWYlZVoAJwNwAwGG4AiMSfmhLAFS1JZsw2xDMHwaPS8Xa8W7cskno/uffCzxXTCdoRVCME4i95EimE0mr50cKtyo91Q3/vxBw+e3CuNiVARnV623ZofPquBkbX29IVvjn0Aj1RuL3i3i9yTSPBtxZ2vIvVtoI65syCWHQgHBPSzSxGFDWLVxWdP7BmxI+kKNqJ9kEDcs3aK0tC6C7Y8K2sTd1fauWCIW5bggMgyLc/KqRqIiEPAMVfL126ZmV01c9+iqB1NmAvBOMJRZ6eraJcY99PZ1piq9RGap4U7c2erUlw8uPzOW5dubPmnp8cHzAutXNHZEFoatVQVKiwqoACeVzOUghpoQR0QoAkRcwSvMCqRCMl+JzJbSEjjmORRKiqp77NuaicHW5EjOUYPMEG5X/tCrC1tWAdjK1IE6dloAtwkBsOVqjg3YR4HF+FgZ8Sz6McROxp2/NU9//TnHy4/ePD5g0/a+vi7b1/57nv6y/dP7917dG1vtLf18qI/2hrLmaKe2evXJ2Fxj3duIUYJCsmUM2qhPQNsqYCygFIcUBWGEJFNA05OJSenm4VMGkouOWndQlTo4eFh71tjTOwDM9vCBu9tYUOIheEocVPBbmpSzdImXOAgJY2fkiVVCUGYEyCMRDORjZFWou1kBnG2vNChp9ZkA6XAuTwlA0+aJ4UYCJASQgbWmNMANaFoIB7K7USAvHDZcyIfEjCdC6KQ3w8oCfiH0ZuKbrbBIIeFgd7JzEMe1QvtfeKy5kRFwGCRm51LNuaYKXzk+Z9YyrUBAxDlgW/FROd9sFIJLciQrYzagIJjCWONQOsJPpuf+i0rWxE9G056UVbE4MWQmcX49ChcKekP/9mtWTX5BvP/+leP7j23dREePltbmlju214vV6tbL7/SdJ5BRJYoq055GNyngir5dJrCqqpIZGPTY1I05kE2KrFXZ5bSH1y58e4ry7963FbjquidcCFaiBqmKkanaolKLSmMGNzZymJs767Dve8/WGsXsJa+KEYcatPCa21LYdeS4Vju2FUUp45cjbbyrWcaTUe2axc6LA0TCVDHWahLriyYQrIACCEApIgp40Klj56yzJ9M3u2lCekRDaxsC5YoofciYgtrYaLEPKQACEGUNWpI1BAFG6uIgI7cLpX7nKf+3kvj/UJC24bjxfEhAMBa66wtGbZtu8Xp6enp2dOnT6fT6Zfu3Dk+Pvn+93/wO7/zd26/cevkZMnGYtgxxswiuqE9b5JnRns2Z+0C0pv92PMt2OiOcgUIgDLD7tfmR5svbHDsxHhSzk50GFAwztdk0Fedj4KS4o/yTTyvFTb5GAPaDNUUkZPdCVFihaQyncnIcO80s5qZwEpJqmcG1YCV9HVhY62KAciSBbGSNWojuNfCWubKHLdnrx/sbp8cn27Vz0Y1EUsQFi4MRRHh6KFfuvXmWKZH95/t3bh+9+HH8WhR7W5LiF17NrlpPvqlf/wLevN1R0fNk4fGn2xV8G+ssB+7rifnmVCv1xgHy8GKZ2phtfe2Z/TMpXz399wrb8F31rrw+Nj/6b8p1qcarWGDYLXzruFgAwpbtLb3kBUaM6ssW+kIJyJjZRYttrRUlNNqVRVt7ONI9cH1G+1rO27tl4Txti2ntoW3Y/Z2vJCi59E6jBbmUtH7Xbt2o9bNYHxThHXbPpzy5PLO7rRbBGZGKHTU+ZHj0CBU6BzVhgAYge21PBtV1BMqUEcIEFGU4EjwQA9EoxpFVYgNgcBEqZayBFH0qRGyl27uuCl5L8rgio23fduXhQtNiBSYiZVX86UGGk+qtvWAVJWLoQWcKosRVM
CYpCSpAm8RzYrJFOXywfMP/+wPvvGKWT0Nh599/J/e//o7o1cnonxmF9Icfjg5eOON2dVPn59Ky26++/LkreeYH0/H1JMGcAO0UKdgQbBkRGFJOU9GEsBCChjmlEo0ikCUCURsmJB6VkKIQUS7rr9167W//PF/ffTw4Z0331w2rWHYwkoIzBxC2Nw8zms6cylr2WyyjgwUNhA0Zt84ZpvsKSVIlMj58TjHnDQXR5kzMgSHVD5TwoiQm2cdquPNFIiENjpFnDe4eSOCDjrpEANn1Qxp8o7IQRkXRLo5wKhqCD0Zk9B7TauUE4yczTTS55txP00uJTmIpdxLiqFuGDic+aljerec7XkIACOtoFAGMxOLpgUFFpTAlQKwqqkDdiAHFHBQp+rAVbJZi1qQKPMkPgqdbo8xFg7WWPQiCIbEiwP5vilKVzdvf+VltOH50+XE2oPL5uPnPZWjghCDWIt4HL7yzl5V8mItrrCqMRUTGUIQHUQmlIDmYdzNSfPGxuAC74YIpigTzbHx+NbXr3/4+X3FDI4lGGZHUrIZiVgyBg5SgmtYx+oEYHa4OaPZ7i67UMH2Nqzb0Cz8k2U4nXsvMBMTK9aJIYmucIFld7zV+9iuj6azPd8uo1gAMTZga61K6KwlQWA2EnsAhS1m29ttt/a+U8S0m5nYEGfvl9T7B+kJnEWmLN57EIKIIws4AAoedMAwhR2KUgBgY11hQdyH3loJIRCzKjGPtuopACaO0YOkj20MIUrs+9i2XtTMZns7O3tdtz58+nSyNfnHf/yPy6o8njfOuRAkbx4TBbBZA7ihqdNgznKe+XCejBN8hQF20g06lBFq3QgtMNzB4R+HdYCbXJ0HzMMTbprrXGduioHc6ib6pA4OrJsMT5mQeVFdlNX5g+kVYTMuzrwKlqTxTZMqbFKvBWj4CwNWQWBLQlGNpYKpCDAKg4KVjPQxjjB9+fpvrA/HcoIyPNrePrIlIxgxKhKCt8yykBu33tgZ77lo9orrH/zwg+OHD6qS/Ys1jKsKOw7y9AODOR/9Kj74gNtW90P/Wu93QhTpC2+NurOuLbyYwAhRAlGIS0fObMOQXJrwnbf93OPJw0C0/MlP3epEYgEofOMFZAGppQks6NVbIMwQSeZbhUoZMIlU9TIPQORRJxSs2WIbi6avZotXb8kOSdltsyPLHUKg6qwaL/qSeOojGjurZTHidhvtyK7He1U3v7R81OBoxTN5esgnthlNQLBltbTOW0iFfq2lVU8aFBLIeCr7SR2UxQs8WJgBScsCK9JAFER7CxRAr7CEoJLXshOl5ikqsR3vVR49RjCGgxdVGDYSJfgAGIlakLqJU08iZG1FCCLCbFUtUaUl1CmPqDGNKys7Y5321+r5wx//5yv2ZLw6Xdz/4Y26XS67j/59O9t2l0fbW+Ws2tqay9zoYYfWUdU8+uDZ+39x7b3LTT3pPetaUIMaUqdagAKrMrMVFEnFLKBE/wOEE8AWk6cSG06ywqROEgGKouhVmLlwBRS7u7uphE+THmarQN4clS6Zgi9IBYatR3kbX776RDIMP0ViGpIZwzFKbgM3G09zmNZsYrwpzNMdFD6XuKhuNJGbTJlysL0QdeT80m5CDGgzLEre2MluJSHEkvhQgx7pPAowAzFt5N0sNBVJRpKkw0ok2kyUaQho50zvjQgrBxjlQQk1xI8Baqc8ImcogwyBVRM/04ItUJIawBEVghLWwgElUAKV9EVgx1RHNUwM2ZZHNhTXCxSBIqtRlagCDkoRsed41uDa+F998HS8PLl56dL9RfvpU9R7NixiFHbFlg+rseM3Xr18tq6sTYt9kNXOWWg+BGtKvjYDfztPbvhCOM0LyEjZx1DZuu1kd+/yt96Y/9mvYjUpBc5gBJQxOpTMFcQpTYgdY8R9GYjBZfj2794MvfZqYD0JiB1z/BpiWIR7D5cvFke0yyYtj1v62Wzr6aePfvgXf+nP+i9/6bW33nqjXZ8BsNZFaYBgHUsc9l6DrWHDzJys1433fUohzMYYLgoHlRglYTFRYgzROdfHINo5V2roY9SAkJxIVAUqUSSokILAyewM5GPlClvYooAKTDo7ClBMLHmJxhoGFUVdOoDYGL5y+RqxEBJXH8ayCNp12zSdK4oYY4JWVDXdr4R3nCdaHkTwenFTKW1O7IXUmDvW4S/Z3Z0GoAkD5jHMmC4UF+dz1+F0/9oV/IKxZP67KNK6sFQjyyD1T+XOr91iEEElcyIBUEKI7PCEnC33lQGjwpopQEwwRCa1vykxExcSiNkSnMBqASZEKz4oVdy76O684p/+VXn8/GS3vs/MFaOPEYoIVm6b9uD69du335xM6vXz5pcPHjx7+kuGD96xCb30k6l59rHOnxrbuuBD5enVZnE1UFD2impyEELXzk+UR9P9l5uluKs36Fe/ondfrl65HWa161arH37fvP+B279sr15FtcUfPxi/9CamV1bf/6+2dPXB7W7+uH/wg9HLL8cvv8HNs3Dygf3qN+X21eUv/oKrcdyb4umHEYhwQU622YlwRAWp29lXHm7dmDcyrrYKKwiYB4abBF8vtejdqNdqFo8m1MyMd7og38czrrspxjcWz7r4pJk7/MI371yrrY1+RWa6mpRrxsRybdAzqYJ7uJZGTVgsRlMesbQSfGDHcEwlqAUVIMeqTFIqEUPz+qLk6ZzOGDEgtqi57dvCOWExBmoR2wiGmxbih1OSDBUFaNmwE3hViLJQYMuoEJ1Uu5XZKXQSd7jZpeMP7v7ldv/wMty1KZYP2smLcsdty6kxy9EEB5fG05ktWg1Xbrtj+Mdherp+uIujI+HjrQM0hCVQAQ5UMSLEG4m6mZYQGyClWpLgmYmtBZFIDInhY1mDnGcMpqKwq6ZZrxtjk780RYkm2fEkyloWamKjoNmsABvK59xcIsFdOcEhxigSkRy4+CLGtRmZ0kbST1+8urk7zPE+UUJymjSDb8PgypMus26EQDKIKlJ+M2QHpnEyb09pY5ANp0iD7GZLRNZy0jvkZ5OIzCXRNLeVmL5n0/puGvvNJolEw04057waOgcbynyxVBsktFo0cgbNKOUthQVZFUeUsq8Dai4ZFVBCS9VKuRKui+CgFbOzrsJROz8d2eoyUYAygohCOUbuGQHaguoyLMMre/j9d96ZBPOjn312dxFKYSMSG5WAtrNv7k4vXdp9dtyWzgmCNSQSksMZNKbfWO6GE75eWAyenRfiu0ZNfAruQcbWEMsGiyV//e1XPvj0swZjsItSMFfihGtoDYwIY5KRwMGMnHFYqz/idrTrVn0LZmYv8CLMzlQT/tL+vsi0tT0j+jbOdrfuvf/JD/7rX/yzf/pPntyf/+qDX37VftWHFYCqqkgUFCV6VxYinUrHieBPvWo0xtZ1XY8QYgghDgdSibhwJhGJCy7Sj1mgqKqKCDFE7733fXI0NIaT5UYfAlPa9ZmEf1g3rbehsB5ErijS7gwV8bGzNiNJoolTwCLBdwGAihhbQDXEbH3DzNkHngjMGmOa7CZKZt5ocgF+Pj+e55XiecP5h
Xr2XN59kbdI57n54rVE7n15KL7l/AxcyJtfzL6b2TMRIJwXjuYqIUfgC1Lj/DLJLoyGQXUUTZJ6ERBbpIDNRoWJWIihTCBoIuBS7oOJQVaECcbCReVYgC0Zg047Yfi6sjA0M/z1d/s/f/hsymeVMyFqaSRGCWyJJ+Pptdde80X34NnhbOvK88/uh/WyHNngRWFNGZZPzOctVG3he+tkf9HuLk0LQAIf3JLf+HuLH/6AlydX//4/a4+betLym7cXL47Hb7wbnZXjo+UnHxRXdujSNPi2mGyvP/1YrCnfencZyL7zjr10g6fj+L0jV1+3v/kPzLX99eNP5Oi+u/q23Lzu1y2/8VI76rv/+fD6b/1R25/JRz9ieipPBWYZ7OVrv/s7713a+vDBycPj0yDOwoutA08XUolxIboJjifUTrmtwoJ9oAZxhablkbtE10fr0NGx/7hRrPuvXa6rKclpkBp1vbTwSiLEHq5BW+q65rFH37oCBbhijqwFIoQNsSUqQBESkvcCJVnLgEOGYVUGWWVRAzHCliEwylqo9gLLLEZtVIEa7dZdgaJwRWgDENkapqKNvipGtjLBBapUnKqT9vizVfNYVk+39Ozk/ok786v73Uv1DbuuH372+Y3rs8MX79/7iUxvHJhdG6cN7+nh8YdX33ulhLeyNC7EitURlQQHsgQLDUbFEFuVCIRUrYr0gBpm0ZBpjmCwiCJKQgRIRDIWzTg6OrLWgpJVjzJlFmVq+y5e0AvYKSHbECZkmDSmnlgsGyJEVeZkw01JHpod9fKlSnV5xqo2rWSe+SbfnNxsDWmbBqbIhWt+DoWBRJW+cOGRGtaIAIDBaVlyqrEpp/VNVBmuqiJ1vSqkg+PuYI6f30SCz3jAtXOwuUAfy2l3MLTauEykDVDC2X0+MT9FGEqSwn2aIICJGWqBAXlGBQc4RUWoQTXxiDCC1tHVjNq26rcvlz/96Mzvj7auRI1ITUMSO8MzWqVWi6CdhO0b1f3j+Z/86d2yms52R74JQYRDEQlYrN5761rri7IQlQBAECQttiIwjKBHHnzr5peiLCIZQ09gtaZqBCzKAi4ii8KyC1LYyfTbd07/7QftZHsniEENU7HW0Bo0Zh0p1aS1oVpQIQY+mypvCxthMQIrFi6AEb2Hb9tp5YR1tFNqxx/86K8/vvfxP/rf/+PZZPrv//R7r77yKgrDXAOIwWtaus4SxBsqyKhq0OwWsNk3rYYt2bywllOZxSQi1piEmibSOxEMG7YM1d4HEBXWJEW+MRZtAwWRFNaWVRlDXLfrKOJ9YLa+89a6uq6iCigE6Q25tCMo/X8IPUAiEkJkDnVd27x9EslZdpMEYUyyMQ8BzOmtZo9SlbQqDJvdpRdTY74d5+xofBGphiYVYqZh5IuxMQHH5ptS9kVSNXxBNJhJGIPMPfOlNzQxGkpw6PD4bFTwRYONzY02qeFPliGaLF05fRSUMLvMliDDCXNWo5p637QP2JISyIrYaAUlYCI5g7aXwvHEIAClTL50+/jR7cf9k74m6yPBcFRDrm/We1ev7r91s2JeLye/+vC+l9OirPpeil4ksjpnNdz5UqA+/PhvQnFcTfpoA/cUrHC4cSALdevtvTf/WI9X4d7P7D/8+8e/et/t2H6vkuNDloWcfWJefctemYqX9cmj9hd/Pn7znWYW+LDhmwfSITx/4h/fn/z27zYtuwfHtjNNO+WF7z6f11e/DJLjP/kPozffO7v1G271rBtN4vFn86dHp5Ev/d2/P3/pzdrr7Vf2Xf30Z4eNJW7tpBXjUQJ2Sqe7tJzq2VTPTCvaGn8a7ZJ1JWcQExwL69xYo/ePVnwSf+PmxGxzEFYJznmu5qQxAA2XY1QdRo0UTbnFI9ZOQxHIchqcSVBZp57HAgr4VFkNJKx0VJkgdtk009l261tSYWIhZWNE1MAKC7MaTohHFxELFF3nq8r4tmXrprtbTVyriHFGLLjSSGFivS7XYf5k/2C1NxNmHr998OJXT+J68uat+uTomPsxXrTP+4+rtSuDG7n6629cvf3unU+b06nbXsD3poLVPOawgAUpq1KaAUOZNq5+0JjpGVmzSQNHI/G9CSQSY4wiWhZ2a2srhLAhGiEz+3kY5WGw19jcveTiiLQgjxPhIr2gIdHsD6Z6XmbLoIcfAMvzwVKm9mzc6AmZwbRhTxM2/aXmxDogaWl2QBAV3QSR/E1Dq7npUZEJ0ol18oWKffiPwa4rr45htkRJgkWZgZWGoYlKndaE0xefJTGrTfKXkFy8SM78IUQefPCZLYOUmIjihgumDDGAIRjAAYUSqwONFCPVGjyB1owpY6SoRUoUNUIdPm7a6WXawZw4CiHCKBWqGhytxrV61mVXuOKFhDduuD/+p+9YH//Fv//cVROOwbDxx3pjWuxd2fPrwDYmY06RJNhIb40BFgmbD0sl2anpANSnED3UUsIAO5APIGYHG8Us1/zGW68e3H9w2huuWUrhmrVWGauZMo+IpxIq2NqgYqvQ7f7aa3W7bNUGExgWkYT6sOZptxTfLhoOh4f+5z/64PDjR//kH/13+9s79//m8alffvnrX14sVlxYALGHtapEqpLs0wEa7JRtFJ96S6gnJpt3YORIgOQzkho6GujBadWOMRZuVENErEmseyGFK1z+vJidKwMHYhKRoijY2M531hpnbSCUphTAkAnBEwEwIrHt1pspDwkvl4GZq6pkNswENkmtkZRgxuSCYJP4MgsxTU43eezXsOH8Rfo1lPi8SR0Ap/PREJT+G3h4uEME4uStzhe+nMt9wqBtHoiVep7TNycrfX+6gHRBLp8+csplOgHEZDRn4kT0S1orKLIEFbAKq4lFoTaNgQmWYKOyFIKSbUFSQMYUTk5DUdnRKMTIE1OPyu5vv3vy/twxZKR9Hww4+G7/2tXZ3uXPju+/tH/16fz53Yc/K8dRBNTbSKCo0sq49K8W1eG6nZ64NwIuWdd7YXGEWMnelu50b7/l7/3EP/jx9B/+9/2NHfnF0fT3vtpfs90koLc27MmBxaU1irGbFO3J5fF3v3b2eD65VrePl+XYnS1PZr/zbXvzlj1cyH6lbWsnu7661N07nL5zvbn3uCr2yjvffPZXDyav705feW8595Ob39759lv+2vXTpp9LtK7fu7r7ZjX/2YPjx7ES5xD4Cs9rzEsst3QpZ6oroInV0vqlyBIuUNDAvReyVYmG4r2Fr0Pz9quT2AfqwSM4hGm9ilo0WLaYtGgqKawtpaqoJhIiT+oJPcirWtEAimnOaBks2RMUGP6iEFu4Qgl9jGqJDRMRDCy72AhbMs70q16CuMo5OIpUlC7GABCDgkQ4RI7GGa5YbZwUcmtv/Mn3Pri5b779zsvPPvylK0bd4+6me+PeRw8a8lVVVa668frLk5e2H7afLuFHbvqd77x74mT97NTOlgV5KVxqfGFyu5QSV+LZDysUBlEB5/0yMQql3X+sacSa1asQNqxQNvbk+Dh1mMzMZMCZ5CwSedibltpB5MuZq/CUgEHnwt8QB9g5dRwpKlA2fuIsbKJN
tEhYnyh4WNYtqpl/RUOqSsEeSsqS3a0uBAaVhL9LnhLniRIASrvHNKuDMGCnqbwYoPj8ToaYlMI0pWVviQOS6KwDuJ1eNm9FVR0IYkNPkNSFmgUfiZcyxBNKcYIEFGNQJWPy3NdkDwo7QGcmGVMwlzoCTYAJYaJSKcaKLZGJmC0jY5GJTEbRP/zMNaf7u2d1v4RBCFHYIFoBB4ORTpqybqvKnNHK62oc16t2sWjvfMk9eBh9DIWiY//221tVOTn2c6cV4JkV0jGXIq1qTJsF0jEZ3DShiUgGBjSNMPKYgpNY2YYIW5RWrVdWa23HdrT9zS9f/Tc/bSZ7U6mVathtE0aCCXgMGVvsiBrIdseIh2fP608/W76IHk0vPFa11kynbmt799LuwaPDxYf3Pjw9jOq66SuT/89//J/KrvZn+O2/+x07tevGF7UFYLtaIqnCWiPSQAWIqlGS1TgbRZDkci7CnMcLeePyMLMYUk4+DDGKcACzcy4jJ6qWrQKMIqn3AITQg8i5go0FaYixqkpmlhBtUYR8a6QorAiMgTEj58p2vW67lmEBFIUrSweCxCAaLAprbbqe1trsp2Z4OKr5UnzBJOM8X57/XaAYQB3azJN1CIPpyxvLMyJizizCCy3zBX5VpjQkvWJK5MkgLWMJJpfFUcUMUlDkCXXuvEU3hnfZOCA9vQiZJCmUaKxDRiJYyKZIJIk2IUxsUiAhYsApGaKSyJJaMHMhYsGOUBvmIA60y4t7RzZGe/mKBOZaOsbJpGtnKKreelJW8fDL7uCdG3feeOf02Unv6dHyvtnulBx7QdvJyFBgiO5cLhfc/vVd7DR8IA1pFUwctevORn3/+2dHH8ZwTHxsb8zCJY2f/aDeOmmna20f2D2KLdu/+5vVl27EJ0/kZLn4/C8nU+nm9/Ds0bqchM+X5vJNllN77cbpT/5DvPdg9uXvtgZ2eh3PPX/w03j8hJsX8M/aw4dh+eD09O/wb70zeuPO+JvvdBM0TWutWOkYplGpt7fv3N47feiftmaXn4/gK6jhNTzgiXuSVkITtSPbMffw60hcBReb3ggR1N8l3p6Gg4LVAkbgYV0ora8QHHqHvuJQMbclR45qlS2TJS3AjqkiCYogEhQgJpvAURUhTts+owpsVVVRwmjkYIEePrTBixVr2UoPAkWN1hWFtdKKahSItTa0LMIao61cKHphUQZVxhmPrl3PH/9v//7vfPK9//DB957zSVWd0DsHO8sn97z3vl9Mph7Nzt1PPziSk93b9e7+fvM8Ftfb/R320BH8WcVwHDlSQVQgqhhjSayIMBk2RrRjptwJcq488zJRm4JExkrBQrCWxVmeHx+NRuXu7pUYAoNjjApJAxrLhohERIkYnCr9kKyKhUUCooAQolhr2VioeN9XVRVCSPfZWqMZKU6egxwVbFlFSGDYiEiSDmZLUIZms1YgqXspixNBnEhiqnQ+piJQQgWzQJBAkDQSSwnwfFadLeOVCMSqkk0xmECUlk6EBLtJQkWQtN7MlFXQkvB8EhFiJHcwEEQ0+1RGTYLmPAtNP4OCMCw2Tk4mKX6wS0PfZHgkxCl2MVkip+oEFrDCkRyRY64VNdEWaEKYQsdKk96VYUuOrxj5/mefbDscSFOhY+kBG0CeDQmFaCZo5qg62u6m4xcN/CjMDqqrl2bcre49eWS2tnrt92bt2+/cXizFsjWcdxcrK3EPLlJ5opJoQSGRytPIgzlXP5rofZRX/BhYVQaxRIlsoMYGB2uXZ3Lnzq2fPfrZQ4eq1jiJqC2PjUxC2Cl4om4Gq13NfsvOZb56+PysdsGRsdz3XnyPkxe+D5N6cu+bX79R3rqxvFkcPV7qPL71xnYlo23e3S5my+frYlqhUSQ0IxRAkKDMBSARkQygPashaNAYKTq2gABRMw8xpxtio3mNs+YDJmDDKip51H3eszFIkctSJiKGYe4lSAwMNpTOj4IoimYaclrkzQTVEAKYylFdj+rAGn1PIOusRLHkCIjI6xkSNp5VvEKqQTKuEzEU4XpxwjLojjZjEU1bvDWNS84nKptGV3OzmnmHg3Zwswcx8/6H9HteyaZzPDAthjY2F6iZWpLa1jwGSug0mDhdSkr+AcljgInSymFRIAbApaSLPCpKpSAzF1CjWgCGkHTzFmTVWC2AUsUU5Lo44sKor6xW62pCEp7q8bHYb2BKsNI5efrRIx0HbFXSixEbuu7GrduPFi/Ko0+uXDn46Z//+MXqYTGz8IAVATGs9EG8zC7zfInFwuzUGpaB1fNKQwk7tuqO0Ry7idOdCpOl/OJ/IqtaF/79fykOFVoH3fr6t8qnC1+E/s7LeGLw2Wf93c9sQ7okbgv/9APbo7mrxTqWltc/+Z+1Mbwzkvd/aIsz//gTM4pSBbu0toL1beUqO3ttLo1v1iDHIgXBiF8yUIg1O//gteNPn91dnD7Z1s7hdCY9+ajRxg7ixYFjE0PHaKMGFI51LQFirSWRcNbcfWx3JuORo9gjBIwCsw2G2kp7A+84FBTWBuQMexWrQpFAEQwoWyLDBFZtIxQhZs/ixDMiUDJZTUYI6Zq5qnSGQhcQIBALrid1bGPbthY29B7M1lqUDobJog++D95yZUoSEwvtpFm+efuVq5fs//fuve3RFjXs++WPf/DnriudKTQK1li+OPbFero7Cctw94MP3vmDO4TOQRx6i8zmI0vJX8QULCvJey1JNgpVSs6J+VKIYavZ/zlZDathEmiUAGERtc4ZY0IIqtLHaNhYYzOuKLnkj0PxnDjSMQqZQRBkLOf9SFAg7VBTVWtt7hYNq4hhSutkkRijCW6KIesnNuwLzcimTYuATP5CuqUM6CAz+v9X4QuS0wBlq8jNoIryJ6Iby4GsO6ShKMFGHK0J+2ImQxnS19zWpd2im8UDct5oaLYg2MQgPcfXFAoZlpxv+gdJU6tEQ8hiCRrY00aVCU4LRg0dQWuRCqjBY+gW0ZR0RiPTzsJiwmf18tnq6PHLldsLZxWtLLywC2yh5Nm0xNtxMtatuXbruMM2VPvTJw/bh0+Pnz2IdKnSVWwW4e985ZKbFeu2N9ZFCUyFUlRERUDKR2wNCwExjfSAtD5LBUGDAqob4bMZoHkmtaxWQYoC7ABnrZOKv/OdvX/+k5anldSI23BbUXY4TKKZxBktamom5MdhMXGrrerMCZhbChpLE200MI2ND4+OX+LqjTs3j9fu2Y2rGuvlo8Z1lk759Pi0qEpVoR4AtCDqmcQBEbBsVDWABGqIlcAsnBx10jKVvCdiQEiS2JaH1JWWBSlRpgbQME0ZfrubkbIkKoHmr2xoyRhQkWxCtXFQywVkHt9aBltIcgshYmMApYEMeX68NOefPGS9gDZvWt7sS5PTaPLYGab1yAr+DWJNQ/sO0KD9xvATZUcqzlsgcmGSJg8ybCBKhWficWo2xkmwlOTAcqEZzwTJvHBhIH8OP6BmFj4rqTFEZGhjaCXpnjPAqlaVhslcoVoABZHTgtUqKpCDuh41rKW+IrhVuTWZHz60hx+OLIfVfX7pNVhtzhbHfFR
fERva4NhLvPrqS7/1+3/vFz/5+dOnT9xe+WT9md2NHEtpW+kYlZN1YICnlndw+Ji39q6+/uYb+MmHePgxFWwq17vWlRVPWKdRtwJqyFZBJUyJitjatu57+/qXtl65Rhz60q6e/TI8v1di2UxmjWVwr0YLY3QpzGTJiCsiBac9FovoiCaVaUGlcb5qj5fy3e++8pu/185Dv+zsVjUvCraGWCV4Q8qhH4fuhj3Zsg/jR//6ar012tsuw4ksDK8ga7XCwVO/VNvZsA79qTfGUAAFsGEhYaiyPXm++GgiXx3vWhZ1iDZMKr9QgXpLwcJb7dROKPkGOaUSshZYFVaFGmPYsooDAtTmHm0zZgSsYQqQZOoUokgUTctQBWyZySznywIFMzPQrlsJsDUni38GO+dQCDNrVCJyzkgIdWl+/vP3l4vlVKcxCNRVlRop+i5e3rt+48bN4xfzZVy3zSlPUNnJbLp3HDix/EIImYcPYraC9I6IoYAgeUblTCy5fZNgjO29B2CtDTEaNhniYSJm3wUwWeu2trejBJM25BFE04CJkr2wtTbdEmZWETYco6Swkqrj5AidpEDDrDfV6SSa2I6k2Y4HIhqkZ2YiFoGqUDakzAEghaOYkuXgTpXC1mZOutmNoiBFdtNKVZQZJsdpZWnUZBdLREqD8kgya/o8tCg2pM2cXEPEEAmHKJbzbgbuLn6raJoJnI/BzsPZedjVlNuz8CivF6S0M5w0re9iwIoYwIEtKqAASqAGamACmQJTxbbOcDrF2Qwnr07OPv3wbtU2e3XYxaLEwiLtH7OebFD2ahc0HcGbvmmLNZvpj3788bOwa9iVsxjBneDGpfD2ewfLF4EsYEu0GhAITGmQBlZNEKgYBgYj0cH+HwSOKmDLKX+QJTIAiTKDiZ3CGnaAE6ngqKXw2pdv3nr2s88diu1Kp16mzDNXl82uLmYyn5qzCVZOmik3E2lt74XEQEwAVHoLLKsbCNfKvX7tFBNT7x4GN7056194Y41jivMgUahIYKiwZfhk5xk1h+xhx1RyK0wu6KzJxgbDqGI4ldnyYiAe6AY+1dSD5jVopJvpaxJHp29IzKONDWk624yM8RLSBh8dYF1VVeYQomUmaLzAk2KbymLJNXHaXJ20OF9IvRfqv7zKV9PFTxIlyizAX1MKpU5WhrxKF28AE2RzZUTTg/K0STVzSzZvIH0G52h4Rp3t4F2TPhdNwSp/DjSojNUkgnXidiTWm7GqUM3ZNyv3wJrWKiTnOBSEQmFVC4KDTfI20kpRwpbcVZGdovTlzriZnyz+y38cT8TbxnSPsf0aShw9f1D5x5fKhjRGO2pCe9VdfvHxj1+5sVu8fv3HP/0JjddcOgTAMXuEVpy1aoINkaZm8aC9cef6zpfuNLvXVj8aVR/+TUCgolKH3gSClFzAUk+9YYIG5oKtE8PWVv39j/3pE+6Bx0/K0Isrg3YMl1jcor1YgZTBAL61EQwj3ihgFiJTwVPy9fLkzXceLrj4yZOXXrsi0NBGrjlvorOVWOWJKcJybNsX9x8++fDuzr65VN9e+S0XCukCR6BRdEY99U3vT1vu2BoOXeDk6RkCKSlzcPzgcTutl6/erGkVUDCHvix8ha6i4DQUHAvWWLH0Cso728jCONYIiZpKNiZSgoJJk4OKAhYQKxgShoopjEL7rnfGgSEiy+U6R2pR733vQ+kq59x8Po9w27vTPvRlWUWJi8Wi3h0/Xx5eGcUQ4rOnj9LIli159Mzu9PisnrivvfOVs/WR758UdSu2atar3YmZjOsjEbXeeyHLCThOEYCZYBkFw6fGjXVYtMKU1ZnMTNlCXUUk3XeGhhDJMoiKwjXN+rXbr73/s5+enS33Ll3yMc2GlMDGsO/8IJM4B680O5sLG04sI4EyNMYE86q1BaDJ0lIVKmBLGjJYl0y0JAGVw6wnZbEM8YKYSTayiBwXkpB3wJNB54X3MIXFJqVKwq9T+ZwYanKxOaAkBxLoJmFrnooNXk6cPGvT9+Y3cIHXcqGCp3S+z9nOGUo/fzHioaFITKVzjx5Or6UpI6NgFIAFShEHx3BADR2pVooJ8v+mmGGxh5NJf7hbNJfax3/20f2XajuTs0lo0UCDWg5SIJRgSGCuuJ3rity06RfOxKrermh50pWh3gIU8+a996ZhTHKmVBk0Ecai6/NonQmctpobUEg4syqUhAfYQlQNMYiR/IkGCnTy5UzONrk7saROxcm6oO98a/bPP5zTdmUqJzOM7HybFrvhxWV7us3rCa8rPa1lMbWH051PrJaCFRAomhDoWy994+ho67IeB1Na8SToDY4dm22HCOrBwWhUDgxAOslmdmIVIpt12jmCZ3xFOZUUG4kdyHD2TdvoU88tXzbzy5zYRFhJCWo4Cd/zcwyXMXfVBCDTuVLGzdbsuZLJz5zx3iFXDfT75FNvTZqwnB8wXEyiQ+6jYeaSYfg0YWXeLO5S3aDKG46kJqJm4rUPCBANr5MxY9XBnllzM0vnj8llhibaWq5ZL3a8eekZ5Zu8uZJDyM1MnKTUAHHy0EtVayJ8gFjJUHZ7JlVStUQWMAoLcsSODGsJdaARqCYpNIwCj4z8/8j61yc7juxOEPyd4x4eceM+8oFEIgmCIAmCjyIpFkWV2KWaMrVebSNppttmxmZsZ8z2w/5X+2Vt9+OOjbWtWbdpu3slbUsqlaR6k8VisUiQBEG8CCSAzJv3ETfCw/2c/eAeN7NsYWUsVgHIvDdvuJ9zfuf3GMGN0Fl//C//qZSHennGBcdHv6yf7FcvH+H+J4d4eKAiRvr+7Pmd3dotv/nlD268+TbvHDR3fjGpyPMoeKAEWuss1Psg/Py+1hMVOzq8fk0MeGJn3/uTvt7DrV+QtKKojBHLDUdnURgSy5YNjJBlY6x8+rMQ1hU3UZjtDtsiBLagyEbADOptb9siaM+sVlmZo1UqwI2aCYenrbt6efK9vyivXVk9vPsvf/OT5dM333jrZmhFGokSyRAK0cvk0NamqWTxkx/9YGTbt65Nnn72wVl7effyO7QyFHs0wJr7RaenkddcEIdOjDXKCgkchGBtSaEhMH/xoNmduYMD7gKsV2N9hb7QrlBfSGuw6U3NBZEzVBI6FadIRHUDONWQFvap3UpuZenxZQtiywBhtV4766qisk4ZLFZMYZRUgxpn1Er0uru7U5qyWa6ZeTbZbX3b8qbAjg/9dDI+Xcwth2p3fOfOXb7/sCpLv2qdjKI3RSjefGf36LlJ54/bZr6/O+53Z/0kfHXW7O88LxFkvGGxhSFAogCsTDEECkTK0vesgYlUJe0uDRuQxCgmawA0iXGtNaqSzBSYIcniuO/HdfHFF19+8+hRPapEVaKwYSYyzCLiXDGwk1I0wvYYpdZfMcjyiIgZMUko85XExhhwds7CYLszMDnTvpDMgGaJpBkid+QsJMjWU0hZgYBmjVIGw9JLyzvXIf7l/JwP18LA6soAAghpw01bpypNxniZ6Z1MNJF9RxSSVmAIMftga45HVEApOXsO2zAAKXz0t8jRw6/kapnTafJLtU
DisRODlYioAAyn8bcCRkBNUhNqlYnyDBNe7WI+o/kuzV9yq5Pbt2I7f24GOUFckW1AgdSBLBxLsKSlXBqfVlUzQrfkXRYjstmELtqDk52i86MXrskb3762Og6YSOmLduZpoeCCEIgCUa9I9gWqsCRIQZ6ULG7iUJmS05mykkBtbn2YNLJSmgYt4FApVUQ1N9xde/PqSyeL2xxGl9yoWExptR+eHpj5JX46QzOTxUQXRReKJtazhnUVNLAwW9v6s4PJ3e+/996iXqzgoBsQh2iV7elkt29JPHFv4IUCA9CN6kbJEnyh6AlMZHXwriHNs15i50vyYEm7KMnQ6eC6nLiIg28PD4tfTamWeTSOUSQpd1MgDKUdCqV5enjkBqT2vIgmpZFA8uFiw3mnviVDJSPlmNYspMNYSReXG6nsp4Z0eO4AGihRudYqaMsU+61hGUqJlIHzpckw/NN2R6JsmCVBb6mdRu7RM/xEqflA/kmm46I6HMZhiZNORLohBNgO8SISY0ybXWYWIVVhpvRDTy9HKbHT0yonR5gkOJoMawFYhYOWhApaqIyEa2bH/Z4NX91ym0fFgQuu5SpM2q9Gv9bYvczzWwdmUQMMv/Th915/96U33lotuier9gc/+s+zsKxqtxL1ReWLkZheiIK4vaP2W9+yT+739dEL+8/vx2WwFQMw77/fXDvUL37M/psgHdy4tDDcSwmAWb0FVcWIjUpZkfNo62gLq+I1klWJDAFxFNZCWG1wgcWStymnhVSlYNOdrvm5l/b//C/d1cubfvPa4YtVufPpjz9t5/6d995So2xZIXoEFHDSHk3kix/9S3Ny58WJfH3r7mW7Vzx42MTn4KeBAy8EC8habM8cuG8jrHFewBR9r8QYEdawDqGSVaO37y9nO5cQOg1UoYP6gqMjKUisBhiBYTUpT14FCkPEokzMRCbvp4ZpBJnAimhFRAgWXFhLNisfIyIxgVHXtVdPoCgaQzDk+j6kzWXTNMXY7u7uBcRqPIrUi8jB/szy2fzpYzx9tk9kXSEe4zq8/3sFFsWvP1icHJ+QFrPxXrtodVfdrL75yg2JKmrYuhhVOLlJZvYyOOGkQoBybretSdc+MXGMAYgApUmUmXzIMDXbvLC1xhg2UeTg4EBEoLLd4yTYJ4Q+22oMThrbY8wJ4VVVkqHLJmOsiIQQtutSiIBYUtrFcCEkN8eoMTMxIJQTc7MEE9Aoen6tJJIVcsjhwCYhbAfS9Adzq5BJQBg8ZoHzFbOoQIkp35cZ3R5IvayMrWoiLSVo4P1ut2JMqSSnbz7Aihcw52HDdWFeTv/YXpDpgbMETpeTgiA0+A8pQLACC7XEllCCxsCYzJhK52dY72CxK4tDt9hZ3f2Hn3982b3oH/lJa8Jp0DOosHFQC7VghqmgE613l0XtRxQsNoauwDoGB0Vn/J/+yfWWSByMRVuKrhSsag3DEYIIJ2CAmUVYwcy5gImE1BExc6LjDhe9Zm2f2jxVpeprhRzEKVfEFYdp8e33Zl/cauIUs7jY1dPLmF/h45mc7GA1lZbXgjV45bBywiv0JasReDZ7d9t71yZv7bz4tBxZaCCqwC7EKpCbT6YUCB7YkLYAQAXBgYTUA2LArMoXhlrLJJo95NKQR8NnJpINLjIKkn+bc/eVnlsZPtT0uYcYSZU5B4BuR7pzV4tUAPM5yqkoF/bAw2QtooBJ/aCKgoxhUoQYiFj54uN0/gXzM5hp9zKARbk6DkjS+Z+VJNlmGuolBlXv+YMLSjCXYDB7VpxXcNLB3W7bhJJcOMs0jP1J8kmpa89Wdxm8UmQ4TGUI/iXibTc/vNTUhxMy0zFh0QVR0s0XyVFc1MIBDlwTasgIOlIeMU/AFYexiI3tw88m+yaOMIkri2ZS9/buR0/mv66tr2npEKrQvHD16untDw5tO7185Se/+nkxv3dUjU+jLahtdKdH19TFxtSW46tvYOP7x6f9c6+9YncMYt/6qv34x1VbTn/nve7GVf/4Vnf8axO+YQnEBQvBtqAgAqPG9h6xtSEw9+AIcY44BAW8ukpaUVW1lgOr4xA6V6hY1cpp471fu6tvXf53f6HleHPcl65Uyy+NuL78+ucff9U8Xr3//e/AIezFwho4lNzH9uTTjz+8Mik3T589fto+aze2tfLklweX31k5RyuhJnBbwiO2XoIrREIQL75ypXVl3wsK67teOrJVvHOiV55uro9NDNYiutiStoSOTbQqXBAMKwuMRkpCmNRCiYBYE7GHhkcn1y8o2TTeiaCsKoX2XQCU2eQIXJVRVbWLzsJWo5Ffdw6OmJ2z3oslG3xAD91IsVce7h56fnLa6qNV3He7Onvivd+/Xnz3zy9//sOzz34q0sf6kCDh0dmDUe12nh9feuHgwy8/Hb+/G6++d9q5jalacdqKNpBW2YOFEcRxIRBFBCKpgBBCx3mGA0AisSisMdaHkE6CcdncjpNQz9i+7bquG9e1AoUtJK+PRSHJVQv5AA98XRURyeYMbNLZS5MQDVkoGa+WFH2ohIHkDMr/LyeAHEFiqqvJ916gibdCNiv7MvCsEJJz1tO2yx8mAwx0mBSEti18iSiU0wZV072luewjWzwPoBgRUlDR+fyqqkCMgZkTxpr0VflyoqFnFyZGJidZ5gSnXDAVyvyvdMVm/nRq+kwy6gYzwDK0FwNErWopewkUihIOmxFtxtROYnNol1/fvbtZ7dUEWUi/6DB33DAkMUBFnTombnMkkCmjLZYlcxlaG1bTqjo97v7wrb3D63snt7tiZE3DvRFjSnAUFo2sSjJ0ZcOcSKop+0SZ2XChGlWZODGCzdaPN6VksXGASzxIOJABW6KCbS2N9K+8+dz1xW9aKUpd7fGzWhbTfrHPp1Xw1FA8I21UzorWurpQIyQsZIqZK5exmR8/2d29ujOa94oO9Qp1bTYtquVoJiuBY5SEZKjqOK0Ok1nV8LnytoVL70vEpMJzvqOkLYaa8haJiNKnldwRZatRS2VPlUA2GWek50I0bZQvFpL0PKStZzqtqnoBh9YULZRABVWQimGOhAjdVi/E1GkPT5UORhgXHrkt4TlX81yIs19A1GHPo9jC4tDMOqOhBwXAwxOv+eFPbLTk5sx8/raGAwNoNuBJLBXddseJ6oyB95gRhsyOTN6cwswJlYQgx5mxuUB4RKJIiuSBfHCsTvsORgEUgIM4kCOMlGpCBa5YprHadfP7X9vFI2NXOya4uBr5zdituA63jeygq+zSaeNEvnPz956t+n5xp+MlvvnoOUKLjWg9IleSbXUjjQPOrr195ayVe7fYzS4/f+M5CTHul+HT+3znx1jIZn5i/uj3Jt9+N1RvhPltWd3p5InBchIah01FvTxdw0TE3kGAOIZrCLDOcKwgUXtYshVC3zKx71GKo3UI3qOV3k13/vhPdt/5femCLrUoOfpYo7SjUg6oKMqv73/1d//5B3/w3/2ruh5JFTgud+r41ce/8avTerZ89dWq3t/RxnerdtaHp92a4YIDW6TdvGgnXTAGKG1VVgaWgEIQioiKbcGw2rP98
nR9+dqssggwYBIYQSIvJ5RUgMQiZDGKbUPIpFCRMBwf3TIGBNaKSFUViWSiCjaGFDHEpI+PIYhP0liSGJ1zGpht7jrXpyvnKtMz9SRN0DGwd/Dk+KunbXm4dz1udH+6+c633Id/+/j2HZldmzCK1bKbTIrf+6Od599+LcyasHvSlPHvf/Wzbx293xQ7q7Dj7UxWalviYLSPCEAA0AMBGpNzDih0rS8snHPGsDEmBPbehxCMtcjLHmjOaoBG0SjOub29vcLZtu1TC2ytjSFy9tRJ94gkntQA0qb6m7ko5y2qiDGmKAoRjbG3ho01UBVVYwaHemKVQLkzVxCJCmu230ohC6R55ExpSTB58zYwJFPVFdULkuSkgk44dNoagRIxjJQCBMMFitQOMJ03Xbm6AAI2nNjaSdua3hozVHK82rBLy9bWZFiH+pqsTYiJjdmqnraXYb5e08irihxImNbgJl+Kmj0pE9ACCyWlklCBKoKTEptCVk4XO+P15v7X//izJ5fKq/5pR3OK85Ibo3NRQSxUCrEWajVUYjRGQwKpD/ugTYuTqavX6+XNS/ij77z58CRWdSmrXhllYXvutTQmsAYGjDFF5CjSxcyGS8MVZ0/fYS9BbFNSDRMlsuPQQYCoULJAwQ4ogApaQEvLZbCT6jvvjn768wc703Xtw5TPpli4rpclsBJaASuWk0JshZnTCKiSNa3xOvWLB8dx/4Vy0k5G6xXWI2qKuHRcVbppXCmFwmjagBhjhQVI0GdyRStI+2QKIBpJJD02lHaieUO75eGmWqwD0gOirdBHiZLwL3OZsjujQCX7RwL5GmJjdKh1+dlTVeLsx4WMzyipqrKKzfCLAiCmRPKy1kgIlN1B018akJ9hD5u/Ui6WWY3EZghR0GStw0xK2xZXQWmSTeSr/M6zmv2CG3iSEybXHMmuzumwUcbx8zZX86mFnr/G9BWJWXKh3TalUBqc8fiiaklzYUUWgjGTwEChyoZYwaIKIQUj52cbSnyDBHsWQElwKqVyaWgmXz96QL/4yTWzAK2tj5UuC7epZdGAImEnruvYOlm98/a3Xzqavsj2pMEvfvHBns6ldKytjWHu605CaScv7ZsXX9tp7eLvfuZ43755/fVib4JNiMfP5Nf/7IqWa9d/8zP87R28945578367bcxeTNU3oQTd/ag8KdWuyqsaXNGcYX12nXr1rfaE28CswlsohRquTOCopQQjUFbsTnat/YIBy9cOrhej2Zx2RUN9yPRXgnk295G3jU75bSka7j16PN//pd/ef/y+zvVtCBfW3/85SeVzGl13MbN4v4zOTMuTp8+mi/mH8zefp+ntUhIabVsuBhbZ1kcG3Av0Vq2wp7VpnEEBVt57MODtX+NWdSIYZIhkGPAMNOAs82UzPeGJOpE4qYmGDVdzYZykg+xiITQG8MMZuI2dk6NKKxzfddbW1APYt4sNkaNK0zTnMVox9NdAAhYz1ejac0Rm9a46nmZvXjcz8eTyze+Fb/8+uvbxzp7fYKlbZaLN78zufnG5WZx9f7Cf3Nyb1P1s+v7D0L18X/50e/+xbsNj9vWmk7QAm0kr+SNCit7iIgGSExKh7quFb2oiCbPIokxGGOttUPitxjiPgRmIyxdj+euPf/FF7dW69YamxnIfUxXkCD9TDTlnfEARIOJyfS9T/U1ynYpyBJjDDFNtHGoZAms2v5KFU+3ziHZ+TIKwRpGJMOUIU1JCuJhzbUFphhRhJOpSPqIOa1mE+SJDBEmkaQKCJR2VpoPtWbDS9IBaFOBMFESDGUXsC1CRspDZ7+9ePMlJBmgjwqFYSJV5M2oSrLm3UIsmt+OIvN6Em0nqTEl2/4bEY4UrbXCgiIEwwYESyU2Y3QT9Xu232mf/O2nD8fusn+4kcZipbSIvCpoIVAqVPyIxBZiNuQ0ioEJXDBO4uzKqo1uLKuyk7/81y+ueuVSA3s4wFKMgZKGnITBmtnC1haQ2CiS39G5z69iQOtFOAWvghI8SSghRYhs2DAXmV5mwdngi0JFDcV3r+8ff/gr0/mZmU9pM9NlnMMsiVfEG2MWfXcSpaggqsIGpJYNIrQ6LU+XJ02x6+rqbCKjhsdTmna6cdQ1dckNiVNJLGgrsKQ2MW6JhRUC5sTkZTbZJYYpxl4VyWY1PbBZijrcBqlmRCiYBpeZBMZHYjaGRRMUAzYpL1iJYNmIikgiCg/7zxgV5JwZjKIkihDDEAeNPgSkIEQiMAcZ4ghEwJTtcUSgEIkJPRo6YQUGe3jKhzU/8VmPNyxZKJfPVDs1t6BgFQIUxETCmsTGKcOLQPnY0hYvpMxUTFHc5wz/YTecRYe5R2emdCiGCEUkLpjmQG8yMLmRUagFhEEMtQmuijF5EBmFRFgiS2nqFSaxAMOCDQsHYaHCBiNkpagslyx1LCbu9M7Dl/ypNWez2FgE1vlIFvvinzFPyc90U9PG2cXy0a3FzBwe3rjz6a313Z9N7Giz0Qk5VP65WVseuHK34Xo3WvzTL5a1P3r+9erSK0fUcVD2t35h40MeuaBchDL4E/PJP7Tzj/m1l4s3XzQ3j8b7e9XVmeOFE5lSa0KwFDi0HHvbR9tJ17PBtLc1ZLwOlWmFG2sWTMIlldqXdKa18tijX7TaU6eBNkzEUgAGvY+wKK07mO1j98UvVw9+8Iuf/N7zv/PtqxbL45OHd//dH7178suH9341p4elLKPtzoywmy/lwZ39t7+z3rVqDBvT2o0R26sWQkpqU8i8ITZkGGrQuFhZMZa/ni+f987ssPSFsAW5mNZWiW+TZlvKDFeBwICJVZDGZDY238eiADHEusKFGIipcEXyewoxOC6kD8kP3RqDTvo+yEZMYTlys16LCkikFw5MgUuquC3CPEQbn8I0fOmpn75+c/KwO3v0aDK9obrwm+jf/7PDK0fm7//6wWr+YHKtWrlVNTv6al59tXEP4qK4f8bPvyYrj43VJnKn0pG2kXxSKAgBIQZmAWKyqQt913atMVvFrXZtS8SusFHEh8CGkXiOhNK5w8tXqrL0fZ/II0SawoLYGhVECdkJaFuAFV58OrdRFIAxSXYSMfyUOeNddrilsT3qAAuyFzTzFroFqcS8ocrGwnnoSmDy9q7XxF8fEDPoxR3teZ1HZkoPjGpsqSLp7tHhhsp/Ls3c20ZBVQd3Z4Fe+Lv4rX/JK3NgEIASZZA/jRMJP6fhm6QWRySV9jQC5O/BSI4d+RYSCCyIrSWSGlTRWBSymdbtjpz8p7/7tFtWdoW4Ul2IXURaGpxp1zIDHNlu+t54Y01fgTiwK6LpFEVR6WTm752c/OX7s6OD+nazMWaKMq/NUAFGBRGEmI2FOZVdJguNgFGNQ35VAilYQRkXIKbkg58QBpDlkmA1GXgYIkuxADlw1VHtpNS9iX7nNfubjx9Ndhczf4JWsYKcRaxZlpAzhIWKWiiMcHKZgiVnxVvfn/r+mXVTMxs3K25raUY0rrBRO4NVWCQZkiS7TwYZot7kj1i2nyNn/oGG4WGwiYBMBGN4oN4Pi1JNjzmiZFsVAMZaDPArDU9b
RnjShgUMTqIayWeASJPflioRJ38ZgLbqnhCS9sOeUxiApEEYJoyBt5jPhLDm31QmycQXFZxj4IlqtiV0bPkOifw4sLGS9U1qbgePOVEBGcOaGGJDvDc4e8wNlEVVaIyat0Msw+YiES01JUkQZfmR6rC9Th35QFRDCpQa5nBkVWE6PazZnwB5O5KaHDYprDNnAFQcjNjaahWkglRxsufurp7NP/3g0LWjpnGyMLoaYzWJKzXdpijKdVPyyvWrwofls88+u/vlw4Ojebu6Ws1HdbW3i53nJnG0+aYdPTzZfXDHP15iFcOm3d+fPHnl8uvlZD869J99aR79mmv0zEUbxLJ1IGeK/iR+feznP6fb+2a3NLPaTeuijOS8VE4qy+TsyLItpdxB5QLVdTdCNW67Ao2xa0KlWPT9/Wf0YLF/6bByUy/K1mTjWnjAIDA8lBUWvenr56qjg+cqO/t8dfsnP/3Rwe8+P6mXVWluf/wRffNNoSMLgStiFwoP77n96iu58mL14uW+CWxD6Yz33hinCmFi4fR4q0NXACZYpsDEFZ+Iv7/xzwkCs6R8aKIgaagxaSaSIKTMOXmVND0FxhJGRGm7GJgZGqDBSgyFK4QFQO/7AoVlDl6MsejhN95pYYxVCdYZS7Z5tt60TV0zCKENpjHcMXvLXmWx0QJ05eC4L+bt3mY0vvWk0FGY2vvRH7/xJ3X9Uvsf/uqYa+dmTo56V99cmN3TODOz61YuffpN+9KlCitG423v/FIKgYpK7MGBTYaF2CjDioS+64rCEpEOWYSZSaLSbFqbZlGmtO6KITw7OdEo3vs+9IYNgBhj2kmRkAzpSdkPmRLnU9hyujt4SB4G0vcaqiQRnYeVkmIrpcyqhORkkISjQ4ec1MCsA8UqFemt+DCJF4c/mvv3vFy9cPWluyZ9bZFz843tDZHr7gCNZX0wBgPcoaBeUBYN/554Z5ope+klbevxVhq1xS0B2ho4pAk4827JseVhrWWYbB6wDKsmbZym+AupyBawFfrCW7RHExn5xX/8h0+4Mzse/VLN3MSlD0szmpv1pm+Mg4ozgUWNN6VvxQdrC3ZqmEIZdWK7uPjjV+r33tr/9Gw54lmJ0DgrLAmdhSMOiK0ohr1DmrvAKsRmy1eUAaHVZFwy/KAoUXfBrDCGbdpIqwUXiRqmcISS1fkx9/1m8+3r5aNfH48CZrSQhaVnWrejwvPZs45XqJZFhA0lCgGzteBYiLFopO1Pfb87QsPVuK3RVNiUaCrdlGhbV8GBHQHQArDIUi8QEUv+xAzIKKJms/+t0EWH/ouzXBdJCCRbH46tJj190Im+MDylw+efsV4MVL/8GBnKIEtyTcfAZx62sJKmXmJWQYxCDB6M2VN7un2Gt98we8eySALRRRMrXfJAfA6nD0vuXPlyv7ztPNNknM+LMpFKUgLpNnpyu6tD1uvl30L6CSuBlYGYoflBkkc0AOkDWXoYo5NFt3AWHhBlIbaopsEpya+2P2tiO5B3TAoiJLCqUauSOKpWqSQzMlIJjVgqcbOCxvjw737QzL+WWTW1QGjHerrD/W48exgil/y7Rye7la96V6klMV3Te75//Tma7dnpjhRjedqf/PMv66+PsRbeYLdwTU0E6/en1w53nwsUFs083vmnehKidUWATCgoOI6sUtCgSg7KqydmtVRqoW10hVgpnAWitTYU7GkiXDc9L9W21X4jEwkFGqJF3y96+WaOJR98/0+LvVm/6NUrWgdJo1enMKSksEYtMXNBEmJpyoNdF3ZfuB/w01/e0udlcnT0+UcfvuKm1UTKXbc+acQpheBm0/2DanHymT+q7GTShd45Y2AEkUAGrFaDQpm0SFHLaplDwQy0ZO80m0krdmQoc/VMHjZUaeDaGGKkD1OQLdZhbSJpq88tITHAVoJEeLLElitXSS+ILKFP24Z6VIdV6DtvjWVl7dUWhentullVo2o2cREkG+GKZS2Vq0KHCRf28KVe22826LvOt5t60918eXLlffrZv3TP//7rfbc6efpk8sKbx6vi8cacyR7VV5+s6vaJfsub5XxjVmV/FkxE3LDxlk0XpcvOG4TgO2ZhliiBghaFkaCGraiEmFF5awuVYKxV0SCRmauy8q2vKpco/oOqPp/s8xgTHparyeyIWdONnKfT3ypxmbiypVRyup62KloiIhVR0uRQneQWW0R2cGzP1tPnhlb5SiMmSt6hqmKMkfMVEwZkF8QZ2FLJBlrbqyd1zfmrDRV4yyFlupA3fn7D0XBnZoM9oswZ2A6/dKG08zYZly5setP9muVYlMJckrgzeacQ2+F6oqI0YiEsDAkVSx8LUx0U80cPv/mrD76Y4fDV6is6bW0DDpaiRaAYexbD2gZDbWAGdlkRUcZJgLZhYzhYrpYL/9ar+Fd/cOPLU/UWERzAGkGSHB6UBBAyZISJWQSsogQhCDHl1GMSJU0alPSBZucyBYZsgNQheRVSoVzhKPVLDEi0hCjoRcOl3ekLe1W3noOseDHGbrrer9WyY4iPHmwEiCBKQ5sHAhQqEmMvEkQCUSEMNSQMsUpsznnCbDnto9PDqbRlFdG2u4uq2SQxEbwRkcIVMjw7oDiZ5T08axKJCMRBAg9C1cRgyt0ZAQO7OAuOzzNOeCvZx/AHctKnqoow2226SmLvpy1o9l1HHjmtNQDFGEDJ/12QmmuiZLqRwAjKe6kUkJDldLkEpjcGpIwDybKlYZJPrqu5DVOk9f5v2X5QgrEoDcZDa5D2U2k0T2JnztHXQIYeB9fS7cIL+RAlI24Vobzaopx3qpwVF4OCINMlyAJWDcgxFSwcI4IxFlalUFtbM+b/8Ld/c3bn4+nUPIS/bn3Bm4p0oqsirmYv6b/7Xb46krO77sMfzjdzt5hzkJKqvqjZ1izGN514FDzDXtlVpFag/ezyoVkRz668crBXPlkfx5/8qMaTfjImWK1b9cQFS4xiwEU27+xMWVBvg7e7+6dXb/5m8U0VmpLEaXR9CBNZb+bF6OWaC719B+x0JXaF2DpZeGee2/sf/7vy0vPdcU9qTIAA6KESAAGCZtWmBchYC4j2yj1dvXIZ0EXV33n08c3Z1Vff/ZOzT/5f1XQV52zHppfYtv71t/HdP/resgkPV+buQhmdN0ac40hIwkqTc2ENiFnFkDBglEWLonjW9Q+WfrcyITWqqaGPLEFUWTPAARWRKCSkULLMcBL77LtABE2CfbYxirE2RaOICBNLiMkZqO+DtlqgALH0IqKIKIrCucI3yQvLh8aYylBHuiE1JnInc+xXR6cH8rB5Vo/Xp0SHs/HsLfroYfxGw+dP5hrr2eTmV4/ds75uzay+8q2vlmZeHLZ35r8Mv/zOy98+W8a4CtwBG9u1HXMLCCiIRpUgEkR6IFrDIXSpK9ffShQXY4itDTFaY9JhiCqudGU1GhpbykyHrVukInGXosa8NocKlESNMcj2FBkg3sp5AKhGETUmRw+BksrnApB7wZBOAclCfDUwaVWWLyzWC+60GY0mVRlYTkyQNBZcrJqaq2sS++YLNEMombFMyHv
cC39JkIgAlPHqxPQE0iAyoGbDGx64XOlN0xAbNHQjOYhYc4XP4l/aqrHT6m5AHaEKNlaFiagPUYUMc4So5/0dM2+X//EX9+bH6+vV0RhfRoRAtijQM4xlQDwI0ECWESaOQt9HMdawgReGdQS2reD5Sf8X//rlrzorzOCRj4iABskO2BezcUAiUZH667RrTO1EzLqwdIGnzaIm8wgQsrlUXu4lJitEIxA5nUIFg2FgCQIhtbQ8vuOKmsbWIABMhhVRRSkI2LJNu3RlaMjNFdkE/uQ6OrzmrIZKtLZc85K8LhdEIhFc/NTTCCh6/jRoBnE57aNINUYRCLNJ7m9KyYnTDJDp0LLlzxYXNLh5vwDAZO7WhW/NlIDUTIIeOrjEbgohEJNhc868T73mhfk3AbaclXLDt8yrF1B+9DWzG0k1WWOmJffwSpIt1tAuQAcvdJyTl5HuE8IQ05lO0UBgZSJjhnDuZIOTZId5xZLhgbQIT6oHSpw9KAYLWbbpMGk2dSWALJMRMMEQM2AEhFx6efsf1bzvIiYYRApsuSgLKaJa2JGlgv76H/7m4WcfXR2j9uu7WL1WhjeKzvZnY78aX+5ufr9EswwnRCu/R/Ty1WJVnz1dNZevugf34OdFX2Dqit6Ib1bG9SEUlejRC8ZMumeL164/f9htFsXHX+w9+XBe7trAgXsyUhS2s8EIhwAIq4gadupd7Pnw2t29/U/uf6qrrydFOR0faWzVAPPVKe++cHmPTxaBHMMprILFL4rrrz33h3+uvCdPuiICPYegHIVEkh+SkgcYSaQQST1pUOoJUTnwc4dXRnvYPTKrOx/ulpOb3/6TZ7/8L7HcUG8d2T/8bw/jSfGzX9/+1rfffu05t7rdPA1TFKDec+Rk8MRQCBlSriClyoikEjgNE6NliGy+XOKlXSu28lJ42B4WERwhPUjYZUNWhgEsokY4YWYEQwIKTIggI9IzyDrrFPCtt4UVFQ0aQ2BlCJjZFtZ7zwoQhT4wDKtUdQ0OhY3QUHAhHrpRsQoDY1nm8I914+396d7LzysfPLeQjx8bc+uL9Xrdgq5A25f2X9k5uLFTX/3xJ5/fW5gzunT2xclz7ujX//jJ6lf+/dd+n7wNy0gdGY4KD/X5YmOoSSEtIYWSdl1nbaEiGEzUDbP3vnDWmLSC5ShRVdquZWN6H2IQsmJMInREACqw1oQQADLGIvnZC5hNRlZVmZMYOubLggY8KvneabKP5oQmDxksw6lOFI2MGA+tsQgRWwLl0WGIL0x41HDFnF+XaclKqdBejGQcakLGrtNfPL/+Buzt/J4kcLK/puFPpqYtzy3neHOG2Uk1qZzzvncY0wehaPqKKZc1wwmJRMaZjY1cliXGNLzEaLgiQ8RkK9OFbmzLou5//OGzn66X+664Odmp/HFM2KEEUUOh52hF2bIECoDpxVXV6r2XZrfuLM68ZccuSoCIHVU0/+//zbW1nfgQwUZ6I8ZG2GR1r4FY0qyrSCtJEUp+C5w+67RUSDV16DLy1A4lM+wELDQqIpHY9DNTUeR6qQIVQAICMxMHimr+9fdv/vMPvgilWLBGygUjMcIFig2AKNJLNCALYsCwhaIoLHNGI0QimBhqEnlvqINZS8NpCzuUFB0egPQsZBruFoJOxVoJ+SwgGdxITLGAOdzZmPzlKe+Gc9neno7hG+WudAtT57q8xX3TxoYw7GtAJBgMI9NskLHjVDcptYaaOhvD4NxZCBMTJftVgUZSiDIrD3mfmi1Itl0tpbSD4YXgfOLdnpftqyYkKx2opq6XDA9MLspkq2zVgUTGTH3NEFiZHgVJNl80aEBTfAWLKEz6n9sORpI+HqDzLUf+mWWtHnFqqpAp1lQ4G1mEVYyyM1yZ//rDv/vNvU+vzlwdj0vtrLQraSfiRTqjC3+pf3Af+4bCWfnFxyGcjqZd0R5Prl1zbRNWz8A1hVbrGa5epmLXFQfx3rON2yv73bNPPt996a03Zgf08Pghbn1Y0aaS3dZILDo1HEotluKtsDJ6KWobRFHtrqaHX5Zy794nrjk5nD33ezdf29t46RsD6drQhFIefrk6WUWzp33Rh6V4e/D2v5589193TeSzjZDhnkxEIUSBBWE4kh4oQJEREQ1roRI0EoKIBwVzeO3a4os71994q/1qvtm4N373j+79+Ocb/uZP//jqpz9f3flRWx8tX6WXfvib26unwOGbmEjRcEy9ryorwCoCVCwTYAqaGlS9TIRrafjoyWbEjZvuz9owa1E3oTSNoGNtxfQ2tIEVGAEGGBH3gCcKUE8Iqh20t4gF0CsKm/J02bBGNWzati1soUFDiEZMeoass8EHQ8YoB4kMqarKmhiChxLaUqyALYxC2TuNJ8KQDfj2yezykc5G339i755Nn7R8iW1sVqvHD/y0j2tddKMXgep3dm7uXN2dhLLZ819+8PkXH355fe+lWguPDWwrEiBGpGMS0SASjDGGOMTAZKy1qkJMhm2QEKM466zhEIO1Nt0R1hgCX7v2QrNem4Rf5wh6Y4xBBkgxqD+FiW2CSROvnNLRUYUkYVJG0n5rDpastNna5XBOEwKQTBwTrI2sduAgkVSiMmUgL8VQJGZmPq9Zj3l+M2D736m0pfrIpNuaeXH0SOPvtsAOF02+bHjwob1IqbkwVySQVXXrl6DKW6g5OaTknJl0MwMZh7fbH4iSgSKqcEqiJZMS42OkqEHRGzJd200ulavm5K//6/qrmb38yt5+3HRhI8ZWUSmR6IIix3Sw8AZaWCaJ4dGKfvGwFVjLwZoi2CjWrudP/9f/9lpdz572wgrPNpAKrApRzP0RgiIJf4Oq9IwoEpDSjCkhrpoTLzmJNJE2FcQmASciYAqUyi0LS6+w6X2zpmEtYfI2oFWxyn2z4atXD/Zmn2uwsJ7YgjIIy0wCMrYHNEJYojJZslAQ2BlXOiMQpa2xBjMbA5GL9hbpaUkUeJWtT0u+yJNUnTgFjSDRhimzSJLIiAwZUIxxyAFKp0PYZGqwymBqlffDlCGYbeNH214uFfdMSlLK25pkGg8g5t0s87D6gWSdffbEyU9urvyZF0EUEyE4KiUnVwYELNsjkRlnRNuPLT/fafkK0PkaJe+4U8XbCo14S6WGakoTN5wtb3VYtST6FqCpC92yzJiJQWxtfuOiySmI0t3B2Y8usy6JAU5fidmkuKgkOBgyK7YHlxWG2CqTsYBBJKGCyJEUUs2KX3722cdffHpwqXLhKYk3vBzJ5jisVxRe4p6dX5OpRI7vYnUc50/o5Cv5/GmjwY6/lGWPYK0ZWSVZPvNnT+zR6/b5HT8by2K1/vIuXX/t3Rdfvnaybsxntyb+cWN2EFaBCjKKktFodFysiHqo137TFiMXxlfvjGh+8nUgh8ml2f6Lnz2Lq2fHQqEup6/NnpdHt9cheHuorekXS+MOj/78z6trL8eTvlig74zdEFqNrWqw3EE5AAEaVAToiQzQgUhaS7WyJ/HKfdGt/Khx/58ffvjO9f2/eO+7y1v6xdMPrr39wmsvTT/52cPbT8Peu64N4e8//+HzL5
8Nprv/jWdz5z8+DBVnX13t379+7cPf/0lTBuxYWljv3W9lG7f/ODt7ZH41feOj61Oz7/7MV3Xz48dbqOYXbrePHol6rr945k6i9f/fQbNz+59q1/+8Vv//DmcnzkNw/7rSO38SCMl8HzvJek+id0sP/RzhrWaBokRXAUcQIvBs5wiA6aLJhJJaWBpgXyhHgm4MRweWtuErpPRODk05LwKNKgnMXgAlF1yQsqMfw9sQd5hVMhVePalCqeuGCuacw6Ui1Ex6AxYQSMCBPFWDFhLtHUqxnd89WCj+6W2tb12LNDdF0pinI1qbpJhRYfHtz5zDevffrzjz046HnK8KRmPAlIEMBHLaAdyHhz0bYvOeeCMYkTXCR5tuqISBCQmXpWf4nE5PySV3KZJRAx2BYYA5mKaOd9KLsp95HJYjVxQYR9BrWyYt7mBTYQFZE4GLolMS9ATJp2HoKJs+4wa/vzk26kYU2p2IrNJBFO0LWmGABrZWNSGmmuVNgcvOxYqRoYnhwzrS5PfUXKwUoQJc9elUQMSy8YJJbi1DN5go+RFSWRI8fKKqXAgz07T/BQSgWbLQcXBZg947hZ3F0+oB2OIQAsoYUUfaejrUm9Wx01y+t3rt97MGtbjMcToFVZgxwI0IiTCxdOjI2ix5ohoEDKQK/kiRzxWsTbAyFsO1cZYFp80njPzz1z7fkXru3t7i4W6/1bq4IL6jx1qn0fNQA90Kuq57oW3y9D04cXPv/5yXTr7//zTzDr3Firjc3tvV1suOClHtW+8peuXpnsTQ7Xx1p289XxwXrSojmUO+t5nJx5VM5t3P7gLWE9iufacHYf97tY0rKaHk9k/yge9LFzMhUdAww6dnVXz+83BzfubcddH8Dgo7uLSbF59YnLzsk777wSpQUJsUjsmQF4hSOOjjwlu7GeSAmFmYcm3wQ2z0lDYNN5MW2rs3JQI0GBtJlbJKhVe+KZCxOhGXSjawYcPDGT9gLH7GFuezYYCKHz7EmIFBpBQioiwoRCeIkS6yAlBBLTjiPnTKKQntIgUgybRRLRNN33BNmRPQ4WIoUJNp80YQ6yXomS9Qslt/KURkFp97ykvlmz0lCTVTGMkOgzl1oT7W/IxFA18YKc9IyGcllFS2reMdbOMBE7hYqIiUjJQChv1IuUKc2+nzOvRFRZjKOSVbl5AJm72Nxe5y/JTLBhZGXgp82IzSQwxTgiD04tnflZKpDXs9hrm80NElaoMQozHDMxi8QQYlF48wwCEXNigBANemZFVJHel3UUCVGYPZEQmXwkEBXOo18vjmZv7my5V37x+gt/dP7Vd392bfvbf/z9P/31u6+c37nUlqu37r7RXzl//a3XHz/zqXVzYzFvf/6b7k//6NH5a4v5ka9HV9/5ZHXu06eb0eq5r33/7nx99YvPfubrP3hnziu3eyTbRzw5xrShbV4oN6CWdKWyEloxdUBv/bmoWPsrUJEQwb0lCiUWe88AuDjBpNOTYatBAZCYZgZEUJ91qwpjX5yMOIzVnsfHUE78O8CIa5Ss2iGAU04b8hhM7AmFooB45hIoBEJOacQ0YYxdHK/8xMuYeUoyprgRNjaYlgfXrsyvXqwX++0nN8OtG9cVVTnaALxAG95cBK/be6uIrUtT2tPYivcMqAZFJBVwYADSFOxqpMlISCeH2BGreDXIXSESYJWwSB7cJgCLrVIx777cfxovz9SyNgmnhNQIDzC0YXX2fxXsPEGJnIhkCCn5p5prekznmkxnjLT912ZF5sWqnilIFjadfNnJSbiyEGkUgjo4MwBId5oMg1K7GqrZaXJo7iXx/iXxyRJgJlBR5WhTIZ+mNSYzg5IwETN7EWJ2jj1sK6dClFVL4kLBQMXeRxUUyqVSBTiGh5boWUDiS08MLQBC8AHiC4gr+JfvfnxcVmcev/TJrevkA6ILS71w6fyXvvpl6YrVbF44X2/4esN/fP2WUyLiiBbqQB0jiAZK0F+vRpZXcPQQLyxkM00PcixEUpIXEgDsClDTNtKtr37q0a+98NVzF0+vF/Hw7tIFNyIJsVdxikCk4MgIGtfsWNC12oWeXShmt5vz1c6LFz9F4bByfX3tGj96+qBdzhZzdq7X/s7bd+Tt9dpJLFqu3ZXJWb+ou/UEjpqDpq4faehBOz+a9l1HvT8q1vfVF0UxgR6CD0qJGroW9Yqrmo4gi5oXjpvY3FlMx2MiH5atUPvWW7/b37/1la988fKVS11vPv1eEcgkhOyM8MosTIhRVEwgYIxLtjicJjQQy5nJsNF5wIz4SSQQMRCDWIPkmSlCHHtmRBvns1MJJE4DAYxEAYJtuwPIwxNIgkZRRWQGEyICQzQGRPEOoiLc+wyT5sELkWExKprniKpZU2ngrLUhqWswdFCRF1pbFhkOlR31k9yZf2iAicCExNg9+R5Wc08zzVGiJib75Yc8rqyXSxBujMGaP06MGz450Qqz5OL/HcJo3aY90EhtUiqukbnKwxR7mAqnykNTv2ovbJlBJGmvTgQJqdJOUqTcKasBqWm5Zea5WFOSUzgGFY19KFvBTiKDWaBV+XZ9rZETGfoh45tAJAqEnE320jpdciSRSs/Xb7zfNLefeurxO59cv/67+8883x/e+6fnHvt3Lzxd/dU//W/bl/euXnmymu5+7lv/9spucefdXx1Xv/nok1vXH5waPbG6+SYmFVpefxRPxz1/s9kKk8lnvvGD92bVuqiOZHPJW8fYmscRjhQL6DHQACtwx2n624p2QdGItCq982J7glViJGGGIqqa1ZDpBazDIU6EI2Ul8DAmTENf8/EyGwVzEHgIoMgjD9VBUZMxA8kQhiNmVSYTK5nLkuUo8ooCVBJKGolW0Fq0EjeRYlqGWmhTdep4Grao3eT5wezvz/GdcKu99mj19ccxn5957ZWjB7NZF0IQWbn12NfHbddX4/HFST9i2qIe0UVCIIhA2UWWKBw8hKEe2iOpS6NIz2wCH8pFiT3SCgzGy4anEFEyuAaBHMcIEVFJJL6Hz0SeIKfvzUJ385HLJyQZhRkKiqSfThIFiZJm5qYUAECckigTIAyGI1aXfEQHDaRm7Dp1pIzEvkiGQCRi65iTGD/dOIjaipUcbm0+k07EcBxOuJGU/HxAxBbKHJRFU+cPZVUmMuGZZzCImQsVM0wn8mSaH3GqZeSStCCQkif1Ag8UDIfSuy6I2yhu337w3t175eb23unTbnkr9GsnDKUnnn1qsjddzvvtcm+9Xl+enirryUcff4ReET1xLdITlKCORElUeoEnoqBrD69OVNZOGOIYjoIPEMeqPSmTU4qQWTM/v7PztRe/+/RTT4R1WB7YjtsRM4cQnVNBhPTEIfTLZnlcMquveyaghPI6xH7ddO9/MP7wYzeSo6mUHy2XKxyERdMt67FXR955lBNxQVvpFl2coTt07WHVzrTFcnr58lT22vtHLR232tAD1gfwFbtFFQ8VC0inXdutR3Fzy9NKeCG8Iqz8YTt775P3Hn/0yqXz55566iKzxHjFF15ERXp2XjWIOHZONaoKNGpqmTIrIpktURL
TZb24KBSByCfJjvSqpBpJS1hzhGG3rCg6FRYVqBDMtskrA/B2qKyqTekye2PYw4rM3RI1KpLEGKARYmNS9TmUp6NlSSVnHEI+iil4JWcDe0aHw2Kjl8ThBRLersiI8cMjJ+S0dHKWc6uCNCnVYb8pZf8JqAQhHo5PLoUhqjJATMO/aNb74CHKU/5AJzOz9N9sumXvRoZyOU2xs7nukJNhjRaJmdoZsJ3DCGeedv4VJ/1avtBWv4uoIrH1RMSqC80XAxj2CRJSBXLCvR4IM5J0kEREKtnNw3h9oskVymwsyZYjJcxOk6FlBx/29x9wIavuzueeLXuKzf56fOrwrdf+/sq1L33zC9958+O3tquzq/3uwHez+YPPfepLO6fO/Pyj/+eNB2c2t8OMlxht7FzcOQj97X79yY3DP/sPf/bug0nrS+HpTOpGNle6wUtgCVpCV5BG0AIt0AEtuGfhQCow9iAJkaioatAYmQygtHQiCSZJehlJDoKaROEmAYyaRvWcMWl7Dn1ueS1t29VlYlExgMVQCR0g/lxvWfAlQxWJiUtCAfVaAQWhVpTAiLSKUjFPHLYEU5nq4V7ZrR+8987P/uHoo9kGd4vXZ7ub8fkvPvvvv3f2r//6pbu3Y72xcbAOVbm9Na7W+4eXd8u4EbChrI5B0is60k6jFzhERBKT5TsRB9tOIQwW2LwXeIiLa8OKE9hJBtIVoGLbBpLSwqhcznbcIrnZWxOaBEJ2spIIPtW1Ji6yJwsph1m9KKUvLR8nAZ/BdszMyTZEVYzAYm5xw/NsSVeHWGH0EdU4UCApFwbG+EhibSak7bD8EA1l2HMKGgp6CxDOaOOSlFF0IjuEI/YQD3giDyqgDvAgM/cuAUcVoYArHTloAfGRShIvPHKucPAKBkoKHJiYC1TMLfWvfPiRbJYrXncbOztPPnr7xltjV6AG7bg3br39xm/eeOLCE5fOPXbjxluHt+9JHdmJdtC2YHiLPVVVeo/VeildsMzAIgKJAHFJIgqJYVUVHLjmlRTMTbd00n3rKy+88JUXfO3nR+vIDFkXSqFvpFvGeiPeeyCzQ9A6HM+O7+27nWl7+cyDTsMiHq+axVq6pj833ZpA2q2t6elTdYX9t96XM9v1md12XRzsL7uuD91qLotQakQnXm+/d0+kZ4iAOpKnuRt3O27GCOsW81rr+X2SqpfNiFnALHCAq0lnIWqQRYhHHa3o9nt362ISutXrr786GX9tb2+3C0HIrbtITExOYiCUzCIxEhyxKIQ4GKOWtAchI0M2wjVzBXHOeMRgMrE0ksKE2L5fradSG+kxsydEkSgSQJ6UQSb860VB6tKQUXPzZhMRieAoplcTsT5YiZhi0j2Zp1oqcE2NzqmsTS2cqQzsRW0FJYFtunZyCnJvkEvo5CihECg9ZGM/6D1SRLVEp+YlccICUWjag00JvafMvjKmjeBEp5u+JIG9FosHz5xhFpu6VcOE08tYLj3pbmOKRzIEdGKTGQ94l0mm0+tQssZLIUCh2VEgFRuZ9zP8yvQe2Fav2pjB8Hwd8vtDtPFEJdC80CZRV4bG5AQPHzSgqRE2ESZUzCOAJfnXIV1phvlPeB5J0KeevvK//S839++sT50LGzsyrrff+vXqrRv/9NwBrj5/9Suf+867n7y9cX7cHi7vdA+2di/tjC/epwvX5+7R3fOzainF7t529dEyvHd0+7//P/0Pn/DlB8wtlW03ajDpV0TLnlvHS9ZjwZxoCTSkjXILXUPRAZ2iB0XSCAnKQsmDNPVAZNqCNPwjJltDoBbB7Wk5eR6YkzwGZtZremeNcRCO583uGWMQ2K+j1O4l0ScyV5+gnK12ChVSccrgAjQi1KANkk3VDY0bggncVPb88ag9Ol+tfnv91e6jj3/4lSfcciaL9vaH7/7uJ59cunLu6e2xP9gP/T1gNxBmq/DodPzrf/hffvDf7fH2eQAijE5ICFElCPeMlSJ4IhAJKKhtw9EA4Qx4/NdfVrNhcNd56Mhquj5W5JlAyBgVNnVKsK45PaWqkEybfdJqa8x5K9topEVm6Ynlk6AAIJvpDU932phiASS3vMONGdzX7Qei0axyFjcfdB1OdjrNEBY20VEa/Cig7JIiVsR0iZIYy2CCo+RMmSpfUiawCinYeQeU1u0CJTyTAyqoV6pJPXhM8I5r9KxSRq49vKqDq8ix05ICwqT2b75++3bf8imPfv3At+cfPzdvb4XFylful7/9WWjDYjmrDhgl/+r1f/Yi5dijBTsfIdqpomKialSPKlKNKg1BVDgU0QtK5iCt+Qy5olyoop8XgZfd6tGL5/7oB3987vSZw9Wy/XBVa2iO5iqh2Npp3n0XFYLw7Ppby27edavl6igwy+a0+7Aab+1NxpvjyeTi6e3pxnQ62S58RVS4euJG1U9/hlf+5eVy4kPsyLNAi6qgiedSXcGudt39pS88FN4777C4O6uLwnWlrjs5DuNy061LRMhx5xbw65oi+rbvx53f8MtmsTw6LlD0nfTr+eZk2gd+78btR5/4FNg7LqFRokrskPX8TCYWElXjM1tfyxn7kERoteGHSrQFdNaiJUdvm1Sm3AtRhZHPjR0kCeO05bfmpkAusS+IIT6fEgBOxYoAsbaBc8qIsZeEVYumlSjqmZCMhHOWAwYImShbbQDD6NdS4TB6TfPrlBf05MgNnL10em1mM/TTOccPgYGBZFyZqJ6/138Pcp0MH1qBnoruoUZP5HNzOtFsSGhFAxFy96t0knpziZ/SGmftnMUpdkZVT1mf0m80ID6xSwY04OFXRf5FqaDK/8F6U2u4SHObDjKeF7P5C55QvhmDUwFMISIqRKRs4lcIVGIcIHSIKqu9TTUzTkr4CcMsxoKiAAtx27e8sz05dzG+887HGl+o/MZbN64fLqtxsTW7d7+bXz5a3Do+flCPi09d+PS8O+qa4kazmFz56oH/wIfxclKOz5SHrnjpv/ziOz/8D/3us7dXZc9+HWvp4Vp2x9ClyEp0odSAG+gCZJPgtRICKDCCoPccY3aiYRZjt7mkdrRpOJu3vQ23mVnFJMMGs6cizINU8iY2S90gb2dxMAKlRIlNsg3Ngrpsf0JkR8slF08whEFsRtAKRyW0UCoJNXSkGKnbKngHsimbMh91989Vjd7/4ObLf/fZ83FPPnrztz8/O9HzTDGA7jadFE/tjFSrueff3Xrz4u6Td1v5aH/pFh/snd3d70p0ymPmoOjAPWurVDHWiAbhgokdwws8KFg7BCAfhJMHOB9j/N5fszo+BjPVUWaydT9EabySn3+1LiEd4qQlTwlvMIoZKIqmAwwJDkotgc1Z8jlI7A+jEIqcbENJVWbS+iO7d6VIQkwZTrTePv1vrk1Pbrew0SwIhLxIgWGq6OFIGTIf06YOs+0COYLJzLwIA6xSKljVQwsuGRW0UKpIK7OdUqkZtQSvrnahDMEHVzvlGErl2gWO1Ua1Pzt++c5td2VSKNBx0zfHYzr/3JNv/epXF8+e/fIL31jN2uWDeRnHXtynmqv7N2+vZ60ryrgI2gcmol
Agou8kBhO8cAydCBoRREEMIFcWo7Ioi/XyXF1P9nY2ivHuqb0LT1yhew/ufPxJc+8wTKp7VXH0q5f9+fMH77fxvTfb6akIGZWu9L46dfriqSd2plvVaDQ6tT2aTsuy9vCdshIjeIiPcKJBWnz9Cy8c3N2/devmxpndngMg6IUXJAW7ktHCF1UkVYYvEV03u/EJc8HtmKVe3esCLet+Gtfx9vt3u6W4sEVKodVuIUqu7bpInd9w6kgkzkOj6K9//MGzy889sne6Waw8F65gxBKIUfoYg/MVEIHI7JMjBwJISXsbRCA52Rn0xTHakq40b0qu8rBlh5qN5m3woZaw2dZKk2MSVRva9pTYAyBK2pjUkIETgAoxQjVIVNWxbXxhkQBNIxuvghMm00MVq+UASVKdhGhlXHfYDZd/wHp1K6aN5ZvSL+WqGycAdPqBDEsb4Ju7xFzzUu4Hf7/btX8yr30dIkmGixXDlcNJvqScp80PPqt18mvZvNuE9nZEHfMwtg4xUsbYh1RNCaQ2dpoCygNQffJm03wghQljiSvnaTvoZFEyIZVfRInJDgCpnbCRV27sKTkJpeugGXY+KXQIkEQsIqZMfGUAbOY5IQCC0iG4EY9vXP9FOf7wD//oh5cfe+b9d+9O/JXr+6+eefzcD7/9g9fee3N/sR9KHBeLR05d/Pj27fnBg3oy4TDtx6cPcVb3NvaunT3cP37mO4+ff/7LHzYu9MJdwWtBD12CGmDJsQlowA1LI2iJGqYWiB0QiFtCIAGTpOkuJdTQsE4CqUYr2Ngl4aboAACkR3CY7ocsDx2aKrVlFGRXXpI/uV3FxPPK5ROzKucRYlYBaMrBBvAyFUoOHijJtL9aK40pTiJNeYuX27LY6o/PTRe/+se/d/P3r5zevVgWHzWrOB9J0022JzX0aP+e29y+9NjFR07pI7tPV2ee/HDe/vrWbFP2nc6P6+l6MpJW0IBLqCdznQUD4qAOICgJDw0kD6L53KCmZ3EAgZGzshW+UeNw3rNyFhKjBZqThAggj0jsXmRQHw85G5hkj0zYSyBnbmT56aT8QOeRWnobyK27IvtP5WY6ubQhCeiBwZ9ssK96SGKQErS93yFpU37lRE8hHiAx62Yce7ZgqiAoG1c4t1AMFARW8xv0Ci/mMKqVogaNCAVQi5ZArRiDK2c1GXmOJKEKriSu4yu/vt1MebLltQv9El7Dg7i6sLt15uqVsqi2HtmaTKenz+2tZyE2/Qtf/+p707d/+9JrrNAAIi+rKF0vnc7XqxAb0Y5dYA5lXVyqt6rtrd3p7pm90+PtzQ0RunmzXCwWq5Z3JscXd1/9Lz8Ls8Omjt1sWe3t9W3jvYwfHO7tTMbf/vZ0PB6f3a036hIoSlZEMIlKFyCgpg3A2sNHccolGEFlRNAgrhp/7w//8K9+9KPb+/vVdKwiVLCCgK6P4MJTB2Ymx7IORVUxYlDSvowiEI5dz/AIvpFFgZI6lgCNQShI0S/DLPJaC081S1TR4NmFRXvn3p0Ll0+XXEBIegEB4lTU+zJKRyBiB8RU9CECBTQkeh08WQgVUbX1l1b/BWZieBj30yY6iXcAkECJyHGK/aYkUJh6GMJwqsMpS4Qb1bRH20xTzbI4yxMikTLbXgMFIBK9zVFPBo5IPN50VGwgPPhESaqECaSsGbdD7jfYIsOQxi3N5u/CSZohDPzE34OpTjAtsjGl2i6Gh/M0Tkhd6TdliJpzLpLM2U4Zzmga1uBYF27deJKYDr9dVYXActK6AwMkn1t6m/h6dpIpVoaFBonmw/df1xrJFE3JuNlEQ2zLkouE8bk8wkuXLffiD1+fgW6ayy0FcXJ+zg4JBiKmQYBIhhps366oGRWJ974qy7B76vLnxlfffu/WT3/2f9ncOP3stW+eGp+5897tv/h//c9SllK3GEeJZ8+fmz8yOn+4uqtdN7t17/JoVMvpixcuXKwvuquI4IM7YKUiSGw6DiU6yFJ0CbTKLcO63gW0AxrRKNAW2kF65h7oogTRFlDmKCpOJO1NYDU44CFoxTjotvM1VVQW4lWidV5s1wcEUNCoqi65KWanChWNmd2WqOguOW8IJAr7FI5BED3ZZgjvU+ProQ6oQTXRiGhCNbcbstiQ+bmNdnHrrTtv/PLKlixvvf6gufD0uSdvvn4jzL2Hax70rq3Xi8X1++9OLm9cvBbAVRWaz16+8tLf/8N3/+wzE/h+MpaFoCZZKwqoZ/agklgK1aDqBAGSWZFDvhqukBX7zAPhMHlBmz2qpGLO+wLQCDXrSom21ROAVSxI8EPa6SaqqrZNm4mZQ8gLMFKXmctJTUCMqDKpJqxQJbOeUrhQW1wSFWkfsOoJKdoyvc1rk7gQwsTOeXulAVOyfxaB44eekxQgEhaZCNywwbdNbYYVLMzsCF7hFETqhBhcQApjQbPzWgEj4ppQI1ailaIWjMA1aExu7KUCalABLZU8F05K7aYjfPTh/uzWB5d3NyEzeGkrwfq491t35+35T1+++/57f/6f/nxUjMc8evyRx70WZazr7ckSKxcjiZAwgcuRH23UG8Wp8YYbbRQ7pyanz0y3pjW5zvVFI3o8O5y99dqN27e6cd3Cy6LB/du4/sp4e7pxdXenrB+5fJmZp/W43t4VdJ4Rwb10LOSUVnG1XPTOJWCJ4AmeyIuywimb0W3nPffrdenqvl9MJuWf/ukf/fgv/393796fTDdDF9CLeOKSZR1V4QsIRSpIV9qxkIqLDmAJQFARkHApHiLwhJEjEYwFdZi39zASPy57JwyHGBVgpdsP7gT/WZQkIlyydAIBB68BDiNAFX0IHcwZkEvVwPAEAiTRpLNG1XpTaKrtwJp5nQoopaFWJE07rGCbNBXmaGIWyqQM6YlIAJVo40lLJuxyKhSbM0bApqpKoGADQUQAxORBNBCBcTJGHgzQsombsJqSXh+S3uRkogkAPKlkhzRiDJFh8EZEkjHhDAPqsOAWGHpW8yJU4lwV5yZvYCYn2nDKnEjK/FxeG2ybZQ7WYrJB5ppZSKoqDMPdes2bzB/qzJELDYkyvAH76kOkvI7QpMDWvya5sA5XIPPxSEXD4MmcPlcqnATJLsAg1yy3+L2GOykeQTIkZsc84IOJeDYMIpwDECRB6KYqVg1Erix9WRYO8Y23fvOb1949vXee2BPjU4891s7xk5/8+XR67tLuufWxPP35venFeYPFy6/ceFXoG3/w1ddevz1b3n7hm1cunH7i1Okn64lffLzmmrQo4po0Rm3AKEMXpAX3rI3ElfCasYa2yh31jbooTJ1on1TAIkQRFEQEENWo1p+pBkROUnCyihQn9z4PF9gMsMw3CR4cVYQ0OfU7RlCVCO+BE6YeDV6mw021etDAe2ak4QKsaIymaQBpANdKRGYMCw8qFRXEUY12zN2GLHbK41+9/vJu2Vbtfjj0b713vepGesSTQmtxs7vzelKe2tmOoiTxxu/eubN6vd/6dH3h3NnJZKuSWbPy1GtdUAktRApERACsFCPIfCNBIrYGCshJbTCQtC+CDM2lQFQ0gbAEd
mypNEZxeds0EatIkEBgLoiS21ZEfrAo9Q0iQZltR2TCIezY2h+McwAom2EW0pDGCqesgRIC28MpIgSnJ1QQsg0OSVcoicClCc6IOUPnijg1y3TClwCI1NhhGZXi3LtoqtXUErvB6k4BiQolZi/qHBcKA6K9OpVCURIq4pp5xDSGjBSVxlJoBJmwVIIR+1JqhAJdJauxl7Bevv3qq3s1KokhhKLe7MqloO+7FepdaW4FfXDuyiUEPr7X7M/u7N8+4JYnfnL+6pnd8VYdqppGk2K6u7EdFzEu+65tF4uDKO39/YMPr8/aZrHql04VsW9Du31+Z9vVu5Od6cUzriw3RqPxqIYDMUmHFYMEbTcXCKsP2nrxwSY+RqAOobTHg0URKYKZI4IQhciefAgA+RWih+/7db05+uG//t5f/OWPDg8fTOutvhfyLq4iCnXM2kaQoCwiASWTKHoREVaWSBBhD5QllSoeUkIih0oWaBZ6LBONpYAVRq4RsHd35vvLcLy1Oe3WidaPAHjlyFDSDhRK7xzIqXQqvSKoFsm+AgCVQAdlsD1dDGdDFLWnThCZAAijUI55CqMEbw6lIIaarjZtNDTUhZQVnYEtRpZm562/S4NePfGEYuZoE2KFSPCOPUDM5rmakFV7ywoQmVTKenFbCaRktkOU00PCro1bm+GcVDkIBOyZnEt+6llnA1W4NLhL4FJOwVGTywclXQ0AZaaQmDJsyBVR3lGffn3+7envDBMuWHVgQ1OjVeHEzsExW2nPac0YQGztxAkOYTtGlTjB8mZ/QWanQ0gvi8y7TjPXh3pXi+qOnELgoGo01ASSJeMeTm6ZJ5QzALldVgFUE2SX9spIGoobXQ0OzNCY5dBJrMaskYmEVYL3FXsluH7d7e+/91c/+um6C1W5/bs7d7/2ze9euvQYkf+f/6f/91e+9vVbt+7d2z/843/94t3F37/9cqgmZ568cOatdz5+ef1S1y2+8dWvTWTy4P2wfnDn7uzOzbdvhSgvfvNrpx+5sFwEEgprlL2XGKVTWhMtIa3QmtAhtAIRQRBtCWsmidJ5UkIPiCfJeVQQBSQ2CwAQY0zD3lRtwVIvsuzKeLiqOe8SwTuRCFHvve3jsymDZkWSAGBEicmaW6AMdkxCBAfiGNNSL4Zn9kIFUUXkhUVJ2QMFoVQpgVJL3xdxVdLR1LdYHBzeem+T5md3x5t7bkO26479spxW1dvvfnTzYI2li9xdunpxUcy2NqtHn/K/25+/+cbPH/vCH1bUei1r1/feoyD1IMB7VSZ1EBKo8WIdJV6YGzpas4YBC2Fo2S2vQa3yH7SCAgKiKDkHKInJHyJAnr2IhD547x0T4G2gkfB/qKqwp6hpgu7Y4pFZc6S4AUEC4DIUraqJhwFVMf2X0ZTF2TojR0ZdT7SstJaYjfltQA7Y2f2i3AA7c00zFj0SzK0WopMbHiiDJFZ6KEBCUCEyoyuD8h0RETlmj+hUWJRB3tcsXrhkFIwihFJQgWvHG05HwChgLFIzj3Sqq1qXzoVSZITFtpeXf/0+FvcujKddO9d61DeHvlnHGEuU041w/eaN77zwpXNPPXvUV9R5zENzPJcj3qByQyfdvJvfna3n0s0XH965udpfhEUnnRB3sW/KEr6icjKu4UPbHs3aa5/57NWrV7wHVLpAUA1dP1+uzQxEkVjCBDiIoiuYlVsGbHclE+rSaWauMYltaATIkxcSoejglHuyJagRErvptP5XP3zxRz/68fHxQV1vhl49vKygLOxZQCiFWKklUUUAhAUET1wXqJlLqGNXw7kg7Dvu5sd3ddTWjlCCEYQpgBHEoVg1zeHiePvMVDQwFxQUAWbVjABbMYC1t9DNzDFIjEtFKLxnOJWeqAYHoFcJqeeFS1ALi2fWKEoapQXI0hxDoVHAEGI2tBiKPiPMGIpfAaVtHwxbI0uZ5zU8mKIq0hkv1LMLGvp18ClFDJ2fkYwp+84aOdLcIQYHduQOMUNcw1zzoeybBjwiwprg16HnU8quEbl8xvCq5iaSnKJsTXpCvqKaOQ2IEmHGUqYVAimR51+eFUwq1qgrFBJFk6mOxaahHUraWyQyDghCiqii5DgPA4mNyiMiAl96sXmijbPIMIZE0D2Jh7mT1kwpITJvLx1alYfmX5T6Y0qzrkQ/QQqixrpSdea5yQSQV5gsweoAlWgBS2NYC1EZnCuJamobXTy4d9y8ulrfaleLTmVn9+q1z1z9zW/fYl4/eHD3zTfe296ePPP0pcW8uXH9w9+98sblxy8+eu7+u++stsen+/t367P61FOf279+0J0K9w+OQqGnH909u3Hmw7dv/sX//Uf//s/+D1vjvXYNr9C1ciRpRTtgBawFPWsPCb3zgbBWCYq1cgSiaAR6xDVM0GtEdLVkrCAiZwPDdJWS/5fdhnzt0sw7VStm6kSekzOOISL0EJkOeTTgPUNUBIJA8DYHMljFrm+eFtiOH28dqBYiDDhBSSjBIx5hNabjGutN7o4+/hCLGxvl7JHpTntfZ3ePmzvS7xcHN+87X2z4Ka3Dnfdmy0OpL29OLmzEunnu6V2educu7fnQ1Vg7WVE91grkSQtlz1LA9jhJQB/Meh9MykxpIR6g6MSIOURGBU80BQiBhiVj9mnsnKhktgbSaQUSrB9FRJJlrCVRK+8ELoFEApEItQVHNvG1QjwZsWd6pA1fyVZ4Gdxt/Sdk+BmrvOwoafrZdGchIcQYmNkV3jsnMbmx6KDEI44q7HLBLvTwGnIhmOWRbS8TsDoCFQQmYccFhEGOqLSWl50X8lDP3sM7rTiWwY8RavIFaMw6VqnXqEgnnsZ+7JcVrTbRlbr02ntdbY/0k5u33v71W9tj79tl5cEB3uvWI5Uj3j6zzaP6+WefL7cuHscjAc3AHbtWmnXo9g+Wq/11XETMKTZwvfrAqAgde/j1ousC5sdHXXvUd4vj+SeM1Te+9Y3Hr5wLXdM2yiBh+IR1DFMy09Alaq5GUQQYEYJtrh9FlJLLouEE4tmJCEMURhw3tY9TDcwO0ncdTp/Z/qPvvfijH/2471fOlyKdWg3YMbM3Fo+kGStQAqXCE2rQCDQCWLlm8SzQlVtK6N10RDKG8wIGmbYIti/zXnfw6Oi8qEIiCkIAR4aotAIGBSIQdYVGaBRRp1QQQcyP5YQRTIBLQ1kSBpktXg4ldsaTBCKBvqb+FLOXTxQjUTMMT5ods00kUF4IQpnxCWYGkUhksimOJrdqCDN8GptqVpEiY9kJ0UngKvK8zOa/Ce3Nxo046VYzDcS0NsMQVhWkiDm9/d6XmgFUmmzntfYJhyZG1n+KZceItCc0Y0knI9V0lu34WfNv8yWDpiQBtZSIN9YWqK2lG9JsXs7INGyEJ5hWFEwO3i4aTjYhpvErJUGm5hKAhtueq5OHY78mUW76CeP0Ui7lzddHVNWYoqn3symvhyUgnJDFVYS9ioo5aQO+LF05Eul5djA/OLjTdjOuZuI+ZHY7W6c+/Znqn/7helUWLzz/hf/4V3996fKjp/cuPXrp4utvvDk+9eHnvuTeee3G
1t7eucnZR8912tYvvvCN8WR68OHB1s6uzHC6PtWFVXej29iaXNo8/86d9uW/fvlf/fBPmvu9JydBIIQVOELXrGtrjYLwWkKrCMw9ECltXouwREJRoRpt1bZwIssYqcBSBgEJvgfzwJJNz2CalQiQDktaf6xKljrsUrFmQD//bBayKqBRBEwwerC3IYIqiyop9xDv1aQmKM0PGCihhdboN9CNZb5bN69/8KrO727vXf35//eNjX7XNWW4p1OqLo6e/ODjD8qNVojHk/H8VrvmrpPRwUcz/uDGt/7bP9u6+tSH60XN44oClVBbbmj2Dw5mNcycNpFCO1UVCPFD5H5k/V4iGJ88IPmp5oGelnA20mSFbd9gLJF0agTI+8rTD6bC1zE/zO5XSRa25DjLkpH5XWp8RUmRLF11qxK8AwExmmmltdOJu6AAkhGZLfUiiQIGu1RgUVYdMjOnfd8JH8mkFlYoS8bmgyoicbLYJ2UWG/064gLwDE/wUWuWkgoXS5Wy50q5jFr5YgwZgUbixhQ2CSNx4zD27QSzEcLUNSO0XhYjj/5odv/dl79ymS6cqSbjyWR7WhWoqkkgbVfr2WJ+v2vuLfzNjz9cdDzvaKH1Els9b/ugRVUV2xtVrS21bdscN00369ujdXvYyryXJgpWzF3t0SwOz53f++MffPvs+d3ForFqSQVAgm6Y7CqRqoRgBAAmVk7mrqoaVA1JcoDRh8iluUAgCQo26Cg9DwKyHRPpQMb1url8+dy3vvX1n/zk75yfIOFMzOyi9AQHYcQKhdLYjgyoBEaEMUuhWgoqoHRciJbS+nWxOZHMsycIJMDM7Asc6gw7xCPWAATRXtEDQZVVTS8Q2LFD60nG3jl1TNSJBGhkm8gpoMocB7BUAWN9igopkzlFmzLuZHZp6lARJSBAyfhDcXAzzdeDQI5dcgOxgRpITfJOJooylyRv8xDHzltmHZDO5AqExN7SlOBSrmLyUSU1CPkxz3zmE0DYsmGe3OZMmU88E5llXeq5B9sPBpTNK8cYNJztK+xMsq2ksw3FDE1ddGrfEys2vYvU7WCYPD+UWXN5zemDpYvlTgQRQ/NqA2mLJWZxxQmptiuQMNET4ylONcl/9UXZLCJTNzXlcGR/ysG5K39FyYNy+r3XU4QQnVchIfWUvJwADjEoE4pCnPd9F48XR/du3miWbUHjjemH21vVquuWq42qjL3oE0+P193may+9fbB/78rlJ4+Xh/Py/my2eP7ZZ2fNwfmz56ZVvX+3aY4OH3/iU11bbpfn5ocHcd4vF4s1gqt5d3sqx4vVbO64Po3drz75lcUn6lesMUAdxKyemRDJDGbRC9ZMa1vyRWRLf0Pa+cURpiPlVLGndVZRkpORUJaipchPw3OXmto8RslXyrzEAVvdE9NZ03RdgfyEJ4yImTyYVVgkSfPN1Y0Yqk4BjRoRAIYHlZw4saUW6ArtCm1K6cvQzvdv/Zvvfu/5iy/8X1//v8mhSodu2dUbuxu8I8c3p1vbVHDXtk9fuXi3u1uFUclH25PxZx6/+lG7qHVccajQO+21KOGVSxYWMATCadEfm3WkvVUSu5gARQBEnigDNVCX91IDic88KHwobeMG2KKPIUBIGp90mAlQUWRyZ1pGBIkGDlsxeSLyNYMa1rSrRFXNeZeTUj+N3SlpI1QhJgNiT6DkjM9s635hZC5rJYhiFBHx3lvTYQklvWkFOFO58klJaIrttiKY8zuYoawBa6fMkCjel4QiRsdcqpZMZXReK+KauBAZq9TEJctY/ZiwTbrt3CZxFSfhsAoPNmW2wbHWeY3jMvYcuhGaP/nS5Q3fop3Nl4eHB9eP1qtmvl6s1kDstF66nVBsPzI6pZu7Wp/pi5057bz6zt1bRwftvdAdhG7WYA5aCLXQZaG9+oL9uPBcAWUMi+Xx4Wefu/bii18rSpkvGsCDRaHk0v1IURDWDFkA66FMkgzDTNWnogHRYjKYRKKEwOQL70QIIjEGWztB7IgEysTMXEiw5Q+yaptnrj11NH/wLy+9Mq7HoqIaRJi5ADw5ryW0BCqgFioZNWhMqKElUc1Sgscgz6hBnkSIEOHMeiGJg1SgNc3dUZhG1KZetJ3ZQAcqiTpCK+hEzQJgzdDSagw+aYLZYrR5LikCQEA0dUquU09iMBtTix1EmITIM9nqu8w8ENIE8DNUQrDHmPPOnpiaKJEYJXkAw4RBkWDIZkwyJKLEPEkBLct1LVKJpP6M06LvZEpHSbRnyCullKUPWWLZ58hdb3KyIGsWh9z0EI+DWbOR8YnwVjPMbf8vR82Tf9QUeim345Q7Jvs5OqmKATASQ1hzq5pC0eDH+RDD8gQkT2QWSSqX1HINadjilCTay6AzzmA00dCU549t10uQc3NuyRImT6nMd+RsdUwOXgqwCpg4RnHOW8NXlL4ec+jDwf3DB7P9amO+OO7u3emOj+5u75wpxnsrubls70zGWw6bsRvfvekvXrx0Zu/4Fz/96PbdxcWLT3AhZaGz4wPCk7OD7vS53TPnyrZFDCPy8fBoUVB9evKIr/0iNtF17WwlnSwP2gtPnHv+29fqcjI/DAXAQjGCAQQFByCItEAQ7RSdaFAEhZD9XxUCHGnOlbZDzsBSZcAVHkDMdClDOCgxDImNTSUJUB2Mm+yOpBex2tPICUMpkyFOe2DyqjzJnZ5TwGy1lAtoeoaZXSbV2SCfyBEYTOoRCq814vzB7enYP//553/5F/98cPe+zMI4bJdU3f3wzkfHN0YjJ8cdnIs+hNWqHtXz2QxTufrY0yNXE8gzSILEFhxABQgQW+sEx44cIZKNYgBiZoVLxWE+c8yp4zd6oDlp52X1pJqs+ShxEFI3ykzGNMx1v2U/BInEjLyDwQSGnP2fBylBsmNTIPkBpnPOLiFpqhqDFYtZcDEU3unvVj0xOwMy0tIQGwMbRdK+MQ2QY9QozrNZcsgw9xlObipclQsvZiCkibpFxOzZ5HmkYDEIKdoR7AW+FCpVSsLYuZHzOz5M4TfRjSSWfd88CG0znXA5wZYGH5ezT2669b0qNGiOEGbTcnXY3teudVHJU1HUnv25csyj8WGzWHXtevGxd+srFz8Vy1NH0S8I7Trcfe+Vg1Y8dqnkctMTnCKCERAq8qENyo68b48ble573//e555/vG3n61XH5NkxNABREVWjJd1kEUgwMU1q7DSokjn0JxYBKAYRCcyemWwLRohC7JRUBBQ7JkdQEVIB+SKBnhwVLoaO4L/4pefvHxzcuPFhXY8kgLgSOOYCJVAoFdBSUIJGwJhpBIzAlecxoRbUUE/qwTWL9KmhBKAeqrYTgbxrq3Vfh2pSxFa1E+qhK4LTZAvKSDaUhaooWiK4KETsDBAzAyzVmKaT6pVC/rOAnCk2c6FplTps05cqiGJSJ5KYZCk58ikr2aocESFjWoGYweYaZZh1CPYe2DFLGj2q2kYlMbUP2xICwIT5ompWVgoGSfJ4sB5lSG8GNBO5dCowJMeBEJVcrZAuKpGKWWH+ngoowasmpx2yFmx9iR1zEhG7Oql+59T
v53o9oZB4KPFajWAkkSywUsWwpiulW+8sB2PYy5S+L32axIX+31dJeOh1ACJWztbV9p8NJz5xyDvhKacfZfNKGfSLwz+yeTNKCIGy0smSr91qZiaNEPIF+Rrzo/kH1/fvHxw2TX/p8unF0Wj/TmjX8tjVyx988J7I6Xo8qqpnZofTyRRNdyPGmTSx9Jt/+n98Nq5PFe482IdYgn3XyVY97XoRRV2Fvj+K6xJQosIXZVXXY7dVlVrUJdy4KgpHJCrNTDiycCB4pxJhfIeoCEAAAlEHBEUgCkRCTmByBARAk5NcbnqQkZIokQyVVxGVbBHKfAJBJ9MkszHXGGFRxzh7lJK2II92c0tHGSMhZzNlEgk2slQSz17VKTuyFcICYmVick49lKEu7aqBI0/qWRlUOw7rQLG7d/vgFz/7uQO/8MI3XvvZa91qzWDv/OZkevnKlVt3b33q6jOz1fLimSf25frdcFfWHjE46ZmloOAIBWsngTKDwvAf8iRdTCmTHMCkrMT+oe1D5jsLCCcpkUaJEHDeyZCmI1ncY3WkRHWOlRCDqGRIIHsU2DhABmYUDViUiogNdTUdBBKVQW2Y3qeqKrxLDvNZNjUkVJy426ajyxbgLTwSnORlkYkkqoZfiUgy7jAhHqVhQjqSxgmT1IxHR0XeuAomtjqG2Ad1IM9FxVw5N67GFGuS0jwb2n6+4NVK9peH4XgVDht/LOc3p2c2uvvd4fwDXt08JYuqPZzgkEe8U0lBfbnWcu1H5YVmFVuJTVcsmtXeKdz+8DcHiwM3diuQTi60W9sP+vGRjtvy1L22PDv1xbj6ZIUYlMDSBVQKYQouLKOvPFQeHB6c2dz9/vf++JHLp9rmSMDMXmMnEEgkUnIgZpGgRvwhU6lZBLUbElPxZB61iWpqIXrQGXC6G0TOaRqtIRqnPYY1vAcY4kHRe+7Dynv/3e9+48///GD2oKlHu7FnplKTX42iRtqZPQJtACPGGKgJI+KxC2XgirRk7zqf2e7QADH6PqLzRcetdOtJNxpXoYnoQB2bO42shDpKH8WQSyUIqK+IzC4jqqojUoQYWcxKb+AMmVaGJPVgBCiiaAbblfKoRRJRMfVVySKJIhJbh7L7RmRIbgMszgt7ZkBEYowi6kvv2MUQfRqaGulnEKFm7U7e7Sa5nLQ8nOqTQaXwsEkWIQHogyhiyMikxGwTBOSmmWARD6SCvIbPhD2sJy486ZMkDwrTLqsmiX2GnYemM1V+NFxkG7XaayQWLCVsxrhMdsHsLanKibdWeueZZS25bWbiYdyk2XWTORkHpb5KcyeP9JHTH06cBzSQUd5zpje4XVSh3jlRxMyLEWHnOKoX6Rlcec+1Ozw8XB0uBMt7+/PCV/UG+2LyYH67qrb29nbLavdgtj/dPHv39ofnzpzvmomf3G26gvXiuNqebJeb050YSHzdNI1Q1yxLz+V4KhJ97c6Q5z7oRlnXp8aK2jM7XwUpCN6r91RHlr7vYy9aIAbRDlQF5yDqidaAKnqVSBRBUaUHRccBHAlKJIJeVW2cPbDP1JpgtVslnIH/E65aMmDRdK/xe70PmWjPLibDjAZTLCZkiyxL0eknsz8cMTuyHd8qIYYMTTgRUmFSSV5YeSQJJrAVWAG65tjVdWjbed/OPr758WI2n/KZj9/92AfP4kiVHI4P25fv/gq+2L978Ogz5yeYLo76lhZ722dKD40tOBC0LNJuF2ZlR0KqRoAOtgnTKVy0j2BtYnK6cICHRjM+g/mZsIHVZsAcSch5ziBwEmSJwPq/5PKiPEyekDw3ALV8ljA80cgpQ6ZraBeWszTYxmqDUYa1x0ogHQik6SykMyepU7dMLHnjiKqYfbp3zorQoMpmzMJOCDHtG1YmUhooJlY8p0CZaNcICiYUzJ7Z+bIWePZl0FJ6lpWu53Pq5ySNhia2sxgOe16sfHtUd8eT2G7EtorNXlF+/NHeQXt2wuc3uh3fPL7rr+xevrj3eHN4+zcvvbI5eeKT266ZdfJgDnJRArXH5x7ZuX3zg2Z+f1x7RsG+fexC/fQzpw666pi2H8TxDNMXPv/5V949+E8v/barNzVQOXJh3bEgNFxuFH3brprFtc8+/Z2v/kFdu8Vi7tmLMDu2LWfsECSqMrS30CMcQZzsIxKAxNCBJ2oXWDxRDFEg3pXm9GkTOpHAoGHQqUrOFUTkPEvsiDwYTF40AJBIm1vTH/zgT/7yL/9KApyvxUUtlGqgUtTgmmRMGJGOiDagE6AWGamOSGt4LxuYe2md2B4uhXZWTYkgkO9L7iSgbrG7qaVgLbJSE2+TI7QgIvIkIhCQkjJkDuaKORgkH9KCWi8iZjaYHWw4VR7kbf+cZElqXkBETJJHsoL8PCNbyuctJebVYwknzcIUUI3M5NmpROcShBu6zspYn/jGIPu9nFxCxIZwRKLWCapmnkr+TrLT6hNVSDJQm7OgfSsNJjZysvsWBHLO+g/NWjylNGdN504Ty9XOa5pppPSXgeyHMi+xzZ0kWd0xkKB/mCjQkGYxwCFluhS5rE8dsjy7h3YfmnaMkTksGaYmiEgU9dmeKYoZLPPDM+CMtuVKik4ysU2CIcgGtellNWfoEINNvErvQ5Cu66AMx/XII+q9g/22WXx8a78oR6OJrluljcW5R+PBnXo8unD9/Xu7O+7g3mJ/f/+5L1yrxzvvvPfa1mb3xOlrjPF0e8P7uA7tqlk2jSJ2dVX6cjLdnIxq71zt3YQY5EZMDiDpnRAgvo/iKPQxqusELfUEV8RCnDChc6UyuSCtJw/tiKAaxEjaZHMbI2xHQUBaFqvWmikwuBwmrNmIOExMpJJLweHhEsm1UyribCCipgwWHQJ3Iruz0Tk1P0upF7Y7IgqyDAWAA3LTZs0hYBo6Mk2OcgJnEnBBypACkNAXTo6aWXM8v/HeHUaNgE/273BHXuqw7ufzvgA+/eyj5y+dv3Dhal9JdcX729X+Ox/snT7ftjou+YH0Ia7VB5FO4KGR4JVU2To545DZpRHn0igv42aJ/5zESiJpiR8xO7JVCwBCMLOCE+KkZdlUVCgjzbokdceWfU2pZdiBxakYjTWVoH62klj05GSqIg+bzBEjc99Sl6sp0UMN8LCcoDCv51Spsxm6aRYZs+GDanvBUqsuUUzV+zALgBPCRL708F6ZRZwKhU5CJzg41kWHwwXNW1mEdtGumqZH17jQVKEZodlw7djz3thvj8uzVX1hvHtx85ldvjQOlyY4u4mzG+u9+njazbZ5huP5j3/y8kev3yyubPNql2fBLyT2UUTO7Yzkk3vzu/slE7yThdKYFx8vDq7/bi6TOW93frfHmeNi/mB/MXbHVBYrHYUuYswhaDmR1aohh+/+4YsvPP35Zh6btiMuBB05l4ytRYIEV7qogTVt8tFhzAKwLTyhlFSsyUpXStTEaKpB1Nk5EjEjC2NsOSISlShRoUwuI97GDGCgBPFqFc6fv/DiH3z/r//TP5a1wCvXLIVQIVQzRkQ1dES0oZgAE8Q60gS+wBRhhNW0WIy073tijUpRKToS53yIsh
ZeCYfop2F/inJVoR+P+xpYklakCzHzXcdOg1rfDAWPCKsiCJjhnRdpYzQ+dI68yWlHyHidlgXIEWkyi8hRfCAbMbmEqGW0Bkgep2JCcpB1jzmhiLFHQ+ydpbsUhZIu0rM3rjkZa9wOQPKUSXNeSvFOLTtBxGBkHSQKaYdAsmlOCIb5e9hpsWOS9hMQRMSlEiMRtILJOohNHU3sbCQtolFELc+lGIrB1iMpTIwhBgUDSQOZT3ICM20hgSrEMQ/UPiN/ugS1mU2lgeFpR5vEmIJ4HnI7UFbLCRF7ytZ6YqIxjThZ4UQMsv3nmsQfRJlyMoShXCgN0dApg1iJRdaTjXq9Wtx459aFS2fH4602AFi8+/Yth/kS3f4t99mrj77/0c353b6ux93azw+qu7c+GdeQ/niy/cjexdNbB/V7N6+HdX/1sWfOnz/nCxbplNsYylExquvxub0N9jXBKwHwIRolkqMQYi/S2dTPOLhKpEKk4sh7VApFWCkgjtmcWiCsAupEA6COiTlCeyJ4F0VEk1mrqAROSc9W2OaqLT0Uakk1GoAzcJbTHBhKRLbr1pR/QPbo1xAjiNhxiMIAKbzjYE+OY6gOoKchGzQ010xgFgEldXgmwKlGFSBCg0RXcDGw0NkxnHhWB4x9TzHcP9ifH86O919z6qXrPDw6tItGFvTktXNf/FK94XebxXR+vAxLvbO63/mj82cubY0n7bzViZi6mCQA4stCWZ1nIgkQ44mBEIKYAFfimokB57wdls4I4soBAvKIImpWaJrgYAJF41T1af9t+s8+4Qaas2MivmquUWDGsKwitmOGPPchELF37Ngb9VPJriDspKuqRGUCwyGN5iURKWBeoJLMR83owI4AsYgICymcY7PjMVRaoSwqnjQogqQsHoTIPJDgPcFBvVMiCGKAdk03X8qiDbOFzhsczXV2LMu2XcdFFxchNuJXKFtUa79B03F1aq/aGW8+Mj1zfnv6yNb0wtbGpWlxtubznjd418+2MJusD6puLu1x18z8+Jjb1Y//17+/9d7tqqjb/cPJZGe+pHKl3azf2R759frmR9e9Y/EsEtgXruTDj+/em91alWVbn34QNx+E6aLYOnanx8Vup9bbEaCld8tmtjWZfu+PfnB590Jz0IkoAwQH9oQO6hUdzNUkqgcJSJJdSYYlTlABsZKHiVMiMEYOpaJG04yfmVlOqqRoha25BCHAOLqivYDtgVUwkV824eqTT32jwT/+8z+MxmMpFBVpoVQCI9ExUIublGHcu0lBtdQ+jnU1ofUI64m2I1kKme4mEEVAC3WK2KCoyPVOpnK0rVMKYR07Kccdld5x0n8qC6L2aeSlQckzPHMoobbMgwEPBGVHDCPtK7Ejr9qriq26IRVHFNNCPjXUHbkOtX0MFtFtN0gyj4EaoxFK6ljEnD3TxFcgpuaJIoAws/cFFKLqJcSEB5tSkElVYojOu8x/Mk8Mr1Ad/NNzk6ER5Nh5a63JxstJ50904phof9WUcRxlJpT9tygxigDeOxG1RaIZIbHWOQao957M9mhoMZP60JKnfffg0Ji1E9mC0Q62sS6JnUaxXBijsLUXKiB2jhUUY/ZREtAJipzSApECSbee3OSzsa01CZIVYGRLw5AcuTRXUnmhOlmRoTk4KTTbhrRl4W999MEvf/4vjuiVV3/1re98oZNDkeMujI4Xo6c/NV0eLG7NDk+f3a3K+uO7x7t7a3Tl9vaFnfOjPdq+c/OD/V/fHlejRy9fOPfIbln50An7uiqnjjfgbWeWC0rSqiBaGV2WLNEkC0Xe5c5JIkLexMjesyKIivdOYrQRu9qoNg2cRDWoBHt0o/TQyAzvWNJqqTyVSLN5SZ7bw2XmRDwb2GsGpaaCFMIJBk6yUdG0Sj6KWs0T1eK+sXZ10P9K7n0zDY6JIVFsn7TzzOSgyHumo6pXjarObhVrEfvIgSAkXdSOuceq9GvyDU2aGG7eXUg99ZNjmgXRmiWuFmHnwvi7f3aqkPjqbw4//OhAfbl9ZuPilQt+pzhY3Tv7+OnNM9O7aBqpGimi31j0vncFelBE6ITVsU17BMyAJwKrigqEI3O+7BLYqarEKMQGqPBJaZPdYbx35joiUULonWNXeMlyAwLSHCyPlszmwk5XlsjB6J82Awqqlv4c04m6wRAmWMGjQDB6HTS3ERDRCFVVBptNQXTOQaWTwMyRPalGiQwBUyCoBCccydg/hNIVIAYCc+fBQaSTdtZ2y7kuF7xY0OGhzB+EZd+v2r5dtetVq5iD51yuyAe34evNanK2nuztTM9Mt89t75wbn90sT42KrbLYq2gDMkIca9iWdqxOuWqPRedRFgUvN7mZcLc37jbQ/uTHf3fjtzc2eKSNHDUPqt3FZF3M51xqtenLO9ff5za4wsOR92xSXSmIR74YcVm7ELjvII56B79uHI0dVYFQ1W52/8GVxy//8Gs/HIfRYn/FYHhGQBTJ0njJF5oBjScmQmLsXAtKBMnkHYvikidhJkhhTptVdeCRZ46tAUygBEIx0lzD/lKYX7mqFyGG7zr53Bc+2/rmn3/zUlXUXJF6aAUthMeMMcko8tS7cTdGO6Fuk5oNtBVWm7KsdUVQZS4keI1RI6ABNEYZ4FtH5/yslrpgWlNYaN+UGwtUISqJOarYqBoalJUlKHpAQMEDYgp2VVH0zAxRib1oSCu/E7gZRETT3kCDkNTQHcsCdugI7IgVsDSR8GqDYxhsjQoAGO/QYg1D1RFnRn7KYd4YhjZ2VRLOcS6GYPkjpbWTFo5OOsskFUlTtMRq1wRnWTZMrrxiOPCA+ik7NxAd2Xl2yYcuz4VT9rUEAFECW7dkwLSRkpI/RqqAxAZ+ROAh7VnUIHLkbTodQgQACeydd2xEFFUD0JIxXoYlJaEZSAE7vbOTSX86ATayyIh2ZmaniXZilg7A8lCPUsq6mk0mKNUorFDEyFWNw8PVeLL9w3/zub/561+8+ruXXnjhyuK4OnM6PpjFxezS2b39o7Xsf7yA3vVFuTF5/KhtmubuR795oxB/9vTe17/2pe3NKcGL+qqaoGawjyIClq6nNJkzy7fkGr5eyXhj2vfB5twhRu8dFCrBOUlgLvsokdmJ9oAqIYokhgyZmWq07dP2nAO9RWFRsbVPMIPWdEmTlHOYu6ggmmSPTAg61IHpQtFwJ2x2z5QZP/BkGLc9PTw8/ym7K7I4x9vLZPdjEJkjeyRDg0ihbGWjc46I8kpZNqEZC0GBHtrqeuxbVzT9uHXtJ4vGrcefvlw20oYDXS7Xj35u8gdf33vnnw5e/vsVirHfBtUtJiVve94pLjz6zEfzD68fvD15/IWmHTW0MeuLwBsrrWglLjiOHLqoHSiChZlEEYlEoexBQiJij5ARNAElVscuarBiMB1bVhqKHxvS5BbXDCaNyUUJbIdgqGWHeiXPZVSDuV3YWE0kiii7ovAW2jTD3XaiTYggyogB5pjhXMob7MzxxoGdQVxEzF5EKHapfCDbSURUOHiCQwwUe5G265pVt5jr0ayYLXC86Jp5WBzHdr1et01oF
zG04JalhZdq7E6fHU13xpPdi9s7m9u7p6anq8lONd7x5YRQq5ZRvWyIVKylNutenPiCGaab1cih5q7WMHF9Hde1LHZHzSg2//lHf/vWy29OZbNbEDfUL1dHxx/tTh5vG39qo7z/3vVucVSPaulEnESnkJ49oSZeIfgeesgQhuO+JFmUrqxi26HWqpzfP3rms0/94Gt/zHNqPmld4cFmYcCOvWQtGeCQ5gKmEODM08lqFkBhg8V0K9UCWIpESRNGaXAYQQD5PIXTHKocACU1Q0+FQHzSEJOHcpJIRadOv/YHX12X3a/ffnVcjWMRUSlvsowVI/AEvu620Yy52aLVGO0EixGaDaw2aMVkpPTITlTEGA1rVCsugrpt3J9SsSDXSOep9Yie69nmWLxTQnSCCOqIIgHgQOgB81UIXjUyIpQT0RienVnDBVJh9qqByasTpCc41RnMLBKMi5PNeWyGbgpeW3iTZqAmDaAhrtnEFGTyXc5ejCmXgLyNXcFpF70k/+jEYXBpdUMKW8P6IQyjHQxch4GjlPwATLRX+rRQOlNYCdDhtwBGXUyCfQU8ucTCTE0hHMh5Lyk0WOBIFZiKptJt+H7lh7ulvGEuAd32/QoJXXAqXJQAMVOQwHoCc5JddAy0zP/qS5OzykMjLQvzxqMzO8jEg0hDAitHNI2eNX36rIFOODrScg2ICmtoF8W1a1fef/fNmzfuMOJ4vA3lTrqSdi+dk/l8JkyItHW6mp45d+/g1j/98ufarU9vn3n+6c+dOrPpy00ADp7YMRddNGm4iAqReK6GOygSc7UhqhRDJzG262Y8mhSe+75l9iLaxb4oyigSxTG5GHvOrDOG8dBsGCggMCUjQomdSEj6URUmRyxZmWaPjE0ohtLOfFg4918xj2w1Dwg5KwCS7IhtHzNURYNtYrCpv1k3WMYhRJP3CiiR+NLZNBEPs8t2cYEsdOXHnWCuNxjGS9pJ7JQjaxDtIKhXsrlTLGetf/Zrf/Lyj/7HWMv04vjD/Qff/dMvPXpp/6f/6/4Hr3WnHpv2unZVGQgfz2/Lg/i173x9Uc6e2T79Dy/96vndz64nu4uw2WjVaK0tuIWsRBrx0RuII0GgnSIt2JAoKsHKdgDGMGfHBIrSD4CCmg1YftRCjI7IRrXe+yj5PKbmNe3aSrcz/Xgey9uUi5STY0OqhWwOHaMJlYmUKI27LBok5ZjzHkCIokGcOVeJgaUkIlGEvSeTNxBKL52qkkNEt+5Cs9JlE4+XYXaP5w3ms7CYx9Wib9u279cSV0wr6IpZqppGZTU+VW9vTSaT87u748nu1vbOxsZWXY+5KAUE9l2oRHzbsSw6FSgClzXqxKvjkrhi8SJOuSJMaOoXvm+8a5wcV1hu+K7qm//y1z9+/5X3tnkq84jWaQPueHF0WG9Or2w/ce/m9eb+QT2qQ4B4KCs5CCmtiApFBfao6rbkdYmu4s7F1tPKY+ype7BeP//F51788rfbe7HrurIqZa1mFE8OJkQ0R+vEo7UNuOlxz+4ClpshhqYmbioy+gykSWc6WEQpDwAarFtIVBn7XitJ2ZlJRpqhCikY7ElLVe9GPjoRlW//0bdWVfvGB29Mp5OuClQqRuBN8qN+G82WzjbRTdBsotnAYoxmimaMlgFmc92KQGCQMHWo1iiOGuDg7TPn61rcHNEjeBGPCTmZT6adQFk5sAbVoAjgklHnkB4Z4kCeKBgtPxrjiZ2IqJKIQIU9k03LSRLok3CjzDdRNd8oZlsarZScTKw8JUuXw1lDlsVIIiravDlP2gheJKZO13j91jMSPDvTGDgQgbLYDjpIQTA0JIYEI1uvIxFfTG4mwo45u/VY8WX43kPcaSA3u+aTbJMpo3SYVS+JOYG4RGI2dX6GMZPtOlkITfUJ6EQCgYRci1045zjGSNR7X4QYQoyFg4gyO4PBJbPdOPFwTkDobDlpawBw0iFAjZttowCQaaYNVM5Ef9XBhcuC3OC3ggzwpB6Za0BEyu9//3v/8st/WRyOLl+51IXNypeH9xdlgXJPZwdL6dz+fP7qa796ZPuRZz/1zKXLj5Z1HWIIkUUcs/YhCLTwAhXv8pJd1UgBGQ+HOfU59s71XQihY+bSuxDaEAMz+7RtKcZkDyIx9sNoiNiUmqIxGCzpnFHug+EwzJT59GpAKqUCMRcx+R6dPAs56aYS3W4DnXxPqi1hdIRoBE8HTsWnWrGVcH7VPMcAOe9SM6fJr99UeQxiZlKOJuZhDw2qhb0aUQQKsEgQCtAISJo2UQD33BYIZX377nx1qBifavCgHM+vPl89+ZXwl//j7fm8Pvv0ZHHQ+qpaNHGyE//4T69Ozzm3F2bdvNyVbZz+5W/efOa7X1jMfOc2Wx058dJHrIEV9cvOxcKsso2DLBJEelEhFWKOMVgC9h6AxhhFI5tuanAvQboynNEp5NnK4I1j3FgbuQ7erqllSBOoLEww5iTI3E7Ye/u3NIxH6rlzqE/QidVDDhRFYwxwXJZFIBCRFeoEhLbvmmbdtrpY6HxBs3mcz7v5LDTLdbNcti2AY4RjDT1IfIGyrrem1WS6MZ2enW5v7m5vTKfluB5PJmVVs0MQAD4GXnfargP3BPM1cYWqahA29TEKhgOTlEbGUXj4guGSLp2jjn2sdT1Ct1EKt+3f/PjH995+b7ecrO8HXbB2kRpFX/q1zI/vyv0w2993TCGqlMSelexJg45Ea3DvOLJGHblurF0odBSxIiolNvMHL3z5D178wjeb/T5Cy7rUFnDEfqiKTgyMbDCv0MQwN2FFAtgkA5ZJBTxwSK0ZSPTok7Nn8ShH9pynH+5ENNEkHREDSRiQlPZwNmlVh8DynT/89oO/Obw3PxhN6r7sUBHGmHAzltmmX4xjO9FmkxYTaiau3cRiQ1oRgISjspIYW4F0POqEyoL13vu/+cynLm2jYtvSYDgw+V6LfrJBII6M3mpUYA1UQAQLIyi6EgDQE3sAkIjEj2BRhvYOLMmNPgw05oe/jP2c9DoihtaYNQlwouths99Qa6gSzQKicGnCmNidpKpkGILJtk2ag9w+A9AoEgWOnXNsg3slm/4qKVNid6QhQbpPuTsc/HeQYiVOtEpWT6sOit7sopHeXCrZk29kHkqkqGlQSapKLE9mIXB6KA1Xzb/MXs+o45mYY1gAUZCY57jJWEDTIlo8NDUchs46fJbBhcAQMitrBOrTU46Ex8JWEKYX06EMyZcrbWJD1qRplriABaET1OONZ5+/9uqrv713sJyv4uLouNMVa3l4eK8sq73pqeevfnr6/Benu9MQZL3i1SqQ46hKrhdbLwDtYzC8HaIE55ijvd/0W611VWIUhQfEKEghxK5t61HddYu2XZdFVVYVswudiIhzHEIQYiKw8wpl4yxIANjGe0TEPjPbJRBYJQKabyuQTLx/7ylPQ36Jkqn3ZoxmOcFYcqmzgiI3wxJErMBSMU8Jc/4WBYhYyXlvJRWG+QGd/FIbQ9rBcezAJsAQ88oNMQK97fvSICyMCO2SAZ6spas2j0M3Gp++M1t80kip+MxX9s7v
Hv35n790v+VLV8+O4qiT+0eL42vfnHz+G6euXz9667Vq5n8hW/Hg1SLuXj79zM5srS1vdG7Sh1ob0Jqp19hFaVVboo4IEWRet71Ib45YBvgD8M4TQyUA6p0PsRNEZvbewYYymp1YIeQS/5/A7FhEyNm0KCny85FNRxWpXEq0LAKp6uBNxuyQjXzSk251qCZsy8od7wv2BFJHCnDsw6pd9926WyylWYajeX8064+PpWmaxbJdN53EReiaEDqgKxyqqtydTop6tDU9vbWzvbU12d0eT8aT8bjcqJlKWzUQOg0hhiAhdCGCnWhggJyr2HkDasHOyGLEhmtEIkUB9mDvtVDxqoVKCakVNaEmL51Hx/1yOm5pce8nP/rz7t5b02rS3OmwYr920kRdEnfAgtDKrNvvpC/YOSh1EK+A+qIU7mhENGJ0rK1irWW1Ln2sZFXSRs1o5w++8sXvPP/iN5s7PTyTl2gevQywsidldVqI9gmHzLRFInONGmZaJxC01ZxJAI4hq2qySbDvlnyXbT2lIonnf28OxzD9AZGhqgSnxFAGsTApInmCR+CwsVP/wQ++85d/9aOOOzdxmGjNzTgcbfvjTTnapNVUF1NaTfV4Im3ZBjTEEQnHBUHZRwJExuCy2xtx+8k7/SfvnL14mfs1k3rA+0jCNshcTMcQoAf1xMroQD0hQKGIBCEEBxS2FI4ZmbQcmbKSJbaqAlYbpKSeUIWzOk4TOQvMFCSag0QmNquV86DUf6rCVJAKBYTZSWLAGdIAqPqhtwaljGEiVBVhSnxjVYkR5NiWTiSWr6ZMQ5RkAyrWkGYpnr1TZ3S7iKwNethmgV2ygNbMyTMxQxYNJqT7YYvk1JZmypXRrOwz2rYie944mWqlKVZGrMU+rIg67yCIEr13DBdiNCA1xoefWsoOTEOCUNNfc648HkrNIEoWD5SNMa1BJ2IdJgIP1ZqJG/PQVwLzCawBWnoOXSdb26e/8Y2vvvQvr8iqCaHfqTdP7Z3+9KNPXDh/wVc+CDqR2TyAAQ22i6bgIkQIdybOsksOskUDKgjZXCt/ttSzEHsbk0vXRVUtSx/6jhlAWDVr6DgEEYmbm1uqQiRAjAqNweAGZgcWtVW+RmIIRmGwB2NQmENEUxkvSH7XoJNiTg0SQ8LIRAnJ49+QhFyak2ODbtCHECXU47EIyLPLsUlVKOMhhHS8bYiRlHEpMFHCVcBZDenSWDtZVgToml0NIQRoD+2UWpJG0CDWZVOPxxunX/vg9rTaaXirH8nvrh/ea6qdK5P54v7R3Dfovv3fTc4/Xv/oL28dHPbllLEr5ejco5994ZfvHL7/xkIur/z5U4um0KWgZW01NuKiI7B0omsV7eA6mHeLqkhQRIU6xwCcp+SGz8zMHi5GgRpxMuVGyuVNolhJOg4S7TXz85AsOhSJXAJORgPelH5EHAe2MyhKVDUTWQP3nWewK7wzVAoQ7TS27XJ5f9EcL9aLRZgvuqN5v2xiu1DV1bpdNKsAaVQ6gnrv6noy2pqMNs5Mp1u7O9tbW1ubm5PJpBzX3hUgimLeCAhBl6167XtRAN6nmbgSXKkURJk17UASZ2CZSLAoZi7QXDFKNStxEmGIV5CyA3vAc43OU1tK2Jnw4mD/P//Hv9AHH5wtRut7LS1LNKFvxC1LWYrRArq160hQjlZd5yBVdOgEnoJEFMBauRVulCpCYEZgCk762sV2cfCF57793Le+9slqLcwC9d6JeY+qsGNlNWINyAyCeLibORDFbD+Wu5AMPjKxDpMFSUcjPfycD57Y6E4GqHIgP+a45c3zQomNdUTqQWUUciWo8pEje6KCV9pfeOKRr3zrK3//yt9NJptcrjbQTGgxkeNNzKa+28JiC4uJLHjBuoBbmHOyQ69pjRBBiXkNLUAbLCqH7733zKVpQOnZmy1sgAvwAV6Yl3WFElSTBoUHCqAABCiBAEgB9IwSujaZIaDmoc2MEDrAMSNRD+0SqyTphJphh2HBEBHnCzOCTCtPokoUW/RpySARoQRqRNs8bSGijPipz1SgFPsNs4NtAcoXPROelZnJ5RWKVk2pZgdHAsg5NjKe5Im/iDjnSJ1lJvtFUVWCMDs2T4ABA7GoZ3Qp63xyftLcyhqL2fJJfr7ULDecLW40TnRGUYgSkweaAGuFem9eg2oMI1OwuGxEoooo0U7yYEs5pHm7KCdeYJKnXQlzTl5Nw6OvCjCQDODT58y8LVU56b+t9rS7G3RNqEUIHFdBCj/+4pe/0Lda+drVEVwQ5HjZS4B3gMKDRSJYRES5BAshFFIGEUh0rBIELMxOCUQuSszNEFJ5DDF1lbU1BmYyu7Zt+17KouCyEjFGCsfQsfMWrzhx+iUKokZm05bQQ3UVrOSjh2uO39vLYWP/7OCZLx0RZXuY1IOp2TmL5rrM5gFgZuedI4+YzNokxiDivQchQh1IgiQtSxoXpOczubuoIm2NVNvZ6cjuS1Qh4sLyUtc1pR8jgDp2gaRTWQZUihLLenK4/w5tP1nouOuat/YPJ/rIUSlnz1ftga6x+Hf/4Xkplv/T/+dNlPXm0yNXYL/XydalM0+/+MIV/Md/eutv/vnGl3/4NeVNLAO10DVzZHSsHaQXzyoaRQLQM0dVUY1KVFelaAcgSiRV70gyl9B5VoFpDCwrG8pEBI2RmMj4/yIgSiwNmwzZmY8n5aWwZkmeBWcBEBUgKpyzs1gUzppsk623q0WzXM7n88X8uGkW7XwRmrZrFl3bhtAFCZ3GoCplNd4Yl5OR351OJ5vnp9Pd7d3t6dZ0e7se1XVdGjwWQX2ARGmCoF0LKbk8TwMXCmF4EiIWQWAGixmlqPoIBryDZ7CHswkmkZDCCLSkBMdKigLm6UHJfcTqbImIpHE85nu3Zz/70V9Pu+Nzk3FczAuuwVDmwEIsXDD6qI7AWjofAWbnRTkqPIsLWkTyLAVQEmpYz63CEC9uY7ZYX7n62S9/97v3eg5p2w+kUxKSaG5XAkGM0VtSsApTkSVcQ9xLwFryksmZWdKSC+UEJzGb85EmAI+S9j0hGRjW3+XYbXWfalY9IE0yyLAEEVa2Zp09BQrrGJ974dkbqxvXZ9fPTVBJW6Ir0YyoLdGOsByhQUNomRaqCyuKjdLsjPavUOqISijgXX3w4aw9CJtb3TqMNlD2Qh18h3JN4x6hqWvUQAt1ihJwpqkEUXJkwskGaI3RCg/HMKs40/NqjIEQhh0nIQTvkuMpJYQSAMMcUlWDRCM3xSgq4gpvExqk6tXq11S9cOqk7S6QZxqyylDgKLEZe1tMtEVILkoMEm3jYqKnGJVD8+3NxTLIGNKqQGF9joLJXDXBSYbgJEGdJ2wpUKLqWT+U/8w4SWi59TF2uC0vs50ngA05csepDxUW+WdTcqGcRG3DBTnn+hgMr88oAYPIzIOgyo4H46AokVOpDyIjqZ2Yb0gKfdnTJ+qJMdBg9qEabed8pvYP1G5NJbyTUCVA1Va3rjt2pa+pF+1aAoKqsHW7QgCERKJKQFkWIEgUUrK4bKs9zfAoxN54FNFMdJwjgmi0D0s
MpO1B9jZ1vW5FpKpKVQkhiERVp6rNqvHeO2ekY2KGSBTAM2W3NIBYtCMCyEtqvk7QhVQmpqqPh2cPeaxotz5dRsPPaWjRCIwYoQoyyDyCQI4kIHsQgByRSLp3ZjuoUdlbYGIiDJ6qg0MMW8+b5phBwUTOUHaAQuggrB20VXURLfGasQKWJJVg4t+5w/tdQdVZLY/evBO+/Hms3ohvH67Gsvp3/+dHWuz/1V/dw5nTKOMyStByWZ7p6PTfvvzGuae//pmv/uuX3pv93SvvP/fYDrcc5uJar6tO1owOEC/asC3iRBejKIIjhldNZmqw0U3UkF3kbEYVVJMHe5QoqmmzYgYImNN2L2SsxDAtU11kSI2gziijhXPsnWMPl1TSfR/Wq3WzbBaLxfx4vjg+Xi6Wy8Vi3Tbrru3a1nK8nVp2XJZ1PZ1ujTe2trem0+l0c3u6tT2ZTEb1yHlXeAYhRmmDBtXjVS8W9TKTjInUu5RFc3wQwjAUBACJ5sGlBHh4OBEFArjsEYgKKNsiOqMUi0TYMpPorOvnHCXY3HuEtjZHtz746Bc/+otpaOpRIWZ6mnIesXoFwAJPvAaTBhERdaIOIAdlIcdErJ7ZCxcQEngOLAGFcrlomtPnnv7qd78/j+iCeLBEYYGIciCSzAEUMFwIa2YxOoXzdga6IfGKRM52eypihEOXDU5yFhUgufJS1rvDQHkrxswwWIbnpCKYTwOZCZyBfKbfVuuZRYSBoatyLFCUePH737n713elu1+WoQx9SatKVxuyrt2KQ8AKWIgsCQ0j2OzHAWbhpcmotwZ7zz7M9hfN4Woy2hy7446rkfCKVp7GhbYFypJHsa5QaHQRMIoUwGkaSAQVkrwPjX0h0rPjJMSwmiL/jWBeubY3m8UZacVE2MMg0Uy3EqDPtnHQhsGUEo4Ve6YGo/ygUtJtkLcNDfYYnWAYiUxhzTA48Xo52kyYkr8dDYbTSTGWtwVkeecA3OZiDarqcjo0LFcUSXOkdrk5daQD33pgw+f+0e6ISnYnACHvGs+EHs2Dbgxf+TkbPmWy8heREEOyZUNMYCWnbjS/EKIqVBx7HggM5lACNeWX6uCdQnlOSUwshhSSDosW7SKJKiC+KHOJoGKmemRcFSKQGfMSwTkXY4whe3YT7BmNIRIpe280HCQFjlpatkKJwVECwysrQna+VhEJfb92zrN3THCeRYTJJg6BOe13897+yFFDDEos3htKgRA7qJBz3jvvOInZVJxzohFQ53jYfOWYVWhY3JsqNB2wYWguxTkBppZX3cBlGKgAVrI45713IfSGhRI5hTN7JmaOUR4aEqhGo7kn8AYQYpYYmRwTBYnGJxCJzB5qdGkFApMHRWgPKHPpKweKuiZllcIFH9jBj5wcBzele7cO/KknF6s7KtQcd7cb3vwUbr5697vfOdvt6d/86MO7sjOeTlYr6rniUX3YjZ++8pWuPv/GnfVM52cff66bhVtvfHC62CtXo3DcccvaQTrhrgFINaoEYke2FJ2UxayhB2FD9tIjBTkoHJfs82IGa4sZ2dnNZj1RoCJqcr4YwETee6tBfeG80faYwQjBd11o2/Xx/GC9PDo6mh8v5ovFfLlYrtv1umv70EkSP8IxV2U5ntiUdjzd3Z5Ot7a3tzc2NkbjcV2PnCND5kTR9d1aorZ9ooWy8yk0wg3FNzliVjELWYsNMgx1kuu8ESjgUjhSBYTJJJjIvAdhEtEaqYkWYqfw5CPQszgJkDW0ZmhkknpUuBG/+us3fv3T/3gqxo1xLWGmAsfO4B0w4EmNIZpgHIWIA7xlXw84SvsrfeCClQEPOHElt1wfN4HrzRe//4O+Gq3W6OGlU+mUAzs4VUVQCYIAc1JnBmxhF0TEdtNrmtCbJzYk+RgOIcmCEqVQyMmKz94tJ7l4AiNtZZYx8RKhXWMAewvbOjB0yGVtC4NZMmJNzj4p4Gkt3e6ZrW+9+I1f/af/R1mg5rZGP6bViLqaWmoYDfRY9RiYC0Q8HIFZWWPCV1ErdQwRN4Jw395vN6Yb1bQdcRPIN1qN0K7Qluo9Rr33VCi8wBMKQo/8ToAAUkdwBgMJBDDEmDNFzWZQSJyixPVBjMHEwETJz5yTx7iQOcDamEpM2Jn3UefdwKkJNg32SUaCiHh56A7lAZyZIQsRE7HE3lxlvWPvvcRgQpokLDpJb9YKnfSplnFTq5wmFJo7WVJIplDZzreET56glMpGbU2vF7M71e//yoybZ8EP8l5hZAjpIfTETikzG58suT8yICkrkYHC6WtIxSl/i6iyKbPphDoOQy+T9XQXU6GuosyG6SAnFVKjzyW0n8w0AYkQbnZdxMxRAiXWERGlKW5MyL+1JJLOBiFGAUW27TcQEslLglJoUighbTVgq0lFybGKFL4Uka5dGUCpqkVRcsp2hr2CiEU0aATIFQyFaLTLaneNjZerqcsh21MdYtqDZ4MTstowcX8ol2yS4mMa65vfYS6bGEjUZc5yFyvTiG0PQnd4MNucbm5vb7ZtF4KIwBGHEG3TOsFJjKELReE1ioAK76JEI253XVd4D6hEldArM3unxEGC4Ql2z533phE0WWDUjpQhLCS+JCo0egkcOLJOe8yLD/avX3tuu593q0V45/b2569dfPdmVz968aX3rp/5ytUPfto8WEz95jj0/njePv/iD8488aXDOC5vN76vax6Py1rL6JqCG47HXVxR2Y1IwjpAsSZ0lMYaSRDUr9fewxUm4bXzTYpelVK8UAkhwmBV5wBoNIacpAYPlHYBlsFxYRMU5xiKruvabrU8Xi0Wi6Oj2Xw+Xy6b4+Ojrls17ZyCV5Wu7+zceubReLSzPZ1MJtPN6XRre2t7e2trqx6NyrIqy8IU1BI1RBHRpu3trGm2O/+9/YYqLIOk0OK9FRcxu/xxrqcVYKg+HDaHCKEwACZmoYQAzsR44BAFBVUgH6NAe4aHetEeJSvH0vlyVK4pvvXm+2+881J3+7ULGzSqqxiOGGBfxLBiz0rRcHsbyJIDSmhPLiqI2AO2usPygQeX4ILEKxxToeKli9QKv/i9H4629h50EsmrG2mvEIKwdoIIEhrsDa2jcVDAGi4LfpLeiTASIQdIi9lFRZO5lSEjRnEni35D3UDDHNJuB+zKpqrG/s2gSh2AK+O6mCei7dtTVWVlz1RAvHBFTR+fu/Z4896Vw/d+NZqsa+kqbSptsQiy8HzMugCOFQtCb0ZJSI7UgcgDAhKwIHbiyZdtgQXI82hzGcmPtWp0XGFVw5dYN8WGeqCEeGVP8Ektle5OCs/GaHBAqdraVCvCAjt5LpG8sUCSVSsiSsnAleCIk08iAIl5O5JtJRGYNaZmcF6GfJuuYfaNtOXtqTfXdBwBEmhUtZUOueGjhOEYUJl5vilf284yQnZutg0c9lCoEAY8BCAT63DadX8CLlsOI9vqOfSvJ0RKDCazpkBLMMigKLWnJhFsk028jbuSHiKTXNl8GzTnWoLj5FWZX2dI8bmUAJxnEuLEO0/UNTpBL9OH8Ozsc5GzNj3XPsPBAQZPyqTXyPk4tb
YMEo4x5N9ijn3wDHiKIVqREGAe9ATrg5358HlmR5wisf1zDGnVgSGPIgJPSIxlA/md8y6GGENvb8M7J+BE+hAr8Snt+zbD5ROjfdgkAqrZ2EskUUbSDhtkcZgJtxLlPRMCLYdnJqHZspLECCJiRlCRZEhuJagoFHGyUb/8Ly/99rXfnNo7fXrv9FPPfHo0mjB720YOaCJoWzAR4zGiD8G63yhBFSEEx9441X3oKJLzXkSdmSwDCqPyWYIIiqDKzlcSOh8qXWkxLrq2o4IUwseFW7hwP545da3dufve/i/lYOJvy+e+98wH8+7nv5h87rnxI1+Uf/jbdntjr5HgxzvV+RduLqb763q/qZtPuoBDXtW75emDGw/ae7PdYs+vfdsvq3bsfa1YAB0pS+xUO3AklrLwokFjBBAtSDuCeEAlSTmNnkymKhSRGILz7J1nR1XpnSMlxBi7Nc8Xy+VicTyfz+ezxXIxPz5eLI67tgtdF0IbQmBmm5aNRnU5rjc2xru7u5vT6cbG5vb2zng8ruuqGo3IAYIgav4PIUi36kijVbWJJUYMkKg4ZiRkSPhkEpdoJOaSa1k5LzJmNb/QdPCMD2B7J4Gs9LHTiUGbiKgCYsNXDWqPjssonQornONCRDT28Kg8/Lg8XM6vf3DznVtv74dPJmfk4vY2x4O+78klZ15RRAlku6s5+Zvaig4qIJG8Lbn0YoQgLQgMLnzwAV65JDgC+0UT//CHPzh35bGbrQTyQmUblCOROOmEg1JPiNnl2I6gRqWQ3O/JAm1EIlKoWeJmHRjMnSkpMIw7K5kfKpkYm05mIlsMFzC7vSaCpIANv6Y0qtAcrDXjUKScMnrKfB5UkdfuD7/11Z/ef51X92puRrqqwlIXnuaiC9CS9JhoroiEqLaDhmzKwIQA7QDh2Ibtvd0JTXQBLqmueymbMUYTrFpadzoq0ZcudKVHyVQqeVJWGBBh4zoZ7PwFUOdE1QHROQ8FYPZTUZWInI0YUozKsHoKgWo3PMlGkf+cZ76AaT1SejJakA4Rk1NvTZ4yfm2NjTlRpfEYMRNzYQKG7KTBmXdlAKtFKeaYyJWRT5g0CcegQfeKlHDNDjqnLT3JellBm3PywNmxPiu35fm7c3Y8AVpy1uGYroRCbQWDQZpCZI5aGXxRzZZXGqOqCtkSCmszErvg/8/WvzZJll3XgeBa+5x7/bqHZ2Tko7IeKBQKhSIIFB4EwacefIqS2BQlU6tHsp4xm4/zh+brPGw+aMxmRmbdY209akkjsdkUm6TEF0gU8SwAhUIhKysrKzMywsP9+r3n7D0f9j43At2TBIGqjAgP93vP3Y+111rbVJ3Da26Yp4Y+9Q1G9peSRjcVNfj6h4VZFcaBFtyFxjr0vQ8urdHA8NVUq5er1zWAtOE22vxdWLXWUlMSCd1ZkHvRKhYQAqlqyUUmpKdtadYtQbaSrFmmaQKQc2eqxYxgyikzV1OYxq7lwDCZfEelX9JYYGcpJ4SFc3w6j9e5Zy1u0iZeg9+4Ya2JbnhAA8d8/Va4aOSc3By41moGJ+5dXO4/99bnVut+PazHcfz+99955ZVXT2/dmUtlSuLSE5WUBMmbeFWtkpOXwVYhKWmtWopqlfYeXDUgYIWKZJ9LmCq86Q4t95xSrmWSeTU+HwViGYl5fDaPT/dyJR+/c/GFX/niX/zN2+Mo59+oZbj3yt2VvNT92z86/+1/+on7X/74ve/d0nW/H+v/7b/72l71cpRX7q9WpV48K+lwkW9/+pt/crF7VD/36ud2j+bd0w9efXD702/8UuqPtYwQAYuwKmotRxcBO1lKBLDiPjwp0cLc0wyWs3QpA6aq0m1qwTiO+/OL8TBeXl48v7jY7S725/vL3W6exlLnaT7UMjMxJ5Fkm/V2s3mw2WzPTm+fnt0+2W5vnZ5ut9vNZuMFpTlPqRYzXF4dEEQVb5SWajV5BaYKU3W9pLRCmz6Wk2WBSmy1MmPsO6KH99ygLIaEJmJjck69+3u3gli8IGyBh77WPA4/O1Xni+dSrOqcU1qdrtDjvR8/evt//tYHz9/f50vexfYTp7mfxvJ844YTnmoVyYwi2vZGkEACKoykUDqgABlMRDJ2MJplzLScRXuVXmZOV2P5zX/4T1/5/OcejTDmYrkw59yDVoqv5BOrwPIfgMunjOWkNcBFcTG2W3UHvBidHBnEKTYya0M6b/bVZjeDr2cKdzDQ1i1JIFKNTKNGqioVIkY1Nac+VauuqqjJILMVfXD/5K3PvvmDP357OJ26etRJZQIOajvYleBA7I2TAaJOpfQdBf5JJ2jSUufTvO2Rx32xlaVtWvfjiseeU4+55zxgzqil6y0bszC7KNmcPR4VnFd7ABxGsurkTi/g4gKqijBJ8uXlwQkgI+l4jKpaq5KURELUmgO3H0+PJLG3jc4TbomuWU3B8jXGG02L+lHvuuwxqzW6MTVy6xA1b6p89yfJ5BtYYuYSra2ZL8ISQGP3eqs/fYbnRCfzFCvCGx3tkpxrzBqaoBNA9RlENFftt0UxZlCCzaKzzYH9qjFJLIHQ8MW065mwe/XJcv6srbbw66lWrrOIoZTa6KL+nepOqxpwGtCmsK2qEFDbWQ7euN8e8a7O2gBbFQgpqveybOYdtWrK4sQxIX0CkGJrRfGewZ1P4vlxXoG3nk7tVq1mpExlhtnJZjPPs6nl3JGstbgt9lyKV85mEBF1o+Y2EkJADRCRokagwqz4gFrcl0pVASulqK/+TkKgqrJlX8eAvBlhlF1+NCMIxAmW5B8liGQOgWitKoL+zZ/+sqn2fVbFPJV5LqnLPtSHQaHTHBw6B48Aqtacmg8lIUw+e87S+a0iZLEFEQpMwtQ6CVnFCORqo+ReJ+Qu4whZybwr/VnPg/Vj/8G3H/7cz371n/yd//o//uUffvTk/Pf/Yvrbv7x58cv94+npf/t7+W//5mfHp8/efzhIlnyyf/Xl4aVJLn98vDyfnz8sL24fHOWZlouvfvGf3B2233n0ztOHZbv+4HL3nbtnn5jKZOK2XHOpU9EJOVtbsli1mtaUU5dzlpRP3EUPZdJ5LruL/fPzi8uLi4/PH+93+8vdxWF/NR3H4/Hgs39QJeehH7Ynm+3Jve329Nbp7Vunt26fna3X22G1Wq/DPU0rStGith+LBLBcQZqpiHQpLcRHH7j4/VS4uCsGUu6aGXZnAZj5YwBt0vp4DjXIGM2n1sOyRAVsi6WPYyveE/sv8hhFhkXREgq8Vy2UjshWtEt5WA+X0/i9t3/49g+/+9Hzh3arDC9uhvXGqDoVXatBFFIsF6NKp8iWTbqEQXAAeliwDoIkDFEkYQZ7IhO9IhszuYGt0N/qR9lzXf7+P/oX9z775ff3GK07slPpR82YgAJxy5fS7nAD4gMElACfKC4KJR2IV5rzW69DGczdgOMl/A74smlrN4uBGTSe4+L00eZ4kcN9MGZhVuKJG2bmy7iaFtSYCEKpKeeaNNmEMv3Cz32x//ivd9//o25QHMWOyiNlhs1mI+wA1kBqjVJhJiqG1YEwlr7MZcZkG
AGFFMERaa1dqhnl5n/QgR2dLuWtJJLznWgZVAlcGyAn9UaiVqOCypCvRpAWEQ/dGtYa6iNvAySlNuTzplIslk3HDDWCe7sL2hhSaMoOheXosZq7lTbPpzC4caN3M1hB5D+DW8UJoahu7K0l5xzPWYgmzUfazrrzDlxkedQCBXEw2/d7AGiboVpT7J/YzIfF7Vk134KT4JzZduTj/UV+WDDhNrpo/xVP6pJ143GuN5vOJi8m6DotYxNHewuYfHnDkt79t/skV9rlbnwHhKbbayA6Xd/MvBpPotFcWjwWKcOs1goVSvtLd/KVtjWZZgZJcq2SUhixaMdi3KCiWkspOeeUBGallFrqauhFuN8dupxTShqwledpJaXWMs1TqM4ozR4Eja4cn1wNAkrKFK2lavC/SlXtJIE8TpNN2vd9z54Clw95SyIpBdHU/KVi6TKalBqAxOAD7j4T91V9G6yScjxOQhyniaBISl2QAzXigtVS5jLXY92sN1MpOWczncbDehhArLoESLbOwZ5aSlU1rVUNBtUK60RgJo6OUuYkPVDM3Y8VOGyQoCi2rleP8fOf/7l3v/f++28//Jf/x//HZ7785rwrGDHl/vd+v7z18w/ufUm2V5/6/a/9+K3P35a1bk8mG/XpezI+f3onb1+80336bFUv98enfP3BT58NZ4/ffziw//JbX3r4+D/M05MuvzZrMhTVqnpU1JQIsst97hKAJGZWy1yO+8PT/X6321/tLp+fP7u4uNhfXeyudofDvtbS51RqhWG1Gtbr4e7Z7Vu3Tk9Pt5vTe6enp6e3bw3r9WpY5ezgPKpB1cpcD8eywML+ICD2A6a+X6kGVh/moc65ZRMBAIgt4RZ7qwhD8uUuDXaO7c9NjRt9lqEtIwPMv0czYli8cH0RtqiBsjqdEMawEdUmFfSYoS5V16o2roezMstf/tk3//xvvnZen+Wz1J+t8mqlh2pQ9oKjqiblatKhYDVhvcdBdFwPW5QJs1BhZtwDIzAIR+UImwQF1oMdkKGdIZMrcG1ymxf16d2Xzn7zn/7u8Mpb7+1lh+0oJyNP9toXWetYeUVMwBFWgGKY4A6LKA4n15Yl1NECH/slb2PUc4i6ehstxt4EFCO1EgtkGv6sYc8X/Gj1Ba8OpcTPB5HN5Z2JvvJNDKCLKqvPJ02EWlVLQWVNdpzx4HT76mtvfPOdPwQF7kshBhfbh2TKxU0wqgqX7bIgtRZR3fQrVAe7RVVFAGG4LYBBUl46zdZfoVjY/5ifD8dHi6nvBEvG4hwUUFQApZnVWkip0KXyu06UBrigAA12dm+SYu58RbcyVo1ZO9qaUF/O3ZJT9pzZEP+gLgd3GqbubiGEuVW9b3wTx5DNsPCknOlq8WqtO21rdBt82wh0RM45WmqSpIYiiWqldaZB93UwHGaQmC/CLDHFODMujNqy0QnBiLZm/OuocqAB2lYlO6RFLyCsqcQc9Yo0XrU60yk4CE6zT4ki2V0zG1zmIaCwJjbOZqhhnOPilaUt0AMQIUVrbKOM1Om2jaquFHKuL+hFromw7deAYakYDACTWNHi2HJMxTyxaddnNnmGiCDbPM1K9Kv+cNgPw5Bz5114KUWSWNWcc3GD6IpqVUCEi2m7mzEu0mvi+wIlGADMpVKiXi6lGqYud4jGxp/mWLYEByUZIJe0xb0xe/YdCWZNMxa/izbbLBRRQRIhMwHV0hgB8UDmruv6TlW1aNdlU+ty/sE734Hq9tZWTU9Pz6ZacspZcs7p9PRW7vvUixVMk2pN5KpWm+a56kT1bioD2Zw6qzOvElRBHnW/vX3/v/oH/+g/fe2/vzhe/Of/4ff67f3bL2+1L1rLu3943N7f3r779HMvDeUDfbCBTpwu8FP31sOd0935Bxg3Q33x+Yc/mi563a82r29euvsqNrrfPXq2u9dJn6SQdbXqBao61HKstU7T8Xx/eXFxDuBqt7+8eHpx8Xy/u9rvr8bxqpTiJ3HV96tV//KDFzab9emd+5vN9vTW6fb0dHuyHdaDpOQ4ctGqqkX1eCzHsfilFhVQU0JV5JxVUXVmrJ5BeNCXQopRTJVMEGqtraj1cljonD5/7Fu8FXbxUDhQrSAzTIUJrF5pOoQqPjxSMMzg5SZDK5AjNpA1gDarFSk1rUZY/olRoKiqfZLE4f0f/vgP/uOfPv7oMp+cbO/dZi5UlP0kfqYmzdbVYse+X3E4lOEoJ3s9ZClXWs82hZMQYDHN1XpjIXrY4DZMhgSsiKRIJivKKk1pnrB/65c/+yu/+/eOqxff33XH7mxvm72tR6xHuaUjZEqYwUItVWdH9LwOdRmBClnKJFI80AEVoISSrIAqjkebkXH54qm4FkNGflr+22sd94QDNPwC28xucS2UAPyxBLPlNgQCwYY0OMHYlJKSJKoej3jppfs/3N6dy49WIpr8572uVpgEuF1iXlt94bxPGsy6Lt+/e9crZGhpazIi4URZYe0YZELCkE3VUJxFgMgrFrix4+gw38WnjiNLAHYw02Ww5nGreM5OCUtq84wXhzg2L0skE1U1kkzeFbhZZcw3CWbqIv9URahcPOk61AyD1oUvBa2xc0MaIrL0nDfyugKMwaE15ZizJrwxrorcuntTI5MX84BIcumPQs0kSfZj4cYostRugEKvPYPVi5+Yu1arCy4Nc/GxqcRILPiTZs7risYVMGs+Hg76O2WpTbDVl8JGundimU9kLO6+k3qTznMVYcrZlzZA3YCmurGZRgXgHLSAkYNjsmS4wPG1ob2xs8kLAXrVExYaQTwRH6r5dkxf+CMJQE4SUij/NZL8Gh6ngwD9MMylaFVJqapRoNVCtk4ptQIq3gQ3BIZkdsy8qoA1NGf+nAaUJWAVP1C0JBDOqmU8CsBhYDONF8K9vn0SEfdSfSRDA7QuIINYbRMsr7ak8yZGq0q1nAl/qJI7IIYAwxAnkUlgCqFW++KXvwo1rbUUt04yrWUc91/7i689fvTw/v27prrZnLzw0osnJ3cl98P69qrfiKS+H/JKwKxVahUzlHkulXoUqAx3+nKpp5vTV077N+7kL37+/uOPpqfPr55eTKcvnfb77tObL/7hv/+T9TDeO9188vUX5t3lxdMnu+Mxz+uLi7Uc5OkHPzx/OJddkenrD/rXdMaTj96/f+dBrXzv4eNXP/XmdD4+f7a7unx6fv7xxcXHF7sn4+XF5cXFcRoBt3TQlGWz2aw33f37r56enp3e2p6dnW1vnW5v3epXQ9/3ENdto5Raaj3WanPxbikemajqrHENXWEIg87TlFKsLXI6rK9LVa1utC4REy0k3JF9fdYibf4gbG73WuecOscwXfjQqjQDOphrIZyKQyVTdrq+aAmKfoRURrgjqVBIZ2LwQT5KRlalZctOuRKz1GfVcjlevvPNv/6zbz1S3jq9a8g6HrEKqq8apEpKSShF5xm5QKbUjegGrPdmA6dBpmEzmUI2FEmaFZMxQybWiaaGXi0DonmVLZWL8emLr539nd/9jTd/6TNPyvajY97L+qL0I1YT+5GrCb0UogJVbQaKJPMm
GEnFjGazWVGdXLbQ1tPCW16jOmECVhsNPoaTwvhQrU6+OckKu1w1mjlCKqWoqIJJciZQqgotiVSNsXtaojrgiB6qiVIVrDFjcOqn2zgBuUJffunl+/cfzA9/GKHeKU+guL+6OZZnCakuilPWlKTovLnb333prmqSGIZrrR79HTlT98iEz0abWgp+9EQcPVaURQALUFEd1q3aRrjGCKYxYg96nWoVwC0hyjxlySLSzIZMorWNyZ9LjrwhrITbsZEqFEo4PRosB6PVdwcbmjjHPE8IF39qp/42/WADXJcpgLBtxAUsmmLE8gNc/3FScpbsYCWgXs8K3AeglWcCQWJTr3lJYmZtl4c1zR+Se26IQxHeRZrzpkA6Py3kb06CpGjMk8IwyMtDbbxteh1ni6mILYzuFHU3SlVqERFJzuR0kAxEit9XLawzw8vSQKmqFGeTOUQD1SopWfRrCfBZGgiqWi1Kuk28j6JNmCQKrxjRtNkNgRy1rBiDuxtMgHgMPEr66wiThKo7STK1aTr6x/WhvrOpYVpqFZWckqQcA7zl5LrxjfB6pN2gHx8QmYj5wM1pZDQzO46j4+E0CflJDO9xfX6s1duIStH5Nm2CFYW2P2b+z6WUoP1EkbZMi70CtfiSGcjxcAAJNRG6ZyYkn21f+JVf/415OpZpKtP0r//Nf/fH/+mPX3vtjVdefu3tt795enZnu70NyduTO9vt7e329mq17fvtrdMHItuep/0mP/rB9/IgGFaPf7DJj8evfOU3dum/ffCZu2+8+b+ZbHr68eM/+r3fP1F9+rCcl4uT+c3X3/jl7337P6AeHr7zfe7TOh/PTtavv3Dr9qc247PytT/6E9OxlvE9sddef+HJo4f/z//7v9yPF1e7p9NxpzrlhNU6D8P2xVdeOtlsAJzcOj29dXqy3a43wzAMeeiHfhABTEotHkr2x6M/eK2hkXA7WarZOEbtwW43ug2vGknHRwRtPNjKcDVtPgxoM4MGA8H8VTzRh7+CW984P8tLKK3xnJoBCI+tNurxWi259a0PRFQjGCl8kY6LjLwdTgSQpNTai5QyIQ+VumJ3KAer6/H774x//bU9un51Vg0oBYOhQieTQbKKGnQyG5UrzOthtHmw/QH9WtYZuscma5FuGk7N1DhAiuihyixlX8SSzZr6jB7Sy/540Z/I3/l7v/Czf+tnN68Oj/f9x1gf89m+3Lqs/V7WO673uiozuTceiVk4m01qE+GU4OroHBqA7xCxCaNzADT4mKpm1QO2szThiKBhKfQdhrUbrJfWKQV4mJM4cx6lijA7a73UBrFhoUogENb4+7iTIAxWqs7WSW9mCitz6Tbp3oMHP3y3nqZkObmWI5ANhydqlBN+xrwBdmOKzWazGgZLcIMcOAF92bZitWiprBGpmklU8Mi8lXfadwMk42OHbLr1kA6UtH5Scopr562pBjVfzWopAFJy/x5bDjfo5SsaC8kWyE9Nm70f4SSsSLFN1MtFZIPGiovOXkutuZFioAaxNk7m8nb9k/ubsObba2YtKrbBrRsVqhONRcWXHUUijEDgAZdAuJEEbuvPok+ray2ITU2mzmQzLaqduO4ecQVi6m3aHDE9DXuIcFwbpLiJRwiSvKX2uG1qbRTiYwuRBZINuoLFJ045WcDV5oBqUZWcFtmOg9beBDrY0GjSy/Wrquo0dL8PIhSmxagLjcAFM6vq0wtt+LnW0sRYEkmXRDO1AVDmklJOYn1OqiaJyWSeC1Mi7TiO01Rzzggff5hqsdn/uWhxF1n3uwgRYUMWr59kJ8OFlFAEMLJSfVVzrYVJ3GshmJiC8GxG4PXxaEQctyjafYgQl4NunOmgvcPT/kgQSE4Qj8GLClKcQ4aLtwkUSAojTe1wGCki0lsvw2b7X/2L/91ffe2vb29PX3v19Yvnux++96Pd5UWp7PLjMmtKa6Kfi/b9Vk02m7tDd+fh4w/yppdBXn7t1YdPP/7eX/5/apIvfvWVb330dcVYrW7L5uHDw2a1VdW/+L0/vs/NP/57/3jTb589+uH54/N53B0vn8oh7c6PFyMky7jfSy7T8eJvvv5+mcfbd7rbp8PLD167e+f07OxsWHV9fyttcLLZZjcONZRZqxbVAqJMZV/H4OUE3zf5Ll5baOgRcNWRFv+bhiC2xyAiigfVUJCFq0k8QBH+zWBNcYtWEFl78q7nLzHQdahTIj5p9FRkfJYgHGh7NOgyYfeWd49BLz6jAqRr+aHiqvSmH8OskL6KAiuopQ0vPnicfvjR/ODF8f33Bk6aNmoGmVSylISqKIBCi+rRpKcdYUeUGaOkUftRhisrQluzlMTRVNKu2856NFFBL1as2/R1KhkZGVM5juXwxpde+7u//rdeeu2FA6bHU77A9lJP9nUYsdH+zr5u9tiMGHCETWaj4QBOFBUthkqrPhGM2Za2GBaFEQGraub9Tkowy4CDx+q8LDrDQxtPpsKNoTyOibg2EkGItFAMShshe8Mj9F1mBITMvgrJrJoVsoMmbXGG7qAmZOI8TdYlmDH1s8rd+3cfDafzeCnoWq0mFEOCTcaUXGsoPqgwVQULNJXNepOylFoMYcXp2Hr0INbynKrX92II/6qI+h4sADi5xltRWgstcNqwzybjr64pyykltgY1wCLC3NBRQ59THe9UZZAU1CDBpW1NQVVHb4WUvHiT+WMYdazLvCAiy/swL0S0mUcGI0xi6LawG4Cw3SckMeBQmP+ltMQMGqK783PUEu+iJHaMuBVkMATi6M+zxPzTdUkRQ9tzS480AaAzklHrn8zX0EWsiZ4VdBx6qYkkYIiWUJyaQMe7JGVI8J8BuyaIKGDmidObYIF3fnDJUKmlau1ylyiuNYqb5CVby2JeS0oYtba/8U2ubr4DM7PrIxKnxloLGucfcZ+cdoAk9FXnpRQYV6uV1dg9h6rOHwbQ931OWSQVrT5ENEMpcymzSCLCjJPG4/GYmhCqwQQthrONjIgELr264+GhPlTVWkVSEtHauKPRmrVKfKlZbPEXa0N+l8T5gWy3IKwA1Eqt4l/yxGyxNNvfotc3iCcJFEkWGjAC0zjnhF/4xV+ai15cXP6d3/y1zz/5+PzZ+bOnF++//37XdwDmadqe3hJ0KW9q1Y+ePjw7vctUa8kf/eiD9fZkOo698J0/+0YxlVVGwrBd6eVx4PpnvvAL//k//eFQ7na77rA/cHrw13/yrd0Hj6aDlQmCWbA7OZEHL9829udPy/0XNl/96i8+OX//s599oxQlpNZCmed5D6RxOmKa/HOVefLnNhSEICkpSa1eZZvW2nQG9BDpZ6+NBSIPL2n4epTjdV+EisaKuu6Al6N3zZvzjirMClzgFyq8uCmkBBwNWswARSQBVtU32zlq7RpbJ7dkusGgGeiObCGMRMUyBzOm6BtQK9FBrBQYpl7y0/Pjn/5Vf/9sc2c76lxS7Y7HnDeY5oS+liI1UQ2z1qOhMy2CWXQCR0xDP3FV0M/W761spJ/ERj2i1jtb08H0CFkJitpBZZVR5t3x+atvvvyVr/7qZ7/4U0V0Z8c
5r/e2OXA7YntVh3062dVusn5iryU58UpH9UVbKIRCCqHLamwFqgfrcLpjwF2IdFIAr8hDmeK5iQQt1rEETBn7sawFryWCLOofLGcFFB/atb/z3kRid6MhOhfHcavPIenTB7cGkpSt6DzVL33prfzknW/88f+0ko4xdVAo3fTY0GwEnMXr2YDQqpvNIGBVjRXgrtM3DZsMiIBexJGofl4icsCqspo7GpLBYNA4gQDcvEoM1Sfa7e9MmpNGC0VoUKlfAIOaVk0Jvu/I2jjNXElJg/dOzi4DXUzvGdQtYMIPIQjI/hvU27OWgXytnZjVGoZvjeTeKmLD9cOI9mwCCCK0mtUa6iCvPJrAWZu5mb+SQiUwwyY1YyvPETXv0lgji2gzu2DzoG9/GQW8u4kFk8kpcL6MOQDiKJ/i9Hk3ZjdzdWDfN6Dp62PogYyEGZdFJVZNGOHeDGjH3lS1VJOEdG3Ge/1KiCNrMVh1SnKTePsF0uofP0pS0ox0poDTE4UJ2Rs/Nd9yQ0FytzXA1fEEWWt1pmut6hsVIJLcSL2hUj4dMFrOPZwdJlxJNqCqppQ8ZzdNURzTKKkRZdbSzWZIcfgxB9qhRlWVCOy2rDrys95Mdxbrl5/gnQU5Q2s1c+ZzaKIBiouLtNbqymkmlwhEEyyh3VKDJUnRIJLmS4FgVvP5+SFlGTa3VPHiy6996lNvqto0Hstcj8f5xw8ff+ub37m4fDLNyHn1ymuf/ujRU0E369zVk/r8vBtEU57GETmX+aCY765f2Q4Pvv/N97579fV0WL379jsv5VeRJe1GuTger44rHPrMLNztL9TWd+/dfvXVVx++9+6f/PH/9+7dfhhO/+ov/+KnPvu5/eGKAiL3g6F5jHjMzXllhlprUGGzOEppzQa1qma3zmtFToPpXAYXFzc8YhbWRHvQwwtFrzfdLVHJwWpH87xv8QG+P+bCxiTw/bXh2ydk9nOnQt9urBBxJmNoI+jmH/7omyZK28bj2ne0ZzMeoxI9SvJHqWRI1YNIL0ox7t9/nJ8+k1fvjR9f2PHYg7dSfa+UIWebCyw501hHlU6kSDmo9Iqe2FsR2ffrjlWIasyqOuWt6In0ausuzemk9KhDVk56eH4h1N/8u7/25a9+Qfr+YtRj7S2f7jDseHqhm6vu7lVdn9ftTs52PN3rynaGHbinHMWb4HqoMgpmsEAKm8w3Tr6IGhSmvqPETN16BNCqlUQQ2wLlbQxFEGCiBPzsYUXDRT8U/najmpeIGnGrg5Glvq7AfTpAgUnb9E2nZ6LCiomlOmnZ5Mlk5jBh9fGuvPfoGdbCSq7JNWw0DMAkXl2gA6oT15SJWEPWZlZXtwdsSAgHYCBWmKQ76mpCP1ma2B2RJ02Yqs4S8i1H7yO3GFQhZlbM1KwYCzCLk9yCbStR36CNoH180qajgYG2+ag/R7IAosJW1QPROrrha7haAkGy8acsY9HDeKHakqt5BdJcp2I1IomUaq0Go8rSBS6kJidfL++zJZIYFsvyrGukd1iNQllI9xlilLHXTHnCgQYNNqQwGkBFm0diida8trLEjUo84oeFaRvMQkQd7zPW58UrRTOtZvBNkQ0naTx8oA0P/as/Cb+iGT0hpqaelX1iKiLBTRXHM8y5hhocRZKoqmombujlE5XYNiGRUL0DjU9s2tDCRR593eNcv63wP6kaa4lEpNbqpd4CbNz448ZrJJiFKcduu8bOw6rPplZKqbW6Mmq5SteaBMYDbG3l3yJFUzNCcoJWt8gQU5ecSavELarjuAMBvAPwF7NFNhBdjwpEDbUWgjl3OWfz1k9VDTks0KBaKRlmSbIr6fwKQKBUEzFSoGvX4egs4DSO42giHWXFvuty99Nf+sKDV195+uxZv1ofxmPu17dffVxQwH7dnSYB0/0q/UBI7orZ5pY8/tEFDCf29OE3HlH17JW3upHH6fmjR0/Ott39uy9P9XhxfDZOj85Upv3Vww/efvedvyil9Ovyp3/yBy++9ODe/ZfK8fL1V1+8OowKPR6LqqP6PuKtjv1QmJLEAxmu+0G5yYsFd+NEXN/u5Zxbq3F9ZLgUms0stuXuG4BLK5sozlA0KgOwEGMsU5AAkiC+9IwQX+GuyFHsGU1FKa05Rmt//Xclg0ATKWBmoORoT5N3HwmgWrViRhXmTjpkQcXcl/nJI3v3fc4TJlk/uH91ZzP/8Ml9IzGJqiZByTYpEy0Tk9gIGLAidj5F455rdKgiW+kNebbLCfmgq6NsOtGVTFkUx4t62L38yqu/+Ru/cu+llx8ea9kReThKrtLvS7eT2zvZPivbKZ8+5/bKTkZd89JkD90DB+AAjMAITsRodlQ9GEaKzN5dGhSoi/QLqLHjLhyhI6O0sAU4vaPZPlyDHGSbl8fQ0r9/KYJv9AmOpkYUN3NVf4VUX8fQaL/RT6KJpub9nG/lCf2IzYbjEcPetuelK7IdTmaMZpNg9rpCzZhMUEA1KwwwcgOuzQyrByueAlDdQLbA1uVbt452MmEzcTXbapbejrBRZRJMZkUxCQvFhx25oBagAMWiSfGprAIqNHf9X4JnYKuxtj2um1nrD5XWvK9IhKf6EnbJNn+H3ViebRqwBCE55WTqCz6parGMN6ZxdDKXG2cgFk15CWzOgJEW8JU/+ThG0HR4JN6u470NuQV8g3Eg6f5/NeWucXAWtxAKG4vIzIKrZkt+obMrgzWWAGgMwzVaWInW1t+JUEJSZRBf+eqzKA9ALSLRYswuvvMnFE1e4wfdOlo0Z4ShAeVJogww919Fszo3oWSh1sVWhmZ1nmdSck4wmlZzy/l8bWCpjcJM76qb6UnARzE6u24OHQYJU9+c0cIng56OxJQSs4gBtVSYy0s0O68SrVMUprDpZq0q0YUj5xwyuCS5fRVA8gfRj6cahMtekKXpzKGSo09CQsYZOrRW8TVKSXTP0ajdsEDj8oT4BEJN0UYvCBtO8w0oQlNXUqmqahW/UgFkFCTxVh5mUBOI758xxt4Iv2R931c1iLAoaALOV89P15t7p3cPowJdQnpx+6oqiB7Sj3M1U9FkMhJ7SUWP/de+8f7TD5++9ML9ff/xg9fu1f3uhz/+q3e++417pyddr/Nx/sVf/sL3v/e9qdx5cv7js9c2r7z0KqyC9dHDR7uPn+8PH1/s9v/+3/+PJyfbk5Ozey+uP/GJV3Pqlj0cAErRlAFYVQ2T+DC78GLZsyyxROs26HEIx6+7LMi8178R1umVZjhVNcuq5Y8A1x5zcROdnUJzNaMXYJKinYKEaZdkQ2x0N5VWmktm1ygm/v0IY19zzC4vPYu6tDw+VHFugqJCK6Gz6GrqD9NuevLj8o23+4vzAurFoZK2WtHKbWEvY9FbpiMFduyZCFKlUkmldQYBc3SH+9OhUop1xWRkN9bdLekrj2Kqh109Xt4/e/BzP/drX/rCT1uWH+5VZVUkTUWY81xZ5NZ5We3znV23vZj6MZ8dp8wrcm/YUfame+AKtjdO5ERMtIPpaOIJDXPViaKSFuGJebEiUgHTWhocYNfGNkstewPk8Pvod8
<remainder of the base64-encoded image/png output (the rendered pose estimation visualization) omitted>", - "text/plain": [ - "<IPython.core.display.Image object>" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ],
- "source": [ - "if local_runtime:\n", - " from IPython.display import Image, display\n", - " import tempfile\n", - " import os.path as osp\n", - " import cv2\n", - " with tempfile.TemporaryDirectory() as tmpdir:\n", - " file_name = osp.join(tmpdir, 'pose_results.png')\n", - " cv2.imwrite(file_name, vis_result[:,:,::-1])\n", - " display(Image(file_name))\n", - "else:\n", - " cv2_imshow(vis_result[:,:,::-1]) # convert RGB to BGR to fit cv2" - ] - },
- { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "id": "42HG6DSNI0Ke" - }, - "source": [ - "### Add a new dataset\n", - "\n", - "There are two methods to support a customized dataset in MMPose. The first one is to convert the data to a supported format (e.g. COCO) and use the corresponding dataset class (e.g. BaseCocoStyleDataset), as described in the [document](https://mmpose.readthedocs.io/en/1.x/user_guides/prepare_datasets.html). The second one is to add a new dataset class. In this tutorial, we give an example of the second method; a minimal config sketch of the first method is shown right after the download cell below.\n", - "\n", - "We first download the demo dataset, which contains 100 samples (75 for training and 25 for validation) selected from the COCO train2017 dataset. The annotations are stored in a format that differs from the original COCO format.\n" - ] - },
- { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "qGzSb0Rm-p3V", - "outputId": "2e7ec2ba-88e1-490f-cd5a-66ef06ec3e52" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "/content/mmpose/data\n", - "--2022-09-14 10:39:37-- https://download.openmmlab.com/mmpose/datasets/coco_tiny.tar\n", - "Resolving download.openmmlab.com (download.openmmlab.com)... 47.89.140.71\n", - "Connecting to download.openmmlab.com (download.openmmlab.com)|47.89.140.71|:443... connected.\n", - "HTTP request sent, awaiting response... 200 OK\n", - "Length: 16558080 (16M) [application/x-tar]\n", - "Saving to: ‘coco_tiny.tar’\n", - "\n", - "coco_tiny.tar 100%[===================>] 15.79M 9.14MB/s in 1.7s \n", - "\n", - "2022-09-14 10:39:40 (9.14 MB/s) - ‘coco_tiny.tar’ saved [16558080/16558080]\n", - "\n", - "/content/mmpose\n" - ] - } - ], - "source": [ - "# download dataset\n", - "%mkdir data\n", - "%cd data\n", - "!wget https://download.openmmlab.com/mmpose/datasets/coco_tiny.tar\n", - "!tar -xf coco_tiny.tar\n", - "%cd .."
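The tutorial only walks through the second method, so here is a brief, hedged illustration of the first one: once the annotations have been converted to the standard COCO keypoint format, an existing COCO-style dataset class can be pointed at them directly from the config, with no new dataset code. The sketch below shows that pattern; `CocoDataset` and the config keys are standard MMPose 1.x usage, but every path is a placeholder and the pipeline is intentionally left empty.

```python
# Minimal sketch of method one (assumed paths, not files shipped with this
# tutorial): after converting annotations to the standard COCO keypoint
# format, reuse the built-in COCO-style dataset class in the config instead
# of implementing a new dataset class.
converted_dataset = dict(
    type='CocoDataset',                  # existing COCO-style dataset class
    data_root='data/my_converted_dataset/',
    data_mode='topdown',                 # top-down pose estimation samples
    ann_file='annotations/train.json',   # COCO-format keypoint annotations
    data_prefix=dict(img='images/'),     # image folder relative to data_root
    pipeline=[],                         # fill in the usual training pipeline
)
```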
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "fL6S62JWJls0", - "outputId": "fe4cf7c9-5a8c-4542-f0b1-fe01908ca3e4" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Reading package lists...\n", - "Building dependency tree...\n", - "Reading state information...\n", - "The following package was automatically installed and is no longer required:\n", - " libnvidia-common-460\n", - "Use 'apt autoremove' to remove it.\n", - "The following NEW packages will be installed:\n", - " tree\n", - "0 upgraded, 1 newly installed, 0 to remove and 32 not upgraded.\n", - "Need to get 40.7 kB of archives.\n", - "After this operation, 105 kB of additional disk space will be used.\n", - "Get:1 http://archive.ubuntu.com/ubuntu bionic/universe amd64 tree amd64 1.7.0-5 [40.7 kB]\n", - "Fetched 40.7 kB in 0s (161 kB/s)\n", - "Selecting previously unselected package tree.\n", - "(Reading database ... 155685 files and directories currently installed.)\n", - "Preparing to unpack .../tree_1.7.0-5_amd64.deb ...\n", - "Unpacking tree (1.7.0-5) ...\n", - "Setting up tree (1.7.0-5) ...\n", - "Processing triggers for man-db (2.8.3-2ubuntu0.1) ...\n", - "data/coco_tiny\n", - "├── images\n", - "│   ├── 000000012754.jpg\n", - "│   ├── 000000017741.jpg\n", - "│   ├── 000000019157.jpg\n", - "│   ├── 000000019523.jpg\n", - "│   ├── 000000019608.jpg\n", - "│   ├── 000000022816.jpg\n", - "│   ├── 000000031092.jpg\n", - "│   ├── 000000032124.jpg\n", - "│   ├── 000000037209.jpg\n", - "│   ├── 000000050713.jpg\n", - "│   ├── 000000057703.jpg\n", - "│   ├── 000000064909.jpg\n", - "│   ├── 000000076942.jpg\n", - "│   ├── 000000079754.jpg\n", - "│   ├── 000000083935.jpg\n", - "│   ├── 000000085316.jpg\n", - "│   ├── 000000101013.jpg\n", - "│   ├── 000000101172.jpg\n", - "│   ├── 000000103134.jpg\n", - "│   ├── 000000103163.jpg\n", - "│   ├── 000000105647.jpg\n", - "│   ├── 000000107960.jpg\n", - "│   ├── 000000117891.jpg\n", - "│   ├── 000000118181.jpg\n", - "│   ├── 000000120021.jpg\n", - "│   ├── 000000128119.jpg\n", - "│   ├── 000000143908.jpg\n", - "│   ├── 000000145025.jpg\n", - "│   ├── 000000147386.jpg\n", - "│   ├── 000000147979.jpg\n", - "│   ├── 000000154222.jpg\n", - "│   ├── 000000160190.jpg\n", - "│   ├── 000000161112.jpg\n", - "│   ├── 000000175737.jpg\n", - "│   ├── 000000177069.jpg\n", - "│   ├── 000000184659.jpg\n", - "│   ├── 000000209468.jpg\n", - "│   ├── 000000210060.jpg\n", - "│   ├── 000000215867.jpg\n", - "│   ├── 000000216861.jpg\n", - "│   ├── 000000227224.jpg\n", - "│   ├── 000000246265.jpg\n", - "│   ├── 000000254919.jpg\n", - "│   ├── 000000263687.jpg\n", - "│   ├── 000000264628.jpg\n", - "│   ├── 000000268927.jpg\n", - "│   ├── 000000271177.jpg\n", - "│   ├── 000000275219.jpg\n", - "│   ├── 000000277542.jpg\n", - "│   ├── 000000279140.jpg\n", - "│   ├── 000000286813.jpg\n", - "│   ├── 000000297980.jpg\n", - "│   ├── 000000301641.jpg\n", - "│   ├── 000000312341.jpg\n", - "│   ├── 000000325768.jpg\n", - "│   ├── 000000332221.jpg\n", - "│   ├── 000000345071.jpg\n", - "│   ├── 000000346965.jpg\n", - "│   ├── 000000347836.jpg\n", - "│   ├── 000000349437.jpg\n", - "│   ├── 000000360735.jpg\n", - "│   ├── 000000362343.jpg\n", - "│   ├── 000000364079.jpg\n", - "│   ├── 000000364113.jpg\n", - "│   ├── 000000386279.jpg\n", - "│   ├── 000000386968.jpg\n", - "│   ├── 000000388619.jpg\n", - "│   ├── 000000390137.jpg\n", - "│   ├── 000000390241.jpg\n", - "│   ├── 000000390298.jpg\n", - "│  
├── 000000390348.jpg\n", - "│   ├── 000000398606.jpg\n", - "│   ├── 000000400456.jpg\n", - "│   ├── 000000402514.jpg\n", - "│   ├── 000000403255.jpg\n", - "│   ├── 000000403432.jpg\n", - "│   ├── 000000410350.jpg\n", - "│   ├── 000000453065.jpg\n", - "│   ├── 000000457254.jpg\n", - "│   ├── 000000464153.jpg\n", - "│   ├── 000000464515.jpg\n", - "│   ├── 000000465418.jpg\n", - "│   ├── 000000480591.jpg\n", - "│   ├── 000000484279.jpg\n", - "│   ├── 000000494014.jpg\n", - "│   ├── 000000515289.jpg\n", - "│   ├── 000000516805.jpg\n", - "│   ├── 000000521994.jpg\n", - "│   ├── 000000528962.jpg\n", - "│   ├── 000000534736.jpg\n", - "│   ├── 000000535588.jpg\n", - "│   ├── 000000537548.jpg\n", - "│   ├── 000000553698.jpg\n", - "│   ├── 000000555622.jpg\n", - "│   ├── 000000566456.jpg\n", - "│   ├── 000000567171.jpg\n", - "│   └── 000000568961.jpg\n", - "├── train.json\n", - "└── val.json\n", - "\n", - "1 directory, 99 files\n" - ] - } - ], - "source": [ - "# check the directory structure\n", - "!apt-get -q install tree\n", - "!tree data/coco_tiny" - ] - },
- { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "Hl09rtA4Jn5b", - "outputId": "e94e84ea-7192-4d2f-9747-716931953d6d" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "<class 'list'> 75\n", - "{'bbox': [267.03, 104.32, 229.19, 320],\n", - " 'image_file': '000000537548.jpg',\n", - " 'image_size': [640, 480],\n", - " 'keypoints': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 325, 160, 2, 398,\n", - " 177, 2, 0, 0, 0, 437, 238, 2, 0, 0, 0, 477, 270, 2, 287, 255, 1,\n", - " 339, 267, 2, 0, 0, 0, 423, 314, 2, 0, 0, 0, 355, 367, 2]}\n" - ] - } - ], - "source": [ - "# check the annotation format\n", - "import json\n", - "import pprint\n", - "\n", - "anns = json.load(open('data/coco_tiny/train.json'))\n", - "\n", - "print(type(anns), len(anns))\n", - "pprint.pprint(anns[0], compact=True)" - ] - },
- { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "id": "H-dMbjgnJzbH" - }, - "source": [ - "After downloading the data, we implement a new dataset class to load data samples for model training and validation. Assuming that we are going to train a top-down pose estimation model, the new dataset class should inherit `BaseCocoStyleDataset`.\n", - "\n", - "We have already implemented `CocoDataset`, which we can take as an example." - ] - },
- { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "id": "jCu4npV2rl_Q" - }, - "source": [ - "#### Note\n", - "If you encounter the following error:\n", - "```shell\n", - "AssertionError: class `PoseLocalVisualizer` in mmpose/visualization/local_visualizer.py: instance named of visualizer has been created, the method `get_instance` should not access any other arguments\n", - "```\n", - "Please restart your Jupyter kernel and resume running from this cell." - ] - },
- { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "3I66Pi5Er94J" - }, - "outputs": [], - "source": [ - "%cd mmpose" - ] - },
- { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "rRNq50dytJki" - }, - "outputs": [], - "source": [ - "# Copyright (c) OpenMMLab. 
All rights reserved.\n", - "import json\n", - "import os.path as osp\n", - "from typing import Callable, List, Optional, Sequence, Union\n", - "\n", - "import numpy as np\n", - "from mmengine.utils import check_file_exist\n", - "\n", - "from mmpose.registry import DATASETS\n", - "from mmpose.datasets.datasets.base import BaseCocoStyleDataset\n", - "\n", - "\n", - "@DATASETS.register_module()\n", - "class TinyCocoDataset(BaseCocoStyleDataset):\n", - " METAINFO: dict = dict(from_file='configs/_base_/datasets/coco.py')\n", - "\n", - " def _load_annotations(self) -> List[dict]:\n", - " \"\"\"Load data from the COCO-Tiny annotation file.\"\"\"\n", - "\n", - " check_file_exist(self.ann_file)\n", - " with open(self.ann_file) as anno_file:\n", - " anns = json.load(anno_file)\n", - "\n", - " data_list = []\n", - " ann_id = 0\n", - "\n", - " for idx, ann in enumerate(anns):\n", - " img_h, img_w = ann['image_size']\n", - "\n", - " # load bbox in shape [1, 4] and convert it from xywh to x1y1x2y2\n", - " x, y, w, h = ann['bbox']\n", - " x1 = np.clip(x, 0, img_w - 1)\n", - " y1 = np.clip(y, 0, img_h - 1)\n", - " x2 = np.clip(x + w, 0, img_w - 1)\n", - " y2 = np.clip(y + h, 0, img_h - 1)\n", - "\n", - " bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4)\n", - "\n", - " # load keypoints in shape [1, K, 2] and keypoints_visible in [1, K]\n", - " joints_3d = np.array(ann['keypoints']).reshape(1, -1, 3)\n", - " num_joints = joints_3d.shape[1]\n", - " keypoints = np.zeros((1, num_joints, 2), dtype=np.float32)\n", - " keypoints[:, :, :2] = joints_3d[:, :, :2]\n", - " keypoints_visible = np.minimum(1, joints_3d[:, :, 2:3])\n", - " keypoints_visible = keypoints_visible.reshape(1, -1)\n", - "\n", - " data_info = {\n", - " 'id': ann_id,\n", - " 'img_id': int(ann['image_file'].split('.')[0]),\n", - " 'img_path': osp.join(self.data_prefix['img'], ann['image_file']),\n", - " 'bbox': bbox,\n", - " 'bbox_score': np.ones(1, dtype=np.float32),\n", - " 'keypoints': keypoints,\n", - " 'keypoints_visible': keypoints_visible,\n", - " }\n", - "\n", - " data_list.append(data_info)\n", - " ann_id = ann_id + 1\n", - "\n", - " return data_list, None\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "id": "UmGitQZkUnom" - }, - "source": [ - "### Create a config file\n", - "\n", - "In the next step, we create a config file that configures the model, dataset and runtime settings. More information can be found at [Configs](https://mmpose.readthedocs.io/en/1.x/user_guides/configs.html). A common practice to create a config file is to derive it from an existing one. In this tutorial, we load a config file that trains an HRNet on the COCO dataset, and modify it to adapt to the COCO-Tiny dataset." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "sMbVVHPXK87s", - "outputId": "a23a1ed9-a2ee-4a6a-93da-3c1968c8a2ec" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "default_scope = 'mmpose'\n", - "default_hooks = dict(\n", - " timer=dict(type='IterTimerHook'),\n", - " logger=dict(type='LoggerHook', interval=50),\n", - " param_scheduler=dict(type='ParamSchedulerHook'),\n", - " checkpoint=dict(\n", - " type='CheckpointHook',\n", - " interval=1,\n", - " save_best='pck/PCK@0.05',\n", - " rule='greater',\n", - " max_keep_ckpts=1),\n", - " sampler_seed=dict(type='DistSamplerSeedHook'),\n", - " visualization=dict(type='PoseVisualizationHook', enable=False))\n", - "custom_hooks = [dict(type='SyncBuffersHook')]\n", - "env_cfg = dict(\n", - " cudnn_benchmark=False,\n", - " mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),\n", - " dist_cfg=dict(backend='nccl'))\n", - "vis_backends = [dict(type='LocalVisBackend')]\n", - "visualizer = dict(\n", - " type='PoseLocalVisualizer',\n", - " vis_backends=[dict(type='LocalVisBackend')],\n", - " name='visualizer')\n", - "log_processor = dict(\n", - " type='LogProcessor', window_size=50, by_epoch=True, num_digits=6)\n", - "log_level = 'INFO'\n", - "load_from = None\n", - "resume = False\n", - "file_client_args = dict(backend='disk')\n", - "train_cfg = dict(by_epoch=True, max_epochs=40, val_interval=1)\n", - "val_cfg = dict()\n", - "test_cfg = dict()\n", - "optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005))\n", - "param_scheduler = [\n", - " dict(type='LinearLR', begin=0, end=10, start_factor=0.001, by_epoch=False),\n", - " dict(\n", - " type='MultiStepLR',\n", - " begin=0,\n", - " end=40,\n", - " milestones=[17, 35],\n", - " gamma=0.1,\n", - " by_epoch=True)\n", - "]\n", - "auto_scale_lr = dict(base_batch_size=512)\n", - "codec = dict(\n", - " type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)\n", - "model = dict(\n", - " type='TopdownPoseEstimator',\n", - " data_preprocessor=dict(\n", - " type='PoseDataPreprocessor',\n", - " mean=[123.675, 116.28, 103.53],\n", - " std=[58.395, 57.12, 57.375],\n", - " bgr_to_rgb=True),\n", - " backbone=dict(\n", - " type='HRNet',\n", - " in_channels=3,\n", - " extra=dict(\n", - " stage1=dict(\n", - " num_modules=1,\n", - " num_branches=1,\n", - " block='BOTTLENECK',\n", - " num_blocks=(4, ),\n", - " num_channels=(64, )),\n", - " stage2=dict(\n", - " num_modules=1,\n", - " num_branches=2,\n", - " block='BASIC',\n", - " num_blocks=(4, 4),\n", - " num_channels=(32, 64)),\n", - " stage3=dict(\n", - " num_modules=4,\n", - " num_branches=3,\n", - " block='BASIC',\n", - " num_blocks=(4, 4, 4),\n", - " num_channels=(32, 64, 128)),\n", - " stage4=dict(\n", - " num_modules=3,\n", - " num_branches=4,\n", - " block='BASIC',\n", - " num_blocks=(4, 4, 4, 4),\n", - " num_channels=(32, 64, 128, 256))),\n", - " init_cfg=dict(\n", - " type='Pretrained',\n", - " checkpoint=\n", - " 'https://download.openmmlab.com/mmpose/pretrain_models/hrnet_w32-36af842e.pth'\n", - " )),\n", - " head=dict(\n", - " type='HeatmapHead',\n", - " in_channels=32,\n", - " out_channels=17,\n", - " deconv_out_channels=None,\n", - " loss=dict(type='KeypointMSELoss', use_target_weight=True),\n", - " decoder=dict(\n", - " type='MSRAHeatmap',\n", - " input_size=(192, 256),\n", - " heatmap_size=(48, 64),\n", - " sigma=2)),\n", - " test_cfg=dict(flip_test=True, flip_mode='heatmap', 
shift_heatmap=True))\n", - "dataset_type = 'TinyCocoDataset'\n", - "data_mode = 'topdown'\n", - "data_root = 'data/coco_tiny'\n", - "train_pipeline = [\n", - " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", - " dict(type='GetBBoxCenterScale'),\n", - " dict(type='RandomFlip', direction='horizontal'),\n", - " dict(type='RandomHalfBody'),\n", - " dict(type='RandomBBoxTransform'),\n", - " dict(type='TopdownAffine', input_size=(192, 256)),\n", - " dict(\n", - " type='GenerateTarget',\n", - " target_type='heatmap',\n", - " encoder=dict(\n", - " type='MSRAHeatmap',\n", - " input_size=(192, 256),\n", - " heatmap_size=(48, 64),\n", - " sigma=2)),\n", - " dict(type='PackPoseInputs')\n", - "]\n", - "test_pipeline = [\n", - " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", - " dict(type='GetBBoxCenterScale'),\n", - " dict(type='TopdownAffine', input_size=(192, 256)),\n", - " dict(type='PackPoseInputs')\n", - "]\n", - "train_dataloader = dict(\n", - " batch_size=16,\n", - " num_workers=2,\n", - " persistent_workers=True,\n", - " sampler=dict(type='DefaultSampler', shuffle=True),\n", - " dataset=dict(\n", - " type='TinyCocoDataset',\n", - " data_root='data/coco_tiny',\n", - " data_mode='topdown',\n", - " ann_file='train.json',\n", - " data_prefix=dict(img='images/'),\n", - " pipeline=[\n", - " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", - " dict(type='GetBBoxCenterScale'),\n", - " dict(type='RandomFlip', direction='horizontal'),\n", - " dict(type='RandomHalfBody'),\n", - " dict(type='RandomBBoxTransform'),\n", - " dict(type='TopdownAffine', input_size=(192, 256)),\n", - " dict(\n", - " type='GenerateTarget',\n", - " target_type='heatmap',\n", - " encoder=dict(\n", - " type='MSRAHeatmap',\n", - " input_size=(192, 256),\n", - " heatmap_size=(48, 64),\n", - " sigma=2)),\n", - " dict(type='PackPoseInputs')\n", - " ]))\n", - "val_dataloader = dict(\n", - " batch_size=16,\n", - " num_workers=2,\n", - " persistent_workers=True,\n", - " drop_last=False,\n", - " sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),\n", - " dataset=dict(\n", - " type='TinyCocoDataset',\n", - " data_root='data/coco_tiny',\n", - " data_mode='topdown',\n", - " ann_file='val.json',\n", - " bbox_file=None,\n", - " data_prefix=dict(img='images/'),\n", - " test_mode=True,\n", - " pipeline=[\n", - " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", - " dict(type='GetBBoxCenterScale'),\n", - " dict(type='TopdownAffine', input_size=(192, 256)),\n", - " dict(type='PackPoseInputs')\n", - " ]))\n", - "test_dataloader = dict(\n", - " batch_size=16,\n", - " num_workers=2,\n", - " persistent_workers=True,\n", - " drop_last=False,\n", - " sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),\n", - " dataset=dict(\n", - " type='TinyCocoDataset',\n", - " data_root='data/coco_tiny',\n", - " data_mode='topdown',\n", - " ann_file='val.json',\n", - " bbox_file=None,\n", - " data_prefix=dict(img='images/'),\n", - " test_mode=True,\n", - " pipeline=[\n", - " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", - " dict(type='GetBBoxCenterScale'),\n", - " dict(type='TopdownAffine', input_size=(192, 256)),\n", - " dict(type='PackPoseInputs')\n", - " ]))\n", - "val_evaluator = dict(type='PCKAccuracy')\n", - "test_evaluator = dict(type='PCKAccuracy')\n", - "work_dir = 'work_dirs/hrnet_w32_coco_tiny_256x192'\n", - "randomness = dict(seed=0)\n", - "\n" - ] - } - ], - "source": [ - "from mmengine import Config\n", - "\n", - "cfg = 
Config.fromfile(\n", - " './configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py'\n", - ")\n", - "\n", - "# set basic configs\n", - "cfg.data_root = 'data/coco_tiny'\n", - "cfg.work_dir = 'work_dirs/hrnet_w32_coco_tiny_256x192'\n", - "cfg.randomness = dict(seed=0)\n", - "\n", - "# set log interval\n", - "cfg.train_cfg.val_interval = 1\n", - "\n", - "# set num of epoch\n", - "cfg.train_cfg.max_epochs = 40\n", - "\n", - "# set optimizer\n", - "cfg.optim_wrapper = dict(optimizer=dict(\n", - " type='Adam',\n", - " lr=5e-4,\n", - "))\n", - "\n", - "# set learning rate policy\n", - "cfg.param_scheduler = [\n", - " dict(\n", - " type='LinearLR', begin=0, end=10, start_factor=0.001,\n", - " by_epoch=False), # warm-up\n", - " dict(\n", - " type='MultiStepLR',\n", - " begin=0,\n", - " end=cfg.train_cfg.max_epochs,\n", - " milestones=[17, 35],\n", - " gamma=0.1,\n", - " by_epoch=True)\n", - "]\n", - "\n", - "\n", - "# set batch size\n", - "cfg.train_dataloader.batch_size = 16\n", - "cfg.val_dataloader.batch_size = 16\n", - "cfg.test_dataloader.batch_size = 16\n", - "\n", - "# set dataset configs\n", - "cfg.dataset_type = 'TinyCocoDataset'\n", - "cfg.train_dataloader.dataset.type = cfg.dataset_type\n", - "cfg.train_dataloader.dataset.ann_file = 'train.json'\n", - "cfg.train_dataloader.dataset.data_root = cfg.data_root\n", - "cfg.train_dataloader.dataset.data_prefix = dict(img='images/')\n", - "\n", - "\n", - "cfg.val_dataloader.dataset.type = cfg.dataset_type\n", - "cfg.val_dataloader.dataset.bbox_file = None\n", - "cfg.val_dataloader.dataset.ann_file = 'val.json'\n", - "cfg.val_dataloader.dataset.data_root = cfg.data_root\n", - "cfg.val_dataloader.dataset.data_prefix = dict(img='images/')\n", - "\n", - "cfg.test_dataloader.dataset.type = cfg.dataset_type\n", - "cfg.test_dataloader.dataset.bbox_file = None\n", - "cfg.test_dataloader.dataset.ann_file = 'val.json'\n", - "cfg.test_dataloader.dataset.data_root = cfg.data_root\n", - "cfg.test_dataloader.dataset.data_prefix = dict(img='images/')\n", - "\n", - "# set evaluator\n", - "cfg.val_evaluator = dict(type='PCKAccuracy')\n", - "cfg.test_evaluator = cfg.val_evaluator\n", - "\n", - "cfg.default_hooks.checkpoint.save_best = 'PCK'\n", - "cfg.default_hooks.checkpoint.max_keep_ckpts = 1\n", - "\n", - "print(cfg.pretty_text)\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "id": "UlD8iDZehE2S" - }, - "source": [ - "or you can create a config file like follows:\n", - "```Python3\n", - "_base_ = ['../../../_base_/default_runtime.py']\n", - "\n", - "# runtime\n", - "train_cfg = dict(max_epochs=40, val_interval=1)\n", - "\n", - "# optimizer\n", - "optim_wrapper = dict(optimizer=dict(\n", - " type='Adam',\n", - " lr=5e-4,\n", - "))\n", - "\n", - "# learning policy\n", - "param_scheduler = [\n", - " dict(\n", - " type='LinearLR', begin=0, end=500, start_factor=0.001,\n", - " by_epoch=False), # warm-up\n", - " dict(\n", - " type='MultiStepLR',\n", - " begin=0,\n", - " end=train_cfg.max_epochs,\n", - " milestones=[17, 35],\n", - " gamma=0.1,\n", - " by_epoch=True)\n", - "]\n", - "\n", - "# automatically scaling LR based on the actual training batch size\n", - "auto_scale_lr = dict(base_batch_size=512)\n", - "\n", - "# codec settings\n", - "codec = dict(\n", - " type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)\n", - "\n", - "# model settings\n", - "model = dict(\n", - " type='TopdownPoseEstimator',\n", - " data_preprocessor=dict(\n", - " type='PoseDataPreprocessor',\n", - " 
mean=[123.675, 116.28, 103.53],\n", - " std=[58.395, 57.12, 57.375],\n", - " bgr_to_rgb=True),\n", - " backbone=dict(\n", - " type='HRNet',\n", - " in_channels=3,\n", - " extra=dict(\n", - " stage1=dict(\n", - " num_modules=1,\n", - " num_branches=1,\n", - " block='BOTTLENECK',\n", - " num_blocks=(4, ),\n", - " num_channels=(64, )),\n", - " stage2=dict(\n", - " num_modules=1,\n", - " num_branches=2,\n", - " block='BASIC',\n", - " num_blocks=(4, 4),\n", - " num_channels=(32, 64)),\n", - " stage3=dict(\n", - " num_modules=4,\n", - " num_branches=3,\n", - " block='BASIC',\n", - " num_blocks=(4, 4, 4),\n", - " num_channels=(32, 64, 128)),\n", - " stage4=dict(\n", - " num_modules=3,\n", - " num_branches=4,\n", - " block='BASIC',\n", - " num_blocks=(4, 4, 4, 4),\n", - " num_channels=(32, 64, 128, 256))),\n", - " init_cfg=dict(\n", - " type='Pretrained',\n", - " checkpoint='https://download.openmmlab.com/mmpose/'\n", - " 'pretrain_models/hrnet_w32-36af842e.pth'),\n", - " ),\n", - " head=dict(\n", - " type='HeatmapHead',\n", - " in_channels=32,\n", - " out_channels=17,\n", - " deconv_out_channels=None,\n", - " loss=dict(type='KeypointMSELoss', use_target_weight=True),\n", - " decoder=codec),\n", - " test_cfg=dict(\n", - " flip_test=True,\n", - " flip_mode='heatmap',\n", - " shift_heatmap=True,\n", - " ))\n", - "\n", - "# base dataset settings\n", - "dataset_type = 'TinyCocoDataset'\n", - "data_mode = 'topdown'\n", - "data_root = 'data/coco_tiny'\n", - "work_dir = 'work_dirs/hrnet_w32_coco_tiny_256x192'\n", - "randomness = dict(seed=0)\n", - "\n", - "# pipelines\n", - "train_pipeline = [\n", - " dict(type='LoadImage'),\n", - " dict(type='GetBBoxCenterScale'),\n", - " dict(type='RandomFlip', direction='horizontal'),\n", - " dict(type='RandomHalfBody'),\n", - " dict(type='RandomBBoxTransform'),\n", - " dict(type='TopdownAffine', input_size=codec['input_size']),\n", - " dict(type='GenerateTarget', target_type='heatmap', encoder=codec),\n", - " dict(type='PackPoseInputs')\n", - "]\n", - "test_pipeline = [\n", - " dict(type='LoadImage'),\n", - " dict(type='GetBBoxCenterScale'),\n", - " dict(type='TopdownAffine', input_size=codec['input_size']),\n", - " dict(type='PackPoseInputs')\n", - "]\n", - "\n", - "# data loaders\n", - "train_dataloader = dict(\n", - " batch_size=16,\n", - " num_workers=2,\n", - " persistent_workers=True,\n", - " sampler=dict(type='DefaultSampler', shuffle=True),\n", - " dataset=dict(\n", - " type=dataset_type,\n", - " data_root=data_root,\n", - " data_mode=data_mode,\n", - " ann_file='train.json',\n", - " data_prefix=dict(img='images/'),\n", - " pipeline=train_pipeline,\n", - " ))\n", - "val_dataloader = dict(\n", - " batch_size=16,\n", - " num_workers=2,\n", - " persistent_workers=True,\n", - " drop_last=False,\n", - " sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),\n", - " dataset=dict(\n", - " type=dataset_type,\n", - " data_root=data_root,\n", - " data_mode=data_mode,\n", - " ann_file='val.json',\n", - " data_prefix=dict(img='images/'),\n", - " test_mode=True,\n", - " pipeline=test_pipeline,\n", - " ))\n", - "test_dataloader = val_dataloader\n", - "\n", - "# evaluators\n", - "val_evaluator = dict(\n", - " type='PCKAccuracy')\n", - "test_evaluator = val_evaluator\n", - "\n", - "# hooks\n", - "default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater'))\n", - "```" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "id": "ChVqB1oYncmo" - }, - "source": [ - "### Train and Evaluation\n" - ] - }, - { - "cell_type": 
"code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 1000, - "referenced_widgets": [ - "2a079d9c0b9845318e6c612ca9601b86", - "3554753622334094961a47daf9362c59", - "08e0412b8dd54d28a26c232e75ea6088", - "558a9420b0b34be2a2ca8a8b8af9cbfc", - "a9bd3e477f07449788f0e95e3cd13ddc", - "5b2ee1f3e78d4cd993009d04baf76b24", - "a3e5aa31c3f644b5a677ec49fe2e0832", - "d2ee56f920a245d9875de8e37596a5c8", - "b5f8c86d48a04afa997fc137e1acd716", - "1c1b09d91dec4e3dadefe953daf50745", - "6af448aebdb744b98a2807f66b1d6e5d" - ] - }, - "id": "Ab3xsUdPlXuJ", - "outputId": "c07394b8-21f4-4766-af2b-87d2caa6e74c" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "09/15 12:42:06 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"log_processor\" registry tree. As a workaround, the current \"log_processor\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", - "09/15 12:42:06 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - \n", - "------------------------------------------------------------\n", - "System environment:\n", - " sys.platform: linux\n", - " Python: 3.7.13 (default, Mar 29 2022, 02:18:16) [GCC 7.5.0]\n", - " CUDA available: True\n", - " numpy_random_seed: 0\n", - " GPU 0: NVIDIA GeForce GTX 1660 Ti\n", - " CUDA_HOME: /usr/local/cuda\n", - " NVCC: Cuda compilation tools, release 11.3, V11.3.109\n", - " GCC: gcc (Ubuntu 5.4.0-6ubuntu1~16.04.12) 5.4.0 20160609\n", - " PyTorch: 1.12.0+cu113\n", - " PyTorch compiling details: PyTorch built with:\n", - " - GCC 9.3\n", - " - C++ Version: 201402\n", - " - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications\n", - " - Intel(R) MKL-DNN v2.6.0 (Git Hash 52b5f107dd9cf10910aaa19cb47f3abf9b349815)\n", - " - OpenMP 201511 (a.k.a. 
OpenMP 4.5)\n", - " - LAPACK is enabled (usually provided by MKL)\n", - " - NNPACK is enabled\n", - " - CPU capability usage: AVX2\n", - " - CUDA Runtime 11.3\n", - " - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86\n", - " - CuDNN 8.3.2 (built against CUDA 11.5)\n", - " - Magma 2.5.2\n", - " - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.3.2, CXX_COMPILER=/opt/rh/devtoolset-9/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Werror=cast-function-type -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.12.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, \n", - "\n", - " TorchVision: 0.13.0+cu113\n", - " OpenCV: 4.6.0\n", - " MMEngine: 0.1.0\n", - "\n", - "Runtime environment:\n", - " cudnn_benchmark: False\n", - " mp_cfg: {'mp_start_method': 'fork', 'opencv_num_threads': 0}\n", - " dist_cfg: {'backend': 'nccl'}\n", - " seed: 0\n", - " Distributed launcher: none\n", - " Distributed training: False\n", - " GPU number: 1\n", - "------------------------------------------------------------\n", - "\n", - "09/15 12:42:06 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Config:\n", - "default_scope = 'mmpose'\n", - "default_hooks = dict(\n", - " timer=dict(type='IterTimerHook'),\n", - " logger=dict(type='LoggerHook', interval=50),\n", - " param_scheduler=dict(type='ParamSchedulerHook'),\n", - " checkpoint=dict(\n", - " type='CheckpointHook',\n", - " interval=1,\n", - " save_best='pck/PCK@0.05',\n", - " rule='greater',\n", - " max_keep_ckpts=1),\n", - " sampler_seed=dict(type='DistSamplerSeedHook'),\n", - " visualization=dict(type='PoseVisualizationHook', enable=False))\n", - "custom_hooks = [dict(type='SyncBuffersHook')]\n", - "env_cfg = dict(\n", - " cudnn_benchmark=False,\n", - " mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),\n", - " dist_cfg=dict(backend='nccl'))\n", - "vis_backends = [dict(type='LocalVisBackend')]\n", - "visualizer = dict(\n", - " type='PoseLocalVisualizer',\n", - " vis_backends=[dict(type='LocalVisBackend')],\n", - " name='visualizer')\n", - "log_processor = dict(\n", - " type='LogProcessor', window_size=50, by_epoch=True, num_digits=6)\n", - "log_level = 'INFO'\n", - "load_from = None\n", - "resume = False\n", - "file_client_args = dict(backend='disk')\n", - "train_cfg = dict(by_epoch=True, max_epochs=40, val_interval=1)\n", - "val_cfg = dict()\n", - "test_cfg = 
dict()\n", - "optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005))\n", - "param_scheduler = [\n", - " dict(type='LinearLR', begin=0, end=10, start_factor=0.001, by_epoch=False),\n", - " dict(\n", - " type='MultiStepLR',\n", - " begin=0,\n", - " end=40,\n", - " milestones=[17, 35],\n", - " gamma=0.1,\n", - " by_epoch=True)\n", - "]\n", - "auto_scale_lr = dict(base_batch_size=512)\n", - "codec = dict(\n", - " type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)\n", - "model = dict(\n", - " type='TopdownPoseEstimator',\n", - " data_preprocessor=dict(\n", - " type='PoseDataPreprocessor',\n", - " mean=[123.675, 116.28, 103.53],\n", - " std=[58.395, 57.12, 57.375],\n", - " bgr_to_rgb=True),\n", - " backbone=dict(\n", - " type='HRNet',\n", - " in_channels=3,\n", - " extra=dict(\n", - " stage1=dict(\n", - " num_modules=1,\n", - " num_branches=1,\n", - " block='BOTTLENECK',\n", - " num_blocks=(4, ),\n", - " num_channels=(64, )),\n", - " stage2=dict(\n", - " num_modules=1,\n", - " num_branches=2,\n", - " block='BASIC',\n", - " num_blocks=(4, 4),\n", - " num_channels=(32, 64)),\n", - " stage3=dict(\n", - " num_modules=4,\n", - " num_branches=3,\n", - " block='BASIC',\n", - " num_blocks=(4, 4, 4),\n", - " num_channels=(32, 64, 128)),\n", - " stage4=dict(\n", - " num_modules=3,\n", - " num_branches=4,\n", - " block='BASIC',\n", - " num_blocks=(4, 4, 4, 4),\n", - " num_channels=(32, 64, 128, 256))),\n", - " init_cfg=dict(\n", - " type='Pretrained',\n", - " checkpoint=\n", - " 'https://download.openmmlab.com/mmpose/pretrain_models/hrnet_w32-36af842e.pth'\n", - " )),\n", - " head=dict(\n", - " type='HeatmapHead',\n", - " in_channels=32,\n", - " out_channels=17,\n", - " deconv_out_channels=None,\n", - " loss=dict(type='KeypointMSELoss', use_target_weight=True),\n", - " decoder=dict(\n", - " type='MSRAHeatmap',\n", - " input_size=(192, 256),\n", - " heatmap_size=(48, 64),\n", - " sigma=2)),\n", - " test_cfg=dict(flip_test=True, flip_mode='heatmap', shift_heatmap=True))\n", - "dataset_type = 'TinyCocoDataset'\n", - "data_mode = 'topdown'\n", - "data_root = 'data/coco_tiny'\n", - "train_pipeline = [\n", - " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", - " dict(type='GetBBoxCenterScale'),\n", - " dict(type='RandomFlip', direction='horizontal'),\n", - " dict(type='RandomHalfBody'),\n", - " dict(type='RandomBBoxTransform'),\n", - " dict(type='TopdownAffine', input_size=(192, 256)),\n", - " dict(\n", - " type='GenerateTarget',\n", - " target_type='heatmap',\n", - " encoder=dict(\n", - " type='MSRAHeatmap',\n", - " input_size=(192, 256),\n", - " heatmap_size=(48, 64),\n", - " sigma=2)),\n", - " dict(type='PackPoseInputs')\n", - "]\n", - "test_pipeline = [\n", - " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", - " dict(type='GetBBoxCenterScale'),\n", - " dict(type='TopdownAffine', input_size=(192, 256)),\n", - " dict(type='PackPoseInputs')\n", - "]\n", - "train_dataloader = dict(\n", - " batch_size=16,\n", - " num_workers=2,\n", - " persistent_workers=True,\n", - " sampler=dict(type='DefaultSampler', shuffle=True),\n", - " dataset=dict(\n", - " type='TinyCocoDataset',\n", - " data_root='data/coco_tiny',\n", - " data_mode='topdown',\n", - " ann_file='train.json',\n", - " data_prefix=dict(img='images/'),\n", - " pipeline=[\n", - " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", - " dict(type='GetBBoxCenterScale'),\n", - " dict(type='RandomFlip', direction='horizontal'),\n", - " dict(type='RandomHalfBody'),\n", - " 
dict(type='RandomBBoxTransform'),\n", - " dict(type='TopdownAffine', input_size=(192, 256)),\n", - " dict(\n", - " type='GenerateTarget',\n", - " target_type='heatmap',\n", - " encoder=dict(\n", - " type='MSRAHeatmap',\n", - " input_size=(192, 256),\n", - " heatmap_size=(48, 64),\n", - " sigma=2)),\n", - " dict(type='PackPoseInputs')\n", - " ]))\n", - "val_dataloader = dict(\n", - " batch_size=16,\n", - " num_workers=2,\n", - " persistent_workers=True,\n", - " drop_last=False,\n", - " sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),\n", - " dataset=dict(\n", - " type='TinyCocoDataset',\n", - " data_root='data/coco_tiny',\n", - " data_mode='topdown',\n", - " ann_file='val.json',\n", - " bbox_file=None,\n", - " data_prefix=dict(img='images/'),\n", - " test_mode=True,\n", - " pipeline=[\n", - " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", - " dict(type='GetBBoxCenterScale'),\n", - " dict(type='TopdownAffine', input_size=(192, 256)),\n", - " dict(type='PackPoseInputs')\n", - " ]))\n", - "test_dataloader = dict(\n", - " batch_size=16,\n", - " num_workers=2,\n", - " persistent_workers=True,\n", - " drop_last=False,\n", - " sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),\n", - " dataset=dict(\n", - " type='TinyCocoDataset',\n", - " data_root='data/coco_tiny',\n", - " data_mode='topdown',\n", - " ann_file='val.json',\n", - " bbox_file=None,\n", - " data_prefix=dict(img='images/'),\n", - " test_mode=True,\n", - " pipeline=[\n", - " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", - " dict(type='GetBBoxCenterScale'),\n", - " dict(type='TopdownAffine', input_size=(192, 256)),\n", - " dict(type='PackPoseInputs')\n", - " ]))\n", - "val_evaluator = dict(type='PCKAccuracy')\n", - "test_evaluator = dict(type='PCKAccuracy')\n", - "work_dir = 'work_dirs/hrnet_w32_coco_tiny_256x192'\n", - "randomness = dict(seed=0)\n", - "\n", - "Result has been saved to /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/modules_statistic_results.json\n", - "09/15 12:42:07 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Distributed training is not used, all SyncBatchNorm (SyncBN) layers in the model will be automatically reverted to BatchNormXd layers if they are used.\n", - "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"data sampler\" registry tree. As a workaround, the current \"data sampler\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", - "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"optimizer wrapper constructor\" registry tree. As a workaround, the current \"optimizer wrapper constructor\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", - "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"optimizer\" registry tree. As a workaround, the current \"optimizer\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. 
Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", - "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"optim_wrapper\" registry tree. As a workaround, the current \"optim_wrapper\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", - "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"parameter scheduler\" registry tree. As a workaround, the current \"parameter scheduler\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", - "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"parameter scheduler\" registry tree. As a workaround, the current \"parameter scheduler\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", - "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"parameter scheduler\" registry tree. As a workaround, the current \"parameter scheduler\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", - "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"parameter scheduler\" registry tree. As a workaround, the current \"parameter scheduler\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", - "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"data sampler\" registry tree. As a workaround, the current \"data sampler\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", - "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"weight initializer\" registry tree. As a workaround, the current \"weight initializer\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. 
Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", - "09/15 12:42:08 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - load model from: https://download.openmmlab.com/mmpose/pretrain_models/hrnet_w32-36af842e.pth\n", - "09/15 12:42:08 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - http loads checkpoint from path: https://download.openmmlab.com/mmpose/pretrain_models/hrnet_w32-36af842e.pth\n", - "09/15 12:42:09 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - The model and loaded state dict do not match exactly\n", - "\n", - "unexpected key in source state_dict: head.0.0.0.conv1.weight, head.0.0.0.bn1.weight, head.0.0.0.bn1.bias, head.0.0.0.bn1.running_mean, head.0.0.0.bn1.running_var, head.0.0.0.bn1.num_batches_tracked, head.0.0.0.conv2.weight, head.0.0.0.bn2.weight, head.0.0.0.bn2.bias, head.0.0.0.bn2.running_mean, head.0.0.0.bn2.running_var, head.0.0.0.bn2.num_batches_tracked, head.0.0.0.conv3.weight, head.0.0.0.bn3.weight, head.0.0.0.bn3.bias, head.0.0.0.bn3.running_mean, head.0.0.0.bn3.running_var, head.0.0.0.bn3.num_batches_tracked, head.0.0.0.downsample.0.weight, head.0.0.0.downsample.1.weight, head.0.0.0.downsample.1.bias, head.0.0.0.downsample.1.running_mean, head.0.0.0.downsample.1.running_var, head.0.0.0.downsample.1.num_batches_tracked, head.0.1.0.conv1.weight, head.0.1.0.bn1.weight, head.0.1.0.bn1.bias, head.0.1.0.bn1.running_mean, head.0.1.0.bn1.running_var, head.0.1.0.bn1.num_batches_tracked, head.0.1.0.conv2.weight, head.0.1.0.bn2.weight, head.0.1.0.bn2.bias, head.0.1.0.bn2.running_mean, head.0.1.0.bn2.running_var, head.0.1.0.bn2.num_batches_tracked, head.0.1.0.conv3.weight, head.0.1.0.bn3.weight, head.0.1.0.bn3.bias, head.0.1.0.bn3.running_mean, head.0.1.0.bn3.running_var, head.0.1.0.bn3.num_batches_tracked, head.0.1.0.downsample.0.weight, head.0.1.0.downsample.1.weight, head.0.1.0.downsample.1.bias, head.0.1.0.downsample.1.running_mean, head.0.1.0.downsample.1.running_var, head.0.1.0.downsample.1.num_batches_tracked, head.0.2.0.conv1.weight, head.0.2.0.bn1.weight, head.0.2.0.bn1.bias, head.0.2.0.bn1.running_mean, head.0.2.0.bn1.running_var, head.0.2.0.bn1.num_batches_tracked, head.0.2.0.conv2.weight, head.0.2.0.bn2.weight, head.0.2.0.bn2.bias, head.0.2.0.bn2.running_mean, head.0.2.0.bn2.running_var, head.0.2.0.bn2.num_batches_tracked, head.0.2.0.conv3.weight, head.0.2.0.bn3.weight, head.0.2.0.bn3.bias, head.0.2.0.bn3.running_mean, head.0.2.0.bn3.running_var, head.0.2.0.bn3.num_batches_tracked, head.0.2.0.downsample.0.weight, head.0.2.0.downsample.1.weight, head.0.2.0.downsample.1.bias, head.0.2.0.downsample.1.running_mean, head.0.2.0.downsample.1.running_var, head.0.2.0.downsample.1.num_batches_tracked, head.1.0.0.conv1.weight, head.1.0.0.bn1.weight, head.1.0.0.bn1.bias, head.1.0.0.bn1.running_mean, head.1.0.0.bn1.running_var, head.1.0.0.bn1.num_batches_tracked, head.1.0.0.conv2.weight, head.1.0.0.bn2.weight, head.1.0.0.bn2.bias, head.1.0.0.bn2.running_mean, head.1.0.0.bn2.running_var, head.1.0.0.bn2.num_batches_tracked, head.1.0.0.conv3.weight, head.1.0.0.bn3.weight, head.1.0.0.bn3.bias, head.1.0.0.bn3.running_mean, head.1.0.0.bn3.running_var, head.1.0.0.bn3.num_batches_tracked, head.1.0.0.downsample.0.weight, head.1.0.0.downsample.1.weight, head.1.0.0.downsample.1.bias, head.1.0.0.downsample.1.running_mean, head.1.0.0.downsample.1.running_var, head.1.0.0.downsample.1.num_batches_tracked, head.1.1.0.conv1.weight, head.1.1.0.bn1.weight, head.1.1.0.bn1.bias, head.1.1.0.bn1.running_mean, head.1.1.0.bn1.running_var, 
head.1.1.0.bn1.num_batches_tracked, head.1.1.0.conv2.weight, head.1.1.0.bn2.weight, head.1.1.0.bn2.bias, head.1.1.0.bn2.running_mean, head.1.1.0.bn2.running_var, head.1.1.0.bn2.num_batches_tracked, head.1.1.0.conv3.weight, head.1.1.0.bn3.weight, head.1.1.0.bn3.bias, head.1.1.0.bn3.running_mean, head.1.1.0.bn3.running_var, head.1.1.0.bn3.num_batches_tracked, head.1.1.0.downsample.0.weight, head.1.1.0.downsample.1.weight, head.1.1.0.downsample.1.bias, head.1.1.0.downsample.1.running_mean, head.1.1.0.downsample.1.running_var, head.1.1.0.downsample.1.num_batches_tracked, head.2.0.0.conv1.weight, head.2.0.0.bn1.weight, head.2.0.0.bn1.bias, head.2.0.0.bn1.running_mean, head.2.0.0.bn1.running_var, head.2.0.0.bn1.num_batches_tracked, head.2.0.0.conv2.weight, head.2.0.0.bn2.weight, head.2.0.0.bn2.bias, head.2.0.0.bn2.running_mean, head.2.0.0.bn2.running_var, head.2.0.0.bn2.num_batches_tracked, head.2.0.0.conv3.weight, head.2.0.0.bn3.weight, head.2.0.0.bn3.bias, head.2.0.0.bn3.running_mean, head.2.0.0.bn3.running_var, head.2.0.0.bn3.num_batches_tracked, head.2.0.0.downsample.0.weight, head.2.0.0.downsample.1.weight, head.2.0.0.downsample.1.bias, head.2.0.0.downsample.1.running_mean, head.2.0.0.downsample.1.running_var, head.2.0.0.downsample.1.num_batches_tracked, head.3.0.0.conv1.weight, head.3.0.0.bn1.weight, head.3.0.0.bn1.bias, head.3.0.0.bn1.running_mean, head.3.0.0.bn1.running_var, head.3.0.0.bn1.num_batches_tracked, head.3.0.0.conv2.weight, head.3.0.0.bn2.weight, head.3.0.0.bn2.bias, head.3.0.0.bn2.running_mean, head.3.0.0.bn2.running_var, head.3.0.0.bn2.num_batches_tracked, head.3.0.0.conv3.weight, head.3.0.0.bn3.weight, head.3.0.0.bn3.bias, head.3.0.0.bn3.running_mean, head.3.0.0.bn3.running_var, head.3.0.0.bn3.num_batches_tracked, head.3.0.0.downsample.0.weight, head.3.0.0.downsample.1.weight, head.3.0.0.downsample.1.bias, head.3.0.0.downsample.1.running_mean, head.3.0.0.downsample.1.running_var, head.3.0.0.downsample.1.num_batches_tracked, fc.weight, fc.bias, stage4.2.fuse_layers.1.0.0.0.weight, stage4.2.fuse_layers.1.0.0.1.weight, stage4.2.fuse_layers.1.0.0.1.bias, stage4.2.fuse_layers.1.0.0.1.running_mean, stage4.2.fuse_layers.1.0.0.1.running_var, stage4.2.fuse_layers.1.0.0.1.num_batches_tracked, stage4.2.fuse_layers.1.2.0.weight, stage4.2.fuse_layers.1.2.1.weight, stage4.2.fuse_layers.1.2.1.bias, stage4.2.fuse_layers.1.2.1.running_mean, stage4.2.fuse_layers.1.2.1.running_var, stage4.2.fuse_layers.1.2.1.num_batches_tracked, stage4.2.fuse_layers.1.3.0.weight, stage4.2.fuse_layers.1.3.1.weight, stage4.2.fuse_layers.1.3.1.bias, stage4.2.fuse_layers.1.3.1.running_mean, stage4.2.fuse_layers.1.3.1.running_var, stage4.2.fuse_layers.1.3.1.num_batches_tracked, stage4.2.fuse_layers.2.0.0.0.weight, stage4.2.fuse_layers.2.0.0.1.weight, stage4.2.fuse_layers.2.0.0.1.bias, stage4.2.fuse_layers.2.0.0.1.running_mean, stage4.2.fuse_layers.2.0.0.1.running_var, stage4.2.fuse_layers.2.0.0.1.num_batches_tracked, stage4.2.fuse_layers.2.0.1.0.weight, stage4.2.fuse_layers.2.0.1.1.weight, stage4.2.fuse_layers.2.0.1.1.bias, stage4.2.fuse_layers.2.0.1.1.running_mean, stage4.2.fuse_layers.2.0.1.1.running_var, stage4.2.fuse_layers.2.0.1.1.num_batches_tracked, stage4.2.fuse_layers.2.1.0.0.weight, stage4.2.fuse_layers.2.1.0.1.weight, stage4.2.fuse_layers.2.1.0.1.bias, stage4.2.fuse_layers.2.1.0.1.running_mean, stage4.2.fuse_layers.2.1.0.1.running_var, stage4.2.fuse_layers.2.1.0.1.num_batches_tracked, stage4.2.fuse_layers.2.3.0.weight, stage4.2.fuse_layers.2.3.1.weight, stage4.2.fuse_layers.2.3.1.bias, 
stage4.2.fuse_layers.2.3.1.running_mean, stage4.2.fuse_layers.2.3.1.running_var, stage4.2.fuse_layers.2.3.1.num_batches_tracked, stage4.2.fuse_layers.3.0.0.0.weight, stage4.2.fuse_layers.3.0.0.1.weight, stage4.2.fuse_layers.3.0.0.1.bias, stage4.2.fuse_layers.3.0.0.1.running_mean, stage4.2.fuse_layers.3.0.0.1.running_var, stage4.2.fuse_layers.3.0.0.1.num_batches_tracked, stage4.2.fuse_layers.3.0.1.0.weight, stage4.2.fuse_layers.3.0.1.1.weight, stage4.2.fuse_layers.3.0.1.1.bias, stage4.2.fuse_layers.3.0.1.1.running_mean, stage4.2.fuse_layers.3.0.1.1.running_var, stage4.2.fuse_layers.3.0.1.1.num_batches_tracked, stage4.2.fuse_layers.3.0.2.0.weight, stage4.2.fuse_layers.3.0.2.1.weight, stage4.2.fuse_layers.3.0.2.1.bias, stage4.2.fuse_layers.3.0.2.1.running_mean, stage4.2.fuse_layers.3.0.2.1.running_var, stage4.2.fuse_layers.3.0.2.1.num_batches_tracked, stage4.2.fuse_layers.3.1.0.0.weight, stage4.2.fuse_layers.3.1.0.1.weight, stage4.2.fuse_layers.3.1.0.1.bias, stage4.2.fuse_layers.3.1.0.1.running_mean, stage4.2.fuse_layers.3.1.0.1.running_var, stage4.2.fuse_layers.3.1.0.1.num_batches_tracked, stage4.2.fuse_layers.3.1.1.0.weight, stage4.2.fuse_layers.3.1.1.1.weight, stage4.2.fuse_layers.3.1.1.1.bias, stage4.2.fuse_layers.3.1.1.1.running_mean, stage4.2.fuse_layers.3.1.1.1.running_var, stage4.2.fuse_layers.3.1.1.1.num_batches_tracked, stage4.2.fuse_layers.3.2.0.0.weight, stage4.2.fuse_layers.3.2.0.1.weight, stage4.2.fuse_layers.3.2.0.1.bias, stage4.2.fuse_layers.3.2.0.1.running_mean, stage4.2.fuse_layers.3.2.0.1.running_var, stage4.2.fuse_layers.3.2.0.1.num_batches_tracked\n", - "\n", - "09/15 12:42:09 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"weight initializer\" registry tree. As a workaround, the current \"weight initializer\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", - "09/15 12:42:09 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"weight initializer\" registry tree. As a workaround, the current \"weight initializer\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. 
Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", - "09/15 12:42:09 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Checkpoints will be saved to /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192 by HardDiskBackend.\n", - "09/15 12:42:12 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:42:12 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 1 epochs\n", - "09/15 12:42:13 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:42:13 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [1][2/2] pck/PCK@0.05: 0.009035\n", - "09/15 12:42:14 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.0090 pck/PCK@0.05 at 1 epoch is saved to best_pck/PCK@0.05_epoch_1.pth.\n", - "09/15 12:42:16 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:42:16 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 2 epochs\n", - "09/15 12:42:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:42:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [2][2/2] pck/PCK@0.05: 0.163666\n", - "09/15 12:42:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_1.pth is removed\n", - "09/15 12:42:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.1637 pck/PCK@0.05 at 2 epoch is saved to best_pck/PCK@0.05_epoch_2.pth.\n", - "09/15 12:42:19 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:42:19 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 3 epochs\n", - "09/15 12:42:21 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:42:21 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [3][2/2] pck/PCK@0.05: 0.201942\n", - "09/15 12:42:21 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_2.pth is removed\n", - "09/15 12:42:21 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.2019 pck/PCK@0.05 at 3 epoch is saved to best_pck/PCK@0.05_epoch_3.pth.\n", - "09/15 12:42:23 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:42:23 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 4 epochs\n", - "09/15 12:42:24 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:42:24 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [4][2/2] pck/PCK@0.05: 0.247750\n", - "09/15 12:42:24 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_3.pth is removed\n", - "09/15 12:42:25 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.2477 pck/PCK@0.05 at 4 epoch is saved to 
best_pck/PCK@0.05_epoch_4.pth.\n", - "09/15 12:42:27 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:42:27 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 5 epochs\n", - "09/15 12:42:28 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:42:28 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [5][2/2] pck/PCK@0.05: 0.296205\n", - "09/15 12:42:28 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_4.pth is removed\n", - "09/15 12:42:29 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.2962 pck/PCK@0.05 at 5 epoch is saved to best_pck/PCK@0.05_epoch_5.pth.\n", - "09/15 12:42:31 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:42:31 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 6 epochs\n", - "09/15 12:42:32 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:42:32 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [6][2/2] pck/PCK@0.05: 0.316309\n", - "09/15 12:42:32 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_5.pth is removed\n", - "09/15 12:42:33 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.3163 pck/PCK@0.05 at 6 epoch is saved to best_pck/PCK@0.05_epoch_6.pth.\n", - "09/15 12:42:35 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:42:35 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 7 epochs\n", - "09/15 12:42:36 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:42:36 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [7][2/2] pck/PCK@0.05: 0.290834\n", - "09/15 12:42:38 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:42:38 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 8 epochs\n", - "09/15 12:42:39 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:42:39 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [8][2/2] pck/PCK@0.05: 0.335645\n", - "09/15 12:42:39 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_6.pth is removed\n", - "09/15 12:42:40 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.3356 pck/PCK@0.05 at 8 epoch is saved to best_pck/PCK@0.05_epoch_8.pth.\n", - "09/15 12:42:42 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:42:42 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 9 epochs\n", - "09/15 12:42:43 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:42:43 - mmengine - 
\u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [9][2/2] pck/PCK@0.05: 0.348761\n", - "09/15 12:42:43 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_8.pth is removed\n", - "09/15 12:42:44 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.3488 pck/PCK@0.05 at 9 epoch is saved to best_pck/PCK@0.05_epoch_9.pth.\n", - "09/15 12:42:46 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:42:46 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 10 epochs\n", - "09/15 12:42:47 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:42:47 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [10][2/2] pck/PCK@0.05: 0.310204\n", - "09/15 12:42:49 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:42:49 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 11 epochs\n", - "09/15 12:42:50 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:42:50 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [11][2/2] pck/PCK@0.05: 0.338200\n", - "09/15 12:42:52 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:42:52 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 12 epochs\n", - "09/15 12:42:53 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:42:53 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [12][2/2] pck/PCK@0.05: 0.356559\n", - "09/15 12:42:53 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_9.pth is removed\n", - "09/15 12:42:54 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.3566 pck/PCK@0.05 at 12 epoch is saved to best_pck/PCK@0.05_epoch_12.pth.\n", - "09/15 12:42:56 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:42:56 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 13 epochs\n", - "09/15 12:42:57 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:42:57 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [13][2/2] pck/PCK@0.05: 0.384718\n", - "09/15 12:42:57 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_12.pth is removed\n", - "09/15 12:42:58 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.3847 pck/PCK@0.05 at 13 epoch is saved to best_pck/PCK@0.05_epoch_13.pth.\n", - "09/15 12:43:00 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:43:00 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 14 epochs\n", - "09/15 12:43:01 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by 
``\"bbox_size\"``)...\n", - "09/15 12:43:01 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [14][2/2] pck/PCK@0.05: 0.372036\n", - "09/15 12:43:03 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:43:03 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 15 epochs\n", - "09/15 12:43:04 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:43:04 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [15][2/2] pck/PCK@0.05: 0.331702\n", - "09/15 12:43:06 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:43:06 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 16 epochs\n", - "09/15 12:43:07 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:43:07 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [16][2/2] pck/PCK@0.05: 0.350346\n", - "09/15 12:43:09 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:43:09 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 17 epochs\n", - "09/15 12:43:10 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:43:10 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [17][2/2] pck/PCK@0.05: 0.358399\n", - "09/15 12:43:12 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:43:12 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 18 epochs\n", - "09/15 12:43:14 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:43:14 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [18][2/2] pck/PCK@0.05: 0.377378\n", - "09/15 12:43:15 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:43:15 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 19 epochs\n", - "09/15 12:43:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:43:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [19][2/2] pck/PCK@0.05: 0.392675\n", - "09/15 12:43:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_13.pth is removed\n", - "09/15 12:43:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.3927 pck/PCK@0.05 at 19 epoch is saved to best_pck/PCK@0.05_epoch_19.pth.\n", - "09/15 12:43:19 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:43:19 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 20 epochs\n", - "09/15 12:43:21 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:43:21 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [20][2/2] pck/PCK@0.05: 0.413536\n", - "09/15 12:43:21 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint 
/home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_19.pth is removed\n", - "09/15 12:43:21 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.4135 pck/PCK@0.05 at 20 epoch is saved to best_pck/PCK@0.05_epoch_20.pth.\n", - "09/15 12:43:23 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:43:23 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 21 epochs\n", - "09/15 12:43:24 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:43:24 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [21][2/2] pck/PCK@0.05: 0.422105\n", - "09/15 12:43:24 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_20.pth is removed\n", - "09/15 12:43:25 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.4221 pck/PCK@0.05 at 21 epoch is saved to best_pck/PCK@0.05_epoch_21.pth.\n", - "09/15 12:43:27 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:43:27 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 22 epochs\n", - "09/15 12:43:28 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:43:28 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [22][2/2] pck/PCK@0.05: 0.430300\n", - "09/15 12:43:28 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_21.pth is removed\n", - "09/15 12:43:29 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.4303 pck/PCK@0.05 at 22 epoch is saved to best_pck/PCK@0.05_epoch_22.pth.\n", - "09/15 12:43:31 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:43:31 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 23 epochs\n", - "09/15 12:43:32 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:43:32 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [23][2/2] pck/PCK@0.05: 0.440251\n", - "09/15 12:43:32 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_22.pth is removed\n", - "09/15 12:43:33 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.4403 pck/PCK@0.05 at 23 epoch is saved to best_pck/PCK@0.05_epoch_23.pth.\n", - "09/15 12:43:34 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:43:34 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 24 epochs\n", - "09/15 12:43:36 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:43:36 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [24][2/2] pck/PCK@0.05: 0.433262\n", - "09/15 12:43:38 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: 
td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:43:38 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 25 epochs\n", - "09/15 12:43:39 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:43:39 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [25][2/2] pck/PCK@0.05: 0.429440\n", - "09/15 12:43:41 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:43:41 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 26 epochs\n", - "09/15 12:43:42 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:43:42 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [26][2/2] pck/PCK@0.05: 0.423034\n", - "09/15 12:43:44 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:43:44 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 27 epochs\n", - "09/15 12:43:45 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:43:45 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [27][2/2] pck/PCK@0.05: 0.440554\n", - "09/15 12:43:45 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_23.pth is removed\n", - "09/15 12:43:46 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.4406 pck/PCK@0.05 at 27 epoch is saved to best_pck/PCK@0.05_epoch_27.pth.\n", - "09/15 12:43:48 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:43:48 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 28 epochs\n", - "09/15 12:43:49 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:43:49 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [28][2/2] pck/PCK@0.05: 0.454103\n", - "09/15 12:43:49 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_27.pth is removed\n", - "09/15 12:43:50 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.4541 pck/PCK@0.05 at 28 epoch is saved to best_pck/PCK@0.05_epoch_28.pth.\n", - "09/15 12:43:52 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:43:52 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 29 epochs\n", - "09/15 12:43:53 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:43:53 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [29][2/2] pck/PCK@0.05: 0.434462\n", - "09/15 12:43:55 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:43:55 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 30 epochs\n", - "09/15 12:43:56 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:43:56 - mmengine - 
\u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [30][2/2] pck/PCK@0.05: 0.434963\n", - "09/15 12:43:58 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:43:58 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 31 epochs\n", - "09/15 12:43:59 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:43:59 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [31][2/2] pck/PCK@0.05: 0.445667\n", - "09/15 12:44:01 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:44:01 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 32 epochs\n", - "09/15 12:44:03 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:44:03 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [32][2/2] pck/PCK@0.05: 0.445784\n", - "09/15 12:44:04 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:44:04 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 33 epochs\n", - "09/15 12:44:06 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:44:06 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [33][2/2] pck/PCK@0.05: 0.434502\n", - "09/15 12:44:08 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:44:08 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 34 epochs\n", - "09/15 12:44:09 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:44:09 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [34][2/2] pck/PCK@0.05: 0.435661\n", - "09/15 12:44:11 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:44:11 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 35 epochs\n", - "09/15 12:44:12 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:44:12 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [35][2/2] pck/PCK@0.05: 0.425407\n", - "09/15 12:44:14 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:44:14 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 36 epochs\n", - "09/15 12:44:15 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:44:15 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [36][2/2] pck/PCK@0.05: 0.428712\n", - "09/15 12:44:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:44:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 37 epochs\n", - "09/15 12:44:18 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:44:18 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [37][2/2] pck/PCK@0.05: 0.423183\n", - "09/15 12:44:20 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: 
td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:44:20 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 38 epochs\n", - "09/15 12:44:22 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:44:22 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [38][2/2] pck/PCK@0.05: 0.432350\n", - "09/15 12:44:23 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:44:23 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 39 epochs\n", - "09/15 12:44:25 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:44:25 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [39][2/2] pck/PCK@0.05: 0.423967\n", - "09/15 12:44:27 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", - "09/15 12:44:27 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 40 epochs\n", - "09/15 12:44:28 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", - "09/15 12:44:28 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [40][2/2] pck/PCK@0.05: 0.429198\n" - ] - }, - { - "data": { - "text/plain": [ - "TopdownPoseEstimator(\n", - " (data_preprocessor): PoseDataPreprocessor()\n", - " (backbone): HRNet(\n", - " (conv1): Conv2d(3, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " (layer1): Sequential(\n", - " (0): Bottleneck(\n", - " (conv1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " (downsample): Sequential(\n", - " (0): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (1): Bottleneck(\n", - " (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): Bottleneck(\n", - " (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, 
affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): Bottleneck(\n", - " (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " (transition1): ModuleList(\n", - " (0): Sequential(\n", - " (0): Conv2d(256, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): ReLU(inplace=True)\n", - " )\n", - " (1): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(256, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): ReLU(inplace=True)\n", - " )\n", - " )\n", - " )\n", - " (stage2): Sequential(\n", - " (0): HRModule(\n", - " (branches): ModuleList(\n", - " (0): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): 
ReLU(inplace=True)\n", - " )\n", - " )\n", - " (1): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " )\n", - " (fuse_layers): ModuleList(\n", - " (0): ModuleList(\n", - " (0): None\n", - " (1): Sequential(\n", - " (0): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=2.0, mode=nearest)\n", - " )\n", - " )\n", - " (1): ModuleList(\n", - " (0): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (1): None\n", - " )\n", - " )\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " (transition2): ModuleList(\n", - " (0): None\n", - " (1): None\n", - " (2): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): ReLU(inplace=True)\n", - " )\n", - " )\n", - " )\n", - " (stage3): Sequential(\n", - " (0): HRModule(\n", - " (branches): ModuleList(\n", - " (0): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): 
BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " (1): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " (2): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, 
eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " )\n", - " (fuse_layers): ModuleList(\n", - " (0): ModuleList(\n", - " (0): None\n", - " (1): Sequential(\n", - " (0): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=2.0, mode=nearest)\n", - " )\n", - " (2): Sequential(\n", - " (0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=4.0, mode=nearest)\n", - " )\n", - " )\n", - " (1): ModuleList(\n", - " (0): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (1): None\n", - " (2): Sequential(\n", - " (0): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=2.0, mode=nearest)\n", - " )\n", - " )\n", - " (2): ModuleList(\n", - " (0): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): ReLU(inplace=True)\n", - " )\n", - " (1): Sequential(\n", - " (0): Conv2d(32, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (1): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (2): None\n", - " )\n", - " )\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): 
HRModule(\n", - " (branches): ModuleList(\n", - " (0): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " (1): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), 
bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " (2): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " )\n", - " (fuse_layers): ModuleList(\n", - " (0): ModuleList(\n", - " (0): None\n", - " (1): Sequential(\n", - " (0): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=2.0, mode=nearest)\n", - " )\n", - " (2): Sequential(\n", - " (0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=4.0, mode=nearest)\n", - " )\n", - " )\n", - " (1): ModuleList(\n", - " (0): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (1): None\n", - " (2): Sequential(\n", - " (0): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=2.0, mode=nearest)\n", - " )\n", - " )\n", - " (2): ModuleList(\n", - " (0): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): ReLU(inplace=True)\n", - " )\n", - " (1): Sequential(\n", - " (0): Conv2d(32, 128, 
kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (1): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (2): None\n", - " )\n", - " )\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): HRModule(\n", - " (branches): ModuleList(\n", - " (0): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " (1): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " 
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " (2): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " )\n", - " (fuse_layers): ModuleList(\n", - " (0): ModuleList(\n", - " (0): None\n", - " (1): Sequential(\n", - " (0): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=2.0, mode=nearest)\n", - " )\n", - " (2): Sequential(\n", - " (0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=4.0, mode=nearest)\n", - " )\n", - " )\n", - " (1): ModuleList(\n", - " (0): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (1): None\n", - " (2): Sequential(\n", - " (0): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), 
bias=False)\n", - " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=2.0, mode=nearest)\n", - " )\n", - " )\n", - " (2): ModuleList(\n", - " (0): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): ReLU(inplace=True)\n", - " )\n", - " (1): Sequential(\n", - " (0): Conv2d(32, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (1): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (2): None\n", - " )\n", - " )\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): HRModule(\n", - " (branches): ModuleList(\n", - " (0): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " (1): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): 
BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " (2): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " )\n", - " (fuse_layers): ModuleList(\n", - " (0): ModuleList(\n", - " (0): None\n", - " (1): Sequential(\n", - " (0): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=2.0, mode=nearest)\n", - " )\n", - " (2): Sequential(\n", - " (0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): 
BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=4.0, mode=nearest)\n", - " )\n", - " )\n", - " (1): ModuleList(\n", - " (0): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (1): None\n", - " (2): Sequential(\n", - " (0): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=2.0, mode=nearest)\n", - " )\n", - " )\n", - " (2): ModuleList(\n", - " (0): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): ReLU(inplace=True)\n", - " )\n", - " (1): Sequential(\n", - " (0): Conv2d(32, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (1): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (2): None\n", - " )\n", - " )\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " (transition3): ModuleList(\n", - " (0): None\n", - " (1): None\n", - " (2): None\n", - " (3): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): ReLU(inplace=True)\n", - " )\n", - " )\n", - " )\n", - " (stage4): Sequential(\n", - " (0): HRModule(\n", - " (branches): ModuleList(\n", - " (0): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 
1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " (1): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " (2): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): 
ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " (3): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): BasicBlock(\n", - " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " )\n", - " (fuse_layers): ModuleList(\n", - " (0): ModuleList(\n", - " (0): None\n", - " (1): Sequential(\n", - " (0): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=2.0, mode=nearest)\n", - " )\n", - " (2): Sequential(\n", - " (0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=4.0, mode=nearest)\n", - " )\n", - " (3): Sequential(\n", - " (0): Conv2d(256, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=8.0, mode=nearest)\n", - " )\n", - " )\n", - " (1): ModuleList(\n", - " (0): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (1): None\n", - " (2): Sequential(\n", - " 
(0): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=2.0, mode=nearest)\n", - " )\n", - " (3): Sequential(\n", - " (0): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=4.0, mode=nearest)\n", - " )\n", - " )\n", - " (2): ModuleList(\n", - " (0): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): ReLU(inplace=True)\n", - " )\n", - " (1): Sequential(\n", - " (0): Conv2d(32, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (1): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (2): None\n", - " (3): Sequential(\n", - " (0): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=2.0, mode=nearest)\n", - " )\n", - " )\n", - " (3): ModuleList(\n", - " (0): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): ReLU(inplace=True)\n", - " )\n", - " (1): Sequential(\n", - " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): ReLU(inplace=True)\n", - " )\n", - " (2): Sequential(\n", - " (0): Conv2d(32, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (1): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): ReLU(inplace=True)\n", - " )\n", - " (1): Sequential(\n", - " (0): Conv2d(64, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (2): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (3): None\n", - " )\n", - " )\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): HRModule(\n", - " (branches): ModuleList(\n", - " (0): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), 
stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " (1): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " (2): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): 
BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " (3): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): BasicBlock(\n", - " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " 
)\n", - " )\n", - " (fuse_layers): ModuleList(\n", - " (0): ModuleList(\n", - " (0): None\n", - " (1): Sequential(\n", - " (0): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=2.0, mode=nearest)\n", - " )\n", - " (2): Sequential(\n", - " (0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=4.0, mode=nearest)\n", - " )\n", - " (3): Sequential(\n", - " (0): Conv2d(256, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=8.0, mode=nearest)\n", - " )\n", - " )\n", - " (1): ModuleList(\n", - " (0): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (1): None\n", - " (2): Sequential(\n", - " (0): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=2.0, mode=nearest)\n", - " )\n", - " (3): Sequential(\n", - " (0): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=4.0, mode=nearest)\n", - " )\n", - " )\n", - " (2): ModuleList(\n", - " (0): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): ReLU(inplace=True)\n", - " )\n", - " (1): Sequential(\n", - " (0): Conv2d(32, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (1): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (2): None\n", - " (3): Sequential(\n", - " (0): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=2.0, mode=nearest)\n", - " )\n", - " )\n", - " (3): ModuleList(\n", - " (0): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): ReLU(inplace=True)\n", - " )\n", - " (1): Sequential(\n", - " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): ReLU(inplace=True)\n", - " )\n", - " (2): Sequential(\n", - " (0): Conv2d(32, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " 
)\n", - " (1): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): ReLU(inplace=True)\n", - " )\n", - " (1): Sequential(\n", - " (0): Conv2d(64, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (2): Sequential(\n", - " (0): Sequential(\n", - " (0): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (3): None\n", - " )\n", - " )\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): HRModule(\n", - " (branches): ModuleList(\n", - " (0): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " (1): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, 
eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " (2): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " (3): Sequential(\n", - " (0): BasicBlock(\n", - " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (1): BasicBlock(\n", - " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, 
track_running_stats=True)\n", - " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): BasicBlock(\n", - " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): BasicBlock(\n", - " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " )\n", - " (fuse_layers): ModuleList(\n", - " (0): ModuleList(\n", - " (0): None\n", - " (1): Sequential(\n", - " (0): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=2.0, mode=nearest)\n", - " )\n", - " (2): Sequential(\n", - " (0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=4.0, mode=nearest)\n", - " )\n", - " (3): Sequential(\n", - " (0): Conv2d(256, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Upsample(scale_factor=8.0, mode=nearest)\n", - " )\n", - " )\n", - " )\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " )\n", - " init_cfg={'type': 'Pretrained', 'checkpoint': 'https://download.openmmlab.com/mmpose/pretrain_models/hrnet_w32-36af842e.pth'}\n", - " (head): HeatmapHead(\n", - " (loss_module): KeypointMSELoss(\n", - " (criterion): MSELoss()\n", - " )\n", - " (deconv_layers): Identity()\n", - " (conv_layers): Identity()\n", - " (final_layer): Conv2d(32, 17, kernel_size=(1, 1), stride=(1, 1))\n", - " )\n", - " init_cfg=[{'type': 'Normal', 'layer': ['Conv2d', 'ConvTranspose2d'], 'std': 0.001}, {'type': 'Constant', 'layer': 'BatchNorm2d', 'val': 1}]\n", - ")" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from mmengine.config import Config, DictAction\n", - "from mmengine.runner import Runner\n", - "\n", - "# set preprocess configs to model\n", - "cfg.model.setdefault('data_preprocessor', cfg.get('preprocess_cfg', {}))\n", - "\n", - "# build the runner from config\n", - "runner = Runner.from_cfg(cfg)\n", - "\n", - "# start training\n", - "runner.train()" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "id": "sdLwcaojhE2T" - }, - "source": [ - "#### Note\n", - "The recommended best practice is to convert your customized data into COCO format." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "zJyteZNGqwNk" - }, - "outputs": [], - "source": [] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "provenance": [] - }, - "gpuClass": "standard", - "kernelspec": { - "display_name": "dev2.0", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.5" - }, - "vscode": { - "interpreter": { - "hash": "383ba00087b5a9caebf3648b758a31e474cc01be975489b58f119fa4bc17e1f8" - } - }, - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "08e0412b8dd54d28a26c232e75ea6088": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "FloatProgressModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "FloatProgressModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "ProgressView", - "bar_style": "success", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_d2ee56f920a245d9875de8e37596a5c8", - "max": 132594821, - "min": 0, - "orientation": "horizontal", - "style": "IPY_MODEL_b5f8c86d48a04afa997fc137e1acd716", - "value": 132594821 - } - }, - "1c1b09d91dec4e3dadefe953daf50745": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "2a079d9c0b9845318e6c612ca9601b86": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HBoxModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_3554753622334094961a47daf9362c59", - "IPY_MODEL_08e0412b8dd54d28a26c232e75ea6088", - "IPY_MODEL_558a9420b0b34be2a2ca8a8b8af9cbfc" - ], - "layout": "IPY_MODEL_a9bd3e477f07449788f0e95e3cd13ddc" - } - }, - "3554753622334094961a47daf9362c59": { - "model_module": "@jupyter-widgets/controls", - 
"model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_5b2ee1f3e78d4cd993009d04baf76b24", - "placeholder": "​", - "style": "IPY_MODEL_a3e5aa31c3f644b5a677ec49fe2e0832", - "value": "100%" - } - }, - "558a9420b0b34be2a2ca8a8b8af9cbfc": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_1c1b09d91dec4e3dadefe953daf50745", - "placeholder": "​", - "style": "IPY_MODEL_6af448aebdb744b98a2807f66b1d6e5d", - "value": " 126M/126M [00:14<00:00, 9.32MB/s]" - } - }, - "5b2ee1f3e78d4cd993009d04baf76b24": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "6af448aebdb744b98a2807f66b1d6e5d": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "a3e5aa31c3f644b5a677ec49fe2e0832": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "a9bd3e477f07449788f0e95e3cd13ddc": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": 
"LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "b5f8c86d48a04afa997fc137e1acd716": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "ProgressStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "ProgressStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "bar_color": null, - "description_width": "" - } - }, - "d2ee56f920a245d9875de8e37596a5c8": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - } - } - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "F77yOqgkX8p4" + }, + "source": [ + "\"Open" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "8xX3YewOtqV0" + }, + "source": [ + "# MMPose Tutorial\n", + "\n", + "Welcome to MMPose colab tutorial! In this tutorial, we will show you how to\n", + "\n", + "- install MMPose 1.x\n", + "- perform inference with an MMPose model\n", + "- train a new mmpose model with your own datasets\n", + "\n", + "Let's start!" 
+ ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "bkw-kUD8t3t8" + }, + "source": [ + "## Install MMPose\n", + "\n", + "We recommend to use a conda environment to install mmpose and its dependencies. And compilers `nvcc` and `gcc` are required." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "0f_Ebb2otWtd", + "outputId": "8c16b8ae-b927-41d5-c49e-d61ba6798a2d" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "nvcc: NVIDIA (R) Cuda compiler driver\n", + "Copyright (c) 2005-2022 NVIDIA Corporation\n", + "Built on Wed_Sep_21_10:33:58_PDT_2022\n", + "Cuda compilation tools, release 11.8, V11.8.89\n", + "Build cuda_11.8.r11.8/compiler.31833905_0\n", + "gcc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0\n", + "Copyright (C) 2019 Free Software Foundation, Inc.\n", + "This is free software; see the source for copying conditions. There is NO\n", + "warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n", + "\n", + "/usr/local/bin/python\n" + ] + } + ], + "source": [ + "# check NVCC version\n", + "!nvcc -V\n", + "\n", + "# check GCC version\n", + "!gcc --version\n", + "\n", + "# check python in conda environment\n", + "!which python" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "igSm4jhihE2M", + "outputId": "0d521640-a4d7-4264-889c-df862e9c332f" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Looking in indexes: https://download.pytorch.org/whl/cu118, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Requirement already satisfied: torch in /usr/local/lib/python3.9/dist-packages (2.0.0+cu118)\n", + "Requirement already satisfied: torchvision in /usr/local/lib/python3.9/dist-packages (0.15.1+cu118)\n", + "Requirement already satisfied: torchaudio in /usr/local/lib/python3.9/dist-packages (2.0.1+cu118)\n", + "Requirement already satisfied: networkx in /usr/local/lib/python3.9/dist-packages (from torch) (3.1)\n", + "Requirement already satisfied: filelock in /usr/local/lib/python3.9/dist-packages (from torch) (3.11.0)\n", + "Requirement already satisfied: sympy in /usr/local/lib/python3.9/dist-packages (from torch) (1.11.1)\n", + "Requirement already satisfied: triton==2.0.0 in /usr/local/lib/python3.9/dist-packages (from torch) (2.0.0)\n", + "Requirement already satisfied: jinja2 in /usr/local/lib/python3.9/dist-packages (from torch) (3.1.2)\n", + "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.9/dist-packages (from torch) (4.5.0)\n", + "Requirement already satisfied: cmake in /usr/local/lib/python3.9/dist-packages (from triton==2.0.0->torch) (3.25.2)\n", + "Requirement already satisfied: lit in /usr/local/lib/python3.9/dist-packages (from triton==2.0.0->torch) (16.0.1)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.9/dist-packages (from torchvision) (1.22.4)\n", + "Requirement already satisfied: requests in /usr/local/lib/python3.9/dist-packages (from torchvision) (2.27.1)\n", + "Requirement already satisfied: pillow!=8.3.*,>=5.3.0 in /usr/local/lib/python3.9/dist-packages (from torchvision) (8.4.0)\n", + "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.9/dist-packages (from jinja2->torch) (2.1.2)\n", + "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.9/dist-packages (from 
requests->torchvision) (3.4)\n", + "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.9/dist-packages (from requests->torchvision) (1.26.15)\n", + "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.9/dist-packages (from requests->torchvision) (2.0.12)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.9/dist-packages (from requests->torchvision) (2022.12.7)\n", + "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.9/dist-packages (from sympy->torch) (1.3.0)\n" + ] + } + ], + "source": [ + "# install dependencies: (if your colab has CUDA 11.8)\n", + "%pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "MLcoZr3ot9iw", + "outputId": "70e5d18e-746c-41a3-a761-6303b79eaf02" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Collecting openmim\n", + " Downloading openmim-0.3.7-py2.py3-none-any.whl (51 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m51.3/51.3 kB\u001b[0m \u001b[31m1.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: tabulate in /usr/local/lib/python3.9/dist-packages (from openmim) (0.8.10)\n", + "Requirement already satisfied: rich in /usr/local/lib/python3.9/dist-packages (from openmim) (13.3.3)\n", + "Requirement already satisfied: pip>=19.3 in /usr/local/lib/python3.9/dist-packages (from openmim) (23.0.1)\n", + "Collecting colorama\n", + " Downloading colorama-0.4.6-py2.py3-none-any.whl (25 kB)\n", + "Collecting model-index\n", + " Downloading model_index-0.1.11-py3-none-any.whl (34 kB)\n", + "Requirement already satisfied: pandas in /usr/local/lib/python3.9/dist-packages (from openmim) (1.5.3)\n", + "Requirement already satisfied: requests in /usr/local/lib/python3.9/dist-packages (from openmim) (2.27.1)\n", + "Requirement already satisfied: Click in /usr/local/lib/python3.9/dist-packages (from openmim) (8.1.3)\n", + "Requirement already satisfied: markdown in /usr/local/lib/python3.9/dist-packages (from model-index->openmim) (3.4.3)\n", + "Collecting ordered-set\n", + " Downloading ordered_set-4.1.0-py3-none-any.whl (7.6 kB)\n", + "Requirement already satisfied: pyyaml in /usr/local/lib/python3.9/dist-packages (from model-index->openmim) (6.0)\n", + "Requirement already satisfied: numpy>=1.20.3 in /usr/local/lib/python3.9/dist-packages (from pandas->openmim) (1.22.4)\n", + "Requirement already satisfied: python-dateutil>=2.8.1 in /usr/local/lib/python3.9/dist-packages (from pandas->openmim) (2.8.2)\n", + "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.9/dist-packages (from pandas->openmim) (2022.7.1)\n", + "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.9/dist-packages (from requests->openmim) (2.0.12)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.9/dist-packages (from requests->openmim) (2022.12.7)\n", + "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.9/dist-packages (from requests->openmim) (3.4)\n", + "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.9/dist-packages (from requests->openmim) (1.26.15)\n", + 
"Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.9/dist-packages (from rich->openmim) (2.14.0)\n", + "Requirement already satisfied: markdown-it-py<3.0.0,>=2.2.0 in /usr/local/lib/python3.9/dist-packages (from rich->openmim) (2.2.0)\n", + "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.9/dist-packages (from markdown-it-py<3.0.0,>=2.2.0->rich->openmim) (0.1.2)\n", + "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.9/dist-packages (from python-dateutil>=2.8.1->pandas->openmim) (1.16.0)\n", + "Requirement already satisfied: importlib-metadata>=4.4 in /usr/local/lib/python3.9/dist-packages (from markdown->model-index->openmim) (6.2.0)\n", + "Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.9/dist-packages (from importlib-metadata>=4.4->markdown->model-index->openmim) (3.15.0)\n", + "Installing collected packages: ordered-set, colorama, model-index, openmim\n", + "Successfully installed colorama-0.4.6 model-index-0.1.11 openmim-0.3.7 ordered-set-4.1.0\n", + "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Looking in links: https://download.openmmlab.com/mmcv/dist/cu118/torch2.0.0/index.html\n", + "Collecting mmengine\n", + " Downloading mmengine-0.7.2-py3-none-any.whl (366 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m366.9/366.9 kB\u001b[0m \u001b[31m14.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: rich in /usr/local/lib/python3.9/dist-packages (from mmengine) (13.3.3)\n", + "Requirement already satisfied: matplotlib in /usr/local/lib/python3.9/dist-packages (from mmengine) (3.7.1)\n", + "Requirement already satisfied: pyyaml in /usr/local/lib/python3.9/dist-packages (from mmengine) (6.0)\n", + "Requirement already satisfied: opencv-python>=3 in /usr/local/lib/python3.9/dist-packages (from mmengine) (4.7.0.72)\n", + "Collecting yapf\n", + " Downloading yapf-0.32.0-py2.py3-none-any.whl (190 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m190.2/190.2 kB\u001b[0m \u001b[31m17.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: termcolor in /usr/local/lib/python3.9/dist-packages (from mmengine) (2.2.0)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.9/dist-packages (from mmengine) (1.22.4)\n", + "Collecting addict\n", + " Downloading addict-2.4.0-py3-none-any.whl (3.8 kB)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (1.4.4)\n", + "Requirement already satisfied: importlib-resources>=3.2.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (5.12.0)\n", + "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (23.0)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (0.11.0)\n", + "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (2.8.2)\n", + "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.9/dist-packages (from 
matplotlib->mmengine) (4.39.3)\n", + "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (3.0.9)\n", + "Requirement already satisfied: pillow>=6.2.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (8.4.0)\n", + "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine) (1.0.7)\n", + "Requirement already satisfied: markdown-it-py<3.0.0,>=2.2.0 in /usr/local/lib/python3.9/dist-packages (from rich->mmengine) (2.2.0)\n", + "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.9/dist-packages (from rich->mmengine) (2.14.0)\n", + "Requirement already satisfied: zipp>=3.1.0 in /usr/local/lib/python3.9/dist-packages (from importlib-resources>=3.2.0->matplotlib->mmengine) (3.15.0)\n", + "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.9/dist-packages (from markdown-it-py<3.0.0,>=2.2.0->rich->mmengine) (0.1.2)\n", + "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.9/dist-packages (from python-dateutil>=2.7->matplotlib->mmengine) (1.16.0)\n", + "Installing collected packages: yapf, addict, mmengine\n", + "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + "Successfully installed addict-2.4.0 mmengine-0.7.2 yapf-0.32.0\n", + "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. 
Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Looking in links: https://download.openmmlab.com/mmcv/dist/cu118/torch2.0.0/index.html\n", + "Collecting mmcv>=2.0.0rc1\n", + " Downloading https://download.openmmlab.com/mmcv/dist/cu118/torch2.0.0/mmcv-2.0.0-cp39-cp39-manylinux1_x86_64.whl (74.4 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m74.4/74.4 MB\u001b[0m \u001b[31m12.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: mmengine>=0.2.0 in /usr/local/lib/python3.9/dist-packages (from mmcv>=2.0.0rc1) (0.7.2)\n", + "Requirement already satisfied: yapf in /usr/local/lib/python3.9/dist-packages (from mmcv>=2.0.0rc1) (0.32.0)\n", + "Requirement already satisfied: packaging in /usr/local/lib/python3.9/dist-packages (from mmcv>=2.0.0rc1) (23.0)\n", + "Requirement already satisfied: addict in /usr/local/lib/python3.9/dist-packages (from mmcv>=2.0.0rc1) (2.4.0)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.9/dist-packages (from mmcv>=2.0.0rc1) (1.22.4)\n", + "Requirement already satisfied: pyyaml in /usr/local/lib/python3.9/dist-packages (from mmcv>=2.0.0rc1) (6.0)\n", + "Requirement already satisfied: opencv-python>=3 in /usr/local/lib/python3.9/dist-packages (from mmcv>=2.0.0rc1) (4.7.0.72)\n", + "Requirement already satisfied: Pillow in /usr/local/lib/python3.9/dist-packages (from mmcv>=2.0.0rc1) (8.4.0)\n", + "Requirement already satisfied: matplotlib in /usr/local/lib/python3.9/dist-packages (from mmengine>=0.2.0->mmcv>=2.0.0rc1) (3.7.1)\n", + "Requirement already satisfied: rich in /usr/local/lib/python3.9/dist-packages (from mmengine>=0.2.0->mmcv>=2.0.0rc1) (13.3.3)\n", + "Requirement already satisfied: termcolor in /usr/local/lib/python3.9/dist-packages (from mmengine>=0.2.0->mmcv>=2.0.0rc1) (2.2.0)\n", + "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (2.8.2)\n", + "Requirement already satisfied: importlib-resources>=3.2.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (5.12.0)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (0.11.0)\n", + "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (4.39.3)\n", + "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (3.0.9)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (1.4.4)\n", + "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (1.0.7)\n", + "Requirement already satisfied: markdown-it-py<3.0.0,>=2.2.0 in /usr/local/lib/python3.9/dist-packages (from rich->mmengine>=0.2.0->mmcv>=2.0.0rc1) (2.2.0)\n", + "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.9/dist-packages (from rich->mmengine>=0.2.0->mmcv>=2.0.0rc1) (2.14.0)\n", + "Requirement already satisfied: zipp>=3.1.0 in /usr/local/lib/python3.9/dist-packages (from 
importlib-resources>=3.2.0->matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (3.15.0)\n", + "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.9/dist-packages (from markdown-it-py<3.0.0,>=2.2.0->rich->mmengine>=0.2.0->mmcv>=2.0.0rc1) (0.1.2)\n", + "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.9/dist-packages (from python-dateutil>=2.7->matplotlib->mmengine>=0.2.0->mmcv>=2.0.0rc1) (1.16.0)\n", + "Installing collected packages: mmcv\n", + "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + "Successfully installed mmcv-2.0.0\n", + "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Looking in links: https://download.openmmlab.com/mmcv/dist/cu118/torch2.0.0/index.html\n", + "Collecting mmdet>=3.0.0rc0\n", + " Downloading mmdet-3.0.0-py3-none-any.whl (1.7 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.7/1.7 MB\u001b[0m \u001b[31m71.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: six in /usr/local/lib/python3.9/dist-packages (from mmdet>=3.0.0rc0) (1.16.0)\n", + "Collecting terminaltables\n", + " Downloading terminaltables-3.1.10-py2.py3-none-any.whl (15 kB)\n", + "Requirement already satisfied: pycocotools in /usr/local/lib/python3.9/dist-packages (from mmdet>=3.0.0rc0) (2.0.6)\n", + "Requirement already satisfied: scipy in /usr/local/lib/python3.9/dist-packages (from mmdet>=3.0.0rc0) (1.10.1)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.9/dist-packages (from mmdet>=3.0.0rc0) (1.22.4)\n", + "Requirement already satisfied: matplotlib in /usr/local/lib/python3.9/dist-packages (from mmdet>=3.0.0rc0) (3.7.1)\n", + "Requirement already satisfied: shapely in /usr/local/lib/python3.9/dist-packages (from mmdet>=3.0.0rc0) (2.0.1)\n", + "Requirement already satisfied: mmengine<1.0.0,>=0.7.1 in /usr/local/lib/python3.9/dist-packages (from mmdet>=3.0.0rc0) (0.7.2)\n", + "Requirement already satisfied: mmcv<2.1.0,>=2.0.0rc4 in /usr/local/lib/python3.9/dist-packages (from mmdet>=3.0.0rc0) (2.0.0)\n", + "Requirement already satisfied: pyyaml in /usr/local/lib/python3.9/dist-packages (from mmcv<2.1.0,>=2.0.0rc4->mmdet>=3.0.0rc0) (6.0)\n", + "Requirement already satisfied: packaging in /usr/local/lib/python3.9/dist-packages (from mmcv<2.1.0,>=2.0.0rc4->mmdet>=3.0.0rc0) (23.0)\n", + "Requirement already satisfied: opencv-python>=3 in /usr/local/lib/python3.9/dist-packages (from mmcv<2.1.0,>=2.0.0rc4->mmdet>=3.0.0rc0) (4.7.0.72)\n", + "Requirement already satisfied: addict in /usr/local/lib/python3.9/dist-packages (from mmcv<2.1.0,>=2.0.0rc4->mmdet>=3.0.0rc0) (2.4.0)\n", + "Requirement already satisfied: Pillow in /usr/local/lib/python3.9/dist-packages (from mmcv<2.1.0,>=2.0.0rc4->mmdet>=3.0.0rc0) (8.4.0)\n", + "Requirement already satisfied: yapf in /usr/local/lib/python3.9/dist-packages (from 
mmcv<2.1.0,>=2.0.0rc4->mmdet>=3.0.0rc0) (0.32.0)\n", + "Requirement already satisfied: termcolor in /usr/local/lib/python3.9/dist-packages (from mmengine<1.0.0,>=0.7.1->mmdet>=3.0.0rc0) (2.2.0)\n", + "Requirement already satisfied: rich in /usr/local/lib/python3.9/dist-packages (from mmengine<1.0.0,>=0.7.1->mmdet>=3.0.0rc0) (13.3.3)\n", + "Requirement already satisfied: importlib-resources>=3.2.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmdet>=3.0.0rc0) (5.12.0)\n", + "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmdet>=3.0.0rc0) (2.8.2)\n", + "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmdet>=3.0.0rc0) (4.39.3)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmdet>=3.0.0rc0) (1.4.4)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmdet>=3.0.0rc0) (0.11.0)\n", + "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmdet>=3.0.0rc0) (1.0.7)\n", + "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmdet>=3.0.0rc0) (3.0.9)\n", + "Requirement already satisfied: zipp>=3.1.0 in /usr/local/lib/python3.9/dist-packages (from importlib-resources>=3.2.0->matplotlib->mmdet>=3.0.0rc0) (3.15.0)\n", + "Requirement already satisfied: markdown-it-py<3.0.0,>=2.2.0 in /usr/local/lib/python3.9/dist-packages (from rich->mmengine<1.0.0,>=0.7.1->mmdet>=3.0.0rc0) (2.2.0)\n", + "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.9/dist-packages (from rich->mmengine<1.0.0,>=0.7.1->mmdet>=3.0.0rc0) (2.14.0)\n", + "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.9/dist-packages (from markdown-it-py<3.0.0,>=2.2.0->rich->mmengine<1.0.0,>=0.7.1->mmdet>=3.0.0rc0) (0.1.2)\n", + "Installing collected packages: terminaltables, mmdet\n", + "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + "/usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. 
Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + "Successfully installed mmdet-3.0.0 terminaltables-3.1.10\n" + ] + } + ], + "source": [ + "# install MMEngine, MMCV and MMDetection using MIM\n", + "%pip install -U openmim\n", + "!mim install mmengine\n", + "!mim install \"mmcv>=2.0.0\"\n", + "!mim install \"mmdet>=3.0.0\"" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "42hRcloJhE2N", + "outputId": "9175e011-82c0-438d-f378-264e8467eb09" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Collecting git+https://github.com/jin-s13/xtcocoapi\n", + " Cloning https://github.com/jin-s13/xtcocoapi to /tmp/pip-req-build-6ts8xw10\n", + " Running command git clone --filter=blob:none --quiet https://github.com/jin-s13/xtcocoapi /tmp/pip-req-build-6ts8xw10\n", + " Resolved https://github.com/jin-s13/xtcocoapi to commit 86a60cab276e619dac5d22834a36dceaf7aa0a38\n", + " Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", + "Requirement already satisfied: setuptools>=18.0 in /usr/local/lib/python3.9/dist-packages (from xtcocotools==1.13) (67.6.1)\n", + "Requirement already satisfied: cython>=0.27.3 in /usr/local/lib/python3.9/dist-packages (from xtcocotools==1.13) (0.29.34)\n", + "Requirement already satisfied: matplotlib>=2.1.0 in /usr/local/lib/python3.9/dist-packages (from xtcocotools==1.13) (3.7.1)\n", + "Requirement already satisfied: numpy>=1.20.0 in /usr/local/lib/python3.9/dist-packages (from xtcocotools==1.13) (1.22.4)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (1.4.4)\n", + "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (4.39.3)\n", + "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (1.0.7)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (0.11.0)\n", + "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (23.0)\n", + "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (3.0.9)\n", + "Requirement already satisfied: importlib-resources>=3.2.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (5.12.0)\n", + "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (2.8.2)\n", + "Requirement already satisfied: pillow>=6.2.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib>=2.1.0->xtcocotools==1.13) (8.4.0)\n", + "Requirement already satisfied: zipp>=3.1.0 in /usr/local/lib/python3.9/dist-packages (from importlib-resources>=3.2.0->matplotlib>=2.1.0->xtcocotools==1.13) (3.15.0)\n", + "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.9/dist-packages (from python-dateutil>=2.7->matplotlib>=2.1.0->xtcocotools==1.13) (1.16.0)\n", + "Building wheels for collected packages: xtcocotools\n", + " Building wheel for xtcocotools (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", + " Created wheel for xtcocotools: filename=xtcocotools-1.13-cp39-cp39-linux_x86_64.whl size=402078 sha256=e6a1d4ea868ca2cbd8151f85509641b20b24745a9b8b353348ba8386c35ee6c6\n", + " Stored in directory: /tmp/pip-ephem-wheel-cache-a15wpqzs/wheels/3f/df/8b/d3eff2ded4b03a665d977a0baa328d9efa2f9ac9971929a222\n", + "Successfully built xtcocotools\n", + "Installing collected packages: xtcocotools\n", + "Successfully installed xtcocotools-1.13\n" + ] + } + ], + "source": [ + "# for better Colab compatibility, install xtcocotools from source\n", + "%pip install git+https://github.com/jin-s13/xtcocoapi" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "lzuSKOjMvJZu", + "outputId": "d6a7a3f8-2d96-40a6-a7c4-65697e18ffc9" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Cloning into 'mmpose'...\n", + "remote: Enumerating objects: 26225, done.\u001b[K\n", + "remote: Counting objects: 100% (97/97), done.\u001b[K\n", + "remote: Compressing objects: 100% (67/67), done.\u001b[K\n", + "remote: Total 26225 (delta 33), reused 67 (delta 28), pack-reused 26128\u001b[K\n", + "Receiving objects: 100% (26225/26225), 28.06 MiB | 13.36 MiB/s, done.\n", + "Resolving deltas: 100% (18673/18673), done.\n", + "/content/mmpose\n", + "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.9/dist-packages (from -r requirements/build.txt (line 2)) (1.22.4)\n", + "Requirement already satisfied: torch>=1.6 in /usr/local/lib/python3.9/dist-packages (from -r requirements/build.txt (line 3)) (2.0.0+cu118)\n", + "Collecting chumpy\n", + " Downloading chumpy-0.70.tar.gz (50 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m50.6/50.6 kB\u001b[0m \u001b[31m2.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25h Preparing metadata (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", + "Collecting json_tricks\n", + " Downloading json_tricks-3.16.1-py2.py3-none-any.whl (27 kB)\n", + "Requirement already satisfied: matplotlib in /usr/local/lib/python3.9/dist-packages (from -r requirements/runtime.txt (line 3)) (3.7.1)\n", + "Collecting munkres\n", + " Downloading munkres-1.1.4-py2.py3-none-any.whl (7.0 kB)\n", + "Requirement already satisfied: opencv-python in /usr/local/lib/python3.9/dist-packages (from -r requirements/runtime.txt (line 6)) (4.7.0.72)\n", + "Requirement already satisfied: pillow in /usr/local/lib/python3.9/dist-packages (from -r requirements/runtime.txt (line 7)) (8.4.0)\n", + "Requirement already satisfied: scipy in /usr/local/lib/python3.9/dist-packages (from -r requirements/runtime.txt (line 8)) (1.10.1)\n", + "Requirement already satisfied: torchvision in /usr/local/lib/python3.9/dist-packages (from -r requirements/runtime.txt (line 9)) (0.15.1+cu118)\n", + "Requirement already satisfied: xtcocotools>=1.12 in /usr/local/lib/python3.9/dist-packages (from -r requirements/runtime.txt (line 10)) (1.13)\n", + "Collecting coverage\n", + " Downloading coverage-7.2.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (227 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m227.5/227.5 kB\u001b[0m \u001b[31m27.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting flake8\n", + " Downloading flake8-6.0.0-py2.py3-none-any.whl (57 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m57.8/57.8 kB\u001b[0m \u001b[31m6.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting interrogate\n", + " Downloading interrogate-1.5.0-py3-none-any.whl (45 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m45.3/45.3 kB\u001b[0m \u001b[31m5.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting isort==4.3.21\n", + " Downloading isort-4.3.21-py2.py3-none-any.whl (42 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m42.3/42.3 kB\u001b[0m \u001b[31m5.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting parameterized\n", + " Downloading parameterized-0.9.0-py2.py3-none-any.whl (20 kB)\n", + "Requirement already satisfied: pytest in /usr/local/lib/python3.9/dist-packages (from -r requirements/tests.txt (line 6)) (7.2.2)\n", + "Collecting pytest-runner\n", + " Downloading pytest_runner-6.0.0-py3-none-any.whl (7.2 kB)\n", + "Collecting xdoctest>=0.10.0\n", + " Downloading xdoctest-1.1.1-py3-none-any.whl (137 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m137.6/137.6 kB\u001b[0m \u001b[31m14.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: yapf in /usr/local/lib/python3.9/dist-packages (from -r requirements/tests.txt (line 9)) (0.32.0)\n", + "Requirement already satisfied: requests in /usr/local/lib/python3.9/dist-packages (from -r requirements/optional.txt (line 1)) (2.27.1)\n", + "Requirement already satisfied: filelock in /usr/local/lib/python3.9/dist-packages (from torch>=1.6->-r requirements/build.txt (line 3)) (3.11.0)\n", + "Requirement already satisfied: networkx in /usr/local/lib/python3.9/dist-packages (from torch>=1.6->-r requirements/build.txt (line 3)) (3.1)\n", + "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.9/dist-packages (from torch>=1.6->-r 
requirements/build.txt (line 3)) (4.5.0)\n", + "Requirement already satisfied: jinja2 in /usr/local/lib/python3.9/dist-packages (from torch>=1.6->-r requirements/build.txt (line 3)) (3.1.2)\n", + "Requirement already satisfied: triton==2.0.0 in /usr/local/lib/python3.9/dist-packages (from torch>=1.6->-r requirements/build.txt (line 3)) (2.0.0)\n", + "Requirement already satisfied: sympy in /usr/local/lib/python3.9/dist-packages (from torch>=1.6->-r requirements/build.txt (line 3)) (1.11.1)\n", + "Requirement already satisfied: cmake in /usr/local/lib/python3.9/dist-packages (from triton==2.0.0->torch>=1.6->-r requirements/build.txt (line 3)) (3.25.2)\n", + "Requirement already satisfied: lit in /usr/local/lib/python3.9/dist-packages (from triton==2.0.0->torch>=1.6->-r requirements/build.txt (line 3)) (16.0.1)\n", + "Requirement already satisfied: six>=1.11.0 in /usr/local/lib/python3.9/dist-packages (from chumpy->-r requirements/runtime.txt (line 1)) (1.16.0)\n", + "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->-r requirements/runtime.txt (line 3)) (4.39.3)\n", + "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->-r requirements/runtime.txt (line 3)) (3.0.9)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->-r requirements/runtime.txt (line 3)) (1.4.4)\n", + "Requirement already satisfied: importlib-resources>=3.2.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->-r requirements/runtime.txt (line 3)) (5.12.0)\n", + "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.9/dist-packages (from matplotlib->-r requirements/runtime.txt (line 3)) (2.8.2)\n", + "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->-r requirements/runtime.txt (line 3)) (23.0)\n", + "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->-r requirements/runtime.txt (line 3)) (1.0.7)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.9/dist-packages (from matplotlib->-r requirements/runtime.txt (line 3)) (0.11.0)\n", + "Requirement already satisfied: setuptools>=18.0 in /usr/local/lib/python3.9/dist-packages (from xtcocotools>=1.12->-r requirements/runtime.txt (line 10)) (67.6.1)\n", + "Requirement already satisfied: cython>=0.27.3 in /usr/local/lib/python3.9/dist-packages (from xtcocotools>=1.12->-r requirements/runtime.txt (line 10)) (0.29.34)\n", + "Collecting pyflakes<3.1.0,>=3.0.0\n", + " Downloading pyflakes-3.0.1-py2.py3-none-any.whl (62 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.8/62.8 kB\u001b[0m \u001b[31m5.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting pycodestyle<2.11.0,>=2.10.0\n", + " Downloading pycodestyle-2.10.0-py2.py3-none-any.whl (41 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m41.3/41.3 kB\u001b[0m \u001b[31m4.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting mccabe<0.8.0,>=0.7.0\n", + " Downloading mccabe-0.7.0-py2.py3-none-any.whl (7.3 kB)\n", + "Collecting py\n", + " Downloading py-1.11.0-py2.py3-none-any.whl (98 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m98.7/98.7 kB\u001b[0m \u001b[31m11.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + 
"\u001b[?25hRequirement already satisfied: colorama in /usr/local/lib/python3.9/dist-packages (from interrogate->-r requirements/tests.txt (line 3)) (0.4.6)\n", + "Requirement already satisfied: toml in /usr/local/lib/python3.9/dist-packages (from interrogate->-r requirements/tests.txt (line 3)) (0.10.2)\n", + "Requirement already satisfied: attrs in /usr/local/lib/python3.9/dist-packages (from interrogate->-r requirements/tests.txt (line 3)) (22.2.0)\n", + "Requirement already satisfied: tabulate in /usr/local/lib/python3.9/dist-packages (from interrogate->-r requirements/tests.txt (line 3)) (0.8.10)\n", + "Requirement already satisfied: click>=7.1 in /usr/local/lib/python3.9/dist-packages (from interrogate->-r requirements/tests.txt (line 3)) (8.1.3)\n", + "Requirement already satisfied: tomli>=1.0.0 in /usr/local/lib/python3.9/dist-packages (from pytest->-r requirements/tests.txt (line 6)) (2.0.1)\n", + "Requirement already satisfied: pluggy<2.0,>=0.12 in /usr/local/lib/python3.9/dist-packages (from pytest->-r requirements/tests.txt (line 6)) (1.0.0)\n", + "Requirement already satisfied: iniconfig in /usr/local/lib/python3.9/dist-packages (from pytest->-r requirements/tests.txt (line 6)) (2.0.0)\n", + "Requirement already satisfied: exceptiongroup>=1.0.0rc8 in /usr/local/lib/python3.9/dist-packages (from pytest->-r requirements/tests.txt (line 6)) (1.1.1)\n", + "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.9/dist-packages (from requests->-r requirements/optional.txt (line 1)) (1.26.15)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.9/dist-packages (from requests->-r requirements/optional.txt (line 1)) (2022.12.7)\n", + "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.9/dist-packages (from requests->-r requirements/optional.txt (line 1)) (2.0.12)\n", + "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.9/dist-packages (from requests->-r requirements/optional.txt (line 1)) (3.4)\n", + "Requirement already satisfied: zipp>=3.1.0 in /usr/local/lib/python3.9/dist-packages (from importlib-resources>=3.2.0->matplotlib->-r requirements/runtime.txt (line 3)) (3.15.0)\n", + "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.9/dist-packages (from jinja2->torch>=1.6->-r requirements/build.txt (line 3)) (2.1.2)\n", + "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.9/dist-packages (from sympy->torch>=1.6->-r requirements/build.txt (line 3)) (1.3.0)\n", + "Building wheels for collected packages: chumpy\n", + " Building wheel for chumpy (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", + " Created wheel for chumpy: filename=chumpy-0.70-py3-none-any.whl size=58282 sha256=ccde33ce99f135241a3f9ed380871cf8e4a569053d21b0ceba97809ddf3b26c8\n", + " Stored in directory: /root/.cache/pip/wheels/71/b5/d3/bbff0d638d797944856371a4ee326f9ffb1829083a383bba77\n", + "Successfully built chumpy\n", + "Installing collected packages: munkres, json_tricks, xdoctest, pytest-runner, pyflakes, pycodestyle, py, parameterized, mccabe, isort, coverage, interrogate, flake8, chumpy\n", + "Successfully installed chumpy-0.70 coverage-7.2.3 flake8-6.0.0 interrogate-1.5.0 isort-4.3.21 json_tricks-3.16.1 mccabe-0.7.0 munkres-1.1.4 parameterized-0.9.0 py-1.11.0 pycodestyle-2.10.0 pyflakes-3.0.1 pytest-runner-6.0.0 xdoctest-1.1.1\n", + "Using pip 23.0.1 from /usr/local/lib/python3.9/dist-packages/pip (python 3.9)\n", + "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Obtaining file:///content/mmpose\n", + " Running command python setup.py egg_info\n", + " running egg_info\n", + " creating /tmp/pip-pip-egg-info-tatkegdw/mmpose.egg-info\n", + " writing /tmp/pip-pip-egg-info-tatkegdw/mmpose.egg-info/PKG-INFO\n", + " writing dependency_links to /tmp/pip-pip-egg-info-tatkegdw/mmpose.egg-info/dependency_links.txt\n", + " writing requirements to /tmp/pip-pip-egg-info-tatkegdw/mmpose.egg-info/requires.txt\n", + " writing top-level names to /tmp/pip-pip-egg-info-tatkegdw/mmpose.egg-info/top_level.txt\n", + " writing manifest file '/tmp/pip-pip-egg-info-tatkegdw/mmpose.egg-info/SOURCES.txt'\n", + " reading manifest file '/tmp/pip-pip-egg-info-tatkegdw/mmpose.egg-info/SOURCES.txt'\n", + " reading manifest template 'MANIFEST.in'\n", + " warning: no files found matching 'mmpose/.mim/model-index.yml'\n", + " warning: no files found matching '*.py' under directory 'mmpose/.mim/configs'\n", + " warning: no files found matching '*.yml' under directory 'mmpose/.mim/configs'\n", + " warning: no files found matching '*.py' under directory 'mmpose/.mim/tools'\n", + " warning: no files found matching '*.sh' under directory 'mmpose/.mim/tools'\n", + " warning: no files found matching '*.py' under directory 'mmpose/.mim/demo'\n", + " adding license file 'LICENSE'\n", + " writing manifest file '/tmp/pip-pip-egg-info-tatkegdw/mmpose.egg-info/SOURCES.txt'\n", + " Preparing metadata (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", + "Requirement already satisfied: chumpy in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (0.70)\n", + "Requirement already satisfied: json_tricks in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (3.16.1)\n", + "Requirement already satisfied: matplotlib in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (3.7.1)\n", + "Requirement already satisfied: munkres in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (1.1.4)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (1.22.4)\n", + "Requirement already satisfied: opencv-python in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (4.7.0.72)\n", + "Requirement already satisfied: pillow in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (8.4.0)\n", + "Requirement already satisfied: scipy in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (1.10.1)\n", + "Requirement already satisfied: torchvision in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (0.15.1+cu118)\n", + "Requirement already satisfied: xtcocotools>=1.12 in /usr/local/lib/python3.9/dist-packages (from mmpose==1.0.0) (1.13)\n", + "Requirement already satisfied: cython>=0.27.3 in /usr/local/lib/python3.9/dist-packages (from xtcocotools>=1.12->mmpose==1.0.0) (0.29.34)\n", + "Requirement already satisfied: setuptools>=18.0 in /usr/local/lib/python3.9/dist-packages (from xtcocotools>=1.12->mmpose==1.0.0) (67.6.1)\n", + "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmpose==1.0.0) (1.0.7)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmpose==1.0.0) (0.11.0)\n", + "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmpose==1.0.0) (2.8.2)\n", + "Requirement already satisfied: importlib-resources>=3.2.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmpose==1.0.0) (5.12.0)\n", + "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmpose==1.0.0) (23.0)\n", + "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmpose==1.0.0) (4.39.3)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmpose==1.0.0) (1.4.4)\n", + "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->mmpose==1.0.0) (3.0.9)\n", + "Requirement already satisfied: six>=1.11.0 in /usr/local/lib/python3.9/dist-packages (from chumpy->mmpose==1.0.0) (1.16.0)\n", + "Requirement already satisfied: requests in /usr/local/lib/python3.9/dist-packages (from torchvision->mmpose==1.0.0) (2.27.1)\n", + "Requirement already satisfied: torch==2.0.0 in /usr/local/lib/python3.9/dist-packages (from torchvision->mmpose==1.0.0) (2.0.0+cu118)\n", + "Requirement already satisfied: filelock in /usr/local/lib/python3.9/dist-packages (from torch==2.0.0->torchvision->mmpose==1.0.0) (3.11.0)\n", + "Requirement already satisfied: jinja2 in /usr/local/lib/python3.9/dist-packages (from torch==2.0.0->torchvision->mmpose==1.0.0) (3.1.2)\n", + "Requirement already satisfied: networkx in /usr/local/lib/python3.9/dist-packages (from torch==2.0.0->torchvision->mmpose==1.0.0) (3.1)\n", + "Requirement already satisfied: typing-extensions in 
/usr/local/lib/python3.9/dist-packages (from torch==2.0.0->torchvision->mmpose==1.0.0) (4.5.0)\n", + "Requirement already satisfied: triton==2.0.0 in /usr/local/lib/python3.9/dist-packages (from torch==2.0.0->torchvision->mmpose==1.0.0) (2.0.0)\n", + "Requirement already satisfied: sympy in /usr/local/lib/python3.9/dist-packages (from torch==2.0.0->torchvision->mmpose==1.0.0) (1.11.1)\n", + "Requirement already satisfied: cmake in /usr/local/lib/python3.9/dist-packages (from triton==2.0.0->torch==2.0.0->torchvision->mmpose==1.0.0) (3.25.2)\n", + "Requirement already satisfied: lit in /usr/local/lib/python3.9/dist-packages (from triton==2.0.0->torch==2.0.0->torchvision->mmpose==1.0.0) (16.0.1)\n", + "Requirement already satisfied: zipp>=3.1.0 in /usr/local/lib/python3.9/dist-packages (from importlib-resources>=3.2.0->matplotlib->mmpose==1.0.0) (3.15.0)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.9/dist-packages (from requests->torchvision->mmpose==1.0.0) (2022.12.7)\n", + "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.9/dist-packages (from requests->torchvision->mmpose==1.0.0) (3.4)\n", + "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.9/dist-packages (from requests->torchvision->mmpose==1.0.0) (1.26.15)\n", + "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.9/dist-packages (from requests->torchvision->mmpose==1.0.0) (2.0.12)\n", + "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.9/dist-packages (from jinja2->torch==2.0.0->torchvision->mmpose==1.0.0) (2.1.2)\n", + "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.9/dist-packages (from sympy->torch==2.0.0->torchvision->mmpose==1.0.0) (1.3.0)\n", + "Installing collected packages: mmpose\n", + " Running setup.py develop for mmpose\n", + " Running command python setup.py develop\n", + " running develop\n", + " /usr/local/lib/python3.9/dist-packages/setuptools/command/easy_install.py:144: EasyInstallDeprecationWarning: easy_install command is deprecated. Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + " /usr/local/lib/python3.9/dist-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. 
Use build and pip and other standards-based tools.\n", + " warnings.warn(\n", + " running egg_info\n", + " creating mmpose.egg-info\n", + " writing mmpose.egg-info/PKG-INFO\n", + " writing dependency_links to mmpose.egg-info/dependency_links.txt\n", + " writing requirements to mmpose.egg-info/requires.txt\n", + " writing top-level names to mmpose.egg-info/top_level.txt\n", + " writing manifest file 'mmpose.egg-info/SOURCES.txt'\n", + " reading manifest file 'mmpose.egg-info/SOURCES.txt'\n", + " reading manifest template 'MANIFEST.in'\n", + " adding license file 'LICENSE'\n", + " writing manifest file 'mmpose.egg-info/SOURCES.txt'\n", + " running build_ext\n", + " Creating /usr/local/lib/python3.9/dist-packages/mmpose.egg-link (link to .)\n", + " Adding mmpose 1.0.0 to easy-install.pth file\n", + "\n", + " Installed /content/mmpose\n", + "Successfully installed mmpose-1.0.0\n" + ] + } + ], + "source": [ + "!git clone https://github.com/open-mmlab/mmpose.git\n", + "# The master branch is version 1.x \n", + "%cd mmpose\n", + "%pip install -r requirements.txt\n", + "%pip install -v -e .\n", + "# \"-v\" means verbose, or more output\n", + "# \"-e\" means installing a project in editable mode,\n", + "# thus any local modifications made to the code will take effect without reinstallation." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Miy2zVRcw6kL", + "outputId": "1cbae5a0-249a-4cb2-980a-7db592c759da" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch version: 2.0.0+cu118 True\n", + "torchvision version: 0.15.1+cu118\n", + "mmpose version: 1.0.0\n", + "cuda version: 11.8\n", + "compiler information: GCC 9.3\n" + ] + } + ], + "source": [ + "# Check Pytorch installation\n", + "import torch, torchvision\n", + "\n", + "print('torch version:', torch.__version__, torch.cuda.is_available())\n", + "print('torchvision version:', torchvision.__version__)\n", + "\n", + "# Check MMPose installation\n", + "import mmpose\n", + "\n", + "print('mmpose version:', mmpose.__version__)\n", + "\n", + "# Check mmcv installation\n", + "from mmcv.ops import get_compiling_cuda_version, get_compiler_version\n", + "\n", + "print('cuda version:', get_compiling_cuda_version())\n", + "print('compiler information:', get_compiler_version())" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "r2bf94XpyFnk" + }, + "source": [ + "## Inference with an MMPose model\n", + "\n", + "MMPose provides high-level APIs for model inference and training." 
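For readers skimming this patch, the snippet below is a condensed, hedged sketch of the same top-down inference flow that the notebook cells further down implement (an MMDetection detector for person boxes, then an MMPose estimator for keypoints). It is not part of the notebook source; it only reuses the configs, checkpoints, helper functions, and thresholds that appear in those cells, and it assumes a GPU runtime ('cuda:0') like the Colab session shown here.

```python
# Condensed sketch of the top-down inference flow implemented by the cells below.
# Not part of the notebook source; it reuses the same configs, checkpoints and
# helpers, and assumes a GPU runtime ('cuda:0') inside the cloned mmpose repo.
import numpy as np
from mmengine.registry import init_default_scope

from mmdet.apis import inference_detector, init_detector
from mmpose.apis import inference_topdown
from mmpose.apis import init_model as init_pose_estimator
from mmpose.evaluation.functional import nms
from mmpose.structures import merge_data_samples

img = 'tests/data/coco/000000197388.jpg'
det_config = 'demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py'
det_checkpoint = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth'
pose_config = 'configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py'
pose_checkpoint = 'https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth'

# build the two models once
detector = init_detector(det_config, det_checkpoint, device='cuda:0')
pose_estimator = init_pose_estimator(pose_config, pose_checkpoint, device='cuda:0')

# 1) detect people
init_default_scope(detector.cfg.get('default_scope', 'mmdet'))
pred = inference_detector(detector, img).pred_instances.cpu().numpy()

# 2) keep confident person boxes (class 0) and apply NMS, as in visualize_img()
bboxes = np.concatenate((pred.bboxes, pred.scores[:, None]), axis=1)
bboxes = bboxes[np.logical_and(pred.labels == 0, pred.scores > 0.3)]
bboxes = bboxes[nms(bboxes, 0.3)][:, :4]

# 3) estimate keypoints for each box and merge the per-instance results
pose_results = inference_topdown(pose_estimator, img, bboxes)
data_samples = merge_data_samples(pose_results)
```

The cells that follow perform exactly these steps, with the extra visualizer setup needed to draw the keypoints, heatmaps, and boxes on the image.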
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "JjTt4LZAx_lK", + "outputId": "485b62c4-226b-45fb-a864-99c2a029353c" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loads checkpoint by http backend from path: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Downloading: \"https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth\" to /root/.cache/torch/hub/checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loads checkpoint by http backend from path: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Downloading: \"https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth\" to /root/.cache/torch/hub/checkpoints/hrnet_w32_coco_256x192-c78dce93_20200708.pth\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "04/13 16:14:37 - mmengine - WARNING - `Visualizer` backend is not initialized because save_dir is None.\n" + ] + } + ], + "source": [ + "import mmcv\n", + "from mmcv import imread\n", + "import mmengine\n", + "from mmengine.registry import init_default_scope\n", + "import numpy as np\n", + "\n", + "from mmpose.apis import inference_topdown\n", + "from mmpose.apis import init_model as init_pose_estimator\n", + "from mmpose.evaluation.functional import nms\n", + "from mmpose.registry import VISUALIZERS\n", + "from mmpose.structures import merge_data_samples\n", + "\n", + "try:\n", + " from mmdet.apis import inference_detector, init_detector\n", + " has_mmdet = True\n", + "except (ImportError, ModuleNotFoundError):\n", + " has_mmdet = False\n", + "\n", + "local_runtime = False\n", + "\n", + "try:\n", + " from google.colab.patches import cv2_imshow # for image visualization in colab\n", + "except:\n", + " local_runtime = True\n", + "\n", + "img = 'tests/data/coco/000000197388.jpg'\n", + "pose_config = 'configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py'\n", + "pose_checkpoint = 'https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth'\n", + "det_config = 'demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py'\n", + "det_checkpoint = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth'\n", + "\n", + "device = 'cuda:0'\n", + "cfg_options = dict(model=dict(test_cfg=dict(output_heatmaps=True)))\n", + "\n", + "\n", + "# build detector\n", + "detector = init_detector(\n", + " det_config,\n", + " det_checkpoint,\n", + " device=device\n", + ")\n", + "\n", + "\n", + "# build pose estimator\n", + "pose_estimator = init_pose_estimator(\n", + " pose_config,\n", + " pose_checkpoint,\n", + " device=device,\n", + " cfg_options=cfg_options\n", + ")\n", + "\n", + "# init visualizer\n", + "pose_estimator.cfg.visualizer.radius = 3\n", + "pose_estimator.cfg.visualizer.line_width = 1\n", + "visualizer = VISUALIZERS.build(pose_estimator.cfg.visualizer)\n", + "# the dataset_meta is loaded from the checkpoint 
and\n", + "# then pass to the model in init_pose_estimator\n", + "visualizer.set_dataset_meta(pose_estimator.dataset_meta)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "id": "tsSM0NRPEG1Z" + }, + "outputs": [], + "source": [ + "\n", + "def visualize_img(img_path, detector, pose_estimator, visualizer,\n", + " show_interval, out_file):\n", + " \"\"\"Visualize predicted keypoints (and heatmaps) of one image.\"\"\"\n", + "\n", + " # predict bbox\n", + " scope = detector.cfg.get('default_scope', 'mmdet')\n", + " if scope is not None:\n", + " init_default_scope(scope)\n", + " detect_result = inference_detector(detector, img_path)\n", + " pred_instance = detect_result.pred_instances.cpu().numpy()\n", + " bboxes = np.concatenate(\n", + " (pred_instance.bboxes, pred_instance.scores[:, None]), axis=1)\n", + " bboxes = bboxes[np.logical_and(pred_instance.labels == 0,\n", + " pred_instance.scores > 0.3)]\n", + " bboxes = bboxes[nms(bboxes, 0.3)][:, :4]\n", + "\n", + " # predict keypoints\n", + " pose_results = inference_topdown(pose_estimator, img_path, bboxes)\n", + " data_samples = merge_data_samples(pose_results)\n", + "\n", + " # show the results\n", + " img = mmcv.imread(img_path, channel_order='rgb')\n", + "\n", + " visualizer.add_datasample(\n", + " 'result',\n", + " img,\n", + " data_sample=data_samples,\n", + " draw_gt=False,\n", + " draw_heatmap=True,\n", + " draw_bbox=True,\n", + " show=False,\n", + " wait_time=show_interval,\n", + " out_file=out_file,\n", + " kpt_thr=0.3)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "ogj5h9x-HiMA", + "outputId": "71452169-c16a-4a61-b558-f7518fcefaa0" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "04/13 16:15:22 - mmengine - WARNING - The current default scope \"mmpose\" is not \"mmdet\", `init_default_scope` will force set the currentdefault scope to \"mmdet\".\n", + "04/13 16:15:29 - mmengine - WARNING - The current default scope \"mmdet\" is not \"mmpose\", `init_default_scope` will force set the currentdefault scope to \"mmpose\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/usr/local/lib/python3.9/dist-packages/mmengine/visualization/visualizer.py:664: UserWarning: Warning: The circle is out of bounds, the drawn circle may not be in the image\n", + " warnings.warn(\n", + "/usr/local/lib/python3.9/dist-packages/mmengine/visualization/visualizer.py:741: UserWarning: Warning: The bbox is out of bounds, the drawn bbox may not be in the image\n", + " warnings.warn(\n", + "/usr/local/lib/python3.9/dist-packages/mmengine/visualization/visualizer.py:812: UserWarning: Warning: The polygon is out of bounds, the drawn polygon may not be in the image\n", + " warnings.warn(\n" + ] + } + ], + "source": [ + "visualize_img(\n", + " img,\n", + " detector,\n", + " pose_estimator,\n", + " visualizer,\n", + " show_interval=0,\n", + " out_file=None)\n", + "\n", + "vis_result = visualizer.get_image()" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 801 + }, + "id": "CEYxupWT3aJY", + "outputId": "05acd979-25b1-4b18-8738-d6b9edc6bfe1" + }, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAoAAAAMQCAIAAAA4vkODAAEAAElEQVR4nGT9TcitW7ctBrXW+3ie+b5r7/2dv3s9Cf6gci8oJqIBTSCgFgLWBAuCBAtBJDGKBUGwYkGwFhAJRANqzUKCJBVBJQoWggkxSAzGGDT+BGIM995zzve313rnfMborVno43nXPnGx+djs713znfOZY/Sf1ltrnf+D/+k/TRw2hYgEacoACAFAEOh/AwBQiZREIzOBKElUDHKWiSUAkZkjULZRRxzXdZEkKQlAcGSEbAC2l/fLS+j/QjLh/nnbBds8gnPOzByPsdZaS8zIzLxmZhbcL26CBiBj9N8FAkAYQEjiMEn8//2ZS0EmaDsikABQVUccdhEAWaDs/jHismkEmWRCZVQYzzDsZESEiaqyfeQYVSJsgkRGf9IwSPp+FP3GGAZAR/9f/bn6/5L0NA9mWpdLRJYJVXKYERFkZl7XM48hKTMDadtARAAwimQEMo8jR2YOYozxeDtHHg5+fukAAkk6QQCfX9M+CPczvL8vijDhfiiG+pUM3n9hWZIMSULJuv9u/4kA6YT7x4EEA7FgAP29fP5e2+J+dH1c+z9+vr1l9fc4GABUy2C/pbIA0EDJ0qRdotxHsaokVZXNqmmbGbarCkAyzFhrLYkZJCFTRB+REkmGcb8lynGMpYoIRFTV4LD0/v5e16tgwcwMoKrgEBFQZpJ8PackoyLCNjNd9ThOSWTKhj0YLyyS0PrFV5NwAIgIUSHaxURE2IyIzCRNh+05Z0Q8Ho/ruj4+PoAwolARkXRmRoTWjIgVIaEfBckxoqqqisYYo7+XIw5JY4w159sxrut6rRdzxHgzUFXl9cZ9IDsmOCiUiVFjzlcOAlprRYyRp01jRkSHgjyO8zwhvT6eC6QcEV/e3799+wZgrpWZkSDZT6CvDFwkVwByIscYQUrqm6UOVAAcn3EhIqTVTz5GSiKyqqLjY/+5j2Lsy8IFH3EEOS3bA76gY7kYDiYIkbLTRWRmHzAANs/zzWuSJKBCRETEaz5zMDggTqzMJHOt/V0nrKoIyCSgESmICAMZYFrriFSH6AisSvCiOxJGjIiwSxIggZlZdZ830qXzPK/1conM4zgEV1UEXOqzFIGIfRKAGIzV8bcwMvfrrCKZSUsdKCYNMkjXsk0mEBmHjXLZFSOllWBatlfARDrDKNTb8fanf/qn/8a/8W+MMRwkWdaPb4/n85mZ11JmQisi+liutfrYV5VWRQySJTloCgg6woDLLvAgLangPnhLlcmhALk+Dy40Aq5VDP4iNNVyB7rM/PxfAI/HY72ux3m+6spMTXVsH1VlIfMolYExYlmZGQwA5f5liJ2FWcsMkKwqwyJASDoMAQEQDINGAAD7O/iMmxFhVJXvfIDBILnWkvo/igIiCAUNIjkKLimOwcCc01VH5sijqmbEtHdU7axAReRwSMo7tQGwC6BgAiQtguL3oN1xX5kha63VuUHV554FA+b9WkrKtEGAFJIwBR2ijSRdIpnMVQsuRZDsJ0b3TaeDAZMEO6LKtnWnlv6uIzpgdcggMiiUVOvxwxeioCVi8ChNR7xer8xYa53naXtxgiDSYSBQgfsGiiLtSAAwRcCOJEEEgWBXYtgZvB/lfu++c2EiQRn9HffPkSi6n23n/k5MZdUqr+pT11/ZjmbHQUCfLwuIgOwAvZPrL+umBGF85lyS7sRsR0Qw+q8oHCIZABIo+LPiIbnfNO+DA+6Ya6wdBdSpt49rCUiaHOAQSRZQCURw2bkLD8G2AxT8Wj7GGJGU0xpnCpi4GCSJrrr8/bmutdZamdm/dNUieRzHkvrmIwhakquLHdi/eBD9Xf6lQ61MgrYdkXCogGQSJZkQ/JrXqmX0Q6skR2RVLa+OsZmJ8loLiEAEiCWXAmRGRvR7lrSsDBbxWpMZ53hfwn6GZMaYFggaSRGkMAzbgoMMEBGZKbBguc6RVQU5Mqrq559/Zh+KvofwnC9pkTzO7GQAwNIuZ0mAEUF3ZEBX0Z3Rw9DdX+Qdoz7jVT9KSXC4K0My45AkyxB3EeFdPfPO+vi84UBGEEXKiDACkh2uQkQcxwFpTQ2iIkBB7LjP8EhG1+PB4QiwvDuTiFgqEzSDLCKWVvJAKIBVyB2rGIZBW7aiGyF39P9lK9IhHehYFWk+3t6fz+c4h2CUvCy4T+yOs7bE8pJkIWEkhrCqRgTp13xFBDM6vhCIiMgol2yhDB8RMG2s6xpjZIAMQUGyDHb0sCxJwZGUUf/6/+P/9sMPP9g2HJFVijG6xg0TqyKSqzJzWUfujFtVOdKMudYR6SAibUP3TQTlIkMMuPrJBEhHRJQUkZ83TdIxxnUVum6NPm9Ik5mJHV8+Q1zBC6attYjo2D4yjyXleWjOCIwxul5YNTtukuzskAyAJiNjt7MiCUslhYQcgBkmtMN37JJqJ+xuLh2lAgoA7K5HAISVoGnv43v3TuHso2OtVQE8Hg9J1XVNpCQKRgAMmhwZeSRr7WsmdW3Qp0skibT1ywRMRrg7F4EeOfpOFgvIjpKAOjvB7Kwlgx1HyM7FyxoMkFXFked5dDiAQ/33dzsOkoiwrojoDLHW+mzjOr7EXW1//vczmMIzsYw/RC6sZ5I20ufxUNVxHNf1PMeRmWOMr9dXmODGBhwkTEPCrosBMm2utRhgDHLnvP6VC+oE3FnC+ksRCuxEaGrfTO5wFl2ZATAMu7RUC6WqWnO3Hfcb4ISCcXgnRBEG12CUgV3X+xc5mJ2UP2Nf/y1bhO5mPSIIOqANK6BvGD7vEBlBdQfxiwTMzPrM+vf9AYKE5koyxvAul5wRGXmFbOMOarYd9B3ByxhEMuZ8VdjC6YMR/WFhB4hw2Hkc6G8ey3ZwWLxeK89khGAASxKMsCwqGdY+3gBQdt5VHfYtDdgwmLRRVYmcLNuZ2X1wVQEUMcDPaq8zAcA5MS3tTiHKhmAG6ccYx3FUlcVIwlhrmbgsGCjbDEf2/XIpAkBAvmESA7SDGuOYml7OcfSROh5nWOw0SUYX0WRw2AtkZn68nhEB8jzP7s7JbgMbSYI3OnJjJxJUnZxEZKRtIuMGnEqiNDLKKhNwQqrqX9evHxEds3crTErsQ+m7orMAxIISHNIqrxERMDxAR9jlqsx8PI6IACVRdiaWis6+HV13HyoDqkIOysGGEx1Iygo+8ryGMGtZRz9fqO4bsy+v7wC480J1IiVDwMhRVQxGRM31er3qmuN4B+rzvnc7ZldZwR1gbUREgxwimDGr/Frvb+daUikiFm3giXXqGApokbEsDpcuIBgeR9hVsGv1SexrF6AVAEQNDhV+/PHH67qO4xikgWT89re/zczBgYSlM4cakGNGkBkuZBwCug5YdNBhpojlBvAq6VJH8jAo3+coZBf8i2ILADIPaUYEMmD00wAwGCGf45DkSNt1zQTrmvtNBRIsYthGRtUEZHutC4BXBZXMajjRlsAQICBdElHuRJ/koOt6vgjrsxchboRVuOHBz/+1PUsjQtb+ABTDDMOUJDrIqi5AILk0397e3s
7zui7INBDdic8EOoEFYaOrshk3KL1jk5AkGLUfHsmNKWqXJyQtVYkZx3FI0hT3tYRBMKAiTBNiGMMgaBq2YAEx0iQYMgEva0G2BiFANskw4zNVObxhoxvbcARjWf3kP59bZ5S1rnXke4xT+u36ds3rPcbI7OQ9xjCQ47zW6y1i1kUnACKgfWhImkDnS7lQkuacDo8jJYEcDPh+d7AhrrIt3rewH/6dKhtq3u/zF0WGmdKSBNqrNBfkWpoqbQh6PXwE4UlkOkcECISpPU244fedNh13+uxaDfunULAJuKM0BiMZDpaFoHT34584dkDy92BqD6HoMCox1JeC0fenykREpKDucYMAs1HopT03ANnnJBrG1OMMyLYcZES9auQhwtqNb1VtCMswMKsiQqpuascY3VbiqojolrQskmMM2576ZX0GAEwRCcBBywzJgAPUKgQZkJcciejOGA7Y/WwZDvb1iv7tQVdVQCNGRNYyjH4slF9zfv34eDvPt8fbx8fHGAz69XoNDgC6s5qgDIw9vukjScV9FIN5N6SfgZ5UhDT3odOqGPnl7U2FNWdwdLVRtu2wr9eyFGOnlx4EeMcbExF5tyMw6J4zyR00DTbi09gEATIzTFVJNcYwJa/Sfqfgbiv7YLjcFUIZjAgxjEDU3VTbC7IZFBnRjehaS6sakXo8jscxPtbLJBKRoC2hutkCqqYYw4oRtpMuks5yUdThAJcEFMewTKIR487i7CoBAZj0rlplI8hIrjPHt7W6uRJDS48vP3x7vbpMzLv/ikwBWDuYASZ3nFzWdJ6PYQOux+M9cX18+4bwI2MaVWslGGnYRAVSUaqkxxgRsdZyFchRLlhEqKOlOkdKqKrMJJJI0pZGxPH+3ul6MNbS19eTZA4WvWoPqmK3/zoy+8ZGDynU0UNLlbFvHgCjusOGY7Ef4AIQ7g/rF69xHq7GyditZsNma2CM0EIfpVXKIyQdzgjanhaAsYTzPK7rymR//sfj3ctE0aLCHU5d1aiFQdoZiGS3yHKpGAMRpJhhWUBZ4Q1PfbYpVTWro2Cp58xVn31qp+uybQo3iEnYfjvPdV3Mkdy4/MhzWeORFr3WWuqwC6AISPvghyWVTdF2uBGPuwy8y8PI7AY4YtiYswBkBNCjLyBAd+onEdMFOMlIkyx7wWWfFVZxjGHC4DQvP/JQlhtptsDo1AHFOc6qqnXf/Ybw3Zg07lhwd2MRGfHUeobjzDHLY2AMzXq8vVXVx8fHBlFzT9zTA9jNCkkEBJMKZl9L2423I5EjJCXD4UCYHSe5bKg+A71ig2AkExnoG7KzIGCSUd2pQAiguu8MeC2pai0tle2AE8xgCjjhyOI+VNlzY2485HPArB6zYp+oDvHYPR4MZOQe/Uolm3CQ96yad0/Q0+XoarcHvSSquodNcIyj75IsjmFTwgwcmQejh8Tdc1d6zABUTQoIQtizg/nKzAXPpTEGgo/Ho+aaAPbouRpTJQ0pj2NdJentOCVVWXKOI1QjsysGqOQoQ1I2APaXEjDINNmQmGOUZuPc0hwcMdIWCobrqu4agw1vJmrGebiHFNJa63GeY6Qwq5aWg2PHbZsZe5zMXGtJOjDWNd+Pc9S41gsWxsEzZU+9Lpd4hJFwx+2uPBI0sVTMSLAs2KJez+fgaDymx6JEwktSxiF4zpXHyeZSyGOMpdmA82eX109FvsOVPkkn7pti0XB9omUZEYw4Vq1SHceBFXapihml2kCPwz0/7oq2rwYQUCBJWiBxGAtQkuM4EJi6VJlpK6KLM0fs3vr5fJIgnZnHkT3ZT1ByjlQRQDJH5no9DZDSXE4O4Vkzy0oPx9QMnUiUVw+EBUSDdd+HSiC7IcQmBFzPMcasFV2Bk9+uuTt+Ut3fkkSWNnYFbG5B1QSQmaHhlxop/fWvf52ZyFh20CMilGYUMeW0wzGLY3yxVMZ6VcQhZDCIJ0ABssaGqFBe6fcxMiJcWlfl6CCF67oA2LwQY4w4Hzl4XZfoJKWifOY4xtgkp1UiFiGAQYDBOImFItDR0jKBCFooaCQhjEBmSILTQqPlmX3UioxIMGLBLo2glgaDwTCRQ8uROeesLvozU3MdkZZhjAitKxlCSDiDy1paPToccfiTdLNkT2cygpbNel2PtwO1MUr3AOlCxlh0ScFh7VEclERqzjGGvGj88R/+0a9//WvH23GMta4jshw2B6mh8AHLnZISYRIY5YhRRFkI01hG/5aR3TIljSjTMNMEowQLThquYIzz/eNVmDOPyDz7awaJ4Kx6c5gWa5e9wGIV64sPgaJQyIwBYFlT16hjDNmEUGIgTyzPUKYjeJAu1DiGqoiUVKFADeeVhIK1VqzqQ0EkczBUs6ooWevBUU+U7YwMSgLxWhWBH/7gfV71ek3ymLa0RmgXStrjZyNczIPXmog4k2tdxxgHD0whRiVWOLyGiYIo2IsLQq3v6G5kjkweYWB2sseNwu2egwQHXNIqzTmrtKBLVbpsOBjHMSVcM3/IcmHWgYiM7kgiWatQ/UUSYJ/aCqaycIePAAkJlg8ShiwCQfYwuGb1VFXdiXaPrg0GmFFAlUbEDcy54dvxGPP1dESQWs5Era5jOmP1Ve2ZSXM64rKTIMcSzOMNOaWyMx8WAvl6TdtwKSMALoPQ6CrHVA4oyNH0IU+AaS4CVV1OZhyQvCplHY9rzhz5mX+j6Q5lZbosPCMG3WyMsL2uslkBUKDGmZn89vNHZkrl0RwcgcsZj8chLa9akTjPsWRbVQdi9iSDfBxv15qzLhKCx/GAXWM2giMUVg3GYLqAOAiUJs1M5jG0KhyXZnO+Xq/XMR6lCg9JlQoHnaC1qnwxR45RegI4g+zqWD0tlSIl/+FPv/r28bNKCHqHywv5KJXlZKwljDC5Lh1HSipN2yOSoFYtIIAMhKawJqBMMd5naXBSmcnJN7CMuSYiwpItjlh+PMbi0utSjlwAQ+ml15fHkXi8Ss1OzcGIQwKNvo52uYLic0GkULTO8OtK5CA5C7OUxyNUqqs4oSiOKDMRYFk913MfmHv8ikIAA6wG6EkTiqJhqhq9sSELO5siVt9sGiaDBLk0YbKphUDtTiEbRkhMwTwOA1ZWc7vWmhoeQzltofQWKSnSCquuX/3qx+fzW1VJYCkVL6ZtWEApk0zM9RhHxYtkiQvrOI61h+Jgxk08cs2roUTIZxzcvZiCLGmthYyDh2FAY0SjdBkBMfOhNQMLEVIUOExjZoOIwBKWKtwXjYk9k3B3zPYmlb5sQrWw+UOgzAiEJV2uyAPy+AR8dlW4y2mLgOVq6qoan5G16OgvIhqCEqqnX24y5JzT5OPxaPyKB1EImeDKbmLyNJuKs5sw+Kdf/epas3GVLlHnnBEDtZgnjPImJTYvNHq+SECLzRhCZAZKU3OtCxxA7BOZlCi64DfRHTObGdAjK8MjVgA9a7SxKiLGHfFvKBgBCBiNI3eHnbBrTUXmr371/vV17ZY6zjwIoFSG70Fjj2ZULEkRkMCDmYmmavkmSJNdIFdNMQE7WPA7j8s14chIi4UinKPRheu1+bqf3
f3I0yg1wMhgRle7aylsVE3XHPkWOYAhuLTpBH08pamaKJa8qttg5Bjew7ao6qTVXSlw96bRHZ1AC96vs+a1pkv9f5d0XVcYyOGPV+TQI4BsdKJuxsfnEf3sZrj5VXfO8fef+cxDe/LXtUDsvkdEswvvDqBxX5BMbhKs2NV9M6KRDKNBVGdmOvfci+yQ1L+hoqfhCa9+MActsFn0VQtQxEg6GQXpcSaDxvJyFafTEHPVHCTBpVruuAJJQbIvXoTJst08OdcxNvPo7oAdmUxGYFJ9xXjz7dfa3C3W5qaSJDMfp9loYglOsJkBFvfnlhruL4q5AQlZMUHwQCCjrGkFV2REj21zNHdgubCUme6KUBTwcmWBwEi7uCRrNmqVpMoRuAJnjlo+SFU1HaxKzgRVDu9pfVGOwKngGL/7+ruyImIALJ2g8v16LQBHjC9fvnz9+rVKJjTwURMUk+f5mHPCFn0M+Fq8VIk8YjDL4LJyx1NJAVyuYBJMRlWhySEZr2sZ5RHkUuCToEMeNIO+6uUgM12NiqUpq0OMbYDOzMGwK6BUQIKj56xwtNggRhIJRIfq72f7cxYt32UxCFQQnzTQ7vB+IcH4/Jf7KoWpLmSbR2LA9f3HWm6AMIOwy06GqrwW7wE2jQBVL6R7Pol7CGghB9e1ro9nzeLIiGFXyUbdgXS/Tsf/4zwy8/m8MrPpSv1SsT5pdxlhERWNV0/e9N6eVeYgImstByMgrTnnGKfANa/Tw6VjJMfxtS6ACs5V2eX6L5l6EWHMnlRb3MMsNCd8IVMIUnYRMhEYJQcOxMWY1jtzfMdh0VPCsEsd6+yCJSx/j8mRGCNIavkT77Y9OHxLFIKcc35CQFJJ+ys1EMZgODDnOh/ndV1wPD+uqspx7kqhT2DnPYrfE7BQHfeozVcUI0fEKq+ppN/OwPvb+iiwuxy4R0tyEAXSYUtyIgS43PB9H3ztt/0dU6pPxAYENECSVVNoKULkJn0047FImtZdzPRLRsJqHIhjjBxcS5ZLffdgAZbpIJMDQLDDFBpiVZM+2Z1hBJ2OI6jIS05Dhqt2QthslU48NCQCMCW79swgEGAuHnCGXlyvjC9FBxOJO7VIgr3W8uxRR1hTmcAi4IqIuEmiG9NusmXnhOYvLtXSpj7PWraX9okYEQ5SRqJ/EWFmZIb4HdzeabJLwx7D91X+JNi7aTrfc/b9IExspty/g7QVEajawHVm64hkxUhWJwKAWtB5nqwFYEQu1SeVw3aSBmZYAonzPANwrRIgLQpgbsnGzV8zairRUFcUnfICJhw1HVmAvFHT/iytGGnyqDbmHEemvAKI/P6UINCIjKVyBrRsEdk/UoabPtMKQwvX2u/qRl2ahEESzJ6/ZByB1+a6E43rNydWSWjZPjKTQTvKWbqIJsDMWbaacQOw2TctINslKaP20MWSMtMuMsGK4CkkrDUX6UABSiPiTbY2Wa8jwSAy+PRMoCkwVWWrYVUILbypeX379q1qMuM8Dq1VcMaIiDMPvyZpIzzLQWWewUeEzWdVAZO2PZiWkSFpjBEYLAhIRo+sZzeITVpnf9jR0zPs0RruHLZGjJ2WwmVmh4vPuQ+qgFcojOgRWkdFIcEK2eqLBrjPpITsuObN/M/PyQuLzVzpedxWqzSyFZ8p/H4dYWsl9p35JPLUJmpaTS/4rGjV9AgBMWJXqyTtymTzFNdakzwiAZSqW2SIqNYGNj+mof7M7C7R53HYXrU+i4zPO263GqwnjB2YGUg4lxegDAJcuutU10vXGcceXI4zItZSHOcL4hgvmNdq8B+qB1Ox73vz2yzYtcCSPhme++mJQVbCSwfgTsBwICUv4BEcY6y5KI0eQ+Hm2kWETIodqgcH2RetZSyEfLTGQ777BsnOW7NxnieA67pa4OmS4BUOZBpDELDCLcySVGZmfFyvPo5jB0ocmVWLwapdcX1GGNvYVAlGjB7eEGAC0lpimUzfIloXOrIYTUbZCpuO+3Yx8nB01UmCEYpoisRgBOAtj9kALJtnEVySJMSw+PKcP9eBMc5DxFrLGNgoCsHrTsbOQbu7eR096vsUs0oFIbr9XeEgQ03BtoN8qo7IYdngoDNcHCLCmwKA1efaGUBUFTIiRsuqYQdA8hAQWZSIIKKKE+GcIw7FPeDpm25LteSqKhdWt+kiJLmKsu4s2IPD3RPfiO6s1fpaAGupqk8OWjbQg8aIcXzS0FqK3JoifAdXQXO3GLvBZQBsOlY/wGjSHzeTG1rWXZx+Pzy4SW9d38V+t94KeI4xhG62ZJtVMcZWpEQN0CM2iW2u7qPfz8fHa+6UL2nJBDLXupgxInkXNIZpPGJ0hRcgxxCqW963QFnRVRajtMLIgLQGgxEiijsH65PsVt9V2u/nYwuaAcGDuXnoCDRZhjRZ1hiBLfnDiOyXqPsrUCuc9sRwAx6bQx++3zxyT4vNgR6lO1xEgqEBVVhkitjEvVLXq5HJ/EQjmuDHjIRRILV/4wKXVo5M8O39/Pj46P5z2ebGPxEk0ThrF2jneQ7GMl5rxkiraETAbo6bfnh7/7heuluo5iFW1VrrOI5IvKpOHiFD+jhUksQD42rNg8wmGVhrrWw9X8umVb61TFJTG4qIgmBXzYJFkwddTZHoSGgULMTRDbO0bBnugJ7oiqyrQohRhDwCO7AAaPCzIb1Pofy+mNFcnt1KEk26v98nszP0hum0Extu5OlOorZNMDmaD0iqqZB9ICJCrIwU7Jvh2AF8jFFV17X6fUagUNd1jTHOcVjKDN1QE29s6TPF3seP8znX0hG51hqt1yIFm0lSukmaIqWQ80iATaqdc/YnCmCMMcaoOSGP5K6XxSAHh2quWhzZStYYQ9fsYIjsLImCpWo8gVsMQskRZMRBFCSzG4CM+DwbZSU5CKfHJyXBNj7DqPtzRDNLt6QpSNKqOWcn4J6rdUunlpbfPOwWBqy10mjorGl3EQQh+5qz1QvHcbSIcH/lFBzBQeQnuY7A2IoIkFy1eXeSZkU1prnJftWs9wo0+RMAPyVamyvRUAoAuoMvzbJjd722m6FdMrKVvg2dRKtY7KUKjtRGQzHyYJMh59OyYfYHpYmyjK3shI0mPbHVYp9HHDdqC1BeaDhaJNOoUtibIs1ddmWenb1GPEB1/xccGKuzu81lhz4pxAg4MiN4dRlRIjkTBZyFR44KDO56q3X3DShV/5s2yGwihFB4zsZRPtvlX17X/o76VVzdC7emc9huAhWMDC7VVB0SpQw0fY83D5y/KL/2cQ3aELS/ndigTd43tsWy8z6QfdK6qQ0GY4M3mUm7uL9iNtoWjDZRKRzHEREdzjIzhUWCLG7F9takytnUpMaEYGzWRXJDP/1PIBgIDZTt0gAprMBhHCbbLmBPs9qIZjcvcUvJewa972AcETF1fT6Z83xDrbqu83F+e34wyVZNe/Pj+tVO4IyxtJixpCI8oqoCsVF5lADS0VP1rmhvsjlkGL4JH31/N++Pg2GUQK9SBkhpKQhQREbkYIgyoHtg0tVt5rHWClPyGNnz2m47+mu2yJY/MlvKSBKg
m5C8POAxhshvWnEEOdZaBwMHZpXt8zzaR8i2Zdxy3vM8Y0dJrbUephMXbeuhQeFlvULRNdOnSvvuGgVHtMASXjUyLTfxwjbRSOTGbDnCs0wFGtstoLO1TK+mj3Qhzui4zCNZ2sBIUxRJgHPLfAlwRPSQQoXILbyEm/m5uaJt2xF3BKxWJsY2XrpPxy+yL7Z+CXtA102ksqkXbCWLGufbuYhEqWqrHGMfj7J5nid3lqmIOB/RTamkTAYAdseykbOm+nfRcF1XRIxxrl+UTf19RWaWI9idksllLKuoYfKWSwEgkqAFlVetBqFRyuBPP/7069/+9oijSsFxHFHwTpyrIiJBwfsb9mahjtvDgaR6yAACiCWRCsIY3h5MFXlYLyiFB0JHjsL9XWagowxamLiZNXa76NywXsal6uzb7amJMbI9RXxTan2P3xYixdEUaEJByqEis5pCVRUtdpKPMdQUx8ypamZZZnbH2Q0kyFqzr8rO2AwmjNVPYYzxON8/1pPYJV4KgexjtGBgD1+14xEBvYhge0oYq1qCdyJ2f7cJt+ixM4UjPyVlG9gMdHl4rALAVlQDDEdmijPiU2L/fZw56zp5IHNZVLGxkka9Y48fP5s7QGE6WQaEo5RGkRpgNRUFjC3uFFAGyjegSJJsZxygkHClmUSUw/aBye8JzJ8X0aZR5dpTpjIIqIhidP326VSl27zsiOzZxGdi3nY5QRSqqicIhkHCJWaXqLaNJNljTKsc3Pohg/EpcOpGiD0ZuG/1rkLW1nO2mHZ/GrXMF+gavMvzu+FuBRxJZsSmvTM1duUW9nDajkFWLYtumvuWQc+5iAiO22ssilirIiMMqJoO2s42hmOtjr7RI7p2aUIYaM1FqRx3424w2MJildgQn2mgfZHOVg8DAD6u15yTEc/nM/aAan81jMYeqHJkIFhWaF/bbLMiZ8PPn0VhN5UlYcTBQbWllTkix3l5NowZau5ssYRLSr+9veW2nNI4MMZ4zYlxFPbkJxgVXVWjnXMoU46xTZTWWiOTEdf1BPD7+TOyP0OMRX/K+ZqvzwVrBYhKHuc4Xq9XRMy1jky46GJGu3d9nZPc7PcuEW03HtP6q1pO4DTFUUllEMpqNKUTK7KVJx2ZbFcBOHN08+4tGL1ad2gVeLi6OwxiJbdq5bpW5C0c8wb65SaOZNgJngWpsYwotjmUVXU6cNe7WqVoD7itgm3U674dTXxs2Ci3Ag1tEyBZvyjU9Nlz3pXpPl005LLDwX5gHYCH2seNXUnYHplocw8qxtDKtW7iSAMfNbUW8/3uAfsdiigA8h7u+jucsCy6mWiqiGhvxO7QSurpCe+f1vY2I0qrDfgcmVsXd0RfGXUV4lJfpZWSDQUR4QqYVS4dx6NrtYaX8Ik3PF8NxhqI1kTABGLOPHNtranDWFXOyMyisVrd7kFS9xwev+wzZJS6z+jaU2hLoOESAtFsFrI2iMSq8v35fUOdUgOJewhXUEAJHMf58fGRmV51HIe9fb/A5poRCKFuHU5AoKMHBCIMQxqZBm/94MguVta65vMRWSix3ym69q8NBCGRQtVyjBG4ay5bsQ/btIJUbKJtxyKhSedBelm1KjKPMWiRiEC5PPaJiQNeMwIBjoOv6ap7vvILwLa7BWVAdtmqYoTAXcBin1apHb5cq5+xrDlrjEDGUg07gtoFaw9DUnKTYFoeR1KEHNJ6W3EFV7oGB3jACVRVLNLBZPMl6kYMukext0FFwVU1jbbVrA4YXXJZADR64Lf/Y1lLtWou0VYbaYkgo9a8FADWscdCcDNLWxwVvziUsFCNu/YQ+BcntmHR6sRodcrfKOHN4Oi6rS8tMloy93n5d8F+27dFBjPWWswIIzPrmmxztWUwqiqZBTXGRGZV1WwNvIg8Rqh7jl3pZec1wSfjaACGLIumoCvEYmyUzGBj8dF300QyMmliWZrV1bGk4zE+n8Pz44rMMcb89u3Lly+v64N3GqZxU83wUTVm1znx2L46Hubr+7MOEBaIhJkEiTNGBwdRMY6RZ1ydvQh2/kiRReBaEatT+2enorV43lgXRDZ3x4CTDUPr0wWs08NzC15xjqHC6HRX9UIAupXiu2dHUNK8Ks8cOZ5zeSTJheaRD5UEdfxHcM6pEQwyzsbQ1ny211ciZVRQXnhVkhkR5zhWVcSiy4XbGm9JZxy/dICRVDCDoRYBQQ3PEHAIPjPBfXJqLYw0MwJNJUNkcxex4y+fmO1+0GkuIoCCl/vs61YUdas3vg9HY/cYXcgJiHKPfGmGXexpFVheRkr6bJDtLh3MzQPkjaV1t27DufHOZuhsJk0/LUBqpEYCHIGmuc25BuHkcTw2zXoMAEvNvNyuZC1wAppivOdTcwq01joi2Qk+oyt703KHd9rOHjzRcexJB2HeFU67pzW1KDNlf3s+j+PwvA4PlYvLCTWUNUKrmFFoBUTSOCJH5EuK6CEIJUtKMIIxkmOs6wLCoDOq1AoFJhFuU9sNHm6fXoOBPlI33puNn0lym9HMaoIul9u1LkHNWeP8vDb+Bc9lAGU9oYEIIGs5OIPr+eyb1p3iGCO2AycAqLYmFUmpLevMYIHWHvbazsxrvciMOKonL9EDXLefor2BeHTPFKZS264oGphpttFwJwYxwxEGlyQogr80NyYTDQ4nhNYFWLoTt2oysYkNYTvzrKqP13XPIEOaTetqp6rC8vJabSdJ2SBaOdqyu+aW1zVhMCOPA5cGAsdx1VV2BlwL7RfYB81bdRstYQModCO/P0UOM8eqdIuuhGOXKWfJLIMOKih4QoYzU4pPirXtgqKAG9zu9qVNuW0T2e4o1P4vq4unT8cDeK0V0Q52kUJUn4BqgEIjIvao37e6aTtHGDLH9+TbgY8APvnAu/yvWrNUFZl907ramypaY+xBbEcP3kILy8GszfAcJLWuA5HHMeeVGXaGjZI+Ee9Ejw8yk+BaS1gxRifOPg/NGHBEcjxZQ4a2Hv80bV4MB8ORoXYeWPfrx8hS2TMZJlylltOwOPi7n3/+fBYkf/zxx3VdOaiaEdGWTI2QbV9P451ZS4yocguSl2pEptgGh03I7BIsIhXLq6ZVVWaIIfOqxfaMNcNAQaVMpsHz7XktkjGONnQr8zzelpTgSNoMeOmzK2IoAB3jWKpWHjgcRs0FaklgfKzKYdMBw4wtDrSjPer6XuO6rhcL41CJ0OBYyAiUaoyoa4JORhyR6z7Rax3HAZGG5qrxYKfCoGBGjAhoy+INTOsROSLHma6VujtRLQSXjKAsrIGOX5s8akCghNF1yXmeIeBGE5t2IPRfyI17ut4VyyrRltlGc/2NRrP/2PAqtg1nQ1Dr9svZXB9ydfnku7NxxPY/COlmUrQfHGhXBzNJarurNqlHP32GzcjMVGEZliLaSqFvfTU6tbOdqkdVETFy2LYUweqRH294qnqw58y8ZfmWlLdRCsi2T+mB93m+zVmjmMjVbNMIeQmOQN6WD1qtCDAKI7OyM9Rh+3ldj/cvr9eLxUzO6zry7ciYdbUfwHxtWLebmWY6P1fH8bFQUGtzmJn
9qMf7QSZfL6F8nBwDr1dElJYnln0cmRNjeYevkWmX1hURchEPsjtXra5VwXaSCxBwjqMQkgZDSz+9v/32dz/33Ktq9tt9jMdLOo8xGkp1FdsLLUhGVq0wovRicSoej+PSQgBesxwRAhkhKRE0E5YXthtfPNekOA7YrzHCJvOhF+GommMcdV1INI9HNgz5Gu2LGZFHhAuGqKIPDtlZFnKlArzgUyE42tutEAzLCmshIiEXCsECSzDyTa5Agb//+dsjT006U0EKJKYmg2Uk2mIPAiuQJZbG8fjy+KnW+vbtG44kg4Zrwm0QGFwIEYgFEXpjUpAWYQY8VPOVHEKcOXjVRIUxDWenge4C0bIuDi6XywEcMViVx/Dw5VUnDyeqoWct1as0q/o7PBDHmWut5QXz9PE43iStVQjuWz8DsZG9EaElyFrb6AwUxQcfNR2wjDwkVJmDwxBKznbbw6BvpLGreIIYG19pN1QWTELEKK21vExpPl8Rcc0XAKm6vSbQapYYA9WWSC5UDqYoe+ej8USdR3C5HvNwpBITmfZUKVQFR4Z8IGx8rLKVZAyuO3u01PZaK8ZY2L5BiYR9Zgq66kJpjDHtZZADzXx3SCiY4WDPLGRpMBuvIUdvYGDlDYDdCTjj529fXXobj+ixZHb5uG0fAILb+g12aBuBHwzY1nOSxPFAyp5ROViSjvTHsuFzCEwiW96whZB77wiTyEwwAscxEpxq4Hez+B/gcuXxdq0lwMzqmIPHsjOSkdooJwLsvRL2o1ug4OLa3ghjHEKPH2E7xU5FEiIP18J+dCjLOb9dcxzvq2zhwABxxFm5crlUlQSuzPTy8pE2wwmupXO8VU0EytfLkZGnIyXSTK6pNhPi4NLERmc8PGBk1GtgLgwdo+BBo065WOcYLmHVkaxaD45AHAd+fn0cOYZZWivGhLppC0cClZRLVxlRwctrxAgEaWlFCAFh9rw480B3XQcwCRxRJTIz1rpsH8dRhoVn+BhHQPXxSo6IseRVPB1L9SUOQt/mFRFvx1jtQQI88rQ0X2uMcWasulCV+VhLj/ME81qvJTAfw7WqntfLcIycXpaO4+hjeNU6Insg/Bhvc85Ir7kyBkGv1tanXazFkRznqhqM5FhLjBBgGjnosn1gBN35Y2AswYLgPA8PvdY6FY/znPPVZK6a1/s4JWmFiXmqrhrM8KElkAtlF8PDGQ4RL0rhXNY9GLPrDEagqr5+QJytHnRBc2VGeMKRDhNrwYxh7VEEqr0KM5glLgjAqgvA3olkk1FVkcNiuTbBPTMPPZ/P7npby3FkVtXz+cSRq22eWozVjqdS4JjXsrk8f/Xjj2vpemmuizjDd7ep7ncUwKIzgUyoPbPCZlnpsLirM4u1GMXYy16O4xB1022ObqYWLTjLvtk3hzkyKEyYEQWVdJqPXjsDzDmBoMMbzNxTnq4k8Ata/IKaoT0yOZJGQgarVkNBm/UWkcZq39PekgL6utZaRSxGWz9GRMSI3LZEkipagtn4MACXJSCtb7///XDGYRHXdZ395smMXLdxa26YSglKGse4rmscx6zlSMxX+IgDm7kNyKqqWh3zvlfHc1ZLbp7zmpo1temLaqaNNbchUXp/40+tC/WFIzN71CMYwc3qUU9WMcbgSMAJkYOqWxj271Ao4rPT/fwKZF0oZyyv57yUKK2XCvSodO5RaO9pkR1RiYzAe4YCCp4LNn4+4lxvUeP58fWIx8wndV35w9uqef9ytzFeNLZIKpuMWVVqj2EC2ta7tlXVZnuNBm9D/EIy3XIjSV5ZKHvZi/3d8ZF5Mn/3eo2IvZOKELz2rq8kkfr+TFgm6cyiFoBakZlNZqnV7dF3TscngnH/idFOgxQ86wIiMgTh+cojaXgWgxoBtUBmz2tvWkYjaHzN+TgOky6X1BTZHtUm4vl8fn5xbRhQ1zzGgPF6bRS8dt+zaXbbbjYiIDLk75fu8+ezTdk+SUP3yYGD1fZUATszQZe0ULQR6Tb6LjV5d4ywq/0FM8OY8tLEcaS5x8YxCGDW1Vj9kAaO5FE1ASc4GJZeqECOSENLTgSijZeP1vDtIx1xrZXFJ5zjrBKhZek1j+OgZXsGoi+UURm2D0cy0s0JAG9x/Cd/874stNX2cWOMq6616pPHhMbjX+UDcsU4l+SaQGT0haaaXXQkwAKX6tDuzTtULxiEj9SzRkQMlluv19+7pqoZWLrr6Mhca91g9T3tsidUATdnAYYYnX0DdioQ3laCl6tUufDIoW2sTjJQMmzQERO1ahIZI0FIy0QGkvl6vQCd5ym4bAddjvOIOTGL0JQrGHAGFmIIKYFx0YqwleVlEQxjbJZvVKDCoTwRr6ixBFVFnsIMP45zrbX2QfMYQaAVsQaiGlMSnGILpZsDRRo+IpHZCGp33GutCR05rtfL98IcCFMTDjLbMD4iHFx7+JfREqoYxzHmnGvpuq6+XINDUqiadtxGX+3GFhUkq4xS/7DlZYYa+9vm9EdSrON8//j4kNY9UUNv5mLs7RsMQkY078xVK8HtVoc936ARIwDUXJnBSNtHhGyVUHsbw140BKCHx+WIDZuUi9vPLByMT8Ty/pPgqwXCQZRredKVfHe0THhi01KDSWJyRgQQ6CtlN9PaU8w8xtFyH9uX1nQrj93t7yC2dIWtGsTgmJyf6MqCQB3MU+2H3hofrEDN1fcYuM2XEXOW3FK5C9sTByMCjjkcroEIO5Zpp/0e6VJmbgigypGAtalMCGuWMAujt3ewfVNFcLsXdpr7SwYdn5nYqmOiatVacdXjOL69vsVrPc7zsiVeWEeLTfufzDn0ppyDIlD1lbTW+zd/jFj+XeQBrI9HvevxuNrBt79WrVblbmplv5+mT6kX1PiWxfdooG6qSWmy7/qezcIFBjdt/zgos3QCy9Kqj1UXVyKlahS91RSPx+P1euWtPPw8UfIimJHtm3YeD2yNznbMl90YwOdR/KxgbCvWl8eX66XpQsYIaC1EQMqN+hfNtVzWiO8aFZJkSoXqMSaWZLqsFkI3R2YBNedxHJ+f4uPjoyW6BFYV7TgG7i1yvPHSct311oYif3EMvrPub5FtMyf3SLJUrmin9F53Y6pQPbtbhLVftdoi2GXXkS0eW1X19vboKrxhz7aSQZMzUAE+E4Mqq7J7dkQL+/LwqgTycWiuWgskH0dEaAqkXAbJZHscHfn8+u2n9y+ras35/ngDrOjtS62+a1OIpgwrDN/LEpIhF5kqNJuizb27JejWpqpaEbSn8s00rvng0NKrKh5HG701DajHE7NWO6YKnmstOpMWm/VNm1qBOMcx3yJyLF8lbUJiD7OTx7FX03qViF7X1mzq9rtWoKdGrS2+Z/swstWHTONe/4AMSuOIgxjAVXILXjZ/TWiWckbvXiI49uCSY4xpMqK6SMhULWkmuJ6vE/jrf+2v/d//9X/dx+FgXZNWxsPsWKkOvzQW+HCIMsNjT+IThuOL4EQ4JmQgrCsp+Dkv7GhPS6NHleoXBZAhAhnSxM1MUWkwCJheU2cOkm3odZ6D9mteeQwV2n8jMyUyEg
h43kXS7U4Muvek1tVTkK8fr/fHQXKtaw2hbQjbZR3w9kOI3r/hyBbDuLcAuMRxhw+S21Ghr3dExN2eqqqryBRgLwqJUUGiMjzijBEEF1idtzCh+Hzdm+YjqR/X99BzOxH2Z++5nwBxZ9ZkGLX0PdjVBoL2JY4mwyQK3eLCMiEmfctHBSXSOxlvytyu8WEgHufbnC/NlW/n+9v79e0jRmYevfO1R+l77pJRczaNkMxrrSTLyki8lhivFkW3u6QcC1fViKgd4/acaa2FGIPtCgnaS22OsB6ikkhMq1zZdMrbeaq/Gmw+kwFpP4zvpk4tjhzHSRLcZKloHij+Uva12917YwSz6lVrQqQvV7WzC1s6D2m2IUK/wqIGfS76CGUci2D8/k1YNg7y9TOP8yOu1DjGa/556EF5x6btiLXna0nA39fvuKRtK+HtjnFfKHYY6URR28EuyAg+r2cTQEfkGSFk740abK1A2V5SkkkHNJSLjk+WKrBNWQE3Sd5LJV/8/NNteqtAfa9M+UzAyy0YW2K/VavkyCJzOcHVVLUWpGWT0m43Je0tBZIiA1L1F32LsFtpEmNsJV5L8rYYL7pxQ6PIat+J0J6H7lSPcBmwj6SqlXvfT0K7UYLAL+TjRH4ygaO3kHrBGBzMbDM6efv4WjYqhOzddrHlZ2RKMyM7Fgl9m7GJF8D7lK4icXJEhDVtBfhkDSAsaJXmQBbiuhaHudnpEI0QSY5kxBm5mpal0XtFL5WIIQIoouBh2Vzc0osI2pW9X8SUcagDhD5VbOSWETVccYevzYbrEckRD4NLFQGwV6Ml3csMvDGM5nu2FvHztSU2zR52L9JugB7dn/QWgCXp/XwInGv1/el3F47aJsy0i3shCEsiHWPcRVUMS8FqQM71Pk6Ss65flKD4VDfIXlVtVxFNKIRFLJTkEaG9v3LzVCLSGdP1r/xf/9XHl/e11mrfMjDKFa6Wq7cSngD0DYXlUZFrk2yupJrezh5oIqBTY1FhXuzeEbOu1+s1+t1WVZ/6Nm0pzSOPpdrjHdnhMQYycnmq2MABw1WryqSuGmOMcUrfVy9IGtAyYEdmE3Eheqm1qlKvPu1tzLZyrRVgbm/X5oJA2NybVgj013mX7wrvnm+fBaS9ZPUMv+wkj/Go3qfd4zk3bROm2tqh3VOXlbek/Rgj6DlnD77Dm8dUVdhTyX3zN9u9E0wHYzUfHXV7ZB0Z43al+fxhkhOLxIEkXJCosEdtu7vPoLrFMnCAbM+oziDeQZ/n8Re/+fXbMX54vP3+4+N6vo6kQJmrqh2SSxpjvNaEmkvqtRZHriUe6aqIeCYe8ClmLwcNXvJ1Oxp+Hu45J5ERQ6XtBdWcu+YdLsyaHTTbWD9y7xQazUvq5UBp21HK2Ov/1DkLEpZzRG7G3CcZFCQisj0BIz9Hn5I6AV9rlXW5rlofH7P5hz/P1/i0rAPFFZaI05ZZ3TshOUBwLH+BLn/4W6HWeNShhw5f9Rfvx2PVFhXQu39tyLVa0rq//n2fAbQNU2ZgC4sr77UffUv6E/Q7sP3mKHrBk6YqgHAkKOrt7e3b8+ucs5dffXwogBXtgP09/IRJBYhqk/jW1u14u+k2mSmri1ttgTLABlzxfF4AOOK6nj1iuprx3os6DNtn5JF5ycBq8PmToPe9tYqGyei7PjhGzrotBlv5Wr1gOZdWIrdhwuvqqO62m23SQlfiN6y6pKZEfU+07bk/CND1eX3uOdHBqko6gmvZ4nGMEGVx70mSJbAIJGMKAUccN88ggkNSRmQEsRkKu3YpzdidrAMKGJSQGdAeZrecMDOJEDTnPHNI95KfqohYa7H4GEdVva6LGWvN9fyIM0MRW8y5Rc/3rPkOA7YVVvfGfwkl0q3Ol/T+9sPz+dxmF92rV5GcNOAjh+cabLuDcgbXaoXlJzQIm2YabDqwbZcZl1CS5xpjnCPKrLVXDvB25OjX/awAsu3KO8t6L3hoepek5bIqMnHvLQGdSRBuQKRUUQDW0jhuNZC/D6oknbeGtrAlZLA0ZXLVysyC55wIZqRKakPiH96mFOQJFrC0zhhpDWnRRYtI6yF7nFNlYt503cOkKK8xMSMGiOATdZozwhJKzceUPTqjdKneJ7hNVTRXtSRAu/DpJxKq8+0LydfrldIxhijZweh93SSNstjl77qFN59foQGOrHUlBoKdgz+uGVjH8Z66RdZbAAFg76A1EHLQA/cOWqKMXmUo79/epy1zSFqrSfnjGHvR3uFY6dJeUFXBaDH1XAXO6KkpK/AwHpFimYpoblsrqRAR5e+wXldtfcTbdmH7NpDZpnu9YA6R4Npvr5cRlQaHwCog2u0s5JDVHrqu7urZupTeHQSGwvbn4I/A8/n8K3/lj1H6+tvf/fjTF9vzORHRYGMXTNLaq5RRe893hLbtHMum9BYH7VmNsDtWABjCx+pZJmOTGE9/SvVvBwYEgwO2vK7HCMNrDvBAHAiMeEpoAB97NRzAZI5xPtfsrtHpZvOPTAIvqfV2fRQMWFiflmS3XQ4D0eVGyWvhmoOYcwUT92m5a5l2/FySVq738zEHCjyXH4Ua8W3U8bw04/d8fPwJv3y9Xt9ef/RXf/q49PzG4Hdxs71PYbdmuLXvtzXBd/ueHiM0GQGAC5k3fNcjFlcZtSrbWyToupkQQURoraqC49N9ogvSJB3xixEwHN3PNSDu6n1m+0sqtw7YAsdnLfiZm3cF2U4C4DkOVcGCeXBcqog6TMoXKiNhf4qw2W7+vyhMu9fviNgBV6sCMec8jseqixljjI+PjwEwPxNkbELlLnbbj9pdfX3mVWm33xFt3+6IqDmxIhGyPmecrbtmWHN1SbSPgT2XIvdAHjbp0RvaHVUzzzPjWPMVwY6Qy8qOYL9oItVCC8axF5+v1bcW0UiG/d2e6FUrM9/G+Zy3E/5dVTQ40U6HY4yP60liPM75uqIVyUERQ+lNEcAQw6je2CYSSmyfPnq3xeWQFI4cGy9oHq10z6f6eQYpV01YYxwoXwLgk4xgN2qVNDRskbUp9bjXeWXQNJhnJhnQFOQcZ3mV5jkeLjnjurFJtEzIbeJxYyRx+3MBuq0j7+yrQGqA9lkUeGVMazDej/Pp2W4TnX0+QR0co9x0iTawNUmOGAxXL7PaMEazlGhfVRgIOTFG5ISYI4G9JRcYbb3XdtN970sst1hGkpIxiLbHxAjVUbZ0iB99/wE0XFr1kjAYWjXGKDCPt6oJ+YjRgrAv7+9VNevqqn3OF7MH7SHJ4uPtjBg///wtrEbYSV6v1XGon3U/jhax5kgqV4W05DnysEuQ7UeOuleD1b38NZAr3cSQ5epPjj1ZCstg0WT2ztMqu14vAK05sf1atVcp9Y5q7zXAAtHcSe7osUEZ6amrXkCbslYDOtgiV9wOfd+TEG4rH5pYcBoUkrFuPUyzxJon7/ZUqOrd4/cWv0ES1mRrhknmYOzgVZDEthbcydkiGvPNI+ac18czAl++fLmez2KP7owe1dsu4etXhiEhs92a8Kl/jUHmyxVT3chSLqwFY6//LpLX8kGOjLU9M
6BaNyBPQF4F6s1t3Mo4QsblGmPUZjZvaDMzADMzuivciNya5GCstmTPOBodUjC6sSuz45rXvaJq5x67iKma31e1rIgRI2mVv39l7mpOwLWeOd6kRCgdhgN/4wf88PiTH2f97f+nf2n+yZ/8W/++v/Lnf+s3R/6KfFZpVU01AF3b48h2L2jv0ru9x0kDr9erje/L2341OJC7ZCn7cZ41J4O1VtnuRV9WyC0oLNSrKsjn60Uy41C1EIWyKTFi/oIFLbR+BwfgVbXlpiCZkb1+ISVVj3paUfb5AAtSHA9JdL2/nd9ezzlrcCS4Wo4Y2ZoOrVevR8OmBXyqLXe5Uza6fbxrapfMeJznqmrTlGs+z/NsspXvvzjOXgRpZpT2iP1O7VYrl7LhNJAMuYBzxHPRktlcsO0v7fb3rmL051Pf8eVll5ChslDUyBgMLZcUgeYJIxLgt9czIo5jT1sABFmmLIWNfDAjbwHP7WpgUU0ics7rYkT1Q7qe416CfqtC7qoFqPCs9RjHsiS8v7/P1wtEc2i28bABRApbZGwSh1VtIm0biNrEK9gUMTKPZPtJtRdQT4IzDklDeHt7X9dTwLKudWUe5Ci/3Hs1bAmq2ptAzrNKYdJSKTPawV6Dl1ZIFgfHeZ5LvJZ8E7K80WkCWGsdSUt7l8PS96raQWaTaZY1GBFHp9G7YMKZo1AIMIiJsoluVm5AKKNL3rxB6R4xAIgDjtjmQBH9NMZ5Fp1xmjL5sl+WvOw6elUNzB6ZFBddI8bsjaEsUmQyWhZgVQWHYrHaEmQeSTGjGaBLdK0aSb+/n0eev/nN7xA54jjH47dfn295BHGttdb66f2LtKoF0jv5FHrFn1xVXCk9Sb49Hmtdr9ery0ySHrkFk75rVWnRh1tIEMphOZORh2XRskR6BGxpn0zUzhOwBiPZ4KTMA55EoLccWQ2rnnk0MbsBjyajlpTAob0iicBp0FTSDATTyt64lwFyckVtz8s2MpSKGdLKG3NTgdvNJ0iPFpxg2T6RA0GiiGzdW0Tch2M1oP2qiNiUCd/K+xx9bgJRS6YiIjPGOD/Ws7eSqO0D0ZxYBOO1ZgaOPP78z//8HOPt7e3rfA2zXUIj4hzHx89ff3i8XWs29ibNFu31wDUiNJcH0FC/rDkpqZxt7EVDvq5r2iqc51leVcqmDjQM5jVie1BXMsdRNX1d5zgeBbUXT/Mb4YKDUJVQORJ7ZsNxDgevWme+fRq8ZGZkciRit93qLR+xG7zGcvoCt6mWYNKz6gy6fcnJ3b4wSHwE0i7pSS2uyfrV76+/49fXn/xf/uf/nv/hP/r1P/Z3ff32m//QX/u7//f/4D/Eb//Wcf7R12/fltUbLXcPjG3VEiSR6G1dd89pIDbaoczEtujpszDMOs685jPbnHscx/KULhaII8cjM+VapYR47w/HXlSWY0zPIx76hRc0u9AMKmOtS0vjtnWx7R7U2eS+odj1yJ38eG8X7lF3o6bCnPPtiAX0wt3TFFjpWGEDEaT5i227S0UL8nEc6Lk1cJ7n82MpeD7Or1+/WjchiKy53t/fI9CrUkV05boVVBE9P5bVMZbhjKbRcu+A6apxpMX97G9j8ExUrTPPqmUbjUNSPCh7tJUeUV6skCAGuJLD3vK2ti8GVWC7Y666e0ikwhZWmVrs3SJgyUSMwVdNIiLzeHt8na8AjuCiaGa2GsMka66IWJrTGGN4qRehfMzrhzhebPIpVhJALqbdSv0il50j2yKprMLKYqIN3raZ3pw2it1Arw0sNS5o+0scP719+VvPb0tC9DaU9BLPtu6CiRu4pogDMVUZZuSSZS+0vd3Q0hFKDpteXmvawr2FzHd1siOhavcD28On3TTpQqOMMlr1n8xlxbIZC9Ja78dIe9b8Wa/gqLUAcIwIwNHo92yED/Le5c4wQc35kjDG6AlsF8dedYxsrHq2Rp9MxhHjZWMpwExWxOrxmRyibzlMo1+bJQil4kr0oL6Shzi3zLtI5DFIDnh8m4rreRzH0gz5Y74w8gmFI2L8+OPb12/f0Jsypx2E6KADFTQqSazJjCNdegFdpDcgT1Rv2ezCEFUgWIViZfaetUBaMDV6j/rjOG1f17OhhP7aDhpMIbSt+yoitT/aCCiDq1avMaAx18cPP375+PY6jsPmwZAXySUrQoCqSCp6tSBSEzwORrVZnUj6LQGfzVvr/R9AhXgw5+fKgdxL3DvCfv35lYzMRPiV9QqleRBGG7DufS+Px+P3X2XiDSG0pl5tSLLWivZxjt56rAWYCvOYVx6R5IgE8FxzqeD2HXr9xON1nOuJxzvf+fb1+fXx+NWL+rH4sX6ux3E9E5wX3uYwrzHYim5h8zajrnp/O17zWtc1QKqq5pQdeV4v2ddCRJzxGDCG2pbrPN/W6wLq/e0HSd9WmVkSljKAWgEi8+N5dRIVJbQoiyNHdueUQ3MN84xxHMfH81mWiPr4+hgHbJDjcQYcVmZO3vr63fS5d4n2IK0nr00YgeqRo66ZYzBte/str+t4fx+TjrWkY8T4Nt//6K/+7f/cP/n3/E/+Z3/x7f/7b//n/8v4R/5H/69/7p/4D/z3/rGfYv75i3o93c0rw1bziW1nHuCimWj9ntqYNRKOQ1aER0SS5Yi0y1XryMPln3/9+zwGSwdjxPjgdZhLkcfI8mtOnsMSu0ST0MQuxJEHHAPhWfELXn2PP6J57OUvWT/+9NOf/fo3+XhrK+BQ2h5YOc6pIl1aaEs0BwJVFTEYvNwGp0hhjd1nP8BYOs8zz+Pn56vLfUSDc80fBIDRE9AIV5BcKidzhN4g+Xp+dOe5GghgHGO85jXGQJAOrzWQjLGyXEq6bSkPdTGar9mqFe7Z88jntZwj1lpY5w9v3749HzwJVK18DD9HmYy3QkEaHO3KbF0rM8khUCnCyVU6AcJtkpeIwUjBU45pgBkoDVJLPR259tp7HIzhNkVycT1f1zgOr+tgel4/KKpqxTCF7RfN8zhea/7Jn/zR7373O2fmlKd++vEPzhx/9ps/yyNfnoWD6d7rd+bIx7GmSAw5pcOOyPJe0DiO4XQvlmiv37e3t9frlTky46PmI0bZIf90vv/6+fu385wv/dmv/6JF5l1DSjKNxWC8nY/lpXmNcRTgSy9dkVx22E2jIR2BA2rhiuwivn38PMYIHJr2G7FeB/B2Hh8fMx3JKIVzrlL4OMfLtVDH8eYPIrmhJBxh+rU+zjGySOM8377i40rMxeA4hL4hIUNcwaZrhiAmhcHwqn07Ri7zcMB6TSc02OxQFZrTrOTBMOc8BqRCylMRIaLZhfAK5irF0LLIGExKOCitITFOldgTVUYZiwY8ag/tkxnjGA5SKtw06Awwo9ntpQZg7zGnnZyqkaeBWmW2HxjaHGMXONs/3+SI5NI23/Gnpecu5CHJKtyla3cJYwxEu6BuqUxmMvP589c8kMcu2zOSSK/FIwh9DlnuUQ5brdFM97WujGjlQyTsorYPcHjvWojRW4n6UcjqWdTx8fNHnkdnX5KZ5zniui7cvUjdpJL+95EsqCyKyQw0J4mf
8tXvhm/5o3bOP21ZGyYoCjK5LMQEvf7+x+4Vr83OK37v2D7/zH+ObVYy/5xwAuAR8u/ty3f/qO/uLgkiNtNsdUD7klHyMPYySOXsPBixqnuojV4AgjRigJMfby/rXHTjfUS6KzF1dti4fj7uZip1+tDg6XSk3xAIKEzCxO4e5NFWgwaYdsFqDuzhBOIZX3KK6jE5Kp5pyLjsMwtF1pW+OEvvRM1EibWPqiIFKWpWEBjDFCiAEgcCSceEB1eF6PnFjbgoWkbrxonQNBASJSqoGSJzqnuVa/jeMkJjq6ZpCgKCAQ1YejOv9ijXGfzLjkNAmvpdK6GkEDIhBFJwrCEHL1JfGJXfrmB2/5z19z33vvfOTOC9sn+m4pZQ6kCLG2XVIAI6ZAhZw6Uh+HZZ+61M5EXUf1UGfAuUhN7VKfBMY4Tc4B1nsUxCDiNqUjeZ2INCzT1r/hlBIhM3OXGmI8WkNERA0DAkBDMIz19ADW73n9m6e16BRAEehTuNJR9JaZYfi46l1t6Ps8jDquxqIH/aGrtUIppW5j48AOAGneJD/ZfmN+4S/vfvoD25//gcNXHc+zXfRG0BhbA82FCVrHglIQ3b2YIaKH1sSqWmGklNquYWZBYuQqGVPKecnzZrOkUnToYj6bb3lzcHi57O3uTS5MAABYdH0czBYz34FTp05de83mlS98dtXcvT0eDlC0hwI+Bx2U93ST9i/pg++5/Jav/bPXzu88lm6+8Zruvst7n/jo+645devZJx84e/bTt91w/V/95v/wb774o1869+WX3voNtCqtNaONjqtkvPDuAMgBVmYDa0JqRLjGIDW0GpZZS9fOSymlOAgJthBWSl1YcvGA6jYi8CDGmrjtdYpuEVwjSAE8lJAiDFHqiNg8EKC62wFCiJi5lHE+75bLXqc/Ety9ndZDxu42eIXGeASjBxgRE6VSimpxAFN3GBCZBCv93AIFEACVUIJqcisCswTJHMArDM4NFpSCKYehASA4EgIMeUxt51U0yZyHIo1U9itSMjfhJufcr/okIsIQilUqCEwEEpgIiMRQAIujBiBzCiBHdYhutjEMgxAgShnHUgo6NsgrCoMAhCB2K+6KFKlNno2ImOo7xRFBUxKr6ZRFbR5Y3fYBYEXRQTUgrBGZvnhEjOQQgAIBphoVEA4k3DiCIFYgdlCNoiFVSw0Vw4uXL7vDYrFYA7MNkcyNpIa4RCCYZoVMCJ2wY4WvgRAIcQGZNQSrcbEzP1BephbC2/4CMx3KDkaQZ5XFl577w0F1Dxu12AZEkMbHpZdhZ3XxYOsGYAF3gKhMmwAkCDLCyGNqIoOSztW545nmZXHqWtJiFt40ydGHMUQcIufctu049kI8pf0QAhMAAyGoQ9QEenTE4gYRCQgcglhK0dMXHrv7yny865WfecG3r5J9y7fecfknf+rqv/rbNyyuGRIfrB437vabzS/f/RbbOvXYb/zUTv8AvfQHY8ZzgezQ6Hjzg7++df4zi2Pd2Y/+9uxlL7e9J3HnxgCgZjbtpZqbLitc1s3pkCDA8aDJyzI/2RfDfnz3F/ZevXXXnZ/7O5/4QUp7/3Bxyc6/4B+SYbeLX/kTe2+j//TG+98S/dXt2el7PvvEzbfvdKlBS6UfOZHlYlAQx+kWSxIRSE4FLHzr+PEP/sH//pl/9vcOD/d//bc+8Ovvfc+/+1f//Du+/ft+7G/9/SsHK+FstrLqbjIzAgevMeVGAIg6BVvWIWgDocXN1ADgafqVwajFowegBkjdvW1WVroQRITErGHMURW8NQslfPIsIiPikS/+6EpgpGogRsR6rQqST0upaQVRmwY0sAgPq/fr0QUG6xsX1z7RI7sUETGRuQNTgDMzVLx7lYsEcMD01CMZkpBvlLyf7GufeNbHb9j7wg1n3/PsR7/3vtuXkc3IwoPDqfLzA2o9JAERwkQkphmY2DxUg1oKFyTEoHmrZmEQRVGk7pMqUocZRUSE29SmlCRxFe+IiNTzIlG9mIkIq9oNkZlronIdF/jUSKMj8DNU5QCTJi6qSK1qFNfeMazbd3dTNTMrOvaDFR3HVd/3pV+NpuDRte2sSW3btvPFoswEaWPWaqPPS2e+uX/B23c+8vnZE98hLxxK33Cr4FWJKmZgDrPEowKAxcQiRUSqMBChOj9viBtJjBQBDcKhptNbaa9fdhzN/PQewpNf/II9kZtr/OT11922/fyj72zzYJ+uPsAPnrdHPlvu+XDs93aTrF78rNvbkxfLrqW+005mvVO7e+7gvrc99Y/+yj997Qvf/Pf/+c/sPvFfT8nsgm8eOzb76sNyZmf72LUAx/oTduwlx183lvMLaosWgrBu1h9kVwPsCzgCJGYKMlUmrolG1LXDMDKliXsFWOVaRAYeQUJM4R4RjYj6hIuBOulAQnR3P8qgr59SYoYjoZ9b27TVIVYd2+PYz9qOiCooKhBhXd0CAAsGhTtURwCRmKsDhIS75zAPIOaIGNWYoYm6ZiaYZDduEBQI4cIgLEgajsQh2BC7YYCHhRaKFjkhKQQSijRlzADgaiLSzWdjxWmR5VHRY2OxSNL4kDHAHJJACgpkYxYHxFCGJppdK0wQQlAl3WCG6NKMyx7BZ22XR00pafHU8FBy4BQEvvZJirtqNgZ+hnPS1gdOQMLGOAUBoQO6EBkQQFApxVi4TutaaYfDg9QwIGOEaUBYXQWvZaShpYg0RFQjqtC9YdGI1WoQIZHEDDnniEAGA5ucmVXsNumaHAWZOCIwgoCQISEhoDZ2mIss0vXN5fu7uwCAQ8f2GCJSwCxfTfnw9s/8DHr60st/bOx2VOY1QhKWu5E6OLgg3cay26x7XFiTbSAQI+aiHnGanso6NMPihp3NQ97ftVhG3/HMNAdQ4QBVZsaOoWSBhOA69F1KRNRKalgisMJTgcmLAkS4URC4YytRgeoA6Chbly7c/vZ/fuzut3z2h/+Mb156JRye+/6/1P3226+75fmr5RUsMZw8fen84a/9+HuWJ265+QNvvffKQwcX7rv0mR+57nVvfP7zb19eePLY6oGUB43Z0vMm7G8JHdSwuX4/2gU1izCdHAY0KdOx9Bv9JW22CkQAMPPz7jxzdUj3/uB2f+0f5PTu44/+5OXn/v7y5IfGHdx8LD51zXte/Nh3b1/dWczlwx/95Gc/N/zwW/7sQw8+emyxcJRcxqPnDBGxMACAuSAUjc3trZ//uZ8+d/6xMupvve2X3v6OD1x//PrPf+pj//Fn/ulf/et/87HHHrVcwHEANXR2okAXNECISBFjoEONr8QpbAu8oi8BLMAAJ0ydmw3DCgrkrk1eGm4PoWy0M91fjkKgNYerMnudOXGSUsqRgjXwaBBdR9AMhA7hCITMgMysVW7GzMzVSFMLN/dJEW3qNUm7GlsqphERwwF9rSUBJ6I6f3bwSkKoUi8CqnTKCoiomjV31/DA0Tn10L7+kZvvOfn4793+1Nc8de3xfnuQEhbkLlPUDFd5uUgXWTU7M0NgQ2Ku4A6MLbcEkVpxQhHRMWMkI6sM9HraikjbpJQSsTRN0zSJu
Qqk6sUsR7r36T71qOvbyddbXV1TRCJU3jdURVuVBdVBA0KsI5jWifExrRjMKvxvHMc89uM46phzP6zGlaBszubSNqmVpmnapt2QGTB17cZCZjZrfuDqK97Vf/Ztm1/83oOXbkAgYqu2EgsAtsjgiTCg1GY9CCeMO0EilhoWSSTCzEzCgZTDtubzfHCp29nM2zed/dQnHv7IR2954Ute8+o7/PFl/PbbL33yJ48u4McvP77JzcHlq4tbrtPnPvelP/xPrnnPL3/+bR9+66/97tv+1//+T+f/B23tHZbZ5gY+ee/w6le87mu/7s2f+dC9L3nZGXzxDZ3xsTOn92080LPzizfd+/hDn/zMw1/7/CtMDpSvPXZs9/ApMEOLVmlldtDkmk4lxVMCbJp21iGi9gMpzWSulkspkpIjWClIFBGzthuzTh4h1SowrplIlch2JEhkZveo9Sb45Nar9hh3zzkjQISVPqeUEHEYhmEY1LReukfaflWVVsCIqtZ9vV8goggcxxzutSAgFJQp0WhCtdaHwqdKXESIjtLj4eiVRSL3CIBEjBZmJTgkJQ+btWnIWRrWYsuhlyZBooa5X64akeXyAIBmXVdUpWEMoxrhQgGAGu5qFiARiByWwjNxAcQIokloCMPQm066WjULsHaWEnX1U6r2JKEGMTQ8IAABQxHqW1WHT0g8bZMsEIMM1MyQpZvLashMGO7Lw8NETIhApVZJCMBMlCPAkU29qCoBIE3pMBXRQQxdapBBs6GjiACTuWPD1f6g6muJnDOSQ2QvlVcvYaQYBCEMZo3NFsev+eKD9+rpu2H38QVoSRtBwdaDlp1LX24v/lHs3CU+uh4ENzweKIpvnEB32jiB0oJrtW48sybf3D+/4ctjp+nZ3QnVdJ77y3u7QSPCYiZioZ20vWZTb9sZIpaSE4JV4xswIpZSTp28ZrlcBoAQpsRYlTdmiIyOpmo6ISjaWTeOo6jGheM797/jX93wrn9747V3XDp3/41EeuKYmVnGPd+7tLv9P/7mh3K79S3/+s2r5bmv3PZd9+y3lPjhL9zz+p3zs4PlELNIlprlIrb6Vb4mX0nzxV6Yz7ciD4CAksAyVfMXJ3AHTsPGaaMGILq2tbZ5qpOBntfzvVtfBadxdexz+zd/qNkFS9GfwE2kr1zziW84+M6zZ594zWu/9jMf/3Qpi1VZxf4h9AsMj4h6Lk8vLYJ4M5bVzs7xz37qj84/+eSxnWtK8V/4L/+J0sbm1uk8DO9+12/d/dJX3/LcO8MZwqeoevBwgAicAr+AmcPNItQttHgoRgiRgjVCiOwO7iBIgYEUY3isemjAQYlk1DjoV4omweuT5aihrj5Fefo1xmesHoMmeR4CIdfLpl5LnERCAqmeICU8Ijy7u4epmYX50bIz1vanow4YYGr3RKSYdl1XRaTuzsgFHatY2iHcDbyAqxlqu1XypabccX7zGx+5/X13fvn3bnnsz33xJYOUQAggA2QSJGIE8MiHh/O2a5sUCCVAQ7PlWWqIUFoRrELWcDVGajoZdCT8PxxlTZtEhFhSqhpzOfJQiQgdUd9q0AVgrFOhj35NthYHRIxwniiZAACTzToCBMHDwat3mwDNzM1KKWUcVLVfrcZxHJYrVc05U9M00mzMFiSMbeoWXcMNjy0iHCz4We3OoeU7t2571vKGr+w89unmwoubG/dtFKI5iJmRUBKCodg8wZoQfnQl0ETyRAAgnFYPCg6EdnA4O30TnL988V/8o+tvuvlrv/2P77/zHQ/97L93zQ0NN3//nz36rs+dv3Lqjpef/p7XNTN86sNfOl+euvn0zfqZD//497/h7/yDt977+Os/+NivHY9ZX/DG24Y/eveHP/3xT73sRS+9/typx6889pWDBx556h7w4WTaSPP07BuuffGZW7544ZNfbD94/7u+tHn2+q/52lddeur8aCOEoSkb5okkRRSQhzKWLMRCVMKYORdLKUUEgrdMRASOFEAYFfONiERYbJ0DfZQhXeVD5lU6CxG1zUXEoh4Rs9msDKO7N41sbm4sl0t3WywWeSgUU5bU9KgAMIpZVleRGuNr0xYWgwISsa8buQCrjGhz0zXeTSE8ggCZiZvkGoDOLMiMSBhoFigBiQiRCN2UE2OE5iLV7GsGIpKo4w6ZxpwtlAShEZ0IdBEe6EoOhZAjqFhJEGFkOGJIaET1JzODBSIAmkeEWfVkY9nY2FoOIzeSFRhRLZdRSykQRETqCgDwDGx4jfWsVSwZjAgghAFNAEAaAjSckKvRow7J6rApJS5WiAgBBKVpRFwAavolSCImDiSpEUmmaI4eTuYWxIwBZuFmIFBHXlP4NlAlGtQcOeYEFTcctUmH7JYO8Lbn3vJv/91/7L/jX9sX3nnxkU9uvuZPJjQLTwQ7Fz95x4PvtO74eOnhZnXJ4CSmLZUZAKCOQRKz7bCClbviMSlOIgBpdvXxu//oN255/D3/YnvzuTc8+4Uv+pp0+/OuPnV2Q6CUA/F25SsJniMV14al5WawUdEqQ7BhMoQnLpzDgLZtkcEZKaB40cq9RjPwRIII6harPiJkuX3iwt/+2c1dGd/39vR77z597HRubXV4iLu7A9qT17zg1/7Cr4H5t/zL1+ClR3Fn58y5z9xz3anmsfOP7z76lYubL7p28+DiEptN9Pac7W40euOTv3f5rh/aKlf79ph2C6ttPjceDsTgDuFBVLir48EVALmVS+dumy8/+4pf22/ee/V6fs57P3v8qy+88pwvoIEPEQ6HdPlwGBfB48qe/+JXPf7k/i23vvBLn/94SlaFw8xMjJUsGBGJmzGCm4P/9cs/d8M1156/sr8c9hui7RPH1TS7LTa2f/6//cJdL3rp7a+9LRC0H0YaqlVfADHAqgVVw92RiYAKgqmZWUWpwVoSVTO4RIgTOyAWY8DDftU6H8aBSjCSMVT23qTYPxoUx4SYRnp6XOYRWDGi7lHXwMSUJKWUBqqe1ya1tX9yd6iaxMot8HCdngkCVC/rCL7JeIcBAQY1NT3CtUyVPgICoTsJw1QuIXqIe2hghiVgSH+V4XUP3/WpG5/88J2Pv/TiDTdd2vIgZ3CIHFYR0IR04ppjh1f3LTIxq6tBzNrUNtJQCkJHsKLztjMzIACMrutoslKvPUWJRSSYEwsL1cN0+nw8KqMg1gQuiHUIR6X0I9ThgUcAgmFwgEHwesccE2IMIWt1eEwjUHMrauY59+M45nHs+34cxzL2jkCJmlnXSifdLKXUdKmbzVJqKRpCbBXCYMByg2z+ELzy7+nbfoM+cXecaYOCkYmQzBmREFpsAp55AU+ZQohTdKNPS6NwCAhk2bnxxstvf//5n/v7r/zhv3awcdtnf/QvnJ5t3vza7+q+9RV84qTedvPTh+pPvvWpv/unt9/zHWevPHX8s5+8alcvPvsFr/vLf++XfvJvPm7/6OWvfdVD54fm5mYxLPwGOvMt8Xfe8Zde+Zmv+8bnffdtG7dyavw6bghX1i/b3f54/1S+/wuPvW/z1u39R6786j0/+/I3vmZYln1bllERvY22
Tmx60EIgqcG6mg3Syg5tKBB1LE1KSGi5UJJSCgtXh2jHHMiIay/ANF6uXBlX0ymFej0a8UANd3fVjAAiOOQxDiEQU2oPD1eJpaa405o8E45ARtPDU5HS62veIrtWGHTN+KubJWYMM2VCQAhwNEdPACQYEUxJEhA2xTNiIlJmJnCFCLWWJRDbWWdFV6sVABUbF5ubfd9H4OZ8fnBw0Eor5MKwykNNLg+PlFIUNUFECg2LKaMLERnZ3KLmrmIyJKE6MgBCQYzxcL/r5vv7+9J241hEaLlcIjAhJmmj2kYAUaYCNSLqzKvOfYmQ2pZKmYaUFgyRGJ0Ey4TmrXDyiOAmFcvuxMjkEohayI0B0Aphwyk1HGTugQLoQslKJkIDMMuASIHVsyXUaClCbOtxgk14A0DgCmWB6mWAMDMtdtMdZ/7Tb/z7K3f/habf+8p//0uad7/Dv4o71wtTN1yead+0MajExtZ1D7374ef/4Ly/UGTTWKCMRWaR5sASjUA4ag+c0rDf9MvDEze/6Pf/47Me+sNnaXzfOf97n/5f73jPr3/vt3zPa/7Un1l+9fxBO2cYZyEjhSO2JEYwoCcgChKR2Wx2dXeXag0N4O4pqL69jVENGs+uATiUzESMFKopJdk+feoFd9/SrU4tvuUN+siPPPJjf/umz93TnNg5d/XCU6ef/z//0jvS8tIbfv4729d8/eY3/vHjW2fu/Yk/ff2V9vypO66z4SOP8410duv4yau7/eaihzzsc3vN5S/d/Pj7H7/hDd3q3Jg2vDkWlABpGsQTVesnunXDXhu61V9608f/zamhv9QfPPaKx67O799+8kdNDrV7oDmgvOm6AD+MY4cnV9Zfv3XL5z/72Re95kUG13R88823yFfu+UDTSKJJeVgfVzNjPCiyuPrIU/fc82lU0rQ4fs3J/YtPHhzE6eNpmYcmdf1q99Mf/f2x2YVbadn3GUcEQOARARjMjBE011sTISbBVGIR4WEYs2ZzRSRmqe+zh/EISlBKSZQKhlBiU0M42sfUyVsNx3ZzX2M10PHoAq5nTcmmqlNbQMRNamddIZzQH5VqhqBuIlJ3KhCOHla0IoqIyEPdfe27DWZOjO5eMMAjiayWy6Zp+iGntnELrtE6TBFR9cO1YyiRh4iu4LzZafavvukrd/yPl37hXXd94a98+OsYoGX29RLICIBpvy/OSQCL2azbBICGsCFBjkiczVLbAVCXWF0hccOCQEeLXiJiIWa2gKZpGpYAhyrRAcTp+HjaNr1uc2MEq95oAKDwuj5ngBKOAKWe5l4J2+gIAlMomyOEmhV1dfToc19K6VerUoqOQxAK82w2a7q2aWcptfNullgwEJgiEQC1AQFZRC748o/xC//96j2/s/3Fv7H6tpbICAGQOYFZyuFt4+AYSOvtZn0wAKABWvYrwilWGVPiWTeaPvr//8vbH3r/69/6y5/93T/Q3/oPr/zFt1775lf8/k+/ffjmbzhxchueEUc45xvS5sI/9o5rFW3nho1v/YtPnnzwq+9951/8oR/9J//2x979vl/fgJNf/rpjz3rTGcbtneuPHTu+dcMGvu3sf+hX/oJTt1Ps3HJsa9Ucp/GpXIaPPvau2295/tkLV208df7wqT/86sdO+qn+YLRsXXIYjChXFbTSBNAwd0WfObgrMWYt3DVBNI4qTQPhiCxIJTKzENBqHEMIqUqF6zPkQRPVtc45GTHnnFJSNUQEZCJjJIxomqaS07OqiNSbTJqmznWmWUINKSICMyCKiHpuanjBCDNkjKBsJiJB1GuecTKmaehNgAFNICYCaigIcCwWAdbIRpPcNcis5xDihbSXD/f2loccQMIRQNJkNU5NmK8Oe0ZBh6y5Sd1WszAzDAyIcAvhMaIBdALzaDWCsBCwEYUoGGIlcAASMiOQWbiVLG1TvFTiWyK2YiIdIrpa0VJTUhzBzYRSncM5CUVUbAaRFAwLbwwRsYQXmEjFAYyhbdvmrBaBdS9GmKaJHUS4qk0lLTgQEgk5BXkguAMzo7g7QhihqGqDCTHc1DxSsCCpKjA5RDEVaeoRKECtCDEPrsVKO+uOzxd/+Id/eN/erde99hW7v/Adx3fK5cfx4MpTL/Qrl5tufurk4dkDSIu+aCtw/WN/gB4P3fV9jXmgq4c1m83q8jA7xl4W/XkF6jeuh8DZ8mI0s7N3vObOL77roo5/7MabPpBu/mSUd/7Gb5xFu/WbX/viw243qIQ30CRAVUUnxihugliGvD9mCkhIDaaiY5tSdgQPNK2+fyMwQ6VoTRDAIkKodxUI/PnfuPf9H7mw2Iq/+APPfeMvv/uev/j9t376Q0/e9upf+ZFf2zg4+/rDt535L7+9Zdfu60VAvvVNb3rkV351dewNLmPMj73j3MbL8yM3NsxxzUw2gnYfO34jBc/6S5e3b8/dMaiooXDOvZRD49aka4dDTM1tj3zszO4jL8bd4epTLgkzvPTd8OiftI1Hv3V5+v39iVUApD3Qzbi4/fiZ83dYyYZiZf9//ud/9f/7ibde2Lt4bOfkHc9/9ZOf+1AsupF4IwFoipI1DvaWZfPk1rnPPTT0NjtNi329eu7CSPL6Hbjt9HD/6Zc8eHaW8J7hsDx+8X682dFhmXOXNBe2JFIGsbQkbwG1Ti+1JHRwCAElJm5drYakeYxSFx5GlJzcI5wJCcFCo46p1JgxwttGahoMNEyQ1DIAFEVJKcxTSqCFkUqACzpR8Sga6N6Gm5YkbR4LM4+WTT0C2QG0YBgFbi42L1w4nzoS8nEwcmZQQq8kcVOwCAt2t/CMHqDs7rlfoaRxVRDY6vygbcMRmzSoB2E2D3Pycpg6KVe42Xr1U/LRG7764HWrT9zy0Bseu0PRD6kkQTJB3U5+ZQCctalifYihm8+jZDMDkGQ0r4plRmBquUmEBIKILAIA3Eyy5wDv2q5OI56eK0PUM8jMgBCFixYighp0YXyEhPOpeLB1cW1HQ+aj9KcBTFDAw9Xr0H307K7jquQ85LGvZJU2tU0jnGTWbTaJ27aVVig1xCxBrWMAuGBhkawr8lu746+5ePuvnfz0O/DDf7V585OrfWzdiswt7c1W4kI6UewRkYXcvaLXSlZvhcZxCCUX04PxEw/4v/jJ65YXTv3cez/373/hum+/69k/97mv/Py77nnjW9qz99xw440ZFNIzxortqthWOr6Vt58Ff/fHl7e/Yv7Bn7l6+O6f+68fbry5/adePZ4//OpPfVK/fOX6N9524uUnvTRBvLGd02bzA3d+0/bm6S/977eRXnz2q7/p0dj/2JXZo6tXDTi77rt7mO3e1+2nx/d3dXU8cBgl5hY5MGAmMkui6hDUEIOpUgRScSNJUCzQCf8/st47zK6ruv9eZe99zr13qkajLlmWi9yNC2AMphdjICT0BEISUkkhvzRIQt60XxJSgUAob0ICCQQIgZBQbIOxjQHb2Mbdsq1iSVbXSFNvO+fsvdZ6/9h3RJ7nnUePH3tmPHM1c85Ze631/X6+oqCuCLGJjZhDb2oWnFrtDaMCEgKCqhITEpmoiiZ
S731sKmTsVj3vCgYUiWhGjhRRzVhVkgXvQWhISklR0szEFHlaXFzM0JXKYLIs+92VolUmIEOOYOCgjFRbzlhh5xAAHGVnDBFxSk0gRiPxJGCBGFRUIzIFpqhlEytRBDJHCE0C8stxGMp2XdcZQcdOidGTr+taUkJiEcnbkZQSM6sZZ4g9ICEVqbKEZmyACQQ8ZpAUMLGZpGgALhQCms9zTBZCIQnrlNmtGrUhsmRAYs7Q0BKYAIOBtyJZ7VwIodCY8kw+CohZqDEAWjAx46RGCMwmElHEzCNbqokcgpGYR1RiVGFCAKwlJZIMHCrFUJu8UU5qgFBJYkcczVQRidipgYIWRSulhrgYpn5HfQMEisnTcMil7zHRhJtZxJ4tL0APZws3eXzvZQ+e7Fmz8ac+N/etDyw9/EgqFtGVDxw9+syzrxisLMHBwy3ftpjazuqlBiSUR7/97FP3La+/vGslrMw9fdUvJ9+BcgokRZWEDgwwNUODtUcf3X/xKy7+9M9Vk5v9wvIv8rq7ml2t9tjj372VLz1r7uCjr7zoqoXWhn4zR57GdTJq6vgyggwVMr7NAIRgmBpGrmpxHgTR2ExjuyybOrEIACoIGakpI3tid+D04MmjcXJD0U3wvr974t4Xnn7zb33wjr/5iy+//i/X9Q7/7I9fUD/1G033+OH6ifGVU3Fy6/YXvWHrTf++1D94bO0OmHu6tWXj3Z2f4E53y/aLcPOVK61NgISaOiuHxo9+nwB6ay+OvqNhLEifUy2hw5rGFw5Nw/Al9/8ZnFqpKmBsuoadztjODz1+7ob182PXNGt/Yc1RrqP5ymSiVW8NH3nVO99x44eTnT02MV6OTW7aumFu8eTp04ONm3fghWnf0bs6rmRoo0vQIPqp2dnWoi1994nbzXtdhiH6GlYu2rLukkvWHtErdj3pl7AaT5e/+Dnj1SzM28FaEho1fQVASTWTiYGANkkcMJlSduUygqgziTFJI1Q4xxzrKgF65GjKnKHiBnn/qiMHKvpMVzdAVDTFHJWrBGxmjtBzyHxmxxxC0CSaUAWiiaKYIkrSaGDMYIOqdkmS5qA9YgJlYMDl7gryCLPiHZkBqTNUMDDTHKauIKrqEDOzAgBEBFUUySAliUVoKagLblAPXCiIaaLsNMNGhQwECg9x2IqdHzt+xd9Ofee28+avWlycrDdPRu5rN2CRilhiGYInMGQjIiHSlFB0NEYwoewmcgR5b0yUNcChKMwsZ0oiIpM/o+C3//WmqvEMX9O73NyoKqpJ0jNtcbZ15ZAJXo2ezanmmgnhZgRYnVFlAUiKMdaq2iSRmDM/vPc+hLIoy1C44Fves/eevdPsEMvTMwAwLUyNiyE3K1q9rrzyP3qPfNU//rOD68pQxNhzrjSEUjyKwP+a2OSjA+axHBekQ8YJaJYN+iat+qufnR0ubf2Dz973sQ+Xj9wOLzlvz39+r/tHf7h2YVdrcrobxjy2Eeoz9TcNtfC9Xj2R/s//M3zJ1Z/4l8eO0I+0nvWGzectwI2fSxcuTf7a1snWSv+j+x79t3vXPL1j5wu23g8PNRzX8MZqkU7efc/KnuNbJiYOfufeOzZeWZcvl2jWeNfy/uw1J6qS42ObYXc3UVlKNEkIlnM8OUEyA8tx5AQECGRooqMViFqK0hB673IWAhKtRt5KVkuZKKwawMgAiUrHTd2gWUpx4/qNJ0+dSiIhhKpRE3POIWrKYemootGieOeJ3eLKInsnpswuNjEHFJZlScxg2KRMulDNzmPQzJiDEacOlDAAkjF7V8fYckFEoqnDETsppSSWjbCoIoronAMwUGiahohGpgIzEDWULBMTEVQUWLUwrHrNnXPZsQDClUdKhElqUohKio1nEMkdJxGBWh56F2U5rOs8QgsODDEpMQbnnEeqmyaZGZLLGC+xxMDgyQg0ISUDEfCgbMqJFFUh5TzabKBGU/WGrdZYr99vtTrDpkZGIFRTl3DkYARwREE5O6eSAZghgidylBX+QERARkCi2TlCIJrn6w0ulzpbuUUFJhnO1G4Oezw5FU3l9Jx3Mxec95rx9a2J47t637rx9FP7v/873xw/9OADt/7RVMsNlmR8YurJuVNPLi+c44tjgyphfxI6A+Uw3h66qjUIprF14C5M0q3r9tbr+huuFKnEl0O3Na/424FsZv0Op0+XW8oP3Rk29Hf9whufZ/iyYvOtKM2JQ+9+9ksPXMyH/uYjm179vFOz500vr9TFEpdrF6QvKj4/ONTATDQBADpHzknKhRmD46qqTBEAmFEVgcnjaCTp4vKpNbJch84F1Z0XbDp01mHbd+6Pf+PHP3jOY7e88pM/Hltf6u08q11b2V9JwCdPx22XXjn2uj886+kjxTNe56c28sQMAFiK8yf3bTn5g3PSwuSJ3WX1ZH/YBwlNKB5f8xccZejbtR9HPwFg7ZWTarL5zk+Vjz1pg2GJmAAlKgBP++KSO3/iO6+gyd7NzQqgKzbdpxf8145zfunvPv7in/vYj/78z938D9e96NXXXveS+YUlYmeJlo+dWL/uvKHVc8ceDtxExdDmYdO02hsf+Oa3Dz6+ryy8s4gAsYlhuNJvvfiru4PTfWmlvPYF1206K/zL3XdNrjfTVA+GGKMYJtZEBpJSUCJWQBNBNENFQpUUo6SoRigKCRRcQLUIGopAq0bevFWFkY3CkoqaCgAg+dz9ZRGKEeV4CFMiJELnfJMFDjTaewGAgsWUxAQSe0coyl6SAgCwAYNx6Tw7qesQHABkKkV+hFn2Eyka2yq4xjwHQKW88sER7V8RWkUgB0CqqL7w6BARkjSu47AKE1qbc1VplRbPWjr3RUcO3r7jwE0Xrf/Vh7d2S/YwWfnOWOrW7dCJTjWxRwMEsJTEEzmmBHAmMAoARswpNVcGyK8bLKvqVEEBjCFvBM/8VDNdIW8YRYSa0VQ/11cVOyNAAzRVXX2sU2YNnpE+SnaFJUj5h8+MBJpEkzCiWQKA4HxZliGU3vtQFGVZulCMoN80coPBqqSaACIJaVGktIzDq8cvvmx5y6MbDtwUn3oDXnGcKwBASVawi1FcRtjBGc0RIgKQ4IABBuO9cCp1Y+usTefs+PKnujcfvO+3f+eav343vOof73/WG9Y/+ocbxvzi7A5JEWNfCuf/d3YHIHc1/ejrB6+5+oOfW5jrQ3vLhAi1p8OWl70jnZgHeWDn77+9+8v7Dv/OHfOfPvDgvcc3XbadL+vIlu6Jzad1e9s2nvfUxul+vf7x45t1GI3XUpsoENbQcnF50zPLA09v8AOKRY2F0HL2WZNajpgEBkVjy/EgLJIQEFCJCM04KhNEpsaMmWKUFgeUZLy6P4PRD0UAGLCpajPzPngOp06dIgB0LqkiURIhZ84xEAbPiKiNtsFLTIo4NtGJmm0BMj45Ucccv6LaKDnviJFJJK5aG0a50nndY6Ae8tSKAYARU0rOBRklHIqqWnYlAWaycb4IiYg8mWRflQKAyz8Q1YyRjyKKesYcaGfyvDNVFg0dcQRBEAKHJGCJIOAIIIIIWaxX+qASGYGBkQ
1Nk6a8mFGlplb2pKpCkLNMyEBQFQBJAK2pEcky5Z5ZFM2BQ4CkCpRl0JgUcAQzkQgKlv2KKTBLEmJTZMsLAkW0EcDV/Ei6ZmZgIiIaNVmOmzIDgeyoJLAkAgLawvZyWFHhfmy55ak1a2qc/u4DHBcmXv5TMy++Mh16ove3f9T/ymc3TG/4+Ov/brEY/90v/O7TAzlK7MtOUy8FdV+48/HfecVz277uDuoBli32jdeipw0au6KWFFzY2JkeP3j7kxuu5u5cbE1J6KDGIEM3Pu36R6rDc3ye7Z2+eGcL7IXXLv7Pd9+09fKb6oexnPjL9/3JX//lx85718/e84mP4fPr05sualdB6m4gInTNSEygo1x5wvzMUWVfejOLMaHaCARtho5XRZdGRA4lTTzwj3987b9esf30dAfuHHv9+7sXXaX3XXXnR0K9NLzxc3rBXz/h9BjoCZg6ieOyh+Dyd7hzTk+cfGpx7nj10G10fN+adsewM2x2jcfbwa9bKbhF5aDbPfnoXqv/uXrOT1E8BG4M2Y8Nuq7uthaPXnjf13HocbK1HPsT45smxid6Rw5M1v2Ta5+14eB9b3rnUntqbM/M1qePHWttXL+1v+EXv/AP//pj7/34q3/up2/+4CWnn9Pv98tO6dkv+0Hv2PHt264syE4ffqw9PlGlQVlJ3T1RdY8j1B03vVTNi50an1pzzjNuuP8kprl51zn7tW8+a0330Kc+//2Ji84bVHU9rE51jwVf1mYYwAFjg74AIULH3nuH1vIeNIHjRo0ECblSbWIqnHOAapJD6RGRVjs2YEAlNVWifNQ2QEViXO25HCXVJAkdO8cGQg6JvIgxEiNyZrBZUjU1i/XAOUJEDp7ZmxmDIShpU/gQMuFF1XuPZkx0xsVhZC7jGJEMjQkwW5CZGUZyeTYiNCZ2ZSmGQPnDDGaByYoWFCzIU+S9SNnwj/af84OlA9+d2v+CzTsumJ8uWzyIiK3xlpoyIhoSqIhkfC95QyAksOzAzxRbdsTkcsTsqF6eQe+CIaCsMnpVRDSXzaw7J8oGzfz0HL0BZuOpEaKa2qjrzUpzVRVJ+UuJ5Ux4AlQDoFwA1IgYnR+ZcT0XRcv5IidusXf/e0+fB+JgMDLPKDYGXqMXS2h1y70xXbGr2vUFuveGtNOATJoBWUiojlZf7KrlBkba1Da6FaNyaf7EfHPWhTvbm8vT//Vw/MRHLn3Pz+x/bK/9yfun99xLk9PddROTR04MtJ4oZuvZtelg90z51bHF5Rpodvbjn9q9stjjmbOG1ZghJEU/4dOK2YPbn9j9tztfeu3sJ9+27g8eP/7Htx65+2n8RNobxo+d81DaOGnTluZ6k5f9Ls3M8MQm8myhi23Pg7FkxXgHy3Oet7jny+s6ISh2UcEgJomcmDzTKO9FJOd6rb4qAWJwRA4opYSORSKSEUGMNYJlmxwRpZTOSPdlFIGFahnpN4rUJfbksOTQLlsAOhgMMrZirNWuBsMQQlIbVM0oogC0Hg6Qgtrqt1BjIJHESIkMcXTmA6Ocr25AjrmfKgIqhYMrakmGZkmESVUpX09EJmoGwOB5RKRPKTGamNAIWmd5HkM00lkjoqpEwxyvNNJCZlwkgDIQYgOSQNrOOQ4xl1E1511G2mdsCxDmXAcky+lpCQ0BxVAFmlQbYWbhZWgJAIAJExCSgDEViDZiXoLVSTxCFGFAEHWhAAMilyxWsfGhjHVThGCmbIDo9AxaTnOSiObZVBQLzMCQ7y/PLAASU07UzhTfLEbJvwunThar3mzLpQ6u9K94+Osbdl5UbT93ccul668/b+VP3ysf+5uNbdedXHPrZa/YdfWbX/bJdxQLT719Yv37lhY09c++4MJOe/z+B+/54Ldued2znlm6Vh0xtmEN+H6r1Ni01qzt2emFhcVDTz6p7hGOOnjWOy1VZORLL64zPPTExbLr1Hwa33DsoaZ+zP6h/fo1/SPNdceXzm3KA07uuuuua6+56m3veNMv/e5v7/rofxzn6TS7rjOIfS6dI7MEoiISvM9ybjETU2/YDKui8AiIhN65siyXe11YlfjkG98h4f959uGLsTvrJ77Vfsfft/7sufVNV8m9T/zETz566vdPb7ksHeoAQEdDsTy/rjpx7eWb23PHer//HGqaGy/6SbnsJUuukVpLOB1mrpgbjrWam9bguiHWE5Nrpi6c6R2+84Dp/GU/5mZD0MpXvc1777zitg9D6lbOij6wptScYLJBKxWoBy5+2bXf/BAYJbHO8QXqr6w995w60Lpq/a/d/Kl/evE7/+lVv/yWb/zJVcdf5kjBoofWxJqO+fqcC58NvcHKyh4X2hXQ2Jr2htl1DoolS+rgR178k5dedh1Mtk/de+KcnRtf/cyZR+/97lfu+MEFz3vR6epYU2u3VqcJUl2BQa2kHhP5JEzmQ2EwCg7SFI3JABUhSkoAaiaiwXuHzpKAJxoxH1fjjxgAMpRlFB04UiSDIIIgGKESeiIBc9nKzMCIoJiUGMzByGZjCq4IZGAmKWkWWDGBI0BzGhOWXiQyu5Cjv1WzC0NEVEfpvLnz1NWbf7UjzGZhCy6EoiD2RtyIOucCeSJiwqb0noAiEBbNRFpO+tx03msXrvxM+64vzT7y1/a6FVmcLLTftMCiYWTmBGKElsw5Z4QKmiNccj5jrp1ogAZ1zHw+a5oql1QcRVbkigtnCnB+WiWQLH2PMWbha37epZSSiukIQ8irMY5Z6AQAuVHOnwYACcgRMiATBufZO+c9eBeYveccc+lccD744JjRiAxRVlnRkM1OYAogBhwxQSWEReKl4eJ1xcVrB2seXbt/16lD22lLV6sAMNAU0I2aegBbpaFZMtFkwBQMBuvPO3/D2PqwctNDvd/4pU2zsPDn9w/2P6Br1kyGKamW7cipgbSmzrkkzm6YPuc5Rzffd6YANzd+o7Vhy2M337b82uvi+HopJ0ETDhZxbFpgElYWqlu2nf/vg/NvffTYTlx6xs51H//NRT6udx+o9hx66jtP2r3H6MHQ2nrl5IZr3fRZ5MYQiKDYZGlRrZakLR6fPW+Hf8nT+77linUeAMBS0oolOAJEFmAkWZ1DOCQjlJQsAQA0JUpMZcJSRgiSShP5QGb0v5xIP/wnk6gpYJNiuywkNibGBElqUNdA1kWydy6lpASh6FSxUTJTbbfHhv2uz8GUzCbiXM7hMUPIWmhYxSSNbkoiJFKzJkZybGbkWEQNoW6afAFlILtb3XRkFRU551cBF4iYzVdN0xABAJlZzHajVU+gmeiINQsEI5YMACTVSIaITiEheSIvGiFRVFd4IJKRQrNBT4JQFBSjILL3gVQbSwCJWJMBGrCAqgkYMgFSgaRiyIhs7MRyuTAEdWYpqSopADGypMwOEA+5nQB22KTaOTeURARIjmTEmVrlzYEDHOGDbGQrGNmc1NhxPeibGcAPYXyIqE3TzE5sOX5i+113TO979AI7Pfzyf37/hne1n/38Ez/y4sn7vj89tbam6QNjUzf96PsvuPcLV3//c0vFxEta7f9pwcOpfejJQ6//y
Z88fvrEsSNP/8/3H3rFRZcUbZi2chn7oUSyYvnIiWEzJMCZqelBb2j33RLuvj1cer2ee0k54VxDy8dP7G27eunk0Redmqx+/dC13/Lt6qGd+r39+2f/7+RTT1i5dla09Zl/+fwddz784V/+zd6e/ac8dscntB4kakNKOeo0zzMAcygnerZkSUWJXCsUZhqbxiE1krKhNMZaRNx4qLdgmnG22138950/Q5M7i1feZa/YLAc26K4dX/vjrTe8agnCOfz4gd7WtevLzTx1eqIl7cn19cJVe7/wyLqw+dLX28IRdqZ1dXrdudMHz7XFB9VtGeqpcR7y+qnzcXd57weentl69uLC9qceL0/s75K0JifTSrc20pIJGj12aMZN7Nv5wqacuHTXNxwIadEy9GCxPd6GzmLq8cC9/ca/+tKL/vqzr3xvfWfveXvf6Ah8JWHCnzx6tChbV137qscfLJ868nBncgpqfeiJ3cYOBgsveOELw+aX/PmnH5mcrl74jLNt4dFPf/5Y5M4b3/SWfYf31L3TSXRuBTbOTshgWKCr66GBU9KUmpnxsYKdDiMDCdQKgDE5g5QxjZLAjMk5QjYkzHD6vIYcPYDQkAmSGJgSEqIyoHeUUxUE1RHlGHMycs6DatIU0I0aBzUVA0VGBgRNElW9Z02iRgbKxIzoiPMrYgQaIQjyLm7UqBHQGU4WESPkDebqdQNoZrnt0yRMRoQTrY73IdP+auTAKTSc2gZNKpui5HSq3X9HeuldKwcen9nzzZX735iuOdKcDEEXlVqKCSQnNXliAgTL062R2ntk+ReJdUOAAoAEWc6NpmaYQ75oFJ0wsm+emdkOqgEiFj6oqni/ugZOVdVkP1UjSVWDY8Y8nRZmBsXcXYlITBEAEhOi8+w9c1EURavE4JCoQA4hhODIeSbnfWY9Gdlq+liOpQMQ1JSigaW6UW4qXwcr+klVhmtl8nl66Zf4/n8PD/1xf1LVKdZofgmbcvUkpiODCGYkGRetvQ/fff7ml42fJftvfqj82AfXTy72DleNzPn154REQbpoiO3Nk9e9otl0bnPs+N5nPxuLF5wpwAvf+e7izuu+efVblzZfAuzAFJBschNZArXO2HSprRc+/4ZO2b34SZy/e+2Tn7nGn7+jPsvjBb2tL3+kaT9cHdwZDj0fh1PQtEAJDACo25VEnqo+d8Z8I5df+sxTi/sH3cPMCJlLgYxMmVjhXCgKl7fvZ6YFisSAKsDoklggGuGlyDGzpZFcbvTJq+y2JkVR8N674AeDQUEUgk+i3lEI3jkHiqqqSUDNTJOYiMnqI57JI7KYFAQxiQKBKHOeMFmjmvXwOdvWVseJgNCYBmUArFImS2COPc5qSh4xKEZGxJxGmC/REEJKyRHVdc3AGWZjiLD6N8rlJ++8R8dKxMCUTVi5HhKB5oDt7NlXdZ5y2iMTOeaUEgJk9T4AIDLYqssfTTQ5F2KdQLMLFNWM8odMwAyN02jbbmoJQInQogKRKrgQqrpOOTCbgJhUkiNOamiQsa8je1iePRGoqQHw6g8kH7IBIfs4ACiqOOeAWFOOmaIqNtrYmvUz/u4fnDxw8CjAiU7x9Cl+yeyOn//el47e/PGTvWZ8bN1CgsJ3b/6JfysGyzd88V1Dr6aDucHgF8rWr4RUx8WpqfD21779rz/6NwvLSzc9/tA1O87hgGOEWFuI9TJEZHJM23acnZZ6ZTQA8HSgOX5UHzs1/cbfvq9fT3em7/+125t1T+K33+3iZUvjN7XW0kGs9APefjz5OKFV7SY3H9v9xC/98/v+8f0feujz37jyhTc8fXxpJnGfRroaW2W05aELOlJRIvKe67pmZoWsA2VVHbFKEN1YEEiwptVskO/8yvK7+37dcwZf2yFP1GH25Anc9c3HVmbx/N/5E3hqBma140J7ek1nvc6PT9ni8asbdHd+5gfUOu+Zry1I6zryGK855517v/2R/u473OT0oNECh2losmmNHNl72d5+5GolSBHNutZGl6RyiYcqNO6h7u++7CWTi0enTzyoncInwGgd6Gw754p6obe01Fsz027jxNu/89dfqia++Ly/WA7zL3/o5930xJGTh9sxlO3wyL79G866KnT8vu/eMrbjAjc2PbFuA0g6eqp/fPjQmrNnrBp847Y9m9aXV132DGP3+IE9J47Y5GVrBoB79q1cNLVJUxJAknwoBTR2zhFSjsJARE9IgCAZZ2dmIqaezTQlTSGETLNFPLO7zbcEFeQxqSc0M0YlQANVoCITVZghH6EB1cy7oD88LxuqZbMvIjpiYBwfH+/3hgqmWcFESKZs5JCYAG20RkMCFUBEtWzNyXcNII2ww6tz29VLBzh/oCgKYBeKApHYOVVtgYskhs5QGUwVvOMV7m/Hbb+wcN17J/77MzN3XXf8vDGYGNLytFKDyIaGKCKMqCIAhkzeMwHmMVQSaZqmqRozEwWRyJTPAYTIuc7lMCIYpYWnlLLOR4HQzMSnrDgzEyBU1dg0ZohMYpBSAlHvWFW9y2mMCmZqlNKIxOQQvWPvXfBFKIsQAntCRO9K55nyiQQhqqQkAAoC2eCHuopIAq2q2kTrQbXCXWnUJDWgCZVl8RXpgq9MPfJdt+vhdNb63lkJu1pMDNMgo9oxzx0yd5FsfHz8ie/eu3Kk2vCGybn7npr/0ld3/vQvH//P1pqVm6b85mLtcGzDKTJItGN+/Jq63DS/fkf94tc/4aeO1KOH7/6eO/KXB4T9zOmnQ/cUmdXtyTL1nec+jxv5prUBtF5eeyn3Hx7MzDxx8RtqPxuV3YKz5fX0+Dnbw+vmKBrgMDpbnVyCWY8LTk17+bh0tpyztRz0lq++4qUP3vrluphDREeOAVkBENQwiVHA7IIDQzEFZAQgdJgaYo6gdY7wEGRAJ9bkyM5Vdi9kRRJgy7sqmaqCYrssTJJzDj3lJEMzdMQjzbxJ0zTEjCpqQkj1oFbVqlZXuKoa5L8NMqlqjMlw5JMZ2Y4FDFRWI0+YOcWoqpoD1piZsR9rBjYzNQEAZgQjI2Ty3o8myWeEjWbmvR82CUbvpDMnXUTOEIzR/Yi4OhBScMSVKLEFcLUZgjIGQHYsCIYK5BGxcF7R1DBFAEVCM0gAyoxADiIQIKIJwqi9BzDVRkf8YyI2dQDGpARKBHVMOYxETGOT7zpFRs2PC2Ay8N6pqndeVWtRI4Jcz8VM8oROzQwBGQkYgDCp5nMrquUwDNVGcTQB8IXv7dl/+uZvTp21c3B07/29uacgfefEwie8/4fNW3c2bve+x1ybbnvhe46c9cw3feB6R63leFIwHZqduW4Ar1b+eoKHf/DQjguufutP/OS9j9677+EnvvrEE88YLj1r2/kToW3QG0+um2oMvhcH5URrWMcEpi2oW3zRAw86ixu2XH5oy5P9sbP84g7xKzx/UTr7P3sblMDGXc2vxeV/68bCWpV2pjccfuToH7/3T5pB99TK6de/7h379j0JRSsLApIkZmZDIpKUuhJd8GIQYwbymahhcCSqCtJEZkYGR6YEwaEpcAf625bvuIQfRIsR
N+i4Y4axx+4Pp6GanN26Zfqphx/c/6G/Tfffsu3QXLtcF6H3zFi2b/3MD7C38/p3jvfnyXs/u+HHfvbv7/vK3849dNOayalhC1vUWtPy2+97ypZq8rGTTD07XeoTEAUzXwJjd0VcOHjJa3Y8eUuVZNo59ckMmq07jq5fVxRw9tlnrawsFYVfWl75se//ytRw3def9Q/D8fk33PpeDqZtODq/NOFtXgebt11+sLhv+XMfXhvWzRXj0+uLxRtO2w3/XY13edDZuufS2RvP+d6dh5uVfpp0a6ZxeIplB5443B1eIJ5xGFPBDtQsxTbxoN8tyjYVWJOUxqlJLvgIygIxRc9UlAxqAErOGWJWGCGO9ChomW5FiOBGkBBhBGYSADYyTY653R7rDwZsmLEbAKBAQESOUSBjIkyJ2GJTu6zOEEkqRIQOARCBDYTJ5YmfCz7GRlUdlYiGqGcm4oCZOLPKyM00H8QmimoOL4FGmrFWiWhFqwBDDj6mqqihNqwbA0KDGpknsFyR0y8ur3ru0Ue/c86+j/Zu/79zr4wVNRwlqpIZmDTRcUCDHGpUhEJEYowpaZNiVVV11YCZGsYYCc1MmVFVDYmZKR9hVlPHc+KyiI5NjGsSTcqekqTV4Bd1zsUoAOBcHiFYbpSRhJnMcNWu7Nm7oijQNITgi9K74EKGIKhHRqKRWURgpNPJjNqoCGS5lc9rL7KmqU21qquBWyIpKqrLJAOlWA63DqYvWVr7yOYT35vf/4b+WYdb/YlFNBr2ISu9KeviYmoQsfTh1GLzvLe+dLgCT/3Bn2/eeb4/9/If1IPnQ/+8F9euPUQmaI8DL03AbXfsuOGuzS9/5KSdjD80IXWa7rP86cvf97bx+X2f/rkvVp3pVHSiH2uIwbRoVlhrALxj2xvJXhdin5WVnTmOo4MiPi3qxTeAZ6RsuQInv7Ky/f1LO5a2L521ee3PL57obx4f33r5VfNP34yRgvMuGaJy8IKggHU95FVZiollGZaBAbJm07YoIgGamIGIAhI5yxN9gFzGVJUNAnIURIckQt4758wAkXNhyyENquaCyxwVNGsVoWkSojH7RgUMOt6L6bCpAnvNiGJTM2OgHE6vI7P+KjI9pgTmCCaK9kq/lwoEogIZ2KmOTm+jdQgCM0cRZg7e5zY3r58lpQzLY/5hoFmu97XUjgOvvjlHAJk9gZUTB+DNVV4ckkcwj3lzQ47RUGMKoYimjAgQxGoDJbYUUxIRJVVKVT8BgiNC55A8sfBIwKimiKpYMwISgxKYc2StoqySSJN6/Z4LPop45wpypGcW2SamMTVi6oWASBg1GZk4MERQQgDw+cZVXUUVYUrC7KM0koQcM7ms+ZiYmNj7vTvK2Wnon3wiLh/opJlqgltxheUt+5/65LrZ87Zsvn3q/Nte/u7n3fSXW/bctoS6fMVLirf85I7r3/CNt73k5/cduKUVvvPQPY8+vuvnf+4XXnrlyy8867I777vjgSePLpwcXnb25i0bZ6msxXs2qua7WiiWYanX8NqLptpnPfnyly+0rptfs76hF8ze+4vK/eHE3qPX/IGvyQy6a2BqUcdfwMNPu3Yi1wEqsWz8Qw8+Ggp+cM9TG2a3XHb1844cP5YnLSqSCb4+J/aQbxKUjgEZUOvYGJOqoWLpg5CklFI0VyspWD/ylJdlmtki9xM2DbXB9FR5rpcH4kK3PVGdeGLXnk9+Sv7ry+uHS210jn3SfgmFI9zpcfqe/3rAe3nh29eBdYZVdyxd8dr33H/la4984z/Om39yw/yhHUPsLvWaonJ1KYUPKUIoWhr7UdsuKWjVGltYf97i2rNe+pVb2jMz873ejFHA+MCBxzf80VtnL31muvK1577qlQNoUdn0ZfjCXb8wWU1/7rl/vuwW3n7bn1ZLAGPQp7HlU83YlPD0psWHfnDR2ZctXrFl9zu+reeEhgthhbLb3Xnn3tkHZ5ZefqGdv3vf4tSYP2GnLemY7xqCIbh8RGU1wcTBQzIzRnbEoOaQMZN7DLz3hgBiJTqmVTElGgHm+CFkYqa8k2fiYDnN1BlCDlggp43mVJw6FKxJaTWtiFTRjNEhKhIAmYE1SRKSAB2bO8VIgIpEsRJl5KJwLlRJgtOSCxwpjxxaUoM8oGMiZDWjLBMb+WiZIImaOUAl8MSBnS88kPnARDTCRYCrOdbWgIoYOiSrrW6aSCk28FNw3fe7x2+dffRFKzuv7O88RU8VbhxTDdF5dVgOArSYyoDDYYoOKRAKJLUmC7VSFaOCjQQtQORUAVlTEk+MiKrGzCHTFbxD1qoZEFHhA6CWjk2TAyJyDSihiZilpiBywefTP5FTFWam0fOdmFkNkX27PSYSHZlJQnaGvskr2UqiGhBmZSOqsWEF4hSQaagpEFuTyPGw11fVbtVvpYqowaQVEXhemh8iTz7v+I4Ht3W/se7gNU/tnxj6bliAirFESrFGVzgMyS/pysb2tKyEdefu3LR9w6GPfq63/0F9128/eceX3AP3bb2h7yfVOaQy7R6/4Lbwo3e0X/s0nMdHpdObv7izMnn0qVwnvTbjF1y69IxLZ2/atfOef33khb86Pn+oak9aZ4wklrHLGK989COO3Z7z37R/+pkV+f+dy1YDAHAN+aCGASIrdsuVla03Tx14w4kLbm5Vh/am6o823/yW4Xsm+1dtXrdj76kNVGGrjaU4IyOCpOiDSmJYVcAX7FJKZkkAqGA0KHOIL7MYpCaamWFCcpaMiPORxzFbkpocASCNqikzIzs2IxAkNFVRASBRHPYbVUpJHXI1GBCREcVYMRLX3HiGpJ3QGlS1ApEjNPJJGp8IFJREDcCFvFdgQOe5agoKiZlbrQBkIkLo0RQIfWkAYpSVZSISzAhpRODKdnNFA2M0ZG6aKhNmRCzPqAONAWgOnURESaYC5Fy0hjKenMwHAxAwAjEuvYmamJi5IvQlAgApc2qSJQPAqKTEampKjr3rNE2TNOddYlQjdGTGaOCSoqgZYFBjQxgf70x02kcPHQVwqkJtLyAoQILJmvUbNp08NQdGBIxmUlXsTIlTrLKYI6kmNTAgBTRoRJMSOUYQFVFVRC8i7IKZWWyS9Mi7wI4FewcPrltZOUXyQMCxOFPzQqWxrFrOwU8cOvzpC555y9s+tuHwQy+98xNPP/c5+mM/e8k1rzxxZGVsvgev+cn0t3/2pnL801W/16++duuNP/rqHz9y7PBzrrq6e0Hv9ttuH0rct3j84s3nlGvP749t12dceHxsc2/ynKazHgBQ0vTJPWOLe7fsvvnYM+4+cu3DC7PD87/6+JZHX3L8otuE1CEAWavQXkSesKqSVHfNUMsWhKKD8dOf+qff2rimM7ttsLDcNMk550wTSL9ufBFakZx3qW4sSO7AWCFGRaJqOHTOkXcxqVusnRmeGISZ1mCZZlqcauE6zALQQbtmhr7kek/tefuL/e6D26w3gdgNbRRFpmQKAVOdMKULNm9dc/d/73vkrqOXXDe35pzeRDM8dbw5ePKKAw/uXD4+E9rDalgEW+G167ACqY3KGEVLGjO
NsYa2tiU8fOlrXTPY/vBtCn3vPVZL9at/b0uvV9zyj3bvzYv33fitr543c/mLZ8+/cvLc83Fqw/OO/nTrvu3/evU7PzHxnnd+5yM4dIXFYVE9/v2vH/nnv79KJu86Mf/ULy/o2ZDG+jqznNtQ329xx+TtD+3/q8me+N7e5WKmGm+HX3jbq5vlbrcRbhcWxTMKqFFdlh2zMzkt4L2vYsPMwsBmrKCqyYERoUEAFFXLB3uzDAZCRIdMSMaIiMwspgAjKJWpAJEm0SxdzrL+PBwWTWqgFtglFMeOi6I/jA7JTAFVzZIKEI+Qe2YAIy8jjMjP6JjyfDnGNBLumpoiOa//i3pvlusymigAenKOPKHDvJxUIzXO8XBqKlKbIRAwMUK3dXrHysSrj1x44zkPfWL2nv+7PDU9XNv4rhpBSUUIIp4Ni4Ia9C0sTJNE8+bBUfDmua6RUYUM8gxZk4TAZpZxPzQKGURVLYJDRBUa2VoAsiKUmAFRsx0agDm3UD/ct6GKGROSjIDb2XOZjHhlZWms00ki3nsgbGJNiRMI5pk9QhRVE4bR3H4ZEtXGUfsOY0oBqK4qUamqWjtGAN1+j4gEzIdwujd/bmqtnfen18YfTB97xrH1i34wEbnpIwDFDhRNW1MCtNmztk6s2b5eZDAHh278irN45AO/m+694+rzys5UMgeLE7Ov37i3pra3ep0cuXz4HUoynLq86Rew49r8SxcwXTcxd9Hr+vMlEq/Z8535C1/iJGozAIIg1dknbyf0Kkth6eB4ub1XrE3kYHVECaMhSTYlmwMOGHc/+90xnFrz1E8Uva29DffJwBGd+OR5f/Tbj3x0HZ+/aeM2WwBENzG9ZjjsKqA3BEgK5rwHUSJSEUIkQO89OMSMXwYywhH6Xy06ZyO1riIy5faXGUAjGRsWgODZERfOR4cQoyRNllRBQVSyFlU0phqkDEUdmyiSUpqenKwGddUbeGLPARHBIKXESMzARgYwmp2qKVrugAmYQ1lb0mo41iqDYdOoMaEg0Wg2nz18o74WDEw1aca5wUjmYFFFmiaE0Gq1EHEwGACwjsJj8g5IzNCHQOTzXY/AWSxm2bme58tJGCmJsHNN02RfOyLKqu8RYXWaDWgAkgyBAyOscvrEakSM0ZiRRg+kDBtJw15/0F0BADMhorqpAKBVlAysTPOnTntDyUoMAg9MIlXdeEMsvDliEFAyREFAEwQwFBUjAyRz5IjYQDXGpKYcQGm2M3ni1FxrSwd6S1XRvl1PlX2FgEMVHqQI1cYZPues2fdd+wsTrck//Ohr903i+f92Y3ffSjUv3ZN7ynjeWc9+5X2tP/3F1L4bF/aZe2rX43/x8Huuf/nrJ2Z2PtI7ufU1f+A3XhS2XrJ/5hxgDwCuNzc1PLJl7h47ufv799z463N7t5xeOrLz/OKl1923547DL16YOvR2w9hbf397EXrTkEo4vUHPuq08PeadqGOqhsOibA3rWsxIjcz+8aMfec/v/XlyrkikSStKgWHT2vUnTs5LqWap4KAxEbMR9qshuWBNA2aqSswG5hpx/75vyx+ee4RRl3HmaL8odHy25R5a8zPd/sYppVAN2k/tbnVa0SYW6wFHQWZBZHJNjEVwky4sPrWnKCauag5d8o1/GJQzg3LNTG9p7VTBY1PLTXFIU2vMQg1j0psvkm8UWAownpoZHl8sxze4tvSX+3t3vnD7k7cW29ev9NK6IwdPXHjl/Asv3qale8Yrjn71L6on71x74ige+fTTN31e2q0ocXxqZqycuv6Ktd/857v/7wXPfuVbNroTMKz7oX9qHaw5WuA9frF7TeWdT2ujqbplhAlft4e4HAY7j8E4XDK95ZVXvZhmj31y4Xu7Dv7g/OlzJ6Aj4lyH6xTbhYuxLyJEGZusnbFOPRgWzkcVQuDs2ee8Y8l+AzTEjHvGVaw8Z1jxqvnPMOfOIjkg54KAmCKREclIN4higqbM7CBFUEYIjiVJTFL6ggDJaX7EG+YCw2UovHdEuipWytoljBqR8yl7VJWZCQgh39YIAGAjSVQGaSMjEboRkRLyMH0ETGeFFCXGqCNxBzkkr7AY+28cXHlv/8DxyZVvTj7+o/UVNWrb+0nkbuBWDIwoLMgB1bIH2jIG20kIgXM4XEoSE5kXjYFdSqkIXjQSGpNLKSFYK3jVUaRrLsw8ikZ2BoLIoEbMYKvBwExIRjQKfwQAJg+Q8+oM0ciUfEgZ3IQsVplZSkNFgJGzYrQCABU0EFNFIOZALI0MYzMcLaGhrqsBDgBATb2hmg76fUFck6YuOzp5x8yJezcfO+fwjK/TctEAuraGNBgES12y9skjYd1F1ZiOr6OTX7gp7dk73Qybh+6eXbN57eZ5IvRFs4ZO72weqFJo+zqh7/F0LMZ6OKHj0yfiiCiejOLegW56ob7tFfD/e+uPbTkxc8n///0wggPjqiCe8gN9ANhzce2uXzEaKA2LxSu62768UiTBE2uQv7fxKz964leKMMHs261xIkJkGomNuCSUqgEA5zCqtjpt730UcQRoufaCqAKh98zMmio1ysG6MvKdKQLW2nhDZZMQWoCIkBw4M+dDtCQiuXbKqk2cGCbHp1dWVhBRRYqiWFpa8hyMLCXtDfre+xA8IqaUkigYKoIiOTNBNIKE5pQqb8GgNB81gkBiA89sgug8koApWLbYImQ3rjPEnI2Zj3jEYEqMHEIws+FwmI03eXiWL0EAzR9NqpAF1ZiNUM4MyFBREQ2YGAARg3OqidGBKBloTNnsi/9L9UkAKqqYYfJGJgCaBRAKwMAiMjIzGznvCucRNOUtdTKNEQ3a7XZKqiDsfJTUDp5UdXW3DQDeF4mAwShppUkkoZmqCURmIEMABcXMm8lZC2LGLkgSi9Bd7oXCLa/Me9C7uT49tJJUquWWLy47S3/pkpVz18g90y//5+1vfc69b1/THm56YuHA7//m2b/+4ZPHn9b12x54avcrX/mqx17yqmN33f3T217/ocmzx7ddXK698MSmC0+V4zPPARkuV8d22dFH4q7/6izudif37nl8VxdqK7laSbNs54/tWC5WWocOdHZvunY48703HW4feslw5q7B2CIAtLrgK1har+76Kn5Kx/Z1ujIIRRjWQ2NOGhlpYnpSAv/HZz/5U7/wawcPzqkjMkyajg2WU+GZrCi9NZLVpMAI5BQMaNWkqkoZzv31Q+t1eeJ/rr97hdb0UvHd4+MTZ//e8sbny5G7BRyLtamsajKIgQzAcREASJrIjmsTGDTrxiaWUjMvMDa1dQ22N8CyrJ9Z7C3Glf3k4nRYMyjbVcfRqWXRRDsuilr7pw9WJ08X3nVXDo6thP5Z24+c+5yrP/+urX//Lw/99y3Ln/jd/o/cMBdTssfXbrzs7J/5yOnDt8zf/uW4+/vjqSm7BID18MQAD194oFMeGLvxq92vfe3YK181PfVE3QmzVVzYPb3j1X/0G3/nf60paiODFsAchMrJxsTjqbDW81/efsaheOzkLQePHcBtun7dpEVA1woKFmusxcrgyhKThbJIKXlHVdWkHK
hAyEmVQPN0NAsN0MQDaK4No943Hyoxp5spKhgkxRzgY6CqhqCryUuimv2mZiZIhilKUlXErAUzD5AACZEhB+OONMwOAVf9qbjqMBu9AIPT6UQjUUclOVdccORg9T2jHCFEYiLgsikLbDsJ7IJzDghNLcWYUqqbuqljSnHUbTsmEN8r+qFeu9C+tpn+wo6Hbhob7FiEaZ3pS03ex2XwHoEwDdQFAOTVp6sBgIJGjMmENDYpNiLJRNDISCiROZZEuppeDECEKS+I0cgIDBx6AyFgXQ1YAABVs1XtCSqiGRJlEh6SU9UkSUzMjIG1VnLehzL2ExGBKjMlNRUVlQxAABglKHFZUJUaSRqY1VDMGBfSyUbr03FO1BDBOU+GnrlKFRD2sbX1qNm6+adD/7GW37oYBqASsN21Zswf1b7T4Pbd+/T5lzIC7x48+uW/A314fNtOG/ByWk5l6rNOeuhCZ9PSF1938kN+ErH0PbcpTZy9Z/Of9P7r30/f+G9nSumEDgqYgK2bmmNLu7k/6cYTQjWzFp59FfaglsZNdCK5XmvjgY3PTxiib4MBENtIvoA/HEqDVe19EQ+jFicuff9w5juz99v8Zuh1ljtx+uF4+zP7LztdHxVIxwZPD6s+AiooJp5tb40UNc8MAAO76Ykp59zi4mIi0Jy77fKaxdRMFIiocIzIMUZLKSuh0Kwknyw5oICMnhHRG2p+vGsGtmhaNapledNKb1nMiqLQqiKiTnu8qWsEdsFjdqCe4ZchgowS24kIFIQNABiQoyhCCg7MqQgKksNI7BRzfg4aICEhAiqxy2nYaqY54310BB7502OMjij39FVVeV+EokjSJB0Z3EWk8D7Poc6YGJ1zRNm2kxRIVZkIAJgxJwwKrF7nWd0PgJn8jyBggmoiWeWHRMAEoIZgoxATAgDR6IhH0SwSkSBn24BojDGEkKpUtosGUhGYmlSwj8x1jA0YiSUAJGMFh2wAnPf6AAaZWAKqiKiAWtBYpNpMAxCURdX0fclaDXc3/UOu5QZaY10YXLm+ft9zV5i0X2793NaPPnf5c2/x/+6f7XeubDjyn5888OJXVltetXDy1AMrxw88VJ142UfvekUJANtS3T+xpznx+PKur9bHHxvOPSmnTyJAINh61o5mtnxo9yMKUCSPDQ/Lzo7xJFIN6v448Mq3vm5Er/vD8rY3vaje8Fcb91Mi8xUC2Ms/3br1DTXcMajfxfJ5UzT2IUpCBCQ4OXf8x9764/uGi3ffddulVz/v9MnllvkasVhOvlWWDj27ngyccykKqkWJCuZo1LihGjp2AABoh7sBAJZo5lfufiYtbfrdF54/I67SNpeT3veaZlhgMLQKWp5i0zREziOBIpYeEHtVLBy0RK2eX8QusufFo+04NDcuYbI7qMZ8ZxCAaWpC55tTc62mf8IATNbE/uy7fvvAnY+evuZ1hnz1z/7s0dTx5GH8rKWzzuFTzRit7eLTcaracv2PXfLatx98/L6n7/yfpQfv7u7bTyhBG4DhzCPw5ue2v/yN4f/cNvea61uTT9KuDRs2P/91l2x4tgNMmCAAVAhVwNmhA2lrZTHe/+3PPfJAc82LX/esa16wZ+UhMiGi0A6pGSDEji/qQVU6VOLcxYpYLbUnSim5IiCSqUbI4UjoiYmRjDJHQgHdakUctRfAYhkCi47QwCRBkobRkXN1bEDBe4eIsa7NxJCjJAXwRUBgqZuMO0fPOXwUR4GFaKAZs6eK7JjImB2uKoeXdf7Dc38LAAgj9gcCwuozFuEMxylT+Eb3KiHlir56hjCDEbzxTNkefZ1M8AEEA0ZqHZo/7fX9zbc2pvERgxNX73jLM7szP5XRkX11PpZZFpZTV898Sn5Jo89cdd/mBF/8X+DAM3+71RJsuOrGzt9ytbcbfcIqRMBU1Hu/2hDT6sfzOeaH33H0/yJSD/MPYSSwM0SABpoFnbtx8FkH/swxaDTMNTVE7mH4Tm+50K9Ut0/WQRukvgkidBHBSMFv7rbmD8s3B9rrxqtX4BIg3meqgBS8tiYSOzhO44V99qa1BmCgUfVkPDXff/wlFBr80R9mzFnwpgNIexhxgKDa90AAc43f6ze3TRXACMEQe93PCHlDMszV10byY01GLv97auZSseB7W9PxOQ1LczWlXWqc5uzIgp74m+EvJa1PNUf+a/4TnkKexwLAO/i9M25NUiACH3woi6VhX0w5cEAYplRHZfyhQymKeEZyZCKeHTMrQPayDwYDSWai0oziTJBBEZJotjmJjZzi2TQgYIzsPTZNE0Jo6mSknjiShhAARitby6iQfPbM+g3LunYERGBwYoQkKZnnSCRETqGjrsEMW/zhcTb/ql3mcwHQ6r0/UmiZZc9JFm15789Q27KDWUSIgci32+2maQZVlbfXLsd/kYmoAIACEYkqex9jBIKsZQMAJECkzJxDACKHaKqRGZHYZHQ0MQVAHuUBGxBzphKQYxPNqrFaaiJyLlRVQ44VhDmYWWpi4djM0HMgVzVNC9lAFUQNMEfsCIojJGea8XoIiGpAhOhwmBIzsppQqlQa0Lqfpjudo8zNUtcTcWVhavw9zzrJqJvG4d2bPz1hC28f/O3eda9/Ei8+8nNXHHYXz9vZcITAthSbythbuWrrzMrH3/WMh2+/TO23lo48hFVJbdOI2jShJEHiNN9dPLbnaMu3Y2jqgBO+A4PqBZGLtOLDuEiaDEUFrnz6WtLxs3d/6+AYJQdbd9EzP5uuvdtf89HJd7/v5OBfu3Sdt99kqs0TkQIZkPGtN99y9hUXPbZ2obf7nktnz0OhernSmbEpManjUm9IoUhgKbtDDQK7JDJyqSJ6QacqAXBdJwHAMs0Q0cLC0hO7n3zp2WdLGoikYFonahwiQadpJS+IqDH5okxig34VQjCCCKENQ0xIuDwYWCdw5dcW0ZtPk62xxe6yFmPLvJzG22PVQOPKxX/8kd71r3r4N375ede8tnXujsePXLy1NH9w39YXvmbxbx5YfOFz64amknanLaivTz+9Z+7Y5s1XbLzguZsufzHq8NSJp+ulw+nI/iOHD08e3rvc67/tzYuf++RjX/7O8Mf//Zcv6F4/jt0D4fEwcGlTggSgJmcNa0JK5HskS7xpYdvvvf+9c0hPHL0HERos2+1xaJCQa0FBaHXGtOlTsZqXacqEoomYmhH9H0FRBSKZoBbGQS2X3ZwXvzqFhoyLEYMElnlzgAhgSa3RiIiihjjCgyKiKaIjFDJLKaqCpSgCtpoSC24EgUoiwkT4v8ua5VnRaiPoDBF/bOots27d6vEcDMEhn6k2GZOZB9CuDN6HEIoMoCDnAFBFG0lNbGKMIsoAlrt2QkNmGyYMAH7a+9PFwp9vuKVbuHccef4VacvAEgSRYdL8ihMBZS0YY24PRg5KjcOhmjZ1qurKEETUewaAwMxEagZiMcZkCmYKJkirpDBCMB6t3U3MRNR0VGzNjD055ywvXcxMNakSuSipruvStweDQdkp8lMun9/z0kEBR7QsAEYCIkSUGM0xAQbmKGIAhDjfnPhK9zOvG
vvxDWGzgBEhEaMZGlSspWmnGbt3+uBNFxydSPCa+89u9yZqHFARqF+5iTX26APnz9Ub3/HeR373jy/Z1LnvwMLk/d/SYrJsmggGLf/ynzhhxm/beONvLfzaM+V2aaCpSdAe/Xq/GXKl7RaEMwW4jk2kgblOi8fmTE9Xw0mmNqQ5SctXnzWzaSuvDKhoeWddnty17YaBmxQOVbnWiICcr5eLakVCa1hOO4m9zXcOZx+b3vUbe1/5chlbcsPSYOCxmLGzL5i/4g0nf3W+PvrxPb//2qm3rS83qNJcdfSrS5+hAsFziwtC1JhMkldwxKBAphxV1UTFDC3F7ODxZUmAycw5JoQYo0jUJORcW6BhUcagZEQJjcViSslUwQhAkWE1ui8T3EyN0KWozIwATGyrgY8joxG6FOsoKV8SuMriYMTs72awiGKEHRcsUykYB2Q+d5WqhoCqhARmYBZjzAoPIjLV7JQbXYHM2cuX8zSJaDgcBg6Ayt7n6M+YYr/fz8m7zjkgAlO1BjUTKwHJwGgUrjzyLJlm0YfRmQUwIKARqCIZ5SpNo91S1hKSga5GnDERucDeGYmpppSAMCVlZKIsyUAOhKCBOFbRmERVGwnk2XEMFIQlNTUp5ChuxOxfglHkuTtzTKGSCgBtoEuChfeCa6Ynv3/XnUuL3YlOqZrM83ljy9Oumij17ok3PhauZWt+ceP9ADART24ePHbpQ7elm+/b+vbXT0+OWaRBv7VtXfv7/nTrxO7FmXXvGRv79W7vZMAyOgqFi9F77EV5xllbCsPTJ+e1jpOdsaVmuIn0Cur0ZRmamkIRhzLu3bFLXlR259763sfBddCNUd2XtFJT3NQLE29rL91R6wdic424t5a2mxCxaZpWq9Xrdh+78XbP/LDoygtfecOb3lZ1/LDbWyp4XIiIQgiVRENLSVohpLpBHK17EFERXNkqEWBda1SAgw+d9lQCa5yj4aKH2DcgIhADrMyr1AkckXdVqhGxdJxSg+Rd7MbQwnXrl8/bufWaV5347AenDxxL1C16zRHE8TAJ/eWZF71q/b9++qG/+Dv56HvXaFm3tl3zgX/tA401rcNbd96woTr97j87+IFfbpeh+1t/XA8EAnk0F43bMxGauaV9eMqxX1Nge2xs0/SGiwc7eFMritjyYCDVwu/vXfjQzC9/5uf/fudDDww3nrQ2DFs1MMA8cOM1SGG+3fclY/h0sc2ta8Gm5eHhwnuqyIGPBuiMk29Dy9RHxDq4iRDKsuh2u2PtVqqGhJiTKaNpjuxNaJC1vIRA6HHUOCJiLoP5T1JNpglwtA4wNAI18M4lNQU1tXpYMWMZCkOAJoGaJ58omhgREVAjyZJkx8LIOAFZIkUgAmiskO31+VnADCJqZmto7Xq3eVVyBQo2OqcbJJUoYghoSAbelY5d4MDsmTyhByBwUEvdYKxZgCGDA9EUER0FcamD2IhLODy3uegFcXDL1ke/Fk689OBFa4tpqnvDNgNQgqpiHecAhIboiBwxGjQp1rEBauq6riw10JiZOckbL8hZcGYKGjVmMoOZAQVEdEgIypwncyammkTYFCDvwZnZATmkHKqskIwsaTLjJqU6RgbPqeUr5xhD6RGxCK38jIuQOZ7ARAwoYAriCo9qDZonZrGIltAwCoObdRu2trb3+310DtGcd2Rg7BJGj/7awfgDxic2L/SPF+c8ubbfUewP3fRGq+tzvn7/5u2Xhfb5U4NxN/OCTT+4aao/VkKhXcDtm7aMP3VZYQ8tX1j4iy+e726svUeUFPfclTYeXTPcuF4OPlXS1JkCvNTiyWYsWu3IxpOtSA8MJ1zYwmP33LovXlusX7e+XFA3xmvC8pqHv3iq3Hly5pKj5zwDkkoYp8I8D41bHaJOWm4df2Pdfdtg652waR6CpbopezAD61yreOX+t21qnePQBSxmw6Zz11zgXTvMe1gCApsoWylGTGZAqYkCiEhJFXuDBlSIqiaRGogyUkopQmLMJCkCGm0QySCYNZ5CBDKsGcgSR6pNgVFzBV6FtuQBSumDRmViQR2lehA1mjyyiLjgwUBVMx+N2YslRnRI5hFAeeTJxhqAAArjQrHWFMFcCJwEEQWSgmXcsaBlthajQ4bc4GqKRgZACkbONU2TyzMi9ldWcpIHASWDHL+NjERkIu2yZYbOswJYBjBniA4igwGaY26aJvMdgaiua+8cjBrf0RkRBBSA1AmYmBkgEBBzbtsZzTnKDTVmIk1EkUhGOZ0ppXoEvBHxzqWmJs+esUnJOZ/ENOlY0erqIBiBZ3KhU6ceMQCEBOKMCDk/TUABMjYuQZOAGZmcFdKoRyCt77rz1habc64WHaT+plYyhBbrK+JXHut9JCzvv8geuxwe6q/M92p44jub6MiR6vxy9pf+4sT9j89sHsOpYvaNr5//2ufK/srasv1bOP3uetG5NRaHzoXY6MzkxKlTpxaG3Yrl4suu3PvkrhjTy9tTHaSVGDuu7Jpxacl6xy562dYnbgtk4qhBIVLyJVEICuSc/zTKDwr51368a4C/XaZPRJ+dVKDtDWtx0CjZ7ffctv/pg29680+eu3373KlTEQouQj5pEoECeO9TSrGuDQAdO0JQJR/89PTaCeqJYQ8nHXMzSIIAwrq0YFGqokQyBDUoG3DBOUccU50sqSZGmOx0HCNNznb+42vTH/rYWb/+mydn1ndXBrVfHl628zS1dnzyq+EL9yxKf/L1r3/4i9/b/ob/Y1e8eu7bX5sJ1b2//7vTw9NHV5aaMLbmPTec1ZyYWFqpn/fawzQWcFHGgQeG7CQBo8Y0OH7w8TR/hJqFwdKx+ad3ydze3sFDC3v3NkeejCt9nYefufnDM/ObHr/qttOTJ4+NP2WFwQmgFkkZ4SjBAtsKbPt4a/nLMDh9uj5+fGOnFKmNgEk9NAwxat14HFAtWAdsmqbp9XqZBsXBj/oqRE3SaDO0VGkSERRBRGMCNVBb1SSPjqW5qVVABRMDSdZIEjExreuoqrGRmBR9IOejSQPmifNaC4xENMYcYeQYDQ2yAxURM0ERmDJVx+CHQ91VNI+emZKNNr4GKSW1ZJJyk3FmnGaGTVXHuk4pakoS62o4HPYHw/6w6Vdx2KQmZoSFmQEhOS48tNoFoqMCgDvLduwtS5eO9cNTnWNfH39qTeRF77xqiiIVsaa6rjMcZ+S2HPmOSAmNXfb7mlkZApqlplHV3Kbk8WPuAFJKI7qPqKS8BRQxjSpiFmOMURTIkJvV5AwCzns7RgS1VDfSRAbq9Wt2LqUEoMF5N7LHuIyLy3TBkbqHwHtPzORdixwzR4ekVgqqZwAA0H5vhdBgtLlHIgIzJ4X4Zm2cuOzEWKX6wLbugJpUOTVqtKV79m09emilPum6dHs7PP700WL//cogzkFspUHvkuuWj+wf/+97rwEAPHHq5ByfPLDusRvXHt09afWAnn6Kyfdh6cyftWNrdWKi1tZwvJwCKM59xjk/9/srZz9zf909O6ZDD+5ZWopubCzROMHkeqLZxQcnH/379Xu+FqrlMJw3s9p3DDE0C25s
vPBTLnaG536I+y0A3HhkbP1BCBquP/ZL58VLg5CKAmLwZUppOByyYyYsWwUROecAbWQ5T1r3Bqk/jKCDullYWZ6bPz3fXR40tRBEGM2TETHG2FS1rU6YFSEAAGqyBCmqagOKIt773G5mjmO2qyEiKRIiqjE65xw5FjAKLl85I/mB5tXsaEeSez2xpJDfBWbSjuQ5DEFO1/3MY/SiU87ZCN+t/+vqdUSu9AENUhMlJkQsnM/9bv5e3vtOp5PHzvkayxfziB4TY55L55tLkuXbGQAQ2RRVCNTQIDU1AYJaq9XyPNKKa5Iz2igzk9EDgBGZXcgGgTwbyl4DUDNRMgDVpmlSSkSOEL33ScUFj6tD8uGw70xFYkQjH0yAjb0rKk0tRGJwqiwipMaECBJy4wtGNnpZIAAKKC1rRXNDwYJKqnVqsvPAI/ecOrGnPcYrgy4nYs8nh0JmpUMEbZRfu/Sha9Mt03YqJhwk/UoVJmBaeqfG14/58896eteB3R/5a33/BzeDr8wd7A4vP//Cv5w4Nzan6iJQBLXGkV+zZfO5m7d40ZN7Dg9IL6D2j7qpk8vLlbkaa1/61sbty+2tp7defu7ubydtQ21+qXZDSENAaiORpURE/jEun9vBz7N9rKJ/S36SSuYxHwa9QZPEraTpcnxu8cQ/ffwDX/qvz8dAylw3KTYNm5Iao610u6LqiYkImYBQAKnfGw4G9cxYvRhLQ1paXCnXls8/7+I1k+Xscneae2GYJJlhq8SqTCDgI0Jwky1m9KE1rI8vnaqrJd2y0297xtyhXvehRybWMlx49aY///edX//u4Fc/WOy4qircybKlE+dv2LJ+WB298F8/dWjLdnr4O9N77zv+iufsunvX2NKxzfvvH2/MTYwfueDioq4dtEIP6mANqxRVNCtaJTg5tvjUvmMPDOMp8jwUrSn5SRhbf/Z4GCvJNWNHSXzotbpjpxtXOQ1umicHE2Nx2n2Pyj/oXP+PP3WZ/9XXvP6dV/VPlHE5hrFCQ/aJiniL4JS4SkENLRl6AEIgTdLv96uqEhABNVAGJiVK6pEQWJSSQoxJCQUhmtaZ05P9HCJISgqsBFFEaySNsXbgkIm9C50iQkoaY4yg6NHXmtRMAJMKgAEKWAoeCnKpbvpVv05S1zE1Yk3yKEoWITWxghQtCRAoqMWYUpOP6jFGRCZyKakmgAgj5gChiEaxRq2fmtqkFqmaWDVN1aQkUkvdHfYGcVDHodS11Q0m9ezKUJSFd0WJVjZlWUQXo3Td+Jpl9+YDzwf0n9vwyFN4dLqZ6DmsbFm9d+KRAlAAoKx5FomqKWoEZU1CYETgkJo6ISIQkuBIyIJQpbjcH4hxMt/IsNam1qZou6QNMxExqU8KaiEpDgZ1k5IooA+NYkqaxBq1WiwBJiVBjojBY4oREQE5zwx96dgZADtDUkFtUEDFMClLREhEioEIrVBzCCoRmgYN0qDRqElMk5EgJQ1ArVBQGLokyTdXHNlU9nX3ZP/w7Mnxfo+wbKTZ+NiTGhAPHi+jJt/uff79m5clchnne8L1+TuOlB15+OZWb3yLa4YP/0+6+8aZ++4OdbO2Ra0eEptHKEPROvMnze2WU0fH/uqD/JFvLWvx3Nf90nXv+7PzPvzByV/5FX/BlqK3sO/AvuQ8oZJKjwvHME7luoe/yAv7qVrxi4f90tFW/2gHbGKw3KPylZvDH/b+7YL+CwFt7cHyim/C//nmu17W/VkXWbxKaWgw7koXbNJzSQEACaImJQMiiqa9punWtS9bg8FgsTvodvvVyqAA4sTDfnP02NxKd9gdNoNhbIaNNNGSpLqBJKhmSWJKmBeoxJhU64jeY1KpGmdWtgtzIKSI0imCsJhFgaguGiqoeDAUwwzgrZsYoyEOmmGyJsWhRBXAlJLWQmrDuhrWdWpsgFEsmagvQi2gQqrUbzSKOKLSeW/mCNBENaXUVLESEbEklgyhIYumCpDIATsF6A/rOpoZNrFqB6eqjBaACmRvZjEhYpOiQyUUAiFCIBSJLNJSiYjGjn3IYu0MpSHnatEEmKJqMjayZADApXOkjhRStJjYyGEgIgrQOE7olMukJKNQpqSaaoak4iC/bs3ZXy32jUZVhSaRNA4b014oVaVmJJcwD+eFuUEzA8oRDeBMfQQWMCeGolWUvi7k/4ypXwULY2OP3nPvmC+rCC2DZBVEfPhEa7HB8WC3F681DpsnJDAsDbFWu/VgcVdcnuu4otZHbvrM8fe+Dn7/+vbH/3TDo/cpU8uo1SoPHXngihb/adh61gC62FWwNdPTM0DjU+sHIMfr3sXJ/T9lOYwryjIJooKduq5OnTix8xoA2PbYLR4rBB/HfXv7Oj8+TQUSDocttcbAYTNQfGfBby/01TK8sz+8uE5JXG3keOPFZy/G5eHyQrM0/51bv/HtPd/vzIaGUyUqCUWiQPKOQmQOXsBiSrnxcYDaWzm9oc0LdQkAm7Zt+KWX//hlL7jiazffsf/h/3p5e33f+Y1pWKp2ByW30TcNV/0hRAc1jW878qpr15yzZfPVr3j80x/q/tqbZ1/zq0tjk+de+/wDX7t7odCp061n/Myb9t9z/7bts91X/3a4cuPu3/q1dd+9XW/4ybO+d/vez31o81jZGp86fPENFz7xTXadNFhZvOia7mQL6qqJ2got1DoJjnU6dWxiI2BGjE1TP31o34UXrpmZXVMvYW+4Ug0XU2q4CLu3/8B5b3WkgibSbGljS/5Yt7WyvT73sp949UULL2214ZIrLr/rjW+endDh9rVNNZdwkGWNgKo8kjLkvZFzjpCZsmJQCfPp0RgEkE2FHPsimGJT1Sml4ELuezLoOJMf1fIQ3yxFFwK7ACrOOcdeVUEsNVFVHWDpstde42qMPCCxdwbYYgdo3vtYZSCiRkt5z1LFpqqNgy/IoWNhJAI0BDAlZHIGpiKJpKn6mNNeAJSI6YeBaCAqqo4AUjLATGs1NKOkhpokh3rmt9WIXJcHA84hmVMPHqyfmsUAr1iavXVhev+aE/++5uHfO7G9R4mtLdHMBVwdDzCggoCamKJhbnBzjlGKadTbhxBrQcyoz2RmzK6JNhwOnXPee0Cqo6UkzAJGKYkYGELdJERMKdEqQmvIgICSt1+GEURMGtEw0sUgAsQ6sacsFsu8fsgWcAIwXO2XOEuNTDWLw0a/YgDIVi4zZCKHSJRMqbHgfOXwpFZre3TuXOfRC/v37uye99SkBJ75+o2zjz5hrZJOHcf+gRdfcGH7i7q4Vjp1rAorO+nSa3t77ylPLcPi+LqxpWPsWERXBv0WwvhEJ/Qb59suLVaD/g9H0BPbJn/mF9uXXqYwNr9xcnZ45IM3//Vrdzz75e//wK6/vWLqn//xs0/f8/jabVfsvGClfhosJN+uq97RY/vpE7/Yu/w1dsGLwvj6qQ4/88AT39943abi4HMO37rn9NyznrfjwbfC29697vy9dfinayg2qfSJkM0AsAgT6Dr9GLUkAMTkuU31YFjXtREOBoMitPr97sLCQoXgiIFQ1BCEmBSsX1cuoflg0TM
jeee9N7Ek0RUud4e5c2XmqJJSyusBRJSYLwxW1KiRFMV7RvIKSTUiKGIQqKoKVgFb+VrKPlrvnGoyAAQFpYzRMEQ1EDPPHIiReNRnggGY5WFMEXLLbqOLSgFGmVaZTTjywjUVGkhKeSkVWi3QRtWcc2aCWd5PvG3btiNHjohIg+wAxbImUsmREmoSzgGmaJm7CUA5XaRkn1LiIiRVkxRCMIkoKobeB4Ua4ygMW9RCaEFSy0lFIwoemqGIiUSMSmieWFe1FewLYpaYCJ2KqSYAaIYNAIllmhACGNoInIZ5w2mmIIQIZlEFDEUSeq6qquUdGnfK4sSRw4cOPc3eaRJVSYrouKrl6SWmbemLxc+uq/eeHtKggUGE0xX+40NuGGPRnsIbv1B89StrrUplRyEQckLBpu+aFlg4dPzwMybG/roY+xbIbd04AJ2rV1rFxLoY3zC15gYsUQYnm1j6Ti/1PFpsYkjx4EU3rDv8yPRgRUHVloPo4FhqpkN5Yvnk1EaNPc+hagaeOaXEX/D0kJd/G8Y7Bvp7ZedjRX+lv+HsbVe94ZVL/a6tVJ3ZtUeD/OPf/dXP/NyvYKu9tNxtO3ZJkzS1sdTqkAJSrGvXaTtQmly/dtwfXJISAH7ix3903brJD33yS5/9198rtP7G+OzywokrPP9OvVQGrJpUbTibLU698zcPfuHGLc+89Lzf+/P9N321fvSe+rFdl//puw8N5qoVe/rDf+bu+M+z3vKRvd/+1uzWdfE7t9RPjRenHj/8mheev2//uAP9z4/oeGvz2Gxsh+74xvnZc1/+9T/1dY2uPPyy67vLg2mLsdXiFEPbmWEaxmRKwKICZMzY8vzAw9/dtuWcDdNbm9qDpbHZUhqs2wMDAK9cqT+01Nm4dhjGtGXLx0/sPX7r5Gcem3zLG1eOb0qLu3XsnKI90ZyY92EcKiUzQMgKUURCQMZRxFhZlq1WQQyaJMY6D3ckmTdNGoHIAMuyLIIrPJspMTtigFVIRJZioYZWCcxN04A6x+SQfVEMh3V+sgTnvfdCIy0JMmWbYsEuB/8yYKfTWVlexj7E1JCBKqSULCUkoMjsjHm13hvkaHQTMjMFI6KkChqzOiyKEgXOSTfozRpSK31RS6SsWFYzXFVmZWYds3PBrb5l+2ZIJh5TnSI6LtBb7HtcG6fevHTh+8aXb1779AvirmeunDtfNMTgRQlIQQVMQVHR1HJIaZa25EwFDkxEdTOsBgnRMXMmdKJCjJJTxiUBIRlB3QiCi4IAFg0y9DCnStR1jQhVVY8GmcQiKiCgiEaMGEhQhZkQ0TkmouAKQqeSkxYULW+gQUHBTJBcPhoAqJGaAOY6AAYmJgrCyERA3hEzkikrJgHzbQUf2s85uOmJzY8dGe+fOBcvemhx5Y7/Gdt6YRxKPTjd/Z//mbnl+xhCbMSbb9fxGS9ZEqF79m+r3aA3s2Vs8ViMiZmVcTk1ZatjhvWwctiXZ77kTAGe+t0Pdradt7jrsa1nz55cv/2/yiNf//aRC8OWubs/UH/0ny5N8pp12w9Pdg7OPzWlziZ8I9X0uulOGMO55vQjXx/f9e1Nz7lo09zSw+PPmL/87e+680NhuPfZi01ccxIAHBwZlGsLHwDAVT6VfSdIiEvLy+PtSe9cGiQzS5CGw6HEGGMc1g0izs3NMUEyJXajs6mBZRUVATskhRRjt2kAwHnfKkPpAwGmqhopKrJ+cGS0Hbl0ADGmmGNAASDGCMTeKHfeycABE6CioYmqmpGiri47UxZbIYxcSTHGbIVXABd8zuBNTURUdk4IDNASgEAIDhGLoqjrGvLaOCu00WVizEh0QCSOLIn3bjismb3GFEIwTZkKGYiYuWmakydPVk3D3lsOUMjCJQRmyrHhSMSIjMg+E1XVzCwZAas2YGogOZ6N1BgQzDVNoypGObOTkZwag0U7c+xefTOzgKQMgOrYmyWxrJJTVCdiDWl2ThKDJHEuiNnItWaQVab5SzkwzSIsQARIhIjo1WmjPnDGv09OTx584pFh1V27dq00sZY655aunShffd7y1/e5Q1dvXxcfnR+Qmd59hN9/Dy91nQP1DVgi3xkPU1sWji4W7QHEYVlMKxeuAd/gynkbD84dH2/8m9za10745bnuoZNVpzy1bXxtOTYxv7zS7S0XFAhSCA00TN6T4cGLXnbxXZ9qfM3olDCsBFhTmPJYMbkLBjX2U+qhtkQTEznnqicbe37BfyXy/qr3Ymm/s3P3V765/tEncbyF7WLct5/etTut9D/8vj9/49t+euvOC5cWB4U6xNTY0GFbJTnAdig0iTOw+vjp9ZcV0lkLAP/y//5rv3da5k5vnxifGNu4fLrbbRbvd3jkulec/f3vhNmtF/z3vV9/17tf9Oqffezex07vPuqPme45SmjS1Hs//Ln2z71tttzQufz6/rfun//EF/Hg3pOHjq5pnazmhltwcmjD9hTHem0samcyBJV+/+6rXupSvX3vHfVwQL/4e/snpsYXawikwEZSgaC4GsQxp6hEXNcVe4qxmZhsHTvx1NLK8Z0XPG/D2h3HT59s6nqmWo8IXJGM6XCqGQ52NxOMNQSxjU/0thyK5drzJjfPHAN66a+/b49id2UherBsn9CRP4UAHRISGEIRAhGoKjsOIRRFkU+4OV86Sg6/Q0fsGEXEe8yRIIw58BJHsQFgBiimjM6zIyKNCYk7HZfvpZSUiEQiOc5SjrpJIoLOZ/Q/Io51OqZKhHVsUpRB3TRNo4CeGQmQyRDzMVxGZB8gzhsxEBE0G01zVyEh+UZXE1UlREPLPS4zOecRkdij8wBgAM4574uciZv1MoiYHIgDFaAE3lHQUCAut4rrB9c8ePKpWzYd+uT0Y89J50/V2CAKMwmYAICY5ade7iZXBVMuNE1jqiICas57QNbRCZmco3wGJyIKhQAkRVLy3sfReYFFs6TMxaTMvixCjDH4thmaQDIyQzQZxVeA5VjAvJnLNpXsg1LU1XVg/kmZ4siKldH0lFMhDY0Q3KiEM7P3nhznr+CJI0AkowInhn7BpYvn1607PXHyLDuwZvGZX/zaCRUbIDaV76w79jcfHJQ0s2Z6pak0wuzZePbl/fvu3eak045Vb3rz1IndTYwBIISi11STaoVrJzkVf+Yv1vzc7515mBYJ7r3pu1rWO66+oDe17b7hHMwNv7nr/jcvh2Zl/kC3ufjK62XDBfv3PA7VickWULWS+mRFx18QZjedj9MbCvZw+LG73/Olq7/9z/A/fxE3n7dc68TZ0wDA1F9z3gtwx/b+YuXXttrDIoydEoncX3H94UClkmgm2vSF16z0+nl30OtX6HhQVaEsYiMiiZnLIoBRjGKmDES8WlQARGRYNSlpYNeZ6AwGPTJIpmVZiogvAgCcMcsSUUEcRRDBOawATExSamB0bAVEUUUCSdmcw86FM/C4bL/JA5WmaRpJuhoy7UfcaQNARRjxswBbrRYzx1SXZdmklFJyWUZAoxsQgOxMgDGSQH5cYIyRSAyiI1aJ3vvchSPi4uJiq9XKeV9ZnxyYVM2SoH
fMfrTf1VGEw2hCBiCm6Fhig0ymqaokOKfAGUJLAGKkCmqKaE1KrImMFPGMCCNX4qw2NGJw7ES8QmMo7CQJIEoy59nMCEd8PAUFwNzxE4xCPw2UwcTUwBBWZWgGHhDMqWqyBoAmy/LwU3tFGwUbDofouI6NI/qJnSvTpf3+dyfo0vbXd8u37ipPdenoAMASGLjCJVPEcRf7aW6hA65O1LYJkcLHfipXUvRhif0gNTEeanHblVG6FyhrGhv4cu74YeeoDK1CmgqGJB1BMJXFrZcNx2d2PnVb5Imy6TZqUsb+wnyH2l3lOyhC6pQM4iBzW5xzJREQ0nupf9uK/mPTv3PFv6Mzd8fRKStM0jxKa3yMp9YQps9+6iO/+H/eM7PhnFNzSyipLJ2kpIbgfNTkDJ2B7bjqmTPjN+1rJgFgMH3uxpWF427P8eq403OkSKh04fZLXvu1r331VW/fvnZqd23DOz+hh34nrN2hS7dMnFUUV1++dt3EoO5c8MZX1haPf+cJmZnobN/c/9qnynEKw7He5nM7utgbDClMxUEVi8iaODWDdpv7xYFLXrH9iW8NF+e3ffAfbpu6ID1wB0zP9ABbUX1BlcIE+C43UtcIHEJhViSVVqvTr4atdhljvP/hb27ecuE5G3cK6OWHrvvezi9O1usX4tF62gASVGniCFi/Pvs/pwatqcmJpnfg4IVXvrLeeUF314PFVIhVTrFWA2MAVCPIz1jHwXnPqxPX4L33zmXjvKoqCKdkCKaYof/OiXMjFbRfxayfuSebFAMiFgxmTB5bFlNCMlM0s6DKzgGAZXiEkuNgSCLC5DDfKgatVukc17EaDKqUUvI+xigiwXEiZUJngEYCCmpOUVDMQFJKlM6kDZqp9w5H6EpIqvlDQIgjnYh3wRM6Dt77ApAhh+WxRyZEZKT87BuoIRk5iCUUKdbt0FGixODpZ3ov+17/P/Z2Dn5x4r6fXn7F0/G0byv2kcwoaSOiJmQAhia51hER6SoSiIm994Nh7ZzzgSWZSFJV731RFJUaGBlAAgRyqqoCZuZdaSpKaiJlu/TOV1UiRCgI2fPoWWOkgpBE0CubmV/9rWV1DBEpyOjZkkXugEhI5GAEGciu4JGmhpUQ0Xtf+IIz3hDJoXPoIAVEALR+6WxQt0LnWXOb/nvznocvGlzW340Wmu2b27uPDoqJ5c78eFkMNRbR1PuLX3C4u1w+/D3lOEdg3elNZ+2703sfY2Szuqm7vcHmzsTR3uHZ515nMjhTgJtqntik3W4GvYnT+/BY/8TcU/d8pXv08ped86yX7rj1sxsf+Pxxuu7CV/6CfPgP6rn9/QsuKFJsWnVrUPL6UDvtfP/e21/7d0XdfcnXf19SU5w6jtT4+3YBwLE17vK3vqtLYyWsAA1cZ7JeHKokiak7rBU01bWqLSyvNLaAiP1+37uiP6hCCETUJC1CEJFRSqZlLCITQeAQVWSkPEJCNLNGEg4GTZNyfLuqppTYO+ec0chpGtgRETWNYjIzQE4opuoSKkJEi5owieGoCuZam1IqsipV8hhGjTgZiGa2BZfOs3Mj0zQAmjISsjOznC7nAy8sLWUtWF3XzD43poLoVptLkWiGInnDxcDJSJKBpuS4BADTPFTBLNFSVQuIBoaaiAExqbKpgvlsL1RjxhxxSETMWKk69hZNkxA59GSICUChLsqg6qwBAhOJmiqHYq5ARIcjImt+nYgoq8xXRENPBOATsWrjhM2DGTp2CKMTiVFCy9Z7BCRDzlscAOecoiWVDCeAUZqiSY67dgCWBt2VfXufDME1TeNcqKQuQ/CWfv1Zw8/v8k+dlvNDa6kXTxxjBEVHiK4x9S60yEXXr60lE+tdU9fdXvBxaCvOksSyREenBw7dclBKVW9xmZmOoVLVTNlkcAwoaNzz1oYJS92CysrSvsuu93Vv++P39mPPwAlWg/GxrR/77GPv+Y0Lx4oj3An7D0fTaDo2Pj4cDuuYMUTgnAs3lunZJJ+smpu78Ceh+PdZN05LVx3WZ/fM9/ye4L8WPvxXf/6qH3vzy1/zur17nqr7qRgrJWmdIisgmWsV5RzytrHe0/0CAJ61M+xemnLL685rtXDrpB4c1BPp0sMnT3zridk/+ENo2cJnP77eQTFu02efU9xz0/ynP+y+971jx5daj333wKGHxl76qrBtzVk3PPcHt97WZi7ceJqEzqETDa0QWyEhsnIamlbWmi5iPXDFwe3XvOD+j1/yvUfnx2Yf+X8/sGW8rFL0vjDQWlLLFYhYClK73esNLDamqAJNLSqWTAK2fOEPHtk7OHX8wvOu42bHcx976/cu/bQftgbd49IMix5SlLNvK9bf3uu1sFfRcnN8YeHgk0/+IPpW0bMADZjmsQmOOC6GSMwcioKAvC9arSKXKCTHBMSGBkkjUaNgpmgCzjkmSJnG7nBUfpGYPSKCUdnK3lzQDMNyVJCdyUA1w5GA0ExECM6EgYtzLnN/QM0F16Q6JO9cQCDqDQY2yDutqEJKbEokgpDDw6s0zDsZQGLyiBilGfV0ZmiIRN5zRsalFMk5co4cIzOxyypKIpcJvbBK+DIEBFDVMfGNNi6BOtSMi/WsFpddvNjO/ZHexV8s7/yse/JFfNm2sG45Vk3RYMyTc0nZeqtmKs65pqmy9tKxR2TVNKwr51k1gQAiB3aIbACIXFVDdgzGgE6zKwpBVK0RR5w9l4UPZtbudJwLwECOFfKSzxwYoolEjVE02qpDmph9CNmIbNl1DQo5fZ0pLxcUzEwyaiBv0RgZADhvLkkVkJkdZQeqMroklRC1Ch8Jrzmx+RtLe7rb5btvvfLHPnZPe2rsqIitzLf8WBwMeXKMOq1N25fWbR7ceP/F2ml8nVR4MD47uXwizzdU1ZFb7g/Wjk0GoPLB7y2ef/6ZAjzYd+zSl5y3ZcO64w/vX6jWxP5Faza/aWLjcw+fkFNnw563/cjE/JM/9YYbBh/6k07vOJ5z/ev/7p+/9ifv2Z6WFji1B+XOfU8vdnY+9ozr3/IvP99uaFi4w7KyFlqdZQYAvOFV/Suej6ePFePrWNLpo4+L9jTFBnwcGx+ePK710NQiFJrEOTc9ObW80puanDQA8UwEZpiBw8w8EvIDGkhBzpsm/f+oes94u67q3HuUOddau5yuakmWJduyLfeKCwabajqEXhMgCRBC6s1NcpNLbgqE9EJCICEhJIBDDcQ0U9x7t+UuS5as3k7dZa055xjj/TD3EbzHH6zfsayztfdaa8wxxvM8f0HLgamcBfB1Xedfe1/me4GRnHNEaBlYK4BqBRNwESQlw4zVM0+gKjnS0gzAiEeOWAAQEcC8VHYpWYgxppSJgYzEDkFUIKF3zjkkJylBdh8gcOFTChKkKEadtHMuJRndGobsR4snAIhiaOZcEWKfmBTEMec6P7IDqJNkgJJ/dJKExCBqJgDgiBAgJRHKZvfluTEhESUIRJhSYiMzJXLMrpbIjosmsoohgWFEAMfKFDUVZmpGRAyERMsWPtDCOSBvKJbAoSEwADeslJda3kyQ0HvOe6KMxSYAU
GNmBgZAZjZHZOaXzdYAUVWjmjr0VAaNZcHP7tq+e/fOqZXdMGwsGTKC2rvOHKzp2p/dXcYYyVfa9Du+HKahNzQzh9SNqXTxsKWWQBGxZ6k7fYKFXlX3qaokiRFwhc0wOvIdLaVQdNZp3BIGdQcFfCe4vu+xlRySq9qp1a56YeeWq0584keDLvdXrxi6iQs/9bl//dgfb7zsqqWi89yLfuaNL3jFZ3/9LYeGTadoiaaiKJqmaVdVCMERoQg9S/zaqcHv9PQPw4GX77RkOIEKaAj1Ban+mWH3T8du+N43tj1y1/s/8Bto7X1z+ybak2kYyTt06JLvHtwrra0LmzZtAoDtjx/btHbwzLHxXuhNHB5Y8NxMXFWl+b/841X/9JnhnXdMfunTMzK+93c/UD2xoyP95hN3l0vNeKubSl71klcvjrvhkzc88O9/suqOu7A7VicxIEx949IqWBpyURTkxtNwtr/YA62ffMtHlP2L/uAjcwcOPnjdDyepRWNVObQCUS0pMBj10mCi7DYgiGimAPkJqK1WJ2m0GCGVVQUhLjy7Z9ulV7zsbcNfXvXE1B2bv3tkclw0tdvV2bdtvlRfufpj9r2/+5M1DZ9x9kuf+WBRt9tcD5MCAOgovlmBnAKQITE757xzY50uZpsvELPPw2EzEEkOWMQnFREzMgCIKRHl5zSTd46YiBx5RlIA5zifiF3bq2qUqKiVL/MNNZp05j2KM0cuR0NbldefpqqOOKRIDY7uAcMs+4ixkYwNTtaQSVRkIMAoBqOmjUSsaSIzG5CBiSkBK4xCCPISC1S9L0buDnLLJ2IejatRM5diNLMyAICE2lh0ISVDAaxqFrYaGjLYw7M/N7z8tsGu58aOfmZw28fd20F7JpBMTdVUNaZkimqECJCOG5yCJrKf5MsDQD6dx2RkCuyQ0BETEkKGGkHe8xFh6T3ByDQJmJCw2+mWRUs0ZtoEM+fsAyIQKzRJiHUIIaUAoMRkpGIGo6UzGQjAMnYZcshAll5RnnAyo3OMRK7wVHgFIUNDVFVRJd9E58G4laRRHpbQ3SWnPysPrm8OvOWc/Z+76+Sb7gpTHSfgmzoVnfrYQmdq7LxLD+zb2Z69bn684JpxaXo9EJUHnpUYseCCPKrUoVkymfInzu/eNaHpeAGG4dJzDz/71R/uv3m3Lr7578l3u7I0WJgvN+IQYn/9i3ZMnzfYWV3Ka89LYZp977++e8W997fnjzYc2wvVoLvwH7+/7ZQnbjzjye/VhB1om8yHjSf3Fw4DLPRfcn7cc2CiakGnOHbXg1IlqJLUzfzhZ8dm1ieIwdTAnMJ4p53XCqtXrTCgqFL68WY4QF+klACV0Jlh5Yuy8kQwXBzIcuqYQzKzOoZoOjE1aaJLS0sppYIpIw1GUkkDNWN2qomIgMkxxCYVBoAYCAyhUFTj4CDPbDLNN6XEBGbICAZaMFnhsikgR1WAmhaECJ4oxZSD1CwPaUVijLkdJ6Ls4RGJ2a6Wd9UpIpKM3EoKABxjZOZhGJaVt2SOClE1MEQMTc3Mlqt4CCYGbJpBwMxEgAglUFI0zqcOynapPLBxztUp4mjFi/nGYoBQFTFGh44dUxQH2KIiJgmGx81+hEYEnhnRRDSnw5uJ987MwGEygDRSjeSge4HRlsobZUeWZU9yztBkigasaIqGmEaAYIumMUVVTBBXzUzfe+NjYFYUhVeuIQmIt/hbl4UvP0H7abozXqNrNaGXPKj3LnGEFEW6hJaGLnWcpziYAxy4gWuKobpYBqq4IKqwXkgd4uHSYU0AyWZWdFJTDdolhSjJpdRpau0UwoNjQxkf9OZWbTx0yuUX3P1Xg5/74Mlvfd11L77q1DVnX/HRa/VAP540ubRhw/DI7Ef+8M8//bE/mqv7DDis60ypibG/ZcuWfr+3c9eecjF2/6Dd3Omba/tAAHOYWgYEsIR4iAe/v9Te0dr55JPXX/f1Nes2n3fpxQf3Ha1c1wzJO2ehmYD5yvrb9hFsACpg2xPutFM3hCNw+MhiWt1bf2R4IhTxnu/1Lt+svd60c6FV4J33lW0J1u3w+HAcwSIB7/+r/9M/9Fw7hZkSOrR2jhYmUzlwTGNTVRjGpjesF0Ot4o+l9Vu6V1yy+sMfvvXYOWsP97e/452n/cybHxsOV01WMrTkqdHBCiVDqtla5JckShPzzFOWsT8xRnIYucnbVYBqfjB/853/WcCqLdXGV6Vrn5p/GEpY609treuGzXjembR3YubQ0r5UhWF/Ycb7OiX0hURDtJx1rgDgCJHQc2a+ppgTTYgcI1N2xhORQ58HQc45ZsxxtGajq9OI8wwyS3EIiJkAzBWegEWUiMqyJRZYXf6/IB97R1wWAyTnvWJWOSgXLCIORxT3lIKZldXIbFrXg2FoICQSCKaJ1Ct55ggQQ1AzMVUCSZp50aqmIs5x0sw/YQUlwKIsNSOAFMkAR8LLkQQmr6Byp5iHcgAUbOCAh5QwQCSvWihEZ4lrXugsrdfuzx153sfHvv+jsceunr/jZXbBHp3PVXYkT1MjRGIOdR1TEImIKKI5fD4/9I4Ph8HAOSeAdV1XXCU1ESVPBJrMyHKcooSUKu8KX3rvylbF5LwvahtN2JxzHkkzEtaoInRUkUEgSCqaJKgAoecCkRFzbBwCABmAaJBAmMnOmmswERXOISIQIRMRGuTOzBA4xRjNVUO/6AdAbR2n+PTTz39421NXvvTw6Z2DV6zv3Rj9HESUnkOJ/VaFW07d1xkPN/33TOgqMHDi+elVANA5sst7H2n05AXHixony/2nDod3HHPH6+8nn117cDEtFTSzYm0RnpUFngvoyOFSHdVmjz518sruMZn7ztv+YmHVme/98R+M3/f1edo42w2TIcQV7oZr/qLXWfne/3lrGs7OROmR777tQ/vOuXjdprPLdOGSG8IY2dj40aeegDIJTzjuN4PeDMtkYYt9K4kRwfvkmEYH0KLQZexPNdYFY62UiLLNvfKF914lQreVT2BkAGYpCaIVpctnzTzBZnYaU2YAM6AaMFJRFFEtJRFNtcQKSRygohchIGFA1CKSknhXFM5JspRSURSqqSx9v99nzwXQMJuEVJnZIYWUyrLstNp1XasCEtUxiKkzFNO8llo2EUC+QXDZbR9VMF/hBMyYQswda6fVQkQzIHXmU0hS+EJzuhvgsKnRgMCpkYAZsxKYiUNijxABAMTUFFSVCLLdllQK58QQkKMYpOQLD6IEVFEBoGbJSAYxgioSkDlDJeIsWEazbImoGogM0YGKdYw0SSAMbChYFEWM4n1JBCmmoihE1QPJT4WGjybZgOidSwYKEQ0QHVIiUISyrGjAzvkQ6wceuK8zPrm40KsQy6oVhd5+anPiuP4BvPB5V6z9/tf+B4kgNRIjV75lzswg1Sva3Sr1W1AEHE74Vs/hZCzbw6V+RRGLFrh+bCYqhkYXL7p8xUd+r8VMRX/n33+qvOXHPSjHQZ5dN+Mmz5p87L5mct36H92085N/u+vhvYp8+Yc/vPcbX1i7Y7ZoTTfP3Pb012/Y/Mo3BFpbr90k
+t+rT/mVq/UwwlhXYJbSCfVdyXChKDqQvsbVFW08dPvuKqV+48f+vnlmHKPw5m/ef+44GZ4/0Ld23cNDpUy3XPk0ePHFtNsqToFIrYEUaQVqu1tLxqDGapWT81kVqzuLhYr9f9WOdzV36woG6YuYC4vnrut5g7xy/Zc2Dz7aMPrLNEYWH18tf95vRVV+3/wO8e+eI/7Pq9Dy50jhutY5qg7QnYoOLQAICynpaWjDKTkWpijEFQiH4MA54SR9suFCVAjfgqVlKCqG4DQhWjZzAUx8MsMFiUcWWLouiaCzEFFVBBFQMoCB6EB3k8GiSDZBwhrnnQIhpAY9AYg5YcGZe6qqqyNFXVtNks+2UhQQIzsQERgcAsIqFSsDbWmE7XHP1igeoGmlzMbEOg4FGlUrWKBoiNDGplMqmxzrg4vTLGWENEhIYcgKo6dkhonUtSZ2BACiIiwOCcgwTQkK5tPcZaYBZjwKK3rGsPl1gFQCIRiD26eCFUFa1FY093tk9HXSKKDoox9mrcojS6QTwrxgEDKBYaIQfkkVXEoCGW4AzYjLBjh5r/PXnJ7z717RNnD3340c+/7+LfPDA/k1tWtmC8QqKKooVzjXan+38ecWkzXTy10vDLf976s0maq2m3jn1BGNLVPxv68H9r/N9JMzdpl+F+ONiZGr7gxvffd8OPFi/+xz9++a/978f2zbXu/PVLqvJLN184/rmvPPm+Dx869+KFj//Zeaj9nzy9es4Gc9+PGa5N0tQmI80FXc7QVgKlVxWxZBX56ErZIDhrUzp0yiyyLLgNv3ieHDx8atmPTXZ9Z2SoWOr/8X0f9X7W9OsNJysEhq0m2HVa13Hb17Haxn7V1Q1jYaldL3W57jfrTKaWGtUzvf4ffaF9/rGvzakPF9xCGy5qmRU557V8wS/GK+qK5bkjPxybvad16IkJ6i73etbalX5vYX5JzAjWi42bxqAOxXz38ZGdd3z7q82X/cZZD32j1Tu50JoIqfGVggaieh/D8aML15+9+5wX3PTkrd/ae2h1/JId2dbd7Kt+Q5ANct8rhSRjCboWZTXSkAZ/lzgHhbXyN6h4DhhUVQXWZGIp5niD1xiIuEjUnxLzBsGoVx55bj8Ve/EMVNfpsKwIqkhr0vG6VqCvwaIHtTLpwAoiokFiepqgJSIDmAbTqQK7JB01UJay3B6zLiRZu90OZZXneVEUELiR18qyH/rekk1qeZo6R2iRxKAIn14OMT8VERYJoFAookmMYVYDHLXgBdEqKWFVBUR1LlXVubk5NK4soNlM+90Vk6b9Slf6p8CZsiqyzFBIvGBSgbEi6iX4BBskecGL9dz1fHE0qYru8tRifWuy4YoN5x9rrBw3s4cWnzx069MH9CdHt47dv2lq5NUbt62On33IbD1g/GpNEklNVVKTADM0odNP6mmaZ6u9borOKZYqABDFkU5TPSE6xxmupGMlVaSAZaOenppd2H3etc+5+cV33f7t+tBQ8EqErebwStV7923Fp1/ee8HZ2Q+Oi+EcbSKh3HnhOQf2Hvqz5tQvm+oG9uUzezpJM5x/fvcb/0Ff+/Joku/7peenWcNi5cemtvzhB+a+++Wx9qnsm/9Vp0ZwsK4o0A5NM/zLpit8a+rI09+q+yQk3iFQD6FG3/nm13/51/974QgN2GBIKKAhhLSCMVE6dXJi+nB9ccYhB1WPkhCXY7Wq1xWSen3EX3RlcdZZtbM3FZ7w5AyyDxULpokUdOpwmD3qlueKpx+vJXlAUGtxbGJ45yXiceXE3ej7YCAxtbJY9Q7s+onhP3r+G25q3zl//JOfa/zbdeUX68ceALr4a8Mv2Do5kpTVwVNLjz19TLRGXCkHTyZxJrVJUZat1vDIyFCSJ57sfOFxaH0H3cPZfvvEOzNxtcWry8bh9viDgFjrbbzH3Xpe9oKp9kYYrp08/MTWi2/kN/7Rwb96y9DrfiGdXK/cKbAvVc1gZZDAWFCNOog++H5V+qIUBGEVEbDR0ixal2hiXafTievUh5BaV1VVz6hBIDIgioKBtEysdwYohmmtOMRhcFSXjqir02m1DKTwIh4KRFBRkciCQWFUQWOIMBqnEJIz1hi01gpxq9W4+sqr7rnv3n5VJkmiQet5jsGnxvgqlP2KFUwIYCgy7eLugNaQQTTGGWvQMokzRp0JlY94aEFiZCVS1YwsKVo0hGScMZkzSogYcciDOpgIER1miGTTxGWpO2PbctbhQPgPwBIABBVktkhRb8SIgahD5L1VEktJMuA6M7OImCiAlaaGDBmLhuJ+GsuWM3a6iL2JEBQVBaWBeAKeNvpFrQATTFCKyigba5SysmRTJExHq4WXXnTjl35478O76avu4Z87eN+6DZcsFidcQg5GNDrVSD0bTh/bc2LRQxPaBswb8m9NmblxXBqlhQyquG2zUh17/zR/S3f02le+7oa/+7q+43nn3Xhh772/cOsj8+U129Y9+v2n7nx88vkX7Np7/8m3v2nL5+48ef756QXnNM6bsvfsXdq9e5JuX9xztJORK4Oy16ikbQQQYa7DU+NpURt/y0fveukFU7NLq585MiP5xL13Pf7lH8yNjQz53KwuJn9z91+e17/zGcqG8nY3pFnotIiWyzBMo1j2u1Wn5TKQFZ3YLZ64d8zQyNbFlZovAqYbatxvTj6+9VdSn8rKCbt8am7uxOYvvf2aK54Tb+vKpud0t954fNer4Dlcm9vbOnzH8OE7cLbX41LL2ZHxsdUkHD701Parf+GCF55zX/1o+1irlX6oNzV6sLUBAtQw7SgPZ3gM9bwR+75fGHrfP71gk7u1ZjurQxeMLR3tiimTjR1dVJMY0TSUsuZLFt1zZDD14WhGeLpJrAAswiIUi6do2QmDL4KqEiiRRQWwA0slXUt5Y/OZ19bamhTHT8VgAMCf/jsiRBIgD8Rc448AEGx0fwAQFRWOoEwYlNsap1FKpAnWWs1QVraq8tT4sWRhdbnqVcPDw/1+n1kTNAAS+mXuHDZqhYa17hoiorUWlEIlKkjGiAiYQWEuyiKEEryoeA4cGAENNRqN7nInEotVwUZaARgvzAInV9tAOYEpoQRDTkDQYWAHRCgVBbDOgsUglUDNFTZrza4urUtGFotq0mGn1m2bamm+K6VuhJFzWs9fHu/M1hYOrj5z+PZnDtYPnEr84xefC9sbZ8PYrsezzSfHh9qgCZYJ9IeSVa5qfXYBRHyPCJ3xoYLAEfXOwoBgnS36Ia1b0VLJgKTWOpYydfXO6tKNL3jBvQ/+KERPuyoIAkP65X30zmP9v31huOTzTXJBbE19sX5kasmemK8W/kayCvWStMW+75++dx00EsACdGtNiqJP0AtLtPKuNw0heK6G0PUTa2nqySZ/E6qHy/7ojmvXl13dd48Yh6Ysur16uq7S5dmTh0PVV2Blk4srGTRJuOqaftmcXxx55lA2e6SsGYaUnAsJ5T3vegVWyBdf44e3+q1T2Gp11NVHR0K7gsSKE86TpO31qUP99MEw84SpVitZNWlOHZWEqlPzxbETdPARj33n8tAPBtwwq
l1dWg63/qZrzGPzQgDg3lwKvZcNP7h9HKZ33bIidGym161tTBojy2nianVOa2yyDrpgkmmyAcwgH10jIsrCVL66EGrzvn7y8PPeUuuM9YeW2nZ52E8dm3q8vljPi2aa2/lnDmy78YXHv/zq2kpncaiol0HKxJiOIKq1VdTWACWiXllUVVVUpa+Cc2kUlICYMIKqsO8FjV6ZZLywklFLaVAg5LLMXCKgKtJQJBErir5yxBbIe48ARhkVPCtbtYTe+7Hx0aIoyspH0isTSvAmoHG2QmENOVgR8WWZJAkRKSoDG5eqIcu4utK954GHiiCJyxHQkLrUoe+L2MqUzhoRKYqBlI+qOmsZgVWcTa011hgitNZaJPYysImP3iMAIQRjLRMY54JqkiSUJJUPzlhDBokSlxg7EO801trKIKIlcqd5F4pERDhQgI5wUBFBEauGGKwxABCYBUFETJ4iYgpgkKy1iuCFRNUCOuMCikECQ6xArNEuhqOGqgIpKaAQCuoaT1QhiBIimMjTVCJFMCqipQVEtQSsGioXv1eu1gbXxN/b/vrX/eSv+8/d8dHbvvlPGy/oiCOxIaksqkFXaNFCONZVI81mj1bLzo31u5zKCC1bZUbbllqG/WVuLcr4nlO7L7r+lpO+3hhb/tO/euD33rrrw3947qe/c/x/v+s5u7bbq8+fnLD0lj/fe9516z/zl8//0cPH3vXXD/XXbfz0nYsmrfKtzQf3lZbaSaE6sq4s55voSrAIcGCm+KN/mnFD+WOH4d4npmvD9d3P3bB48Phnn+xMtlpO0+nZxQ/e9/4Xd+++3ydjlqu+E1eOqF3OhhPyPi1N0MR5g2rzIXnkMYumk6T9AOOwvH3pxANnnZv9/6h673Dbzqref4zxlllW2f3s02t6rySEQAISitQLShexIcpV9F71ioIiiuXHFbxeC4iKIIogqLQAgdDSICG9nySnn93barO8ZYzfH3Pvo3c968lzsk/WPuvsrDnfUb7fz7fw5IetEcZyTuUZVJUe33Ni27sW7/zk1uHz6DWTf9hpzbTPeUFx6IVLl//MwjW/bIuV8ZPfmzl9tzn5aDF3JNs5e8//PPzk2Ccm7/oLkz/w6M/e/btvTfu3nGw9eohEWYbleoineu/8jXNPPLn27eWrf/KNf+6e+ZW9xZHkBK1fe9nIj5Qf9I3NKLcEkaMIsASGZrbMAiABILL8l9YTRDRgZHHiGBgBgwvoUWvdKLbQauCgABQAQFSASpMCEoRNbBo3KGnQSCLiG5qMIDQb3zPDZdriUcetzEvCJjClkXHxFqRWgiCSaioFAs8RCBQQCkoEq22M0WPANGEE9iHTahrGeqZwLhDaEELtpKgLo1TazdfW1pIkURSsTogIlEaBTNsYHZIWjkluNwZ9BhMZUMNqvdGSjonGpMo6XwUfQYsLaAl8QERrbfMmWUSCRPaglFKqqiqlFAcmoiQxSlkfAodgdcoRahG01pWjhNrOVRRxsegZY/qoMCIh25iUhlexXlsvuqtqH03tbu981vQNp7Pjy6b//fd9avbGy6996zV3ZSfv0Yvb5n1yOlx9esf2xbxsU0SoDZIFqh2wUxgi0oiRItfIKFCV9RR5dKhRmYSilhC0wpRVrIa9zuyObTMHy6VFIVciA7EiTVH+x7fSO36i9z8u2PjgDxIyifjyvgce4YyMbyuRz4K+p9PuFrTdtK12iqOGpGa1QY70mHYxT8dUkpZB962at3Ck6q1VpacIL4tjL7lpqL/bf19f3ZzInTjWnarqngYU7z/1sY/85M//cn+jHECFaUxcp7U6bC09YZ58Il8euB2zkqdx7nSagPGpA8Pbujysefu+5Irn5MP1WPTTOAmdMTVRBDVReaIwAqNMsR5++L2sNqQsR3ahhhzViYUIt9qnDtu547E9xlXpTZpDSQPSEEbb4clQkjaLAPCn0x+tt+U9mnKYwioAAEzvgWlwwhSc8zXGKtZFigzVSNUl+UpHN1hbRpGxVjZcXfrhe35XjcV6doOc9u1R1+0rREAJMPfL1Sx0EyWFuDFKRemxH3/2iVPHWztn512vFUdWa2SDiJtGWEQXgwveBd/4EBp5glLUNGHGmKJ2zeCLttB0zMzCWhsgyfPUF1VTobvIQMSaHLAXRkOoLQmwR/GiBTVS4CiE8ysrBglYcpvUjfapYchHFmRmcEq4LjKbhBCMMUTqzJy8gVcWRZFlGTNbpVGjNQYS9C6C0qrBZGpd1zUqSpVmESRMjLXGaK2tNgCgrCEBgoiIpCIAaK2bG1EE0dqIiCZlUXHtldFN4a0QWGKTztu0qs2xrdUmUBNgkwoEwo14qnFVNkIwZraJaX62FGPzxabWQUMKUJMSBGDVjO+UUkYbEFRb/o1N0ZuiBovfdDmNkDUiNrP62ETT0ebQshlfbo7ftxom3ho0Rl+nne4zYeXig5f/1NfO/7tqdOvu9c8/8M2XXPGjC6O1LLiaTApMSjlFUPsCYglBox6jgaWgUABNYDhWz56XnrQYWLglp2wOH/nHR79yfwIdPvKhIzc8b+w7D6z/6ofu23+g9RcfeFgZ1zlr5n/976e6SbkSKaiJtG3ShvhEZQtVqVLKYGywUInptzZC7EfhpLvo2lgWMctie6qb77Sj+uFe77HxyWoD61Dk/+vpj16YPHKzRkvlsvdpdxxUfqzeyDtFrJxC0ZkNENDEKvpElGSdqQvPefSBe88BOrjxtftV2+mlgF1tSumowdxiruJUR1/24glJfvdM+3f//R9TxcZY2T/rgXebu2uavWQ4fsnS7stPHHobcMg3DvuddyydukUKwrnZuYt+J9hoKlh+2SfzWtZO7rO+/KkX7+KRcmNLz5woU73wxe0XLWd//qdrH7jj0hcfk55aP6YYnOI8SY7rrFedBJAYJHpGxOhjg2CLgUERAsZN8sampkFAvI/sAzMr/E/MHDsmItDQ4EsjIqCQwkjIIIJARkmIDa6OFCo4sxNGQdjcI291ubK50Pl/OuMz82nYTDdEAGBNwEIstokfBQ4goImYgUgprQAAxRFAZCDM0ywh70i7GEwS0szWdd1b37AmFQZQjWSSEDFGBggFRmMNRfDeG21DxcToh/X2drtGPHLq6bWNlUsuvDg37aoYFfWamKx5e+w3OYjNzUcBQuTKe6VUM6NCFseFTVNu/A9bdgYistY65yLHxFhmJiRgIaVQYKiCDtzxiEoXKvalkv7I9mQvjB3Ipy786d9LutY8oH/MXmi76tHkxORlk0evLu8d9s97HHcft3lle7BOZmIjxVYdKwnjwGRVFRL2wBAr5JQShTpwjORIKc0oNVA7XVtYGvQ3bGpLV5nESmTxEQjvPlb+7cPd99ww+scHS2qNsStFokb2wNJKV5kXR32JHgIkxkby7CvQScwozVM/Kom9H1bQDpntoFBwTneQ/8zhoVb+4HWndv6aXBrjCwv1JVP+6YiQYowhxPvuufPQxTuec+1PnFg91WWS3nz3+DPJk0cm6n61fVvx/BeM1cat3dwbLhmoQouTi1/OfoMzQxMp0TiUQxgW
blSSNdTNM5sFJzrr1qGyCXkpYmWAXJuyUQy0NO8WjgGwb00WVE5SN8ZBMHboV3U3idpn08mgiycO+YcPhcd2hWfa3KuL4ZG11peeObiyPqfK9cqva89Gp5ntKKPTblenSYyilc3GxjAxkz5ZO3VkaenE2PI2Hu+72I/Gg0Cvs4CAuk5Y4o5wdtUqU9OasdujL1qZOXTgBY9+/g/x3AviGvtWNTIYEbXWVmkOEQDqug4hRA42SRgEmOu6BtgkMRVFpZE8Sgxh84BstjsgjcRqY2PDohJSItKclAoVMKAgRJYm96C5Eq0eloVSylprtOYYlTIBUBwHBVEkEIAEjrw1ruUG+lgHjzEYYzhE7z1rY0AwojFGEwqw1oY0Eesk0dYY731duUZotknACEEjpUmqSCkiYwwAIGyeV80kQIVNqbHW2hrduIBM81MS2Yw2YuKtu5LaEkU3g0HY8mAg4paBiBp01xmwSdMHg6BSRCI68pm5HTOLIhFRpESkCQEkAUVEumnNm0gqINm0miit8Ez+Y+PvbDQ5/8+sEBq3MTczf4DwXwALm1slY4tQKVSLw/Lnn/vzN3/mV5d/Yvt3ZPFFrm5RUpPT0USshQVBR0EdhgCtEmFFpmdpSEgYa4t4IFkSkZJtYLXQ3v83H7wzmTk0cbaqoVgchU/evDgxvfvOY9X3Dg+TMV2FyYlQ7r+sM7/RyUdVJyEZRenG0kMmqajSqLFelvJKMaV7L9q94/DG+hOkyvQvRwg0ph0wII5UO6xWPOGiIIHeNzz5pecOP4ukG/cLIFMfBAhRuN94VUXqZvXZzOcFCoBF3C6kiPlfpH3HYNJq9qWAr+PUbp0a6lX8PW38f2Gkp7tPTie6H3/4YPRBGa+eiuYLskayqiBpeZOFhRwWEsE4gDdXjz4ECCAFxNPV+J+x22YyvNNlwwD//oO6nStzrutF+PpEcjfomeE3yke+BcLYrExxS3pHaNmwi0KbcZ8hcB28EoWKmjgQQgBmEfEhRudjbHJGuBE9KQ6bxHXnWelGbM9BPACikkYzpREASGGjBqHNLndLF71lZ6It5zGc8RyLiIhiiFv/0iw/ZAuwRZvjZ5FNfAcCYmh0C4iwKUJEEQFFpvKBlDXRBx5VpUKy2uRpUnoOIViTIGIzkGuM9WM6HZYOkWL0EoRAsaZawyCoOg4++w9/VfbW5p9948te9nqbtAoofO2s0gK8OaVHbN6SMfbQoUPf//73W92O2pyvh1Qlm7PurRQyFCEAoxQzIrDVGGMkQhDSSgMyxai0FoPCbIUsIyvyMbLKhvWo87SMfLE+aWa6xcJILjPbXu6u/7nio199+ptLY1fEN07sXWidf3hyZk6m19yIvDZpqWLbBgAkVlbA2JxUSqhEWKACBUIcVGwn+ejkBoeIbWNNpkltLK8a1ENXTk5O/vVj3VceevqvXk5/P4t1a/Spt6w9sw5/9UD7tqOFRsqNiWgEtABh9FprHyOEYLW2ANGFTpZiZXwZAlEna6+/exHPx1b9PBLbv/RrYFkWMb7S8+OSf7VtjFlZWekm6b9/8gvPvuRF09mkPvzw1MIJPX+4HUMvyfXV13aueGF9/ERIVC4TvO8coIDnnOdoKIefjqz9rl2+Wsb+IKwu6Lzlc1O4um1TRgoh6oFnCqln1lSU0Uxud6O1sVCVyGy625yt2Gd5p+6dWrn6Eq0wEsaU4pgsfXLtuX2zr+OPsqueGozlxb7/WIcd3Ux1u6BmxrMZkFYNG2meDV1FRBr0eGecrF3a2Di1dDKG4aGzD9bHzUOHPpf3psrxtSg+ap/2u0mR29XOiU+fuPpV1+KKf/yhf/7oF770tnf99+dc8CKz7kerKzw2uzZcyUZkNDGRb4wxACySJEmat3uDvtaWIWpSSZI456qqUsp4jpV3pJUxBjUMh0MiJqLRaICK0jRVTJ4jkvIx+BgaIpZVWisdY3MjiCJQe99WGhEkxLLyaZoGCc5HRmbPqtFqRgYCJqyBQWB9fX3TfrP1UEqVsarrOkkSAmlokYgCwFanzBwjK0ZLSkTAWBFhRQZNY4tq8O5NugMCISBgg+WRZo9FRMjcrMQIkAR8jKRUjDHyZvHbHOpNFUxGMwiAxMgcRalGogrM3Lg4mPnMcbi5bOOtX28pWRFRKdXcj4Qac3XzdjfvYJt2wCgAoDQ1eRQgIMLCglsnLzfDQpImzab54hkb11ax3wSwI+Pml9QoQk5GkpHbaKed39r+E799/JbvTj39/aV7nzV+lQ8qAqYMtQ8qMhiFyhikUSmHdz7/PPfxFZiexTmUkKt65PUGjxVR30tX5DszKgKS5IPUWl0mnlRMxvJuK8/celFEMPXjJ1uUVBmopYqiH8BijX6ElE5NtqbnHr98/vtxe+clv/Mb3eHIwc61cv/ScPD4I6ce+cHxY2ddMr3vQJ4nzzzxAOHYpdXiT9//11ODwWndzr11apR4W6Q61URWiwsqsa6qvKtyYoSUXRRNxhjHTpAhoGp3x6h+hsY/cv7PLO49h1dXX3H52Nn7k//7uZGPKwdO3bW3vu3Mh/BKdf65zzwRVVHZ1HhKmdZ0oIkEyhIjH3l9+wuvPoGrPwtx+8Jlfzi5KGGc+i0F6zNY2dZfvxeFpyH/+evOYq7Gu2P/+B+3fX2tvX8Kj83Rcy+nmw4tzZVJGgVQOxl2bGZ1q511J2jaOUdE3nsW8b6uqkppalSEAJtS5xijr+rm9D3jHhQEoqhDaEYogTZdeQCgWGtHRCQITWaRUopjRIGozrh3+IwouvkUbarzYVMRLZu1OZ3ZG29es4KIyHUA0+xemGFrbBOFtcLI0Ai1AKmZVRNpJIcYY9RGyJBzLsbIbLjymw5ABBHxMRAgM/SjkyBGwGpTKOdjJODpTiuy/uq//9vUWPecKy+tIRnVLoemNqMzNeiZS4MRhmXxwMMPpa08z/Ner5doQ4TOORTYjOuJEZv8qOblRhkyIVZEwABKKxFRpJufCRGNylIpBczaWgXgKaYmLTgmrY7V1i7GTi+Wuv/Ria9vS9Vb7x4/cfnSHXc/5r9PD79l384Ld+4YTBw8mnTmgUZcsCqkBuCcxlOWSHU0ViNlPjVMrKJLIiXmofvuQ+fStDPoVaDYpgkF1loPBoPDxeiDd9kP31T10se/TNceGo+HJuCmA72P3W9/7ZsaiJQ1AbhypUWNSkVmQh29l8iUZNDZxumEysaTsVnaOb790Jg+Od0unlu3jtTqKUgBdgs8g/gmrr9Qg0Cr1cq2bS9XBx/9xEd/81WvlyeeGNs4olhKHyUbp+2z6bAoTz1jjIG9+8pnX58c38h4oMYmqqwDvaKVj5fKEES9ui5zSy1mZxRpqcLQaIxViCSSmEb/q+IaYV2AQZ2KsAOOmsz6RjB28affpNO8M5tMa0/g17wvfXVKdF2LtXmn5/ekO3bvGMtPzp9ImJf6yzGsjOdjo7o0iUmSRBlT+0Airj8qwnCq3d6xfe/K99s7d1+5eP4DemVHObFWZBu2122ryfAb6u4HPoP9UQvbPzx2x/j2ydXDp8dfYMJ51xRSxfV
5IYnBqLJUuBkCKBAFIMaYiRhjmkMGWJyrAAAVRWFiBoBGrqWQmrq4WRIzc5a1yv4QCZljlmWEWmcJK4xEmlSDceIAEsUISGQxehSdUWZUlVZpAqUwSIyR0CtkZM1oQUnkoNFz1KKam0Jd1w3lgrQSiXVdQ6vNzEQUglhrYwDYFFMxIjZ3KxF2IRilowh4T0TC6MBrLYgRAWIMUdjF0LxKa40CrnbaGOdcBCSiyjtmRuEGYKKUan4RQTTH6IOweO+dc2eUWUQkSjWzvv/q6GjwWM3gWoiaqp8jIyLQfyYck2xqahBRNsv0M9EI3Pw3wTkEIdkMaRdscJjUuEo2q4S4qVAFQVFb6YFNO4LQlB25SkK/jIGhZZd78zdccMNPLA8/NP7tv89/cMFwbxRL5DxxFaOUJRJxJB0V+NHJfa87+uS3LmydHrqiHdfm48xanQDwXzzzyo2el8kOGt5YrDgAj3qzmBc4RPFLJkdKIYxijiauiAjG0Z6W2jWR799H1529/dkXXfrKt73wpvXV15968uETBk69ak7vYQ4x7p/O07e/+nn3PPWOp25+5OH3/pt54L6Dx8Kr+l85/+hnN6qNSsYuDsoD1ASdmXHXrygfdwO2GGMVq6G10SbipOmIx7I2zajFjWjrQEILZTY1cygcu/bpP/733c99cPs5zz2488u/ff9bRgvP6z1yOSw9IFNn7tdDXtrLtg6mjBFzndRhh/MwNyjBmNQeO/FM27+wNf/7a2f/LW5b6c9Ke13BJGmkWFA37E+ovP1bKy8/d+zAORd2ZWF5fccOfU65tDpr47fual+rd+6fLft+YL3oyb1JjEaU5aQsCyAkotpVIuxc7agEAGttkxcEvOloCJG99z5yCKG5ipsKz7tojFGAjFG2LMI6Gmr0jlo1L282TQQY1Oahvrn53Tqz4lby95bxTUSaEG7V1LKbuxIGQNFai48MzLxJ/GhIqyQQHQCLOuNqAmxsF6AoyZImQElZY9OkLqu6rlsZ1QQxxi2LI0ThWNcmSB2hRFAomjCzcHpp7tHDD83a7lte/+NXXnfd2OSY26i/devt/aqXWCVBSbNKF0EAYfYcBaj5u9feYTFKshQFog+kgL3fKkSCUspq3YSpuOC12pSwMQOD+BAyokCAnpNIBjUY42KwpFNUXBajXCELSmWihHbCWgPHbYt6lg7JSy7fToNLRqcXtj11W7U4//ETfDC77dUzM7575Yk9O47bseUWMGr0IRMQNswabdNB+cASATQeOXIksSZUZTUcBa210c4VzNzpdAjk7ZcNAsNL6n+9LXt1boAFaoY3XdV6Mrnwi8cmOelgaxLTMduZUe0p1Zoy7WnVnlatSZV2/2u9wrEOK8shW/Gt+aPPfws8BTCFMCXQEt7Nqos0pCzLCl/qJDxx34MnO53rARa1SqJNOAizcnHDrYqh9PLnVnv25Lt2xRJ7vkqjDRMd6K+o3GBm635ljx7zTz2Oiws6Qmwpq8hXFVjdbXV7YSiVtAh8MVTKBLKZTsNwXRDJurhQLLz7Db1zd2kxE8JuhBNdWUsxkBqw4FrsAsgDcv322T1U+P27r8iVLTQzB4bIIRokrj0iMnJvYTmsb7S77aw9Oeq7WTO5/Wtveurp/cvPfmguPgb71s/fuLz/22riZPWWn/zvH//Wzfcf++ZLr7vhZ97wC6XNC4Eds1Nz8wt21+7hcJiQVAYUAoeoIiukLMus0ojK1VWa5s2qlQiYOdQcgxB71AoJkyTxjQM4slIqSW1Z+eGgkMDKKoFIBIiYGNscCRwisKDQZs6BSJqmVVUpgBD8+Ph4Xdc+hkbKiVEUI4koQhHxKMGHNE2D88zQaiVN1+i9r+u6+RRorQPEEEKapszspU6SBFia+07giATee0Il1KT2NscYxBCaQVgMIcYAhEHYOcc+aNokRzKzxv+EWFWuBhYjDACGVJN9RCFUIiUXLFzXdYWVnNFIa7LRNAfhZn7RVuuJsHXKatWU3s0960yXDLDJWGhsYRogCjfn+uYF0GziAZFAIzWWwUYU1miGYRMGQhQ39azNcI+2iCjNH9TcLp2MQFvgyOtiU//02pHXpM+5b+n4d7bPf+f4Q9e7S0eJY+8KhRvFMBVyAYPy7QQPwODv8994Od581fAz3VzEtB/p7fv84Mfu8OOOoxkuQemvnah2HTh4wVnpNQdngoYk4EkuF+cJIVeaCcLUuNo5NZvlNJGWZMc2wlkF8Buf+5z9//Glx3C2N1gxX/zY2Fs/yOsbHoC9f3qJ0p/7tY3tR8bXTvzix66f0KoXcYHJQJon4nU91KjTsWJ1w0xOB+c72tZeqIopO00+kInBQ113iiz219lICFWetoqpdjVY39gztX/Xrp9cvFV631m5p3d9knbasqrsYyHtdv9zBL17DlctazRGh3akVQWt7lQEJ+wghtnVq1uHP11P3DJ/5S9RlLoFGzNRXApV0p7bd/NvXTXsUXcyFgDzTxZv+di9vbEdk/kKBuwX1FOjv37S/K/uoB5FpYn6PbFZBU6QiXRzlDrnmMUHH5o5cwwhhDMMDRHxkUdVGXxERFSEW4BI4QgAsQGvb1GxlA9ExBJRq2Z0rIgaIUhzISihM7vf5jPqYzwD3ICtpQcAE+mtbTQ17wcbHI0iqnzTLYuIRxBCbArYBuu29YjMQhhisEorpZgZiTQhG6MAS1dDklRVdWab0zz6IAYNxgAoTqJG+tw//MPiySMf+9y/Xn/Fc9cHw/5q/9jjT69uLNmxdhCPm0ChAAAk0GiwNotvRIvW1QEAFJIxpiiKJEmQt2xdELnxJggASoyMsQGPNT8QFogGSRQ7jkgkkTWgc85HDkmqXEy1QVBQO5/6tLJMrSIZaKfK+RO7h2nZ6e5r33De467efezb20/Ndaul/sqDZ62m59l9C+y+J2WLd/bbghRBeQqsPRCCJIntFv1B4cs0NUV/mBrrYogugFYWSUR+7BK7bxwjS5t6e+NTb5h5aJ1mejQdUcNu2Hnmf0GxHoarYbQqxZpffLJ4+o5Yrte9hfP3zT7zyN3rC8dCuVFfuwZ/4GGbwBRACjC79WJBRJQgMcbV1VWtNdikM9P9yN13XXjR2ZlPq13bsLeQVREpTffu90keRalWmrqwlmqtU8q72JnQIx9Gw1CV/MRh8/hDYfWUjSQmKZxocEogQqiLUS4oSucqGamsqEYpu1jWip2Yjj25fPLV1x9/zfMOBKMD2LXxq+eeuSVLsRa77vNKMma8df2cZ4qMYUFjIjWNBD2KzhTGqKx1Htj5SACWRuCnds7YzowisEmirO6P+ufOvbD9FxckJ0899e8fOv7kQvVA/MnX/tyf/cvf9Fr8W7/8BxcdOH9ubRnyctt02z34JTOxpzh0MCtEEqdqba0GTVEYibywiy66sp1mzjlg0VrH6JlZGSPIUEdjksrV/X7fKD0zORVj3NjYqFGcC1lqjDEIYpVeX10LvpYQ2QdkoYYSywKRQQAUueCt0o1HphoNPQAQ6dCY/NETCERkwciEZMzm0dLE7jZztigcnAdCpbCoyhBCNI
1eWovRTVZxCIEUNtSLuq5zm0XnIwgRUQwkoJVCRGNMCCHGIAhBuK5dqANwxRKU1jH45sLz3jOCRHYS2QkABIybdzdEllhSyZGrqhrFUWMTag7gerMT2MTl/2dUnPDmcUvYHKXN5RtCEMImwxEFmikd42YXcuaEbm6yuDlYbOybLLFZLgMTaUXNAay3YF2CELfSA+m/Rt0giohTkBV1rcE7SFzUENcJfzF/7rHqln+ZfPLqE2etxZjUXKm4EjQG7Bra8FliYnsKv/NVes6bf/bTX05/J//Am07/xsNrh1oZFckws+XGk8u//obWG3782lHfa8wWe1Iji9Hjxh6YkCiBgdLYdqHu+WptEY74jfGpKS/Q3U75Dx9ITj7Rs+NTnan+5z/df+27bDYp7DkUXA7V2Pknr7jkuk/+wtoF5/UfOwI5tYJY32JyNBrFG2+68Ou3fONNP7P/K39PMV8lGut0jBhfU0UKnIJEZTEpRIx1dli1JscHKNqHCof5enZCHvXcaWc2nVVLJZ4cxKxaz4yDxf9kQffTKo1pYTBV2jsZCwmgi7GwoPrju75502eVPFHte3NnjYeTqJ2ERGCjwwITD73oyUNzE5PT//rFh3ftPviyZ3U64xNrqHePw6pL1mW+nYzPLYR7FsevnTi67IzFrC/rHcoFiSg23qGqrkU4hFCjCyEwSDN6QcSGqNoMY5pxKLKwNMonIaLIYVOhwJGAkAVEkMhLMMY4v8leZpaGpwZb9JumZNzclYTA/2WCS7I5iD4zo2bAxuaLjRyaNtfJzeCtAYIgYqYTZg74n07iGDfrAPYBN1OtcTOgJUZA0YSamhX1ZjlORC2GOvoIFCSkSMeefqbf7z/7ec+ZnNix0RsYUmRh5sBsa3m+KFxLJU5i3IJ0NqQLRBJmEVYmcSEk1nrndWpEZHbbtuWVFYAmfwWJtI+img4k0a4oNRIjAQES2EQrlACiCJAFUBjJaA0sSEzAKakQ6kEWiNByqthzshq9VVxjmq3rwL5qDVU+4snJs15fHXrBN1bv3zl3eHLjeFY/qU9ANvy7645cHPW+ldb5cxOz6xadHmJV0cDmbrTUCxDEahusi0ECchSBoEiHEG7YWQOAJiDxI2xfXn1nUpYneLkT11ph5Uc/NvAbCxl5TTAqi/bk2MbGhiUNoiNCiNXK2oG1UyeCi76qJ54a68N67EWYEIgA2wFKAQ8yFHlUMm937Ntx4tQprVIeudF6sTxmvhzozTr0j823aSD5ZNlfz9cGCrHCKk9zb1SSpRllcUwpmRC36p95yp04oY4cLnrHrMYYJUbWSoMXJI0SFDIHI9GNqA4GLLCmCKRLSE2oRlO7F199PWUt+etbtDbqA3cdvGj14v9zxUIZ9DBmPe7c4Z538/AaTIxRWEWXCnsOBjUyGmRXlyRaA6bGeg6T7fZEqxUpjmc6S30NRT6mTh19Znlu+dWvfMX/vfvTvYufes2N7/z87V/cvmfHz73m5zut6ZPLS9P796qF3r+/+zem5x7LrjnbORMYeQhioXK1UZqIDFAoa6VUYkz0QSnFyE071aCUXPAJYl3XgNDIoObn5xv9UV3XWichsPcuSYwoTtPUGBM1OZQieh+aKxi8AomgBTAwWMOw6atoGO0RraLm6o0gJAIRkZGAQ2iygbX23qdp6mOItd/qAgEgljF2OjPlcLT54QLQpIio9r65wLqdzmAwjMygyBgTY4TI1hjabAI2FVhCKAzee1d5MKKFUVEsCvYhhlDVdZIkYshzbMwWAYKE2Jy1FZeReTQaDfWQGgYeISJo2QxFjlui0ObAayhaTT0hIlrrJqileSEZDZEbXVUUjghKNtvZZnDddAYAgIJRYgRpwsxQSBAiohLhJnH0/z2AG82ZiGwGKG215hBQkhj6WlMcqRS9FHhs53DHC9O9n5957IHx8uynXH885aKqKzWsMAQ1yWyzkMV28LhY6hfftB8ehOO9PVmWyMhLVMDpmKku3HP2kw/NgWDpbdSrLON9GSbOrSoJmBmoPUSUtLYxI2FndnPHqVLA9ONGBCI2I19IKM23PgmveI/tL6nYZlt/b063oZw9fduOC5419+jxyYFIuy50ZkuNVNY297pSvWJYw+4P/lmVZsfe82uZ6aelWBdTrhHaoDSH4XqozIEDdUz56DOTWKStCahHKQDFuUw6Q69iGGCKoBNRusl93GoTEgGXx1SVKm3hSlKOF8pwvtHpfOoXPqN99cY/e+O//HHBCqdXYbU1vbx3BOPL9rZXnPzms9/97Uf+6Y9fedXBmUvOMd+448iJ4XDP9PYsqfz6qdv+17Wv/6M7TrZ2fPuYuWK6m1uOI8BMl1QH4Wa4AQBlXUbmqnZVqJmZULkQm4xCH4NzjrYSihhEGpU+i2CovQAhaoUcRYQEFQOIcIikiQRGo6GIECqSzdVvQ6tFVMwceHMFyyHK1qnZnJEAACjNFQEAgTfjvxSCiGgg0ko0NX2zJQUsEtmbSjbhVoAoQhhjVEEhIwpobRDRNettIiAFBMwcFTkRo5W2ifdekQH2CWlQeuSqoCnrdMqyft6zn3vBvoO9uo7ALLxj154XvWDme7d+u+TCcIKIQhaA2QdQgADsw9jYROVdU+u30oyZEam3tq4IkETRJlAohBBZtNZ5aoAlTfMo7Fgix8QYkJCSdt6zRWJIgILzYJQkilhcYACbxw5I0MEHZuVaFk1BvcwF0paNDTBClNUYsqI10TevnL+gTs2p6aWHJlrfiffEuPrQxL33TyTfOjh2aLD9ksWpPcfT8X4+IZPfP/4IKFCJqkfB+dgdG9PC671VAWDm4XB4hpj2vvW3eSFLjACOMQjddOVLi3J8baN3/PTcefvPPnLqRJIaHZV3EtglqTn21OE073ryQRytk/6K5pdFmtNxKsAkQAQ4DtbY7lcmS6hW19eJKPR7VSs550euTCd23AK9V4zy7pNH5Nzz8dA54Fz95OOybSqahLgbNQCtOiJYW02GvugvJvfcqRfnO62kECVMQGwgYuSAxEoRmQpC1KNMk9PUBhsrrimmEdAEqrV6+ZVw4LJD3z869bHP6/X+8MSjT5yavKHi79y89qzPrr/g5LCVZ5SQ19qkSiOBSEwDBiLNIIA7pmfWVlcjxKCU0SZrT3mR1KsBDgHHEsqOH3uqv1Zeds2zn376oUNm393Pfezux7/Og9ZPvO5XU2mRH7R2nLV48+cWPvme6QN78t27Rw98N152SRmdpnGUSkQiQ3NPb1KRADiiiiyGLARHoMrhIEmSlCAYoEgQg4rakAmmDogBo0EbyrKdQdJO+8Oh0Uk1qjyLJiLRmkiBAWBAzjCnSAJRaxLghmaxNdJpHPuKmQ1qJKEmJwc9RyXCRBTZSfC1OGCkGD2SAYTAdXRg1NLaeqjqhHRNqJEQmAQQODKT0Q6QizoQaK3rYRVQVGKH3psglBgnHhFi5ROyAFRzKEKZIjnnmv2KSPTsp2enl1dWUsqZmSUyiwL0MaCxzFxwYJHAsSgKZkiSBACUUrUBqVjCllFBac+RAS2AUsqHYIzx3otIm
qaKqF8MG9SXAIdGJobkvTeIQAoIY4wo0AwMBQBRY+Bmgm2AlCGvAAMTB1RaKxNBvPdaa6UphIBGx2E11un2feVi0ACpsXWoQVRmLDM7jgB9EkiS5Mjw9Nv1c/VY8uudj71j6tnPXT142o8c+VFhcrNnNa7dODM+kAgAnZjaZAwAtpnefLZtJE7A+KHZMbsteLU0mNdiMQAHSey6Z98HUUppdI4p+pCYXlUk7W2zR/7uI2e/4931jrPjCLZf96aFu++ZQjHRsFJLX/7qjhe9HSkZar8Wxp4p8KW7qNq2bf5r/5iodu/QdHp0rpP4qlOX6zyx7wCN0n7SuvLOe9XVVzz8jl+eed2PHfyt999z5RV7rjp/VSbcrTcbvZ7LRPfD70+v/lEaqnjsB/Nf/2r7859ppXvY98kkfQkZBETyLGQrDBb+y4MMV4FzRGjp0lctULXFCPKZn/nHsjPztg/dNLO69us/ae56lXnyptaxAxfh3IQ8+2bedkwS/+43XBqK5UeP+n+9/8S3nlhLxw5N7nELx8qXPnfPfLnsQbtAp0ejHx7feem2x2owEANKKyrhYoSpEUWVr1nE126U1MwcKyEB7wMieucUEQlwZEFk5CisGsdOHY0xHBiDV0o1NR8DeO9L1KoWGYyi28zfbKo6G8kBo9XRB1TkOGqtlYAVapDpERAArNVNaehcZa0lgTTN66rGLS1CIDFIRhkvHAm01hTFAoGiZmXjnOvkLUZwwQshiYBIppNmC2N0AiKh9l4LM6PSECIAjEYjpVSUKFoAPHNtFKKP+3bu/qVf/pVsfOrwiWMinGfJwX37n3js8P33PTgzPc09X2uAyJm1VVUbS00mfABeHq3maZYmxntQCpVVgpAnbT8qUVEARAQSNlobY5yP/WGvlaYueDIWOACAcy5VJkQhMkSU5EmoakvkogOJCtEmttMZW1ycT5LEIwnqIJHYpzppt7P1tQ0EDSoXYCuCSVmL6pHvOt51otM6MvOY5C9/5HmnDrijE4On8sX7O4/dn+r0wt37l7vPXxvMPbROp9d9q+2M7Nwxu9wbeF+rVIcyAJpbT7feckk/SsNJAtuEeiAobfT0eR//zS8NC88IG6PBXbd8768++uGHH74nQWTRSCpEMDbrD4cxxryVV64OfwJ6zMbnBhgQMMM4gIPWJ9ozT2xf7C7LSj9J9NDqK15749KRU8u337zG5tPTu/77+Qfmr7oWL7pYry6TnfBp1h5thHohmCRppeVa7Z96ys6fToqREk5Dwb11z7khUSCRtSQOFQEbRM4pGwFFHdNRrLnn0pbxyTAZxVFhDpy9c/bgxDcfm/v4p2cz1r1+OTYxrhylFObd2PxoPBkLUotHiXVtMDYrBEu29JXN2hTrwXo/NalSqqxrRZCQ9t6PlFeDuF4W5WChHLqde84LdXXkh98+Fy67m75avAjePf3+qk2YdYYb60c/8Lr2E0/s7O50J09vPHn4ond9YH5q59GlORkvg2elFCBH5s3E7ghKIUuwyjKHGP3k5PgLb3r+LbfcUleulbYdlMZ2qlCE4Ehnvlxv5V1xYazbtalVSeJHA9JIgApEaWZxxpDWyCxKKYwMXpSxickYJATmGHHLF5sYABDmTTsNMwOpZvAFACE0tbYGNiLCURRwJAGtAI3W1OyCAkQfm0h6jm4zqRAhuF6vAtZEVDIGJqJYV6R14Tn1AQxaq4dl6bgGEReD7WYDVwOjRUEALULAq3OLVpvSu1aSEmOQMKwr0kYJi2Mvnpl94E1DV4xaa+eczSwAKMC6GkLjNSRk5qBVCMHatN8fNlO7fr+vtRatt9yGcEZ5JSI1CyOhokYf15QFqMhHUYHJ6CisI7BCJjRIzEEjIW5JuqjJagchVJ5Ho1FEAKMoyoiHyhpmiCqISBErpZRiKMsyoNxZPjA91s4vmLnre/dsOzVSew/0+qcG5RUhZRnitjQCyVKZ/9MP+6fc8lXjcNH0+mNPnZpIWzG4/jDoFg3L42v1yICyaDjIECBg1Jq01uxDSXFCd9eGzs901W3fmVm4R11xoHy8EjJTz3v+038G0YAvQGdJNv/g2ve/MHXTf++snP5umYyTv7Bt5t7wC6v3fPv8mV3pWRcUqxkPjmnVBajjcKPcGL3gE387vOWuU6+cMaFz1SOP3/2pT5PxZ3/pq4/dfNveX/jpyPT9d73zVT/+U09+8dGVe/5latv506btolFJLSrPjKrKwSD6Nqgxj7XRSfh/G2DXSTqWqrUQEUCHyE7JV9/6D6f3XfnTf/3q7tpjUk0g1m/8Yve3nv493nt5G5x77Pz6nR+Ar113YqOzdpf+k387Nn7O/vZEJzOr7VHn0LYwFeubbzl92baxmYmV+59sfWt9/MD2nSE/ORF2oZR+5MAozQLeKwEUqb1j9FqZcjhK01Rrba0tyzIIE0sjn7dZ6qtSaxVjFEEXHBEBBwLWpKIAENqsE9aHLtRBGp8hGlLiPArXpDyKCaiRWEAi++gii28yPZk3TfCYxhgDR00kghGgdHUzakJEohhQfGCjPROSVqX3FAWUCcJC2Ol0RGRUNRT3iIqyJGUfC1cYY1ikjsEqzcS8Je9oFsxak1LKcyRRW0UDGGO8qw8eOkuYH7v/ER/qycmJdtY+dvz477zvvb/6a//zkksvPn7idCuxzNzOW7V3IURgyWxi0HLtQSDTmrRyMWitWyZbgSpXNhHQiS2CC0AYRUVpJUkIAZWBUAPLVHcsa2en5+dSm0QBFnFVJZEbN3+zdI9RNjY2muva1xXpVCmDAnVdgoQYIyIohZEZFXAFRpNiCVyOUlgWloG0T9c/snCWHsO5meH9rSOPuMNL9r4nxtRTu7r5b+6ZfO2Pl//xZHbLqfmnT7WU+BSiSaBjktJ/+Qk5eR3uGxezuVkAAGQ0gDR39i8efWyhlbZAJE3br3rNqy+98vIP/MF7vvalzxsDIQZNCccAAEmS1HXdbrd9EfDdGC8E9ULhfSxvD/qLduMve3X3mFJKQhiG8vyXvnj98PETdz+STowrZf7j9DNv+OmfHLv0Wb3+aMRmvN0qhqv10sm8k6pOty43bH+teuBRvfgwEvnCKm1rJiUFKyscrY6hjqQUJXUVooBKNIVKKM2dTXSvnu644IsV39129TWy7fz1r30jW5wbTY5pwNhiGvVWEgrDMvRHq9PY9RAMUG4NIEel0jQdrPayLI1lHRJ0dZlpq0WLpkhQRR84oAJkXZ86NQCvs9yNjlxyzuWHnv+qO773tfGTO3e8bdfCV/w4jx2/687wT38wPjxO286LS4v9PN/3y7/Ref3PLt3/RDTWK7aRlFICIBBRaQFwHDVpo1QzINVo6ro+efJkZG51WmVZpmmOWmId81bOPuTQ8d6TNqPajYoixtjqtCvnnaKooZFBNaKhEEIDjgAEAQiAIhC29MbNcasUMDMRACki4rhpZ3RupLUFQhQTAgcXtNYqSQW8C4FEiCVjzSKOsCQmDwyMJKHJAQJAgRADKkQfwUVqbLu1U0xKq9GwTFLT3+jliQXY9D/URZGyJmWabKag
QBkdgKrIFlVVVdiUFEoHZhAiBkQAkbIsWtzSWofg6ro0Jqkq17TCzrlmj2ttSkj9UdFut1vdzqAYhRCaGXtd+yZ7XIECaMKCWSNorUNgz15pi4gcI3JkBIqiEhU8i3fMDKiCSABxLIhIoJA9CjTrbUEmrROdxCi+dqxEHGgyhlRdOAUSpNbW1DFqQQgeIgeUOvrlx05Nh2r438797u2LP76wc4lMKWvK7A1RpR0ZejPbon7g21emYRzaCQWTV1LnzArrklXhjPahDlEUEJHnKBJ9FOVIgiiFa9BXQRlftT76Z9t/+71ulOmwsT7qpzsOxWdfF37wQOya1MdxnFj7978fu/71j5rseD95cXtYbQz2X/OyR6956eD7t6ytHFOqaNG4iV4mxstPf+L0o4+tdMfat9+aZPll3/3i6WL1of/19pd95J+W7h/qex+cN+ng4dsvff0bb33f3+2Q+Qte+wvzh5987Pd/afu+7XhqaGwxGoqOoLNpn0JkUnWota+kPnMAt1O0kDqwqCollCj/3Rd/4PGLX/mqT75t+sgDicqqtNwn9KW9P/aNPVdtw2XrLHz3JfNX34q/+eH9P7zpTz8/nDq0B1sqbDBtH1up+m++fqLbmVrw23u3PHXTlM124neeWvju2OSPHaiZCw+aQDwCRZbA3Fi+ERRjjN7a1LN4V1fedcbHnHPReSKMMVZVldrEBR9ZtFYNvoqBAMEDBxZgieLEqAikTVrXNQBVIba6GbBIkAZ9JZEFxDQKf+FGPIgBlVLGmCzLXKi99wCb4LnAUWkFWjWGBCWsrT7D17NKA4oPQSnlmXuDPiIaEYjc7rSrqnKlU0oFH5z3yhoQcMEZY2TLQNjskqzSymipaw0YgdJsM0aMfUDE2nuVJ1alg7K84/Y7x8fGfvc97/nnT39qdqx94/Nu/OHd9wQfRIRI57nmEBUgGShj7HS73vtRWaZpCgCD0XAszYMwE/gYMIAiEGA0ysdglCJmRAwkB8452N/opWoLs6OVRDCkRERIovew5StsCog8zbyPBoEVCKP3fgvLwyyBGGICEhmdAFGLVZ8JBKoUHEk1iJPr6cvg4hfm5z41tf4wzx1ZO7UxduTEjnb+7suy//GsqW+dWL7lUfzOUbU8ZBCenJYkf/VXyu+9HjPthSwjNXnRK/vfvL7/Z7YZ7WvfCFaOnVyZ3Lbvnb/0mz/8/h3z88db3QlA7VwNDS8IsdfrGWO88/phIw+JJnQTRfhpN/bXE1Pj00efecaadM855+zvtr/7lQfSzqQaYtaKA/R/9OXP/eX1128s9cyJ47iymFiEsrbOry+dzhYWh3NPJ8fvjb1CzWznpISIRCoDqKmxovugU0FOfF95y4lS3jNiEdwuTC+bmWG/IuXYiTe86uk95+Rrc+7w/em4dVgTs4xCvPisvQBQU75t9/gAvEgevO9Vo7V+r1haG8wvF77ql8PBYKMYjTSp4HysnRXUEdB7I6K4UCO9PhwN6zCj8JJrzn9m7uQnPvfB57/uLRctvfDxHd9FtvP/8Q/40f+x27eT9FC59ER1xTUX/fm/TbziF4ZrIZ/dlhqbcbvhRyjEzVTgLYJE8/NtdpOVqx9+5JEkSUVAJSmQ1gA6SLUxAuHIJIrAqMhApNMkiS5abVBR7Zy1KQrFCMJKqxRBbyFnSUSYAVEBqUbBiBI9cwTQWjcDWKWRFIhIq51qg8HVITqlUClkDmU1UEwKkZkDSC2RQbSgZVQEIBF40+DYfH+LCn1MlWGQofhSQzI+hqkto09SE4JLUxtjBETXiK2IfGJK4goZtdJI4EIupsWGfGQQSEwNrJAMU0O4bNpWDZQkSQjBWqsTy8jNIi1Gb4wC1eyDMTG23W7X3i0sLaIibQ0zT01NkdFKY57aNLPWWmNMg0oQYNQKCJGEFBABERk6wwwhYxQqtanzItJaazKICIpEN5HPaIyxmuqqQsSslVprE2NJAFEZUkQaYNPjWHvXrKURMVi7a3Xiui9SOVg4Nl3dG5ZybonlJBQ6mrYdL9fwTc9Kb9zr1xwCwNzJ3nieMqaDPMe2RSdTkzC7Y2+ijdaajCallNFEpLXNsixotqT43LP2fv6fthdrrjU9clRGDwFR8rGrX1Bx2cEO+BGEovfUw6cP33nvaGZChbNbjoGI8x0//z9PAE13u4ndWZvoiWxBU+2p1uMPnf3923e1x0N3rP9g9dSrXnT5a9+04+0/fuqeL6Em99Qj1Zc/m59z6ILzW+bKF+LBc0898b2J9/3BNcdOH/+Fd61cfpX8yq8tX3gxgfTWl8NwkYh8mk9uP3jmKRHKYoV97WquanX79b9854ve9eLP/dYVP/xSklZ15F1J+sjuF/5p9zXTw/li0Z8qRwMTt338PcL0zvQ3njhlE+NotW+sW19aWx3aD3197rf+6nFVbSSz8q3DcuNZ8A/vGB834SsnJyrDBUaVGwJgBG+pBBYAAuUDa209RwZBRc31q5E8QIybqr12u6211loLokGCEJWAAY0BDFKqDYQYJGiC2YmpHHUmMJ61JLK2how2iTXG2EQnWmWJTYy2Sjc54tbq1GgSjq6WyFprTWgUWW2MMVrrJEmaz6e1afMZU0ppUhIZeTOBraG3aq2ZOYQAkSFyjLEsS1KwJfluynEnIZIACRhFpFAbRQiKkBAJBWMIdRXqqhmJt9ttC6CiZDYxxmz0env273/jW978z5//7D998uPjY52J8TFDikMMzmMjOvOxZa0vqlg5gxRrp6OkqA2pGKO2NoSgFWqExqsiiK50VukYPAPf9+D9Tzz5eJNh06yqJPgYfQiOGUSwoaPAJsg7GEUQA9d19A4RhaEZKsToUZhD9N557yVEUGTJQNyEB4/8cKSGq1n/uF5bG7gdz7Rf9cglv/bMy1//2GXPeXTirKMuGQ2Gr9krf/6C8ltvrP70enXjjiKtJpfL40eT9z/8PABY5fENmVwYu/rpGz67ct1fMXMx6hdlb1CubQyW6hiPHV/cu/e8a5793EaN0/gVm09UnuetVqvxfzbKFRFR/5+BA+Be68tR1W516+DOveLSu+5+iDFRFB3WsfDd7bu/f89Df/vZjx245MoKOTiX7jpQHzxUri23Hron3vFN9a2vmtkpuvE5MKrzWgBYRx+xwwi+lZeYZzHJmCqgmKSKUq0s+TrFeE63DetL/tAhf+0LfGdsPzD/2YdbajU1SV453crS887ddlbWBgD2sN7bQMOaUzbZuGkn7aRZ7KcSWq2sjqGoSkUaKBKqGGNmdYziXcXenq4enyvWn2cGV/pC//O96PjqK159bG647b4DwyvXnnj8T/bd8qWJqUvWR8fqsXzvOz/WueElMgjrx4+urD5z1eUvf9LuiHFO66SROhttGsSaUUoRxdAMpDcVja1W2/sYA5BWw/5qtz3Z7rQIcH1jhFqATaKwFhCFHgFADKEWnGp1q6rarJUgKIXcSHYBUDaTbUNDjxJWiIDIrLQmY7bsjABEigFcUTcCEBCKMQJylmfWtubXeynphAFBRReCUayQARJQzbVqjQ0hYIP10NQ
2KZfusgsu6lfFo4efGJ9I6tLZJsBeIwAwgjKb6CmjFJfeKsVIiOiCV0aX7HWatk3WL0cueE0qtUldlI2GuYlMShJrTFIHf0ZvrAiRBZRqev0tN1AEhYiYZVnT/lZQLSws5HlukBTpzVmxUgAcomcOCtCgUk0gkoDW0OzGYmSNJIo0gmJCRUxILCZCM9xrigOrjCaFLFPbJnvDHqPY5q6XGDI21C7GqI31MVijDETFYEghaK/Zkrnqymf/oL5t9Vz7xPG5K5cOjkmbVcbQN2EEOJKUlk8W2/IcAF58efzq1wfpJHLty1UHXRk5iD4gNjEYiiEqpQmMRo2IGUzLNn3l5/6lc9e35kHTkQfzF7wlVOuStcu1xT0vf8eTf/uRid7puazbftFLz3/dT87nl55exxdPBGFhZTY2Bjuufn7/V98/9+H3TE3OssuARJv1mjM0Uz4MI8Dsar348z+yl5WP2e2vfPm2i58/84a3PfCnH9j1jj9arXujOaX695z+9IfUwvz1n//Gye+P9px/6e6f/ensov3fWzwpTz6w/T0fwO37+n/w3nxwYhn/0y2DASaAK+87kNx91Y987TV/8Oxv//Xlt37I27ZsuG6rux6SX5FnveWNs6/ZnT+8VBz7wYM3+7Fjc6n+s1+p3/u76Y/eLN+/0egOJZKR7feKXpZ1p+zCfO+yPdPIQ5tAKe1LdpsP37z66kPji27VA2iRSAoRlGgBCAGCQpEYYlCgQBGCOOeQJc1zV1UcgzGm3+9Ls79gVg3fpWnERLTWxhhmIGFEtbS8aIhQGSKCCDFGpTRDFBFDOkIEwAhCiIYIGAgACZvkbolCiFGESAFKBAjBKTIooJBkCwtzRslPRJtGJGbaEjckSTIY9kXEJCkRiIhWCiLUZU1EVukIsVFIMPNmqiBHBcghaKUYhBQam9R1HTkWZUCBNE0RsXY+SbLV1dUdO3a9/nVv/PfPfObo0eMvefGPgkIiCMHBJt6AfIyKSGtSmzctQESPMdEqRWQAIaklEoqKERjA6H5VKE3iA0UypArnSICZJcRmBBiEwXlm1ggA2EjWRWKv6hFQ5MjITevJW5nrzYTfBh2MeKhdUTigAZYCoCsiDIhGRZP6CBRBSz+4nPPs/vLRz33l7CvP33/Z+Op12D9Ii7ryb9iP/+0KPjro3/6o//wT7FYA4PjV/5hdfH3KeajqqgpeuKxKBRJrR4Ct8Y4gepZnjh5VSiNLMRxqo0Q209v8lsR1c8aJmDyRxptD+csj+BS5ym0/+9CoqMoTTyftjiC1MoheYll2utnf/eNnn3fjy7vbxlhn9cSYHOnpux8xc0fsyvKgpyZ//92Ld95Gch9oUKrtlLJqaAplOrYC34cRKtLSTVkgepXqAGpSYaJqqQbW2SMHZrMa93zi48OuWnL1WBSf5iQIF1502cLGUQDIx2Z7I21w2yjUBj37YW+43KvX2NYCRW4j4oi0ChyB0EMs6zqCeF+H4Hq9o0sr7gqtnjUozerqjsXDk7rctvvcGGX7yvNUSQtTXx5Pz+utHtbXvfac9982fsWL1o4+kObwlc9+6r2//fof3Pu5VAiBG/0zbUX6WG0U6ib3qDHsiohEjp5jEK0t1PWuHbtIY+Fcryx0ojObKeGirpRVEPmM67eqqm3bZ42VvJ20WqnSwhIiOxYvIMyBABufAwBoIqXQKN0UyHKG09S8NxRr2oIJiBZGEUQw0bOrooqiAYGQNXmNoJUmlYNCFCJAFCI68wcx80jLWqzmVpYQMVWmqiqValTA2OyoaGpqqqpcQ35xzpHRESTP2syQZS0R6Xa7En2/LlObdHSiBCpXshJEMZu3FXExlGXZDMeY+UyTarUSYKVJaTLGAIlSaO0m0MN7n6fZ5OS4UmiVJhBk0QiNo3qrjdCJscYoIrCKjDHaKG2U1Sax1mqTGpsanVpjlCZArUkr1Jq0IaVRa51aowjrqkizRBtDRImxpLAsR4hijCEFaWKyxGyf2WYUKU1pmqjUANaxbL3m72fxvtUj58Unuksz4+TBe+1cGgvr55f4135s32UHtwFAFjaUUomyWmvTHs9bmOuq5wetLFcKAVk1OVSaAJiI7PbW1Z++dfvnPumTGQfcXzpZLK7VMc3VWCxrnJ7sv/TV1Y++dv/n7tj9vk+V259z53DbpAoH7EBnkKSUKFl+cvX8n/v10Rvfsba+2MqSSDzknJFAfJK0EgkmSbvtHTEbx6cemP3yN+Qvf/+Hl810Yu+C975rkFzR2dvdd8m+ctS78Q//9603f4/i6anrnr/yTDjy2ZsHn/rns//s49VzXjHz8jf3zrmoHKndf/uFM8/44Q8eZmi94+cevO5Hb37bP5x7/NYbP/NOfe75xoIdmyjr8unzn/PuP3zDa9vusS98Uf7x/Rd94lfe+tm3X7/DtI68yH7nJdWbP1RkS1GCBIOkk0zlFAHd7319OGq3Lz6/vViOPvEX909MjI0c33oq6+jEObd5HviYgW6C/VihC56IBGKoXWMcIqK6LBufjtY2/pfDjxEoMaLBQxAEH0Pl6shslLLWotFOQU1SctA2aWV5Zk1zqQoCg3jeFOErAdyCfgBi0/ACiyIE4RACsiCLAtSEwFGCby7Mxl/UMOkic+M2Bmjcs8wSAcCkiQ/OB1fXVVNoksBEd0w3Aakojbew+WdVVU1gTO1d82h+CxE5SiQog+uXI8d+NBpoMuvLa7nN3/gTby2K4g//6A/u+cHdidGNF8vHEAm6E+N1DJV3jByJwahIoJHSNC2rihGCd+wcgQQOLoa6KEPtXFHXPozKqqxdURRFXdV17Z2LIdR1Xdd1WZZVVTnnyrIMIVRVVdd1VdZVXTvvg3d1XUcXvY/MHGrnq9qVblQOfe28CEeMEbQXEKmkdtFAVDqIBhSt0aQARDYxeTfJuofvuL93y2M7/3H5go/ycw+fe9nqrunYM4ekevPB+NkX1M9SANDZf26xCksrq6PRoB71MQYSElFJ1jp4cI8hlxh+9NEHXc2KEgCwiWlaXmttURQxBgBploxKKUMmMMgfK7hQ4st95Hj2hRceeeJw7WXIOOhvDNaXR2XPra+G1A2L6k//9I92bz9H2ym3tBru+QE8eHcxOBHTItnfOf57v0pf/6KembHOxnpF1+tFadALmGgUZ76aTlqJ7RR1oVTd7/djUNtsQv1epavw8A9ad9wy9b3vuv7CotVTVSsopQZeR88PPPzEC/fuAICZ2V0X9pLO7FQYpf0hc5Igc8cpdmGUJkfm1jQgYrBKA1EMHoBjWUpd+dFw7tjK9ce+999e/KOrneeUD965MH7g+A2v2WCYcqX/2P/ddzmeeJEe/d+lsXe9d8eNP98f9jeWF7ZPH1xbHdz/8Ddb3ck//5Pfet5Nr73s4itHPFJEmyA6pUQkRBERQAbkECMKWGtDCFmauToAcFl6paD2orWN4OsIY5OdlX7fBZ8ZHVy0JgkxBoSjJ09G5uFw6FwtDaRZWYwgIppUs97ghg/VmAYRiEhCjMAN/QMRRZAZSFOsa0FmYWEB0FUZYhSdmdgEHo
ikqIhBiAJw4AAiJLgVZL2ZgoBexjqdhYWF1fnFVmLI6rquEq19k3vIsr6y3mq1iKg/7HVbOZAqi3pYjDqdTu1GmkxwNQo0OuQYmRCNTZxzJKwMaSBwoIhQQICzLGulWVEUm/cXQU1IAoKqmZeJNN5fVgolIqKwDwSiyHCTp4YokRGAkBCQUJhAEGgLDbzZUjBYbVCrIOzLSkIUYKsUi9AmZRCYtCIVBUQpZIgCQqSFUEBbk7by6LwwhLomRKM1x5AYi4oiQjpUMReKo0OHzt4D64f3jJ7oxYNPhxnRPUp4LT20C//yodVt02ZppceTtLZRw3inpr4wR/aAaM3stokReBDnfQyIQiDCAgg2s9vveMR85dOnxgMUve37z1tYrkKaRzMcQtVS4/5E/6q3f5ATCC5Zf+rUInVOeHVTd2BdiRaEx6ow7LY6vSPr57z3Lw5n3Y1P/FFubJbu0lotyvEuTpqYr7u1tjIhmJh0lTKJqIMH95Ygd77/L8dmd3au2nPXn//tWRuD9Xuf6P3z30xd9Jdf/j+/+6rf/OBn3/uRm770nfGX3XD8r7862jUdYxlvuKLzrCvOdMDbJ7onXvpq9bPv/Y/b07O30Ut2+6de/bYLXv+WU2/4kbHOTBrryy+97ORD37jlA+9LYZ4StUITh37yLe993dQff+bh7/zjr6gL7y/e+b/pD/6iKoPGoaKsNGnK6exs532fOhZ7/tO/eRX1jp9cXJq0/adPTD9nWlN0HoQ3XXCMIKgFOGbWxuglcJIkaZr2er1Wq6VQNSOosq4aaS4iap1EqCHGhkzOEoWRRFBkuj2+0Vvjqu52xpIkWVpYpjRl9JtgCdociTUW9MZTTKhAETO7GKAGZiYEAIwxxq2AXhExxrjabwaaiTQ7GmFuhIohBGCOMRpjrLUxBkYYFYVFbYz1zlVVxcABuChH3nsUwBh9DIioSCNAjFGiIJEPnohAwDmnUDnnQCiN1kcHGMGoOgQXAxFtlEMI8tofe93Xv/q122/77rnnni2CIqCV5dKvlytGMM/zoq60NRwdIYooT1hKUIAQREKsIEYQSgzXUQFWzkctEYE4pqQZSAKHGIDEcURFAAQsRXCNSyKEzZxWCYLIGphZSGGEKKLYewCSCGCZCy9ArHUwCiIhQpbqDraiqsDXRlllEgSjmTW7tKv7w4UrXvUCe+mB6pEjy6eOtD5ycnLHvtmzp0ZXTi/uoWPThzuHjsMJOL4a2R7rpLPVcJBopcVokSC8+8DMZz/zqUvOPfv8y646fHi4ffv2YW/n3PxxVLrhf1VVqbVmFmttMRppYwCQAMFg+sPU3yb+1+v81qm8nS0++dRke2KnlYvPuWDfZWedfehqYTUWlx+bO/m7f/eZv7395rf+2M+Up0/bC87zR56hWoYxtDhMz+6NBUY/CnknqHb0/Twbh4210cp6bhMa6y67kfVFptRGcJ2sU4S4zGGfGUtzRgpTKUC1+mjWib3VYZ6aCsJspl1wR546ee+pObgGMLHnHDqHtP3+0Tm0MmPHK+GRCXmapsGPtXJvKLrILGVVI0KSGiGsgj9y/NgF5+2b3f6Cxx/8bo4hZAcevvDcUI/GimH/P35/7+rGvm8k3/vDQv/K78J5rxgdeeI/vvE3t37ti3/+l1879fRDlODH//7Jn3vdxScXn37+C29aOrrU7XaV0SINTRgBRGvtpWZiagJ/OCiFZTnqdMYKH3vFOgSVJUYYGTOmenXkE6VBcR0DWT3wtQI0CKk2iKauGoajFcYorq6jgKDGEIJjQaWVUogiwA0GSyllrNKaiqKIUUDQ6KyOo4ieLErkGOKWD15BECBhq0RYR/GuIqWCABhq9iWNcsRzREVCZIJEYNvKFDNmCSsSEvasQJNusnw4Ou8gGkVGEQMgcGLSwWBgrWbkoiqVUhoUAKDVGDjTFlwATaAIAwIioChSzFzXdXS+KSOIAAHSJHPOCYhzjrRqOATBh2YCAZEjitY6wqZUUmPD8yAUCiEAASI02Uabs3wJhATAELk5qAGEFJGgUlR6T4DEggJGKWEJyEJoBEMIZJrRA3gX8k7bC4yqMrGWmTut9sLCwu6du4bFaG3Yt2kSsbZVVlr9mqXL/u6RWx86kMNdd6Tq7GDyhVheM6vX+6MD3R3nXb9z+Ei2vl500pEfSk0WkCeyzMfFXqHIRWgAnUAIooka/e3UFz9XmArr8UrWcC3bOP4FftVd3fOfRWsrS22YLnNfu7BSsU3a4/nXF/Ip7S5qcSVj5KCjdKWzChl0un68PO+3P3Dq/HNXf+9XD2ycoMk942anduRiSNPUOavFKZFK1XHk4cC129/xa23Voge/lZz7sh32X/ViefKDv3YV2Cdf9IJLdszc9yvvOjfrLh155J4L37vvp35s94Uv/VoRr/zQhx/69t1nDuDpwj7vI598z7+td3z/Zy6V8ujFO9/z7KO3/NM0Qqqwvevcx//1Dx7uj6YVZedcpS/5kekbbtpx6fUnnrrnJec5fVy+9Xe/63/zF0c/+s/y0HXm4GESzk6fV83tzTxt63ZOWfeuj9x78W71hovGLXdOOuxHr0FVtScUNHrEXgBAQUqGQ+zmrfX1dZWkTfaXC55okw69ufhohr0xpklSFAU1iNkQUchq471f6K1FXyfWlkVRDEttjSepvE+ZowIB8DEIg2giwIaJ7iBuls5N7Hdko7UPHgBo6/ivK9fklVV1mSRJI5n2ziGiAIQQRJA55HkeQ4g++Ohj4wYOElzT/npWIMi9oo+IOjYiEialQW9WBMzcfE9upE9Ede2b36iYk1T5EJADEbm6ttpEXztQbnXteTfekGb23/7t36699trp6VkEVYWaAFuJpcT4ciSiSCshqoNQ5VMwlXee2ccIPmY2qfqjCBijaK2jj0opIl1xIEbkSAIxBEEAEebonLNaIZJzQSkEUN7XJkmlYXUjAECMIhKMUgSKjPYQvPIRYYxQCGtSEIhMolREnShjE6UdMAdv0hQTY3R773nnFFDc/pt/0tad6CqXxViWu2f3GO6OXTjzIy9+1jn5ZNRt0iQbJDOkdQKKWuOTUDgB319b+vVf/ulffuevXHfDNVMT7cqFnTv2LCyeavJgYGsN3Czpd+7aFUJYXV3VSjuREIL6Yx2/4mffNL10eBEDn78v/ZN3f3C+c1BTd25YpHG9+OFXrrj4qm0Tt/3bVz/37Cuv3HvOlcuWcWYcnjg+mWVltMVgQ0i6ozhIXF4rkDzWp2trW9kMVT4Go4xjXgUal9aEXukZi8crLpTfk3W7zni14+jGgyujMA6qilFbM2LRKDq1MaENAHh6zlfJsJOo8yYnFwdDV4LgCIH6rsQkkTRT7GLpBIOPYXIs8zHt1/XxZ45P7Nwx0PnN+UW7r9p90WP3Lu+9fDRzfvfE43jz+7bTmM+Tc76/49vJo8efl71k0X381n/97tf+pZ2Y//2Bn1JGPftZNxyc6bzt5953avGxqYmZQ7LW63sXQGtEZHSihEKsRKFSChUysyAqRO+r/Xt2Hz91vBpVZCgwK6UUCtexZaxzT
qij2XOsSJkYY05AqCdaLSQpXEA1TBBHG8NzLtlz18b4xtJwfGoHYVAQdSCtbCDvxRN6IIpOVFRRFEPQhrx3EqIO6CQCkUWKzFGiQQqKLKH4ElE5AVBKaYWuTJyttUKiUDkRVCyIwJl2Em1AUjpgNAFQWAOikGBwEWyacJAQvUjMWnmvKsShUgpJEqtJgKTBFSCiAmBgAUWjUEuiAYCbW5wARtKkEZsyNhJRAw70wqEsGERrhWqTQJumqQKsyloIjdXIEH2wVpXRp9Y0/HTHgRGYIyFCEwDLLEBRBIyqOWjAOtRKFAmQ0t55ReSDFxQGjtEbk7gYRJRGUkwefGZtXdeSJiECiIzW+kmShFEFhohoZXkRABaWFgHAgiJPwNqZ0pR+J+y89MEd39u+9NDP7Es/utZWnaeXa8SxdCy89SXT173joVee1xpvuXoIiCZDV4UC0TkxifN1oqGoybSGJmYYsOSxqbx+5DTMzVW5iuVGO9oRj1KEk598/9V/+sVVkomR8iYxoYa2gYBHBvp0SF8y0Y8xJAYYOSAyowEUK7bQw2c2znrrm/ovfOHT7/kl+/UvnMc5T45JonWVYAVeFSFRWuWtqenBd247fPvz1GR37JxLT3/oLyb7q7OXXRaeGPPV/EQ+40aj6bu+GZhXv/1Pl9rWyd+6/9HHHt2hBYvRwp99+MwBbF73uo/fdamv3B9dX335/3z01S+8/thtR8y7fymMz5yM+cL80/HAvvNe+IrWdddnZ5+Fti2sN049efrJR2nn5I/MHk6PTH7lG69zr/8rfO6XxOeMUFA0x8/O//mX1zZMqxqt6rGvPzZagXpDFbpP6zwxRvMBnIQ0V2IkEoCJogyKSK8YUpKUISTGchCtVBUdM7P3WhtgBIQonlFiic47hVoii6CABPaAMSHr6jpJ7QjqimsbSYvJsnxjuKpdE9UpGBmC8golshdItTIg4KNRqigrZU3hK/ANs73KskyT9t73+0MRtJaKUdmMr6Nn2UrnZPZOYixEIYDW7AP7AEqzAAEH4iBCThgJgDVgFaTdzp1zwXvhqLU2SpdFwdJwrMRYO6wrIki08XXNBisxyAQhMngCqMuKAazBzvj44sLC5Vdcnbc6p0+f1kryVtYZ29YIrZeXl7XVSBBCIJ0iOgAgIOUisiBAVDgKhYqEwgLsIzOCMGtQxCjkkTEKsjKMjCwSQavUe9eyZsOPUjAEiMbGUZXkrYggJKg2AZ1CVDsPHEjIkMpIoSFrjMWEIlqlE21AkUpINTwBrZDIc5ztdKYuOHjizjv3nbV9fjXaoCeZVidaq1xRtX7iO08evuOHP/J8WXsW7dm748n7H9+ddnzWtu3iL9//Gzf9t5vOO/9FqxtLwLA2/8yddz744OOnzznrssOHv3PxJc964IHv55aCGBImQ3v37jv+1LGiKH2MEqJogLLWbRO/kcE9Ze8Xe+XzSwH1zJG5Hz5w18a01nVvat8BaKXV7oszw9cenPnifY9+7atfef30Tl0OKmlNq2yovQ4RRUSRbyVcYdTEYYSoLHSgHHjPSQBUMUKbrzzfHJ1zSeI5dnQsefDQMkcKneXT3LUtMqVyJNFAOjYiAmTQOJa2AeCJ4/OPHjvBre7E/rFrbrgCjNdphphkSSvPEvbeF5VVseitTuetxJmpjKXsXXT+3qnp7sIzxw/Soow69z3nBeu7zoEn70u+/OeTnIEQ77986sUfHh/ufHzya//fn7/3meM/uOK6V4nmtdEzp+ZXXvSin3ri8JFnP/uFP/+zv8eCV1xxdZLl3MSeMDOCQFRqE4WqtmL2lFJpmj744IO9tfXEWGAG5hACh2CUZYZWZpFcBTUrRq6s+Ha3Pbdy6u/++R8TYzNQM3k3tUlrvHvs9On+YKBaSqFuURtFq1QH4wE4xSRhBMcYWW0mgWNZV6WvQ+mtNomxemvZoK2JCEZpYNnM3AXQWvsYrE2DIWZ2o5IQGYE1ESBXjjgGjIBsEDyEKFEiewhCSFoVVQUAWZblSd7K8oQMNtG/URRgAKkwBBQSaPCcDBJAYmOxjbEhwTZ0vSYOAQhZhEVcVfvaKQZ2Hn0Ez4oxlC7EOBoWVVkDALKURVVXDhGHVSk+uKKuOPSrIvogLqLVHGKSJMLgXWgamrqsIDJW3gjGGMEoryCCxBiD8xbIDYprrrzmp37ybe2xbu0dahVwc35ojPGlY2ZSqnL1em+jM9Y1NkXSNsm89y74EFkAitK3mLrU3cCkLDZeaa7cU03XaTmW8bjPT85X1idVhb/0t3Pv/4XzA6VptW4w01ozKI2dPXlHRyhZhyIwR1VsZMOayzil5e7FJNxzRKSwHsUJp2MMCUGu7v1e/+5bbXvnUKNXQy8SOfcJ3zdszZhwwNR1qgSVsonjWBtIA/sYcSayct96609MObn2X/5t17/e/tjzbpgv13hh3tcbtlMn7V0TISdouajtJG2f6OwsBO78dvfk0wl1508tt+sNdKPSDZRSIVG2253Zdpbbd9asTcI/ffzse+976mWvedZ3vto8r/zeLV94eu/8XPHWj7x5/jd+9pJPfmzuXe90v/OeojV9/8zB5RteNvOev7nwjz637afepQ5dUaythEFtyC989zbqqNTwYAN/dsfxi7bNQdSyfT5OLsjkAtjC7zq88lO/R9jTdooQ8jY9+nCRUzIqqvlTdTuUCbdiSzwFABAkIS0irU6biGL0MfreYCNJDGqsK88RhEgEXfBlXZW19yGGEAAoBBZGjoCggosYAQC0MSGyNQYZ6uCLqlxbWzFMDiSGQAE8YgQhDyyokZxzRVGGEFyILnjnHEdhUiFGASyruqrrsqqajVLpahdDCKEsS9jKqHbONZm7EmJd1nVZNSkLEaRGdsCeYwiBY8TAEKLUXjQUvh65CrRyzGVw/WLoJTJEFlFal2XZyD6YOU1TEYk+NNIhImIQIAIhAMqyVrsz5gOfc+5527fvPHL05He/c/uxw0+gSIxRJamg9iFaaxMFlhRHicysoWI3KgoAIMYzIaREDd0ORSQIKxeDAm8JEW0EFGhYVNba2rs8zRq3QghhZmYmBBdAUJEW1BGgDlx7ICRrksS0Wq28nWVZlqZpg+tJbJLneSvNGq9EmqbtLE+s1ajG9s4mBS/PrYx86yWsryVYBn82p79Ztd6FE/u6M8VYPtWqTq37YrA0u23yjns/+/d/+/v793Tv+OGdR44+mefgqgDkl/pFTXjk+z+84Vk7nn76yNT2s6c73cIFEg4EGnC4tmHyrDcY+LKCxGitpmdnq9FIa9Ef0mvnzZWX9TvjnYUS48J66J8YDbi1MQ9Pfo+K0xuj3o0v+hEV8YePPXr/nd+eyXIfBwXEqNsapPKV1LULdSfJWp22QlJkQCoITiOUFkURAVcPP2mHfW/BRKdGOrHd1vTubfsvMl2jgvdajCAC97E/1CVlVveHNSgHAKjyotLfuO0HS2tFubx89q6dSYRqbeOZxx5YPPGECjx/cuW8/fWPvnjf2edYx2unVw6vrC6eePrIaGk0tX1b2U0zPeR1X9/3jV23fnCP
TUDq0VXPobf+9862HbMP7r9/8quPPPaFZx598I1veefV17x62E/e8Y5f606M+xCKsl5bL5Qe/9znv1QW3lorsqm2ZeTSbyLOAcDoRCsbY7TWNh4GiGy10cpS4xNGFeowipCRtqw0paAIkmxxtXflBRc/etcPjx07kk2OlRBFJFVJ4BiYNaAgB415q1OXFTQa4Fg7RCYExEa5kAAljEkAyKxoqkJEZRSgYqDAGSgIPkaPLM1ya3NtjBRQlDXGJBxBEJqpl0WFHIFEJCJwkFgHLyGaxDKIj7Ehyja3g2JQhDokQCxSQqzZCQcrSES1BgfshSWyCkw+EkdQgIagiSUnZIWRIBJHAg/cSASdc95FV4eyLOu6jjF6H0IIMYTgvfe+weRWtRPBBmhQOR8FnPN1XVdF6ZxfXloZFCPPcTgaFaMRCbhRGQG991K74dr6aG19VAwLCbUiB1DGsLi6vLq+VgyG5DkUlfYBgerKFaOysXwopRpJbKP03uj31jbWUVHw0Xs/Go1A/LpfXylG7Or12C/JnDva5+2gP9UPFBzhAPDFF018+vaVv/nXhdbE+NUHeH11pQBAAqmGMzPDxY2TGIpQlrXCQYzOlQNYO1qqr96bTS4+I1lVQLtCE3ZP9FuIRm2X5PS/fdiOhUHFVGFuBKE/N0zmon1We6i1whg9x+AFFXXJVgpDVaed9PTf/d45t3zm0Zdcv/R/PjZ53ZUXfP3L3X//7vrP/8LS7O7+oDccrRWjFaNKZQWlLVO7B7t2+Olu2m1ztYRLD3vXV+NtBIuedZpGgjDc6J88YgnydKrOMZ3My+6u5vm5n/zEsUPPefNfvX7n/D36e7ft9NJRYbm9K7zvUxf/8T9d8NZfnLnkSueGxcLJUG+k289NdX7X+9/b3jNlds5sVAPW+p6LQm/X6faoDdpJayBJIRPL0B7EmZXwrPtrqCgwem9avhVdK4a1SiqaiKrMo0690mQAhJmi8Gg0YmabpQDQ7XaVUqPRSCkDAI2SOQZxgWvvqqryPm4ucZm3tJCklEEFOkuCMHhuJ1mWZYyMwD4w1Ox8rJjBS3TRBQ8hig/ReQEo67qoSlLG+ygh1t55FkZopIhJksUYG28xomKkLc4Mi8QmcTzP8zRNjdKNQZGZZZMN1OyapcFXNsLt5rshSmOZa44tQkVE2ipBaGA1zOy9HwyHCpUCbKBDQA2NQInWpasPP/P0sKzqyEXtDh4659Chc4LHe+++a215fnJy0vsoCFmWcYgoHOuAAEG4ir4zOX7O+ec55xJrjTHaGo3NkhyaoRcQVsiKIRVCRLHKWt1KbG4NoiiF3kdr0hDYkKnqWilUAsBSBx9QTGLT1HazrJskSZ5kWZJtPay1iJRak6VJmiVJkqTGWmuTJDHaEtGEmSgHdXc8cyvLhwifrAZUjX5prPsJWPoy9X7GkN4otndobr363re+3d0xK9F+7l/+LgW56RWvW13qifcoGWDaX1k6sGvmyaPPzO4/pMvqgcdvfdOvvuvSay9tnMplWQ76I6tNmqZAqJPEOTcqiyRvO9efuXd/drLdf+c6EALozz9y17a0KGH02JPfGcvHZWzPgPPLLrh65/bu3MIzTy0sLKyGRKWl8uCHzhU5QVSBh6Mq1utzi1EYmNHXNTArFFIWFLSS1Hnt6lAObKeFmat7p9XCkdGT98LAWcqiFmFLnIvHltI0MzV56dn7oFgHgJIg0XGqNT1/bLlXxZEb7d03u2N2bHZsHIq6d2pxVyefmGkrq1odMCYuza+xOJ3nKk8znQ5PuznnZ37w5Svu/cQ0TCb1yvD615Q3/nqhpm6/49vyFe3PLscvvmCqM27b7Z/5qd/58//z1Zte9KpBsWKMTVJdufWxzo7Z2V24hU1vYvFijFmaNxuUZsQPsHkV6cQCQBCOwlW1ybCMQUjbWIXRaCBaD2uXoVYeWu3xU/Nzb//Vd15wwQU8qhJGq6xKrE0yRDRkXXSF6w+KHgChVwSoDDaXNMfYlJDDqix8HQmQ9HBUNqr3VtYOzpMAoRhSnbyjlFLKbHnRdGCxrEIUh9KeGEuMrYeFiIyCy/K0iWEhZRrKNShSyoggAVitm9zf5i9Oxo6C01pnRiskQWh6R8uoAQkgIgSFbJQyOlWmrex/9VLLZoAviIjDEBQ7ZEwNK4yARe08SxAOIQTPCrVEiFHq4D1HxRCRHIAEQZYq+NK7UNW+DkQ6BA6Bs6ylyETPrazdZ+cVBOCJ8e4bXvOag7v2hkEBIYayzm3y9OGnPvfZfy2KQhmNGlmhiwEAWq0Wx+id29jY8ByLulpZXl1f29i3bx8R9XsD730MgYOUcRQCu7JyEOuA9Wjpecf2zKyh33/UgF5zydMnR90uPffq7m/81MGlvp2B6rnnWN4YBVScM/g4Pjm7XklwMhoOdctUTl+Ybrvj6GTZ1x2sqkFE73OB0dMn7Uap60GgtLjvDnro2/smpkvyJQTx7oejzqxx+1IvgbPAWZZB7VKihLEKIR/vLjx+J//zxybbB6bq3vJ7337kxueUf/kfs2ddfOH//dAF374//fQ9/u1v7111/QakS/NHYX2OBht5UeZgSLccjKtku7TGylHbRVcRU2Nko3YH0sBxxLVCahXu2zf8QvO895o3vvQz7zqwcL/xfmz7uY+n9ptza0/XdWpVXDrZH9SjoO3Yts7+/VpX67d+62s/df2Bs6Z2XPPiYn4twfEy9p7a/oCLGNIaGCEQzc+KEHd6wFCfcz/TiOsBlE7ruVFvo93WL7vpgI9Ok5UaJaHQhGtpHwITadSKmcnoPM/Lumruy0apzc88bbpvEQBRCNBabTQlSaKUSpJEa8scg3CSZAp1nrZMkp6J9dWbAWIROIoII2ig6EOe50SktAZURJRlGQAo1EopjVopQ6Tb7bayRmgrbzvGzSAHZt6K3BYRBFDKGDK02Z4CRiYBDQ0Lj4AwKBUMKQYtmNsE4+bAjBBh68St68a1iDFGITTGsGcREQ6NvTZE9hxjjMaosYlxZQgUMMjqxvq2me3Pu+H5MzMzDz744JGnn04UEEhdVojoQszyNpI2xrRaufdubW3NpElkENxsf63SDYJeEBlAZZkxxqDSmpRVxhhrVGI0EaBWCnATv6NwVA6NMRZVamySmKyVJnmSpmm71WpnebfdSrMky9NWK8/zzBqDiNbYBt2zaa8nIiCNpEnVJnQIl9frwpr5UP9oJ//xlv768srRaNZ9lkCSpTyeynJBd37zy5Hhikteevnlz7nvntMXXHbt+sogRGPasmP7vsefPrFw+kRI5dSoPkjhW8nOqdvue8WPvDJibNnUJllAwcgSIxsFPoJIFNQqidFfdOnVs/+yV14u9bllt9M+emrYkV4r6bp8n19f1+2O+IX100ff9IpXri+urwxPPvXoreOnntIajEQEAYIEEC1JXbfG2uCFAbXNFKWKEhTCQFWI0ZqKVN7a5jhzQ+5ecDX9+GtHL7ixv222aGmWWqBCza2swyPWkfn1L7kh/+ESAKzXedZOoh/aBA8/cbI7TXlre3u8MxX3n149dXz
+1AtecM5Yd3pjw3XGcMeO7tzibCm9EOL82vFUdkzEatftHz+3rlzSKesTR/fcCJe+aEebj93/3fmHbn/7iz/06/wsvPH0jWe/Y9x0e73exMTE+trIUDt40CCJSTl4a/KqXjcGEJWLoghJtA+iEyUiRtkQQmTWqBkk+hA5otJ1XaM2QspzzGwyGAwmJjsQXG9U2IScL7XKCOOp00exdP31jfbUTBlrEUnJ2kopQNYgISbKAmGEuNxfv3jv+Xt37fzmt27LslaI8UzYQGptjDFWwSobgEvnNnwPU+NAGCAB1FqXDq21ZV01geREBCxKE5H01tbR6iRLY4ypUVmrHd3/T9V7hll2XWXCK+y9T7ipUudutVpqZcnKsrJzzjlgDDZDMjOYMQYTPAaGzAzRZggegzFgAzYGnKMsS7ItS1aOre5W51i5bjjn7LDW9+NUi/nq6acftbq6qu69++611rveMARiNYQNKFMACMMJqZA1vm4QERBSSuSykCIyGWNay4t1QqkPSNRYZQUrYNcvJhLSGlLLkQIFVAHVM9rHdT1VCCmE0GbRIGKWZbVvAOgZRgMRiQIRiwgxhRQtUQiJjQVIuc0m47FGbe+syWRiiBGxaZoSGAWaGD3I3I6tN2Y3LywvoxIZDr4WScwcYgKAEAJnWUptopQHJetsSqHxnq2xSCoyXB1edsllTdM89dRTKijJm0DQ6WClObJp1Jegp9NLHt31mbNPFCTdMv7gycn0ZjqP6o/+6f2/dGGnSMNzdvbvemzBho4Fl0G9eHKtTMWYKVMzrnDA8YtHOvefyFxXKVDDZqhjYKtQRyCgbn92Os0Pn/7oH5/7BzfSUFNojunGU9G8cnqBNeRZv6lGUPtOnklTxe6gn7JyU/7YL31oF8aVMAKVfv8su+f+hV94/cm/uGzHW95R3fzcwa3XbnvdlWujMNp73H/3zlOP3G1uu41PPN6ZZJA5mC4lZTkrMOlUJ1Rar1WmkzKkKrIm40pS8N+/4o23v+KX25p0y5d/76YHvohrx/n8G27nsPDEEUHYeOGzpnacu7ZauboaH3546dEnmqeemjx2fxUXb3rPL2997TuPPvkkdigL47N6Gw73M4AkJCAEJFADNqR5QAlkh7zsG6wLsp1mOpbZOC8feaQ5u2gynJnkTR21ZeNFEWQGIg2oqgqytra2DrRCIlBLKEhCqkkNGUTD6yo1g4jImNr4UFFnXR0CGxZKk8kkkOZ5DjFqkMCASY2oWIMKVkBZWTiFCACCIKqgEJsGEVlFoiBiPamSyqiaREkCCglE2yAHaj3yDK8be0pMTZJWUkW0XhFb47xWhytJFFQIGKD152kVzG3ad4zRGFM3jXMuy3gyqY0xhSt88oKICdiub68UgZmjgqgioXNmNFprbQayLBtNxhs2bLjhOS+47bbbvv7VL5999lmXPOuy7Tt2NkmJswYSWVNVYwK1zI33RadsTeVas5127GZkQiAiF1NNCiQlGKcEBAKgmHLroopzLkZhZgaFzBrC5NgaztU6Y6y1yGQzh0DGIiITUSscNckgArXQRWudpAgICsrMmbHOlpsHU5ianut8GeKrm3w1d3t8c9Wg87S6vxwHKDtzeX2Pdr5821d/ZP99L37+tbc+/wX3PfztS254/skndy8snxacPv+Cc+781p79Bw4/77qbB6l4Krfo1352//g/brnB9UtFCJKscSlElzmQpCpsrI+BgIGAs8L+Ww5vwvjzDb87P7E6euyJA1uv2P7wsTSaHsC+b1NnSrPNl1w51TX/8NC3H7Av3bizdDOr2LguW59SAlE2TECdskyNV9RkHI9q45I3GkRMJY1JedmrxydTMTvz6rdMv+mVdrBl+0RGJ/bE1Wr80APVN7+py0dHGdhOaQB0ZW35+vM3wRGYyTtHF1YHs12b5Wxx5KvltTDXLYcrY/WjAhi9p1SxQlUNp+dwMNXVcYCAU2XJHbzszs9vns4OnjymYCZXv56uf9umwQDrtW997x9++I3v7dGGDQd3Vi/1rz3x7oXhWseZ2tdFngcPzjFAij5okaZ7M1GCT7Wq5NY1vlIBbVFakXYptB52ZAmYfWw0BDJOVeu6ttaujUfWmNXxpABmIQ2qbCdJXTXetX3n9OxskNR60nashQCZddAgkxauExL6apJleN7uHQ/d/8ATjzzZmZtGotgEZMIkwdesjhC5yKu1kZo2VoVi0gTJWZeSjKpaFFv1FIGmJEi2AQ8ROIjL8mhpEnyv6NR1vbi8YoUFJTVijEOGKOCIEJMCIK17JguCJK+KDjlIaiSxQoEWVAGJMptLEJUIKiAJAEXIKwMqnhl7z5BOAYBaFz8Rh6hAJjOh8QAAIQEAAypoaC8jY0BFRCIji1pBYVBCB2QIhRWJkqQWkCiL0tdNa5KnCCmFoihCHb75tW/u3r27Pz21vLyKIBPf7NyxfXZm7pFHHiGiNvEmM1ZIUkqmvU2IQgjbN23quvzIsaObN2++5ZYbvv3t7zDzzNzcqZMnEyCFRKhZPpi41TphbE7v3DN16c76u70TO4fl3SfCT11U/vvXTl1x5Q4/zOrx4vfuXRxMzfhqaQBw4Xlzjz1xcCrjQNzxJTpo6vyLi3MpDkNv07xMTXM9lsGAwNhyUjVR8qWF+aLoLNz3raNf//j0C97rTx+8py6m/OI5U0tzbuf86jJ1nU5iY4hyrpfWNu2e2/+Fvx/ce0eRTTeerOvyeKTYn+l20tGDi7/1i1U2mGzcsHDryzoveD7fcNP2H33HXPGOuABLT+6Br3925e5vp/1Hpg7tqSg5Kp0relDE3Wc1J497UjUBu7lB2LPrZV94259ec9fftC/xSz7/m0mbxVe99tHpZy3/0//Z0OsOx6eru2//wX9/0eLiYifkbKLhnDds2vmW18694YeqtcnivqeWadWwNhUMzr94Y29J9W7rc597sFE2rkIeoM5UJZ3ekkzWKwfAsSk2Rhcm88P5ii+a2TgaznNZ5BFXk6qqkUwJ2z1GkWUpQZsuH2MkMiBCRGf0asqMhOCsQ0RDyM6mlBhBQgTFoGqJQwi5cwlUICKaEAIaJpUYIgAYVWyZiyBZlrGl8XhonFXFGCWzthXdtTQiQNSoTdMkFWQ2yDGqJRZJbVltY0haVQMhoEKEJACGgVXrNi+4zdsUUFELaJCiCEC7TwVl40WJWAidtU1dP+vyy594Yk8KsbWKC5KsyVq/eQAAAURGECZt3bU6RRlCSCkhwNRUfzKZJLTPee4LHvzBPQ89+IOF+ZO3PO8FO3burmMkJBDpdrvJh6ZpOnkx9rUSGEMtOcsgQQQgUlWLBAwZsjEmty5zBggkaYwRVBIixASFFRFW4cyRCK7fcOCMdW59wCViRFnfJgAAEhuDgG3LrrgevkvMoNq+3JnGCDZX09jmRTiVYzWbilcVitK5sK4+XVdRw1ypS0MaDasfeeOrXvemd1z4rGv2PnXyuhfaCy9/1nCx6m7jc3ddfsdtX7XVeGlxZVTBz+3YffbC6v3dwSe//flU+zpnBHLEI/G33HjrXbffqUxRkVGJGN
iSodOHT9k/KsKfTJr/ReY++ff77//VW57D1mOvPz135XDSjFZPbzj3wksvuvi+Rx548uChb6X4I5XxZUieQSWScojB6Mr8ohqgBKkaE2IdG2tMQ1owJoNYez73/M2vefPsNc+ZdHMAsSZNlec1bmAveVa+9eyV797eW5lvjh82olr0epPjo4jZu9904/cePvj9h44N67CxX4CWJw4tbn3WWVu2DLg8++SxesvWLVV7EkMxleF5O8tH98QRrhX9gd1/39rCoyOXdYuZU2/4zXrDbjM+XqfswCNPLB96uth0waP33T06mfQ9q/5Tp/IkSl2XZUkSGxAIhMqGY4zdzvTTh/dn3XV7Jstm0kysKbz3uc3bOAGXZYgYfPKhBiBmGo/Gqtrp9GKMrbMxN+gNUVFoaiSZkkxZmKVx9bGPfezGm56/sLhMjsSQcdZ6JsCMTdU0aDJB6XXKH37LG751+z3zK6OD+/aassgQoyYlRWs8akyNJm+dUUJrDTRhutcfN3WqKlPkTRI0Nkg0xqQUiUhSUsTMWsvQNE0ii0qhbjCKkIB1BJBiRMcSUwoJjIuSWoxdQxSJzDYmsdY0KbFPGRMiTlItQJk1GZmASVWNEgsysqICUSCVpAAKeCZHQZQQBbS9ZdpGX0SQz7DGFDW1zuzJZs6nSAqMbb0Wa0yUhEiSkqiGqmFsZ+l1JWUIDRm01q5Ig8JW0QgcP3DkyL6DzIzeC3HpssX5hVMnT+e9DtusrusIqYMmQCSipqnaL7Vp06YW4S+K4vjx4//62c/t378/y7KqqhQgaiTWrMzIYKFoakXGsBxe9NCmE1vr4SNlA50nTlvsyFuvn5bvd7Lx4R9/887f/OgR1x0wH5yfX6XSTsrp1IxWUyh09btLu9IiD5zMo//y9mv+y+Ev2+SHpqaqQ1lBySeQEPOzTOfIR35r9srn7dWdp6M5/9gXfux97/nlX/uH573q1U89fWzKzIz9JOu6MnO1P7HyN39yacyWrQZcyupilFFg6IzHrsjLqWIKSl04vfzpv5BPfrjZsvP47su6V13evfntm6/dnf/Pn5+FX145uuoeeHR0+HjY8+DSA98La4tmIO7ASq4RQ5IxLW296p/e/pGzn7ztFY9/bH1ou/yyr1+/bekNb+q99X9tT5NGZ/sXPNvuusJuKrbk0zNbL5c8zzcPaNN2zIrq+PGn/ulfn/OuHz/w9JPqbARIo+XzHjuPrjDdSXdsKrAJygl4g4sDBbRfu6ZXhVGyHms7f6g3M3AAl5+3JivBFXkVErKvkygAS0DEZlK3rqWTyaTT6TC7GGNICQFBVBAFSFEVCUEBiNlgm2xIKCBRgAAIDaHWzWhsgiJoFElJAI1rRxtUVQYUgETgkJAhRs/MGhMAOTYpRQBRw5AgSmQwQEiEItrKc0lFBFUSIoqqiCAZCTGCt8TOOU0CIoiALTIkkqgFeBEBFIEQQNYTkb337GwbzNA0ycbULTsPPfigYeesDTHGkNCRpnXPJlVlsqBqkJAlBmYlS2wsJkpsTAih1+vVIUKSW5//gk6n+N7d3/nKl77w3Oe/+Nrrbhg21XA4ZmZFYpdVvnHGaorJGCJqRcyx8aqaYhQRzW2pbMmqRbVkkRQVEa0xQcWIJoPGGK0DFc4oZmTRMWW2ZagZZQNUuDzi+k5QFRSAIwMiu3YCpjZgFFv3FQTRJESdaevr4TRu/3xYpM5EQh/9JMfTfzA497Nal46nc53dddHUnqeX5sd//dd/ASwg+tCB7z3y4J4XXH39n/7tX/78z//8I/u+MT9qpjb2ji0fjWdd7PDwPxx98mtPHO0W3aoJM71pBBhOhnfccUenLCZNowiUko910e0bK6unFzZ9cdvpXzkIP1vN/Le5I08d5uB37Lzsrnsfvfki4O65vPVSqVZuvuzs7z32QLFSF296xZPf+O6GxVOaO8jQ+hAcuKCSkVQNF5lNEl0WUuxGBJfVIbgmVqybXv/m2Re/oo5eKpSavWMM2m0mVMTBTc/uXnl5hGb4nTvM6cWVtX41V1Bcy5qYvejySy7bse0bDzxxbLnOHGbd/OTSwtzUlpTGVb3amwbBJBTJZLWsdTdy51QvytpkMjp/zw9mhVydzd/wI8fZFQe/m9vNVdc+eeJJhfCNv//dRot3/fIH/jz/mcd6D+1ONxoDgI01RdNEgoRMMaDL1ZjcGseAQVKKCSFmuZEk7Ultf6+qquXptcll1XhyxeVXnDh5cmlpxVrLSCklNUkQoBpPqtV8sClBCqEyrKp6+PCh/sxcJ8+aumbShCqgPuHAuaoadYvsxKn5j/7dp7pFt6kar6mNSAMAAiRElFSYLDFDSMomqqjhUV2hM6y5IBhjmiRMhCCGmKxrmsCITYpRoSgLS+wnlSVOjpzNRnWVscvYBBVC7VibRBOgEnrvLWH7pTJjk2ppXN00HtUZZGVLJorMT9bs+mKM0RgABdAWgJP1ARiBEAQEgVRFJBFoS45EDDEWnQIRJ5OJUex0utbapZVlJYQEeZ5LCk2KlDli01FXVRUaTsk74CBijAkpWWvH42FrnOtTnIrU8icjYzCAyOOqNrlVwCQpgWzcvCmkuLY2YkQQVFACZEtFd6au625Rbt++fd/evTNbtrg8O3Tk0OpkuH3nWYuLi6cXTlmbMWUoKWtgKVSFIjEE0yky4FOjyzP/Fczmsvi9/amTBieWRscPwnlbmu5MlEKNxT5mKToQ9JNFaDz0yo7fUvih5AVOzPTy5J7i7LdOb7LLB2auv2X8/XtjM1bMcjIuihSwfRxO/OZ7Hvr5O7e4yb7vfopC+O1fe305+OpVN7/45MFVZ9x4ZdTZvn3+ri8NnnhA+rPg18R0Q6qoMd08w8zJiFK21mRq3aBnp5BnNsTR8h1fyB753vCTf7NSV/6mVxYXXTF7y9XF5Vf1XngDdt+E4Oslx3uHS4t7dO8D9dEjo9D9xIU/vUFG737p1BcGr3pGhvTAhrX58p7njp6MOvGU73juW3e88IUro2HOxcTFzBdD12Qnj+WDTSf/44vT939z5WW3Nilm7MZxldJMOT9z6/de9s2bvojLU1qcxKVZmnRUkv3s8+jY1mUShtUuFbaP4kwV/cLSrkFnKSwltjZhYWSRFBrRlhQTQlhbjXmep5TasTK13giYRDFpa3vDhgmAGBgQJUSbZ3VdiwgAe++hCdaYgJpAEYABIxMANYgoSAIBNaWAiFIakQTrOUjYGrwDABoWxnZSk6i9Xq+JwalWTb1eKVLKnG3NYlNUlzsVGIYmQGBngVohL4qkTC1Cm/qJgJhQIyiokgARJh8cGwIybFWViI1jESmKos15a4MQAgiiimibyV0W/RACsSBC4xMBssHMuhBClGSsFYTSOhFxbG59/ov6M7Pfuu1rX/rCv4dq8qxrru8U5bipiQgIy7IsnQPRkW+e2W2DMUyELrPEURM4i4gkyoDsLBBbEU6KmEp2LRDVmZkJpCYBZxYALFNbzpltlERsbWwZ7+2C68wdQ0zExEwIKbWxpLieDllRrCvocLapP7sycbVM2XTezM6tM
XxmVDcatpEFgLt+cG9Y7pdZSXlunF1bWd2/98Duiy/89D/9XT7Vufqqa3fPXTZaOdztbC7yas+WDW/71qe/MakddVJKXXLLy8udqf6WzVvWxqNAYNkEjSSJ2HT7/STeGJsm2vvr/tovreR/0GuWw3f+43OXvPXHVusdqpItP2EJ1sLJ5936so9+9o7Dx+87cHj3jh2bzzpxuOr2yqJc9UsUklqbam/yDADAsA/J2VITZDNbxssLtDTfv/qiqQuv0jpmFXAm3LFeXHLVWMhImfpUDBAVypfMmST6kc9+5errT+0o8q6lU8Nhb6p47YtuePqJA997as+qj6OjaXVt0u0Otm3p9wunSfJSG63qBvuFveKs9O20ofPo93uTowS0tOPcPbsv4dUV193WNcq2dHWj2sztPPdlr/yZ4dJ8Xncf33XPJfddbWI+cjyXwJo0Ie5JR6n2sSlMVljbpFUhIjRGLGKqwFNMo6ayRRl8JKUUBIgBSEm8Xx70TZZvPn7sWMdtACNidTQBPz69c8uGc87f+fjeA6bsro2afnf6F/77+//hM1/CCMvVKBeTYkyUQMEqo9ecC4ha5P3T8yvHdEkIu1wkL2qgDsHl1jnDmGtUAogEhpkEG/BMnJoGEbUJ4ExmEIICQMNKGgCikDEEJZvogyFTgY7BG4QYxDKTgoI6skG1RgWWGERULHNQTSJEQIiI6Bx2XTkeThSNiEeLMSZjjKTY7ZVVVUUMKqpez6zTAAFDklqTjcqOGxQ0hptIYBhJRTM2EMU6F9gA2rXRqmN2TCGpkJ00PiPDjNRa/aBiTpKEyCRVyriNjiEFBEYlADJo1CkDRJ8IMEoCw87lMXofU8e5GlI9nlSTCTKV7DxxG1PnvZ+Znu5mnVOnTj289igi7tu3N4RgrcudYyH1YjlnZJUQU4plf4ooJBARkmrofYH+nPHCHO1cFs8VrAz9x7+tb7T9OFn7lT9ZFFcOx2uDjgX0riWbuIFpUt0xjXZ5+UA16LrghmqeLi46d3K8OrK3RunuvLA6djRQjUnHoZ7tb9xnz5r3/Bp36kN3fj0rp7um+eSf/2K/GGy56CJ/ctTNypBGp/78dy60bimszaRimniEda1Znk+lrBjrcqFcyxqFLo5MrQ8WMzv6P/xfvrVxql7yF7rpjU9/z3z6j0/8pdiZ2XpA/pzds896jj3/snL3jqnzdsMN1wjLH31BskbetuXAntPzT9vTzxTgxy88tQE2ja+7OPv2o6fycmrLOWvH9qEpa5mwKxtLOYSIrszzyWR+y86zTt/19albrh/GKgL2Bv3J2F987Jrsazv+/qwvxa0nyWe87wLz1Zvgqeu0453h6IxLnWgmSGDUDvFEHRksAtRQNxGSIKQoyKllGiCAeiEiBooqKME6B9akJrIgqaqmSahLto001rTZxgRArd84qI2ATYyZZfIJBAUws6XUNSeBzGoSEjAmE0gESKApJFHg3KIkIcDMgpcCEJitySqo67rO8zw0MUcnqEoIIAGSzWxqYlKRmIC0m2e9Xm9ldRVSQgBTFBgjMYtgO3cbYxxim0OQlBKCoiCiEXRsjDPrJiGEgJq1mQrEwpiZLFXV9dfc+Oijj4mkvEDkpECInDM45xhNSqlwhTE2pRTPkECDpKB4/Y03Oee++pUvffWbX1sbrV5303NzY6OvyNlaZKrsYjX2zhljYuOVoez0VRUlOWOb4FvChyAgmbahMWRDLiVYC6alcSBpxsy2FX/adQNdZAAwZJCxQSKCgMEl0LQekZZIWgMlTUJM4CiEIElVIKBfaKpSe00cv9n2t0RYtLQ/VJ9Zmp/ubCgS9no1AJysOpzZqV5x6vTyyDemVx5/ZM+5V1zcmZn52F9+5G+ZRePbzbte+LJbHn34kQ3G/M3yJN8ww2vDGlMlSgia4rhuACImAEQbyVJex3qwYWrpxGmumjofDT41u/bfVld/Yt6+f+77B596TQaL6rMN5QB2NvVk8cQJ7C3t2rzhgaOHcX5yatcFftMWXT45HCVhZ601ZVds8lBlEWrBbqfw3nur9vBTuXP62lcMXvIaHkylEJMDVRT0SGA1Y8TGIielxoe6VgMmiQwKPH70+OIWferI8fPO2TkZV1jNn7t756Ztg7u+t/f4qUULGXg/PR1cibGCTmCxGUZKosXG/vmTLO5RhabCYnju843W01mnTrrkvV87dNENL7nhuS8+/9zzlxZOZIQXHLn6ye131/f/bOmCS7aCCYF1Pks8wixgUMpLQFNPki1sTPWb3vDGr339G0cPnD5r+86BMadOnTLWKgiwqkaISTnvz83ccfe9TnHD1NQkNiiKEPuY77jwkqXlU6RkBKT2m2bnXv3+9y2O11wvn4QmSEDGlLRKPonUIBONAMCKqjgo+2hwOB6JJsrMcDg879xdDvH48eNYsBjS4AElxHVyY0trUtVi0BPQuq6Ns0nFEokIGi7ASIpV9CIyCWNBYFFVJSQmkiQtd8kYkhiZUYhMG8liLSMZZ0NoiHBtdZJndnq6t7y8bG0xGQdrWTUQMeg6zRKUWjZESqkNUm3pV+sccoKWoqIIwJhlebt28r4W0CIkzjJPAoQZGEoIDAKxTfBlptbDL2JrFSQhxRajI2NQlKwhoqgiSbpFiRhDCEgokDQJkckANCaXmdWVlSzLTOa8j1yWDClFcd0ixliWJZK24u8YxGYuRbHOKYqAOmuMMSlhEhmPxyE227Ztq5tmdXU1z3OVjjF1P699pDJ6KNOjR9KLtgx6tsIO9ghTbleWJsO6p/mYA652eeD9fzydP1FvKsLRoh55e6LJd96fn3OduePAoZUSdHT4UI3JYA+yaKRXr9UHXvcbGx//+rXdez/897d94Kff8c+f/8pv/94fH35679aLLp3asOW0xb1/87Nz+x+LpZsKvTWIJkrP9XpQnq5WtapK4tSQSTTVpWBL85y3fmvj5i/c/dj4u3efPHYSi/KsXRdufv5r3nnl5eXPvW9wEuXxh+NXvjAJfhkhzs1pb/vn3/xXSxt3P/eRP7z/rC2z/blbNz7rmQJ87PP/sf3q8qkfu3Lm2/fvvuLF+bYN9fL8XN6tMfooq057ZdG1naN7Ht5wbG+xfVe68IrViVah6UHW6WSLK8soaceps+yd74rP+3bx8R+i71/jY6jTKZt3aQKchyE1uRSxHkXHh57Am69zx9fqiWEnitGDChoLRN57ay0BRxFS8CG0EqMm+PVloQAigkhmXd4pU0jWOSIKyYfk2VmKFIKYzImvQ0iGjTEUJJFBk1tGm1RCCJwUEZENGI4xuU7BSGSQg7RQdpFlbHmdGGyKqIKqxiEyobGtRCCmwMyYZSSYkqYog8FgbsOG1bW1jRs3TiYTALDW5kXWTpbtQ2jfU6pKQAlBQA2xAbBIyGzAorajQqtvakPWEA0L0alTp4qiQISUgnPOZUUIyXLR4skti1hiAkAACCkgmxBCjHFc1Vded13RKb/0xc9/9/ZvrS6vXH/L8wcbNtaTqiyy5dVFdq5fFgBAzrWWO4jISCnE9suqamtsh4ioQNhu
gb7hvJx058a6yS6WpNCx3CS9qja1OuE1gAZtBhqgCARpGAWBat8PTs0n/8x78ntZoX8Yz2upQOKDiNxsR5bp3Lgx+dsGPh0dHxzmKnVqvVG8Mnz+Wmqm2eGWPCSUslrLAwu2RXrDEuOj2hxaeC4sPAVmQxNX/x7AvE16v1VRXHGtSmbfGVl18mbnGq0SQvoNh4Yg+kyXjjyqF9AWS1BQGZVMj8mIh87pMkWbtq9fD48PF9j0+fXZjcsn6pO505VU2aXqNJpDm0ZqZ2pvkA3P8/X8NPPjc+RCnAxT/7F489fXR1LNe+4PrMVKxLhakvXLPSR9aESCgizrlQ+Xlh0Mp7L5o8c0TECBbF51msY6XAMwt7ZrCWnTAQehBjTG4dIoYOQ4lpUs4xkOjIMAiDKKW0IkbQSCgAnpkhMZFzDsNHK0MayYsAREnMzBrQSuDsonOMSgVlDK21BQCF4tg5BiREZaJInEcQ8KyI2DrSChFJyPsC2GJQ+VIZIGAoSJFSBliYoYTesNYqd560UkoxMbCgVuiEy1gZsCqEZEghCCFqHTG7AjHvHCIWhhOEpEhr7XK73CfR2jmG0uYFyyMMsRwpESIoEV92Wgop+2D5IstHCYYJdHC8GLSsQ1aPPhRQoVcZFq8MypL/dgA5z6DY9tPKaGTblQ//1yemf/DNpfU/r/V402V709z10vVJI1X93CsEiaxXrFLb5UizS8W5P9h614l89LPT1zEqGs6RayfdJIyboWoK3srErkY20zt718iWF4BEuVE96lW91kxWS+5cFZRWyhrF4rX1oh3nuQVvwUeRaqooSRIHyHnixVmf9SVjblMX8MyE37Bf8qsiRskyTuJcDIvqtvvYT5NmXeu8Y3NFRqjjnAKXVYdqDz300ImTB4dq+oW3DKNuffhvjtQr66pNlfWXbn/DlS+6+YWDm5OnmlCfnn3uxKl7HnvspouuuHKx1Q+bNIoiAXLOVeq1c2c7rhKvXrVu37NHr/7Fm17+q7uP7T2VjI9Gw7TIqcWWdIeffPB7wyOrP/7hP1voz5+3bdveZ555w5tej4aVtypPwGjrWwSVQXD1wgFMR4jiPAZbAi7Q8uI8M6OERR5GujgA8YVqDQAEmFBJwP+VJFsW4WAJXGCYAitJEFCY2fnUWtAqRMGy7EMszLvYgwgJFR6qBIP0FILZiZCIL22IJFRLCgdhOySpodEHRkko6EuFLxEhZlHE3guLYw8sXPiKkvKMGDA4gIXGBhY9ZQy6ICCegVkRIaB3jpQJH84l0UBEYm0AgPF5u0YpQ1J0qkIdhjrQBooLc4V5GYY6gZd/CIfLQ0VBwUZbj0JdA7mBeNEmqnb8oaeyfv9z93zpyyceXbtuHUsHSWVqRFzUpl7MaK3Vikb7rU5c/zi10OdRRWzVZD3/4NVv+MmLfnNhctuWZ79303/9xobjT0SSLXlE0an3JBDFuI51BWF9Mz/R1h6yjjTmWG23cRrHfclZIib0IoQpERIyWVXrtkRYuQwicHHT5EteV4CU2Cy3nWF7zk8fPnbsaK9r1Wv/MuW6wp6IElFhqgYIiMoLIkpEqAiaceXg0b0gIyAGVG6vS6MHEmZwLhcE0sYLsHNRFLk8ZWZr/a233gpCXkDyvBJp61xRD7BHDPIYKjDWEFdQA6QwDwm+bYMjVA8P13Zun8jmahUVb9zaeMFVl+5Yvf7gmYVVk/muCyfnpjtxFKdoIUIizc4GxTeFFPy3iZQPPQ3HHhRahhgw0tqRVT4Zgj0//un8f3y1+Uu/bRoTzYrpKYkWZ1vnZg4/d499cRZ/5N+Hn66o4alzS8cm3/A78SWX5F/5zC++9CqOvO/2IkicFkEnisgzW4daGaXD2JKI2LMm8MxoRSNaFkSMFCGhBYcCPs214igkIDZmn7IX63yBzkViZ4VEQMBbAu8FAUCTUpqKyQoIEgYCByjIvSNF1lqFSAJggYwJAEtEYgWG0YP4IH3HXmEoKplElFJeA7PHgi8PXAJRCvcU7wFA2BORX2HvOMh5vTAoQmECYJJAEgQm8QyeCVG8K1CeAqENoIBEAHyhhCfOe2ZQKldOYXHEBGdTrY0HdM6JsOPg5oTBrwYRAZiCnh4QIlpg7cijhAIYAFBASZkhASoyIgLAhpQHGXQpFSAw+zTXWhOAE1ZKMUMBuwZBBBGGclYcJnBYgloFGBGd8wYlRxqZqB/cd/hTH/mYas28872/+CvfWbvYbbf62Rt39W/dver3vnLc+mZV5S4XwlpPlmoqTq3PBXZXTr9h5OH3HH9XW1CRR/AaxZtMAfT72phKDFP+kvfaH72fD3w5u+TX1dIMVb32NeSMbIRRz6GpovF2KWWTKAXsqkoJJt5LxNjPsl6aS7ePEQE6QVsZGvKWuF9RxzbgtjONqOd1QyqT0Gl307lGhNdvGn/h5bt3rGMLtXUbh3ptiVkk8t2sOYGwZkh1u6dHR0dXr6t8/pNnk2TI+kWXm3f/5oVpuvh3f/2lQQCemtxw8uTJStJQcY/cJ/7+yisXLXvjFJAGw04sZwBmYe5cRZnrbrz9kYnvvfhXLjt69DCMj1ZUP++YaCOcO7h4z78/d+mF2+984PuTtY2ve/W7k1VrNozdef01LzjVyiBWWoCAxcdaIQcNJkQFy21iBGIAAfRSQEAZgUmQmYg8gGc/CCQEKMIBpS8iRKJRIyCzY2BFugCvMIaWDwmgUR4YQICQEZiFPDtwUWyEhdkDBwEPUUSktQcSTYBomb2IQlECCODQF0W1EKIwAbEwM3hgM8jOCxlaIsl8rpRmZuHgpCseipGQ1grYD05YEUQgRiZCEfCeQST4JYP3SkhYMExhESnSZdMYnMuJabBBmEEEXAH2LmbOjEJEqCR3XgEpQDK6iKyh0y1YRHABCHLBiICkBMhoxAJcVqS4zITojYoIMcu4EWepHHr8yUf3PnRvvzNZX0c2hkrNRRHlvg8d4kjYp5BdOZSdv+mchuhHWfIM6hlfOX7V2++64ReWRtZuf/yO13/mZyaPPjfr+garuUVEcVpAKFNArEbVUrWLa+v5PSdrGtD3cKnT8WNDPu15VXWEHkgZFJv3M1FxRBGatD2xeGC6sd3YnjU11mMkjgGdbiSnHz/w9BNtWk3Vi6oXXNkbvTTqzfp+S0bWFwKcAUEnoFHqRpDgutXw1bu+tOUlt5749v0sPdqgZaenv4ssOIUSY+SZWAA1pz5PKO4L99OZF77kZo4x1hYi1XdOFQu+KHDFIyISautzKl+hX4KOAyYwpHQFCnpqbOh3b7912J2HRrMl23MZZwvTpzetG8kycMLOWVGIgnlmCVAcEIla2WAUQERLDNooFCYUxwZUrDSnfOS+h8aO3J/+4R6Oaiea0HeAC21yrVM3IABMntviosQunaquPn/qHT8z38o3nJqPx7ukta5om3MfXVUw18oQ5t5BqaMWCq9y5DlYll6ESRQgKqWdy1VkAAqdMA9CRChMZWOMiEyUABSAXu8YIw0A3kuoXEVEvA84KCn8laGsRHXIOoOqInCJLEJQAqhUARv27L0HEQcSl
V6BoSOkkUiWM/HyJxSPh6R8eCv8JjWqcr+IiICiQWc2FJdYAmEGdYDWKkgAlOdIAG4EIAhj8BUPyBcQRGWM4QJoXaA5wqfpyKBjh8EELfCyUUCc8+GXUvE1hAiaFCF5b5fjbukHF27p4GqLmlgKxV1g/9++uhDfCO1xERHQRmcur9eHtJGvfP6O733nrjVDE7/9v/7HPc/ggVPtW65pvnKb31Ad3bJ+6LJVeO8xATIQ237KibKLYAmkkpsPrPvc4Wzyv05fN1Sv2l5OjhyKAyb2gsQV0yU1NPN4fMMfL/zgd8c3v6Cttuos7auONlEt4kwQUVLJ+5mrOBRQLNTnqsuWjOaco6hCBnPdHPZa56pX96N5mnpDkZPuoTXyyh+hmewt9jGf3zYV33jTtqsvXT0x1Eg7rqXAZbgwnS5tbxtGboNqz/LqsUozHm5sYV4crl/1O7/9nt/5wK+PT46+4xcnDx/Z983PZ0PN0UEAZv/MYruzZvXVaRrfdecP3vTmh6669uq5Vkdh3O2nUUTIpIBaS2dMdfjS3RfkNxxfPNOqueZoI2lsrFAUH5058KU/ePiiaNuoZBvWb3zL635heNfmtPHE+172e0ml2WrnFdIA3SzjSEeDQSMEqJR1IsIFwApDYF6erhaIpv+uMyUicRQx80C+eLAGBtM7IsISCYwlDjEsf2NMoAMVjevw7aCQMHTFnfeiVMlQAhUETYXBM0UKoLhUKobTAABCZU/o+XuTBAqgImIIn0FGJhghAwIRsQ5ZdcGVXwmZHvwu7wYmp6C1RtRECokcOyiRaGEGRCTOOa0Ulx2t5c4BMxba2FyeP+F4sbJiaoPLICCPqIsRwIr3A4BVkDDlzA5xNI5PPfzY/Xd+6xHfiuoj1vWQtNHa2gwyBSQM3dGEP3jZwfOGMwBA4ctN8r7G8Eff/R+mcdUlT3318o/8Te3MnmELogmhmlqCxFvXa+TcYSBOG8noEKoKdNbV85n2EGqx/exsp6N4BCFR3jtBj8JWovLpsPWRMptPfb113nsxnY98z6uIfC5su/X1vOpSfMH6mjA3JrLqRKVzavWZJ2ZWXyB2sYcVjhoKPBEZgpFYtJZhQy87j9/0/v/TjWvX/vrP/viv/g6vqwNAfk+/Kk0L3EULwEqMZlWPEwST5/n6NevWrds0v5ARKvaQRBXxYThIxaMp9WQCXGnwfElrrRUgIiEQYjmd0SLUWlImhsz162Qksnf96LmZU89csPPqzPnAWkEZoOlAmWXG4aA3KMCAyOzCQY4CXpzSujU923vkie0bLm+dm0l8W9KsxjXWsYrWH97RBzi5+kBf6dacru3+w48oSJyec/2ZyvatppG0TncSlQxp7bJcEQmJDoP3MEFRiogCnplBQCQ4gEqJxbV5jsJRNen1U+c9gxBRlmW9TteQyiUPqocSOkUIxkTBG5RIswtSX8jsENFbF0YmQQFAEcVxTAJF2837cPeLQIUldacsBQKJT2tCRYVsRaAAInrmcEaEKlMKU0kYBCERQS47UQNWRtmbEpEAkaeVUnnP31ch71Xl57tiYCsoAenEufMAQEYTomOnWSlERSqsHhFBpUABK1VUwgLI4lGUUEBeBtoElFiS4DVtbVY0XkqkZbDpcMJCEpALIoIUOg2hmyeolXDh80pl1RvOEe99HMd5nvd6vWazOT+z9MlP/euZU2de+pIXv/xlLx2q6Y/881PKNH/xonhkcmrv3mPjI41uKj3jJnS1l/sRSNN+NdFd9uri2p7bhh79lSPvUXFitAbviCoMngg0kxWxDmo737Pw+F+en9j0gpsXnviiesEfqiWlISIVpb5d1cMCtk8Zo9aK+5Y9+ApYGysHukpgnfRVovoZeKlA3qJTFY5Q+1xJ4+SuxdGvz/LZa7euev1123dsHrGSLHbx1OmeUT1xUb0WpX1OF1HXGx6sjlCh2XvgXKXWqFf1qtUXnjreQoKbX6U67fg7X4zHx4ec7w525eJiUkmaJ0/v0waTKn/6s5+79rqr2XLGvaQae6cAIFbQWsxgurdn7N6J9RXOZO21WxfOHf30b96/NC2L5w5f0V47vqn6/eMn3/OBPxyNG9FWm6wb3jl0bbudRRQJa48dodi63GAEAyowIRECFqwjZBJCED8YJEDpOT8Y9xYLWxUiaPj/Q9Qjoi/3LyEEgFZYiAopVMZChEgcCHISZqsiFBqCGASKwDsRKRxKVChniRAYimga+AUCQdKNmDCM8hAHCGQPQDFFzA5UsSyRQ2YujOGADKdx2KniBfSKBFpKgLdSihEYiwGwEAohIwgAahV0C8MdCIyGIrVfTkQQQAoCQpnHYIkkX7npBn/inHMlJMUJh140lGluOHYsOy1GCCXv/+gjH3967sT82qnY5hqH4wrmPkr7adVkNlXVCv7RNYe21PKaySlpfq3x3s/Ubuksvfe83r07P/6hm6f3n0wXDTsGgljFrBTOZ6l4hHR0e37ZCz1kjYfuGRG3Aer1SA4tRXGfMsye7S9d4SwkemwxXTRh0OY8oTYaBLxnTVSxc1fu+fDeda+YH9+lfcbCzblnhpaOnlr7Aq0TRmJT1Xlb2d788JbVJ37cmri439ysOG8mpJXUtSiCHaP2tRfpM888cOCh5zIj509uuObtb/vB5Z+lA2p8aawLXXA+iSpOWzReoWZIgKWi+abrrt2y9bzjs1mMRrFLu2kUhw4lAwAUAgcQ8qOQMiJiAZQJOs3hVFcEiERKt5Y6R45PV0bW9F3n8VOnH3r4EPf6n/7nt0FSm13oeOEojiR3SmvLXhktIhjqMAABLtVVJSGdeyYBUAQKLftmPTly+KQ9O51POYJM5XEXvFNsqBIpNb32ZG1aNX3lVGdm14f+X3/1hWNRJ7IEkMUbN2Q9AC9WOWQQRVrQr1jBIQDDAL4RiLAAFCR4WJDAqEhr6vf7kY7YCRF1l9qXXnzJ+PhogCFAMM9RUQiWYed7ZqUKfQDvPRCZsl4OyItC2gmWufBO2KBazkUGSb3n4H9RxEUoZjnh+YSzoARqDCrDkPPqEJ5DJQElTCPsq4AiCQHMlfNmIWLhUFIEtmXRoi8D+SBhAgBRgQmJyMJF8gxGBBWhIHsO25ZLrZZiw3tGBtQ4qGcY/ADbWfR3gti4BKzrsiV7iQxEVagxFPdkoJOFSN7bcM20wl0k3OQASo+iyFqrlKrX6/v37/+3D38UYv+mt7zpkot3N8bwjq8+tzetbRrTo8NTszP9WlVZNVttkD4O/aFOimzZqHhe5RXW9ncnvnQwXf1f3RsMiLUWCDVpE1p3hASY2owiU735j/c/fQec3BtBJzn8IK+5HEwOidb9qjHK93PlolRjmndiT058u9dTBDHpLM10HDWxgqhcNQZoJWpVSqIRWPXTpyYA4Fd+Q99avzrt947P9DSKJmtqZPVwgxIN/fmF/PjBsxdfFaUtzl1OUFvqdKemkiy13//h90+dnLnm6u0bN/DnP3F2fLLufM+56uApDw03TIyCabslQ8ND3/jmnZf+2+Xv+aV3nD0naT+lZIlsLBYWFhYPzz1y
0bZKNRsb3pl84+Pfv+PvHuSe+Hzx2mtvWXfJ1r/99Ic/9DcfvmDNptOnexCdGPNbfbdqOWXjAA2qRHFktGePAEqAiUiVcPcy3HIYu+EgsiKEVuygsizuOVKBvS/Nfwbxpli3/w3NF5raRYtq+aPCFsBlUa2SRMusSu02KWpEDwAKB4WLACGzIEMQNx+UqkVNWZjeo8+9FyZmUFSwB1EJB0TZisinlqtPKWETgzvDzICiNGnQyz9KJGwHUIi+QE5BWQeHPv9yMA6nEGKARSIisgQzAimFa1Ag9y58ZgjAWuvM2YDUGRwpxS91zEZhZpuj1ZM/efxbd941O1XVnpCsMj73SSpLWoPLVLMa37SmtbmaDpl8ZuiSX5u6DwAuav9zJ3/2za0/fnSouXh2VS1SNl3ICbm75EV14lF93Qvg+lfhpS8YG2t2nvhR/8GHK9HcxfXTAHCkNzQbQaS6c/P6yPGFizevPxdl5JwmUEjM3rEgCiJa4FiUuOlL9/8HHWz04rpyC1bggSv/IOrN5rVJAEBvmUxWmaDuTBolFz725/e+5D9vGV9494snZxdtN5VGnKGYtZP0rY/9OAM/Ob7h+1/+6q2/8e7kNc3sm+3mxMjskcVEx5VE9bJMrBIQhlTXajNnjm3avEEUWgdxTGzZmKgEWyGDrORMki76gkQacfkRe2ZmRxQiKepz062//O6na3xvxEPa8FBTv/sNV1LDnD3V0pVYoYayuoqNZpBBG6TM9MK0g733KEETFYghY44NnD58rOKMn++aWBTpGlYiVuz8Qjrfvmho7Gx6eOng1vf+df2VL9ezs/PnsmQobrUWJtZuTftc8ShOnGblVUZOE7LIIHFzzoXTnJkJCUihALMbLHp03Ov3TKRDVAtz6/GxsdmZmQsv3NXPszRNk6QKngOtzoeeNgXYVAG7MNrYLA3B3uZWStKOZW9IgaJwXmBJZwxJUBCmGYAni8PIMylFZcwOMTJYmYRrZmZxnpE9oAc0Ax4tlFNdEfAsZVYLYXIlIrTMKpMSdkHyvFMscP60Li0QAAjAsgWAKIp4hdIyEoj4Qb4cmnuIofRACiRGQkACCQiS8oZjIVfLwTbO+xC/B6lAsdWFTKm9F65WqQKzAOVNGxypIR7neR66HY1qDQC+c/e3P/vZz16w87y3/+K7psYme10LafvLD843J9dz71RLxjrOekOVOEk7tG407sk8WFORc303rpS7Inri1uYjv3D0fQwVJ1YxcoTIXjIG1h5QNAJnaSoM7colbwOpdp77q2E82VE3TR87R7V4cuOqKKPFvJ0ZrmTgxWWZreU21sKgFzu9ofGhjKmVYxSjhy7Fq+MMDWUGoqUl+Z1XX/o3Pu5sPDv7ZJcE6lFDNImP2EsS6Yee/uld950+8Gzv5hsmv3LT9iXMvXEpwdmzZ5e6x9esGXrisX1XXjMxNjKcJNPrNmenj9dq1aEt29cPVsXR40+JrSql4grUqs1NG5MjR/btf/ZkfbRaqzX7/Wa1lnc7pxfOPLv2mjW2ayvbss997L67/ubRNaOTad0B1p/Z88ShE/saZqjb7i0KS3NRN9Nxe1G1odtIohjYKW+896wZSBc90CDrP7DhVEoIMUSjwQZBAK2ozK7CE0dBZqGyrl1Z+4Z0n0gH2Y6Vf0UlNj7EkpCcFfO254t1hP/pXWkAh+TDBmdkZFYCAApBQMLIlIUVFF6EEK6OWQTCzEiIwnL13rP4SGlRAY0cIN3l3hQQBCqHZYPNOLhCEQmpx2DnlhERADjkLd57sA6MVkp5EYLl0ZIsI28RkdSA71Ww/4vNSOVRg4ha6yiK8nz5kBzcUgYhBvDg0GsFT3/vJ0/4lKNhXGpjpVqtYcoxco65U5Iozq+eaCOgqtbPg/3XpHc/EN/yUPUVx7PP/NOmP5oY7p1YdfDGYw/Vjz6m8hatvUZfcwve9MKhVVtdFFc57y62cx+TY5be1mYE0N1wcnGs1Z2r6MeMevjMyQ1TQ6ri8gxQMEli59gLg5dAxQTrGAAjFdkedJe8gXMTV3BA/rnMpItRupjVJ71ORJn20I72+usZtY5dd8lN1vKuEkAGsLJI//rvHzVVba019ep37vj3/B+yXf4Fz+79KVQNVXW32ybSmrSIeOXytFuN9Stec9tix6EC63MFIOAL40kp+w3locgF4p28L/A0A6wMSiF3LyC6WjNrVq0xMh4xDg2PSZ7u2DqVp4Ziw8yxiZxzTjhS2lpLRITIUrRAQ+0SIrEDV4yBHEdhyOKgdewk4YKRDR3IWaUOkBUqYIXx9JqF4b121ZW3117z8wv7jwwliWkm7YV82+WXVLdtXWrZmlYIynj2WjvOxEJYRmH1rEwYmTlQaJYbuQKMEBkDIq3WvFaxMbVuv4+kp6amnGMQNCaWICnDjFDQHlCR9z6JYuasVGqlwXqlku0Qgx6wdwYpdtEpBVaDAFluvOD0OUhgKeTdAUIlxWGhEERhoQkVZjahKB/ANQGlnMg6WW4xIaKUYgVSNLEJEBkL0vfgOIMVL0YILXceTMTLBhwzMAJqpZQipYKRS2gAsGMQAgVBgd+DGKUGtx0LorBY55x3vqQgSzk7BwBEYMDBoTBoXCtNUOob4IDDzhzAtApxbLR+4sT0Jz/5ycNHDr3q1a985e23gkraS73x4cb+Z/c9MOPWboI0jTYPNboAdz87fbx14qET+fAQJVmdpP+377zw1/7v00vV5AOTX96frfly+3rTz1ysSJAJmV3R0UckQKVUVVTXKz9zanjny9LuwZa/dFPr0Pt/7wLfz373k08tVlY3K1q63X4fwcZa0SJ0X3795U/tPcmilVLAijGJvGWjoe+UEnSxMj0r0dmlyrr+eUeTZ6BGOXrPDFa0z2KtXY+vvuiKG6+UA8c7H/vEffc+cvya89YttZpzra5kenR0pDnWveyCl8TmlK7u+cG30vO37zh84IB15tln5gZP1jldqRgltajaSXvnzkyfcVdc0sv6s0dm643RdWtXf/POz77vvb+5buO6//mP723xGYLKd//9odG165LJ5vQzzzQb4yhgOyQ223LNkU7rwihpaFL1ZGIx62U2TZKE2ApLoiMbBKaBB60OLi2BhYuGHDOHsDqYzsJyHxVWZGbFYGbwQ8ot7gEK1SdYFvaTQko3jgY65FKQ18M4A/D5UlseJOShAICoNJV2TLKcpyJiGGUBYUhhRQQch8FTETUVhrJVSWGkyz74/vnBrysqIQEQLsfhVIhpURFo1YqBGqxIOABQhAr37qCmTkVx78EzS+Cf04C/hEIrJDMH29kNwnyZ/YT7bOKIA7F1BZcJWEgryZ2uaO707nnske7YSC1HRRzHdeTESeZ9DC7auFmmJl0EbQe8r3rDi/Kvv+3Ye99hF4+MrvufamZz/9HZsdsOv/w1e1UMAEMzR9YOx2u2rGm0Z1QPGn4+tTM9bGIUS6wrqR0fUpmFDe0zqzfXH5jOHsuWTi6pR6Znb9o+6RLvM8fMpBWVjQ0EAEUVItRKDMYpZeJzjDBIXYNUXJcUOrEeYg+o0Bxce6v2C18
8Mp/a/LYLxqZGGu18adNE4zMf/9ixYydGR6ayfjfGOH8RAGRDx9bf8oErH/r4p3oz57A2zGC8WIxUHaOZ1txtt7zssqtvPHB6KYmrWd5HVMwu8MJ5hZt1CLYFFl2W5ymB0S4rngsAaKMVuzxPq9WGcoszt730wmuv23TiRKpiFZPO+ykRRUmcOSvCpWA5kwymyBQE5zDS4BCKrimSRgaoGCO66pp5nvbqPGSMoMqdRJXh2tyaU+ueWrvuA3/Xa814M3yme3a0siVbenZiZCiPSYG0IQVdqYnui8SCmbOFLzcXqi4IaIVJwK7oQdFAZwQR2API6Oholro89/Vm83Of/+LNr7iNNBGRMlo8O+EiNxGx3iEXgAWNJCgBAxwQazqOEJGl+MeV85UVLQEImo5Qas0P3sNKPIjzToEqH0A48kHEI0sIrxiSXBDPrLUOirclgKJIAix7Cc2NUDIAIBWqGlKC1JYj2Yp5asgSvDAiDq4wfLghhcWwDaH0Cl15DjIhhVG/CHpkZCNQpgTBkVdCp5DFO2+VVoUQPLNjpjLPgODmG44QwBV9cmTxxKjK7KHoEwACQBzH99//yL9+9F+SJPnlX/7lSy65JPPO9yygalTh0989i9VVztpurr/61Pxzp1ufu3dh9fjwZHOK1Fw7kfnTnYqe+sNfiT/9r5+4ufbYe07/ntF1SVhl4k1HMbInRlFEhtAyi9F9oxz0arXk3KIb2fzmP33rhre+dXdGonx88yWTL/ngvUemJybrta50CebbYAxE37j7KYqgUktmuh7QVSs2d8YjqKpC7y23YxlZNdL5wg/273rHtlOj+1NLlahaQQvGUdLIbO4hn+1wtRVvW1/5i9+9ffbcdLYt5wTrpjY2Hs/9JN5xwQ4anlk4N9FaGKqOwPTZ9Pxd1Yfv6zSHaoPntXbt2sm1tto8t3ZDY9OW6rfvkvvvfe5Nb5R6bWxu5thC66Fjh2fSfnby7MzXvnjHTbdf86PPPhvR6HnrLxufaq4Z2XJw78OLrbSB3Y0vnhy9XLcef4ZWmbH6RkJyFuKoKplnBMeWLAspQY/FhlxW/wZBJyDsJZgSBcqZ50BRHRxTEGi4hZtW2FDLUXmQ2gIEJVReqQblvS8WklZhOxtjvPfAg4CqgqxVaBRprZm4UJPm4t8liOaXHSYM5oQQmMOh3HQrN7KI2NC7BiSFipR3hS7sYOsN3gkQuA5QZgYFfjm8IXgGS7kBg0oDlN4PiMTgBzvUOadMWQ+U/psEWNgdP0+Pvfj28gaGoVHA6LL3nsoe9eBqC+cV1IgSRdHJfUfuPXSgaip51qvEEcTk8o6mqlAvx3ius/jCF9f8KepCbSMfPKx2bjb7IRLy+xoob/YfPoQH/33xzZd/+4cbsHNk4qLZq19z8CTnMgEAQ9gYw5GxWI8lZqQ2NpodGhvOzy6qE2OVyrUbs4dm4wOtNq06eDzdMC6jwwY9Z87GsUGtFLP3jhShSCk3T4xic2/mj/r1QrbndaVTGVc+86ZOwgZc4ttnRy8He6wJ/p5jh7TK3rZ7q4HMZfyFL30Cpdpj50k5Ar6+j/vpgb/59K4bb7z5t371wMOP7bvrnpRTYV3t+8Woj9698fVvyQCElet2K7VEPHqfB54RkPIhwyozHo1GCktsUIEvioOUrGg3AoDu9PyQS5q0KDJaiWove9numQUVI9vQ5iVSWmfeIZFOtHbCxRG9fDSHUQR5DvJoihQzeEKvYGzNBFuswaSzx7t2LgMzDLEXzmp6aZ1s3/K6Xlu5NDfaGqg1FB08c+KGW3fnOassr5H0QUBjzIrTNmkTVmpYuKG5FGvj2SkoNqFzxZyGiKx3SMgsSsQYg4py56fWren2eohAuihTkySBpYImEcJZFEXW5rGKStw/DYJ6GMaEs2OAUQpSFYNiN4xboKxEi6wfMHyj+OLuM7MBo7RmBCm+HZRShAGdBADgg/cpLmf9EP5WWBAYi5Ml7ECAokMwCL2DlAAgzOqxUP0Lp1gQ9IFCwCio4oFIKKYH8T7IWBKiE1ZBGMKxiCALAhqkAJAREfEMCIo0lvNuo3HAREatAjzNQ5HBBLonaVUi8tl7zx7KOXHxiqKk3+9++ctf/ta3vrXrgp3vfOc7x8ZGlpZaGmIFgkbNTh//3qF4dGw89UvVav3/fvX0ptHsjg/s/MJ3j33zWTs6VGfpgG786JnDr7h049deeO+hmQ2fOLrT1BdAUaVOZEl7hWK6kgNxrLXNMAcTI4xH8cLp9Io105/725eOTm48PtNSjA781Pjmn/5j7RXv/fyzixsqtV6KtaQf6cSNVOoZpRn6WCoOrUdnAKtQSYUy0xt24/M6rziD1fHpx0Zmbvvuhokq98VmkAp64arBkajCcZNhYX4GUtVSYk7P51PDkeT89rfd/uBDj9z59R814njbeeefPjI0urojurVuYuT0hr5IfzkAb98/MhYvzft9T50YHW288XWvWJy+AUlMhDu27/qTP/nHz/7nV8bGx/u9/DuffPKHX3q43cJmvVYfjXpRtPP8y5pT6+696zMLrezXfv6WhgzJtuPe2BF7ixCKjYAcKMoAQGv2pAZ6igKIqJEUBoGJMPElAA5mHiXQBxwVtWxxehSgCAzDHR+CXjmw1FoDaGY3yHCL5c0SR5GU415ewZVHVLCcEAtyAaQI2i9BtHXQKA5OHggoAhyIxgBQ9g8BAFGt8HIFREUK2XnPToAUkrWWmY0xoGhg51VUOaHFXP7SMvQWbYDcu0HraEW8JE9Q8kRLvLSIcy6w5wnJi4grQBVIwP55A+agxYFKmwLLtox6K0hT3gfJLY2l7IlSWmtmQIJaVX3xJ49ML3RGGkmVFFSqFtBwDZiBQEXR0WONB+9p73zLmiNzu290n5jG8ZrxAGiV9iQWa3e4N8/Xts1vOv3SO/9u2/wn23u/Pvn7f3XMDJ/Oo/nFpRkz/lS/mZlR+IMf1lund7jHp3qPnVu7d+upoxsrC1ubuMf50+3Fxw/l1+7enBA4xwYBmQkAnNdGxZ4ycMTgnSz1+1nfxfPP6k0nXWMNYeSTBiiD4ivZgiGqddowwU7S3LUorn/niF1fnb39xlX7n3v6iaeOXHLJ1sf3HmxGNcn6vZv8+iMbe/Xu/vsePvHUvktufemrPvj+E0893T15stvt5g5v2rXz+utuml9gQ3Gi0DqnKDFKY6lqHAQ4PLMq0qbA/ggMNB+m/oiAWqGiILsvIlppAMqHh3zN6ze8ZseGtcNnTvaSKiqvGAEUevEoxY7xhAzBgQ6wrKMFhQWhwEYZBOXFK2bJuL7zAjFRmp5Nosain4trzTQTSdoLekYUrB69MT02HderHp0ovdjvbxgZHt9xUW+hL7HqojZEIMS+j0kFQraLmIdkUGtAYhYmEg1KKxEZiJGEeECAyMQKc5cpVem0W/32UiCyO+eSJLLMzuVh2wTPAGRhcMDixIXJqFKkSC1TaFwRb9I0LQSfnQv3wZBi7zPnoyiKTCETT0guwClRitrds0ZiLMbYUIRADDuUkX2g5hHCwHpICgloRPQKlaiydQ
YSRHxETJis8QCVVmQDIboiQgBX+1KyRweg2eBQIwIvSGQAPEPY3lSk5QKKjBeHwfQNCEALeiQBYLZUdJ5DGibeM6ESAO8CO1nQKELFzjvnUCsVmXDUsnfhsSqtGQkEdWzEee+cACBhUq105uf+5V/+5cCBA29885te+tJbQLDbyeKo7kEW8s628bHvfGvuMMimarvRs/MdvnJL/bdfVDOsd22Lvv5UB53rOzXUUP/03dlHv3/Xdy/+8dIL/vPPLtyVtdv/9JVzbqRiEsasB5hQFFEvMzW9qmpPzSo1lp893X/7JdHf/fmrumrqyMlerWZy9BWgM2lrJBn5+j+84bqf/8y07AY+oQQhp3m1BEZVsMnQRk1aJeIwxTxGU8PhHNMqIGiTRHpuz5be6zq/f/fX5PSqmZY723HU5loz3jSkkgZetHnkxiuHN1TWnumm02daqybXzM9n69dteN9v/+wXvjC9eDaePpEawuPP0tTq9ds2+be/u7nYXhgE4LPTs2vG1l510XjUsPuOyOPP1l98/QX3fPfrm7adv5T3b3nNax564CcHDpxpjA3VSXf7SWPCpd3Ouen5DZsai/PTazevGWmMX/jqkStft+XImfYUrd8gL63j9sx3SLEggoABAB9Ad8B+uf7zII69FOJKroiOwgLAVEh5F5ZEUACFBrriTpiCBEZo6SiNQemCrQqDWS6Ic4QUzLziKCoDpiCitRYRxC9H6xCXBmcCEgU0V+jeiRfvWQuAAtJKGNgPWHbIgdzIwgwoAIo8s3cZloKOAhDk5AJIgssB0wDWFERCPIjWOlwGO0+kQuVtFYYhcTEOLIcyIp4INSIFSSzmILZFtjD0BGApkW4gEMZ9VHYOgAUQWWy4vQX+B0S8C1rFRjRoct4SobM+czZJEmY2wD6qdHN44MH9WlAj51HU1IkHv+ilqslwYiC94FKd97JnjwzfMjY/nU2y6s54s9SmM9WIa9nXKj87T2sm8rkntp134cbrrlw4tLjvsaU9j6266cVjC3NaH1rwU23Q4N3c5z7Xnljjr7n43lW/3H3PxF0AJpurnHx67ZHHu+cOtlqH9p/rbRuNgLwBrGkjrHKCCrM1xjoC0E44z5mti4zavucT+658H2YS2SUhg5yBSSZPPzGbbBpeOHq2mUsaDedM+fRnDnZvuHDk1HP7uJt3F1sV5SPC3iT7853/d1jsLY2MjWZZ/9HPfQNGo/MvvmzkovM3Nkbnzx69bmRqYt3Gs9NpNcIuWe2NiM/YgmWtdRCvV0RaB9myYgLDVrwXYwwG304BdIyOEQE4BC7AoUYyPrpp3wMPb1q3y2VaNFgXKVUSXQS5MAAJkxUsjRwAih7LMj6eg9MOkbXc66UTE1PZeENYvDQS11NCfV6qqcb8Fg/Qqp8bS+Jh4H5XeCyitH1u1XqiKGLvSBAQvffBsY6xWKZFQcmsEBgLRVMpB0JKKfFMivI8FxFR5FlirfPcpWm6ds0655y1WbUKM3PWKksEkY7CQYIQqKilH2fYooGWGA6O0NrlMPSCSqUCAIPaV8JcFovm7QA/DCUamABQEYaNGaZNzllri2avIhDx5Rh1cJiWeXe5jaAQOhAqQBY4UJoFQKDgJLychYhAcEMPNyfgm7h4DxFJWTqUTcRiG/83JCry8mWQKvQqPYvzPgoKfERYXqYQalIBExj61l7Esw9cqUJpHtCz5xJV7pwzSayUcs4FrpcxRmn99FNP33nHHXmefvAPPnT++ed1Ov1etx+G96nCOo2wze76yZGJoYuqHddXUUb9ONGVNWsfeezAnn2gVN2jr+qu62NzHP732NceX1z/b49P3Xz1lniTbd6598BpVK5br9eNtpHVUK3Mp5nXNDKsZs/1f+a85B//8paT3arz00O1ocwqMhkLV6GysNgbW7Xmnz5ww+ve/1Bt5wU2PSGVRsVrFdsOz7M3dd/wYrlSNdzzgUJBpTaYOHNiIwDsiU/6Y2tHKv7idcO7tybH56fv3Q9Jqp45dvLbj81tHD70jtsv3lRVactGUZr1KpLXxyZzUmd0NT9xJLOu533tmQNNPtjod5eHIMqveqo1t9g+o2V9u90eH7vnR/fcf/ZsZ2H6zHVXvOzm11x57S3XOHz86NF+hucinDBWC3C3k3Zz5aPe6JnOjpdd8Z4Pbp6Zxonkwp3+5cauytUsYlIWuyvWDECsjZQeWUUjRIAgjIqCF0wxZQwMBSmQiGE7L1OSAMCDKEAk0lQ0S7z3VK7VFbMeBih4wFJ0boEgqGuJLxd/WEiyAoqBEmrbsqs0oOSKDEwGFRaAwYJ5WLTWQ9guOsBBP64YeJf9XjKF/eLK5rn3XgEqAil5eqF8LTZmIaU+2N+hP18g03wY5ykKgpHOW2RQwdEkIMFDXyFcgC90ESB06gGstSs9Hz2ElJ08hJEBQUG+JxHx7IgRIjp7ZvHgwf1Ki7PeVCpEus8qQuhBrxJpn4m19mfftuooXH/j3J9/aWZy/URlbQXPTfufnNUnNql74rVjkUTqsZ21/MyObW7P2Ig9u/CTB+mSF83PHsL24pleMrFxfOzMMf3TT13I/TdszX78SPymvZsv23zF1I3XnKudP3zlW4aHVgPAbLrUaR0c6xxe7U/3e4ebS6d1TDbzGDlASdNet9WbPzeXdfpEWkWzF7X/dGHnq86M7wQdV6W7RtVr861nr7v0xif/c+GiCzKt2iIX1J+YOTL/+Mmpb3zpS0t23rXqgo0e9OGFAgAnPj87bJo5giUV10i6dv+9P546tP7E0UPvXbf52vff1kdIEzMknIgG1MJIpAZA1AEICRgDzhwLh4wipnhhAci9y71DVIjA6DV735o59cMHj33ot9542RWXnzq3qKMKsRbJy0O8zBhRBCT4bBf7QHiAvZMScIGKwtc750ZHR+q7NvW+/0NpbFL1WLl+3VR7nbNzV61FOTs523SSegUVH1dGhpZmz60+b8oCeAYAUIgeiMETkRAqQyLC1jEV6R54xwCoNTMH1nNYy0UwNoWYYu4sg0RR0u12u50+KcxSQAIiYOdT56XMVZVSRKo4EXTAdgWqTJCXAIUEZX2Z91NjDKllIdyw5UxAzSGJeBZW4daxt+w1KaICZFI+KglTIkQULlBghT3ZinFXOEb+W0iWEq4SpDq4JAKF000G/OzQu4DlV4EIkEJDIPzecFIQUe5duKTBPK/4QGYIYkY8oCWzKk0/igOlBE8hosZCSgxBOe8ABLUOLevyNGZrbYC1e4A8z6uNOjMrUrVazVp75513/vin9+++4KLbX/fakZHm3FzLCVdqVQBi56tso2p15tTcN08PNdc1++lR44dGI/WTp6Yf2REfnc5MTaWV9ryuJb26S/rXyHPXVx7/8rq/f80VF56bbjuDH/rZ6w6cbW9bO/HJbz7ykwPVqALsqIZJijC9aK+uLP3t3738bDsG6VR8PReHxC7LUzQ1ZSu6OnN6/qYbL3vfW4//zVfPjK0e7XZmKB5CrMQuXbsuWZjtOG8w73KkGEDKDakQAcQsjWG7PnXp0bcMv/rybVNDTT2ho
1NnVz/27BPxRBPj+myezsylD/31j37u1ov+6Fc3zi/ZVmt+5wW7Vq/69GLrrIqWFuZn77zzjtNnz60e39zt92b7BwbPN0kWx0fXL6X99VtaV+4eOnb6RNx61/iaC/7pw3/xiY985ET7zOl6c9Nrh3on9elnjz30vb3dmSyddeIyTbRqzUQ12bjD0cG5k+ePvvIS9YaOZBTPRrbpyZYYu+XVJCJ5nhutVVhCRJExAMDes/dIREAYsMEsIY0bzIBhoLc8MMFEHLCBwzsGvdnSVwaKEhWAVggSqCKphWVHIBzUBcV1EqAKKhxhV2AITygiyx1nAFCFhV9gR3BxMYWBAQeebkHMCyJzZZ7qGUp9hXDgFmHYutBNKsiECKgIAQhK3r8spwgSxEAGUFMK14heRARDbwxLXGf4NG9dcQ89F4EWIHSVB9M6GTAkRYLafEC9KKVMUJeDQJVUp08unTp1Ik6U9ypJKiKIXcvKGSBmjZHd+4z72hC9+QU9S1F/W31fPZ422f/8jDunTG/y2Pz3v5+8aOvEMG/Kn9i//uLZjdclB79CJ/eoxVPS6k5D59wcrr547dL+bp1BGR3X+vGiGp9fPL74g6umv7HxupFzT/UfWhid23hDvGZnY80uXnfD6foqANCuP9w7Otw6OLp0tN46oNqHgGWk0UxV1F7qtlvt2bknm4ePX7ZlvLH5QnXRS55af/bASF3ZpaGDP6rsOI8h82ktl/Gda5781j3//GhLR0kz9W2tQZDc9Yz7VbxocoC81Rlpjlhc6iFUK7WTs9NXTax6ydrVwy+8ppNaBt0zgF5pIctSAeUUEFFgp9PySB4ECYPuU5k4sngQQVKo9GALaCCp6OidP//iX/7lVx455Shh5dlol7sB56wgdGKxFNxgrayo1RgIvTAQBcsxRQqQUGD7bW9/6vsPbuLT7FYvVnuVLCGrOtvceH81g1QSFhd7hLksH0q71fW7e/2MQ+7MiM+DCIJG8hqwpPE49goQmY0xzjmFBCyhpvTeB9eEOI6VUh4ZAJZarSRJsqwfKGXhzcVCB2EebG0QBSWaDITK8W3YVOV+DkscoMAKFXeKyAcJ3JBiB4koAUSy3jKgLtq2BYaZiCwzlpoeHEYdUKAqBmcCIq7Aai6DO0K2JQIoGLpzg+MGIBTLK1TyB9YohSateO9DkaG1Zi6m3SGPFgpkyBUDKgJEUIJhzgEAqCh0s5e/cVB2D3A0ZXZPJfE3zJ98IDrZIDeNAJBUKuFKGrXqmdNn//M/PgUEP/PWt11x+WVZ5loLnSipVJSy1gJ4UNQXV0/0T/aemBobWkynq6qZs89s+69+fe2YH7/t+g0PPXXkKw+cmxirudhrpz84+m8zyYUTV/zCvoUDf/8fe268cfNLN63esnnkqq1rvvDdZ8SnpIyBjuNhImwsHP4/H3lhj2tZ3q7qob7psaio19exGFSeMgRSSh07hR989w3f+f6nnupfMBTVslxZx8bU5ha5L1AbNYRx2u5TwcMZTPcRkeToxuS802/edv6+g9htzZ/L7Njaqe0b4KmZfhLlFV3NuFJZLx/73H237Iivf8nOxW5aH4kbY/H2aFXak3pdvfzWN5w80ca0O75p1ee+9oXBamGYefLBn9xywUW9/knm3v79EwvnDuvjh7dee9Hb/sfP/fzPvg0aYxs2bD626tnd2zZd8+ZL56YXZo4feu4nebQQPfPTJw8f/Pw1t1719qnf3GCuWbIzTKOc13rSNhgFhOCg7VTUYQHBAxAMZMoIAVSiJUKLbFCfIXPRcSr/ZXmBAQmhrCC8CgIozSIDjYsQ4gbLDFf+fwAIoFbgB1euTOQikZZBbEYggYC7JizoBiJCxYXJQExnAKEypIJCVrEr9TJYIexiAnBlF634gQP4oRdBwAGEZjlUL19q6L2FxpQs48AFWUghoHh2hEoppVEFEwUoU38iFCoaC8VeQy5TkyKQI6JTYVeqQfnu2UNwAbYwfbabpb2oASiRAGS5q2m/aJQS4D7rCCqjdCh+5au7f316fGztmrinjE3T0WZncv3k0w1oPfVYDb917hUv7QItVUeODY1tE7CyQBwPn3d+Ldez7ZOjGme54tAMjacAoJfMOu9/jHj4BOw4I8O7R2/a33n8qa88d192JIm2bF7zwu07krUXt4c2t6ubzo5feXDDqwGAXNZoHRppHRprHRmb3jd5Zr9Nl0ylMnr+rsM37rj3uv+70Di1+rtf5+SOT/+fr0bHV9OxV+vYHutla8YFlvarSRPXzHCOXa6lboFvSPX9kWZrqnEtGp7vdOuxjq1XXWvrdMv6NWbHFrtjU3YurWAlQ08Q6NssJfIGRXx4XoXYgwSTLkFQFFD6BWEplGGDBayVJha45OLzbO6d91FUU8zWZgIDT03G0lJ4EA/Ca5DMrogWJZGNGQnS1F/+0tvO/HHjxEf/58TJE7E0OCOV6JmhmcmlTQqrzuXdtDPSaEI2X63Z+tDIXKtvlEYurjiQBxSg815K+sry1hroL0IB2UfELMuUUiQQxzEACIsCZOerSWW02Zifn9e6MA8J8cYXQ1yw7MsoGMZUEDqr8rwCEgBAKWVMxMyBxoqIoZUqCCVuq9hooV4UkWDvHHrpzztBwmd7BhEFy/26QX6DJaUvvF+VSW6xM4WYhQAZhEq4R2GtGvLy4qwrPsrQslUqAAzC9uA5Ds4+WAaPSDiVggp+me6VFCwBRBqQH7BsYodTA9VyK9t777Jcl0Tkol0WKg0RYwwzg4LDh4/++8c/Pjo88q5feNfQyHC3mwIAauWcC/cwcBm1d8MVufuJ3nw+IY2UUXSujNZ+rmIarSvf/PnP/J9Xrm6qOYmHuvOXNB68pvrMcxd9cebk8V428ku3X7B+bKQ+FZ18YuEPfvDEdw/2q9WqJxTXqFXM7PFTv/3GrZfs3nHkcHe4TrnpiNcmz3P0GiqiM8o0p+2oOdrP+zS89nd++dJ3/PER2LaGwaJGpUzWt0pVXJ8jRSBBGUwLIIsAU1gG+tTWY7seP/hsl8j5Siw2T1x6zebVB4/PxNXhRoPyvu1JrFdt+tr9z7z69Rce2L/wvW/tAcwf+MljP/P2N509e3pm7uRb3vTW+kT1f/3ah+7+wh2DxXnxxbvvf/jJY5d4pWVmpmtpwdKJi+jCDTL1o6OPfOyv/2L1lmvf9DO333jxTY8fvKcbd3pLrcsuu8KfOf4vn/jXZtN84P2/8bZf+R/ox6fTVkMPR1neMb7KNQc2PNNBEqGRmICMyfNcnAdEz5w7KyCklVAxLWFmAWCQlbUvlB5EgwwPRIikSFrLF0kg2/CgBi33lBRwqpJHu/LTBq/BwobBYCX0ocvgh6Wec/jwghAPQAKklk9JKPdFeQ04wFsNziIdZHCYizeUGyqwVwcbsxSuAS5J/IPsofDQJhSBMGkORkahkA0t/XDCqzAvQ/LskXTAmgHi8vyLMOgGltezAhetgjMBYeA8AoOgVlEv8wLQWcpy2zfi4ygOStqLyF6kzzAkABnaBr7pitpoOnMfbEmiBBazej3Slf54jZMIsmix
9eQPzNRWe/FVqemfaybXQcU7Hxk08Tqfzl6+baNvJmk+P+RsrUYA0Omq8xXdn0Q/hUjum992PQ1ti146kmzr5g8/tbA029pHh1ctnFu/ZvhiE/Vz33XJYmPDYnPLUmPzzMTFR7e+EpDQ22b7xIg9c6771AObv9K1z5r2LtPbenbnr0ClN3P1F4bi2sjeW2zevWD1BSf0c/tPdus7ru888qRSvebmZGHXEv6N7rNbPdIEocV2l7zKIuqA28y0a26x9qKbG1nUchlGkuQeNAp4Ek6VxL4gc+KKgx0xMMBDKlliiYxBCvr7y8aaWqOqjta++u0nrrx0e11X08ywsqS1eLt8TENwbCUQUMFH4L8VW1BMSYt1ppT3HpwQ43xvZmjjBav+4hvHPvUn8J3PDEcx6MbcmkM75i7nupH5bGK0JqNDrbu+s/2GCxko/DmzAxbUBVoSPCNiIZkRuL8gSlRIQAhwaWmpUqnEwaRPqSzLKlE8OLLzPI+j+uzszImTx3ZsP6/fzxERUcVaBzEKhMLbhEp7cJHQQiDy4pwPg20JvTWE0KpdGXLCf4pnrVRxEARhFIAw6AJVdF+FSCh4bYMHCRFxJQojtIzd4PgYwDgRAEAHq08s4mqRWBXfWJwpRQ0iKBjK4MKNNcTRQYgVKVpcUBbZ3nthDlO6cC4s8ypIDXReKEgaSbG9EVEFZd3y0ATPqIhKVPbgRhGRNsZaCyhaKV1gVQpJbUT0uX3sscd27959662vEJHFxUX2EkVR0A3s9/tElGWZiCBMzpyafezkUj48PMzDKKnSec8N/c4nDv7Hn13ymT+/vSo9rzVJ3o3k9xqff6i7/b7FHZfuOi8+PW84XlL4n3c+8vkvnHWVeGzVkEaZ93m1GXV7fP1G/YFfvX76aKdZr6XpXK8DtaSi2OYUZ56rHlmDr1cqJBLZk6dat9xy7fVfOL53Tobr2Heptyauk2PJrHfcN2EOB6E7qhCVCHvH1RNbll7yjSPfXbh666qTrX5UtT1buXjbVH7PsbQDM9NZ7OqmUpGh9MFT/G8f+atv/+i7p87uXb922/Dw6FfveibPWwcO7Hv8yf/IsK6PPPvGS5d7qLV8zxWXDaM/MUwjM1l72BHx9siM3PXt7184tbbRrZx36fB4deriTTuu2nndmaOt9WvGnnnm6X+985eB+n//z//ytre8/egx9rgQa3H5gpGRiJc81osIUYLkwxIK6aBSCrVGDghbEQHrC5tLAPBFR0ScMPHzDo3BSyGCYAE1+e+6UYWFH5UF6SCgLodzKGyCwpIeLLaVZ9TyBy4rBgICOOdCtyaMukI1aZm1iPz/SMkIwCJa6zAqduyppNjyypZ3gE0oJCIGkNLSWAiRl/vtxQ8p7UYEMYDJizIclQ6ymFC8j0rt2wIjCQCFsXIJsnXAgMwcHOGL5AaRCAeaXyKMpANzCjE05QnB9HNOYgAA763RVRMby4wgaCJyUANhYuuy2o6fe4v79++dGr9jb/ONm1sEkenqG6+qR5vzB85xBBWdzy888NXG1OpsYtWsanSw5tHnNo2oYydM/+Ri+ys/Sr5zR8X06w1hhm5HdqloLclRUBkm1ad6q+bS2praTnC1anTfTOeZbrb3XO1ih43NU9osmSyrnX24evrB9aLYSSbxwvDW1shWWXfBwsh5RyeuHtr7K010PlqwybmZ8+4hQN3n+Yv+a/WBm05Mzzz+1MPrN0STa6fq237hafW12fs/Vt2NAEA/iQnl2PFT9chIpNKeq1eTVNLrolE1OTpx80ttx2mtbSKUowG0KApRswQsrZAKiIdBaC3Udj3bokGjB899JV1NW2s9e6UwiqJex5mYNJhcclR68GED5RpEBBYqi8KwIIsCdMXkI2THHjwRNaTeb+/VjZHt7/uHEzt3H//kH27uSWuzxA/AqFadhFLJqt3W/LEDjV9+Q97zZULKQbADhVBAWEARFe0iQCIRJq1QwFobaVOr1XSpZgcAlUqFmS17l3PwVbbeMfgXv+RFa9asKfzRPINSGgmJYIWgVbGfmQl1MK4omvgsDEHCTpiXgRiBh4BlP82KKyc1AEH8U7FCtOxXHhmDnjMTEJH4gsVPRJY9UgA3h6IJZEU1vLy9CRFAAnRFAFFKUZSybwbLZO1BOytIYoVyU0QCr3tASgvcD0IaHGdcSPyIIIQEyEMJiodgBVc88ZDdh4VCRIV1UTFMI0FRSCqKsNAYKcQyMZy8wcJdayG8/PLLkzheWlpyzpk4AgClSalo4MoQTsPGkDvyTOtwe3j1eLOdtkfELDlIKi5ateovPvvor113/lcffO7QGVy/3l0TPXRZ5enXn/nrOz919Irx/a971dap2ojN3fH9rZffsvX6LaO//42nKyYalSQVXZ07+ZF/vnkmi3JtG4Z9q1ozzoGkHmISInGOSGUKY23T79//2GLn6GWXXf6uV61/90fnJyujACk1qW9VzHHVpD2ACLUABtAcAQp7YCCQ5PTGJc3fPnf/Tee/woMhqPouDY/SSy6fGGqOuu5CZzE/OZ09fe7sYbP66VnZuePiay6/rdddJOWXFjNTf+zGG15t00rHp+PbXwbV7w62dK/yWIwLis3ek63ZmbypR0aGj1Hcwmu6pzo4gkPS7J587MmZxZnRVZM3v/jKb9z1+Xf+3LvSdu+Vt77ytte8fv+RBQ9SUYpyJcpz1Cff9OKEHWChN75yp3OJZ2aQEMxwhR4WlABdFcQRCQeEdShbo8WSXuEgAuWYRkRggF0OSAfEAFCwwgBCXExbkMsLgxV6jWX0DaI0xXm2opYNSuyDNw/+kIhAipp4sO+E2Xnvy/4NM4v3XhWytSTLf17cH8+lq+DyznXCqvRoCqmGrMhImFlLKealqGgHCIhnUIRKQRgSB7+WFT+QiACLoVIY6A4yA1kWgmAIxgDB+TP0TQVBVC/N+rmYCHSMpBWZJJCGtXDkLKpR7rdyUlSr3Hjtlgv6j37w8c1nUHXOpc1J0+r3rr82mReRM7Jhe3vIbXz8ibPTzz5df91OjI0Fi73c61p/uJ4/sX/2Hz7QPPTjCd0EXdfNhbQDXTKbQF+e9040kwUauruzeOnxZP2+RUnpAYT2+KYXXf/i6c7i4X37h/ziJZuTTsQ5K069sS7yoF1nYnrPxMK+eufB0bT3T3/0gOBO132lWdp17EXvjLve1lSeLBGo/uqnZh9oHRyKzqMLmwr69eT81/2y7SwsXftFPKDcSYlZ6SRm8kO6MjRVO3z6xJr1ky+0dXXBhcOj49NzrqJMjsAagcEJI5IGZVWBepMVqANEpBUChaoAuoMM2opl3qi11q5NP/PrV8dRhY1DaLOLvQYMXnzFSwZDhKIzU+AdGGAAFCyMa8IYwgSxYsd7T57RNKxJ+RP7d73yjRaHTn3s/b1xnpweooY3MVfqo+np/evO39TYPDVzYikxSWF4opUoDHYfzntxXiulUIcGVNlQImQfalxQOkx/EdFaq4wmrYAFFeU+B6pEsf7SF7947Q0vfNcv/GK7Gyx7ndZ6IG9ZhKhQp0pxjHCemSiCcqM
aY6hUywvHjda6aJ+GNR3uRjDHCC1tBChHBYVnS4mdBgAvjIIexAtrKtpoqKj4hEHFSRhGaqpEiBBgIB2VZyEACnFx/MiKXwQCYeS6PAYeWBxisUuVUrjiT1YWu6Ez5pgJsDCGA/EioQMoskIvesUIzcpgEkZEhZ5fOLC5hImCImHw7J3nWGsWYZaxsTHrcmCJkjhM90UE2A9OEyLK87yu4keOnYuixPhckW6hrUqsfWrzru7A1Prkf134skZ07+eesX+48T8f6l9w//xVl25NX3PhqsmhVTH0vNcf/OVbmlXcc7QDve7Q8LhtyuKh05/9gyvGzt9y8sSRanNtu5vaIR8zxT7GqCrc5pQ7edY6OdNsjGRjzcs2n39iSS/a6RuuXLP9q6c4tpEf73VcpZqmyIhxg7Ul58UHEykNyN4CACGoo1MA8Gz92QX36ooSIsyRnYvef/tl/V4UJZkz1OvbvN365N2Hzh6p8/BSZ9GePHVs1wXbd+y65PCRptjmzOzZETPco5kjz2WDgCdufNXUxPTc8ZFRJrTH5s9lkT999LS1eOH22r5nn/TJU2v0N37w02Trhvfdf+83/vav/qReH9924YYvfPrr8x3fJNWXfle4qox33pMYyEAESyNwEZFSpo2IkBA8M7LR2jsXVpEqFJ9CcBQIRnvMSBQ2adgCuhxDeO8pJKCIEHJTKXyx0JMQKR3QEQXcF4SsYgpi9MA6eHAhDawIBoE8uCaEs9GHJACLIjOs2HA9xWb03heWpDAQ9PBlfGTvw4kRPnyQQYaZ6oBNNIjB4XcVNAii4CfBIKiISIktqoUV07twzVIMxTyxwsIhBoFIcxh5r2B/BKJgocgVWlxQKM0tZzArIDskQWSXvBckj4XGgHJZLmAcgzaYJBEIsTgMcqIqcb6VGtTW1S647a3yn0fatbv3yvDIqU5ng2r0f/rI4oXb61B3yuDF58VHn+0IMh25b+He6vz+RyVOhpby3rf/o/2JaffIPePp6SGY8rWMfdoYVr0liVPIY3edw/19OBgnWV3/kLDuat0o3XzRtTftuunIsWePPPlTFPuj46e0n9y8fk1EOkVgyRlBtHhtVYXifn9kZhaibj9+bvzZT/r4VNxf6I6CskDkSKSreuSWNoxODq2tLB3qS+qpgete9Ka9L7lD/8hFUUSUVlT1XHexkeNC1ieQTXFlSHjo1pdBzIkjrBC5vGIhZfYoBnwvNhXSgsAMUpJHBmspDOYVqMHYDgBC8zKEGgDQjVplok5EzTZ7ghy4ZpXV2oiItVYXRHLAwugAwiAWVcADi/dMmth7B6gICZDFqkgyD2BiSmA0bhykc9LOVg2t91l/9SUX/WBXBADmwbOt62alrqJ5222OTtz89tZ0t+rrbHIWz8DEopURZgTSSGGS5lAYUTwTELAAeKN01utXk8QzexAwKs9yEmAGQgDS3jpmZkv9XgaYrN+8NfdFRHTOOZejojCjEgQGIYUAgB58nhEiaOWD6wShZwZSwICoSARLMeqBinKBhJSC5igk4DnQf0mIkMBzgfwMXWIotMY1kUkKOgcRERZxlAt3NBHPAEACbmC4DUV3G0L1QBRYjMFZBQCIQQkQKQEwJgYgn2eImNSqxc4XABEgjKKImT1AFCUULIPDKgkC9KQYwRR0BQsQ0CsB1yeEwsIipaYVgtLKChYHpSJkwYA+EfHCGskYI8zsmBhABJg1gmC4QSAIBs0gQhvBDHLFROI8InhCsWiqrN1P96VpMtVjARRjc4WEEC30Ft5y+3nfvuvJH+/pvv+9l/UO/OuF8b6fPfW3s2n/Fy5Ze+MNq/c9Mz+9mI2urT/01LHmaPw3n3ukEo9jAu3Trf/xmuGXveLiZw/1k2hY+j1CqIoBBJ0g2Nwy5eAJfGSSpFrB1GWxbFh3PgI3V0+9/qrjn3i420jsnBlmTxo1SKevxpXtJ3Ec/JVFwJg4yx0hLS01aWZyfuLoidn+2qFa2s8BsSZybN7nckZ6kyLdRNNQHP/ee15599f/7dDRUzvOH4mS7UNDQ/sOfbs136/XNs/PnxzdvCaOlnZc+Oxg58dqZKq++9TSnqHGJGB9pLE6jitRbcG53HO2ftvwULIFAOSCnRuHd992+wvf/SvXP/bgwuVXvvj+576Z9tdOja0i5UeaEzZziBh5cR6EgHyfsIJKRIAVEJIXxZDrVDmPJgHrM0JtjAEQKrTMAYSggOKRADjvkYAJgvJpbm3oJA1qXywNFcSLVto5VzS1gZRSIB4QHDsda52JZ4dEGkmYbSHOqhUVDEYf1CcgsIZZPAaqwSDmMYEHUW6ATyoGd8WYVhWQfiVARM57UsoQccHj5cLYPWxAEVgxcAlpaBC+8cwYhDAJwyQLWKQgNCM/v2xSSoEQKfEgQEAA4sKURzHbcAJgmC4GJBUgEAEZz058AWoJkDdShQ5BKIgLSoIiBbrrlhqUiCevhWynza6/bxY2bjAKIq9U3mSVO0amKM8E0ApYcCgm2nLdq27u/cGf71t7y6t/5u7vff2xvdPVo+rD/5Te9urGjhsWGzGNjvfueg69arrZ2d43/0mnfWVUrvr6Sx+rs1U6moeqVb2RlDKE2ojvLJlM5RrlfIyu7mf7KpUcjYZ+W/ial7z2kguu/8pnPjF3+hCMIER6c6onT51Zt5j1tkwujdQka1IGGQJGFGsce/oQHXm6es5VFj6u8qGjN78gaft+A3zkKY0AdbKUVLOFHx2YyXFJkisWUdI+VHdN8nmp+uvRqq63lrpeZzXBNNEuz3xc33rstLri0o03v2R+OiejmT2CZAQKsQooKNpn1hMzax2BiEbM81zr0NgDo4wTJk2OvXNBPwkIIOgTEWoA0EabF95wQ2QUFrMRNpHqdXugSCMJiCIFpXFNqEiYGZkDNjiMQ6z4SGlk0joCiDKbKtKRodOnjn3//odXbb1ibMjYbpYt9usjprszAgC8c6/ZPt3cfE222LPt4WYUjQ/XZl2LJBYG75iJkajA8UpZggOiotBblxJAVK1WQ86oiBz74LvoAQmQSFmb1ZKKtfaqq666dPfu0eGRtN/37MSrKIqYnSYFAmC9FgQizy4IeiECsGhEWFZUJhw4WFORsnLZbSOgMCPCQJUGACm8CAepKEDRRhdfzIIECRQwgATzA1LMnGe5tTauVgbpNgAgCwuTLydwgF7RAN0ozFKEMwmTpJA5caAAiUf2RaXLArLsmiIu0L5AKQVUJP5KkQLgoIBPCCLB+bGsdMtLQoVFmV+6QbBwkKEPxYF1ubUAYIwJ5AcEDNJXBVAAUWvtnGPA0PQTAYTgaYfgRIJYIKDWBiAIHSlkgZ4/dqo1VN3gXB8JQJvMea0gxqFHD3bfftOu3Rf72dPt35367IPppd/l62vq3JNHj7/18m1qRO7+1vcv3rVh+/qRX/37+3RjsjlkOh139ar8T3/rtfvP+kqDLSO4UDeEZJbDBUdxnPXBeUFEpXVSraBwr9s9fvzkW2/d8c933780NhZRRxkib73UDaeglBd2Aug59BWCE1nkJTu+yW45cfKhpa1jjY
5iFRm2TsBVeRx9iwAYK/0sPn5i7uZX3HY73Xzq9AlStTxtb94wNj+/JD6uV+PpuemlVnzk8I2DAFypZTMzDxKuS3sikOf5oW63m9R6pLOk2ttyXm3L5hnS/ZHGBV//7J/svnLqz/953Zah+9esu+Dee+/+3vcf+fm3/06zOTmxZhqgOj4+5fKsUa+wy9k2GdjnqYIYwJJyYjUA9yu5sEKqkEeW1HMmIBlkUDAJPKMVYQHL4tlLQAsrVAAgpeY5AFjnarVav993wf9HaxExUaSBnXMi3lpHREopQOVzr7URK8UEA1FBGE+w9wVx2KzQiFVK8YBFH5YulWAov4xDHCQBWGImBp0qLOdTGsr0WsomD2FgDcIKMGOQ34TSHqoYF4kQD3JvDK0mLqXuim8P2QoG+EaRPoQPDj38QQdrGUqG5fUwD5IAFqAADaLi9AAABGTo1LUOJhNZmlI0nj/45OmDJzfs2NTpZbW1Q6PrNy7l+2IbOcksskHn8xiAa5e8+g3qS9bL91vX/49f/bXayM59T/3pFVe3a8OrvvwNucahXAM9VF64wgR6HiuqooaQ+0kuWaKVdDIHolkDIIsiqTX83AktWT/mRrfSuVnJsbb5YXXYWL1h/aabr3/Vlz73qYUz+9VwAqRvanVf790ayNzsifb0TG9ipDMxOt+sqJoaT93Q0YXkyAn0lU0/fuPp1W+avfCteeNY1gBtwSp01YXa3AVypJIZOz0fPXNIjZ+/sXOmNV5307ueAQB5ckvLHq5XhvrUUWKiXjZSQ7V507q9h1a/8Y21+lC/01FGSdGAKHKmsDhQEwoBoji27BlhWay0qJOASCvllVLLo4gyImhmtnkvUpok1BjU7faNMSoAmpzzUsw8RCQo48dx7L0vskhmRDDGeJsnUW1+vlWtV+Kk2uv1xobNY48c/sc//fXJ87eNNlf/4rs/dOnOC+aB4je/TPc/ttZGh+7+6uZd50nuzl+95djT9x54eOb2X/256bN9HSS8EIM1QtgcK7SZEGAgagGhM0lKQbDPo0LTjpyQIu+81rrVao1PTD7++JOPP/HE8aNHLt59aSe1AJx7rwB9bgPAN3SLNSqPUuIytHgX0EpS6N45DMQseR4apbwVyAGXUQruIJTxikpIM65QFAmU33LTIgsXJDJxzhnruOwVKyjfCcuv5a8eqF9B4KEFddDCVwCLLqCAiFaKAjPceVBUssWLTIK54C087ygJKr7lYSQS6Blls0WRrJD+GPwu75cNmpY7YIjAJSBchEpAu9baEwRdgiDHPfhAUkqjABffS6QQdcXgzPTCqSWKh8jmEgnlHpWwtXayEf3nEwvDOr1468jcI5++uHL4jWf/X5LPq1Uj392bwucf/o2Xn/9r73rZwdn5D/zz/dnYurpud7N63D/2sb+79UzHimjUqK3WZUNCpNDlDs8dSVerVSRljCFPnh2S7vWzVVsnLlynnmz1oghQahm3FeYWCYTQC6miqSEiAmjZD5tK/+Rme+09h+7s33Il+bZH5hygluBIHZzUAFg4YpcenUs36UpUGdqwtUJE1Wo90pH3fnZmfrdwvZJ47vV7y6siiuPmUKXZaGRZf2lpif3SYitdmO9Nn12cnZuemT51eM/+uZmzVv3wmaeOxQpffdMPOx3ZsXP7K29+18035idPt+bmDx05MnPszGfXrdl14eb35l5Prq3EsRkdmRK0zEJcz7IUwTO7qC8OrVWgVEWIrCVgIkvWWyIiUggKAMUp8JogQHeFwYWCL3RoA3uw3++XYHjKnSMi551RFOKuc06As8wRUVhCIiG1EcLicJOyOTRYP6ETqKAoBMuVXUQsDPJYK6h0g2p4oCtQ9IUHVoMrJq9QwqeFinI5wBoBy9l4+VIryBQogAAWCnwylrzfAJb2IgBctspRhF2IyZ5lBX0FC1mRgOcQXDHeKiK046LpXU7Hwp5FSRASwV7bZpU6ZBkc+of/t/WGl1AV7IzbtmHt7osv+/YPHo8qFaXYO88+JsVA+dgNb33d4iU/al/8vg/+82J/7rVved2XF5/D2kfWbujPPjV2fE5zLjqLR5u1pQUPvunQVKmt0KDK8zwzOk65p42pMIhYIV9pSn9RKlHVZmmPogmT/ALRZGf2K0I3v/Ztjz17bM8zj8qQHnWdt3fyVxjFkrdTrxVH6WJ0YrF56vgwoCdVZTJpLoSza86bGf1bpT6p4i+sOkBZFQxH7eGx1uQZr1vzZ58xsYusqY1vTaOR7v49/XS+d/vBSmvT6KbX2v3/l+tVZV2SYn9E5z1bO3Fk4+VXbX7D688t5IlnVg6gBNaCIgrApwLQAwCgERG0UlAg2ItOqBcOd74AEclgEYGIaBFGcJow7VutI0QkSrTW3hXtzTDcDdAGZh/ARFA2TFiEJQi+uE6n4zijKMrzXMTHEfz4vh9Wh9cbik+e2vv7f/Ku21/x2itfeEt+XXfomNl6/sWnnrxv4cPJRX/+kYNP7/+zv/ybN77gMkZhZiCDiMwupJiaFCkVPECC3ibD8hQTEcnoANjhPM+zPKqZLMsUKBavKFIKm81mwNpffMklo6OjmgARSStNSpz3oZ2jUDB4eBOVmF6ttZS+T6E5HHo/PBill3nxyo2B5egIgjN4wCmukKoZZBKIKM4LS5hglRgz1MbEYcDDAooGu5dooG5RFKFF3C1nQsE4ZjnDEvAgAuJBjDEAYIxhkdzaUJIOmBuDk4JLs0Km5bVFuIx+Kn9giSLwpf+S58D4CjBUKQuIgSVDKHxDyTuY5oZvV0oxMCiEgB0tAf0ggkQKVMCOObCKgD0PjSQPPzm/qIZHIBMUxUAMJJ4dg2TjpnPZ1pHRYXjd+Fcemrn0aTqPE8OL6dhI58dP4j0P/OD3f/78y7dvcC4fTvo6iuenT/7jr18wumH8+OklrSykNZ2wt37lkwp33jlHWiWRQaBQuGvRqkGViku0ecGO6IF7MRnWvp/FPmIlQYeMmUWQgIPkRCDSOlD6xBa47fP70xlW52tRhtFGRD384KfujSqjUwkOr5Urtm/atX7YLranFxcQxUkuTD5X83OnTAXSTCKqN5sjfXdq8ATz3NQqoxMTEzbPkqQy1BxZvT7atKVpIjQRdNu9ah3ZU6+t20uzJ089/cD9T952Wwd48sMf/eMobjRGjtTijS+85pfWTe7Ys+e53sZHH37uzy5e+sfK0NAzT++ZXD22es22hYVjccVUkmalGoGPNebOOfE9EIuSATKQFfFBHct7DyACHilHxJBRqzCwYLYDv1uRAGkkIvSsAIGFvbdWTKS8K+rUANpw1g62kkjINxERi2FzieR4fh5ZCrbTcnhe3qf/HZf9PETVYEMhYg5FGFZQmHZD0Z0qquvlgBd0z1ZEdyk4aYGksIzExnLXCgISsfMCoEIHO7yNEOV5HzW4xMHAW0r9+WKTlkIlJapchYPCEPY5r4KvRErFjXOfuhNOHfVbJ23fD8e1k0fP7nn64Vq14QTF5THqzGeKXPPSN7zM3DepWnfO3njh3FFL7o6P3v34Qz9Zv1ZNDjujqkvzv
Srz6Eg6uRZ6S6uU6nrsTSgwS/0uC2grLmaGqjJG2IqNm0QE3SXquz5prjvogVNR9S0GL9E6e+an+x5+8Eo8cWkeX2uj1VSfTTs5LAyBER3FUSJIeWYpy9mlzpCqqlzMV9/5L/XFs6/90w89+mbz8Gtcb5gxqtKxFzWe23TihX9KL6fFjy/uWLWjObm5p+omjtPWwkLt280ndo/f9IqTRw7S0W+Do9QQLPZwYg1MH9r5tjfkjXE6k/qkIM+Fk9YHYLsUJneDAgNYAkNEKSXLjY1gdjdgQyzjD0REK6UqtdiLQ23E5SxCAZ2kjIiwFwjNSZaAuzOkOESm8hVUqLyQMlGzqj1njkUp00/hskuvEv4qOW40h6o+uvNzH7vnq3f0vnS8MbT2Cw7T1RvsmaMffucbNm0ZedXrXz6xZizJlvcGYtl15oEaThEqFVK4snAsBgGHMIiN4zjLMh0ZdqK0do6ZIXPei3R6Xev94uJiZsN4PLTmbZLECGALIyCRAA8JY3PvoigKXxF6sOFu+NBaLl+DuIWIKgAlSrIBBDgzi8BA61UAILCPQqCy1jpmY4yUpEaWZX0+KVhlZcwPglNSKEsTFYqSK0MphcUh4koVrMCeQq2EkMWLKg6UAR4yXJiXgBpAERHHQqEzUKDQRSTQKAfe5gDgSkdIKcTAlRo4sA6AaQPIaynHEdT1bHmrvfdSSJCEZV5ArwEgZxuqAVeuWmt9JaJDZztdrk4AZ8zMoI0WazUZBkrao/tbasO+/6q2n7sj/nCMTUjtZNPM5MbX8o2TlSsuGfrtP73L66kGVU+eSX/+xtpbXn/BnuP9URP1XO5UpmQZ7gsrqh+llNJGRKTUuBcR55kFHFdue+l5f/ntp7VMOc61ri2yM8IoJAFzC8jChASASJQ5phObAeBEY1+3f22iqghkxVXrdLhnF5Yk7ne7z+af/Vb/wg3Zb7zzsiFO+31rfd7r5iRRtV6LYjl3dobd2W5vLLXLJXCrlU6t6ioVC2dZljEz6iWUJR35xcXFA88dW716cmx0HZkcIN246fLLrrzJ5arf49fia8+cntaRn54+N9OaB23O27VzJvv41l1rpo/MrYrN0SNHPvvZz1YqY299++s2bBo7fWrBqOGRsfFKpaK0A3CKDakYALz1IErKWUdY6dZa770xCZZNoEH8CIskieOBOJqIZFlar9e1quR5LsKCQVRoeVGtDJAra19c4YhQaH2sEMcIlzPoGk5McwABAABJREFU/YAvA+HzukvLuMLnxTyAAGsARAms/WUS87KUx/J/rojB4eWLwZNgUfcuR9+Q+gdR3EFCQESApaTd8/lUyKAUBmWrwPKC8mRm5kAYC4cJMiAyADmRDH1dScdF9Siae+zMwU/855OjceIW1+QKYvjuD+8/cHrP6snhrJ1pxXnqTAxIjfp1P/em+ZdPj9xw4cveM794urNI933vc53Z43W/htScdYvjQ8pqdbLlj+4bMsYCqWZfT1JfZTmqTDB2wFpTEkfEuXdQaQAAdBYNgANtenlH2byiTJ9gJyB/787LlY8qo5qTVHfPZHPgJBLloggcEQtqYNBMnhQiSpq7H73uf8+sueDtf//K8db8yz+sbvqXpF6jPS/5ta++7FVRGptK5/Br/2F19+Xjj2/PcPJcK5k/uS9depy3LsHnmt2jP1z3ottOP1z1P/zP3VftOnrs1OzCuV0bt2686apeHytEfaUw9xgM60rCGCCgFNj6wP3kUrAloO0Awhor5v3hiRcndtlx1N77Tr/TGGmyCINPKkmn06lWq52ldrVa1aTyLA8Nn9AmIoBYG8veex+KqjzPEVGpGiOIz4SUUoY9tRfzyy6/qjnkhhru3Nklqq8ZWjNOuZmZWHSP0nPt4+fm+iO12mJnenjqph8+8OT73/VzHIm3TiEBgFIY0mdvnTifJIlzTgijSAGgc8VxH1ouzBxFUZCzMMYIAgOLs1pVvLchN9yyZcuRI0dGR8cUKULlvTAXP4ElrFBBESeipJwGAaQ2R0StVGhCuqA+E8pQREQMNHlHRdJQdJ5WiCcLgBCqUmcqXC2VKnFYRiYPhdNnMEXzuVdKMQJ778EHu0ARGWwwD0zB3aFQyShqVoBlkCSAaK3DhYTnHa7K6Ci3loh0sKoJxF/vgy0oBXpRGDIFmVkOLtYQsNiDIhuDOwyi996xN1TELeaC+TgIXcuhN0wNyvNRghAPBJ1BKA+l4p5wkI5iQSJCDSiKjEc0JCdmvaEEnA+KulZYGeUtsjgYjv7ijjOPrv9/j1Wv7K6+bO99p0bWjNQiGcHk2OzxP3jPNf/06X17FpPx0Wim19vUnP+j973ixLSqqbzNJtGUg8QuZsNUVjMAYK0VEW2UMDALoCCSiTQVbVQ63eELL9p++67H7j5m6xXV5n41Nu0+VCjghzmAhhGDPAkKKjm5BpzqrTrc6dnYVDK20HfJ6MiGOM6Ubo41ar2u6OTBY/ZLX9zzztfHMwstL9TvZ7W4vnCurRSArhgZ7nR6eZ4OjvhGMpr2sumzx5Dc5MTaPKtLZrM8Zei3WvONoUZzqJ5lXdvPtIqtnZ6Zt+KrOk7zFCM9DirfvGGSveUM4gR63X9InV2IF5WefvmWW29+8Uu/dudn584d1jbas/cbU+fft8adH6sLlb4gNtuiaDLNNABWktHAkhNgARvaNYqqWikvqZTiiForIhQpirTABVeKvHV53l+zetW5c+ds3hkeHrYOoihJ015QoPOFmu7zzjIAQBBcuQgH+bEwDVLewAcJmHx43mtlvYvlXGkQucMrYhQBYRECJhTCotkjhRTD4ENCHgy4/C9BHifkytEKimCp5Q6IyAHRyTLgO4V/HFROQooERAKbC0ih9469FBXzoAVd5iKD/x7e76S2mLdr1WSpnc99+KMnhtR0dbL11MH1N8ORVvvub31tpBmB9aTAsidtEEidd/3FzXO782c+7//3+FSz2+l96r8+2s8Oj61aZZrtSjMnGna9HrOwxNUkS21jpnP0Rqmu87wIHn0caWhBFmlNbK1YEWk0AADSRZ1Uk55zdTPS8VmaLaE3HRPXIuik2tR83p5VKNo4UGRsxWZ5RFUAsWnfYdBFRCXq6M6bH7n5t17wld/fcPxx5SAXXWGEeN1j2y4kqjeSDJ742aWRg+fe8p2x9JqGHeq0ekkSJbeMTwPIj9Xpn3xu1a2vXP3aX9k/c9p3jo5uGJt/av8VV16y9rxd+0/3bGQiAUsMK/OwYnYgChQAE5JSipFDjAgK/VDGglBMFs1ULLoU4XzTAlCvV5MkSlOnjZ6fn++2O5W4Oj8/770fGRkJ1mDLydcAXq8KEJZzjq3LWZIo8hbBa+ctotKJSZdaZ08dODNdmRquz3XmfF9RBLzR2U8ttZca69ZOVFVM0jp5+Dk+eOCi//dHC32KokjrwOgt9IqVUoiaiFAXVBnvbdir4nwURf1+X0eGmfM0DfNpBmHHAk5RiECkiR59+JFLL9l93o5d7W4PA1yNlGOX5xYQSQUbw0J0hgtcLgEDM6dcyG4oTUgUmciHw6Ic
yypBXkHbD/8XlCoERX1ZpvZYUhcACVjQaL2iJ6aISMBFEOTdB1u3GAuFzwNEJIHlmSsAFLZOAAM+rhAACwJEShtSghTS5NAAEJGcHQkoVoHtoJSy3gUPUQRQhcMaK8CcLUDIxIvhu5Q0DyiH8VQq1A8OLC6PGyj5iEqpKIrYOReyH8/OO621kuDuVPyKguMGQIaCCDEphd4hYhQleb+z90ynWZ3wvqsAAck5qyIBRw6NSPqeDd/d2Tj9lXUfefNlu3avHvmNTz03a1adPXr0H3/p0qN7u3c9OL92dS3zHd/N/vTXt9ZWDS3uVaouRNaDrlPd2b7tWyqFOY0xuc3yNNNWV2t1pRSX6hBBmRUAapBl+dAvvfGKO/70oZHJ9bad61SSSlXluRNmEOe8Ugo8Q8illMdM67Mb3Ibj063O1lX1fj9VYKxxumn8zFzerTtGEtscUrN9r10Vs74mPVqrNmv1NEsAGLUC5cCuQpUvrz1QvX7Hudw7PH703PBounHTeq3AOopNL8/zTnexUR+OcKifttglzESqZ3POMwKZlk6lJ20dVZM6e2tUkk5Vk7WTWzrZBidzVWj+3Dt+q7W4j218ffNVUmvY9HAv/0Gafo6FDG0+l06k2eLRM4+PT9wSRZGJ6xGNImmtIiTnoQcSkyog+EV4QzDaBLSH1ipN00oSbd6y/jOf+crp06cf+PH9b/2Zd7zsllecPn2mUq0iovNeFQxyCcsekAppZUTn/aAQwWXJdArowpXlKUgxfR0E3cF2G0S+8PkSBiMrgFqhAEUQKaMuPH+ETAIDKbqBGXbAGBIsn+SMA6H94tuDHB6rQl2HEVQQm1MlXisIHKIq/qyQXOLwL+Gs1lqz48H1rDyXIp95FZGB01+95+CBn1bPv3K3T/Xho/WFMw/86PGZo3uaoxUnRCQOI3ZOwE+88Jd+dvF3+smGR7JLhw8d/rd//PCxIz9JGms9zo1PVUbOKsns6AjMazzyTDp7LpLGfJ0nrgSdpMfb5EcUdp113g4rhWnmnFUEtSGf9zG3ojlNABjnY4UVX0nR6Wyhz5GgdBa8SbxjNGmC5J2SSFeyvI1g0EPE2hKlnGfN5p3v+Mjmvffc+L1/ZIW5MSlETcbHRtYeaU4mooBi5bL13/n99huPPvfWf97yuUtHa3HldW958rI/hsN1/5xdf/1Lu5mY2We3vPW39nzmj7d221nmLn3pTa0lRG9ZTCDsAi8TVr04YiFhT4NOTNBiQQDy3qMKrDCUFdyk0qk11CFKKaVBYGRkWCkVTCarlWS4OcIMk6umOkttl+WGlPeeCbXWQEgg1loTR0qp48ePVyqV0aFhj6g0WNcB1oYMBll+6T+z96ef/OS/fvazX/3e3T+ojTQrptKqz0INzsPz86rtzi1k1YmoOvKh9/7m5l2bm6sms9YCUsLMbB1qRYqISDwAFKKPUE4TETEUhex8YNGQFOmwAgJCIWYnJCDO+9yJjy7aseu+7/5g777nrrn2uv58x0QGBcKdAgCfOwAgCRJXrIPdIXhAhUSBg+u9ZxFxjpkJg+j1CkMkWtbxIClEUYgDowBV4axUULAHZ4QEtoNI6FCgB2HvRYLW/HIkY+bAYiw3VZikukJlpzCuD9JXxbkWcHpcnDgh5VeACGiFJbTFRQqtbx9sEMGzZyjE2r0XZlAMBBia/FzUDaG4EWZhVxR2WmtUagDXYgyJAAykQ6mU/Ss4oAErhxj6K8XdCHUDFUN0D4KK0IEAKDLCecgcu52lQ3MLUW07F0Rk0gTczytSqcQw3VK/s+Y/7KaX9yauWJpx339yuhIpp1yeEIK5+5GjOF4Dn3T6eN2m3utu/f/Ies84S47yXvgJVR1OmLw5a5VzRgKBiEKAyNGYYGzAxgYHnH19HeAlXPsajG0wxmBskXM2AokkEKCAUEJZuyttDhNP6u6qep73Q3WfGXzPTx9GszNn+nRX1ZP+4ZKH9/nJWRZXebWBxElPsiTXfJx62sREBRMAiKlnjSeP7SYGUDBZ6/hK78onnPKsk3/2zcO6udvyRFwdB5iM1buqpmnuyyrOhNRqq0r8/pOH2/cduHH+zC0bEBU4mTTZ1llz56MsU0hA1tBCSBfzkE/aqbKFlCiENE0xMd77rJUw22LUs5yO9/hEe4pp8/LycsBKxBtjJrqZd4DcYsYTJ074goZakglkHCCmyQyAlEWf0FeVDMLhLCEmZJxVcALp/KBAPJJgKmA5HSK0N225ULFAMK68zKo6rUblsDd8VPHhsnc76GB58VP3jG5ins54x7xa7yrnhKhlTRtRRbyrmoyBMHiP4EMIxChe1q+fW1g88Wd/9tdzc3O/9muv/f73vnnOWadW5aDVyilCsVQVUULwITBbbVrFSBDX27i/HQH8qgqRMNk0IerDUAkg+mKvjmOhCdJxlao2DtmNNA0a1gZIpSJxVoIAYlZnFjjukEPkfWJ9IMTGpdbSmmNBLo2HRSNHH0IYa3J57wEpalPjmmE2jcM2Qpqm4y1fIxxFLHGTZ8A4JyZmV41MpzM8KnfdcEORzxxZPnjykYdPe+T43f+24YY79qetsqAOiEDpCIGNys4nbpztPuXY1z9z4jkbzt39kx9957GDt810Jlb80o4ZWp/CcDhI2va8i5LrPTx6IIWsnSblxlBOFsWIhhZRC7dCQkqdAEWoAqo1tj3lhj2qsOKRBC5BKABXVJQGjc0CFqRZzimEMiCodZUXgxM+DDNK+lUZDKUpSVkR6rdf+2Ehc/Un3ughUVbwBQlQOnnr7GaZ6E44XvS9nlRdSbd/4337Xvqr+5/7Z6d/7sN9SsIp904/fFn70ie7HTP8WNV/7FDI3fbHvezEvd/aPH3w8qc+b+jUU5apgKiDgBpqkgsbjP4aShIRPyreB0RkthCbchEBwas9labv0+jkozKzAZCJbgrKoKWKUVcIVAHQWjs5ORnBhyJC1gigKgbQtN2uqoKZJ6cmkiQBg2yML7wxU2RD5UfGUDtv3XrrNz77pT9/8iWvfu4r1u2+aNePvnvs/rsPbHnaSXthabTXT03PPrbv0em898CDD3zu25/6xG9888RC6VlSAUJUu6ryH1SFwMTFTUjM0fsWlYqqytiCKhoIIpQlLgTUoIrGGkWtgidwoIpChw8fe/oznrZ186aqckhSFMMsTeK4EwGjo7BTFwUBnHPMTGQAQKUW12RARqugKiqshJympqwqYPLeJ2BQxsUgRsEsQIjTYmFERVC1tfWvQiQcu8CIgUBEiaITG4Bq8HVrHRGN4djZCCDq6kRkbHsSw7MXIV8PrgJCBICQaJw5eVAPSgQGSUQIFIExabZoEFVlw5V3CZCqOgQBBVFCQAUnQpiCMkK0KdZo78PMqBC8xDa7l0BMJklVBTyE4ABAxjL6kZAlMOZiYj0WAWLGIOMyQn2IR7MxpgxFarrkoALHzKESMwGLB8Px/uSGaarKURCDTLmK57b4MhTu1Z1vb8b9/9+h/++v/+Tzr3vjuT+5e2GRW5vc0pzpfun+Y/e7ssvpIB9Vh4vffMO2+SK1WBVlyZw
ZwypBQSOPPISAho0xRVmKQp7n1trhcBhvcpIk41ZSCMEAexsWBu2/fNP533nTz8LsqUW/JzRtfZA0dm6CavCM6jVDztWPslwO7dBzfrzUky2b2sW+4aKWR32YqgzkyiazRb/wPpjElCsnb714OuuPOF1ZPuwLITQTuToFDLphbktodEABIM8z76updILRqOqocosrgyzLtPQhKAB5cW5UWMqYWTJ1tGStJcPMFAJ3zGxibAjBVQNmHox6zNxqtViQQMFZQOmVJ4gtc+X8khpmm0zm+cTU2UwXVPNnZul3zzz5b2etGRVHK3fw4PwdpT9x375P4vLFKUx1Jjd2J9eldiKxXUBGUsDKWsozqwr9/vDHN/3wxu9/57nPedZpp576nGdd9ft//Ofbd5188PCxxKbQwCGx7jTXOAwiDCAqQGRQarqEqsbjEiKynnSc+NbZXmSVaB3bxnVibDNRbW5PSKhBAigTxZoUAYXAiRDVktHS2LA3jSkdb15LVHP5AUBr+0FYxUYBqJLUEtPEJF6FAAgVkaLdnEqIMD7QZsmhkzo/MIIh/mxkVysYRDSoGkoUoyygopCqBlQJ3hhTlDiqNJlJf744PPrY/JVhtBLuv+Eb1x7srDfJtIyKQpKOkEtLKqotz/rDV/Xfi4h7Zl7wk69++kc3fbebWpeJW9KTtrJ6feR+nWrj3GzwI+kPBj6d7So9u4e7pTLI3hRDnjK6MsU01IJSsj4TsK2JsreEUMLo5FNL08q8b7U65d7bWwM2CXqXGwiqhTNk1EiogFssy0q5y8RWUz6s8IhLW9x9xW/vOfvq53/gBdnCQbKp9b6ijgV5OJk6dNbTkyw9trSv38NWq4MtTpentn/13Xtf9lv7nvm2Td/93XJu75bb3jh56ZPLxcUTJ346aI/M6IHZkLfPv+rXXnrlhi1z9zw62mjMQIFaaEYKaIBBVeOBKRgPMiGK2sKAa6ds0ZsZVt2ymQgBgkTwEkktvkS0buO60lVkWENtPzkcDPM8zdI0mu4lSVJ6lyRGgAigqqrUJiGEiYkJIirLEhHzFpFxw0EpEgrf37K59ZMfPvz1T41+/sMvtGf9K3/lWb/1psNfvPbBG+RRANgll2prabSuUGlt3XXahZc+9cEH75+bPd1IGUBEG89RBQkBARK2EHwAVS8BQyztE+bUWB9CxAsa4noBKngvapCJSIjYuKoKqvf84hfD3lKSJOWoIKJutzXsD7I8Fak7UfXOjPTipiMU2Qh1MQoRZQmqioYkhMKX1EjKhRCsMX6sq9Uw9sZt4TqBjd7GWGfERBSVNWvFdtDI6qmHw4oRfFtXC4wc7Q4hnjs14rGuxkIz9KKoPtUk9VBrQY9nVEQEilHFlJmxFraNLOEmhRibsiEYY1y9sLSmcTT3RwSCSARVRfGiWkUrADW68/ENg4I2siQAEPUOvNZGTvDLKBgljKdnxl3VIhh2qGhs8DKXwI0P7B9IHkIJgVNDKt4JKzrPrtXlP+l8+MvLT7xvevu/fODinRs2/ubL7d9+7ObvPuRncnPLXYMUOy3LCwN35tTi0570+OPLSwYbk4lmXK0AIp6ItNHpbLVaKsE5lySJjlW7x2xRIldV2rVH3eC8cy/8tSc/9KHb5qc3JK2ROGtU1TtvicvSqSqhUYTSplistB49adQZfOXw/uv/wC0Olwu/6Eft9jQXJoFiEFpZl0wXyLqqO5EdXfCdvE28Hrwb9IaGJ6enZ5aXl355MhWbkGg40QAIODGRE5EhQwkVRREXobUpsxGRUVkQcuUcIibWGsNGosAMewlAmOe5c240Gnkf8jxJTepCRWQkOBVAkCAozlWVY5OAjBpXGDfdPX1u8ixGwxP3t/f/8NRd10xTWhXLS6OHDs3frYqIaZZ2s3SCKVtaWtn/6GP79x+84+d3bt269fnPf/7DD+991zv/79/8zTuvvuape/ceS5JEar462AZxibgqfM4KPm6KNTCCcfERdaRX+9JNch8rWgDAJjA3u1XjqaqICCh1wEZSpFqLREBX7XwjCyCG1cbBsBm+UlTRq5EZ2NCHap/jCB1rilQRYSLmqGgHaBgDRg0RRqrtpwBUlWuuAcq4xQUAEBA4gCKAhmATIyIomhAqYUC1Ck5xUGIGTAAPP3zw2aefvP2hhx/D9Xfnc0QmFMJk2gmVpXNQmtOvzua2vvD4R2+Vy2ZPe/zBr/6wmj+wbvs2gbLnVzZusl++s7ztZ/yS57exPDoc4SCsn5HhNSfgbFxeAewoEc/MC27QLLGZE0ukGY96YdTulkf3JskfvKN91cupXM46neH9D/ff/vpWuT9AZlMLoaWu3/U0MiXkWbvMRon4IeWlTVMLLvMwXNpw2o0vfteF3/vAmXf+IJAJFYGxzpczLD+65NLl03d3jgyOVrquk010sqWllTS1kwvnbrzurw89989FHQDke85JkiRMd6d2blvpHUq87J9/YBNd1t610/c4N3kPylQR+0bsLxHVxl4LY/3juK6CrLb3oAHBxD6rNPjBZioPqmqyPM3b7L1TBWMTQ9mwGHW77aIYOUJj2Vg+euSYEq7fuKG3tIIKk5OTEfRRuUBEEddflYhuSMjWti1NHDuy+IUvfvo33/img8eW7r3nZ+/8q8+94FWndjd1kx3GLeJ3v/jJU864dMfObQ/c+8Dy8olHH/3Zug0vRxgVI+GEVZXG45a6rsdoURJqNGktdFG3fBkkiPNefWAiZk6IgdgHQcSyLL0Pvd7g6U+/6l8/8E83/+QnL3zJS47ML6NCnueuLJgp3iSpLYHjELKelUa/cUJS0IAaQAUEEMCJ+qCqaZoaw6oaKldVlU2SmrEz7kRFicXGPkW1rlbqHBmQaqF5BAWN8T9qckXTFfl/UMQNM2p8iABAWZbRIwVXmQkAAEGac6HmEdV/uihGnNgYTuI8GxHZsAZQDRi9biASwVUFuOEjIWJAVYmFOijUlg7xpIsbHmJpvmqopYiEoIKA0T6zkbRVUGjo6dL0aAQjwAFVtXJqrENgqJywuIqslLfs6wW7DtUbzKzVkfMqKUJfDL7Yf2kLHfqVo3/2xy8/H4riFw8c3rQuv2CLve7OEW9t54hVUaYpuwPHXvnqnZNdObhYJXlLNIRQ50PRI5mbQ7w+kUUVkIiITQjBWDs+ryMcodVqOfVB3fG+vOX1F37+J9cpnIXtyhdiyZCgsdaLRHEGD8qKeWbLQzsBQLbvfWK2ecfWU006msZ8aIcLC9XxAys/P1o9eqTf66vtJi4YtZawandbVd+fvGPzwPulhYPbd+xYWe67Ju2LK4GIsiytiirLWgLeGFNVDgBarVa7vX1lZaXXWw4qbJiZyVhmjgow8TSJphchhCRJmLnf7wNh4SqPrYitZdSqKKPUatrKRQSVVIcAMCx7gEqGRuUwTd2wqgq3pKDGTG2aOw8EDXNVDZXcqFwqy5FzbjQql5f6IdidO0/fvevM5ZX5u+66a+OmDf9x7UcmJyf37DvRaneq0tnG8R4iOmHtCYBKRFRryzQF7hoZjXEqrM1r/HBrgHQcB2kdww
EhesihgGCzl6KYpcYDV5uDIr4/4C+///hP1HMiUakHQPVrTBlaezEUuQOx/xQNyFVVQA2ACKgGWE27IZ4ykaegWpP3sN7vxAa9BA0JsiEqURSAgKVMfc9VCWw/85Lfvvue9Yf2Ym94S2fb/aZrtLDWiviiGKSZGa6EnS/402v6/zkZjjy69d8+/elPP3jfTVOT3B/MY+hMzWTrN/s7v51AYu570H3/IVq5Qk/T8nmLo13iKhy0NEuS7qHhUhu8RaveBUNUVkcA2+3trcm9/WQzv+CVgwMr+oubC0tHPvHR9vG9TnNMRAcnhgYQQJJ1vliyBQxoJa8IEvUueG1xGI04/drrPzd15KHHffEPweQ+SJIWGpKCBisTJx096cogEHx/W2uCWtxbHCZkoAMrw3Lj3ufRTfsOPOHfuD9ryw1oMMta3XPPShenj3zvJ3LwOOd683Lrir29qTlBzRTJJ5JgQz0XH0UOYuUTaTLj3N0ASdObVBCUIE3JUZ+9Xrz3dWGGYIzl0WBEGTGbclQFosRYkVAURcRb5SHtTnQE0DmXZRlBLaEcQrDG1lhqY5YWTrRaE61W4opiZkvn3z74xSRJjh078ePvfW1ibhOl6Zc+sW/79s7kS3K3yC987esfPbTPKCYIOWZ333znFz7x2de/8Y9CAYBVlKOgmlwXG0feGCONAmLkBYH3AGAMuRAifdAYk1gbg42GIM4DYp7noN4Y02q1VPWUU05xPpBCPDetSaIZw3ijelUDNLYWCHEEHUe2DTU2Hk9xyznnbIQWWxuNGaRxSKzDT423Wq2x1kZTbcpWrF2emsS8wU+C4QRqSh8xetHVy22uGaLGugJi0/omJDYGCXEVD6lraiVmJoV4waRgjAFQ7z2RUUFSRSYmUiJfi5rS+D7IGj/EqI0XtQjqA6WJxPHimk9ac4KjjPC4LK5rFsRYGYtIaOzZm25hGA1LA4aMk1JYrXfulj2jvN0CP0JJQpAATpFM0MT63578r6/2r7yve8mvvfuO9cWeKy489cYH9n7nXphblxYnitJU3Xa7X7n1ndE1T9l1eNHUjtFN40hVGwmRenAeG/6xCUFsxv2G1Xqda+Be35VTaWt5ZeXk00/63Ws2/s03l6c3Zh4kpQQEyijkhOw1UELGEyRJeWIdjFqy7aHfe9bvFIsycglwj8Is7e6ay0pAV5wY3nDPsYeO30fM1jIb0x/2Ttqx+ee33Pq+97+/1xu96EUveNnLXrG0uDJ+slmWRaPrvN3yTohrPa8kSay1Ubo8TdPBYECGQTG2uFqtloo455jrVV2WZafTGZVFWJFuq6sFOhdGo5IoypUG0OCcK8SjICO50gHAYrEYQiirIpu2qoGsADsAES0q1w9Bg6+SJGHgdnu22yUistaec9bFxEJI3gsAJAl5D4uLy8eP99rtdlW6cWyz1rqqYqYgHoEiZR2poeuIKiGvWV24piD+f79ARK9N7StNLBRQijYoEGJSPM6AdQxwXE3OmjNq1ZB77b9G3oTWI2GENX5r4+Jp9aoQGwoCAICgICFxhIMERIzEB21abqqKwojIYwof1MEbmULpmImJQnTAFCxD6K0UpGYoVefyJ/e+8m8TD977SHfu+2CIjLiyAkEFSmj52PLZz/odmdj4mqUPH0zO+9C3H7jr5q8m3Cuwa6Acjo5vmkzvvs/tPWpz7lT9opOxBPe8wfFJNQPAqc0XFNXK8vFHQmd280VXLiz79uMfh1/7b77qyunLrwqzxtx5xeLSZPnxT7bPPSs790LcsJm+/6P1z34JnH3OsQ/+U7apPXv5s1duu3X05b+fPfPK8mnXmLvvLm78ZPrit8jZFy69950d53/06//Sm93+K+95csrtQpYTMy2ykoLmjs2uK09smYUT8zQ1rbmUQ+j5UafTSfomlarqVFN3/vahiz8eWou45WF3+NQiwVx4dnYbXKIHe8vl0WOPnn7mZ/cef9XMusy4wUgM+1DVQmzYqKUCgCIURRXhuyGEqqqIDJnoEAxIxLEPCqoIRKvpYO1/Q2gIcVSMxIv4yhKKqHOVMnTa7bXgZyYmwADBUhJd3EMIriojOaeqqompyZRb4qs0SRD1uq9/dzhaFvSn7j778Py+DdOTu3dt9yFZ2HbrTH/buVdesutIe3F5eO5FZx85fthVeHj+OFAIbmRsMg4wiIiGY1BxrkKsxVSRiQGYLSL6YsTMnGXI5CtXeAcAJrGhcquJsOG8nR2bP7GwsJBkWfzwzjlLTBGYqkDMq5QGBGgMi8ZYDCLCNXoUpMDWImJZluPwD0zadIT+x1bUX978q/+0ppDVSLRVVYSEWOOjQojy9EEFRE3DRxrzDeL7RxZWzWtiWhMvASIyj2pHVW34kfFoQERQraqKrAGEOHMXBW0sE2ydrGnj/FDr8tdnYqhtElAxwtkiA93AKk9aY84EtZczxsI3gq4QVtFhdWyDtTfEOG9sXgb0yiZJOwbve2Tfo4fz6W2EQxCSyvnAwVDJnl6efmMzHXtV8Y/loLryHHj3G1+9Ibfv//KNX3uomEBOMnHBOektLZsX754+5dRNdz8y6LY4gCZJEnwttBJF1cd3TOL8Js+iRevakz2mC1FpZAiapG31wInZf9S9/hWXfeY7/31MzjCmqrwzZII0jGsiQPZJUI+p7SYHd/fX7b3/keXZVut4b1HYWh0EHDhvTNdOd+3znniWT7cuLg+Nlv2iPGnr5hu++a2/f8+7PvvJa392175vXvfV17zmVwaj/nhRTU5OMxMQusp3JtreORGx1o51y5M0m82z2dnZsiyLooxLHVQj3jumI61WO04sW5321NQUIpZF1e/3B4OBoCKBTayqIZMOipFBLFxpuU4TvQ/zi0sn8DgQttvtqlBV8GVYXu6laYogSBQEfKnGoPelDysEGkJI01xEy6qKHTxjTJJkrvKISNbEHS2NHo4xRgUkOoA1I3CEWikWf3mo8T+yz7Xdo9qjt0EkxOUaNXih0XurmzpxHtyoTa3d5vTLclrjwZOCINYitQRrEJEoIegYs7m2dAZD6GqIXxz0RiBORGWSUnyO8VYAAEoAAG3gFM1uR+89IybILgTHaoCtwnDQkwEM2tNpkeD6hF/868Pbbr4rNYe6nbQoxaTOOR8oJbNx/ZbsSa8+Y/iDHaObP996+y/u/H6xcrTbzsvg1aVJpzhy2N50lyhmuRml6/zJvZU7gHrMVgo67xn+9/7+wIffY47vufBdn106cHyuWuHnXr3/zofXP++1FeV871cBwG3ebU7ZWgyX2umOhe99K+S2+9LXHskpe8WvZhdcbk9eX91xf3vn45J3vs/uPnvhuuvdbd+bveoV/tJLe4eWDl/wrJ/73Vd84e1nvukvFgcqX/4Y4dFw592YeGlt3v2217/tlJM+c9NjP3jk+HKYNDiYywyZbBigbdGUsthakHSQLm2759lvOfUzH8v66x3hvKGZU06l9sx8sUKj/jePq949+I2z5iYmMPQl1J3FMA7DMVhE7baYoDMzM6lC5SpmNhSbGqj18F8BAA3F+C2gWEPiIxiYA
FATYxFZVQnZmoSQBSGEsLi4OBgMDHFRFK4Yxby1v9IjojRNEZGN+CAhhAcfvO/eX9x15OgBCXjj9+7Zd/jE/n3L27aenE1svuf+h4qNS+V9R97/3nd877t77r1r5bZbHj1yoLjh2z/qtDcrmGEYodY9zBhXjDFsDTIhU8wsACBSAFyovK85yt772ouJSBGqUNf4zjknwfuKGe6///4sy4DQS/Aq4/5PWNO+i+8QuYaR4BlPpfiKN857X1VVjHtOQjws4t+KzyP+7vgJNQ3h1a+h6T+Pvx+bGKvpDq5BrkPdhYKmozX+yfG/QpCqqqJF8bi1NX7bGEXqv4WgjedSHPUBQIT7js+RcT0Qr4lq8CgbY+rbwrzaq4+ZfgzM0pAiRAIKGIxql+O/KCJBV2/I+Hxcex+ifQUpMKAljoI+ncy0k+5oUMxNJdfddnAI0y2sFAlIFIjJGtYU/O90PvrV0TMO6nYdFNu3tH5w456tT3vvB752eEebglY9GRqfO0lhcPRNLz9teUDd3ETh8eCj4LBSFPdvcpQ6a6H6U8QGIK95YYOBDOozhyGEzCbecb5x2+++cNP8kQNZkqoqECTGIqKAoGHxAUSVLaGYQyf5HY8d7oWe95RJrgSSOU7aaDorrr/i9p1YUuwEgdnpiY2bZj71iWs//l/Xfujf/mPnjm3/de2Hzz77bFVYe0lVVXkJ3ntlKENFzMZaEztDqhr128kQUSx88zyPreaGYhvSNI3BgsnEVZckSbvV6rTbBIzIedbqdDqtvD09PRt/PU3TdrezacvmmZkZQgwh9Hq90bA4cvh4rzeKSCLFoghLQbQsyxDcaNQbDJaXlhYG/eHSUu/oseUjRxdEOctaxpgsT5PUEmtcdaRgrY3RKIiwsdHFoV7DxPFrIhqLOGKd7RA1dLJxwBtvtHFfWho/0NWqtrHcHsfy+IVgZArS+J2hCfncqAVE3Eac3Y53dHyNL4Csifz7cQxuttLqNdS2DxrEu3GEHp9X40dmjEEiacxGURRE0XtBEOer4AFJnUMEXBn6kpgtOADnN1161f5zrr5VaKgc0BNag6bV6ZYjt+WprxhmM7/DH+rz+o/dnvePPNpqT4zUZAyirNTO8vIF15QveerKCi0uPWTWLXsCEPU5mOKM84PRTnvb+b//X+HoseKLH02f/sR9H/94nmbDdLp44OHOkfsAoKLZzsS2vL1p/ic3L73nb9efvGHBjOz+x/LTzw99LX54++DW73X+4m0LmFT3P5xqUtptSdkb7d1nnv3SL8jOHQ/ffPHmAT31+e3LLrG/8QZ/xUsXtTxejjb87Z/wE87fms+9+cozf/PSDZOmnLAZtzpBhi3oK8mI2W27HQB2X/f3KLzveX9YJiMLnFRySEs/3U7WzYSQ5qPwg0eOXXvX4bQXWomxaRId42K49d4HBVWM/ztOiaqq8t7HoUCsiWM7qh4QrFl7ceEZ5/309IwLQozWsPfBpAkEtTb13hsjiU2Fraz04hv1estTU1Mri0tJnm3btmV+fjE4nySJc2CNDHqjPDMLh8sDjz52xnmTk+che7P+Rec/sOfnjta/6FdmPrzuvq2HN+nx/r0L1011OpPdznRn3Uufd8Fv/9prl04sT7XbASGooKyh9ERtqSCRzBxlj+vRC6jzTgGifAQSo7jYJxQRMMzA3pdlWXmv3Va+efOmoijGkSOGxKZ5JWu3gdaQKw0oSBS8QBAjFMNDXP2iWtORVceFbRg7+2Lt4lB7E41vPVN0LluN0Fj/jCBgE5aCiNbcbQJQatT2ojnM6naFplNPEIXdI0YbAJiYayX3X4rl9XWCQgjaNEPSNCWiqhkGK9b8DEHANUoaa98kHgdkzdhrhghBRLwo4agqrbUYIBYrsXGDiC742C2LYNGYryPWH2IclXWsq2cZg1Hx3mk7p6JXfP3WQTK7XipXIYEGJTRiUfQl3c+tp/l/7L1BykFrfeuBh0fXPGfbte9+ZZK5V7z9pu6GjRaNbdtyWS7bbM44d1dv3iaZc6E+LuOtYOba+EuwPumwHnlEiMD4jo15nyIiKh2AXjEkSx0wlZSHFvF5L7vkQ9/7+qOj0lrrQjBEELXGrDFAlsNQqhYmycFTh5f/d1UsnbNj05HDCzLtuUglA4eCo9Dhyd6o7C8dmOfhL+4dfuKTn77nrpv/6yMfPvusk77/o9see+zIi17y4kNHjjPb8dNxPiRJogwQAiBGFDoSMrN3UAUPKsYYEU+GM2PHja6Yfllrm0I9FpcIisF5k9hWuz09qyG4NLGIqMGhwkS7E3/dWtvpdvNRyxgzPTm1feNOkya9Xs+VQ2JK02x2eqMgWLJFMUSCJLXOlSu9xaha4yU4z/5IYYyZmJhIksSYxBgG8LEoHyPgYgUcfS2hHrJIADVjvfT/KbBRRzhYU6quDaioGFHKNS8IEVAb1xMYn5vxX6MAHTBqI+Ycl2sVfB19VWqVjyi5EEJEcYzlKsEgNaa/QWtYIiIKAhEqNjqdMagjaRBGWpt2/1KGgVgbJmK9JrVxXXSqXhUSTgVdEOnwaM9jRZjK5mYLLBnsug2Ty69+3d4P7u1W4lAHVZFSMuitnH3+BXruC9f37z2j/8VvjK756jc+NmHKIISSVB4pVT/y6zb0n3LuzL17l7ZS9zlOZ9stRNeFTsZ+45bTN2/avfyKlw0+9eH+Df+69R2fGnZ2h5/ev+0trx500l5naGdWfEnDMNseLsD0uonJ1sopZ6177q8fvuuxjbtnlx490t3QPbx/z/Y3/F7r3KeV+w663dO6tJSet3vltNMWb77num1Po+Ceu+/69W99y+J/fC676pLpJ1+9f++RzhXXnPIHb8ovuTI9Vq6IUKd6zkUXrJva/6Ef732kcmmbsDIJg2qxsOW2dHFHdvzU3V/7hwde9vojz3jb1m+/O6i2PZXqqBgI55MJLHB5w6HeuoxfdtYG58roIxIt2+pOCZrYDgaQcT2DiKqNu0DwqsqiwE1MaWYEMbRFV3kYlaUqG5MzsyBkecuVjpnjrEhEOp1Op9NBxHa77ZwDVAItylJVnXNpmtqExLtWa+KU07tv+8t/PP2sza96zVW33fWtDk0uL/euOP2a79zwo4PFshro2PaT33zV9pndN9/6naNHh7MbNr/hrW+VdmfPXUe27myR5kFDxNTXx3GI/j4aT22RmlEf1SiAOdLTnQukQYOwiU5dJCrG2CA+VslJkux5+GFkhJg4k42uZTX6GGCc3SgEAIgy8cAAAD44FVBspJgRy7KM5qaqitwMRxGcc1hrFpo6hEMdV7RuIyM2hOa49+LUrR6sBlFVgxTiHKEJUfGDxyx7daSEdVOLiJSa/vAaeWrfcJTHnWdVHef1EZtnyZJCtAc3xqCKV5Ew1nyulQTGuNN65TWtgqhkjQqkwEiIoBRTJDJIXkPUkLLWEqCAWjbxmUZp8vGBwk0jAccNdhEV8VWZcBLEO+s708ndP314z/HJ6VNIBqTWhqpCwlBxrtVvz137xeLqvW6Lsd4WeBzwmCsXFkcHDi+86HETP9zjelDkQXu9
Lc5vnS0lIzabSazaJLIAoi1QNrLTMRGd1AFTGyiFdRwdBtD+YZfKRjmzOi0lorkw3yQRxHipFImzhGxCzLApzburyiEBUlBQRkowrsAqVURdwM96QIRauQq+EOBKXMKjiGQgQARClEdM55lKCVU8ZQUKgk7Go9gwTX7uLGS3nVWGr3UDDiCLlqFQJWYWE5wSXyGQgVAgpUVu25dcwMSrTWOfuaMbWoPt+efdf7/+p9//aJR27/ydTai9cmg0d/9g9nv+Cfjrbbo/zExzf9+WODbW879PYsyj1xH9JmPLrv1Oxlo51Pf/qF28/esP/gfD0aV6YNYIQk4wHpGH3h/RSeNRGGTRcAgPPV7SpKFioZVIjAhZctlcCWUE8yoi6caSVsKLVSIILMIW95BCciIrHSEu4OIgh4YARkLCQ/gzCFh8pnTjQpoRUzxyqUDwA2jw1hvjSfD1/1J3dq58dj7g56xrR57TQoacwOEXsgzj16Q1mWnTN1fpZzLj5JAJUhdha8wRUxSKVVjDpMLCr+wuoETIhBspuZyRRCLp4LcWxFWiv0uQ0JL3xgKSexUr2fQfoRixQY2D7VL6repWINXwl3YJGqi2K9RJtX9yQcVaBCriYYxYSPbdlXlWh1A8UVp1ihLnNzUbOCIjI67GXD9pcEfGBgVwxAhQgUmN7Oeyr7+BCFJASTyvgBA78dypMBzEwCJBJiEngO/H0QsBxm/QrCIC2s5Fcph4eXczWAkYG1iaqX1hgjWnxuB/00BAdh0KqmI2V9e2Rs4r8/860/f8c7166bBEIaOE3KIevIiPXs/Jo1U6DVmYU5zp0wo8h7rzt4/tjghf+7c2GQAArialkYFvGCjFi0QERUEuBFRJwfICOIZy8g5CinTKOAhxjYUUHBcsKeBBDEzR1dXpzP7voy6Thec0Fr55W1Z12+4dj7Nh7TNjn99sb4q7bteNcjx68dwF/d6M5qbt+y4aIt9cHdB3u3PjqnANDWKKqpmm9qsv1sx1nxJJz8+Q9+vuuC37fmNLlxhi5CYFICCYbtg1LKCrOIUio0PBSsukSK2UlgOyAZoxCR8jzYyCKLTTNWShDq9brWxvpM60hrDYISSZIkzubOOcc+/HAPMuj2hCFSCkQGLo/rtbFID3pdb533vt9ur59c42ESatGIi2xty8iWNR/74Lv37rnjb9/1/YuuvvrQyYMn5w50srmHnrxn+1kXPXjkISBcOzl1aO/BPQ/tfs4LXghZwH0UJTT2+31ENHFkjAHBJElCJhAqaPVElOd5LYoBMZfCWoeI0BfmdFhZepUbtUJVpxz9KaXCGSClq4dfRQGtdaiYmV0oFaXkgxfZgIuJFiCGwkQICo15QSdiPTOL4aIGV168Y2e9996zExQnhe5ghfnKrbXsbdZTWhNJWLW6zCllEEjrGBFNxOH8GKXQxACojHHOGU0ut0EDNsuyAsdR1s7MjCLhTAJ4RPDIgCsBGgAKTm0pGVF9+0qyFEEWJy78JSCFyFtsyim0vi403SHqBWq/BOekVb+rKqeq7ucpn0RABPKi+0EpiR9KKU3KkBnkg6CgqaCwFu512sMTI3awSJH++Od/+8Pv+f5k4wKA7NDhe1/38l+uGTi49z8m17/ow2vflULyhoN/v2gxSgB9bThKjh07/gfPrL3z/71yVjUPHu6aZAy5j1ahkpTSqNbM+4O6Ih/sPWAFs1OUcaVySyXdAKVoYojcJBAQuR7Ei1AQOfIu87Y4vQTgBQiRQSMFaopGDFtwF0o9KHKqF2EULHerq01wRQQErHcKiYNkRIGRFhFJM7nowtGpr3WGGr4/wMZIrTPPBFNxcsBNLQCAXm6xUiAOBCLQSuDcqV39PDOGYsNZ6hQyKi3ClWGRFw7OBgBQEflWnmZIY7bo5MAoCI73FAFAUGcLHDkSqRRMuVzqFxvhsAkvGmMK1W2VFFfGBoToC4J4OHHh6ZgSp7kaLl4RXovXzzNbz2WCp+DTsmo+VL3/q55q8VAQMXNWgy54YkWHHfBnTzlfZa1LCMgswlIEDpYAtgMs1FiL90oRqIJr45wjAee9MQZWEbQQ0Ql7a5FIKU0YEHsoIuLZl7CP6kp5FbVJyv2IIHjrAEDHEVuXpmlx/KMkk/bY5NjPfv7w+/79E5t2nsOS9TtZokkDsvMiLApFqUarqbWaX1xwiID4u7tOvebs+d/84bY9C8NCApKH6FLcuuCcCsoLA6kwt8eSig0ARtcFPACEwohIE0beYc6pUUoIQQqbnUIrQIxzGVKO7PzJ+5dPPHL89/4UhpNmcl1j7qqFsz9/yJ944Gz4xC3wpS/yt5pHbzh4+NKzct6Gj85H//7w+L55iZXzNmI9OLOYnLnf7Tyr7+GnC6duGhpdm9u2MqUvKgIgIIvNc4yiequZpqlLbZZlYaKjVLn2WKV3JAxaGVMzUsa9EBW5dOtiYBHxrjCBTZLEc5xlmXh2HMxCVH2oFeA8eZpl3ncHfaVUXK+RF/GcZVm9Tl/59mda0c6Xv+zG7Zt3/PCW7zz6wL3vfu+3d5y9s790RjJjrd20bor7WWemPTFWO5mfGRkdOQ3L3UE/3Hldq9X6vV64kjiOgx9wP0ubSSMg4AO2JYywwhUaJOd9gBoZVIjIhMGxmleBErHEJoTqskLnhvwR+E5F4QxCXOoQEXj2DKKqgwoQ0KekiKCQPQoHyBPoYM9HRb0ZOvIwtYjj2GW5994Ys37dBm0UW+ecq9frQZ82z/OwSIgUse3m+SDtucjUoihCNOGwBYk4rRWzY2ZPYJT2IOysUQrYE4ECsNbV4wQAHPFqP9Eq7RXm7SwIpNRKdADPVZcG1U1b1U8opcICKYwBrPXBXjvUsNVsXwJKUxgrP3bg3Isvn0gVCEKSRs9QETQBsBz5eSoyGQHqYMTnGQGtz0won0h57wnRumzb5vED+4781V+8+ZffdPHpg8voJ5oNPztb37Bu+Ec/+NrYxIaL121/S/MNG1T+tqX/PJPXBsPj9YW0OaSOHTj0l6/b8M7ffd6eM4uS9Ulb8mfAtMQ3nAxqmLiBRrReIiIHgYVVsVPCokFROJmEFNJtRSAJ99Mz++BXSIVdLhaELGLmPM9JFaUPhsZFwuuHyExIqBWyBPWr8N+E9vr/oGCrG4uIjMX61IOoQulBLWe8Y9tZjeaTfchj1jNzksdSy8g3zuTjA0p13NbEyoNz5AcMI/HI+uHN1maNOjbqUZ55jBVbRuaApAvXwswhJVTJb+UjKdKlOEkoWcWXeplBOwxXZu+qVH8kKagsCtAJO7dirszM4LnC9OGqJliRglX4L+UL1FOWZVwvCuPqU60eeiNi6QlTJIBwXzWSrCIHry5DV+bbYUdgDDNzqU3mmQFQaY1YmF5RlYhDBiJUSEqhcCHxHd55X/kNSannyhCYeuAZSlmPEAeKH8XBKAKBAdgDcqw1V/x+Du5jhbR1RYqD4Bm86oCHlycMlqJaopTq9/tOD6Ymx+6+6+Hvfvee//dHf/q0ay7/n899+tOf+mRjpBGYYNY7JMXsDx87Ap4RiUCetXn5b64+/
v4H13794IQgWGEdOv9ST75cQwELhm0YFBcAROy9R4oK3EhQAPXI5IlIl8HMs0UAY2IpRpJKQUaswAOSdc6BEUk6y5tuaY/dIhNSz9TNY7L3tfL1W+R3T7bnNHjBnOj8Cf8fz5h+24+mnugQ1AxZaNZ91q/vfcLUGqde8uKDzdGWjYE8YoVsR/S+kDKVFJk5SQpZmICyDJJBHNbqWiGArAIDhvehVquFs+994VVaScpIKS+DiEprhdoJO+/D71aEumGGSfXTASCGck0pVachz/y866/++3f+y+nDJy677uqPfuCfX/mi1+7YedbM7InUSW14OAPVs37Tutbx4wc2X77hhEhq7bqNG3bsPKvb7Xp22js7PDwU8m6aZvVGAgCt5pChYrEUKV0eV0ZEVOSdZ+/JKKWUeCFEY2L0zpWzXygzsVIKKo374AFRTqFDaxWiVRjlQzlWqs5zVfMWJkLylNgHhAH3UTEdq5G49z6Ah8sWHAOOjsG1Wq3C+kZEKvUGZlB1HXMjwn4/9S6vN2LnM0YLEhMqAAJPOo4EJWcr4I2KxDsAqMU1X2ra5XkeGuiVBWF5/BAx4B3+P7LeO86Sq7gXr6pzTve9d+LmpM3SKoIikkCZJGMw2M82YDBgP5IBkw02yfAA++FswPgBFhgwJgojQCQhhCQQQQnlrF1t1OYwMzd0n1NVvz/qdM+I33wwHnZ37tzbfbriNyDNT+cAQMAQs3kW1n7wVgUMcksRmgcpX2QbP4pyCCGEIAoMjAqtm5uQ2u82O3R7D+0WkJrFqsVfJbSuyBEoqSTmOtpGARptEOdJEiM5AEqpXr1i0Y0/ue6f//GK1esnf/WLe675n61TyzoHjs36EvbvP1QQ7H7s4Q+dt+upk7MvuHLFg/rlKe15t7rjoux6+LlbVn7wNV+7d9tswE7qShnLGDURuJAoETCiRtcYN+VDtdBLoN21q6DYTVReYG4BLdoZG6dbEeXsL2mh0IrfZi2OSgTtNlQEAhm2RwRMOMkonuZyuLDEBABSwAXHXiyFE3nv68jdMTr5OLptX+z26Lxl5S/2zBwjWl/uP7h4UBwcrzQSOE8UIPZn+xvHN3SLRYfmji5b1kFEIBQ1t3RVESKyilOphX9rWgiBtkOFmdns0ABI0GZTRIzMSRsb4OZLVU3EgdSafspY62ajnJmEzfm0dMiNWHQeojlyzmFr75jvXb6DRCSgXLdiVWBjfBFRBOQmN2sm6aYFGGxasKJiZlXxztMCuSulbDGpIGgEZnpC5lZW5xyrsCQk9M4jorKQ84jKObMiIyhoO7EDAHLOIltiLkJge66JmNl2ZFVdc0xYBnIUFoA2tGHlYkNSmL9BIkWDniMiYBnF2j5mrxv27zr8qY9+8ZKnP3flyhWDWXHcDZg8UCSqhZNKFz0AxsRM6up4wpL6U5c+9OOd0x++eY2IiLPHuOVDLoDrm2ZDswhv5OQcoWMxDUsQTQC5fkkJi6JAJON2OfQuDyBBlECdE0LEmARIwiPd+OShS8lNq4geLhiGsLcDL34ufOfrcNwkDKOKcj/R433/pnNm/vf3hqCLJro94N7EWGfC9++6Y/b6n9/90ped7QZdcjVYY9CMDENRFEXBmjEHIQRTkbILHlPNKauqtQFWEFli6QrVeTsZRNUUfVnkGq6lNhB552JVQwM/8t5LYlV1iEml2+uhd7EapZQ8OfPofNpTL33RS7Z96C/f8NMfLToy7P/nl7Zv37/7+X/wB8OZfjUzmoQIsT8xtcUf5LnBEAGHo7hk8RpOevjosUDO7969e8WqlUuXLo3Ck73ecDjMiIBGp9Dmda1Ee82pCCGEwA4t8SAAOt+mQPPPMY14W3m2YTS3wkREFDqBiJIKtI9Qc0DNp69BmSICqmj7KHkkQ7oaakMTo6rBjlp0SXsDbMgMAAZaKUJZ17WAGBIbOK8SvfesQXWkkACHCigSQIOnMVX1nhA1QWQRYWVW7wttxkoGorMEGTql+dIIgtqcpqkhhJXIqaa2UXOQV3eijPCbCaaNO2LNHyKKxhgpI3ex2ZyhCkjNgqAipk9pL0FA5MjCGer8YBwAiLwDVZOMno/bhAogyQNG0ZhSG0NFJAEDhpSSd0TBx7pmqd/6lj8bjJau3nROLQcXL5rYfmS4dHE8dkC7XQWceM0Z+589vfON16+7fbS66/rV0Z1hbLsSxIF+7MrPPHR4joL4kgpBcVz4rlBIlSAzFUJFkWpwOiNuDJrAnJf+maqb84FtBIHQKhhqdh/YAKHtkgbnk2ZKkl09VXWAteR9mEGQENBm+zUnMtskItMU1cbdL0fYxGq/ountVLMGBylo47dDDsamFj37hMU37+z3lk/vjLp4bKycHbmxw9WSfnlgwjMBR/I+eT+XZtYvPh6140iXTJQpCQjUKmPODxIHRx6zOTcvkBx3iv//HAxZLAIdOURpOb5tMpBW3WnesTenP6sRs9Bp84uqZtmcf2lTWdo3NSejP7U20vbPJA97c4MJ2jySLNF6d0dAiEDez9NyvPcIauGFGpxmWyioKqBKFO+zq5hNNrI+uQKiOp3fIsOCQUUUVpAiFIQkIo4oKUOjrYELugVIrAAMmlREpUW6tU0tilLma2CVIkJ+k42GCVHwwXlzKsNmH7cw1nnvjUbIqGVZJo6ION4d+9u//sT42OJLL7vo0e07jhw59vBDDyLzkKMn0piIXC08SnWBzked6Mjnn/Xg/mF47XWbiHw24XQEycA92N5bVVG0TVMjcNmWBAgqrAoIQojks5ZnSiJJXYmE3lKC8ZTMNVjqhMDkAjj0OAVfD3jWY0ti72g56rNAB3AESyMum9WxAcwIzMLkohIWFZXrTC2fKv/0okuuvuPBxw8eSe4ocJruLhYY3Hvvbk5awNAIbqoKLEiE3vW6BTknzMCiMg8zQkRRxtARERRtB4e2REOzvwzBErD9raoa89a3OCfUDLckBwAdH6wVjOYKKAIOkyTHGspOGVQSpzoCwP7+3uc8+/If/+S5d9/yy6WLlj8+u++73/no7OH9L3/tm1IaLVuxfnLqlDB+3FMuOvugPLgVH66quP6kjWXZTSkN49AvW7bk4MGDR2dn1q1bB4Qm/aiKCNzWvG1B14KhRIQBnPO+4yXpYDgsCNGkbSijc+2wWpUxHxGaFxQR05PLaIgmOmhiJYLcNYJBNfJ8lK1VRbAFkkE9wVpLTZKIyLRmmBkQkICUrCslIgc4HA6f0LgQaUADragMnOpoVEPtyQWXyDmXYoUEaSQZTIhCAKAMUtWJnHOCKpLKskzMsY6+CHlETo2tqbD51WcD4MtIVOMAAQAASURBVGxtBFaWtk+jCIObr5ERnffYUB6zYo7VLsF5MON0RES0MpCZ0TsbwzXDpeydZ98750ofUkqjWBH5TqcDnFp4ljaeowBAos47DMFikjUopldRc2JhF3A4HC5ftnhYH/2z1736He96167HTvntUy5a94qJ933kq/t3ug3rjsPaXbB2z18/5dhntq774kPFBWdM33DXrqnx9UeGh3jmwH997m+WnnDC9l2z
kxPjdV3X3hfk05AZRqHDPKqZO6oEyhyz7IeqZg+jZqTmwMCdzbLQtPmJGNSB2QS13nMAAILivDPaqF1288fNileg2mgFE2AgFyhDhIyOZXdTICuWAEAWJxNBVXN9tzcWyCXrHgz0pLqnX5xz7prhlfc/0l0WIPnUg7C350fVkv7YfSvIFZGHIF7FK/D65ZsTo/e+Exx2OoPBnCooZ5/2KlUAEEIofUgqSZjAmr0FKGjMZ08zR7yZiDQj6DaXaOPmxAsMQtQg4nZJm/JLJT/77cdspnm55G0fKLApfXPl7R9oBiJnZpw2GYCIkMgKRm0MpuxtUEOaoMZSTNHZEM4SBgAwaysso6rg0fsCjcGtOK93bbOlOoIjZ5pdzo2q2jwrY4ptUdJeAREIxkQirGIdikCAiCSthRQLBY9m/FwGh4U2uySzsRERqyECOYmpAYJAO9myD+Wa3O8JQtGdmiq/862fffY//vNN73jHMM7e88ADoP5Xt944OTklLJFZXd4fdcnbR/7k0x9d0a2f+a1T+lyURWDmqq5VFMGbLTmYRvQCl4t2NtTWYdjY3wE4ELOuDQBCprVjYiSOVQSEFQzeNSqLEhiq5KGomaXzq/SUncseXrP3yDbREcEmwR5MPa7v/i4ULlS9VROdJSB9GuzoeijHJ1/8h79/2QuW3XjPL/fsHazduOrBhx/9wXf/55G7b549OOgunYSq9tREUyJLuVWKJRCreh/s0FrzQ0ijuipCsCtgbYUSJuGUouEHHaD3fjQcOu9D4a0ly7bRLpu4FEWBDkyTyp4p9R4RY4xOgRFZjM8JnV7Xs1Yp0jAtWrLo93//Zb/4wTVaziwquuyqn9/0+RVrtvzBS1543KbuxJKJ+x/6yaUXPw+LJUXZ9c53y97k5FRRdkexpjrx1HRn7uihh+67M/aHlKVcnThgAghOPYGj0C3MZLAAr8jYEeeRyNd1Kruy5YTFmzcu6vYK6TjxxEIeyTQqFQHIZNuccyGQM4tQaxmJCMgBuXlFCFQFQYfoUIiiakzc2gYQkQJEFXBEPrhQRJVBNYrCLngBzR6lznnyCJRQsRtGkMg79K7X65nklkfSxGgMfe8SaARgAvIUpQKX0OMojih4QPKhEFUhZIRRimJoDxBQhhSdQhxVpBAoQCUZApBYEkvikrxpsqdYp1gHRE9IwA5FITkP5IMj770P5HxGUKIqS0z2P1NKo9GIJTkETyoOuKnHbZcJBFg4Mst0QjMtUEcJtErRJKtUs5QgAKiySLIKwCk4BSeiyjaAIk+iqo5cWQiCgiAJaO2wDJhKV/RHx5avGH/0wTvf+udvn+mP/vRPL/75jV8mDdo77opP/Ycv3MxgbkPv8Mcv3P+1B8q3Xj0IaXjjz+8cc/29u2591e9d/OCDt138zBfv2dPvdsaGNftQOEkqEZGdINZFGaaKUAZMDhhNZhPQe++dCbGJQyDQpPOAHVVVFhDlmIhVEhNrezGhWXNyHTWxR7LXRERyGcPlzEdPwTLHMNWcJLFYVvCAPoPnmgRACCGX2IbdBRYS1cQpJRNBLCh4peQkzsLKxb0lS7kokkB/JHMFxrnxo9JNnQPTFY/qoHPFqIjYi27L4qfMDPdOLaIw6Yd98UgkOnRlDu6EjDKMI8MSo4KKMCgTtP+JnDBJUHTOqfPCKqwgHBoOVW6IRFvd/6JTtrsJu1YpJY6JOQEBmQxL4SE4F7wLHh2p6V11CiqDC97kxljFBhKREwrmoV/hkFBAJYkkqJUTKqOwJAWTwzOuIVhRPop1zUkQMqqZAIJzRfA+r2Nd8D4U2PXQcZWkYV3ZQzcaVilywEYAxDsl1EZN03tT9PHBBRAgIl8WyXyTmoPEzJFTEk7IdYxm7JZ5laBR2JApIsI2pmYpnHcCyhLIAUsgF0LodDrdbpdESTLYfmGnYaUMMAQvqi6KYoA6aadb3n77I1d8+qpXveHPFi1e+uMf37l3167vf/1rEPtIRQJNInYxUEESg+h7ztrxjOOOvPaGk7Yd7QlCOTFGnSIRIrmaWFAQBBWSdTt5xsaIyApJlBHVOSaIIEikNkdyBQsM62okNTuVogO+AGejEnEegeuqfywNHUHlvR8vOqVAXemZp3fe0e9dfs0l8sPF7lHwtxbShZfd1D2F1vsQujpL9VFfHQSiVIwfnht+/OprP/7Nz23fvn3d+pXPvvwFH/rw3/7t339icvHJex4/MO4KCY7RgxZVXUdgRdCae6xJNKr265ESkh08RElSoIcoqU5RNYJY6xycL4qyLEvX1Mtj4+NFUQjkVtNGoY68Jw8CUiVOKXivqkoYQQTUBR9CQKJerzfe7XkkEZmbm6vrUbcIRYerkV7ytIsvfvZzjw7mBFMtneDLH37303ffftfKdWOXPePZoZo48vhDMQ5D2Vm98Yybfn7DN772hcnJ8bGi4/fs2RPT8MmnnXpwX9r62H1Llq5YvGQNupmAPU6JkYxvJ1HIoapEHJTU474ChkpHq9dMHd6/8y/f/O5Qjr3xre9SJkEsyxJTEnSOvC32rX6ElhbTACaJyJFTJLPPY2Z0viFrZgUoInJEwAwNsRUt6qIo5LpVVSUxEirnLsE3ZGIEIDGDGgDEmhN6V7gCWEzZIO/wPcWYcosvYG1lv9/vdrsppZQSMqO3pSiqZlPelFJG2aE16EgZvzEPpGoqZtXEgETosnGKqqgwZlQntChfwrZlsQbUqrEMOjLCdROecnEr2T9OVRPHLBpqcwijcDY4wMJ5w8FpM6bLtXBmjjJmSWRlVmNeAYBKTS4KjMd6bsPqZbfcfMN//dcnXvW21X/wrP/z0pf/zv964UXve8+7Nhy/6jOf+fy5Zz7r4Narv/i8wa/3hT//8ZinbpUG05Nrq3p0wknLP/LPH9x71B+eGyEQcoJ6VEcsu71RXRVFMDqBtDsLR2jSPLaHaAWZCQx7Ai1+u+nn5kc1C6D4RISU9Z8XDmMAgNA1o9k8EpQG1BZjREfOuVYZyjlH3hFr45ABlNURQREksikfMSgqCmiBDgGUqVdCdQjjDGOHnZCTqaJ7x3DpLAC4Qx1BmKgoFu5Amlk7tXZ6YnF/tr942qcaHQo4KsCDIhB5U29AH6vRcDjsdrvOOY0JEP2CDji1rbl3CtispcHYvUkUEZOwQ9IGdmRCnG6BhrYlDPPbIsROWaoAsMZUtzN851xA4tzciq1yicjAUNruL2zkoJksZ6N7kxoDYVBEAW60vVwjeqoNKzg35c0UmojE1saEXMfCBypKrqPdvrquUT0AaAMgsC9mXgguoYY6paqGCGlNFGzw3miRADUcfWPxB3I1p/bg5ZGVI49omjkLF4qmOmvvXGKqWNBn5oVpK8ZUICkKcnITU8XcXPXRf7ni3AtOedOf/++5uboWlPT0v969/7Gdv66lb9cjZrwLuOBfsPHAm8/c84GbN1zz2CSiIsvhw4dTSoQuTyAUBFQyxyyjnrNBaR4kOABxdnwUnSgY6EDRURa4Za8xjYQrUhaNcZQExDkkcrPVqOtTWRz1bnKslGUrjg7S+k1HX3Led7b
cdOOnYuLxRxd94/KxP/7m0jQaujTb1X2qOlv77Xsfv3s/3Hzz9/Ye2FNLuemR0/ce3n/66RecfvoZo9GR226//bTTj0euCdCFUFDhXIiRWViKAKoWCe1gMwuEEEJIoB4aZQUFTimmhEXu9Iyoog3kzRchjiq7U5noEQJYyE2pKEsAiDGGEIBcPapy5GyKV1s8M/NwOCyLLlA1PT3+tre/86H77j6wf7sPFeJYNdh59be/tOmE/xto8TDyzNFqakUfIa5ZvW6089jnPvf5p5x78dMuvNBv3Lj+0IERJ/Tjj/Zo5dGDo6p6ZPWqDeokpTQ2NpZEmTmUhVlhCHYGw0pjPez3T3nS8bfc/Mu3v/H173jzmz7/pS9+5tOfePtfvnv3vlnW2uy0kkTfqPTaeRXINkH2JSIodX5WvTM7B9Xsw+obMjsiOhda1pq9lGn9hVAIQoyxrut242uXm0RVhJRUUCRZ6LX0k1JygLZ1Z+ZKZDAYWOtcliUnSSquCBLjzLFj1JCbSZWIKsnMZrXkBJIFhAB8mAdttqEkb7C8S8yo4iGLLWujl8MIrlHJR8SM37JZX2PfCwAWFUzZAFhSM6BGRRFuIEUsScc6XUHhJA6J5vdxaLnc+jZmRmzKF2xyEiIoqG3lQFJKdZ1h+h79SIYrly8+uPuRN//Za573h2f8zbtu2LLuKVf8v5988B/+4MjuQ7+9YXLfla99ySZ5xumzx0b+JVdPkutCJZ1u0enh44/t/Mjf/9tIujP94djYGKAMh3Od4DkmTVz4rkAEwlYSElvBjVY6W9S0r50pJcE8zrx9MFoMvFJW5bSmCgWdJ0AUVlEbFbbTfmzngW2ARsRArl2rGnWNxFZfaPcl5wPMlWIb08GRgBp63gECCwrsPdyfrbTrOhXHpMdCOASLBsDojnVVYeTAK0jV37j0Au9KD7OLJksekXNJEEFJuWYFh2QYCGGVlKAwtLavhds6rz3/aPA6ZSIPIK38pENlm7hL/pFskWtgHSJYkF8BQEUNI1g4j4g2dRBQEDUdCQiuZZPCAjy2kjp0AOBcsEqYFERYmnF3UXhrVVXR1hwtIiHfesOfa5OS0eJuLs8RQBL7wnkko/J772PMWFmSTOprntz8shZqWny1LoDNW+o1PB0pSKAsqM4sLAkEWJTYl4WNju3YJM0QFnRmVpYPFjaNQdKsrG5vwwFmbCYkSQAY0ehhKFf9z/fvvOOBcnz61l/tuv3W25avW1QWy2/42fWTU2Oqpfd2WSIAhKI4fWn/Xy965MpHln7i7jXz8AhRB1kjwQEAAdsDQq08uM2irfJ22pQjRCTgwQGiQ1GVCKLm/ovgiRyIilSgRSg6OQ7DiIoQqBfjkX41N+4XHdw/3Ds71Rk/YfHyw6L4lPMuv3Dud//lSa++Z/GSUw+u8mFxHBw7ePTY0f5sBPrm/ictWtqrxM0NZut45OZfXf2Ln39nOBx1iq7zxSWXXnTOU07ct++IoIBBH1QE3YhjEXLy02wll2k16J2hNNS+N1IMYqqjNjoK2pAUNCVvgD6RqqpsDuq9DxQwOVYlo2UmBptPFoFjSpp5jwpqEC1VrSsBVw+q0ZlnnfHe93/kja//E1cM05C5wK3bf/5Xb//Dg/u3ajzccScWa88QUXRu85aTH37oro//69+fcOIWv2Tx6sHs/rvv+8WKjbs7UzsdF3v3Tj/80LanPOW8devWVVXFyuPj4zFGh5RS6s8dHQwGpXenP+nEr33pq2/+89d+61v/88zLL7viC58799yzh/1kAnYsSZXK4KQyUNLCbRyQKhA5cIZBaFYRRhpxRjVDVHREkHsXcKSKqkwNs6Ep7ecd6BZ+b3Ezoxkxg6hNK9VgfNoQ4CzpKjavoOjLYmJiAhHn5ubYOaNgtb8CCbx3SdSaYY/emFqchBq5DGgeOZv8iDCKamJFYlJLz1kCJYNq8mNMRERPyAdtsZLfnkMEVIJM/Ld3xaqq5BCE8kq4QWm13aEx5ADA7E6NYG3GgjYfcEAIgD6YCrCiolcUQRRUYCxLneJ47LnP+e3+7Ozll7zqlOP3fOLj/wYBd373misurQ/vPzpGfvWaGAA+dgvvmp31XAXqDmO1f88Db3zzK1/0opfvPiAd8sKsDsqxnnCUqJxS4UIKC9BgMl+jtfc6N6lIAASkqOgWIE7bzrXNx1ZntJVfnvuhtjAuaUwh252c2QZk0qc9gaKChiNSVEUWtgO8YLiNgAiAnvJoxJHdU9NWVi7C2HDvMM3UkwHmEGPhCypGs8uG/kiXWCmJKjhwjHHDspNSBZOTVIZiUAEFr2qwGObmc7XDTGauqqrT6bTwvfYrqZAAOArkgFXJ2S4IEQWUzF+PEHVBZgIAhdZ4o+2DAQDRi2iVRuScYaZUFQkdEJFTTyBKCpaKcgIVAVRzyyB0gGiiMN6oE6oMikAxxTylaDQ+2/bROYekzBzraM9X67vbHhLnHLDUKQu7kqr33rgJLEkly6GbwXYLfrR43Z4cjyYukAfRxr7Dxp7SkpOqAgsDE1GqaovCzhZnTcXQZvScdw1iZuwjFQcYvAeAqqoMK86s3S6NapdSmpru3vyru77ype8wV6M0WLZ88s5f3/zc9c+rhqOox5yb1DSGOEBEZhKRJWX16YvufPBo760/2yxARsKiBouuqspAbsH5b94kAgplflauH7PYjCP0iAhI4NUxcawUHCJ6DDGNpFZQ5/PrC6sA+64L/eERQC67nqvRrbfKslXxjIsObj5x3ZJlZ7z8lX95Cp/x+ep9H3nGlk9eM50e+/GBAwdA070Hu/926/QDxx4nx1wLoRx6fDuRc0hVVc0SgS//5I9/9x3vfOufvPK1W3fu75adQb9f+NL6dyuwsnt001AhoYKmZA5y6hgRsSgKF4I0CrXWyHnvM8CeHCGScxYtU3PafREkMeWBjaooOgJCKgtp5FNSSlHZVmOuoKrWMoR9hw5e8vTL/ux1b/nYv3xgYsIHLoIMxrujpVu2bNi06cKLnvXo0Yfvxp8pDA8dPTIxNvbAfXcePHzI+w4/5fwTl2wt77j9jri8v2HDphM3rt25e/v41CS4xiwTwAHGmI4dPow+nrBpY3927r3ve9+PfvCt7/3g2xdfevG//Mt/3PvgQ0+98MLDR+ZCGFOoU0qOiljXLVoV8tyjwQYtGDE1h8NGQAgLsFoMStYAGb/OdO5t4IakZs6j2qHCEpjZf9rjhJCt0dHmqoRE6ARyI2gIEc1s0W63K6zMTBiLbqfT6fT7/ZRSCME14uyZCR0cIpbOm1ElubxnAkjA0MJTpQWmEgJg6UObyGtOiKAqLNnEV6GZeokQzNfm9uMmddh2zMapV0eqYlACw3pY5BKR4XDonPMu5FqkZSiauqSoc64FSLOpO0IG7qZR5Qrnva9TzazeOWt9hsP+CRuKP3vN2x565OEvffnzz3j6s9ZuPHHf4bnnPHn8VSfsG8zg2kU6BgMPeoinnr2ljuPH3dM746wLhzt2HaHRuf2ZA7fddtu6U56cOCkooBeFWIsPBSge6x8Zm5xAnZ8KtGm1GS
TkxkUQTK3GU/a6cQvkJNtKpe112iyeOLb56TeKG8PFBHJRG98enY9Z1vA5O5kNFqHR2UYHaDfIOE6qKsySkzwKKBaF68Sjwwq9H1OnqpwC6NHR0kE4MAYJCDChRowkxZYVpwwH/ZWbiv4gJa40egYmzTaSJgCEonYmq6riBcCx9tNhg5xXlaaJyx0jAKCNlFqxEMtlNjk3hGzNZOTpJwK1shoDAlt5wTYJhBTZIQVyhKgND9BUirKMnTCocqZTZzjQwltjUor2I9LAcw0RzcymPC9gZVk+G8xc+kCNAK33ebGCiA7JHHGR8qZGUPIaYoFt1HzJwgKiSdkiRZ5DilARRObLYmkU6wxe69oZVfOe2zi2sC4EhCIUVVXlmtLuReKkqqCiEQB6427/ocOf//w3B1WdZLh5/QnLlo39/T9+KAG8+S3vHu8EUHRhAFEtDnvgT110b0D53z8+WbAkQlZGQAWXmoJGVJ2QAjJmfXLMcSj3xgCZmZ3Pj2FxAOzogiMVD8g+BGUmEleQiogkFkBUJEHx1ewIy1HRWe7VRVclWnr193eov3fN+k1nn3/p6WefQ9r9vT1//oX1H/rLbf/+44999bhlfgbKvUdLiolqVtKiU4DW4+M9YFLm8U4XPc0NB/v27/zaV776R3/8SvNOLkMQlpqTFOAEkUhtsmXKbhYiCDnVzjlAkMQWHaChHWKzDYFmm5Aka6YFCyNWn8VUp1iGIhk5uyisbVNRoLza6RRdRIeozIxEdV0VRYeBC98B0te8/vX333//j6/90kR3dT2nc3J4ev30maeff/yJJxX98sY90xec9NTr9t549PChyy9/5gknbPZjvem50cxxx63esmVzXfOwD+TqJ5+yvpI0MzsbQkEqVVUFcvaenB//4fe+//d/+8F1G1dd97MbZ2YH9z+050Mf/Pv3/98PFZ2xqjpQYExcF0XhFIwP2ca7hV/mLAPNdAiaKtLUZxBRm2WP0SIsUORTbklVTN0NRNU36gQWcESEvMMmYhKgCUA6cgTKzDElm1mZYHJKSRMXRRFCoGD5ZjgajYio9AG9cxLKsvTem8kEM5MvWlk7K+QJnQ9UKyPmCWquXewoSK7rk4pHT94BACpJ4qYPg3YYgIiKmUWjqsqSIDvrKQIzyxPlfrz3BKgCgNlnBoFUtRXmFNCak6hYVrANu3GWsGEu2Qs63wwAwZWhAwCpVmHesHHRl//7C5/5jy+86EWvuOX2e6/4j8uqem79xg0vPeFeVVlVUgEpkNYMHZnx0Hn6kl0/erj74INTy1f1Nj55/3333efGBqAl+BEqaWQCGit6dT0aae3GSompDXZtKsFGSheIUFQQOCURoUyr0BbS3EZD+2pTchsKMyVJDbc0L05iV8kj+U6nhf2jWvepwQoUyBW0Ctsd5JRya+WcGdW1g35StYmuDclTGnVct98f+tDHeknCIXulsWP10n7v7pXAwAGY/KA6ssyvWD+5YTCsJheNVQN0AVAR1SkkIAeSOaPaSAcAQKdbSmNj3H7eHJIoo+hNj72taPO1QoQkgGBO3u2FQgEGZWECKorCOWdTATLGoCIKeluLe8wq0wuupCgkZlWNMZpqc35hUFslCDe1YLNWh4x5gDrG9ia20QAR66puj4RQno274CFlPThwpqGClCUBGrUQRw6Aga024sbXWRfgBhacN5qv2EShwXi3+HBREQRP1Opg4wKZCABgzURzbkzPtIFlWG2BiKRQOGcWjUIaa1HQ8anOt751y733POKLOqX62h/eePKmkwdzo69c+fUHH7l9vFOmiiQMSy2tTv7IhY+duWz2D39wxt5hB0CJkCiYEROQzxuYQKRkrAjIBlDWGbT8nfnyVIC0QUG3SrqqKkIuFEU3DPtapQE2Bs8p1aPhCGDUcZMuTMeKvXfOF+K5OjL42fXXHXfCzksvuJxcZzQYvHTw+i/y335p4h3jqXpo72Lyrih1EEdUJueUGSITAzuM3mFk5ohLl2/sjNLOXft+8fNbnnzOU5KrSCSJlr1uhIhKbeIgndc5ISJBJOeUIIGiIiHGmKSZcmXCUkvLJswbOs6A/KYgSzFGZbaNhqqGTmk1mV2WJAyqrdxCb3wipQTMhL1R1Z+cGn/b2991+x0/nZnd2fMrRoPRw/ffLgLL1yzvre3o47p5w6b1f7zpkbvueOnL/nTR1LRXqEHGorgjM3Mq3heMMDYzPMzJlWXH3m4Ioa7rUBQrV6+894H7f/ij7517/nkv+qOX79zed0X5rne+8Ulnbn7hS172+J5jnbILBKXrsCR0COhBRZt7qwBtl9OEWESPpJgq0TpGtSkZEGX3HwO8q6oZms5HCrUfdyKJiNA5D82Cyla8C9CeLszD2WtNGDy1DVaWFyQh4/7Pe06UZdn2WJbGmFlUSNoUhYSKAFKzegjBEZFHBFFyJCI2kyQkIVu1gTXp1Jj6EaILWXKrxelBbmQz60NBUuT23Dj0gCoqCIDk2m2W/ayqFp1utoVgdkUJQ2wlf9vQlie6RKR5P2TanqCa4V3kvY3QqxEh9MbK7Q/vetWrX7F+w6YP/u1bfuvZv7ft0QOLl0/292/dMjGc6hYeR4cHoJ1xkHoS6h5VsQjlga1XffnkJYtHD99zz2f/63OnnnL+nt3ivCNyPpRcR+HoPPVCj0EhKSeTtBWTurQrX9e15ICRP4LBWc1vccGYNFdm1PA62gOmmUroAOwoNRIWFqtAAJWZY4yeSCiPn8UhMSCAt2G/XSXbTzfpX7OmYSNG1ihOtEWnA6ySeOVjh/vMpRRVjGPDel853JGmRp39PVRUiGO1OzbsP2nLeardqckBgBcSAnLAqCFpTMjeewEG22DZ5EbFk0uppqanyQGEWRbspBnEEHnYtJuASA6hVUYkQEHXVmAhd37z7TURIibbc6hyzEQaZ/AC51Dm6cJ21QGAa0ZvtaiZOiJ6B4oe1abArWkgABJR4bwx8fIIV1Qxu58Zj0CYY0pRhLwjJVBlFUSnxlFq0qFyyiwUVSJvwmaC4J23RzJ7SipYC2WHzdxcstqaQ2zq5vmHBUBVa072cKlZQLaWEqq+CLY6swTcbnzquvbek1mYN0GJEH0RhoPh9OJFWx858t3vXNft+UMH965ateZd73njpo0n3nnXfQ9v3V6UGhzEREDjiSsR+dOTH3/ZiY+/7ecn3X5ksSKnlICEiFiFAJ1DNe1M55zzImxh06B24jCLUxAJsJgYSxOdWdQ5hwAI6JDUgaAQuhRVmJwLjlxdV6PBHJDts1yf61CpR4lyTGix1Dg5PnXo2PYdP9177plnqSBRbxrc5pvOue+VP3WfXB4PjjgWIkheQbt1H3ulFzdyDp0rYl079Kra7/cXL1v5wH33//cXv3Lu0546MxrWdUXoNAm5JpuGIDExsw5HtqY1tKbpFqM3DBkgPGEetnBFhaLgcsGaDzwoetd1rq5rKkIoisFg0ApYhk6JiJ6C/aIYGUCIqK6T8xAoWHCpY3zSmae+7GVv+Ng//SVOJOp0+/XwoW333Xrz7ad0TgaGWA9P3HTSBz7wo
dPPPnM4HHrVMSRwLmpSX4hKOeKjwU27UFtzJqpJ2BdBHFYpnnnak9e+70NKuG/vQRHokX/pS15x4kkbZ471U3TOKRIDeOd94prAG8LSxh8AIKqgapVLdgpqkhwiGRtdEqfI5JzJa6CCqEJD8LermVQdOMSsz2coFGxpiw3fXFUUKIdPVU5JUMkRGWqRxXlHiKBaS6b8+0bmwmBvJiTimlWBDSVQlAElMWCj+9oI1lvnRJqxECklFXFEFSlpBkQREYqyiHOkzXbKqMbzFbpavmdEdJ6QEQAkJgHJUsSIzjnfJpLmZ62rMHZHWZZIZDIFhultFdfyykqy20G+MyIOHSCKsigTQdnVXs9NjvnLX/gSgMmJqQ3PfeYrB31Zt2Hj3GBfVyGl2ix4x6cWP7Ln2FgBk2NQBMKERShqmZkYW/76N/3hc37nwt3bR0VPHZdJNEISryjqAZGlQ77GdhJAYILMwqpaFoWIpBZhK2AKRJxZpPOiSwv7eAP1QIPgIxPH0EZsQ4FsVqnMAEVRcB1NVYeoge8SqbBZyYI1zaKq6hSR0AXvpHFfloyLbqR0ne3ViMiHEGSELKNhLLrLoah4ptiybO7Jz37q5+hXdLiHiCKp8lDXo4s3nNcfyIplhUTyJWBC0hjIIXhRsYex7Qux+ZghBGE1GHD+7I7mtUFYrFbFFoRiWabRzc5qGxmKoGiaBo0eedsZpJTIuzzX0eySGW0IXwTEFoqRK2R7jkz5xLTYIqcYI5EHUjN4sPGSUX2yeKcoZ8UYRM0jaE+5MwOi4L0CCKESAks7IqKF9B4g69c1Z5s8gcotb4Pyc85ZchoMBk4yA5CIrAhglVjFtm3Kt7VRR7fugJDUdE4RUTS1zGZRIswZXcQ5l4QlahabU3CIZVmywMRkT1W//MXv79z5uAuj2ZnB29/2jtXLjt+6bfspZ5ywacuWrQ8dBMdUxFoYES9cfezD5z16xf3H/ffDKxAZCW2QZlwph1RVQ8v9AkrBY1JhwcajRE27Fz0haFO82vsHQgajGqkwI4BD7xwQ0ij1FaJolFSJxqIEERlWgxSH0p0KWPXceE1BiTvkZEDjU4sx1Nddc+35512+6eQ1N11/787/2Ae/1OHv9YvPTYFnwqQ1A2jougHXBRJXFYVCU4oanQ/DuUOLFo+vXHXc9df/7Bc/vfW8C86e67NzIdbJERC6Vuu+qY0UEQXUZ8iQknNICMnoXoLBG8JjwcADgvPMzCyuKBAzRJGCdzEvKEejkR3jFg9BRN4HACiKoqoqydz2hOw0AYZhURQpaR2qV77yjdf+8NoHH75pydTKWHfHis6W9Zs7HRKVmZmDe2YeWbdubWJNMRJoRRgRiFyRGARi6buglYgAizHbPDmOSVgLCsMYu2Pjve7YunXHdcdLKvHCS5++bOXGxIReVVmjQIxaM6lHSKSAopgydA7NvS34hKoILniHQRm996FTMnOKnFhYRERQFJOY/5cHCIgEqo3eGDiQvANaEHZFPFLHBSJC78RhdNLn0VBTDcIIhTjHCpFVlQkyt8+5oui0da5FIgZlwDzoAPRoe2PRpLbUx4KwDBE1dEoDZEEgu2iGhGKR5ofER3GSQU/zChsup8y8xGplJQCKEDqhKCggLDA2N6V4R7pAzzL3hZhTrEdKwjFGSFGVFUFBg/fEWveHWicb3CF0RQCQHdWoyYGrEmsIFUmt7AkDpgKr/bt3fO5Tn738smfce//uc84556yn8aoNUnaLOKq7YfERrd3UevVjLODqI6vG4opeIsIaOjHpY4dRh3PDQXrTm957tMLoSs8omkgF1bTBMQGwQgLN1wFJYjKGpaoCYR0rs4hul75FUYAjH5zVOpFFkZxzAagAdC6QwbbJGcjIqXKKCTWhKqErSvQhp20gR0byyfWTxBo4qbKJk0tDFKZmvM2QPf6a20EUfFEUQEa6BAYFzsrnVYq+pEG/vn1n3UXGwVw98HsG2x8rdwKA37tYVZic1INpv2HFklPQ7SsnOyx1CZ5jEvKVqyNI4YPRxI1GbyghRARFqdkBUvDtf5hZEnNMRk515Ann5UcSaiKICEkzgtc5V/hgmSaEoABq6mop2nzBgfPola3kc1h4KgN4Zx+fa2ZWADJsi8Rk6xGmzMU0KBQnTsKqDJHNTEZVR7G2tqPVBkFUkSSSPKFDCI5KHwCgFo6oGhwWHhWI1SlAYonJ0BWIaM+puY6SWpUh6IA8tg1QLjLM9QGhTrHolOSd4V1qTtbfmBiLnUNDw0pMXsBxrkiMd24VSbIqWwCdVyQqg3qqUjSiYKCydAgqATyqR0RGSaAppaKkH1xz3bU/+snEOAyPjVavWHvilie//S2vu+rKbxzbW+/deW+3LFknY4w+4fru8IpL7vvl3qkP37qRAEFyGaQsDgkVmJl8QOcFUBVHVR9j3U1SRCZWAPAC3QgksXAFJ1UU9BhRxVNyNVJyIFBHVYHCxQJ8p9CkENmDAPdjmmOp67quh1UBvtubnvShpPEKnSsmvRQpOvCYkp8qxu59+Pb/+p8vPPrwo5/93L/O3nWg/G4n/XnVKRFFFb2SA0WHgXxITFqgC77jJwShnw4ORwfi3GDtcauPzVU/ue5aH8Gp5zp23FgiMlyOxYFeb7zsdY2D7skllZRSUAyKACA+e6hrYlIonC+cN05pkhxwsPAjjtZlBSCqkhCGTmlVWkbtELoiWNBmjgAyGg1UGR2RDwU6VXRFAC1RCVWqOk2t6L7+LW/pdlZ7T1jQ+PKlf/CKF55x1rnOucFsFcpJVX7ggQdYxLe5vR1UWufRZgIRicKREwEC5XrQesEWioaIvU7XuoF2vmeh6jc0ehCVwCGiqe3kBoWMbidmBSUi9uJ5BB8CIjoiFrVhrPPBIOAiQg3wLz85ROZXk1BRIQAVmFvkCCyIkcQjYVJgLojADNu9M6v2djLZTCry/zToORqCRAXRIyqwCggRISApCBE5RwTGu7dE3q6I8pTDcHTkctfOT5DFQUS7dg4wNerNxl+kBmOpzRhWRBBdbGwtVNXWOO3IhUV4NIp1BIWYMoZeEIwcIjDrA8WoKiWLqlbdXlnHqOzGxzDFwTe/8Z0De4/88he/uu3WW4Zz/S0nb/G+GBvzp589/qKXr55eNPa+t1+1e3f3qkfDSzalA1W5yA8nSmTRGSmwhz+719PSs84/cfp1r3/lkrXTu3an0g8GI3TeJngIDfXTVJSBbKIAVpcIMxI650QBjGaQazeykh8cilH6yDlyMdbeoAKcCOdVnAxh54IXAEnMmsiERlyjOChGUYBEmlSEk3MOiTim9hGwgZUJCAlBW6Xldk9VVa2gtvuYQDWxB4+IQF4ib73nEZiUyOt603PlqttuHv7CzQY/mo0jVigPHpz5nc2/s7xeciTsdM7VMUpVNyUCenASUwu9xgxrsA4sL3oXYLCg3XW183lTCzdYk/fe4CTUGBtbQdYesMJGrJIndVZGigiQmtKjad34tsMjJ5JaA297nbK0hSVY59GeyRhj4Xx7E9sHRBO3aucLr22+/ka0Nc9gyI8VNWgj
kQyaV4Io7BoAHWKGYuTtTFQiAgRSEARon1B4AurT2l8A8N6hZPYEQytUmt1TCMk1Si82XGFzZIO8xrF2mxBZUgL2rquqCpGwEBaBempq6sGHd3/2iq9V9SAUaTia3Xjq5iVLlrznPe85/qQt//2Vqw4fPbR00STXkcj3Anzhmff1k3v19ScOk6raJUXQBpFKqJnM3Fy0hEqUvCMFIm8eAgnBgx+OZovgBAOKK0khidMOoNQoYXx8rOjGfhVUii70qapdpXUlzEEdq9TK5fRYYg7gOEqUWHZDVQ0BqPQFx6RaJSwWjU38+oaf7Lz/7kceuX2yx/Dx3syPDvcvnfPfKwnUlyUIVVWFJEWgKmEVRwGVlVatfEqsqoPHDq/ZSGPjne9+71uvePkfLVu5EhlAxSG5AM1yBImUJbtVYgON1kb22WYf2Jzw+UkGQKorDMFuuvlRQmNj1TbKbbnWBnDJOuS6YDSixkG17JMPsMjMsWPP+73fvu/uX3/q3/5p1crFSyaX7tn1uC5h56jX6bhxvO6mG/7uwx95yQtf1RotgEOCRsPdHg9s1eDAYyKDM7b+YvaB26cFs95phvhnReh5goQ2y18UVVLwDeDFPpiBAuzqdIqe5Sf0jhAVW0mtzJ3XeXuDJyxp7EsQ1FoWVWodXRBJkTAL7oMnJRBESRxHVXAFdlymwzYn2D4O+qDKsHBrSKikJBkuCwAgoohSs3ghaeaoC7SvXSNV7yFrNuX3PO8XMC9ej5jlDHIWX7DmXBiYsNFBtSG2a9zftFnbOwUhDM54nuiyLbH5mwJgF0F90BSBbFEAgupWLiseffTu//vBj245/oR/++T/Oe3UM1esdM/93Uvvu/eB2286/MgD47NzBwVvfdbzl3zgny76hw9dfcUtxbnr163oP3jEucIhg8Ykxaru7nPWHb3yYOHrzSecsP+Qkh/6OKVdVmVsEB85ASjb1kARPLkFLjaaUlqIt7LnKjfKCOaj4JGURRMn28QkFpMRUQU0mB4qKgGSQXOBYoxJhZA8UquwGEJAo4SBArPX7HqZn8DmDcOC2Zdr9MjsRVoSiGSJAE7C472JPY/ec+TuL0Xf8Z2VFRyu7x7gb4PfMwaD7pQfX7t2w/f3Xn/xuc+Jc3HsBFLIBRk6JwIOBIk8heSEmYHztKPN/IiYVMy4wr6yclODe1dVo8gTUZZps+ZeFUwKUUETExEDWJ298DTaGfNCJhxrgQIhK8wQUSvlCE24AATT6zC8mCkFOu9sTCoiC5E+piuUGnqJ4HxxY1zbuilkSZsyw7KvqW0z15xQ0Nqg/HQYO9ECiyISmqo+5Gya76o0aL72vCEiq2hkcHm8RETkyFMQkZqZmb3Ov3lpgDkAEBrpj6ZSzx+EApbUVS4YBmXhODnn2fsiKn/jyquOHj2k6uf6yYfeo49su/+BHaecuPH711z7X1/83MTkOAMLYHD+X55254aJ4W9f/eSDAzL1TVU13rw2ijQLQjECgHeOHSYAZfHMzioFR8JVCA6AQINzBUvtnCOlsugkkiGPDuzYPRV6x61atffAzr1H9k33ep2yHJAfpREGQsU4qIIrakmFL13hItdAnghSimVRooKEsvAymjuya+exboGaaHSzo5/70RsH498PzOypI+iAVKUWdqHocRzMVHOLVp16+XNfWQ/6V37zk0cOH3jN/34FYDw6c2T5cWt7RTEcDBwQEagyAiFRVdf5AVTxjgwr2ZaSrCICRrqzprmuaxUJITBmQ1VNrMI2uEZENcWjHNSbur9RdLAz1l5qaXiwdtnLsqvKMQqhA4K5obz4pS+789abfvHznxw9NluNBgV0nfPLV62aO1pte2B3Gs5+4T8/4tu324T1nBe9z/AwZm6ETBERIydyuYBtf9ZKTCKyxRuDkohzzjtP86roYiIPoCCipl1gn23exoYQG2BbW+fa30VhZ1thNbWJHP6MhsQLcp79+4KcAETN1qeC4BUJMNZpFGsXvC8L43X47hgpRGhoJJhlFmC+Hp/fvbWtLTOTczleiJgtqyFm2wmza+yeUrJeLcvRtGXKPOx2gRJvfrCbbS7DPEKPGhBBjNEWuqHwiBCcl3peisFacafAmMMlAKAjh8TM3rxeJSkgKDtPiOSpO5zrj4+N3XvHzdf88Cc/uPrqa4vqla969ewxvfYHP73lpn1veOe5l11+6L1vu27ZihXeL7nqi9vuu7n44Mee/+8f//7v/OuBF527+lnHHV3Zq/pcXLe9+P73yv/z0d897cGrVk6ffPyJmx7d1e8VEyx1zAUOYKv/QGiDUG2QirYUtECfUoJWBD8XauiRgEAVyDtlQdKqGnnvBSAJl85Jk5kMUyMimkRDC3+1SlnUzOEdCQAuIDJ5JCDUOrWznFxNGrWfPDXcJxFTUmz4Y08E1gooMy8u08e/euXhujjnKSfdd/92F5I/vPjYoq3u52ODrx7ADemO2285//STDuzcp7j1uaetrpUNma+ckIhRRSFQs7FeqJ0OQM4pKKbfZBm0ySPT3uraZH3yk9XuWRRsq5NTqxF1BLBpc7XZAZtbkc4XJGgABWh0LexPNLFwgkZNmpwL5MqiyEWMqJJqYzKdh0MKiOidE0OutXCqlqBsJBBrUIyjyJJITEjHFWE+ItqHcuQw72JyuWwKJAYNY87+xnlMja2WdfuAswohNQJfjG7+8SQiyeYKkOl9je2bpPmRSWOdSQCYYtUpu3XksuuyqDrEbnfym9+46bof39jthaOHo0KsUn355ZccPbb/Vzfvf/ihRwEkBAfKoVO8+aSHXrDx4J/85NR7j4wLpFZn28K1CZzZXberZzVcrTUqAREFBODcAGU1oi4DuBJriehCp1PENNp9cAeJ27h+0x+96o8uvey800456aH7H7vi01+8+uqvjaiYGF80NjFWpQGAMoIkLYuxqqoKdI4CIiYB73wSRvFVVfmA1O1ERsJJwujibPlvxfBLg3R2LH/tR3UtkFTYY6HiY107p6HoLVmyfmrxqrAcVq89ZfvWm9evO+5Zlz9jVA9nZ/v1cG7R9EThwihVzsKzqDlKoaOCvNS1PTTOObXpTo78qo3VniiLCkDohCKm5L2nQCZcaJE2xtjr9Sz+eKQoiugU0HrodqQ0f6NFyKEtX+t6JDGFEMpQxFRjCSvXrPzXT1zxnW9/fdXKNaeedvo9e+8E0HKi3LzqpPe/55SnX3zZ+z/wFs+Nv0KTUJsg7gkQMedgMVylI2ff5ISan+T5x56IoMh/DWD4ykw0Mi0eJAJRROCY0LnW2LzJz0BEw2oUnDebARFpalsCsF2OM/8EuxzOhzZ0tsUsAJirBSIIKBIF50yAAoMHAGH2Mc+E0YfUeAi1LwXzb+w35Rra+tdoxCKC0oBOEdURLahDERFakciUgBBEXTNDZlCtE1He6baFMwDYUE5VUTR4LwgNxmQ+E1sCoMatmREAgZxDh8a2UYBmPIMO28bDyFGI4CoWJCVRjtWypV3C/s5te/bs2v6MZ5z37ve877vfu/7rX/n84sXT9937yKteeM+fvPH4r1337Pe
+5Wd7HsNTTjv92OGZt73y+r/71KWPb/vxV2+d+e7DONZbPxqNxsfHy870X7/lm/t23XbN9f94+AgE8Bx9DQJQK3rKRlbYFhyWL0kVUW1zhoiBHIb5G2HWMc1ZTZyrxbbgcBYOa8wqgACAmnd46Jz1S0lFJDnn7A4jYsZqmbgE5+8dUO0Isl+bInMemFPunNpf2hZ8iGirh7bXBACjD1x3403LFi96/rMve+yR/45Qaifqlmrsh6vKRYux7493a55x3DM/85F/L8pxry993u/9weHBjDPuECKSisgwsVe0hhVFK83UIMpP04IBdFO5tyfQjkdbsCOiI6pTtFZi4Q8SUXCEJhkn8zbD9gEZxTmXufJE3vvEGmN0PhgIqHA+am0e20RIgM47apymzU5QGopBe91U8xYKoN372C+F3EP7wiqG9u9UFQCT1daCC5+aXAeQEmRXIvtrMRdui8stT8kmTws0goxt7xjQiuD867jt2YPzqQE0ZXhpyya3E9ZcUUQEUkTnvK+qWJRFSomTdy6NT3S3bzv8qU/+57GZI34AmroVj7Zs2fKud71z2Ie5Qb3x+ONv+uX1Bw4eGx/vXbpk5zvPfPTv79j4g50rECOiM0wqIoqAGSoDzPcMAGCyQ0Lo7M8JgYhVgdUJgC8FUEEItFvgaDjYs2v32FjnWU9/1kte9PwLn3bm+Pj4IGp/EM8594zzzzvjBz/67Q+878NbH9y2du3auq5ZkyIqgST1vjAQLSL5bCcv2uFxcVVNozSqq5mx8e6KVcfv33mvfh/xEareMEh/3AUsnWcEQeisWX/iwWP752Z2cwIXOkmqid7U+g0nPfTr6398zQ83Hb9x6coVae4oAh881J+cWLZ46dTYGMzOQooyrCvjpA2HQ5eVd1FVIbEgGM8IkVS55mS6GSICqETOBFsQkVt8AKiIDIdDizZmWICYV8vaoL3mQ3SuYmlUjUIIRVGI9yha17WqpqFITItWLvvzd7yZCA4eGBIFRFIXqJwoe/qcP/ytdcf/jzcJOvOwUlVLnogoolZKOecos+9RBAofRAQQwdPCp1oRQbhti6HpGObnQnbm8141pxBjGVoFTQqKgAq0QKYKcjlv5qyAqICKeQKp1gvalrQd9tpLRQcOEFkcK6oSYWYjCHTKcjAcWjMhKiiJfABhanVxm99r+d4UDBwgIWmzS/DOm4OmNEazudwWIe9KzEQgZgbK4w4L99h411h2b/foVpcAZP2d6FQQOKaFKb+99/aCttgD1VGsOqGw1iajzUWMkNqI/TbHpelCvC+rKha+QNS6Gi1fNv7ow49+9Stf3P7IgeUrF7/+9X/0uc9+7T//67OXXnaZMK9aOX3PPXf++98/sHtH958+ffFfvvaWPdvmFi2Ghx86+I0v7Hn127Z85C8PTi+NB/YfWTy9qtvtDUeHd+x88APvft/ZT7lk96GZTujGVLluhDqQNf2iNodvz4kDTDBv15On9/PNBJgYi8uOsIiG2EQE5hCCEnr0phGRhJWThSRqzrDkZo44s2nyCqNW9kiA6IAQs7+sCUFjO8hSAc6nlFsSsPftSApaVqhBDu0+KghLd3xi2dT4PXf97G8+tGPpspX17IysUOgK3TngQ3urLv9638P9K/ectmXdjoMHHnhg+wt7cOhQohAAIUkkIO8KVQZR16jH2FBEQJkTyHy11+bLdlIioNScFm3LYiKHREiCwqAO0WY5C/NHzu4LXtM5h0CIklGFmtBhCCEZHAmRyBE68t4WrCICJhyWe8Q8ol9oByQiQJm7avVhO+5rWAya+bstAMJK4Qbq3xZD802qIoiySrLA2gRNQXAL/LABIFs1mEa0dRqSZ3H25LZtOioIs6iCc+jzISFA1ywdVdUFDwDa6HUIKDVbI++LmEYA4F2Bro41fepTn9+z54Hp6cWd0KmTzh3Z3+/3P/yhfz7tpNPPv+hp3/7Odx/fs2t6emJdsf//XXT/d7ev+Ohda5veY4Hep9hQXu3/HKIsgIk4V5ACCLKCekJEQgikFTBh6vouV7p/1/7lS7uvefnvvfylf3zS6ZuHc8oMBw8OgYA1HTo6Klxx8bOe/pVTnvSv//SxL//3l6YXL1L1FvICOF8E0Uwkc45mZma6Y+P9mWMR/DCGM8/atEQP1zP940467Sdzxw5sfbz7773hP8z1Tl25Vk7Z9dg9kUe+6J529tPuve/OmSN7ywJ7nQIJRNKqVWsmJ5fdftvND9z/9Cd1O1W/v3jx9NxgeGj/1j17wpe//OUNGza9+CV/FMgP6ypJpuc5csB562cuZ8oCDtDl/WkRgjKbEqCdhJgtXTIDzcaKqkoK1v461+zvQNrnfeHBg0b5dTQaAUBZlhbRSAeuKGdmB4PaIaIDX5YFgDpxgnJ0hntl/5RTN/qFCdIjoULSfEc1S08Jemf0jDZiYkN7R8TsskCoOUlo+85IQdGcZ+dL8tbBGxyZVlMziGZs4F0GVXPOIRoHlxFaMTob4wCi+U5Dm+9txeUAWZWtY2bRxFam2sOGAoBIZWAjZCgiq2O2ulEaJFr7VKvJWyROgN57YFFHqsggSQWzZp4FuJw7VTNiy0K2UTYzAUkBGuWEZJ5hC57hNsvaz7ZrNrvOwXt0zkJDzk8tHVMhxijW+CqIIoiCggBWKYpKvg7NhULEmmvfEa5rYVy+YvxnN/34r/7iPePlmmc/76Tfee4L3/++jzz04LaTTz772OwoVnNLFy+77OnPcyFddeVNpQ/v/MA5b3/lDWXYvPl4ufrr91322xec9dT9t/+8NzneY5Y1q5b++Cc3//nr/+Jt733Xrr1z6MsowXUBUvDkktRop6thpjmTjUqsDceDghcF5jzTc85lq5yUrMoBR55IQAAgCRvlw85GGYqUkvVhzjmxHleEzSVPRCRb0tpdKGw8xTleIyGYiGI7lM4nGpRFGxVGjw4RW49Iu1nee2tYRexWQErsAabGJsbHx5cuWco8dDiTTh0AAD0oI1dTNVLmAwf3TI6N3/mLH7zrXW8fVuCL0oQEUcm0Lq2btKOJ3jUqfAAKor/ZAbcVsEEaG/ZBruHa3Yc0S2Jp5jTtXwGAQ2czCma2ghgdoYH/vWPmGGun1Ol06lxnsi2SnfOGrYQFMrGWLA3eIo0jePuWUvaumZ9vQ44XQIAeUNSufK4QRMQnMI4iZII9SKbeZeHJFoWnDQgLm/GVa3T8rVYz0IZiWyi4LIgdMuQNWvFSBXIYjYWhoCoGIERAYUHzB2zvhSqICkhKEUSd54CdKs5NdMe/fdVPv/Odq8c7YzKimisBneiN79y17Y5bfnbxJc89/5ILzLRt0o/+++n37eh333DDidLOkCEnWYAMS8AmYlg3pg0VO08eEL2NDxUUmRF73aIeDQ7u3bVk0bLXveZP//erXnzSycsPH6kO7DuGVLBS8B1UQUUgnB32Dw/nxnqdv/+7D510wqb3ve/93d54UXjQFOvRbJ9D2UEIqKgInfGxienx9WvXlT0XetN/+PwL5+6/6e4bvr9hwp173oWHjx7S/zoM755zb4XTfnzZ3p
3bR/GAjuHcUBITgpTB13UV6ySA04uWTC2aPnRk25GjB0ej0ZEDu6cme4P+6AtXfP5pl56/atWqD3/4gyeeeOJFl1yYVFyD/BcRjpGZM1fbdrSYQzRLas6UgeXsaUGTibczTAqdTk9VLZFnUKyjoiiYo6kFtPEZ8/q1eS69BwBrf9GRpwAK3bLDMvLeQ+LRsFLA0jtPg05nSqqOQPJt6spFlUjiRESmJDWPu5y3CmjwuvM4FJ9SsoYSEQHEqnJqlQXbkWyeygIqMYnL5rjzECoLtVxHdFlaHaAVIzQysECTeICspW5EDwxNxmL7YKoZDdodnO2QkNUJDBwqx9IHVOWYHBGWJACORQFaSduc2zw18gPzmg8ApCoG8AHvDADl0LUdRvvk2xcRoSOIrO1z0nRLtibE+Y1O09eLmJJRIOcWig42cruSRfnFsGw21iNymQfX8PyieVkDtgFPmz23IlVxBOomx3u33P7T//76h97zwRfedP3Wtct/6/4HHr7/obvOOvOSio+yjh7fMXvc2tWzM4PrfnLt1BL8wbce2njCGb//ipVf+uSOdcdPJ+3/6sY9lzxn6sff25Gk89zfvvSGG3/83ne/7/3v/z9bDwypyzxwWlR1FZ2t3GR+2N6WU2CIRhFmrjmVhBlgoqrKXuZhEWLK1QvuODV0rDwmivPyEaZ+oKrism4aETUEsZzduY7OOYcoBn81jqnDQhZgIF3e0TCzSawkY0Z5bynKDF+tP8OmCyGiEMKBg4cf2PbwTM0zW7c6qcqpKd0EMETe672SE1d0OhDhp7+67Q9f8ke/d/kzHj0wCEWhigoafAEAXLOqRq8Lk73E7H49UsEm1bVfvjEjcs7FVGPjdEQK3IydSAG8k7xmRTuTHskoGfMRqgWOSrKJLUKj8STmbusAwNKeHd0WhWTAZskwiZzzQoM+Je/MEtykMOwfsDZcPAUASKokyvbkZaV1q5GhtcTOW2REDQ4ANCYAMHK23YU8uzLstwJ4T2roPBXmjPJDBAIRMdGQpFqQU9WkDADeeYLc+KKo+QvHGIU5b3YU2sUhNQonRmDzZalCLIroyi48/vjjd9y2lYKr46j0VNVHAcb6A12yYvniJRM7d+y75ppr7r333tljR776jEcnQvrDH58xYO9DApEc7khhfh7fjMEVEA1Gmg+tB0kAbHZBQojoAoGnvTsOdQr3ipe/+FWvffFJJx2/9+DM3Y8e6o2NOewgKEutXFv7wFz3QqeToD8cHIzD17z1T1duWPuet/+VzsyVoBPr12zYdDxSGI3S1OSiTq97waUXrzxu1SPb9nCa2/Xo46l2c1IO0/DQrrtOPO2S3adfeOtPrvKfdkffsvvoTyrRkoiGdZ+wTCnWsV9BWY8SW2B1xeSi6Z07Dj3wwH3rN2/uBfVIO3fsWbFi1W89+/Ki6Hz36qvrujZ6d6oj+QyMQO86IVgja0CZrOSLoIkAwCZbzEyNXJ2d9hZ20G76mkiclLNygyOipl9qcwEAeW89kjWMeboDwKO5UafTcepcQhbEgAAqCo4nk5+DsTA7cllAxEBi1sYRuirFXkIiSmoHkkDyGixPlbOwKqqYbTo6F1SVRQSFnM3iwBFFRwxq+k3Y4PU9qVMzlUN0baRzqsDIVDgVYBVfBAJURVJgZJX5Wg9s0ePIOAOI6LxThmQrbRMudECmMOeIQrAGq3SoFFCB0PsyAAjmDjHL93vXRFURSKCJ569PlqJNgCACSqgxtahpRTBvwVYxGJqGWFgY0aMzDQ3bhZeQGeXmn2NKD46AyIECoJJ3SZWVwWSfY/Tqg/fGVKnrutvt2uQQDQUbGUCtfs9HJCYeJURg5uFolDVGiLwgi6jDUIbBzMGdj31r47ruFZ/48uvfds7nPvu3L/5f7//oJz75mf+44uzTn3bs6OH77/jG1PTiq7/17VNOe/K+A1v37Nr9tS/c/Z5/OPVrX7x/x87hyiWb7rzl0DOedxbCA+96x1/s2r31z9/wZ3/xzr9+YOdMt+wQd8UnhxB8xyT7yedri+gs0GYbmSpmbI5oHFXoXQgBAZVcQnXOOXCld+aybu7cNu0wQhEiJtu8IqJoxwVENN1NU/mHZukbgpN5iWkQT1EFBYBQrepSQAWTQGFCBkFVj3mCKiAg6J0DgDiqrF/Hwkkq63So43tJvC80Rjfk4cTE5MF9+865rPO6d53/wN3Hbv7p47+84UFYQ+5RX0hHsRIKPOz7JUs7ROvWH4cFxxg7LqBJiSAJgvdZCsMHbylKQJlUgAmdrYQWJmBtaBiOSBKbKH/eDXnnBTWxJubgsEFfeXNAMWkYZ0BlIHQi4pDynlUAQFnEHlhfFihKGJIkUqC8GmBTVSPWyBEICZAMR60YFFOKGYQsSoClc5XM16CQ5w2qpmOJiIhCKlFQtDTYRBMfraSwcVpSUVEUREfgyVjaJGCgR0urNidXI0ALU1QiCs5LUy6AAgEiKyKU5G1+EDxlTw7RyEIKzrvcWbqyqipy6J1HhyZowwRVqkGl48vsQCU68iNKRS9JKPxHv3HznmNTp538tLtvvRYnRojl6Jg+5eyz3/SON6XUPXRwz9hUOT3ReevGHz1t6tAf/ejsx+d6CBVIKQQeVCSSogCqZo/KKBIEADEBOxTT3HTok2AqsZdcBFAfxtEdmD0Y52Z/63kX/cUb33bGeaccOxgf3b2/SMViklHVZ1eCOsRSPXlgjbEsXK16BEejWBc1bd9+8KxNm/7mec/DHY9MDPvTf/Jif/qpDzx24LH7d3kKw2pw232/5l/dMDMb6+qY98Vl5z+p2D8xWDo9UR5z++980vr19yxefOzjc/C2tP2S68Z+3OkfkV631+2IIHiYZOQqHYO5w37JFKGKnww+BKwP7ty7bsNSV3YG/b4Evuqb37/nnrve9Ja3X3DpxTODikWKEKRxqTG5GFENITiiGKMkVWbnnEqmmavpurCYEAUiSmIH6IpCVY0wkxSc8yJSsyBASuILF0XM+zrGqAJN22OcBQKVGJuFCACA64w5REwJbI2lwghYS2SsuEaoUxnQtwCNdqjtyBWEnIx2N79tBta29wVpWIkGilFVW6uYaUy2DsvZElVV2DKSAyTDBTTdsGu4xSAq5rEKBKj251l8R5CA0DdzrXbnGpNpwaioKZ5jQ45y6BfqkLXpEBpGU9N7KZud3HyDngdOInls23a0GV5rIuaZI6SqCpSfbSTkxARoTGFo2s18V5oFeH5LLRRLxAw7FyDcVdV28OaAS74p26NKYpVmYuGNhcjKoL4Bo0p+QQRCdFb0CAbKFGwkUcWSuA6Lu8WPr//JwYP3Pf8FZ//6V9++5tsP/8ErBg/d+y9/et6XX/eqiTe86RWbNm65/DnP2XD8cX/1ng+fcMIJ3/vetyenrrnvzl27Hi0vftbqa7/Z6U55DDqs3JYTNx8+NLtixYp3/tV7tz7G3V5XWYhc15Xmbt+sSxd2wNqMLyVvu32DlGEGytYlYBMBIiCAzPPBAMjMSdU3miQGevQCQMQIYufJIF3JzAnmx/uwAKzkGvFIZhFVR
HJI6Bw3ukUWxLP6qTU8hK6h66SURDkQ94rJalQjMsfC+8prx9HcfY99Kfh4y8/3Pu2S9Rc/c+XenWe859Tv1o+pKieuy6LATnn06BHVtGTxquHQO+eGdRUQiZwIU/Ah+JSSK0I+sbbBQacgzOyb+gOe+KXNWIscGjKl3Vlg8MaFdSH790XOAnCi7bOxgE6DSDDvPcUpgaEOKSeYpAosKBn/n0elSaLUzjkrYW3p4EKIKgbFR3RAWjjP7jfNrBaGHUN9orNGICsHMOfVHZjXEjVcjMQ2QybNphFWNjnE1FTqC3+RnT0bjTgkc73I+3LM2kFi3bgjBw5xvlmHxn0IFFNiEfaFU0UQJW2MwB2h4LiEuYqLFeUttz76vWvvm1i24aRNp4b7flnFY4UPiu7y5z5/5arj9u0dbFh34mw18/trbzrh0C/e/atTbzowicQu+BTRQXIEDklQmTmhIPkksQCSAiTVBQCgCxqcCyNJhaoqcomFuhp42/7tT9m86S/e9TcveMHlw/5o3/46SSyKxX7Mj2oJBUpCSSPnZDQ8dmjf4fEwIeXYoCgAxkB6M6NBf/bA4EffX3zbzSXFHWNp/OEDBwb3Pbj38QN7D0yPd5SwRx2ZXMFuxEe5P5irAQZQHpWJY0f06L4Da867cO36k2Zu3eG/Umx93s0r/uYMEeiWPnSmKmZ1yk5nB/3ZYb3KdagUSskhKZVbd+245oYfPv1Zz7zgnAtf8Pzn+tCpqt/tjY0llpS4KAphYWZfeGZRFRC1hYgabB5EFTj7JOXdok1Sk4pANk23rZ6qMttKNNMNHLj5Y8mSNJqKnCckQmm0DRExF5ZP5MRak6CqYC8CSs6hI+GkqphSdiC305zbNTWu0Py8tMlVoKqski3KWbR5rnL6UTMEABBVBLaUKop5ZWKI1ZyB8rQnS8Nbz6GIAIomhGUpJCsQgaaqNpsEyA+DpXW2zZw0S7v2YuGCYW8TUgABs4bRgi/D5hCCyRq0N8kBgoMozZBKtdHxVxFxrOScLuB9oiNEMt+M9ldkDBFhyx63yp2ZBRvFksSISB4NGYQNlbBZQudbgwYu4wjNIjml5BrcmUorvgdktsqEPgQqrGAHH0IuoJzTxCokOoOlv+ee7X4MDs/e/5LXjg+PVAd3zCxb/dA3r37vZRe/4T3vfv83vnnVxs1POnxwsP/Atsd2P/rbz/vdLVtOetvPX757a7lp02LvZycWrTz3/BPSSOYGw2t/cvV11//k4a0jLdOYm6hSZfsYG0WSgqK2pnVtKFQriWwV3AxwUowSIxF5tnDMSASZzUymle8aPJeKAVpBVWtgT54akBeDCkBkKRq0fIsQxmbRbrApQsTGXUdEkJxI3sE3SBcAQufIcA62ioacsVClTlVwrsSQUmSO1eTk5KNb7/qXj3zx17+Yc6781lceXbWmeO3rXupO1elf06Gj/aUrFs3O9CfGO4sXr7jvvoc2bzq+jiMRKUPhKUufWkojzV4FmdvGrAigJKzQkGQWPvNP+L6ZJms2gUZEZABODZIA5g0qCnKp2Qq1m1oATQscLECV8kI6i2loNswAQkBHdjPGu70GxpEDSEoJvfPO2+7cftB7752LzV7zNxIwAGTBEJZo5TuRIIjk4jpvd59IAPVI2Hh6ShNYW6Hp9smihq6R35vOn0nnnNmZ4bzHAzQVJLTnpyiKBizGhMQx+SKUZWk1ByKO6tqJ96xTwR87OvzsF36R4tjhvTOzyzZuevLFt/3yqu7kGJXguuHKq7595de+/qxnXP6CM6Y2PvSaHx4754oHpxxFYVWxaJRAYWJqotPpHJk5NtufI4cM4BOxQkRM1CEmVRlWcxM9HIUx3+cxDQdnD4Y0fN9bX/9nb35dMV3u2lvXRQFu2EWqBoM0eyhOr4j3b68f2wVYVY/v3nPvw+XmjUefds7WQT3ae2zPod37js3NHZp58trlqyUePnXj2o2bppTuvfFHvGHD9Ibjj1LvwYf39/v90aEjOw/vrRJXc3MJ9ZY77ovDfsFSJ+zX6Xc3zS1bszHcGdy/1MM/qeAPh/IhSjyQuiKtAUYOtHTIaRRjleIg1n0iuu3Wu6aXrxz1577yla+tXrrupFO2zI1GQnhsbtYSZKxqu1+pToiIhEpA6qFoVvgNpduefY+EoMJcliUKN65fKlaxI6B3igY4EGgsH6xsRUQDJ7WHxw6s7bMWHq35mphFQJg5Iw80n+92heybFJj/VBoyGbUIwAUtoH3rkBwgZCV6hWyn04BWRIkQTBoGsRVA5izTAvY+pFHSsH7aPoxHYpWFPZzZY9uaB0XBoYCiApupsOUizqxZbZR92oekfZ6tlAaAAAS2BM0VcX6ioki75slX3HvnHML8pAtaChaCcySqzI0QvxXOSV0bblpaBeVf0qbSdm3ZZhFEtIQxH0lZjJvBreM3YrZVFoXm0qlmaYj5zOGdATIFgJxzAzOcQVUzGwMlRUSO0imWxFp/53d/6+V/cP09t8cTzqiXL59ZPL3xW18/dNWV//wnr9bnPPe33vLmD3z/mm+vWLn06CP7tm579MTNZy5btsTR1M5dW8/ePDk+GTu9ZavW0cx+2rV957e+cxO7RepH4GAwGJBCxRxjNGk344YiASrpwpoMwGTEVFUS2zonhGBNpzMtVoQkIsygTGITCM3kK2YWaflzWHgiB5G9c7YbZ4KkkhdCRO2TM18Oq5K2MDlCURSNDcDYHNozDQnR8L2NFwg2/41SaeQEXhxIp4P9Yzg5Sddf+8vbbzr2xrf+0aH99d69e2+44Yb3/u1nZ145e/7onNH47v7gMKBz1D1y5Njq46b++gPvePLpV/vOKgAQJERAj2rjehM6bfgCbYoy2U6e98j9zS/NaKT5GhqaDZY2k+r2mtjuxkFD6m1+C0CGVTQnE6gd4AtITO11aOtU2+a05Xv7e5GlYX08ATgJqkBPmFdJA4kyFxq7/jXP48+5kTfSBV8ZDUDWr5roWc7BIeRm4zeWfMZWsbjEC8S5HBijCQ2y11Y5bUi07bW2nk5Ew1pSHV2ZJ1lWegq54Wi0aqr8n6/cdvNdRwpf8tzMtp0z52w5Y+f9v6pnDhVF+S//8JHRoNq3Z9umXnVe90e/erz7kh9A6Naa0PtulKgyFO8RdWrR4kVTk6oqkQlQwFVBu4xjrlNxQkJmhd70fokynBsbwv5Dey4+/4y/++cPnHXqaQ8dOjrz092TMjy0Y6+kYXfdiQe/9z2YCCOhx3703QNzh+b6xw4c2j/wJa/e0L/huqXrNqxYsmLJqqnzT928ZvnKNStXjnVLRCynJ4re2IcPyhWf+eTYeKeeHaCjxDoWujjecaDj3aIsw8zMgU7okECnKLoBHt+7fdFUr+iNyyMzYz+dOvTybeEfxwUgxn4A6JbTFLQ/2x/MznW65f4Dj+8/vKc71uvPxf5gx+oV6wZy7AfX/uSiyy8BglCGoD5FNoU47/38+VQQFkUKrsCM+Js/fiklIAVUBalStFuPiI0AH6Aq+IAGeTIk//y6d34B2pRuSkTACUQy36RpvRCRTbVU
EdGZ9oAPBSJK4npUUcMj9TZ7EVzgqApZU6nN5/ZXucA0GJmpwiESorQYEJ23NrM3IqDtqNk2uAJqvPi2XpjPTApK2krNZd9vyA+qPWxmlAS5es7kh5xxFwAvDUvSRgR780bMb6h7LVzxCY+9PXjGxsjJOHhVNTbzwnIEWSgPyBVFBRSzYvBvRsA8XYD5iGMv0mbQ9nmWxOhAiVjFwYKiZB73iCW5JAkR1aOxRZNKXdcBCYkEAZhbp0818wBAUFQGjw4BJQoiUgCk/uBIZ/OmtWc81V39nTu4fvHkxXTDD6575MHJJavXbt/98Gz/wu07f7Fn77bppWPPe+7/2rVrBznZvv3BSy59DnXvHtZrlq/csvnUyW7Z++j//ddPfOLjm48/+/FDNXmlWDBwCMECZQ70JrEidvFRSR00wHaGovCx0UDN6PGUjEtgWwoiyiholpTYXlla3dMGwVsIKmjCptoDIMAu+gQJWIQl29w0NRM6MnFHVjEgLhGRd5CkfaRVFQUEwUxhc9e4cDikWpRdQWBNKens3GhqcvzhBx++4pOfP/OczYlnv/ilL6xYsWJqemKweQgAj313ZvPmE1B7vpRf/OKak7c8+ejh8MDdO3bve/SUJx9/+PBMjOxNH5zAYV5aq0BsPPuccw4wmdMQPPHMLZhXIUCjHzUvjGX/ph2l1nVtVaCxls2FsH1w8uclq/TylwlaCaiIzm/Tzb8vu2BArawE3kBtzbTWBoLY0K8bUhkb2wPmq/x5pS2rce3w203XxhKufYc5FJiVFy6ApBECIKGnlpexYFRgX7Gq28+7sI2WxMyMgibXCwsIcu3v5cbMUUXqGMsQhnVVVVUIgeskIr4sRqmaXDJx92N7PvW1W8vjVvUEcNbvP3Jw9yyec9lzvvWl/zjv9NPf/Na/OnTgyJF9j/zp6H1Oyk9XL1+y5MFjx46UZa8ajkRqH5CcA5bB3FysqsFgiIh1VaeUDqYRVCOoBuDCWG9qstfrzh44a3J6xamnLB+bPn7LSec9+2K8/9Btt3z34P1bRysn7p3o7vzUJ8tzz3/wR1dX3//O0bVbasDFvTDeWTK15aTzTli/6bhNk0tWLT5h45J1K8d6ZQf8nLIQQgUQqVJNWnM1eOc7Xnf/A/f88tqfr9i0qU8VxgQVu4QsVPiglXT8RBRR0k4JNc9tf+CuPd0x31vq3BR/go5+5cHp39msN3Zuu/OO2X4qxtdRoGElc6OooTw606/ruQlfqFCSuHvPfvbDa3/045c9+NIzn3TKwYOHumU3hEA+iEgUHo0Gpn5qh8TM40HV6qr2UM3nMsA6Ru89W0HWbFgE8i7VIYJDb+eeRVXrOnlPCwtWiz+uCQUW09s/t/YHnygPBQCenAlTp5RAwStLG9/nD27zvTyRhGrtV97x2jFtVr0A4IiU1NQSsp5UE78EcgPqmmepzaxtNjId6daRUrLS23wFoGoWo0ZHVCVz0muUsygzHKzZX3jRn5DpmxF0+98oKioYfOtqHnB+G1vXdX4/CzoPRHSI3jlqbnAwg0BqO+pmyLmgBf+N9mXhqwHkBD8/xzDvFPsecstitm7eOWguvooKZFG6/FKEIMpGM3fEzcqwxZrW1RAAoCcw4iXF2HXXfmJs6c//7h/+8cLLnvnD79+wYqlcs/OzTzr9zI997F+/+Llv3HPPPaMR7pnae+ap592855bd+uhUZ+XiqanJ1R0na088edUfvPj0rffu++u//sf//eo/23OgwiI66XmMjgoWcYjOuVGsAcCbslWw6sAW4/k0O+c8Ekk2W9VGehAAvG1QFAgphHn6kDyxYaKG9lZLAp6XLAVVVqEQDHzLtrMQZZejLSAKKmQLaszab5LmnVEX2OVqFqLKewUABMhFQ4SIQM45FZeqNLXI/e2HPrdz17bTTrlkurcBoVMNKfLM2FmdPsChW3aXneJpF59YluWJm886/Umnbt91x09/eU1McyDJOQR1KSVgIIeKxGo2i2rhAABAIRFm6PX/bwG88IQDoEH/5k8dArp5OUl7lNrRkdnntdGqPaIZMLiQCmHoLZ2ntznn7CQiYuiUuEAkBBsktiOEZhEAjhQUWE14auE/bn8LATpy7ZKYmr37/NipVVx36BQMcpgXA7nPBgK0Th0he4djUwG7TkfbRnzBV8sPpIx+sTwPsAAKSkSIWTBEQMBRKIsG7+ZjjFVVlYAu1ld8/paD4JZPdmRu1I/QocG2rYfPfdK6J51x6eTExNpVa1cuXr1l+HdL+1tvPPWql1xywaLvfufLX/kCEXkP5MpU1Wk0ZObds7PDasSqVAbv/eTk5NOm109tWLP5uHWnnnzq0o1rVqQh3vjTif179h067Ddv3v3UJ//n3/z7aPvDh6brmW37p04+aXjkYNnRpY/u2rJx7fK//pvjlq5afPrmxcuW9qAYG+8wMPlQMwwqiKoHZ2qA2Y74WlBdkFDWwouo4OGwO9n7p4/9/Rte/qrb7r57cuUyqSKi+aP0B0k9lijkwRH52K973UkncTgzEBkXjXp94e4a67/20MQNpxw8tK/bGUcpo4BKFatRrAb7H3+snpvVsQK6PqokqQsqhv0jt//i9qecccr42BgCWTFq25NOp1PXeRYNACDaWBCQ+V63hydD7RVKH8BR5MQxWTmbxZ1QTNgfckNlyB1Lprai4jYgiEjbwqEtUJsHJ8ZqnsLjQBrJFAAwAoVzLqXkcYG6U86jgLlbtRXRAg/F9jFARLVGHXOzi9rIM+F8Gre/sqfI6MLUSHGIzI9O8+dBGyg3sywDVZJNeNQ2zVm6tvkptl/lyMAxOegYW8llCbr5kG28KcgluTYbYtfkXVYRZiJi+3AIgKg8/5EXXHTwPiQVFiFA770DrFKMKQXnF6bV+W9wvtRo/0H7b2zfXJCDtk63jA55s2tfAo0uQavrq0qOiqJwAlEycIlY8+BOgTlBntULc2RJAuK9d6nX64WxcTnhhEuXL3nGt35w/Qc/8rQ1y1e/4sWvP/H4026/+ZaXvfh/8XAs4THBWuT0s88++6wzz3nk0Tul6m/fed+Fpyxa1D3pghefe8E55z/jAqyT27WXfa/seVeN5nwYTwBSS1t5AAASQWK7ZYrZzr19ZiplXiBaEsBnT0mfL9e80iagKqJou03HFleVonpC0cJ5Tsky8yjWtXJhiPtG5VTZLLbBBd/AmghNV1YS19GXRSa8GMrRQBwg2JDxG7BRU2l5lyWohVcu7976y1u+eeU3161d/atf/SoU8PwXPOdnP7u+qjq6JfndvhgtPlbt/dEPv7vquOVPfepTo9t2rHrooosu+sB7/+2r33iWJ0RyKSWwuQioOo9N12tvP6lYKZnPw4ImeP7g2ecqnCzkcYHaPExStPff7XZVNUqWs+aa2zV5S46w78WU1xRSypjqEPxwWLeLXiumFv52ZmZhBU0qkMXFBBGN5WhBQ0VjjGZppY3AcvuGdcHWxr6cc2VZAgvDfM62k5BECuehncTZ/xNlFddU/ND0FTnpunzAXKvxqYqiCRW9QxYVk7MlRtvNEQBETnY
bbNbknAs+pCY8iogjsppv6aLedTc8dP3PH1u5eTXUiN773pjMHhUNv7h964XnXHDHr37w0pe+9I1n7Lho88++O/3+bceWjqd904uXHzh8JPRrgkjkSf3YWHfx1OLlK5YtXb5s8fJlm084/uTTTl5/3FosYxgWB+v4+GMP7fjm166/9ZeDJdNHyce9h+Hhh/CaKxdvWLPiOWdv6i0+66ILggurFy1dtHFThFHXUY3FIA18CgX7w9XsgblDofRqqnNKjgpCn3RcKChBSVCl1ClpODM3Vnb6/WrV6mWf/ty/vfZPXnvnHfetWL5mNBqpHymhL0NKUSsoioJTJCSptZKI6oIL6H2MWn5q9eATD6eTh72HOyhJCSkEqhJBAh7ueuw+1NgpFw81OV9AXWtC59ytt/z6T1/xRySYlE2GCK3lTdyS+6uqso9A3qkKtfWoI1QC0SSMbTHKGRJoKcVaJktqyiLZ6BBtLGyNtdoIj8gFIlVMyXZkUZhg3n3HF8GsHttWVphVNTIz6CjWRKQIvg3l9ETzBxM3SDSviWO/nlizUkdOvWgfo41E2oaCrDtBRGCf0P6NAAMokKHLxGCw0DQx81kKM+XJHlFFRaScXBcsnLhJyAvnZm0Fzaqu6aEdOHKEqDaksi8AcOQcUSXJtJbsH3Pz+eyymIbAwlZjWFcGYY+cUkqW101sua0PcIGqrZCyMqf8iShTuYhsOS/N5E3NwhMBtKU4Yzv3QKPHgA3JC+cTNdFKMz3M3l4RggLUnCh4QDR6q3BC1LGx7vhEp5TRlVdd+V9f/MbJJz+JPJIfPO/pzz26c+adf/nqdWs3XXD+mccOpd/5g1NWn7Dj4NH9n/zH67/wJXznO97231++7bFHb33N6y4+7+zLT9zy21NLyn3bZofjTrvd6EljrGY1dMdGXKWohiSyyI6ORMQ7VzGXzpoPbaMtokMEGwJLg1czTRiXGtxyCyOw3rQxvfe+yGhWVQLogKs1JZUqRUsbmhhYoCzbQVB7W1FEuQnNVmw1Uyx8IgIxcgIjyDYNIsB8nkOEWIPzEFxRx9mxcb7yK98aH+seO3K4Gnau+sa1k4sWM/uVx+mxLbU+SNOLwonHb6hqdp3quuu/++n/t2ftulOe8pR1q1etn5oYP3BwVkGKonCgLEmNuYvgkaSBCDkEWzJZPsMFk9U8g2kkmTSl9oMkycOxNr9aPosxlj4YTpiIhJMFCFqgep3rDEAktLWC1HVyyTk06Zu2orUHh2MiImiMp5SQCZjFLdiiJWaHRMEjQ524rahoAfokONLEGcBlLvcsDeIQuBmZtEnUKqPMnsgOGdCGl3bI197Z9hBa4owxZrdy1MIHBTCiJhpvOGkWmva5dQGVGCMqIHgWAWeyR06FrTPbPXP0Xz53nV8UnPjRaDgxuZwnDiRaynNz04uW7N654+jex1974eQbN970he1nX797yV0Pf9Z7v3rN8nPOO23T5rXTk1OLFi1etXzN5lM21aOqqob9fv/xfXurVD143703XPfjmQOzhwYzpbJWs8dGRzecc/q6YvrpKzeuPf+UYry3YtGSJUumoQQiTH044gEjHB0Mkqa+9EZ6sJM6I60Q5lzRKakYjeqe9wQingSYavVBo6aaBKIW5EcjQXKHJXZA+32ZXLX2E1f850tf/rJHHn10zdSyYawd+5ojkobCq9SiyblezaId75OQcqqjdwG/Pz3cE+rX7er+5enkNQlEAvZ+UPO+A4f27n88eakigxdKQADKUBTF7ffcs2/P42vXHzcYVpk8AqAgIXhocIudIgiCijmKC6ozIo5zRv4WMOtjRE8Owvx20tYnli88Ocl6A4QoiE40USOu3h5OVaVGjVwa0J+d4VAW2jSxGSXgHSA6R0VR1Cman4gXQBcKEM2jJFQBBYQEQOgK8KqKDIhkOzT0JYIgZhMuc1YxcobauBMXlJmirihdKDjbhrBRU0QEAkqT+G3+bE9FrWy6JFbyazPaqklBFYkU0KbcDCoL0m07RBI0Cz/NDCtOIkrolBQ9oprTPSOSKVullLz3AQgRnWtwWFl9GmyIbNq2NvFPtrKyNXmjeavZlkoDZj/5trO3tXdwgZkpOFXVxIJCTKrK5ovn8jydjE5oRX2D+ZKG/qiqiI4ci7AjDwCoSQVBSgwepEJ0wZNwJQJIBXlNARWQRcqi4zqRgIazs/fefc2fv/qvZ2aPTo6vuuvXd73z3e976tMuIJT/9XuXvfUv3vPLX91y74P3ffRj779r63u/c9Vwojz1uc8/9aorb/7UP//b7IG9f/UXb1u5fOW2rdUs337Ho7f/7Nu/HPXTB9/zjlPPfsq+w3OuSyMpe1oK1QKKgaxCceoAYCQJvE8CKtEcseq69kh2nwsi21UQEEQGxOC9MCtAHSNbKQdNMiAzqcOUahEJ5ACQWWrIwKKyLGOMmrjT6ZhjKzWeCgiICgyqhClGbUUkHTrnA6txSGOM0KhS+0BRrMhTA04732hVIgAoIVcRHfa7pTu49+DPf/lrpnjWBUtXbSyWrdi0aBGVxcTadZOvPv6L/FU+dqSq+jNPu+ypew9tW7tm6pKLT7vl1kf+58rPvOG1f6JEibUokJMIogA6MYwYCkISRpHCbD3IbO8UkkC7+YCMkFLvGvgBqqY2NDgiYta6BiBgILM5KUx13JgFERC7RRljrIejTqcTyKHzSQVFnQHaEVkEgquFvTjBbCaoCNIoI5J3xgEzsab2CSWkvCpLrCIZui9SeG9lpgteGiclVa2bmjU4n2JyRFCEGKPHeQVpG3jUwkCYQMnl8p1VmE1NFpTZJFYAQNGLArHty7zaTloFVByCuUhATKISJaHDThlSVXsoQLzIsFZbEwbnCwWloIicUkCKJTvAKgYEKYPOTnTcZz9750P7q/VLj5s7fEwWLzpwaE88NFNVVQG0adncnT//wbte+/Q3uL85MP3M5c/56ouq+hn9PbGiFSsmVqxYMTc3t2PHjtm5ODfYd+N1Pz28b99wNMeUyGMc1r3xye7U1PiK6WlYMjw6t2P76MUveu2zL7+40wGSNDdyoDqaG+7af8wVwSA1wXlVJPAFeoaq58eFxKOKeE3REU32gk13jDcIIRc3XSwTpkSuBC8uUmLxJFXkmI5bs/zT//7xV73qVXt2752eXlxVVWmWFRptI6OSyAkmJ8ICCOiFUYeu+5njBn/52PS/VrC3V3ooeMSh7B+d23n/ncpHponII8CQkWrnRFI53jtyYP8jDz2+YdNxMdW+6GZErHolMGqcVUUIiATkndZ1PeqLwFhvnDTEFAN5Cg4g8x6l6bhEBB0VIWidBLSua1uQOSuzOWqDmcUFXCNqBNWxwQfY+sMFb3QmxAXKMwY8sm2OgiQuffCo85M0MCyz4fEVFFiayS8oSjLeS9E814iEC8yOLPktAIAYtsU6wtbNtCUAZPy0CfpmlKMCEBI+ESPNzCIKniKzUsZmpxYbbBMkme/97c2YVIKy2AKofSceyTbJ+T0TUgZ3P6EcFhHrq1zwTcdqbXX2xeuM9VoQUDsfY2YiB/MS/0
0ynWchASJSOwhV5fQEEGkuRxBbydz5+5ph6qTihBnREwIQiox8AQDovRORWJN3JEnqeoaROn1xBJPTOBrp3m0P7j54xZGZmw4f3tXn4ebNl77oRc/+wpe+7Fz16KP3f/MbV21Yf8Lv/+5z9u0+ev21N339i9+78NnnXvLMh7/75SMbNp0y6N+5aFqe/3svveuR+2f69cN3bB85PfmizU966qk3fvOml7/yVV/9+hfWrznlaNROEI3NMAAAkq0t0Vrb4INp9aqxhxDNzFxSptmwgQaYIfv+kvMevQcVRVBFsY1+g+2yq2r6TQRgiB6rhbtF2V5JtSOd1cjneyAjjaSUGJS8Q1GAPJDKz7NFbMLgMjuFFGyYIZRRxkSEmgAVgMqi98ubfrV37wPL1gzPPHfj0Uoe27n79ltT/2j3/gcemtlWT+xZOjZGv77zsX0HedGS1atWLI8TB5/7u2evXAvnPO38ugIlNJMDew+s7L3PZBSElNJwOLRf6q0Z9h4XzGZsUKzcENYVWlAbNICJJIKoFAggr1SxRV0B2MUXESWsUjT0ADgDdmSKBHhHCKgOoqYUpbX/agpPNktHk9yCbH1o3XxmWDZmjpoa62uWlD8mmSulSXdZYKmqqqoq733R7RRFYQhYaGxOiMgTRk6+IStSo5hmbyihAotjk9MCQdACBYMHcAKlK5QTCFJR5AtVIJOgoAcHKQj6mkdlkDpREcEFLySpmkF04jtOfIEDRgFAFpPQmuktmfjez+776GduK5ZN7z9yJHTUD/ctLuO6Mye7Dk85df2KxeFPXrLpBdveMNLlvz7t748d3cojOTJzYHZ29p57Dhw5PBtHlSSMNRSldsc9BqLkguvMHJudG9a7dm+bOXpk2J/bvXN7IH3Xu9/5zEvOG83NzRyMDok9Fs06DDgTvBEV86SQmfOE32S6TXCsNapqRSBK523RJnazEBHAFYE5hRAkxdn+3KmnnvgP//APr3rlawaDQVmWKSVW8Q6Z2XunKgDz8A4wqBxi5yurhm/ZMXj5rsl/OsE5H8UnkcMz+/mxQSgW08RSLboMTpEI1EFyEWQo9z1w/0WXnc3MGiN5W3x4VE3N3sRCKDNzTMwsSK5wSUVTNS9m2ljMYUN2FdNjSMk1L4ILvoQy8HAhT9WuXnDzwKu2RebE6DyAMGeMoffeuQDNNtOeFwb12UahWZHmf2F4RZGkidCoUEpEwZuz2Dw/NS0Y/kCzwW37DMvTkFenyYDdC4MFLNjFqiqreIOaifV6xIhgvCY0PWABltSoTBDmRNqsorF9Y1n3Jm8H3fxwwNRxFvBrTdTeNbZ3bSSyK6UmBkuojSBAqeXC9wwLnPJCCLY5aOV1LIK0rJX82QmoIR2hgLT1AQJK3ue1N1gXTNeZmVxy1HHBxZhUPZr1kSpz8iWriMdSOAGUY2Pl+OI0IyFW9S+u+Xk33hgmHqmLn4SAm9dv+b0XTvzzB26cmBz7s1e/6XVv+PMLLnjqKSedfdHTnvq1K3+0dMttf/zGZVd/8d61Jx135rnnXPScQxqnPvQ3f7Vsas39jzy4fsPxHOmUM06YnTsyt6e/fO2qpz39nKuvOvLJj3/y01f8v/3b+iWV7BIIiYhHsqWpjexSSqmu1UoTM4ByGV9r8q0MyoY+NTcks4VGU1cnA/yKdX+BOEl7hLRBzmNMAJJMj9A8Im1J3EyVlVqtvvyz4F1BxKCmB6ItC9uwhM3iHxHrFDvkVdWErHEBsQc1BOIUq8nJ3o0/+eG+3bvOPecFf/e+r69YsrksJoZDXrN+ctOrzrgj/IJ52K/d4hVL9+w+Mjs72x8suf++x5yb/cyX/u65v/WKI7Mj5wJSvuOmsMPNzJYADWSgqkbDNSWBhX7AYAZ8AKCaUvK+aGNJWyi7BSVgW3/bMc7eZc65IrRjW6sUvffYrGmN7144ryAt8pCZOTKoEkBwHsUinoKCxKQsHik1SNH2rlERPBYOsK7rFKMrgh0GFE2cWjcObcbgJpfvg0+NZVNSBpZMTzL1+UYWFBGBnKq6pAoCohqjceXVIJzsQYCBBYA8IaEHRwA1k2MihOgkpUEQcXWtruwCJESoU+Fw5FClLkaqLgjWAOMaQkkwivVEZ2rnjiPf/t6Nz7x4xZNOXb562arV61dM9WDRxNLayeyRQwf27jg6c+wpD70LRjs/PPP6bf/1ZWZGCH5sSafHve5Ub3yZn5Kjh4/OzB7Y8/ih/tzgcH9m5uixONdn5ggSnJ/uje/fu+PMs578sY/985PPOnHvvqMpgfOlJASIqqZ/n/GJIlLFEQajXrvWN1pYAdQ7Z7pAVs2ERjqQzObQKTlDHYLYug0cAARfKMixmcGFF53/3r9+3zvf+c6y2wEQ5xAAnMeYauccKIN6mN/ZIwHSXOh+dc3si3d2PrFGqx5S4QsWSEePzY71VtZJBEEkIjCkSkGQCnLyyKPbkIiCV2nE/xEY5vOXc84cyRCxQ8QhGNwJmq5MxHKKRW8FADVdypwjwJQUjSnTDiQJ8p9YUAYxySmsjIbXdJhtHomcWzhoNJ0kxpaPKo0yq7dnwCMlzSrK9qCS5OTRPi1EVBRFZLH+oO3Y2rrbvsEGjoHZGxgR0SMoYqtsYFekTdVt4CA1NUHzaEQLCuZaKqqhcTSzMlkacYD2lzKo0aly8FLUZq21MLMCzE95tXF6b+8ZtAcEgZC00SlpwT5KGQ7XBrV2LU1ExlT5DWIINki3dr5tc2ki9OR4AcBIWk5nZGyy1PwlAhDGFGPoqFBERZRkFsNIo3qknqA7JmVRDgdx9+M77//Z9ffufqA/mA0br924eN2hudnZw6vKicGwxme9YOnM7Mb/+rcfPnDPg0+/6Pd373tg5+TObdv3vvrlL3zs4CNnPemctRPdu+/YdnDnzLMvf+bsUbf+xFN2bX8gVv19O/bODkdFJ2w+bm16fO/hnbuLRYtOOf74t775LXtnpRO8yEhdCU0Wa5YoxMBJxaaX2PDAiKjV41QAFLRVDQBk4pYIp6xrkJevJtrpsT0/2uiuGPilvbUmegyNyYnFdFW1DVCOSgtU+EtfQACxwplZCVXyisR0UEWkTjUSgSNT0LRHURMruRiTcuAa7r37vk9+6qOvftWfnXPW/8fWf8dZl1V14vAKe59z76341JNz50DT5BzErIgBFDAOBkBHcRzRcczjIKCOA+gYxlERMWfHNIIgiEgS6ABN5/zkUPVUvvees/da6/fH2udUNe9bH+1PU111695z9lnxG56bsuYMq09aPfMza9PrpgAg36P8ilZ/U1+uX3z77bfPze6dmT11zbX7vuJLv3FlY0pi3EXGwg7i4Pwr6XwA+6TYNbsqmPsT4teFOlm0nHMGjRx63y3v8KjTl/g88AR32o24KzcjoOPJcxJw+xdASdmVp/qVec7Zl+NYJO1M3TkNwAwsizL4vqYvxPtHuFVl5ioMsLMRCyGEuuprXEFwoTRX7xoMBm5aHEKgGEv8NSNmMPcy9HCP5v8pKREBuyQwAKGJWcqbUYhQmzwYjAhj07QhRBGpyBpkNYyKj
JyDSSaccq5tgAg1yzBGYso5T6eTyQZIRgqbtqkwkVRNJ+Nk+Qfe8F3DkW6tXrp4aXznfZ+ebly6cmZ9+coq4ZgkvHrpI9eMbntX/M+L17/gyxf3jYYLSPHv/+EDd9378bUrzfbWdHtrWbMxZjQ0GlqwesCDuZkqMjK10+bSubPf+u2vfMtb3jKcmTl7aUshQkA15IpVRKVUIYDFal7NsqSuMyrNojd2jRUUHsWQc9bptOIwrGoVAZG2bSkWJ2zstvgxxty0YGSgK2tb3/DqVz1+5vRv/NpvLC3uEUna3cRy2FyCVw1AEZkYEHH23SfG33F6+qpL/PsnAgcMAYERKWVkyorKoGDZCFRBEKTi06dPN02DiLar38MehpmyZRFIRn16Khx3VRHXn0IiIjbUDhIIAMDEhobQW9uVkO7alCFCSk5gNKReTQgAQM1tlwFBpYwQvBbUbG5njmo555SymZEBA1q3Sw5+cAMSxUDQUYbUZS5KPvbnoYtTQTX3qKvSbvbiuoSOgIDOpYTYs2BBPPZN/c54to+hndA8AvZqyogI3RTFESiKhdpUsGpWJHEQ0eW0ulwY+hcH0U7iHf1+dP2rfN5BtF2fugRBKz+vXmt3wcueCFvrA1AR8Nu9WgDAQP3P9FHS/ym7MCD9tfCElEQISinaX2ev2iQjE7dNjrE2y9LCaDRY2B+m4+aB+x96+PHPzh84e+Hc1j23b9934WPNVZNJc83ZzQcuXfn3Q/tOVnig2dz3mY/MPP95L7zlScfe/uZ//eQdZ5/3vC+OQ53bS4+ee4jh6x574NLNTzv2pKd94fratJmOqNp86NTWTFh60k0LdT28uHm5la2NrZWM6fLja8/+oqe99h3fuLh46NxaMwyKwLk1joCIxQrbQE2zZjEFz3kIPmJhREaKyKkb0VNvpWdGBnE4gA506lWRoRceGaFbDvQ3GsrZ6Fs3KXv0oq+yu9pziGwf38119pmZWRGsA1OU1G7qDzn2NthdQwbopjhQD4hNHn74rr1793z367/7He/41fvueyDjdOFFB5tf2BxTlpBAQQapXYDl/3LuzJ8+uPTY0qmzjxDIV375183UR9a2JATNOTdNw0UI3VCNERQKJhm7ZSoiBh/wqfZprP9EESMHJsacVUSzZf9oiGVr4x+tv9rWMbClTa7gt3NcRac5UwzQIRuo+zKn5HbIfGauY3TlTlcyYUAvd5jJSVNt00DXm3axAAyg7G6c7sDkEptZxZeR1MHB+l9ymEhuk6RcVZXjOdSKPX33sPuYwgCMh0NXO0BVtQzgBpQMJAFQTDk3BinnljkiynaCQQgcLCEZV5WFwfygCVAJbGtut8bb9z26ubJeHz6wdOB40OHlZvW++z5zZX0ltbC6vLE9uTwzY1trqxvbmYQp0nA0H0LYN7f30J6rH7ty9unth7925sO/c/GLBl/8/TfP7mnTliFvbco//9Pfrqw9urR0DJFHowFzlNxAxoaamWow3Z5KFTkM1lYuaZ7+z3f8wmte+01rG+PN1S3CKtQVmjtACiErKQGCi4IhUOAqMkhGRNEMVm5Z2R0AtG3rQ/4ikWs2TS0HEoVsiikFJAwmIiqZnXfr3LMY2vGYCL7v+9/w4H0PfvCDH1xa3NNKE0P0IgkKFwassxwFCkQE50fD9xwef8eZPX9xHSqioAqFENS2OSAIgQHYwExdIxKpWl/f3J42c/Oj1KpBRkRTVJXy5PpYSEsDJyJEoW0zM2ZJZlZVFRiqKHIgLB64vT8sFDASaJeGiciozMz7OOyny00JyzBJ1DqCTtl17pI0QMaqe14cpO2YRzMr7phGVqxdTEEtBNIs5nLEBgxd+lFhl0nvSHK7nwdwKpGZuViVE38ditazFDoHpCdkHX+ysxgbETlBBcuypshUAlPOWXLqq2ZmjjG2vkAywE4po39BH0Hv+jaBQ6LAZTF20i13sl99ILPOIFlsJ+P2Ienz8uhOuUTkTZUVYLJ5IxWMdl+u3b/bVZS7WDYA0I30VXU6nZKBa4sLmAVDxJwkhIDSENNgyPWinj195oMfuPvB+x5eXtl+/otuufj40t13TNfW2+e96FmfevQz99x+6pq9exYWvuWxTxw8dAxXtt7fpJSWJ7Ojg//nj1/bblw7rJ5KgacpGNP29uT4wpGt8ViUFuZlMr6SNgOqMM4NZ3hu79yBkyfmBjqanbF638JwpmZqWZY3EitlymR1xZS0NQTVbIa9BTozS2dD5PwfdGoZQPHk8NPfy5J3nSuoOYmqn/OEEFxfHLsdfDldbbEINELFwjRWVQUrE9Ein6KkhoBqyjEwBhdeUFVgMgTn9feTcxNFosDMVTAz6QDz1E0ygEktDurR2c2Uktx9z71v/+VfrAbxe7/3jb/+tLdlSnZcYR4AAA5Anm/3bu575GsvvHLyA6PZD3729jtSO2rbJFIYe8wcidO0YWaFMjDouYN91QiOwe7Gif7lbbGmXCjgIbpI544/jGtfEyAR6M58y8ykTVVVKULbtsWmhUraTiqBArgjcsra3QUREZBS+CNp59+XOwYIAgACxyI7X9dROxXM8uBYSRL8RAEQ6sxW+5teTCbYV/XutwZWYCIKjsYywSJwhMVr0czARJOqMkJNbEaqamCMrNnDd2zMFDHMzBJTXfH8ILeBU0ZYa9Ly2sb5C2FlJS1fPv/wudWzDy1fPtceOgJPfubmA2fPnPnLC1dODVMYb2wlmC7s2Tc/v6iE0+2Z2frIddccX95KG02zvD48f2n15msHn/7gX46a237l6+/9w3v3/Pfbht8w+6ntcWotz86OrlxZO3Dk5PzePVeurIApETfNFECQAmFst5tRXSnlRx+798k33vj2t//PZ77g6atXJlkhhCApa2vucEORICKk3K97/BEAKNBFVVXbEQ2kTt2lr1xLgEJTKc2uP6pERIgZoGkaZ/6AgBkOBoPJZDIcDN761jd/y7c8/Ngjj+7Zs7eZTAOzOG+/I2jsRD9kAJj/3esv/vW/Tr/scvWPe5kqUxaRqooCYKRmAgJmwKoENhPj2sbG5ubmnqX5tmkMjJmRgiqJJB9PIhZIbN/k+FdZ2RKbQis5W0Yi4BKOS2aFDtCDgGqFWQQEWnQaoHP+6MO+b2fNzNMcV5Fxx5/ezBx1YgiuoVtVladhVa1Hw9C/uQ6rAkQETKCAoCRmYA4OdM5vn/xsl9Fm3zL6q5UP05US3hT6N7mjCYopMe2UHt6FiCqA5Cw5BwDZPTruIjh2w0ZUsyxoPQK0yBF2mVLQJTN9FgHkBQGCQsmPyICIxUkoa2lxbFdL0X+cvsLo97IVh76x1k440zm40GFB1VTAeoDqTnbfpTmQ1eX1d83Si7iHVlUlIm3bQidcUFVVEsp5wsDzoyosxoceemjlyoVsl+69++ywmls8QIPRoYdPfXph4fgNN10/v3DdRx76lxiH999+x9J1LxwvHxwcunNlPAr2/L0LJw9fNTx85HiaYqrnLi+fz7y+fDEMquH+YxvSwp76ZhrAZJoOzh1dvGFWYW4Yc6xG0xyZqJZQj+ZTrWNp
LMlZgINE2TJJYkCNUttIKxLeeIUkrTNDmae6uAgts6aZs8R9U/x33MQhYOMxvyEJ8cd8vMNEyZchP/66aPTcNLQk5FtXaNXwI5e91USsJNLjVEngOlNUehjj/HTWAoYprLmkQs8/CwWYZ4tD61m+ySIzfKXXjVs/cxdCCVWixwTHViIhFJIl7dqk7uYTFGREkSM1Pf3NRa1TQOIvf9DYAwTATmyS8FNDjquxB5mPPE3Ap4X5LFkwqPoN1Ji7sz4GmO1/3TOQTuiBQsEpNzn9Fx7ilQV8wECMi2H7hd96IvFJhZe2EuXbMzrpm670FEw+oWGzsNy1VYU1Lp7fiMkggtmhCeJHUrmmIAENN59yQEEgLDbZ6+lFLMLC9GnQqlQHMrMw0pq6qup5xz9OVxQ7R7bM9ZtmdfCT1nT1F4NwyXuwuJM9d1uWmlCoCplCJtcaxODJC6ulZyUJJsbGabzSae+1xiEhFAItxm+H0HaWYOD9yfwkMjNuRplTz3Z4rt8Dm2HrnWOsjQZLhAGrI5HcJpVZlZoU3OOrGBBCIimzItl0sRfvMv/NIv/NxPy8Vf/uo/+7q6vvbLv/helFe/7lC/5dvl6uMnD1+7cduhf+0L3vfWJ2759l8/e7Sy5fL65ctXl8uzz33+fUPew5N89jmHDz/06RtHV7/oy177Td/wv7zyFa944P4j0GJc7K82axERTcOQNrWkcdhM65xFyMinJAPA6tXNQmQ0yOnuIG367egSbyKi8FKn+HbDMKCrc5iqiAzDYO6qk6qDda50VWed9uQxgNOoR33eAc+1b8T3MACOyK5qnAQQq5O7B70jSre8QwBpgIwolPvApl1/qSIyDoMrlI0E1W2qJRyyRSRoc0ScjJiEmRt4ofk3s3XxanY4M5LUWjPgTMXUYUvGk9evnqxO9/b23N1MtVp8kpnWl13ff8+tfnjh/HVdy7GP/IYb6zuWj08TJ9/Pg8hUbiyWh5/7mtPieZp0ECISFu+TAweqaZ3KkPK8QTcLAC1BUPs0DkDsTKc6ASTEfdjQ9KtTSqQFXdIAsbkzd1jKKf6ZdvhB6Pu4fhg6IpWoaiEwhW6YOzvYCYCkJF0ZiYgSw92QOPriQJexyIBJMwty2yYS0MTlRYpbWO5kxN8CDSHR03g9AIKnb2ZKIGEGiSK+GFg1hNOYwSDHYhjLelOx3l/uAYTqtRZmjqwWLoGzWps5lsOImPo4peXo5gLeE5mmSdVkyJykTiUugDzkTlraGMz78ETiH3gxAvCqUynjOEaFEjPblJL23J9DaVndmTLEFxLKvW7mzJO1Hf5uckLDGQJMxLEb4HCpiyBbvfY8zcJMOc1vde7IuMiEUQ81RIC5wZk5BbEx1kVMiDEmEYCcMzp1x8lcdZbiiYpwGBpzupSyqzgRh+vmvl+YuZjCuz5qgPOZwERqiaj2zj7O4mq1yuMgWXoNxGaWupdieBiYWSQwc1N4NmAyESEzYa5Rh1ZVkDBbVXU1xyziQyTqxRiJ2NUUgYyw3PGD8XATE8PJTVJer9fMHExxBqWw+bLaU3gD+pFwdWNKZhoWRmbKzKGZhUSxlCUDyNm9lOLAyEx9ADA3tZvNJhEnp6qacq6qsdonFtlfaKnuOyAydG8GokS82WxoyOpVTUVkALQUY8K85nAytcX+XkAjt/mX2d0D/UsEckUrbin6k8qQnHlzkyROnRoKTLKY+d64cKailZkHYrc2ElF4MYWqEAtIAE0IEGmFK4NIxMHaFvzVqzaftLDhassGMhfCpIVzdjW1ukEdPYMXTnaQ0zjSP/jH//pFL7z7r3z7X7hsz3/skj3jOdefecv9X3zt/3zuRTs83LODgy/cPHEw4K0P7339zy+9LrA8XZ0YD3Vaby5durTanG4mtmJ33v7MH/zHf/lPfOVrV6e4dn2Vh9GIi9boQqpPXjURA0xprGYTGRG5VAEJiUvzKYusaKZOxEDgfufaJboF6TWNhsyoGnPToWDmcczxt8xdFkPcKOp9thvI3GYdFcKmFnePxjGzcOZSzN0ZVKsiQd0lRM8ocR8Xxv022KaWnLPkFJD9UkoSiZ0OutFhHKeoccM9bsbB1cjotVmysES8ARlSzuYttg2RO9xV44aEDStcjYXg5B6WusIaqjNV3cWzqWDhm/uOzvzgM2mqTGWzOPMiv3rfhUfr0ZLdVTKJrlcsL773js++b0yLDZ0WCyyiJmIQFVMwyZAXqAar1ZmTSI7+xN1LNSICuVV1d4EQ0cg5XnndQklSbDJTSsSuqtYhJlGs05AYxHAiKHl1J3MxTszupPCYybl76HBZCbg73EvoZofebynl9PQ05zwMg7s34SRvznEYUiaiUksKdkoUsyLkwV9FVXUCk7hbdROn6paUQa7U5yYNAcg5Z6ulmTQxEdjcq2k1zZKYGdb6JHf3BsElajPSJjrNAUtrntQU8+FoyKLDdHbuTjIRiMJhkKjBo0KVxNypc5pn9yTqP25uBVoX0ne0MeuYZ6eQUI1xAqx3eHPApc4raMu2na0Y9V+zMGn862az8Xk0bToXVsw8SJpzITqzTVUDCB0/y4hm9hd2VvJEBKLctMIZcaSqVY8Nljg3mfJ42bRTmEA2S0j2NOzu3rTIt1OQtvo269vrnZ+eWWAO8hkie5NMbm/043QlClcUn2sRavzqEBJqIBF2JlBkqVqrQEgQEt4CkpScqZrWUrEDcmFJSXLay/GvHkzxEGnlREQuflPCTsm2km2Y66F5Ugr0xQoRO7GwMHEUZ/3Jxg+KlfY8qoqVj3e4e91MQiwpzedBgvgLyiyr01M180rRTtVSN149LL1jPW8dZhKlw81UE+5VlHbi8ZwnWu+OmxY6c0UfBxJqOlDI2ELYGawUE2wAYx7iEcScYO4txAGHkZo5jIrVuQzt99ENDk7CKkxaZTGMm7LKwpn3Bl5DaTNdHcbDZR7/3vf+4HOff+df+LqvevLx46feffiP//bfrOVd/+GN+ZaT1TCV88P1TOoDHjxK58f6ZffmN38KStUsyTSsV4+urj2+GO+5fvLIZ7zwRf/tp3/szNnlY4/dUFBKydyYMOYUYzABQU3Joxli52JqapNO8wsYty7tYAJ0JkD0lYfsML5sdtaat8Vqc7YWEYGE5QscTggZQQXIYrTarJDix81D/uBtRnvAhmaAbaE67pDtBhIxLqJOLWMeJMX4dFNLzDXjNLh7KINoJ/LFlrDNihwuoUdr1v9AyEyycFjn2LYKYc4NPzsX9GWqtVqt1ayacxzpWRV5eukRsvM7zms5PZXFnaurX5OvjcdrU6m+ttGXEE18/mWv2rvl3FQLc8oha6Vaw+YzMt6moEdU72oQEg1cCgI9c2qRPObUjWzZIa5m1SyscVhEEnWJfm+PcnO6ymBhhrmn5stG1nIFzdC5bsvLuXvPMC2GBRHhGHA31VLK/NakGJeFMAd1L8ycUq0VTjGWc2/rBReGbZGrPeEjpNYwk8ollPfa+C4KqEDZxFIRjKBAzJ3WMAwOi53fPDmZA5+7GzDnVKcu9dujYVOL7mPkyJfeo2cs5
wJOJSzzynPOWO7kDM7J3INWmzz0kiz6j/gWk1avLjlFd25oQxjdycRzhLWb14e2wyXlHcRME5PqpptzoE/E3j/H+wzKzChJI/QTqE1qez7ozyXaqXAQbUzwqPAa+NpUp1kJK74C9eCy3qwclnNGh0RVN3dLM520I9KYSFhiz6o7dNJEDGbfCfrzr/nYRR5rxU2UIzs+RfM/UGdfxEYnxu2qmoiFJDiWqmrtYpITp9SXati66jLzMAxt2NsVUuZo2FQ75lzblJwZoWm/84dbfKM2pkbTvaJCyszO2+pK4Tzn3dlRY6c/LlVJ4MHqdw9FwCFn6y+8NwFCQG1gmUxT7Nc7ZtDMMouR8U1ZrpmS9ju+61TTIXLwyKM+T1l2IAWB7VRVIoSfnBE5Q0sREXRQbs6ZLCT72R1MlJjbX2Ewe63tKFrkEm1a4urKgmKa8rher8dFFrjW4pSvH0+333G4t5Qf/Ec/+vx7n/Ht3/ZVH/vY6uve+KUffP/vpZGffZiepuXcheXe5jTDFWwkF5Z6rONXPvPKT3/iQpkq7Bg5j3k5LIbLV979hi/+6u//P38wjYsnnrzC+UwkPKC9jEIQyeSkvQSjJEI8EDnZVEqxMsMaVDVU2YWkF/AwbNfAIRxLFG3LPNLfUga258e8ulXTxJJiW0kIWlG8c7SzI/QuVOLExdy9RtAIqIGrNbiAN6zcHJOr2rxaMrONl8DnLxaLMMLz/osDFV+jS/P5VAR+nXPiDuE229GBcJm/Y/u57ARkTkZW3WLtnIgTcVlvqk7uydScGEhCSRLVV1zja8k+OYxMCf75duNZ5dJJnVajpimZKNXBLpy/7fVvqENWXbOShA8Lb+UrArWqM4Bx9nPTqFJ3EGo7W5L2KnX3Ww/FISCqndSZ1d6xPothyQ7T0gwzkjhIPNQu4FZ9p8tqryGTVoWDvJe9wJiHw71992YI2zgM7q5qgUZm5lqsmoavpRefHZBIOFg07t37sInM9GfQTwzvgEqAti1r3YBwEpnNOlpJKLEfbfaKcXRshz3Sbhy3Ozwnj1jPzLd4TnjtVejlJIRjOY2c6Y/gqgAiqNb52Xh0lszYaRQCHWdmZKaqdXaOCw3Irr+PDoid78Ac6LdvoPtsPtguPXxudzJQ/E0jGKFxD+LQlLbYJqK2RgLInFzmO0AdIufuyG1rSESUJWD7HEDl3hTOVU60+NXNahERpjYV6JDMnV+Nse8QNm3DqHb34kbtlA5zGwFA0PRM0myk1glIM5BtvgHUJgo78Sx0NhK7k1kDhcY9DQaz7Jh0zR+kcCsbxSweT/PVxgKPd7Zl6Ix29xgateKG+0a5hdcY9wAOnumStMPeRn+g1ZThslMeMXNmcXPr8PJaaxIRafB4SYmS+DTFx3IThOeoLqP7aojrrri5fTLRRPWvuhuF26vhcMJW/GH37xJxSKQJu7u5i7a9kjTFvnYD5+aglgoEtpcjIIAJQpmy755wtdhADQO0gjmVumZxrQaEvArddc/B5Qc2f/+Hf+ALvuTVX/knXnvtqdOf+q8/9sEPfPCWWy4O2e679fj69enW0YnpibUcY3l+wJ6c7CW/czjeK0v1ZbHJ6joP505OT9/017/z+7//+566oqcnmuWcg0RSrdP8HEWkGuXuC65wqDpzOI8tU9psNm3DJwwwgi1a1To9VGcsj4abtYc/Wxs/9Ez/R24ygES8sVrcnSUw5wIO0OwcyuZw1G44SIYE4/BcsU5t8kCHzTzd/qLlYZgj56zRVt1stY49znZdWmo1a5zjPsNjB3XPkt2xHFMDVYSUUKauaAv3GhevKWdhCTVYuItgsRiC1ODuwbEnGCDTy6/kd104nNKV5C+yJ/+4PrXarNbDuK9eBhKm1UqXL3vl/vOft16VxGJQjiYkJsxVg1ZBzpD+/qJHkv66zRcPIHTuAHDTyTDr1MRe6AT7dJuDw5GQg3XJSSQxNfpim8jGNBAeAP74fDMNzc4o+szM1EHkHYwZF5Z2ckOptVELzIyTuCFIXg3XxxTNF8+piztIzxE4TOvy4tt8Q83qXEA9cxMU2mertB3puOu2QtkWIH0DamYMNu+KTl1xu9cv1uO1oy9fnSlCCfc/Nrf/nIT6eWVGbD+ts0GY2YgibQOAMHf29xyqonltOn9mcSLnlDZfzByX54wSHwhgbuuiT53Tf+TCZhbWUYlGUZa2pTIAmh2f2OcEMMMrND7QiYg8mLU7Y17pdMz4WSGKJiJ5SK1LrrE/b8ew9QcQ96ZlV93M4NZu1/ywGiNrRqnAzZSoi4xqB/rz9iu3P0mBMoNHiRD3JMau/Zn2HE9GqG7S202NfABSM7ftPMDdKYmIOIx0SwjevdvL5ZJmHlr/X06NCjXXcHMwbW+1eSQeoqZIM4dL7JR3s163EaznPO5iq7E4dve6mTabTeEi4yAi681m5BFMbkgS1lisO7znqFl7DVHNbipDdyuM3SpwvjAnWHfa2ClDiYhCozR+P+ac/QDLnFTaqYtXUppon4V+WMSkXogwN8Vy7hPast4kGROxsjBzmaoSSPyOM+nf/buf/sff/z2nx4/d/8kP/73/93c89tClCcOFO8+N0Gkle4ejyBOnq7LcY+XslYWUISS8XC4vX36caS8d7umpnpSjTb3mZTo9xtHVaRylrmvFtFgsMrcmj0iAVEoxKITD3dLMqqmBZhtKI6gqOkTO3WXWggXCDC7etbgDu9omu88Cto0b8TtLyrVWrcWSSU4QdmaDp9puuHUZh/45LCIxkHN3K5WSaKjCNXFWoE8EEfY2fZI842qZ2Url0M4sbUcw5ExEtc9C4jdTg/vFa95+IeZbsTnKmZmJuVvU9PmK2jRNIY/YfjRhf39/GAYgBAMIsFKKpDK95Pr+//WM6zrtbcqX4niP6xFGcV1rljGL6snZs/d86VfQkNIxbPAmAO9ucIWLJCbyMJbtmr4WsKm5y+oyrn3G0848M2KjFxlBRAQUpnwxoqxuXC1itYiEnQa1OjMFUYBBwf+MImxXyonN4Rq4OMlMzE0wSrjGKoaZmZPkFL1wZonRUDxv6dwy5yZjNNfv8Q/SRTmYWECxvW0/vqpZG84Yw4Mc1heoMcYKjWLemdDGvw5IUVz0XEWRVDa1zNnxpvjST1wsC2PxDqYu9L8tJA0e5h3uTsJNgMO20Wc+7nOcJaIS717j4nsHIrJZI5ijDzHILLEgb9eN2wPRg+BuNJyj8CyMFb9ahb4FWnY9BN0yXNGpH/NzySzF1Hd6jjZyn9pNjKvst0KNtwCcuVoKnGrUlLGDbylTW9XZbk6oIhJA205rvmPo/Jl29VsqhyPSauBTrJ0E7LQIu+VLhBVwb4IjusyMQ2lnIHyE2id4KKoxBabZzMxDJzwRM3mwA+cH4e7Tar02FxEZMnc5z5QSgUAM2a6T44Fyn/XFRakZkc/Tkfl8zp8/DEMUanF35kbWokDvf2ze5kaHnVIqdVIzZtkF3G4DOoJ0Hpe0BQqgzxu8c8qre621rW/6Q4Q7i8zVtvtWGw8dt0/ebJu9d8xxkVGO55yZuNYahHXM
92SWJu2nov1EYUpCauxEbEoVLl48sTtjHA/+3nf/6x/4Z3/r4q3lzLlbf+mX/+Pe3vLC+TtFN5XAlJOkdz1m+689uHZyfGaYbh1N8wYGy8sNhk+uDr/sq/7MmVsOfO/S44+986knjk5u8JNH77l+bbXcHzZ6XARUrJbNuFiQe4Uxi2ooaSvvoDQAuHmFW6k557hj2p0BOaCmf6QQmWd75rbDvgOA1Hfk0oSAWj0HzyxIsV+FVSU1Ek5JAsQ054z5cTOoTmWzWS/ykFNa9yLSAXKob9NneG5an6ygl0EApmkaJWEn4s2xNILA/L2qh5MhWibuXypawyiq5oAFIDu1LifgJiF83mp3cE4551IqYCJZ3WrV6QXHvrTFBy4srH65X30JX79UJk9MOB7ksGJcb3z/FS8+99KXUylC49oUQdSiKHipwhIzCTuTW0UX6vEGPFUzQ68Y3B3Y1qbqZOYEbkvAqhqAmD7KRkw4LE60MzNLajSNOZFbfKYzCGlHdc49Bd/EK8yUlZljP0Mi4XcXnUnyjt0NSuKYsvXxlLs7gRrMJDXKo0/sHGLRDZ8DdyBzb3m7pFX7nhog/pDZi60ep9DUide897jo0CFvo0vvHGByJxaK5aP1ZaG1hwwBwZzmw0QANxnIuJUtu3sD74YzDHof09Kowsxm17lWxXDUPhaWhVEKwFxrJQmSGeKrUdftq6WhSXfD5XwWd/Nxu6rURLexk54BSBfEaL9pzqB0cziev6C7wzGLN0Xeap+jFssFNQu7I6L4Wze9gbFS6nOt7dicHeaqqkLDnP7ZGxADQJUuMTHTSwgAwo6EqKk4M5E7YM4psMdhggDsZO7dYwMA5upO4K3BZRMB2FpvRg1UqQ15oh2f08/8Uaqa0lZ5xcxUG0E7iYTgdtwEr1pKWRwOm1qD6TRnNY4adqqp87wD39S27AbcrAYVP33+cMRwfpbmFgKTmwfYsgdfrNdrMl8uFierdXXLidaljOPIO77O/89iZb7Om6I/YERJkntzlmTvFeoOsiwSTHxBVaVizgHxRjT3tdX4sFKIKAy8rVQw58VYtbhDmystSBsuhZhD3E37xRARkgio+DRtypiXTATn82eX//VnfvGf/tu/+ZJXHWzWuPbo4bnzd+wd7B+cvf2hh+8f00I5IZ186nL9/dPnfsHtTzz8xCO3HdpyoKdOdTWlYVj88LvseV/w9HtffOv1cvW+V7yAaHV67cyrX/odeQ/Xj09YlAepG/NSggQSxEVAExNC8cZ6KQmKTqXJtoSyW85BYjStFGnYsUU5mIcXC88pfKfonENcL8IAIgYVWHgIkDmKuirU2AJyse0K5qcpcAHG1DDz8XoOw+BVtQfd3TMwZ/E51KQhZx5CAp8oXiJS99DMiQQc0cE6n0K6EnUmsVlUDgBRLaXt2vqPJYugjZRSQjKzGts6s3G5EEnr9TpncVcGO2zzyiOc8ua95xZsd5qaTfskxVVLvp59H1wWi6e/7gvywb6fTuoTu6BhaFoAUzfb0g6zu1r3CoMkZqE2uwqElhAZM9jhTLXfxiw5zrO7iggVjel1SyPsgYJmB2eVnJrgsyNck4kkksAMF3UKyVcnojyk1JdxkdfqVIKAF48mEUHNwtvcwkI2Hr82U2MDPBiowrGyNlerCmdOWR1E5uay9dXaUUkkYnPmpvagVs0sCYiZnNqyamdjNweF+df8B0jatF3dVC0WdU4IPxJW7x2ZKTkBKToVb15y8WYxowQniDnCq+SWSMpU53ozxr/BBLAdiYOGYwRSStXUzIRYCVariITG9aR1HMcmizgP2HtfFWPe3dj3R96Z+UVtYIeeaZjIyDisNPsvswbccKBqjT/vMzQ5QjDcBZAE8KQa02AWpK4kR0TzOjClFOwsMxNJOTajAVMPgf5+xXGh1hVN5+ufC4IGUSeCtexVHZDmgNT/HMJ7KlapRGRM84zLrOnwoX/ZeO4xkDBsv6dHrRJjusxmFpQD2pEMC7LsPOGI1X6cMGbOw7DZbOJljsJuOYyqJRqIYFhFTTOkDCY1U1P1tppxwKxBxnYfJfV+Hb2bmXRa5MHd19UjJDDYzIkQNM+Ukji0VFIfRLTPCclc5tY2tVC4RUHvaJJ779Ww4zJi/Uq0zye8Y6niM+cOOBrgnVlmr2gN3Cclc+4PTz2wNwY1QLMEjbfRSyNB9MubbMWyyOO4Or0hhqcuX333Ox+98sQTX/GnXnb/hz999ZE0jLD1eHL9+qZOA2XgpFo6PDy4euOJH/nDW287s3zWwfqTTz2xd7A4WFzcnJYbL37TJ3/25z71S7/4K7+8QjmVvP85r3rxd/zv33fLxWce3biRiJKe19UpxmZsambCDHhOyWql3pu2r00UFV4YrYPaVq5Bf4mmaRrHkYikfzGP2qg3LbvcJKhpH43OUTHuz+BUzQ0W43oDqlvRkmULL9p+TquHiAIe293PrGUKBxN30ibc1Gq1diVNvle1xedYEnF/NdA012jmH/M2c1e3hIZvkpQiVsxxvp2BeZMY0l3E1tnwA6eVqqrmnJMM8d6VugmhMH3Fjfzuw/2TTR3SP8fyd/3Cl9XVnayU9s4lN+J04dZzn/Vidl1Xl1SIxsRZde01jpYw04wligFfwHh3mpMAa851KqDWKicElKTdpRRKyXUbtOcXWVgSsWmFmpGKACSuRvDMogEtauyaCEfC7BPUt2hzSiRDSkRYDDnsjgCYaqqlhPGzEZxDnsxjx2FVHc3Q0LWCmEXq5GQ+DIMRQhvXqiLYxIQtGEGkmMbbKMNgpepUFymBoEVda+hytctzRBXGRETcAE3kWkM1Rtw9GcipmI55CK3z5ETqDK91QylZZnUlJrYuNyLsMfMUbuvPJBJtq5s4ORhd8iGuJM6KldqiW08wQuxq6Oi4SL2xaojuQbWyc6AZA/cfibbpsxNxEnCjphFT4hQ1NQMViJl583FyTFrNPOXMoFqrkfE4sPDpZhMlZ8QCIQI1bHl1kyQDtzCqtYmkg4hj92C+ZLYoqGsQuT3nHEsH6i2sWWkdktvJRlNKDmdmdSezJl7D5AQRoQovZRwGZ5qHmT3WCJFVt5QYTqpKKZsTtM7j3BDXFSfv6oNM7EzMKfA+TETmBBg3nJq7o6h6dZZaK8aBHFY1syjcXFNiVYvmWFXhzkzqRl2ymJs0Y+sClZH2FlqVQu3WNBxznJ1IQh0lgdw8PEwmGHnQpBKMVRVt5RwVRc+FBCaOvcAM2DGzzNmoeckl50yiMGVnEfGG11KgWs0HSwDURYgSc1igoeuQzwMVIso7dVBx9W7qYGYSIrBgZgGMrZV35G7uOhVKWSRLN9HjxRASeOwwZmcRSq4O8ig13B2JBeJVy2qN5TB6w8h6FksMhRiUqjkIrJMC4ODhsIyLw2GwG1eOxOxf/PCPDqN8+1/+hvW0ye9+7sc+fv/5i3tIN2T/ItZP1I0Qcyk2DLy3WL7kxa+4/tSNb/jJ61/64ue/5o4XfPxd70i33/74rV94+a0nhxfTgvYf/tR07dq0PLv32j/2Z+6885lPXrnGJO5A3qj
o3rgsTXul9fvulZnJKElSdm26SpUMAyVL4gDt8BQinEKaInrq3gMAJq1hxwlTFqa2N22BxbRWasDJxJKoOcNLLJsdLR3CAQQv3/pKzqy7fwS8v0zc1pxBGiTSGpET8K5+ysQ0BBvVXaeigEQbxKJu1ri/W+fZOQYyCOZuUVc1AiqZkbOVqO9pkQdTNRYzy0ylTjAPzJc7uAELM5m5UXIR1icuX1+dHmdJtZRE7IxJV+UVR3v/5q6azctGPf9aqQ/ccvEF0/q51y/dm4ZxdXp8z93Lp99TJuSRsMbK14fjmWEYpk0xYpEE90bNIaAq0Q75Tau5D8MgCLlJh7pHmRsu1oyQM3GOUqNGx5Vjv0sunBDTIzcjZSRVtak4a6DQg7vrMcyVHM1ab3ZBkMCnh9uAAkXVgaLaNkEa4VHybs7nkKgmgyHwkN6nqdqk9lsRZ+4ACYl2Qb44FrXWmQAXENNaa2hEzIhTVdVJox2k1ptugbKELkvbmlHvxTUFWyPUmhROzNAYOOyIyzcE9E1IVOz8mrP+PJzxvv0NRsoumG3+u0FsKWisJGr9IsUOf676c87eMZbeYTjMHG9RS1HN56ehQYJVddMlBiKOaOYbbDablNI4jq17ti797FuxazOr1kdJXa3U+/4bfSoV0wioxWI/buYc0IPFOPsQQ40ASQkOV2v3p3fJgQidtIYY9+4URKt6U+Mmamx3dmrInbnGj4YDfboAQDs4KLMArS+coXZz9+yMlJg5lINqtcigLSExx5hK2tZNlYe8KVMpZRzHuXFkZkVzH+KmFsJNoMqnMAKZPQGpT0fiu1OXFHX3sOClQHwEH19N3QCqWueDHbzE6LAXKVPfm0pQqxtOp33TMjVpFyZxkBuqG1HfuTsaj6sBkrcnp/W+3uTTtSoROXl1AyyIjwZrvhDu7N58COMOzyLe8WkeYIQOhwQ1O0MQizBR7RLxwYohSSwMUyaJ0DOOOcKFmQ1Dvnrlxg98/z997gsOLl64/TWv+Zw773ja7//eh9//vt//5KcfHBZlfXpjdR37y+ODxbNX0w2F7o0Xrl6/8oRd/7Y/++2/8daflTP7P/P2T7z91guPPXHbU+/4g3tfurn82MnmaDh7VjY1P+czP+Pvfe/f//zXvPLyU6fjsJhf4XEcve9HrPN11KzUmnNWNWdiNKufLkjwPwkdmDWk1Gqn54aDRZTdDUVMHLoWcSerW5Q7MK8W80SX6IRujksAFouFdx5w/BLinHPRKhR2XdSBI+xq0iNnqBt5H3iOKQPIYSBhZlW1N6bkOxCNPqOOk1zM5uhnDX8X/nvhTdBaDjMj7qi60DRGBjHgmhgABUsBwkRDSr/7tt+5fPnSrbfdAg/moNpnFT+r8jv7PG0mMnM6TMOlE3vA/NfSufNF0tX1N7/8c+XcudXR6dJ445rJix6HN50ZYBqAwWbtRQh0etzGCkf4oqZ+n7mJVMfW3Fqzu52TtfFYOBHCqltMNfqmoRFNravIhUVe5qSqppWIsgikmXD3u7sFCrh7wF+qacwzhtiI9DzE2AJqWHLDaEQGjcScUnLXKBlg5k4KNAEWbi6Zc/MX3V5rEOGY+U5wSuJhpdBtNDrOqy9uGarm8Ibzh5coz/uEDcCMDg9ZM28QOw5nEsVNTiONQ2otSzOzEfEOZDE6lTmTBbYZAc3tgkHwNkzOOc82fHOqiIKjtNK1BWsyD7/r+Otw7IgmzFEuLrqVDESUiDFku3nhp11Dipm5M08ULoATgudKO95wZhbC7u4ekWXnKIQjztaehcwh7IScpVZnONTIGjhcOJUyoW+esFOuWe/4Y9I13xNjdLhPY5KxOzFnaTTB+VC6bdMbADerbhHo5wzn2MKXnBuR2hJTgJCDUUccQVN2CNkQYmu0KBHBtoagmEZyTlUrd0l9RzNKsqkEmAWhXO8ONKm/ltvmiIy2/kcgFqOeiEfQaf6+A+yPbxQzmRgIJycymHkok/ccG74jTAE36wg7atZTYEeZr203lNP2KcQBZu6ny5smFkDoJJnws9MdLUzuLgItATB70ARimBd240SpEQdqc9CKXbtybLXUNA15vig3SymVUiTZt37bn/+p//zjj91//KKX4IN/8J4HP/3Q2972m5euPrS3uOU5zz73uZ9/8Fu//MQH33d9eW5Bi3U5vTKMtnd2+RM//R9e/9rPv+XiM3/gt/+Psn5M0nD+3D0veMGFb/1rT/+tX7py7ZFn3PfZL/0rf+1bzp277fFLN4Y8MnNon3XWkLOwdAUoo2ZWPd9VZqaG06X43d3UGIcx0hL6sHr+r9uv2UeazhxTa6MW0xvrzr3CzUyndiRiscM7OhLtwKjFQB9Ehgp4O/kUSKvmmxLAjziQ2q0volmiJN3pdqebakv69kO5ky3nCjjQNN4bmKo14jnIAkqr7kwNMoLEQx6JYSC1lt5AIFdJ1C1Z5OOf+PSwFIO6VWI3s/rKY0zE71569Zy82gSpZTrh6sNi+ciNq694xR974zd9qx2vRMQMJcvhMJJyMeXQs1QdyFNKlBLAzjqL+Vg3/mJmNfPmOdslJTpGek4l/T7E4D2MwrlW9ZlxQ+Q3q/ZWt2Lqm+o+hXYHNbBeDV6teuRg6hMxapKsgflpQY/TZrOJtz3Mkdxb5RXZFz2hujtggAQKZuZ9dj5rfCBijmcE3dEHjoNV0eZzqhrbUO6gFTOLRrbn0Z1uuIu/Gzxx0w3vl0Rm5uwtVEM4MYGhpqYzdC3O8dxc7Bq3Wfse7T1srW28av0Let9Ms4O4kfDmAx3vFXWolJlJFgDoDvaZZeCtdUG8c+hws/m1pJvZIMycUwrgVXwLAYXGLzlExPqBaKfBmkPLfElzctWboRn9SihKInfHDFXrfyDqIS82U2jinqPHlGAmRB6IysA6ewd98ckMIo4RDFp1aeRtbt/rPACAIIp67wOD1Dem3H1OrNSpUxUp1rFqFBuvLnBBRNr1Bb2PQ+baVq2wkJkHhzKOHzNb9zSdb6CwgGBapmmKwkK6FIOZoY+yd6NzBHQEBCkeYq+EpNNvWt88v2zVnBB0ICdQV9tmaz5xKXGthjmCSpJeVRA5mbcNSANd7zzgqqH/CgCBloDP9JhozYkxP3d3Z4d2rGxkYgHZDtCXnIRD2aXpM5hbBYkTd7HunDO5uXtggc3JqpZS1hFhctPuPzzYm45XGD547kK9fnT2p/7/P/vQQx86f/7Wu255NhFfewqg9C1/6/yH3zv+t5986PjqbWcO946nKycnJ17tHW/7Hxdu+ejZw0F4EN5j0K/89/tNb3nJKy68/akrX/SGLz08uHjlqZPFuDSzaZoWi0UxlZ35CvVRR5wiIkxWAAzcVPHjuU52E0m6v5ut4o8qMODQtUu4Mwlj+xzcPYBpPk+eumOpdSRte1xubAi1wjneppT+b/b+Pdj2NbsKw8aY81tr73Pu7du3u+lWowcICUkIJCTEQ+ERnoEqkJBDrBhsbCo4wYBtgisVJ+WquFIuUsGJHedRJYKJnTi4cAxFyggMMZJsCDKWkQ3IQlJLLanV6m61+v2499
xz9l6/75szf4w5v7VOK/k3f2l3173n7rP3Wuv3PeZjzDHHDAsAntCz+PlEVtikfMmQQWzHgBsZgMw8Hh5zrgvmJgToa8+1XTfNdfutO/ctZJA0ODLjOC5+GjaGXK+lmUuAKNx9IWOtlXEaJ1ymDxwr0nA639/fjY9+4hM/8IM/9Pa3vz0zL2tKVe345jfHDzyN58ej0TjMzus43MfpHu985ytzvvErvvIdrz3h58Jzxhh+t5gP82RPl+eMMNXjHx8zc9zdrwy3U9w4HbNhZoMW60IiCUe7kpcnl6A4Ou2q5BprPGetp9E4Gr+Uq0GRRdaKtXIfDKC8jZQ3MpOE3lA9BV4KVKErrGKDhESidxFmtmXldzbt7isrdoJyqM0RyIhYwnkUfN2SO/as42gi0pwTNyYyM+dagm3NrFnQrQWfi6CyiYXYeTl+7lfS3AKQnrDiA/U7cS9nJytfsAdjvMR9sD2fWMY0cqd9q+rLZV7tthR3OimniSxuMG744WyDp0tYZ8XMo+LYGoBT0XeFXrsaVAIR/SG3Tdl3hjtXljtxY3OkIiOyI/Ca+FoVgdW7sGIR1NhLj+pDGD4i4rKmn4azxExuaSZ7xfbpr0BqrjSaenDNXIOqc6mdVKb8Okyiod1tSm6jt3qvRM61GiMZtMwryej2Lt2am2yFZ7TV8x7vuA98zcUjMWqqkr6utHAOktSYTloiFV86G+yZc2VUCrWfYgMANzTD/TmLE8BExFo9ChMwJyKDGblWhr7Lyqs3JYc1VKf7yG8sf9PFs4bolZiX6jU3UNg2GbUs/dT1Km4QF7SJJMHiAbn7leu3YpjNnJE5SKOtkqrl8JGIJ6f7u4dzZhpHBJ6e7s42vv/vf+e733v3o+/74Hd95//2Xe96z1d8+dd/xde+EQ/23/zDN5P8v/+5z77znU9//z/72v/y3/jl3/G//tBHPvDK+ek7nr/5xhl44xPHRz70AS6cn7yNZ7x2/9TtxV/9Cz/6/d/zRf/Gv/Vnv+FX/8pPvjGfuB/HoQEPESEIaoxxmYfaefcZ0/1lXk9ONkvgdNNukLvLqL/qO2atW6V7V1t/K2Kls60OhbzJRhTO6Md2ZlwfI2M1cGVgkJVsLezDqaQQEKUob7/2J2RjPK5tjKVa1X4WufaF66PhhjxIwrLm+qU4AaL+pcBNmNvp5AzbeVomFjQ0BKfkistaKx7z+ZvPnt7ff/LheOWVV9ZauaYPn7/++fk/fJ3M5evuYQ6cn8PCnDOZp1ff8e4f+c6//v/+jb/9V//eb/X11kM+2Iw1Tkc+1wChk3kMe3x8PI5jiRLq189vJbKQEXPbkylgychwybvhJsDa14fkTEHE1eJ/+1dLMGYfpNPpZJZia94GOlejhMzoTj4Rr+biDGtSpwL8OlJzzrWOW4rKth06NDcwYCXjm/yxB3fw5a/ri+R1d0+n0630BAVaVUBRKXhE6CLNY2XmsFLzUSunKrIO7rXLm6xdibj12LvtovQD+yPxBvlhz88p46vo0qqpzvvrdDrd39/vUGOttfDSI+vDm9n5fJZ1fpyHJuLtAIqk93XeG6Y3un4nS7lm224mvDvw0H5O4Y6qs7d1o/2wuJkEsiWTUiHPzVLrOxGBZDYxTS8ip3X7+bUOoitrmrqSpx21YA9cOuYOvzZ/rVr7b0R/6uC6qaKsnm8dyMuae2uAG38QVePsELWzhxthzn2G5YCGn+7O97f2sSyU+0ai0hjEJqaRvKxZAw0bIDGzYb5VKnUdzj5O5nU40covG1e/OW/1wTIjoloqh6caOjJzz4jMBNJM/cuxH6QWpkqzGGP41TXUMd6VsH4dWCKOmdJI75FEtyup+xgt13A1xMM1UNn66/ZBzMy80LPb12RXbeKYCtPnvNyf/XRaH/rwD338kz/w1/7ST33v33zlldfe8y/+yX/ha7/2V2cef+Cffe/b3vEpO813/IK3f+6z+Wf+9Ed+/Afv/2d/6pd91Tc8++xnP+k8v3j26Tc/80ZcjjHGwov1aI+PLz71mTe/4Vd/w1/+K9/1Lb/vW956eLw7P2jBnzx5QkCkK+UAo2uWmTWjV+sz/OQ2AtxXZt10c9xeyS+4WduS9GsuRerskgpJs7qtW2i2jI8m2q4oQqXZuPnKFvHeplXkj62J0Yk6IuLs11/0G7m9WEtp4jwOZYqa2CWnpZMPVEJmZoyMY67LEcdkpEM4dEhxYowTYRnYMpYPD88FJpnZ3TjV1UiEc2bA7XS6A8AZX/NLfsnv/Z3/necvns11QazT6XT8ohf5nunf9yRyYq5nvt4cl9M5gIcXePHxT37MPvnWvJzv7MmTE54M2HC7eyXvnj6mtK7iiLWQp/s7O421DvTQs6sLaBu7d7DvDm6v5Bd4XwA3M32vtjSi3vH2FmTnkO4nac2W0buZFydlOGNRndB8qX1+Rhuyqy6EvtQGw5FyltFzLveHVk4mDZi0KwzoNzpb7n4chxoH11oOCMLdZ+XKQ3Enr2lQV3uXzPeZJxsOdc0OFyvLaqIC1o0U37Z0t0Grvn/EWhEjdSUIAeJdD9j50F59/H8r82wEO3gNjafFKkQw1aiX9tLngXFqvu0Xxicyc/v1q8HN3QkemsVtmtPdJSvDfq6rd+n32r6kP3amuNBbmSSxqhKg8jmy8PMgqUHOImeeTufYslC0Zcqjw06DwWx1X9lr7yx/jAFjZA732UMOBi161BKHuZrQKWmu6mRIe8lx7o6nmSEQIrYUhru7X148jDAOp3oqyBUxQBqP5nDGKqgKgLjcas9fa+1pgLMHnu9jIAu4r5ms3pzzRNJokSWqQM5YmSEmQTUrR2pQB9u67ePHSBLZpI/Lccw5709nGx5uIBnwhRVhp9pB92paswrqNzBQZ0CWeB/O61lljdi5HuPdN8L6/SB26uXDVtbdP9+dgOvRYuW+VVCvwnAVjwHSaXY2jcwZN81LPs655hhDZI77JwN4/le+8y//jb/6H/3QD/9Y5iv/xB/8733Dr/p1P/ljH/rb//l3v/HZT/2m3/b6n/yf/qH/23d8+H0f+E9fffLOIP+v3/G+Pxy//o/9yW/+03/qr3/iJ17/yl/6yz/9Mx+Jcff8+cOZr4/EJz7+sT/0h//4/+Jf+1dgpw999NP345VBh9lxHGrcjwjR0zd7sQwO947ctNK5+XCdNGRi7HD8JbitkOcbm+6gjs3+DpbGdg2ogcL0pqmCLrv2hL747Fp+EpwhQi9U+1eUSbPzPSrNSMtIpGdEzMfHStyLKtVISWrGecRxuaAxVYUjty5Hp0WeqpbopnG5qoBmfj4nrGM+AsjH1FRZl8pHpoHmPjOYjMCxptmYucbpDszzCcRBi9PZXvzG5wi88o/esU7BtTIQa4UFja/ev8LT0/HK659/x/3lK9/7wvKNh4fXnr7D0h4m4HfEzExNa1V5CMBQetm9GJUo6vZ1INUewffTvex6X+LmtLBdfStiuQu6T/EE9uWy5nkA1zrgnFMTmWhZ3J5MDgfB4fCKqTOz533egIckafAosPFyuay1/HxClwNJSpWpxsJ4j
drVK6wMN8+uk4lku+2dIUnu0GnXRcraVYnb1K56Ot2Tj+ty5FxzXUTNBbDWOplLbtjdsTEQNla5M/i4WlI92rocIiDR1R9ssn5YUbJzN+AqeRVHxQ47Xkar9p7pZF8eH0VtW12E3tFAZi7COoeT51PkIfKNBh2czAjMtSTeWR1KO2rIcpwb+dkX5rZsv0N4ufBoxg1ZhSo3M3cNvc/MXDFOJwLHDCDWnEFq4xreVHy3LNsRiu60hcnmkv4c3Y7jOI8TWlinoshcGmGYrb4Lgyc0bnLLTOpxjlib3LRPpX5gznm5XFQ2BhIr3N06hZURMWWO5JZUMzMJQu0EMVRauz/P0AC16u4ddmLXeMyMqLJxRDgdAKMS3GJ7QRtTx+PkV/VBa2aKfL+iln52rsvxuOJ8dzeZHH5qhCCiQEjQI7COl+rr+58hcvJaX0CmjQhxrbTp5lVd1tZLgkTHqY6lpRm3tloba2MW7pKqdAKan1rvUl7+pTSCDc/uzSIlXGGf+dTH/+Zf+Mt/8f/x7+LhbR/60Kf+tf/NH/llX/s7fuwnfuT/8B3/OvnxcZ5/8c9/8F/6V7/8d//j7x5/9Vd98IMfOZ8ZtD//77z/n/8TX/dP/9Gv+jN/6mc+8tFXTsw5/cn9q6+99o6f+vCP/5/+9//uH/4j3/7hn372/PE4Pzk/HC8eg7by7u7uchwRcX9/P9fKo1r8RePwnhOlstcY50Qe65KZp9NJ3aRmdrneLyMHq6Ab0h3zlWVpumsWrWh0VCdFEfhLs/AmD9NEk+3Vsza0LQwwNhv0BqXIm3V2d6ASBrbcVa9/16SHzwgD7u7vzUQmSXfPls98KbknjWDjf208I0nQARrHseYqQM7XWjT4sHWsy+WSmeZuwDqmlcw74b7ox7w8xGH34/F442mOFeutZ5fj1z3zH77H55nBRw32PA9Dnpg5X9jgGvfr2d3dHP48eH76cExyDYD3w6fPyDHGzHj++DBoJx+RpVeobiKSjDyaHL4JHwooMjKJ0zjtn0QhQGWs3EmrLpW9a6tnKmdaVuAaSaxjmekHDt4UIDTKU/VckEjFRpTJJekVsxrMHMWlqtb+OWe6m8HcaOCyK00GNHLJCw4/nU62Esjsqok6ya5CoPs1sxVYCD8NfaIoKQWLrLIuI8lwt5mx1uGnYcOP48AKTtELyYU5L0qjH2IaOTTNY62aZeSuuzFjBjUJIm04wWCaicdZYp4qmmsnzj5qduO6ipUoTbwK3gJpTmgeakXPaa6ZYmOMY8455/39vbkdxwHkyf1AWaWVaYbM2GK81bEKqOs/Ee4u8HmEaUA3Ota6OIDQ1ZFZn8dhp+FEkIvm5iHBWOIxcMrEXHTbCZ+ZISJG49iJyMgrhZjwU0TMtWCs2c+ZnOv+dM6RDw8PmTkjzufzk6f3D3M6G9RVO8sBAy5qapbWhDI2PwNIzHlR9HrXYSnhPmKFXUFd9ybcDc/M2SOkSGpa2pXv7Yxcx0wFnrg/OZyRsYKkRgmcWPrJ5Z4Tc86FPD05+/JAzpjmLpCfMyxSw5T0YQYktRGBPPlgorqz3IKZKyxyOozMFQMmZ6x5UyNzGZxGHQDXVOl4MiyGR4Tg5vU4j1hjjBy2iFKXXNPcz25rrXRDA6cqJVhkxkHn7EFD+jrd3RtZvLnhMCeQKxx5zMlM6gwgIxFOzfgdZiKixDHpDnNoHpTEF0vRD2jmtmLdAFZbcwAhN+IWmbnmoKWfFo7H52/8pb/wp/+L7/qp4/mr8+HFP/HP/JO/5Ku++UD8/b/3fc8/++HXX331dHf/0z9xfNdf/sy3/YFXP/Tht97//ocnT17hyucvfub/8n/+zB/9l3/xt/3Bt/37f+YTfHqmDb87fejDP/En/uS//Pt+/7d/4KfeIH2ccr54MewkhGEdERHn81kp4Oq5EUGgLRKtBnAdcWTmSM655nzcLZQn2JorbYm1K0HgyNT4ry161ZrkXGtZJoefWozWCQ5XM3QUiQ9mwnYrRyhEoTJtc46lrgWzze0MIkknsGJFOAwr3MvMHsfhY2y0RoocERiAuvOzOhVd8IRf61R10WTWaBYqWdLWWitXRoB5OjJJMcLINLMZayLcxkqY+2kDh0YDc67DfNgxIk/mPPvpdPrgT/7QevY53L+Sv+b543/3+eO3PeMn7fN/4kNPvvOV00+cnz+uV+4G1vPF07rL+8vp+OhPfvXX/bqvee97DqaUSe4Ou5xoR4QPZnjCE5qlTfBILJpnkv7kyQnGiOXmwieUmVQ5YB0Rgch57o/dZfzTyQDMdYin5SnQT0y3HMchUgvIzZg7+VAEFiG+ZOVF1b44J82EPUDNDsBY6YGgxuKsgcrTXwpj3R0ac5Xq5UMQucfXtJBQgdKRJ9pSJ1lDrztg13E8jmO3qyq9y07eOx15qRiuzxTdb+7uSp2jmrHW5TIzM09+d3c3L8daSyLP4pKoaFd9nNbqwXNFhJ2Hq76uS/By9oAbulYHPlFpKJtLDVT8qJGvN0lAjZ1w34h9dlyMTOXaK1auVhoRSMteruEuqcu16kWqB98B5IrjOHDSmEvEXDSjypYRrpoHqBa3ORfcfNTcaZPmzm0KpYdvXRh0+kKWqngx4LqUoIGmqnBYDw/JgIElomJ28uG9a9KRTBQkmZmMtUC1MmaPTK6VZFXObmbxEkanb32ZvSP156YySodEbb/Zunobk606BYGbwXAakw6xE41MnOi44ShFj5eA6ohiWAyPatBsc8MrJ5F5nT+x8z+Z1rQiZWWmBtidfKxc5/s7JE1tfu4adXLyERG3rJyaHbOWoeB0NlxpTbTZqcw2AXuhjqOHLXq3S7XImqih2ROvIyIdEWF01YkLlyECMR/WGMNOQ4O79Cy3T5ovk9uddxEvBsbPfuwffubzP/t3vvfhbn3xwc980zf/2q/95d/44Z/+yI+87x9993f9x6+//s758OJ89rGe/p3v/tiXf8UX/ZZv4X/9X//sT//wl9298sbwVz77ifk3/tIn/sAfe/INv/atH/7+u1ff9fDs2eO/9D/+V/74n/wX3njjTcWQDy8u7lxZFJmjHcycMzK3nrMMnXdioMt1lupLH7y4KV25V7kuM0vIpcaxJcQIpFKlVFltWxtTZCsVbr8yNKPrxyTXOoTs6CdXZs4FLBhjrQlmpchFcdC4NjsN0hlXIot3052AE/0GAE2OM7Mjlnq+5WxyvdQgsJ83JM2TCVWLfaiP/+RDIOgYw8m5lpmdxzjiQCZoJy/2T73FWIm7GeRxz6Dd483P4TOf+uxXfN1rn/69n3rz21/kKXEPAut3Pnvrt7/19E89ffq95+PF4QP2xNLiFA+v5uXbvuVbn37xOz7xqc/e2Rlux/BT8DEO5YpOU0dEX66E0cBqEtM/Akr3Xa35KXAojQmvvc6N3vdJLo28ZKlJN0KfIyMz1lJYQxIrVuQY57r16q4eV52APOYecmNmSoPDuES3Jyxt1LDV6wg2CRhY7j3LJG03a1Zwd1NIiChpFaAGY+yHcRfDtwa63rpYCeRuY5Gd7G9npm4HRGktlo2Q
JY2lTBHMI3NYpchbdHeM8TiPhcwISSFu1PE4prhtPYaItyjuF9iR+lQ7RQbgzEyCkLBGY3QUAaF1MKS5KLD0bpzu7u5GQA0bmrtJ0GFWdqFCGXejkVZdXLZ7zubMzOr0IJG55owbqggly5bASu2B1ouFWyMzsZJNMdiLnLd0TSlzXTsmriB2iOIZAeBeiBaQmWrUkXS0BHSORmfAugzJ6tAuk13lBnWfV3dHBCuj7U9XTsUMWXV3CWK4u0wempRYO8V6he19tVCyUtGjujpuKD5kRCysyBg0ZjihStgO2vYtjQjOFQYgo9oKO16RH8L1S5AJtgfVtMoqPZhoPDMWaRhF+zKzBCPj1GfPbqZlk7SmmMnY1gcLSRIbX24KiNa9AWDyrcScepcU/1LLSNVDI6TxAeByWTlUf6xDMnNFzJOd07p5uqMTc8vrHNaK7fTZJt+4P7/22Y//2Pve95+89ex4e75+8meffvMzX/nVv+zHf/KDv/hLv+xvffffeO2V8cr56UffeHZ3h3GKN9/6+L/3b//dP/7ar/hDf+xr/nf/6k+/+Px77l/9/KtvP/3IDzz76R/98t/yO/0Hv/+jD8++6D/8i//2b/jNv/4jH3l2Ot2dTiMiXnnllcvlwWzMo4ArTd+QVoZbVRO0nrMZbSQ1ZELn6nw+3/qzPXcSV5Zoid6or8F6trRC5Ig4YjGrrcvM4GaBE73y3K68WEvWADKOQePIgqCwIldWctK4IwAxgMYYpEQX6vKexhlZAgA5c2ZgFaaqcqP3+8pLczhWjbnbFSvlTiywShQbCyKQl2MGYS3t7qSBay64FXM2xVmpgzoTPmY+Ph13j5d8Y744f+wT3/MH/ujxk2/7mj/31f8gHu3Zu44E4l3B+7Sf9bf+52+98oOn1/OL3vzM595m959/441/+o/+D/7YH/+DwXc8u8wnd/d0i+fHY6xYsDtX7pmMpc5aBbXjRA2mkwq+G4MR4fCV10lWu9bangTlCFYJgpI81tSYvzQC5nQyYVzHVDKgEGzQwmDk5XI5n4e5kT3ppNycwW2AspMky5sbzcYsjkgPR8zbL3X6Vgfqtb5Ibz73zu1YJ2lmDHeSs+XN8kY/a1/XrCgPBsh2Z16Vm/TmVjrpIDm67F9e0KTpEiRP5worLnMdK+TRFRycTqfLmvf390pDc3PVQGaefFTNrCtYMyMj1eGqCDBubMoYvnCTUjXBIVezWnr0uso4aaS538QT2WH1JljuQppZ9cUWhLjMUvU3tjZH/Zj+4JrZGEHy7u4OHQ8JF2LJFSm9y1hXGs6Oiqj2Ps1XvMnY0L451lVhOLuGjRXoILG4LTfkEbbHvkaUqpQjEwHQCIq3SVyOR7OiBe7QR1ayoi5eTyO67r4jP5I032sixol2rQZzlsCNqT980MWEWt0VRrKabmsrAYDWImjkZc0MKJVfuM5+ST1Nk8VwG6JthFyiCy+xPHQXpCNpkF+fsZyxlmWx5U11kTSsMIC0EFe5dCvTaYwU+imNTpILy2F79Nv2vvpUyre8qG28Ei9pZrbZ+70OsTKwwsJXTE5wuIZ9M410qR3uA5ytscNI1Q5nYxI6Cecn/vnPf/ZHf/yvfu4zHzgujHc8/+TPvPG1v/Hrn/yiV58E/5P/7D/6THzk9ddf+ezDJ89f4o843I/7dzz97Buf+g++832//3/4rm/81vPf+o8/fv/6q6+87e14M/7uP/rE7/rH3vOOX+bf/u3f/u5f/kX/1U/86MrAcwJ088Bca2qa8owSJ7AHoeJp7pFhNM2bgfbHykzdVQZcMd9OhqILau5GXrkFIQ15GkQH2oB8aHR3BzIrakpgBV64Fgma2xMRWAuEmTWuUC2hmTWSeYtUX9YSTRU3bVTo8z90+Fv0OI0ZYaVgVbIhmcnEjMWd+67rlnH4ELSmCJ6EZDzWopkUbCLifD6fxkjkFO1hrlwNUqqp7+QPlzDnWm/cnd7+Ux/6vu//gX/voz/1xk/8rk/G58d8ZeJN4E3wGfKcy5ctf/yWF2//8dfmxy8PD8/HU3v3r3rvzz556zOf/uR4RpAPXE9RM1LjOd1PiVQzZ+7GXBbAjj2yk93R69U9laEyX/FtUhr1reGIHYjc6BIKPpO4tt3GpFuEoFGusga8ktpkfoHYStQffetDmRkrdN6oKc4/+bHLvqv7Jdx9JAXR6MrNKAnG1Wk1WY1JUodQVCh/Ka98HIeDcLvm1j2jMDN37mI29nGKCA0qKdOgeKDZm9HDhkkmQvpTj8d0DbQxP5/Pc04B9LppJ7sybnDDTro5vjgkquJlwrZavX5M4zZLyjFr/ZOGm3eBxmQqRzd6TxbSezk45xR1NrpTkLttw4CZl8vF3f1czdOWNTD4Fhup3Se08e6uWuYOkE+nU4/TWFBHLHWp+AWPrCvKnlFaJtUqeK9vWuFvZUFGyTryZnhD6a50cnz7LpbQPLPrayaVQmEDLZQ3NGuO0uYka0lJPhwX/Uwnf6nGGH3429mC9WiRUVJB3EtXIU6zl5VYqByeVH8XdmPG4zwEQFUzW6pZxVfEZU0hVCdzuAmDNdysZPcw7H+eaFNatccCQLdcwRUJddjH3d0dUPo4+vzo5D6zo4qIkdxE8b1rkRV13TrgTbyoG30cMNLHjgJJWuLAEl+m9y4va855OWHQEpbuJ9jZ7dx3oeJpvZ0c8O70yExNw9yH7cnT+w9+4Eff//4//cM//YPf8dEfS+B8unvve3/hSnzop38qY2kiAgqfaGg9YM63v/3p295+/thHP395zNPdQLj5fMcv8Kf37331ldeViaALVeUCu0H85aYSoL0ddolAIVffZvbfZJNaAf6c3/6539Fr1E7UP1j/Q39DP05+wSsUI+eGdnvz2hXJ7493m1TdfN08xfW3N+kd6I/xc1ZjE4uwRX73R+J+HG1INr+wn/sKBb70irfPl+A1wcCnPvXxT33iU8ex8pdM3iPvkokRlpnTEwk8km+QHzNAPAP7yq/8pfdPnqwZbH+k0CGKUXl99peWRYkxOuK4WfC8/ffLpSLuPOzlhcV1d+uVc2emWWu1l+W6AL2w+9f2G+oMZOb/8bf++1/82i+2cap45TaY0qsS1X9tPVkwbicUGU0KI52pDHNvgIIv8bPNpF3WIcb+q8wcJeOZt8UMWglI7aRnJ0DYilQVfUfW4Ewvf+A2M9I4bESEiNl5Aw3u9qy94HbDjuYOhXg16BUE9K5cbd/LzmY/4A5s90OxI6JiS0YNcTI3EmtORmbcjMuOEP4crYJ2zdvEQmdaSzHPCCelXhT9pPrAZoa1juPA6WTNfGZHhWYW3ZWUXf+riLuZrtmOTdOQjLWh3IMXM7gW/SWkdK8zdJRYCWJLT2ILzeu/b2nbe0fkIzOvZ2P/jLzLWmsPTdonSh9AiAVubVjmKo1IsUljVzEBi5hOUx+d3uIcUJBhu/lYLTo9K8kUgUdETQ25AQO6jr4PhrsvhOX1VqYRwYxVQho61ekAlHCjnKtU/OoM5LEASjyekZlLNxFd/L7e4pdXrCCzm4HwEFtH87OZkaF6Nsm
TnWIduWbGjDwZj2Vr+Ol8vi81p26EBeAoLohe82R+O9k6jsunP/sPf/rD7/+S1971S//Gl3/ssw///W//p37FL/mmD3z4g//B9/255298+u50yoVxNjN7842HcUck1vJ5vPUrv/l3/p7f96X/+Uf+2t/+rhdv/wWvXC7zyatP/rl/8Xf/5v/WH372bPqZx4Gze0bMtbYku51srlVDgbIL8HLwJGdkxCpXjcomAWUzY4zqsXFn5GpBWY3yhHQOVAwCXNYpU39aERISMncYRYM6FSsqjZ632e96aXxkEPrbjC6K5YLVUUxCfe3rdgh6FhevLLztUDMxl2qiLJZLGTRjJdNCPrLrRNo7aXpnW4OMwmOTFhkqmct8ZV4rXKVc1O0nQuOr8DPnsdaTJ29/43PP/s1//X91+cn3/ebf9tv/3jd9z6df/9mH9zxH8u7zZ38Sbzy55AvYz/rpu87nv3l/RN4/Ob/15uOv/ad+wx/+I//8em5255d5nGc+nIgV4+6cK6qE15okEVFR6jDEOhV6B2XAmY3Hmwkk0BQ7NBKGngHlYiwCbkP04IrRSXMnTbMs0eP4zudTWfvLccxJsoZz7ItPh6ByeTGaGe/H0y++/2JVMDXuYKyVWmtYGInOcuTg89qQfgXcrDs1Z8Q6akx3s+qv+1rtv6WrN3T4bi3vToA61xVVialwbF0bNDMrrTEbpN5oCYdIxPl83qY8MzUNAijSkHwbh6tl0Ttu2i9bVikrwy6tzkz5fiSYG1vomhC+kJNVKbINTUJBF5OwIqXvjz0wEbkCNCLdeByTt1laO6GThsMfEyr8NHEJSJh5w5B1aSM4TAVmFZ/GyYiTxdwLrseRkpaDIE1FlK4YCUiXhBC7vHoyj3HVhApSKjBBqGok/jGbArZQNYWcCyQgdCZ1Y9K4btIXfaqN/WoXtgWXLVDZ28zUx6z+rjlna+gybo5cXUiWqv42Q0AyckYojBaRSh2BTNC4Wehx1KlT+3ufgbwcBwB3lTmU+Fpqiqz0he0l13vr8ySWLj84a5fTzsMS8p/rWHubtmLXF1Q9VgSNSEtkocpgGmMV2oybr6xORM9MusifoQ5Xa5hKua9uXSj1TDPjzHXMS+JCuwB3eUzm3WLASotjH+w2f1d/XyYgEsiPffKzn33j++aciVd/0bse/dmX/aJ3fM14620f+vsf/OyPf+7tr94P2uVxreER+YTn9eyUOCyPU7z6D77nb7/ntfd80zfd/8h3z/Pje77uG37Ft3zL7/5Nv/m3nU9vf7z7PPMVMrlnl1Vxok6UawF77sX1/M9rRX8HtXocHdpt3zbSU8FKEw/NGXMdc51OJzdbKu0LvJGKQPfd6eJUObmE4q/xbt7EmvFzErByD63wgVZo8duEj5l97g/A0EzPjaMmEUcZN/Md+lsioqqEWwpJvyhzvQ1FGnVBTla7fK0Gik7RzeLufmWWGJHh65T2eP/q+d/68//Oj//dn/ryr/myL3316/7W3/6e/Cc5Tqf1+nz27kcqVf4Zw9vSvtcuH3043T3FZyM+/+x93/u9b//n/idf9qVf+4k33rx7dfjKi+VCnlBaimyVuhYeMQcfY8Y87k9nbZl0sNdKP99lJkNZgdhnw8wOzGowWS1d52ZmcwZQ+ZvyPTuNMmidXO2DR3K8fj6OozikEWkU+ddBaZ4IsdOmRIQzQvpR0qUQf2+PhwyNNZwLo7LznSaqc+42J8tIQZQr4v7Jk7UWsrgGOrw9xQHQ69wUF2kEbFUHRWSmQPbz+U77GgjLq57RVaiyuGpnR0aEQMtrh0mnzoMc7tm/q7TW3Y8IjVTqPK89cewpH1fMJ7uGl43T1pCA1vh7CSAyp1muuWNVMxOnCZnHDTdVXP1tW927S+FyMTONyMhMulrF8vHxEbN4ZGe6yIrutntPzYxjxFw2CMrXhCXoHqOKuLf3PDOPy+HuGqqIqC4T9oBYWWoNRdY5uxwHlfsiaKYZfIoAlG8mrglrShZ3FYNfipg0o53IyLSXZHX7C12/PJnb6URZk1FDQbaQlozjWlXKyhtlXZLqTo8WmYDOFmFkrMjGUQljNUHpvIeZOS0thg2DB5YZ6+CVfGvpHO2VrJPc3NF8ORfXD4lhezJ344w15zEzznS4ieU1GnrZ0i5lgvvkVzx3Gnm1mCByRawI1li8l7y1PthQ2dgrKDB5FgETRixoU9KI1mlySr/+OOZbHhg+M0+RM3OO02txIwem6bDo0QKZyRvHBuCNN38G8Tk6PvCTn/66r3/lbePXfcmXvecH/5v3ff/3/Z23v/q2nM9jzCAxT4h1d5czH8iMC8fIh7fiQx/89Df/pm/4lV/3zv/2b/k9/9i3ffvT1+/fems9PMy1Bjnv7lbmnbs/Xh6MOsg+53Q3I9ZauuLRfLG1lsa4Z/fe6M6TXFpKVLSNlttEpsydEKa5jpOd3H2civU6zLyirkSrm5Vvi7jM5ZJ0FkVjzjmnIU+n0/ATKh4oeUi6WUlxqSc7KLXKzETp6nM7vGFuzh5cfRrG9hbyT0mLWJ7JzaAEDGV7V+v4ZvezqF98tbRtaIqxlFeghj5xLxjIGkJj1NDDjJgZlm1Oj5XGNf2V18bf//s/8D3/6f8r8Ln3vPsbP/rJn3njr3zGftsFB/AC/roxbX067IX7D493/eSXzNcuX/KVX/313/irPvyB9yd+6m/+pf/n7/i2/9GXvee9l+Px4R73iweCM4aPRVSDDiGwIZNJDhuPx0UZ12XNO7s7jROLrLYBy+LDH8eRA9JXOJ3PAI5YuVZs/FpVbafRMzJi2WlsDFWc5lQLhayBV0IghYY554mmGeSn85nqJARsuGyOKsCDxt/1e/8sxeQEinKV1NgFfRglfkYr6f6CU2SztQbJlGgjVak1FB4BlAA26DNm0evAzHAiM0Cj9QzVFIa/oCbFLeoSBBAyvgg3y0g3I5girCLVwcgi/SZQtaDusell1W2RD8QVm4dC1KI3t+wQUJ/sWigQPB96oqrR9xsAAE3DyYlkT99TsiqlFqh7xnplhI8VYKs3qCrCzWvDbt5Gbz8BRPjWpkkuL9URRMrZSWAKliJ1Zd3Dygrp5j6MNoxjVMODmc3+YNaPFYAY89diSjMO0DXL6+5l4RI0y/4Z+Y0+DASbrmNEwgrgV0LQ6v+1mlV5iqqGlfqDzB52cHTzO19Q9wqt8f4UkTf/hcxWYovM4ksgMyIDcMlnq/81KoYgVuyHqvCrpLwREb1qAhRAsPj/GezZ2omMTEdNGS70RPoqQMSU3JbKe9o5AKtwmvYThBkj0kCgeswAYx+dffeATLIAsdR1jbUq0JxrQd8MjRm0FdM0q1O3v7o7gka3wuTnnBoMVUcLIKtfobcg8yp5zzodsR1cfTvNEjgj5ww16V3mBKGQ1CBsAY2M9CY3Qyqp2buq/AWKC9WHV43+4q6RVgetBzbrNmKfXrZQlUYQ8DTGMS8BmPE0xvFwCSvbYl0H1IwLJdrVSWlASuPAksoubChWXkGzlTnIlZFy+bAVaXoKC4NJn0/1nnnMoS502UezpeObGD
Qxy4KwyIlsmRHOSDciCArbiDKyZmosuS4kCDBimVlmjCYK2PUt9VMZWadauqUgvSqiiDoKasNoaD3CylYhzaim8u00QsOoxNBk9jwbkf0zLgFpg9DaHkJPwNsLXlNGWNTIlLY8CbexYpHCMqMLYfVbllDBrt1tGQ61/dYPluJgkmBaxCqnENdnBNLsmlL2NFFYVxyErkXfBAMCm7VXrzaASFgVCQo5S7SxNY0gA2amYB/CJLcn77utTO9pXcCr/8iMTGvDaqAZaneSYGYZCXRO2QJbXUlPVRIzhzHSOnpIZvqwy5waK6i92AmttUKG7A6tNDeuzhgQQFq6dCW3Rxrilhcnxb6QFZG5MyJCgVgmd+GfAKvbRIMvuiasNpUyQ/v4RHaFWpcetTlZDbs3LIA+HvIMofbWtMMC4Ej18O33EkhAQZ+eXk1TN4Lw18/CZk6a2gDtlsCxt+B6GXtP9zduS+FZ/839Ite/3BeAlcH1aYYuZVoKYN3bltSgtdv9wl5HfcCuMNdf34QtrFgnQ367qCV6o+wfFmX1hhhQvEBZEFMVLYG++ZHhzAgGTAErbKlxSUdVRvmqWlG/T0l+glILdB18kpJcKKYkEmnUULX2DalToZVIlTR7PZW73jq63Mh16MNATew7yiw3KIsfgHfzdBW7bzZZNHz9sw4ygETkCiRpKyJj+XACkSnMXyWAChFobXgTibA+7/36ESCxMiVfqifKRMygZDvr4BREwqqcuLyu2plRkY1VFrHXDJTIUu/Gy6EZkC1CSTUmFpW1D0SZ0ezmAmzToZ8tClPdo/1krHVHSiNV+UxkSvfAy8YZlNpiGU2mz1UFMhAeiAxahaCSj7O4OsTKtpNpKR8MFZQisiYblz3psx6Iki+re1nCAACtVBy6h8UwuOYCTKHyjqpA7ia+1KaiNlpYEpBmQ+1aAQggs0hRSOrKa3uYYHqxliITpAUCGlmNXb2vfcwAYZlXRyozotb4FUtr2Iz3sOrW7TnPN1F3CJ+TNVeETjWzCLfNDvMSWSaffStLaLZbECudBSC/JijCKjNmyRiC4Ep1lsqUWd/Hl6c9/PzXz3/9/NfPf/38189//fzX/3++BgAEwgBgRW7XXag0gJo4JNknGqFkUkBmqBAlWQ9ckYrOY7Ky0o6VBBxW+bV4eptmzAqiOq1SAKeYyaR/oHzaQBsRseYc1IfVIPMUmzMVNGYqcd1tAMJ6d1rG+i0AiCywPDuqgxBL84wGbTqPIqUmiduXUno5EUwYzOyamlg9WdUjEYChgIbs5G3ju7h2uFYilIJbIDjRAkug/4KTkyhMJQE1XpNzHVJqsmqZKw3m26S0YlFCCZzd0Ig6bIa9lOJev3YikPlzfqKw9c4nXko8GhypJCW7X4GIRGm25U2msV+ynr8zFmzEPrsd4OW3aainSvqbbQeE9eI2fxIJSUVquLQ+IlFlpkozN2JQiWEIUCpB0y94+luQipGJVoOOIJEnH8eaymIK7bHG5RNmlEori3hI6XUisWvuDfhydQqBSnuEfBg6b71qnYjsnwJRuZAGmHkgIibSgqjmcYRVha+oGJUxxRSYW9eG8OEA1gqhrLHCJGnJLc7VRKiuD0VhD0LuMzMbZqwSlxKOQrl0JpM0qqs1ZqlWGZoOpjS3XrIWv9FPlDlROirjU+ewtxYFxG1Ux4wZa8ail4r6sYJmyKVX28yuTjJ10oqEiKYC6MOoGbkRIAJXnbWs+i6IKD1pNVUDoiIr8VpI6adn4VsqcITyXUPCYOQEajoHbFsVFLZXKH2ycPiCZFW6D52V6CsR6yKJur5jNGQkyK6XEQm7WkO0JQA4Y0WkzgMCRmbV1ovDYHVduFDAopLmsuENOxcLU2WVqq0kJFlTCnaoDYgg6MOnlPyByFxzmblYV5Wv14vbrBOZlqzaGhoBrUJ+lT0EbajMpVxcNRenxZo6rFWvidCoWBGZhWfqz556ah5MGrG2AUwTZ8FqQCxFaREQj93ypCUu5pEGCPVsGp08k7KBemdCvqCNsshWyEwXGHDT6ImyGWxg5Op5qb+4uRWkzbkAjJODXD3krnHbxiUis4X8BBrq+y3WkAne/p/9/4jAjSZGkXdrJvkGPepPWUhgfX9bedvS0BsSjoiIKe6coDj5yqjXkUncjn+/9XWFsqKSOrWJ1OzCiGWYGap1UZymmtdUrE43S0uYWhxsewrWZjdgX16UsKySZl89cC/FS//frvzmo0mvU9FQ2f9dj2Rd8QyI+a+jEhEyA9GgWEb1ZsBQami7gvIyXNNRYtbpwUsl+dSlslpMWXlWeGN1onXRy2cW4FcbaPUkeROz0KQ4psisfgm7bN+02Zuv6PhJF2Z1hTnbCPUNq+cxFrndSSICa80VKzIyBUpCwuQAa8yoRveUidOGiBm6b9m10r3/cgOm1qTKPtL6jrHFBaRhnZr/UUUsRMnXRK5AqDbKTJhbRJzcLLHmXLHUBBOwWW+4xzcZrEbOQjVm6BZFWXyVWgtFzRoUUIdLJiTciz+yXlbD5l7PfRzqO7QbfZtEtgxOHR6VdOX8cONE5Wprd2vdy8D02gJkVoxR3EBZ3oUa9chApErutiIuc3azU7Va1AY2uNyPsDtfqU/CJhpkRWVyXSrc6qwhqlAhy5lV4lIcVVdun45IpkqVqDACVIMZIjKySx6dEpTQI0FNIpXJlZyidlOckj2GzAEP+ExLpNkyhDNUwE2dHAKeaVA3Ti264HFpzcqG5HB79dVX1wppHIq2eDoPGqpM2JnWGG5iqVbRvA8Ndmsqg1gGAJZV3NbV33Wctob7uvS5kfnW6pdrr78W4WBlwLgyV2S6rQTdqtdPwbvIJhkjIoFFeiYEXgc0BKZCN31dWUkKvasG0FPHI7ipH6iaBlBYuX5+X45EIGyD34rLRfYsR1b2KdsVqXkG5gZCEoxGGj0zFm4y7P5iMcNgHeYBWrWylaqG3Tzf9dOxpqoHmrykf++wVzWxCuuyjyT7wZIVT0UCdAkdRAXg15RHL8Yu/yJfrnWx9Q2rpKvH6Dp9yfmO81mJSahan5FEzJDIorpdA3LPpulxUIIHy8ho7Y06a1Y5HjsJv2kt3mev/5D9ECp+9gFlh1SL3YZbmQZJEa633+0lB1m6uy+pCggmwBe+8z6QN+sIGYjtYmBgbXp9DEOTIHh9Lcpm3jSH04hAMhAikcr1oZ1DaFVoCp+JAKVc1Qyj7Og2xWQ5In2zvQFzS8TCrE9/Q1xoSstV0sQMESDo7lutotGlyIp1b0K3L1ijzqs6om+f3JTClLx/xQMVclV7lfp6oCom4irdR0QywiFaewkAiMemW7ViSV4nsiroRJGGeu9St6WxkjZfomRW4J0iWNX2RFzWrBirIj9GrMiaioErk6OYl5WYteOoe9qKDtj10V7RxnL23cQ+//0K3vqq0eVC2ZJryLwPcB3lHXtm81Ep1gPdHBmx1Npdv6zE3ZBWUUiRXKuaLZyIqIrofp8IyaOKR1Rkj9toNFPyo9ig4o0kbfF+5IuNBvg4zTlNmoHR8E+9CMoAVg1etysBWiAy5
HZnrK0PhSBlBMi4vhZc/JqEWGAoVDUaMwBrbI409qW/lJ/+zKfOp3MttTLz0nXPymwoVVoE0ksmJ6LGi5n6O5ROaid7c6kDvM0ZKuSh2gdsm0NRQcwOXdV2vqRmjSq61HdapaAogrLQRaYEI1cMM48IujOWImJFoxoe10c4s9tYK3Mo8lIRXyPFbDUAZeh7bzNTzlbCtWJjxtWiJ8227TZ0c+9ttsNSKJT7N/UcC3mwhiPLIZTjIujOHmmKHpyKNhNK/gLXO4Yr9snKA25XsLclGm6BiRNRl/bqgMRGqQOhjGkHX70w3DEly4cX8Nu0Br1uYrN8cx8KJywxDQEMMIDZyZTZQAaHr5jerVaXdUFv3vU5sfEzdsJx7dnXJ8wb5snthb59setXpQi9DO3HUYc80Y0u0G244gc6dFxi0ORGWgAweoxxfGH622u+s/c+bLfRfT1KB136VHkNgXdcX5tYFCptYNO7N0RRHzbC0BxvABUL2hToGleyXYVoRGRQGQIsJPCjBLD2gpWgE2heJXRfEoIT5wwbVwMQKop0gIHK3xDN3WyWTu9UsUyRxY6pYk223rWOpNXBrKYXkYzWitIslnEq1NHA1AkrPWrrbICYuSsAoslJJa4AiltClN7YqImZgUgqbWGauTDB2NumP9Dq5pJzTl16jVysd/w5Z/N6YiKvbrfBw67LNBSUtBKIrfgIVWpyxWpmhaLVmoiQWbWYKgI0oJME1LwhqZNUzKE/CYgexXsVMkhTTc66CQ1gmEyrpnleI19IliXIAZ+WWBkZu9p1m5NUYKLaYh36huBgrPBd+ZSwwJmxLE8iKr90V5RmwLKrd2VEU9EpIzNnnIZ3NzsVJk6kp1tqjAoDySICkoR5YZQakFKJm9J2fVKmKOLn81mVjib+8vHhoQZ8EkCMbi8sBqHRwm5xBL01wVK7phITAtXGoOUp60VKPYB5fQXIAsx1RdjaXBqBgJsraoAyIjBm1Blhv0gaGEPwTsbSbkQsnVRFWFV7yjpeZS/KzAkXqnJnzOx7RZ3czoWuWWY5kwSQKzrw3K23BV0zVfapoeaVBwTWGGMYVyy0EQDIPVVwxy4ZlIzHS7evGlc3MteYSjbQXamIrIC5F5p9Y9BBw9X0gdl9MnUyM7Fl2Co+QbsfQ8MgpGUPLCk06WpeRFwtv3O1Frk/c8QK4zCzsMc4ukG2SoYqLJnZikVgRUdFxd176SIVyF6gVCTqGmhmI/uNC0UNhZf9QUxJO7e/IW681C4E01oBPcURRANbnQPH0G8sgH2JJP6+z1N7F32iG09cWJF2oLSdetGsme+RL2HT+xOiQiKN0ZCfqHMfMCQjVx1IKnBTOtixQ99DPdf2K0pWqoZDjArARI5nrDD6NU1OdHm6viS4kV33MCuTHFMXQYU6xVsGpEpK2X6xI81y7Tq/VniMDrMC/e40KYMoR/aSQkWm+gOKZ6xGRG2OrNGMFfMY5mOMOQ8zEjjmLO7u9RlVKmu4DFXm3aGmogFczxZIaBQf9z4Zhw+ozFxiYWUWkan+sapvXq+LIJHs4ky/3zU21AZseI5XqyDl+AhAeqWZ1YlWqe0NamHl4Kqkb40FMprHXaoZNMqPSIZBBTtYIoa7ux0xsXs1+hxLrHb/qJuAPQTRI8TQHT8LSNKEoGcfe3XuNoSR1ZCBCm1IguHmknOSpV+RPk6zOoO3MkJ1yqkOwQIRWyo4sUB3ywgQPgZnzHkBbBgCmEuXpAKXIJAWWFbbw45BCiqRO7EmXaDcUDaXwMjMSDO6j0apGMHLnHUvmYydYlFjaZ2WNT0Ge9wPJOlvQKeR6NVBNztlk8ZVB19cNnyPd2ClviFcGLUMABhYZlLmNYXcazsUsxEBH7bmolHDDoafqrWuOt3KvokMxS5/aWx3W75s65lqOWOj7YKXt5e6oo8M0GTVGazoh1Vhsjq6vRLIYR5zCchAUbEskDZM2bC0IO36K9igUhd/lcsYOlTYRTvFvTXxSbHnyr0BdYt7E+RfVvemFru+y2MeujSCRcCVXFm1WHTeyStoPsyiO8Wo+6we5jKLZRJ34mH0mTEBDlMLIcxypY+RGXMeJFN9JtriTooqiy5CxLZiAbScG6HOfaaKwbLFigGTeQNbFHHDwGRUTN8Grp1TZSlXl6c/qAweKr4Jx9INF+BiDL1lUxhw20FQAc3m0VSFYLt+FZWsmj5RSlWsTIuV7LRVLz+NyiWQEUU20c4O82jQRAJtKIahOZlJlXhAJGGhuwmCzlzF6wJiatLqlFgSOIZ37bvbVbZLCtAtViAx3KLUjkBzU9Onmt1TMnCRaGPSq8y24ExAqUDpMmomSvU/AhkzO5XU8mktma3YEaEJK2qi6QbR9tuggQESNagAwxgRJ3dLm7EQoVkykViYMxN06AB2HYC91tnoSxRWVN0c23gajTDNszF4IleujRZkdVGvTkEbu7J9UkCRmOR6hacauxOw64O0JIyuD+BuCO4YonMybVwz6DpRQp9zshIXSVDKRjiIlTPTvX1xVPqpKz/nZJ1OM69hkNoaBUNWPZyMOZUGRYSy+JnL1PqTXLEMQ2to3J+PHXbUo+xUTuObIqeZrRT5DpmYRfxD4cTy6rFxaJkjImvUGEmHYcl64OHhwWigybIYOXR4iJUp7YyVMDtlZgCxAvSGuiY6XREBMIFAeA51FOtw7kB8tsDiEtfRncZYKyDB+2CiBJEKWKtmzmtiRXonw+j0EWXy5R0omq7QEWa1azcUrDVOAGYMICONQFznRkPFFTXvNUDkNgaNuUKBFao5T534ZMJFVszVRBiXP1ZYrrRyA7UxYwwvw0YiFt1tAiaCZW6WS8XlQcTqR8Ld/f3DwwMw3FIj5dVCaZB56xJlB5cATM1hVRTR1aUe1digmXw9IBEgFoKkG7MAmp9iBVaYwwohr+hlZQ5d0Zt4PBDBOKfLQ0Y0iy0YkcvSzarwH0kHHZHBUDe6yx7Si8sn1pohDDbL3UVYezspNlV4GAxkhNNyIpBoL5pU5oTz/WnNWCsSFolqiduoMrF7/2jVPOcEYrlGGAnrMkwVorDhfQSVvO4jCyXccKA1RnV8s5xwOc9OAHu6H3JmRq6KB9wjYy7Y2RKBVYIy2mtpterlrKNRyGdXWyT351Hs5pUEa0xwofwR1YEXebWi2MW/xv/NmGmdWiESNmzOVMd6wcvRPKJCGndoIcDYVk3HsggkbKDEeGhDPzinQsooMlUCRNTYsUSaIY1pASBXChmI7d/rHqXiBYSNiEXbNysQSIqCP5WQVc6GjIVMivYJhpnRcFymBnPUsAeEhCAUKKghGnKkyIx0YPVWDB8rYpV8Ac0cyODaVKKyRLDaNKAUPwC6IRR0V+vnXNNtZIZp+k2FUYaSd52QGlpOAGPjBqq2I3Sv7893x7xEI96RKI5LV1zEx9g3NzMrlJHrjAws6vXlGpQ2kqcVYVwqJwQHkImVLS8t4xLwYYHAXCljTIIZuYbbCb4SRl9r0ap9lnoPeuXnwRlKUJOZbphLTbSM0qwbVonMQlgUKF6Apc5G9cAXHl+Ox4C8
IemnheoxWhflKZrjYmbNM6yAXRc+CmbaFT056cIBDSsBupr5bRNTAgZamPhMOfRJSwVi3Z3Pcx4p/mXCMlYXHAARVIiIQQsTgTjEUqrqbckI1FHItUhgGVIDcuRGwwidZRi8yyBmuuVdXaUhFxHqf+5Af21pjQCi2U7IHerWWyMlakZOnf3AzRQfUmlmaj6uuDg5Yw9REeWncgWg4Sd51uyekkZgtwDCym2hW900IgG3ofpQuC47sCe1Uahc5cKy6ef7O/G42br1sVZZHveUSWXncVX9QDnEWn2XPdWQSgzbGUFYZjCJAMYu2JjKr2gFhGu1qR/UemebiYM+5fL7WRwaQAKwdj6fjjV7QZ2jXTZ2lbpC/EgViaNyCcl/3AbkrAnOEau8GJHAoK/MhZCENiFXUys/ZylebTaBcdQH0DUSDJZi99RNDTNtnmdXX02/UFjZQjBRClHoHawuFaVbjQwXclaZgJKNACJyZUasJRJwe8y1VkWdxwTNRgDMnrOMW295dXXb7uL6V/V4N3+NBhzk4YrM2T/TudXO2AurF0oouQrhUkp2BdG3fml064axNk2WKRv7pfyLNNmzBVRoksWQeqq4CMHIDGpas2SJ5CzVoMfS+FFspgOdCsjqs0drMYEynQpBFcHd1DAyGpOqsQ5aeDMv4uCNDIccPOqJNyAmfKsWr0IEEV4DWNLbqU4pdseUFDaCtGTuuncgmUGo50cXbFW2QGakGWYrPPimkmTRGcu8VfEo9CSeoNnj5VHvr50aQPAUKwAY7Xw6XY6LsrswRCzdamkn6cCaERG5AINZ0RcjJCTSRrJV7sWJyc3vMKylCIYV+Ncxk30nmCtW1oAEndGm8HFfnyRtUDX7Dv5khjq97lvYVNn2QB2Sqh6aIHfm8BJHo8syNxyKAoc2LwJVRLwpNMV+HLT4Wnb4AehYtq5tZvYc7mkc3LyAvrBmiJVrzlQoJIRDQaQ+b0UKEBNCTTtzLisuVfcT7qxAxWMAKhW2s6tXUzxnJiIxDRkhBZIEYy23EQknYX6JWfl6ZIOJ/S6sf65eFXQoHxG4vid7wSiEHaj2i5U4kUkmcrBdL5p6BoUidX9Vf0X36ulQVvhPVudOVjLWjorouSNQqK7t0fkVlhJEZpgAN2AeKzM06hWVsG5qZqKSOdOfTFMbG2CWkHZExgKZYxAccVyp4fveVBJdomMouRZZTFqB6bw9zWUsbjfA0Lg6isRwJSWZym5a7x3MRCdMWgUKUQsVr0Sd7phVNr2hgtbUwb6c5UIMpijISSNXSfxCmVJ0fYq1q0hkCkOoPCaCVRezgAFcsRzLMMp0WOWyvacRkSUozcQKs4GQ0F/FInWf+x9tzhU8yZ1UyUqXox2maUPKdCjXYTEgyZu2bUC0x9qaW8cMlG95aSJCsWo6Nb/elzJEpO1ARIjxNqNmGVSWJZUcjqrt1pAVUL/STfOhHI9p9HqOBAJh9Vu1Hex8qVWYJCUsExZI5Moeiy5O0s29L5vWuLzRqFzfd/sAIGZy9s9vISztPG9U+Hflhtsco7Pl62h2qxgPlR1bL+3Smc606s3CjggVP+Yq4s92mlmIYmUvqRRhC4oSwvjVUjL6SMsLdYiBAbYAXX1cEycLi7UaHt3hU+1JNCNixeU4qiXaak6q7Pgwz7VUvRFrIB2DdCKTs5iAZf/UVJwarlp/UdrpAATkgiBLJHPv3T60RWvIm24Q5ZW7fFAqUJHAsl1ASfQyURFo8bOAZm1HwhtqDyC7zRpAFB2qK3GVDfdeFGpXLcIdB7TbvtrFjPY6fWV3CHv15dW8Xl/VBMVkRATrI8UWk4pyLjcBi4DWcmTDLSUyddMYse92dvjcEwuBoCe7lCwK8LZQMSN8j4MzkRuC7hNB2kRihnWZUNxH1u5uV5ALTJW/UJfLKpiqBm1r6nJ9ykAYxqY5C3uowCRRhIPKjsqiJlKiWkqFFB0hKyBFiLHKSFHaTJZf3S8rFnkNxaMNoKLiELlbFw4w2rGmDqV1UcqrgiVu8HVrUeeA6iEpbojWpTLrJNSxXscor/YF2xrXz1eHUzNEMpEsIkUiItSP9fL7F6QAseqLeogVsS4Xh5l7cpfjOznkKu+f1bYhyNHM0fZHWym6jLqhmMUk2o5kinYrc2RU7mJFc1SnU+X4ethugGoizgZQsyKi6NJQynkbkAIuOxIW/B3dw6o+VBWgkKDudS1w05sL+NRWRv2ewuEmorNfm5KJQKv11jHQHbsGN7WBV/G2bN41cCWrX6N5mQ+FbMktx5iNIBFgdU8ykUZ2tUHWGvCKiOQJU1mkzoOx+tGiPsMwn7nkmcUKSgIix1mfr6vhghc+pviDJacPjOotUGZFtRqRgEp6YkKKtNlxACqcwclHHUgAUjzY9L4OZvQRzHYx4ioZUyxwfaiKRFOSGjrvpfbSZ7UnNFe9uYJOcQlU3E+U4RJfJFSvhzqNtYO1yTtYBzbJeSl1NhI8nWwewu6LpZ3amm1/KxaC69U1UcCoapIRmWLZ5HmMuZYiryI8h46/RoJiRjo1lC2Ozvh3f2qhHFHxi9UayrYViFYHA47sYEmsKy0y/Yb4Zrh2i5sOfmrEl7yhbG5xNORphbiYlVvpCE/jrXCtZO47g52OVSJVAS+JHePJmVQZpG9Tlnh+OUhCSUvw5jSjX0v3up9FksPl5DJDlVpr8ZMVy8zcbIcQnVLrObebxU7L1wpGurGrE9GfUKa4IbFyi2oopbkhVW6ruKdUUCKEdhSsFxJtNek10Noa6Cejoh4WOq0deekrKvM11y27IoGlp44uGou2p7r+uI1TdhzTL09F27ahCCI1H3a7MgAsZ0p0flKnkNU+WGFd32MgNS7ePSL30JLa+KpVWr/G/u+2JZSEbJIi1QpPbRBAy8MdevRHxM6B+oiiTE3Fj9G1MYWNyrbb8fLlxS4nYlVcRqAGmoAZs4ugYPHdSkTlZquqZGpXrKJi+f2Zo6LY8pPYNDLui6ELhIiprICFClR9JLsaq9bC/kgJtc0ojMgEuBhqBDnv8k4vkXoJVHKSnYIOPkGEgbF07jq8LIpdoJAVNYPVfCtUM1p98kqyM2kWkWF6D0g7p13mNiAvfeWVZ546XO0ty6zLCPZHzoVUt6hYRmCxLpsnVo6elJ8ilCAlpKdTf0/pohUDVCnN5hNXuFHvWaH3blS4XjEZXasSjwHSwJUuxbZb7GOR14ev6ngW/42IJLwr5XAfiFgxh4/LPJrgU2u4qysOuMyoWfSMjsjcRu0KTxTKa5X3bNZ5Xj9abXMD1qCIGNWfCexcH0LgpHCSuPqgigUqMyi0OetqJbXVOVDZjtbzJpZWiB9pSDNP8oigQ+wwJ0GpLsN7sFvOuePiwqNKeCUjYshAIRMYsIxYWfyuThI4ux5fjGCrp0BEf/iKLnTSOntK0nKFyiysm9LMehMeBpUhrZmQdGP76MoMGq/bZ0s5t/D8trHsSnAB0eX9O4/W9683qm/bbTagwL5zlvq9myZXYTLCnMpJ6Lazcm1lsZHJ4WNHiygao96wYIDmpVXsIY8QGQbT7Nr
GM8TVkqEKqvFjP0MpHURDNfugau+ILA5ouaxMAvd354fHB81ZNxg9czs8de7uy5hdbejEDAAFz1ifSKWAdeJr2TRCbiIMcBBuFiAwFG5vqGG/kxGrQAkabqEwi0xIErHkONClMWTTJYFtCGFK0VB+EQFmgBaRInYWtzLTzaRIJ90+7toPWyJBVjt2Rp3SeSollEqt6D6OmGyyVS2XkL99dW/BSWDt5hGk+BxEUc62CdwPVie+rGmZUuqAt6bKhr+KR9W0YNV8bhztcpqk1qDIC42F110pn2a1hKqVIFMjGQABxlnGiFSXqo5yH+dqAK94WlYjIQ0c6V8ljGtXXW5AZcVylSWkfpQeDHh0XefqYbo9UILn3amPSrF33b66rG5+j8zAQsjekwCdYFSOWcvVAN1tpaDObcU6DbRXV8ON8FpUplLsm70XMlZsn2PqPMwkTHVOyaxaKflYue4bTCiBXBXdKsxlTQarpnPIyti1Kljt9s031zI256DeohCBvne4GQ6Bqk9U4jzcAMw11woa5pzXnh+IuZMVesvqqvKqzcriE1RYWyegTgLBzIAGcaodRphVOSEACtCrgoMVaRhj2KpXh8HMZgTMEhn1GRoP1QpkAREqeQth0chrTQh4XBcZuDRYEeX7A4JgiLVBJGFuXGuCtiJcSFJHG0xcouQJ63BHJhOFT+naptXdpxBtrLDciraRuGpk1Fck1NHQqA9AYMF0IiLpDGkWCqSkUkaNBq89r4Spojhdf6KumRxmqkONia2CBDmcClg0hA1NTt7X6/amVODbXyJQt4fNvGZMu/UVAJiIKhDIBieY1HS7KhMXbi7hnYgA0w0RFhHX9AtgroyAnW58Bap2odtYj9WfNrq4Aur8RSx3FIUtNR6nsrJdSASoiKZgO7VIB7zI84Gm58hQh1UkpwWqH89wG0rKomDLBq5FXFdMJqaLjuMK842X9F7oV1hVwoUE4eC4klxu1rqWZJfvwZYFBKvTq5T7oretVjyk3CqBJaENKMSIQF6BAjNTH31GuFt2AQ9EXqvACkPQFKksVJ1IpPVOd8xWamgRsdYctpV32E23LVArGRLdPSEUbcsrZdTGKZ7CbQqB6AAwUPPFuKOXemk9NM0LWSVgxlW5mJ4UFW5n1Mnf4Z/YXC+Rj25KNVRa1xyHFWL1qIxcssNZToFoLZrMdj1NF8oYYYsIq7Ys1y7GVomouDfamFAmuiqJai3MwEIBEn17u9B7G+YoBFBrh5rZ+kPqgYq3FxZVyyxYoxLM269ranljU/YypUSxiXaRWTXlOtwZvWPVIG0bu95GE0QD6EbKUaqp1xgzqHbh7A6HXQBvgCgXOmmTs0YHZr2VQCBHq1jWkUMkOBEsrRfWGCgCsOzCNtUkQESXDGiIDPMBYKqZ0Gwdx+l0musgrD/AdamOTFuSfmSBF5lQ88dLa62LX2GbUwkaEkFzo401c7ONHWJlBYAZSgki4oa6EdhiC6iIWqGLpJ0I9myYmjwzO8pyM/nxREA83RIBsxsGDpG5Vg4Po81VV0+6ZthhMcr/R8HHpBmq81CyHqBkPlVKWGkyIGYemYZZ0EdVIyPh3OOSCp+7iZ5vAvjKEkEaWBHxmmHWaGkJoJpML0r2DofoPoouwBYn71huT2Wqe/VSFrsrOB2Rc1OnsiqBQDXm2K3PzqrgJHv4Z9c/u6P86s9V7uyIuE22WK6hzLtobrFKx4ZW4m7Fa4zOjHYqUS+25TbIVd5SUE1FjFHWBwKGtxBh4Wfev91nSbGdoFzJviZ4zGnuWEvWMLBQfelMM1mNykpTdSszclZdoOB3rb3yH5rFmlCw0hMzM9v/qDKVqgp3ytfBi+mFmvfbebygQ9CWkndjJND6A2ulDa2XDB8buRV/dNawp1SX+CKjG/avELTeRi8YoHVDsc6wwJz6CWFlaRozXD2COhTFkdbCy/r0mUNlqblpEVUKRabV93epTPevFMh4jakUDgBEWImgFbzDMiR68IIkaJkpy9iHdhXgZCZhQkRRx4t6DIpBGg3rERQaYXRzxkyDwblyRfXLhspA3T8go9tnmUA0I7I+hZqlQtrPgVxWV7X4YGFgBpmAuKE7e0YnHyqR16BgFVoFBjbwcVPmrZ0tA92OJ2JVgKJ7kOrCDYg6WxLbCsaxAXWUebnqAzQ2QaC7J9BgTGRE1hXSS9I6mld1Cr1hWmE0ZliDOFW/zFgOmttaQRHE+rh2+Kc31adW5h8stqBWwhTzqddyIkVSWJltKDBRYzoL96smwJRBr9KKws+KRRkIGh8vFz2EdIJoyFh7smHnJ3UNTmLtCkLsJNCkUdMpSiNa2uzYnduyP5mYir2twmFUvxINwBizOVCW0qiiBPcNLS8IvalsFoXnuYl83s8GCK0VteaIMAPYUwpLa1QRE3Z8EVLqMVfcb2nbU5kxZkAkIIP6CEnkDHeb2XRP84qojRkLTbdlD78OwMvMWjLKXgiZq9kbzAxToKz4lzoH+tAWCUOaD4mHlMKfUO2ydabumsw8wSKjuGodh3UpqqJ5kMVqUmIOMX3r7qOQR+ykioDBrhBqZyClBHAlVZT2i8oQfYz6VJGiiK9ye2g4Q4+gxdV1jlGup7BDZA3+ESLbuj+NqVg1aFFUtTaMwoTBFPPfh69IE4dHD1Oi1sqdK+2N5pfpyleCbZaJuWKMMdfiqhDOKXXIZYSZrxlsrsM2LHNOWfAqK2xPrIDyZIRhIhFwpxmwruUppEnCIhGBseOZst0hEd8ARl/GakNSN6rgHxOkA3UlMgPn03h4uCgPiChljWE+92QMRFnhLCUwKIUARZnOpLsHokDE5oCWXG+nGBVcZ4TmWlQQN2t4hw1MFg/NfKnNgLVwOhqmDImkXxuqQRX2k5mZpsnwgZoeS0f3kBqqpBNqOK4SS/Oeh1oHwMfLxWlYLGnXAICVS9tUdbAo4p+VtMjw4RExjwPeEu3lm1lXP1T+CSSGrH4B3ZFmcVQPt5tx5hIjD8BVJS97NULJk4Icp3GmucFyrrABSw26CNVMpgq5gcxwd1MOgSlHOHxA7cdV+FdRqEDIJspl0SYEXiUHPFbp17rXSUwJBIZmxAqOvpFlecmhsINK7sqCidkr9t5cBFdMQF2z5o5YoKWile24pYMjl5uKJGwi3IkDOZaJDLcgtYxIiH8g+2mJmAE1BlRDWRs20av6rpLdXS1EYsZUKBby9gKV2r52T0Fa1yK2CJQsawDSLKtGBtrlODJy2BDtI9iktcxtNzvVz537WpmOuQjCHcxEMMQWSjccMRMYlvVQWYIlsjqpavlWAam8Rq5ok2aHLpT5bA5ExCKoxZFUgsh+trkZRqTLxxrVm8lCxK1UVtDwNcibwL201CMzudYKsyF8x9JAOEZasGsJC2lmCHHQu8ga6TbEvY2MCRjNc0sJSEKnMgKV4Ws95WaZ0xABS9cTAopHYhS1J0QpchpBM1zW4d33H+yecpSHUuNKzkp9loZWoV1XN0Io7eyqYdKRq0KWbIwwMt09lXpRRIfEsQiRcjICAxYRJ7M6qLDhDBaHfZhlRV1qTE
5kkCMi3AcsVkiIz5UTzDWVDlam5z3ZPa8dF8NGrCCvemcowURDBtXMZiX3a7BoCmSoepGJhKPq+ZT4azZsZpYDl4gBpeMzQ/3BcTLPzAwLxBqZU+9uwnekxkODZZEQprgzsYFPJHIU9hDHAck+FApyBK1JEmlgG2wLIAcK/KkeDFbS1oBDdqU0UzX/q6ZB1HcUBpWSC0TBKA7znBObj65EwMAonqNILpF5dz5H5FoZcRDeBWxKaacKj8L3ncKxUeTyataPrc8SAYsOCdOFxUtAh14BVRVOKyok4Vn52gQ0jiKAkRitXLlWtBNrkVBBaFeguDxDdF3RWIKuTbVucl4Hm5WLo0ifgUTMuERxXAvJUrlts6aL5agwtm+8loPH40UdxUmsuXwnQNKjKSWgK7wTAXObK1wiOCAWxMsOZf1KbSM3zoMufBW/0cdcE1jH6qJuyulk2dYC/QKZM2Miz1AtM7KbBgJJYhV5AxaRm5iJVhHesEXn3/trwy3KfqMi05hzCUWc4rtmqfWJHKxs3qxoLMMMRJAjM4GLmweRNo/DzRcncy6cNWr8ZgOrYlZujazIUfe9PlMT4lR4FihdyZaCN/YVQyJK7KBzARqcNsiHuQTV1luj/pyVuN4kRMaknIZAtqITZqvm4P/XVwllyW9NwFQowZx0YyKXEmBW9M4m0/boKMUNM2OYYIPmX3XLpgFzzs5vRcDOXKHJNXMWCh5AanQebrLbypNYF/D2DLz8g/uH9EcZQ/l0klDHhy0U15KFYmUANGdVd8qFBSqCqjOTmR33Lr2fRZh5SEJRQoRAJldpRCoHyybE0ejATYeZcUVYYgJmQ1hjZMYMd9G3q9InZyztd6+yRvtflITAldPQy5TdjWNmK5ecSlWQ9AAz4aYu0EBiLaVjsc816jgrHair1Ite1t2YE8NBQdl6cas40s3J/TsC6jclY4PbGappoklcaO9rKM23rKdaEtAIuNk1g2e9loKVVYUH8gblNoC0tSaQwz26rIlMDrNVAOUqaWuweKmmSluIlYVszoTcvD5AkUcRNoDlVy0kTyximCTwypTpY41N7dETyPpFCIssB8su0Pr1FMrxMiIippmtOZt3lhDtu7OWanfbQLl6k4T/u0mbQapMFPcv86oJoNy3gtqEcu5sOEKIYx+BzNBtA9NszGNWvVSQy7VJlNlqcIUT1weqS0ygmt6bbpFKWWhAehNoAaC7fm/LewgxeLPRtKyLIQXlLoPV7QPnpklnZuRiJjlQePuy2BM0AS7GdcyI/Fgy1RakCVG7nlKkJ6scuNpWgQ1IKUqobDFTd1IJzP7YeihCYmhlTtvrxGq4OHNfJIPwhIScXVYIysyTscqwqKE7FT+y1zKxAlhh5T6y4e6uuVU0eG32fYnEkOHK2CO4YrhfjoMr3EdFfcIOhA4XqYdD4DCJyAuAjNMFhyHyUey56TEwxqwVzECkyB1o41+IqdCq3h61sDZFsfaiiii7VN7eiMBiAu7IK86RkRMi6qgip06JAOA+1ppNSauLRlQXXcYs3qkuLErnMSuAziv6eA3iYviIJcTLbLMdxXMnxB6aanu1DtAUtGtGQtaNiooutEPWETwzlguXAnz4cRyoOYMVGNOt1k0yNCrsVKipk75PBItIK49zjSt6YgqyeQAoqmz3rV2JcnoCBeSqKmdmhncIGJFj+IoAKMboDncEKsgjaixiESgS1TJuBpU83TWBBATdaMyV2452RGzmnMdxHqeIjJwa6ldxdzsXK3OjOgSye24aa7ZsUpciMiVUMGYwM6ret2MFs8w1aCox1Nkza7AqMtXWXGd0bXQL7b+zXPDJfA3CrM2OEAkFnnD3uSah2gfozLg+CzrCEEWkr4N2qokB9YNloXWuFeu0vbMOkFLqtpuCBDElEhIcCiA11ipl6qJE5eY04F3vfOenP/0ZuCWYsZiQjFrUcWPbeYot3ziNplInEmcgJQuq985cRCKnlF7kLrKqqUNHDv1oIuPBrLgO2eM/OkqISLfdCApzI3LFUoy8VCGW9aBu1qp70WQp9cvQPXJlJGiXOU/DCUbM6DEzrFuMZKqWrafBlV2sgKagYNkUlqJstPZNm2YU/r7U7FFdipUtJ5nGQRuodFWbvFrmTi9jhQgULJBdmG73QRY3rWR9geIXim9SVL59TAQttlepcacm0QOV9LPaEdtnG26oObWX7OE+NnxEzFzB4Sf3NSe6zRxXsZ7sfpkgqYpmaKKczNiMGEzN9663SgvMDKOt6vmpX+8wtgh6KAYWgVCEDsPKLKBYD7jNcj9GFEfP2qyFROBJCy5z189eve9N+NKkOvkxpTEprseCdrmcvK51JCLXDavfZNS8mLy0AMjHUak0sS50mwxLuMV6wfSynFHxVVV3XlLm0nmrFhWd4go1BB/fuCeIXJqmuGqu2cUsG+0hEvCqklf9hT2DWkvfx7zA+jIo3d/aAZt8kmp6JZC0VZIgupohJZXQUSiMQTCq5wMqlwDV+L+JC7l5XmFso0hsx9gUg2J/A0U3rE2UFyk8uzVVFLVls5lKGiGtQOKbtb6a6+t/6FbpygEwFQ6EBBYVILrsy+52zIrEbuiTQNFwWLMBaus2+jtW5gqjwE8DlqCmGdXfAUTkMliCM2Jsi5QVGhFUu5rD1GCfsIhwswUk0hrxCkBDx6Ouckew25Kg1IVuQAIShKUsum3KGBMIVuRBo4xfhSO0TjoUZ9QBuh6X+gNRqigygxFVQm7jBLFiLZBwt0SuiOtHw45B9b3dYVzVJW4LThpWglI9ReZwQ5Uvbw5Cg8LZnO0COEAwQ8Lmxb7SaMlqBSClXB0f/8Qnx/kU3cxD6qN0xIrNB8oDiUjLBgGIxSpwJrPFzcLTNDljXWv1ay61BRRzyqKE7VJXMnM5vdICSsdP8wVY6CGr6RgRS7n4SjMz80Y8yjJaZYkhuqHQggxgFGXaaAGjVA+y7qddqSsV2un2laY2KlbaW3jzL1PBIFAhi+6q05tfHnX0K2dphgxNwUJBLzQ3AzPWggYLZhk+8e+3sAPZ955sDKRC29J4UUQjFUiU5+lLTJVpvfKGaqW4kZZCn/OyLLehwPXxEzR78fBimJ3GuBxHzGVE1qie2BmAma1rx0JGqAcX5tBjTksHRrJnUeUKpWaNfxIk11qEiWKWOx4gTS3HiwurGqfKlZXNLRLETRhBFYn7QSLh0JzUqJPJ5ubVhaB11/+NEa50Z0UkYiFWxpxLWcBlzeLi6jgxI9Kvi6fH0twKWuQJufLIIxFhIz09kQsPJ/NYfar3iWsCaNUTZMWbC9Ya9zp6kcUK7t/rfd7mf4BBCsJTywBRdZsxxpwx11QeM2PKEGem19lgwfZVgDJI23VHiqpRtwPtyLs+qfqXdPjnmoNF9SwSZQeObnT6SgAtB3hNplU5vaq57a+SvOiMPyMAJ9ol09w9o4IvRYjobLWyhratumFfAKUn8tqpqOSrDgvAJpvU7ZdMV7vRjow67scqsPc6rHAHG4Q1ebvOb0ZEW2ArB8XoiWYqy+bWowASmoiliyNfWmxeJt0sM
9aqICAiODSfRAbgZatw8+xIw/UivvR3isOQGOM057zpHi60RrGzmeWK6sFDSKSqfFc5wcpAK8GuhoWiTWYiV5iZ9zSC7PoMW/VCgYLaNkvPvCGMBiYrnNX8DyGGddM0q55lT2rd0TLvRIdQG9RJbyOtbIbXmymrYmCuWAJes44I7DxCxQgggcBysUUig9miRzESMFvIVOexckOouy5sSUKaME6kJ1ddk7zVUwhgoJLxKEJJZkITCSNVqGhLl6kPme4nAGsuIlwthAKwru25gSYPiXq3je72RxmLu45rnLGIcD8xaor4rlfqpOh3LSsY17oK0pAsf8OtAmyqvBSBzDS7duJaWjB3yUThtdXH7aiFpZY2yEkLCrBSuqPA3FqaYZuLSlPJHSKxnO4+BLschGbTRLRrKp995Vq098pKj/XAyiolzYHbvHDO+fTpE0Qej4/nuxMy1wzAIpNWAgi5wn1kHnEtHVbrF1INdjngRLkxQI2QsIAE4tGiclYATOH1GRvRFjEt1lDX6jLQUGcxUvJZGRqvygTgaGGaWhDh36EbtjKoqKtqATIZ1+h+uxCtEEXCWmEa7VchZ+33hn9Ulo6w0/BlEYAHPC0tL5Y2Vy67wI6nOF1iHuv+dJ4x52Tl21fXlW0Xr9qbVXjorvSC1BSXZUZHJDum3n4rIipMsPYV4JaR2k79al0JGZ9syyu8suxOJw9FNbg6rSaC7qLmjXPSSg6z7CjCIL2O9KRqEU2S7p/P2+AQxI33ze6MUFlrTXePCJqZ2ZyH1VtHliGs5uyo5Arbql4DzsjuzhCElEaLNbMiFbQBkVguaZniA3VR19SuTG3dDpXqxTIWhpvZnEsWkbSAVO2xl1sYYjbZR6jSLtKzTRBrP21GmHE0By3zqqVUAUcbrmNN0nz4movqSCaSsDAdXFQxA6nPEiylzk4GrCyNoHEzk7561Q0bR+8yGkUVXMiCs2YAlkOLp2eTQavTvl2yYI6KSUqCzMAlG1mJkNuQoVurPkPdiYRy6+6oZzelXHsZOs5NA8OAzJEMMLojdJjP3YyD2HVPIHff+fWrKq8lFY0+ElU0TISCiBRsqUMoOmFVetlhRewrlgX/CMeTTmHfPSJD3snTjs7utA/qPx+17rzmSab12iVy4HQaEVmjgsFYE1KPQaWFPpxml8vBTJJN9a62zw0xVWCkADQ0qzEyl/G0Eb0hhnbbyE1U0CxJPTu6qmmq5GUWSaucWgYy5gSKa6fEqEpw15K/3oK7A76D2gpsJ0IDOjOvsUBfsKxgJfUDveXlpFRcLnhJZY2Gu4pAVWBDhvTq+tZanaOrkKoafa1ve+6aNCBetlxL0rki4pggTuO05oy+cr0TiQSOiw44Nnkn95QqAzgzbLXMSCKL7atZOEFwJTRIJ/T2REq3HuXlMgKIkSMTUc8n+quldHRBCT8UHuLWSJNCxJB+ceiS9N+xy0GtrmyqBfAlpCCDWKKmsvPvYsPJZ15/WlVvTM6TjQI6S43x2Qnn+yfnyFc/9tF48vSNtz998fzBeQfOjIyMmutULVftwXYm2N9JoIaqSpJVnCg9hWJgMUFWYLcuCaa6EkaYyCPl+iZLKaIzt46cdVokqcuEa8sr5gTRLoLsQ7OR871yCSTMlV6MMeaaytXYZlDYYiAZ08wrQcy+G9q+uqJZ2L/k3VP+HxpxJqe1YpoN3A5ZAc1FW02gX6Dxcx2O8iFlxmpHi7ZSbTjIDopS7jOqoq5MqJPCUBsAAE3XMYGpCBpC/pwEKJVc96b/AmCRIYMJmPfyzgUrj5IpuV8p9cYELA2BxJpG9mCauA2GDDXzRHMwI3E6jbVWnZaX8lurZj0IpfTEVXz4GqFulSoz1+ATQa8sBi1hQFhijBFrwhCZK1ZxgLGLmK39qSBOIZT0cSJE3AE00Suw1Jli5iNyIYqHWOWyQmEREa4G2QJ5EzvNkA/RkVaz57WHs85Glb1akyHbl+8osE0Nek6YflWBAtKYbb5kNmCKc4ZEi2bdjUhUd4xyM/ESIxmWFlXIiT6T8tElIhPdq04sqn5hUO3covLb5DCkn9zpDw+Pque5jYfLwzBnTRYKjjMxA4ow+9rt66iqkWbUDo9YQsl0qsqwJbBPUKrtjak8wKra0Him1rJO/+4Gaz66Th8aQItWJtsmQKUsG2ZTk+RA7ga+hAGeyCpVcZTVrYtugEYJKtoLBqNuFOlm4qdUTl8fNcCtkiESPyrPdVCjsiv67pxoOwBB93ntjWubKEqLFjAyGC1b6TOqRihnU6eNMHDFIuG0Fy+em9kY47KWZVkHurvZvFyGD0QVAVM18arA0mixNPeyOsJT4iqavhyQRvhcSawMuHsiSommHyAzpBYFWhpgHrlyzbOZ70CjQvUUrhfiP5o4z8GE2YBhRgwfEaFVNha7AoW3pRoS6mIKawuwyvNL8JWqoZVxbiCi5FNxmAxMTmRYBHl3me95EU8/8cOv/Vf/5eW9X3xcHn7BO7/kQ7/mV+PypvmTY12iHE3euvTsTrlEVPCsw4FiIkTCCRS0L39jRJjbXFNzMKUiGvroAMzcZMIzjVU0qbeXQ7fM5fBqWMoCnoOMWIirEH9Xq4ulVf6rc6DKureF7xurn1wRwxjIyAWaJxIMpqt4SKWE1UYvo6lZKzWdUJJ7w+cRSYxhl8vSiVJrQK4YpxOBFSsLe8c1uGKjnrjyi0iLGRpsw5KaK1C0Nqby2mobdHp0HyGYgaAjMscWAmgqT8CAKYhY5su2XQWsqZGFlKHkAySnJXiyIgHQjCu7njXGZU2iqsiszWchKEslwFhSYlC12uw41slsoR6nSF6Skdiq4NkFMlnaMlEFyepC1sNpDBqWACWJn2TiTDuP8/M5YxPlpaY5FE9vpI1aEEcRjkjJxuTUO6dlhFeJDQiJeEo9vc4Y1XdULUZZxDHhwhXL1WXBvrAGgZyskBkZMaoQuS4A0bRE9aFn8WbU2xaylNVuQDBWzOyG4FR5kYboqgFtlRsiwcExExUXG3OPEUqJ3gnKrti7xNKResgay0A6sNDHkqCZj1LUGIAdK2dOTQcx5LwsmJV0htl5jGNelMxxiaKgZAlBwkJII8zcUoV+o2MnjZmZRayjYG/FFRp8WiTnWmlFUq6WZLFR2rE6dKZaQwAacKaE0qgouI6eEVgxT+fTcVnulC9UV6S2NlCdwFEaNEUfbBWEMquDAL3Ya9c0gzWMVu9eE3WEdfO4rGZi5TJxwdngleDQMNoY4/FyABDRhkAyC4ToiUZVb1IHGQMJj6AXPAJgxooG6WYcd/BlFhN+4onjMo/h50mcAzMuaVhzALEADZK9qhV2/hkrxrAVK1Y150WsyEyar6nihArq4tvLZLiNWAuI0zhn5hGZBR7qepbIkSqyZsTO77fpJJIm6pDD3O2YM9KTyOPwnv1pw1WMMXAVQ/9ac6+00xAzzGwlUPos4XpxqyEjouxGhI2TLSaaS3bE6f7p2z70/i/9Bz/4/PLGs6/9JvzO3/OZD/3QO/7O958RzxdzzeTVrQvHliMEOyeQ+2JdtjRnhg43iVSY
5NO0/RXBnSOBlNy9LLe8vFgj0xawypPTebPKM6iYDNGBaBBJEZKrFtiTk2Ma13FOw9TEQfAUZUqAirNM5zrIIvbs35ycbZdCM8HU9fme8ItCEx/wRIwagCyDsHEXhihs+8RlEVBTs1OX+q9J49256b0m6arByDDG9Mh0Wqtd3EZFM+wewFrv6OzOUsVTd9IfUy/wcA6cdPtThY9FFa/KqKRoSE4IUQY9Bk/+s9MznOfM6OvXPsmIyGDWZzaGJRZY7SEG35/q3lznvl4Qfh0dfdZy7DWpwicqa8zlpWhqgJZoiSzptZRTiKqvMUKaiD5AqSpwv9i7/aaNfl3/8/H/3wZ8PP3p78/X98+vYvTv/Ff3GLw9HoZkANKQEgd7Ll+aVe/OLWtX9y9+XJ+ebD78X6Ty+/t/q8LLO/94X7y+ft9xYf97+2sZ70hyWGDpN+zrVr8y9uvdmT4fHz97lZTm7dDTeni2fTm+ufF6e/DMcf9d8aHBQ/XRyvF7JXUXE03sNc+JQp40wL4lykUTgHhTZCCksHY4eoSsoWGO9cjBJVGA6qaiVTTb6Uxi8wiBxUzG/YOY4hEFETGlP0qYXUEpk0iMieLAelYIQXlbJXEGNV14YSC9Dzvl2vXeYFULDFRflIXcBjwlaMYWGrEBHxRog2syPrBlQBckRRfZRg+AsnvQHXoXU+izHaEcBMROrIhRDMhSO9mpKImCkbWUQKzPk1qPFkDTRmFcv3NKmQiALsnECZHDQygZ0ToaBqhmtGLBAFaYLlDGeDwqkXCszesbOAYGYSiET4KAyASVTZLDOZIRIpmXFCQro25k3dGfvYxk0obZi9KlSAwOSMVh3UYis9CRPl4FyRK3p+6CQX3nBh0OZDF4aEAcmQQp982ZLjXq+/4gZhgTkGl9yrphvzozh/9szfFFyElR/mT4sDOsrnN3b223WNfiv58NwP5jI4qTdFwuTak7JsY+3aR5GyGEqt6na1Et8va8e8qq9lef/y9KzQvM+rnZ6483A8ck/WX/Plv9WTXPKz6sNrWxuL5Sev7H3Rex+jVA7N5u24lnztY9W2NbSARiDkSiFIQ8hCaNnB1kYqyDzHaGs6TpHJaiijNzDC/5vvHE0PtSd+Pcd/+aPTp3erL7zxd2aH/7I3O2zeeHPn7/6jGk1sFsfhk956i3vDcPs3l/cfzpryebmXV9PNcmNZbx3Ws18bzl69M1pu7uJsjlnb9qXOWl8rLWIICL436g98BV9gclwVYQW60IkgHjYXh6TSRORF3jw9pSzLhiFkoWyziya4oZcJaIg4at3YXfpek2/XOfPmXrXiMFsMswltyfTsSTbyNHI0EQxAW8VglFF2/uzBD6/zUVGHHACqGyN/c49O5dbpaTVEHV0+eXOXdKMZr0OMDl7Wir5II5yrBC/iIcRwjAiwKhM7iDAoRGhQ8uyYYwiGytgwmpZdBiYSG8/K+EMpKQWdqknVSjgzOfJKBoKRZxeMeqgWTieqFFU0WiusTYicRmo2uYK5CK3XNRjMZEGkikTGuipbCaq23AM2E4gEmBhiLSLOeXPnaUNg74jI51lso6o4CLyDtD5mr1/uf7949PH26npdF2GjkLbRxpMnHz28846sXWYSkIiQWnYqVMXZhJts7ij5bBCz891BiAQtJPdQXAFo1nVH7X4itmQFm5BwFVKoHS/bzIOSO3wH82r6yg717V5BRWIMCkQRjVHVBh3H3nvvXacQcszkku2+XTgza3IKJS8c1xpe9/vvNUf3+OQr4WXPLsaG2avCq0NHUe0YYPZuDbzPSMEoIAo4icPwy7cHoRj95j87/MlP/cURerenj86aH/64nB1732vcBlOZBvtC/Dg0k9z9g2+88voRf/d/mhz/hdCu8rR6e771t+Jb325nfn751w8fPavKarJ9fXS4eyQbUsRh6GP56EnTX4yK3mz59NbJ/Qndu3n5k8vvz/zP4+jv3cyn1c352+vB5y9kcrS3G88VQ2AqVHMy3Yco28bE/FLN+zOxvM2cmZN1ZnLAsJgEY0tdsZ7M6Z+IPJMEIUBiHA6Gy2UVVLzzIUoUtYE19V5Q0QhVxw7ger0iZzIBW+hCRb33xnU285MXCd6G81zxL2ANMzHIMHPvvFW/JBZEsonuksBUKMUXqiJK7Jh+hv1CycIxoaq2EKJkP2q3H7MRH4igFIhIQIJgwJBCnCAleduawl5LMu/bYLYQ6hmG+potLhNCECGFWbxqygIhm4Bh+bUpy5SUxMZwpFdRtUdLnVKW5U3beJ+HGMyvJkJYWDqMjJFixiQJKUxP75gY6kXBlAMZ2EXJoAXQRw9SKIYI47UvB2G8kpHnoi1HvBg2fitXjbE63+Bsb3KggyHPZ8337p8dTW9de+XWsr0HHzOX1X6cu8PmfFiUk3J02OQrGUZsnsugyjdq7tWt39mcDv00FNpkggH7hgfgqayzlctJ8t5o8tWvLUsJb76084VeJSQBJ//j/3jL4Q5vPIDKJxd/43fuzHbo9P0/8699XcrP5/VUfDHdPqiaVit1K6AAGsCzBA945kAcVdjiRLstCQPKJswmZn4hGwbgFz/5Udl7o61HOJnLyi8eLKpJ4W9/PfvRv8fPft68/aXms+OcdcgXFxiEarVzsHPvpb+FX/zF+dYrOe+fHv/I3ZyU5e/+8OIHveHTG9eC+ixuNmEs+ajQ00Cu14fQmTRNNSw2RvWiqBb1UPj8Xj7MUbS+IxSgod6YV9slndR9CtpIMRqTXIR+CNtCI8KWP1zy+JWvvfby7uGKQ8ix4vXpYpBvt5txmT13Q4lDuD3WstkswuzBO8Xp+/tuVWCVwSNevBRo2x88Op3toirq4Y3bn+Fx9t2PFrsHO14kxhZLkQqSQZyCRRrOOFNEkQA4JC6ihSUFZadqtH9ndy07qzpkZJ/EzbdnwFzJACRrUUqnLzkk+FZsqczMQSW5ZkjHzkViJkKiY4LGbsllD5qyZ2bVEM0QlxTJCtkSHux0IenIJcrsbUELCwrodDmZs9w5COB84oWKRM6YgushKnPwGtTfqCe357OHW7P7u4OvPx81zIwicJbLOuY+i6wq7NJKMyXGJB5UYqJ2x55CiH2yaE80UOOYiJGE5YUlQud/gqTfMY6DiI1c+gIW676bmG8KiK4o6KmWQ5Nzkznnk62AFaLd4ATPznvP3jtm57333oqvqVaujiurMqSILCzOi9RoD3p71+rR8+HsvkzfoP1LDmn6ZnJq4ljV1I/Z97KPS1jzJoev8rVsbA5vbv3Tt9ZHF4ff+083//E38Lm/d/jP/9Xwoz8bXuPV7pYsIpatNIXDGgAKsBf52hu3v5S3f/UXs7f/p1t3Xp2/1xzwAaNpf1n3Pqdv/cb27FeuH/0vm9W/mDX+cnR9TDey3rBZTCq9nenlZPpG/0Zvujh/unNydvHD2erDOP7Ky17KW4s9DPhwfbqZz3OKdS+nXNizgpwSKH18SmB1ikhIic4gS3oGRWHPkWG7mBg1cywiKRfnKtMyrTIgIaqqkeGrZWX7CWO1iAgzmEmIvWMAGtWDLeUs7
2VRU7ho3stj1LSIiSaG4e6id/dKYn7ZAiiRL67Mba4I1enzTgQNe6wMC08tGLNLqQbayQuI0oDOCF2iOHUrkm7lb3VciYkiNDXTrOgM0rovNZ2aZ6cSjTUFsldIb06UJUrGbCohujqJoJqyWzQGQ/dNuauKqMmUQ6GUApgVEEQoqxgobSCeA0myTyclUhMbmqpalBgEl3QIdsUjhcRocwoH5NxzUiqGKkPBRha3a9dXbLWyxbrlbsXl9ic/z+SyfPXNjVfGuHiw/P6P6l/e88XOWU73zwaT7E7V8Ec8GvqtaaxzbP3ho/CNuy/NAs3bPGBc86jiXt24Gplw6fOqNwnDto8QKZOQt6M511J4V+W7Ob761lxmy0KQX8cshlmuxc3Vh48+P96/vz5C1vveH/zZb//+35189ov3P/rR9m13MdwYhvXMV76foWDJVXPAKzlC682VRgBSdr5QNVGz3W5BVboYKSKK5lCviJ5PK5x93/k3ctl//QCv7JfrabXcvDMvXimWvwzvvNe/81sxhr6fImyePfyw2H9jfP0l/DmPmufPb3+dDyZnzSfDUaDdb70bf9E2PxmOfLHV+p4Pl+vL6rKdNYiZz7J8FtqiLuqah8K5dwMPz5j0642mV4zyzbx5MutToxcXsu1XMfSJy1KnQHYt92OpN6RyOzPdvMBWnV07b3Kp4da8jm0zuxzfuO5yrfLjbK+gUbVdT7eaJa8/YjzeYUi4yLTeKNzr+y8/Xzzdryhk44MvjELz4fff+8nN8ZuXM78x2ln7xhU+5Eo52JHZJTStEAd2YMA7D2nALmogcUwaFCFGz8wgUXGcyCQW8N1tj5PxPnWtcbKa1HTqiqaIzSR2ZDhlQ0SNOkREnTRGY2yT9pUdsUMXDUQhOnbeFqGi7JzxDq9qDiFxSg305I62yURMzuRD9lLEzN6nwgK2IAFHBO/hScAFsVP1gV9vXzqsZ4+K2e2Nyc6q8J4bAVEvM10UpTlAPiU6tCNFVKFBwaSpnJltm0JVhLp5wuihUESk4CFoyi1+oRjpbPU1zbF27ncCWzW4wY4vmMBb0wyj6SXQfSaw87fjXRt51WXsHDM79pwcLgn/1X+WfAOyrGJEqxLR8efl+kk4eY+e3pUdBZPGFuqEJf2ASbCaTn8A8B6+QeFrXVTZePdWvrlVnT2PT3+49/tvTXWK7/ygGD3FuFjnvd7RIvRi3i9jKOWJA4A81Dl4pzz+kz8e/eX/7eW2J58s3GPF0O/43Uc/et772iQefzS58+rof/eF+PUz9847k8cPdt87bl1/tvWfjjduPOvt3/3he/v8ZGt6L8yPaCSj//21Mr/+xQeTWzuvNnfd8Xz2k9MGuXI0rq9CBCaNNFcjpc4pRq+ujgl8LMyImKJGaw9jDFYhiFiTeFW7OcxYWSRJG94NyqYO9z7zGaBtaO3OyrMsNIE9i6CxMFEAQGwDse0GiMACtUeVQMpKqilsqVMBWuPaSgDghR07k/IYHIwXdzKlkFtKqVYmc6IUhZPkfFc3pk2VBKhK1KQ/sx2EWVYBUDZQVwWaMau9bcCiNXDFzAQluNjIkpQ2x3YXiiJKNI6lyqeMdlRtqBdVJm/12giMUYQJ5tcsKva3RAyNQcSxjzE4581C50qxaD1EwtNgPBe1ZZIooMzk1JFGEiVJzGevWaSSeUg0Zt5iLUNz07txNq7PXv/k3VvbLh8jjPY271bxz/85fvQ99sN5MbrE3MfyWEZVdnM/v/Oz5nkh5fWd4cDj2dFHTx60r9x4NXB/JcM6G3rkK99rxI3KwqOabN+bH59qAb/fa8uoRchmfvPmqN7rnzUPNveLBau++xHP6/Z81HefWS4ebnI9qf15KY9/8fj/9X/9v7/5u5//2m99pfnx87Prx2eDnWGzOvc5e6hTsCopu+QBZJebNQtt9N7ZLe8YxPm6WQGE1HbbAQ4S9Vj5duXax+8cvPXrNw7eODx53ifuCYXrd9uPP9JP7hfn3z5dL/Ld08vjjY3hloSmX241uR/cO1qHh/LGnYs8HoVZi7ZXfsW148340xu0Hg+P8q3+7u1+4xarp1W7qEefuTnywX88l5zzwseNoCPxOTSjtdRFX8O2RERfZGi4Vmly9VIxr8u7W3GXa2zNsDnH4FKHZ03ecs/VwFq4yXrjTLfCZPcmQmjK012cj3B+0F/uDhZHeD7Udc6XX3jltRt7B6CsPDkdbm/eutF+/OTtdz9Z3L311cPmSZiF3sivRgVWIQyAStg5ZMS1cAtmOBA5UqHEiQQrxWjBH4AonCMGa2oDDYbrKEKGrnZuE6mXBcEMzJEmQRuITd/acU1IlDlF2XefrnPGj7JUCU07GLAyROCdaDS/RhMnmRZDTXRJfHUsmIChczrtqjTUsXfO2WbTuC2eHUDMiHCOgQgmHzJZe70l26+vrr+bPX5/cPTb+sZaV4XXJmTQSGbNj8SsYTbys1rxpa48gkWVFdKKGuVKYuiWtqnxl9R8oCvlNmWIqWbN1bITgCYCGmz7JaBuhpCU3959N0n5axaEQwAJeXYWaAjHntiiWZxzzEa4YWYyUPXqWLu6cqZApghBECIvXLf1Lb9btv3j8vxkORvzaC3RM9oojvjKmMtGdmPDiiLAgz1ktL2zn29s1dOj5p0/3HgFq4eHIR7JuCj2C63W2lSBfXFrL9bDfnNzXh4CiGf3s95o8d7bpfuR13a7Evl4PvvB8e3bX3wWAg7l8l88vfZae/7g8WTn4Wg/fu7v9rZovf/4sJ4ujx8uxod7WfWZW/zRa5vv572yeGs83D+4FvY/d/8VNz85nf+0/8YXr1+/u8/7P5uuQT1nTg0xKtRxcvkhOKMGEiUluoga6hY9JIoX9laniIKxgIDOhow6WrvNlAm0EBHvnUaBKgOiUZRjaI0d7zg5fjifBYnKgEie5W27NsW5bTJSy2m9knb9LyUjUJBxnYlUo0QjXzOzYVYhRoOjUq+WPjuJ0Vw+2HVuqQSybIYYRBlmTRKT3Ig6xVu6P5GSugid72z6Q1Wxui4iEIpgR1cGbZKigOGYJQoREZsTbwroStI2SViCbWsd7NrCQhk0LV5grpyiKmQLBE7WOtYP2OfBJBKYuRWxFTl3JAo1pZaqu/IJS+0MiBzgQJnjXmgZyJBDe6Ch6gZok3QUws18283ufvD+9em91/Fo4xeH89de7998U/+H/8vg8GFRFAtsn8h0S/KZlBvYPGrxxWz3l/XmIY9OjulLb71xUo3uzy8/eFLe3rru4YplT2K73N4hDn7eLsNWwMHmKzH6NWZN7tui8P5W4fdy3S3arD7tN/JhPZvFDVwvFsL5TnWRb9WrUeandfDjgUb/7p+/9/D86Jvf/juHZ0e526d8S7JG8hxeTIyl0OQxTID0mAkIKkzsPBPQighTL2pgAjNibFQCMSnYx9DQyuU8fPtHf+WHm9e3R029oGrRbt4o/U48P2pOHsb9YcY8GA2vDTaeh6i9zPtiyKuDj35+VDr+3Odm2Yx5N4TZbDC+MWup/jG4HA7medPSy8Vgazs7pWUIo2qGl3OK
dchD/lIh4ybs4tTvRPhReznazCUDLmOuYFHy7O3cv9VbFntPq+ICk2NsznhnThNckFwoL8Eibs8t+bL22cHnPhPOLvP58/1isRfPn5y8d52O/Gz18sHtonrpv/wvz3su3L493m0fPnr6Mwzyb3/+8/fmj6vV+ZKQrYabo71Fk/s1h0XUQrWFrKTMcqdB24rgFK2AKbaW8qXqVKM9RWzPHhRmi/Fppq4miUNaClOSFVph7bYp6V8xO3vqXEJyruhFHS3TuBjW3oIUcGRW61b5u5w9EdstdYAQIJQ8eqyFRrJsT0knIHT7KGMUE6HIcis8UA3EjsRFlgyI4sWDpMqbL8udx+vzk3L6UX34htycy8J7WQXKhK78eFMwAlLPnCB4M2kWaSUyqDMPYonB3nHyCMAV0fLKYkRV0YYWRN6xCtR1BVglhGgHakh8Q+ZOo8UJJ7ZROHFELSfcs3NMZm1FjonIgc2Di9I87Jg7eXQHXV55XIqqEmkUUg4uOPGNBHAopXcLe+/Ts5/5o281BZRFAtTVFHwHhiSXB3F2/dnl0+fTycat3uZgenrkP/7B4GDV9ELcqPj61hDtUM6aka/yrd7m3Rgn8aPFWXGT9m8DaP7n+3ndYCFHQ/c69oV/Pnj9jRvtqMxo84Jee2mnuD1sXt55lr30pdP89UcfvPTdJ2VB6/yLuC2335iH35n+zOkrbjT88Gv5hz082Jr8h+LlfoXh0eparH/8w8nf/cKTFW/t3eaqCO0Fx75lEgI9kEKDarCGTST5O6VKBctCAhOLwJuHasrHZHTr2PRrQmUpigjgmBgc2uAS5wjMMI04jIPY2WrbNG7gCwgEZxWdExJiuYQMMe8TeyLTh/BiXgUi1CkBFCQmFD1BLUrJIdaeF9PxpRsCADsWEU8cQjCf9tRJJETa/L6JKc2malnBfIW528MCMygxHyUo2EHTDt0uoHRHQtcOaNdGk5oXlcQU44IuqcUmZPPB7nzRNfX/bMlRJArvXAhRGCRdnJ6KsVuAlIlypdWWhNGp3QTCPjUVyiBO3tDIozJzAeqhT1IqSgp90YH2b/WL+aON2QcRx8f5s+ny6a8NLr716D8s7/2r5SIUcFW9mIxOtiQ/wXjb+Se6oyqzZvnN4Ut/sMjjk5oPenfH3/iLe99vm/Y+H90cbpGnvACJzrdGwJZwTVDeGu/1H5VNjQou+Fh7leXGr+ysTi/7z0q/gZEfNdM4Gm4eNroxGF3OT/eK3YfNmZvlAYH7o8XD0++8/cdf+Dv/x3feezS5ffdZIyWFxoZgpxHmeS3Qvh3yKoESF6cl05irALlogBgq6kVUNXq/Se18xY3Pz3HvnbdvfvubvdaHofgyD3t7Xo5W08P93/ybMd/cGG4veML9DR5uhqxAWBxMiX/67iFn49c+i81J3V9m+TJOJo8ftvXp9w8GOkLjfSst5GZfq/Vw3kge1hBPwB6yHT6SyUz2zrXYzKuNUBWDy+1hbFAL+0yExa9kp538ylm197j26/L6lLbOaLdeZJiSTFUk5Pv53F1kpfM7PL98dmc82MqH/pOfTbZQ51UdC6zz+aP2Ekf9MNDL5qMfTod7/vqNfWS8mN53i+39SX7YLOqT42F/MC+9lOA+oQ8KQEGsTK2DeNIAmBexh7aARY2aCogUoiLsfeepkHZIXREmDxfMNxXKEAIb+dETYpTEAE7ifjB7I4lQN2GpSEcTZkB7ea9tWoF2/A0wlPUFocnce0yQ1k2SL7hMyWKY6OrY6+axpLj13oHMmx3MLKLeZKyWpASFgD3V1Ix5/NXVy9/pffBu+fjWYpKjCKj7QiGRwlPfbeM4iNkRIyF/IioxhGBeSVCN5qDJFthnfUGHoumVRLfj6yigkYkRha7gPvNIgg3TJrZmUoEzyaNNvZ0xDa4Ex46dc8477xw5u87ewHEy9yPtzj2BfUN6sYzWpHESpKhUkWCIoMqrsvNBc/wJnxzJeNiMBY34XpC2C+P5lO80odfzJ588X8/7wzfG1dlq9eyX27/1K4vLrJ9/uL3htsPDnpwOYxW3Xr7c2Zzl8XAyCa++cXFYuL4C8DffaM/e9dVkNtxaYaMecl9ONz57c358Vk4Gfj9MXhvfx/gC41u948/6k4N5cfxeXnw8kAFlBxi84W/82pxmN+J3svg2Rs+1n62byWLjG6559rOsOJ7O57OdyazJDkZ3nq1/gT5ownQ8RDIeXStUVB2roc1IfafxzRgSmSlCgxUGIepYUpJmUurWogTAOw7JZQbeO4jwlQGGSFQjEZpkCSFGpCW+gNhsGkMEOw6h7W57IGU2WNPzacWvEQlg1D+RqJ0gyjERUxsjfeohsvsUV3ZsClUll1pkAM5xG8W+CCka4UXZpCuEPk3dCiMchyhGPzNRIROLGuFASY0v4jr6liQHAYAEkA5oQ+rn9QWGBEWkZFtNTNaTkGnwCDEKkQGitghK18raAeON2IumXrzbnRgmZTYmiZQOl5Y97FSI4MFMkjHn6l3MohaQPnQI3nJNM8vu/3g0Xm/M7y2bT4Cjd86mD+bu94rRTsWnx8fsSYbUH6z3BodNuV3VbQ2cl73Xw/Sz5fVfDvH8/aOt0cEXJ28eHj6dPjr5JU72y/rGS9u9wldFjt4eI4I5yGnoofBVUS4Lv2bSydOTemuwUQ2CazzleV5QqKYP742u7eHm7ct3jiZFQ0L1RSOl+kWdTYbV4fR7f/qnp+2NWT2+/sbfPXlyDs6EEmmdlEhLZqhIo8Kcq90J5ICgCODCOjKJkTkHgkgAMk/rmGdlzRj2i+r86PTekztfvBPWs6wYrifDfIF89owbVKOtYnRwcaSHf/Wn50/rzbOl51JCc3Du/Q/fPaybnbe+RrlU/eGsLN/48v+h/eXw9Oj7bnPd374s9mNZu/HDcxQBueSs2qNmgGPsnWJ/qnsLDC+b2YCbocMZzbVdl5wRhxb88Wi7GX5u5ffbrZ3jdT53O+cz5+eBL52CaEuiD7ypugUfjnZ17qvF9VE/urp478+3VuFklpd1hiXtDHk2nyP2i92dqMtHv7gQbuWANw+ey6Jf+OFq6vv7k9z3F0XuBowFdB3znJtq7YXIu8jBgyWCHUUwaxCJTOS9NwcbNmJO4m5c3f3GcyW1FDZTIsLMu21hIkzk87xt2m5VaY8MgdJDk7pzMTc66RxxrIE2H8w095qPMIicY5Eoogzf9eCpHHbHQHr0r/6JRkGXTR4l5rknUpdlUDjvogQfYVn0IAIFEBfkG6le8ddfunz+ydb0R83Dv7m8uwwUKSZVD1RC9OzseCGCc16TxbKKmLmjWDseY2Q7OK4WJVdkJ1ttG76sooo8z1VEJSUimu7TIs0lpeJcMbdMLmS2A9y1IY6Znfekys45712i54BUzUXY/BpsEr9qA8y6S0FQ6QYnA7UBFaFI6gLBiwalpcdm29+ry+PR4lF1/kYznmdNXgMIbUeqVYDIRRGC9w7L2t16841QD6Zv/8noy9vuzf3DR82vzB7fxXQvPBjkzcbNnVV5Omf55M7tsOt/foazJ8/rcwC4+cUv8V/
ex7N5GEw+Gt4tdPZGedqbzFc77UZfsY17DX0w/MzDcPsr8vbNeMnTzYOI2dl52459ljePqtFOsYJbz2MJ9SNZrqrdYq+ZHrnmyeP9G7NHPr+e99Ynt8eb1beuz9859t5DIEcDILAvbGQLoTHzJLUpyYAHBYzC0/3MBsgm2w4bAw07pW6to/BEUYiYWSOc6yikHmqmzaZFFXYMVTWXYe8lmo2Fi6pQZOwU0obgO0hZ0/6d2KRBXcqoYTAaRKBM6Ll83TZCYGXryqzP7izlRBSOnM2x5tPOL3jXQrYndmxNBCHZjAQJzI5ME80mEzDMjAILA6QcWNho8fYjU6eXjuKcj0iu1iqWNJpYDaokQpCmC2EhR2z2dtqBcKaWt8YdyrZbz5wPohLRNI3N8WD2L7J87J/b2kaNA528axLZPRmHmGM71AGO2EGdCBP3VLx65ZIxYOlHHWhvO49H7+4MVjvNY5EneX60N6/5tLe+0D+4nP7DfDCpR6erprxexNk5hgGjZ8PXvjD+wm/w3W+/86//09+cXn40zj85fPr8wclXX/vKnezVnZ1rjx4/fPbkcrVorzWjkS/n46IqD4L25iu/dmXfN7KabfXCzbY6qnW8bMYDptWjvmbzD56Ux7IVNp9/chghxJg0Yafnpj3xDO6D+nB9Pnx2cupHH529vzv4wt7w1vzZJa5Qx74qJVogwUVz+xUC2ii1clAEaPBcKDVWo8HMLL7ZEKrF9wMqX2blg6Nf3nzjJg9Y87bdnnCN2KzzjbCYXz7+yR8tflH1wv6k2WewtK2Pnue0E9B/+/1nNcubXxqyHhfjjVyuv/6P59ffmH30g9vVg4P6yV5Y6E5TK0Isa5er8DnvzXTjSLYr3jnGJPOTPuqhSo9WWV9DUw8JAfirmeA//1CvvRb3v56/9kY775Wr2CyCxMaPtR1EHcRiKOV6Osmnm1QPl2d5oXlfiycf7FZ7ax5XT49zf/PhtF+de6xW+PAcI1/eyDZ3R2fTuhxzU06HVenzMMDNARZVua19pVJRQXJmL1gTwTE8pGWwneSAaWYECk/uyuoh9bxdjWNmSaZ67NXmg6t1kRJrhD090XlWEVISO65SVh2nymGbUxEBK3C5rKxWEJHZwJH37DmIeCZvlNQ0T1iWoEANwureG17ASrZK5kToYM/MnkHp1LGoEwIH0qARKmojdNQQhSjEiC/h5SfrxYPy+PZ653q7XdG545wkIjKDwa1HRuwdQhBhwBNadMsmgoRoGgZJPoJsci2RkHJMFYYCxyayYxUEacmkxgSfgDK2ADixkAMJjpnZdUAdJ/Tb0EImJicKIpdnuaipPQXwIBabTKMKQuK8iBBAimCWBUytiGfWIMQUJRIQDLSgCNFABMf1qgX1bi3GR+P1/cHs5nSaB9e4GoHghSUGYs+OhWsJG3mhdW84GY3GO7Of/6yJR/Jr3zht3r8+e2cfH7/qjm/5p73VrDf6nTnPT2/fqjerk+c/bU+u++sH/acVgAVdayZ3J8/fwyh/MLhdIizCo5evF6xtFdbn/a2H8aUHi+tF2W75Op/XPB3UP28frjDc8NnxfEsW8zs3m4Om55m0jlH9KOeiOf3udzfeWtzU2a0P/7D/hY0JbT5sKz/5zA9fG0FqNCpVhrokVEDG3Kg4a0kh5LsFrQLkGID9SVLhhNiRlRgvlvRgZpJoWaxElqLg0rBpVYSoA2lZhIJEFTLD59g2TKxkZjhEkaIDouZJt0PETAqnEp0gCiWOpBkFAgTyjkPw5IWIve8kRMQ247FHRx1IASKi5E3xf2XVZj2GEnGIwSh9KmqKJsc5oM659JXJo5kFMfmhsbgrKy4F+yu/WcC5tltLsQnsABIlMCWqGjHnUaKkYReCxIB2ZC2QFVVnWsm8l/Wy8nI+N6YIZSwQ8w4QkcGoXC6rNAuDYgjMCvJRQhq9RaFwHUof1eKYTaBg8E6mwuwzFFGLIP2ADXab7IbozR7uNYcFnizcyU7VxtP1+lD81HONP1hc/JPR3l7IT1eL3rB/uTPWN798+9c+w2FQVkej117B9++/Ve7+aIDGh3sP778++QKO5y/1DtbbzcPjh4Fkul7svbTZ3y0ej67vDrdiPHbS7G2EE7SXx7/cB7KzB2526JbRVdvZaOTRyrNmnJcuNgHIOW5CzwhUIEAkNlG9+B58z6H46Ts//vVX+ll/s+VaIrhgHoiQthIcsxdisLk0UeOpGbJIlDVRG0LLTOS8ytruQS8O7CEZkEvR7y/qy8PTw5fuXhOvvszQY8b56b//75uFG2BvGy+fckHCFFmiQCEzQSM7ftR/74Pp8eP5ay8vbm41u6uny09Gs8Nbs7XUHPze05AplxWXOVArNVSeyeaFHy10fC4b5zxyMsk8l7L0Te1jVEY/VOvXvjFrZP3gg/ZJRfe/W3/vvbJ8pcyuF34bt4Zxy7mdQZEvtrHckmokF9s4H7gTfvIR3v6jybT36Hi1qPNisYFqloUwDgd91nw8DJ7r8/p8WTcDbqT2B81br8rkM7c/qObnUubDcTN0riKpVBfRlxmWIJici53zIbbEbORDtqwCMkthuMTHUQLQ5QwaKmxaMLOHSaEwqTiI3fqqmghWBjqJaBRRQNT0uiBm9k2IdiKAbDROxnuWPdLVXSCxFTv7ZBO2pqPByFm+45R0RwZfNbvWyzlK7iJph8RXGx+RaIcmg4G1ryZN77X5zoeTo7cHT769LopQRl6rEjx550QcAc6zgH2KpYNTgEkZHEIkCtJxRKNY0l8ak8yE0ogzqt5dZQcLumwXU29Z28O2YTHmDSzr1EDCZPeQLMc0yZ6VUK/rPM9Nww2iEGP6jDrowe705BbCtIZQUBY0SSJFXY9jUz41TUNM2oKZq7CaSFZWrirjYX+xfzmouc2FpQVAMUMIPUgAYVCO8/72QMtmyfOP7vEwzt/+EykevjxZUGjq9bpfqM6r5oN3mn/4Txvmh+/fO/ev7VwbH36Cvd0DAGW5UckWP2Of+yrivTuv7cne05p9T8rcTWl8pDvzaVbIelnkcSNvhnU1aO/Eg16oL7g6vbZz+uEPxn9x8plf/Vu5FP3pYlhg+vCPh3sLFwo9auTkLHzu8uBWkOKjOh++vP/qJ6fMJ8i38vY5FDkjAKwIzB7aUtLpkjOJDgOAsy2KJYU4B4WYQUWCjMg6oURQICXAgeCZiTw7YROrGwAEc9ASo8iLRKh3LsRoAGBeFKENoWkdkYgzOMp6KTZ5gvnQWZpmF5pFIGIfIAiSZ541xXmRJMoW8MJkzX6RRJ8ye0h062FEiY5dlnkQtU0LG4Ip0ZvU/O+8Kf/sfiPLGYkda8F87agjeEaJKVVCubOb6UDj7heD0p0RfoyuoJGIYtTUw5CZU7KIhKZt12sY5MQUYgCQOW+d92pZ2aoMpEpgECtCDKwEZx8oRZHAHRsJLyZtIiLyIAf2mkc41Z5HIYPN3iJfjouGm+OhW8z0ZK86xzm3p8pHEqcR4ntS/uHx8X+bjydFeHI9n3zz95vQhCdSDu83sX/r5is/y37yG7L1oI9pmZ8fH/
+Xe39yd/x6GcqnT575wi/PJS9o5UMvuHapRxv9Zu9uATmu558cPfjbdHMPD9bni42Xr+PBE3YBWVOsL7lRXB5T1ahH7PMoZ5SBGZxRkJY5j1EChFRI9Ud//dff/My3hdl7lkKCg88wLAaL+UqgDHHitBFqCWtq64imh8Dp8xJSRINRPO94EDjPNJNWW8A9p6ObO/taCsTJJdyojVVV57cu9NpJ3FjIABXJGtRwXIlTLla8enLmJ70Dvdib/7B91G/3+mVR7RerPD84itmpLjbzYS80jfiVZw0+cLHAxqK4Nr3MG5r4ubR1Aw6znufrd5og5WL2dO+z69tv1iHn4TfW734PR4/KZo7ZT2fVezrJ4oPYO+hnG8xyuMZlLWdDOWnkNIsno/WTQejPmQ5R17NYNBNQ5qvjDNP++FePzi6ansYCOuzt3ejffeuV4V1+1NSPzz5B/5U+vBfPeR5YsowjNyLKIGKvWGd5FtuVYxctktqGtbS31bR7udpidSKctGqlq664W3Zxgq3T15iqw0q1SgKHzazSkDGRGMSzT//W9H2kJioybybDbkW140IqJJpG84qxkUQhafOY3lnSDF2RMo2emUocmbElDIMzjzu1EkgMcopVbD4fDp42s0Vvfb93/Hq4HqHecQFaO8rgEv/JFA7K7ACCRgiLT2bBKiIJEEAy1vDeGc6fctQB7xxUlLv3+KLDYMvY6QapNEuA2TA6JuoIaC6NHR05xfwCbW0gGmDi7rRIv5pDpDMasA6FHUiihhgS7VURtRNQQVlJVGKMSlRIcW3e+6RcPN2YTy76HGTtI8CZOmlbB6xB2bLmwa2QU2/Ay59/JOdnxbCV0ye9L4x6fDLIWw6X63WdF5Pw4B7ee2d++/ZOtukPNmKe08sH/PQIwNkf/aH/yT0py/yoOW9QNvnxeOtotBX2x97leRzUH81oqvrrxbwOs/q4x/PetQMKJ4t186DV9oPv33l0//lx8+Tp5Prr13t3qurdvx7lT+VWr37YFsfFuq70nSpIfvP1o7ls3fC7R/tjHAd9ruQ8NCNxgPcsEmoAzIiqWZ45dlGEzXRGiZRNCESOmaiVgC7+Om3TVRgUNDqBMtS5zHou06gzRzWPp443BSiUGL28aNZrG1md93Vd2xMmSk3bOOccOzLDbyt/pKoWE2FvTVkpkDqCV5esp0nBbD6ajszTC3QlcyCz97CClbpGZlUlgmmUtW2DkQmgcOw6+Zt6Z0wFw3e7iq6sCmNBXXXQZMiBFW/bfURR422m8yf13PZtVEjVBIqmNbL23B4yWNvOxJ6NB5rEQypCiizP7OowXFTJUj6XofQAwOyTQFkQVFTEibmdRTYeFj4FWiBTZXGgwiEXzbF2jRuzr4+HmM/pfNROZapyvM7OnEyZjqM0GsPsUos/kNnvb+8MP5zN/tUfj3/7d5dyQXeHy4uPb929+8ErN7MHs98c3/q304Y382Im9x/dw5S5Yt5hJ+KHg4VDWNdU4qyerYdRSwpRxlvDYc61K/TiOc9ivkGr40dy0qDOm+USa83Y64hbkrLH0g9c+MZHLv1l4JZdo9SSz3uFML337jtvHXx9FpZaEHkI66K3loJZ1LHTStFA10AP6DFWwMpj7VU9UUMgSAuC530OPWAALqEKzvLzchauB7fdlxVkzsti45yuHYXdM4zn2GqagmuHimQRqeW4lljLcNyr57ES5Drqz7Mh1rozfCyOeFWybrrJ1Dc+k7gMK4GM9xtQMwuraeEqbmazfMbaL+iV2+ePHt7+2//g6N6D9f0/ae58Zv4EEk7L9d7W3u9VeFB98AuZPumV4gvCAC5OR7PTHVwWfL4jxzeGYaN52p9fZOdlWNdnw/EXf/O3/ux/+J+n1bT0G2Wx2cwvitn9jeGNOPHFuL89zjd2qsvlRydP6+za7dGwfxEA9h7cREEQVWbvCWDvRBrHCEEkinNCBJKu5bMZ1BaGyeKX/uvcEUkMR6XUbhMlA4ZkSqydqE6ZiL1HUgWkXACjxxqvP1l5GBuLE2jL9mddKUVHwOjeRHpuqGuWrRR3bl1QTpMidVNI9zVdX83QlESqKh1xOu2qRVu/8mHYuM9c7Px458mDjdmtMCp1yBrUORc1ldugzqd1ODR5PouocIyBvEiQK38BBjNIxYQsdt4ZmwUkSKmO3K2dTOOoifCtBErV0mZoStsqe88WDmMUVEBJOEpL7OB9aBr7UdMbU9WrQn11FTNPIcbQtJ5Z7HOXBKSasx/B0MXM+bYNBAjTtWrwSXs5H9TzYj2pnAjEK+pAuat1zZqXJ8d0TaigOA/Td/8a1XPk18jn2qxbymrna7cxC2f7vSwflv6TH++/9da0OODRLje99vvfXfzrHwDoP2h8LKC5exjcQmLVYBRltuTX9mgVV8uTrM4iuH1cP7p5a7L+5I3Xj6T45dNFXIVY+otXj3lTXmK3an/23vns/uCbQ7k7rXg0ml54CCiQV15MfT7xQeBJyGfcq5u11BFqcGxM+YM+LSFz9kVeMHNdrxLZCmZFnJhHAqZUiCnG2LG2mKBe2Sqx61pBd8W1s+cGai7FqXqRNk0tKfQwMMFleYzBYkygL27wdHN3/WciKzIBYJCNluIIytZ/g3EVeIbEpE+V7yrlwMBeRpIVd9IClRhT1SQOMTjnmZ1oMO62KYCccwq9ct2wm5btZjczGhUiYmFiMp6Udl1+2qhb92yK3nTSmJO8iZbJJgRj/HURGJI4Z0yiQpxGZ4hKjOy8BPG5ixDHRCKeOTLFGAMsXtmahc63TrQD4FNIsHWwoOgojxSV1IGdp4DWeUIIp9KOpR0HCRK9ICicsnhB3Q439/oxPr8MP2kWvzruz3/+k/mX7uRfem0jTi+eflDufHV86/UHH3x/J8/fdO27a8odMu+VBdka8z4Jz5/MRnErcO/48RH6cHNHfQplPhzLjBDjcIjNJw8+PqBn/RHXzDzVwmXrShrPKnBAb0Ox0WDPy5bGsT/D3lS3ZjyeYbx4dvn69hen7epJ9fHenVvVeu17HD0chOE9wyk3VeCG41JQc6yi9sAFcw2tc6pzcG1X0csOUxG1p36UQRGccMFyLfZ2OFT5/JPrCy6P4+Ycm5c0Psc2LhFnkRbMa0YNEoeWmrk4gQ8KqeroUTOv5r4fpMwvh3tzF4oNJ6zCHmEVZj3fNIu5Q63FYl1+/huznz8fDjZG3/oH4d7T+VHOx4zzzXo1oakUUq6bi8hhNH5972tvzl47nLUf1PokFqc7OBvhdISzIaY7WGwtng4XMRwrz+SEhxuTN3b99QHcIqzquiGOlA33Bmf94WnM+kcX4cFsjbP1zV//3O2XP/Pcb6SICp8jCLzmuQshpGPEJlTRKMFcWdkZ4RIxlSQ1wiyspjLMLubqcbeh6cqwhu3JVIDElBhRIrpkUwnBFmGmOHLOE6iNHYTsXFLSmKWAqRAImnrcVNyuDgVKDhvMSIbu9qxqpxKwo8ycahNxOj1d6FhMGi0fyKjKKRfdDiTjXwaIW0j7anv98fr4pKze709/c7m5chAi70klRIInG21sC
E+nglkkkY9NVLCnENCKU5CIJ4cg8C6VWOJPmVJ2o3t3Yr1oLNiWIwo2X+0kRtLuYEtkH0pb8KZphsNh0zR1XXvvrcOJIRA7+8HROSsYvMyAHXIsEEuYAMXUdsH2fOmlgMx5jSH6sNuOdpYXJ5P1yWgxqTYVLm+kyRhgT+wFWeR8b7I6PAyPjr0GQol6jaYNIWsLF0Me0ZvnE26r0X5RX57Lo0P+wleqd947+/F38GEzanIAMhqGUnX6vM/qj8eXC9koevknMntwPLq2j6WGSvw+5JCPi7175VeFyht7H77yrftLQfmTMns+Oj1faG80KvKdJmzufCX/2//oL1ajXz3+X9t7y/Xbj7PDxx7nVeEbbtfi1uw4MBpzJoQIeedFGmcoKMPsw+rQKoQcO0IbQpSUSWX/iUY24o6os8SRxLnVtmlVAVUJqizJvYbM0tzSklINT4laUIDZIcTAjiUKAQwWVrM4tR5BNdEatfP/7kom2U3PJkkXUUcCUiYW5OCAFwyurlW1Oy492mw85A6UppS2kh4z55k46xRxdlOLuUdlPosS2xDSHJ3Y+aqCaDe7aYOZo3RZJEm8fNVJW6Oc9BhMgMW9aCdyUHuTKtbQpIeFYG/SNi8MgEOIYAaE2KmqaHQ21zp2QAgxSy+YyruClKBEDpxaGgKURIUZRAgSOfUgEiAREhtQjy+JhvWaAB+Vfc9nro4rkehzrhZVpYHV/bRubrCf8OD5X/xp8Su3jqdH+SCfnp6P7nwxjH52djr9ik6O4+VRDD5kCEDbiGcsciJZ+dXls0s/zGITwxq9dQaE23Ou8oJ5Z6rV0K1m4MnOse87+Bi8czUrEBGEQ2+Y+9K5m15yPpOdGSYz3pnLuJaCLsPHTz/aurF7vLNq9OnetQl5jjHoIC+iSpQ6BPQdWpUCtAIVcDXLQiQn8hBHvOyDoQj+STuaDJqyqMIiOOfZcaX1SX760v7u8cNxKzuN9qcyPuXxlMZaDXjZaAVdiGu9NLqugg+q4gCfNwEBaOuwRLakMCrdzMlAer28Pqk1y2O7Es7yupV1s/urv9ccvPb83/7hrfyz/sa4+skno59qeDzb/tpr9XvPVv2XwlMqamkyda2Li4vpetFu7g/fuDW89Qpdb3v0cFw/mMw/9POPJ/P7wyYOn4+rauGXkt361g6PyyDhUVvWvQWCIFs3i0xqwnbbtKFtGo9BMfzm7/7uch9LmTNJo3nue2yoaoQAWZ5rbNP0Cesb2dzngjTMadhVVev/vSZHHEICyq6qb/KKQcLNLCQJiRrVORh3mDXZ+tglGq5EUZBEuao51OVr2ybMtmaf6u1ZO0apqvrMB7GHxcAhWxxfaXORGlV7qEXhbY0jpCISU7cvCXg2GZA9nqpIazNqGATvvNNfq+7+B//+g+Hp7XbyUhy3KnCiAQCELJddkvdtujKGs1EWIEBIkDFElB0A9kS2faOUsaidjwGlqSQdRqkcd2ohghhLAZwESLaCT1OtXTQRyXvFYlnlmWciibGbJdL3vfoQjY8KILQBjsh79+mOKoWMSeb91eQtCgJCxhk0E39zMZoNZ+ejupqVed1vKDATrdfcL+XoaSEZFZj+5O2DzfFZUfYXZ3Hls0rDolkM8opHF2Gu60HlWFCuOJ796Bez9+PZaaa4JU3uWg9Ajxqd1/AhcFvoLMxy4TwH55+cnr+cDzYHVDfRD90hYs4Pbt86hzzkyTi7+SV/b3NX5HQxGpQlgznEsT+Wpazml7tfmx/I1pvLef7T9k8eZzsZj2jBg5b7kQsvzrwlQQK2K5wFbTLOAYEEqDhNDjCkygIkVAEmK2LA5Q4ghRgpInmkiBBzZvFWNnyCxcANEUGHh9pIChAh2v7G1MaxYwEQI8l2bO9PRJyIBp0i4FPFLH3WkUSBnDOIOd2ggToxybGtRDtJOCFKpJQRlHKdbCjsVhWsFvFJRAqJLciDhJ0DGIIo0rSNRDFMCGxiOVGhzkRVoUTOpcPBcLfuzdLVZUDqyQlqulxDjqxA25GQHjvrRp1jx2kMT+lVafhWiCg5RwQ4sAQxHqKKOmZmiiASJgmRFKpOOVJnDZbMhPgKbiMjqRLWJMTM6suieHb4frtqfE7WeTVti2UTSXpw0pCE2rkeUwvGT+v6t8u8N70IJw/Hn92Kxd2qHZGo7r/UHp9Wsv6mz/9jaBaAB7PPEBeMfnPu9zdHrqHqaIVe7A3yehI2vO4v/SoPoRj2eS8PDbNvHI3G014ZdMzU5BQaFo0U+xvs++4k4znvn8r4CHtnunXOW/E0ZqusydfHZw95TUde69fvvvblN0lE1lX0PhPXCHl2YSE6UF2EbO3lIiAHKlVP5KEEqgpC8HHn9aPZ8SivivEi86xc1IGf8nXim89bFWxf6nBOm1PdmWvJ80amgjmo5lBFauADiaxZPS9iLDyVw/XW9ubdzyw++kHxbCHF2p/LvIdeXgDr8tZnBn//vz36s+/rD75TPvexHN382j9qaipW2bK+n7d19cffnf2v/z7zrvkb34rPtVFmL1yD2r54WYYpNUxN34Vsd5DvDG9uTsp9v78td4fhiJ4sxk/D7GHlnymO67zsMfdYhqimPKS8HLWLOjp1G9lkQo9n09FgyxfDNc994SHDgHIN37CPkmci2rIECsQFs/e8bkLuWYIQQUhVNYh0bkvJtceeAUfpP3QwUer0kRQMqhBOR7ZaALil/qqGNtg2F1ANArv72ZwxmIBoJ49ZN6E7P1SEGKJgpc4ujzo6h40LCTFWdI6LVxtQSDfvpiPKiC0xRvvGkZNNTwxi6kh0gwCUBI5Z2feAKFpLPa43X/Z7H20dvzM83J/lcH1qoxBBOSAExJ56088mr0dyEdEW3RolHRJq9DYxDnkaUIxvw2RVmJJQ0QwDrrwFUpW+YlBZhy4kUJK0H1cRAWmMIgGthBBFNZi9AwDvs+7YS3Q6Tgc7FOK8I9UAMz0mgQqBo0SCAxxz07Tk2JyDFPDgSFJz2KkHZV0thquLsr5W5chJ1w0VeWyryf2PeuMbTZCgYT7co6MzcJYREMgXGyuKS0ymRZCAtumvNLi8PKrz03o99y/JeozzqV7mAFpxBWdRF5BlLtUaQcQT5yOfXT5+fklbo3LHHTvKHUHaBU7G148mO/tFfxSPRsN7/WvDYsmLc6KNFd25cTLGIpQ//KjeyBef2zku6qkfkxxs0DDMw+bC9yst0BAC8rzHfW2bPEpNFHuOJfV8LDHYnCSi0rQBqkwhiEnJM08iEiGUPB7sxk5AtQOiIxeZAIsM8xFRDZFNWHSXlEWq8OxU1FwjjP4IM0oDiyg7iyWwPXLaRZhNdXIEJVg7HBgEeJAXBFFhYXYsRr2OXVet0qHPliji2CVan1VfImaEaDgwiGldr51jcpY4bKQqhoNngqjzrDBbMFz1z0ACi2wwFlGz/YLjEKLrusOEJ9n7l06cpZRoh5wo48ZKTE28acCiBZNaRioHhIQcizpPEgM587ERhheFiuberzV4mEOKz6LZn8CJaTHtFdLJlBR8MUAMTPRqhEwJTx49uMlGbaEW
wSsLQ6N4X4SikVWRZVRVwj1+CjnNqWAJj5+Uv/q6zkMxKmZF1n/j87N3/rNv1qVmv079/xRXrH3EwCwSFmXRr2andaVBZO/mwbQ6jj15dVjkS6pJdGfzLA89X3mNAdJq3/XmG+WSVCgqqQc78jxtdo958xzjc4ymPDnH1uqC+BLRRR0iG5fohSILJ6c/57988JUvvD4ZD2fLRpGrd4F781ERCq85OLB4Fh9sMjOxGgVGU/jl+HOrZnK+PNzskWj0nM8byXCT5eazekFy7SLfXMdiqls6pThjt4BWCItIS6IVWDTPNEhNbuj/3j/1meYlLep6fRI81TLcD7Pp5H/7T+qNyeJf/ncbX3/j+fcejfd/bdp7snjn3sbuFx7/mz+5/at/Y3F62vvg8dnH/91YuRfXize+crHIs7DUvMdzkLI04E0JEi9nJ8PtcRbLul5Uq+MRXQSc1+F0gOeyzOIyFDEPiwVFkpjHtdy5/pXpyfHetb0nJ0/8sDwarDcc5nV9kTd9X0VeDHf8YSxmflhLsUKxQi8uBTVkHdGQb2KoSZrG4BzytssVMyJXBItYYJs66SqcO2n0uw6QAGg0oofRGQDz44FoUOrs46znFFWB5swSuuIjlhnOTJysnKz5tl0yE8z3gK6G7qQ6hNFQrzT5XRnukOBuVYVuoauIwaZQOLJNKicFZ+zOkrSLTcszx4B3FJQ8IFktl1+o9x63p9Nsfq93/qWmPHfcC9qKVU+JMELKlUpXbeYQJpVkka1Q71yM0Xzwo1oAmoDJMdufO5dZ86AKOIUoEaLN1CJqSYxk/FgC2LFDF7MjgMSoUQlo2uCcmfunAHMRYXZG8MKn2g0iMDn7sTNAiYwA7RXiUmRb0zTGoRM12g0JwOqVQ9n29hf5B4Pq2WYzOQsScoZGzXl6Orq8qDfGw4Yf5nRjNuvPHiOUWjFNORy1jeRPrx3M1zxDLy8EYVn66zOhJ2HcnOV4OuUz18xqAGWzGfJ1rEPsoagXbjLZuv365cNH1fTJFujRs4v8RjEq+nK6DaBchOVQLm+u6bWbx/TGdOf5OJzVPNIK2NHet97wO8NHza053/rri6MlD7++/SPczujVweP8pUNsn8r1mWzQZevWmVQiobYORsULgrGBjKZEojEEu81CjE0Tm7Zl5oydz1yEkqgjthkLane1MKAgZ8BJkjEhELOKhUrZrhNEDizp7wHDRcDaqcrYM4ONtq9Xpe2qM7ZGjQWglDitkkVWz0FjiI33HqosmjOvbeFs9b5T4jKxJxaNEqNtbh2xAjFJ6pSZvfetGWcyhxjBnMytIBrh2Po3AKxG0pTOLLgLWSCQxGh7a595EQvd7KjXysbr1+7hNws8MoqIJqjeLg5gTC6EGDwMYFJ2LkhkxxJsxNc2tDnnIgLPcF4VrAzmoJKZ3MCeNTJVtEZKln9K2pmTKUFB0YuTwNKqCy5WTUH5s0ePTxb1Xjk4a/MJbYzHl1JFXhDvcX1cc8HwIqEuB4xNCqPmw9HwrbxoisrvF4t868mTcHj/nerxcqPhUPNs0V7b3P+ts+WftWfiyAcIlky+GPncxZOT1eLwoil1p8her4rF8QKBmePsYDcfjsK8t0J/k7cqrTbiRS+sFQiQ3BeBikfizvzmPI4uMH4eJ/N25GbqHNGY3QaQVWOZl3U9KoKvq+c/es8f7N+88xlIHgVBe2MaLPKN+c5otQZ5cMZSKJaAhwAIoFD4v3rMG7SXO5wj5txbVAFl+fr2l0+LG5frY9D4LGyvJMcs87Mas1zmLAvxVY/XbWzZh8tFWHgOfmvPbY6Wjw5ds+ztb639weitb7svv/74r95zej1ctIu531lsD9m1p5e7/80/PPzT744+eNT/6OniR//Ppo19CeqzjJl9Ps933SwwMggCK4sgF1E454X1cnW+vLgYbjfsuY0aWbJCehirtk0QF1C3fv3s0fXXvrBatrev320evDweXx+98vL9D/76oq12vvg5v99/jWXw5H0/qOfD3XqGBZUVNheysdSSl8BCsIY2qi2TGSNKaENN3DBHggAhbU9FOSUKQWw93IX/WfG4mjKJlQQMjhKVAhxLVHYODGJ2GTVtywpVeGYHFyQKILANro0FNqJxCFEodIbvIFXnIIwEzlnZcCxQjiKszN7iSmzmjDGIWkRWEgGbFSDBgl9IVTWKagCnmJQYJPFdFDBjBO8SYMZeQMGrD9KKNJwXdfj8+e23dz55b/j81nTUD5s111Fqdn2OagQyWEBhwr8lWlqaaDo+hbrkmNRFW1MdREIQgESdSmvNQJ77EIL3HgALAoLCCTQ2kT0BcM5HCIl1PAqFAJJeEM6REa2tskLVeU7YhMLIptCkmUq+RB2ondg6IhAhQZRABGGGKBORKDOx81HXFFRc3J9v3J+cn/bCxWC1PUPwWVCMjqdwjmYXXoKwa977L9ewfZkDz8DKyg2v/GLOl+Obj4c3ENQjaF0Mai/P6uak9s+dPPN+BgBSfaJS5b/zW2Fzs/6D//7WG2/ufOMrzw5vz++9Sw8+9Gcn09mT4UFJUDotm4s+D9Dzvir5nb0bkC/dmDx5efyoHOXl7u2mj6NwJ9z5tl/encvk/fYRlh/0kVH42rH/leN646m7qU+Un6I3z0Jb59wIYlQBgkqw7X5QbUVjiHnu66oKojHEKMETQ6iNsW2X3ns48oRMTDhLimj6vbSeBale+atEeAeBhsDElPs2RHNiylwW1MgTRuZnqDpAheAt9TrY4BukRVrFcNoDCZxHG6MqiLklMea08z4qSBlKTdCILtEhLWINLJKQxm1NdH22Dhu2tVbVJgSJClCIwWj8KXEMECRH+Rglcx2Z4SqCTIiBwOmeEwndOkjAHC1wIoIBcmSJiewZISppDKoWV0okJKAUpkDE0q2QrRGPRIjKSHmdzqDnGCIiKyGAWYhZNDjvQ4h0ZQ5HEGZjRye1EljVCUAQp1ElBo1Clxw2ea1SN2GgpeTPHzwNbjSTy5EOZ1T2s6I/rmWNnDP4KFWQDfExx1riduSy92RSf2bIflPvffjo8J0fLJ7yGjeBA9SUrUnEz6fP9rH5LRq9HU6mtGKNRR8l1k3Ra1G3a78H9xulD7O1BulDmoic4vG1vJHtNWdzaTa5KjHhvMnLflu1mUcL9yDL5mG0dBvncbBcl3SiTE62FJsywqIMi7Ff7O/xevZk2FxsYqUf/0I2T167+Zl5FVvtrXS0QH/Im1VvNPeTuszjNGimYOYIqQMC/FO67lfVuNzW9WWeF/1J+eUvfHX98o2f3H/y9GhxO95uGrexaP1c18eeK/As0qxtK8mrQL6cv3an3Mo3Dm6e/PTHzR/98/Kz36h9nEyun8vj6gLFE79/8/Pnv3i2uVM24284v3H6H/5o8PFD3H1z88HD6X/6qw3vMs+ZbzjESCT1crF/a00RqyZGzXwB1II8L4vIMUaLiqPQhunF7MauG5VZWecuINQrqYXgVSTv9ZePPjk5n02++ptVxEuv/8aHP/03X/6nf++bv/OrH3xw34+wd3f/8R/9wWAi7e3yPLgpepe
0OcfGggZ120OtqAkr0BpsNAJ7DsBJwQIme4LU8q0dVEMI9lSg869JQFUyn0K35SJiZzzbTnoE0zAy4Jk16Ruv1osWIKmec0DZOQlmN6QiwWqGqoSYvBPBLA50lUDA5JgljYoSQ6SOuSRmuZiML4wTbObUBvOKJHjXHBU1mXiQiUccs7MSrKpszBMGA41I7XG3Lj9e9c/7i3fL599cjBsIIdeYbG2vWN8G62oCEgEGgkI1SnKN997FoAS2uRaqxBxF29Ays2MnZChEInxaJ6FADLbT6jQYIoEASUpOURIkJSkrbN4lIEbpXFIAWy1qx/BKIw9e1IYrC0l0fkFMHZpgCmoSgKM6doGx1FAGmiyK493mcHu9Pe2pc/1798rnU/U5LZfUTG/vjHNpV2WVBQlgXJReoU0TdqCF0KYHUYNcqjYT5IuMTwNf5Hy+CosLAHXhire+5q9tC/Jmo1+2ix/c//5nxwev/s7vHH9/p/jJ2z+bHZ6Uw/2dm004x3pb2mF81MzqxXSWHU5e2ZP8DoZ3d17aOx+cvVefXufni8v5O9+rXttu7r75o+YHw3wiw28t5G4zF52Cj4Bjly2KwJdNrNhHiqKtMiM0bYgtiNum9Z6bplmt6mBTGSgokk0ptAmBBWCBOGZYsAGZd41jgFTNYywSkcl7tUs6sp0FMamQaGSL3jVFmqYB2allsqYPzu4i0W7Bn/jTlk9v7874zHDMjonMuiIFiapRLJ13Kt3iqSMlJApCArgIRAjBOnG7f5xZ5qkyO72yFScab47n83mrbSTmK3scAjGlkGWiDoVOImMD6olYRMizrXGdd7D3alfRR43oHEvgXRZfjMPJctYaEYVSwrZYyZA2wHmYvbnZh9nuKYi1wkTd46GwodcunBKAQMgBg6NEEJzTUIcsZzScB14czy+ezfhGXkl/jnIu5ZBH+bByNUNUgsgAaDnUQgE6AUptN6LfLuTZB/9/qv79Sa4rvw8EP9/vOffcmzezsrIeKAAFEARBEGSDaDabzWY/1Gq1pJYsWTPWwx7PrNezs+GYjZiI/R/2p/0HdtexP8xu7HrC3gjvjGK0Y8u2xpJbcqvVD4rNZpNsPkC8H4VCoR5ZWZk3b557zve7P5yTxTaC0YFGAVVZWeee7+Pzmj32jC22L3RYR1tJJ7zoeG7Fm8lkcs7a7xp7B/ae7wLms8CFXetLe73qvyQVZnEm0cJ58QyNFFnidGW1HawPMDuRaQ8npSzkpBd6ppwujqvRg0gLHk5iQQ3TvtCU9KzyiAd6NMJ4xLPaP7s8Ort9/bL1hwM/7tf2yBw/+tEff+mNr3c82G+PB3Z1IPOxzkh9qNbDes8wSQzsC+2MBrHy0GK4uecb06+Lrv+rr3+jLft/9oN7H/z0L+1Cbnb99mh6ztOvjFs7oTDugqwxx+rr3xh/+NFwe7jxrW8e3Xo/PH0Q9h6e+41vHftnwRfjt/+U7302uvH7hx99Uq8O5aNbcVSZR8+O/+k/2zg8KBnykx+hpBULcZ0N86kNli3HBkyTF88tFkc9jbEoSFprWVBKJ5J7tDSbUFHwk937w6FfrxqETr04ZxOLJ4gvpmPZu7v77NGlb/+XxbmL1y7+V7d/8OeDezzhcfXNVxc8FLcvg/W23Dya2taunXS9OdVTDNBAWyJPCOAOpmOKZAu2hSMSFZFoFbnSZ3NzIoVaa61hazi72pzCM0umMUMtWRCHHBJJDDLWdiFAIBAjatikXnVpSJtUwEvVDci5om0X8JAc05lrT1o5ceZ5LgU6yDsgkexDmXQ1CaJOA3rmZ7JRRBK1xobTKFOFprTurAEhypkE+ZdhImYrEAOJImA2MBq9oTpWr7abP3DtrXp8OT7bXqw3JhCBl+QQxVKGmQk5DI2qEBEisoZAHGMXfAdiItZsSS1RNCbryqT7YsQgKcIF0KhL3IwZjBgiGCFEqOQLJUPKhOyHjTSyJsScAcMW4CUlNq/HVZdtA38e45g0YKe7TEFe9TOBmBKhnQjC6XbjQsFcPDce7A/3JmU3XTdndtvFvc/c6pnYIYR28cnN+s4OjMbYOVgXWhlXhqtZH7rnbaFyjIQ86oJaLzYU2KcwDrw4kQvnAFTf+pZbHc33dlbXyll/8xPb3Lx3uHllOHv0k/DOT89KfHkwPK5k3DystEJZxijVeHCGHYVBM3Ht1nM7N9YXs8mHjx/58urKs8v8yZOLn0X/aFzcn07uuQkuVQ838Zh5D9J43rNmn08OJ6hmzEGkldgCXdctVDqJMcQAwnTaMCP5NWYx2PItTaANabIaigDYcEpdJlAIIZGIMkCTfaBTE8ggChLTLhhAjOkAAsoCkSwbIk2eiTmCSyWFXUq2oz6laCR2MeWUXJMpiWnaY455qZUeFiaQsSaEgNTBSkwubMm9LWnP8wMpSsxdCEnHb4zRdGhEDRMRqeh0Og0xEJt8gtK9kfvdhJoYSqIuk9Mec/8HTpi0QiBA0IwNKUsUUcUpi4tYcqzJ58v6018GpMSJ7p34p8tIY6uqMfu2gxlRhNksS/iSfb18ojg9BSnoBZKCylmgsTNGZEGxCVXsj/d2w9TXi3ruVqcymdCoJ42zWN/c76InIvagDrpQBKWhom9kKLyKA9k4Kc5Oq+f3TtYntsKksYtKvOE5TIN2MBi3h2Vob3D9Smnb6XQylcL6VWet06ad+iOynQHE+IDAFAwFhNa1TecHfXYrXJ0pvUjlOuEVE+/D7rYzboAG2giNibc59MNAjjbMZEUPR2a6xWN/847fo9rNVKQLRTg4qDp/7/DR9a++vr7xwl47m+l0hQasc4N46EbNaNWKRVSagwZkhSWMp+7M1vmz59pp+Ov/+W+COJ02I5Ql3OJwsXg2f6I0Gb249ui+ldXR3/tvP/vjv3jh6o29R7ea/X0zDXowIWKVcPiT9+wbr9e2dOcudXceN+++i/HRdDyrinmYxSF6HbyrJIYqWmF0AVF8bOFL1AFt6Dx/5VuHJcp2AZZEqAsoIRI7YWEJQkKxjeRpEUBV/2B6WC3GK5v99U0+8TOpgl3hOISOih6qRdvc+/7/eOnXf3fv5OagDoPJkR8s7KWN8uXes7fr1W//w5ty9nHbju3mHjbGujZb1HoMTIEG3DB5aAdDCgoqnq2wIWscMn4bQJxt/dM2l0klLTBBxDkXIU2NmTVColqQM5bBQBQQu8IliEYk6eiFmI1hgGJymuPTDMFkgKxMCDGKaBdC6uUpTXBMuuQDJ7U9QGxYU2QcTu8+XXKQT5/E7MKDZY3MHrUAsQFnzzxeFuDTLAcQhCHpJmMwU6ligLawV7uLu7PDOyuTn/X2Lsp6FUlAaapeWngtGaiJVaXJpscGCaqA5lkBRKLQmC5rTq0HgckaASDETMQ25nV8isTRlEVDbKxlicLGpUFVQIrkRs9QZaa0l+PUXVh7+i4JlmOWfs47TZ7PqdOhRNRasqSzYSbAxpySc9IdFQAyVArPGWfmg34zmY3oqDe/8NGtKRQdI4opBtMfvt1Z2+vVPkaJ3rrSSyOxLp6xqtfKaBFjJ8bABOPbEG
DtpJCwL298tX7jGwCMyONbd9WGte3KV2uPuxNMcfvZ01cXRtrFkY9nzl+RwebR/lOEw6oAhYkcbuh8zYirZQN2gPeN3r6DhS3aZtK838d6u6P2ubX57rNq1+PcJXo86o4Ch8KNa16g3X9C7R6ZJmgTeUFxTnGhKq2PQFRi7wMxdSEYaxOTPtMMlZKTBC9pt0t2u4YQhNkQF6ULnYdCoNZaVaF0FPOpJQIbQ1GUSMAcMi9RYrKwSFoBAeWdcCRiZpPCCTVrhIiZjbExMQvytJjm8CUonMlNQqCisEQcJTjrgkjqYtPEe0pLRo4uAifvaOTlCpEmCYAEMSZbkQBI+rfsIK0gJGdrqGrSRy8f0M/tenLPpwImjQmNkhDEMGfTkJTbCFqamOc0qNSPLDmcSzEVACLlzIlK+jpZRnyKqE0+1Mnmi+jUcg9LpeLn3T4SxzS5dUZW4VTQtZOFwnMZ7OTJoZxEmeKorKxZ5zhj9hKsZ2xeOEQTpCXpxHiKAvQVNXw12MParl/bj6P9mZliEI7ZTaxMrDnxEhbi2bbM3kdZjK04FtF2U70IdcbNpvvMleUV22jgDrFI/boJGubTarX0s9KahRQ8L7gLbaF2pnyXF2icXUAWkKmoVbZsK1tbqShUXbNi5hWaofM82acmdGOZt2J7jk0PjXzw52+/+V333JlzT2aeYxMtIAWgqHtNcDwDKqBiK8O4efFy4zZ3nZSo/PbW6mR8gv3pdMrzdRGhGZ2ptl7+u//o5u6fjGx1QOj23pXJ1229hfZZuVqZ7Yt133TBbl6/EuCn9w+0VxSjorv5Q+uMCZUfrhe68F1DhmPnowmskWXRFYY8Ktv56Y5nt/E7f/dutSlP7qGqPaSIwtYFSAVaAOoDApuEMQRF0VsEkqKexdndp494qNuDvtasPV3ZXjn71lef/fjfjHxZy9Hef/yXvOnDiGdnet2gqkZh75j8uW8/3rjx8FlzVJ3fjxtj2jwMA52AJqBjpRlTA/LMkQ17Y3QZDWszLVcgeeFqNVuvpeWQ8rJLz3f6Mk3AgKJEBpFZftwiStqJpudp6aOU4Mjc0FPykV4ecRSFZWYrwXchXQBRRESNTT4ABAipSeRQVpUoy7smK6NEREXML3mF5Gk4lw3C6ZBLTMYYNglvJiJKcVDL2w1EQbOrvVpYkVAYp0RCMPS6f/GB/8WBG39U7rzeXh1LYwqhLgkYkheRJMLo6ZYeDAQkvJnBbEzXBWZmyyLZxscYNtYEVSjnPJcEawkUathCRFgRYQtrmEPwJImenrytQUg2+aJCNr1RSwswFSFjsnHBsjnJziZ5y54qQmqz0lAMkJCavIlL7tewDGZKac4MoLOEEAtTXJgNPxkePj0j5/wR1MbRqNhvWlO3ri2tDUomFjA2UCjqbj59xFKxlkLWukKDRhUWhDhfuPlK0Tvxvv/cyynUJ4Y5USm2jIHLpqFpN21OHn/anZx7ce3iK2t3fj54cnNK7Zmrb8jbfxGak27zopFj6Zh2Vo2a2Kn7+aPquJwzcduaxtpuipMuHDyzpW2cGb72Nf/M2WkL1zFXYdGGdqFCXccKjhBI9L7tRAnqfWC2XeeNscQURQ3bZOeYpqbEVwLBsonCOTQ4yQkUEUJdF2IGaxIGQSY/UOmkmuQKlwY2Ups9QZWFdEmcJtEliyLVWhYRMkYk6YTTVjn7W6Vu1vJp6kOOC+PM7kUIkVmYuWnbLMgJgZhTirH+khowk/FT65zVUZrTTij1eURESlq4QlVPn16FCJv0PbGKJPuR1J5w4n6m1o4iCZNBTDIIhgGyUVewxoiCIvIbJ4FJlG1e+vyybj5h10A2rMloc1oF5C+eLo7Trd5yQYb0PTNxCoVkZiWSpBVTARJpvBOxIi24grfdZHH4cN+ULEeBC3MwGpb2jBON7BZiW7G9atqrwLEpEIhlitVprAOP9rG6z9Ueto7K7cVsBQfeTEWmLc9Ep9a2TBPPJK0Rlta3c+JuAuUgFYaGFfBA5xFtWyF6qzaoRDKOB9x2XMxhWVwIfbf6d/5o78//1zPOTNSZ/UlsJXo4cWEUYowKRQyGvSPhMLNh4tzcibd2xTianExVKGhkw2z57T/9/ktfv3H19dfuHo4XnV13VSe2kf60PoMeTJ+0gb24veqd/+bl+eHe0U0/uXZl69GTinmwXhUwpd7zYUO2TqbTvWf13/01BJ2//c6ghh1wNTprH33c/Pyn/ODWyXRq924fHe+4Ky/warH60pmduz3HC8s9qRbF8V4kTwQrJlLkxACzzkgLiZPW8+UXtr77e43rP33nR0Nng8yMMQoXJRRcg8QGkC186LCARsBjHotaXRsLob7wcHeyY8OzzfVLYWGbptt445Xy/Nr0o3cWD+8XIK09D5wv51pGb3niw9NWHu7LAW9P/eiYBmMZyFgxJpqApoQTpQWxJwMmDmzUWuQKlc4+W9YgmnKQWEEqSDOj5NTrTPWlU8tnkF1qC3JEIJOhjIaqajLVoWVFTDGhzJTIU0C2zWJmNsHIckIl7jqfqxkkImlmk31OWlD7pWIHp1kM+WZQzZA1GeS9dFy2DUycKohB2pcnpnR+gXnUhqpTDlFY0DHl7TWzIi44bmH9ZX/mI/vwA96/zGdHtt/GEE3M9hiy9IUXIHlPhpBuLSbDpCoSYmBD2XUfbJaueEQcQsjRrsjx4imeJQRNPwZmtswKuMJxkogkHcYpAQaqGjVK2h7nJoCZjUkfy0ntvLxymIiJifMKNd/pULWJ3UUGjJRNA2KTV9pgJhYNAmONjcDF6ehWO1mMcP+1C194Z6eo6okatKHgQfSgqk/OAUY8fL0K33AApBNEK0rSQVRhmapFkLquDQ/sk8P5xkUA3WFz9oUXhoPy5OmzeTgTfdVbecOtnD+extkIB699qWz2v3T9pfD2fyh8h/WrX/jtP7z5V/9hJE2D4J6sbBzOBvtH3sUi9ESJTXuMk5o2yHEcofrON7pvPQ9/Yoo+ddLs7lku/aGH3xA362atURKwoFZpiVFVbrFoq7JWQMWkFk41pE1yyrpK9camdOzUI+aBGCISQqAUnWutqCyfprT/56wEVjUEGI6JeZ4OJ6fTrblwLTlH6fBmJbykhwtRJPok3FNOlpin7SozmHOQiUJzIHEQUcOGmFVikhEmPAVMZPRzmoXm4qUhEpPmSqc4zbZUUslL5dQL5BTAJeSUXy7xf1IwEwYiMb1SBmm2BuCgkZlMVE6iAJBQ1r0ETVoBMGsSQyfONpDjPzmTVBJeRgikOajYJNV9SklKjhq6tOFkMsh7O5NgbU68lLTNQBelUA4MCKIJbrx7OH5yVF1wchTVqKLYG211gobrE6qOYz2y3gZfGoG0rNqgN0XNcbDLw9soj+TcvF2Tfe8WKzr1duyptdIoeZBtQ2iZggOLIeJYRPLwwmNB5SJ5MyHUvABHJ4WzEtrFwh57P+z7Udlxef53f/9nf/P9kb28eFqMr17/wvNX3735P81mcGq0FMMcEa2xUFjSCr4nfuBQy8y2ZrZ/ImMYz0YgLCIkhotVd/fHHz3de
fTGd75RYti2z87YsgtHHfemq8PFuLOltW7Q7/YPhvP2rVeG4YfT6U57ZZV2WlnUUjSdrhieVZdJ2ve/P/iN/6y79ai6/U5duckP/rV9cOT8JPzgP1jfWguxtr7yii+l27+7+973Bg8fwZVBZprs+ohhsQhsDIgr6bxfTKEB2xdHv/rr9vkX5ofT3U8/rkipjKYTS3XybQHUy6IydQgKC+0UgSSoVxY78Dqbx6aRQWFHLZ2M/d7Fy1d8wc3xeDi6uPHiFd80i/CsbZ+Ys4P+unz26fet1Ctb17s3L4+Ltd2wNZFh8FU8ZjqOOhZMWE9ADWHO7Nk47wpL5NNjQ6TGUDI0FkmS289JHalwprPNzGSW5CQyyQSXmROkwpYVKhIlObHnlvn0Wk+1nBMNi23S0qdOnUQihVSh0yzHAGIM6cBrWt0ibcIIApM/P4sKoiyBm6zUy6rejKJFAAmaQt6ip6/BSzYqlGQpeM7MI4YJiBxEDCnIBhbSgMiMCc1fD889CONj1/y0e/Cb/EWIz0pjFUBT2sRye5cMGFShcZn0d3rvJJJXthXiZLCQzIRyio1mQgxZm4xQCFCQEFFhneFCU+Z4kk7lWwyqRkQkhiAiEgBNiZFLkk4KulUsreUpA/rLpUb6QiRCdunDl6wAobBZsGZCZIvgCpEI6izcOGweYXeIk1fPT97bXbu3K1WflU2AmF5opKicwgrgHrVkykAGZGEkAFoYKtgYJpKguqi1wqiNx2UvUd91fDT/xb3u/qFtX/0vmNnJNCymZlU6iBleOOzNuzFfpK1zcq+mVf/hznM7J65tA8eidWJOqKhEnFgfQudQAoN4behXm+JrV4v//IuxN63qAj0zv7WrQ8Fx0e7sy7Syfk1QqbaqYLFlUakKM2x/AJBoNIZit4AxIsVypwvL1lomRtd2S3EQmFhVg0RRKXuVinrvJWV8FRagz83WoUScFeEMFnCUpI4LRKppICbh7JecjrGIpCYuMd0NEQwnaf2yfuPUTFxEKBuA5+MVJSbXLsoBWSzJVV/zJE6Rl4KgTCxMyoIuBGNZRZlMcuAiUIyBiXOGpkaNyyDOrDRI2xdeZjQoIZveZQIzs8So4JRIqrmvRrTGxxTXCIrKIEtGsh1OvmOIlHL2dyIW5KU0G874twUkYnniE9MlXwfIcTB6yj6ETTY7nLoKNYIAZShFeBEYYYGrdeXxoz3M1DZGmYITZfVsj0bb4+7wbH+96Q732mNntMcAeyc4QTUV16fBQ6l2Zdi1fRzOcRj4gOOs06nYCdu5oSAUFt513DUzFaDVXuGks8FYsGhgal1gdSzSNh1Xh6XvrRbf+fWTx5+W21fXX3/503/xzzYmZ5+7+g/0ppe28s0wfDb/2iu/9c73/uPcdVRqCCHd6fDd5np/1FU09jY0lQQ0QMNVw2jgfTQgCREFQxhex+3+reojd/nw0sWv3Js8HXHZYGW2OuIBY0rWTp6tYfzuB4+r8Nx3Xt/6iw93xs+KsxtuNsN+x7Luh/vdqi3j3q3mn/9fMfaVcCgZOztWNYh1XHWlgS4Anv7oL/z0sJCmZ0NBvTnNKy0Ck9rShijRd2ERgwqTDDfcpRcGb32lGm7Onu7u/st/sfmF63ud71ciXRRTLGRRo1RygWJhaKFe50xMFEhaoIUuqKmKivvHPDNYEPoTrNv6+N7kfev7lSlH62uT8Zgr7lfnh6PnZRPnXqHjl+q73q3Y3th3C3aNuHZe8YFwAxqTTKBToCVaEAUGdZBWojB3xMKsxCIQRn4gkB4UIk1kSc2GssnKmLJ7Iac/yftdNgClTFlTFKwx3QtZdar5VKfqYw0lPqVC2BhIzlGxIJGIALFagogQAocYEIUEIcU0CZIIVWPU5UY1SMrsSxVGOMGouftWAowxp5JB0uU8klHlZGStp5Hd+UFFYFCAIELIsDKgrMIttc4P1b0+u/DX7tad8tkL7YMr2J5oK7JkehIlQ9q0yosiKpFOt+6fU2SANAmleQIIIVi2koWglHw9NXPPJYpYZsPWGDaFJbAxHNKeOe0hsXyXFZYgZBGCwEqyHRMBkyED8Oc8k9yzaEwiGTLLvoHBbMHCnO1xiQkm+60kAxNlG+yCW6DQEnLw9NLu4cHz682mmz637u8yt1bAno1EsrZEK+QMowgOcMRMwSYDssiOxQAmR9H6MlblZL3XPSwYwE/iynQmXlBvbtr2SBrMozKVtGgF3Daz9X7ViL954w/a/qUv3/mP5c6jljYbx1WUWJdMG1gw2S52szoaz4X7zlcnb20Pv7QVvmDXB+NB0QxLnhxOmm3a5/M8ocMfPLq4dQWgdr9maiO3yoHg2ChB2SbhuwGCLSsoqw2UjKkkWGPTQhiuEBFdwpXpKKZ5i5D6WCZijULE4M8jJY21kvzZRYOKJRJSCBlVBgmDcnitMhvDnPT0bEwyZOx8R4YYkChJrpAIwFHEGuMKF0JIQ2k6mkmlboiI7TKkm5ar53xOY9aap00QsniXEnhMKmBlMSKiiTKSvucuhsRY0izzZ+Ik1mdK0Y5JykhImUhLbmeyHqPEk0jGNVAlJZt7CVHSTgNi+uysdLp5WNKzQFYQKd1raoHkfxkIAFljYxTDlhgSJdG/TXa+B053Sek3JslGEImR/eMFBGuATiyTxPnu3R1XVu2BtwGmKMQoVKbT9vmvvG5XzN0P314rV0U6J2KtVMCxlhMxZ3m0H7ibWDkJ5cT6GVUzY8eLbkqx4cKT13llO4S42D5ff+1rBQnMdPyTt+39Rx5VCT4ajrhar/b2YrU+/N/9t+O335X33lt//qvli1/b/fiDlXutaepw58Hhp3fXr74Sj1dCHMnhoalGN976zs/f/0sffeWqAAmdWFftHz4GZmcKB3DsUDAHCaaoogQGqSgHhcTFSQdjrHP3P7ot48/Kidl+7bfHk8DiQZGNwogNwVcUxOEnnz24+LT6jRs3Pnrq3729fxLqgLp0Fy5feYne/xHIueilrHwXorHOivEqHCFqOCqThQ3tcd1fYS3bMOsghsEiEuaQ6UQsWNz2ufILX3Jn1tzWtgQZ3/pk/90/NfuPLr/5rb2NVXv7jvZriYHFMFFkBSLHoBwIHrFCIHhQC54DU5q6FQmBXGRYCCqKTFhbeRbak6Y5grUbL27FucDCrgWshJ0Z3T5AO1r3tOptOeHVVkY0g0xgjzjOhSZEE5aJYk5GArgVeJGoWIDRxWhEmEPyF0jmGcImefBnhT6n2xvpgQCAJKWAUM7ozbMXMhJjFMjS29MFFGviSqQxN/m6Jek9JThGNX9iS4QOKJhAhmIXQggMStZSQSOIjLJC2HCabRMiVFhrrIves8HnY51i6RIFA40SMs4Z5XMiKSEZUvLyn0iyoxC1zEAMUZTT/GG7zk/hX8ToZrP2ZHj0bnh4cb7JQgJWRFYJeR+fcnRVSSPUWRu9T7ZTnMmVSe3PAFRFNA2nyJIhDUwwTKqUiNSW2Rg2xjAbSn5gkIIzMJ/XbprylNN1I2KMQBHAyTgQrKyqcdklcN4UEEWQIZPqMpMBKWCULZSXUUiJ
HW1S3BvHiqK03HFnQZHnpQ1+/fZ05YgPV/nkK1fM7Y905WzojExDZXsRBsZBSrJsnESGOIgRVzhvIheBLCfKOVtrrIo9987Fq+8ctgDc0LoVX3WYd8ckVn3btcEIbGQqjG+7uXHG1ZjPdi5/e3/t6hufvb89/ZjFSOnEWmgTKDCj2jq3YNYXbsy/88K9O//LxddvvFKXr4Z7q3bRe7ofSae9M7uz/V/UL3TVLTt6fjpHPOhxNGJ7hYlAtIaIRJGWCizJaFwjJVMqIthCJKZZitMaAQaK5FNBqlaNiASko5a5drQkMllwII0hQFRZWcSKVepaU7JjhGgXwRSJ0qQ+dK5woRNBYhUBYKMsFLsuOmYLDipMOdSEIUzsg0/ciC4KI38wVeskdshjK+kpTwoJ206lPOPUUGjUaNmoUqqIABnSbLIhqpQzR5Z4c/6YkgKCCFYDEmYOwTNzemysLSV6BkCqohHZMpdBkTON0EjyryHhZDMilEYFQs6GVKjSggODVWIisjEToloCLEvoLBuCaETyrSTioKe3GCEbe+dtmZAhWySILa+uMz4cXUXjyYE/PuGVAkpKpouxO2mrNffK73xDBJ9878du9cxMlYv0LAUX4alq2+nLi2F33PiJryfVfNzatoq+6iatmRF5ZdPYMEOYtiZs/cE/OXz4qBtP2sVYnu7EL1yji1ee/uX3t/7eH9jRlQf/9P/83I1rizaMrl69/+mHW4f3D/ebC9duQMbwjsds3/lo969/1PuV35wfNdbXvvGrF1a+/vVf++D+ewfNXhEsi5nGcsj9TnsLOQnsomHjAIfYxeiCSGAUatUUtlcXC/YBalxRVObB3Zs3j+25N/+AxbSLUBW9yMGWBYfZdOvMoJn7h/t+58cfvHj5+e+8fundnebOgziZzD9Y9LYuXt/86MPO9E2ITiK0oKIWaQUSHJEaRJCNHUozm7HOWTsP7zl4NzQXr1Sbm4MrV7H9Ah8/mx4dNvd2Dv7s3xTT4zLEui67stZXrz17clCVFMKMqeREw4AYRlFQt+iIWNXBM+bQmWitchxANFtfc0EIAkjPSKFQ0s2tQ+ursd+bTPcH5bCVrgh0tC8nk7VHc9MfbS50MBHXYCBzxlS4gZ8IHys1RDPQnBA7oZbVq0aRwPCqBMSoIYbAnFmHbK0gpNzvFNWglHwt2FhO/AXRkIUrUQgJvcGS/0uqYEMxtf1Aptmm4sCJ0auApMXpEi/VZd5mquFMaWFFEin144gSktZQ04aKWUUoo2gMaBTRrksiJ2JIiiqGJmm/LqWTLKA0dmDJe0lBxSIhTwDJK5cBLJI6U8ECVW04lkEbQ1D/5enZg+r4aIAPFo++1D03xkIQLKlEQDhgYYUiICGKijKYrUpSHObZnYD8yrBc8dHyLyg0hw8mymV+N7PTr+box4SjSbIjzsy35JklSY2MlKCULQZENUVX8CmtJkkqlx6iAJnM+kqXOpk0ziR3rMQYI5hIUCosnBQIxHbehgdPykU493B6vLZxcK5+uNV/4TB4NpZLoIjiCE6doFS1Sj2GAxNQB7AVtNXKaBE9ownWmrD4+et/7+P6/Kg5ASAhdHDifdlWnQRxrnYIM4nzEj5wjH4xZxia92w38atb3/v6b13aufjW7APpFtweheFa9dYbTdOuXLq+vxgHYsUzrg/dvb+4uL3+oru1zuOBVKGuZoe/2Bpc3fnw00tXhk2feK/zG2qPeqVGGGHynEzT88qzS9jBaZZgej8lx2DLcumCGDPznA157xmGCUw2xo6gEgMbS5zUr1BlkSiINrBIIRyDtZu+vfTZnelgePfc+eC9ZQuQYyuLmHhEgLDGEELHzDAlTAwBjMJaMrzw3qSfYD5+uYQmdY1I/GVsKGY28n9KaELeBacdiyRSQe7q8hImIkue0lmDcNJn5bgVTTRjztvenAcGQCxbETGGlThKXO7jc3BhdjjnJVODRFijACBOWy6TxQuJ1yaqMaVBaBL0Q3Wpi07fBqVcqYRMWRFNqmzK1ZWTB2BGZsQQFUQcozKZfHWRJeKS2SOaKjy4eTOGA3Tr0kLmCxrI5tqlC1959fH7n+3fve2GJabEDsLKlgJXsRCaB56a50yxOPHVxMWxiBSDmeueHhatQJ3IbBqOxWr1/BUsjsY/+qvq3NZC2kvfePPjbmf98mvVlWuzOABcO9sTDnTlBWn23bnt4o0vSHgy3719cP+d6tor69M7kz/+f6x1gS5ebupamv2SWWk0ORoXQ/eVL//KTnjw2fSj+WRab1VBylkszo7OhLaZt5PhiNHKZDo9s7k5OZrqPBHqOxGTRgSRKEHrnpstmtu/eH/ttUu1Kby0rGSL0vXntV/4Gxe3buvuzty+88l9rp6trW29cX3b78bppMLhgp0txbjOdo2qodi3/liqIH7uVT3IL7wn4ljWvq7Lc5v9l69trg0iJA5Ww87T6Yfv48O37V4D3xbTSZ9auB5qt5jOyldeXaxs6sPHtnKVwAtSFIk1tihst1ic2Tqzvz9l6uB70orOgSnMCksBNXo4WBVSYVMxmFiDKqhXL9xAoeHYH2rhjnQwNaNFdW7M+55WwetH0sxlICeCOXQKN6cwgc4UcyERUAME4kAUoD7GEEWMEWsEGhKBCAAHHyGkCFEYCBKJExsLp9REw6zEWQaQ5L2ntGcGFFFOAUteUoMTJpRzg3C68c5sXCTV3fLeWmJUAmsMREKURJ9WgDjtotKiHCqwlpmt7zxoOa4BEEGKMNWEcC3h3WU9Syia6DKqLQVLpOplOMn/M38qmeSArGinGoS9dKPgXpqu/2L98JPe3vk4WG2qOVEkGImdUe7I54QcZsD7kJFay9nOL13jurQewikNMxkN5ZuM8hY6rRfyraVIuux8KWU22tLlJHU0xKCgDIpLZnP6d9kV8fSWVQUyKqBMAkrOSAyjykoplQogJiUNSVBmBQFEDGM7WFf7/R03E8XWCx9M77xybtIrjt66du5Pd6r+qF2EKNZWFXociiBOyBEqlUJRwLtgaqi4pmqsBrFw1Oyxezg4GAwGYj2AyM50MS5sa1qwoKUoTuF5EGQuphj0Vurgu+DbJmi1wLCyN1+5fsjX/uittSf//J+d/+KXcOOVkycPnlLb+K6qisPpAzvyw/YX1Xu7W9dfpHk1OWghU9rUYbF3tV8/IXd4aYhxrFgkElmoxuXxXh4esoZDlJh4BNmAIq2MBGmUDDGCiBhd1xGRtUXyg0niLxUVRKTwIE6PG4dUYoCWFRAWDNvutZ+9GxazIe+sNtP3L77YKnOcq7WMSEQiJBEFFWAW9QS2zMocotjk9cGwhrPKJlOa81MLQeQU+5Nq5vJ4paYrWwJwZtmrikrmTmlyzDiNitB0K6d/KVlBvmSk/SelPD/+BkmWS7CcJEwMItFuCegu8ZzMPkmWq0ocSYAUgJGOb3p1nNZvnBOyhdimT5KxoSVupiIRkltzDcwsCpHIKU5JKfFNcxyyGlVmsFn6VpIxEpnYttC6Kg7GR2azNjsG0taD9dHzl4YXNkLV3f7XP/HtxPUKmYv
0SCyMtS0HloVxRbfgS7HoLWbHDZcztLO2ntezyYn62jiILqRP9bd/L6Cthlvzn/6QNbiKQ2j33r+pn92yV7+2f/Pe2S9cffTJh5eeu2xfeaU+d3b3e3/W3rLrZy/t/dW/OwOrh+N4+2fWUEU+GkwvrYdw4BA7wKivUPtOJ+Nm/eK5r72ytW93H0xutu0hqsHRyW4h2rN1Gw6ZrKn4eDoRKDnNWbKhk6hLJg3apr1w7eIjHbz94x8NbvznLA4GNvrpDLE4Dg/D0dXtczIOj09i0xU3H52YvccD3nrx8N7o3nuNDW7qm4bhOTZznTE8+QBe3yy2Nsv1GgPjNs/J1ipDFnc/jrOjvYeP6MHtHi14dT224bjn9gb16rXXz3/6IT26R3DKDDTVjS8fTg6dtdaZrkWS7ZjCQSWGDsStb5hFEWP0WFizYGmF58wFddIZ5slgQyiRXKSDBDKF+qLregVR0bErJr6axMrL6ECwd0hnLo4OnOtPjUwjN+CW2za6YLEQhIXynBCIAiAiXiWAglJQSdb8nbGsEAaFmNlXBE0oESQ/M8kog5OOiTPBIT3PaeGcqqICqpLiT9hy8l+UKIa5k2CI8/T5+TWQC0teeolIYjKpKDSEyMZYohAiGIUtVDNZSaIYw8awRBFoUThNHh8MMKfQs6z9zfAOE2c9Uoa6kMfE5ctZFv4kYU5M0aVnAhgSJLCK6hzSSXxpuvGgPz3ph/fD7q/ML0G5U7GAjUGFl15YCizDSpEyIpjwebHUZJxPdPo+gIWyFReU+NSCZCm/XtZO0QjJIyxIk/iESFVi0g+pJk9cpJtl6eiZ627iwJFFTjVPwFuaK4yQZaQcWAKY1KhaTdgvCqgVNWJVDQp28mAMlFRX/ceLi49w63K5f3lwVO2vmwo2KhBdRCFSqx1YqUQduCTukZRGehqgxkGrGouWyd/bXOeXXuqOp75mANKR1YWd2YVD2bc0Y6mD6eoI01MEg2YRSIw2hY0CliYu1tizAA/H/RdfmuzdG3/SRsM66HMl3oXh89tY++LZvXvV4Qfv/4ufnNv+1ubmK8FNXRzo5AG/++Dsmd86qMizd1yAy+BPyBIANUAUaAALE0SDSBBQFsImpiClOOsgHXjJjFJVFV2EBRQ+htNyJFGIOUhAx4Y4MFSjAAQLRKcy61Wv3L0T5hNxPbXav3N3Y7i6s75hoCJshEFiChvUhy6QMWAnKl3ypMuUaTAYUZZ8umwPt+R4hXQYMkEhpSOoUOq8s01NevxToUtkal2mMyDBHwoyKf8M2Z/u9HvMzR5rPse5W6fEkpCoKsJgDRIlEpvkcaWkpDDMgCZINsX8JhJ/wpcUAJPlzOkPUE6vShWqERKjEBHMUjmdRR0co4hKepEhRhFY69IDmxtaWJHEiSxEIGqhjsAKAzIKa5glcM/Vj/7m5/Lc8MqbXzNROg1hOr/3N7+YTvZNH7G2dma4VJRSGl5w57SrRoOuRTef3jAjaYJpZd6gayteNPWb35zN2o7bwYuXHnzvz25cvLLz0Xtx/Lf+9ge9qy/s7+w4loFtqt6wHNnxD3+yyU13+/12cVj56f6f/X+LW59a0cA/3hB0YHJVZQch+rBo4rkL4/WRbQ5EKjUVq7RNlCJSkMUkUk2bly9uXt0+flQ3j94ZmDbaeSPB8aQsvR2YEDrrCmnFlgWA4AJ6whVThY6duJVb9x+F8+fOXXrp/v7eWtgsn+/bjbr10Q5Lms8OPvhsvL1xduPC+t6UdxfVzrE5mS3OfOs1un45PD7G0ymHmn1V6cCWZxCrXgzqj7xG3/mwc9///GeTp3dc8LUzMyx668OTrc2fTg+n+/sTg7A3gz95a7AxnIQGcPCYLtqz58PW2dm9h0VFRsDOBfXMFsoAiQRrqqaZADah/ewdLRhNwBgIMLXlALRohsOd0s60nPGgwaii1haEMC8oxI48D8axt5DNJ3Zlth/v/fDRYHSp2AtxwjgRmqFqrTSAF6R4F12IdKoiCJYjNIp2ktyeKKTefikJRYhd6Uoffbq1mZJxFVnLIhqCRy50+suVcznTsjEcI6IEEk5LLQnCBLY2SkSiLqbKndas+WFOD6wul3hIXyVIYOLCuQTxGF4ydU02yWKb1k2izDmeG5ochLLDBhOJgJdkaGB512SsLq9ucwYcJ3NQAbE1id7FBBWNDEdF1CR0Dq6pvnC0/u7m3m7V3K1PLh2veCsMCrDRKjOSPkUUp6LnJYj2udY/K0AplVThfKHk1Xpa2C0tprFc0SegV5WW2g/kXgJLYwgVFVVLnBjMy0uUgEyxphQRm+l1FulFKotSqvoCXhZmpNVcSksUMQU4CNlpF3pWDqd23Np6hQKJyNlPJ48ubR/1eHx1c/PDhgc9dUoVtFYaULFezblBBXEiJUIN1+dqUId2JmjMwJ4A0ytX7FrV0hG0AGAX2uzD9L0AAQAASURBVAWIZTcNHSH6GOcd+0YQfQcrYcVZBfueTJvCSbQrpnX12vTp3vf+RF+80V6/tKidg1MEZRSBn/7i3atvyvlrL1+2v43wSPYXoTrsbV88erzjP/t5iQKjsrMaEwu/DYHFCokoBSEEkIA6IGbjNlbNGEcAYAyLAhoELEFsSk2Iy5OVFx15xZOAED31sFzy7oSURSNrP8jqcUsGAmkvXXb99WnULgIdibYAiYpoVJBhw0pRolGSBLISdxJSIQ4AS0jQTAYulIgVKeUw1UtKdpOpuDJU0+iZCm2OK6P06CFKWuFyPpbIQROCzDXIwDEgpMlFLQHJ6SEQgBWd71zhUsYUMVmbyOTWx5BMIhOlK90IMCxRUhoG6XKLZPLyXD9XEGQSo4pko6DTTRFIVSSCCFGhiijRFYXrVfN5l5ihRDYFgCkMwFCnSmXR9x2gDrAq1jgbogwGgye7O7snJ/bD6cTuszFtMxP1zN7aYZgXtlOxMdQoOl4gFOAFuzqwb2fXpLehNJ637Ln65m+tjLZu/fH/59WvvPb43/3bzaENi9aGtmv9Ikw2qpXF5Quoef3S1XA8bxdPe83k6E/+p/XJbPej9y/Atffu9MEA1mwvjDZ0vO+roYxGbn8vxAmMLeAnz62rn4vtTGeJOHSLYAwvDJdMAbGVZn9q2G6+/M3Vi2cPP/nLZ/sz41Aa6q3vCTe2MtKKeEUSmhpQDxiABnSC4YTXxmFl597BC1/fuLC+PW6mTN76qTfaTOfd2TV3HvUnT+9iv5XV4Zl+XB+dvXblSzXVzeaF4cXIE44Ttk+97O35u5/5ncl050iePvInO05b6jlqxs+5unv1+g/mB8fz5veufeOdT99uFdfPXmyLwftH33eD4dbAhpMDMvA20lyqV683UhC6oqgggSOYXNfBGCR2PmdfOSGItQyx6mG8k3nKgc5R9uioW1s5HA4Ex16HBfmChYtIYR5FWho2VLey/qjbt9Pm3MJNRTAlO2WZSvBEDagDc6caFREUgQAIcSAIKJCqJoyGEfKGNu1uRWJcSLs0qJFUL5LEEIhLwV8O/Uvr20RoXD6wYDYJ5I05K0CT7VvOC8cveScDaSGMJO
BNBq25kc6YjbV20XrDbI21RRF8V5ZORUIUa1KLL9mtIkJz8meytuKU9ymMPH9CzXIRzsyS0N7MzE7BQipRiUlVJUr2HElEG0ggFZUiIhozsf5CM9qZT/Z681v9gzNNjwUzGwBXRM/CwmA2EtM0D2uL5fYdKVglsZaX+mNwYXKFPJUnp5BlXnKzcneRsDFJHZMud/pLiwQkSBvL6ZuXautc95PRMCfSl82DjDKr0TQwKSckHUKAhaa4eIgyqAA4B0pbo2UPDx9qF2w5krqI4lZ/cbzxtUtPR3b3tQtbn9yujKpT1EQDwgC+8lyzVEIOqMFVlKElG3gA9u3AEmNRrFVUH5qCWUsAMUTXsk4QCh66xXbNg+2qx1T3qNc3g6qu2Hhp4tQc7cYPHx4+OJi6kWu2SvvGP5pWUgR2mCuLBqHChUZ1uj9/sHv04CfN8G61OGcV093H0/t3xO1W56aEK+htauCysEyIWKhlCV6ybwtUhEgFksI9SAkaEkMnnab0k0xecl2IEryx6T1MhN7s7pnOYpBorSVQkJhot6wCCqLwTBenrR3vdgiMiqeRpvdfKcvP+MJTB2UTotpobJekeIjgwriIkOPLRBQUQrRgtlYRiYSILZu0TgaSPicTkpMBdNKrJaLvqZddIlkkZj5pFktkM7wsPuYliqIEUk5PuUCRg0LTQy4JVoISLLgsKjIcxNuEywRlBoxJEzsoW38Jpcg1YLn6AiU7DyVFBAipWhKBlSBEgBYKZVaRGISSgR+ylCqVfABMxncSo1dhIbLGUC69htlBrYiNkcVWUI1ihdj0WR0xmAb6i/c/KTd6RGijlF10vb7MS15Ej3kpoM6KV20RKljnQieuiONJc07pS6Zo5rNex4jh5OP7w9+6Ut54bTFt+bMfD/7uH03NXPvo6jDYPMuTR7456X3pevTTxeyw/+L1vaefrH96p+ZBWbkgpjR16Lyq8xpo8kTYFG0TJ21EK1QV825+ZujPni2a48iWLXzwhkqm0JM6dLGbB25tsQCmevL0xKyvXX7jN+z+uf2Pf0DzFmiHQ7jah4k3gUMICBpdpAGjksb0Zxg04jz3PLvb9+5ubR76vR0lsbGbYb4Y1LyYnkQrX3nx+YeTcHc8OToYr26Vg/X+yZPOetDEaIzHf/Xn/OOf2UbsROwE3LCzXl0VuFxZHeilK/frwQ/uvdccPji3vd3u3vlqN7fH4zP16l+1RyGEq89f23zyyPvjajgIHVoNvZWNNowDS0UUyKByTqI1JgRwYQALGGjebxhLIiJdxYHhVSgysxCiRA0p/Rwnw62Taq2AtxKsemOli7HVQSulbRwOJsV+S6uuWEAnGqeKE+LjaFqr6pmDSABLevyyrVsMIBhDyc4fEBVDy+i0ECNA80Vrrc0cD0kQLLKyIs9bKqnwqsYUTJRLLzGTtWA2KohLIaxJ2UVL4amoxOWaN29XKbXhS0QNCckkCSFqKKyx1qS/Ya2NMTJT4Wx6CamzT3CoyikzC8kkJ9f8/KUpQ8jEZBlB89dBRnpF1bilS85y+ZyiyQs4FgQyiVAMYkRcPV5/Vu3uu+ntlaPX9kfRkmVVMiKaAgDZcHJjVmiiLGcFtCBbdyxxstQyIO/Hl/swwHKemk6vvDxMC36JA516f2aGFRWQGkpQHijpRrFMk1PNLZWlJDCGobTrziACq1oIS7J4Ypudq5VBjsEwLJ3TAoZNuz+5/NVfPXi0R0/uVsVoELD58WLnV9an53l8Ze9SKKaY2VoxYB1ocN6uGCkiDZT6sKYb0IkN01DVLJOzYfF0e/NM3AUPhooFZgDaouyM9SXr0/Dt19avnBlOd0+ak+losD6ZtcaFBzsPK67q0fDcwL5wefvJ8ex//fjw0PZOqrBiWl+wKarOkOkCO9cu9qVdtFyp9JpZqI4/kf1WFoX2unpULEx9bNZnWxft1LIXMMgRtIJli5DswWKUhAsKWZLIpESOODKbfPkTJLEXUxdDWcoFRggeRMm6EvmtpqR0ZyCwWlhiGymaSE4xszK/+GJ9+CxYW8mEx7O4tn5258Ozhk/OrJ+srE4Gqy1bCa0VzxqQchVSnqiIsUZERIEQ2VIK7EtCoMQ0ON0HseFkeXXqT05KSlEiMwPmtIySplDCpbUdsso2Oc7mpjrnaSFTMFP3qEtmV3Lm6USMKkViESYIlJwJIohh6Z8jEtOqXw0zgig05ZAAyXE7G7TSMqOT8jygUZKmgymvxBhJ908C4syFVJWY/i8BBkpQS2QAQ3BMDrCiheXSeyvKqIASWhMq7ffru49uzXiyslWjixI6CcIxwqi3DieipY/UxEgF98zUEztFEFtcqfpvHE3VzjtR0dYg4OH73Lxy9bfevP8n/6qWtjm4F5p268nj5p//3xeTJyGGIWIcODq/UVaM1cp1QxESu6AWkUXaztoYxEdLHI1qF63ExrJzFkG1jc+/GrwwhCRE4/uV8423qDUGaRVzDhOxjskQ93hcW7B7bvT8tW8Mpw/eHz96v53vlJgO1r2RVuYCAVUcbK/VstVqrCsNDSZaW2vMs/evbL/KW6Li7VefG82ms6eH+93Joi26hdgr25tnR3z3KFz70itNjOSgJnZtsNLHaGOAruqvNjGwcmTxGsJ8Mdg8e/u5Fz7Y/XTv1vs9tIPR5uaZSzjYNXc+KF19t+y/98Hf9Efrr/QG4YO3XVU/ef56fetmefVK+fyVvce3C1eSFs56FgSQtUk/awGSqExW08RCHXMAAd5iJlBDIHQqlUFAVNFWMUesybuiY1WTwkFIp6pNBCA7cXrzcOXiejFg3wifgGdGPcMr0AIBCARhw4zApAJhwyIxAY2ABZJ5TmLLAkhhdpyaR2Ps8jlNzCnOLInl45CkfEsIEks5YJIDJPGwIc60rHSLqTCxpGQGCTHpLtL8lshfid4lKsYYa21KN4PAWoNEHiaiJPCFWsNZsJzpIjDWLodzMsZoelABcPbESjwLAKfOtJq2W3QK5GW/amQCpVq2HcNJ5JRTr8FGGwrZkjPPn0xurk8e1ntXB2uDtm7Ik62c9UsZxXKsTY145pvJaQFOhn0EVWElUaRFSFpccwbgE0DMoBQxQSC2DKHkTkIQYuKY1JNWJPAyjDYjCxnlS5HpomC2SSOauOeEIqlGmYyqTe+rKKsyUKSkchhLalUAYeHCFlSpC82iunjh2t/7nY//+E8Ob3+4UsmZZ08H1fmm1r03N7e+/0DOGC0hdcSA7Krtet6skO1pT8YDzGrTlhdf3Hzj6735yfHdj6pzzYOP3g318CQ0I14FsA+C1L5cebxmzIZ5NH68c/8eHGZ775vChalf39zc27+388m9y2de9Y1+7Y3XN2bF0WH7mHtfvrDS8qKSkjmwcOGqpgkMmjeje9X2Fk7Wtt8zazU7xwUWRf+eXni2eu1weKV65MRbu4ixGPnYFERZPKcCqlS9IhmGOo3BWDCMtUXyR1QoEJ0tVDVGAXGUlGMEstxzVee7LnRMHFWSQ1mmUaSVqxoTC2+Cg5w4986Vy6NL11obQ+vZEovohdjb2x89fbj6e
AfOtRe2D9bWp3YQQAuJRfAQDRLBqXOFklhiZpMjB5Ydb2p4Q+wUysaG0OXu0xoNSdDGxphlpy3JhDQF92JJdUwrE5GgafrMBrEKwCYaP2m6+DRDTZLOP0FDVECM4ST5qAqH0HXem6QjVGgicxIh2dMvBRWSO2vmz8EdgElBECGQY+M1GKiCkotQSDoxZglqDC8WnoitdV0XYgLLmWJOpCLLDNgoToSJHFuLUlFTLFX6Si52vfbJ+E4xksa2ItF4iSJxHsUIKdZWNks3enrnZs+K12NnjNcpC14IxZcmk9ZpMKjmc0NkyLrF7uLf/48T63o7n/Sd9T/+K2goGGHWDbi0zkxjO7j+lXY+iQ8/mXz4/xruPuJqAN8KF1yzFeOnYq1HgLA1xEaIahSVmx0+K974SnzuUjjeL3nVagRxlBCsBc19JF1YuwAJh0bUsqkLFDxdW33a+pa7c5df39g+V0z35rs3Z3sPG5y4XkMaF1ospNdq6bmeoj/RGsA27yM0Rze//403X28nY/vcuULn9KWrV2ZN++GD2aOW3/t0d+3s1h/+ne/cnQ4b78Ww6ZityjzyaL0FIXrpAi+YolotREQGm3PfvV6vPnrh+kf336kav17XfP+TCNDVL/507yliePmVG/0Hd81w9N5wc3q4/5Z2g7e+fezbMF/YuqcRCiIrVhiE5BWlSmwIiARiFMYkTUhUKIITDwAQScENBiKdqFfTshiBEZAVK7716kFTYgUfmq2LN1zg9tBbD0zBLUtLhEY5iCyYO0AgHVgAtQATbGG6GIhIJBASaVNSnyoqSR8nopYJGkDGWhtjIMNRlRgEImElYRApmBEAkUiZbwRjmAwbNqno5uY3DV5EsJRo7Mn5WSSk32o2ScyfxRARJOM9AmGVZWFkIolLApVIMpASTR49JqmmJPG/JDJIl/MjL83rOWGhnO8RMhnGJkp8jc/HUqR/rkpQYi4yn5Rh4GCY4mvN5d3qo2lF7w+ffhvPKxWiBONcNCkaMC+QkxtR8jcxWCqGPs+Sg6Hk+Z7FIrR07ksc1uxoItnFUwGY5L+VcT3lzPs0ghiQLjYYsgbBU0x3UXoTDATMRhWAJTDUEBliEyKILFknwlCGFoSCYFXZwMECFoHBNkrlBKEaxgc//JP+5NGL//jvzY6+efzgQ/vJndHhbnflzOT62cNPds5Y39VGhuARwkpHfVg720RbVe2Im/W3vvbug09v/fn/5WtffGP1gi32736B/WuXrs7YvX3rEMDlwdqkneyEJkTXG63NQ2jPy6rr16PewJ6vSh6Px8OXBmeOL88Oxu2T8UE5Xr/MqKyM2najrNiGKpByjLaoxI9nsi465v3rV++Om8rMXD/0gVb6LdZ2urWdc9+O7RD7nqeQqIU6YmYUwgtgoSCG51w0RRHZuCgdU861pSz/ZSbWlMCRQYBcZgWqrGxZBEuncghggCKKEjGrULCqQchGUIgTzDWQsZxDiZ2ZPX9+/9JWr1mU+wfDx0/OfvbZc716PjpzfGZzv+q1pNwRYkDUgm2nUsEECsFEJrKwOZSDEwmaRaVbeGI2xCDtJFpQsm1RkbIqZ7NZURQMYuWQkkNTDRUYZgkCTi1fgmDTyWVDnGxRmRKdaqnbSiBX9qezqYJaw37hFXDOxpiTiQTwIsxsRWI2ryRhShYFqZ+OrKSa2/sEq6uqilGGgiEM4myCm1hvUMCYQgKETUKVma1yoWIJzpgKUnmxZGu2TiqKhdoVkirEWl2/MAM+XpNCN+tZr4TRIN1Jq62UvboerJvGeK4O/+ZT1NVc4KJt2F8mvMGmkG6GBVNJLnXOcdFObeF0714VVVS9bbmwldqgDSDooZ34Hsv4f/7voTQgjERhe6IK5wQwLTfG91yMXLEEJyTRB5XYtJOwWP2NXy+uffnhR7/o1ZUGD1NEaaMscXn1iBznhiZItmvxMBBD2O6NtjzX8+ZwxG403Dy3ff3k5of3P/nxirMheJSD6aIL3PNwB1jrIW6bZ0M5KovONCfrE2exsH/99t9YlhrFte3117Y3aU/c9vbVazeOJk3ACrOFIqogiIlwKzXcENNgQ7aUa8O8qm37+NblR5PR9uVntuIAX8yqyLLYXx3UO323c/NWPRhebEJ8+rhaHY1D97yguvgctrdnt3ZdWaALcMZGgxxAkO7VtBo9/U+YlSkCERQVBQeTBLO6tI6SwOJFvKQTKSqeAsPEJqJRgG3jprsnW5vnwxyxk76vfBu8n1QMDR7cCUcmsSSESCTCSiqKpOoMadyUGNImM4SQ16OazCMJxM45IkJiPZ1KTjmL4ZO+NPE1iJbGxJRM5onT8os5DbW5eQWJCAuLBFUNkQUhJHKi0jKeNq9EJZtViQak6dSQiTkLCcnNJyFDibqZMsokhlOE1EtMbjvMHCUykYHJJDDK3w1l1/ZEGc5d9XL0x9LJK7cCGaBC8i/BgMrr7daPq8f77uSJm57DVqdTp5XYDJ8ni4Bky8uJsUJ5tEiEElFAhWB06apHbDRLptSmOTYV4ISRQ0+bCgYop8QBGYlcqogzv0s5Yb2J/iIMZlUmGAKDDVEBTZJfa00halQsw4IKopK4EDBZFQexgFXDgDNMjX/wcDDwZDf98c9u/+lHg2++vvF71+3v36imi39l35tuVk+/ur7+8U5Yd6glVNH2qbaTddeuuKYfDoZ6PPjBe9909YN2r7fLBcvGYDxr7p7xF19cf+6lGwygHOGf/eVJU1/2pui1+5uXV1/7wtWetYf7fhoOR4PtydlzFmiolq7gdtHOwmg4Kbr2+ptng07LSlXmJbMPbPsId0+Ks4CKHw/vu6uGK26ainuNK1oaHZj12H8Ft7R4yhQ41gGGeWE1gJFMxChH8gBAUCEiyQqXRI/TAMBqlpJnv3MBGQZUVUPoEp+Rs9lUtqUx1iYtWMpFYbCxEIkpOswmQrKAVQKBomrgzvXnl1aay1fcdFYdPq2f7p57fHuzqtrRmeMzZ4/Lcu64UzGLzm1UMiYPtZYCKzSAbZBQw/oQxeY+IAJWYZSiFRFKnetkemKsTaKiGAOSvHdJjFbNHuggxM/zvilZb6ooWwYtCZ6USfvEZLM4HYmokSZdBrEiSFBZPl/MUIQlCXGZBYEMORsmTbgvCCAVo1CkQEeDpSwsi/qRSq9l5hi8AHHhlQxTQbAQIzCkDloJV0w1OysD4QoYcKgDquhWDa+oGdGt+x8U11bXMOhZywmdEJZ53H98OD98fHz/yDiYcyZMjU5Lltl2oZgddVaBGfxJr2Vm8YySoI6Nh6jCEJxFCDG0gFd13Aa2pOwGgYU5FAwiG6OALMMEo76pbIhCIl0AT501gw1eW6GX3jh34xUK5v6nH7AllQBmUCC2hKBQkbkqiISaAVlNDWLC8EMINvB8tD7jqqHpiUyfHp6cufStIq5//On7dWW7ULTEge0i6Bo369jfosk6HRZxZjH396ZbmxftAD3j4rOd+3XzZDgYbPTc5av/eGaraRMWHDuKRowhVmYKYnqDOVM9sL4NmEd07LiA72ylfuq9rcbzI0gY9Dd6
3TpuQsjAaBq3Bzc6MAKfIOyp2LV/I3313dvFBUuIo+eckQAUlQOnOM5kFMtSPUjJwgOI0CBsx5lAjWEbnUXyKSQcS1hn6tcuoZcoQKgNWyckDkegeR9bMiAwTXy/f6+jZB7vqMEDBKAipESwpVSLJNcegSxM0gkd0+cxInjQNhbztOox1mbkNrBs6xQwXoIz5FBCE5/G0NnbO0G7KYBA89SAQQmVM6uHYS0mcCgog6l61W9Wi08fqtz//2vffPZucvvfhSmrupKq1de6kvX9Ne+zpXUZmQ+0gp60ds6XQRtRLSQB4VCTDESACqhqhJo6pgEBXM0mtGui7p46K0SeyWfl+9GhMB+g7EXD9ck9h1Ctihc4auQwMxl7tIHAGgA6ZW6wzK7EF3/JOPdm7uTr5Y1B22hPPnyvzWteLmRGaH+YP3Nm9/sM/hZDJ94Zsvl1nB6DsRUY0Msi22SU/C9hS262JrBZOFDp8ezSkQrKybB8rRltaUfp5tkNYATQTuwIty6WsgH8x1jSEXc8lx+9LTbvT0fL70exVuUfDagFVdjvzWr96/VrxSlAWwi4W3M6FHx7Q4onoeGip+/1vLh/fQDoGUiIWQMbgOwTuB2EBARIKcDcAiMinQAIExmgQX+WxrmEXY/PU7ocBljIVB5EwBWZSf0u4Ll25++PC9r7/x3Y8++OTovB5wqTF48K2OZyGvqGh5fBbLcmky77B2WhnVqI1FUSKnIaqPgc6XppcG9Ny8Q78aUldNNnhyuXtKzOXynZ8Ujz4q2t2mmvLFW5Pr32Z19cOjcb733i/e/ej9N7/79X/M0HXQOQQDSzoATAcHyFQM0u4QUkGaikoip9KNN8ahS2kNRo4ckYgGEU5ZvKkvUxOT4WjkGNgziGlK/zEliMkISMCAhKimPf6GADV1yeuGeC2+FHBe+5VnOg7OokU1VCMHgKi9PARpLYeitDPudV09wKcj7FTm9TLPc0YnIsRsGpNDABDLoqyWKwB0jmKU1G1n7EWE2UeV3OehC21UZkaDaCqxAwRCwx50pVFBLTrX73EdEiKqJqa0pUrf9TEsCK5nc6lqxuyYEugnaVSpx14lrGef3AKYwmSod4ukmx9Bk0NCAYlSwFVaixsmAYMSEROooEjHzMkC7ZANTaR2WBA5AGHKgGJeZE0dMAyYSQJADDotXvvL0VtvTGd7o5NqOdRJE2jlxgfMC/CLhXzztefe/bj+b346v3Zh/xEpYFETV74cZXGGOJ+BrzKtDCrDc3IrhKXBUrF1jv3FNz/OP/plU2YaurA8oeU8Gnl0oathM29ffkm6MPmd7/Fwr1tF1zmfNaqdyxyAkkl9Ul/4ypdPqpPVe78cDTYlxmCKKKiBHSgEx3mOmcUOT++NTkzvf3D4kz8q3vjChb/13YP3D/LNsyHywePVi3/wvU8/vnvhuf3BtSv1VOPTj9p3f73/D/7pcv/y5NrF80/eg7P5+Pv/DADO2+PTv/rT3W99ZTn9RLfHhZ/MDt6NOy/Z4qjDkXSzZv/a69/81n6sb3/4QKcPBg/vPkeD6o3//CAWjfASxjHAyk0qnRj63ezcY+0x/voO/a0vuVd26XC++OTnxy9+9XNxEO9CdmUcQhUzTTkalhXUdWYdGKBEpcyZqEYkIHCMqFGCARoQE0m01F72YUXEYBJNIW0twNLACR1YTEoGAFByLgkdOo0WLW00TdP932dspUNJROQcqK7rbFW1Z7GZRJactpDU0wpgSmuGRnKWWD9f7EtTxy5GSRaAtOpCg8EgF5He2a9p39yDu1JJKhr7ix8pXaiqYASdxvSNadc64qZeZZx9/o03fnv79k/+6sc3Xnzp8pUrphEM1RSQikFRr1YA4MgBWXIaMiI513Wdpb2aKhMl2zVETUOBXqSGqKrJ55DMETHG/qWWduQQnxmsTXV90yegOyWqgammTBdRTbjcpNV26W0MApA5RTJAsEiASXRJkbBgzU8/OhrzaEMm4+lw55u7dmXQZPem27s4uVnf2i7eOfqD6y+XzN00tlojgIKi51AWU9s+4wt+tFM35YqG954udGG0ctCAi05rdQXihjstBgZjBInkg+UB8hJWAiwGXBRB4Bz8xb2bd05WZzo+s83ThvGktjm5KbJyWMrPfvKTf/TG36lkFZaVPnoATx4IN447nvD5j/48U6Bh6VZVpzUBBfBe1Zw5BRTxvuyMu7bymQuNmKNhztg2QkLHB76ee/DSVkuisiNhozYCsSPgg+XG7FAub9385UFVTl5WnXajkYZuEaDm8UKHUYdVW8gSZk9WNAWYqVsQLFErBVEjAi/qwnxVX2se3HztpdX4ajc9qNif799snsCglfjBj8vzB7QadVIVt74z2v9qmDXNrBrR1uq4Pbp/11vx85/8+bUbz+9f3OpghRgRDUARLV0DZpo2Ksnn4ygBLjIVBbCu06TPIHIGKgrFwNdtKxozRxrNJaEjwNn83NRCCBrFrM/RTWeeiaEPxlubECG5FiDFHkIiQZkZoFoaz3Zp1pWIs9KpmlDmEhIvofXSIU/zNDBDeJbEtzbrRCi8r6pqVS0zphT37YjWWCxb1Y33GQK1ofE+A0TpYui6PM9FAgGpChokUEASmLBjjWnMlfyKyUaZfOaZ56zrgqa5ccrzNVhHChr0uavppgREsNg7otNfR/ImppEz2NqngYD2WWAr9UnBzhGqQdQk+jCHaXOUZNOJG0SWvEiJ/0NEierliDOf6iyNbYIPmDaOHGK0leeAuuFp1m2V2+NZc7odTkS3TrQM1ACHQeH2L7z5cFUXebXMLl3de/ekPiwoEsQAXQVZHdxGNlx0sIBuHnWuWAHWBg1ADU7d6N6x+/itedFAaEaTcVU3yqROAsQMvZ6vLn/5D4xJY7Y6PUH2xBwjORV0ZFiKSu7z9qzd+VvfPc20+fVPM3LMm0S0hFOAkhRWeuoRFUmdESGbTbYnAu3DH72Zjzbyy5sP33xnu1mtHh+3v3m73PuHH73548996x+8/5c/v/HP/6vi5VfO374bdkrrxC5f9hcuAcBolZ9fvTl+41sPfvj0C//sP18cf1C9s3Xj1o2zf/OvNn05Ut25eLN+MvvFX/2AYYEOBXe2v/D1125dPXrv7JHs5UqV26q0yGVVwnRAwtSWoMWoeOfde3Wb/eG3bpRNu6oOLjhdtLu0QygxKkBQ8GADI1FQjGrZgCyqBec4Z3Zts8i8Q3QIZMpJ95ewEkhkILDOHExjriS4LH3RNCuTmOe5c1xXNfHaNN9Xlv2PNDvVNfQ8Bf2BJJs+AGB/O/Y0Oks1unOEmN4kvYyjL0bXRb5zjhwnf13XdQTkHMWoIpIszKHrNCboHkZTRFBYA9jNAEk1ppJdoxBiIhWyspoAGhCKqqgSYiMBFF597dYnd+48uH9vZ2fb+qPkoIurrnYAWea7KAm5kc5uBOtMKZmW1USjgqFzpkqAElWp12Rwb1sAMIkASRMKAGCgGtUgGSnTn+yaBWZgBsi9ulIjJK61M+06AzQiJWQwJiIG1MxIQQNhjg4RHJkStJxDW1WXvviCe
2Eiy9ny3pmfnw+uTvTGC3jpueXmlVX5kV0ZfyzXB1oNxmc5b4XQOSJ0+XHcmsIYN6/86v2PaGdH96/W9x6Pwih0G/OTGZ6TU0p0Jgru6Xh3NRyfdGdjWo2gGkBQJCNC8EElDMbR3/jw9C76S2Cj12ljcmV7+9pzcIbF2fJ4Y/7Dv3zvrfLjN1672c0rtzfRWQ4SgmJmUg7H1qnGRrNciS0GnxXQ1F29Yucwz5ex4yhM1GjMM9+pLVU3qWA2JB0wgKyeste2DhmTgI4yJWyXwp/M2FXZ3am+cHFva1B0288FcgdnT8AB2vZCc6sxC8yzyMssBrRatYa4EFgBizMEwTitZnv7myN//eTd+5lXHWw9ubSjD7oihPb2jzbrpkWnZsUbfxv4xe7OyQe3f3n39kd//3v/Yn50hA384//1v/y3/8//03x59MLwQj1b+ZzRpfOQHgglgmj9cpAQLNEfpfM+F7VWVqDIjsDQgI2k7oIjSkpFcNTGiIAOgckBuigKiIjOFA3TQxt5wKqa0MqJWAdruysiOUYi7EKXEhwcZdE6hUiMacODvXBrrUdlUlMyUBVMegW37p9TmW/rNlFNydhnaIoZ6xpGRSnrCACdqaiBEKHrpRXmHLdt6xwZWCeC6yE1MqFaRq5LrT8hmhFTWhADQJTYxlTtptUXsOMYe6btsyFXmp5hMhWBEdFnFMn+NdfX+mmaDYhmz7bbhik1zda8Z0gtBSJSYnCmJXsSz+jafdz/4SMhQhQpSq8CQYWdVwve51V1Pt7YCV3XhEBWQi0k3Cm9erj/ttx9cjmDg4fsdhS5KoSH1NwPW7uj3Xzjp2+t6q7gXK1WDQi1DTzHYtnOEeYGDcASqQZogFrUFYBB+cE7wdUoJLDoVllz977/3BfyCxexni+9lIIxNlorsPeFA2miuswNBMki5RSETKAFouZ8sfvt35tfGK9++OeT5pgG48J5EommzKqCZC1BFNIQFCa7o69+3WOJTz51O5/b4N9SJfO//veXwZ389//d3mjj8Z//L7vZRj09PPy//HjypS+Ot16+U+ul7/+9498+AoCycK98/x9/8tMf7NCwmXHHL+1+5/LhJ29OYLIil413P3r/vWmoSxzwzgt08YXy+Rdl/9rd6dPJLm7Mxh+tJpuxLmjuqSmhLbXZgEYxcCwGeXns7Ce//Hg0Ht7Y237XskVcCRsWJCGSNyhIJJIH6pA9mZgvfNOsCLIoHRKpCqxbrDV/FJDQLLLjTkLK5zKNAMSEMWrVrEzFses66YIQUySTqGw9kl1V7NmdaWZmaeuE/RgYwBJSug8/S+2dKGQZA1gy0Kd9aJSU75LKfQRQ77OEoosaU+1sCl0nyeOXZJxt1yAgWcrV1GdyzrQU78F8/coFJca03WpMmFG1N0ZEESSnKgoUV/W1688z028/+ODKlStlOUyXNAFmzqGj2KkREqEhRTWQyOCkDxY2UGVikU7TNonIoiEhIUqShkAP84FeQAQpAQIBYu/aJlUhx6kOt/5Eg6m6tNpmiqBKqgBFookBGjMQI0Ukh8qOMgVTbZlzcEAEm5PNruke/Nu/9pNcN0X3VN+T8bUxjfKta3TlxVfmGy88HF/KpM6bnawcGQki5MX4uNuoYFCvyv/2T3/58tf2vjPxZeXlqY5kXC3mUBMogBgGhAB6rvMxb1y8URdqzXSEXQRQBScUVCc712fL4dN699p49L0v/d2q2qJ5Pj/pGFfh+OPLw8tDfHD747euXCo3dyZLN4aS4aQeMIhh19ZA0XcWXPABAFjjXMhl2RAlqjpyorYiLCArqG6J8VyikBtznitFGs2aJ3XQAkjUyLlgxmLggPl4gKXWXt+eN1eu7ZYevCPMrixDsCXDKmADbd3hykGd4XnUWYRK41IHlqnGsIiz0/Nia9TOs49wbzwY7509rulSWO4Wxyfw4Q9HmEdk5rH70tfRLk9m+vbb7997/z0P7qd/9kfo9OrlK5NSv/iVr8+XjwYDvwVF2wZVJdIkkiUAjQLU+3BtfZFplMl4fD4/FxUgUuv9eybmnZMYAXOyqCpIzlQdAgEVWYYIISpicIjdKlx57vKF3e3fvPdRXnokJVBSJHSKGiECKBCIIFKvNCQHMUZTJUVVAwQHZAYKKZoJGdFiB4hJxsBEGMVFioSApFEsJcEgqqNo5tKcB8HpWs5hvYWS2PU0LFDOfCvdGmAFLiWnQL/BRSQATSCOTqM56l91BmA9ZwN7OajCWiEVwbQTS0usnkNrzEwAKQ7ZMYGCqjrSTiM7R2k3vEZp9omF1tcUyaYVQQlAVMgIDIgoxohICRCdBC1ELlqSwaJDjJAckJ0xqBEAhdWKXa4hiIuIWV0vAFy1PAfwBDnFoDUrCFW6AaOL1eh+WT95eZN/s/Lgp4cRdnNu9Avb5f/9T5/M3bgYmSwCNsRdlEUHw6gFuZnGQLAQaH1oNAuqlRWcyZMZzGfio3WNN+usZtT5r//y8t/9L1akg86iI6cIHkE1RjAHBKgamEABFZ0aOYjmnOswnM233/hce+PK9Ad/7u58uGvOBgUQkUQTjVQrEyFlZRHufXr63/1rHBTFzuXFT98sQjvcv6wnPsqi8APtuvLgjlpWf3p7n0fn/+H4+Ox4JIBVV73zMwDIvvoq2MXzN995+V/+1+/8xU9vfvHaIUz9f/iZFFefapTFabb1wvbeC/HaNdjeBeeD0WxeH53EbmPvymjGXX2gk6GKt2ZghqxKqzwgAcxjPRCqaPvkOMygFRRrBy2UjhslVeUMgMg4LVsYDayVgOz6cY4CE4mBagTtEugCMFl+1UQkKiEkjgxAYmuYQ6cRHLkOokRxSmSUZVkTVhQBkNKIFhQEIXnueG2QJ8JOBIlClMSm7bou44yQYtS2DUn7J9olcfSz2BAi0vRdheTtJFDVqIB936kYNZ0cSMfPRM17Lyoa1czSDrvrAhilxt4516kQgqOEwlAx6j9MDQGiSGJF5kWxrKr9y8+xz+fzOaFlWZYXZaJhL+uaHPVrOOqlIAkR8Oy1ETSQ9mO7aMnYkWZSaBhRk8KiTwk3BSKOGr3jJnbc04LYOnHsDZ/1DEBIhhCjgmmalTEhELnUrlDCmyAQJWE5RUzWb7Uw8r7cG83uPZ1sjBaduYb8DFaq9ZFQ2cyncnDvbEjivxi//91vV48fbvNeBHC+e/PHP7l48wvF7qtPGzmGvYsP5g/8k6OP5tu0P13e2xs+d/T4wDVoLWFjqDjhyezRrFt2OrAO8zAaqgh5Z5JFq/fh6uHbn8AJTQ+qQzhohGjalLwFgYUvcqtXxvzh0eM7H793q3ydupWAK4kCBVIii6CoGZqwkpl2CMSQQ9eqmtMOoxp4uHSBzuaRnZrlDjpon9RgqH65sNx5cgICYGRYdCgqwMw6E9cQF8OizE8/mFYlX9m/OFCcjLYPjo6pYa0h6wha0lmEM3FLWE1XQxo5FedDtWr2dnIBf/5gtr09kGV5NHnBQ0mfPHb3flUCAyFsXYSXv+FwMxzOf/KbX4Ywv7TzytMnt1f6WOrsxkvfOzl9fOXqlbK8
UVWHly5dfPjoUII4VFMxAIRIlIxAQJQCGPqYsCdPniCaI7feHmGfEm/g2QUVSSQwEwfg82Jez99+79dMPouaey8aofCns9mimlOOhMTAYoJMCgqqDA4BogBigrNTNOuiRFXsNHMce+KqAa41nEhgmhA5aXcSTZ1jTaYj6YjInimsJJ1CcKgOe3lh0lkjEBCICBFlzGCQZWxRowNIib+I2m+UkiEymcHX+u31YG79L/vNcyLRIVgURUQi0pS1kN5HIuCwC9ILO8C6IAjA7IIIgqmaEqioS3xpJpWY+awLktgdAOl7RohAhApKjuN6RmiqxNyF9tq1axf399+//du6WTnO0kxATck5FSWCFOEp0vliqEpg6ggaadffqgvSFFRQO6gkkDafG1w+mx1WeTeszRo/ny340lCO4U///eL3X7zw0w+qp7PIPoOg1ilVfqxKWd3NCVYKrXLdZIE0wEjoQa17jw4NKo5diGLea1QApMNPwuHHbu9G01WEAuoMc+Pgg6pj1RgZUIGYRLpILo/QovLQNIRP/+iPX/ru98v/zT+p7j58+uZfF4d3fTBgx3mOuJ1LE5xXZTegEbLrKDw89IhcbFbz1keJaqKdIxdZHWVDP5R8NJw18tY7Ozmf/uv/6XIZAKA+eXr8zrvXazv+f/3P+8uT6sPfCK/qwYV7xdZob2dyaa/Y26qZRU1XM85GnLv5vSc2HpSus0X45kZ1QsvfnGWIxk3NMWRRnDatBW+FukkGUno+edLmg9EihOU82/UCkIdcY2cEoI7JKQD4wocmqKppFGm8JyCVEAnJ0BloVAETA8VUZ/d8GzIDItRoaRRDzqmaI4oCohFNQ9cxoSBQmrIiEBgqRkBCiKpRlYkMKGqkXo6Q9I/YiRChSEzRgaICfdWpziWaL4hEADPqt6qwhrgamAAS9IYDUkDo71dzFFSiCDOrqWpcc6F7smwnHTmXRmLMHKKY6drmCAqfRRVnmfd5oaq7u7uz2ezs7HxV13sXdidbu2pGnAFAVGNm6q9vVUAjEBERSeFv6fQnR+LfoGmqU1CHikgGrKb9lM/YOYkxY04Ky051XA7rpgH3TKIJEBWSlZ/IASWOQkpEdewQ0dA5dIiuJxkhAaT1aCwm3h00dTX1uv0SSSd6t9Hdhm/VFDf4vZGeiS5Yf3q7vf6Ni364+e7hg+Ojk+/9ve/+1eHPXtorXxvv1E9O7bEuW4mbMHvn8PrG+M7ds/3JS2X3tJ41PKBYQ9ZAqBpy3BwHVyCURAUVxaiq5szs2MWi03viG1/NG/Urdedynm/yIs7mtNIm0+sv7X98eOfw5NHOQ3/j0t6ZNgJixKRdiMEhRnS5I8pcaCIiGghoJKDOJbmpyZMTBlAH1EXsiHKPpefc6+JEVSOhUzSIAVtAJnKAyhehPHhS+bGHusqQZU6fHD66uDuhvN5pR+cnVThtqvN5Dn4I2/NH9Ys7fnB5swndydN5Xc9WjTULzIrJoOQOMWvaZupldnf3+J1RPpLYdJdvZC++4Wk0vXP37vvvzqtHKvH7//CffXC7vvfpB1/73d8tCgrdUjXWq4DEt3/7blGUKbYgJX4oWIzqKEsH1RH3kWfOmVqSDjA5NUxkZgCIIiGjDKkzI2IFAcfVqrm8t/f04eFgY7K1u9upIBgjq+pKokM0NEXIKO9CQw6RSGJEdIl3nE6mA0JFVIgZGYCIOXYYEx5TGShqVFACkhhTeFFajPZZCwamZgSZc7GTVG6nlSkmlqwBaWp8+/GuRgUCNOha0aiOKIIJGFhEAyZS7DsASpyvRAXHtVy5XzX3BzFBf6xvsXWtWjGlSOTM1CKuR8Q90Kp/K6XsYTSNgEjRIpiZEpouqw4IEEgkEAI5J0HYMcQIACGs0tg9GcYYoFNd1stVs+pCwGgK4pAUMYokVKEqO0cACoiqHVFW1y1gIPKqHQCJCFG50hY7tY5WqgPwu7PN88nTAK1Ww0jYzuGlYvDee/XywH7nC+Ons9VPby+YCxaAszAcanUyx6aItRmQLiMEFZMz9XcO+fnlU+MmmBNQGnOoahQbCc5v/8etqzfaFZSUebZgHUQyz6aJewoxUegJcwJRUBH2xcl//IudO+8dHxwOv/nt8stfyv/Fv2jvP2p++xu9c2cwnxuhSkMloyOIhQ3HLYCtKlY0CSBVVMKihAZQldibgYY6NnWJI+JC0DjLBAoA8LM5VZVsE/32fjkptdSz4dj93t/d29vmTOIATuOSm07znEcv+k6mP/qTa5+7LBukzVlGIoPxXt58vlveOSVHlEk3tKBgwMAKBsFpRmpZJqQhVx8kii/UBY8ZRUMHSqBGgDF0nQGQZ5XO5wVBCGFF6AC6Z3b4BIkBjS6NMRUgLeyAABSRknlOJJKC5yyqJQlxSgtWAEVaZxH0o23VuO59EdHFqEQWEycSgB3F+FnSKHESSqOp9EVscjoQETsCVBGBRFNNLt71CP1ZXC/0s28VQXw22u3TNrGn1STsTJr7RBEldtCLSCIQQoQEpxaNJ9MpO05a0+3tHeZsWT04fHTg83xr+8KqDeQoFeJApFEBQUHFNB8Um0UxPZlmzL31Q9dpDGkkTyganREnoQcDATKgKigkDY16zqJEhyQSkz4cwMQMAJgpMeTTFpDJERElfFi/NiJHDEgKREAJXSuKSFYQd2GVF1Gq2ZYf3ZYWC/taXv5MZl7oS5b/cCX5JDtfnN79xYOXPn+rmp/94mc/+fY3/8HFva+t7rc2jnDGcM5tqLa+UJ4cTG9+/nk6l6PF3c9/+Wv3P7l9/OgUyYlJDMobmWZkQSk6XcTgOzav0g63J+3jpjms2RVwTr/96OGt53fOKnw6P7g8vLzKY4jL/d290YgW1dPTamtvNWGiDiWREzKCCApBxGdx0RCnSaVESOHoxGaSOe5Se6LeF6ptbOcuUlepA+LMB1IQh+hMQ56RRiDy/NWrt2D50cPHM97w5tSReiqreZ1vcVzFTR25FqAOXR2auhrHbIAeXch861xdnc/IeeKcWBxAs2hadpsPHm2fP8xw6EJVX7up+1/DoEcP7qymD7/29W/99Cd/ruGEvH75S19/9eatyVZZr6bOMaJIDOVgOBqWUVNPRqZsIKaaca6mmKLuDQCAHFlUcmQa1RQtPTRkaYFKLkrsTICyECV3ZBHYF8eLR1/9xtdOjqehEUeIyMaI4AwjRYwaFSJFIkCIiAToECQFICQKh4U+CREQKHRiCBq1yHzT1EQEaA6JKNP+SIOqUgKpA4qBARRF3olI6NKRKzx3EgEMkc0EIKEjnarheiC25sMqOuqisiOihLZIagng3j0FCXqbtj4O0SEKpdSi/6Q5NoPY+xWAmFQVAFOMBKChGlqfBpHIu0jARhFAknjKmWg07aNUE50HQX3mNUaN5jPfxI4JQXVQ5C++cOPho0dnsxlxpqreuenp9Gw6U1PnODED1BQBssx1XTQNTQzoWMRCEGa/OdlczOvQrpxTRANlAUUgVIzEVnsJ9fO0eTR8Gmnm5ruVuun9zld4bZD/3vPF229VL27758kdHAZl1hr
gRIvxcDaN3JhoKCALK9kbubeegISQQy2hRXIeNExnpIIQIhbd0WN8crA5uVaHioBABSk3EyAEBQZDn8XQMrNTW2nMirw6uQe/+eXAj7NYL3/wx6t3f1V+5Tujm6+M//73dPEHq8Oz9sH77YMHOJvr4rQkR54JWMERoUQix8DaBQJbSXKwgXcADlQtBAQn3i9ECgIALTwTu0WTFbsndZgdrtptd2mGFs5DkcPI82gzFlmn8/Dp/aNf/cmNz1278NyVJ4/e9RmpzstYzBscDSLLmWta9iQFUAwcvEBwGTgB0sL76aKBohh97sbmyZMFOTZDyCx2iKDApmrsnFLi45LPcNXUzFkS2KqCofWZXIZJ3YiAxAQAznGCWBFCVDEi5gwkEjtQ7dbUNgJUMF274AzQAUaN3mcxSqLXIkKWcYxCmEDmiOgQwXvfhrC2NmCSO2ifswTPQh9SbolL1kfrnRTJ4qT9twAKfVODgM451diPttJtbP1OmYgAMaoiMpFLsypL0SzrGzxlJRZFEbVXmdVNMypH165fPz588OToCYAbjkYGFjshR6qaZb4TIYIsw6i6qhtiZ/2VC4hrbgElQqcRZw4QjRTVCB30xU7XG59TTgwiWJDAzGBAjtQUHTkkQmTnABLOkqjXsPZFBhFjDy6BZ7d/QvUJhRy75WpFDJUsX8GsCXLncX02dOPArI4LDcJA7uDNj27s3boML19y7z9+e75HVx49uK0XHVUwCpsnj+fVw7nWMD+Lk0b/0I8+ePDpKy9cPXp4UISJGMROnRiwmkesFMiMkZyTNlzYu9wcLfVYlTQXfzYP+aTxzUanI11NaTK2arpa6Buv3PjJW7+ow/T0WC7Pj1tSB33IlQOLDizGrPAxhKSzNzBCp2agIEmFrsB+IIYammLvctze7uoqHk8JRGNnoEiUUa5dRGKxwM0T+cpzr0z0wZ3DY819UbDFQASnT+Y5Y5aNfO0H7USq+fnT+fbV7dxr07R5IRsjXFSFQKMKVXPEtlFYHD94d0c0Mos8no1vwMVLI1/NHn+6ePLgK1//O0TB08n+K68VLrZNNRjQajV3CBoDOWEC0JZcLnGViseEhMTE4XNkpo5Sa5jSpVVVE3pGoiA5IIyqGXPbtoNBDipN6MhhgqsB2Hxxhl1sV40fDDuLYMbkMCoqdc4gJi00KGjThr3N3c3xxt2797PMq5pZCnJBcmxmJkroFExibGKD7NKg1fUXEjqiLkpfCwNCj6fEtmmAiDNO4uQs8yYBCI0AumTvS+AqQEdR1ukOpoacQgxpLZVKkYIaI6CLDtDAqTlKpD4ANOldv333m37Wv2EQgJxG7TkDiAm29yxVLYEEiNIrMvUAlORmMRqRUzIm14XOUvYDUdeFtAmSKAyIBlE1gpWbo6vuWr1aoREQqkqKOE31RoyRHav1XQ2YpfdLfAbU1K5tmr29HRGcns7MYjQlMfA5dC0DkFgkgxN5Ebdu7y6yuXrQw4+6wQR3WN76iyPYHwcOW1n+oKmdJ6fkCOqm9cwhJxcsRCyYPpoXhwtmH0HzSEULAuQBagUFLP1gqPXZ2Vu/3Pre82RsUZCGmc9CqMiMHUvXkKBnp7FTX+TG2Yif/vsfTkAabQCsKDbo9FH97/6H6heXNm+90V27UTx/dePmxRBimC7k4b3qySHdu0eLp1mXRfZQeFNmVCCALIsdSFDKGofUKYOBy8hARDgGAoAcPdZzvHrlnmg9n+sIRpf3i3yrVXFBwuy4fvA0Vqc6P8j09JWvfWnv5kvtyYM86wYqeZ4XZdGsThS4VDMS19RZnSLVo1ARBBoEBoK40Xqu2D097tiJc4OOY4rRQHIRBV1CU6CpgWHTtmugRMKwOwMzcmCRiACYehoLIRh+hnUB51hiJEJFlCCRjJkTGDECgEJy8gKAUwAyBErK6iQKBgAVeXYZJt+tGQSRRNYAxbTWTcAZ6O286/sjwSoI+/ThZ5udlEhqqexP9W5a9xgRRYngqAe+ijA757DrhIiYMk1KUktMmiRkhD4XGyyBHELdIiECsXNBwrAcXr1+49NPP737yZ3JZHzh4sXxeDMaAHIEI0dd1yGYIxSVzGemEEWwf2lYrwJBQEBSEzRAY6AMMP0pGSo7UgN2qOl9CwbsCNEQiZB7S5dDROov4GSYRqIE+eqb/l6bjsnDbWZKBI6AHY1yRqs90B2Ir3TWmD9ZwWWfTRv+5YnCpodKs9Hw41/c+cIrhy/euHytvHH4m/t7V16oVtv1wRKk2MWt+09Op5/Oro+u5TOeasQ4/fpUPri25YoOoNIVO8y0UZc7IAMyZBch9tyVGbePGz73YIgRqmU4Hp9s7Gfzehm2HUw/AM/gRhcu7XmKT+4duJeenzAMWo3Okwtq6eoEBPDMrYghKDEGIda0maDOhJR9LqGybDB45Vbx+isuH43FwuJU2647etzd/RSbeeDOZVmOYGoMM13N6hvD/c1Low/vHc2Pm2LgHTIChCAriiVk4SzYfJlJDRHQCgTppC5KKQpogkIMLiP0q737t0dFmFUzAN9dvolXXh3lBjK99/Cnb7z2u8D1wcEHNB7ffOVm3S68e8Z7SodINYKxG/gNVVQLAMSOYwxJ3mcp/M/6AU+MkZxLSh9VI2IzExEi14TgiJoQGIgALRoQdQpOusl4MhgMYvKYm2XkQIEcq1PSgOzVQDphhu3t8dHjo5Pjk6wsAEjX2l2NQuAQAJmlDZD0B0Ca5NFE6YQnGSQ8AzSm9bsCRXCO1WGIMWcvIvWqSfsDjdbLqA0YHNB6dgw9VwtMzNAhRTUDIwDXT5YJnWOIaYJmYJSWTzGVpp/BrC1dpQAIiVuvLjl6nUtuCugdR8kCqCkxKdXyiWxL1puOXDItkQGBqjnn1CxjjhLZcXpdmUXmTCXevfPp9vZ2PiiaVQsAncTNzXE5KI+Pn/Q0hlQ8ApopORLpUvk1Hm145+eLajQqr127ev/eASKU5aCqGgPDqITkOOuciZnKanNR7D0nD7erCWUHoF+9yh/cX+7fGqPIAfFRW+XXBiqrXGD3Zvn0ZFYouQazJkMEWfBH07FxiG6z1u0CH3aW5yDkSLRVy5pamMf14aP53duDG78Tl1Ok/Ojg7oUr2wMa102D3oOokCJDWK1G26PpR2/nh3eZfYzkHGJoALPSe52frH78Zx2XUo7q6y/5F27g1avjL94qsze0puZkCp982Bzct7NFMTvrUBgzcnkOrNsjqaoIESgCO4dBbZBT6JoIABZX9edvHu/sre79crjpW1d1h/cO/91/X+vKe6aR0oBxZ3jx1ku7r33fNeft9CTCiimqxHxnRzdL/XTaKbRUDtkErcNVptRgbDkLmrMfBSy6bKelUVPDvMv2ymFolwQZA6iARHWRVVVFo8QMXDKWAqiqIEQAwTSsIQPt0RKOEl1d0TlLgUaaInXNIcYY2TlLpSlRTMho6+NSKNWSZADIzpHDEAK5VJ5DqtcRktk4Ha7eRISIhJgWzGnyjCmYQWMaIUGfvWLWY61MwHolIwIZGBhp/yGQ5uYKRiTJTI/AjkTk4s
WLT5+epsp1rQthSvkN/YVOaeMTYzQzn2UxJtckFUUeQmdA16+/cHR4eHR0WC+ray/cGG9ui/bwSe+9aRSJnrMgYp8lflpvykBESO8scIBExI6dS4LVnvPVB6llLum7iQk1pcekEDXHzqXkt8T7XH8JAOjjxgEAgdflBfTmCFMidBYUhE0i1S/CmLEplT6HiFWx28j71GmIec6wghbl//N/+x9eff2N3YuXpw+rKxPaLfbDofiSttz+vdkdmnXN0yZcar4+Lib18rGn39z7tclS2ACIMA+tPf/cSw/uPTRkRSQ0cARCOKPlwwqXmHkTi6TVB4/vfOfahGgBfrMoJ6HrQntWbk32diePjw9OZqN7Jl/oYJWJRgcGSkoKkbSpV0lraBIQQVQdkRAwJDRshO2d8ubr5eXnO88A4EiKre3IhbtwkTcmzcN7eVPHxblqQCSOtfi8qE6bzWL05etXHj6ZPXqyaE2HOUPIquVqvMOjDSBfVnPa2MjFBMggQsG0PfHHJ03AjvOhOzsI9cHUec9++drflnKXuqWYLo5OVudPeTQ+fnzvgw/e+dY3vqsyZ+vAmDhb63EdQOp7cu93z84b9gxAZkrEnTSOUGPLro/8Y3YJpKlr718Xghl47zXFI5mRohICZ6hiRhlRxtQEeevtt69efaFeNebQCIidM3RRgVzbCRADqvf+jVuv3bt3UDdhNp0SM2NyxIM5EgBVAVVylMjLIFr4vItinVDG0RSJYnKvmxIiJJg7OUKQKIqEhlEEVZVsjQpRcAhqFi3hajUasQPt04cSN0pUyRJJAzoTA2RHjiiaAhoZYR+gaIAY0VKsyXoF3I/7DMw5F9OlC32lkMp/wl6LmiRRoj1N8zONaIInmJqBdtpzuhDRjJlVIxISUQOCSg6ADBez8/l0lkQyhpixWy3rZVWxz5BYRATUIwEpAIqIGWaZHxUlAGo05kFtkzf5AAEAAElEQVS1qH/72ztnZ+fODaQjAK/GSMzZCGnIKQK+RHXxxXa4eF7CeRZ99nTo8Ird+mLxybxersKXL05+/Ms5+YLirB43mFNnhVWhXWkWmwcysYLyXa013mmvf7m638Q2UI3dANmDmoJXpU3i85//vNz/XKTxqPTNcvrH/48//tZ3//ELr7x8OpsVVITYsGfPJnHRvP3mnnJDELF2kgXGiOZD49hxURbgrV6s3v+5vftm3Jgstvf8pcv+2hujy7v8+78zgN9r5oGPTsJsEU+P6scPNbSUqztr2BRVrAtgWY1VcBv+4mUAuHtjvPrS6/mf//WYQ4Qy37lCV/fdBR6NB+X+RR0x7xW0PR5ysMWT8/d+c+lLr02nC2ZUcBqa5ryNCI5CB76LiNG1EiHDgD42RISNuRqhXlZWFgCDvZ3OZtE57lpjkaBE5ihEU5RWSJ0D10jjfUJLYkx5eX1qR3rO0gq0lxqk0Skk92y6mcxiDJ2pYQ+4sNQsq6a8aNdnLPUXTNoBJ0Le+mY1S85FU7KU3dWb0dPwumdPJ3N/ooiqCkSHRI5MDdccqNT6rtM9+wu65032fXPsIZpEGo1UPWdPjo4IeT26056zldzDBtgzvxIfnQiQkMhBQkrHGPPcS1QwuPbCC1nGDw8e3Pn4o+svvHj5uatBuhA6JDRAcq6T6JKMgxAJnBE5p5L4Nwqmxi4zJHKpoHgWyN1zN5N0hRhEMXNk2Cuc17ou6sMhWdcssBQ7s56XISggkK6v+1QaqUUjyQYQpS6w/DDOMANTwUgclt/LL95myZC0kes3X3p4cqd52v3qP/wy6duOPn14fHB6Y+/K9//wD7/x+jee3L5bT2OR42J1rJubDuTd+eEnJ1PPpUhd+BFCDF1z/94HnvMQOzNABRXgzJO0zfxsNCol1Hnmspzmp0eoq/Fk8uDw6NqukJ/gxjZIfe3i+OCpZo3w6y+c3H04XFXGDM6RdurAxWhM2onLUvKvi6pOAYgFIkft0Iavvl6++LKomqAJCBEq+DZApvnVK/7SvoK0Dx6sbn/g64rffPOD37n52mRcns0qdPzixt7FbHz38cl8LgzgyKrVtCwGqtrJKi+cQgAUJBBrfKm+ILUgnY1PHgwsOon1lVcXFHn2Cbtx5/mkOjRo7r77w2j0rW98hzwHCcgjoFQd+CiUAmM1ErEncERjBI0myfrtOBXRZkYIChC67llMN5qSdGH/4v6iqpqmISICTGxIRSAJoWs4HyloVEmF5/l8lhdlxhyTspdQ0DRi8iP6jKtq+dY7v/HsRaKYQh+aBABAgGiaERtR6m3VzAiDCDKhcUocFQVCAlACQqIYlSD5+YAzNqSo4pCMkQiDCBMxUTRDtMwlUxMCwToOAQGAiRQgI5YokcwRopFDUtM6NOkSJwBMUWhgz2zM/QgK+mYkeXvjWiCKCFE18wyAnXRkkHnvyK2aVQoZ70UrquAIkTy4TjqkpDJdK71VnXMhhFRzqEGhpMlqhaAEyCCdEJP1UhYdjUbRtG1C31OsB1lFWYqI52I8Hk+n56PRpmM9n1dNV483L9SrUNXBuRLJo7GTYuWBM8AcY5llA8As7G/EO96VQz1g83vFYhI+OKheeW3H73V6bEyQo9M9BwFiW4NXUM7qURaCIWNFxbI72Jrc2h3StCv2b3YH91QaQ8dITtGYxkGqH//Fxvf/xbyZHh4do7q/+uH/2xf/fP/ajWpWMxWhafy4rB98Wjw9gbyE2AAVqopCnhmZLJBx07I5HuRUIg2GGlb377gnR+E37zai8erN7MLlwfNXsouX/Q0P/gtjEKk9TrumPrPpI5mfc6ftuCzBZ5c+99sHTwDg8VZY+kfXw4k6ERc2b9zafONGw4FHHDaUcw55LKopFlx/+OHw8R15edtMCDFqg5ppi6YQgSLBMvelaa5b2lXQOWGqkQSEoKQiF/Ir4Hm9O6bjuDKKTrVwQagxUdNgBKQhNo0yk2owDQjRenyGGqCaWG/zcdCDk02jOmaRvv+LEiGpDp8BZQEVCQAlmc8VI5hZRERjTjf7Wv3UD42xD43rCane51EjAYhID680c85BKigVHDtLuUmg6DwQaDRK4yJz2C+DMPWWCsBglMSSEgkpweCeUeEMgLNMY4/WInAKBpj8P0SOPPuomoCVMaYYGHDOxxhTzCIgZI5TGNTzN27kZXnv7p2PP/pAu27v8pWMuYsCSICYec6IASxITJC/XiVFCOZcmjyQ6/E7AOQIkNSM1CJixs7ITC0r8whGBgl3RQkI4Ny6fXeoCkCpIOnlcIl/3VfxKegMEq4LTLVjFYEMeUSDBligoHZ7sL2h7nZYxk65y6LTe++8B5lnlxEyOdc0zdnd2fZ49/2f/pqb7NLF57ZhL9RPvWfm7mSD/82nn96VxgGqth5908x9MRyN8tBJpMYpatrCEfuCzVZEQZU8q8QWkATCww/ev3Dri81kCBBdc+xo0sT5C8+/+NZv78yqw9n57nhzsrlYivfMrm2WGM2cU+mIfW9PVWPypsCjUVjVuqrzS7vF7iUQ5eSTyCiaU+6CEkFmBTIAAPBLJZWT5Z3b3IT6R++89
bnn967vb6v4KjTeu5tX96dPpwenx02MYd417dx7tzECnwmYAlmERiTmGe5v6j3Ns+ODvDtBgNV452R7i5qKfekpoiMnDVhTbm68/MpXuthoFKbSqycogsNScyLtiNm8oag6pjJz82gVAAMAgSCEDlpQDVEcYxREIItJl0uGEkXyghyPDh7OB3mZ0g9CB7Fdbm6Uk+39k+kZZT4E8X7wzW98893bd1ChkcBGJgoMYNjnIiCDAnO+rJu5rQDBY5ZGTBKVmNgRAieKlQIQkkMVNSLUtHoRBUfckwZA0CglSSARQkakouSoA1OQNLZy1GeUO0eaTI2oPR4vuTTUkHpyVsbkOevazoDAorlkxiUw9bnvuk5RzcyiEVFKRCBDMTUAUiNGSSM2UQRKLxKHBArOoSIBUQgtITqiqAZIQSKnrhbQTAUBmVSNkMwM2SWABliyaScqIIFLRZX2rA1CcqwaVS1zJADSdV3ogNCjE+wX1DHqoPDeFdWyevLkBJGn0zNVJCq8Y7QMBBx5xAKV1ViHvhiQ5qIeqOxar9lQtgZ1OdhsBhEzmIO80xjc8G/NToslw+WsjW3uHOxH1yg2RBsZ1SZjisFTmHWFZ+cC0nR8YburJJwJej/Z6+ZzJQHVTnVQjMLxrP35/5de/cbh/YecjTyFd3/+Q8/lxu52rGrPPppWv/irXZfVMZSaA7kAjQAxe2XurMmMRVvUgpQ6O+bBOH/jy/dGI6ltl4fl9BG9//PFL3/uBiMpME62y/0XaPtStr1Z7OzC1SucqdUrmbYhhieL1dRVAPB0ty5h1F3ZcyfHS/bFxlazmMIokxAoeMkjW3RoJcMsnBeTjfrBnfLabtRGAfMi19Bi5jCyaRI+Fx0JEEkxFvJGg86NwEZCeUcMmtVQlwEBEESggRjBgKAzVLNIaISYamhFMIWIFskpOLAYU9ingYp2jC6xDOBv/EgyRgWIqs6lIEIAAHZsIqSqjiANTYiT+jBlcZoBcg+QwYwgmgMEAkeuW3t1NCqnVXRKCAUlRxBVzUgVwDw77/OmacAUASjL0hWeqNdpzMtpvQRghrpeEhMgoUsihlT+AhqzpUBxRWByKnL5ytXj46dgyhlCTOg8RFJH/T2XUbauZdOMHaOaGly5ctURfXLnzp1PP2lCc+XadSansQPnRLEoPHTBMRGRigJBNsjMDMFSYmmPc8d+5EAAjC6yZoCUblHG9IvT3U+YZm2UxFVJtSXgCCGicnIFExBA7BMonCkCOXSkahbBDBShjpDBUNRuUTmiUDt3Fpe3mzDIxpmpqBRQrFqXeSyGWbVcKUTibHH3ZOu1C1ks3vrhz5HArPn866/feOnl4yf3h6TvNDWXBYaVYJCUPqmhsw7AUCkFqjsE0booR/XilKSW/x9X//5k2XXdd4Lftfba+5z7yJuPysp6oFAoFEAABEGQoiiKoiialmVZlt3yo3tmuifGHv8wPTF/wPwpE+GZXzwRPdMdPW7HtEMt2zRFqyValiWKoviEQBAoFgqFemRlZd28eR/n7rP2WvPDPlmkphCBQKCybt6bdc7Z6/H9fr5qklLWjKIs7f3549dFN+jDODSYFN1uzhfU+N509mhxhrWu9g7LdO3dMueNUwiBOApCMuRgUKcUg5VsATx/KhLwxifaV17nprVhEksgAyG4EKEQkTkVM1UwRjeuTo4OJet8p23vfHjv+OTJyy9evrS/l/tMfXdwMJ7uHNy7//h82VVV8GjUSYymGottAxOTWZFJutTDnmbHVhG3BzcY61ZEbdsVt+748MbNG7dePNy/tFkvhNogCSEppimIuCiI0ASNRgUhUBGSCEy1LxzZfPupT775wZ33FvPHu7M9Zl0uz5gj0NdIH1hxRjNu793/mJ3GbdtbIXPAGsju4dG6W3KVIWiZjMavfemL6z6HRrIVQwGJO6DWq4pINROTE4BGGmJs+2zuHHi73V462GdguTwnGTjGILvQDQ5yDXeXNvnghWUHAlVrESewm/VW4F7xrRVAXdWZdR9UtDAPo6iq7nD3wIEAllBKYcK260VCO0rdpuNQKysGWdU+UAVSgoi4ajutHm30/LlWLRSDzxeEEMXq0qmoAbE4hVAIqIqbeg3BLnRYdQMUquDCzYpb4AAfgNLVlW/u5p4kAjTMEyt8g5hgbh4Cd10nIbAEVeMUq+MsBDFDTIFQgxAFoCDRLXBoHcFgwi1zayRG6KUvVHYOdlTKNnSyKz6LvKeNaZlSDCWQLvv1NOneeJ26TCwFrpt+h7WfWh5NViTNsvz4oTyZTWO3kJwLzjXtPlrs39h++OysixF5s9BkHFqIcd+WruxND8/f/fG1o2u//Y//29//vX/5v/lv/uk3//g/Lp4tZodX20laBX/ynX8zPn1qEW1pOoDNG5EG7Uo7qAjIC7FTm4qFwC+9cXcyfu/+cb5/Z7lYQuLe/tH09pW3r15LX/t6swSO/8re/1ZvpQNsPKO0Q9nXauW1NzeHt8bp8NbkKoDzH783ux2f/sLV0e8/OLh6W3YmWlYjSZrMyDrhlHgawvrk/vT8STObtoe7fa9W8gjUJDrvMpmTkzKrERuyw503uu2l2fahiHekytJDM2P+WG/uhPOsvVFQUFf/QIRZ6ZU9EFWkRjHLZj2zufVDjBsK4HALIUgMXpiFCVzcihuHin0Ah1DMS7FhsgUnJpZACI76/6tXjivwVaLUVGEqDrg5JIT6agQQR4NRRbQSEYe60h1KWBluSTNvm2Y8mWy33Xg87fsMQCSKBKCCKQf9lF+km1T8HRNfcOLIQiAf5C0Y6PWwoSPn1XIVRYhQjQAVyMUcq/Qp8sCwqPdscSNitmJmWfXajRsxpffe+/H9u3e3XXfj5u1mMtZeo4SuW1MIjUQAlORirk9MsGIpJvqZlmzQZxJRhWoEErqAHYFAg/osXEzPng8VqKYRRpPatrDDwEHF2GqsIzi6k8ELeQFni7YuyjKy9s/zximhZ5h+Me1wxp9yMUA5R05d11vu6qMQUSz3+dlJimZJA1Pf5eX8wUu3f/Xl25f/8A/+jbGYJlUVRE4oKK4ds5BEq2IY0lyKg8bT9OzkoSHPprPl+QL9ljmA08nZ/IM7765zf++nZydPni66pZArmTSzyM3jJ6cPl8vQ6W2ZLqcii3MJjebOcicSHUVYTXsAnEtJk/bNT40+8YqMxqVoVQ9UaR4GCR/F4r2pEUhYS5EQMG7l9tXpxw/mltrTfrv+6enRyezF69dGI9F8ysw3b+x//GCV1Rebs6OdxNgaigeiIeSuALq7l9b3MnxbZger3ZltszK4BaCRR01sYmzXmZs4S3GqHkKYRE1d0TaMuVFuUaggWODkIE6tryR0owDJGr/3vQ+uXT/sNKckQCFbMXunOUWpUkkzywamwMO96MQE5UnbjsfjJ6fHq3XXpJaAYvbOO++8/QufzblrWFg4ZyVmL6VNTVYFU41ooAHZU3tHhUttcF977bV333mnU4UICMKh1MRAJi0GwIvVOXNgAZOX0ucegEQhYitm5AzmUqqWX9kcqAnb7ia1c3YTcK4x
n6AwmHyo7quYqNdei9aNeAjDOpwJxRXV4F/FoVbxTDbIroYT+CIwYfi6encS4OQcCEXARAkoaiwEQjEQU6BhZ+wDnICoclZtcDOz06Ctxs/x3wfIwKBQg5mIlDpXF5EQOAgF7t2YmZlhHFNcLTeT6cwMZuQIRCnG8XqVu24rMlKFbnM7asLYaUqjvdEaKxOfHk110usEcUY7rW6szHQlWI1HvVg38m2hfpZoU/rO9QUuS/jWd6424+/JlZPReFK0K2xb9BZB+dH168vzd4KvwYHJkxOgCkTlsvVuqTu89/ibf7r7m3/vv/yn/9f1uvutv/+P/uPXf+/b3/rGb//O/250+cqTdccQ86YnNE0AQp+JqDRxQk7WbePlq4+vX/3Jw4/54PCjZ+d+fP+zn3p9vti8+6PvAfnk8U9feeVK+90/YWyRSE0g1GCGvo/r7XbT5Zdekzc/T2ncdqxdTqMdALu5XT98IrcvnVyfXX/hZeLCbbP1QgzisDdu1j/5waYBn96Vh+9PXv7Vbpy2eT0mQWINoiQIqQcDnBkBQT0TWOKsQ9pyazzuMIWMnWddHnebcR4tLbvkAHXz4GroMjKxs6ub93CDq5oSw1BQClNxKACqQiSjbrsVZtdayFUGLWOQD3q4mJqiSq6Y6nTX3GuaUrWhGtw9oDgzilrbNLX6DGCrrtQqGveKSqWBzg5U3GolqDORajErDiwWi7p8aZpGVauNCsDFkV81RlXzSBWNWRFRz2+uv3baOYYDmODGxbRpG6ZantalthM8iJgbUwBVfxS5eSBo0fqptVCvenB4+GZK77/37sNHD3qzF2/d3pnOuBiYLwbDCIFDEIeBWJir/wuoxLpaN4CYQLXEHxRWldsz2LZqBY6Lm3g4fwlS5+9s7ObgAiawgC2Zm7sQYEYVCFfMJLspNW2LdveK+lvcftiXI9nZWP8d1iY0XqQvCJ6bJG5MIQRVN4ZDV95O95aLtTFc2p9+/Oxbf/IdIH/3uz8WbqmoSEJRkDhlh3IT1LvcdQk1TBrm1rSmecmiy+WJEyz0bRPUt53hWz/4PqDmFtpJM94hI2IOSBzLdqUxjLcHh7x3Nd6/Cw1baLhxI0K2J49ZtwgkVDKUd9p089boU68jtdZnv1A61J8bXdjHi9WLFXAPIXCKqkXefuvmJ1+/+uOf3P/4eGHSPJovzpbnV67uXzvYd+4067Vrk7v3H5vZzjRB3aneQwpWNqhmHsVwadIefeoML6qqc+cci1m0tEWUNgntAO6a83qV0r6K8V7am+3xNCkyxFNDcHYTAEF4mmcnj5YxRXKNmKxWajZ+ejqXUByxbdP1wxt3776XYgIyMbUcVLWQOSEQGORRzrv1/ON5MxrVTZMDWfv5fC6BS9bp3oxbOV6epNS6+eH+wYOHj6jeRYMp3upyKIVYVFOKy+X5D3/0w7oUwfMNipVaqA5Gw+rDczI3MxTV2y/dUtV79+9pS9HB4GAwJmVrjATkRkSkfc8iFHjbbyWEDC82kKrUigM51zJczI0Kxdi6W6+5adgLAomRqZchhAhkgFtx+HPBZX2D7j6kRFzkO2ruqZblNa3JrTgyLLUpBek2XWAmYVMjZmEGQ0T6XgeF5HPNM0AcHG4EY5ZKoq8BFFQH3XBiRZ1tYBhzOZjRMhdHYAYH7fvK2eEQVWtrLW4UYxOCaB/G4+mlg+uny3lp1VvQCKlNcTflNpdJwQG1h/FGfoQlZtwlO2+tb0oXad56wJamPa/s/JKVgwTn1cf52mq9PEzd+d44EOvTjKOxHOvZfjq5cv2Fxx8+420kyURiEHds2Jn5pRtrk/To3tM//g8Hv74vh0e/+y//xfHxXaD/+r//1//wH/6Tl/7mf3188PXuT/54h9uSLgV1o4WUCQVSzb5/efv2L/zZ976/fnr8mUtHv7J3sJl98vHDu+/9+D0TamdXv/rlt67/6Hv50SnaqEXFI3elt3WY7T177Va++UY3mlG39q5jG8uozU/PADQTHH9mmtv+6POvTOK0Ey9ikhiMME6bxVN/dmKUJxGTGy/jxWv56SNi7okji3u72c4hqRTuiKVQD46cYsCzLBmzjUwzZM3tmuIGaazlzQPo8szXHLeBmZfrpXRgE+sdhd0UsKJboA8SimWqYr0QSp9rBEglr1aYg5mb60BXAvvPwsqqXbjynKt7LlDNiA8h1EMjkJZCxlXmq9xLEi0aWAJL6XMQGZDm7ubOHGpyyQWAsqJYmQEJzEkAELiUIemobVsAgS8GzRfo9eHJ6oM4ETU4/OLM5Z87gCv0HQCIJAS7iG0SYcAlxsDSax5ep86HmWsUipbSpJSzqmo93ovZdGf22iffunfnvUcPH3Rdd/v2a4eXDsyMzEMciBnEqHCMgbrFIIcPvkiqxqd64F58pkEUAoQB7F5NjARHAJwuEB/sbmJsTBacGebZC3sEBMzOyQAHq1l2FTBUdAXddEcN/2mnv9YevN8tf2KhGe+VbXHqPZqxKCyKuBqPqZQC4vny7PDFF4BU5QOc+Aff/eGmW6Q0UjBUwb1EKW5wBiNvO0XGsBRQNZtOp0U32/U8paR5wxJrcI1QHxiBxyGYVU0MWrCQGrvkAHGxhc0P2slnP3O+6drA8vLVcPlI4qRZnbFlaNbVIo0kzXYsMEjQZwsBZfCLD+k+F1F6hS8soDUcWi2A5cfvfGe2c/gLb9761Gv9u+9/+ODpsgPde7RZLufXD2d7swj4jav7x08eNdypkou5O6gIwYMJBzPdfenG+t5Hz86e0f7+pLTZR0khkwNxDfc+omffCedLdXNv+5BCY7HdX77ycvrE9fbVV+Vo19Yr3lovOZIhTFod45yzZ5qybXSxVEds5YCCKi0MoVsPKUBgtqLmAgjB3HIBSMiBOE4NkjBv82DMZ+ZGmur2O5vPNXiSaFYYuPfTuyElN7AwCG5eFc5mBlUCXI2DdEWZmSFspKbEw0C3ig4GZkUIpSZmB9Lett22154poBhVgiyTg50qRZJL1S6IEHOvGaACa1JiveDwVEUKc5Dg1bvP3Gtv5iGKllJZ8FxrB2ZzB5woeKVJBzb3C7MeUUVNDRw61CN5mE9XXiQzuSeO2qubsXAVk1Q2Ci7MG/X2JKIQ2Kx+++FB417tkQgh1CVWZVISOEZR1zoPZGaA64SQQG1Kqr2qEzEHcSNiZ6LJZG+7VS01ZzKaUpvGB0cHK16voXFHSiztpTbtp963zWHUHWvDs6vtCgsn70ayGut27OsRrZq46dys+ITpitBB2NzRaw+enR+xrmUmy/liclh658U5H05UcOfohaPlfZ62nrtoEd73hVJUW+Z05fr0M597+N/9i6N1fvb1/++N//qfXrvx2vGDexyar/6Nv7NNTTrfvvjl3z7euz3/g39zsHhGvBsx8xRL96z50lf/gtNPvvlNy+u3PveVz4+nD7rFN7/7J96t02z2+TfePFg8G//Jn+lm6WmKbUYTxcpaePrFX5/ffHV5vkHuabkoHJgTc0JKerYEEO7dkS99Yu7a3Rxtzp3dpBWIEgvD+uVy/61XJpbT8vH
s4FbWrnCQNuWzdbs3UcPWOEKWhm1JykFK7NmRy5rHLnHDaYux6qQLzVnX3ECen29m2yhZbZV1G8ZplhdrzSolJUjG1tEzk7v5IGM0JmeG8UUbC6u4m3rc1rkrEXFA1kLCQqEGIkgIzAQaMEu14A1ciajVLcfONQ/NRYLDhISJmZmpqS1dXRLVHYwRS+KqjoYZD4ciRAZXPBMzk5u3bauq5sbhwiaMCrfC4F8iZmZDqdrl581vpck+/yO1u2eCG0vAAMOq7K2qyhweU/J85utmFDgxq2o1+3qNTi1eXPcms/a1TzGFR48+fu8nP+z629ev3Qgc+qKJYqhlbAi1/KUBJFkno3xRGVdpdwCAAVcy3ORDesxFFVEZ20MNwaFmzEQjo5iZyagFFyJCMASYoGrBXahAQ7tg6andCS2rEOHjAmrHX+BwXNYP9kPLjQbqexU2tRwSB4muxc3AlBv1MUIQ3chXLh28MT341w/eP+7WomsSlD4omwSAk0EBieYgLyDmaMVSmhYNwmNAApubJZFVp5JCoCYE8dIRRcCttOTB2c04QGTcLBfLH79/9xc//2uXf/U3bbuR/R1zFVc5uGrYEjjZFkzqvfSaVJVhhiGiYrBiDs0JmMRgDpFAEAe0KDPJ0wf3H4UHT+ePLu3uvfna1Rc33d2PHj05XZ6v7c79ZzevX7160ETaXD3aMVqGIJHYYYkJ1m/JCMTuxjHdvHnlaTmdW6EYgm5pgp/8eHb6wRhJQ6uzW5NrL6edkTEY8255TGffX3/3O+370/i5z7Wf+6LIqCwX7MHZsANri4NtC3LmwN6ZK5dSEEbL1flysU6pBZt5TmG07TccmAwcBFz54W7mgQMMkbkmBnAINeKAAhNzkDqvtWImbSrVrGMOK4QBYY6admsQDlY5knAza1ioohmdilbYzWCPqwnEVYXRxvT09KmqSoxsBmKDMXMAAqCosd2AKQBo9a0zmZdOjUFMF0YrMHHpFQAHKaqDZMIDE5n1TYpZHUNi71Dr1jJYYtRSzLQOn9wc1UpUp8huYC7mTKRuzGyBuBjUE4sBRpZiYqe+bIbxVaVWOYg8hFBtCHZR6lW7RS3Aw5CA5iCAKRBVWqcTQpD6EGIQS+j7Hn0GwBzcSEQqJh4UzMBBUkhW2Dxxm3r19+++H6YSx5FHjAk0qbLKbuQ93mnXe766IavMp2uVGZ6OsZlw19pCuLgRUlh1mp5spi9MH5yuroenE203uU8yknx/Mbv+zEFPnjYH+w/06mJ1bX9z76wHrLQOy8G6AAPfur5cLM9Yd3YP22ePHvyr/8/n/w//7QZ0dSoy3flf/vt/fjA9evMXv/zqW5+d3/jEgz/8/fF77+4AAbE0R/n7d37h6uEnfukLz3J8+PTet7a03C4jh46nt2/evrFVe/fHOZJ4EkO4/kJ+eG+ZKL/9WXnp9vLxQ/EE2oGUkbEiqfPY09KWAMZny6sP+/NPT055cTpZH4z2ObgmtAxjO7h2edoUXs/B2aRdLo+LQJRzaGPa6SAdGuboZBuJ0cLWezJRKOIOaLTRSZdG2qQ1T3jtb9++ZKc9Nmc5u+XAKzLtAiSokFpebZ2K+2D8Nc9khdhqS8cX1lsHAg3plhU8BQy8tiChGv/Jq7Kpdmp1ml5VVxx4ON8Ch1CnL4AXq1tYJg7gYiZxIGMMib9ONY8EVigENjaYBKmBobjgR9bZy/M3IyRDFzn0ifVMrU4bZ+bgF7Punx25dZs62Azd7CJrbEjQdqrkRrZitQ6vc6oLzQb5gJTy6v0zc7hZsSHq2LU4vfbmZ1KK9+59cPeD9/Mmv3zrdmyTE9emkM2dqgIyqBmYqji4vjx5lVbRhU8i0EVTfNHD/8yB+PxTGTFz5KKLsul8WZuU4OKBATHzYsFdQAyXrRt6C2hGiNnipfHOvq6exH4R/CmXc4TDqzOFrnIpOQsHcdaaT2popE1tiynCbuCCty/N3kizsj75u+Ojf8ebZ82IXAXerx/nbs62dqq27gSqIgEBNLa7kcZWRCQZGQFZy2h2ULpNiJFErHC3KSFEUIIxS4D6KKbc6TjtrtfLd374o9/+7b9/vIypRHf0ZkFgCEBPxlzxQYLO2VCkqBMbXVRcg9OEwEMsNSoUnyEpmZu8/NrNbLjz/g8/+nDx4Pj2dHr46VdfOzmd3/n4waYvH9y/m7u9q4czom2npt4FZWUTJyOFBQAcWLfZIvYuNR3l8/UoSaKPH98cb8vs1Tkdja59Kh59Qo7GccRhYmjzdJTy6kE+vnN+58/Dh9/cLN47+tJvxxsvlvmGpUgQ3olF3YJKJFs5EG2thBZOKRW4mC1NM3NryMTJoTFUzLBRJDcXlpqLkKJse61g52FHQgxiLSqOING4lHpKENUoslBdv+4hirubFzMwh9xnDsKMvnLe8dzzbjWLyM1ijKWYWqnhucTMIoPTDkg2aEmqrpAMtRQetW3O25qM6PAQqBowOAqIcs4gXBjeCWGAY9SBcpTYay8sdfJcy9dSioPCgMuukUkXlsdKocIF7dnNh5PVQSTFDGSRtcKlHd1mMyQ60M+aYFxkp/OFBhUgqb7n+tku3PnCVN0LAMyUwTGleiFWJ6KWMhqN9ma78/l82xUQuVtMo5TablNKUfOYUlQKbdwRmZzO17FlJFgyaintJ+xCLgvtEbXnh7I+yGdHMsd08eDUDspignXA1lb9eAtlmGVG6R70H4b2UD7u8rOk+ye8usQ7qlPLD2znxrKE8uRJmr7w/Zde++rDB+yRunWHhrsOgTL44Pqs34zf+Gf/J7Sj4//hfz649/5H/+M//9xv/Jej20f/8eu/u93mZVy89+63Hzx69IVf/sorv/m/X37moyff/nb705+MValf4F43vWe70/ELX/rSv/rGH4xhZs3e/rTZv6J90ThtHWmveXSwe6LdjVc+Q59+fUuNPTkZteOAYECfyVIwJYZkVzx7BoCFx3dO6NNpK3p3ena9m60JwqFPsEZTUTfSkwdToRIsu7kJmbtxwQzdWhE75Q14a0kpMDcgKGlnyaL0MlIbZWslR1/n9+6umiVeDIK+cKGglNfoNzrS6IUd6ijmloThQQsHIbdtFW0QXyhUyM2tyu3gF9zWcuHud3JY4IEHUXtQJkp18HNhzyMK9XAtXlBl0qUgMLkHluEwYXv+9RjibYeDkZjZ62/WsfigcqjhIlZKVScMw/CfHa7P9zqoF/uF+BpDyiYuNBG1s3Q4sw8LoqrJqPvZ2lk617kU8VAmE+rtXOCuBcRqGQARRQm9qrl1BifOfb55+9WY0t2f/vT+vQ9L7l954/U2NQBpr5KEaVjkDjveeu9TGN7hUEcE/MysWBlA1WxFPOS3DL/lgLho0flmcX/5cGPbyFIQzAIjEII5MyXihhBBogVjmRUvQXbyWL9HuWtMg+1BbkpZpHJ3eQJmZuNp69YZO3s0MlWLLZa6PGhHmNDU0y/utEvY00u3NvvXr8phkHFQT9RvNsvzB3dkfaLLR5v5A+o7rmGPxPBQEMejXVDKPR9eOlLV+emzgBQIZrASqKQ2JVNWSk
7Uyogin6/WAQ2Tc4wPH63VWuGQBZKjc2vYAlLFsoXU4GxFqnpW1FSZqJIT66Vby0cjRqC6k0eFOKrJlZduNW5XDvYeHj/6+N5PLK///C9O3n77jTdfvnrv8ePjZ9vHp/NLBzuMnjV7YCUz985J3IO5ilnfMUfuqYgdHUw925k2s/MTk/Lw5hdpdmt6sMfjdWdrpsZhlMhGGl+8OvnCjV36/NkH39v+8D8++Pa/OPz8320/80t5kWUKOkXZFNtS6cCRfW0emFZknRt6lgJOBAM0O6cgVop6Gbx3qiGGQKFaVK1WdPWIFeEhGZdEJBTyYiBnZjcvpXLpqJJhKITaULIEApsWATEw4NRRzXBGqEYGIoYbrNSbh+sR1RflUMdiUpGrpTJxDG0Q603ZCOhzfm5jMCYzBLAb1Pq6cxZm8youLtV8IUQgq7RXAhezIML1YOYhNNncrM9VoxKYHTbMkAmBuFI8zSxcpJiS47lSg+GB4AS4UwhehhPXh4PYCWRkqlrhQYEhKXbdhpmdPHAI4AGsw1InDcxhb293nbfWo573s/Ekl3J4sP98TRAkEUnfK3NJKWlhptAXcw9qTh5kFOM0elswg02dp4wZeMa8hwPbXsXyUtsdNZvu0bxHt+sdr7WcdXIW++w9WEAsvpQ0uttPpt7OnrWiKS+foLPQh8Sa76/3rltP+uDO8dEL78nLb8zvnrRIalbLBytP//TfHnzmby9+8rD/0bdlmTefeC395IOT3/sf0me/8sVf+03P/WL+5Mbh7e/+6Nt37tz5r37n/9xcfuWNf/L6/FG/+MG38vvfb0+eJrfx7bf++K/uiuYOzdGtV796+43Nn/5xWM/BY9lt/nhvdsX9UzdvbF66vVzPJ31n45FrMAiIwzhoJmYyExOEfgtAUrLj48ubw4c77RM5XY6vpRCVgzAilBJseRYXj9Kn3lyv5haiuHfcQ7Bcd13pehMF55C2FIOXArPCcN765HwbEds1RshiS5oY7t/Tty410quuyJCKdTEj9iGrc2fuvbm6ZXODlyDkMKDiqGrJR/XxVPNp4S5BULOGAAB9GYRCdQsbIl9A5YgDU6kTncqtgMPNwG5gDkTOQeHErGYQRrUqXJydzMSQWnGaGYYHZNVIDOIvLyYibiaxShycmXlYrVwwrIbszRr9N0ytUWNXiKziGGujWduf6q+oySpuPpDeXbUQqBTVIeHTaq+uZsRUX1dVQaTW1wUxM8OpGIQK3NzkxkuvxTS+88GP7z/6cFvKq6+8enCwr1pgThe5g0zPFc3hec9Lwwe+aHkvDlpgGEvAL1bfxJVJoqqP1ifH26fq20ZiQfCMlFJ1F7sxKBKF6nLiwLCWSc7b3EKWst0PeLNNLrjnfkyOsYzZqU2r0klMZChmAcyErWcJbKnwhN986bWP927fSQcZ47UZMqetFSuKVqaYvfpWZKwefcgPfszLR9164WZsJYUgaXJ6ck48IuDZ2dLNU2wXi3XgQBIYJH0yHmdqpCGLyMwinkZtD4MhqjxZzh+fLPeOrm+XPagPVBwTtq3Qlss2CzOJlTWzspm6cGCQVYdnFarCAHcqSknUTQxs5sU4BPna73/rxZs3bl8/evXWp2699OYHH7z7k/fe+dG7cvvqay+9eJPL8fHp48fz5eGlkEubrDMwhuxZGBUvTghgglCjUYWu3dhpTsYGnqeXbHyzbUTTNjQks2R7VPaaMEWYaRlvNxNNu2HvrV+xv/32yR997cH7XzvcO9/7wt/Sp+5PGStt11E7j0tWIRHk0l29fGTbvePjeymRAWZdJeUyg6HMAHogEIhcHVyKo056neFoid04gOtSzJkCsRPX29BIhYOVYkAVYqDviyNJq6pg8xCKMzHYixP11XJHzCFU3RsLC2LWvq6GiCpixGlA3AEwImfi4kWiGGnN+eoHFC2Y4WrEXGjQaDJAHEKK0GKmIlJ0GOCZ+bgdqfbFjBEIVB1wZkYchk6bBKaQYERUjIislvdel1T1YHZmqswfgCtbB6Dixk6BmIoZBcCLlxrpDYADjZrRcrWuIxY3bLutQAigYSddVdZMcDPszmZHl49Ywr1798yMgwiH9XYL4OHxCQ2mDg7Co3Y8ny9Ui0gNdaJAUQtrNtMsbRjttH3MpTWZMM/YdopNfIrlYVxNsdwpy6vNOY9Olg8yVo6V+4pt04ftVJCZPHGD4N66rxWdtXuPp7bXs/XkndNMDtdl2c3GygdYPPzujesH7frS9v751nmNsloJtbZ48PQP/l/jNYrZ6Nd+a/KFX37wz/+fh/fv2rf+4NmD93/1q79x/+TR3bsPD/bfuvXalXv3v/+d77/z8u3br7715Zf+zq/2f/tX1z89OfnL/7h8ur4dJvu3Pvlkvf3MzTfyH/z+RBrCuM9n59Mb0+OTlz7z2bOXXtwuHo9k7I0KyMJFnaZecYwELZ31yyUAhrWj2StPrjyePlwlvl9O31hf15grOX8bxpbPRldvcZge61aYxhS23hk2cXy0PD1dwwTNRn1RmDkZk5qDLWO6ljGvXTsLpvrMX5yNFyV3x7lvGWumrL6Olotn9m5LroQeyMwGV2YD3GHMwVHcvTJQuSZpBjHtUZPHUK28Nsx366YDbHBVZRFmFCvrrqueNybiQcsMZirEKG7Q4aSvfEoAF6zE2oz6BQWyBokMy1p4eH5EEygweS0cBwdBFSzVJVT1IPGF+JJ/NrPF8zkRD2tUqzeADYHcQ5QvjBjsBisFblqq5Kx4hQg5VAtzcC2gKqhEKYWJ1bSopSgiknMPMMekqtp3169fTzG895MfP310X7fLN9586+DwyMyoMq6rVtydKKBSMsyY4B7cjDh4sWHGRsSAUzAneAGcSWDujkKkRg/Wj07XpwaVkAzizhIFzkCCIZIASU2YI9AwCALZb9Macc9vKI2TnEvJ0t3n2EaOVKyh116//Vfvv08J3iO4q2jSZGJF+47Lla/+xvryp/5qfu6dce7EiEAWvRYlksGqG2i22By+0e9db589Kqcn5l3pctvun8wfUgmIAgsi0cp2Ot3tNuvIEjgps01ClOJTjq0QWfFSrKipkMQQ+4Ud4/61qy+sTpG2bd8X6sEUiotxYMuOTDwx7oEMy6DANcJh2AnyRaxf8V6JyaiSySDFxEw+uPvg/v2T60d7169f/cQbn7969NJfvfMX73/03tXt9Zs3Lq1LXiw2++Mktu5DCAPKdZikDGMWQCzkhNQ3LLx3ff/R+0fiIySW2GtMMk1xx2hWfEp8mMqMm5HZrlGTZ5snaYSrv/M37v1p3Hz363ms0y//HX6k2tK6JV4zkoGLk7BJx9myMo3d1wZBSEJg1ou7yYAwXMEV+gSY1o6UzG3bZ+I6/BQOVYfkDgPMg3hBX5SGzaVp3185Okokj548hdSUX0cxZlKUon7l6tH8bE5EMHIydy9amJlr/NkwZYUbQAUBlSAbQqjRK4OiYVBVEgBzrSj62vJqGcJeAlPR4labXTIrgYNZGY9G225bIRkXkQ9MAXh+dntNVKDqYqohq1TMyQNHh9ftT50XD3+Xd
CGRhD8HBFakX7VFmV1krhmtVqvnc7nawA5dS/25lmLm+/s7k8nk4wcfc+Aud3nZ17YDQM59CFzLuV41paaO8efzs8E8Z16dJkQcYypFOAREZM8I4JZ5zDximlK7YwfY7vj6AKurtN63OdT2SNZdtI3SgnnptjAOrABHAoqJYq9IJjOfzuZGtinIaDvOC+vQpC0p7Yy33frbVw+/uj5r+83ZGU+YMmeWNo2a8vS8fftLe1/5wkf/4/9iywf6W7/V/dX7zYfvnvy//7vDz3/u8POfbq6+enL85N/87v8DbJu4+3Dxg2/97kcv33rl8MYrt/+P/9vtk/P+7p3D48XrWbu//G7WtNUcRty8+kt++fLt2UfzM+eHj5v9HXAfkJihBUwh9wqOMLNSppOprjar5QoAjek8pJfzC7P505N2czfNX6XrWCeHQbhfbzdwPnhluTzbyLThtM69hHa5LGWeu7jXuTJLTrISc43ZKBCrw7eSex2Z7K7SctvRmcTUvzCNq8erktWXTArKgo6sc3Z1ZHDProAC6ihEhWHmxS3XK1XNJICZdZtZWEsBkESIeLA4BmZnew43JqrqfUlBta+WHhTjIMO9owqpC8uabFijEvxCn4GLze7PHZVemdBDf4efP0oHf3CV8A8Pu1p30l87butvXCyoMSiGn7/GxesPvMj6QgN3HShWSrEaKtNrj+chQgMcX+tBbkCxUu+auj7XUrzXASg7oEWKWbly7Vo7Gr37zg9Ons6/993vf+rtt164fr0oAHYrRMJU4bBuVtzrmWtWvYbVrFiMWJjh7uJBnbz+hRTimDpdPHj24HR9nJqWSUzryErISMDZ2QCQwANTNBNGMCkSbHk+10TRfG86UWyR7M12el/XqzEHUM/l3Xvv2DQUZGmb4iSl6ERFUVKcfvoXdm++cf/JUyQOVpUxRBFhS4ChZs36JPXbrDBIeuHVTtGfzps4LV0HtJJmoOxOcNbCAe1WoSSpCGgkl0InHY+ZG9/SGonHs8nu3mzvYP/+Bx91ZyuL/PH6wafbEqbUt/BMWLmVEelQvREJcTbUMY8ZbKjs6q7DKyUGRKyqQYSGoGVXzQKZ7s+Sqd07zseL+4c7J6+/dOMLX/zb3//+f1ZjOXnnxrS9fz7ru2KpNfKAAtiQNkLDDgYIhcGWrGHdhmh8cOPms7Ox8Do3Ijs9T6K14nscZsRToz0v43yJ11M934tb7tadtp/7ypt3u+Pt9/8nkvPx5Ze7ENpGVQChTJzAbDSfL1puQxi7d0BgSkDvDqJAXC56LjIrzGxw6wt5IRAnckIhGKPACMNkeljkiGjX706mbdM+OjkOQaLVwYKtbWPQyBEFmjWEWO3rIuHk5Gmltw8eVncYeq/R9OwEwAuqJpgFXFet9flSnYjFys8JKyt1B1QJtIYLSrTDyVTrza59LyLFCjNr6UNFEAwq0iGClIjNS6BhOl7Vn3AiJrdKlx8EAhXkU9dfVe8GFDisHudU36djQHMBQExxu91KqEM80GBvxBDFDBjXyaGzMDOt1uvVZh1T2t3ff/z4sam5e9u2fd8fHOzn3C8WZwCYQ1FzIOfCHJiFOPS9xpgY3HVdjEkk9m5BmCNbY/UAxghhJ+yGzcw3u7Y8wmaPViPt2qb1B+d62sQu+MJ4rmHLRs6O1FT6kHvWbeeNMKvt7c0zybqcjXgy5dk0HxtrNi3tpM/le3nyy/HJ/mi6YE+BlTsralPh4/dP/9Dkvb+89Jv/xeiVX1jnAz+8mh69l9/77vrOd+zq1aO3v/zP/i//7MGi3zs6+sbv/ovF6bOHZ+//snzpBz9YXXnp5uUXblx++5KUbf+VXyIOslgul9qP4vw/fWPapaUuD197CXDjLbuBLARzp0HWgACYQzerc8oFAIk0YUJh59bm+tPFe4uD80f56Q2+tuYcJFBYNZCtTRb9OjttorB4Yu4Wz+Z5nY+um6tsjENL27zZNkCxXHtsjhmhC1vjsGRbhw+X4VeuT7Q786XJNpVefV2CUrENk0IU3teHEbieYRU/TiVXTVDNEWQbghqKcCCg13rksMOsaAzJmOsQBeZeoK5D9JtXQWJgeC5axZJsGCJQ3N2GhIahwL5YfdZ8zeGEHQB0VIX6dSdycXr+/PPtwulQG9+fnd+owhGqL2nDEf9zB/BFkNLzA78qzpiKajFzRzFT7c1MtRSzIAyDWRGJ7mZmZqxWmAnVNOyIMQFQ7d0MhBjFDSIBhKJ2dHRlPB6/884Pj4+Pv/sX3+27/qVbNyst270ajg3E5sQUQFyH2z7kE5uDA8g8gFAIHKIZWKBMz9aLx8uP5/kphcaRvG7xvJRi6pF5BHIQmwsQ4MHA1qq1rjvZd0srQcf4tizalvaSM3dfmezcbe29fpPaae8bCy4pwIzNDElcvRnvvfGV5sYrp/NN2fXYs/dOmXhr3kHZIeBcY9U8k9l6BVD5iLvHC2w6253xGBvLMo6aPLWx32qI0rtGD+NmunVrx+5tGY1lm7Y6Cy+8dntydQ/sqr0kuXblZnE6/fDR+6cf6G4Oo5Y3yBvlxLQE+hbb4AiE3swNBhcmMvQXzhirVsxh8Qe/2AZXaiACizBkPrco7Sgx3I7nPj+/d/P6pc9+9jfuPHrwV8fr14+SdCJxqcyNkTOhZuT6kItT40KMm1ZSWWfIQdnk3Vu34wpPzfKErfEyLfES2RQ4ID3UPZ4f4vyTL2h5dor1fMyWTfJT2/3SJz4++fb5d/7V4S/9XZu+1I/HMm7yk9yy1RJQiuSiFNm3EkITOMMd5ExVR1C7XjYvpopKKq53Sr2WCVCwI1QV4mCXZnjoWTnGrt8SExEXgCQ8fXYaKMwmO5tu48TcihXzYgGGIESoASYMUlPAHWReqn9pkD4NUiNUhQVzICIfdE+44KnWW5gGVDyomF8YguAEWKmmherncbOKi3MrtVWA1WlkrcedCDyIoNGbc8X/DMMBApMQuSNwAPkQyUDEVNOjqC6uhvlBHXYTqumZQW6DOrR2wwYI2IUcYDMnMiMBVaPnoH0DYpRnz56hzhWJc86575fLpZnnnC8dXEopPT2dp9SYQbUQcykZHuFqRkADuKpRGxFhAZQYCWjAY5bWRr6cYrlD6ynP9+jcjnV/276Y4w9PetbiJ6EshNcIDCfnlthhHKz3CM6caQ/BeWd/Pg3TeTm/RA9Ka6KmNLZ+2bVt187+fHHlM+tnhwe2AFFIwcR8w5v55p1vyec+OfnSLzz4+rdaKXbrZjfJ+gDXf+OrJ/fvPvjOv5L3Ztd/4XNrpC//w79z94Pj69du3L/znbuPfvjTj//i13/9t07fvfNXP3znrTc/e7ZYL8/ntPWdH/3VG5/5tbund3ev7rI5OIxl3OUN3CpPEZDAoU3p/Dyv+tX22ZyQAJjJzuUr27y9WvbHklZtfy8c39DLbcfrvOZnp7NbNxfzczMacdP1W4kxB44r2ZX22TrYRgMlmDcrXy2pIc4ZRoHWKpq2XR+6PlHszza/9tbR6eO15C5AvIdlZcSaD39x/QQmB2K12BDAcHKHxGJq5lIdrqYg
qpOfULUXVEdIqBva553r8/WlWl3OGGqC9YUvH1XlcHH4VVP7cxlznec8DyDC0P7W03L4n1ZpGvWLB88uD+T0iwO8lhIX32N4pTrYru+zehae19R/vVl2s+rJsiEt0U2tmLuZq/YFKNkYNc+01JwiJh80z3UARigXkEtmcUOFa4pIfQOqOp5O3377c++++869u3d/+IPvduv16298iupqaIgQNxCrWVENzLkvDKrzAwZAod7WkuJkNIos6609efbw47MPl7rhMI2UcGEjgVFiYUpqHiEF3JXqRuHYckmG5HkceMSrftVMUyvQZI9G4UT83WZzs41S+jxSB0JiblMw3lhOvXYmh5/9Srjy4rNulVnbUdGGRJm3hIYg4MTo3CLKug8xKHJO3Xg208UcvOAp96FIE7bIna7RAg1iEkMRowIvbFG9b5Vnknk9ubl/5Uuvr9end+69A2RupLMtF2pDM/v0y5inYzq+tH+jj9ZOxM4KgvjW3CNyPWsjoRAlQg7sbsW91Ej3ykmzodELMB9CM5lEgjhNZuOJWW8gOI8bMeQ7H2+W8zufePnmw7B8/wxtkxH2xHIfVWpnZwBpTZ0F4MZJA9hcRgwpoe1WJvshIKF12kHYQ7gUaN/tkl9Kyxfb9aW0Hp3Pp/n+lLsEtTatMbadvPvFtz/4vd/d9WNLs3W3mk9mjDFTQC5cOGeTqZga940XNSeWCBRQANzRuxvczYyLVzTHxWDo4t8MJwaz07AHrfdhCvJs/gxADIJhf8mtNA7uTZsUVd0cRq4M4wAzIqbAXopaQVViAEI0pDYNlHI4c6/KTlwTlYjdLUgoZsyE4kG4lIE75zVqcJilXdzsdX3EZABX7mNltg7q5vrJyJ+f5KA6sauc+lJqLjBbbRRqlgzgMCKuMpb6YzC3MDxbhmDE59/frcY64XkrPBz3QM2fqpqDavKoXEAC1ZAJA7Qvi7xIKYUQVIuIjCdRs3ZdV72bxNQ0SbX0WpiCFSWSGjNMHkqBqpkVTn3wYMjCQsJIoJZGlMfYjGkz4/WEO+lyXE76U7qW6J2zbchNtwAvbIFOhFmllGQmeTkfHxFMCe6JcluanfVYFnvpcKynxYTcc+hkkharZe4bnU2+P949Wh6/cJC3lrNBSgyUG0+ru3c++rf/086Nt/TpKn/07Smt9bOfOjEb/9qXp3/jC9uP3r/3p1+fvven7Us3f/HtL/d719fl0YvB3n7lrUf3P/rWX/znmy+8Nr4cl/3qU9PL3R/+4e1f+9vf+eH7e9PdeHQ1FxXhTrOZc0g5b6wU80KI25LNNHlY9+u6WFnnddOnpGWK8UvdwTvrR49GZ8/o/DIuFV2JTLen3dKeyXgSoxAjb9UiUkcYc7csjSIgsXb7hZ4se4WpkvROW6hphPhIF0v7xbdm3z8+2UmjwwOxpTGIk/jaHfCtELkXJR42p4CTOxOYUCxLiAQ37yuStBQDkwQys9rqhsDE5EbMbGrDBWZGBuFwMbux+soDzYKriIpKvard6QKpg8rrIKpQD3p+Zrq52VAo81CqwquPiapAbHAYGTBkFV/4SYbBzzCbGmxLPjxdfm7MTRen+zDDdqtv2CsRvVjptaj2NZO7mFNgBklgZtYL39Tzr8ZFCWGu8CoWRQjibnwhFK/oLje0bfv2228n4Q8+uPOTn/wkq/7Kl36lbWfr9Xq97rSaEs2SpPF4ouo5a5+zg2LbRkn1dE9pFGOTtX+8fPTw7MHGS0pjWGKRbMXVYZx41HJkBzgUGxkxEydp0Tgl98atNWk57EZILBPmKDJGTNg05SjSpWm820xiznkUmbfC2UpuaNxrnr31pfHrr+tmKUu2CGxDyYo+IAEKJKIlwIZcH5MupjoN8cola8/wRBGSxaIEcEay0eG41xyYUX2Yau4mU9GJmSz33n7h+ts379774VoXcjUhskcbS6NGmv345H2B37f7L9x+aflILRMFBIEvAXWHUG55kN+5ubEBrnVi4iCwVt17YLJKaBoqPrNCAjRdDqlJDLBLVg0xTaPMc/ejD4+vHd4Yoyy7xyHpRpuRU6GhRHPEC2McA0EB6yRxUGay5GNWKjv7UoKUVnWkOrLRYXxhkg/XD6bvfzM8+i6Wd8xOW1+SBx7HiYzj5Vs3f/Wr5e1rj7sPb2HykHdYwtOdkBEaC9oXUdGsMhask1iZ7czOVk+BYDXb3QmoERxcZ6KD9QaINPDsyKqhFzBQqRa44qbEMh6P3NyyhiBOjufkcniPizQiszZEc1dXsxooxKmR3G3rl9fg4kpgroU5wVgNEsw8StSiIdTzaWDggfhCTMLPVRqguklgrVm5Zj6g6RCkttQOOIuo1mjyevjWnEAiYi1qpQABTEkSg7p+CzMh1AQFM6PBYFCfG3VsR5XWYbgIWyUioLqEq4dxkIC5BwleTAniNTySGJUTTbH2373W+TwTiOXibVd9DYuElFLO+ezs7NmzZyKJWdpm1GuB1u0vtjkzOKXxdtu7x4TY9zmhqQkQHBhMgS3AE0pka4qGLUq/zZ1cTqN2u/WOOMuzhovsYdNJXq0WD0eyfvPWdevmdzfs66ILjeOGFI1sJc8nQbe9BUZvO5YpxdQRlrpCOri/99Zj7T6pd1N3Km1reRRivjYbr+68u7pzTFcPbbdZHBwevHGj257mJ+/a4aGOn+1dTmOSxfzO5g/f49ml66++/uY/+A2k/fGt2Zu/9avjhZ1+/KRdjfKf/Kerr97+9kfvTKc4ePMLj+bvtu2EMjIjxqaUrRkkxrzti/dmAYAFoMuSCwBN4DgiJ7XuZrn2/vJRN7a7OLlSdre6adrd03s/pWsHebPlzs17T8nXnW+K9ZEZvvW15pZ85hyWFplKBoJ7AgmKcYiSGl4H3bk02mutn+fpwVRXXlbDxIQ5+IqCkPt2OKi4UoEI7iTCMOZYQObZaKCimkP4Z4egWbkAJw+VMGqsnGk92pgwZBTUO64OVS+KVq8ed8BAUnlIwypniEkb1H4EWHH7a0vdobccvnVldhDq6YthXjaMmYfhNIgIQyEPuhh4/8wzC3reBjv8ot2FailFrRTToqVUa5a5ichAIwENIx8ftF5wdjeqYzY4zJjE3UVC5WwwQftcd0OqmRm/+PkvHBxe/fa3/+wn7/+47+0rX/kb0+lOzn23ynA3K7uzg+l018wrZgskbdsyixUHQmA57/Kd43vH5x8jFtKkGW2DbBzAszSNnIyDqWmfrXjHHblwGFvrMgkWwaMYRp52wvTyeOFn2OHSGO3A0PNOetzkl1NIusSeTnMeeedAkODbY7v6yauv3Mjl48ycZ6IjUW91o7YhdI41wF7YKSJsAYEyqXDKk3B5rNblCSdTRDGGxhJauXRw6cnxo6zb50P4qNBUuC07nzw6/MUX3vnBn8cD7Oy2UpY7UlrfpGAq4znnOJ2ed/qdB9/59Bc/2xwKNm5CSoWrkYwIJq7iECADDGeiWEfQPhhk6mOWgZ+Z4IkCQKLUtpyM2BjCIhgXNWUWGa/6zcnJemcSJ6O9rCq0UG+IthguTbsY+QpRNAtGIXNi5S0
stebi1m6a6Wx7aH5J5Mb+Lh6mb//bnfe+cV3vHlG33xzTsdJJ2drWmEOSrvl+/5//5Eu/81vf7XrpPqR4MCk5sT2c7uqGSIP1BVNAjVvWVTrf9A4BInl5Pn8mYmKuDWmVPVxofWGl9G518+QEZ+cLADtM81oBG7WjeoB5nWqBUQHuVdgbRd1yLoOeAiCCqYLqYIGY0evFfUcX97UwmzGHmMS3xsRalGu6tZNdRJI6QBSIansKGkxOg7vX3YgCYFpq/BGYxXR4XgCB3Go5VD977UHdUUrZlhxqxrhDi7pbjGnwYNSSHxdi0frBanXgPyvno0gpWvfrUaRXrWEPgTgMK7CLTzusx1CKTiaTEMJqta55ZVZ/Ru6q1m26pm1rhGpxkyBajFwp9G48Ho/m8+Xu7Ijj6Oysc18TjQDOXc8trDibu7oVowJFcJCxGNgEISCzwxiyVk7UXhovHq2fnWxP7l9qV9PD8eGbL+9MLv/VX/5Jnp/YW79Oytd3D5+sT5E5tmWP1yir63uz87N15xvvx3OihG7aTqzXw3D6aHrzm+HobX3vqLvryYq2C9U4mu4vT3Oa6SvX09FsSWeYsWJz9XOX3//Gtw8/d+v0yb0EZ0+iz5Z3/+z0zp8kGYcbb55r3OrKjud7x8vmlcM7P/0gNfsHX/ovTu4/TTZVgC2zhG2/DQynkLVHJfPDwKFYlouiuuVxszMyxpr1wPeu6qWP8rOH0/Pz1Uk7Gy0XK2/ZRETc+6oX4H6+3EV0NLx1LpE6mNAuW0rYMoUxIFw4BlGwOjcp9c+4uxmn/XYrM16vNzwKDAEXJzfzOBHk5E5ehiaSmAgKZwnJLMOVmcwD3IkKodQziYwvxEdWkRvEYKvIl7ox8gpSNRAHsepZcgvEDq/T1GFr42bwgGDmgFFdGPGwlHt+SVeNVRUXOi7m1X7Be8KF38P9wtL7//+L6pk7CK2AYWF8QenwoWH2QYtTu3bUpUxgLqpMJEGyVwpN7dkJQCDSUuDOxL2VwRx8sYdyKwS4qyrMWERUEZOk1LqXRmKUicHayeSLX/zC/t7BH/7hH9354Cfdtv/KV75yeOlyalabTXbzKG0p7gRJjaQWzqjiZ0Ypfu/0/qPTR8+6eQE8o1iOIl2haWhuXX7p+t4LQqNsaq5Z1/Nlt+g0qy61W9lmXTpEk0BWMrz1XbM14lRCQ22rNiJq+t69wdkXffWjspywJS8JW/S6GU9fePvNEFfbTS6BVsqr0Cjn8zTiSMwwqQFDMHdlF+YUwLENNsXY4qU2HO346Sk1DGGQO/PJ6llpPUCYudO+NTZWbbW9PL722Vt3fvTd5oAmuz71k4R1m8+n0nO3KtyMLbiP9ka7j+bvbfqPpi++0T/uxblXY4fXHLctWU6wLbFQHSGSE5mbwQtQt5yVpgIKof7tcQggEpqysTbjmcJctbAVRyBIwU4ZbTYqoP1dMSPzCWNVjJjrNT+8LsA1td2taA9hQkIWs2hgi7N+nUI/De3xj6bf/9dXF++8OFleb9ezxf18Ktxck0+8PIt78WBc2iRRTu59+/g/fOvT/+TvnL93LEhBOOtIuXm02/rGrQWSIbEFM+asHCq4/PnEiwA3N/MLw28YKGxAxefUttbMoG5OFIiYiLf99qUbLz57dtptc4oBxasnPgBM7GQEKjLoe1sRM1UbxGjmLiEYzApQB/8hVOVwTfpzd2cy1+VyWXvflJLVZAL3UpvyQT48uCGGpVeNNnKKUVQ1MJdB7wEaVjVEzIbi5gyrgnetai5nEe7NxYI5FCa1VA/chNCr1XSj2k9wtQ7XAVyVlZhfLOHc65OA6jqbzRGC1K0wiFKhHm4EMRhMGbGwkYUgN27eXK9X56sVHGalci/rdHE8Hm+3Xe57jhGAlgIicu42nYS29hh932k25tg07WbTu2uSMWrrY2ZmpRA5g2AUDAyCe1SDUESxlGZvyXfmP/oDOTvbHyX/xU/Obrylm3zy/k//+Lt/Jim9fvWVx+vc7km36koy7pBGmxlOPfSh204jOKfOy06JapQ1d+JGabc/3k6OvnX1c4d0460P391bH2cnm+zNIXtffHP6iVunD9+bfuIKH+0++O//b0t6f3ZruveVXzm+//KatdWw+u5f7qy3B7/6m8cfvDee7S+//WeXPv1lPf2oPYr3HtxNt144eOvvWSZaPcXBNHJT+kxoib14qQ938ypFMCYJqpvVQtADsMsv8SiWraZJU9xu48q9/Hhpy3uy+OTm+kk5T4cHWXNAII+9UWtsal5sDc65R4dIomI7R75T4nHW2UwyS196keBkvW1GI7xyMN2uRdbKSaRBWZnBIhMJZyq2MgC0TcTmbsXUYMJcbb7MrBC3HIiJuZgWINDwQLnYAQuAUixGqTxUJxis6grYQfXpU+qQadjrBuYLBBsMRsbOVmUJQ9Mz7FP8otyFWWFiNwIKDBABuE6VvRqSieEV1DXQL2u9OnxLv8jaHu4Q++sYqQFkUVfQQx9LFEIIZr31Q4Fba1yHxOjkzJQk5j5rKRJTirFbb8ysTQ0zdV2nqsRcUSJJZDQeV/acuzcxjcfj1DQ1CjSIFMdyk19/47XRePyNr/+HBx/f/4Nv/MGXvvzlWy/dXjcdwET1I9c8ZhiYEYhZi50vV7r1m4ev3rSyXG9BwdwVUNWD2eHNwxuM1BdLwQBvZLbTiHsi5g0tT8vTEzpdhlXXdufWXbq815g/Whz3s/7aOL42Tmuaj8Om1c1O2Zym/vK2G6dzoX7MujHcfPHN2Y4u8v1JdPO0S7ak0domDSarNNrstdQ5CVlwRI8rLyOodTxOHSfanI+OdvKj3aJjJKsoA2cEiWLIuTO2FrFbd4dXDk/L6dVfurlYP8iHemWyveSPpnY2xWZM5xPkVrqNMqeZjqeb9dPI4SB/sDe78qAfQxCEmMnUzUCFg0ZsJ07V2VVXn05s5NULWukQVh90gdlB9T8kHXAB5XEmKuxCTg2TgHNvbDEOMotEWBPMPTAzLDuCoxD64YwwA0c3NvOtOZISwdmMNEg/3WlovMWffP1w/WB3zGn7aMxzfuuLB7ufln5XTwxLsLorG9nNz/zmgwffXX7/w8PDadb1tEhrS8FOSrtZygu3r6+wXJwtUkiIBEOvW9+W1IReEdwpoHhguMLYnQnEwTB490x9kCAQ3GicIhOttz3DI8ezxQIEIzNyCJhYhhIXHMQcBC/KWowDA4wqlQI5yLQAEOFsJMJc+ZJEDi4oYITCzKylu3Z0tLu3d+fOHYkJZmyo+REXs9kyHILVS1QLfC+mDqCHBQYVsENRIAFq5Cg1I8ysjW0ppdcNyFjCtig7EbGAwRU47uQwBxMosEgoagQzKxUACYd6qUtkIYajmFGd3tW5PcSGnwDqlLpUQoGbM8EsBQF7KcXgH93/aLvNqHxKd/cKF4CZE3Fq2lIsBimmBcbERW08mnTbslguSaKat6PGLWbtg8Q+q5mqGitXKAo5BGSFKj2+GDMVZipJjWLrfPP20X4Mve9/sMCH9++u//Jr+fR4PJn8+t/9nfd+/P3Hd/5qPH
txlafrpxu6AjOa8CrZ1hXUaZui0KiltJW2g7OlkXZMZwvlKadtosf7B0/8yy/Jg9c+fnf/+GyyE06/9Xv66Pr+59/Cbjn76Z9cvTLx/u7R8db+1Z/eaiWPx5geGE7zjnX5vUlar+jx3ozjX/67/vCFx8/W7Rc/t3f9F2RNxz+4068/2rv6dr9UxhiwYMgKIaoBBI7sbG0SPy82fxZIAKSdndDsFMow9PADOdw7lfmse9jmV7aZhbe5VGxFDxUjU5NCDXibkYLwzFXM2Gy33UP3ZAGagbtuxGxBG2dLjdj2z9f99dZeE92yVsw6sWEdjIzV2YNvDBnwEEgcnFJr2tU5jheIA8TDBoHEVBEY7AEMtyhSDz8m9NqD3GprTGKlgCjEtM0ZoDpYJuLeSuCgpszUq9JFzjxdjIaAisWwohaimBcQzIxBFNiLuZEkaZo2Rem6vO3WZkbERjVGydwqh2sIAg+BQwjPGUfPsZpWrFTp8kDgAPGgl6nSSyIuRR3GIVRNrANtbA4PL03G423uiSjGeH5+vt50+/t7bdsuV0sGjdsRgNV6lXMOIXAIANqmEZEQpLi5G4GYAzOp1SFUzSXDctXdvHnzH/zDf/C1f/+1R48efPMPvrn+pfzGp17PSlpAzkzizOQkREZshRpusZMOdq8RCQ0aUgYGBypzcjMFURDDhfKc2ANBMJ7MxpOdG9OXLMHG1kl3+dbu9979dv/ge9O9vbuSH9PjndJNfD3C6nrcrsv2RruNvL3UhPX5CU8PD1/eM/9ohGQ9G9nWOflsbCvhWYPRiifdeLSWCAbDAqiwQtlJW469d2iLX2mwAYRBxRkgMgJL8I6JjcwlNV1cH7x2ha+l4/sfH01WV8rxZRzvYz71xQ4tJ7ZsuV8jWBnpctRJGz3hwz+9/srtDpP54VUFzNjWQG+uTlvynAgNE6OKYp0dwYlB9TJCTcWpFi8QcWB3yPYQs51UMfgcnLUHxMGpCBA2J3kaU5ylfKZiwbxH2RI7IQDqXqsiMzCsNwvwWJVBRkbMk/29TkrY0QTstHG8me/YYsqb2Zd/Gzsv9h9s7cEcz5rSZ0okSSyW5Rkd3HzlpPtgt1sueb5EfPXazqgfnzzt0mxydjovfeEYiliUmJf9znQ8O5o+eHQ3xmCqxWuofGyZc15KCMPNNnhjCpt3pgdhxFvFKJQq/BNm5+VyKUxtEKgbjFMyWLFS0zppeGogAH2fhQKDacC7u9XLARAAalYXAXyxGoNTEnKDhU3OL8xmTEzg3opUkRz9zL1Q/yuwmCmI6yXvBg4M5lakR3Z3NodpHajFYTfGq9Wq3of1xbgSU+rNj4q+qk+koce9aAyIq2iLzIZdb9Vo1ck0wb1g4JoCRkLFCvnPVfg0lDXErKqBuG3bXnW5XAYWM5MQMOybrRjAtFiet03a3dvd5r7f9iCe7kzPz9ZdtzUQPFjRzXbV91SMmUfT8V5XzLo+jqP2KpZQiOpjnOFuvRMxCoLCZcuM0vX2n/703f50NbafPOiWgfNbn31jdvW3Uzt79uDu3Q8fHYR29vQvdq588pyuhJxLX8pZMs3MTBDbltgsU8uCKfkkMGuLY/ax0bpTdKuQO8fop7Or9/j61ebRS3bvppw29uDsnQfr+aiV1TQsWuYotqML69bJqDsla2ZWuPvLrycg83i1d/S9q2+/v73yW18Y8+h2Pl2v5quT9/7o6PXPbxIhlmQouUThUWo2ZZUti5GZpRS5aU/+/Hs7WtZcr2IyDxQdAYoyxc51XJ7Pf3p6fX086sYaV1YkRdUMMgEs93q2CeNoM/RsMRoY1oRutu3OyiRRk5/uipoFKQ5Jak8DIkN30lSnoEAkgNSrqQCoDNwEUTXaNg4LlMx61FOaDEEI6sYg9kHli8HBwR4CV6Wju4FY3QEEkVq9iQRT86J1H8uABAFAF2kiDBeJ5mbmUUJlLEuouFOtsOicM9xZQsVMWq9wa1K7O9tNTeNuUZKZqqoWcytQSKjp9GxmMUpKKVyYj//akgZArPP0Un9FETev1MwQAhGDyaxor9przjlJbNtmPB5LSsycGnN3M9/d3dvb57osnM1mNDwBfEdmBGJhwoUSvM43veZ2kxUrxZxIJLjBzGJKfV/W6+3eweE//Ef/+Ov/7ps//em73/yPf7hc9Z/7/C8SvM9kLGziIAejhBBEDVGSF/EhuUEABocBGUAEcYQhcsD0YroQYNE9GkUgAWOWXW6atsw8HTSYQtvlbwTc35xdle0OTk96/SrrCZ49QJmW5cww1SftS68dTvzM5w6C+7ZgbUm4JIyC6QqTiH6NzHGyHo9g0Km3yj1bAdMOZGfXck9Ts8skKSiMC8OrSoUO2v1Vd6ZdTpQ2m/Vkd29Rnk3k/IhOD/34Kh5cwum4zG/uj9dP+vXp+mCWCi+pYeUUwvTk3W8efvWrS961Ep/MDkoHjIEMyu4b9+TYirnC2AbpAhOLe3EzRxnWmhigDOTkcGmvji2RSMPiRfuEJCVtN12Xdaz55U8c7rftZnEqO2wbtuxUGgfcsoGYa99lICYNQHSEWjtCUELOBZbQWEgptVcutafd2OaztkTb23ywxrzYdpTWbpsgKZYxEbNvjRcpyd5yeXx4mJd5xXF7qUE82XRxkto4vrL74PGjKE22LCww7bocSERiD4eZW86Wk/B4NOr7bQ0LA1BMCa5urZHBNbGtNwQoAszEkVKqKrnaKNatEihoVgYBysR52916+dbp09Pj+VmUWGpyA4gBYVFVE7IaCsYEc6qUVVBxC0Spaddd9xd/+ZcpxL4omK2GsNjP2wWHqRSqg5irKlKtGLltVMlATCShlEIGNytV3D0YDwf5lxeXwOYVb+nEFKrdF25uqEC8Yu4AkzuhdsZDpWZ1j+H1H3didoeE4ObuFpjNCjkHrtB8qyttNw/EZkYhtFG6roM7M5dSCGAmYdE+cwgcgpmv12u4RwrG2Ky7OtwLVbZKdunSpdWqb5txjJPlcgMeC0vdf2tWLswWLJdt4Z7FQlSSvnhsEpqttiINfe7zO711B7PdzDeWbPOFvv+tb7//nY9sfZZS24CV1tfoZHH6Yxx9MZrk0y5RU0JPlQs09mCQ8ZodHXm2VUzTRB4bSe20S5CswRaW7f7ewYO96z+a6Y3mw5t05yW9v58XLbbBWHbBiyc7UgKXMI6F1tvCneAktXcwey9MH4HQzPXGCymvddF9/J3f3X/hqhzu910XY1to5etxqJvNQq1zcVMCqS2fzd+dn74hNkp7AKa3X3XdBhCMUmJb55f4yrunH3R7i7ty8lm5CTdkqGniaNutTYC29IdjnVoURguHJuilom/sLX+0sglbYysOTfFC3o9ox8oqt+XWaJlB3d64YlzVdDIaN9vmtHs2Go/cXVSYWHMB1E3BYp5RjMng7GUALhLHGl7iQ3IqYcj5YBBGJKUoO4KkXlU4pOl4vV4HCVaKucGKaqlRCsw0Ho1yzv02BwmVPsPupqocAFfVGMVKqbAsM9eibdOOJ+PBxkMgp5TS3sFB1Tz2vVoxIsSYUopqRVhC4L/mJ
PbBLuXD5c0IwcTrZe9uogXMgQMAYpiFNiar1hVikQAmM1PV4YNXaHPg4UsAI5TK5GF2914LE6FGNVXXx4Uwu264CeiziqQQRNWjRHNsszZp9vd+57f+8H+d/fAHP/j2t//zet1/4Ze/2LTtVkEIZoFYAHYTBIEFJ4KLI1AkBHBgBDjXc5/A5kIwkNEQbeoAuwegISRQa2iYpizJ0thojJmblOWhrWZUPl+WfxK6R0VfDatl3kyll8WibePlS23Uh+6BlHLuYCbcRuQVZoACRmzBFDCL1O0lAtSZ3JgtZEEDzqOxXTnDh6qdRCkKBsNhwTfojeEj7re5uMZDCeHZLh5d1ZNDfvACHk27U1rg9G7n57Dzgl2nSCUaTTYvXtWH83dOf/SNV9/+jXzelNScHcywcldHJu7Ytoac3MxhBHUwjJyIWYisGMz1olobwhoIEFzu+xhSG9pRo9vWCus2H432ru3tHVDiJS8X5+ZGiQpKKMlKMlWzAHa4gULdq2AQ9hJLtGBgcEwlYDzBswc/fGX31t61g/a97iBu0vGj1bf+qr35y93J03SS+lOUkhAV54WahsTKGO3BzLqP253ucJzvP/3wad/tNy8/FOrOtt2ykzYU9M6exm1ez8+WZ6PUlLw1YkIIzEaiJYuk4eNWxW/dErlLijE1Xc4sgZiTc8nZBFYxdnAMIhDqTcuF5QEAsxXg7of3SlFBgFngUGetRKTam1vScGFocAcqJ97UhIOT96aRJTZSLsDrNig6f9b/Vj+R+XAmwlGJ8A4HkxMHZrj3dnEqIoCpmJJ7GAa8xiwFpmbCg0jTUYO+ATcrzhzgsLpOBteCupLxHW7mHAiAVUAdEdUPm/valMCMwbdfeeX4yZPT09OY0s+WbIAEWZyf37xxg5mXi3PmECVkVTObjcdrWM59PYxzb0yIMWrfC0vgUKfT7j6dTF+9/fL9B6cnJ4ummbRtkzMIhEKRI5mxMRshs3rK1GSkNcad7GrqwkRl5Ja3lz/3IlAe/sXpR9+5f/+xLh4sdNkfSCPTmaqWZvPSZz97NtmMz59q/os8fytFNt7E87FGM2IucDAY49EKBMF00i/Pw0y8a2Fd2rWDAAb1HfcLid121r539MkP8879VfN2//5NPb+uJ9P5aly2LfcWbK7NOTdPbPqRXHrSXH0apm3b3JT88Fm3tNXhwc57v/svX3jjejx4u18vwzzAclyPzLz0LdwDjxWcrSNVCTzvzjL5D8bpWmQAl/rWe/bM3mpZthmLfY6vp5s/Pj5+vLda63alZTTm1AbeIdpLFhXc61RsDLBNg8PVbDOOfRjpA1/MGKmsbXsmSaaT9nT9sfC4TdurweY8QqHtbOzs0WU9X2+0a/dbXxG0eKXWIDm2gMALcYiBc+7BzkmsuNqWYAio45o6QaoK/GFrC4QgF1c+1Ip2azBUewBRRIJICBLTbHfWjkYphGfP5uv1OqYkIqNRS8za55SSxFSKBmbVst1u+15zztN2p03NdDr26jTiaqeDiNQruWnr46OWxZZiU5v9IUOwGn9rhTtgFxwVnjk0u1WpHeqExqwmFnqSWNmUqKEplZjHwascGqgWZwMCB4cRh0rMGTIdBjKIXcgvqyzAvZoMwYDFwFV9y8TmZAYCb7caovz6b/z6dLb37T/91nf/8i+2nf3qV77cjnfz1omFuQVYjQPEjFFtfgQI0MBgYHJ2YqrdsgcblLhemSsAGA3QEFpHizIya61rI3bTrnRtyGqbV5vl/f7Zfiwv9Mca+HZZzHnJkm2Zk7/Yn6zL/KPR/k5gbV1HzAXjDpbQN5zHZB3ZEn0wA2G3bu9AXJDNckq5LeelNDd30vjV7v4PLDkpXE3AVCeCGswVHSOR7CY+W+xgPcX5DIupnjbLRAvKx5k3gddJlz2P2VvnFUGwv7t48IP/9dVPvX0pNBufrcYT3WFsgDW8cSR4AjIThFhgUqw3t8BWUZQYvHl/7ZfkPfDI+1Y3UGtwebbz+o0XX5juhQ2fPu0+fu/Zdnu2d7nlpRJBAZunoiWwgLMVAxmzD7QZHuJxh7EmvBRbl9hev2qbDV3dy9eu270fx7iz/N5/CnYtjm9mOxNM3FQUABsKHCVbO5o2XdNnTTvcQkIZUFBGEAlMTjBwyFn7Pl+5eqXbnC6XS7ASVPvOSFlkvVnHWNffwy6mmMG8cxXtY5AeiqwjGRkhOisGIrPX4lMLtECqrA01xLuYFyvMoXoFixoxM2rgDzNY4cFJqjXQQeQKN/Y46J/kos0dIiIGYpfVIxuDoa8usYjqCJpAJFWI4pGDE4oamUUJmdXM2Ab0s3oJHMzhKCEE02EWwkNXWo/1wRZZ52Z+YXr0QbrlTm71iXKha6vRxWZOHJjIijFzKfrw4cPqV2BmtwrAZyKyYjHG4ycnqllYYpTtNrMwsyzPziVKG9N4Mum6DqYpivYqIqZG4BBYYnJnZl5vutoqLZeL6fRS04zPznVEqWRNraCHdR4KSNFJs/bUoV1jssxhf0ds38xKqzf+4H/5s/e/+QEtUthgfzbdNKvRaHy+XE6m6RNf/OUn/vjx2cdyc9qun12V03uLQzTSQorm4PACJ+IAZxqPuzE2LfKh5GK60M54htLDgk9amjR+yXtZ7WzuXo7Ldm/3Ubm61PTxZm+HTsa2SLwJqdlies6pb/cZu5dYxmi3dj5PpJdm4/XiwZ/93tFrN3df/PLypx/30nhWltCzRQP1CJYAL1BGMmYTymdnW6wj5FQA4A//4o9fu3Xz6pVXY2zRo49dbuXy5Ma7yzubw/GH+uzl9mgbO6TQedc2SSmPdmI6nAqpoE/oWnYu8xHyhx/Np9aOF4/G4gxqOIzRmm7B/SXIAXtvMcQ8D3EzETdESlxT/woEjbqiA4PLWmJsmYL2K3NlJytwK4BTscpetUBwDzwgBSrOryYaBQ69ZhE5OjxMTZu7ztwkJTgkhBRjNbIHiTW07NLhpQM/qLRXljCcUwOpI5kVkTSdTlULEepUmWjI5b3gvA7tbPX0Eep3ICbWUi7ifmmw+12UzIPXF4N4q8AuGFhD0oOTVUuTBLLBMH+xHkbwqsSkGqQKqtTKisoEX+RT/IwKwBfZUHSx5Q5D7esDTbP2CxwBMoNIBAjgUkKBfelXvzSbXPrjP/qjH7/37nKT/+bf/M39S0fbDoykoMCJhbwFxBHJg7u4p2oxcSdQqAGjgzuqohEHYyfI2dG4t6AW1IDHMMHexKa2eGkU8/mTV2n5ed4c2dO/xaukejXkvU13ulA5pfH1y3yWuvMz0QmnhtQAiWMdXzpLlheuLXTtmW2srgkdwOQoPMRgLIAcxnl5n9rDvSly22u3CMx9IAtkoUVgWE59n0eRm6C8HZflNNmorPawtrnRArKJ/bK3Z4Y1LDlnQkM04iyamvb0wx/Y/KeX9w6fdosxzRbtlMaEBp7cgiOAEJ2q1Ig5RLLBnsMsTlUUXX5OUe8y2zsfRfTb9TTqm5997caNa8bj40X3/vHx/HRdmHcOYNUAW9zcXBGWY+t7V2dOtb0MRL0Nl497iakJLfWc
CYpCSpAm8RzYrJFOXywfMP/+wPvvGKWT0Nh599/J/e//o7o1cnonxmF9Icfjg5eOON2dVPn59Ky26++/LkreeYH0/H1JMGcAO0UKdgQbBkRGFJOU9GEsBCChjmlEo0ikCUCURsmJB6VkKIQUS7rr9167W//PF/ffTw4Z0331w2rWHYwkoIzBxC2Nw8zms6cylr2WyyjgwUNhA0Zt84ZpvsKSVIlMj58TjHnDQXR5kzMgSHVD5TwoiQm2cdquPNFIiENjpFnDe4eSOCDjrpEANn1Qxp8o7IQRkXRLo5wKhqCD0Zk9B7TauUE4yczTTS55txP00uJTmIpdxLiqFuGDic+aljerec7XkIACOtoFAGMxOLpgUFFpTAlQKwqqkDdiAHFHBQp+rAVbJZi1qQKPMkPgqdbo8xFg7WWPQiCIbEiwP5vilKVzdvf+VltOH50+XE2oPL5uPnPZWjghCDWIt4HL7yzl5V8mItrrCqMRUTGUIQHUQmlIDmYdzNSfPGxuAC74YIpigTzbHx+NbXr3/4+X3FDI4lGGZHUrIZiVgyBg5SgmtYx+oEYHa4OaPZ7i67UMH2Nqzb0Cz8k2U4nXsvMBMTK9aJIYmucIFld7zV+9iuj6azPd8uo1gAMTZga61K6KwlQWA2EnsAhS1m29ttt/a+U8S0m5nYEGfvl9T7B+kJnEWmLN57EIKIIws4AAoedMAwhR2KUgBgY11hQdyH3loJIRCzKjGPtuopACaO0YOkj20MIUrs+9i2XtTMZns7O3tdtz58+nSyNfnHf/yPy6o8njfOuRAkbx4TBbBZA7ihqdNgznKe+XCejBN8hQF20g06lBFq3QgtMNzB4R+HdYCbXJ0HzMMTbprrXGduioHc6ib6pA4OrJsMT5mQeVFdlNX5g+kVYTMuzrwKlqTxTZMqbFKvBWj4CwNWQWBLQlGNpYKpCDAKg4KVjPQxjjB9+fpvrA/HcoIyPNrePrIlIxgxKhKCt8yykBu33tgZ77lo9orrH/zwg+OHD6qS/Ys1jKsKOw7y9AODOR/9Kj74gNtW90P/Wu93QhTpC2+NurOuLbyYwAhRAlGIS0fObMOQXJrwnbf93OPJw0C0/MlP3epEYgEofOMFZAGppQks6NVbIMwQSeZbhUoZMIlU9TIPQORRJxSs2WIbi6avZotXb8kOSdltsyPLHUKg6qwaL/qSeOojGjurZTHidhvtyK7He1U3v7R81OBoxTN5esgnthlNQLBltbTOW0iFfq2lVU8aFBLIeCr7SR2UxQs8WJgBScsCK9JAFER7CxRAr7CEoJLXshOl5ikqsR3vVR49RjCGgxdVGDYSJfgAGIlakLqJU08iZG1FCCLCbFUtUaUl1CmPqDGNKys7Y5321+r5wx//5yv2ZLw6Xdz/4Y26XS67j/59O9t2l0fbW+Ws2tqay9zoYYfWUdU8+uDZ+39x7b3LTT3pPetaUIMaUqdagAKrMrMVFEnFLKBE/wOEE8AWk6cSG06ywqROEgGKouhVmLlwBRS7u7uphE+THmarQN4clS6Zgi9IBYatR3kbX776RDIMP0ViGpIZwzFKbgM3G09zmNZsYrwpzNMdFD6XuKhuNJGbTJlysL0QdeT80m5CDGgzLEre2MluJSHEkvhQgx7pPAowAzFt5N0sNBVJRpKkw0ok2kyUaQho50zvjQgrBxjlQQk1xI8Baqc8ImcogwyBVRM/04ItUJIawBEVghLWwgElUAKV9EVgx1RHNUwM2ZZHNhTXCxSBIqtRlagCDkoRsed41uDa+F998HS8PLl56dL9RfvpU9R7NixiFHbFlg+rseM3Xr18tq6sTYt9kNXOWWg+BGtKvjYDfztPbvhCOM0LyEjZx1DZuu1kd+/yt96Y/9mvYjUpBc5gBJQxOpTMFcQpTYgdY8R9GYjBZfj2794MvfZqYD0JiB1z/BpiWIR7D5cvFke0yyYtj1v62Wzr6aePfvgXf+nP+i9/6bW33nqjXZ8BsNZFaYBgHUsc9l6DrWHDzJys1433fUohzMYYLgoHlRglYTFRYgzROdfHINo5V2roY9SAkJxIVAUqUSSokILAyewM5GPlClvYooAKTDo7ClBMLHmJxhoGFUVdOoDYGL5y+RqxEBJXH8ayCNp12zSdK4oYY4JWVDXdr4R3nCdaHkTwenFTKW1O7IXUmDvW4S/Z3Z0GoAkD5jHMmC4UF+dz1+F0/9oV/IKxZP67KNK6sFQjyyD1T+XOr91iEEElcyIBUEKI7PCEnC33lQGjwpopQEwwRCa1vykxExcSiNkSnMBqASZEKz4oVdy76O684p/+VXn8/GS3vs/MFaOPEYoIVm6b9uD69du335xM6vXz5pcPHjx7+kuGD96xCb30k6l59rHOnxrbuuBD5enVZnE1UFD2impyEELXzk+UR9P9l5uluKs36Fe/ondfrl65HWa161arH37fvP+B279sr15FtcUfPxi/9CamV1bf/6+2dPXB7W7+uH/wg9HLL8cvv8HNs3Dygf3qN+X21eUv/oKrcdyb4umHEYhwQU622YlwRAWp29lXHm7dmDcyrrYKKwiYB4abBF8vtejdqNdqFo8m1MyMd7og38czrrspxjcWz7r4pJk7/MI371yrrY1+RWa6mpRrxsRybdAzqYJ7uJZGTVgsRlMesbQSfGDHcEwlqAUVIMeqTFIqEUPz+qLk6ZzOGDEgtqi57dvCOWExBmoR2wiGmxbih1OSDBUFaNmwE3hViLJQYMuoEJ1Uu5XZKXQSd7jZpeMP7v7ldv/wMty1KZYP2smLcsdty6kxy9EEB5fG05ktWg1Xbrtj+Mdherp+uIujI+HjrQM0hCVQAQ5UMSLEG4m6mZYQGyClWpLgmYmtBZFIDInhY1mDnGcMpqKwq6ZZrxtjk780RYkm2fEkyloWamKjoNmsABvK59xcIsFdOcEhxigSkRy4+CLGtRmZ0kbST1+8urk7zPE+UUJymjSDb8PgypMus26EQDKIKlJ+M2QHpnEyb09pY5ANp0iD7GZLRNZy0jvkZ5OIzCXRNLeVmL5n0/puGvvNJolEw04057waOgcbynyxVBsktFo0cgbNKOUthQVZFUeUsq8Dai4ZFVBCS9VKuRKui+CgFbOzrsJROz8d2eoyUYAygohCOUbuGQHaguoyLMMre/j9d96ZBPOjn312dxFKYSMSG5WAtrNv7k4vXdp9dtyWzgmCNSQSksMZNKbfWO6GE75eWAyenRfiu0ZNfAruQcbWEMsGiyV//e1XPvj0swZjsItSMFfihGtoDYwIY5KRwMGMnHFYqz/idrTrVn0LZmYv8CLMzlQT/tL+vsi0tT0j+jbOdrfuvf/JD/7rX/yzf/pPntyf/+qDX37VftWHFYCqqkgUFCV6VxYinUrHieBPvWo0xtZ1XY8QYgghDgdSibhwJhGJCy7Sj1mgqKqKCDFE7733fXI0NIaT5UYfAlPa9ZmEf1g3rbehsB5ErijS7gwV8bGzNiNJoolTwCLBdwGAihhbQDXEbH3DzNkHngjMGmOa7CZKZt5ocgF+Pj+e55XiecP5h
Xr2XN59kbdI57n54rVE7n15KL7l/AxcyJtfzL6b2TMRIJwXjuYqIUfgC1Lj/DLJLoyGQXUUTZJ6ERBbpIDNRoWJWIihTCBoIuBS7oOJQVaECcbCReVYgC0Zg047Yfi6sjA0M/z1d/s/f/hsymeVMyFqaSRGCWyJJ+Pptdde80X34NnhbOvK88/uh/WyHNngRWFNGZZPzOctVG3he+tkf9HuLk0LQAIf3JLf+HuLH/6AlydX//4/a4+betLym7cXL47Hb7wbnZXjo+UnHxRXdujSNPi2mGyvP/1YrCnfencZyL7zjr10g6fj+L0jV1+3v/kPzLX99eNP5Oi+u/q23Lzu1y2/8VI76rv/+fD6b/1R25/JRz9ieipPBWYZ7OVrv/s7713a+vDBycPj0yDOwoutA08XUolxIboJjifUTrmtwoJ9oAZxhablkbtE10fr0NGx/7hRrPuvXa6rKclpkBp1vbTwSiLEHq5BW+q65rFH37oCBbhijqwFIoQNsSUqQBESkvcCJVnLgEOGYVUGWWVRAzHCliEwylqo9gLLLEZtVIEa7dZdgaJwRWgDENkapqKNvipGtjLBBapUnKqT9vizVfNYVk+39Ozk/ok786v73Uv1DbuuH372+Y3rs8MX79/7iUxvHJhdG6cN7+nh8YdX33ulhLeyNC7EitURlQQHsgQLDUbFEFuVCIRUrYr0gBpm0ZBpjmCwiCJKQgRIRDIWzTg6OrLWgpJVjzJlFmVq+y5e0AvYKSHbECZkmDSmnlgsGyJEVeZkw01JHpod9fKlSnV5xqo2rWSe+SbfnNxsDWmbBqbIhWt+DoWBRJW+cOGRGtaIAIDBaVlyqrEpp/VNVBmuqiJ1vSqkg+PuYI6f30SCz3jAtXOwuUAfy2l3MLTauEykDVDC2X0+MT9FGEqSwn2aIICJGWqBAXlGBQc4RUWoQTXxiDCC1tHVjNq26rcvlz/96Mzvj7auRI1ITUMSO8MzWqVWi6CdhO0b1f3j+Z/86d2yms52R74JQYRDEQlYrN5761rri7IQlQBAECQttiIwjKBHHnzr5peiLCIZQ09gtaZqBCzKAi4ii8KyC1LYyfTbd07/7QftZHsniEENU7HW0Bo0Zh0p1aS1oVpQIQY+mypvCxthMQIrFi6AEb2Hb9tp5YR1tFNqxx/86K8/vvfxP/rf/+PZZPrv//R7r77yKgrDXAOIwWtaus4SxBsqyKhq0OwWsNk3rYYt2bywllOZxSQi1piEmibSOxEMG7YM1d4HEBXWJEW+MRZtAwWRFNaWVRlDXLfrKOJ9YLa+89a6uq6iCigE6Q25tCMo/X8IPUAiEkJkDnVd27x9EslZdpMEYUyyMQ8BzOmtZo9SlbQqDJvdpRdTY74d5+xofBGphiYVYqZh5IuxMQHH5ptS9kVSNXxBNJhJGIPMPfOlNzQxGkpw6PD4bFTwRYONzY02qeFPliGaLF05fRSUMLvMliDDCXNWo5p637QP2JISyIrYaAUlYCI5g7aXwvHEIAClTL50+/jR7cf9k74m6yPBcFRDrm/We1ev7r91s2JeLye/+vC+l9OirPpeil4ksjpnNdz5UqA+/PhvQnFcTfpoA/cUrHC4cSALdevtvTf/WI9X4d7P7D/8+8e/et/t2H6vkuNDloWcfWJefctemYqX9cmj9hd/Pn7znWYW+LDhmwfSITx/4h/fn/z27zYtuwfHtjNNO+WF7z6f11e/DJLjP/kPozffO7v1G271rBtN4vFn86dHp5Ev/d2/P3/pzdrr7Vf2Xf30Z4eNJW7tpBXjUQJ2Sqe7tJzq2VTPTCvaGn8a7ZJ1JWcQExwL69xYo/ePVnwSf+PmxGxzEFYJznmu5qQxAA2XY1QdRo0UTbnFI9ZOQxHIchqcSVBZp57HAgr4VFkNJKx0VJkgdtk009l261tSYWIhZWNE1MAKC7MaTohHFxELFF3nq8r4tmXrprtbTVyriHFGLLjSSGFivS7XYf5k/2C1NxNmHr998OJXT+J68uat+uTomPsxXrTP+4+rtSuDG7n6629cvf3unU+b06nbXsD3poLVPOawgAUpq1KaAUOZNq5+0JjpGVmzSQNHI/G9CSQSY4wiWhZ2a2srhLAhGiEz+3kY5WGw19jcveTiiLQgjxPhIr2gIdHsD6Z6XmbLoIcfAMvzwVKm9mzc6AmZwbRhTxM2/aXmxDogaWl2QBAV3QSR/E1Dq7npUZEJ0ol18oWKffiPwa4rr45htkRJgkWZgZWGoYlKndaE0xefJTGrTfKXkFy8SM78IUQefPCZLYOUmIjihgumDDGAIRjAAYUSqwONFCPVGjyB1owpY6SoRUoUNUIdPm7a6WXawZw4CiHCKBWqGhytxrV61mVXuOKFhDduuD/+p+9YH//Fv//cVROOwbDxx3pjWuxd2fPrwDYmY06RJNhIb40BFgmbD0sl2anpANSnED3UUsIAO5APIGYHG8Us1/zGW68e3H9w2huuWUrhmrVWGauZMo+IpxIq2NqgYqvQ7f7aa3W7bNUGExgWkYT6sOZptxTfLhoOh4f+5z/64PDjR//kH/13+9s79//m8alffvnrX14sVlxYALGHtapEqpLs0wEa7JRtFJ96S6gnJpt3YORIgOQzkho6GujBadWOMRZuVENErEmseyGFK1z+vJidKwMHYhKRoijY2M531hpnbSCUphTAkAnBEwEwIrHt1pspDwkvl4GZq6pkNswENkmtkZRgxuSCYJP4MgsxTU43eezXsOH8Rfo1lPi8SR0Ap/PREJT+G3h4uEME4uStzhe+nMt9wqBtHoiVep7TNycrfX+6gHRBLp8+csplOgHEZDRn4kT0S1orKLIEFbAKq4lFoTaNgQmWYKOyFIKSbUFSQMYUTk5DUdnRKMTIE1OPyu5vv3vy/twxZKR9Hww4+G7/2tXZ3uXPju+/tH/16fz53Yc/K8dRBNTbSKCo0sq49K8W1eG6nZ64NwIuWdd7YXGEWMnelu50b7/l7/3EP/jx9B/+9/2NHfnF0fT3vtpfs90koLc27MmBxaU1irGbFO3J5fF3v3b2eD65VrePl+XYnS1PZr/zbXvzlj1cyH6lbWsnu7661N07nL5zvbn3uCr2yjvffPZXDyav705feW8595Ob39759lv+2vXTpp9LtK7fu7r7ZjX/2YPjx7ES5xD4Cs9rzEsst3QpZ6oroInV0vqlyBIuUNDAvReyVYmG4r2Fr0Pz9quT2AfqwSM4hGm9ilo0WLaYtGgqKawtpaqoJhIiT+oJPcirWtEAimnOaBks2RMUGP6iEFu4Qgl9jGqJDRMRDCy72AhbMs70q16CuMo5OIpUlC7GABCDgkQ4RI7GGa5YbZwUcmtv/Mn3Pri5b779zsvPPvylK0bd4+6me+PeRw8a8lVVVa668frLk5e2H7afLuFHbvqd77x74mT97NTOlgV5KVxqfGFyu5QSV+LZDysUBlEB5/0yMQql3X+sacSa1asQNqxQNvbk+Dh1mMzMZMCZ5CwSedibltpB5MuZq/CUgEHnwt8QB9g5dRwpKlA2fuIsbKJN
tEhYnyh4WNYtqpl/RUOqSsEeSsqS3a0uBAaVhL9LnhLniRIASrvHNKuDMGCnqbwYoPj8ToaYlMI0pWVviQOS6KwDuJ1eNm9FVR0IYkNPkNSFmgUfiZcyxBNKcYIEFGNQJWPy3NdkDwo7QGcmGVMwlzoCTYAJYaJSKcaKLZGJmC0jY5GJTEbRP/zMNaf7u2d1v4RBCFHYIFoBB4ORTpqybqvKnNHK62oc16t2sWjvfMk9eBh9DIWiY//221tVOTn2c6cV4JkV0jGXIq1qTJsF0jEZ3DShiUgGBjSNMPKYgpNY2YYIW5RWrVdWa23HdrT9zS9f/Tc/bSZ7U6mVathtE0aCCXgMGVvsiBrIdseIh2fP608/W76IHk0vPFa11kynbmt799LuwaPDxYf3Pjw9jOq66SuT/89//J/KrvZn+O2/+x07tevGF7UFYLtaIqnCWiPSQAWIqlGS1TgbRZDkci7CnMcLeePyMLMYUk4+DDGKcACzcy4jJ6qWrQKMIqn3AITQg8i5go0FaYixqkpmlhBtUYR8a6QorAiMgTEj58p2vW67lmEBFIUrSweCxCAaLAprbbqe1trsp2Z4OKr5UnzBJOM8X57/XaAYQB3azJN1CIPpyxvLMyJizizCCy3zBX5VpjQkvWJK5MkgLWMJJpfFUcUMUlDkCXXuvEU3hnfZOCA9vQiZJCmUaKxDRiJYyKZIJIk2IUxsUiAhYsApGaKSyJJaMHMhYsGOUBvmIA60y4t7RzZGe/mKBOZaOsbJpGtnKKreelJW8fDL7uCdG3feeOf02Unv6dHyvtnulBx7QdvJyFBgiO5cLhfc/vVd7DR8IA1pFUwctevORn3/+2dHH8ZwTHxsb8zCJY2f/aDeOmmna20f2D2KLdu/+5vVl27EJ0/kZLn4/C8nU+nm9/Ds0bqchM+X5vJNllN77cbpT/5DvPdg9uXvtgZ2eh3PPX/w03j8hJsX8M/aw4dh+eD09O/wb70zeuPO+JvvdBM0TWutWOkYplGpt7fv3N47feiftmaXn4/gK6jhNTzgiXuSVkITtSPbMffw60hcBReb3ggR1N8l3p6Gg4LVAkbgYV0ora8QHHqHvuJQMbclR45qlS2TJS3AjqkiCYogEhQgJpvAURUhTts+owpsVVVRwmjkYIEePrTBixVr2UoPAkWN1hWFtdKKahSItTa0LMIao61cKHphUQZVxhmPrl3PH/9v//7vfPK9//DB957zSVWd0DsHO8sn97z3vl9Mph7Nzt1PPziSk93b9e7+fvM8Ftfb/R320BH8WcVwHDlSQVQgqhhjSayIMBk2RrRjptwJcq488zJRm4JExkrBQrCWxVmeHx+NRuXu7pUYAoNjjApJAxrLhohERIkYnCr9kKyKhUUCooAQolhr2VioeN9XVRVCSPfZWqMZKU6egxwVbFlFSGDYiEiSDmZLUIZms1YgqXspixNBnEhiqnQ+piJQQgWzQJBAkDQSSwnwfFadLeOVCMSqkk0xmECUlk6EBLtJQkWQtN7MlFXQkvB8EhFiJHcwEEQ0+1RGTYLmPAtNP4OCMCw2Tk4mKX6wS0PfZHgkxCl2MVkip+oEFrDCkRyRY64VNdEWaEKYQsdKk96VYUuOrxj5/mefbDscSFOhY+kBG0CeDQmFaCZo5qg62u6m4xcN/CjMDqqrl2bcre49eWS2tnrt92bt2+/cXizFsjWcdxcrK3EPLlJ5opJoQSGRytPIgzlXP5rofZRX/BhYVQaxRIlsoMYGB2uXZ3Lnzq2fPfrZQ4eq1jiJqC2PjUxC2Cl4om4Gq13NfsvOZb56+PysdsGRsdz3XnyPkxe+D5N6cu+bX79R3rqxvFkcPV7qPL71xnYlo23e3S5my+frYlqhUSQ0IxRAkKDMBSARkQygPashaNAYKTq2gABRMw8xpxtio3mNs+YDJmDDKip51H3eszFIkctSJiKGYe4lSAwMNpTOj4IoimYaclrkzQTVEAKYylFdj+rAGn1PIOusRLHkCIjI6xkSNp5VvEKqQTKuEzEU4XpxwjLojjZjEU1bvDWNS84nKptGV3OzmnmHg3Zwswcx8/6H9HteyaZzPDAthjY2F6iZWpLa1jwGSug0mDhdSkr+AcljgInSymFRIAbApaSLPCpKpSAzF1CjWgCGkHTzFmTVWC2AUsUU5Lo44sKor6xW62pCEp7q8bHYb2BKsNI5efrRIx0HbFXSixEbuu7GrduPFi/Ko0+uXDn46Z//+MXqYTGz8IAVATGs9EG8zC7zfInFwuzUGpaB1fNKQwk7tuqO0Ry7idOdCpOl/OJ/IqtaF/79fykOFVoH3fr6t8qnC1+E/s7LeGLw2Wf93c9sQ7okbgv/9APbo7mrxTqWltc/+Z+1Mbwzkvd/aIsz//gTM4pSBbu0toL1beUqO3ttLo1v1iDHIgXBiF8yUIg1O//gteNPn91dnD7Z1s7hdCY9+ajRxg7ixYFjE0PHaKMGFI51LQFirSWRcNbcfWx3JuORo9gjBIwCsw2G2kp7A+84FBTWBuQMexWrQpFAEQwoWyLDBFZtIxQhZs/ixDMiUDJZTUYI6Zq5qnSGQhcQIBALrid1bGPbthY29B7M1lqUDobJog++D95yZUoSEwvtpFm+efuVq5fs//fuve3RFjXs++WPf/DnriudKTQK1li+OPbFero7Cctw94MP3vmDO4TOQRx6i8zmI0vJX8QULCvJey1JNgpVSs6J+VKIYavZ/zlZDathEmiUAGERtc4ZY0IIqtLHaNhYYzOuKLnkj0PxnDjSMQqZQRBkLOf9SFAg7VBTVWtt7hYNq4hhSutkkRijCW6KIesnNuwLzcimTYuATP5CuqUM6CAz+v9X4QuS0wBlq8jNoIryJ6Iby4GsO6ShKMFGHK0J+2ImQxnS19zWpd2im8UDct5oaLYg2MQgPcfXFAoZlpxv+gdJU6tEQ8hiCRrY00aVCU4LRg0dQWuRCqjBY+gW0ZR0RiPTzsJiwmf18tnq6PHLldsLZxWtLLywC2yh5Nm0xNtxMtatuXbruMM2VPvTJw/bh0+Pnz2IdKnSVWwW4e985ZKbFeu2N9ZFCUyFUlRERUDKR2wNCwExjfSAtD5LBUGDAqob4bMZoHkmtaxWQYoC7ABnrZOKv/OdvX/+k5anldSI23BbUXY4TKKZxBktamom5MdhMXGrrerMCZhbChpLE200MI2ND4+OX+LqjTs3j9fu2Y2rGuvlo8Z1lk759Pi0qEpVoR4AtCDqmcQBEbBsVDWABGqIlcAsnBx10jKVvCdiQEiS2JaH1JWWBSlRpgbQME0ZfrubkbIkKoHmr2xoyRhQkWxCtXFQywVkHt9aBltIcgshYmMApYEMeX68NOefPGS9gDZvWt7sS5PTaPLYGab1yAr+DWJNQ/sO0KD9xvATZUcqzlsgcmGSJg8ybCBKhWficWo2xkmwlOTAcqEZzwTJvHBhIH8OP6BmFj4rqTFEZGhjaCXpnjPAqlaVhslcoVoABZHTgtUqKpCDuh41rKW+IrhVuTWZHz60hx+OLIfVfX7pNVhtzhbHfFR
fERva4NhLvPrqS7/1+3/vFz/5+dOnT9xe+WT9md2NHEtpW+kYlZN1YICnlndw+Ji39q6+/uYb+MmHePgxFWwq17vWlRVPWKdRtwJqyFZBJUyJitjatu57+/qXtl65Rhz60q6e/TI8v1di2UxmjWVwr0YLY3QpzGTJiCsiBac9FovoiCaVaUGlcb5qj5fy3e++8pu/185Dv+zsVjUvCraGWCV4Q8qhH4fuhj3Zsg/jR//6ar012tsuw4ksDK8ga7XCwVO/VNvZsA79qTfGUAAFsGEhYaiyPXm++GgiXx3vWhZ1iDZMKr9QgXpLwcJb7dROKPkGOaUSshZYFVaFGmPYsooDAtTmHm0zZgSsYQqQZOoUokgUTctQBWyZySznywIFMzPQrlsJsDUni38GO+dQCDNrVCJyzkgIdWl+/vP3l4vlVKcxCNRVlRop+i5e3rt+48bN4xfzZVy3zSlPUNnJbLp3HDix/EIImYcPYraC9I6IoYAgeUblTCy5fZNgjO29B2CtDTEaNhniYSJm3wUwWeu2trejBJM25BFE04CJkr2wtTbdEmZWETYco6Swkqrj5AidpEDDrDfV6SSa2I6k2Y4HIhqkZ2YiFoGqUDakzAEghaOYkuXgTpXC1mZOutmNoiBFdtNKVZQZJsdpZWnUZBdLREqD8kgya/o8tCg2pM2cXEPEEAmHKJbzbgbuLn6raJoJnI/BzsPZedjVlNuz8CivF6S0M5w0re9iwIoYwIEtKqAASqAGamACmQJTxbbOcDrF2Qwnr07OPv3wbtU2e3XYxaLEwiLtH7OebFD2ahc0HcGbvmmLNZvpj3788bOwa9iVsxjBneDGpfD2ewfLF4EsYEu0GhAITGmQBlZNEKgYBgYj0cH+HwSOKmDLKX+QJTIAiTKDiZ3CGnaAE6ngqKXw2pdv3nr2s88diu1Kp16mzDNXl82uLmYyn5qzCVZOmik3E2lt74XEQEwAVHoLLKsbCNfKvX7tFBNT7x4GN7056194Y41jivMgUahIYKiwZfhk5xk1h+xhx1RyK0wu6KzJxgbDqGI4ldnyYiAe6AY+1dSD5jVopJvpaxJHp29IzKONDWk624yM8RLSBh8dYF1VVeYQomUmaLzAk2KbymLJNXHaXJ20OF9IvRfqv7zKV9PFTxIlyizAX1MKpU5WhrxKF28AE2RzZUTTg/K0STVzSzZvIH0G52h4Rp3t4F2TPhdNwSp/DjSojNUkgnXidiTWm7GqUM3ZNyv3wJrWKiTnOBSEQmFVC4KDTfI20kpRwpbcVZGdovTlzriZnyz+y38cT8TbxnSPsf0aShw9f1D5x5fKhjRGO2pCe9VdfvHxj1+5sVu8fv3HP/0JjddcOgTAMXuEVpy1aoINkaZm8aC9cef6zpfuNLvXVj8aVR/+TUCgolKH3gSClFzAUk+9YYIG5oKtE8PWVv39j/3pE+6Bx0/K0Isrg3YMl1jcor1YgZTBAL61EQwj3ihgFiJTwVPy9fLkzXceLrj4yZOXXrsi0NBGrjlvorOVWOWJKcJybNsX9x8++fDuzr65VN9e+S0XCukCR6BRdEY99U3vT1vu2BoOXeDk6RkCKSlzcPzgcTutl6/erGkVUDCHvix8ha6i4DQUHAvWWLH0Cso728jCONYIiZpKNiZSgoJJk4OKAhYQKxgShoopjEL7rnfGgSEiy+U6R2pR733vQ+kq59x8Po9w27vTPvRlWUWJi8Wi3h0/Xx5eGcUQ4rOnj9LIli159Mzu9PisnrivvfOVs/WR758UdSu2atar3YmZjOsjEbXeeyHLCThOEYCZYBkFw6fGjXVYtMKU1ZnMTNlCXUUk3XeGhhDJMoiKwjXN+rXbr73/s5+enS33Ll3yMc2GlMDGsO/8IJM4B680O5sLG04sI4EyNMYE86q1BaDJ0lIVKmBLGjJYl0y0JAGVw6wnZbEM8YKYSTayiBwXkpB3wJNB54X3MIXFJqVKwq9T+ZwYanKxOaAkBxLoJmFrnooNXk6cPGvT9+Y3cIHXcqGCp3S+z9nOGUo/fzHioaFITKVzjx5Or6UpI6NgFIAFShEHx3BADR2pVooJ8v+mmGGxh5NJf7hbNJfax3/20f2XajuTs0lo0UCDWg5SIJRgSGCuuJ3rity06RfOxKrermh50pWh3gIU8+a996ZhTHKmVBk0Ecai6/NonQmctpobUEg4syqUhAfYQlQNMYiR/IkGCnTy5UzONrk7saROxcm6oO98a/bPP5zTdmUqJzOM7HybFrvhxWV7us3rCa8rPa1lMbWH051PrJaCFRAomhDoWy994+ho67IeB1Na8SToDY4dm22HCOrBwWhUDgxAOslmdmIVIpt12jmCZ3xFOZUUG4kdyHD2TdvoU88tXzbzy5zYRFhJCWo4Cd/zcwyXMXfVBCDTuVLGzdbsuZLJz5zx3iFXDfT75FNvTZqwnB8wXEyiQ+6jYeaSYfg0YWXeLO5S3aDKG46kJqJm4rUPCBANr5MxY9XBnllzM0vnj8llhibaWq5ZL3a8eekZ5Zu8uZJDyM1MnKTUAHHy0EtVayJ8gFjJUHZ7JlVStUQWMAoLcsSODGsJdaARqCYpNIwCj4z8/8j61yc7juxOEPyd4x4eceM+8oFEIgmCIAmCjyIpFkWV2KWaMrVebSNppttmxmZsZ8z2w/5X+2Vt9+OOjbWtWbdpu3slbUsqlaR6k8VisUiQBEG8CCSAzJv3ETfCw/2c/eAeN7NsYWUsVgHIvDdvuJ9zfuf3GMGN0Fl//C//qZSHennGBcdHv6yf7FcvH+H+J4d4eKAiRvr+7Pmd3dotv/nlD268+TbvHDR3fjGpyPMoeKAEWuss1Psg/Py+1hMVOzq8fk0MeGJn3/uTvt7DrV+QtKKojBHLDUdnURgSy5YNjJBlY6x8+rMQ1hU3UZjtDtsiBLagyEbADOptb9siaM+sVlmZo1UqwI2aCYenrbt6efK9vyivXVk9vPsvf/OT5dM333jrZmhFGokSyRAK0cvk0NamqWTxkx/9YGTbt65Nnn72wVl7effyO7QyFHs0wJr7RaenkddcEIdOjDXKCgkchGBtSaEhMH/xoNmduYMD7gKsV2N9hb7QrlBfSGuw6U3NBZEzVBI6FadIRHUDONWQFvap3UpuZenxZQtiywBhtV4766qisk4ZLFZMYZRUgxpn1Er0uru7U5qyWa6ZeTbZbX3b8qbAjg/9dDI+Xcwth2p3fOfOXb7/sCpLv2qdjKI3RSjefGf36LlJ54/bZr6/O+53Z/0kfHXW7O88LxFkvGGxhSFAogCsTDEECkTK0vesgYlUJe0uDRuQxCgmawA0iXGtNaqSzBSYIcniuO/HdfHFF19+8+hRPapEVaKwYSYyzCLiXDGwk1I0wvYYpdZfMcjyiIgZMUko85XExhhwds7CYLszMDnTvpDMgGaJpBkid+QsJMjWU0hZgYBmjVIGw9JLyzvXIf7l/JwP18LA6soAAghpw01bpypNxniZ6Z1MNJF9RxSSVmAIMftga45HVEApOXsO2zAAKXz0t8jRw6/kapnTafJLtU
DisRODlYioAAyn8bcCRkBNUhNqlYnyDBNe7WI+o/kuzV9yq5Pbt2I7f24GOUFckW1AgdSBLBxLsKSlXBqfVlUzQrfkXRYjstmELtqDk52i86MXrskb3762Og6YSOmLduZpoeCCEIgCUa9I9gWqsCRIQZ6ULG7iUJmS05mykkBtbn2YNLJSmgYt4FApVUQ1N9xde/PqSyeL2xxGl9yoWExptR+eHpj5JX46QzOTxUQXRReKJtazhnUVNLAwW9v6s4PJ3e+/996iXqzgoBsQh2iV7elkt29JPHFv4IUCA9CN6kbJEnyh6AlMZHXwriHNs15i50vyYEm7KMnQ6eC6nLiIg28PD4tfTamWeTSOUSQpd1MgDKUdCqV5enjkBqT2vIgmpZFA8uFiw3mnviVDJSPlmNYspMNYSReXG6nsp4Z0eO4AGihRudYqaMsU+61hGUqJlIHzpckw/NN2R6JsmCVBb6mdRu7RM/xEqflA/kmm46I6HMZhiZNORLohBNgO8SISY0ybXWYWIVVhpvRDTy9HKbHT0yonR5gkOJoMawFYhYOWhApaqIyEa2bH/Z4NX91ym0fFgQuu5SpM2q9Gv9bYvczzWwdmUQMMv/Th915/96U33lotuier9gc/+s+zsKxqtxL1ReWLkZheiIK4vaP2W9+yT+739dEL+8/vx2WwFQMw77/fXDvUL37M/psgHdy4tDDcSwmAWb0FVcWIjUpZkfNo62gLq+I1klWJDAFxFNZCWG1wgcWStymnhVSlYNOdrvm5l/b//C/d1cubfvPa4YtVufPpjz9t5/6d995So2xZIXoEFHDSHk3kix/9S3Ny58WJfH3r7mW7Vzx42MTn4KeBAy8EC8habM8cuG8jrHFewBR9r8QYEdawDqGSVaO37y9nO5cQOg1UoYP6gqMjKUisBhiBYTUpT14FCkPEokzMRCbvp4ZpBJnAimhFRAgWXFhLNisfIyIxgVHXtVdPoCgaQzDk+j6kzWXTNMXY7u7uBcRqPIrUi8jB/szy2fzpYzx9tk9kXSEe4zq8/3sFFsWvP1icHJ+QFrPxXrtodVfdrL75yg2JKmrYuhhVOLlJZvYyOOGkQoBybretSdc+MXGMAYgApUmUmXzIMDXbvLC1xhg2UeTg4EBEoLLd4yTYJ4Q+22oMThrbY8wJ4VVVkqHLJmOsiIQQtutSiIBYUtrFcCEkN8eoMTMxIJQTc7MEE9Aoen6tJJIVcsjhwCYhbAfS9Adzq5BJQBg8ZoHzFbOoQIkp35cZ3R5IvayMrWoiLSVo4P1ut2JMqSSnbz7Aihcw52HDdWFeTv/YXpDpgbMETpeTgiA0+A8pQLACC7XEllCCxsCYzJhK52dY72CxK4tDt9hZ3f2Hn3982b3oH/lJa8Jp0DOosHFQC7VghqmgE613l0XtRxQsNoauwDoGB0Vn/J/+yfWWSByMRVuKrhSsag3DEYIIJ2CAmUVYwcy5gImE1BExc6LjDhe9Zm2f2jxVpeprhRzEKVfEFYdp8e33Zl/cauIUs7jY1dPLmF/h45mc7GA1lZbXgjV45bBywiv0JasReDZ7d9t71yZv7bz4tBxZaCCqwC7EKpCbT6YUCB7YkLYAQAXBgYTUA2LArMoXhlrLJJo95NKQR8NnJpINLjIKkn+bc/eVnlsZPtT0uYcYSZU5B4BuR7pzV4tUAPM5yqkoF/bAw2QtooBJ/aCKgoxhUoQYiFj54uN0/gXzM5hp9zKARbk6DkjS+Z+VJNlmGuolBlXv+YMLSjCXYDB7VpxXcNLB3W7bhJJcOMs0jP1J8kmpa89Wdxm8UmQ4TGUI/iXibTc/vNTUhxMy0zFh0QVR0s0XyVFc1MIBDlwTasgIOlIeMU/AFYexiI3tw88m+yaOMIkri2ZS9/buR0/mv66tr2npEKrQvHD16untDw5tO7185Se/+nkxv3dUjU+jLahtdKdH19TFxtSW46tvYOP7x6f9c6+9YncMYt/6qv34x1VbTn/nve7GVf/4Vnf8axO+YQnEBQvBtqAgAqPG9h6xtSEw9+AIcY44BAW8ukpaUVW1lgOr4xA6V6hY1cpp471fu6tvXf53f6HleHPcl65Uyy+NuL78+ucff9U8Xr3//e/AIezFwho4lNzH9uTTjz+8Mik3T589fto+aze2tfLklweX31k5RyuhJnBbwiO2XoIrREIQL75ypXVl3wsK67teOrJVvHOiV55uro9NDNYiutiStoSOTbQqXBAMKwuMRkpCmNRCiYBYE7GHhkcn1y8o2TTeiaCsKoX2XQCU2eQIXJVRVbWLzsJWo5Ffdw6OmJ2z3oslG3xAD91IsVce7h56fnLa6qNV3He7Onvivd+/Xnz3zy9//sOzz34q0sf6kCDh0dmDUe12nh9feuHgwy8/Hb+/G6++d9q5jalacdqKNpBW2YOFEcRxIRBFBCKpgBBCx3mGA0AisSisMdaHkE6CcdncjpNQz9i+7bquG9e1AoUtJK+PRSHJVQv5AA98XRURyeYMbNLZS5MQDVkoGa+WFH2ohIHkDMr/LyeAHEFiqqvJ916gibdCNiv7MvCsEJJz1tO2yx8mAwx0mBSEti18iSiU0wZV072luewjWzwPoBgRUlDR+fyqqkCMgZkTxpr0VflyoqFnFyZGJidZ5gSnXDAVyvyvdMVm/nRq+kwy6gYzwDK0FwNErWopewkUihIOmxFtxtROYnNol1/fvbtZ7dUEWUi/6DB33DAkMUBFnTombnMkkCmjLZYlcxlaG1bTqjo97v7wrb3D63snt7tiZE3DvRFjSnAUFo2sSjJ0ZcOcSKop+0SZ2XChGlWZODGCzdaPN6VksXGASzxIOJABW6KCbS2N9K+8+dz1xW9aKUpd7fGzWhbTfrHPp1Xw1FA8I21UzorWurpQIyQsZIqZK5exmR8/2d29ujOa94oO9Qp1bTYtquVoJiuBY5SEZKjqOK0Ok1nV8LnytoVL70vEpMJzvqOkLYaa8haJiNKnldwRZatRS2VPlUA2GWek50I0bZQvFpL0PKStZzqtqnoBh9YULZRABVWQimGOhAjdVi/E1GkPT5UORhgXHrkt4TlX81yIs19A1GHPo9jC4tDMOqOhBwXAwxOv+eFPbLTk5sx8/raGAwNoNuBJLBXddseJ6oyB95gRhsyOTN6cwswJlYQgx5mxuUB4RKJIiuSBfHCsTvsORgEUgIM4kCOMlGpCBa5YprHadfP7X9vFI2NXOya4uBr5zdituA63jeygq+zSaeNEvnPz956t+n5xp+MlvvnoOUKLjWg9IleSbXUjjQPOrr195ayVe7fYzS4/f+M5CTHul+HT+3znx1jIZn5i/uj3Jt9+N1RvhPltWd3p5InBchIah01FvTxdw0TE3kGAOIZrCLDOcKwgUXtYshVC3zKx71GKo3UI3qOV3k13/vhPdt/5femCLrUoOfpYo7SjUg6oKMqv73/1d//5B3/w3/2ruh5JFTgud+r41ce/8avTerZ89dWq3t/RxnerdtaHp92a4YIDW6TdvGgnXTAGKG1VVgaWgEIQioiKbcGw2rP98
nR9+dqssggwYBIYQSIvJ5RUgMQiZDGKbUPIpFCRMBwf3TIGBNaKSFUViWSiCjaGFDHEpI+PIYhP0liSGJ1zGpht7jrXpyvnKtMz9SRN0DGwd/Dk+KunbXm4dz1udH+6+c633Id/+/j2HZldmzCK1bKbTIrf+6Od599+LcyasHvSlPHvf/Wzbx293xQ7q7Dj7UxWalviYLSPCEAA0AMBGpNzDih0rS8snHPGsDEmBPbehxCMtcjLHmjOaoBG0SjOub29vcLZtu1TC2ytjSFy9tRJ94gkntQA0qb6m7ko5y2qiDGmKAoRjbG3ho01UBVVYwaHemKVQLkzVxCJCmu230ohC6R55ExpSTB58zYwJFPVFdULkuSkgk44dNoagRIxjJQCBMMFitQOMJ03Xbm6AAI2nNjaSdua3hozVHK82rBLy9bWZFiH+pqsTYiJjdmqnraXYb5e08irihxImNbgJl+Kmj0pE9ACCyWlklCBKoKTEptCVk4XO+P15v7X//izJ5fKq/5pR3OK85Ibo3NRQSxUCrEWajVUYjRGQwKpD/ugTYuTqavX6+XNS/ij77z58CRWdSmrXhllYXvutTQmsAYGjDFF5CjSxcyGS8MVZ0/fYS9BbFNSDRMlsuPQQYCoULJAwQ4ogApaQEvLZbCT6jvvjn768wc703Xtw5TPpli4rpclsBJaASuWk0JshZnTCKiSNa3xOvWLB8dx/4Vy0k5G6xXWI2qKuHRcVbppXCmFwmjagBhjhQVI0GdyRStI+2QKIBpJJD02lHaieUO75eGmWqwD0gOirdBHiZLwL3OZsjujQCX7RwL5GmJjdKh1+dlTVeLsx4WMzyipqrKKzfCLAiCmRPKy1kgIlN1B018akJ9hD5u/Ui6WWY3EZghR0GStw0xK2xZXQWmSTeSr/M6zmv2CG3iSEybXHMmuzumwUcbx8zZX86mFnr/G9BWJWXKh3TalUBqc8fiiaklzYUUWgjGTwEChyoZYwaIKIQUj52cbSnyDBHsWQElwKqVyaWgmXz96QL/4yTWzAK2tj5UuC7epZdGAImEnruvYOlm98/a3Xzqavsj2pMEvfvHBns6ldKytjWHu605CaScv7ZsXX9tp7eLvfuZ43755/fVib4JNiMfP5Nf/7IqWa9d/8zP87R28945578367bcxeTNU3oQTd/ag8KdWuyqsaXNGcYX12nXr1rfaE28CswlsohRquTOCopQQjUFbsTnat/YIBy9cOrhej2Zx2RUN9yPRXgnk295G3jU75bSka7j16PN//pd/ef/y+zvVtCBfW3/85SeVzGl13MbN4v4zOTMuTp8+mi/mH8zefp+ntUhIabVsuBhbZ1kcG3Av0Vq2wp7VpnEEBVt57MODtX+NWdSIYZIhkGPAMNOAs82UzPeGJOpE4qYmGDVdzYZykg+xiITQG8MMZuI2dk6NKKxzfddbW1APYt4sNkaNK0zTnMVox9NdAAhYz1ejac0Rm9a46nmZvXjcz8eTyze+Fb/8+uvbxzp7fYKlbZaLN78zufnG5WZx9f7Cf3Nyb1P1s+v7D0L18X/50e/+xbsNj9vWmk7QAm0kr+SNCit7iIgGSExKh7quFb2oiCbPIokxGGOttUPitxjiPgRmIyxdj+euPf/FF7dW69YamxnIfUxXkCD9TDTlnfEARIOJyfS9T/U1ynYpyBJjDDFNtHGoZAms2v5KFU+3ziHZ+TIKwRpGJMOUIU1JCuJhzbUFphhRhJOpSPqIOa1mE+SJDBEmkaQKCJR2VpoPtWbDS9IBaFOBMFESDGUXsC1CRspDZ7+9ePMlJBmgjwqFYSJV5M2oSrLm3UIsmt+OIvN6Em0nqTEl2/4bEY4UrbXCgiIEwwYESyU2Y3QT9Xu232mf/O2nD8fusn+4kcZipbSIvCpoIVAqVPyIxBZiNuQ0ioEJXDBO4uzKqo1uLKuyk7/81y+ueuVSA3s4wFKMgZKGnITBmtnC1haQ2CiS39G5z69iQOtFOAWvghI8SSghRYhs2DAXmV5mwdngi0JFDcV3r+8ff/gr0/mZmU9pM9NlnMMsiVfEG2MWfXcSpaggqsIGpJYNIrQ6LU+XJ02x6+rqbCKjhsdTmna6cdQ1dckNiVNJLGgrsKQ2MW6JhRUC5sTkZTbZJYYpxl4VyWY1PbBZijrcBqlmRCiYBpeZBMZHYjaGRRMUAzYpL1iJYNmIikgiCg/7zxgV5JwZjKIkihDDEAeNPgSkIEQiMAcZ4ghEwJTtcUSgEIkJPRo6YQUGe3jKhzU/8VmPNyxZKJfPVDs1t6BgFQIUxETCmsTGKcOLQPnY0hYvpMxUTFHc5wz/YTecRYe5R2emdCiGCEUkLpjmQG8yMLmRUagFhEEMtQmuijF5EBmFRFgiS2nqFSaxAMOCDQsHYaHCBiNkpagslyx1LCbu9M7Dl/ypNWez2FgE1vlIFvvinzFPyc90U9PG2cXy0a3FzBwe3rjz6a313Z9N7Giz0Qk5VP65WVseuHK34Xo3WvzTL5a1P3r+9erSK0fUcVD2t35h40MeuaBchDL4E/PJP7Tzj/m1l4s3XzQ3j8b7e9XVmeOFE5lSa0KwFDi0HHvbR9tJ17PBtLc1ZLwOlWmFG2sWTMIlldqXdKa18tijX7TaU6eBNkzEUgAGvY+wKK07mO1j98UvVw9+8Iuf/N7zv/PtqxbL45OHd//dH7178suH9341p4elLKPtzoywmy/lwZ39t7+z3rVqDBvT2o0R26sWQkpqU8i8ITZkGGrQuFhZMZa/ni+f987ssPSFsAW5mNZWiW+TZlvKDFeBwICJVZDGZDY238eiADHEusKFGIipcEXyewoxOC6kD8kP3RqDTvo+yEZMYTlys16LCkikFw5MgUuquC3CPEQbn8I0fOmpn75+c/KwO3v0aDK9obrwm+jf/7PDK0fm7//6wWr+YHKtWrlVNTv6al59tXEP4qK4f8bPvyYrj43VJnKn0pG2kXxSKAgBIQZmAWKyqQt913atMVvFrXZtS8SusFHEh8CGkXiOhNK5w8tXqrL0fZ/II0SawoLYGhVECdkJaFuAFV58OrdRFIAxSXYSMfyUOeNddrilsT3qAAuyFzTzFroFqcS8ocrGwnnoSmDy9q7XxF8fEDPoxR3teZ1HZkoPjGpsqSLp7tHhhsp/Ls3c20ZBVQd3Z4Fe+Lv4rX/JK3NgEIASZZA/jRMJP6fhm6QWRySV9jQC5O/BSI4d+RYSCCyIrSWSGlTRWBSymdbtjpz8p7/7tFtWdoW4Ul2IXURaGpxp1zIDHNlu+t54Y01fgTiwK6LpFEVR6WTm752c/OX7s6OD+nazMWaKMq/NUAFGBRGEmI2FOZVdJguNgFGNQ35VAilYQRkXIKbkg58QBpDlkmA1GXgYIkuxADlw1VHtpNS9iX7nNfubjx9Ndhczf4JWsYKcRaxZlpAzhIWKWiiMcHKZgiVnxVvfn/r+mXVTMxs3K25raUY0rrBRO4NVWCQZkiS7TwYZot7kj1i2nyNn/oGG4WGwiYBMBGN4oN4Pi1JNjzmiZFsVAMZaDPArDU9b
RnjShgUMTqIayWeASJPflioRJ38ZgLbqnhCS9sOeUxiApEEYJoyBt5jPhLDm31QmycQXFZxj4IlqtiV0bPkOifw4sLGS9U1qbgePOVEBGcOaGGJDvDc4e8wNlEVVaIyat0Msw+YiES01JUkQZfmR6rC9Th35QFRDCpQa5nBkVWE6PazZnwB5O5KaHDYprDNnAFQcjNjaahWkglRxsufurp7NP/3g0LWjpnGyMLoaYzWJKzXdpijKdVPyyvWrwofls88+u/vlw4Ojebu6Ws1HdbW3i53nJnG0+aYdPTzZfXDHP15iFcOm3d+fPHnl8uvlZD869J99aR79mmv0zEUbxLJ1IGeK/iR+feznP6fb+2a3NLPaTeuijOS8VE4qy+TsyLItpdxB5QLVdTdCNW67Ao2xa0KlWPT9/Wf0YLF/6bByUy/K1mTjWnjAIDA8lBUWvenr56qjg+cqO/t8dfsnP/3Rwe8+P6mXVWluf/wRffNNoSMLgStiFwoP77n96iu58mL14uW+CWxD6Yz33hinCmFi4fR4q0NXACZYpsDEFZ+Iv7/xzwkCs6R8aKIgaagxaSaSIKTMOXmVND0FxhJGRGm7GJgZGqDBSgyFK4QFQO/7AoVlDl6MsejhN95pYYxVCdYZS7Z5tt60TV0zCKENpjHcMXvLXmWx0QJ05eC4L+bt3mY0vvWk0FGY2vvRH7/xJ3X9Uvsf/uqYa+dmTo56V99cmN3TODOz61YuffpN+9KlCitG423v/FIKgYpK7MGBTYaF2CjDioS+64rCEpEOWYSZSaLSbFqbZlGmtO6KITw7OdEo3vs+9IYNgBhj2kmRkAzpSdkPmRLnU9hyujt4SB4G0vcaqiQRnYeVkmIrpcyqhORkkISjQ4ec1MCsA8UqFemt+DCJF4c/mvv3vFy9cPWluyZ9bZFz843tDZHr7gCNZX0wBgPcoaBeUBYN/554Z5ope+klbevxVhq1xS0B2ho4pAk4827JseVhrWWYbB6wDKsmbZym+AupyBawFfrCW7RHExn5xX/8h0+4Mzse/VLN3MSlD0szmpv1pm+Mg4ozgUWNN6VvxQdrC3ZqmEIZdWK7uPjjV+r33tr/9Gw54lmJ0DgrLAmdhSMOiK0ohr1DmrvAKsRmy1eUAaHVZFwy/KAoUXfBrDCGbdpIqwUXiRqmcISS1fkx9/1m8+3r5aNfH48CZrSQhaVnWrejwvPZs45XqJZFhA0lCgGzteBYiLFopO1Pfb87QsPVuK3RVNiUaCrdlGhbV8GBHQHQArDIUi8QEUv+xAzIKKJms/+t0EWH/ouzXBdJCCRbH46tJj190Im+MDylw+efsV4MVL/8GBnKIEtyTcfAZx62sJKmXmJWQYxCDB6M2VN7un2Gt98we8eySALRRRMrXfJAfA6nD0vuXPlyv7ztPNNknM+LMpFKUgLpNnpyu6tD1uvl30L6CSuBlYGYoflBkkc0AOkDWXoYo5NFt3AWHhBlIbaopsEpya+2P2tiO5B3TAoiJLCqUauSOKpWqSQzMlIJjVgqcbOCxvjw737QzL+WWTW1QGjHerrD/W48exgil/y7Rye7la96V6klMV3Te75//Tma7dnpjhRjedqf/PMv66+PsRbeYLdwTU0E6/en1w53nwsUFs083vmnehKidUWATCgoOI6sUtCgSg7KqydmtVRqoW10hVgpnAWitTYU7GkiXDc9L9W21X4jEwkFGqJF3y96+WaOJR98/0+LvVm/6NUrWgdJo1enMKSksEYtMXNBEmJpyoNdF3ZfuB/w01/e0udlcnT0+UcfvuKm1UTKXbc+acQpheBm0/2DanHymT+q7GTShd45Y2AEkUAGrFaDQpm0SFHLaplDwQy0ZO80m0krdmQoc/VMHjZUaeDaGGKkD1OQLdZhbSJpq88tITHAVoJEeLLElitXSS+ILKFP24Z6VIdV6DtvjWVl7dUWhentullVo2o2cREkG+GKZS2Vq0KHCRf28KVe22826LvOt5t60918eXLlffrZv3TP//7rfbc6efpk8sKbx6vi8cacyR7VV5+s6vaJfsub5XxjVmV/FkxE3LDxlk0XpcvOG4TgO2ZhliiBghaFkaCGraiEmFF5awuVYKxV0SCRmauy8q2vKpco/oOqPp/s8xgTHparyeyIWdONnKfT3ypxmbiypVRyup62KloiIhVR0uRQneQWW0R2cGzP1tPnhlb5SiMmSt6hqmKMkfMVEwZkF8QZ2FLJBlrbqyd1zfmrDRV4yyFlupA3fn7D0XBnZoM9oswZ2A6/dKG08zYZly5setP9muVYlMJckrgzeacQ2+F6oqI0YiEsDAkVSx8LUx0U80cPv/mrD76Y4fDV6is6bW0DDpaiRaAYexbD2gZDbWAGdlkRUcZJgLZhYzhYrpYL/9ar+Fd/cOPLU/UWERzAGkGSHB6UBBAyZISJWQSsogQhCDHl1GMSJU0alPSBZucyBYZsgNQheRVSoVzhKPVLDEi0hCjoRcOl3ekLe1W3noOseDHGbrrer9WyY4iPHmwEiCBKQ5sHAhQqEmMvEkQCUSEMNSQMsUpsznnCbDnto9PDqbRlFdG2u4uq2SQxEbwRkcIVMjw7oDiZ5T08axKJCMRBAg9C1cRgyt0ZAQO7OAuOzzNOeCvZx/AHctKnqoow2226SmLvpy1o9l1HHjmtNQDFGEDJ/12QmmuiZLqRwAjKe6kUkJDldLkEpjcGpIwDybKlYZJPrqu5DVOk9f5v2X5QgrEoDcZDa5D2U2k0T2JnztHXQIYeB9fS7cIL+RAlI24Vobzaopx3qpwVF4OCINMlyAJWDcgxFSwcI4IxFlalUFtbM+b/8Ld/c3bn4+nUPIS/bn3Bm4p0oqsirmYv6b/7Xb46krO77sMfzjdzt5hzkJKqvqjZ1izGN514FDzDXtlVpFag/ezyoVkRz668crBXPlkfx5/8qMaTfjImWK1b9cQFS4xiwEU27+xMWVBvg7e7+6dXb/5m8U0VmpLEaXR9CBNZb+bF6OWaC719B+x0JXaF2DpZeGee2/sf/7vy0vPdcU9qTIAA6KESAAGCZtWmBchYC4j2yj1dvXIZ0EXV33n08c3Z1Vff/ZOzT/5f1XQV52zHppfYtv71t/HdP/resgkPV+buQhmdN0ac40hIwkqTc2ENiFnFkDBglEWLonjW9Q+WfrcyITWqqaGPLEFUWTPAARWRKCSkULLMcBL77LtABE2CfbYxirE2RaOICBNLiMkZqO+DtlqgALH0IqKIKIrCucI3yQvLh8aYylBHuiE1JnInc+xXR6cH8rB5Vo/Xp0SHs/HsLfroYfxGw+dP5hrr2eTmV4/ds75uzay+8q2vlmZeHLZ35r8Mv/zOy98+W8a4CtwBG9u1HXMLCCiIRpUgEkR6IFrDIXSpK9ffShQXY4itDTFaY9JhiCqudGU1GhpbykyHrVukInGXosa8NocKlESNMcj2FBkg3sp5AKhGETUmRw+BksrnApB7wZBOAclCfDUwaVWWLyzWC+60GY0mVRlYTkyQNBZcrJqaq2sS++YLNEMombFMyHv
cC39JkIgAlPHqxPQE0iAyoGbDGx64XOlN0xAbNHQjOYhYc4XP4l/aqrHT6m5AHaEKNlaFiagPUYUMc4So5/0dM2+X//EX9+bH6+vV0RhfRoRAtijQM4xlQDwI0ECWESaOQt9HMdawgReGdQS2reD5Sf8X//rlrzorzOCRj4iABskO2BezcUAiUZH667RrTO1EzLqwdIGnzaIm8wgQsrlUXu4lJitEIxA5nUIFg2FgCQIhtbQ8vuOKmsbWIABMhhVRRSkI2LJNu3RlaMjNFdkE/uQ6OrzmrIZKtLZc85K8LhdEIhFc/NTTCCh6/jRoBnE57aNINUYRCLNJ7m9KyYnTDJDp0LLlzxYXNLh5vwDAZO7WhW/NlIDUTIIeOrjEbgohEJNhc868T73mhfk3AbaclXLDt8yrF1B+9DWzG0k1WWOmJffwSpIt1tAuQAcvdJyTl5HuE8IQ05lO0UBgZSJjhnDuZIOTZId5xZLhgbQIT6oHSpw9KAYLWbbpMGk2dSWALJMRMMEQM2AEhFx6efsf1bzvIiYYRApsuSgLKaJa2JGlgv76H/7m4WcfXR2j9uu7WL1WhjeKzvZnY78aX+5ufr9EswwnRCu/R/Ty1WJVnz1dNZevugf34OdFX2Dqit6Ib1bG9SEUlejRC8ZMumeL164/f9htFsXHX+w9+XBe7trAgXsyUhS2s8EIhwAIq4gadupd7Pnw2t29/U/uf6qrrydFOR0faWzVAPPVKe++cHmPTxaBHMMprILFL4rrrz33h3+uvCdPuiICPYegHIVEkh+SkgcYSaQQST1pUOoJUTnwc4dXRnvYPTKrOx/ulpOb3/6TZ7/8L7HcUG8d2T/8bw/jSfGzX9/+1rfffu05t7rdPA1TFKDec+Rk8MRQCBlSriClyoikEjgNE6NliGy+XOKlXSu28lJ42B4WERwhPUjYZUNWhgEsokY4YWYEQwIKTIggI9IzyDrrFPCtt4UVFQ0aQ2BlCJjZFtZ7zwoQhT4wDKtUdQ0OhY3QUHAhHrpRsQoDY1nm8I914+396d7LzysfPLeQjx8bc+uL9Xrdgq5A25f2X9k5uLFTX/3xJ5/fW5gzunT2xclz7ujX//jJ6lf+/dd+n7wNy0gdGY4KD/X5YmOoSSEtIYWSdl1nbaEiGEzUDbP3vnDWmLSC5ShRVdquZWN6H2IQsmJMInREACqw1oQQADLGIvnZC5hNRlZVmZMYOubLggY8KvneabKP5oQmDxksw6lOFI2MGA+tsQgRWwLl0WGIL0x41HDFnF+XaclKqdBejGQcakLGrtNfPL/+Buzt/J4kcLK/puFPpqYtzy3neHOG2Uk1qZzzvncY0wehaPqKKZc1wwmJRMaZjY1cliXGNLzEaLgiQ8RkK9OFbmzLou5//OGzn66X+664Odmp/HFM2KEEUUOh52hF2bIECoDpxVXV6r2XZrfuLM68ZccuSoCIHVU0/+//zbW1nfgQwUZ6I8ZG2GR1r4FY0qyrSCtJEUp+C5w+67RUSDV16DLy1A4lM+wELDQqIpHY9DNTUeR6qQIVQAICMxMHimr+9fdv/vMPvgilWLBGygUjMcIFig2AKNJLNCALYsCwhaIoLHNGI0QimBhqEnlvqINZS8NpCzuUFB0egPQsZBruFoJOxVoJ+SwgGdxITLGAOdzZmPzlKe+Gc9neno7hG+WudAtT57q8xX3TxoYw7GtAJBgMI9NskLHjVDcptYaaOhvD4NxZCBMTJftVgUZSiDIrD3mfmi1Itl0tpbSD4YXgfOLdnpftqyYkKx2opq6XDA9MLspkq2zVgUTGTH3NEFiZHgVJNl80aEBTfAWLKEz6n9sORpI+HqDzLUf+mWWtHnFqqpAp1lQ4G1mEVYyyM1yZ//rDv/vNvU+vzlwdj0vtrLQraSfiRTqjC3+pf3Af+4bCWfnFxyGcjqZd0R5Prl1zbRNWz8A1hVbrGa5epmLXFQfx3rON2yv73bNPPt996a03Zgf08Pghbn1Y0aaS3dZILDo1HEotluKtsDJ6KWobRFHtrqaHX5Zy794nrjk5nD33ezdf29t46RsD6drQhFIefrk6WUWzp33Rh6V4e/D2v5589193TeSzjZDhnkxEIUSBBWE4kh4oQJEREQ1roRI0EoKIBwVzeO3a4os71994q/1qvtm4N373j+79+Ocb/uZP//jqpz9f3flRWx8tX6WXfvib26unwOGbmEjRcEy9ryorwCoCVCwTYAqaGlS9TIRrafjoyWbEjZvuz9owa1E3oTSNoGNtxfQ2tIEVGAEGGBH3gCcKUE8Iqh20t4gF0CsKm/J02bBGNWzati1soUFDiEZMeoass8EHQ8YoB4kMqarKmhiChxLaUqyALYxC2TuNJ8KQDfj2yezykc5G339i755Nn7R8iW1sVqvHD/y0j2tddKMXgep3dm7uXN2dhLLZ819+8PkXH355fe+lWguPDWwrEiBGpGMS0SASjDGGOMTAZKy1qkJMhm2QEKM466zhEIO1Nt0R1hgCX7v2QrNem4Rf5wh6Y4xBBkgxqD+FiW2CSROvnNLRUYUkYVJG0n5rDpastNna5XBOEwKQTBwTrI2sduAgkVSiMmUgL8VQJGZmPq9Zj3l+M2D736m0pfrIpNuaeXH0SOPvtsAOF02+bHjwob1IqbkwVySQVXXrl6DKW6g5OaTknJl0MwMZh7fbH4iSgSKqcEqiJZMS42OkqEHRGzJd200ulavm5K//6/qrmb38yt5+3HRhI8ZWUSmR6IIix3Sw8AZaWCaJ4dGKfvGwFVjLwZoi2CjWrudP/9f/9lpdz572wgrPNpAKrApRzP0RgiIJf4Oq9IwoEpDSjCkhrpoTLzmJNJE2FcQmASciYAqUyi0LS6+w6X2zpmEtYfI2oFWxyn2z4atXD/Zmn2uwsJ7YgjIIy0wCMrYHNEJYojJZslAQ2BlXOiMQpa2xBjMbA5GL9hbpaUkUeJWtT0u+yJNUnTgFjSDRhimzSJLIiAwZUIxxyAFKp0PYZGqwymBqlffDlCGYbeNH214uFfdMSlLK25pkGg8g5t0s87D6gWSdffbEyU9urvyZF0EUEyE4KiUnVwYELNsjkRlnRNuPLT/fafkK0PkaJe+4U8XbCo14S6WGakoTN5wtb3VYtST6FqCpC92yzJiJQWxtfuOiySmI0t3B2Y8usy6JAU5fidmkuKgkOBgyK7YHlxWG2CqTsYBBJKGCyJEUUs2KX3722cdffHpwqXLhKYk3vBzJ5jisVxRe4p6dX5OpRI7vYnUc50/o5Cv5/GmjwY6/lGWPYK0ZWSVZPvNnT+zR6/b5HT8by2K1/vIuXX/t3Rdfvnaybsxntyb+cWN2EFaBCjKKktFodFysiHqo137TFiMXxlfvjGh+8nUgh8ml2f6Lnz2Lq2fHQqEup6/NnpdHt9cheHuorekXS+MOj/78z6trL8eTvlig74zdEFqNrWqw3EE5AAEaVAToiQzQgUhaS7WyJ/HKfdGt/Khx/58ffvjO9f2/eO+7y1v6xdMPrr39wmsvTT/52cPbT8Peu64N4e8//+HzL5
U3X7sSEG4/q5ulV1IosSqpxjQm1YKaTE26o1La6LDBbKGzMxnfOrMvzUYNz1qZmNZIB7SgwFyA90FCCBCv8Eoe8KStIkA9xCq8UiB4g2AsKamqhVUSDeKsI6UgIR24EERC1F4tmSgiUQCBRpEYtM++DrGklsj2gQsW2BrxNO7ujNXEe78O+7/n2ivV3/98/vqrV7/5vF2vV5efe2WzXCzj7moFavT9d9657I7a4w4bdZvilcPXHn390Ud3Pn3tuX+1uzNq2x4AeM0AIDF6NhAJbdsSo7BsbZFMKDG0/yIRzNmghygpGFBgfnI6rut8fyXTaGSrjeEvKlRy3GtW+6ViyprMFzRfZKICzfNuquWDkYGQDs7BF0Ct8xlENcbszpFu6AGD4nN2CAEEjQk9o+HCVSbehiNju4JMr3MoxcNvnU+9GYrD0LKnziEnHeVXRUMvn1cXrOecFclRNgJY5oGSdGEVfbE1uOicOXxLyfMzskdhst0kEFlY0wc/2xl99fX9v/oqmherg12DvunRF0xRJEAVsKBexUQW9Vw4G8oAjjE6VquuWcUJCmOZo1Qj87QN77xdP/fc/qJri9E+uIRasUVAoR0QlAOl7BoSYpCkhEjWLYacrCfT9JqklyAQQ+WcRke8HdGgiARJ1uSKqGIYREERgF4kErON0rN1BdB7SFBjVWMgKVhEBTEIhFWU0afRS1lVyUNKMASVK4vCqrQSARZGhASgj9Lnj2G7dt2Om0wJulDNH1rKWAQlcCJhyEKK5DFqmIVIYgRgbZEmP6gakBrKIjTOlX9oQSLYpDkyr3Nz/keeQElZE/xKAMGSSRQtZhJVjZoMWpMCcOAaIpVM1WiMTbEo+ZvmIwZLLNCYh5WMXCinVY9km9dERVMoYBiJSZECFHnoFPNSacu8TjatlL+jpm6ZGaCYpFOU1kJEeWpXApGhqMqGUouNYeamwbx1AAnyghgJ0pB0q0Tks2CQ2RKcKXJIMmFmFKKsaiSRFVMvnG4UCypMQChGdrFufvLBT6tJXXBnOJQk4xiY+jas7mD5Esi6frxTfPWJ75+yNMtHnxWyqNG05HnZwlaugNImiIOyaTfhzu3waGX9qCKO451yrxLbLe3S8r2PHDqh8cQEHzZLMzMmUMqWthpErUdx+WZzaVqfrG9G8QdXWzncY7M2m48f3JK2iRpev3RzEa2EUcNsFxu/ovKN95///X8jvfWPOm4ltiW3LH2PIAgFBxLeECLQg4U0Ii20IKQhaqRe4kao5bAWMyrivL/xyjuffPkLFzd/+vvfXXF5584/uJMb93pX3DzetM0G8vt/+OKbN15/eLv5+B9/YfZfdFdvhGajKUNToUZICCUwRZyQsdK7el2Uizha6bjl+nhNdUtcVz44E1iAUEVXGHjhCSNCexhPslF0Cg+ypEHRgQ3BEiIJK3lYFSXmtm0tW2awsARhIWYbQiAlx0XajhEsGDZ56FAABaZI4BgaNKPEDA5tP56MDouD33x6+7t/9t6TzQ+/+EF4/bvL0Ppf/kJ2LttHtyfrNi43WrM9Go3ILzdf37+3ebrnrt+99fDhx3dL2S1DqM3tz35z/Dtv/bmrNYhPtyFBmCESYvCFs6n0JmljJomkBZihXDuTpB9QIIQwqmtNKeScDZnTuU7QYggYYhKS0JC3aFgqGJrLPNKKMyFYxANKl1prZgZrbqsFgwOdJmHl8Hok7YI5Vbv0nbJ5MinSjGKMVRVR2eLPw5qWhhQXHWbu5CKEcz+AYehJVTTPuanC5sFMw5bhsv3FAMGk7DYdLA4wTBWMIApVCyKwMFRpK8nEhW+aBhvNM1LaXGV0NtHYVVgNGVAncmmv+vCzh399r9198ZJw6H00XAbhKBQDe3DLtjZRa+gIJjD6qJXaIOPAnbKj6EiUhSzzSFqLGbo/eP/ayiiPXGReax1ggpQejiNRBAIogpUlcZ6FYCnjxSqQoNRTHu8klxJBGmIsc2JxJ5oQoIAYpNlJQKKqLEQCCaJBSUm8t96AqBc5Fbx86fDqYX18fDIpSlgKJsI5a6WoxW2U7cIICwXLzgaG1bVZ7db7k9kYo6iOURQBLpANarxQUIMApGIPpMCQzC+SAEBJkBcJiVEQoEIm7SCGtgsgQWquVQY6OxGT0WTqAjDbPImyJjSIAJuMo5O8TkFsUkiwDiz7pA3gLOfVVJW2pCRj8pNGaVmau1FNdtM6zK+pieBhhTy84kSS09xtp2UCAUTKgCCpFrN3VxJJbCUIwyohAgoxmoo7ZVoga0p8St9I0kFXzq9LVABjGMlaezhlms2wMjqtiigCzWwsTlzMTP3k3gcVFNYyWzCJJEG5SXtNAitZkGVyGhlk2VQQFrEibGwp2difAFEK1klRFf/fn/x82a53D0oTViWFSegtpNamZv8odJ/I2feOph/e0+ZpOBxXv/pgFFp2vWhrYyATmUDiRQvSLprScE1ccjh1hsWWLXB294sPAurdF97c+867Tz+4RYuV42KkaMsQ+mSeJ4GdtC1efOVT2rn36MF0Zqwxbxxc3Xv0sLsy/ujxcRMcYOud/cPdl/pPfgEPCZW79saiq47e+450JjzrJYAbgzaijWgELauP6JVIID0QwR7oAVUKQBAByJJ3Kqo9qIVptTnZuK7ef+7Vf/zgb3Zn/vXxFZLrP7u/PHz57cXtLxf4+k/+8ubxfPrD//2T/uyzvRL1vN29siOzSes9obBirAjAYcQ8Dt5ZT+ONVus4XqFeyWhOs+Mwkafu5rWxQOM0cFU44aBwESqgCOoELaMk6cAe1DG1QCEoCIWIV2sYnmzbtPW41iDRBssuhBCClFxKiEbJEItEKEIf14tmb3+HlfsQmECkIn0UiUGssdpUEgM53jztb1555dYnHz+785vXjib37y/u/MC++u6bT9crPNzMfLO45V1pr15rsJzPv+npyeJgPJt37of/6e8Pi/2739y1Xt57v+/608X6i6Ppdd+vGaoaQFFjYGKYNA8rg8AcQhAR55wO410qe2xM7Pu8hGVaLBbG8NBfp+O2tbXLeHL2kNIsBc6N+ZaojPMFp0l6jDRh5FFSc5BLshUgPh97Fcn5gZitZdWsVGI2aZrOnfiw8c2BgJTi1BLJ5wKrSzk5H6VlkGbEMdNRCOcQIIbXvRVnUK6VxDyMvLlk5+FOhsE9qShpwL9jUB3McLfMsKHV2P78aJBCZS+utEtLLYJoBES1Zwaj6kN/aTr56ONv/svdsP/CJVkHHQkajRusiqpCXfGkpp1am8n0uNiAG2VLsRA2pBWKRaj7wCW4rLjwoTLi7EKf/s//w7XJtelZ1cXR9ER2lzxbYKelSYsxNUQd0GseIAPQQ3oFBZBaonR1qjIQsB3H0jtRyrweGpIJKLGe0/uXARqIKko95XE6gKXs2obiuHEzh1UTq7nYFe9P9le6AghwASRxaRDieBRskSobQ1ULjVOZHe1UBxbTgAktMFthssa04Wkj4w1N0Cp1hDQJB0iv8KkeiyBAY3opipiIVIpohMBCTAwDxGF6JqPJiTNndg1Meh7aKpw/g
clcLgY21lrLKSgrP1qsOuAyBqrpjkxri0wgUBUBGxrqdfq6A5TCDAFIs/JYNbepICRxW2IBbjGeTPVLkZ7pKUz6Nr2All9sSzU3npxyQgd/Wc1G7jnNLBH6Acl9LpnBBhxbhd7Q1WY0PjW6Oa+COAkjRQOSpomQeEPZ+BYsIbKxCecn5agEMCWes5IoCJZQiLCiABXMFg5UsFgVhhIJh3o8unX7648//Xjy8oRkbdWPqBXeFNKU1NVoKmqfWv+LhW0e6Qs7u5992PSrceV7PnNhHW3P4noEIoYpSZgRwUJkoUWjjSirX8zDRvZr4ZGtXrxR77zRfPwh7j+yfs22MrUjJ8ICbiqePR1funX3U+bNct0dvP4qHq06DZ9s5MFi6UZlH8zVq2/Lw5MwD7bF9MY79+z13eZk4md+5bmDa9i3wq2iBVrAq3qk/2J4oFMJRKnfDEBPzAIv3nHP2qk2qg1b6775/NGTeDzH/m8en+GlB/vF5f3y6PbDb9zs9Xf/7A/+66e/uv3hN5NKDycvrbRrZOUffTJ5+T23uxtkQaIhMkyAdQvMVlxF1B3Vjbq1jta00/Bs0Y/Cxt6QfuICIDAcBZGNqAnp0W8JraJVbBSd0VaoYJTErcJxfndMti5rRIxcpdDgg7UFQdtNZ3qyUjSL1mkBr9LLdDrp+269nNdjZ4z23jP3gDonUVpDQO8kmnCiblb++ff+/Ac/+yuu4sHBlTjpw2M6mhbPjsPN/Utr1xmO6/utX8n1ywfTEqd3vyrwymU6CieLK5O9cd23zSO2TVVL7wOkFwXIq/aagloVbFhEQ4jMbF0RAomqsVYlhBCZIUIiaowVic65EGI1qkZVlRzgGBcjU/IFcz5rJtuprI/X85VrIkjCDEcfyJIU2vIrmcBZ3pcN4jXXq20HPtwJqlDmbZneTompDGZOVS7E2z+i2y+Q6+45BKyiSeq7nUnP6VdbjFm3t+D2C9FFkpduv93FrwRQhirPX1X6BoNIaYDQB2ljGoQIlkBKieDDSPbKlqUL9bi+fe+bv/5kPbm+0/rOliw9qCNtZL0zHslkhVkdNzWmlW3qS6vowbWiUnaEUmiE2Wi8bPtgg51wDLTq5//Tvz168e3Z3LV2d/RMxidmt9F6QdOFVNICjWIDdEAHBFAaAZMaCVsIGonKOvQ1SCah2fZbc+BP/iCHFFyoJoQGgA6ll4QokHZSiItN11hbyXgpM7d7uHryVHYs7QoscwlKhFx4moWgcIVREbVgB0ywf/Uy7avuwlflmqaNzjY03dCk5Yl6y14pkPYKgLrcKGSFDguQZk6oREkjX1qARgEJmJP/eG7dcuGgbSBYSmncVrGtTj09TUlVo3lbnDCf9CXOH66k9E4FSEXSoKmixAIaAlzTlLvtcdPqIlW/YUMUVWiocZLSBQ0ASBQRSQI83tKNt6c2r2wzETqP/JTXLkRZRJ+Mlkk5sacSGZOtMWxySBk0WZtnvofq+ZkEBhZFgrUHMwBookoZsuktByS7bII1JAknsJEYNNjEZdg5hZdks3GFAVuIEU2hv+ARq4taKBfEBa82m3/5yU/qnZqcmNhVHI22jvuJSCX9SJsxGt/Zh3OVU3vyRd8/HRuvbcOmDbRBCIE92AssqCfjTIpv0w68YTgO6EX4/X/z3StXrz5en9ajA1zeid/7Q39/gU/v4PhMfQO2tioFKvuzeVBZdvWlERXFqzuH+08fPz3cf/DwEYl0S9ofja8vGrl7X9yYr731sbfLn/zH1/7d/+KFQ0POk3hwz8l6GEHhwegl/Q8NqgL2qoB6DFYrDKFQYeVgemNsOOn3xqPVvbNn84fTFy9/9ajYeXH24bP5+5cn49nOvdP7608m67M3ZXJvIQvRzZjbkmsrLGdzWx6uaFwiGhODCd5MfBytxAWMPIoWdUvjhU7nsfaoqvYkzNvDSy6osKpw4QUC65XBNlRlU9XSEjloG9UoWUabmstzF3/rV96WNrTRGgOFl96xo2g4EiI7LqWJDs4660NrrNpCFJ6NVe8JwTD1cRWjpVKIgmqprWl8W+/vvXLpD3/005895GNbi6dm51L5wos7Nc8cVn7lDM6ms7WcPj5rSo7Xyuny/d+fjnGwahaLs2d1ebAJj0QDcQjiywIhhBh9YY0oQpTSWiCyNaqSQRBAIUEkHQDDqUIH54oQoqpMpzPVYb3LpKJpFN7a7wwNt+YbMYkq2Vyom6nSydDwntc5ypcQiwgGKwLeyicS+2KwyBkOMSAShj+QrKbSGAs+t8o4/87nsyq2ZtC5MKchTWUQvxBSuUv3RvpTyX46OxtfLOQ4N57HBT4V/dY3v+jAsLU9ylClbr8SJZWJ5H1kGrv1XONBykaCiBu79dmzv/rZxhzuownsGK3SRnRNUqovzcJWjuoSdYEdi64o/Wi/w0rJAU6oglnLPHQYgyu0LNKv/uc/vXr19clpiX6vetaPl8WlhUzmOl3YcYMJGqCFblRb1Q7wgAc6pFmY0SsCECllNWUl8zDW5Y+FeAi30a3vHAQaB52nci7X4MiICZYW9oTeet71tF6Zeh52z8gvYHZmT2AIFSVdKxctzzSoERYWCk64gJtWe1f3dS/Gms90dqbTJdVLjNc6blCjgTbARqWNANARPKGHhlSz4jCJx1w9iECMGCQHYGr69EihKnHQ6WRrZQBMQsm3ljhHcSN9vmCybEVigqMlK7KG4pl1ADIAxiwpdleFBsGbyJBVlFYyg5XN9nGXIAnZYuYYBGnhkXbTqRsggMiyVWgUgSgTE7OeP+CpuIEuUsEH9w0oBlNopFVVeqZNInuI9tKnqddyYS0LNFGwz+Vg2/E39WbCF05s0iJqZnQPfa6oUpCMuyPz0ih3EEZzc88pmYTIAEbVKiw5ggUctFAuSQoVK1VRfvDxB6eni3pWBulhOYpEhSE2bEiMAJHEsQ0dQnQuaOWKuGkLsUIdyJioXLAS1GjPKQyCU8ciAmYXRL79u7/38qtv/ud/+Ju7Tzpz5ebOC78/vvZ6fePm5vCd4qn3n3zK7Z3+ZMEM2Ha/purS3sI/qlF1y9XjndnnS0+yuO52d6vp4f4VN5u5P3pptHv92cY++3/+399/4+bs+o3mpGcmBAvpNBpKEXeBgEAULA1kQ5WUYy8IpCmex4AQtUHDVHBgYUPLx+vVo/Wlg2vcoXsii2e780v+/7h19u/+/L954Y3vfPz3f3V5Nj689s7Dex8vQoXyrAujK7Mj79v20cPy5Xced95KX2ux6orAZk2TjqpWit6MNjReBOcjLtuVqkfrL8H2EGUXlcDkxQbmAOu1MOg3Vd06hwZkSDdgS5okLD4pHcmGpdieQ/BgLQoXW99TcKaI3rfrxonjQCFsvF9731R1NZmOob2EzhU2Bg/uDdmiYtUOwqREUhVm3B5vXtg55Hfe/vyLT09O5m+99W0huvVPn7x4w5aVm81sizLMe/GOQz111erR8WJ+cvuzk6ePT6Y77g9+/w+nswNr6ijeFYVqZMNsit531hprqQ/BWps8sGKM2/MARTbW0YiBWpVq3mq1nM1m
2yOTElZ02y8Pc+526uMkgRhsKSl7YwUdqmQ+ZulSG6jO6WYBgxM/BTo4EWxB7Px6tqLGBLJRmj6SW+y2Cl6okjivc3kwJtrelNv6d+47H1TzLaDYaj/yn6PhhsrQdC7k6S0rbV3z88tILKzE6JEsP8kzYJadg5lM/llpirrM8N2AA6rCAvAQFa6x/Pc/f8buKoLAO22icMRGMQZalZUuZ/WIJ43pVjE4dFb90eyYy8hrQkUYwZQUVVFzE5Rk8b/9ydXDF3ZOnMRpear1ylyZh50FZguMG5n4xnEraAFP0hG8Up/3djmZCQkLTUbGvWpgEqLzdSblf0ZFMpzTNPhmhYlKXsBCCFGDzaNYgPYsLaMT3VBjxmfcf7PC2u7PhVy5rGwrbdpvEY8bqYNqqnmESrz0B1efGx1UsuOXdLCi3ZVOz3RnicmKRr41aMAdwQOeACCAY4I3RSHM6QHIOI7mhA/JNh/DI5Xnwhz5Q7o9CcgzaPJuOlf8IhMeJLsn5ydI8o5/qGbJGCaDQIkomXsyw5R+sJqrFLCt3xl3SP8qycGDFIaZmcIFuiVyI5CMpYjJiESBDBaACf7V/PSee6P+lrNXfjJFt6crmWDT8DYjSCWG0IfQW1fkZRHOiWM5sOF8kzX8dJRTWGrUFI5i07tPjZCmZDBw4nIQiMgm8lYEUaaAk6bAI2WyzBXDijhopVQRV+CaGt18dvsLa1liIGGFVYIq96AeKLkgjCQ0sEyV5R6h9CEQSsNGYEspRAliQQ7ExAXIQC3EkhTqKiMuWqeR+OzMP3j04GTd+TY862bhuNu98bvFbFa+cBivXMONpvv53e7+r0bL00Mc/86rV+4tJj35vtrZXL361m59af/PuLJGAlc7anV9sv7xx785PGtvOnP0/vdD21vv1XOM4GgwrIcQBOhVg2qPRIHOC3lz4UIECGSCaktNLYAd8dM7D+McRRXbpz0f1nf+efXad2f3+9WXZw/np7j2/BtP+7DQ1cG3vv/kqw8ftm6fu3/+4tHOdPbayweWJmzdwnfL0PeoN1z2Wm60iuw2Wq77AiIzWk3UK3vT2R2xQiLRCTiQAmUAB6WNliPqVuoLdeuq8mxhYK1lCzHQTRKoqOUV+tZbtt6HgNZaVgm9BmKSlXShI5HK0WhkAaocNetTQCoHEe+cDaHxXhwLkwOiNTbGTfCFs5Vty9ePbr56eH3VtUL22dmz4rA8ufcEE//owQoeq7NaYeOm/eKXv7x589r73/3Lo93j23c/qqvq6rWrvm9HVR2lFfG2QBQhCFsSFWsNA30fkvmbs4VAY0iXDkfVhC0lLyfJCTA6mUzrepQ3xOdszSSnSQQOZK3DUNSYznURqjkgiLIhzvAEZL4XEm7H2y4+5RNR1i5slcHZaHcwtWJCuhNUAQgpJ8bI8PuE7eu56NRxoSSfY9Cg1BsMcB5dxL23Y0Eq8vk2GG6jc1AkzUMX0UYeoLsEsqWRSBUweQWdbKIJUNKs6kgnhARCeaBMll0iIVyeyH/50YNH7aXd3SC+CralwBCIVzQqVuFULC/ryYh8FVurrUNv2VdlOyka44UqYCQetmm6Kzv+3/7x9f2ruwvfST1eyu4ae3PMTmi2ot0V7y5CxY2ElaABd4weFAkRFHI3RBRAScii+a5OZryJ657xhZha8OSjMvQyBiwMIUocpagI0IBo8yXSgbxhr9oG6sTXu6vo64PDxfHJwo0rzIK19WyRY2eplSJGMcYyVGhEfdsfPHege7IwuwvaX2JnidkK0zXNVjrhDWMDaYQ2gAcAeEgUdIB60fNrDIhp7WiJlVgRE1VJttvZtDkZOrDzjx6ARCVK7lk0tIWJ2ZiRZebtw5J6yiRz2npMnFc5HZQF2yKWnkDBQKvOqh02SchjUx3MTQAbyxZQyQliAEEkxiCaTXdTwytBlLYMZNEBdD6n7GP7RnD+a4sRDIcF287b8G+rrdKhpmwpli6cC2xnJLecJNmjrCDSGISTYy6B86o42ZsYKGsQ2vbJakUJMGAiWKYiy40smRGpE6lUnIz2yk8/+/JZ+3R8WEsRKNooCCABQvrBEASVRWXIG2YYjcWIHYSDjJQ6MR1DBRawxKxkUj4AyACWAolhY5259dXHfuforW9//we/+o0rDnxZ3739yfzWrbf/5P+Mg4PN0yb25eR73y713dXp/ceffx6bZT0JfrJT7l9+4dqNTtbHpwuqMJ1O51/d6fvu64e35KOPXrj2+8//9/+b3d+PJ73CUGSn2noyMY2/AHrVnqhX9JCkVRBWKxDAACH1VIjCBWkM6DyiMyMs78/lRJTt0dGluw8eoywfVZvvvrHnusXP7rpXXz18RXDv148eXnb7z7/T3//q7upRVU1HO+VdP8cXP8DsrX7/W11Ya3RLGQWuNihDMCIYY+O4r6irqauNx6YPTXE0rYIwmBtFwa6DiaANlS15p96JZa0bW7ezKY2ANTFr/iEr7OL+vJpUo2pshFQ1kBhG27RlVVWolH0X1l0nxghzBHd1VTFH7xvnHHGw1vV9A/EoWERiaAs7s0WMsn56+uzky9OPP/iEeXx4eFCNKu9p8TDMtd2/dDguxlPXj2dVVY/Qhfe+++7VFyfSuLL2Dx/efvzkiytXDh7cP7l27crGB2aE0Bmj1rKEc51FqjYhRmYuChtCuMi2sNaISMo4SuPoYrGczXbTURIJQ6hARlQvnM9cHNPox8PmaVh85qxDulgdoUkXGM8n3QjAGmZjBJrkjnlhTNlzY4uGE3F20ItKIGU2vMU/L4gUt93/cH2ct/Oa916qaQTJffSgfQBt0fXBjABpJkn7Xs2Lu+FHsnXYTDC62e7OMIiVc4FXJFWwgKCMXLsHt2ElZagSaUh74Vldf3H/4QcPsXOJ27Z1OqImqDW8ZiKNLMqqFmAsrEMxCfAbRIF45RrdEuuibGrXKK/36/V3X6q/8/abK3KPOoR6tpLZQnbnmC2pPjOzp7q7wC43LCvhhqklakk71U7h0wmPxJGG7iGNX5wXpcP/ceGJIIIFq7KAgChCpEHIMAKYoIzEzOyFe4aHdMLrIE64cGJVuG1m4189cit7eMIWghqrEeqd8Wo06qhocqpOumGsatDZC88d035vLp2EnQVfeqZ7c9o9o1lcG1oDG0JL5IlTeHeilUVhFki6xkQ0qopoTH0HkQytV1p7UFqaUH5oB+5xXnAPbRtBkyArF+EheQ0DbSHvb879zC/+4NJKgpDXPefmGGndkgIqUxxR8nJEIlFaEU14fwhRglprVJXJZBcRImZDnP9uplsnKR3UJEkCiSRxYO4/hzE9hzVJxsNT9UzYmGZ8LMnVABhmIoQoQ6QxTA5lSD+MXJwTYVwHwDnHMQ27o3SSJAaGTWFJTGzIAAZEAhKYxMBiYs36LSME40gd1AoctARGjErMmIPTO8d37C7TRHREXAYHzyAVZmYh9kQd2NvSWYkdojHOSWiiWjWWgg1irRMnxitDClDJYpSssoUtGDUFQ7G0QZuPP/vozff+9C/+9P/0VVPcPV41h6Pazp48W/36o/9ycuv
Oqzd/L1bF6fz+6fzJSy+/euX5dz78wX/C6dfz/t6NNw4/+ulP73z5s8JNXFlJT2+99d8cPFxVYXLplecvXb20edgZzyYUXS++CSZYbYVaICiSayOIyAIKjQQCwsC6MKpCJLASNHfDvfccxqvjdfOsdZbf/7NvNz/+wXwen348ujRu2dpa7T/87UeLt1/99l/8T9/83X89bvzsxh9W68/YHYPpRGMRNlX32PH1YxxuuAyKRmovVOt6R1c7vLHcV2jHHEZonawfP9Eb1a4zphOZGeejmbLtyTRartFb7UoalUZbxkq4sXUw9vxSj7C1RWiajU/IqgHUMFiFgwhWzAFoVXuRCJZ1453lKC0Q+jaE2BpikKwXLWgT+qgSYyjZctMsV0tl2t3df7y7e9na0DSr1ebRweWaeTfIrfvPnlEw/V37wovXK9d9+E8//PSDisnO9icvPH/t889vLZfH8/liZ/b+5aNRCIQ2hOAH5EySfXwMwVprLQFI/y6iIcYB11UA1loRCX1fjyprbQg5CiZZN2cj9FRzMpJE2zpHBMt5rZuPzOBKkQtXHl6TzGDrg5MmXRaJfR+Mgq1JSsq0/iUgyRpTyxyjDKk7yEYGKgm4kvMKyxd4ucM/L7Kd82r3HB9WDGTrbfnVbbHNUz8pJavqBKblwSa9kNz+J6INJLnwJPt5MChpTjhKTDk2qpw8MPNeMb9g4TyXm/Szjb3/64+fjupr2p0Z2YX0pFVsU8wzYDSlnCZK+GpnwhYCFiHPZRX9TJtCmhbtX/4rd/P5ma3dr9cQspFtCG7F0wVPT7j2snsaZ8/MzMxZzySsDK+F19BGpRG0oJZIYuYNs4emhW3eGG4/XzYGRNCYdL6DwS8sAzAh781jdmZKPS0JNEhnyRActIO0TjbMJaSn+/cXHz3ZOdirC9gAHqGqRBqsKpxd2S0qGltYz7AwohCartzrC60DnpvLeMX7JzQ7of3QFLRQrEBLsGd0Sh0AoIf2oCS0opAXAkSD+UhMNmBKQ4sG3Sp/mClBpdimBg7UqvNN+LZogiyrig6x89AB2iWgl0ggw5S3NokwwdlxGhcfVxCT0DbCIQHRxIlKHUnBGXYqbBElptykqIHBzAYDDpVAdUkJhmmOFESNTDJARYMOQBMqlE0vNf+GDgxtQjJCz6A52BgG0kFOQj0RBQtJkWp9mnGjJKuNpGseThHDsgEAZVVYm6jdGHYeQLbVlJRemTxbBYZgoRBNnhCkEtgwLLhiFNBCqaZiYk82p/ef3HMzlkq45JEsJ7waSVNQhNhA1sGoamACODq2tvcoyKl1QWBd4YKVHq04ZkvEapiIRRjiIKUpatNXrVCwdve0lZ/94L++sP/K3o13qzffPCyOPB189ai9v4r7b79947XX//GH/2nR3DOg117900erR8yt1t2Lr9/Y2PbR2a2DGy9ePXx1/nR1fbaz2wX//Buzd75/eHl/c9yjU12Lb4QaIbHoQUSwgEDFqESkOD9Em+RhZAaCewYOSSVtARkizLqOtdbFmlYPmwcf+3df/t0fffzTxbz98J/cze/S9atdPb78zfGzsx/85M2DqxTk4df3rl47rEaVt8f7dahGB8Di6elXka/NZSbRBgoTLGbWj9FVsig1jo2foh2hta5dN0HO2r39UQvE6DyXyc21QFUgOPQF9Ra6SgR+lUW5I7VJ3j4U2Nb1LEgQSN9Ha6GqMfQAr1Ynwg8FzyRsouioOiStNt2CsQYvRBbWQiR43zJc5aZN/MYaFu7UklCJwk2nrxdmf3d3L/ROVep6fyZS2F2DWR+al148suWx0AnifX9Zjh+vmqb1MTw9cc69HMPq6RNf15PPPv3iVx8vx3X5+us3dvcmvkv6hxgl9L53rjSWo/RAqVEzsiuSwCvDJkoUFQWste2mrevamKQCyvMsEavGJDxIu9Nh36eikjXByKBxYuXkOyQphoaNlckPbxyEg6nG21T1JIq1ZjtDi8KQqlI2m01+win8INXN5O1HFgRJGzgIJFm1Uw42ADBYDQyuOTgvx9st7rASzCZJCaSjAfJOI2q+VaHJLSi9v8EhUYGkOky3G5KLniIoISmf1ZBQGmTSLQpKM3IkYwNgowV5j3BQmx/+7NGqrUeTNvQcjSqvEAqsxlIHMLNFbwFGkXyqxC13po1DYKPkrIlziVfrJ//rn11zLLebvlsRWRdhe0EANzpd03QZJgtM15jwnHQBXVJxprIm3QBrzdXXEyQAXsSL9MxdgmqJE/gllIyx8medo51SqgZIFIagyZolPzKhVzCzVxTMgaSUoPAq3hof0Wqz0Kuz4qdfPT31V2rSGLsNuMZkhNUakxEM8eVRgeBjr3BUtv3qsLp+bK+dURlweGana53MZRbXjDV0DV2qaYAVpBFpAIA9Q5Q4MgIziQrRoCOXPPamHkIRtnA6QKqx76UsS2i2ehx4UWTSrZf2I8RQRI0JlQeDk6XlVuqtAKEwJqrGFIKb6IFQRKHBz3LAigkqQZG+KeUlcIKT049eQWCT7Dt0eCwzXziLgrJuUIlSUHaShg1NAdKuHjmRlzgRnlVkm0qSIKu8uqZzUT40G9RlRS+nx16TfFljzEsba1mFYEKMxBxCMIOqWLMQP/f4KRpDVJJVHLElZVVOHY6CjLGJ0gtmNkxcAIWkvGkIGGSTlIVRiB3r/XuPNqbd2dsJpp/ibGLWozivsZ7JakIrUhVYgQ1iPdvKajJ3EJZIxE5lI1oqWWtZxYgWiEVUmwZyiISm78zIwaIDIpuFLf3T4+cf/e3mF7c3b39vffRGtXP1d7/zffgWXfOv3n9nPd+vGn/kmsd3P3mDH88uvfTyK9eirP/iu38w3d3tg9nM6vknH7fg2Uvvr+WZXnoOJ70QODJ5jZYZBOklsKZl78bmAE0KMUQhwwyJIcXkJoojawR6hlERYTEkvgvXn78xtf9cTtWfPRjN6PvvvfnJ3c96tt98NIevL12lw+v7rTxpL+3uupf00fyLk3r/0uH+/uX77QM/71t/APDBtbLzk9isdu1qFps6tLs43bEr5jCRdsbNWFojPTX6ydeyK4e7E3i7KKTuGSLUolpobxEs96SB0FsNFlNRXY53BCw+mkqtUjAGTEVSralqYZ2oGjOTqCHsGYu+X1Vm3HkfuvX8NCyXK+/bcb0TegbFyXR05/bnh1eu37t7u+uCc6Pet9dfvPnqq7MHD++NqnrTbs4WJ0B1/YWXROx6s/a9n5SzpyfHQZu6bhWxKkfW8O6BBXBy+qv9w8nIHn19/5eueO7pk+ax+r5fXb58NKqr6WwyqmdGSuKWmWMKskZ0RdnHJjkZD8st3Z6CKNL3IY2AfYiqYoxVEQzbobTxGaiSNLRXMjTmidqUA9IyWCmJwGzSDKGJwUP5ryXNEqUEX9UYAm0vpLxnEkAlRmYWwbA5S7eQBhHLAjDniZxBMhB/cof9/wf3De0+Bh4JaIBxgAFEzpZ+AyangwNf/lspQkkSHJLu67TXTuTSVK23oUdpg56Kt003vSZiTSyAzpDVGGBFA48r9/Bp88MH7aSuKt
sDxdovVQq1tF+ExcoGUSCSAGAxAhJIVNEwrVezuo3THSvx7OT771x6EA56vyqYo+EoEHAQCmoamq6kbmga1iTriCV4Y3hNuoRZQddQT+QJAdAgFIh6Q8LZdUEVUSWmYTHF6RBn5vp2QygZZU0hT0n2JWnCTx+oqodYUEfBkWdsIAbK6mDDSfebXy2M2ztdw9ndzpS1bkY8OY1xt9yI7lNnDUkvVNpRh8108u5DPVpxJdidy6TVCc5EFsBasCFaE22gDailvAPuAfi0OQN61YEUpknsK8OMl7qurUsNEbExHGNMj2he36qmLK4txDI8SwPkrBoR0zBLQ7yRMTY/xlmjpHnSZY4Ss5mWAJzCKJlFDdNgDZscMCioqqjhrd0j0vcY+ASSDkvmG4pqrlrMyT12kCUMmNE5CTnjTDr8vQs7HIAkaow9K2xRMBtRGU6lJUKMcp7yKwIRIu47bywDcIULIThrZZDJa1RhMLNlVk3bYUkx1JpW3/n4mYS4J4ZJnkbVxKhEliuSQuAUTsQyl0CFWERxZh7mNGGYboL1lFa1LmusJmhmdDbWFUMMZfy/F3bcskM0AgsuGEa10BQjDQN2EAZsSt8bIpcsEwcqNLJVISX3rN7fvfle5Y4W80V8Ou8fbcLTH+3deMWw7Nz5pZl/w761t//2jwMbE3T9m8UPH4/Kyabzq5YiSn75xcMbV9znv/71x//H8oU3vzV9i1oblEKIoMBsxBOg7FkCCEwi8GnhxdYy2EtwBEb0RCwKlYaIoxKzgr1CQNYHmdaTl1862sijZ4+/fLaspPTVperw8t5zZeX5RLyd33tiStcuHs3b+4cv3rxRH3754YcnE7d7dMNX83pmd3cmi7NPX5Z9mV5frZtLpikxH3E3QlvH1YTaUViVgYOQzmnRxrld7O1NxZqee2OYjUyq1jpfUl9pa+EdRyvCUIEIzKKuNRBUbYxJ9IYQeqgyW4lSFEXXh1G9o1Kp9EVB1chax9Xo8IVr12wRDWPTbJarBcgXRbG7e+j9xhYvMBVB16Awm0kTbpGNi9Xi4OBKWc0EQc19yEGIzc5OXdgn1w+exb4msEgHiA/r9VL7PraN1Adl4+Ps0u6ly83R9ZmTV1u/jhFPn548fXo6Go9m092yqqrKGOsAbts1hlUUkw0izhljTQwSYiysjVGqUSUibCz6fpAeZQQpmzTn/VZe7WS6Fm0JoQMqnfQoyMQrwm95euTarEgiRmQ8OVlOcip7tPXeG26DXKoHpCx10AM1OlfVdM0MPJbkAoKBowvJQQn5yCHdtJTykXOa+VCUMxFmkD9m1mn6HzrgjQplMjoU4NzN51QWIEW1qBLbYZhIOmGbF+psLJwIcWYEU8Xt33/whMuZNUIaH651XE4sU/SbU4kkZWgtsxjmaDPYrZ1GEedts+ntZDLvzNUJl9ev3VpRxRsLVTIB3EdlIFDRaB08YwE0Qg2hIW1UGjVr1jV0rWiTVkcEHuQVgXKErND5YlsBEps4b0PaASCqydVLwYpIwgKBilIgEGABIgSQIQqqgYJFx1SAOyMNSofTu2cPvzb2eRMldNNqM61rdI48qwb3RPi5EAxTwvCLIN3Ls3fvxCN2lddZWME2wJqxVLSkDXQh0gAb0kbRAgC8DuTRHuKJA5FkaXjuLmPCgXVQvSN7RDBRsoBWIhXhhBsPdMJ0IrbtHpEOySd5v5umUMkKusyHzp2aDinU21Ch4ZFOEAoRSKIQwRTJgCgbZkoU5PYzPc6Z+TCcLxCpxkHkllYdWRKYzwsApgFuT89oXgLTtje9QHQnsCKm2V8Ge5wUzSlDOkXGrm2ag9NXSU9FyjoaWn9mzmphURl+SAZJhMVpvCaymtOaCMQiIKU4vGZVS4UIE4zCAY7VQUulSqVSqTS6UIxlRgsXFxMsZ7yZYD6hzURPp7ogCGssEA1FVWrZ1lMPFSqg6V1Yooq0z0WXUwEuQAVQAxNgrGZKi2J8htlKpy3Vc2/Xx5sX3z4srn27eTC3H/3nm+9/1xwg+lPMVvvXnuORlY8X/skDltjbvXJ1oifL8sa7s/0X4/SAyt3Vh7+4tzl5EOIrB6XsFdKvTZq4DbOxoQ3OlrH1MIKNSVchvMtzVARTkMjGkggTWoNKNQAthAECCmaJsjDsxtXRVx+dENsn6xMeC0/a6OajfaD2btftHtX1LuzE9Vaezk8nd9sXGvPorGlX1u1e+fXDz5/Rw1nV/l/+h3fqEb5oV4irkppxWEzY12hqXfWt2FUFH93KxBD1ySZSVTCLDZGZDHSk1WzDlaoE5czdVIJX7tQFYFWP0cMmZ5YQPBOIbSLy9n1vC9q0DVFnWEOMIYAtfC/L9QrSbtpVovsXjn3vL126ZAu9ceNVQJaLpULremxdaA5C5fYWq2eGJs2mXTYPoAtruydPHhir680DZ2Y+zK0NrnD11JYje3BlXJYjiWeIa5Vi4xeWDseT8uTRz4OfTMYvHD13hVhWK9/MjwtTM51WlatGhTUOZLvWRxFQCNGGEKy1TNS1vq5Hx8fdaTO/fHiY0CRkSoayYU16isETYyi6qgoQpwVRhiM1+dtlLlMqmbL9vYSv5VyjLVa97chpuD5y/0vEhS1UJQ6saM0EEE5Oc4oAYmbWC4Pz9jb57V+63T1hGBpU82xO+RdrpiVjIHHnWqxpLzbsj9MLFzAJKwyDEy0z2x8k3UfiDSuIrcpwnbLd/vWIQkVVJUbereXnnzy+t9y5NDPzTbxahfeu+F/fndvZTmCYGFQkhMoGy5vkQMACUE0QCmV0e8b0aBaL3/s3R6fLWRujq3ZEgwSwsiRjZyVdA41QS9QYaRQNsvy3EW6YPGun0qmSJ/KqAdoLoohPRsiAaM7mAUFUZeiHhslNkuYl/8x48ItI6HsSdQMsapA2U12JktM7qmt8+OmpbCbVEt6zBCCYUJatLby1rpxYfnnNK0XBQkEigxp349SPdvvKPmMsJSwFDbilbKOT3x2xh/j0ibeE9NZSlFNQDdDMxqLB1lRUEyM4pcarZrqZNbx9kgfZUC5+23/ZPl3Y8gsy9WAYmmNkohyRqVmiJFGhYq3ViwdlyDJJwX55Rzsg1JY5aegHD43Ml05/f+iVh9OQorqEMWgQhg8uT9903kVhW6NZWZMgd9hdMZLLXhqQ0zvP2+l0aQwzOrbkK2NYmaDqvTfGhNAbY6y1Ibl34cKaOVtepkRI0KCBBOW1DhODDBLOxGzZwbJYEYatWCoJNsIaLkUrkUrNzJZxUZM4XdW0Lvv51DRTXc14NcGSJBr0jIA+UfFITFFMrNi0OWdYCAt66G8NvkBBNCKdaZhw69wGrsW0pdmay85WT54+ffLPP3nlpXdHp/dGl0aTa5f9w19dvjLjq4f1ft3euSUTM3vrj3wXeTbltV989Pf1tRfa9lQ//JfTZTxx+6dvv1/fvvV8JRO7Pq7ZhaRatGSJnQgCWatIfIHUQ4l2EiNYmeHAnQ9qGIQSHEQCGCItYEWVhcm4TedfeuXyan3p6OiVf/zxPx9e348WYvHo7CFJpezuP10dXBm1enb40
tXju5198bDa5dfs7ItbXzaef+ed9+yR/e57N44qofn98Lh9GkNJYaJNLQunK2yqcVuFTUsdb5qwKzAP/+mkfW3/4B3DrWdhS6QEq5Y3E6uq3JMNsKLcwW1Q9bCdqUJlLdSLcAo8ieJFxLDto0cIxrLGtpdo2ApC8B1ImUPbtb3vmbXtNrOdsbO8Xs/7Xspy48NKgpRl3bXOVaOdHQp9fO5ov/fY2yNrrwXvFb73vg3i29eCD5t20bc4nR9/c2fR+02zecrWu3IE+/To6uTgoPbePeu+evk1nK3u6+ro9he3VuGzvclbL1y7cTI/Kdg9fvSMuHXlaHfnsqvGRdGHYCQIEXsfQADD+75pmv39/RDT1mnYVxJF0XRURbZtPm9r8LlCV5O3RVYf5qEvF+wEv5GI/naUWD5fFwDhHJGbyjcYiiSTSrcPMbMoUpSTqKqKSZbUua3fntr863z3djE4HLr9x1B6KYUHZjnUsCA+Z3JCk7lESqYQpHdrVEnVKDGooGTNk/Lf1aYarMQEQ0QCYRgIIWVXGNuHwBxI2VmaL87+8ZarJlUbgnPybLX8X/7wUmGPf3x3vV+51kcrLrDHxogkLouGEEmN7Q36nqOeOXNzZl/bu3zywFOhoQhsGTHjfBQRoySec65PLagZpsMGoRXeMKIQeYIHApGHRlAAb2POBCqqQpC0AEyjo55f6IkMH9MolYpyekyII7MRCQRR9SBKAlZdFgRDhsKz5stbnrmWU6EJk4e0KiMhV3K5Xo37oPuNqThaC2n69kp10MWDft5wBQ7QFWGtuoFulDxRC9owddAWyIQy6GAgxBSFg0gkCHHK10owjYgGa+yA1tBg6gQoQsxkQyJmNmnJkHQEwort6RgkaxkBJt0+SDqodfLjM1iDpSlUotAQ5JBWGJornSaWYhYW06DnG0CmVN+zEarCsB30b7ncCiTEYI1Fso9ISQyUe4PBOz1/Qc2trRqwJqAIqSOlLXsZOM9okIE1KdAQA+c41MjJEXpwFMkrPNGIaAqbCW6S4QEZmqqtXUmyaM0QAEjUgAyzgVpRQ6keOoaDrSFWuOJixLEKqNhMuMHmyf1bu7at42ZqVpeKdhIb1x7PTDuLc7fxJNCQ/iPqAY1BlSqQY9TJzxJUgHqIRUJwkvqACtAENOPoRguplma8CtVKx2sp1xh5rucBV5u715/86qDaFL/SrltM6Eq8++Xu6y82x7/euX7DuPny4adFu4vpUWnm5U/+b4sQ5sE+qG90v/PWRJa7iyeT0eYgzj3Gy5mjSmyLuPK2LXrbm5aJiSydLwjUFMIiIhKYmGJB3ISgbdNNZ5X61ppUwiBqRGLsFpUZsdpmdbZTzZrj5vmXXzo+W8iZvPPq7y665evfe+Pq0cF/+H//+3CPv/dnf/7zD35gKmumerRfzrm5ezZ/5+bVOx98+IOvPnrzyB6OqytVq+18BysT1rQBNqFt+sudPdsYOy9u8K9q9/Xx8XV7o9k5dOxFSTiQBIGgHHdSrqJCQAE8gptyKcqtVn05sURBIWxSuyFEShxcCUQD0qgBFARdCOT7LgYxLIVlO6l8v6nYGAPR4AorCG27ZmYgLNcnXbfpwkgC9bFxxXQyrovCtq0yLBsDlKORTCYTBhfusgox3QQFABKwXi/73t+7++DsdPXxndtHR5Or1w8++TBM98Y1ja8eHYJvPD25v141Xbu0dbxydd83e23bPnz4FbOr6tnu7mxST7z3IuRDZ60FhJl2d3dCCCnkwBorKgQSEWOGxO3hA0+WcgAoZchw8gLOQHUCjbfePSLJanGwp7qwS95OyQCQL/VMepLMvpChTqfdkiZfwmQDBEA4pYhSbpuJtt33hXo7hCzgXD2U2/whXCGPHOdrL8Ugek59ZtoGIjUs+ZYlVaswqgYwClZYgKGsxKombY0UBpp8w9I6nACocMUIiCJ+NuL//Z9XfXkw47WXAtCey//rXz2bjVFnWvqISCh0AEMrQmSFAUGCVCxBuCs0nPzRe68u7/cEYsfCqhYslCKsNNFtW6XA2oi2IE/Uka5UO0VrZBM1ptLbAz3UM4JhiUiRS6IaFJHSJhhqtkhpLl8DhpmX9JLlWPn/jsMP1xAHSPLWB9AVPfmlOriTk/benVi9ZGUl6kUrcG3gOZLh8VOu2rAsAENWoJBWLl0+whNDc8IU1IEaQ62ihbaKDdADm1R9ARHiHgBpTHZ9RIEBUAQJZ/PklJggAERjIvrmuTbxpEitMUNpHDKoodu3rL8VbJ+e5lxicyWGDNPmMNFSrmRpIZw9s0QzaXB48EFkDFMgDBGZaft8/nhn9v4gUY5JXztIjQlM7Jw7H9+3vLA8KA9g1fnpoHwACFuiBw3fOn/QQ7ucuhCJmZK51SaliT8tqxSJraZgCiF477OIOZ11TQprgnICUUADMgYiWAVlw66Ei8EANpMgLWmpXBupBKWi4uB6Oymmtdt3vsN6Qs0Mq13jx3E5seuRnNmVlwU4ABEUSIW5BwXEgL4MtrRcGTGiNdgBflj9MmCIWOFUavKVO41VQ5MlJgszWdC4o9GmL1uMa3r67HQe7aqe36rnvyopmC+ITVud1vtxZZsvVieP9tpTC16wOTDVU1M+sMWxjGkvjNa/6W/d2R1XJ23z9MndWX0VElZV0boacGoDUdWjj12kVtmyWk1AGytpKyQm8gLoSCh43zWN40Y0ip4AbG115+6v7tz98mD/+snJvbZp2vu6O7u2t3d1c/b15T0dVbszt1zM73XH9tcPP6m65dGZfPH/+PczsMKcXno0uXrw0r/9/mjq73z645s7uN4uDj5/etCHxY39sGuKfiVzkDe2CdyUpw3Z3l3f3N+jX3bt0fHD9uOf/tUf//F/c+na1RA6TXoIBQmVWE/LxIljD+fFeZgaVWcqG+LKWE6sHGvJ+/SBAKTed4pQOCPRe9+pqjEk0msEVAwzG0cEy9wGTyTWqgglvbz3m+VqGSVM6lpFVZtxvWsLJkN99IYRetv3q77zSesBDL4YxhAX1aT81rtvM1MM34GIRCDsz88ePmw+m00PGdW1a9cfP7k/G1958uzzrx7++PLOu7vTm9cPXl2tTudnza3ju6NqdvngYDqbjes6LUEXi9XTpyeXLx/QsI7igXCFCxYBeZilbX3Nwfd5H5wOqugFsG44y4OYmIbSu0WiExScih8NLFACKSSd2yTDSBMKM1lrQpCcWZiSeTStoy4U3W3KzVCDcfHXMOKGOCyS88vii7+fHffB6ZaAIJFMBsw5+TMYhdWY6isTWyILGEKRrlJVZhTDIc5wN5iCaIyxHpcf/ObeV8+mswOWMAJ79gJXeFSnm2BN41Us+QiMTBHE9JHVsxomw+zFizC51en6e2/OrlTTZyfBlQatEhARsnIjZIdRdIidmJ7RQTrRTrlleOhGrALsFUG1h3poUPgoogiESBygQjqYjGbv3vRzpgF0zp/3sDLcmpgmFhsjZyAkVyprmDKfq9dKqh/96iljTCtWB/YqQhJUNwwTKjPHHAGtK1ygIAxZ6Wyy75/2tEAMygHSRLTE
ZeCYfop2F/inJVoR+P+xpYklakCzHzXcdOg1rfDAWPCKsiCJjhnRdpYzQ+dI68yWlHyHidlgXIEWkyi8hRfCAbMbmEqGW0Bkgep2JCcpB1jzmhiLFHQ+ydpbsUhZIu0rM3rjkZa9wOQPKUSXNeSvFOLTtBxGBkHSQKaYdAsmlOCIb5e9hpsWOS9hMQRMSlEiMRtILJOohNHU3sbCQtolFELc+lGIrB1iMpTIwhBgUDSQOZT3ICM20hgSrEMQ/UPiN/ugS1mU2lgeFpR5vEmIJ4HnI7UFbLCRF7ytZ6YqIxjThZ4UQMsv3nmsQfRJlyMoShXCgN0dApg1iJRdaTjXq9Wtx459aFS2fH4602AFi8+/Yth/kS3f4t99mrj77/0c353b6ux93azw+qu7c+GdeQ/niy/cjexdNbB/V7N6+HdX/1sWfOnz/nCxbplNsYylExquvxub0N9jXBKwHwIRolkqMQYi/S2dTPOLhKpEKk4sh7VApFWCkgjtmcWiCsAupEA6COiTlCeyJ4F0VEk1mrqAROSc9W2OaqLT0Uakk1GoAzcJbTHBhKRLbr1pR/QPbo1xAjiNhxiMIAKbzjYE+OY6gOoKchGzQ010xgFgEldXgmwKlGFSBCg0RXcDGw0NkxnHhWB4x9TzHcP9ifH86O919z6qXrPDw6tItGFvTktXNf/FK94XebxXR+vAxLvbO63/mj82cubY0n7bzViZi6mCQA4stCWZ1nIgkQ44mBEIKYAFfimokB57wdls4I4soBAvKIImpWaJrgYAJF41T1af9t+s8+4Qaas2MivmquUWDGsKwitmOGPPchELF37Ngb9VPJriDspKuqRGUCwyGN5iURKWBeoJLMR83owI4AsYgICymcY7PjMVRaoSwqnjQogqQsHoTIPJDgPcFBvVMiCGKAdk03X8qiDbOFzhsczXV2LMu2XcdFFxchNuJXKFtUa79B03F1aq/aGW8+Mj1zfnv6yNb0wtbGpWlxtubznjd418+2MJusD6puLu1x18z8+Jjb1Y//17+/9d7tqqjb/cPJZGe+pHKl3azf2R759frmR9e9Y/EsEtgXruTDj+/em91alWVbn34QNx+E6aLYOnanx8Vup9bbEaCld8tmtjWZfu+PfnB590Jz0IkoAwQH9oQO6hUdzNUkqgcJSJJdSYYlTlABsZKHiVMiMEYOpaJG04yfmVlOqqRoha25BCHAOLqivYDtgVUwkV824eqTT32jwT/+8z+MxmMpFBVpoVQCI9ExUIublGHcu0lBtdQ+jnU1ofUI64m2I1kKme4mEEVAC3WK2KCoyPVOpnK0rVMKYR07Kccdld5x0n8qC6L2aeSlQckzPHMoobbMgwEPBGVHDCPtK7Ejr9qriq26IRVHFNNCPjXUHbkOtX0MFtFtN0gyj4EaoxFK6ljEnD3TxFcgpuaJIoAws/cFFKLqJcSEB5tSkElVYojOu8x/Mk8Mr1Ad/NNzk6ER5Nh5a63JxstJ50904phof9WUcRxlJpT9tygxigDeOxG1RaIZIbHWOQao957M9mhoMZP60JKnfffg0Ji1E9mC0Q62sS6JnUaxXBijsLUXKiB2jhUUY/ZREtAJipzSApECSbee3OSzsa01CZIVYGRLw5AcuTRXUnmhOlmRoTk4KTTbhrRl4W999MEvf/4vjuiVV3/1re98oZNDkeMujI4Xo6c/NV0eLG7NDk+f3a3K+uO7x7t7a3Tl9vaFnfOjPdq+c/OD/V/fHlejRy9fOPfIbln50An7uiqnjjfgbWeWC0rSqiBaGV2WLNEkC0Xe5c5JIkLexMjesyKIivdOYrQRu9qoNg2cRDWoBHt0o/TQyAzvWNJqqTyVSLN5SZ7bw2XmRDwb2GsGpaaCFMIJBk6yUdG0Sj6KWs0T1eK+sXZ10P9K7n0zDY6JIVFsn7TzzOSgyHumo6pXjarObhVrEfvIgSAkXdSOuceq9GvyDU2aGG7eXUg99ZNjmgXRmiWuFmHnwvi7f3aqkPjqbw4//OhAfbl9ZuPilQt+pzhY3Tv7+OnNM9O7aBqpGimi31j0vncFelBE6ITVsU17BMyAJwKrigqEI3O+7BLYqarEKMQGqPBJaZPdYbx35joiUULonWNXeMlyAwLSHCyPlszmwk5XlsjB6J82Awqqlv4c04m6wRAmWMGjQDB6HTS3ERDRCFVVBptNQXTOQaWTwMyRPalGiQwBUyCoBCccydg/hNIVIAYCc+fBQaSTdtZ2y7kuF7xY0OGhzB+EZd+v2r5dtetVq5iD51yuyAe34evNanK2nuztTM9Mt89t75wbn90sT42KrbLYq2gDMkIca9iWdqxOuWqPRedRFgUvN7mZcLc37jbQ/uTHf3fjtzc2eKSNHDUPqt3FZF3M51xqtenLO9ff5za4wsOR92xSXSmIR74YcVm7ELjvII56B79uHI0dVYFQ1W52/8GVxy//8Gs/HIfRYn/FYHhGQBTJ0njJF5oBjScmQmLsXAtKBMnkHYvikidhJkhhTptVdeCRZ46tAUygBEIx0lzD/lKYX7mqFyGG7zr53Bc+2/rmn3/zUlXUXJF6aAUthMeMMcko8tS7cTdGO6Fuk5oNtBVWm7KsdUVQZS4keI1RI6ABNEYZ4FtH5/yslrpgWlNYaN+UGwtUISqJOarYqBoalJUlKHpAQMEDYgp2VVH0zAxRib1oSCu/E7gZRETT3kCDkNTQHcsCdugI7IgVsDSR8GqDYxhsjQoAGO/QYg1D1RFnRn7KYd4YhjZ2VRLOcS6GYPkjpbWTFo5OOsskFUlTtMRq1wRnWTZMrrxiOPCA+ik7NxAd2Xl2yYcuz4VT9rUEAFECW7dkwLSRkpI/RqqAxAZ+ROAh7VnUIHLkbTodQgQACeydd2xEFFUD0JIxXoYlJaEZSAE7vbOTSX86ATayyIh2ZmaniXZilg7A8lCPUsq6mk0mKNUorFDEyFWNw8PVeLL9w3/zub/561+8+ruXXnjhyuK4OnM6PpjFxezS2b39o7Xsf7yA3vVFuTF5/KhtmubuR795oxB/9vTe17/2pe3NKcGL+qqaoGawjyIClq6nNJkzy7fkGr5eyXhj2vfB5twhRu8dFCrBOUlgLvsokdmJ9oAqIYokhgyZmWq07dP2nAO9RWFRsbVPMIPWdEmTlHOYu6ggmmSPTAg61IHpQtFwJ2x2z5QZP/BkGLc9PTw8/ym7K7I4x9vLZPdjEJkjeyRDg0ihbGWjc46I8kpZNqEZC0GBHtrqeuxbVzT9uHXtJ4vGrcefvlw20oYDXS7Xj35u8gdf33vnnw5e/vsVirHfBtUtJiVve94pLjz6zEfzD68fvD15/IWmHTW0MeuLwBsrrWglLjiOHLqoHSiChZlEEYlEoexBQiJij5ARNAElVscuarBiMB1bVhqKHxvS5BbXDCaNyUUJbIdgqGWHeiXPZVSDuV3YWE0kiii7ovAW2jTD3XaiTYggyogB5pjhXMob7MzxxoGdQVxEzF5EKHapfCDbSURUOHiCQwwUe5G265pVt5jr0ayYLXC86Jp5WBzHdr1et01oF
zG04JalhZdq7E6fHU13xpPdi9s7m9u7p6anq8lONd7x5YRQq5ZRvWyIVKylNutenPiCGaab1cih5q7WMHF9Hde1LHZHzSg2//lHf/vWy29OZbNbEDfUL1dHxx/tTh5vG39qo7z/3vVucVSPaulEnESnkJ49oSZeIfgeesgQhuO+JFmUrqxi26HWqpzfP3rms0/94Gt/zHNqPmld4cFmYcCOvWQtGeCQ5gKmEODM08lqFkBhg8V0K9UCWIpESRNGaXAYQQD5PIXTHKocACU1Q0+FQHzSEJOHcpJIRadOv/YHX12X3a/ffnVcjWMRUSlvsowVI/AEvu620Yy52aLVGO0EixGaDaw2aMVkpPTITlTEGA1rVCsugrpt3J9SsSDXSOep9Yie69nmWLxTQnSCCOqIIgHgQOgB81UIXjUyIpQT0RienVnDBVJh9qqByasTpCc41RnMLBKMi5PNeWyGbgpeW3iTZqAmDaAhrtnEFGTyXc5ejCmXgLyNXcFpF70k/+jEYXBpdUMKW8P6IQyjHQxch4GjlPwATLRX+rRQOlNYCdDhtwBGXUyCfQU8ucTCTE0hHMh5Lyk0WOBIFZiKptJt+H7lh7ulvGEuAd32/QoJXXAqXJQAMVOQwHoCc5JddAy0zP/qS5OzykMjLQvzxqMzO8jEg0hDAitHNI2eNX36rIFOODrScg2ICmtoF8W1a1fef/fNmzfuMOJ4vA3lTrqSdi+dk/l8JkyItHW6mp45d+/g1j/98ufarU9vn3n+6c+dOrPpy00ADp7YMRddNGm4iAqReK6GOygSc7UhqhRDJzG262Y8mhSe+75l9iLaxb4oyigSxTG5GHvOrDOG8dBsGCggMCUjQomdSEj6URUmRyxZmWaPjE0ohtLOfFg4918xj2w1Dwg5KwCS7IhtHzNURYNtYrCpv1k3WMYhRJP3CiiR+NLZNBEPs8t2cYEsdOXHnWCuNxjGS9pJ7JQjaxDtIKhXsrlTLGetf/Zrf/Lyj/7HWMv04vjD/Qff/dMvPXpp/6f/6/4Hr3WnHpv2unZVGQgfz2/Lg/i173x9Uc6e2T79Dy/96vndz64nu4uw2WjVaK0tuIWsRBrx0RuII0GgnSIt2JAoKsHKdgDGMGfHBIrSD4CCmg1YftRCjI7IRrXe+yj5PKbmNe3aSrcz/Xgey9uUi5STY0OqhWwOHaMJlYmUKI27LBok5ZjzHkCIokGcOVeJgaUkIlGEvSeTNxBKL52qkkNEt+5Cs9JlE4+XYXaP5w3ms7CYx9Wib9u279cSV0wr6IpZqppGZTU+VW9vTSaT87u748nu1vbOxsZWXY+5KAUE9l2oRHzbsSw6FSgClzXqxKvjkrhi8SJOuSJMaOoXvm+8a5wcV1hu+K7qm//y1z9+/5X3tnkq84jWaQPueHF0WG9Or2w/ce/m9eb+QT2qQ4B4KCs5CCmtiApFBfao6rbkdYmu4s7F1tPKY+ype7BeP//F51788rfbe7HrurIqZa1mFE8OJkQ0R+vEo7UNuOlxz+4ClpshhqYmbioy+gykSWc6WEQpDwAarFtIVBn7XitJ2ZlJRpqhCikY7ElLVe9GPjoRlW//0bdWVfvGB29Mp5OuClQqRuBN8qN+G82WzjbRTdBsotnAYoxmimaMlgFmc92KQGCQMHWo1iiOGuDg7TPn61rcHNEjeBGPCTmZT6adQFk5sAbVoAjgklHnkB4Z4kCeKBgtPxrjiZ2IqJKIQIU9k03LSRLok3CjzDdRNd8oZlsarZScTKw8JUuXw1lDlsVIIiravDlP2gheJKZO13j91jMSPDvTGDgQgbLYDjpIQTA0JIYEI1uvIxFfTG4mwo45u/VY8WX43kPcaSA3u+aTbJMpo3SYVS+JOYG4RGI2dX6GMZPtOlkITfUJ6EQCgYRci1045zjGSNR7X4QYQoyFg4gyO4PBJbPdOPFwTkDobDlpawBw0iFAjZttowCQaaYNVM5Ef9XBhcuC3OC3ggzwpB6Za0BEyu9//3v/8st/WRyOLl+51IXNypeH9xdlgXJPZwdL6dz+fP7qa796ZPuRZz/1zKXLj5Z1HWIIkUUcs/YhCLTwAhXv8pJd1UgBGQ+HOfU59s71XQihY+bSuxDaEAMz+7RtKcZkDyIx9sNoiNiUmqIxGCzpnFHug+EwzJT59GpAKqUCMRcx+R6dPAs56aYS3W4DnXxPqi1hdIRoBE8HTsWnWrGVcH7VPMcAOe9SM6fJr99UeQxiZlKOJuZhDw2qhb0aUQQKsEgQCtAISJo2UQD33BYIZX377nx1qBifavCgHM+vPl89+ZXwl//j7fm8Pvv0ZHHQ+qpaNHGyE//4T69Ozzm3F2bdvNyVbZz+5W/efOa7X1jMfOc2Wx058dJHrIEV9cvOxcKsso2DLBJEelEhFWKOMVgC9h6AxhhFI5tuanAvQboynNEp5NnK4I1j3FgbuQ7erqllSBOoLEww5iTI3E7Ye/u3NIxH6rlzqE/QidVDDhRFYwxwXJZFIBCRFeoEhLbvmmbdtrpY6HxBs3mcz7v5LDTLdbNcti2AY4RjDT1IfIGyrrem1WS6MZ2enW5v7m5vTKfluB5PJmVVs0MQAD4GXnfargP3BPM1cYWqahA29TEKhgOTlEbGUXj4guGSLp2jjn2sdT1Ct1EKt+3f/PjH995+b7ecrO8HXbB2kRpFX/q1zI/vyv0w2993TCGqlMSelexJg45Ea3DvOLJGHblurF0odBSxIiolNvMHL3z5D178wjeb/T5Cy7rUFnDEfqiKTgyMbDCv0MQwN2FFAtgkA5ZJBTxwSK0ZSPTok7Nn8ShH9pynH+5ENNEkHREDSRiQlPZwNmlVh8DynT/89oO/Obw3PxhN6r7sUBHGmHAzltmmX4xjO9FmkxYTaiau3cRiQ1oRgISjspIYW4F0POqEyoL13vu/+cynLm2jYtvSYDgw+V6LfrJBII6M3mpUYA1UQAQLIyi6EgDQE3sAkIjEj2BRhvYOLMmNPgw05oe/jP2c9DoihtaYNQlwouths99Qa6gSzQKicGnCmNidpKpkGILJtk2ag9w+A9AoEgWOnXNsg3slm/4qKVNid6QhQbpPuTsc/HeQYiVOtEpWT6sOit7sopHeXCrZk29kHkqkqGlQSapKLE9mIXB6KA1Xzb/MXs+o45mYY1gAUZCY57jJWEDTIlo8NDUchs46fJbBhcAQMitrBOrTU46Ex8JWEKYX06EMyZcrbWJD1qRplriABaET1OONZ5+/9uqrv713sJyv4uLouNMVa3l4eK8sq73pqeevfnr6/Benu9MQZL3i1SqQ46hKrhdbLwDtYzC8HaIE55ijvd/0W611VWIUhQfEKEghxK5t61HddYu2XZdFVVYVswudiIhzHEIQYiKw8wpl4yxIANjGe0TEPjPbJRBYJQKabyuQTLx/7ylPQ36Jkqn3ZoxmOcFYcqmzgiI3wxJErMBSMU8Jc/4WBYhYyXlvJRWG+QGd/FIbQ9rBcezAJsAQ88oNMQK97fvSICyMCO2SAZ6spas2j0M3Gp++M1t80kip+MxX9s7v
Hv35n790v+VLV8+O4qiT+0eL42vfnHz+G6euXz9667Vq5n8hW/Hg1SLuXj79zM5srS1vdG7Sh1ob0Jqp19hFaVVboo4IEWRet71Ib45YBvgD8M4TQyUA6p0PsRNEZvbewYYymp1YIeQS/5/A7FhEyNm0KCny85FNRxWpXEq0LAKp6uBNxuyQjXzSk251qCZsy8od7wv2BFJHCnDsw6pd9926WyylWYajeX8064+PpWmaxbJdN53EReiaEDqgKxyqqtydTop6tDU9vbWzvbU12d0eT8aT8bjcqJlKWzUQOg0hhiAhdCGCnWhggJyr2HkDasHOyGLEhmtEIkUB9mDvtVDxqoVKCakVNaEmL51Hx/1yOm5pce8nP/rz7t5b02rS3OmwYr920kRdEnfAgtDKrNvvpC/YOSh1EK+A+qIU7mhENGJ0rK1irWW1Ln2sZFXSRs1o5w++8sXvPP/iN5s7PTyTl2gevQywsidldVqI9gmHzLRFInONGmZaJxC01ZxJAI4hq2qySbDvlnyXbT2lIonnf28OxzD9AZGhqgSnxFAGsTApInmCR+CwsVP/wQ++85d/9aOOOzdxmGjNzTgcbfvjTTnapNVUF1NaTfV4Im3ZBjTEEQnHBUHZRwJExuCy2xtx+8k7/SfvnL14mfs1k3rA+0jCNshcTMcQoAf1xMroQD0hQKGIBCEEBxS2FI4ZmbQcmbKSJbaqAlYbpKSeUIWzOk4TOQvMFCSag0QmNquV86DUf6rCVJAKBYTZSWLAGdIAqPqhtwaljGEiVBVhSnxjVYkR5NiWTiSWr6ZMQ5RkAyrWkGYpnr1TZ3S7iKwNethmgV2ygNbMyTMxQxYNJqT7YYvk1JZmypXRrOwz2rYie944mWqlKVZGrMU+rIg67yCIEr13DBdiNCA1xoefWsoOTEOCUNNfc648HkrNIEoWD5SNMa1BJ2IdJgIP1ZqJG/PQVwLzCawBWnoOXSdb26e/8Y2vvvQvr8iqCaHfqTdP7Z3+9KNPXDh/wVc+CDqR2TyAAQ22i6bgIkQIdybOsksOskUDKgjZXCt/ttSzEHsbk0vXRVUtSx/6jhlAWDVr6DgEEYmbm1uqQiRAjAqNweAGZgcWtVW+RmIIRmGwB2NQmENEUxkvSH7XoJNiTg0SQ8LIRAnJ49+QhFyak2ODbtCHECXU47EIyLPLsUlVKOMhhHS8bYiRlHEpMFHCVcBZDenSWDtZVgToml0NIQRoD+2UWpJG0CDWZVOPxxunX/vg9rTaaXirH8nvrh/ea6qdK5P54v7R3Dfovv3fTc4/Xv/oL28dHPbllLEr5ejco5994ZfvHL7/xkIur/z5U4um0KWgZW01NuKiI7B0omsV7eA6mHeLqkhQRIU6xwCcp+SGz8zMHi5GgRpxMuVGyuVNolhJOg4S7TXz85AsOhSJXAJORgPelH5EHAe2MyhKVDUTWQP3nWewK7wzVAoQ7TS27XJ5f9EcL9aLRZgvuqN5v2xiu1DV1bpdNKsAaVQ6gnrv6noy2pqMNs5Mp1u7O9tbW1ubm5PJpBzX3hUgimLeCAhBl6167XtRAN6nmbgSXKkURJk17UASZ2CZSLAoZi7QXDFKNStxEmGIV5CyA3vAc43OU1tK2Jnw4mD/P//Hv9AHH5wtRut7LS1LNKFvxC1LWYrRArq160hQjlZd5yBVdOgEnoJEFMBauRVulCpCYEZgCk762sV2cfCF57793Le+9slqLcwC9d6JeY+qsGNlNWINyAyCeLibORDFbD+Wu5AMPjKxDpMFSUcjPfycD57Y6E4GqHIgP+a45c3zQomNdUTqQWUUciWo8pEje6KCV9pfeOKRr3zrK3//yt9NJptcrjbQTGgxkeNNzKa+28JiC4uJLHjBuoBbmHOyQ69pjRBBiXkNLUAbLCqH7733zKVpQOnZmy1sgAvwAV6Yl3WFElSTBoUHCqAABCiBAEgB9IwSujaZIaDmoc2MEDrAMSNRD+0SqyTphJphh2HBEBHnCzOCTCtPokoUW/RpySARoQRqRNs8bSGijPipz1SgFPsNs4NtAcoXPROelZnJ5RWKVk2pZgdHAsg5NjKe5Im/iDjnSJ1lJvtFUVWCMDs2T4ABA7GoZ3Qp63xyftLcyhqL2fJJfr7ULDecLW40TnRGUYgSkweaAGuFem9eg2oMI1OwuGxEoooo0U7yYEs5pHm7KCdeYJKnXQlzTl5Nw6OvCjCQDODT58y8LVU56b+t9rS7G3RNqEUIHFdBCj/+4pe/0Lda+drVEVwQ5HjZS4B3gMKDRSJYRES5BAshFFIGEUh0rBIELMxOCUQuSszNEFJ5DDF1lbU1BmYyu7Zt+17KouCyEjFGCsfQsfMWrzhx+iUKokZm05bQQ3UVrOSjh2uO39vLYWP/7OCZLx0RZXuY1IOp2TmL5rrM5gFgZuedI4+YzNokxiDivQchQh1IgiQtSxoXpOczubuoIm2NVNvZ6cjuS1Qh4sLyUtc1pR8jgDp2gaRTWQZUihLLenK4/w5tP1nouOuat/YPJ/rIUSlnz1ftga6x+Hf/4Xkplv/T/+dNlPXm0yNXYL/XydalM0+/+MIV/Md/eutv/vnGl3/4NeVNLAO10DVzZHSsHaQXzyoaRQLQM0dVUY1KVFelaAcgSiRV70gyl9B5VoFpDCwrG8pEBI2RmMj4/yIgSiwNmwzZmY8n5aWwZkmeBWcBEBUgKpyzs1gUzppsk623q0WzXM7n88X8uGkW7XwRmrZrFl3bhtAFCZ3GoCplNd4Yl5OR351OJ5vnp9Pd7d3t6dZ0e7se1XVdGjwWQX2ARGmCoF0LKbk8TwMXCmF4EiIWQWAGixmlqPoIBryDZ7CHswkmkZDCCLSkBMdKigLm6UHJfcTqbImIpHE85nu3Zz/70V9Pu+Nzk3FczAuuwVDmwEIsXDD6qI7AWjofAWbnRTkqPIsLWkTyLAVQEmpYz63CEC9uY7ZYX7n62S9/97v3eg5p2w+kUxKSaG5XAkGM0VtSsApTkSVcQ9xLwFryksmZWdKSC+UEJzGb85EmAI+S9j0hGRjW3+XYbXWfalY9IE0yyLAEEVa2Zp09BQrrGJ974dkbqxvXZ9fPTVBJW6Ir0YyoLdGOsByhQUNomRaqCyuKjdLsjPavUOqISijgXX3w4aw9CJtb3TqMNlD2Qh18h3JN4x6hqWvUQAt1ihJwpqkEUXJkwskGaI3RCg/HMKs40/NqjIEQhh0nIQTvkuMpJYQSAMMcUlWDRCM3xSgq4gpvExqk6tXq11S9cOqk7S6QZxqyylDgKLEZe1tMtEVILkoMEm3jYqKnGJVD8+3NxTLIGNKqQGF9joLJXDXBSYbgJEGdJ2wpUKLqWT+U/8w4SWi59TF2uC0vs50ngA05csepDxUW+WdTcqGcRG3DBTnn+hgMr88oAYPIzIOgyo4H46AokVOpDyIjqZ2Yb0gKfdnTJ+qJMdBg9qEabed8pvYP1G5NJbyTUCVA1Va3rjt2pa+pF+1aAoKqsHW7QgCERKJKQFkWIEgUUrK4bKs9zfAoxN54FNFMdJwjgmi0D0s
MpO1B9jZ1vW5FpKpKVQkhiERVp6rNqvHeO2ekY2KGSBTAM2W3NIBYtCMCyEtqvk7QhVQmpqqPh2cPeaxotz5dRsPPaWjRCIwYoQoyyDyCQI4kIHsQgByRSLp3ZjuoUdlbYGIiDJ6qg0MMW8+b5phBwUTOUHaAQuggrB20VXURLfGasQKWJJVg4t+5w/tdQdVZLY/evBO+/Hms3ohvH67Gsvp3/+dHWuz/1V/dw5nTKOMyStByWZ7p6PTfvvzGuae//pmv/uuX3pv93SvvP/fYDrcc5uJar6tO1owOEC/asC3iRBejKIIjhldNZmqw0U3UkF3kbEYVVJMHe5QoqmmzYgYImNN2L2SsxDAtU11kSI2gziijhXPsnWMPl1TSfR/Wq3WzbBaLxfx4vjg+Xi6Wy8Vi3Tbrru3a1nK8nVp2XJZ1PZ1ujTe2trem0+l0c3u6tT2ZTEb1yHlXeAYhRmmDBtXjVS8W9TKTjInUu5RFc3wQwjAUBACJ5sGlBHh4OBEFArjsEYgKKNsiOqMUi0TYMpPorOvnHCXY3HuEtjZHtz746Bc/+otpaOpRIWZ6mnIesXoFwAJPvAaTBhERdaIOIAdlIcdErJ7ZCxcQEngOLAGFcrlomtPnnv7qd78/j+iCeLBEYYGIciCSzAEUMFwIa2YxOoXzdga6IfGKRM52eypihEOXDU5yFhUgufJS1rvDQHkrxswwWIbnpCKYTwOZCZyBfKbfVuuZRYSBoatyLFCUePH737n713elu1+WoQx9SatKVxuyrt2KQ8AKWIgsCQ0j2OzHAWbhpcmotwZ7zz7M9hfN4Woy2hy7446rkfCKVp7GhbYFypJHsa5QaHQRMIoUwGkaSAQVkrwPjX0h0rPjJMSwmiL/jWBeubY3m8UZacVE2MMg0Uy3EqDPtnHQhsGUEo4Ve6YGo/ygUtJtkLcNDfYYnWAYiUxhzTA48Xo52kyYkr8dDYbTSTGWtwVkeecA3OZiDarqcjo0LFcUSXOkdrk5daQD33pgw+f+0e6ISnYnACHvGs+EHs2Dbgxf+TkbPmWy8heREEOyZUNMYCWnbjS/EKIqVBx7HggM5lACNeWX6uCdQnlOSUwshhSSDosW7SKJKiC+KHOJoGKmemRcFSKQGfMSwTkXY4whe3YT7BmNIRIpe280HCQFjlpatkKJwVECwysrQna+VhEJfb92zrN3THCeRYTJJg6BOe13897+yFFDDEos3htKgRA7qJBz3jvvOInZVJxzohFQ53jYfOWYVWhY3JsqNB2wYWguxTkBppZX3cBlGKgAVrI45713IfSGhRI5hTN7JmaOUR4aEqhGo7kn8AYQYpYYmRwTBYnGJxCJzB5qdGkFApMHRWgPKHPpKweKuiZllcIFH9jBj5wcBzele7cO/KknF6s7KtQcd7cb3vwUbr5697vfOdvt6d/86MO7sjOeTlYr6rniUX3YjZ++8pWuPv/GnfVM52cff66bhVtvfHC62CtXo3DcccvaQTrhrgFINaoEYke2FJ2UxayhB2FD9tIjBTkoHJfs82IGa4sZ2dnNZj1RoCJqcr4YwETee6tBfeG80faYwQjBd11o2/Xx/GC9PDo6mh8v5ovFfLlYrtv1umv70EkSP8IxV2U5ntiUdjzd3Z5Ot7a3tzc2NkbjcV2PnCND5kTR9d1aorZ9ooWy8yk0wg3FNzliVjELWYsNMgx1kuu8ESjgUjhSBYTJJJjIvAdhEtEaqYkWYqfw5CPQszgJkDW0ZmhkknpUuBG/+us3fv3T/3gqxo1xLWGmAsfO4B0w4EmNIZpgHIWIA7xlXw84SvsrfeCClQEPOHElt1wfN4HrzRe//4O+Gq3W6OGlU+mUAzs4VUVQCYIAc1JnBmxhF0TEdtNrmtCbJzYk+RgOIcmCEqVQyMmKz94tJ7l4AiNtZZYx8RKhXWMAewvbOjB0yGVtC4NZMmJNzj4p4Gkt3e6ZrW+9+I1f/af/R1mg5rZGP6bViLqaWmoYDfRY9RiYC0Q8HIFZWWPCV1ErdQwRN4Jw395vN6Yb1bQdcRPIN1qN0K7Qluo9Rr33VCi8wBMKQo/8ToAAUkdwBgMJBDDEmDNFzWZQSJyixPVBjMHEwETJz5yTx7iQOcDamEpM2Jn3UefdwKkJNg32SUaCiHh56A7lAZyZIQsRE7HE3lxlvWPvvcRgQpokLDpJb9YKnfSplnFTq5wmFJo7WVJIplDZzreET56glMpGbU2vF7M71e//yoybZ8EP8l5hZAjpIfTETikzG58suT8yICkrkYHC6WtIxSl/i6iyKbPphDoOQy+T9XQXU6GuosyG6SAnFVKjzyW0n8w0AYkQbnZdxMxRAiXWERGlKW5MyL+1JJLOBiFGAUW27TcQEslLglJoUighbTVgq0lFybGKFL4Uka5dGUCpqkVRcsp2hr2CiEU0aATIFQyFaLTLaneNjZerqcsh21MdYtqDZ4MTstowcX8ol2yS4mMa65vfYS6bGEjUZc5yFyvTiG0PQnd4MNucbm5vb7ZtF4KIwBGHEG3TOsFJjKELReE1ioAK76JEI253XVd4D6hEldArM3unxEGC4Ql2z533phE0WWDUjpQhLCS+JCo0egkcOLJOe8yLD/avX3tuu593q0V45/b2569dfPdmVz968aX3rp/5ytUPfto8WEz95jj0/njePv/iD8488aXDOC5vN76vax6Py1rL6JqCG47HXVxR2Y1IwjpAsSZ0lMYaSRDUr9fewxUm4bXzTYpelVK8UAkhwmBV5wBoNIacpAYPlHYBlsFxYRMU5xiKruvabrU8Xi0Wi6Oj2Xw+Xy6b4+Ojrls17ZyCV5Wu7+zceubReLSzPZ1MJtPN6XRre2t7e2trqx6NyrIqy8IU1BI1RBHRpu3trGm2O/+9/YYqLIOk0OK9FRcxu/xxrqcVYKg+HDaHCKEwACZmoYQAzsR44BAFBVUgH6NAe4aHetEeJSvH0vlyVK4pvvXm+2+881J3+7ULGzSqqxiOGGBfxLBiz0rRcHsbyJIDSmhPLiqI2AO2usPygQeX4ILEKxxToeKli9QKv/i9H4629h50EsmrG2mvEIKwdoIIEhrsDa2jcVDAGi4LfpLeiTASIQdIi9lFRZO5lSEjRnEni35D3UDDHNJuB+zKpqrG/s2gSh2AK+O6mCei7dtTVWVlz1RAvHBFTR+fu/Z4896Vw/d+NZqsa+kqbSptsQiy8HzMugCOFQtCb0ZJSI7UgcgDAhKwIHbiyZdtgQXI82hzGcmPtWp0XGFVw5dYN8WGeqCEeGVP8Ektle5OCs/GaHBAqdraVCvCAjt5LpG8sUCSVSsiSsnAleCIk08iAIl5O5JtJRGYNaZmcF6GfJuuYfaNtOXtqTfXdBwBEmhUtZUOueGjhOEYUJl5vilf284yQnZutg0c9lCoEAY8BCAT63DadX8CLlsOI9vqOfSvJ0RKDCazpkBLMMigKLWnJhFsk028jbuSHiKTXNl8GzTnWoLj5FWZX2dI8bmUAJxnEuLEO0/UNTpBL9OH8Ozsc5GzNj3XPsPBAQZPyqTXyPk4tb
YMEo4x5N9ijn3wDHiKIVqREGAe9ATrg5358HlmR5wisf1zDGnVgSGPIgJPSIxlA/md8y6GGENvb8M7J+BE+hAr8Snt+zbD5ROjfdgkAqrZ2EskUUbSDhtkcZgJtxLlPRMCLYdnJqHZspLECCJiRlCRZEhuJagoFHGyUb/8Ly/99rXfnNo7fXrv9FPPfHo0mjB720YOaCJoWzAR4zGiD8G63yhBFSEEx9441X3oKJLzXkSdmSwDCqPyWYIIiqDKzlcSOh8qXWkxLrq2o4IUwseFW7hwP545da3dufve/i/lYOJvy+e+98wH8+7nv5h87rnxI1+Uf/jbdntjr5HgxzvV+RduLqb763q/qZtPuoBDXtW75emDGw/ae7PdYs+vfdsvq3bsfa1YAB0pS+xUO3AklrLwokFjBBAtSDuCeEAlSTmNnkymKhSRGILz7J1nR1XpnSMlxBi7Nc8Xy+VicTyfz+ezxXIxPz5eLI67tgtdF0IbQmBmm5aNRnU5rjc2xru7u5vT6cbG5vb2zng8ruuqGo3IAYIgav4PIUi36kijVbWJJUYMkKg4ZiRkSPhkEpdoJOaSa1k5LzJmNb/QdPCMD2B7J4Gs9LHTiUGbiKgCYsNXDWqPjssonQornONCRDT28Kg8/Lg8XM6vf3DznVtv74dPJmfk4vY2x4O+78klZ15RRAlku6s5+Zvaig4qIJG8Lbn0YoQgLQgMLnzwAV65JDgC+0UT//CHPzh35bGbrQTyQmUblCOROOmEg1JPiNnl2I6gRqWQ3O/JAm1EIlKoWeJmHRjMnSkpMIw7K5kfKpkYm05mIlsMFzC7vSaCpIANv6Y0qtAcrDXjUKScMnrKfB5UkdfuD7/11Z/ef51X92puRrqqwlIXnuaiC9CS9JhoroiEqLaDhmzKwIQA7QDh2Ibtvd0JTXQBLqmueymbMUYTrFpadzoq0ZcudKVHyVQqeVJWGBBh4zoZ7PwFUOdE1QHROQ8FYPZTUZWInI0YUozKsHoKgWo3PMlGkf+cZ76AaT1SejJakA4Rk1NvTZ4yfm2NjTlRpfEYMRNzYQKG7KTBmXdlAKtFKeaYyJWRT5g0CcegQfeKlHDNDjqnLT3JellBm3PywNmxPiu35fm7c3Y8AVpy1uGYroRCbQWDQZpCZI5aGXxRzZZXGqOqCtkSCmszErvg/8/WvzZJll3XgeBa+5x7/bqHZ2Tko7IeKBQKhSIIFB4EwacefIqS2BQlU6tHsp4xm4/zh+brPGw+aMxmRmbdY209akkjsdkUm6TEF0gU8SwAhUIhKysrKzMywsP9+r3n7D0f9j43At2TBIGqjAgP93vP3Y+111rbVJ3Da26Yp4Y+9Q1G9peSRjcVNfj6h4VZFcaBFtyFxjr0vQ8urdHA8NVUq5er1zWAtOE22vxdWLXWUlMSCd1ZkHvRKhYQAqlqyUUmpKdtadYtQbaSrFmmaQKQc2eqxYxgyikzV1OYxq7lwDCZfEelX9JYYGcpJ4SFc3w6j9e5Zy1u0iZeg9+4Ya2JbnhAA8d8/Va4aOSc3By41moGJ+5dXO4/99bnVut+PazHcfz+99955ZVXT2/dmUtlSuLSE5WUBMmbeFWtkpOXwVYhKWmtWopqlfYeXDUgYIWKZJ9LmCq86Q4t95xSrmWSeTU+HwViGYl5fDaPT/dyJR+/c/GFX/niX/zN2+Mo59+oZbj3yt2VvNT92z86/+1/+on7X/74ve/d0nW/H+v/7b/72l71cpRX7q9WpV48K+lwkW9/+pt/crF7VD/36ud2j+bd0w9efXD702/8UuqPtYwQAYuwKmotRxcBO1lKBLDiPjwp0cLc0wyWs3QpA6aq0m1qwTiO+/OL8TBeXl48v7jY7S725/vL3W6exlLnaT7UMjMxJ5Fkm/V2s3mw2WzPTm+fnt0+2W5vnZ5ut9vNZuMFpTlPqRYzXF4dEEQVb5SWajV5BaYKU3W9pLRCmz6Wk2WBSmy1MmPsO6KH99ygLIaEJmJjck69+3u3gli8IGyBh77WPA4/O1Xni+dSrOqcU1qdrtDjvR8/evt//tYHz9/f50vexfYTp7mfxvJ844YTnmoVyYwi2vZGkEACKoykUDqgABlMRDJ2MJplzLScRXuVXmZOV2P5zX/4T1/5/OcejTDmYrkw59yDVoqv5BOrwPIfgMunjOWkNcBFcTG2W3UHvBidHBnEKTYya0M6b/bVZjeDr2cKdzDQ1i1JIFKNTKNGqioVIkY1Nac+VauuqqjJILMVfXD/5K3PvvmDP357OJ26etRJZQIOajvYleBA7I2TAaJOpfQdBf5JJ2jSUufTvO2Rx32xlaVtWvfjiseeU4+55zxgzqil6y0bszC7KNmcPR4VnFd7ABxGsurkTi/g4gKqijBJ8uXlwQkgI+l4jKpaq5KURELUmgO3H0+PJLG3jc4TbomuWU3B8jXGG02L+lHvuuwxqzW6MTVy6xA1b6p89yfJ5BtYYuYSra2ZL8ISQGP3eqs/fYbnRCfzFCvCGx3tkpxrzBqaoBNA9RlENFftt0UxZlCCzaKzzYH9qjFJLIHQ8MW065mwe/XJcv6srbbw66lWrrOIoZTa6KL+nepOqxpwGtCmsK2qEFDbWQ7euN8e8a7O2gBbFQgpqveybOYdtWrK4sQxIX0CkGJrRfGewZ1P4vlxXoG3nk7tVq1mpExlhtnJZjPPs6nl3JGstbgt9lyKV85mEBF1o+Y2EkJADRCRokagwqz4gFrcl0pVASulqK/+TkKgqrJlX8eAvBlhlF1+NCMIxAmW5B8liGQOgWitKoL+zZ/+sqn2fVbFPJV5LqnLPtSHQaHTHBw6B48Aqtacmg8lIUw+e87S+a0iZLEFEQpMwtQ6CVnFCORqo+ReJ+Qu4whZybwr/VnPg/Vj/8G3H/7cz371n/yd//o//uUffvTk/Pf/Yvrbv7x58cv94+npf/t7+W//5mfHp8/efzhIlnyyf/Xl4aVJLn98vDyfnz8sL24fHOWZlouvfvGf3B2233n0ztOHZbv+4HL3nbtnn5jKZOK2XHOpU9EJOVtbsli1mtaUU5dzlpRP3EUPZdJ5LruL/fPzi8uLi4/PH+93+8vdxWF/NR3H4/Hgs39QJeehH7Ynm+3Jve329Nbp7Vunt26fna3X22G1Wq/DPU0rStGith+LBLBcQZqpiHQpLcRHH7j4/VS4uCsGUu6aGXZnAZj5YwBt0vp4DjXIGM2n1sOyRAVsi6WPYyveE/sv8hhFhkXREgq8Vy2UjshWtEt5WA+X0/i9t3/49g+/+9Hzh3arDC9uhvXGqDoVXatBFFIsF6NKp8iWTbqEQXAAeliwDoIkDFEkYQZ7IhO9IhszuYGt0N/qR9lzXf7+P/oX9z775ff3GK07slPpR82YgAJxy5fS7nAD4gMElACfKC4KJR2IV5rzW69DGczdgOMl/A74smlrN4uBGTSe4+L00eZ4kcN9MGZhVuKJG2bmy7iaFtSYCEKpKeeaNNmEMv3Cz32x//ivd9//o25QHMWOyiNlhs1mI+wA1kBqjVJhJiqG1YEwlr7MZcZkG
AGFFMERaa1dqhnl5n/QgR2dLuWtJJLznWgZVAlcGyAn9UaiVqOCypCvRpAWEQ/dGtYa6iNvAySlNuTzplIslk3HDDWCe7sL2hhSaMoOheXosZq7lTbPpzC4caN3M1hB5D+DW8UJoahu7K0l5xzPWYgmzUfazrrzDlxkedQCBXEw2/d7AGiboVpT7J/YzIfF7Vk134KT4JzZduTj/UV+WDDhNrpo/xVP6pJ143GuN5vOJi8m6DotYxNHewuYfHnDkt79t/skV9rlbnwHhKbbayA6Xd/MvBpPotFcWjwWKcOs1goVSvtLd/KVtjWZZgZJcq2SUhixaMdi3KCiWkspOeeUBGallFrqauhFuN8dupxTShqwledpJaXWMs1TqM4ozR4Eja4cn1wNAkrKFK2lavC/SlXtJIE8TpNN2vd9z54Clw95SyIpBdHU/KVi6TKalBqAxOAD7j4T91V9G6yScjxOQhyniaBISl2QAzXigtVS5jLXY92sN1MpOWczncbDehhArLoESLbOwZ5aSlU1rVUNBtUK60RgJo6OUuYkPVDM3Y8VOGyQoCi2rleP8fOf/7l3v/f++28//Jf/x//HZ7785rwrGDHl/vd+v7z18w/ufUm2V5/6/a/9+K3P35a1bk8mG/XpezI+f3onb1+80336bFUv98enfP3BT58NZ4/ffziw//JbX3r4+D/M05MuvzZrMhTVqnpU1JQIsst97hKAJGZWy1yO+8PT/X6321/tLp+fP7u4uNhfXeyudofDvtbS51RqhWG1Gtbr4e7Z7Vu3Tk9Pt5vTe6enp6e3bw3r9WpY5ezgPKpB1cpcD8eywML+ICD2A6a+X6kGVh/moc65ZRMBAIgt4RZ7qwhD8uUuDXaO7c9NjRt9lqEtIwPMv0czYli8cH0RtqiBsjqdEMawEdUmFfSYoS5V16o2roezMstf/tk3//xvvnZen+Wz1J+t8mqlh2pQ9oKjqiblatKhYDVhvcdBdFwPW5QJs1BhZtwDIzAIR+UImwQF1oMdkKGdIZMrcG1ymxf16d2Xzn7zn/7u8Mpb7+1lh+0oJyNP9toXWetYeUVMwBFWgGKY4A6LKA4n15Yl1NECH/slb2PUc4i6ehstxt4EFCO1EgtkGv6sYc8X/Gj1Ba8OpcTPB5HN5Z2JvvJNDKCLKqvPJ02EWlVLQWVNdpzx4HT76mtvfPOdPwQF7kshBhfbh2TKxU0wqgqX7bIgtRZR3fQrVAe7RVVFAGG4LYBBUl46zdZfoVjY/5ifD8dHi6nvBEvG4hwUUFQApZnVWkip0KXyu06UBrigAA12dm+SYu58RbcyVo1ZO9qaUF/O3ZJT9pzZEP+gLgd3GqbubiGEuVW9b3wTx5DNsPCknOlq8WqtO21rdBt82wh0RM45WmqSpIYiiWqldaZB93UwHGaQmC/CLDHFODMujNqy0QnBiLZm/OuocqAB2lYlO6RFLyCsqcQc9Yo0XrU60yk4CE6zT4ki2V0zG1zmIaCwJjbOZqhhnOPilaUt0AMQIUVrbKOM1Om2jaquFHKuL+hFromw7deAYakYDACTWNHi2HJMxTyxaddnNnmGiCDbPM1K9Kv+cNgPw5Bz5114KUWSWNWcc3GD6IpqVUCEi2m7mzEu0mvi+wIlGADMpVKiXi6lGqYud4jGxp/mWLYEByUZIJe0xb0xe/YdCWZNMxa/izbbLBRRQRIhMwHV0hgB8UDmruv6TlW1aNdlU+ty/sE734Hq9tZWTU9Pz6ZacspZcs7p9PRW7vvUixVMk2pN5KpWm+a56kT1bioD2Zw6qzOvElRBHnW/vX3/v/oH/+g/fe2/vzhe/Of/4ff67f3bL2+1L1rLu3943N7f3r779HMvDeUDfbCBTpwu8FP31sOd0935Bxg3Q33x+Yc/mi563a82r29euvsqNrrfPXq2u9dJn6SQdbXqBao61HKstU7T8Xx/eXFxDuBqt7+8eHpx8Xy/u9rvr8bxqpTiJ3HV96tV//KDFzab9emd+5vN9vTW6fb0dHuyHdaDpOQ4ctGqqkX1eCzHsfilFhVQU0JV5JxVUXVmrJ5BeNCXQopRTJVMEGqtraj1cljonD5/7Fu8FXbxUDhQrSAzTIUJrF5pOoQqPjxSMMzg5SZDK5AjNpA1gDarFSk1rUZY/olRoKiqfZLE4f0f/vgP/uOfPv7oMp+cbO/dZi5UlP0kfqYmzdbVYse+X3E4lOEoJ3s9ZClXWs82hZMQYDHN1XpjIXrY4DZMhgSsiKRIJivKKk1pnrB/65c/+yu/+/eOqxff33XH7mxvm72tR6xHuaUjZEqYwUItVWdH9LwOdRmBClnKJFI80AEVoISSrIAqjkebkXH54qm4FkNGflr+22sd94QDNPwC28xucS2UAPyxBLPlNgQCwYY0OMHYlJKSJKoej3jppfs/3N6dy49WIpr8572uVpgEuF1iXlt94bxPGsy6Lt+/e9crZGhpazIi4URZYe0YZELCkE3VUJxFgMgrFrix4+gw38WnjiNLAHYw02Ww5nGreM5OCUtq84wXhzg2L0skE1U1kkzeFbhZZcw3CWbqIv9URahcPOk61AyD1oUvBa2xc0MaIrL0nDfyugKMwaE15ZizJrwxrorcuntTI5MX84BIcumPQs0kSfZj4cYostRugEKvPYPVi5+Yu1arCy4Nc/GxqcRILPiTZs7risYVMGs+Hg76O2WpTbDVl8JGundimU9kLO6+k3qTznMVYcrZlzZA3YCmurGZRgXgHLSAkYNjsmS4wPG1ob2xs8kLAXrVExYaQTwRH6r5dkxf+CMJQE4SUij/NZL8Gh6ngwD9MMylaFVJqapRoNVCtk4ptQIq3gQ3BIZkdsy8qoA1NGf+nAaUJWAVP1C0JBDOqmU8CsBhYDONF8K9vn0SEfdSfSRDA7QuIINYbRMsr7ak8yZGq0q1nAl/qJI7IIYAwxAnkUlgCqFW++KXvwo1rbUUt04yrWUc91/7i689fvTw/v27prrZnLzw0osnJ3cl98P69qrfiKS+H/JKwKxVahUzlHkulXoUqAx3+nKpp5vTV077N+7kL37+/uOPpqfPr55eTKcvnfb77tObL/7hv/+T9TDeO9188vUX5t3lxdMnu+Mxz+uLi7Uc5OkHPzx/OJddkenrD/rXdMaTj96/f+dBrXzv4eNXP/XmdD4+f7a7unx6fv7xxcXHF7sn4+XF5cXFcRoBt3TQlGWz2aw33f37r56enp3e2p6dnW1vnW5v3epXQ9/3ENdto5Raaj3WanPxbikemajqrHENXWEIg87TlFKsLXI6rK9LVa1utC4REy0k3JF9fdYibf4gbG73WuecOscwXfjQqjQDOphrIZyKQyVTdrq+aAmKfoRURrgjqVBIZ2LwQT5KRlalZctOuRKz1GfVcjlevvPNv/6zbz1S3jq9a8g6HrEKqq8apEpKSShF5xm5QKbUjegGrPdmA6dBpmEzmUI2FEmaFZMxQybWiaaGXi0DonmVLZWL8emLr539nd/9jTd/6TNPyvajY97L+qL0I1YT+5GrCb0UogJVbQaKJPMm
GEnFjGazWVGdXLbQ1tPCW16jOmECVhsNPoaTwvhQrU6+OckKu1w1mjlCKqWoqIJJciZQqgotiVSNsXtaojrgiB6qiVIVrDFjcOqn2zgBuUJffunl+/cfzA9/GKHeKU+guL+6OZZnCakuilPWlKTovLnb333prmqSGIZrrR79HTlT98iEz0abWgp+9EQcPVaURQALUFEd1q3aRrjGCKYxYg96nWoVwC0hyjxlySLSzIZMorWNyZ9LjrwhrITbsZEqFEo4PRosB6PVdwcbmjjHPE8IF39qp/42/WADXJcpgLBtxAUsmmLE8gNc/3FScpbsYCWgXs8K3AeglWcCQWJTr3lJYmZtl4c1zR+Se26IQxHeRZrzpkA6Py3kb06CpGjMk8IwyMtDbbxteh1ni6mILYzuFHU3SlVqERFJzuR0kAxEit9XLawzw8vSQKmqFGeTOUQD1SopWfRrCfBZGgiqWi1Kuk28j6JNmCQKrxjRtNkNgRy1rBiDuxtMgHgMPEr66wiThKo7STK1aTr6x/WhvrOpYVpqFZWckqQcA7zl5LrxjfB6pN2gHx8QmYj5wM1pZDQzO46j4+E0CflJDO9xfX6s1duIStH5Nm2CFYW2P2b+z6WUoP1EkbZMi70CtfiSGcjxcAAJNRG6ZyYkn21f+JVf/415OpZpKtP0r//Nf/fH/+mPX3vtjVdefu3tt795enZnu70NyduTO9vt7e329mq17fvtrdMHItuep/0mP/rB9/IgGFaPf7DJj8evfOU3dum/ffCZu2+8+b+ZbHr68eM/+r3fP1F9+rCcl4uT+c3X3/jl7337P6AeHr7zfe7TOh/PTtavv3Dr9qc247PytT/6E9OxlvE9sddef+HJo4f/z//7v9yPF1e7p9NxpzrlhNU6D8P2xVdeOtlsAJzcOj29dXqy3a43wzAMeeiHfhABTEotHkr2x6M/eK2hkXA7WarZOEbtwW43ug2vGknHRwRtPNjKcDVtPgxoM4MGA8H8VTzRh7+CW984P8tLKK3xnJoBCI+tNurxWi259a0PRFQjGCl8kY6LjLwdTgSQpNTai5QyIQ+VumJ3KAer6/H774x//bU9un51Vg0oBYOhQieTQbKKGnQyG5UrzOthtHmw/QH9WtYZuscma5FuGk7N1DhAiuihyixlX8SSzZr6jB7Sy/540Z/I3/l7v/Czf+tnN68Oj/f9x1gf89m+3Lqs/V7WO673uiozuTceiVk4m01qE+GU4OroHBqA7xCxCaNzADT4mKpm1QO2szThiKBhKfQdhrUbrJfWKQV4mJM4cx6lijA7a73UBrFhoUogENb4+7iTIAxWqs7WSW9mCitz6Tbp3oMHP3y3nqZkObmWI5ANhydqlBN+xrwBdmOKzWazGgZLcIMcOAF92bZitWiprBGpmklU8Mi8lXfadwMk42OHbLr1kA6UtH5Scopr562pBjVfzWopAFJy/x5bDjfo5SsaC8kWyE9Nm70f4SSsSLFN1MtFZIPGiovOXkutuZFioAaxNk7m8nb9k/ubsObba2YtKrbBrRsVqhONRcWXHUUijEDgAZdAuJEEbuvPok+ray2ITU2mzmQzLaqduO4ecQVi6m3aHDE9DXuIcFwbpLiJRwiSvKX2uG1qbRTiYwuRBZINuoLFJ045WcDV5oBqUZWcFtmOg9beBDrY0GjSy/Wrquo0dL8PIhSmxagLjcAFM6vq0wtt+LnW0sRYEkmXRDO1AVDmklJOYn1OqiaJyWSeC1Mi7TiO01Rzzggff5hqsdn/uWhxF1n3uwgRYUMWr59kJ8OFlFAEMLJSfVVzrYVJ3GshmJiC8GxG4PXxaEQctyjafYgQl4NunOmgvcPT/kgQSE4Qj8GLClKcQ4aLtwkUSAojTe1wGCki0lsvw2b7X/2L/91ffe2vb29PX3v19Yvnux++96Pd5UWp7PLjMmtKa6Kfi/b9Vk02m7tDd+fh4w/yppdBXn7t1YdPP/7eX/5/apIvfvWVb330dcVYrW7L5uHDw2a1VdW/+L0/vs/NP/57/3jTb589+uH54/N53B0vn8oh7c6PFyMky7jfSy7T8eJvvv5+mcfbd7rbp8PLD167e+f07OxsWHV9fyttcLLZZjcONZRZqxbVAqJMZV/H4OUE3zf5Ll5baOgRcNWRFv+bhiC2xyAiigfVUJCFq0k8QBH+zWBNcYtWEFl78q7nLzHQdahTIj5p9FRkfJYgHGh7NOgyYfeWd49BLz6jAqRr+aHiqvSmH8OskL6KAiuopQ0vPnicfvjR/ODF8f33Bk6aNmoGmVSylISqKIBCi+rRpKcdYUeUGaOkUftRhisrQluzlMTRVNKu2856NFFBL1as2/R1KhkZGVM5juXwxpde+7u//rdeeu2FA6bHU77A9lJP9nUYsdH+zr5u9tiMGHCETWaj4QBOFBUthkqrPhGM2Za2GBaFEQGraub9Tkowy4CDx+q8LDrDQxtPpsKNoTyOibg2EkGItFAMShshe8Mj9F1mBITMvgrJrJoVsoMmbXGG7qAmZOI8TdYlmDH1s8rd+3cfDafzeCnoWq0mFEOCTcaUXGsoPqgwVQULNJXNepOylFoMYcXp2Hr0INbynKrX92II/6qI+h4sADi5xltRWgstcNqwzybjr64pyykltgY1wCLC3NBRQ59THe9UZZAU1CDBpW1NQVVHb4WUvHiT+WMYdazLvCAiy/swL0S0mUcGI0xi6LawG4Cw3SckMeBQmP+ltMQMGqK783PUEu+iJHaMuBVkMATi6M+zxPzTdUkRQ9tzS480AaAzklHrn8zX0EWsiZ4VdBx6qYkkYIiWUJyaQMe7JGVI8J8BuyaIKGDmidObYIF3fnDJUKmlau1ylyiuNYqb5CVby2JeS0oYtba/8U2ubr4DM7PrIxKnxloLGucfcZ+cdoAk9FXnpRQYV6uV1dg9h6rOHwbQ931OWSQVrT5ENEMpcymzSCLCjJPG4/GYmhCqwQQthrONjIgELr264+GhPlTVWkVSEtHauKPRmrVKfKlZbPEXa0N+l8T5gWy3IKwA1Eqt4l/yxGyxNNvfotc3iCcJFEkWGjAC0zjnhF/4xV+ai15cXP6d3/y1zz/5+PzZ+bOnF++//37XdwDmadqe3hJ0KW9q1Y+ePjw7vctUa8kf/eiD9fZkOo698J0/+0YxlVVGwrBd6eVx4PpnvvAL//k//eFQ7na77rA/cHrw13/yrd0Hj6aDlQmCWbA7OZEHL9829udPy/0XNl/96i8+OX//s599oxQlpNZCmed5D6RxOmKa/HOVefLnNhSEICkpSa1eZZvW2nQG9BDpZ6+NBSIPL2n4epTjdV+EisaKuu6Al6N3zZvzjirMClzgFyq8uCmkBBwNWswARSQBVtU32zlq7RpbJ7dkusGgGeiObCGMRMUyBzOm6BtQK9FBrBQYpl7y0/Pjn/5Vf/9sc2c76lxS7Y7HnDeY5oS+liI1UQ2z1qOhMy2CWXQCR0xDP3FV0M/W761spJ/ERj2i1jtb08H0CFkJitpBZZVR5t3x+atvvvyVr/7qZ7/4U0V0Z8c
5r/e2OXA7YntVh3062dVusn5iryU58UpH9UVbKIRCCqHLamwFqgfrcLpjwF2IdFIAr8hDmeK5iQQt1rEETBn7sawFryWCLOofLGcFFB/atb/z3kRid6MhOhfHcavPIenTB7cGkpSt6DzVL33prfzknW/88f+0ko4xdVAo3fTY0GwEnMXr2YDQqpvNIGBVjRXgrtM3DZsMiIBexJGofl4icsCqspo7GpLBYNA4gQDcvEoM1Sfa7e9MmpNGC0VoUKlfAIOaVk0Jvu/I2jjNXElJg/dOzi4DXUzvGdQtYMIPIQjI/hvU27OWgXytnZjVGoZvjeTeKmLD9cOI9mwCCCK0mtUa6iCvPJrAWZu5mb+SQiUwwyY1YyvPETXv0lgji2gzu2DzoG9/GQW8u4kFk8kpcL6MOQDiKJ/i9Hk3ZjdzdWDfN6Dp62PogYyEGZdFJVZNGOHeDGjH3lS1VJOEdG3Ge/1KiCNrMVh1SnKTePsF0uofP0pS0ox0poDTE4UJ2Rs/Nd9yQ0FytzXA1fEEWWt1pmut6hsVIJLcSL2hUj4dMFrOPZwdJlxJNqCqppQ8ZzdNURzTKKkRZdbSzWZIcfgxB9qhRlWVCOy2rDrys95Mdxbrl5/gnQU5Q2s1c+ZzaKIBiouLtNbqymkmlwhEEyyh3VKDJUnRIJLmS4FgVvP5+SFlGTa3VPHiy6996lNvqto0Hstcj8f5xw8ff+ub37m4fDLNyHn1ymuf/ujRU0E369zVk/r8vBtEU57GETmX+aCY765f2Q4Pvv/N97579fV0WL379jsv5VeRJe1GuTger44rHPrMLNztL9TWd+/dfvXVVx++9+6f/PH/9+7dfhhO/+ov/+KnPvu5/eGKAiL3g6F5jHjMzXllhlprUGGzOEppzQa1qma3zmtFToPpXAYXFzc8YhbWRHvQwwtFrzfdLVHJwWpH87xv8QG+P+bCxiTw/bXh2ydk9nOnQt9urBBxJmNoI+jmH/7omyZK28bj2ne0ZzMeoxI9SvJHqWRI1YNIL0ox7t9/nJ8+k1fvjR9f2PHYg7dSfa+UIWebCyw501hHlU6kSDmo9Iqe2FsR2ffrjlWIasyqOuWt6In0ausuzemk9KhDVk56eH4h1N/8u7/25a9+Qfr+YtRj7S2f7jDseHqhm6vu7lVdn9ftTs52PN3rynaGHbinHMWb4HqoMgpmsEAKm8w3Tr6IGhSmvqPETN16BNCqlUQQ2wLlbQxFEGCiBPzsYUXDRT8U/najmpeIGnGrg5Glvq7AfTpAgUnb9E2nZ6LCiomlOmnZ5Mlk5jBh9fGuvPfoGdbCSq7JNWw0DMAkXl2gA6oT15SJWEPWZlZXtwdsSAgHYCBWmKQ76mpCP1ma2B2RJ02Yqs4S8i1H7yO3GFQhZlbM1KwYCzCLk9yCbStR36CNoH180qajgYG2+ag/R7IAosJW1QPROrrha7haAkGy8acsY9HDeKHakqt5BdJcp2I1IomUaq0Go8rSBS6kJidfL++zJZIYFsvyrGukd1iNQllI9xlilLHXTHnCgQYNNqQwGkBFm0diida8trLEjUo84oeFaRvMQkQd7zPW58UrRTOtZvBNkQ0naTx8oA0P/as/Cb+iGT0hpqaelX1iKiLBTRXHM8y5hhocRZKoqmombujlE5XYNiGRUL0DjU9s2tDCRR593eNcv63wP6kaa4lEpNbqpd4CbNz448ZrJJiFKcduu8bOw6rPplZKqbW6Mmq5SteaBMYDbG3l3yJFUzNCcoJWt8gQU5ecSavELarjuAMBvAPwF7NFNhBdjwpEDbUWgjl3OWfz1k9VDTks0KBaKRlmSbIr6fwKQKBUEzFSoGvX4egs4DSO42giHWXFvuty99Nf+sKDV195+uxZv1ofxmPu17dffVxQwH7dnSYB0/0q/UBI7orZ5pY8/tEFDCf29OE3HlH17JW3upHH6fmjR0/Ott39uy9P9XhxfDZOj85Upv3Vww/efvedvyil9Ovyp3/yBy++9ODe/ZfK8fL1V1+8OowKPR6LqqP6PuKtjv1QmJLEAxmu+0G5yYsFd+NEXN/u5Zxbq3F9ZLgUms0stuXuG4BLK5sozlA0KgOwEGMsU5AAkiC+9IwQX+GuyFHsGU1FKa05Rmt//Xclg0ATKWBmoORoT5N3HwmgWrViRhXmTjpkQcXcl/nJI3v3fc4TJlk/uH91ZzP/8Ml9IzGJqiZByTYpEy0Tk9gIGLAidj5F455rdKgiW+kNebbLCfmgq6NsOtGVTFkUx4t62L38yqu/+Ru/cu+llx8ea9kReThKrtLvS7eT2zvZPivbKZ8+5/bKTkZd89JkD90DB+AAjMAITsRodlQ9GEaKzN5dGhSoi/QLqLHjLhyhI6O0sAU4vaPZPlyDHGSbl8fQ0r9/KYJv9AmOpkYUN3NVf4VUX8fQaL/RT6KJpub9nG/lCf2IzYbjEcPetuelK7IdTmaMZpNg9rpCzZhMUEA1KwwwcgOuzQyrByueAlDdQLbA1uVbt452MmEzcTXbapbejrBRZRJMZkUxCQvFhx25oBagAMWiSfGprAIqNHf9X4JnYKuxtj2um1nrD5XWvK9IhKf6EnbJNn+H3ViebRqwBCE55WTqCz6parGMN6ZxdDKXG2cgFk15CWzOgJEW8JU/+ThG0HR4JN6u470NuQV8g3Eg6f5/NeWucXAWtxAKG4vIzIKrZkt+obMrgzWWAGgMwzVaWInW1t+JUEJSZRBf+eqzKA9ALSLRYswuvvMnFE1e4wfdOlo0Z4ShAeVJogww919Fszo3oWSh1sVWhmZ1nmdSck4wmlZzy/l8bWCpjcJM76qb6UnARzE6u24OHQYJU9+c0cIng56OxJQSs4gBtVSYy0s0O68SrVMUprDpZq0q0YUj5xwyuCS5fRVA8gfRj6cahMtekKXpzKGSo09CQsYZOrRW8TVKSXTP0ajdsEDj8oT4BEJN0UYvCBtO8w0oQlNXUqmqahW/UgFkFCTxVh5mUBOI758xxt4Iv2R931c1iLAoaALOV89P15t7p3cPowJdQnpx+6oqiB7Sj3M1U9FkMhJ7SUWP/de+8f7TD5++9ML9ff/xg9fu1f3uhz/+q3e++417pyddr/Nx/sVf/sL3v/e9qdx5cv7js9c2r7z0KqyC9dHDR7uPn+8PH1/s9v/+3/+PJyfbk5Ozey+uP/GJV3Pqlj0cAErRlAFYVQ2T+DC78GLZsyyxROs26HEIx6+7LMi8178R1umVZjhVNcuq5Y8A1x5zcROdnUJzNaMXYJKinYKEaZdkQ2x0N5VWmktm1ygm/v0IY19zzC4vPYu6tDw+VHFugqJCK6Gz6GrqD9NuevLj8o23+4vzAurFoZK2WtHKbWEvY9FbpiMFduyZCFKlUkmldQYBc3SH+9OhUop1xWRkN9bdLekrj2Kqh109Xt4/e/BzP/drX/rCT1uWH+5VZVUkTUWY81xZ5NZ5We3znV23vZj6MZ8dp8wrcm/YUfame+AKtjdO5ERMtIPpaOIJDXPViaKSFuGJebEiUgHTWhocYNfGNkstewPk8Pvod8
trWe9cHBD0yaVqaeIYN59W0u2VEaFCTTmbkZKhRBEUsFLVODt5O+vRyqRTv9prX/vN+UX+8LKcyHZK52ljHA0FqFAYCsJsvwRNhkIOxo0RHB6suAVBuwU7wRW2e5zssd5zc+Rm4kmxwaY+VTUllVC1Qitmtamo1YVJ3rUC5voOdWtgY4k1YrgmIImItWAfodjbNQuUsiXshvvdoDE5kzr2FAQUxMZqUIDZIsKZqprbTZMWAiI1mDSsWaG1VK9ykzg5OEhYZkCIqbmob8HWOXsCDk09XOpV2q6a9nWjsEtdDXWB5/hGZ7XQJPmn8+osGhsynHJgDYRqcVqY0CC4hY7ZxlrSRBKR1a4H9f59hPg+wsDQNGAKiWIBIJGC2iPRUzjD0Lw5E/P5QDWleg1fTWlIKQPhqykSm2pKQWqLCuDs0IgrC6c3mnVHkGO2bWEVbtTwCvWlAWYGesSlbwlWc/QjqPEWHtFO1IIa3VEuiKlcFG5q6s23kUbzHjo5w1krISllN/n3k1FqncvsIw1XH4Z9lrvF+PeRbIMJDfcAoiEHTlgA6B82QJDGG/SzZBZC91YUiQhLqaa1T8kVGmUuIkiSkvTRuonUWkiU6kMIQVUaWduYwSthIi2fPwGEWo0kkF3xQMkZqMd5LFpKwaR16G5B8rGcl0mS9cOqV61zOY7H86O+//ETffWTp2/+lOz2T/PlrVc++YLV/K2v//ndu7c/+9XDx48f/8G/u3z4wTdOtxsrcv/+7WHq3/6zr+12z+7dX//c3xp0PpT57njY7cf9bvdoHi8uPt7eu90N2y2QcxoACDs3r3BKmiZLbv4FB68y4SczBojLYbsGcNokwIxOZSBBRa0l+C6BJbkZbjBVlxfhcjxbncToZc2UjWuHdnidLJuqkhAyI4iY2Z2LtXg9nSgOOJNMvtoDBiC7iYenfrjyJDk+UwBNKZvUUmbQDtO4e/sbw4/e7epxEkglLp6OH37Yv3B//l53ovWV4+G9ru+Zi6kodFRz8FOADDsoMizTqbwiPN4aTDpFKuwgkvLJ8fDxPO5fuvfSL/3a3//sm6+dDvnZOGEuNffFxJgn5qKq0h3r6iAnO5xe6GafNtM+cQ/uFHvD3rAzXgmOlCKcwQlaSIUIaNVsKnU2lOw4EbwDNrMKGimm5m4zQb9qgc5nsTd7pOuA4HvPAMKpUAuAaM5F0QKtKlIR3E8NUJICiAZxS00L/G0WWLE6qgxIJVkhCqTImFcrdCOHd7//6OOr0m9P9nU6XV/ZxlDACawkqAYxWqGz6CwZV2aDmVg6TbaFkLYV7WSv/R55ktWEYUI/WXdEhwItYIEWiElwf1S1GAvEqtLFvkXUCGVDniN/GNql8kTGql7gekhbfPZb50hJqXW5oLhffqS+6ASC42UKd8cXiR4Tlq0UUAyNRu485MjEsuQkr0VMnEoN+vJhU6aUJPn1l5S1FhiqanZeTlh5sbHnYaC5J2Jr76xlleiLU5DxggNnmiAki5kWjfblBrdWpXU2EpScnxjyxTAXQuhNRVGzCNBmdtiazZgLRtXi4+RSm00HnfYbVAb/gQATGNqYmFs1LqAhJTFzQVeht48wkHWuHhtT32czrU6lizFzKzIhKRc/2VStrnT04kVymDyH+2PbL8bIcUACTdwb4WZpEkVXUwxTciQ3xgjIhK4q9SJQtI3XEhdjI4bSKMJf+w61TJHUq6iaUoPw1vx4gBgVGbOgaAITaAq0/U5+caQ1ZWi1oE9AboDPIkm0KKKZrZQsmaYZiVpVa4U7UKoipygWTZPkVrZprUW6ztSCNuhKGTWam465lU9o4xTQMJM1ST0IVUuCTVrNU03FTEcyr7quz54VVJhPhjvDcG+a3vjki+tSjuN0JeT7Dx/Nu+O9e3cg9o1vff3Jk3tdJ7/6659//NGjaSebExEmpnqyGW7ffvHJh0+fPXtYpserIaOz0w3vPIBhEpTp+PY4baAv+R3sV9ssdxW9j6Q6dsUmF6qg+fQkKtEXK6S4S6jLtt041aAi0GopJa1qtOQIgbMQAoNMbtUbvIxgf5gtGsJaXUpBkSilHDQigM5TqdtkQRKMwt4UZpkUIJu/W8nozCyZkpZS6i2eWhGBipvzimtGvXbMhlktr0TqCiNqPUJE+gxi951vp+8/1Kwwcs6S8nj+/Lb2+srd8eRv1s+fvCLlXZvMMmSqJjL3IJCs0kLKk1BREmRYr3DEjKLrbrdJkldltGe78wenL/7c3/rSV770+WHFj/aHD3dzAlKCQmYkVRqziUzIE7udbva6OeoJJ8l7YAfsIRN0b7rTulffM20T7Agcq02kFKKoVslKgCiqE1BaOWVmCveAcCovdKmMzGdsjYbrT1QgZo6MeHtHJcQlgq708OYuZ4ZbUXSFrFbFy99kyKI+1aoVUDsqB3JC7sEqLAY1TLBRxiGvsL6ox7/+3qMiw5XmZMe+O/a3JgBWhUIU9ZkEARQP0MoTaAcmwSns1Cogt2XHW6NtD3Wzq6sDu4Os9lhNHDBD5gALdDIUokIsJRBUbRCKD9HNJ9WYQQhrg9/cfylbWjBOZ/I4qrgA+VGGLC0wfZF4+BY1eML7QFlysWsqCYGqZYepF9FlIEiNIBo2iDBqGIxLYqmlLm4StSqYnOtRa5mL2y6KBPJZrNlMuj0w4FVVC9gBG0Vcj9paGnGAAKqZVU3ZReha1ByUJpxHjixJGTZJiA2fUd+FcWmgxEtBAMeNzQChUzmi572BFTSYHJIbSuNXqGGqS29NYfI9kFqg/4tJatQVGt7Idcle4uviJSok/4Xe+JYyk13f9zoXM5WUVVUovgHJDKaKhLmoUGot6i4c/lltqWpIwkq1BVFBA6AYk/GgtqmZmTHAFosvRYnBAKTIxitts+bofWLC18prU0BMdOl1ok5pJirRwgqc2Bc1Qa0xmnRXHcfIWkXAawZEVEIxinSTZ+d1qyobqYTiW33Dvk2qPx6tp5UkpHuQVa3emwdsH78eRSsk2PwuJ1OtsAlMBBWTy2C8QkiZPVPLHK6vDFxDtVAwrFOQz7Xkvnvj06/mjHEcv/LVz9X6mctnz548fnRx/nS/u7z4+Pjo0cVLL79kHD549P3XXnv913775Y8v3u36zTQZYPMxSruu20/z4eriM/O0h/QAhs3zvt/lvm42twSrsXbrk7Msa6AaDqo9SEUUbQpAIUnKXETEWGlGiJaZAtUiyd/zrJTkN993lGv1rrcdkNYyewNAVF1Kbp+6Z68X/dq3ElWaLiMDQopZVvOvZmECMgmtkYxVxaCWodmYiOSQaZCFKBST2SpVqh40db32ChFD7tZPvv+9/p0foJdjQVcVaT0r8/xs3h02r947brffevzkna6DqNW96KA6VaiUQQ8akkzGuTSFdEnAMpZ6NWtfn/R6/5OvffGnf/VnP//66a3h8TiWyynzlogmVJopk7rLsIqmHpJH5CsMdgUc1EbgADmABykHxQg5Jk7AERjNRkMBjzQ7VD0KFSyCQlZYhRXSVAt8cxDN1KSReE2bMQMbB2aZ3LvOdcEpBHSTCJiq1
doEqbLc4KDcGmBWgN63mpmp1SpSBSk8dr0q87a8iM2w0ZAVg6CglGQn24cfPXr48cVZPr0se5FNRjldX/Y+8EkIfyUVU0ilexBjA1kBgrE/cksSe2522O6wPXA7ya2Dnex1NSJNk3FUHCGTcFIWl/iES1Lrk5rtYPi9iO/QqLHfw9UYFqM0H6O2qW6tFdYKV/jRW+jIS33iX2mJegmkBk8EFsUQmSVLdJON7RWv48WuS5lV4JwO/+HWRTXgm20Q4AERqlYDO42fwsLpMBpoQidKMkoLx5z9G8wW5kWD4eGuU05iblt+w5LUxW1Bf41ZNaKfN2VQpsJPNKDKKGYURiXaIjw6ndzJOFwG6Wq6rOFUNU91BtOWovxmVnOTS9HWOv+v/sTUzUlVzsNywnfY4SbRufoLp5Sq2TRNIuI2LI5MJNKES6OcUoJBslhB1SqIrU9Ls0uhVceL2ECPuH5BDtBI/NcIfUt/0dCjzdni0eX1vW9/vA9qZWKwA4UxD4FXSiElNHGv0AatLHdZUq5aXZa3HDAGZuHcW8dtzH3gGxsLSm2pnICpHx0JYEgN7EkQqmUukt3hQYUiORyenXPgb2ipnm6yk2BuoeNnQhGibSVF1aqBzGQiFSgMSxv67pWiR6Izq2ZpsxFwZaaqZZpKzrrdbrP0n/7UJ0W/PE3HqVzt9082m9PvvfPtH773/c2m/8EPvrM+zbfvnPV52m5FpAKYylGrlBliq5PTi37z7rR/CcDVx2/td6PK0yd6/tKDN1PePz/fiZ4O/ckwrLoh++o/xVQV83Rg6mDFKVxJqMkkJVQn1kqpTLnzytVIN9UJcRkFMRUGEabuanA+YCP/q1ZFGMkEecdrFKftuz0jkMQEzJRMy2aZzGbiXbtkNyoo6FSyqEA6MsGEEINAFlWrGWGaMUwrnVm6OdcES88f/jj/9V9PuGLNadoQqvNUutWaG3zre+987a++9fziort1FA4GwAomx8lVJ5l6EUBNmAoNinU32GU9f/5c+3rvpXufeuOTb771xoM3Xh5O84h6/nxi2jgoJNSkJRHqayMNxmSVVoAJGKPrtcmwR92Do6ISe+hBMcJGcAQmn7kWYM4oQFGdDMVX9CRxDNkUqrUEiVbols7Xcae5lCysXaDhEcsj7I3EcuwVXKgnDAGwiAC5cQK8eCLMezNb6LcErDIGrhVajIUYK4+UYypnmx8+3D0dbT2cXik7WIYJcLo5ZJ2CWqyFEBQJmy8aVmprViuXuJiHvJuHKzu9xMmVbfbYXNpwwGpmP3HDQkzAZDoWmcUmtRE8AsUdv9TfcTR3po6oqaFNnILU5qCaVx4RkoQx7TW415NLWxuNpV0XAG7N1F4zwqo04LHNDw0ws9ysggAAbYuOxuIimGmplWDKDsRQ20J730/gisNlk4FIMjUTTUlqrbXqorz0T2ytH4rhcduhFG+I7gbusTzcrmRJBCJWqy0yxojTqma+Jlli4Ylj/tamztrm3HALbv8I3oWCoFLamjzPRHQll0UP3a4/ETZnManV4P7QdczuXhlWIfHmrjOxkKFKjXzmjLbkVtTqJUSICZBSNmAu85AyGSXMdcfh9k/qBYblnFU0O0wWbF4yvC1Zm9Cei2mKH5F45MwgKbbwBbULsFDf2ULnXlrPGx+M5A26AbzMMropGg3J0QttG1f8J/U6x0tcKr9lzEzWnm9t9wKxhjHkLF51uXUJAJOWEFIS53OKehGkFrBKM00SEXXTB0e0b1xR+GuWUqtqTpJSrss4XE3adLz9qXEWmszMwqkskpEZRDQlARKtajVKhs6O7auh67pVTnOBsajtn53vKKBMXV5v7z4ok33xyz/71he/eLU7f/+9H/3B7/+PV7vLO3fvDuuyvT2f3e22W9meYrPRW7f7Lu1U74z5AwDj+NH6xC6f3df9cH7+I9WzfrvbnL19Od56/OS2Sb51+6zrtqvVybDq1qsTs1KKClcysNSRoNYi7EioWZc4HvciItLHs8OfONLRPxibhI3x+PjoivQZIUzM3LInATRkxvhIgA7IZonoYB2QKRnIdBcdgYpKZuqzQpHJZJZMBchAjjob7shuPm6p2vc6m5hWdOPT59M3v7YqT/Kqw2GG6ow8JEidznX49pOn7wH11j2ZL2nTpJWpT1WqFhGpWgCxOVGIYlKQkQ7Pd3qob37x01/55a+8+MkH/UlnPcbd9Hx/xSGllD1aFLe9YL9wiFEhgBVoURTIUTiCI2ymzcAEHmGzwZPuBFaimBU1TGYTWXwI4IAdRAnVUiG1SW6XPzf/2buEprD3+2ctV1xH5RsmwkQztLJ0bUbhEKEYFZbC91GLoxQx/WVB8xBllTBBmcFJbFT0Zocka714Uv/8W+/vZXuJbgau5/eabp9c5jTDkKJhdd0sDWZr5K2Uff/4mC9weom85/YKty5weuD2gJO9bfbYTCU39jgx0SbYbDabTeAsVoxaDLOILnr2MIQwT2UL9+QmvdACMXCxq4N30aHJTzCK0Wa+gWTLItgJy6lm4eCNpn9zDkHLsnYmcO2kpcLcDVbUrJQqYjktCkJ3YfRY6QQohmrVu9JglVkOfpM66u22RAoThA+3l07Lrhs0W5DrDj7wvIpsFvbK152KE19VVX0IKhAkM6UF8W8Rp/qF8oS9eFSwBU1t++TdbM8xWBdD+1ojDZyczjzR4MQ5jM2UfIdGEEqB5ffFe63OZRep6h1bil5cw2e9lOIbamGN1Cu+H1CqhfRIi8aVhIuIijAVr6GKSi/BEmjwCa69t9rjF1gDkohVVffHb2iKf3wL3rpf7DijGsZwrU32SxRCkaX2jd9o9LE3skFFVQ1aPZ+1R98rJKItpDMY2+pBQ9ijqFkSmN9frSGE83vjRSglgox5NevVmsJcrFidOwY1GnLuGjCTnSvgZiMAQKaUGuMfpRZpXloFi5RMwt26ZaIo1Sm2iHEt0rtZMcskcmalqu5TzqYyTZozVAtoKZNSzT8rqdqVcoRCiKtxEtRuWL31M2+9/uarP/z+Dy6uru7dfXDx/PLRBx88fPdimg73XzjJedK6u3X343svbAG89Mr9acw4u5rwjuaXLz9+Mp5/8GKR0+2TswfQ8slvfm33wktpGJjzy2d3T7th1a02OUmZLDOXGcYiycfcWioyey0F2QvA4FqGjQZlaajY5EzL1JEUqDA4nkLXiUJgmcxAxwCfs6GnZlUBOkNmpqwEgmBAZylQZiKLZd8xp5WWfMGf2yU4GRRkJQx1LnXN3ob9+WX9xp9z/4FuejmO40oFHMpwHMcfzPKtVb4Y0PcYxlI4FJ2yYKoqmI2JVjoJqisKcNS+y+P5ONwdfuPv/fpnvvAp9BgvyuXVEQPQWbdZ6aTB7IbmPhnNvPxUgBCV8JXwtawH2Ag7Gia63MiOwNE4khMwivN1VWZaURSzyVvhyMSEAqmTqPyCX+jFfHVTGW9Sq0P0UfBe55Rrp1HPzE62ctZnq7ElFCqt04njXcPqAGDg3qoswAwFOAHiRGhMQCZ7YBR0xXrrTrofP3ryvfc+vj3cPcduQ4Fl
ZSroRwyT9dvVTlAFTIIMI2qmqfGIvnJ9Tntid57i7s5kZ7d23F7i1oHbC7t1yVuHMmBH7A178EhO1FlRyEpUYFYU865cUUClVZdTO2zAyAXRYogwtmgysHf/04AxmiG27UYZiGBJm2VJgPPHtX2VXFhBN5opAk4IgqGxrdhCoDXuZDxSALD4RMakMWgYaDR3p114wKUkEVmCWvOpZiS35aXkupuNVvgmOuKYpIgUjRfmT3zRvdcDyFSXMYvKjaHkNUAfPLPYruce2l5NVDNEzxQXiDcuduu5o+IuqjSKO294bjJqjV/nBKYbWA8a8GuqJim5RSPCORIkwjRDFZ6GRfqc51JNUcMxkwY697hWFZGqlUDOeZ7nMpWUc+c+fKQFASuq2jbTjGdwyRJxu0AEIEx6XSV0ItqN7tBF2Gq0xSPFIWJVX/mwYNsIORYMbHuLJSVajRVPlCQabbYias+mkXBldxMB+3133IwQgzO5wz6MbGf4ujel+VZmimOdEUIC/FdZPCEovs2QRMqJDeGQUMqgFk0ipjbXOaWsMdWtIvSlf+0Me2dNgqqVbszUCORGNaiYJJG+77UqE/o+TdNoQDF2siEns1lkBRTwIFwZ55wSUQFUw+XVSOk+/dnPazlS7M2fel3ky1M5TJPSBjM7//j5eDy/+8JdAJvhTDs5uzV94iXkPHSC84uLv3z7X13tqmmBfPD5r/y0yePZPhR8/+GPPjuNZ5vbZXOyvf/CAwBX867vBhYljayqKswKyboIEzRJOzBoroStJAMWVg98aaAZolCyTCZhski6HSBEZ+xpQhng0q4V0UF7ZSZ7WjYIci+1VkuWh1ylsiczLQHZ/LB7nwQFFZPpuvazYvzBD6e/+obuPhxOZdrvq0g3pq7WJ1rftu33h02vY49R9kWFRQA9mephAxSIsDrTUgFIzrbJJuPFfnt3+7v/4Hfvvnzn4vFehoTeZEiq6Ieks7ILXDKnpKaS2PhhMOf6qPtCkEYcDQfYZJjBiTgAR5OJmIERelQUhUzALGLhWKNFbYYVolIUVioBc4LuYujkMENjDqGRL6O0ZoNL0R4xf4Tkxr9eh1dtPvkxa6NGuWm17dNVRYGYoDd4ECzAjLnHESDQEUckiB7F9sYDvve1d3Fhx+H2M02TrAq6gtXMYW/rIsMkW5pW0wRdSRWt2UwlHXRQ7Z8BZ/mFp7h7CVzp9kq2z3FrtO2lnRzqwB15ReypR9gRmKATpICFVLMqhgkoZlOtJVEVM+HJuQAV9Lt13c4uKPt1KPdYz/g2a5izBajgYc1KKeJTQ7LJBqBV/TRQFvDeDMjtoltMc9no6V0GXHukEKe/0pMR28+jDVbdvkNp0UH4UxmOleKbjZbCKgpmiyqglWfwIGhLEdIoYd6S9iLFLJ79YAw1vNcNwxJ9sYRWg/jq70ambWnUADedDhZcaH8ZsK5fE8KU6qSCYBg6pBsvFPolNTNz36h2WCXlZssVtwV63S35nNHRMpOAmiV8uH2m7fsNxRsyp+yKv4ghNGgubYVCTTNzKdVztvQrLJxsxOyiUcauE5uTVhBosty4DwAWQ8d2VVq9hIATW/Z2RpXfDdEbz7J/HWaaE73wWlDqnJMCJdTdZkstZWoqvsUISwZvVZ15cyMkpUKrVix6VcDVc3SOAhHiZ0G6LnpUDD7yNQNTglvZkQA1vCgIQKvWuQBY5VXuM9xat92/5i7nUhx3wKe0NZrwdSvNaJEhxynqTTi7OpnARERNm/XrPNciqsLOeCAElkGTpKYyFxXJFmN0ljIrpEzzVC6S9NCcM9T2RH7h5VPJd+dJARTMECOSGg/jOGJeDdvXXv2li9037p9+dipPL6Y/GtKt7ckm9SPqjzM2P3747EfvffDtv3nnzt2z2/fP5mm3GmR9ItNRNutTsAACUcmi6tGjdwKBN0ps7isLtQpIodFjoiRTANkMVgzZDZw7wPPnAGRPybIBeqAHEqwzrMzE0ipVqXmQk5Oh0kadUk7Iil6MaglMgBkKYdCqVrTP+XCYxz/9en3/3dV8qCd1QhFFKYcOeH/MfybD7nRzWpCP/b5WgRQtqodNlzQN43GkKK1POQFKuGV6OV7qsB3+8X/xj+5s7uye7GWTBLCZKNCs02jSER0ki1atVnOWWpUptlxAfYkvzVf3VMEU8CxmYCKP5ASdlDPsCNRiKNTJ2zVKJZSihqpaDEqtcJURq8tKYLASE8fkK+5uQF/+5Ku7OzdWpSHmfa369xqq+QPSrsmV8WC2MnthcdA/GwEjikjyjQi0wilDgKO6/kSypBWevvf823/+nbRK9hxXJydzNxTmytVkqw2GgvUkk4gJTecjdabWLBDIngNk9RTlldXLz3j3EtjLds/NzrZXtpnmFXZmO7M9cQXsIUdgokziS5Tp24/E8SuIKcWoCiitOns87hNVwuQk2oJGbLmOS0ugW6zXFo+rCBAtOLVo5qiyQ4sIzi8CLAx0abFDujHmCUQx+9IoT3uA473+a9x1wZvhoNV4sIwBfmx9MK/MAvKN9JQkBoSLdJ+tOXbUGQ38BOlMKLa6G23qrapZ2v5d/1Ai5v4OQQKGG3f4rNo/x2JdCdMayhgLa6lWsESPFjoLttALp+8rACs3Br2MQmSxA/jJW2Vm03EkhdLFXyy0QpKUzFhILNIrdJrDA8altFoKFDnnaZohBJQiXc7TNPV93/c9l/EblpLLb5VruizyJgAiSSJZq78FwbWdEOHzbDjtVW5e2JRpgeyCWHpJQDIW2Co+rVZIKdWvuaoWrSJONbTWkYOw5IpVU6cMO5HbS0F/U16BSRuNU5iZEFld2lyWBvNtPz6yVXFbPHEX1+hq4+Mz9PMiNBNKUSsoXpQ6NK1qqsWESSSnzgcrKUHIWqosbEQw5cTojJMG4aAC2kxRFGZWCY6OJ5WSvK1XLUmQfQOaoaIYhNAkUpWUSXIWqMNJZhliQJdzUjWoSQYli2xVp3kkZIJkAOSKLKjCdMzSW8qTHV75xBdfPH5hs7HL3cUf/P7F53761a99/Wv9mg8+8XEePvrkT51+phvG/TDuH2+233vy4/vf/s5l7uW119+Y9s+lq9OE3G36YRAgpbxZbUisVoOkIMh58b48OwAECZTmuQFYMgOYVU2YwOwJGMhAb6T1Jj05CHqzbByITGSbMW/P1kWmP//GfxpOt59763MjCnPWXE0gA53K7naDUsVm7bvu4/e+LY/ezsRxkGwFfUFRKendbvufT7OOHCbUalOuaaIrrzK6WlhrleScUyiKSfISa55H0/Tb/+Af3V7duTo/dtuVjWazsiNmyEpsUl15meFiOYEreVQrKjQm3lZg3gQb7GicaL5wcAKLyCxWoEeIKljIIij+M6oTpIrUeO4cTMPiR1Rpod8wA1FVuTzPaBiYWa3FKCJiMGmsWYSBrDlnXdRhTgOdlLpEkuUPAdCn0wIRmmoFZoAmDskKUFA7m5VHukWCpdTf4pMfPd1/tDt5cSiXEy2VIe23d4m+mkzoi+axHDrhrWEYpx10FjFAi8qonXK9t7LvXnhupxd
qE052aXuQk2nMPBB72Gg8mI0OIRCjcQIrUIhCs9FYKTUg5/DsgT/LkOIgcqAFBkN1ulRk0MWPQtrlsECNyZtYZuiTGtxrtkxNed3dEO3fDBkO6/pEZxk/OykYoRcyt/QFne/oWnitGgvisKDadNMbNvDamp9lMPHMzFBLMTiVq2mEWsatqm1XzdK5Qtr0n8JUvWQLwnPYk9zIFKoWr6lBXvIG1/0Xw2KcRteZmIFsXubRyCl9rCsNRNNS6uIztYwgU0raBgCoYYQuWRCmWG4I117TrFv1QFMrNcmLEVlS0WKxpccdVD2LqZpVldRl5kSgquWcS5nN6CbMzklPKZtqiTqDIsnzUJvqIrpVL+mSaNXqgyI3HGrHCW19UytDvJpwHIVaa4Oy2VBmA8EmMm6ic/+NxiTNGiaJWSklJ+Sco81dHn3Pk0Q1M9NafDdUBhoOwsbGdJdKEl6jqrlJtiL6eomljUClSSgHRMQCMmAUdk2aBkkwSxK/x4EjQSaYpJt10uhfnDNPE6YuO5rhspxGE4hq0KsKN5Dx0kdMmJxFlgyWXVhMZcwpVJjMipNmDbGFGk6KEIIC13ijb1EAAKxq1QkiIhnZXWr9shYFmBToIKZqCVnnY8q82mvfb377d363y3J2541/92//zaMfy2YzGKdhXV5+VYYNL86P69uPvvyLLw/5jdPTB+fnFzJ8//HDcn6+26yG8/OLV199dRZo1auri1vbU9+X5UI6eh1EUaNItgW+gmt8U5Zuhpaqfe4UvUFo2YQyMPW55mKDYi3oiAzNik7uPOje+e733nn3W/syvvft99/6lc9JydZBVqYGDKlgzppKrTpbX6Uf+vd/9Ijvf2t9lqfjsdTRKnorR5Xv1f5bfTcl3uo5XxmN2aB5ozqbGYmpjBTCOmKiFEg2TAQNHA+Hf/gP/+FrL9272E1MKGVmT+mJDFlBZpREGX0mHWdbgyoRZRgA88JMaRVqJpU2mRVIFRawQseCCtikLEIFimICC1ylCncXQeClokBjVsHlvwvDIlczWoO/BdWa1wRdJOLypMgKanB2qBnMLBJB0+77kcb1J7v+B3H3CSok0RRWaALL0CnlldqMku1oJM2oadbL1aPvfihXtOeQmmsB1ygmz05OD5IHHSdsRuxZ69OLUvNZFktqJ5v8CdPnxyJ2cnelw9lrH8vdK5Ej1lfzWg6oR+XOOAI72GjYw/bGPTGCs+hRWQQwZkGtbmLuYFXRiVbIqlp8gYuPdU3NNXqRfRw1MGhYdsTmPULMnYQAL8oXKDFQNTq7WsnYitHWKAd507vs3PrXGjhfy+emRtDCTFBa0LSYwpo2XDYgCBFpySPylBNjhFyUwGwJHeETW9vfeJr0+k4Dn2yvZEFp5XImAAslq4d9rzdaQ3o9p/6JQ+NfMlusTzwVm/pn18BrITEKcF4ngUb49uW1dP2SX3gCZrWZt5GmllMqtTIg+kB7JCU2fVitcynVTa+qxuAhpQxiLqWG1TMd8ctd9qGA86zbBuygTosQnl4MmUlVYailqu8NhsE3VkeBa21eYa3Ht7iUcdYkCVvd0HxEVF1wJe4hHINjqud77/urquuLpOGQNBcOhcSa4rRzMxe/8xpDdkY/mJKvxaxuFwO2waIXOa7h9ewmcO+QokUVyf0cAJj53oiFPdc8+NDGXP56akFuj5OqZjRrkjm1qqT4K9RqOSc1pJxqqcFRj87ezCC+ntssEhCYEi2U1RBHH/zkwkgNY632/76cWjXqXhEW1yVDGWIQLBIs5+H7VdCQ22ly9xV/ZqOU5cLIaH7ulJyK1nmcR+jp2cn/9n//LwgIU5lsmlUVKUmZ61yu8mB9vlP06u7dF1Rf3L4+Q8rucvfqJ/LJybZWlSz7/UgRybmUAlWRbMJgZqo7OHYeW1QnmAhzUV11a0VXZ2HqAbFO0jpZZ7Ur2IAbqRnIKutyeneNDv/+9//tk91H/+U//+f/6v/9r37zn/ym3Za57Lu8sVW2lawwopYj6yqfpGxS+O1vPXr/j//DV3otazU9rjJkzTqnr986/Y6kfsp92XFzKlOZVlNvuZjKlIxCZEoWVNCA7D11ggj18uKjX//13/jSlz+z210Ie5szq1ApCmTabD6NRvZ13nEfdCHuEVBf0wSooZpWEyZUUMlCzC7oVbPCpMAR5r4QFTabVUINhdTGdq7BOkMY0wGMbrhFzagbA/cK0mUAlHIjKlqwo7Obl1qt7cQGwYIGeGwLyMoDqIhb1bk3cPUjB1SzCaawrHoEaRWYhDCtTCnvnxw/eOchD+QVzfXMk2qFHW2/Hg6rYUrTHhuhQlSg0DkDJ8VewOFeHo6ytdNX5N4bH+vdK8BmcFfKSDtSroCDYFSMwB48uH8n9KiYYe78LNVQiNkw01CtljL7FIFcTKGclsWWEWOPy7UJ3zK/ix7vOk8z6K5OG44XiTREJAnPgFIKzAQpSfJoln0uFb5+bV7pSc0pDiFgEs+oERYdhW5OvQtr5v/fn2WUF+8bpoRoeDqaWo1A6E/wdeqNHGGQZuMAI4N5TAHEi7AYG7ct03FegjSOxsuMtOMLgxpoIITS1BLhxjAh9TKjiP9elAouR7BR3eAxOqKh+FDbYKaTVlXrcxdTZYJEYltt3bi1btIAApLMV0zFDfSBDbqcq2os72sgQXX2sm9Bpqhq12WaUaRWVdWckz8otZSiNackSa7vTAO9/chIo2QEZ5JREcet9BVFAb8HXLPMgW+sn2jlsSfOlm/qDRW2iJjbVgThcBl6xK9SGKtnyOD1sRUJcWTc09ureDOYgmEVGyYzhiWp+XeYRm1SFQbLKUYVNAlDNNPQtgZwRoO5ooaNnCVUrSoi4YqlBkP1gVnzMtUoBKMSrT85fVD42hJrD0mFgUgRLQPJ921uqmrZOV9hI2iI6K055zatoVsJBHgmSCTiCVd1BnOkZCdBmmpNuXPKGUxLyaVWtVm1uKtg3/dVp5T7tNqSMpVKYVGaVko1lbt3XlCtqgaRUrEeNiRLnZOIqWbJ++OUcscyZ2RfpZCkhyXpEpCKwpCOR4oM1TIU6A0raFasNfUovcigwymG0+4Ifuv9b/7523/0hS9/+Ve/+qv/p3/5f/7sW29+5e9/6dlh7pL0+nwto6kNOHaa6jA8uTr/xjfef/e9b7/34cVXN2ueDHxSbJ7kpFwU+bM6vK/c1IJsKsNudyFdNsi+HAQZgEx91UJkIixryDlnoujF+Ye/+iu/8nNf/eJut5N+baWAxbS3KakRxefXyJZRA28k3C6u9Y5uWmWGaqY0Nalut09RsdmoBlRFJd3ZqhIVrE7cp5U2Zy2ONiP0Pf4fi142YMj27AHWVNrR6YKgmwpLQFuAtYiElceDlL0YXV6nDR6V1iJgKAdbqR1rN+ARGgqoiIpYOLpqZmGFZk2PHj5+/N7jbtPbwawiQVpLT0yGAfvNMA6DT7tFlQKp+sl5f95vxiqjbPKtV3TeDhc5F9RJcQCvILPoTnGM3RUYYQfjERghky9wnIHZtPg+QliF1qKTOmVMFSiSsjAJk3sVLPQUUy
vQ1Ahufk1dkwMg3egt/1d/4kH1e9AClWVJ7p7L+BtkkG1JpP/c0jW05iRa8PZ6IXlVb3JFPJyZnzTeeAsEkej51sNPdDWijD1YArlmZPvwDQCa7X6QdUOjOQsk5xTWNNd6lp/YmeNHyYJfjiU93HAw9bbPjcSDcW1mUNDX9nkFCAYkmBIQRGu2S9oMnxVhxyVN7uqopDVxgDRX1fbvjBor56xu++GCJoSoW8NtL/w1p0nJMBuJubsZwJQSgC73bNYiXvT61mEP6RnJOzlrF5QQZduzEZ7axvD5iIhBNopTu76etooWoaPydbnSairwO+If8poN3X42MIskKV7Ili/4916Lk6/LRpEguakx0bkCKfvzT21Uhmgw1ECVVrc2y//rW++/sFZ1GrgIm1jOk7EtPQIJSob8xCDfV4XBLKUkOavWuL9WrxktAHi960u8qDYL53dTLGO5UDnUZgBSIY3pFswwfx6jJAo0wjCXWURyK6XN3xXhQjWmwIea1RhgVChMBCxqKAXiRaqYFFMVySJZi5qhFE1dNhZYrzqnnAwb04lJtHZJMM3FzCA0dcBNrYZrG0BVXXW9SK7FjlMhbXf4rsiU5TZ02w+nq+GM3A6nG9UOOCmUkrQ/TbKxvAV78hawsse7x9//3kePnn+wvb/9L/7Z73y8u/i//qv/yy/85s/8rd/4uxdPn9/J6DglGXtUsUMuZXMrP3nv3fN//fv7p4/3ku+e3b/bz5v5+GyLRExnd//sGR4yD1MRRRk1o8+r08Ozsav6wqv3L5+M0/kovdjYkWYovkCq6/I8HQ6Hy1/79V/9xZ//6tX4XNHXCQkZzBQQPVWsJiuGHJ7qzF5PgXOU+3H0NewHfD7gqVNVqTQthiJCYGrj12qYoVVRgFngMIte591YFaCmTnO2ZRKPIG6TUeW2ZoohjnBoJAZsERspQkfsHF91U3u6m0cTyaJRbv0JF4FD2WRjZtEklir5kzSTKSaXc0chJjx699G8L+thqKMCgkPkJSpcvGWKWsIZWFNWpox5fXKrfOWr4+WhT0Penkq5pZfAOKeabMoYDaPiKHKEjcIZOAJFMUNn2KSwAt8IgcrklBGBFlFNgvBN8jJYzGpAC83vz/taiX7UFGQ8RJGeowNWNLjOo1d8tbGbbnxBhNCI7T7+zAE2LLHPotdR1YaiCtgyYTSxkJTaX9CaBMe99v1U+G0LKs0SmZbY7FErOktj7Gy+pgvRSzYsKS8MDm8uYIlCzSdu0SvHbFstrBaXbHDdsCkhEHeRUefL+AY1NjcvH/MqgOzTAA+y3jwDEHj77poXIyChqaFXgeItjOdTlbZ1wDlLuHHvHBm9IW61dsiZU6KIqroJYq21lZ6RLP2JqDUW7sGr3Gs+o48/G+weehz1XjLuAK+f3AX9WLYGoz0gfmgT8oIlYMEVDE2zbgASZTnT8CPkhiFWfeCvgeHc1L3FrbEw1ncgF6QkwiSGi2AYPgelyq01vAhyXNrH+26hItdwCOAZXkgvh83fFKJmjM9CiloNrzSKqjlyUKpmZleZuwyMvgvSdyZpUNaEDd13WVIYtrsViLB5RTUMvFVlwE8Itb1zWdayOpRDIxIAqqnqXCdSck6d5GqmqinlohWlAMgpBbhgcba0FCOytEF3u50p51qKV88iQkotimQCVXMbUWNSLZKyk8bNvNiSGFGkZr+jhAOSVlXVIFmhJ5vP/OEf/scvvHV7e6KPP/wx+CjnNE7W95vcn/an9wv73ZOrZ4ePJk46yBUudnqYUp1kvveJ25Lzf/jT31/f2/zX/4d//uqL2/2z919cSa9jj5Jx2JQqeb/ZlKfvPd3/9/9udfHRq5tbms4uythLOelrLnicbv/13dOPBUN9fvfszmGL6cPzepikiAxAlr7fSD/VpAQl91SoqVJykvHqgqn+9u/81he/8NndbmcmkhB50b2spMB6c6rfnDnRzUUQbgFw1YLHPZ/5qCkqCCShKmjVrJQyApqzAMWc1CmzwEjn3zeBBcrN7Cs0M/ElgO2xb4+lF9V0gMhxPmuTCQBLKjUwTrLTqGIKEqALm7wyVm1Edg4AU5ZA2AKZAFUdE7JCilkBCppg3KZc9/bwBw9zyTrCZ9yqhiK+spAF6GETeDQmKnI35ILu1jzn8UKmkxVX08W+rx0lqUA02dH0UDFBCrE3nYiD2gw5CkY3w1KBgbO60MiKqSpmYEpupCMwgwhy7nPnPdt1W4bABT34WfxRC8f6hY6BBtBHEFyQ+3h6PVC2GXD8hOSOQDVVVYegYQ2jbSNA8zWFDfWLv1zuikiD/RCdmQ/WFgTT4kzEG1rms0sY8DCs7jhlof8VekHdDgkjqQDIObd23g2b3EqHaO6FCizGwg2/vG6LW8W3nBzGIh7f8e4diIU9GNwHDwShWq/7tUCgCerSdlsDmuKbEptiBQBpgljcSHNlpSS/yGpKQJiaeIspZVJVq6pO0TyFdSXbdfYLrsGbUJ9cklKruuja54Wq6h8gyhifgbbJTrPciF5QrzXiETJaX8gYgsPC9YVodMB4Nn1DVZDIzMwHsWiWZh6Nwozn2qST0W3HrzRAsiBGVBIkNW8e/UU0Hgk3OLtGzMHkLsGIUUDjEHgd6dOKGFR71RPIcbT8S61LuvupQZwk4Y5pAcC41ZOPn6lVYUoRieFZI6+1Z9KLhSaQJRCmM4HnL4fMn3G5foA1NllFPXsTzKb4IINQdxRHbHMCMpOPVBolMSzVbsAA7oXeWmNJCKGq9qtcSoH3NJoUJaderYAQ6ySbk9MBuud2Y0+2zY+gWk2SBQS6BM2rQasOq80X3vqly93+9PT1O/e2l7vje+/96Onjj5+fP0q3Hv7Df/bbp/dO7/UP9FH9/qPvbDbb11775HB3NdzfdKf9Fa62D7a/+ODnX9gKy96efXg/z2u9TDoPUrKWPk+383x4/nz/b/6bl66eDKcvvV3mgY9ynqba3Tf7oW6+rvvvP3mY0qneSqPpPI7DWZ9Pht2TC+317PTuxe5wNV1tTjflakYx0y5nrbVcXO5effHeb/2DX7v74PT55XmSTCToRJBMtGpIphMwE4mSKAnsWMXK4m3RLrobQPpTbAZUeCuDCipYKIVQT7cano2AFA+0RAVqW3O0lLzmsy2izbccP2M7VNH4NttEA0V9M4AInZGBKHJjmiySDM0XwXsAbzSk+QF7iWxqSAITZkRrKGRbLqniNYOZmk0w8T1JMO1Wm/Mnu49+9KTPve1hQzRmVFoBC2wCVuARWPlimNSNCZpe1rTVcfdv/lhLhXTyVs/VXZ0AFauWZtERmCATdYLtgRJzXx58zerMkHk5hq9ENRqsGmtKCJ8hV3pqpQgQumonh1obogX66Ah1NaWy2QqgJTtr/V7zNrjBI47FdCEaUtVoEhJzjR12bLnbE240GbQFN1U3CqmNohPZ14NDgyvip68Dig/z/O4328klphAu0QRQPV7EEW7x0xwLiR69kZ3dZwQI+DEil0e+SLI3RBHmq+yXKOfbP3zpjZerpgCqqSBYTV7BqDkE7fu2brxtRz7bl
vCY//FmHLXWDJHCWss0z7nLdC5j+FHkaBZVpSmSAG/Vkg8dI2ILBEL6rhhzQxKnvLmvllBqVVxvhYI0JMA/99IO+kzfe0rVWun4umMa/s4JYhmBREEV08hmnSJLJ+eImnMw2CShBJmEWpWSPCFK8nmF0tN4Sy2tRiS9GIMJwSA4NT0ivF73EZt4DeTfK54ahfAFmwii+/X5a8HIwQl/GTrtzYFhVJFkBrEK0CKDQZgcgmHyRWyibXtMrSVLJqSUorVIyikJBI1vGJVulGfmtLXr08jgwqmk5JWW1Vj9F09aUArNl8u1H7SF2iYSOjrHgV3NnRgupMEkJAlWK76vOWprCRqsWVFFSp0CZky5V/d40SrST/OURFLOpmpWJZuqmGqiSPK/9F/hhZgmyaaGlEqtYLJae1mXo73x+uu7/XG/Pz599v533/nRxcX44ouffOMLr2xfOLn14K6cZBvk869/+cu3v/zo2WM51VfffHnsq66EtznogePIi6fbPPa5bDBusc/p0GOSMq/yri/lw3/9/3qwf7574d5Zvz/boVyNohW5e3cavlnT7U++8uYlPto93p3evTqcay/bfjuw319BND/fX6R96bf9tJ/yioJUduXi+eXJSfqNX//Vn/2ZL6qN4/5AZlcbmlVDBiuDkZpj6mliVXz7jG/puEaGYDCfF9TokiIjskmDNeWZ0e8W0+rsDkDNqpn6YsBIlzRfLOKwVCBebE8sAQtaBRktuAYo1b6JtFrD7YFEePYxoENxg6A4Ql7fV6vBpI4D7KczCkQfBtMIJjLDlzoji+ttmw4e0JT6Dx6+ezg/DmeragqjFSQTq5RsmMDOdCB68OipnCP1FqezeiyJ9vSjnHsdTjl2cim2N9RqajYDM+AboSZYEZuASTGBpuAMFueak2ZWCFd9qQgSSBG1DCN01koQWud4RGnR/IuIJKs1rpT7QsRYjVbrIhYCfMgOf07DpscCzfXHttSaWm9tIQxinqY555RTxHfzIxKDTJOGR0RSElJya0QM7mvv8g9bIM3WF3oUEripamOMRevrb129bwM0bIbgW9pb+vCCgCS1mnqX2jZx+FmvcMJt4M2mSEKImwYTzWBsyY2+TYIRGUVNg+LrA63qEZLqyqsk0IZq+ibCuH7RzfkGGK84g2erBiLlFNAvRUSG1SrgbSEUtWrViRTJ0j6jtFtlJmYGcRO2JsKJDifm6KhahBJzWTh121k3Ys6Pdoy9+Wt6Y+cDW2vDAUTP1CDPVrPHZWugSbu4S1K31u2br+zwVq6B8DTVUi01Sruqtk0elrOPXQPeafWTX4QgTANu1mlB/5OWodVb24DcoOa1oDReSFwmbwWccgJR789hQiavA6zlfvEhfM2OMTR0bhlz+OxAfG13iryeJfuHzTkpkvl0mQBQqt9iWRh55psheKMQ8JGO+ughZUm+46uqOnEmvsGxQkX06sT1PYNds819VK7wNRwUQkVNrSJs6Hxa5ysrzTvsYE6q1oZyirfFWURrybmq5lImERFZlTIlUpiicAClbbMGIcxaNUlXTSX3WkHlaDXnvBsnyvrBC/fG/Y8Pl1ef+/Rb1m/OXrh1+8FZMYhAVvNB7XBMm7t3P9o/eefhR/ffONOkwzhRd4Odb+y4Loc7Q+nrZV/HDQ49iuSrs47jsyf50dvPP/Hm+a2Tj7/9N8rule39q/nygPzDUvpbm1PuteBKSe7PV2dY6dV4ud/ttDdWZIVarlq7Taf7snv2PBf5ys986Rd/8Wfu3FlfHS61VpGOaqSqjUydqkIlur2GwZLSRCKCMP9yzxjf0VgYo0AvfloVJgaraq7119hNK+6oo77BmRG2GqsO5komNuzGqRge4G8SgdoxjsLazOjPPtzsOCrxCsSGqyj9A2L2mr5Bm1zCnXvKWTgfiZt8qfrmoGAimcT+ZKPXi2Gbpjo+/vDHakdMvZuC+yDbZmMHdMCKUoEimEBAWMYZr8p4T441i6S+TH03nHbpBXmeS6009jPLlUJpZnZUKcIKzrRJaAUyUYthMnNuNmDFUNQqrWromw1WRCCSAS3KAJvdYz8ePFN/qFsHFgNZU10mWAiQD2FnHDLa6MgCuq5WkVL4ZhBgkHA0q1opCtRr0ygzSRKmQoC1yireFUAIRE1jFmEwJTLFVL1ltgUVAapqygkWgmYLFRoFUNWcMyhWNQWpVRkR9fpEOSPJU4sBWgoJpuT5TjSkOE6qT5JMYGoxS4xj07ACOP9anIHqVaNbtjnmgobAx/mjQDRJuD4tl1sAgehCADOfHzdkFFCtCDPiIHd5ZYlG2s0pm6kVl91o6OvRsGYhtOGfFEjUsFGhAPAarsGXvL6nCMPkgDrRbL8ILoLN2HXlH8ahJza01Ck2cXWcVI9lciVxZYJ3bLEKWkLQ5aW5AARqVFT0stpxyzIXZw9ISO3inNDX+Tr06uY5ZlqKEFWh6vaqJGlUTzsNcrCiVWJ+4fhwjYxnen0AAKuQRpOSkB4hUcJMxuNdXFAS4vHJivsNiThVW8Q9Q7z3F5GcUgSs5vXqtpUgkvi0zAsLEWMWlrZzxksPhT8ycSqqamp2MWZwh944SIw9l3odCiQqIy7jKmsYvttZpxihJyw/o7EMhiCyc6Td5Ua1RmpfUWAQVVMdhU4XAxgFYGAziALLEf6ErK5MTDkjWUkiK2jaXc0vvfL6f/nPfupks316dTx2x+60k9OMDWToZAtsoav64it3j3l6On58X3Cbc18v+nKB+bzHfl3KOpdtnjayy2XqRNc27p//yf5Otnl6AR/YCeaL8zPIKp3M7E8Mu+nJ4eMp2+kG2ayugauTrZEoEE2osAm5kzSk3eMLOern3/rML3zx5144vX+Ydue7PUFATGeKGgjpzSYxiHTw4Q6VzBpQYhYmmBhQS3PpcbsVqIj7WbgjXZN4qJIqVLXirZavns/ZUX1F7Pw2wiSpakEUzTHM8YLMavBp2xoXoIGYwOIYsSCSpkHl8ZGM1uqkZRGB1YokBfTyzgUesKxholVL1SCgSAayk2DF6Ls0zJJqgiZJedYEE0mdKoQZlHmSRx89ZVJFoSbs0W3yfDWlXsqsKWfUWgX9IZe+mJCKl+7f/dTxonu6r5sexVI6ppdfybapO80qKNAC0WTF6gTMqqVKpVQmVvCoOgNFWJBqncs8l5xBp1yGiKtaVdANdYyQ7Pbbar6cBwbnQBNgvqasee0DSA43wpjGqhVTODpFh2sXSJh0U3wDnFclFDhNRJH7LiNmuh5bnHyEKNajf1oA5xj44Vqx5ElliXfL3/hKL81JDFZNwwNTfdLMampqEAO04XJRZEd89ykGDRYoSG0OVqq1luJGgLnLDmrGYFUVMT30Ika4UEnjKlpzyGiVwjWJus0s26eA2eKWvAT0JVcRjEExSZqa7zOXKIiocNwUBmUSUSBFU+UvJBazhOVNxPBaq2lVETanYUgTqwTEKjFF9+832DK3bDVEXAdtuHFT7cQfv+MwqtPRYAifNoms33Y6GBQVTevj8GgQvTxVe1hhStE+kiknqlvkIOfsqYVgztmX
BqVsVrQZlBxvYw/u6FVJoj76YubNVodvQwTdKqMlIa6352DoIfBKgwXvnJP2a45CxxtapRV0D1/9hxEtpJEWSbkxjs1ZyNlopBYdOS1c7cVPE0Wr//wNfoigaFBtyugAAAABJRU5ErkJggg==", + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "if local_runtime:\n", + " from IPython.display import Image, display\n", + " import tempfile\n", + " import os.path as osp\n", + " import cv2\n", + " with tempfile.TemporaryDirectory() as tmpdir:\n", + " file_name = osp.join(tmpdir, 'pose_results.png')\n", + " cv2.imwrite(file_name, vis_result[:,:,::-1])\n", + " display(Image(file_name))\n", + "else:\n", + " cv2_imshow(vis_result[:,:,::-1]) #RGB2BGR to fit cv2" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "42HG6DSNI0Ke" + }, + "source": [ + "### Add a new dataset\n", + "\n", + "There are two methods to support a customized dataset in MMPose. The first one is to convert the data to a supported format (e.g. COCO) and use the corresponding dataset class (e.g. BaseCocoStyleDataset), as described in the [document](https://mmpose.readthedocs.io/en/1.x/user_guides/prepare_datasets.html). The second one is to add a new dataset class. In this tutorial, we give an example of the second method.\n", + "\n", + "We first download the demo dataset, which contains 100 samples (75 for training and 25 for validation) selected from COCO train2017 dataset. The annotations are stored in a different format from the original COCO format.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "qGzSb0Rm-p3V", + "outputId": "2e7ec2ba-88e1-490f-cd5a-66ef06ec3e52" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/content/mmpose/data\n", + "--2022-09-14 10:39:37-- https://download.openmmlab.com/mmpose/datasets/coco_tiny.tar\n", + "Resolving download.openmmlab.com (download.openmmlab.com)... 47.89.140.71\n", + "Connecting to download.openmmlab.com (download.openmmlab.com)|47.89.140.71|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 16558080 (16M) [application/x-tar]\n", + "Saving to: ‘coco_tiny.tar’\n", + "\n", + "coco_tiny.tar 100%[===================>] 15.79M 9.14MB/s in 1.7s \n", + "\n", + "2022-09-14 10:39:40 (9.14 MB/s) - ‘coco_tiny.tar’ saved [16558080/16558080]\n", + "\n", + "/content/mmpose\n" + ] + } + ], + "source": [ + "# download dataset\n", + "%mkdir data\n", + "%cd data\n", + "!wget https://download.openmmlab.com/mmpose/datasets/coco_tiny.tar\n", + "!tar -xf coco_tiny.tar\n", + "%cd .." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "fL6S62JWJls0", + "outputId": "fe4cf7c9-5a8c-4542-f0b1-fe01908ca3e4" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Reading package lists...\n", + "Building dependency tree...\n", + "Reading state information...\n", + "The following package was automatically installed and is no longer required:\n", + " libnvidia-common-460\n", + "Use 'apt autoremove' to remove it.\n", + "The following NEW packages will be installed:\n", + " tree\n", + "0 upgraded, 1 newly installed, 0 to remove and 32 not upgraded.\n", + "Need to get 40.7 kB of archives.\n", + "After this operation, 105 kB of additional disk space will be used.\n", + "Get:1 http://archive.ubuntu.com/ubuntu bionic/universe amd64 tree amd64 1.7.0-5 [40.7 kB]\n", + "Fetched 40.7 kB in 0s (161 kB/s)\n", + "Selecting previously unselected package tree.\n", + "(Reading database ... 155685 files and directories currently installed.)\n", + "Preparing to unpack .../tree_1.7.0-5_amd64.deb ...\n", + "Unpacking tree (1.7.0-5) ...\n", + "Setting up tree (1.7.0-5) ...\n", + "Processing triggers for man-db (2.8.3-2ubuntu0.1) ...\n", + "data/coco_tiny\n", + "├── images\n", + "│   ├── 000000012754.jpg\n", + "│   ├── 000000017741.jpg\n", + "│   ├── 000000019157.jpg\n", + "│   ├── 000000019523.jpg\n", + "│   ├── 000000019608.jpg\n", + "│   ├── 000000022816.jpg\n", + "│   ├── 000000031092.jpg\n", + "│   ├── 000000032124.jpg\n", + "│   ├── 000000037209.jpg\n", + "│   ├── 000000050713.jpg\n", + "│   ├── 000000057703.jpg\n", + "│   ├── 000000064909.jpg\n", + "│   ├── 000000076942.jpg\n", + "│   ├── 000000079754.jpg\n", + "│   ├── 000000083935.jpg\n", + "│   ├── 000000085316.jpg\n", + "│   ├── 000000101013.jpg\n", + "│   ├── 000000101172.jpg\n", + "│   ├── 000000103134.jpg\n", + "│   ├── 000000103163.jpg\n", + "│   ├── 000000105647.jpg\n", + "│   ├── 000000107960.jpg\n", + "│   ├── 000000117891.jpg\n", + "│   ├── 000000118181.jpg\n", + "│   ├── 000000120021.jpg\n", + "│   ├── 000000128119.jpg\n", + "│   ├── 000000143908.jpg\n", + "│   ├── 000000145025.jpg\n", + "│   ├── 000000147386.jpg\n", + "│   ├── 000000147979.jpg\n", + "│   ├── 000000154222.jpg\n", + "│   ├── 000000160190.jpg\n", + "│   ├── 000000161112.jpg\n", + "│   ├── 000000175737.jpg\n", + "│   ├── 000000177069.jpg\n", + "│   ├── 000000184659.jpg\n", + "│   ├── 000000209468.jpg\n", + "│   ├── 000000210060.jpg\n", + "│   ├── 000000215867.jpg\n", + "│   ├── 000000216861.jpg\n", + "│   ├── 000000227224.jpg\n", + "│   ├── 000000246265.jpg\n", + "│   ├── 000000254919.jpg\n", + "│   ├── 000000263687.jpg\n", + "│   ├── 000000264628.jpg\n", + "│   ├── 000000268927.jpg\n", + "│   ├── 000000271177.jpg\n", + "│   ├── 000000275219.jpg\n", + "│   ├── 000000277542.jpg\n", + "│   ├── 000000279140.jpg\n", + "│   ├── 000000286813.jpg\n", + "│   ├── 000000297980.jpg\n", + "│   ├── 000000301641.jpg\n", + "│   ├── 000000312341.jpg\n", + "│   ├── 000000325768.jpg\n", + "│   ├── 000000332221.jpg\n", + "│   ├── 000000345071.jpg\n", + "│   ├── 000000346965.jpg\n", + "│   ├── 000000347836.jpg\n", + "│   ├── 000000349437.jpg\n", + "│   ├── 000000360735.jpg\n", + "│   ├── 000000362343.jpg\n", + "│   ├── 000000364079.jpg\n", + "│   ├── 000000364113.jpg\n", + "│   ├── 000000386279.jpg\n", + "│   ├── 000000386968.jpg\n", + "│   ├── 000000388619.jpg\n", + "│   ├── 000000390137.jpg\n", + "│   ├── 000000390241.jpg\n", + "│   ├── 000000390298.jpg\n", + "│  
 ├── 000000390348.jpg\n", + "│   ├── 000000398606.jpg\n", + "│   ├── 000000400456.jpg\n", + "│   ├── 000000402514.jpg\n", + "│   ├── 000000403255.jpg\n", + "│   ├── 000000403432.jpg\n", + "│   ├── 000000410350.jpg\n", + "│   ├── 000000453065.jpg\n", + "│   ├── 000000457254.jpg\n", + "│   ├── 000000464153.jpg\n", + "│   ├── 000000464515.jpg\n", + "│   ├── 000000465418.jpg\n", + "│   ├── 000000480591.jpg\n", + "│   ├── 000000484279.jpg\n", + "│   ├── 000000494014.jpg\n", + "│   ├── 000000515289.jpg\n", + "│   ├── 000000516805.jpg\n", + "│   ├── 000000521994.jpg\n", + "│   ├── 000000528962.jpg\n", + "│   ├── 000000534736.jpg\n", + "│   ├── 000000535588.jpg\n", + "│   ├── 000000537548.jpg\n", + "│   ├── 000000553698.jpg\n", + "│   ├── 000000555622.jpg\n", + "│   ├── 000000566456.jpg\n", + "│   ├── 000000567171.jpg\n", + "│   └── 000000568961.jpg\n", + "├── train.json\n", + "└── val.json\n", + "\n", + "1 directory, 99 files\n" + ] + } + ], + "source": [ + "# check the directory structure\n", + "!apt-get -q install tree\n", + "!tree data/coco_tiny" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Hl09rtA4Jn5b", + "outputId": "e94e84ea-7192-4d2f-9747-716931953d6d" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " 75\n", + "{'bbox': [267.03, 104.32, 229.19, 320],\n", + " 'image_file': '000000537548.jpg',\n", + " 'image_size': [640, 480],\n", + " 'keypoints': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 325, 160, 2, 398,\n", + " 177, 2, 0, 0, 0, 437, 238, 2, 0, 0, 0, 477, 270, 2, 287, 255, 1,\n", + " 339, 267, 2, 0, 0, 0, 423, 314, 2, 0, 0, 0, 355, 367, 2]}\n" + ] + } + ], + "source": [ + "# check the annotation format\n", + "import json\n", + "import pprint\n", + "\n", + "anns = json.load(open('data/coco_tiny/train.json'))\n", + "\n", + "print(type(anns), len(anns))\n", + "pprint.pprint(anns[0], compact=True)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "H-dMbjgnJzbH" + }, + "source": [ + "After downloading the data, we implement a new dataset class to load data samples for model training and validation. Assume that we are going to train a top-down pose estimation model, the new dataset class inherits `BaseCocoStyleDataset`.\n", + "\n", + "We have already implemented a `CocoDataset` so that we can take it as an example." + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "jCu4npV2rl_Q" + }, + "source": [ + "#### Note\n", + "If you meet the following error:\n", + "```shell\n", + "AssertionError: class `PoseLocalVisualizer` in mmpose/visualization/local_visualizer.py: instance named of visualizer has been created, the method `get_instance` should not access any other arguments\n", + "```\n", + "Please reboot your jupyter kernel and start running from here." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "3I66Pi5Er94J" + }, + "outputs": [], + "source": [ + "%cd mmpose" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "rRNq50dytJki" + }, + "outputs": [], + "source": [ + "# Copyright (c) OpenMMLab. 
All rights reserved.\n", + "import json\n", + "import os.path as osp\n", + "from typing import Callable, List, Optional, Sequence, Union\n", + "\n", + "import numpy as np\n", + "from mmengine.utils import check_file_exist\n", + "\n", + "from mmpose.registry import DATASETS\n", + "from mmpose.datasets.datasets.base import BaseCocoStyleDataset\n", + "\n", + "\n", + "@DATASETS.register_module()\n", + "class TinyCocoDataset(BaseCocoStyleDataset):\n", + " METAINFO: dict = dict(from_file='configs/_base_/datasets/coco.py')\n", + "\n", + " def _load_annotations(self) -> List[dict]:\n", + " \"\"\"Load data from annotations in MPII format.\"\"\"\n", + "\n", + " check_file_exist(self.ann_file)\n", + " with open(self.ann_file) as anno_file:\n", + " anns = json.load(anno_file)\n", + "\n", + " data_list = []\n", + " ann_id = 0\n", + "\n", + " for idx, ann in enumerate(anns):\n", + " img_h, img_w = ann['image_size']\n", + "\n", + " # get bbox in shape [1, 4], formatted as xywh\n", + " x, y, w, h = ann['bbox']\n", + " x1 = np.clip(x, 0, img_w - 1)\n", + " y1 = np.clip(y, 0, img_h - 1)\n", + " x2 = np.clip(x + w, 0, img_w - 1)\n", + " y2 = np.clip(y + h, 0, img_h - 1)\n", + "\n", + " bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4)\n", + "\n", + " # load keypoints in shape [1, K, 2] and keypoints_visible in [1, K]\n", + " joints_3d = np.array(ann['keypoints']).reshape(1, -1, 3)\n", + " num_joints = joints_3d.shape[1]\n", + " keypoints = np.zeros((1, num_joints, 2), dtype=np.float32)\n", + " keypoints[:, :, :2] = joints_3d[:, :, :2]\n", + " keypoints_visible = np.minimum(1, joints_3d[:, :, 2:3])\n", + " keypoints_visible = keypoints_visible.reshape(1, -1)\n", + "\n", + " data_info = {\n", + " 'id': ann_id,\n", + " 'img_id': int(ann['image_file'].split('.')[0]),\n", + " 'img_path': osp.join(self.data_prefix['img'], ann['image_file']),\n", + " 'bbox': bbox,\n", + " 'bbox_score': np.ones(1, dtype=np.float32),\n", + " 'keypoints': keypoints,\n", + " 'keypoints_visible': keypoints_visible,\n", + " }\n", + "\n", + " data_list.append(data_info)\n", + " ann_id = ann_id + 1\n", + "\n", + " return data_list, None\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "UmGitQZkUnom" + }, + "source": [ + "### Create a config file\n", + "\n", + "In the next step, we create a config file which configures the model, dataset and runtime settings. More information can be found at [Configs](https://mmpose.readthedocs.io/en/1.x/user_guides/configs.html). A common practice to create a config file is deriving from a existing one. In this tutorial, we load a config file that trains a HRNet on COCO dataset, and modify it to adapt to the COCOTiny dataset." 
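Before deriving the config, it can help to sanity-check that `TinyCocoDataset` loads the annotations as intended. The snippet below is a minimal sketch, not part of the original notebook; it assumes the cell defining `TinyCocoDataset` has been executed, the working directory is the mmpose root (so the metainfo file `configs/_base_/datasets/coco.py` resolves), and `data/coco_tiny` has been downloaded as above.

```python
# Sketch: instantiate the new dataset class directly and inspect a raw sample.
# An empty pipeline leaves the loaded data infos untransformed.
tiny_train = TinyCocoDataset(
    data_root='data/coco_tiny',
    data_mode='topdown',
    ann_file='train.json',
    data_prefix=dict(img='images/'),
    pipeline=[],
    test_mode=True)

print('number of samples:', len(tiny_train))  # expected: 75
sample = tiny_train[0]
print(sample['img_path'])
print(sample['bbox'].shape, sample['keypoints'].shape)  # (1, 4), (1, 17, 2)
```

If the printed paths or shapes look wrong, the place to fix is `_load_annotations` above, before any training is attempted.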
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "sMbVVHPXK87s", + "outputId": "a23a1ed9-a2ee-4a6a-93da-3c1968c8a2ec" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "default_scope = 'mmpose'\n", + "default_hooks = dict(\n", + " timer=dict(type='IterTimerHook'),\n", + " logger=dict(type='LoggerHook', interval=50),\n", + " param_scheduler=dict(type='ParamSchedulerHook'),\n", + " checkpoint=dict(\n", + " type='CheckpointHook',\n", + " interval=1,\n", + " save_best='pck/PCK@0.05',\n", + " rule='greater',\n", + " max_keep_ckpts=1),\n", + " sampler_seed=dict(type='DistSamplerSeedHook'),\n", + " visualization=dict(type='PoseVisualizationHook', enable=False))\n", + "custom_hooks = [dict(type='SyncBuffersHook')]\n", + "env_cfg = dict(\n", + " cudnn_benchmark=False,\n", + " mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),\n", + " dist_cfg=dict(backend='nccl'))\n", + "vis_backends = [dict(type='LocalVisBackend')]\n", + "visualizer = dict(\n", + " type='PoseLocalVisualizer',\n", + " vis_backends=[dict(type='LocalVisBackend')],\n", + " name='visualizer')\n", + "log_processor = dict(\n", + " type='LogProcessor', window_size=50, by_epoch=True, num_digits=6)\n", + "log_level = 'INFO'\n", + "load_from = None\n", + "resume = False\n", + "file_client_args = dict(backend='disk')\n", + "train_cfg = dict(by_epoch=True, max_epochs=40, val_interval=1)\n", + "val_cfg = dict()\n", + "test_cfg = dict()\n", + "optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005))\n", + "param_scheduler = [\n", + " dict(type='LinearLR', begin=0, end=10, start_factor=0.001, by_epoch=False),\n", + " dict(\n", + " type='MultiStepLR',\n", + " begin=0,\n", + " end=40,\n", + " milestones=[17, 35],\n", + " gamma=0.1,\n", + " by_epoch=True)\n", + "]\n", + "auto_scale_lr = dict(base_batch_size=512)\n", + "codec = dict(\n", + " type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)\n", + "model = dict(\n", + " type='TopdownPoseEstimator',\n", + " data_preprocessor=dict(\n", + " type='PoseDataPreprocessor',\n", + " mean=[123.675, 116.28, 103.53],\n", + " std=[58.395, 57.12, 57.375],\n", + " bgr_to_rgb=True),\n", + " backbone=dict(\n", + " type='HRNet',\n", + " in_channels=3,\n", + " extra=dict(\n", + " stage1=dict(\n", + " num_modules=1,\n", + " num_branches=1,\n", + " block='BOTTLENECK',\n", + " num_blocks=(4, ),\n", + " num_channels=(64, )),\n", + " stage2=dict(\n", + " num_modules=1,\n", + " num_branches=2,\n", + " block='BASIC',\n", + " num_blocks=(4, 4),\n", + " num_channels=(32, 64)),\n", + " stage3=dict(\n", + " num_modules=4,\n", + " num_branches=3,\n", + " block='BASIC',\n", + " num_blocks=(4, 4, 4),\n", + " num_channels=(32, 64, 128)),\n", + " stage4=dict(\n", + " num_modules=3,\n", + " num_branches=4,\n", + " block='BASIC',\n", + " num_blocks=(4, 4, 4, 4),\n", + " num_channels=(32, 64, 128, 256))),\n", + " init_cfg=dict(\n", + " type='Pretrained',\n", + " checkpoint=\n", + " 'https://download.openmmlab.com/mmpose/pretrain_models/hrnet_w32-36af842e.pth'\n", + " )),\n", + " head=dict(\n", + " type='HeatmapHead',\n", + " in_channels=32,\n", + " out_channels=17,\n", + " deconv_out_channels=None,\n", + " loss=dict(type='KeypointMSELoss', use_target_weight=True),\n", + " decoder=dict(\n", + " type='MSRAHeatmap',\n", + " input_size=(192, 256),\n", + " heatmap_size=(48, 64),\n", + " sigma=2)),\n", + " test_cfg=dict(flip_test=True, flip_mode='heatmap', 
shift_heatmap=True))\n", + "dataset_type = 'TinyCocoDataset'\n", + "data_mode = 'topdown'\n", + "data_root = 'data/coco_tiny'\n", + "train_pipeline = [\n", + " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", + " dict(type='GetBBoxCenterScale'),\n", + " dict(type='RandomFlip', direction='horizontal'),\n", + " dict(type='RandomHalfBody'),\n", + " dict(type='RandomBBoxTransform'),\n", + " dict(type='TopdownAffine', input_size=(192, 256)),\n", + " dict(\n", + " type='GenerateTarget',\n", + " target_type='heatmap',\n", + " encoder=dict(\n", + " type='MSRAHeatmap',\n", + " input_size=(192, 256),\n", + " heatmap_size=(48, 64),\n", + " sigma=2)),\n", + " dict(type='PackPoseInputs')\n", + "]\n", + "test_pipeline = [\n", + " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", + " dict(type='GetBBoxCenterScale'),\n", + " dict(type='TopdownAffine', input_size=(192, 256)),\n", + " dict(type='PackPoseInputs')\n", + "]\n", + "train_dataloader = dict(\n", + " batch_size=16,\n", + " num_workers=2,\n", + " persistent_workers=True,\n", + " sampler=dict(type='DefaultSampler', shuffle=True),\n", + " dataset=dict(\n", + " type='TinyCocoDataset',\n", + " data_root='data/coco_tiny',\n", + " data_mode='topdown',\n", + " ann_file='train.json',\n", + " data_prefix=dict(img='images/'),\n", + " pipeline=[\n", + " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", + " dict(type='GetBBoxCenterScale'),\n", + " dict(type='RandomFlip', direction='horizontal'),\n", + " dict(type='RandomHalfBody'),\n", + " dict(type='RandomBBoxTransform'),\n", + " dict(type='TopdownAffine', input_size=(192, 256)),\n", + " dict(\n", + " type='GenerateTarget',\n", + " target_type='heatmap',\n", + " encoder=dict(\n", + " type='MSRAHeatmap',\n", + " input_size=(192, 256),\n", + " heatmap_size=(48, 64),\n", + " sigma=2)),\n", + " dict(type='PackPoseInputs')\n", + " ]))\n", + "val_dataloader = dict(\n", + " batch_size=16,\n", + " num_workers=2,\n", + " persistent_workers=True,\n", + " drop_last=False,\n", + " sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),\n", + " dataset=dict(\n", + " type='TinyCocoDataset',\n", + " data_root='data/coco_tiny',\n", + " data_mode='topdown',\n", + " ann_file='val.json',\n", + " bbox_file=None,\n", + " data_prefix=dict(img='images/'),\n", + " test_mode=True,\n", + " pipeline=[\n", + " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", + " dict(type='GetBBoxCenterScale'),\n", + " dict(type='TopdownAffine', input_size=(192, 256)),\n", + " dict(type='PackPoseInputs')\n", + " ]))\n", + "test_dataloader = dict(\n", + " batch_size=16,\n", + " num_workers=2,\n", + " persistent_workers=True,\n", + " drop_last=False,\n", + " sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),\n", + " dataset=dict(\n", + " type='TinyCocoDataset',\n", + " data_root='data/coco_tiny',\n", + " data_mode='topdown',\n", + " ann_file='val.json',\n", + " bbox_file=None,\n", + " data_prefix=dict(img='images/'),\n", + " test_mode=True,\n", + " pipeline=[\n", + " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", + " dict(type='GetBBoxCenterScale'),\n", + " dict(type='TopdownAffine', input_size=(192, 256)),\n", + " dict(type='PackPoseInputs')\n", + " ]))\n", + "val_evaluator = dict(type='PCKAccuracy')\n", + "test_evaluator = dict(type='PCKAccuracy')\n", + "work_dir = 'work_dirs/hrnet_w32_coco_tiny_256x192'\n", + "randomness = dict(seed=0)\n", + "\n" + ] + } + ], + "source": [ + "from mmengine import Config\n", + "\n", + "cfg = 
Config.fromfile(\n", + " './configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py'\n", + ")\n", + "\n", + "# set basic configs\n", + "cfg.data_root = 'data/coco_tiny'\n", + "cfg.work_dir = 'work_dirs/hrnet_w32_coco_tiny_256x192'\n", + "cfg.randomness = dict(seed=0)\n", + "\n", + "# set log interval\n", + "cfg.train_cfg.val_interval = 1\n", + "\n", + "# set num of epoch\n", + "cfg.train_cfg.max_epochs = 40\n", + "\n", + "# set optimizer\n", + "cfg.optim_wrapper = dict(optimizer=dict(\n", + " type='Adam',\n", + " lr=5e-4,\n", + "))\n", + "\n", + "# set learning rate policy\n", + "cfg.param_scheduler = [\n", + " dict(\n", + " type='LinearLR', begin=0, end=10, start_factor=0.001,\n", + " by_epoch=False), # warm-up\n", + " dict(\n", + " type='MultiStepLR',\n", + " begin=0,\n", + " end=cfg.train_cfg.max_epochs,\n", + " milestones=[17, 35],\n", + " gamma=0.1,\n", + " by_epoch=True)\n", + "]\n", + "\n", + "\n", + "# set batch size\n", + "cfg.train_dataloader.batch_size = 16\n", + "cfg.val_dataloader.batch_size = 16\n", + "cfg.test_dataloader.batch_size = 16\n", + "\n", + "# set dataset configs\n", + "cfg.dataset_type = 'TinyCocoDataset'\n", + "cfg.train_dataloader.dataset.type = cfg.dataset_type\n", + "cfg.train_dataloader.dataset.ann_file = 'train.json'\n", + "cfg.train_dataloader.dataset.data_root = cfg.data_root\n", + "cfg.train_dataloader.dataset.data_prefix = dict(img='images/')\n", + "\n", + "\n", + "cfg.val_dataloader.dataset.type = cfg.dataset_type\n", + "cfg.val_dataloader.dataset.bbox_file = None\n", + "cfg.val_dataloader.dataset.ann_file = 'val.json'\n", + "cfg.val_dataloader.dataset.data_root = cfg.data_root\n", + "cfg.val_dataloader.dataset.data_prefix = dict(img='images/')\n", + "\n", + "cfg.test_dataloader.dataset.type = cfg.dataset_type\n", + "cfg.test_dataloader.dataset.bbox_file = None\n", + "cfg.test_dataloader.dataset.ann_file = 'val.json'\n", + "cfg.test_dataloader.dataset.data_root = cfg.data_root\n", + "cfg.test_dataloader.dataset.data_prefix = dict(img='images/')\n", + "\n", + "# set evaluator\n", + "cfg.val_evaluator = dict(type='PCKAccuracy')\n", + "cfg.test_evaluator = cfg.val_evaluator\n", + "\n", + "cfg.default_hooks.checkpoint.save_best = 'PCK'\n", + "cfg.default_hooks.checkpoint.max_keep_ckpts = 1\n", + "\n", + "print(cfg.pretty_text)\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "UlD8iDZehE2S" + }, + "source": [ + "or you can create a config file like follows:\n", + "```Python3\n", + "_base_ = ['../../../_base_/default_runtime.py']\n", + "\n", + "# runtime\n", + "train_cfg = dict(max_epochs=40, val_interval=1)\n", + "\n", + "# optimizer\n", + "optim_wrapper = dict(optimizer=dict(\n", + " type='Adam',\n", + " lr=5e-4,\n", + "))\n", + "\n", + "# learning policy\n", + "param_scheduler = [\n", + " dict(\n", + " type='LinearLR', begin=0, end=500, start_factor=0.001,\n", + " by_epoch=False), # warm-up\n", + " dict(\n", + " type='MultiStepLR',\n", + " begin=0,\n", + " end=train_cfg.max_epochs,\n", + " milestones=[17, 35],\n", + " gamma=0.1,\n", + " by_epoch=True)\n", + "]\n", + "\n", + "# automatically scaling LR based on the actual training batch size\n", + "auto_scale_lr = dict(base_batch_size=512)\n", + "\n", + "# codec settings\n", + "codec = dict(\n", + " type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)\n", + "\n", + "# model settings\n", + "model = dict(\n", + " type='TopdownPoseEstimator',\n", + " data_preprocessor=dict(\n", + " type='PoseDataPreprocessor',\n", + " 
mean=[123.675, 116.28, 103.53],\n", + " std=[58.395, 57.12, 57.375],\n", + " bgr_to_rgb=True),\n", + " backbone=dict(\n", + " type='HRNet',\n", + " in_channels=3,\n", + " extra=dict(\n", + " stage1=dict(\n", + " num_modules=1,\n", + " num_branches=1,\n", + " block='BOTTLENECK',\n", + " num_blocks=(4, ),\n", + " num_channels=(64, )),\n", + " stage2=dict(\n", + " num_modules=1,\n", + " num_branches=2,\n", + " block='BASIC',\n", + " num_blocks=(4, 4),\n", + " num_channels=(32, 64)),\n", + " stage3=dict(\n", + " num_modules=4,\n", + " num_branches=3,\n", + " block='BASIC',\n", + " num_blocks=(4, 4, 4),\n", + " num_channels=(32, 64, 128)),\n", + " stage4=dict(\n", + " num_modules=3,\n", + " num_branches=4,\n", + " block='BASIC',\n", + " num_blocks=(4, 4, 4, 4),\n", + " num_channels=(32, 64, 128, 256))),\n", + " init_cfg=dict(\n", + " type='Pretrained',\n", + " checkpoint='https://download.openmmlab.com/mmpose/'\n", + " 'pretrain_models/hrnet_w32-36af842e.pth'),\n", + " ),\n", + " head=dict(\n", + " type='HeatmapHead',\n", + " in_channels=32,\n", + " out_channels=17,\n", + " deconv_out_channels=None,\n", + " loss=dict(type='KeypointMSELoss', use_target_weight=True),\n", + " decoder=codec),\n", + " test_cfg=dict(\n", + " flip_test=True,\n", + " flip_mode='heatmap',\n", + " shift_heatmap=True,\n", + " ))\n", + "\n", + "# base dataset settings\n", + "dataset_type = 'TinyCocoDataset'\n", + "data_mode = 'topdown'\n", + "data_root = 'data/coco_tiny'\n", + "work_dir = 'work_dirs/hrnet_w32_coco_tiny_256x192'\n", + "randomness = dict(seed=0)\n", + "\n", + "# pipelines\n", + "train_pipeline = [\n", + " dict(type='LoadImage'),\n", + " dict(type='GetBBoxCenterScale'),\n", + " dict(type='RandomFlip', direction='horizontal'),\n", + " dict(type='RandomHalfBody'),\n", + " dict(type='RandomBBoxTransform'),\n", + " dict(type='TopdownAffine', input_size=codec['input_size']),\n", + " dict(type='GenerateTarget', target_type='heatmap', encoder=codec),\n", + " dict(type='PackPoseInputs')\n", + "]\n", + "test_pipeline = [\n", + " dict(type='LoadImage'),\n", + " dict(type='GetBBoxCenterScale'),\n", + " dict(type='TopdownAffine', input_size=codec['input_size']),\n", + " dict(type='PackPoseInputs')\n", + "]\n", + "\n", + "# data loaders\n", + "train_dataloader = dict(\n", + " batch_size=16,\n", + " num_workers=2,\n", + " persistent_workers=True,\n", + " sampler=dict(type='DefaultSampler', shuffle=True),\n", + " dataset=dict(\n", + " type=dataset_type,\n", + " data_root=data_root,\n", + " data_mode=data_mode,\n", + " ann_file='train.json',\n", + " data_prefix=dict(img='images/'),\n", + " pipeline=train_pipeline,\n", + " ))\n", + "val_dataloader = dict(\n", + " batch_size=16,\n", + " num_workers=2,\n", + " persistent_workers=True,\n", + " drop_last=False,\n", + " sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),\n", + " dataset=dict(\n", + " type=dataset_type,\n", + " data_root=data_root,\n", + " data_mode=data_mode,\n", + " ann_file='val.json',\n", + " data_prefix=dict(img='images/'),\n", + " test_mode=True,\n", + " pipeline=test_pipeline,\n", + " ))\n", + "test_dataloader = val_dataloader\n", + "\n", + "# evaluators\n", + "val_evaluator = dict(\n", + " type='PCKAccuracy')\n", + "test_evaluator = val_evaluator\n", + "\n", + "# hooks\n", + "default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater'))\n", + "```" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "ChVqB1oYncmo" + }, + "source": [ + "### Train and Evaluation\n" + ] + }, + { + "cell_type": 
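Training in MMPose 1.x is driven by MMEngine's `Runner`, built from the config prepared above. A minimal sketch (assuming `cfg` and the `TinyCocoDataset` cell are available in the current session) is shown below; the lengthy log that follows shows the environment and resolved config for one such run.

```python
# Sketch: build a Runner from the config and start training. Checkpoints and
# logs are written to cfg.work_dir ('work_dirs/hrnet_w32_coco_tiny_256x192').
from mmengine.runner import Runner

runner = Runner.from_cfg(cfg)
runner.train()  # trains for cfg.train_cfg.max_epochs epochs,
                # validating every cfg.train_cfg.val_interval epoch(s)
```

Evaluation on the test split can then be run with `runner.test()`, which uses the `test_dataloader` and `test_evaluator` from the same config.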
"code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000, + "referenced_widgets": [ + "2a079d9c0b9845318e6c612ca9601b86", + "3554753622334094961a47daf9362c59", + "08e0412b8dd54d28a26c232e75ea6088", + "558a9420b0b34be2a2ca8a8b8af9cbfc", + "a9bd3e477f07449788f0e95e3cd13ddc", + "5b2ee1f3e78d4cd993009d04baf76b24", + "a3e5aa31c3f644b5a677ec49fe2e0832", + "d2ee56f920a245d9875de8e37596a5c8", + "b5f8c86d48a04afa997fc137e1acd716", + "1c1b09d91dec4e3dadefe953daf50745", + "6af448aebdb744b98a2807f66b1d6e5d" + ] + }, + "id": "Ab3xsUdPlXuJ", + "outputId": "c07394b8-21f4-4766-af2b-87d2caa6e74c" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "09/15 12:42:06 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"log_processor\" registry tree. As a workaround, the current \"log_processor\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:06 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - \n", + "------------------------------------------------------------\n", + "System environment:\n", + " sys.platform: linux\n", + " Python: 3.7.13 (default, Mar 29 2022, 02:18:16) [GCC 7.5.0]\n", + " CUDA available: True\n", + " numpy_random_seed: 0\n", + " GPU 0: NVIDIA GeForce GTX 1660 Ti\n", + " CUDA_HOME: /usr/local/cuda\n", + " NVCC: Cuda compilation tools, release 11.3, V11.3.109\n", + " GCC: gcc (Ubuntu 5.4.0-6ubuntu1~16.04.12) 5.4.0 20160609\n", + " PyTorch: 1.12.0+cu113\n", + " PyTorch compiling details: PyTorch built with:\n", + " - GCC 9.3\n", + " - C++ Version: 201402\n", + " - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications\n", + " - Intel(R) MKL-DNN v2.6.0 (Git Hash 52b5f107dd9cf10910aaa19cb47f3abf9b349815)\n", + " - OpenMP 201511 (a.k.a. 
OpenMP 4.5)\n", + " - LAPACK is enabled (usually provided by MKL)\n", + " - NNPACK is enabled\n", + " - CPU capability usage: AVX2\n", + " - CUDA Runtime 11.3\n", + " - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86\n", + " - CuDNN 8.3.2 (built against CUDA 11.5)\n", + " - Magma 2.5.2\n", + " - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.3.2, CXX_COMPILER=/opt/rh/devtoolset-9/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Werror=cast-function-type -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.12.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, \n", + "\n", + " TorchVision: 0.13.0+cu113\n", + " OpenCV: 4.6.0\n", + " MMEngine: 0.1.0\n", + "\n", + "Runtime environment:\n", + " cudnn_benchmark: False\n", + " mp_cfg: {'mp_start_method': 'fork', 'opencv_num_threads': 0}\n", + " dist_cfg: {'backend': 'nccl'}\n", + " seed: 0\n", + " Distributed launcher: none\n", + " Distributed training: False\n", + " GPU number: 1\n", + "------------------------------------------------------------\n", + "\n", + "09/15 12:42:06 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Config:\n", + "default_scope = 'mmpose'\n", + "default_hooks = dict(\n", + " timer=dict(type='IterTimerHook'),\n", + " logger=dict(type='LoggerHook', interval=50),\n", + " param_scheduler=dict(type='ParamSchedulerHook'),\n", + " checkpoint=dict(\n", + " type='CheckpointHook',\n", + " interval=1,\n", + " save_best='pck/PCK@0.05',\n", + " rule='greater',\n", + " max_keep_ckpts=1),\n", + " sampler_seed=dict(type='DistSamplerSeedHook'),\n", + " visualization=dict(type='PoseVisualizationHook', enable=False))\n", + "custom_hooks = [dict(type='SyncBuffersHook')]\n", + "env_cfg = dict(\n", + " cudnn_benchmark=False,\n", + " mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),\n", + " dist_cfg=dict(backend='nccl'))\n", + "vis_backends = [dict(type='LocalVisBackend')]\n", + "visualizer = dict(\n", + " type='PoseLocalVisualizer',\n", + " vis_backends=[dict(type='LocalVisBackend')],\n", + " name='visualizer')\n", + "log_processor = dict(\n", + " type='LogProcessor', window_size=50, by_epoch=True, num_digits=6)\n", + "log_level = 'INFO'\n", + "load_from = None\n", + "resume = False\n", + "file_client_args = dict(backend='disk')\n", + "train_cfg = dict(by_epoch=True, max_epochs=40, val_interval=1)\n", + "val_cfg = dict()\n", + "test_cfg = 
dict()\n", + "optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005))\n", + "param_scheduler = [\n", + " dict(type='LinearLR', begin=0, end=10, start_factor=0.001, by_epoch=False),\n", + " dict(\n", + " type='MultiStepLR',\n", + " begin=0,\n", + " end=40,\n", + " milestones=[17, 35],\n", + " gamma=0.1,\n", + " by_epoch=True)\n", + "]\n", + "auto_scale_lr = dict(base_batch_size=512)\n", + "codec = dict(\n", + " type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)\n", + "model = dict(\n", + " type='TopdownPoseEstimator',\n", + " data_preprocessor=dict(\n", + " type='PoseDataPreprocessor',\n", + " mean=[123.675, 116.28, 103.53],\n", + " std=[58.395, 57.12, 57.375],\n", + " bgr_to_rgb=True),\n", + " backbone=dict(\n", + " type='HRNet',\n", + " in_channels=3,\n", + " extra=dict(\n", + " stage1=dict(\n", + " num_modules=1,\n", + " num_branches=1,\n", + " block='BOTTLENECK',\n", + " num_blocks=(4, ),\n", + " num_channels=(64, )),\n", + " stage2=dict(\n", + " num_modules=1,\n", + " num_branches=2,\n", + " block='BASIC',\n", + " num_blocks=(4, 4),\n", + " num_channels=(32, 64)),\n", + " stage3=dict(\n", + " num_modules=4,\n", + " num_branches=3,\n", + " block='BASIC',\n", + " num_blocks=(4, 4, 4),\n", + " num_channels=(32, 64, 128)),\n", + " stage4=dict(\n", + " num_modules=3,\n", + " num_branches=4,\n", + " block='BASIC',\n", + " num_blocks=(4, 4, 4, 4),\n", + " num_channels=(32, 64, 128, 256))),\n", + " init_cfg=dict(\n", + " type='Pretrained',\n", + " checkpoint=\n", + " 'https://download.openmmlab.com/mmpose/pretrain_models/hrnet_w32-36af842e.pth'\n", + " )),\n", + " head=dict(\n", + " type='HeatmapHead',\n", + " in_channels=32,\n", + " out_channels=17,\n", + " deconv_out_channels=None,\n", + " loss=dict(type='KeypointMSELoss', use_target_weight=True),\n", + " decoder=dict(\n", + " type='MSRAHeatmap',\n", + " input_size=(192, 256),\n", + " heatmap_size=(48, 64),\n", + " sigma=2)),\n", + " test_cfg=dict(flip_test=True, flip_mode='heatmap', shift_heatmap=True))\n", + "dataset_type = 'TinyCocoDataset'\n", + "data_mode = 'topdown'\n", + "data_root = 'data/coco_tiny'\n", + "train_pipeline = [\n", + " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", + " dict(type='GetBBoxCenterScale'),\n", + " dict(type='RandomFlip', direction='horizontal'),\n", + " dict(type='RandomHalfBody'),\n", + " dict(type='RandomBBoxTransform'),\n", + " dict(type='TopdownAffine', input_size=(192, 256)),\n", + " dict(\n", + " type='GenerateTarget',\n", + " target_type='heatmap',\n", + " encoder=dict(\n", + " type='MSRAHeatmap',\n", + " input_size=(192, 256),\n", + " heatmap_size=(48, 64),\n", + " sigma=2)),\n", + " dict(type='PackPoseInputs')\n", + "]\n", + "test_pipeline = [\n", + " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", + " dict(type='GetBBoxCenterScale'),\n", + " dict(type='TopdownAffine', input_size=(192, 256)),\n", + " dict(type='PackPoseInputs')\n", + "]\n", + "train_dataloader = dict(\n", + " batch_size=16,\n", + " num_workers=2,\n", + " persistent_workers=True,\n", + " sampler=dict(type='DefaultSampler', shuffle=True),\n", + " dataset=dict(\n", + " type='TinyCocoDataset',\n", + " data_root='data/coco_tiny',\n", + " data_mode='topdown',\n", + " ann_file='train.json',\n", + " data_prefix=dict(img='images/'),\n", + " pipeline=[\n", + " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", + " dict(type='GetBBoxCenterScale'),\n", + " dict(type='RandomFlip', direction='horizontal'),\n", + " dict(type='RandomHalfBody'),\n", + " 
dict(type='RandomBBoxTransform'),\n", + " dict(type='TopdownAffine', input_size=(192, 256)),\n", + " dict(\n", + " type='GenerateTarget',\n", + " target_type='heatmap',\n", + " encoder=dict(\n", + " type='MSRAHeatmap',\n", + " input_size=(192, 256),\n", + " heatmap_size=(48, 64),\n", + " sigma=2)),\n", + " dict(type='PackPoseInputs')\n", + " ]))\n", + "val_dataloader = dict(\n", + " batch_size=16,\n", + " num_workers=2,\n", + " persistent_workers=True,\n", + " drop_last=False,\n", + " sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),\n", + " dataset=dict(\n", + " type='TinyCocoDataset',\n", + " data_root='data/coco_tiny',\n", + " data_mode='topdown',\n", + " ann_file='val.json',\n", + " bbox_file=None,\n", + " data_prefix=dict(img='images/'),\n", + " test_mode=True,\n", + " pipeline=[\n", + " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", + " dict(type='GetBBoxCenterScale'),\n", + " dict(type='TopdownAffine', input_size=(192, 256)),\n", + " dict(type='PackPoseInputs')\n", + " ]))\n", + "test_dataloader = dict(\n", + " batch_size=16,\n", + " num_workers=2,\n", + " persistent_workers=True,\n", + " drop_last=False,\n", + " sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),\n", + " dataset=dict(\n", + " type='TinyCocoDataset',\n", + " data_root='data/coco_tiny',\n", + " data_mode='topdown',\n", + " ann_file='val.json',\n", + " bbox_file=None,\n", + " data_prefix=dict(img='images/'),\n", + " test_mode=True,\n", + " pipeline=[\n", + " dict(type='LoadImage', file_client_args=dict(backend='disk')),\n", + " dict(type='GetBBoxCenterScale'),\n", + " dict(type='TopdownAffine', input_size=(192, 256)),\n", + " dict(type='PackPoseInputs')\n", + " ]))\n", + "val_evaluator = dict(type='PCKAccuracy')\n", + "test_evaluator = dict(type='PCKAccuracy')\n", + "work_dir = 'work_dirs/hrnet_w32_coco_tiny_256x192'\n", + "randomness = dict(seed=0)\n", + "\n", + "Result has been saved to /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/modules_statistic_results.json\n", + "09/15 12:42:07 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Distributed training is not used, all SyncBatchNorm (SyncBN) layers in the model will be automatically reverted to BatchNormXd layers if they are used.\n", + "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"data sampler\" registry tree. As a workaround, the current \"data sampler\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"optimizer wrapper constructor\" registry tree. As a workaround, the current \"optimizer wrapper constructor\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"optimizer\" registry tree. As a workaround, the current \"optimizer\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. 
Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"optim_wrapper\" registry tree. As a workaround, the current \"optim_wrapper\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"parameter scheduler\" registry tree. As a workaround, the current \"parameter scheduler\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"parameter scheduler\" registry tree. As a workaround, the current \"parameter scheduler\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"parameter scheduler\" registry tree. As a workaround, the current \"parameter scheduler\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"parameter scheduler\" registry tree. As a workaround, the current \"parameter scheduler\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"data sampler\" registry tree. As a workaround, the current \"data sampler\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:08 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"weight initializer\" registry tree. As a workaround, the current \"weight initializer\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. 
Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:08 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - load model from: https://download.openmmlab.com/mmpose/pretrain_models/hrnet_w32-36af842e.pth\n", + "09/15 12:42:08 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - http loads checkpoint from path: https://download.openmmlab.com/mmpose/pretrain_models/hrnet_w32-36af842e.pth\n", + "09/15 12:42:09 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - The model and loaded state dict do not match exactly\n", + "\n", + "unexpected key in source state_dict: head.0.0.0.conv1.weight, head.0.0.0.bn1.weight, head.0.0.0.bn1.bias, head.0.0.0.bn1.running_mean, head.0.0.0.bn1.running_var, head.0.0.0.bn1.num_batches_tracked, head.0.0.0.conv2.weight, head.0.0.0.bn2.weight, head.0.0.0.bn2.bias, head.0.0.0.bn2.running_mean, head.0.0.0.bn2.running_var, head.0.0.0.bn2.num_batches_tracked, head.0.0.0.conv3.weight, head.0.0.0.bn3.weight, head.0.0.0.bn3.bias, head.0.0.0.bn3.running_mean, head.0.0.0.bn3.running_var, head.0.0.0.bn3.num_batches_tracked, head.0.0.0.downsample.0.weight, head.0.0.0.downsample.1.weight, head.0.0.0.downsample.1.bias, head.0.0.0.downsample.1.running_mean, head.0.0.0.downsample.1.running_var, head.0.0.0.downsample.1.num_batches_tracked, head.0.1.0.conv1.weight, head.0.1.0.bn1.weight, head.0.1.0.bn1.bias, head.0.1.0.bn1.running_mean, head.0.1.0.bn1.running_var, head.0.1.0.bn1.num_batches_tracked, head.0.1.0.conv2.weight, head.0.1.0.bn2.weight, head.0.1.0.bn2.bias, head.0.1.0.bn2.running_mean, head.0.1.0.bn2.running_var, head.0.1.0.bn2.num_batches_tracked, head.0.1.0.conv3.weight, head.0.1.0.bn3.weight, head.0.1.0.bn3.bias, head.0.1.0.bn3.running_mean, head.0.1.0.bn3.running_var, head.0.1.0.bn3.num_batches_tracked, head.0.1.0.downsample.0.weight, head.0.1.0.downsample.1.weight, head.0.1.0.downsample.1.bias, head.0.1.0.downsample.1.running_mean, head.0.1.0.downsample.1.running_var, head.0.1.0.downsample.1.num_batches_tracked, head.0.2.0.conv1.weight, head.0.2.0.bn1.weight, head.0.2.0.bn1.bias, head.0.2.0.bn1.running_mean, head.0.2.0.bn1.running_var, head.0.2.0.bn1.num_batches_tracked, head.0.2.0.conv2.weight, head.0.2.0.bn2.weight, head.0.2.0.bn2.bias, head.0.2.0.bn2.running_mean, head.0.2.0.bn2.running_var, head.0.2.0.bn2.num_batches_tracked, head.0.2.0.conv3.weight, head.0.2.0.bn3.weight, head.0.2.0.bn3.bias, head.0.2.0.bn3.running_mean, head.0.2.0.bn3.running_var, head.0.2.0.bn3.num_batches_tracked, head.0.2.0.downsample.0.weight, head.0.2.0.downsample.1.weight, head.0.2.0.downsample.1.bias, head.0.2.0.downsample.1.running_mean, head.0.2.0.downsample.1.running_var, head.0.2.0.downsample.1.num_batches_tracked, head.1.0.0.conv1.weight, head.1.0.0.bn1.weight, head.1.0.0.bn1.bias, head.1.0.0.bn1.running_mean, head.1.0.0.bn1.running_var, head.1.0.0.bn1.num_batches_tracked, head.1.0.0.conv2.weight, head.1.0.0.bn2.weight, head.1.0.0.bn2.bias, head.1.0.0.bn2.running_mean, head.1.0.0.bn2.running_var, head.1.0.0.bn2.num_batches_tracked, head.1.0.0.conv3.weight, head.1.0.0.bn3.weight, head.1.0.0.bn3.bias, head.1.0.0.bn3.running_mean, head.1.0.0.bn3.running_var, head.1.0.0.bn3.num_batches_tracked, head.1.0.0.downsample.0.weight, head.1.0.0.downsample.1.weight, head.1.0.0.downsample.1.bias, head.1.0.0.downsample.1.running_mean, head.1.0.0.downsample.1.running_var, head.1.0.0.downsample.1.num_batches_tracked, head.1.1.0.conv1.weight, head.1.1.0.bn1.weight, head.1.1.0.bn1.bias, head.1.1.0.bn1.running_mean, head.1.1.0.bn1.running_var, 
head.1.1.0.bn1.num_batches_tracked, head.1.1.0.conv2.weight, head.1.1.0.bn2.weight, head.1.1.0.bn2.bias, head.1.1.0.bn2.running_mean, head.1.1.0.bn2.running_var, head.1.1.0.bn2.num_batches_tracked, head.1.1.0.conv3.weight, head.1.1.0.bn3.weight, head.1.1.0.bn3.bias, head.1.1.0.bn3.running_mean, head.1.1.0.bn3.running_var, head.1.1.0.bn3.num_batches_tracked, head.1.1.0.downsample.0.weight, head.1.1.0.downsample.1.weight, head.1.1.0.downsample.1.bias, head.1.1.0.downsample.1.running_mean, head.1.1.0.downsample.1.running_var, head.1.1.0.downsample.1.num_batches_tracked, head.2.0.0.conv1.weight, head.2.0.0.bn1.weight, head.2.0.0.bn1.bias, head.2.0.0.bn1.running_mean, head.2.0.0.bn1.running_var, head.2.0.0.bn1.num_batches_tracked, head.2.0.0.conv2.weight, head.2.0.0.bn2.weight, head.2.0.0.bn2.bias, head.2.0.0.bn2.running_mean, head.2.0.0.bn2.running_var, head.2.0.0.bn2.num_batches_tracked, head.2.0.0.conv3.weight, head.2.0.0.bn3.weight, head.2.0.0.bn3.bias, head.2.0.0.bn3.running_mean, head.2.0.0.bn3.running_var, head.2.0.0.bn3.num_batches_tracked, head.2.0.0.downsample.0.weight, head.2.0.0.downsample.1.weight, head.2.0.0.downsample.1.bias, head.2.0.0.downsample.1.running_mean, head.2.0.0.downsample.1.running_var, head.2.0.0.downsample.1.num_batches_tracked, head.3.0.0.conv1.weight, head.3.0.0.bn1.weight, head.3.0.0.bn1.bias, head.3.0.0.bn1.running_mean, head.3.0.0.bn1.running_var, head.3.0.0.bn1.num_batches_tracked, head.3.0.0.conv2.weight, head.3.0.0.bn2.weight, head.3.0.0.bn2.bias, head.3.0.0.bn2.running_mean, head.3.0.0.bn2.running_var, head.3.0.0.bn2.num_batches_tracked, head.3.0.0.conv3.weight, head.3.0.0.bn3.weight, head.3.0.0.bn3.bias, head.3.0.0.bn3.running_mean, head.3.0.0.bn3.running_var, head.3.0.0.bn3.num_batches_tracked, head.3.0.0.downsample.0.weight, head.3.0.0.downsample.1.weight, head.3.0.0.downsample.1.bias, head.3.0.0.downsample.1.running_mean, head.3.0.0.downsample.1.running_var, head.3.0.0.downsample.1.num_batches_tracked, fc.weight, fc.bias, stage4.2.fuse_layers.1.0.0.0.weight, stage4.2.fuse_layers.1.0.0.1.weight, stage4.2.fuse_layers.1.0.0.1.bias, stage4.2.fuse_layers.1.0.0.1.running_mean, stage4.2.fuse_layers.1.0.0.1.running_var, stage4.2.fuse_layers.1.0.0.1.num_batches_tracked, stage4.2.fuse_layers.1.2.0.weight, stage4.2.fuse_layers.1.2.1.weight, stage4.2.fuse_layers.1.2.1.bias, stage4.2.fuse_layers.1.2.1.running_mean, stage4.2.fuse_layers.1.2.1.running_var, stage4.2.fuse_layers.1.2.1.num_batches_tracked, stage4.2.fuse_layers.1.3.0.weight, stage4.2.fuse_layers.1.3.1.weight, stage4.2.fuse_layers.1.3.1.bias, stage4.2.fuse_layers.1.3.1.running_mean, stage4.2.fuse_layers.1.3.1.running_var, stage4.2.fuse_layers.1.3.1.num_batches_tracked, stage4.2.fuse_layers.2.0.0.0.weight, stage4.2.fuse_layers.2.0.0.1.weight, stage4.2.fuse_layers.2.0.0.1.bias, stage4.2.fuse_layers.2.0.0.1.running_mean, stage4.2.fuse_layers.2.0.0.1.running_var, stage4.2.fuse_layers.2.0.0.1.num_batches_tracked, stage4.2.fuse_layers.2.0.1.0.weight, stage4.2.fuse_layers.2.0.1.1.weight, stage4.2.fuse_layers.2.0.1.1.bias, stage4.2.fuse_layers.2.0.1.1.running_mean, stage4.2.fuse_layers.2.0.1.1.running_var, stage4.2.fuse_layers.2.0.1.1.num_batches_tracked, stage4.2.fuse_layers.2.1.0.0.weight, stage4.2.fuse_layers.2.1.0.1.weight, stage4.2.fuse_layers.2.1.0.1.bias, stage4.2.fuse_layers.2.1.0.1.running_mean, stage4.2.fuse_layers.2.1.0.1.running_var, stage4.2.fuse_layers.2.1.0.1.num_batches_tracked, stage4.2.fuse_layers.2.3.0.weight, stage4.2.fuse_layers.2.3.1.weight, stage4.2.fuse_layers.2.3.1.bias, 
stage4.2.fuse_layers.2.3.1.running_mean, stage4.2.fuse_layers.2.3.1.running_var, stage4.2.fuse_layers.2.3.1.num_batches_tracked, stage4.2.fuse_layers.3.0.0.0.weight, stage4.2.fuse_layers.3.0.0.1.weight, stage4.2.fuse_layers.3.0.0.1.bias, stage4.2.fuse_layers.3.0.0.1.running_mean, stage4.2.fuse_layers.3.0.0.1.running_var, stage4.2.fuse_layers.3.0.0.1.num_batches_tracked, stage4.2.fuse_layers.3.0.1.0.weight, stage4.2.fuse_layers.3.0.1.1.weight, stage4.2.fuse_layers.3.0.1.1.bias, stage4.2.fuse_layers.3.0.1.1.running_mean, stage4.2.fuse_layers.3.0.1.1.running_var, stage4.2.fuse_layers.3.0.1.1.num_batches_tracked, stage4.2.fuse_layers.3.0.2.0.weight, stage4.2.fuse_layers.3.0.2.1.weight, stage4.2.fuse_layers.3.0.2.1.bias, stage4.2.fuse_layers.3.0.2.1.running_mean, stage4.2.fuse_layers.3.0.2.1.running_var, stage4.2.fuse_layers.3.0.2.1.num_batches_tracked, stage4.2.fuse_layers.3.1.0.0.weight, stage4.2.fuse_layers.3.1.0.1.weight, stage4.2.fuse_layers.3.1.0.1.bias, stage4.2.fuse_layers.3.1.0.1.running_mean, stage4.2.fuse_layers.3.1.0.1.running_var, stage4.2.fuse_layers.3.1.0.1.num_batches_tracked, stage4.2.fuse_layers.3.1.1.0.weight, stage4.2.fuse_layers.3.1.1.1.weight, stage4.2.fuse_layers.3.1.1.1.bias, stage4.2.fuse_layers.3.1.1.1.running_mean, stage4.2.fuse_layers.3.1.1.1.running_var, stage4.2.fuse_layers.3.1.1.1.num_batches_tracked, stage4.2.fuse_layers.3.2.0.0.weight, stage4.2.fuse_layers.3.2.0.1.weight, stage4.2.fuse_layers.3.2.0.1.bias, stage4.2.fuse_layers.3.2.0.1.running_mean, stage4.2.fuse_layers.3.2.0.1.running_var, stage4.2.fuse_layers.3.2.0.1.num_batches_tracked\n", + "\n", + "09/15 12:42:09 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"weight initializer\" registry tree. As a workaround, the current \"weight initializer\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:09 - mmengine - \u001b[5m\u001b[4m\u001b[33mWARNING\u001b[0m - Failed to search registry with scope \"mmpose\" in the \"weight initializer\" registry tree. As a workaround, the current \"weight initializer\" registry in \"mmengine\" is used to build instance. This may cause unexpected failure when running the built modules. 
Please check whether \"mmpose\" is a correct scope, or whether the registry is initialized.\n", + "09/15 12:42:09 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Checkpoints will be saved to /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192 by HardDiskBackend.\n", + "09/15 12:42:12 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:12 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 1 epochs\n", + "09/15 12:42:13 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:13 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [1][2/2] pck/PCK@0.05: 0.009035\n", + "09/15 12:42:14 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.0090 pck/PCK@0.05 at 1 epoch is saved to best_pck/PCK@0.05_epoch_1.pth.\n", + "09/15 12:42:16 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:16 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 2 epochs\n", + "09/15 12:42:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [2][2/2] pck/PCK@0.05: 0.163666\n", + "09/15 12:42:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_1.pth is removed\n", + "09/15 12:42:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.1637 pck/PCK@0.05 at 2 epoch is saved to best_pck/PCK@0.05_epoch_2.pth.\n", + "09/15 12:42:19 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:19 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 3 epochs\n", + "09/15 12:42:21 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:21 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [3][2/2] pck/PCK@0.05: 0.201942\n", + "09/15 12:42:21 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_2.pth is removed\n", + "09/15 12:42:21 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.2019 pck/PCK@0.05 at 3 epoch is saved to best_pck/PCK@0.05_epoch_3.pth.\n", + "09/15 12:42:23 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:23 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 4 epochs\n", + "09/15 12:42:24 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:24 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [4][2/2] pck/PCK@0.05: 0.247750\n", + "09/15 12:42:24 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_3.pth is removed\n", + "09/15 12:42:25 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.2477 pck/PCK@0.05 at 4 epoch is saved to 
best_pck/PCK@0.05_epoch_4.pth.\n", + "09/15 12:42:27 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:27 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 5 epochs\n", + "09/15 12:42:28 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:28 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [5][2/2] pck/PCK@0.05: 0.296205\n", + "09/15 12:42:28 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_4.pth is removed\n", + "09/15 12:42:29 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.2962 pck/PCK@0.05 at 5 epoch is saved to best_pck/PCK@0.05_epoch_5.pth.\n", + "09/15 12:42:31 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:31 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 6 epochs\n", + "09/15 12:42:32 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:32 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [6][2/2] pck/PCK@0.05: 0.316309\n", + "09/15 12:42:32 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_5.pth is removed\n", + "09/15 12:42:33 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.3163 pck/PCK@0.05 at 6 epoch is saved to best_pck/PCK@0.05_epoch_6.pth.\n", + "09/15 12:42:35 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:35 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 7 epochs\n", + "09/15 12:42:36 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:36 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [7][2/2] pck/PCK@0.05: 0.290834\n", + "09/15 12:42:38 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:38 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 8 epochs\n", + "09/15 12:42:39 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:39 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [8][2/2] pck/PCK@0.05: 0.335645\n", + "09/15 12:42:39 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_6.pth is removed\n", + "09/15 12:42:40 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.3356 pck/PCK@0.05 at 8 epoch is saved to best_pck/PCK@0.05_epoch_8.pth.\n", + "09/15 12:42:42 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:42 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 9 epochs\n", + "09/15 12:42:43 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:43 - mmengine - 
\u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [9][2/2] pck/PCK@0.05: 0.348761\n", + "09/15 12:42:43 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_8.pth is removed\n", + "09/15 12:42:44 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.3488 pck/PCK@0.05 at 9 epoch is saved to best_pck/PCK@0.05_epoch_9.pth.\n", + "09/15 12:42:46 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:46 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 10 epochs\n", + "09/15 12:42:47 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:47 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [10][2/2] pck/PCK@0.05: 0.310204\n", + "09/15 12:42:49 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:49 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 11 epochs\n", + "09/15 12:42:50 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:50 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [11][2/2] pck/PCK@0.05: 0.338200\n", + "09/15 12:42:52 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:52 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 12 epochs\n", + "09/15 12:42:53 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:53 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [12][2/2] pck/PCK@0.05: 0.356559\n", + "09/15 12:42:53 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_9.pth is removed\n", + "09/15 12:42:54 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.3566 pck/PCK@0.05 at 12 epoch is saved to best_pck/PCK@0.05_epoch_12.pth.\n", + "09/15 12:42:56 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:42:56 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 13 epochs\n", + "09/15 12:42:57 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:42:57 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [13][2/2] pck/PCK@0.05: 0.384718\n", + "09/15 12:42:57 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_12.pth is removed\n", + "09/15 12:42:58 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.3847 pck/PCK@0.05 at 13 epoch is saved to best_pck/PCK@0.05_epoch_13.pth.\n", + "09/15 12:43:00 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:00 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 14 epochs\n", + "09/15 12:43:01 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by 
``\"bbox_size\"``)...\n", + "09/15 12:43:01 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [14][2/2] pck/PCK@0.05: 0.372036\n", + "09/15 12:43:03 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:03 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 15 epochs\n", + "09/15 12:43:04 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:04 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [15][2/2] pck/PCK@0.05: 0.331702\n", + "09/15 12:43:06 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:06 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 16 epochs\n", + "09/15 12:43:07 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:07 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [16][2/2] pck/PCK@0.05: 0.350346\n", + "09/15 12:43:09 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:09 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 17 epochs\n", + "09/15 12:43:10 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:10 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [17][2/2] pck/PCK@0.05: 0.358399\n", + "09/15 12:43:12 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:12 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 18 epochs\n", + "09/15 12:43:14 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:14 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [18][2/2] pck/PCK@0.05: 0.377378\n", + "09/15 12:43:15 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:15 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 19 epochs\n", + "09/15 12:43:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [19][2/2] pck/PCK@0.05: 0.392675\n", + "09/15 12:43:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_13.pth is removed\n", + "09/15 12:43:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.3927 pck/PCK@0.05 at 19 epoch is saved to best_pck/PCK@0.05_epoch_19.pth.\n", + "09/15 12:43:19 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:19 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 20 epochs\n", + "09/15 12:43:21 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:21 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [20][2/2] pck/PCK@0.05: 0.413536\n", + "09/15 12:43:21 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint 
/home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_19.pth is removed\n", + "09/15 12:43:21 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.4135 pck/PCK@0.05 at 20 epoch is saved to best_pck/PCK@0.05_epoch_20.pth.\n", + "09/15 12:43:23 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:23 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 21 epochs\n", + "09/15 12:43:24 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:24 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [21][2/2] pck/PCK@0.05: 0.422105\n", + "09/15 12:43:24 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_20.pth is removed\n", + "09/15 12:43:25 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.4221 pck/PCK@0.05 at 21 epoch is saved to best_pck/PCK@0.05_epoch_21.pth.\n", + "09/15 12:43:27 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:27 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 22 epochs\n", + "09/15 12:43:28 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:28 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [22][2/2] pck/PCK@0.05: 0.430300\n", + "09/15 12:43:28 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_21.pth is removed\n", + "09/15 12:43:29 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.4303 pck/PCK@0.05 at 22 epoch is saved to best_pck/PCK@0.05_epoch_22.pth.\n", + "09/15 12:43:31 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:31 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 23 epochs\n", + "09/15 12:43:32 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:32 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [23][2/2] pck/PCK@0.05: 0.440251\n", + "09/15 12:43:32 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_22.pth is removed\n", + "09/15 12:43:33 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.4403 pck/PCK@0.05 at 23 epoch is saved to best_pck/PCK@0.05_epoch_23.pth.\n", + "09/15 12:43:34 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:34 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 24 epochs\n", + "09/15 12:43:36 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:36 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [24][2/2] pck/PCK@0.05: 0.433262\n", + "09/15 12:43:38 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: 
td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:38 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 25 epochs\n", + "09/15 12:43:39 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:39 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [25][2/2] pck/PCK@0.05: 0.429440\n", + "09/15 12:43:41 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:41 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 26 epochs\n", + "09/15 12:43:42 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:42 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [26][2/2] pck/PCK@0.05: 0.423034\n", + "09/15 12:43:44 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:44 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 27 epochs\n", + "09/15 12:43:45 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:45 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [27][2/2] pck/PCK@0.05: 0.440554\n", + "09/15 12:43:45 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_23.pth is removed\n", + "09/15 12:43:46 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.4406 pck/PCK@0.05 at 27 epoch is saved to best_pck/PCK@0.05_epoch_27.pth.\n", + "09/15 12:43:48 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:48 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 28 epochs\n", + "09/15 12:43:49 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:49 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [28][2/2] pck/PCK@0.05: 0.454103\n", + "09/15 12:43:49 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The previous best checkpoint /home/PJLAB/jiangtao/Documents/git-clone/mmpose/work_dirs/hrnet_w32_coco_tiny_256x192/best_pck/PCK@0.05_epoch_27.pth is removed\n", + "09/15 12:43:50 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - The best checkpoint with 0.4541 pck/PCK@0.05 at 28 epoch is saved to best_pck/PCK@0.05_epoch_28.pth.\n", + "09/15 12:43:52 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:52 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 29 epochs\n", + "09/15 12:43:53 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:53 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [29][2/2] pck/PCK@0.05: 0.434462\n", + "09/15 12:43:55 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:55 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 30 epochs\n", + "09/15 12:43:56 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:56 - mmengine - 
\u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [30][2/2] pck/PCK@0.05: 0.434963\n", + "09/15 12:43:58 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:43:58 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 31 epochs\n", + "09/15 12:43:59 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:43:59 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [31][2/2] pck/PCK@0.05: 0.445667\n", + "09/15 12:44:01 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:44:01 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 32 epochs\n", + "09/15 12:44:03 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:44:03 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [32][2/2] pck/PCK@0.05: 0.445784\n", + "09/15 12:44:04 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:44:04 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 33 epochs\n", + "09/15 12:44:06 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:44:06 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [33][2/2] pck/PCK@0.05: 0.434502\n", + "09/15 12:44:08 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:44:08 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 34 epochs\n", + "09/15 12:44:09 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:44:09 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [34][2/2] pck/PCK@0.05: 0.435661\n", + "09/15 12:44:11 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:44:11 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 35 epochs\n", + "09/15 12:44:12 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:44:12 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [35][2/2] pck/PCK@0.05: 0.425407\n", + "09/15 12:44:14 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:44:14 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 36 epochs\n", + "09/15 12:44:15 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:44:15 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [36][2/2] pck/PCK@0.05: 0.428712\n", + "09/15 12:44:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:44:17 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 37 epochs\n", + "09/15 12:44:18 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:44:18 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [37][2/2] pck/PCK@0.05: 0.423183\n", + "09/15 12:44:20 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: 
td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:44:20 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 38 epochs\n", + "09/15 12:44:22 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:44:22 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [38][2/2] pck/PCK@0.05: 0.432350\n", + "09/15 12:44:23 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:44:23 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 39 epochs\n", + "09/15 12:44:25 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:44:25 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [39][2/2] pck/PCK@0.05: 0.423967\n", + "09/15 12:44:27 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Exp name: td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220915_124206\n", + "09/15 12:44:27 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Saving checkpoint at 40 epochs\n", + "09/15 12:44:28 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Evaluating PCKAccuracy (normalized by ``\"bbox_size\"``)...\n", + "09/15 12:44:28 - mmengine - \u001b[4m\u001b[37mINFO\u001b[0m - Epoch(val) [40][2/2] pck/PCK@0.05: 0.429198\n" + ] + }, + { + "data": { + "text/plain": [ + "TopdownPoseEstimator(\n", + " (data_preprocessor): PoseDataPreprocessor()\n", + " (backbone): HRNet(\n", + " (conv1): Conv2d(3, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (layer1): Sequential(\n", + " (0): Bottleneck(\n", + " (conv1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (downsample): Sequential(\n", + " (0): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): Bottleneck(\n", + " (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): Bottleneck(\n", + " (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, 
affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): Bottleneck(\n", + " (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (transition1): ModuleList(\n", + " (0): Sequential(\n", + " (0): Conv2d(256, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (1): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(256, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " (stage2): Sequential(\n", + " (0): HRModule(\n", + " (branches): ModuleList(\n", + " (0): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): 
ReLU(inplace=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " (fuse_layers): ModuleList(\n", + " (0): ModuleList(\n", + " (0): None\n", + " (1): Sequential(\n", + " (0): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " )\n", + " (1): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): None\n", + " )\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (transition2): ModuleList(\n", + " (0): None\n", + " (1): None\n", + " (2): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " (stage3): Sequential(\n", + " (0): HRModule(\n", + " (branches): ModuleList(\n", + " (0): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): 
BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (2): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, 
eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " (fuse_layers): ModuleList(\n", + " (0): ModuleList(\n", + " (0): None\n", + " (1): Sequential(\n", + " (0): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " (2): Sequential(\n", + " (0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=4.0, mode=nearest)\n", + " )\n", + " )\n", + " (1): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): None\n", + " (2): Sequential(\n", + " (0): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " )\n", + " (2): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (1): Sequential(\n", + " (0): Conv2d(32, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (2): None\n", + " )\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): 
HRModule(\n", + " (branches): ModuleList(\n", + " (0): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), 
bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (2): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " (fuse_layers): ModuleList(\n", + " (0): ModuleList(\n", + " (0): None\n", + " (1): Sequential(\n", + " (0): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " (2): Sequential(\n", + " (0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=4.0, mode=nearest)\n", + " )\n", + " )\n", + " (1): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): None\n", + " (2): Sequential(\n", + " (0): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " )\n", + " (2): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (1): Sequential(\n", + " (0): Conv2d(32, 128, 
kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (2): None\n", + " )\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): HRModule(\n", + " (branches): ModuleList(\n", + " (0): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " 
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (2): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " (fuse_layers): ModuleList(\n", + " (0): ModuleList(\n", + " (0): None\n", + " (1): Sequential(\n", + " (0): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " (2): Sequential(\n", + " (0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=4.0, mode=nearest)\n", + " )\n", + " )\n", + " (1): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): None\n", + " (2): Sequential(\n", + " (0): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), 
bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " )\n", + " (2): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (1): Sequential(\n", + " (0): Conv2d(32, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (2): None\n", + " )\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): HRModule(\n", + " (branches): ModuleList(\n", + " (0): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): 
BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (2): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " (fuse_layers): ModuleList(\n", + " (0): ModuleList(\n", + " (0): None\n", + " (1): Sequential(\n", + " (0): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " (2): Sequential(\n", + " (0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): 
BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=4.0, mode=nearest)\n", + " )\n", + " )\n", + " (1): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): None\n", + " (2): Sequential(\n", + " (0): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " )\n", + " (2): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (1): Sequential(\n", + " (0): Conv2d(32, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (2): None\n", + " )\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (transition3): ModuleList(\n", + " (0): None\n", + " (1): None\n", + " (2): None\n", + " (3): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " (stage4): Sequential(\n", + " (0): HRModule(\n", + " (branches): ModuleList(\n", + " (0): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 
1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (2): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): 
ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (3): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " (fuse_layers): ModuleList(\n", + " (0): ModuleList(\n", + " (0): None\n", + " (1): Sequential(\n", + " (0): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " (2): Sequential(\n", + " (0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=4.0, mode=nearest)\n", + " )\n", + " (3): Sequential(\n", + " (0): Conv2d(256, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=8.0, mode=nearest)\n", + " )\n", + " )\n", + " (1): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): None\n", + " (2): Sequential(\n", + " 
(0): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " (3): Sequential(\n", + " (0): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=4.0, mode=nearest)\n", + " )\n", + " )\n", + " (2): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (1): Sequential(\n", + " (0): Conv2d(32, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (2): None\n", + " (3): Sequential(\n", + " (0): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " )\n", + " (3): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (1): Sequential(\n", + " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (2): Sequential(\n", + " (0): Conv2d(32, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (1): Sequential(\n", + " (0): Conv2d(64, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (2): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (3): None\n", + " )\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): HRModule(\n", + " (branches): ModuleList(\n", + " (0): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), 
stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (2): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): 
BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (3): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " 
)\n", + " )\n", + " (fuse_layers): ModuleList(\n", + " (0): ModuleList(\n", + " (0): None\n", + " (1): Sequential(\n", + " (0): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " (2): Sequential(\n", + " (0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=4.0, mode=nearest)\n", + " )\n", + " (3): Sequential(\n", + " (0): Conv2d(256, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=8.0, mode=nearest)\n", + " )\n", + " )\n", + " (1): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): None\n", + " (2): Sequential(\n", + " (0): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " (3): Sequential(\n", + " (0): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=4.0, mode=nearest)\n", + " )\n", + " )\n", + " (2): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (1): Sequential(\n", + " (0): Conv2d(32, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (2): None\n", + " (3): Sequential(\n", + " (0): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " )\n", + " (3): ModuleList(\n", + " (0): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (1): Sequential(\n", + " (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (2): Sequential(\n", + " (0): Conv2d(32, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " 
)\n", + " (1): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " )\n", + " (1): Sequential(\n", + " (0): Conv2d(64, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (2): Sequential(\n", + " (0): Sequential(\n", + " (0): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (3): None\n", + " )\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): HRModule(\n", + " (branches): ModuleList(\n", + " (0): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, 
eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (2): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (3): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, 
track_running_stats=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " (fuse_layers): ModuleList(\n", + " (0): ModuleList(\n", + " (0): None\n", + " (1): Sequential(\n", + " (0): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=2.0, mode=nearest)\n", + " )\n", + " (2): Sequential(\n", + " (0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=4.0, mode=nearest)\n", + " )\n", + " (3): Sequential(\n", + " (0): Conv2d(256, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Upsample(scale_factor=8.0, mode=nearest)\n", + " )\n", + " )\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " init_cfg={'type': 'Pretrained', 'checkpoint': 'https://download.openmmlab.com/mmpose/pretrain_models/hrnet_w32-36af842e.pth'}\n", + " (head): HeatmapHead(\n", + " (loss_module): KeypointMSELoss(\n", + " (criterion): MSELoss()\n", + " )\n", + " (deconv_layers): Identity()\n", + " (conv_layers): Identity()\n", + " (final_layer): Conv2d(32, 17, kernel_size=(1, 1), stride=(1, 1))\n", + " )\n", + " init_cfg=[{'type': 'Normal', 'layer': ['Conv2d', 'ConvTranspose2d'], 'std': 0.001}, {'type': 'Constant', 'layer': 'BatchNorm2d', 'val': 1}]\n", + ")" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from mmengine.config import Config, DictAction\n", + "from mmengine.runner import Runner\n", + "\n", + "# set preprocess configs to model\n", + "cfg.model.setdefault('data_preprocessor', cfg.get('preprocess_cfg', {}))\n", + "\n", + "# build the runner from config\n", + "runner = Runner.from_cfg(cfg)\n", + "\n", + "# start training\n", + "runner.train()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "sdLwcaojhE2T" + }, + "source": [ + "#### Note\n", + "The recommended best practice is to convert your customized data into COCO format." 
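To make the note above concrete, here is a minimal sketch of what a COCO-style keypoint annotation file could look like for a custom dataset. The field names follow the standard COCO keypoint schema; the file name, keypoint names, category, and coordinate values below are placeholders for illustration and are not part of this notebook.

```python
import json

# Minimal COCO-style keypoint annotation skeleton (all values are placeholders).
coco_skeleton = {
    "images": [
        {"id": 1, "file_name": "000001.jpg", "width": 640, "height": 480},
    ],
    "annotations": [
        {
            "id": 1,
            "image_id": 1,
            "category_id": 1,
            # Flattened (x, y, visibility) triplets, one per keypoint.
            "keypoints": [120.0, 80.0, 2, 150.0, 85.0, 2],
            "num_keypoints": 2,
            "bbox": [100.0, 60.0, 120.0, 200.0],  # [x, y, w, h]
            "area": 24000.0,
            "iscrowd": 0,
        },
    ],
    "categories": [
        {
            "id": 1,
            "name": "person",
            "keypoints": ["left_eye", "right_eye"],
            "skeleton": [[1, 2]],
        },
    ],
}

# Write the annotation file; the path here is only an example.
with open("train.json", "w") as f:
    json.dump(coco_skeleton, f)
```

Once annotations are in this form, the dataset section of the config typically only needs to point at the JSON file and the image root, and the training flow shown above (building the runner from the config and calling `runner.train()`) can stay unchanged.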
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "zJyteZNGqwNk" + }, + "outputs": [], + "source": [] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "provenance": [] + }, + "gpuClass": "standard", + "kernelspec": { + "display_name": "dev2.0", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.5" + }, + "vscode": { + "interpreter": { + "hash": "383ba00087b5a9caebf3648b758a31e474cc01be975489b58f119fa4bc17e1f8" + } + }, + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "08e0412b8dd54d28a26c232e75ea6088": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_d2ee56f920a245d9875de8e37596a5c8", + "max": 132594821, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_b5f8c86d48a04afa997fc137e1acd716", + "value": 132594821 + } + }, + "1c1b09d91dec4e3dadefe953daf50745": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "2a079d9c0b9845318e6c612ca9601b86": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_3554753622334094961a47daf9362c59", + "IPY_MODEL_08e0412b8dd54d28a26c232e75ea6088", + "IPY_MODEL_558a9420b0b34be2a2ca8a8b8af9cbfc" + ], + "layout": "IPY_MODEL_a9bd3e477f07449788f0e95e3cd13ddc" + } + }, + "3554753622334094961a47daf9362c59": { + "model_module": "@jupyter-widgets/controls", + 
"model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_5b2ee1f3e78d4cd993009d04baf76b24", + "placeholder": "​", + "style": "IPY_MODEL_a3e5aa31c3f644b5a677ec49fe2e0832", + "value": "100%" + } + }, + "558a9420b0b34be2a2ca8a8b8af9cbfc": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_1c1b09d91dec4e3dadefe953daf50745", + "placeholder": "​", + "style": "IPY_MODEL_6af448aebdb744b98a2807f66b1d6e5d", + "value": " 126M/126M [00:14<00:00, 9.32MB/s]" + } + }, + "5b2ee1f3e78d4cd993009d04baf76b24": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "6af448aebdb744b98a2807f66b1d6e5d": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "a3e5aa31c3f644b5a677ec49fe2e0832": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "a9bd3e477f07449788f0e95e3cd13ddc": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": 
"LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "b5f8c86d48a04afa997fc137e1acd716": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "d2ee56f920a245d9875de8e37596a5c8": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + } + } + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/demo/body3d_pose_lifter_demo.py b/demo/body3d_pose_lifter_demo.py index 840cd4edc9..84539484fe 100644 --- a/demo/body3d_pose_lifter_demo.py +++ b/demo/body3d_pose_lifter_demo.py @@ -1,481 +1,481 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import mimetypes -import os -import time -from argparse import ArgumentParser -from functools import partial - -import cv2 -import json_tricks as json -import mmcv -import mmengine -import numpy as np -from mmengine.structures import InstanceData - -from mmpose.apis import (_track_by_iou, _track_by_oks, collect_multi_frames, - convert_keypoint_definition, extract_pose_sequence, - inference_pose_lifter_model, inference_topdown, - init_model) -from mmpose.models.pose_estimators import PoseLifter -from mmpose.models.pose_estimators.topdown import TopdownPoseEstimator -from mmpose.registry import VISUALIZERS -from mmpose.structures import (PoseDataSample, merge_data_samples, - split_instances) -from mmpose.utils import adapt_mmdet_pipeline - -try: - from mmdet.apis import inference_detector, init_detector - has_mmdet = True -except (ImportError, ModuleNotFoundError): - has_mmdet = False - - -def parse_args(): - parser = ArgumentParser() - parser.add_argument('det_config', help='Config file for detection') - parser.add_argument('det_checkpoint', help='Checkpoint file for detection') - parser.add_argument( - 'pose_estimator_config', - type=str, - default=None, - help='Config file for the 1st stage 2D pose estimator') - parser.add_argument( - 'pose_estimator_checkpoint', - type=str, - default=None, - help='Checkpoint file for the 1st stage 2D pose estimator') - parser.add_argument( - 'pose_lifter_config', - help='Config file for the 2nd stage pose lifter model') - parser.add_argument( - 'pose_lifter_checkpoint', - help='Checkpoint file for the 2nd stage pose lifter model') - parser.add_argument('--input', type=str, default='', help='Video path') - parser.add_argument( - '--show', - action='store_true', - default=False, - help='Whether to show visualizations') - parser.add_argument( - '--rebase-keypoint-height', - action='store_true', - help='Rebase the predicted 3D pose so its lowest keypoint has a ' - 'height of 0 (landing on the ground). This is useful for ' - 'visualization when the model do not predict the global position ' - 'of the 3D pose.') - parser.add_argument( - '--norm-pose-2d', - action='store_true', - help='Scale the bbox (along with the 2D pose) to the average bbox ' - 'scale of the dataset, and move the bbox (along with the 2D pose) to ' - 'the average bbox center of the dataset. This is useful when bbox ' - 'is small, especially in multi-person scenarios.') - parser.add_argument( - '--num-instances', - type=int, - default=-1, - help='The number of 3D poses to be visualized in every frame. If ' - 'less than 0, it will be set to the number of pose results in the ' - 'first frame.') - parser.add_argument( - '--output-root', - type=str, - default='', - help='Root of the output video file. 
' - 'Default not saving the visualization video.') - parser.add_argument( - '--save-predictions', - action='store_true', - default=False, - help='whether to save predicted results') - parser.add_argument( - '--device', default='cuda:0', help='Device used for inference') - parser.add_argument( - '--det-cat-id', - type=int, - default=0, - help='Category id for bounding box detection model') - parser.add_argument( - '--bbox-thr', - type=float, - default=0.9, - help='Bounding box score threshold') - parser.add_argument('--kpt-thr', type=float, default=0.3) - parser.add_argument( - '--use-oks-tracking', action='store_true', help='Using OKS tracking') - parser.add_argument( - '--tracking-thr', type=float, default=0.3, help='Tracking threshold') - parser.add_argument( - '--show-interval', type=int, default=0, help='Sleep seconds per frame') - parser.add_argument( - '--thickness', - type=int, - default=1, - help='Link thickness for visualization') - parser.add_argument( - '--radius', - type=int, - default=3, - help='Keypoint radius for visualization') - parser.add_argument( - '--use-multi-frames', - action='store_true', - default=False, - help='whether to use multi frames for inference in the 2D pose' - 'detection stage. Default: False.') - - args = parser.parse_args() - return args - - -def get_area(results): - for i, data_sample in enumerate(results): - pred_instance = data_sample.pred_instances.cpu().numpy() - if 'bboxes' in pred_instance: - bboxes = pred_instance.bboxes - results[i].pred_instances.set_field( - np.array([(bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) - for bbox in bboxes]), 'areas') - else: - keypoints = pred_instance.keypoints - areas, bboxes = [], [] - for keypoint in keypoints: - xmin = np.min(keypoint[:, 0][keypoint[:, 0] > 0], initial=1e10) - xmax = np.max(keypoint[:, 0]) - ymin = np.min(keypoint[:, 1][keypoint[:, 1] > 0], initial=1e10) - ymax = np.max(keypoint[:, 1]) - areas.append((xmax - xmin) * (ymax - ymin)) - bboxes.append([xmin, ymin, xmax, ymax]) - results[i].pred_instances.areas = np.array(areas) - results[i].pred_instances.bboxes = np.array(bboxes) - return results - - -def get_pose_est_results(args, pose_estimator, frame, bboxes, - pose_est_results_last, next_id, pose_lift_dataset): - pose_det_dataset = pose_estimator.cfg.test_dataloader.dataset - - # make person results for current image - pose_est_results = inference_topdown(pose_estimator, frame, bboxes) - - pose_est_results = get_area(pose_est_results) - if args.use_oks_tracking: - _track = partial(_track_by_oks) - else: - _track = _track_by_iou - - for i, result in enumerate(pose_est_results): - track_id, pose_est_results_last, match_result = _track( - result, pose_est_results_last, args.tracking_thr) - if track_id == -1: - pred_instances = result.pred_instances.cpu().numpy() - keypoints = pred_instances.keypoints - if np.count_nonzero(keypoints[:, :, 1]) >= 3: - pose_est_results[i].set_field(next_id, 'track_id') - next_id += 1 - else: - # If the number of keypoints detected is small, - # delete that person instance. 
- keypoints[:, :, 1] = -10 - pose_est_results[i].pred_instances.set_field( - keypoints, 'keypoints') - bboxes = pred_instances.bboxes * 0 - pose_est_results[i].pred_instances.set_field(bboxes, 'bboxes') - pose_est_results[i].set_field(-1, 'track_id') - pose_est_results[i].set_field(pred_instances, 'pred_instances') - else: - pose_est_results[i].set_field(track_id, 'track_id') - - del match_result - - pose_est_results_converted = [] - for pose_est_result in pose_est_results: - pose_est_result_converted = PoseDataSample() - gt_instances = InstanceData() - pred_instances = InstanceData() - for k in pose_est_result.gt_instances.keys(): - gt_instances.set_field(pose_est_result.gt_instances[k], k) - for k in pose_est_result.pred_instances.keys(): - pred_instances.set_field(pose_est_result.pred_instances[k], k) - pose_est_result_converted.gt_instances = gt_instances - pose_est_result_converted.pred_instances = pred_instances - pose_est_result_converted.track_id = pose_est_result.track_id - - keypoints = convert_keypoint_definition(pred_instances.keypoints, - pose_det_dataset['type'], - pose_lift_dataset['type']) - pose_est_result_converted.pred_instances.keypoints = keypoints - pose_est_results_converted.append(pose_est_result_converted) - return pose_est_results, pose_est_results_converted, next_id - - -def get_pose_lift_results(args, visualizer, pose_lifter, pose_est_results_list, - frame, frame_idx, pose_est_results): - pose_lift_dataset = pose_lifter.cfg.test_dataloader.dataset - # extract and pad input pose2d sequence - pose_seq_2d = extract_pose_sequence( - pose_est_results_list, - frame_idx=frame_idx, - causal=pose_lift_dataset.get('causal', False), - seq_len=pose_lift_dataset.get('seq_len', 1), - step=pose_lift_dataset.get('seq_step', 1)) - - # 2D-to-3D pose lifting - width, height = frame.shape[:2] - pose_lift_results = inference_pose_lifter_model( - pose_lifter, - pose_seq_2d, - image_size=(width, height), - norm_pose_2d=args.norm_pose_2d) - - # Pose processing - for idx, pose_lift_res in enumerate(pose_lift_results): - pose_lift_res.track_id = pose_est_results[idx].get('track_id', 1e4) - - pred_instances = pose_lift_res.pred_instances - keypoints = pred_instances.keypoints - keypoint_scores = pred_instances.keypoint_scores - if keypoint_scores.ndim == 3: - keypoint_scores = np.squeeze(keypoint_scores, axis=1) - pose_lift_results[ - idx].pred_instances.keypoint_scores = keypoint_scores - if keypoints.ndim == 4: - keypoints = np.squeeze(keypoints, axis=1) - - keypoints = keypoints[..., [0, 2, 1]] - keypoints[..., 0] = -keypoints[..., 0] - keypoints[..., 2] = -keypoints[..., 2] - - # rebase height (z-axis) - if args.rebase_keypoint_height: - keypoints[..., 2] -= np.min( - keypoints[..., 2], axis=-1, keepdims=True) - - pose_lift_results[idx].pred_instances.keypoints = keypoints - - pose_lift_results = sorted( - pose_lift_results, key=lambda x: x.get('track_id', 1e4)) - - pred_3d_data_samples = merge_data_samples(pose_lift_results) - det_data_sample = merge_data_samples(pose_est_results) - - if args.num_instances < 0: - args.num_instances = len(pose_lift_results) - - # Visualization - if visualizer is not None: - visualizer.add_datasample( - 'result', - frame, - data_sample=pred_3d_data_samples, - det_data_sample=det_data_sample, - draw_gt=False, - show=args.show, - draw_bbox=True, - kpt_thr=args.kpt_thr, - num_instances=args.num_instances, - wait_time=args.show_interval) - - return pred_3d_data_samples.get('pred_instances', None) - - -def get_bbox(args, detector, frame): - det_result = 
inference_detector(detector, frame) - pred_instance = det_result.pred_instances.cpu().numpy() - - bboxes = pred_instance.bboxes - bboxes = bboxes[np.logical_and(pred_instance.labels == args.det_cat_id, - pred_instance.scores > args.bbox_thr)] - return bboxes - - -def main(): - assert has_mmdet, 'Please install mmdet to run the demo.' - - args = parse_args() - - assert args.show or (args.output_root != '') - assert args.input != '' - assert args.det_config is not None - assert args.det_checkpoint is not None - - detector = init_detector( - args.det_config, args.det_checkpoint, device=args.device.lower()) - detector.cfg = adapt_mmdet_pipeline(detector.cfg) - - pose_estimator = init_model( - args.pose_estimator_config, - args.pose_estimator_checkpoint, - device=args.device.lower()) - - assert isinstance(pose_estimator, TopdownPoseEstimator), 'Only "TopDown"' \ - 'model is supported for the 1st stage (2D pose detection)' - - det_kpt_color = pose_estimator.dataset_meta.get('keypoint_colors', None) - det_dataset_skeleton = pose_estimator.dataset_meta.get( - 'skeleton_links', None) - det_dataset_link_color = pose_estimator.dataset_meta.get( - 'skeleton_link_colors', None) - - # frame index offsets for inference, used in multi-frame inference setting - if args.use_multi_frames: - assert 'frame_indices' in pose_estimator.cfg.test_dataloader.dataset - indices = pose_estimator.cfg.test_dataloader.dataset[ - 'frame_indices_test'] - - pose_lifter = init_model( - args.pose_lifter_config, - args.pose_lifter_checkpoint, - device=args.device.lower()) - - assert isinstance(pose_lifter, PoseLifter), \ - 'Only "PoseLifter" model is supported for the 2nd stage ' \ - '(2D-to-3D lifting)' - pose_lift_dataset = pose_lifter.cfg.test_dataloader.dataset - - pose_lifter.cfg.visualizer.radius = args.radius - pose_lifter.cfg.visualizer.line_width = args.thickness - pose_lifter.cfg.visualizer.det_kpt_color = det_kpt_color - pose_lifter.cfg.visualizer.det_dataset_skeleton = det_dataset_skeleton - pose_lifter.cfg.visualizer.det_dataset_link_color = det_dataset_link_color - visualizer = VISUALIZERS.build(pose_lifter.cfg.visualizer) - - # the dataset_meta is loaded from the checkpoint - visualizer.set_dataset_meta(pose_lifter.dataset_meta) - - if args.input == 'webcam': - input_type = 'webcam' - else: - input_type = mimetypes.guess_type(args.input)[0].split('/')[0] - - if args.output_root == '': - save_output = False - else: - mmengine.mkdir_or_exist(args.output_root) - output_file = os.path.join(args.output_root, - os.path.basename(args.input)) - if args.input == 'webcam': - output_file += '.mp4' - save_output = True - - if args.save_predictions: - assert args.output_root != '' - args.pred_save_path = f'{args.output_root}/results_' \ - f'{os.path.splitext(os.path.basename(args.input))[0]}.json' - - if save_output: - fourcc = cv2.VideoWriter_fourcc(*'mp4v') - - pose_est_results_list = [] - pred_instances_list = [] - if input_type == 'image': - frame = mmcv.imread(args.input, channel_order='rgb') - - # First stage: 2D pose detection - bboxes = get_bbox(args, detector, frame) - pose_est_results, pose_est_results_converted, _ = get_pose_est_results( - args, pose_estimator, frame, bboxes, [], 0, pose_lift_dataset) - pose_est_results_list.append(pose_est_results_converted.copy()) - pred_3d_pred = get_pose_lift_results(args, visualizer, pose_lifter, - pose_est_results_list, frame, 0, - pose_est_results) - - if args.save_predictions: - # save prediction results - pred_instances_list = split_instances(pred_3d_pred) - - if 
save_output: - frame_vis = visualizer.get_image() - mmcv.imwrite(mmcv.rgb2bgr(frame_vis), output_file) - - elif input_type in ['webcam', 'video']: - next_id = 0 - pose_est_results_converted = [] - - if args.input == 'webcam': - video = cv2.VideoCapture(0) - else: - video = cv2.VideoCapture(args.input) - - (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.') - if int(major_ver) < 3: - fps = video.get(cv2.cv.CV_CAP_PROP_FPS) - else: - fps = video.get(cv2.CAP_PROP_FPS) - - video_writer = None - frame_idx = 0 - - while video.isOpened(): - success, frame = video.read() - frame_idx += 1 - - if not success: - break - - pose_est_results_last = pose_est_results_converted - - # First stage: 2D pose detection - if args.use_multi_frames: - frames = collect_multi_frames(video, frame_idx, indices, - args.online) - - # make person results for current image - bboxes = get_bbox(args, detector, frame) - pose_est_results, pose_est_results_converted, next_id = get_pose_est_results( # noqa: E501 - args, pose_estimator, - frames if args.use_multi_frames else frame, bboxes, - pose_est_results_last, next_id, pose_lift_dataset) - pose_est_results_list.append(pose_est_results_converted.copy()) - - # Second stage: Pose lifting - pred_3d_pred = get_pose_lift_results(args, visualizer, pose_lifter, - pose_est_results_list, - mmcv.bgr2rgb(frame), - frame_idx, pose_est_results) - - if args.save_predictions: - # save prediction results - pred_instances_list.append( - dict( - frame_id=frame_idx, - instances=split_instances(pred_3d_pred))) - - if save_output: - frame_vis = visualizer.get_image() - if video_writer is None: - # the size of the image with visualization may vary - # depending on the presence of heatmaps - video_writer = cv2.VideoWriter(output_file, fourcc, fps, - (frame_vis.shape[1], - frame_vis.shape[0])) - - video_writer.write(mmcv.rgb2bgr(frame_vis)) - - # press ESC to exit - if cv2.waitKey(5) & 0xFF == 27: - break - time.sleep(args.show_interval) - - video.release() - - if video_writer: - video_writer.release() - else: - args.save_predictions = False - raise ValueError( - f'file {os.path.basename(args.input)} has invalid format.') - - if args.save_predictions: - with open(args.pred_save_path, 'w') as f: - json.dump( - dict( - meta_info=pose_lifter.dataset_meta, - instance_info=pred_instances_list), - f, - indent='\t') - print(f'predictions have been saved at {args.pred_save_path}') - - -if __name__ == '__main__': - main() +# Copyright (c) OpenMMLab. All rights reserved. 
+import mimetypes +import os +import time +from argparse import ArgumentParser +from functools import partial + +import cv2 +import json_tricks as json +import mmcv +import mmengine +import numpy as np +from mmengine.structures import InstanceData + +from mmpose.apis import (_track_by_iou, _track_by_oks, collect_multi_frames, + convert_keypoint_definition, extract_pose_sequence, + inference_pose_lifter_model, inference_topdown, + init_model) +from mmpose.models.pose_estimators import PoseLifter +from mmpose.models.pose_estimators.topdown import TopdownPoseEstimator +from mmpose.registry import VISUALIZERS +from mmpose.structures import (PoseDataSample, merge_data_samples, + split_instances) +from mmpose.utils import adapt_mmdet_pipeline + +try: + from mmdet.apis import inference_detector, init_detector + has_mmdet = True +except (ImportError, ModuleNotFoundError): + has_mmdet = False + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument('det_config', help='Config file for detection') + parser.add_argument('det_checkpoint', help='Checkpoint file for detection') + parser.add_argument( + 'pose_estimator_config', + type=str, + default=None, + help='Config file for the 1st stage 2D pose estimator') + parser.add_argument( + 'pose_estimator_checkpoint', + type=str, + default=None, + help='Checkpoint file for the 1st stage 2D pose estimator') + parser.add_argument( + 'pose_lifter_config', + help='Config file for the 2nd stage pose lifter model') + parser.add_argument( + 'pose_lifter_checkpoint', + help='Checkpoint file for the 2nd stage pose lifter model') + parser.add_argument('--input', type=str, default='', help='Video path') + parser.add_argument( + '--show', + action='store_true', + default=False, + help='Whether to show visualizations') + parser.add_argument( + '--rebase-keypoint-height', + action='store_true', + help='Rebase the predicted 3D pose so its lowest keypoint has a ' + 'height of 0 (landing on the ground). This is useful for ' + 'visualization when the model do not predict the global position ' + 'of the 3D pose.') + parser.add_argument( + '--norm-pose-2d', + action='store_true', + help='Scale the bbox (along with the 2D pose) to the average bbox ' + 'scale of the dataset, and move the bbox (along with the 2D pose) to ' + 'the average bbox center of the dataset. This is useful when bbox ' + 'is small, especially in multi-person scenarios.') + parser.add_argument( + '--num-instances', + type=int, + default=-1, + help='The number of 3D poses to be visualized in every frame. If ' + 'less than 0, it will be set to the number of pose results in the ' + 'first frame.') + parser.add_argument( + '--output-root', + type=str, + default='', + help='Root of the output video file. 
' + 'Default not saving the visualization video.') + parser.add_argument( + '--save-predictions', + action='store_true', + default=False, + help='whether to save predicted results') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--det-cat-id', + type=int, + default=0, + help='Category id for bounding box detection model') + parser.add_argument( + '--bbox-thr', + type=float, + default=0.9, + help='Bounding box score threshold') + parser.add_argument('--kpt-thr', type=float, default=0.3) + parser.add_argument( + '--use-oks-tracking', action='store_true', help='Using OKS tracking') + parser.add_argument( + '--tracking-thr', type=float, default=0.3, help='Tracking threshold') + parser.add_argument( + '--show-interval', type=int, default=0, help='Sleep seconds per frame') + parser.add_argument( + '--thickness', + type=int, + default=1, + help='Link thickness for visualization') + parser.add_argument( + '--radius', + type=int, + default=3, + help='Keypoint radius for visualization') + parser.add_argument( + '--use-multi-frames', + action='store_true', + default=False, + help='whether to use multi frames for inference in the 2D pose' + 'detection stage. Default: False.') + + args = parser.parse_args() + return args + + +def get_area(results): + for i, data_sample in enumerate(results): + pred_instance = data_sample.pred_instances.cpu().numpy() + if 'bboxes' in pred_instance: + bboxes = pred_instance.bboxes + results[i].pred_instances.set_field( + np.array([(bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) + for bbox in bboxes]), 'areas') + else: + keypoints = pred_instance.keypoints + areas, bboxes = [], [] + for keypoint in keypoints: + xmin = np.min(keypoint[:, 0][keypoint[:, 0] > 0], initial=1e10) + xmax = np.max(keypoint[:, 0]) + ymin = np.min(keypoint[:, 1][keypoint[:, 1] > 0], initial=1e10) + ymax = np.max(keypoint[:, 1]) + areas.append((xmax - xmin) * (ymax - ymin)) + bboxes.append([xmin, ymin, xmax, ymax]) + results[i].pred_instances.areas = np.array(areas) + results[i].pred_instances.bboxes = np.array(bboxes) + return results + + +def get_pose_est_results(args, pose_estimator, frame, bboxes, + pose_est_results_last, next_id, pose_lift_dataset): + pose_det_dataset = pose_estimator.cfg.test_dataloader.dataset + + # make person results for current image + pose_est_results = inference_topdown(pose_estimator, frame, bboxes) + + pose_est_results = get_area(pose_est_results) + if args.use_oks_tracking: + _track = partial(_track_by_oks) + else: + _track = _track_by_iou + + for i, result in enumerate(pose_est_results): + track_id, pose_est_results_last, match_result = _track( + result, pose_est_results_last, args.tracking_thr) + if track_id == -1: + pred_instances = result.pred_instances.cpu().numpy() + keypoints = pred_instances.keypoints + if np.count_nonzero(keypoints[:, :, 1]) >= 3: + pose_est_results[i].set_field(next_id, 'track_id') + next_id += 1 + else: + # If the number of keypoints detected is small, + # delete that person instance. 
+ keypoints[:, :, 1] = -10 + pose_est_results[i].pred_instances.set_field( + keypoints, 'keypoints') + bboxes = pred_instances.bboxes * 0 + pose_est_results[i].pred_instances.set_field(bboxes, 'bboxes') + pose_est_results[i].set_field(-1, 'track_id') + pose_est_results[i].set_field(pred_instances, 'pred_instances') + else: + pose_est_results[i].set_field(track_id, 'track_id') + + del match_result + + pose_est_results_converted = [] + for pose_est_result in pose_est_results: + pose_est_result_converted = PoseDataSample() + gt_instances = InstanceData() + pred_instances = InstanceData() + for k in pose_est_result.gt_instances.keys(): + gt_instances.set_field(pose_est_result.gt_instances[k], k) + for k in pose_est_result.pred_instances.keys(): + pred_instances.set_field(pose_est_result.pred_instances[k], k) + pose_est_result_converted.gt_instances = gt_instances + pose_est_result_converted.pred_instances = pred_instances + pose_est_result_converted.track_id = pose_est_result.track_id + + keypoints = convert_keypoint_definition(pred_instances.keypoints, + pose_det_dataset['type'], + pose_lift_dataset['type']) + pose_est_result_converted.pred_instances.keypoints = keypoints + pose_est_results_converted.append(pose_est_result_converted) + return pose_est_results, pose_est_results_converted, next_id + + +def get_pose_lift_results(args, visualizer, pose_lifter, pose_est_results_list, + frame, frame_idx, pose_est_results): + pose_lift_dataset = pose_lifter.cfg.test_dataloader.dataset + # extract and pad input pose2d sequence + pose_seq_2d = extract_pose_sequence( + pose_est_results_list, + frame_idx=frame_idx, + causal=pose_lift_dataset.get('causal', False), + seq_len=pose_lift_dataset.get('seq_len', 1), + step=pose_lift_dataset.get('seq_step', 1)) + + # 2D-to-3D pose lifting + width, height = frame.shape[:2] + pose_lift_results = inference_pose_lifter_model( + pose_lifter, + pose_seq_2d, + image_size=(width, height), + norm_pose_2d=args.norm_pose_2d) + + # Pose processing + for idx, pose_lift_res in enumerate(pose_lift_results): + pose_lift_res.track_id = pose_est_results[idx].get('track_id', 1e4) + + pred_instances = pose_lift_res.pred_instances + keypoints = pred_instances.keypoints + keypoint_scores = pred_instances.keypoint_scores + if keypoint_scores.ndim == 3: + keypoint_scores = np.squeeze(keypoint_scores, axis=1) + pose_lift_results[ + idx].pred_instances.keypoint_scores = keypoint_scores + if keypoints.ndim == 4: + keypoints = np.squeeze(keypoints, axis=1) + + keypoints = keypoints[..., [0, 2, 1]] + keypoints[..., 0] = -keypoints[..., 0] + keypoints[..., 2] = -keypoints[..., 2] + + # rebase height (z-axis) + if args.rebase_keypoint_height: + keypoints[..., 2] -= np.min( + keypoints[..., 2], axis=-1, keepdims=True) + + pose_lift_results[idx].pred_instances.keypoints = keypoints + + pose_lift_results = sorted( + pose_lift_results, key=lambda x: x.get('track_id', 1e4)) + + pred_3d_data_samples = merge_data_samples(pose_lift_results) + det_data_sample = merge_data_samples(pose_est_results) + + if args.num_instances < 0: + args.num_instances = len(pose_lift_results) + + # Visualization + if visualizer is not None: + visualizer.add_datasample( + 'result', + frame, + data_sample=pred_3d_data_samples, + det_data_sample=det_data_sample, + draw_gt=False, + show=args.show, + draw_bbox=True, + kpt_thr=args.kpt_thr, + num_instances=args.num_instances, + wait_time=args.show_interval) + + return pred_3d_data_samples.get('pred_instances', None) + + +def get_bbox(args, detector, frame): + det_result = 
inference_detector(detector, frame) + pred_instance = det_result.pred_instances.cpu().numpy() + + bboxes = pred_instance.bboxes + bboxes = bboxes[np.logical_and(pred_instance.labels == args.det_cat_id, + pred_instance.scores > args.bbox_thr)] + return bboxes + + +def main(): + assert has_mmdet, 'Please install mmdet to run the demo.' + + args = parse_args() + + assert args.show or (args.output_root != '') + assert args.input != '' + assert args.det_config is not None + assert args.det_checkpoint is not None + + detector = init_detector( + args.det_config, args.det_checkpoint, device=args.device.lower()) + detector.cfg = adapt_mmdet_pipeline(detector.cfg) + + pose_estimator = init_model( + args.pose_estimator_config, + args.pose_estimator_checkpoint, + device=args.device.lower()) + + assert isinstance(pose_estimator, TopdownPoseEstimator), 'Only "TopDown"' \ + 'model is supported for the 1st stage (2D pose detection)' + + det_kpt_color = pose_estimator.dataset_meta.get('keypoint_colors', None) + det_dataset_skeleton = pose_estimator.dataset_meta.get( + 'skeleton_links', None) + det_dataset_link_color = pose_estimator.dataset_meta.get( + 'skeleton_link_colors', None) + + # frame index offsets for inference, used in multi-frame inference setting + if args.use_multi_frames: + assert 'frame_indices' in pose_estimator.cfg.test_dataloader.dataset + indices = pose_estimator.cfg.test_dataloader.dataset[ + 'frame_indices_test'] + + pose_lifter = init_model( + args.pose_lifter_config, + args.pose_lifter_checkpoint, + device=args.device.lower()) + + assert isinstance(pose_lifter, PoseLifter), \ + 'Only "PoseLifter" model is supported for the 2nd stage ' \ + '(2D-to-3D lifting)' + pose_lift_dataset = pose_lifter.cfg.test_dataloader.dataset + + pose_lifter.cfg.visualizer.radius = args.radius + pose_lifter.cfg.visualizer.line_width = args.thickness + pose_lifter.cfg.visualizer.det_kpt_color = det_kpt_color + pose_lifter.cfg.visualizer.det_dataset_skeleton = det_dataset_skeleton + pose_lifter.cfg.visualizer.det_dataset_link_color = det_dataset_link_color + visualizer = VISUALIZERS.build(pose_lifter.cfg.visualizer) + + # the dataset_meta is loaded from the checkpoint + visualizer.set_dataset_meta(pose_lifter.dataset_meta) + + if args.input == 'webcam': + input_type = 'webcam' + else: + input_type = mimetypes.guess_type(args.input)[0].split('/')[0] + + if args.output_root == '': + save_output = False + else: + mmengine.mkdir_or_exist(args.output_root) + output_file = os.path.join(args.output_root, + os.path.basename(args.input)) + if args.input == 'webcam': + output_file += '.mp4' + save_output = True + + if args.save_predictions: + assert args.output_root != '' + args.pred_save_path = f'{args.output_root}/results_' \ + f'{os.path.splitext(os.path.basename(args.input))[0]}.json' + + if save_output: + fourcc = cv2.VideoWriter_fourcc(*'mp4v') + + pose_est_results_list = [] + pred_instances_list = [] + if input_type == 'image': + frame = mmcv.imread(args.input, channel_order='rgb') + + # First stage: 2D pose detection + bboxes = get_bbox(args, detector, frame) + pose_est_results, pose_est_results_converted, _ = get_pose_est_results( + args, pose_estimator, frame, bboxes, [], 0, pose_lift_dataset) + pose_est_results_list.append(pose_est_results_converted.copy()) + pred_3d_pred = get_pose_lift_results(args, visualizer, pose_lifter, + pose_est_results_list, frame, 0, + pose_est_results) + + if args.save_predictions: + # save prediction results + pred_instances_list = split_instances(pred_3d_pred) + + if 
save_output: + frame_vis = visualizer.get_image() + mmcv.imwrite(mmcv.rgb2bgr(frame_vis), output_file) + + elif input_type in ['webcam', 'video']: + next_id = 0 + pose_est_results_converted = [] + + if args.input == 'webcam': + video = cv2.VideoCapture(0) + else: + video = cv2.VideoCapture(args.input) + + (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.') + if int(major_ver) < 3: + fps = video.get(cv2.cv.CV_CAP_PROP_FPS) + else: + fps = video.get(cv2.CAP_PROP_FPS) + + video_writer = None + frame_idx = 0 + + while video.isOpened(): + success, frame = video.read() + frame_idx += 1 + + if not success: + break + + pose_est_results_last = pose_est_results_converted + + # First stage: 2D pose detection + if args.use_multi_frames: + frames = collect_multi_frames(video, frame_idx, indices, + args.online) + + # make person results for current image + bboxes = get_bbox(args, detector, frame) + pose_est_results, pose_est_results_converted, next_id = get_pose_est_results( # noqa: E501 + args, pose_estimator, + frames if args.use_multi_frames else frame, bboxes, + pose_est_results_last, next_id, pose_lift_dataset) + pose_est_results_list.append(pose_est_results_converted.copy()) + + # Second stage: Pose lifting + pred_3d_pred = get_pose_lift_results(args, visualizer, pose_lifter, + pose_est_results_list, + mmcv.bgr2rgb(frame), + frame_idx, pose_est_results) + + if args.save_predictions: + # save prediction results + pred_instances_list.append( + dict( + frame_id=frame_idx, + instances=split_instances(pred_3d_pred))) + + if save_output: + frame_vis = visualizer.get_image() + if video_writer is None: + # the size of the image with visualization may vary + # depending on the presence of heatmaps + video_writer = cv2.VideoWriter(output_file, fourcc, fps, + (frame_vis.shape[1], + frame_vis.shape[0])) + + video_writer.write(mmcv.rgb2bgr(frame_vis)) + + # press ESC to exit + if cv2.waitKey(5) & 0xFF == 27: + break + time.sleep(args.show_interval) + + video.release() + + if video_writer: + video_writer.release() + else: + args.save_predictions = False + raise ValueError( + f'file {os.path.basename(args.input)} has invalid format.') + + if args.save_predictions: + with open(args.pred_save_path, 'w') as f: + json.dump( + dict( + meta_info=pose_lifter.dataset_meta, + instance_info=pred_instances_list), + f, + indent='\t') + print(f'predictions have been saved at {args.pred_save_path}') + + +if __name__ == '__main__': + main() diff --git a/demo/bottomup_demo.py b/demo/bottomup_demo.py index 3d6fee7a03..057419ec63 100644 --- a/demo/bottomup_demo.py +++ b/demo/bottomup_demo.py @@ -1,227 +1,227 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import mimetypes -import os -import time -from argparse import ArgumentParser - -import cv2 -import json_tricks as json -import mmcv -import mmengine -import numpy as np - -from mmpose.apis import inference_bottomup, init_model -from mmpose.registry import VISUALIZERS -from mmpose.structures import split_instances - - -def process_one_image(args, - img, - pose_estimator, - visualizer=None, - show_interval=0): - """Visualize predicted keypoints (and heatmaps) of one image.""" - - # inference a single image - batch_results = inference_bottomup(pose_estimator, img) - results = batch_results[0] - - # show the results - if isinstance(img, str): - img = mmcv.imread(img, channel_order='rgb') - elif isinstance(img, np.ndarray): - img = mmcv.bgr2rgb(img) - - if visualizer is not None: - visualizer.add_datasample( - 'result', - img, - data_sample=results, - draw_gt=False, - draw_bbox=False, - draw_heatmap=args.draw_heatmap, - show_kpt_idx=args.show_kpt_idx, - show=args.show, - wait_time=show_interval, - kpt_thr=args.kpt_thr) - - return results.pred_instances - - -def parse_args(): - parser = ArgumentParser() - parser.add_argument('config', help='Config file') - parser.add_argument('checkpoint', help='Checkpoint file') - parser.add_argument( - '--input', type=str, default='', help='Image/Video file') - parser.add_argument( - '--show', - action='store_true', - default=False, - help='whether to show img') - parser.add_argument( - '--output-root', - type=str, - default='', - help='root of the output img file. ' - 'Default not saving the visualization images.') - parser.add_argument( - '--save-predictions', - action='store_true', - default=False, - help='whether to save predicted results') - parser.add_argument( - '--device', default='cuda:0', help='Device used for inference') - parser.add_argument( - '--draw-heatmap', - action='store_true', - help='Visualize the predicted heatmap') - parser.add_argument( - '--show-kpt-idx', - action='store_true', - default=False, - help='Whether to show the index of keypoints') - parser.add_argument( - '--kpt-thr', type=float, default=0.3, help='Keypoint score threshold') - parser.add_argument( - '--radius', - type=int, - default=3, - help='Keypoint radius for visualization') - parser.add_argument( - '--thickness', - type=int, - default=1, - help='Link thickness for visualization') - parser.add_argument( - '--show-interval', type=int, default=0, help='Sleep seconds per frame') - args = parser.parse_args() - return args - - -def main(): - args = parse_args() - assert args.show or (args.output_root != '') - assert args.input != '' - - output_file = None - if args.output_root: - mmengine.mkdir_or_exist(args.output_root) - output_file = os.path.join(args.output_root, - os.path.basename(args.input)) - if args.input == 'webcam': - output_file += '.mp4' - - if args.save_predictions: - assert args.output_root != '' - args.pred_save_path = f'{args.output_root}/results_' \ - f'{os.path.splitext(os.path.basename(args.input))[0]}.json' - - # build the model from a config file and a checkpoint file - if args.draw_heatmap: - cfg_options = dict(model=dict(test_cfg=dict(output_heatmaps=True))) - else: - cfg_options = None - - model = init_model( - args.config, - args.checkpoint, - device=args.device, - cfg_options=cfg_options) - - # build visualizer - model.cfg.visualizer.radius = args.radius - model.cfg.visualizer.line_width = args.thickness - visualizer = VISUALIZERS.build(model.cfg.visualizer) - visualizer.set_dataset_meta(model.dataset_meta) - - if args.input == 'webcam': - 
input_type = 'webcam' - else: - input_type = mimetypes.guess_type(args.input)[0].split('/')[0] - - if input_type == 'image': - # inference - pred_instances = process_one_image( - args, args.input, model, visualizer, show_interval=0) - - if args.save_predictions: - pred_instances_list = split_instances(pred_instances) - - if output_file: - img_vis = visualizer.get_image() - mmcv.imwrite(mmcv.rgb2bgr(img_vis), output_file) - - elif input_type in ['webcam', 'video']: - - if args.input == 'webcam': - cap = cv2.VideoCapture(0) - else: - cap = cv2.VideoCapture(args.input) - - video_writer = None - pred_instances_list = [] - frame_idx = 0 - - while cap.isOpened(): - success, frame = cap.read() - frame_idx += 1 - - if not success: - break - - pred_instances = process_one_image(args, frame, model, visualizer, - 0.001) - - if args.save_predictions: - # save prediction results - pred_instances_list.append( - dict( - frame_id=frame_idx, - instances=split_instances(pred_instances))) - - # output videos - if output_file: - frame_vis = visualizer.get_image() - - if video_writer is None: - fourcc = cv2.VideoWriter_fourcc(*'mp4v') - # the size of the image with visualization may vary - # depending on the presence of heatmaps - video_writer = cv2.VideoWriter( - output_file, - fourcc, - 25, # saved fps - (frame_vis.shape[1], frame_vis.shape[0])) - - video_writer.write(mmcv.rgb2bgr(frame_vis)) - - # press ESC to exit - if cv2.waitKey(5) & 0xFF == 27: - break - - time.sleep(args.show_interval) - - if video_writer: - video_writer.release() - - cap.release() - - else: - args.save_predictions = False - raise ValueError( - f'file {os.path.basename(args.input)} has invalid format.') - - if args.save_predictions: - with open(args.pred_save_path, 'w') as f: - json.dump( - dict( - meta_info=model.dataset_meta, - instance_info=pred_instances_list), - f, - indent='\t') - print(f'predictions have been saved at {args.pred_save_path}') - - -if __name__ == '__main__': - main() +# Copyright (c) OpenMMLab. All rights reserved. +import mimetypes +import os +import time +from argparse import ArgumentParser + +import cv2 +import json_tricks as json +import mmcv +import mmengine +import numpy as np + +from mmpose.apis import inference_bottomup, init_model +from mmpose.registry import VISUALIZERS +from mmpose.structures import split_instances + + +def process_one_image(args, + img, + pose_estimator, + visualizer=None, + show_interval=0): + """Visualize predicted keypoints (and heatmaps) of one image.""" + + # inference a single image + batch_results = inference_bottomup(pose_estimator, img) + results = batch_results[0] + + # show the results + if isinstance(img, str): + img = mmcv.imread(img, channel_order='rgb') + elif isinstance(img, np.ndarray): + img = mmcv.bgr2rgb(img) + + if visualizer is not None: + visualizer.add_datasample( + 'result', + img, + data_sample=results, + draw_gt=False, + draw_bbox=False, + draw_heatmap=args.draw_heatmap, + show_kpt_idx=args.show_kpt_idx, + show=args.show, + wait_time=show_interval, + kpt_thr=args.kpt_thr) + + return results.pred_instances + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument( + '--input', type=str, default='', help='Image/Video file') + parser.add_argument( + '--show', + action='store_true', + default=False, + help='whether to show img') + parser.add_argument( + '--output-root', + type=str, + default='', + help='root of the output img file. 
' + 'Default not saving the visualization images.') + parser.add_argument( + '--save-predictions', + action='store_true', + default=False, + help='whether to save predicted results') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--draw-heatmap', + action='store_true', + help='Visualize the predicted heatmap') + parser.add_argument( + '--show-kpt-idx', + action='store_true', + default=False, + help='Whether to show the index of keypoints') + parser.add_argument( + '--kpt-thr', type=float, default=0.3, help='Keypoint score threshold') + parser.add_argument( + '--radius', + type=int, + default=3, + help='Keypoint radius for visualization') + parser.add_argument( + '--thickness', + type=int, + default=1, + help='Link thickness for visualization') + parser.add_argument( + '--show-interval', type=int, default=0, help='Sleep seconds per frame') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + assert args.show or (args.output_root != '') + assert args.input != '' + + output_file = None + if args.output_root: + mmengine.mkdir_or_exist(args.output_root) + output_file = os.path.join(args.output_root, + os.path.basename(args.input)) + if args.input == 'webcam': + output_file += '.mp4' + + if args.save_predictions: + assert args.output_root != '' + args.pred_save_path = f'{args.output_root}/results_' \ + f'{os.path.splitext(os.path.basename(args.input))[0]}.json' + + # build the model from a config file and a checkpoint file + if args.draw_heatmap: + cfg_options = dict(model=dict(test_cfg=dict(output_heatmaps=True))) + else: + cfg_options = None + + model = init_model( + args.config, + args.checkpoint, + device=args.device, + cfg_options=cfg_options) + + # build visualizer + model.cfg.visualizer.radius = args.radius + model.cfg.visualizer.line_width = args.thickness + visualizer = VISUALIZERS.build(model.cfg.visualizer) + visualizer.set_dataset_meta(model.dataset_meta) + + if args.input == 'webcam': + input_type = 'webcam' + else: + input_type = mimetypes.guess_type(args.input)[0].split('/')[0] + + if input_type == 'image': + # inference + pred_instances = process_one_image( + args, args.input, model, visualizer, show_interval=0) + + if args.save_predictions: + pred_instances_list = split_instances(pred_instances) + + if output_file: + img_vis = visualizer.get_image() + mmcv.imwrite(mmcv.rgb2bgr(img_vis), output_file) + + elif input_type in ['webcam', 'video']: + + if args.input == 'webcam': + cap = cv2.VideoCapture(0) + else: + cap = cv2.VideoCapture(args.input) + + video_writer = None + pred_instances_list = [] + frame_idx = 0 + + while cap.isOpened(): + success, frame = cap.read() + frame_idx += 1 + + if not success: + break + + pred_instances = process_one_image(args, frame, model, visualizer, + 0.001) + + if args.save_predictions: + # save prediction results + pred_instances_list.append( + dict( + frame_id=frame_idx, + instances=split_instances(pred_instances))) + + # output videos + if output_file: + frame_vis = visualizer.get_image() + + if video_writer is None: + fourcc = cv2.VideoWriter_fourcc(*'mp4v') + # the size of the image with visualization may vary + # depending on the presence of heatmaps + video_writer = cv2.VideoWriter( + output_file, + fourcc, + 25, # saved fps + (frame_vis.shape[1], frame_vis.shape[0])) + + video_writer.write(mmcv.rgb2bgr(frame_vis)) + + # press ESC to exit + if cv2.waitKey(5) & 0xFF == 27: + break + + time.sleep(args.show_interval) + + if video_writer: + 
video_writer.release() + + cap.release() + + else: + args.save_predictions = False + raise ValueError( + f'file {os.path.basename(args.input)} has invalid format.') + + if args.save_predictions: + with open(args.pred_save_path, 'w') as f: + json.dump( + dict( + meta_info=model.dataset_meta, + instance_info=pred_instances_list), + f, + indent='\t') + print(f'predictions have been saved at {args.pred_save_path}') + + +if __name__ == '__main__': + main() diff --git a/demo/docs/en/2d_animal_demo.md b/demo/docs/en/2d_animal_demo.md index aa9970395b..5114f44fe2 100644 --- a/demo/docs/en/2d_animal_demo.md +++ b/demo/docs/en/2d_animal_demo.md @@ -1,120 +1,120 @@ -## 2D Animal Pose Demo - -We provide a demo script to test a single image or video with top-down pose estimators and animal detectors. Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection) with version >= 3.0. - -### 2D Animal Pose Image Demo - -```shell -python demo/topdown_demo_with_mmdet.py \ - ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ - ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ - --input ${INPUT_PATH} --det-cat-id ${DET_CAT_ID} \ - [--show] [--output-root ${OUTPUT_DIR}] [--save-predictions] \ - [--draw-heatmap ${DRAW_HEATMAP}] [--radius ${KPT_RADIUS}] \ - [--kpt-thr ${KPT_SCORE_THR}] [--bbox-thr ${BBOX_SCORE_THR}] \ - [--device ${GPU_ID or CPU}] -``` - -The pre-trained animal pose estimation model can be found from [model zoo](https://mmpose.readthedocs.io/en/latest/model_zoo/animal_2d_keypoint.html). -Take [animalpose model](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth) as an example: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ - https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ - configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ - https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ - --input tests/data/animalpose/ca110.jpeg \ - --show --draw-heatmap --det-cat-id=15 -``` - -Visualization result: - -
- -If you use a heatmap-based model and set argument `--draw-heatmap`, the predicted heatmap will be visualized together with the keypoints. - -The augement `--det-cat-id=15` selected detected bounding boxes with label 'cat'. 15 is the index of category 'cat' in COCO dataset, on which the detection model is trained. - -**COCO-animals** -In COCO dataset, there are 80 object categories, including 10 common `animal` categories (14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe'). - -For other animals, we have also provided some pre-trained animal detection models (1-class models). Supported models can be found in [detection model zoo](/demo/docs/en/mmdet_modelzoo.md). - -To save visualized results on disk: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ - https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ - configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ - https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ - --input tests/data/animalpose/ca110.jpeg \ - --output-root vis_results --draw-heatmap --det-cat-id=15 -``` - -To save predicted results on disk: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ - https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ - configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ - https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ - --input tests/data/animalpose/ca110.jpeg \ - --output-root vis_results --save-predictions --draw-heatmap --det-cat-id=15 -``` - -To run demos on CPU: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ - https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ - configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ - https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ - --input tests/data/animalpose/ca110.jpeg \ - --show --draw-heatmap --det-cat-id=15 --device cpu -``` - -### 2D Animal Pose Video Demo - -Videos share the same interface with images. The difference is that the `${INPUT_PATH}` for videos can be the local path or **URL** link to video file. - -For example, - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ - https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ - configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ - https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ - --input demo/resources/ \ - --output-root vis_results --draw-heatmap --det-cat-id=16 -``` - -
- -The original video can be downloaded from [Google Drive](https://drive.google.com/file/d/18d8K3wuUpKiDFHvOx0mh1TEwYwpOc5UO/view?usp=sharing). - -### 2D Animal Pose Demo with Inferencer - -The Inferencer provides a convenient interface for inference, allowing customization using model aliases instead of configuration files and checkpoint paths. It supports various input formats, including image paths, video paths, image folder paths, and webcams. Below is an example command: - -```shell -python demo/inferencer_demo.py tests/data/ap10k \ - --pose2d animal --vis-out-dir vis_results/ap10k -``` - -This command infers all images located in `tests/data/ap10k` and saves the visualization results in the `vis_results/ap10k` directory. - -Image 1 Image 2 - -In addition, the Inferencer supports saving predicted poses. For more information, please refer to the [inferencer document](https://mmpose.readthedocs.io/en/dev-1.x/user_guides/inference.html#inferencer-a-unified-inference-interface). - -### Speed Up Inference - -Some tips to speed up MMPose inference: - -1. set `model.test_cfg.flip_test=False` in [animalpose_hrnet-w32](../../configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py#85). -2. use faster human bounding box detector, see [MMDetection](https://mmdetection.readthedocs.io/en/3.x/model_zoo.html). +## 2D Animal Pose Demo + +We provide a demo script to test a single image or video with top-down pose estimators and animal detectors. Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection) with version >= 3.0. + +### 2D Animal Pose Image Demo + +```shell +python demo/topdown_demo_with_mmdet.py \ + ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ + ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --input ${INPUT_PATH} --det-cat-id ${DET_CAT_ID} \ + [--show] [--output-root ${OUTPUT_DIR}] [--save-predictions] \ + [--draw-heatmap ${DRAW_HEATMAP}] [--radius ${KPT_RADIUS}] \ + [--kpt-thr ${KPT_SCORE_THR}] [--bbox-thr ${BBOX_SCORE_THR}] \ + [--device ${GPU_ID or CPU}] +``` + +The pre-trained animal pose estimation model can be found from [model zoo](https://mmpose.readthedocs.io/en/latest/model_zoo/animal_2d_keypoint.html). +Take [animalpose model](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth) as an example: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ + https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ + https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ + --input tests/data/animalpose/ca110.jpeg \ + --show --draw-heatmap --det-cat-id=15 +``` + +Visualization result: + +
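The command above chains two models: the MMDetection Faster R-CNN proposes boxes, and the MMPose top-down model estimates keypoints inside the boxes whose label matches `--det-cat-id`. For readers who prefer driving the same two-stage flow from Python, here is a minimal sketch built on the public `mmdet`/`mmpose` inference helpers (assuming mmdet >= 3.0 and mmpose 1.x); it mirrors the demo script's logic rather than copying it, and the 0.3 box-score threshold stands in for the demo's `--bbox-thr` default.

```python
# Minimal two-stage sketch: detect 'cat' boxes with MMDetection, then run the
# MMPose top-down animal model on those boxes (configs/checkpoints from above).
import numpy as np
from mmdet.apis import inference_detector, init_detector
from mmpose.apis import inference_topdown, init_model
from mmpose.utils import adapt_mmdet_pipeline

det_cfg = 'demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py'
det_ckpt = ('https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/'
            'faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth')
pose_cfg = ('configs/animal_2d_keypoint/topdown_heatmap/animalpose/'
            'td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py')
pose_ckpt = ('https://download.openmmlab.com/mmpose/animal/hrnet/'
             'hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth')
img = 'tests/data/animalpose/ca110.jpeg'

detector = init_detector(det_cfg, det_ckpt, device='cuda:0')
detector.cfg = adapt_mmdet_pipeline(detector.cfg)  # reconcile mmdet/mmpose registry scopes
pose_model = init_model(pose_cfg, pose_ckpt, device='cuda:0')

# Keep boxes whose label matches --det-cat-id (15 = 'cat') and whose score
# passes a --bbox-thr-like threshold.
det_instances = inference_detector(detector, img).pred_instances.cpu().numpy()
keep = np.logical_and(det_instances.labels == 15, det_instances.scores > 0.3)
bboxes = det_instances.bboxes[keep]

# One PoseDataSample per kept box; keypoints are in .pred_instances.keypoints.
pose_results = inference_topdown(pose_model, img, bboxes, bbox_format='xyxy')
print(len(pose_results), pose_results[0].pred_instances.keypoints.shape)
```

Each element of `pose_results` corresponds to one detected animal; visualization and JSON export are handled by the demo script itself.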
+ +If you use a heatmap-based model and set argument `--draw-heatmap`, the predicted heatmap will be visualized together with the keypoints. + +The argument `--det-cat-id=15` selects detected bounding boxes with label 'cat'. 15 is the index of the category 'cat' in the COCO dataset, on which the detection model is trained. + +**COCO-animals** +In the COCO dataset, there are 80 object categories, including 10 common `animal` categories (14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe'). + +For other animals, we also provide some pre-trained animal detection models (1-class models). Supported models can be found in the [detection model zoo](/demo/docs/en/mmdet_modelzoo.md). + +To save visualized results on disk: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ + https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ + https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ + --input tests/data/animalpose/ca110.jpeg \ + --output-root vis_results --draw-heatmap --det-cat-id=15 +``` + +To save predicted results on disk: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ + https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ + https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ + --input tests/data/animalpose/ca110.jpeg \ + --output-root vis_results --save-predictions --draw-heatmap --det-cat-id=15 +``` + +To run demos on CPU: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ + https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ + https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ + --input tests/data/animalpose/ca110.jpeg \ + --show --draw-heatmap --det-cat-id=15 --device cpu +``` + +### 2D Animal Pose Video Demo + +Videos share the same interface as images. The difference is that the `${INPUT_PATH}` for videos can be a local path or a **URL** link to a video file. + +For example, + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ + https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ + https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ + --input demo/resources/ \ + --output-root vis_results --draw-heatmap --det-cat-id=16 +``` + +
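As noted above, `--det-cat-id` selects which COCO detection label is kept before pose estimation (15 for 'cat' in the image example, 16 for 'dog' in the video example above). A small helper like the following, based purely on the category indices listed in this page, can make that choice explicit in scripts; the name `coco_animal_ids` is just illustrative.

```python
# Mapping of the 10 COCO animal categories to their detection label indices,
# taken from the list above; useful when choosing --det-cat-id.
coco_animal_ids = {
    'bird': 14, 'cat': 15, 'dog': 16, 'horse': 17, 'sheep': 18,
    'cow': 19, 'elephant': 20, 'bear': 21, 'zebra': 22, 'giraffe': 23,
}

def det_cat_id(name: str) -> int:
    """Return the --det-cat-id value for a COCO animal category name."""
    try:
        return coco_animal_ids[name.lower()]
    except KeyError as err:
        raise ValueError(
            f"'{name}' is not a COCO animal category; "
            'use a 1-class detector from the detection model zoo instead.'
        ) from err

print(det_cat_id('dog'))  # 16, as used in the video demo above
```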
+ +The original video can be downloaded from [Google Drive](https://drive.google.com/file/d/18d8K3wuUpKiDFHvOx0mh1TEwYwpOc5UO/view?usp=sharing). + +### 2D Animal Pose Demo with Inferencer + +The Inferencer provides a convenient interface for inference, allowing customization using model aliases instead of configuration files and checkpoint paths. It supports various input formats, including image paths, video paths, image folder paths, and webcams. Below is an example command: + +```shell +python demo/inferencer_demo.py tests/data/ap10k \ + --pose2d animal --vis-out-dir vis_results/ap10k +``` + +This command infers all images located in `tests/data/ap10k` and saves the visualization results in the `vis_results/ap10k` directory. + +Image 1 Image 2 + +In addition, the Inferencer supports saving predicted poses. For more information, please refer to the [inferencer document](https://mmpose.readthedocs.io/en/dev-1.x/user_guides/inference.html#inferencer-a-unified-inference-interface). + +### Speed Up Inference + +Some tips to speed up MMPose inference: + +1. set `model.test_cfg.flip_test=False` in [animalpose_hrnet-w32](../../configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py#85). +2. use faster human bounding box detector, see [MMDetection](https://mmdetection.readthedocs.io/en/3.x/model_zoo.html). diff --git a/demo/docs/en/2d_face_demo.md b/demo/docs/en/2d_face_demo.md index 9c60f68487..8a43a91da1 100644 --- a/demo/docs/en/2d_face_demo.md +++ b/demo/docs/en/2d_face_demo.md @@ -1,101 +1,101 @@ -## 2D Face Keypoint Demo - -We provide a demo script to test a single image or video with face detectors and top-down pose estimators. Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection) with version >= 3.0. - -**Face Bounding Box Model Preparation:** The pre-trained face box estimation model can be found in [mmdet model zoo](/demo/docs/en/mmdet_modelzoo.md#face-bounding-box-detection-models). - -### 2D Face Image Demo - -```shell -python demo/topdown_demo_with_mmdet.py \ - ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ - ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ - --input ${INPUT_PATH} [--output-root ${OUTPUT_DIR}] \ - [--show] [--device ${GPU_ID or CPU}] [--save-predictions] \ - [--draw-heatmap ${DRAW_HEATMAP}] [--radius ${KPT_RADIUS}] \ - [--kpt-thr ${KPT_SCORE_THR}] [--bbox-thr ${BBOX_SCORE_THR}] -``` - -The pre-trained face keypoint estimation models can be found from [model zoo](https://mmpose.readthedocs.io/en/latest/model_zoo/face_2d_keypoint.html). -Take [aflw model](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth) as an example: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \ - https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \ - configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py \ - https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth \ - --input tests/data/cofw/001766.jpg \ - --show --draw-heatmap -``` - -Visualization result: - -
- -If you use a heatmap-based model and set argument `--draw-heatmap`, the predicted heatmap will be visualized together with the keypoints. - -To save visualized results on disk: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \ - https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \ - configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py \ - https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth \ - --input tests/data/cofw/001766.jpg \ - --draw-heatmap --output-root vis_results -``` - -To save the predicted results on disk, please specify `--save-predictions`. - -To run demos on CPU: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \ - https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \ - configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py \ - https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth \ - --input tests/data/cofw/001766.jpg \ - --show --draw-heatmap --device=cpu -``` - -### 2D Face Video Demo - -Videos share the same interface with images. The difference is that the `${INPUT_PATH}` for videos can be the local path or **URL** link to video file. - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \ - https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \ - configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py \ - https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth \ - --input demo/resources/ \ - --show --draw-heatmap --output-root vis_results -``` - -
- -The original video can be downloaded from [Google Drive](https://drive.google.com/file/d/1kQt80t6w802b_vgVcmiV_QfcSJ3RWzmb/view?usp=sharing). - -### 2D Face Pose Demo with Inferencer - -The Inferencer provides a convenient interface for inference, allowing customization using model aliases instead of configuration files and checkpoint paths. It supports various input formats, including image paths, video paths, image folder paths, and webcams. Below is an example command: - -```shell -python demo/inferencer_demo.py tests/data/wflw \ - --pose2d face --vis-out-dir vis_results/wflw --radius 1 -``` - -This command infers all images located in `tests/data/wflw` and saves the visualization results in the `vis_results/wflw` directory. - -Image 1 - -Image 2 - -In addition, the Inferencer supports saving predicted poses. For more information, please refer to the [inferencer document](https://mmpose.readthedocs.io/en/dev-1.x/user_guides/inference.html#inferencer-a-unified-inference-interface). - -### Speed Up Inference - -For 2D face keypoint estimation models, try to edit the config file. For example, set `model.test_cfg.flip_test=False` in line 90 of [aflw_hrnetv2](../../../configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py). +## 2D Face Keypoint Demo + +We provide a demo script to test a single image or video with face detectors and top-down pose estimators. Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection) with version >= 3.0. + +**Face Bounding Box Model Preparation:** The pre-trained face box estimation model can be found in [mmdet model zoo](/demo/docs/en/mmdet_modelzoo.md#face-bounding-box-detection-models). + +### 2D Face Image Demo + +```shell +python demo/topdown_demo_with_mmdet.py \ + ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ + ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --input ${INPUT_PATH} [--output-root ${OUTPUT_DIR}] \ + [--show] [--device ${GPU_ID or CPU}] [--save-predictions] \ + [--draw-heatmap ${DRAW_HEATMAP}] [--radius ${KPT_RADIUS}] \ + [--kpt-thr ${KPT_SCORE_THR}] [--bbox-thr ${BBOX_SCORE_THR}] +``` + +The pre-trained face keypoint estimation models can be found from [model zoo](https://mmpose.readthedocs.io/en/latest/model_zoo/face_2d_keypoint.html). +Take [aflw model](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth) as an example: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \ + https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \ + configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py \ + https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth \ + --input tests/data/cofw/001766.jpg \ + --show --draw-heatmap +``` + +Visualization result: + +
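Like the other top-down demos in this directory, this script accepts `--save-predictions`. Judging from the demo code earlier in this diff, the saved JSON stores the model's `dataset_meta` under `meta_info` and the predictions under `instance_info`; a rough reading sketch is shown below. The output filename is hypothetical, and the exact layout of each `instance_info` entry may differ between demo versions, so the snippet prints the keys instead of assuming them.

```python
# Sketch: inspect a JSON file written by --save-predictions.
# The top-level keys follow the json.dump(dict(meta_info=..., instance_info=...))
# call in the demo script; everything below that level is printed, not assumed.
import json

pred_path = 'vis_results/results_001766.json'  # hypothetical output path

with open(pred_path) as f:
    preds = json.load(f)

meta = preds['meta_info']          # the pose model's dataset_meta
entries = preds['instance_info']   # one entry per image or video frame

print('meta keys:', sorted(meta.keys())[:5], '...')
print('number of entries:', len(entries))
first = entries[0]
print('first entry:', list(first.keys()) if isinstance(first, dict) else type(first))
```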
+ +If you use a heatmap-based model and set argument `--draw-heatmap`, the predicted heatmap will be visualized together with the keypoints. + +To save visualized results on disk: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \ + https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \ + configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py \ + https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth \ + --input tests/data/cofw/001766.jpg \ + --draw-heatmap --output-root vis_results +``` + +To save the predicted results on disk, please specify `--save-predictions`. + +To run demos on CPU: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \ + https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \ + configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py \ + https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth \ + --input tests/data/cofw/001766.jpg \ + --show --draw-heatmap --device=cpu +``` + +### 2D Face Video Demo + +Videos share the same interface with images. The difference is that the `${INPUT_PATH}` for videos can be the local path or **URL** link to video file. + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \ + https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \ + configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py \ + https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth \ + --input demo/resources/ \ + --show --draw-heatmap --output-root vis_results +``` + +
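For video input, the demo essentially loops over frames with OpenCV and reuses the single-image pipeline (the `cap.release()` / `video_writer.release()` calls at the top of this diff come from that loop). A stripped-down sketch of such a loop is shown below, assuming mmpose 1.x; it skips the face detector and feeds each full frame to the pose model, so it illustrates the per-frame flow rather than reproducing the demo's behaviour. The input filename is a placeholder.

```python
# Per-frame video loop around the mmpose top-down API (no face detector here:
# with no bboxes given, inference_topdown uses the whole frame as the box).
import cv2
from mmpose.apis import inference_topdown, init_model

pose_cfg = ('configs/face_2d_keypoint/topdown_heatmap/aflw/'
            'td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py')
pose_ckpt = ('https://download.openmmlab.com/mmpose/face/hrnetv2/'
             'hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth')
model = init_model(pose_cfg, pose_ckpt, device='cpu')

cap = cv2.VideoCapture('face_clip.mp4')  # placeholder local path or URL
frame_idx = 0
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    results = inference_topdown(model, frame)   # list of PoseDataSample
    kpts = results[0].pred_instances.keypoints  # (num_instances, K, 2)
    print(f'frame {frame_idx}: {kpts.shape[1]} keypoints')
    frame_idx += 1
cap.release()
```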
+ +The original video can be downloaded from [Google Drive](https://drive.google.com/file/d/1kQt80t6w802b_vgVcmiV_QfcSJ3RWzmb/view?usp=sharing). + +### 2D Face Pose Demo with Inferencer + +The Inferencer provides a convenient interface for inference, allowing customization using model aliases instead of configuration files and checkpoint paths. It supports various input formats, including image paths, video paths, image folder paths, and webcams. Below is an example command: + +```shell +python demo/inferencer_demo.py tests/data/wflw \ + --pose2d face --vis-out-dir vis_results/wflw --radius 1 +``` + +This command infers all images located in `tests/data/wflw` and saves the visualization results in the `vis_results/wflw` directory. + +Image 1 + +Image 2 + +In addition, the Inferencer supports saving predicted poses. For more information, please refer to the [inferencer document](https://mmpose.readthedocs.io/en/dev-1.x/user_guides/inference.html#inferencer-a-unified-inference-interface). + +### Speed Up Inference + +For 2D face keypoint estimation models, try to edit the config file. For example, set `model.test_cfg.flip_test=False` in line 90 of [aflw_hrnetv2](../../../configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py). diff --git a/demo/docs/en/2d_hand_demo.md b/demo/docs/en/2d_hand_demo.md index f47b3695e3..02caf704dc 100644 --- a/demo/docs/en/2d_hand_demo.md +++ b/demo/docs/en/2d_hand_demo.md @@ -1,100 +1,100 @@ -## 2D Hand Keypoint Demo - -We provide a demo script to test a single image or video with hand detectors and top-down pose estimators. Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection) with version >= 3.0. - -**Hand Box Model Preparation:** The pre-trained hand box estimation model can be found in [mmdet model zoo](/demo/docs/en/mmdet_modelzoo.md#hand-bounding-box-detection-models). - -### 2D Hand Image Demo - -```shell -python demo/topdown_demo_with_mmdet.py \ - ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ - ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ - --input ${INPUT_PATH} [--output-root ${OUTPUT_DIR}] \ - [--show] [--device ${GPU_ID or CPU}] [--save-predictions] \ - [--draw-heatmap ${DRAW_HEATMAP}] [--radius ${KPT_RADIUS}] \ - [--kpt-thr ${KPT_SCORE_THR}] [--bbox-thr ${BBOX_SCORE_THR}] -``` - -The pre-trained hand pose estimation model can be downloaded from [model zoo](https://mmpose.readthedocs.io/en/latest/model_zoo/hand_2d_keypoint.html). -Take [onehand10k model](https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth) as an example: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py \ - https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth \ - configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py \ - https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth \ - --input tests/data/onehand10k/9.jpg \ - --show --draw-heatmap -``` - -Visualization result: - -
- -If you use a heatmap-based model and set argument `--draw-heatmap`, the predicted heatmap will be visualized together with the keypoints. - -To save visualized results on disk: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py \ - https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth \ - configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py \ - https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth \ - --input tests/data/onehand10k/9.jpg \ - --output-root vis_results --show --draw-heatmap -``` - -To save the predicted results on disk, please specify `--save-predictions`. - -To run demos on CPU: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py \ - https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth \ - configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py \ - https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth \ - --input tests/data/onehand10k/9.jpg \ - --show --draw-heatmap --device cpu -``` - -### 2D Hand Keypoints Video Demo - -Videos share the same interface with images. The difference is that the `${INPUT_PATH}` for videos can be the local path or **URL** link to video file. - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py \ - https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth \ - configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py \ - https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth \ - --input demo/resources/ \ - --output-root vis_results --show --draw-heatmap -``` - -
- -The original video can be downloaded from [Github](https://raw.githubusercontent.com/open-mmlab/mmpose/master/tests/data/nvgesture/sk_color.avi). - -### 2D Hand Keypoints Demo with Inferencer - -The Inferencer provides a convenient interface for inference, allowing customization using model aliases instead of configuration files and checkpoint paths. It supports various input formats, including image paths, video paths, image folder paths, and webcams. Below is an example command: - -```shell -python demo/inferencer_demo.py tests/data/onehand10k \ - --pose2d hand --vis-out-dir vis_results/onehand10k \ - --bbox-thr 0.5 --kpt-thr 0.05 -``` - -This command infers all images located in `tests/data/onehand10k` and saves the visualization results in the `vis_results/onehand10k` directory. - -Image 1 Image 2 Image 3 Image 4 - -In addition, the Inferencer supports saving predicted poses. For more information, please refer to the [inferencer document](https://mmpose.readthedocs.io/en/dev-1.x/user_guides/inference.html#inferencer-a-unified-inference-interface). - -### Speed Up Inference - -For 2D hand keypoint estimation models, try to edit the config file. For example, set `model.test_cfg.flip_test=False` in [onehand10k_hrnetv2](../../configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py#90). +## 2D Hand Keypoint Demo + +We provide a demo script to test a single image or video with hand detectors and top-down pose estimators. Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection) with version >= 3.0. + +**Hand Box Model Preparation:** The pre-trained hand box estimation model can be found in [mmdet model zoo](/demo/docs/en/mmdet_modelzoo.md#hand-bounding-box-detection-models). + +### 2D Hand Image Demo + +```shell +python demo/topdown_demo_with_mmdet.py \ + ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ + ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --input ${INPUT_PATH} [--output-root ${OUTPUT_DIR}] \ + [--show] [--device ${GPU_ID or CPU}] [--save-predictions] \ + [--draw-heatmap ${DRAW_HEATMAP}] [--radius ${KPT_RADIUS}] \ + [--kpt-thr ${KPT_SCORE_THR}] [--bbox-thr ${BBOX_SCORE_THR}] +``` + +The pre-trained hand pose estimation model can be downloaded from [model zoo](https://mmpose.readthedocs.io/en/latest/model_zoo/hand_2d_keypoint.html). +Take [onehand10k model](https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth) as an example: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py \ + https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth \ + configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py \ + https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth \ + --input tests/data/onehand10k/9.jpg \ + --show --draw-heatmap +``` + +Visualization result: + +
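The `--bbox-thr` and `--kpt-thr` options in the usage above are score thresholds: boxes below `--bbox-thr` are dropped before pose estimation, and keypoints below `--kpt-thr` are hidden when drawing. The sketch below applies the same kind of keypoint-score mask in Python after running the hand model; it is an illustration of the thresholding idea, not the demo's exact code, and assumes the mmpose 1.x `inference_topdown` output layout.

```python
# Sketch: mask out low-confidence keypoints, mirroring what --kpt-thr does
# at visualization time (keypoints below the threshold are not drawn).
import numpy as np
from mmpose.apis import inference_topdown, init_model

pose_cfg = ('configs/hand_2d_keypoint/topdown_heatmap/onehand10k/'
            'td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py')
pose_ckpt = ('https://download.openmmlab.com/mmpose/hand/hrnetv2/'
             'hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth')
model = init_model(pose_cfg, pose_ckpt, device='cpu')

result = inference_topdown(model, 'tests/data/onehand10k/9.jpg')[0]
keypoints = result.pred_instances.keypoints[0]     # (K, 2)
scores = result.pred_instances.keypoint_scores[0]  # (K,)

kpt_thr = 0.3
visible = scores >= kpt_thr
print(f'{visible.sum()} / {len(scores)} keypoints pass kpt-thr={kpt_thr}')
print(np.round(keypoints[visible], 1))
```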
+ +If you use a heatmap-based model and set argument `--draw-heatmap`, the predicted heatmap will be visualized together with the keypoints. + +To save visualized results on disk: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py \ + https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth \ + configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py \ + https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth \ + --input tests/data/onehand10k/9.jpg \ + --output-root vis_results --show --draw-heatmap +``` + +To save the predicted results on disk, please specify `--save-predictions`. + +To run demos on CPU: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py \ + https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth \ + configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py \ + https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth \ + --input tests/data/onehand10k/9.jpg \ + --show --draw-heatmap --device cpu +``` + +### 2D Hand Keypoints Video Demo + +Videos share the same interface with images. The difference is that the `${INPUT_PATH}` for videos can be the local path or **URL** link to video file. + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py \ + https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth \ + configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py \ + https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth \ + --input demo/resources/ \ + --output-root vis_results --show --draw-heatmap +``` + +
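The "Speed Up Inference" note at the end of this page (and of the other 2D demo pages) suggests disabling flip-test augmentation by setting `model.test_cfg.flip_test=False` in the config. Instead of editing the shipped config in place, one option is to derive a modified copy with mmengine's `Config` API, as sketched below; the output filename is just an example, and the speed gain comes at the cost of slightly lower accuracy.

```python
# Sketch: write a copy of the hand config with flip-test disabled, then pass
# the new file to the demo script in place of the original config.
from mmengine.config import Config

cfg = Config.fromfile(
    'configs/hand_2d_keypoint/topdown_heatmap/onehand10k/'
    'td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py')

cfg.model.test_cfg.flip_test = False  # single forward pass instead of two
cfg.dump('td-hm_hrnetv2-w18_onehand10k-256x256_noflip.py')  # example filename
print('flip_test is now', cfg.model.test_cfg.flip_test)
```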
+ +The original video can be downloaded from [Github](https://raw.githubusercontent.com/open-mmlab/mmpose/master/tests/data/nvgesture/sk_color.avi). + +### 2D Hand Keypoints Demo with Inferencer + +The Inferencer provides a convenient interface for inference, allowing customization using model aliases instead of configuration files and checkpoint paths. It supports various input formats, including image paths, video paths, image folder paths, and webcams. Below is an example command: + +```shell +python demo/inferencer_demo.py tests/data/onehand10k \ + --pose2d hand --vis-out-dir vis_results/onehand10k \ + --bbox-thr 0.5 --kpt-thr 0.05 +``` + +This command infers all images located in `tests/data/onehand10k` and saves the visualization results in the `vis_results/onehand10k` directory. + +Image 1 Image 2 Image 3 Image 4 + +In addition, the Inferencer supports saving predicted poses. For more information, please refer to the [inferencer document](https://mmpose.readthedocs.io/en/dev-1.x/user_guides/inference.html#inferencer-a-unified-inference-interface). + +### Speed Up Inference + +For 2D hand keypoint estimation models, try to edit the config file. For example, set `model.test_cfg.flip_test=False` in [onehand10k_hrnetv2](../../configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py#90). diff --git a/demo/docs/en/2d_human_pose_demo.md b/demo/docs/en/2d_human_pose_demo.md index a2e3cf59dd..5cb907571b 100644 --- a/demo/docs/en/2d_human_pose_demo.md +++ b/demo/docs/en/2d_human_pose_demo.md @@ -1,151 +1,151 @@ -## 2D Human Pose Demo - -We provide demo scripts to perform human pose estimation on images or videos. - -### 2D Human Pose Top-Down Image Demo - -#### Use full image as input - -We provide a demo script to test a single image, using the full image as input bounding box. - -```shell -python demo/image_demo.py \ - ${IMG_FILE} ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ - --out-file ${OUTPUT_FILE} \ - [--device ${GPU_ID or CPU}] \ - [--draw_heatmap] -``` - -If you use a heatmap-based model and set argument `--draw-heatmap`, the predicted heatmap will be visualized together with the keypoints. - -The pre-trained human pose estimation models can be downloaded from [model zoo](https://mmpose.readthedocs.io/en/latest/model_zoo/body_2d_keypoint.html). -Take [coco model](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth) as an example: - -```shell -python demo/image_demo.py \ - tests/data/coco/000000000785.jpg \ - configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py \ - https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth \ - --out-file vis_results.jpg \ - --draw-heatmap -``` - -To run this demo on CPU: - -```shell -python demo/image_demo.py \ - tests/data/coco/000000000785.jpg \ - configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py \ - https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth \ - --out-file vis_results.jpg \ - --draw-heatmap \ - --device=cpu -``` - -Visualization result: - -
- -#### Use mmdet for human bounding box detection - -We provide a demo script to run mmdet for human detection, and mmpose for pose estimation. - -Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection) with version >= 3.0. - -```shell -python demo/topdown_demo_with_mmdet.py \ - ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ - ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ - --input ${INPUT_PATH} \ - [--output-root ${OUTPUT_DIR}] [--save-predictions] \ - [--show] [--draw-heatmap] [--device ${GPU_ID or CPU}] \ - [--bbox-thr ${BBOX_SCORE_THR}] [--kpt-thr ${KPT_SCORE_THR}] -``` - -Example: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ - https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ - configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py \ - https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth \ - --input tests/data/coco/000000197388.jpg --show --draw-heatmap \ - --output-root vis_results/ -``` - -Visualization result: - -
- -To save the predicted results on disk, please specify `--save-predictions`. - -### 2D Human Pose Top-Down Video Demo - -The above demo script can also take video as input, and run mmdet for human detection, and mmpose for pose estimation. The difference is, the `${INPUT_PATH}` for videos can be the local path or **URL** link to video file. - -Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection) with version >= 3.0. - -Example: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ - https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ - configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py \ - https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth \ - --input tests/data/posetrack18/videos/000001_mpiinew_test/000001_mpiinew_test.mp4 \ - --output-root=vis_results/demo --show --draw-heatmap -``` - -### 2D Human Pose Bottom-up Image/Video Demo - -We also provide a demo script using bottom-up models to estimate the human pose in an image or a video, which does not rely on human detectors. - -```shell -python demo/bottomup_demo.py \ - ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ - --input ${INPUT_PATH} \ - [--output-root ${OUTPUT_DIR}] [--save-predictions] \ - [--show] [--device ${GPU_ID or CPU}] \ - [--kpt-thr ${KPT_SCORE_THR}] -``` - -Example: - -```shell -python demo/bottomup_demo.py \ - configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512.py \ - https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512_ac7c17bf-20221228.pth \ - --input tests/data/coco/000000197388.jpg --output-root=vis_results \ - --show --save-predictions -``` - -Visualization result: - -
- -### 2D Human Pose Estimation with Inferencer - -The Inferencer provides a convenient interface for inference, allowing customization using model aliases instead of configuration files and checkpoint paths. It supports various input formats, including image paths, video paths, image folder paths, and webcams. Below is an example command: - -```shell -python demo/inferencer_demo.py \ - tests/data/posetrack18/videos/000001_mpiinew_test/000001_mpiinew_test.mp4 \ - --pose2d human --vis-out-dir vis_results/posetrack18 -``` - -This command infers the video and saves the visualization results in the `vis_results/posetrack18` directory. - -Image 1 - -In addition, the Inferencer supports saving predicted poses. For more information, please refer to the [inferencer document](https://mmpose.readthedocs.io/en/dev-1.x/user_guides/inference.html#inferencer-a-unified-inference-interface). - -### Speed Up Inference - -Some tips to speed up MMPose inference: - -For top-down models, try to edit the config file. For example, - -1. set `model.test_cfg.flip_test=False` in [topdown-res50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py#L56). -2. use faster human bounding box detector, see [MMDetection](https://mmdetection.readthedocs.io/en/3.x/model_zoo.html). +## 2D Human Pose Demo + +We provide demo scripts to perform human pose estimation on images or videos. + +### 2D Human Pose Top-Down Image Demo + +#### Use full image as input + +We provide a demo script to test a single image, using the full image as input bounding box. + +```shell +python demo/image_demo.py \ + ${IMG_FILE} ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --out-file ${OUTPUT_FILE} \ + [--device ${GPU_ID or CPU}] \ + [--draw_heatmap] +``` + +If you use a heatmap-based model and set argument `--draw-heatmap`, the predicted heatmap will be visualized together with the keypoints. + +The pre-trained human pose estimation models can be downloaded from [model zoo](https://mmpose.readthedocs.io/en/latest/model_zoo/body_2d_keypoint.html). +Take [coco model](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth) as an example: + +```shell +python demo/image_demo.py \ + tests/data/coco/000000000785.jpg \ + configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py \ + https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth \ + --out-file vis_results.jpg \ + --draw-heatmap +``` + +To run this demo on CPU: + +```shell +python demo/image_demo.py \ + tests/data/coco/000000000785.jpg \ + configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py \ + https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth \ + --out-file vis_results.jpg \ + --draw-heatmap \ + --device=cpu +``` + +Visualization result: + +
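What `demo/image_demo.py` does can also be reproduced in a few lines of Python: run the model on the full image, merge the per-instance results, and hand them to the model's visualizer with heatmap drawing enabled. The sketch below follows the inference pattern from the MMPose 1.x user guide; the exact visualizer argument names (for example `draw_heatmap`) should be treated as assumptions to check against your installed version.

```python
# Sketch: full-image top-down inference plus heatmap visualization,
# roughly what demo/image_demo.py --draw-heatmap does for one image.
from mmcv import imread
from mmpose.apis import inference_topdown, init_model
from mmpose.registry import VISUALIZERS
from mmpose.structures import merge_data_samples

cfg = 'configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py'
ckpt = ('https://download.openmmlab.com/mmpose/top_down/hrnet/'
        'hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth')
img_path = 'tests/data/coco/000000000785.jpg'

# output_heatmaps=True asks the head to also return heatmaps for visualization.
model = init_model(
    cfg, ckpt, device='cpu',
    cfg_options=dict(model=dict(test_cfg=dict(output_heatmaps=True))))

results = merge_data_samples(inference_topdown(model, img_path))

visualizer = VISUALIZERS.build(model.cfg.visualizer)
visualizer.set_dataset_meta(model.dataset_meta)
visualizer.add_datasample(
    'result',
    imread(img_path, channel_order='rgb'),
    data_sample=results,
    draw_heatmap=True,   # assumed keyword, analogous to --draw-heatmap
    show=False,
    out_file='vis_results.jpg')
```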
+ +#### Use mmdet for human bounding box detection + +We provide a demo script to run mmdet for human detection, and mmpose for pose estimation. + +Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection) with version >= 3.0. + +```shell +python demo/topdown_demo_with_mmdet.py \ + ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ + ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --input ${INPUT_PATH} \ + [--output-root ${OUTPUT_DIR}] [--save-predictions] \ + [--show] [--draw-heatmap] [--device ${GPU_ID or CPU}] \ + [--bbox-thr ${BBOX_SCORE_THR}] [--kpt-thr ${KPT_SCORE_THR}] +``` + +Example: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ + https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py \ + https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth \ + --input tests/data/coco/000000197388.jpg --show --draw-heatmap \ + --output-root vis_results/ +``` + +Visualization result: + +
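The Inferencer described near the end of this page is also available as a Python class. Below is a minimal sketch, assuming an mmpose 1.x install where `MMPoseInferencer` is exported from `mmpose.apis`; the result layout is printed rather than assumed, since it can vary across versions.

```python
# Sketch: the Python counterpart of demo/inferencer_demo.py with the
# 'human' alias; results are yielded lazily, one item per input image/frame.
from mmpose.apis import MMPoseInferencer

inferencer = MMPoseInferencer('human')  # model alias, as with --pose2d human

result_generator = inferencer(
    'tests/data/coco/000000197388.jpg',
    vis_out_dir='vis_results',  # same role as --vis-out-dir
    show=False)

result = next(result_generator)
print(type(result), list(result.keys()))
```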
+ +To save the predicted results on disk, please specify `--save-predictions`. + +### 2D Human Pose Top-Down Video Demo + +The above demo script can also take video as input, and run mmdet for human detection, and mmpose for pose estimation. The difference is, the `${INPUT_PATH}` for videos can be the local path or **URL** link to video file. + +Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection) with version >= 3.0. + +Example: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ + https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py \ + https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth \ + --input tests/data/posetrack18/videos/000001_mpiinew_test/000001_mpiinew_test.mp4 \ + --output-root=vis_results/demo --show --draw-heatmap +``` + +### 2D Human Pose Bottom-up Image/Video Demo + +We also provide a demo script using bottom-up models to estimate the human pose in an image or a video, which does not rely on human detectors. + +```shell +python demo/bottomup_demo.py \ + ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --input ${INPUT_PATH} \ + [--output-root ${OUTPUT_DIR}] [--save-predictions] \ + [--show] [--device ${GPU_ID or CPU}] \ + [--kpt-thr ${KPT_SCORE_THR}] +``` + +Example: + +```shell +python demo/bottomup_demo.py \ + configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512.py \ + https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512_ac7c17bf-20221228.pth \ + --input tests/data/coco/000000197388.jpg --output-root=vis_results \ + --show --save-predictions +``` + +Visualization result: + +
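Bottom-up models can be driven from Python in the same spirit, but without any detector at all: `mmpose.apis.inference_bottomup` takes the image and returns every person found in one pass. A short sketch with the DEKR model from the command above (mmpose 1.x assumed):

```python
# Sketch: single-pass bottom-up inference with DEKR; no bounding boxes needed.
from mmpose.apis import inference_bottomup, init_model

cfg = 'configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512.py'
ckpt = ('https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/'
        'dekr_hrnet-w32_8xb10-140e_coco-512x512_ac7c17bf-20221228.pth')

model = init_model(cfg, ckpt, device='cuda:0')
result = inference_bottomup(model, 'tests/data/coco/000000197388.jpg')[0]

# All detected people come back together: (num_people, num_keypoints, 2).
print(result.pred_instances.keypoints.shape)
```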
+ +### 2D Human Pose Estimation with Inferencer + +The Inferencer provides a convenient interface for inference, allowing customization using model aliases instead of configuration files and checkpoint paths. It supports various input formats, including image paths, video paths, image folder paths, and webcams. Below is an example command: + +```shell +python demo/inferencer_demo.py \ + tests/data/posetrack18/videos/000001_mpiinew_test/000001_mpiinew_test.mp4 \ + --pose2d human --vis-out-dir vis_results/posetrack18 +``` + +This command infers the video and saves the visualization results in the `vis_results/posetrack18` directory. + +Image 1 + +In addition, the Inferencer supports saving predicted poses. For more information, please refer to the [inferencer document](https://mmpose.readthedocs.io/en/dev-1.x/user_guides/inference.html#inferencer-a-unified-inference-interface). + +### Speed Up Inference + +Some tips to speed up MMPose inference: + +For top-down models, try to edit the config file. For example, + +1. set `model.test_cfg.flip_test=False` in [topdown-res50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py#L56). +2. use faster human bounding box detector, see [MMDetection](https://mmdetection.readthedocs.io/en/3.x/model_zoo.html). diff --git a/demo/docs/en/2d_wholebody_pose_demo.md b/demo/docs/en/2d_wholebody_pose_demo.md index ddd4cbd13d..90f7bfd0ba 100644 --- a/demo/docs/en/2d_wholebody_pose_demo.md +++ b/demo/docs/en/2d_wholebody_pose_demo.md @@ -1,113 +1,113 @@ -## 2D Human Whole-Body Pose Demo - -### 2D Human Whole-Body Pose Top-Down Image Demo - -#### Use full image as input - -We provide a demo script to test a single image, using the full image as input bounding box. - -```shell -python demo/image_demo.py \ - ${IMG_FILE} ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ - --out-file ${OUTPUT_FILE} \ - [--device ${GPU_ID or CPU}] \ - [--draw_heatmap] -``` - -The pre-trained hand pose estimation models can be downloaded from [model zoo](https://mmpose.readthedocs.io/en/latest/model_zoo/2d_wholebody_keypoint.html). -Take [coco-wholebody_vipnas_res50_dark](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth) model as an example: - -```shell -python demo/image_demo.py \ - tests/data/coco/000000000785.jpg \ - configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py \ - https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth \ - --out-file vis_results.jpg -``` - -To run demos on CPU: - -```shell -python demo/image_demo.py \ - tests/data/coco/000000000785.jpg \ - configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py \ - https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth \ - --out-file vis_results.jpg \ - --device=cpu -``` - -#### Use mmdet for human bounding box detection - -We provide a demo script to run mmdet for human detection, and mmpose for pose estimation. - -Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection) with version >= 3.0. 
- -```shell -python demo/topdown_demo_with_mmdet.py \ - ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ - ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ - --input ${INPUT_PATH} \ - [--output-root ${OUTPUT_DIR}] [--save-predictions] \ - [--show] [--draw-heatmap] [--device ${GPU_ID or CPU}] \ - [--bbox-thr ${BBOX_SCORE_THR}] [--kpt-thr ${KPT_SCORE_THR}] -``` - -Examples: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ - https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ - configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py \ - https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth \ - --input tests/data/coco/000000196141.jpg \ - --output-root vis_results/ --show -``` - -To save the predicted results on disk, please specify `--save-predictions`. - -### 2D Human Whole-Body Pose Top-Down Video Demo - -The above demo script can also take video as input, and run mmdet for human detection, and mmpose for pose estimation. - -Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection). - -Examples: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ - https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ - configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py \ - https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth \ - --input https://user-images.githubusercontent.com/87690686/137440639-fb08603d-9a35-474e-b65f-46b5c06b68d6.mp4 \ - --output-root vis_results/ --show -``` - -Visualization result: - -
- -### 2D Human Whole-Body Pose Estimation with Inferencer - -The Inferencer provides a convenient interface for inference, allowing customization using model aliases instead of configuration files and checkpoint paths. It supports various input formats, including image paths, video paths, image folder paths, and webcams. Below is an example command: - -```shell -python demo/inferencer_demo.py tests/data/crowdpose \ - --pose2d wholebody --vis-out-dir vis_results/crowdpose -``` - -This command infers all images located in `tests/data/crowdpose` and saves the visualization results in the `vis_results/crowdpose` directory. - -Image 1 Image 2 - -In addition, the Inferencer supports saving predicted poses. For more information, please refer to the [inferencer document](https://mmpose.readthedocs.io/en/dev-1.x/user_guides/inference.html#inferencer-a-unified-inference-interface). - -### Speed Up Inference - -Some tips to speed up MMPose inference: - -For top-down models, try to edit the config file. For example, - -1. set `model.test_cfg.flip_test=False` in [pose_hrnet_w48_dark+](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py#L90). -2. use faster human bounding box detector, see [MMDetection](https://mmdetection.readthedocs.io/en/3.x/model_zoo.html). +## 2D Human Whole-Body Pose Demo + +### 2D Human Whole-Body Pose Top-Down Image Demo + +#### Use full image as input + +We provide a demo script to test a single image, using the full image as the input bounding box. + +```shell +python demo/image_demo.py \ + ${IMG_FILE} ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --out-file ${OUTPUT_FILE} \ + [--device ${GPU_ID or CPU}] \ + [--draw_heatmap] +``` + +The pre-trained human whole-body pose estimation models can be downloaded from the [model zoo](https://mmpose.readthedocs.io/en/latest/model_zoo/2d_wholebody_keypoint.html). +Take the [coco-wholebody_vipnas_res50_dark](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth) model as an example: + +```shell +python demo/image_demo.py \ + tests/data/coco/000000000785.jpg \ + configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py \ + https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth \ + --out-file vis_results.jpg +``` + +To run demos on CPU: + +```shell +python demo/image_demo.py \ + tests/data/coco/000000000785.jpg \ + configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py \ + https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth \ + --out-file vis_results.jpg \ + --device=cpu +``` + +#### Use mmdet for human bounding box detection + +We provide a demo script to run mmdet for human detection and mmpose for pose estimation. + +Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection) with version >= 3.0. 
+ +```shell +python demo/topdown_demo_with_mmdet.py \ + ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ + ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --input ${INPUT_PATH} \ + [--output-root ${OUTPUT_DIR}] [--save-predictions] \ + [--show] [--draw-heatmap] [--device ${GPU_ID or CPU}] \ + [--bbox-thr ${BBOX_SCORE_THR}] [--kpt-thr ${KPT_SCORE_THR}] +``` + +Examples: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ + https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py \ + https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth \ + --input tests/data/coco/000000196141.jpg \ + --output-root vis_results/ --show +``` + +To save the predicted results on disk, please specify `--save-predictions`. + +### 2D Human Whole-Body Pose Top-Down Video Demo + +The above demo script can also take video as input, and run mmdet for human detection, and mmpose for pose estimation. + +Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection). + +Examples: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ + https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py \ + https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth \ + --input https://user-images.githubusercontent.com/87690686/137440639-fb08603d-9a35-474e-b65f-46b5c06b68d6.mp4 \ + --output-root vis_results/ --show +``` + +Visualization result: + +
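Whole-body models predict body, foot, face and hand keypoints in one array. Assuming the standard COCO-WholeBody ordering of 133 keypoints (17 body, 6 foot, 68 face, 21 left-hand, 21 right-hand), a prediction can be split into named groups as sketched below; verify the ranges against the model's `dataset_meta` before relying on them.

```python
# Sketch: split a (133, 2) COCO-WholeBody keypoint array into named parts.
# The index ranges below are an assumption based on the usual ordering
# (body, feet, face, left hand, right hand) and should be double-checked.
import numpy as np

WHOLEBODY_SLICES = {
    'body': slice(0, 17),
    'feet': slice(17, 23),
    'face': slice(23, 91),
    'left_hand': slice(91, 112),
    'right_hand': slice(112, 133),
}

def split_wholebody(keypoints: np.ndarray) -> dict:
    """Split one instance's (133, 2) keypoints into per-part arrays."""
    assert keypoints.shape[0] == 133, 'expected a COCO-WholeBody prediction'
    return {name: keypoints[s] for name, s in WHOLEBODY_SLICES.items()}

parts = split_wholebody(np.zeros((133, 2)))  # stand-in for real predictions
print({name: arr.shape for name, arr in parts.items()})
```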
+ +### 2D Human Whole-Body Pose Estimation with Inferencer + +The Inferencer provides a convenient interface for inference, allowing customization using model aliases instead of configuration files and checkpoint paths. It supports various input formats, including image paths, video paths, image folder paths, and webcams. Below is an example command: + +```shell +python demo/inferencer_demo.py tests/data/crowdpose \ + --pose2d wholebody --vis-out-dir vis_results/crowdpose +``` + +This command infers all images located in `tests/data/crowdpose` and saves the visualization results in the `vis_results/crowdpose` directory. + +Image 1 Image 2 + +In addition, the Inferencer supports saving predicted poses. For more information, please refer to the [inferencer document](https://mmpose.readthedocs.io/en/dev-1.x/user_guides/inference.html#inferencer-a-unified-inference-interface). + +### Speed Up Inference + +Some tips to speed up MMPose inference: + +For top-down models, try to edit the config file. For example, + +1. set `model.test_cfg.flip_test=False` in [pose_hrnet_w48_dark+](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py#L90). +2. use faster human bounding box detector, see [MMDetection](https://mmdetection.readthedocs.io/en/3.x/model_zoo.html). diff --git a/demo/docs/en/3d_human_pose_demo.md b/demo/docs/en/3d_human_pose_demo.md index 367d98c403..8ad276671e 100644 --- a/demo/docs/en/3d_human_pose_demo.md +++ b/demo/docs/en/3d_human_pose_demo.md @@ -1,94 +1,94 @@ -## 3D Human Pose Demo - -
- -### 3D Human Pose Two-stage Estimation Demo - -#### Using mmdet for human bounding box detection and top-down model for the 1st stage (2D pose detection), and inference the 2nd stage (2D-to-3D lifting) - -Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection). - -```shell -python demo/body3d_pose_lifter_demo.py \ -${MMDET_CONFIG_FILE} \ -${MMDET_CHECKPOINT_FILE} \ -${MMPOSE_CONFIG_FILE_2D} \ -${MMPOSE_CHECKPOINT_FILE_2D} \ -${MMPOSE_CONFIG_FILE_3D} \ -${MMPOSE_CHECKPOINT_FILE_3D} \ ---input ${VIDEO_PATH or IMAGE_PATH or 'webcam'} \ -[--show] \ -[--rebase-keypoint-height] \ -[--norm-pose-2d] \ -[--num-instances] \ -[--output-root ${OUT_VIDEO_ROOT}] \ -[--save-predictions] -[--save-predictions] \ -[--device ${GPU_ID or CPU}] \ -[--det-cat-id DET_CAT_ID] \ -[--bbox-thr BBOX_THR] \ -[--kpt-thr KPT_THR] \ -[--use-oks-tracking] \ -[--tracking-thr TRACKING_THR] \ -[--show-interval INTERVAL] \ -[--thickness THICKNESS] \ -[--radius RADIUS] \ -[--use-multi-frames] [--online] -``` - -Note that - -1. `${VIDEO_PATH}` can be the local path or **URL** link to video file. - -2. You can turn on the `[--use-multi-frames]` option to use multi frames for inference in the 2D pose detection stage. - -3. If the `[--online]` option is set to **True**, future frame information can **not** be used when using multi frames for inference in the 2D pose detection stage. - -Examples: - -During 2D pose detection, for single-frame inference that do not rely on extra frames to get the final results of the current frame and save the prediction results, try this: - -```shell -python demo/body3d_pose_lifter_demo.py \ -demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ -https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ -configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py \ -https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth \ -configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv-cpn-ft_8xb128-200e_h36m.py \ -https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_cpn_ft-88f5abbb_20210527.pth \ ---input https://user-images.githubusercontent.com/87690686/164970135-b14e424c-765a-4180-9bc8-fa8d6abc5510.mp4 \ ---output-root vis_results \ ---rebase-keypoint-height --save-predictions -``` - -During 2D pose detection, for multi-frame inference that rely on extra frames to get the final results of the current frame, try this: - -```shell -python demo/body3d_pose_lifter_demo.py \ -demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ -https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ -configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288.py \ -https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_384x288-5fd6d3ff_20211130.pth \ -configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv-cpn-ft_8xb128-200e_h36m.py \ -https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_cpn_ft-88f5abbb_20210527.pth \ ---input https://user-images.githubusercontent.com/87690686/164970135-b14e424c-765a-4180-9bc8-fa8d6abc5510.mp4 \ ---output-root vis_results \ ---rebase-keypoint-height \ ---use-multi-frames --online -``` - -### 3D Human Pose Demo with Inferencer - -The 
Inferencer provides a convenient interface for inference, allowing customization using model aliases instead of configuration files and checkpoint paths. It supports various input formats, including image paths, video paths, image folder paths, and webcams. Below is an example command: - -```shell -python demo/inferencer_demo.py tests/data/coco/000000000785.jpg \ - --pose3d human3d --vis-out-dir vis_results/human3d \ - --rebase-keypoint-height -``` - -This command infers the image and saves the visualization results in the `vis_results/human3d` directory. - -Image 1 - -In addition, the Inferencer supports saving predicted poses. For more information, please refer to the [inferencer document](https://mmpose.readthedocs.io/en/latest/user_guides/inference.html#inferencer-a-unified-inference-interface). +## 3D Human Pose Demo + +
+ +### 3D Human Pose Two-stage Estimation Demo + +#### Using mmdet for human bounding box detection, a top-down model for the 1st stage (2D pose detection), and a pose lifting model for the 2nd stage (2D-to-3D lifting) + +Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection). + +```shell +python demo/body3d_pose_lifter_demo.py \ +${MMDET_CONFIG_FILE} \ +${MMDET_CHECKPOINT_FILE} \ +${MMPOSE_CONFIG_FILE_2D} \ +${MMPOSE_CHECKPOINT_FILE_2D} \ +${MMPOSE_CONFIG_FILE_3D} \ +${MMPOSE_CHECKPOINT_FILE_3D} \ +--input ${VIDEO_PATH or IMAGE_PATH or 'webcam'} \ +[--show] \ +[--rebase-keypoint-height] \ +[--norm-pose-2d] \ +[--num-instances] \ +[--output-root ${OUT_VIDEO_ROOT}] \ +[--save-predictions] \ +[--device ${GPU_ID or CPU}] \ +[--det-cat-id DET_CAT_ID] \ +[--bbox-thr BBOX_THR] \ +[--kpt-thr KPT_THR] \ +[--use-oks-tracking] \ +[--tracking-thr TRACKING_THR] \ +[--show-interval INTERVAL] \ +[--thickness THICKNESS] \ +[--radius RADIUS] \ +[--use-multi-frames] [--online] +``` + +Note that + +1. `${VIDEO_PATH}` can be a local path or a **URL** link to a video file. + +2. You can turn on the `[--use-multi-frames]` option to use multiple frames for inference in the 2D pose detection stage. + +3. If the `[--online]` option is set to **True**, future frame information can **not** be used when using multiple frames for inference in the 2D pose detection stage. + +Examples: + +For single-frame inference in the 2D pose detection stage, which does not rely on extra frames to get the final result of the current frame, and to save the prediction results, try this: + +```shell +python demo/body3d_pose_lifter_demo.py \ +demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ +https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ +configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py \ +https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth \ +configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv-cpn-ft_8xb128-200e_h36m.py \ +https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_cpn_ft-88f5abbb_20210527.pth \ +--input https://user-images.githubusercontent.com/87690686/164970135-b14e424c-765a-4180-9bc8-fa8d6abc5510.mp4 \ +--output-root vis_results \ +--rebase-keypoint-height --save-predictions +``` + +For multi-frame inference in the 2D pose detection stage, which relies on extra frames to get the final result of the current frame, try this: + +```shell +python demo/body3d_pose_lifter_demo.py \ +demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ +https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ +configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288.py \ +https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_384x288-5fd6d3ff_20211130.pth \ +configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv-cpn-ft_8xb128-200e_h36m.py \ +https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_cpn_ft-88f5abbb_20210527.pth \ +--input https://user-images.githubusercontent.com/87690686/164970135-b14e424c-765a-4180-9bc8-fa8d6abc5510.mp4 \ +--output-root vis_results \ +--rebase-keypoint-height \ +--use-multi-frames --online +``` + +### 3D Human Pose Demo with Inferencer + +The 
Inferencer provides a convenient interface for inference, allowing customization using model aliases instead of configuration files and checkpoint paths. It supports various input formats, including image paths, video paths, image folder paths, and webcams. Below is an example command: + +```shell +python demo/inferencer_demo.py tests/data/coco/000000000785.jpg \ + --pose3d human3d --vis-out-dir vis_results/human3d \ + --rebase-keypoint-height +``` + +This command infers the image and saves the visualization results in the `vis_results/human3d` directory. + +Image 1 + +In addition, the Inferencer supports saving predicted poses. For more information, please refer to the [inferencer document](https://mmpose.readthedocs.io/en/latest/user_guides/inference.html#inferencer-a-unified-inference-interface). diff --git a/demo/docs/en/mmdet_modelzoo.md b/demo/docs/en/mmdet_modelzoo.md index 5383cb953f..f854946dc0 100644 --- a/demo/docs/en/mmdet_modelzoo.md +++ b/demo/docs/en/mmdet_modelzoo.md @@ -1,40 +1,40 @@ -## Pre-trained Detection Models - -### Human Bounding Box Detection Models - -For human bounding box detection models, please download from [MMDetection Model Zoo](https://mmdetection.readthedocs.io/en/3.x/model_zoo.html). -MMDetection provides 80-class COCO-pretrained models, which already includes the `person` category. - -### Hand Bounding Box Detection Models - -For hand bounding box detection, we simply train our hand box models on OneHand10K dataset using MMDetection. - -#### Hand detection results on OneHand10K test set - -| Arch | Box AP | ckpt | log | -| :---------------------------------------------------------------- | :----: | :---------------------------------------------------------------: | :--------------------------------------------------------------: | -| [Cascade_R-CNN X-101-64x4d-FPN-1class](/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py) | 0.817 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth) | [log](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k_20201030.log.json) | - -### Face Bounding Box Detection Models - -For face bounding box detection, we train a YOLOX detector on COCO-face data using MMDetection. - -#### Face detection results on COCO-face test set - -| Arch | Box AP | ckpt | -| :-------------------------------------------------------------- | :----: | :----------------------------------------------------------------------------------------------------: | -| [YOLOX-s](/demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py) | 0.408 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth) | - -### Animal Bounding Box Detection Models - -#### COCO animals - -In COCO dataset, there are 80 object categories, including 10 common `animal` categories (14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe') -For animals in the categories, please download from [MMDetection Model Zoo](https://mmdetection.readthedocs.io/en/3.x/model_zoo.html). 
- -#### Macaque detection results on MacaquePose test set - -| Arch | Box AP | ckpt | log | -| :---------------------------------------------------------------- | :----: | :---------------------------------------------------------------: | :--------------------------------------------------------------: | -| [Faster_R-CNN_Res50-FPN-1class](/demo/mmdetection_cfg/faster_rcnn_r50_fpn_1class.py) | 0.840 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/faster_rcnn_r50_fpn_1x_macaque-f64f2812_20210409.pth) | [log](https://download.openmmlab.com/mmpose/mmdet_pretrained/faster_rcnn_r50_fpn_1x_macaque_20210409.log.json) | -| [Cascade_R-CNN X-101-64x4d-FPN-1class](/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py) | 0.879 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_macaque-e45e36f5_20210409.pth) | [log](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_macaque_20210409.log.json) | +## Pre-trained Detection Models + +### Human Bounding Box Detection Models + +For human bounding box detection models, please download from [MMDetection Model Zoo](https://mmdetection.readthedocs.io/en/3.x/model_zoo.html). +MMDetection provides 80-class COCO-pretrained models, which already includes the `person` category. + +### Hand Bounding Box Detection Models + +For hand bounding box detection, we simply train our hand box models on OneHand10K dataset using MMDetection. + +#### Hand detection results on OneHand10K test set + +| Arch | Box AP | ckpt | log | +| :---------------------------------------------------------------- | :----: | :---------------------------------------------------------------: | :--------------------------------------------------------------: | +| [Cascade_R-CNN X-101-64x4d-FPN-1class](/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py) | 0.817 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth) | [log](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k_20201030.log.json) | + +### Face Bounding Box Detection Models + +For face bounding box detection, we train a YOLOX detector on COCO-face data using MMDetection. + +#### Face detection results on COCO-face test set + +| Arch | Box AP | ckpt | +| :-------------------------------------------------------------- | :----: | :----------------------------------------------------------------------------------------------------: | +| [YOLOX-s](/demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py) | 0.408 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth) | + +### Animal Bounding Box Detection Models + +#### COCO animals + +In COCO dataset, there are 80 object categories, including 10 common `animal` categories (14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe') +For animals in the categories, please download from [MMDetection Model Zoo](https://mmdetection.readthedocs.io/en/3.x/model_zoo.html). 
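+
+As a usage sketch, such a COCO-pretrained detector can be plugged into the top-down animal pose demo by passing the COCO category id of the target animal (here `--det-cat-id=15` for `cat`); the detector and pose configs/checkpoints below are the ones used in the animal demo documentation:
+
+```shell
+# detect cats with a COCO-pretrained Faster R-CNN, then estimate their pose with the AnimalPose HRNet
+python demo/topdown_demo_with_mmdet.py \
+    demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \
+    https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \
+    configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \
+    https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \
+    --input tests/data/animalpose/ca110.jpeg \
+    --show --det-cat-id=15
+```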
+ +#### Macaque detection results on MacaquePose test set + +| Arch | Box AP | ckpt | log | +| :---------------------------------------------------------------- | :----: | :---------------------------------------------------------------: | :--------------------------------------------------------------: | +| [Faster_R-CNN_Res50-FPN-1class](/demo/mmdetection_cfg/faster_rcnn_r50_fpn_1class.py) | 0.840 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/faster_rcnn_r50_fpn_1x_macaque-f64f2812_20210409.pth) | [log](https://download.openmmlab.com/mmpose/mmdet_pretrained/faster_rcnn_r50_fpn_1x_macaque_20210409.log.json) | +| [Cascade_R-CNN X-101-64x4d-FPN-1class](/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py) | 0.879 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_macaque-e45e36f5_20210409.pth) | [log](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_macaque_20210409.log.json) | diff --git a/demo/docs/en/webcam_api_demo.md b/demo/docs/en/webcam_api_demo.md index 9869392171..c83d95a4e6 100644 --- a/demo/docs/en/webcam_api_demo.md +++ b/demo/docs/en/webcam_api_demo.md @@ -1,30 +1,30 @@ -## Webcam Demo - -The original Webcam API has been deprecated starting from version v1.1.0. Users now have the option to utilize either the Inferencer or the demo script for conducting pose estimation using webcam input. - -### Webcam Demo with Inferencer - -Users can utilize the MMPose Inferencer to estimate human poses in webcam inputs by executing the following command: - -```shell -python demo/inferencer_demo.py webcam --pose2d 'human' -``` - -For additional information about the arguments of Inferencer, please refer to the [Inferencer Documentation](/docs/en/user_guides/inference.md). - -### Webcam Demo with Demo Script - -All of the demo scripts, except for `demo/image_demo.py`, support webcam input. - -Take `demo/topdown_demo_with_mmdet.py` as example, users can utilize this script with webcam input by specifying **`--input webcam`** in the command: - -```shell -# inference with webcam -python demo/topdown_demo_with_mmdet.py \ - projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ - https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ - projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ - https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ - --input webcam \ - --show -``` +## Webcam Demo + +The original Webcam API has been deprecated starting from version v1.1.0. Users now have the option to utilize either the Inferencer or the demo script for conducting pose estimation using webcam input. + +### Webcam Demo with Inferencer + +Users can utilize the MMPose Inferencer to estimate human poses in webcam inputs by executing the following command: + +```shell +python demo/inferencer_demo.py webcam --pose2d 'human' +``` + +For additional information about the arguments of Inferencer, please refer to the [Inferencer Documentation](/docs/en/user_guides/inference.md). + +### Webcam Demo with Demo Script + +All of the demo scripts, except for `demo/image_demo.py`, support webcam input. 
+ +Take `demo/topdown_demo_with_mmdet.py` as example, users can utilize this script with webcam input by specifying **`--input webcam`** in the command: + +```shell +# inference with webcam +python demo/topdown_demo_with_mmdet.py \ + projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ + projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ + --input webcam \ + --show +``` diff --git a/demo/docs/zh_cn/2d_animal_demo.md b/demo/docs/zh_cn/2d_animal_demo.md index e49f292f56..687a0ff209 100644 --- a/demo/docs/zh_cn/2d_animal_demo.md +++ b/demo/docs/zh_cn/2d_animal_demo.md @@ -1,124 +1,124 @@ -## 2D Animal Pose Demo - -本系列文档我们会来介绍如何使用提供了的脚本进行完成基本的推理 demo ,本节先介绍如何对 top-down 结构和动物的 2D 姿态进行单张图片和视频推理,请确保你已经安装了 3.0 以上版本的 [MMDetection](https://github.com/open-mmlab/mmdetection) 。 - -### 2D 动物图片姿态识别推理 - -```shell -python demo/topdown_demo_with_mmdet.py \ - ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ - ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ - --input ${INPUT_PATH} --det-cat-id ${DET_CAT_ID} \ - [--show] [--output-root ${OUTPUT_DIR}] [--save-predictions] \ - [--draw-heatmap ${DRAW_HEATMAP}] [--radius ${KPT_RADIUS}] \ - [--kpt-thr ${KPT_SCORE_THR}] [--bbox-thr ${BBOX_SCORE_THR}] \ - [--device ${GPU_ID or CPU}] -``` - -用户可以在 [model zoo](https://mmpose.readthedocs.io/zh_CN/dev-1.x/model_zoo/animal_2d_keypoint.html) 获取预训练好的关键点识别模型。 - -这里我们用 [animalpose model](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth) 来进行演示: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ - https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ - configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ - https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ - --input tests/data/animalpose/ca110.jpeg \ - --show --draw-heatmap --det-cat-id=15 -``` - -可视化结果如下: - -
- -如果使用了 heatmap-based 模型同时设置了 `--draw-heatmap` ,预测的热图也会跟随关键点一同可视化出来。 - -`--det-cat-id=15` 参数用来指定模型只检测 `cat` 类型,这是基于 COCO 数据集的数据。 - -**COCO 数据集动物信息** - -COCO 数据集共包含 80 个类别,其中有 10 种常见动物,类别如下: - -(14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe') - -对于其他类型的动物,我们也提供了一些训练好的动物检测模型,用户可以前往 [detection model zoo](/demo/docs/zh_cn/mmdet_modelzoo.md) 下载。 - -如果想本地保存可视化结果可使用如下命令: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ - https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ - configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ - https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ - --input tests/data/animalpose/ca110.jpeg \ - --output-root vis_results --draw-heatmap --det-cat-id=15 -``` - -如果想本地保存预测结果,需要使用 `--save-predictions` 。 - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ - https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ - configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ - https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ - --input tests/data/animalpose/ca110.jpeg \ - --output-root vis_results --save-predictions --draw-heatmap --det-cat-id=15 -``` - -仅使用 CPU: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ - https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ - configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ - https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ - --input tests/data/animalpose/ca110.jpeg \ - --show --draw-heatmap --det-cat-id=15 --device cpu -``` - -### 2D 动物视频姿态识别推理 - -视频和图片使用了同样的接口,区别在于视频推理时 `${INPUT_PATH}` 既可以是本地视频文件的路径也可以是视频文件的 **URL** 地址。 - -例如: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ - https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ - configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ - https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ - --input demo/resources/ \ - --output-root vis_results --draw-heatmap --det-cat-id=16 -``` - -
- -这段视频可以在 [Google Drive](https://drive.google.com/file/d/18d8K3wuUpKiDFHvOx0mh1TEwYwpOc5UO/view?usp=sharing) 下载。 - -### 使用 Inferencer 进行 2D 动物姿态识别推理 - -Inferencer 提供一个更便捷的推理接口,使得用户可以绕过模型的配置文件和 checkpoint 路径直接使用 model aliases ,支持包括图片路径、视频路径、图片文件夹路径和 webcams 在内的多种输入方式,例如可以这样使用: - -```shell -python demo/inferencer_demo.py tests/data/ap10k \ - --pose2d animal --vis-out-dir vis_results/ap10k -``` - -该命令会对输入的 `tests/data/ap10k` 下所有的图片进行推理并且把可视化结果都存入 `vis_results/ap10k` 文件夹下。 - -Image 1 Image 2 - -Inferencer 同样支持保存预测结果,更多的信息可以参考 [Inferencer 文档](https://mmpose.readthedocs.io/en/dev-1.x/user_guides/inference.html#inferencer-a-unified-inference-interface) 。 - -### 加速推理 - -用户可以通过修改配置文件来加速,更多具体例子可以参考: - -1. 设置 `model.test_cfg.flip_test=False`,如 [animalpose_hrnet-w32](../../configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py#85) 所示。 -2. 使用更快的 bounding box 检测器,可参考 [MMDetection](https://mmdetection.readthedocs.io/zh_CN/3.x/model_zoo.html) 。 +## 2D Animal Pose Demo + +本系列文档我们会来介绍如何使用提供了的脚本进行完成基本的推理 demo ,本节先介绍如何对 top-down 结构和动物的 2D 姿态进行单张图片和视频推理,请确保你已经安装了 3.0 以上版本的 [MMDetection](https://github.com/open-mmlab/mmdetection) 。 + +### 2D 动物图片姿态识别推理 + +```shell +python demo/topdown_demo_with_mmdet.py \ + ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ + ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --input ${INPUT_PATH} --det-cat-id ${DET_CAT_ID} \ + [--show] [--output-root ${OUTPUT_DIR}] [--save-predictions] \ + [--draw-heatmap ${DRAW_HEATMAP}] [--radius ${KPT_RADIUS}] \ + [--kpt-thr ${KPT_SCORE_THR}] [--bbox-thr ${BBOX_SCORE_THR}] \ + [--device ${GPU_ID or CPU}] +``` + +用户可以在 [model zoo](https://mmpose.readthedocs.io/zh_CN/dev-1.x/model_zoo/animal_2d_keypoint.html) 获取预训练好的关键点识别模型。 + +这里我们用 [animalpose model](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth) 来进行演示: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ + https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ + https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ + --input tests/data/animalpose/ca110.jpeg \ + --show --draw-heatmap --det-cat-id=15 +``` + +可视化结果如下: + +
+ +如果使用了 heatmap-based 模型同时设置了 `--draw-heatmap` ,预测的热图也会跟随关键点一同可视化出来。 + +`--det-cat-id=15` 参数用来指定模型只检测 `cat` 类型,这是基于 COCO 数据集的数据。 + +**COCO 数据集动物信息** + +COCO 数据集共包含 80 个类别,其中有 10 种常见动物,类别如下: + +(14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe') + +对于其他类型的动物,我们也提供了一些训练好的动物检测模型,用户可以前往 [detection model zoo](/demo/docs/zh_cn/mmdet_modelzoo.md) 下载。 + +如果想本地保存可视化结果可使用如下命令: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ + https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ + https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ + --input tests/data/animalpose/ca110.jpeg \ + --output-root vis_results --draw-heatmap --det-cat-id=15 +``` + +如果想本地保存预测结果,需要使用 `--save-predictions` 。 + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ + https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ + https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ + --input tests/data/animalpose/ca110.jpeg \ + --output-root vis_results --save-predictions --draw-heatmap --det-cat-id=15 +``` + +仅使用 CPU: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ + https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ + https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ + --input tests/data/animalpose/ca110.jpeg \ + --show --draw-heatmap --det-cat-id=15 --device cpu +``` + +### 2D 动物视频姿态识别推理 + +视频和图片使用了同样的接口,区别在于视频推理时 `${INPUT_PATH}` 既可以是本地视频文件的路径也可以是视频文件的 **URL** 地址。 + +例如: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ + https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \ + https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \ + --input demo/resources/ \ + --output-root vis_results --draw-heatmap --det-cat-id=16 +``` + +
+ +这段视频可以在 [Google Drive](https://drive.google.com/file/d/18d8K3wuUpKiDFHvOx0mh1TEwYwpOc5UO/view?usp=sharing) 下载。 + +### 使用 Inferencer 进行 2D 动物姿态识别推理 + +Inferencer 提供一个更便捷的推理接口,使得用户可以绕过模型的配置文件和 checkpoint 路径直接使用 model aliases ,支持包括图片路径、视频路径、图片文件夹路径和 webcams 在内的多种输入方式,例如可以这样使用: + +```shell +python demo/inferencer_demo.py tests/data/ap10k \ + --pose2d animal --vis-out-dir vis_results/ap10k +``` + +该命令会对输入的 `tests/data/ap10k` 下所有的图片进行推理并且把可视化结果都存入 `vis_results/ap10k` 文件夹下。 + +Image 1 Image 2 + +Inferencer 同样支持保存预测结果,更多的信息可以参考 [Inferencer 文档](https://mmpose.readthedocs.io/en/dev-1.x/user_guides/inference.html#inferencer-a-unified-inference-interface) 。 + +### 加速推理 + +用户可以通过修改配置文件来加速,更多具体例子可以参考: + +1. 设置 `model.test_cfg.flip_test=False`,如 [animalpose_hrnet-w32](../../configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py#85) 所示。 +2. 使用更快的 bounding box 检测器,可参考 [MMDetection](https://mmdetection.readthedocs.io/zh_CN/3.x/model_zoo.html) 。 diff --git a/demo/docs/zh_cn/2d_face_demo.md b/demo/docs/zh_cn/2d_face_demo.md index e8a4e550db..2b7f0605a8 100644 --- a/demo/docs/zh_cn/2d_face_demo.md +++ b/demo/docs/zh_cn/2d_face_demo.md @@ -1,88 +1,88 @@ -## 2D Face Keypoint Demo - -本节我们继续演示如何使用 demo 脚本进行 2D 脸部关键点的识别。同样的,用户仍要确保开发环境已经安装了 3.0 版本以上的 [MMdetection](https://github.com/open-mmlab/mmdetection) 。 - -我们在 [mmdet model zoo](/demo/docs/zh_cn/mmdet_modelzoo.md#脸部-bounding-box-检测模型) 提供了一个预训练好的脸部 Bounding Box 预测模型,用户可以前往下载。 - -### 2D 脸部图片关键点识别推理 - -```shell -python demo/topdown_demo_with_mmdet.py \ - ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ - ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ - --input ${INPUT_PATH} [--output-root ${OUTPUT_DIR}] \ - [--show] [--device ${GPU_ID or CPU}] [--save-predictions] \ - [--draw-heatmap ${DRAW_HEATMAP}] [--radius ${KPT_RADIUS}] \ - [--kpt-thr ${KPT_SCORE_THR}] [--bbox-thr ${BBOX_SCORE_THR}] -``` - -用户可以在 [model zoo](https://mmpose.readthedocs.io/en/dev-1.x/model_zoo/face_2d_keypoint.html) 获取预训练好的脸部关键点识别模型。 - -这里我们用 [aflw model](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth) 来进行演示: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \ - https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \ - configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py \ - https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth \ - --input tests/data/cofw/001766.jpg \ - --show --draw-heatmap -``` - -可视化结果如下图所示: - -
- -如果使用了 heatmap-based 模型同时设置了 `--draw-heatmap` ,预测的热图也会跟随关键点一同可视化出来。 - -如果想本地保存可视化结果可使用如下命令: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \ - https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \ - configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py \ - https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth \ - --input tests/data/cofw/001766.jpg \ - --draw-heatmap --output-root vis_results -``` - -### 2D 脸部视频关键点识别推理 - -视频和图片使用了同样的接口,区别在于视频推理时 `${INPUT_PATH}` 既可以是本地视频文件的路径也可以是视频文件的 **URL** 地址。 - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \ - https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \ - configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py \ - https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth \ - --input demo/resources/ \ - --show --draw-heatmap --output-root vis_results -``` - -
- -这段视频可以在 [Google Drive](https://drive.google.com/file/d/1kQt80t6w802b_vgVcmiV_QfcSJ3RWzmb/view?usp=sharing) 下载。 - -### 使用 Inferencer 进行 2D 脸部关键点识别推理 - -Inferencer 提供一个更便捷的推理接口,使得用户可以绕过模型的配置文件和 checkpoint 路径直接使用 model aliases ,支持包括图片路径、视频路径、图片文件夹路径和 webcams 在内的多种输入方式,例如可以这样使用: - -```shell -python demo/inferencer_demo.py tests/data/wflw \ - --pose2d face --vis-out-dir vis_results/wflw --radius 1 -``` - -该命令会对输入的 `tests/data/wflw` 下所有的图片进行推理并且把可视化结果都存入 `vis_results/wflw` 文件夹下。 - -Image 1 - -Image 2 - -除此之外, Inferencer 也支持保存预测的姿态结果。具体信息可在 [Inferencer 文档](https://mmpose.readthedocs.io/en/dev-1.x/user_guides/inference.html#inferencer-a-unified-inference-interface) 查看。 - -### 加速推理 - -对于 2D 脸部关键点预测模型,用户可以通过修改配置文件中的 `model.test_cfg.flip_test=False` 来加速,例如 [aflw_hrnetv2](../../../configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py) 中的第 90 行。 +## 2D Face Keypoint Demo + +本节我们继续演示如何使用 demo 脚本进行 2D 脸部关键点的识别。同样的,用户仍要确保开发环境已经安装了 3.0 版本以上的 [MMdetection](https://github.com/open-mmlab/mmdetection) 。 + +我们在 [mmdet model zoo](/demo/docs/zh_cn/mmdet_modelzoo.md#脸部-bounding-box-检测模型) 提供了一个预训练好的脸部 Bounding Box 预测模型,用户可以前往下载。 + +### 2D 脸部图片关键点识别推理 + +```shell +python demo/topdown_demo_with_mmdet.py \ + ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ + ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --input ${INPUT_PATH} [--output-root ${OUTPUT_DIR}] \ + [--show] [--device ${GPU_ID or CPU}] [--save-predictions] \ + [--draw-heatmap ${DRAW_HEATMAP}] [--radius ${KPT_RADIUS}] \ + [--kpt-thr ${KPT_SCORE_THR}] [--bbox-thr ${BBOX_SCORE_THR}] +``` + +用户可以在 [model zoo](https://mmpose.readthedocs.io/en/dev-1.x/model_zoo/face_2d_keypoint.html) 获取预训练好的脸部关键点识别模型。 + +这里我们用 [aflw model](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth) 来进行演示: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \ + https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \ + configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py \ + https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth \ + --input tests/data/cofw/001766.jpg \ + --show --draw-heatmap +``` + +可视化结果如下图所示: + +
+ +如果使用了 heatmap-based 模型同时设置了 `--draw-heatmap` ,预测的热图也会跟随关键点一同可视化出来。 + +如果想本地保存可视化结果可使用如下命令: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \ + https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \ + configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py \ + https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth \ + --input tests/data/cofw/001766.jpg \ + --draw-heatmap --output-root vis_results +``` + +### 2D 脸部视频关键点识别推理 + +视频和图片使用了同样的接口,区别在于视频推理时 `${INPUT_PATH}` 既可以是本地视频文件的路径也可以是视频文件的 **URL** 地址。 + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \ + https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \ + configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py \ + https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth \ + --input demo/resources/ \ + --show --draw-heatmap --output-root vis_results +``` + +
+ +这段视频可以在 [Google Drive](https://drive.google.com/file/d/1kQt80t6w802b_vgVcmiV_QfcSJ3RWzmb/view?usp=sharing) 下载。 + +### 使用 Inferencer 进行 2D 脸部关键点识别推理 + +Inferencer 提供一个更便捷的推理接口,使得用户可以绕过模型的配置文件和 checkpoint 路径直接使用 model aliases ,支持包括图片路径、视频路径、图片文件夹路径和 webcams 在内的多种输入方式,例如可以这样使用: + +```shell +python demo/inferencer_demo.py tests/data/wflw \ + --pose2d face --vis-out-dir vis_results/wflw --radius 1 +``` + +该命令会对输入的 `tests/data/wflw` 下所有的图片进行推理并且把可视化结果都存入 `vis_results/wflw` 文件夹下。 + +Image 1 + +Image 2 + +除此之外, Inferencer 也支持保存预测的姿态结果。具体信息可在 [Inferencer 文档](https://mmpose.readthedocs.io/en/dev-1.x/user_guides/inference.html#inferencer-a-unified-inference-interface) 查看。 + +### 加速推理 + +对于 2D 脸部关键点预测模型,用户可以通过修改配置文件中的 `model.test_cfg.flip_test=False` 来加速,例如 [aflw_hrnetv2](../../../configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py) 中的第 90 行。 diff --git a/demo/docs/zh_cn/2d_hand_demo.md b/demo/docs/zh_cn/2d_hand_demo.md index c2d80edd4e..102049d2ad 100644 --- a/demo/docs/zh_cn/2d_hand_demo.md +++ b/demo/docs/zh_cn/2d_hand_demo.md @@ -1,101 +1,101 @@ -## 2D Hand Keypoint Demo - -本节我们继续通过 demo 脚本演示对单张图片或者视频的 2D 手部关键点的识别。同样的,用户仍要确保开发环境已经安装了 3.0 版本以上的 [MMDetection](https://github.com/open-mmlab/mmdetection) 。 - -我们在 [mmdet model zoo](/demo/docs/zh_cn/mmdet_modelzoo.md#手部-bounding-box-识别模型) 提供了预训练好的手部 Bounding Box 预测模型,用户可以前往下载。 - -### 2D 手部图片关键点识别 - -```shell -python demo/topdown_demo_with_mmdet.py \ - ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ - ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ - --input ${INPUT_PATH} [--output-root ${OUTPUT_DIR}] \ - [--show] [--device ${GPU_ID or CPU}] [--save-predictions] \ - [--draw-heatmap ${DRAW_HEATMAP}] [--radius ${KPT_RADIUS}] \ - [--kpt-thr ${KPT_SCORE_THR}] [--bbox-thr ${BBOX_SCORE_THR}] -``` - -用户可以在 [model zoo](https://mmpose.readthedocs.io/zh_CN/dev-1.x/model_zoo/hand_2d_keypoint.html) 获取预训练好的关键点识别模型。 - -这里我们用 [onehand10k model](https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth) 来进行演示: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py \ - https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth \ - configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py \ - https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth \ - --input tests/data/onehand10k/9.jpg \ - --show --draw-heatmap -``` - -可视化结果如下: - -
- -如果使用了 heatmap-based 模型同时设置了 `--draw-heatmap` ,预测的热图也会跟随关键点一同可视化出来。 - -如果想本地保存可视化结果可使用如下命令: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py \ - https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth \ - configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py \ - https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth \ - --input tests/data/onehand10k/9.jpg \ - --output-root vis_results --show --draw-heatmap -``` - -如果想本地保存预测结果,需要添加 `--save-predictions` 。 - -如果想用 CPU 进行 demo 需添加 `--device cpu` : - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py \ - https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth \ - configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py \ - https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth \ - --input tests/data/onehand10k/9.jpg \ - --show --draw-heatmap --device cpu -``` - -### 2D 手部视频关键点识别推理 - -视频和图片使用了同样的接口,区别在于视频推理时 `${INPUT_PATH}` 既可以是本地视频文件的路径也可以是视频文件的 **URL** 地址。 - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py \ - https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth \ - configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py \ - https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth \ - --input demo/resources/ \ - --output-root vis_results --show --draw-heatmap -``` - -
- -这段视频可以在 [Google Drive](https://raw.githubusercontent.com/open-mmlab/mmpose/master/tests/data/nvgesture/sk_color.avi) 下载到。 - -### 使用 Inferencer 进行 2D 手部关键点识别推理 - -Inferencer 提供一个更便捷的推理接口,使得用户可以绕过模型的配置文件和 checkpoint 路径直接使用 model aliases ,支持包括图片路径、视频路径、图片文件夹路径和 webcams 在内的多种输入方式,例如可以这样使用: - -```shell -python demo/inferencer_demo.py tests/data/onehand10k \ - --pose2d hand --vis-out-dir vis_results/onehand10k \ - --bbox-thr 0.5 --kpt-thr 0.05 -``` - -该命令会对输入的 `tests/data/onehand10k` 下所有的图片进行推理并且把可视化结果都存入 `vis_results/onehand10k` 文件夹下。 - -Image 1 Image 2 Image 3 Image 4 - -除此之外, Inferencer 也支持保存预测的姿态结果。具体信息可在 [Inferencer 文档](https://mmpose.readthedocs.io/zh_CN/dev-1.x/user_guides/inference.html) 查看。 - -### 加速推理 - -对于 2D 手部关键点预测模型,用户可以通过修改配置文件中的 `model.test_cfg.flip_test=False` 来加速,如 [onehand10k_hrnetv2](../../configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py#90) 所示。 +## 2D Hand Keypoint Demo + +本节我们继续通过 demo 脚本演示对单张图片或者视频的 2D 手部关键点的识别。同样的,用户仍要确保开发环境已经安装了 3.0 版本以上的 [MMDetection](https://github.com/open-mmlab/mmdetection) 。 + +我们在 [mmdet model zoo](/demo/docs/zh_cn/mmdet_modelzoo.md#手部-bounding-box-识别模型) 提供了预训练好的手部 Bounding Box 预测模型,用户可以前往下载。 + +### 2D 手部图片关键点识别 + +```shell +python demo/topdown_demo_with_mmdet.py \ + ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ + ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --input ${INPUT_PATH} [--output-root ${OUTPUT_DIR}] \ + [--show] [--device ${GPU_ID or CPU}] [--save-predictions] \ + [--draw-heatmap ${DRAW_HEATMAP}] [--radius ${KPT_RADIUS}] \ + [--kpt-thr ${KPT_SCORE_THR}] [--bbox-thr ${BBOX_SCORE_THR}] +``` + +用户可以在 [model zoo](https://mmpose.readthedocs.io/zh_CN/dev-1.x/model_zoo/hand_2d_keypoint.html) 获取预训练好的关键点识别模型。 + +这里我们用 [onehand10k model](https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth) 来进行演示: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py \ + https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth \ + configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py \ + https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth \ + --input tests/data/onehand10k/9.jpg \ + --show --draw-heatmap +``` + +可视化结果如下: + +
+ +如果使用了 heatmap-based 模型同时设置了 `--draw-heatmap` ,预测的热图也会跟随关键点一同可视化出来。 + +如果想本地保存可视化结果可使用如下命令: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py \ + https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth \ + configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py \ + https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth \ + --input tests/data/onehand10k/9.jpg \ + --output-root vis_results --show --draw-heatmap +``` + +如果想本地保存预测结果,需要添加 `--save-predictions` 。 + +如果想用 CPU 进行 demo 需添加 `--device cpu` : + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py \ + https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth \ + configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py \ + https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth \ + --input tests/data/onehand10k/9.jpg \ + --show --draw-heatmap --device cpu +``` + +### 2D 手部视频关键点识别推理 + +视频和图片使用了同样的接口,区别在于视频推理时 `${INPUT_PATH}` 既可以是本地视频文件的路径也可以是视频文件的 **URL** 地址。 + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py \ + https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth \ + configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py \ + https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth \ + --input demo/resources/ \ + --output-root vis_results --show --draw-heatmap +``` + +
+ +这段视频可以在 [Google Drive](https://raw.githubusercontent.com/open-mmlab/mmpose/master/tests/data/nvgesture/sk_color.avi) 下载到。 + +### 使用 Inferencer 进行 2D 手部关键点识别推理 + +Inferencer 提供一个更便捷的推理接口,使得用户可以绕过模型的配置文件和 checkpoint 路径直接使用 model aliases ,支持包括图片路径、视频路径、图片文件夹路径和 webcams 在内的多种输入方式,例如可以这样使用: + +```shell +python demo/inferencer_demo.py tests/data/onehand10k \ + --pose2d hand --vis-out-dir vis_results/onehand10k \ + --bbox-thr 0.5 --kpt-thr 0.05 +``` + +该命令会对输入的 `tests/data/onehand10k` 下所有的图片进行推理并且把可视化结果都存入 `vis_results/onehand10k` 文件夹下。 + +Image 1 Image 2 Image 3 Image 4 + +除此之外, Inferencer 也支持保存预测的姿态结果。具体信息可在 [Inferencer 文档](https://mmpose.readthedocs.io/zh_CN/dev-1.x/user_guides/inference.html) 查看。 + +### 加速推理 + +对于 2D 手部关键点预测模型,用户可以通过修改配置文件中的 `model.test_cfg.flip_test=False` 来加速,如 [onehand10k_hrnetv2](../../configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py#90) 所示。 diff --git a/demo/docs/zh_cn/2d_human_pose_demo.md b/demo/docs/zh_cn/2d_human_pose_demo.md index ff6484301a..c37d787498 100644 --- a/demo/docs/zh_cn/2d_human_pose_demo.md +++ b/demo/docs/zh_cn/2d_human_pose_demo.md @@ -1,146 +1,146 @@ -## 2D Human Pose Demo - -本节我们继续使用 demo 脚本演示 2D 人体关键点的识别。同样的,用户仍要确保开发环境已经安装了 3.0 版本以上的 [mmdet](https://github.com/open-mmlab/mmdetection) 。 - -### 2D 人体姿态 Top-Down 图片检测 - -#### 使用整张图片作为输入进行检测 - -此时输入的整张图片会被当作 bounding box 使用。 - -```shell -python demo/image_demo.py \ - ${IMG_FILE} ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ - --out-file ${OUTPUT_FILE} \ - [--device ${GPU_ID or CPU}] \ - [--draw_heatmap] -``` - -如果使用了 heatmap-based 模型同时设置了 `--draw-heatmap` ,预测的热图也会跟随关键点一同可视化出来。 - -用户可以在 [model zoo](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo/body_2d_keypoint.html) 获取预训练好的关键点识别模型。 - -这里我们用 [coco model](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth) 来进行演示: - -```shell -python demo/image_demo.py \ - tests/data/coco/000000000785.jpg \ - configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py \ - https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth \ - --out-file vis_results.jpg \ - --draw-heatmap -``` - -使用 CPU 推理: - -```shell -python demo/image_demo.py \ - tests/data/coco/000000000785.jpg \ - configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py \ - https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth \ - --out-file vis_results.jpg \ - --draw-heatmap \ - --device=cpu -``` - -可视化结果如下: - -
- -#### 使用 MMDet 做人体 bounding box 检测 - -使用 MMDet 进行识别的命令如下所示: - -```shell -python demo/topdown_demo_with_mmdet.py \ - ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ - ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ - --input ${INPUT_PATH} \ - [--output-root ${OUTPUT_DIR}] [--save-predictions] \ - [--show] [--draw-heatmap] [--device ${GPU_ID or CPU}] \ - [--bbox-thr ${BBOX_SCORE_THR}] [--kpt-thr ${KPT_SCORE_THR}] -``` - -结合我们的具体例子: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ - https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ - configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py \ - https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth \ - --input tests/data/coco/000000197388.jpg --show --draw-heatmap \ - --output-root vis_results/ -``` - -可视化结果如下: - -
- -想要本地保存识别结果,用户需要加上 `--save-predictions` 。 - -### 2D 人体姿态 Top-Down 视频检测 - -我们的脚本同样支持视频作为输入,由 MMDet 完成人体检测后 MMPose 完成 Top-Down 的姿态预估,视频推理时 `${INPUT_PATH}` 既可以是本地视频文件的路径也可以是视频文件的 **URL** 地址。 - -例如: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ - https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ - configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py \ - https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth \ - --input tests/data/posetrack18/videos/000001_mpiinew_test/000001_mpiinew_test.mp4 \ - --output-root=vis_results/demo --show --draw-heatmap -``` - -### 2D 人体姿态 Bottom-Up 图片和视频识别检测 - -除了 Top-Down ,我们也支持 Bottom-Up 不依赖人体识别器的人体姿态预估识别,使用方式如下: - -```shell -python demo/bottomup_demo.py \ - ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ - --input ${INPUT_PATH} \ - [--output-root ${OUTPUT_DIR}] [--save-predictions] \ - [--show] [--device ${GPU_ID or CPU}] \ - [--kpt-thr ${KPT_SCORE_THR}] -``` - -结合具体示例如下: - -```shell -python demo/bottomup_demo.py \ - configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512.py \ - https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512_ac7c17bf-20221228.pth \ - --input tests/data/coco/000000197388.jpg --output-root=vis_results \ - --show --save-predictions -``` - -其可视化结果如图所示: - -
- -### 使用 Inferencer 进行 2D 人体姿态识别检测 - -Inferencer 提供一个更便捷的推理接口,使得用户可以绕过模型的配置文件和 checkpoint 路径直接使用 model aliases ,支持包括图片路径、视频路径、图片文件夹路径和 webcams 在内的多种输入方式,例如可以这样使用: - -```shell -python demo/inferencer_demo.py \ - tests/data/posetrack18/videos/000001_mpiinew_test/000001_mpiinew_test.mp4 \ - --pose2d human --vis-out-dir vis_results/posetrack18 -``` - -该命令会对输入的 `tests/data/posetrack18` 下的视频进行推理并且把可视化结果存入 `vis_results/posetrack18` 文件夹下。 - -Image 1 - -Inferencer 支持保存姿态的检测结果,具体的使用可参考 [inferencer document](https://mmpose.readthedocs.io/zh_CN/dev-1.x/user_guides/inference.html) 。 - -### 加速推理 - -对于 top-down 结构的模型,用户可以通过修改配置文件来加速,更多具体例子可以参考: - -1. 设置 `model.test_cfg.flip_test=False`,如 [topdown-res50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py#L56) 所示。 -2. 使用更快的人体 bounding box 检测器,可参考 [MMDetection](https://mmdetection.readthedocs.io/zh_CN/3.x/model_zoo.html) 。 +## 2D Human Pose Demo + +本节我们继续使用 demo 脚本演示 2D 人体关键点的识别。同样的,用户仍要确保开发环境已经安装了 3.0 版本以上的 [mmdet](https://github.com/open-mmlab/mmdetection) 。 + +### 2D 人体姿态 Top-Down 图片检测 + +#### 使用整张图片作为输入进行检测 + +此时输入的整张图片会被当作 bounding box 使用。 + +```shell +python demo/image_demo.py \ + ${IMG_FILE} ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --out-file ${OUTPUT_FILE} \ + [--device ${GPU_ID or CPU}] \ + [--draw_heatmap] +``` + +如果使用了 heatmap-based 模型同时设置了 `--draw-heatmap` ,预测的热图也会跟随关键点一同可视化出来。 + +用户可以在 [model zoo](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo/body_2d_keypoint.html) 获取预训练好的关键点识别模型。 + +这里我们用 [coco model](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth) 来进行演示: + +```shell +python demo/image_demo.py \ + tests/data/coco/000000000785.jpg \ + configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py \ + https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth \ + --out-file vis_results.jpg \ + --draw-heatmap +``` + +使用 CPU 推理: + +```shell +python demo/image_demo.py \ + tests/data/coco/000000000785.jpg \ + configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py \ + https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth \ + --out-file vis_results.jpg \ + --draw-heatmap \ + --device=cpu +``` + +可视化结果如下: + +
+ +#### 使用 MMDet 做人体 bounding box 检测 + +使用 MMDet 进行识别的命令如下所示: + +```shell +python demo/topdown_demo_with_mmdet.py \ + ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ + ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --input ${INPUT_PATH} \ + [--output-root ${OUTPUT_DIR}] [--save-predictions] \ + [--show] [--draw-heatmap] [--device ${GPU_ID or CPU}] \ + [--bbox-thr ${BBOX_SCORE_THR}] [--kpt-thr ${KPT_SCORE_THR}] +``` + +结合我们的具体例子: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ + https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py \ + https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth \ + --input tests/data/coco/000000197388.jpg --show --draw-heatmap \ + --output-root vis_results/ +``` + +可视化结果如下: + +
+ +想要本地保存识别结果,用户需要加上 `--save-predictions` 。 + +### 2D 人体姿态 Top-Down 视频检测 + +我们的脚本同样支持视频作为输入,由 MMDet 完成人体检测后 MMPose 完成 Top-Down 的姿态预估,视频推理时 `${INPUT_PATH}` 既可以是本地视频文件的路径也可以是视频文件的 **URL** 地址。 + +例如: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ + https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py \ + https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth \ + --input tests/data/posetrack18/videos/000001_mpiinew_test/000001_mpiinew_test.mp4 \ + --output-root=vis_results/demo --show --draw-heatmap +``` + +### 2D 人体姿态 Bottom-Up 图片和视频识别检测 + +除了 Top-Down ,我们也支持 Bottom-Up 不依赖人体识别器的人体姿态预估识别,使用方式如下: + +```shell +python demo/bottomup_demo.py \ + ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --input ${INPUT_PATH} \ + [--output-root ${OUTPUT_DIR}] [--save-predictions] \ + [--show] [--device ${GPU_ID or CPU}] \ + [--kpt-thr ${KPT_SCORE_THR}] +``` + +结合具体示例如下: + +```shell +python demo/bottomup_demo.py \ + configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512.py \ + https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512_ac7c17bf-20221228.pth \ + --input tests/data/coco/000000197388.jpg --output-root=vis_results \ + --show --save-predictions +``` + +其可视化结果如图所示: + +
+ +### 使用 Inferencer 进行 2D 人体姿态识别检测 + +Inferencer 提供一个更便捷的推理接口,使得用户可以绕过模型的配置文件和 checkpoint 路径直接使用 model aliases ,支持包括图片路径、视频路径、图片文件夹路径和 webcams 在内的多种输入方式,例如可以这样使用: + +```shell +python demo/inferencer_demo.py \ + tests/data/posetrack18/videos/000001_mpiinew_test/000001_mpiinew_test.mp4 \ + --pose2d human --vis-out-dir vis_results/posetrack18 +``` + +该命令会对输入的 `tests/data/posetrack18` 下的视频进行推理并且把可视化结果存入 `vis_results/posetrack18` 文件夹下。 + +Image 1 + +Inferencer 支持保存姿态的检测结果,具体的使用可参考 [inferencer document](https://mmpose.readthedocs.io/zh_CN/dev-1.x/user_guides/inference.html) 。 + +### 加速推理 + +对于 top-down 结构的模型,用户可以通过修改配置文件来加速,更多具体例子可以参考: + +1. 设置 `model.test_cfg.flip_test=False`,如 [topdown-res50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py#L56) 所示。 +2. 使用更快的人体 bounding box 检测器,可参考 [MMDetection](https://mmdetection.readthedocs.io/zh_CN/3.x/model_zoo.html) 。 diff --git a/demo/docs/zh_cn/2d_wholebody_pose_demo.md b/demo/docs/zh_cn/2d_wholebody_pose_demo.md index 8c901d47fa..221d16865a 100644 --- a/demo/docs/zh_cn/2d_wholebody_pose_demo.md +++ b/demo/docs/zh_cn/2d_wholebody_pose_demo.md @@ -1,108 +1,108 @@ -## 2D Human Whole-Body Pose Demo - -### 2D 人体全身姿态 Top-Down 图片识别 - -#### 使用整张图片作为输入进行检测 - -此时输入的整张图片会被当作 bounding box 使用。 - -```shell -python demo/image_demo.py \ - ${IMG_FILE} ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ - --out-file ${OUTPUT_FILE} \ - [--device ${GPU_ID or CPU}] \ - [--draw_heatmap] -``` - -用户可以在 [model zoo](https://mmpose.readthedocs.io/zh_CN/dev-1.x/model_zoo/2d_wholebody_keypoint.html) 获取预训练好的关键点识别模型。 - -这里我们用 [coco-wholebody_vipnas_res50_dark](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth) 来进行演示: - -```shell -python demo/image_demo.py \ - tests/data/coco/000000000785.jpg \ - configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py \ - https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth \ - --out-file vis_results.jpg -``` - -使用 CPU 推理: - -```shell -python demo/image_demo.py \ - tests/data/coco/000000000785.jpg \ - configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py \ - https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth \ - --out-file vis_results.jpg \ - --device=cpu -``` - -#### 使用 MMDet 进行人体 bounding box 检测 - -使用 MMDet 进行识别的命令格式如下: - -```shell -python demo/topdown_demo_with_mmdet.py \ - ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ - ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ - --input ${INPUT_PATH} \ - [--output-root ${OUTPUT_DIR}] [--save-predictions] \ - [--show] [--draw-heatmap] [--device ${GPU_ID or CPU}] \ - [--bbox-thr ${BBOX_SCORE_THR}] [--kpt-thr ${KPT_SCORE_THR}] -``` - -具体可例如: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ - https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ - configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py \ - https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth \ - --input tests/data/coco/000000196141.jpg \ - --output-root vis_results/ --show -``` - -想要本地保存识别结果,用户需要加上 `--save-predictions` 。 - -### 2D 
人体全身姿态 Top-Down 视频识别检测 - -我们的脚本同样支持视频作为输入,由 MMDet 完成人体检测后 MMPose 完成 Top-Down 的姿态预估。 - -例如: - -```shell -python demo/topdown_demo_with_mmdet.py \ - demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ - https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ - configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py \ - https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth \ - --input https://user-images.githubusercontent.com/87690686/137440639-fb08603d-9a35-474e-b65f-46b5c06b68d6.mp4 \ - --output-root vis_results/ --show -``` - -可视化结果如下: - -
- -### 使用 Inferencer 进行 2D 人体全身姿态识别 - -Inferencer 提供一个更便捷的推理接口,使得用户可以绕过模型的配置文件和 checkpoint 路径直接使用 model aliases ,支持包括图片路径、视频路径、图片文件夹路径和 webcams 在内的多种输入方式,例如可以这样使用: - -```shell -python demo/inferencer_demo.py tests/data/crowdpose \ - --pose2d wholebody --vis-out-dir vis_results/crowdpose -``` - -该命令会对输入的 `tests/data/crowdpose` 下所有图片进行推理并且把可视化结果存入 `vis_results/crowdpose` 文件夹下。 - -Image 1 Image 2 - -Inferencer 支持保存姿态的检测结果,具体的使用可参考 [Inferencer 文档](https://mmpose.readthedocs.io/zh_CN/dev-1.x/user_guides/#inferencer-a-unified-inference-interface) 。 - -### 加速推理 - -对于 top-down 结构的模型,用户可以通过修改配置文件来加速,更多具体例子可以参考: - -1. 设置 `model.test_cfg.flip_test=False`,用户可参考 [pose_hrnet_w48_dark+](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py#L90) 。 -2. 使用更快的人体 bounding box 检测器,如 [MMDetection](https://mmdetection.readthedocs.io/zh_CN/3.x/model_zoo.html) 。 +## 2D Human Whole-Body Pose Demo + +### 2D 人体全身姿态 Top-Down 图片识别 + +#### 使用整张图片作为输入进行检测 + +此时输入的整张图片会被当作 bounding box 使用。 + +```shell +python demo/image_demo.py \ + ${IMG_FILE} ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --out-file ${OUTPUT_FILE} \ + [--device ${GPU_ID or CPU}] \ + [--draw_heatmap] +``` + +用户可以在 [model zoo](https://mmpose.readthedocs.io/zh_CN/dev-1.x/model_zoo/2d_wholebody_keypoint.html) 获取预训练好的关键点识别模型。 + +这里我们用 [coco-wholebody_vipnas_res50_dark](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth) 来进行演示: + +```shell +python demo/image_demo.py \ + tests/data/coco/000000000785.jpg \ + configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py \ + https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth \ + --out-file vis_results.jpg +``` + +使用 CPU 推理: + +```shell +python demo/image_demo.py \ + tests/data/coco/000000000785.jpg \ + configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py \ + https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth \ + --out-file vis_results.jpg \ + --device=cpu +``` + +#### 使用 MMDet 进行人体 bounding box 检测 + +使用 MMDet 进行识别的命令格式如下: + +```shell +python demo/topdown_demo_with_mmdet.py \ + ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \ + ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \ + --input ${INPUT_PATH} \ + [--output-root ${OUTPUT_DIR}] [--save-predictions] \ + [--show] [--draw-heatmap] [--device ${GPU_ID or CPU}] \ + [--bbox-thr ${BBOX_SCORE_THR}] [--kpt-thr ${KPT_SCORE_THR}] +``` + +具体可例如: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ + https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py \ + https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth \ + --input tests/data/coco/000000196141.jpg \ + --output-root vis_results/ --show +``` + +想要本地保存识别结果,用户需要加上 `--save-predictions` 。 + +### 2D 人体全身姿态 Top-Down 视频识别检测 + +我们的脚本同样支持视频作为输入,由 MMDet 完成人体检测后 MMPose 完成 Top-Down 的姿态预估。 + +例如: + +```shell +python demo/topdown_demo_with_mmdet.py \ + demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \ + 
https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py \ + https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth \ + --input https://user-images.githubusercontent.com/87690686/137440639-fb08603d-9a35-474e-b65f-46b5c06b68d6.mp4 \ + --output-root vis_results/ --show +``` + +可视化结果如下: + +
+ +### 使用 Inferencer 进行 2D 人体全身姿态识别 + +Inferencer 提供一个更便捷的推理接口,使得用户可以绕过模型的配置文件和 checkpoint 路径直接使用 model aliases ,支持包括图片路径、视频路径、图片文件夹路径和 webcams 在内的多种输入方式,例如可以这样使用: + +```shell +python demo/inferencer_demo.py tests/data/crowdpose \ + --pose2d wholebody --vis-out-dir vis_results/crowdpose +``` + +该命令会对输入的 `tests/data/crowdpose` 下所有图片进行推理并且把可视化结果存入 `vis_results/crowdpose` 文件夹下。 + +Image 1 Image 2 + +Inferencer 支持保存姿态的检测结果,具体的使用可参考 [Inferencer 文档](https://mmpose.readthedocs.io/zh_CN/dev-1.x/user_guides/#inferencer-a-unified-inference-interface) 。 + +### 加速推理 + +对于 top-down 结构的模型,用户可以通过修改配置文件来加速,更多具体例子可以参考: + +1. 设置 `model.test_cfg.flip_test=False`,用户可参考 [pose_hrnet_w48_dark+](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py#L90) 。 +2. 使用更快的人体 bounding box 检测器,如 [MMDetection](https://mmdetection.readthedocs.io/zh_CN/3.x/model_zoo.html) 。 diff --git a/demo/docs/zh_cn/3d_human_pose_demo.md b/demo/docs/zh_cn/3d_human_pose_demo.md index 6ed9dd67de..0682543db3 100644 --- a/demo/docs/zh_cn/3d_human_pose_demo.md +++ b/demo/docs/zh_cn/3d_human_pose_demo.md @@ -1 +1 @@ -coming soon +coming soon diff --git a/demo/docs/zh_cn/mmdet_modelzoo.md b/demo/docs/zh_cn/mmdet_modelzoo.md index aabfb1768d..789a97dc95 100644 --- a/demo/docs/zh_cn/mmdet_modelzoo.md +++ b/demo/docs/zh_cn/mmdet_modelzoo.md @@ -1,42 +1,42 @@ -## Pre-trained Detection Models - -### 人体 Bounding Box 检测模型 - -MMDetection 提供了基于 COCO 的包括 `person` 在内的 80 个类别的预训练模型,用户可前往 [MMDetection Model Zoo](https://mmdetection.readthedocs.io/zh_CN/3.x/model_zoo.html) 下载并将其用作人体 bounding box 识别模型。 - -### 手部 Bounding Box 检测模型 - -对于手部 bounding box 检测模型,我们提供了一个通过 MMDetection 基于 OneHand10K 数据库训练的模型。 - -#### 基于 OneHand10K 测试集的测试结果 - -| Arch | Box AP | ckpt | log | -| :---------------------------------------------------------------- | :----: | :---------------------------------------------------------------: | :--------------------------------------------------------------: | -| [Cascade_R-CNN X-101-64x4d-FPN-1class](/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py) | 0.817 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth) | [log](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k_20201030.log.json) | - -### 脸部 Bounding Box 检测模型 - -对于脸部 bounding box 检测模型,我们提供了一个通过 MMDetection 基于 COCO-Face 数据库训练的 YOLOX 检测器。 - -#### 基于 COCO-face 测试集的测试结果 - -| Arch | Box AP | ckpt | -| :-------------------------------------------------------------- | :----: | :----------------------------------------------------------------------------------------------------: | -| [YOLOX-s](/demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py) | 0.408 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth) | - -### 动物 Bounding Box 检测模型 - -#### COCO animals - -COCO 数据集内包括了 10 种常见的 `animal` 类型: - -(14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe') 。 - -用户如果需要使用以上类别的动物检测模型,可以前往 [MMDetection Model Zoo](https://mmdetection.readthedocs.io/zh_CN/3.x/model_zoo.html) 下载。 - -#### 基于 MacaquePose 测试集的测试结果 - -| Arch | Box AP | ckpt | log | -| :---------------------------------------------------------------- | :----: | :---------------------------------------------------------------: | :--------------------------------------------------------------: | -| 
[Faster_R-CNN_Res50-FPN-1class](/demo/mmdetection_cfg/faster_rcnn_r50_fpn_1class.py) | 0.840 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/faster_rcnn_r50_fpn_1x_macaque-f64f2812_20210409.pth) | [log](https://download.openmmlab.com/mmpose/mmdet_pretrained/faster_rcnn_r50_fpn_1x_macaque_20210409.log.json) | -| [Cascade_R-CNN X-101-64x4d-FPN-1class](/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py) | 0.879 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_macaque-e45e36f5_20210409.pth) | [log](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_macaque_20210409.log.json) | +## Pre-trained Detection Models + +### 人体 Bounding Box 检测模型 + +MMDetection 提供了基于 COCO 的包括 `person` 在内的 80 个类别的预训练模型,用户可前往 [MMDetection Model Zoo](https://mmdetection.readthedocs.io/zh_CN/3.x/model_zoo.html) 下载并将其用作人体 bounding box 识别模型。 + +### 手部 Bounding Box 检测模型 + +对于手部 bounding box 检测模型,我们提供了一个通过 MMDetection 基于 OneHand10K 数据库训练的模型。 + +#### 基于 OneHand10K 测试集的测试结果 + +| Arch | Box AP | ckpt | log | +| :---------------------------------------------------------------- | :----: | :---------------------------------------------------------------: | :--------------------------------------------------------------: | +| [Cascade_R-CNN X-101-64x4d-FPN-1class](/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py) | 0.817 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth) | [log](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k_20201030.log.json) | + +### 脸部 Bounding Box 检测模型 + +对于脸部 bounding box 检测模型,我们提供了一个通过 MMDetection 基于 COCO-Face 数据库训练的 YOLOX 检测器。 + +#### 基于 COCO-face 测试集的测试结果 + +| Arch | Box AP | ckpt | +| :-------------------------------------------------------------- | :----: | :----------------------------------------------------------------------------------------------------: | +| [YOLOX-s](/demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py) | 0.408 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth) | + +### 动物 Bounding Box 检测模型 + +#### COCO animals + +COCO 数据集内包括了 10 种常见的 `animal` 类型: + +(14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe') 。 + +用户如果需要使用以上类别的动物检测模型,可以前往 [MMDetection Model Zoo](https://mmdetection.readthedocs.io/zh_CN/3.x/model_zoo.html) 下载。 + +#### 基于 MacaquePose 测试集的测试结果 + +| Arch | Box AP | ckpt | log | +| :---------------------------------------------------------------- | :----: | :---------------------------------------------------------------: | :--------------------------------------------------------------: | +| [Faster_R-CNN_Res50-FPN-1class](/demo/mmdetection_cfg/faster_rcnn_r50_fpn_1class.py) | 0.840 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/faster_rcnn_r50_fpn_1x_macaque-f64f2812_20210409.pth) | [log](https://download.openmmlab.com/mmpose/mmdet_pretrained/faster_rcnn_r50_fpn_1x_macaque_20210409.log.json) | +| [Cascade_R-CNN X-101-64x4d-FPN-1class](/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py) | 0.879 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_macaque-e45e36f5_20210409.pth) | [log](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_macaque_20210409.log.json) | diff --git 
a/demo/docs/zh_cn/webcam_api_demo.md b/demo/docs/zh_cn/webcam_api_demo.md index 66099c9ca6..914f5d4286 100644 --- a/demo/docs/zh_cn/webcam_api_demo.md +++ b/demo/docs/zh_cn/webcam_api_demo.md @@ -1,30 +1,30 @@ -## 摄像头推理 - -从版本 v1.1.0 开始,原来的摄像头 API 已被弃用。用户现在可以选择使用推理器(Inferencer)或 Demo 脚本从摄像头读取的视频中进行姿势估计。 - -### 使用推理器进行摄像头推理 - -用户可以通过执行以下命令来利用 MMPose Inferencer 对摄像头输入进行人体姿势估计: - -```shell -python demo/inferencer_demo.py webcam --pose2d 'human' -``` - -有关推理器的参数详细信息,请参阅 [推理器文档](/docs/en/user_guides/inference.md)。 - -### 使用 Demo 脚本进行摄像头推理 - -除了 `demo/image_demo.py` 之外,所有的 Demo 脚本都支持摄像头输入。 - -以 `demo/topdown_demo_with_mmdet.py` 为例,用户可以通过在命令中指定 **`--input webcam`** 来使用该脚本对摄像头输入进行推理: - -```shell -# inference with webcam -python demo/topdown_demo_with_mmdet.py \ - projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ - https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ - projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ - https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ - --input webcam \ - --show -``` +## 摄像头推理 + +从版本 v1.1.0 开始,原来的摄像头 API 已被弃用。用户现在可以选择使用推理器(Inferencer)或 Demo 脚本从摄像头读取的视频中进行姿势估计。 + +### 使用推理器进行摄像头推理 + +用户可以通过执行以下命令来利用 MMPose Inferencer 对摄像头输入进行人体姿势估计: + +```shell +python demo/inferencer_demo.py webcam --pose2d 'human' +``` + +有关推理器的参数详细信息,请参阅 [推理器文档](/docs/en/user_guides/inference.md)。 + +### 使用 Demo 脚本进行摄像头推理 + +除了 `demo/image_demo.py` 之外,所有的 Demo 脚本都支持摄像头输入。 + +以 `demo/topdown_demo_with_mmdet.py` 为例,用户可以通过在命令中指定 **`--input webcam`** 来使用该脚本对摄像头输入进行推理: + +```shell +# inference with webcam +python demo/topdown_demo_with_mmdet.py \ + projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ + projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ + --input webcam \ + --show +``` diff --git a/demo/image_demo.py b/demo/image_demo.py index bfbc808b1e..4e00388810 100644 --- a/demo/image_demo.py +++ b/demo/image_demo.py @@ -1,105 +1,105 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
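The Inferencer examples in the zh_cn docs above are all shell invocations of `demo/inferencer_demo.py`; as a rough sketch (assuming an MMPose 1.x environment, with the input path, output directory and the `wholebody` alias taken from those examples), the same `MMPoseInferencer` class that the script wraps can be driven directly from Python:

```python
# Minimal sketch of programmatic Inferencer use; it mirrors the
# `--pose2d wholebody --vis-out-dir vis_results/crowdpose` shell example above.
from mmpose.apis.inferencers import MMPoseInferencer

inferencer = MMPoseInferencer(pose2d='wholebody')

# The call returns a generator; iterating it (as demo/inferencer_demo.py does
# with `for _ in inferencer(**call_args)`) is what actually runs inference and
# writes the visualizations.
for result in inferencer(
        inputs='tests/data/crowdpose',
        vis_out_dir='vis_results/crowdpose'):
    pass  # each `result` holds the predictions for one image
```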
-from argparse import ArgumentParser - -from mmcv.image import imread - -from mmpose.apis import inference_topdown, init_model -from mmpose.registry import VISUALIZERS -from mmpose.structures import merge_data_samples - - -def parse_args(): - parser = ArgumentParser() - parser.add_argument('img', help='Image file') - parser.add_argument('config', help='Config file') - parser.add_argument('checkpoint', help='Checkpoint file') - parser.add_argument('--out-file', default=None, help='Path to output file') - parser.add_argument( - '--device', default='cuda:0', help='Device used for inference') - parser.add_argument( - '--draw-heatmap', - action='store_true', - help='Visualize the predicted heatmap') - parser.add_argument( - '--show-kpt-idx', - action='store_true', - default=False, - help='Whether to show the index of keypoints') - parser.add_argument( - '--skeleton-style', - default='mmpose', - type=str, - choices=['mmpose', 'openpose'], - help='Skeleton style selection') - parser.add_argument( - '--kpt-thr', - type=float, - default=0.3, - help='Visualizing keypoint thresholds') - parser.add_argument( - '--radius', - type=int, - default=3, - help='Keypoint radius for visualization') - parser.add_argument( - '--thickness', - type=int, - default=1, - help='Link thickness for visualization') - parser.add_argument( - '--alpha', type=float, default=0.8, help='The transparency of bboxes') - parser.add_argument( - '--show', - action='store_true', - default=False, - help='whether to show img') - args = parser.parse_args() - return args - - -def main(): - args = parse_args() - - # build the model from a config file and a checkpoint file - if args.draw_heatmap: - cfg_options = dict(model=dict(test_cfg=dict(output_heatmaps=True))) - else: - cfg_options = None - - model = init_model( - args.config, - args.checkpoint, - device=args.device, - cfg_options=cfg_options) - - # init visualizer - model.cfg.visualizer.radius = args.radius - model.cfg.visualizer.alpha = args.alpha - model.cfg.visualizer.line_width = args.thickness - - visualizer = VISUALIZERS.build(model.cfg.visualizer) - visualizer.set_dataset_meta( - model.dataset_meta, skeleton_style=args.skeleton_style) - - # inference a single image - batch_results = inference_topdown(model, args.img) - results = merge_data_samples(batch_results) - - # show the results - img = imread(args.img, channel_order='rgb') - visualizer.add_datasample( - 'result', - img, - data_sample=results, - draw_gt=False, - draw_bbox=True, - kpt_thr=args.kpt_thr, - draw_heatmap=args.draw_heatmap, - show_kpt_idx=args.show_kpt_idx, - skeleton_style=args.skeleton_style, - show=args.show, - out_file=args.out_file) - - -if __name__ == '__main__': - main() +# Copyright (c) OpenMMLab. All rights reserved. 
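The 加速推理 tip quoted earlier (setting `model.test_cfg.flip_test=False`) can also be applied without editing the config file, using the same `cfg_options` mechanism that `demo/image_demo.py` uses for `output_heatmaps`. A minimal sketch, reusing the whole-body HRNet config and checkpoint referenced in the docs above; the input image path is a placeholder:

```python
# Sketch: disable flip testing via cfg_options instead of editing the config.
from mmpose.apis import inference_topdown, init_model

config = ('configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/'
          'td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py')
checkpoint = ('https://download.openmmlab.com/mmpose/top_down/hrnet/'
              'hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth')

model = init_model(
    config,
    checkpoint,
    device='cuda:0',
    cfg_options=dict(model=dict(test_cfg=dict(flip_test=False))))

# Inference runs as usual, just without the horizontal-flip averaging pass;
# 'path/to/image.jpg' is a placeholder input.
results = inference_topdown(model, 'path/to/image.jpg')
```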
+from argparse import ArgumentParser + +from mmcv.image import imread + +from mmpose.apis import inference_topdown, init_model +from mmpose.registry import VISUALIZERS +from mmpose.structures import merge_data_samples + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument('img', help='Image file') + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument('--out-file', default=None, help='Path to output file') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--draw-heatmap', + action='store_true', + help='Visualize the predicted heatmap') + parser.add_argument( + '--show-kpt-idx', + action='store_true', + default=False, + help='Whether to show the index of keypoints') + parser.add_argument( + '--skeleton-style', + default='mmpose', + type=str, + choices=['mmpose', 'openpose'], + help='Skeleton style selection') + parser.add_argument( + '--kpt-thr', + type=float, + default=0.3, + help='Visualizing keypoint thresholds') + parser.add_argument( + '--radius', + type=int, + default=3, + help='Keypoint radius for visualization') + parser.add_argument( + '--thickness', + type=int, + default=1, + help='Link thickness for visualization') + parser.add_argument( + '--alpha', type=float, default=0.8, help='The transparency of bboxes') + parser.add_argument( + '--show', + action='store_true', + default=False, + help='whether to show img') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + # build the model from a config file and a checkpoint file + if args.draw_heatmap: + cfg_options = dict(model=dict(test_cfg=dict(output_heatmaps=True))) + else: + cfg_options = None + + model = init_model( + args.config, + args.checkpoint, + device=args.device, + cfg_options=cfg_options) + + # init visualizer + model.cfg.visualizer.radius = args.radius + model.cfg.visualizer.alpha = args.alpha + model.cfg.visualizer.line_width = args.thickness + + visualizer = VISUALIZERS.build(model.cfg.visualizer) + visualizer.set_dataset_meta( + model.dataset_meta, skeleton_style=args.skeleton_style) + + # inference a single image + batch_results = inference_topdown(model, args.img) + results = merge_data_samples(batch_results) + + # show the results + img = imread(args.img, channel_order='rgb') + visualizer.add_datasample( + 'result', + img, + data_sample=results, + draw_gt=False, + draw_bbox=True, + kpt_thr=args.kpt_thr, + draw_heatmap=args.draw_heatmap, + show_kpt_idx=args.show_kpt_idx, + skeleton_style=args.skeleton_style, + show=args.show, + out_file=args.out_file) + + +if __name__ == '__main__': + main() diff --git a/demo/inferencer_demo.py b/demo/inferencer_demo.py index b91e91f74b..5b02f7277e 100644 --- a/demo/inferencer_demo.py +++ b/demo/inferencer_demo.py @@ -1,185 +1,185 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from argparse import ArgumentParser -from typing import Dict - -from mmpose.apis.inferencers import MMPoseInferencer, get_model_aliases - - -def parse_args(): - parser = ArgumentParser() - parser.add_argument( - 'inputs', - type=str, - nargs='?', - help='Input image/video path or folder path.') - parser.add_argument( - '--pose2d', - type=str, - default=None, - help='Pretrained 2D pose estimation algorithm. 
It\'s the path to the ' - 'config file or the model name defined in metafile.') - parser.add_argument( - '--pose2d-weights', - type=str, - default=None, - help='Path to the custom checkpoint file of the selected pose model. ' - 'If it is not specified and "pose2d" is a model name of metafile, ' - 'the weights will be loaded from metafile.') - parser.add_argument( - '--pose3d', - type=str, - default=None, - help='Pretrained 3D pose estimation algorithm. It\'s the path to the ' - 'config file or the model name defined in metafile.') - parser.add_argument( - '--pose3d-weights', - type=str, - default=None, - help='Path to the custom checkpoint file of the selected pose model. ' - 'If it is not specified and "pose3d" is a model name of metafile, ' - 'the weights will be loaded from metafile.') - parser.add_argument( - '--det-model', - type=str, - default=None, - help='Config path or alias of detection model.') - parser.add_argument( - '--det-weights', - type=str, - default=None, - help='Path to the checkpoints of detection model.') - parser.add_argument( - '--det-cat-ids', - type=int, - nargs='+', - default=0, - help='Category id for detection model.') - parser.add_argument( - '--scope', - type=str, - default='mmpose', - help='Scope where modules are defined.') - parser.add_argument( - '--device', - type=str, - default=None, - help='Device used for inference. ' - 'If not specified, the available device will be automatically used.') - parser.add_argument( - '--show', - action='store_true', - help='Display the image/video in a popup window.') - parser.add_argument( - '--draw-bbox', - action='store_true', - help='Whether to draw the bounding boxes.') - parser.add_argument( - '--draw-heatmap', - action='store_true', - default=False, - help='Whether to draw the predicted heatmaps.') - parser.add_argument( - '--bbox-thr', - type=float, - default=0.3, - help='Bounding box score threshold') - parser.add_argument( - '--nms-thr', - type=float, - default=0.3, - help='IoU threshold for bounding box NMS') - parser.add_argument( - '--kpt-thr', type=float, default=0.3, help='Keypoint score threshold') - parser.add_argument( - '--tracking-thr', type=float, default=0.3, help='Tracking threshold') - parser.add_argument( - '--use-oks-tracking', - action='store_true', - help='Whether to use OKS as similarity in tracking') - parser.add_argument( - '--norm-pose-2d', - action='store_true', - help='Scale the bbox (along with the 2D pose) to the average bbox ' - 'scale of the dataset, and move the bbox (along with the 2D pose) to ' - 'the average bbox center of the dataset. This is useful when bbox ' - 'is small, especially in multi-person scenarios.') - parser.add_argument( - '--rebase-keypoint-height', - action='store_true', - help='Rebase the predicted 3D pose so its lowest keypoint has a ' - 'height of 0 (landing on the ground). 
This is useful for ' - 'visualization when the model do not predict the global position ' - 'of the 3D pose.') - parser.add_argument( - '--radius', - type=int, - default=3, - help='Keypoint radius for visualization.') - parser.add_argument( - '--thickness', - type=int, - default=1, - help='Link thickness for visualization.') - parser.add_argument( - '--skeleton-style', - default='mmpose', - type=str, - choices=['mmpose', 'openpose'], - help='Skeleton style selection') - parser.add_argument( - '--black-background', - action='store_true', - help='Plot predictions on a black image') - parser.add_argument( - '--vis-out-dir', - type=str, - default='', - help='Directory for saving visualized results.') - parser.add_argument( - '--pred-out-dir', - type=str, - default='', - help='Directory for saving inference results.') - parser.add_argument( - '--show-alias', - action='store_true', - help='Display all the available model aliases.') - - call_args = vars(parser.parse_args()) - - init_kws = [ - 'pose2d', 'pose2d_weights', 'scope', 'device', 'det_model', - 'det_weights', 'det_cat_ids', 'pose3d', 'pose3d_weights' - ] - init_args = {} - for init_kw in init_kws: - init_args[init_kw] = call_args.pop(init_kw) - - diaplay_alias = call_args.pop('show_alias') - - return init_args, call_args, diaplay_alias - - -def display_model_aliases(model_aliases: Dict[str, str]) -> None: - """Display the available model aliases and their corresponding model - names.""" - aliases = list(model_aliases.keys()) - max_alias_length = max(map(len, aliases)) - print(f'{"ALIAS".ljust(max_alias_length+2)}MODEL_NAME') - for alias in sorted(aliases): - print(f'{alias.ljust(max_alias_length+2)}{model_aliases[alias]}') - - -def main(): - init_args, call_args, diaplay_alias = parse_args() - if diaplay_alias: - model_alises = get_model_aliases(init_args['scope']) - display_model_aliases(model_alises) - else: - inferencer = MMPoseInferencer(**init_args) - for _ in inferencer(**call_args): - pass - - -if __name__ == '__main__': - main() +# Copyright (c) OpenMMLab. All rights reserved. +from argparse import ArgumentParser +from typing import Dict + +from mmpose.apis.inferencers import MMPoseInferencer, get_model_aliases + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument( + 'inputs', + type=str, + nargs='?', + help='Input image/video path or folder path.') + parser.add_argument( + '--pose2d', + type=str, + default=None, + help='Pretrained 2D pose estimation algorithm. It\'s the path to the ' + 'config file or the model name defined in metafile.') + parser.add_argument( + '--pose2d-weights', + type=str, + default=None, + help='Path to the custom checkpoint file of the selected pose model. ' + 'If it is not specified and "pose2d" is a model name of metafile, ' + 'the weights will be loaded from metafile.') + parser.add_argument( + '--pose3d', + type=str, + default=None, + help='Pretrained 3D pose estimation algorithm. It\'s the path to the ' + 'config file or the model name defined in metafile.') + parser.add_argument( + '--pose3d-weights', + type=str, + default=None, + help='Path to the custom checkpoint file of the selected pose model. 
' + 'If it is not specified and "pose3d" is a model name of metafile, ' + 'the weights will be loaded from metafile.') + parser.add_argument( + '--det-model', + type=str, + default=None, + help='Config path or alias of detection model.') + parser.add_argument( + '--det-weights', + type=str, + default=None, + help='Path to the checkpoints of detection model.') + parser.add_argument( + '--det-cat-ids', + type=int, + nargs='+', + default=0, + help='Category id for detection model.') + parser.add_argument( + '--scope', + type=str, + default='mmpose', + help='Scope where modules are defined.') + parser.add_argument( + '--device', + type=str, + default=None, + help='Device used for inference. ' + 'If not specified, the available device will be automatically used.') + parser.add_argument( + '--show', + action='store_true', + help='Display the image/video in a popup window.') + parser.add_argument( + '--draw-bbox', + action='store_true', + help='Whether to draw the bounding boxes.') + parser.add_argument( + '--draw-heatmap', + action='store_true', + default=False, + help='Whether to draw the predicted heatmaps.') + parser.add_argument( + '--bbox-thr', + type=float, + default=0.3, + help='Bounding box score threshold') + parser.add_argument( + '--nms-thr', + type=float, + default=0.3, + help='IoU threshold for bounding box NMS') + parser.add_argument( + '--kpt-thr', type=float, default=0.3, help='Keypoint score threshold') + parser.add_argument( + '--tracking-thr', type=float, default=0.3, help='Tracking threshold') + parser.add_argument( + '--use-oks-tracking', + action='store_true', + help='Whether to use OKS as similarity in tracking') + parser.add_argument( + '--norm-pose-2d', + action='store_true', + help='Scale the bbox (along with the 2D pose) to the average bbox ' + 'scale of the dataset, and move the bbox (along with the 2D pose) to ' + 'the average bbox center of the dataset. This is useful when bbox ' + 'is small, especially in multi-person scenarios.') + parser.add_argument( + '--rebase-keypoint-height', + action='store_true', + help='Rebase the predicted 3D pose so its lowest keypoint has a ' + 'height of 0 (landing on the ground). 
This is useful for ' + 'visualization when the model do not predict the global position ' + 'of the 3D pose.') + parser.add_argument( + '--radius', + type=int, + default=3, + help='Keypoint radius for visualization.') + parser.add_argument( + '--thickness', + type=int, + default=1, + help='Link thickness for visualization.') + parser.add_argument( + '--skeleton-style', + default='mmpose', + type=str, + choices=['mmpose', 'openpose'], + help='Skeleton style selection') + parser.add_argument( + '--black-background', + action='store_true', + help='Plot predictions on a black image') + parser.add_argument( + '--vis-out-dir', + type=str, + default='', + help='Directory for saving visualized results.') + parser.add_argument( + '--pred-out-dir', + type=str, + default='', + help='Directory for saving inference results.') + parser.add_argument( + '--show-alias', + action='store_true', + help='Display all the available model aliases.') + + call_args = vars(parser.parse_args()) + + init_kws = [ + 'pose2d', 'pose2d_weights', 'scope', 'device', 'det_model', + 'det_weights', 'det_cat_ids', 'pose3d', 'pose3d_weights' + ] + init_args = {} + for init_kw in init_kws: + init_args[init_kw] = call_args.pop(init_kw) + + diaplay_alias = call_args.pop('show_alias') + + return init_args, call_args, diaplay_alias + + +def display_model_aliases(model_aliases: Dict[str, str]) -> None: + """Display the available model aliases and their corresponding model + names.""" + aliases = list(model_aliases.keys()) + max_alias_length = max(map(len, aliases)) + print(f'{"ALIAS".ljust(max_alias_length+2)}MODEL_NAME') + for alias in sorted(aliases): + print(f'{alias.ljust(max_alias_length+2)}{model_aliases[alias]}') + + +def main(): + init_args, call_args, diaplay_alias = parse_args() + if diaplay_alias: + model_alises = get_model_aliases(init_args['scope']) + display_model_aliases(model_alises) + else: + inferencer = MMPoseInferencer(**init_args) + for _ in inferencer(**call_args): + pass + + +if __name__ == '__main__': + main() diff --git a/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py b/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py index 0ccb78cfca..4e4b8e337e 100644 --- a/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py +++ b/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py @@ -1,270 +1,270 @@ -# runtime settings -default_scope = 'mmdet' - -default_hooks = dict( - timer=dict(type='IterTimerHook'), - logger=dict(type='LoggerHook', interval=50), - param_scheduler=dict(type='ParamSchedulerHook'), - checkpoint=dict(type='CheckpointHook', interval=1), - sampler_seed=dict(type='DistSamplerSeedHook'), - visualization=dict(type='DetVisualizationHook')) - -env_cfg = dict( - cudnn_benchmark=False, - mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), - dist_cfg=dict(backend='nccl'), -) - -vis_backends = [dict(type='LocalVisBackend')] -visualizer = dict( - type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer') -log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True) - -log_level = 'INFO' -load_from = None -resume = False - -# model settings -model = dict( - type='CascadeRCNN', - data_preprocessor=dict( - type='DetDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True, - pad_mask=True, - pad_size_divisor=32), - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - 
style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), - roi_head=dict( - type='CascadeRoIHead', - num_stages=3, - stage_loss_weights=[1, 0.5, 0.25], - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=[ - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=1, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=1, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=1, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) - ]), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=0, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=2000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=[ - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False), - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.6, - neg_iou_thr=0.6, - min_pos_iou=0.6, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False), - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.7, - min_pos_iou=0.7, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - 
pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False) - ]), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100))) - -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' - -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', prob=0.5), - dict(type='PackDetInputs') -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='Resize', scale=(1333, 800), keep_ratio=True), - # If you don't have a gt annotation, delete the pipeline - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='PackDetInputs', - meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', - 'scale_factor')) -] -train_dataloader = dict( - batch_size=2, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - batch_sampler=dict(type='AspectRatioBatchSampler'), - dataset=dict( - type=dataset_type, - data_root=data_root, - ann_file='annotations/instances_train2017.json', - data_prefix=dict(img='train2017/'), - filter_cfg=dict(filter_empty_gt=True, min_size=32), - pipeline=train_pipeline)) -val_dataloader = dict( - batch_size=1, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - ann_file='annotations/instances_val2017.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=test_pipeline)) -test_dataloader = val_dataloader - -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/instances_val2017.json', - metric='bbox', - format_only=False) -test_evaluator = val_evaluator +# runtime settings +default_scope = 'mmdet' + +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', interval=1), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='DetVisualizationHook')) + +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl'), +) + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer') +log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True) + +log_level = 'INFO' +load_from = None +resume = False + +# model settings +model = dict( + type='CascadeRCNN', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_mask=True, + pad_size_divisor=32), + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + 
bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), + roi_head=dict( + type='CascadeRoIHead', + num_stages=3, + stage_loss_weights=[1, 0.5, 0.25], + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=1, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=1, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=1, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ]), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False) + ]), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100))) + +# dataset settings +dataset_type = 'CocoDataset' +data_root = 
'data/coco/' + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + # If you don't have a gt annotation, delete the pipeline + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/instances_val2017.json', + metric='bbox', + format_only=False) +test_evaluator = val_evaluator diff --git a/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_coco.py b/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_coco.py index f91bd0d105..5b9d43a451 100644 --- a/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_coco.py +++ b/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_coco.py @@ -1,256 +1,256 @@ -checkpoint_config = dict(interval=1) -# yapf:disable -log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - # dict(type='TensorboardLoggerHook') - ]) -# yapf:enable -dist_params = dict(backend='nccl') -log_level = 'INFO' -load_from = None -resume_from = None -workflow = [('train', 1)] - -# optimizer -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[16, 19]) -total_epochs = 20 - -# model settings -model = dict( - type='CascadeRCNN', - pretrained='open-mmlab://resnext101_64x4d', - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch'), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), - roi_head=dict( - type='CascadeRoIHead', - num_stages=3, - stage_loss_weights=[1, 0.5, 0.25], - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, 
sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=[ - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) - ]), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=0, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=2000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=[ - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False), - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.6, - neg_iou_thr=0.6, - min_pos_iou=0.6, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False), - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.7, - min_pos_iou=0.7, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False) - ]), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100))) - -dataset_type = 'CocoDataset' -data_root = 'data/coco' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), 
-] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - ann_file=f'{data_root}/annotations/instances_train2017.json', - img_prefix=f'{data_root}/train2017/', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=f'{data_root}/annotations/instances_val2017.json', - img_prefix=f'{data_root}/val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=f'{data_root}/annotations/instances_val2017.json', - img_prefix=f'{data_root}/val2017/', - pipeline=test_pipeline)) -evaluation = dict(interval=1, metric='bbox') +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = None +resume_from = None +workflow = [('train', 1)] + +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[16, 19]) +total_epochs = 20 + +# model settings +model = dict( + type='CascadeRCNN', + pretrained='open-mmlab://resnext101_64x4d', + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), + roi_head=dict( + type='CascadeRoIHead', + num_stages=3, + stage_loss_weights=[1, 0.5, 0.25], + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), 
+ dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ]), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False) + ]), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100))) + +dataset_type = 'CocoDataset' +data_root = 'data/coco' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=f'{data_root}/annotations/instances_train2017.json', + img_prefix=f'{data_root}/train2017/', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=f'{data_root}/annotations/instances_val2017.json', + img_prefix=f'{data_root}/val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=f'{data_root}/annotations/instances_val2017.json', + img_prefix=f'{data_root}/val2017/', + pipeline=test_pipeline)) 
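The single-class detector configs in this directory are meant to be paired with a pose model at inference time; one way is through the Inferencer's `det_model` / `det_weights` / `det_cat_ids` arguments (the `--det-model`, `--det-weights`, `--det-cat-ids` flags of `demo/inferencer_demo.py`). Below is a rough sketch: the checkpoint URL is the OneHand10K detector from the model-zoo table above, while the `hand` alias and category id `0` are assumptions.

```python
# Sketch only: wire a 1-class hand detector into the MMPose Inferencer.
from mmpose.apis.inferencers import MMPoseInferencer

inferencer = MMPoseInferencer(
    # assumed model alias; run `demo/inferencer_demo.py --show-alias`
    # to list the aliases actually registered
    pose2d='hand',
    det_model='demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py',
    det_weights='https://download.openmmlab.com/mmpose/mmdet_pretrained/'
    'cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth',
    det_cat_ids=[0])  # the single class of the 1-class config (assumed id)

# 'path/to/hand.jpg' is a placeholder; an image, video or folder path works.
for _ in inferencer(inputs='path/to/hand.jpg', vis_out_dir='vis_results'):
    pass
```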
+evaluation = dict(interval=1, metric='bbox') diff --git a/demo/mmdetection_cfg/faster_rcnn_r50_fpn_1class.py b/demo/mmdetection_cfg/faster_rcnn_r50_fpn_1class.py index ee54f5b66b..e1fc35d52e 100644 --- a/demo/mmdetection_cfg/faster_rcnn_r50_fpn_1class.py +++ b/demo/mmdetection_cfg/faster_rcnn_r50_fpn_1class.py @@ -1,182 +1,182 @@ -checkpoint_config = dict(interval=1) -# yapf:disable -log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - # dict(type='TensorboardLoggerHook') - ]) -# yapf:enable -dist_params = dict(backend='nccl') -log_level = 'INFO' -load_from = None -resume_from = None -workflow = [('train', 1)] -# optimizer -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[8, 11]) -total_epochs = 12 - -model = dict( - type='FasterRCNN', - pretrained='torchvision://resnet50', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch'), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - roi_head=dict( - type='StandardRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=1, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=2000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100) - # soft-nms is also supported for rcnn testing - # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05) - )) - -dataset_type = 
'CocoDataset' -data_root = 'data/coco' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - ann_file=f'{data_root}/annotations/instances_train2017.json', - img_prefix=f'{data_root}/train2017/', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=f'{data_root}/annotations/instances_val2017.json', - img_prefix=f'{data_root}/val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=f'{data_root}/annotations/instances_val2017.json', - img_prefix=f'{data_root}/val2017/', - pipeline=test_pipeline)) -evaluation = dict(interval=1, metric='bbox') +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = None +resume_from = None +workflow = [('train', 1)] +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[8, 11]) +total_epochs = 12 + +model = dict( + type='FasterRCNN', + pretrained='torchvision://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=1, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0))), + # model training and testing 
settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100) + # soft-nms is also supported for rcnn testing + # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05) + )) + +dataset_type = 'CocoDataset' +data_root = 'data/coco' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=f'{data_root}/annotations/instances_train2017.json', + img_prefix=f'{data_root}/train2017/', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=f'{data_root}/annotations/instances_val2017.json', + img_prefix=f'{data_root}/val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=f'{data_root}/annotations/instances_val2017.json', + img_prefix=f'{data_root}/val2017/', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='bbox') diff --git a/demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py b/demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py index 5bceed65ba..74d5498779 100644 --- a/demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py +++ b/demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py @@ -1,196 +1,196 @@ -# runtime settings -default_scope = 'mmdet' - -default_hooks = dict( - timer=dict(type='IterTimerHook'), - logger=dict(type='LoggerHook', interval=50), - param_scheduler=dict(type='ParamSchedulerHook'), - checkpoint=dict(type='CheckpointHook', interval=1), - sampler_seed=dict(type='DistSamplerSeedHook'), - visualization=dict(type='DetVisualizationHook')) - -env_cfg = dict( - cudnn_benchmark=False, - mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), - dist_cfg=dict(backend='nccl'), -) - -vis_backends = [dict(type='LocalVisBackend')] -visualizer = dict( - type='DetLocalVisualizer', vis_backends=vis_backends, 
name='visualizer') -log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True) - -log_level = 'INFO' -load_from = None -resume = False - -# model settings -model = dict( - type='FasterRCNN', - data_preprocessor=dict( - type='DetDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True, - pad_size_divisor=32), - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - roi_head=dict( - type='StandardRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=2000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100) - # soft-nms is also supported for rcnn testing - # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05) - )) - -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' - -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', prob=0.5), - dict(type='PackDetInputs') -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='Resize', scale=(1333, 800), keep_ratio=True), - # If you don't have a gt annotation, delete the pipeline - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='PackDetInputs', - 
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', - 'scale_factor')) -] -train_dataloader = dict( - batch_size=2, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - batch_sampler=dict(type='AspectRatioBatchSampler'), - dataset=dict( - type=dataset_type, - data_root=data_root, - ann_file='annotations/instances_train2017.json', - data_prefix=dict(img='train2017/'), - filter_cfg=dict(filter_empty_gt=True, min_size=32), - pipeline=train_pipeline)) -val_dataloader = dict( - batch_size=1, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - ann_file='annotations/instances_val2017.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=test_pipeline)) -test_dataloader = val_dataloader - -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/instances_val2017.json', - metric='bbox', - format_only=False) -test_evaluator = val_evaluator +# runtime settings +default_scope = 'mmdet' + +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', interval=1), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='DetVisualizationHook')) + +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl'), +) + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer') +log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True) + +log_level = 'INFO' +load_from = None +resume = False + +# model settings +model = dict( + type='FasterRCNN', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( 
+ rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100) + # soft-nms is also supported for rcnn testing + # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05) + )) + +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + # If you don't have a gt annotation, delete the pipeline + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + batch_sampler=dict(type='AspectRatioBatchSampler'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/instances_val2017.json', + metric='bbox', + format_only=False) +test_evaluator = val_evaluator diff --git a/demo/mmdetection_cfg/mask_rcnn_r50_fpn_2x_coco.py b/demo/mmdetection_cfg/mask_rcnn_r50_fpn_2x_coco.py index 05d39fa9a8..226ae253f7 100644 --- a/demo/mmdetection_cfg/mask_rcnn_r50_fpn_2x_coco.py +++ b/demo/mmdetection_cfg/mask_rcnn_r50_fpn_2x_coco.py @@ -1,242 +1,242 @@ -model = dict( - type='MaskRCNN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - 
scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0.0, 0.0, 0.0, 0.0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - roi_head=dict( - type='StandardRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0.0, 0.0, 0.0, 0.0], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - mask_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - mask_head=dict( - type='FCNMaskHead', - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=80, - loss_mask=dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=2000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - mask_size=28, - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100, - mask_thr_binary=0.5))) -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type='CocoDataset', - 
ann_file='data/coco/annotations/instances_train2017.json', - img_prefix='data/coco/train2017/', - pipeline=[ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict( - type='Collect', - keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) - ]), - val=dict( - type='CocoDataset', - ann_file='data/coco/annotations/instances_val2017.json', - img_prefix='data/coco/val2017/', - pipeline=[ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) - ]), - test=dict( - type='CocoDataset', - ann_file='data/coco/annotations/instances_val2017.json', - img_prefix='data/coco/val2017/', - pipeline=[ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) - ])) -evaluation = dict(metric=['bbox', 'segm']) -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) -checkpoint_config = dict(interval=1) -log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')]) -custom_hooks = [dict(type='NumClassCheckHook')] -dist_params = dict(backend='nccl') -log_level = 'INFO' -load_from = None -resume_from = None -workflow = [('train', 1)] +model = dict( + type='MaskRCNN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + 
type='DeltaXYWHBBoxCoder', + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='CocoDataset', + ann_file='data/coco/annotations/instances_train2017.json', + img_prefix='data/coco/train2017/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) + ]), + val=dict( + type='CocoDataset', + ann_file='data/coco/annotations/instances_val2017.json', + 
img_prefix='data/coco/val2017/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ]), + test=dict( + type='CocoDataset', + ann_file='data/coco/annotations/instances_val2017.json', + img_prefix='data/coco/val2017/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ])) +evaluation = dict(metric=['bbox', 'segm']) +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) +checkpoint_config = dict(interval=1) +log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')]) +custom_hooks = [dict(type='NumClassCheckHook')] +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/demo/mmdetection_cfg/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py b/demo/mmdetection_cfg/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py index 05c6e9659c..ee463f0bcd 100644 --- a/demo/mmdetection_cfg/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py +++ b/demo/mmdetection_cfg/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py @@ -1,136 +1,136 @@ -# model settings -data_preprocessor = dict( - type='DetDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True, - pad_size_divisor=1) -model = dict( - type='SingleStageDetector', - data_preprocessor=data_preprocessor, - backbone=dict( - type='MobileNetV2', - out_indices=(4, 7), - norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), - init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)), - neck=dict( - type='SSDNeck', - in_channels=(96, 1280), - out_channels=(96, 1280, 512, 256, 256, 128), - level_strides=(2, 2, 2, 2), - level_paddings=(1, 1, 1, 1), - l2_norm_scale=None, - use_depthwise=True, - norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), - act_cfg=dict(type='ReLU6'), - init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)), - bbox_head=dict( - type='SSDHead', - in_channels=(96, 1280, 512, 256, 256, 128), - num_classes=80, - use_depthwise=True, - norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), - act_cfg=dict(type='ReLU6'), - init_cfg=dict(type='Normal', layer='Conv2d', std=0.001), - - # set anchor size manually instead of using the predefined - # SSD300 setting. 
- anchor_generator=dict( - type='SSDAnchorGenerator', - scale_major=False, - strides=[16, 32, 64, 107, 160, 320], - ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]], - min_sizes=[48, 100, 150, 202, 253, 304], - max_sizes=[100, 150, 202, 253, 304, 320]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.1, 0.1, 0.2, 0.2])), - # model training and testing settings - train_cfg=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0., - ignore_iof_thr=-1, - gt_max_assign_all=False), - sampler=dict(type='PseudoSampler'), - smoothl1_beta=1., - allowed_border=-1, - pos_weight=-1, - neg_pos_ratio=3, - debug=False), - test_cfg=dict( - nms_pre=1000, - nms=dict(type='nms', iou_threshold=0.45), - min_bbox_size=0, - score_thr=0.02, - max_per_img=200)) -env_cfg = dict(cudnn_benchmark=True) - -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' - -input_size = 320 -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Expand', - mean=data_preprocessor['mean'], - to_rgb=data_preprocessor['bgr_to_rgb'], - ratio_range=(1, 4)), - dict( - type='MinIoURandomCrop', - min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), - min_crop_size=0.3), - dict(type='Resize', scale=(input_size, input_size), keep_ratio=False), - dict(type='RandomFlip', prob=0.5), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), - dict(type='PackDetInputs') -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='Resize', scale=(input_size, input_size), keep_ratio=False), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='PackDetInputs', - meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', - 'scale_factor')) -] -train_dataloader = dict( - batch_size=24, - num_workers=4, - batch_sampler=None, - dataset=dict( - _delete_=True, - type='RepeatDataset', - times=5, - dataset=dict( - type=dataset_type, - data_root=data_root, - ann_file='annotations/instances_train2017.json', - data_prefix=dict(img='train2017/'), - filter_cfg=dict(filter_empty_gt=True, min_size=32), - pipeline=train_pipeline))) -val_dataloader = dict( - batch_size=8, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - ann_file='annotations/instances_val2017.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=test_pipeline)) -test_dataloader = val_dataloader +# model settings +data_preprocessor = dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=1) +model = dict( + type='SingleStageDetector', + data_preprocessor=data_preprocessor, + backbone=dict( + type='MobileNetV2', + out_indices=(4, 7), + norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), + init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)), + neck=dict( + type='SSDNeck', + in_channels=(96, 1280), + out_channels=(96, 1280, 512, 256, 256, 128), + level_strides=(2, 2, 2, 2), + level_paddings=(1, 1, 1, 1), + l2_norm_scale=None, + use_depthwise=True, + norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), + act_cfg=dict(type='ReLU6'), + init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)), + bbox_head=dict( + type='SSDHead', + in_channels=(96, 1280, 512, 256, 256, 128), + num_classes=80, + 
use_depthwise=True, + norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), + act_cfg=dict(type='ReLU6'), + init_cfg=dict(type='Normal', layer='Conv2d', std=0.001), + + # set anchor size manually instead of using the predefined + # SSD300 setting. + anchor_generator=dict( + type='SSDAnchorGenerator', + scale_major=False, + strides=[16, 32, 64, 107, 160, 320], + ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]], + min_sizes=[48, 100, 150, 202, 253, 304], + max_sizes=[100, 150, 202, 253, 304, 320]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2])), + # model training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0., + ignore_iof_thr=-1, + gt_max_assign_all=False), + sampler=dict(type='PseudoSampler'), + smoothl1_beta=1., + allowed_border=-1, + pos_weight=-1, + neg_pos_ratio=3, + debug=False), + test_cfg=dict( + nms_pre=1000, + nms=dict(type='nms', iou_threshold=0.45), + min_bbox_size=0, + score_thr=0.02, + max_per_img=200)) +env_cfg = dict(cudnn_benchmark=True) + +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' + +input_size = 320 +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Expand', + mean=data_preprocessor['mean'], + to_rgb=data_preprocessor['bgr_to_rgb'], + ratio_range=(1, 4)), + dict( + type='MinIoURandomCrop', + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), + min_crop_size=0.3), + dict(type='Resize', scale=(input_size, input_size), keep_ratio=False), + dict(type='RandomFlip', prob=0.5), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict(type='PackDetInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(input_size, input_size), keep_ratio=False), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=24, + num_workers=4, + batch_sampler=None, + dataset=dict( + _delete_=True, + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline))) +val_dataloader = dict( + batch_size=8, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline)) +test_dataloader = val_dataloader diff --git a/demo/mmdetection_cfg/ssdlite_mobilenetv2_scratch_600e_onehand.py b/demo/mmdetection_cfg/ssdlite_mobilenetv2_scratch_600e_onehand.py index ebdd2e719c..14489164e9 100644 --- a/demo/mmdetection_cfg/ssdlite_mobilenetv2_scratch_600e_onehand.py +++ b/demo/mmdetection_cfg/ssdlite_mobilenetv2_scratch_600e_onehand.py @@ -1,153 +1,153 @@ -# ========================================================= -# from 'mmdetection/configs/_base_/default_runtime.py' -# ========================================================= -default_scope = 'mmdet' -checkpoint_config = dict(interval=1) -# yapf:disable -log_config = dict( - interval=50, - hooks=[ - 
dict(type='TextLoggerHook'), - # dict(type='TensorboardLoggerHook') - ]) -# yapf:enable -custom_hooks = [dict(type='NumClassCheckHook')] -# ========================================================= - -# model settings -data_preprocessor = dict( - type='DetDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True, - pad_size_divisor=1) -model = dict( - type='SingleStageDetector', - data_preprocessor=data_preprocessor, - backbone=dict( - type='MobileNetV2', - out_indices=(4, 7), - norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), - init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)), - neck=dict( - type='SSDNeck', - in_channels=(96, 1280), - out_channels=(96, 1280, 512, 256, 256, 128), - level_strides=(2, 2, 2, 2), - level_paddings=(1, 1, 1, 1), - l2_norm_scale=None, - use_depthwise=True, - norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), - act_cfg=dict(type='ReLU6'), - init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)), - bbox_head=dict( - type='SSDHead', - in_channels=(96, 1280, 512, 256, 256, 128), - num_classes=1, - use_depthwise=True, - norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), - act_cfg=dict(type='ReLU6'), - init_cfg=dict(type='Normal', layer='Conv2d', std=0.001), - - # set anchor size manually instead of using the predefined - # SSD300 setting. - anchor_generator=dict( - type='SSDAnchorGenerator', - scale_major=False, - strides=[16, 32, 64, 107, 160, 320], - ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]], - min_sizes=[48, 100, 150, 202, 253, 304], - max_sizes=[100, 150, 202, 253, 304, 320]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.1, 0.1, 0.2, 0.2])), - # model training and testing settings - train_cfg=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0., - ignore_iof_thr=-1, - gt_max_assign_all=False), - sampler=dict(type='PseudoSampler'), - smoothl1_beta=1., - allowed_border=-1, - pos_weight=-1, - neg_pos_ratio=3, - debug=False), - test_cfg=dict( - nms_pre=1000, - nms=dict(type='nms', iou_threshold=0.45), - min_bbox_size=0, - score_thr=0.02, - max_per_img=200)) -cudnn_benchmark = True - -# dataset settings -file_client_args = dict(backend='disk') - -dataset_type = 'CocoDataset' -data_root = 'data/onehand10k/' -classes = ('hand', ) -input_size = 320 -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='Resize', scale=(input_size, input_size), keep_ratio=False), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='PackDetInputs', - meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', - 'scale_factor')) -] - -val_dataloader = dict( - batch_size=8, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - ann_file='annotations/onehand10k_test.json', - test_mode=True, - pipeline=test_pipeline)) -test_dataloader = val_dataloader - -# optimizer -optimizer = dict(type='SGD', lr=0.015, momentum=0.9, weight_decay=4.0e-5) -optimizer_config = dict(grad_clip=None) - -# learning policy -lr_config = dict( - policy='CosineAnnealing', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - min_lr=0) -runner = dict(type='EpochBasedRunner', max_epochs=120) - -# Avoid evaluation and saving weights too frequently -evaluation = dict(interval=5, metric='bbox') -checkpoint_config = dict(interval=5) -custom_hooks = [ - dict(type='NumClassCheckHook'), - 
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW') -] - -log_config = dict(interval=5) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. -# base_batch_size = (8 GPUs) x (24 samples per GPU) -auto_scale_lr = dict(base_batch_size=192) - -load_from = 'https://download.openmmlab.com/mmdetection/' -'v2.0/ssd/ssdlite_mobilenetv2_scratch_600e_coco/' -'ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth' - -vis_backends = [dict(type='LocalVisBackend')] -visualizer = dict( - type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer') +# ========================================================= +# from 'mmdetection/configs/_base_/default_runtime.py' +# ========================================================= +default_scope = 'mmdet' +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +custom_hooks = [dict(type='NumClassCheckHook')] +# ========================================================= + +# model settings +data_preprocessor = dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=1) +model = dict( + type='SingleStageDetector', + data_preprocessor=data_preprocessor, + backbone=dict( + type='MobileNetV2', + out_indices=(4, 7), + norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), + init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)), + neck=dict( + type='SSDNeck', + in_channels=(96, 1280), + out_channels=(96, 1280, 512, 256, 256, 128), + level_strides=(2, 2, 2, 2), + level_paddings=(1, 1, 1, 1), + l2_norm_scale=None, + use_depthwise=True, + norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), + act_cfg=dict(type='ReLU6'), + init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)), + bbox_head=dict( + type='SSDHead', + in_channels=(96, 1280, 512, 256, 256, 128), + num_classes=1, + use_depthwise=True, + norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), + act_cfg=dict(type='ReLU6'), + init_cfg=dict(type='Normal', layer='Conv2d', std=0.001), + + # set anchor size manually instead of using the predefined + # SSD300 setting. 
+ anchor_generator=dict( + type='SSDAnchorGenerator', + scale_major=False, + strides=[16, 32, 64, 107, 160, 320], + ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]], + min_sizes=[48, 100, 150, 202, 253, 304], + max_sizes=[100, 150, 202, 253, 304, 320]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2])), + # model training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0., + ignore_iof_thr=-1, + gt_max_assign_all=False), + sampler=dict(type='PseudoSampler'), + smoothl1_beta=1., + allowed_border=-1, + pos_weight=-1, + neg_pos_ratio=3, + debug=False), + test_cfg=dict( + nms_pre=1000, + nms=dict(type='nms', iou_threshold=0.45), + min_bbox_size=0, + score_thr=0.02, + max_per_img=200)) +cudnn_benchmark = True + +# dataset settings +file_client_args = dict(backend='disk') + +dataset_type = 'CocoDataset' +data_root = 'data/onehand10k/' +classes = ('hand', ) +input_size = 320 +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(input_size, input_size), keep_ratio=False), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +val_dataloader = dict( + batch_size=8, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/onehand10k_test.json', + test_mode=True, + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# optimizer +optimizer = dict(type='SGD', lr=0.015, momentum=0.9, weight_decay=4.0e-5) +optimizer_config = dict(grad_clip=None) + +# learning policy +lr_config = dict( + policy='CosineAnnealing', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + min_lr=0) +runner = dict(type='EpochBasedRunner', max_epochs=120) + +# Avoid evaluation and saving weights too frequently +evaluation = dict(interval=5, metric='bbox') +checkpoint_config = dict(interval=5) +custom_hooks = [ + dict(type='NumClassCheckHook'), + dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW') +] + +log_config = dict(interval=5) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (24 samples per GPU) +auto_scale_lr = dict(base_batch_size=192) + +load_from = 'https://download.openmmlab.com/mmdetection/' +'v2.0/ssd/ssdlite_mobilenetv2_scratch_600e_coco/' +'ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth' + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer') diff --git a/demo/mmdetection_cfg/yolov3_d53_320_273e_coco.py b/demo/mmdetection_cfg/yolov3_d53_320_273e_coco.py index d7e9cca1eb..2d3efaba22 100644 --- a/demo/mmdetection_cfg/yolov3_d53_320_273e_coco.py +++ b/demo/mmdetection_cfg/yolov3_d53_320_273e_coco.py @@ -1,140 +1,140 @@ -# model settings -model = dict( - type='YOLOV3', - pretrained='open-mmlab://darknet53', - backbone=dict(type='Darknet', depth=53, out_indices=(3, 4, 5)), - neck=dict( - type='YOLOV3Neck', - num_scales=3, - in_channels=[1024, 512, 256], - out_channels=[512, 256, 128]), - bbox_head=dict( - type='YOLOV3Head', - num_classes=80, - in_channels=[512, 256, 128], - out_channels=[1024, 512, 256], - anchor_generator=dict( - type='YOLOAnchorGenerator', - base_sizes=[[(116, 90), (156, 198), (373, 326)], - [(30, 61), (62, 45), (59, 119)], - [(10, 13), (16, 30), (33, 23)]], - strides=[32, 16, 8]), - bbox_coder=dict(type='YOLOBBoxCoder'), - featmap_strides=[32, 16, 8], - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0, - reduction='sum'), - loss_conf=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0, - reduction='sum'), - loss_xy=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=2.0, - reduction='sum'), - loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')), - # training and testing settings - train_cfg=dict( - assigner=dict( - type='GridAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0)), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - conf_thr=0.005, - nms=dict(type='nms', iou_threshold=0.45), - max_per_img=100)) -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco' -img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='PhotoMetricDistortion'), - dict( - type='Expand', - mean=img_norm_cfg['mean'], - to_rgb=img_norm_cfg['to_rgb'], - ratio_range=(1, 2)), - dict( - type='MinIoURandomCrop', - min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), - min_crop_size=0.3), - dict(type='Resize', img_scale=(320, 320), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(320, 320), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img']) - ]) -] -data = dict( - samples_per_gpu=8, - workers_per_gpu=4, - train=dict( - type=dataset_type, - ann_file=f'{data_root}/annotations/instances_train2017.json', - img_prefix=f'{data_root}/train2017/', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=f'{data_root}/annotations/instances_val2017.json', - 
img_prefix=f'{data_root}/val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=f'{data_root}/annotations/instances_val2017.json', - img_prefix=f'{data_root}/val2017/', - pipeline=test_pipeline)) -# optimizer -optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0005) -optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=2000, # same as burn-in in darknet - warmup_ratio=0.1, - step=[218, 246]) -# runtime settings -runner = dict(type='EpochBasedRunner', max_epochs=273) -evaluation = dict(interval=1, metric=['bbox']) - -checkpoint_config = dict(interval=1) -# yapf:disable -log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - # dict(type='TensorboardLoggerHook') - ]) -# yapf:enable -custom_hooks = [dict(type='NumClassCheckHook')] - -dist_params = dict(backend='nccl') -log_level = 'INFO' -load_from = None -resume_from = None -workflow = [('train', 1)] +# model settings +model = dict( + type='YOLOV3', + pretrained='open-mmlab://darknet53', + backbone=dict(type='Darknet', depth=53, out_indices=(3, 4, 5)), + neck=dict( + type='YOLOV3Neck', + num_scales=3, + in_channels=[1024, 512, 256], + out_channels=[512, 256, 128]), + bbox_head=dict( + type='YOLOV3Head', + num_classes=80, + in_channels=[512, 256, 128], + out_channels=[1024, 512, 256], + anchor_generator=dict( + type='YOLOAnchorGenerator', + base_sizes=[[(116, 90), (156, 198), (373, 326)], + [(30, 61), (62, 45), (59, 119)], + [(10, 13), (16, 30), (33, 23)]], + strides=[32, 16, 8]), + bbox_coder=dict(type='YOLOBBoxCoder'), + featmap_strides=[32, 16, 8], + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0, + reduction='sum'), + loss_conf=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0, + reduction='sum'), + loss_xy=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=2.0, + reduction='sum'), + loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='GridAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0)), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + conf_thr=0.005, + nms=dict(type='nms', iou_threshold=0.45), + max_per_img=100)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco' +img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile', to_float32=True), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='PhotoMetricDistortion'), + dict( + type='Expand', + mean=img_norm_cfg['mean'], + to_rgb=img_norm_cfg['to_rgb'], + ratio_range=(1, 2)), + dict( + type='MinIoURandomCrop', + min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), + min_crop_size=0.3), + dict(type='Resize', img_scale=(320, 320), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(320, 320), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( 
+ samples_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=f'{data_root}/annotations/instances_train2017.json', + img_prefix=f'{data_root}/train2017/', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=f'{data_root}/annotations/instances_val2017.json', + img_prefix=f'{data_root}/val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=f'{data_root}/annotations/instances_val2017.json', + img_prefix=f'{data_root}/val2017/', + pipeline=test_pipeline)) +# optimizer +optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0005) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=2000, # same as burn-in in darknet + warmup_ratio=0.1, + step=[218, 246]) +# runtime settings +runner = dict(type='EpochBasedRunner', max_epochs=273) +evaluation = dict(interval=1, metric=['bbox']) + +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +custom_hooks = [dict(type='NumClassCheckHook')] + +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py b/demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py index 16f891304a..225d2c6eed 100644 --- a/demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py +++ b/demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py @@ -1,300 +1,300 @@ -train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=300, val_interval=10) -val_cfg = dict(type='ValLoop') -test_cfg = dict(type='TestLoop') -param_scheduler = [ - dict( - type='mmdet.QuadraticWarmupLR', - by_epoch=True, - begin=0, - end=5, - convert_to_iter_based=True), - dict( - type='CosineAnnealingLR', - eta_min=0.0005, - begin=5, - T_max=285, - end=285, - by_epoch=True, - convert_to_iter_based=True), - dict(type='ConstantLR', by_epoch=True, factor=1, begin=285, end=300) -] -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict( - type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005, nesterov=True), - paramwise_cfg=dict(norm_decay_mult=0.0, bias_decay_mult=0.0)) -auto_scale_lr = dict(enable=False, base_batch_size=64) -default_scope = 'mmdet' -default_hooks = dict( - timer=dict(type='IterTimerHook'), - logger=dict(type='LoggerHook', interval=50), - param_scheduler=dict(type='ParamSchedulerHook'), - checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3), - sampler_seed=dict(type='DistSamplerSeedHook'), - visualization=dict(type='DetVisualizationHook')) -env_cfg = dict( - cudnn_benchmark=False, - mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), - dist_cfg=dict(backend='nccl')) -vis_backends = [dict(type='LocalVisBackend')] -visualizer = dict( - type='DetLocalVisualizer', - vis_backends=[dict(type='LocalVisBackend')], - name='visualizer') -log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True) -log_level = 'INFO' -load_from = 'https://download.openmmlab.com/mmdetection/' \ - 'v2.0/yolox/yolox_s_8x8_300e_coco/' \ - 'yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth' -resume = False -img_scale = (640, 640) -model = dict( - type='YOLOX', - data_preprocessor=dict( - type='DetDataPreprocessor', - pad_size_divisor=32, - batch_augments=[ - dict( - type='BatchSyncRandomResize', - random_size_range=(480, 800), - size_divisor=32, - interval=10) - 
]), - backbone=dict( - type='CSPDarknet', - deepen_factor=0.33, - widen_factor=0.5, - out_indices=(2, 3, 4), - use_depthwise=False, - spp_kernal_sizes=(5, 9, 13), - norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), - act_cfg=dict(type='Swish')), - neck=dict( - type='YOLOXPAFPN', - in_channels=[128, 256, 512], - out_channels=128, - num_csp_blocks=1, - use_depthwise=False, - upsample_cfg=dict(scale_factor=2, mode='nearest'), - norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), - act_cfg=dict(type='Swish')), - bbox_head=dict( - type='YOLOXHead', - num_classes=1, - in_channels=128, - feat_channels=128, - stacked_convs=2, - strides=(8, 16, 32), - use_depthwise=False, - norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), - act_cfg=dict(type='Swish'), - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - reduction='sum', - loss_weight=1.0), - loss_bbox=dict( - type='IoULoss', - mode='square', - eps=1e-16, - reduction='sum', - loss_weight=5.0), - loss_obj=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - reduction='sum', - loss_weight=1.0), - loss_l1=dict(type='L1Loss', reduction='sum', loss_weight=1.0)), - train_cfg=dict(assigner=dict(type='SimOTAAssigner', center_radius=2.5)), - test_cfg=dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65))) -data_root = 'data/coco/' -dataset_type = 'CocoDataset' -backend_args = dict(backend='local') -train_pipeline = [ - dict(type='Mosaic', img_scale=(640, 640), pad_val=114.0), - dict( - type='RandomAffine', scaling_ratio_range=(0.1, 2), - border=(-320, -320)), - dict( - type='MixUp', - img_scale=(640, 640), - ratio_range=(0.8, 1.6), - pad_val=114.0), - dict(type='YOLOXHSVRandomAug'), - dict(type='RandomFlip', prob=0.5), - dict(type='Resize', scale=(640, 640), keep_ratio=True), - dict( - type='Pad', - pad_to_square=True, - pad_val=dict(img=(114.0, 114.0, 114.0))), - dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False), - dict(type='PackDetInputs') -] -train_dataset = dict( - type='MultiImageMixDataset', - dataset=dict( - type='CocoDataset', - data_root='data/coco/', - ann_file='annotations/instances_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=[ - dict(type='LoadImageFromFile', backend_args=dict(backend='local')), - dict(type='LoadAnnotations', with_bbox=True) - ], - filter_cfg=dict(filter_empty_gt=False, min_size=32)), - pipeline=[ - dict(type='Mosaic', img_scale=(640, 640), pad_val=114.0), - dict( - type='RandomAffine', - scaling_ratio_range=(0.1, 2), - border=(-320, -320)), - dict( - type='MixUp', - img_scale=(640, 640), - ratio_range=(0.8, 1.6), - pad_val=114.0), - dict(type='YOLOXHSVRandomAug'), - dict(type='RandomFlip', prob=0.5), - dict(type='Resize', scale=(640, 640), keep_ratio=True), - dict( - type='Pad', - pad_to_square=True, - pad_val=dict(img=(114.0, 114.0, 114.0))), - dict( - type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False), - dict(type='PackDetInputs') - ]) -test_pipeline = [ - dict(type='LoadImageFromFile', backend_args=dict(backend='local')), - dict(type='Resize', scale=(640, 640), keep_ratio=True), - dict( - type='Pad', - pad_to_square=True, - pad_val=dict(img=(114.0, 114.0, 114.0))), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='PackDetInputs', - meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', - 'scale_factor')) -] -train_dataloader = dict( - batch_size=8, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='MultiImageMixDataset', - dataset=dict( - 
type='CocoDataset', - data_root='data/coco/', - ann_file='annotations/coco_face_train.json', - data_prefix=dict(img='train2017/'), - pipeline=[ - dict( - type='LoadImageFromFile', - backend_args=dict(backend='local')), - dict(type='LoadAnnotations', with_bbox=True) - ], - filter_cfg=dict(filter_empty_gt=False, min_size=32), - metainfo=dict(CLASSES=('person', ), PALETTE=(220, 20, 60))), - pipeline=[ - dict(type='Mosaic', img_scale=(640, 640), pad_val=114.0), - dict( - type='RandomAffine', - scaling_ratio_range=(0.1, 2), - border=(-320, -320)), - dict( - type='MixUp', - img_scale=(640, 640), - ratio_range=(0.8, 1.6), - pad_val=114.0), - dict(type='YOLOXHSVRandomAug'), - dict(type='RandomFlip', prob=0.5), - dict(type='Resize', scale=(640, 640), keep_ratio=True), - dict( - type='Pad', - pad_to_square=True, - pad_val=dict(img=(114.0, 114.0, 114.0))), - dict( - type='FilterAnnotations', - min_gt_bbox_wh=(1, 1), - keep_empty=False), - dict(type='PackDetInputs') - ])) -val_dataloader = dict( - batch_size=8, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type='CocoDataset', - data_root='data/coco/', - ann_file='annotations/coco_face_val.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=[ - dict(type='LoadImageFromFile', backend_args=dict(backend='local')), - dict(type='Resize', scale=(640, 640), keep_ratio=True), - dict( - type='Pad', - pad_to_square=True, - pad_val=dict(img=(114.0, 114.0, 114.0))), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='PackDetInputs', - meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', - 'scale_factor')) - ], - metainfo=dict(CLASSES=('person', ), PALETTE=(220, 20, 60)))) -test_dataloader = dict( - batch_size=8, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type='CocoDataset', - data_root='data/coco/', - ann_file='annotations/coco_face_val.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=[ - dict(type='LoadImageFromFile', backend_args=dict(backend='local')), - dict(type='Resize', scale=(640, 640), keep_ratio=True), - dict( - type='Pad', - pad_to_square=True, - pad_val=dict(img=(114.0, 114.0, 114.0))), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='PackDetInputs', - meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', - 'scale_factor')) - ], - metainfo=dict(CLASSES=('person', ), PALETTE=(220, 20, 60)))) -val_evaluator = dict( - type='CocoMetric', - ann_file='data/coco/annotations/coco_face_val.json', - metric='bbox') -test_evaluator = dict( - type='CocoMetric', - ann_file='data/coco/annotations/instances_val2017.json', - metric='bbox') -max_epochs = 300 -num_last_epochs = 15 -interval = 10 -base_lr = 0.01 -custom_hooks = [ - dict(type='YOLOXModeSwitchHook', num_last_epochs=15, priority=48), - dict(type='SyncNormHook', priority=48), - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0001, - strict_load=False, - update_buffers=True, - priority=49) -] -metainfo = dict(CLASSES=('person', ), PALETTE=(220, 20, 60)) -launcher = 'pytorch' +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=300, val_interval=10) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') +param_scheduler = [ + dict( + type='mmdet.QuadraticWarmupLR', + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + eta_min=0.0005, + begin=5, + T_max=285, + end=285, + 
by_epoch=True, + convert_to_iter_based=True), + dict(type='ConstantLR', by_epoch=True, factor=1, begin=285, end=300) +] +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict( + type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005, nesterov=True), + paramwise_cfg=dict(norm_decay_mult=0.0, bias_decay_mult=0.0)) +auto_scale_lr = dict(enable=False, base_batch_size=64) +default_scope = 'mmdet' +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='DetVisualizationHook')) +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl')) +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='DetLocalVisualizer', + vis_backends=[dict(type='LocalVisBackend')], + name='visualizer') +log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True) +log_level = 'INFO' +load_from = 'https://download.openmmlab.com/mmdetection/' \ + 'v2.0/yolox/yolox_s_8x8_300e_coco/' \ + 'yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth' +resume = False +img_scale = (640, 640) +model = dict( + type='YOLOX', + data_preprocessor=dict( + type='DetDataPreprocessor', + pad_size_divisor=32, + batch_augments=[ + dict( + type='BatchSyncRandomResize', + random_size_range=(480, 800), + size_divisor=32, + interval=10) + ]), + backbone=dict( + type='CSPDarknet', + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(2, 3, 4), + use_depthwise=False, + spp_kernal_sizes=(5, 9, 13), + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish')), + neck=dict( + type='YOLOXPAFPN', + in_channels=[128, 256, 512], + out_channels=128, + num_csp_blocks=1, + use_depthwise=False, + upsample_cfg=dict(scale_factor=2, mode='nearest'), + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish')), + bbox_head=dict( + type='YOLOXHead', + num_classes=1, + in_channels=128, + feat_channels=128, + stacked_convs=2, + strides=(8, 16, 32), + use_depthwise=False, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish'), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='sum', + loss_weight=1.0), + loss_bbox=dict( + type='IoULoss', + mode='square', + eps=1e-16, + reduction='sum', + loss_weight=5.0), + loss_obj=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='sum', + loss_weight=1.0), + loss_l1=dict(type='L1Loss', reduction='sum', loss_weight=1.0)), + train_cfg=dict(assigner=dict(type='SimOTAAssigner', center_radius=2.5)), + test_cfg=dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65))) +data_root = 'data/coco/' +dataset_type = 'CocoDataset' +backend_args = dict(backend='local') +train_pipeline = [ + dict(type='Mosaic', img_scale=(640, 640), pad_val=114.0), + dict( + type='RandomAffine', scaling_ratio_range=(0.1, 2), + border=(-320, -320)), + dict( + type='MixUp', + img_scale=(640, 640), + ratio_range=(0.8, 1.6), + pad_val=114.0), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='Resize', scale=(640, 640), keep_ratio=True), + dict( + type='Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False), + dict(type='PackDetInputs') +] +train_dataset = dict( + 
type='MultiImageMixDataset', + dataset=dict( + type='CocoDataset', + data_root='data/coco/', + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=[ + dict(type='LoadImageFromFile', backend_args=dict(backend='local')), + dict(type='LoadAnnotations', with_bbox=True) + ], + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + pipeline=[ + dict(type='Mosaic', img_scale=(640, 640), pad_val=114.0), + dict( + type='RandomAffine', + scaling_ratio_range=(0.1, 2), + border=(-320, -320)), + dict( + type='MixUp', + img_scale=(640, 640), + ratio_range=(0.8, 1.6), + pad_val=114.0), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='Resize', scale=(640, 640), keep_ratio=True), + dict( + type='Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict( + type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False), + dict(type='PackDetInputs') + ]) +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=dict(backend='local')), + dict(type='Resize', scale=(640, 640), keep_ratio=True), + dict( + type='Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=8, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='MultiImageMixDataset', + dataset=dict( + type='CocoDataset', + data_root='data/coco/', + ann_file='annotations/coco_face_train.json', + data_prefix=dict(img='train2017/'), + pipeline=[ + dict( + type='LoadImageFromFile', + backend_args=dict(backend='local')), + dict(type='LoadAnnotations', with_bbox=True) + ], + filter_cfg=dict(filter_empty_gt=False, min_size=32), + metainfo=dict(CLASSES=('person', ), PALETTE=(220, 20, 60))), + pipeline=[ + dict(type='Mosaic', img_scale=(640, 640), pad_val=114.0), + dict( + type='RandomAffine', + scaling_ratio_range=(0.1, 2), + border=(-320, -320)), + dict( + type='MixUp', + img_scale=(640, 640), + ratio_range=(0.8, 1.6), + pad_val=114.0), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='Resize', scale=(640, 640), keep_ratio=True), + dict( + type='Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict( + type='FilterAnnotations', + min_gt_bbox_wh=(1, 1), + keep_empty=False), + dict(type='PackDetInputs') + ])) +val_dataloader = dict( + batch_size=8, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type='CocoDataset', + data_root='data/coco/', + ann_file='annotations/coco_face_val.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=[ + dict(type='LoadImageFromFile', backend_args=dict(backend='local')), + dict(type='Resize', scale=(640, 640), keep_ratio=True), + dict( + type='Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) + ], + metainfo=dict(CLASSES=('person', ), PALETTE=(220, 20, 60)))) +test_dataloader = dict( + batch_size=8, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type='CocoDataset', + data_root='data/coco/', + 
ann_file='annotations/coco_face_val.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=[ + dict(type='LoadImageFromFile', backend_args=dict(backend='local')), + dict(type='Resize', scale=(640, 640), keep_ratio=True), + dict( + type='Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) + ], + metainfo=dict(CLASSES=('person', ), PALETTE=(220, 20, 60)))) +val_evaluator = dict( + type='CocoMetric', + ann_file='data/coco/annotations/coco_face_val.json', + metric='bbox') +test_evaluator = dict( + type='CocoMetric', + ann_file='data/coco/annotations/instances_val2017.json', + metric='bbox') +max_epochs = 300 +num_last_epochs = 15 +interval = 10 +base_lr = 0.01 +custom_hooks = [ + dict(type='YOLOXModeSwitchHook', num_last_epochs=15, priority=48), + dict(type='SyncNormHook', priority=48), + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + strict_load=False, + update_buffers=True, + priority=49) +] +metainfo = dict(CLASSES=('person', ), PALETTE=(220, 20, 60)) +launcher = 'pytorch' diff --git a/demo/mmtracking_cfg/deepsort_faster-rcnn_fpn_4e_mot17-private-half.py b/demo/mmtracking_cfg/deepsort_faster-rcnn_fpn_4e_mot17-private-half.py index 1d7fccf0cb..3dd5129918 100644 --- a/demo/mmtracking_cfg/deepsort_faster-rcnn_fpn_4e_mot17-private-half.py +++ b/demo/mmtracking_cfg/deepsort_faster-rcnn_fpn_4e_mot17-private-half.py @@ -1,321 +1,321 @@ -model = dict( - detector=dict( - type='FasterRCNN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0.0, 0.0, 0.0, 0.0], - target_stds=[1.0, 1.0, 1.0, 1.0], - clip_border=False), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict( - type='SmoothL1Loss', beta=0.1111111111111111, - loss_weight=1.0)), - roi_head=dict( - type='StandardRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict( - type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=1, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0.0, 0.0, 0.0, 0.0], - target_stds=[0.1, 0.1, 0.2, 0.2], - clip_border=False), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', loss_weight=1.0))), - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=2000, 
- max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100)), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmtracking/' - 'mot/faster_rcnn/faster-rcnn_r50_fpn_4e_mot17-half-64ee2ed4.pth')), - type='DeepSORT', - motion=dict(type='KalmanFilter', center_only=False), - reid=dict( - type='BaseReID', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(3, ), - style='pytorch'), - neck=dict(type='GlobalAveragePooling', kernel_size=(8, 4), stride=1), - head=dict( - type='LinearReIDHead', - num_fcs=1, - in_channels=2048, - fc_channels=1024, - out_channels=128, - num_classes=380, - loss=dict(type='CrossEntropyLoss', loss_weight=1.0), - loss_pairwise=dict( - type='TripletLoss', margin=0.3, loss_weight=1.0), - norm_cfg=dict(type='BN1d'), - act_cfg=dict(type='ReLU')), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmtracking/' - 'mot/reid/tracktor_reid_r50_iter25245-a452f51f.pth')), - tracker=dict( - type='SortTracker', - obj_score_thr=0.5, - reid=dict( - num_samples=10, - img_scale=(256, 128), - img_norm_cfg=None, - match_score_thr=2.0), - match_iou_thr=0.5, - momentums=None, - num_tentatives=2, - num_frames_retain=100)) -dataset_type = 'MOTChallengeDataset' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadMultiImagesFromFile', to_float32=True), - dict(type='SeqLoadAnnotations', with_bbox=True, with_track=True), - dict( - type='SeqResize', - img_scale=(1088, 1088), - share_params=True, - ratio_range=(0.8, 1.2), - keep_ratio=True, - bbox_clip_border=False), - dict(type='SeqPhotoMetricDistortion', share_params=True), - dict( - type='SeqRandomCrop', - share_params=False, - crop_size=(1088, 1088), - bbox_clip_border=False), - dict(type='SeqRandomFlip', share_params=True, flip_ratio=0.5), - dict( - type='SeqNormalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='SeqPad', size_divisor=32), - dict(type='MatchInstances', skip_nomatch=True), - dict( - type='VideoCollect', - keys=[ - 'img', 'gt_bboxes', 'gt_labels', 'gt_match_indices', - 'gt_instance_ids' - ]), - dict(type='SeqDefaultFormatBundle', ref_prefix='ref') -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1088, 1088), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='VideoCollect', keys=['img']) - ]) -] -data_root = 'data/MOT17/' -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type='MOTChallengeDataset', - visibility_thr=-1, - ann_file='data/MOT17/annotations/half-train_cocoformat.json', - img_prefix='data/MOT17/train', - ref_img_sampler=dict( - 
num_ref_imgs=1, - frame_range=10, - filter_key_img=True, - method='uniform'), - pipeline=[ - dict(type='LoadMultiImagesFromFile', to_float32=True), - dict(type='SeqLoadAnnotations', with_bbox=True, with_track=True), - dict( - type='SeqResize', - img_scale=(1088, 1088), - share_params=True, - ratio_range=(0.8, 1.2), - keep_ratio=True, - bbox_clip_border=False), - dict(type='SeqPhotoMetricDistortion', share_params=True), - dict( - type='SeqRandomCrop', - share_params=False, - crop_size=(1088, 1088), - bbox_clip_border=False), - dict(type='SeqRandomFlip', share_params=True, flip_ratio=0.5), - dict( - type='SeqNormalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='SeqPad', size_divisor=32), - dict(type='MatchInstances', skip_nomatch=True), - dict( - type='VideoCollect', - keys=[ - 'img', 'gt_bboxes', 'gt_labels', 'gt_match_indices', - 'gt_instance_ids' - ]), - dict(type='SeqDefaultFormatBundle', ref_prefix='ref') - ]), - val=dict( - type='MOTChallengeDataset', - ann_file='data/MOT17/annotations/half-val_cocoformat.json', - img_prefix='data/MOT17/train', - ref_img_sampler=None, - pipeline=[ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1088, 1088), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='VideoCollect', keys=['img']) - ]) - ]), - test=dict( - type='MOTChallengeDataset', - ann_file='data/MOT17/annotations/half-val_cocoformat.json', - img_prefix='data/MOT17/train', - ref_img_sampler=None, - pipeline=[ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1088, 1088), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='VideoCollect', keys=['img']) - ]) - ])) -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -checkpoint_config = dict(interval=1) -log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')]) -dist_params = dict(backend='nccl') -log_level = 'INFO' -load_from = None -resume_from = None -workflow = [('train', 1)] -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=100, - warmup_ratio=0.01, - step=[3]) -total_epochs = 4 -evaluation = dict(metric=['bbox', 'track'], interval=1) -search_metrics = ['MOTA', 'IDF1', 'FN', 'FP', 'IDs', 'MT', 'ML'] +model = dict( + detector=dict( + type='FasterRCNN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[1.0, 1.0, 1.0, 1.0], + clip_border=False), 
+ loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict( + type='SmoothL1Loss', beta=0.1111111111111111, + loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict( + type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=1, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[0.1, 0.1, 0.2, 0.2], + clip_border=False), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', loss_weight=1.0))), + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmtracking/' + 'mot/faster_rcnn/faster-rcnn_r50_fpn_4e_mot17-half-64ee2ed4.pth')), + type='DeepSORT', + motion=dict(type='KalmanFilter', center_only=False), + reid=dict( + type='BaseReID', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling', kernel_size=(8, 4), stride=1), + head=dict( + type='LinearReIDHead', + num_fcs=1, + in_channels=2048, + fc_channels=1024, + out_channels=128, + num_classes=380, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + loss_pairwise=dict( + type='TripletLoss', margin=0.3, loss_weight=1.0), + norm_cfg=dict(type='BN1d'), + act_cfg=dict(type='ReLU')), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmtracking/' + 'mot/reid/tracktor_reid_r50_iter25245-a452f51f.pth')), + tracker=dict( + type='SortTracker', + obj_score_thr=0.5, + reid=dict( + num_samples=10, + img_scale=(256, 128), + img_norm_cfg=None, + match_score_thr=2.0), + match_iou_thr=0.5, + momentums=None, + num_tentatives=2, + num_frames_retain=100)) +dataset_type = 'MOTChallengeDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadMultiImagesFromFile', to_float32=True), + dict(type='SeqLoadAnnotations', with_bbox=True, with_track=True), + dict( + type='SeqResize', + img_scale=(1088, 1088), + share_params=True, + ratio_range=(0.8, 1.2), + keep_ratio=True, + bbox_clip_border=False), + dict(type='SeqPhotoMetricDistortion', share_params=True), + dict( + type='SeqRandomCrop', + share_params=False, + 
crop_size=(1088, 1088), + bbox_clip_border=False), + dict(type='SeqRandomFlip', share_params=True, flip_ratio=0.5), + dict( + type='SeqNormalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='SeqPad', size_divisor=32), + dict(type='MatchInstances', skip_nomatch=True), + dict( + type='VideoCollect', + keys=[ + 'img', 'gt_bboxes', 'gt_labels', 'gt_match_indices', + 'gt_instance_ids' + ]), + dict(type='SeqDefaultFormatBundle', ref_prefix='ref') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1088, 1088), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='VideoCollect', keys=['img']) + ]) +] +data_root = 'data/MOT17/' +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='MOTChallengeDataset', + visibility_thr=-1, + ann_file='data/MOT17/annotations/half-train_cocoformat.json', + img_prefix='data/MOT17/train', + ref_img_sampler=dict( + num_ref_imgs=1, + frame_range=10, + filter_key_img=True, + method='uniform'), + pipeline=[ + dict(type='LoadMultiImagesFromFile', to_float32=True), + dict(type='SeqLoadAnnotations', with_bbox=True, with_track=True), + dict( + type='SeqResize', + img_scale=(1088, 1088), + share_params=True, + ratio_range=(0.8, 1.2), + keep_ratio=True, + bbox_clip_border=False), + dict(type='SeqPhotoMetricDistortion', share_params=True), + dict( + type='SeqRandomCrop', + share_params=False, + crop_size=(1088, 1088), + bbox_clip_border=False), + dict(type='SeqRandomFlip', share_params=True, flip_ratio=0.5), + dict( + type='SeqNormalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='SeqPad', size_divisor=32), + dict(type='MatchInstances', skip_nomatch=True), + dict( + type='VideoCollect', + keys=[ + 'img', 'gt_bboxes', 'gt_labels', 'gt_match_indices', + 'gt_instance_ids' + ]), + dict(type='SeqDefaultFormatBundle', ref_prefix='ref') + ]), + val=dict( + type='MOTChallengeDataset', + ann_file='data/MOT17/annotations/half-val_cocoformat.json', + img_prefix='data/MOT17/train', + ref_img_sampler=None, + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1088, 1088), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='VideoCollect', keys=['img']) + ]) + ]), + test=dict( + type='MOTChallengeDataset', + ann_file='data/MOT17/annotations/half-val_cocoformat.json', + img_prefix='data/MOT17/train', + ref_img_sampler=None, + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1088, 1088), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='VideoCollect', keys=['img']) + ]) + ])) +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +checkpoint_config = 
dict(interval=1) +log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = None +resume_from = None +workflow = [('train', 1)] +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=100, + warmup_ratio=0.01, + step=[3]) +total_epochs = 4 +evaluation = dict(metric=['bbox', 'track'], interval=1) +search_metrics = ['MOTA', 'IDF1', 'FN', 'FP', 'IDs', 'MT', 'ML'] diff --git a/demo/mmtracking_cfg/tracktor_faster-rcnn_r50_fpn_4e_mot17-private.py b/demo/mmtracking_cfg/tracktor_faster-rcnn_r50_fpn_4e_mot17-private.py index 9736269bd9..db944271c0 100644 --- a/demo/mmtracking_cfg/tracktor_faster-rcnn_r50_fpn_4e_mot17-private.py +++ b/demo/mmtracking_cfg/tracktor_faster-rcnn_r50_fpn_4e_mot17-private.py @@ -1,325 +1,325 @@ -model = dict( - detector=dict( - type='FasterRCNN', - pretrained='torchvision://resnet50', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch'), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0.0, 0.0, 0.0, 0.0], - target_stds=[1.0, 1.0, 1.0, 1.0], - clip_border=False), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict( - type='SmoothL1Loss', beta=0.1111111111111111, - loss_weight=1.0)), - roi_head=dict( - type='StandardRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict( - type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=1, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0.0, 0.0, 0.0, 0.0], - target_stds=[0.1, 0.1, 0.2, 0.2], - clip_border=False), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', loss_weight=1.0))), - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=2000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100))), - type='Tracktor', - pretrains=dict( - detector='https://download.openmmlab.com/mmtracking/' - 'mot/faster_rcnn/faster-rcnn_r50_fpn_4e_mot17-ffa52ae7.pth', - 
reid='https://download.openmmlab.com/mmtracking/mot/' - 'reid/reid_r50_6e_mot17-4bf6b63d.pth'), - reid=dict( - type='BaseReID', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(3, ), - style='pytorch'), - neck=dict(type='GlobalAveragePooling', kernel_size=(8, 4), stride=1), - head=dict( - type='LinearReIDHead', - num_fcs=1, - in_channels=2048, - fc_channels=1024, - out_channels=128, - num_classes=378, - loss=dict(type='CrossEntropyLoss', loss_weight=1.0), - loss_pairwise=dict( - type='TripletLoss', margin=0.3, loss_weight=1.0), - norm_cfg=dict(type='BN1d'), - act_cfg=dict(type='ReLU'))), - motion=dict( - type='CameraMotionCompensation', - warp_mode='cv2.MOTION_EUCLIDEAN', - num_iters=100, - stop_eps=1e-05), - tracker=dict( - type='TracktorTracker', - obj_score_thr=0.5, - regression=dict( - obj_score_thr=0.5, - nms=dict(type='nms', iou_threshold=0.6), - match_iou_thr=0.3), - reid=dict( - num_samples=10, - img_scale=(256, 128), - img_norm_cfg=None, - match_score_thr=2.0, - match_iou_thr=0.2), - momentums=None, - num_frames_retain=10)) -dataset_type = 'MOTChallengeDataset' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadMultiImagesFromFile', to_float32=True), - dict(type='SeqLoadAnnotations', with_bbox=True, with_track=True), - dict( - type='SeqResize', - img_scale=(1088, 1088), - share_params=True, - ratio_range=(0.8, 1.2), - keep_ratio=True, - bbox_clip_border=False), - dict(type='SeqPhotoMetricDistortion', share_params=True), - dict( - type='SeqRandomCrop', - share_params=False, - crop_size=(1088, 1088), - bbox_clip_border=False), - dict(type='SeqRandomFlip', share_params=True, flip_ratio=0.5), - dict( - type='SeqNormalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='SeqPad', size_divisor=32), - dict(type='MatchInstances', skip_nomatch=True), - dict( - type='VideoCollect', - keys=[ - 'img', 'gt_bboxes', 'gt_labels', 'gt_match_indices', - 'gt_instance_ids' - ]), - dict(type='SeqDefaultFormatBundle', ref_prefix='ref') -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1088, 1088), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='VideoCollect', keys=['img']) - ]) -] -data_root = 'data/MOT17/' -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type='MOTChallengeDataset', - visibility_thr=-1, - ann_file='data/MOT17/annotations/train_cocoformat.json', - img_prefix='data/MOT17/train', - ref_img_sampler=dict( - num_ref_imgs=1, - frame_range=10, - filter_key_img=True, - method='uniform'), - pipeline=[ - dict(type='LoadMultiImagesFromFile', to_float32=True), - dict(type='SeqLoadAnnotations', with_bbox=True, with_track=True), - dict( - type='SeqResize', - img_scale=(1088, 1088), - share_params=True, - ratio_range=(0.8, 1.2), - keep_ratio=True, - bbox_clip_border=False), - dict(type='SeqPhotoMetricDistortion', share_params=True), - dict( - type='SeqRandomCrop', - share_params=False, - crop_size=(1088, 1088), - bbox_clip_border=False), - dict(type='SeqRandomFlip', share_params=True, flip_ratio=0.5), - dict( - type='SeqNormalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - 
dict(type='SeqPad', size_divisor=32), - dict(type='MatchInstances', skip_nomatch=True), - dict( - type='VideoCollect', - keys=[ - 'img', 'gt_bboxes', 'gt_labels', 'gt_match_indices', - 'gt_instance_ids' - ]), - dict(type='SeqDefaultFormatBundle', ref_prefix='ref') - ]), - val=dict( - type='MOTChallengeDataset', - ann_file='data/MOT17/annotations/train_cocoformat.json', - img_prefix='data/MOT17/train', - ref_img_sampler=None, - pipeline=[ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1088, 1088), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='VideoCollect', keys=['img']) - ]) - ]), - test=dict( - type='MOTChallengeDataset', - ann_file='data/MOT17/annotations/train_cocoformat.json', - img_prefix='data/MOT17/train', - ref_img_sampler=None, - pipeline=[ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1088, 1088), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='VideoCollect', keys=['img']) - ]) - ])) -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -checkpoint_config = dict(interval=1) -log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')]) -dist_params = dict(backend='nccl') -log_level = 'INFO' -load_from = None -resume_from = None -workflow = [('train', 1)] -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=100, - warmup_ratio=0.01, - step=[3]) -total_epochs = 4 -evaluation = dict(metric=['bbox', 'track'], interval=1) -search_metrics = ['MOTA', 'IDF1', 'FN', 'FP', 'IDs', 'MT', 'ML'] -test_set = 'train' +model = dict( + detector=dict( + type='FasterRCNN', + pretrained='torchvision://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[1.0, 1.0, 1.0, 1.0], + clip_border=False), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict( + type='SmoothL1Loss', beta=0.1111111111111111, + loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict( + type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=1, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[0.1, 0.1, 0.2, 0.2], + clip_border=False), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', 
+ use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', loss_weight=1.0))), + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100))), + type='Tracktor', + pretrains=dict( + detector='https://download.openmmlab.com/mmtracking/' + 'mot/faster_rcnn/faster-rcnn_r50_fpn_4e_mot17-ffa52ae7.pth', + reid='https://download.openmmlab.com/mmtracking/mot/' + 'reid/reid_r50_6e_mot17-4bf6b63d.pth'), + reid=dict( + type='BaseReID', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling', kernel_size=(8, 4), stride=1), + head=dict( + type='LinearReIDHead', + num_fcs=1, + in_channels=2048, + fc_channels=1024, + out_channels=128, + num_classes=378, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + loss_pairwise=dict( + type='TripletLoss', margin=0.3, loss_weight=1.0), + norm_cfg=dict(type='BN1d'), + act_cfg=dict(type='ReLU'))), + motion=dict( + type='CameraMotionCompensation', + warp_mode='cv2.MOTION_EUCLIDEAN', + num_iters=100, + stop_eps=1e-05), + tracker=dict( + type='TracktorTracker', + obj_score_thr=0.5, + regression=dict( + obj_score_thr=0.5, + nms=dict(type='nms', iou_threshold=0.6), + match_iou_thr=0.3), + reid=dict( + num_samples=10, + img_scale=(256, 128), + img_norm_cfg=None, + match_score_thr=2.0, + match_iou_thr=0.2), + momentums=None, + num_frames_retain=10)) +dataset_type = 'MOTChallengeDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadMultiImagesFromFile', to_float32=True), + dict(type='SeqLoadAnnotations', with_bbox=True, with_track=True), + dict( + type='SeqResize', + img_scale=(1088, 1088), + share_params=True, + ratio_range=(0.8, 1.2), + keep_ratio=True, + bbox_clip_border=False), + dict(type='SeqPhotoMetricDistortion', share_params=True), + dict( + type='SeqRandomCrop', + share_params=False, + crop_size=(1088, 1088), + bbox_clip_border=False), + dict(type='SeqRandomFlip', share_params=True, flip_ratio=0.5), + dict( + type='SeqNormalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='SeqPad', size_divisor=32), + dict(type='MatchInstances', skip_nomatch=True), + dict( + type='VideoCollect', + keys=[ + 'img', 'gt_bboxes', 'gt_labels', 'gt_match_indices', + 'gt_instance_ids' + ]), + dict(type='SeqDefaultFormatBundle', ref_prefix='ref') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1088, 1088), + flip=False, + transforms=[ + dict(type='Resize', 
keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='VideoCollect', keys=['img']) + ]) +] +data_root = 'data/MOT17/' +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='MOTChallengeDataset', + visibility_thr=-1, + ann_file='data/MOT17/annotations/train_cocoformat.json', + img_prefix='data/MOT17/train', + ref_img_sampler=dict( + num_ref_imgs=1, + frame_range=10, + filter_key_img=True, + method='uniform'), + pipeline=[ + dict(type='LoadMultiImagesFromFile', to_float32=True), + dict(type='SeqLoadAnnotations', with_bbox=True, with_track=True), + dict( + type='SeqResize', + img_scale=(1088, 1088), + share_params=True, + ratio_range=(0.8, 1.2), + keep_ratio=True, + bbox_clip_border=False), + dict(type='SeqPhotoMetricDistortion', share_params=True), + dict( + type='SeqRandomCrop', + share_params=False, + crop_size=(1088, 1088), + bbox_clip_border=False), + dict(type='SeqRandomFlip', share_params=True, flip_ratio=0.5), + dict( + type='SeqNormalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='SeqPad', size_divisor=32), + dict(type='MatchInstances', skip_nomatch=True), + dict( + type='VideoCollect', + keys=[ + 'img', 'gt_bboxes', 'gt_labels', 'gt_match_indices', + 'gt_instance_ids' + ]), + dict(type='SeqDefaultFormatBundle', ref_prefix='ref') + ]), + val=dict( + type='MOTChallengeDataset', + ann_file='data/MOT17/annotations/train_cocoformat.json', + img_prefix='data/MOT17/train', + ref_img_sampler=None, + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1088, 1088), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='VideoCollect', keys=['img']) + ]) + ]), + test=dict( + type='MOTChallengeDataset', + ann_file='data/MOT17/annotations/train_cocoformat.json', + img_prefix='data/MOT17/train', + ref_img_sampler=None, + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1088, 1088), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='VideoCollect', keys=['img']) + ]) + ])) +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +checkpoint_config = dict(interval=1) +log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = None +resume_from = None +workflow = [('train', 1)] +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=100, + warmup_ratio=0.01, + step=[3]) +total_epochs = 4 +evaluation = dict(metric=['bbox', 'track'], interval=1) +search_metrics = ['MOTA', 'IDF1', 'FN', 'FP', 'IDs', 'MT', 'ML'] +test_set = 'train' diff --git a/demo/topdown_demo_with_mmdet.py b/demo/topdown_demo_with_mmdet.py index 38f4e92e4e..38b1d819b9 100644 --- a/demo/topdown_demo_with_mmdet.py +++ b/demo/topdown_demo_with_mmdet.py @@ 
-1,292 +1,292 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mimetypes -import os -import time -from argparse import ArgumentParser - -import cv2 -import json_tricks as json -import mmcv -import mmengine -import numpy as np - -from mmpose.apis import inference_topdown -from mmpose.apis import init_model as init_pose_estimator -from mmpose.evaluation.functional import nms -from mmpose.registry import VISUALIZERS -from mmpose.structures import merge_data_samples, split_instances -from mmpose.utils import adapt_mmdet_pipeline - -try: - from mmdet.apis import inference_detector, init_detector - has_mmdet = True -except (ImportError, ModuleNotFoundError): - has_mmdet = False - - -def process_one_image(args, - img, - detector, - pose_estimator, - visualizer=None, - show_interval=0): - """Visualize predicted keypoints (and heatmaps) of one image.""" - - # predict bbox - det_result = inference_detector(detector, img) - pred_instance = det_result.pred_instances.cpu().numpy() - bboxes = np.concatenate( - (pred_instance.bboxes, pred_instance.scores[:, None]), axis=1) - bboxes = bboxes[np.logical_and(pred_instance.labels == args.det_cat_id, - pred_instance.scores > args.bbox_thr)] - bboxes = bboxes[nms(bboxes, args.nms_thr), :4] - - # predict keypoints - pose_results = inference_topdown(pose_estimator, img, bboxes) - data_samples = merge_data_samples(pose_results) - - # show the results - if isinstance(img, str): - img = mmcv.imread(img, channel_order='rgb') - elif isinstance(img, np.ndarray): - img = mmcv.bgr2rgb(img) - - if visualizer is not None: - visualizer.add_datasample( - 'result', - img, - data_sample=data_samples, - draw_gt=False, - draw_heatmap=args.draw_heatmap, - draw_bbox=args.draw_bbox, - show_kpt_idx=args.show_kpt_idx, - skeleton_style=args.skeleton_style, - show=args.show, - wait_time=show_interval, - kpt_thr=args.kpt_thr) - - # if there is no instance detected, return None - return data_samples.get('pred_instances', None) - - -def main(): - """Visualize the demo images. - - Using mmdet to detect the human. - """ - parser = ArgumentParser() - parser.add_argument('det_config', help='Config file for detection') - parser.add_argument('det_checkpoint', help='Checkpoint file for detection') - parser.add_argument('pose_config', help='Config file for pose') - parser.add_argument('pose_checkpoint', help='Checkpoint file for pose') - parser.add_argument( - '--input', type=str, default='', help='Image/Video file') - parser.add_argument( - '--show', - action='store_true', - default=False, - help='whether to show img') - parser.add_argument( - '--output-root', - type=str, - default='', - help='root of the output img file. 
' - 'Default not saving the visualization images.') - parser.add_argument( - '--save-predictions', - action='store_true', - default=False, - help='whether to save predicted results') - parser.add_argument( - '--device', default='cuda:0', help='Device used for inference') - parser.add_argument( - '--det-cat-id', - type=int, - default=0, - help='Category id for bounding box detection model') - parser.add_argument( - '--bbox-thr', - type=float, - default=0.3, - help='Bounding box score threshold') - parser.add_argument( - '--nms-thr', - type=float, - default=0.3, - help='IoU threshold for bounding box NMS') - parser.add_argument( - '--kpt-thr', - type=float, - default=0.3, - help='Visualizing keypoint thresholds') - parser.add_argument( - '--draw-heatmap', - action='store_true', - default=False, - help='Draw heatmap predicted by the model') - parser.add_argument( - '--show-kpt-idx', - action='store_true', - default=False, - help='Whether to show the index of keypoints') - parser.add_argument( - '--skeleton-style', - default='mmpose', - type=str, - choices=['mmpose', 'openpose'], - help='Skeleton style selection') - parser.add_argument( - '--radius', - type=int, - default=3, - help='Keypoint radius for visualization') - parser.add_argument( - '--thickness', - type=int, - default=1, - help='Link thickness for visualization') - parser.add_argument( - '--show-interval', type=int, default=0, help='Sleep seconds per frame') - parser.add_argument( - '--alpha', type=float, default=0.8, help='The transparency of bboxes') - parser.add_argument( - '--draw-bbox', action='store_true', help='Draw bboxes of instances') - - assert has_mmdet, 'Please install mmdet to run the demo.' - - args = parser.parse_args() - - assert args.show or (args.output_root != '') - assert args.input != '' - assert args.det_config is not None - assert args.det_checkpoint is not None - - output_file = None - if args.output_root: - mmengine.mkdir_or_exist(args.output_root) - output_file = os.path.join(args.output_root, - os.path.basename(args.input)) - if args.input == 'webcam': - output_file += '.mp4' - - if args.save_predictions: - assert args.output_root != '' - args.pred_save_path = f'{args.output_root}/results_' \ - f'{os.path.splitext(os.path.basename(args.input))[0]}.json' - - # build detector - detector = init_detector( - args.det_config, args.det_checkpoint, device=args.device) - detector.cfg = adapt_mmdet_pipeline(detector.cfg) - - # build pose estimator - pose_estimator = init_pose_estimator( - args.pose_config, - args.pose_checkpoint, - device=args.device, - cfg_options=dict( - model=dict(test_cfg=dict(output_heatmaps=args.draw_heatmap)))) - - # build visualizer - pose_estimator.cfg.visualizer.radius = args.radius - pose_estimator.cfg.visualizer.alpha = args.alpha - pose_estimator.cfg.visualizer.line_width = args.thickness - visualizer = VISUALIZERS.build(pose_estimator.cfg.visualizer) - # the dataset_meta is loaded from the checkpoint and - # then pass to the model in init_pose_estimator - visualizer.set_dataset_meta( - pose_estimator.dataset_meta, skeleton_style=args.skeleton_style) - - if args.input == 'webcam': - input_type = 'webcam' - else: - input_type = mimetypes.guess_type(args.input)[0].split('/')[0] - - if input_type == 'image': - - # inference - pred_instances = process_one_image(args, args.input, detector, - pose_estimator, visualizer) - - if args.save_predictions: - pred_instances_list = split_instances(pred_instances) - - if output_file: - img_vis = visualizer.get_image() - 
mmcv.imwrite(mmcv.rgb2bgr(img_vis), output_file) - - elif input_type in ['webcam', 'video']: - - if args.input == 'webcam': - cap = cv2.VideoCapture(0) - else: - cap = cv2.VideoCapture(args.input) - - video_writer = None - pred_instances_list = [] - frame_idx = 0 - - while cap.isOpened(): - success, frame = cap.read() - frame_idx += 1 - - if not success: - break - - # topdown pose estimation - pred_instances = process_one_image(args, frame, detector, - pose_estimator, visualizer, - 0.001) - - if args.save_predictions: - # save prediction results - pred_instances_list.append( - dict( - frame_id=frame_idx, - instances=split_instances(pred_instances))) - - # output videos - if output_file: - frame_vis = visualizer.get_image() - - if video_writer is None: - fourcc = cv2.VideoWriter_fourcc(*'mp4v') - # the size of the image with visualization may vary - # depending on the presence of heatmaps - video_writer = cv2.VideoWriter( - output_file, - fourcc, - 25, # saved fps - (frame_vis.shape[1], frame_vis.shape[0])) - - video_writer.write(mmcv.rgb2bgr(frame_vis)) - - # press ESC to exit - if cv2.waitKey(5) & 0xFF == 27: - break - - time.sleep(args.show_interval) - - if video_writer: - video_writer.release() - - cap.release() - - else: - args.save_predictions = False - raise ValueError( - f'file {os.path.basename(args.input)} has invalid format.') - - if args.save_predictions: - with open(args.pred_save_path, 'w') as f: - json.dump( - dict( - meta_info=pose_estimator.dataset_meta, - instance_info=pred_instances_list), - f, - indent='\t') - print(f'predictions have been saved at {args.pred_save_path}') - - -if __name__ == '__main__': - main() +# Copyright (c) OpenMMLab. All rights reserved. +import mimetypes +import os +import time +from argparse import ArgumentParser + +import cv2 +import json_tricks as json +import mmcv +import mmengine +import numpy as np + +from mmpose.apis import inference_topdown +from mmpose.apis import init_model as init_pose_estimator +from mmpose.evaluation.functional import nms +from mmpose.registry import VISUALIZERS +from mmpose.structures import merge_data_samples, split_instances +from mmpose.utils import adapt_mmdet_pipeline + +try: + from mmdet.apis import inference_detector, init_detector + has_mmdet = True +except (ImportError, ModuleNotFoundError): + has_mmdet = False + + +def process_one_image(args, + img, + detector, + pose_estimator, + visualizer=None, + show_interval=0): + """Visualize predicted keypoints (and heatmaps) of one image.""" + + # predict bbox + det_result = inference_detector(detector, img) + pred_instance = det_result.pred_instances.cpu().numpy() + bboxes = np.concatenate( + (pred_instance.bboxes, pred_instance.scores[:, None]), axis=1) + bboxes = bboxes[np.logical_and(pred_instance.labels == args.det_cat_id, + pred_instance.scores > args.bbox_thr)] + bboxes = bboxes[nms(bboxes, args.nms_thr), :4] + + # predict keypoints + pose_results = inference_topdown(pose_estimator, img, bboxes) + data_samples = merge_data_samples(pose_results) + + # show the results + if isinstance(img, str): + img = mmcv.imread(img, channel_order='rgb') + elif isinstance(img, np.ndarray): + img = mmcv.bgr2rgb(img) + + if visualizer is not None: + visualizer.add_datasample( + 'result', + img, + data_sample=data_samples, + draw_gt=False, + draw_heatmap=args.draw_heatmap, + draw_bbox=args.draw_bbox, + show_kpt_idx=args.show_kpt_idx, + skeleton_style=args.skeleton_style, + show=args.show, + wait_time=show_interval, + kpt_thr=args.kpt_thr) + + # if there is no instance detected, 
return None + return data_samples.get('pred_instances', None) + + +def main(): + """Visualize the demo images. + + Using mmdet to detect the human. + """ + parser = ArgumentParser() + parser.add_argument('det_config', help='Config file for detection') + parser.add_argument('det_checkpoint', help='Checkpoint file for detection') + parser.add_argument('pose_config', help='Config file for pose') + parser.add_argument('pose_checkpoint', help='Checkpoint file for pose') + parser.add_argument( + '--input', type=str, default='', help='Image/Video file') + parser.add_argument( + '--show', + action='store_true', + default=False, + help='whether to show img') + parser.add_argument( + '--output-root', + type=str, + default='', + help='root of the output img file. ' + 'Default not saving the visualization images.') + parser.add_argument( + '--save-predictions', + action='store_true', + default=False, + help='whether to save predicted results') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--det-cat-id', + type=int, + default=0, + help='Category id for bounding box detection model') + parser.add_argument( + '--bbox-thr', + type=float, + default=0.3, + help='Bounding box score threshold') + parser.add_argument( + '--nms-thr', + type=float, + default=0.3, + help='IoU threshold for bounding box NMS') + parser.add_argument( + '--kpt-thr', + type=float, + default=0.3, + help='Visualizing keypoint thresholds') + parser.add_argument( + '--draw-heatmap', + action='store_true', + default=False, + help='Draw heatmap predicted by the model') + parser.add_argument( + '--show-kpt-idx', + action='store_true', + default=False, + help='Whether to show the index of keypoints') + parser.add_argument( + '--skeleton-style', + default='mmpose', + type=str, + choices=['mmpose', 'openpose'], + help='Skeleton style selection') + parser.add_argument( + '--radius', + type=int, + default=3, + help='Keypoint radius for visualization') + parser.add_argument( + '--thickness', + type=int, + default=1, + help='Link thickness for visualization') + parser.add_argument( + '--show-interval', type=int, default=0, help='Sleep seconds per frame') + parser.add_argument( + '--alpha', type=float, default=0.8, help='The transparency of bboxes') + parser.add_argument( + '--draw-bbox', action='store_true', help='Draw bboxes of instances') + + assert has_mmdet, 'Please install mmdet to run the demo.' 
+ + args = parser.parse_args() + + assert args.show or (args.output_root != '') + assert args.input != '' + assert args.det_config is not None + assert args.det_checkpoint is not None + + output_file = None + if args.output_root: + mmengine.mkdir_or_exist(args.output_root) + output_file = os.path.join(args.output_root, + os.path.basename(args.input)) + if args.input == 'webcam': + output_file += '.mp4' + + if args.save_predictions: + assert args.output_root != '' + args.pred_save_path = f'{args.output_root}/results_' \ + f'{os.path.splitext(os.path.basename(args.input))[0]}.json' + + # build detector + detector = init_detector( + args.det_config, args.det_checkpoint, device=args.device) + detector.cfg = adapt_mmdet_pipeline(detector.cfg) + + # build pose estimator + pose_estimator = init_pose_estimator( + args.pose_config, + args.pose_checkpoint, + device=args.device, + cfg_options=dict( + model=dict(test_cfg=dict(output_heatmaps=args.draw_heatmap)))) + + # build visualizer + pose_estimator.cfg.visualizer.radius = args.radius + pose_estimator.cfg.visualizer.alpha = args.alpha + pose_estimator.cfg.visualizer.line_width = args.thickness + visualizer = VISUALIZERS.build(pose_estimator.cfg.visualizer) + # the dataset_meta is loaded from the checkpoint and + # then pass to the model in init_pose_estimator + visualizer.set_dataset_meta( + pose_estimator.dataset_meta, skeleton_style=args.skeleton_style) + + if args.input == 'webcam': + input_type = 'webcam' + else: + input_type = mimetypes.guess_type(args.input)[0].split('/')[0] + + if input_type == 'image': + + # inference + pred_instances = process_one_image(args, args.input, detector, + pose_estimator, visualizer) + + if args.save_predictions: + pred_instances_list = split_instances(pred_instances) + + if output_file: + img_vis = visualizer.get_image() + mmcv.imwrite(mmcv.rgb2bgr(img_vis), output_file) + + elif input_type in ['webcam', 'video']: + + if args.input == 'webcam': + cap = cv2.VideoCapture(0) + else: + cap = cv2.VideoCapture(args.input) + + video_writer = None + pred_instances_list = [] + frame_idx = 0 + + while cap.isOpened(): + success, frame = cap.read() + frame_idx += 1 + + if not success: + break + + # topdown pose estimation + pred_instances = process_one_image(args, frame, detector, + pose_estimator, visualizer, + 0.001) + + if args.save_predictions: + # save prediction results + pred_instances_list.append( + dict( + frame_id=frame_idx, + instances=split_instances(pred_instances))) + + # output videos + if output_file: + frame_vis = visualizer.get_image() + + if video_writer is None: + fourcc = cv2.VideoWriter_fourcc(*'mp4v') + # the size of the image with visualization may vary + # depending on the presence of heatmaps + video_writer = cv2.VideoWriter( + output_file, + fourcc, + 25, # saved fps + (frame_vis.shape[1], frame_vis.shape[0])) + + video_writer.write(mmcv.rgb2bgr(frame_vis)) + + # press ESC to exit + if cv2.waitKey(5) & 0xFF == 27: + break + + time.sleep(args.show_interval) + + if video_writer: + video_writer.release() + + cap.release() + + else: + args.save_predictions = False + raise ValueError( + f'file {os.path.basename(args.input)} has invalid format.') + + if args.save_predictions: + with open(args.pred_save_path, 'w') as f: + json.dump( + dict( + meta_info=pose_estimator.dataset_meta, + instance_info=pred_instances_list), + f, + indent='\t') + print(f'predictions have been saved at {args.pred_save_path}') + + +if __name__ == '__main__': + main() diff --git a/docker/Dockerfile b/docker/Dockerfile index 
064b803979..ebbdd41043 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,34 +1,34 @@ -ARG PYTORCH="1.8.1" -ARG CUDA="10.2" -ARG CUDNN="7" - -FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel - -ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" -ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" -ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" - -# To fix GPG key error when running apt-get update -RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub -RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub - -RUN apt-get update && apt-get install -y git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx\ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -# Install xtcocotools -RUN pip install cython -RUN pip install xtcocotools - -# Install MMEngine and MMCV -RUN pip install openmim -RUN mim install mmengine "mmcv>=2.0.0" - -# Install MMPose -RUN conda clean --all -RUN git clone https://github.com/open-mmlab/mmpose.git /mmpose -WORKDIR /mmpose -RUN git checkout main -ENV FORCE_CUDA="1" -RUN pip install -r requirements/build.txt -RUN pip install --no-cache-dir -e . +ARG PYTORCH="1.8.1" +ARG CUDA="10.2" +ARG CUDNN="7" + +FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel + +ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" +ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" +ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" + +# To fix GPG key error when running apt-get update +RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub +RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub + +RUN apt-get update && apt-get install -y git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx\ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +# Install xtcocotools +RUN pip install cython +RUN pip install xtcocotools + +# Install MMEngine and MMCV +RUN pip install openmim +RUN mim install mmengine "mmcv>=2.0.0" + +# Install MMPose +RUN conda clean --all +RUN git clone https://github.com/open-mmlab/mmpose.git /mmpose +WORKDIR /mmpose +RUN git checkout main +ENV FORCE_CUDA="1" +RUN pip install -r requirements/build.txt +RUN pip install --no-cache-dir -e . 
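The Dockerfile above is normally used with plain `docker build` and `docker run`. As a minimal sketch only, the same build-and-smoke-test can also be scripted through the `docker` Python SDK (docker-py); the build path, image tag, and test command below are assumptions, not something defined in this repository.

```python
import docker  # the docker-py SDK: pip install docker
from docker.types import DeviceRequest

client = docker.from_env()

# Build the MMPose image from the directory that holds the Dockerfile above.
image, build_logs = client.images.build(path='docker/', tag='mmpose:latest')

# Smoke-test: run a throwaway container with all GPUs exposed and print the
# installed MMPose version.
output = client.containers.run(
    'mmpose:latest',
    command='python -c "import mmpose; print(mmpose.__version__)"',
    device_requests=[DeviceRequest(count=-1, capabilities=[['gpu']])],
    remove=True,
)
print(output.decode().strip())
```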
diff --git a/docker/serve/Dockerfile b/docker/serve/Dockerfile index 091599b51a..041b127a8f 100644 --- a/docker/serve/Dockerfile +++ b/docker/serve/Dockerfile @@ -1,51 +1,51 @@ -ARG PYTORCH="1.8.1" -ARG CUDA="10.2" -ARG CUDNN="7" -FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel - -ARG MMCV="2.0.0rc4" -ARG MMPOSE="1.0.0rc1" - -ENV PYTHONUNBUFFERED TRUE - -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \ - ca-certificates \ - g++ \ - openjdk-11-jre-headless \ - # MMDet Requirements - ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \ - && rm -rf /var/lib/apt/lists/* - -ENV PATH="/opt/conda/bin:$PATH" -RUN export FORCE_CUDA=1 - - -# MMLAB -ARG PYTORCH -ARG CUDA -RUN pip install mmengine -RUN ["/bin/bash", "-c", "pip install mmcv==${MMCV}} -f https://download.openmmlab.com/mmcv/dist/cu${CUDA//./}/torch${PYTORCH}/index.html"] -RUN pip install mmpose==${MMPOSE} - -# TORCHSEVER -RUN pip install torchserve torch-model-archiver - -RUN useradd -m model-server \ - && mkdir -p /home/model-server/tmp - -COPY entrypoint.sh /usr/local/bin/entrypoint.sh - -RUN chmod +x /usr/local/bin/entrypoint.sh \ - && chown -R model-server /home/model-server - -COPY config.properties /home/model-server/config.properties -RUN mkdir /home/model-server/model-store && chown -R model-server /home/model-server/model-store - -EXPOSE 8080 8081 8082 - -USER model-server -WORKDIR /home/model-server -ENV TEMP=/home/model-server/tmp -ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] -CMD ["serve"] +ARG PYTORCH="1.8.1" +ARG CUDA="10.2" +ARG CUDNN="7" +FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel + +ARG MMCV="2.0.0rc4" +ARG MMPOSE="1.0.0rc1" + +ENV PYTHONUNBUFFERED TRUE + +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \ + ca-certificates \ + g++ \ + openjdk-11-jre-headless \ + # MMDet Requirements + ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \ + && rm -rf /var/lib/apt/lists/* + +ENV PATH="/opt/conda/bin:$PATH" +RUN export FORCE_CUDA=1 + + +# MMLAB +ARG PYTORCH +ARG CUDA +RUN pip install mmengine +RUN ["/bin/bash", "-c", "pip install mmcv==${MMCV}} -f https://download.openmmlab.com/mmcv/dist/cu${CUDA//./}/torch${PYTORCH}/index.html"] +RUN pip install mmpose==${MMPOSE} + +# TORCHSEVER +RUN pip install torchserve torch-model-archiver + +RUN useradd -m model-server \ + && mkdir -p /home/model-server/tmp + +COPY entrypoint.sh /usr/local/bin/entrypoint.sh + +RUN chmod +x /usr/local/bin/entrypoint.sh \ + && chown -R model-server /home/model-server + +COPY config.properties /home/model-server/config.properties +RUN mkdir /home/model-server/model-store && chown -R model-server /home/model-server/model-store + +EXPOSE 8080 8081 8082 + +USER model-server +WORKDIR /home/model-server +ENV TEMP=/home/model-server/tmp +ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] +CMD ["serve"] diff --git a/docker/serve/config.properties b/docker/serve/config.properties index efb9c47e40..dd9a685150 100644 --- a/docker/serve/config.properties +++ b/docker/serve/config.properties @@ -1,5 +1,5 @@ -inference_address=http://0.0.0.0:8080 -management_address=http://0.0.0.0:8081 -metrics_address=http://0.0.0.0:8082 -model_store=/home/model-server/model-store -load_models=all +inference_address=http://0.0.0.0:8080 +management_address=http://0.0.0.0:8081 +metrics_address=http://0.0.0.0:8082 +model_store=/home/model-server/model-store +load_models=all diff --git 
a/docker/serve/entrypoint.sh b/docker/serve/entrypoint.sh index 41ba00b048..d9aedae68f 100644 --- a/docker/serve/entrypoint.sh +++ b/docker/serve/entrypoint.sh @@ -1,12 +1,12 @@ -#!/bin/bash -set -e - -if [[ "$1" = "serve" ]]; then - shift 1 - torchserve --start --ts-config /home/model-server/config.properties -else - eval "$@" -fi - -# prevent docker exit -tail -f /dev/null +#!/bin/bash +set -e + +if [[ "$1" = "serve" ]]; then + shift 1 + torchserve --start --ts-config /home/model-server/config.properties +else + eval "$@" +fi + +# prevent docker exit +tail -f /dev/null diff --git a/docs/en/Makefile b/docs/en/Makefile index d4bb2cbb9e..73a28c7134 100644 --- a/docs/en/Makefile +++ b/docs/en/Makefile @@ -1,20 +1,20 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line, and also -# from the environment for the first two. -SPHINXOPTS ?= -SPHINXBUILD ?= sphinx-build -SOURCEDIR = . -BUILDDIR = _build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). -%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/en/_static/css/readthedocs.css b/docs/en/_static/css/readthedocs.css index e75ab1b46a..c8740686f8 100644 --- a/docs/en/_static/css/readthedocs.css +++ b/docs/en/_static/css/readthedocs.css @@ -1,10 +1,10 @@ -.header-logo { - background-image: url("../images/mmpose-logo.png"); - background-size: 120px 50px; - height: 50px; - width: 120px; -} - -table.autosummary td { - width: 35% -} +.header-logo { + background-image: url("../images/mmpose-logo.png"); + background-size: 120px 50px; + height: 50px; + width: 120px; +} + +table.autosummary td { + width: 35% +} diff --git a/docs/en/advanced_guides/codecs.md b/docs/en/advanced_guides/codecs.md index 610bd83a57..8c0eb8eb0c 100644 --- a/docs/en/advanced_guides/codecs.md +++ b/docs/en/advanced_guides/codecs.md @@ -1,227 +1,227 @@ -# Learn about Codecs - -In the keypoint detection task, depending on the algorithm, it is often necessary to generate targets in different formats, such as normalized coordinates, vectors and heatmaps, etc. Similarly, for the model outputs, a decoding process is required to transform them into coordinates. - -Encoding and decoding are closely related and inverse each other. In earlier versions of MMPose, encoding and decoding are implemented at different modules, making it less intuitive and unified. - -MMPose 1.0 introduced a new module **Codec** to integrate the encoding and decoding together in a modular and user-friendly form. 
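Before walking through the real encoder and decoder below, the contract is easy to see in miniature. The toy codec in this sketch is not MMPose's actual interface (the class name and the simplifications are made up for illustration): `encode()` turns image-space keypoints into normalized targets, and `decode()` maps predictions back.

```Python
import numpy as np


class ToyRegressionCodec:
    """Minimal illustration of the encode/decode contract."""

    def __init__(self, input_size):
        self.input_size = input_size  # (w, h) of the model input

    def encode(self, keypoints: np.ndarray) -> dict:
        # image-space coordinates (N, K, 2) -> normalized regression targets
        w, h = self.input_size
        labels = (keypoints / np.array([w, h])).astype(np.float32)
        return dict(keypoint_labels=labels)

    def decode(self, encoded: np.ndarray) -> np.ndarray:
        # normalized predictions -> image-space coordinates
        w, h = self.input_size
        return encoded * np.array([w, h])


codec = ToyRegressionCodec(input_size=(192, 256))
kpts = np.array([[[96.0, 128.0]]])  # one instance with a single keypoint
restored = codec.decode(codec.encode(kpts)['keypoint_labels'])
assert np.allclose(restored, kpts)  # decoding inverts encoding
```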
- -Here is a diagram to show where the `Codec` is: - -![codec-en](https://user-images.githubusercontent.com/13503330/187112635-c01f13d1-a07e-420f-be50-3b8818524dec.png) - -A typical codec consists of two parts: - -- Encoder -- Decoder - -### Encoder - -The encoder transforms the coordinates in the input image space into the needed target format: - -- Normalized Coordinates -- One-dimensional Vectors -- Gaussian Heatmaps - -For example, in the Regression-based method, the encoder will be: - -```Python -def encode(self, - keypoints: np.ndarray, - keypoints_visible: Optional[np.ndarray] = None) -> dict: - """Encoding keypoints from input image space to normalized space. - - Args: - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) - keypoints_visible (np.ndarray): Keypoint visibilities in shape - (N, K) - - Returns: - dict: - - keypoint_labels (np.ndarray): The normalized regression labels in - shape (N, K, D) where D is 2 for 2d coordinates - - keypoint_weights (np.ndarray): The target weights in shape - (N, K) - """ - if keypoints_visible is None: - keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) - - w, h = self.input_size - valid = ((keypoints >= 0) & - (keypoints <= [w - 1, h - 1])).all(axis=-1) & ( - keypoints_visible > 0.5) - - keypoint_labels = (keypoints / np.array([w, h])).astype(np.float32) - keypoint_weights = np.where(valid, 1., 0.).astype(np.float32) - - encoded = dict( - keypoint_labels=keypoint_labels, keypoint_weights=keypoint_weights) - - return encoded -``` - -The encoded data is converted to Tensor format in `PackPoseInputs` and packed in `data_sample.gt_instance_labels` for model calls, which is generally used for loss calculation, as demonstrated by `loss()` in `RegressionHead`. - -```Python -def loss(self, - inputs: Tuple[Tensor], - batch_data_samples: OptSampleList, - train_cfg: ConfigType = {}) -> dict: - """Calculate losses from a batch of inputs and data samples.""" - - pred_outputs = self.forward(inputs) - - keypoint_labels = torch.cat( - [d.gt_instance_labels.keypoint_labels for d in batch_data_samples]) - keypoint_weights = torch.cat([ - d.gt_instance_labels.keypoint_weights for d in batch_data_samples - ]) - - # calculate losses - losses = dict() - loss = self.loss_module(pred_outputs, keypoint_labels, - keypoint_weights.unsqueeze(-1)) - - losses.update(loss_kpt=loss) - ### Omitted ### -``` - -### Decoder - -The decoder transforms the model outputs into coordinates in the input image space, which is the opposite processing of the encoder. - -For example, in the Regression-based method, the decoder will be: - -```Python -def decode(self, encoded: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: - """Decode keypoint coordinates from normalized space to input image - space. - - Args: - encoded (np.ndarray): Coordinates in shape (N, K, D) - - Returns: - tuple: - - keypoints (np.ndarray): Decoded coordinates in shape (N, K, D) - - scores (np.ndarray): The keypoint scores in shape (N, K). 
- It usually represents the confidence of the keypoint prediction - - """ - - if encoded.shape[-1] == 2: - N, K, _ = encoded.shape - normalized_coords = encoded.copy() - scores = np.ones((N, K), dtype=np.float32) - elif encoded.shape[-1] == 4: - # split coords and sigma if outputs contain output_sigma - normalized_coords = encoded[..., :2].copy() - output_sigma = encoded[..., 2:4].copy() - scores = (1 - output_sigma).mean(axis=-1) - else: - raise ValueError( - 'Keypoint dimension should be 2 or 4 (with sigma), ' - f'but got {encoded.shape[-1]}') - - w, h = self.input_size - keypoints = normalized_coords * np.array([w, h]) - - return keypoints, scores -``` - -By default, the `decode()` method only performs decoding on a single instance. You can also implement the `batch_decode()` method to boost the decoding process. - -## Common Usage - -The example below shows how to use a codec in your config: - -- Define the Codec -- Generate Targets -- Head - -### Define the Codec - -Take the Regression-based method to generate normalized coordinates as an example, you can define a `codec` in your config as follows: - -```Python -codec = dict(type='RegressionLabel', input_size=(192, 256)) -``` - -### Generate Targets - -In pipelines, A codec should be passed into `GenerateTarget` to work as the `encoder`: - -```Python -dict(type='GenerateTarget', encoder=codec) -``` - -### Head - -In MMPose workflows, we decode the model outputs in `Head`, which requires a codec to work as the `decoder`: - -```Python -head=dict( - type='RLEHead', - in_channels=2048, - num_joints=17, - loss=dict(type='RLELoss', use_target_weight=True), - decoder=codec -) -``` - -Here is the phase of a config file: - -```Python - -# codec settings -codec = dict(type='RegressionLabel', input_size=(192, 256)) ## definition ## - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - neck=dict(type='GlobalAveragePooling'), - head=dict( - type='RLEHead', - in_channels=2048, - num_joints=17, - loss=dict(type='RLELoss', use_target_weight=True), - decoder=codec), ## Head ## - test_cfg=dict( - flip_test=True, - shift_coords=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), ## Generate Target ## - dict(type='PackPoseInputs') -] -test_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] -``` +# Learn about Codecs + +In the keypoint detection task, depending on the algorithm, it is often necessary to generate targets in different formats, such as normalized coordinates, vectors and heatmaps, etc. Similarly, for the model outputs, a decoding process is required to transform them into coordinates. + +Encoding and decoding are closely related and inverse each other. 
In earlier versions of MMPose, encoding and decoding are implemented at different modules, making it less intuitive and unified. + +MMPose 1.0 introduced a new module **Codec** to integrate the encoding and decoding together in a modular and user-friendly form. + +Here is a diagram to show where the `Codec` is: + +![codec-en](https://user-images.githubusercontent.com/13503330/187112635-c01f13d1-a07e-420f-be50-3b8818524dec.png) + +A typical codec consists of two parts: + +- Encoder +- Decoder + +### Encoder + +The encoder transforms the coordinates in the input image space into the needed target format: + +- Normalized Coordinates +- One-dimensional Vectors +- Gaussian Heatmaps + +For example, in the Regression-based method, the encoder will be: + +```Python +def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None) -> dict: + """Encoding keypoints from input image space to normalized space. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + + Returns: + dict: + - keypoint_labels (np.ndarray): The normalized regression labels in + shape (N, K, D) where D is 2 for 2d coordinates + - keypoint_weights (np.ndarray): The target weights in shape + (N, K) + """ + if keypoints_visible is None: + keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) + + w, h = self.input_size + valid = ((keypoints >= 0) & + (keypoints <= [w - 1, h - 1])).all(axis=-1) & ( + keypoints_visible > 0.5) + + keypoint_labels = (keypoints / np.array([w, h])).astype(np.float32) + keypoint_weights = np.where(valid, 1., 0.).astype(np.float32) + + encoded = dict( + keypoint_labels=keypoint_labels, keypoint_weights=keypoint_weights) + + return encoded +``` + +The encoded data is converted to Tensor format in `PackPoseInputs` and packed in `data_sample.gt_instance_labels` for model calls, which is generally used for loss calculation, as demonstrated by `loss()` in `RegressionHead`. + +```Python +def loss(self, + inputs: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + pred_outputs = self.forward(inputs) + + keypoint_labels = torch.cat( + [d.gt_instance_labels.keypoint_labels for d in batch_data_samples]) + keypoint_weights = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples + ]) + + # calculate losses + losses = dict() + loss = self.loss_module(pred_outputs, keypoint_labels, + keypoint_weights.unsqueeze(-1)) + + losses.update(loss_kpt=loss) + ### Omitted ### +``` + +### Decoder + +The decoder transforms the model outputs into coordinates in the input image space, which is the opposite processing of the encoder. + +For example, in the Regression-based method, the decoder will be: + +```Python +def decode(self, encoded: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoint coordinates from normalized space to input image + space. + + Args: + encoded (np.ndarray): Coordinates in shape (N, K, D) + + Returns: + tuple: + - keypoints (np.ndarray): Decoded coordinates in shape (N, K, D) + - scores (np.ndarray): The keypoint scores in shape (N, K). 
+ It usually represents the confidence of the keypoint prediction + + """ + + if encoded.shape[-1] == 2: + N, K, _ = encoded.shape + normalized_coords = encoded.copy() + scores = np.ones((N, K), dtype=np.float32) + elif encoded.shape[-1] == 4: + # split coords and sigma if outputs contain output_sigma + normalized_coords = encoded[..., :2].copy() + output_sigma = encoded[..., 2:4].copy() + scores = (1 - output_sigma).mean(axis=-1) + else: + raise ValueError( + 'Keypoint dimension should be 2 or 4 (with sigma), ' + f'but got {encoded.shape[-1]}') + + w, h = self.input_size + keypoints = normalized_coords * np.array([w, h]) + + return keypoints, scores +``` + +By default, the `decode()` method only performs decoding on a single instance. You can also implement the `batch_decode()` method to boost the decoding process. + +## Common Usage + +The example below shows how to use a codec in your config: + +- Define the Codec +- Generate Targets +- Head + +### Define the Codec + +Take the Regression-based method to generate normalized coordinates as an example, you can define a `codec` in your config as follows: + +```Python +codec = dict(type='RegressionLabel', input_size=(192, 256)) +``` + +### Generate Targets + +In pipelines, A codec should be passed into `GenerateTarget` to work as the `encoder`: + +```Python +dict(type='GenerateTarget', encoder=codec) +``` + +### Head + +In MMPose workflows, we decode the model outputs in `Head`, which requires a codec to work as the `decoder`: + +```Python +head=dict( + type='RLEHead', + in_channels=2048, + num_joints=17, + loss=dict(type='RLELoss', use_target_weight=True), + decoder=codec +) +``` + +Here is the phase of a config file: + +```Python + +# codec settings +codec = dict(type='RegressionLabel', input_size=(192, 256)) ## definition ## + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RLEHead', + in_channels=2048, + num_joints=17, + loss=dict(type='RLELoss', use_target_weight=True), + decoder=codec), ## Head ## + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), ## Generate Target ## + dict(type='PackPoseInputs') +] +test_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] +``` diff --git a/docs/en/advanced_guides/customize_datasets.md b/docs/en/advanced_guides/customize_datasets.md index 1aac418812..07695ddb68 100644 --- a/docs/en/advanced_guides/customize_datasets.md +++ b/docs/en/advanced_guides/customize_datasets.md @@ -1,259 +1,259 @@ -# Customize Datasets - -## Customize datasets by reorganizing data to COCO format - -The simplest way to use the custom dataset is to 
convert your annotation format to COCO dataset format. - -The annotation JSON files in COCO format have the following necessary keys: - -```python -'images': [ - { - 'file_name': '000000001268.jpg', - 'height': 427, - 'width': 640, - 'id': 1268 - }, - ... -], -'annotations': [ - { - 'segmentation': [[426.36, - ... - 424.34, - 223.3]], - 'keypoints': [0,0,0, - 0,0,0, - 0,0,0, - 427,220,2, - 443,222,2, - 414,228,2, - 449,232,2, - 408,248,1, - 454,261,2, - 0,0,0, - 0,0,0, - 411,287,2, - 431,287,2, - 0,0,0, - 458,265,2, - 0,0,0, - 466,300,1], - 'num_keypoints': 10, - 'area': 3894.5826, - 'iscrowd': 0, - 'image_id': 1268, - 'bbox': [402.34, 205.02, 65.26, 88.45], - 'category_id': 1, - 'id': 215218 - }, - ... -], -'categories': [ - {'id': 1, 'name': 'person'}, - ] -``` - -There are three necessary keys in the json file: - -- `images`: contains a list of images with their information like `file_name`, `height`, `width`, and `id`. -- `annotations`: contains the list of instance annotations. -- `categories`: contains the category name ('person') and its ID (1). - -If the annotations have been organized in COCO format, there is no need to create a new dataset class. You can use `CocoDataset` class alternatively. - -## Create a custom dataset_info config file for the dataset - -Add a new dataset info config file that contains the metainfo about the dataset. - -``` -configs/_base_/datasets/custom.py -``` - -An example of the dataset config is as follows. - -`keypoint_info` contains the information about each keypoint. - -1. `name`: the keypoint name. The keypoint name must be unique. -2. `id`: the keypoint id. -3. `color`: (\[B, G, R\]) is used for keypoint visualization. -4. `type`: 'upper' or 'lower', will be used in data augmentation. -5. `swap`: indicates the 'swap pair' (also known as 'flip pair'). When applying image horizontal flip, the left part will become the right part. We need to flip the keypoints accordingly. - -`skeleton_info` contains information about the keypoint connectivity, which is used for visualization. - -`joint_weights` assigns different loss weights to different keypoints. - -`sigmas` is used to calculate the OKS score. You can read [keypoints-eval](https://cocodataset.org/#keypoints-eval) to learn more about it. - -Here is an simplified example of dataset_info config file ([full text](/configs/_base_/datasets/coco.py)). - -``` -dataset_info = dict( - dataset_name='coco', - paper_info=dict( - author='Lin, Tsung-Yi and Maire, Michael and ' - 'Belongie, Serge and Hays, James and ' - 'Perona, Pietro and Ramanan, Deva and ' - r'Doll{\'a}r, Piotr and Zitnick, C Lawrence', - title='Microsoft coco: Common objects in context', - container='European conference on computer vision', - year='2014', - homepage='http://cocodataset.org/', - ), - keypoint_info={ - 0: - dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), - 1: - dict( - name='left_eye', - id=1, - color=[51, 153, 255], - type='upper', - swap='right_eye'), - ... - 16: - dict( - name='right_ankle', - id=16, - color=[255, 128, 0], - type='lower', - swap='left_ankle') - }, - skeleton_info={ - 0: - dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), - ... 
- 18: - dict( - link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]) - }, - joint_weights=[ - 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, - 1.5 - ], - sigmas=[ - 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, - 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089 - ]) -``` - -## Create a custom dataset class - -If the annotations are not organized in COCO format, you need to create a custom dataset class by the following steps: - -1. First create a package inside the `mmpose/datasets/datasets` folder. - -2. Create a class definition of your dataset in the package folder and register it in the registry with a name. Without a name, it will keep giving the error. `KeyError: 'XXXXX is not in the dataset registry'` - - ``` - from mmengine.dataset import BaseDataset - from mmpose.registry import DATASETS - - @DATASETS.register_module(name='MyCustomDataset') - class MyCustomDataset(BaseDataset): - ``` - - You can refer to [this doc](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/basedataset.html) on how to build customed dataset class with `mmengine.BaseDataset`. - -3. Make sure you have updated the `__init__.py` of your package folder - -4. Make sure you have updated the `__init__.py` of the dataset package folder. - -## Create a custom training config file - -Create a custom training config file as per your need and the model/architecture you want to use in the configs folder. You may modify an existing config file to use the new custom dataset. - -In `configs/my_custom_config.py`: - -```python -... -# dataset and dataloader settings -dataset_type = 'MyCustomDataset' # or 'CocoDataset' - -train_dataloader = dict( - batch_size=2, - dataset=dict( - type=dataset_type, - data_root='root/of/your/train/data', - ann_file='path/to/your/train/json', - data_prefix=dict(img='path/to/your/train/img'), - metainfo=dict(from_file='configs/_base_/datasets/custom.py'), - ...), - ) - -val_dataloader = dict( - batch_size=2, - dataset=dict( - type=dataset_type, - data_root='root/of/your/val/data', - ann_file='path/to/your/val/json', - data_prefix=dict(img='path/to/your/val/img'), - metainfo=dict(from_file='configs/_base_/datasets/custom.py'), - ...), - ) - -test_dataloader = dict( - batch_size=2, - dataset=dict( - type=dataset_type, - data_root='root/of/your/test/data', - ann_file='path/to/your/test/json', - data_prefix=dict(img='path/to/your/test/img'), - metainfo=dict(from_file='configs/_base_/datasets/custom.py'), - ...), - ) -... -``` - -Make sure you have provided all the paths correctly. - -## Dataset Wrappers - -The following dataset wrappers are supported in [MMEngine](https://github.com/open-mmlab/mmengine), you can refer to [MMEngine tutorial](https://mmengine.readthedocs.io/en/latest) to learn how to use it. - -- [ConcatDataset](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/basedataset.html#concatdataset) -- [RepeatDataset](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/basedataset.html#repeatdataset) - -### CombinedDataset - -MMPose provides `CombinedDataset` to combine multiple datasets with different annotations. 
A combined dataset can be defined in config files as: - -```python -dataset_1 = dict( - type='dataset_type_1', - data_root='root/of/your/dataset1', - data_prefix=dict(img_path='path/to/your/img'), - ann_file='annotations/train.json', - pipeline=[ - # the converter transforms convert data into a unified format - converter_transform_1 - ]) - -dataset_2 = dict( - type='dataset_type_2', - data_root='root/of/your/dataset2', - data_prefix=dict(img_path='path/to/your/img'), - ann_file='annotations/train.json', - pipeline=[ - converter_transform_2 - ]) - -shared_pipeline = [ - LoadImage(), - ParseImage(), -] - -combined_dataset = dict( - type='CombinedDataset', - metainfo=dict(from_file='path/to/your/metainfo'), - datasets=[dataset_1, dataset_2], - pipeline=shared_pipeline, -) -``` - -- **MetaInfo of combined dataset** determines the annotation format. Either metainfo of a sub-dataset or a customed dataset metainfo is valid here. To custom a dataset metainfo, please refer to [Create a custom dataset_info config file for the dataset](#create-a-custom-datasetinfo-config-file-for-the-dataset). - -- **Converter transforms of sub-datasets** are applied when there exist mismatches of annotation format between sub-datasets and the combined dataset. For example, the number and order of keypoints might be different in the combined dataset and the sub-datasets. Then `KeypointConverter` can be used to unify the keypoints number and order. - -- More details about `CombinedDataset` and `KeypointConverter` can be found in Advanced Guides-[Training with Mixed Datasets](../user_guides/mixed_datasets.md). +# Customize Datasets + +## Customize datasets by reorganizing data to COCO format + +The simplest way to use the custom dataset is to convert your annotation format to COCO dataset format. + +The annotation JSON files in COCO format have the following necessary keys: + +```python +'images': [ + { + 'file_name': '000000001268.jpg', + 'height': 427, + 'width': 640, + 'id': 1268 + }, + ... +], +'annotations': [ + { + 'segmentation': [[426.36, + ... + 424.34, + 223.3]], + 'keypoints': [0,0,0, + 0,0,0, + 0,0,0, + 427,220,2, + 443,222,2, + 414,228,2, + 449,232,2, + 408,248,1, + 454,261,2, + 0,0,0, + 0,0,0, + 411,287,2, + 431,287,2, + 0,0,0, + 458,265,2, + 0,0,0, + 466,300,1], + 'num_keypoints': 10, + 'area': 3894.5826, + 'iscrowd': 0, + 'image_id': 1268, + 'bbox': [402.34, 205.02, 65.26, 88.45], + 'category_id': 1, + 'id': 215218 + }, + ... +], +'categories': [ + {'id': 1, 'name': 'person'}, + ] +``` + +There are three necessary keys in the json file: + +- `images`: contains a list of images with their information like `file_name`, `height`, `width`, and `id`. +- `annotations`: contains the list of instance annotations. +- `categories`: contains the category name ('person') and its ID (1). + +If the annotations have been organized in COCO format, there is no need to create a new dataset class. You can use `CocoDataset` class alternatively. + +## Create a custom dataset_info config file for the dataset + +Add a new dataset info config file that contains the metainfo about the dataset. + +``` +configs/_base_/datasets/custom.py +``` + +An example of the dataset config is as follows. + +`keypoint_info` contains the information about each keypoint. + +1. `name`: the keypoint name. The keypoint name must be unique. +2. `id`: the keypoint id. +3. `color`: (\[B, G, R\]) is used for keypoint visualization. +4. `type`: 'upper' or 'lower', will be used in data augmentation. +5. 
`swap`: indicates the 'swap pair' (also known as 'flip pair'). When applying image horizontal flip, the left part will become the right part. We need to flip the keypoints accordingly. + +`skeleton_info` contains information about the keypoint connectivity, which is used for visualization. + +`joint_weights` assigns different loss weights to different keypoints. + +`sigmas` is used to calculate the OKS score. You can read [keypoints-eval](https://cocodataset.org/#keypoints-eval) to learn more about it. + +Here is an simplified example of dataset_info config file ([full text](/configs/_base_/datasets/coco.py)). + +``` +dataset_info = dict( + dataset_name='coco', + paper_info=dict( + author='Lin, Tsung-Yi and Maire, Michael and ' + 'Belongie, Serge and Hays, James and ' + 'Perona, Pietro and Ramanan, Deva and ' + r'Doll{\'a}r, Piotr and Zitnick, C Lawrence', + title='Microsoft coco: Common objects in context', + container='European conference on computer vision', + year='2014', + homepage='http://cocodataset.org/', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + ... + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + ... + 18: + dict( + link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]) + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5 + ], + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089 + ]) +``` + +## Create a custom dataset class + +If the annotations are not organized in COCO format, you need to create a custom dataset class by the following steps: + +1. First create a package inside the `mmpose/datasets/datasets` folder. + +2. Create a class definition of your dataset in the package folder and register it in the registry with a name. Without a name, it will keep giving the error. `KeyError: 'XXXXX is not in the dataset registry'` + + ``` + from mmengine.dataset import BaseDataset + from mmpose.registry import DATASETS + + @DATASETS.register_module(name='MyCustomDataset') + class MyCustomDataset(BaseDataset): + ``` + + You can refer to [this doc](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/basedataset.html) on how to build customed dataset class with `mmengine.BaseDataset`. + +3. Make sure you have updated the `__init__.py` of your package folder + +4. Make sure you have updated the `__init__.py` of the dataset package folder. + +## Create a custom training config file + +Create a custom training config file as per your need and the model/architecture you want to use in the configs folder. You may modify an existing config file to use the new custom dataset. + +In `configs/my_custom_config.py`: + +```python +... 
+# dataset and dataloader settings +dataset_type = 'MyCustomDataset' # or 'CocoDataset' + +train_dataloader = dict( + batch_size=2, + dataset=dict( + type=dataset_type, + data_root='root/of/your/train/data', + ann_file='path/to/your/train/json', + data_prefix=dict(img='path/to/your/train/img'), + metainfo=dict(from_file='configs/_base_/datasets/custom.py'), + ...), + ) + +val_dataloader = dict( + batch_size=2, + dataset=dict( + type=dataset_type, + data_root='root/of/your/val/data', + ann_file='path/to/your/val/json', + data_prefix=dict(img='path/to/your/val/img'), + metainfo=dict(from_file='configs/_base_/datasets/custom.py'), + ...), + ) + +test_dataloader = dict( + batch_size=2, + dataset=dict( + type=dataset_type, + data_root='root/of/your/test/data', + ann_file='path/to/your/test/json', + data_prefix=dict(img='path/to/your/test/img'), + metainfo=dict(from_file='configs/_base_/datasets/custom.py'), + ...), + ) +... +``` + +Make sure you have provided all the paths correctly. + +## Dataset Wrappers + +The following dataset wrappers are supported in [MMEngine](https://github.com/open-mmlab/mmengine), you can refer to [MMEngine tutorial](https://mmengine.readthedocs.io/en/latest) to learn how to use it. + +- [ConcatDataset](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/basedataset.html#concatdataset) +- [RepeatDataset](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/basedataset.html#repeatdataset) + +### CombinedDataset + +MMPose provides `CombinedDataset` to combine multiple datasets with different annotations. A combined dataset can be defined in config files as: + +```python +dataset_1 = dict( + type='dataset_type_1', + data_root='root/of/your/dataset1', + data_prefix=dict(img_path='path/to/your/img'), + ann_file='annotations/train.json', + pipeline=[ + # the converter transforms convert data into a unified format + converter_transform_1 + ]) + +dataset_2 = dict( + type='dataset_type_2', + data_root='root/of/your/dataset2', + data_prefix=dict(img_path='path/to/your/img'), + ann_file='annotations/train.json', + pipeline=[ + converter_transform_2 + ]) + +shared_pipeline = [ + LoadImage(), + ParseImage(), +] + +combined_dataset = dict( + type='CombinedDataset', + metainfo=dict(from_file='path/to/your/metainfo'), + datasets=[dataset_1, dataset_2], + pipeline=shared_pipeline, +) +``` + +- **MetaInfo of combined dataset** determines the annotation format. Either metainfo of a sub-dataset or a customed dataset metainfo is valid here. To custom a dataset metainfo, please refer to [Create a custom dataset_info config file for the dataset](#create-a-custom-datasetinfo-config-file-for-the-dataset). + +- **Converter transforms of sub-datasets** are applied when there exist mismatches of annotation format between sub-datasets and the combined dataset. For example, the number and order of keypoints might be different in the combined dataset and the sub-datasets. Then `KeypointConverter` can be used to unify the keypoints number and order. + +- More details about `CombinedDataset` and `KeypointConverter` can be found in Advanced Guides-[Training with Mixed Datasets](../user_guides/mixed_datasets.md). diff --git a/docs/en/advanced_guides/customize_logging.md b/docs/en/advanced_guides/customize_logging.md index 093a530dba..8e2b7f311b 100644 --- a/docs/en/advanced_guides/customize_logging.md +++ b/docs/en/advanced_guides/customize_logging.md @@ -1,3 +1,3 @@ -# Customize Logging - -Coming soon. +# Customize Logging + +Coming soon. 
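To make the `CombinedDataset` example above concrete, here is a hedged sketch of one possible converter transform for a sub-dataset whose keypoints must be remapped to the combined format. The keypoint count and the index pairs are placeholders for illustration; replace them with the actual mapping between your sub-dataset and the combined metainfo.

```python
# A possible `converter_transform_1` for the CombinedDataset example above.
# `mapping` lists (source_index, target_index) pairs; target keypoints that
# receive no pair stay unannotated. All indices here are placeholders.
converter_transform_1 = dict(
    type='KeypointConverter',
    num_keypoints=17,  # keypoint number of the combined dataset
    mapping=[
        (0, 0),   # sub-dataset keypoint 0 -> combined keypoint 0
        (1, 2),   # sub-dataset keypoint 1 -> combined keypoint 2
        (2, 1),   # sub-dataset keypoint 2 -> combined keypoint 1
        # ... one pair per keypoint shared by both formats
    ])

dataset_1 = dict(
    type='dataset_type_1',
    data_root='root/of/your/dataset1',
    data_prefix=dict(img_path='path/to/your/img'),
    ann_file='annotations/train.json',
    pipeline=[converter_transform_1])
```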
diff --git a/docs/en/advanced_guides/customize_optimizer.md b/docs/en/advanced_guides/customize_optimizer.md index fd6a28297f..09bcc80ca2 100644 --- a/docs/en/advanced_guides/customize_optimizer.md +++ b/docs/en/advanced_guides/customize_optimizer.md @@ -1,3 +1,3 @@ -# Customize Optimizer and Scheduler - -Coming soon. +# Customize Optimizer and Scheduler + +Coming soon. diff --git a/docs/en/advanced_guides/customize_transforms.md b/docs/en/advanced_guides/customize_transforms.md index 154413994b..860bd0515e 100644 --- a/docs/en/advanced_guides/customize_transforms.md +++ b/docs/en/advanced_guides/customize_transforms.md @@ -1,3 +1,3 @@ -# Customize Data Transformation and Augmentation - -Coming soon. +# Customize Data Transformation and Augmentation + +Coming soon. diff --git a/docs/en/advanced_guides/dataflow.md b/docs/en/advanced_guides/dataflow.md index 9f098b028d..c6d55c0d98 100644 --- a/docs/en/advanced_guides/dataflow.md +++ b/docs/en/advanced_guides/dataflow.md @@ -1,3 +1,3 @@ -# Dataflow in MMPose - -Coming soon. +# Dataflow in MMPose + +Coming soon. diff --git a/docs/en/advanced_guides/how_to_deploy.md b/docs/en/advanced_guides/how_to_deploy.md index b4fead876c..38c3cb771a 100644 --- a/docs/en/advanced_guides/how_to_deploy.md +++ b/docs/en/advanced_guides/how_to_deploy.md @@ -1,3 +1,3 @@ -# How to Deploy MMPose Models - -Coming soon. +# How to Deploy MMPose Models + +Coming soon. diff --git a/docs/en/advanced_guides/implement_new_models.md b/docs/en/advanced_guides/implement_new_models.md index 4a10b0c3c9..47a6c96bc4 100644 --- a/docs/en/advanced_guides/implement_new_models.md +++ b/docs/en/advanced_guides/implement_new_models.md @@ -1,3 +1,3 @@ -# Implement New Models - -Coming soon. +# Implement New Models + +Coming soon. diff --git a/docs/en/advanced_guides/model_analysis.md b/docs/en/advanced_guides/model_analysis.md index e10bb634a6..380af1f8c7 100644 --- a/docs/en/advanced_guides/model_analysis.md +++ b/docs/en/advanced_guides/model_analysis.md @@ -1,102 +1,102 @@ -# Model Analysis - -## Get Model Params & FLOPs - -MMPose provides `tools/analysis_tools/get_flops.py` to get model parameters and FLOPs. - -```shell -python tools/analysis_tools/get_flops.py ${CONFIG_FILE} [--shape ${INPUT_SHAPE}] [--cfg-options ${CFG_OPTIONS}] -``` - -Description of all arguments: - -`CONFIG_FILE` : The path of a model config file. - -`--shape`: The input shape to the model. - -`--input-constructor`: If specified as batch, it will generate a batch tensor to calculate FLOPs. - -`--batch-size`:If `--input-constructor` is specified as batch, it will generate a random tensor with shape `(batch_size, 3, **input_shape)` to calculate FLOPs. - -`--cfg-options`: If specified, the key-value pair optional `cfg` will be merged into config file. - -Example: - -```shell -python tools/analysis_tools/get_flops.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py -``` - -We will get the following results: - -```text -============================== -Input shape: (1, 3, 256, 192) -Flops: 7.7 GFLOPs -Params: 28.54 M -============================== -``` - -```{note} -This tool is still experimental and we do not guarantee that the number is absolutely correct. Some operators are not counted into FLOPs like GN and custom operators. -``` - -## Log Analysis - -MMPose provides `tools/analysis_tools/analyze_logs.py` to analyze the training log. The log file can be either a json file or a text file. The json file is recommended, because it is more convenient to parse and visualize. 
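Since the JSON log is just a JSON-lines file, it can also be inspected directly without `analyze_logs.py`. The sketch below assumes each training record contains a `loss_kpt` entry; adjust the file name and keys to your own log.

```python
import json

import matplotlib.pyplot as plt

steps, losses = [], []
with open('log.json', 'r', encoding='utf-8') as f:
    for line in f:
        record = json.loads(line)
        if 'loss_kpt' in record:  # keep training records only
            steps.append(len(steps))
            losses.append(record['loss_kpt'])

plt.plot(steps, losses, label='loss_kpt')
plt.xlabel('logged step')
plt.ylabel('loss')
plt.legend()
plt.savefig('loss_kpt.png')
```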
- -Currently, the following functions are supported: - -- Plot loss/accuracy curves -- Calculate training time - -### Plot Loss/Accuracy Curves - -The function depends on `seaborn`, please install it first by running `pip install seaborn`. - -![log_curve](https://user-images.githubusercontent.com/87690686/188538215-5d985aaa-59f8-44cf-b6f9-10890d599e9c.png) - -```shell -python tools/analysis_tools/analyze_logs.py plot_curve ${JSON_LOGS} [--keys ${KEYS}] [--title ${TITLE}] [--legend ${LEGEND}] [--backend ${BACKEND}] [--style ${STYLE}] [--out ${OUT_FILE}] -``` - -Examples: - -- Plot loss curve - - ```shell - python tools/analysis_tools/analyze_logs.py plot_curve log.json --keys loss_kpt --legend loss_kpt - ``` - -- Plot accuracy curve and export to PDF file - - ```shell - python tools/analysis_tools/analyze_logs.py plot_curve log.json --keys acc_pose --out results.pdf - ``` - -- Plot multiple log files on the same figure - - ```shell - python tools/analysis_tools/analyze_logs.py plot_curve log1.json log2.json --keys loss_kpt --legend run1 run2 --title loss_kpt --out loss_kpt.png - ``` - -### Calculate Training Time - -```shell -python tools/analysis_tools/analyze_logs.py cal_train_time ${JSON_LOGS} [--include-outliers] -``` - -Examples: - -```shell -python tools/analysis_tools/analyze_logs.py cal_train_time log.json -``` - -The result is as follows: - -```text ------Analyze train time of hrnet_w32_256x192.json----- -slowest epoch 56, average time is 0.6924 -fastest epoch 1, average time is 0.6502 -time std over epochs is 0.0085 -average iter time: 0.6688 s/iter -``` +# Model Analysis + +## Get Model Params & FLOPs + +MMPose provides `tools/analysis_tools/get_flops.py` to get model parameters and FLOPs. + +```shell +python tools/analysis_tools/get_flops.py ${CONFIG_FILE} [--shape ${INPUT_SHAPE}] [--cfg-options ${CFG_OPTIONS}] +``` + +Description of all arguments: + +`CONFIG_FILE` : The path of a model config file. + +`--shape`: The input shape to the model. + +`--input-constructor`: If specified as batch, it will generate a batch tensor to calculate FLOPs. + +`--batch-size`:If `--input-constructor` is specified as batch, it will generate a random tensor with shape `(batch_size, 3, **input_shape)` to calculate FLOPs. + +`--cfg-options`: If specified, the key-value pair optional `cfg` will be merged into config file. + +Example: + +```shell +python tools/analysis_tools/get_flops.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py +``` + +We will get the following results: + +```text +============================== +Input shape: (1, 3, 256, 192) +Flops: 7.7 GFLOPs +Params: 28.54 M +============================== +``` + +```{note} +This tool is still experimental and we do not guarantee that the number is absolutely correct. Some operators are not counted into FLOPs like GN and custom operators. +``` + +## Log Analysis + +MMPose provides `tools/analysis_tools/analyze_logs.py` to analyze the training log. The log file can be either a json file or a text file. The json file is recommended, because it is more convenient to parse and visualize. + +Currently, the following functions are supported: + +- Plot loss/accuracy curves +- Calculate training time + +### Plot Loss/Accuracy Curves + +The function depends on `seaborn`, please install it first by running `pip install seaborn`. 
+ +![log_curve](https://user-images.githubusercontent.com/87690686/188538215-5d985aaa-59f8-44cf-b6f9-10890d599e9c.png) + +```shell +python tools/analysis_tools/analyze_logs.py plot_curve ${JSON_LOGS} [--keys ${KEYS}] [--title ${TITLE}] [--legend ${LEGEND}] [--backend ${BACKEND}] [--style ${STYLE}] [--out ${OUT_FILE}] +``` + +Examples: + +- Plot loss curve + + ```shell + python tools/analysis_tools/analyze_logs.py plot_curve log.json --keys loss_kpt --legend loss_kpt + ``` + +- Plot accuracy curve and export to PDF file + + ```shell + python tools/analysis_tools/analyze_logs.py plot_curve log.json --keys acc_pose --out results.pdf + ``` + +- Plot multiple log files on the same figure + + ```shell + python tools/analysis_tools/analyze_logs.py plot_curve log1.json log2.json --keys loss_kpt --legend run1 run2 --title loss_kpt --out loss_kpt.png + ``` + +### Calculate Training Time + +```shell +python tools/analysis_tools/analyze_logs.py cal_train_time ${JSON_LOGS} [--include-outliers] +``` + +Examples: + +```shell +python tools/analysis_tools/analyze_logs.py cal_train_time log.json +``` + +The result is as follows: + +```text +-----Analyze train time of hrnet_w32_256x192.json----- +slowest epoch 56, average time is 0.6924 +fastest epoch 1, average time is 0.6502 +time std over epochs is 0.0085 +average iter time: 0.6688 s/iter +``` diff --git a/docs/en/api.rst b/docs/en/api.rst index 48819a2531..07b679eb20 100644 --- a/docs/en/api.rst +++ b/docs/en/api.rst @@ -1,134 +1,134 @@ -mmpose.apis -------------- -.. automodule:: mmpose.apis - :members: - -mmpose.codecs -------------- -.. automodule:: mmpose.codecs - :members: - -mmpose.models ---------------- -backbones -^^^^^^^^^^^ -.. automodule:: mmpose.models.backbones - :members: - -necks -^^^^^^^^^^^ -.. automodule:: mmpose.models.necks - :members: - -detectors -^^^^^^^^^^^ -.. automodule:: mmpose.models.pose_estimators - :members: - -heads -^^^^^^^^^^^^^^^ -.. automodule:: mmpose.models.heads - :members: - -losses -^^^^^^^^^^^ -.. automodule:: mmpose.models.losses - :members: - -misc -^^^^^^^^^^^ -.. automodule:: mmpose.models.utils - :members: - -mmpose.datasets ------------------ -.. automodule:: mmpose.datasets - :members: - -datasets -^^^^^^^^^^^ -.. automodule:: mmpose.datasets.datasets.base - :members: - :noindex: - -.. automodule:: mmpose.datasets.datasets.body - :members: - :noindex: - -.. automodule:: mmpose.datasets.datasets.face - :members: - :noindex: - -.. automodule:: mmpose.datasets.datasets.hand - :members: - :noindex: - -.. automodule:: mmpose.datasets.datasets.animal - :members: - :noindex: - -.. automodule:: mmpose.datasets.datasets.fashion - :members: - :noindex: - -transforms -^^^^^^^^^^^ -.. automodule:: mmpose.datasets.transforms.loading - :members: - -.. automodule:: mmpose.datasets.transforms.common_transforms - :members: - -.. automodule:: mmpose.datasets.transforms.topdown_transforms - :members: - -.. automodule:: mmpose.datasets.transforms.bottomup_transforms - :members: - -.. automodule:: mmpose.datasets.transforms.formatting - :members: - -mmpose.structures ---------------- -.. automodule:: mmpose.structures - :members: - -bbox -^^^^^^^^^^^ -.. automodule:: mmpose.structures.bbox - :members: - -keypoint -^^^^^^^^^^^ -.. automodule:: mmpose.structures.keypoint - :members: - - -mmpose.registry ---------------- -.. automodule:: mmpose.registry - :members: - -mmpose.evaluation ------------------ -metrics -^^^^^^^^^^^ -.. automodule:: mmpose.evaluation.metrics - :members: - -functional -^^^^^^^^^^^ -.. 
automodule:: mmpose.evaluation.functional - :members: - -mmpose.visualization --------------------- -.. automodule:: mmpose.visualization - :members: - -mmpose.engine ---------------- -hooks -^^^^^^^^^^^ -.. automodule:: mmpose.engine.hooks - :members: +mmpose.apis +------------- +.. automodule:: mmpose.apis + :members: + +mmpose.codecs +------------- +.. automodule:: mmpose.codecs + :members: + +mmpose.models +--------------- +backbones +^^^^^^^^^^^ +.. automodule:: mmpose.models.backbones + :members: + +necks +^^^^^^^^^^^ +.. automodule:: mmpose.models.necks + :members: + +detectors +^^^^^^^^^^^ +.. automodule:: mmpose.models.pose_estimators + :members: + +heads +^^^^^^^^^^^^^^^ +.. automodule:: mmpose.models.heads + :members: + +losses +^^^^^^^^^^^ +.. automodule:: mmpose.models.losses + :members: + +misc +^^^^^^^^^^^ +.. automodule:: mmpose.models.utils + :members: + +mmpose.datasets +----------------- +.. automodule:: mmpose.datasets + :members: + +datasets +^^^^^^^^^^^ +.. automodule:: mmpose.datasets.datasets.base + :members: + :noindex: + +.. automodule:: mmpose.datasets.datasets.body + :members: + :noindex: + +.. automodule:: mmpose.datasets.datasets.face + :members: + :noindex: + +.. automodule:: mmpose.datasets.datasets.hand + :members: + :noindex: + +.. automodule:: mmpose.datasets.datasets.animal + :members: + :noindex: + +.. automodule:: mmpose.datasets.datasets.fashion + :members: + :noindex: + +transforms +^^^^^^^^^^^ +.. automodule:: mmpose.datasets.transforms.loading + :members: + +.. automodule:: mmpose.datasets.transforms.common_transforms + :members: + +.. automodule:: mmpose.datasets.transforms.topdown_transforms + :members: + +.. automodule:: mmpose.datasets.transforms.bottomup_transforms + :members: + +.. automodule:: mmpose.datasets.transforms.formatting + :members: + +mmpose.structures +--------------- +.. automodule:: mmpose.structures + :members: + +bbox +^^^^^^^^^^^ +.. automodule:: mmpose.structures.bbox + :members: + +keypoint +^^^^^^^^^^^ +.. automodule:: mmpose.structures.keypoint + :members: + + +mmpose.registry +--------------- +.. automodule:: mmpose.registry + :members: + +mmpose.evaluation +----------------- +metrics +^^^^^^^^^^^ +.. automodule:: mmpose.evaluation.metrics + :members: + +functional +^^^^^^^^^^^ +.. automodule:: mmpose.evaluation.functional + :members: + +mmpose.visualization +-------------------- +.. automodule:: mmpose.visualization + :members: + +mmpose.engine +--------------- +hooks +^^^^^^^^^^^ +.. automodule:: mmpose.engine.hooks + :members: diff --git a/docs/en/collect_modelzoo.py b/docs/en/collect_modelzoo.py index 0c87d3c6ef..965250f712 100644 --- a/docs/en/collect_modelzoo.py +++ b/docs/en/collect_modelzoo.py @@ -1,196 +1,196 @@ -#!/usr/bin/env python -# Copyright (c) OpenMMLab. All rights reserved. -import os -import os.path as osp -import re -from collections import defaultdict -from glob import glob - -from addict import Addict -from titlecase import titlecase - - -def _get_model_docs(): - """Get all model document files. - - Returns: - list[str]: file paths - """ - config_root = osp.join('..', '..', 'configs') - pattern = osp.sep.join(['*'] * 4) + '.md' - docs = glob(osp.join(config_root, pattern)) - docs = [doc for doc in docs if '_base_' not in doc] - return docs - - -def _parse_model_doc_path(path): - """Parse doc file path. - - Typical path would be like: - - configs////.md - - An example is: - - "configs/animal_2d_keypoint/topdown_heatmap/ - animalpose/resnet_animalpose.md" - - Returns: - tuple: - - task (str): e.g. 
``'Animal 2D Keypoint'`` - - dataset (str): e.g. ``'animalpose'`` - - keywords (tuple): e.g. ``('topdown heatmap', 'resnet')`` - """ - _path = path.split(osp.sep) - _rel_path = _path[_path.index('configs'):] - - # get task - def _titlecase_callback(word, **kwargs): - if word == '2d': - return '2D' - if word == '3d': - return '3D' - - task = titlecase( - _rel_path[1].replace('_', ' '), callback=_titlecase_callback) - - # get dataset - dataset = _rel_path[3] - - # get keywords - keywords_algo = (_rel_path[2], ) - keywords_setting = tuple(_rel_path[4][:-3].split('_')) - keywords = keywords_algo + keywords_setting - - return task, dataset, keywords - - -def _get_paper_refs(): - """Get all paper references. - - Returns: - Dict[str, List[str]]: keys are paper categories and values are lists - of paper paths. - """ - papers = glob('../src/papers/*/*.md') - paper_refs = defaultdict(list) - for fn in papers: - category = fn.split(osp.sep)[3] - paper_refs[category].append(fn) - - return paper_refs - - -def _parse_paper_ref(fn): - """Get paper name and indicator pattern from a paper reference file. - - Returns: - tuple: - - paper_name (str) - - paper_indicator (str) - """ - indicator = None - with open(fn, 'r', encoding='utf-8') as f: - for line in f.readlines(): - if line.startswith('', '', indicator).strip() - return paper_name, indicator - - -def main(): - - # Build output folders - os.makedirs('model_zoo', exist_ok=True) - os.makedirs('model_zoo_papers', exist_ok=True) - - # Collect all document contents - model_doc_list = _get_model_docs() - model_docs = Addict() - - for path in model_doc_list: - task, dataset, keywords = _parse_model_doc_path(path) - with open(path, 'r', encoding='utf-8') as f: - doc = { - 'task': task, - 'dataset': dataset, - 'keywords': keywords, - 'path': path, - 'content': f.read() - } - model_docs[task][dataset][keywords] = doc - - # Write files by task - for task, dataset_dict in model_docs.items(): - lines = [f'# {task}', ''] - for dataset, keywords_dict in dataset_dict.items(): - lines += [ - '
', '

', '', f'## {titlecase(dataset)} Dataset', - '' - ] - - for keywords, doc in keywords_dict.items(): - keyword_strs = [ - titlecase(x.replace('_', ' ')) for x in keywords - ] - dataset_str = titlecase(dataset) - if dataset_str in keyword_strs: - keyword_strs.remove(dataset_str) - - lines += [ - '
', '', - (f'### {" + ".join(keyword_strs)}' - f' on {dataset_str}'), '', doc['content'], '' - ] - - fn = osp.join('model_zoo', f'{task.replace(" ", "_").lower()}.md') - with open(fn, 'w', encoding='utf-8') as f: - f.write('\n'.join(lines)) - - # Write files by paper - paper_refs = _get_paper_refs() - - for paper_cat, paper_list in paper_refs.items(): - lines = [] - for paper_fn in paper_list: - paper_name, indicator = _parse_paper_ref(paper_fn) - paperlines = [] - for task, dataset_dict in model_docs.items(): - for dataset, keywords_dict in dataset_dict.items(): - for keywords, doc_info in keywords_dict.items(): - - if indicator not in doc_info['content']: - continue - - keyword_strs = [ - titlecase(x.replace('_', ' ')) for x in keywords - ] - - dataset_str = titlecase(dataset) - if dataset_str in keyword_strs: - keyword_strs.remove(dataset_str) - paperlines += [ - '
', '', - (f'### {" + ".join(keyword_strs)}' - f' on {dataset_str}'), '', doc_info['content'], '' - ] - if paperlines: - lines += ['
', '

', '', f'## {paper_name}', ''] - lines += paperlines - - if lines: - lines = [f'# {titlecase(paper_cat)}', ''] + lines - with open( - osp.join('model_zoo_papers', f'{paper_cat.lower()}.md'), - 'w', - encoding='utf-8') as f: - f.write('\n'.join(lines)) - - -if __name__ == '__main__': - print('collect model zoo documents') - main() +#!/usr/bin/env python +# Copyright (c) OpenMMLab. All rights reserved. +import os +import os.path as osp +import re +from collections import defaultdict +from glob import glob + +from addict import Addict +from titlecase import titlecase + + +def _get_model_docs(): + """Get all model document files. + + Returns: + list[str]: file paths + """ + config_root = osp.join('..', '..', 'configs') + pattern = osp.sep.join(['*'] * 4) + '.md' + docs = glob(osp.join(config_root, pattern)) + docs = [doc for doc in docs if '_base_' not in doc] + return docs + + +def _parse_model_doc_path(path): + """Parse doc file path. + + Typical path would be like: + + configs////.md + + An example is: + + "configs/animal_2d_keypoint/topdown_heatmap/ + animalpose/resnet_animalpose.md" + + Returns: + tuple: + - task (str): e.g. ``'Animal 2D Keypoint'`` + - dataset (str): e.g. ``'animalpose'`` + - keywords (tuple): e.g. ``('topdown heatmap', 'resnet')`` + """ + _path = path.split(osp.sep) + _rel_path = _path[_path.index('configs'):] + + # get task + def _titlecase_callback(word, **kwargs): + if word == '2d': + return '2D' + if word == '3d': + return '3D' + + task = titlecase( + _rel_path[1].replace('_', ' '), callback=_titlecase_callback) + + # get dataset + dataset = _rel_path[3] + + # get keywords + keywords_algo = (_rel_path[2], ) + keywords_setting = tuple(_rel_path[4][:-3].split('_')) + keywords = keywords_algo + keywords_setting + + return task, dataset, keywords + + +def _get_paper_refs(): + """Get all paper references. + + Returns: + Dict[str, List[str]]: keys are paper categories and values are lists + of paper paths. + """ + papers = glob('../src/papers/*/*.md') + paper_refs = defaultdict(list) + for fn in papers: + category = fn.split(osp.sep)[3] + paper_refs[category].append(fn) + + return paper_refs + + +def _parse_paper_ref(fn): + """Get paper name and indicator pattern from a paper reference file. + + Returns: + tuple: + - paper_name (str) + - paper_indicator (str) + """ + indicator = None + with open(fn, 'r', encoding='utf-8') as f: + for line in f.readlines(): + if line.startswith('', '', indicator).strip() + return paper_name, indicator + + +def main(): + + # Build output folders + os.makedirs('model_zoo', exist_ok=True) + os.makedirs('model_zoo_papers', exist_ok=True) + + # Collect all document contents + model_doc_list = _get_model_docs() + model_docs = Addict() + + for path in model_doc_list: + task, dataset, keywords = _parse_model_doc_path(path) + with open(path, 'r', encoding='utf-8') as f: + doc = { + 'task': task, + 'dataset': dataset, + 'keywords': keywords, + 'path': path, + 'content': f.read() + } + model_docs[task][dataset][keywords] = doc + + # Write files by task + for task, dataset_dict in model_docs.items(): + lines = [f'# {task}', ''] + for dataset, keywords_dict in dataset_dict.items(): + lines += [ + '
', '

', '', f'## {titlecase(dataset)} Dataset', + '' + ] + + for keywords, doc in keywords_dict.items(): + keyword_strs = [ + titlecase(x.replace('_', ' ')) for x in keywords + ] + dataset_str = titlecase(dataset) + if dataset_str in keyword_strs: + keyword_strs.remove(dataset_str) + + lines += [ + '
', '', + (f'### {" + ".join(keyword_strs)}' + f' on {dataset_str}'), '', doc['content'], '' + ] + + fn = osp.join('model_zoo', f'{task.replace(" ", "_").lower()}.md') + with open(fn, 'w', encoding='utf-8') as f: + f.write('\n'.join(lines)) + + # Write files by paper + paper_refs = _get_paper_refs() + + for paper_cat, paper_list in paper_refs.items(): + lines = [] + for paper_fn in paper_list: + paper_name, indicator = _parse_paper_ref(paper_fn) + paperlines = [] + for task, dataset_dict in model_docs.items(): + for dataset, keywords_dict in dataset_dict.items(): + for keywords, doc_info in keywords_dict.items(): + + if indicator not in doc_info['content']: + continue + + keyword_strs = [ + titlecase(x.replace('_', ' ')) for x in keywords + ] + + dataset_str = titlecase(dataset) + if dataset_str in keyword_strs: + keyword_strs.remove(dataset_str) + paperlines += [ + '
', '', + (f'### {" + ".join(keyword_strs)}' + f' on {dataset_str}'), '', doc_info['content'], '' + ] + if paperlines: + lines += ['
', '

', '', f'## {paper_name}', ''] + lines += paperlines + + if lines: + lines = [f'# {titlecase(paper_cat)}', ''] + lines + with open( + osp.join('model_zoo_papers', f'{paper_cat.lower()}.md'), + 'w', + encoding='utf-8') as f: + f.write('\n'.join(lines)) + + +if __name__ == '__main__': + print('collect model zoo documents') + main() diff --git a/docs/en/collect_projects.py b/docs/en/collect_projects.py index 29c0449862..971e21cf66 100644 --- a/docs/en/collect_projects.py +++ b/docs/en/collect_projects.py @@ -1,116 +1,116 @@ -#!/usr/bin/env python -# Copyright (c) OpenMMLab. All rights reserved. -import os -import os.path as osp -import re -from glob import glob - - -def _get_project_docs(): - """Get all project document files. - - Returns: - list[str]: file paths - """ - project_root = osp.join('..', '..', 'projects') - pattern = osp.sep.join(['*'] * 2) + '.md' - docs = glob(osp.join(project_root, pattern)) - docs = [ - doc for doc in docs - if 'example_project' not in doc and '_CN' not in doc - ] - return docs - - -def _parse_project_doc_path(fn): - """Get project name and banner from a project reference file. - - Returns: - tuple: - - project_name (str) - - project_banner (str) - """ - project_banner, project_name = None, None - with open(fn, 'r', encoding='utf-8') as f: - for line in f.readlines(): - if re.match('^( )*', ' ' + banner, '', '
', '' - ] - - project_intro_doc = _get_project_intro_doc() - faq_doc = _get_faq_doc() - - with open( - osp.join('projects', 'community_projects.md'), 'w', - encoding='utf-8') as f: - f.write('# Projects from Community Contributors\n') - f.write(''.join(project_intro_doc)) - f.write('\n'.join(project_lines)) - f.write(''.join(faq_doc)) - - -if __name__ == '__main__': - print('collect project documents') - main() +#!/usr/bin/env python +# Copyright (c) OpenMMLab. All rights reserved. +import os +import os.path as osp +import re +from glob import glob + + +def _get_project_docs(): + """Get all project document files. + + Returns: + list[str]: file paths + """ + project_root = osp.join('..', '..', 'projects') + pattern = osp.sep.join(['*'] * 2) + '.md' + docs = glob(osp.join(project_root, pattern)) + docs = [ + doc for doc in docs + if 'example_project' not in doc and '_CN' not in doc + ] + return docs + + +def _parse_project_doc_path(fn): + """Get project name and banner from a project reference file. + + Returns: + tuple: + - project_name (str) + - project_banner (str) + """ + project_banner, project_name = None, None + with open(fn, 'r', encoding='utf-8') as f: + for line in f.readlines(): + if re.match('^( )*', ' ' + banner, '', '
', '' + ] + + project_intro_doc = _get_project_intro_doc() + faq_doc = _get_faq_doc() + + with open( + osp.join('projects', 'community_projects.md'), 'w', + encoding='utf-8') as f: + f.write('# Projects from Community Contributors\n') + f.write(''.join(project_intro_doc)) + f.write('\n'.join(project_lines)) + f.write(''.join(faq_doc)) + + +if __name__ == '__main__': + print('collect project documents') + main() diff --git a/docs/en/conf.py b/docs/en/conf.py index 4359aa46e9..90bf66d0dd 100644 --- a/docs/en/conf.py +++ b/docs/en/conf.py @@ -1,111 +1,111 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# Configuration file for the Sphinx documentation builder. -# -# This file only contains a selection of the most common options. For a full -# list see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html - -# -- Path setup -------------------------------------------------------------- - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. - -import os -import subprocess -import sys - -import pytorch_sphinx_theme - -sys.path.insert(0, os.path.abspath('../..')) - -# -- Project information ----------------------------------------------------- - -project = 'MMPose' -copyright = '2020-2021, OpenMMLab' -author = 'MMPose Authors' - -# The full version, including alpha/beta/rc tags -version_file = '../../mmpose/version.py' - - -def get_version(): - with open(version_file, 'r') as f: - exec(compile(f.read(), version_file, 'exec')) - return locals()['__version__'] - - -release = get_version() - -# -- General configuration --------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', - 'sphinx_markdown_tables', 'sphinx_copybutton', 'myst_parser', - 'sphinx.ext.autosummary' -] - -autodoc_mock_imports = ['json_tricks', 'mmpose.version'] - -# Ignore >>> when copying code -copybutton_prompt_text = r'>>> |\.\.\. ' -copybutton_prompt_is_regexp = True - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] - -# -- Options for HTML output ------------------------------------------------- -source_suffix = { - '.rst': 'restructuredtext', - '.md': 'markdown', -} - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -html_theme = 'pytorch_sphinx_theme' -html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] -html_theme_options = { - 'menu': [ - { - 'name': 'GitHub', - 'url': 'https://github.com/open-mmlab/mmpose/tree/main' - }, - ], - # Specify the language of the shared menu - 'menu_lang': - 'en' -} - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". 
- -language = 'en' - -html_static_path = ['_static'] -html_css_files = ['css/readthedocs.css'] - -# Enable ::: for my_st -myst_enable_extensions = ['colon_fence'] - -master_doc = 'index' - - -def builder_inited_handler(app): - subprocess.run(['python', './collect_modelzoo.py']) - subprocess.run(['python', './collect_projects.py']) - subprocess.run(['sh', './merge_docs.sh']) - subprocess.run(['python', './stats.py']) - - -def setup(app): - app.connect('builder-inited', builder_inited_handler) +# Copyright (c) OpenMMLab. All rights reserved. +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. + +import os +import subprocess +import sys + +import pytorch_sphinx_theme + +sys.path.insert(0, os.path.abspath('../..')) + +# -- Project information ----------------------------------------------------- + +project = 'MMPose' +copyright = '2020-2021, OpenMMLab' +author = 'MMPose Authors' + +# The full version, including alpha/beta/rc tags +version_file = '../../mmpose/version.py' + + +def get_version(): + with open(version_file, 'r') as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +release = get_version() + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', + 'sphinx_markdown_tables', 'sphinx_copybutton', 'myst_parser', + 'sphinx.ext.autosummary' +] + +autodoc_mock_imports = ['json_tricks', 'mmpose.version'] + +# Ignore >>> when copying code +copybutton_prompt_text = r'>>> |\.\.\. ' +copybutton_prompt_is_regexp = True + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# -- Options for HTML output ------------------------------------------------- +source_suffix = { + '.rst': 'restructuredtext', + '.md': 'markdown', +} + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'pytorch_sphinx_theme' +html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] +html_theme_options = { + 'menu': [ + { + 'name': 'GitHub', + 'url': 'https://github.com/open-mmlab/mmpose/tree/main' + }, + ], + # Specify the language of the shared menu + 'menu_lang': + 'en' +} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". 
+ +language = 'en' + +html_static_path = ['_static'] +html_css_files = ['css/readthedocs.css'] + +# Enable ::: for my_st +myst_enable_extensions = ['colon_fence'] + +master_doc = 'index' + + +def builder_inited_handler(app): + subprocess.run(['python', './collect_modelzoo.py']) + subprocess.run(['python', './collect_projects.py']) + subprocess.run(['sh', './merge_docs.sh']) + subprocess.run(['python', './stats.py']) + + +def setup(app): + app.connect('builder-inited', builder_inited_handler) diff --git a/docs/en/contribution_guide.md b/docs/en/contribution_guide.md index 525ca9a7e1..60ecd37ce3 100644 --- a/docs/en/contribution_guide.md +++ b/docs/en/contribution_guide.md @@ -1,191 +1,191 @@ -# How to Contribute to MMPose - -Welcome to join the MMPose community, we are committed to building cutting-edge computer vision foundational library. All kinds of contributions are welcomed, including but not limited to: - -- **Fix bugs** - 1. If the modification involves significant changes, it's recommended to create an issue first that describes the error information and how to trigger the bug. Other developers will discuss it with you and propose a proper solution. - 2. Fix the bug and add the corresponding unit test, submit the PR. -- **Add new features or components** - 1. If the new feature or module involves a large amount of code changes, we suggest you to submit an issue first, and we will confirm the necessity of the function with you. - 2. Implement the new feature and add unit tests, submit the PR. -- **Improve documentation or translation** - - If you find errors or incomplete documentation, please submit a PR directly. - -```{note} -- If you hope to contribute to MMPose 1.0, please create a new branch from dev-1.x and submit a PR to the dev-1.x branch. -- If you are the author of papers in this field and would like to include your work to MMPose, please contact us. We will much appreciate your contribution. -- If you hope to share your MMPose-based projects with the community at once, consider creating a PR to `Projects` directory, which will simplify the review process and bring in the projects as soon as possible. Checkout our [example project](/projects/example_project) -- If you wish to join the MMPose developers, please feel free to contact us and we will invite you to join the MMPose developers group. -``` - -## Preparation - -The commands for processing pull requests are implemented using Git, and this chapter details Git Configuration and associated GitHub. - -### Git Configuration - -First, you need to install Git and configure your Git username and email. - -```shell -# view the Git version -git --version -``` - -Second, check your Git config and ensure that `user.name` and `user.email` are properly configured. - -```shell -# view the Git config -git config --global --list -# configure the user name and email -git config --global user.name "Change your user name here" -git config --global user.email "Change your user email here" -``` - -## Pull Request Workflow - -If you’re not familiar with Pull Request, don’t worry! The following guidance will tell you how to create a Pull Request step by step. If you want to dive into the development mode of Pull Request, you can refer to the [official documents](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/about-pull-requests). - -### 1. 
Fork and Clone - -If you are posting a pull request for the first time, you should fork the OpenMMLab repositories by clicking the **Fork** button in the top right corner of the GitHub page, and the forked repositories will appear under your GitHub profile. - -![](https://user-images.githubusercontent.com/13503330/223318144-a49c6cef-b1fb-45b8-aa2b-0833d0e3fd5c.png) - -Then you need to clone the forked repository to your local machine. - -```shell -# clone the forked repository -git clone https://github.com/username/mmpose.git - -# Add official repository as upstream remote -cd mmpose -git remote add upstream https://github.com/open-mmlab/mmpose.git -``` - -Enter the following command in the terminal to see if the remote repository was successfully added. - -```shell -git remote -v -``` - -If the following message appears, you have successfully added a remote repository. - -```Shell -origin https://github.com/{username}/mmpose.git (fetch) -origin https://github.com/{username}/mmpose.git (push) -upstream https://github.com/open-mmlab/mmpose.git (fetch) -upstream https://github.com/open-mmlab/mmpose.git (push) -``` - -```{note} -Here’s a brief introduction to the origin and upstream. When we use “git clone”, we create an “origin” remote by default, which points to the repository cloned from. As for “upstream”, we add it ourselves to point to the target repository. Of course, if you don’t like the name “upstream”, you could name it as you wish. Usually, we’ll push the code to “origin”. If the pushed code conflicts with the latest code in official(“upstream”), we should pull the latest code from upstream to resolve the conflicts, and then push to “origin” again. The posted Pull Request will be updated automatically. -``` - -### 2. Configure pre-commit - -You should configure pre-commit in the local development environment to make sure the code style matches that of OpenMMLab. Note: The following code should be executed under the MMPOSE directory. - -```Shell -pip install -U pre-commit -pre-commit install -``` - -Check that pre-commit is configured successfully, and install the hooks defined in `.pre-commit-config.yaml`. - -```Shell -pre-commit run --all-files -``` - -![](https://user-images.githubusercontent.com/57566630/202368856-0465a90d-8fce-4345-918e-67b8b9c82614.png) - -```{note} -Chinese users may fail to download the pre-commit hooks due to the network issue. In this case, you could download these hooks from: - -pip install -U pre-commit -i https://pypi.tuna.tsinghua.edu.cn/simple - -or: - -pip install -U pre-commit -i https://pypi.mirrors.ustc.edu.cn/simple -``` - -If the installation process is interrupted, you can repeatedly run `pre-commit run ...` to continue the installation. - -If the code does not conform to the code style specification, pre-commit will raise a warning and fixes some of the errors automatically. - -![](https://user-images.githubusercontent.com/57566630/202369176-67642454-0025-4023-a095-263529107aa3.png) - -### 3. Create a development branch - -After configuring the pre-commit, we should create a branch based on the dev branch to develop the new feature or fix the bug. The proposed branch name is `username/pr_name`. - -```Shell -git checkout -b username/refactor_contributing_doc -``` - -In subsequent development, if the dev branch of the local repository lags behind the dev branch of the official repository, you need to pull the upstream dev branch first and then rebase it to the local development branch. 
- -```Shell -git checkout username/refactor_contributing_doc -git fetch upstream -git rebase upstream/dev-1.x -``` - -When rebasing, if a conflict arises, you need to resolve the conflict manually, then execute the `git add` command, and then execute the `git rebase --continue` command until the rebase is complete. - -### 4. Commit the code and pass the unit test - -After the local development is done, we need to pass the unit tests locally and then commit the code. - -```shell -# run unit test -pytest tests/ - -# commit the code -git add . -git commit -m "commit message" -``` - -### 5. Push the code to the remote repository - -After the local development is done, we need to push the code to the remote repository. - -```Shell -git push origin username/refactor_contributing_doc -``` - -### 6. Create a Pull Request - -#### (1) Create a Pull Request on GitHub - -![](https://user-images.githubusercontent.com/13503330/223321382-e6068e18-1d91-4458-8328-b1c7c907b3b2.png) - -#### (2) Fill in the Pull Request template - -![](https://user-images.githubusercontent.com/57566630/167307569-a794b967-6e28-4eac-a942-00deb657815f.png) - -## Code Style - -### Python - -We adopt [PEP8](https://www.python.org/dev/peps/pep-0008/) as the preferred code style, and use the following tools for linting and formatting: - -- [flake8](https://github.com/PyCQA/flake8): A wrapper around some linter tools. -- [isort](https://github.com/timothycrosley/isort): A Python utility to sort imports. -- [yapf](https://github.com/google/yapf): A formatter for Python files. -- [codespell](https://github.com/codespell-project/codespell): A Python utility to fix common misspellings in text files. -- [mdformat](https://github.com/executablebooks/mdformat): Mdformat is an opinionated Markdown formatter that can be used to enforce a consistent style in Markdown files. -- [docformatter](https://github.com/myint/docformatter): A formatter to format docstring. - -Style configurations of yapf and isort can be found in [setup.cfg](/setup.cfg). - -We use [pre-commit hook](https://pre-commit.com/) that checks and formats for `flake8`, `yapf`, `isort`, `trailing whitespaces`, `markdown files`, -fixes `end-of-files`, `double-quoted-strings`, `python-encoding-pragma`, `mixed-line-ending`, sorts `requirments.txt` automatically on every commit. -The config for a pre-commit hook is stored in [.pre-commit-config](/.pre-commit-config.yaml). - -```{note} -Before you create a PR, make sure that your code lints and is formatted by yapf. -``` - -### C++ and CUDA - -We follow the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html). +# How to Contribute to MMPose + +Welcome to join the MMPose community, we are committed to building cutting-edge computer vision foundational library. All kinds of contributions are welcomed, including but not limited to: + +- **Fix bugs** + 1. If the modification involves significant changes, it's recommended to create an issue first that describes the error information and how to trigger the bug. Other developers will discuss it with you and propose a proper solution. + 2. Fix the bug and add the corresponding unit test, submit the PR. +- **Add new features or components** + 1. If the new feature or module involves a large amount of code changes, we suggest you to submit an issue first, and we will confirm the necessity of the function with you. + 2. Implement the new feature and add unit tests, submit the PR. 
+- **Improve documentation or translation** + - If you find errors or incomplete documentation, please submit a PR directly. + +```{note} +- If you hope to contribute to MMPose 1.0, please create a new branch from dev-1.x and submit a PR to the dev-1.x branch. +- If you are the author of papers in this field and would like to include your work to MMPose, please contact us. We will much appreciate your contribution. +- If you hope to share your MMPose-based projects with the community at once, consider creating a PR to `Projects` directory, which will simplify the review process and bring in the projects as soon as possible. Checkout our [example project](/projects/example_project) +- If you wish to join the MMPose developers, please feel free to contact us and we will invite you to join the MMPose developers group. +``` + +## Preparation + +The commands for processing pull requests are implemented using Git, and this chapter details Git Configuration and associated GitHub. + +### Git Configuration + +First, you need to install Git and configure your Git username and email. + +```shell +# view the Git version +git --version +``` + +Second, check your Git config and ensure that `user.name` and `user.email` are properly configured. + +```shell +# view the Git config +git config --global --list +# configure the user name and email +git config --global user.name "Change your user name here" +git config --global user.email "Change your user email here" +``` + +## Pull Request Workflow + +If you’re not familiar with Pull Request, don’t worry! The following guidance will tell you how to create a Pull Request step by step. If you want to dive into the development mode of Pull Request, you can refer to the [official documents](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/about-pull-requests). + +### 1. Fork and Clone + +If you are posting a pull request for the first time, you should fork the OpenMMLab repositories by clicking the **Fork** button in the top right corner of the GitHub page, and the forked repositories will appear under your GitHub profile. + +![](https://user-images.githubusercontent.com/13503330/223318144-a49c6cef-b1fb-45b8-aa2b-0833d0e3fd5c.png) + +Then you need to clone the forked repository to your local machine. + +```shell +# clone the forked repository +git clone https://github.com/username/mmpose.git + +# Add official repository as upstream remote +cd mmpose +git remote add upstream https://github.com/open-mmlab/mmpose.git +``` + +Enter the following command in the terminal to see if the remote repository was successfully added. + +```shell +git remote -v +``` + +If the following message appears, you have successfully added a remote repository. + +```Shell +origin https://github.com/{username}/mmpose.git (fetch) +origin https://github.com/{username}/mmpose.git (push) +upstream https://github.com/open-mmlab/mmpose.git (fetch) +upstream https://github.com/open-mmlab/mmpose.git (push) +``` + +```{note} +Here’s a brief introduction to the origin and upstream. When we use “git clone”, we create an “origin” remote by default, which points to the repository cloned from. As for “upstream”, we add it ourselves to point to the target repository. Of course, if you don’t like the name “upstream”, you could name it as you wish. Usually, we’ll push the code to “origin”. If the pushed code conflicts with the latest code in official(“upstream”), we should pull the latest code from upstream to resolve the conflicts, and then push to “origin” again. 
The posted Pull Request will be updated automatically. +``` + +### 2. Configure pre-commit + +You should configure pre-commit in the local development environment to make sure the code style matches that of OpenMMLab. Note: The following code should be executed under the MMPOSE directory. + +```Shell +pip install -U pre-commit +pre-commit install +``` + +Check that pre-commit is configured successfully, and install the hooks defined in `.pre-commit-config.yaml`. + +```Shell +pre-commit run --all-files +``` + +![](https://user-images.githubusercontent.com/57566630/202368856-0465a90d-8fce-4345-918e-67b8b9c82614.png) + +```{note} +Chinese users may fail to download the pre-commit hooks due to the network issue. In this case, you could download these hooks from: + +pip install -U pre-commit -i https://pypi.tuna.tsinghua.edu.cn/simple + +or: + +pip install -U pre-commit -i https://pypi.mirrors.ustc.edu.cn/simple +``` + +If the installation process is interrupted, you can repeatedly run `pre-commit run ...` to continue the installation. + +If the code does not conform to the code style specification, pre-commit will raise a warning and fixes some of the errors automatically. + +![](https://user-images.githubusercontent.com/57566630/202369176-67642454-0025-4023-a095-263529107aa3.png) + +### 3. Create a development branch + +After configuring the pre-commit, we should create a branch based on the dev branch to develop the new feature or fix the bug. The proposed branch name is `username/pr_name`. + +```Shell +git checkout -b username/refactor_contributing_doc +``` + +In subsequent development, if the dev branch of the local repository lags behind the dev branch of the official repository, you need to pull the upstream dev branch first and then rebase it to the local development branch. + +```Shell +git checkout username/refactor_contributing_doc +git fetch upstream +git rebase upstream/dev-1.x +``` + +When rebasing, if a conflict arises, you need to resolve the conflict manually, then execute the `git add` command, and then execute the `git rebase --continue` command until the rebase is complete. + +### 4. Commit the code and pass the unit test + +After the local development is done, we need to pass the unit tests locally and then commit the code. + +```shell +# run unit test +pytest tests/ + +# commit the code +git add . +git commit -m "commit message" +``` + +### 5. Push the code to the remote repository + +After the local development is done, we need to push the code to the remote repository. + +```Shell +git push origin username/refactor_contributing_doc +``` + +### 6. Create a Pull Request + +#### (1) Create a Pull Request on GitHub + +![](https://user-images.githubusercontent.com/13503330/223321382-e6068e18-1d91-4458-8328-b1c7c907b3b2.png) + +#### (2) Fill in the Pull Request template + +![](https://user-images.githubusercontent.com/57566630/167307569-a794b967-6e28-4eac-a942-00deb657815f.png) + +## Code Style + +### Python + +We adopt [PEP8](https://www.python.org/dev/peps/pep-0008/) as the preferred code style, and use the following tools for linting and formatting: + +- [flake8](https://github.com/PyCQA/flake8): A wrapper around some linter tools. +- [isort](https://github.com/timothycrosley/isort): A Python utility to sort imports. +- [yapf](https://github.com/google/yapf): A formatter for Python files. +- [codespell](https://github.com/codespell-project/codespell): A Python utility to fix common misspellings in text files. 
+- [mdformat](https://github.com/executablebooks/mdformat): Mdformat is an opinionated Markdown formatter that can be used to enforce a consistent style in Markdown files. +- [docformatter](https://github.com/myint/docformatter): A formatter to format docstring. + +Style configurations of yapf and isort can be found in [setup.cfg](/setup.cfg). + +We use [pre-commit hook](https://pre-commit.com/) that checks and formats for `flake8`, `yapf`, `isort`, `trailing whitespaces`, `markdown files`, +fixes `end-of-files`, `double-quoted-strings`, `python-encoding-pragma`, `mixed-line-ending`, sorts `requirments.txt` automatically on every commit. +The config for a pre-commit hook is stored in [.pre-commit-config](/.pre-commit-config.yaml). + +```{note} +Before you create a PR, make sure that your code lints and is formatted by yapf. +``` + +### C++ and CUDA + +We follow the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html). diff --git a/docs/en/dataset_zoo/2d_animal_keypoint.md b/docs/en/dataset_zoo/2d_animal_keypoint.md index 9ef6022ecc..1263f1bed8 100644 --- a/docs/en/dataset_zoo/2d_animal_keypoint.md +++ b/docs/en/dataset_zoo/2d_animal_keypoint.md @@ -1,535 +1,535 @@ -# 2D Animal Keypoint Dataset - -It is recommended to symlink the dataset root to `$MMPOSE/data`. -If your folder structure is different, you may need to change the corresponding paths in config files. - -MMPose supported datasets: - -- [Animal-Pose](#animal-pose) \[ [Homepage](https://sites.google.com/view/animal-pose/) \] -- [AP-10K](#ap-10k) \[ [Homepage](https://github.com/AlexTheBad/AP-10K/) \] -- [Horse-10](#horse-10) \[ [Homepage](http://www.mackenziemathislab.org/horse10) \] -- [MacaquePose](#macaquepose) \[ [Homepage](http://pri.ehub.kyoto-u.ac.jp/datasets/macaquepose/index.html) \] -- [Vinegar Fly](#vinegar-fly) \[ [Homepage](https://github.com/jgraving/DeepPoseKit-Data) \] -- [Desert Locust](#desert-locust) \[ [Homepage](https://github.com/jgraving/DeepPoseKit-Data) \] -- [Grévy’s Zebra](#grvys-zebra) \[ [Homepage](https://github.com/jgraving/DeepPoseKit-Data) \] -- [ATRW](#atrw) \[ [Homepage](https://cvwc2019.github.io/challenge.html) \] -- [Animal Kingdom](#Animal-Kindom) \[ [Homepage](https://openaccess.thecvf.com/content/CVPR2022/html/Ng_Animal_Kingdom_A_Large_and_Diverse_Dataset_for_Animal_Behavior_CVPR_2022_paper.html) \] - -## Animal-Pose - - - -
-Animal-Pose (ICCV'2019) - -```bibtex -@InProceedings{Cao_2019_ICCV, - author = {Cao, Jinkun and Tang, Hongyang and Fang, Hao-Shu and Shen, Xiaoyong and Lu, Cewu and Tai, Yu-Wing}, - title = {Cross-Domain Adaptation for Animal Pose Estimation}, - booktitle = {The IEEE International Conference on Computer Vision (ICCV)}, - month = {October}, - year = {2019} -} -``` - -
- -
- -
- -For [Animal-Pose](https://sites.google.com/view/animal-pose/) dataset, we prepare the dataset as follows: - -1. Download the images of [PASCAL VOC2012](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/#data), especially the five categories (dog, cat, sheep, cow, horse), which we use as trainval dataset. -2. Download the [test-set](https://drive.google.com/drive/folders/1DwhQobZlGntOXxdm7vQsE4bqbFmN3b9y?usp=sharing) images with raw annotations (1000 images, 5 categories). -3. We have pre-processed the annotations to make it compatible with MMPose. Please download the annotation files from [annotations](https://download.openmmlab.com/mmpose/datasets/animalpose_annotations.tar). If you would like to generate the annotations by yourself, please check our dataset parsing [codes](/tools/dataset_converters/parse_animalpose_dataset.py). - -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── animalpose - │ - │-- VOC2012 - │ │-- Annotations - │ │-- ImageSets - │ │-- JPEGImages - │ │-- SegmentationClass - │ │-- SegmentationObject - │ - │-- animalpose_image_part2 - │ │-- cat - │ │-- cow - │ │-- dog - │ │-- horse - │ │-- sheep - │ - │-- annotations - │ │-- animalpose_train.json - │ |-- animalpose_val.json - │ |-- animalpose_trainval.json - │ │-- animalpose_test.json - │ - │-- PASCAL2011_animal_annotation - │ │-- cat - │ │ |-- 2007_000528_1.xml - │ │ |-- 2007_000549_1.xml - │ │ │-- ... - │ │-- cow - │ │-- dog - │ │-- horse - │ │-- sheep - │ - │-- annimalpose_anno2 - │ │-- cat - │ │ |-- ca1.xml - │ │ |-- ca2.xml - │ │ │-- ... - │ │-- cow - │ │-- dog - │ │-- horse - │ │-- sheep -``` - -The official dataset does not provide the official train/val/test set split. -We choose the images from PascalVOC for train & val. In total, we have 3608 images and 5117 annotations for train+val, where -2798 images with 4000 annotations are used for training, and 810 images with 1117 annotations are used for validation. -Those images from other sources (1000 images with 1000 annotations) are used for testing. - -## AP-10K - - - -
-AP-10K (NeurIPS'2021) - -```bibtex -@misc{yu2021ap10k, - title={AP-10K: A Benchmark for Animal Pose Estimation in the Wild}, - author={Hang Yu and Yufei Xu and Jing Zhang and Wei Zhao and Ziyu Guan and Dacheng Tao}, - year={2021}, - eprint={2108.12617}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -
- -
- -
- -For [AP-10K](https://github.com/AlexTheBad/AP-10K/) dataset, images and annotations can be downloaded from [download](https://drive.google.com/file/d/1-FNNGcdtAQRehYYkGY1y4wzFNg4iWNad/view?usp=sharing). -Note, this data and annotation data is for non-commercial use only. - -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── ap10k - │-- annotations - │ │-- ap10k-train-split1.json - │ |-- ap10k-train-split2.json - │ |-- ap10k-train-split3.json - │ │-- ap10k-val-split1.json - │ |-- ap10k-val-split2.json - │ |-- ap10k-val-split3.json - │ |-- ap10k-test-split1.json - │ |-- ap10k-test-split2.json - │ |-- ap10k-test-split3.json - │-- data - │ │-- 000000000001.jpg - │ │-- 000000000002.jpg - │ │-- ... -``` - -The annotation files in 'annotation' folder contains 50 labeled animal species. There are total 10,015 labeled images with 13,028 instances in the AP-10K dataset. We randonly split them into train, val, and test set following the ratio of 7:1:2. - -## Horse-10 - - - -
-Horse-10 (WACV'2021) - -```bibtex -@inproceedings{mathis2021pretraining, - title={Pretraining boosts out-of-domain robustness for pose estimation}, - author={Mathis, Alexander and Biasi, Thomas and Schneider, Steffen and Yuksekgonul, Mert and Rogers, Byron and Bethge, Matthias and Mathis, Mackenzie W}, - booktitle={Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision}, - pages={1859--1868}, - year={2021} -} -``` - -
- -
- -
- -For [Horse-10](http://www.mackenziemathislab.org/horse10) dataset, images can be downloaded from [download](http://www.mackenziemathislab.org/horse10). -Please download the annotation files from [horse10_annotations](https://download.openmmlab.com/mmpose/datasets/horse10_annotations.tar). Note, this data and annotation data is for non-commercial use only, per the authors (see http://horse10.deeplabcut.org for more information). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── horse10 - │-- annotations - │ │-- horse10-train-split1.json - │ |-- horse10-train-split2.json - │ |-- horse10-train-split3.json - │ │-- horse10-test-split1.json - │ |-- horse10-test-split2.json - │ |-- horse10-test-split3.json - │-- labeled-data - │ │-- BrownHorseinShadow - │ │-- BrownHorseintoshadow - │ │-- ... -``` - -## MacaquePose - - - -
-MacaquePose (bioRxiv'2020) - -```bibtex -@article{labuguen2020macaquepose, - title={MacaquePose: A novel ‘in the wild’macaque monkey pose dataset for markerless motion capture}, - author={Labuguen, Rollyn and Matsumoto, Jumpei and Negrete, Salvador and Nishimaru, Hiroshi and Nishijo, Hisao and Takada, Masahiko and Go, Yasuhiro and Inoue, Ken-ichi and Shibata, Tomohiro}, - journal={bioRxiv}, - year={2020}, - publisher={Cold Spring Harbor Laboratory} -} -``` - -
- -
- -
- -For [MacaquePose](http://pri.ehub.kyoto-u.ac.jp/datasets/macaquepose/index.html) dataset, images can be downloaded from [download](http://pri.ehub.kyoto-u.ac.jp/datasets/macaquepose/download.php). -Please download the annotation files from [macaque_annotations](https://download.openmmlab.com/mmpose/datasets/macaque_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── macaque - │-- annotations - │ │-- macaque_train.json - │ |-- macaque_test.json - │-- images - │ │-- 01418849d54b3005.jpg - │ │-- 0142d1d1a6904a70.jpg - │ │-- 01ef2c4c260321b7.jpg - │ │-- 020a1c75c8c85238.jpg - │ │-- 020b1506eef2557d.jpg - │ │-- ... -``` - -Since the official dataset does not provide the test set, we randomly select 12500 images for training, and the rest for evaluation (see [code](/tools/dataset/parse_macaquepose_dataset.py)). - -## Vinegar Fly - - - -
-Vinegar Fly (Nature Methods'2019) - -```bibtex -@article{pereira2019fast, - title={Fast animal pose estimation using deep neural networks}, - author={Pereira, Talmo D and Aldarondo, Diego E and Willmore, Lindsay and Kislin, Mikhail and Wang, Samuel S-H and Murthy, Mala and Shaevitz, Joshua W}, - journal={Nature methods}, - volume={16}, - number={1}, - pages={117--125}, - year={2019}, - publisher={Nature Publishing Group} -} -``` - -
- -
- -
- -For [Vinegar Fly](https://github.com/jgraving/DeepPoseKit-Data) dataset, images can be downloaded from [vinegar_fly_images](https://download.openmmlab.com/mmpose/datasets/vinegar_fly_images.tar). -Please download the annotation files from [vinegar_fly_annotations](https://download.openmmlab.com/mmpose/datasets/vinegar_fly_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── fly - │-- annotations - │ │-- fly_train.json - │ |-- fly_test.json - │-- images - │ │-- 0.jpg - │ │-- 1.jpg - │ │-- 2.jpg - │ │-- 3.jpg - │ │-- ... -``` - -Since the official dataset does not provide the test set, we randomly select 90% images for training, and the rest (10%) for evaluation (see [code](/tools/dataset_converters/parse_deepposekit_dataset.py)). - -## Desert Locust - - - -
-Desert Locust (Elife'2019) - -```bibtex -@article{graving2019deepposekit, - title={DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning}, - author={Graving, Jacob M and Chae, Daniel and Naik, Hemal and Li, Liang and Koger, Benjamin and Costelloe, Blair R and Couzin, Iain D}, - journal={Elife}, - volume={8}, - pages={e47994}, - year={2019}, - publisher={eLife Sciences Publications Limited} -} -``` - -
- -
- -
- -For [Desert Locust](https://github.com/jgraving/DeepPoseKit-Data) dataset, images can be downloaded from [locust_images](https://download.openmmlab.com/mmpose/datasets/locust_images.tar). -Please download the annotation files from [locust_annotations](https://download.openmmlab.com/mmpose/datasets/locust_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── locust - │-- annotations - │ │-- locust_train.json - │ |-- locust_test.json - │-- images - │ │-- 0.jpg - │ │-- 1.jpg - │ │-- 2.jpg - │ │-- 3.jpg - │ │-- ... -``` - -Since the official dataset does not provide the test set, we randomly select 90% images for training, and the rest (10%) for evaluation (see [code](/tools/dataset_converters/parse_deepposekit_dataset.py)). - -## Grévy’s Zebra - - - -
-Grévy’s Zebra (Elife'2019) - -```bibtex -@article{graving2019deepposekit, - title={DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning}, - author={Graving, Jacob M and Chae, Daniel and Naik, Hemal and Li, Liang and Koger, Benjamin and Costelloe, Blair R and Couzin, Iain D}, - journal={Elife}, - volume={8}, - pages={e47994}, - year={2019}, - publisher={eLife Sciences Publications Limited} -} -``` - -
- -
- -
-For [Grévy’s Zebra](https://github.com/jgraving/DeepPoseKit-Data) dataset, images can be downloaded from [zebra_images](https://download.openmmlab.com/mmpose/datasets/zebra_images.tar). -Please download the annotation files from [zebra_annotations](https://download.openmmlab.com/mmpose/datasets/zebra_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── zebra - │-- annotations - │ │-- zebra_train.json - │ |-- zebra_test.json - │-- images - │ │-- 0.jpg - │ │-- 1.jpg - │ │-- 2.jpg - │ │-- 3.jpg - │ │-- ... -``` - -Since the official dataset does not provide the test set, we randomly select 90% images for training, and the rest (10%) for evaluation (see [code](/tools/dataset_converters/parse_deepposekit_dataset.py)). - -## ATRW - - - -
-ATRW (ACM MM'2020) - -```bibtex -@inproceedings{li2020atrw, - title={ATRW: A Benchmark for Amur Tiger Re-identification in the Wild}, - author={Li, Shuyuan and Li, Jianguo and Tang, Hanlin and Qian, Rui and Lin, Weiyao}, - booktitle={Proceedings of the 28th ACM International Conference on Multimedia}, - pages={2590--2598}, - year={2020} -} -``` - -
- -
- -
-ATRW captures images of the Amur tiger (also known as Siberian tiger, Northeast-China tiger) in the wild. -For [ATRW](https://cvwc2019.github.io/challenge.html) dataset, please download images from -[Pose_train](https://lilablobssc.blob.core.windows.net/cvwc2019/train/atrw_pose_train.tar.gz), -[Pose_val](https://lilablobssc.blob.core.windows.net/cvwc2019/train/atrw_pose_val.tar.gz), and -[Pose_test](https://lilablobssc.blob.core.windows.net/cvwc2019/test/atrw_pose_test.tar.gz). -Note that in the ATRW official annotation files, the key "file_name" is written as "filename". To make it compatible with -other coco-type json files, we have modified this key. -Please download the modified annotation files from [atrw_annotations](https://download.openmmlab.com/mmpose/datasets/atrw_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── atrw - │-- annotations - │ │-- keypoint_train.json - │ │-- keypoint_val.json - │ │-- keypoint_trainval.json - │-- images - │ │-- train - │ │ │-- 000002.jpg - │ │ │-- 000003.jpg - │ │ │-- ... - │ │-- val - │ │ │-- 000001.jpg - │ │ │-- 000013.jpg - │ │ │-- ... - │ │-- test - │ │ │-- 000000.jpg - │ │ │-- 000004.jpg - │ │ │-- ... -``` - -## Animal Kingdom - -
-Animal Kingdom (CVPR'2022) -
-
- -
- -```bibtex -@inproceedings{Ng_2022_CVPR, - author = {Ng, Xun Long and Ong, Kian Eng and Zheng, Qichen and Ni, Yun and Yeo, Si Yong and Liu, Jun}, - title = {Animal Kingdom: A Large and Diverse Dataset for Animal Behavior Understanding}, - booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, - month = {June}, - year = {2022}, - pages = {19023-19034} - } -``` - -For [Animal Kingdom](https://github.com/sutdcv/Animal-Kingdom) dataset, images can be downloaded from [here](https://forms.office.com/pages/responsepage.aspx?id=drd2NJDpck-5UGJImDFiPVRYpnTEMixKqPJ1FxwK6VZUQkNTSkRISTNORUI2TDBWMUpZTlQ5WUlaSyQlQCN0PWcu). -Please Extract dataset under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── ak - |--annotations - │ │-- ak_P1 - │ │ │-- train.json - │ │ │-- test.json - │ │-- ak_P2 - │ │ │-- train.json - │ │ │-- test.json - │ │-- ak_P3_amphibian - │ │ │-- train.json - │ │ │-- test.json - │ │-- ak_P3_bird - │ │ │-- train.json - │ │ │-- test.json - │ │-- ak_P3_fish - │ │ │-- train.json - │ │ │-- test.json - │ │-- ak_P3_mammal - │ │ │-- train.json - │ │ │-- test.json - │ │-- ak_P3_reptile - │ │-- train.json - │ │-- test.json - │-- images - │ │-- AAACXZTV - │ │ │--AAACXZTV_f000059.jpg - │ │ │--... - │ │-- AAAUILHH - │ │ │--AAAUILHH_f000098.jpg - │ │ │--... - │ │-- ... -``` +# 2D Animal Keypoint Dataset + +It is recommended to symlink the dataset root to `$MMPOSE/data`. +If your folder structure is different, you may need to change the corresponding paths in config files. + +MMPose supported datasets: + +- [Animal-Pose](#animal-pose) \[ [Homepage](https://sites.google.com/view/animal-pose/) \] +- [AP-10K](#ap-10k) \[ [Homepage](https://github.com/AlexTheBad/AP-10K/) \] +- [Horse-10](#horse-10) \[ [Homepage](http://www.mackenziemathislab.org/horse10) \] +- [MacaquePose](#macaquepose) \[ [Homepage](http://pri.ehub.kyoto-u.ac.jp/datasets/macaquepose/index.html) \] +- [Vinegar Fly](#vinegar-fly) \[ [Homepage](https://github.com/jgraving/DeepPoseKit-Data) \] +- [Desert Locust](#desert-locust) \[ [Homepage](https://github.com/jgraving/DeepPoseKit-Data) \] +- [Grévy’s Zebra](#grvys-zebra) \[ [Homepage](https://github.com/jgraving/DeepPoseKit-Data) \] +- [ATRW](#atrw) \[ [Homepage](https://cvwc2019.github.io/challenge.html) \] +- [Animal Kingdom](#Animal-Kindom) \[ [Homepage](https://openaccess.thecvf.com/content/CVPR2022/html/Ng_Animal_Kingdom_A_Large_and_Diverse_Dataset_for_Animal_Behavior_CVPR_2022_paper.html) \] + +## Animal-Pose + + + +
+Animal-Pose (ICCV'2019) + +```bibtex +@InProceedings{Cao_2019_ICCV, + author = {Cao, Jinkun and Tang, Hongyang and Fang, Hao-Shu and Shen, Xiaoyong and Lu, Cewu and Tai, Yu-Wing}, + title = {Cross-Domain Adaptation for Animal Pose Estimation}, + booktitle = {The IEEE International Conference on Computer Vision (ICCV)}, + month = {October}, + year = {2019} +} +``` + +
+ +
+ +
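The preparation steps below assemble Animal-Pose from the PASCAL VOC images and the pre-processed annotations. After extraction, a quick count is an easy way to confirm the split sizes quoted below; this is only a sketch and assumes the converted files follow the usual COCO-style layout with `images` and `annotations` lists and a data root of `data/animalpose`:

```python
import json

# Minimal sanity check after extracting Animal-Pose (a sketch; it assumes
# the pre-processed annotation files use COCO-style 'images' and
# 'annotations' lists and that the data root is data/animalpose).
ann_file = 'data/animalpose/annotations/animalpose_train.json'

with open(ann_file, 'r', encoding='utf-8') as f:
    ann = json.load(f)

# The split described below uses 2798 images / 4000 annotations for training.
print(f"images: {len(ann['images'])}, annotations: {len(ann['annotations'])}")
```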
+ +For [Animal-Pose](https://sites.google.com/view/animal-pose/) dataset, we prepare the dataset as follows: + +1. Download the images of [PASCAL VOC2012](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/#data), especially the five categories (dog, cat, sheep, cow, horse), which we use as trainval dataset. +2. Download the [test-set](https://drive.google.com/drive/folders/1DwhQobZlGntOXxdm7vQsE4bqbFmN3b9y?usp=sharing) images with raw annotations (1000 images, 5 categories). +3. We have pre-processed the annotations to make it compatible with MMPose. Please download the annotation files from [annotations](https://download.openmmlab.com/mmpose/datasets/animalpose_annotations.tar). If you would like to generate the annotations by yourself, please check our dataset parsing [codes](/tools/dataset_converters/parse_animalpose_dataset.py). + +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── animalpose + │ + │-- VOC2012 + │ │-- Annotations + │ │-- ImageSets + │ │-- JPEGImages + │ │-- SegmentationClass + │ │-- SegmentationObject + │ + │-- animalpose_image_part2 + │ │-- cat + │ │-- cow + │ │-- dog + │ │-- horse + │ │-- sheep + │ + │-- annotations + │ │-- animalpose_train.json + │ |-- animalpose_val.json + │ |-- animalpose_trainval.json + │ │-- animalpose_test.json + │ + │-- PASCAL2011_animal_annotation + │ │-- cat + │ │ |-- 2007_000528_1.xml + │ │ |-- 2007_000549_1.xml + │ │ │-- ... + │ │-- cow + │ │-- dog + │ │-- horse + │ │-- sheep + │ + │-- annimalpose_anno2 + │ │-- cat + │ │ |-- ca1.xml + │ │ |-- ca2.xml + │ │ │-- ... + │ │-- cow + │ │-- dog + │ │-- horse + │ │-- sheep +``` + +The official dataset does not provide the official train/val/test set split. +We choose the images from PascalVOC for train & val. In total, we have 3608 images and 5117 annotations for train+val, where +2798 images with 4000 annotations are used for training, and 810 images with 1117 annotations are used for validation. +Those images from other sources (1000 images with 1000 annotations) are used for testing. + +## AP-10K + + + +
+AP-10K (NeurIPS'2021) + +```bibtex +@misc{yu2021ap10k, + title={AP-10K: A Benchmark for Animal Pose Estimation in the Wild}, + author={Hang Yu and Yufei Xu and Jing Zhang and Wei Zhao and Ziyu Guan and Dacheng Tao}, + year={2021}, + eprint={2108.12617}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ +
+ +
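The note at the end of this section states that the 10,015 labeled images are randomly split into train/val/test at a ratio of 7:1:2. The snippet below is only an illustration of such a split; for reproducible results, use the released `ap10k-*-split*.json` files rather than re-splitting:

```python
import random

# Illustration of a 7:1:2 train/val/test split like the one described
# below. This is NOT the official split script; use the released
# ap10k-*-split*.json files for reproducible experiments.
image_ids = list(range(10015))  # AP-10K provides 10,015 labeled images
random.seed(0)
random.shuffle(image_ids)

n_train = int(0.7 * len(image_ids))
n_val = int(0.1 * len(image_ids))
train_ids = image_ids[:n_train]
val_ids = image_ids[n_train:n_train + n_val]
test_ids = image_ids[n_train + n_val:]
print(len(train_ids), len(val_ids), len(test_ids))  # 7010 1001 2004
```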
+ +For [AP-10K](https://github.com/AlexTheBad/AP-10K/) dataset, images and annotations can be downloaded from [download](https://drive.google.com/file/d/1-FNNGcdtAQRehYYkGY1y4wzFNg4iWNad/view?usp=sharing). +Note, this data and annotation data is for non-commercial use only. + +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── ap10k + │-- annotations + │ │-- ap10k-train-split1.json + │ |-- ap10k-train-split2.json + │ |-- ap10k-train-split3.json + │ │-- ap10k-val-split1.json + │ |-- ap10k-val-split2.json + │ |-- ap10k-val-split3.json + │ |-- ap10k-test-split1.json + │ |-- ap10k-test-split2.json + │ |-- ap10k-test-split3.json + │-- data + │ │-- 000000000001.jpg + │ │-- 000000000002.jpg + │ │-- ... +``` + +The annotation files in 'annotation' folder contains 50 labeled animal species. There are total 10,015 labeled images with 13,028 instances in the AP-10K dataset. We randonly split them into train, val, and test set following the ratio of 7:1:2. + +## Horse-10 + + + +
+Horse-10 (WACV'2021) + +```bibtex +@inproceedings{mathis2021pretraining, + title={Pretraining boosts out-of-domain robustness for pose estimation}, + author={Mathis, Alexander and Biasi, Thomas and Schneider, Steffen and Yuksekgonul, Mert and Rogers, Byron and Bethge, Matthias and Mathis, Mackenzie W}, + booktitle={Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision}, + pages={1859--1868}, + year={2021} +} +``` + +
+ +
+ +
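Horse-10 ships three official train/test splits. After extracting the images and annotations as described below, a quick existence check helps catch misplaced files; this is only a sketch and assumes the `data/horse10` layout shown in this section:

```python
import os.path as osp

# Quick existence check for the three official Horse-10 splits (a sketch;
# it assumes the annotations were extracted to data/horse10 as shown below).
data_root = 'data/horse10'
for split in (1, 2, 3):
    for subset in ('train', 'test'):
        ann = osp.join(data_root, 'annotations',
                       f'horse10-{subset}-split{split}.json')
        print(ann, 'ok' if osp.exists(ann) else 'MISSING')
```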
+ +For [Horse-10](http://www.mackenziemathislab.org/horse10) dataset, images can be downloaded from [download](http://www.mackenziemathislab.org/horse10). +Please download the annotation files from [horse10_annotations](https://download.openmmlab.com/mmpose/datasets/horse10_annotations.tar). Note, this data and annotation data is for non-commercial use only, per the authors (see http://horse10.deeplabcut.org for more information). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── horse10 + │-- annotations + │ │-- horse10-train-split1.json + │ |-- horse10-train-split2.json + │ |-- horse10-train-split3.json + │ │-- horse10-test-split1.json + │ |-- horse10-test-split2.json + │ |-- horse10-test-split3.json + │-- labeled-data + │ │-- BrownHorseinShadow + │ │-- BrownHorseintoshadow + │ │-- ... +``` + +## MacaquePose + + + +
+MacaquePose (bioRxiv'2020) + +```bibtex +@article{labuguen2020macaquepose, + title={MacaquePose: A novel ‘in the wild’macaque monkey pose dataset for markerless motion capture}, + author={Labuguen, Rollyn and Matsumoto, Jumpei and Negrete, Salvador and Nishimaru, Hiroshi and Nishijo, Hisao and Takada, Masahiko and Go, Yasuhiro and Inoue, Ken-ichi and Shibata, Tomohiro}, + journal={bioRxiv}, + year={2020}, + publisher={Cold Spring Harbor Laboratory} +} +``` + +
+ +
+ +
+ +For [MacaquePose](http://pri.ehub.kyoto-u.ac.jp/datasets/macaquepose/index.html) dataset, images can be downloaded from [download](http://pri.ehub.kyoto-u.ac.jp/datasets/macaquepose/download.php). +Please download the annotation files from [macaque_annotations](https://download.openmmlab.com/mmpose/datasets/macaque_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── macaque + │-- annotations + │ │-- macaque_train.json + │ |-- macaque_test.json + │-- images + │ │-- 01418849d54b3005.jpg + │ │-- 0142d1d1a6904a70.jpg + │ │-- 01ef2c4c260321b7.jpg + │ │-- 020a1c75c8c85238.jpg + │ │-- 020b1506eef2557d.jpg + │ │-- ... +``` + +Since the official dataset does not provide the test set, we randomly select 12500 images for training, and the rest for evaluation (see [code](/tools/dataset/parse_macaquepose_dataset.py)). + +## Vinegar Fly + + + +
+Vinegar Fly (Nature Methods'2019) + +```bibtex +@article{pereira2019fast, + title={Fast animal pose estimation using deep neural networks}, + author={Pereira, Talmo D and Aldarondo, Diego E and Willmore, Lindsay and Kislin, Mikhail and Wang, Samuel S-H and Murthy, Mala and Shaevitz, Joshua W}, + journal={Nature methods}, + volume={16}, + number={1}, + pages={117--125}, + year={2019}, + publisher={Nature Publishing Group} +} +``` + +
+ +
+ +
+ +For [Vinegar Fly](https://github.com/jgraving/DeepPoseKit-Data) dataset, images can be downloaded from [vinegar_fly_images](https://download.openmmlab.com/mmpose/datasets/vinegar_fly_images.tar). +Please download the annotation files from [vinegar_fly_annotations](https://download.openmmlab.com/mmpose/datasets/vinegar_fly_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── fly + │-- annotations + │ │-- fly_train.json + │ |-- fly_test.json + │-- images + │ │-- 0.jpg + │ │-- 1.jpg + │ │-- 2.jpg + │ │-- 3.jpg + │ │-- ... +``` + +Since the official dataset does not provide the test set, we randomly select 90% images for training, and the rest (10%) for evaluation (see [code](/tools/dataset_converters/parse_deepposekit_dataset.py)). + +## Desert Locust + + + +
+Desert Locust (Elife'2019) + +```bibtex +@article{graving2019deepposekit, + title={DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning}, + author={Graving, Jacob M and Chae, Daniel and Naik, Hemal and Li, Liang and Koger, Benjamin and Costelloe, Blair R and Couzin, Iain D}, + journal={Elife}, + volume={8}, + pages={e47994}, + year={2019}, + publisher={eLife Sciences Publications Limited} +} +``` + +
+ +
+ +
+ +For [Desert Locust](https://github.com/jgraving/DeepPoseKit-Data) dataset, images can be downloaded from [locust_images](https://download.openmmlab.com/mmpose/datasets/locust_images.tar). +Please download the annotation files from [locust_annotations](https://download.openmmlab.com/mmpose/datasets/locust_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── locust + │-- annotations + │ │-- locust_train.json + │ |-- locust_test.json + │-- images + │ │-- 0.jpg + │ │-- 1.jpg + │ │-- 2.jpg + │ │-- 3.jpg + │ │-- ... +``` + +Since the official dataset does not provide the test set, we randomly select 90% images for training, and the rest (10%) for evaluation (see [code](/tools/dataset_converters/parse_deepposekit_dataset.py)). + +## Grévy’s Zebra + + + +
+Grévy’s Zebra (Elife'2019) + +```bibtex +@article{graving2019deepposekit, + title={DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning}, + author={Graving, Jacob M and Chae, Daniel and Naik, Hemal and Li, Liang and Koger, Benjamin and Costelloe, Blair R and Couzin, Iain D}, + journal={Elife}, + volume={8}, + pages={e47994}, + year={2019}, + publisher={eLife Sciences Publications Limited} +} +``` + +
+ +
+ +
+For [Grévy’s Zebra](https://github.com/jgraving/DeepPoseKit-Data) dataset, images can be downloaded from [zebra_images](https://download.openmmlab.com/mmpose/datasets/zebra_images.tar). +Please download the annotation files from [zebra_annotations](https://download.openmmlab.com/mmpose/datasets/zebra_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── zebra + │-- annotations + │ │-- zebra_train.json + │ |-- zebra_test.json + │-- images + │ │-- 0.jpg + │ │-- 1.jpg + │ │-- 2.jpg + │ │-- 3.jpg + │ │-- ... +``` + +Since the official dataset does not provide the test set, we randomly select 90% images for training, and the rest (10%) for evaluation (see [code](/tools/dataset_converters/parse_deepposekit_dataset.py)). + +## ATRW + + + +
+ATRW (ACM MM'2020) + +```bibtex +@inproceedings{li2020atrw, + title={ATRW: A Benchmark for Amur Tiger Re-identification in the Wild}, + author={Li, Shuyuan and Li, Jianguo and Tang, Hanlin and Qian, Rui and Lin, Weiyao}, + booktitle={Proceedings of the 28th ACM International Conference on Multimedia}, + pages={2590--2598}, + year={2020} +} +``` + +
+ +
+ +
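As noted below, the official ATRW annotations use the key `"filename"` where COCO-style tooling expects `"file_name"`, and the pre-converted files linked in this section already include this fix. Purely as an illustration of that change (with `keypoint_train.json` standing in for an original ATRW annotation file), the renaming could look like:

```python
import json

# Sketch of the key fix described below: the official ATRW annotations use
# "filename" where COCO-style tools expect "file_name". The pre-converted
# files already contain this fix, so this is only illustrative;
# 'keypoint_train.json' stands in for an original ATRW annotation file.
with open('keypoint_train.json', 'r', encoding='utf-8') as f:
    ann = json.load(f)

for img in ann.get('images', []):
    if 'filename' in img:
        img['file_name'] = img.pop('filename')

with open('keypoint_train_fixed.json', 'w', encoding='utf-8') as f:
    json.dump(ann, f)
```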
+ATRW captures images of the Amur tiger (also known as Siberian tiger, Northeast-China tiger) in the wild. +For [ATRW](https://cvwc2019.github.io/challenge.html) dataset, please download images from +[Pose_train](https://lilablobssc.blob.core.windows.net/cvwc2019/train/atrw_pose_train.tar.gz), +[Pose_val](https://lilablobssc.blob.core.windows.net/cvwc2019/train/atrw_pose_val.tar.gz), and +[Pose_test](https://lilablobssc.blob.core.windows.net/cvwc2019/test/atrw_pose_test.tar.gz). +Note that in the ATRW official annotation files, the key "file_name" is written as "filename". To make it compatible with +other coco-type json files, we have modified this key. +Please download the modified annotation files from [atrw_annotations](https://download.openmmlab.com/mmpose/datasets/atrw_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── atrw + │-- annotations + │ │-- keypoint_train.json + │ │-- keypoint_val.json + │ │-- keypoint_trainval.json + │-- images + │ │-- train + │ │ │-- 000002.jpg + │ │ │-- 000003.jpg + │ │ │-- ... + │ │-- val + │ │ │-- 000001.jpg + │ │ │-- 000013.jpg + │ │ │-- ... + │ │-- test + │ │ │-- 000000.jpg + │ │ │-- 000004.jpg + │ │ │-- ... +``` + +## Animal Kingdom + +
+Animal Kingdom (CVPR'2022) +
+
+ +
+ +```bibtex +@inproceedings{Ng_2022_CVPR, + author = {Ng, Xun Long and Ong, Kian Eng and Zheng, Qichen and Ni, Yun and Yeo, Si Yong and Liu, Jun}, + title = {Animal Kingdom: A Large and Diverse Dataset for Animal Behavior Understanding}, + booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2022}, + pages = {19023-19034} + } +``` + +For [Animal Kingdom](https://github.com/sutdcv/Animal-Kingdom) dataset, images can be downloaded from [here](https://forms.office.com/pages/responsepage.aspx?id=drd2NJDpck-5UGJImDFiPVRYpnTEMixKqPJ1FxwK6VZUQkNTSkRISTNORUI2TDBWMUpZTlQ5WUlaSyQlQCN0PWcu). +Please Extract dataset under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── ak + |--annotations + │ │-- ak_P1 + │ │ │-- train.json + │ │ │-- test.json + │ │-- ak_P2 + │ │ │-- train.json + │ │ │-- test.json + │ │-- ak_P3_amphibian + │ │ │-- train.json + │ │ │-- test.json + │ │-- ak_P3_bird + │ │ │-- train.json + │ │ │-- test.json + │ │-- ak_P3_fish + │ │ │-- train.json + │ │ │-- test.json + │ │-- ak_P3_mammal + │ │ │-- train.json + │ │ │-- test.json + │ │-- ak_P3_reptile + │ │-- train.json + │ │-- test.json + │-- images + │ │-- AAACXZTV + │ │ │--AAACXZTV_f000059.jpg + │ │ │--... + │ │-- AAAUILHH + │ │ │--AAAUILHH_f000098.jpg + │ │ │--... + │ │-- ... +``` diff --git a/docs/en/dataset_zoo/2d_body_keypoint.md b/docs/en/dataset_zoo/2d_body_keypoint.md index 4448ebe8f4..3c68b1affc 100644 --- a/docs/en/dataset_zoo/2d_body_keypoint.md +++ b/docs/en/dataset_zoo/2d_body_keypoint.md @@ -1,588 +1,588 @@ -# 2D Body Keypoint Datasets - -It is recommended to symlink the dataset root to `$MMPOSE/data`. -If your folder structure is different, you may need to change the corresponding paths in config files. - -MMPose supported datasets: - -- Images - - [COCO](#coco) \[ [Homepage](http://cocodataset.org/) \] - - [MPII](#mpii) \[ [Homepage](http://human-pose.mpi-inf.mpg.de/) \] - - [MPII-TRB](#mpii-trb) \[ [Homepage](https://github.com/kennymckormick/Triplet-Representation-of-human-Body) \] - - [AI Challenger](#aic) \[ [Homepage](https://github.com/AIChallenger/AI_Challenger_2017) \] - - [CrowdPose](#crowdpose) \[ [Homepage](https://github.com/Jeff-sjtu/CrowdPose) \] - - [OCHuman](#ochuman) \[ [Homepage](https://github.com/liruilong940607/OCHumanApi) \] - - [MHP](#mhp) \[ [Homepage](https://lv-mhp.github.io/dataset) \] - - [Human-Art](#humanart) \[ [Homepage](https://idea-research.github.io/HumanArt/) \] -- Videos - - [PoseTrack18](#posetrack18) \[ [Homepage](https://posetrack.net/users/download.php) \] - - [sub-JHMDB](#sub-jhmdb-dataset) \[ [Homepage](http://jhmdb.is.tue.mpg.de/dataset) \] - -## COCO - - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -
- -
- -For [COCO](http://cocodataset.org/) data, please download from [COCO download](http://cocodataset.org/#download), 2017 Train/Val is needed for COCO keypoints training and validation. -[HRNet-Human-Pose-Estimation](https://github.com/HRNet/HRNet-Human-Pose-Estimation) provides person detection result of COCO val2017 to reproduce our multi-person pose estimation results. -Please download from [OneDrive](https://1drv.ms/f/s!AhIXJn_J-blWzzDXoz5BeFl8sWM-) or [GoogleDrive](https://drive.google.com/drive/folders/1fRUDNUDxe9fjqcRZ2bnF_TKMlO0nB_dk?usp=sharing). -Optionally, to evaluate on COCO'2017 test-dev, please download the [image-info](https://download.openmmlab.com/mmpose/datasets/person_keypoints_test-dev-2017.json). -Download and extract them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── coco - │-- annotations - │ │-- person_keypoints_train2017.json - │ |-- person_keypoints_val2017.json - │ |-- person_keypoints_test-dev-2017.json - |-- person_detection_results - | |-- COCO_val2017_detections_AP_H_56_person.json - | |-- COCO_test-dev2017_detections_AP_H_609_person.json - │-- train2017 - │ │-- 000000000009.jpg - │ │-- 000000000025.jpg - │ │-- 000000000030.jpg - │ │-- ... - `-- val2017 - │-- 000000000139.jpg - │-- 000000000285.jpg - │-- 000000000632.jpg - │-- ... - -``` - -## MPII - - - -
-MPII (CVPR'2014) - -```bibtex -@inproceedings{andriluka14cvpr, - author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, - title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, - booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2014}, - month = {June} -} -``` - -
- -
- -
- -For [MPII](http://human-pose.mpi-inf.mpg.de/) data, please download from [MPII Human Pose Dataset](http://human-pose.mpi-inf.mpg.de/). -We have converted the original annotation files into json format, please download them from [mpii_annotations](https://download.openmmlab.com/mmpose/datasets/mpii_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── mpii - |── annotations - | |── mpii_gt_val.mat - | |── mpii_test.json - | |── mpii_train.json - | |── mpii_trainval.json - | `── mpii_val.json - `── images - |── 000001163.jpg - |── 000003072.jpg - -``` - -During training and inference, the prediction result will be saved as '.mat' format by default. We also provide a tool to convert this '.mat' to more readable '.json' format. - -```shell -python tools/dataset/mat2json ${PRED_MAT_FILE} ${GT_JSON_FILE} ${OUTPUT_PRED_JSON_FILE} -``` - -For example, - -```shell -python tools/dataset/mat2json work_dirs/res50_mpii_256x256/pred.mat data/mpii/annotations/mpii_val.json pred.json -``` - -## MPII-TRB - - - -
-MPII-TRB (ICCV'2019) - -```bibtex -@inproceedings{duan2019trb, - title={TRB: A Novel Triplet Representation for Understanding 2D Human Body}, - author={Duan, Haodong and Lin, Kwan-Yee and Jin, Sheng and Liu, Wentao and Qian, Chen and Ouyang, Wanli}, - booktitle={Proceedings of the IEEE International Conference on Computer Vision}, - pages={9479--9488}, - year={2019} -} -``` - -
- -
- -
- -For [MPII-TRB](https://github.com/kennymckormick/Triplet-Representation-of-human-Body) data, please download from [MPII Human Pose Dataset](http://human-pose.mpi-inf.mpg.de/). -Please download the annotation files from [mpii_trb_annotations](https://download.openmmlab.com/mmpose/datasets/mpii_trb_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── mpii - |── annotations - | |── mpii_trb_train.json - | |── mpii_trb_val.json - `── images - |── 000001163.jpg - |── 000003072.jpg - -``` - -## AIC - - - -
-AI Challenger (ArXiv'2017) - -```bibtex -@article{wu2017ai, - title={Ai challenger: A large-scale dataset for going deeper in image understanding}, - author={Wu, Jiahong and Zheng, He and Zhao, Bo and Li, Yixin and Yan, Baoming and Liang, Rui and Wang, Wenjia and Zhou, Shipei and Lin, Guosen and Fu, Yanwei and others}, - journal={arXiv preprint arXiv:1711.06475}, - year={2017} -} -``` - -
- -
- -
- -For [AIC](https://github.com/AIChallenger/AI_Challenger_2017) data, please download from [AI Challenger 2017](https://github.com/AIChallenger/AI_Challenger_2017), 2017 Train/Val is needed for keypoints training and validation. -Please download the annotation files from [aic_annotations](https://download.openmmlab.com/mmpose/datasets/aic_annotations.tar). -Download and extract them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── aic - │-- annotations - │ │-- aic_train.json - │ |-- aic_val.json - │-- ai_challenger_keypoint_train_20170902 - │ │-- keypoint_train_images_20170902 - │ │ │-- 0000252aea98840a550dac9a78c476ecb9f47ffa.jpg - │ │ │-- 000050f770985ac9653198495ef9b5c82435d49c.jpg - │ │ │-- ... - `-- ai_challenger_keypoint_validation_20170911 - │-- keypoint_validation_images_20170911 - │-- 0002605c53fb92109a3f2de4fc3ce06425c3b61f.jpg - │-- 0003b55a2c991223e6d8b4b820045bd49507bf6d.jpg - │-- ... -``` - -## CrowdPose - - - -
-CrowdPose (CVPR'2019) - -```bibtex -@article{li2018crowdpose, - title={CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark}, - author={Li, Jiefeng and Wang, Can and Zhu, Hao and Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu}, - journal={arXiv preprint arXiv:1812.00324}, - year={2018} -} -``` - -
- -
- -
- -For [CrowdPose](https://github.com/Jeff-sjtu/CrowdPose) data, please download from [CrowdPose](https://github.com/Jeff-sjtu/CrowdPose). -Please download the annotation files and human detection results from [crowdpose_annotations](https://download.openmmlab.com/mmpose/datasets/crowdpose_annotations.tar). -For top-down approaches, we follow [CrowdPose](https://arxiv.org/abs/1812.00324) to use the [pre-trained weights](https://pjreddie.com/media/files/yolov3.weights) of [YOLOv3](https://github.com/eriklindernoren/PyTorch-YOLOv3) to generate the detected human bounding boxes. -For model training, we follow [HigherHRNet](https://github.com/HRNet/HigherHRNet-Human-Pose-Estimation) to train models on CrowdPose train/val dataset, and evaluate models on CrowdPose test dataset. -Download and extract them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── crowdpose - │-- annotations - │ │-- mmpose_crowdpose_train.json - │ │-- mmpose_crowdpose_val.json - │ │-- mmpose_crowdpose_trainval.json - │ │-- mmpose_crowdpose_test.json - │ │-- det_for_crowd_test_0.1_0.5.json - │-- images - │-- 100000.jpg - │-- 100001.jpg - │-- 100002.jpg - │-- ... -``` - -## OCHuman - - - -
-OCHuman (CVPR'2019) - -```bibtex -@inproceedings{zhang2019pose2seg, - title={Pose2seg: Detection free human instance segmentation}, - author={Zhang, Song-Hai and Li, Ruilong and Dong, Xin and Rosin, Paul and Cai, Zixi and Han, Xi and Yang, Dingcheng and Huang, Haozhi and Hu, Shi-Min}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={889--898}, - year={2019} -} -``` - -
- -
- -
- -For [OCHuman](https://github.com/liruilong940607/OCHumanApi) data, please download the images and annotations from [OCHuman](https://github.com/liruilong940607/OCHumanApi), -Move them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── ochuman - │-- annotations - │ │-- ochuman_coco_format_val_range_0.00_1.00.json - │ |-- ochuman_coco_format_test_range_0.00_1.00.json - |-- images - │-- 000001.jpg - │-- 000002.jpg - │-- 000003.jpg - │-- ... - -``` - -## MHP - - - -
-MHP (ACM MM'2018) - -```bibtex -@inproceedings{zhao2018understanding, - title={Understanding humans in crowded scenes: Deep nested adversarial learning and a new benchmark for multi-human parsing}, - author={Zhao, Jian and Li, Jianshu and Cheng, Yu and Sim, Terence and Yan, Shuicheng and Feng, Jiashi}, - booktitle={Proceedings of the 26th ACM international conference on Multimedia}, - pages={792--800}, - year={2018} -} -``` - -
- -
- -
- -For [MHP](https://lv-mhp.github.io/dataset) data, please download from [MHP](https://lv-mhp.github.io/dataset). -Please download the annotation files from [mhp_annotations](https://download.openmmlab.com/mmpose/datasets/mhp_annotations.tar.gz). -Please download and extract them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── mhp - │-- annotations - │ │-- mhp_train.json - │ │-- mhp_val.json - │ - `-- train - │ │-- images - │ │ │-- 1004.jpg - │ │ │-- 10050.jpg - │ │ │-- ... - │ - `-- val - │ │-- images - │ │ │-- 10059.jpg - │ │ │-- 10068.jpg - │ │ │-- ... - │ - `-- test - │ │-- images - │ │ │-- 1005.jpg - │ │ │-- 10052.jpg - │ │ │-- ...~~~~ -``` - -## Human-Art dataset - - - -
-Human-Art (CVPR'2023) - -```bibtex -@inproceedings{ju2023humanart, - title={Human-Art: A Versatile Human-Centric Dataset Bridging Natural and Artificial Scenes}, - author={Ju, Xuan and Zeng, Ailing and Jianan, Wang and Qiang, Xu and Lei, Zhang}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), - year={2023}} -``` - -
- -
- -
- -For [Human-Art](https://idea-research.github.io/HumanArt/) data, please download the images and annotation files from [its website](https://idea-research.github.io/HumanArt/). You need to fill in the [data form](https://docs.google.com/forms/d/e/1FAIpQLScroT_jvw6B9U2Qca1_cl5Kmmu1ceKtlh6DJNmWLte8xNEhEw/viewform) to get access to the data. -Move them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -|── data - │── HumanArt - │-- images - │ │-- 2D_virtual_human - │ │ |-- cartoon - │ │ | |-- 000000000000.jpg - │ │ | |-- ... - │ │ |-- digital_art - │ │ |-- ... - │ |-- 3D_virtual_human - │ |-- real_human - |-- annotations - │ │-- validation_humanart.json - │ │-- training_humanart_coco.json - |-- person_detection_results - │ │-- HumanArt_validation_detections_AP_H_56_person.json -``` - -You can choose whether to download other annotation files in Human-Art. If you want to use additional annotation files (e.g. validation set of cartoon), you need to edit the corresponding code in config file. - -## PoseTrack18 - - - -
-PoseTrack18 (CVPR'2018) - -```bibtex -@inproceedings{andriluka2018posetrack, - title={Posetrack: A benchmark for human pose estimation and tracking}, - author={Andriluka, Mykhaylo and Iqbal, Umar and Insafutdinov, Eldar and Pishchulin, Leonid and Milan, Anton and Gall, Juergen and Schiele, Bernt}, - booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, - pages={5167--5176}, - year={2018} -} -``` - -
- -
- -
- -For [PoseTrack18](https://posetrack.net/users/download.php) data, please download from [PoseTrack18](https://posetrack.net/users/download.php). -Please download the annotation files from [posetrack18_annotations](https://download.openmmlab.com/mmpose/datasets/posetrack18_annotations.tar). -We have merged the video-wise separated official annotation files into two json files (posetrack18_train & posetrack18_val.json). We also generate the [mask files](https://download.openmmlab.com/mmpose/datasets/posetrack18_mask.tar) to speed up training. -For top-down approaches, we use [MMDetection](https://github.com/open-mmlab/mmdetection) pre-trained [Cascade R-CNN](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco/cascade_rcnn_x101_64x4d_fpn_20e_coco_20200509_224357-051557b1.pth) (X-101-64x4d-FPN) to generate the detected human bounding boxes. -Please download and extract them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── posetrack18 - │-- annotations - │ │-- posetrack18_train.json - │ │-- posetrack18_val.json - │ │-- posetrack18_val_human_detections.json - │ │-- train - │ │ │-- 000001_bonn_train.json - │ │ │-- 000002_bonn_train.json - │ │ │-- ... - │ │-- val - │ │ │-- 000342_mpii_test.json - │ │ │-- 000522_mpii_test.json - │ │ │-- ... - │ `-- test - │ │-- 000001_mpiinew_test.json - │ │-- 000002_mpiinew_test.json - │ │-- ... - │ - `-- images - │ │-- train - │ │ │-- 000001_bonn_train - │ │ │ │-- 000000.jpg - │ │ │ │-- 000001.jpg - │ │ │ │-- ... - │ │ │-- ... - │ │-- val - │ │ │-- 000342_mpii_test - │ │ │ │-- 000000.jpg - │ │ │ │-- 000001.jpg - │ │ │ │-- ... - │ │ │-- ... - │ `-- test - │ │-- 000001_mpiinew_test - │ │ │-- 000000.jpg - │ │ │-- 000001.jpg - │ │ │-- ... - │ │-- ... - `-- mask - │-- train - │ │-- 000002_bonn_train - │ │ │-- 000000.jpg - │ │ │-- 000001.jpg - │ │ │-- ... - │ │-- ... - `-- val - │-- 000522_mpii_test - │ │-- 000000.jpg - │ │-- 000001.jpg - │ │-- ... - │-- ... -``` - -The official evaluation tool for PoseTrack should be installed from GitHub. - -```shell -pip install git+https://github.com/svenkreiss/poseval.git -``` - -## sub-JHMDB dataset - - - -
-RSN (ECCV'2020) - -```bibtex -@misc{cai2020learning, - title={Learning Delicate Local Representations for Multi-Person Pose Estimation}, - author={Yuanhao Cai and Zhicheng Wang and Zhengxiong Luo and Binyi Yin and Angang Du and Haoqian Wang and Xinyu Zhou and Erjin Zhou and Xiangyu Zhang and Jian Sun}, - year={2020}, - eprint={2003.04030}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -
- -
- -
- -For [sub-JHMDB](http://jhmdb.is.tue.mpg.de/dataset) data, please download the [images](<(http://files.is.tue.mpg.de/jhmdb/Rename_Images.tar.gz)>) from [JHMDB](http://jhmdb.is.tue.mpg.de/dataset), -Please download the annotation files from [jhmdb_annotations](https://download.openmmlab.com/mmpose/datasets/jhmdb_annotations.tar). -Move them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── jhmdb - │-- annotations - │ │-- Sub1_train.json - │ |-- Sub1_test.json - │ │-- Sub2_train.json - │ |-- Sub2_test.json - │ │-- Sub3_train.json - │ |-- Sub3_test.json - |-- Rename_Images - │-- brush_hair - │ │--April_09_brush_hair_u_nm_np1_ba_goo_0 - | │ │--00001.png - | │ │--00002.png - │-- catch - │-- ... - -``` +# 2D Body Keypoint Datasets + +It is recommended to symlink the dataset root to `$MMPOSE/data`. +If your folder structure is different, you may need to change the corresponding paths in config files. + +MMPose supported datasets: + +- Images + - [COCO](#coco) \[ [Homepage](http://cocodataset.org/) \] + - [MPII](#mpii) \[ [Homepage](http://human-pose.mpi-inf.mpg.de/) \] + - [MPII-TRB](#mpii-trb) \[ [Homepage](https://github.com/kennymckormick/Triplet-Representation-of-human-Body) \] + - [AI Challenger](#aic) \[ [Homepage](https://github.com/AIChallenger/AI_Challenger_2017) \] + - [CrowdPose](#crowdpose) \[ [Homepage](https://github.com/Jeff-sjtu/CrowdPose) \] + - [OCHuman](#ochuman) \[ [Homepage](https://github.com/liruilong940607/OCHumanApi) \] + - [MHP](#mhp) \[ [Homepage](https://lv-mhp.github.io/dataset) \] + - [Human-Art](#humanart) \[ [Homepage](https://idea-research.github.io/HumanArt/) \] +- Videos + - [PoseTrack18](#posetrack18) \[ [Homepage](https://posetrack.net/users/download.php) \] + - [sub-JHMDB](#sub-jhmdb-dataset) \[ [Homepage](http://jhmdb.is.tue.mpg.de/dataset) \] + +## COCO + + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
+ +
+ +
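+
+Once the files below are in place, an optional sanity check is to load the annotations and confirm that the listed images exist on disk. The sketch assumes `xtcocotools` (or `pycocotools`, which shares the same interface) is installed and that the layout matches the tree shown below:
+
+```python
+import os.path as osp
+
+from xtcocotools.coco import COCO  # pycocotools.coco.COCO works the same way
+
+data_root = 'data/coco'
+coco = COCO(osp.join(data_root, 'annotations', 'person_keypoints_val2017.json'))
+
+# List annotated images that are missing from val2017/ on disk.
+missing = [
+    img['file_name'] for img in coco.loadImgs(coco.getImgIds())
+    if not osp.exists(osp.join(data_root, 'val2017', img['file_name']))
+]
+print(f'{len(coco.getImgIds())} annotated images, {len(missing)} missing on disk')
+```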
+ +For [COCO](http://cocodataset.org/) data, please download from [COCO download](http://cocodataset.org/#download), 2017 Train/Val is needed for COCO keypoints training and validation. +[HRNet-Human-Pose-Estimation](https://github.com/HRNet/HRNet-Human-Pose-Estimation) provides person detection result of COCO val2017 to reproduce our multi-person pose estimation results. +Please download from [OneDrive](https://1drv.ms/f/s!AhIXJn_J-blWzzDXoz5BeFl8sWM-) or [GoogleDrive](https://drive.google.com/drive/folders/1fRUDNUDxe9fjqcRZ2bnF_TKMlO0nB_dk?usp=sharing). +Optionally, to evaluate on COCO'2017 test-dev, please download the [image-info](https://download.openmmlab.com/mmpose/datasets/person_keypoints_test-dev-2017.json). +Download and extract them under $MMPOSE/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── coco + │-- annotations + │ │-- person_keypoints_train2017.json + │ |-- person_keypoints_val2017.json + │ |-- person_keypoints_test-dev-2017.json + |-- person_detection_results + | |-- COCO_val2017_detections_AP_H_56_person.json + | |-- COCO_test-dev2017_detections_AP_H_609_person.json + │-- train2017 + │ │-- 000000000009.jpg + │ │-- 000000000025.jpg + │ │-- 000000000030.jpg + │ │-- ... + `-- val2017 + │-- 000000000139.jpg + │-- 000000000285.jpg + │-- 000000000632.jpg + │-- ... + +``` + +## MPII + + + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +
+ +
+ +For [MPII](http://human-pose.mpi-inf.mpg.de/) data, please download from [MPII Human Pose Dataset](http://human-pose.mpi-inf.mpg.de/). +We have converted the original annotation files into json format, please download them from [mpii_annotations](https://download.openmmlab.com/mmpose/datasets/mpii_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── mpii + |── annotations + | |── mpii_gt_val.mat + | |── mpii_test.json + | |── mpii_train.json + | |── mpii_trainval.json + | `── mpii_val.json + `── images + |── 000001163.jpg + |── 000003072.jpg + +``` + +During training and inference, the prediction result will be saved as '.mat' format by default. We also provide a tool to convert this '.mat' to more readable '.json' format. + +```shell +python tools/dataset/mat2json ${PRED_MAT_FILE} ${GT_JSON_FILE} ${OUTPUT_PRED_JSON_FILE} +``` + +For example, + +```shell +python tools/dataset/mat2json work_dirs/res50_mpii_256x256/pred.mat data/mpii/annotations/mpii_val.json pred.json +``` + +## MPII-TRB + + + +
+MPII-TRB (ICCV'2019) + +```bibtex +@inproceedings{duan2019trb, + title={TRB: A Novel Triplet Representation for Understanding 2D Human Body}, + author={Duan, Haodong and Lin, Kwan-Yee and Jin, Sheng and Liu, Wentao and Qian, Chen and Ouyang, Wanli}, + booktitle={Proceedings of the IEEE International Conference on Computer Vision}, + pages={9479--9488}, + year={2019} +} +``` + +
+ +
+ +
+ +For [MPII-TRB](https://github.com/kennymckormick/Triplet-Representation-of-human-Body) data, please download from [MPII Human Pose Dataset](http://human-pose.mpi-inf.mpg.de/). +Please download the annotation files from [mpii_trb_annotations](https://download.openmmlab.com/mmpose/datasets/mpii_trb_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── mpii + |── annotations + | |── mpii_trb_train.json + | |── mpii_trb_val.json + `── images + |── 000001163.jpg + |── 000003072.jpg + +``` + +## AIC + + + +
+AI Challenger (ArXiv'2017) + +```bibtex +@article{wu2017ai, + title={Ai challenger: A large-scale dataset for going deeper in image understanding}, + author={Wu, Jiahong and Zheng, He and Zhao, Bo and Li, Yixin and Yan, Baoming and Liang, Rui and Wang, Wenjia and Zhou, Shipei and Lin, Guosen and Fu, Yanwei and others}, + journal={arXiv preprint arXiv:1711.06475}, + year={2017} +} +``` + +
+ +
+ +
+ +For [AIC](https://github.com/AIChallenger/AI_Challenger_2017) data, please download from [AI Challenger 2017](https://github.com/AIChallenger/AI_Challenger_2017), 2017 Train/Val is needed for keypoints training and validation. +Please download the annotation files from [aic_annotations](https://download.openmmlab.com/mmpose/datasets/aic_annotations.tar). +Download and extract them under $MMPOSE/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── aic + │-- annotations + │ │-- aic_train.json + │ |-- aic_val.json + │-- ai_challenger_keypoint_train_20170902 + │ │-- keypoint_train_images_20170902 + │ │ │-- 0000252aea98840a550dac9a78c476ecb9f47ffa.jpg + │ │ │-- 000050f770985ac9653198495ef9b5c82435d49c.jpg + │ │ │-- ... + `-- ai_challenger_keypoint_validation_20170911 + │-- keypoint_validation_images_20170911 + │-- 0002605c53fb92109a3f2de4fc3ce06425c3b61f.jpg + │-- 0003b55a2c991223e6d8b4b820045bd49507bf6d.jpg + │-- ... +``` + +## CrowdPose + + + +
+CrowdPose (CVPR'2019) + +```bibtex +@article{li2018crowdpose, + title={CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark}, + author={Li, Jiefeng and Wang, Can and Zhu, Hao and Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu}, + journal={arXiv preprint arXiv:1812.00324}, + year={2018} +} +``` + +
+ +
+ +
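+
+The `det_for_crowd_test_0.1_0.5.json` file referenced below contains pre-computed human bounding boxes used for top-down evaluation. Such detection files generally follow the plain COCO detection-results format (a flat list of detections); assuming that format, a quick way to inspect the file is:
+
+```python
+import json
+
+with open('data/crowdpose/annotations/det_for_crowd_test_0.1_0.5.json') as f:
+    detections = json.load(f)
+
+print(len(detections), 'detections')
+# Each entry is expected to look like:
+# {'image_id': ..., 'category_id': 1, 'bbox': [x, y, w, h], 'score': ...}
+print(detections[0])
+```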
+ +For [CrowdPose](https://github.com/Jeff-sjtu/CrowdPose) data, please download from [CrowdPose](https://github.com/Jeff-sjtu/CrowdPose). +Please download the annotation files and human detection results from [crowdpose_annotations](https://download.openmmlab.com/mmpose/datasets/crowdpose_annotations.tar). +For top-down approaches, we follow [CrowdPose](https://arxiv.org/abs/1812.00324) to use the [pre-trained weights](https://pjreddie.com/media/files/yolov3.weights) of [YOLOv3](https://github.com/eriklindernoren/PyTorch-YOLOv3) to generate the detected human bounding boxes. +For model training, we follow [HigherHRNet](https://github.com/HRNet/HigherHRNet-Human-Pose-Estimation) to train models on CrowdPose train/val dataset, and evaluate models on CrowdPose test dataset. +Download and extract them under $MMPOSE/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── crowdpose + │-- annotations + │ │-- mmpose_crowdpose_train.json + │ │-- mmpose_crowdpose_val.json + │ │-- mmpose_crowdpose_trainval.json + │ │-- mmpose_crowdpose_test.json + │ │-- det_for_crowd_test_0.1_0.5.json + │-- images + │-- 100000.jpg + │-- 100001.jpg + │-- 100002.jpg + │-- ... +``` + +## OCHuman + + + +
+OCHuman (CVPR'2019) + +```bibtex +@inproceedings{zhang2019pose2seg, + title={Pose2seg: Detection free human instance segmentation}, + author={Zhang, Song-Hai and Li, Ruilong and Dong, Xin and Rosin, Paul and Cai, Zixi and Han, Xi and Yang, Dingcheng and Huang, Haozhi and Hu, Shi-Min}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={889--898}, + year={2019} +} +``` + +
+ +
+ +
+ +For [OCHuman](https://github.com/liruilong940607/OCHumanApi) data, please download the images and annotations from [OCHuman](https://github.com/liruilong940607/OCHumanApi), +Move them under $MMPOSE/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── ochuman + │-- annotations + │ │-- ochuman_coco_format_val_range_0.00_1.00.json + │ |-- ochuman_coco_format_test_range_0.00_1.00.json + |-- images + │-- 000001.jpg + │-- 000002.jpg + │-- 000003.jpg + │-- ... + +``` + +## MHP + + + +
+MHP (ACM MM'2018) + +```bibtex +@inproceedings{zhao2018understanding, + title={Understanding humans in crowded scenes: Deep nested adversarial learning and a new benchmark for multi-human parsing}, + author={Zhao, Jian and Li, Jianshu and Cheng, Yu and Sim, Terence and Yan, Shuicheng and Feng, Jiashi}, + booktitle={Proceedings of the 26th ACM international conference on Multimedia}, + pages={792--800}, + year={2018} +} +``` + +
+ +
+ +
+ +For [MHP](https://lv-mhp.github.io/dataset) data, please download from [MHP](https://lv-mhp.github.io/dataset). +Please download the annotation files from [mhp_annotations](https://download.openmmlab.com/mmpose/datasets/mhp_annotations.tar.gz). +Please download and extract them under $MMPOSE/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── mhp + │-- annotations + │ │-- mhp_train.json + │ │-- mhp_val.json + │ + `-- train + │ │-- images + │ │ │-- 1004.jpg + │ │ │-- 10050.jpg + │ │ │-- ... + │ + `-- val + │ │-- images + │ │ │-- 10059.jpg + │ │ │-- 10068.jpg + │ │ │-- ... + │ + `-- test + │ │-- images + │ │ │-- 1005.jpg + │ │ │-- 10052.jpg + │ │ │-- ...~~~~ +``` + +## Human-Art dataset + + + +
+Human-Art (CVPR'2023)
+
+```bibtex
+@inproceedings{ju2023humanart,
+    title={Human-Art: A Versatile Human-Centric Dataset Bridging Natural and Artificial Scenes},
+    author={Ju, Xuan and Zeng, Ailing and Wang, Jianan and Xu, Qiang and Zhang, Lei},
+    booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
+    year={2023}
+}
+```
+
+
+ +
+ +
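+
+As mentioned at the end of this section, using one of the optional per-category annotation files means editing the corresponding config. The following is only a hedged sketch of such an override in the MMEngine config style; both the base config name and the cartoon annotation filename are placeholders to be replaced with the files you actually downloaded:
+
+```python
+# Illustrative override, not an official MMPose config; file names are placeholders.
+_base_ = ['./td-hm_hrnet-w32_8xb64-210e_humanart-256x192.py']
+
+val_dataloader = dict(
+    dataset=dict(
+        ann_file='HumanArt/annotations/validation_humanart_cartoon.json'))
+test_dataloader = val_dataloader
+```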
+ +For [Human-Art](https://idea-research.github.io/HumanArt/) data, please download the images and annotation files from [its website](https://idea-research.github.io/HumanArt/). You need to fill in the [data form](https://docs.google.com/forms/d/e/1FAIpQLScroT_jvw6B9U2Qca1_cl5Kmmu1ceKtlh6DJNmWLte8xNEhEw/viewform) to get access to the data. +Move them under $MMPOSE/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +|── data + │── HumanArt + │-- images + │ │-- 2D_virtual_human + │ │ |-- cartoon + │ │ | |-- 000000000000.jpg + │ │ | |-- ... + │ │ |-- digital_art + │ │ |-- ... + │ |-- 3D_virtual_human + │ |-- real_human + |-- annotations + │ │-- validation_humanart.json + │ │-- training_humanart_coco.json + |-- person_detection_results + │ │-- HumanArt_validation_detections_AP_H_56_person.json +``` + +You can choose whether to download other annotation files in Human-Art. If you want to use additional annotation files (e.g. validation set of cartoon), you need to edit the corresponding code in config file. + +## PoseTrack18 + + + +
+PoseTrack18 (CVPR'2018) + +```bibtex +@inproceedings{andriluka2018posetrack, + title={Posetrack: A benchmark for human pose estimation and tracking}, + author={Andriluka, Mykhaylo and Iqbal, Umar and Insafutdinov, Eldar and Pishchulin, Leonid and Milan, Anton and Gall, Juergen and Schiele, Bernt}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + pages={5167--5176}, + year={2018} +} +``` + +
+ +
+ +
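+
+The paragraph below mentions that the video-wise official annotation files have been merged into single train/val JSON files; the released files should be used as-is. Purely to illustrate what such a merge involves (assuming the per-video files are COCO-style with globally unique ids, and with placeholder names), a sketch might look like:
+
+```python
+import json
+from glob import glob
+
+
+def merge_video_annotations(video_json_dir, out_file):
+    """Concatenate per-video COCO-style annotation files into one file."""
+    merged = {'images': [], 'annotations': [], 'categories': None}
+    for path in sorted(glob(f'{video_json_dir}/*.json')):
+        with open(path) as f:
+            data = json.load(f)
+        merged['images'].extend(data.get('images', []))
+        merged['annotations'].extend(data.get('annotations', []))
+        merged['categories'] = merged['categories'] or data.get('categories')
+    with open(out_file, 'w') as f:
+        json.dump(merged, f)
+```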
+ +For [PoseTrack18](https://posetrack.net/users/download.php) data, please download from [PoseTrack18](https://posetrack.net/users/download.php). +Please download the annotation files from [posetrack18_annotations](https://download.openmmlab.com/mmpose/datasets/posetrack18_annotations.tar). +We have merged the video-wise separated official annotation files into two json files (posetrack18_train & posetrack18_val.json). We also generate the [mask files](https://download.openmmlab.com/mmpose/datasets/posetrack18_mask.tar) to speed up training. +For top-down approaches, we use [MMDetection](https://github.com/open-mmlab/mmdetection) pre-trained [Cascade R-CNN](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco/cascade_rcnn_x101_64x4d_fpn_20e_coco_20200509_224357-051557b1.pth) (X-101-64x4d-FPN) to generate the detected human bounding boxes. +Please download and extract them under $MMPOSE/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── posetrack18 + │-- annotations + │ │-- posetrack18_train.json + │ │-- posetrack18_val.json + │ │-- posetrack18_val_human_detections.json + │ │-- train + │ │ │-- 000001_bonn_train.json + │ │ │-- 000002_bonn_train.json + │ │ │-- ... + │ │-- val + │ │ │-- 000342_mpii_test.json + │ │ │-- 000522_mpii_test.json + │ │ │-- ... + │ `-- test + │ │-- 000001_mpiinew_test.json + │ │-- 000002_mpiinew_test.json + │ │-- ... + │ + `-- images + │ │-- train + │ │ │-- 000001_bonn_train + │ │ │ │-- 000000.jpg + │ │ │ │-- 000001.jpg + │ │ │ │-- ... + │ │ │-- ... + │ │-- val + │ │ │-- 000342_mpii_test + │ │ │ │-- 000000.jpg + │ │ │ │-- 000001.jpg + │ │ │ │-- ... + │ │ │-- ... + │ `-- test + │ │-- 000001_mpiinew_test + │ │ │-- 000000.jpg + │ │ │-- 000001.jpg + │ │ │-- ... + │ │-- ... + `-- mask + │-- train + │ │-- 000002_bonn_train + │ │ │-- 000000.jpg + │ │ │-- 000001.jpg + │ │ │-- ... + │ │-- ... + `-- val + │-- 000522_mpii_test + │ │-- 000000.jpg + │ │-- 000001.jpg + │ │-- ... + │-- ... +``` + +The official evaluation tool for PoseTrack should be installed from GitHub. + +```shell +pip install git+https://github.com/svenkreiss/poseval.git +``` + +## sub-JHMDB dataset + + + +
+RSN (ECCV'2020) + +```bibtex +@misc{cai2020learning, + title={Learning Delicate Local Representations for Multi-Person Pose Estimation}, + author={Yuanhao Cai and Zhicheng Wang and Zhengxiong Luo and Binyi Yin and Angang Du and Haoqian Wang and Xinyu Zhou and Erjin Zhou and Xiangyu Zhang and Jian Sun}, + year={2020}, + eprint={2003.04030}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
+ +
+ +
+ +For [sub-JHMDB](http://jhmdb.is.tue.mpg.de/dataset) data, please download the [images](<(http://files.is.tue.mpg.de/jhmdb/Rename_Images.tar.gz)>) from [JHMDB](http://jhmdb.is.tue.mpg.de/dataset), +Please download the annotation files from [jhmdb_annotations](https://download.openmmlab.com/mmpose/datasets/jhmdb_annotations.tar). +Move them under $MMPOSE/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── jhmdb + │-- annotations + │ │-- Sub1_train.json + │ |-- Sub1_test.json + │ │-- Sub2_train.json + │ |-- Sub2_test.json + │ │-- Sub3_train.json + │ |-- Sub3_test.json + |-- Rename_Images + │-- brush_hair + │ │--April_09_brush_hair_u_nm_np1_ba_goo_0 + | │ │--00001.png + | │ │--00002.png + │-- catch + │-- ... + +``` diff --git a/docs/en/dataset_zoo/2d_face_keypoint.md b/docs/en/dataset_zoo/2d_face_keypoint.md index 62f66bd82b..13bbb5dec4 100644 --- a/docs/en/dataset_zoo/2d_face_keypoint.md +++ b/docs/en/dataset_zoo/2d_face_keypoint.md @@ -1,384 +1,384 @@ -# 2D Face Keypoint Datasets - -It is recommended to symlink the dataset root to `$MMPOSE/data`. -If your folder structure is different, you may need to change the corresponding paths in config files. - -MMPose supported datasets: - -- [300W](#300w-dataset) \[ [Homepage](https://ibug.doc.ic.ac.uk/resources/300-W/) \] -- [WFLW](#wflw-dataset) \[ [Homepage](https://wywu.github.io/projects/LAB/WFLW.html) \] -- [AFLW](#aflw-dataset) \[ [Homepage](https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/aflw/) \] -- [COFW](#cofw-dataset) \[ [Homepage](http://www.vision.caltech.edu/xpburgos/ICCV13/) \] -- [COCO-WholeBody-Face](#coco-wholebody-face) \[ [Homepage](https://github.com/jin-s13/COCO-WholeBody/) \] -- [LaPa](#lapa-dataset) \[ [Homepage](https://github.com/JDAI-CV/lapa-dataset) \] - -## 300W Dataset - - - -
-300W (IMAVIS'2016) - -```bibtex -@article{sagonas2016300, - title={300 faces in-the-wild challenge: Database and results}, - author={Sagonas, Christos and Antonakos, Epameinondas and Tzimiropoulos, Georgios and Zafeiriou, Stefanos and Pantic, Maja}, - journal={Image and vision computing}, - volume={47}, - pages={3--18}, - year={2016}, - publisher={Elsevier} -} -``` - -
- -
- -
- -For 300W data, please download images from [300W Dataset](https://ibug.doc.ic.ac.uk/resources/300-W/). -Please download the annotation files from [300w_annotations](https://download.openmmlab.com/mmpose/datasets/300w_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── 300w - |── annotations - | |── face_landmarks_300w_train.json - | |── face_landmarks_300w_valid.json - | |── face_landmarks_300w_valid_common.json - | |── face_landmarks_300w_valid_challenge.json - | |── face_landmarks_300w_test.json - `── images - |── afw - | |── 1051618982_1.jpg - | |── 111076519_1.jpg - | ... - |── helen - | |── trainset - | | |── 100032540_1.jpg - | | |── 100040721_1.jpg - | | ... - | |── testset - | | |── 296814969_3.jpg - | | |── 2968560214_1.jpg - | | ... - |── ibug - | |── image_003_1.jpg - | |── image_004_1.jpg - | ... - |── lfpw - | |── trainset - | | |── image_0001.png - | | |── image_0002.png - | | ... - | |── testset - | | |── image_0001.png - | | |── image_0002.png - | | ... - `── Test - |── 01_Indoor - | |── indoor_001.png - | |── indoor_002.png - | ... - `── 02_Outdoor - |── outdoor_001.png - |── outdoor_002.png - ... -``` - -## WFLW Dataset - - - -
-WFLW (CVPR'2018) - -```bibtex -@inproceedings{wu2018look, - title={Look at boundary: A boundary-aware face alignment algorithm}, - author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={2129--2138}, - year={2018} -} -``` - -
- -
- -
- -For WFLW data, please download images from [WFLW Dataset](https://wywu.github.io/projects/LAB/WFLW.html). -Please download the annotation files from [wflw_annotations](https://download.openmmlab.com/mmpose/datasets/wflw_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── wflw - |── annotations - | |── face_landmarks_wflw_train.json - | |── face_landmarks_wflw_test.json - | |── face_landmarks_wflw_test_blur.json - | |── face_landmarks_wflw_test_occlusion.json - | |── face_landmarks_wflw_test_expression.json - | |── face_landmarks_wflw_test_largepose.json - | |── face_landmarks_wflw_test_illumination.json - | |── face_landmarks_wflw_test_makeup.json - | - `── images - |── 0--Parade - | |── 0_Parade_marchingband_1_1015.jpg - | |── 0_Parade_marchingband_1_1031.jpg - | ... - |── 1--Handshaking - | |── 1_Handshaking_Handshaking_1_105.jpg - | |── 1_Handshaking_Handshaking_1_107.jpg - | ... - ... -``` - -## AFLW Dataset - - - -
-AFLW (ICCVW'2011) - -```bibtex -@inproceedings{koestinger2011annotated, - title={Annotated facial landmarks in the wild: A large-scale, real-world database for facial landmark localization}, - author={Koestinger, Martin and Wohlhart, Paul and Roth, Peter M and Bischof, Horst}, - booktitle={2011 IEEE international conference on computer vision workshops (ICCV workshops)}, - pages={2144--2151}, - year={2011}, - organization={IEEE} -} -``` - -
- -For AFLW data, please download images from [AFLW Dataset](https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/aflw/). -Please download the annotation files from [aflw_annotations](https://download.openmmlab.com/mmpose/datasets/aflw_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── aflw - |── annotations - | |── face_landmarks_aflw_train.json - | |── face_landmarks_aflw_test_frontal.json - | |── face_landmarks_aflw_test.json - `── images - |── flickr - |── 0 - | |── image00002.jpg - | |── image00013.jpg - | ... - |── 2 - | |── image00004.jpg - | |── image00006.jpg - | ... - `── 3 - |── image00032.jpg - |── image00035.jpg - ... -``` - -## COFW Dataset - - - -
-COFW (ICCV'2013) - -```bibtex -@inproceedings{burgos2013robust, - title={Robust face landmark estimation under occlusion}, - author={Burgos-Artizzu, Xavier P and Perona, Pietro and Doll{\'a}r, Piotr}, - booktitle={Proceedings of the IEEE international conference on computer vision}, - pages={1513--1520}, - year={2013} -} -``` - -
- -
- -
- -For COFW data, please download from [COFW Dataset (Color Images)](http://www.vision.caltech.edu/xpburgos/ICCV13/Data/COFW_color.zip). -Move `COFW_train_color.mat` and `COFW_test_color.mat` to `data/cofw/` and make them look like: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── cofw - |── COFW_train_color.mat - |── COFW_test_color.mat -``` - -Run the following script under `{MMPose}/data` - -`python tools/dataset_converters/parse_cofw_dataset.py` - -And you will get - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── cofw - |── COFW_train_color.mat - |── COFW_test_color.mat - |── annotations - | |── cofw_train.json - | |── cofw_test.json - |── images - |── 000001.jpg - |── 000002.jpg -``` - -## COCO-WholeBody (Face) - - - -
-COCO-WholeBody-Face (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -
- -
- -For [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody/) dataset, images can be downloaded from [COCO download](http://cocodataset.org/#download), 2017 Train/Val is needed for COCO keypoints training and validation. -Download COCO-WholeBody annotations for COCO-WholeBody annotations for [Train](https://drive.google.com/file/d/1thErEToRbmM9uLNi1JXXfOsaS5VK2FXf/view?usp=sharing) / [Validation](https://drive.google.com/file/d/1N6VgwKnj8DeyGXCvp1eYgNbRmw6jdfrb/view?usp=sharing) (Google Drive). -Download person detection result of COCO val2017 from [OneDrive](https://1drv.ms/f/s!AhIXJn_J-blWzzDXoz5BeFl8sWM-) or [GoogleDrive](https://drive.google.com/drive/folders/1fRUDNUDxe9fjqcRZ2bnF_TKMlO0nB_dk?usp=sharing). -Download and extract them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── coco - │-- annotations - │ │-- coco_wholebody_train_v1.0.json - │ |-- coco_wholebody_val_v1.0.json - |-- person_detection_results - | |-- COCO_val2017_detections_AP_H_56_person.json - │-- train2017 - │ │-- 000000000009.jpg - │ │-- 000000000025.jpg - │ │-- 000000000030.jpg - │ │-- ... - `-- val2017 - │-- 000000000139.jpg - │-- 000000000285.jpg - │-- 000000000632.jpg - │-- ... - -``` - -Please also install the latest version of [Extended COCO API](https://github.com/jin-s13/xtcocoapi) to support COCO-WholeBody evaluation: - -`pip install xtcocotools` - -## LaPa - - - -
-LaPa (AAAI'2020) - -```bibtex -@inproceedings{liu2020new, - title={A New Dataset and Boundary-Attention Semantic Segmentation for Face Parsing.}, - author={Liu, Yinglu and Shi, Hailin and Shen, Hao and Si, Yue and Wang, Xiaobo and Mei, Tao}, - booktitle={AAAI}, - pages={11637--11644}, - year={2020} -} -``` - -
- -
- -
- -For [LaPa](https://github.com/JDAI-CV/lapa-dataset) dataset, images can be downloaded from [their github page](https://github.com/JDAI-CV/lapa-dataset). - -Download and extract them under $MMPOSE/data, and use our `tools/dataset_converters/lapa2coco.py` to make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── LaPa - │-- annotations - │ │-- lapa_train.json - │ |-- lapa_val.json - │ |-- lapa_test.json - | |-- lapa_trainval.json - │-- train - │ │-- images - │ │-- labels - │ │-- landmarks - │-- val - │ │-- images - │ │-- labels - │ │-- landmarks - `-- test - │ │-- images - │ │-- labels - │ │-- landmarks - -``` +# 2D Face Keypoint Datasets + +It is recommended to symlink the dataset root to `$MMPOSE/data`. +If your folder structure is different, you may need to change the corresponding paths in config files. + +MMPose supported datasets: + +- [300W](#300w-dataset) \[ [Homepage](https://ibug.doc.ic.ac.uk/resources/300-W/) \] +- [WFLW](#wflw-dataset) \[ [Homepage](https://wywu.github.io/projects/LAB/WFLW.html) \] +- [AFLW](#aflw-dataset) \[ [Homepage](https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/aflw/) \] +- [COFW](#cofw-dataset) \[ [Homepage](http://www.vision.caltech.edu/xpburgos/ICCV13/) \] +- [COCO-WholeBody-Face](#coco-wholebody-face) \[ [Homepage](https://github.com/jin-s13/COCO-WholeBody/) \] +- [LaPa](#lapa-dataset) \[ [Homepage](https://github.com/JDAI-CV/lapa-dataset) \] + +## 300W Dataset + + + +
+300W (IMAVIS'2016) + +```bibtex +@article{sagonas2016300, + title={300 faces in-the-wild challenge: Database and results}, + author={Sagonas, Christos and Antonakos, Epameinondas and Tzimiropoulos, Georgios and Zafeiriou, Stefanos and Pantic, Maja}, + journal={Image and vision computing}, + volume={47}, + pages={3--18}, + year={2016}, + publisher={Elsevier} +} +``` + +
+ +
+ +
+ +For 300W data, please download images from [300W Dataset](https://ibug.doc.ic.ac.uk/resources/300-W/). +Please download the annotation files from [300w_annotations](https://download.openmmlab.com/mmpose/datasets/300w_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── 300w + |── annotations + | |── face_landmarks_300w_train.json + | |── face_landmarks_300w_valid.json + | |── face_landmarks_300w_valid_common.json + | |── face_landmarks_300w_valid_challenge.json + | |── face_landmarks_300w_test.json + `── images + |── afw + | |── 1051618982_1.jpg + | |── 111076519_1.jpg + | ... + |── helen + | |── trainset + | | |── 100032540_1.jpg + | | |── 100040721_1.jpg + | | ... + | |── testset + | | |── 296814969_3.jpg + | | |── 2968560214_1.jpg + | | ... + |── ibug + | |── image_003_1.jpg + | |── image_004_1.jpg + | ... + |── lfpw + | |── trainset + | | |── image_0001.png + | | |── image_0002.png + | | ... + | |── testset + | | |── image_0001.png + | | |── image_0002.png + | | ... + `── Test + |── 01_Indoor + | |── indoor_001.png + | |── indoor_002.png + | ... + `── 02_Outdoor + |── outdoor_001.png + |── outdoor_002.png + ... +``` + +## WFLW Dataset + + + +
+WFLW (CVPR'2018) + +```bibtex +@inproceedings{wu2018look, + title={Look at boundary: A boundary-aware face alignment algorithm}, + author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={2129--2138}, + year={2018} +} +``` + +
+ +
+ +
+ +For WFLW data, please download images from [WFLW Dataset](https://wywu.github.io/projects/LAB/WFLW.html). +Please download the annotation files from [wflw_annotations](https://download.openmmlab.com/mmpose/datasets/wflw_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── wflw + |── annotations + | |── face_landmarks_wflw_train.json + | |── face_landmarks_wflw_test.json + | |── face_landmarks_wflw_test_blur.json + | |── face_landmarks_wflw_test_occlusion.json + | |── face_landmarks_wflw_test_expression.json + | |── face_landmarks_wflw_test_largepose.json + | |── face_landmarks_wflw_test_illumination.json + | |── face_landmarks_wflw_test_makeup.json + | + `── images + |── 0--Parade + | |── 0_Parade_marchingband_1_1015.jpg + | |── 0_Parade_marchingband_1_1031.jpg + | ... + |── 1--Handshaking + | |── 1_Handshaking_Handshaking_1_105.jpg + | |── 1_Handshaking_Handshaking_1_107.jpg + | ... + ... +``` + +## AFLW Dataset + + + +
+AFLW (ICCVW'2011) + +```bibtex +@inproceedings{koestinger2011annotated, + title={Annotated facial landmarks in the wild: A large-scale, real-world database for facial landmark localization}, + author={Koestinger, Martin and Wohlhart, Paul and Roth, Peter M and Bischof, Horst}, + booktitle={2011 IEEE international conference on computer vision workshops (ICCV workshops)}, + pages={2144--2151}, + year={2011}, + organization={IEEE} +} +``` + +
+ +For AFLW data, please download images from [AFLW Dataset](https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/aflw/). +Please download the annotation files from [aflw_annotations](https://download.openmmlab.com/mmpose/datasets/aflw_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── aflw + |── annotations + | |── face_landmarks_aflw_train.json + | |── face_landmarks_aflw_test_frontal.json + | |── face_landmarks_aflw_test.json + `── images + |── flickr + |── 0 + | |── image00002.jpg + | |── image00013.jpg + | ... + |── 2 + | |── image00004.jpg + | |── image00006.jpg + | ... + `── 3 + |── image00032.jpg + |── image00035.jpg + ... +``` + +## COFW Dataset + + + +
+COFW (ICCV'2013) + +```bibtex +@inproceedings{burgos2013robust, + title={Robust face landmark estimation under occlusion}, + author={Burgos-Artizzu, Xavier P and Perona, Pietro and Doll{\'a}r, Piotr}, + booktitle={Proceedings of the IEEE international conference on computer vision}, + pages={1513--1520}, + year={2013} +} +``` + +
+ +
+ +
+ +For COFW data, please download from [COFW Dataset (Color Images)](http://www.vision.caltech.edu/xpburgos/ICCV13/Data/COFW_color.zip). +Move `COFW_train_color.mat` and `COFW_test_color.mat` to `data/cofw/` and make them look like: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── cofw + |── COFW_train_color.mat + |── COFW_test_color.mat +``` + +Run the following script under `{MMPose}/data` + +`python tools/dataset_converters/parse_cofw_dataset.py` + +And you will get + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── cofw + |── COFW_train_color.mat + |── COFW_test_color.mat + |── annotations + | |── cofw_train.json + | |── cofw_test.json + |── images + |── 000001.jpg + |── 000002.jpg +``` + +## COCO-WholeBody (Face) + + + +
+COCO-WholeBody-Face (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +
+ +
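+
+COCO-WholeBody extends the standard COCO keypoint format with face-specific fields. After installing `xtcocotools` (see the note at the end of this section), a quick check that the face annotations load as expected might look like this (field names follow the COCO-WholeBody format; paths assume the layout below):
+
+```python
+from xtcocotools.coco import COCO
+
+coco = COCO('data/coco/annotations/coco_wholebody_val_v1.0.json')
+ann = coco.loadAnns(coco.getAnnIds())[0]
+
+# COCO-WholeBody stores 68 face keypoints per instance as a flat [x, y, v] list.
+print('face_valid:', ann['face_valid'])
+print('num face keypoints:', len(ann['face_kpts']) // 3)
+```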
+ +For [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody/) dataset, images can be downloaded from [COCO download](http://cocodataset.org/#download), 2017 Train/Val is needed for COCO keypoints training and validation. +Download COCO-WholeBody annotations for COCO-WholeBody annotations for [Train](https://drive.google.com/file/d/1thErEToRbmM9uLNi1JXXfOsaS5VK2FXf/view?usp=sharing) / [Validation](https://drive.google.com/file/d/1N6VgwKnj8DeyGXCvp1eYgNbRmw6jdfrb/view?usp=sharing) (Google Drive). +Download person detection result of COCO val2017 from [OneDrive](https://1drv.ms/f/s!AhIXJn_J-blWzzDXoz5BeFl8sWM-) or [GoogleDrive](https://drive.google.com/drive/folders/1fRUDNUDxe9fjqcRZ2bnF_TKMlO0nB_dk?usp=sharing). +Download and extract them under $MMPOSE/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── coco + │-- annotations + │ │-- coco_wholebody_train_v1.0.json + │ |-- coco_wholebody_val_v1.0.json + |-- person_detection_results + | |-- COCO_val2017_detections_AP_H_56_person.json + │-- train2017 + │ │-- 000000000009.jpg + │ │-- 000000000025.jpg + │ │-- 000000000030.jpg + │ │-- ... + `-- val2017 + │-- 000000000139.jpg + │-- 000000000285.jpg + │-- 000000000632.jpg + │-- ... + +``` + +Please also install the latest version of [Extended COCO API](https://github.com/jin-s13/xtcocoapi) to support COCO-WholeBody evaluation: + +`pip install xtcocotools` + +## LaPa + + + +
+LaPa (AAAI'2020) + +```bibtex +@inproceedings{liu2020new, + title={A New Dataset and Boundary-Attention Semantic Segmentation for Face Parsing.}, + author={Liu, Yinglu and Shi, Hailin and Shen, Hao and Si, Yue and Wang, Xiaobo and Mei, Tao}, + booktitle={AAAI}, + pages={11637--11644}, + year={2020} +} +``` + +
+ +
+ +
+ +For [LaPa](https://github.com/JDAI-CV/lapa-dataset) dataset, images can be downloaded from [their github page](https://github.com/JDAI-CV/lapa-dataset). + +Download and extract them under $MMPOSE/data, and use our `tools/dataset_converters/lapa2coco.py` to make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── LaPa + │-- annotations + │ │-- lapa_train.json + │ |-- lapa_val.json + │ |-- lapa_test.json + | |-- lapa_trainval.json + │-- train + │ │-- images + │ │-- labels + │ │-- landmarks + │-- val + │ │-- images + │ │-- labels + │ │-- landmarks + `-- test + │ │-- images + │ │-- labels + │ │-- landmarks + +``` diff --git a/docs/en/dataset_zoo/2d_fashion_landmark.md b/docs/en/dataset_zoo/2d_fashion_landmark.md index b1146b47b6..c13ee0308d 100644 --- a/docs/en/dataset_zoo/2d_fashion_landmark.md +++ b/docs/en/dataset_zoo/2d_fashion_landmark.md @@ -1,142 +1,142 @@ -# 2D Fashion Landmark Dataset - -It is recommended to symlink the dataset root to `$MMPOSE/data`. -If your folder structure is different, you may need to change the corresponding paths in config files. - -MMPose supported datasets: - -- [DeepFashion](#deepfashion) \[ [Homepage](http://mmlab.ie.cuhk.edu.hk/projects/DeepFashion/LandmarkDetection.html) \] -- [DeepFashion2](#deepfashion2) \[ [Homepage](https://github.com/switchablenorms/DeepFashion2) \] - -## DeepFashion (Fashion Landmark Detection, FLD) - - - -
-DeepFashion (CVPR'2016) - -```bibtex -@inproceedings{liuLQWTcvpr16DeepFashion, - author = {Liu, Ziwei and Luo, Ping and Qiu, Shi and Wang, Xiaogang and Tang, Xiaoou}, - title = {DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations}, - booktitle = {Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - month = {June}, - year = {2016} -} -``` - -
- - - -
-DeepFashion (ECCV'2016) - -```bibtex -@inproceedings{liuYLWTeccv16FashionLandmark, - author = {Liu, Ziwei and Yan, Sijie and Luo, Ping and Wang, Xiaogang and Tang, Xiaoou}, - title = {Fashion Landmark Detection in the Wild}, - booktitle = {European Conference on Computer Vision (ECCV)}, - month = {October}, - year = {2016} - } -``` - -
- -
- -
- -For [DeepFashion](http://mmlab.ie.cuhk.edu.hk/projects/DeepFashion/LandmarkDetection.html) dataset, images can be downloaded from [download](http://mmlab.ie.cuhk.edu.hk/projects/DeepFashion/LandmarkDetection.html). -Please download the annotation files from [fld_annotations](https://download.openmmlab.com/mmpose/datasets/fld_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── fld - │-- annotations - │ │-- fld_upper_train.json - │ |-- fld_upper_val.json - │ |-- fld_upper_test.json - │ │-- fld_lower_train.json - │ |-- fld_lower_val.json - │ |-- fld_lower_test.json - │ │-- fld_full_train.json - │ |-- fld_full_val.json - │ |-- fld_full_test.json - │-- img - │ │-- img_00000001.jpg - │ │-- img_00000002.jpg - │ │-- img_00000003.jpg - │ │-- img_00000004.jpg - │ │-- img_00000005.jpg - │ │-- ... -``` - -## DeepFashion2 - - - -
-DeepFashion2 (CVPR'2019) - -```bibtex -@article{DeepFashion2, - author = {Yuying Ge and Ruimao Zhang and Lingyun Wu and Xiaogang Wang and Xiaoou Tang and Ping Luo}, - title={A Versatile Benchmark for Detection, Pose Estimation, Segmentation and Re-Identification of Clothing Images}, - journal={CVPR}, - year={2019} -} -``` - -
- - - -For [DeepFashion2](https://github.com/switchablenorms/DeepFashion2) dataset, images can be downloaded from [download](https://drive.google.com/drive/folders/125F48fsMBz2EF0Cpqk6aaHet5VH399Ok?usp=sharing). -Please download the [annotation files](https://drive.google.com/file/d/1RM9l9EaB9ULRXhoCS72PkCXtJ4Cn4i6O/view?usp=share_link). These annotation files are converted by [deepfashion2_to_coco.py](https://github.com/switchablenorms/DeepFashion2/blob/master/evaluation/deepfashion2_to_coco.py). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── deepfashion2 - │── train - │-- deepfashion2_short_sleeved_outwear_train.json - │-- deepfashion2_short_sleeved_dress_train.json - │-- deepfashion2_skirt_train.json - │-- deepfashion2_sling_dress_train.json - │-- ... - │-- image - │ │-- 000001.jpg - │ │-- 000002.jpg - │ │-- 000003.jpg - │ │-- 000004.jpg - │ │-- 000005.jpg - │ │-- ... - │── validation - │-- deepfashion2_short_sleeved_dress_validation.json - │-- deepfashion2_long_sleeved_shirt_validation.json - │-- deepfashion2_trousers_validation.json - │-- deepfashion2_skirt_validation.json - │-- ... - │-- image - │ │-- 000001.jpg - │ │-- 000002.jpg - │ │-- 000003.jpg - │ │-- 000004.jpg - │ │-- 000005.jpg - │ │-- ... -``` +# 2D Fashion Landmark Dataset + +It is recommended to symlink the dataset root to `$MMPOSE/data`. +If your folder structure is different, you may need to change the corresponding paths in config files. + +MMPose supported datasets: + +- [DeepFashion](#deepfashion) \[ [Homepage](http://mmlab.ie.cuhk.edu.hk/projects/DeepFashion/LandmarkDetection.html) \] +- [DeepFashion2](#deepfashion2) \[ [Homepage](https://github.com/switchablenorms/DeepFashion2) \] + +## DeepFashion (Fashion Landmark Detection, FLD) + + + +
+DeepFashion (CVPR'2016) + +```bibtex +@inproceedings{liuLQWTcvpr16DeepFashion, + author = {Liu, Ziwei and Luo, Ping and Qiu, Shi and Wang, Xiaogang and Tang, Xiaoou}, + title = {DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations}, + booktitle = {Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2016} +} +``` + +
+ + + +
+DeepFashion (ECCV'2016) + +```bibtex +@inproceedings{liuYLWTeccv16FashionLandmark, + author = {Liu, Ziwei and Yan, Sijie and Luo, Ping and Wang, Xiaogang and Tang, Xiaoou}, + title = {Fashion Landmark Detection in the Wild}, + booktitle = {European Conference on Computer Vision (ECCV)}, + month = {October}, + year = {2016} + } +``` + +
+ +
+ +
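+Once the files are arranged as described below, a quick check that every split annotation file is present and readable can save a failed training run. This is a minimal sketch, assuming the `fld_*.json` files follow a COCO-style layout (top-level `images` and `annotations` lists); adjust the keys if your copy differs.
+
+```python
+import json
+from pathlib import Path
+
+data_root = Path("data/fld")  # relative to $MMPOSE
+
+# The nine split files listed in the directory tree below.
+split_files = [
+    f"fld_{subset}_{phase}.json"
+    for subset in ("upper", "lower", "full")
+    for phase in ("train", "val", "test")
+]
+
+for name in split_files:
+    ann_file = data_root / "annotations" / name
+    if not ann_file.exists():
+        print(f"missing: {ann_file}")
+        continue
+    with open(ann_file) as f:
+        ann = json.load(f)
+    # 'images' / 'annotations' are assumed COCO-style keys.
+    print(f"{name}: {len(ann.get('images', []))} images, "
+          f"{len(ann.get('annotations', []))} annotations")
+```
+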
+ +For [DeepFashion](http://mmlab.ie.cuhk.edu.hk/projects/DeepFashion/LandmarkDetection.html) dataset, images can be downloaded from [download](http://mmlab.ie.cuhk.edu.hk/projects/DeepFashion/LandmarkDetection.html). +Please download the annotation files from [fld_annotations](https://download.openmmlab.com/mmpose/datasets/fld_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── fld + │-- annotations + │ │-- fld_upper_train.json + │ |-- fld_upper_val.json + │ |-- fld_upper_test.json + │ │-- fld_lower_train.json + │ |-- fld_lower_val.json + │ |-- fld_lower_test.json + │ │-- fld_full_train.json + │ |-- fld_full_val.json + │ |-- fld_full_test.json + │-- img + │ │-- img_00000001.jpg + │ │-- img_00000002.jpg + │ │-- img_00000003.jpg + │ │-- img_00000004.jpg + │ │-- img_00000005.jpg + │ │-- ... +``` + +## DeepFashion2 + + + +
+DeepFashion2 (CVPR'2019) + +```bibtex +@article{DeepFashion2, + author = {Yuying Ge and Ruimao Zhang and Lingyun Wu and Xiaogang Wang and Xiaoou Tang and Ping Luo}, + title={A Versatile Benchmark for Detection, Pose Estimation, Segmentation and Re-Identification of Clothing Images}, + journal={CVPR}, + year={2019} +} +``` + +
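+DeepFashion2 ships one converted annotation file per clothing category (see the listing below), so it is easy to miss a file when extracting. The sketch below only checks the folder layout shown below; the paths are assumptions if your structure differs.
+
+```python
+from pathlib import Path
+
+data_root = Path("data/deepfashion2")  # relative to $MMPOSE
+
+for split in ("train", "validation"):
+    split_dir = data_root / split
+    # One JSON per clothing category, e.g. deepfashion2_skirt_train.json.
+    ann_files = sorted(split_dir.glob(f"deepfashion2_*_{split}.json")) if split_dir.is_dir() else []
+    img_dir = split_dir / "image"
+    n_imgs = len(list(img_dir.glob("*.jpg"))) if img_dir.is_dir() else 0
+    print(f"{split}: {len(ann_files)} category annotation files, {n_imgs} images")
+```
+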
+ + + +For [DeepFashion2](https://github.com/switchablenorms/DeepFashion2) dataset, images can be downloaded from [download](https://drive.google.com/drive/folders/125F48fsMBz2EF0Cpqk6aaHet5VH399Ok?usp=sharing). +Please download the [annotation files](https://drive.google.com/file/d/1RM9l9EaB9ULRXhoCS72PkCXtJ4Cn4i6O/view?usp=share_link). These annotation files are converted by [deepfashion2_to_coco.py](https://github.com/switchablenorms/DeepFashion2/blob/master/evaluation/deepfashion2_to_coco.py). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── deepfashion2 + │── train + │-- deepfashion2_short_sleeved_outwear_train.json + │-- deepfashion2_short_sleeved_dress_train.json + │-- deepfashion2_skirt_train.json + │-- deepfashion2_sling_dress_train.json + │-- ... + │-- image + │ │-- 000001.jpg + │ │-- 000002.jpg + │ │-- 000003.jpg + │ │-- 000004.jpg + │ │-- 000005.jpg + │ │-- ... + │── validation + │-- deepfashion2_short_sleeved_dress_validation.json + │-- deepfashion2_long_sleeved_shirt_validation.json + │-- deepfashion2_trousers_validation.json + │-- deepfashion2_skirt_validation.json + │-- ... + │-- image + │ │-- 000001.jpg + │ │-- 000002.jpg + │ │-- 000003.jpg + │ │-- 000004.jpg + │ │-- 000005.jpg + │ │-- ... +``` diff --git a/docs/en/dataset_zoo/2d_hand_keypoint.md b/docs/en/dataset_zoo/2d_hand_keypoint.md index d641bc311d..6c7cd0d43b 100644 --- a/docs/en/dataset_zoo/2d_hand_keypoint.md +++ b/docs/en/dataset_zoo/2d_hand_keypoint.md @@ -1,348 +1,348 @@ -# 2D Hand Keypoint Datasets - -It is recommended to symlink the dataset root to `$MMPOSE/data`. -If your folder structure is different, you may need to change the corresponding paths in config files. - -MMPose supported datasets: - -- [OneHand10K](#onehand10k) \[ [Homepage](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html) \] -- [FreiHand](#freihand-dataset) \[ [Homepage](https://lmb.informatik.uni-freiburg.de/projects/freihand/) \] -- [CMU Panoptic HandDB](#cmu-panoptic-handdb) \[ [Homepage](http://domedb.perception.cs.cmu.edu/handdb.html) \] -- [InterHand2.6M](#interhand26m) \[ [Homepage](https://mks0601.github.io/InterHand2.6M/) \] -- [RHD](#rhd-dataset) \[ [Homepage](https://lmb.informatik.uni-freiburg.de/resources/datasets/RenderedHandposeDataset.en.html) \] -- [COCO-WholeBody-Hand](#coco-wholebody-hand) \[ [Homepage](https://github.com/jin-s13/COCO-WholeBody/) \] - -## OneHand10K - - - -
-OneHand10K (TCSVT'2019) - -```bibtex -@article{wang2018mask, - title={Mask-pose cascaded cnn for 2d hand pose estimation from single color image}, - author={Wang, Yangang and Peng, Cong and Liu, Yebin}, - journal={IEEE Transactions on Circuits and Systems for Video Technology}, - volume={29}, - number={11}, - pages={3258--3268}, - year={2018}, - publisher={IEEE} -} -``` - -
- -
- -
- -For [OneHand10K](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html) data, please download from [OneHand10K Dataset](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html). -Please download the annotation files from [onehand10k_annotations](https://download.openmmlab.com/mmpose/datasets/onehand10k_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── onehand10k - |── annotations - | |── onehand10k_train.json - | |── onehand10k_test.json - `── Train - | |── source - | |── 0.jpg - | |── 1.jpg - | ... - `── Test - |── source - |── 0.jpg - |── 1.jpg - -``` - -## FreiHAND Dataset - - - -
-FreiHand (ICCV'2019) - -```bibtex -@inproceedings{zimmermann2019freihand, - title={Freihand: A dataset for markerless capture of hand pose and shape from single rgb images}, - author={Zimmermann, Christian and Ceylan, Duygu and Yang, Jimei and Russell, Bryan and Argus, Max and Brox, Thomas}, - booktitle={Proceedings of the IEEE International Conference on Computer Vision}, - pages={813--822}, - year={2019} -} -``` - -
- -
- -
- -For [FreiHAND](https://lmb.informatik.uni-freiburg.de/projects/freihand/) data, please download from [FreiHand Dataset](https://lmb.informatik.uni-freiburg.de/resources/datasets/FreihandDataset.en.html). -Since the official dataset does not provide validation set, we randomly split the training data into 8:1:1 for train/val/test. -Please download the annotation files from [freihand_annotations](https://download.openmmlab.com/mmpose/datasets/frei_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── freihand - |── annotations - | |── freihand_train.json - | |── freihand_val.json - | |── freihand_test.json - `── training - |── rgb - | |── 00000000.jpg - | |── 00000001.jpg - | ... - |── mask - |── 00000000.jpg - |── 00000001.jpg - ... -``` - -## CMU Panoptic HandDB - - - -
-CMU Panoptic HandDB (CVPR'2017) - -```bibtex -@inproceedings{simon2017hand, - title={Hand keypoint detection in single images using multiview bootstrapping}, - author={Simon, Tomas and Joo, Hanbyul and Matthews, Iain and Sheikh, Yaser}, - booktitle={Proceedings of the IEEE conference on Computer Vision and Pattern Recognition}, - pages={1145--1153}, - year={2017} -} -``` - -
- -
- -
- -For [CMU Panoptic HandDB](http://domedb.perception.cs.cmu.edu/handdb.html), please download from [CMU Panoptic HandDB](http://domedb.perception.cs.cmu.edu/handdb.html). -Following [Simon et al](https://arxiv.org/abs/1704.07809), panoptic images (hand143_panopticdb) and MPII & NZSL training sets (manual_train) are used for training, while MPII & NZSL test set (manual_test) for testing. -Please download the annotation files from [panoptic_annotations](https://download.openmmlab.com/mmpose/datasets/panoptic_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── panoptic - |── annotations - | |── panoptic_train.json - | |── panoptic_test.json - | - `── hand143_panopticdb - | |── imgs - | | |── 00000000.jpg - | | |── 00000001.jpg - | | ... - | - `── hand_labels - |── manual_train - | |── 000015774_01_l.jpg - | |── 000015774_01_r.jpg - | ... - | - `── manual_test - |── 000648952_02_l.jpg - |── 000835470_01_l.jpg - ... -``` - -## InterHand2.6M - - - -
-InterHand2.6M (ECCV'2020) - -```bibtex -@InProceedings{Moon_2020_ECCV_InterHand2.6M, -author = {Moon, Gyeongsik and Yu, Shoou-I and Wen, He and Shiratori, Takaaki and Lee, Kyoung Mu}, -title = {InterHand2.6M: A Dataset and Baseline for 3D Interacting Hand Pose Estimation from a Single RGB Image}, -booktitle = {European Conference on Computer Vision (ECCV)}, -year = {2020} -} -``` - -
- -
- -
- -For [InterHand2.6M](https://mks0601.github.io/InterHand2.6M/), please download from [InterHand2.6M](https://mks0601.github.io/InterHand2.6M/). -Please download the annotation files from [annotations](https://download.openmmlab.com/mmpose/datasets/interhand2.6m_annotations.zip). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── interhand2.6m - |── annotations - | |── all - | |── human_annot - | |── machine_annot - | |── skeleton.txt - | |── subject.txt - | - `── images - | |── train - | | |-- Capture0 ~ Capture26 - | |── val - | | |-- Capture0 - | |── test - | | |-- Capture0 ~ Capture7 -``` - -## RHD Dataset - - - -
-RHD (ICCV'2017) - -```bibtex -@TechReport{zb2017hand, - author={Christian Zimmermann and Thomas Brox}, - title={Learning to Estimate 3D Hand Pose from Single RGB Images}, - institution={arXiv:1705.01389}, - year={2017}, - note="https://arxiv.org/abs/1705.01389", - url="https://lmb.informatik.uni-freiburg.de/projects/hand3d/" -} -``` - -
- -
- -
- -For [RHD Dataset](https://lmb.informatik.uni-freiburg.de/resources/datasets/RenderedHandposeDataset.en.html), please download from [RHD Dataset](https://lmb.informatik.uni-freiburg.de/resources/datasets/RenderedHandposeDataset.en.html). -Please download the annotation files from [rhd_annotations](https://download.openmmlab.com/mmpose/datasets/rhd_annotations.zip). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── rhd - |── annotations - | |── rhd_train.json - | |── rhd_test.json - `── training - | |── color - | | |── 00000.jpg - | | |── 00001.jpg - | |── depth - | | |── 00000.jpg - | | |── 00001.jpg - | |── mask - | | |── 00000.jpg - | | |── 00001.jpg - `── evaluation - | |── color - | | |── 00000.jpg - | | |── 00001.jpg - | |── depth - | | |── 00000.jpg - | | |── 00001.jpg - | |── mask - | | |── 00000.jpg - | | |── 00001.jpg -``` - -## COCO-WholeBody (Hand) - - - -
-COCO-WholeBody-Hand (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -
- -
- -For [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody/) dataset, images can be downloaded from [COCO download](http://cocodataset.org/#download), 2017 Train/Val is needed for COCO keypoints training and validation. -Download COCO-WholeBody annotations for COCO-WholeBody annotations for [Train](https://drive.google.com/file/d/1thErEToRbmM9uLNi1JXXfOsaS5VK2FXf/view?usp=sharing) / [Validation](https://drive.google.com/file/d/1N6VgwKnj8DeyGXCvp1eYgNbRmw6jdfrb/view?usp=sharing) (Google Drive). -Download person detection result of COCO val2017 from [OneDrive](https://1drv.ms/f/s!AhIXJn_J-blWzzDXoz5BeFl8sWM-) or [GoogleDrive](https://drive.google.com/drive/folders/1fRUDNUDxe9fjqcRZ2bnF_TKMlO0nB_dk?usp=sharing). -Download and extract them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── coco - │-- annotations - │ │-- coco_wholebody_train_v1.0.json - │ |-- coco_wholebody_val_v1.0.json - |-- person_detection_results - | |-- COCO_val2017_detections_AP_H_56_person.json - │-- train2017 - │ │-- 000000000009.jpg - │ │-- 000000000025.jpg - │ │-- 000000000030.jpg - │ │-- ... - `-- val2017 - │-- 000000000139.jpg - │-- 000000000285.jpg - │-- 000000000632.jpg - │-- ... -``` - -Please also install the latest version of [Extended COCO API](https://github.com/jin-s13/xtcocoapi) to support COCO-WholeBody evaluation: - -`pip install xtcocotools` +# 2D Hand Keypoint Datasets + +It is recommended to symlink the dataset root to `$MMPOSE/data`. +If your folder structure is different, you may need to change the corresponding paths in config files. + +MMPose supported datasets: + +- [OneHand10K](#onehand10k) \[ [Homepage](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html) \] +- [FreiHand](#freihand-dataset) \[ [Homepage](https://lmb.informatik.uni-freiburg.de/projects/freihand/) \] +- [CMU Panoptic HandDB](#cmu-panoptic-handdb) \[ [Homepage](http://domedb.perception.cs.cmu.edu/handdb.html) \] +- [InterHand2.6M](#interhand26m) \[ [Homepage](https://mks0601.github.io/InterHand2.6M/) \] +- [RHD](#rhd-dataset) \[ [Homepage](https://lmb.informatik.uni-freiburg.de/resources/datasets/RenderedHandposeDataset.en.html) \] +- [COCO-WholeBody-Hand](#coco-wholebody-hand) \[ [Homepage](https://github.com/jin-s13/COCO-WholeBody/) \] + +## OneHand10K + + + +
+OneHand10K (TCSVT'2019) + +```bibtex +@article{wang2018mask, + title={Mask-pose cascaded cnn for 2d hand pose estimation from single color image}, + author={Wang, Yangang and Peng, Cong and Liu, Yebin}, + journal={IEEE Transactions on Circuits and Systems for Video Technology}, + volume={29}, + number={11}, + pages={3258--3268}, + year={2018}, + publisher={IEEE} +} +``` + +
+ +
+ +
+ +For [OneHand10K](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html) data, please download from [OneHand10K Dataset](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html). +Please download the annotation files from [onehand10k_annotations](https://download.openmmlab.com/mmpose/datasets/onehand10k_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── onehand10k + |── annotations + | |── onehand10k_train.json + | |── onehand10k_test.json + `── Train + | |── source + | |── 0.jpg + | |── 1.jpg + | ... + `── Test + |── source + |── 0.jpg + |── 1.jpg + +``` + +## FreiHAND Dataset + + + +
+FreiHand (ICCV'2019) + +```bibtex +@inproceedings{zimmermann2019freihand, + title={Freihand: A dataset for markerless capture of hand pose and shape from single rgb images}, + author={Zimmermann, Christian and Ceylan, Duygu and Yang, Jimei and Russell, Bryan and Argus, Max and Brox, Thomas}, + booktitle={Proceedings of the IEEE International Conference on Computer Vision}, + pages={813--822}, + year={2019} +} +``` + +
+ +
+ +
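+The `freihand_*.json` files below already encode the 8:1:1 train/val/test split mentioned in the next paragraph, so nothing needs to be regenerated. The snippet is only a sketch of how such a random split can be reproduced for a custom sample list; the seed and sample ids are illustrative.
+
+```python
+import random
+
+def split_811(sample_ids, seed=0):
+    """Shuffle the ids and split them into train/val/test with an 8:1:1 ratio."""
+    ids = list(sample_ids)
+    random.Random(seed).shuffle(ids)
+    n_train = int(len(ids) * 0.8)
+    n_val = int(len(ids) * 0.1)
+    return ids[:n_train], ids[n_train:n_train + n_val], ids[n_train + n_val:]
+
+# Replace range(1000) with the actual FreiHAND sample indices.
+train_ids, val_ids, test_ids = split_811(range(1000))
+print(len(train_ids), len(val_ids), len(test_ids))
+```
+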
+ +For [FreiHAND](https://lmb.informatik.uni-freiburg.de/projects/freihand/) data, please download from [FreiHand Dataset](https://lmb.informatik.uni-freiburg.de/resources/datasets/FreihandDataset.en.html). +Since the official dataset does not provide validation set, we randomly split the training data into 8:1:1 for train/val/test. +Please download the annotation files from [freihand_annotations](https://download.openmmlab.com/mmpose/datasets/frei_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── freihand + |── annotations + | |── freihand_train.json + | |── freihand_val.json + | |── freihand_test.json + `── training + |── rgb + | |── 00000000.jpg + | |── 00000001.jpg + | ... + |── mask + |── 00000000.jpg + |── 00000001.jpg + ... +``` + +## CMU Panoptic HandDB + + + +
+CMU Panoptic HandDB (CVPR'2017) + +```bibtex +@inproceedings{simon2017hand, + title={Hand keypoint detection in single images using multiview bootstrapping}, + author={Simon, Tomas and Joo, Hanbyul and Matthews, Iain and Sheikh, Yaser}, + booktitle={Proceedings of the IEEE conference on Computer Vision and Pattern Recognition}, + pages={1145--1153}, + year={2017} +} +``` + +
+ +
+ +
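+As noted below, training uses `hand143_panopticdb` together with `hand_labels/manual_train`, and testing uses `hand_labels/manual_test`. Counting the extracted images is an easy way to confirm that all three parts were unpacked; a minimal sketch over the layout shown below:
+
+```python
+from pathlib import Path
+
+data_root = Path("data/panoptic")  # relative to $MMPOSE
+
+parts = {
+    "hand143_panopticdb": data_root / "hand143_panopticdb" / "imgs",
+    "manual_train": data_root / "hand_labels" / "manual_train",
+    "manual_test": data_root / "hand_labels" / "manual_test",
+}
+for name, folder in parts.items():
+    n_imgs = len(list(folder.glob("*.jpg"))) if folder.is_dir() else 0
+    print(f"{name}: {n_imgs} images ({folder})")
+```
+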
+ +For [CMU Panoptic HandDB](http://domedb.perception.cs.cmu.edu/handdb.html), please download from [CMU Panoptic HandDB](http://domedb.perception.cs.cmu.edu/handdb.html). +Following [Simon et al](https://arxiv.org/abs/1704.07809), panoptic images (hand143_panopticdb) and MPII & NZSL training sets (manual_train) are used for training, while MPII & NZSL test set (manual_test) for testing. +Please download the annotation files from [panoptic_annotations](https://download.openmmlab.com/mmpose/datasets/panoptic_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── panoptic + |── annotations + | |── panoptic_train.json + | |── panoptic_test.json + | + `── hand143_panopticdb + | |── imgs + | | |── 00000000.jpg + | | |── 00000001.jpg + | | ... + | + `── hand_labels + |── manual_train + | |── 000015774_01_l.jpg + | |── 000015774_01_r.jpg + | ... + | + `── manual_test + |── 000648952_02_l.jpg + |── 000835470_01_l.jpg + ... +``` + +## InterHand2.6M + + + +
+InterHand2.6M (ECCV'2020) + +```bibtex +@InProceedings{Moon_2020_ECCV_InterHand2.6M, +author = {Moon, Gyeongsik and Yu, Shoou-I and Wen, He and Shiratori, Takaaki and Lee, Kyoung Mu}, +title = {InterHand2.6M: A Dataset and Baseline for 3D Interacting Hand Pose Estimation from a Single RGB Image}, +booktitle = {European Conference on Computer Vision (ECCV)}, +year = {2020} +} +``` + +
+ +
+ +
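+After downloading the images and annotations as described below, the expected layout can be verified with a few path checks. This is a minimal sketch that mirrors the directory tree shown below:
+
+```python
+from pathlib import Path
+
+data_root = Path("data/interhand2.6m")  # relative to $MMPOSE
+
+expected = [
+    data_root / "annotations" / "all",
+    data_root / "annotations" / "human_annot",
+    data_root / "annotations" / "machine_annot",
+    data_root / "annotations" / "skeleton.txt",
+    data_root / "annotations" / "subject.txt",
+    data_root / "images" / "train",
+    data_root / "images" / "val",
+    data_root / "images" / "test",
+]
+missing = [str(p) for p in expected if not p.exists()]
+print("layout OK" if not missing else f"missing: {missing}")
+```
+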
+ +For [InterHand2.6M](https://mks0601.github.io/InterHand2.6M/), please download from [InterHand2.6M](https://mks0601.github.io/InterHand2.6M/). +Please download the annotation files from [annotations](https://download.openmmlab.com/mmpose/datasets/interhand2.6m_annotations.zip). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── interhand2.6m + |── annotations + | |── all + | |── human_annot + | |── machine_annot + | |── skeleton.txt + | |── subject.txt + | + `── images + | |── train + | | |-- Capture0 ~ Capture26 + | |── val + | | |-- Capture0 + | |── test + | | |-- Capture0 ~ Capture7 +``` + +## RHD Dataset + + + +
+RHD (ICCV'2017) + +```bibtex +@TechReport{zb2017hand, + author={Christian Zimmermann and Thomas Brox}, + title={Learning to Estimate 3D Hand Pose from Single RGB Images}, + institution={arXiv:1705.01389}, + year={2017}, + note="https://arxiv.org/abs/1705.01389", + url="https://lmb.informatik.uni-freiburg.de/projects/hand3d/" +} +``` + +
+ +
+ +
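+Each RHD split stores matching `color`, `depth` and `mask` folders (see the tree below), so a simple file-count comparison catches incomplete extractions. A minimal sketch, assuming the layout shown below:
+
+```python
+from pathlib import Path
+
+data_root = Path("data/rhd")  # relative to $MMPOSE
+
+for split in ("training", "evaluation"):
+    counts = {}
+    for modality in ("color", "depth", "mask"):
+        folder = data_root / split / modality
+        counts[modality] = sum(1 for p in folder.iterdir() if p.is_file()) if folder.is_dir() else 0
+    # All three modalities should contain the same number of frames.
+    status = "OK" if len(set(counts.values())) == 1 else "MISMATCH"
+    print(f"{split}: {counts} -> {status}")
+```
+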
+ +For [RHD Dataset](https://lmb.informatik.uni-freiburg.de/resources/datasets/RenderedHandposeDataset.en.html), please download from [RHD Dataset](https://lmb.informatik.uni-freiburg.de/resources/datasets/RenderedHandposeDataset.en.html). +Please download the annotation files from [rhd_annotations](https://download.openmmlab.com/mmpose/datasets/rhd_annotations.zip). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── rhd + |── annotations + | |── rhd_train.json + | |── rhd_test.json + `── training + | |── color + | | |── 00000.jpg + | | |── 00001.jpg + | |── depth + | | |── 00000.jpg + | | |── 00001.jpg + | |── mask + | | |── 00000.jpg + | | |── 00001.jpg + `── evaluation + | |── color + | | |── 00000.jpg + | | |── 00001.jpg + | |── depth + | | |── 00000.jpg + | | |── 00001.jpg + | |── mask + | | |── 00000.jpg + | | |── 00001.jpg +``` + +## COCO-WholeBody (Hand) + + + +
+COCO-WholeBody-Hand (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +
+ +
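+Once the annotations are in place as described below, it can be useful to know how many instances actually carry usable hand labels. The sketch below relies on the per-instance `lefthand_valid` / `righthand_valid` flags of the COCO-WholeBody format; treat these key names as assumptions and check them against your copy of the file.
+
+```python
+import json
+
+# Path follows the directory tree below.
+ann_file = "data/coco/annotations/coco_wholebody_val_v1.0.json"
+
+with open(ann_file) as f:
+    anns = json.load(f).get("annotations", [])
+
+# Count left/right hands flagged as valid (assumed field names).
+n_valid_hands = sum(
+    bool(a.get("lefthand_valid")) + bool(a.get("righthand_valid")) for a in anns
+)
+print(f"{len(anns)} person instances, {n_valid_hands} valid hand annotations")
+```
+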
+
+
+For [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody/) dataset, images can be downloaded from [COCO download](http://cocodataset.org/#download). 2017 Train/Val is needed for COCO keypoints training and validation.
+Download COCO-WholeBody annotations for [Train](https://drive.google.com/file/d/1thErEToRbmM9uLNi1JXXfOsaS5VK2FXf/view?usp=sharing) / [Validation](https://drive.google.com/file/d/1N6VgwKnj8DeyGXCvp1eYgNbRmw6jdfrb/view?usp=sharing) (Google Drive).
+Download person detection results of COCO val2017 from [OneDrive](https://1drv.ms/f/s!AhIXJn_J-blWzzDXoz5BeFl8sWM-) or [GoogleDrive](https://drive.google.com/drive/folders/1fRUDNUDxe9fjqcRZ2bnF_TKMlO0nB_dk?usp=sharing).
+Download and extract them under $MMPOSE/data, and make them look like this:
+
+```text
+mmpose
+├── mmpose
+├── docs
+├── tests
+├── tools
+├── configs
+`── data
+    │── coco
+        │-- annotations
+        │   │-- coco_wholebody_train_v1.0.json
+        │   |-- coco_wholebody_val_v1.0.json
+        |-- person_detection_results
+        |   |-- COCO_val2017_detections_AP_H_56_person.json
+        │-- train2017
+        │   │-- 000000000009.jpg
+        │   │-- 000000000025.jpg
+        │   │-- 000000000030.jpg
+        │   │-- ...
+        `-- val2017
+            │-- 000000000139.jpg
+            │-- 000000000285.jpg
+            │-- 000000000632.jpg
+            │-- ...
+```
+
+Please also install the latest version of [Extended COCO API](https://github.com/jin-s13/xtcocoapi) to support COCO-WholeBody evaluation:
+
+`pip install xtcocotools`
diff --git a/docs/en/dataset_zoo/2d_wholebody_keypoint.md b/docs/en/dataset_zoo/2d_wholebody_keypoint.md
index a082c657c6..55a76139df 100644
--- a/docs/en/dataset_zoo/2d_wholebody_keypoint.md
+++ b/docs/en/dataset_zoo/2d_wholebody_keypoint.md
@@ -1,133 +1,133 @@
-# 2D Wholebody Keypoint Datasets
-
-It is recommended to symlink the dataset root to `$MMPOSE/data`.
-If your folder structure is different, you may need to change the corresponding paths in config files.
-
-MMPose supported datasets:
-
-- [COCO-WholeBody](#coco-wholebody) \[ [Homepage](https://github.com/jin-s13/COCO-WholeBody/) \]
-- [Halpe](#halpe) \[ [Homepage](https://github.com/Fang-Haoshu/Halpe-FullBody/) \]
-
-## COCO-WholeBody
-
-
-
-COCO-WholeBody (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
- -
- -
- -For [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody/) dataset, images can be downloaded from [COCO download](http://cocodataset.org/#download), 2017 Train/Val is needed for COCO keypoints training and validation. -Download COCO-WholeBody annotations for COCO-WholeBody annotations for [Train](https://drive.google.com/file/d/1thErEToRbmM9uLNi1JXXfOsaS5VK2FXf/view?usp=sharing) / [Validation](https://drive.google.com/file/d/1N6VgwKnj8DeyGXCvp1eYgNbRmw6jdfrb/view?usp=sharing) (Google Drive). -Download person detection result of COCO val2017 from [OneDrive](https://1drv.ms/f/s!AhIXJn_J-blWzzDXoz5BeFl8sWM-) or [GoogleDrive](https://drive.google.com/drive/folders/1fRUDNUDxe9fjqcRZ2bnF_TKMlO0nB_dk?usp=sharing). -Download and extract them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── coco - │-- annotations - │ │-- coco_wholebody_train_v1.0.json - │ |-- coco_wholebody_val_v1.0.json - |-- person_detection_results - | |-- COCO_val2017_detections_AP_H_56_person.json - │-- train2017 - │ │-- 000000000009.jpg - │ │-- 000000000025.jpg - │ │-- 000000000030.jpg - │ │-- ... - `-- val2017 - │-- 000000000139.jpg - │-- 000000000285.jpg - │-- 000000000632.jpg - │-- ... - -``` - -Please also install the latest version of [Extended COCO API](https://github.com/jin-s13/xtcocoapi) (version>=1.5) to support COCO-WholeBody evaluation: - -`pip install xtcocotools` - -## Halpe - - - -
-Halpe (CVPR'2020) - -```bibtex -@inproceedings{li2020pastanet, - title={PaStaNet: Toward Human Activity Knowledge Engine}, - author={Li, Yong-Lu and Xu, Liang and Liu, Xinpeng and Huang, Xijie and Xu, Yue and Wang, Shiyi and Fang, Hao-Shu and Ma, Ze and Chen, Mingyang and Lu, Cewu}, - booktitle={CVPR}, - year={2020} -} -``` - -
- -
- -
- -For [Halpe](https://github.com/Fang-Haoshu/Halpe-FullBody/) dataset, please download images and annotations from [Halpe download](https://github.com/Fang-Haoshu/Halpe-FullBody). -The images of the training set are from [HICO-Det](https://drive.google.com/open?id=1QZcJmGVlF9f4h-XLWe9Gkmnmj2z1gSnk) and those of the validation set are from [COCO](http://images.cocodataset.org/zips/val2017.zip). -Download person detection result of COCO val2017 from [OneDrive](https://1drv.ms/f/s!AhIXJn_J-blWzzDXoz5BeFl8sWM-) or [GoogleDrive](https://drive.google.com/drive/folders/1fRUDNUDxe9fjqcRZ2bnF_TKMlO0nB_dk?usp=sharing). -Download and extract them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── halpe - │-- annotations - │ │-- halpe_train_v1.json - │ |-- halpe_val_v1.json - |-- person_detection_results - | |-- COCO_val2017_detections_AP_H_56_person.json - │-- hico_20160224_det - │ │-- anno_bbox.mat - │ │-- anno.mat - │ │-- README - │ │-- images - │ │ │-- train2015 - │ │ │ │-- HICO_train2015_00000001.jpg - │ │ │ │-- HICO_train2015_00000002.jpg - │ │ │ │-- HICO_train2015_00000003.jpg - │ │ │ │-- ... - │ │ │-- test2015 - │ │-- tools - │ │-- ... - `-- val2017 - │-- 000000000139.jpg - │-- 000000000285.jpg - │-- 000000000632.jpg - │-- ... - -``` - -Please also install the latest version of [Extended COCO API](https://github.com/jin-s13/xtcocoapi) (version>=1.5) to support Halpe evaluation: - -`pip install xtcocotools` +# 2D Wholebody Keypoint Datasets + +It is recommended to symlink the dataset root to `$MMPOSE/data`. +If your folder structure is different, you may need to change the corresponding paths in config files. + +MMPose supported datasets: + +- [COCO-WholeBody](#coco-wholebody) \[ [Homepage](https://github.com/jin-s13/COCO-WholeBody/) \] +- [Halpe](#halpe) \[ [Homepage](https://github.com/Fang-Haoshu/Halpe-FullBody/) \] + +## COCO-WholeBody + + + +
+COCO-WholeBody (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
+ +
+ +
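+After installing `xtcocotools` (see the note below), the annotation files can be loaded through its COCO API for a quick sanity check. A minimal sketch, assuming the package exposes the same `COCO` interface as `pycocotools`:
+
+```python
+# Requires `pip install xtcocotools`, as noted below.
+from xtcocotools.coco import COCO
+
+ann_file = "data/coco/annotations/coco_wholebody_val_v1.0.json"
+coco = COCO(ann_file)
+
+img_ids = coco.getImgIds()
+ann_ids = coco.getAnnIds(imgIds=img_ids)
+print(f"{len(img_ids)} images, {len(ann_ids)} annotations in {ann_file}")
+```
+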
+
+
+For [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody/) dataset, images can be downloaded from [COCO download](http://cocodataset.org/#download). 2017 Train/Val is needed for COCO keypoints training and validation.
+Download COCO-WholeBody annotations for [Train](https://drive.google.com/file/d/1thErEToRbmM9uLNi1JXXfOsaS5VK2FXf/view?usp=sharing) / [Validation](https://drive.google.com/file/d/1N6VgwKnj8DeyGXCvp1eYgNbRmw6jdfrb/view?usp=sharing) (Google Drive).
+Download person detection results of COCO val2017 from [OneDrive](https://1drv.ms/f/s!AhIXJn_J-blWzzDXoz5BeFl8sWM-) or [GoogleDrive](https://drive.google.com/drive/folders/1fRUDNUDxe9fjqcRZ2bnF_TKMlO0nB_dk?usp=sharing).
+Download and extract them under $MMPOSE/data, and make them look like this:
+
+```text
+mmpose
+├── mmpose
+├── docs
+├── tests
+├── tools
+├── configs
+`── data
+    │── coco
+        │-- annotations
+        │   │-- coco_wholebody_train_v1.0.json
+        │   |-- coco_wholebody_val_v1.0.json
+        |-- person_detection_results
+        |   |-- COCO_val2017_detections_AP_H_56_person.json
+        │-- train2017
+        │   │-- 000000000009.jpg
+        │   │-- 000000000025.jpg
+        │   │-- 000000000030.jpg
+        │   │-- ...
+        `-- val2017
+            │-- 000000000139.jpg
+            │-- 000000000285.jpg
+            │-- 000000000632.jpg
+            │-- ...
+
+```
+
+Please also install the latest version of [Extended COCO API](https://github.com/jin-s13/xtcocoapi) (version>=1.5) to support COCO-WholeBody evaluation:
+
+`pip install xtcocotools`
+
+## Halpe
+
+
+
+Halpe (CVPR'2020) + +```bibtex +@inproceedings{li2020pastanet, + title={PaStaNet: Toward Human Activity Knowledge Engine}, + author={Li, Yong-Lu and Xu, Liang and Liu, Xinpeng and Huang, Xijie and Xu, Yue and Wang, Shiyi and Fang, Hao-Shu and Ma, Ze and Chen, Mingyang and Lu, Cewu}, + booktitle={CVPR}, + year={2020} +} +``` + +
+ +
+ +
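+The Halpe annotation files are COCO-style JSON, so a quick look at the keypoint count per instance helps confirm that the full-body version was downloaded. A minimal sketch; the key names follow the standard COCO layout and are assumptions if your copy differs:
+
+```python
+import json
+
+ann_file = "data/halpe/annotations/halpe_val_v1.json"  # path follows the tree below
+
+with open(ann_file) as f:
+    data = json.load(f)
+
+anns = data.get("annotations", [])
+# COCO-style keypoints are flat [x, y, v, x, y, v, ...] lists,
+# so the number of keypoints per instance is len(list) // 3.
+kpt_counts = sorted({len(a["keypoints"]) // 3 for a in anns if a.get("keypoints")})
+print(f"{len(data.get('images', []))} images, {len(anns)} annotations, "
+      f"keypoints per instance: {kpt_counts}")
+```
+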
+ +For [Halpe](https://github.com/Fang-Haoshu/Halpe-FullBody/) dataset, please download images and annotations from [Halpe download](https://github.com/Fang-Haoshu/Halpe-FullBody). +The images of the training set are from [HICO-Det](https://drive.google.com/open?id=1QZcJmGVlF9f4h-XLWe9Gkmnmj2z1gSnk) and those of the validation set are from [COCO](http://images.cocodataset.org/zips/val2017.zip). +Download person detection result of COCO val2017 from [OneDrive](https://1drv.ms/f/s!AhIXJn_J-blWzzDXoz5BeFl8sWM-) or [GoogleDrive](https://drive.google.com/drive/folders/1fRUDNUDxe9fjqcRZ2bnF_TKMlO0nB_dk?usp=sharing). +Download and extract them under $MMPOSE/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── halpe + │-- annotations + │ │-- halpe_train_v1.json + │ |-- halpe_val_v1.json + |-- person_detection_results + | |-- COCO_val2017_detections_AP_H_56_person.json + │-- hico_20160224_det + │ │-- anno_bbox.mat + │ │-- anno.mat + │ │-- README + │ │-- images + │ │ │-- train2015 + │ │ │ │-- HICO_train2015_00000001.jpg + │ │ │ │-- HICO_train2015_00000002.jpg + │ │ │ │-- HICO_train2015_00000003.jpg + │ │ │ │-- ... + │ │ │-- test2015 + │ │-- tools + │ │-- ... + `-- val2017 + │-- 000000000139.jpg + │-- 000000000285.jpg + │-- 000000000632.jpg + │-- ... + +``` + +Please also install the latest version of [Extended COCO API](https://github.com/jin-s13/xtcocoapi) (version>=1.5) to support Halpe evaluation: + +`pip install xtcocotools` diff --git a/docs/en/dataset_zoo/3d_body_keypoint.md b/docs/en/dataset_zoo/3d_body_keypoint.md index 82e21010fc..25b1d8415c 100644 --- a/docs/en/dataset_zoo/3d_body_keypoint.md +++ b/docs/en/dataset_zoo/3d_body_keypoint.md @@ -1,199 +1,199 @@ -# 3D Body Keypoint Datasets - -It is recommended to symlink the dataset root to `$MMPOSE/data`. -If your folder structure is different, you may need to change the corresponding paths in config files. - -MMPose supported datasets: - -- [Human3.6M](#human36m) \[ [Homepage](http://vision.imar.ro/human3.6m/description.php) \] -- [CMU Panoptic](#cmu-panoptic) \[ [Homepage](http://domedb.perception.cs.cmu.edu/) \] -- [Campus/Shelf](#campus-and-shelf) \[ [Homepage](http://campar.in.tum.de/Chair/MultiHumanPose) \] - -## Human3.6M - - - -
-Human3.6M (TPAMI'2014) - -```bibtex -@article{h36m_pami, - author = {Ionescu, Catalin and Papava, Dragos and Olaru, Vlad and Sminchisescu, Cristian}, - title = {Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments}, - journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, - publisher = {IEEE Computer Society}, - volume = {36}, - number = {7}, - pages = {1325-1339}, - month = {jul}, - year = {2014} -} -``` - -
- -
- -
- -For [Human3.6M](http://vision.imar.ro/human3.6m/description.php), please download from the official website and run the [preprocessing script](/tools/dataset_converters/preprocess_h36m.py), which will extract camera parameters and pose annotations at full framerate (50 FPS) and downsampled framerate (10 FPS). The processed data should have the following structure: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - ├── h36m - ├── annotation_body3d - | ├── cameras.pkl - | ├── fps50 - | | ├── h36m_test.npz - | | ├── h36m_train.npz - | | ├── joint2d_rel_stats.pkl - | | ├── joint2d_stats.pkl - | | ├── joint3d_rel_stats.pkl - | | `── joint3d_stats.pkl - | `── fps10 - | ├── h36m_test.npz - | ├── h36m_train.npz - | ├── joint2d_rel_stats.pkl - | ├── joint2d_stats.pkl - | ├── joint3d_rel_stats.pkl - | `── joint3d_stats.pkl - `── images - ├── S1 - | ├── S1_Directions_1.54138969 - | | ├── S1_Directions_1.54138969_00001.jpg - | | ├── S1_Directions_1.54138969_00002.jpg - | | ├── ... - | ├── ... - ├── S5 - ├── S6 - ├── S7 - ├── S8 - ├── S9 - `── S11 -``` - -## CMU Panoptic - -
-CMU Panoptic (ICCV'2015)
-
-```bibtex
-@inproceedings{joo_iccv_2015,
-author = {Hanbyul Joo and Hao Liu and Lei Tan and Lin Gui and Bart Nabbe and Iain Matthews and Takeo Kanade and Shohei Nobuhara and Yaser Sheikh},
-title = {Panoptic Studio: A Massively Multiview System for Social Motion Capture},
-booktitle = {ICCV},
-year = {2015}
-}
-```
-
-
- -
- -
- -Please follow [voxelpose-pytorch](https://github.com/microsoft/voxelpose-pytorch) to prepare this dataset. - -1. Download the dataset by following the instructions in [panoptic-toolbox](https://github.com/CMU-Perceptual-Computing-Lab/panoptic-toolbox) and extract them under `$MMPOSE/data/panoptic`. - -2. Only download those sequences that are needed. You can also just download a subset of camera views by specifying the number of views (HD_Video_Number) and changing the camera order in `./scripts/getData.sh`. The used sequences and camera views can be found in [VoxelPose](https://arxiv.org/abs/2004.06239). Note that the sequence "160906_band3" might not be available due to errors on the server of CMU Panoptic. - -3. Note that we only use HD videos, calibration data, and 3D Body Keypoint in the codes. You can comment out other irrelevant codes such as downloading 3D Face data in `./scripts/getData.sh`. - -The directory tree should be like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - ├── panoptic - ├── 16060224_haggling1 - | | ├── hdImgs - | | ├── hdvideos - | | ├── hdPose3d_stage1_coco19 - | | ├── calibration_160224_haggling1.json - ├── 160226_haggling1 - ├── ... -``` - -## Campus and Shelf - -
-Campus and Shelf (CVPR'2014)
-
-```bibtex
-@inproceedings{belagian14multi,
-  title = {{3D} Pictorial Structures for Multiple Human Pose Estimation},
-  author = {Belagiannis, Vasileios and Amin, Sikandar and Andriluka, Mykhaylo and Schiele, Bernt and Navab, Nassir and Ilic, Slobodan},
-  booktitle = {IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR)},
-  year = {2014},
-  month = {June},
-  organization={IEEE}
-}
-```
-
-
- -
- -
- -Please follow [voxelpose-pytorch](https://github.com/microsoft/voxelpose-pytorch) to prepare these two datasets. - -1. Please download the datasets from the [official website](http://campar.in.tum.de/Chair/MultiHumanPose) and extract them under `$MMPOSE/data/campus` and `$MMPOSE/data/shelf`, respectively. The original data include images as well as the ground truth pose file `actorsGT.mat`. - -2. We directly use the processed camera parameters from [voxelpose-pytorch](https://github.com/microsoft/voxelpose-pytorch). You can download them from this repository and place in under `$MMPOSE/data/campus/calibration_campus.json` and `$MMPOSE/data/shelf/calibration_shelf.json`, respectively. - -3. Like [Voxelpose](https://github.com/microsoft/voxelpose-pytorch), due to the limited and incomplete annotations of the two datasets, we don't train the model using this dataset. Instead, we directly use the 2D pose estimator trained on COCO, and use independent 3D human poses from the CMU Panoptic dataset to train our 3D model. It lies in `${MMPOSE}/data/panoptic_training_pose.pkl`. - -4. Like [Voxelpose](https://github.com/microsoft/voxelpose-pytorch), for testing, we first estimate 2D poses and generate 2D heatmaps for these two datasets. You can download the predicted poses from [voxelpose-pytorch](https://github.com/microsoft/voxelpose-pytorch) and place them in `$MMPOSE/data/campus/pred_campus_maskrcnn_hrnet_coco.pkl` and `$MMPOSE/data/shelf/pred_shelf_maskrcnn_hrnet_coco.pkl`, respectively. You can also use the models trained on COCO dataset (like HigherHRNet) to generate 2D heatmaps directly. - -The directory tree should be like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - ├── panoptic_training_pose.pkl - ├── campus - | ├── Camera0 - | | | ├── campus4-c0-00000.png - | | | ├── ... - | | | ├── campus4-c0-01999.png - | ... - | ├── Camera2 - | | | ├── campus4-c2-00000.png - | | | ├── ... - | | | ├── campus4-c2-01999.png - | ├── calibration_campus.json - | ├── pred_campus_maskrcnn_hrnet_coco.pkl - | ├── actorsGT.mat - ├── shelf - | ├── Camera0 - | | | ├── img_000000.png - | | | ├── ... - | | | ├── img_003199.png - | ... - | ├── Camera4 - | | | ├── img_000000.png - | | | ├── ... - | | | ├── img_003199.png - | ├── calibration_shelf.json - | ├── pred_shelf_maskrcnn_hrnet_coco.pkl - | ├── actorsGT.mat -``` +# 3D Body Keypoint Datasets + +It is recommended to symlink the dataset root to `$MMPOSE/data`. +If your folder structure is different, you may need to change the corresponding paths in config files. + +MMPose supported datasets: + +- [Human3.6M](#human36m) \[ [Homepage](http://vision.imar.ro/human3.6m/description.php) \] +- [CMU Panoptic](#cmu-panoptic) \[ [Homepage](http://domedb.perception.cs.cmu.edu/) \] +- [Campus/Shelf](#campus-and-shelf) \[ [Homepage](http://campar.in.tum.de/Chair/MultiHumanPose) \] + +## Human3.6M + + + +
+Human3.6M (TPAMI'2014) + +```bibtex +@article{h36m_pami, + author = {Ionescu, Catalin and Papava, Dragos and Olaru, Vlad and Sminchisescu, Cristian}, + title = {Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments}, + journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, + publisher = {IEEE Computer Society}, + volume = {36}, + number = {7}, + pages = {1325-1339}, + month = {jul}, + year = {2014} +} +``` + +
+ +
+ +
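+After running the preprocessing script mentioned below, the generated files can be inspected with NumPy and pickle to confirm they were written correctly. The sketch prints the stored array names instead of assuming them:
+
+```python
+import pickle
+import numpy as np
+
+ann_dir = "data/h36m/annotation_body3d"  # layout shown below
+
+with open(f"{ann_dir}/cameras.pkl", "rb") as f:
+    cameras = pickle.load(f)
+print(f"camera entries: {len(cameras)}")
+
+for split in ("train", "test"):
+    npz = np.load(f"{ann_dir}/fps50/h36m_{split}.npz")
+    # List the arrays and their shapes rather than assuming key names.
+    print(split, {name: npz[name].shape for name in npz.files})
+```
+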
+ +For [Human3.6M](http://vision.imar.ro/human3.6m/description.php), please download from the official website and run the [preprocessing script](/tools/dataset_converters/preprocess_h36m.py), which will extract camera parameters and pose annotations at full framerate (50 FPS) and downsampled framerate (10 FPS). The processed data should have the following structure: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + ├── h36m + ├── annotation_body3d + | ├── cameras.pkl + | ├── fps50 + | | ├── h36m_test.npz + | | ├── h36m_train.npz + | | ├── joint2d_rel_stats.pkl + | | ├── joint2d_stats.pkl + | | ├── joint3d_rel_stats.pkl + | | `── joint3d_stats.pkl + | `── fps10 + | ├── h36m_test.npz + | ├── h36m_train.npz + | ├── joint2d_rel_stats.pkl + | ├── joint2d_stats.pkl + | ├── joint3d_rel_stats.pkl + | `── joint3d_stats.pkl + `── images + ├── S1 + | ├── S1_Directions_1.54138969 + | | ├── S1_Directions_1.54138969_00001.jpg + | | ├── S1_Directions_1.54138969_00002.jpg + | | ├── ... + | ├── ... + ├── S5 + ├── S6 + ├── S7 + ├── S8 + ├── S9 + `── S11 +``` + +## CMU Panoptic + +
+CMU Panoptic (ICCV'2015)
+
+```bibtex
+@inproceedings{joo_iccv_2015,
+author = {Hanbyul Joo and Hao Liu and Lei Tan and Lin Gui and Bart Nabbe and Iain Matthews and Takeo Kanade and Shohei Nobuhara and Yaser Sheikh},
+title = {Panoptic Studio: A Massively Multiview System for Social Motion Capture},
+booktitle = {ICCV},
+year = {2015}
+}
+```
+
+
+ +
+ +
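+Per the steps below, every downloaded sequence should contain `hdImgs`, `hdvideos`, `hdPose3d_stage1_coco19` and a calibration file. A minimal layout check, assuming the calibration file is named `calibration_<sequence>.json`:
+
+```python
+from pathlib import Path
+
+data_root = Path("data/panoptic")  # relative to $MMPOSE
+
+for seq_dir in sorted(p for p in data_root.iterdir() if p.is_dir()):
+    expected = [
+        seq_dir / "hdImgs",
+        seq_dir / "hdvideos",
+        seq_dir / "hdPose3d_stage1_coco19",
+        seq_dir / f"calibration_{seq_dir.name}.json",
+    ]
+    missing = [p.name for p in expected if not p.exists()]
+    print(f"{seq_dir.name}: {'OK' if not missing else 'missing ' + ', '.join(missing)}")
+```
+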
+ +Please follow [voxelpose-pytorch](https://github.com/microsoft/voxelpose-pytorch) to prepare this dataset. + +1. Download the dataset by following the instructions in [panoptic-toolbox](https://github.com/CMU-Perceptual-Computing-Lab/panoptic-toolbox) and extract them under `$MMPOSE/data/panoptic`. + +2. Only download those sequences that are needed. You can also just download a subset of camera views by specifying the number of views (HD_Video_Number) and changing the camera order in `./scripts/getData.sh`. The used sequences and camera views can be found in [VoxelPose](https://arxiv.org/abs/2004.06239). Note that the sequence "160906_band3" might not be available due to errors on the server of CMU Panoptic. + +3. Note that we only use HD videos, calibration data, and 3D Body Keypoint in the codes. You can comment out other irrelevant codes such as downloading 3D Face data in `./scripts/getData.sh`. + +The directory tree should be like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + ├── panoptic + ├── 16060224_haggling1 + | | ├── hdImgs + | | ├── hdvideos + | | ├── hdPose3d_stage1_coco19 + | | ├── calibration_160224_haggling1.json + ├── 160226_haggling1 + ├── ... +``` + +## Campus and Shelf + +
+Campus and Shelf (CVPR'2014)
+
+```bibtex
+@inproceedings{belagian14multi,
+  title = {{3D} Pictorial Structures for Multiple Human Pose Estimation},
+  author = {Belagiannis, Vasileios and Amin, Sikandar and Andriluka, Mykhaylo and Schiele, Bernt and Navab, Nassir and Ilic, Slobodan},
+  booktitle = {IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR)},
+  year = {2014},
+  month = {June},
+  organization={IEEE}
+}
+```
+
+
+ +
+ +
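+The calibration file from step 2 and the predicted 2D poses from step 4 below can be loaded generically to confirm they were placed at the expected paths; the sketch only prints their top-level structure (swap `campus` for `shelf` as needed):
+
+```python
+import json
+import pickle
+
+calib_file = "data/campus/calibration_campus.json"
+pred_file = "data/campus/pred_campus_maskrcnn_hrnet_coco.pkl"
+
+with open(calib_file) as f:
+    calib = json.load(f)
+print(f"calibration entries: {len(calib)}")
+
+with open(pred_file, "rb") as f:
+    preds = pickle.load(f)
+print(f"predicted 2D pose entries: {len(preds)} ({type(preds).__name__})")
+```
+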
+ +Please follow [voxelpose-pytorch](https://github.com/microsoft/voxelpose-pytorch) to prepare these two datasets. + +1. Please download the datasets from the [official website](http://campar.in.tum.de/Chair/MultiHumanPose) and extract them under `$MMPOSE/data/campus` and `$MMPOSE/data/shelf`, respectively. The original data include images as well as the ground truth pose file `actorsGT.mat`. + +2. We directly use the processed camera parameters from [voxelpose-pytorch](https://github.com/microsoft/voxelpose-pytorch). You can download them from this repository and place in under `$MMPOSE/data/campus/calibration_campus.json` and `$MMPOSE/data/shelf/calibration_shelf.json`, respectively. + +3. Like [Voxelpose](https://github.com/microsoft/voxelpose-pytorch), due to the limited and incomplete annotations of the two datasets, we don't train the model using this dataset. Instead, we directly use the 2D pose estimator trained on COCO, and use independent 3D human poses from the CMU Panoptic dataset to train our 3D model. It lies in `${MMPOSE}/data/panoptic_training_pose.pkl`. + +4. Like [Voxelpose](https://github.com/microsoft/voxelpose-pytorch), for testing, we first estimate 2D poses and generate 2D heatmaps for these two datasets. You can download the predicted poses from [voxelpose-pytorch](https://github.com/microsoft/voxelpose-pytorch) and place them in `$MMPOSE/data/campus/pred_campus_maskrcnn_hrnet_coco.pkl` and `$MMPOSE/data/shelf/pred_shelf_maskrcnn_hrnet_coco.pkl`, respectively. You can also use the models trained on COCO dataset (like HigherHRNet) to generate 2D heatmaps directly. + +The directory tree should be like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + ├── panoptic_training_pose.pkl + ├── campus + | ├── Camera0 + | | | ├── campus4-c0-00000.png + | | | ├── ... + | | | ├── campus4-c0-01999.png + | ... + | ├── Camera2 + | | | ├── campus4-c2-00000.png + | | | ├── ... + | | | ├── campus4-c2-01999.png + | ├── calibration_campus.json + | ├── pred_campus_maskrcnn_hrnet_coco.pkl + | ├── actorsGT.mat + ├── shelf + | ├── Camera0 + | | | ├── img_000000.png + | | | ├── ... + | | | ├── img_003199.png + | ... + | ├── Camera4 + | | | ├── img_000000.png + | | | ├── ... + | | | ├── img_003199.png + | ├── calibration_shelf.json + | ├── pred_shelf_maskrcnn_hrnet_coco.pkl + | ├── actorsGT.mat +``` diff --git a/docs/en/dataset_zoo/3d_body_mesh.md b/docs/en/dataset_zoo/3d_body_mesh.md index aced63c802..25a08fd676 100644 --- a/docs/en/dataset_zoo/3d_body_mesh.md +++ b/docs/en/dataset_zoo/3d_body_mesh.md @@ -1,342 +1,342 @@ -# 3D Body Mesh Recovery Datasets - -It is recommended to symlink the dataset root to `$MMPOSE/data`. -If your folder structure is different, you may need to change the corresponding paths in config files. - -To achieve high-quality human mesh estimation, we use multiple datasets for training. -The following items should be prepared for human mesh training: - - - -- [3D Body Mesh Recovery Datasets](#3d-body-mesh-recovery-datasets) - - [Notes](#notes) - - [Annotation Files for Human Mesh Estimation](#annotation-files-for-human-mesh-estimation) - - [SMPL Model](#smpl-model) - - [COCO](#coco) - - [Human3.6M](#human36m) - - [MPI-INF-3DHP](#mpi-inf-3dhp) - - [LSP](#lsp) - - [LSPET](#lspet) - - [CMU MoShed Data](#cmu-moshed-data) - - - -## Notes - -### Annotation Files for Human Mesh Estimation - -For human mesh estimation, we use multiple datasets for training. -The annotation of different datasets are preprocessed to the same format. 
Please -follow the [preprocess procedure](https://github.com/nkolot/SPIN/tree/master/datasets/preprocess) -of SPIN to generate the annotation files or download the processed files from -[here](https://download.openmmlab.com/mmpose/datasets/mesh_annotation_files.zip), -and make it look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── mesh_annotation_files - ├── coco_2014_train.npz - ├── h36m_valid_protocol1.npz - ├── h36m_valid_protocol2.npz - ├── hr-lspet_train.npz - ├── lsp_dataset_original_train.npz - ├── mpi_inf_3dhp_train.npz - └── mpii_train.npz -``` - -### SMPL Model - -```bibtex -@article{loper2015smpl, - title={SMPL: A skinned multi-person linear model}, - author={Loper, Matthew and Mahmood, Naureen and Romero, Javier and Pons-Moll, Gerard and Black, Michael J}, - journal={ACM transactions on graphics (TOG)}, - volume={34}, - number={6}, - pages={1--16}, - year={2015}, - publisher={ACM New York, NY, USA} -} -``` - -For human mesh estimation, SMPL model is used to generate the human mesh. -Please download the [gender neutral SMPL model](http://smplify.is.tue.mpg.de/), -[joints regressor](https://download.openmmlab.com/mmpose/datasets/joints_regressor_cmr.npy) -and [mean parameters](https://download.openmmlab.com/mmpose/datasets/smpl_mean_params.npz) -under `$MMPOSE/models/smpl`, and make it look like this: - -```text -mmpose -├── mmpose -├── ... -├── models - │── smpl - ├── joints_regressor_cmr.npy - ├── smpl_mean_params.npz - └── SMPL_NEUTRAL.pkl -``` - -## COCO - - - -
-COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
- -For [COCO](http://cocodataset.org/) data, please download from [COCO download](http://cocodataset.org/#download). COCO'2014 Train is needed for human mesh estimation training. -Download and extract them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── coco - │-- train2014 - │ ├── COCO_train2014_000000000009.jpg - │ ├── COCO_train2014_000000000025.jpg - │ ├── COCO_train2014_000000000030.jpg - | │-- ... - -``` - -## Human3.6M - - - -
-Human3.6M (TPAMI'2014) - -```bibtex -@article{h36m_pami, - author = {Ionescu, Catalin and Papava, Dragos and Olaru, Vlad and Sminchisescu, Cristian}, - title = {Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments}, - journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, - publisher = {IEEE Computer Society}, - volume = {36}, - number = {7}, - pages = {1325-1339}, - month = {jul}, - year = {2014} -} -``` - -
- -For [Human3.6M](http://vision.imar.ro/human3.6m/description.php), we use the MoShed data provided in [HMR](https://github.com/akanazawa/hmr) for training. -However, due to license limitations, we are not allowed to redistribute the MoShed data. - -For the evaluation on Human3.6M dataset, please follow the -[preprocess procedure](https://github.com/nkolot/SPIN/tree/master/datasets/preprocess) -of SPIN to extract test images from -[Human3.6M](http://vision.imar.ro/human3.6m/description.php) original videos, -and make it look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── Human3.6M - ├── images -    ├── S11_Directions_1.54138969_000001.jpg -    ├── S11_Directions_1.54138969_000006.jpg -    ├── S11_Directions_1.54138969_000011.jpg -    ├── ... -``` - -The download of Human3.6M dataset is quite difficult, you can also download the -[zip file](https://drive.google.com/file/d/1WnRJD9FS3NUf7MllwgLRJJC-JgYFr8oi/view?usp=sharing) -of the test images. However, due to the license limitations, we are not allowed to -redistribute the images either. So the users need to download the original video and -extract the images by themselves. - -## MPI-INF-3DHP - - - -```bibtex -@inproceedings{mono-3dhp2017, - author = {Mehta, Dushyant and Rhodin, Helge and Casas, Dan and Fua, Pascal and Sotnychenko, Oleksandr and Xu, Weipeng and Theobalt, Christian}, - title = {Monocular 3D Human Pose Estimation In The Wild Using Improved CNN Supervision}, - booktitle = {3D Vision (3DV), 2017 Fifth International Conference on}, - url = {http://gvv.mpi-inf.mpg.de/3dhp_dataset}, - year = {2017}, - organization={IEEE}, - doi={10.1109/3dv.2017.00064}, -} -``` - -For [MPI-INF-3DHP](http://gvv.mpi-inf.mpg.de/3dhp-dataset/), please follow the -[preprocess procedure](https://github.com/nkolot/SPIN/tree/master/datasets/preprocess) -of SPIN to sample images, and make them like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - ├── mpi_inf_3dhp_test_set - │   ├── TS1 - │   ├── TS2 - │   ├── TS3 - │   ├── TS4 - │   ├── TS5 - │   └── TS6 - ├── S1 - │   ├── Seq1 - │   └── Seq2 - ├── S2 - │   ├── Seq1 - │   └── Seq2 - ├── S3 - │   ├── Seq1 - │   └── Seq2 - ├── S4 - │   ├── Seq1 - │   └── Seq2 - ├── S5 - │   ├── Seq1 - │   └── Seq2 - ├── S6 - │   ├── Seq1 - │   └── Seq2 - ├── S7 - │   ├── Seq1 - │   └── Seq2 - └── S8 - ├── Seq1 - └── Seq2 -``` - -## LSP - - - -```bibtex -@inproceedings{johnson2010clustered, - title={Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation.}, - author={Johnson, Sam and Everingham, Mark}, - booktitle={bmvc}, - volume={2}, - number={4}, - pages={5}, - year={2010}, - organization={Citeseer} -} -``` - -For [LSP](https://sam.johnson.io/research/lsp.html), please download the high resolution version -[LSP dataset original](http://sam.johnson.io/research/lsp_dataset_original.zip). -Extract them under `$MMPOSE/data`, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── lsp_dataset_original - ├── images -    ├── im0001.jpg -    ├── im0002.jpg -    └── ... 
-``` - -## LSPET - - - -```bibtex -@inproceedings{johnson2011learning, - title={Learning effective human pose estimation from inaccurate annotation}, - author={Johnson, Sam and Everingham, Mark}, - booktitle={CVPR 2011}, - pages={1465--1472}, - year={2011}, - organization={IEEE} -} -``` - -For [LSPET](https://sam.johnson.io/research/lspet.html), please download its high resolution form -[HR-LSPET](http://datasets.d2.mpi-inf.mpg.de/hr-lspet/hr-lspet.zip). -Extract them under `$MMPOSE/data`, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── lspet_dataset - ├── images - │   ├── im00001.jpg - │   ├── im00002.jpg - │   ├── im00003.jpg - │   └── ... - └── joints.mat -``` - -## CMU MoShed Data - - - -```bibtex -@inproceedings{kanazawa2018end, - title={End-to-end recovery of human shape and pose}, - author={Kanazawa, Angjoo and Black, Michael J and Jacobs, David W and Malik, Jitendra}, - booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, - pages={7122--7131}, - year={2018} -} -``` - -Real-world SMPL parameters are used for the adversarial training in human mesh estimation. -The MoShed data provided in [HMR](https://github.com/akanazawa/hmr) is included in this -[zip file](https://download.openmmlab.com/mmpose/datasets/mesh_annotation_files.zip). -Please download and extract it under `$MMPOSE/data`, and make it look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── mesh_annotation_files - ├── CMU_mosh.npz - └── ... -``` +# 3D Body Mesh Recovery Datasets + +It is recommended to symlink the dataset root to `$MMPOSE/data`. +If your folder structure is different, you may need to change the corresponding paths in config files. + +To achieve high-quality human mesh estimation, we use multiple datasets for training. +The following items should be prepared for human mesh training: + + + +- [3D Body Mesh Recovery Datasets](#3d-body-mesh-recovery-datasets) + - [Notes](#notes) + - [Annotation Files for Human Mesh Estimation](#annotation-files-for-human-mesh-estimation) + - [SMPL Model](#smpl-model) + - [COCO](#coco) + - [Human3.6M](#human36m) + - [MPI-INF-3DHP](#mpi-inf-3dhp) + - [LSP](#lsp) + - [LSPET](#lspet) + - [CMU MoShed Data](#cmu-moshed-data) + + + +## Notes + +### Annotation Files for Human Mesh Estimation + +For human mesh estimation, we use multiple datasets for training. +The annotation of different datasets are preprocessed to the same format. 
Please +follow the [preprocess procedure](https://github.com/nkolot/SPIN/tree/master/datasets/preprocess) +of SPIN to generate the annotation files or download the processed files from +[here](https://download.openmmlab.com/mmpose/datasets/mesh_annotation_files.zip), +and make it look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── mesh_annotation_files + ├── coco_2014_train.npz + ├── h36m_valid_protocol1.npz + ├── h36m_valid_protocol2.npz + ├── hr-lspet_train.npz + ├── lsp_dataset_original_train.npz + ├── mpi_inf_3dhp_train.npz + └── mpii_train.npz +``` + +### SMPL Model + +```bibtex +@article{loper2015smpl, + title={SMPL: A skinned multi-person linear model}, + author={Loper, Matthew and Mahmood, Naureen and Romero, Javier and Pons-Moll, Gerard and Black, Michael J}, + journal={ACM transactions on graphics (TOG)}, + volume={34}, + number={6}, + pages={1--16}, + year={2015}, + publisher={ACM New York, NY, USA} +} +``` + +For human mesh estimation, SMPL model is used to generate the human mesh. +Please download the [gender neutral SMPL model](http://smplify.is.tue.mpg.de/), +[joints regressor](https://download.openmmlab.com/mmpose/datasets/joints_regressor_cmr.npy) +and [mean parameters](https://download.openmmlab.com/mmpose/datasets/smpl_mean_params.npz) +under `$MMPOSE/models/smpl`, and make it look like this: + +```text +mmpose +├── mmpose +├── ... +├── models + │── smpl + ├── joints_regressor_cmr.npy + ├── smpl_mean_params.npz + └── SMPL_NEUTRAL.pkl +``` + +## COCO + + + +
+COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
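+Before training, it is worth checking that the extracted `train2014` images line up with the preprocessed `coco_2014_train.npz` file from `mesh_annotation_files` described in the Notes above. A minimal sketch that prints counts and stored array names rather than assuming them (NumPy required):
+
+```python
+from pathlib import Path
+import numpy as np
+
+img_dir = Path("data/coco/train2014")  # layout shown below
+ann_file = "data/mesh_annotation_files/coco_2014_train.npz"  # see the Notes section above
+
+n_imgs = len(list(img_dir.glob("COCO_train2014_*.jpg"))) if img_dir.is_dir() else 0
+print(f"COCO train2014 images on disk: {n_imgs}")
+
+ann = np.load(ann_file)
+print({name: ann[name].shape for name in ann.files})
+```
+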
+ +For [COCO](http://cocodataset.org/) data, please download from [COCO download](http://cocodataset.org/#download). COCO'2014 Train is needed for human mesh estimation training. +Download and extract them under $MMPOSE/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── coco + │-- train2014 + │ ├── COCO_train2014_000000000009.jpg + │ ├── COCO_train2014_000000000025.jpg + │ ├── COCO_train2014_000000000030.jpg + | │-- ... + +``` + +## Human3.6M + + + +
+Human3.6M (TPAMI'2014) + +```bibtex +@article{h36m_pami, + author = {Ionescu, Catalin and Papava, Dragos and Olaru, Vlad and Sminchisescu, Cristian}, + title = {Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments}, + journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, + publisher = {IEEE Computer Society}, + volume = {36}, + number = {7}, + pages = {1325-1339}, + month = {jul}, + year = {2014} +} +``` + +
+ +For [Human3.6M](http://vision.imar.ro/human3.6m/description.php), we use the MoShed data provided in [HMR](https://github.com/akanazawa/hmr) for training. +However, due to license limitations, we are not allowed to redistribute the MoShed data. + +For the evaluation on Human3.6M dataset, please follow the +[preprocess procedure](https://github.com/nkolot/SPIN/tree/master/datasets/preprocess) +of SPIN to extract test images from +[Human3.6M](http://vision.imar.ro/human3.6m/description.php) original videos, +and make it look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── Human3.6M + ├── images +    ├── S11_Directions_1.54138969_000001.jpg +    ├── S11_Directions_1.54138969_000006.jpg +    ├── S11_Directions_1.54138969_000011.jpg +    ├── ... +``` + +The download of Human3.6M dataset is quite difficult, you can also download the +[zip file](https://drive.google.com/file/d/1WnRJD9FS3NUf7MllwgLRJJC-JgYFr8oi/view?usp=sharing) +of the test images. However, due to the license limitations, we are not allowed to +redistribute the images either. So the users need to download the original video and +extract the images by themselves. + +## MPI-INF-3DHP + + + +```bibtex +@inproceedings{mono-3dhp2017, + author = {Mehta, Dushyant and Rhodin, Helge and Casas, Dan and Fua, Pascal and Sotnychenko, Oleksandr and Xu, Weipeng and Theobalt, Christian}, + title = {Monocular 3D Human Pose Estimation In The Wild Using Improved CNN Supervision}, + booktitle = {3D Vision (3DV), 2017 Fifth International Conference on}, + url = {http://gvv.mpi-inf.mpg.de/3dhp_dataset}, + year = {2017}, + organization={IEEE}, + doi={10.1109/3dv.2017.00064}, +} +``` + +For [MPI-INF-3DHP](http://gvv.mpi-inf.mpg.de/3dhp-dataset/), please follow the +[preprocess procedure](https://github.com/nkolot/SPIN/tree/master/datasets/preprocess) +of SPIN to sample images, and make them like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + ├── mpi_inf_3dhp_test_set + │   ├── TS1 + │   ├── TS2 + │   ├── TS3 + │   ├── TS4 + │   ├── TS5 + │   └── TS6 + ├── S1 + │   ├── Seq1 + │   └── Seq2 + ├── S2 + │   ├── Seq1 + │   └── Seq2 + ├── S3 + │   ├── Seq1 + │   └── Seq2 + ├── S4 + │   ├── Seq1 + │   └── Seq2 + ├── S5 + │   ├── Seq1 + │   └── Seq2 + ├── S6 + │   ├── Seq1 + │   └── Seq2 + ├── S7 + │   ├── Seq1 + │   └── Seq2 + └── S8 + ├── Seq1 + └── Seq2 +``` + +## LSP + + + +```bibtex +@inproceedings{johnson2010clustered, + title={Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation.}, + author={Johnson, Sam and Everingham, Mark}, + booktitle={bmvc}, + volume={2}, + number={4}, + pages={5}, + year={2010}, + organization={Citeseer} +} +``` + +For [LSP](https://sam.johnson.io/research/lsp.html), please download the high resolution version +[LSP dataset original](http://sam.johnson.io/research/lsp_dataset_original.zip). +Extract them under `$MMPOSE/data`, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── lsp_dataset_original + ├── images +    ├── im0001.jpg +    ├── im0002.jpg +    └── ... 
+``` + +## LSPET + + + +```bibtex +@inproceedings{johnson2011learning, + title={Learning effective human pose estimation from inaccurate annotation}, + author={Johnson, Sam and Everingham, Mark}, + booktitle={CVPR 2011}, + pages={1465--1472}, + year={2011}, + organization={IEEE} +} +``` + +For [LSPET](https://sam.johnson.io/research/lspet.html), please download its high resolution form +[HR-LSPET](http://datasets.d2.mpi-inf.mpg.de/hr-lspet/hr-lspet.zip). +Extract them under `$MMPOSE/data`, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── lspet_dataset + ├── images + │   ├── im00001.jpg + │   ├── im00002.jpg + │   ├── im00003.jpg + │   └── ... + └── joints.mat +``` + +## CMU MoShed Data + + + +```bibtex +@inproceedings{kanazawa2018end, + title={End-to-end recovery of human shape and pose}, + author={Kanazawa, Angjoo and Black, Michael J and Jacobs, David W and Malik, Jitendra}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + pages={7122--7131}, + year={2018} +} +``` + +Real-world SMPL parameters are used for the adversarial training in human mesh estimation. +The MoShed data provided in [HMR](https://github.com/akanazawa/hmr) is included in this +[zip file](https://download.openmmlab.com/mmpose/datasets/mesh_annotation_files.zip). +Please download and extract it under `$MMPOSE/data`, and make it look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── mesh_annotation_files + ├── CMU_mosh.npz + └── ... +``` diff --git a/docs/en/dataset_zoo/3d_hand_keypoint.md b/docs/en/dataset_zoo/3d_hand_keypoint.md index 823dc6ad64..c49594bf8e 100644 --- a/docs/en/dataset_zoo/3d_hand_keypoint.md +++ b/docs/en/dataset_zoo/3d_hand_keypoint.md @@ -1,59 +1,59 @@ -# 3D Hand Keypoint Datasets - -It is recommended to symlink the dataset root to `$MMPOSE/data`. -If your folder structure is different, you may need to change the corresponding paths in config files. - -MMPose supported datasets: - -- [InterHand2.6M](#interhand26m) \[ [Homepage](https://mks0601.github.io/InterHand2.6M/) \] - -## InterHand2.6M - - - -
-InterHand2.6M (ECCV'2020) - -```bibtex -@InProceedings{Moon_2020_ECCV_InterHand2.6M, -author = {Moon, Gyeongsik and Yu, Shoou-I and Wen, He and Shiratori, Takaaki and Lee, Kyoung Mu}, -title = {InterHand2.6M: A Dataset and Baseline for 3D Interacting Hand Pose Estimation from a Single RGB Image}, -booktitle = {European Conference on Computer Vision (ECCV)}, -year = {2020} -} -``` - -
- -
- -
- -For [InterHand2.6M](https://mks0601.github.io/InterHand2.6M/), please download from [InterHand2.6M](https://mks0601.github.io/InterHand2.6M/). -Please download the annotation files from [annotations](https://download.openmmlab.com/mmpose/datasets/interhand2.6m_annotations.zip). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── interhand2.6m - |── annotations - | |── all - | |── human_annot - | |── machine_annot - | |── skeleton.txt - | |── subject.txt - | - `── images - | |── train - | | |-- Capture0 ~ Capture26 - | |── val - | | |-- Capture0 - | |── test - | | |-- Capture0 ~ Capture7 -``` +# 3D Hand Keypoint Datasets + +It is recommended to symlink the dataset root to `$MMPOSE/data`. +If your folder structure is different, you may need to change the corresponding paths in config files. + +MMPose supported datasets: + +- [InterHand2.6M](#interhand26m) \[ [Homepage](https://mks0601.github.io/InterHand2.6M/) \] + +## InterHand2.6M + + + +
+InterHand2.6M (ECCV'2020) + +```bibtex +@InProceedings{Moon_2020_ECCV_InterHand2.6M, +author = {Moon, Gyeongsik and Yu, Shoou-I and Wen, He and Shiratori, Takaaki and Lee, Kyoung Mu}, +title = {InterHand2.6M: A Dataset and Baseline for 3D Interacting Hand Pose Estimation from a Single RGB Image}, +booktitle = {European Conference on Computer Vision (ECCV)}, +year = {2020} +} +``` + +
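+
+After downloading and extracting the data as described below, a quick check such as the following can confirm that the expected folders and files are in place. This is only an illustrative sketch: the root path and entry names are taken from the directory tree shown below and are not part of the toolbox.
+
+```python
+# Minimal layout check for an extracted InterHand2.6M directory (assumed root path).
+import os
+
+root = 'data/interhand2.6m'  # assumed dataset root under $MMPOSE/data
+expected = [
+    'annotations/all',
+    'annotations/human_annot',
+    'annotations/machine_annot',
+    'annotations/skeleton.txt',
+    'annotations/subject.txt',
+    'images/train',
+    'images/val',
+    'images/test',
+]
+missing = [p for p in expected if not os.path.exists(os.path.join(root, p))]
+print('missing entries:', missing if missing else 'none')
+```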
+ +
+ +
+ +For [InterHand2.6M](https://mks0601.github.io/InterHand2.6M/), please download from [InterHand2.6M](https://mks0601.github.io/InterHand2.6M/). +Please download the annotation files from [annotations](https://download.openmmlab.com/mmpose/datasets/interhand2.6m_annotations.zip). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── interhand2.6m + |── annotations + | |── all + | |── human_annot + | |── machine_annot + | |── skeleton.txt + | |── subject.txt + | + `── images + | |── train + | | |-- Capture0 ~ Capture26 + | |── val + | | |-- Capture0 + | |── test + | | |-- Capture0 ~ Capture7 +``` diff --git a/docs/en/dataset_zoo/dataset_tools.md b/docs/en/dataset_zoo/dataset_tools.md index 44a7c96b2b..dd05d8f2af 100644 --- a/docs/en/dataset_zoo/dataset_tools.md +++ b/docs/en/dataset_zoo/dataset_tools.md @@ -1,398 +1,398 @@ -# Dataset Tools - -## Animal Pose - -
-Animal-Pose (ICCV'2019) - -```bibtex -@InProceedings{Cao_2019_ICCV, - author = {Cao, Jinkun and Tang, Hongyang and Fang, Hao-Shu and Shen, Xiaoyong and Lu, Cewu and Tai, Yu-Wing}, - title = {Cross-Domain Adaptation for Animal Pose Estimation}, - booktitle = {The IEEE International Conference on Computer Vision (ICCV)}, - month = {October}, - year = {2019} -} -``` - -
- -For [Animal-Pose](https://sites.google.com/view/animal-pose/) dataset, the images and annotations can be downloaded from [official website](https://sites.google.com/view/animal-pose/). The script `tools/dataset_converters/parse_animalpose_dataset.py` converts raw annotations into the format compatible with MMPose. The pre-processed [annotation files](https://download.openmmlab.com/mmpose/datasets/animalpose_annotations.tar) are available. If you would like to generate the annotations by yourself, please follow: - -1. Download the raw images and annotations and extract them under `$MMPOSE/data`. Make them look like this: - - ```text - mmpose - ├── mmpose - ├── docs - ├── tests - ├── tools - ├── configs - `── data - │── animalpose - │ - │-- VOC2012 - │ │-- Annotations - │ │-- ImageSets - │ │-- JPEGImages - │ │-- SegmentationClass - │ │-- SegmentationObject - │ - │-- animalpose_image_part2 - │ │-- cat - │ │-- cow - │ │-- dog - │ │-- horse - │ │-- sheep - │ - │-- PASCAL2011_animal_annotation - │ │-- cat - │ │ |-- 2007_000528_1.xml - │ │ |-- 2007_000549_1.xml - │ │ │-- ... - │ │-- cow - │ │-- dog - │ │-- horse - │ │-- sheep - │ - │-- annimalpose_anno2 - │ │-- cat - │ │ |-- ca1.xml - │ │ |-- ca2.xml - │ │ │-- ... - │ │-- cow - │ │-- dog - │ │-- horse - │ │-- sheep - ``` - -2. Run command - - ```bash - python tools/dataset_converters/parse_animalpose_dataset.py - ``` - - The generated annotation files are put in `$MMPOSE/data/animalpose/annotations`. - -The official dataset does not provide the official train/val/test set split. -We choose the images from PascalVOC for train & val. In total, we have 3608 images and 5117 annotations for train+val, where -2798 images with 4000 annotations are used for training, and 810 images with 1117 annotations are used for validation. -Those images from other sources (1000 images with 1000 annotations) are used for testing. - -## COFW - -
-COFW (ICCV'2013) - -```bibtex -@inproceedings{burgos2013robust, - title={Robust face landmark estimation under occlusion}, - author={Burgos-Artizzu, Xavier P and Perona, Pietro and Doll{\'a}r, Piotr}, - booktitle={Proceedings of the IEEE international conference on computer vision}, - pages={1513--1520}, - year={2013} -} -``` - -
- -For COFW data, please download from [COFW Dataset (Color Images)](https://data.caltech.edu/records/20099). -Move `COFW_train_color.mat` and `COFW_test_color.mat` to `$MMPOSE/data/cofw/` and make them look like: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── cofw - |── COFW_train_color.mat - |── COFW_test_color.mat -``` - -Run `pip install h5py` first to install the dependency, then run the following script under `$MMPOSE`: - -```bash -python tools/dataset_converters/parse_cofw_dataset.py -``` - -And you will get - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── cofw - |── COFW_train_color.mat - |── COFW_test_color.mat - |── annotations - | |── cofw_train.json - | |── cofw_test.json - |── images - |── 000001.jpg - |── 000002.jpg -``` - -## DeepposeKit - -
-Desert Locust (Elife'2019) - -```bibtex -@article{graving2019deepposekit, - title={DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning}, - author={Graving, Jacob M and Chae, Daniel and Naik, Hemal and Li, Liang and Koger, Benjamin and Costelloe, Blair R and Couzin, Iain D}, - journal={Elife}, - volume={8}, - pages={e47994}, - year={2019}, - publisher={eLife Sciences Publications Limited} -} -``` - -
- -For [Vinegar Fly](https://github.com/jgraving/DeepPoseKit-Data), [Desert Locust](https://github.com/jgraving/DeepPoseKit-Data), and [Grévy’s Zebra](https://github.com/jgraving/DeepPoseKit-Data) dataset, the annotations files can be downloaded from [DeepPoseKit-Data](https://github.com/jgraving/DeepPoseKit-Data). The script `tools/dataset_converters/parse_deepposekit_dataset.py` converts raw annotations into the format compatible with MMPose. The pre-processed annotation files are available at [vinegar_fly_annotations](https://download.openmmlab.com/mmpose/datasets/vinegar_fly_annotations.tar), [locust_annotations](https://download.openmmlab.com/mmpose/datasets/locust_annotations.tar), and [zebra_annotations](https://download.openmmlab.com/mmpose/datasets/zebra_annotations.tar). If you would like to generate the annotations by yourself, please follows: - -1. Download the raw images and annotations and extract them under `$MMPOSE/data`. Make them look like this: - - ```text - mmpose - ├── mmpose - ├── docs - ├── tests - ├── tools - ├── configs - `── data - | - |── DeepPoseKit-Data - | `── datasets - | |── fly - | | |── annotation_data_release.h5 - | | |── skeleton.csv - | | |── ... - | | - | |── locust - | | |── annotation_data_release.h5 - | | |── skeleton.csv - | | |── ... - | | - | `── zebra - | |── annotation_data_release.h5 - | |── skeleton.csv - | |── ... - | - │── fly - `-- images - │-- 0.jpg - │-- 1.jpg - │-- ... - ``` - - Note that the images can be downloaded from [vinegar_fly_images](https://download.openmmlab.com/mmpose/datasets/vinegar_fly_images.tar), [locust_images](https://download.openmmlab.com/mmpose/datasets/locust_images.tar), and [zebra_images](https://download.openmmlab.com/mmpose/datasets/zebra_images.tar). - -2. Run command - - ```bash - python tools/dataset_converters/parse_deepposekit_dataset.py - ``` - - The generated annotation files are put in `$MMPOSE/data/fly/annotations`, `$MMPOSE/data/locust/annotations`, and `$MMPOSE/data/zebra/annotations`. - -Since the official dataset does not provide the test set, we randomly select 90% images for training, and the rest (10%) for evaluation. - -## Macaque - -
-MacaquePose (bioRxiv'2020) - -```bibtex -@article{labuguen2020macaquepose, - title={MacaquePose: A novel ‘in the wild’macaque monkey pose dataset for markerless motion capture}, - author={Labuguen, Rollyn and Matsumoto, Jumpei and Negrete, Salvador and Nishimaru, Hiroshi and Nishijo, Hisao and Takada, Masahiko and Go, Yasuhiro and Inoue, Ken-ichi and Shibata, Tomohiro}, - journal={bioRxiv}, - year={2020}, - publisher={Cold Spring Harbor Laboratory} -} -``` - -
- -For [MacaquePose](http://www2.ehub.kyoto-u.ac.jp/datasets/macaquepose/index.html) dataset, images and annotations can be downloaded from [download](http://www2.ehub.kyoto-u.ac.jp/datasets/macaquepose/index.html). The script `tools/dataset_converters/parse_macaquepose_dataset.py` converts raw annotations into the format compatible with MMPose. The pre-processed [macaque_annotations](https://download.openmmlab.com/mmpose/datasets/macaque_annotations.tar) are available. If you would like to generate the annotations by yourself, please follows: - -1. Download the raw images and annotations and extract them under `$MMPOSE/data`. Make them look like this: - - ```text - mmpose - ├── mmpose - ├── docs - ├── tests - ├── tools - ├── configs - `── data - │── macaque - │-- annotations.csv - │-- images - │ │-- 01418849d54b3005.jpg - │ │-- 0142d1d1a6904a70.jpg - │ │-- 01ef2c4c260321b7.jpg - │ │-- 020a1c75c8c85238.jpg - │ │-- 020b1506eef2557d.jpg - │ │-- ... - ``` - -2. Run command - - ```bash - python tools/dataset_converters/parse_macaquepose_dataset.py - ``` - - The generated annotation files are put in `$MMPOSE/data/macaque/annotations`. - -Since the official dataset does not provide the test set, we randomly select 12500 images for training, and the rest for evaluation. - -## Human3.6M - -
-Human3.6M (TPAMI'2014) - -```bibtex -@article{h36m_pami, - author = {Ionescu, Catalin and Papava, Dragos and Olaru, Vlad and Sminchisescu, Cristian}, - title = {Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments}, - journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, - publisher = {IEEE Computer Society}, - volume = {36}, - number = {7}, - pages = {1325-1339}, - month = {jul}, - year = {2014} -} -``` - -
- -For [Human3.6M](http://vision.imar.ro/human3.6m/description.php), please download from the official website and place the files under `$MMPOSE/data/h36m`. -Then run the [preprocessing script](/tools/dataset_converters/preprocess_h36m.py): - -```bash -python tools/dataset_converters/preprocess_h36m.py --metadata {path to metadata.xml} --original data/h36m -``` - -This will extract camera parameters and pose annotations at full framerate (50 FPS) and downsampled framerate (10 FPS). The processed data should have the following structure: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - ├── h36m - ├── annotation_body3d - | ├── cameras.pkl - | ├── fps50 - | | ├── h36m_test.npz - | | ├── h36m_train.npz - | | ├── joint2d_rel_stats.pkl - | | ├── joint2d_stats.pkl - | | ├── joint3d_rel_stats.pkl - | | `── joint3d_stats.pkl - | `── fps10 - | ├── h36m_test.npz - | ├── h36m_train.npz - | ├── joint2d_rel_stats.pkl - | ├── joint2d_stats.pkl - | ├── joint3d_rel_stats.pkl - | `── joint3d_stats.pkl - `── images - ├── S1 - | ├── S1_Directions_1.54138969 - | | ├── S1_Directions_1.54138969_00001.jpg - | | ├── S1_Directions_1.54138969_00002.jpg - | | ├── ... - | ├── ... - ├── S5 - ├── S6 - ├── S7 - ├── S8 - ├── S9 - `── S11 -``` - -After that, the annotations need to be transformed into COCO format which is compatible with MMPose. Please run: - -```bash -python tools/dataset_converters/h36m_to_coco.py -``` - -## MPII - -
-MPII (CVPR'2014) - -```bibtex -@inproceedings{andriluka14cvpr, - author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, - title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, - booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2014}, - month = {June} -} -``` - -
- -During training and inference for [MPII](http://human-pose.mpi-inf.mpg.de/), the prediction result will be saved as '.mat' format by default. We also provide a tool to convert this `.mat` to more readable `.json` format. - -```shell -python tools/dataset_converters/mat2json ${PRED_MAT_FILE} ${GT_JSON_FILE} ${OUTPUT_PRED_JSON_FILE} -``` - -For example, - -```shell -python tools/dataset/mat2json work_dirs/res50_mpii_256x256/pred.mat data/mpii/annotations/mpii_val.json pred.json -``` - -## Label Studio - -
-Label Studio - -```bibtex -@misc{Label Studio, - title={{Label Studio}: Data labeling software}, - url={https://github.com/heartexlabs/label-studio}, - note={Open source software available from https://github.com/heartexlabs/label-studio}, - author={ - Maxim Tkachenko and - Mikhail Malyuk and - Andrey Holmanyuk and - Nikolai Liubimov}, - year={2020-2022}, -} -``` - -
- -For users of [Label Studio](https://github.com/heartexlabs/label-studio/), please follow the instructions in the [Label Studio to COCO document](./label_studio.md) to annotate and export the results as a Label Studio `.json` file. And save the `Code` from the `Labeling Interface` as an `.xml` file. - -We provide a script to convert Label Studio `.json` annotation file to COCO `.json` format file. It can be used by running the following command: - -```shell -python tools/dataset_converters/labelstudio2coco.py ${LS_JSON_FILE} ${LS_XML_FILE} ${OUTPUT_COCO_JSON_FILE} -``` - -For example, - -```shell -python tools/dataset_converters/labelstudio2coco.py config.xml project-1-at-2023-05-13-09-22-91b53efa.json output/result.json -``` +# Dataset Tools + +## Animal Pose + +
+Animal-Pose (ICCV'2019) + +```bibtex +@InProceedings{Cao_2019_ICCV, + author = {Cao, Jinkun and Tang, Hongyang and Fang, Hao-Shu and Shen, Xiaoyong and Lu, Cewu and Tai, Yu-Wing}, + title = {Cross-Domain Adaptation for Animal Pose Estimation}, + booktitle = {The IEEE International Conference on Computer Vision (ICCV)}, + month = {October}, + year = {2019} +} +``` + +
+ +For [Animal-Pose](https://sites.google.com/view/animal-pose/) dataset, the images and annotations can be downloaded from [official website](https://sites.google.com/view/animal-pose/). The script `tools/dataset_converters/parse_animalpose_dataset.py` converts raw annotations into the format compatible with MMPose. The pre-processed [annotation files](https://download.openmmlab.com/mmpose/datasets/animalpose_annotations.tar) are available. If you would like to generate the annotations by yourself, please follow: + +1. Download the raw images and annotations and extract them under `$MMPOSE/data`. Make them look like this: + + ```text + mmpose + ├── mmpose + ├── docs + ├── tests + ├── tools + ├── configs + `── data + │── animalpose + │ + │-- VOC2012 + │ │-- Annotations + │ │-- ImageSets + │ │-- JPEGImages + │ │-- SegmentationClass + │ │-- SegmentationObject + │ + │-- animalpose_image_part2 + │ │-- cat + │ │-- cow + │ │-- dog + │ │-- horse + │ │-- sheep + │ + │-- PASCAL2011_animal_annotation + │ │-- cat + │ │ |-- 2007_000528_1.xml + │ │ |-- 2007_000549_1.xml + │ │ │-- ... + │ │-- cow + │ │-- dog + │ │-- horse + │ │-- sheep + │ + │-- annimalpose_anno2 + │ │-- cat + │ │ |-- ca1.xml + │ │ |-- ca2.xml + │ │ │-- ... + │ │-- cow + │ │-- dog + │ │-- horse + │ │-- sheep + ``` + +2. Run command + + ```bash + python tools/dataset_converters/parse_animalpose_dataset.py + ``` + + The generated annotation files are put in `$MMPOSE/data/animalpose/annotations`. + +The official dataset does not provide the official train/val/test set split. +We choose the images from PascalVOC for train & val. In total, we have 3608 images and 5117 annotations for train+val, where +2798 images with 4000 annotations are used for training, and 810 images with 1117 annotations are used for validation. +Those images from other sources (1000 images with 1000 annotations) are used for testing. + +## COFW + +
+COFW (ICCV'2013) + +```bibtex +@inproceedings{burgos2013robust, + title={Robust face landmark estimation under occlusion}, + author={Burgos-Artizzu, Xavier P and Perona, Pietro and Doll{\'a}r, Piotr}, + booktitle={Proceedings of the IEEE international conference on computer vision}, + pages={1513--1520}, + year={2013} +} +``` + +
+ +For COFW data, please download from [COFW Dataset (Color Images)](https://data.caltech.edu/records/20099). +Move `COFW_train_color.mat` and `COFW_test_color.mat` to `$MMPOSE/data/cofw/` and make them look like: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── cofw + |── COFW_train_color.mat + |── COFW_test_color.mat +``` + +Run `pip install h5py` first to install the dependency, then run the following script under `$MMPOSE`: + +```bash +python tools/dataset_converters/parse_cofw_dataset.py +``` + +And you will get + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── cofw + |── COFW_train_color.mat + |── COFW_test_color.mat + |── annotations + | |── cofw_train.json + | |── cofw_test.json + |── images + |── 000001.jpg + |── 000002.jpg +``` + +## DeepposeKit + +
+Desert Locust (Elife'2019) + +```bibtex +@article{graving2019deepposekit, + title={DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning}, + author={Graving, Jacob M and Chae, Daniel and Naik, Hemal and Li, Liang and Koger, Benjamin and Costelloe, Blair R and Couzin, Iain D}, + journal={Elife}, + volume={8}, + pages={e47994}, + year={2019}, + publisher={eLife Sciences Publications Limited} +} +``` + +
+ +For [Vinegar Fly](https://github.com/jgraving/DeepPoseKit-Data), [Desert Locust](https://github.com/jgraving/DeepPoseKit-Data), and [Grévy’s Zebra](https://github.com/jgraving/DeepPoseKit-Data) dataset, the annotations files can be downloaded from [DeepPoseKit-Data](https://github.com/jgraving/DeepPoseKit-Data). The script `tools/dataset_converters/parse_deepposekit_dataset.py` converts raw annotations into the format compatible with MMPose. The pre-processed annotation files are available at [vinegar_fly_annotations](https://download.openmmlab.com/mmpose/datasets/vinegar_fly_annotations.tar), [locust_annotations](https://download.openmmlab.com/mmpose/datasets/locust_annotations.tar), and [zebra_annotations](https://download.openmmlab.com/mmpose/datasets/zebra_annotations.tar). If you would like to generate the annotations by yourself, please follows: + +1. Download the raw images and annotations and extract them under `$MMPOSE/data`. Make them look like this: + + ```text + mmpose + ├── mmpose + ├── docs + ├── tests + ├── tools + ├── configs + `── data + | + |── DeepPoseKit-Data + | `── datasets + | |── fly + | | |── annotation_data_release.h5 + | | |── skeleton.csv + | | |── ... + | | + | |── locust + | | |── annotation_data_release.h5 + | | |── skeleton.csv + | | |── ... + | | + | `── zebra + | |── annotation_data_release.h5 + | |── skeleton.csv + | |── ... + | + │── fly + `-- images + │-- 0.jpg + │-- 1.jpg + │-- ... + ``` + + Note that the images can be downloaded from [vinegar_fly_images](https://download.openmmlab.com/mmpose/datasets/vinegar_fly_images.tar), [locust_images](https://download.openmmlab.com/mmpose/datasets/locust_images.tar), and [zebra_images](https://download.openmmlab.com/mmpose/datasets/zebra_images.tar). + +2. Run command + + ```bash + python tools/dataset_converters/parse_deepposekit_dataset.py + ``` + + The generated annotation files are put in `$MMPOSE/data/fly/annotations`, `$MMPOSE/data/locust/annotations`, and `$MMPOSE/data/zebra/annotations`. + +Since the official dataset does not provide the test set, we randomly select 90% images for training, and the rest (10%) for evaluation. + +## Macaque + +
+MacaquePose (bioRxiv'2020)
+
+```bibtex
+@article{labuguen2020macaquepose,
+  title={MacaquePose: A novel ‘in the wild’ macaque monkey pose dataset for markerless motion capture},
+  author={Labuguen, Rollyn and Matsumoto, Jumpei and Negrete, Salvador and Nishimaru, Hiroshi and Nishijo, Hisao and Takada, Masahiko and Go, Yasuhiro and Inoue, Ken-ichi and Shibata, Tomohiro},
+  journal={bioRxiv},
+  year={2020},
+  publisher={Cold Spring Harbor Laboratory}
+}
+```
+
+ +For [MacaquePose](http://www2.ehub.kyoto-u.ac.jp/datasets/macaquepose/index.html) dataset, images and annotations can be downloaded from [download](http://www2.ehub.kyoto-u.ac.jp/datasets/macaquepose/index.html). The script `tools/dataset_converters/parse_macaquepose_dataset.py` converts raw annotations into the format compatible with MMPose. The pre-processed [macaque_annotations](https://download.openmmlab.com/mmpose/datasets/macaque_annotations.tar) are available. If you would like to generate the annotations by yourself, please follows: + +1. Download the raw images and annotations and extract them under `$MMPOSE/data`. Make them look like this: + + ```text + mmpose + ├── mmpose + ├── docs + ├── tests + ├── tools + ├── configs + `── data + │── macaque + │-- annotations.csv + │-- images + │ │-- 01418849d54b3005.jpg + │ │-- 0142d1d1a6904a70.jpg + │ │-- 01ef2c4c260321b7.jpg + │ │-- 020a1c75c8c85238.jpg + │ │-- 020b1506eef2557d.jpg + │ │-- ... + ``` + +2. Run command + + ```bash + python tools/dataset_converters/parse_macaquepose_dataset.py + ``` + + The generated annotation files are put in `$MMPOSE/data/macaque/annotations`. + +Since the official dataset does not provide the test set, we randomly select 12500 images for training, and the rest for evaluation. + +## Human3.6M + +
+Human3.6M (TPAMI'2014) + +```bibtex +@article{h36m_pami, + author = {Ionescu, Catalin and Papava, Dragos and Olaru, Vlad and Sminchisescu, Cristian}, + title = {Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments}, + journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, + publisher = {IEEE Computer Society}, + volume = {36}, + number = {7}, + pages = {1325-1339}, + month = {jul}, + year = {2014} +} +``` + +
+ +For [Human3.6M](http://vision.imar.ro/human3.6m/description.php), please download from the official website and place the files under `$MMPOSE/data/h36m`. +Then run the [preprocessing script](/tools/dataset_converters/preprocess_h36m.py): + +```bash +python tools/dataset_converters/preprocess_h36m.py --metadata {path to metadata.xml} --original data/h36m +``` + +This will extract camera parameters and pose annotations at full framerate (50 FPS) and downsampled framerate (10 FPS). The processed data should have the following structure: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + ├── h36m + ├── annotation_body3d + | ├── cameras.pkl + | ├── fps50 + | | ├── h36m_test.npz + | | ├── h36m_train.npz + | | ├── joint2d_rel_stats.pkl + | | ├── joint2d_stats.pkl + | | ├── joint3d_rel_stats.pkl + | | `── joint3d_stats.pkl + | `── fps10 + | ├── h36m_test.npz + | ├── h36m_train.npz + | ├── joint2d_rel_stats.pkl + | ├── joint2d_stats.pkl + | ├── joint3d_rel_stats.pkl + | `── joint3d_stats.pkl + `── images + ├── S1 + | ├── S1_Directions_1.54138969 + | | ├── S1_Directions_1.54138969_00001.jpg + | | ├── S1_Directions_1.54138969_00002.jpg + | | ├── ... + | ├── ... + ├── S5 + ├── S6 + ├── S7 + ├── S8 + ├── S9 + `── S11 +``` + +After that, the annotations need to be transformed into COCO format which is compatible with MMPose. Please run: + +```bash +python tools/dataset_converters/h36m_to_coco.py +``` + +## MPII + +
+MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
+ +During training and inference for [MPII](http://human-pose.mpi-inf.mpg.de/), the prediction result will be saved as '.mat' format by default. We also provide a tool to convert this `.mat` to more readable `.json` format. + +```shell +python tools/dataset_converters/mat2json ${PRED_MAT_FILE} ${GT_JSON_FILE} ${OUTPUT_PRED_JSON_FILE} +``` + +For example, + +```shell +python tools/dataset/mat2json work_dirs/res50_mpii_256x256/pred.mat data/mpii/annotations/mpii_val.json pred.json +``` + +## Label Studio + +
+Label Studio
+
+```bibtex
+@misc{LabelStudio,
+  title={{Label Studio}: Data labeling software},
+  url={https://github.com/heartexlabs/label-studio},
+  note={Open source software available from https://github.com/heartexlabs/label-studio},
+  author={
+    Maxim Tkachenko and
+    Mikhail Malyuk and
+    Andrey Holmanyuk and
+    Nikolai Liubimov},
+  year={2020-2022},
+}
+```
+
+ +For users of [Label Studio](https://github.com/heartexlabs/label-studio/), please follow the instructions in the [Label Studio to COCO document](./label_studio.md) to annotate and export the results as a Label Studio `.json` file. And save the `Code` from the `Labeling Interface` as an `.xml` file. + +We provide a script to convert Label Studio `.json` annotation file to COCO `.json` format file. It can be used by running the following command: + +```shell +python tools/dataset_converters/labelstudio2coco.py ${LS_JSON_FILE} ${LS_XML_FILE} ${OUTPUT_COCO_JSON_FILE} +``` + +For example, + +```shell +python tools/dataset_converters/labelstudio2coco.py config.xml project-1-at-2023-05-13-09-22-91b53efa.json output/result.json +``` diff --git a/docs/en/dataset_zoo/label_studio.md b/docs/en/dataset_zoo/label_studio.md index 3b499e05c6..93978a4172 100644 --- a/docs/en/dataset_zoo/label_studio.md +++ b/docs/en/dataset_zoo/label_studio.md @@ -1,76 +1,76 @@ -# Label Studio Annotations to COCO Script - -[Label Studio](https://labelstud.io/) is a popular deep learning annotation tool that can be used for annotating various tasks. However, for keypoint annotation, Label Studio can not directly export to the COCO format required by MMPose. This article will explain how to use Label Studio to annotate keypoint data and convert it into the required COCO format using the [labelstudio2coco.py](../../../tools/dataset_converters/labelstudio2coco.py) tool. - -## Label Studio Annotation Requirements - -According to the COCO format requirements, each annotated instance needs to include information about keypoints, segmentation, and bounding box (bbox). However, Label Studio scatters this information across different instances during annotation. Therefore, certain rules need to be followed during annotation to ensure proper usage with the subsequent scripts. - -1. Label Interface Setup - -For a newly created Label Studio project, the label interface needs to be set up. There should be three types of annotations: `KeyPointLabels`, `PolygonLabels`, and `RectangleLabels`, which correspond to `keypoints`, `segmentation`, and `bbox` in the COCO format, respectively. The following is an example of a label interface. You can find the `Labeling Interface` in the project's `Settings`, click on `Code`, and paste the following example. - -```xml - - - - - - - - - -``` - -2. Annotation Order - -Since it is necessary to combine annotations of different types into one instance, a specific order of annotation is required to determine whether the annotations belong to the same instance. Annotations should be made in the order of `KeyPointLabels` -> `PolygonLabels`/`RectangleLabels`. The order and number of `KeyPointLabels` should match the order and number of keypoints specified in the `dataset_info` in MMPose configuration file. The annotation order of `PolygonLabels` and `RectangleLabels` can be interchangeable, and only one of them needs to be annotated. The annotation should be within one instance starts with keypoints and ends with non-keypoints. The following image shows an annotation example: - -*Note: The bbox and area will be calculated based on the later PolygonLabels/RectangleLabels. If you annotate PolygonLabels first, the bbox will be based on the range of the later RectangleLabels, and the area will be equal to the area of the rectangle. 
Conversely, they will be based on the minimum bounding rectangle of the polygon and the area of the polygon.* - -![image](https://github.com/open-mmlab/mmpose/assets/15847281/b2d004d0-8361-42c5-9180-cfbac0373a94) - -3. Exporting Annotations - -Once the annotations are completed as described above, they need to be exported. Select the `Export` button on the project interface, choose the `JSON` format, and click `Export` to download the JSON file containing the labels. - -*Note: The exported file only contains the labels and does not include the original images. Therefore, the corresponding annotated images need to be provided separately. It is not recommended to use directly uploaded files because Label Studio truncates long filenames. Instead, use the export COCO format tool available in the `Export` functionality, which includes a folder with the image files within the downloaded compressed package.* - -![image](https://github.com/open-mmlab/mmpose/assets/15847281/9f54ca3d-8cdd-4d7f-8ed6-494badcfeaf2) - -## Usage of the Conversion Tool Script - -The conversion tool script is located at `tools/dataset_converters/labelstudio2coco.py`and can be used as follows: - -```bash -python tools/dataset_converters/labelstudio2coco.py config.xml project-1-at-2023-05-13-09-22-91b53efa.json output/result.json -``` - -Where `config.xml` contains the code from the Labeling Interface mentioned earlier, `project-1-at-2023-05-13-09-22-91b53efa.json` is the JSON file exported from Label Studio, and `output/result.json` is the path to the resulting JSON file in COCO format. If the path does not exist, the script will create it automatically. - -Afterward, place the image folder in the output directory to complete the conversion of the COCO dataset. The directory structure can be as follows: - -```bash -. -├── images -│   ├── 38b480f2.jpg -│   └── aeb26f04.jpg -└── result.json - -``` - -If you want to use this dataset in MMPose, you can make modifications like the following example: - -```python -dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='result.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, -) -``` +# Label Studio Annotations to COCO Script + +[Label Studio](https://labelstud.io/) is a popular deep learning annotation tool that can be used for annotating various tasks. However, for keypoint annotation, Label Studio can not directly export to the COCO format required by MMPose. This article will explain how to use Label Studio to annotate keypoint data and convert it into the required COCO format using the [labelstudio2coco.py](../../../tools/dataset_converters/labelstudio2coco.py) tool. + +## Label Studio Annotation Requirements + +According to the COCO format requirements, each annotated instance needs to include information about keypoints, segmentation, and bounding box (bbox). However, Label Studio scatters this information across different instances during annotation. Therefore, certain rules need to be followed during annotation to ensure proper usage with the subsequent scripts. + +1. Label Interface Setup + +For a newly created Label Studio project, the label interface needs to be set up. There should be three types of annotations: `KeyPointLabels`, `PolygonLabels`, and `RectangleLabels`, which correspond to `keypoints`, `segmentation`, and `bbox` in the COCO format, respectively. The following is an example of a label interface. 
You can find the `Labeling Interface` in the project's `Settings`, click on `Code`, and paste the following example. + +```xml + + + + + + + + + +``` + +2. Annotation Order + +Since it is necessary to combine annotations of different types into one instance, a specific order of annotation is required to determine whether the annotations belong to the same instance. Annotations should be made in the order of `KeyPointLabels` -> `PolygonLabels`/`RectangleLabels`. The order and number of `KeyPointLabels` should match the order and number of keypoints specified in the `dataset_info` in MMPose configuration file. The annotation order of `PolygonLabels` and `RectangleLabels` can be interchangeable, and only one of them needs to be annotated. The annotation should be within one instance starts with keypoints and ends with non-keypoints. The following image shows an annotation example: + +*Note: The bbox and area will be calculated based on the later PolygonLabels/RectangleLabels. If you annotate PolygonLabels first, the bbox will be based on the range of the later RectangleLabels, and the area will be equal to the area of the rectangle. Conversely, they will be based on the minimum bounding rectangle of the polygon and the area of the polygon.* + +![image](https://github.com/open-mmlab/mmpose/assets/15847281/b2d004d0-8361-42c5-9180-cfbac0373a94) + +3. Exporting Annotations + +Once the annotations are completed as described above, they need to be exported. Select the `Export` button on the project interface, choose the `JSON` format, and click `Export` to download the JSON file containing the labels. + +*Note: The exported file only contains the labels and does not include the original images. Therefore, the corresponding annotated images need to be provided separately. It is not recommended to use directly uploaded files because Label Studio truncates long filenames. Instead, use the export COCO format tool available in the `Export` functionality, which includes a folder with the image files within the downloaded compressed package.* + +![image](https://github.com/open-mmlab/mmpose/assets/15847281/9f54ca3d-8cdd-4d7f-8ed6-494badcfeaf2) + +## Usage of the Conversion Tool Script + +The conversion tool script is located at `tools/dataset_converters/labelstudio2coco.py`and can be used as follows: + +```bash +python tools/dataset_converters/labelstudio2coco.py config.xml project-1-at-2023-05-13-09-22-91b53efa.json output/result.json +``` + +Where `config.xml` contains the code from the Labeling Interface mentioned earlier, `project-1-at-2023-05-13-09-22-91b53efa.json` is the JSON file exported from Label Studio, and `output/result.json` is the path to the resulting JSON file in COCO format. If the path does not exist, the script will create it automatically. + +Afterward, place the image folder in the output directory to complete the conversion of the COCO dataset. The directory structure can be as follows: + +```bash +. 
+├── images +│   ├── 38b480f2.jpg +│   └── aeb26f04.jpg +└── result.json + +``` + +If you want to use this dataset in MMPose, you can make modifications like the following example: + +```python +dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='result.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, +) +``` diff --git a/docs/en/faq.md b/docs/en/faq.md index 3e81a312ca..80557aac7f 100644 --- a/docs/en/faq.md +++ b/docs/en/faq.md @@ -1,159 +1,159 @@ -# FAQ - -We list some common issues faced by many users and their corresponding solutions here. -Feel free to enrich the list if you find any frequent issues and have ways to help others to solve them. -If the contents here do not cover your issue, please create an issue using the [provided templates](/.github/ISSUE_TEMPLATE/error-report.md) and make sure you fill in all required information in the template. - -## Installation - -Compatibility issue between MMCV and MMPose; "AssertionError: MMCV==xxx is used but incompatible. Please install mmcv>=xxx, \<=xxx." - -Here are the version correspondences between `mmdet`, `mmcv` and `mmpose`: - -- mmdet 2.x \<=> mmpose 0.x \<=> mmcv 1.x -- mmdet 3.x \<=> mmpose 1.x \<=> mmcv 2.x - -Detailed compatible MMPose and MMCV versions are shown as below. Please choose the correct version of MMCV to avoid installation issues. - -### MMPose 1.x - -| MMPose version | MMCV/MMEngine version | -| :------------: | :-----------------------------: | -| 1.1.0 | mmcv>=2.0.1, mmengine>=0.8.0 | -| 1.0.0 | mmcv>=2.0.0, mmengine>=0.7.0 | -| 1.0.0rc1 | mmcv>=2.0.0rc4, mmengine>=0.6.0 | -| 1.0.0rc0 | mmcv>=2.0.0rc0, mmengine>=0.0.1 | -| 1.0.0b0 | mmcv>=2.0.0rc0, mmengine>=0.0.1 | - -### MMPose 0.x - -| MMPose version | MMCV version | -| :------------: | :-----------------------: | -| 0.x | mmcv-full>=1.3.8, \<1.8.0 | -| 0.29.0 | mmcv-full>=1.3.8, \<1.7.0 | -| 0.28.1 | mmcv-full>=1.3.8, \<1.7.0 | -| 0.28.0 | mmcv-full>=1.3.8, \<1.6.0 | -| 0.27.0 | mmcv-full>=1.3.8, \<1.6.0 | -| 0.26.0 | mmcv-full>=1.3.8, \<1.6.0 | -| 0.25.1 | mmcv-full>=1.3.8, \<1.6.0 | -| 0.25.0 | mmcv-full>=1.3.8, \<1.5.0 | -| 0.24.0 | mmcv-full>=1.3.8, \<1.5.0 | -| 0.23.0 | mmcv-full>=1.3.8, \<1.5.0 | -| 0.22.0 | mmcv-full>=1.3.8, \<1.5.0 | -| 0.21.0 | mmcv-full>=1.3.8, \<1.5.0 | -| 0.20.0 | mmcv-full>=1.3.8, \<1.4.0 | -| 0.19.0 | mmcv-full>=1.3.8, \<1.4.0 | -| 0.18.0 | mmcv-full>=1.3.8, \<1.4.0 | -| 0.17.0 | mmcv-full>=1.3.8, \<1.4.0 | -| 0.16.0 | mmcv-full>=1.3.8, \<1.4.0 | -| 0.14.0 | mmcv-full>=1.1.3, \<1.4.0 | -| 0.13.0 | mmcv-full>=1.1.3, \<1.4.0 | -| 0.12.0 | mmcv-full>=1.1.3, \<1.3 | -| 0.11.0 | mmcv-full>=1.1.3, \<1.3 | -| 0.10.0 | mmcv-full>=1.1.3, \<1.3 | -| 0.9.0 | mmcv-full>=1.1.3, \<1.3 | -| 0.8.0 | mmcv-full>=1.1.1, \<1.2 | -| 0.7.0 | mmcv-full>=1.1.1, \<1.2 | - -- **Unable to install xtcocotools** - - 1. Try to install it using pypi manually `pip install xtcocotools`. - 2. If step1 does not work. Try to install it from [source](https://github.com/jin-s13/xtcocoapi). - - ``` - git clone https://github.com/jin-s13/xtcocoapi - cd xtcocoapi - python setup.py install - ``` - -- **No matching distribution found for xtcocotools>=1.6** - - 1. Install cython by `pip install cython`. - 2. Install xtcocotools from [source](https://github.com/jin-s13/xtcocoapi). - - ``` - git clone https://github.com/jin-s13/xtcocoapi - cd xtcocoapi - python setup.py install - ``` - -- **"No module named 'mmcv.ops'"; "No module named 'mmcv.\_ext'"** - - 1. 
Uninstall existing mmcv in the environment using `pip uninstall mmcv`. - 2. Install mmcv following [mmcv installation instruction](https://mmcv.readthedocs.io/en/2.x/get_started/installation.html). - -## Data - -- **What if my custom dataset does not have bounding box label?** - - We can estimate the bounding box of a person as the minimal box that tightly bounds all the keypoints. - -- **What is `COCO_val2017_detections_AP_H_56_person.json`? Can I train pose models without it?** - - "COCO_val2017_detections_AP_H_56_person.json" contains the "detected" human bounding boxes for COCO validation set, which are generated by FasterRCNN. - One can choose to use gt bounding boxes to evaluate models, by setting `bbox_file=None` in `val_dataloader.dataset` in config. Or one can use detected boxes to evaluate - the generalizability of models, by setting `bbox_file='COCO_val2017_detections_AP_H_56_person.json'`. - -## Training - -- **RuntimeError: Address already in use** - - Set the environment variables `MASTER_PORT=XXX`. For example: - - ```shell - MASTER_PORT=29517 GPUS=16 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh train res50 configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192.py work_dirs/res50_coco_256x192 - ``` - -- **"Unexpected keys in source state dict" when loading pre-trained weights** - - It's normal that some layers in the pretrained model are not used in the pose model. ImageNet-pretrained classification network and the pose network may have different architectures (e.g. no classification head). So some unexpected keys in source state dict is actually expected. - -- **How to use trained models for backbone pre-training ?** - - Refer to [Migration - Step3: Model - Backbone](../migration.md). - - When training, the unexpected keys will be ignored. - -- **How to visualize the training accuracy/loss curves in real-time ?** - - Modify `vis_backends` in config file like: - - ```python - vis_backends = [ - dict(type='LocalVisBackend'), - dict(type='TensorboardVisBackend') - ] - ``` - - You can refer to [user_guides/visualization.md](../user_guides/visualization.md). - -- **Log info is NOT printed** - - Use smaller log interval. For example, change `interval=50` to `interval=1` in the config: - - ```python - # hooks - default_hooks = dict(logger=dict(interval=1)) - ``` - -## Evaluation - -- **How to evaluate on MPII test dataset?** - Since we do not have the ground-truth for test dataset, we cannot evaluate it 'locally'. - If you would like to evaluate the performance on test set, you have to upload the pred.mat (which is generated during testing) to the official server via email, according to [the MPII guideline](http://human-pose.mpi-inf.mpg.de/#evaluation). - -- **For top-down 2d pose estimation, why predicted joint coordinates can be out of the bounding box (bbox)?** - We do not directly use the bbox to crop the image. bbox will be first transformed to center & scale, and the scale will be multiplied by a factor (1.25) to include some context. If the ratio of width/height is different from that of model input (possibly 192/256), we will adjust the bbox. - -## Inference - -- **How to run mmpose on CPU?** - - Run demos with `--device=cpu`. - -- **How to speed up inference?** - - A few approaches may help to improve the inference speed: - - 1. Set `flip_test=False` in `init_cfg` in the config file. - 2. For top-down models, use faster human bounding box detector, see [MMDetection](https://mmdetection.readthedocs.io/en/3.x/model_zoo.html). 
+# FAQ + +We list some common issues faced by many users and their corresponding solutions here. +Feel free to enrich the list if you find any frequent issues and have ways to help others to solve them. +If the contents here do not cover your issue, please create an issue using the [provided templates](/.github/ISSUE_TEMPLATE/error-report.md) and make sure you fill in all required information in the template. + +## Installation + +Compatibility issue between MMCV and MMPose; "AssertionError: MMCV==xxx is used but incompatible. Please install mmcv>=xxx, \<=xxx." + +Here are the version correspondences between `mmdet`, `mmcv` and `mmpose`: + +- mmdet 2.x \<=> mmpose 0.x \<=> mmcv 1.x +- mmdet 3.x \<=> mmpose 1.x \<=> mmcv 2.x + +Detailed compatible MMPose and MMCV versions are shown as below. Please choose the correct version of MMCV to avoid installation issues. + +### MMPose 1.x + +| MMPose version | MMCV/MMEngine version | +| :------------: | :-----------------------------: | +| 1.1.0 | mmcv>=2.0.1, mmengine>=0.8.0 | +| 1.0.0 | mmcv>=2.0.0, mmengine>=0.7.0 | +| 1.0.0rc1 | mmcv>=2.0.0rc4, mmengine>=0.6.0 | +| 1.0.0rc0 | mmcv>=2.0.0rc0, mmengine>=0.0.1 | +| 1.0.0b0 | mmcv>=2.0.0rc0, mmengine>=0.0.1 | + +### MMPose 0.x + +| MMPose version | MMCV version | +| :------------: | :-----------------------: | +| 0.x | mmcv-full>=1.3.8, \<1.8.0 | +| 0.29.0 | mmcv-full>=1.3.8, \<1.7.0 | +| 0.28.1 | mmcv-full>=1.3.8, \<1.7.0 | +| 0.28.0 | mmcv-full>=1.3.8, \<1.6.0 | +| 0.27.0 | mmcv-full>=1.3.8, \<1.6.0 | +| 0.26.0 | mmcv-full>=1.3.8, \<1.6.0 | +| 0.25.1 | mmcv-full>=1.3.8, \<1.6.0 | +| 0.25.0 | mmcv-full>=1.3.8, \<1.5.0 | +| 0.24.0 | mmcv-full>=1.3.8, \<1.5.0 | +| 0.23.0 | mmcv-full>=1.3.8, \<1.5.0 | +| 0.22.0 | mmcv-full>=1.3.8, \<1.5.0 | +| 0.21.0 | mmcv-full>=1.3.8, \<1.5.0 | +| 0.20.0 | mmcv-full>=1.3.8, \<1.4.0 | +| 0.19.0 | mmcv-full>=1.3.8, \<1.4.0 | +| 0.18.0 | mmcv-full>=1.3.8, \<1.4.0 | +| 0.17.0 | mmcv-full>=1.3.8, \<1.4.0 | +| 0.16.0 | mmcv-full>=1.3.8, \<1.4.0 | +| 0.14.0 | mmcv-full>=1.1.3, \<1.4.0 | +| 0.13.0 | mmcv-full>=1.1.3, \<1.4.0 | +| 0.12.0 | mmcv-full>=1.1.3, \<1.3 | +| 0.11.0 | mmcv-full>=1.1.3, \<1.3 | +| 0.10.0 | mmcv-full>=1.1.3, \<1.3 | +| 0.9.0 | mmcv-full>=1.1.3, \<1.3 | +| 0.8.0 | mmcv-full>=1.1.1, \<1.2 | +| 0.7.0 | mmcv-full>=1.1.1, \<1.2 | + +- **Unable to install xtcocotools** + + 1. Try to install it using pypi manually `pip install xtcocotools`. + 2. If step1 does not work. Try to install it from [source](https://github.com/jin-s13/xtcocoapi). + + ``` + git clone https://github.com/jin-s13/xtcocoapi + cd xtcocoapi + python setup.py install + ``` + +- **No matching distribution found for xtcocotools>=1.6** + + 1. Install cython by `pip install cython`. + 2. Install xtcocotools from [source](https://github.com/jin-s13/xtcocoapi). + + ``` + git clone https://github.com/jin-s13/xtcocoapi + cd xtcocoapi + python setup.py install + ``` + +- **"No module named 'mmcv.ops'"; "No module named 'mmcv.\_ext'"** + + 1. Uninstall existing mmcv in the environment using `pip uninstall mmcv`. + 2. Install mmcv following [mmcv installation instruction](https://mmcv.readthedocs.io/en/2.x/get_started/installation.html). + +## Data + +- **What if my custom dataset does not have bounding box label?** + + We can estimate the bounding box of a person as the minimal box that tightly bounds all the keypoints. + +- **What is `COCO_val2017_detections_AP_H_56_person.json`? 
Can I train pose models without it?** + + "COCO_val2017_detections_AP_H_56_person.json" contains the "detected" human bounding boxes for COCO validation set, which are generated by FasterRCNN. + One can choose to use gt bounding boxes to evaluate models, by setting `bbox_file=None` in `val_dataloader.dataset` in config. Or one can use detected boxes to evaluate + the generalizability of models, by setting `bbox_file='COCO_val2017_detections_AP_H_56_person.json'`. + +## Training + +- **RuntimeError: Address already in use** + + Set the environment variables `MASTER_PORT=XXX`. For example: + + ```shell + MASTER_PORT=29517 GPUS=16 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh train res50 configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192.py work_dirs/res50_coco_256x192 + ``` + +- **"Unexpected keys in source state dict" when loading pre-trained weights** + + It's normal that some layers in the pretrained model are not used in the pose model. ImageNet-pretrained classification network and the pose network may have different architectures (e.g. no classification head). So some unexpected keys in source state dict is actually expected. + +- **How to use trained models for backbone pre-training ?** + + Refer to [Migration - Step3: Model - Backbone](../migration.md). + + When training, the unexpected keys will be ignored. + +- **How to visualize the training accuracy/loss curves in real-time ?** + + Modify `vis_backends` in config file like: + + ```python + vis_backends = [ + dict(type='LocalVisBackend'), + dict(type='TensorboardVisBackend') + ] + ``` + + You can refer to [user_guides/visualization.md](../user_guides/visualization.md). + +- **Log info is NOT printed** + + Use smaller log interval. For example, change `interval=50` to `interval=1` in the config: + + ```python + # hooks + default_hooks = dict(logger=dict(interval=1)) + ``` + +## Evaluation + +- **How to evaluate on MPII test dataset?** + Since we do not have the ground-truth for test dataset, we cannot evaluate it 'locally'. + If you would like to evaluate the performance on test set, you have to upload the pred.mat (which is generated during testing) to the official server via email, according to [the MPII guideline](http://human-pose.mpi-inf.mpg.de/#evaluation). + +- **For top-down 2d pose estimation, why predicted joint coordinates can be out of the bounding box (bbox)?** + We do not directly use the bbox to crop the image. bbox will be first transformed to center & scale, and the scale will be multiplied by a factor (1.25) to include some context. If the ratio of width/height is different from that of model input (possibly 192/256), we will adjust the bbox. + +## Inference + +- **How to run mmpose on CPU?** + + Run demos with `--device=cpu`. + +- **How to speed up inference?** + + A few approaches may help to improve the inference speed: + + 1. Set `flip_test=False` in `init_cfg` in the config file. + 2. For top-down models, use faster human bounding box detector, see [MMDetection](https://mmdetection.readthedocs.io/en/3.x/model_zoo.html). diff --git a/docs/en/guide_to_framework.md b/docs/en/guide_to_framework.md index 1bfe7d3b59..a1300eaa31 100644 --- a/docs/en/guide_to_framework.md +++ b/docs/en/guide_to_framework.md @@ -1,668 +1,668 @@ -# A 20-minute Tour to MMPose - -MMPose 1.0 is built upon a brand-new framework. For developers with basic knowledge of deep learning, this tutorial provides a overview of MMPose 1.0 framework design. 
Whether you are **a user of the previous version of MMPose**, or **a beginner of MMPose wishing to start with v1.0**, this tutorial will show you how to build a project based on MMPose 1.0. - -```{note} -This tutorial covers what developers will concern when using MMPose 1.0: - -- Overall code architecture - -- How to manage modules with configs - -- How to use my own custom datasets - -- How to add new modules(backbone, head, loss function, etc.) -``` - -The content of this tutorial is organized as follows: - -- [A 20 Minute Guide to MMPose Framework](#a-20-minute-guide-to-mmpose-framework) - - [Overview](#overview) - - [Step1: Configs](#step1-configs) - - [Step2: Data](#step2-data) - - [Dataset Meta Information](#dataset-meta-information) - - [Dataset](#dataset) - - [Pipeline](#pipeline) - - [i. Augmentation](#i-augmentation) - - [ii. Transformation](#ii-transformation) - - [iii. Encoding](#iii-encoding) - - [iv. Packing](#iv-packing) - - [Step3: Model](#step3-model) - - [Data Preprocessor](#data-preprocessor) - - [Backbone](#backbone) - - [Neck](#neck) - - [Head](#head) - -## Overview - -![overall-en](https://user-images.githubusercontent.com/13503330/187372008-2a94bad5-5252-4155-9ae3-3da1c426f569.png) - -Generally speaking, there are **five parts** developers will use during project development: - -- **General:** Environment, Hook, Checkpoint, Logger, etc. - -- **Data:** Dataset, Dataloader, Data Augmentation, etc. - -- **Training:** Optimizer, Learning Rate Scheduler, etc. - -- **Model:** Backbone, Neck, Head, Loss function, etc. - -- **Evaluation:** Metric, Evaluator, etc. - -Among them, modules related to **General**, **Training** and **Evaluation** are often provided by the training framework [MMEngine](https://github.com/open-mmlab/mmengine), and developers only need to call APIs and adjust the parameters. Developers mainly focus on implementing the **Data** and **Model** parts. - -## Step1: Configs - -In MMPose, we use a Python file as config for the definition and parameter management of the whole project. Therefore, we strongly recommend the developers who use MMPose for the first time to refer to [Configs](./user_guides/configs.md). - -Note that all new modules need to be registered using `Registry` and imported in `__init__.py` in the corresponding directory before we can create their instances from configs. - -## Step2: Data - -The organization of data in MMPose contains: - -- Dataset Meta Information - -- Dataset - -- Pipeline - -### Dataset Meta Information - -The meta information of a pose dataset usually includes the definition of keypoints and skeleton, symmetrical characteristic, and keypoint properties (e.g. belonging to upper or lower body, weights and sigmas). These information is important in data preprocessing, model training and evaluation. In MMpose, the dataset meta information is stored in configs files under `$MMPOSE/configs/_base_/datasets/`. - -To use a custom dataset in MMPose, you need to add a new config file of the dataset meta information. Take the MPII dataset (`$MMPOSE/configs/_base_/datasets/mpii.py`) as an example. 
Here is its dataset information: - -```Python -dataset_info = dict( - dataset_name='mpii', - paper_info=dict( - author='Mykhaylo Andriluka and Leonid Pishchulin and ' - 'Peter Gehler and Schiele, Bernt', - title='2D Human Pose Estimation: New Benchmark and ' - 'State of the Art Analysis', - container='IEEE Conference on Computer Vision and ' - 'Pattern Recognition (CVPR)', - year='2014', - homepage='http://human-pose.mpi-inf.mpg.de/', - ), - keypoint_info={ - 0: - dict( - name='right_ankle', - id=0, - color=[255, 128, 0], - type='lower', - swap='left_ankle'), - ## omitted - }, - skeleton_info={ - 0: - dict(link=('right_ankle', 'right_knee'), id=0, color=[255, 128, 0]), - ## omitted - }, - joint_weights=[ - 1.5, 1.2, 1., 1., 1.2, 1.5, 1., 1., 1., 1., 1.5, 1.2, 1., 1., 1.2, 1.5 - ], - # Adapted from COCO dataset. - sigmas=[ - 0.089, 0.083, 0.107, 0.107, 0.083, 0.089, 0.026, 0.026, 0.026, 0.026, - 0.062, 0.072, 0.179, 0.179, 0.072, 0.062 - ]) -``` - -In the model config, the user needs to specify the metainfo path of the custom dataset (e.g. `$MMPOSE/configs/_base_/datasets/custom.py`) as follows:\`\`\` - -```python -# dataset and dataloader settings -dataset_type = 'MyCustomDataset' # or 'CocoDataset' - -train_dataloader = dict( - batch_size=2, - dataset=dict( - type=dataset_type, - data_root='root/of/your/train/data', - ann_file='path/to/your/train/json', - data_prefix=dict(img='path/to/your/train/img'), - # specify the new dataset meta information config file - metainfo=dict(from_file='configs/_base_/datasets/custom.py'), - ...), - ) - -val_dataloader = dict( - batch_size=2, - dataset=dict( - type=dataset_type, - data_root='root/of/your/val/data', - ann_file='path/to/your/val/json', - data_prefix=dict(img='path/to/your/val/img'), - # specify the new dataset meta information config file - metainfo=dict(from_file='configs/_base_/datasets/custom.py'), - ...), - ) - -test_dataloader = val_dataloader -``` - -### Dataset - -To use custom dataset in MMPose, we recommend converting the annotations into a supported format (e.g. COCO or MPII) and directly using our implementation of the corresponding dataset. If this is not applicable, you may need to implement your own dataset class. - -Most 2D keypoint datasets in MMPose **organize the annotations in a COCO-like style**. Thus we provide a base class [BaseCocoStyleDataset](mmpose/datasets/datasets/base/base_coco_style_dataset.py) for these datasets. We recommend that users subclass `BaseCocoStyleDataset` and override the methods as needed (usually `__init__()` and `_load_annotations()`) to extend to a new custom 2D keypoint dataset. - -```{note} -Please refer to [COCO](./dataset_zoo/2d_body_keypoint.md) for more details about the COCO data format. -``` - -```{note} -The bbox format in MMPose is in `xyxy` instead of `xywh`, which is consistent with the format used in other OpenMMLab projects like [MMDetection](https://github.com/open-mmlab/mmdetection). We provide useful utils for bbox format conversion, such as `bbox_xyxy2xywh`, `bbox_xywh2xyxy`, `bbox_xyxy2cs`, etc., which are defined in `$MMPOSE/mmpose/structures/bbox/transforms.py`. -``` - -Let's take the implementation of the MPII dataset (`$MMPOSE/mmpose/datasets/datasets/body/mpii_dataset.py`) as an example. 
- -```Python -@DATASETS.register_module() -class MpiiDataset(BaseCocoStyleDataset): - METAINFO: dict = dict(from_file='configs/_base_/datasets/mpii.py') - - def __init__(self, - ## omitted - headbox_file: Optional[str] = None, - ## omitted - ): - - if headbox_file: - if data_mode != 'topdown': - raise ValueError( - f'{self.__class__.__name__} is set to {data_mode}: ' - 'mode, while "headbox_file" is only ' - 'supported in topdown mode.') - - if not test_mode: - raise ValueError( - f'{self.__class__.__name__} has `test_mode==False` ' - 'while "headbox_file" is only ' - 'supported when `test_mode==True`.') - - headbox_file_type = headbox_file[-3:] - allow_headbox_file_type = ['mat'] - if headbox_file_type not in allow_headbox_file_type: - raise KeyError( - f'The head boxes file type {headbox_file_type} is not ' - f'supported. Should be `mat` but got {headbox_file_type}.') - self.headbox_file = headbox_file - - super().__init__( - ## omitted - ) - - def _load_annotations(self) -> List[dict]: - """Load data from annotations in MPII format.""" - check_file_exist(self.ann_file) - with open(self.ann_file) as anno_file: - anns = json.load(anno_file) - - if self.headbox_file: - check_file_exist(self.headbox_file) - headbox_dict = loadmat(self.headbox_file) - headboxes_src = np.transpose(headbox_dict['headboxes_src'], - [2, 0, 1]) - SC_BIAS = 0.6 - - data_list = [] - ann_id = 0 - - # mpii bbox scales are normalized with factor 200. - pixel_std = 200. - - for idx, ann in enumerate(anns): - center = np.array(ann['center'], dtype=np.float32) - scale = np.array([ann['scale'], ann['scale']], - dtype=np.float32) * pixel_std - - # Adjust center/scale slightly to avoid cropping limbs - if center[0] != -1: - center[1] = center[1] + 15. / pixel_std * scale[1] - - # MPII uses matlab format, index is 1-based, - # we should first convert to 0-based index - center = center - 1 - - # unify shape with coco datasets - center = center.reshape(1, -1) - scale = scale.reshape(1, -1) - bbox = bbox_cs2xyxy(center, scale) - - # load keypoints in shape [1, K, 2] and keypoints_visible in [1, K] - keypoints = np.array(ann['joints']).reshape(1, -1, 2) - keypoints_visible = np.array(ann['joints_vis']).reshape(1, -1) - - data_info = { - 'id': ann_id, - 'img_id': int(ann['image'].split('.')[0]), - 'img_path': osp.join(self.data_prefix['img'], ann['image']), - 'bbox_center': center, - 'bbox_scale': scale, - 'bbox': bbox, - 'bbox_score': np.ones(1, dtype=np.float32), - 'keypoints': keypoints, - 'keypoints_visible': keypoints_visible, - } - - if self.headbox_file: - # calculate the diagonal length of head box as norm_factor - headbox = headboxes_src[idx] - head_size = np.linalg.norm(headbox[1] - headbox[0], axis=0) - head_size *= SC_BIAS - data_info['head_size'] = head_size.reshape(1, -1) - - data_list.append(data_info) - ann_id = ann_id + 1 - - return data_list -``` - -When supporting MPII dataset, since we need to use `head_size` to calculate `PCKh`, we add `headbox_file` to `__init__()` and override`_load_annotations()`. - -To support a dataset that is beyond the scope of `BaseCocoStyleDataset`, you may need to subclass from the `BaseDataset` provided by [MMEngine](https://github.com/open-mmlab/mmengine). Please refer to the [documents](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/basedataset.html) for details. - -### Pipeline - -Data augmentations and transformations during pre-processing are organized as a pipeline. 
Here is an example of typical pipelines: - -```Python -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -test_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] -``` - -In a keypoint detection task, data will be transformed among three scale spaces: - -- **Original Image Space**: the space where the images are stored. The sizes of different images are not necessarily the same - -- **Input Image Space**: the image space used for model input. All **images** and **annotations** will be transformed into this space, such as `256x256`, `256x192`, etc. - -- **Output Space**: the scale space where model outputs are located, such as `64x64(Heatmap)`,`1x1(Regression)`, etc. The supervision signal is also in this space during training - -Here is a diagram to show the workflow of data transformation among the three scale spaces: - -![migration-en](https://user-images.githubusercontent.com/13503330/187190213-cad87b5f-0a95-4f1f-b722-15896914ded4.png) - -In MMPose, the modules used for data transformation are under `$MMPOSE/mmpose/datasets/transforms`, and their workflow is shown as follows: - -![transforms-en](https://user-images.githubusercontent.com/13503330/187190352-a7662346-b8da-4256-9192-c7a84b15cbb5.png) - -#### i. Augmentation - -Commonly used transforms are defined in `$MMPOSE/mmpose/datasets/transforms/common_transforms.py`, such as `RandomFlip`, `RandomHalfBody`, etc. - -For top-down methods, `Shift`, `Rotate`and `Resize` are implemented by `RandomBBoxTransform`**.** For bottom-up methods, `BottomupRandomAffine` is used. - -```{note} -Most data transforms depend on `bbox_center` and `bbox_scale`, which can be obtained by `GetBBoxCenterScale`. -``` - -#### ii. Transformation - -Affine transformation is used to convert images and annotations from the original image space to the input space. This is done by `TopdownAffine` for top-down methods and `BottomupRandomAffine` for bottom-up methods. - -#### iii. Encoding - -In training phase, after the data is transformed from the original image space into the input space, it is necessary to use `GenerateTarget` to obtain the training target(e.g. Gaussian Heatmaps). We name this process **Encoding**. Conversely, the process of getting the corresponding coordinates from Gaussian Heatmaps is called **Decoding**. - -In MMPose, we collect Encoding and Decoding processes into a **Codec**, in which `encode()` and `decode()` are implemented. - -Currently we support the following types of Targets. - -- `heatmap`: Gaussian heatmaps -- `keypoint_label`: keypoint representation (e.g. normalized coordinates) -- `keypoint_xy_label`: axis-wise keypoint representation -- `heatmap+keypoint_label`: Gaussian heatmaps and keypoint representation -- `multiscale_heatmap`: multi-scale Gaussian heatmaps - -and the generated targets will be packed as follows. - -- `heatmaps`: Gaussian heatmaps -- `keypoint_labels`: keypoint representation (e.g. 
normalized coordinates) -- `keypoint_x_labels`: keypoint x-axis representation -- `keypoint_y_labels`: keypoint y-axis representation -- `keypoint_weights`: keypoint visibility and weights - -Note that we unify the data format of top-down and bottom-up methods, which means that a new dimension is added to represent different instances from the same image, in shape: - -```Python -[batch_size, num_instances, num_keypoints, dim_coordinates] -``` - -- top-down: `[B, 1, K, D]` - -- Bottom-up: `[B, N, K, D]` - -The provided codecs are stored under `$MMPOSE/mmpose/codecs`. - -```{note} -If you wish to customize a new codec, you can refer to [Codec](./user_guides/codecs.md) for more details. -``` - -#### iv. Packing - -After the data is transformed, you need to pack it using `PackPoseInputs`. - -This method converts the data stored in the dictionary `results` into standard data structures in MMPose, such as `InstanceData`, `PixelData`, `PoseDataSample`, etc. - -Specifically, we divide the data into `gt` (ground-truth) and `pred` (prediction), each of which has the following types: - -- **instances**(numpy.array): instance-level raw annotations or predictions in the original scale space -- **instance_labels**(torch.tensor): instance-level training labels (e.g. normalized coordinates, keypoint visibility) in the output scale space -- **fields**(torch.tensor): pixel-level training labels or predictions (e.g. Gaussian Heatmaps) in the output scale space - -The following is an example of the implementation of `PoseDataSample` under the hood: - -```Python -def get_pose_data_sample(self): - # meta - pose_meta = dict( - img_shape=(600, 900), # [h, w, c] - crop_size=(256, 192), # [h, w] - heatmap_size=(64, 48), # [h, w] - ) - - # gt_instances - gt_instances = InstanceData() - gt_instances.bboxes = np.random.rand(1, 4) - gt_instances.keypoints = np.random.rand(1, 17, 2) - - # gt_instance_labels - gt_instance_labels = InstanceData() - gt_instance_labels.keypoint_labels = torch.rand(1, 17, 2) - gt_instance_labels.keypoint_weights = torch.rand(1, 17) - - # pred_instances - pred_instances = InstanceData() - pred_instances.keypoints = np.random.rand(1, 17, 2) - pred_instances.keypoint_scores = np.random.rand(1, 17) - - # gt_fields - gt_fields = PixelData() - gt_fields.heatmaps = torch.rand(17, 64, 48) - - # pred_fields - pred_fields = PixelData() - pred_fields.heatmaps = torch.rand(17, 64, 48) - data_sample = PoseDataSample( - gt_instances=gt_instances, - pred_instances=pred_instances, - gt_fields=gt_fields, - pred_fields=pred_fields, - metainfo=pose_meta) - - return data_sample -``` - -## Step3: Model - -In MMPose 1.0, the model consists of the following components: - -- **Data Preprocessor**: perform data normalization and channel transposition - -- **Backbone**: used for feature extraction - -- **Neck**: GAP,FPN, etc. are optional - -- **Head**: used to implement the core algorithm and loss function - -We define a base class `BasePoseEstimator` for the model in `$MMPOSE/models/pose_estimators/base.py`. All models, e.g. `TopdownPoseEstimator`, should inherit from this base class and override the corresponding methods. - -Three modes are provided in `forward()` of the estimator: - -- `mode == 'loss'`: return the result of loss function for model training - -- `mode == 'predict'`: return the prediction result in the input space, used for model inference - -- `mode == 'tensor'`: return the model output in the output space, i.e. 
model forward propagation only, for model export - -Developers should build the components by calling the corresponding registry. Taking the top-down model as an example: - -```Python -@MODELS.register_module() -class TopdownPoseEstimator(BasePoseEstimator): - def __init__(self, - backbone: ConfigType, - neck: OptConfigType = None, - head: OptConfigType = None, - train_cfg: OptConfigType = None, - test_cfg: OptConfigType = None, - data_preprocessor: OptConfigType = None, - init_cfg: OptMultiConfig = None): - super().__init__(data_preprocessor, init_cfg) - - self.backbone = MODELS.build(backbone) - - if neck is not None: - self.neck = MODELS.build(neck) - - if head is not None: - self.head = MODELS.build(head) -``` - -### Data Preprocessor - -Starting from MMPose 1.0, we have added a new module to the model called data preprocessor, which performs data preprocessings like image normalization and channel transposition. It can benefit from the high computing power of devices like GPU, and improve the integrity in model export and deployment. - -A typical `data_preprocessor` in the config is as follows: - -```Python -data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), -``` - -It will transpose the channel order of the input image from `bgr` to `rgb` and normalize the data according to `mean` and `std`. - -### Backbone - -MMPose provides some commonly used backbones under `$MMPOSE/mmpose/models/backbones`. - -In practice, developers often use pre-trained backbone weights for transfer learning, which can improve the performance of the model on small datasets. - -In MMPose, you can use the pre-trained weights by setting `init_cfg` in config: - -```Python -init_cfg=dict( - type='Pretrained', - checkpoint='PATH/TO/YOUR_MODEL_WEIGHTS.pth'), -``` - -If you want to load a checkpoint to your backbone, you should specify the `prefix`: - -```Python -init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='PATH/TO/YOUR_CHECKPOINT.pth'), -``` - -`checkpoint` can be either a local path or a download link. Thus, if you wish to use a pre-trained model provided by Torchvision(e.g. ResNet50), you can simply use: - -```Python -init_cfg=dict( - type='Pretrained', - checkpoint='torchvision://resnet50') -``` - -In addition to these commonly used backbones, you can easily use backbones from other repositories in the OpenMMLab family such as MMClassification, which all share the same config system and provide pre-trained weights. - -It should be emphasized that if you add a new backbone, you need to register it by doing: - -```Python -@MODELS.register_module() -class YourBackbone(BaseBackbone): -``` - -Besides, import it in `$MMPOSE/mmpose/models/backbones/__init__.py`, and add it to `__all__`. - -### Neck - -Neck is usually a module between Backbone and Head, which is used in some algorithms. Here are some commonly used Neck: - -- Global Average Pooling (GAP) - -- Feature Pyramid Networks (FPN) - -- Feature Map Processor (FMP) - - The `FeatureMapProcessor` is a flexible PyTorch module designed to transform the feature outputs generated by backbones into a format suitable for heads. It achieves this by utilizing non-parametric operations such as selecting, concatenating, and rescaling. Below are some examples along with their corresponding configurations: - - - Select operation - - ```python - neck=dict(type='FeatureMapProcessor', select_index=0) - ``` - -
- - - Concatenate operation - - ```python - neck=dict(type='FeatureMapProcessor', concat=True) - ``` - -
- - Note that all feature maps will be resized to match the shape of the first feature map (index 0) prior to concatenation. - - - rescale operation - - ```python - neck=dict(type='FeatureMapProcessor', scale_factor=2.0) - ``` - -
- -### Head - -Generally speaking, Head is often the core of an algorithm, which is used to make predictions and perform loss calculation. - -Modules related to Head in MMPose are defined under `$MMPOSE/mmpose/models/heads`, and developers need to inherit the base class `BaseHead` when customizing Head and override the following methods: - -- forward() - -- predict() - -- loss() - -Specifically, `predict()` method needs to return pose predictions in the image space, which is obtained from the model output though the decoding function provided by the codec. We implement this process in `BaseHead.decode()`. - -On the other hand, we will perform test-time augmentation(TTA) in `predict()`. - -A commonly used TTA is `flip_test`, namely, an image and its flipped version are sent into the model to inference, and the output of the flipped version will be flipped back, then average them to stabilize the prediction. - -Here is an example of `predict()` in `RegressionHead`: - -```Python -def predict(self, - feats: Tuple[Tensor], - batch_data_samples: OptSampleList, - test_cfg: ConfigType = {}) -> Predictions: - """Predict results from outputs.""" - - if test_cfg.get('flip_test', False): - # TTA: flip test -> feats = [orig, flipped] - assert isinstance(feats, list) and len(feats) == 2 - flip_indices = batch_data_samples[0].metainfo['flip_indices'] - input_size = batch_data_samples[0].metainfo['input_size'] - _feats, _feats_flip = feats - _batch_coords = self.forward(_feats) - _batch_coords_flip = flip_coordinates( - self.forward(_feats_flip), - flip_indices=flip_indices, - shift_coords=test_cfg.get('shift_coords', True), - input_size=input_size) - batch_coords = (_batch_coords + _batch_coords_flip) * 0.5 - else: - batch_coords = self.forward(feats) # (B, K, D) - - batch_coords.unsqueeze_(dim=1) # (B, N, K, D) - preds = self.decode(batch_coords) -``` - -The `loss()` not only performs the calculation of loss functions, but also the calculation of training-time metrics such as pose accuracy. The results are carried by a dictionary `losses`: - -```Python - # calculate accuracy -_, avg_acc, _ = keypoint_pck_accuracy( - pred=to_numpy(pred_coords), - gt=to_numpy(keypoint_labels), - mask=to_numpy(keypoint_weights) > 0, - thr=0.05, - norm_factor=np.ones((pred_coords.size(0), 2), dtype=np.float32)) - -acc_pose = torch.tensor(avg_acc, device=keypoint_labels.device) -losses.update(acc_pose=acc_pose) -``` - -The data of each batch is packaged into `batch_data_samples`. 
Taking the Regression-based method as an example, the normalized coordinates and keypoint weights can be obtained as follows: - -```Python -keypoint_labels = torch.cat( - [d.gt_instance_labels.keypoint_labels for d in batch_data_samples]) -keypoint_weights = torch.cat([ - d.gt_instance_labels.keypoint_weights for d in batch_data_samples -]) -``` - -Here is the complete implementation of `loss()` in `RegressionHead`: - -```Python -def loss(self, - inputs: Tuple[Tensor], - batch_data_samples: OptSampleList, - train_cfg: ConfigType = {}) -> dict: - """Calculate losses from a batch of inputs and data samples.""" - - pred_outputs = self.forward(inputs) - - keypoint_labels = torch.cat( - [d.gt_instance_labels.keypoint_labels for d in batch_data_samples]) - keypoint_weights = torch.cat([ - d.gt_instance_labels.keypoint_weights for d in batch_data_samples - ]) - - # calculate losses - losses = dict() - loss = self.loss_module(pred_outputs, keypoint_labels, - keypoint_weights.unsqueeze(-1)) - - if isinstance(loss, dict): - losses.update(loss) - else: - losses.update(loss_kpt=loss) - - # calculate accuracy - _, avg_acc, _ = keypoint_pck_accuracy( - pred=to_numpy(pred_outputs), - gt=to_numpy(keypoint_labels), - mask=to_numpy(keypoint_weights) > 0, - thr=0.05, - norm_factor=np.ones((pred_outputs.size(0), 2), dtype=np.float32)) - acc_pose = torch.tensor(avg_acc, device=keypoint_labels.device) - losses.update(acc_pose=acc_pose) - - return losses -``` +# A 20-minute Tour to MMPose + +MMPose 1.0 is built upon a brand-new framework. For developers with basic knowledge of deep learning, this tutorial provides a overview of MMPose 1.0 framework design. Whether you are **a user of the previous version of MMPose**, or **a beginner of MMPose wishing to start with v1.0**, this tutorial will show you how to build a project based on MMPose 1.0. + +```{note} +This tutorial covers what developers will concern when using MMPose 1.0: + +- Overall code architecture + +- How to manage modules with configs + +- How to use my own custom datasets + +- How to add new modules(backbone, head, loss function, etc.) +``` + +The content of this tutorial is organized as follows: + +- [A 20 Minute Guide to MMPose Framework](#a-20-minute-guide-to-mmpose-framework) + - [Overview](#overview) + - [Step1: Configs](#step1-configs) + - [Step2: Data](#step2-data) + - [Dataset Meta Information](#dataset-meta-information) + - [Dataset](#dataset) + - [Pipeline](#pipeline) + - [i. Augmentation](#i-augmentation) + - [ii. Transformation](#ii-transformation) + - [iii. Encoding](#iii-encoding) + - [iv. Packing](#iv-packing) + - [Step3: Model](#step3-model) + - [Data Preprocessor](#data-preprocessor) + - [Backbone](#backbone) + - [Neck](#neck) + - [Head](#head) + +## Overview + +![overall-en](https://user-images.githubusercontent.com/13503330/187372008-2a94bad5-5252-4155-9ae3-3da1c426f569.png) + +Generally speaking, there are **five parts** developers will use during project development: + +- **General:** Environment, Hook, Checkpoint, Logger, etc. + +- **Data:** Dataset, Dataloader, Data Augmentation, etc. + +- **Training:** Optimizer, Learning Rate Scheduler, etc. + +- **Model:** Backbone, Neck, Head, Loss function, etc. + +- **Evaluation:** Metric, Evaluator, etc. + +Among them, modules related to **General**, **Training** and **Evaluation** are often provided by the training framework [MMEngine](https://github.com/open-mmlab/mmengine), and developers only need to call APIs and adjust the parameters. 
Developers mainly focus on implementing the **Data** and **Model** parts. + +## Step1: Configs + +In MMPose, we use a Python file as config for the definition and parameter management of the whole project. Therefore, we strongly recommend the developers who use MMPose for the first time to refer to [Configs](./user_guides/configs.md). + +Note that all new modules need to be registered using `Registry` and imported in `__init__.py` in the corresponding directory before we can create their instances from configs. + +## Step2: Data + +The organization of data in MMPose contains: + +- Dataset Meta Information + +- Dataset + +- Pipeline + +### Dataset Meta Information + +The meta information of a pose dataset usually includes the definition of keypoints and skeleton, symmetrical characteristic, and keypoint properties (e.g. belonging to upper or lower body, weights and sigmas). These information is important in data preprocessing, model training and evaluation. In MMpose, the dataset meta information is stored in configs files under `$MMPOSE/configs/_base_/datasets/`. + +To use a custom dataset in MMPose, you need to add a new config file of the dataset meta information. Take the MPII dataset (`$MMPOSE/configs/_base_/datasets/mpii.py`) as an example. Here is its dataset information: + +```Python +dataset_info = dict( + dataset_name='mpii', + paper_info=dict( + author='Mykhaylo Andriluka and Leonid Pishchulin and ' + 'Peter Gehler and Schiele, Bernt', + title='2D Human Pose Estimation: New Benchmark and ' + 'State of the Art Analysis', + container='IEEE Conference on Computer Vision and ' + 'Pattern Recognition (CVPR)', + year='2014', + homepage='http://human-pose.mpi-inf.mpg.de/', + ), + keypoint_info={ + 0: + dict( + name='right_ankle', + id=0, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + ## omitted + }, + skeleton_info={ + 0: + dict(link=('right_ankle', 'right_knee'), id=0, color=[255, 128, 0]), + ## omitted + }, + joint_weights=[ + 1.5, 1.2, 1., 1., 1.2, 1.5, 1., 1., 1., 1., 1.5, 1.2, 1., 1., 1.2, 1.5 + ], + # Adapted from COCO dataset. + sigmas=[ + 0.089, 0.083, 0.107, 0.107, 0.083, 0.089, 0.026, 0.026, 0.026, 0.026, + 0.062, 0.072, 0.179, 0.179, 0.072, 0.062 + ]) +``` + +In the model config, the user needs to specify the metainfo path of the custom dataset (e.g. `$MMPOSE/configs/_base_/datasets/custom.py`) as follows:\`\`\` + +```python +# dataset and dataloader settings +dataset_type = 'MyCustomDataset' # or 'CocoDataset' + +train_dataloader = dict( + batch_size=2, + dataset=dict( + type=dataset_type, + data_root='root/of/your/train/data', + ann_file='path/to/your/train/json', + data_prefix=dict(img='path/to/your/train/img'), + # specify the new dataset meta information config file + metainfo=dict(from_file='configs/_base_/datasets/custom.py'), + ...), + ) + +val_dataloader = dict( + batch_size=2, + dataset=dict( + type=dataset_type, + data_root='root/of/your/val/data', + ann_file='path/to/your/val/json', + data_prefix=dict(img='path/to/your/val/img'), + # specify the new dataset meta information config file + metainfo=dict(from_file='configs/_base_/datasets/custom.py'), + ...), + ) + +test_dataloader = val_dataloader +``` + +### Dataset + +To use custom dataset in MMPose, we recommend converting the annotations into a supported format (e.g. COCO or MPII) and directly using our implementation of the corresponding dataset. If this is not applicable, you may need to implement your own dataset class. 
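+
+For reference, here is a minimal, hypothetical sketch of what a COCO-style keypoint annotation file contains, written out as plain Python. All file names and values are placeholders rather than part of MMPose; note that COCO annotation files store boxes as `[x, y, w, h]`, which MMPose converts to `xyxy` internally when loading.
+
+```python
+import json
+
+# Illustrative COCO-style keypoint annotations (placeholder values only)
+coco_style = dict(
+    images=[
+        dict(id=1, file_name='000001.jpg', width=640, height=480),
+    ],
+    annotations=[
+        dict(
+            id=1,
+            image_id=1,
+            category_id=1,
+            # COCO files store bboxes as [x, y, w, h]
+            bbox=[100.0, 50.0, 200.0, 300.0],
+            area=200.0 * 300.0,
+            iscrowd=0,
+            # number of labeled keypoints (visibility > 0)
+            num_keypoints=2,
+            # flattened [x, y, v] triplets; v=0 unlabeled, v=1 labeled but not visible, v=2 visible
+            keypoints=[150.0, 80.0, 2, 160.0, 90.0, 1],
+        ),
+    ],
+    categories=[
+        dict(
+            id=1,
+            name='person',
+            keypoints=['nose', 'left_eye'],  # keypoint names, in order
+            skeleton=[[1, 2]],  # links between 1-based keypoint indices
+        ),
+    ],
+)
+
+with open('path/to/your/train/json', 'w') as f:
+    json.dump(coco_style, f)
+```
+
+If your annotations can be exported in this layout, the existing `CocoDataset` implementation (together with a matching meta information config) can usually be used directly.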
+ +Most 2D keypoint datasets in MMPose **organize the annotations in a COCO-like style**. Thus we provide a base class [BaseCocoStyleDataset](mmpose/datasets/datasets/base/base_coco_style_dataset.py) for these datasets. We recommend that users subclass `BaseCocoStyleDataset` and override the methods as needed (usually `__init__()` and `_load_annotations()`) to extend to a new custom 2D keypoint dataset. + +```{note} +Please refer to [COCO](./dataset_zoo/2d_body_keypoint.md) for more details about the COCO data format. +``` + +```{note} +The bbox format in MMPose is in `xyxy` instead of `xywh`, which is consistent with the format used in other OpenMMLab projects like [MMDetection](https://github.com/open-mmlab/mmdetection). We provide useful utils for bbox format conversion, such as `bbox_xyxy2xywh`, `bbox_xywh2xyxy`, `bbox_xyxy2cs`, etc., which are defined in `$MMPOSE/mmpose/structures/bbox/transforms.py`. +``` + +Let's take the implementation of the MPII dataset (`$MMPOSE/mmpose/datasets/datasets/body/mpii_dataset.py`) as an example. + +```Python +@DATASETS.register_module() +class MpiiDataset(BaseCocoStyleDataset): + METAINFO: dict = dict(from_file='configs/_base_/datasets/mpii.py') + + def __init__(self, + ## omitted + headbox_file: Optional[str] = None, + ## omitted + ): + + if headbox_file: + if data_mode != 'topdown': + raise ValueError( + f'{self.__class__.__name__} is set to {data_mode}: ' + 'mode, while "headbox_file" is only ' + 'supported in topdown mode.') + + if not test_mode: + raise ValueError( + f'{self.__class__.__name__} has `test_mode==False` ' + 'while "headbox_file" is only ' + 'supported when `test_mode==True`.') + + headbox_file_type = headbox_file[-3:] + allow_headbox_file_type = ['mat'] + if headbox_file_type not in allow_headbox_file_type: + raise KeyError( + f'The head boxes file type {headbox_file_type} is not ' + f'supported. Should be `mat` but got {headbox_file_type}.') + self.headbox_file = headbox_file + + super().__init__( + ## omitted + ) + + def _load_annotations(self) -> List[dict]: + """Load data from annotations in MPII format.""" + check_file_exist(self.ann_file) + with open(self.ann_file) as anno_file: + anns = json.load(anno_file) + + if self.headbox_file: + check_file_exist(self.headbox_file) + headbox_dict = loadmat(self.headbox_file) + headboxes_src = np.transpose(headbox_dict['headboxes_src'], + [2, 0, 1]) + SC_BIAS = 0.6 + + data_list = [] + ann_id = 0 + + # mpii bbox scales are normalized with factor 200. + pixel_std = 200. + + for idx, ann in enumerate(anns): + center = np.array(ann['center'], dtype=np.float32) + scale = np.array([ann['scale'], ann['scale']], + dtype=np.float32) * pixel_std + + # Adjust center/scale slightly to avoid cropping limbs + if center[0] != -1: + center[1] = center[1] + 15. 
/ pixel_std * scale[1] + + # MPII uses matlab format, index is 1-based, + # we should first convert to 0-based index + center = center - 1 + + # unify shape with coco datasets + center = center.reshape(1, -1) + scale = scale.reshape(1, -1) + bbox = bbox_cs2xyxy(center, scale) + + # load keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + keypoints = np.array(ann['joints']).reshape(1, -1, 2) + keypoints_visible = np.array(ann['joints_vis']).reshape(1, -1) + + data_info = { + 'id': ann_id, + 'img_id': int(ann['image'].split('.')[0]), + 'img_path': osp.join(self.data_prefix['img'], ann['image']), + 'bbox_center': center, + 'bbox_scale': scale, + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + } + + if self.headbox_file: + # calculate the diagonal length of head box as norm_factor + headbox = headboxes_src[idx] + head_size = np.linalg.norm(headbox[1] - headbox[0], axis=0) + head_size *= SC_BIAS + data_info['head_size'] = head_size.reshape(1, -1) + + data_list.append(data_info) + ann_id = ann_id + 1 + + return data_list +``` + +When supporting MPII dataset, since we need to use `head_size` to calculate `PCKh`, we add `headbox_file` to `__init__()` and override`_load_annotations()`. + +To support a dataset that is beyond the scope of `BaseCocoStyleDataset`, you may need to subclass from the `BaseDataset` provided by [MMEngine](https://github.com/open-mmlab/mmengine). Please refer to the [documents](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/basedataset.html) for details. + +### Pipeline + +Data augmentations and transformations during pre-processing are organized as a pipeline. Here is an example of typical pipelines: + +```Python +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +test_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] +``` + +In a keypoint detection task, data will be transformed among three scale spaces: + +- **Original Image Space**: the space where the images are stored. The sizes of different images are not necessarily the same + +- **Input Image Space**: the image space used for model input. All **images** and **annotations** will be transformed into this space, such as `256x256`, `256x192`, etc. + +- **Output Space**: the scale space where model outputs are located, such as `64x64(Heatmap)`,`1x1(Regression)`, etc. The supervision signal is also in this space during training + +Here is a diagram to show the workflow of data transformation among the three scale spaces: + +![migration-en](https://user-images.githubusercontent.com/13503330/187190213-cad87b5f-0a95-4f1f-b722-15896914ded4.png) + +In MMPose, the modules used for data transformation are under `$MMPOSE/mmpose/datasets/transforms`, and their workflow is shown as follows: + +![transforms-en](https://user-images.githubusercontent.com/13503330/187190352-a7662346-b8da-4256-9192-c7a84b15cbb5.png) + +#### i. Augmentation + +Commonly used transforms are defined in `$MMPOSE/mmpose/datasets/transforms/common_transforms.py`, such as `RandomFlip`, `RandomHalfBody`, etc. 
+ +For top-down methods, `Shift`, `Rotate` and `Resize` are implemented by `RandomBBoxTransform`. For bottom-up methods, `BottomupRandomAffine` is used. + +```{note} +Most data transforms depend on `bbox_center` and `bbox_scale`, which can be obtained by `GetBBoxCenterScale`. +``` + +#### ii. Transformation + +Affine transformation is used to convert images and annotations from the original image space to the input space. This is done by `TopdownAffine` for top-down methods and `BottomupRandomAffine` for bottom-up methods. + +#### iii. Encoding + +In the training phase, after the data is transformed from the original image space into the input space, it is necessary to use `GenerateTarget` to obtain the training target (e.g. Gaussian Heatmaps). We name this process **Encoding**. Conversely, the process of getting the corresponding coordinates from Gaussian Heatmaps is called **Decoding**. + +In MMPose, we collect the Encoding and Decoding processes into a **Codec**, in which `encode()` and `decode()` are implemented. + +Currently we support the following types of targets: + +- `heatmap`: Gaussian heatmaps +- `keypoint_label`: keypoint representation (e.g. normalized coordinates) +- `keypoint_xy_label`: axis-wise keypoint representation +- `heatmap+keypoint_label`: Gaussian heatmaps and keypoint representation +- `multiscale_heatmap`: multi-scale Gaussian heatmaps + +The generated targets will be packed as follows: + +- `heatmaps`: Gaussian heatmaps +- `keypoint_labels`: keypoint representation (e.g. normalized coordinates) +- `keypoint_x_labels`: keypoint x-axis representation +- `keypoint_y_labels`: keypoint y-axis representation +- `keypoint_weights`: keypoint visibility and weights + +Note that we unify the data format of top-down and bottom-up methods, which means that a new dimension is added to represent different instances from the same image, in shape: + +```Python +[batch_size, num_instances, num_keypoints, dim_coordinates] +``` + +- Top-down: `[B, 1, K, D]` + +- Bottom-up: `[B, N, K, D]` + +The provided codecs are stored under `$MMPOSE/mmpose/codecs`. + +```{note} +If you wish to customize a new codec, you can refer to [Codec](./user_guides/codecs.md) for more details. +``` + +#### iv. Packing + +After the data is transformed, you need to pack it using `PackPoseInputs`. + +This method converts the data stored in the dictionary `results` into standard data structures in MMPose, such as `InstanceData`, `PixelData`, `PoseDataSample`, etc. + +Specifically, we divide the data into `gt` (ground-truth) and `pred` (prediction), each of which has the following types: + +- **instances** (numpy.array): instance-level raw annotations or predictions in the original scale space +- **instance_labels** (torch.tensor): instance-level training labels (e.g. normalized coordinates, keypoint visibility) in the output scale space +- **fields** (torch.tensor): pixel-level training labels or predictions (e.g.
Gaussian Heatmaps) in the output scale space + +The following is an example of the implementation of `PoseDataSample` under the hood: + +```Python +def get_pose_data_sample(self): + # meta + pose_meta = dict( + img_shape=(600, 900), # [h, w, c] + crop_size=(256, 192), # [h, w] + heatmap_size=(64, 48), # [h, w] + ) + + # gt_instances + gt_instances = InstanceData() + gt_instances.bboxes = np.random.rand(1, 4) + gt_instances.keypoints = np.random.rand(1, 17, 2) + + # gt_instance_labels + gt_instance_labels = InstanceData() + gt_instance_labels.keypoint_labels = torch.rand(1, 17, 2) + gt_instance_labels.keypoint_weights = torch.rand(1, 17) + + # pred_instances + pred_instances = InstanceData() + pred_instances.keypoints = np.random.rand(1, 17, 2) + pred_instances.keypoint_scores = np.random.rand(1, 17) + + # gt_fields + gt_fields = PixelData() + gt_fields.heatmaps = torch.rand(17, 64, 48) + + # pred_fields + pred_fields = PixelData() + pred_fields.heatmaps = torch.rand(17, 64, 48) + data_sample = PoseDataSample( + gt_instances=gt_instances, + pred_instances=pred_instances, + gt_fields=gt_fields, + pred_fields=pred_fields, + metainfo=pose_meta) + + return data_sample +``` + +## Step3: Model + +In MMPose 1.0, the model consists of the following components: + +- **Data Preprocessor**: perform data normalization and channel transposition + +- **Backbone**: used for feature extraction + +- **Neck**: GAP,FPN, etc. are optional + +- **Head**: used to implement the core algorithm and loss function + +We define a base class `BasePoseEstimator` for the model in `$MMPOSE/models/pose_estimators/base.py`. All models, e.g. `TopdownPoseEstimator`, should inherit from this base class and override the corresponding methods. + +Three modes are provided in `forward()` of the estimator: + +- `mode == 'loss'`: return the result of loss function for model training + +- `mode == 'predict'`: return the prediction result in the input space, used for model inference + +- `mode == 'tensor'`: return the model output in the output space, i.e. model forward propagation only, for model export + +Developers should build the components by calling the corresponding registry. Taking the top-down model as an example: + +```Python +@MODELS.register_module() +class TopdownPoseEstimator(BasePoseEstimator): + def __init__(self, + backbone: ConfigType, + neck: OptConfigType = None, + head: OptConfigType = None, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + data_preprocessor: OptConfigType = None, + init_cfg: OptMultiConfig = None): + super().__init__(data_preprocessor, init_cfg) + + self.backbone = MODELS.build(backbone) + + if neck is not None: + self.neck = MODELS.build(neck) + + if head is not None: + self.head = MODELS.build(head) +``` + +### Data Preprocessor + +Starting from MMPose 1.0, we have added a new module to the model called data preprocessor, which performs data preprocessings like image normalization and channel transposition. It can benefit from the high computing power of devices like GPU, and improve the integrity in model export and deployment. + +A typical `data_preprocessor` in the config is as follows: + +```Python +data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), +``` + +It will transpose the channel order of the input image from `bgr` to `rgb` and normalize the data according to `mean` and `std`. 
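+
+As a rough numerical sketch (illustrative only, not the actual implementation), the configuration above amounts to the following for a single image tensor; the real `PoseDataPreprocessor` additionally takes care of batching and moving data to the device:
+
+```python
+import torch
+
+# channel statistics from the config above, given in RGB order
+mean = torch.tensor([123.675, 116.28, 103.53]).view(3, 1, 1)
+std = torch.tensor([58.395, 57.12, 57.375]).view(3, 1, 1)
+
+# a dummy BGR image in (C, H, W) layout with values in [0, 255]
+img_bgr = torch.randint(0, 256, (3, 256, 192)).float()
+
+# bgr_to_rgb=True flips the channel order before normalization
+img_rgb = img_bgr[[2, 1, 0], ...]
+
+# per-channel normalization with the configured mean and std
+img_normalized = (img_rgb - mean) / std
+```
+
+Because this step lives inside the model rather than in the data pipeline, the normalization parameters travel with the network, which is part of what makes export and deployment more self-contained.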
+ +### Backbone + +MMPose provides some commonly used backbones under `$MMPOSE/mmpose/models/backbones`. + +In practice, developers often use pre-trained backbone weights for transfer learning, which can improve the performance of the model on small datasets. + +In MMPose, you can use the pre-trained weights by setting `init_cfg` in config: + +```Python +init_cfg=dict( + type='Pretrained', + checkpoint='PATH/TO/YOUR_MODEL_WEIGHTS.pth'), +``` + +If you want to load a checkpoint to your backbone, you should specify the `prefix`: + +```Python +init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='PATH/TO/YOUR_CHECKPOINT.pth'), +``` + +`checkpoint` can be either a local path or a download link. Thus, if you wish to use a pre-trained model provided by Torchvision(e.g. ResNet50), you can simply use: + +```Python +init_cfg=dict( + type='Pretrained', + checkpoint='torchvision://resnet50') +``` + +In addition to these commonly used backbones, you can easily use backbones from other repositories in the OpenMMLab family such as MMClassification, which all share the same config system and provide pre-trained weights. + +It should be emphasized that if you add a new backbone, you need to register it by doing: + +```Python +@MODELS.register_module() +class YourBackbone(BaseBackbone): +``` + +Besides, import it in `$MMPOSE/mmpose/models/backbones/__init__.py`, and add it to `__all__`. + +### Neck + +Neck is usually a module between Backbone and Head, which is used in some algorithms. Here are some commonly used Neck: + +- Global Average Pooling (GAP) + +- Feature Pyramid Networks (FPN) + +- Feature Map Processor (FMP) + + The `FeatureMapProcessor` is a flexible PyTorch module designed to transform the feature outputs generated by backbones into a format suitable for heads. It achieves this by utilizing non-parametric operations such as selecting, concatenating, and rescaling. Below are some examples along with their corresponding configurations: + + - Select operation + + ```python + neck=dict(type='FeatureMapProcessor', select_index=0) + ``` + +
+ + - Concatenate operation + + ```python + neck=dict(type='FeatureMapProcessor', concat=True) + ``` + +
+ + Note that all feature maps will be resized to match the shape of the first feature map (index 0) prior to concatenation. + + - Rescale operation + + ```python + neck=dict(type='FeatureMapProcessor', scale_factor=2.0) + ``` + +
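+
+As an intuition aid, the snippet below mimics these non-parametric operations on dummy tensors. It is only an approximate sketch, not the actual `FeatureMapProcessor` implementation (for instance, the real module may use a different interpolation mode):
+
+```python
+import torch
+import torch.nn.functional as F
+
+# dummy multi-scale backbone outputs; shapes are purely illustrative
+feats = [torch.rand(1, 32, 64, 48), torch.rand(1, 64, 32, 24)]
+
+# select_index=0: keep only the first feature map
+selected = feats[0]
+
+# concat=True: resize every map to the shape of feats[0], then concatenate along channels
+resized = [feats[0]] + [
+    F.interpolate(f, size=feats[0].shape[2:], mode='bilinear', align_corners=False)
+    for f in feats[1:]
+]
+concatenated = torch.cat(resized, dim=1)  # (1, 96, 64, 48)
+
+# scale_factor=2.0: rescale a feature map spatially
+rescaled = F.interpolate(feats[0], scale_factor=2.0, mode='bilinear', align_corners=False)  # (1, 32, 128, 96)
+```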
+ +### Head + +Generally speaking, Head is often the core of an algorithm, which is used to make predictions and perform loss calculation. + +Modules related to Head in MMPose are defined under `$MMPOSE/mmpose/models/heads`, and developers need to inherit the base class `BaseHead` when customizing Head and override the following methods: + +- forward() + +- predict() + +- loss() + +Specifically, `predict()` method needs to return pose predictions in the image space, which is obtained from the model output though the decoding function provided by the codec. We implement this process in `BaseHead.decode()`. + +On the other hand, we will perform test-time augmentation(TTA) in `predict()`. + +A commonly used TTA is `flip_test`, namely, an image and its flipped version are sent into the model to inference, and the output of the flipped version will be flipped back, then average them to stabilize the prediction. + +Here is an example of `predict()` in `RegressionHead`: + +```Python +def predict(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from outputs.""" + + if test_cfg.get('flip_test', False): + # TTA: flip test -> feats = [orig, flipped] + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + input_size = batch_data_samples[0].metainfo['input_size'] + _feats, _feats_flip = feats + _batch_coords = self.forward(_feats) + _batch_coords_flip = flip_coordinates( + self.forward(_feats_flip), + flip_indices=flip_indices, + shift_coords=test_cfg.get('shift_coords', True), + input_size=input_size) + batch_coords = (_batch_coords + _batch_coords_flip) * 0.5 + else: + batch_coords = self.forward(feats) # (B, K, D) + + batch_coords.unsqueeze_(dim=1) # (B, N, K, D) + preds = self.decode(batch_coords) +``` + +The `loss()` not only performs the calculation of loss functions, but also the calculation of training-time metrics such as pose accuracy. The results are carried by a dictionary `losses`: + +```Python + # calculate accuracy +_, avg_acc, _ = keypoint_pck_accuracy( + pred=to_numpy(pred_coords), + gt=to_numpy(keypoint_labels), + mask=to_numpy(keypoint_weights) > 0, + thr=0.05, + norm_factor=np.ones((pred_coords.size(0), 2), dtype=np.float32)) + +acc_pose = torch.tensor(avg_acc, device=keypoint_labels.device) +losses.update(acc_pose=acc_pose) +``` + +The data of each batch is packaged into `batch_data_samples`. 
Taking the Regression-based method as an example, the normalized coordinates and keypoint weights can be obtained as follows: + +```Python +keypoint_labels = torch.cat( + [d.gt_instance_labels.keypoint_labels for d in batch_data_samples]) +keypoint_weights = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples +]) +``` + +Here is the complete implementation of `loss()` in `RegressionHead`: + +```Python +def loss(self, + inputs: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + pred_outputs = self.forward(inputs) + + keypoint_labels = torch.cat( + [d.gt_instance_labels.keypoint_labels for d in batch_data_samples]) + keypoint_weights = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples + ]) + + # calculate losses + losses = dict() + loss = self.loss_module(pred_outputs, keypoint_labels, + keypoint_weights.unsqueeze(-1)) + + if isinstance(loss, dict): + losses.update(loss) + else: + losses.update(loss_kpt=loss) + + # calculate accuracy + _, avg_acc, _ = keypoint_pck_accuracy( + pred=to_numpy(pred_outputs), + gt=to_numpy(keypoint_labels), + mask=to_numpy(keypoint_weights) > 0, + thr=0.05, + norm_factor=np.ones((pred_outputs.size(0), 2), dtype=np.float32)) + acc_pose = torch.tensor(avg_acc, device=keypoint_labels.device) + losses.update(acc_pose=acc_pose) + + return losses +``` diff --git a/docs/en/index.rst b/docs/en/index.rst index 044b54be0f..fe346656ee 100644 --- a/docs/en/index.rst +++ b/docs/en/index.rst @@ -1,116 +1,116 @@ -Welcome to MMPose's documentation! -================================== - -You can change the documentation language at the lower-left corner of the page. - -您可以在页面左下角切换文档语言。 - -.. toctree:: - :maxdepth: 1 - :caption: Get Started - - overview.md - installation.md - guide_to_framework.md - demos.md - contribution_guide.md - faq.md - -.. toctree:: - :maxdepth: 1 - :caption: User Guides - - user_guides/inference.md - user_guides/configs.md - user_guides/prepare_datasets.md - user_guides/train_and_test.md - -.. toctree:: - :maxdepth: 1 - :caption: Advanced Guides - - advanced_guides/codecs.md - advanced_guides/dataflow.md - advanced_guides/implement_new_models.md - advanced_guides/customize_datasets.md - advanced_guides/customize_transforms.md - advanced_guides/customize_optimizer.md - advanced_guides/customize_logging.md - advanced_guides/how_to_deploy.md - advanced_guides/model_analysis.md - -.. toctree:: - :maxdepth: 1 - :caption: Migration - - migration.md - -.. toctree:: - :maxdepth: 2 - :caption: Model Zoo - - model_zoo.txt - model_zoo/body_2d_keypoint.md - model_zoo/body_3d_keypoint.md - model_zoo/face_2d_keypoint.md - model_zoo/hand_2d_keypoint.md - model_zoo/wholebody_2d_keypoint.md - model_zoo/animal_2d_keypoint.md - -.. toctree:: - :maxdepth: 2 - :caption: Model Zoo (by paper) - - model_zoo_papers/algorithms.md - model_zoo_papers/backbones.md - model_zoo_papers/techniques.md - model_zoo_papers/datasets.md - -.. toctree:: - :maxdepth: 2 - :caption: Dataset Zoo - - dataset_zoo.md - dataset_zoo/2d_body_keypoint.md - dataset_zoo/2d_wholebody_keypoint.md - dataset_zoo/2d_face_keypoint.md - dataset_zoo/2d_hand_keypoint.md - dataset_zoo/2d_fashion_landmark.md - dataset_zoo/2d_animal_keypoint.md - dataset_zoo/3d_body_keypoint.md - dataset_zoo/3d_hand_keypoint.md - dataset_zoo/dataset_tools.md - -.. toctree:: - :maxdepth: 1 - :caption: Projects - - projects/community_projects.md - projects/projects.md - -.. 
toctree:: - :maxdepth: 1 - :caption: Notes - - notes/ecosystem.md - notes/changelog.md - notes/benchmark.md - notes/pytorch_2.md - -.. toctree:: - :caption: API Reference - - api.rst - -.. toctree:: - :caption: Switch Language - - switch_language.md - - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`search` +Welcome to MMPose's documentation! +================================== + +You can change the documentation language at the lower-left corner of the page. + +您可以在页面左下角切换文档语言。 + +.. toctree:: + :maxdepth: 1 + :caption: Get Started + + overview.md + installation.md + guide_to_framework.md + demos.md + contribution_guide.md + faq.md + +.. toctree:: + :maxdepth: 1 + :caption: User Guides + + user_guides/inference.md + user_guides/configs.md + user_guides/prepare_datasets.md + user_guides/train_and_test.md + +.. toctree:: + :maxdepth: 1 + :caption: Advanced Guides + + advanced_guides/codecs.md + advanced_guides/dataflow.md + advanced_guides/implement_new_models.md + advanced_guides/customize_datasets.md + advanced_guides/customize_transforms.md + advanced_guides/customize_optimizer.md + advanced_guides/customize_logging.md + advanced_guides/how_to_deploy.md + advanced_guides/model_analysis.md + +.. toctree:: + :maxdepth: 1 + :caption: Migration + + migration.md + +.. toctree:: + :maxdepth: 2 + :caption: Model Zoo + + model_zoo.txt + model_zoo/body_2d_keypoint.md + model_zoo/body_3d_keypoint.md + model_zoo/face_2d_keypoint.md + model_zoo/hand_2d_keypoint.md + model_zoo/wholebody_2d_keypoint.md + model_zoo/animal_2d_keypoint.md + +.. toctree:: + :maxdepth: 2 + :caption: Model Zoo (by paper) + + model_zoo_papers/algorithms.md + model_zoo_papers/backbones.md + model_zoo_papers/techniques.md + model_zoo_papers/datasets.md + +.. toctree:: + :maxdepth: 2 + :caption: Dataset Zoo + + dataset_zoo.md + dataset_zoo/2d_body_keypoint.md + dataset_zoo/2d_wholebody_keypoint.md + dataset_zoo/2d_face_keypoint.md + dataset_zoo/2d_hand_keypoint.md + dataset_zoo/2d_fashion_landmark.md + dataset_zoo/2d_animal_keypoint.md + dataset_zoo/3d_body_keypoint.md + dataset_zoo/3d_hand_keypoint.md + dataset_zoo/dataset_tools.md + +.. toctree:: + :maxdepth: 1 + :caption: Projects + + projects/community_projects.md + projects/projects.md + +.. toctree:: + :maxdepth: 1 + :caption: Notes + + notes/ecosystem.md + notes/changelog.md + notes/benchmark.md + notes/pytorch_2.md + +.. toctree:: + :caption: API Reference + + api.rst + +.. toctree:: + :caption: Switch Language + + switch_language.md + + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`search` diff --git a/docs/en/installation.md b/docs/en/installation.md index 47db25bb5f..987d754929 100644 --- a/docs/en/installation.md +++ b/docs/en/installation.md @@ -1,245 +1,245 @@ -# Installation - -We recommend that users follow our best practices to install MMPose. However, the whole process is highly customizable. See [Customize Installation](#customize-installation) section for more information. 
- -- [Installation](#installation) - - [Prerequisites](#prerequisites) - - [Best Practices](#best-practices) - - [Build MMPose from source](#build-mmpose-from-source) - - [Install as a Python package](#install-as-a-python-package) - - [Customize Installation](#customize-installation) - - [CUDA versions](#cuda-versions) - - [Install MMEngine without MIM](#install-mmengine-without-mim) - - [Install MMCV without MIM](#install-mmcv-without-mim) - - [Install on CPU-only platforms](#install-on-cpu-only-platforms) - - [Install on Google Colab](#install-on-google-colab) - - [Using MMPose with Docker](#using-mmpose-with-docker) - - [Verify the installation](#verify-the-installation) - - [Trouble shooting](#trouble-shooting) - - - -## Prerequisites - -In this section we demonstrate how to prepare an environment with PyTorch. - -MMPose works on Linux, Windows and macOS. It requires Python 3.7+, CUDA 9.2+ and PyTorch 1.8+. - -If you are experienced with PyTorch and have already installed it, you can skip this part and jump to the [MMPose Installation](#install-mmpose). Otherwise, you can follow these steps for the preparation. - -**Step 0.** Download and install Miniconda from the [official website](https://docs.conda.io/en/latest/miniconda.html). - -**Step 1.** Create a conda environment and activate it. - -```shell -conda create --name openmmlab python=3.8 -y -conda activate openmmlab -``` - -**Step 2.** Install PyTorch following [official instructions](https://pytorch.org/get-started/locally/), e.g. - -On GPU platforms: - -```shell -conda install pytorch torchvision -c pytorch -``` - -```{warning} -This command will automatically install the latest version PyTorch and cudatoolkit, please check whether they match your environment. -``` - -On CPU platforms: - -```shell -conda install pytorch torchvision cpuonly -c pytorch -``` - -**Step 3.** Install [MMEngine](https://github.com/open-mmlab/mmengine) and [MMCV](https://github.com/open-mmlab/mmcv/tree/2.x) using [MIM](https://github.com/open-mmlab/mim). - -```shell -pip install -U openmim -mim install mmengine -mim install "mmcv>=2.0.1" -``` - -Note that some of the demo scripts in MMPose require [MMDetection](https://github.com/open-mmlab/mmdetection) (mmdet) for human detection. If you want to run these demo scripts with mmdet, you can easily install mmdet as a dependency by running: - -```shell -mim install "mmdet>=3.1.0" -``` - -## Best Practices - -### Build MMPose from source - -To develop and run mmpose directly, install it from source: - -```shell -git clone https://github.com/open-mmlab/mmpose.git -cd mmpose -pip install -r requirements.txt -pip install -v -e . -# "-v" means verbose, or more output -# "-e" means installing a project in editable mode, -# thus any local modifications made to the code will take effect without reinstallation. -``` - -### Install as a Python package - -To use mmpose as a dependency or third-party package, install it with pip: - -```shell -mim install "mmpose>=1.1.0" -``` - -## Verify the installation - -To verify that MMPose is installed correctly, you can run an inference demo with the following steps. - -**Step 1.** We need to download config and checkpoint files. - -```shell -mim download mmpose --config td-hm_hrnet-w48_8xb32-210e_coco-256x192 --dest . -``` - -The downloading will take several seconds or more, depending on your network environment. When it is done, you will find two files `td-hm_hrnet-w48_8xb32-210e_coco-256x192.py` and `hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth` in your current folder. 
- -**Step 2.** Run the inference demo. - -Option (A). If you install mmpose from source, just run the following command under the folder `$MMPOSE`: - -```shell -python demo/image_demo.py \ - tests/data/coco/000000000785.jpg \ - td-hm_hrnet-w48_8xb32-210e_coco-256x192.py \ - hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth \ - --out-file vis_results.jpg \ - --draw-heatmap -``` - -If everything goes fine, you will be able to get the following visualization result from `vis_results.jpg` in your current folder, which displays the predicted keypoints and heatmaps overlaid on the person in the image. - -![image](https://user-images.githubusercontent.com/87690686/187824033-2cce0f55-034a-4127-82e2-52744178bc32.jpg) - -Option (B). If you install mmpose with pip, open you python interpreter and copy & paste the following codes. - -```python -from mmpose.apis import inference_topdown, init_model -from mmpose.utils import register_all_modules - -register_all_modules() - -config_file = 'td-hm_hrnet-w48_8xb32-210e_coco-256x192.py' -checkpoint_file = 'hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth' -model = init_model(config_file, checkpoint_file, device='cpu') # or device='cuda:0' - -# please prepare an image with person -results = inference_topdown(model, 'demo.jpg') -``` - -The `demo.jpg` can be downloaded from [Github](https://raw.githubusercontent.com/open-mmlab/mmpose/main/tests/data/coco/000000000785.jpg). - -The inference results will be a list of `PoseDataSample`, and the predictions are in the `pred_instances`, indicating the detected keypoint locations and scores. - -## Customize Installation - -### CUDA versions - -When installing PyTorch, you need to specify the version of CUDA. If you are not clear on which to choose, follow our recommendations: - -- For Ampere-based NVIDIA GPUs, such as GeForce 30 series and NVIDIA A100, CUDA 11 is a must. -- For older NVIDIA GPUs, CUDA 11 is backward compatible, but CUDA 10.2 offers better compatibility and is more lightweight. - -Please make sure the GPU driver satisfies the minimum version requirements. See [this table](https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cuda-major-component-versions__table-cuda-toolkit-driver-versions) for more information. - -Installing CUDA runtime libraries is enough if you follow our best practices, because no CUDA code will be compiled locally. However if you hope to compile MMCV from source or develop other CUDA operators, you need to install the complete CUDA toolkit from NVIDIA's [website](https://developer.nvidia.com/cuda-downloads), and its version should match the CUDA version of PyTorch. i.e., the specified version of cudatoolkit in `conda install` command. - -### Install MMEngine without MIM - -To install MMEngine with pip instead of MIM, please follow [MMEngine installation guides](https://mmengine.readthedocs.io/zh_CN/latest/get_started/installation.html). - -For example, you can install MMEngine by the following command. - -```shell -pip install mmengine -``` - -### Install MMCV without MIM - -MMCV contains C++ and CUDA extensions, thus depending on PyTorch in a complex way. MIM solves such dependencies automatically and makes the installation easier. However, it is not a must. - -To install MMCV with pip instead of MIM, please follow [MMCV installation guides](https://mmcv.readthedocs.io/en/2.x/get_started/installation.html). This requires manually specifying a find-url based on PyTorch version and its CUDA version. 
- -For example, the following command install mmcv built for PyTorch 1.10.x and CUDA 11.3. - -```shell -pip install 'mmcv>=2.0.1' -f https://download.openmmlab.com/mmcv/dist/cu113/torch1.10/index.html -``` - -### Install on CPU-only platforms - -MMPose can be built for CPU only environment. In CPU mode you can train, test or inference a model. - -However, some functionalities are missing in this mode, usually GPU-compiled ops like `Deformable Convolution`. Most models in MMPose don't depend on these ops, but if you try to train/test/infer a model containing these ops, an error will be raised. - -### Install on Google Colab - -[Google Colab](https://colab.research.google.com/) usually has PyTorch installed, -thus we only need to install MMEngine, MMCV and MMPose with the following commands. - -**Step 1.** Install [MMEngine](https://github.com/open-mmlab/mmengine) and [MMCV](https://github.com/open-mmlab/mmcv/tree/2.x) using [MIM](https://github.com/open-mmlab/mim). - -```shell -!pip3 install openmim -!mim install mmengine -!mim install "mmcv>=2.0.1" -``` - -**Step 2.** Install MMPose from the source. - -```shell -!git clone https://github.com/open-mmlab/mmpose.git -%cd mmpose -!pip install -e . -``` - -**Step 3.** Verification. - -```python -import mmpose -print(mmpose.__version__) -# Example output: 1.1.0 -``` - -```{note} -Note that within Jupyter, the exclamation mark `!` is used to call external executables and `%cd` is a [magic command](https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-cd) to change the current working directory of Python. -``` - -### Using MMPose with Docker - -We provide a [Dockerfile](https://github.com/open-mmlab/mmpose/blob/master/docker/Dockerfile) to build an image. Ensure that your [docker version](https://docs.docker.com/engine/install/) >=19.03. - -```shell -# build an image with PyTorch 1.8.0, CUDA 10.1, CUDNN 7. -# If you prefer other versions, just modified the Dockerfile -docker build -t mmpose docker/ -``` - -**Important:** Make sure you've installed the [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker). - -Run it with - -```shell -docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/mmpose/data mmpose -``` - -`{DATA_DIR}` is your local folder containing all the datasets for mmpose. - -```{note} -If you encounter the error message like `permission denied`, please add `sudo` at the start of the command and try it again. -``` - -## Trouble shooting - -If you have some issues during the installation, please first view the [FAQ](./faq.md) page. -You may [open an issue](https://github.com/open-mmlab/mmpose/issues/new/choose) on GitHub if no solution is found. +# Installation + +We recommend that users follow our best practices to install MMPose. However, the whole process is highly customizable. See [Customize Installation](#customize-installation) section for more information. 
+
+- [Installation](#installation)
+  - [Prerequisites](#prerequisites)
+  - [Best Practices](#best-practices)
+    - [Build MMPose from source](#build-mmpose-from-source)
+    - [Install as a Python package](#install-as-a-python-package)
+  - [Verify the installation](#verify-the-installation)
+  - [Customize Installation](#customize-installation)
+    - [CUDA versions](#cuda-versions)
+    - [Install MMEngine without MIM](#install-mmengine-without-mim)
+    - [Install MMCV without MIM](#install-mmcv-without-mim)
+    - [Install on CPU-only platforms](#install-on-cpu-only-platforms)
+    - [Install on Google Colab](#install-on-google-colab)
+    - [Using MMPose with Docker](#using-mmpose-with-docker)
+  - [Troubleshooting](#troubleshooting)
+
+
+
+## Prerequisites
+
+In this section, we demonstrate how to prepare an environment with PyTorch.
+
+MMPose works on Linux, Windows and macOS. It requires Python 3.7+, CUDA 9.2+ and PyTorch 1.8+.
+
+If you are experienced with PyTorch and have already installed it, you can skip this part and jump to [Best Practices](#best-practices). Otherwise, follow these steps to prepare the environment.
+
+**Step 0.** Download and install Miniconda from the [official website](https://docs.conda.io/en/latest/miniconda.html).
+
+**Step 1.** Create a conda environment and activate it.
+
+```shell
+conda create --name openmmlab python=3.8 -y
+conda activate openmmlab
+```
+
+**Step 2.** Install PyTorch following [official instructions](https://pytorch.org/get-started/locally/), e.g.
+
+On GPU platforms:
+
+```shell
+conda install pytorch torchvision -c pytorch
+```
+
+```{warning}
+This command will automatically install the latest versions of PyTorch and cudatoolkit. Please check whether they match your environment.
+```
+
+On CPU platforms:
+
+```shell
+conda install pytorch torchvision cpuonly -c pytorch
+```
+
+**Step 3.** Install [MMEngine](https://github.com/open-mmlab/mmengine) and [MMCV](https://github.com/open-mmlab/mmcv/tree/2.x) using [MIM](https://github.com/open-mmlab/mim).
+
+```shell
+pip install -U openmim
+mim install mmengine
+mim install "mmcv>=2.0.1"
+```
+
+Note that some of the demo scripts in MMPose require [MMDetection](https://github.com/open-mmlab/mmdetection) (mmdet) for human detection. If you want to run these demo scripts with mmdet, you can easily install mmdet as a dependency by running:
+
+```shell
+mim install "mmdet>=3.1.0"
+```
+
+## Best Practices
+
+### Build MMPose from source
+
+To develop and run mmpose directly, install it from source:
+
+```shell
+git clone https://github.com/open-mmlab/mmpose.git
+cd mmpose
+pip install -r requirements.txt
+pip install -v -e .
+# "-v" means verbose, or more output
+# "-e" means installing a project in editable mode,
+# thus any local modifications made to the code will take effect without reinstallation.
+```
+
+### Install as a Python package
+
+To use mmpose as a dependency or third-party package, install it with MIM:
+
+```shell
+mim install "mmpose>=1.1.0"
+```
+
+## Verify the installation
+
+To verify that MMPose is installed correctly, you can run an inference demo with the following steps.
+
+**Step 1.** We need to download config and checkpoint files.
+
+```shell
+mim download mmpose --config td-hm_hrnet-w48_8xb32-210e_coco-256x192 --dest .
+```
+
+The download will take a few seconds or more, depending on your network environment. When it is done, you will find two files `td-hm_hrnet-w48_8xb32-210e_coco-256x192.py` and `hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth` in your current folder.
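+
+If you would like to double-check the download (an optional sanity check, not part of the official steps), the following minimal Python snippet verifies that both files are present in the current folder:
+
+```python
+from pathlib import Path
+
+# Optional check: the config and checkpoint downloaded by `mim download`
+# should both sit in the current working directory.
+for name in ('td-hm_hrnet-w48_8xb32-210e_coco-256x192.py',
+             'hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth'):
+    print(name, '->', 'found' if Path(name).exists() else 'missing')
+```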
+
+**Step 2.** Run the inference demo.
+
+Option (A). If you installed mmpose from source, just run the following command under the folder `$MMPOSE`:
+
+```shell
+python demo/image_demo.py \
+    tests/data/coco/000000000785.jpg \
+    td-hm_hrnet-w48_8xb32-210e_coco-256x192.py \
+    hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth \
+    --out-file vis_results.jpg \
+    --draw-heatmap
+```
+
+If everything goes well, you will get the following visualization result saved as `vis_results.jpg` in your current folder, which displays the predicted keypoints and heatmaps overlaid on the person in the image.
+
+![image](https://user-images.githubusercontent.com/87690686/187824033-2cce0f55-034a-4127-82e2-52744178bc32.jpg)
+
+Option (B). If you installed mmpose as a Python package, open your Python interpreter and copy & paste the following code.
+
+```python
+from mmpose.apis import inference_topdown, init_model
+from mmpose.utils import register_all_modules
+
+register_all_modules()
+
+config_file = 'td-hm_hrnet-w48_8xb32-210e_coco-256x192.py'
+checkpoint_file = 'hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth'
+model = init_model(config_file, checkpoint_file, device='cpu')  # or device='cuda:0'
+
+# please prepare an image that contains a person
+results = inference_topdown(model, 'demo.jpg')
+```
+
+The `demo.jpg` can be downloaded from [GitHub](https://raw.githubusercontent.com/open-mmlab/mmpose/main/tests/data/coco/000000000785.jpg).
+
+The inference result will be a list of `PoseDataSample`, where the predictions are stored in `pred_instances`, containing the detected keypoint locations and scores.
+
+## Customize Installation
+
+### CUDA versions
+
+When installing PyTorch, you need to specify the version of CUDA. If you are unsure which to choose, follow our recommendations:
+
+- For Ampere-based NVIDIA GPUs, such as the GeForce 30 series and NVIDIA A100, CUDA 11 is a must.
+- For older NVIDIA GPUs, CUDA 11 is backward compatible, but CUDA 10.2 offers better compatibility and is more lightweight.
+
+Please make sure the GPU driver satisfies the minimum version requirements. See [this table](https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cuda-major-component-versions__table-cuda-toolkit-driver-versions) for more information.
+
+Installing CUDA runtime libraries is enough if you follow our best practices, because no CUDA code will be compiled locally. However, if you want to compile MMCV from source or develop other CUDA operators, you need to install the complete CUDA toolkit from NVIDIA's [website](https://developer.nvidia.com/cuda-downloads), and its version should match the CUDA version of PyTorch, i.e., the cudatoolkit version specified in the `conda install` command.
+
+### Install MMEngine without MIM
+
+To install MMEngine with pip instead of MIM, please follow the [MMEngine installation guide](https://mmengine.readthedocs.io/en/latest/get_started/installation.html).
+
+For example, you can install MMEngine with the following command.
+
+```shell
+pip install mmengine
+```
+
+### Install MMCV without MIM
+
+MMCV contains C++ and CUDA extensions, so it depends on PyTorch in a complex way. MIM solves such dependencies automatically and makes the installation easier. However, it is not a must.
+
+To install MMCV with pip instead of MIM, please follow the [MMCV installation guide](https://mmcv.readthedocs.io/en/2.x/get_started/installation.html). This requires manually specifying a find-url based on the PyTorch version and its CUDA version.
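+
+If you are unsure which PyTorch and CUDA versions are installed locally, a quick check (a minimal sketch relying only on PyTorch's standard attributes) is:
+
+```python
+import torch
+
+# The installed PyTorch version and the CUDA version it was built against
+print(torch.__version__)   # e.g. 1.10.1
+print(torch.version.cuda)  # e.g. 11.3 (None for CPU-only builds)
+```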
+
+For example, the following command installs mmcv built for PyTorch 1.10.x and CUDA 11.3.
+
+```shell
+pip install 'mmcv>=2.0.1' -f https://download.openmmlab.com/mmcv/dist/cu113/torch1.10/index.html
+```
+
+### Install on CPU-only platforms
+
+MMPose can be built in a CPU-only environment. In CPU mode you can train, test, and run inference with a model.
+
+However, some functionalities are missing in this mode, usually GPU-compiled ops such as `Deformable Convolution`. Most models in MMPose don't depend on these ops, but if you try to train/test/infer a model containing these ops, an error will be raised.
+
+### Install on Google Colab
+
+[Google Colab](https://colab.research.google.com/) usually has PyTorch installed,
+so we only need to install MMEngine, MMCV and MMPose with the following commands.
+
+**Step 1.** Install [MMEngine](https://github.com/open-mmlab/mmengine) and [MMCV](https://github.com/open-mmlab/mmcv/tree/2.x) using [MIM](https://github.com/open-mmlab/mim).
+
+```shell
+!pip3 install openmim
+!mim install mmengine
+!mim install "mmcv>=2.0.1"
+```
+
+**Step 2.** Install MMPose from source.
+
+```shell
+!git clone https://github.com/open-mmlab/mmpose.git
+%cd mmpose
+!pip install -e .
+```
+
+**Step 3.** Verification.
+
+```python
+import mmpose
+print(mmpose.__version__)
+# Example output: 1.1.0
+```
+
+```{note}
+Note that within Jupyter, the exclamation mark `!` is used to call external executables and `%cd` is a [magic command](https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-cd) to change the current working directory of Python.
+```
+
+### Using MMPose with Docker
+
+We provide a [Dockerfile](https://github.com/open-mmlab/mmpose/blob/master/docker/Dockerfile) to build an image. Ensure that your [Docker version](https://docs.docker.com/engine/install/) is >=19.03.
+
+```shell
+# build an image with PyTorch 1.8.0, CUDA 10.1, CUDNN 7.
+# If you prefer other versions, just modify the Dockerfile
+docker build -t mmpose docker/
+```
+
+**Important:** Make sure you've installed the [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker).
+
+Run it with:
+
+```shell
+docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/mmpose/data mmpose
+```
+
+`{DATA_DIR}` is your local folder containing all the datasets for mmpose.
+
+```{note}
+If you encounter an error message like `permission denied`, add `sudo` at the start of the command and try again.
+```
+
+## Troubleshooting
+
+If you run into issues during installation, please first check the [FAQ](./faq.md) page.
+If no solution is found, you may [open an issue](https://github.com/open-mmlab/mmpose/issues/new/choose) on GitHub.
diff --git a/docs/en/make.bat b/docs/en/make.bat
index 922152e96a..2119f51099 100644
--- a/docs/en/make.bat
+++ b/docs/en/make.bat
@@ -1,35 +1,35 @@
-@ECHO OFF
-
-pushd %~dp0
-
-REM Command file for Sphinx documentation
-
-if "%SPHINXBUILD%" == "" (
-	set SPHINXBUILD=sphinx-build
-)
-set SOURCEDIR=.
-set BUILDDIR=_build
-
-if "%1" == "" goto help
-
-%SPHINXBUILD% >NUL 2>NUL
-if errorlevel 9009 (
-	echo.
-	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
-	echo.installed, then set the SPHINXBUILD environment variable to point
-	echo.to the full path of the 'sphinx-build' executable. Alternatively you
-	echo.may add the Sphinx directory to PATH.
-	echo.
- echo.If you don't have Sphinx installed, grab it from - echo.http://sphinx-doc.org/ - exit /b 1 -) - -%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% -goto end - -:help -%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% - -:end -popd +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=_build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/docs/en/merge_docs.sh b/docs/en/merge_docs.sh index 23af31dd56..431b27a312 100644 --- a/docs/en/merge_docs.sh +++ b/docs/en/merge_docs.sh @@ -1,31 +1,31 @@ -#!/usr/bin/env bash -# Copyright (c) OpenMMLab. All rights reserved. - -sed -i '$a\\n' ../../demo/docs/en/*_demo.md -cat ../../demo/docs/en/*_demo.md | sed "s/^## 2D\(.*\)Demo/##\1Estimation/" | sed "s/md###t/html#t/g" | sed '1i\# Demos\n' | sed 's=](/docs/en/=](/=g' | sed 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' >demos.md - - # remove /docs/ for link used in doc site -sed -i 's=](/docs/en/=](=g' overview.md -sed -i 's=](/docs/en/=](=g' installation.md -sed -i 's=](/docs/en/=](=g' quick_run.md -sed -i 's=](/docs/en/=](=g' migration.md -sed -i 's=](/docs/en/=](=g' ./model_zoo/*.md -sed -i 's=](/docs/en/=](=g' ./model_zoo_papers/*.md -sed -i 's=](/docs/en/=](=g' ./user_guides/*.md -sed -i 's=](/docs/en/=](=g' ./advanced_guides/*.md -sed -i 's=](/docs/en/=](=g' ./dataset_zoo/*.md -sed -i 's=](/docs/en/=](=g' ./notes/*.md -sed -i 's=](/docs/en/=](=g' ./projects/*.md - - -sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' overview.md -sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' installation.md -sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' quick_run.md -sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' migration.md -sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./advanced_guides/*.md -sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./model_zoo/*.md -sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./model_zoo_papers/*.md -sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./user_guides/*.md -sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./dataset_zoo/*.md -sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./notes/*.md -sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./projects/*.md +#!/usr/bin/env bash +# Copyright (c) OpenMMLab. All rights reserved. 
+ +sed -i '$a\\n' ../../demo/docs/en/*_demo.md +cat ../../demo/docs/en/*_demo.md | sed "s/^## 2D\(.*\)Demo/##\1Estimation/" | sed "s/md###t/html#t/g" | sed '1i\# Demos\n' | sed 's=](/docs/en/=](/=g' | sed 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' >demos.md + + # remove /docs/ for link used in doc site +sed -i 's=](/docs/en/=](=g' overview.md +sed -i 's=](/docs/en/=](=g' installation.md +sed -i 's=](/docs/en/=](=g' quick_run.md +sed -i 's=](/docs/en/=](=g' migration.md +sed -i 's=](/docs/en/=](=g' ./model_zoo/*.md +sed -i 's=](/docs/en/=](=g' ./model_zoo_papers/*.md +sed -i 's=](/docs/en/=](=g' ./user_guides/*.md +sed -i 's=](/docs/en/=](=g' ./advanced_guides/*.md +sed -i 's=](/docs/en/=](=g' ./dataset_zoo/*.md +sed -i 's=](/docs/en/=](=g' ./notes/*.md +sed -i 's=](/docs/en/=](=g' ./projects/*.md + + +sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' overview.md +sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' installation.md +sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' quick_run.md +sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' migration.md +sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./advanced_guides/*.md +sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./model_zoo/*.md +sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./model_zoo_papers/*.md +sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./user_guides/*.md +sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./dataset_zoo/*.md +sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./notes/*.md +sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./projects/*.md diff --git a/docs/en/migration.md b/docs/en/migration.md index 70ed0b5a52..56373b25aa 100644 --- a/docs/en/migration.md +++ b/docs/en/migration.md @@ -1,210 +1,210 @@ -# How to Migrate MMPose 0.x Projects to MMPose 1.0 - -MMPose 1.0 has been refactored extensively and addressed many legacy issues. Most of the code in MMPose 1.0 will not be compatible with 0.x version. - -To try our best to help you migrate your code and model, here are some major changes: - -## Data Transformation - -### Translation, Rotation and Scaling - -The transformation methods `TopDownRandomShiftBboxCenter` and `TopDownGetRandomScaleRotation` in old version, will be merged into `RandomBBoxTransform`. - -```Python -@TRANSFORMS.register_module() -class RandomBBoxTransform(BaseTransform): - r"""Rnadomly shift, resize and rotate the bounding boxes. - - Required Keys: - - - bbox_center - - bbox_scale - - Modified Keys: - - - bbox_center - - bbox_scale - - Added Keys: - - bbox_rotation - - Args: - shift_factor (float): Randomly shift the bbox in range - :math:`[-dx, dx]` and :math:`[-dy, dy]` in X and Y directions, - where :math:`dx(y) = x(y)_scale \cdot shift_factor` in pixels. - Defaults to 0.16 - shift_prob (float): Probability of applying random shift. Defaults to - 0.3 - scale_factor (Tuple[float, float]): Randomly resize the bbox in range - :math:`[scale_factor[0], scale_factor[1]]`. Defaults to (0.5, 1.5) - scale_prob (float): Probability of applying random resizing. Defaults - to 1.0 - rotate_factor (float): Randomly rotate the bbox in - :math:`[-rotate_factor, rotate_factor]` in degrees. Defaults - to 80.0 - rotate_prob (float): Probability of applying random rotation. 
Defaults - to 0.6 - """ - - def __init__(self, - shift_factor: float = 0.16, - shift_prob: float = 0.3, - scale_factor: Tuple[float, float] = (0.5, 1.5), - scale_prob: float = 1.0, - rotate_factor: float = 80.0, - rotate_prob: float = 0.6) -> None: -``` - -### Target Generation - -The old methods like: - -- `TopDownGenerateTarget` -- `TopDownGenerateTargetRegression` -- `BottomUpGenerateHeatmapTarget` -- `BottomUpGenerateTarget` - -will be merged in to `GenerateTarget`, and the actual generation methods are implemented in [Codec](./user_guides/codecs.md). - -```Python -@TRANSFORMS.register_module() -class GenerateTarget(BaseTransform): - """Encode keypoints into Target. - - The generated target is usually the supervision signal of the model - learning, e.g. heatmaps or regression labels. - - Required Keys: - - - keypoints - - keypoints_visible - - dataset_keypoint_weights - - Added Keys: - - - The keys of the encoded items from the codec will be updated into - the results, e.g. ``'heatmaps'`` or ``'keypoint_weights'``. See - the specific codec for more details. - - Args: - encoder (dict | list[dict]): The codec config for keypoint encoding. - Both single encoder and multiple encoders (given as a list) are - supported - multilevel (bool): Determine the method to handle multiple encoders. - If ``multilevel==True``, generate multilevel targets from a group - of encoders of the same type (e.g. multiple :class:`MSRAHeatmap` - encoders with different sigma values); If ``multilevel==False``, - generate combined targets from a group of different encoders. This - argument will have no effect in case of single encoder. Defaults - to ``False`` - use_dataset_keypoint_weights (bool): Whether use the keypoint weights - from the dataset meta information. Defaults to ``False`` - """ - - def __init__(self, - encoder: MultiConfig, - multilevel: bool = False, - use_dataset_keypoint_weights: bool = False) -> None: -``` - -### Data Normalization - -The data normalization operations `NormalizeTensor` and `ToTensor` will be replaced by **DataPreprocessor** module, which will no longer be used as a preprocessing operation, but will be merged as a part of the model forward propagation. - -## Compatibility of Models - -We have performed compatibility with the model weights provided by model zoo to ensure that the same model weights can get a comparable accuracy in both version. But note that due to the large number of differences in processing details, the inference outputs can be slightly different(less than 0.05% difference in accuracy). - -For model weights saved by training with 0.x version, we provide a `_load_state_dict_pre_hook()` method in Head to replace the old version of the `state_dict` with the new one. If you wish to make your model compatible with MMPose 1.0, you can refer to our implementation as follows. - -```Python -@MODELS.register_module() -class YourHead(BaseHead): -def __init__(self): - - ## omitted - - # Register the hook to automatically convert old version state dicts - self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) -``` - -### Heatmap-based Model - -For models based on `SimpleBaseline` approach, developers need to pay attention to the last convolutional layer. 
- -```Python -def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, - **kwargs): - version = local_meta.get('version', None) - - if version and version >= self._version: - return - - # convert old-version state dict - keys = list(state_dict.keys()) - for _k in keys: - if not _k.startswith(prefix): - continue - v = state_dict.pop(_k) - k = _k[len(prefix):] - # In old version, "final_layer" includes both intermediate - # conv layers (new "conv_layers") and final conv layers (new - # "final_layer"). - # - # If there is no intermediate conv layer, old "final_layer" will - # have keys like "final_layer.xxx", which should be still - # named "final_layer.xxx"; - # - # If there are intermediate conv layers, old "final_layer" will - # have keys like "final_layer.n.xxx", where the weights of the last - # one should be renamed "final_layer.xxx", and others should be - # renamed "conv_layers.n.xxx" - k_parts = k.split('.') - if k_parts[0] == 'final_layer': - if len(k_parts) == 3: - assert isinstance(self.conv_layers, nn.Sequential) - idx = int(k_parts[1]) - if idx < len(self.conv_layers): - # final_layer.n.xxx -> conv_layers.n.xxx - k_new = 'conv_layers.' + '.'.join(k_parts[1:]) - else: - # final_layer.n.xxx -> final_layer.xxx - k_new = 'final_layer.' + k_parts[2] - else: - # final_layer.xxx remains final_layer.xxx - k_new = k - else: - k_new = k - - state_dict[prefix + k_new] = v -``` - -### RLE-based Model - -For the RLE-based models, since the loss module is renamed to `loss_module` in MMPose 1.0, and the flow model is subsumed under the loss module, changes need to be made to the keys in `state_dict`: - -```Python -def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, - **kwargs): - - version = local_meta.get('version', None) - - if version and version >= self._version: - return - - # convert old-version state dict - keys = list(state_dict.keys()) - for _k in keys: - v = state_dict.pop(_k) - k = _k.lstrip(prefix) - # In old version, "loss" includes the instances of loss, - # now it should be renamed "loss_module" - k_parts = k.split('.') - if k_parts[0] == 'loss': - # loss.xxx -> loss_module.xxx - k_new = prefix + 'loss_module.' + '.'.join(k_parts[1:]) - else: - k_new = _k - - state_dict[k_new] = v -``` +# How to Migrate MMPose 0.x Projects to MMPose 1.0 + +MMPose 1.0 has been refactored extensively and addressed many legacy issues. Most of the code in MMPose 1.0 will not be compatible with 0.x version. + +To try our best to help you migrate your code and model, here are some major changes: + +## Data Transformation + +### Translation, Rotation and Scaling + +The transformation methods `TopDownRandomShiftBboxCenter` and `TopDownGetRandomScaleRotation` in old version, will be merged into `RandomBBoxTransform`. + +```Python +@TRANSFORMS.register_module() +class RandomBBoxTransform(BaseTransform): + r"""Rnadomly shift, resize and rotate the bounding boxes. + + Required Keys: + + - bbox_center + - bbox_scale + + Modified Keys: + + - bbox_center + - bbox_scale + + Added Keys: + - bbox_rotation + + Args: + shift_factor (float): Randomly shift the bbox in range + :math:`[-dx, dx]` and :math:`[-dy, dy]` in X and Y directions, + where :math:`dx(y) = x(y)_scale \cdot shift_factor` in pixels. + Defaults to 0.16 + shift_prob (float): Probability of applying random shift. Defaults to + 0.3 + scale_factor (Tuple[float, float]): Randomly resize the bbox in range + :math:`[scale_factor[0], scale_factor[1]]`. 
Defaults to (0.5, 1.5)
+        scale_prob (float): Probability of applying random resizing. Defaults
+            to 1.0
+        rotate_factor (float): Randomly rotate the bbox in
+            :math:`[-rotate_factor, rotate_factor]` in degrees. Defaults
+            to 80.0
+        rotate_prob (float): Probability of applying random rotation. Defaults
+            to 0.6
+    """
+
+    def __init__(self,
+                 shift_factor: float = 0.16,
+                 shift_prob: float = 0.3,
+                 scale_factor: Tuple[float, float] = (0.5, 1.5),
+                 scale_prob: float = 1.0,
+                 rotate_factor: float = 80.0,
+                 rotate_prob: float = 0.6) -> None:
+```
+
+### Target Generation
+
+The old methods:
+
+- `TopDownGenerateTarget`
+- `TopDownGenerateTargetRegression`
+- `BottomUpGenerateHeatmapTarget`
+- `BottomUpGenerateTarget`
+
+will be merged into `GenerateTarget`, and the actual generation methods are implemented in [Codec](./user_guides/codecs.md).
+
+```Python
+@TRANSFORMS.register_module()
+class GenerateTarget(BaseTransform):
+    """Encode keypoints into Target.
+
+    The generated target is usually the supervision signal of the model
+    learning, e.g. heatmaps or regression labels.
+
+    Required Keys:
+
+        - keypoints
+        - keypoints_visible
+        - dataset_keypoint_weights
+
+    Added Keys:
+
+        - The keys of the encoded items from the codec will be updated into
+            the results, e.g. ``'heatmaps'`` or ``'keypoint_weights'``. See
+            the specific codec for more details.
+
+    Args:
+        encoder (dict | list[dict]): The codec config for keypoint encoding.
+            Both single encoder and multiple encoders (given as a list) are
+            supported
+        multilevel (bool): Determine the method to handle multiple encoders.
+            If ``multilevel==True``, generate multilevel targets from a group
+            of encoders of the same type (e.g. multiple :class:`MSRAHeatmap`
+            encoders with different sigma values); If ``multilevel==False``,
+            generate combined targets from a group of different encoders. This
+            argument will have no effect in the case of a single encoder.
+            Defaults to ``False``
+        use_dataset_keypoint_weights (bool): Whether to use the keypoint
+            weights from the dataset meta information. Defaults to ``False``
+    """
+
+    def __init__(self,
+                 encoder: MultiConfig,
+                 multilevel: bool = False,
+                 use_dataset_keypoint_weights: bool = False) -> None:
+```
+
+### Data Normalization
+
+The data normalization operations `NormalizeTensor` and `ToTensor` are replaced by the **DataPreprocessor** module, so normalization is no longer performed as a preprocessing operation but as part of the model's forward propagation.
+
+## Compatibility of Models
+
+We have ensured compatibility with the model weights provided in the model zoo, so that the same weights achieve comparable accuracy in both versions. Note that due to the large number of differences in processing details, the inference outputs can be slightly different (less than 0.05% difference in accuracy).
+
+For model weights saved by training with the 0.x version, we provide a `_load_state_dict_pre_hook()` method in the head to convert the old-style `state_dict` to the new one. If you wish to make your model compatible with MMPose 1.0, you can refer to our implementation as follows.
+
+```Python
+@MODELS.register_module()
+class YourHead(BaseHead):
+
+    def __init__(self):
+
+        ## omitted
+
+        # Register the hook to automatically convert old version state dicts
+        self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook)
+```
+
+### Heatmap-based Model
+
+For models based on the `SimpleBaseline` approach, developers need to pay attention to the last convolutional layer.
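+
+Before looking at the actual hook, here is a self-contained toy sketch (not MMPose code; the module, key names and checkpoint below are made up purely for illustration) of how a registered `load_state_dict` pre-hook can rename old-style keys on the fly:
+
+```Python
+import torch
+import torch.nn as nn
+
+
+class ToyHead(nn.Module):
+    """A minimal module that converts old checkpoint keys when loading."""
+
+    def __init__(self):
+        super().__init__()
+        self.final_layer = nn.Conv2d(4, 4, 1)
+        # The hook runs on the state dict before ``load_state_dict`` consumes it
+        self._register_load_state_dict_pre_hook(self._rename_old_keys)
+
+    @staticmethod
+    def _rename_old_keys(state_dict, prefix, *args):
+        # Suppose old checkpoints stored the conv under a hypothetical "last_conv"
+        for old_key in list(state_dict):
+            if old_key.startswith(prefix + 'last_conv.'):
+                suffix = old_key[len(prefix) + len('last_conv.'):]
+                state_dict[prefix + 'final_layer.' + suffix] = state_dict.pop(old_key)
+
+
+old_ckpt = {
+    'last_conv.weight': torch.zeros(4, 4, 1, 1),
+    'last_conv.bias': torch.zeros(4),
+}
+head = ToyHead()
+head.load_state_dict(old_ckpt)  # keys are converted transparently during loading
+```
+
+The hook used for SimpleBaseline-style heads follows the same pattern, but additionally handles the split between intermediate and final conv layers: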
+ +```Python +def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, + **kwargs): + version = local_meta.get('version', None) + + if version and version >= self._version: + return + + # convert old-version state dict + keys = list(state_dict.keys()) + for _k in keys: + if not _k.startswith(prefix): + continue + v = state_dict.pop(_k) + k = _k[len(prefix):] + # In old version, "final_layer" includes both intermediate + # conv layers (new "conv_layers") and final conv layers (new + # "final_layer"). + # + # If there is no intermediate conv layer, old "final_layer" will + # have keys like "final_layer.xxx", which should be still + # named "final_layer.xxx"; + # + # If there are intermediate conv layers, old "final_layer" will + # have keys like "final_layer.n.xxx", where the weights of the last + # one should be renamed "final_layer.xxx", and others should be + # renamed "conv_layers.n.xxx" + k_parts = k.split('.') + if k_parts[0] == 'final_layer': + if len(k_parts) == 3: + assert isinstance(self.conv_layers, nn.Sequential) + idx = int(k_parts[1]) + if idx < len(self.conv_layers): + # final_layer.n.xxx -> conv_layers.n.xxx + k_new = 'conv_layers.' + '.'.join(k_parts[1:]) + else: + # final_layer.n.xxx -> final_layer.xxx + k_new = 'final_layer.' + k_parts[2] + else: + # final_layer.xxx remains final_layer.xxx + k_new = k + else: + k_new = k + + state_dict[prefix + k_new] = v +``` + +### RLE-based Model + +For the RLE-based models, since the loss module is renamed to `loss_module` in MMPose 1.0, and the flow model is subsumed under the loss module, changes need to be made to the keys in `state_dict`: + +```Python +def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, + **kwargs): + + version = local_meta.get('version', None) + + if version and version >= self._version: + return + + # convert old-version state dict + keys = list(state_dict.keys()) + for _k in keys: + v = state_dict.pop(_k) + k = _k.lstrip(prefix) + # In old version, "loss" includes the instances of loss, + # now it should be renamed "loss_module" + k_parts = k.split('.') + if k_parts[0] == 'loss': + # loss.xxx -> loss_module.xxx + k_new = prefix + 'loss_module.' + '.'.join(k_parts[1:]) + else: + k_new = _k + + state_dict[k_new] = v +``` diff --git a/docs/en/notes/benchmark.md b/docs/en/notes/benchmark.md index 8c82383f8c..48a4d99cd6 100644 --- a/docs/en/notes/benchmark.md +++ b/docs/en/notes/benchmark.md @@ -1,46 +1,46 @@ -# Benchmark - -We compare our results with some popular frameworks and official releases in terms of speed and accuracy. - -## Comparison Rules - -Here we compare our MMPose repo with other pose estimation toolboxes in the same data and model settings. - -To ensure the fairness of the comparison, the comparison experiments were conducted under the same hardware environment and using the same dataset. -For each model setting, we kept the same data pre-processing methods to make sure the same feature input. -In addition, we also used Memcached, a distributed memory-caching system, to load the data in all the compared toolboxes. -This minimizes the IO time during benchmark. - -The time we measured is the average training time for an iteration, including data processing and model training. -The training speed is measure with s/iter. The lower, the better. 
- -### Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset - -We demonstrate the superiority of our MMPose framework in terms of speed and accuracy on the standard COCO keypoint detection benchmark. -The mAP (the mean average precision) is used as the evaluation metric. - -| Model | Input size | MMPose (s/iter) | HRNet (s/iter) | MMPose (mAP) | HRNet (mAP) | -| :--------- | :--------: | :-------------: | :------------: | :----------: | :---------: | -| resnet_50 | 256x192 | **0.28** | 0.64 | **0.718** | 0.704 | -| resnet_50 | 384x288 | **0.81** | 1.24 | **0.731** | 0.722 | -| resnet_101 | 256x192 | **0.36** | 0.84 | **0.726** | 0.714 | -| resnet_101 | 384x288 | **0.79** | 1.53 | **0.748** | 0.736 | -| resnet_152 | 256x192 | **0.49** | 1.00 | **0.735** | 0.720 | -| resnet_152 | 384x288 | **0.96** | 1.65 | **0.750** | 0.743 | -| hrnet_w32 | 256x192 | **0.54** | 1.31 | **0.746** | 0.744 | -| hrnet_w32 | 384x288 | **0.76** | 2.00 | **0.760** | 0.758 | -| hrnet_w48 | 256x192 | **0.66** | 1.55 | **0.756** | 0.751 | -| hrnet_w48 | 384x288 | **1.23** | 2.20 | **0.767** | 0.763 | - -## Hardware - -- 8 NVIDIA Tesla V100 (32G) GPUs -- Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz - -## Software Environment - -- Python 3.7 -- PyTorch 1.4 -- CUDA 10.1 -- CUDNN 7.6.03 -- NCCL 2.4.08 +# Benchmark + +We compare our results with some popular frameworks and official releases in terms of speed and accuracy. + +## Comparison Rules + +Here we compare our MMPose repo with other pose estimation toolboxes in the same data and model settings. + +To ensure the fairness of the comparison, the comparison experiments were conducted under the same hardware environment and using the same dataset. +For each model setting, we kept the same data pre-processing methods to make sure the same feature input. +In addition, we also used Memcached, a distributed memory-caching system, to load the data in all the compared toolboxes. +This minimizes the IO time during benchmark. + +The time we measured is the average training time for an iteration, including data processing and model training. +The training speed is measure with s/iter. The lower, the better. + +### Results on COCO val2017 with detector having human AP of 56.4 on COCO val2017 dataset + +We demonstrate the superiority of our MMPose framework in terms of speed and accuracy on the standard COCO keypoint detection benchmark. +The mAP (the mean average precision) is used as the evaluation metric. 
+ +| Model | Input size | MMPose (s/iter) | HRNet (s/iter) | MMPose (mAP) | HRNet (mAP) | +| :--------- | :--------: | :-------------: | :------------: | :----------: | :---------: | +| resnet_50 | 256x192 | **0.28** | 0.64 | **0.718** | 0.704 | +| resnet_50 | 384x288 | **0.81** | 1.24 | **0.731** | 0.722 | +| resnet_101 | 256x192 | **0.36** | 0.84 | **0.726** | 0.714 | +| resnet_101 | 384x288 | **0.79** | 1.53 | **0.748** | 0.736 | +| resnet_152 | 256x192 | **0.49** | 1.00 | **0.735** | 0.720 | +| resnet_152 | 384x288 | **0.96** | 1.65 | **0.750** | 0.743 | +| hrnet_w32 | 256x192 | **0.54** | 1.31 | **0.746** | 0.744 | +| hrnet_w32 | 384x288 | **0.76** | 2.00 | **0.760** | 0.758 | +| hrnet_w48 | 256x192 | **0.66** | 1.55 | **0.756** | 0.751 | +| hrnet_w48 | 384x288 | **1.23** | 2.20 | **0.767** | 0.763 | + +## Hardware + +- 8 NVIDIA Tesla V100 (32G) GPUs +- Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz + +## Software Environment + +- Python 3.7 +- PyTorch 1.4 +- CUDA 10.1 +- CUDNN 7.6.03 +- NCCL 2.4.08 diff --git a/docs/en/notes/changelog.md b/docs/en/notes/changelog.md index 1d1be738e3..72fc7b085a 100644 --- a/docs/en/notes/changelog.md +++ b/docs/en/notes/changelog.md @@ -1,1314 +1,1314 @@ -# Changelog - -## **v1.0.0rc1 (14/10/2022)** - -**Highlights** - -- Release RTMPose, a high-performance real-time pose estimation algorithm with cross-platform deployment and inference support. See details at the [project page](/projects/rtmpose/) -- Support several new algorithms: ViTPose (arXiv'2022), CID (CVPR'2022), DEKR (CVPR'2021) -- Add Inferencer, a convenient inference interface that perform pose estimation and visualization on images, videos and webcam streams with only one line of code -- Introduce *Project*, a new form for rapid and easy implementation of new algorithms and features in MMPose, which is more handy for community contributors - -**New Features** - -- Support RTMPose ([#1971](https://github.com/open-mmlab/mmpose/pull/1971), [#2024](https://github.com/open-mmlab/mmpose/pull/2024), [#2028](https://github.com/open-mmlab/mmpose/pull/2028), [#2030](https://github.com/open-mmlab/mmpose/pull/2030), [#2040](https://github.com/open-mmlab/mmpose/pull/2040), [#2057](https://github.com/open-mmlab/mmpose/pull/2057)) -- Support Inferencer ([#1969](https://github.com/open-mmlab/mmpose/pull/1969)) -- Support ViTPose ([#1876](https://github.com/open-mmlab/mmpose/pull/1876), [#2056](https://github.com/open-mmlab/mmpose/pull/2056), [#2058](https://github.com/open-mmlab/mmpose/pull/2058), [#2065](https://github.com/open-mmlab/mmpose/pull/2065)) -- Support CID ([#1907](https://github.com/open-mmlab/mmpose/pull/1907)) -- Support DEKR ([#1834](https://github.com/open-mmlab/mmpose/pull/1834), [#1901](https://github.com/open-mmlab/mmpose/pull/1901)) -- Support training with multiple datasets ([#1767](https://github.com/open-mmlab/mmpose/pull/1767), [#1930](https://github.com/open-mmlab/mmpose/pull/1930), [#1938](https://github.com/open-mmlab/mmpose/pull/1938), [#2025](https://github.com/open-mmlab/mmpose/pull/2025)) -- Add *project* to allow rapid and easy implementation of new models and features ([#1914](https://github.com/open-mmlab/mmpose/pull/1914)) - -**Improvements** - -- Improve documentation quality ([#1846](https://github.com/open-mmlab/mmpose/pull/1846), [#1858](https://github.com/open-mmlab/mmpose/pull/1858), [#1872](https://github.com/open-mmlab/mmpose/pull/1872), [#1899](https://github.com/open-mmlab/mmpose/pull/1899), [#1925](https://github.com/open-mmlab/mmpose/pull/1925), 
[#1945](https://github.com/open-mmlab/mmpose/pull/1945), [#1952](https://github.com/open-mmlab/mmpose/pull/1952), [#1990](https://github.com/open-mmlab/mmpose/pull/1990), [#2023](https://github.com/open-mmlab/mmpose/pull/2023), [#2042](https://github.com/open-mmlab/mmpose/pull/2042)) -- Support visualizing keypoint indices ([#2051](https://github.com/open-mmlab/mmpose/pull/2051)) -- Support OpenPose style visualization ([#2055](https://github.com/open-mmlab/mmpose/pull/2055)) -- Accelerate image transpose in data pipelines with tensor operation ([#1976](https://github.com/open-mmlab/mmpose/pull/1976)) -- Support auto-import modules from registry ([#1961](https://github.com/open-mmlab/mmpose/pull/1961)) -- Support keypoint partition metric ([#1944](https://github.com/open-mmlab/mmpose/pull/1944)) -- Support SimCC 1D-heatmap visualization ([#1912](https://github.com/open-mmlab/mmpose/pull/1912)) -- Support saving predictions and data metainfo in demos ([#1814](https://github.com/open-mmlab/mmpose/pull/1814), [#1879](https://github.com/open-mmlab/mmpose/pull/1879)) -- Support SimCC with DARK ([#1870](https://github.com/open-mmlab/mmpose/pull/1870)) -- Remove Gaussian blur for offset maps in UDP-regress ([#1815](https://github.com/open-mmlab/mmpose/pull/1815)) -- Refactor encoding interface of Codec for better extendibility and easier configuration ([#1781](https://github.com/open-mmlab/mmpose/pull/1781)) -- Support evaluating CocoMetric without annotation file ([#1722](https://github.com/open-mmlab/mmpose/pull/1722)) -- Improve unit tests ([#1765](https://github.com/open-mmlab/mmpose/pull/1765)) - -**Bug Fixes** - -- Fix repeated warnings from different ranks ([#2053](https://github.com/open-mmlab/mmpose/pull/2053)) -- Avoid frequent scope switching when using mmdet inference api ([#2039](https://github.com/open-mmlab/mmpose/pull/2039)) -- Remove EMA parameters and message hub data when publishing model checkpoints ([#2036](https://github.com/open-mmlab/mmpose/pull/2036)) -- Fix metainfo copying in dataset class ([#2017](https://github.com/open-mmlab/mmpose/pull/2017)) -- Fix top-down demo bug when there is no object detected ([#2007](https://github.com/open-mmlab/mmpose/pull/2007)) -- Fix config errors ([#1882](https://github.com/open-mmlab/mmpose/pull/1882), [#1906](https://github.com/open-mmlab/mmpose/pull/1906), [#1995](https://github.com/open-mmlab/mmpose/pull/1995)) -- Fix image demo failure when GUI is unavailable ([#1968](https://github.com/open-mmlab/mmpose/pull/1968)) -- Fix bug in AdaptiveWingLoss ([#1953](https://github.com/open-mmlab/mmpose/pull/1953)) -- Fix incorrect importing of RepeatDataset which is deprecated ([#1943](https://github.com/open-mmlab/mmpose/pull/1943)) -- Fix bug in bottom-up datasets that ignores images without instances ([#1752](https://github.com/open-mmlab/mmpose/pull/1752), [#1936](https://github.com/open-mmlab/mmpose/pull/1936)) -- Fix upstream dependency issues ([#1867](https://github.com/open-mmlab/mmpose/pull/1867), [#1921](https://github.com/open-mmlab/mmpose/pull/1921)) -- Fix evaluation issues and update results ([#1763](https://github.com/open-mmlab/mmpose/pull/1763), [#1773](https://github.com/open-mmlab/mmpose/pull/1773), [#1780](https://github.com/open-mmlab/mmpose/pull/1780), [#1850](https://github.com/open-mmlab/mmpose/pull/1850), [#1868](https://github.com/open-mmlab/mmpose/pull/1868)) -- Fix local registry missing warnings ([#1849](https://github.com/open-mmlab/mmpose/pull/1849)) -- Remove deprecated scripts for model deployment 
([#1845](https://github.com/open-mmlab/mmpose/pull/1845)) -- Fix a bug in input transformation in BaseHead ([#1843](https://github.com/open-mmlab/mmpose/pull/1843)) -- Fix an interface mismatch with MMDetection in webcam demo ([#1813](https://github.com/open-mmlab/mmpose/pull/1813)) -- Fix a bug in heatmap visualization that causes incorrect scale ([#1800](https://github.com/open-mmlab/mmpose/pull/1800)) -- Add model metafiles ([#1768](https://github.com/open-mmlab/mmpose/pull/1768)) - -## **v1.0.0rc0 (14/10/2022)** - -**New Features** - -- Support 4 light-weight pose estimation algorithms: [SimCC](https://doi.org/10.48550/arxiv.2107.03332) (ECCV'2022), [Debias-IPR](https://openaccess.thecvf.com/content/ICCV2021/papers/Gu_Removing_the_Bias_of_Integral_Pose_Regression_ICCV_2021_paper.pdf) (ICCV'2021), [IPR](https://arxiv.org/abs/1711.08229) (ECCV'2018), and [DSNT](https://arxiv.org/abs/1801.07372v2) (ArXiv'2018) ([#1628](https://github.com/open-mmlab/mmpose/pull/1628)) - -**Migrations** - -- Add Webcam API in MMPose 1.0 ([#1638](https://github.com/open-mmlab/mmpose/pull/1638), [#1662](https://github.com/open-mmlab/mmpose/pull/1662)) @Ben-Louis -- Add codec for Associative Embedding (beta) ([#1603](https://github.com/open-mmlab/mmpose/pull/1603)) @ly015 - -**Improvements** - -- Add a colab tutorial for MMPose 1.0 ([#1660](https://github.com/open-mmlab/mmpose/pull/1660)) @Tau-J -- Add model index in config folder ([#1710](https://github.com/open-mmlab/mmpose/pull/1710), [#1709](https://github.com/open-mmlab/mmpose/pull/1709), [#1627](https://github.com/open-mmlab/mmpose/pull/1627)) @ly015, @Tau-J, @Ben-Louis -- Update and improve documentation ([#1692](https://github.com/open-mmlab/mmpose/pull/1692), [#1656](https://github.com/open-mmlab/mmpose/pull/1656), [#1681](https://github.com/open-mmlab/mmpose/pull/1681), [#1677](https://github.com/open-mmlab/mmpose/pull/1677), [#1664](https://github.com/open-mmlab/mmpose/pull/1664), [#1659](https://github.com/open-mmlab/mmpose/pull/1659)) @Tau-J, @Ben-Louis, @liqikai9 -- Improve config structures and formats ([#1651](https://github.com/open-mmlab/mmpose/pull/1651)) @liqikai9 - -**Bug Fixes** - -- Update mmengine version requirements ([#1715](https://github.com/open-mmlab/mmpose/pull/1715)) @Ben-Louis -- Update dependencies of pre-commit hooks ([#1705](https://github.com/open-mmlab/mmpose/pull/1705)) @Ben-Louis -- Fix mmcv version in DockerFile ([#1704](https://github.com/open-mmlab/mmpose/pull/1704)) -- Fix a bug in setting dataset metainfo in configs ([#1684](https://github.com/open-mmlab/mmpose/pull/1684)) @ly015 -- Fix a bug in UDP training ([#1682](https://github.com/open-mmlab/mmpose/pull/1682)) @liqikai9 -- Fix a bug in Dark decoding ([#1676](https://github.com/open-mmlab/mmpose/pull/1676)) @liqikai9 -- Fix bugs in visualization ([#1671](https://github.com/open-mmlab/mmpose/pull/1671), [#1668](https://github.com/open-mmlab/mmpose/pull/1668), [#1657](https://github.com/open-mmlab/mmpose/pull/1657)) @liqikai9, @Ben-Louis -- Fix incorrect flops calculation ([#1669](https://github.com/open-mmlab/mmpose/pull/1669)) @liqikai9 -- Fix `tensor.tile` compatibility issue for pytorch 1.6 ([#1658](https://github.com/open-mmlab/mmpose/pull/1658)) @ly015 -- Fix compatibility with `MultilevelPixelData` ([#1647](https://github.com/open-mmlab/mmpose/pull/1647)) @liqikai9 - -## **v1.0.0beta (1/09/2022)** - -We are excited to announce the release of MMPose 1.0.0beta. -MMPose 1.0.0beta is the first version of MMPose 1.x, a part of the OpenMMLab 2.0 projects. 
-Built upon the new [training engine](https://github.com/open-mmlab/mmengine). - -**Highlights** - -- **New engines**. MMPose 1.x is based on [MMEngine](https://github.com/open-mmlab/mmengine), which provides a general and powerful runner that allows more flexible customizations and significantly simplifies the entrypoints of high-level interfaces. - -- **Unified interfaces**. As a part of the OpenMMLab 2.0 projects, MMPose 1.x unifies and refactors the interfaces and internal logics of train, testing, datasets, models, evaluation, and visualization. All the OpenMMLab 2.0 projects share the same design in those interfaces and logics to allow the emergence of multi-task/modality algorithms. - -- **More documentation and tutorials**. We add a bunch of documentation and tutorials to help users get started more smoothly. Read it [here](https://mmpose.readthedocs.io/en/latest/). - -**Breaking Changes** - -In this release, we made lots of major refactoring and modifications. Please refer to the [migration guide](../migration.md) for details and migration instructions. - -## **v0.28.1 (28/07/2022)** - -This release is meant to fix the compatibility with the latest mmcv v1.6.1 - -## **v0.28.0 (06/07/2022)** - -**Highlights** - -- Support [TCFormer](https://openaccess.thecvf.com/content/CVPR2022/html/Zeng_Not_All_Tokens_Are_Equal_Human-Centric_Visual_Analysis_via_Token_CVPR_2022_paper.html) backbone, CVPR'2022 ([#1447](https://github.com/open-mmlab/mmpose/pull/1447), [#1452](https://github.com/open-mmlab/mmpose/pull/1452)) @zengwang430521 - -- Add [RLE](https://arxiv.org/abs/2107.11291) models on COCO dataset ([#1424](https://github.com/open-mmlab/mmpose/pull/1424)) @Indigo6, @Ben-Louis, @ly015 - -- Update swin models with better performance ([#1467](https://github.com/open-mmlab/mmpose/pull/1434)) @jin-s13 - -**New Features** - -- Support [TCFormer](https://openaccess.thecvf.com/content/CVPR2022/html/Zeng_Not_All_Tokens_Are_Equal_Human-Centric_Visual_Analysis_via_Token_CVPR_2022_paper.html) backbone, CVPR'2022 ([#1447](https://github.com/open-mmlab/mmpose/pull/1447), [#1452](https://github.com/open-mmlab/mmpose/pull/1452)) @zengwang430521 - -- Add [RLE](https://arxiv.org/abs/2107.11291) models on COCO dataset ([#1424](https://github.com/open-mmlab/mmpose/pull/1424)) @Indigo6, @Ben-Louis, @ly015 - -- Support layer decay optimizer constructor and learning rate decay optimizer constructor ([#1423](https://github.com/open-mmlab/mmpose/pull/1423)) @jin-s13 - -**Improvements** - -- Improve documentation quality ([#1416](https://github.com/open-mmlab/mmpose/pull/1416), [#1421](https://github.com/open-mmlab/mmpose/pull/1421), [#1423](https://github.com/open-mmlab/mmpose/pull/1423), [#1426](https://github.com/open-mmlab/mmpose/pull/1426), [#1458](https://github.com/open-mmlab/mmpose/pull/1458), [#1463](https://github.com/open-mmlab/mmpose/pull/1463)) @ly015, @liqikai9 - -- Support installation by [mim](https://github.com/open-mmlab/mim) ([#1425](https://github.com/open-mmlab/mmpose/pull/1425)) @liqikai9 - -- Support PAVI logger ([#1434](https://github.com/open-mmlab/mmpose/pull/1434)) @EvelynWang-0423 - -- Add progress bar for some demos ([#1454](https://github.com/open-mmlab/mmpose/pull/1454)) @liqikai9 - -- Webcam API supports quick device setting in terminal commands ([#1466](https://github.com/open-mmlab/mmpose/pull/1466)) @ly015 - -- Update swin models with better performance ([#1467](https://github.com/open-mmlab/mmpose/pull/1434)) @jin-s13 - -**Bug Fixes** - -- Rename `custom_hooks_config` to 
`custom_hooks` in configs to align with the documentation ([#1427](https://github.com/open-mmlab/mmpose/pull/1427)) @ly015 - -- Fix deadlock issue in Webcam API ([#1430](https://github.com/open-mmlab/mmpose/pull/1430)) @ly015 - -- Fix smoother configs in video 3D demo ([#1457](https://github.com/open-mmlab/mmpose/pull/1457)) @ly015 - -## **v0.27.0 (07/06/2022)** - -**Highlights** - -- Support hand gesture recognition - - - Try the demo for gesture recognition - - Learn more about the algorithm, dataset and experiment results - -- Major upgrade to the Webcam API - - - Tutorials (EN|zh_CN) - - [API Reference](https://mmpose.readthedocs.io/en/latest/api.html#mmpose-apis-webcam) - - Demo - -**New Features** - -- Support gesture recognition algorithm [MTUT](https://openaccess.thecvf.com/content_CVPR_2019/html/Abavisani_Improving_the_Performance_of_Unimodal_Dynamic_Hand-Gesture_Recognition_With_Multimodal_CVPR_2019_paper.html) CVPR'2019 and dataset [NVGesture](https://openaccess.thecvf.com/content_cvpr_2016/html/Molchanov_Online_Detection_and_CVPR_2016_paper.html) CVPR'2016 ([#1380](https://github.com/open-mmlab/mmpose/pull/1380)) @Ben-Louis - -**Improvements** - -- Upgrade Webcam API and related documents ([#1393](https://github.com/open-mmlab/mmpose/pull/1393), [#1404](https://github.com/open-mmlab/mmpose/pull/1404), [#1413](https://github.com/open-mmlab/mmpose/pull/1413)) @ly015 - -- Support exporting COCO inference result without the annotation file ([#1368](https://github.com/open-mmlab/mmpose/pull/1368)) @liqikai9 - -- Replace markdownlint with mdformat in CI to avoid the dependence on ruby [#1382](https://github.com/open-mmlab/mmpose/pull/1382) @ly015 - -- Improve documentation quality ([#1385](https://github.com/open-mmlab/mmpose/pull/1385), [#1394](https://github.com/open-mmlab/mmpose/pull/1394), [#1395](https://github.com/open-mmlab/mmpose/pull/1395), [#1408](https://github.com/open-mmlab/mmpose/pull/1408)) @chubei-oppen, @ly015, @liqikai9 - -**Bug Fixes** - -- Fix xywh->xyxy bbox conversion in dataset sanity check ([#1367](https://github.com/open-mmlab/mmpose/pull/1367)) @jin-s13 - -- Fix a bug in two-stage 3D keypoint demo ([#1373](https://github.com/open-mmlab/mmpose/pull/1373)) @ly015 - -- Fix out-dated settings in PVT configs ([#1376](https://github.com/open-mmlab/mmpose/pull/1376)) @ly015 - -- Fix myst settings for document compiling ([#1381](https://github.com/open-mmlab/mmpose/pull/1381)) @ly015 - -- Fix a bug in bbox transform ([#1384](https://github.com/open-mmlab/mmpose/pull/1384)) @ly015 - -- Fix inaccurate description of `min_keypoints` in tracking apis ([#1398](https://github.com/open-mmlab/mmpose/pull/1398)) @pallgeuer - -- Fix warning with `torch.meshgrid` ([#1402](https://github.com/open-mmlab/mmpose/pull/1402)) @pallgeuer - -- Remove redundant transformer modules from `mmpose.datasets.backbones.utils` ([#1405](https://github.com/open-mmlab/mmpose/pull/1405)) @ly015 - -## **v0.26.0 (05/05/2022)** - -**Highlights** - -- Support [RLE (Residual Log-likelihood Estimation)](https://arxiv.org/abs/2107.11291), ICCV'2021 ([#1259](https://github.com/open-mmlab/mmpose/pull/1259)) @Indigo6, @ly015 - -- Support [Swin Transformer](https://arxiv.org/abs/2103.14030), ICCV'2021 ([#1300](https://github.com/open-mmlab/mmpose/pull/1300)) @yumendecc, @ly015 - -- Support [PVT](https://arxiv.org/abs/2102.12122), ICCV'2021 and [PVTv2](https://arxiv.org/abs/2106.13797), CVMJ'2022 ([#1343](https://github.com/open-mmlab/mmpose/pull/1343)) @zengwang430521 - -- Speed up inference and reduce CPU 
usage by optimizing the pre-processing pipeline ([#1320](https://github.com/open-mmlab/mmpose/pull/1320)) @chenxinfeng4, @liqikai9 - -**New Features** - -- Support [RLE (Residual Log-likelihood Estimation)](https://arxiv.org/abs/2107.11291), ICCV'2021 ([#1259](https://github.com/open-mmlab/mmpose/pull/1259)) @Indigo6, @ly015 - -- Support [Swin Transformer](https://arxiv.org/abs/2103.14030), ICCV'2021 ([#1300](https://github.com/open-mmlab/mmpose/pull/1300)) @yumendecc, @ly015 - -- Support [PVT](https://arxiv.org/abs/2102.12122), ICCV'2021 and [PVTv2](https://arxiv.org/abs/2106.13797), CVMJ'2022 ([#1343](https://github.com/open-mmlab/mmpose/pull/1343)) @zengwang430521 - -- Support [FPN](https://openaccess.thecvf.com/content_cvpr_2017/html/Lin_Feature_Pyramid_Networks_CVPR_2017_paper.html), CVPR'2017 ([#1300](https://github.com/open-mmlab/mmpose/pull/1300)) @yumendecc, @ly015 - -**Improvements** - -- Speed up inference and reduce CPU usage by optimizing the pre-processing pipeline ([#1320](https://github.com/open-mmlab/mmpose/pull/1320)) @chenxinfeng4, @liqikai9 - -- Video demo supports models that requires multi-frame inputs ([#1300](https://github.com/open-mmlab/mmpose/pull/1300)) @liqikai9, @jin-s13 - -- Update benchmark regression list ([#1328](https://github.com/open-mmlab/mmpose/pull/1328)) @ly015, @liqikai9 - -- Remove unnecessary warnings in `TopDownPoseTrack18VideoDataset` ([#1335](https://github.com/open-mmlab/mmpose/pull/1335)) @liqikai9 - -- Improve documentation quality ([#1313](https://github.com/open-mmlab/mmpose/pull/1313), [#1305](https://github.com/open-mmlab/mmpose/pull/1305)) @Ben-Louis, @ly015 - -- Update deprecating settings in configs ([#1317](https://github.com/open-mmlab/mmpose/pull/1317)) @ly015 - -**Bug Fixes** - -- Fix a bug in human skeleton grouping that may skip the matching process unexpectedly when `ignore_to_much` is True ([#1341](https://github.com/open-mmlab/mmpose/pull/1341)) @daixinghome - -- Fix a GPG key error that leads to CI failure ([#1354](https://github.com/open-mmlab/mmpose/pull/1354)) @ly015 - -- Fix bugs in distributed training script ([#1338](https://github.com/open-mmlab/mmpose/pull/1338), [#1298](https://github.com/open-mmlab/mmpose/pull/1298)) @ly015 - -- Fix an upstream bug in xtoccotools that causes incorrect AP(M) results ([#1308](https://github.com/open-mmlab/mmpose/pull/1308)) @jin-s13, @ly015 - -- Fix indentiation errors in the colab tutorial ([#1298](https://github.com/open-mmlab/mmpose/pull/1298)) @YuanZi1501040205 - -- Fix incompatible model weight initialization with other OpenMMLab codebases ([#1329](https://github.com/open-mmlab/mmpose/pull/1329)) @274869388 - -- Fix HRNet FP16 checkpoints download URL ([#1309](https://github.com/open-mmlab/mmpose/pull/1309)) @YinAoXiong - -- Fix typos in `body3d_two_stage_video_demo.py` ([#1295](https://github.com/open-mmlab/mmpose/pull/1295)) @mucozcan - -**Breaking Changes** - -- Refactor bbox processing in datasets and pipelines ([#1311](https://github.com/open-mmlab/mmpose/pull/1311)) @ly015, @Ben-Louis - -- The bbox format conversion (xywh to center-scale) and random translation are moved from the dataset to the pipeline. The comparison between new and old version is as below: - -v0.26.0v0.25.0Dataset -(e.g. [TopDownCOCODataset](https://github.com/open-mmlab/mmpose/blob/master/mmpose/datasets/datasets/top_down/topdown_coco_dataset.py)) - -... # Data sample only contains bbox rec.append({ 'bbox': obj\['clean_bbox\]\[:4\], ... }) - - - - - -... 
# Convert bbox from xywh to center-scale center, scale = self.\_xywh2cs(\*obj\['clean_bbox'\]\[:4\]) # Data sample contains center and scale rec.append({ 'bbox': obj\['clean_bbox\]\[:4\], 'center': center, 'scale': scale, ... }) - - - - - - - -Pipeline Config - -(e.g. [HRNet+COCO](https://github.com/open-mmlab/mmpose/blob/master/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/hrnet_w32_coco_256x192.py)) - - - -... train_pipeline = \[ dict(type='LoadImageFromFile'), # Convert bbox from xywh to center-scale dict(type='TopDownGetBboxCenterScale', padding=1.25), # Randomly shift bbox center dict(type='TopDownRandomShiftBboxCenter', shift_factor=0.16, prob=0.3), ... \] - - - - - -... train_pipeline = \[ dict(type='LoadImageFromFile'), ... \] - - - - - - - -Advantage - - - -
  • Simpler data sample content
  • Flexible bbox format conversion and augmentation
  • Apply bbox random translation every epoch (instead of only applying once at the annotation loading) - - - -- - - - - - -BC Breaking - -The method `_xywh2cs` of dataset base classes (e.g. [Kpt2dSviewRgbImgTopDownDataset](https://github.com/open-mmlab/mmpose/blob/master/mmpose/datasets/datasets/base/kpt_2d_sview_rgb_img_top_down_dataset.py)) will be deprecated in the future. Custom datasets will need modifications to move the bbox format conversion to pipelines. - -- - - - - - - - -## **v0.25.0 (02/04/2022)** - -**Highlights** - -- Support Shelf and Campus datasets with pre-trained VoxelPose models, ["3D Pictorial Structures for Multiple Human Pose Estimation"](http://campar.in.tum.de/pub/belagiannis2014cvpr/belagiannis2014cvpr.pdf), CVPR'2014 ([#1225](https://github.com/open-mmlab/mmpose/pull/1225)) @liqikai9, @wusize - -- Add `Smoother` module for temporal smoothing of the pose estimation with configurable filters ([#1127](https://github.com/open-mmlab/mmpose/pull/1127)) @ailingzengzzz, @ly015 - -- Support SmoothNet for pose smoothing, ["SmoothNet: A Plug-and-Play Network for Refining Human Poses in Videos"](https://arxiv.org/abs/2112.13715), arXiv'2021 ([#1279](https://github.com/open-mmlab/mmpose/pull/1279)) @ailingzengzzz, @ly015 - -- Add multiview 3D pose estimation demo ([#1270](https://github.com/open-mmlab/mmpose/pull/1270)) @wusize - -**New Features** - -- Support Shelf and Campus datasets with pre-trained VoxelPose models, ["3D Pictorial Structures for Multiple Human Pose Estimation"](http://campar.in.tum.de/pub/belagiannis2014cvpr/belagiannis2014cvpr.pdf), CVPR'2014 ([#1225](https://github.com/open-mmlab/mmpose/pull/1225)) @liqikai9, @wusize - -- Add `Smoother` module for temporal smoothing of the pose estimation with configurable filters ([#1127](https://github.com/open-mmlab/mmpose/pull/1127)) @ailingzengzzz, @ly015 - -- Support SmoothNet for pose smoothing, ["SmoothNet: A Plug-and-Play Network for Refining Human Poses in Videos"](https://arxiv.org/abs/2112.13715), arXiv'2021 ([#1279](https://github.com/open-mmlab/mmpose/pull/1279)) @ailingzengzzz, @ly015 - -- Add multiview 3D pose estimation demo ([#1270](https://github.com/open-mmlab/mmpose/pull/1270)) @wusize - -- Support multi-machine distributed training ([#1248](https://github.com/open-mmlab/mmpose/pull/1248)) @ly015 - -**Improvements** - -- Update HRFormer configs and checkpoints with relative position bias ([#1245](https://github.com/open-mmlab/mmpose/pull/1245)) @zengwang430521 - -- Support using different random seed for each distributed node ([#1257](https://github.com/open-mmlab/mmpose/pull/1257), [#1229](https://github.com/open-mmlab/mmpose/pull/1229)) @ly015 - -- Improve documentation quality ([#1275](https://github.com/open-mmlab/mmpose/pull/1275), [#1255](https://github.com/open-mmlab/mmpose/pull/1255), [#1258](https://github.com/open-mmlab/mmpose/pull/1258), [#1249](https://github.com/open-mmlab/mmpose/pull/1249), [#1247](https://github.com/open-mmlab/mmpose/pull/1247), [#1240](https://github.com/open-mmlab/mmpose/pull/1240), [#1235](https://github.com/open-mmlab/mmpose/pull/1235)) @ly015, @jin-s13, @YoniChechik - -**Bug Fixes** - -- Fix keypoint index in RHD dataset meta information ([#1265](https://github.com/open-mmlab/mmpose/pull/1265)) @liqikai9 - -- Fix pre-commit hook unexpected behavior on Windows ([#1282](https://github.com/open-mmlab/mmpose/pull/1282)) @liqikai9 - -- Remove python-dev installation in CI ([#1276](https://github.com/open-mmlab/mmpose/pull/1276)) @ly015 - -- Unify hyphens in argument 
names in tools and demos ([#1271](https://github.com/open-mmlab/mmpose/pull/1271)) @ly015 - -- Fix ambiguous channel size in `channel_shuffle` that may cause exporting failure ([#1242](https://github.com/open-mmlab/mmpose/pull/1242)) @PINTO0309 - -- Fix a bug in Webcam API that causes single-class detectors to fail ([#1239](https://github.com/open-mmlab/mmpose/pull/1239)) @674106399 - -- Fix the issue that `custom_hook` cannot be set in configs ([#1236](https://github.com/open-mmlab/mmpose/pull/1236)) @bladrome - -- Fix incompatible MMCV version in DockerFile @raykindle - -- Skip invisible joints in visualization ([#1228](https://github.com/open-mmlab/mmpose/pull/1228)) @womeier - -## **v0.24.0 (07/03/2022)** - -**Highlights** - -- Support HRFormer ["HRFormer: High-Resolution Vision Transformer for Dense Predict"](https://proceedings.neurips.cc/paper/2021/hash/3bbfdde8842a5c44a0323518eec97cbe-Abstract.html), NeurIPS'2021 ([#1203](https://github.com/open-mmlab/mmpose/pull/1203)) @zengwang430521 - -- Support Windows installation with pip ([#1213](https://github.com/open-mmlab/mmpose/pull/1213)) @jin-s13, @ly015 - -- Add WebcamAPI documents ([#1187](https://github.com/open-mmlab/mmpose/pull/1187)) @ly015 - -**New Features** - -- Support HRFormer ["HRFormer: High-Resolution Vision Transformer for Dense Predict"](https://proceedings.neurips.cc/paper/2021/hash/3bbfdde8842a5c44a0323518eec97cbe-Abstract.html), NeurIPS'2021 ([#1203](https://github.com/open-mmlab/mmpose/pull/1203)) @zengwang430521 - -- Support Windows installation with pip ([#1213](https://github.com/open-mmlab/mmpose/pull/1213)) @jin-s13, @ly015 - -- Support CPU training with mmcv \< v1.4.4 ([#1161](https://github.com/open-mmlab/mmpose/pull/1161)) @EasonQYS, @ly015 - -- Add "Valentine Magic" demo with WebcamAPI ([#1189](https://github.com/open-mmlab/mmpose/pull/1189), [#1191](https://github.com/open-mmlab/mmpose/pull/1191)) @liqikai9 - -**Improvements** - -- Refactor multi-view 3D pose estimation framework towards better modularization and extensibility ([#1196](https://github.com/open-mmlab/mmpose/pull/1196)) @wusize - -- Add WebcamAPI documents and tutorials ([#1187](https://github.com/open-mmlab/mmpose/pull/1187)) @ly015 - -- Refactor dataset evaluation interface to align with other OpenMMLab codebases ([#1209](https://github.com/open-mmlab/mmpose/pull/1209)) @ly015 - -- Add deprecation message for deploy tools since [MMDeploy](https://github.com/open-mmlab/mmdeploy) now supports MMPose ([#1207](https://github.com/open-mmlab/mmpose/pull/1207)) @QwQ2000 - -- Improve documentation quality ([#1206](https://github.com/open-mmlab/mmpose/pull/1206), [#1161](https://github.com/open-mmlab/mmpose/pull/1161)) @ly015 - -- Switch to OpenMMLab official pre-commit-hook for copyright check ([#1214](https://github.com/open-mmlab/mmpose/pull/1214)) @ly015 - -**Bug Fixes** - -- Fix hard-coded data collating and scattering in inference ([#1175](https://github.com/open-mmlab/mmpose/pull/1175)) @ly015 - -- Fix model configs on JHMDB dataset ([#1188](https://github.com/open-mmlab/mmpose/pull/1188)) @jin-s13 - -- Fix area calculation in pose tracking inference ([#1197](https://github.com/open-mmlab/mmpose/pull/1197)) @pallgeuer - -- Fix registry scope conflict of module wrapper ([#1204](https://github.com/open-mmlab/mmpose/pull/1204)) @ly015 - -- Update MMCV installation in CI and documents ([#1205](https://github.com/open-mmlab/mmpose/pull/1205)) - -- Fix incorrect color channel order in visualization functions
([#1212](https://github.com/open-mmlab/mmpose/pull/1212)) @ly015 - -## **v0.23.0 (11/02/2022)** - -**Highlights** - -- Add [MMPose Webcam API](https://github.com/open-mmlab/mmpose/tree/master/tools/webcam): A simple yet powerful tool to develop interactive webcam applications with MMPose functions. ([#1178](https://github.com/open-mmlab/mmpose/pull/1178), [#1173](https://github.com/open-mmlab/mmpose/pull/1173), [#1143](https://github.com/open-mmlab/mmpose/pull/1143), [#1094](https://github.com/open-mmlab/mmpose/pull/1094), [#1133](https://github.com/open-mmlab/mmpose/pull/1133), [#1098](https://github.com/open-mmlab/mmpose/pull/1098), [#1160](https://github.com/open-mmlab/mmpose/pull/1160)) @ly015, @jin-s13, @liqikai9, @wusize, @luminxu, @zengwang430521, @mzr1996 - -**New Features** - -- Add [MMPose Webcam API](https://github.com/open-mmlab/mmpose/tree/master/tools/webcam): A simple yet powerful tool to develop interactive webcam applications with MMPose functions. ([#1178](https://github.com/open-mmlab/mmpose/pull/1178), [#1173](https://github.com/open-mmlab/mmpose/pull/1173), [#1143](https://github.com/open-mmlab/mmpose/pull/1143), [#1094](https://github.com/open-mmlab/mmpose/pull/1094), [#1133](https://github.com/open-mmlab/mmpose/pull/1133), [#1098](https://github.com/open-mmlab/mmpose/pull/1098), [#1160](https://github.com/open-mmlab/mmpose/pull/1160)) @ly015, @jin-s13, @liqikai9, @wusize, @luminxu, @zengwang430521, @mzr1996 - -- Support ConcatDataset ([#1139](https://github.com/open-mmlab/mmpose/pull/1139)) @Canwang-sjtu - -- Support CPU training and testing ([#1157](https://github.com/open-mmlab/mmpose/pull/1157)) @ly015 - -**Improvements** - -- Add multi-processing configurations to speed up distributed training and testing ([#1146](https://github.com/open-mmlab/mmpose/pull/1146)) @ly015 - -- Add default runtime config ([#1145](https://github.com/open-mmlab/mmpose/pull/1145)) - -- Upgrade isort in pre-commit hook ([#1179](https://github.com/open-mmlab/mmpose/pull/1179)) @liqikai9 - -- Update README and documents ([#1171](https://github.com/open-mmlab/mmpose/pull/1171), [#1167](https://github.com/open-mmlab/mmpose/pull/1167), [#1153](https://github.com/open-mmlab/mmpose/pull/1153), [#1149](https://github.com/open-mmlab/mmpose/pull/1149), [#1148](https://github.com/open-mmlab/mmpose/pull/1148), [#1147](https://github.com/open-mmlab/mmpose/pull/1147), [#1140](https://github.com/open-mmlab/mmpose/pull/1140)) @jin-s13, @wusize, @TommyZihao, @ly015 - -**Bug Fixes** - -- Fix non-deterministic behavior in pre-commit hooks ([#1136](https://github.com/open-mmlab/mmpose/pull/1136)) @jin-s13 - -- Deprecate the support for "python setup.py test" ([#1179](https://github.com/open-mmlab/mmpose/pull/1179)) @ly015 - -- Fix incompatible settings with MMCV on HSigmoid default parameters ([#1132](https://github.com/open-mmlab/mmpose/pull/1132)) @ly015 - -- Fix albumentation installation ([#1184](https://github.com/open-mmlab/mmpose/pull/1184)) @BIGWangYuDong - -## **v0.22.0 (04/01/2022)** - -**Highlights** - -- Support VoxelPose ["VoxelPose: Towards Multi-Camera 3D Human Pose Estimation in Wild Environment"](https://arxiv.org/abs/2004.06239), ECCV'2020 ([#1050](https://github.com/open-mmlab/mmpose/pull/1050)) @wusize - -- Support Soft Wing loss ["Structure-Coherent Deep Feature Learning for Robust Face Alignment"](https://linchunze.github.io/papers/TIP21_Structure_coherent_FA.pdf), TIP'2021
([#1077](https://github.com/open-mmlab/mmpose/pull/1077)) @jin-s13 - -- Support Adaptive Wing loss ["Adaptive Wing Loss for Robust Face Alignment via Heatmap Regression"](https://arxiv.org/abs/1904.07399), ICCV'2019 ([#1072](https://github.com/open-mmlab/mmpose/pull/1072)) @jin-s13 - -**New Features** - -- Support VoxelPose ["VoxelPose: Towards Multi-Camera 3D Human Pose Estimation in Wild Environment"](https://arxiv.org/abs/2004.06239), ECCV'2020 ([#1050](https://github.com/open-mmlab/mmpose/pull/1050)) @wusize - -- Support Soft Wing loss ["Structure-Coherent Deep Feature Learning for Robust Face Alignment"](https://linchunze.github.io/papers/TIP21_Structure_coherent_FA.pdf), TIP'2021 ([#1077](https://github.com/open-mmlab/mmpose/pull/1077)) @jin-s13 - -- Support Adaptive Wing loss ["Adaptive Wing Loss for Robust Face Alignment via Heatmap Regression"](https://arxiv.org/abs/1904.07399), ICCV'2019 ([#1072](https://github.com/open-mmlab/mmpose/pull/1072)) @jin-s13 - -- Add LiteHRNet-18 Checkpoints trained on COCO. ([#1120](https://github.com/open-mmlab/mmpose/pull/1120)) @jin-s13 - -**Improvements** - -- Improve documentation quality ([#1115](https://github.com/open-mmlab/mmpose/pull/1115), [#1111](https://github.com/open-mmlab/mmpose/pull/1111), [#1105](https://github.com/open-mmlab/mmpose/pull/1105), [#1087](https://github.com/open-mmlab/mmpose/pull/1087), [#1086](https://github.com/open-mmlab/mmpose/pull/1086), [#1085](https://github.com/open-mmlab/mmpose/pull/1085), [#1084](https://github.com/open-mmlab/mmpose/pull/1084), [#1083](https://github.com/open-mmlab/mmpose/pull/1083), [#1124](https://github.com/open-mmlab/mmpose/pull/1124), [#1070](https://github.com/open-mmlab/mmpose/pull/1070), [#1068](https://github.com/open-mmlab/mmpose/pull/1068)) @jin-s13, @liqikai9, @ly015 - -- Support CircleCI ([#1074](https://github.com/open-mmlab/mmpose/pull/1074)) @ly015 - -- Skip unit tests in CI when only document files were changed ([#1074](https://github.com/open-mmlab/mmpose/pull/1074), [#1041](https://github.com/open-mmlab/mmpose/pull/1041)) @QwQ2000, @ly015 - -- Support file_client_args in LoadImageFromFile ([#1076](https://github.com/open-mmlab/mmpose/pull/1076)) @jin-s13 - -**Bug Fixes** - -- Fix a bug in Dark UDP postprocessing that causes error when the channel number is large. 
([#1079](https://github.com/open-mmlab/mmpose/pull/1079), [#1116](https://github.com/open-mmlab/mmpose/pull/1116)) @X00123, @jin-s13 - -- Fix hard-coded `sigmas` in bottom-up image demo ([#1107](https://github.com/open-mmlab/mmpose/pull/1107), [#1101](https://github.com/open-mmlab/mmpose/pull/1101)) @chenxinfeng4, @liqikai9 - -- Fix unstable checks in unit tests ([#1112](https://github.com/open-mmlab/mmpose/pull/1112)) @ly015 - -- Do not destroy NULL windows if `args.show==False` in demo scripts ([#1104](https://github.com/open-mmlab/mmpose/pull/1104)) @bladrome - -## **v0.21.0 (06/12/2021)** - -**Highlights** - -- Support ["Learning Temporal Pose Estimation from Sparsely-Labeled Videos"](https://arxiv.org/abs/1906.04016), NeurIPS'2019 ([#932](https://github.com/open-mmlab/mmpose/pull/932), [#1006](https://github.com/open-mmlab/mmpose/pull/1006), [#1036](https://github.com/open-mmlab/mmpose/pull/1036), [#1060](https://github.com/open-mmlab/mmpose/pull/1060)) @liqikai9 - -- Add ViPNAS-MobileNetV3 models ([#1025](https://github.com/open-mmlab/mmpose/pull/1025)) @luminxu, @jin-s13 - -- Add inference speed benchmark ([#1028](https://github.com/open-mmlab/mmpose/pull/1028), [#1034](https://github.com/open-mmlab/mmpose/pull/1034), [#1044](https://github.com/open-mmlab/mmpose/pull/1044)) @liqikai9 - -**New Features** - -- Support ["Learning Temporal Pose Estimation from Sparsely-Labeled Videos"](https://arxiv.org/abs/1906.04016), NeurIPS'2019 ([#932](https://github.com/open-mmlab/mmpose/pull/932), [#1006](https://github.com/open-mmlab/mmpose/pull/1006), [#1036](https://github.com/open-mmlab/mmpose/pull/1036)) @liqikai9 - -- Add ViPNAS-MobileNetV3 models ([#1025](https://github.com/open-mmlab/mmpose/pull/1025)) @luminxu, @jin-s13 - -- Add light-weight top-down models for whole-body keypoint detection ([#1009](https://github.com/open-mmlab/mmpose/pull/1009), [#1020](https://github.com/open-mmlab/mmpose/pull/1020), [#1055](https://github.com/open-mmlab/mmpose/pull/1055)) @luminxu, @ly015 - -- Add HRNet checkpoints with various settings on PoseTrack18 ([#1035](https://github.com/open-mmlab/mmpose/pull/1035)) @liqikai9 - -**Improvements** - -- Add inference speed benchmark ([#1028](https://github.com/open-mmlab/mmpose/pull/1028), [#1034](https://github.com/open-mmlab/mmpose/pull/1034), [#1044](https://github.com/open-mmlab/mmpose/pull/1044)) @liqikai9 - -- Update model metafile format ([#1001](https://github.com/open-mmlab/mmpose/pull/1001)) @ly015 - -- Support minus output feature index in mobilenet_v3 ([#1005](https://github.com/open-mmlab/mmpose/pull/1005)) @luminxu - -- Improve documentation quality ([#1018](https://github.com/open-mmlab/mmpose/pull/1018), [#1026](https://github.com/open-mmlab/mmpose/pull/1026), [#1027](https://github.com/open-mmlab/mmpose/pull/1027), [#1031](https://github.com/open-mmlab/mmpose/pull/1031), [#1038](https://github.com/open-mmlab/mmpose/pull/1038), [#1046](https://github.com/open-mmlab/mmpose/pull/1046), [#1056](https://github.com/open-mmlab/mmpose/pull/1056), [#1057](https://github.com/open-mmlab/mmpose/pull/1057)) @edybk, @luminxu, @ly015, @jin-s13 - -- Set default random seed in training initialization ([#1030](https://github.com/open-mmlab/mmpose/pull/1030)) @ly015 - -- Skip CI when only specific files changed ([#1041](https://github.com/open-mmlab/mmpose/pull/1041), [#1059](https://github.com/open-mmlab/mmpose/pull/1059)) @QwQ2000, @ly015 - -- Automatically cancel uncompleted action runs when new commit arrives 
([#1053](https://github.com/open-mmlab/mmpose/pull/1053)) @ly015 - -**Bug Fixes** - -- Update pose tracking demo to be compatible with latest mmtracking ([#1014](https://github.com/open-mmlab/mmpose/pull/1014)) @jin-s13 - -- Fix symlink creation failure when installed in Windows environments ([#1039](https://github.com/open-mmlab/mmpose/pull/1039)) @QwQ2000 - -- Fix AP-10K dataset sigmas ([#1040](https://github.com/open-mmlab/mmpose/pull/1040)) @jin-s13 - -## **v0.20.0 (01/11/2021)** - -**Highlights** - -- Add AP-10K dataset for animal pose estimation ([#987](https://github.com/open-mmlab/mmpose/pull/987)) @Annbless, @AlexTheBad, @jin-s13, @ly015 - -- Support TorchServe ([#979](https://github.com/open-mmlab/mmpose/pull/979)) @ly015 - -**New Features** - -- Add AP-10K dataset for animal pose estimation ([#987](https://github.com/open-mmlab/mmpose/pull/987)) @Annbless, @AlexTheBad, @jin-s13, @ly015 - -- Add HRNetv2 checkpoints on 300W and COFW datasets ([#980](https://github.com/open-mmlab/mmpose/pull/980)) @jin-s13 - -- Support TorchServe ([#979](https://github.com/open-mmlab/mmpose/pull/979)) @ly015 - -**Bug Fixes** - -- Fix some deprecated or risky settings in configs ([#963](https://github.com/open-mmlab/mmpose/pull/963), [#976](https://github.com/open-mmlab/mmpose/pull/976), [#992](https://github.com/open-mmlab/mmpose/pull/992)) @jin-s13, @wusize - -- Fix issues of default arguments of training and testing scripts ([#970](https://github.com/open-mmlab/mmpose/pull/970), [#985](https://github.com/open-mmlab/mmpose/pull/985)) @liqikai9, @wusize - -- Fix heatmap and tag size mismatch in bottom-up with UDP ([#994](https://github.com/open-mmlab/mmpose/pull/994)) @wusize - -- Fix python3.9 installation in CI ([#983](https://github.com/open-mmlab/mmpose/pull/983)) @ly015 - -- Fix model zoo document integrity issue ([#990](https://github.com/open-mmlab/mmpose/pull/990)) @jin-s13 - -**Improvements** - -- Support non-square input shape for bottom-up ([#991](https://github.com/open-mmlab/mmpose/pull/991)) @wusize - -- Add image and video resources for demo ([#971](https://github.com/open-mmlab/mmpose/pull/971)) @liqikai9 - -- Use CUDA docker images to accelerate CI ([#973](https://github.com/open-mmlab/mmpose/pull/973)) @ly015 - -- Add codespell hook and fix detected typos ([#977](https://github.com/open-mmlab/mmpose/pull/977)) @ly015 - -## **v0.19.0 (08/10/2021)** - -**Highlights** - -- Add models for Associative Embedding with Hourglass network backbone ([#906](https://github.com/open-mmlab/mmpose/pull/906), [#955](https://github.com/open-mmlab/mmpose/pull/955)) @jin-s13, @luminxu - -- Support COCO-Wholebody-Face and COCO-Wholebody-Hand datasets ([#813](https://github.com/open-mmlab/mmpose/pull/813)) @jin-s13, @innerlee, @luminxu - -- Upgrade dataset interface ([#901](https://github.com/open-mmlab/mmpose/pull/901), [#924](https://github.com/open-mmlab/mmpose/pull/924)) @jin-s13, @innerlee, @ly015, @liqikai9 - -- New style of documentation ([#945](https://github.com/open-mmlab/mmpose/pull/945)) @ly015 - -**New Features** - -- Add models for Associative Embedding with Hourglass network backbone ([#906](https://github.com/open-mmlab/mmpose/pull/906), [#955](https://github.com/open-mmlab/mmpose/pull/955)) @jin-s13, @luminxu - -- Support COCO-Wholebody-Face and COCO-Wholebody-Hand datasets ([#813](https://github.com/open-mmlab/mmpose/pull/813)) @jin-s13, @innerlee, @luminxu - -- Add pseudo-labeling tool to generate COCO style keypoint annotations with given bounding boxes 
([#928](https://github.com/open-mmlab/mmpose/pull/928)) @soltkreig - -- New style of documentation ([#945](https://github.com/open-mmlab/mmpose/pull/945)) @ly015 - -**Bug Fixes** - -- Fix segmentation parsing in Macaque dataset preprocessing ([#948](https://github.com/open-mmlab/mmpose/pull/948)) @jin-s13 - -- Fix dependencies that may lead to CI failure in downstream projects ([#936](https://github.com/open-mmlab/mmpose/pull/936), [#953](https://github.com/open-mmlab/mmpose/pull/953)) @RangiLyu, @ly015 - -- Fix keypoint order in Human3.6M dataset ([#940](https://github.com/open-mmlab/mmpose/pull/940)) @ttxskk - -- Fix unstable image loading for Interhand2.6M ([#913](https://github.com/open-mmlab/mmpose/pull/913)) @zengwang430521 - -**Improvements** - -- Upgrade dataset interface ([#901](https://github.com/open-mmlab/mmpose/pull/901), [#924](https://github.com/open-mmlab/mmpose/pull/924)) @jin-s13, @innerlee, @ly015, @liqikai9 - -- Improve demo usability and stability ([#908](https://github.com/open-mmlab/mmpose/pull/908), [#934](https://github.com/open-mmlab/mmpose/pull/934)) @ly015 - -- Standardize model metafile format ([#941](https://github.com/open-mmlab/mmpose/pull/941)) @ly015 - -- Support `persistent_worker` and several other arguments in configs ([#946](https://github.com/open-mmlab/mmpose/pull/946)) @jin-s13 - -- Use MMCV root model registry to enable cross-project module building ([#935](https://github.com/open-mmlab/mmpose/pull/935)) @RangiLyu - -- Improve the document quality ([#916](https://github.com/open-mmlab/mmpose/pull/916), [#909](https://github.com/open-mmlab/mmpose/pull/909), [#942](https://github.com/open-mmlab/mmpose/pull/942), [#913](https://github.com/open-mmlab/mmpose/pull/913), [#956](https://github.com/open-mmlab/mmpose/pull/956)) @jin-s13, @ly015, @bit-scientist, @zengwang430521 - -- Improve pull request template ([#952](https://github.com/open-mmlab/mmpose/pull/952), [#954](https://github.com/open-mmlab/mmpose/pull/954)) @ly015 - -**Breaking Changes** - -- Upgrade dataset interface ([#901](https://github.com/open-mmlab/mmpose/pull/901)) @jin-s13, @innerlee, @ly015 - -## **v0.18.0 (01/09/2021)** - -**Bug Fixes** - -- Fix redundant model weight loading in pytorch-to-onnx conversion ([#850](https://github.com/open-mmlab/mmpose/pull/850)) @ly015 - -- Fix a bug in update_model_index.py that may cause pre-commit hook failure([#866](https://github.com/open-mmlab/mmpose/pull/866)) @ly015 - -- Fix a bug in interhand_3d_head ([#890](https://github.com/open-mmlab/mmpose/pull/890)) @zengwang430521 - -- Fix pose tracking demo failure caused by out-of-date configs ([#891](https://github.com/open-mmlab/mmpose/pull/891)) - -**Improvements** - -- Add automatic benchmark regression tools ([#849](https://github.com/open-mmlab/mmpose/pull/849), [#880](https://github.com/open-mmlab/mmpose/pull/880), [#885](https://github.com/open-mmlab/mmpose/pull/885)) @liqikai9, @ly015 - -- Add copyright information and checking hook ([#872](https://github.com/open-mmlab/mmpose/pull/872)) - -- Add PR template ([#875](https://github.com/open-mmlab/mmpose/pull/875)) @ly015 - -- Add citation information ([#876](https://github.com/open-mmlab/mmpose/pull/876)) @ly015 - -- Add python3.9 in CI ([#877](https://github.com/open-mmlab/mmpose/pull/877), [#883](https://github.com/open-mmlab/mmpose/pull/883)) @ly015 - -- Improve the quality of the documents ([#845](https://github.com/open-mmlab/mmpose/pull/845), [#845](https://github.com/open-mmlab/mmpose/pull/845), 
[#848](https://github.com/open-mmlab/mmpose/pull/848), [#867](https://github.com/open-mmlab/mmpose/pull/867), [#870](https://github.com/open-mmlab/mmpose/pull/870), [#873](https://github.com/open-mmlab/mmpose/pull/873), [#896](https://github.com/open-mmlab/mmpose/pull/896)) @jin-s13, @ly015, @zhiqwang - -## **v0.17.0 (06/08/2021)** - -**Highlights** - -1. Support ["Lite-HRNet: A Lightweight High-Resolution Network"](https://arxiv.org/abs/2104.06403) CVPR'2021 ([#733](https://github.com/open-mmlab/mmpose/pull/733),[#800](https://github.com/open-mmlab/mmpose/pull/800)) @jin-s13 - -2. Add 3d body mesh demo ([#771](https://github.com/open-mmlab/mmpose/pull/771)) @zengwang430521 - -3. Add Chinese documentation ([#787](https://github.com/open-mmlab/mmpose/pull/787), [#798](https://github.com/open-mmlab/mmpose/pull/798), [#799](https://github.com/open-mmlab/mmpose/pull/799), [#802](https://github.com/open-mmlab/mmpose/pull/802), [#804](https://github.com/open-mmlab/mmpose/pull/804), [#805](https://github.com/open-mmlab/mmpose/pull/805), [#815](https://github.com/open-mmlab/mmpose/pull/815), [#816](https://github.com/open-mmlab/mmpose/pull/816), [#817](https://github.com/open-mmlab/mmpose/pull/817), [#819](https://github.com/open-mmlab/mmpose/pull/819), [#839](https://github.com/open-mmlab/mmpose/pull/839)) @ly015, @luminxu, @jin-s13, @liqikai9, @zengwang430521 - -4. Add Colab Tutorial ([#834](https://github.com/open-mmlab/mmpose/pull/834)) @ly015 - -**New Features** - -- Support ["Lite-HRNet: A Lightweight High-Resolution Network"](https://arxiv.org/abs/2104.06403) CVPR'2021 ([#733](https://github.com/open-mmlab/mmpose/pull/733),[#800](https://github.com/open-mmlab/mmpose/pull/800)) @jin-s13 - -- Add 3d body mesh demo ([#771](https://github.com/open-mmlab/mmpose/pull/771)) @zengwang430521 - -- Add Chinese documentation ([#787](https://github.com/open-mmlab/mmpose/pull/787), [#798](https://github.com/open-mmlab/mmpose/pull/798), [#799](https://github.com/open-mmlab/mmpose/pull/799), [#802](https://github.com/open-mmlab/mmpose/pull/802), [#804](https://github.com/open-mmlab/mmpose/pull/804), [#805](https://github.com/open-mmlab/mmpose/pull/805), [#815](https://github.com/open-mmlab/mmpose/pull/815), [#816](https://github.com/open-mmlab/mmpose/pull/816), [#817](https://github.com/open-mmlab/mmpose/pull/817), [#819](https://github.com/open-mmlab/mmpose/pull/819), [#839](https://github.com/open-mmlab/mmpose/pull/839)) @ly015, @luminxu, @jin-s13, @liqikai9, @zengwang430521 - -- Add Colab Tutorial ([#834](https://github.com/open-mmlab/mmpose/pull/834)) @ly015 - -- Support training for InterHand v1.0 dataset ([#761](https://github.com/open-mmlab/mmpose/pull/761)) @zengwang430521 - -**Bug Fixes** - -- Fix mpii pckh@0.1 index ([#773](https://github.com/open-mmlab/mmpose/pull/773)) @jin-s13 - -- Fix multi-node distributed test ([#818](https://github.com/open-mmlab/mmpose/pull/818)) @ly015 - -- Fix docstring and init_weights error of ShuffleNetV1 ([#814](https://github.com/open-mmlab/mmpose/pull/814)) @Junjun2016 - -- Fix imshow_bbox error when input bboxes is empty ([#796](https://github.com/open-mmlab/mmpose/pull/796)) @ly015 - -- Fix model zoo doc generation ([#778](https://github.com/open-mmlab/mmpose/pull/778)) @ly015 - -- Fix typo ([#767](https://github.com/open-mmlab/mmpose/pull/767)), ([#780](https://github.com/open-mmlab/mmpose/pull/780), [#782](https://github.com/open-mmlab/mmpose/pull/782)) @ly015, @jin-s13 - -**Breaking Changes** - -- Use MMCV EvalHook 
([#686](https://github.com/open-mmlab/mmpose/pull/686)) @ly015 - -**Improvements** - -- Add pytest.ini and fix docstring ([#812](https://github.com/open-mmlab/mmpose/pull/812)) @jin-s13 - -- Update MSELoss ([#829](https://github.com/open-mmlab/mmpose/pull/829)) @Ezra-Yu - -- Move process_mmdet_results into inference.py ([#831](https://github.com/open-mmlab/mmpose/pull/831)) @ly015 - -- Update resource limit ([#783](https://github.com/open-mmlab/mmpose/pull/783)) @jin-s13 - -- Use COCO 2D pose model in 3D demo examples ([#785](https://github.com/open-mmlab/mmpose/pull/785)) @ly015 - -- Change model zoo titles in the doc from center-aligned to left-aligned ([#792](https://github.com/open-mmlab/mmpose/pull/792), [#797](https://github.com/open-mmlab/mmpose/pull/797)) @ly015 - -- Support MIM ([#706](https://github.com/open-mmlab/mmpose/pull/706), [#794](https://github.com/open-mmlab/mmpose/pull/794)) @ly015 - -- Update out-of-date configs ([#827](https://github.com/open-mmlab/mmpose/pull/827)) @jin-s13 - -- Remove opencv-python-headless dependency by albumentations ([#833](https://github.com/open-mmlab/mmpose/pull/833)) @ly015 - -- Update QQ QR code in README_CN.md ([#832](https://github.com/open-mmlab/mmpose/pull/832)) @ly015 - -## **v0.16.0 (02/07/2021)** - -**Highlights** - -1. Support ["ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search"](https://arxiv.org/abs/2105.10154) CVPR'2021 ([#742](https://github.com/open-mmlab/mmpose/pull/742),[#755](https://github.com/open-mmlab/mmpose/pull/755)). - -2. Support MPI-INF-3DHP dataset ([#683](https://github.com/open-mmlab/mmpose/pull/683),[#746](https://github.com/open-mmlab/mmpose/pull/746),[#751](https://github.com/open-mmlab/mmpose/pull/751)). - -3. Add webcam demo tool ([#729](https://github.com/open-mmlab/mmpose/pull/729)) - -4. Add 3d body and hand pose estimation demo ([#704](https://github.com/open-mmlab/mmpose/pull/704), [#727](https://github.com/open-mmlab/mmpose/pull/727)). 
- -**New Features** - -- Support ["ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search"](https://arxiv.org/abs/2105.10154) CVPR'2021 ([#742](https://github.com/open-mmlab/mmpose/pull/742),[#755](https://github.com/open-mmlab/mmpose/pull/755)) - -- Support MPI-INF-3DHP dataset ([#683](https://github.com/open-mmlab/mmpose/pull/683),[#746](https://github.com/open-mmlab/mmpose/pull/746),[#751](https://github.com/open-mmlab/mmpose/pull/751)) - -- Support Webcam demo ([#729](https://github.com/open-mmlab/mmpose/pull/729)) - -- Support Interhand 3d demo ([#704](https://github.com/open-mmlab/mmpose/pull/704)) - -- Support 3d pose video demo ([#727](https://github.com/open-mmlab/mmpose/pull/727)) - -- Support H36m dataset for 2d pose estimation ([#709](https://github.com/open-mmlab/mmpose/pull/709), [#735](https://github.com/open-mmlab/mmpose/pull/735)) - -- Add scripts to generate mim metafile ([#749](https://github.com/open-mmlab/mmpose/pull/749)) - -**Bug Fixes** - -- Fix typos ([#692](https://github.com/open-mmlab/mmpose/pull/692),[#696](https://github.com/open-mmlab/mmpose/pull/696),[#697](https://github.com/open-mmlab/mmpose/pull/697),[#698](https://github.com/open-mmlab/mmpose/pull/698),[#712](https://github.com/open-mmlab/mmpose/pull/712),[#718](https://github.com/open-mmlab/mmpose/pull/718),[#728](https://github.com/open-mmlab/mmpose/pull/728)) - -- Change model download links from `http` to `https` ([#716](https://github.com/open-mmlab/mmpose/pull/716)) - -**Breaking Changes** - -- Switch to MMCV MODEL_REGISTRY ([#669](https://github.com/open-mmlab/mmpose/pull/669)) - -**Improvements** - -- Refactor MeshMixDataset ([#752](https://github.com/open-mmlab/mmpose/pull/752)) - -- Rename 'GaussianHeatMap' to 'GaussianHeatmap' ([#745](https://github.com/open-mmlab/mmpose/pull/745)) - -- Update out-of-date configs ([#734](https://github.com/open-mmlab/mmpose/pull/734)) - -- Improve compatibility for breaking changes ([#731](https://github.com/open-mmlab/mmpose/pull/731)) - -- Enable to control radius and thickness in visualization ([#722](https://github.com/open-mmlab/mmpose/pull/722)) - -- Add regex dependency ([#720](https://github.com/open-mmlab/mmpose/pull/720)) - -## **v0.15.0 (02/06/2021)** - -**Highlights** - -1. Support 3d video pose estimation (VideoPose3D). - -2. Support 3d hand pose estimation (InterNet). - -3. Improve presentation of modelzoo. 
- -**New Features** - -- Support "InterHand2.6M: A Dataset and Baseline for 3D Interacting Hand Pose Estimation from a Single RGB Image" (ECCV‘20) ([#624](https://github.com/open-mmlab/mmpose/pull/624)) - -- Support "3D human pose estimation in video with temporal convolutions and semi-supervised training" (CVPR'19) ([#602](https://github.com/open-mmlab/mmpose/pull/602), [#681](https://github.com/open-mmlab/mmpose/pull/681)) - -- Support 3d pose estimation demo ([#653](https://github.com/open-mmlab/mmpose/pull/653), [#670](https://github.com/open-mmlab/mmpose/pull/670)) - -- Support bottom-up whole-body pose estimation ([#689](https://github.com/open-mmlab/mmpose/pull/689)) - -- Support mmcli ([#634](https://github.com/open-mmlab/mmpose/pull/634)) - -**Bug Fixes** - -- Fix opencv compatibility ([#635](https://github.com/open-mmlab/mmpose/pull/635)) - -- Fix demo with UDP ([#637](https://github.com/open-mmlab/mmpose/pull/637)) - -- Fix bottom-up model onnx conversion ([#680](https://github.com/open-mmlab/mmpose/pull/680)) - -- Fix `GPU_IDS` in distributed training ([#668](https://github.com/open-mmlab/mmpose/pull/668)) - -- Fix MANIFEST.in ([#641](https://github.com/open-mmlab/mmpose/pull/641), [#657](https://github.com/open-mmlab/mmpose/pull/657)) - -- Fix docs ([#643](https://github.com/open-mmlab/mmpose/pull/643),[#684](https://github.com/open-mmlab/mmpose/pull/684),[#688](https://github.com/open-mmlab/mmpose/pull/688),[#690](https://github.com/open-mmlab/mmpose/pull/690),[#692](https://github.com/open-mmlab/mmpose/pull/692)) - -**Breaking Changes** - -- Reorganize configs by tasks, algorithms, datasets, and techniques ([#647](https://github.com/open-mmlab/mmpose/pull/647)) - -- Rename heads and detectors ([#667](https://github.com/open-mmlab/mmpose/pull/667)) - -**Improvements** - -- Add `radius` and `thickness` parameters in visualization ([#638](https://github.com/open-mmlab/mmpose/pull/638)) - -- Add `trans_prob` parameter in `TopDownRandomTranslation` ([#650](https://github.com/open-mmlab/mmpose/pull/650)) - -- Switch to `MMCV MODEL_REGISTRY` ([#669](https://github.com/open-mmlab/mmpose/pull/669)) - -- Update dependencies ([#674](https://github.com/open-mmlab/mmpose/pull/674), [#676](https://github.com/open-mmlab/mmpose/pull/676)) - -## **v0.14.0 (06/05/2021)** - -**Highlights** - -1. Support animal pose estimation with 7 popular datasets. - -2. Support "A simple yet effective baseline for 3d human pose estimation" (ICCV'17). 
- -**New Features** - -- Support "A simple yet effective baseline for 3d human pose estimation" (ICCV'17) ([#554](https://github.com/open-mmlab/mmpose/pull/554),[#558](https://github.com/open-mmlab/mmpose/pull/558),[#566](https://github.com/open-mmlab/mmpose/pull/566),[#570](https://github.com/open-mmlab/mmpose/pull/570),[#589](https://github.com/open-mmlab/mmpose/pull/589)) - -- Support animal pose estimation ([#559](https://github.com/open-mmlab/mmpose/pull/559),[#561](https://github.com/open-mmlab/mmpose/pull/561),[#563](https://github.com/open-mmlab/mmpose/pull/563),[#571](https://github.com/open-mmlab/mmpose/pull/571),[#603](https://github.com/open-mmlab/mmpose/pull/603),[#605](https://github.com/open-mmlab/mmpose/pull/605)) - -- Support Horse-10 dataset ([#561](https://github.com/open-mmlab/mmpose/pull/561)), MacaquePose dataset ([#561](https://github.com/open-mmlab/mmpose/pull/561)), Vinegar Fly dataset ([#561](https://github.com/open-mmlab/mmpose/pull/561)), Desert Locust dataset ([#561](https://github.com/open-mmlab/mmpose/pull/561)), Grevy's Zebra dataset ([#561](https://github.com/open-mmlab/mmpose/pull/561)), ATRW dataset ([#571](https://github.com/open-mmlab/mmpose/pull/571)), and Animal-Pose dataset ([#603](https://github.com/open-mmlab/mmpose/pull/603)) - -- Support bottom-up pose tracking demo ([#574](https://github.com/open-mmlab/mmpose/pull/574)) - -- Support FP16 training ([#584](https://github.com/open-mmlab/mmpose/pull/584),[#616](https://github.com/open-mmlab/mmpose/pull/616),[#626](https://github.com/open-mmlab/mmpose/pull/626)) - -- Support NMS for bottom-up ([#609](https://github.com/open-mmlab/mmpose/pull/609)) - -**Bug Fixes** - -- Fix bugs in the top-down demo, when there are no people in the images ([#569](https://github.com/open-mmlab/mmpose/pull/569)). - -- Fix the links in the doc ([#612](https://github.com/open-mmlab/mmpose/pull/612)) - -**Improvements** - -- Speed up top-down inference ([#560](https://github.com/open-mmlab/mmpose/pull/560)) - -- Update github CI ([#562](https://github.com/open-mmlab/mmpose/pull/562), [#564](https://github.com/open-mmlab/mmpose/pull/564)) - -- Update Readme ([#578](https://github.com/open-mmlab/mmpose/pull/578),[#579](https://github.com/open-mmlab/mmpose/pull/579),[#580](https://github.com/open-mmlab/mmpose/pull/580),[#592](https://github.com/open-mmlab/mmpose/pull/592),[#599](https://github.com/open-mmlab/mmpose/pull/599),[#600](https://github.com/open-mmlab/mmpose/pull/600),[#607](https://github.com/open-mmlab/mmpose/pull/607)) - -- Update Faq ([#587](https://github.com/open-mmlab/mmpose/pull/587), [#610](https://github.com/open-mmlab/mmpose/pull/610)) - -## **v0.13.0 (31/03/2021)** - -**Highlights** - -1. Support Wingloss. - -2. Support RHD hand dataset. 
- -**New Features** - -- Support Wingloss ([#482](https://github.com/open-mmlab/mmpose/pull/482)) - -- Support RHD hand dataset ([#523](https://github.com/open-mmlab/mmpose/pull/523), [#551](https://github.com/open-mmlab/mmpose/pull/551)) - -- Support Human3.6m dataset for 3d keypoint detection ([#518](https://github.com/open-mmlab/mmpose/pull/518), [#527](https://github.com/open-mmlab/mmpose/pull/527)) - -- Support TCN model for 3d keypoint detection ([#521](https://github.com/open-mmlab/mmpose/pull/521), [#522](https://github.com/open-mmlab/mmpose/pull/522)) - -- Support Interhand3D model for 3d hand detection ([#536](https://github.com/open-mmlab/mmpose/pull/536)) - -- Support Multi-task detector ([#480](https://github.com/open-mmlab/mmpose/pull/480)) - -**Bug Fixes** - -- Fix PCKh@0.1 calculation ([#516](https://github.com/open-mmlab/mmpose/pull/516)) - -- Fix unittest ([#529](https://github.com/open-mmlab/mmpose/pull/529)) - -- Fix circular importing ([#542](https://github.com/open-mmlab/mmpose/pull/542)) - -- Fix bugs in bottom-up keypoint score ([#548](https://github.com/open-mmlab/mmpose/pull/548)) - -**Improvements** - -- Update config & checkpoints ([#525](https://github.com/open-mmlab/mmpose/pull/525), [#546](https://github.com/open-mmlab/mmpose/pull/546)) - -- Fix typos ([#514](https://github.com/open-mmlab/mmpose/pull/514), [#519](https://github.com/open-mmlab/mmpose/pull/519), [#532](https://github.com/open-mmlab/mmpose/pull/532), [#537](https://github.com/open-mmlab/mmpose/pull/537), ) - -- Speed up post processing ([#535](https://github.com/open-mmlab/mmpose/pull/535)) - -- Update mmcv version dependency ([#544](https://github.com/open-mmlab/mmpose/pull/544)) - -## **v0.12.0 (28/02/2021)** - -**Highlights** - -1. Support DeepPose algorithm. 
- -**New Features** - -- Support DeepPose algorithm ([#446](https://github.com/open-mmlab/mmpose/pull/446), [#461](https://github.com/open-mmlab/mmpose/pull/461)) - -- Support interhand3d dataset ([#468](https://github.com/open-mmlab/mmpose/pull/468)) - -- Support Albumentation pipeline ([#469](https://github.com/open-mmlab/mmpose/pull/469)) - -- Support PhotometricDistortion pipeline ([#485](https://github.com/open-mmlab/mmpose/pull/485)) - -- Set seed option for training ([#493](https://github.com/open-mmlab/mmpose/pull/493)) - -- Add demos for face keypoint detection ([#502](https://github.com/open-mmlab/mmpose/pull/502)) - -**Bug Fixes** - -- Change channel order according to configs ([#504](https://github.com/open-mmlab/mmpose/pull/504)) - -- Fix `num_factors` in UDP encoding ([#495](https://github.com/open-mmlab/mmpose/pull/495)) - -- Fix configs ([#456](https://github.com/open-mmlab/mmpose/pull/456)) - -**Breaking Changes** - -- Refactor configs for wholebody pose estimation ([#487](https://github.com/open-mmlab/mmpose/pull/487), [#491](https://github.com/open-mmlab/mmpose/pull/491)) - -- Rename `decode` function for heads ([#481](https://github.com/open-mmlab/mmpose/pull/481)) - -**Improvements** - -- Update config & checkpoints ([#453](https://github.com/open-mmlab/mmpose/pull/453),[#484](https://github.com/open-mmlab/mmpose/pull/484),[#487](https://github.com/open-mmlab/mmpose/pull/487)) - -- Add README in Chinese ([#462](https://github.com/open-mmlab/mmpose/pull/462)) - -- Add tutorials about configs ([#465](https://github.com/open-mmlab/mmpose/pull/465)) - -- Add demo videos for various tasks ([#499](https://github.com/open-mmlab/mmpose/pull/499), [#503](https://github.com/open-mmlab/mmpose/pull/503)) - -- Update docs about MMPose installation ([#467](https://github.com/open-mmlab/mmpose/pull/467), [#505](https://github.com/open-mmlab/mmpose/pull/505)) - -- Rename `stat.py` to `stats.py` ([#483](https://github.com/open-mmlab/mmpose/pull/483)) - -- Fix typos ([#463](https://github.com/open-mmlab/mmpose/pull/463), [#464](https://github.com/open-mmlab/mmpose/pull/464), [#477](https://github.com/open-mmlab/mmpose/pull/477), [#481](https://github.com/open-mmlab/mmpose/pull/481)) - -- latex to bibtex ([#471](https://github.com/open-mmlab/mmpose/pull/471)) - -- Update FAQ ([#466](https://github.com/open-mmlab/mmpose/pull/466)) - -## **v0.11.0 (31/01/2021)** - -**Highlights** - -1. Support fashion landmark detection. - -2. Support face keypoint detection. - -3. Support pose tracking with MMTracking. 
- -**New Features** - -- Support fashion landmark detection (DeepFashion) ([#413](https://github.com/open-mmlab/mmpose/pull/413)) - -- Support face keypoint detection (300W, AFLW, COFW, WFLW) ([#367](https://github.com/open-mmlab/mmpose/pull/367)) - -- Support pose tracking demo with MMTracking ([#427](https://github.com/open-mmlab/mmpose/pull/427)) - -- Support face demo ([#443](https://github.com/open-mmlab/mmpose/pull/443)) - -- Support AIC dataset for bottom-up methods ([#438](https://github.com/open-mmlab/mmpose/pull/438), [#449](https://github.com/open-mmlab/mmpose/pull/449)) - -**Bug Fixes** - -- Fix multi-batch training ([#434](https://github.com/open-mmlab/mmpose/pull/434)) - -- Fix sigmas in AIC dataset ([#441](https://github.com/open-mmlab/mmpose/pull/441)) - -- Fix config file ([#420](https://github.com/open-mmlab/mmpose/pull/420)) - -**Breaking Changes** - -- Refactor Heads ([#382](https://github.com/open-mmlab/mmpose/pull/382)) - -**Improvements** - -- Update readme ([#409](https://github.com/open-mmlab/mmpose/pull/409), [#412](https://github.com/open-mmlab/mmpose/pull/412), [#415](https://github.com/open-mmlab/mmpose/pull/415), [#416](https://github.com/open-mmlab/mmpose/pull/416), [#419](https://github.com/open-mmlab/mmpose/pull/419), [#421](https://github.com/open-mmlab/mmpose/pull/421), [#422](https://github.com/open-mmlab/mmpose/pull/422), [#424](https://github.com/open-mmlab/mmpose/pull/424), [#425](https://github.com/open-mmlab/mmpose/pull/425), [#435](https://github.com/open-mmlab/mmpose/pull/435), [#436](https://github.com/open-mmlab/mmpose/pull/436), [#437](https://github.com/open-mmlab/mmpose/pull/437), [#444](https://github.com/open-mmlab/mmpose/pull/444), [#445](https://github.com/open-mmlab/mmpose/pull/445)) - -- Add GAP (global average pooling) neck ([#414](https://github.com/open-mmlab/mmpose/pull/414)) - -- Speed up ([#411](https://github.com/open-mmlab/mmpose/pull/411), [#423](https://github.com/open-mmlab/mmpose/pull/423)) - -- Support COCO test-dev test ([#433](https://github.com/open-mmlab/mmpose/pull/433)) - -## **v0.10.0 (31/12/2020)** - -**Highlights** - -1. Support more human pose estimation methods. - - 1. [UDP](https://arxiv.org/abs/1911.07524) - -2. Support pose tracking. - -3. Support multi-batch inference. - -4. Add some useful tools, including `analyze_logs`, `get_flops`, `print_config`. - -5. Support more backbone networks. - - 1. [ResNest](https://arxiv.org/pdf/2004.08955.pdf) - 2. 
[VGG](https://arxiv.org/abs/1409.1556) - -**New Features** - -- Support UDP ([#353](https://github.com/open-mmlab/mmpose/pull/353), [#371](https://github.com/open-mmlab/mmpose/pull/371), [#402](https://github.com/open-mmlab/mmpose/pull/402)) - -- Support multi-batch inference ([#390](https://github.com/open-mmlab/mmpose/pull/390)) - -- Support MHP dataset ([#386](https://github.com/open-mmlab/mmpose/pull/386)) - -- Support pose tracking demo ([#380](https://github.com/open-mmlab/mmpose/pull/380)) - -- Support mpii-trb demo ([#372](https://github.com/open-mmlab/mmpose/pull/372)) - -- Support mobilenet for hand pose estimation ([#377](https://github.com/open-mmlab/mmpose/pull/377)) - -- Support ResNest backbone ([#370](https://github.com/open-mmlab/mmpose/pull/370)) - -- Support VGG backbone ([#370](https://github.com/open-mmlab/mmpose/pull/370)) - -- Add some useful tools, including `analyze_logs`, `get_flops`, `print_config` ([#324](https://github.com/open-mmlab/mmpose/pull/324)) - -**Bug Fixes** - -- Fix bugs in pck evaluation ([#328](https://github.com/open-mmlab/mmpose/pull/328)) - -- Fix model download links in README ([#396](https://github.com/open-mmlab/mmpose/pull/396), [#397](https://github.com/open-mmlab/mmpose/pull/397)) - -- Fix CrowdPose annotations and update benchmarks ([#384](https://github.com/open-mmlab/mmpose/pull/384)) - -- Fix modelzoo stat ([#354](https://github.com/open-mmlab/mmpose/pull/354), [#360](https://github.com/open-mmlab/mmpose/pull/360), [#362](https://github.com/open-mmlab/mmpose/pull/362)) - -- Fix config files for aic datasets ([#340](https://github.com/open-mmlab/mmpose/pull/340)) - -**Breaking Changes** - -- Rename `image_thr` to `det_bbox_thr` for top-down methods. - -**Improvements** - -- Organize the readme files ([#398](https://github.com/open-mmlab/mmpose/pull/398), [#399](https://github.com/open-mmlab/mmpose/pull/399), [#400](https://github.com/open-mmlab/mmpose/pull/400)) - -- Check linting for markdown ([#379](https://github.com/open-mmlab/mmpose/pull/379)) - -- Add faq.md ([#350](https://github.com/open-mmlab/mmpose/pull/350)) - -- Remove PyTorch 1.4 in CI ([#338](https://github.com/open-mmlab/mmpose/pull/338)) - -- Add pypi badge in readme ([#329](https://github.com/open-mmlab/mmpose/pull/329)) - -## **v0.9.0 (30/11/2020)** - -**Highlights** - -1. Support more human pose estimation methods. - - 1. [MSPN](https://arxiv.org/abs/1901.00148) - 2. [RSN](https://arxiv.org/abs/2003.04030) - -2. Support video pose estimation datasets. - - 1. [sub-JHMDB](http://jhmdb.is.tue.mpg.de/dataset) - -3. Support Onnx model conversion. - -**New Features** - -- Support MSPN ([#278](https://github.com/open-mmlab/mmpose/pull/278)) - -- Support RSN ([#221](https://github.com/open-mmlab/mmpose/pull/221), [#318](https://github.com/open-mmlab/mmpose/pull/318)) - -- Support new post-processing method for MSPN & RSN ([#288](https://github.com/open-mmlab/mmpose/pull/288)) - -- Support sub-JHMDB dataset ([#292](https://github.com/open-mmlab/mmpose/pull/292)) - -- Support urls for pre-trained models in config files ([#232](https://github.com/open-mmlab/mmpose/pull/232)) - -- Support Onnx ([#305](https://github.com/open-mmlab/mmpose/pull/305)) - -**Bug Fixes** - -- Fix model download links in README ([#255](https://github.com/open-mmlab/mmpose/pull/255), [#315](https://github.com/open-mmlab/mmpose/pull/315)) - -**Breaking Changes** - -- `post_process=True|False` and `unbiased_decoding=True|False` are deprecated, use `post_process=None|default|unbiased` etc. 
instead ([#288](https://github.com/open-mmlab/mmpose/pull/288)) - -**Improvements** - -- Enrich the model zoo ([#256](https://github.com/open-mmlab/mmpose/pull/256), [#320](https://github.com/open-mmlab/mmpose/pull/320)) - -- Set the default map_location as 'cpu' to reduce gpu memory cost ([#227](https://github.com/open-mmlab/mmpose/pull/227)) - -- Support return heatmaps and backbone features for bottom-up models ([#229](https://github.com/open-mmlab/mmpose/pull/229)) - -- Upgrade mmcv maximum & minimum version ([#269](https://github.com/open-mmlab/mmpose/pull/269), [#313](https://github.com/open-mmlab/mmpose/pull/313)) - -- Automatically add modelzoo statistics to readthedocs ([#252](https://github.com/open-mmlab/mmpose/pull/252)) - -- Fix Pylint issues ([#258](https://github.com/open-mmlab/mmpose/pull/258), [#259](https://github.com/open-mmlab/mmpose/pull/259), [#260](https://github.com/open-mmlab/mmpose/pull/260), [#262](https://github.com/open-mmlab/mmpose/pull/262), [#265](https://github.com/open-mmlab/mmpose/pull/265), [#267](https://github.com/open-mmlab/mmpose/pull/267), [#268](https://github.com/open-mmlab/mmpose/pull/268), [#270](https://github.com/open-mmlab/mmpose/pull/270), [#271](https://github.com/open-mmlab/mmpose/pull/271), [#272](https://github.com/open-mmlab/mmpose/pull/272), [#273](https://github.com/open-mmlab/mmpose/pull/273), [#275](https://github.com/open-mmlab/mmpose/pull/275), [#276](https://github.com/open-mmlab/mmpose/pull/276), [#283](https://github.com/open-mmlab/mmpose/pull/283), [#285](https://github.com/open-mmlab/mmpose/pull/285), [#293](https://github.com/open-mmlab/mmpose/pull/293), [#294](https://github.com/open-mmlab/mmpose/pull/294), [#295](https://github.com/open-mmlab/mmpose/pull/295)) - -- Improve README ([#226](https://github.com/open-mmlab/mmpose/pull/226), [#257](https://github.com/open-mmlab/mmpose/pull/257), [#264](https://github.com/open-mmlab/mmpose/pull/264), [#280](https://github.com/open-mmlab/mmpose/pull/280), [#296](https://github.com/open-mmlab/mmpose/pull/296)) - -- Support PyTorch 1.7 in CI ([#274](https://github.com/open-mmlab/mmpose/pull/274)) - -- Add docs/tutorials for running demos ([#263](https://github.com/open-mmlab/mmpose/pull/263)) - -## **v0.8.0 (31/10/2020)** - -**Highlights** - -1. Support more human pose estimation datasets. - - 1. [CrowdPose](https://github.com/Jeff-sjtu/CrowdPose) - 2. [PoseTrack18](https://posetrack.net/) - -2. Support more 2D hand keypoint estimation datasets. - - 1. [InterHand2.6](https://github.com/facebookresearch/InterHand2.6M) - -3. Support adversarial training for 3D human shape recovery. - -4. Support multi-stage losses. - -5. Support mpii demo. 
- -**New Features** - -- Support [CrowdPose](https://github.com/Jeff-sjtu/CrowdPose) dataset ([#195](https://github.com/open-mmlab/mmpose/pull/195)) - -- Support [PoseTrack18](https://posetrack.net/) dataset ([#220](https://github.com/open-mmlab/mmpose/pull/220)) - -- Support [InterHand2.6](https://github.com/facebookresearch/InterHand2.6M) dataset ([#202](https://github.com/open-mmlab/mmpose/pull/202)) - -- Support adversarial training for 3D human shape recovery ([#192](https://github.com/open-mmlab/mmpose/pull/192)) - -- Support multi-stage losses ([#204](https://github.com/open-mmlab/mmpose/pull/204)) - -**Bug Fixes** - -- Fix config files ([#190](https://github.com/open-mmlab/mmpose/pull/190)) - -**Improvements** - -- Add mpii demo ([#216](https://github.com/open-mmlab/mmpose/pull/216)) - -- Improve README ([#181](https://github.com/open-mmlab/mmpose/pull/181), [#183](https://github.com/open-mmlab/mmpose/pull/183), [#208](https://github.com/open-mmlab/mmpose/pull/208)) - -- Support return heatmaps and backbone features ([#196](https://github.com/open-mmlab/mmpose/pull/196), [#212](https://github.com/open-mmlab/mmpose/pull/212)) - -- Support different return formats of mmdetection models ([#217](https://github.com/open-mmlab/mmpose/pull/217)) - -## **v0.7.0 (30/9/2020)** - -**Highlights** - -1. Support HMR for 3D human shape recovery. - -2. Support WholeBody human pose estimation. - - 1. [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody) - -3. Support more 2D hand keypoint estimation datasets. - - 1. [Frei-hand](https://lmb.informatik.uni-freiburg.de/projects/freihand/) - 2. [CMU Panoptic HandDB](http://domedb.perception.cs.cmu.edu/handdb.html) - -4. Add more popular backbones & enrich the [modelzoo](https://mmpose.readthedocs.io/en/latest/model_zoo.html) - - 1. ShuffleNetv2 - -5. Support hand demo and whole-body demo. 
- -**New Features** - -- Support HMR for 3D human shape recovery ([#157](https://github.com/open-mmlab/mmpose/pull/157), [#160](https://github.com/open-mmlab/mmpose/pull/160), [#161](https://github.com/open-mmlab/mmpose/pull/161), [#162](https://github.com/open-mmlab/mmpose/pull/162)) - -- Support [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody) dataset ([#133](https://github.com/open-mmlab/mmpose/pull/133)) - -- Support [Frei-hand](https://lmb.informatik.uni-freiburg.de/projects/freihand/) dataset ([#125](https://github.com/open-mmlab/mmpose/pull/125)) - -- Support [CMU Panoptic HandDB](http://domedb.perception.cs.cmu.edu/handdb.html) dataset ([#144](https://github.com/open-mmlab/mmpose/pull/144)) - -- Support H36M dataset ([#159](https://github.com/open-mmlab/mmpose/pull/159)) - -- Support ShuffleNetv2 ([#139](https://github.com/open-mmlab/mmpose/pull/139)) - -- Support saving best models based on key indicator ([#127](https://github.com/open-mmlab/mmpose/pull/127)) - -**Bug Fixes** - -- Fix typos in docs ([#121](https://github.com/open-mmlab/mmpose/pull/121)) - -- Fix assertion ([#142](https://github.com/open-mmlab/mmpose/pull/142)) - -**Improvements** - -- Add tools to transform .mat format to .json format ([#126](https://github.com/open-mmlab/mmpose/pull/126)) - -- Add hand demo ([#115](https://github.com/open-mmlab/mmpose/pull/115)) - -- Add whole-body demo ([#163](https://github.com/open-mmlab/mmpose/pull/163)) - -- Reuse mmcv utility function and update version files ([#135](https://github.com/open-mmlab/mmpose/pull/135), [#137](https://github.com/open-mmlab/mmpose/pull/137)) - -- Enrich the modelzoo ([#147](https://github.com/open-mmlab/mmpose/pull/147), [#169](https://github.com/open-mmlab/mmpose/pull/169)) - -- Improve docs ([#174](https://github.com/open-mmlab/mmpose/pull/174), [#175](https://github.com/open-mmlab/mmpose/pull/175), [#178](https://github.com/open-mmlab/mmpose/pull/178)) - -- Improve README ([#176](https://github.com/open-mmlab/mmpose/pull/176)) - -- Improve version.py ([#173](https://github.com/open-mmlab/mmpose/pull/173)) - -## **v0.6.0 (31/8/2020)** - -**Highlights** - -1. Add more popular backbones & enrich the [modelzoo](https://mmpose.readthedocs.io/en/latest/model_zoo.html) - - 1. ResNext - 2. SEResNet - 3. ResNetV1D - 4. MobileNetv2 - 5. ShuffleNetv1 - 6. CPM (Convolutional Pose Machine) - -2. Add more popular datasets: - - 1. [AIChallenger](https://arxiv.org/abs/1711.06475?context=cs.CV) - 2. [MPII](http://human-pose.mpi-inf.mpg.de/) - 3. [MPII-TRB](https://github.com/kennymckormick/Triplet-Representation-of-human-Body) - 4. [OCHuman](http://www.liruilong.cn/projects/pose2seg/index.html) - -3. Support 2d hand keypoint estimation. - - 1. [OneHand10K](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html) - -4. Support bottom-up inference. 
- -**New Features** - -- Support [OneHand10K](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html) dataset ([#52](https://github.com/open-mmlab/mmpose/pull/52)) - -- Support [MPII](http://human-pose.mpi-inf.mpg.de/) dataset ([#55](https://github.com/open-mmlab/mmpose/pull/55)) - -- Support [MPII-TRB](https://github.com/kennymckormick/Triplet-Representation-of-human-Body) dataset ([#19](https://github.com/open-mmlab/mmpose/pull/19), [#47](https://github.com/open-mmlab/mmpose/pull/47), [#48](https://github.com/open-mmlab/mmpose/pull/48)) - -- Support [OCHuman](http://www.liruilong.cn/projects/pose2seg/index.html) dataset ([#70](https://github.com/open-mmlab/mmpose/pull/70)) - -- Support [AIChallenger](https://arxiv.org/abs/1711.06475?context=cs.CV) dataset ([#87](https://github.com/open-mmlab/mmpose/pull/87)) - -- Support multiple backbones ([#26](https://github.com/open-mmlab/mmpose/pull/26)) - -- Support CPM model ([#56](https://github.com/open-mmlab/mmpose/pull/56)) - -**Bug Fixes** - -- Fix configs for MPII & MPII-TRB datasets ([#93](https://github.com/open-mmlab/mmpose/pull/93)) - -- Fix the bug of missing `test_pipeline` in configs ([#14](https://github.com/open-mmlab/mmpose/pull/14)) - -- Fix typos ([#27](https://github.com/open-mmlab/mmpose/pull/27), [#28](https://github.com/open-mmlab/mmpose/pull/28), [#50](https://github.com/open-mmlab/mmpose/pull/50), [#53](https://github.com/open-mmlab/mmpose/pull/53), [#63](https://github.com/open-mmlab/mmpose/pull/63)) - -**Improvements** - -- Update benchmark ([#93](https://github.com/open-mmlab/mmpose/pull/93)) - -- Add Dockerfile ([#44](https://github.com/open-mmlab/mmpose/pull/44)) - -- Improve unittest coverage and minor fixes ([#18](https://github.com/open-mmlab/mmpose/pull/18)) - -- Support CPUs for train/val/demo ([#34](https://github.com/open-mmlab/mmpose/pull/34)) - -- Support bottom-up demo ([#69](https://github.com/open-mmlab/mmpose/pull/69)) - -- Add tools to publish model ([#62](https://github.com/open-mmlab/mmpose/pull/62)) - -- Enrich the modelzoo ([#64](https://github.com/open-mmlab/mmpose/pull/64), [#68](https://github.com/open-mmlab/mmpose/pull/68), [#82](https://github.com/open-mmlab/mmpose/pull/82)) - -## **v0.5.0 (21/7/2020)** - -**Highlights** - -- MMPose is released. - -**Main Features** - -- Support both top-down and bottom-up pose estimation approaches. - -- Achieve higher training efficiency and higher accuracy than other popular codebases (e.g. AlphaPose, HRNet). - -- Support various backbone models: ResNet, HRNet, SCNet, Hourglass and HigherHRNet. +# Changelog + +## **v1.0.0rc1 (14/10/2022)** + +**Highlights** + +- Release RTMPose, a high-performance real-time pose estimation algorithm with cross-platform deployment and inference support.
See details at the [project page](/projects/rtmpose/) +- Support several new algorithms: ViTPose (arXiv'2022), CID (CVPR'2022), DEKR (CVPR'2021) +- Add Inferencer, a convenient inference interface that performs pose estimation and visualization on images, videos and webcam streams with only one line of code (see the usage sketch at the end of this changelog) +- Introduce *Project*, a new form for rapid and easy implementation of new algorithms and features in MMPose, which is more convenient for community contributors + +**New Features** + +- Support RTMPose ([#1971](https://github.com/open-mmlab/mmpose/pull/1971), [#2024](https://github.com/open-mmlab/mmpose/pull/2024), [#2028](https://github.com/open-mmlab/mmpose/pull/2028), [#2030](https://github.com/open-mmlab/mmpose/pull/2030), [#2040](https://github.com/open-mmlab/mmpose/pull/2040), [#2057](https://github.com/open-mmlab/mmpose/pull/2057)) +- Support Inferencer ([#1969](https://github.com/open-mmlab/mmpose/pull/1969)) +- Support ViTPose ([#1876](https://github.com/open-mmlab/mmpose/pull/1876), [#2056](https://github.com/open-mmlab/mmpose/pull/2056), [#2058](https://github.com/open-mmlab/mmpose/pull/2058), [#2065](https://github.com/open-mmlab/mmpose/pull/2065)) +- Support CID ([#1907](https://github.com/open-mmlab/mmpose/pull/1907)) +- Support DEKR ([#1834](https://github.com/open-mmlab/mmpose/pull/1834), [#1901](https://github.com/open-mmlab/mmpose/pull/1901)) +- Support training with multiple datasets ([#1767](https://github.com/open-mmlab/mmpose/pull/1767), [#1930](https://github.com/open-mmlab/mmpose/pull/1930), [#1938](https://github.com/open-mmlab/mmpose/pull/1938), [#2025](https://github.com/open-mmlab/mmpose/pull/2025)) +- Add *project* to allow rapid and easy implementation of new models and features ([#1914](https://github.com/open-mmlab/mmpose/pull/1914)) + +**Improvements** + +- Improve documentation quality ([#1846](https://github.com/open-mmlab/mmpose/pull/1846), [#1858](https://github.com/open-mmlab/mmpose/pull/1858), [#1872](https://github.com/open-mmlab/mmpose/pull/1872), [#1899](https://github.com/open-mmlab/mmpose/pull/1899), [#1925](https://github.com/open-mmlab/mmpose/pull/1925), [#1945](https://github.com/open-mmlab/mmpose/pull/1945), [#1952](https://github.com/open-mmlab/mmpose/pull/1952), [#1990](https://github.com/open-mmlab/mmpose/pull/1990), [#2023](https://github.com/open-mmlab/mmpose/pull/2023), [#2042](https://github.com/open-mmlab/mmpose/pull/2042)) +- Support visualizing keypoint indices ([#2051](https://github.com/open-mmlab/mmpose/pull/2051)) +- Support OpenPose style visualization ([#2055](https://github.com/open-mmlab/mmpose/pull/2055)) +- Accelerate image transpose in data pipelines with tensor operations ([#1976](https://github.com/open-mmlab/mmpose/pull/1976)) +- Support auto-import modules from registry ([#1961](https://github.com/open-mmlab/mmpose/pull/1961)) +- Support keypoint partition metric ([#1944](https://github.com/open-mmlab/mmpose/pull/1944)) +- Support SimCC 1D-heatmap visualization ([#1912](https://github.com/open-mmlab/mmpose/pull/1912)) +- Support saving predictions and data metainfo in demos ([#1814](https://github.com/open-mmlab/mmpose/pull/1814), [#1879](https://github.com/open-mmlab/mmpose/pull/1879)) +- Support SimCC with DARK ([#1870](https://github.com/open-mmlab/mmpose/pull/1870)) +- Remove Gaussian blur for offset maps in UDP-regress ([#1815](https://github.com/open-mmlab/mmpose/pull/1815)) +- Refactor encoding interface of Codec for better extensibility and easier configuration
([#1781](https://github.com/open-mmlab/mmpose/pull/1781)) +- Support evaluating CocoMetric without annotation file ([#1722](https://github.com/open-mmlab/mmpose/pull/1722)) +- Improve unit tests ([#1765](https://github.com/open-mmlab/mmpose/pull/1765)) + +**Bug Fixes** + +- Fix repeated warnings from different ranks ([#2053](https://github.com/open-mmlab/mmpose/pull/2053)) +- Avoid frequent scope switching when using mmdet inference api ([#2039](https://github.com/open-mmlab/mmpose/pull/2039)) +- Remove EMA parameters and message hub data when publishing model checkpoints ([#2036](https://github.com/open-mmlab/mmpose/pull/2036)) +- Fix metainfo copying in dataset class ([#2017](https://github.com/open-mmlab/mmpose/pull/2017)) +- Fix top-down demo bug when there is no object detected ([#2007](https://github.com/open-mmlab/mmpose/pull/2007)) +- Fix config errors ([#1882](https://github.com/open-mmlab/mmpose/pull/1882), [#1906](https://github.com/open-mmlab/mmpose/pull/1906), [#1995](https://github.com/open-mmlab/mmpose/pull/1995)) +- Fix image demo failure when GUI is unavailable ([#1968](https://github.com/open-mmlab/mmpose/pull/1968)) +- Fix bug in AdaptiveWingLoss ([#1953](https://github.com/open-mmlab/mmpose/pull/1953)) +- Fix incorrect importing of RepeatDataset which is deprecated ([#1943](https://github.com/open-mmlab/mmpose/pull/1943)) +- Fix bug in bottom-up datasets that ignores images without instances ([#1752](https://github.com/open-mmlab/mmpose/pull/1752), [#1936](https://github.com/open-mmlab/mmpose/pull/1936)) +- Fix upstream dependency issues ([#1867](https://github.com/open-mmlab/mmpose/pull/1867), [#1921](https://github.com/open-mmlab/mmpose/pull/1921)) +- Fix evaluation issues and update results ([#1763](https://github.com/open-mmlab/mmpose/pull/1763), [#1773](https://github.com/open-mmlab/mmpose/pull/1773), [#1780](https://github.com/open-mmlab/mmpose/pull/1780), [#1850](https://github.com/open-mmlab/mmpose/pull/1850), [#1868](https://github.com/open-mmlab/mmpose/pull/1868)) +- Fix local registry missing warnings ([#1849](https://github.com/open-mmlab/mmpose/pull/1849)) +- Remove deprecated scripts for model deployment ([#1845](https://github.com/open-mmlab/mmpose/pull/1845)) +- Fix a bug in input transformation in BaseHead ([#1843](https://github.com/open-mmlab/mmpose/pull/1843)) +- Fix an interface mismatch with MMDetection in webcam demo ([#1813](https://github.com/open-mmlab/mmpose/pull/1813)) +- Fix a bug in heatmap visualization that causes incorrect scale ([#1800](https://github.com/open-mmlab/mmpose/pull/1800)) +- Add model metafiles ([#1768](https://github.com/open-mmlab/mmpose/pull/1768)) + +## **v1.0.0rc0 (14/10/2022)** + +**New Features** + +- Support 4 light-weight pose estimation algorithms: [SimCC](https://doi.org/10.48550/arxiv.2107.03332) (ECCV'2022), [Debias-IPR](https://openaccess.thecvf.com/content/ICCV2021/papers/Gu_Removing_the_Bias_of_Integral_Pose_Regression_ICCV_2021_paper.pdf) (ICCV'2021), [IPR](https://arxiv.org/abs/1711.08229) (ECCV'2018), and [DSNT](https://arxiv.org/abs/1801.07372v2) (ArXiv'2018) ([#1628](https://github.com/open-mmlab/mmpose/pull/1628)) + +**Migrations** + +- Add Webcam API in MMPose 1.0 ([#1638](https://github.com/open-mmlab/mmpose/pull/1638), [#1662](https://github.com/open-mmlab/mmpose/pull/1662)) @Ben-Louis +- Add codec for Associative Embedding (beta) ([#1603](https://github.com/open-mmlab/mmpose/pull/1603)) @ly015 + +**Improvements** + +- Add a colab tutorial for MMPose 1.0 
([#1660](https://github.com/open-mmlab/mmpose/pull/1660)) @Tau-J
- Add model index in config folder ([#1710](https://github.com/open-mmlab/mmpose/pull/1710), [#1709](https://github.com/open-mmlab/mmpose/pull/1709), [#1627](https://github.com/open-mmlab/mmpose/pull/1627)) @ly015, @Tau-J, @Ben-Louis
- Update and improve documentation ([#1692](https://github.com/open-mmlab/mmpose/pull/1692), [#1656](https://github.com/open-mmlab/mmpose/pull/1656), [#1681](https://github.com/open-mmlab/mmpose/pull/1681), [#1677](https://github.com/open-mmlab/mmpose/pull/1677), [#1664](https://github.com/open-mmlab/mmpose/pull/1664), [#1659](https://github.com/open-mmlab/mmpose/pull/1659)) @Tau-J, @Ben-Louis, @liqikai9
- Improve config structures and formats ([#1651](https://github.com/open-mmlab/mmpose/pull/1651)) @liqikai9

**Bug Fixes**

- Update mmengine version requirements ([#1715](https://github.com/open-mmlab/mmpose/pull/1715)) @Ben-Louis
- Update dependencies of pre-commit hooks ([#1705](https://github.com/open-mmlab/mmpose/pull/1705)) @Ben-Louis
- Fix mmcv version in Dockerfile ([#1704](https://github.com/open-mmlab/mmpose/pull/1704))
- Fix a bug in setting dataset metainfo in configs ([#1684](https://github.com/open-mmlab/mmpose/pull/1684)) @ly015
- Fix a bug in UDP training ([#1682](https://github.com/open-mmlab/mmpose/pull/1682)) @liqikai9
- Fix a bug in Dark decoding ([#1676](https://github.com/open-mmlab/mmpose/pull/1676)) @liqikai9
- Fix bugs in visualization ([#1671](https://github.com/open-mmlab/mmpose/pull/1671), [#1668](https://github.com/open-mmlab/mmpose/pull/1668), [#1657](https://github.com/open-mmlab/mmpose/pull/1657)) @liqikai9, @Ben-Louis
- Fix incorrect FLOPs calculation ([#1669](https://github.com/open-mmlab/mmpose/pull/1669)) @liqikai9
- Fix `tensor.tile` compatibility issue for PyTorch 1.6 ([#1658](https://github.com/open-mmlab/mmpose/pull/1658)) @ly015
- Fix compatibility with `MultilevelPixelData` ([#1647](https://github.com/open-mmlab/mmpose/pull/1647)) @liqikai9

## **v1.0.0beta (01/09/2022)**

We are excited to announce the release of MMPose 1.0.0beta.
MMPose 1.0.0beta is the first version of MMPose 1.x, a part of the OpenMMLab 2.0 projects, built upon the new [training engine](https://github.com/open-mmlab/mmengine).

**Highlights**

- **New engine**. MMPose 1.x is based on [MMEngine](https://github.com/open-mmlab/mmengine), which provides a general and powerful runner that allows more flexible customizations and significantly simplifies the entry points of high-level interfaces.

- **Unified interfaces**. As a part of the OpenMMLab 2.0 projects, MMPose 1.x unifies and refactors the interfaces and internal logic of training, testing, datasets, models, evaluation, and visualization. All the OpenMMLab 2.0 projects share the same design in these interfaces and logic to allow the emergence of multi-task/modality algorithms.

- **More documentation and tutorials**. We have added a number of documentation pages and tutorials to help users get started more smoothly. Read them [here](https://mmpose.readthedocs.io/en/latest/).

**Breaking Changes**

In this release, we made many major refactorings and modifications. Please refer to the [migration guide](../migration.md) for details and migration instructions.
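To make the Inferencer introduced in v1.0.0rc1 above more concrete, here is a minimal usage sketch. It assumes the `MMPoseInferencer` class exposed under `mmpose.apis` in the 1.x series; the model alias, argument names and the exact output structure may differ between release candidates, so treat it as an illustration rather than a reference.

```python
# Minimal sketch of the one-line inference interface described in the
# v1.0.0rc1 highlights. Assumes an `MMPoseInferencer` class in `mmpose.apis`;
# alias names, arguments and output keys may vary across 1.x versions.
from mmpose.apis import MMPoseInferencer

# Build an inferencer from a model alias (here, a generic human body model).
inferencer = MMPoseInferencer('human')

# The same call accepts an image path, a video path, a folder or a webcam
# stream, and returns a generator that yields predictions frame by frame.
result_generator = inferencer('path/to/image.jpg', show=False)
result = next(result_generator)
print(result['predictions'])
```

The visualization mentioned in the highlight is typically requested through the same call (e.g. by enabling `show` or pointing to an output directory), which is what makes pose estimation and visualization possible in a single line.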
+ +## **v0.28.1 (28/07/2022)** + +This release is meant to fix the compatibility with the latest mmcv v1.6.1 + +## **v0.28.0 (06/07/2022)** + +**Highlights** + +- Support [TCFormer](https://openaccess.thecvf.com/content/CVPR2022/html/Zeng_Not_All_Tokens_Are_Equal_Human-Centric_Visual_Analysis_via_Token_CVPR_2022_paper.html) backbone, CVPR'2022 ([#1447](https://github.com/open-mmlab/mmpose/pull/1447), [#1452](https://github.com/open-mmlab/mmpose/pull/1452)) @zengwang430521 + +- Add [RLE](https://arxiv.org/abs/2107.11291) models on COCO dataset ([#1424](https://github.com/open-mmlab/mmpose/pull/1424)) @Indigo6, @Ben-Louis, @ly015 + +- Update swin models with better performance ([#1467](https://github.com/open-mmlab/mmpose/pull/1434)) @jin-s13 + +**New Features** + +- Support [TCFormer](https://openaccess.thecvf.com/content/CVPR2022/html/Zeng_Not_All_Tokens_Are_Equal_Human-Centric_Visual_Analysis_via_Token_CVPR_2022_paper.html) backbone, CVPR'2022 ([#1447](https://github.com/open-mmlab/mmpose/pull/1447), [#1452](https://github.com/open-mmlab/mmpose/pull/1452)) @zengwang430521 + +- Add [RLE](https://arxiv.org/abs/2107.11291) models on COCO dataset ([#1424](https://github.com/open-mmlab/mmpose/pull/1424)) @Indigo6, @Ben-Louis, @ly015 + +- Support layer decay optimizer constructor and learning rate decay optimizer constructor ([#1423](https://github.com/open-mmlab/mmpose/pull/1423)) @jin-s13 + +**Improvements** + +- Improve documentation quality ([#1416](https://github.com/open-mmlab/mmpose/pull/1416), [#1421](https://github.com/open-mmlab/mmpose/pull/1421), [#1423](https://github.com/open-mmlab/mmpose/pull/1423), [#1426](https://github.com/open-mmlab/mmpose/pull/1426), [#1458](https://github.com/open-mmlab/mmpose/pull/1458), [#1463](https://github.com/open-mmlab/mmpose/pull/1463)) @ly015, @liqikai9 + +- Support installation by [mim](https://github.com/open-mmlab/mim) ([#1425](https://github.com/open-mmlab/mmpose/pull/1425)) @liqikai9 + +- Support PAVI logger ([#1434](https://github.com/open-mmlab/mmpose/pull/1434)) @EvelynWang-0423 + +- Add progress bar for some demos ([#1454](https://github.com/open-mmlab/mmpose/pull/1454)) @liqikai9 + +- Webcam API supports quick device setting in terminal commands ([#1466](https://github.com/open-mmlab/mmpose/pull/1466)) @ly015 + +- Update swin models with better performance ([#1467](https://github.com/open-mmlab/mmpose/pull/1434)) @jin-s13 + +**Bug Fixes** + +- Rename `custom_hooks_config` to `custom_hooks` in configs to align with the documentation ([#1427](https://github.com/open-mmlab/mmpose/pull/1427)) @ly015 + +- Fix deadlock issue in Webcam API ([#1430](https://github.com/open-mmlab/mmpose/pull/1430)) @ly015 + +- Fix smoother configs in video 3D demo ([#1457](https://github.com/open-mmlab/mmpose/pull/1457)) @ly015 + +## **v0.27.0 (07/06/2022)** + +**Highlights** + +- Support hand gesture recognition + + - Try the demo for gesture recognition + - Learn more about the algorithm, dataset and experiment results + +- Major upgrade to the Webcam API + + - Tutorials (EN|zh_CN) + - [API Reference](https://mmpose.readthedocs.io/en/latest/api.html#mmpose-apis-webcam) + - Demo + +**New Features** + +- Support gesture recognition algorithm [MTUT](https://openaccess.thecvf.com/content_CVPR_2019/html/Abavisani_Improving_the_Performance_of_Unimodal_Dynamic_Hand-Gesture_Recognition_With_Multimodal_CVPR_2019_paper.html) CVPR'2019 and dataset [NVGesture](https://openaccess.thecvf.com/content_cvpr_2016/html/Molchanov_Online_Detection_and_CVPR_2016_paper.html) CVPR'2016 
([#1380](https://github.com/open-mmlab/mmpose/pull/1380)) @Ben-Louis + +**Improvements** + +- Upgrade Webcam API and related documents ([#1393](https://github.com/open-mmlab/mmpose/pull/1393), [#1404](https://github.com/open-mmlab/mmpose/pull/1404), [#1413](https://github.com/open-mmlab/mmpose/pull/1413)) @ly015 + +- Support exporting COCO inference result without the annotation file ([#1368](https://github.com/open-mmlab/mmpose/pull/1368)) @liqikai9 + +- Replace markdownlint with mdformat in CI to avoid the dependence on ruby [#1382](https://github.com/open-mmlab/mmpose/pull/1382) @ly015 + +- Improve documentation quality ([#1385](https://github.com/open-mmlab/mmpose/pull/1385), [#1394](https://github.com/open-mmlab/mmpose/pull/1394), [#1395](https://github.com/open-mmlab/mmpose/pull/1395), [#1408](https://github.com/open-mmlab/mmpose/pull/1408)) @chubei-oppen, @ly015, @liqikai9 + +**Bug Fixes** + +- Fix xywh->xyxy bbox conversion in dataset sanity check ([#1367](https://github.com/open-mmlab/mmpose/pull/1367)) @jin-s13 + +- Fix a bug in two-stage 3D keypoint demo ([#1373](https://github.com/open-mmlab/mmpose/pull/1373)) @ly015 + +- Fix out-dated settings in PVT configs ([#1376](https://github.com/open-mmlab/mmpose/pull/1376)) @ly015 + +- Fix myst settings for document compiling ([#1381](https://github.com/open-mmlab/mmpose/pull/1381)) @ly015 + +- Fix a bug in bbox transform ([#1384](https://github.com/open-mmlab/mmpose/pull/1384)) @ly015 + +- Fix inaccurate description of `min_keypoints` in tracking apis ([#1398](https://github.com/open-mmlab/mmpose/pull/1398)) @pallgeuer + +- Fix warning with `torch.meshgrid` ([#1402](https://github.com/open-mmlab/mmpose/pull/1402)) @pallgeuer + +- Remove redundant transformer modules from `mmpose.datasets.backbones.utils` ([#1405](https://github.com/open-mmlab/mmpose/pull/1405)) @ly015 + +## **v0.26.0 (05/05/2022)** + +**Highlights** + +- Support [RLE (Residual Log-likelihood Estimation)](https://arxiv.org/abs/2107.11291), ICCV'2021 ([#1259](https://github.com/open-mmlab/mmpose/pull/1259)) @Indigo6, @ly015 + +- Support [Swin Transformer](https://arxiv.org/abs/2103.14030), ICCV'2021 ([#1300](https://github.com/open-mmlab/mmpose/pull/1300)) @yumendecc, @ly015 + +- Support [PVT](https://arxiv.org/abs/2102.12122), ICCV'2021 and [PVTv2](https://arxiv.org/abs/2106.13797), CVMJ'2022 ([#1343](https://github.com/open-mmlab/mmpose/pull/1343)) @zengwang430521 + +- Speed up inference and reduce CPU usage by optimizing the pre-processing pipeline ([#1320](https://github.com/open-mmlab/mmpose/pull/1320)) @chenxinfeng4, @liqikai9 + +**New Features** + +- Support [RLE (Residual Log-likelihood Estimation)](https://arxiv.org/abs/2107.11291), ICCV'2021 ([#1259](https://github.com/open-mmlab/mmpose/pull/1259)) @Indigo6, @ly015 + +- Support [Swin Transformer](https://arxiv.org/abs/2103.14030), ICCV'2021 ([#1300](https://github.com/open-mmlab/mmpose/pull/1300)) @yumendecc, @ly015 + +- Support [PVT](https://arxiv.org/abs/2102.12122), ICCV'2021 and [PVTv2](https://arxiv.org/abs/2106.13797), CVMJ'2022 ([#1343](https://github.com/open-mmlab/mmpose/pull/1343)) @zengwang430521 + +- Support [FPN](https://openaccess.thecvf.com/content_cvpr_2017/html/Lin_Feature_Pyramid_Networks_CVPR_2017_paper.html), CVPR'2017 ([#1300](https://github.com/open-mmlab/mmpose/pull/1300)) @yumendecc, @ly015 + +**Improvements** + +- Speed up inference and reduce CPU usage by optimizing the pre-processing pipeline ([#1320](https://github.com/open-mmlab/mmpose/pull/1320)) @chenxinfeng4, @liqikai9 + +- 
Video demo supports models that require multi-frame inputs ([#1300](https://github.com/open-mmlab/mmpose/pull/1300)) @liqikai9, @jin-s13

- Update benchmark regression list ([#1328](https://github.com/open-mmlab/mmpose/pull/1328)) @ly015, @liqikai9

- Remove unnecessary warnings in `TopDownPoseTrack18VideoDataset` ([#1335](https://github.com/open-mmlab/mmpose/pull/1335)) @liqikai9

- Improve documentation quality ([#1313](https://github.com/open-mmlab/mmpose/pull/1313), [#1305](https://github.com/open-mmlab/mmpose/pull/1305)) @Ben-Louis, @ly015

- Update deprecated settings in configs ([#1317](https://github.com/open-mmlab/mmpose/pull/1317)) @ly015

**Bug Fixes**

- Fix a bug in human skeleton grouping that may skip the matching process unexpectedly when `ignore_to_much` is True ([#1341](https://github.com/open-mmlab/mmpose/pull/1341)) @daixinghome

- Fix a GPG key error that leads to CI failure ([#1354](https://github.com/open-mmlab/mmpose/pull/1354)) @ly015

- Fix bugs in distributed training script ([#1338](https://github.com/open-mmlab/mmpose/pull/1338), [#1298](https://github.com/open-mmlab/mmpose/pull/1298)) @ly015

- Fix an upstream bug in xtcocotools that causes incorrect AP(M) results ([#1308](https://github.com/open-mmlab/mmpose/pull/1308)) @jin-s13, @ly015

- Fix indentation errors in the colab tutorial ([#1298](https://github.com/open-mmlab/mmpose/pull/1298)) @YuanZi1501040205

- Fix incompatible model weight initialization with other OpenMMLab codebases ([#1329](https://github.com/open-mmlab/mmpose/pull/1329)) @274869388

- Fix HRNet FP16 checkpoints download URL ([#1309](https://github.com/open-mmlab/mmpose/pull/1309)) @YinAoXiong

- Fix typos in `body3d_two_stage_video_demo.py` ([#1295](https://github.com/open-mmlab/mmpose/pull/1295)) @mucozcan

**Breaking Changes**

- Refactor bbox processing in datasets and pipelines ([#1311](https://github.com/open-mmlab/mmpose/pull/1311)) @ly015, @Ben-Louis

- The bbox format conversion (xywh to center-scale) and the random translation are moved from the dataset to the pipeline. A comparison between the new and the old versions is given below:

**Dataset** (e.g. [TopDownCOCODataset](https://github.com/open-mmlab/mmpose/blob/master/mmpose/datasets/datasets/top_down/topdown_coco_dataset.py))

v0.26.0:

```python
...
# Data sample only contains bbox
rec.append({
    'bbox': obj['clean_bbox'][:4],
    ...
})
```

v0.25.0:

```python
...
# Convert bbox from xywh to center-scale
center, scale = self._xywh2cs(*obj['clean_bbox'][:4])
# Data sample contains center and scale
rec.append({
    'bbox': obj['clean_bbox'][:4],
    'center': center,
    'scale': scale,
    ...
})
```

**Pipeline Config** (e.g. [HRNet+COCO](https://github.com/open-mmlab/mmpose/blob/master/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/hrnet_w32_coco_256x192.py))

v0.26.0:

```python
...
train_pipeline = [
    dict(type='LoadImageFromFile'),
    # Convert bbox from xywh to center-scale
    dict(type='TopDownGetBboxCenterScale', padding=1.25),
    # Randomly shift bbox center
    dict(type='TopDownRandomShiftBboxCenter', shift_factor=0.16, prob=0.3),
    ...
]
```

v0.25.0:

```python
...
train_pipeline = [
    dict(type='LoadImageFromFile'),
    ...
]
```

**Advantages** of the new design:

- Simpler data sample content

- Flexible bbox format conversion and augmentation

- Bbox random translation is applied every epoch (instead of only once when the annotations are loaded)

**BC Breaking**: The method `_xywh2cs` of dataset base classes (e.g. [Kpt2dSviewRgbImgTopDownDataset](https://github.com/open-mmlab/mmpose/blob/master/mmpose/datasets/datasets/base/kpt_2d_sview_rgb_img_top_down_dataset.py)) will be deprecated in the future. Custom datasets will need modifications to move the bbox format conversion to pipelines.

## **v0.25.0 (02/04/2022)**

**Highlights**

- Support Shelf and Campus datasets with pre-trained VoxelPose models, ["3D Pictorial Structures for Multiple Human Pose Estimation"](http://campar.in.tum.de/pub/belagiannis2014cvpr/belagiannis2014cvpr.pdf), CVPR'2014 ([#1225](https://github.com/open-mmlab/mmpose/pull/1225)) @liqikai9, @wusize

- Add `Smoother` module for temporal smoothing of pose estimation results with configurable filters ([#1127](https://github.com/open-mmlab/mmpose/pull/1127)) @ailingzengzzz, @ly015

- Support SmoothNet for pose smoothing, ["SmoothNet: A Plug-and-Play Network for Refining Human Poses in Videos"](https://arxiv.org/abs/2112.13715), arXiv'2021 ([#1279](https://github.com/open-mmlab/mmpose/pull/1279)) @ailingzengzzz, @ly015

- Add multiview 3D pose estimation demo ([#1270](https://github.com/open-mmlab/mmpose/pull/1270)) @wusize

**New Features**

- Support Shelf and Campus datasets with pre-trained VoxelPose models, ["3D Pictorial Structures for Multiple Human Pose Estimation"](http://campar.in.tum.de/pub/belagiannis2014cvpr/belagiannis2014cvpr.pdf), CVPR'2014 ([#1225](https://github.com/open-mmlab/mmpose/pull/1225)) @liqikai9, @wusize

- Add `Smoother` module for temporal smoothing of pose estimation results with configurable filters ([#1127](https://github.com/open-mmlab/mmpose/pull/1127)) @ailingzengzzz, @ly015

- Support SmoothNet for pose smoothing, ["SmoothNet: A Plug-and-Play Network for Refining Human Poses in Videos"](https://arxiv.org/abs/2112.13715), arXiv'2021 ([#1279](https://github.com/open-mmlab/mmpose/pull/1279)) @ailingzengzzz, @ly015

- Add multiview 3D pose estimation demo ([#1270](https://github.com/open-mmlab/mmpose/pull/1270)) @wusize

- Support multi-machine distributed training ([#1248](https://github.com/open-mmlab/mmpose/pull/1248)) @ly015

**Improvements**

- Update HRFormer configs and checkpoints with relative position bias ([#1245](https://github.com/open-mmlab/mmpose/pull/1245)) @zengwang430521

- Support using different random seeds for each distributed node ([#1257](https://github.com/open-mmlab/mmpose/pull/1257), [#1229](https://github.com/open-mmlab/mmpose/pull/1229)) @ly015

- Improve documentation quality ([#1275](https://github.com/open-mmlab/mmpose/pull/1275), [#1255](https://github.com/open-mmlab/mmpose/pull/1255), [#1258](https://github.com/open-mmlab/mmpose/pull/1258), [#1249](https://github.com/open-mmlab/mmpose/pull/1249), [#1247](https://github.com/open-mmlab/mmpose/pull/1247), [#1240](https://github.com/open-mmlab/mmpose/pull/1240), [#1235](https://github.com/open-mmlab/mmpose/pull/1235)) @ly015, @jin-s13, @YoniChechik

**Bug Fixes**

- Fix keypoint index in RHD dataset meta information ([#1265](https://github.com/open-mmlab/mmpose/pull/1265)) @liqikai9

- Fix pre-commit hook unexpected behavior on Windows ([#1282](https://github.com/open-mmlab/mmpose/pull/1282)) @liqikai9

- Remove python-dev installation in CI ([#1276](https://github.com/open-mmlab/mmpose/pull/1276)) @ly015

- Unify hyphens in argument
names in tools and demos ([#1271](https://github.com/open-mmlab/mmpose/pull/1271)) @ly015 + +- Fix ambiguous channel size in `channel_shuffle` that may cause exporting failure (#1242) @PINTO0309 + +- Fix a bug in Webcam API that causes single-class detectors fail ([#1239](https://github.com/open-mmlab/mmpose/pull/1239)) @674106399 + +- Fix the issue that `custom_hook` can not be set in configs ([#1236](https://github.com/open-mmlab/mmpose/pull/1236)) @bladrome + +- Fix incompatible MMCV version in DockerFile ([#raykindle](https://github.com/open-mmlab/mmpose/pull/raykindle)) + +- Skip invisible joints in visualization ([#1228](https://github.com/open-mmlab/mmpose/pull/1228)) @womeier + +## **v0.24.0 (07/03/2022)** + +**Highlights** + +- Support HRFormer ["HRFormer: High-Resolution Vision Transformer for Dense Predict"](https://proceedings.neurips.cc/paper/2021/hash/3bbfdde8842a5c44a0323518eec97cbe-Abstract.html), NeurIPS'2021 ([#1203](https://github.com/open-mmlab/mmpose/pull/1203)) @zengwang430521 + +- Support Windows installation with pip ([#1213](https://github.com/open-mmlab/mmpose/pull/1213)) @jin-s13, @ly015 + +- Add WebcamAPI documents ([#1187](https://github.com/open-mmlab/mmpose/pull/1187)) @ly015 + +**New Features** + +- Support HRFormer ["HRFormer: High-Resolution Vision Transformer for Dense Predict"](https://proceedings.neurips.cc/paper/2021/hash/3bbfdde8842a5c44a0323518eec97cbe-Abstract.html), NeurIPS'2021 ([#1203](https://github.com/open-mmlab/mmpose/pull/1203)) @zengwang430521 + +- Support Windows installation with pip ([#1213](https://github.com/open-mmlab/mmpose/pull/1213)) @jin-s13, @ly015 + +- Support CPU training with mmcv \< v1.4.4 ([#1161](https://github.com/open-mmlab/mmpose/pull/1161)) @EasonQYS, @ly015 + +- Add "Valentine Magic" demo with WebcamAPI ([#1189](https://github.com/open-mmlab/mmpose/pull/1189), [#1191](https://github.com/open-mmlab/mmpose/pull/1191)) @liqikai9 + +**Improvements** + +- Refactor multi-view 3D pose estimation framework towards better modularization and expansibility ([#1196](https://github.com/open-mmlab/mmpose/pull/1196)) @wusize + +- Add WebcamAPI documents and tutorials ([#1187](https://github.com/open-mmlab/mmpose/pull/1187)) @ly015 + +- Refactor dataset evaluation interface to align with other OpenMMLab codebases ([#1209](https://github.com/open-mmlab/mmpose/pull/1209)) @ly015 + +- Add deprecation message for deploy tools since [MMDeploy](https://github.com/open-mmlab/mmdeploy) has supported MMPose ([#1207](https://github.com/open-mmlab/mmpose/pull/1207)) @QwQ2000 + +- Improve documentation quality ([#1206](https://github.com/open-mmlab/mmpose/pull/1206), [#1161](https://github.com/open-mmlab/mmpose/pull/1161)) @ly015 + +- Switch to OpenMMLab official pre-commit-hook for copyright check ([#1214](https://github.com/open-mmlab/mmpose/pull/1214)) @ly015 + +**Bug Fixes** + +- Fix hard-coded data collating and scattering in inference ([#1175](https://github.com/open-mmlab/mmpose/pull/1175)) @ly015 + +- Fix model configs on JHMDB dataset ([#1188](https://github.com/open-mmlab/mmpose/pull/1188)) @jin-s13 + +- Fix area calculation in pose tracking inference ([#1197](https://github.com/open-mmlab/mmpose/pull/1197)) @pallgeuer + +- Fix registry scope conflict of module wrapper ([#1204](https://github.com/open-mmlab/mmpose/pull/1204)) @ly015 + +- Update MMCV installation in CI and documents ([#1205](https://github.com/open-mmlab/mmpose/pull/1205)) + +- Fix incorrect color channel order in visualization functions 
([#1212](https://github.com/open-mmlab/mmpose/pull/1212)) @ly015 + +## **v0.23.0 (11/02/2022)** + +**Highlights** + +- Add [MMPose Webcam API](https://github.com/open-mmlab/mmpose/tree/master/tools/webcam): A simple yet powerful tools to develop interactive webcam applications with MMPose functions. ([#1178](https://github.com/open-mmlab/mmpose/pull/1178), [#1173](https://github.com/open-mmlab/mmpose/pull/1173), [#1173](https://github.com/open-mmlab/mmpose/pull/1173), [#1143](https://github.com/open-mmlab/mmpose/pull/1143), [#1094](https://github.com/open-mmlab/mmpose/pull/1094), [#1133](https://github.com/open-mmlab/mmpose/pull/1133), [#1098](https://github.com/open-mmlab/mmpose/pull/1098), [#1160](https://github.com/open-mmlab/mmpose/pull/1160)) @ly015, @jin-s13, @liqikai9, @wusize, @luminxu, @zengwang430521 @mzr1996 + +**New Features** + +- Add [MMPose Webcam API](https://github.com/open-mmlab/mmpose/tree/master/tools/webcam): A simple yet powerful tools to develop interactive webcam applications with MMPose functions. ([#1178](https://github.com/open-mmlab/mmpose/pull/1178), [#1173](https://github.com/open-mmlab/mmpose/pull/1173), [#1173](https://github.com/open-mmlab/mmpose/pull/1173), [#1143](https://github.com/open-mmlab/mmpose/pull/1143), [#1094](https://github.com/open-mmlab/mmpose/pull/1094), [#1133](https://github.com/open-mmlab/mmpose/pull/1133), [#1098](https://github.com/open-mmlab/mmpose/pull/1098), [#1160](https://github.com/open-mmlab/mmpose/pull/1160)) @ly015, @jin-s13, @liqikai9, @wusize, @luminxu, @zengwang430521 @mzr1996 + +- Support ConcatDataset ([#1139](https://github.com/open-mmlab/mmpose/pull/1139)) @Canwang-sjtu + +- Support CPU training and testing ([#1157](https://github.com/open-mmlab/mmpose/pull/1157)) @ly015 + +**Improvements** + +- Add multi-processing configurations to speed up distributed training and testing ([#1146](https://github.com/open-mmlab/mmpose/pull/1146)) @ly015 + +- Add default runtime config ([#1145](https://github.com/open-mmlab/mmpose/pull/1145)) + +- Upgrade isort in pre-commit hook ([#1179](https://github.com/open-mmlab/mmpose/pull/1179)) @liqikai9 + +- Update README and documents ([#1171](https://github.com/open-mmlab/mmpose/pull/1171), [#1167](https://github.com/open-mmlab/mmpose/pull/1167), [#1153](https://github.com/open-mmlab/mmpose/pull/1153), [#1149](https://github.com/open-mmlab/mmpose/pull/1149), [#1148](https://github.com/open-mmlab/mmpose/pull/1148), [#1147](https://github.com/open-mmlab/mmpose/pull/1147), [#1140](https://github.com/open-mmlab/mmpose/pull/1140)) @jin-s13, @wusize, @TommyZihao, @ly015 + +**Bug Fixes** + +- Fix undeterministic behavior in pre-commit hooks ([#1136](https://github.com/open-mmlab/mmpose/pull/1136)) @jin-s13 + +- Deprecate the support for "python setup.py test" ([#1179](https://github.com/open-mmlab/mmpose/pull/1179)) @ly015 + +- Fix incompatible settings with MMCV on HSigmoid default parameters ([#1132](https://github.com/open-mmlab/mmpose/pull/1132)) @ly015 + +- Fix albumentation installation ([#1184](https://github.com/open-mmlab/mmpose/pull/1184)) @BIGWangYuDong + +## **v0.22.0 (04/01/2022)** + +**Highlights** + +- Support VoxelPose ["VoxelPose: Towards Multi-Camera 3D Human Pose Estimation in Wild Environment"](https://arxiv.org/abs/2004.06239), ECCV'2020 ([#1050](https://github.com/open-mmlab/mmpose/pull/1050)) @wusize + +- Support Soft Wing loss ["Structure-Coherent Deep Feature Learning for Robust Face Alignment"](https://linchunze.github.io/papers/TIP21_Structure_coherent_FA.pdf), TIP'2021 
([#1077](https://github.com/open-mmlab/mmpose/pull/1077)) @jin-s13 + +- Support Adaptive Wing loss ["Adaptive Wing Loss for Robust Face Alignment via Heatmap Regression"](https://arxiv.org/abs/1904.07399), ICCV'2019 ([#1072](https://github.com/open-mmlab/mmpose/pull/1072)) @jin-s13 + +**New Features** + +- Support VoxelPose ["VoxelPose: Towards Multi-Camera 3D Human Pose Estimation in Wild Environment"](https://arxiv.org/abs/2004.06239), ECCV'2020 ([#1050](https://github.com/open-mmlab/mmpose/pull/1050)) @wusize + +- Support Soft Wing loss ["Structure-Coherent Deep Feature Learning for Robust Face Alignment"](https://linchunze.github.io/papers/TIP21_Structure_coherent_FA.pdf), TIP'2021 ([#1077](https://github.com/open-mmlab/mmpose/pull/1077)) @jin-s13 + +- Support Adaptive Wing loss ["Adaptive Wing Loss for Robust Face Alignment via Heatmap Regression"](https://arxiv.org/abs/1904.07399), ICCV'2019 ([#1072](https://github.com/open-mmlab/mmpose/pull/1072)) @jin-s13 + +- Add LiteHRNet-18 Checkpoints trained on COCO. ([#1120](https://github.com/open-mmlab/mmpose/pull/1120)) @jin-s13 + +**Improvements** + +- Improve documentation quality ([#1115](https://github.com/open-mmlab/mmpose/pull/1115), [#1111](https://github.com/open-mmlab/mmpose/pull/1111), [#1105](https://github.com/open-mmlab/mmpose/pull/1105), [#1087](https://github.com/open-mmlab/mmpose/pull/1087), [#1086](https://github.com/open-mmlab/mmpose/pull/1086), [#1085](https://github.com/open-mmlab/mmpose/pull/1085), [#1084](https://github.com/open-mmlab/mmpose/pull/1084), [#1083](https://github.com/open-mmlab/mmpose/pull/1083), [#1124](https://github.com/open-mmlab/mmpose/pull/1124), [#1070](https://github.com/open-mmlab/mmpose/pull/1070), [#1068](https://github.com/open-mmlab/mmpose/pull/1068)) @jin-s13, @liqikai9, @ly015 + +- Support CircleCI ([#1074](https://github.com/open-mmlab/mmpose/pull/1074)) @ly015 + +- Skip unit tests in CI when only document files were changed ([#1074](https://github.com/open-mmlab/mmpose/pull/1074), [#1041](https://github.com/open-mmlab/mmpose/pull/1041)) @QwQ2000, @ly015 + +- Support file_client_args in LoadImageFromFile ([#1076](https://github.com/open-mmlab/mmpose/pull/1076)) @jin-s13 + +**Bug Fixes** + +- Fix a bug in Dark UDP postprocessing that causes error when the channel number is large. 
([#1079](https://github.com/open-mmlab/mmpose/pull/1079), [#1116](https://github.com/open-mmlab/mmpose/pull/1116)) @X00123, @jin-s13 + +- Fix hard-coded `sigmas` in bottom-up image demo ([#1107](https://github.com/open-mmlab/mmpose/pull/1107), [#1101](https://github.com/open-mmlab/mmpose/pull/1101)) @chenxinfeng4, @liqikai9 + +- Fix unstable checks in unit tests ([#1112](https://github.com/open-mmlab/mmpose/pull/1112)) @ly015 + +- Do not destroy NULL windows if `args.show==False` in demo scripts ([#1104](https://github.com/open-mmlab/mmpose/pull/1104)) @bladrome + +## **v0.21.0 (06/12/2021)** + +**Highlights** + +- Support ["Learning Temporal Pose Estimation from Sparsely-Labeled Videos"](https://arxiv.org/abs/1906.04016), NeurIPS'2019 ([#932](https://github.com/open-mmlab/mmpose/pull/932), [#1006](https://github.com/open-mmlab/mmpose/pull/1006), [#1036](https://github.com/open-mmlab/mmpose/pull/1036), [#1060](https://github.com/open-mmlab/mmpose/pull/1060)) @liqikai9 + +- Add ViPNAS-MobileNetV3 models ([#1025](https://github.com/open-mmlab/mmpose/pull/1025)) @luminxu, @jin-s13 + +- Add inference speed benchmark ([#1028](https://github.com/open-mmlab/mmpose/pull/1028), [#1034](https://github.com/open-mmlab/mmpose/pull/1034), [#1044](https://github.com/open-mmlab/mmpose/pull/1044)) @liqikai9 + +**New Features** + +- Support ["Learning Temporal Pose Estimation from Sparsely-Labeled Videos"](https://arxiv.org/abs/1906.04016), NeurIPS'2019 ([#932](https://github.com/open-mmlab/mmpose/pull/932), [#1006](https://github.com/open-mmlab/mmpose/pull/1006), [#1036](https://github.com/open-mmlab/mmpose/pull/1036)) @liqikai9 + +- Add ViPNAS-MobileNetV3 models ([#1025](https://github.com/open-mmlab/mmpose/pull/1025)) @luminxu, @jin-s13 + +- Add light-weight top-down models for whole-body keypoint detection ([#1009](https://github.com/open-mmlab/mmpose/pull/1009), [#1020](https://github.com/open-mmlab/mmpose/pull/1020), [#1055](https://github.com/open-mmlab/mmpose/pull/1055)) @luminxu, @ly015 + +- Add HRNet checkpoints with various settings on PoseTrack18 ([#1035](https://github.com/open-mmlab/mmpose/pull/1035)) @liqikai9 + +**Improvements** + +- Add inference speed benchmark ([#1028](https://github.com/open-mmlab/mmpose/pull/1028), [#1034](https://github.com/open-mmlab/mmpose/pull/1034), [#1044](https://github.com/open-mmlab/mmpose/pull/1044)) @liqikai9 + +- Update model metafile format ([#1001](https://github.com/open-mmlab/mmpose/pull/1001)) @ly015 + +- Support minus output feature index in mobilenet_v3 ([#1005](https://github.com/open-mmlab/mmpose/pull/1005)) @luminxu + +- Improve documentation quality ([#1018](https://github.com/open-mmlab/mmpose/pull/1018), [#1026](https://github.com/open-mmlab/mmpose/pull/1026), [#1027](https://github.com/open-mmlab/mmpose/pull/1027), [#1031](https://github.com/open-mmlab/mmpose/pull/1031), [#1038](https://github.com/open-mmlab/mmpose/pull/1038), [#1046](https://github.com/open-mmlab/mmpose/pull/1046), [#1056](https://github.com/open-mmlab/mmpose/pull/1056), [#1057](https://github.com/open-mmlab/mmpose/pull/1057)) @edybk, @luminxu, @ly015, @jin-s13 + +- Set default random seed in training initialization ([#1030](https://github.com/open-mmlab/mmpose/pull/1030)) @ly015 + +- Skip CI when only specific files changed ([#1041](https://github.com/open-mmlab/mmpose/pull/1041), [#1059](https://github.com/open-mmlab/mmpose/pull/1059)) @QwQ2000, @ly015 + +- Automatically cancel uncompleted action runs when new commit arrives 
([#1053](https://github.com/open-mmlab/mmpose/pull/1053)) @ly015 + +**Bug Fixes** + +- Update pose tracking demo to be compatible with latest mmtracking ([#1014](https://github.com/open-mmlab/mmpose/pull/1014)) @jin-s13 + +- Fix symlink creation failure when installed in Windows environments ([#1039](https://github.com/open-mmlab/mmpose/pull/1039)) @QwQ2000 + +- Fix AP-10K dataset sigmas ([#1040](https://github.com/open-mmlab/mmpose/pull/1040)) @jin-s13 + +## **v0.20.0 (01/11/2021)** + +**Highlights** + +- Add AP-10K dataset for animal pose estimation ([#987](https://github.com/open-mmlab/mmpose/pull/987)) @Annbless, @AlexTheBad, @jin-s13, @ly015 + +- Support TorchServe ([#979](https://github.com/open-mmlab/mmpose/pull/979)) @ly015 + +**New Features** + +- Add AP-10K dataset for animal pose estimation ([#987](https://github.com/open-mmlab/mmpose/pull/987)) @Annbless, @AlexTheBad, @jin-s13, @ly015 + +- Add HRNetv2 checkpoints on 300W and COFW datasets ([#980](https://github.com/open-mmlab/mmpose/pull/980)) @jin-s13 + +- Support TorchServe ([#979](https://github.com/open-mmlab/mmpose/pull/979)) @ly015 + +**Bug Fixes** + +- Fix some deprecated or risky settings in configs ([#963](https://github.com/open-mmlab/mmpose/pull/963), [#976](https://github.com/open-mmlab/mmpose/pull/976), [#992](https://github.com/open-mmlab/mmpose/pull/992)) @jin-s13, @wusize + +- Fix issues of default arguments of training and testing scripts ([#970](https://github.com/open-mmlab/mmpose/pull/970), [#985](https://github.com/open-mmlab/mmpose/pull/985)) @liqikai9, @wusize + +- Fix heatmap and tag size mismatch in bottom-up with UDP ([#994](https://github.com/open-mmlab/mmpose/pull/994)) @wusize + +- Fix python3.9 installation in CI ([#983](https://github.com/open-mmlab/mmpose/pull/983)) @ly015 + +- Fix model zoo document integrity issue ([#990](https://github.com/open-mmlab/mmpose/pull/990)) @jin-s13 + +**Improvements** + +- Support non-square input shape for bottom-up ([#991](https://github.com/open-mmlab/mmpose/pull/991)) @wusize + +- Add image and video resources for demo ([#971](https://github.com/open-mmlab/mmpose/pull/971)) @liqikai9 + +- Use CUDA docker images to accelerate CI ([#973](https://github.com/open-mmlab/mmpose/pull/973)) @ly015 + +- Add codespell hook and fix detected typos ([#977](https://github.com/open-mmlab/mmpose/pull/977)) @ly015 + +## **v0.19.0 (08/10/2021)** + +**Highlights** + +- Add models for Associative Embedding with Hourglass network backbone ([#906](https://github.com/open-mmlab/mmpose/pull/906), [#955](https://github.com/open-mmlab/mmpose/pull/955)) @jin-s13, @luminxu + +- Support COCO-Wholebody-Face and COCO-Wholebody-Hand datasets ([#813](https://github.com/open-mmlab/mmpose/pull/813)) @jin-s13, @innerlee, @luminxu + +- Upgrade dataset interface ([#901](https://github.com/open-mmlab/mmpose/pull/901), [#924](https://github.com/open-mmlab/mmpose/pull/924)) @jin-s13, @innerlee, @ly015, @liqikai9 + +- New style of documentation ([#945](https://github.com/open-mmlab/mmpose/pull/945)) @ly015 + +**New Features** + +- Add models for Associative Embedding with Hourglass network backbone ([#906](https://github.com/open-mmlab/mmpose/pull/906), [#955](https://github.com/open-mmlab/mmpose/pull/955)) @jin-s13, @luminxu + +- Support COCO-Wholebody-Face and COCO-Wholebody-Hand datasets ([#813](https://github.com/open-mmlab/mmpose/pull/813)) @jin-s13, @innerlee, @luminxu + +- Add pseudo-labeling tool to generate COCO style keypoint annotations with given bounding boxes 
([#928](https://github.com/open-mmlab/mmpose/pull/928)) @soltkreig + +- New style of documentation ([#945](https://github.com/open-mmlab/mmpose/pull/945)) @ly015 + +**Bug Fixes** + +- Fix segmentation parsing in Macaque dataset preprocessing ([#948](https://github.com/open-mmlab/mmpose/pull/948)) @jin-s13 + +- Fix dependencies that may lead to CI failure in downstream projects ([#936](https://github.com/open-mmlab/mmpose/pull/936), [#953](https://github.com/open-mmlab/mmpose/pull/953)) @RangiLyu, @ly015 + +- Fix keypoint order in Human3.6M dataset ([#940](https://github.com/open-mmlab/mmpose/pull/940)) @ttxskk + +- Fix unstable image loading for Interhand2.6M ([#913](https://github.com/open-mmlab/mmpose/pull/913)) @zengwang430521 + +**Improvements** + +- Upgrade dataset interface ([#901](https://github.com/open-mmlab/mmpose/pull/901), [#924](https://github.com/open-mmlab/mmpose/pull/924)) @jin-s13, @innerlee, @ly015, @liqikai9 + +- Improve demo usability and stability ([#908](https://github.com/open-mmlab/mmpose/pull/908), [#934](https://github.com/open-mmlab/mmpose/pull/934)) @ly015 + +- Standardize model metafile format ([#941](https://github.com/open-mmlab/mmpose/pull/941)) @ly015 + +- Support `persistent_worker` and several other arguments in configs ([#946](https://github.com/open-mmlab/mmpose/pull/946)) @jin-s13 + +- Use MMCV root model registry to enable cross-project module building ([#935](https://github.com/open-mmlab/mmpose/pull/935)) @RangiLyu + +- Improve the document quality ([#916](https://github.com/open-mmlab/mmpose/pull/916), [#909](https://github.com/open-mmlab/mmpose/pull/909), [#942](https://github.com/open-mmlab/mmpose/pull/942), [#913](https://github.com/open-mmlab/mmpose/pull/913), [#956](https://github.com/open-mmlab/mmpose/pull/956)) @jin-s13, @ly015, @bit-scientist, @zengwang430521 + +- Improve pull request template ([#952](https://github.com/open-mmlab/mmpose/pull/952), [#954](https://github.com/open-mmlab/mmpose/pull/954)) @ly015 + +**Breaking Changes** + +- Upgrade dataset interface ([#901](https://github.com/open-mmlab/mmpose/pull/901)) @jin-s13, @innerlee, @ly015 + +## **v0.18.0 (01/09/2021)** + +**Bug Fixes** + +- Fix redundant model weight loading in pytorch-to-onnx conversion ([#850](https://github.com/open-mmlab/mmpose/pull/850)) @ly015 + +- Fix a bug in update_model_index.py that may cause pre-commit hook failure([#866](https://github.com/open-mmlab/mmpose/pull/866)) @ly015 + +- Fix a bug in interhand_3d_head ([#890](https://github.com/open-mmlab/mmpose/pull/890)) @zengwang430521 + +- Fix pose tracking demo failure caused by out-of-date configs ([#891](https://github.com/open-mmlab/mmpose/pull/891)) + +**Improvements** + +- Add automatic benchmark regression tools ([#849](https://github.com/open-mmlab/mmpose/pull/849), [#880](https://github.com/open-mmlab/mmpose/pull/880), [#885](https://github.com/open-mmlab/mmpose/pull/885)) @liqikai9, @ly015 + +- Add copyright information and checking hook ([#872](https://github.com/open-mmlab/mmpose/pull/872)) + +- Add PR template ([#875](https://github.com/open-mmlab/mmpose/pull/875)) @ly015 + +- Add citation information ([#876](https://github.com/open-mmlab/mmpose/pull/876)) @ly015 + +- Add python3.9 in CI ([#877](https://github.com/open-mmlab/mmpose/pull/877), [#883](https://github.com/open-mmlab/mmpose/pull/883)) @ly015 + +- Improve the quality of the documents ([#845](https://github.com/open-mmlab/mmpose/pull/845), [#845](https://github.com/open-mmlab/mmpose/pull/845), 
[#848](https://github.com/open-mmlab/mmpose/pull/848), [#867](https://github.com/open-mmlab/mmpose/pull/867), [#870](https://github.com/open-mmlab/mmpose/pull/870), [#873](https://github.com/open-mmlab/mmpose/pull/873), [#896](https://github.com/open-mmlab/mmpose/pull/896)) @jin-s13, @ly015, @zhiqwang + +## **v0.17.0 (06/08/2021)** + +**Highlights** + +1. Support ["Lite-HRNet: A Lightweight High-Resolution Network"](https://arxiv.org/abs/2104.06403) CVPR'2021 ([#733](https://github.com/open-mmlab/mmpose/pull/733),[#800](https://github.com/open-mmlab/mmpose/pull/800)) @jin-s13 + +2. Add 3d body mesh demo ([#771](https://github.com/open-mmlab/mmpose/pull/771)) @zengwang430521 + +3. Add Chinese documentation ([#787](https://github.com/open-mmlab/mmpose/pull/787), [#798](https://github.com/open-mmlab/mmpose/pull/798), [#799](https://github.com/open-mmlab/mmpose/pull/799), [#802](https://github.com/open-mmlab/mmpose/pull/802), [#804](https://github.com/open-mmlab/mmpose/pull/804), [#805](https://github.com/open-mmlab/mmpose/pull/805), [#815](https://github.com/open-mmlab/mmpose/pull/815), [#816](https://github.com/open-mmlab/mmpose/pull/816), [#817](https://github.com/open-mmlab/mmpose/pull/817), [#819](https://github.com/open-mmlab/mmpose/pull/819), [#839](https://github.com/open-mmlab/mmpose/pull/839)) @ly015, @luminxu, @jin-s13, @liqikai9, @zengwang430521 + +4. Add Colab Tutorial ([#834](https://github.com/open-mmlab/mmpose/pull/834)) @ly015 + +**New Features** + +- Support ["Lite-HRNet: A Lightweight High-Resolution Network"](https://arxiv.org/abs/2104.06403) CVPR'2021 ([#733](https://github.com/open-mmlab/mmpose/pull/733),[#800](https://github.com/open-mmlab/mmpose/pull/800)) @jin-s13 + +- Add 3d body mesh demo ([#771](https://github.com/open-mmlab/mmpose/pull/771)) @zengwang430521 + +- Add Chinese documentation ([#787](https://github.com/open-mmlab/mmpose/pull/787), [#798](https://github.com/open-mmlab/mmpose/pull/798), [#799](https://github.com/open-mmlab/mmpose/pull/799), [#802](https://github.com/open-mmlab/mmpose/pull/802), [#804](https://github.com/open-mmlab/mmpose/pull/804), [#805](https://github.com/open-mmlab/mmpose/pull/805), [#815](https://github.com/open-mmlab/mmpose/pull/815), [#816](https://github.com/open-mmlab/mmpose/pull/816), [#817](https://github.com/open-mmlab/mmpose/pull/817), [#819](https://github.com/open-mmlab/mmpose/pull/819), [#839](https://github.com/open-mmlab/mmpose/pull/839)) @ly015, @luminxu, @jin-s13, @liqikai9, @zengwang430521 + +- Add Colab Tutorial ([#834](https://github.com/open-mmlab/mmpose/pull/834)) @ly015 + +- Support training for InterHand v1.0 dataset ([#761](https://github.com/open-mmlab/mmpose/pull/761)) @zengwang430521 + +**Bug Fixes** + +- Fix mpii pckh@0.1 index ([#773](https://github.com/open-mmlab/mmpose/pull/773)) @jin-s13 + +- Fix multi-node distributed test ([#818](https://github.com/open-mmlab/mmpose/pull/818)) @ly015 + +- Fix docstring and init_weights error of ShuffleNetV1 ([#814](https://github.com/open-mmlab/mmpose/pull/814)) @Junjun2016 + +- Fix imshow_bbox error when input bboxes is empty ([#796](https://github.com/open-mmlab/mmpose/pull/796)) @ly015 + +- Fix model zoo doc generation ([#778](https://github.com/open-mmlab/mmpose/pull/778)) @ly015 + +- Fix typo ([#767](https://github.com/open-mmlab/mmpose/pull/767)), ([#780](https://github.com/open-mmlab/mmpose/pull/780), [#782](https://github.com/open-mmlab/mmpose/pull/782)) @ly015, @jin-s13 + +**Breaking Changes** + +- Use MMCV EvalHook 
([#686](https://github.com/open-mmlab/mmpose/pull/686)) @ly015 + +**Improvements** + +- Add pytest.ini and fix docstring ([#812](https://github.com/open-mmlab/mmpose/pull/812)) @jin-s13 + +- Update MSELoss ([#829](https://github.com/open-mmlab/mmpose/pull/829)) @Ezra-Yu + +- Move process_mmdet_results into inference.py ([#831](https://github.com/open-mmlab/mmpose/pull/831)) @ly015 + +- Update resource limit ([#783](https://github.com/open-mmlab/mmpose/pull/783)) @jin-s13 + +- Use COCO 2D pose model in 3D demo examples ([#785](https://github.com/open-mmlab/mmpose/pull/785)) @ly015 + +- Change model zoo titles in the doc from center-aligned to left-aligned ([#792](https://github.com/open-mmlab/mmpose/pull/792), [#797](https://github.com/open-mmlab/mmpose/pull/797)) @ly015 + +- Support MIM ([#706](https://github.com/open-mmlab/mmpose/pull/706), [#794](https://github.com/open-mmlab/mmpose/pull/794)) @ly015 + +- Update out-of-date configs ([#827](https://github.com/open-mmlab/mmpose/pull/827)) @jin-s13 + +- Remove opencv-python-headless dependency by albumentations ([#833](https://github.com/open-mmlab/mmpose/pull/833)) @ly015 + +- Update QQ QR code in README_CN.md ([#832](https://github.com/open-mmlab/mmpose/pull/832)) @ly015 + +## **v0.16.0 (02/07/2021)** + +**Highlights** + +1. Support ["ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search"](https://arxiv.org/abs/2105.10154) CVPR'2021 ([#742](https://github.com/open-mmlab/mmpose/pull/742),[#755](https://github.com/open-mmlab/mmpose/pull/755)). + +2. Support MPI-INF-3DHP dataset ([#683](https://github.com/open-mmlab/mmpose/pull/683),[#746](https://github.com/open-mmlab/mmpose/pull/746),[#751](https://github.com/open-mmlab/mmpose/pull/751)). + +3. Add webcam demo tool ([#729](https://github.com/open-mmlab/mmpose/pull/729)) + +4. Add 3d body and hand pose estimation demo ([#704](https://github.com/open-mmlab/mmpose/pull/704), [#727](https://github.com/open-mmlab/mmpose/pull/727)). 
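The webcam and 3D demos highlighted above are thin wrappers around the Python inference helpers in `mmpose.apis`. As a rough sketch of the 2D top-down flow they build on (config and checkpoint paths are placeholders, and the exact signatures and return values of these helpers changed slightly across 0.x releases, so check the API docs of the installed version):

```python
# Rough sketch of 0.x top-down inference with the `mmpose.apis` helpers that
# the demo scripts wrap. Paths are placeholders; signatures and return values
# may differ slightly between 0.x releases.
from mmpose.apis import (inference_top_down_pose_model, init_pose_model,
                         vis_pose_result)

config_file = 'configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/hrnet_w32_coco_256x192.py'
checkpoint_file = 'checkpoints/hrnet_w32_coco_256x192.pth'  # placeholder
model = init_pose_model(config_file, checkpoint_file, device='cuda:0')

# Person bounding boxes (e.g. from a detector), in xywh format.
person_results = [{'bbox': [50, 50, 200, 400]}]

pose_results, _ = inference_top_down_pose_model(
    model, 'path/to/image.jpg', person_results, format='xywh')

vis_pose_result(model, 'path/to/image.jpg', pose_results, out_file='vis_result.jpg')
```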
+ +**New Features** + +- Support ["ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search"](https://arxiv.org/abs/2105.10154) CVPR'2021 ([#742](https://github.com/open-mmlab/mmpose/pull/742),[#755](https://github.com/open-mmlab/mmpose/pull/755)) + +- Support MPI-INF-3DHP dataset ([#683](https://github.com/open-mmlab/mmpose/pull/683),[#746](https://github.com/open-mmlab/mmpose/pull/746),[#751](https://github.com/open-mmlab/mmpose/pull/751)) + +- Support Webcam demo ([#729](https://github.com/open-mmlab/mmpose/pull/729)) + +- Support Interhand 3d demo ([#704](https://github.com/open-mmlab/mmpose/pull/704)) + +- Support 3d pose video demo ([#727](https://github.com/open-mmlab/mmpose/pull/727)) + +- Support H36m dataset for 2d pose estimation ([#709](https://github.com/open-mmlab/mmpose/pull/709), [#735](https://github.com/open-mmlab/mmpose/pull/735)) + +- Add scripts to generate mim metafile ([#749](https://github.com/open-mmlab/mmpose/pull/749)) + +**Bug Fixes** + +- Fix typos ([#692](https://github.com/open-mmlab/mmpose/pull/692),[#696](https://github.com/open-mmlab/mmpose/pull/696),[#697](https://github.com/open-mmlab/mmpose/pull/697),[#698](https://github.com/open-mmlab/mmpose/pull/698),[#712](https://github.com/open-mmlab/mmpose/pull/712),[#718](https://github.com/open-mmlab/mmpose/pull/718),[#728](https://github.com/open-mmlab/mmpose/pull/728)) + +- Change model download links from `http` to `https` ([#716](https://github.com/open-mmlab/mmpose/pull/716)) + +**Breaking Changes** + +- Switch to MMCV MODEL_REGISTRY ([#669](https://github.com/open-mmlab/mmpose/pull/669)) + +**Improvements** + +- Refactor MeshMixDataset ([#752](https://github.com/open-mmlab/mmpose/pull/752)) + +- Rename 'GaussianHeatMap' to 'GaussianHeatmap' ([#745](https://github.com/open-mmlab/mmpose/pull/745)) + +- Update out-of-date configs ([#734](https://github.com/open-mmlab/mmpose/pull/734)) + +- Improve compatibility for breaking changes ([#731](https://github.com/open-mmlab/mmpose/pull/731)) + +- Enable to control radius and thickness in visualization ([#722](https://github.com/open-mmlab/mmpose/pull/722)) + +- Add regex dependency ([#720](https://github.com/open-mmlab/mmpose/pull/720)) + +## **v0.15.0 (02/06/2021)** + +**Highlights** + +1. Support 3d video pose estimation (VideoPose3D). + +2. Support 3d hand pose estimation (InterNet). + +3. Improve presentation of modelzoo. 
+ +**New Features** + +- Support "InterHand2.6M: A Dataset and Baseline for 3D Interacting Hand Pose Estimation from a Single RGB Image" (ECCV‘20) ([#624](https://github.com/open-mmlab/mmpose/pull/624)) + +- Support "3D human pose estimation in video with temporal convolutions and semi-supervised training" (CVPR'19) ([#602](https://github.com/open-mmlab/mmpose/pull/602), [#681](https://github.com/open-mmlab/mmpose/pull/681)) + +- Support 3d pose estimation demo ([#653](https://github.com/open-mmlab/mmpose/pull/653), [#670](https://github.com/open-mmlab/mmpose/pull/670)) + +- Support bottom-up whole-body pose estimation ([#689](https://github.com/open-mmlab/mmpose/pull/689)) + +- Support mmcli ([#634](https://github.com/open-mmlab/mmpose/pull/634)) + +**Bug Fixes** + +- Fix opencv compatibility ([#635](https://github.com/open-mmlab/mmpose/pull/635)) + +- Fix demo with UDP ([#637](https://github.com/open-mmlab/mmpose/pull/637)) + +- Fix bottom-up model onnx conversion ([#680](https://github.com/open-mmlab/mmpose/pull/680)) + +- Fix `GPU_IDS` in distributed training ([#668](https://github.com/open-mmlab/mmpose/pull/668)) + +- Fix MANIFEST.in ([#641](https://github.com/open-mmlab/mmpose/pull/641), [#657](https://github.com/open-mmlab/mmpose/pull/657)) + +- Fix docs ([#643](https://github.com/open-mmlab/mmpose/pull/643),[#684](https://github.com/open-mmlab/mmpose/pull/684),[#688](https://github.com/open-mmlab/mmpose/pull/688),[#690](https://github.com/open-mmlab/mmpose/pull/690),[#692](https://github.com/open-mmlab/mmpose/pull/692)) + +**Breaking Changes** + +- Reorganize configs by tasks, algorithms, datasets, and techniques ([#647](https://github.com/open-mmlab/mmpose/pull/647)) + +- Rename heads and detectors ([#667](https://github.com/open-mmlab/mmpose/pull/667)) + +**Improvements** + +- Add `radius` and `thickness` parameters in visualization ([#638](https://github.com/open-mmlab/mmpose/pull/638)) + +- Add `trans_prob` parameter in `TopDownRandomTranslation` ([#650](https://github.com/open-mmlab/mmpose/pull/650)) + +- Switch to `MMCV MODEL_REGISTRY` ([#669](https://github.com/open-mmlab/mmpose/pull/669)) + +- Update dependencies ([#674](https://github.com/open-mmlab/mmpose/pull/674), [#676](https://github.com/open-mmlab/mmpose/pull/676)) + +## **v0.14.0 (06/05/2021)** + +**Highlights** + +1. Support animal pose estimation with 7 popular datasets. + +2. Support "A simple yet effective baseline for 3d human pose estimation" (ICCV'17). 
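The "simple yet effective baseline" highlighted above lifts detected 2D keypoints to 3D with a small fully-connected network built from residual blocks. The sketch below is a conceptual PyTorch rendition of that idea only; it is not MMPose's implementation, and the layer width and block count are illustrative.

```python
# Conceptual sketch of the 2D-to-3D lifting idea behind "A simple yet
# effective baseline for 3d human pose estimation" (ICCV'17). This is NOT the
# MMPose implementation; sizes and block count are illustrative only.
import torch
from torch import nn


class LiftingBlock(nn.Module):
    """Residual block of two linear layers with BN, ReLU and dropout."""

    def __init__(self, dim=1024, dropout=0.5):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(dim, dim), nn.BatchNorm1d(dim), nn.ReLU(), nn.Dropout(dropout),
            nn.Linear(dim, dim), nn.BatchNorm1d(dim), nn.ReLU(), nn.Dropout(dropout))

    def forward(self, x):
        return x + self.net(x)


class SimpleLifter(nn.Module):
    """Map 2D keypoints (K x 2) of one frame to 3D keypoints (K x 3)."""

    def __init__(self, num_keypoints=17, dim=1024, num_blocks=2):
        super().__init__()
        self.inp = nn.Linear(num_keypoints * 2, dim)
        self.blocks = nn.Sequential(*[LiftingBlock(dim) for _ in range(num_blocks)])
        self.out = nn.Linear(dim, num_keypoints * 3)

    def forward(self, kpts_2d):            # kpts_2d: (N, K, 2)
        n, k, _ = kpts_2d.shape
        x = self.inp(kpts_2d.reshape(n, -1))
        x = self.blocks(x)
        return self.out(x).reshape(n, k, 3)


# Example: lift a batch of 4 poses with 17 COCO-style keypoints.
poses_2d = torch.randn(4, 17, 2)
poses_3d = SimpleLifter()(poses_2d)
print(poses_3d.shape)  # torch.Size([4, 17, 3])
```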
+ +**New Features** + +- Support "A simple yet effective baseline for 3d human pose estimation" (ICCV'17) ([#554](https://github.com/open-mmlab/mmpose/pull/554),[#558](https://github.com/open-mmlab/mmpose/pull/558),[#566](https://github.com/open-mmlab/mmpose/pull/566),[#570](https://github.com/open-mmlab/mmpose/pull/570),[#589](https://github.com/open-mmlab/mmpose/pull/589)) + +- Support animal pose estimation ([#559](https://github.com/open-mmlab/mmpose/pull/559),[#561](https://github.com/open-mmlab/mmpose/pull/561),[#563](https://github.com/open-mmlab/mmpose/pull/563),[#571](https://github.com/open-mmlab/mmpose/pull/571),[#603](https://github.com/open-mmlab/mmpose/pull/603),[#605](https://github.com/open-mmlab/mmpose/pull/605)) + +- Support Horse-10 dataset ([#561](https://github.com/open-mmlab/mmpose/pull/561)), MacaquePose dataset ([#561](https://github.com/open-mmlab/mmpose/pull/561)), Vinegar Fly dataset ([#561](https://github.com/open-mmlab/mmpose/pull/561)), Desert Locust dataset ([#561](https://github.com/open-mmlab/mmpose/pull/561)), Grevy's Zebra dataset ([#561](https://github.com/open-mmlab/mmpose/pull/561)), ATRW dataset ([#571](https://github.com/open-mmlab/mmpose/pull/571)), and Animal-Pose dataset ([#603](https://github.com/open-mmlab/mmpose/pull/603)) + +- Support bottom-up pose tracking demo ([#574](https://github.com/open-mmlab/mmpose/pull/574)) + +- Support FP16 training ([#584](https://github.com/open-mmlab/mmpose/pull/584),[#616](https://github.com/open-mmlab/mmpose/pull/616),[#626](https://github.com/open-mmlab/mmpose/pull/626)) + +- Support NMS for bottom-up ([#609](https://github.com/open-mmlab/mmpose/pull/609)) + +**Bug Fixes** + +- Fix bugs in the top-down demo, when there are no people in the images ([#569](https://github.com/open-mmlab/mmpose/pull/569)). + +- Fix the links in the doc ([#612](https://github.com/open-mmlab/mmpose/pull/612)) + +**Improvements** + +- Speed up top-down inference ([#560](https://github.com/open-mmlab/mmpose/pull/560)) + +- Update github CI ([#562](https://github.com/open-mmlab/mmpose/pull/562), [#564](https://github.com/open-mmlab/mmpose/pull/564)) + +- Update Readme ([#578](https://github.com/open-mmlab/mmpose/pull/578),[#579](https://github.com/open-mmlab/mmpose/pull/579),[#580](https://github.com/open-mmlab/mmpose/pull/580),[#592](https://github.com/open-mmlab/mmpose/pull/592),[#599](https://github.com/open-mmlab/mmpose/pull/599),[#600](https://github.com/open-mmlab/mmpose/pull/600),[#607](https://github.com/open-mmlab/mmpose/pull/607)) + +- Update Faq ([#587](https://github.com/open-mmlab/mmpose/pull/587), [#610](https://github.com/open-mmlab/mmpose/pull/610)) + +## **v0.13.0 (31/03/2021)** + +**Highlights** + +1. Support Wingloss. + +2. Support RHD hand dataset. 
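For context on the Wing loss highlighted above: it penalizes small localization errors on a logarithmic scale and large errors linearly, which helps regression-based landmark models focus on small and medium errors. A minimal PyTorch sketch of the published formulation follows; `omega` and `epsilon` follow the paper's defaults and are not necessarily MMPose's config defaults.

```python
# Minimal sketch of Wing loss (Feng et al., CVPR'2018) for keypoint regression.
# Hyper-parameter values follow the paper; MMPose's own defaults may differ.
import torch


def wing_loss(pred, target, omega=10.0, epsilon=2.0):
    """pred, target: tensors of shape (N, K, 2) with keypoint coordinates."""
    diff = (pred - target).abs()
    # Constant that keeps the two branches continuous at |x| = omega.
    c = omega - omega * torch.log(torch.tensor(1.0 + omega / epsilon))
    losses = torch.where(
        diff < omega,
        omega * torch.log(1.0 + diff / epsilon),  # non-linear region for small errors
        diff - c)                                 # linear region for large errors
    return losses.mean()


pred = torch.randn(2, 21, 2)    # e.g. 21 hand keypoints, as in the RHD dataset above
target = torch.randn(2, 21, 2)
print(wing_loss(pred, target))
```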
+ +**New Features** + +- Support Wingloss ([#482](https://github.com/open-mmlab/mmpose/pull/482)) + +- Support RHD hand dataset ([#523](https://github.com/open-mmlab/mmpose/pull/523), [#551](https://github.com/open-mmlab/mmpose/pull/551)) + +- Support Human3.6m dataset for 3d keypoint detection ([#518](https://github.com/open-mmlab/mmpose/pull/518), [#527](https://github.com/open-mmlab/mmpose/pull/527)) + +- Support TCN model for 3d keypoint detection ([#521](https://github.com/open-mmlab/mmpose/pull/521), [#522](https://github.com/open-mmlab/mmpose/pull/522)) + +- Support Interhand3D model for 3d hand detection ([#536](https://github.com/open-mmlab/mmpose/pull/536)) + +- Support Multi-task detector ([#480](https://github.com/open-mmlab/mmpose/pull/480)) + +**Bug Fixes** + +- Fix PCKh@0.1 calculation ([#516](https://github.com/open-mmlab/mmpose/pull/516)) + +- Fix unittest ([#529](https://github.com/open-mmlab/mmpose/pull/529)) + +- Fix circular importing ([#542](https://github.com/open-mmlab/mmpose/pull/542)) + +- Fix bugs in bottom-up keypoint score ([#548](https://github.com/open-mmlab/mmpose/pull/548)) + +**Improvements** + +- Update config & checkpoints ([#525](https://github.com/open-mmlab/mmpose/pull/525), [#546](https://github.com/open-mmlab/mmpose/pull/546)) + +- Fix typos ([#514](https://github.com/open-mmlab/mmpose/pull/514), [#519](https://github.com/open-mmlab/mmpose/pull/519), [#532](https://github.com/open-mmlab/mmpose/pull/532), [#537](https://github.com/open-mmlab/mmpose/pull/537), ) + +- Speed up post processing ([#535](https://github.com/open-mmlab/mmpose/pull/535)) + +- Update mmcv version dependency ([#544](https://github.com/open-mmlab/mmpose/pull/544)) + +## **v0.12.0 (28/02/2021)** + +**Highlights** + +1. Support DeepPose algorithm. 
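DeepPose, highlighted above, regresses keypoint coordinates directly from image features rather than predicting heatmaps. The following conceptual sketch shows such a regression head on top of a standard backbone; it is illustrative only and not the MMPose implementation.

```python
# Conceptual sketch of DeepPose-style direct coordinate regression: backbone
# features -> global average pooling -> fully-connected layer -> (K, 2) coords.
# Illustrative only; this is not the MMPose implementation.
import torch
from torch import nn
from torchvision.models import resnet50


class DeepPoseStyleRegressor(nn.Module):
    def __init__(self, num_keypoints=17):
        super().__init__()
        backbone = resnet50()
        # Keep everything up to (and including) global average pooling, drop the classifier.
        self.features = nn.Sequential(*list(backbone.children())[:-1])
        self.fc = nn.Linear(2048, num_keypoints * 2)
        self.num_keypoints = num_keypoints

    def forward(self, img):                   # img: (N, 3, H, W)
        feat = self.features(img).flatten(1)  # (N, 2048)
        coords = self.fc(feat)                # (N, K * 2), normalized coordinates
        return coords.view(-1, self.num_keypoints, 2)


model = DeepPoseStyleRegressor()
out = model(torch.randn(1, 3, 256, 192))
print(out.shape)  # torch.Size([1, 17, 2])
```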
+ +**New Features** + +- Support DeepPose algorithm ([#446](https://github.com/open-mmlab/mmpose/pull/446), [#461](https://github.com/open-mmlab/mmpose/pull/461)) + +- Support interhand3d dataset ([#468](https://github.com/open-mmlab/mmpose/pull/468)) + +- Support Albumentation pipeline ([#469](https://github.com/open-mmlab/mmpose/pull/469)) + +- Support PhotometricDistortion pipeline ([#485](https://github.com/open-mmlab/mmpose/pull/485)) + +- Set seed option for training ([#493](https://github.com/open-mmlab/mmpose/pull/493)) + +- Add demos for face keypoint detection ([#502](https://github.com/open-mmlab/mmpose/pull/502)) + +**Bug Fixes** + +- Change channel order according to configs ([#504](https://github.com/open-mmlab/mmpose/pull/504)) + +- Fix `num_factors` in UDP encoding ([#495](https://github.com/open-mmlab/mmpose/pull/495)) + +- Fix configs ([#456](https://github.com/open-mmlab/mmpose/pull/456)) + +**Breaking Changes** + +- Refactor configs for wholebody pose estimation ([#487](https://github.com/open-mmlab/mmpose/pull/487), [#491](https://github.com/open-mmlab/mmpose/pull/491)) + +- Rename `decode` function for heads ([#481](https://github.com/open-mmlab/mmpose/pull/481)) + +**Improvements** + +- Update config & checkpoints ([#453](https://github.com/open-mmlab/mmpose/pull/453),[#484](https://github.com/open-mmlab/mmpose/pull/484),[#487](https://github.com/open-mmlab/mmpose/pull/487)) + +- Add README in Chinese ([#462](https://github.com/open-mmlab/mmpose/pull/462)) + +- Add tutorials about configs ([#465](https://github.com/open-mmlab/mmpose/pull/465)) + +- Add demo videos for various tasks ([#499](https://github.com/open-mmlab/mmpose/pull/499), [#503](https://github.com/open-mmlab/mmpose/pull/503)) + +- Update docs about MMPose installation ([#467](https://github.com/open-mmlab/mmpose/pull/467), [#505](https://github.com/open-mmlab/mmpose/pull/505)) + +- Rename `stat.py` to `stats.py` ([#483](https://github.com/open-mmlab/mmpose/pull/483)) + +- Fix typos ([#463](https://github.com/open-mmlab/mmpose/pull/463), [#464](https://github.com/open-mmlab/mmpose/pull/464), [#477](https://github.com/open-mmlab/mmpose/pull/477), [#481](https://github.com/open-mmlab/mmpose/pull/481)) + +- latex to bibtex ([#471](https://github.com/open-mmlab/mmpose/pull/471)) + +- Update FAQ ([#466](https://github.com/open-mmlab/mmpose/pull/466)) + +## **v0.11.0 (31/01/2021)** + +**Highlights** + +1. Support fashion landmark detection. + +2. Support face keypoint detection. + +3. Support pose tracking with MMTracking. 
+ +**New Features** + +- Support fashion landmark detection (DeepFashion) ([#413](https://github.com/open-mmlab/mmpose/pull/413)) + +- Support face keypoint detection (300W, AFLW, COFW, WFLW) ([#367](https://github.com/open-mmlab/mmpose/pull/367)) + +- Support pose tracking demo with MMTracking ([#427](https://github.com/open-mmlab/mmpose/pull/427)) + +- Support face demo ([#443](https://github.com/open-mmlab/mmpose/pull/443)) + +- Support AIC dataset for bottom-up methods ([#438](https://github.com/open-mmlab/mmpose/pull/438), [#449](https://github.com/open-mmlab/mmpose/pull/449)) + +**Bug Fixes** + +- Fix multi-batch training ([#434](https://github.com/open-mmlab/mmpose/pull/434)) + +- Fix sigmas in AIC dataset ([#441](https://github.com/open-mmlab/mmpose/pull/441)) + +- Fix config file ([#420](https://github.com/open-mmlab/mmpose/pull/420)) + +**Breaking Changes** + +- Refactor Heads ([#382](https://github.com/open-mmlab/mmpose/pull/382)) + +**Improvements** + +- Update readme ([#409](https://github.com/open-mmlab/mmpose/pull/409), [#412](https://github.com/open-mmlab/mmpose/pull/412), [#415](https://github.com/open-mmlab/mmpose/pull/415), [#416](https://github.com/open-mmlab/mmpose/pull/416), [#419](https://github.com/open-mmlab/mmpose/pull/419), [#421](https://github.com/open-mmlab/mmpose/pull/421), [#422](https://github.com/open-mmlab/mmpose/pull/422), [#424](https://github.com/open-mmlab/mmpose/pull/424), [#425](https://github.com/open-mmlab/mmpose/pull/425), [#435](https://github.com/open-mmlab/mmpose/pull/435), [#436](https://github.com/open-mmlab/mmpose/pull/436), [#437](https://github.com/open-mmlab/mmpose/pull/437), [#444](https://github.com/open-mmlab/mmpose/pull/444), [#445](https://github.com/open-mmlab/mmpose/pull/445)) + +- Add GAP (global average pooling) neck ([#414](https://github.com/open-mmlab/mmpose/pull/414)) + +- Speed up ([#411](https://github.com/open-mmlab/mmpose/pull/411), [#423](https://github.com/open-mmlab/mmpose/pull/423)) + +- Support COCO test-dev test ([#433](https://github.com/open-mmlab/mmpose/pull/433)) + +## **v0.10.0 (31/12/2020)** + +**Highlights** + +1. Support more human pose estimation methods. + + 1. [UDP](https://arxiv.org/abs/1911.07524) + +2. Support pose tracking. + +3. Support multi-batch inference. + +4. Add some useful tools, including `analyze_logs`, `get_flops`, `print_config`. + +5. Support more backbone networks. + + 1. [ResNest](https://arxiv.org/pdf/2004.08955.pdf) + 2. 
[VGG](https://arxiv.org/abs/1409.1556) + +**New Features** + +- Support UDP ([#353](https://github.com/open-mmlab/mmpose/pull/353), [#371](https://github.com/open-mmlab/mmpose/pull/371), [#402](https://github.com/open-mmlab/mmpose/pull/402)) + +- Support multi-batch inference ([#390](https://github.com/open-mmlab/mmpose/pull/390)) + +- Support MHP dataset ([#386](https://github.com/open-mmlab/mmpose/pull/386)) + +- Support pose tracking demo ([#380](https://github.com/open-mmlab/mmpose/pull/380)) + +- Support mpii-trb demo ([#372](https://github.com/open-mmlab/mmpose/pull/372)) + +- Support mobilenet for hand pose estimation ([#377](https://github.com/open-mmlab/mmpose/pull/377)) + +- Support ResNest backbone ([#370](https://github.com/open-mmlab/mmpose/pull/370)) + +- Support VGG backbone ([#370](https://github.com/open-mmlab/mmpose/pull/370)) + +- Add some useful tools, including `analyze_logs`, `get_flops`, `print_config` ([#324](https://github.com/open-mmlab/mmpose/pull/324)) + +**Bug Fixes** + +- Fix bugs in pck evaluation ([#328](https://github.com/open-mmlab/mmpose/pull/328)) + +- Fix model download links in README ([#396](https://github.com/open-mmlab/mmpose/pull/396), [#397](https://github.com/open-mmlab/mmpose/pull/397)) + +- Fix CrowdPose annotations and update benchmarks ([#384](https://github.com/open-mmlab/mmpose/pull/384)) + +- Fix modelzoo stat ([#354](https://github.com/open-mmlab/mmpose/pull/354), [#360](https://github.com/open-mmlab/mmpose/pull/360), [#362](https://github.com/open-mmlab/mmpose/pull/362)) + +- Fix config files for aic datasets ([#340](https://github.com/open-mmlab/mmpose/pull/340)) + +**Breaking Changes** + +- Rename `image_thr` to `det_bbox_thr` for top-down methods. + +**Improvements** + +- Organize the readme files ([#398](https://github.com/open-mmlab/mmpose/pull/398), [#399](https://github.com/open-mmlab/mmpose/pull/399), [#400](https://github.com/open-mmlab/mmpose/pull/400)) + +- Check linting for markdown ([#379](https://github.com/open-mmlab/mmpose/pull/379)) + +- Add faq.md ([#350](https://github.com/open-mmlab/mmpose/pull/350)) + +- Remove PyTorch 1.4 in CI ([#338](https://github.com/open-mmlab/mmpose/pull/338)) + +- Add pypi badge in readme ([#329](https://github.com/open-mmlab/mmpose/pull/329)) + +## **v0.9.0 (30/11/2020)** + +**Highlights** + +1. Support more human pose estimation methods. + + 1. [MSPN](https://arxiv.org/abs/1901.00148) + 2. [RSN](https://arxiv.org/abs/2003.04030) + +2. Support video pose estimation datasets. + + 1. [sub-JHMDB](http://jhmdb.is.tue.mpg.de/dataset) + +3. Support Onnx model conversion. + +**New Features** + +- Support MSPN ([#278](https://github.com/open-mmlab/mmpose/pull/278)) + +- Support RSN ([#221](https://github.com/open-mmlab/mmpose/pull/221), [#318](https://github.com/open-mmlab/mmpose/pull/318)) + +- Support new post-processing method for MSPN & RSN ([#288](https://github.com/open-mmlab/mmpose/pull/288)) + +- Support sub-JHMDB dataset ([#292](https://github.com/open-mmlab/mmpose/pull/292)) + +- Support urls for pre-trained models in config files ([#232](https://github.com/open-mmlab/mmpose/pull/232)) + +- Support Onnx ([#305](https://github.com/open-mmlab/mmpose/pull/305)) + +**Bug Fixes** + +- Fix model download links in README ([#255](https://github.com/open-mmlab/mmpose/pull/255), [#315](https://github.com/open-mmlab/mmpose/pull/315)) + +**Breaking Changes** + +- `post_process=True|False` and `unbiased_decoding=True|False` are deprecated, use `post_process=None|default|unbiased` etc. 
instead ([#288](https://github.com/open-mmlab/mmpose/pull/288)) + +**Improvements** + +- Enrich the model zoo ([#256](https://github.com/open-mmlab/mmpose/pull/256), [#320](https://github.com/open-mmlab/mmpose/pull/320)) + +- Set the default map_location as 'cpu' to reduce gpu memory cost ([#227](https://github.com/open-mmlab/mmpose/pull/227)) + +- Support return heatmaps and backbone features for bottom-up models ([#229](https://github.com/open-mmlab/mmpose/pull/229)) + +- Upgrade mmcv maximum & minimum version ([#269](https://github.com/open-mmlab/mmpose/pull/269), [#313](https://github.com/open-mmlab/mmpose/pull/313)) + +- Automatically add modelzoo statistics to readthedocs ([#252](https://github.com/open-mmlab/mmpose/pull/252)) + +- Fix Pylint issues ([#258](https://github.com/open-mmlab/mmpose/pull/258), [#259](https://github.com/open-mmlab/mmpose/pull/259), [#260](https://github.com/open-mmlab/mmpose/pull/260), [#262](https://github.com/open-mmlab/mmpose/pull/262), [#265](https://github.com/open-mmlab/mmpose/pull/265), [#267](https://github.com/open-mmlab/mmpose/pull/267), [#268](https://github.com/open-mmlab/mmpose/pull/268), [#270](https://github.com/open-mmlab/mmpose/pull/270), [#271](https://github.com/open-mmlab/mmpose/pull/271), [#272](https://github.com/open-mmlab/mmpose/pull/272), [#273](https://github.com/open-mmlab/mmpose/pull/273), [#275](https://github.com/open-mmlab/mmpose/pull/275), [#276](https://github.com/open-mmlab/mmpose/pull/276), [#283](https://github.com/open-mmlab/mmpose/pull/283), [#285](https://github.com/open-mmlab/mmpose/pull/285), [#293](https://github.com/open-mmlab/mmpose/pull/293), [#294](https://github.com/open-mmlab/mmpose/pull/294), [#295](https://github.com/open-mmlab/mmpose/pull/295)) + +- Improve README ([#226](https://github.com/open-mmlab/mmpose/pull/226), [#257](https://github.com/open-mmlab/mmpose/pull/257), [#264](https://github.com/open-mmlab/mmpose/pull/264), [#280](https://github.com/open-mmlab/mmpose/pull/280), [#296](https://github.com/open-mmlab/mmpose/pull/296)) + +- Support PyTorch 1.7 in CI ([#274](https://github.com/open-mmlab/mmpose/pull/274)) + +- Add docs/tutorials for running demos ([#263](https://github.com/open-mmlab/mmpose/pull/263)) + +## **v0.8.0 (31/10/2020)** + +**Highlights** + +1. Support more human pose estimation datasets. + + 1. [CrowdPose](https://github.com/Jeff-sjtu/CrowdPose) + 2. [PoseTrack18](https://posetrack.net/) + +2. Support more 2D hand keypoint estimation datasets. + + 1. [InterHand2.6](https://github.com/facebookresearch/InterHand2.6M) + +3. Support adversarial training for 3D human shape recovery. + +4. Support multi-stage losses. + +5. Support mpii demo. 
+ +**New Features** + +- Support [CrowdPose](https://github.com/Jeff-sjtu/CrowdPose) dataset ([#195](https://github.com/open-mmlab/mmpose/pull/195)) + +- Support [PoseTrack18](https://posetrack.net/) dataset ([#220](https://github.com/open-mmlab/mmpose/pull/220)) + +- Support [InterHand2.6](https://github.com/facebookresearch/InterHand2.6M) dataset ([#202](https://github.com/open-mmlab/mmpose/pull/202)) + +- Support adversarial training for 3D human shape recovery ([#192](https://github.com/open-mmlab/mmpose/pull/192)) + +- Support multi-stage losses ([#204](https://github.com/open-mmlab/mmpose/pull/204)) + +**Bug Fixes** + +- Fix config files ([#190](https://github.com/open-mmlab/mmpose/pull/190)) + +**Improvements** + +- Add mpii demo ([#216](https://github.com/open-mmlab/mmpose/pull/216)) + +- Improve README ([#181](https://github.com/open-mmlab/mmpose/pull/181), [#183](https://github.com/open-mmlab/mmpose/pull/183), [#208](https://github.com/open-mmlab/mmpose/pull/208)) + +- Support return heatmaps and backbone features ([#196](https://github.com/open-mmlab/mmpose/pull/196), [#212](https://github.com/open-mmlab/mmpose/pull/212)) + +- Support different return formats of mmdetection models ([#217](https://github.com/open-mmlab/mmpose/pull/217)) + +## **v0.7.0 (30/9/2020)** + +**Highlights** + +1. Support HMR for 3D human shape recovery. + +2. Support WholeBody human pose estimation. + + 1. [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody) + +3. Support more 2D hand keypoint estimation datasets. + + 1. [Frei-hand](https://lmb.informatik.uni-freiburg.de/projects/freihand/) + 2. [CMU Panoptic HandDB](http://domedb.perception.cs.cmu.edu/handdb.html) + +4. Add more popular backbones & enrich the [modelzoo](https://mmpose.readthedocs.io/en/latest/model_zoo.html) + + 1. ShuffleNetv2 + +5. Support hand demo and whole-body demo. 
+ +**New Features** + +- Support HMR for 3D human shape recovery ([#157](https://github.com/open-mmlab/mmpose/pull/157), [#160](https://github.com/open-mmlab/mmpose/pull/160), [#161](https://github.com/open-mmlab/mmpose/pull/161), [#162](https://github.com/open-mmlab/mmpose/pull/162)) + +- Support [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody) dataset ([#133](https://github.com/open-mmlab/mmpose/pull/133)) + +- Support [Frei-hand](https://lmb.informatik.uni-freiburg.de/projects/freihand/) dataset ([#125](https://github.com/open-mmlab/mmpose/pull/125)) + +- Support [CMU Panoptic HandDB](http://domedb.perception.cs.cmu.edu/handdb.html) dataset ([#144](https://github.com/open-mmlab/mmpose/pull/144)) + +- Support H36M dataset ([#159](https://github.com/open-mmlab/mmpose/pull/159)) + +- Support ShuffleNetv2 ([#139](https://github.com/open-mmlab/mmpose/pull/139)) + +- Support saving best models based on key indicator ([#127](https://github.com/open-mmlab/mmpose/pull/127)) + +**Bug Fixes** + +- Fix typos in docs ([#121](https://github.com/open-mmlab/mmpose/pull/121)) + +- Fix assertion ([#142](https://github.com/open-mmlab/mmpose/pull/142)) + +**Improvements** + +- Add tools to transform .mat format to .json format ([#126](https://github.com/open-mmlab/mmpose/pull/126)) + +- Add hand demo ([#115](https://github.com/open-mmlab/mmpose/pull/115)) + +- Add whole-body demo ([#163](https://github.com/open-mmlab/mmpose/pull/163)) + +- Reuse mmcv utility function and update version files ([#135](https://github.com/open-mmlab/mmpose/pull/135), [#137](https://github.com/open-mmlab/mmpose/pull/137)) + +- Enrich the modelzoo ([#147](https://github.com/open-mmlab/mmpose/pull/147), [#169](https://github.com/open-mmlab/mmpose/pull/169)) + +- Improve docs ([#174](https://github.com/open-mmlab/mmpose/pull/174), [#175](https://github.com/open-mmlab/mmpose/pull/175), [#178](https://github.com/open-mmlab/mmpose/pull/178)) + +- Improve README ([#176](https://github.com/open-mmlab/mmpose/pull/176)) + +- Improve version.py ([#173](https://github.com/open-mmlab/mmpose/pull/173)) + +## **v0.6.0 (31/8/2020)** + +**Highlights** + +1. Add more popular backbones & enrich the [modelzoo](https://mmpose.readthedocs.io/en/latest/model_zoo.html) + + 1. ResNext + 2. SEResNet + 3. ResNetV1D + 4. MobileNetv2 + 5. ShuffleNetv1 + 6. CPM (Convolutional Pose Machine) + +2. Add more popular datasets: + + 1. [AIChallenger](https://arxiv.org/abs/1711.06475?context=cs.CV) + 2. [MPII](http://human-pose.mpi-inf.mpg.de/) + 3. [MPII-TRB](https://github.com/kennymckormick/Triplet-Representation-of-human-Body) + 4. [OCHuman](http://www.liruilong.cn/projects/pose2seg/index.html) + +3. Support 2d hand keypoint estimation. + + 1. [OneHand10K](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html) + +4. Support bottom-up inference. 
+ +**New Features** + +- Support [OneHand10K](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html) dataset ([#52](https://github.com/open-mmlab/mmpose/pull/52)) + +- Support [MPII](http://human-pose.mpi-inf.mpg.de/) dataset ([#55](https://github.com/open-mmlab/mmpose/pull/55)) + +- Support [MPII-TRB](https://github.com/kennymckormick/Triplet-Representation-of-human-Body) dataset ([#19](https://github.com/open-mmlab/mmpose/pull/19), [#47](https://github.com/open-mmlab/mmpose/pull/47), [#48](https://github.com/open-mmlab/mmpose/pull/48)) + +- Support [OCHuman](http://www.liruilong.cn/projects/pose2seg/index.html) dataset ([#70](https://github.com/open-mmlab/mmpose/pull/70)) + +- Support [AIChallenger](https://arxiv.org/abs/1711.06475?context=cs.CV) dataset ([#87](https://github.com/open-mmlab/mmpose/pull/87)) + +- Support multiple backbones ([#26](https://github.com/open-mmlab/mmpose/pull/26)) + +- Support CPM model ([#56](https://github.com/open-mmlab/mmpose/pull/56)) + +**Bug Fixes** + +- Fix configs for MPII & MPII-TRB datasets ([#93](https://github.com/open-mmlab/mmpose/pull/93)) + +- Fix the bug of missing `test_pipeline` in configs ([#14](https://github.com/open-mmlab/mmpose/pull/14)) + +- Fix typos ([#27](https://github.com/open-mmlab/mmpose/pull/27), [#28](https://github.com/open-mmlab/mmpose/pull/28), [#50](https://github.com/open-mmlab/mmpose/pull/50), [#53](https://github.com/open-mmlab/mmpose/pull/53), [#63](https://github.com/open-mmlab/mmpose/pull/63)) + +**Improvements** + +- Update benchmark ([#93](https://github.com/open-mmlab/mmpose/pull/93)) + +- Add Dockerfile ([#44](https://github.com/open-mmlab/mmpose/pull/44)) + +- Improve unittest coverage and minor fix ([#18](https://github.com/open-mmlab/mmpose/pull/18)) + +- Support CPUs for train/val/demo ([#34](https://github.com/open-mmlab/mmpose/pull/34)) + +- Support bottom-up demo ([#69](https://github.com/open-mmlab/mmpose/pull/69)) + +- Add tools to publish model ([#62](https://github.com/open-mmlab/mmpose/pull/62)) + +- Enrich the modelzoo ([#64](https://github.com/open-mmlab/mmpose/pull/64), [#68](https://github.com/open-mmlab/mmpose/pull/68), [#82](https://github.com/open-mmlab/mmpose/pull/82)) + +## **v0.5.0 (21/7/2020)** + +**Highlights** + +- MMPose is released. + +**Main Features** + +- Support both top-down and bottom-up pose estimation approaches. + +- Achieve higher training efficiency and higher accuracy than other popular codebases (e.g. AlphaPose, HRNet) + +- Support various backbone models: ResNet, HRNet, SCNet, Houglass and HigherHRNet. diff --git a/docs/en/notes/ecosystem.md b/docs/en/notes/ecosystem.md index b0027cfa53..6ae3dd5aa6 100644 --- a/docs/en/notes/ecosystem.md +++ b/docs/en/notes/ecosystem.md @@ -1,3 +1,3 @@ -# Ecosystem - -Coming soon. +# Ecosystem + +Coming soon. diff --git a/docs/en/notes/pytorch_2.md b/docs/en/notes/pytorch_2.md index 4892e554a5..932f9b0734 100644 --- a/docs/en/notes/pytorch_2.md +++ b/docs/en/notes/pytorch_2.md @@ -1,14 +1,14 @@ -# PyTorch 2.0 Compatibility and Benchmarks - -MMPose 1.0.0 is now compatible with PyTorch 2.0, ensuring that users can leverage the latest features and performance improvements offered by the PyTorch 2.0 framework when using MMPose. With the integration of inductor, users can expect faster model speeds. 
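As a rough, self-contained sketch of what the inductor integration means in practice (this is not the exact benchmark setup behind the table below, and the torchvision ResNet-50 model and the 256x192 dummy input are stand-ins for an MMPose model), compiling a model with PyTorch 2.0 essentially amounts to wrapping a regular `nn.Module` with `torch.compile`:

```Python
import torch
import torchvision

# Any nn.Module can be compiled; a torchvision ResNet-50 stands in for an MMPose model here.
model = torchvision.models.resnet50().eval()

# 'inductor' is the default backend in PyTorch 2.0.
compiled_model = torch.compile(model, backend='inductor')

# The first call triggers compilation; later calls reuse the optimized code.
dummy_input = torch.randn(1, 3, 256, 192)
with torch.no_grad():
    output = compiled_model(dummy_input)
print(output.shape)  # torch.Size([1, 1000])
```

The compiled module is a drop-in replacement for the original one, which is why the speed-ups reported below typically require only a small switch on the user side rather than changes to the model code.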
The table below shows several example models: - -| Model | Training Speed | Memory | -| :-------- | :---------------------: | :-----------: | -| ViTPose-B | 29.6% ↑ (0.931 → 0.655) | 10586 → 10663 | -| ViTPose-S | 33.7% ↑ (0.563 → 0.373) | 6091 → 6170 | -| HRNet-w32 | 12.8% ↑ (0.553 → 0.482) | 9849 → 10145 | -| HRNet-w48 | 37.1% ↑ (0.437 → 0.275) | 7319 → 7394 | -| RTMPose-t | 6.3% ↑ (1.533 → 1.437) | 6292 → 6489 | -| RTMPose-s | 13.1% ↑ (1.645 → 1.430) | 9013 → 9208 | - -- Pytorch 2.0 test, add projects doc and refactor by @LareinaM in [PR#2136](https://github.com/open-mmlab/mmpose/pull/2136) +# PyTorch 2.0 Compatibility and Benchmarks + +MMPose 1.0.0 is now compatible with PyTorch 2.0, ensuring that users can leverage the latest features and performance improvements offered by the PyTorch 2.0 framework when using MMPose. With the integration of inductor, users can expect faster model speeds. The table below shows several example models: + +| Model | Training Speed | Memory | +| :-------- | :---------------------: | :-----------: | +| ViTPose-B | 29.6% ↑ (0.931 → 0.655) | 10586 → 10663 | +| ViTPose-S | 33.7% ↑ (0.563 → 0.373) | 6091 → 6170 | +| HRNet-w32 | 12.8% ↑ (0.553 → 0.482) | 9849 → 10145 | +| HRNet-w48 | 37.1% ↑ (0.437 → 0.275) | 7319 → 7394 | +| RTMPose-t | 6.3% ↑ (1.533 → 1.437) | 6292 → 6489 | +| RTMPose-s | 13.1% ↑ (1.645 → 1.430) | 9013 → 9208 | + +- Pytorch 2.0 test, add projects doc and refactor by @LareinaM in [PR#2136](https://github.com/open-mmlab/mmpose/pull/2136) diff --git a/docs/en/overview.md b/docs/en/overview.md index b6e31dd239..ff56d162bd 100644 --- a/docs/en/overview.md +++ b/docs/en/overview.md @@ -1,66 +1,66 @@ -# Overview - -This chapter will introduce you to the overall framework of MMPose and provide links to detailed tutorials. - -## What is MMPose - -![overview](https://user-images.githubusercontent.com/13503330/191004511-508d3ec6-9ead-4c52-a522-4d9aa1f26027.png) - -MMPose is a Pytorch-based pose estimation open-source toolkit, a member of the [OpenMMLab Project](https://github.com/open-mmlab). It contains a rich set of algorithms for 2d multi-person human pose estimation, 2d hand pose estimation, 2d face landmark detection, 133 keypoint whole-body human pose estimation, fashion landmark detection and animal pose estimation as well as related components and modules, below is its overall framework. - -MMPose consists of **8** main components: - -- **apis** provides high-level APIs for model inference -- **structures** provides data structures like bbox, keypoint and PoseDataSample -- **datasets** supports various datasets for pose estimation - - **transforms** contains a lot of useful data augmentation transforms -- **codecs** provides pose encoders and decoders: an encoder encodes poses (mostly keypoints) into learning targets (e.g. 
heatmaps), and a decoder decodes model outputs into pose predictions -- **models** provides all components of pose estimation models in a modular structure - - **pose_estimators** defines all pose estimation model classes - - **data_preprocessors** is for preprocessing the input data of the model - - **backbones** provides a collection of backbone networks - - **necks** contains various neck modules - - **heads** contains various prediction heads that perform pose estimation - - **losses** contains various loss functions -- **engine** provides runtime components related to pose estimation - - **hooks** provides various hooks of the runner -- **evaluation** provides metrics for evaluating model performance -- **visualization** is for visualizing skeletons, heatmaps and other information - -## How to Use this Guide - -We have prepared detailed guidelines for all types of users: - -1. For installation instructions: - - - [Installation](./installation.md) - -2. For the basic usage of MMPose: - - - [A 20-minute Tour to MMPose](./guide_to_framework.md) - - [Demos](./demos.md) - - [Inference](./user_guides/inference.md) - - [Configs](./user_guides/configs.md) - - [Prepare Datasets](./user_guides/prepare_datasets.md) - - [Train and Test](./user_guides/train_and_test.md) - -3. For developers who wish to develop based on MMPose: - - - [Learn about Codecs](./advanced_guides/codecs.md) - - [Dataflow in MMPose](./advanced_guides/dataflow.md) - - [Implement New Models](./advanced_guides/implement_new_models.md) - - [Customize Datasets](./advanced_guides/customize_datasets.md) - - [Customize Data Transforms](./advanced_guides/customize_transforms.md) - - [Customize Optimizer](./advanced_guides/customize_optimizer.md) - - [Customize Logging](./advanced_guides/customize_logging.md) - - [How to Deploy](./advanced_guides/how_to_deploy.md) - - [Model Analysis](./advanced_guides/model_analysis.md) - - [Migration Guide](./migration.md) - -4. For researchers and developers who are willing to contribute to MMPose: - - - [Contribution Guide](./contribution_guide.md) - -5. For some common issues, we provide a FAQ list: - - - [FAQ](./faq.md) +# Overview + +This chapter will introduce you to the overall framework of MMPose and provide links to detailed tutorials. + +## What is MMPose + +![overview](https://user-images.githubusercontent.com/13503330/191004511-508d3ec6-9ead-4c52-a522-4d9aa1f26027.png) + +MMPose is a PyTorch-based open-source pose estimation toolkit and a member of the [OpenMMLab Project](https://github.com/open-mmlab). It contains a rich set of algorithms for 2d multi-person human pose estimation, 2d hand pose estimation, 2d face landmark detection, 133 keypoint whole-body human pose estimation, fashion landmark detection and animal pose estimation, as well as related components and modules. The overall framework is shown below. + +MMPose consists of **8** main components: + +- **apis** provides high-level APIs for model inference +- **structures** provides data structures like bbox, keypoint and PoseDataSample +- **datasets** supports various datasets for pose estimation + - **transforms** contains a lot of useful data augmentation transforms +- **codecs** provides pose encoders and decoders: an encoder encodes poses (mostly keypoints) into learning targets (e.g.
heatmaps), and a decoder decodes model outputs into pose predictions +- **models** provides all components of pose estimation models in a modular structure + - **pose_estimators** defines all pose estimation model classes + - **data_preprocessors** is for preprocessing the input data of the model + - **backbones** provides a collection of backbone networks + - **necks** contains various neck modules + - **heads** contains various prediction heads that perform pose estimation + - **losses** contains various loss functions +- **engine** provides runtime components related to pose estimation + - **hooks** provides various hooks of the runner +- **evaluation** provides metrics for evaluating model performance +- **visualization** is for visualizing skeletons, heatmaps and other information + +## How to Use this Guide + +We have prepared detailed guidelines for all types of users: + +1. For installation instructions: + + - [Installation](./installation.md) + +2. For the basic usage of MMPose: + + - [A 20-minute Tour to MMPose](./guide_to_framework.md) + - [Demos](./demos.md) + - [Inference](./user_guides/inference.md) + - [Configs](./user_guides/configs.md) + - [Prepare Datasets](./user_guides/prepare_datasets.md) + - [Train and Test](./user_guides/train_and_test.md) + +3. For developers who wish to develop based on MMPose: + + - [Learn about Codecs](./advanced_guides/codecs.md) + - [Dataflow in MMPose](./advanced_guides/dataflow.md) + - [Implement New Models](./advanced_guides/implement_new_models.md) + - [Customize Datasets](./advanced_guides/customize_datasets.md) + - [Customize Data Transforms](./advanced_guides/customize_transforms.md) + - [Customize Optimizer](./advanced_guides/customize_optimizer.md) + - [Customize Logging](./advanced_guides/customize_logging.md) + - [How to Deploy](./advanced_guides/how_to_deploy.md) + - [Model Analysis](./advanced_guides/model_analysis.md) + - [Migration Guide](./migration.md) + +4. For researchers and developers who are willing to contribute to MMPose: + + - [Contribution Guide](./contribution_guide.md) + +5. For some common issues, we provide a FAQ list: + + - [FAQ](./faq.md) diff --git a/docs/en/projects/projects.md b/docs/en/projects/projects.md index 460d8583bd..599c54055f 100644 --- a/docs/en/projects/projects.md +++ b/docs/en/projects/projects.md @@ -1,20 +1,20 @@ -# Projects based on MMPose - -There are many projects built upon MMPose. We list some of them as examples of how to extend MMPose for your own projects. As this page may not be complete, please feel free to create a PR to update it. - -## Projects as an extension - -Some projects extend the boundary of MMPose for deployment or other research fields. They reveal the potential of what MMPose can do. We list several of them below. - -- [Anime Face Detector](https://github.com/hysts/anime-face-detector): An anime face landmark detection toolbox. -- [PosePipeline](https://github.com/peabody124/PosePipeline): Open-Source Human Pose Estimation Pipeline for Clinical Research - -## Projects of papers - -There are also projects released with papers. Some of the papers are published in top-tier conferences (CVPR, ICCV, and ECCV); the others are also highly influential. We list some of these works as a reference for the community to develop and compare new pose estimation algorithms. Methods already supported and maintained by MMPose are not listed. - -- Pose for Everything: Towards Category-Agnostic Pose Estimation, ECCV 2022.
[\[paper\]](https://arxiv.org/abs/2207.10387)[\[github\]](https://github.com/luminxu/Pose-for-Everything) -- UniFormer: Unified Transformer for Efficient Spatiotemporal Representation Learning, ICLR 2022. [\[paper\]](https://arxiv.org/abs/2201.04676)[\[github\]](https://github.com/Sense-X/UniFormer) -- Poseur: Direct Human Pose Regression with Transformers, ECCV 2022. [\[paper\]](https://arxiv.org/abs/2201.07412)[\[github\]](https://github.com/aim-uofa/Poseur) -- ViTAEv2: Vision Transformer Advanced by Exploring Inductive Bias for Image Recognition and Beyond, NeurIPS 2022. [\[paper\]](https://arxiv.org/abs/2106.03348)[\[github\]](https://github.com/ViTAE-Transformer/ViTAE-Transformer) -- Dite-HRNet: Dynamic Lightweight High-Resolution Network for Human Pose Estimation, IJCAI-ECAI 2022. [\[paper\]](https://arxiv.org/abs/2204.10762)[\[github\]](https://github.com/ZiyiZhang27/Dite-HRNet) +# Projects based on MMPose + +There are many projects built upon MMPose. We list some of them as examples of how to extend MMPose for your own projects. As this page may not be complete, please feel free to create a PR to update it. + +## Projects as an extension + +Some projects extend the boundary of MMPose for deployment or other research fields. They reveal the potential of what MMPose can do. We list several of them below. + +- [Anime Face Detector](https://github.com/hysts/anime-face-detector): An anime face landmark detection toolbox. +- [PosePipeline](https://github.com/peabody124/PosePipeline): Open-Source Human Pose Estimation Pipeline for Clinical Research + +## Projects of papers + +There are also projects released with papers. Some of the papers are published in top-tier conferences (CVPR, ICCV, and ECCV); the others are also highly influential. We list some of these works as a reference for the community to develop and compare new pose estimation algorithms. Methods already supported and maintained by MMPose are not listed. + +- Pose for Everything: Towards Category-Agnostic Pose Estimation, ECCV 2022. [\[paper\]](https://arxiv.org/abs/2207.10387)[\[github\]](https://github.com/luminxu/Pose-for-Everything) +- UniFormer: Unified Transformer for Efficient Spatiotemporal Representation Learning, ICLR 2022. [\[paper\]](https://arxiv.org/abs/2201.04676)[\[github\]](https://github.com/Sense-X/UniFormer) +- Poseur: Direct Human Pose Regression with Transformers, ECCV 2022. [\[paper\]](https://arxiv.org/abs/2201.07412)[\[github\]](https://github.com/aim-uofa/Poseur) +- ViTAEv2: Vision Transformer Advanced by Exploring Inductive Bias for Image Recognition and Beyond, NeurIPS 2022. [\[paper\]](https://arxiv.org/abs/2106.03348)[\[github\]](https://github.com/ViTAE-Transformer/ViTAE-Transformer) +- Dite-HRNet: Dynamic Lightweight High-Resolution Network for Human Pose Estimation, IJCAI-ECAI 2022. [\[paper\]](https://arxiv.org/abs/2204.10762)[\[github\]](https://github.com/ZiyiZhang27/Dite-HRNet) diff --git a/docs/en/quick_run.md b/docs/en/quick_run.md index 51aabfc967..5a208dce76 100644 --- a/docs/en/quick_run.md +++ b/docs/en/quick_run.md @@ -1,190 +1,190 @@ -# Quick Run - -This page provides a basic tutorial about the usage of MMPose. - -We will walk you through the 7 key steps of a typical MMPose workflow by training a top-down residual log-likelihood algorithm based on ResNet-50 on the COCO dataset: - -1. Inference with a pretrained model -2. Prepare the dataset -3. Prepare a config -4. Browse the transformed images -5. Training -6. Testing -7.
Visualization - -## Installation - -For installation instructions, please refer to [Installation](./installation.md). - -## Get Started - -### Inference with a pretrained model - -We provide a useful script to perform pose estimation with a pretrained model: - -```Bash -python demo/image_demo.py \ - tests/data/coco/000000000785.jpg \ - configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py \ - https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res50_coco_256x192_rle-2ea9bb4a_20220616.pth -``` - -If MMPose is properly installed, you will get the visualized result as follows: - -![inference_demo](https://user-images.githubusercontent.com/13503330/187112344-0c5062f2-689c-445c-a259-d5d4311e2497.png) - -```{note} -More demo and full instructions can be found in [Inference](./user_guides/inference.md). -``` - -### Prepare the dataset - -MMPose supports multiple tasks. We provide the corresponding guidelines for data preparation. - -- [2D Body Keypoint Detection](./dataset_zoo/2d_body_keypoint.md) - -- [3D Body Keypoint Detection](./dataset_zoo/3d_body_keypoint.md) - -- [2D Hand Keypoint Detection](./dataset_zoo/2d_hand_keypoint.md) - -- [3D Hand Keypoint Detection](./dataset_zoo/3d_hand_keypoint.md) - -- [2D Face Keypoint Detection](./dataset_zoo/2d_face_keypoint.md) - -- [2D WholeBody Keypoint Detection](./dataset_zoo/2d_wholebody_keypoint.md) - -- [2D Fashion Landmark Detection](./dataset_zoo/2d_fashion_landmark.md) - -- [2D Animal Keypoint Detection](./dataset_zoo/2d_animal_keypoint.md) - -You can refer to \[2D Body Keypoint Detection\] > \[COCO\] for COCO dataset preparation. - -```{note} -In MMPose, we suggest placing the data under `$MMPOSE/data`. -``` - -### Prepare a config - -MMPose is equipped with a powerful config system to conduct various experiments conveniently. A config file organizes the settings of: - -- **General**: basic configurations non-related to training or testing, such as Timer, Logger, Visualizer and other Hooks, as well as distributed-related environment settings - -- **Data**: dataset, dataloader and data augmentation - -- **Training**: resume, weights loading, optimizer, learning rate scheduling, epochs and valid interval etc. - -- **Model**: structure, module and loss function etc. - -- **Evaluation**: metrics - -We provide a bunch of well-prepared configs under `$MMPOSE/configs` so that you can directly use or modify. - -Going back to our example, we will use the prepared config: - -```Bash -$MMPOSE/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py -``` - -You can set the path of the COCO dataset by modifying `data_root` in the config: - -```Python -data_root = 'data/coco' -``` - -```{note} -If you wish to learn more about our config system, please refer to [Configs](./user_guides/configs.md). 
-``` - -### Browse the transformed images - -Before training, we can browse the transformed training data to check if the images are augmented properly: - -```Bash -python tools/misc/browse_dastaset.py \ - configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py \ - --mode transformed -``` - -![transformed_training_img](https://user-images.githubusercontent.com/13503330/187112376-e604edcb-46cc-4995-807b-e8f204f991b0.png) - -### Training - -Use the following command to train with a single GPU: - -```Bash -python tools/train.py configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py -``` - -```{note} -MMPose automates many useful training tricks and functions including: - -- Learning rate warmup and scheduling - -- ImageNet pretrained models - -- Automatic learning rate scaling - -- Multi-GPU and Multi-Node training support - -- Various Data backend support, e.g. HardDisk, LMDB, Petrel, HTTP etc. - -- Mixed precision training support - -- TensorBoard -``` - -### Testing - -Checkpoints and logs will be saved under `$MMPOSE/work_dirs` by default. The best model is under `$MMPOSE/work_dir/best_coco`. - -Use the following command to evaluate the model on COCO dataset: - -```Bash -python tools/test.py \ - configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py \ - work_dir/best_coco/AP_epoch_20.pth -``` - -Here is an example of evaluation results: - -```Bash - Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets= 20 ] = 0.704 - Average Precision (AP) @[ IoU=0.50 | area= all | maxDets= 20 ] = 0.883 - Average Precision (AP) @[ IoU=0.75 | area= all | maxDets= 20 ] = 0.777 - Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets= 20 ] = 0.667 - Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets= 20 ] = 0.769 - Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 20 ] = 0.751 - Average Recall (AR) @[ IoU=0.50 | area= all | maxDets= 20 ] = 0.920 - Average Recall (AR) @[ IoU=0.75 | area= all | maxDets= 20 ] = 0.815 - Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets= 20 ] = 0.709 - Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets= 20 ] = 0.811 -08/23 12:04:42 - mmengine - INFO - Epoch(test) [3254/3254] coco/AP: 0.704168 coco/AP .5: 0.883134 coco/AP .75: 0.777015 coco/AP (M): 0.667207 coco/AP (L): 0.768644 coco/AR: 0.750913 coco/AR .5: 0.919710 coco/AR .75: 0.815334 coco/AR (M): 0.709232 coco/AR (L): 0.811334 -``` - -```{note} -If you want to perform evaluation on other datasets, please refer to [Train & Test](./user_guides/train_and_test.md). -``` - -### Visualization - -In addition to the visualization of the keypoint skeleton, MMPose also supports the visualization of Heatmaps by setting `output_heatmap=True` in confg: - -```Python -model = dict( - ## omitted - test_cfg = dict( - ## omitted - output_heatmaps=True - ) -) -``` - -or add `--cfg-options='model.test_cfg.output_heatmaps=True'` at the end of your command. - -Visualization result (top: decoded keypoints; bottom: predicted heatmap): - -![vis_pred](https://user-images.githubusercontent.com/26127467/187578902-30ef7bb0-9a93-4e03-bae0-02aeccf7f689.jpg) - -```{note} -If you wish to apply MMPose to your own projects, we have prepared a detailed [Migration guide](./migration.md). -``` +# Quick Run + +This page provides a basic tutorial about the usage of MMPose. 
+ +We will walk you through the 7 key steps of a typical MMPose workflow by training a top-down residual log-likelihood algorithm based on ResNet-50 on the COCO dataset: + +1. Inference with a pretrained model +2. Prepare the dataset +3. Prepare a config +4. Browse the transformed images +5. Training +6. Testing +7. Visualization + +## Installation + +For installation instructions, please refer to [Installation](./installation.md). + +## Get Started + +### Inference with a pretrained model + +We provide a useful script to perform pose estimation with a pretrained model: + +```Bash +python demo/image_demo.py \ + tests/data/coco/000000000785.jpg \ + configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py \ + https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res50_coco_256x192_rle-2ea9bb4a_20220616.pth +``` + +If MMPose is properly installed, you will get the visualized result as follows: + +![inference_demo](https://user-images.githubusercontent.com/13503330/187112344-0c5062f2-689c-445c-a259-d5d4311e2497.png) + +```{note} +More demos and full instructions can be found in [Inference](./user_guides/inference.md). +``` + +### Prepare the dataset + +MMPose supports multiple tasks. We provide the corresponding guidelines for data preparation. + +- [2D Body Keypoint Detection](./dataset_zoo/2d_body_keypoint.md) + +- [3D Body Keypoint Detection](./dataset_zoo/3d_body_keypoint.md) + +- [2D Hand Keypoint Detection](./dataset_zoo/2d_hand_keypoint.md) + +- [3D Hand Keypoint Detection](./dataset_zoo/3d_hand_keypoint.md) + +- [2D Face Keypoint Detection](./dataset_zoo/2d_face_keypoint.md) + +- [2D WholeBody Keypoint Detection](./dataset_zoo/2d_wholebody_keypoint.md) + +- [2D Fashion Landmark Detection](./dataset_zoo/2d_fashion_landmark.md) + +- [2D Animal Keypoint Detection](./dataset_zoo/2d_animal_keypoint.md) + +You can refer to \[2D Body Keypoint Detection\] > \[COCO\] for COCO dataset preparation. + +```{note} +In MMPose, we suggest placing the data under `$MMPOSE/data`. +``` + +### Prepare a config + +MMPose is equipped with a powerful config system to conduct various experiments conveniently. A config file organizes the settings of: + +- **General**: basic configurations non-related to training or testing, such as Timer, Logger, Visualizer and other Hooks, as well as distributed-related environment settings + +- **Data**: dataset, dataloader and data augmentation + +- **Training**: resume, weights loading, optimizer, learning rate scheduling, epochs and valid interval etc. + +- **Model**: structure, module and loss function etc. + +- **Evaluation**: metrics + +We provide a bunch of well-prepared configs under `$MMPOSE/configs` so that you can use or modify them directly. + +Going back to our example, we will use the prepared config: + +```Bash +$MMPOSE/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py +``` + +You can set the path of the COCO dataset by modifying `data_root` in the config: + +```Python +data_root = 'data/coco' +``` + +```{note} +If you wish to learn more about our config system, please refer to [Configs](./user_guides/configs.md).
+``` + +### Browse the transformed images + +Before training, we can browse the transformed training data to check if the images are augmented properly: + +```Bash +python tools/misc/browse_dataset.py \ + configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py \ + --mode transformed +``` + +![transformed_training_img](https://user-images.githubusercontent.com/13503330/187112376-e604edcb-46cc-4995-807b-e8f204f991b0.png) + +### Training + +Use the following command to train with a single GPU: + +```Bash +python tools/train.py configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py +``` + +```{note} +MMPose automates many useful training tricks and functions including: + +- Learning rate warmup and scheduling + +- ImageNet pretrained models + +- Automatic learning rate scaling + +- Multi-GPU and Multi-Node training support + +- Various Data backend support, e.g. HardDisk, LMDB, Petrel, HTTP etc. + +- Mixed precision training support + +- TensorBoard +``` + +### Testing + +Checkpoints and logs will be saved under `$MMPOSE/work_dirs` by default. The best model is under `$MMPOSE/work_dirs/best_coco`. + +Use the following command to evaluate the model on the COCO dataset: + +```Bash +python tools/test.py \ + configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py \ + work_dirs/best_coco/AP_epoch_20.pth +``` + +Here is an example of evaluation results: + +```Bash + Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets= 20 ] = 0.704 + Average Precision (AP) @[ IoU=0.50 | area= all | maxDets= 20 ] = 0.883 + Average Precision (AP) @[ IoU=0.75 | area= all | maxDets= 20 ] = 0.777 + Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets= 20 ] = 0.667 + Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets= 20 ] = 0.769 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 20 ] = 0.751 + Average Recall (AR) @[ IoU=0.50 | area= all | maxDets= 20 ] = 0.920 + Average Recall (AR) @[ IoU=0.75 | area= all | maxDets= 20 ] = 0.815 + Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets= 20 ] = 0.709 + Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets= 20 ] = 0.811 +08/23 12:04:42 - mmengine - INFO - Epoch(test) [3254/3254] coco/AP: 0.704168 coco/AP .5: 0.883134 coco/AP .75: 0.777015 coco/AP (M): 0.667207 coco/AP (L): 0.768644 coco/AR: 0.750913 coco/AR .5: 0.919710 coco/AR .75: 0.815334 coco/AR (M): 0.709232 coco/AR (L): 0.811334 +``` + +```{note} +If you want to perform evaluation on other datasets, please refer to [Train & Test](./user_guides/train_and_test.md). +``` + +### Visualization + +In addition to the visualization of the keypoint skeleton, MMPose also supports the visualization of heatmaps by setting `output_heatmaps=True` in the config: + +```Python +model = dict( + ## omitted + test_cfg = dict( + ## omitted + output_heatmaps=True + ) +) +``` + +or add `--cfg-options='model.test_cfg.output_heatmaps=True'` at the end of your command. + +Visualization result (top: decoded keypoints; bottom: predicted heatmap): + +![vis_pred](https://user-images.githubusercontent.com/26127467/187578902-30ef7bb0-9a93-4e03-bae0-02aeccf7f689.jpg) + +```{note} +If you wish to apply MMPose to your own projects, we have prepared a detailed [Migration guide](./migration.md). +``` diff --git a/docs/en/stats.py b/docs/en/stats.py index 6d92d744ea..e5bc46d0df 100644 --- a/docs/en/stats.py +++ b/docs/en/stats.py @@ -1,176 +1,176 @@ -#!/usr/bin/env python -# Copyright (c) OpenMMLab.
All rights reserved. -import functools as func -import glob -import re -from os.path import basename, splitext - -import numpy as np -import titlecase - - -def anchor(name): - return re.sub(r'-+', '-', re.sub(r'[^a-zA-Z0-9]', '-', - name.strip().lower())).strip('-') - - -# Count algorithms - -files = sorted(glob.glob('model_zoo/*.md')) - -stats = [] - -for f in files: - with open(f, 'r') as content_file: - content = content_file.read() - - # title - title = content.split('\n')[0].replace('#', '') - - # count papers - papers = set( - (papertype, titlecase.titlecase(paper.lower().strip())) - for (papertype, paper) in re.findall( - r'\s*\n.*?\btitle\s*=\s*{(.*?)}', - content, re.DOTALL)) - # paper links - revcontent = '\n'.join(list(reversed(content.splitlines()))) - paperlinks = {} - for _, p in papers: - # print(p) - paperlinks[p] = ', '.join( - ((f'[{paperlink} ⇨]' - f'(model_zoo/{splitext(basename(f))[0]}.html#' - f'{anchor(paperlink)})') for paperlink in re.findall( - rf'\btitle\s*=\s*{{\s*{p}\s*}}.*?\n### (.*?)\s*[,;]?\s*\n', - revcontent, re.DOTALL | re.IGNORECASE))) - # print(' ', paperlinks[p]) - paperlist = '\n'.join( - sorted(f' - [{t}] {x} ({paperlinks[x]})' for t, x in papers)) - # count configs - configs = set(x.lower().strip() - for x in re.findall(r'.*configs/.*\.py', content)) - - # count ckpts - ckpts = set(x.lower().strip() - for x in re.findall(r'https://download.*\.pth', content) - if 'mmpose' in x) - - statsmsg = f""" -## [{title}]({f}) - -* Number of checkpoints: {len(ckpts)} -* Number of configs: {len(configs)} -* Number of papers: {len(papers)} -{paperlist} - - """ - - stats.append((papers, configs, ckpts, statsmsg)) - -allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _, _ in stats]) -allconfigs = func.reduce(lambda a, b: a.union(b), [c for _, c, _, _ in stats]) -allckpts = func.reduce(lambda a, b: a.union(b), [c for _, _, c, _ in stats]) - -# Summarize - -msglist = '\n'.join(x for _, _, _, x in stats) -papertypes, papercounts = np.unique([t for t, _ in allpapers], - return_counts=True) -countstr = '\n'.join( - [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) - -modelzoo = f""" -# Overview - -* Number of checkpoints: {len(allckpts)} -* Number of configs: {len(allconfigs)} -* Number of papers: {len(allpapers)} -{countstr} - -For supported datasets, see [datasets overview](dataset_zoo.md). 
- -{msglist} - -""" - -with open('model_zoo.md', 'w') as f: - f.write(modelzoo) - -# Count datasets - -files = sorted(glob.glob('model_zoo/*.md')) -# files = sorted(glob.glob('docs/tasks/*.md')) - -datastats = [] - -for f in files: - with open(f, 'r') as content_file: - content = content_file.read() - - # title - title = content.split('\n')[0].replace('#', '') - - # count papers - papers = set( - (papertype, titlecase.titlecase(paper.lower().strip())) - for (papertype, paper) in re.findall( - r'\s*\n.*?\btitle\s*=\s*{(.*?)}', - content, re.DOTALL)) - # paper links - revcontent = '\n'.join(list(reversed(content.splitlines()))) - paperlinks = {} - for _, p in papers: - # print(p) - paperlinks[p] = ', '.join( - (f'[{p} ⇨](model_zoo/{splitext(basename(f))[0]}.html#' - f'{anchor(p)})' for p in re.findall( - rf'\btitle\s*=\s*{{\s*{p}\s*}}.*?\n## (.*?)\s*[,;]?\s*\n', - revcontent, re.DOTALL | re.IGNORECASE))) - # print(' ', paperlinks[p]) - paperlist = '\n'.join( - sorted(f' - [{t}] {x} ({paperlinks[x]})' for t, x in papers)) - # count configs - configs = set(x.lower().strip() - for x in re.findall(r'https.*configs/.*\.py', content)) - - # count ckpts - ckpts = set(x.lower().strip() - for x in re.findall(r'https://download.*\.pth', content) - if 'mmpose' in x) - - statsmsg = f""" -## [{title}]({f}) - -* Number of papers: {len(papers)} -{paperlist} - - """ - - datastats.append((papers, configs, ckpts, statsmsg)) - -alldatapapers = func.reduce(lambda a, b: a.union(b), - [p for p, _, _, _ in datastats]) - -# Summarize - -msglist = '\n'.join(x for _, _, _, x in stats) -datamsglist = '\n'.join(x for _, _, _, x in datastats) -papertypes, papercounts = np.unique([t for t, _ in alldatapapers], - return_counts=True) -countstr = '\n'.join( - [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) - -dataset_zoo = f""" -# Overview - -* Number of papers: {len(alldatapapers)} -{countstr} - -For supported pose algorithms, see [modelzoo overview](model_zoo.md). - -{datamsglist} -""" - -with open('dataset_zoo.md', 'w') as f: - f.write(dataset_zoo) +#!/usr/bin/env python +# Copyright (c) OpenMMLab. All rights reserved. 
+import functools as func +import glob +import re +from os.path import basename, splitext + +import numpy as np +import titlecase + + +def anchor(name): + return re.sub(r'-+', '-', re.sub(r'[^a-zA-Z0-9]', '-', + name.strip().lower())).strip('-') + + +# Count algorithms + +files = sorted(glob.glob('model_zoo/*.md')) + +stats = [] + +for f in files: + with open(f, 'r') as content_file: + content = content_file.read() + + # title + title = content.split('\n')[0].replace('#', '') + + # count papers + papers = set( + (papertype, titlecase.titlecase(paper.lower().strip())) + for (papertype, paper) in re.findall( + r'\s*\n.*?\btitle\s*=\s*{(.*?)}', + content, re.DOTALL)) + # paper links + revcontent = '\n'.join(list(reversed(content.splitlines()))) + paperlinks = {} + for _, p in papers: + # print(p) + paperlinks[p] = ', '.join( + ((f'[{paperlink} ⇨]' + f'(model_zoo/{splitext(basename(f))[0]}.html#' + f'{anchor(paperlink)})') for paperlink in re.findall( + rf'\btitle\s*=\s*{{\s*{p}\s*}}.*?\n### (.*?)\s*[,;]?\s*\n', + revcontent, re.DOTALL | re.IGNORECASE))) + # print(' ', paperlinks[p]) + paperlist = '\n'.join( + sorted(f' - [{t}] {x} ({paperlinks[x]})' for t, x in papers)) + # count configs + configs = set(x.lower().strip() + for x in re.findall(r'.*configs/.*\.py', content)) + + # count ckpts + ckpts = set(x.lower().strip() + for x in re.findall(r'https://download.*\.pth', content) + if 'mmpose' in x) + + statsmsg = f""" +## [{title}]({f}) + +* Number of checkpoints: {len(ckpts)} +* Number of configs: {len(configs)} +* Number of papers: {len(papers)} +{paperlist} + + """ + + stats.append((papers, configs, ckpts, statsmsg)) + +allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _, _ in stats]) +allconfigs = func.reduce(lambda a, b: a.union(b), [c for _, c, _, _ in stats]) +allckpts = func.reduce(lambda a, b: a.union(b), [c for _, _, c, _ in stats]) + +# Summarize + +msglist = '\n'.join(x for _, _, _, x in stats) +papertypes, papercounts = np.unique([t for t, _ in allpapers], + return_counts=True) +countstr = '\n'.join( + [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) + +modelzoo = f""" +# Overview + +* Number of checkpoints: {len(allckpts)} +* Number of configs: {len(allconfigs)} +* Number of papers: {len(allpapers)} +{countstr} + +For supported datasets, see [datasets overview](dataset_zoo.md). 
+ +{msglist} + +""" + +with open('model_zoo.md', 'w') as f: + f.write(modelzoo) + +# Count datasets + +files = sorted(glob.glob('model_zoo/*.md')) +# files = sorted(glob.glob('docs/tasks/*.md')) + +datastats = [] + +for f in files: + with open(f, 'r') as content_file: + content = content_file.read() + + # title + title = content.split('\n')[0].replace('#', '') + + # count papers + papers = set( + (papertype, titlecase.titlecase(paper.lower().strip())) + for (papertype, paper) in re.findall( + r'\s*\n.*?\btitle\s*=\s*{(.*?)}', + content, re.DOTALL)) + # paper links + revcontent = '\n'.join(list(reversed(content.splitlines()))) + paperlinks = {} + for _, p in papers: + # print(p) + paperlinks[p] = ', '.join( + (f'[{p} ⇨](model_zoo/{splitext(basename(f))[0]}.html#' + f'{anchor(p)})' for p in re.findall( + rf'\btitle\s*=\s*{{\s*{p}\s*}}.*?\n## (.*?)\s*[,;]?\s*\n', + revcontent, re.DOTALL | re.IGNORECASE))) + # print(' ', paperlinks[p]) + paperlist = '\n'.join( + sorted(f' - [{t}] {x} ({paperlinks[x]})' for t, x in papers)) + # count configs + configs = set(x.lower().strip() + for x in re.findall(r'https.*configs/.*\.py', content)) + + # count ckpts + ckpts = set(x.lower().strip() + for x in re.findall(r'https://download.*\.pth', content) + if 'mmpose' in x) + + statsmsg = f""" +## [{title}]({f}) + +* Number of papers: {len(papers)} +{paperlist} + + """ + + datastats.append((papers, configs, ckpts, statsmsg)) + +alldatapapers = func.reduce(lambda a, b: a.union(b), + [p for p, _, _, _ in datastats]) + +# Summarize + +msglist = '\n'.join(x for _, _, _, x in stats) +datamsglist = '\n'.join(x for _, _, _, x in datastats) +papertypes, papercounts = np.unique([t for t, _ in alldatapapers], + return_counts=True) +countstr = '\n'.join( + [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) + +dataset_zoo = f""" +# Overview + +* Number of papers: {len(alldatapapers)} +{countstr} + +For supported pose algorithms, see [modelzoo overview](model_zoo.md). + +{datamsglist} +""" + +with open('dataset_zoo.md', 'w') as f: + f.write(dataset_zoo) diff --git a/docs/en/switch_language.md b/docs/en/switch_language.md index a0a6259bee..c0f410d59d 100644 --- a/docs/en/switch_language.md +++ b/docs/en/switch_language.md @@ -1,3 +1,3 @@ -## English - -## 简体中文 +## English + +## 简体中文 diff --git a/docs/en/user_guides/configs.md b/docs/en/user_guides/configs.md index 9d2c44f7ff..c441064a9c 100644 --- a/docs/en/user_guides/configs.md +++ b/docs/en/user_guides/configs.md @@ -1,462 +1,462 @@ -# Configs - -We use python files as configs and incorporate modular and inheritance design into our config system, which is convenient to conduct various experiments. - -## Introduction - -MMPose is equipped with a powerful config system. Cooperating with Registry, a config file can organize all the configurations in the form of python dictionaries and create instances of the corresponding modules. 
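As a side note before the registry example that follows, here is a minimal sketch of how such a config file is loaded with MMEngine's public `Config` API; the config path and the printed keys are placeholders rather than files or fields guaranteed by this guide:

```Python
from mmengine.config import Config

# Load a config file into a dict-like Config object (the path is a placeholder).
cfg = Config.fromfile('configs/body_2d_keypoint/example_config.py')

# Entries can be accessed either as attributes or as dictionary keys;
# which keys exist depends entirely on the config file itself.
print(cfg.model)
print(cfg['train_dataloader']['batch_size'])
```

The rest of this section shows how the dictionaries inside such a file (e.g. `loss_cfg` below) are turned into actual module instances via the registry.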
- -Here is a simple example of a vanilla PyTorch module definition to show how the config system works: - -```Python -# Definition of Loss_A in loss_a.py -from torch import nn - -class Loss_A(nn.Module): - def __init__(self, param1, param2): - super().__init__() - self.param1 = param1 - self.param2 = param2 - def forward(self, x): - return x - -# Init the module -loss = Loss_A(param1=1.0, param2=True) -``` - -All you need to do is register the module to the pre-defined Registry `MODELS`: - -```Python -# Definition of Loss_A in loss_a.py -from torch import nn - -from mmpose.registry import MODELS - -@MODELS.register_module() # register the module to MODELS -class Loss_A(nn.Module): - def __init__(self, param1, param2): - super().__init__() - self.param1 = param1 - self.param2 = param2 - def forward(self, x): - return x -``` - -And import the new module in `__init__.py` in the corresponding directory: - -```Python -# __init__.py of mmpose/models/losses -from .loss_a import Loss_A - -__all__ = ['Loss_A'] -``` - -Then you can use the module in a config anywhere you want: - -```Python -# config_file.py -loss_cfg = dict( - type='Loss_A', # specify your registered module via `type` - param1=1.0, # pass parameters to __init__() of the module - param2=True -) - -# Init the module -loss = MODELS.build(loss_cfg) # equivalent to `loss = Loss_A(param1=1.0, param2=True)` -``` - -```{note} -Note that all new modules need to be registered using `Registry` and imported in `__init__.py` in the corresponding directory before we can create their instances from configs. -``` - -Here is a list of pre-defined registries in MMPose: - -- `DATASETS`: data-related modules -- `TRANSFORMS`: data transformations -- `MODELS`: all kinds of modules inheriting `nn.Module` (Backbone, Neck, Head, Loss, etc.) -- `VISUALIZERS`: visualization tools -- `VISBACKENDS`: visualizer backend -- `METRICS`: all kinds of evaluation metrics -- `KEYPOINT_CODECS`: keypoint encoder/decoder -- `HOOKS`: all kinds of hooks like `CheckpointHook` - -All registries are defined in `$MMPOSE/mmpose/registry.py`. - -## Config System - -It is best practice to layer your configs in five sections: - -- **General**: basic configurations non-related to training or testing, such as Timer, Logger, Visualizer and other Hooks, as well as distributed-related environment settings - -- **Data**: dataset, dataloader and data augmentation - -- **Training**: resume, weights loading, optimizer, learning rate scheduling, epochs and valid interval etc. - -- **Model**: structure, module and loss function etc. - -- **Evaluation**: metrics - -You can find all the provided configs under `$MMPOSE/configs`. A config can inherit contents from another config. To keep a config file simple and easy to read, we store some necessary but unremarkable configurations in `$MMPOSE/configs/_base_`. You can inspect the complete configurations by: - -```Bash -python tools/analysis/print_config.py /PATH/TO/CONFIG -``` - -### General - -General configuration refers to the necessary configuration non-related to training or testing, mainly including: - -- **Default Hooks**: time statistics, training logs, checkpoints etc. - -- **Environment**: distributed backend, cudnn, multi-processing etc. - -- **Visualizer**: visualization backend and strategy - -- **Log**: log level, format, printing and recording interval etc.
- -Here is the description of General configuration: - -```Python -# General -default_scope = 'mmpose' -default_hooks = dict( - timer=dict(type='IterTimerHook'), # time the data processing and model inference - logger=dict(type='LoggerHook', interval=50), # interval to print logs - param_scheduler=dict(type='ParamSchedulerHook'), # update lr - checkpoint=dict( - type='CheckpointHook', interval=1, save_best='coco/AP', # interval to save ckpt - rule='greater'), # rule to judge the metric - sampler_seed=dict(type='DistSamplerSeedHook')) # set the distributed seed -env_cfg = dict( - cudnn_benchmark=False, # cudnn benchmark flag - mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), # num of opencv threads - dist_cfg=dict(backend='nccl')) # distributed training backend -vis_backends = [dict(type='LocalVisBackend')] # visualizer backend -visualizer = dict( # Config of visualizer - type='PoseLocalVisualizer', - vis_backends=[dict(type='LocalVisBackend')], - name='visualizer') -log_processor = dict( # Format, interval to log - type='LogProcessor', window_size=50, by_epoch=True, num_digits=6) -log_level = 'INFO' # The level of logging -``` - -General configuration is stored alone in the `$MMPOSE/configs/_base_`, and inherited by doing: - -```Python -_base_ = ['../../../_base_/default_runtime.py'] # take the config file as the starting point of the relative path -``` - -```{note} -CheckpointHook: - -- save_best: `'coco/AP'` for `CocoMetric`, `'PCK'` for `PCKAccuracy` -- max_keep_ckpts: the maximum checkpoints to keep. Defaults to -1, which means unlimited. - -Example: - -`default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater', max_keep_ckpts=1))` -``` - -### Data - -Data configuration refers to the data processing related settings, mainly including: - -- **File Client**: data storage backend, default is `disk`, we also support `LMDB`, `S3 Bucket` etc. - -- **Dataset**: image and annotation file path - -- **Dataloader**: loading configuration, batch size etc. 
- -- **Pipeline**: data augmentation - -- **Input Encoder**: encoding the annotation into specific form of target - -Here is the description of Data configuration: - -```Python -backend_args = dict(backend='local') # data storage backend -dataset_type = 'CocoDataset' # name of dataset -data_mode = 'topdown' # type of the model -data_root = 'data/coco/' # root of the dataset - # config of codec,to generate targets and decode preds into coordinates -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) -train_pipeline = [ # data aug in training - dict(type='LoadImage', backend_args=backend_args, # image loading - dict(type='GetBBoxCenterScale'), # calculate center and scale of bbox - dict(type='RandomBBoxTransform'), # config of scaling, rotation and shifing - dict(type='RandomFlip', direction='horizontal'), # config of random flipping - dict(type='RandomHalfBody'), # config of half-body aug - dict(type='TopdownAffine', input_size=codec['input_size']), # update inputs via transform matrix - dict( - type='GenerateTarget', # generate targets via transformed inputs - # typeof targets - encoder=codec, # get encoder from codec - dict(type='PackPoseInputs') # pack targets -] -test_pipeline = [ # data aug in testing - dict(type='LoadImage', backend_args=backend_args), # image loading - dict(type='GetBBoxCenterScale'), # calculate center and scale of bbox - dict(type='TopdownAffine', input_size=codec['input_size']), # update inputs via transform matrix - dict(type='PackPoseInputs') # pack targets -] -train_dataloader = dict( - batch_size=64, # batch size of each single GPU during training - num_workers=2, # workers to pre-fetch data for each single GPU - persistent_workers=True, # workers will stay around (with their state) waiting for another call into that dataloader. - sampler=dict(type='DefaultSampler', shuffle=True), # data sampler, shuffle in traning - dataset=dict( - type=dataset_type , # name of dataset - data_root=data_root, # root of dataset - data_mode=data_mode, # type of the model - ann_file='annotations/person_keypoints_train2017.json', # path to annotation file - data_prefix=dict(img='train2017/'), # path to images - pipeline=train_pipeline - )) -val_dataloader = dict( - batch_size=32, # batch size of each single GPU during validation - num_workers=2, # workers to pre-fetch data for each single GPU - persistent_workers=True, # workers will stay around (with their state) waiting for another call into that dataloader. 
- drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False), # data sampler - dataset=dict( - type=dataset_type , # name of dataset - data_root=data_root, # root of dataset - data_mode=data_mode, # type of the model - ann_file='annotations/person_keypoints_val2017.json', # path to annotation file - bbox_file= - 'data/coco/person_detection_results/COCO_val2017_detections_AP_H_56_person.json', # bbox file use for evaluation - data_prefix=dict(img='val2017/'), # path to images - test_mode=True, - pipeline=test_pipeline - )) -test_dataloader = val_dataloader # use val as test by default -``` - -```{note} -Common Usages: -- [Resume training](../common_usages/resume_training.md) -- [Automatic mixed precision (AMP) training](../common_usages/amp_training.md) -- [Set the random seed](../common_usages/set_random_seed.md) - -``` - -### Training - -Training configuration refers to the training related settings including: - -- Resume training - -- Model weights loading - -- Epochs of training and interval to validate - -- Learning rate adjustment strategies like warm-up, scheduling etc. - -- Optimizer and initial learning rate - -- Advanced tricks like auto learning rate scaling - -Here is the description of Training configuration: - -```Python -resume = False # resume checkpoints from a given path, the training will be resumed from the epoch when the checkpoint's is saved -load_from = None # load models as a pre-trained model from a given path -train_cfg = dict(by_epoch=True, max_epochs=210, val_interval=10) # max epochs of training, interval to validate -param_scheduler = [ - dict( # warmup strategy - type='LinearLR', begin=0, end=500, start_factor=0.001, by_epoch=False), - dict( # scheduler - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] -optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005)) # optimizer and initial lr -auto_scale_lr = dict(base_batch_size=512) # auto scale the lr according to batch size -``` - -### Model - -Model configuration refers to model training and inference related settings including: - -- Model Structure - -- Loss Function - -- Output Decoding - -- Test-time augmentation - -Here is the description of Model configuration, which defines a Top-down Heatmap-based HRNetx32: - -```Python -# config of codec, if already defined in data configuration section, no need to define again -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -model = dict( - type='TopdownPoseEstimator', # Macro model structure - data_preprocessor=dict( # data normalization and channel transposition - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( # config of backbone - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', # load pretrained weights to backbone - checkpoint='https://download.openmmlab.com/mmpose' - '/pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( # config of head - 
type='HeatmapHead', - in_channels=32, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), # config of loss function - decoder=codec), # get decoder from codec - test_cfg=dict( - flip_test=True, # flag of flip test - flip_mode='heatmap', # heatmap flipping - shift_heatmap=True, # shift the flipped heatmap several pixels to get a better performance - )) -``` - -### Evaluation - -Evaluation configuration refers to metrics commonly used by public datasets for keypoint detection tasks, mainly including: - -- AR, AP and mAP - -- PCK, PCKh, tPCK - -- AUC - -- EPE - -- NME - -Here is the description of Evaluation configuration, which defines a COCO metric evaluator: - -```Python -val_evaluator = dict( - type='CocoMetric', # coco AP - ann_file=data_root + 'annotations/person_keypoints_val2017.json') # path to annotation file -test_evaluator = val_evaluator # use val as test by default -``` - -## Config File Naming Convention - -MMPose follow the style below to name config files: - -```Python -{{algorithm info}}_{{module info}}_{{training info}}_{{data info}}.py -``` - -The filename is divided into four parts: - -- **Algorithm Information**: the name of algorithm, such as `topdown-heatmap`, `topdown-rle` - -- **Module Information**: list of intermediate modules in the forward order, such as `res101`, `hrnet-w48` - -- **Training Information**: settings of training(e.g. `batch_size`, `scheduler`), such as `8xb64-210e` - -- **Data Information**: the name of dataset, the reshape of input data, such as `ap10k-256x256`, `zebra-160x160` - -Words between different parts are connected by `'_'`, and those from the same part are connected by `'-'`. - -To avoid a too long filename, some strong related modules in `{{module info}}` will be omitted, such as `gap` in `RLE` algorithm, `deconv` in `Heatmap-based` algorithm - -Contributors are advised to follow the same style. - -## Common Usage - -### Inheritance - -This is often used to inherit configurations from other config files. Let's assume two configs like: - -`optimizer_cfg.py`: - -```Python -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) -``` - -`resnet50.py`: - -```Python -_base_ = ['optimizer_cfg.py'] -model = dict(type='ResNet', depth=50) -``` - -Although we did not define `optimizer` in `resnet50.py`, all configurations in `optimizer.py` will be inherited by setting `_base_ = ['optimizer_cfg.py']` - -```Python -cfg = Config.fromfile('resnet50.py') -cfg.optimizer # ConfigDict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) -``` - -### Modification - -For configurations already set in previous configs, you can directly modify arguments specific to that module. - -`resnet50_lr0.01.py`: - -```Python -_base_ = ['optimizer_cfg.py'] -model = dict(type='ResNet', depth=50) -optimizer = dict(lr=0.01) # modify specific filed -``` - -Now only `lr` is modified: - -```Python -cfg = Config.fromfile('resnet50_lr0.01.py') -cfg.optimizer # ConfigDict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) -``` - -### Delete - -For configurations already set in previous configs, if you wish to modify some specific argument and delete the remainders(in other words, discard the previous and redefine the module), you can set `_delete_=True`. 
-
-`resnet50.py`:
-
-```Python
-_base_ = ['optimizer_cfg.py', 'runtime_cfg.py']
-model = dict(type='ResNet', depth=50)
-optimizer = dict(_delete_=True, type='SGD', lr=0.01) # discard the previous and redefine the module
-```
-
-Now only `type` and `lr` are kept:
-
-```Python
-cfg = Config.fromfile('resnet50_lr0.01.py')
-cfg.optimizer # ConfigDict(type='SGD', lr=0.01)
-```
-
-```{note}
-If you wish to learn more about advanced usages of the config system, please refer to [MMEngine Config](https://mmengine.readthedocs.io/en/latest/tutorials/config.html).
-```
+# Configs
+
+We use Python files as configs and incorporate a modular and inheritance design into our config system, which makes it convenient to conduct various experiments.
+
+## Introduction
+
+MMPose is equipped with a powerful config system. Cooperating with the Registry, a config file can organize all the configurations in the form of Python dictionaries and create instances of the corresponding modules.
+
+Here is a simple example of a vanilla PyTorch module definition to show how the config system works:
+
+```Python
+# Definition of Loss_A in loss_a.py
+from torch import nn
+
+class Loss_A(nn.Module):
+    def __init__(self, param1, param2):
+        super().__init__()
+        self.param1 = param1
+        self.param2 = param2
+    def forward(self, x):
+        return x
+
+# Init the module
+loss = Loss_A(param1=1.0, param2=True)
+```
+
+All you need to do is register the module to the pre-defined Registry `MODELS`:
+
+```Python
+# Definition of Loss_A in loss_a.py
+from torch import nn
+
+from mmpose.registry import MODELS
+
+@MODELS.register_module() # register the module to MODELS
+class Loss_A(nn.Module):
+    def __init__(self, param1, param2):
+        super().__init__()
+        self.param1 = param1
+        self.param2 = param2
+    def forward(self, x):
+        return x
+```
+
+And import the new module in `__init__.py` in the corresponding directory:
+
+```Python
+# __init__.py of mmpose/models/losses
+from .loss_a import Loss_A
+
+__all__ = ['Loss_A']
+```
+
+Then you can build the module from its config anywhere you want:
+
+```Python
+# config_file.py
+loss_cfg = dict(
+    type='Loss_A', # specify your registered module via `type`
+    param1=1.0, # pass parameters to __init__() of the module
+    param2=True
+)
+
+# Init the module
+loss = MODELS.build(loss_cfg) # equivalent to `loss = Loss_A(param1=1.0, param2=True)`
+```
+
+```{note}
+Note that all new modules need to be registered using `Registry` and imported in `__init__.py` in the corresponding directory before we can create their instances from configs.
+```
+
+Here is a list of pre-defined registries in MMPose:
+
+- `DATASETS`: data-related modules
+- `TRANSFORMS`: data transformations
+- `MODELS`: all kinds of modules inheriting `nn.Module` (Backbone, Neck, Head, Loss, etc.)
+- `VISUALIZERS`: visualization tools
+- `VISBACKENDS`: visualizer backends
+- `METRICS`: all kinds of evaluation metrics
+- `KEYPOINT_CODECS`: keypoint encoders/decoders
+- `HOOKS`: all kinds of hooks like `CheckpointHook`
+
+All registries are defined in `$MMPOSE/mmpose/registry.py`.
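+
+The same `type`-based pattern works with every registry above, not only `MODELS`. Below is a minimal sketch (assuming MMPose is installed and that initializing the default scope is enough to make the registered modules discoverable) that builds the `RandomFlip` transform appearing in the pipeline configs later in this document:
+
+```Python
+from mmengine.registry import init_default_scope
+
+from mmpose.registry import TRANSFORMS
+
+init_default_scope('mmpose')  # switch to the 'mmpose' scope so its modules can be located
+
+# the same dict that appears in `train_pipeline` below
+flip_cfg = dict(type='RandomFlip', direction='horizontal')
+flip = TRANSFORMS.build(flip_cfg)  # an instance of the registered RandomFlip transform
+```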
+
+## Config System
+
+It is best practice to layer your configs in five sections:
+
+- **General**: basic configurations unrelated to training or testing, such as Timer, Logger, Visualizer and other Hooks, as well as distributed-related environment settings
+
+- **Data**: dataset, dataloader and data augmentation
+
+- **Training**: resume, weights loading, optimizer, learning rate scheduling, epochs and validation interval, etc.
+
+- **Model**: structure, module and loss function, etc.
+
+- **Evaluation**: metrics
+
+You can find all the provided configs under `$MMPOSE/configs`. A config can inherit contents from another config. To keep a config file simple and easy to read, we store some necessary but unremarkable configurations in `$MMPOSE/configs/_base_`. You can inspect the complete configurations by:
+
+```Bash
+python tools/analysis/print_config.py /PATH/TO/CONFIG
+```
+
+### General
+
+General configuration refers to the necessary configuration unrelated to training or testing, mainly including:
+
+- **Default Hooks**: time statistics, training logs, checkpoints etc.
+
+- **Environment**: distributed backend, cudnn, multi-processing etc.
+
+- **Visualizer**: visualization backend and strategy
+
+- **Log**: log level, format, printing and recording interval etc.
+
+Here is the description of General configuration:
+
+```Python
+# General
+default_scope = 'mmpose'
+default_hooks = dict(
+    timer=dict(type='IterTimerHook'), # time the data processing and model inference
+    logger=dict(type='LoggerHook', interval=50), # interval to print logs
+    param_scheduler=dict(type='ParamSchedulerHook'), # update lr
+    checkpoint=dict(
+        type='CheckpointHook', interval=1, save_best='coco/AP', # interval to save ckpt
+        rule='greater'), # rule to judge the metric
+    sampler_seed=dict(type='DistSamplerSeedHook')) # set the distributed seed
+env_cfg = dict(
+    cudnn_benchmark=False, # cudnn benchmark flag
+    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), # num of opencv threads
+    dist_cfg=dict(backend='nccl')) # distributed training backend
+vis_backends = [dict(type='LocalVisBackend')] # visualizer backend
+visualizer = dict( # Config of visualizer
+    type='PoseLocalVisualizer',
+    vis_backends=[dict(type='LocalVisBackend')],
+    name='visualizer')
+log_processor = dict( # Format, interval to log
+    type='LogProcessor', window_size=50, by_epoch=True, num_digits=6)
+log_level = 'INFO' # The level of logging
+```
+
+General configuration is stored alone in `$MMPOSE/configs/_base_` and inherited as follows:
+
+```Python
+_base_ = ['../../../_base_/default_runtime.py'] # take the config file as the starting point of the relative path
+```
+
+```{note}
+CheckpointHook:
+
+- save_best: `'coco/AP'` for `CocoMetric`, `'PCK'` for `PCKAccuracy`
+- max_keep_ckpts: the maximum checkpoints to keep. Defaults to -1, which means unlimited.
+
+Example:
+
+`default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater', max_keep_ckpts=1))`
+```
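+
+In practice, a downstream config usually inherits these defaults and overrides only a few entries. The following is a minimal sketch (the file name is hypothetical, and `TensorboardVisBackend` is an MMEngine backend not used in the snippet above): it keeps the inherited general settings but changes the logging interval, keeps only the three best checkpoints, and adds a TensorBoard backend to the visualizer.
+
+```Python
+# my_config.py -- hypothetical downstream config
+_base_ = ['../../../_base_/default_runtime.py']
+
+# override only the fields that differ from the inherited defaults
+default_hooks = dict(
+    logger=dict(interval=100), # print logs every 100 iterations instead of 50
+    checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=3))
+
+# log to TensorBoard in addition to local files (assumes tensorboard is installed)
+visualizer = dict(vis_backends=[
+    dict(type='LocalVisBackend'),
+    dict(type='TensorboardVisBackend'),
+])
+```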
+
+### Data
+
+Data configuration refers to the data processing related settings, mainly including:
+
+- **File Client**: data storage backend, default is `disk`; we also support `LMDB`, `S3 Bucket` etc.
+
+- **Dataset**: image and annotation file path
+
+- **Dataloader**: loading configuration, batch size etc.
+
+- **Pipeline**: data augmentation
+
+- **Input Encoder**: encoding the annotation into a specific form of target
+
+Here is the description of Data configuration:
+
+```Python
+backend_args = dict(backend='local') # data storage backend
+dataset_type = 'CocoDataset' # name of dataset
+data_mode = 'topdown' # type of the model
+data_root = 'data/coco/' # root of the dataset
+# config of codec, to generate targets and decode preds into coordinates
+codec = dict(
+    type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)
+train_pipeline = [ # data aug in training
+    dict(type='LoadImage', backend_args=backend_args), # image loading
+    dict(type='GetBBoxCenterScale'), # calculate center and scale of bbox
+    dict(type='RandomBBoxTransform'), # config of scaling, rotation and shifting
+    dict(type='RandomFlip', direction='horizontal'), # config of random flipping
+    dict(type='RandomHalfBody'), # config of half-body aug
+    dict(type='TopdownAffine', input_size=codec['input_size']), # update inputs via transform matrix
+    dict(
+        type='GenerateTarget', # generate targets via transformed inputs
+        encoder=codec), # get encoder from codec
+    dict(type='PackPoseInputs') # pack targets
+]
+test_pipeline = [ # data aug in testing
+    dict(type='LoadImage', backend_args=backend_args), # image loading
+    dict(type='GetBBoxCenterScale'), # calculate center and scale of bbox
+    dict(type='TopdownAffine', input_size=codec['input_size']), # update inputs via transform matrix
+    dict(type='PackPoseInputs') # pack targets
+]
+train_dataloader = dict(
+    batch_size=64, # batch size of each single GPU during training
+    num_workers=2, # workers to pre-fetch data for each single GPU
+    persistent_workers=True, # workers will stay around (with their state) waiting for another call into that dataloader.
+    sampler=dict(type='DefaultSampler', shuffle=True), # data sampler, shuffle in training
+    dataset=dict(
+        type=dataset_type, # name of dataset
+        data_root=data_root, # root of dataset
+        data_mode=data_mode, # type of the model
+        ann_file='annotations/person_keypoints_train2017.json', # path to annotation file
+        data_prefix=dict(img='train2017/'), # path to images
+        pipeline=train_pipeline
+    ))
+val_dataloader = dict(
+    batch_size=32, # batch size of each single GPU during validation
+    num_workers=2, # workers to pre-fetch data for each single GPU
+    persistent_workers=True, # workers will stay around (with their state) waiting for another call into that dataloader.
+    drop_last=False,
+    sampler=dict(type='DefaultSampler', shuffle=False), # data sampler
+    dataset=dict(
+        type=dataset_type, # name of dataset
+        data_root=data_root, # root of dataset
+        data_mode=data_mode, # type of the model
+        ann_file='annotations/person_keypoints_val2017.json', # path to annotation file
+        bbox_file='data/coco/person_detection_results/COCO_val2017_detections_AP_H_56_person.json', # bbox file used for evaluation
+        data_prefix=dict(img='val2017/'), # path to images
+        test_mode=True,
+        pipeline=test_pipeline
+    ))
+test_dataloader = val_dataloader # use val as test by default
+```
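+
+To train on your own data, you typically do not rewrite this section from scratch; you inherit an existing config and override only the paths. The sketch below uses placeholder paths and assumes a custom dataset in COCO format; the base config name is taken from the provided HRNet config, while its relative location depends on where you place your own config file.
+
+```Python
+# my_dataset_config.py -- hypothetical child config with placeholder paths
+_base_ = ['./td-hm_hrnet-w32_8xb64-210e_coco-256x192.py']
+
+data_root = 'data/my_dataset/'
+
+train_dataloader = dict(
+    dataset=dict(
+        data_root=data_root,
+        ann_file='annotations/train.json',
+        data_prefix=dict(img='images/')))
+val_dataloader = dict(
+    dataset=dict(
+        data_root=data_root,
+        ann_file='annotations/val.json',
+        bbox_file=None, # assumed to fall back to ground-truth boxes for evaluation
+        data_prefix=dict(img='images/')))
+test_dataloader = val_dataloader
+
+# keep the evaluator consistent with the new annotation file
+val_evaluator = dict(ann_file=data_root + 'annotations/val.json')
+test_evaluator = val_evaluator
+```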
+
+```{note}
+Common Usages:
+- [Resume training](../common_usages/resume_training.md)
+- [Automatic mixed precision (AMP) training](../common_usages/amp_training.md)
+- [Set the random seed](../common_usages/set_random_seed.md)
+
+```
+
+### Training
+
+Training configuration refers to the training-related settings, including:
+
+- Resume training
+
+- Model weights loading
+
+- Epochs of training and interval to validate
+
+- Learning rate adjustment strategies like warm-up, scheduling etc.
+
+- Optimizer and initial learning rate
+
+- Advanced tricks like auto learning rate scaling
+
+Here is the description of Training configuration:
+
+```Python
+resume = False # resume training from a given checkpoint; training will be resumed from the epoch at which the checkpoint was saved
+load_from = None # load models as a pre-trained model from a given path
+train_cfg = dict(by_epoch=True, max_epochs=210, val_interval=10) # max epochs of training, interval to validate
+param_scheduler = [
+    dict( # warmup strategy
+        type='LinearLR', begin=0, end=500, start_factor=0.001, by_epoch=False),
+    dict( # scheduler
+        type='MultiStepLR',
+        begin=0,
+        end=210,
+        milestones=[170, 200],
+        gamma=0.1,
+        by_epoch=True)
+]
+optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005)) # optimizer and initial lr
+auto_scale_lr = dict(base_batch_size=512) # auto scale the lr according to batch size
+```
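+
+The scheduler and optimizer entries are regular MMEngine components, so they can be swapped without touching the rest of the config. The following is a sketch of a possible variant (not an official MMPose recipe): the same linear warm-up, cosine annealing instead of `MultiStepLR`, and mixed-precision training enabled through the optimizer wrapper, corresponding to the AMP link in the note above.
+
+```Python
+param_scheduler = [
+    dict( # same warmup strategy as above
+        type='LinearLR', begin=0, end=500, start_factor=0.001, by_epoch=False),
+    dict( # cosine annealing over the whole training schedule
+        type='CosineAnnealingLR',
+        begin=0,
+        end=210,
+        T_max=210,
+        eta_min=1e-5,
+        by_epoch=True)
+]
+optim_wrapper = dict(
+    type='AmpOptimWrapper', # enable automatic mixed precision
+    optimizer=dict(type='Adam', lr=0.0005))
+```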
+
+### Model
+
+Model configuration refers to model training and inference related settings, including:
+
+- Model Structure
+
+- Loss Function
+
+- Output Decoding
+
+- Test-time augmentation
+
+Here is the description of Model configuration, which defines a top-down heatmap-based HRNet-W32 model:
+
+```Python
+# config of codec, if already defined in data configuration section, no need to define again
+codec = dict(
+    type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)
+
+model = dict(
+    type='TopdownPoseEstimator', # Macro model structure
+    data_preprocessor=dict( # data normalization and channel transposition
+        type='PoseDataPreprocessor',
+        mean=[123.675, 116.28, 103.53],
+        std=[58.395, 57.12, 57.375],
+        bgr_to_rgb=True),
+    backbone=dict( # config of backbone
+        type='HRNet',
+        in_channels=3,
+        extra=dict(
+            stage1=dict(
+                num_modules=1,
+                num_branches=1,
+                block='BOTTLENECK',
+                num_blocks=(4, ),
+                num_channels=(64, )),
+            stage2=dict(
+                num_modules=1,
+                num_branches=2,
+                block='BASIC',
+                num_blocks=(4, 4),
+                num_channels=(32, 64)),
+            stage3=dict(
+                num_modules=4,
+                num_branches=3,
+                block='BASIC',
+                num_blocks=(4, 4, 4),
+                num_channels=(32, 64, 128)),
+            stage4=dict(
+                num_modules=3,
+                num_branches=4,
+                block='BASIC',
+                num_blocks=(4, 4, 4, 4),
+                num_channels=(32, 64, 128, 256))),
+        init_cfg=dict(
+            type='Pretrained', # load pretrained weights to backbone
+            checkpoint='https://download.openmmlab.com/mmpose'
+            '/pretrain_models/hrnet_w32-36af842e.pth'),
+    ),
+    head=dict( # config of head
+        type='HeatmapHead',
+        in_channels=32,
+        out_channels=17,
+        deconv_out_channels=None,
+        loss=dict(type='KeypointMSELoss', use_target_weight=True), # config of loss function
+        decoder=codec), # get decoder from codec
+    test_cfg=dict(
+        flip_test=True, # flag of flip test
+        flip_mode='heatmap', # heatmap flipping
+        shift_heatmap=True, # shift the flipped heatmap several pixels to get a better performance
+    ))
+```
+
+### Evaluation
+
+Evaluation configuration refers to metrics commonly used by public datasets for keypoint detection tasks, mainly including:
+
+- AR, AP and mAP
+
+- PCK, PCKh, tPCK
+
+- AUC
+
+- EPE
+
+- NME
+
+Here is the description of Evaluation configuration, which defines a COCO metric evaluator:
+
+```Python
+val_evaluator = dict(
+    type='CocoMetric', # coco AP
+    ann_file=data_root + 'annotations/person_keypoints_val2017.json') # path to annotation file
+test_evaluator = val_evaluator # use val as test by default
+```
+
+## Config File Naming Convention
+
+MMPose follows the style below to name config files:
+
+```Python
+{{algorithm info}}_{{module info}}_{{training info}}_{{data info}}.py
+```
+
+The filename is divided into four parts:
+
+- **Algorithm Information**: the name of the algorithm, such as `topdown-heatmap`, `topdown-rle`
+
+- **Module Information**: list of intermediate modules in the forward order, such as `res101`, `hrnet-w48`
+
+- **Training Information**: settings of training (e.g. `batch_size`, `scheduler`), such as `8xb64-210e`
+
+- **Data Information**: the name of the dataset and the input size, such as `ap10k-256x256`, `zebra-160x160`
+
+Words between different parts are connected by `'_'`, and those from the same part are connected by `'-'`.
+
+To avoid an overly long filename, some strongly related modules in `{{module info}}` are omitted, such as `gap` in the `RLE` algorithm and `deconv` in `Heatmap-based` algorithms.
+
+Contributors are advised to follow the same style.
+
+## Common Usage
+
+### Inheritance
+
+This is often used to inherit configurations from other config files. Let's assume two configs like:
+
+`optimizer_cfg.py`:
+
+```Python
+optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
+```
+
+`resnet50.py`:
+
+```Python
+_base_ = ['optimizer_cfg.py']
+model = dict(type='ResNet', depth=50)
+```
+
+Although we did not define `optimizer` in `resnet50.py`, all configurations in `optimizer_cfg.py` will be inherited by setting `_base_ = ['optimizer_cfg.py']`:
+
+```Python
+cfg = Config.fromfile('resnet50.py')
+cfg.optimizer # ConfigDict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
+```
+
+### Modification
+
+For configurations already set in previous configs, you can directly modify arguments specific to that module.
+
+`resnet50_lr0.01.py`:
+
+```Python
+_base_ = ['optimizer_cfg.py']
+model = dict(type='ResNet', depth=50)
+optimizer = dict(lr=0.01) # modify a specific field
+```
+
+Now only `lr` is modified:
+
+```Python
+cfg = Config.fromfile('resnet50_lr0.01.py')
+cfg.optimizer # ConfigDict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
+```
+
+### Delete
+
+For configurations already set in previous configs, if you wish to modify some specific arguments and delete the remainder (in other words, discard the previous settings and redefine the module), you can set `_delete_=True`.
+ +`resnet50.py`: + +```Python +_base_ = ['optimizer_cfg.py', 'runtime_cfg.py'] +model = dict(type='ResNet', depth=50) +optimizer = dict(_delete_=True, type='SGD', lr=0.01) # discard the previous and redefine the module +``` + +Now only `type` and `lr` are kept: + +```Python +cfg = Config.fromfile('resnet50_lr0.01.py') +cfg.optimizer # ConfigDict(type='SGD', lr=0.01) +``` + +```{note} +If you wish to learn more about advanced usages of the config system, please refer to [MMEngine Config](https://mmengine.readthedocs.io/en/latest/tutorials/config.html). +``` diff --git a/docs/en/user_guides/inference.md b/docs/en/user_guides/inference.md index fa51aa20fa..a42465d088 100644 --- a/docs/en/user_guides/inference.md +++ b/docs/en/user_guides/inference.md @@ -1,285 +1,285 @@ -# Inference with existing models - -MMPose provides a wide variety of pre-trained models for pose estimation, which can be found in the [Model Zoo](https://mmpose.readthedocs.io/en/latest/model_zoo.html). -This guide will demonstrate **how to perform inference**, or running pose estimation on provided images or videos using trained models. - -For instructions on testing existing models on standard datasets, refer to this [guide](./train_and_test.md#test). - -In MMPose, a model is defined by a configuration file, while its pre-existing parameters are stored in a checkpoint file. You can find the model configuration files and corresponding checkpoint URLs in the [Model Zoo](https://mmpose.readthedocs.io/en/latest/modelzoo.html). We recommend starting with the HRNet model, using [this configuration file](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py) and [this checkpoint file](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth). - -## Inferencer: a Unified Inference Interface - -MMPose offers a comprehensive API for inference, known as `MMPoseInferencer`. This API enables users to perform inference on both images and videos using all the models supported by MMPose. Furthermore, the API provides automatic visualization of inference results and allows for the convenient saving of predictions. - -### Basic Usage - -The `MMPoseInferencer` can be used in any Python program to perform pose estimation. Below is an example of inference on a given image using the pre-trained human pose estimator within the Python shell. - -```python -from mmpose.apis import MMPoseInferencer - -img_path = 'tests/data/coco/000000000785.jpg' # replace this with your own image path - -# instantiate the inferencer using the model alias -inferencer = MMPoseInferencer('human') - -# The MMPoseInferencer API employs a lazy inference approach, -# creating a prediction generator when given input -result_generator = inferencer(img_path, show=True) -result = next(result_generator) -``` - -If everything works fine, you will see the following image in a new window: -![inferencer_result_coco](https://user-images.githubusercontent.com/26127467/220008302-4a57fd44-0978-408e-8351-600e5513316a.jpg) - -The `result` variable is a dictionary comprising two keys, `'visualization'` and `'predictions'`. - -- `'visualization'` holds a list which: - - - contains visualization results, such as the input image, markers of the estimated poses, and optional predicted heatmaps. - - remains empty if the `return_vis` argument is not specified. - -- `'predictions'` stores: - - - a list of estimated keypoints for each identified instance. 
- -The structure of the `result` dictionary is as follows: - -```python -result = { - 'visualization': [ - # number of elements: batch_size (defaults to 1) - vis_image_1, - ... - ], - 'predictions': [ - # pose estimation result of each image - # number of elements: batch_size (defaults to 1) - [ - # pose information of each detected instance - # number of elements: number of detected instances - {'keypoints': ..., # instance 1 - 'keypoint_scores': ..., - ... - }, - {'keypoints': ..., # instance 2 - 'keypoint_scores': ..., - ... - }, - ] - ... - ] -} - -``` - -A **command-line interface (CLI)** tool for the inferencer is also available: `demo/inferencer_demo.py`. This tool allows users to perform inference using the same model and inputs with the following command: - -```bash -python demo/inferencer_demo.py 'tests/data/coco/000000000785.jpg' \ - --pose2d 'human' --show --pred-out-dir 'predictions' -``` - -The predictions will be save in `predictions/000000000785.json`. The argument names correspond with the `MMPoseInferencer`, which serves as an API. - -The inferencer is capable of processing a range of input types, which includes the following: - -- A path to an image -- A path to a video -- A path to a folder (which will cause all images in that folder to be inferred) -- An image array (NA for CLI tool) -- A list of image arrays (NA for CLI tool) -- A webcam (in which case the `input` parameter should be set to either `'webcam'` or `'webcam:{CAMERA_ID}'`) - -Please note that when the input corresponds to multiple images, such as when the input is a video or a folder path, the inference process needs to iterate over the results generator in order to perform inference on all the frames or images within the folder. Here's an example in Python: - -```python -folder_path = 'tests/data/coco' - -result_generator = inferencer(folder_path, show=True) -results = [result for result in result_generator] -``` - -In this example, the `inferencer` takes the `folder_path` as input and returns a generator object (`result_generator`) that produces inference results. By iterating over the `result_generator` and storing each result in the `results` list, you can obtain the inference results for all the frames or images within the folder. - -### Custom Pose Estimation Models - -The inferencer provides several methods that can be used to customize the models employed: - -```python - -# build the inferencer with model alias -inferencer = MMPoseInferencer('human') - -# build the inferencer with model config name -inferencer = MMPoseInferencer('td-hm_hrnet-w32_8xb64-210e_coco-256x192') - -# build the inferencer with model config path and checkpoint path/URL -inferencer = MMPoseInferencer( - pose2d='configs/body_2d_keypoint/topdown_heatmap/coco/' \ - 'td-hm_hrnet-w32_8xb64-210e_coco-256x192.py', - pose2d_weights='https://download.openmmlab.com/mmpose/top_down/' \ - 'hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth' -) -``` - -The complere list of model alias can be found in the [Model Alias](#model-alias) section. - -**Custom Object Detector for Top-down Pose Estimation Models** - -In addition, top-down pose estimators also require an object detection model. The inferencer is capable of inferring the instance type for models trained with datasets supported in MMPose, and subsequently constructing the necessary object detection model. 
Alternatively, users may also manually specify the detection model using the following methods: - -```python - -# specify detection model by alias -# the available aliases include 'human', 'hand', 'face', 'animal', -# as well as any additional aliases defined in mmdet -inferencer = MMPoseInferencer( - # suppose the pose estimator is trained on custom dataset - pose2d='custom_human_pose_estimator.py', - pose2d_weights='custom_human_pose_estimator.pth', - det_model='human' -) - -# specify detection model with model config name -inferencer = MMPoseInferencer( - pose2d='human', - det_model='yolox_l_8x8_300e_coco', - det_cat_ids=[0], # the category id of 'human' class -) - -# specify detection model with config path and checkpoint path/URL -inferencer = MMPoseInferencer( - pose2d='human', - det_model=f'{PATH_TO_MMDET}/configs/yolox/yolox_l_8x8_300e_coco.py', - det_weights='https://download.openmmlab.com/mmdetection/v2.0/' \ - 'yolox/yolox_l_8x8_300e_coco/' \ - 'yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth', - det_cat_ids=[0], # the category id of 'human' class -) -``` - -To perform top-down pose estimation on cropped images containing a single object, users can set `det_model='whole_image'`. This bypasses the object detector initialization, creating a bounding box that matches the input image size and directly sending the entire image to the top-down pose estimator. - -### Dump Results - -After performing pose estimation, you might want to save the results for further analysis or processing. This section will guide you through saving the predicted keypoints and visualizations to your local machine. - -To save the predictions in a JSON file, use the `pred_out_dir` argument when running the inferencer: - -```python -result_generator = inferencer(img_path, pred_out_dir='predictions') -result = next(result_generator) -``` - -The predictions will be saved in the `predictions/` folder in JSON format, with each file named after the corresponding input image or video. - -For more advanced scenarios, you can also access the predictions directly from the `result` dictionary returned by the inferencer. The key `'predictions'` contains a list of predicted keypoints for each individual instance in the input image or video. You can then manipulate or store these results using your preferred method. - -Keep in mind that if you want to save both the visualization images and the prediction files in a single folder, you can use the `out_dir` argument: - -```python -result_generator = inferencer(img_path, out_dir='output') -result = next(result_generator) -``` - -In this case, the visualization images will be saved in the `output/visualization/` folder, while the predictions will be stored in the `output/predictions/` folder. - -### Visualization - -The inferencer can automatically draw predictions on input images or videos. Visualization results can be displayed in a new window and saved locally. - -To view the visualization results in a new window, use the following code: - -```python -result_generator = inferencer(img_path, show=True) -result = next(result_generator) -``` - -Notice that: - -- If the input video comes from a webcam, displaying the visualization results in a new window will be enabled by default, allowing users to see the inputs. -- If there is no GUI on the platform, this step may become stuck. 
- -To save the visualization results locally, specify the `vis_out_dir` argument like this: - -```python -result_generator = inferencer(img_path, vis_out_dir='vis_results') -result = next(result_generator) -``` - -The input images or videos with predicted poses will be saved in the `vis_results/` folder. - -As seen in the above image, the visualization of estimated poses consists of keypoints (depicted by solid circles) and skeletons (represented by lines). The default size of these visual elements might not produce satisfactory results. Users can adjust the circle size and line thickness using the `radius` and `thickness` arguments, as shown below: - -```python -result_generator = inferencer(img_path, show=True, radius=4, thickness=2) -result = next(result_generator) -``` - -### Arguments of Inferencer - -The `MMPoseInferencer` offers a variety of arguments for customizing pose estimation, visualization, and saving predictions. Below is a list of the arguments available when initializing the inferencer and their descriptions: - -| Argument | Description | -| ---------------- | ---------------------------------------------------------------------------------------------------------------- | -| `pose2d` | Specifies the model alias, configuration file name, or configuration file path for the 2D pose estimation model. | -| `pose2d_weights` | Specifies the URL or local path to the 2D pose estimation model's checkpoint file. | -| `pose3d` | Specifies the model alias, configuration file name, or configuration file path for the 3D pose estimation model. | -| `pose3d_weights` | Specifies the URL or local path to the 3D pose estimation model's checkpoint file. | -| `det_model` | Specifies the model alias, configuration file name, or configuration file path for the object detection model. | -| `det_weights` | Specifies the URL or local path to the object detection model's checkpoint file. | -| `det_cat_ids` | Specifies the list of category IDs corresponding to the object classes to be detected. | -| `device` | The device to perform the inference. If left `None`, the Inferencer will select the most suitable one. | -| `scope` | The namespace where the model modules are defined. | - -The inferencer is designed for both visualization and saving predictions. The table below presents the list of arguments available when using the `MMPoseInferencer` for inference, along with their compatibility with 2D and 3D inferencing: - -| Argument | Description | 2D | 3D | -| ------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | --- | --- | -| `show` | Controls the display of the image or video in a pop-up window. | ✔️ | ✔️ | -| `radius` | Sets the visualization keypoint radius. | ✔️ | ✔️ | -| `thickness` | Determines the link thickness for visualization. | ✔️ | ✔️ | -| `kpt_thr` | Sets the keypoint score threshold. Keypoints with scores exceeding this threshold will be displayed. | ✔️ | ✔️ | -| `draw_bbox` | Decides whether to display the bounding boxes of instances. | ✔️ | ✔️ | -| `draw_heatmap` | Decides if the predicted heatmaps should be drawn. | ✔️ | ❌ | -| `black_background` | Decides whether the estimated poses should be displayed on a black background. | ✔️ | ❌ | -| `skeleton_style` | Sets the skeleton style. Options include 'mmpose' (default) and 'openpose'. | ✔️ | ❌ | -| `use_oks_tracking` | Decides whether to use OKS as a similarity measure in tracking. 
| ❌ | ✔️ | -| `tracking_thr` | Sets the similarity threshold for tracking. | ❌ | ✔️ | -| `norm_pose_2d` | Decides whether to scale the bounding box to the dataset's average bounding box scale and relocate the bounding box to the dataset's average bounding box center. | ❌ | ✔️ | -| `rebase_keypoint_height` | Decides whether to set the lowest keypoint with height 0. | ❌ | ✔️ | -| `return_vis` | Decides whether to include visualization images in the results. | ✔️ | ✔️ | -| `vis_out_dir` | Defines the folder path to save the visualization images. If unset, the visualization images will not be saved. | ✔️ | ✔️ | -| `return_datasample` | Determines if the prediction should be returned in the `PoseDataSample` format. | ✔️ | ✔️ | -| `pred_out_dir` | Specifies the folder path to save the predictions. If unset, the predictions will not be saved. | ✔️ | ✔️ | -| `out_dir` | If `vis_out_dir` or `pred_out_dir` is unset, these will be set to `f'{out_dir}/visualization'` or `f'{out_dir}/predictions'`, respectively. | ✔️ | ✔️ | - -### Model Alias - -The MMPose library has predefined aliases for several frequently used models. These aliases can be utilized as a shortcut when initializing the `MMPoseInferencer`, as an alternative to providing the full model configuration name. Here are the available 2D model aliases and their corresponding configuration names: - -| Alias | Configuration Name | Task | Pose Estimator | Detector | -| --------- | -------------------------------------------------- | ------------------------------- | -------------- | ------------------- | -| animal | rtmpose-m_8xb64-210e_ap10k-256x256 | Animal pose estimation | RTMPose-m | RTMDet-m | -| human | rtmpose-m_8xb256-420e_aic-coco-256x192 | Human pose estimation | RTMPose-m | RTMDet-m | -| face | rtmpose-m_8xb64-60e_wflw-256x256 | Face keypoint detection | RTMPose-m | yolox-s | -| hand | rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256 | Hand keypoint detection | RTMPose-m | ssdlite_mobilenetv2 | -| wholebody | rtmpose-m_8xb64-270e_coco-wholebody-256x192 | Human wholebody pose estimation | RTMPose-m | RTMDet-m | -| vitpose | td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192 | Human pose estimation | ViTPose-base | RTMDet-m | -| vitpose-s | td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192 | Human pose estimation | ViTPose-small | RTMDet-m | -| vitpose-b | td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192 | Human pose estimation | ViTPose-base | RTMDet-m | -| vitpose-l | td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192 | Human pose estimation | ViTPose-large | RTMDet-m | -| vitpose-h | td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192 | Human pose estimation | ViTPose-huge | RTMDet-m | - -The following table lists the available 3D model aliases and their corresponding configuration names: - -| Alias | Configuration Name | Task | 3D Pose Estimator | 2D Pose Estimator | Detector | -| ------- | --------------------------------------------------------- | ------------------------ | ----------------- | ----------------- | -------- | -| human3d | pose-lift_videopose3d-243frm-supv-cpn-ft_8xb128-200e_h36m | Human 3D pose estimation | VideoPose3D | RTMPose-m | RTMDet-m | - -In addition, users can utilize the CLI tool to display all available aliases with the following command: - -```shell -python demo/inferencer_demo.py --show-alias -``` +# Inference with existing models + +MMPose provides a wide variety of pre-trained models for pose estimation, which can be found in the [Model Zoo](https://mmpose.readthedocs.io/en/latest/model_zoo.html). 
+This guide will demonstrate **how to perform inference**, or running pose estimation on provided images or videos using trained models. + +For instructions on testing existing models on standard datasets, refer to this [guide](./train_and_test.md#test). + +In MMPose, a model is defined by a configuration file, while its pre-existing parameters are stored in a checkpoint file. You can find the model configuration files and corresponding checkpoint URLs in the [Model Zoo](https://mmpose.readthedocs.io/en/latest/modelzoo.html). We recommend starting with the HRNet model, using [this configuration file](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py) and [this checkpoint file](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth). + +## Inferencer: a Unified Inference Interface + +MMPose offers a comprehensive API for inference, known as `MMPoseInferencer`. This API enables users to perform inference on both images and videos using all the models supported by MMPose. Furthermore, the API provides automatic visualization of inference results and allows for the convenient saving of predictions. + +### Basic Usage + +The `MMPoseInferencer` can be used in any Python program to perform pose estimation. Below is an example of inference on a given image using the pre-trained human pose estimator within the Python shell. + +```python +from mmpose.apis import MMPoseInferencer + +img_path = 'tests/data/coco/000000000785.jpg' # replace this with your own image path + +# instantiate the inferencer using the model alias +inferencer = MMPoseInferencer('human') + +# The MMPoseInferencer API employs a lazy inference approach, +# creating a prediction generator when given input +result_generator = inferencer(img_path, show=True) +result = next(result_generator) +``` + +If everything works fine, you will see the following image in a new window: +![inferencer_result_coco](https://user-images.githubusercontent.com/26127467/220008302-4a57fd44-0978-408e-8351-600e5513316a.jpg) + +The `result` variable is a dictionary comprising two keys, `'visualization'` and `'predictions'`. + +- `'visualization'` holds a list which: + + - contains visualization results, such as the input image, markers of the estimated poses, and optional predicted heatmaps. + - remains empty if the `return_vis` argument is not specified. + +- `'predictions'` stores: + + - a list of estimated keypoints for each identified instance. + +The structure of the `result` dictionary is as follows: + +```python +result = { + 'visualization': [ + # number of elements: batch_size (defaults to 1) + vis_image_1, + ... + ], + 'predictions': [ + # pose estimation result of each image + # number of elements: batch_size (defaults to 1) + [ + # pose information of each detected instance + # number of elements: number of detected instances + {'keypoints': ..., # instance 1 + 'keypoint_scores': ..., + ... + }, + {'keypoints': ..., # instance 2 + 'keypoint_scores': ..., + ... + }, + ] + ... + ] +} + +``` + +A **command-line interface (CLI)** tool for the inferencer is also available: `demo/inferencer_demo.py`. This tool allows users to perform inference using the same model and inputs with the following command: + +```bash +python demo/inferencer_demo.py 'tests/data/coco/000000000785.jpg' \ + --pose2d 'human' --show --pred-out-dir 'predictions' +``` + +The predictions will be save in `predictions/000000000785.json`. 
The argument names correspond to those of the `MMPoseInferencer` API.
+
+The inferencer is capable of processing a range of input types, which includes the following:
+
+- A path to an image
+- A path to a video
+- A path to a folder (which will cause all images in that folder to be inferred)
+- An image array (NA for CLI tool)
+- A list of image arrays (NA for CLI tool)
+- A webcam (in which case the `input` parameter should be set to either `'webcam'` or `'webcam:{CAMERA_ID}'`)
+
+Please note that when the input corresponds to multiple images, such as when the input is a video or a folder path, the inference process needs to iterate over the results generator in order to perform inference on all the frames or images within the folder. Here's an example in Python:
+
+```python
+folder_path = 'tests/data/coco'
+
+result_generator = inferencer(folder_path, show=True)
+results = [result for result in result_generator]
+```
+
+In this example, the `inferencer` takes the `folder_path` as input and returns a generator object (`result_generator`) that produces inference results. By iterating over the `result_generator` and storing each result in the `results` list, you can obtain the inference results for all the frames or images within the folder.
+
+### Custom Pose Estimation Models
+
+The inferencer provides several methods that can be used to customize the models employed:
+
+```python
+
+# build the inferencer with model alias
+inferencer = MMPoseInferencer('human')
+
+# build the inferencer with model config name
+inferencer = MMPoseInferencer('td-hm_hrnet-w32_8xb64-210e_coco-256x192')
+
+# build the inferencer with model config path and checkpoint path/URL
+inferencer = MMPoseInferencer(
+    pose2d='configs/body_2d_keypoint/topdown_heatmap/coco/' \
+           'td-hm_hrnet-w32_8xb64-210e_coco-256x192.py',
+    pose2d_weights='https://download.openmmlab.com/mmpose/top_down/' \
+                   'hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth'
+)
+```
+
+The complete list of model aliases can be found in the [Model Alias](#model-alias) section.
+
+**Custom Object Detector for Top-down Pose Estimation Models**
+
+In addition, top-down pose estimators also require an object detection model. The inferencer is capable of inferring the instance type for models trained with datasets supported in MMPose, and subsequently constructing the necessary object detection model.
Alternatively, users may also manually specify the detection model using the following methods: + +```python + +# specify detection model by alias +# the available aliases include 'human', 'hand', 'face', 'animal', +# as well as any additional aliases defined in mmdet +inferencer = MMPoseInferencer( + # suppose the pose estimator is trained on custom dataset + pose2d='custom_human_pose_estimator.py', + pose2d_weights='custom_human_pose_estimator.pth', + det_model='human' +) + +# specify detection model with model config name +inferencer = MMPoseInferencer( + pose2d='human', + det_model='yolox_l_8x8_300e_coco', + det_cat_ids=[0], # the category id of 'human' class +) + +# specify detection model with config path and checkpoint path/URL +inferencer = MMPoseInferencer( + pose2d='human', + det_model=f'{PATH_TO_MMDET}/configs/yolox/yolox_l_8x8_300e_coco.py', + det_weights='https://download.openmmlab.com/mmdetection/v2.0/' \ + 'yolox/yolox_l_8x8_300e_coco/' \ + 'yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth', + det_cat_ids=[0], # the category id of 'human' class +) +``` + +To perform top-down pose estimation on cropped images containing a single object, users can set `det_model='whole_image'`. This bypasses the object detector initialization, creating a bounding box that matches the input image size and directly sending the entire image to the top-down pose estimator. + +### Dump Results + +After performing pose estimation, you might want to save the results for further analysis or processing. This section will guide you through saving the predicted keypoints and visualizations to your local machine. + +To save the predictions in a JSON file, use the `pred_out_dir` argument when running the inferencer: + +```python +result_generator = inferencer(img_path, pred_out_dir='predictions') +result = next(result_generator) +``` + +The predictions will be saved in the `predictions/` folder in JSON format, with each file named after the corresponding input image or video. + +For more advanced scenarios, you can also access the predictions directly from the `result` dictionary returned by the inferencer. The key `'predictions'` contains a list of predicted keypoints for each individual instance in the input image or video. You can then manipulate or store these results using your preferred method. + +Keep in mind that if you want to save both the visualization images and the prediction files in a single folder, you can use the `out_dir` argument: + +```python +result_generator = inferencer(img_path, out_dir='output') +result = next(result_generator) +``` + +In this case, the visualization images will be saved in the `output/visualization/` folder, while the predictions will be stored in the `output/predictions/` folder. + +### Visualization + +The inferencer can automatically draw predictions on input images or videos. Visualization results can be displayed in a new window and saved locally. + +To view the visualization results in a new window, use the following code: + +```python +result_generator = inferencer(img_path, show=True) +result = next(result_generator) +``` + +Notice that: + +- If the input video comes from a webcam, displaying the visualization results in a new window will be enabled by default, allowing users to see the inputs. +- If there is no GUI on the platform, this step may become stuck. 
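+
+For instance, a minimal webcam run might look like the sketch below. It assumes a connected camera and a working GUI; stop it by closing the window or interrupting the process (e.g. with Ctrl-C).
+
+```python
+from mmpose.apis import MMPoseInferencer
+
+inferencer = MMPoseInferencer('human')
+
+# each frame read from the webcam is processed and displayed as the
+# generator is consumed; every `result` has the same 'visualization' /
+# 'predictions' structure described earlier
+for result in inferencer('webcam', show=True):
+    pass
+```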
+ +To save the visualization results locally, specify the `vis_out_dir` argument like this: + +```python +result_generator = inferencer(img_path, vis_out_dir='vis_results') +result = next(result_generator) +``` + +The input images or videos with predicted poses will be saved in the `vis_results/` folder. + +As seen in the above image, the visualization of estimated poses consists of keypoints (depicted by solid circles) and skeletons (represented by lines). The default size of these visual elements might not produce satisfactory results. Users can adjust the circle size and line thickness using the `radius` and `thickness` arguments, as shown below: + +```python +result_generator = inferencer(img_path, show=True, radius=4, thickness=2) +result = next(result_generator) +``` + +### Arguments of Inferencer + +The `MMPoseInferencer` offers a variety of arguments for customizing pose estimation, visualization, and saving predictions. Below is a list of the arguments available when initializing the inferencer and their descriptions: + +| Argument | Description | +| ---------------- | ---------------------------------------------------------------------------------------------------------------- | +| `pose2d` | Specifies the model alias, configuration file name, or configuration file path for the 2D pose estimation model. | +| `pose2d_weights` | Specifies the URL or local path to the 2D pose estimation model's checkpoint file. | +| `pose3d` | Specifies the model alias, configuration file name, or configuration file path for the 3D pose estimation model. | +| `pose3d_weights` | Specifies the URL or local path to the 3D pose estimation model's checkpoint file. | +| `det_model` | Specifies the model alias, configuration file name, or configuration file path for the object detection model. | +| `det_weights` | Specifies the URL or local path to the object detection model's checkpoint file. | +| `det_cat_ids` | Specifies the list of category IDs corresponding to the object classes to be detected. | +| `device` | The device to perform the inference. If left `None`, the Inferencer will select the most suitable one. | +| `scope` | The namespace where the model modules are defined. | + +The inferencer is designed for both visualization and saving predictions. The table below presents the list of arguments available when using the `MMPoseInferencer` for inference, along with their compatibility with 2D and 3D inferencing: + +| Argument | Description | 2D | 3D | +| ------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | --- | --- | +| `show` | Controls the display of the image or video in a pop-up window. | ✔️ | ✔️ | +| `radius` | Sets the visualization keypoint radius. | ✔️ | ✔️ | +| `thickness` | Determines the link thickness for visualization. | ✔️ | ✔️ | +| `kpt_thr` | Sets the keypoint score threshold. Keypoints with scores exceeding this threshold will be displayed. | ✔️ | ✔️ | +| `draw_bbox` | Decides whether to display the bounding boxes of instances. | ✔️ | ✔️ | +| `draw_heatmap` | Decides if the predicted heatmaps should be drawn. | ✔️ | ❌ | +| `black_background` | Decides whether the estimated poses should be displayed on a black background. | ✔️ | ❌ | +| `skeleton_style` | Sets the skeleton style. Options include 'mmpose' (default) and 'openpose'. | ✔️ | ❌ | +| `use_oks_tracking` | Decides whether to use OKS as a similarity measure in tracking. 
| ❌ | ✔️ | +| `tracking_thr` | Sets the similarity threshold for tracking. | ❌ | ✔️ | +| `norm_pose_2d` | Decides whether to scale the bounding box to the dataset's average bounding box scale and relocate the bounding box to the dataset's average bounding box center. | ❌ | ✔️ | +| `rebase_keypoint_height` | Decides whether to set the lowest keypoint with height 0. | ❌ | ✔️ | +| `return_vis` | Decides whether to include visualization images in the results. | ✔️ | ✔️ | +| `vis_out_dir` | Defines the folder path to save the visualization images. If unset, the visualization images will not be saved. | ✔️ | ✔️ | +| `return_datasample` | Determines if the prediction should be returned in the `PoseDataSample` format. | ✔️ | ✔️ | +| `pred_out_dir` | Specifies the folder path to save the predictions. If unset, the predictions will not be saved. | ✔️ | ✔️ | +| `out_dir` | If `vis_out_dir` or `pred_out_dir` is unset, these will be set to `f'{out_dir}/visualization'` or `f'{out_dir}/predictions'`, respectively. | ✔️ | ✔️ | + +### Model Alias + +The MMPose library has predefined aliases for several frequently used models. These aliases can be utilized as a shortcut when initializing the `MMPoseInferencer`, as an alternative to providing the full model configuration name. Here are the available 2D model aliases and their corresponding configuration names: + +| Alias | Configuration Name | Task | Pose Estimator | Detector | +| --------- | -------------------------------------------------- | ------------------------------- | -------------- | ------------------- | +| animal | rtmpose-m_8xb64-210e_ap10k-256x256 | Animal pose estimation | RTMPose-m | RTMDet-m | +| human | rtmpose-m_8xb256-420e_aic-coco-256x192 | Human pose estimation | RTMPose-m | RTMDet-m | +| face | rtmpose-m_8xb64-60e_wflw-256x256 | Face keypoint detection | RTMPose-m | yolox-s | +| hand | rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256 | Hand keypoint detection | RTMPose-m | ssdlite_mobilenetv2 | +| wholebody | rtmpose-m_8xb64-270e_coco-wholebody-256x192 | Human wholebody pose estimation | RTMPose-m | RTMDet-m | +| vitpose | td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192 | Human pose estimation | ViTPose-base | RTMDet-m | +| vitpose-s | td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192 | Human pose estimation | ViTPose-small | RTMDet-m | +| vitpose-b | td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192 | Human pose estimation | ViTPose-base | RTMDet-m | +| vitpose-l | td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192 | Human pose estimation | ViTPose-large | RTMDet-m | +| vitpose-h | td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192 | Human pose estimation | ViTPose-huge | RTMDet-m | + +The following table lists the available 3D model aliases and their corresponding configuration names: + +| Alias | Configuration Name | Task | 3D Pose Estimator | 2D Pose Estimator | Detector | +| ------- | --------------------------------------------------------- | ------------------------ | ----------------- | ----------------- | -------- | +| human3d | pose-lift_videopose3d-243frm-supv-cpn-ft_8xb128-200e_h36m | Human 3D pose estimation | VideoPose3D | RTMPose-m | RTMDet-m | + +In addition, users can utilize the CLI tool to display all available aliases with the following command: + +```shell +python demo/inferencer_demo.py --show-alias +``` diff --git a/docs/en/user_guides/mixed_datasets.md b/docs/en/user_guides/mixed_datasets.md index f9bcc93e15..aa18b5e539 100644 --- a/docs/en/user_guides/mixed_datasets.md +++ 
b/docs/en/user_guides/mixed_datasets.md @@ -1,159 +1,159 @@ -# Use Mixed Datasets for Training - -MMPose offers a convenient and versatile solution for training with mixed datasets through its `CombinedDataset` tool. Acting as a wrapper, it allows for the inclusion of multiple datasets and seamlessly reads and converts data from varying sources into a unified format for model training. The data processing pipeline utilizing `CombinedDataset` is illustrated in the following figure. - -![combined_dataset_pipeline](https://user-images.githubusercontent.com/26127467/223333154-fb88e511-810a-423c-b755-c791d296bc43.jpg) - -The following section will provide a detailed description of how to configure `CombinedDataset` with an example that combines the COCO and AI Challenger (AIC) datasets. - -## COCO & AIC example - -The COCO and AIC datasets are both human 2D pose datasets, but they differ in the number and order of keypoints. Here are two instances from the respective datasets. - -
    - -Some keypoints, such as "left hand", are defined in both datasets, but they have different indices. Specifically, the index for the "left hand" keypoint is 9 in the COCO dataset and 5 in the AIC dataset. Furthermore, each dataset contains unique keypoints that are not present in the counterpart dataset. For instance, the facial keypoints (with indices 0~4) are only defined in the COCO dataset, whereas the "head top" (with index 12) and "neck" (with index 13) keypoints are exclusive to the AIC dataset. The relationship between the keypoints in both datasets is illustrated in the following Venn diagram. - -
    - -Next, we will discuss two methods of mixing datasets. - -- [Merge](#merge-aic-into-coco) -- [Combine](#combine-aic-and-coco) - -### Merge AIC into COCO - -If users aim to enhance their model's performance on the COCO dataset or other similar datasets, they can use the AIC dataset as an auxiliary source. To do so, they should select only the keypoints in AIC dataset that are shared with COCO datasets and ignore the rest. Moreover, the indices of these chosen keypoints in the AIC dataset should be transformed to match the corresponding indices in the COCO dataset. - -
    - -In this scenario, no data conversion is required for the elements from the COCO dataset. To configure the COCO dataset, use the following code: - -```python -dataset_coco = dict( - type='CocoDataset', - data_root='data/coco/', - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=[], # Leave the `pipeline` empty, as no conversion is needed -) -``` - -For AIC dataset, the order of the keypoints needs to be transformed. MMPose provides a `KeypointConverter` transform to achieve this. Here's an example of how to configure the AIC sub dataset: - -```python -dataset_aic = dict( - type='AicDataset', - data_root='data/aic/', - ann_file='annotations/aic_train.json', - data_prefix=dict(img='ai_challenger_keypoint_train_20170902/' - 'keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=17, # same as COCO dataset - mapping=[ # includes index pairs for corresponding keypoints - (0, 6), # index 0 (in AIC) -> index 6 (in COCO) - (1, 8), - (2, 10), - (3, 5), - (4, 7), - (5, 9), - (6, 12), - (7, 14), - (8, 16), - (9, 11), - (10, 13), - (11, 15), - ]) - ], -) -``` - -By using the `KeypointConverter`, the indices of keypoints with indices 0 to 11 will be transformed to corresponding indices among 5 to 16. Meanwhile, the keypoints with indices 12 and 13 will be removed. For the target keypoints with indices 0 to 4, which are not defined in the `mapping` argument, they will be set as invisible and won't be used in training. - -Once the sub datasets are configured, the `CombinedDataset` wrapper can be defined as follows: - -```python -dataset = dict( - type='CombinedDataset', - # Since the combined dataset has the same data format as COCO, - # it should use the same meta information for the dataset - metainfo=dict(from_file='configs/_base_/datasets/coco.py'), - datasets=[dataset_coco, dataset_aic], - # The pipeline includes typical transforms, such as loading the - # image and data augmentation - pipeline=train_pipeline, -) -``` - -A complete, ready-to-use [config file](https://github.com/open-mmlab/mmpose/blob/dev-1.x/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge.py) that merges the AIC dataset into the COCO dataset is also available. Users can refer to it for more details and use it as a template to build their own custom dataset. - -### Combine AIC and COCO - -The previously mentioned method discards some annotations in the AIC dataset. If users want to use all the information from both datasets, they can combine the two datasets. This means taking the union set of keypoints in both datasets. - -
    - -In this scenario, both COCO and AIC datasets need to adjust the keypoint indices using `KeypointConverter`: - -```python -dataset_coco = dict( - type='CocoDataset', - data_root='data/coco/', - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=19, # the size of union keypoint set - mapping=[ - (0, 0), - (1, 1), - # omitted - (16, 16), - ]) - ]) - -dataset_aic = dict( - type='AicDataset', - data_root='data/aic/', - ann_file='annotations/aic_train.json', - data_prefix=dict(img='ai_challenger_keypoint_train_20170902/' - 'keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=19, # the size of union keypoint set - mapping=[ - (0, 6), - # omitted - (12, 17), - (13, 18), - ]) - ], -) -``` - -To account for the fact that the combined dataset has 19 keypoints, which is different from either COCO or AIC dataset, a new dataset meta information file is needed to describe the new dataset. An example of such a file is [coco_aic.py](https://github.com/open-mmlab/mmpose/blob/dev-1.x/configs/_base_/datasets/coco_aic.py), which is based on [coco.py](https://github.com/open-mmlab/mmpose/blob/dev-1.x/configs/_base_/datasets/coco.py) but includes several updates: - -- The paper information of AIC dataset has been added. -- The 'head_top' and 'neck' keypoints, which are unique in AIC, have been added to the `keypoint_info`. -- A skeleton link between 'head_top' and 'neck' has been added. -- The `joint_weights` and `sigmas` have been extended for the newly added keypoints. - -Finally, the combined dataset can be configured as: - -```python -dataset = dict( - type='CombinedDataset', - # using new dataset meta information file - metainfo=dict(from_file='configs/_base_/datasets/coco_aic.py'), - datasets=[dataset_coco, dataset_aic], - # The pipeline includes typical transforms, such as loading the - # image and data augmentation - pipeline=train_pipeline, -) -``` - -Additionally, the output channel number of the model should be adjusted as the number of keypoints changes. If the users aim to evaluate the model on the COCO dataset, a subset of model outputs must be chosen. This subset can be customized using the `output_keypoint_indices` argument in `test_cfg`. Users can refer to the [config file](https://github.com/open-mmlab/mmpose/blob/dev-1.x/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine.py), which combines the COCO and AIC dataset, for more details and use it as a template to create their custom dataset. +# Use Mixed Datasets for Training + +MMPose offers a convenient and versatile solution for training with mixed datasets through its `CombinedDataset` tool. Acting as a wrapper, it allows for the inclusion of multiple datasets and seamlessly reads and converts data from varying sources into a unified format for model training. The data processing pipeline utilizing `CombinedDataset` is illustrated in the following figure. + +![combined_dataset_pipeline](https://user-images.githubusercontent.com/26127467/223333154-fb88e511-810a-423c-b755-c791d296bc43.jpg) + +The following section will provide a detailed description of how to configure `CombinedDataset` with an example that combines the COCO and AI Challenger (AIC) datasets. + +## COCO & AIC example + +The COCO and AIC datasets are both human 2D pose datasets, but they differ in the number and order of keypoints. 
Here are two instances from the respective datasets. + +
    + +Some keypoints, such as "left hand", are defined in both datasets, but they have different indices. Specifically, the index for the "left hand" keypoint is 9 in the COCO dataset and 5 in the AIC dataset. Furthermore, each dataset contains unique keypoints that are not present in the counterpart dataset. For instance, the facial keypoints (with indices 0~4) are only defined in the COCO dataset, whereas the "head top" (with index 12) and "neck" (with index 13) keypoints are exclusive to the AIC dataset. The relationship between the keypoints in both datasets is illustrated in the following Venn diagram. + +
    + +Next, we will discuss two methods of mixing datasets. + +- [Merge](#merge-aic-into-coco) +- [Combine](#combine-aic-and-coco) + +### Merge AIC into COCO + +If users aim to enhance their model's performance on the COCO dataset or other similar datasets, they can use the AIC dataset as an auxiliary source. To do so, they should select only the keypoints in AIC dataset that are shared with COCO datasets and ignore the rest. Moreover, the indices of these chosen keypoints in the AIC dataset should be transformed to match the corresponding indices in the COCO dataset. + +
    + +In this scenario, no data conversion is required for the elements from the COCO dataset. To configure the COCO dataset, use the following code: + +```python +dataset_coco = dict( + type='CocoDataset', + data_root='data/coco/', + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=[], # Leave the `pipeline` empty, as no conversion is needed +) +``` + +For AIC dataset, the order of the keypoints needs to be transformed. MMPose provides a `KeypointConverter` transform to achieve this. Here's an example of how to configure the AIC sub dataset: + +```python +dataset_aic = dict( + type='AicDataset', + data_root='data/aic/', + ann_file='annotations/aic_train.json', + data_prefix=dict(img='ai_challenger_keypoint_train_20170902/' + 'keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=17, # same as COCO dataset + mapping=[ # includes index pairs for corresponding keypoints + (0, 6), # index 0 (in AIC) -> index 6 (in COCO) + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + ]) + ], +) +``` + +By using the `KeypointConverter`, the indices of keypoints with indices 0 to 11 will be transformed to corresponding indices among 5 to 16. Meanwhile, the keypoints with indices 12 and 13 will be removed. For the target keypoints with indices 0 to 4, which are not defined in the `mapping` argument, they will be set as invisible and won't be used in training. + +Once the sub datasets are configured, the `CombinedDataset` wrapper can be defined as follows: + +```python +dataset = dict( + type='CombinedDataset', + # Since the combined dataset has the same data format as COCO, + # it should use the same meta information for the dataset + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[dataset_coco, dataset_aic], + # The pipeline includes typical transforms, such as loading the + # image and data augmentation + pipeline=train_pipeline, +) +``` + +A complete, ready-to-use [config file](https://github.com/open-mmlab/mmpose/blob/dev-1.x/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge.py) that merges the AIC dataset into the COCO dataset is also available. Users can refer to it for more details and use it as a template to build their own custom dataset. + +### Combine AIC and COCO + +The previously mentioned method discards some annotations in the AIC dataset. If users want to use all the information from both datasets, they can combine the two datasets. This means taking the union set of keypoints in both datasets. + +
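+The size of this union can be worked out from the keypoint sets described above: COCO defines 17 keypoints, AIC defines 14 (indices 0-13), and 12 of them are shared, so the union contains 17 + 14 - 12 = 19 keypoints. The short sketch below is for illustration only and is not part of any config; it simply spells out where the `num_keypoints=19` used in the following configs comes from:
+
+```python
+# Counts taken from the keypoint description above: COCO has 17 keypoints,
+# AIC has 14 (indices 0-13), and 12 keypoints are present in both datasets.
+num_coco_keypoints = 17
+num_aic_keypoints = 14
+num_shared_keypoints = 12
+
+# Size of the union keypoint set, used as `num_keypoints` in the configs below.
+num_union_keypoints = num_coco_keypoints + num_aic_keypoints - num_shared_keypoints
+print(num_union_keypoints)  # 19
+```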
    + +In this scenario, both COCO and AIC datasets need to adjust the keypoint indices using `KeypointConverter`: + +```python +dataset_coco = dict( + type='CocoDataset', + data_root='data/coco/', + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=19, # the size of union keypoint set + mapping=[ + (0, 0), + (1, 1), + # omitted + (16, 16), + ]) + ]) + +dataset_aic = dict( + type='AicDataset', + data_root='data/aic/', + ann_file='annotations/aic_train.json', + data_prefix=dict(img='ai_challenger_keypoint_train_20170902/' + 'keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=19, # the size of union keypoint set + mapping=[ + (0, 6), + # omitted + (12, 17), + (13, 18), + ]) + ], +) +``` + +To account for the fact that the combined dataset has 19 keypoints, which is different from either COCO or AIC dataset, a new dataset meta information file is needed to describe the new dataset. An example of such a file is [coco_aic.py](https://github.com/open-mmlab/mmpose/blob/dev-1.x/configs/_base_/datasets/coco_aic.py), which is based on [coco.py](https://github.com/open-mmlab/mmpose/blob/dev-1.x/configs/_base_/datasets/coco.py) but includes several updates: + +- The paper information of AIC dataset has been added. +- The 'head_top' and 'neck' keypoints, which are unique in AIC, have been added to the `keypoint_info`. +- A skeleton link between 'head_top' and 'neck' has been added. +- The `joint_weights` and `sigmas` have been extended for the newly added keypoints. + +Finally, the combined dataset can be configured as: + +```python +dataset = dict( + type='CombinedDataset', + # using new dataset meta information file + metainfo=dict(from_file='configs/_base_/datasets/coco_aic.py'), + datasets=[dataset_coco, dataset_aic], + # The pipeline includes typical transforms, such as loading the + # image and data augmentation + pipeline=train_pipeline, +) +``` + +Additionally, the output channel number of the model should be adjusted as the number of keypoints changes. If the users aim to evaluate the model on the COCO dataset, a subset of model outputs must be chosen. This subset can be customized using the `output_keypoint_indices` argument in `test_cfg`. Users can refer to the [config file](https://github.com/open-mmlab/mmpose/blob/dev-1.x/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine.py), which combines the COCO and AIC dataset, for more details and use it as a template to create their custom dataset. diff --git a/docs/en/user_guides/prepare_datasets.md b/docs/en/user_guides/prepare_datasets.md index 2f8ddcbc32..e3f91f0504 100644 --- a/docs/en/user_guides/prepare_datasets.md +++ b/docs/en/user_guides/prepare_datasets.md @@ -1,221 +1,221 @@ -# Prepare Datasets - -In this document, we will give a guide on the process of preparing datasets for the MMPose. Various aspects of dataset preparation will be discussed, including using built-in datasets, creating custom datasets, combining datasets for training, browsing and downloading the datasets. - -## Use built-in datasets - -**Step 1**: Prepare Data - -MMPose supports multiple tasks and corresponding datasets. You can find them in [dataset zoo](https://mmpose.readthedocs.io/en/latest/dataset_zoo.html). To properly prepare your data, please follow the guidelines associated with your chosen dataset. 
- -**Step 2**: Configure Dataset Settings in the Config File - -Before training or evaluating models, you must configure the dataset settings. Take [`td-hm_hrnet-w32_8xb64-210e_coco-256x192.py`](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py) for example, which can be used to train or evaluate the HRNet pose estimator on COCO dataset. We will go through the dataset configuration. - -- Basic Dataset Arguments - - ```python - # base dataset settings - dataset_type = 'CocoDataset' - data_mode = 'topdown' - data_root = 'data/coco/' - ``` - - - `dataset_type` specifies the class name of the dataset. Users can refer to [Datasets APIs](https://mmpose.readthedocs.io/en/latest/api.html#datasets) to find the class name of their desired dataset. - - `data_mode` determines the output format of the dataset, with two options available: `'topdown'` and `'bottomup'`. If `data_mode='topdown'`, the data element represents a single instance with its pose; otherwise, the data element is an entire image containing multiple instances and poses. - - `data_root` designates the root directory of the dataset. - -- Data Processing Pipelines - - ```python - # pipelines - train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') - ] - val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') - ] - ``` - - The `train_pipeline` and `val_pipeline` define the steps to process data elements during the training and evaluation phases, respectively. In addition to loading images and packing inputs, the `train_pipeline` primarily consists of data augmentation techniques and target generator, while the `val_pipeline` focuses on transforming data elements into a unified format. - -- Data Loaders - - ```python - # data loaders - train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) - val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) - test_dataloader = val_dataloader - ``` - - This section is crucial for configuring the dataset in the config file. In addition to the basic dataset arguments and pipelines discussed earlier, other important parameters are defined here. The `batch_size` determines the batch size per GPU; the `ann_file` indicates the annotation file for the dataset; and `data_prefix` specifies the image folder. The `bbox_file`, which supplies detected bounding box information, is only used in the val/test data loader for top-down datasets. 
- -We recommend copying the dataset configuration from provided config files that use the same dataset, rather than writing it from scratch, in order to minimize potential errors. By doing so, users can simply make the necessary modifications as needed, ensuring a more reliable and efficient setup process. - -## Use a custom dataset - -The [Customize Datasets](../advanced_guides/customize_datasets.md) guide provides detailed information on how to build a custom dataset. In this section, we will highlight some key tips for using and configuring custom datasets. - -- Determine the dataset class name. If you reorganize your dataset into the COCO format, you can simply use `CocoDataset` as the value for `dataset_type`. Otherwise, you will need to use the name of the custom dataset class you added. - -- Specify the meta information config file. MMPose 1.x employs a different strategy for specifying meta information compared to MMPose 0.x. In MMPose 1.x, users can specify the meta information config file as follows: - - ```python - train_dataloader = dict( - ... - dataset=dict( - type=dataset_type, - data_root='root/of/your/train/data', - ann_file='path/to/your/train/json', - data_prefix=dict(img='path/to/your/train/img'), - # specify dataset meta information - metainfo=dict(from_file='configs/_base_/datasets/custom.py'), - ...), - ) - ``` - - Note that the argument `metainfo` must be specified in the val/test data loaders as well. - -## Use mixed datasets for training - -MMPose offers a convenient and versatile solution for training with mixed datasets. Please refer to [Use Mixed Datasets for Training](./mixed_datasets.md). - -## Browse dataset - -`tools/analysis_tools/browse_dataset.py` helps the user to browse a pose dataset visually, or save the image to a designated directory. - -```shell -python tools/misc/browse_dataset.py ${CONFIG} [-h] [--output-dir ${OUTPUT_DIR}] [--not-show] [--phase ${PHASE}] [--mode ${MODE}] [--show-interval ${SHOW_INTERVAL}] -``` - -| ARGS | Description | -| -------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- | -| `CONFIG` | The path to the config file. | -| `--output-dir OUTPUT_DIR` | The target folder to save visualization results. If not specified, the visualization results will not be saved. | -| `--not-show` | Do not show the visualization results in an external window. | -| `--phase {train, val, test}` | Options for dataset. | -| `--mode {original, transformed}` | Specify the type of visualized images. `original` means to show images without pre-processing; `transformed` means to show images are pre-processed. | -| `--show-interval SHOW_INTERVAL` | Time interval between visualizing two images. | - -For instance, users who want to visualize images and annotations in COCO dataset use: - -```shell -python tools/misc/browse_dataset.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-e210_coco-256x192.py --mode original -``` - -The bounding boxes and keypoints will be plotted on the original image. Following is an example: -![original_coco](https://user-images.githubusercontent.com/26127467/187383698-7e518f21-b4cc-4712-9e97-99ddd8f0e437.jpg) - -The original images need to be processed before being fed into models. To visualize pre-processed images and annotations, users need to modify the argument `mode` to `transformed`. 
For example: - -```shell -python tools/misc/browse_dataset.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-e210_coco-256x192.py --mode transformed -``` - -Here is a processed sample - -![transformed_coco](https://user-images.githubusercontent.com/26127467/187386652-bd47335d-797c-4e8c-b823-2a4915f9812f.jpg) - -The heatmap target will be visualized together if it is generated in the pipeline. - -## Download dataset via MIM - -By using [OpenDataLab](https://opendatalab.com/), you can obtain free formatted datasets in various fields. Through the search function of the platform, you may address the dataset they look for quickly and easily. Using the formatted datasets from the platform, you can efficiently conduct tasks across datasets. - -If you use MIM to download, make sure that the version is greater than v0.3.8. You can use the following command to update, install, login and download the dataset: - -```shell -# upgrade your MIM -pip install -U openmim - -# install OpenDataLab CLI tools -pip install -U opendatalab -# log in OpenDataLab, registry -odl login - -# download coco2017 and preprocess by MIM -mim download mmpose --dataset coco2017 -``` - -### Supported datasets - -Here is the list of supported datasets, we will continue to update it in the future. - -#### Body - -| Dataset name | Download command | -| ------------- | ----------------------------------------- | -| COCO 2017 | `mim download mmpose --dataset coco2017` | -| MPII | `mim download mmpose --dataset mpii` | -| AI Challenger | `mim download mmpose --dataset aic` | -| CrowdPose | `mim download mmpose --dataset crowdpose` | - -#### Face - -| Dataset name | Download command | -| ------------ | ------------------------------------ | -| LaPa | `mim download mmpose --dataset lapa` | -| 300W | `mim download mmpose --dataset 300w` | -| WFLW | `mim download mmpose --dataset wflw` | - -#### Hand - -| Dataset name | Download command | -| ------------ | ------------------------------------------ | -| OneHand10K | `mim download mmpose --dataset onehand10k` | -| FreiHand | `mim download mmpose --dataset freihand` | -| HaGRID | `mim download mmpose --dataset hagrid` | - -#### Whole Body - -| Dataset name | Download command | -| ------------ | ------------------------------------- | -| Halpe | `mim download mmpose --dataset halpe` | - -#### Animal - -| Dataset name | Download command | -| ------------ | ------------------------------------- | -| AP-10K | `mim download mmpose --dataset ap10k` | - -#### Fashion - -Coming Soon +# Prepare Datasets + +In this document, we will give a guide on the process of preparing datasets for the MMPose. Various aspects of dataset preparation will be discussed, including using built-in datasets, creating custom datasets, combining datasets for training, browsing and downloading the datasets. + +## Use built-in datasets + +**Step 1**: Prepare Data + +MMPose supports multiple tasks and corresponding datasets. You can find them in [dataset zoo](https://mmpose.readthedocs.io/en/latest/dataset_zoo.html). To properly prepare your data, please follow the guidelines associated with your chosen dataset. + +**Step 2**: Configure Dataset Settings in the Config File + +Before training or evaluating models, you must configure the dataset settings. Take [`td-hm_hrnet-w32_8xb64-210e_coco-256x192.py`](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py) for example, which can be used to train or evaluate the HRNet pose estimator on COCO dataset. 
We will go through the dataset configuration. + +- Basic Dataset Arguments + + ```python + # base dataset settings + dataset_type = 'CocoDataset' + data_mode = 'topdown' + data_root = 'data/coco/' + ``` + + - `dataset_type` specifies the class name of the dataset. Users can refer to [Datasets APIs](https://mmpose.readthedocs.io/en/latest/api.html#datasets) to find the class name of their desired dataset. + - `data_mode` determines the output format of the dataset, with two options available: `'topdown'` and `'bottomup'`. If `data_mode='topdown'`, the data element represents a single instance with its pose; otherwise, the data element is an entire image containing multiple instances and poses. + - `data_root` designates the root directory of the dataset. + +- Data Processing Pipelines + + ```python + # pipelines + train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') + ] + val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') + ] + ``` + + The `train_pipeline` and `val_pipeline` define the steps to process data elements during the training and evaluation phases, respectively. In addition to loading images and packing inputs, the `train_pipeline` primarily consists of data augmentation techniques and target generator, while the `val_pipeline` focuses on transforming data elements into a unified format. + +- Data Loaders + + ```python + # data loaders + train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) + val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) + test_dataloader = val_dataloader + ``` + + This section is crucial for configuring the dataset in the config file. In addition to the basic dataset arguments and pipelines discussed earlier, other important parameters are defined here. The `batch_size` determines the batch size per GPU; the `ann_file` indicates the annotation file for the dataset; and `data_prefix` specifies the image folder. The `bbox_file`, which supplies detected bounding box information, is only used in the val/test data loader for top-down datasets. + +We recommend copying the dataset configuration from provided config files that use the same dataset, rather than writing it from scratch, in order to minimize potential errors. By doing so, users can simply make the necessary modifications as needed, ensuring a more reliable and efficient setup process. 
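+Before moving on, it can be helpful to check that the dataset part of the config actually builds as expected. The snippet below is a minimal sketch rather than an official MMPose tool: it assumes the COCO data from Step 1 is already in place and that your MMPose version provides `register_all_modules`. The `browse_dataset.py` script described later in this document offers a more thorough, visual check.
+
+```python
+# Minimal sanity check: build the training dataset described in the config
+# and inspect one processed sample.
+from mmengine.config import Config
+
+from mmpose.registry import DATASETS
+from mmpose.utils import register_all_modules
+
+register_all_modules()  # register MMPose datasets and transforms
+
+cfg = Config.fromfile(
+    'configs/body_2d_keypoint/topdown_heatmap/coco/'
+    'td-hm_hrnet-w32_8xb64-210e_coco-256x192.py')
+
+dataset = DATASETS.build(cfg.train_dataloader.dataset)
+print('number of samples:', len(dataset))
+print('metainfo keys:', sorted(dataset.metainfo.keys()))
+
+sample = dataset[0]  # a dict produced by `train_pipeline` (see above)
+print('sample keys:', sorted(sample.keys()))
+```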
+ +## Use a custom dataset + +The [Customize Datasets](../advanced_guides/customize_datasets.md) guide provides detailed information on how to build a custom dataset. In this section, we will highlight some key tips for using and configuring custom datasets. + +- Determine the dataset class name. If you reorganize your dataset into the COCO format, you can simply use `CocoDataset` as the value for `dataset_type`. Otherwise, you will need to use the name of the custom dataset class you added. + +- Specify the meta information config file. MMPose 1.x employs a different strategy for specifying meta information compared to MMPose 0.x. In MMPose 1.x, users can specify the meta information config file as follows: + + ```python + train_dataloader = dict( + ... + dataset=dict( + type=dataset_type, + data_root='root/of/your/train/data', + ann_file='path/to/your/train/json', + data_prefix=dict(img='path/to/your/train/img'), + # specify dataset meta information + metainfo=dict(from_file='configs/_base_/datasets/custom.py'), + ...), + ) + ``` + + Note that the argument `metainfo` must be specified in the val/test data loaders as well. + +## Use mixed datasets for training + +MMPose offers a convenient and versatile solution for training with mixed datasets. Please refer to [Use Mixed Datasets for Training](./mixed_datasets.md). + +## Browse dataset + +`tools/analysis_tools/browse_dataset.py` helps the user to browse a pose dataset visually, or save the image to a designated directory. + +```shell +python tools/misc/browse_dataset.py ${CONFIG} [-h] [--output-dir ${OUTPUT_DIR}] [--not-show] [--phase ${PHASE}] [--mode ${MODE}] [--show-interval ${SHOW_INTERVAL}] +``` + +| ARGS | Description | +| -------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- | +| `CONFIG` | The path to the config file. | +| `--output-dir OUTPUT_DIR` | The target folder to save visualization results. If not specified, the visualization results will not be saved. | +| `--not-show` | Do not show the visualization results in an external window. | +| `--phase {train, val, test}` | Options for dataset. | +| `--mode {original, transformed}` | Specify the type of visualized images. `original` means to show images without pre-processing; `transformed` means to show images are pre-processed. | +| `--show-interval SHOW_INTERVAL` | Time interval between visualizing two images. | + +For instance, users who want to visualize images and annotations in COCO dataset use: + +```shell +python tools/misc/browse_dataset.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-e210_coco-256x192.py --mode original +``` + +The bounding boxes and keypoints will be plotted on the original image. Following is an example: +![original_coco](https://user-images.githubusercontent.com/26127467/187383698-7e518f21-b4cc-4712-9e97-99ddd8f0e437.jpg) + +The original images need to be processed before being fed into models. To visualize pre-processed images and annotations, users need to modify the argument `mode` to `transformed`. 
For example: + +```shell +python tools/misc/browse_dataset.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-e210_coco-256x192.py --mode transformed +``` + +Here is a processed sample + +![transformed_coco](https://user-images.githubusercontent.com/26127467/187386652-bd47335d-797c-4e8c-b823-2a4915f9812f.jpg) + +The heatmap target will be visualized together if it is generated in the pipeline. + +## Download dataset via MIM + +By using [OpenDataLab](https://opendatalab.com/), you can obtain free formatted datasets in various fields. Through the search function of the platform, you may address the dataset they look for quickly and easily. Using the formatted datasets from the platform, you can efficiently conduct tasks across datasets. + +If you use MIM to download, make sure that the version is greater than v0.3.8. You can use the following command to update, install, login and download the dataset: + +```shell +# upgrade your MIM +pip install -U openmim + +# install OpenDataLab CLI tools +pip install -U opendatalab +# log in OpenDataLab, registry +odl login + +# download coco2017 and preprocess by MIM +mim download mmpose --dataset coco2017 +``` + +### Supported datasets + +Here is the list of supported datasets, we will continue to update it in the future. + +#### Body + +| Dataset name | Download command | +| ------------- | ----------------------------------------- | +| COCO 2017 | `mim download mmpose --dataset coco2017` | +| MPII | `mim download mmpose --dataset mpii` | +| AI Challenger | `mim download mmpose --dataset aic` | +| CrowdPose | `mim download mmpose --dataset crowdpose` | + +#### Face + +| Dataset name | Download command | +| ------------ | ------------------------------------ | +| LaPa | `mim download mmpose --dataset lapa` | +| 300W | `mim download mmpose --dataset 300w` | +| WFLW | `mim download mmpose --dataset wflw` | + +#### Hand + +| Dataset name | Download command | +| ------------ | ------------------------------------------ | +| OneHand10K | `mim download mmpose --dataset onehand10k` | +| FreiHand | `mim download mmpose --dataset freihand` | +| HaGRID | `mim download mmpose --dataset hagrid` | + +#### Whole Body + +| Dataset name | Download command | +| ------------ | ------------------------------------- | +| Halpe | `mim download mmpose --dataset halpe` | + +#### Animal + +| Dataset name | Download command | +| ------------ | ------------------------------------- | +| AP-10K | `mim download mmpose --dataset ap10k` | + +#### Fashion + +Coming Soon diff --git a/docs/en/user_guides/train_and_test.md b/docs/en/user_guides/train_and_test.md index 6bcc88fc3b..ef317ae321 100644 --- a/docs/en/user_guides/train_and_test.md +++ b/docs/en/user_guides/train_and_test.md @@ -1,369 +1,369 @@ -# Training and Testing - -## Launch training - -### Train with your PC - -You can use `tools/train.py` to train a model on a single machine with a CPU and optionally a GPU. - -Here is the full usage of the script: - -```shell -python tools/train.py ${CONFIG_FILE} [ARGS] -``` - -```{note} -By default, MMPose prefers GPU to CPU. If you want to train a model on CPU, please empty `CUDA_VISIBLE_DEVICES` or set it to -1 to make GPU invisible to the program. 
- -``` - -```shell -CUDA_VISIBLE_DEVICES=-1 python tools/train.py ${CONFIG_FILE} [ARGS] -``` - -| ARGS | Description | -| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `CONFIG_FILE` | The path to the config file. | -| `--work-dir WORK_DIR` | The target folder to save logs and checkpoints. Defaults to a folder with the same name as the config file under `./work_dirs`. | -| `--resume [RESUME]` | Resume training. If specify a path, resume from it, while if not specify, try to auto resume from the latest checkpoint. | -| `--amp` | Enable automatic-mixed-precision training. | -| `--no-validate` | **Not suggested**. Disable checkpoint evaluation during training. | -| `--auto-scale-lr` | Automatically rescale the learning rate according to the actual batch size and the original batch size. | -| `--cfg-options CFG_OPTIONS` | Override some settings in the used config, the key-value pair in xxx=yyy format will be merged into the config file. If the value to be overwritten is a list, it should be of the form of either `key="[a,b]"` or `key=a,b`. The argument also allows nested list/tuple values, e.g. `key="[(a,b),(c,d)]"`. Note that quotation marks are necessary and that **no white space is allowed**. | -| `--show-dir SHOW_DIR` | The directory to save the result visualization images generated during validation. | -| `--show` | Visualize the prediction result in a window. | -| `--interval INTERVAL` | The interval of samples to visualize. | -| `--wait-time WAIT_TIME` | The display time of every window (in seconds). Defaults to 1. | -| `--launcher {none,pytorch,slurm,mpi}` | Options for job launcher. | - -### Train with multiple GPUs - -We provide a shell script to start a multi-GPUs task with `torch.distributed.launch`. - -```shell -bash ./tools/dist_train.sh ${CONFIG_FILE} ${GPU_NUM} [PY_ARGS] -``` - -| ARGS | Description | -| ------------- | ---------------------------------------------------------------------------------- | -| `CONFIG_FILE` | The path to the config file. | -| `GPU_NUM` | The number of GPUs to be used. | -| `[PYARGS]` | The other optional arguments of `tools/train.py`, see [here](#train-with-your-pc). | - -You can also specify extra arguments of the launcher by environment variables. For example, change the -communication port of the launcher to 29666 by the below command: - -```shell -PORT=29666 bash ./tools/dist_train.sh ${CONFIG_FILE} ${GPU_NUM} [PY_ARGS] -``` - -If you want to startup multiple training jobs and use different GPUs, you can launch them by specifying -different port and visible devices. 
- -```shell -CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 bash ./tools/dist_train.sh ${CONFIG_FILE1} 4 [PY_ARGS] -CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=29501 bash ./tools/dist_train.sh ${CONFIG_FILE2} 4 [PY_ARGS] -``` - -### Train with multiple machines - -#### Multiple machines in the same network - -If you launch a training job with multiple machines connected with ethernet, you can run the following commands: - -On the first machine: - -```shell -NNODES=2 NODE_RANK=0 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR bash tools/dist_train.sh $CONFIG $GPUS -``` - -On the second machine: - -```shell -NNODES=2 NODE_RANK=1 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR bash tools/dist_train.sh $CONFIG $GPUS -``` - -Compared with multi-GPUs in a single machine, you need to specify some extra environment variables: - -| ENV_VARS | Description | -| ------------- | ---------------------------------------------------------------------------- | -| `NNODES` | The total number of machines. | -| `NODE_RANK` | The index of the local machine. | -| `PORT` | The communication port, it should be the same in all machines. | -| `MASTER_ADDR` | The IP address of the master machine, it should be the same in all machines. | - -Usually, it is slow if you do not have high-speed networking like InfiniBand. - -#### Multiple machines managed with slurm - -If you run MMPose on a cluster managed with [slurm](https://slurm.schedmd.com/), you can use the script `slurm_train.sh`. - -```shell -[ENV_VARS] ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${WORK_DIR} [PY_ARGS] -``` - -Here are the arguments description of the script. - -| ARGS | Description | -| ------------- | ---------------------------------------------------------------------------------- | -| `PARTITION` | The partition to use in your cluster. | -| `JOB_NAME` | The name of your job, you can name it as you like. | -| `CONFIG_FILE` | The path to the config file. | -| `WORK_DIR` | The target folder to save logs and checkpoints. | -| `[PYARGS]` | The other optional arguments of `tools/train.py`, see [here](#train-with-your-pc). | - -Here are the environment variables that can be used to configure the slurm job. - -| ENV_VARS | Description | -| --------------- | ---------------------------------------------------------------------------------------------------------- | -| `GPUS` | The total number of GPUs to be used. Defaults to 8. | -| `GPUS_PER_NODE` | The number of GPUs to be allocated per node. Defaults to 8. | -| `CPUS_PER_TASK` | The number of CPUs to be allocated per task (Usually one GPU corresponds to one task). Defaults to 5. | -| `SRUN_ARGS` | The other arguments of `srun`. Available options can be found [here](https://slurm.schedmd.com/srun.html). | - -## Resume training - -Resume training means to continue training from the state saved from one of the previous trainings, where the state includes the model weights, the state of the optimizer and the optimizer parameter adjustment strategy. - -### Automatically resume training - -Users can add `--resume` to the end of the training command to resume training. The program will automatically load the latest weight file from `work_dirs` to resume training. If there is a latest `checkpoint` in `work_dirs` (e.g. the training was interrupted during the previous training), the training will be resumed from the `checkpoint`. Otherwise (e.g. the previous training did not save `checkpoint` in time or a new training task was started), the training will be restarted. 
- -Here is an example of resuming training: - -```shell -python tools/train.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py --resume -``` - -### Specify the checkpoint to resume training - -You can also specify the `checkpoint` path for `--resume`. MMPose will automatically read the `checkpoint` and resume training from it. The command is as follows: - -```shell -python tools/train.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py \ - --resume work_dirs/td-hm_res50_8xb64-210e_coco-256x192/latest.pth -``` - -If you hope to manually specify the `checkpoint` path in the config file, in addition to setting `resume=True`, you also need to set the `load_from`. - -It should be noted that if only `load_from` is set without setting `resume=True`, only the weights in the `checkpoint` will be loaded and the training will be restarted from scratch, instead of continuing from the previous state. - -The following example is equivalent to the example above that specifies the `--resume` parameter: - -```python -resume = True -load_from = 'work_dirs/td-hm_res50_8xb64-210e_coco-256x192/latest.pth' -# model settings -model = dict( - ## omitted ## - ) -``` - -## Freeze partial parameters during training - -In some scenarios, it might be desirable to freeze certain parameters of a model during training to fine-tune specific parts or to prevent overfitting. In MMPose, you can set different hyperparameters for any module in the model by setting custom_keys in `paramwise_cfg`. This allows you to control the learning rate and decay coefficient for specific parts of the model. - -For example, if you want to freeze the parameters in `backbone.layer0` and `backbone.layer1`, you can modify the optimizer wrapper in the config file as: - -```python -optim_wrapper = dict( - optimizer=dict(...), - paramwise_cfg=dict( - custom_keys={ - 'backbone.layer0': dict(lr_mult=0, decay_mult=0), - 'backbone.layer0': dict(lr_mult=0, decay_mult=0), - })) -``` - -This configuration will freeze the parameters in `backbone.layer0` and `backbone.layer1` by setting their learning rate and decay coefficient to 0. By using this approach, you can effectively control the training process and fine-tune specific parts of your model as needed. - -## Automatic Mixed Precision (AMP) training - -Mixed precision training can reduce training time and storage requirements without changing the model or reducing the model training accuracy, thus supporting larger batch sizes, larger models, and larger input sizes. - -To enable Automatic Mixing Precision (AMP) training, add `--amp` to the end of the training command, which is as follows: - -```shell -python tools/train.py ${CONFIG_FILE} --amp -``` - -Specific examples are as follows: - -```shell -python tools/train.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py --amp -``` - -## Set the random seed - -If you want to specify the random seed during training, you can use the following command: - -```shell -python ./tools/train.py \ - ${CONFIG} \ # config file - --cfg-options randomness.seed=2023 \ # set the random seed = 2023 - [randomness.diff_rank_seed=True] \ # Set different seeds according to rank. - [randomness.deterministic=True] # Set the cuDNN backend deterministic option to True -# `[]` stands for optional parameters, when actually entering the command line, you do not need to enter `[]` -``` - -`randomness` has three parameters that can be set, with the following meanings. 
- -- `randomness.seed=2023`, set the random seed to `2023`. - -- `randomness.diff_rank_seed=True`, set different seeds according to global `rank`. Defaults to `False`. - -- `randomness.deterministic=True`, set the deterministic option for `cuDNN` backend, i.e., set `torch.backends.cudnn.deterministic` to `True` and `torch.backends.cudnn.benchmark` to `False`. Defaults to `False`. See [Pytorch Randomness](https://pytorch.org/docs/stable/notes/randomness.html) for more details. - -## Visualize training process - -Monitoring the training process is essential for understanding the performance of your model and making necessary adjustments. In this section, we will introduce two methods to visualize the training process of your MMPose model: TensorBoard and the MMEngine Visualizer. - -### TensorBoard - -TensorBoard is a powerful tool that allows you to visualize the changes in losses during training. To enable TensorBoard visualization, you may need to: - -1. Install TensorBoard environment - - ```shell - pip install tensorboard - ``` - -2. Enable TensorBoard in the config file - - ```python - visualizer = dict(vis_backends=[ - dict(type='LocalVisBackend'), - dict(type='TensorboardVisBackend'), - ]) - ``` - -The event file generated by TensorBoard will be save under the experiment log folder `${WORK_DIR}`, which defaults to `work_dir/${CONFIG}` or can be specified using the `--work-dir` option. To visualize the training process, use the following command: - -```shell -tensorboard --logdir ${WORK_DIR}/${TIMESTAMP}/vis_data -``` - -### MMEngine visualizer - -MMPose also supports visualizing model inference results during validation. To activate this function, please use the `--show` option or set `--show-dir` when launching training. This feature provides an effective way to analyze the model's performance on specific examples and make any necessary adjustments. - -## Test your model - -### Test with your PC - -You can use `tools/test.py` to test a model on a single machine with a CPU and optionally a GPU. - -Here is the full usage of the script: - -```shell -python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [ARGS] -``` - -```{note} -By default, MMPose prefers GPU to CPU. If you want to test a model on CPU, please empty `CUDA_VISIBLE_DEVICES` or set it to -1 to make GPU invisible to the program. - -``` - -```shell -CUDA_VISIBLE_DEVICES=-1 python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [ARGS] -``` - -| ARGS | Description | -| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `CONFIG_FILE` | The path to the config file. | -| `CHECKPOINT_FILE` | The path to the checkpoint file (It can be a http link, and you can find checkpoints [here](https://MMPose.readthedocs.io/en/latest/model_zoo.html)). | -| `--work-dir WORK_DIR` | The directory to save the file containing evaluation metrics. | -| `--out OUT` | The path to save the file containing evaluation metrics. | -| `--dump DUMP` | The path to dump all outputs of the model for offline evaluation. | -| `--cfg-options CFG_OPTIONS` | Override some settings in the used config, the key-value pair in xxx=yyy format will be merged into the config file. If the value to be overwritten is a list, it should be of the form of either `key="[a,b]"` or `key=a,b`. The argument also allows nested list/tuple values, e.g. `key="[(a,b),(c,d)]"`. 
Note that quotation marks are necessary and that no white space is allowed. | -| `--show-dir SHOW_DIR` | The directory to save the result visualization images. | -| `--show` | Visualize the prediction result in a window. | -| `--interval INTERVAL` | The interval of samples to visualize. | -| `--wait-time WAIT_TIME` | The display time of every window (in seconds). Defaults to 1. | -| `--launcher {none,pytorch,slurm,mpi}` | Options for job launcher. | - -### Test with multiple GPUs - -We provide a shell script to start a multi-GPUs task with `torch.distributed.launch`. - -```shell -bash ./tools/dist_test.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${GPU_NUM} [PY_ARGS] -``` - -| ARGS | Description | -| ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- | -| `CONFIG_FILE` | The path to the config file. | -| `CHECKPOINT_FILE` | The path to the checkpoint file (It can be a http link, and you can find checkpoints [here](https://mmpose.readthedocs.io/en/latest/model_zoo.html)). | -| `GPU_NUM` | The number of GPUs to be used. | -| `[PYARGS]` | The other optional arguments of `tools/test.py`, see [here](#test-with-your-pc). | - -You can also specify extra arguments of the launcher by environment variables. For example, change the -communication port of the launcher to 29666 by the below command: - -```shell -PORT=29666 bash ./tools/dist_test.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${GPU_NUM} [PY_ARGS] -``` - -If you want to startup multiple test jobs and use different GPUs, you can launch them by specifying -different port and visible devices. - -```shell -CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 bash ./tools/dist_test.sh ${CONFIG_FILE1} ${CHECKPOINT_FILE} 4 [PY_ARGS] -CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 bash ./tools/dist_test.sh ${CONFIG_FILE2} ${CHECKPOINT_FILE} 4 [PY_ARGS] -``` - -### Test with multiple machines - -#### Multiple machines in the same network - -If you launch a test job with multiple machines connected with ethernet, you can run the following commands: - -On the first machine: - -```shell -NNODES=2 NODE_RANK=0 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR bash tools/dist_test.sh $CONFIG $CHECKPOINT_FILE $GPUS -``` - -On the second machine: - -```shell -NNODES=2 NODE_RANK=1 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR bash tools/dist_test.sh $CONFIG $CHECKPOINT_FILE $GPUS -``` - -Compared with multi-GPUs in a single machine, you need to specify some extra environment variables: - -| ENV_VARS | Description | -| ------------- | ---------------------------------------------------------------------------- | -| `NNODES` | The total number of machines. | -| `NODE_RANK` | The index of the local machine. | -| `PORT` | The communication port, it should be the same in all machines. | -| `MASTER_ADDR` | The IP address of the master machine, it should be the same in all machines. | - -Usually, it is slow if you do not have high-speed networking like InfiniBand. - -#### Multiple machines managed with slurm - -If you run MMPose on a cluster managed with [slurm](https://slurm.schedmd.com/), you can use the script `slurm_test.sh`. - -```shell -[ENV_VARS] ./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${CHECKPOINT_FILE} [PY_ARGS] -``` - -Here are the argument descriptions of the script. 
- -| ARGS | Description | -| ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- | -| `PARTITION` | The partition to use in your cluster. | -| `JOB_NAME` | The name of your job, you can name it as you like. | -| `CONFIG_FILE` | The path to the config file. | -| `CHECKPOINT_FILE` | The path to the checkpoint file (It can be a http link, and you can find checkpoints [here](https://MMPose.readthedocs.io/en/latest/model_zoo.html)). | -| `[PYARGS]` | The other optional arguments of `tools/test.py`, see [here](#test-with-your-pc). | - -Here are the environment variables that can be used to configure the slurm job. - -| ENV_VARS | Description | -| --------------- | ---------------------------------------------------------------------------------------------------------- | -| `GPUS` | The total number of GPUs to be used. Defaults to 8. | -| `GPUS_PER_NODE` | The number of GPUs to be allocated per node. Defaults to 8. | -| `CPUS_PER_TASK` | The number of CPUs to be allocated per task (Usually one GPU corresponds to one task). Defaults to 5. | -| `SRUN_ARGS` | The other arguments of `srun`. Available options can be found [here](https://slurm.schedmd.com/srun.html). | +# Training and Testing + +## Launch training + +### Train with your PC + +You can use `tools/train.py` to train a model on a single machine with a CPU and optionally a GPU. + +Here is the full usage of the script: + +```shell +python tools/train.py ${CONFIG_FILE} [ARGS] +``` + +```{note} +By default, MMPose prefers GPU to CPU. If you want to train a model on CPU, please empty `CUDA_VISIBLE_DEVICES` or set it to -1 to make GPU invisible to the program. + +``` + +```shell +CUDA_VISIBLE_DEVICES=-1 python tools/train.py ${CONFIG_FILE} [ARGS] +``` + +| ARGS | Description | +| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `CONFIG_FILE` | The path to the config file. | +| `--work-dir WORK_DIR` | The target folder to save logs and checkpoints. Defaults to a folder with the same name as the config file under `./work_dirs`. | +| `--resume [RESUME]` | Resume training. If specify a path, resume from it, while if not specify, try to auto resume from the latest checkpoint. | +| `--amp` | Enable automatic-mixed-precision training. | +| `--no-validate` | **Not suggested**. Disable checkpoint evaluation during training. | +| `--auto-scale-lr` | Automatically rescale the learning rate according to the actual batch size and the original batch size. | +| `--cfg-options CFG_OPTIONS` | Override some settings in the used config, the key-value pair in xxx=yyy format will be merged into the config file. If the value to be overwritten is a list, it should be of the form of either `key="[a,b]"` or `key=a,b`. The argument also allows nested list/tuple values, e.g. `key="[(a,b),(c,d)]"`. Note that quotation marks are necessary and that **no white space is allowed**. | +| `--show-dir SHOW_DIR` | The directory to save the result visualization images generated during validation. | +| `--show` | Visualize the prediction result in a window. | +| `--interval INTERVAL` | The interval of samples to visualize. | +| `--wait-time WAIT_TIME` | The display time of every window (in seconds). Defaults to 1. | +| `--launcher {none,pytorch,slurm,mpi}` | Options for job launcher. 
| + +### Train with multiple GPUs + +We provide a shell script to start a multi-GPUs task with `torch.distributed.launch`. + +```shell +bash ./tools/dist_train.sh ${CONFIG_FILE} ${GPU_NUM} [PY_ARGS] +``` + +| ARGS | Description | +| ------------- | ---------------------------------------------------------------------------------- | +| `CONFIG_FILE` | The path to the config file. | +| `GPU_NUM` | The number of GPUs to be used. | +| `[PYARGS]` | The other optional arguments of `tools/train.py`, see [here](#train-with-your-pc). | + +You can also specify extra arguments of the launcher by environment variables. For example, change the +communication port of the launcher to 29666 by the below command: + +```shell +PORT=29666 bash ./tools/dist_train.sh ${CONFIG_FILE} ${GPU_NUM} [PY_ARGS] +``` + +If you want to startup multiple training jobs and use different GPUs, you can launch them by specifying +different port and visible devices. + +```shell +CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 bash ./tools/dist_train.sh ${CONFIG_FILE1} 4 [PY_ARGS] +CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=29501 bash ./tools/dist_train.sh ${CONFIG_FILE2} 4 [PY_ARGS] +``` + +### Train with multiple machines + +#### Multiple machines in the same network + +If you launch a training job with multiple machines connected with ethernet, you can run the following commands: + +On the first machine: + +```shell +NNODES=2 NODE_RANK=0 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR bash tools/dist_train.sh $CONFIG $GPUS +``` + +On the second machine: + +```shell +NNODES=2 NODE_RANK=1 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR bash tools/dist_train.sh $CONFIG $GPUS +``` + +Compared with multi-GPUs in a single machine, you need to specify some extra environment variables: + +| ENV_VARS | Description | +| ------------- | ---------------------------------------------------------------------------- | +| `NNODES` | The total number of machines. | +| `NODE_RANK` | The index of the local machine. | +| `PORT` | The communication port, it should be the same in all machines. | +| `MASTER_ADDR` | The IP address of the master machine, it should be the same in all machines. | + +Usually, it is slow if you do not have high-speed networking like InfiniBand. + +#### Multiple machines managed with slurm + +If you run MMPose on a cluster managed with [slurm](https://slurm.schedmd.com/), you can use the script `slurm_train.sh`. + +```shell +[ENV_VARS] ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${WORK_DIR} [PY_ARGS] +``` + +Here are the arguments description of the script. + +| ARGS | Description | +| ------------- | ---------------------------------------------------------------------------------- | +| `PARTITION` | The partition to use in your cluster. | +| `JOB_NAME` | The name of your job, you can name it as you like. | +| `CONFIG_FILE` | The path to the config file. | +| `WORK_DIR` | The target folder to save logs and checkpoints. | +| `[PYARGS]` | The other optional arguments of `tools/train.py`, see [here](#train-with-your-pc). | + +Here are the environment variables that can be used to configure the slurm job. + +| ENV_VARS | Description | +| --------------- | ---------------------------------------------------------------------------------------------------------- | +| `GPUS` | The total number of GPUs to be used. Defaults to 8. | +| `GPUS_PER_NODE` | The number of GPUs to be allocated per node. Defaults to 8. | +| `CPUS_PER_TASK` | The number of CPUs to be allocated per task (Usually one GPU corresponds to one task). Defaults to 5. 
+| `SRUN_ARGS` | The other arguments of `srun`. Available options can be found [here](https://slurm.schedmd.com/srun.html). |
+
+## Resume training
+
+Resume training means continuing from a state saved during a previous training run, where the state includes the model weights, the optimizer state, and the state of the parameter schedulers (e.g. the learning rate schedule).
+
+### Automatically resume training
+
+Users can add `--resume` to the end of the training command to resume training. The program will automatically load the latest checkpoint from `work_dirs` and resume from it. If a latest `checkpoint` exists in `work_dirs` (e.g. the previous training was interrupted), training will be resumed from that `checkpoint`. Otherwise (e.g. the previous training did not save a `checkpoint` in time, or a new training task was started), training will be restarted from scratch.
+
+Here is an example of resuming training:
+
+```shell
+python tools/train.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py --resume
+```
+
+### Specify the checkpoint to resume training
+
+You can also specify the `checkpoint` path for `--resume`. MMPose will automatically read the `checkpoint` and resume training from it. The command is as follows:
+
+```shell
+python tools/train.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py \
+    --resume work_dirs/td-hm_res50_8xb64-210e_coco-256x192/latest.pth
+```
+
+If you prefer to specify the `checkpoint` path in the config file, you need to set `load_from` in addition to `resume=True`.
+
+Note that if only `load_from` is set without `resume=True`, only the weights in the `checkpoint` will be loaded and training will restart from scratch, instead of continuing from the previous state.
+
+The following example is equivalent to specifying the checkpoint path with the `--resume` parameter as above:
+
+```python
+resume = True
+load_from = 'work_dirs/td-hm_res50_8xb64-210e_coco-256x192/latest.pth'
+# model settings
+model = dict(
+    ## omitted ##
+    )
+```
+
+## Freeze partial parameters during training
+
+In some scenarios, it might be desirable to freeze certain parameters of a model during training to fine-tune specific parts or to prevent overfitting. In MMPose, you can set different hyperparameters for any module in the model by setting `custom_keys` in `paramwise_cfg`. This allows you to control the learning rate and decay coefficient for specific parts of the model.
+
+For example, if you want to freeze the parameters in `backbone.layer0` and `backbone.layer1`, you can modify the optimizer wrapper in the config file as:
+
+```python
+optim_wrapper = dict(
+    optimizer=dict(...),
+    paramwise_cfg=dict(
+        custom_keys={
+            # lr_mult=0 and decay_mult=0 disable updates and weight decay for these modules
+            'backbone.layer0': dict(lr_mult=0, decay_mult=0),
+            'backbone.layer1': dict(lr_mult=0, decay_mult=0),
+        }))
+```
+
+This configuration will freeze the parameters in `backbone.layer0` and `backbone.layer1` by setting their learning rate and decay coefficient to 0. By using this approach, you can effectively control the training process and fine-tune specific parts of your model as needed.
+
+## Automatic Mixed Precision (AMP) training
+
+Mixed precision training can reduce training time and memory cost without changing the model or reducing its training accuracy, thus supporting larger batch sizes, larger models, and larger input sizes.
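+
+Under the hood, AMP in MMEngine-based codebases such as MMPose is handled by the optimizer wrapper. The snippet below is only a minimal sketch of a config-level alternative: it assumes MMEngine's `AmpOptimWrapper` with `loss_scale='dynamic'`, and the optimizer settings are placeholders for whatever your config already defines.
+
+```python
+# Hedged sketch (not an official recipe): enable AMP directly in the config by
+# switching the optimizer wrapper type to MMEngine's AmpOptimWrapper.
+optim_wrapper = dict(
+    type='AmpOptimWrapper',
+    loss_scale='dynamic',  # let the gradient scaler adjust itself during training
+    optimizer=dict(type='Adam', lr=5e-4))  # placeholder optimizer settings
+```
+
+In most cases, simply passing the `--amp` flag described below should achieve an equivalent effect without editing the config.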
+
+To enable Automatic Mixed Precision (AMP) training, add `--amp` to the end of the training command, as follows:
+
+```shell
+python tools/train.py ${CONFIG_FILE} --amp
+```
+
+For example:
+
+```shell
+python tools/train.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py --amp
+```
+
+## Set the random seed
+
+If you want to specify the random seed during training, you can use the following command:
+
+```shell
+python ./tools/train.py \
+    ${CONFIG} \                          # config file
+    --cfg-options randomness.seed=2023 \ # set the random seed = 2023
+    [randomness.diff_rank_seed=True] \   # set different seeds according to rank
+    [randomness.deterministic=True]      # set the cuDNN backend deterministic option to True
+# `[]` stands for optional parameters; you do not need to type `[]` when actually entering the command
+```
+
+`randomness` has three parameters that can be set, with the following meanings.
+
+- `randomness.seed=2023`, set the random seed to `2023`.
+
+- `randomness.diff_rank_seed=True`, set different seeds according to global `rank`. Defaults to `False`.
+
+- `randomness.deterministic=True`, set the deterministic option for the `cuDNN` backend, i.e., set `torch.backends.cudnn.deterministic` to `True` and `torch.backends.cudnn.benchmark` to `False`. Defaults to `False`. See [PyTorch Randomness](https://pytorch.org/docs/stable/notes/randomness.html) for more details.
+
+## Visualize training process
+
+Monitoring the training process is essential for understanding the performance of your model and making necessary adjustments. In this section, we will introduce two methods to visualize the training process of your MMPose model: TensorBoard and the MMEngine Visualizer.
+
+### TensorBoard
+
+TensorBoard is a powerful tool that allows you to visualize the changes in losses during training. To enable TensorBoard visualization, you may need to:
+
+1. Install TensorBoard
+
+   ```shell
+   pip install tensorboard
+   ```
+
+2. Enable TensorBoard in the config file
+
+   ```python
+   visualizer = dict(vis_backends=[
+       dict(type='LocalVisBackend'),
+       dict(type='TensorboardVisBackend'),
+   ])
+   ```
+
+The event files generated by the TensorBoard backend will be saved under the experiment log folder `${WORK_DIR}`, which defaults to `work_dirs/${CONFIG}` or can be specified using the `--work-dir` option. To visualize the training process, use the following command:
+
+```shell
+tensorboard --logdir ${WORK_DIR}/${TIMESTAMP}/vis_data
+```
+
+### MMEngine visualizer
+
+MMPose also supports visualizing model inference results during validation. To activate this function, please use the `--show` option or set `--show-dir` when launching training. This feature provides an effective way to analyze the model's performance on specific examples and make any necessary adjustments.
+
+## Test your model
+
+### Test with your PC
+
+You can use `tools/test.py` to test a model on a single machine with a CPU and optionally a GPU.
+
+Here is the full usage of the script:
+
+```shell
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [ARGS]
+```
+
+```{note}
+By default, MMPose prefers GPU to CPU. If you want to test a model on CPU, please empty `CUDA_VISIBLE_DEVICES` or set it to -1 to make the GPU invisible to the program.
+
+```
+
+```shell
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [ARGS]
+```
+
+| ARGS | Description |
+| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `CONFIG_FILE` | The path to the config file. |
+| `CHECKPOINT_FILE` | The path to the checkpoint file (It can be a http link, and you can find checkpoints [here](https://MMPose.readthedocs.io/en/latest/model_zoo.html)). |
+| `--work-dir WORK_DIR` | The directory to save the file containing evaluation metrics. |
+| `--out OUT` | The path to save the file containing evaluation metrics. |
+| `--dump DUMP` | The path to dump all outputs of the model for offline evaluation. |
+| `--cfg-options CFG_OPTIONS` | Override some settings in the used config. The key-value pairs in xxx=yyy format will be merged into the config file. If the value to be overwritten is a list, it should be in the form of either `key="[a,b]"` or `key=a,b`. The argument also allows nested list/tuple values, e.g. `key="[(a,b),(c,d)]"`. Note that quotation marks are necessary and that no white space is allowed. |
+| `--show-dir SHOW_DIR` | The directory to save the result visualization images. |
+| `--show` | Visualize the prediction result in a window. |
+| `--interval INTERVAL` | The interval of samples to visualize. |
+| `--wait-time WAIT_TIME` | The display time of every window (in seconds). Defaults to 1. |
+| `--launcher {none,pytorch,slurm,mpi}` | Options for job launcher. |
+
+### Test with multiple GPUs
+
+We provide a shell script to start a multi-GPU task with `torch.distributed.launch`.
+
+```shell
+bash ./tools/dist_test.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${GPU_NUM} [PY_ARGS]
+```
+
+| ARGS | Description |
+| ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `CONFIG_FILE` | The path to the config file. |
+| `CHECKPOINT_FILE` | The path to the checkpoint file (It can be a http link, and you can find checkpoints [here](https://mmpose.readthedocs.io/en/latest/model_zoo.html)). |
+| `GPU_NUM` | The number of GPUs to be used. |
+| `[PYARGS]` | The other optional arguments of `tools/test.py`, see [here](#test-with-your-pc). |
+
+You can also specify extra arguments of the launcher by environment variables. For example, to change the
+communication port of the launcher to 29666, run the command below:
+
+```shell
+PORT=29666 bash ./tools/dist_test.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${GPU_NUM} [PY_ARGS]
+```
+
+If you want to start multiple test jobs on different GPUs, you can launch them by specifying
+different ports and visible devices.
+ +```shell +CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 bash ./tools/dist_test.sh ${CONFIG_FILE1} ${CHECKPOINT_FILE} 4 [PY_ARGS] +CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 bash ./tools/dist_test.sh ${CONFIG_FILE2} ${CHECKPOINT_FILE} 4 [PY_ARGS] +``` + +### Test with multiple machines + +#### Multiple machines in the same network + +If you launch a test job with multiple machines connected with ethernet, you can run the following commands: + +On the first machine: + +```shell +NNODES=2 NODE_RANK=0 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR bash tools/dist_test.sh $CONFIG $CHECKPOINT_FILE $GPUS +``` + +On the second machine: + +```shell +NNODES=2 NODE_RANK=1 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR bash tools/dist_test.sh $CONFIG $CHECKPOINT_FILE $GPUS +``` + +Compared with multi-GPUs in a single machine, you need to specify some extra environment variables: + +| ENV_VARS | Description | +| ------------- | ---------------------------------------------------------------------------- | +| `NNODES` | The total number of machines. | +| `NODE_RANK` | The index of the local machine. | +| `PORT` | The communication port, it should be the same in all machines. | +| `MASTER_ADDR` | The IP address of the master machine, it should be the same in all machines. | + +Usually, it is slow if you do not have high-speed networking like InfiniBand. + +#### Multiple machines managed with slurm + +If you run MMPose on a cluster managed with [slurm](https://slurm.schedmd.com/), you can use the script `slurm_test.sh`. + +```shell +[ENV_VARS] ./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${CHECKPOINT_FILE} [PY_ARGS] +``` + +Here are the argument descriptions of the script. + +| ARGS | Description | +| ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- | +| `PARTITION` | The partition to use in your cluster. | +| `JOB_NAME` | The name of your job, you can name it as you like. | +| `CONFIG_FILE` | The path to the config file. | +| `CHECKPOINT_FILE` | The path to the checkpoint file (It can be a http link, and you can find checkpoints [here](https://MMPose.readthedocs.io/en/latest/model_zoo.html)). | +| `[PYARGS]` | The other optional arguments of `tools/test.py`, see [here](#test-with-your-pc). | + +Here are the environment variables that can be used to configure the slurm job. + +| ENV_VARS | Description | +| --------------- | ---------------------------------------------------------------------------------------------------------- | +| `GPUS` | The total number of GPUs to be used. Defaults to 8. | +| `GPUS_PER_NODE` | The number of GPUs to be allocated per node. Defaults to 8. | +| `CPUS_PER_TASK` | The number of CPUs to be allocated per task (Usually one GPU corresponds to one task). Defaults to 5. | +| `SRUN_ARGS` | The other arguments of `srun`. Available options can be found [here](https://slurm.schedmd.com/srun.html). | diff --git a/docs/en/visualization.md b/docs/en/visualization.md index 2dd39c6f65..a5448ae975 100644 --- a/docs/en/visualization.md +++ b/docs/en/visualization.md @@ -1,103 +1,103 @@ -# Visualization - -- [Single Image](#single-image) -- [Browse Dataset](#browse-dataset) -- [Visualizer Hook](#visualizer-hook) - -## Single Image - -`demo/image_demo.py` helps the user to visualize the prediction result of a single image, including the skeleton and heatmaps. 
- -```shell -python demo/image_demo.py ${IMG} ${CONFIG} ${CHECKPOINT} [-h] [--out-file OUT_FILE] [--device DEVICE] [--draw-heatmap] -``` - -| ARGS | Description | -| --------------------- | -------------------------------- | -| `IMG` | The path to the test image. | -| `CONFIG` | The path to the config file. | -| `CHECKPOINT` | The path to the checkpoint file. | -| `--out-file OUT_FILE` | Path to output file. | -| `--device DEVICE` | Device used for inference. | -| `--draw-heatmap` | Visualize the predicted heatmap. | - -Here is an example of Heatmap visualization: - -![000000196141](https://user-images.githubusercontent.com/13503330/222373580-88d93603-e00e-45e9-abdd-f504a62b4ca5.jpg) - -## Browse Dataset - -`tools/analysis_tools/browse_dataset.py` helps the user to browse a pose dataset visually, or save the image to a designated directory. - -```shell -python tools/misc/browse_dataset.py ${CONFIG} [-h] [--output-dir ${OUTPUT_DIR}] [--not-show] [--phase ${PHASE}] [--mode ${MODE}] [--show-interval ${SHOW_INTERVAL}] -``` - -| ARGS | Description | -| -------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- | -| `CONFIG` | The path to the config file. | -| `--output-dir OUTPUT_DIR` | The target folder to save visualization results. If not specified, the visualization results will not be saved. | -| `--not-show` | Do not show the visualization results in an external window. | -| `--phase {train, val, test}` | Options for dataset. | -| `--mode {original, transformed}` | Specify the type of visualized images. `original` means to show images without pre-processing; `transformed` means to show images are pre-processed. | -| `--show-interval SHOW_INTERVAL` | Time interval between visualizing two images. | - -For instance, users who want to visualize images and annotations in COCO dataset use: - -```shell -python tools/misc/browse_dataset.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-e210_coco-256x192.py --mode original -``` - -The bounding boxes and keypoints will be plotted on the original image. Following is an example: -![original_coco](https://user-images.githubusercontent.com/26127467/187383698-7e518f21-b4cc-4712-9e97-99ddd8f0e437.jpg) - -The original images need to be processed before being fed into models. To visualize pre-processed images and annotations, users need to modify the argument `mode` to `transformed`. For example: - -```shell -python tools/misc/browse_dataset.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-e210_coco-256x192.py --mode transformed -``` - -Here is a processed sample - -![transformed_coco](https://user-images.githubusercontent.com/26127467/187386652-bd47335d-797c-4e8c-b823-2a4915f9812f.jpg) - -The heatmap target will be visualized together if it is generated in the pipeline. - -## Visualizer Hook - -During validation and testing, users can specify certain arguments to visualize the output of trained models. - -To visualize in external window during testing: - -```shell -python tools/test.py ${CONFIG} ${CHECKPOINT} --show -``` - -During validation: - -```shell -python tools/train.py ${CONFIG} --work-dir ${WORK_DIR} --show --interval ${INTERVAL} -``` - -It is suggested to use large `INTERVAL` (e.g., 50) if users want to visualize during validation, since the wait time for each visualized instance will make the validation process very slow. 
-
-To save visualization results in `SHOW_DIR` during testing:
-
-```shell
-python tools/test.py ${CONFIG} ${CHECKPOINT} --show-dir=${SHOW_DIR}
-```
-
-During validation:
-
-```shell
-python tools/train.py ${CONFIG} --work-dir ${WORK_DIR} --show-dir=${SHOW_DIR}
-```
-
-More details about visualization arguments can be found in [train_and_test](./train_and_test.md).
-
-If you use a heatmap-based method and want to visualize predicted heatmaps, you can manually specify `output_heatmaps=True` for `model.test_cfg` in config file. Another way is to add `--cfg-options='model.test_cfg.output_heatmaps=True'` at the end of your command.
-
-Visualization example (top: decoded keypoints; bottom: predicted heatmap):
-![vis_pred](https://user-images.githubusercontent.com/26127467/187578902-30ef7bb0-9a93-4e03-bae0-02aeccf7f689.jpg)
-
-For top-down models, each sample only contains one instance. So there will be multiple visualization results for each image.
+# Visualization
+
+- [Single Image](#single-image)
+- [Browse Dataset](#browse-dataset)
+- [Visualizer Hook](#visualizer-hook)
+
+## Single Image
+
+`demo/image_demo.py` helps the user to visualize the prediction result of a single image, including the skeleton and heatmaps.
+
+```shell
+python demo/image_demo.py ${IMG} ${CONFIG} ${CHECKPOINT} [-h] [--out-file OUT_FILE] [--device DEVICE] [--draw-heatmap]
+```
+
+| ARGS | Description |
+| --------------------- | -------------------------------- |
+| `IMG` | The path to the test image. |
+| `CONFIG` | The path to the config file. |
+| `CHECKPOINT` | The path to the checkpoint file. |
+| `--out-file OUT_FILE` | Path to output file. |
+| `--device DEVICE` | Device used for inference. |
+| `--draw-heatmap` | Visualize the predicted heatmap. |
+
+Here is an example of heatmap visualization:
+
+![000000196141](https://user-images.githubusercontent.com/13503330/222373580-88d93603-e00e-45e9-abdd-f504a62b4ca5.jpg)
+
+## Browse Dataset
+
+`tools/analysis_tools/browse_dataset.py` helps the user to browse a pose dataset visually, or save the images to a designated directory.
+
+```shell
+python tools/misc/browse_dataset.py ${CONFIG} [-h] [--output-dir ${OUTPUT_DIR}] [--not-show] [--phase ${PHASE}] [--mode ${MODE}] [--show-interval ${SHOW_INTERVAL}]
+```
+
+| ARGS | Description |
+| -------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `CONFIG` | The path to the config file. |
+| `--output-dir OUTPUT_DIR` | The target folder to save visualization results. If not specified, the visualization results will not be saved. |
+| `--not-show` | Do not show the visualization results in an external window. |
+| `--phase {train, val, test}` | The dataset split to browse. |
+| `--mode {original, transformed}` | Specify the type of visualized images. `original` means to show images without pre-processing; `transformed` means to show images after pre-processing. |
+| `--show-interval SHOW_INTERVAL` | Time interval between visualizing two images. |
+
+For instance, users who want to visualize images and annotations in the COCO dataset can use:
+
+```shell
+python tools/misc/browse_dataset.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-e210_coco-256x192.py --mode original
+```
+
+The bounding boxes and keypoints will be plotted on the original image.
Following is an example: +![original_coco](https://user-images.githubusercontent.com/26127467/187383698-7e518f21-b4cc-4712-9e97-99ddd8f0e437.jpg) + +The original images need to be processed before being fed into models. To visualize pre-processed images and annotations, users need to modify the argument `mode` to `transformed`. For example: + +```shell +python tools/misc/browse_dataset.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-e210_coco-256x192.py --mode transformed +``` + +Here is a processed sample + +![transformed_coco](https://user-images.githubusercontent.com/26127467/187386652-bd47335d-797c-4e8c-b823-2a4915f9812f.jpg) + +The heatmap target will be visualized together if it is generated in the pipeline. + +## Visualizer Hook + +During validation and testing, users can specify certain arguments to visualize the output of trained models. + +To visualize in external window during testing: + +```shell +python tools/test.py ${CONFIG} ${CHECKPOINT} --show +``` + +During validation: + +```shell +python tools/train.py ${CONFIG} --work-dir ${WORK_DIR} --show --interval ${INTERVAL} +``` + +It is suggested to use large `INTERVAL` (e.g., 50) if users want to visualize during validation, since the wait time for each visualized instance will make the validation process very slow. + +To save visualization results in `SHOW_DIR` during testing: + +```shell +python tools/test.py ${CONFIG} ${CHECKPOINT} --show-dir=${SHOW_DIR} +``` + +During validation: + +```shell +python tools/train.py ${CONFIG} --work-dir ${WORK_DIR} --show-dir=${SHOW_DIR} +``` + +More details about visualization arguments can be found in [train_and_test](./train_and_test.md). + +If you use a heatmap-based method and want to visualize predicted heatmaps, you can manually specify `output_heatmaps=True` for `model.test_cfg` in config file. Another way is to add `--cfg-options='model.test_cfg.output_heatmaps=True'` at the end of your command. + +Visualization example (top: decoded keypoints; bottom: predicted heatmap): +![vis_pred](https://user-images.githubusercontent.com/26127467/187578902-30ef7bb0-9a93-4e03-bae0-02aeccf7f689.jpg) + +For top-down models, each sample only contains one instance. So there will be multiple visualization results for each image. diff --git a/docs/src/papers/algorithms/associative_embedding.md b/docs/src/papers/algorithms/associative_embedding.md index 3a27267ae9..b545b7aa65 100644 --- a/docs/src/papers/algorithms/associative_embedding.md +++ b/docs/src/papers/algorithms/associative_embedding.md @@ -1,30 +1,30 @@ -# Associative embedding: End-to-end learning for joint detection and grouping (AE) - - - -
    -Associative Embedding (NIPS'2017) - -```bibtex -@inproceedings{newell2017associative, - title={Associative embedding: End-to-end learning for joint detection and grouping}, - author={Newell, Alejandro and Huang, Zhiao and Deng, Jia}, - booktitle={Advances in neural information processing systems}, - pages={2277--2287}, - year={2017} -} -``` - -
    - -## Abstract - - - -We introduce associative embedding, a novel method for supervising convolutional neural networks for the task of detection and grouping. A number of computer vision problems can be framed in this manner including multi-person pose estimation, instance segmentation, and multi-object tracking. Usually the grouping of detections is achieved with multi-stage pipelines, instead we propose an approach that teaches a network to simultaneously output detections and group assignments. This technique can be easily integrated into any state-of-the-art network architecture that produces pixel-wise predictions. We show how to apply this method to both multi-person pose estimation and instance segmentation and report state-of-the-art performance for multi-person pose on the MPII and MS-COCO datasets. - - - -
    - -
    +# Associative embedding: End-to-end learning for joint detection and grouping (AE) + + + +
    +Associative Embedding (NIPS'2017) + +```bibtex +@inproceedings{newell2017associative, + title={Associative embedding: End-to-end learning for joint detection and grouping}, + author={Newell, Alejandro and Huang, Zhiao and Deng, Jia}, + booktitle={Advances in neural information processing systems}, + pages={2277--2287}, + year={2017} +} +``` + +
    + +## Abstract + + + +We introduce associative embedding, a novel method for supervising convolutional neural networks for the task of detection and grouping. A number of computer vision problems can be framed in this manner including multi-person pose estimation, instance segmentation, and multi-object tracking. Usually the grouping of detections is achieved with multi-stage pipelines, instead we propose an approach that teaches a network to simultaneously output detections and group assignments. This technique can be easily integrated into any state-of-the-art network architecture that produces pixel-wise predictions. We show how to apply this method to both multi-person pose estimation and instance segmentation and report state-of-the-art performance for multi-person pose on the MPII and MS-COCO datasets. + + + +
    + +
    diff --git a/docs/src/papers/algorithms/awingloss.md b/docs/src/papers/algorithms/awingloss.md index 4d4b93a87c..4633e32581 100644 --- a/docs/src/papers/algorithms/awingloss.md +++ b/docs/src/papers/algorithms/awingloss.md @@ -1,31 +1,31 @@ -# Adaptive Wing Loss for Robust Face Alignment via Heatmap Regression - - - -
    -AdaptiveWingloss (ICCV'2019) - -```bibtex -@inproceedings{wang2019adaptive, - title={Adaptive wing loss for robust face alignment via heatmap regression}, - author={Wang, Xinyao and Bo, Liefeng and Fuxin, Li}, - booktitle={Proceedings of the IEEE/CVF international conference on computer vision}, - pages={6971--6981}, - year={2019} -} -``` - -
    - -## Abstract - - - -Heatmap regression with a deep network has become one of the mainstream approaches to localize facial landmarks. However, the loss function for heatmap regression is rarely studied. In this paper, we analyze the ideal loss function properties for heatmap regression in face alignment problems. Then we propose a novel loss function, named Adaptive Wing loss, that is able to adapt its shape to different types of ground truth heatmap pixels. This adaptability penalizes loss more on foreground pixels while less on background pixels. To address the imbalance between foreground and background pixels, we also propose Weighted Loss Map, which assigns high weights on foreground and difficult background pixels to help training process focus more on pixels that are crucial to landmark localization. To further improve face alignment accuracy, we introduce boundary prediction and CoordConv with boundary coordinates. Extensive experiments on different benchmarks, including COFW, 300W and WFLW, show our approach outperforms the state-of-the-art by a significant margin on -various evaluation metrics. Besides, the Adaptive Wing loss also helps other heatmap regression tasks. - - - -
    - -
    +# Adaptive Wing Loss for Robust Face Alignment via Heatmap Regression + + + +
    +AdaptiveWingloss (ICCV'2019) + +```bibtex +@inproceedings{wang2019adaptive, + title={Adaptive wing loss for robust face alignment via heatmap regression}, + author={Wang, Xinyao and Bo, Liefeng and Fuxin, Li}, + booktitle={Proceedings of the IEEE/CVF international conference on computer vision}, + pages={6971--6981}, + year={2019} +} +``` + +
    + +## Abstract + + + +Heatmap regression with a deep network has become one of the mainstream approaches to localize facial landmarks. However, the loss function for heatmap regression is rarely studied. In this paper, we analyze the ideal loss function properties for heatmap regression in face alignment problems. Then we propose a novel loss function, named Adaptive Wing loss, that is able to adapt its shape to different types of ground truth heatmap pixels. This adaptability penalizes loss more on foreground pixels while less on background pixels. To address the imbalance between foreground and background pixels, we also propose Weighted Loss Map, which assigns high weights on foreground and difficult background pixels to help training process focus more on pixels that are crucial to landmark localization. To further improve face alignment accuracy, we introduce boundary prediction and CoordConv with boundary coordinates. Extensive experiments on different benchmarks, including COFW, 300W and WFLW, show our approach outperforms the state-of-the-art by a significant margin on +various evaluation metrics. Besides, the Adaptive Wing loss also helps other heatmap regression tasks. + + + +
    + +
    diff --git a/docs/src/papers/algorithms/cid.md b/docs/src/papers/algorithms/cid.md index 4366f95504..10b76ce2f5 100644 --- a/docs/src/papers/algorithms/cid.md +++ b/docs/src/papers/algorithms/cid.md @@ -1,31 +1,31 @@ -# Contextual Instance Decoupling for Robust Multi-Person Pose Estimation - - - -
    -CID (CVPR'2022) - -```bibtex -@InProceedings{Wang_2022_CVPR, - author = {Wang, Dongkai and Zhang, Shiliang}, - title = {Contextual Instance Decoupling for Robust Multi-Person Pose Estimation}, - booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, - month = {June}, - year = {2022}, - pages = {11060-11068} -} -``` - -
    - -## Abstract - - - -Crowded scenes make it challenging to differentiate persons and locate their pose keypoints. This paper proposes the Contextual Instance Decoupling (CID), which presents a new pipeline for multi-person pose estimation. Instead of relying on person bounding boxes to spatially differentiate persons, CID decouples persons in an image into multiple instance-aware feature maps. Each of those feature maps is hence adopted to infer keypoints for a specific person. Compared with bounding box detection, CID is differentiable and robust to detection errors. Decoupling persons into different feature maps allows to isolate distractions from other persons, and explore context cues at scales larger than the bounding box size. Experiments show that CID outperforms previous multi-person pose estimation pipelines on crowded scenes pose estimation benchmarks in both accuracy and efficiency. For instance, it achieves 71.3% AP on CrowdPose, outperforming the recent single-stage DEKR by 5.6%, the bottom-up CenterAttention by 3.7%, and the top-down JCSPPE by 5.3%. This advantage sustains on the commonly used COCO benchmark. - - - -
    - -
    +# Contextual Instance Decoupling for Robust Multi-Person Pose Estimation + + + +
    +CID (CVPR'2022) + +```bibtex +@InProceedings{Wang_2022_CVPR, + author = {Wang, Dongkai and Zhang, Shiliang}, + title = {Contextual Instance Decoupling for Robust Multi-Person Pose Estimation}, + booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2022}, + pages = {11060-11068} +} +``` + +
    + +## Abstract + + + +Crowded scenes make it challenging to differentiate persons and locate their pose keypoints. This paper proposes the Contextual Instance Decoupling (CID), which presents a new pipeline for multi-person pose estimation. Instead of relying on person bounding boxes to spatially differentiate persons, CID decouples persons in an image into multiple instance-aware feature maps. Each of those feature maps is hence adopted to infer keypoints for a specific person. Compared with bounding box detection, CID is differentiable and robust to detection errors. Decoupling persons into different feature maps allows to isolate distractions from other persons, and explore context cues at scales larger than the bounding box size. Experiments show that CID outperforms previous multi-person pose estimation pipelines on crowded scenes pose estimation benchmarks in both accuracy and efficiency. For instance, it achieves 71.3% AP on CrowdPose, outperforming the recent single-stage DEKR by 5.6%, the bottom-up CenterAttention by 3.7%, and the top-down JCSPPE by 5.3%. This advantage sustains on the commonly used COCO benchmark. + + + +
    + +
    diff --git a/docs/src/papers/algorithms/cpm.md b/docs/src/papers/algorithms/cpm.md index fb5dbfacec..ea2ac7f73a 100644 --- a/docs/src/papers/algorithms/cpm.md +++ b/docs/src/papers/algorithms/cpm.md @@ -1,30 +1,30 @@ -# Convolutional pose machines - - - -
    -CPM (CVPR'2016) - -```bibtex -@inproceedings{wei2016convolutional, - title={Convolutional pose machines}, - author={Wei, Shih-En and Ramakrishna, Varun and Kanade, Takeo and Sheikh, Yaser}, - booktitle={Proceedings of the IEEE conference on Computer Vision and Pattern Recognition}, - pages={4724--4732}, - year={2016} -} -``` - -
    - -## Abstract - - - -We introduce associative embedding, a novel method for supervising convolutional neural networks for the task of detection and grouping. A number of computer vision problems can be framed in this manner including multi-person pose estimation, instance segmentation, and multi-object tracking. Usually the grouping of detections is achieved with multi-stage pipelines, instead we propose an approach that teaches a network to simultaneously output detections and group assignments. This technique can be easily integrated into any state-of-the-art network architecture that produces pixel-wise predictions. We show how to apply this method to both multi-person pose estimation and instance segmentation and report state-of-the-art performance for multi-person pose on the MPII and MS-COCO datasets. - - - -
    - -
    +# Convolutional pose machines + + + +
    +CPM (CVPR'2016) + +```bibtex +@inproceedings{wei2016convolutional, + title={Convolutional pose machines}, + author={Wei, Shih-En and Ramakrishna, Varun and Kanade, Takeo and Sheikh, Yaser}, + booktitle={Proceedings of the IEEE conference on Computer Vision and Pattern Recognition}, + pages={4724--4732}, + year={2016} +} +``` + +
    + +## Abstract + + + +We introduce associative embedding, a novel method for supervising convolutional neural networks for the task of detection and grouping. A number of computer vision problems can be framed in this manner including multi-person pose estimation, instance segmentation, and multi-object tracking. Usually the grouping of detections is achieved with multi-stage pipelines, instead we propose an approach that teaches a network to simultaneously output detections and group assignments. This technique can be easily integrated into any state-of-the-art network architecture that produces pixel-wise predictions. We show how to apply this method to both multi-person pose estimation and instance segmentation and report state-of-the-art performance for multi-person pose on the MPII and MS-COCO datasets. + + + +
    + +
    diff --git a/docs/src/papers/algorithms/dark.md b/docs/src/papers/algorithms/dark.md index 083b7596ab..94da433e29 100644 --- a/docs/src/papers/algorithms/dark.md +++ b/docs/src/papers/algorithms/dark.md @@ -1,30 +1,30 @@ -# Distribution-aware coordinate representation for human pose estimation - - - -
    -DarkPose (CVPR'2020) - -```bibtex -@inproceedings{zhang2020distribution, - title={Distribution-aware coordinate representation for human pose estimation}, - author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={7093--7102}, - year={2020} -} -``` - -
    - -## Abstract - - - -While being the de facto standard coordinate representation for human pose estimation, heatmap has not been investigated in-depth. This work fills this gap. For the first time, we find that the process of decoding the predicted heatmaps into the final joint coordinates in the original image space is surprisingly significant for the performance. We further probe the design limitations of the standard coordinate decoding method, and propose a more principled distributionaware decoding method. Also, we improve the standard coordinate encoding process (i.e. transforming ground-truth coordinates to heatmaps) by generating unbiased/accurate heatmaps. Taking the two together, we formulate a novel Distribution-Aware coordinate Representation of Keypoints (DARK) method. Serving as a model-agnostic plug-in, DARK brings about significant performance boost to existing human pose estimation models. Extensive experiments show that DARK yields the best results on two common benchmarks, MPII and COCO. Besides, DARK achieves the 2nd place entry in the ICCV 2019 COCO Keypoints Challenge. The code is available online. - - - -
    - -
    +# Distribution-aware coordinate representation for human pose estimation + + + +
    +DarkPose (CVPR'2020) + +```bibtex +@inproceedings{zhang2020distribution, + title={Distribution-aware coordinate representation for human pose estimation}, + author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={7093--7102}, + year={2020} +} +``` + +
    + +## Abstract + + + +While being the de facto standard coordinate representation for human pose estimation, heatmap has not been investigated in-depth. This work fills this gap. For the first time, we find that the process of decoding the predicted heatmaps into the final joint coordinates in the original image space is surprisingly significant for the performance. We further probe the design limitations of the standard coordinate decoding method, and propose a more principled distributionaware decoding method. Also, we improve the standard coordinate encoding process (i.e. transforming ground-truth coordinates to heatmaps) by generating unbiased/accurate heatmaps. Taking the two together, we formulate a novel Distribution-Aware coordinate Representation of Keypoints (DARK) method. Serving as a model-agnostic plug-in, DARK brings about significant performance boost to existing human pose estimation models. Extensive experiments show that DARK yields the best results on two common benchmarks, MPII and COCO. Besides, DARK achieves the 2nd place entry in the ICCV 2019 COCO Keypoints Challenge. The code is available online. + + + +
    + +
    diff --git a/docs/src/papers/algorithms/debias_ipr.md b/docs/src/papers/algorithms/debias_ipr.md index 8d77c84c09..b02e58ecdf 100644 --- a/docs/src/papers/algorithms/debias_ipr.md +++ b/docs/src/papers/algorithms/debias_ipr.md @@ -1,30 +1,30 @@ -# Removing the Bias of Integral Pose Regression - - - -
    -Debias IPR (ICCV'2021) - -```bibtex -@inproceedings{gu2021removing, - title={Removing the Bias of Integral Pose Regression}, - author={Gu, Kerui and Yang, Linlin and Yao, Angela}, - booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, - pages={11067--11076}, - year={2021} - } -``` - -
    - -## Abstract - - - -Heatmap-based detection methods are dominant for 2D human pose estimation even though regression is more intuitive. The introduction of the integral regression method, which, architecture-wise uses an implicit heatmap, brings the two approaches even closer together. This begs the question -- does detection really outperform regression? In this paper, we investigate the difference in supervision between the heatmap-based detection and integral regression, as this is the key remaining difference between the two approaches. In the process, we discover an underlying bias behind integral pose regression that arises from taking the expectation after the softmax function. To counter the bias, we present a compensation method which we find to improve integral regression accuracy on all 2D pose estimation benchmarks. We further propose a simple combined detection and bias-compensated regression method that considerably outperforms state-of-the-art baselines with few added components. - - - -
    - -
    +# Removing the Bias of Integral Pose Regression + + + +
    +Debias IPR (ICCV'2021) + +```bibtex +@inproceedings{gu2021removing, + title={Removing the Bias of Integral Pose Regression}, + author={Gu, Kerui and Yang, Linlin and Yao, Angela}, + booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, + pages={11067--11076}, + year={2021} + } +``` + +
    + +## Abstract + + + +Heatmap-based detection methods are dominant for 2D human pose estimation even though regression is more intuitive. The introduction of the integral regression method, which, architecture-wise uses an implicit heatmap, brings the two approaches even closer together. This begs the question -- does detection really outperform regression? In this paper, we investigate the difference in supervision between the heatmap-based detection and integral regression, as this is the key remaining difference between the two approaches. In the process, we discover an underlying bias behind integral pose regression that arises from taking the expectation after the softmax function. To counter the bias, we present a compensation method which we find to improve integral regression accuracy on all 2D pose estimation benchmarks. We further propose a simple combined detection and bias-compensated regression method that considerably outperforms state-of-the-art baselines with few added components. + + + +
    + +
    diff --git a/docs/src/papers/algorithms/deeppose.md b/docs/src/papers/algorithms/deeppose.md index 24778ba9db..a6e96f5106 100644 --- a/docs/src/papers/algorithms/deeppose.md +++ b/docs/src/papers/algorithms/deeppose.md @@ -1,30 +1,30 @@ -# DeepPose: Human pose estimation via deep neural networks - - - -
    -DeepPose (CVPR'2014) - -```bibtex -@inproceedings{toshev2014deeppose, - title={Deeppose: Human pose estimation via deep neural networks}, - author={Toshev, Alexander and Szegedy, Christian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={1653--1660}, - year={2014} -} -``` - -
    - -## Abstract - - - -We propose a method for human pose estimation based on Deep Neural Networks (DNNs). The pose estimation is formulated as a DNN-based regression problem towards body joints. We present a cascade of such DNN regressors which results in high precision pose estimates. The approach has the advantage of reasoning about pose in a holistic fashion and has a simple but yet powerful formulation which capitalizes on recent advances in Deep Learning. We present a detailed empirical analysis with state-of-art or better performance on four academic benchmarks of diverse real-world images. - - - -
    - -
    +# DeepPose: Human pose estimation via deep neural networks + + + +
    +DeepPose (CVPR'2014) + +```bibtex +@inproceedings{toshev2014deeppose, + title={Deeppose: Human pose estimation via deep neural networks}, + author={Toshev, Alexander and Szegedy, Christian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1653--1660}, + year={2014} +} +``` + +
    + +## Abstract + + + +We propose a method for human pose estimation based on Deep Neural Networks (DNNs). The pose estimation is formulated as a DNN-based regression problem towards body joints. We present a cascade of such DNN regressors which results in high precision pose estimates. The approach has the advantage of reasoning about pose in a holistic fashion and has a simple but yet powerful formulation which capitalizes on recent advances in Deep Learning. We present a detailed empirical analysis with state-of-art or better performance on four academic benchmarks of diverse real-world images. + + + +
    + +
    diff --git a/docs/src/papers/algorithms/dekr.md b/docs/src/papers/algorithms/dekr.md index ee19a3315b..605035d6ec 100644 --- a/docs/src/papers/algorithms/dekr.md +++ b/docs/src/papers/algorithms/dekr.md @@ -1,31 +1,31 @@ -# Bottom-up Human Pose Estimation via Disentangled Keypoint Regression - - - -
    -DEKR (CVPR'2021) - -```bibtex -@inproceedings{geng2021bottom, - title={Bottom-up human pose estimation via disentangled keypoint regression}, - author={Geng, Zigang and Sun, Ke and Xiao, Bin and Zhang, Zhaoxiang and Wang, Jingdong}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={14676--14686}, - year={2021} -} -``` - -
    - -## Abstract - - - -In this paper, we are interested in the bottom-up paradigm of estimating human poses from an image. We study the dense keypoint regression framework that is previously inferior to the keypoint detection and grouping framework. Our motivation is that regressing keypoint positions accurately needs to learn representations that focus on the keypoint regions. -We present a simple yet effective approach, named disentangled keypoint regression (DEKR). We adopt adaptive convolutions through pixel-wise spatial transformer to activate the pixels in the keypoint regions and accordingly learn representations from them. We use a multi-branch structure for separate regression: each branch learns a representation with dedicated adaptive convolutions and regresses one keypoint. The resulting disentangled representations are able to attend to the keypoint regions, respectively, and thus the keypoint regression is spatially more accurate. We empirically show that the proposed direct regression method outperforms keypoint detection and grouping methods and achieves superior bottom-up pose estimation results on two benchmark datasets, COCO and CrowdPose. The code and models are available at [this https URL](https://github.com/HRNet/DEKR). - - - -
    - -
    +# Bottom-up Human Pose Estimation via Disentangled Keypoint Regression + + + +
    +DEKR (CVPR'2021) + +```bibtex +@inproceedings{geng2021bottom, + title={Bottom-up human pose estimation via disentangled keypoint regression}, + author={Geng, Zigang and Sun, Ke and Xiao, Bin and Zhang, Zhaoxiang and Wang, Jingdong}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={14676--14686}, + year={2021} +} +``` + +
    + +## Abstract + + + +In this paper, we are interested in the bottom-up paradigm of estimating human poses from an image. We study the dense keypoint regression framework that is previously inferior to the keypoint detection and grouping framework. Our motivation is that regressing keypoint positions accurately needs to learn representations that focus on the keypoint regions. +We present a simple yet effective approach, named disentangled keypoint regression (DEKR). We adopt adaptive convolutions through pixel-wise spatial transformer to activate the pixels in the keypoint regions and accordingly learn representations from them. We use a multi-branch structure for separate regression: each branch learns a representation with dedicated adaptive convolutions and regresses one keypoint. The resulting disentangled representations are able to attend to the keypoint regions, respectively, and thus the keypoint regression is spatially more accurate. We empirically show that the proposed direct regression method outperforms keypoint detection and grouping methods and achieves superior bottom-up pose estimation results on two benchmark datasets, COCO and CrowdPose. The code and models are available at [this https URL](https://github.com/HRNet/DEKR). + + + +
    + +
    diff --git a/docs/src/papers/algorithms/dsnt.md b/docs/src/papers/algorithms/dsnt.md index 6a526429d6..dd187391af 100644 --- a/docs/src/papers/algorithms/dsnt.md +++ b/docs/src/papers/algorithms/dsnt.md @@ -1,29 +1,29 @@ -# Numerical Coordinate Regression with Convolutional Neural Networks - - - -
    -DSNT (2018) - -```bibtex -@article{nibali2018numerical, - title={Numerical Coordinate Regression with Convolutional Neural Networks}, - author={Nibali, Aiden and He, Zhen and Morgan, Stuart and Prendergast, Luke}, - journal={arXiv preprint arXiv:1801.07372}, - year={2018} -} -``` - -
    - -## Abstract - - - -We study deep learning approaches to inferring numerical coordinates for points of interest in an input image. Existing convolutional neural network-based solutions to this problem either take a heatmap matching approach or regress to coordinates with a fully connected output layer. Neither of these approaches is ideal, since the former is not entirely differentiable, and the latter lacks inherent spatial generalization. We propose our differentiable spatial to numerical transform (DSNT) to fill this gap. The DSNT layer adds no trainable parameters, is fully differentiable, and exhibits good spatial generalization. Unlike heatmap matching, DSNT works well with low heatmap resolutions, so it can be dropped in as an output layer for a wide range of existing fully convolutional architectures. Consequently, DSNT offers a better trade-off between inference speed and prediction accuracy compared to existing techniques. When used to replace the popular heatmap matching approach used in almost all state-of-the-art methods for pose estimation, DSNT gives better prediction accuracy for all model architectures tested. - - - -
    - -
    +# Numerical Coordinate Regression with Convolutional Neural Networks + + + +
    +DSNT (2018) + +```bibtex +@article{nibali2018numerical, + title={Numerical Coordinate Regression with Convolutional Neural Networks}, + author={Nibali, Aiden and He, Zhen and Morgan, Stuart and Prendergast, Luke}, + journal={arXiv preprint arXiv:1801.07372}, + year={2018} +} +``` + +
    + +## Abstract + + + +We study deep learning approaches to inferring numerical coordinates for points of interest in an input image. Existing convolutional neural network-based solutions to this problem either take a heatmap matching approach or regress to coordinates with a fully connected output layer. Neither of these approaches is ideal, since the former is not entirely differentiable, and the latter lacks inherent spatial generalization. We propose our differentiable spatial to numerical transform (DSNT) to fill this gap. The DSNT layer adds no trainable parameters, is fully differentiable, and exhibits good spatial generalization. Unlike heatmap matching, DSNT works well with low heatmap resolutions, so it can be dropped in as an output layer for a wide range of existing fully convolutional architectures. Consequently, DSNT offers a better trade-off between inference speed and prediction accuracy compared to existing techniques. When used to replace the popular heatmap matching approach used in almost all state-of-the-art methods for pose estimation, DSNT gives better prediction accuracy for all model architectures tested. + + + +
    + +
    diff --git a/docs/src/papers/algorithms/higherhrnet.md b/docs/src/papers/algorithms/higherhrnet.md index c1d61c992a..feed6ea06d 100644 --- a/docs/src/papers/algorithms/higherhrnet.md +++ b/docs/src/papers/algorithms/higherhrnet.md @@ -1,30 +1,30 @@ -# HigherHRNet: Scale-Aware Representation Learning for Bottom-Up Human Pose Estimation - - - -
    -HigherHRNet (CVPR'2020) - -```bibtex -@inproceedings{cheng2020higherhrnet, - title={HigherHRNet: Scale-Aware Representation Learning for Bottom-Up Human Pose Estimation}, - author={Cheng, Bowen and Xiao, Bin and Wang, Jingdong and Shi, Honghui and Huang, Thomas S and Zhang, Lei}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={5386--5395}, - year={2020} -} -``` - -
    - -## Abstract - - - -Bottom-up human pose estimation methods have difficulties in predicting the correct pose for small persons due to challenges in scale variation. In this paper, we present HigherHRNet: a novel bottom-up human pose estimation method for learning scale-aware representations using high-resolution feature pyramids. Equipped with multi-resolution supervision for training and multi-resolution aggregation for inference, the proposed approach is able to solve the scale variation challenge in bottom-up multi-person pose estimation and localize keypoints more precisely, especially for small person. The feature pyramid in HigherHRNet consists of feature map outputs from HRNet and upsampled higher-resolution outputs through a transposed convolution. HigherHRNet outperforms the previous best bottom-up method by 2.5% AP for medium person on COCO test-dev, showing its effectiveness in handling scale variation. Furthermore, HigherHRNet achieves new state-of-the-art result on COCO test-dev (70.5% AP) without using refinement or other post-processing techniques, surpassing all existing bottom-up methods. HigherHRNet even surpasses all top-down methods on CrowdPose test (67.6% AP), suggesting its robustness in crowded scene. - - - -
    - -
    +# HigherHRNet: Scale-Aware Representation Learning for Bottom-Up Human Pose Estimation + + + +
    +HigherHRNet (CVPR'2020) + +```bibtex +@inproceedings{cheng2020higherhrnet, + title={HigherHRNet: Scale-Aware Representation Learning for Bottom-Up Human Pose Estimation}, + author={Cheng, Bowen and Xiao, Bin and Wang, Jingdong and Shi, Honghui and Huang, Thomas S and Zhang, Lei}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={5386--5395}, + year={2020} +} +``` + +
    + +## Abstract + + + +Bottom-up human pose estimation methods have difficulties in predicting the correct pose for small persons due to challenges in scale variation. In this paper, we present HigherHRNet: a novel bottom-up human pose estimation method for learning scale-aware representations using high-resolution feature pyramids. Equipped with multi-resolution supervision for training and multi-resolution aggregation for inference, the proposed approach is able to solve the scale variation challenge in bottom-up multi-person pose estimation and localize keypoints more precisely, especially for small person. The feature pyramid in HigherHRNet consists of feature map outputs from HRNet and upsampled higher-resolution outputs through a transposed convolution. HigherHRNet outperforms the previous best bottom-up method by 2.5% AP for medium person on COCO test-dev, showing its effectiveness in handling scale variation. Furthermore, HigherHRNet achieves new state-of-the-art result on COCO test-dev (70.5% AP) without using refinement or other post-processing techniques, surpassing all existing bottom-up methods. HigherHRNet even surpasses all top-down methods on CrowdPose test (67.6% AP), suggesting its robustness in crowded scene. + + + +
    + +
    diff --git a/docs/src/papers/algorithms/hmr.md b/docs/src/papers/algorithms/hmr.md index 5c90aa4521..3cdc9dba52 100644 --- a/docs/src/papers/algorithms/hmr.md +++ b/docs/src/papers/algorithms/hmr.md @@ -1,32 +1,32 @@ -# End-to-end Recovery of Human Shape and Pose - - - -
    -HMR (CVPR'2018) - -```bibtex -@inProceedings{kanazawaHMR18, - title={End-to-end Recovery of Human Shape and Pose}, - author = {Angjoo Kanazawa - and Michael J. Black - and David W. Jacobs - and Jitendra Malik}, - booktitle={Computer Vision and Pattern Recognition (CVPR)}, - year={2018} -} -``` - -
    - -## Abstract - - - -We describe Human Mesh Recovery (HMR), an end-to-end framework for reconstructing a full 3D mesh of a human body from a single RGB image. In contrast to most current methods that compute 2D or 3D joint locations, we produce a richer and more useful mesh representation that is parameterized by shape and 3D joint angles. The main objective is to minimize the reprojection loss of keypoints, which allows our model to be trained using in-the-wild images that only have ground truth 2D annotations. However, the reprojection loss alone is highly underconstrained. In this work we address this problem by introducing an adversary trained to tell whether human body shape and pose are real or not using a large database of 3D human meshes. We show that HMR can be trained with and without using any paired 2D-to-3D supervision. We do not rely on intermediate 2D keypoint detections and infer 3D pose and shape parameters directly from image pixels. Our model runs in real-time given a bounding box containing the person. We demonstrate our approach on various images in-the-wild and out-perform previous optimization-based methods that output 3D meshes and show competitive results on tasks such as 3D joint location estimation and part segmentation. - - - -
    - -
    +# End-to-end Recovery of Human Shape and Pose + + + +
    +HMR (CVPR'2018) + +```bibtex +@inProceedings{kanazawaHMR18, + title={End-to-end Recovery of Human Shape and Pose}, + author = {Angjoo Kanazawa + and Michael J. Black + and David W. Jacobs + and Jitendra Malik}, + booktitle={Computer Vision and Pattern Recognition (CVPR)}, + year={2018} +} +``` + +
    + +## Abstract + + + +We describe Human Mesh Recovery (HMR), an end-to-end framework for reconstructing a full 3D mesh of a human body from a single RGB image. In contrast to most current methods that compute 2D or 3D joint locations, we produce a richer and more useful mesh representation that is parameterized by shape and 3D joint angles. The main objective is to minimize the reprojection loss of keypoints, which allows our model to be trained using in-the-wild images that only have ground truth 2D annotations. However, the reprojection loss alone is highly underconstrained. In this work we address this problem by introducing an adversary trained to tell whether human body shape and pose are real or not using a large database of 3D human meshes. We show that HMR can be trained with and without using any paired 2D-to-3D supervision. We do not rely on intermediate 2D keypoint detections and infer 3D pose and shape parameters directly from image pixels. Our model runs in real-time given a bounding box containing the person. We demonstrate our approach on various images in-the-wild and out-perform previous optimization-based methods that output 3D meshes and show competitive results on tasks such as 3D joint location estimation and part segmentation. + + + +
    + +
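The HMR abstract's main training signal is the keypoint reprojection loss: predicted 3D joints are projected with a weak-perspective camera and compared against 2D annotations, so in-the-wild images with only 2D labels can supervise the model. A hedged sketch of that loss (tensor layouts and the L1 penalty are illustration choices; the adversarial prior from the abstract is omitted):

```python
import torch

def reprojection_loss(joints3d, cam, keypoints2d, visibility):
    """Weak-perspective reprojection loss in the spirit of HMR.

    joints3d:    (B, K, 3) predicted 3D joints
    cam:         (B, 3)    [scale, tx, ty] weak-perspective camera
    keypoints2d: (B, K, 2) ground-truth 2D keypoints
    visibility:  (B, K)    1 for annotated keypoints, 0 otherwise
    """
    scale = cam[:, None, :1]          # (B, 1, 1)
    trans = cam[:, None, 1:]          # (B, 1, 2)
    projected = scale * joints3d[..., :2] + trans
    err = (projected - keypoints2d).abs().sum(dim=-1)  # L1 error per keypoint
    return (err * visibility).sum() / visibility.sum().clamp(min=1)

loss = reprojection_loss(torch.randn(2, 24, 3), torch.randn(2, 3),
                         torch.randn(2, 24, 2), torch.ones(2, 24))
print(loss.item())
```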
    diff --git a/docs/src/papers/algorithms/hourglass.md b/docs/src/papers/algorithms/hourglass.md index 15f4d4d3c6..c6c7e51592 100644 --- a/docs/src/papers/algorithms/hourglass.md +++ b/docs/src/papers/algorithms/hourglass.md @@ -1,31 +1,31 @@ -# Stacked hourglass networks for human pose estimation - - - -
    -Hourglass (ECCV'2016) - -```bibtex -@inproceedings{newell2016stacked, - title={Stacked hourglass networks for human pose estimation}, - author={Newell, Alejandro and Yang, Kaiyu and Deng, Jia}, - booktitle={European conference on computer vision}, - pages={483--499}, - year={2016}, - organization={Springer} -} -``` - -
    - -## Abstract - - - -This work introduces a novel convolutional network architecture for the task of human pose estimation. Features are processed across all scales and consolidated to best capture the various spatial relationships associated with the body. We show how repeated bottom-up, top-down processing used in conjunction with intermediate supervision is critical to improving the performance of the network. We refer to the architecture as a "stacked hourglass" network based on the successive steps of pooling and upsampling that are done to produce a final set of predictions. State-of-the-art results are achieved on the FLIC and MPII benchmarks outcompeting all recent methods. - - - -
    - -
    +# Stacked hourglass networks for human pose estimation + + + +
    +Hourglass (ECCV'2016) + +```bibtex +@inproceedings{newell2016stacked, + title={Stacked hourglass networks for human pose estimation}, + author={Newell, Alejandro and Yang, Kaiyu and Deng, Jia}, + booktitle={European conference on computer vision}, + pages={483--499}, + year={2016}, + organization={Springer} +} +``` + +
    + +## Abstract + + + +This work introduces a novel convolutional network architecture for the task of human pose estimation. Features are processed across all scales and consolidated to best capture the various spatial relationships associated with the body. We show how repeated bottom-up, top-down processing used in conjunction with intermediate supervision is critical to improving the performance of the network. We refer to the architecture as a "stacked hourglass" network based on the successive steps of pooling and upsampling that are done to produce a final set of predictions. State-of-the-art results are achieved on the FLIC and MPII benchmarks outcompeting all recent methods. + + + +
    + +
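The hourglass design the abstract describes is repeated bottom-up (pooling) and top-down (upsampling) processing, with skip connections consolidating features across scales. A compact recursive sketch, assuming plain 3x3 convolution blocks in place of the paper's residual modules:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

def conv_block(channels):
    return nn.Sequential(nn.Conv2d(channels, channels, 3, padding=1),
                         nn.ReLU(inplace=True))

class Hourglass(nn.Module):
    """Recursive bottom-up/top-down module, loosely following the paper."""

    def __init__(self, depth, channels):
        super().__init__()
        self.skip = conv_block(channels)
        self.down = conv_block(channels)
        self.inner = Hourglass(depth - 1, channels) if depth > 1 else conv_block(channels)
        self.up = conv_block(channels)

    def forward(self, x):
        skip = self.skip(x)                          # same-scale branch
        y = self.down(F.max_pool2d(x, 2))            # bottom-up: pool and process
        y = self.up(self.inner(y))                   # recurse at lower resolution
        y = F.interpolate(y, scale_factor=2, mode='nearest')  # top-down: upsample
        return y + skip                              # consolidate across scales

hg = Hourglass(depth=4, channels=64)
print(hg(torch.randn(1, 64, 64, 64)).shape)  # torch.Size([1, 64, 64, 64])
```

In the full network, several such hourglasses are stacked and a heatmap head after each one provides the intermediate supervision the abstract mentions.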
    diff --git a/docs/src/papers/algorithms/hrnet.md b/docs/src/papers/algorithms/hrnet.md index 05a46f543e..e1fba7b601 100644 --- a/docs/src/papers/algorithms/hrnet.md +++ b/docs/src/papers/algorithms/hrnet.md @@ -1,32 +1,32 @@ -# Deep high-resolution representation learning for human pose estimation - - - -
    -HRNet (CVPR'2019) - -```bibtex -@inproceedings{sun2019deep, - title={Deep high-resolution representation learning for human pose estimation}, - author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={5693--5703}, - year={2019} -} -``` - -
    - -## Abstract - - - -In this paper, we are interested in the human pose estimation problem with a focus on learning reliable highresolution representations. Most existing methods recover high-resolution representations from low-resolution representations produced by a high-to-low resolution network. Instead, our proposed network maintains high-resolution representations through the whole process. We start from a high-resolution subnetwork as the first stage, gradually add high-to-low resolution subnetworks one by one to form more stages, and connect the mutliresolution subnetworks in parallel. We conduct repeated multi-scale fusions such that each of the high-to-low resolution representations receives information from other parallel representations over and over, leading to rich highresolution representations. As a result, the predicted keypoint heatmap is potentially more accurate and spatially more precise. We empirically demonstrate the effectiveness -of our network through the superior pose estimation results over two benchmark datasets: the COCO keypoint detection -dataset and the MPII Human Pose dataset. In addition, we show the superiority of our network in pose tracking on the PoseTrack dataset. - - - -
    - -
    +# Deep high-resolution representation learning for human pose estimation + + + +
    +HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ +## Abstract + + + +In this paper, we are interested in the human pose estimation problem with a focus on learning reliable high-resolution representations. Most existing methods recover high-resolution representations from low-resolution representations produced by a high-to-low resolution network. Instead, our proposed network maintains high-resolution representations through the whole process. We start from a high-resolution subnetwork as the first stage, gradually add high-to-low resolution subnetworks one by one to form more stages, and connect the multi-resolution subnetworks in parallel. We conduct repeated multi-scale fusions such that each of the high-to-low resolution representations receives information from other parallel representations over and over, leading to rich high-resolution representations. As a result, the predicted keypoint heatmap is potentially more accurate and spatially more precise. We empirically demonstrate the effectiveness +of our network through the superior pose estimation results over two benchmark datasets: the COCO keypoint detection +dataset and the MPII Human Pose dataset. In addition, we show the superiority of our network in pose tracking on the PoseTrack dataset. + + + +
    + +
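The core mechanism in the HRNet abstract is the repeated multi-scale fusion: every parallel branch repeatedly receives resized features from all other branches. A simplified sketch of one fusion step (the real network uses strided and 1x1 convolutions for the exchange and different channel widths per branch; plain bilinear resizing with equal widths is an assumption here):

```python
import torch
import torch.nn.functional as F

def exchange(branches):
    """One multi-resolution fusion step in the spirit of HRNet.

    `branches` is a list of feature maps ordered from high to low resolution,
    all with the same channel count for simplicity.
    """
    fused = []
    for i, target in enumerate(branches):
        acc = target.clone()
        for j, src in enumerate(branches):
            if j == i:
                continue
            # Resize every other branch to the target resolution and add it in.
            acc = acc + F.interpolate(src, size=target.shape[-2:],
                                      mode='bilinear', align_corners=False)
        fused.append(acc)
    return fused

branches = [torch.randn(1, 32, 64, 48), torch.randn(1, 32, 32, 24),
            torch.randn(1, 32, 16, 12)]
print([f.shape for f in exchange(branches)])
```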
    diff --git a/docs/src/papers/algorithms/hrnetv2.md b/docs/src/papers/algorithms/hrnetv2.md index f2ed2a9c0c..f764d61def 100644 --- a/docs/src/papers/algorithms/hrnetv2.md +++ b/docs/src/papers/algorithms/hrnetv2.md @@ -1,31 +1,31 @@ -# Deep high-resolution representation learning for visual recognition - - - -
    -HRNetv2 (TPAMI'2019) - -```bibtex -@article{WangSCJDZLMTWLX19, - title={Deep High-Resolution Representation Learning for Visual Recognition}, - author={Jingdong Wang and Ke Sun and Tianheng Cheng and - Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and - Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, - journal={TPAMI}, - year={2019} -} -``` - -
    - -## Abstract - - - -High-resolution representations are essential for position-sensitive vision problems, such as human pose estimation, semantic segmentation, and object detection. Existing state-of-the-art frameworks first encode the input image as a low-resolution representation through a subnetwork that is formed by connecting high-to-low resolution convolutions in series (e.g., ResNet, VGGNet), and then recover the high-resolution representation from the encoded low-resolution representation. Instead, our proposed network, named as High-Resolution Network (HRNet), maintains high-resolution representations through the whole process. There are two key characteristics: (i) Connect the high-to-low resolution convolution streams in parallel and (ii) repeatedly exchange the information across resolutions. The benefit is that the resulting representation is semantically richer and spatially more precise. We show the superiority of the proposed HRNet in a wide range of applications, including human pose estimation, semantic segmentation, and object detection, suggesting that the HRNet is a stronger backbone for computer vision problems. - - - -
    - -
    +# Deep high-resolution representation learning for visual recognition + + + +
    +HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
    + +## Abstract + + + +High-resolution representations are essential for position-sensitive vision problems, such as human pose estimation, semantic segmentation, and object detection. Existing state-of-the-art frameworks first encode the input image as a low-resolution representation through a subnetwork that is formed by connecting high-to-low resolution convolutions in series (e.g., ResNet, VGGNet), and then recover the high-resolution representation from the encoded low-resolution representation. Instead, our proposed network, named as High-Resolution Network (HRNet), maintains high-resolution representations through the whole process. There are two key characteristics: (i) Connect the high-to-low resolution convolution streams in parallel and (ii) repeatedly exchange the information across resolutions. The benefit is that the resulting representation is semantically richer and spatially more precise. We show the superiority of the proposed HRNet in a wide range of applications, including human pose estimation, semantic segmentation, and object detection, suggesting that the HRNet is a stronger backbone for computer vision problems. + + + +
    + +
    diff --git a/docs/src/papers/algorithms/internet.md b/docs/src/papers/algorithms/internet.md index e37ea72cea..3c9ad7013a 100644 --- a/docs/src/papers/algorithms/internet.md +++ b/docs/src/papers/algorithms/internet.md @@ -1,29 +1,29 @@ -# InterHand2.6M: A Dataset and Baseline for 3D Interacting Hand Pose Estimation from a Single RGB Image - - - -
    -InterNet (ECCV'2020) - -```bibtex -@InProceedings{Moon_2020_ECCV_InterHand2.6M, -author = {Moon, Gyeongsik and Yu, Shoou-I and Wen, He and Shiratori, Takaaki and Lee, Kyoung Mu}, -title = {InterHand2.6M: A Dataset and Baseline for 3D Interacting Hand Pose Estimation from a Single RGB Image}, -booktitle = {European Conference on Computer Vision (ECCV)}, -year = {2020} -} -``` - -
    - -## Abstract - - - -Analysis of hand-hand interactions is a crucial step towards better understanding human behavior. However, most researches in 3D hand pose estimation have focused on the isolated single hand case. Therefore, we firstly propose (1) a large-scale dataset, InterHand2.6M, and (2) a baseline network, InterNet, for 3D interacting hand pose estimation from a single RGB image. The proposed InterHand2.6M consists of 2.6 M labeled single and interacting hand frames under various poses from multiple subjects. Our InterNet simultaneously performs 3D single and interacting hand pose estimation. In our experiments, we demonstrate big gains in 3D interacting hand pose estimation accuracy when leveraging the interacting hand data in InterHand2.6M. We also report the accuracy of InterNet on InterHand2.6M, which serves as a strong baseline for this new dataset. Finally, we show 3D interacting hand pose estimation results from general images. - - - -
    - -
    +# InterHand2.6M: A Dataset and Baseline for 3D Interacting Hand Pose Estimation from a Single RGB Image + + + +
    +InterNet (ECCV'2020) + +```bibtex +@InProceedings{Moon_2020_ECCV_InterHand2.6M, +author = {Moon, Gyeongsik and Yu, Shoou-I and Wen, He and Shiratori, Takaaki and Lee, Kyoung Mu}, +title = {InterHand2.6M: A Dataset and Baseline for 3D Interacting Hand Pose Estimation from a Single RGB Image}, +booktitle = {European Conference on Computer Vision (ECCV)}, +year = {2020} +} +``` + +
+ +## Abstract + + + +Analysis of hand-hand interactions is a crucial step towards better understanding human behavior. However, most research in 3D hand pose estimation has focused on the isolated single hand case. Therefore, we first propose (1) a large-scale dataset, InterHand2.6M, and (2) a baseline network, InterNet, for 3D interacting hand pose estimation from a single RGB image. The proposed InterHand2.6M consists of 2.6 M labeled single and interacting hand frames under various poses from multiple subjects. Our InterNet simultaneously performs 3D single and interacting hand pose estimation. In our experiments, we demonstrate big gains in 3D interacting hand pose estimation accuracy when leveraging the interacting hand data in InterHand2.6M. We also report the accuracy of InterNet on InterHand2.6M, which serves as a strong baseline for this new dataset. Finally, we show 3D interacting hand pose estimation results from general images. + + + +
    + +
    diff --git a/docs/src/papers/algorithms/ipr.md b/docs/src/papers/algorithms/ipr.md index fca06b986a..859b36a89b 100644 --- a/docs/src/papers/algorithms/ipr.md +++ b/docs/src/papers/algorithms/ipr.md @@ -1,30 +1,30 @@ -# Integral Human Pose Regression - - - -
    -IPR (ECCV'2018) - -```bibtex -@inproceedings{sun2018integral, - title={Integral human pose regression}, - author={Sun, Xiao and Xiao, Bin and Wei, Fangyin and Liang, Shuang and Wei, Yichen}, - booktitle={Proceedings of the European conference on computer vision (ECCV)}, - pages={529--545}, - year={2018} -} -``` - -
    - -## Abstract - - - -State-of-the-art human pose estimation methods are based on heat map representation. In spite of the good performance, the representation has a few issues in nature, such as not differentiable and quantization error. This work shows that a simple integral operation relates and unifies the heat map representation and joint regression, thus avoiding the above issues. It is differentiable, efficient, and compatible with any heat map based methods. Its effectiveness is convincingly validated via comprehensive ablation experiments under various settings, specifically on 3D pose estimation, for the first time. - - - -
    - -
    +# Integral Human Pose Regression + + + +
    +IPR (ECCV'2018) + +```bibtex +@inproceedings{sun2018integral, + title={Integral human pose regression}, + author={Sun, Xiao and Xiao, Bin and Wei, Fangyin and Liang, Shuang and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={529--545}, + year={2018} +} +``` + +
+ +## Abstract + + + +State-of-the-art human pose estimation methods are based on heat map representation. In spite of the good performance, the representation has a few inherent issues, such as non-differentiability and quantization error. This work shows that a simple integral operation relates and unifies the heat map representation and joint regression, thus avoiding the above issues. It is differentiable, efficient, and compatible with any heat map based method. Its effectiveness is convincingly validated via comprehensive ablation experiments under various settings, specifically on 3D pose estimation, for the first time. + + + +
    + +
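The "simple integral operation" in the IPR abstract is a differentiable soft-argmax: normalize the heatmap into a probability map and take the expectation of the pixel coordinates, which connects heatmaps to direct joint regression. A small self-contained sketch:

```python
import torch

def integral_regression(heatmaps):
    """Differentiable soft-argmax over 2D heatmaps (the integral operation).

    heatmaps: (B, K, H, W) unnormalised scores.
    Returns:  (B, K, 2) expected (x, y) coordinates in pixels.
    """
    b, k, h, w = heatmaps.shape
    probs = heatmaps.reshape(b, k, -1).softmax(dim=-1).reshape(b, k, h, w)
    xs = torch.arange(w, dtype=probs.dtype)
    ys = torch.arange(h, dtype=probs.dtype)
    x = (probs.sum(dim=2) * xs).sum(dim=-1)  # expectation over columns
    y = (probs.sum(dim=3) * ys).sum(dim=-1)  # expectation over rows
    return torch.stack([x, y], dim=-1)

coords = integral_regression(torch.randn(2, 17, 64, 48))
print(coords.shape)  # torch.Size([2, 17, 2])
```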
    diff --git a/docs/src/papers/algorithms/litehrnet.md b/docs/src/papers/algorithms/litehrnet.md index f446062caf..06c0077640 100644 --- a/docs/src/papers/algorithms/litehrnet.md +++ b/docs/src/papers/algorithms/litehrnet.md @@ -1,30 +1,30 @@ -# Lite-HRNet: A Lightweight High-Resolution Network - - - -
    -LiteHRNet (CVPR'2021) - -```bibtex -@inproceedings{Yulitehrnet21, - title={Lite-HRNet: A Lightweight High-Resolution Network}, - author={Yu, Changqian and Xiao, Bin and Gao, Changxin and Yuan, Lu and Zhang, Lei and Sang, Nong and Wang, Jingdong}, - booktitle={CVPR}, - year={2021} -} -``` - -
    - -## Abstract - - - -We present an efficient high-resolution network, Lite-HRNet, for human pose estimation. We start by simply applying the efficient shuffle block in ShuffleNet to HRNet (high-resolution network), yielding stronger performance over popular lightweight networks, such as MobileNet, ShuffleNet, and Small HRNet. -We find that the heavily-used pointwise (1x1) convolutions in shuffle blocks become the computational bottleneck. We introduce a lightweight unit, conditional channel weighting, to replace costly pointwise (1x1) convolutions in shuffle blocks. The complexity of channel weighting is linear w.r.t the number of channels and lower than the quadratic time complexity for pointwise convolutions. Our solution learns the weights from all the channels and over multiple resolutions that are readily available in the parallel branches in HRNet. It uses the weights as the bridge to exchange information across channels and resolutions, compensating the role played by the pointwise (1x1) convolution. Lite-HRNet demonstrates superior results on human pose estimation over popular lightweight networks. Moreover, Lite-HRNet can be easily applied to semantic segmentation task in the same lightweight manner. - - - -
    - -
    +# Lite-HRNet: A Lightweight High-Resolution Network + + + +
    +LiteHRNet (CVPR'2021) + +```bibtex +@inproceedings{Yulitehrnet21, + title={Lite-HRNet: A Lightweight High-Resolution Network}, + author={Yu, Changqian and Xiao, Bin and Gao, Changxin and Yuan, Lu and Zhang, Lei and Sang, Nong and Wang, Jingdong}, + booktitle={CVPR}, + year={2021} +} +``` + +
    + +## Abstract + + + +We present an efficient high-resolution network, Lite-HRNet, for human pose estimation. We start by simply applying the efficient shuffle block in ShuffleNet to HRNet (high-resolution network), yielding stronger performance over popular lightweight networks, such as MobileNet, ShuffleNet, and Small HRNet. +We find that the heavily-used pointwise (1x1) convolutions in shuffle blocks become the computational bottleneck. We introduce a lightweight unit, conditional channel weighting, to replace costly pointwise (1x1) convolutions in shuffle blocks. The complexity of channel weighting is linear w.r.t the number of channels and lower than the quadratic time complexity for pointwise convolutions. Our solution learns the weights from all the channels and over multiple resolutions that are readily available in the parallel branches in HRNet. It uses the weights as the bridge to exchange information across channels and resolutions, compensating the role played by the pointwise (1x1) convolution. Lite-HRNet demonstrates superior results on human pose estimation over popular lightweight networks. Moreover, Lite-HRNet can be easily applied to semantic segmentation task in the same lightweight manner. + + + +
    + +
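The key replacement described in the Lite-HRNet abstract is conditional channel weighting: per-channel weights computed from pooled multi-resolution features stand in for costly pointwise convolutions, so the cost grows linearly with the channel count. A toy two-branch version (the squeeze-style MLP and layer sizes are illustrative assumptions, not the paper's exact module):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class CrossResolutionWeighting(nn.Module):
    """Toy conditional channel weighting across two resolutions.

    Per-channel weights come from globally pooled features of both branches,
    so the cost is linear in the number of channels, unlike a pointwise
    convolution whose cost is quadratic in channels.
    """

    def __init__(self, channels):
        super().__init__()
        total = 2 * channels
        self.fc = nn.Sequential(
            nn.Linear(total, total // 4), nn.ReLU(inplace=True),
            nn.Linear(total // 4, total), nn.Sigmoid())

    def forward(self, high, low):
        pooled = torch.cat([F.adaptive_avg_pool2d(high, 1).flatten(1),
                            F.adaptive_avg_pool2d(low, 1).flatten(1)], dim=1)
        w_high, w_low = self.fc(pooled).chunk(2, dim=1)
        # Reweight each branch with weights conditioned on both resolutions.
        return (high * w_high[..., None, None],
                low * w_low[..., None, None])

mod = CrossResolutionWeighting(channels=40)
h, l = mod(torch.randn(1, 40, 64, 48), torch.randn(1, 40, 32, 24))
print(h.shape, l.shape)
```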
    diff --git a/docs/src/papers/algorithms/mspn.md b/docs/src/papers/algorithms/mspn.md index 1915cd3915..5824221603 100644 --- a/docs/src/papers/algorithms/mspn.md +++ b/docs/src/papers/algorithms/mspn.md @@ -1,29 +1,29 @@ -# Rethinking on multi-stage networks for human pose estimation - - - -
    -MSPN (ArXiv'2019) - -```bibtex -@article{li2019rethinking, - title={Rethinking on Multi-Stage Networks for Human Pose Estimation}, - author={Li, Wenbo and Wang, Zhicheng and Yin, Binyi and Peng, Qixiang and Du, Yuming and Xiao, Tianzi and Yu, Gang and Lu, Hongtao and Wei, Yichen and Sun, Jian}, - journal={arXiv preprint arXiv:1901.00148}, - year={2019} -} -``` - -
    - -## Abstract - - - -Existing pose estimation approaches fall into two categories: single-stage and multi-stage methods. While multi-stage methods are seemingly more suited for the task, their performance in current practice is not as good as single-stage methods. This work studies this issue. We argue that the current multi-stage methods' unsatisfactory performance comes from the insufficiency in various design choices. We propose several improvements, including the single-stage module design, cross stage feature aggregation, and coarse-to-fine supervision. The resulting method establishes the new state-of-the-art on both MS COCO and MPII Human Pose dataset, justifying the effectiveness of a multi-stage architecture. The source code is publicly available for further research. - - - -
    - -
    +# Rethinking on multi-stage networks for human pose estimation + + + +
    +MSPN (ArXiv'2019) + +```bibtex +@article{li2019rethinking, + title={Rethinking on Multi-Stage Networks for Human Pose Estimation}, + author={Li, Wenbo and Wang, Zhicheng and Yin, Binyi and Peng, Qixiang and Du, Yuming and Xiao, Tianzi and Yu, Gang and Lu, Hongtao and Wei, Yichen and Sun, Jian}, + journal={arXiv preprint arXiv:1901.00148}, + year={2019} +} +``` + +
    + +## Abstract + + + +Existing pose estimation approaches fall into two categories: single-stage and multi-stage methods. While multi-stage methods are seemingly more suited for the task, their performance in current practice is not as good as single-stage methods. This work studies this issue. We argue that the current multi-stage methods' unsatisfactory performance comes from the insufficiency in various design choices. We propose several improvements, including the single-stage module design, cross stage feature aggregation, and coarse-to-fine supervision. The resulting method establishes the new state-of-the-art on both MS COCO and MPII Human Pose dataset, justifying the effectiveness of a multi-stage architecture. The source code is publicly available for further research. + + + +
    + +
    diff --git a/docs/src/papers/algorithms/posewarper.md b/docs/src/papers/algorithms/posewarper.md index 285a36c582..973b298049 100644 --- a/docs/src/papers/algorithms/posewarper.md +++ b/docs/src/papers/algorithms/posewarper.md @@ -1,29 +1,29 @@ -# Learning Temporal Pose Estimation from Sparsely-Labeled Videos - - - -
    -PoseWarper (NeurIPS'2019) - -```bibtex -@inproceedings{NIPS2019_gberta, -title = {Learning Temporal Pose Estimation from Sparsely Labeled Videos}, -author = {Bertasius, Gedas and Feichtenhofer, Christoph, and Tran, Du and Shi, Jianbo, and Torresani, Lorenzo}, -booktitle = {Advances in Neural Information Processing Systems 33}, -year = {2019}, -} -``` - -
    - -## Abstract - - - -Modern approaches for multi-person pose estimation in video require large amounts of dense annotations. However, labeling every frame in a video is costly and labor intensive. To reduce the need for dense annotations, we propose a PoseWarper network that leverages training videos with sparse annotations (every k frames) to learn to perform dense temporal pose propagation and estimation. Given a pair of video frames---a labeled Frame A and an unlabeled Frame B---we train our model to predict human pose in Frame A using the features from Frame B by means of deformable convolutions to implicitly learn the pose warping between A and B. We demonstrate that we can leverage our trained PoseWarper for several applications. First, at inference time we can reverse the application direction of our network in order to propagate pose information from manually annotated frames to unlabeled frames. This makes it possible to generate pose annotations for the entire video given only a few manually-labeled frames. Compared to modern label propagation methods based on optical flow, our warping mechanism is much more compact (6M vs 39M parameters), and also more accurate (88.7% mAP vs 83.8% mAP). We also show that we can improve the accuracy of a pose estimator by training it on an augmented dataset obtained by adding our propagated poses to the original manual labels. Lastly, we can use our PoseWarper to aggregate temporal pose information from neighboring frames during inference. This allows our system to achieve state-of-the-art pose detection results on the PoseTrack2017 and PoseTrack2018 datasets. - - - -
    - -
    +# Learning Temporal Pose Estimation from Sparsely-Labeled Videos + + + +
    +PoseWarper (NeurIPS'2019) + +```bibtex +@inproceedings{NIPS2019_gberta, +title = {Learning Temporal Pose Estimation from Sparsely Labeled Videos}, +author = {Bertasius, Gedas and Feichtenhofer, Christoph, and Tran, Du and Shi, Jianbo, and Torresani, Lorenzo}, +booktitle = {Advances in Neural Information Processing Systems 33}, +year = {2019}, +} +``` + +
    + +## Abstract + + + +Modern approaches for multi-person pose estimation in video require large amounts of dense annotations. However, labeling every frame in a video is costly and labor intensive. To reduce the need for dense annotations, we propose a PoseWarper network that leverages training videos with sparse annotations (every k frames) to learn to perform dense temporal pose propagation and estimation. Given a pair of video frames---a labeled Frame A and an unlabeled Frame B---we train our model to predict human pose in Frame A using the features from Frame B by means of deformable convolutions to implicitly learn the pose warping between A and B. We demonstrate that we can leverage our trained PoseWarper for several applications. First, at inference time we can reverse the application direction of our network in order to propagate pose information from manually annotated frames to unlabeled frames. This makes it possible to generate pose annotations for the entire video given only a few manually-labeled frames. Compared to modern label propagation methods based on optical flow, our warping mechanism is much more compact (6M vs 39M parameters), and also more accurate (88.7% mAP vs 83.8% mAP). We also show that we can improve the accuracy of a pose estimator by training it on an augmented dataset obtained by adding our propagated poses to the original manual labels. Lastly, we can use our PoseWarper to aggregate temporal pose information from neighboring frames during inference. This allows our system to achieve state-of-the-art pose detection results on the PoseTrack2017 and PoseTrack2018 datasets. + + + +
    + +
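PoseWarper's warping step, as the abstract describes it, predicts offsets from a labeled/unlabeled frame pair and applies deformable convolution to move Frame B's pose features toward Frame A. A loose sketch using torchvision's DeformConv2d (channel sizes and the offset predictor are assumptions; the real model operates on multi-scale pose features and is trained end to end):

```python
import torch
import torch.nn as nn
from torchvision.ops import DeformConv2d

class ToyPoseWarper(nn.Module):
    """Warp Frame-B features toward Frame A with a deformable convolution."""

    def __init__(self, channels=64, kernel_size=3):
        super().__init__()
        offset_ch = 2 * kernel_size * kernel_size   # (dx, dy) per kernel tap
        self.offset_pred = nn.Conv2d(channels, offset_ch, 3, padding=1)
        self.warp = DeformConv2d(channels, channels, kernel_size, padding=1)

    def forward(self, feat_a, feat_b):
        offsets = self.offset_pred(feat_a - feat_b)  # implicit motion cue
        return self.warp(feat_b, offsets)            # B's features warped to A

warper = ToyPoseWarper()
out = warper(torch.randn(1, 64, 48, 64), torch.randn(1, 64, 48, 64))
print(out.shape)  # torch.Size([1, 64, 48, 64])
```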
    diff --git a/docs/src/papers/algorithms/rle.md b/docs/src/papers/algorithms/rle.md index cdc59d57ec..7734ca3d44 100644 --- a/docs/src/papers/algorithms/rle.md +++ b/docs/src/papers/algorithms/rle.md @@ -1,30 +1,30 @@ -# Human pose regression with residual log-likelihood estimation - - - -
    -RLE (ICCV'2021) - -```bibtex -@inproceedings{li2021human, - title={Human pose regression with residual log-likelihood estimation}, - author={Li, Jiefeng and Bian, Siyuan and Zeng, Ailing and Wang, Can and Pang, Bo and Liu, Wentao and Lu, Cewu}, - booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, - pages={11025--11034}, - year={2021} -} -``` - -
    - -## Abstract - - - -Heatmap-based methods dominate in the field of human pose estimation by modelling the output distribution through likelihood heatmaps. In contrast, regressionbased methods are more efficient but suffer from inferior performance. In this work, we explore maximum likelihood estimation (MLE) to develop an efficient and effective regression-based methods. From the perspective of MLE, adopting different regression losses is making different assumptions about the output density function. A density function closer to the true distribution leads to a better regression performance. In light of this, we propose a novel regression paradigm with Residual Log-likelihood Estimation (RLE) to capture the underlying output distribution. Concretely, RLE learns the change of the distribution instead of the unreferenced underlying distribution to facilitate the training process. With the proposed reparameterization design, our method is compatible with offthe-shelf flow models. The proposed method is effective, efficient and flexible. We show its potential in various human pose estimation tasks with comprehensive experiments. Compared to the conventional regression paradigm, regression with RLE bring 12.4 mAP improvement on MSCOCO without any test-time overhead. Moreover, for the first time, especially on multi-person pose estimation, our regression method is superior to the heatmap-based methods. - - - -
    - -
    +# Human pose regression with residual log-likelihood estimation + + + +
    +RLE (ICCV'2021) + +```bibtex +@inproceedings{li2021human, + title={Human pose regression with residual log-likelihood estimation}, + author={Li, Jiefeng and Bian, Siyuan and Zeng, Ailing and Wang, Can and Pang, Bo and Liu, Wentao and Lu, Cewu}, + booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, + pages={11025--11034}, + year={2021} +} +``` + +
+ +## Abstract + + + +Heatmap-based methods dominate in the field of human pose estimation by modelling the output distribution through likelihood heatmaps. In contrast, regression-based methods are more efficient but suffer from inferior performance. In this work, we explore maximum likelihood estimation (MLE) to develop an efficient and effective regression-based method. From the perspective of MLE, adopting different regression losses is making different assumptions about the output density function. A density function closer to the true distribution leads to a better regression performance. In light of this, we propose a novel regression paradigm with Residual Log-likelihood Estimation (RLE) to capture the underlying output distribution. Concretely, RLE learns the change of the distribution instead of the unreferenced underlying distribution to facilitate the training process. With the proposed reparameterization design, our method is compatible with off-the-shelf flow models. The proposed method is effective, efficient and flexible. We show its potential in various human pose estimation tasks with comprehensive experiments. Compared to the conventional regression paradigm, regression with RLE brings a 12.4 mAP improvement on MSCOCO without any test-time overhead. Moreover, for the first time, especially on multi-person pose estimation, our regression method is superior to the heatmap-based methods. + + + +
    + +
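The RLE abstract frames regression losses as maximum-likelihood estimation under an assumed output density; for instance, a plain L1 loss corresponds to a fixed-scale Laplace assumption. The sketch below shows only that MLE view with a learned per-keypoint scale; RLE's actual contribution, the flow-learned residual correction to such a base density, is deliberately omitted here:

```python
import torch

def laplace_nll(pred_mu, pred_log_sigma, target):
    """Negative log-likelihood of a Laplace density with learned scale (up to a constant).

    Under MLE, plain L1 regression amounts to a fixed-scale Laplace assumption;
    RLE additionally lets a normalizing flow learn a residual correction to the
    base density, which is not modelled in this sketch.
    """
    sigma = pred_log_sigma.exp()
    return (pred_log_sigma + (target - pred_mu).abs() / sigma).mean()

mu = torch.zeros(4, 17, 2, requires_grad=True)
log_sigma = torch.zeros(4, 17, 2, requires_grad=True)
loss = laplace_nll(mu, log_sigma, torch.randn(4, 17, 2))
loss.backward()
print(loss.item())
```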
    diff --git a/docs/src/papers/algorithms/rsn.md b/docs/src/papers/algorithms/rsn.md index b1fb1ea913..d8af907926 100644 --- a/docs/src/papers/algorithms/rsn.md +++ b/docs/src/papers/algorithms/rsn.md @@ -1,31 +1,31 @@ -# Learning delicate local representations for multi-person pose estimation - - - -
    -RSN (ECCV'2020) - -```bibtex -@misc{cai2020learning, - title={Learning Delicate Local Representations for Multi-Person Pose Estimation}, - author={Yuanhao Cai and Zhicheng Wang and Zhengxiong Luo and Binyi Yin and Angang Du and Haoqian Wang and Xinyu Zhou and Erjin Zhou and Xiangyu Zhang and Jian Sun}, - year={2020}, - eprint={2003.04030}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -
    - -## Abstract - - - -In this paper, we propose a novel method called Residual Steps Network (RSN). RSN aggregates features with the same spatial size (Intra-level features) efficiently to obtain delicate local representations, which retain rich low-level spatial information and result in precise keypoint localization. Additionally, we observe the output features contribute differently to final performance. To tackle this problem, we propose an efficient attention mechanism - Pose Refine Machine (PRM) to make a trade-off between local and global representations in output features and further refine the keypoint locations. Our approach won the 1st place of COCO Keypoint Challenge 2019 and achieves state-of-the-art results on both COCO and MPII benchmarks, without using extra training data and pretrained model. Our single model achieves 78.6 on COCO test-dev, 93.0 on MPII test dataset. Ensembled models achieve 79.2 on COCO test-dev, 77.1 on COCO test-challenge dataset. - - - -
    - -
    +# Learning delicate local representations for multi-person pose estimation + + + +
    +RSN (ECCV'2020) + +```bibtex +@misc{cai2020learning, + title={Learning Delicate Local Representations for Multi-Person Pose Estimation}, + author={Yuanhao Cai and Zhicheng Wang and Zhengxiong Luo and Binyi Yin and Angang Du and Haoqian Wang and Xinyu Zhou and Erjin Zhou and Xiangyu Zhang and Jian Sun}, + year={2020}, + eprint={2003.04030}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
    + +## Abstract + + + +In this paper, we propose a novel method called Residual Steps Network (RSN). RSN aggregates features with the same spatial size (Intra-level features) efficiently to obtain delicate local representations, which retain rich low-level spatial information and result in precise keypoint localization. Additionally, we observe the output features contribute differently to final performance. To tackle this problem, we propose an efficient attention mechanism - Pose Refine Machine (PRM) to make a trade-off between local and global representations in output features and further refine the keypoint locations. Our approach won the 1st place of COCO Keypoint Challenge 2019 and achieves state-of-the-art results on both COCO and MPII benchmarks, without using extra training data and pretrained model. Our single model achieves 78.6 on COCO test-dev, 93.0 on MPII test dataset. Ensembled models achieve 79.2 on COCO test-dev, 77.1 on COCO test-challenge dataset. + + + +
    + +
    diff --git a/docs/src/papers/algorithms/rtmpose.md b/docs/src/papers/algorithms/rtmpose.md index 04a3fb0a22..ff8ddd6411 100644 --- a/docs/src/papers/algorithms/rtmpose.md +++ b/docs/src/papers/algorithms/rtmpose.md @@ -1,34 +1,34 @@ -# RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose - - - -
    -RTMPose (arXiv'2023) - -```bibtex -@misc{https://doi.org/10.48550/arxiv.2303.07399, - doi = {10.48550/ARXIV.2303.07399}, - url = {https://arxiv.org/abs/2303.07399}, - author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, - keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, - title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, - publisher = {arXiv}, - year = {2023}, - copyright = {Creative Commons Attribution 4.0 International} -} - -``` - -
    - -## Abstract - - - -Recent studies on 2D pose estimation have achieved excellent performance on public benchmarks, yet its application in the industrial community still suffers from heavy model parameters and high latency. In order to bridge this gap, we empirically explore key factors in pose estimation including paradigm, model architecture, training strategy, and deployment, and present a high-performance real-time multi-person pose estimation framework, RTMPose, based on MMPose. Our RTMPose-m achieves 75.8% AP on COCO with 90+ FPS on an Intel i7-11700 CPU and 430+ FPS on an NVIDIA GTX 1660 Ti GPU, and RTMPose-l achieves 67.0% AP on COCO-WholeBody with 130+ FPS. To further evaluate RTMPose’s capability in critical real-time applications, we also report the performance after deploying on the mobile device. Our RTMPoses achieves 72.2% AP on COCO with 70+ FPS on a Snapdragon 865 chip, outperforming existing open-source libraries. Code and models are released at https://github.com/open-mmlab/mmpose/tree/main/projects/rtmpose. - - - -
    - -
    +# RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose + + + +
    +RTMPose (arXiv'2023) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2303.07399, + doi = {10.48550/ARXIV.2303.07399}, + url = {https://arxiv.org/abs/2303.07399}, + author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, + publisher = {arXiv}, + year = {2023}, + copyright = {Creative Commons Attribution 4.0 International} +} + +``` + +
+ +## Abstract + + + +Recent studies on 2D pose estimation have achieved excellent performance on public benchmarks, yet its application in the industrial community still suffers from heavy model parameters and high latency. In order to bridge this gap, we empirically explore key factors in pose estimation including paradigm, model architecture, training strategy, and deployment, and present a high-performance real-time multi-person pose estimation framework, RTMPose, based on MMPose. Our RTMPose-m achieves 75.8% AP on COCO with 90+ FPS on an Intel i7-11700 CPU and 430+ FPS on an NVIDIA GTX 1660 Ti GPU, and RTMPose-l achieves 67.0% AP on COCO-WholeBody with 130+ FPS. To further evaluate RTMPose’s capability in critical real-time applications, we also report the performance after deployment on mobile devices. Our RTMPose-s achieves 72.2% AP on COCO with 70+ FPS on a Snapdragon 865 chip, outperforming existing open-source libraries. Code and models are released at https://github.com/open-mmlab/mmpose/tree/main/projects/rtmpose. + + + +
    + +
    diff --git a/docs/src/papers/algorithms/scnet.md b/docs/src/papers/algorithms/scnet.md index 043c144111..24f13e82b8 100644 --- a/docs/src/papers/algorithms/scnet.md +++ b/docs/src/papers/algorithms/scnet.md @@ -1,30 +1,30 @@ -# Improving Convolutional Networks with Self-Calibrated Convolutions - - - -
    -SCNet (CVPR'2020) - -```bibtex -@inproceedings{liu2020improving, - title={Improving Convolutional Networks with Self-Calibrated Convolutions}, - author={Liu, Jiang-Jiang and Hou, Qibin and Cheng, Ming-Ming and Wang, Changhu and Feng, Jiashi}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={10096--10105}, - year={2020} -} -``` - -
    - -## Abstract - - - -Recent advances on CNNs are mostly devoted to designing more complex architectures to enhance their representation learning capacity. In this paper, we consider how to improve the basic convolutional feature transformation process of CNNs without tuning the model architectures. To this end, we present a novel self-calibrated convolutions that explicitly expand fields-of-view of each convolutional layers through internal communications and hence enrich the output features. In particular, unlike the standard convolutions that fuse spatial and channel-wise information using small kernels (e.g., 3x3), self-calibrated convolutions adaptively build long-range spatial and inter-channel dependencies around each spatial location through a novel self-calibration operation. Thus, it can help CNNs generate more discriminative representations by explicitly incorporating richer information. Our self-calibrated convolution design is simple and generic, and can be easily applied to augment standard convolutional layers without introducing extra parameters and complexity. Extensive experiments demonstrate that when applying self-calibrated convolutions into different backbones, our networks can significantly improve the baseline models in a variety of vision tasks, including image recognition, object detection, instance segmentation, and keypoint detection, with no need to change the network architectures. We hope this work could provide a promising way for future research in designing novel convolutional feature transformations for improving convolutional networks. Code is available on the project page. - - - -
    - -
    +# Improving Convolutional Networks with Self-Calibrated Convolutions + + + +
    +SCNet (CVPR'2020) + +```bibtex +@inproceedings{liu2020improving, + title={Improving Convolutional Networks with Self-Calibrated Convolutions}, + author={Liu, Jiang-Jiang and Hou, Qibin and Cheng, Ming-Ming and Wang, Changhu and Feng, Jiashi}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={10096--10105}, + year={2020} +} +``` + +
+ +## Abstract + + + +Recent advances in CNNs are mostly devoted to designing more complex architectures to enhance their representation learning capacity. In this paper, we consider how to improve the basic convolutional feature transformation process of CNNs without tuning the model architectures. To this end, we present novel self-calibrated convolutions that explicitly expand the fields-of-view of each convolutional layer through internal communications and hence enrich the output features. In particular, unlike the standard convolutions that fuse spatial and channel-wise information using small kernels (e.g., 3x3), self-calibrated convolutions adaptively build long-range spatial and inter-channel dependencies around each spatial location through a novel self-calibration operation. Thus, they can help CNNs generate more discriminative representations by explicitly incorporating richer information. Our self-calibrated convolution design is simple and generic, and can be easily applied to augment standard convolutional layers without introducing extra parameters and complexity. Extensive experiments demonstrate that when applying self-calibrated convolutions to different backbones, our networks can significantly improve the baseline models in a variety of vision tasks, including image recognition, object detection, instance segmentation, and keypoint detection, with no need to change the network architectures. We hope this work could provide a promising way for future research in designing novel convolutional feature transformations for improving convolutional networks. Code is available on the project page. + + + +
    + +
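The self-calibration described in the SCNet abstract gathers long-range context from a downsampled view of the input and uses it to gate an ordinary convolution's response. A deliberately simplified single-branch sketch (the full SCConv also splits channels and keeps a plain convolution path, both omitted here):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SelfCalibratedConv(nn.Module):
    """Simplified self-calibration: a low-resolution context path gates
    the response of an ordinary 3x3 convolution."""

    def __init__(self, channels, pooling=4):
        super().__init__()
        self.pooling = pooling
        self.context = nn.Conv2d(channels, channels, 3, padding=1)
        self.feature = nn.Conv2d(channels, channels, 3, padding=1)
        self.out = nn.Conv2d(channels, channels, 3, padding=1)

    def forward(self, x):
        # Long-range context from a downsampled view of the input.
        ctx = self.context(F.avg_pool2d(x, self.pooling))
        ctx = F.interpolate(ctx, size=x.shape[-2:], mode='bilinear',
                            align_corners=False)
        gate = torch.sigmoid(x + ctx)          # per-location calibration weights
        return self.out(self.feature(x) * gate)

print(SelfCalibratedConv(32)(torch.randn(1, 32, 64, 64)).shape)
```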
    diff --git a/docs/src/papers/algorithms/simcc.md b/docs/src/papers/algorithms/simcc.md index 9a00d229f3..0d9fb89515 100644 --- a/docs/src/papers/algorithms/simcc.md +++ b/docs/src/papers/algorithms/simcc.md @@ -1,28 +1,28 @@ -# SimCC: a Simple Coordinate Classification Perspective for Human Pose Estimation - - - -
    -SimCC (ECCV'2022) - -```bibtex -@misc{https://doi.org/10.48550/arxiv.2107.03332, - title={SimCC: a Simple Coordinate Classification Perspective for Human Pose Estimation}, - author={Li, Yanjie and Yang, Sen and Liu, Peidong and Zhang, Shoukui and Wang, Yunxiao and Wang, Zhicheng and Yang, Wankou and Xia, Shu-Tao}, - year={2021} -} -``` - -
    - -## Abstract - - - -The 2D heatmap-based approaches have dominated Human Pose Estimation (HPE) for years due to high performance. However, the long-standing quantization error problem in the 2D heatmap-based methods leads to several well-known drawbacks: 1) The performance for the low-resolution inputs is limited; 2) To improve the feature map resolution for higher localization precision, multiple costly upsampling layers are required; 3) Extra post-processing is adopted to reduce the quantization error. To address these issues, we aim to explore a brand new scheme, called \\textit{SimCC}, which reformulates HPE as two classification tasks for horizontal and vertical coordinates. The proposed SimCC uniformly divides each pixel into several bins, thus achieving \\emph{sub-pixel} localization precision and low quantization error. Benefiting from that, SimCC can omit additional refinement post-processing and exclude upsampling layers under certain settings, resulting in a more simple and effective pipeline for HPE. Extensive experiments conducted over COCO, CrowdPose, and MPII datasets show that SimCC outperforms heatmap-based counterparts, especially in low-resolution settings by a large margin. - - - -
    - -
    +# SimCC: a Simple Coordinate Classification Perspective for Human Pose Estimation + + + +
    +SimCC (ECCV'2022) + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2107.03332, + title={SimCC: a Simple Coordinate Classification Perspective for Human Pose Estimation}, + author={Li, Yanjie and Yang, Sen and Liu, Peidong and Zhang, Shoukui and Wang, Yunxiao and Wang, Zhicheng and Yang, Wankou and Xia, Shu-Tao}, + year={2021} +} +``` + +
+ +## Abstract + + + +The 2D heatmap-based approaches have dominated Human Pose Estimation (HPE) for years due to high performance. However, the long-standing quantization error problem in the 2D heatmap-based methods leads to several well-known drawbacks: 1) The performance for the low-resolution inputs is limited; 2) To improve the feature map resolution for higher localization precision, multiple costly upsampling layers are required; 3) Extra post-processing is adopted to reduce the quantization error. To address these issues, we aim to explore a brand new scheme, called *SimCC*, which reformulates HPE as two classification tasks for horizontal and vertical coordinates. The proposed SimCC uniformly divides each pixel into several bins, thus achieving *sub-pixel* localization precision and low quantization error. Benefiting from that, SimCC can omit additional refinement post-processing and exclude upsampling layers under certain settings, resulting in a simpler and more effective pipeline for HPE. Extensive experiments conducted over COCO, CrowdPose, and MPII datasets show that SimCC outperforms heatmap-based counterparts, especially in low-resolution settings by a large margin. + + + +
    + +
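SimCC's reformulation, per the abstract, turns keypoint localization into two 1-D classification problems: the x and y axes are split into bins finer than a pixel, which is where the sub-pixel precision comes from. A minimal encode/decode sketch (the splitting ratio and the plain argmax decoding are illustrative choices, not the library's exact codec):

```python
import torch
import torch.nn.functional as F

SPLIT_RATIO = 2.0   # bins per pixel; more than 1 gives sub-pixel precision

def encode(coord_px, width_px):
    """Ground-truth x (or y) coordinate in pixels -> classification target bin."""
    num_bins = int(width_px * SPLIT_RATIO)
    return coord_px.mul(SPLIT_RATIO).round().long().clamp(0, num_bins - 1)

def decode(logits):
    """Predicted 1-D logits over bins -> continuous coordinate in pixels."""
    return logits.argmax(dim=-1).float() / SPLIT_RATIO

# One keypoint on a 192-pixel-wide input: the x axis becomes a
# 384-way classification problem, trained with cross-entropy.
target_bin = encode(torch.tensor([123.4]), width_px=192)
logits = torch.randn(1, int(192 * SPLIT_RATIO))
loss = F.cross_entropy(logits, target_bin)
print(target_bin.item(), decode(logits).item(), loss.item())
```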
    diff --git a/docs/src/papers/algorithms/simplebaseline2d.md b/docs/src/papers/algorithms/simplebaseline2d.md index 3eca224da1..b2d7f69e92 100644 --- a/docs/src/papers/algorithms/simplebaseline2d.md +++ b/docs/src/papers/algorithms/simplebaseline2d.md @@ -1,30 +1,30 @@ -# Simple baselines for human pose estimation and tracking - - - -
    -SimpleBaseline2D (ECCV'2018) - -```bibtex -@inproceedings{xiao2018simple, - title={Simple baselines for human pose estimation and tracking}, - author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, - booktitle={Proceedings of the European conference on computer vision (ECCV)}, - pages={466--481}, - year={2018} -} -``` - -
    - -## Abstract - - - -There has been significant progress on pose estimation and increasing interests on pose tracking in recent years. At the same time, the overall algorithm and system complexity increases as well, making the algorithm analysis and comparison more difficult. This work provides simple and effective baseline methods. They are helpful for inspiring and evaluating new ideas for the field. State-of-the-art results are achieved on challenging benchmarks. - - - -
    - -
    +# Simple baselines for human pose estimation and tracking + + + +
    +SimpleBaseline2D (ECCV'2018) + +```bibtex +@inproceedings{xiao2018simple, + title={Simple baselines for human pose estimation and tracking}, + author={Xiao, Bin and Wu, Haiping and Wei, Yichen}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={466--481}, + year={2018} +} +``` + +
    + +## Abstract + + + +There has been significant progress on pose estimation and increasing interests on pose tracking in recent years. At the same time, the overall algorithm and system complexity increases as well, making the algorithm analysis and comparison more difficult. This work provides simple and effective baseline methods. They are helpful for inspiring and evaluating new ideas for the field. State-of-the-art results are achieved on challenging benchmarks. + + + +
    + +
    diff --git a/docs/src/papers/algorithms/simplebaseline3d.md b/docs/src/papers/algorithms/simplebaseline3d.md index ee3c58368a..c11497ca5e 100644 --- a/docs/src/papers/algorithms/simplebaseline3d.md +++ b/docs/src/papers/algorithms/simplebaseline3d.md @@ -1,29 +1,29 @@ -# A simple yet effective baseline for 3d human pose estimation - - - -
    -SimpleBaseline3D (ICCV'2017) - -```bibtex -@inproceedings{martinez_2017_3dbaseline, - title={A simple yet effective baseline for 3d human pose estimation}, - author={Martinez, Julieta and Hossain, Rayat and Romero, Javier and Little, James J.}, - booktitle={ICCV}, - year={2017} -} -``` - -
    - -## Abstract - - - -Following the success of deep convolutional networks, state-of-the-art methods for 3d human pose estimation have focused on deep end-to-end systems that predict 3d joint locations given raw image pixels. Despite their excellent performance, it is often not easy to understand whether their remaining error stems from a limited 2d pose (visual) understanding, or from a failure to map 2d poses into 3-dimensional positions. With the goal of understanding these sources of error, we set out to build a system that given 2d joint locations predicts 3d positions. Much to our surprise, we have found that, with current technology, "lifting" ground truth 2d joint locations to 3d space is a task that can be solved with a remarkably low error rate: a relatively simple deep feed-forward network outperforms the best reported result by about 30% on Human3.6M, the largest publicly available 3d pose estimation benchmark. Furthermore, training our system on the output of an off-the-shelf state-of-the-art 2d detector (i.e., using images as input) yields state of the art results -- this includes an array of systems that have been trained end-to-end specifically for this task. Our results indicate that a large portion of the error of modern deep 3d pose estimation systems stems from their visual analysis, and suggests directions to further advance the state of the art in 3d human pose estimation. - - - -
    - -
    +# A simple yet effective baseline for 3d human pose estimation + + + +
    +SimpleBaseline3D (ICCV'2017) + +```bibtex +@inproceedings{martinez_2017_3dbaseline, + title={A simple yet effective baseline for 3d human pose estimation}, + author={Martinez, Julieta and Hossain, Rayat and Romero, Javier and Little, James J.}, + booktitle={ICCV}, + year={2017} +} +``` + +
    + +## Abstract + + + +Following the success of deep convolutional networks, state-of-the-art methods for 3d human pose estimation have focused on deep end-to-end systems that predict 3d joint locations given raw image pixels. Despite their excellent performance, it is often not easy to understand whether their remaining error stems from a limited 2d pose (visual) understanding, or from a failure to map 2d poses into 3-dimensional positions. With the goal of understanding these sources of error, we set out to build a system that given 2d joint locations predicts 3d positions. Much to our surprise, we have found that, with current technology, "lifting" ground truth 2d joint locations to 3d space is a task that can be solved with a remarkably low error rate: a relatively simple deep feed-forward network outperforms the best reported result by about 30% on Human3.6M, the largest publicly available 3d pose estimation benchmark. Furthermore, training our system on the output of an off-the-shelf state-of-the-art 2d detector (i.e., using images as input) yields state of the art results -- this includes an array of systems that have been trained end-to-end specifically for this task. Our results indicate that a large portion of the error of modern deep 3d pose estimation systems stems from their visual analysis, and suggests directions to further advance the state of the art in 3d human pose estimation. + + + +
    + +
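The "relatively simple deep feed-forward network" in the abstract lifts detected 2D keypoints directly to 3D joint positions. A stripped-down sketch of such a lifting network (the paper wraps 1024-wide linear layers in residual blocks with batch norm and dropout; the joint count and plain MLP here are illustrative):

```python
import torch
import torch.nn as nn

NUM_JOINTS = 17  # illustrative; Human3.6M setups commonly use 16 or 17 joints

class LiftingNet(nn.Module):
    """Feed-forward 2D-to-3D lifting network, in the spirit of the baseline."""

    def __init__(self, hidden=1024):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(NUM_JOINTS * 2, hidden), nn.ReLU(inplace=True),
            nn.Linear(hidden, hidden), nn.ReLU(inplace=True),
            nn.Linear(hidden, NUM_JOINTS * 3))

    def forward(self, joints2d):
        # joints2d: (B, K, 2) detected keypoints -> (B, K, 3) joint positions
        b = joints2d.shape[0]
        return self.net(joints2d.reshape(b, -1)).reshape(b, NUM_JOINTS, 3)

print(LiftingNet()(torch.randn(8, NUM_JOINTS, 2)).shape)  # torch.Size([8, 17, 3])
```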
    diff --git a/docs/src/papers/algorithms/softwingloss.md b/docs/src/papers/algorithms/softwingloss.md index 524a6089ff..d638109270 100644 --- a/docs/src/papers/algorithms/softwingloss.md +++ b/docs/src/papers/algorithms/softwingloss.md @@ -1,30 +1,30 @@ -# Structure-Coherent Deep Feature Learning for Robust Face Alignment - - - -
    -SoftWingloss (TIP'2021) - -```bibtex -@article{lin2021structure, - title={Structure-Coherent Deep Feature Learning for Robust Face Alignment}, - author={Lin, Chunze and Zhu, Beier and Wang, Quan and Liao, Renjie and Qian, Chen and Lu, Jiwen and Zhou, Jie}, - journal={IEEE Transactions on Image Processing}, - year={2021}, - publisher={IEEE} -} -``` - -
    - -## Abstract - - - -In this paper, we propose a structure-coherent deep feature learning method for face alignment. Unlike most existing face alignment methods which overlook the facial structure cues, we explicitly exploit the relation among facial landmarks to make the detector robust to hard cases such as occlusion and large pose. Specifically, we leverage a landmark-graph relational network to enforce the structural relationships among landmarks. We consider the facial landmarks as structural graph nodes and carefully design the neighborhood to passing features among the most related nodes. Our method dynamically adapts the weights of node neighborhood to eliminate distracted information from noisy nodes, such as occluded landmark point. Moreover, different from most previous works which only tend to penalize the landmarks absolute position during the training, we propose a relative location loss to enhance the information of relative location of landmarks. This relative location supervision further regularizes the facial structure. Our approach considers the interactions among facial landmarks and can be easily implemented on top of any convolutional backbone to boost the performance. Extensive experiments on three popular benchmarks, including WFLW, COFW and 300W, demonstrate the effectiveness of the proposed method. In particular, due to explicit structure modeling, our approach is especially robust to challenging cases resulting in impressive low failure rate on COFW and WFLW datasets. - - - -
    - -
    +# Structure-Coherent Deep Feature Learning for Robust Face Alignment + + + +
    +SoftWingloss (TIP'2021) + +```bibtex +@article{lin2021structure, + title={Structure-Coherent Deep Feature Learning for Robust Face Alignment}, + author={Lin, Chunze and Zhu, Beier and Wang, Quan and Liao, Renjie and Qian, Chen and Lu, Jiwen and Zhou, Jie}, + journal={IEEE Transactions on Image Processing}, + year={2021}, + publisher={IEEE} +} +``` + +
+ +## Abstract + + + +In this paper, we propose a structure-coherent deep feature learning method for face alignment. Unlike most existing face alignment methods which overlook the facial structure cues, we explicitly exploit the relation among facial landmarks to make the detector robust to hard cases such as occlusion and large pose. Specifically, we leverage a landmark-graph relational network to enforce the structural relationships among landmarks. We consider the facial landmarks as structural graph nodes and carefully design the neighborhood to pass features among the most related nodes. Our method dynamically adapts the weights of the node neighborhood to eliminate distracting information from noisy nodes, such as occluded landmark points. Moreover, different from most previous works which only tend to penalize the landmarks' absolute positions during training, we propose a relative location loss to enhance the information of the relative location of landmarks. This relative location supervision further regularizes the facial structure. Our approach considers the interactions among facial landmarks and can be easily implemented on top of any convolutional backbone to boost the performance. Extensive experiments on three popular benchmarks, including WFLW, COFW and 300W, demonstrate the effectiveness of the proposed method. In particular, due to explicit structure modeling, our approach is especially robust to challenging cases, resulting in an impressively low failure rate on the COFW and WFLW datasets. + + + +
    + +
    diff --git a/docs/src/papers/algorithms/udp.md b/docs/src/papers/algorithms/udp.md index bb4acebfbc..00604fc5ce 100644 --- a/docs/src/papers/algorithms/udp.md +++ b/docs/src/papers/algorithms/udp.md @@ -1,30 +1,30 @@ -# The Devil is in the Details: Delving into Unbiased Data Processing for Human Pose Estimation - - - -
    -UDP (CVPR'2020) - -```bibtex -@InProceedings{Huang_2020_CVPR, - author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, - title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, - booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, - month = {June}, - year = {2020} -} -``` - -
    - -## Abstract - - - -Recently, the leading performance of human pose estimation is dominated by top-down methods. Being a fundamental component in training and inference, data processing has not been systematically considered in pose estimation community, to the best of our knowledge. In this paper, we focus on this problem and find that the devil of top-down pose estimator is in the biased data processing. Specifically, by investigating the standard data processing in state-of-the-art approaches mainly including data transformation and encoding-decoding, we find that the results obtained by common flipping strategy are unaligned with the original ones in inference. Moreover, there is statistical error in standard encoding-decoding during both training and inference. Two problems couple together and significantly degrade the pose estimation performance. Based on quantitative analyses, we then formulate a principled way to tackle this dilemma. Data is processed in continuous space based on unit length (the intervals between pixels) instead of in discrete space with pixel, and a combined classification and regression approach is adopted to perform encoding-decoding. The Unbiased Data Processing (UDP) for human pose estimation can be achieved by combining the two together. UDP not only boosts the performance of existing methods by a large margin but also plays a important role in result reproducing and future exploration. As a model-agnostic approach, UDP promotes SimpleBaseline-ResNet50-256x192 by 1.5 AP (70.2 to 71.7) and HRNet-W32-256x192 by 1.7 AP (73.5 to 75.2) on COCO test-dev set. The HRNet-W48-384x288 equipped with UDP achieves 76.5 AP and sets a new state-of-the-art for human pose estimation. The source code is publicly available for further research. - - - -
    - -
    +# The Devil is in the Details: Delving into Unbiased Data Processing for Human Pose Estimation + + + +
    +UDP (CVPR'2020) + +```bibtex +@InProceedings{Huang_2020_CVPR, + author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, + title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, + booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2020} +} +``` + +
+ +## Abstract + + + +Recently, the leading performance in human pose estimation has been dominated by top-down methods. Despite being a fundamental component of training and inference, data processing has not, to the best of our knowledge, been systematically considered in the pose estimation community. In this paper, we focus on this problem and find that the devil of top-down pose estimators lies in biased data processing. Specifically, by investigating the standard data processing in state-of-the-art approaches, mainly data transformation and encoding-decoding, we find that the results obtained by the common flipping strategy are misaligned with the original ones at inference. Moreover, there are statistical errors in standard encoding-decoding during both training and inference. The two problems couple together and significantly degrade pose estimation performance. Based on quantitative analyses, we then formulate a principled way to tackle this dilemma. Data is processed in continuous space based on unit length (the interval between pixels) instead of in discrete space based on pixels, and a combined classification and regression approach is adopted to perform encoding-decoding. Unbiased Data Processing (UDP) for human pose estimation is achieved by combining the two. UDP not only boosts the performance of existing methods by a large margin but also plays an important role in result reproduction and future exploration. As a model-agnostic approach, UDP promotes SimpleBaseline-ResNet50-256x192 by 1.5 AP (70.2 to 71.7) and HRNet-W32-256x192 by 1.7 AP (73.5 to 75.2) on the COCO test-dev set. HRNet-W48-384x288 equipped with UDP achieves 76.5 AP and sets a new state of the art for human pose estimation. The source code is publicly available for further research. + + + +
    + +
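The core of UDP's "unit length" argument is a coordinate transform measured in pixel intervals rather than pixel counts. The following NumPy sketch illustrates that idea on the heatmap-to-image mapping; it is a simplified illustration under stated assumptions, not MMPose's actual UDP implementation.

```python
import numpy as np

def heatmap_to_image_coords(coords_hm, heatmap_size, image_size, unbiased=True):
    """Map heatmap-space keypoint coordinates back to image space.

    Sketch of the idea behind UDP: measure distances in 'unit lengths'
    (intervals between pixels), i.e. scale by (W_img - 1) / (W_hm - 1),
    instead of the biased pixel-count ratio W_img / W_hm.
    """
    coords_hm = np.asarray(coords_hm, dtype=np.float64)
    hm = np.asarray(heatmap_size, dtype=np.float64)
    img = np.asarray(image_size, dtype=np.float64)
    if unbiased:
        scale = (img - 1.0) / (hm - 1.0)
    else:
        scale = img / hm  # the biased transform the paper argues against
    return coords_hm * scale

# a keypoint at the last heatmap column/row should land on the last image column/row
print(heatmap_to_image_coords([47.0, 63.0], heatmap_size=(48, 64), image_size=(192, 256)))
# unbiased -> [191. 255.]; the biased scale would give [188. 252.]
```

The same reasoning carries over to flipping and encoding-decoding, which is where the misalignment described in the abstract shows up.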
    diff --git a/docs/src/papers/algorithms/videopose3d.md b/docs/src/papers/algorithms/videopose3d.md index f8647e0ee8..fc427346ee 100644 --- a/docs/src/papers/algorithms/videopose3d.md +++ b/docs/src/papers/algorithms/videopose3d.md @@ -1,30 +1,30 @@ -# 3D human pose estimation in video with temporal convolutions and semi-supervised training - - - -
    -VideoPose3D (CVPR'2019) - -```bibtex -@inproceedings{pavllo20193d, - title={3d human pose estimation in video with temporal convolutions and semi-supervised training}, - author={Pavllo, Dario and Feichtenhofer, Christoph and Grangier, David and Auli, Michael}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={7753--7762}, - year={2019} -} -``` - -
    - -## Abstract - - - -In this work, we demonstrate that 3D poses in video can be effectively estimated with a fully convolutional model based on dilated temporal convolutions over 2D keypoints. We also introduce back-projection, a simple and effective semi-supervised training method that leverages unlabeled video data. We start with predicted 2D keypoints for unlabeled video, then estimate 3D poses and finally back-project to the input 2D keypoints. In the supervised setting, our fully-convolutional model outperforms the previous best result from the literature by 6 mm mean per-joint position error on Human3.6M, corresponding to an error reduction of 11%, and the model also shows significant improvements on HumanEva-I. Moreover, experiments with back-projection show that it comfortably outperforms previous state-of-the-art results in semi-supervised settings where labeled data is scarce. - - - -
    - -
    +# 3D human pose estimation in video with temporal convolutions and semi-supervised training + + + +
    +VideoPose3D (CVPR'2019) + +```bibtex +@inproceedings{pavllo20193d, + title={3d human pose estimation in video with temporal convolutions and semi-supervised training}, + author={Pavllo, Dario and Feichtenhofer, Christoph and Grangier, David and Auli, Michael}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={7753--7762}, + year={2019} +} +``` + +
    + +## Abstract + + + +In this work, we demonstrate that 3D poses in video can be effectively estimated with a fully convolutional model based on dilated temporal convolutions over 2D keypoints. We also introduce back-projection, a simple and effective semi-supervised training method that leverages unlabeled video data. We start with predicted 2D keypoints for unlabeled video, then estimate 3D poses and finally back-project to the input 2D keypoints. In the supervised setting, our fully-convolutional model outperforms the previous best result from the literature by 6 mm mean per-joint position error on Human3.6M, corresponding to an error reduction of 11%, and the model also shows significant improvements on HumanEva-I. Moreover, experiments with back-projection show that it comfortably outperforms previous state-of-the-art results in semi-supervised settings where labeled data is scarce. + + + +
    + +
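VideoPose3D lifts 2D keypoint sequences to 3D with dilated 1D convolutions over time. The block below is a minimal PyTorch sketch of that pattern; the channel width, dilation schedule, and 'same' padding are illustrative choices, not the paper's exact architecture.

```python
import torch
import torch.nn as nn

class TemporalBlock(nn.Module):
    """One dilated temporal-convolution block over a 2D-keypoint sequence."""

    def __init__(self, channels, dilation):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv1d(channels, channels, kernel_size=3, dilation=dilation,
                      padding=dilation),        # keeps the temporal length
            nn.BatchNorm1d(channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return x + self.net(x)                  # residual connection over time

class TemporalLifter(nn.Module):
    def __init__(self, num_joints=17, channels=256, dilations=(1, 3, 9)):
        super().__init__()
        self.inp = nn.Conv1d(num_joints * 2, channels, kernel_size=1)
        self.blocks = nn.Sequential(*[TemporalBlock(channels, d) for d in dilations])
        self.out = nn.Conv1d(channels, num_joints * 3, kernel_size=1)

    def forward(self, kpts_2d):                 # (B, T, J, 2)
        b, t, j, _ = kpts_2d.shape
        x = kpts_2d.reshape(b, t, j * 2).transpose(1, 2)   # (B, 2J, T)
        x = self.blocks(self.inp(x))
        return self.out(x).transpose(1, 2).reshape(b, t, j, 3)

poses_3d = TemporalLifter()(torch.randn(2, 27, 17, 2))     # -> (2, 27, 17, 3)
```

The back-projection scheme in the abstract would then re-project these 3D outputs to 2D and compare them with the input keypoints on unlabeled video.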
    diff --git a/docs/src/papers/algorithms/vipnas.md b/docs/src/papers/algorithms/vipnas.md index 5f52a8cac0..53058bf7bb 100644 --- a/docs/src/papers/algorithms/vipnas.md +++ b/docs/src/papers/algorithms/vipnas.md @@ -1,29 +1,29 @@ -# ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search - - - -
    -ViPNAS (CVPR'2021) - -```bibtex -@article{xu2021vipnas, - title={ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search}, - author={Xu, Lumin and Guan, Yingda and Jin, Sheng and Liu, Wentao and Qian, Chen and Luo, Ping and Ouyang, Wanli and Wang, Xiaogang}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - year={2021} -} -``` - -
    - -## Abstract - - - -Human pose estimation has achieved significant progress in recent years. However, most of the recent methods focus on improving accuracy using complicated models and ignoring real-time efficiency. To achieve a better trade-off between accuracy and efficiency, we propose a novel neural architecture search (NAS) method, termed ViPNAS, to search networks in both spatial and temporal levels for fast online video pose estimation. In the spatial level, we carefully design the search space with five different dimensions including network depth, width, kernel size, group number, and attentions. In the temporal level, we search from a series of temporal feature fusions to optimize the total accuracy and speed across multiple video frames. To the best of our knowledge, we are the first to search for the temporal feature fusion and automatic computation allocation in videos. Extensive experiments demonstrate the effectiveness of our approach on the challenging COCO2017 and PoseTrack2018 datasets. Our discovered model family, S-ViPNAS and T-ViPNAS, achieve significantly higher inference speed (CPU real-time) without sacrificing the accuracy compared to the previous state-of-the-art methods. - - - -
    - -
    +# ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search + + + +
+ViPNAS (CVPR'2021) + +```bibtex +@inproceedings{xu2021vipnas, + title={ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search}, + author={Xu, Lumin and Guan, Yingda and Jin, Sheng and Liu, Wentao and Qian, Chen and Luo, Ping and Ouyang, Wanli and Wang, Xiaogang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + year={2021} +} +``` + +
+ +## Abstract + + + +Human pose estimation has achieved significant progress in recent years. However, most recent methods focus on improving accuracy with complicated models while ignoring real-time efficiency. To achieve a better trade-off between accuracy and efficiency, we propose a novel neural architecture search (NAS) method, termed ViPNAS, to search networks at both the spatial and temporal levels for fast online video pose estimation. At the spatial level, we carefully design the search space with five different dimensions, including network depth, width, kernel size, group number, and attention. At the temporal level, we search over a series of temporal feature fusions to optimize the overall accuracy and speed across multiple video frames. To the best of our knowledge, we are the first to search for temporal feature fusion and automatic computation allocation in videos. Extensive experiments demonstrate the effectiveness of our approach on the challenging COCO2017 and PoseTrack2018 datasets. Our discovered model family, S-ViPNAS and T-ViPNAS, achieves significantly higher inference speed (real-time on CPU) without sacrificing accuracy compared to previous state-of-the-art methods. + + + +
    + +
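To make the five spatial search dimensions named in the abstract concrete, here is a small Python sketch of sampling one candidate architecture from a search space. The value ranges and stage count are assumptions for illustration only; they are not the paper's actual search space.

```python
import random

# Illustrative spatial-level search space covering the five dimensions named
# in the abstract; the concrete choices below are assumptions, not ViPNAS's
# actual configuration.
SEARCH_SPACE = {
    "depth":     [2, 3, 4],            # blocks per stage
    "width":     [32, 48, 64, 80],     # channels per stage
    "kernel":    [3, 5, 7],            # convolution kernel size
    "groups":    [1, 2, 4],            # group number for grouped convolutions
    "attention": [None, "se"],         # whether to attach an attention module
}

def sample_architecture(num_stages=4, rng=random):
    """Randomly sample one candidate network: one choice per dimension per stage."""
    return [
        {dim: rng.choice(choices) for dim, choices in SEARCH_SPACE.items()}
        for _ in range(num_stages)
    ]

if __name__ == "__main__":
    candidate = sample_architecture()
    for stage_idx, cfg in enumerate(candidate):
        print(f"stage {stage_idx}: {cfg}")
```

A NAS controller would evaluate many such candidates (plus the temporal fusion choices) against an accuracy/speed objective.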
    diff --git a/docs/src/papers/algorithms/vitpose.md b/docs/src/papers/algorithms/vitpose.md index dd218a5f98..334df15d13 100644 --- a/docs/src/papers/algorithms/vitpose.md +++ b/docs/src/papers/algorithms/vitpose.md @@ -1,30 +1,30 @@ -# ViTPose: Simple Vision Transformer Baselines for Human Pose Estimation - - - -
    -ViTPose (NeurIPS'2022) - -```bibtex -@inproceedings{ - xu2022vitpose, - title={ViTPose: Simple Vision Transformer Baselines for Human Pose Estimation}, - author={Yufei Xu and Jing Zhang and Qiming Zhang and Dacheng Tao}, - booktitle={Advances in Neural Information Processing Systems}, - year={2022}, -} -``` - -
    - -## Abstract - - - -Although no specific domain knowledge is considered in the design, plain vision transformers have shown excellent performance in visual recognition tasks. However, little effort has been made to reveal the potential of such simple structures for pose estimation tasks. In this paper, we show the surprisingly good capabilities of plain vision transformers for pose estimation from various aspects, namely simplicity in model structure, scalability in model size, flexibility in training paradigm, and transferability of knowledge between models, through a simple baseline model called ViTPose. Specifically, ViTPose employs plain and non-hierarchical vision transformers as backbones to extract features for a given person instance and a lightweight decoder for pose estimation. It can be scaled up from 100M to 1B parameters by taking the advantages of the scalable model capacity and high parallelism of transformers, setting a new Pareto front between throughput and performance. Besides, ViTPose is very flexible regarding the attention type, input resolution, pre-training and finetuning strategy, as well as dealing with multiple pose tasks. We also empirically demonstrate that the knowledge of large ViTPose models can be easily transferred to small ones via a simple knowledge token. Experimental results show that our basic ViTPose model outperforms representative methods on the challenging MS COCO Keypoint Detection benchmark, while the largest model sets a new state-of-the-art. - - - -
    - -
    +# ViTPose: Simple Vision Transformer Baselines for Human Pose Estimation + + + +
    +ViTPose (NeurIPS'2022) + +```bibtex +@inproceedings{ + xu2022vitpose, + title={ViTPose: Simple Vision Transformer Baselines for Human Pose Estimation}, + author={Yufei Xu and Jing Zhang and Qiming Zhang and Dacheng Tao}, + booktitle={Advances in Neural Information Processing Systems}, + year={2022}, +} +``` + +
+ +## Abstract + + + +Although no specific domain knowledge is considered in the design, plain vision transformers have shown excellent performance in visual recognition tasks. However, little effort has been made to reveal the potential of such simple structures for pose estimation tasks. In this paper, we show the surprisingly good capabilities of plain vision transformers for pose estimation from various aspects, namely simplicity in model structure, scalability in model size, flexibility in training paradigm, and transferability of knowledge between models, through a simple baseline model called ViTPose. Specifically, ViTPose employs plain and non-hierarchical vision transformers as backbones to extract features for a given person instance and a lightweight decoder for pose estimation. It can be scaled up from 100M to 1B parameters by taking advantage of the scalable model capacity and high parallelism of transformers, setting a new Pareto front between throughput and performance. Besides, ViTPose is very flexible regarding the attention type, input resolution, pre-training and fine-tuning strategy, as well as dealing with multiple pose tasks. We also empirically demonstrate that the knowledge of large ViTPose models can be easily transferred to small ones via a simple knowledge token. Experimental results show that our basic ViTPose model outperforms representative methods on the challenging MS COCO Keypoint Detection benchmark, while the largest model sets a new state-of-the-art. + + + +
    + +
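The "plain backbone plus lightweight decoder" recipe from the abstract can be illustrated with a small PyTorch sketch that reshapes ViT patch tokens back to a grid and upsamples them into keypoint heatmaps. The embedding dimension, deconvolution widths, and token-grid size are illustrative assumptions, not ViTPose's exact head.

```python
import torch
import torch.nn as nn

class LightweightHeatmapDecoder(nn.Module):
    """Decode ViT patch tokens into keypoint heatmaps (sketch)."""

    def __init__(self, embed_dim=768, num_keypoints=17):
        super().__init__()
        self.deconv = nn.Sequential(
            nn.ConvTranspose2d(embed_dim, 256, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(256, 256, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
        )
        self.head = nn.Conv2d(256, num_keypoints, kernel_size=1)

    def forward(self, tokens, grid_hw):
        # tokens: (B, N, C) patch embeddings from a plain ViT (class token removed)
        b, n, c = tokens.shape
        h, w = grid_hw
        feat = tokens.transpose(1, 2).reshape(b, c, h, w)  # back to a 2D grid
        return self.head(self.deconv(feat))                # (B, K, 4h, 4w)

# e.g. a 256x192 crop with 16x16 patches gives a 16x12 token grid
heatmaps = LightweightHeatmapDecoder()(torch.randn(2, 16 * 12, 768), grid_hw=(16, 12))
print(heatmaps.shape)   # torch.Size([2, 17, 64, 48])
```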
    diff --git a/docs/src/papers/algorithms/voxelpose.md b/docs/src/papers/algorithms/voxelpose.md index 384f4ca1e5..421d49f896 100644 --- a/docs/src/papers/algorithms/voxelpose.md +++ b/docs/src/papers/algorithms/voxelpose.md @@ -1,29 +1,29 @@ -# VoxelPose: Towards Multi-Camera 3D Human Pose Estimation in Wild Environment - - - -
    -VoxelPose (ECCV'2020) - -```bibtex -@inproceedings{tumultipose, - title={VoxelPose: Towards Multi-Camera 3D Human Pose Estimation in Wild Environment}, - author={Tu, Hanyue and Wang, Chunyu and Zeng, Wenjun}, - booktitle={ECCV}, - year={2020} -} -``` - -
    - -## Abstract - - - -We present VoxelPose to estimate 3D poses of multiple people from multiple camera views. In contrast to the previous efforts which require to establish cross-view correspondence based on noisy and incomplete 2D pose estimates, VoxelPose directly operates in the 3D space therefore avoids making incorrect decisions in each camera view. To achieve this goal, features in all camera views are aggregated in the 3D voxel space and fed into Cuboid Proposal Network (CPN) to localize all people. Then we propose Pose Regression Network (PRN) to estimate a detailed 3D pose for each proposal. The approach is robust to occlusion which occurs frequently in practice. Without bells and whistles, it outperforms the previous methods on several public datasets. - - - -
    - -
    +# VoxelPose: Towards Multi-Camera 3D Human Pose Estimation in Wild Environment + + + +
    +VoxelPose (ECCV'2020) + +```bibtex +@inproceedings{tumultipose, + title={VoxelPose: Towards Multi-Camera 3D Human Pose Estimation in Wild Environment}, + author={Tu, Hanyue and Wang, Chunyu and Zeng, Wenjun}, + booktitle={ECCV}, + year={2020} +} +``` + +
+ +## Abstract + + + +We present VoxelPose to estimate 3D poses of multiple people from multiple camera views. In contrast to previous efforts, which require establishing cross-view correspondences based on noisy and incomplete 2D pose estimates, VoxelPose directly operates in 3D space and therefore avoids making incorrect decisions in each camera view. To achieve this goal, features in all camera views are aggregated in the 3D voxel space and fed into a Cuboid Proposal Network (CPN) to localize all people. Then we propose a Pose Regression Network (PRN) to estimate a detailed 3D pose for each proposal. The approach is robust to occlusion, which occurs frequently in practice. Without bells and whistles, it outperforms previous methods on several public datasets. + + + +
    + +
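Aggregating per-view features into the 3D voxel space requires knowing where each voxel lands in every camera image. The NumPy sketch below shows only that projection step with a 3x4 camera projection matrix; the grid and matrix values are toy assumptions, and the actual CPN/PRN networks are not reproduced here.

```python
import numpy as np

def project_voxels(voxel_centers, proj_matrix):
    """Project 3D voxel centres into one camera view.

    voxel_centers: (N, 3) world coordinates
    proj_matrix:   3x4 camera projection matrix (intrinsics @ [R | t])
    returns:       (N, 2) pixel coordinates where per-view features would be sampled
    """
    n = voxel_centers.shape[0]
    homo = np.concatenate([voxel_centers, np.ones((n, 1))], axis=1)  # (N, 4)
    uvw = homo @ proj_matrix.T                                       # (N, 3)
    return uvw[:, :2] / uvw[:, 2:3]

# toy example: a 2x2x2 voxel grid in front of a simple pinhole camera
grid = np.stack(np.meshgrid([0.0, 1.0], [0.0, 1.0], [2.0, 3.0], indexing="ij"), -1).reshape(-1, 3)
P = np.array([[500.0, 0.0, 320.0, 0.0],
              [0.0, 500.0, 240.0, 0.0],
              [0.0, 0.0, 1.0, 0.0]])
print(project_voxels(grid, P)[:4])
```

Repeating this for every camera and gathering the 2D features at the projected pixels yields the voxel volume that the proposal and regression networks consume.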
    diff --git a/docs/src/papers/algorithms/wingloss.md b/docs/src/papers/algorithms/wingloss.md index 2aaa05722e..a0f0a35cfb 100644 --- a/docs/src/papers/algorithms/wingloss.md +++ b/docs/src/papers/algorithms/wingloss.md @@ -1,31 +1,31 @@ -# Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural Networks - - - -
    -Wingloss (CVPR'2018) - -```bibtex -@inproceedings{feng2018wing, - title={Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural Networks}, - author={Feng, Zhen-Hua and Kittler, Josef and Awais, Muhammad and Huber, Patrik and Wu, Xiao-Jun}, - booktitle={Computer Vision and Pattern Recognition (CVPR), 2018 IEEE Conference on}, - year={2018}, - pages ={2235-2245}, - organization={IEEE} -} -``` - -
    - -## Abstract - - - -We present a new loss function, namely Wing loss, for robust facial landmark localisation with Convolutional Neural Networks (CNNs). We first compare and analyse different loss functions including L2, L1 and smooth L1. The analysis of these loss functions suggests that, for the training of a CNN-based localisation model, more attention should be paid to small and medium range errors. To this end, we design a piece-wise loss function. The new loss amplifies the impact of errors from the interval (-w, w) by switching from L1 loss to a modified logarithm function. To address the problem of under-representation of samples with large out-of-plane head rotations in the training set, we propose a simple but effective boosting strategy, referred to as pose-based data balancing. In particular, we deal with the data imbalance problem by duplicating the minority training samples and perturbing them by injecting random image rotation, bounding box translation and other data augmentation approaches. Last, the proposed approach is extended to create a two-stage framework for robust facial landmark localisation. The experimental results obtained on AFLW and 300W demonstrate the merits of the Wing loss function, and prove the superiority of the proposed method over the state-of-the-art approaches. - - - -
    - -
    +# Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural Networks + + + +
    +Wingloss (CVPR'2018) + +```bibtex +@inproceedings{feng2018wing, + title={Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural Networks}, + author={Feng, Zhen-Hua and Kittler, Josef and Awais, Muhammad and Huber, Patrik and Wu, Xiao-Jun}, + booktitle={Computer Vision and Pattern Recognition (CVPR), 2018 IEEE Conference on}, + year={2018}, + pages ={2235-2245}, + organization={IEEE} +} +``` + +
    + +## Abstract + + + +We present a new loss function, namely Wing loss, for robust facial landmark localisation with Convolutional Neural Networks (CNNs). We first compare and analyse different loss functions including L2, L1 and smooth L1. The analysis of these loss functions suggests that, for the training of a CNN-based localisation model, more attention should be paid to small and medium range errors. To this end, we design a piece-wise loss function. The new loss amplifies the impact of errors from the interval (-w, w) by switching from L1 loss to a modified logarithm function. To address the problem of under-representation of samples with large out-of-plane head rotations in the training set, we propose a simple but effective boosting strategy, referred to as pose-based data balancing. In particular, we deal with the data imbalance problem by duplicating the minority training samples and perturbing them by injecting random image rotation, bounding box translation and other data augmentation approaches. Last, the proposed approach is extended to create a two-stage framework for robust facial landmark localisation. The experimental results obtained on AFLW and 300W demonstrate the merits of the Wing loss function, and prove the superiority of the proposed method over the state-of-the-art approaches. + + + +
    + +
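The abstract describes the Wing loss as a piecewise function that switches from L1 to a scaled logarithm for small and medium errors. A minimal PyTorch sketch of that function follows; the default `w` and `epsilon` values are commonly used settings and may differ from a given training configuration.

```python
import torch

def wing_loss(pred, target, w=10.0, epsilon=2.0):
    """Piece-wise Wing loss.

    For |x| < w the loss is w * ln(1 + |x| / epsilon), amplifying small and
    medium errors; otherwise it falls back to L1, shifted by the constant C
    so the two pieces join continuously.
    """
    diff = (pred - target).abs()
    C = w - w * torch.log(torch.tensor(1.0 + w / epsilon))
    loss = torch.where(diff < w,
                       w * torch.log(1.0 + diff / epsilon),
                       diff - C)
    return loss.mean()

# toy usage on 68-point landmark predictions
pred = torch.rand(4, 68, 2, requires_grad=True) * 64
target = torch.rand(4, 68, 2) * 64
wing_loss(pred, target).backward()
```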
    diff --git a/docs/src/papers/backbones/alexnet.md b/docs/src/papers/backbones/alexnet.md index d1ea753119..6b6c6a65db 100644 --- a/docs/src/papers/backbones/alexnet.md +++ b/docs/src/papers/backbones/alexnet.md @@ -1,30 +1,30 @@ -# Imagenet classification with deep convolutional neural networks - - - -
    -AlexNet (NeurIPS'2012) - -```bibtex -@inproceedings{krizhevsky2012imagenet, - title={Imagenet classification with deep convolutional neural networks}, - author={Krizhevsky, Alex and Sutskever, Ilya and Hinton, Geoffrey E}, - booktitle={Advances in neural information processing systems}, - pages={1097--1105}, - year={2012} -} -``` - -
    - -## Abstract - - - -We trained a large, deep convolutional neural network to classify the 1.2 million high-resolution images in the ImageNet LSVRC-2010 contest into the 1000 different classes. On the test data, we achieved top-1 and top-5 error rates of 37.5% and 17.0% which is considerably better than the previous state-of-the-art. The neural network, which has 60 million parameters and 650,000 neurons, consists of five convolutional layers, some of which are followed by max-pooling layers, and three fully-connected layers with a final 1000-way softmax. To make training faster, we used non-saturating neurons and a very efficient GPU implementation of the convolution operation. To reduce overfitting in the fully-connected layers we employed a recently-developed regularization method called "dropout" that proved to be very effective. We also entered a variant of this model in the ILSVRC-2012 competition and achieved a winning top-5 test error rate of 15.3%, compared to 26.2% achieved by the second-best entry - - - -
    - -
    +# Imagenet classification with deep convolutional neural networks + + + +
    +AlexNet (NeurIPS'2012) + +```bibtex +@inproceedings{krizhevsky2012imagenet, + title={Imagenet classification with deep convolutional neural networks}, + author={Krizhevsky, Alex and Sutskever, Ilya and Hinton, Geoffrey E}, + booktitle={Advances in neural information processing systems}, + pages={1097--1105}, + year={2012} +} +``` + +
+ +## Abstract + + + +We trained a large, deep convolutional neural network to classify the 1.2 million high-resolution images in the ImageNet LSVRC-2010 contest into the 1000 different classes. On the test data, we achieved top-1 and top-5 error rates of 37.5% and 17.0%, which is considerably better than the previous state of the art. The neural network, which has 60 million parameters and 650,000 neurons, consists of five convolutional layers, some of which are followed by max-pooling layers, and three fully-connected layers with a final 1000-way softmax. To make training faster, we used non-saturating neurons and a very efficient GPU implementation of the convolution operation. To reduce overfitting in the fully-connected layers we employed a recently-developed regularization method called "dropout" that proved to be very effective. We also entered a variant of this model in the ILSVRC-2012 competition and achieved a winning top-5 test error rate of 15.3%, compared to 26.2% achieved by the second-best entry. + + + +
    + +
    diff --git a/docs/src/papers/backbones/cpm.md b/docs/src/papers/backbones/cpm.md index fb5dbfacec..ea2ac7f73a 100644 --- a/docs/src/papers/backbones/cpm.md +++ b/docs/src/papers/backbones/cpm.md @@ -1,30 +1,30 @@ -# Convolutional pose machines - - - -
    -CPM (CVPR'2016) - -```bibtex -@inproceedings{wei2016convolutional, - title={Convolutional pose machines}, - author={Wei, Shih-En and Ramakrishna, Varun and Kanade, Takeo and Sheikh, Yaser}, - booktitle={Proceedings of the IEEE conference on Computer Vision and Pattern Recognition}, - pages={4724--4732}, - year={2016} -} -``` - -
    - -## Abstract - - - -We introduce associative embedding, a novel method for supervising convolutional neural networks for the task of detection and grouping. A number of computer vision problems can be framed in this manner including multi-person pose estimation, instance segmentation, and multi-object tracking. Usually the grouping of detections is achieved with multi-stage pipelines, instead we propose an approach that teaches a network to simultaneously output detections and group assignments. This technique can be easily integrated into any state-of-the-art network architecture that produces pixel-wise predictions. We show how to apply this method to both multi-person pose estimation and instance segmentation and report state-of-the-art performance for multi-person pose on the MPII and MS-COCO datasets. - - - -
    - -
    +# Convolutional pose machines + + + +
    +CPM (CVPR'2016) + +```bibtex +@inproceedings{wei2016convolutional, + title={Convolutional pose machines}, + author={Wei, Shih-En and Ramakrishna, Varun and Kanade, Takeo and Sheikh, Yaser}, + booktitle={Proceedings of the IEEE conference on Computer Vision and Pattern Recognition}, + pages={4724--4732}, + year={2016} +} +``` + +
    + +## Abstract + + + +We introduce associative embedding, a novel method for supervising convolutional neural networks for the task of detection and grouping. A number of computer vision problems can be framed in this manner including multi-person pose estimation, instance segmentation, and multi-object tracking. Usually the grouping of detections is achieved with multi-stage pipelines, instead we propose an approach that teaches a network to simultaneously output detections and group assignments. This technique can be easily integrated into any state-of-the-art network architecture that produces pixel-wise predictions. We show how to apply this method to both multi-person pose estimation and instance segmentation and report state-of-the-art performance for multi-person pose on the MPII and MS-COCO datasets. + + + +
    + +
    diff --git a/docs/src/papers/backbones/higherhrnet.md b/docs/src/papers/backbones/higherhrnet.md index c1d61c992a..feed6ea06d 100644 --- a/docs/src/papers/backbones/higherhrnet.md +++ b/docs/src/papers/backbones/higherhrnet.md @@ -1,30 +1,30 @@ -# HigherHRNet: Scale-Aware Representation Learning for Bottom-Up Human Pose Estimation - - - -
    -HigherHRNet (CVPR'2020) - -```bibtex -@inproceedings{cheng2020higherhrnet, - title={HigherHRNet: Scale-Aware Representation Learning for Bottom-Up Human Pose Estimation}, - author={Cheng, Bowen and Xiao, Bin and Wang, Jingdong and Shi, Honghui and Huang, Thomas S and Zhang, Lei}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={5386--5395}, - year={2020} -} -``` - -
    - -## Abstract - - - -Bottom-up human pose estimation methods have difficulties in predicting the correct pose for small persons due to challenges in scale variation. In this paper, we present HigherHRNet: a novel bottom-up human pose estimation method for learning scale-aware representations using high-resolution feature pyramids. Equipped with multi-resolution supervision for training and multi-resolution aggregation for inference, the proposed approach is able to solve the scale variation challenge in bottom-up multi-person pose estimation and localize keypoints more precisely, especially for small person. The feature pyramid in HigherHRNet consists of feature map outputs from HRNet and upsampled higher-resolution outputs through a transposed convolution. HigherHRNet outperforms the previous best bottom-up method by 2.5% AP for medium person on COCO test-dev, showing its effectiveness in handling scale variation. Furthermore, HigherHRNet achieves new state-of-the-art result on COCO test-dev (70.5% AP) without using refinement or other post-processing techniques, surpassing all existing bottom-up methods. HigherHRNet even surpasses all top-down methods on CrowdPose test (67.6% AP), suggesting its robustness in crowded scene. - - - -
    - -
    +# HigherHRNet: Scale-Aware Representation Learning for Bottom-Up Human Pose Estimation + + + +
    +HigherHRNet (CVPR'2020) + +```bibtex +@inproceedings{cheng2020higherhrnet, + title={HigherHRNet: Scale-Aware Representation Learning for Bottom-Up Human Pose Estimation}, + author={Cheng, Bowen and Xiao, Bin and Wang, Jingdong and Shi, Honghui and Huang, Thomas S and Zhang, Lei}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={5386--5395}, + year={2020} +} +``` + +
+ +## Abstract + + + +Bottom-up human pose estimation methods have difficulties in predicting the correct pose for small persons due to challenges in scale variation. In this paper, we present HigherHRNet: a novel bottom-up human pose estimation method for learning scale-aware representations using high-resolution feature pyramids. Equipped with multi-resolution supervision for training and multi-resolution aggregation for inference, the proposed approach is able to solve the scale variation challenge in bottom-up multi-person pose estimation and localize keypoints more precisely, especially for small persons. The feature pyramid in HigherHRNet consists of feature map outputs from HRNet and upsampled higher-resolution outputs through a transposed convolution. HigherHRNet outperforms the previous best bottom-up method by 2.5% AP for medium persons on COCO test-dev, showing its effectiveness in handling scale variation. Furthermore, HigherHRNet achieves a new state-of-the-art result on COCO test-dev (70.5% AP) without using refinement or other post-processing techniques, surpassing all existing bottom-up methods. HigherHRNet even surpasses all top-down methods on CrowdPose test (67.6% AP), suggesting its robustness in crowded scenes. + + + +
    + +
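The abstract's key mechanism is a transposed convolution that produces an extra, higher-resolution heatmap branch on top of the backbone's finest features. The PyTorch sketch below shows that pattern with illustrative channel sizes; it omits the multi-resolution supervision and aggregation details and is not MMPose's HigherHRNet head.

```python
import torch
import torch.nn as nn

class HigherResolutionHead(nn.Module):
    """Predict heatmaps at 1x and 2x backbone resolution (sketch)."""

    def __init__(self, in_channels=32, num_joints=17):
        super().__init__()
        self.head_low = nn.Conv2d(in_channels, num_joints, kernel_size=1)
        self.up = nn.Sequential(
            # transposed convolution doubles the spatial resolution
            nn.ConvTranspose2d(in_channels + num_joints, in_channels,
                               kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(in_channels),
            nn.ReLU(inplace=True),
        )
        self.head_high = nn.Conv2d(in_channels, num_joints, kernel_size=1)

    def forward(self, feats):                       # (B, C, H, W)
        heat_low = self.head_low(feats)             # (B, K, H, W)
        up = self.up(torch.cat([feats, heat_low], dim=1))
        heat_high = self.head_high(up)              # (B, K, 2H, 2W)
        return heat_low, heat_high

low, high = HigherResolutionHead()(torch.randn(2, 32, 64, 48))
print(low.shape, high.shape)   # (2, 17, 64, 48) (2, 17, 128, 96)
```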
    diff --git a/docs/src/papers/backbones/hourglass.md b/docs/src/papers/backbones/hourglass.md index 15f4d4d3c6..c6c7e51592 100644 --- a/docs/src/papers/backbones/hourglass.md +++ b/docs/src/papers/backbones/hourglass.md @@ -1,31 +1,31 @@ -# Stacked hourglass networks for human pose estimation - - - -
    -Hourglass (ECCV'2016) - -```bibtex -@inproceedings{newell2016stacked, - title={Stacked hourglass networks for human pose estimation}, - author={Newell, Alejandro and Yang, Kaiyu and Deng, Jia}, - booktitle={European conference on computer vision}, - pages={483--499}, - year={2016}, - organization={Springer} -} -``` - -
    - -## Abstract - - - -This work introduces a novel convolutional network architecture for the task of human pose estimation. Features are processed across all scales and consolidated to best capture the various spatial relationships associated with the body. We show how repeated bottom-up, top-down processing used in conjunction with intermediate supervision is critical to improving the performance of the network. We refer to the architecture as a "stacked hourglass" network based on the successive steps of pooling and upsampling that are done to produce a final set of predictions. State-of-the-art results are achieved on the FLIC and MPII benchmarks outcompeting all recent methods. - - - -
    - -
    +# Stacked hourglass networks for human pose estimation + + + +
    +Hourglass (ECCV'2016) + +```bibtex +@inproceedings{newell2016stacked, + title={Stacked hourglass networks for human pose estimation}, + author={Newell, Alejandro and Yang, Kaiyu and Deng, Jia}, + booktitle={European conference on computer vision}, + pages={483--499}, + year={2016}, + organization={Springer} +} +``` + +
    + +## Abstract + + + +This work introduces a novel convolutional network architecture for the task of human pose estimation. Features are processed across all scales and consolidated to best capture the various spatial relationships associated with the body. We show how repeated bottom-up, top-down processing used in conjunction with intermediate supervision is critical to improving the performance of the network. We refer to the architecture as a "stacked hourglass" network based on the successive steps of pooling and upsampling that are done to produce a final set of predictions. State-of-the-art results are achieved on the FLIC and MPII benchmarks outcompeting all recent methods. + + + +
    + +
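The "repeated bottom-up, top-down processing" described above can be captured by a recursive module that pools, processes, upsamples, and adds a skip branch at every scale. The following is a heavily simplified PyTorch sketch with illustrative widths, not the paper's residual-unit design or the full stacked network with intermediate supervision.

```python
import torch
import torch.nn as nn

def conv_block(c):
    return nn.Sequential(nn.Conv2d(c, c, 3, padding=1),
                         nn.BatchNorm2d(c),
                         nn.ReLU(inplace=True))

class Hourglass(nn.Module):
    """Minimal recursive hourglass: repeated pooling then upsampling with skips."""

    def __init__(self, depth=4, channels=64):
        super().__init__()
        self.skip = conv_block(channels)
        self.down = conv_block(channels)
        self.inner = Hourglass(depth - 1, channels) if depth > 1 else conv_block(channels)
        self.up = conv_block(channels)

    def forward(self, x):
        skip = self.skip(x)
        y = nn.functional.max_pool2d(self.down(x), 2)                      # bottom-up
        y = self.up(self.inner(y))
        y = nn.functional.interpolate(y, scale_factor=2, mode="nearest")   # top-down
        return y + skip                                                    # per-scale skip

out = Hourglass()(torch.randn(1, 64, 64, 64))
print(out.shape)   # torch.Size([1, 64, 64, 64])
```

Stacking several such modules end to end, each with its own heatmap head, gives the intermediate supervision the abstract highlights.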
    diff --git a/docs/src/papers/backbones/hrformer.md b/docs/src/papers/backbones/hrformer.md index dfa7a13f6b..dd00bfea1f 100644 --- a/docs/src/papers/backbones/hrformer.md +++ b/docs/src/papers/backbones/hrformer.md @@ -1,39 +1,39 @@ -# HRFormer: High-Resolution Vision Transformer for Dense Predict - - - -
    -HRFormer (NIPS'2021) - -```bibtex -@article{yuan2021hrformer, - title={HRFormer: High-Resolution Vision Transformer for Dense Predict}, - author={Yuan, Yuhui and Fu, Rao and Huang, Lang and Lin, Weihong and Zhang, Chao and Chen, Xilin and Wang, Jingdong}, - journal={Advances in Neural Information Processing Systems}, - volume={34}, - year={2021} -} -``` - -
    - -## Abstract - - - -We present a High-Resolution Transformer (HRFormer) that learns high-resolution representations for dense -prediction tasks, in contrast to the original Vision Transformer that produces low-resolution representations -and has high memory and computational cost. We take advantage of the multi-resolution parallel design -introduced in high-resolution convolutional networks (HRNet), along with local-window self-attention -that performs self-attention over small non-overlapping image windows, for improving the memory and -computation efficiency. In addition, we introduce a convolution into the FFN to exchange information -across the disconnected image windows. We demonstrate the effectiveness of the HighResolution Transformer -on both human pose estimation and semantic segmentation tasks, e.g., HRFormer outperforms Swin -transformer by 1.3 AP on COCO pose estimation with 50% fewer parameters and 30% fewer FLOPs. -Code is available at: https://github.com/HRNet/HRFormer - - - -
    - -
    +# HRFormer: High-Resolution Vision Transformer for Dense Predict + + + +
    +HRFormer (NIPS'2021) + +```bibtex +@article{yuan2021hrformer, + title={HRFormer: High-Resolution Vision Transformer for Dense Predict}, + author={Yuan, Yuhui and Fu, Rao and Huang, Lang and Lin, Weihong and Zhang, Chao and Chen, Xilin and Wang, Jingdong}, + journal={Advances in Neural Information Processing Systems}, + volume={34}, + year={2021} +} +``` + +
+ +## Abstract + + + +We present a High-Resolution Transformer (HRFormer) that learns high-resolution representations for dense +prediction tasks, in contrast to the original Vision Transformer that produces low-resolution representations +and has high memory and computational cost. We take advantage of the multi-resolution parallel design +introduced in high-resolution convolutional networks (HRNet), along with local-window self-attention +that performs self-attention over small non-overlapping image windows, to improve memory and +computation efficiency. In addition, we introduce a convolution into the FFN to exchange information +across the disconnected image windows. We demonstrate the effectiveness of the High-Resolution Transformer +on both human pose estimation and semantic segmentation tasks, e.g., HRFormer outperforms Swin +Transformer by 1.3 AP on COCO pose estimation with 50% fewer parameters and 30% fewer FLOPs. +Code is available at: https://github.com/HRNet/HRFormer + + + +
    + +
    diff --git a/docs/src/papers/backbones/hrnet.md b/docs/src/papers/backbones/hrnet.md index 05a46f543e..e1fba7b601 100644 --- a/docs/src/papers/backbones/hrnet.md +++ b/docs/src/papers/backbones/hrnet.md @@ -1,32 +1,32 @@ -# Deep high-resolution representation learning for human pose estimation - - - -
    -HRNet (CVPR'2019) - -```bibtex -@inproceedings{sun2019deep, - title={Deep high-resolution representation learning for human pose estimation}, - author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={5693--5703}, - year={2019} -} -``` - -
    - -## Abstract - - - -In this paper, we are interested in the human pose estimation problem with a focus on learning reliable highresolution representations. Most existing methods recover high-resolution representations from low-resolution representations produced by a high-to-low resolution network. Instead, our proposed network maintains high-resolution representations through the whole process. We start from a high-resolution subnetwork as the first stage, gradually add high-to-low resolution subnetworks one by one to form more stages, and connect the mutliresolution subnetworks in parallel. We conduct repeated multi-scale fusions such that each of the high-to-low resolution representations receives information from other parallel representations over and over, leading to rich highresolution representations. As a result, the predicted keypoint heatmap is potentially more accurate and spatially more precise. We empirically demonstrate the effectiveness -of our network through the superior pose estimation results over two benchmark datasets: the COCO keypoint detection -dataset and the MPII Human Pose dataset. In addition, we show the superiority of our network in pose tracking on the PoseTrack dataset. - - - -
    - -
    +# Deep high-resolution representation learning for human pose estimation + + + +
    +HRNet (CVPR'2019) + +```bibtex +@inproceedings{sun2019deep, + title={Deep high-resolution representation learning for human pose estimation}, + author={Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={5693--5703}, + year={2019} +} +``` + +
+ +## Abstract + + + +In this paper, we are interested in the human pose estimation problem with a focus on learning reliable high-resolution representations. Most existing methods recover high-resolution representations from low-resolution representations produced by a high-to-low resolution network. Instead, our proposed network maintains high-resolution representations through the whole process. We start from a high-resolution subnetwork as the first stage, gradually add high-to-low resolution subnetworks one by one to form more stages, and connect the multi-resolution subnetworks in parallel. We conduct repeated multi-scale fusions such that each of the high-to-low resolution representations receives information from other parallel representations over and over, leading to rich high-resolution representations. As a result, the predicted keypoint heatmap is potentially more accurate and spatially more precise. We empirically demonstrate the effectiveness +of our network through the superior pose estimation results over two benchmark datasets: the COCO keypoint detection +dataset and the MPII Human Pose dataset. In addition, we show the superiority of our network in pose tracking on the PoseTrack dataset. + + + +
    + +
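The repeated multi-scale fusion described above exchanges features between parallel branches at different resolutions. The PyTorch sketch below shows one such exchange for a two-branch case (channel counts are illustrative, loosely following HRNet-W32's early stages); the real network uses more branches and stages.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class TwoBranchFusion(nn.Module):
    """One multi-resolution fusion step between a high- and a low-resolution branch."""

    def __init__(self, c_high=32, c_low=64):
        super().__init__()
        self.low_to_high = nn.Conv2d(c_low, c_high, kernel_size=1)                  # then upsample x2
        self.high_to_low = nn.Conv2d(c_high, c_low, kernel_size=3, stride=2, padding=1)

    def forward(self, x_high, x_low):
        # each branch receives the other branch's features, resampled to its resolution
        high = x_high + F.interpolate(self.low_to_high(x_low), scale_factor=2, mode="nearest")
        low = x_low + self.high_to_low(x_high)
        return F.relu(high), F.relu(low)

h, l = TwoBranchFusion()(torch.randn(1, 32, 64, 48), torch.randn(1, 64, 32, 24))
print(h.shape, l.shape)   # (1, 32, 64, 48) (1, 64, 32, 24)
```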
    diff --git a/docs/src/papers/backbones/hrnetv2.md b/docs/src/papers/backbones/hrnetv2.md index f2ed2a9c0c..f764d61def 100644 --- a/docs/src/papers/backbones/hrnetv2.md +++ b/docs/src/papers/backbones/hrnetv2.md @@ -1,31 +1,31 @@ -# Deep high-resolution representation learning for visual recognition - - - -
    -HRNetv2 (TPAMI'2019) - -```bibtex -@article{WangSCJDZLMTWLX19, - title={Deep High-Resolution Representation Learning for Visual Recognition}, - author={Jingdong Wang and Ke Sun and Tianheng Cheng and - Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and - Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, - journal={TPAMI}, - year={2019} -} -``` - -
    - -## Abstract - - - -High-resolution representations are essential for position-sensitive vision problems, such as human pose estimation, semantic segmentation, and object detection. Existing state-of-the-art frameworks first encode the input image as a low-resolution representation through a subnetwork that is formed by connecting high-to-low resolution convolutions in series (e.g., ResNet, VGGNet), and then recover the high-resolution representation from the encoded low-resolution representation. Instead, our proposed network, named as High-Resolution Network (HRNet), maintains high-resolution representations through the whole process. There are two key characteristics: (i) Connect the high-to-low resolution convolution streams in parallel and (ii) repeatedly exchange the information across resolutions. The benefit is that the resulting representation is semantically richer and spatially more precise. We show the superiority of the proposed HRNet in a wide range of applications, including human pose estimation, semantic segmentation, and object detection, suggesting that the HRNet is a stronger backbone for computer vision problems. - - - -
    - -
    +# Deep high-resolution representation learning for visual recognition + + + +
    +HRNetv2 (TPAMI'2019) + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` + +
    + +## Abstract + + + +High-resolution representations are essential for position-sensitive vision problems, such as human pose estimation, semantic segmentation, and object detection. Existing state-of-the-art frameworks first encode the input image as a low-resolution representation through a subnetwork that is formed by connecting high-to-low resolution convolutions in series (e.g., ResNet, VGGNet), and then recover the high-resolution representation from the encoded low-resolution representation. Instead, our proposed network, named as High-Resolution Network (HRNet), maintains high-resolution representations through the whole process. There are two key characteristics: (i) Connect the high-to-low resolution convolution streams in parallel and (ii) repeatedly exchange the information across resolutions. The benefit is that the resulting representation is semantically richer and spatially more precise. We show the superiority of the proposed HRNet in a wide range of applications, including human pose estimation, semantic segmentation, and object detection, suggesting that the HRNet is a stronger backbone for computer vision problems. + + + +
    + +
    diff --git a/docs/src/papers/backbones/litehrnet.md b/docs/src/papers/backbones/litehrnet.md index f446062caf..06c0077640 100644 --- a/docs/src/papers/backbones/litehrnet.md +++ b/docs/src/papers/backbones/litehrnet.md @@ -1,30 +1,30 @@ -# Lite-HRNet: A Lightweight High-Resolution Network - - - -
    -LiteHRNet (CVPR'2021) - -```bibtex -@inproceedings{Yulitehrnet21, - title={Lite-HRNet: A Lightweight High-Resolution Network}, - author={Yu, Changqian and Xiao, Bin and Gao, Changxin and Yuan, Lu and Zhang, Lei and Sang, Nong and Wang, Jingdong}, - booktitle={CVPR}, - year={2021} -} -``` - -
    - -## Abstract - - - -We present an efficient high-resolution network, Lite-HRNet, for human pose estimation. We start by simply applying the efficient shuffle block in ShuffleNet to HRNet (high-resolution network), yielding stronger performance over popular lightweight networks, such as MobileNet, ShuffleNet, and Small HRNet. -We find that the heavily-used pointwise (1x1) convolutions in shuffle blocks become the computational bottleneck. We introduce a lightweight unit, conditional channel weighting, to replace costly pointwise (1x1) convolutions in shuffle blocks. The complexity of channel weighting is linear w.r.t the number of channels and lower than the quadratic time complexity for pointwise convolutions. Our solution learns the weights from all the channels and over multiple resolutions that are readily available in the parallel branches in HRNet. It uses the weights as the bridge to exchange information across channels and resolutions, compensating the role played by the pointwise (1x1) convolution. Lite-HRNet demonstrates superior results on human pose estimation over popular lightweight networks. Moreover, Lite-HRNet can be easily applied to semantic segmentation task in the same lightweight manner. - - - -
    - -
    +# Lite-HRNet: A Lightweight High-Resolution Network + + + +
    +LiteHRNet (CVPR'2021) + +```bibtex +@inproceedings{Yulitehrnet21, + title={Lite-HRNet: A Lightweight High-Resolution Network}, + author={Yu, Changqian and Xiao, Bin and Gao, Changxin and Yuan, Lu and Zhang, Lei and Sang, Nong and Wang, Jingdong}, + booktitle={CVPR}, + year={2021} +} +``` + +
+ +## Abstract + + + +We present an efficient high-resolution network, Lite-HRNet, for human pose estimation. We start by simply applying the efficient shuffle block in ShuffleNet to HRNet (high-resolution network), yielding stronger performance over popular lightweight networks, such as MobileNet, ShuffleNet, and Small HRNet. +We find that the heavily-used pointwise (1x1) convolutions in shuffle blocks become the computational bottleneck. We introduce a lightweight unit, conditional channel weighting, to replace costly pointwise (1x1) convolutions in shuffle blocks. The complexity of channel weighting is linear w.r.t. the number of channels and lower than the quadratic time complexity of pointwise convolutions. Our solution learns the weights from all the channels and over the multiple resolutions that are readily available in the parallel branches in HRNet. It uses the weights as the bridge to exchange information across channels and resolutions, compensating for the role played by the pointwise (1x1) convolution. Lite-HRNet demonstrates superior results on human pose estimation over popular lightweight networks. Moreover, Lite-HRNet can be easily applied to the semantic segmentation task in the same lightweight manner. + + + +
    + +
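To make the "conditional channel weighting" idea concrete, here is a loose PyTorch sketch of a cross-resolution weighting unit: channel weights are computed jointly from all parallel branches and then used to modulate each branch. The reduction ratio, channel counts, and pooling/upsampling choices are assumptions for illustration, not Lite-HRNet's exact unit.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class CrossResolutionWeighting(nn.Module):
    """Conditional channel weighting across parallel resolution branches (sketch)."""

    def __init__(self, branch_channels=(40, 80), reduction=8):
        super().__init__()
        total = sum(branch_channels)
        self.branch_channels = list(branch_channels)
        self.fc = nn.Sequential(
            nn.Conv2d(total, total // reduction, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(total // reduction, total, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, branches):
        # pool every branch to the smallest resolution, compute joint weights,
        # then send each branch its own slice of weights
        smallest = branches[-1].shape[-2:]
        pooled = torch.cat([F.adaptive_avg_pool2d(x, smallest) for x in branches], dim=1)
        weights = torch.split(self.fc(pooled), self.branch_channels, dim=1)
        return [x * F.interpolate(w, size=x.shape[-2:], mode="nearest")
                for x, w in zip(branches, weights)]

outs = CrossResolutionWeighting()([torch.randn(1, 40, 64, 48), torch.randn(1, 80, 32, 24)])
print([o.shape for o in outs])
```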
    diff --git a/docs/src/papers/backbones/mobilenetv2.md b/docs/src/papers/backbones/mobilenetv2.md index 9456520d46..2e1677a52d 100644 --- a/docs/src/papers/backbones/mobilenetv2.md +++ b/docs/src/papers/backbones/mobilenetv2.md @@ -1,30 +1,30 @@ -# Mobilenetv2: Inverted residuals and linear bottlenecks - - - -
    -MobilenetV2 (CVPR'2018) - -```bibtex -@inproceedings{sandler2018mobilenetv2, - title={Mobilenetv2: Inverted residuals and linear bottlenecks}, - author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={4510--4520}, - year={2018} -} -``` - -
    - -## Abstract - - - -In this paper we describe a new mobile architecture, mbox{MobileNetV2}, that improves the state of the art performance of mobile models on multiple tasks and benchmarks as well as across a spectrum of different model sizes. We also describe efficient ways of applying these mobile models to object detection in a novel framework we call mbox{SSDLite}. Additionally, we demonstrate how to build mobile semantic segmentation models through a reduced form of mbox{DeepLabv3} which we call Mobile mbox{DeepLabv3}. is based on an inverted residual structure where the shortcut connections are between the thin bottleneck layers. The intermediate expansion layer uses lightweight depthwise convolutions to filter features as a source of non-linearity. Additionally, we find that it is important to remove non-linearities in the narrow layers in order to maintain representational power. We demonstrate that this improves performance and provide an intuition that led to this design. Finally, our approach allows decoupling of the input/output domains from the expressiveness of the transformation, which provides a convenient framework for further analysis. We measure our performance on mbox{ImageNet}~cite{Russakovsky:2015:ILS:2846547.2846559} classification, COCO object detection cite{COCO}, VOC image segmentation cite{PASCAL}. We evaluate the trade-offs between accuracy, and number of operations measured by multiply-adds (MAdd), as well as actual latency, and the number of parameters. - - - -
    - -
    +# Mobilenetv2: Inverted residuals and linear bottlenecks + + + +
    +MobilenetV2 (CVPR'2018) + +```bibtex +@inproceedings{sandler2018mobilenetv2, + title={Mobilenetv2: Inverted residuals and linear bottlenecks}, + author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={4510--4520}, + year={2018} +} +``` + +
+ +## Abstract + + + +In this paper we describe a new mobile architecture, MobileNetV2, that improves the state-of-the-art performance of mobile models on multiple tasks and benchmarks as well as across a spectrum of different model sizes. We also describe efficient ways of applying these mobile models to object detection in a novel framework we call SSDLite. Additionally, we demonstrate how to build mobile semantic segmentation models through a reduced form of DeepLabv3, which we call Mobile DeepLabv3. MobileNetV2 is based on an inverted residual structure where the shortcut connections are between the thin bottleneck layers. The intermediate expansion layer uses lightweight depthwise convolutions to filter features as a source of non-linearity. Additionally, we find that it is important to remove non-linearities in the narrow layers in order to maintain representational power. We demonstrate that this improves performance and provide an intuition that led to this design. Finally, our approach allows decoupling of the input/output domains from the expressiveness of the transformation, which provides a convenient framework for further analysis. We measure our performance on ImageNet classification, COCO object detection, and VOC image segmentation. We evaluate the trade-offs between accuracy and the number of operations measured by multiply-adds (MAdd), as well as actual latency and the number of parameters. + + + +
    + +
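The inverted residual structure described in the abstract (expand with a pointwise convolution, filter with a depthwise convolution, project back linearly, and shortcut between the thin bottlenecks) is sketched below in PyTorch. The expansion ratio and channel counts are typical defaults rather than a specific configuration from the paper.

```python
import torch
import torch.nn as nn

class InvertedResidual(nn.Module):
    """MobileNetV2-style inverted residual block (sketch)."""

    def __init__(self, in_ch, out_ch, stride=1, expand_ratio=6):
        super().__init__()
        hidden = in_ch * expand_ratio
        self.use_shortcut = stride == 1 and in_ch == out_ch
        self.block = nn.Sequential(
            nn.Conv2d(in_ch, hidden, 1, bias=False),                      # expand
            nn.BatchNorm2d(hidden), nn.ReLU6(inplace=True),
            nn.Conv2d(hidden, hidden, 3, stride=stride, padding=1,
                      groups=hidden, bias=False),                         # depthwise filter
            nn.BatchNorm2d(hidden), nn.ReLU6(inplace=True),
            nn.Conv2d(hidden, out_ch, 1, bias=False),                     # linear projection
            nn.BatchNorm2d(out_ch),                                       # no activation here
        )

    def forward(self, x):
        out = self.block(x)
        return x + out if self.use_shortcut else out

y = InvertedResidual(32, 32)(torch.randn(1, 32, 56, 56))
print(y.shape)   # torch.Size([1, 32, 56, 56])
```

Note the absence of a non-linearity after the projection, matching the "linear bottleneck" argument in the abstract.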
    diff --git a/docs/src/papers/backbones/mspn.md b/docs/src/papers/backbones/mspn.md index 1915cd3915..5824221603 100644 --- a/docs/src/papers/backbones/mspn.md +++ b/docs/src/papers/backbones/mspn.md @@ -1,29 +1,29 @@ -# Rethinking on multi-stage networks for human pose estimation - - - -
    -MSPN (ArXiv'2019) - -```bibtex -@article{li2019rethinking, - title={Rethinking on Multi-Stage Networks for Human Pose Estimation}, - author={Li, Wenbo and Wang, Zhicheng and Yin, Binyi and Peng, Qixiang and Du, Yuming and Xiao, Tianzi and Yu, Gang and Lu, Hongtao and Wei, Yichen and Sun, Jian}, - journal={arXiv preprint arXiv:1901.00148}, - year={2019} -} -``` - -
    - -## Abstract - - - -Existing pose estimation approaches fall into two categories: single-stage and multi-stage methods. While multi-stage methods are seemingly more suited for the task, their performance in current practice is not as good as single-stage methods. This work studies this issue. We argue that the current multi-stage methods' unsatisfactory performance comes from the insufficiency in various design choices. We propose several improvements, including the single-stage module design, cross stage feature aggregation, and coarse-to-fine supervision. The resulting method establishes the new state-of-the-art on both MS COCO and MPII Human Pose dataset, justifying the effectiveness of a multi-stage architecture. The source code is publicly available for further research. - - - -
    - -
    +# Rethinking on multi-stage networks for human pose estimation + + + +
    +MSPN (ArXiv'2019) + +```bibtex +@article{li2019rethinking, + title={Rethinking on Multi-Stage Networks for Human Pose Estimation}, + author={Li, Wenbo and Wang, Zhicheng and Yin, Binyi and Peng, Qixiang and Du, Yuming and Xiao, Tianzi and Yu, Gang and Lu, Hongtao and Wei, Yichen and Sun, Jian}, + journal={arXiv preprint arXiv:1901.00148}, + year={2019} +} +``` + +
    + +## Abstract + + + +Existing pose estimation approaches fall into two categories: single-stage and multi-stage methods. While multi-stage methods are seemingly more suited for the task, their performance in current practice is not as good as single-stage methods. This work studies this issue. We argue that the current multi-stage methods' unsatisfactory performance comes from the insufficiency in various design choices. We propose several improvements, including the single-stage module design, cross stage feature aggregation, and coarse-to-fine supervision. The resulting method establishes the new state-of-the-art on both MS COCO and MPII Human Pose dataset, justifying the effectiveness of a multi-stage architecture. The source code is publicly available for further research. + + + +
    + +
    diff --git a/docs/src/papers/backbones/pvt.md b/docs/src/papers/backbones/pvt.md index 303a126912..f4a5a6a85d 100644 --- a/docs/src/papers/backbones/pvt.md +++ b/docs/src/papers/backbones/pvt.md @@ -1,49 +1,49 @@ -# Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions - - - -
    -PVT (ICCV'2021) - -```bibtex -@inproceedings{wang2021pyramid, - title={Pyramid vision transformer: A versatile backbone for dense prediction without convolutions}, - author={Wang, Wenhai and Xie, Enze and Li, Xiang and Fan, Deng-Ping and Song, Kaitao and Liang, Ding and Lu, Tong and Luo, Ping and Shao, Ling}, - booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, - pages={568--578}, - year={2021} -} -``` - -
    - -## Abstract - - - -Although using convolutional neural networks (CNNs) as backbones achieves great -successes in computer vision, this work investigates a simple backbone network -useful for many dense prediction tasks without convolutions. Unlike the -recently-proposed Transformer model (e.g., ViT) that is specially designed for -image classification, we propose Pyramid Vision Transformer~(PVT), which overcomes -the difficulties of porting Transformer to various dense prediction tasks. -PVT has several merits compared to prior arts. (1) Different from ViT that -typically has low-resolution outputs and high computational and memory cost, -PVT can be not only trained on dense partitions of the image to achieve high -output resolution, which is important for dense predictions but also using a -progressive shrinking pyramid to reduce computations of large feature maps. -(2) PVT inherits the advantages from both CNN and Transformer, making it a -unified backbone in various vision tasks without convolutions by simply replacing -CNN backbones. (3) We validate PVT by conducting extensive experiments, showing -that it boosts the performance of many downstream tasks, e.g., object detection, -semantic, and instance segmentation. For example, with a comparable number of -parameters, RetinaNet+PVT achieves 40.4 AP on the COCO dataset, surpassing -RetinNet+ResNet50 (36.3 AP) by 4.1 absolute AP. We hope PVT could serve as an -alternative and useful backbone for pixel-level predictions and facilitate future -researches. Code is available at https://github.com/whai362/PVT . - - - -
    - -
    +# Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions + + + +
    +PVT (ICCV'2021) + +```bibtex +@inproceedings{wang2021pyramid, + title={Pyramid vision transformer: A versatile backbone for dense prediction without convolutions}, + author={Wang, Wenhai and Xie, Enze and Li, Xiang and Fan, Deng-Ping and Song, Kaitao and Liang, Ding and Lu, Tong and Luo, Ping and Shao, Ling}, + booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, + pages={568--578}, + year={2021} +} +``` + +
+ +## Abstract + + + +Although using convolutional neural networks (CNNs) as backbones achieves great +success in computer vision, this work investigates a simple backbone network +useful for many dense prediction tasks without convolutions. Unlike the +recently-proposed Transformer model (e.g., ViT) that is specially designed for +image classification, we propose the Pyramid Vision Transformer (PVT), which overcomes +the difficulties of porting Transformer to various dense prediction tasks. +PVT has several merits compared to prior art. (1) Different from ViT, which +typically has low-resolution outputs and high computational and memory cost, +PVT can not only be trained on dense partitions of the image to achieve high +output resolution, which is important for dense prediction, but also uses a +progressive shrinking pyramid to reduce the computations of large feature maps. +(2) PVT inherits the advantages of both CNN and Transformer, making it a +unified backbone for various vision tasks without convolutions by simply replacing +CNN backbones. (3) We validate PVT by conducting extensive experiments, showing +that it boosts the performance of many downstream tasks, e.g., object detection, +semantic segmentation, and instance segmentation. For example, with a comparable number of +parameters, RetinaNet+PVT achieves 40.4 AP on the COCO dataset, surpassing +RetinaNet+ResNet50 (36.3 AP) by 4.1 absolute AP. We hope PVT can serve as an +alternative and useful backbone for pixel-level predictions and facilitate future +research. Code is available at https://github.com/whai362/PVT . + + + +
    + +
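The "progressive shrinking pyramid" in the abstract above comes down to reducing the key/value resolution before self-attention so that attention on large feature maps stays affordable. A minimal PyTorch sketch of that idea follows; the class name, the `sr_ratio` argument, and the shapes are illustrative assumptions, not the code touched by this diff or MMPose's implementation.

```python
import torch
import torch.nn as nn

class SpatialReductionAttention(nn.Module):
    """Illustrative sketch: keys/values are spatially downsampled by `sr_ratio`
    before multi-head attention, so attention on large feature maps stays cheap."""

    def __init__(self, dim, num_heads=8, sr_ratio=4):
        super().__init__()
        assert dim % num_heads == 0
        self.num_heads, self.head_dim = num_heads, dim // num_heads
        self.q = nn.Linear(dim, dim)
        self.kv = nn.Linear(dim, dim * 2)
        self.proj = nn.Linear(dim, dim)
        # strided conv shrinks the (H, W) grid by sr_ratio in each direction
        self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
        self.norm = nn.LayerNorm(dim)

    def forward(self, x, H, W):
        B, N, C = x.shape  # N == H * W
        q = self.q(x).reshape(B, N, self.num_heads, self.head_dim).transpose(1, 2)
        x_ = x.transpose(1, 2).reshape(B, C, H, W)
        x_ = self.sr(x_).reshape(B, C, -1).transpose(1, 2)   # (B, N / sr_ratio**2, C)
        x_ = self.norm(x_)
        kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
        k, v = kv[0], kv[1]
        attn = (q @ k.transpose(-2, -1)) * self.head_dim ** -0.5
        out = (attn.softmax(dim=-1) @ v).transpose(1, 2).reshape(B, N, C)
        return self.proj(out)

# e.g. a 32x32 feature map with 64 channels
attn = SpatialReductionAttention(dim=64, num_heads=4, sr_ratio=4)
y = attn(torch.randn(2, 32 * 32, 64), H=32, W=32)   # -> (2, 1024, 64)
```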
    diff --git a/docs/src/papers/backbones/pvtv2.md b/docs/src/papers/backbones/pvtv2.md index 43657d8fb9..0d48709d5f 100644 --- a/docs/src/papers/backbones/pvtv2.md +++ b/docs/src/papers/backbones/pvtv2.md @@ -1,35 +1,35 @@ -# PVTv2: Improved Baselines with Pyramid Vision Transformer - - - -
    -PVTV2 (CVMJ'2022) - -```bibtex -@article{wang2022pvt, - title={PVT v2: Improved baselines with Pyramid Vision Transformer}, - author={Wang, Wenhai and Xie, Enze and Li, Xiang and Fan, Deng-Ping and Song, Kaitao and Liang, Ding and Lu, Tong and Luo, Ping and Shao, Ling}, - journal={Computational Visual Media}, - pages={1--10}, - year={2022}, - publisher={Springer} -} -``` - -
    - -## Abstract - - - -Transformer recently has presented encouraging progress in computer vision. -In this work, we present new baselines by improving the original Pyramid -Vision Transformer (PVTv1) by adding three designs, including (1) linear -complexity attention layer, (2) overlapping patch embedding, and (3) -convolutional feed-forward network. With these modifications, PVTv2 reduces -the computational complexity of PVTv1 to linear and achieves significant -improvements on fundamental vision tasks such as classification, detection, -and segmentation. Notably, the proposed PVTv2 achieves comparable or better -performances than recent works such as Swin Transformer. We hope this work -will facilitate state-of-the-art Transformer researches in computer vision. -Code is available at https://github.com/whai362/PVT . +# PVTv2: Improved Baselines with Pyramid Vision Transformer + + + +
    +PVTV2 (CVMJ'2022) + +```bibtex +@article{wang2022pvt, + title={PVT v2: Improved baselines with Pyramid Vision Transformer}, + author={Wang, Wenhai and Xie, Enze and Li, Xiang and Fan, Deng-Ping and Song, Kaitao and Liang, Ding and Lu, Tong and Luo, Ping and Shao, Ling}, + journal={Computational Visual Media}, + pages={1--10}, + year={2022}, + publisher={Springer} +} +``` + +
+ +## Abstract + + + +Transformers have recently shown encouraging progress in computer vision. +In this work, we present new baselines that improve the original Pyramid +Vision Transformer (PVTv1) with three designs: (1) a linear-complexity +attention layer, (2) overlapping patch embedding, and (3) a +convolutional feed-forward network. With these modifications, PVTv2 reduces +the computational complexity of PVTv1 to linear and achieves significant +improvements on fundamental vision tasks such as classification, detection, +and segmentation. Notably, the proposed PVTv2 achieves comparable or better +performance than recent works such as Swin Transformer. We hope this work +will facilitate state-of-the-art Transformer research in computer vision. +Code is available at https://github.com/whai362/PVT . diff --git a/docs/src/papers/backbones/resnest.md index 748c94737a..ecd5c1b12d 100644 --- a/docs/src/papers/backbones/resnest.md +++ b/docs/src/papers/backbones/resnest.md @@ -1,29 +1,29 @@ -# ResNeSt: Split-Attention Networks - - - -
    -ResNeSt (ArXiv'2020) - -```bibtex -@article{zhang2020resnest, - title={ResNeSt: Split-Attention Networks}, - author={Zhang, Hang and Wu, Chongruo and Zhang, Zhongyue and Zhu, Yi and Zhang, Zhi and Lin, Haibin and Sun, Yue and He, Tong and Muller, Jonas and Manmatha, R. and Li, Mu and Smola, Alexander}, - journal={arXiv preprint arXiv:2004.08955}, - year={2020} -} -``` - -
    - -## Abstract - - - -It is well known that featuremap attention and multi-path representation are important for visual recognition. In this paper, we present a modularized architecture, which applies the channel-wise attention on different network branches to leverage their success in capturing cross-feature interactions and learning diverse representations. Our design results in a simple and unified computation block, which can be parameterized using only a few variables. Our model, named ResNeSt, outperforms EfficientNet in accuracy and latency trade-off on image classification. In addition, ResNeSt has achieved superior transfer learning results on several public benchmarks serving as the backbone, and has been adopted by the winning entries of COCO-LVIS challenge. The source code for complete system and pretrained models are publicly available. - - - -
    - -
    +# ResNeSt: Split-Attention Networks + + + +
    +ResNeSt (ArXiv'2020) + +```bibtex +@article{zhang2020resnest, + title={ResNeSt: Split-Attention Networks}, + author={Zhang, Hang and Wu, Chongruo and Zhang, Zhongyue and Zhu, Yi and Zhang, Zhi and Lin, Haibin and Sun, Yue and He, Tong and Muller, Jonas and Manmatha, R. and Li, Mu and Smola, Alexander}, + journal={arXiv preprint arXiv:2004.08955}, + year={2020} +} +``` + +
    + +## Abstract + + + +It is well known that featuremap attention and multi-path representation are important for visual recognition. In this paper, we present a modularized architecture, which applies the channel-wise attention on different network branches to leverage their success in capturing cross-feature interactions and learning diverse representations. Our design results in a simple and unified computation block, which can be parameterized using only a few variables. Our model, named ResNeSt, outperforms EfficientNet in accuracy and latency trade-off on image classification. In addition, ResNeSt has achieved superior transfer learning results on several public benchmarks serving as the backbone, and has been adopted by the winning entries of COCO-LVIS challenge. The source code for complete system and pretrained models are publicly available. + + + +
    + +
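The channel-wise attention over branches described above can be sketched as a split-attention layer: expand the input into `radix` parallel splits and let a softmax over the splits decide, per channel, how to fuse them. A simplified, hedged PyTorch sketch (cardinality 1, names and widths assumed), not the full ResNeSt block:

```python
import torch
import torch.nn as nn

class SplitAttention(nn.Module):
    """Simplified split-attention: `radix` splits are produced from the input and
    fused with per-channel softmax weights computed from their pooled sum."""

    def __init__(self, channels, radix=2, reduction=4):
        super().__init__()
        self.radix = radix
        self.conv = nn.Conv2d(channels, channels * radix, 3, padding=1,
                              groups=radix, bias=False)
        self.bn = nn.BatchNorm2d(channels * radix)
        self.relu = nn.ReLU(inplace=True)
        inner = max(channels // reduction, 8)
        self.fc1 = nn.Conv2d(channels, inner, 1)
        self.fc2 = nn.Conv2d(inner, channels * radix, 1)

    def forward(self, x):
        b, c = x.shape[:2]
        feats = self.relu(self.bn(self.conv(x)))                 # (B, C*radix, H, W)
        splits = feats.view(b, self.radix, c, *feats.shape[2:])  # (B, radix, C, H, W)
        gap = splits.sum(dim=1).mean(dim=(2, 3), keepdim=True)   # (B, C, 1, 1)
        attn = self.fc2(self.relu(self.fc1(gap)))                # (B, C*radix, 1, 1)
        attn = attn.view(b, self.radix, c, 1, 1).softmax(dim=1)  # softmax over splits
        return (splits * attn).sum(dim=1)                        # (B, C, H, W)

y = SplitAttention(64)(torch.randn(2, 64, 16, 16))   # -> (2, 64, 16, 16)
```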
    diff --git a/docs/src/papers/backbones/resnet.md b/docs/src/papers/backbones/resnet.md index 86b91ffc38..875ec8c7a6 100644 --- a/docs/src/papers/backbones/resnet.md +++ b/docs/src/papers/backbones/resnet.md @@ -1,32 +1,32 @@ -# Deep residual learning for image recognition - - - -
    -ResNet (CVPR'2016) - -```bibtex -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} -``` - -
    - -## Abstract - - - -Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from -considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers—8× deeper than VGG nets but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers. The depth of representations is of central importance for many visual recognition tasks. Solely due to our extremely deep representations, we obtain a 28% relative improvement on the COCO object detection dataset. Deep residual nets are foundations of our submissions to ILSVRC -& COCO 2015 competitions1 , where we also won the 1st places on the tasks of ImageNet detection, ImageNet localization, COCO detection, and COCO segmentation. - - - -
    - -
    +# Deep residual learning for image recognition + + + +
    +ResNet (CVPR'2016) + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +
+ +## Abstract + + + +Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from +considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers—8× deeper than VGG nets but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers. The depth of representations is of central importance for many visual recognition tasks. Solely due to our extremely deep representations, we obtain a 28% relative improvement on the COCO object detection dataset. Deep residual nets are foundations of our submissions to ILSVRC +& COCO 2015 competitions, where we also won the 1st places on the tasks of ImageNet detection, ImageNet localization, COCO detection, and COCO segmentation. + + + +
    + +
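The residual reformulation above is compact enough to show directly. A minimal PyTorch sketch of a basic residual block, assuming equal input/output widths so the identity shortcut needs no projection:

```python
import torch
import torch.nn as nn

class BasicBlock(nn.Module):
    """Residual block: the stacked convs learn a residual F(x) and the output is
    ReLU(F(x) + x), which is what makes very deep nets easier to optimize."""

    def __init__(self, channels):
        super().__init__()
        self.conv1 = nn.Conv2d(channels, channels, 3, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(channels)
        self.conv2 = nn.Conv2d(channels, channels, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.bn2(self.conv2(self.relu(self.bn1(self.conv1(x)))))
        return self.relu(out + x)   # identity shortcut

y = BasicBlock(64)(torch.randn(2, 64, 56, 56))   # -> (2, 64, 56, 56)
```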
    diff --git a/docs/src/papers/backbones/resnetv1d.md b/docs/src/papers/backbones/resnetv1d.md index ebde55454e..d64141e47d 100644 --- a/docs/src/papers/backbones/resnetv1d.md +++ b/docs/src/papers/backbones/resnetv1d.md @@ -1,31 +1,31 @@ -# Bag of tricks for image classification with convolutional neural networks - - - -
    -ResNetV1D (CVPR'2019) - -```bibtex -@inproceedings{he2019bag, - title={Bag of tricks for image classification with convolutional neural networks}, - author={He, Tong and Zhang, Zhi and Zhang, Hang and Zhang, Zhongyue and Xie, Junyuan and Li, Mu}, - booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, - pages={558--567}, - year={2019} -} -``` - -
    - -## Abstract - - - -Much of the recent progress made in image classification research can be credited to training procedure refinements, such as changes in data augmentations and optimization methods. In the literature, however, most refinements are either briefly mentioned as implementation details or only visible in source code. In this paper, we will examine a collection of such refinements and empirically evaluate their impact on the final model accuracy through ablation study. We will show that, by combining these refinements together, we are able to improve various CNN models significantly. For example, we raise ResNet-50’s top-1 validation accuracy from 75.3% to 79.29% on ImageNet. We will also demonstrate that improvement on image classification accuracy leads to better transfer learning performance in other application domains such as object detection and semantic -segmentation. - - - -
    - -
    +# Bag of tricks for image classification with convolutional neural networks + + + +
    +ResNetV1D (CVPR'2019) + +```bibtex +@inproceedings{he2019bag, + title={Bag of tricks for image classification with convolutional neural networks}, + author={He, Tong and Zhang, Zhi and Zhang, Hang and Zhang, Zhongyue and Xie, Junyuan and Li, Mu}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + pages={558--567}, + year={2019} +} +``` + +
    + +## Abstract + + + +Much of the recent progress made in image classification research can be credited to training procedure refinements, such as changes in data augmentations and optimization methods. In the literature, however, most refinements are either briefly mentioned as implementation details or only visible in source code. In this paper, we will examine a collection of such refinements and empirically evaluate their impact on the final model accuracy through ablation study. We will show that, by combining these refinements together, we are able to improve various CNN models significantly. For example, we raise ResNet-50’s top-1 validation accuracy from 75.3% to 79.29% on ImageNet. We will also demonstrate that improvement on image classification accuracy leads to better transfer learning performance in other application domains such as object detection and semantic +segmentation. + + + +
    + +
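The backbone name refers to the "ResNet-D" model tweak from this paper, which reworks the shortcut's downsampling so the strided 1x1 conv no longer discards activations. A hedged sketch of that tweak plus two of the paper's training refinements, in plain PyTorch; function and variable names are assumptions, and the `label_smoothing` argument assumes a reasonably recent PyTorch release.

```python
import torch
import torch.nn as nn

def resnet_d_downsample(in_ch, out_ch):
    """ResNet-D style shortcut: avg-pool first, then a stride-1 1x1 conv, instead of
    a stride-2 1x1 conv that would skip three quarters of the activations."""
    return nn.Sequential(
        nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True),
        nn.Conv2d(in_ch, out_ch, kernel_size=1, bias=False),
        nn.BatchNorm2d(out_ch),
    )

# Two of the training refinements the paper studies: label smoothing and cosine decay.
model = resnet_d_downsample(64, 128)
criterion = nn.CrossEntropyLoss(label_smoothing=0.1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=120)
```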
    diff --git a/docs/src/papers/backbones/resnext.md b/docs/src/papers/backbones/resnext.md index 9803ee9bcd..6703c4c89a 100644 --- a/docs/src/papers/backbones/resnext.md +++ b/docs/src/papers/backbones/resnext.md @@ -1,30 +1,30 @@ -# Aggregated residual transformations for deep neural networks - - - -
    -ResNext (CVPR'2017) - -```bibtex -@inproceedings{xie2017aggregated, - title={Aggregated residual transformations for deep neural networks}, - author={Xie, Saining and Girshick, Ross and Doll{\'a}r, Piotr and Tu, Zhuowen and He, Kaiming}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={1492--1500}, - year={2017} -} -``` - -
    - -## Abstract - - - -We present a simple, highly modularized network architecture for image classification. Our network is constructed by repeating a building block that aggregates a set of transformations with the same topology. Our simple design results in a homogeneous, multi-branch architecture that has only a few hyper-parameters to set. This strategy exposes a new dimension, which we call "cardinality" (the size of the set of transformations), as an essential factor in addition to the dimensions of depth and width. On the ImageNet-1K dataset, we empirically show that even under the restricted condition of maintaining complexity, increasing cardinality is able to improve classification accuracy. Moreover, increasing cardinality is more effective than going deeper or wider when we increase the capacity. Our models, named ResNeXt, are the foundations of our entry to the ILSVRC 2016 classification task in which we secured 2nd place. We further investigate ResNeXt on an ImageNet-5K set and the COCO detection set, also showing better results than its ResNet counterpart. The code and models are publicly available online. - - - -
    - -
    +# Aggregated residual transformations for deep neural networks + + + +
    +ResNext (CVPR'2017) + +```bibtex +@inproceedings{xie2017aggregated, + title={Aggregated residual transformations for deep neural networks}, + author={Xie, Saining and Girshick, Ross and Doll{\'a}r, Piotr and Tu, Zhuowen and He, Kaiming}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1492--1500}, + year={2017} +} +``` + +
    + +## Abstract + + + +We present a simple, highly modularized network architecture for image classification. Our network is constructed by repeating a building block that aggregates a set of transformations with the same topology. Our simple design results in a homogeneous, multi-branch architecture that has only a few hyper-parameters to set. This strategy exposes a new dimension, which we call "cardinality" (the size of the set of transformations), as an essential factor in addition to the dimensions of depth and width. On the ImageNet-1K dataset, we empirically show that even under the restricted condition of maintaining complexity, increasing cardinality is able to improve classification accuracy. Moreover, increasing cardinality is more effective than going deeper or wider when we increase the capacity. Our models, named ResNeXt, are the foundations of our entry to the ILSVRC 2016 classification task in which we secured 2nd place. We further investigate ResNeXt on an ImageNet-5K set and the COCO detection set, also showing better results than its ResNet counterpart. The code and models are publicly available online. + + + +
    + +
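Cardinality as described above maps naturally onto grouped convolution. A minimal PyTorch sketch of a ResNeXt-style bottleneck; the widths and cardinality value are illustrative defaults, not MMPose's implementation:

```python
import torch
import torch.nn as nn

class ResNeXtBottleneck(nn.Module):
    """Bottleneck whose 3x3 conv is grouped: `groups` is the cardinality, i.e. the
    number of parallel transformations that are aggregated."""

    def __init__(self, channels, bottleneck_channels=128, cardinality=32):
        super().__init__()
        self.block = nn.Sequential(
            nn.Conv2d(channels, bottleneck_channels, 1, bias=False),
            nn.BatchNorm2d(bottleneck_channels), nn.ReLU(inplace=True),
            nn.Conv2d(bottleneck_channels, bottleneck_channels, 3, padding=1,
                      groups=cardinality, bias=False),   # aggregated transformations
            nn.BatchNorm2d(bottleneck_channels), nn.ReLU(inplace=True),
            nn.Conv2d(bottleneck_channels, channels, 1, bias=False),
            nn.BatchNorm2d(channels),
        )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(x + self.block(x))

y = ResNeXtBottleneck(256)(torch.randn(2, 256, 14, 14))   # -> (2, 256, 14, 14)
```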
    diff --git a/docs/src/papers/backbones/rsn.md b/docs/src/papers/backbones/rsn.md index b1fb1ea913..d8af907926 100644 --- a/docs/src/papers/backbones/rsn.md +++ b/docs/src/papers/backbones/rsn.md @@ -1,31 +1,31 @@ -# Learning delicate local representations for multi-person pose estimation - - - -
    -RSN (ECCV'2020) - -```bibtex -@misc{cai2020learning, - title={Learning Delicate Local Representations for Multi-Person Pose Estimation}, - author={Yuanhao Cai and Zhicheng Wang and Zhengxiong Luo and Binyi Yin and Angang Du and Haoqian Wang and Xinyu Zhou and Erjin Zhou and Xiangyu Zhang and Jian Sun}, - year={2020}, - eprint={2003.04030}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -
    - -## Abstract - - - -In this paper, we propose a novel method called Residual Steps Network (RSN). RSN aggregates features with the same spatial size (Intra-level features) efficiently to obtain delicate local representations, which retain rich low-level spatial information and result in precise keypoint localization. Additionally, we observe the output features contribute differently to final performance. To tackle this problem, we propose an efficient attention mechanism - Pose Refine Machine (PRM) to make a trade-off between local and global representations in output features and further refine the keypoint locations. Our approach won the 1st place of COCO Keypoint Challenge 2019 and achieves state-of-the-art results on both COCO and MPII benchmarks, without using extra training data and pretrained model. Our single model achieves 78.6 on COCO test-dev, 93.0 on MPII test dataset. Ensembled models achieve 79.2 on COCO test-dev, 77.1 on COCO test-challenge dataset. - - - -
    - -
    +# Learning delicate local representations for multi-person pose estimation + + + +
    +RSN (ECCV'2020) + +```bibtex +@misc{cai2020learning, + title={Learning Delicate Local Representations for Multi-Person Pose Estimation}, + author={Yuanhao Cai and Zhicheng Wang and Zhengxiong Luo and Binyi Yin and Angang Du and Haoqian Wang and Xinyu Zhou and Erjin Zhou and Xiangyu Zhang and Jian Sun}, + year={2020}, + eprint={2003.04030}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
    + +## Abstract + + + +In this paper, we propose a novel method called Residual Steps Network (RSN). RSN aggregates features with the same spatial size (Intra-level features) efficiently to obtain delicate local representations, which retain rich low-level spatial information and result in precise keypoint localization. Additionally, we observe the output features contribute differently to final performance. To tackle this problem, we propose an efficient attention mechanism - Pose Refine Machine (PRM) to make a trade-off between local and global representations in output features and further refine the keypoint locations. Our approach won the 1st place of COCO Keypoint Challenge 2019 and achieves state-of-the-art results on both COCO and MPII benchmarks, without using extra training data and pretrained model. Our single model achieves 78.6 on COCO test-dev, 93.0 on MPII test dataset. Ensembled models achieve 79.2 on COCO test-dev, 77.1 on COCO test-challenge dataset. + + + +
    + +
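The PRM attention described above trades off local and global cues in the output features. The sketch below is only a loose illustration of that gating idea in PyTorch, not the published PRM layout; all names are assumptions.

```python
import torch
import torch.nn as nn

class RefineGate(nn.Module):
    """Loose illustration: reweight output features with a channel gate (global
    context) and a spatial gate before the final keypoint head."""

    def __init__(self, channels):
        super().__init__()
        self.channel_gate = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(channels, channels, 1),
            nn.Sigmoid(),
        )
        self.spatial_gate = nn.Sequential(
            nn.Conv2d(channels, 1, 1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        return x * self.channel_gate(x) * self.spatial_gate(x)

y = RefineGate(64)(torch.randn(2, 64, 64, 48))   # -> (2, 64, 64, 48)
```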
    diff --git a/docs/src/papers/backbones/scnet.md b/docs/src/papers/backbones/scnet.md index 043c144111..24f13e82b8 100644 --- a/docs/src/papers/backbones/scnet.md +++ b/docs/src/papers/backbones/scnet.md @@ -1,30 +1,30 @@ -# Improving Convolutional Networks with Self-Calibrated Convolutions - - - -
    -SCNet (CVPR'2020) - -```bibtex -@inproceedings{liu2020improving, - title={Improving Convolutional Networks with Self-Calibrated Convolutions}, - author={Liu, Jiang-Jiang and Hou, Qibin and Cheng, Ming-Ming and Wang, Changhu and Feng, Jiashi}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={10096--10105}, - year={2020} -} -``` - -
    - -## Abstract - - - -Recent advances on CNNs are mostly devoted to designing more complex architectures to enhance their representation learning capacity. In this paper, we consider how to improve the basic convolutional feature transformation process of CNNs without tuning the model architectures. To this end, we present a novel self-calibrated convolutions that explicitly expand fields-of-view of each convolutional layers through internal communications and hence enrich the output features. In particular, unlike the standard convolutions that fuse spatial and channel-wise information using small kernels (e.g., 3x3), self-calibrated convolutions adaptively build long-range spatial and inter-channel dependencies around each spatial location through a novel self-calibration operation. Thus, it can help CNNs generate more discriminative representations by explicitly incorporating richer information. Our self-calibrated convolution design is simple and generic, and can be easily applied to augment standard convolutional layers without introducing extra parameters and complexity. Extensive experiments demonstrate that when applying self-calibrated convolutions into different backbones, our networks can significantly improve the baseline models in a variety of vision tasks, including image recognition, object detection, instance segmentation, and keypoint detection, with no need to change the network architectures. We hope this work could provide a promising way for future research in designing novel convolutional feature transformations for improving convolutional networks. Code is available on the project page. - - - -
    - -
    +# Improving Convolutional Networks with Self-Calibrated Convolutions + + + +
    +SCNet (CVPR'2020) + +```bibtex +@inproceedings{liu2020improving, + title={Improving Convolutional Networks with Self-Calibrated Convolutions}, + author={Liu, Jiang-Jiang and Hou, Qibin and Cheng, Ming-Ming and Wang, Changhu and Feng, Jiashi}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={10096--10105}, + year={2020} +} +``` + +
+ +## Abstract + + + +Recent advances in CNNs are mostly devoted to designing more complex architectures to enhance their representation learning capacity. In this paper, we consider how to improve the basic convolutional feature transformation process of CNNs without tuning the model architectures. To this end, we present novel self-calibrated convolutions that explicitly expand the field of view of each convolutional layer through internal communications and hence enrich the output features. In particular, unlike standard convolutions that fuse spatial and channel-wise information using small kernels (e.g., 3x3), self-calibrated convolutions adaptively build long-range spatial and inter-channel dependencies around each spatial location through a novel self-calibration operation. Thus, they can help CNNs generate more discriminative representations by explicitly incorporating richer information. Our self-calibrated convolution design is simple and generic, and can be easily applied to augment standard convolutional layers without introducing extra parameters and complexity. Extensive experiments demonstrate that when self-calibrated convolutions are applied to different backbones, our networks can significantly improve the baseline models in a variety of vision tasks, including image recognition, object detection, instance segmentation, and keypoint detection, with no need to change the network architectures. We hope this work could provide a promising way for future research in designing novel convolutional feature transformations for improving convolutional networks. Code is available on the project page. + + + +
    + +
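The self-calibration operation can be sketched as a two-branch convolution in which a low-resolution context builds a sigmoid gate for the full-resolution response. A hedged PyTorch sketch, simplified from the description above (kernel sizes and `pooling_r` are assumptions):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SelfCalibratedConv(nn.Module):
    """Hedged sketch: half the channels take a plain 3x3 conv; the other half are
    gated by a sigmoid built from the identity plus an upsampled low-resolution
    context, which enlarges the effective field of view."""

    def __init__(self, channels, pooling_r=4):
        super().__init__()
        assert channels % 2 == 0
        c = channels // 2
        self.k1 = nn.Conv2d(c, c, 3, padding=1)  # plain branch
        self.k2 = nn.Sequential(                 # low-resolution context branch
            nn.AvgPool2d(kernel_size=pooling_r, stride=pooling_r),
            nn.Conv2d(c, c, 3, padding=1),
        )
        self.k3 = nn.Conv2d(c, c, 3, padding=1)
        self.k4 = nn.Conv2d(c, c, 3, padding=1)

    def forward(self, x):
        x1, x2 = torch.chunk(x, 2, dim=1)
        context = F.interpolate(self.k2(x1), size=x1.shape[2:])
        gate = torch.sigmoid(x1 + context)       # self-calibration gate
        y1 = self.k4(self.k3(x1) * gate)
        y2 = self.k1(x2)
        return torch.cat([y1, y2], dim=1)

y = SelfCalibratedConv(64)(torch.randn(2, 64, 32, 32))   # -> (2, 64, 32, 32)
```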
    diff --git a/docs/src/papers/backbones/seresnet.md b/docs/src/papers/backbones/seresnet.md index 32295324d3..fcf1db99a8 100644 --- a/docs/src/papers/backbones/seresnet.md +++ b/docs/src/papers/backbones/seresnet.md @@ -1,30 +1,30 @@ -# Squeeze-and-excitation networks - - - -
    -SEResNet (CVPR'2018) - -```bibtex -@inproceedings{hu2018squeeze, - title={Squeeze-and-excitation networks}, - author={Hu, Jie and Shen, Li and Sun, Gang}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={7132--7141}, - year={2018} -} -``` - -
    - -## Abstract - - - -Convolutional neural networks are built upon the convolution operation, which extracts informative features by fusing spatial and channel-wise information together within local receptive fields. In order to boost the representational power of a network, several recent approaches have shown the benefit of enhancing spatial encoding. In this work, we focus on the channel relationship and propose a novel architectural unit, which we term the "Squeeze-and-Excitation" (SE) block, that adaptively recalibrates channel-wise feature responses by explicitly modelling interdependencies between channels. We demonstrate that by stacking these blocks together, we can construct SENet architectures that generalise extremely well across challenging datasets. Crucially, we find that SE blocks produce significant performance improvements for existing state-of-the-art deep architectures at minimal additional computational cost. SENets formed the foundation of our ILSVRC 2017 classification submission which won first place and significantly reduced the top-5 error to 2.251%, achieving a ∼25% relative improvement over the winning entry of 2016. Code and models are available at https: //github.com/hujie-frank/SENet. - - - -
    - -
    +# Squeeze-and-excitation networks + + + +
    +SEResNet (CVPR'2018) + +```bibtex +@inproceedings{hu2018squeeze, + title={Squeeze-and-excitation networks}, + author={Hu, Jie and Shen, Li and Sun, Gang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={7132--7141}, + year={2018} +} +``` + +
+ +## Abstract + + + +Convolutional neural networks are built upon the convolution operation, which extracts informative features by fusing spatial and channel-wise information together within local receptive fields. In order to boost the representational power of a network, several recent approaches have shown the benefit of enhancing spatial encoding. In this work, we focus on the channel relationship and propose a novel architectural unit, which we term the "Squeeze-and-Excitation" (SE) block, that adaptively recalibrates channel-wise feature responses by explicitly modelling interdependencies between channels. We demonstrate that by stacking these blocks together, we can construct SENet architectures that generalise extremely well across challenging datasets. Crucially, we find that SE blocks produce significant performance improvements for existing state-of-the-art deep architectures at minimal additional computational cost. SENets formed the foundation of our ILSVRC 2017 classification submission which won first place and significantly reduced the top-5 error to 2.251%, achieving a ∼25% relative improvement over the winning entry of 2016. Code and models are available at https://github.com/hujie-frank/SENet. + + + +
    + +
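The squeeze-and-excitation recalibration above is a few lines of PyTorch: global pooling, a bottleneck MLP, and a per-channel sigmoid gate. A minimal sketch, with the reduction ratio as an assumed default:

```python
import torch
import torch.nn as nn

class SEBlock(nn.Module):
    """Squeeze-and-Excitation: squeeze with global average pooling, excite with a
    small bottleneck MLP, then rescale every channel by its learned weight."""

    def __init__(self, channels, reduction=16):
        super().__init__()
        self.gate = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(channels, channels // reduction, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels // reduction, channels, 1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        return x * self.gate(x)

y = SEBlock(256)(torch.randn(2, 256, 14, 14))   # -> (2, 256, 14, 14)
```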
    diff --git a/docs/src/papers/backbones/shufflenetv1.md b/docs/src/papers/backbones/shufflenetv1.md index a314c9b709..d60a1af890 100644 --- a/docs/src/papers/backbones/shufflenetv1.md +++ b/docs/src/papers/backbones/shufflenetv1.md @@ -1,30 +1,30 @@ -# Shufflenet: An extremely efficient convolutional neural network for mobile devices - - - -
    -ShufflenetV1 (CVPR'2018) - -```bibtex -@inproceedings{zhang2018shufflenet, - title={Shufflenet: An extremely efficient convolutional neural network for mobile devices}, - author={Zhang, Xiangyu and Zhou, Xinyu and Lin, Mengxiao and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={6848--6856}, - year={2018} -} -``` - -
    - -## Abstract - - - -We introduce an extremely computation-efficient CNN architecture named ShuffleNet, which is designed specially for mobile devices with very limited computing power (e.g., 10-150 MFLOPs). The new architecture utilizes two new operations, pointwise group convolution and channel shuffle, to greatly reduce computation cost while maintaining accuracy. Experiments on ImageNet classification and MS COCO object detection demonstrate the superior performance of ShuffleNet over other structures, e.g. lower top-1 error (absolute 7.8%) than recent MobileNet~cite{howard2017mobilenets} on ImageNet classification task, under the computation budget of 40 MFLOPs. On an ARM-based mobile device, ShuffleNet achieves $sim$13$ imes$ actual speedup over AlexNet while maintaining comparable accuracy. - - - -
    - -
    +# Shufflenet: An extremely efficient convolutional neural network for mobile devices + + + +
    +ShufflenetV1 (CVPR'2018) + +```bibtex +@inproceedings{zhang2018shufflenet, + title={Shufflenet: An extremely efficient convolutional neural network for mobile devices}, + author={Zhang, Xiangyu and Zhou, Xinyu and Lin, Mengxiao and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={6848--6856}, + year={2018} +} +``` + +
+ +## Abstract + + + +We introduce an extremely computation-efficient CNN architecture named ShuffleNet, which is designed specially for mobile devices with very limited computing power (e.g., 10-150 MFLOPs). The new architecture utilizes two new operations, pointwise group convolution and channel shuffle, to greatly reduce computation cost while maintaining accuracy. Experiments on ImageNet classification and MS COCO object detection demonstrate the superior performance of ShuffleNet over other structures, e.g., lower top-1 error (absolute 7.8%) than the recent MobileNet on the ImageNet classification task, under the computation budget of 40 MFLOPs. On an ARM-based mobile device, ShuffleNet achieves a ~13× actual speedup over AlexNet while maintaining comparable accuracy. + + + +
    + +
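Channel shuffle, one of the two operations named above, is a pure tensor reshape. A minimal PyTorch sketch:

```python
import torch

def channel_shuffle(x, groups):
    """Permute channels so information can flow across group-convolution groups."""
    b, c, h, w = x.shape
    return x.view(b, groups, c // groups, h, w).transpose(1, 2).reshape(b, c, h, w)

x = torch.randn(1, 8, 4, 4)
y = channel_shuffle(x, groups=2)   # channels reordered as 0,4,1,5,2,6,3,7
```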
    diff --git a/docs/src/papers/backbones/shufflenetv2.md b/docs/src/papers/backbones/shufflenetv2.md index 834ee38bc0..5ecf6ac785 100644 --- a/docs/src/papers/backbones/shufflenetv2.md +++ b/docs/src/papers/backbones/shufflenetv2.md @@ -1,30 +1,30 @@ -# Shufflenet v2: Practical guidelines for efficient cnn architecture design - - - -
    -ShufflenetV2 (ECCV'2018) - -```bibtex -@inproceedings{ma2018shufflenet, - title={Shufflenet v2: Practical guidelines for efficient cnn architecture design}, - author={Ma, Ningning and Zhang, Xiangyu and Zheng, Hai-Tao and Sun, Jian}, - booktitle={Proceedings of the European conference on computer vision (ECCV)}, - pages={116--131}, - year={2018} -} -``` - -
    - -## Abstract - - - -Current network architecture design is mostly guided by the indirect metric of computation complexity, i.e., FLOPs. However, the direct metric, such as speed, also depends on the other factors such as memory access cost and platform characterics. Taking these factors into account, this work proposes practical guidelines for efficient network de- sign. Accordingly, a new architecture called ShuffleNet V2 is presented. Comprehensive experiments verify that it is the state-of-the-art in both speed and accuracy. - - - -
    - -
    +# Shufflenet v2: Practical guidelines for efficient cnn architecture design + + + +
    +ShufflenetV2 (ECCV'2018) + +```bibtex +@inproceedings{ma2018shufflenet, + title={Shufflenet v2: Practical guidelines for efficient cnn architecture design}, + author={Ma, Ningning and Zhang, Xiangyu and Zheng, Hai-Tao and Sun, Jian}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={116--131}, + year={2018} +} +``` + +
+ +## Abstract + + + +Current network architecture design is mostly guided by the indirect metric of computation complexity, i.e., FLOPs. However, the direct metric, such as speed, also depends on other factors such as memory access cost and platform characteristics. Taking these factors into account, this work proposes practical guidelines for efficient network design. Accordingly, a new architecture called ShuffleNet V2 is presented. Comprehensive experiments verify that it is state-of-the-art in both speed and accuracy. + + + +
    + +
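One way to read the guidelines above in code: the stride-1 ShuffleNet V2 unit splits the channels, keeps the transformed half at equal input/output width (cheap on memory access), concatenates, and then shuffles so the two halves mix in the next unit. A hedged sketch with illustrative widths, not the exact published block:

```python
import torch
import torch.nn as nn

class ShuffleV2Unit(nn.Module):
    """Sketch of the stride-1 unit: channel split, an equal-width branch with a
    depthwise 3x3 in the middle, concat, then a 2-group channel shuffle."""

    def __init__(self, channels):
        super().__init__()
        assert channels % 2 == 0
        c = channels // 2
        self.branch = nn.Sequential(
            nn.Conv2d(c, c, 1, bias=False), nn.BatchNorm2d(c), nn.ReLU(inplace=True),
            nn.Conv2d(c, c, 3, padding=1, groups=c, bias=False), nn.BatchNorm2d(c),
            nn.Conv2d(c, c, 1, bias=False), nn.BatchNorm2d(c), nn.ReLU(inplace=True),
        )

    def forward(self, x):
        x1, x2 = x.chunk(2, dim=1)
        out = torch.cat([x1, self.branch(x2)], dim=1)
        b, c, h, w = out.shape                      # channel shuffle with 2 groups
        return out.view(b, 2, c // 2, h, w).transpose(1, 2).reshape(b, c, h, w)

y = ShuffleV2Unit(48)(torch.randn(2, 48, 16, 16))   # -> (2, 48, 16, 16)
```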
    diff --git a/docs/src/papers/backbones/swin.md b/docs/src/papers/backbones/swin.md index a2c04c0cf2..663e99fb62 100644 --- a/docs/src/papers/backbones/swin.md +++ b/docs/src/papers/backbones/swin.md @@ -1,30 +1,30 @@ -# Swin transformer: Hierarchical vision transformer using shifted windows - - - -
    -Swin (ICCV'2021) - -```bibtex -@inproceedings{liu2021swin, - title={Swin transformer: Hierarchical vision transformer using shifted windows}, - author={Liu, Ze and Lin, Yutong and Cao, Yue and Hu, Han and Wei, Yixuan and Zhang, Zheng and Lin, Stephen and Guo, Baining}, - booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, - pages={10012--10022}, - year={2021} -} -``` - -
    - -## Abstract - - - -This paper presents a new vision Transformer, called Swin Transformer, that capably serves as a general-purpose backbone for computer vision. Challenges in adapting Transformer from language to vision arise from differences between the two domains, such as large variations in the scale of visual entities and the high resolution of pixels in images compared to words in text. To address these differences, we propose a hierarchical Transformer whose representation is computed with Shifted windows. The shifted windowing scheme brings greater efficiency by limiting self-attention computation to non-overlapping local windows while also allowing for cross-window connection. This hierarchical architecture has the flexibility to model at various scales and has linear computational complexity with respect to image size. These qualities of Swin Transformer make it compatible with a broad range of vision tasks, including image classification (87.3 top-1 accuracy on ImageNet-1K) and dense prediction tasks such as object detection (58.7 box AP and 51.1 mask AP on COCO testdev) and semantic segmentation (53.5 mIoU on ADE20K val). Its performance surpasses the previous state-of-theart by a large margin of +2.7 box AP and +2.6 mask AP on COCO, and +3.2 mIoU on ADE20K, demonstrating the potential of Transformer-based models as vision backbones. The hierarchical design and the shifted window approach also prove beneficial for all-MLP architectures. - - - -
    - -
    +# Swin transformer: Hierarchical vision transformer using shifted windows + + + +
    +Swin (ICCV'2021) + +```bibtex +@inproceedings{liu2021swin, + title={Swin transformer: Hierarchical vision transformer using shifted windows}, + author={Liu, Ze and Lin, Yutong and Cao, Yue and Hu, Han and Wei, Yixuan and Zhang, Zheng and Lin, Stephen and Guo, Baining}, + booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, + pages={10012--10022}, + year={2021} +} +``` + +
+ +## Abstract + + + +This paper presents a new vision Transformer, called Swin Transformer, that capably serves as a general-purpose backbone for computer vision. Challenges in adapting Transformer from language to vision arise from differences between the two domains, such as large variations in the scale of visual entities and the high resolution of pixels in images compared to words in text. To address these differences, we propose a hierarchical Transformer whose representation is computed with Shifted windows. The shifted windowing scheme brings greater efficiency by limiting self-attention computation to non-overlapping local windows while also allowing for cross-window connection. This hierarchical architecture has the flexibility to model at various scales and has linear computational complexity with respect to image size. These qualities of Swin Transformer make it compatible with a broad range of vision tasks, including image classification (87.3 top-1 accuracy on ImageNet-1K) and dense prediction tasks such as object detection (58.7 box AP and 51.1 mask AP on COCO test-dev) and semantic segmentation (53.5 mIoU on ADE20K val). Its performance surpasses the previous state-of-the-art by a large margin of +2.7 box AP and +2.6 mask AP on COCO, and +3.2 mIoU on ADE20K, demonstrating the potential of Transformer-based models as vision backbones. The hierarchical design and the shifted window approach also prove beneficial for all-MLP architectures. + + + +
    + +
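Window partitioning and the cyclic shift behind "shifted windows" are short tensor manipulations. A minimal PyTorch sketch with toy shapes; the helper name and layout are assumptions for the example:

```python
import torch

def window_partition(x, window_size):
    """Split a (B, H, W, C) feature map into non-overlapping windows of
    window_size x window_size tokens; attention is then computed per window."""
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    return x.permute(0, 1, 3, 2, 4, 5).reshape(-1, window_size * window_size, C)

x = torch.randn(2, 8, 8, 96)            # toy feature map in (B, H, W, C) layout
windows = window_partition(x, 4)        # -> (8, 16, 96): 4 windows per image

# the "shifted" variant offsets the grid by half a window with a cyclic roll,
# so successive blocks create cross-window connections
shifted = torch.roll(x, shifts=(-2, -2), dims=(1, 2))
shifted_windows = window_partition(shifted, 4)
```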
    diff --git a/docs/src/papers/backbones/vgg.md b/docs/src/papers/backbones/vgg.md index 3a92a46b98..8f9e9069c7 100644 --- a/docs/src/papers/backbones/vgg.md +++ b/docs/src/papers/backbones/vgg.md @@ -1,29 +1,29 @@ -# Very Deep Convolutional Networks for Large-Scale Image Recognition - - - -
    -VGG (ICLR'2015) - -```bibtex -@article{simonyan2014very, - title={Very deep convolutional networks for large-scale image recognition}, - author={Simonyan, Karen and Zisserman, Andrew}, - journal={arXiv preprint arXiv:1409.1556}, - year={2014} -} -``` - -
    - -## Abstract - - - -In this work we investigate the effect of the convolutional network depth on its accuracy in the large-scale image recognition setting. Our main contribution is a thorough evaluation of networks of increasing depth using an architecture with very small (3x3) convolution filters, which shows that a significant improvement on the prior-art configurations can be achieved by pushing the depth to 16-19 weight layers. These findings were the basis of our ImageNet Challenge 2014 submission, where our team secured the first and the second places in the localisation and classification tracks respectively. We also show that our representations generalise well to other datasets, where they achieve state-of-the-art results. We have made our two best-performing ConvNet models publicly available to facilitate further research on the use of deep visual representations in computer vision. - - - -
    - -
    +# Very Deep Convolutional Networks for Large-Scale Image Recognition + + + +
    +VGG (ICLR'2015) + +```bibtex +@article{simonyan2014very, + title={Very deep convolutional networks for large-scale image recognition}, + author={Simonyan, Karen and Zisserman, Andrew}, + journal={arXiv preprint arXiv:1409.1556}, + year={2014} +} +``` + +
    + +## Abstract + + + +In this work we investigate the effect of the convolutional network depth on its accuracy in the large-scale image recognition setting. Our main contribution is a thorough evaluation of networks of increasing depth using an architecture with very small (3x3) convolution filters, which shows that a significant improvement on the prior-art configurations can be achieved by pushing the depth to 16-19 weight layers. These findings were the basis of our ImageNet Challenge 2014 submission, where our team secured the first and the second places in the localisation and classification tracks respectively. We also show that our representations generalise well to other datasets, where they achieve state-of-the-art results. We have made our two best-performing ConvNet models publicly available to facilitate further research on the use of deep visual representations in computer vision. + + + +
    + +
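The "stack of 3x3 convolutions" recipe above can be captured by a small stage builder. A minimal PyTorch sketch of a VGG-16-style feature extractor; the helper name and widths are illustrative:

```python
import torch.nn as nn

def vgg_stage(in_ch, out_ch, num_convs):
    """One VGG-style stage: a stack of 3x3 convs (same width) followed by 2x2 max-pooling."""
    layers = []
    for i in range(num_convs):
        layers += [nn.Conv2d(in_ch if i == 0 else out_ch, out_ch, 3, padding=1),
                   nn.ReLU(inplace=True)]
    layers.append(nn.MaxPool2d(2, 2))
    return nn.Sequential(*layers)

# a VGG-16 style feature extractor: five stages of increasing width
features = nn.Sequential(
    vgg_stage(3, 64, 2), vgg_stage(64, 128, 2), vgg_stage(128, 256, 3),
    vgg_stage(256, 512, 3), vgg_stage(512, 512, 3),
)
```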
    diff --git a/docs/src/papers/backbones/vipnas.md b/docs/src/papers/backbones/vipnas.md index 5f52a8cac0..53058bf7bb 100644 --- a/docs/src/papers/backbones/vipnas.md +++ b/docs/src/papers/backbones/vipnas.md @@ -1,29 +1,29 @@ -# ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search - - - -
    -ViPNAS (CVPR'2021) - -```bibtex -@article{xu2021vipnas, - title={ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search}, - author={Xu, Lumin and Guan, Yingda and Jin, Sheng and Liu, Wentao and Qian, Chen and Luo, Ping and Ouyang, Wanli and Wang, Xiaogang}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - year={2021} -} -``` - -
    - -## Abstract - - - -Human pose estimation has achieved significant progress in recent years. However, most of the recent methods focus on improving accuracy using complicated models and ignoring real-time efficiency. To achieve a better trade-off between accuracy and efficiency, we propose a novel neural architecture search (NAS) method, termed ViPNAS, to search networks in both spatial and temporal levels for fast online video pose estimation. In the spatial level, we carefully design the search space with five different dimensions including network depth, width, kernel size, group number, and attentions. In the temporal level, we search from a series of temporal feature fusions to optimize the total accuracy and speed across multiple video frames. To the best of our knowledge, we are the first to search for the temporal feature fusion and automatic computation allocation in videos. Extensive experiments demonstrate the effectiveness of our approach on the challenging COCO2017 and PoseTrack2018 datasets. Our discovered model family, S-ViPNAS and T-ViPNAS, achieve significantly higher inference speed (CPU real-time) without sacrificing the accuracy compared to the previous state-of-the-art methods. - - - -
    - -
    +# ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search + + + +
+ViPNAS (CVPR'2021) + +```bibtex +@inproceedings{xu2021vipnas, + title={ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search}, + author={Xu, Lumin and Guan, Yingda and Jin, Sheng and Liu, Wentao and Qian, Chen and Luo, Ping and Ouyang, Wanli and Wang, Xiaogang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + year={2021} +} +``` + +
    + +## Abstract + + + +Human pose estimation has achieved significant progress in recent years. However, most of the recent methods focus on improving accuracy using complicated models and ignoring real-time efficiency. To achieve a better trade-off between accuracy and efficiency, we propose a novel neural architecture search (NAS) method, termed ViPNAS, to search networks in both spatial and temporal levels for fast online video pose estimation. In the spatial level, we carefully design the search space with five different dimensions including network depth, width, kernel size, group number, and attentions. In the temporal level, we search from a series of temporal feature fusions to optimize the total accuracy and speed across multiple video frames. To the best of our knowledge, we are the first to search for the temporal feature fusion and automatic computation allocation in videos. Extensive experiments demonstrate the effectiveness of our approach on the challenging COCO2017 and PoseTrack2018 datasets. Our discovered model family, S-ViPNAS and T-ViPNAS, achieve significantly higher inference speed (CPU real-time) without sacrificing the accuracy compared to the previous state-of-the-art methods. + + + +
    + +
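The abstract lists five spatial search dimensions (depth, width, kernel size, group number, attention). The sketch below only illustrates what sampling such a space looks like; the value ranges and function names are made up for the example and are not the paper's actual search space or MMPose code.

```python
import random

# Illustrative only: a toy sampler over the kind of spatial search dimensions
# the abstract lists. Ranges are invented for the example.
SEARCH_SPACE = {
    "depth":     [2, 3, 4],
    "width":     [64, 96, 128, 160],
    "kernel":    [3, 5, 7],
    "groups":    [1, 2, 4],
    "attention": [False, True],
}

def sample_architecture(space=SEARCH_SPACE, num_stages=4, seed=None):
    """Draw one candidate architecture: an independent choice per stage and dimension."""
    rng = random.Random(seed)
    return [{name: rng.choice(options) for name, options in space.items()}
            for _ in range(num_stages)]

print(sample_architecture(seed=0))
```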
    diff --git a/docs/src/papers/datasets/300w.md b/docs/src/papers/datasets/300w.md index 7af778ee6d..b0cba72bae 100644 --- a/docs/src/papers/datasets/300w.md +++ b/docs/src/papers/datasets/300w.md @@ -1,20 +1,20 @@ -# 300 faces in-the-wild challenge: Database and results - - - -
    -300W (IMAVIS'2016) - -```bibtex -@article{sagonas2016300, - title={300 faces in-the-wild challenge: Database and results}, - author={Sagonas, Christos and Antonakos, Epameinondas and Tzimiropoulos, Georgios and Zafeiriou, Stefanos and Pantic, Maja}, - journal={Image and vision computing}, - volume={47}, - pages={3--18}, - year={2016}, - publisher={Elsevier} -} -``` - -
    +# 300 faces in-the-wild challenge: Database and results + + + +
    +300W (IMAVIS'2016) + +```bibtex +@article{sagonas2016300, + title={300 faces in-the-wild challenge: Database and results}, + author={Sagonas, Christos and Antonakos, Epameinondas and Tzimiropoulos, Georgios and Zafeiriou, Stefanos and Pantic, Maja}, + journal={Image and vision computing}, + volume={47}, + pages={3--18}, + year={2016}, + publisher={Elsevier} +} +``` + +
    diff --git a/docs/src/papers/datasets/aflw.md b/docs/src/papers/datasets/aflw.md index f04f265c83..d5ee4a8820 100644 --- a/docs/src/papers/datasets/aflw.md +++ b/docs/src/papers/datasets/aflw.md @@ -1,19 +1,19 @@ -# Annotated facial landmarks in the wild: A large-scale, real-world database for facial landmark localization - - - -
    -AFLW (ICCVW'2011) - -```bibtex -@inproceedings{koestinger2011annotated, - title={Annotated facial landmarks in the wild: A large-scale, real-world database for facial landmark localization}, - author={Koestinger, Martin and Wohlhart, Paul and Roth, Peter M and Bischof, Horst}, - booktitle={2011 IEEE international conference on computer vision workshops (ICCV workshops)}, - pages={2144--2151}, - year={2011}, - organization={IEEE} -} -``` - -
    +# Annotated facial landmarks in the wild: A large-scale, real-world database for facial landmark localization + + + +
    +AFLW (ICCVW'2011) + +```bibtex +@inproceedings{koestinger2011annotated, + title={Annotated facial landmarks in the wild: A large-scale, real-world database for facial landmark localization}, + author={Koestinger, Martin and Wohlhart, Paul and Roth, Peter M and Bischof, Horst}, + booktitle={2011 IEEE international conference on computer vision workshops (ICCV workshops)}, + pages={2144--2151}, + year={2011}, + organization={IEEE} +} +``` + +
    diff --git a/docs/src/papers/datasets/aic.md b/docs/src/papers/datasets/aic.md index 5054609a39..8e79f814f8 100644 --- a/docs/src/papers/datasets/aic.md +++ b/docs/src/papers/datasets/aic.md @@ -1,17 +1,17 @@ -# Ai challenger: A large-scale dataset for going deeper in image understanding - - - -
    -AI Challenger (ArXiv'2017) - -```bibtex -@article{wu2017ai, - title={Ai challenger: A large-scale dataset for going deeper in image understanding}, - author={Wu, Jiahong and Zheng, He and Zhao, Bo and Li, Yixin and Yan, Baoming and Liang, Rui and Wang, Wenjia and Zhou, Shipei and Lin, Guosen and Fu, Yanwei and others}, - journal={arXiv preprint arXiv:1711.06475}, - year={2017} -} -``` - -
    +# Ai challenger: A large-scale dataset for going deeper in image understanding + + + +
    +AI Challenger (ArXiv'2017) + +```bibtex +@article{wu2017ai, + title={Ai challenger: A large-scale dataset for going deeper in image understanding}, + author={Wu, Jiahong and Zheng, He and Zhao, Bo and Li, Yixin and Yan, Baoming and Liang, Rui and Wang, Wenjia and Zhou, Shipei and Lin, Guosen and Fu, Yanwei and others}, + journal={arXiv preprint arXiv:1711.06475}, + year={2017} +} +``` + +
    diff --git a/docs/src/papers/datasets/animalkingdom.md b/docs/src/papers/datasets/animalkingdom.md index 64b5fe375a..815fa71e89 100644 --- a/docs/src/papers/datasets/animalkingdom.md +++ b/docs/src/papers/datasets/animalkingdom.md @@ -1,19 +1,19 @@ -# Animal Kingdom: A Large and Diverse Dataset for Animal Behavior Understanding - - - -
    -Animal Kingdom (CVPR'2022) - -```bibtex -@InProceedings{Ng_2022_CVPR, - author = {Ng, Xun Long and Ong, Kian Eng and Zheng, Qichen and Ni, Yun and Yeo, Si Yong and Liu, Jun}, - title = {Animal Kingdom: A Large and Diverse Dataset for Animal Behavior Understanding}, - booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, - month = {June}, - year = {2022}, - pages = {19023-19034} - } -``` - -
    +# Animal Kingdom: A Large and Diverse Dataset for Animal Behavior Understanding + + + +
    +Animal Kingdom (CVPR'2022) + +```bibtex +@InProceedings{Ng_2022_CVPR, + author = {Ng, Xun Long and Ong, Kian Eng and Zheng, Qichen and Ni, Yun and Yeo, Si Yong and Liu, Jun}, + title = {Animal Kingdom: A Large and Diverse Dataset for Animal Behavior Understanding}, + booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2022}, + pages = {19023-19034} + } +``` + +
    diff --git a/docs/src/papers/datasets/animalpose.md b/docs/src/papers/datasets/animalpose.md index 58303b8ee2..ab55c87895 100644 --- a/docs/src/papers/datasets/animalpose.md +++ b/docs/src/papers/datasets/animalpose.md @@ -1,18 +1,18 @@ -# Cross-Domain Adaptation for Animal Pose Estimation - - - -
    -Animal-Pose (ICCV'2019) - -```bibtex -@InProceedings{Cao_2019_ICCV, - author = {Cao, Jinkun and Tang, Hongyang and Fang, Hao-Shu and Shen, Xiaoyong and Lu, Cewu and Tai, Yu-Wing}, - title = {Cross-Domain Adaptation for Animal Pose Estimation}, - booktitle = {The IEEE International Conference on Computer Vision (ICCV)}, - month = {October}, - year = {2019} -} -``` - -
    +# Cross-Domain Adaptation for Animal Pose Estimation + + + +
    +Animal-Pose (ICCV'2019) + +```bibtex +@InProceedings{Cao_2019_ICCV, + author = {Cao, Jinkun and Tang, Hongyang and Fang, Hao-Shu and Shen, Xiaoyong and Lu, Cewu and Tai, Yu-Wing}, + title = {Cross-Domain Adaptation for Animal Pose Estimation}, + booktitle = {The IEEE International Conference on Computer Vision (ICCV)}, + month = {October}, + year = {2019} +} +``` + +
    diff --git a/docs/src/papers/datasets/ap10k.md b/docs/src/papers/datasets/ap10k.md index e36988d833..73041fea55 100644 --- a/docs/src/papers/datasets/ap10k.md +++ b/docs/src/papers/datasets/ap10k.md @@ -1,19 +1,19 @@ -# AP-10K: A Benchmark for Animal Pose Estimation in the Wild - - - -
    -AP-10K (NeurIPS'2021) - -```bibtex -@misc{yu2021ap10k, - title={AP-10K: A Benchmark for Animal Pose Estimation in the Wild}, - author={Hang Yu and Yufei Xu and Jing Zhang and Wei Zhao and Ziyu Guan and Dacheng Tao}, - year={2021}, - eprint={2108.12617}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -
    +# AP-10K: A Benchmark for Animal Pose Estimation in the Wild + + + +
    +AP-10K (NeurIPS'2021) + +```bibtex +@misc{yu2021ap10k, + title={AP-10K: A Benchmark for Animal Pose Estimation in the Wild}, + author={Hang Yu and Yufei Xu and Jing Zhang and Wei Zhao and Ziyu Guan and Dacheng Tao}, + year={2021}, + eprint={2108.12617}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
    diff --git a/docs/src/papers/datasets/atrw.md b/docs/src/papers/datasets/atrw.md index fe83ac0e94..4fed4ccd26 100644 --- a/docs/src/papers/datasets/atrw.md +++ b/docs/src/papers/datasets/atrw.md @@ -1,18 +1,18 @@ -# ATRW: A Benchmark for Amur Tiger Re-identification in the Wild - - - -
    -ATRW (ACM MM'2020) - -```bibtex -@inproceedings{li2020atrw, - title={ATRW: A Benchmark for Amur Tiger Re-identification in the Wild}, - author={Li, Shuyuan and Li, Jianguo and Tang, Hanlin and Qian, Rui and Lin, Weiyao}, - booktitle={Proceedings of the 28th ACM International Conference on Multimedia}, - pages={2590--2598}, - year={2020} -} -``` - -
    +# ATRW: A Benchmark for Amur Tiger Re-identification in the Wild + + + +
    +ATRW (ACM MM'2020) + +```bibtex +@inproceedings{li2020atrw, + title={ATRW: A Benchmark for Amur Tiger Re-identification in the Wild}, + author={Li, Shuyuan and Li, Jianguo and Tang, Hanlin and Qian, Rui and Lin, Weiyao}, + booktitle={Proceedings of the 28th ACM International Conference on Multimedia}, + pages={2590--2598}, + year={2020} +} +``` + +
    diff --git a/docs/src/papers/datasets/campus_and_shelf.md b/docs/src/papers/datasets/campus_and_shelf.md index 8748be137e..4b7babe038 100644 --- a/docs/src/papers/datasets/campus_and_shelf.md +++ b/docs/src/papers/datasets/campus_and_shelf.md @@ -1,20 +1,20 @@ -# 3D Pictorial Structures for Multiple Human Pose Estimation - - - -
    -Campus and Shelf (CVPR'2014) - -```bibtex -@inproceedings {belagian14multi, - title = {{3D} Pictorial Structures for Multiple Human Pose Estimation}, - author = {Belagiannis, Vasileios and Amin, Sikandar and Andriluka, Mykhaylo and Schiele, Bernt and Navab - Nassir and Ilic, Slobodan}, - booktitle = {IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2014}, - month = {June}, - organization={IEEE} -} -``` - -
    +# 3D Pictorial Structures for Multiple Human Pose Estimation + + + +
    +Campus and Shelf (CVPR'2014) + +```bibtex +@inproceedings {belagian14multi, + title = {{3D} Pictorial Structures for Multiple Human Pose Estimation}, + author = {Belagiannis, Vasileios and Amin, Sikandar and Andriluka, Mykhaylo and Schiele, Bernt and Navab + Nassir and Ilic, Slobodan}, + booktitle = {IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June}, + organization={IEEE} +} +``` + +
    diff --git a/docs/src/papers/datasets/coco.md b/docs/src/papers/datasets/coco.md index 8051dc756b..c595c38403 100644 --- a/docs/src/papers/datasets/coco.md +++ b/docs/src/papers/datasets/coco.md @@ -1,19 +1,19 @@ -# Microsoft coco: Common objects in context - - - -
    -COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
    +# Microsoft coco: Common objects in context + + + +
    +COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
    diff --git a/docs/src/papers/datasets/coco_wholebody.md b/docs/src/papers/datasets/coco_wholebody.md index 69cb2b98d1..0717a78f56 100644 --- a/docs/src/papers/datasets/coco_wholebody.md +++ b/docs/src/papers/datasets/coco_wholebody.md @@ -1,17 +1,17 @@ -# Whole-Body Human Pose Estimation in the Wild - - - -
    -COCO-WholeBody (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
    +# Whole-Body Human Pose Estimation in the Wild + + + +
    +COCO-WholeBody (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
    diff --git a/docs/src/papers/datasets/coco_wholebody_face.md b/docs/src/papers/datasets/coco_wholebody_face.md index 3e1d3d4501..9b48922d54 100644 --- a/docs/src/papers/datasets/coco_wholebody_face.md +++ b/docs/src/papers/datasets/coco_wholebody_face.md @@ -1,17 +1,17 @@ -# Whole-Body Human Pose Estimation in the Wild - - - -
    -COCO-WholeBody-Face (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
    +# Whole-Body Human Pose Estimation in the Wild + + + +
    +COCO-WholeBody-Face (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
    diff --git a/docs/src/papers/datasets/coco_wholebody_hand.md b/docs/src/papers/datasets/coco_wholebody_hand.md index 51e2169363..c2b2bb7c9d 100644 --- a/docs/src/papers/datasets/coco_wholebody_hand.md +++ b/docs/src/papers/datasets/coco_wholebody_hand.md @@ -1,17 +1,17 @@ -# Whole-Body Human Pose Estimation in the Wild - - - -
    -COCO-WholeBody-Hand (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
    +# Whole-Body Human Pose Estimation in the Wild + + + +
    +COCO-WholeBody-Hand (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
    diff --git a/docs/src/papers/datasets/cofw.md b/docs/src/papers/datasets/cofw.md index 20d29acdc7..3b712e7682 100644 --- a/docs/src/papers/datasets/cofw.md +++ b/docs/src/papers/datasets/cofw.md @@ -1,18 +1,18 @@ -# Robust face landmark estimation under occlusion - - - -
    -COFW (ICCV'2013) - -```bibtex -@inproceedings{burgos2013robust, - title={Robust face landmark estimation under occlusion}, - author={Burgos-Artizzu, Xavier P and Perona, Pietro and Doll{\'a}r, Piotr}, - booktitle={Proceedings of the IEEE international conference on computer vision}, - pages={1513--1520}, - year={2013} -} -``` - -
    +# Robust face landmark estimation under occlusion + + + +
    +COFW (ICCV'2013) + +```bibtex +@inproceedings{burgos2013robust, + title={Robust face landmark estimation under occlusion}, + author={Burgos-Artizzu, Xavier P and Perona, Pietro and Doll{\'a}r, Piotr}, + booktitle={Proceedings of the IEEE international conference on computer vision}, + pages={1513--1520}, + year={2013} +} +``` + +
    diff --git a/docs/src/papers/datasets/crowdpose.md b/docs/src/papers/datasets/crowdpose.md index ee678aa74f..c5bce3a13b 100644 --- a/docs/src/papers/datasets/crowdpose.md +++ b/docs/src/papers/datasets/crowdpose.md @@ -1,17 +1,17 @@ -# CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark - - - -
    -CrowdPose (CVPR'2019) - -```bibtex -@article{li2018crowdpose, - title={CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark}, - author={Li, Jiefeng and Wang, Can and Zhu, Hao and Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu}, - journal={arXiv preprint arXiv:1812.00324}, - year={2018} -} -``` - -
    +# CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark + + + +
    +CrowdPose (CVPR'2019) + +```bibtex +@article{li2018crowdpose, + title={CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark}, + author={Li, Jiefeng and Wang, Can and Zhu, Hao and Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu}, + journal={arXiv preprint arXiv:1812.00324}, + year={2018} +} +``` + +
    diff --git a/docs/src/papers/datasets/deepfashion.md b/docs/src/papers/datasets/deepfashion.md index 3955cf3092..f661e8680f 100644 --- a/docs/src/papers/datasets/deepfashion.md +++ b/docs/src/papers/datasets/deepfashion.md @@ -1,35 +1,35 @@ -# DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations - - - -
    -DeepFashion (CVPR'2016) - -```bibtex -@inproceedings{liuLQWTcvpr16DeepFashion, - author = {Liu, Ziwei and Luo, Ping and Qiu, Shi and Wang, Xiaogang and Tang, Xiaoou}, - title = {DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations}, - booktitle = {Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - month = {June}, - year = {2016} -} -``` - -
    - - - -
    -DeepFashion (ECCV'2016) - -```bibtex -@inproceedings{liuYLWTeccv16FashionLandmark, - author = {Liu, Ziwei and Yan, Sijie and Luo, Ping and Wang, Xiaogang and Tang, Xiaoou}, - title = {Fashion Landmark Detection in the Wild}, - booktitle = {European Conference on Computer Vision (ECCV)}, - month = {October}, - year = {2016} - } -``` - -
    +# DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations + + + +
    +DeepFashion (CVPR'2016) + +```bibtex +@inproceedings{liuLQWTcvpr16DeepFashion, + author = {Liu, Ziwei and Luo, Ping and Qiu, Shi and Wang, Xiaogang and Tang, Xiaoou}, + title = {DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations}, + booktitle = {Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2016} +} +``` + +
    + + + +
    +DeepFashion (ECCV'2016) + +```bibtex +@inproceedings{liuYLWTeccv16FashionLandmark, + author = {Liu, Ziwei and Yan, Sijie and Luo, Ping and Wang, Xiaogang and Tang, Xiaoou}, + title = {Fashion Landmark Detection in the Wild}, + booktitle = {European Conference on Computer Vision (ECCV)}, + month = {October}, + year = {2016} + } +``` + +
    diff --git a/docs/src/papers/datasets/fly.md b/docs/src/papers/datasets/fly.md index ed1a9c148e..31071727c9 100644 --- a/docs/src/papers/datasets/fly.md +++ b/docs/src/papers/datasets/fly.md @@ -1,21 +1,21 @@ -# Fast animal pose estimation using deep neural networks - - - -
    -Vinegar Fly (Nature Methods'2019) - -```bibtex -@article{pereira2019fast, - title={Fast animal pose estimation using deep neural networks}, - author={Pereira, Talmo D and Aldarondo, Diego E and Willmore, Lindsay and Kislin, Mikhail and Wang, Samuel S-H and Murthy, Mala and Shaevitz, Joshua W}, - journal={Nature methods}, - volume={16}, - number={1}, - pages={117--125}, - year={2019}, - publisher={Nature Publishing Group} -} -``` - -
    +# Fast animal pose estimation using deep neural networks + + + +
    +Vinegar Fly (Nature Methods'2019) + +```bibtex +@article{pereira2019fast, + title={Fast animal pose estimation using deep neural networks}, + author={Pereira, Talmo D and Aldarondo, Diego E and Willmore, Lindsay and Kislin, Mikhail and Wang, Samuel S-H and Murthy, Mala and Shaevitz, Joshua W}, + journal={Nature methods}, + volume={16}, + number={1}, + pages={117--125}, + year={2019}, + publisher={Nature Publishing Group} +} +``` + +
    diff --git a/docs/src/papers/datasets/freihand.md b/docs/src/papers/datasets/freihand.md index ee08602069..3090989cb6 100644 --- a/docs/src/papers/datasets/freihand.md +++ b/docs/src/papers/datasets/freihand.md @@ -1,18 +1,18 @@ -# Freihand: A dataset for markerless capture of hand pose and shape from single rgb images - - - -
    -FreiHand (ICCV'2019) - -```bibtex -@inproceedings{zimmermann2019freihand, - title={Freihand: A dataset for markerless capture of hand pose and shape from single rgb images}, - author={Zimmermann, Christian and Ceylan, Duygu and Yang, Jimei and Russell, Bryan and Argus, Max and Brox, Thomas}, - booktitle={Proceedings of the IEEE International Conference on Computer Vision}, - pages={813--822}, - year={2019} -} -``` - -
    +# Freihand: A dataset for markerless capture of hand pose and shape from single rgb images + + + +
    +FreiHand (ICCV'2019) + +```bibtex +@inproceedings{zimmermann2019freihand, + title={Freihand: A dataset for markerless capture of hand pose and shape from single rgb images}, + author={Zimmermann, Christian and Ceylan, Duygu and Yang, Jimei and Russell, Bryan and Argus, Max and Brox, Thomas}, + booktitle={Proceedings of the IEEE International Conference on Computer Vision}, + pages={813--822}, + year={2019} +} +``` + +
    diff --git a/docs/src/papers/datasets/h36m.md b/docs/src/papers/datasets/h36m.md index 143e15417c..c71de56fe9 100644 --- a/docs/src/papers/datasets/h36m.md +++ b/docs/src/papers/datasets/h36m.md @@ -1,22 +1,22 @@ -# Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments - - - -
    -Human3.6M (TPAMI'2014) - -```bibtex -@article{h36m_pami, - author = {Ionescu, Catalin and Papava, Dragos and Olaru, Vlad and Sminchisescu, Cristian}, - title = {Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments}, - journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, - publisher = {IEEE Computer Society}, - volume = {36}, - number = {7}, - pages = {1325-1339}, - month = {jul}, - year = {2014} -} -``` - -
    +# Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments + + + +
    +Human3.6M (TPAMI'2014) + +```bibtex +@article{h36m_pami, + author = {Ionescu, Catalin and Papava, Dragos and Olaru, Vlad and Sminchisescu, Cristian}, + title = {Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments}, + journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, + publisher = {IEEE Computer Society}, + volume = {36}, + number = {7}, + pages = {1325-1339}, + month = {jul}, + year = {2014} +} +``` + +
    diff --git a/docs/src/papers/datasets/halpe.md b/docs/src/papers/datasets/halpe.md index f71793fdbd..ccd8dea010 100644 --- a/docs/src/papers/datasets/halpe.md +++ b/docs/src/papers/datasets/halpe.md @@ -1,17 +1,17 @@ -# PaStaNet: Toward Human Activity Knowledge Engine - - - -
    -Halpe (CVPR'2020) - -```bibtex -@inproceedings{li2020pastanet, - title={PaStaNet: Toward Human Activity Knowledge Engine}, - author={Li, Yong-Lu and Xu, Liang and Liu, Xinpeng and Huang, Xijie and Xu, Yue and Wang, Shiyi and Fang, Hao-Shu and Ma, Ze and Chen, Mingyang and Lu, Cewu}, - booktitle={CVPR}, - year={2020} -} -``` - -
    +# PaStaNet: Toward Human Activity Knowledge Engine + + + +
    +Halpe (CVPR'2020) + +```bibtex +@inproceedings{li2020pastanet, + title={PaStaNet: Toward Human Activity Knowledge Engine}, + author={Li, Yong-Lu and Xu, Liang and Liu, Xinpeng and Huang, Xijie and Xu, Yue and Wang, Shiyi and Fang, Hao-Shu and Ma, Ze and Chen, Mingyang and Lu, Cewu}, + booktitle={CVPR}, + year={2020} +} +``` + +
    diff --git a/docs/src/papers/datasets/horse10.md b/docs/src/papers/datasets/horse10.md index 94e559db51..e361810d68 100644 --- a/docs/src/papers/datasets/horse10.md +++ b/docs/src/papers/datasets/horse10.md @@ -1,18 +1,18 @@ -# Pretraining boosts out-of-domain robustness for pose estimation - - - -
    -Horse-10 (WACV'2021) - -```bibtex -@inproceedings{mathis2021pretraining, - title={Pretraining boosts out-of-domain robustness for pose estimation}, - author={Mathis, Alexander and Biasi, Thomas and Schneider, Steffen and Yuksekgonul, Mert and Rogers, Byron and Bethge, Matthias and Mathis, Mackenzie W}, - booktitle={Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision}, - pages={1859--1868}, - year={2021} -} -``` - -
    +# Pretraining boosts out-of-domain robustness for pose estimation + + + +
    +Horse-10 (WACV'2021) + +```bibtex +@inproceedings{mathis2021pretraining, + title={Pretraining boosts out-of-domain robustness for pose estimation}, + author={Mathis, Alexander and Biasi, Thomas and Schneider, Steffen and Yuksekgonul, Mert and Rogers, Byron and Bethge, Matthias and Mathis, Mackenzie W}, + booktitle={Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision}, + pages={1859--1868}, + year={2021} +} +``` + +
    diff --git a/docs/src/papers/datasets/human_art.md b/docs/src/papers/datasets/human_art.md index dc39dabbad..f95416735a 100644 --- a/docs/src/papers/datasets/human_art.md +++ b/docs/src/papers/datasets/human_art.md @@ -1,16 +1,16 @@ -# Human-Art: A Versatile Human-Centric Dataset Bridging Natural and Artificial Scenes - - - -
    -Human-Art (CVPR'2023) - -```bibtex -@inproceedings{ju2023humanart, - title={Human-Art: A Versatile Human-Centric Dataset Bridging Natural and Artificial Scenes}, - author={Ju, Xuan and Zeng, Ailing and Jianan, Wang and Qiang, Xu and Lei, Zhang}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), - year={2023}} -``` - -
    +# Human-Art: A Versatile Human-Centric Dataset Bridging Natural and Artificial Scenes + + + +
+Human-Art (CVPR'2023) + +```bibtex +@inproceedings{ju2023humanart, + title={Human-Art: A Versatile Human-Centric Dataset Bridging Natural and Artificial Scenes}, + author={Ju, Xuan and Zeng, Ailing and Wang, Jianan and Xu, Qiang and Zhang, Lei}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + year={2023}} +``` + +
    diff --git a/docs/src/papers/datasets/interhand.md b/docs/src/papers/datasets/interhand.md index 6b4458a01e..218a6bc82f 100644 --- a/docs/src/papers/datasets/interhand.md +++ b/docs/src/papers/datasets/interhand.md @@ -1,18 +1,18 @@ -# InterHand2.6M: A dataset and baseline for 3D interacting hand pose estimation from a single RGB image - - - -
    -InterHand2.6M (ECCV'2020) - -```bibtex -@article{moon2020interhand2, - title={InterHand2.6M: A dataset and baseline for 3D interacting hand pose estimation from a single RGB image}, - author={Moon, Gyeongsik and Yu, Shoou-I and Wen, He and Shiratori, Takaaki and Lee, Kyoung Mu}, - journal={arXiv preprint arXiv:2008.09309}, - year={2020}, - publisher={Springer} -} -``` - -
    +# InterHand2.6M: A dataset and baseline for 3D interacting hand pose estimation from a single RGB image + + + +
    +InterHand2.6M (ECCV'2020) + +```bibtex +@article{moon2020interhand2, + title={InterHand2.6M: A dataset and baseline for 3D interacting hand pose estimation from a single RGB image}, + author={Moon, Gyeongsik and Yu, Shoou-I and Wen, He and Shiratori, Takaaki and Lee, Kyoung Mu}, + journal={arXiv preprint arXiv:2008.09309}, + year={2020}, + publisher={Springer} +} +``` + +
    diff --git a/docs/src/papers/datasets/jhmdb.md b/docs/src/papers/datasets/jhmdb.md index 890d788ab2..f589d83483 100644 --- a/docs/src/papers/datasets/jhmdb.md +++ b/docs/src/papers/datasets/jhmdb.md @@ -1,19 +1,19 @@ -# Towards understanding action recognition - - - -
    -JHMDB (ICCV'2013) - -```bibtex -@inproceedings{Jhuang:ICCV:2013, - title = {Towards understanding action recognition}, - author = {H. Jhuang and J. Gall and S. Zuffi and C. Schmid and M. J. Black}, - booktitle = {International Conf. on Computer Vision (ICCV)}, - month = Dec, - pages = {3192-3199}, - year = {2013} -} -``` - -
    +# Towards understanding action recognition + + + +
    +JHMDB (ICCV'2013) + +```bibtex +@inproceedings{Jhuang:ICCV:2013, + title = {Towards understanding action recognition}, + author = {H. Jhuang and J. Gall and S. Zuffi and C. Schmid and M. J. Black}, + booktitle = {International Conf. on Computer Vision (ICCV)}, + month = Dec, + pages = {3192-3199}, + year = {2013} +} +``` + +
    diff --git a/docs/src/papers/datasets/lapa.md b/docs/src/papers/datasets/lapa.md index f82c50ca22..183ef6f96b 100644 --- a/docs/src/papers/datasets/lapa.md +++ b/docs/src/papers/datasets/lapa.md @@ -1,18 +1,18 @@ -# A New Dataset and Boundary-Attention Semantic Segmentation for Face Parsing - - - -
    -LaPa (AAAI'2020) - -```bibtex -@inproceedings{liu2020new, - title={A New Dataset and Boundary-Attention Semantic Segmentation for Face Parsing.}, - author={Liu, Yinglu and Shi, Hailin and Shen, Hao and Si, Yue and Wang, Xiaobo and Mei, Tao}, - booktitle={AAAI}, - pages={11637--11644}, - year={2020} -} -``` - -
    +# A New Dataset and Boundary-Attention Semantic Segmentation for Face Parsing + + + +
    +LaPa (AAAI'2020) + +```bibtex +@inproceedings{liu2020new, + title={A New Dataset and Boundary-Attention Semantic Segmentation for Face Parsing.}, + author={Liu, Yinglu and Shi, Hailin and Shen, Hao and Si, Yue and Wang, Xiaobo and Mei, Tao}, + booktitle={AAAI}, + pages={11637--11644}, + year={2020} +} +``` + +
    diff --git a/docs/src/papers/datasets/locust.md b/docs/src/papers/datasets/locust.md index 896ee03b83..f58316f3b7 100644 --- a/docs/src/papers/datasets/locust.md +++ b/docs/src/papers/datasets/locust.md @@ -1,20 +1,20 @@ -# DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning - - - -
    -Desert Locust (Elife'2019) - -```bibtex -@article{graving2019deepposekit, - title={DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning}, - author={Graving, Jacob M and Chae, Daniel and Naik, Hemal and Li, Liang and Koger, Benjamin and Costelloe, Blair R and Couzin, Iain D}, - journal={Elife}, - volume={8}, - pages={e47994}, - year={2019}, - publisher={eLife Sciences Publications Limited} -} -``` - -
    +# DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning + + + +
    +Desert Locust (Elife'2019) + +```bibtex +@article{graving2019deepposekit, + title={DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning}, + author={Graving, Jacob M and Chae, Daniel and Naik, Hemal and Li, Liang and Koger, Benjamin and Costelloe, Blair R and Couzin, Iain D}, + journal={Elife}, + volume={8}, + pages={e47994}, + year={2019}, + publisher={eLife Sciences Publications Limited} +} +``` + +
    diff --git a/docs/src/papers/datasets/macaque.md b/docs/src/papers/datasets/macaque.md index be4bec1131..c913d6cd8d 100644 --- a/docs/src/papers/datasets/macaque.md +++ b/docs/src/papers/datasets/macaque.md @@ -1,18 +1,18 @@ -# MacaquePose: A novel ‘in the wild’macaque monkey pose dataset for markerless motion capture - - - -
    -MacaquePose (bioRxiv'2020) - -```bibtex -@article{labuguen2020macaquepose, - title={MacaquePose: A novel ‘in the wild’macaque monkey pose dataset for markerless motion capture}, - author={Labuguen, Rollyn and Matsumoto, Jumpei and Negrete, Salvador and Nishimaru, Hiroshi and Nishijo, Hisao and Takada, Masahiko and Go, Yasuhiro and Inoue, Ken-ichi and Shibata, Tomohiro}, - journal={bioRxiv}, - year={2020}, - publisher={Cold Spring Harbor Laboratory} -} -``` - -
+# MacaquePose: A novel ‘in the wild’ macaque monkey pose dataset for markerless motion capture + + + +
+MacaquePose (bioRxiv'2020) + +```bibtex +@article{labuguen2020macaquepose, + title={MacaquePose: A novel ‘in the wild’ macaque monkey pose dataset for markerless motion capture}, + author={Labuguen, Rollyn and Matsumoto, Jumpei and Negrete, Salvador and Nishimaru, Hiroshi and Nishijo, Hisao and Takada, Masahiko and Go, Yasuhiro and Inoue, Ken-ichi and Shibata, Tomohiro}, + journal={bioRxiv}, + year={2020}, + publisher={Cold Spring Harbor Laboratory} +} +``` + +
    diff --git a/docs/src/papers/datasets/mhp.md b/docs/src/papers/datasets/mhp.md index 6dc5b17ccc..518d9fa003 100644 --- a/docs/src/papers/datasets/mhp.md +++ b/docs/src/papers/datasets/mhp.md @@ -1,18 +1,18 @@ -# Understanding humans in crowded scenes: Deep nested adversarial learning and a new benchmark for multi-human parsing - - - -
    -MHP (ACM MM'2018) - -```bibtex -@inproceedings{zhao2018understanding, - title={Understanding humans in crowded scenes: Deep nested adversarial learning and a new benchmark for multi-human parsing}, - author={Zhao, Jian and Li, Jianshu and Cheng, Yu and Sim, Terence and Yan, Shuicheng and Feng, Jiashi}, - booktitle={Proceedings of the 26th ACM international conference on Multimedia}, - pages={792--800}, - year={2018} -} -``` - -
    +# Understanding humans in crowded scenes: Deep nested adversarial learning and a new benchmark for multi-human parsing + + + +
    +MHP (ACM MM'2018) + +```bibtex +@inproceedings{zhao2018understanding, + title={Understanding humans in crowded scenes: Deep nested adversarial learning and a new benchmark for multi-human parsing}, + author={Zhao, Jian and Li, Jianshu and Cheng, Yu and Sim, Terence and Yan, Shuicheng and Feng, Jiashi}, + booktitle={Proceedings of the 26th ACM international conference on Multimedia}, + pages={792--800}, + year={2018} +} +``` + +
    diff --git a/docs/src/papers/datasets/mpi_inf_3dhp.md b/docs/src/papers/datasets/mpi_inf_3dhp.md index 3a26d49fd5..4e93e83785 100644 --- a/docs/src/papers/datasets/mpi_inf_3dhp.md +++ b/docs/src/papers/datasets/mpi_inf_3dhp.md @@ -1,20 +1,20 @@ -# Monocular 3D Human Pose Estimation In The Wild Using Improved CNN Supervision - - - -
    -MPI-INF-3DHP (3DV'2017) - -```bibtex -@inproceedings{mono-3dhp2017, - author = {Mehta, Dushyant and Rhodin, Helge and Casas, Dan and Fua, Pascal and Sotnychenko, Oleksandr and Xu, Weipeng and Theobalt, Christian}, - title = {Monocular 3D Human Pose Estimation In The Wild Using Improved CNN Supervision}, - booktitle = {3D Vision (3DV), 2017 Fifth International Conference on}, - url = {http://gvv.mpi-inf.mpg.de/3dhp_dataset}, - year = {2017}, - organization={IEEE}, - doi={10.1109/3dv.2017.00064}, -} -``` - -
    +# Monocular 3D Human Pose Estimation In The Wild Using Improved CNN Supervision + + + +
    +MPI-INF-3DHP (3DV'2017) + +```bibtex +@inproceedings{mono-3dhp2017, + author = {Mehta, Dushyant and Rhodin, Helge and Casas, Dan and Fua, Pascal and Sotnychenko, Oleksandr and Xu, Weipeng and Theobalt, Christian}, + title = {Monocular 3D Human Pose Estimation In The Wild Using Improved CNN Supervision}, + booktitle = {3D Vision (3DV), 2017 Fifth International Conference on}, + url = {http://gvv.mpi-inf.mpg.de/3dhp_dataset}, + year = {2017}, + organization={IEEE}, + doi={10.1109/3dv.2017.00064}, +} +``` + +
    diff --git a/docs/src/papers/datasets/mpii.md b/docs/src/papers/datasets/mpii.md index e2df7cfd7d..f914f957f0 100644 --- a/docs/src/papers/datasets/mpii.md +++ b/docs/src/papers/datasets/mpii.md @@ -1,18 +1,18 @@ -# 2D Human Pose Estimation: New Benchmark and State of the Art Analysis - - - -
    -MPII (CVPR'2014) - -```bibtex -@inproceedings{andriluka14cvpr, - author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, - title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, - booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2014}, - month = {June} -} -``` - -
    +# 2D Human Pose Estimation: New Benchmark and State of the Art Analysis + + + +
    +MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
    diff --git a/docs/src/papers/datasets/mpii_trb.md b/docs/src/papers/datasets/mpii_trb.md index b3e96a77d2..0395c62f82 100644 --- a/docs/src/papers/datasets/mpii_trb.md +++ b/docs/src/papers/datasets/mpii_trb.md @@ -1,18 +1,18 @@ -# TRB: A Novel Triplet Representation for Understanding 2D Human Body - - - -
    -MPII-TRB (ICCV'2019) - -```bibtex -@inproceedings{duan2019trb, - title={TRB: A Novel Triplet Representation for Understanding 2D Human Body}, - author={Duan, Haodong and Lin, Kwan-Yee and Jin, Sheng and Liu, Wentao and Qian, Chen and Ouyang, Wanli}, - booktitle={Proceedings of the IEEE International Conference on Computer Vision}, - pages={9479--9488}, - year={2019} -} -``` - -
    +# TRB: A Novel Triplet Representation for Understanding 2D Human Body + + + +
    +MPII-TRB (ICCV'2019) + +```bibtex +@inproceedings{duan2019trb, + title={TRB: A Novel Triplet Representation for Understanding 2D Human Body}, + author={Duan, Haodong and Lin, Kwan-Yee and Jin, Sheng and Liu, Wentao and Qian, Chen and Ouyang, Wanli}, + booktitle={Proceedings of the IEEE International Conference on Computer Vision}, + pages={9479--9488}, + year={2019} +} +``` + +
    diff --git a/docs/src/papers/datasets/ochuman.md b/docs/src/papers/datasets/ochuman.md index 5211c341e4..345a503613 100644 --- a/docs/src/papers/datasets/ochuman.md +++ b/docs/src/papers/datasets/ochuman.md @@ -1,18 +1,18 @@ -# Pose2seg: Detection free human instance segmentation - - - -
    -OCHuman (CVPR'2019) - -```bibtex -@inproceedings{zhang2019pose2seg, - title={Pose2seg: Detection free human instance segmentation}, - author={Zhang, Song-Hai and Li, Ruilong and Dong, Xin and Rosin, Paul and Cai, Zixi and Han, Xi and Yang, Dingcheng and Huang, Haozhi and Hu, Shi-Min}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={889--898}, - year={2019} -} -``` - -
    +# Pose2seg: Detection free human instance segmentation + + + +
    +OCHuman (CVPR'2019) + +```bibtex +@inproceedings{zhang2019pose2seg, + title={Pose2seg: Detection free human instance segmentation}, + author={Zhang, Song-Hai and Li, Ruilong and Dong, Xin and Rosin, Paul and Cai, Zixi and Han, Xi and Yang, Dingcheng and Huang, Haozhi and Hu, Shi-Min}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={889--898}, + year={2019} +} +``` + +
    diff --git a/docs/src/papers/datasets/onehand10k.md b/docs/src/papers/datasets/onehand10k.md index 5710fda477..48b2e67f75 100644 --- a/docs/src/papers/datasets/onehand10k.md +++ b/docs/src/papers/datasets/onehand10k.md @@ -1,21 +1,21 @@ -# Mask-pose cascaded cnn for 2d hand pose estimation from single color image - - - -
    -OneHand10K (TCSVT'2019) - -```bibtex -@article{wang2018mask, - title={Mask-pose cascaded cnn for 2d hand pose estimation from single color image}, - author={Wang, Yangang and Peng, Cong and Liu, Yebin}, - journal={IEEE Transactions on Circuits and Systems for Video Technology}, - volume={29}, - number={11}, - pages={3258--3268}, - year={2018}, - publisher={IEEE} -} -``` - -
    +# Mask-pose cascaded cnn for 2d hand pose estimation from single color image + + + +
    +OneHand10K (TCSVT'2019) + +```bibtex +@article{wang2018mask, + title={Mask-pose cascaded cnn for 2d hand pose estimation from single color image}, + author={Wang, Yangang and Peng, Cong and Liu, Yebin}, + journal={IEEE Transactions on Circuits and Systems for Video Technology}, + volume={29}, + number={11}, + pages={3258--3268}, + year={2018}, + publisher={IEEE} +} +``` + +
    diff --git a/docs/src/papers/datasets/panoptic.md b/docs/src/papers/datasets/panoptic.md index 60719c4df9..b7ce0585b0 100644 --- a/docs/src/papers/datasets/panoptic.md +++ b/docs/src/papers/datasets/panoptic.md @@ -1,18 +1,18 @@ -# Hand keypoint detection in single images using multiview bootstrapping - - - -
    -CMU Panoptic HandDB (CVPR'2017) - -```bibtex -@inproceedings{simon2017hand, - title={Hand keypoint detection in single images using multiview bootstrapping}, - author={Simon, Tomas and Joo, Hanbyul and Matthews, Iain and Sheikh, Yaser}, - booktitle={Proceedings of the IEEE conference on Computer Vision and Pattern Recognition}, - pages={1145--1153}, - year={2017} -} -``` - -
    +# Hand keypoint detection in single images using multiview bootstrapping + + + +
    +CMU Panoptic HandDB (CVPR'2017) + +```bibtex +@inproceedings{simon2017hand, + title={Hand keypoint detection in single images using multiview bootstrapping}, + author={Simon, Tomas and Joo, Hanbyul and Matthews, Iain and Sheikh, Yaser}, + booktitle={Proceedings of the IEEE conference on Computer Vision and Pattern Recognition}, + pages={1145--1153}, + year={2017} +} +``` + +
    diff --git a/docs/src/papers/datasets/panoptic_body3d.md b/docs/src/papers/datasets/panoptic_body3d.md index b7f45c8beb..3e7cfec289 100644 --- a/docs/src/papers/datasets/panoptic_body3d.md +++ b/docs/src/papers/datasets/panoptic_body3d.md @@ -1,17 +1,17 @@ -# Panoptic Studio: A Massively Multiview System for Social Motion Capture - - - -
    -CMU Panoptic (ICCV'2015) - -```bibtex -@Article = {joo_iccv_2015, -author = {Hanbyul Joo, Hao Liu, Lei Tan, Lin Gui, Bart Nabbe, Iain Matthews, Takeo Kanade, Shohei Nobuhara, and Yaser Sheikh}, -title = {Panoptic Studio: A Massively Multiview System for Social Motion Capture}, -booktitle = {ICCV}, -year = {2015} -} -``` - -
    +# Panoptic Studio: A Massively Multiview System for Social Motion Capture + + + +
+CMU Panoptic (ICCV'2015) + +```bibtex +@inproceedings{joo_iccv_2015, +author = {Hanbyul Joo and Hao Liu and Lei Tan and Lin Gui and Bart Nabbe and Iain Matthews and Takeo Kanade and Shohei Nobuhara and Yaser Sheikh}, +title = {Panoptic Studio: A Massively Multiview System for Social Motion Capture}, +booktitle = {ICCV}, +year = {2015} +} +``` + +
    diff --git a/docs/src/papers/datasets/posetrack18.md b/docs/src/papers/datasets/posetrack18.md index 90cfcb54f8..bc5a4f984e 100644 --- a/docs/src/papers/datasets/posetrack18.md +++ b/docs/src/papers/datasets/posetrack18.md @@ -1,18 +1,18 @@ -# Posetrack: A benchmark for human pose estimation and tracking - - - -
    -PoseTrack18 (CVPR'2018) - -```bibtex -@inproceedings{andriluka2018posetrack, - title={Posetrack: A benchmark for human pose estimation and tracking}, - author={Andriluka, Mykhaylo and Iqbal, Umar and Insafutdinov, Eldar and Pishchulin, Leonid and Milan, Anton and Gall, Juergen and Schiele, Bernt}, - booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, - pages={5167--5176}, - year={2018} -} -``` - -
    +# Posetrack: A benchmark for human pose estimation and tracking + + + +
    +PoseTrack18 (CVPR'2018) + +```bibtex +@inproceedings{andriluka2018posetrack, + title={Posetrack: A benchmark for human pose estimation and tracking}, + author={Andriluka, Mykhaylo and Iqbal, Umar and Insafutdinov, Eldar and Pishchulin, Leonid and Milan, Anton and Gall, Juergen and Schiele, Bernt}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + pages={5167--5176}, + year={2018} +} +``` + +
    diff --git a/docs/src/papers/datasets/rhd.md b/docs/src/papers/datasets/rhd.md index 1855037bdc..2e553497b9 100644 --- a/docs/src/papers/datasets/rhd.md +++ b/docs/src/papers/datasets/rhd.md @@ -1,19 +1,19 @@ -# Learning to Estimate 3D Hand Pose from Single RGB Images - - - -
    -RHD (ICCV'2017) - -```bibtex -@TechReport{zb2017hand, - author={Christian Zimmermann and Thomas Brox}, - title={Learning to Estimate 3D Hand Pose from Single RGB Images}, - institution={arXiv:1705.01389}, - year={2017}, - note="https://arxiv.org/abs/1705.01389", - url="https://lmb.informatik.uni-freiburg.de/projects/hand3d/" -} -``` - -
    +# Learning to Estimate 3D Hand Pose from Single RGB Images + + + +
    +RHD (ICCV'2017) + +```bibtex +@TechReport{zb2017hand, + author={Christian Zimmermann and Thomas Brox}, + title={Learning to Estimate 3D Hand Pose from Single RGB Images}, + institution={arXiv:1705.01389}, + year={2017}, + note="https://arxiv.org/abs/1705.01389", + url="https://lmb.informatik.uni-freiburg.de/projects/hand3d/" +} +``` + +
    diff --git a/docs/src/papers/datasets/wflw.md b/docs/src/papers/datasets/wflw.md index 08c3ccced3..8ab9678437 100644 --- a/docs/src/papers/datasets/wflw.md +++ b/docs/src/papers/datasets/wflw.md @@ -1,18 +1,18 @@ -# Look at boundary: A boundary-aware face alignment algorithm - - - -
    -WFLW (CVPR'2018) - -```bibtex -@inproceedings{wu2018look, - title={Look at boundary: A boundary-aware face alignment algorithm}, - author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={2129--2138}, - year={2018} -} -``` - -
    +# Look at boundary: A boundary-aware face alignment algorithm + + + +
    +WFLW (CVPR'2018) + +```bibtex +@inproceedings{wu2018look, + title={Look at boundary: A boundary-aware face alignment algorithm}, + author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={2129--2138}, + year={2018} +} +``` + +
    diff --git a/docs/src/papers/datasets/zebra.md b/docs/src/papers/datasets/zebra.md index 2727e595fc..603c23a5bd 100644 --- a/docs/src/papers/datasets/zebra.md +++ b/docs/src/papers/datasets/zebra.md @@ -1,20 +1,20 @@ -# DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning - - - -
    -Grévy’s Zebra (Elife'2019) - -```bibtex -@article{graving2019deepposekit, - title={DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning}, - author={Graving, Jacob M and Chae, Daniel and Naik, Hemal and Li, Liang and Koger, Benjamin and Costelloe, Blair R and Couzin, Iain D}, - journal={Elife}, - volume={8}, - pages={e47994}, - year={2019}, - publisher={eLife Sciences Publications Limited} -} -``` - -
    +# DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning + + + +
    +Grévy’s Zebra (Elife'2019) + +```bibtex +@article{graving2019deepposekit, + title={DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning}, + author={Graving, Jacob M and Chae, Daniel and Naik, Hemal and Li, Liang and Koger, Benjamin and Costelloe, Blair R and Couzin, Iain D}, + journal={Elife}, + volume={8}, + pages={e47994}, + year={2019}, + publisher={eLife Sciences Publications Limited} +} +``` + +
    diff --git a/docs/src/papers/techniques/albumentations.md b/docs/src/papers/techniques/albumentations.md index 9d09a7a344..e78962433e 100644 --- a/docs/src/papers/techniques/albumentations.md +++ b/docs/src/papers/techniques/albumentations.md @@ -1,21 +1,21 @@ -# Albumentations: fast and flexible image augmentations - - - -
    -Albumentations (Information'2020) - -```bibtex -@article{buslaev2020albumentations, - title={Albumentations: fast and flexible image augmentations}, - author={Buslaev, Alexander and Iglovikov, Vladimir I and Khvedchenya, Eugene and Parinov, Alex and Druzhinin, Mikhail and Kalinin, Alexandr A}, - journal={Information}, - volume={11}, - number={2}, - pages={125}, - year={2020}, - publisher={Multidisciplinary Digital Publishing Institute} -} -``` - -
    +# Albumentations: fast and flexible image augmentations + + + +
    +Albumentations (Information'2020) + +```bibtex +@article{buslaev2020albumentations, + title={Albumentations: fast and flexible image augmentations}, + author={Buslaev, Alexander and Iglovikov, Vladimir I and Khvedchenya, Eugene and Parinov, Alex and Druzhinin, Mikhail and Kalinin, Alexandr A}, + journal={Information}, + volume={11}, + number={2}, + pages={125}, + year={2020}, + publisher={Multidisciplinary Digital Publishing Institute} +} +``` + +
    diff --git a/docs/src/papers/techniques/awingloss.md b/docs/src/papers/techniques/awingloss.md index 4d4b93a87c..4633e32581 100644 --- a/docs/src/papers/techniques/awingloss.md +++ b/docs/src/papers/techniques/awingloss.md @@ -1,31 +1,31 @@ -# Adaptive Wing Loss for Robust Face Alignment via Heatmap Regression - - - -
    -AdaptiveWingloss (ICCV'2019) - -```bibtex -@inproceedings{wang2019adaptive, - title={Adaptive wing loss for robust face alignment via heatmap regression}, - author={Wang, Xinyao and Bo, Liefeng and Fuxin, Li}, - booktitle={Proceedings of the IEEE/CVF international conference on computer vision}, - pages={6971--6981}, - year={2019} -} -``` - -
    - -## Abstract - - - -Heatmap regression with a deep network has become one of the mainstream approaches to localize facial landmarks. However, the loss function for heatmap regression is rarely studied. In this paper, we analyze the ideal loss function properties for heatmap regression in face alignment problems. Then we propose a novel loss function, named Adaptive Wing loss, that is able to adapt its shape to different types of ground truth heatmap pixels. This adaptability penalizes loss more on foreground pixels while less on background pixels. To address the imbalance between foreground and background pixels, we also propose Weighted Loss Map, which assigns high weights on foreground and difficult background pixels to help training process focus more on pixels that are crucial to landmark localization. To further improve face alignment accuracy, we introduce boundary prediction and CoordConv with boundary coordinates. Extensive experiments on different benchmarks, including COFW, 300W and WFLW, show our approach outperforms the state-of-the-art by a significant margin on -various evaluation metrics. Besides, the Adaptive Wing loss also helps other heatmap regression tasks. - - - -
    - -
    +# Adaptive Wing Loss for Robust Face Alignment via Heatmap Regression + + + +
    +AdaptiveWingloss (ICCV'2019) + +```bibtex +@inproceedings{wang2019adaptive, + title={Adaptive wing loss for robust face alignment via heatmap regression}, + author={Wang, Xinyao and Bo, Liefeng and Fuxin, Li}, + booktitle={Proceedings of the IEEE/CVF international conference on computer vision}, + pages={6971--6981}, + year={2019} +} +``` + +
    + +## Abstract + + + +Heatmap regression with a deep network has become one of the mainstream approaches to localize facial landmarks. However, the loss function for heatmap regression is rarely studied. In this paper, we analyze the ideal loss function properties for heatmap regression in face alignment problems. Then we propose a novel loss function, named Adaptive Wing loss, that is able to adapt its shape to different types of ground truth heatmap pixels. This adaptability penalizes loss more on foreground pixels while less on background pixels. To address the imbalance between foreground and background pixels, we also propose Weighted Loss Map, which assigns high weights on foreground and difficult background pixels to help training process focus more on pixels that are crucial to landmark localization. To further improve face alignment accuracy, we introduce boundary prediction and CoordConv with boundary coordinates. Extensive experiments on different benchmarks, including COFW, 300W and WFLW, show our approach outperforms the state-of-the-art by a significant margin on +various evaluation metrics. Besides, the Adaptive Wing loss also helps other heatmap regression tasks. + + + +
    + +
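For reference, the piecewise loss described in the abstract can be written down compactly. The following is a minimal PyTorch sketch of the Adaptive Wing loss using the paper's default hyper-parameters (alpha=2.1, omega=14, epsilon=1, theta=0.5); the function name, tensor shapes and mean reduction are illustrative assumptions, not MMPose's own implementation.

```python
import torch


def adaptive_wing_loss(pred, target, alpha=2.1, omega=14.0, epsilon=1.0, theta=0.5):
    """Adaptive Wing loss on heatmaps, mean-reduced.

    pred, target: heatmaps in [0, 1], e.g. shape [N, K, H, W].
    """
    diff = (target - pred).abs()
    # A and C keep the two pieces continuous and smooth at |diff| == theta.
    A = omega * (1 / (1 + (theta / epsilon) ** (alpha - target))) \
        * (alpha - target) * ((theta / epsilon) ** (alpha - target - 1)) / epsilon
    C = theta * A - omega * torch.log(1 + (theta / epsilon) ** (alpha - target))
    loss = torch.where(
        diff < theta,
        omega * torch.log(1 + (diff / epsilon) ** (alpha - target)),  # small residuals
        A * diff - C)                                                 # large residuals: linear tail
    return loss.mean()
```

The pixel-wise exponent `alpha - target` is what makes the curve steeper near foreground (high-valued) heatmap pixels and flatter on background pixels, which is the adaptive behaviour the abstract refers to.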
    diff --git a/docs/src/papers/techniques/dark.md b/docs/src/papers/techniques/dark.md index 083b7596ab..94da433e29 100644 --- a/docs/src/papers/techniques/dark.md +++ b/docs/src/papers/techniques/dark.md @@ -1,30 +1,30 @@ -# Distribution-aware coordinate representation for human pose estimation - - - -
    -DarkPose (CVPR'2020) - -```bibtex -@inproceedings{zhang2020distribution, - title={Distribution-aware coordinate representation for human pose estimation}, - author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={7093--7102}, - year={2020} -} -``` - -
    - -## Abstract - - - -While being the de facto standard coordinate representation for human pose estimation, heatmap has not been investigated in-depth. This work fills this gap. For the first time, we find that the process of decoding the predicted heatmaps into the final joint coordinates in the original image space is surprisingly significant for the performance. We further probe the design limitations of the standard coordinate decoding method, and propose a more principled distributionaware decoding method. Also, we improve the standard coordinate encoding process (i.e. transforming ground-truth coordinates to heatmaps) by generating unbiased/accurate heatmaps. Taking the two together, we formulate a novel Distribution-Aware coordinate Representation of Keypoints (DARK) method. Serving as a model-agnostic plug-in, DARK brings about significant performance boost to existing human pose estimation models. Extensive experiments show that DARK yields the best results on two common benchmarks, MPII and COCO. Besides, DARK achieves the 2nd place entry in the ICCV 2019 COCO Keypoints Challenge. The code is available online. - - - -
    - -
    +# Distribution-aware coordinate representation for human pose estimation + + + +
    +DarkPose (CVPR'2020) + +```bibtex +@inproceedings{zhang2020distribution, + title={Distribution-aware coordinate representation for human pose estimation}, + author={Zhang, Feng and Zhu, Xiatian and Dai, Hanbin and Ye, Mao and Zhu, Ce}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={7093--7102}, + year={2020} +} +``` + +
+ +## Abstract + + + +While being the de facto standard coordinate representation for human pose estimation, heatmap has not been investigated in-depth. This work fills this gap. For the first time, we find that the process of decoding the predicted heatmaps into the final joint coordinates in the original image space is surprisingly significant for the performance. We further probe the design limitations of the standard coordinate decoding method, and propose a more principled distribution-aware decoding method. Also, we improve the standard coordinate encoding process (i.e. transforming ground-truth coordinates to heatmaps) by generating unbiased/accurate heatmaps. Taking the two together, we formulate a novel Distribution-Aware coordinate Representation of Keypoints (DARK) method. Serving as a model-agnostic plug-in, DARK brings about a significant performance boost to existing human pose estimation models. Extensive experiments show that DARK yields the best results on two common benchmarks, MPII and COCO. Besides, DARK achieves the 2nd place entry in the ICCV 2019 COCO Keypoints Challenge. The code is available online. + + + +
    + +
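The decoding step highlighted in the abstract can be illustrated with a second-order Taylor expansion of the log-heatmap around the argmax. The NumPy sketch below shows only that refinement for a single 2D heatmap; the full pipeline also applies Gaussian modulation to the heatmap before decoding and uses unbiased encoding during training, and `dark_refine` is a hypothetical helper name rather than the library API.

```python
import numpy as np


def dark_refine(heatmap, eps=1e-10):
    """Refine the argmax of one 2D heatmap via a Taylor expansion of its log.

    Returns a sub-pixel (x, y) location in heatmap coordinates.
    """
    h, w = heatmap.shape
    y, x = np.unravel_index(np.argmax(heatmap), heatmap.shape)
    if not (1 <= x < w - 1 and 1 <= y < h - 1):
        return float(x), float(y)  # cannot take finite differences at the border
    logmap = np.log(np.maximum(heatmap, eps))
    # First derivatives (central differences) at the peak.
    dx = 0.5 * (logmap[y, x + 1] - logmap[y, x - 1])
    dy = 0.5 * (logmap[y + 1, x] - logmap[y - 1, x])
    # Second derivatives (Hessian entries) at the peak.
    dxx = logmap[y, x + 1] - 2 * logmap[y, x] + logmap[y, x - 1]
    dyy = logmap[y + 1, x] - 2 * logmap[y, x] + logmap[y - 1, x]
    dxy = 0.25 * (logmap[y + 1, x + 1] - logmap[y + 1, x - 1]
                  - logmap[y - 1, x + 1] + logmap[y - 1, x - 1])
    hessian = np.array([[dxx, dxy], [dxy, dyy]])
    grad = np.array([dx, dy])
    if abs(np.linalg.det(hessian)) < eps:
        return float(x), float(y)
    offset = -np.linalg.solve(hessian, grad)  # mu = m - H^{-1} * grad
    return float(x + offset[0]), float(y + offset[1])
```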
    diff --git a/docs/src/papers/techniques/fp16.md b/docs/src/papers/techniques/fp16.md index 7fd7ee0011..bbc7b567c2 100644 --- a/docs/src/papers/techniques/fp16.md +++ b/docs/src/papers/techniques/fp16.md @@ -1,17 +1,17 @@ -# Mixed Precision Training - - - -
    -FP16 (ArXiv'2017) - -```bibtex -@article{micikevicius2017mixed, - title={Mixed precision training}, - author={Micikevicius, Paulius and Narang, Sharan and Alben, Jonah and Diamos, Gregory and Elsen, Erich and Garcia, David and Ginsburg, Boris and Houston, Michael and Kuchaiev, Oleksii and Venkatesh, Ganesh and others}, - journal={arXiv preprint arXiv:1710.03740}, - year={2017} -} -``` - -
    +# Mixed Precision Training + + + +
    +FP16 (ArXiv'2017) + +```bibtex +@article{micikevicius2017mixed, + title={Mixed precision training}, + author={Micikevicius, Paulius and Narang, Sharan and Alben, Jonah and Diamos, Gregory and Elsen, Erich and Garcia, David and Ginsburg, Boris and Houston, Michael and Kuchaiev, Oleksii and Venkatesh, Ganesh and others}, + journal={arXiv preprint arXiv:1710.03740}, + year={2017} +} +``` + +
    diff --git a/docs/src/papers/techniques/fpn.md b/docs/src/papers/techniques/fpn.md index 0de33f4866..1faf5103d8 100644 --- a/docs/src/papers/techniques/fpn.md +++ b/docs/src/papers/techniques/fpn.md @@ -1,30 +1,30 @@ -# Feature pyramid networks for object detection - - - -
    -FPN (CVPR'2017) - -```bibtex -@inproceedings{lin2017feature, - title={Feature pyramid networks for object detection}, - author={Lin, Tsung-Yi and Doll{\'a}r, Piotr and Girshick, Ross and He, Kaiming and Hariharan, Bharath and Belongie, Serge}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={2117--2125}, - year={2017} -} -``` - -
    - -## Abstract - - - -Feature pyramids are a basic component in recognition systems for detecting objects at different scales. But recent deep learning object detectors have avoided pyramid representations, in part because they are compute and memory intensive. In this paper, we exploit the inherent multi-scale, pyramidal hierarchy of deep convolutional networks to construct feature pyramids with marginal extra cost. A topdown architecture with lateral connections is developed for building high-level semantic feature maps at all scales. This architecture, called a Feature Pyramid Network (FPN), shows significant improvement as a generic feature extractor in several applications. Using FPN in a basic Faster R-CNN system, our method achieves state-of-the-art singlemodel results on the COCO detection benchmark without bells and whistles, surpassing all existing single-model entries including those from the COCO 2016 challenge winners. In addition, our method can run at 6 FPS on a GPU and thus is a practical and accurate solution to multi-scale object detection. Code will be made publicly available. - - - -
    - -
    +# Feature pyramid networks for object detection + + + +
    +FPN (CVPR'2017) + +```bibtex +@inproceedings{lin2017feature, + title={Feature pyramid networks for object detection}, + author={Lin, Tsung-Yi and Doll{\'a}r, Piotr and Girshick, Ross and He, Kaiming and Hariharan, Bharath and Belongie, Serge}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={2117--2125}, + year={2017} +} +``` + +
+ +## Abstract + + + +Feature pyramids are a basic component in recognition systems for detecting objects at different scales. But recent deep learning object detectors have avoided pyramid representations, in part because they are compute and memory intensive. In this paper, we exploit the inherent multi-scale, pyramidal hierarchy of deep convolutional networks to construct feature pyramids with marginal extra cost. A top-down architecture with lateral connections is developed for building high-level semantic feature maps at all scales. This architecture, called a Feature Pyramid Network (FPN), shows significant improvement as a generic feature extractor in several applications. Using FPN in a basic Faster R-CNN system, our method achieves state-of-the-art single-model results on the COCO detection benchmark without bells and whistles, surpassing all existing single-model entries including those from the COCO 2016 challenge winners. In addition, our method can run at 6 FPS on a GPU and thus is a practical and accurate solution to multi-scale object detection. Code will be made publicly available. + + + +
    + +
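The top-down pathway with lateral connections described above can be sketched in a few lines of PyTorch. The module below assumes ResNet-50-style channel counts (256/512/1024/2048) and is an illustrative simplification, not the FPN implementation used by any particular detection library.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class SimpleFPN(nn.Module):
    """Top-down pathway with lateral connections over backbone features C2..C5."""

    def __init__(self, in_channels=(256, 512, 1024, 2048), out_channels=256):
        super().__init__()
        # 1x1 lateral convs project each backbone level to a common width.
        self.lateral = nn.ModuleList(
            nn.Conv2d(c, out_channels, kernel_size=1) for c in in_channels)
        # 3x3 output convs smooth the merged maps.
        self.output = nn.ModuleList(
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)
            for _ in in_channels)

    def forward(self, feats):
        # feats: backbone maps ordered high to low resolution, e.g. [C2, C3, C4, C5].
        laterals = [lat(f) for lat, f in zip(self.lateral, feats)]
        # Top-down: upsample the coarser level and add it to the lateral below it.
        for i in range(len(laterals) - 1, 0, -1):
            laterals[i - 1] = laterals[i - 1] + F.interpolate(
                laterals[i], size=laterals[i - 1].shape[-2:], mode='nearest')
        return [conv(lat) for conv, lat in zip(self.output, laterals)]  # [P2, P3, P4, P5]
```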
    diff --git a/docs/src/papers/techniques/rle.md b/docs/src/papers/techniques/rle.md index cdc59d57ec..7734ca3d44 100644 --- a/docs/src/papers/techniques/rle.md +++ b/docs/src/papers/techniques/rle.md @@ -1,30 +1,30 @@ -# Human pose regression with residual log-likelihood estimation - - - -
    -RLE (ICCV'2021) - -```bibtex -@inproceedings{li2021human, - title={Human pose regression with residual log-likelihood estimation}, - author={Li, Jiefeng and Bian, Siyuan and Zeng, Ailing and Wang, Can and Pang, Bo and Liu, Wentao and Lu, Cewu}, - booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, - pages={11025--11034}, - year={2021} -} -``` - -
    - -## Abstract - - - -Heatmap-based methods dominate in the field of human pose estimation by modelling the output distribution through likelihood heatmaps. In contrast, regressionbased methods are more efficient but suffer from inferior performance. In this work, we explore maximum likelihood estimation (MLE) to develop an efficient and effective regression-based methods. From the perspective of MLE, adopting different regression losses is making different assumptions about the output density function. A density function closer to the true distribution leads to a better regression performance. In light of this, we propose a novel regression paradigm with Residual Log-likelihood Estimation (RLE) to capture the underlying output distribution. Concretely, RLE learns the change of the distribution instead of the unreferenced underlying distribution to facilitate the training process. With the proposed reparameterization design, our method is compatible with offthe-shelf flow models. The proposed method is effective, efficient and flexible. We show its potential in various human pose estimation tasks with comprehensive experiments. Compared to the conventional regression paradigm, regression with RLE bring 12.4 mAP improvement on MSCOCO without any test-time overhead. Moreover, for the first time, especially on multi-person pose estimation, our regression method is superior to the heatmap-based methods. - - - -
    - -
    +# Human pose regression with residual log-likelihood estimation + + + +
    +RLE (ICCV'2021) + +```bibtex +@inproceedings{li2021human, + title={Human pose regression with residual log-likelihood estimation}, + author={Li, Jiefeng and Bian, Siyuan and Zeng, Ailing and Wang, Can and Pang, Bo and Liu, Wentao and Lu, Cewu}, + booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, + pages={11025--11034}, + year={2021} +} +``` + +
+ +## Abstract + + + +Heatmap-based methods dominate in the field of human pose estimation by modelling the output distribution through likelihood heatmaps. In contrast, regression-based methods are more efficient but suffer from inferior performance. In this work, we explore maximum likelihood estimation (MLE) to develop an efficient and effective regression-based method. From the perspective of MLE, adopting different regression losses is making different assumptions about the output density function. A density function closer to the true distribution leads to a better regression performance. In light of this, we propose a novel regression paradigm with Residual Log-likelihood Estimation (RLE) to capture the underlying output distribution. Concretely, RLE learns the change of the distribution instead of the unreferenced underlying distribution to facilitate the training process. With the proposed reparameterization design, our method is compatible with off-the-shelf flow models. The proposed method is effective, efficient and flexible. We show its potential in various human pose estimation tasks with comprehensive experiments. Compared to the conventional regression paradigm, regression with RLE brings a 12.4 mAP improvement on MSCOCO without any test-time overhead. Moreover, for the first time, especially on multi-person pose estimation, our regression method is superior to the heatmap-based methods. + + + +
    + +
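The maximum-likelihood view that the abstract builds on can be made concrete with a negative log-likelihood regression loss where the head predicts both a coordinate and a scale. The sketch below assumes a Laplace residual density and hypothetical tensor shapes; RLE keeps this mu/sigma parameterization but learns the residual density with a normalizing flow, which is omitted here.

```python
import torch


def laplace_nll_loss(pred_mu, pred_log_sigma, target):
    """Per-keypoint regression NLL under a Laplace density with predicted scale.

    pred_mu, target: [N, K, 2] coordinates; pred_log_sigma: [N, K, 2].
    """
    sigma = pred_log_sigma.exp()
    # -log p(x | mu, sigma) up to an additive constant (log 2).
    nll = (target - pred_mu).abs() / sigma + pred_log_sigma
    return nll.mean()
```

A plain L1 loss corresponds to fixing sigma to a constant; letting the network predict sigma, and ultimately the whole residual distribution, is what the residual log-likelihood formulation generalizes.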
    diff --git a/docs/src/papers/techniques/smoothnet.md b/docs/src/papers/techniques/smoothnet.md index b09988ce44..f452299258 100644 --- a/docs/src/papers/techniques/smoothnet.md +++ b/docs/src/papers/techniques/smoothnet.md @@ -1,29 +1,29 @@ -# SmoothNet: A Plug-and-Play Network for Refining Human Poses in Videos - - - -
    -SmoothNet (arXiv'2021) - -```bibtex -@article{zeng2021smoothnet, - title={SmoothNet: A Plug-and-Play Network for Refining Human Poses in Videos}, - author={Zeng, Ailing and Yang, Lei and Ju, Xuan and Li, Jiefeng and Wang, Jianyi and Xu, Qiang}, - journal={arXiv preprint arXiv:2112.13715}, - year={2021} -} -``` - -
    - -## Abstract - - - -When analyzing human motion videos, the output jitters from existing pose estimators are highly-unbalanced. Most frames only suffer from slight jitters, while significant jitters occur in those frames with occlusion or poor image quality. Such complex poses often persist in videos, leading to consecutive frames with poor estimation results and large jitters. Existing pose smoothing solutions based on temporal convolutional networks, recurrent neural networks, or low-pass filters cannot deal with such a long-term jitter problem without considering the significant and persistent errors within the jittering video segment. Motivated by the above observation, we propose a novel plug-and-play refinement network, namely SMOOTHNET, which can be attached to any existing pose estimators to improve its temporal smoothness and enhance its per-frame precision simultaneously. Especially, SMOOTHNET is a simple yet effective data-driven fully-connected network with large receptive fields, effectively mitigating the impact of long-term jitters with unreliable estimation results. We conduct extensive experiments on twelve backbone networks with seven datasets across 2D and 3D pose estimation, body recovery, and downstream tasks. Our results demonstrate that the proposed SMOOTHNET consistently outperforms existing solutions, especially on those clips with high errors and long-term jitters. - - - -
    - -
    +# SmoothNet: A Plug-and-Play Network for Refining Human Poses in Videos + + + +
    +SmoothNet (arXiv'2021) + +```bibtex +@article{zeng2021smoothnet, + title={SmoothNet: A Plug-and-Play Network for Refining Human Poses in Videos}, + author={Zeng, Ailing and Yang, Lei and Ju, Xuan and Li, Jiefeng and Wang, Jianyi and Xu, Qiang}, + journal={arXiv preprint arXiv:2112.13715}, + year={2021} +} +``` + +
    + +## Abstract + + + +When analyzing human motion videos, the output jitters from existing pose estimators are highly-unbalanced. Most frames only suffer from slight jitters, while significant jitters occur in those frames with occlusion or poor image quality. Such complex poses often persist in videos, leading to consecutive frames with poor estimation results and large jitters. Existing pose smoothing solutions based on temporal convolutional networks, recurrent neural networks, or low-pass filters cannot deal with such a long-term jitter problem without considering the significant and persistent errors within the jittering video segment. Motivated by the above observation, we propose a novel plug-and-play refinement network, namely SMOOTHNET, which can be attached to any existing pose estimators to improve its temporal smoothness and enhance its per-frame precision simultaneously. Especially, SMOOTHNET is a simple yet effective data-driven fully-connected network with large receptive fields, effectively mitigating the impact of long-term jitters with unreliable estimation results. We conduct extensive experiments on twelve backbone networks with seven datasets across 2D and 3D pose estimation, body recovery, and downstream tasks. Our results demonstrate that the proposed SMOOTHNET consistently outperforms existing solutions, especially on those clips with high errors and long-term jitters. + + + +
    + +
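The refinement network at the core of the method is a fully-connected model that mixes information along the temporal axis of a sliding window, one coordinate channel at a time. The sketch below is a stripped-down illustration with assumed layer sizes and window length; the published model additionally uses residual blocks and averages predictions from overlapping windows.

```python
import torch
import torch.nn as nn


class TinySmoothNet(nn.Module):
    """Refine pose coordinates over a temporal window with a channel-wise MLP."""

    def __init__(self, window_size=32, hidden_size=512):
        super().__init__()
        self.window_size = window_size
        self.net = nn.Sequential(
            nn.Linear(window_size, hidden_size),
            nn.LeakyReLU(0.1),
            nn.Linear(hidden_size, hidden_size),
            nn.LeakyReLU(0.1),
            nn.Linear(hidden_size, window_size))

    def forward(self, poses):
        # poses: [N, T, C] with T == window_size and C = num_keypoints * coord_dims.
        x = poses.transpose(1, 2)        # [N, C, T]: the MLP mixes the temporal axis
        smoothed = self.net(x)           # each channel is refined from its whole window
        return smoothed.transpose(1, 2)  # back to [N, T, C]
```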
    diff --git a/docs/src/papers/techniques/softwingloss.md b/docs/src/papers/techniques/softwingloss.md index 524a6089ff..d638109270 100644 --- a/docs/src/papers/techniques/softwingloss.md +++ b/docs/src/papers/techniques/softwingloss.md @@ -1,30 +1,30 @@ -# Structure-Coherent Deep Feature Learning for Robust Face Alignment - - - -
    -SoftWingloss (TIP'2021) - -```bibtex -@article{lin2021structure, - title={Structure-Coherent Deep Feature Learning for Robust Face Alignment}, - author={Lin, Chunze and Zhu, Beier and Wang, Quan and Liao, Renjie and Qian, Chen and Lu, Jiwen and Zhou, Jie}, - journal={IEEE Transactions on Image Processing}, - year={2021}, - publisher={IEEE} -} -``` - -
    - -## Abstract - - - -In this paper, we propose a structure-coherent deep feature learning method for face alignment. Unlike most existing face alignment methods which overlook the facial structure cues, we explicitly exploit the relation among facial landmarks to make the detector robust to hard cases such as occlusion and large pose. Specifically, we leverage a landmark-graph relational network to enforce the structural relationships among landmarks. We consider the facial landmarks as structural graph nodes and carefully design the neighborhood to passing features among the most related nodes. Our method dynamically adapts the weights of node neighborhood to eliminate distracted information from noisy nodes, such as occluded landmark point. Moreover, different from most previous works which only tend to penalize the landmarks absolute position during the training, we propose a relative location loss to enhance the information of relative location of landmarks. This relative location supervision further regularizes the facial structure. Our approach considers the interactions among facial landmarks and can be easily implemented on top of any convolutional backbone to boost the performance. Extensive experiments on three popular benchmarks, including WFLW, COFW and 300W, demonstrate the effectiveness of the proposed method. In particular, due to explicit structure modeling, our approach is especially robust to challenging cases resulting in impressive low failure rate on COFW and WFLW datasets. - - - -
    - -
    +# Structure-Coherent Deep Feature Learning for Robust Face Alignment + + + +
    +SoftWingloss (TIP'2021) + +```bibtex +@article{lin2021structure, + title={Structure-Coherent Deep Feature Learning for Robust Face Alignment}, + author={Lin, Chunze and Zhu, Beier and Wang, Quan and Liao, Renjie and Qian, Chen and Lu, Jiwen and Zhou, Jie}, + journal={IEEE Transactions on Image Processing}, + year={2021}, + publisher={IEEE} +} +``` + +
+ +## Abstract + + + +In this paper, we propose a structure-coherent deep feature learning method for face alignment. Unlike most existing face alignment methods which overlook the facial structure cues, we explicitly exploit the relation among facial landmarks to make the detector robust to hard cases such as occlusion and large pose. Specifically, we leverage a landmark-graph relational network to enforce the structural relationships among landmarks. We consider the facial landmarks as structural graph nodes and carefully design the neighborhood to pass features among the most related nodes. Our method dynamically adapts the weights of node neighborhood to eliminate distracted information from noisy nodes, such as an occluded landmark point. Moreover, different from most previous works which only tend to penalize the landmarks' absolute position during the training, we propose a relative location loss to enhance the information of relative location of landmarks. This relative location supervision further regularizes the facial structure. Our approach considers the interactions among facial landmarks and can be easily implemented on top of any convolutional backbone to boost the performance. Extensive experiments on three popular benchmarks, including WFLW, COFW and 300W, demonstrate the effectiveness of the proposed method. In particular, due to explicit structure modeling, our approach is especially robust to challenging cases, resulting in an impressively low failure rate on COFW and WFLW datasets. + + + +
    + +
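Alongside the structural supervision described above, the loss that gives this page its name penalizes small landmark residuals linearly and large ones logarithmically. The sketch below assumes the piecewise soft-wing form and constants (omega1=2, omega2=20, epsilon=0.5) commonly used in open-source implementations; both the constants and the function name should be treated as illustrative.

```python
import math

import torch


def soft_wing_loss(pred, target, omega1=2.0, omega2=20.0, epsilon=0.5):
    """Soft-wing penalty on landmark residuals, mean-reduced.

    pred, target: landmark coordinates, e.g. shape [N, K, 2].
    """
    diff = (target - pred).abs()
    # B makes the linear and logarithmic pieces meet at |diff| == omega1.
    B = omega1 - omega2 * math.log(1 + omega1 / epsilon)
    loss = torch.where(
        diff < omega1,
        diff,                                           # linear near zero
        omega2 * torch.log(1 + diff / epsilon) + B)     # logarithmic for large errors
    return loss.mean()
```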
    diff --git a/docs/src/papers/techniques/udp.md b/docs/src/papers/techniques/udp.md index bb4acebfbc..00604fc5ce 100644 --- a/docs/src/papers/techniques/udp.md +++ b/docs/src/papers/techniques/udp.md @@ -1,30 +1,30 @@ -# The Devil is in the Details: Delving into Unbiased Data Processing for Human Pose Estimation - - - -
    -UDP (CVPR'2020) - -```bibtex -@InProceedings{Huang_2020_CVPR, - author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, - title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, - booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, - month = {June}, - year = {2020} -} -``` - -
- -## Abstract - - - -Recently, the leading performance in human pose estimation has been dominated by top-down methods. Being a fundamental component in training and inference, data processing has not been systematically considered in the pose estimation community, to the best of our knowledge. In this paper, we focus on this problem and find that the devil of top-down pose estimators is in biased data processing. Specifically, by investigating the standard data processing in state-of-the-art approaches, mainly including data transformation and encoding-decoding, we find that the results obtained by the common flipping strategy are misaligned with the original ones at inference. Moreover, there is a statistical error in standard encoding-decoding during both training and inference. The two problems couple together and significantly degrade the pose estimation performance. Based on quantitative analyses, we then formulate a principled way to tackle this dilemma. Data is processed in continuous space based on unit length (the interval between pixels) instead of in discrete space based on pixels, and a combined classification and regression approach is adopted to perform encoding-decoding. The Unbiased Data Processing (UDP) for human pose estimation can be achieved by combining the two together. UDP not only boosts the performance of existing methods by a large margin but also plays an important role in result reproduction and future exploration. As a model-agnostic approach, UDP improves SimpleBaseline-ResNet50-256x192 by 1.5 AP (70.2 to 71.7) and HRNet-W32-256x192 by 1.7 AP (73.5 to 75.2) on the COCO test-dev set. The HRNet-W48-384x288 equipped with UDP achieves 76.5 AP and sets a new state of the art for human pose estimation. The source code is publicly available for further research. - - - -
    - -
    +# The Devil is in the Details: Delving into Unbiased Data Processing for Human Pose Estimation + + + +
    +UDP (CVPR'2020) + +```bibtex +@InProceedings{Huang_2020_CVPR, + author = {Huang, Junjie and Zhu, Zheng and Guo, Feng and Huang, Guan}, + title = {The Devil Is in the Details: Delving Into Unbiased Data Processing for Human Pose Estimation}, + booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2020} +} +``` + +
+ +## Abstract + + + +Recently, the leading performance in human pose estimation has been dominated by top-down methods. Being a fundamental component in training and inference, data processing has not been systematically considered in the pose estimation community, to the best of our knowledge. In this paper, we focus on this problem and find that the devil of top-down pose estimators is in biased data processing. Specifically, by investigating the standard data processing in state-of-the-art approaches, mainly including data transformation and encoding-decoding, we find that the results obtained by the common flipping strategy are misaligned with the original ones at inference. Moreover, there is a statistical error in standard encoding-decoding during both training and inference. The two problems couple together and significantly degrade the pose estimation performance. Based on quantitative analyses, we then formulate a principled way to tackle this dilemma. Data is processed in continuous space based on unit length (the interval between pixels) instead of in discrete space based on pixels, and a combined classification and regression approach is adopted to perform encoding-decoding. The Unbiased Data Processing (UDP) for human pose estimation can be achieved by combining the two together. UDP not only boosts the performance of existing methods by a large margin but also plays an important role in result reproduction and future exploration. As a model-agnostic approach, UDP improves SimpleBaseline-ResNet50-256x192 by 1.5 AP (70.2 to 71.7) and HRNet-W32-256x192 by 1.7 AP (73.5 to 75.2) on the COCO test-dev set. The HRNet-W48-384x288 equipped with UDP achieves 76.5 AP and sets a new state of the art for human pose estimation. The source code is publicly available for further research. + + + +
    + +
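The UDP abstract above argues that the common flipping strategy becomes misaligned because coordinates are rescaled by a biased factor, and that measuring data in continuous space with unit length (the interval between pixels) removes the bias. Here is a toy numerical check of that claim, assuming a horizontal flip maps pixel x to W - 1 - x; the widths and helper names are illustrative and unrelated to MMPose's actual implementation.

```python
import numpy as np

W, w = 192, 48                  # input width and heatmap width (toy sizes)
x = np.arange(0.0, W, 7.0)      # some keypoint x-coordinates in the input


def biased(v):
    # common convention: scale by w / W (treats pixels as areas)
    return v * w / W


def unbiased(v):
    # unit-length convention: scale by (w - 1) / (W - 1) (pixel intervals)
    return v * (w - 1) / (W - 1)


def flip(v, width):
    # horizontal flip in a discrete grid of the given width
    return (width - 1) - v


# flip-then-transform and transform-then-flip should agree
err_biased = np.abs(biased(flip(x, W)) - flip(biased(x), w)).max()
err_unbiased = np.abs(unbiased(flip(x, W)) - flip(unbiased(x), w)).max()

print(f'biased mismatch:   {err_biased:.4f} px')    # constant 0.75 px here
print(f'unbiased mismatch: {err_unbiased:.4f} px')  # 0 up to float error
```

The constant offset in the biased case is the kind of discrepancy that flip testing in heatmap-based pipelines often patches with an empirical one-pixel shift; the unit-length convention makes the two operations commute by construction.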
    diff --git a/docs/src/papers/techniques/wingloss.md b/docs/src/papers/techniques/wingloss.md index 2aaa05722e..a0f0a35cfb 100644 --- a/docs/src/papers/techniques/wingloss.md +++ b/docs/src/papers/techniques/wingloss.md @@ -1,31 +1,31 @@ -# Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural Networks - - - -
    -Wingloss (CVPR'2018) - -```bibtex -@inproceedings{feng2018wing, - title={Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural Networks}, - author={Feng, Zhen-Hua and Kittler, Josef and Awais, Muhammad and Huber, Patrik and Wu, Xiao-Jun}, - booktitle={Computer Vision and Pattern Recognition (CVPR), 2018 IEEE Conference on}, - year={2018}, - pages ={2235-2245}, - organization={IEEE} -} -``` - -
    - -## Abstract - - - -We present a new loss function, namely Wing loss, for robust facial landmark localisation with Convolutional Neural Networks (CNNs). We first compare and analyse different loss functions including L2, L1 and smooth L1. The analysis of these loss functions suggests that, for the training of a CNN-based localisation model, more attention should be paid to small and medium range errors. To this end, we design a piece-wise loss function. The new loss amplifies the impact of errors from the interval (-w, w) by switching from L1 loss to a modified logarithm function. To address the problem of under-representation of samples with large out-of-plane head rotations in the training set, we propose a simple but effective boosting strategy, referred to as pose-based data balancing. In particular, we deal with the data imbalance problem by duplicating the minority training samples and perturbing them by injecting random image rotation, bounding box translation and other data augmentation approaches. Last, the proposed approach is extended to create a two-stage framework for robust facial landmark localisation. The experimental results obtained on AFLW and 300W demonstrate the merits of the Wing loss function, and prove the superiority of the proposed method over the state-of-the-art approaches. - - - -
    - -
    +# Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural Networks + + + +
    +Wingloss (CVPR'2018) + +```bibtex +@inproceedings{feng2018wing, + title={Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural Networks}, + author={Feng, Zhen-Hua and Kittler, Josef and Awais, Muhammad and Huber, Patrik and Wu, Xiao-Jun}, + booktitle={Computer Vision and Pattern Recognition (CVPR), 2018 IEEE Conference on}, + year={2018}, + pages ={2235-2245}, + organization={IEEE} +} +``` + +
    + +## Abstract + + + +We present a new loss function, namely Wing loss, for robust facial landmark localisation with Convolutional Neural Networks (CNNs). We first compare and analyse different loss functions including L2, L1 and smooth L1. The analysis of these loss functions suggests that, for the training of a CNN-based localisation model, more attention should be paid to small and medium range errors. To this end, we design a piece-wise loss function. The new loss amplifies the impact of errors from the interval (-w, w) by switching from L1 loss to a modified logarithm function. To address the problem of under-representation of samples with large out-of-plane head rotations in the training set, we propose a simple but effective boosting strategy, referred to as pose-based data balancing. In particular, we deal with the data imbalance problem by duplicating the minority training samples and perturbing them by injecting random image rotation, bounding box translation and other data augmentation approaches. Last, the proposed approach is extended to create a two-stage framework for robust facial landmark localisation. The experimental results obtained on AFLW and 300W demonstrate the merits of the Wing loss function, and prove the superiority of the proposed method over the state-of-the-art approaches. + + + +
    + +
    diff --git a/docs/zh_cn/Makefile b/docs/zh_cn/Makefile index d4bb2cbb9e..73a28c7134 100644 --- a/docs/zh_cn/Makefile +++ b/docs/zh_cn/Makefile @@ -1,20 +1,20 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line, and also -# from the environment for the first two. -SPHINXOPTS ?= -SPHINXBUILD ?= sphinx-build -SOURCEDIR = . -BUILDDIR = _build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). -%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/zh_cn/_static/css/readthedocs.css b/docs/zh_cn/_static/css/readthedocs.css index e75ab1b46a..c8740686f8 100644 --- a/docs/zh_cn/_static/css/readthedocs.css +++ b/docs/zh_cn/_static/css/readthedocs.css @@ -1,10 +1,10 @@ -.header-logo { - background-image: url("../images/mmpose-logo.png"); - background-size: 120px 50px; - height: 50px; - width: 120px; -} - -table.autosummary td { - width: 35% -} +.header-logo { + background-image: url("../images/mmpose-logo.png"); + background-size: 120px 50px; + height: 50px; + width: 120px; +} + +table.autosummary td { + width: 35% +} diff --git a/docs/zh_cn/advanced_guides/codecs.md b/docs/zh_cn/advanced_guides/codecs.md index 85d4d2e54b..3b2259a7f8 100644 --- a/docs/zh_cn/advanced_guides/codecs.md +++ b/docs/zh_cn/advanced_guides/codecs.md @@ -1,227 +1,227 @@ -# 编解码器 - -在关键点检测任务中,根据算法的不同,需要利用标注信息,生成不同格式的训练目标,比如归一化的坐标值、一维向量、高斯热图等。同样的,对于模型输出的结果,也需要经过处理转换成标注信息格式。我们一般将标注信息到训练目标的处理过程称为编码,模型输出到标注信息的处理过程称为解码。 - -编码和解码是一对紧密相关的互逆处理过程。在 MMPose 早期版本中,编码和解码过程往往分散在不同模块里,使其不够直观和统一,增加了学习和维护成本。 - -MMPose 1.0 中引入了新模块 **编解码器(Codec)** ,将关键点数据的编码和解码过程进行集成,以增加代码的友好度和复用性。 - -编解码器在工作流程中所处的位置如下所示: - -![codec-cn](https://user-images.githubusercontent.com/13503330/187829784-4d5939de-97d7-43cc-b934-c6d17c02d589.png) - -一个编解码器主要包含两个部分: - -- 编码器 -- 解码器 - -### 编码器 - -编码器主要负责将处于输入图片尺度的坐标值,编码为模型训练所需要的目标格式,主要包括: - -- 归一化的坐标值:用于 Regression-based 方法 -- 一维向量:用于 SimCC-based 方法 -- 高斯热图:用于 Heatmap-based 方法 - -以 Regression-based 方法的编码器为例: - -```Python -def encode(self, - keypoints: np.ndarray, - keypoints_visible: Optional[np.ndarray] = None) -> dict: - """Encoding keypoints from input image space to normalized space. 
- - Args: - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) - keypoints_visible (np.ndarray): Keypoint visibilities in shape - (N, K) - - Returns: - dict: - - keypoint_labels (np.ndarray): The normalized regression labels in - shape (N, K, D) where D is 2 for 2d coordinates - - keypoint_weights (np.ndarray): The target weights in shape - (N, K) - """ - if keypoints_visible is None: - keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) - - w, h = self.input_size - valid = ((keypoints >= 0) & - (keypoints <= [w - 1, h - 1])).all(axis=-1) & ( - keypoints_visible > 0.5) - - keypoint_labels = (keypoints / np.array([w, h])).astype(np.float32) - keypoint_weights = np.where(valid, 1., 0.).astype(np.float32) - - encoded = dict( - keypoint_labels=keypoint_labels, keypoint_weights=keypoint_weights) - - return encoded -``` - -编码后的数据会在 `PackPoseInputs` 中被转换为 Tensor 格式,并封装到 `data_sample.gt_instance_labels` 中供模型调用,一般主要用于 loss 计算,下面以 `RegressionHead` 中的 `loss()` 为例: - -```Python -def loss(self, - inputs: Tuple[Tensor], - batch_data_samples: OptSampleList, - train_cfg: ConfigType = {}) -> dict: - """Calculate losses from a batch of inputs and data samples.""" - - pred_outputs = self.forward(inputs) - - keypoint_labels = torch.cat( - [d.gt_instance_labels.keypoint_labels for d in batch_data_samples]) - keypoint_weights = torch.cat([ - d.gt_instance_labels.keypoint_weights for d in batch_data_samples - ]) - - # calculate losses - losses = dict() - loss = self.loss_module(pred_outputs, keypoint_labels, - keypoint_weights.unsqueeze(-1)) - - losses.update(loss_kpt=loss) - ### 后续内容省略 ### -``` - -### 解码器 - -解码器主要负责将模型的输出解码为输入图片尺度的坐标值,处理过程与编码器相反。 - -以 Regression-based 方法的解码器为例: - -```Python -def decode(self, encoded: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: - """Decode keypoint coordinates from normalized space to input image - space. - - Args: - encoded (np.ndarray): Coordinates in shape (N, K, D) - - Returns: - tuple: - - keypoints (np.ndarray): Decoded coordinates in shape (N, K, D) - - scores (np.ndarray): The keypoint scores in shape (N, K). 
- It usually represents the confidence of the keypoint prediction - - """ - - if encoded.shape[-1] == 2: - N, K, _ = encoded.shape - normalized_coords = encoded.copy() - scores = np.ones((N, K), dtype=np.float32) - elif encoded.shape[-1] == 4: - # split coords and sigma if outputs contain output_sigma - normalized_coords = encoded[..., :2].copy() - output_sigma = encoded[..., 2:4].copy() - scores = (1 - output_sigma).mean(axis=-1) - else: - raise ValueError( - 'Keypoint dimension should be 2 or 4 (with sigma), ' - f'but got {encoded.shape[-1]}') - - w, h = self.input_size - keypoints = normalized_coords * np.array([w, h]) - - return keypoints, scores -``` - -默认情况下,`decode()` 方法只提供单个目标数据的解码过程,你也可以通过 `batch_decode()` 来实现批量解码提升执行效率。 - -## 常见用法 - -在 MMPose 配置文件中,主要有三处涉及编解码器: - -- 定义编解码器 -- 生成训练目标 -- 模型头部 - -### 定义编解码器 - -以回归方法生成归一化的坐标值为例,在配置文件中,我们通过如下方式定义编解码器: - -```Python -codec = dict(type='RegressionLabel', input_size=(192, 256)) -``` - -### 生成训练目标 - -在数据处理阶段生成训练目标时,需要传入编解码器用于编码: - -```Python -dict(type='GenerateTarget', encoder=codec) -``` - -### 模型头部 - -在 MMPose 中,我们在模型头部对模型的输出进行解码,需要传入编解码器用于解码: - -```Python -head=dict( - type='RLEHead', - in_channels=2048, - num_joints=17, - loss=dict(type='RLELoss', use_target_weight=True), - decoder=codec -) -``` - -它们在配置文件中的具体位置如下: - -```Python - -# codec settings -codec = dict(type='RegressionLabel', input_size=(192, 256)) ## 定义 ## - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - ), - neck=dict(type='GlobalAveragePooling'), - head=dict( - type='RLEHead', - in_channels=2048, - num_joints=17, - loss=dict(type='RLELoss', use_target_weight=True), - decoder=codec), ## 模型头部 ## - test_cfg=dict( - flip_test=True, - shift_coords=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), ## 生成训练目标 ## - dict(type='PackPoseInputs') -] -test_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] -``` +# 编解码器 + +在关键点检测任务中,根据算法的不同,需要利用标注信息,生成不同格式的训练目标,比如归一化的坐标值、一维向量、高斯热图等。同样的,对于模型输出的结果,也需要经过处理转换成标注信息格式。我们一般将标注信息到训练目标的处理过程称为编码,模型输出到标注信息的处理过程称为解码。 + +编码和解码是一对紧密相关的互逆处理过程。在 MMPose 早期版本中,编码和解码过程往往分散在不同模块里,使其不够直观和统一,增加了学习和维护成本。 + +MMPose 1.0 中引入了新模块 **编解码器(Codec)** ,将关键点数据的编码和解码过程进行集成,以增加代码的友好度和复用性。 + +编解码器在工作流程中所处的位置如下所示: + +![codec-cn](https://user-images.githubusercontent.com/13503330/187829784-4d5939de-97d7-43cc-b934-c6d17c02d589.png) + +一个编解码器主要包含两个部分: + +- 编码器 +- 解码器 + +### 编码器 + +编码器主要负责将处于输入图片尺度的坐标值,编码为模型训练所需要的目标格式,主要包括: + +- 归一化的坐标值:用于 Regression-based 方法 +- 一维向量:用于 SimCC-based 方法 +- 高斯热图:用于 Heatmap-based 方法 + +以 Regression-based 方法的编码器为例: + +```Python +def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None) -> dict: + """Encoding keypoints from input image space to 
normalized space. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + + Returns: + dict: + - keypoint_labels (np.ndarray): The normalized regression labels in + shape (N, K, D) where D is 2 for 2d coordinates + - keypoint_weights (np.ndarray): The target weights in shape + (N, K) + """ + if keypoints_visible is None: + keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) + + w, h = self.input_size + valid = ((keypoints >= 0) & + (keypoints <= [w - 1, h - 1])).all(axis=-1) & ( + keypoints_visible > 0.5) + + keypoint_labels = (keypoints / np.array([w, h])).astype(np.float32) + keypoint_weights = np.where(valid, 1., 0.).astype(np.float32) + + encoded = dict( + keypoint_labels=keypoint_labels, keypoint_weights=keypoint_weights) + + return encoded +``` + +编码后的数据会在 `PackPoseInputs` 中被转换为 Tensor 格式,并封装到 `data_sample.gt_instance_labels` 中供模型调用,一般主要用于 loss 计算,下面以 `RegressionHead` 中的 `loss()` 为例: + +```Python +def loss(self, + inputs: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + pred_outputs = self.forward(inputs) + + keypoint_labels = torch.cat( + [d.gt_instance_labels.keypoint_labels for d in batch_data_samples]) + keypoint_weights = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples + ]) + + # calculate losses + losses = dict() + loss = self.loss_module(pred_outputs, keypoint_labels, + keypoint_weights.unsqueeze(-1)) + + losses.update(loss_kpt=loss) + ### 后续内容省略 ### +``` + +### 解码器 + +解码器主要负责将模型的输出解码为输入图片尺度的坐标值,处理过程与编码器相反。 + +以 Regression-based 方法的解码器为例: + +```Python +def decode(self, encoded: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoint coordinates from normalized space to input image + space. + + Args: + encoded (np.ndarray): Coordinates in shape (N, K, D) + + Returns: + tuple: + - keypoints (np.ndarray): Decoded coordinates in shape (N, K, D) + - scores (np.ndarray): The keypoint scores in shape (N, K). 
+ It usually represents the confidence of the keypoint prediction + + """ + + if encoded.shape[-1] == 2: + N, K, _ = encoded.shape + normalized_coords = encoded.copy() + scores = np.ones((N, K), dtype=np.float32) + elif encoded.shape[-1] == 4: + # split coords and sigma if outputs contain output_sigma + normalized_coords = encoded[..., :2].copy() + output_sigma = encoded[..., 2:4].copy() + scores = (1 - output_sigma).mean(axis=-1) + else: + raise ValueError( + 'Keypoint dimension should be 2 or 4 (with sigma), ' + f'but got {encoded.shape[-1]}') + + w, h = self.input_size + keypoints = normalized_coords * np.array([w, h]) + + return keypoints, scores +``` + +默认情况下,`decode()` 方法只提供单个目标数据的解码过程,你也可以通过 `batch_decode()` 来实现批量解码提升执行效率。 + +## 常见用法 + +在 MMPose 配置文件中,主要有三处涉及编解码器: + +- 定义编解码器 +- 生成训练目标 +- 模型头部 + +### 定义编解码器 + +以回归方法生成归一化的坐标值为例,在配置文件中,我们通过如下方式定义编解码器: + +```Python +codec = dict(type='RegressionLabel', input_size=(192, 256)) +``` + +### 生成训练目标 + +在数据处理阶段生成训练目标时,需要传入编解码器用于编码: + +```Python +dict(type='GenerateTarget', encoder=codec) +``` + +### 模型头部 + +在 MMPose 中,我们在模型头部对模型的输出进行解码,需要传入编解码器用于解码: + +```Python +head=dict( + type='RLEHead', + in_channels=2048, + num_joints=17, + loss=dict(type='RLELoss', use_target_weight=True), + decoder=codec +) +``` + +它们在配置文件中的具体位置如下: + +```Python + +# codec settings +codec = dict(type='RegressionLabel', input_size=(192, 256)) ## 定义 ## + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='RLEHead', + in_channels=2048, + num_joints=17, + loss=dict(type='RLELoss', use_target_weight=True), + decoder=codec), ## 模型头部 ## + test_cfg=dict( + flip_test=True, + shift_coords=True, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), ## 生成训练目标 ## + dict(type='PackPoseInputs') +] +test_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] +``` diff --git a/docs/zh_cn/advanced_guides/customize_datasets.md b/docs/zh_cn/advanced_guides/customize_datasets.md index 61b58dc929..16603bfbf2 100644 --- a/docs/zh_cn/advanced_guides/customize_datasets.md +++ b/docs/zh_cn/advanced_guides/customize_datasets.md @@ -1,264 +1,264 @@ -# 自定义数据集 - -MMPose 目前已支持了多个任务和相应的数据集。您可以在 [数据集](https://mmpose.readthedocs.io/zh_CN/latest/dataset_zoo.html) 找到它们。请按照相应的指南准备数据。 - - - -- [自定义数据集-将数据组织为 COCO 格式](#自定义数据集-将数据组织为-coco-格式) -- [创建自定义数据集的元信息文件](#创建自定义数据集的元信息文件) -- [创建自定义数据集类](#创建自定义数据集类) -- [创建自定义配置文件](#创建自定义配置文件) -- [数据集封装](#数据集封装) - - - -## 将数据组织为 COCO 格式 - -最简单的使用自定义数据集的方法是将您的注释格式转换为 COCO 数据集格式。 - -COCO 格式的注释 JSON 文件具有以下必要键: - -```python -'images': [ - { - 'file_name': '000000001268.jpg', - 'height': 427, - 'width': 640, - 'id': 1268 - }, - ... 
-], -'annotations': [ - { - 'segmentation': [[426.36, - ... - 424.34, - 223.3]], - 'keypoints': [0,0,0, - 0,0,0, - 0,0,0, - 427,220,2, - 443,222,2, - 414,228,2, - 449,232,2, - 408,248,1, - 454,261,2, - 0,0,0, - 0,0,0, - 411,287,2, - 431,287,2, - 0,0,0, - 458,265,2, - 0,0,0, - 466,300,1], - 'num_keypoints': 10, - 'area': 3894.5826, - 'iscrowd': 0, - 'image_id': 1268, - 'bbox': [402.34, 205.02, 65.26, 88.45], - 'category_id': 1, - 'id': 215218 - }, - ... -], -'categories': [ - {'id': 1, 'name': 'person'}, - ] -``` - -JSON 标注文件中有三个关键词是必需的: - -- `images`:包含所有图像信息的列表,每个图像都有一个 `file_name`、`height`、`width` 和 `id` 键。 -- `annotations`:包含所有实例标注信息的列表,每个实例都有一个 `segmentation`、`keypoints`、`num_keypoints`、`area`、`iscrowd`、`image_id`、`bbox`、`category_id` 和 `id` 键。 -- `categories`:包含所有类别信息的列表,每个类别都有一个 `id` 和 `name` 键。以人体姿态估计为例,`id` 为 1,`name` 为 `person`。 - -如果您的数据集已经是 COCO 格式的,那么您可以直接使用 `CocoDataset` 类来读取该数据集。 - -## 创建自定义数据集的元信息文件 - -对于一个新的数据集而言,您需要创建一个新的数据集元信息文件。该文件包含了数据集的基本信息,如关键点个数、排列顺序、可视化颜色、骨架连接关系等。元信息文件通常存放在 `config/_base_/datasets/` 目录下,例如: - -``` -config/_base_/datasets/custom.py -``` - -元信息文件中需要包含以下信息: - -- `keypoint_info`:每个关键点的信息: - 1. `name`: 关键点名称,必须是唯一的,例如 `nose`、`left_eye` 等。 - 2. `id`: 关键点 ID,必须是唯一的,从 0 开始。 - 3. `color`: 关键点可视化时的颜色,以 (\[B, G, R\]) 格式组织起来,用于可视化。 - 4. `type`: 关键点类型,可以是 `upper`、`lower` 或 \`\`,用于数据增强。 - 5. `swap`: 关键点交换关系,用于水平翻转数据增强。 -- `skeleton_info`:骨架连接关系,用于可视化。 -- `joint_weights`:每个关键点的权重,用于损失函数计算。 -- `sigma`:标准差,用于计算 OKS 分数,详细信息请参考 [keypoints-eval](https://cocodataset.org/#keypoints-eval)。 - -下面是一个简化版本的元信息文件([完整版](/configs/_base_/datasets/coco.py)): - -```python -dataset_info = dict( - dataset_name='coco', - paper_info=dict( - author='Lin, Tsung-Yi and Maire, Michael and ' - 'Belongie, Serge and Hays, James and ' - 'Perona, Pietro and Ramanan, Deva and ' - r'Doll{\'a}r, Piotr and Zitnick, C Lawrence', - title='Microsoft coco: Common objects in context', - container='European conference on computer vision', - year='2014', - homepage='http://cocodataset.org/', - ), - keypoint_info={ - 0: - dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), - 1: - dict( - name='left_eye', - id=1, - color=[51, 153, 255], - type='upper', - swap='right_eye'), - ... - 16: - dict( - name='right_ankle', - id=16, - color=[255, 128, 0], - type='lower', - swap='left_ankle') - }, - skeleton_info={ - 0: - dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), - ... - 18: - dict( - link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]) - }, - joint_weights=[ - 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, - 1.5 - ], - sigmas=[ - 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, - 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089 - ]) -``` - -## 创建自定义数据集类 - -如果标注信息不是用 COCO 格式存储的,那么您需要创建一个新的数据集类。数据集类需要继承自 `BaseDataset` 类,并且需要按照以下步骤实现: - -1. 在 `mmpose/datasets/datasets` 目录下找到该数据集符合的 package,如果没有符合的,则创建一个新的 package。 - -2. 在该 package 下创建一个新的数据集类,在对应的注册器中进行注册: - - ```python - from mmengine.dataset import BaseDataset - from mmpose.registry import DATASETS - - @DATASETS.register_module(name='MyCustomDataset') - class MyCustomDataset(BaseDataset): - ``` - - 如果未注册,你会在运行时遇到 `KeyError: 'XXXXX is not in the dataset registry'`。 - 关于 `mmengine.BaseDataset` 的更多信息,请参考 [这个文档](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/basedataset.html)。 - -3. 确保你在 package 的 `__init__.py` 中导入了该数据集类。 - -4. 确保你在 `mmpose/datasets/__init__.py` 中导入了该 package。 - -## 创建自定义配置文件 - -在配置文件中,你需要修改跟数据集有关的部分,例如: - -```python -... 
-# 自定义数据集类 -dataset_type = 'MyCustomDataset' # or 'CocoDataset' - -train_dataloader = dict( - batch_size=2, - dataset=dict( - type=dataset_type, - data_root='root/of/your/train/data', - ann_file='path/to/your/train/json', - data_prefix=dict(img='path/to/your/train/img'), - metainfo=dict(from_file='configs/_base_/datasets/custom.py'), - ...), - ) - -val_dataloader = dict( - batch_size=2, - dataset=dict( - type=dataset_type, - data_root='root/of/your/val/data', - ann_file='path/to/your/val/json', - data_prefix=dict(img='path/to/your/val/img'), - metainfo=dict(from_file='configs/_base_/datasets/custom.py'), - ...), - ) - -test_dataloader = dict( - batch_size=2, - dataset=dict( - type=dataset_type, - data_root='root/of/your/test/data', - ann_file='path/to/your/test/json', - data_prefix=dict(img='path/to/your/test/img'), - metainfo=dict(from_file='configs/_base_/datasets/custom.py'), - ...), - ) -... -``` - -请确保所有的路径都是正确的。 - -## 数据集封装 - -目前 [MMEngine](https://github.com/open-mmlab/mmengine) 支持以下数据集封装: - -- [ConcatDataset](https://mmengine.readthedocs.io/zh_CN/latest/advanced_tutorials/basedataset.html#concatdataset) -- [RepeatDataset](https://mmengine.readthedocs.io/zh_CN/latest/advanced_tutorials/basedataset.html#repeatdataset) - -### CombinedDataset - -MMPose 提供了一个 `CombinedDataset` 类,它可以将多个数据集封装成一个数据集。它的使用方法如下: - -```python -dataset_1 = dict( - type='dataset_type_1', - data_root='root/of/your/dataset1', - data_prefix=dict(img_path='path/to/your/img'), - ann_file='annotations/train.json', - pipeline=[ - # 使用转换器将标注信息统一为需要的格式 - converter_transform_1 - ]) - -dataset_2 = dict( - type='dataset_type_2', - data_root='root/of/your/dataset2', - data_prefix=dict(img_path='path/to/your/img'), - ann_file='annotations/train.json', - pipeline=[ - converter_transform_2 - ]) - -shared_pipeline = [ - LoadImage(), - ParseImage(), -] - -combined_dataset = dict( - type='CombinedDataset', - metainfo=dict(from_file='path/to/your/metainfo'), - datasets=[dataset_1, dataset_2], - pipeline=shared_pipeline, -) -``` - -- **合并数据集的元信息** 决定了标注格式,可以是子数据集的元信息,也可以是自定义的元信息。如果要自定义元信息,可以参考 [创建自定义数据集的元信息文件](#创建自定义数据集的元信息文件)。 -- **KeypointConverter** 用于将不同的标注格式转换成统一的格式。比如将关键点个数不同、关键点排列顺序不同的数据集进行合并。 -- 更详细的说明请前往[混合数据集训练](../user_guides/mixed_datasets.md)。 +# 自定义数据集 + +MMPose 目前已支持了多个任务和相应的数据集。您可以在 [数据集](https://mmpose.readthedocs.io/zh_CN/latest/dataset_zoo.html) 找到它们。请按照相应的指南准备数据。 + + + +- [自定义数据集-将数据组织为 COCO 格式](#自定义数据集-将数据组织为-coco-格式) +- [创建自定义数据集的元信息文件](#创建自定义数据集的元信息文件) +- [创建自定义数据集类](#创建自定义数据集类) +- [创建自定义配置文件](#创建自定义配置文件) +- [数据集封装](#数据集封装) + + + +## 将数据组织为 COCO 格式 + +最简单的使用自定义数据集的方法是将您的注释格式转换为 COCO 数据集格式。 + +COCO 格式的注释 JSON 文件具有以下必要键: + +```python +'images': [ + { + 'file_name': '000000001268.jpg', + 'height': 427, + 'width': 640, + 'id': 1268 + }, + ... +], +'annotations': [ + { + 'segmentation': [[426.36, + ... + 424.34, + 223.3]], + 'keypoints': [0,0,0, + 0,0,0, + 0,0,0, + 427,220,2, + 443,222,2, + 414,228,2, + 449,232,2, + 408,248,1, + 454,261,2, + 0,0,0, + 0,0,0, + 411,287,2, + 431,287,2, + 0,0,0, + 458,265,2, + 0,0,0, + 466,300,1], + 'num_keypoints': 10, + 'area': 3894.5826, + 'iscrowd': 0, + 'image_id': 1268, + 'bbox': [402.34, 205.02, 65.26, 88.45], + 'category_id': 1, + 'id': 215218 + }, + ... 
+], +'categories': [ + {'id': 1, 'name': 'person'}, + ] +``` + +JSON 标注文件中有三个关键词是必需的: + +- `images`:包含所有图像信息的列表,每个图像都有一个 `file_name`、`height`、`width` 和 `id` 键。 +- `annotations`:包含所有实例标注信息的列表,每个实例都有一个 `segmentation`、`keypoints`、`num_keypoints`、`area`、`iscrowd`、`image_id`、`bbox`、`category_id` 和 `id` 键。 +- `categories`:包含所有类别信息的列表,每个类别都有一个 `id` 和 `name` 键。以人体姿态估计为例,`id` 为 1,`name` 为 `person`。 + +如果您的数据集已经是 COCO 格式的,那么您可以直接使用 `CocoDataset` 类来读取该数据集。 + +## 创建自定义数据集的元信息文件 + +对于一个新的数据集而言,您需要创建一个新的数据集元信息文件。该文件包含了数据集的基本信息,如关键点个数、排列顺序、可视化颜色、骨架连接关系等。元信息文件通常存放在 `config/_base_/datasets/` 目录下,例如: + +``` +config/_base_/datasets/custom.py +``` + +元信息文件中需要包含以下信息: + +- `keypoint_info`:每个关键点的信息: + 1. `name`: 关键点名称,必须是唯一的,例如 `nose`、`left_eye` 等。 + 2. `id`: 关键点 ID,必须是唯一的,从 0 开始。 + 3. `color`: 关键点可视化时的颜色,以 (\[B, G, R\]) 格式组织起来,用于可视化。 + 4. `type`: 关键点类型,可以是 `upper`、`lower` 或 \`\`,用于数据增强。 + 5. `swap`: 关键点交换关系,用于水平翻转数据增强。 +- `skeleton_info`:骨架连接关系,用于可视化。 +- `joint_weights`:每个关键点的权重,用于损失函数计算。 +- `sigma`:标准差,用于计算 OKS 分数,详细信息请参考 [keypoints-eval](https://cocodataset.org/#keypoints-eval)。 + +下面是一个简化版本的元信息文件([完整版](/configs/_base_/datasets/coco.py)): + +```python +dataset_info = dict( + dataset_name='coco', + paper_info=dict( + author='Lin, Tsung-Yi and Maire, Michael and ' + 'Belongie, Serge and Hays, James and ' + 'Perona, Pietro and Ramanan, Deva and ' + r'Doll{\'a}r, Piotr and Zitnick, C Lawrence', + title='Microsoft coco: Common objects in context', + container='European conference on computer vision', + year='2014', + homepage='http://cocodataset.org/', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + ... + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + ... + 18: + dict( + link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]) + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5 + ], + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089 + ]) +``` + +## 创建自定义数据集类 + +如果标注信息不是用 COCO 格式存储的,那么您需要创建一个新的数据集类。数据集类需要继承自 `BaseDataset` 类,并且需要按照以下步骤实现: + +1. 在 `mmpose/datasets/datasets` 目录下找到该数据集符合的 package,如果没有符合的,则创建一个新的 package。 + +2. 在该 package 下创建一个新的数据集类,在对应的注册器中进行注册: + + ```python + from mmengine.dataset import BaseDataset + from mmpose.registry import DATASETS + + @DATASETS.register_module(name='MyCustomDataset') + class MyCustomDataset(BaseDataset): + ``` + + 如果未注册,你会在运行时遇到 `KeyError: 'XXXXX is not in the dataset registry'`。 + 关于 `mmengine.BaseDataset` 的更多信息,请参考 [这个文档](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/basedataset.html)。 + +3. 确保你在 package 的 `__init__.py` 中导入了该数据集类。 + +4. 确保你在 `mmpose/datasets/__init__.py` 中导入了该 package。 + +## 创建自定义配置文件 + +在配置文件中,你需要修改跟数据集有关的部分,例如: + +```python +... 
+# 自定义数据集类 +dataset_type = 'MyCustomDataset' # or 'CocoDataset' + +train_dataloader = dict( + batch_size=2, + dataset=dict( + type=dataset_type, + data_root='root/of/your/train/data', + ann_file='path/to/your/train/json', + data_prefix=dict(img='path/to/your/train/img'), + metainfo=dict(from_file='configs/_base_/datasets/custom.py'), + ...), + ) + +val_dataloader = dict( + batch_size=2, + dataset=dict( + type=dataset_type, + data_root='root/of/your/val/data', + ann_file='path/to/your/val/json', + data_prefix=dict(img='path/to/your/val/img'), + metainfo=dict(from_file='configs/_base_/datasets/custom.py'), + ...), + ) + +test_dataloader = dict( + batch_size=2, + dataset=dict( + type=dataset_type, + data_root='root/of/your/test/data', + ann_file='path/to/your/test/json', + data_prefix=dict(img='path/to/your/test/img'), + metainfo=dict(from_file='configs/_base_/datasets/custom.py'), + ...), + ) +... +``` + +请确保所有的路径都是正确的。 + +## 数据集封装 + +目前 [MMEngine](https://github.com/open-mmlab/mmengine) 支持以下数据集封装: + +- [ConcatDataset](https://mmengine.readthedocs.io/zh_CN/latest/advanced_tutorials/basedataset.html#concatdataset) +- [RepeatDataset](https://mmengine.readthedocs.io/zh_CN/latest/advanced_tutorials/basedataset.html#repeatdataset) + +### CombinedDataset + +MMPose 提供了一个 `CombinedDataset` 类,它可以将多个数据集封装成一个数据集。它的使用方法如下: + +```python +dataset_1 = dict( + type='dataset_type_1', + data_root='root/of/your/dataset1', + data_prefix=dict(img_path='path/to/your/img'), + ann_file='annotations/train.json', + pipeline=[ + # 使用转换器将标注信息统一为需要的格式 + converter_transform_1 + ]) + +dataset_2 = dict( + type='dataset_type_2', + data_root='root/of/your/dataset2', + data_prefix=dict(img_path='path/to/your/img'), + ann_file='annotations/train.json', + pipeline=[ + converter_transform_2 + ]) + +shared_pipeline = [ + LoadImage(), + ParseImage(), +] + +combined_dataset = dict( + type='CombinedDataset', + metainfo=dict(from_file='path/to/your/metainfo'), + datasets=[dataset_1, dataset_2], + pipeline=shared_pipeline, +) +``` + +- **合并数据集的元信息** 决定了标注格式,可以是子数据集的元信息,也可以是自定义的元信息。如果要自定义元信息,可以参考 [创建自定义数据集的元信息文件](#创建自定义数据集的元信息文件)。 +- **KeypointConverter** 用于将不同的标注格式转换成统一的格式。比如将关键点个数不同、关键点排列顺序不同的数据集进行合并。 +- 更详细的说明请前往[混合数据集训练](../user_guides/mixed_datasets.md)。 diff --git a/docs/zh_cn/advanced_guides/customize_logging.md b/docs/zh_cn/advanced_guides/customize_logging.md index 093a530dba..8e2b7f311b 100644 --- a/docs/zh_cn/advanced_guides/customize_logging.md +++ b/docs/zh_cn/advanced_guides/customize_logging.md @@ -1,3 +1,3 @@ -# Customize Logging - -Coming soon. +# Customize Logging + +Coming soon. diff --git a/docs/zh_cn/advanced_guides/customize_optimizer.md b/docs/zh_cn/advanced_guides/customize_optimizer.md index fd6a28297f..09bcc80ca2 100644 --- a/docs/zh_cn/advanced_guides/customize_optimizer.md +++ b/docs/zh_cn/advanced_guides/customize_optimizer.md @@ -1,3 +1,3 @@ -# Customize Optimizer and Scheduler - -Coming soon. +# Customize Optimizer and Scheduler + +Coming soon. diff --git a/docs/zh_cn/advanced_guides/customize_transforms.md b/docs/zh_cn/advanced_guides/customize_transforms.md index 154413994b..860bd0515e 100644 --- a/docs/zh_cn/advanced_guides/customize_transforms.md +++ b/docs/zh_cn/advanced_guides/customize_transforms.md @@ -1,3 +1,3 @@ -# Customize Data Transformation and Augmentation - -Coming soon. +# Customize Data Transformation and Augmentation + +Coming soon. 
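The customize_datasets guide above stops at the class declaration when registering a custom dataset. As a rough sketch of what the body could look like, assuming mmengine's convention of overriding `BaseDataset.load_data_list()` and a hypothetical JSON annotation layout (the keys `image_path` and `keypoints` are illustrative, not a required schema):

```python
import json

from mmengine.dataset import BaseDataset
from mmpose.registry import DATASETS


@DATASETS.register_module(name='MyCustomDataset')
class MyCustomDataset(BaseDataset):
    """Minimal illustrative dataset, not an official MMPose class.

    Assumes ``ann_file`` points to a JSON list in which every item stores
    an image path and its keypoints; adapt ``load_data_list`` to the real
    annotation format.
    """

    def load_data_list(self):
        with open(self.ann_file, 'r', encoding='utf-8') as f:
            raw_anns = json.load(f)

        data_list = []
        for ann in raw_anns:
            data_list.append(
                dict(
                    img_path=ann['image_path'],  # hypothetical key
                    keypoints=ann['keypoints'],  # hypothetical key
                ))
        return data_list
```

With the class imported in the package's `__init__.py` and the package imported in `mmpose/datasets/__init__.py`, `dataset_type = 'MyCustomDataset'` in the config above resolves through the registry.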
diff --git a/docs/zh_cn/advanced_guides/dataflow.md b/docs/zh_cn/advanced_guides/dataflow.md index 9f098b028d..c6d55c0d98 100644 --- a/docs/zh_cn/advanced_guides/dataflow.md +++ b/docs/zh_cn/advanced_guides/dataflow.md @@ -1,3 +1,3 @@ -# Dataflow in MMPose - -Coming soon. +# Dataflow in MMPose + +Coming soon. diff --git a/docs/zh_cn/advanced_guides/how_to_deploy.md b/docs/zh_cn/advanced_guides/how_to_deploy.md index b4fead876c..38c3cb771a 100644 --- a/docs/zh_cn/advanced_guides/how_to_deploy.md +++ b/docs/zh_cn/advanced_guides/how_to_deploy.md @@ -1,3 +1,3 @@ -# How to Deploy MMPose Models - -Coming soon. +# How to Deploy MMPose Models + +Coming soon. diff --git a/docs/zh_cn/advanced_guides/implement_new_models.md b/docs/zh_cn/advanced_guides/implement_new_models.md index 4a10b0c3c9..47a6c96bc4 100644 --- a/docs/zh_cn/advanced_guides/implement_new_models.md +++ b/docs/zh_cn/advanced_guides/implement_new_models.md @@ -1,3 +1,3 @@ -# Implement New Models - -Coming soon. +# Implement New Models + +Coming soon. diff --git a/docs/zh_cn/advanced_guides/model_analysis.md b/docs/zh_cn/advanced_guides/model_analysis.md index 234dc5be85..2835bed918 100644 --- a/docs/zh_cn/advanced_guides/model_analysis.md +++ b/docs/zh_cn/advanced_guides/model_analysis.md @@ -1,100 +1,100 @@ -# Model Analysis - -## 统计模型参数量与计算量 - -MMPose 提供了 `tools/analysis_tools/get_flops.py` 来统计模型的参数量与计算量。 - -```shell -python tools/analysis_tools/get_flops.py ${CONFIG_FILE} [--shape ${INPUT_SHAPE}] [--cfg-options ${CFG_OPTIONS}] -``` - -参数说明: - -`CONFIG_FILE` : 模型配置文件的路径。 - -`--shape`: 模型的输入张量形状。 - -`--input-constructor`: 如果指定为 `batch`,将会生成一个 `batch tensor` 来计算 FLOPs。 - -`--batch-size`:如果 `--input-constructor` 指定为 `batch`,将会生成一个随机 `tensor`,形状为 `(batch_size, 3, **input_shape)` 来计算 FLOPs。 - -`--cfg-options`: 如果指定,可选的 `cfg` 的键值对将会被合并到配置文件中。 - -示例: - -```shell -python tools/analysis_tools/get_flops.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py -``` - -结果如下: - -```text -============================== -Input shape: (1, 3, 256, 192) -Flops: 7.7 GFLOPs -Params: 28.54 M -============================== -``` - -```{note} -目前该工具仍处于实验阶段,我们不能保证统计结果绝对正确,一些算子(比如 GN 或自定义算子)没有被统计到 FLOPs 中。 -``` - -## 分析训练日志 - -MMPose 提供了 `tools/analysis_tools/analyze_logs.py` 来对训练日志进行简单的分析,包括: - -- 将日志绘制成损失和精度曲线图 -- 统计训练速度 - -### 绘制损失和精度曲线图 - -该功能依赖于 `seaborn`,请先运行 `pip install seaborn` 安装依赖包。 - -![log_curve](https://user-images.githubusercontent.com/87690686/188538215-5d985aaa-59f8-44cf-b6f9-10890d599e9c.png) - -```shell -python tools/analysis_tools/analyze_logs.py plot_curve ${JSON_LOGS} [--keys ${KEYS}] [--title ${TITLE}] [--legend ${LEGEND}] [--backend ${BACKEND}] [--style ${STYLE}] [--out ${OUT_FILE}] -``` - -示例: - -- 绘制损失曲线 - - ```shell - python tools/analysis_tools/analyze_logs.py plot_curve log.json --keys loss_kpt --legend loss_kpt - ``` - -- 绘制精度曲线并导出为 PDF 文件 - - ```shell - python tools/analysis_tools/analyze_logs.py plot_curve log.json --keys acc_pose --out results.pdf - ``` - -- 将多个日志文件绘制在同一张图上 - - ```shell - python tools/analysis_tools/analyze_logs.py plot_curve log1.json log2.json --keys loss_kpt --legend run1 run2 --title loss_kpt --out loss_kpt.png - ``` - -### 统计训练速度 - -```shell -python tools/analysis_tools/analyze_logs.py cal_train_time ${JSON_LOGS} [--include-outliers] -``` - -示例: - -```shell -python tools/analysis_tools/analyze_logs.py cal_train_time log.json -``` - -结果如下: - -```text ------Analyze train time of hrnet_w32_256x192.json----- -slowest epoch 56, average time is 0.6924 -fastest epoch 1, 
average time is 0.6502 -time std over epochs is 0.0085 -average iter time: 0.6688 s/iter -``` +# Model Analysis + +## 统计模型参数量与计算量 + +MMPose 提供了 `tools/analysis_tools/get_flops.py` 来统计模型的参数量与计算量。 + +```shell +python tools/analysis_tools/get_flops.py ${CONFIG_FILE} [--shape ${INPUT_SHAPE}] [--cfg-options ${CFG_OPTIONS}] +``` + +参数说明: + +`CONFIG_FILE` : 模型配置文件的路径。 + +`--shape`: 模型的输入张量形状。 + +`--input-constructor`: 如果指定为 `batch`,将会生成一个 `batch tensor` 来计算 FLOPs。 + +`--batch-size`:如果 `--input-constructor` 指定为 `batch`,将会生成一个随机 `tensor`,形状为 `(batch_size, 3, **input_shape)` 来计算 FLOPs。 + +`--cfg-options`: 如果指定,可选的 `cfg` 的键值对将会被合并到配置文件中。 + +示例: + +```shell +python tools/analysis_tools/get_flops.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py +``` + +结果如下: + +```text +============================== +Input shape: (1, 3, 256, 192) +Flops: 7.7 GFLOPs +Params: 28.54 M +============================== +``` + +```{note} +目前该工具仍处于实验阶段,我们不能保证统计结果绝对正确,一些算子(比如 GN 或自定义算子)没有被统计到 FLOPs 中。 +``` + +## 分析训练日志 + +MMPose 提供了 `tools/analysis_tools/analyze_logs.py` 来对训练日志进行简单的分析,包括: + +- 将日志绘制成损失和精度曲线图 +- 统计训练速度 + +### 绘制损失和精度曲线图 + +该功能依赖于 `seaborn`,请先运行 `pip install seaborn` 安装依赖包。 + +![log_curve](https://user-images.githubusercontent.com/87690686/188538215-5d985aaa-59f8-44cf-b6f9-10890d599e9c.png) + +```shell +python tools/analysis_tools/analyze_logs.py plot_curve ${JSON_LOGS} [--keys ${KEYS}] [--title ${TITLE}] [--legend ${LEGEND}] [--backend ${BACKEND}] [--style ${STYLE}] [--out ${OUT_FILE}] +``` + +示例: + +- 绘制损失曲线 + + ```shell + python tools/analysis_tools/analyze_logs.py plot_curve log.json --keys loss_kpt --legend loss_kpt + ``` + +- 绘制精度曲线并导出为 PDF 文件 + + ```shell + python tools/analysis_tools/analyze_logs.py plot_curve log.json --keys acc_pose --out results.pdf + ``` + +- 将多个日志文件绘制在同一张图上 + + ```shell + python tools/analysis_tools/analyze_logs.py plot_curve log1.json log2.json --keys loss_kpt --legend run1 run2 --title loss_kpt --out loss_kpt.png + ``` + +### 统计训练速度 + +```shell +python tools/analysis_tools/analyze_logs.py cal_train_time ${JSON_LOGS} [--include-outliers] +``` + +示例: + +```shell +python tools/analysis_tools/analyze_logs.py cal_train_time log.json +``` + +结果如下: + +```text +-----Analyze train time of hrnet_w32_256x192.json----- +slowest epoch 56, average time is 0.6924 +fastest epoch 1, average time is 0.6502 +time std over epochs is 0.0085 +average iter time: 0.6688 s/iter +``` diff --git a/docs/zh_cn/api.rst b/docs/zh_cn/api.rst index 48819a2531..07b679eb20 100644 --- a/docs/zh_cn/api.rst +++ b/docs/zh_cn/api.rst @@ -1,134 +1,134 @@ -mmpose.apis -------------- -.. automodule:: mmpose.apis - :members: - -mmpose.codecs -------------- -.. automodule:: mmpose.codecs - :members: - -mmpose.models ---------------- -backbones -^^^^^^^^^^^ -.. automodule:: mmpose.models.backbones - :members: - -necks -^^^^^^^^^^^ -.. automodule:: mmpose.models.necks - :members: - -detectors -^^^^^^^^^^^ -.. automodule:: mmpose.models.pose_estimators - :members: - -heads -^^^^^^^^^^^^^^^ -.. automodule:: mmpose.models.heads - :members: - -losses -^^^^^^^^^^^ -.. automodule:: mmpose.models.losses - :members: - -misc -^^^^^^^^^^^ -.. automodule:: mmpose.models.utils - :members: - -mmpose.datasets ------------------ -.. automodule:: mmpose.datasets - :members: - -datasets -^^^^^^^^^^^ -.. automodule:: mmpose.datasets.datasets.base - :members: - :noindex: - -.. automodule:: mmpose.datasets.datasets.body - :members: - :noindex: - -.. 
automodule:: mmpose.datasets.datasets.face - :members: - :noindex: - -.. automodule:: mmpose.datasets.datasets.hand - :members: - :noindex: - -.. automodule:: mmpose.datasets.datasets.animal - :members: - :noindex: - -.. automodule:: mmpose.datasets.datasets.fashion - :members: - :noindex: - -transforms -^^^^^^^^^^^ -.. automodule:: mmpose.datasets.transforms.loading - :members: - -.. automodule:: mmpose.datasets.transforms.common_transforms - :members: - -.. automodule:: mmpose.datasets.transforms.topdown_transforms - :members: - -.. automodule:: mmpose.datasets.transforms.bottomup_transforms - :members: - -.. automodule:: mmpose.datasets.transforms.formatting - :members: - -mmpose.structures ---------------- -.. automodule:: mmpose.structures - :members: - -bbox -^^^^^^^^^^^ -.. automodule:: mmpose.structures.bbox - :members: - -keypoint -^^^^^^^^^^^ -.. automodule:: mmpose.structures.keypoint - :members: - - -mmpose.registry ---------------- -.. automodule:: mmpose.registry - :members: - -mmpose.evaluation ------------------ -metrics -^^^^^^^^^^^ -.. automodule:: mmpose.evaluation.metrics - :members: - -functional -^^^^^^^^^^^ -.. automodule:: mmpose.evaluation.functional - :members: - -mmpose.visualization --------------------- -.. automodule:: mmpose.visualization - :members: - -mmpose.engine ---------------- -hooks -^^^^^^^^^^^ -.. automodule:: mmpose.engine.hooks - :members: +mmpose.apis +------------- +.. automodule:: mmpose.apis + :members: + +mmpose.codecs +------------- +.. automodule:: mmpose.codecs + :members: + +mmpose.models +--------------- +backbones +^^^^^^^^^^^ +.. automodule:: mmpose.models.backbones + :members: + +necks +^^^^^^^^^^^ +.. automodule:: mmpose.models.necks + :members: + +detectors +^^^^^^^^^^^ +.. automodule:: mmpose.models.pose_estimators + :members: + +heads +^^^^^^^^^^^^^^^ +.. automodule:: mmpose.models.heads + :members: + +losses +^^^^^^^^^^^ +.. automodule:: mmpose.models.losses + :members: + +misc +^^^^^^^^^^^ +.. automodule:: mmpose.models.utils + :members: + +mmpose.datasets +----------------- +.. automodule:: mmpose.datasets + :members: + +datasets +^^^^^^^^^^^ +.. automodule:: mmpose.datasets.datasets.base + :members: + :noindex: + +.. automodule:: mmpose.datasets.datasets.body + :members: + :noindex: + +.. automodule:: mmpose.datasets.datasets.face + :members: + :noindex: + +.. automodule:: mmpose.datasets.datasets.hand + :members: + :noindex: + +.. automodule:: mmpose.datasets.datasets.animal + :members: + :noindex: + +.. automodule:: mmpose.datasets.datasets.fashion + :members: + :noindex: + +transforms +^^^^^^^^^^^ +.. automodule:: mmpose.datasets.transforms.loading + :members: + +.. automodule:: mmpose.datasets.transforms.common_transforms + :members: + +.. automodule:: mmpose.datasets.transforms.topdown_transforms + :members: + +.. automodule:: mmpose.datasets.transforms.bottomup_transforms + :members: + +.. automodule:: mmpose.datasets.transforms.formatting + :members: + +mmpose.structures +--------------- +.. automodule:: mmpose.structures + :members: + +bbox +^^^^^^^^^^^ +.. automodule:: mmpose.structures.bbox + :members: + +keypoint +^^^^^^^^^^^ +.. automodule:: mmpose.structures.keypoint + :members: + + +mmpose.registry +--------------- +.. automodule:: mmpose.registry + :members: + +mmpose.evaluation +----------------- +metrics +^^^^^^^^^^^ +.. automodule:: mmpose.evaluation.metrics + :members: + +functional +^^^^^^^^^^^ +.. automodule:: mmpose.evaluation.functional + :members: + +mmpose.visualization +-------------------- +.. 
automodule:: mmpose.visualization + :members: + +mmpose.engine +--------------- +hooks +^^^^^^^^^^^ +.. automodule:: mmpose.engine.hooks + :members: diff --git a/docs/zh_cn/collect_modelzoo.py b/docs/zh_cn/collect_modelzoo.py index 0c87d3c6ef..965250f712 100644 --- a/docs/zh_cn/collect_modelzoo.py +++ b/docs/zh_cn/collect_modelzoo.py @@ -1,196 +1,196 @@ -#!/usr/bin/env python -# Copyright (c) OpenMMLab. All rights reserved. -import os -import os.path as osp -import re -from collections import defaultdict -from glob import glob - -from addict import Addict -from titlecase import titlecase - - -def _get_model_docs(): - """Get all model document files. - - Returns: - list[str]: file paths - """ - config_root = osp.join('..', '..', 'configs') - pattern = osp.sep.join(['*'] * 4) + '.md' - docs = glob(osp.join(config_root, pattern)) - docs = [doc for doc in docs if '_base_' not in doc] - return docs - - -def _parse_model_doc_path(path): - """Parse doc file path. - - Typical path would be like: - - configs////.md - - An example is: - - "configs/animal_2d_keypoint/topdown_heatmap/ - animalpose/resnet_animalpose.md" - - Returns: - tuple: - - task (str): e.g. ``'Animal 2D Keypoint'`` - - dataset (str): e.g. ``'animalpose'`` - - keywords (tuple): e.g. ``('topdown heatmap', 'resnet')`` - """ - _path = path.split(osp.sep) - _rel_path = _path[_path.index('configs'):] - - # get task - def _titlecase_callback(word, **kwargs): - if word == '2d': - return '2D' - if word == '3d': - return '3D' - - task = titlecase( - _rel_path[1].replace('_', ' '), callback=_titlecase_callback) - - # get dataset - dataset = _rel_path[3] - - # get keywords - keywords_algo = (_rel_path[2], ) - keywords_setting = tuple(_rel_path[4][:-3].split('_')) - keywords = keywords_algo + keywords_setting - - return task, dataset, keywords - - -def _get_paper_refs(): - """Get all paper references. - - Returns: - Dict[str, List[str]]: keys are paper categories and values are lists - of paper paths. - """ - papers = glob('../src/papers/*/*.md') - paper_refs = defaultdict(list) - for fn in papers: - category = fn.split(osp.sep)[3] - paper_refs[category].append(fn) - - return paper_refs - - -def _parse_paper_ref(fn): - """Get paper name and indicator pattern from a paper reference file. - - Returns: - tuple: - - paper_name (str) - - paper_indicator (str) - """ - indicator = None - with open(fn, 'r', encoding='utf-8') as f: - for line in f.readlines(): - if line.startswith('', '', indicator).strip() - return paper_name, indicator - - -def main(): - - # Build output folders - os.makedirs('model_zoo', exist_ok=True) - os.makedirs('model_zoo_papers', exist_ok=True) - - # Collect all document contents - model_doc_list = _get_model_docs() - model_docs = Addict() - - for path in model_doc_list: - task, dataset, keywords = _parse_model_doc_path(path) - with open(path, 'r', encoding='utf-8') as f: - doc = { - 'task': task, - 'dataset': dataset, - 'keywords': keywords, - 'path': path, - 'content': f.read() - } - model_docs[task][dataset][keywords] = doc - - # Write files by task - for task, dataset_dict in model_docs.items(): - lines = [f'# {task}', ''] - for dataset, keywords_dict in dataset_dict.items(): - lines += [ - '
    ', '

    ', '', f'## {titlecase(dataset)} Dataset', - '' - ] - - for keywords, doc in keywords_dict.items(): - keyword_strs = [ - titlecase(x.replace('_', ' ')) for x in keywords - ] - dataset_str = titlecase(dataset) - if dataset_str in keyword_strs: - keyword_strs.remove(dataset_str) - - lines += [ - '
    ', '', - (f'### {" + ".join(keyword_strs)}' - f' on {dataset_str}'), '', doc['content'], '' - ] - - fn = osp.join('model_zoo', f'{task.replace(" ", "_").lower()}.md') - with open(fn, 'w', encoding='utf-8') as f: - f.write('\n'.join(lines)) - - # Write files by paper - paper_refs = _get_paper_refs() - - for paper_cat, paper_list in paper_refs.items(): - lines = [] - for paper_fn in paper_list: - paper_name, indicator = _parse_paper_ref(paper_fn) - paperlines = [] - for task, dataset_dict in model_docs.items(): - for dataset, keywords_dict in dataset_dict.items(): - for keywords, doc_info in keywords_dict.items(): - - if indicator not in doc_info['content']: - continue - - keyword_strs = [ - titlecase(x.replace('_', ' ')) for x in keywords - ] - - dataset_str = titlecase(dataset) - if dataset_str in keyword_strs: - keyword_strs.remove(dataset_str) - paperlines += [ - '
    ', '', - (f'### {" + ".join(keyword_strs)}' - f' on {dataset_str}'), '', doc_info['content'], '' - ] - if paperlines: - lines += ['
    ', '

    ', '', f'## {paper_name}', ''] - lines += paperlines - - if lines: - lines = [f'# {titlecase(paper_cat)}', ''] + lines - with open( - osp.join('model_zoo_papers', f'{paper_cat.lower()}.md'), - 'w', - encoding='utf-8') as f: - f.write('\n'.join(lines)) - - -if __name__ == '__main__': - print('collect model zoo documents') - main() +#!/usr/bin/env python +# Copyright (c) OpenMMLab. All rights reserved. +import os +import os.path as osp +import re +from collections import defaultdict +from glob import glob + +from addict import Addict +from titlecase import titlecase + + +def _get_model_docs(): + """Get all model document files. + + Returns: + list[str]: file paths + """ + config_root = osp.join('..', '..', 'configs') + pattern = osp.sep.join(['*'] * 4) + '.md' + docs = glob(osp.join(config_root, pattern)) + docs = [doc for doc in docs if '_base_' not in doc] + return docs + + +def _parse_model_doc_path(path): + """Parse doc file path. + + Typical path would be like: + + configs////.md + + An example is: + + "configs/animal_2d_keypoint/topdown_heatmap/ + animalpose/resnet_animalpose.md" + + Returns: + tuple: + - task (str): e.g. ``'Animal 2D Keypoint'`` + - dataset (str): e.g. ``'animalpose'`` + - keywords (tuple): e.g. ``('topdown heatmap', 'resnet')`` + """ + _path = path.split(osp.sep) + _rel_path = _path[_path.index('configs'):] + + # get task + def _titlecase_callback(word, **kwargs): + if word == '2d': + return '2D' + if word == '3d': + return '3D' + + task = titlecase( + _rel_path[1].replace('_', ' '), callback=_titlecase_callback) + + # get dataset + dataset = _rel_path[3] + + # get keywords + keywords_algo = (_rel_path[2], ) + keywords_setting = tuple(_rel_path[4][:-3].split('_')) + keywords = keywords_algo + keywords_setting + + return task, dataset, keywords + + +def _get_paper_refs(): + """Get all paper references. + + Returns: + Dict[str, List[str]]: keys are paper categories and values are lists + of paper paths. + """ + papers = glob('../src/papers/*/*.md') + paper_refs = defaultdict(list) + for fn in papers: + category = fn.split(osp.sep)[3] + paper_refs[category].append(fn) + + return paper_refs + + +def _parse_paper_ref(fn): + """Get paper name and indicator pattern from a paper reference file. + + Returns: + tuple: + - paper_name (str) + - paper_indicator (str) + """ + indicator = None + with open(fn, 'r', encoding='utf-8') as f: + for line in f.readlines(): + if line.startswith('', '', indicator).strip() + return paper_name, indicator + + +def main(): + + # Build output folders + os.makedirs('model_zoo', exist_ok=True) + os.makedirs('model_zoo_papers', exist_ok=True) + + # Collect all document contents + model_doc_list = _get_model_docs() + model_docs = Addict() + + for path in model_doc_list: + task, dataset, keywords = _parse_model_doc_path(path) + with open(path, 'r', encoding='utf-8') as f: + doc = { + 'task': task, + 'dataset': dataset, + 'keywords': keywords, + 'path': path, + 'content': f.read() + } + model_docs[task][dataset][keywords] = doc + + # Write files by task + for task, dataset_dict in model_docs.items(): + lines = [f'# {task}', ''] + for dataset, keywords_dict in dataset_dict.items(): + lines += [ + '
    ', '

    ', '', f'## {titlecase(dataset)} Dataset', + '' + ] + + for keywords, doc in keywords_dict.items(): + keyword_strs = [ + titlecase(x.replace('_', ' ')) for x in keywords + ] + dataset_str = titlecase(dataset) + if dataset_str in keyword_strs: + keyword_strs.remove(dataset_str) + + lines += [ + '
    ', '', + (f'### {" + ".join(keyword_strs)}' + f' on {dataset_str}'), '', doc['content'], '' + ] + + fn = osp.join('model_zoo', f'{task.replace(" ", "_").lower()}.md') + with open(fn, 'w', encoding='utf-8') as f: + f.write('\n'.join(lines)) + + # Write files by paper + paper_refs = _get_paper_refs() + + for paper_cat, paper_list in paper_refs.items(): + lines = [] + for paper_fn in paper_list: + paper_name, indicator = _parse_paper_ref(paper_fn) + paperlines = [] + for task, dataset_dict in model_docs.items(): + for dataset, keywords_dict in dataset_dict.items(): + for keywords, doc_info in keywords_dict.items(): + + if indicator not in doc_info['content']: + continue + + keyword_strs = [ + titlecase(x.replace('_', ' ')) for x in keywords + ] + + dataset_str = titlecase(dataset) + if dataset_str in keyword_strs: + keyword_strs.remove(dataset_str) + paperlines += [ + '
    ', '', + (f'### {" + ".join(keyword_strs)}' + f' on {dataset_str}'), '', doc_info['content'], '' + ] + if paperlines: + lines += ['
    ', '

    ', '', f'## {paper_name}', ''] + lines += paperlines + + if lines: + lines = [f'# {titlecase(paper_cat)}', ''] + lines + with open( + osp.join('model_zoo_papers', f'{paper_cat.lower()}.md'), + 'w', + encoding='utf-8') as f: + f.write('\n'.join(lines)) + + +if __name__ == '__main__': + print('collect model zoo documents') + main() diff --git a/docs/zh_cn/collect_projects.py b/docs/zh_cn/collect_projects.py index 93562cb4b2..e9e4ee3d18 100644 --- a/docs/zh_cn/collect_projects.py +++ b/docs/zh_cn/collect_projects.py @@ -1,119 +1,119 @@ -#!/usr/bin/env python -# Copyright (c) OpenMMLab. All rights reserved. -import os -import os.path as osp -import re -from glob import glob - - -def _get_project_docs(): - """Get all project document files. - - Returns: - list[str]: file paths - """ - project_root = osp.join('..', '..', 'projects') - pattern = osp.sep.join(['*'] * 2) + '.md' - docs = glob(osp.join(project_root, pattern)) - docs = [ - doc for doc in docs - if 'example_project' not in doc and '_CN' not in doc - ] - return docs - - -def _parse_project_doc_path(fn): - """Get project name and banner from a project reference file. - - Returns: - tuple: - - project_name (str) - - project_banner (str) - """ - project_banner, project_name = None, None - with open(fn, 'r', encoding='utf-8') as f: - for line in f.readlines(): - if re.match('^( )*#', line) - faq_doc.append(line) - return faq_doc - - -def main(): - - # Build output folders - os.makedirs('projects', exist_ok=True) - - # Collect all document contents - project_doc_list = _get_project_docs() - - project_lines = [] - for path in project_doc_list: - name, banner = _parse_project_doc_path(path) - _path = path.split(osp.sep) - _rel_path = _path[_path.index('projects'):-1] - url = 'https://github.com/open-mmlab/mmpose/blob/dev-1.x/' + '/'.join( - _rel_path) - _name = name.split(':', 1) - name, description = _name[0], '' if len( - _name) < 2 else f': {_name[-1]}' - project_lines += [ - f'- **{name}**{description} [\\[github\\]]({url})', '', - '
<div align="center">', ' ' + banner, '</div>', '<br/>
    ', '' - ] - - project_intro_doc = _get_project_intro_doc() - faq_doc = _get_faq_doc() - - with open( - osp.join('projects', 'community_projects.md'), 'w', - encoding='utf-8') as f: - f.write('# Projects of MMPose from Community Contributors\n') - f.write(''.join(project_intro_doc)) - f.write('\n'.join(project_lines)) - f.write(''.join(faq_doc)) - - -if __name__ == '__main__': - print('collect project documents') - main() +#!/usr/bin/env python +# Copyright (c) OpenMMLab. All rights reserved. +import os +import os.path as osp +import re +from glob import glob + + +def _get_project_docs(): + """Get all project document files. + + Returns: + list[str]: file paths + """ + project_root = osp.join('..', '..', 'projects') + pattern = osp.sep.join(['*'] * 2) + '.md' + docs = glob(osp.join(project_root, pattern)) + docs = [ + doc for doc in docs + if 'example_project' not in doc and '_CN' not in doc + ] + return docs + + +def _parse_project_doc_path(fn): + """Get project name and banner from a project reference file. + + Returns: + tuple: + - project_name (str) + - project_banner (str) + """ + project_banner, project_name = None, None + with open(fn, 'r', encoding='utf-8') as f: + for line in f.readlines(): + if re.match('^( )*#', line) + faq_doc.append(line) + return faq_doc + + +def main(): + + # Build output folders + os.makedirs('projects', exist_ok=True) + + # Collect all document contents + project_doc_list = _get_project_docs() + + project_lines = [] + for path in project_doc_list: + name, banner = _parse_project_doc_path(path) + _path = path.split(osp.sep) + _rel_path = _path[_path.index('projects'):-1] + url = 'https://github.com/open-mmlab/mmpose/blob/dev-1.x/' + '/'.join( + _rel_path) + _name = name.split(':', 1) + name, description = _name[0], '' if len( + _name) < 2 else f': {_name[-1]}' + project_lines += [ + f'- **{name}**{description} [\\[github\\]]({url})', '', + '
<div align="center">', ' ' + banner, '</div>', '<br/>
    ', '' + ] + + project_intro_doc = _get_project_intro_doc() + faq_doc = _get_faq_doc() + + with open( + osp.join('projects', 'community_projects.md'), 'w', + encoding='utf-8') as f: + f.write('# Projects of MMPose from Community Contributors\n') + f.write(''.join(project_intro_doc)) + f.write('\n'.join(project_lines)) + f.write(''.join(faq_doc)) + + +if __name__ == '__main__': + print('collect project documents') + main() diff --git a/docs/zh_cn/conf.py b/docs/zh_cn/conf.py index c82b9edc04..fd2df51ab6 100644 --- a/docs/zh_cn/conf.py +++ b/docs/zh_cn/conf.py @@ -1,108 +1,108 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# Configuration file for the Sphinx documentation builder. -# -# This file only contains a selection of the most common options. For a full -# list see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html - -# -- Path setup -------------------------------------------------------------- - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. - -import os -import subprocess -import sys - -import pytorch_sphinx_theme - -sys.path.insert(0, os.path.abspath('../..')) - -# -- Project information ----------------------------------------------------- - -project = 'MMPose' -copyright = '2020-2021, OpenMMLab' -author = 'MMPose Authors' - -# The full version, including alpha/beta/rc tags -version_file = '../../mmpose/version.py' - - -def get_version(): - with open(version_file, 'r') as f: - exec(compile(f.read(), version_file, 'exec')) - return locals()['__version__'] - - -release = get_version() - -# -- General configuration --------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', - 'sphinx_markdown_tables', 'sphinx_copybutton', 'myst_parser', - 'sphinx.ext.autosummary' -] - -autodoc_mock_imports = ['json_tricks', 'mmpose.version'] - -# Ignore >>> when copying code -copybutton_prompt_text = r'>>> |\.\.\. ' -copybutton_prompt_is_regexp = True - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] - -# -- Options for HTML output ------------------------------------------------- -source_suffix = { - '.rst': 'restructuredtext', - '.md': 'markdown', -} - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -html_theme = 'pytorch_sphinx_theme' -html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] -html_theme_options = { - 'menu': [{ - 'name': 'GitHub', - 'url': 'https://github.com/open-mmlab/mmpose' - }], - # Specify the language of the shared menu - 'menu_lang': 'cn' -} - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". 
- -language = 'zh_CN' - -html_static_path = ['_static'] -html_css_files = ['css/readthedocs.css'] - -# Enable ::: for my_st -myst_enable_extensions = ['colon_fence'] - -master_doc = 'index' - - -def builder_inited_handler(app): - subprocess.run(['python', './collect_modelzoo.py']) - subprocess.run(['python', './collect_projects.py']) - subprocess.run(['sh', './merge_docs.sh']) - subprocess.run(['python', './stats.py']) - - -def setup(app): - app.connect('builder-inited', builder_inited_handler) +# Copyright (c) OpenMMLab. All rights reserved. +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. + +import os +import subprocess +import sys + +import pytorch_sphinx_theme + +sys.path.insert(0, os.path.abspath('../..')) + +# -- Project information ----------------------------------------------------- + +project = 'MMPose' +copyright = '2020-2021, OpenMMLab' +author = 'MMPose Authors' + +# The full version, including alpha/beta/rc tags +version_file = '../../mmpose/version.py' + + +def get_version(): + with open(version_file, 'r') as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +release = get_version() + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', + 'sphinx_markdown_tables', 'sphinx_copybutton', 'myst_parser', + 'sphinx.ext.autosummary' +] + +autodoc_mock_imports = ['json_tricks', 'mmpose.version'] + +# Ignore >>> when copying code +copybutton_prompt_text = r'>>> |\.\.\. ' +copybutton_prompt_is_regexp = True + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# -- Options for HTML output ------------------------------------------------- +source_suffix = { + '.rst': 'restructuredtext', + '.md': 'markdown', +} + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'pytorch_sphinx_theme' +html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] +html_theme_options = { + 'menu': [{ + 'name': 'GitHub', + 'url': 'https://github.com/open-mmlab/mmpose' + }], + # Specify the language of the shared menu + 'menu_lang': 'cn' +} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". 
+ +language = 'zh_CN' + +html_static_path = ['_static'] +html_css_files = ['css/readthedocs.css'] + +# Enable ::: for my_st +myst_enable_extensions = ['colon_fence'] + +master_doc = 'index' + + +def builder_inited_handler(app): + subprocess.run(['python', './collect_modelzoo.py']) + subprocess.run(['python', './collect_projects.py']) + subprocess.run(['sh', './merge_docs.sh']) + subprocess.run(['python', './stats.py']) + + +def setup(app): + app.connect('builder-inited', builder_inited_handler) diff --git a/docs/zh_cn/contribution_guide.md b/docs/zh_cn/contribution_guide.md index 96be7d1723..50ae265566 100644 --- a/docs/zh_cn/contribution_guide.md +++ b/docs/zh_cn/contribution_guide.md @@ -1,207 +1,207 @@ -# 如何给 MMPose 贡献代码 - -欢迎加入 MMPose 社区,我们致力于打造最前沿的计算机视觉基础库,我们欢迎任何形式的贡献,包括但不限于: - -- **修复错误** - 1. 如果提交的代码改动较大,我们鼓励你先开一个 issue 并正确描述现象、原因和复现方式,讨论后确认修复方案。 - 2. 修复错误并补充相应的单元测试,提交 PR 。 -- **新增功能或组件** - 1. 如果新功能或模块涉及较大的代码改动,我们建议先提交 issue,与我们确认功能的必要性。 - 2. 实现新增功能并添加单元测试,提交 PR 。 -- **文档补充或翻译** - - 如果发现文档有错误或不完善的地方,欢迎直接提交 PR 。 - -```{note} -- 如果你希望向 MMPose 1.0 贡献代码,请从 dev-1.x 上创建新分支,并提交 PR 到 dev-1.x 分支上。 -- 如果你是论文作者,并希望将你的方法加入到 MMPose 中,欢迎联系我们,我们将非常感谢你的贡献。 -- 如果你希望尽快将你的项目分享到 MMPose 开源社区,欢迎将 PR 提到 Projects 目录下,该目录下的项目将简化 Review 流程并尽快合入。 -- 如果你希望加入 MMPose 的维护者,欢迎联系我们,我们将邀请你加入 MMPose 的维护者群。 -``` - -## 准备工作 - -PR 操作所使用的命令都是用 Git 去实现的,该章节将介绍如何进行 Git 配置与 GitHub 绑定。 - -### Git 配置 - -首先,你需要在本地安装 Git,然后配置你的 Git 用户名和邮箱: - -```Shell -# 在命令提示符(cmd)或终端(terminal)中输入以下命令,查看 Git 版本 -git --version -``` - -然后,你需要检查自己的 Git Config 是否正确配置,如果 `user.name` 和 `user.email` 为空,你需要配置你的 Git 用户名和邮箱: - -```Shell -# 在命令提示符(cmd)或终端(terminal)中输入以下命令,查看 Git 配置 -git config --global --list -# 设置 Git 用户名和邮箱 -git config --global user.name "这里填入你的用户名" -git config --global user.email "这里填入你的邮箱" -``` - -## PR 流程 - -如果你对 PR 流程不熟悉,接下来将会从零开始,一步一步地教你如何提交 PR。如果你想深入了解 PR 开发模式,可以参考 [GitHub 官方文档](https://docs.github.com/cn/github/collaborating-with-issues-and-pull-requests/about-pull-requests)。 - -### 1. Fork 项目 - -当你第一次提交 PR 时,需要先 Fork 项目到自己的 GitHub 账号下。点击项目右上角的 Fork 按钮,将项目 Fork 到自己的 GitHub 账号下。 - -![](https://user-images.githubusercontent.com/13503330/223318144-a49c6cef-b1fb-45b8-aa2b-0833d0e3fd5c.png) - -接着,你需要将你的 Fork 仓库 Clone 到本地,然后添加官方仓库作为远程仓库: - -```Shell - -# Clone 你的 Fork 仓库到本地 -git clone https://github.com/username/mmpose.git - -# 添加官方仓库作为远程仓库 -cd mmpose -git remote add upstream https://github.com/open-mmlab/mmpose.git -``` - -在终端中输入以下命令,查看远程仓库是否成功添加: - -```Shell -git remote -v -``` - -如果出现以下信息,说明你已经成功添加了远程仓库: - -```Shell -origin https://github.com/{username}/mmpose.git (fetch) -origin https://github.com/{username}/mmpose.git (push) -upstream https://github.com/open-mmlab/mmpose.git (fetch) -upstream https://github.com/open-mmlab/mmpose.git (push) -``` - -```{note} -这里对 origin 和 upstream 进行一个简单的介绍,当我们使用 git clone 来克隆代码时,会默认创建一个 origin 的 remote,它指向我们克隆的代码库地址,而 upstream 则是我们自己添加的,用来指向原始代码库地址。当然如果你不喜欢他叫 upstream,也可以自己修改,比如叫 open-mmlab。我们通常向 origin 提交代码(即 fork 下来的远程仓库),然后向 upstream 提交一个 pull request。如果提交的代码和最新的代码发生冲突,再从 upstream 拉取最新的代码,和本地分支解决冲突,再提交到 origin。 -``` - -### 2. 
配置 pre-commit - -在本地开发环境中,我们使用 pre-commit 来检查代码风格,以确保代码风格的统一。在提交代码前,你需要先安装 pre-commit: - -```Shell -pip install -U pre-commit - -# 在 mmpose 根目录下安装 pre-commit -pre-commit install -``` - -检查 pre-commit 是否配置成功,并安装 `.pre-commit-config.yaml` 中的钩子: - -```Shell -pre-commit run --all-files -``` - -![](https://user-images.githubusercontent.com/57566630/202368856-0465a90d-8fce-4345-918e-67b8b9c82614.png) - -```{note} -如果你是中国大陆用户,由于网络原因,可能会出现 pre-commit 安装失败的情况。 - -这时你可以使用清华源来安装 pre-commit: -pip install -U pre-commit -i https://pypi.tuna.tsinghua.edu.cn/simple - -或者使用国内镜像来安装 pre-commit: -pip install -U pre-commit -i https://pypi.mirrors.ustc.edu.cn/simple -``` - -如果安装过程被中断,可以重复执行上述命令,直到安装成功。 - -如果你提交的代码中有不符合规范的地方,pre-commit 会发出警告,并自动修复部分错误。 - -![](https://user-images.githubusercontent.com/57566630/202369176-67642454-0025-4023-a095-263529107aa3.png) - -### 3. 创建开发分支 - -安装完 pre-commit 之后,我们需要基于 dev 分支创建一个新的开发分支,建议以 `username/pr_name` 的形式命名,例如: - -```Shell -git checkout -b username/refactor_contributing_doc -``` - -在后续的开发中,如果本地仓库的 dev 分支落后于官方仓库的 dev 分支,需要先拉取 upstream 的 dev 分支,然后 rebase 到本地的开发分支上: - -```Shell -git checkout username/refactor_contributing_doc -git fetch upstream -git rebase upstream/dev-1.x -``` - -在 rebase 时,如果出现冲突,需要手动解决冲突,然后执行 `git add` 命令,再执行 `git rebase --continue` 命令,直到 rebase 完成。 - -### 4. 提交代码并在本地通过单元测试 - -在本地开发完成后,我们需要在本地通过单元测试,然后提交代码。 - -```shell -# 运行单元测试 -pytest tests/ - -# 提交代码 -git add . -git commit -m "commit message" -``` - -### 5. 推送代码到远程仓库 - -在本地开发完成后,我们需要将代码推送到远程仓库。 - -```Shell -git push origin username/refactor_contributing_doc -``` - -### 6. 提交 Pull Request (PR) - -#### (1) 在 GitHub 上创建 PR - -![](https://user-images.githubusercontent.com/13503330/223321382-e6068e18-1d91-4458-8328-b1c7c907b3b2.png) - -#### (2) 在 PR 中根据指引修改描述,添加必要的信息 - -![](https://user-images.githubusercontent.com/13503330/223322447-94ad4b8c-21bf-4ca7-b3d6-0568cace6eee.png) - -```{note} -- 在 PR branch 左侧选择 `dev` 分支,否则 PR 会被拒绝。 -- 如果你是第一次向 OpenMMLab 提交 PR,需要签署 CLA。 -``` - -![](https://user-images.githubusercontent.com/57566630/167307569-a794b967-6e28-4eac-a942-00deb657815f.png) - -## 代码风格 - -### Python - -我们采用[PEP8](https://www.python.org/dev/peps/pep-0008/)作为代码风格。 - -使用下面的工具来对代码进行整理和格式化: - -- [flake8](http://flake8.pycqa.org/en/latest/):代码提示 -- [isort](https://github.com/timothycrosley/isort):import 排序 -- [yapf](https://github.com/google/yapf):格式化工具 -- [codespell](https://github.com/codespell-project/codespell): 单词拼写检查 -- [mdformat](https://github.com/executablebooks/mdformat): markdown 文件格式化工具 -- [docformatter](https://github.com/myint/docformatter): docstring 格式化工具 - -`yapf`和`isort`的样式配置可以在[setup.cfg](/setup.cfg)中找到。 - -我们使用[pre-commit hook](https://pre-commit.com/)来: - -- 检查和格式化 `flake8`、`yapf`、`isort`、`trailing whitespaces` -- 修复 `end-of-files` -- 在每次提交时自动排序 `requirments.txt` - -`pre-commit`的配置存储在[.pre-commit-config](/.pre-commit-config.yaml)中。 - -```{note} -在你创建PR之前,请确保你的代码格式符合规范,且经过了 yapf 格式化。 -``` - -### C++与CUDA - -遵循[Google C++风格指南](https://google.github.io/styleguide/cppguide.html) +# 如何给 MMPose 贡献代码 + +欢迎加入 MMPose 社区,我们致力于打造最前沿的计算机视觉基础库,我们欢迎任何形式的贡献,包括但不限于: + +- **修复错误** + 1. 如果提交的代码改动较大,我们鼓励你先开一个 issue 并正确描述现象、原因和复现方式,讨论后确认修复方案。 + 2. 修复错误并补充相应的单元测试,提交 PR 。 +- **新增功能或组件** + 1. 如果新功能或模块涉及较大的代码改动,我们建议先提交 issue,与我们确认功能的必要性。 + 2. 
实现新增功能并添加单元测试,提交 PR 。 +- **文档补充或翻译** + - 如果发现文档有错误或不完善的地方,欢迎直接提交 PR 。 + +```{note} +- 如果你希望向 MMPose 1.0 贡献代码,请从 dev-1.x 上创建新分支,并提交 PR 到 dev-1.x 分支上。 +- 如果你是论文作者,并希望将你的方法加入到 MMPose 中,欢迎联系我们,我们将非常感谢你的贡献。 +- 如果你希望尽快将你的项目分享到 MMPose 开源社区,欢迎将 PR 提到 Projects 目录下,该目录下的项目将简化 Review 流程并尽快合入。 +- 如果你希望加入 MMPose 的维护者,欢迎联系我们,我们将邀请你加入 MMPose 的维护者群。 +``` + +## 准备工作 + +PR 操作所使用的命令都是用 Git 去实现的,该章节将介绍如何进行 Git 配置与 GitHub 绑定。 + +### Git 配置 + +首先,你需要在本地安装 Git,然后配置你的 Git 用户名和邮箱: + +```Shell +# 在命令提示符(cmd)或终端(terminal)中输入以下命令,查看 Git 版本 +git --version +``` + +然后,你需要检查自己的 Git Config 是否正确配置,如果 `user.name` 和 `user.email` 为空,你需要配置你的 Git 用户名和邮箱: + +```Shell +# 在命令提示符(cmd)或终端(terminal)中输入以下命令,查看 Git 配置 +git config --global --list +# 设置 Git 用户名和邮箱 +git config --global user.name "这里填入你的用户名" +git config --global user.email "这里填入你的邮箱" +``` + +## PR 流程 + +如果你对 PR 流程不熟悉,接下来将会从零开始,一步一步地教你如何提交 PR。如果你想深入了解 PR 开发模式,可以参考 [GitHub 官方文档](https://docs.github.com/cn/github/collaborating-with-issues-and-pull-requests/about-pull-requests)。 + +### 1. Fork 项目 + +当你第一次提交 PR 时,需要先 Fork 项目到自己的 GitHub 账号下。点击项目右上角的 Fork 按钮,将项目 Fork 到自己的 GitHub 账号下。 + +![](https://user-images.githubusercontent.com/13503330/223318144-a49c6cef-b1fb-45b8-aa2b-0833d0e3fd5c.png) + +接着,你需要将你的 Fork 仓库 Clone 到本地,然后添加官方仓库作为远程仓库: + +```Shell + +# Clone 你的 Fork 仓库到本地 +git clone https://github.com/username/mmpose.git + +# 添加官方仓库作为远程仓库 +cd mmpose +git remote add upstream https://github.com/open-mmlab/mmpose.git +``` + +在终端中输入以下命令,查看远程仓库是否成功添加: + +```Shell +git remote -v +``` + +如果出现以下信息,说明你已经成功添加了远程仓库: + +```Shell +origin https://github.com/{username}/mmpose.git (fetch) +origin https://github.com/{username}/mmpose.git (push) +upstream https://github.com/open-mmlab/mmpose.git (fetch) +upstream https://github.com/open-mmlab/mmpose.git (push) +``` + +```{note} +这里对 origin 和 upstream 进行一个简单的介绍,当我们使用 git clone 来克隆代码时,会默认创建一个 origin 的 remote,它指向我们克隆的代码库地址,而 upstream 则是我们自己添加的,用来指向原始代码库地址。当然如果你不喜欢他叫 upstream,也可以自己修改,比如叫 open-mmlab。我们通常向 origin 提交代码(即 fork 下来的远程仓库),然后向 upstream 提交一个 pull request。如果提交的代码和最新的代码发生冲突,再从 upstream 拉取最新的代码,和本地分支解决冲突,再提交到 origin。 +``` + +### 2. 配置 pre-commit + +在本地开发环境中,我们使用 pre-commit 来检查代码风格,以确保代码风格的统一。在提交代码前,你需要先安装 pre-commit: + +```Shell +pip install -U pre-commit + +# 在 mmpose 根目录下安装 pre-commit +pre-commit install +``` + +检查 pre-commit 是否配置成功,并安装 `.pre-commit-config.yaml` 中的钩子: + +```Shell +pre-commit run --all-files +``` + +![](https://user-images.githubusercontent.com/57566630/202368856-0465a90d-8fce-4345-918e-67b8b9c82614.png) + +```{note} +如果你是中国大陆用户,由于网络原因,可能会出现 pre-commit 安装失败的情况。 + +这时你可以使用清华源来安装 pre-commit: +pip install -U pre-commit -i https://pypi.tuna.tsinghua.edu.cn/simple + +或者使用国内镜像来安装 pre-commit: +pip install -U pre-commit -i https://pypi.mirrors.ustc.edu.cn/simple +``` + +如果安装过程被中断,可以重复执行上述命令,直到安装成功。 + +如果你提交的代码中有不符合规范的地方,pre-commit 会发出警告,并自动修复部分错误。 + +![](https://user-images.githubusercontent.com/57566630/202369176-67642454-0025-4023-a095-263529107aa3.png) + +### 3. 创建开发分支 + +安装完 pre-commit 之后,我们需要基于 dev 分支创建一个新的开发分支,建议以 `username/pr_name` 的形式命名,例如: + +```Shell +git checkout -b username/refactor_contributing_doc +``` + +在后续的开发中,如果本地仓库的 dev 分支落后于官方仓库的 dev 分支,需要先拉取 upstream 的 dev 分支,然后 rebase 到本地的开发分支上: + +```Shell +git checkout username/refactor_contributing_doc +git fetch upstream +git rebase upstream/dev-1.x +``` + +在 rebase 时,如果出现冲突,需要手动解决冲突,然后执行 `git add` 命令,再执行 `git rebase --continue` 命令,直到 rebase 完成。 + +### 4. 提交代码并在本地通过单元测试 + +在本地开发完成后,我们需要在本地通过单元测试,然后提交代码。 + +```shell +# 运行单元测试 +pytest tests/ + +# 提交代码 +git add . 
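+# （可选的示意步骤，假设已按上文安装并配置 pre-commit）
+# 提交前可以先对暂存区文件手动运行一次钩子检查：
+pre-commit run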
+git commit -m "commit message" +``` + +### 5. 推送代码到远程仓库 + +在本地开发完成后,我们需要将代码推送到远程仓库。 + +```Shell +git push origin username/refactor_contributing_doc +``` + +### 6. 提交 Pull Request (PR) + +#### (1) 在 GitHub 上创建 PR + +![](https://user-images.githubusercontent.com/13503330/223321382-e6068e18-1d91-4458-8328-b1c7c907b3b2.png) + +#### (2) 在 PR 中根据指引修改描述,添加必要的信息 + +![](https://user-images.githubusercontent.com/13503330/223322447-94ad4b8c-21bf-4ca7-b3d6-0568cace6eee.png) + +```{note} +- 在 PR branch 左侧选择 `dev` 分支,否则 PR 会被拒绝。 +- 如果你是第一次向 OpenMMLab 提交 PR,需要签署 CLA。 +``` + +![](https://user-images.githubusercontent.com/57566630/167307569-a794b967-6e28-4eac-a942-00deb657815f.png) + +## 代码风格 + +### Python + +我们采用[PEP8](https://www.python.org/dev/peps/pep-0008/)作为代码风格。 + +使用下面的工具来对代码进行整理和格式化: + +- [flake8](http://flake8.pycqa.org/en/latest/):代码提示 +- [isort](https://github.com/timothycrosley/isort):import 排序 +- [yapf](https://github.com/google/yapf):格式化工具 +- [codespell](https://github.com/codespell-project/codespell): 单词拼写检查 +- [mdformat](https://github.com/executablebooks/mdformat): markdown 文件格式化工具 +- [docformatter](https://github.com/myint/docformatter): docstring 格式化工具 + +`yapf`和`isort`的样式配置可以在[setup.cfg](/setup.cfg)中找到。 + +我们使用[pre-commit hook](https://pre-commit.com/)来: + +- 检查和格式化 `flake8`、`yapf`、`isort`、`trailing whitespaces` +- 修复 `end-of-files` +- 在每次提交时自动排序 `requirments.txt` + +`pre-commit`的配置存储在[.pre-commit-config](/.pre-commit-config.yaml)中。 + +```{note} +在你创建PR之前,请确保你的代码格式符合规范,且经过了 yapf 格式化。 +``` + +### C++与CUDA + +遵循[Google C++风格指南](https://google.github.io/styleguide/cppguide.html) diff --git a/docs/zh_cn/dataset_zoo/2d_animal_keypoint.md b/docs/zh_cn/dataset_zoo/2d_animal_keypoint.md index 28b0b726b4..5e9ca92a71 100644 --- a/docs/zh_cn/dataset_zoo/2d_animal_keypoint.md +++ b/docs/zh_cn/dataset_zoo/2d_animal_keypoint.md @@ -1,545 +1,545 @@ -# 2D Animal Keypoint Dataset - -It is recommended to symlink the dataset root to `$MMPOSE/data`. -If your folder structure is different, you may need to change the corresponding paths in config files. - -MMPose supported datasets: - -- [Animal-Pose](#animal-pose) \[ [Homepage](https://sites.google.com/view/animal-pose/) \] -- [AP-10K](#ap-10k) \[ [Homepage](https://github.com/AlexTheBad/AP-10K/) \] -- [Horse-10](#horse-10) \[ [Homepage](http://www.mackenziemathislab.org/horse10) \] -- [MacaquePose](#macaquepose) \[ [Homepage](http://www.pri.kyoto-u.ac.jp/datasets/macaquepose/index.html) \] -- [Vinegar Fly](#vinegar-fly) \[ [Homepage](https://github.com/jgraving/DeepPoseKit-Data) \] -- [Desert Locust](#desert-locust) \[ [Homepage](https://github.com/jgraving/DeepPoseKit-Data) \] -- [Grévy’s Zebra](#grvys-zebra) \[ [Homepage](https://github.com/jgraving/DeepPoseKit-Data) \] -- [ATRW](#atrw) \[ [Homepage](https://cvwc2019.github.io/challenge.html) \] -- [Animal Kingdom](#Animal-Kindom) \[ [Homepage](https://openaccess.thecvf.com/content/CVPR2022/html/Ng_Animal_Kingdom_A_Large_and_Diverse_Dataset_for_Animal_Behavior_CVPR_2022_paper.html) \] - -## Animal-Pose - - - -
    -Animal-Pose (ICCV'2019) - -```bibtex -@InProceedings{Cao_2019_ICCV, - author = {Cao, Jinkun and Tang, Hongyang and Fang, Hao-Shu and Shen, Xiaoyong and Lu, Cewu and Tai, Yu-Wing}, - title = {Cross-Domain Adaptation for Animal Pose Estimation}, - booktitle = {The IEEE International Conference on Computer Vision (ICCV)}, - month = {October}, - year = {2019} -} -``` - -
    - -For [Animal-Pose](https://sites.google.com/view/animal-pose/) dataset, we prepare the dataset as follows: - -1. Download the images of [PASCAL VOC2012](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/#data), especially the five categories (dog, cat, sheep, cow, horse), which we use as trainval dataset. -2. Download the [test-set](https://drive.google.com/drive/folders/1DwhQobZlGntOXxdm7vQsE4bqbFmN3b9y?usp=sharing) images with raw annotations (1000 images, 5 categories). -3. We have pre-processed the annotations to make it compatible with MMPose. Please download the annotation files from [annotations](https://download.openmmlab.com/mmpose/datasets/animalpose_annotations.tar). If you would like to generate the annotations by yourself, please check our dataset parsing [codes](/tools/dataset_converters/parse_animalpose_dataset.py). - -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── animalpose - │ - │-- VOC2012 - │ │-- Annotations - │ │-- ImageSets - │ │-- JPEGImages - │ │-- SegmentationClass - │ │-- SegmentationObject - │ - │-- animalpose_image_part2 - │ │-- cat - │ │-- cow - │ │-- dog - │ │-- horse - │ │-- sheep - │ - │-- annotations - │ │-- animalpose_train.json - │ |-- animalpose_val.json - │ |-- animalpose_trainval.json - │ │-- animalpose_test.json - │ - │-- PASCAL2011_animal_annotation - │ │-- cat - │ │ |-- 2007_000528_1.xml - │ │ |-- 2007_000549_1.xml - │ │ │-- ... - │ │-- cow - │ │-- dog - │ │-- horse - │ │-- sheep - │ - │-- annimalpose_anno2 - │ │-- cat - │ │ |-- ca1.xml - │ │ |-- ca2.xml - │ │ │-- ... - │ │-- cow - │ │-- dog - │ │-- horse - │ │-- sheep - -``` - -The official dataset does not provide the official train/val/test set split. -We choose the images from PascalVOC for train & val. In total, we have 3608 images and 5117 annotations for train+val, where -2798 images with 4000 annotations are used for training, and 810 images with 1117 annotations are used for validation. -Those images from other sources (1000 images with 1000 annotations) are used for testing. - -## AP-10K - - - -
    -AP-10K (NeurIPS'2021) - -```bibtex -@misc{yu2021ap10k, - title={AP-10K: A Benchmark for Animal Pose Estimation in the Wild}, - author={Hang Yu and Yufei Xu and Jing Zhang and Wei Zhao and Ziyu Guan and Dacheng Tao}, - year={2021}, - eprint={2108.12617}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -
    - -For [AP-10K](https://github.com/AlexTheBad/AP-10K/) dataset, images and annotations can be downloaded from [download](https://drive.google.com/file/d/1-FNNGcdtAQRehYYkGY1y4wzFNg4iWNad/view?usp=sharing). -Note, this data and annotation data is for non-commercial use only. - -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── ap10k - │-- annotations - │ │-- ap10k-train-split1.json - │ |-- ap10k-train-split2.json - │ |-- ap10k-train-split3.json - │ │-- ap10k-val-split1.json - │ |-- ap10k-val-split2.json - │ |-- ap10k-val-split3.json - │ |-- ap10k-test-split1.json - │ |-- ap10k-test-split2.json - │ |-- ap10k-test-split3.json - │-- data - │ │-- 000000000001.jpg - │ │-- 000000000002.jpg - │ │-- ... - -``` - -The annotation files in 'annotation' folder contains 50 labeled animal species. There are total 10,015 labeled images with 13,028 instances in the AP-10K dataset. We randonly split them into train, val, and test set following the ratio of 7:1:2. - -## Horse-10 - - - -
    -Horse-10 (WACV'2021) - -```bibtex -@inproceedings{mathis2021pretraining, - title={Pretraining boosts out-of-domain robustness for pose estimation}, - author={Mathis, Alexander and Biasi, Thomas and Schneider, Steffen and Yuksekgonul, Mert and Rogers, Byron and Bethge, Matthias and Mathis, Mackenzie W}, - booktitle={Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision}, - pages={1859--1868}, - year={2021} -} -``` - -
    - -For [Horse-10](http://www.mackenziemathislab.org/horse10) dataset, images can be downloaded from [download](http://www.mackenziemathislab.org/horse10). -Please download the annotation files from [horse10_annotations](https://download.openmmlab.com/mmpose/datasets/horse10_annotations.tar). Note, this data and annotation data is for non-commercial use only, per the authors (see http://horse10.deeplabcut.org for more information). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── horse10 - │-- annotations - │ │-- horse10-train-split1.json - │ |-- horse10-train-split2.json - │ |-- horse10-train-split3.json - │ │-- horse10-test-split1.json - │ |-- horse10-test-split2.json - │ |-- horse10-test-split3.json - │-- labeled-data - │ │-- BrownHorseinShadow - │ │-- BrownHorseintoshadow - │ │-- ... - -``` - -## MacaquePose - - - -
    -MacaquePose (bioRxiv'2020) - -```bibtex -@article{labuguen2020macaquepose, - title={MacaquePose: A novel ‘in the wild’macaque monkey pose dataset for markerless motion capture}, - author={Labuguen, Rollyn and Matsumoto, Jumpei and Negrete, Salvador and Nishimaru, Hiroshi and Nishijo, Hisao and Takada, Masahiko and Go, Yasuhiro and Inoue, Ken-ichi and Shibata, Tomohiro}, - journal={bioRxiv}, - year={2020}, - publisher={Cold Spring Harbor Laboratory} -} -``` - -
    - -For [MacaquePose](http://www.pri.kyoto-u.ac.jp/datasets/macaquepose/index.html) dataset, images can be downloaded from [download](http://www.pri.kyoto-u.ac.jp/datasets/macaquepose/index.html). -Please download the annotation files from [macaque_annotations](https://download.openmmlab.com/mmpose/datasets/macaque_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── macaque - │-- annotations - │ │-- macaque_train.json - │ |-- macaque_test.json - │-- images - │ │-- 01418849d54b3005.jpg - │ │-- 0142d1d1a6904a70.jpg - │ │-- 01ef2c4c260321b7.jpg - │ │-- 020a1c75c8c85238.jpg - │ │-- 020b1506eef2557d.jpg - │ │-- ... - -``` - -Since the official dataset does not provide the test set, we randomly select 12500 images for training, and the rest for evaluation (see [code](/tools/dataset/parse_macaquepose_dataset.py)). - -## Vinegar Fly - - - -
    -Vinegar Fly (Nature Methods'2019) - -```bibtex -@article{pereira2019fast, - title={Fast animal pose estimation using deep neural networks}, - author={Pereira, Talmo D and Aldarondo, Diego E and Willmore, Lindsay and Kislin, Mikhail and Wang, Samuel S-H and Murthy, Mala and Shaevitz, Joshua W}, - journal={Nature methods}, - volume={16}, - number={1}, - pages={117--125}, - year={2019}, - publisher={Nature Publishing Group} -} -``` - -
    - -For [Vinegar Fly](https://github.com/jgraving/DeepPoseKit-Data) dataset, images can be downloaded from [vinegar_fly_images](https://download.openmmlab.com/mmpose/datasets/vinegar_fly_images.tar). -Please download the annotation files from [vinegar_fly_annotations](https://download.openmmlab.com/mmpose/datasets/vinegar_fly_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── fly - │-- annotations - │ │-- fly_train.json - │ |-- fly_test.json - │-- images - │ │-- 0.jpg - │ │-- 1.jpg - │ │-- 2.jpg - │ │-- 3.jpg - │ │-- ... - -``` - -Since the official dataset does not provide the test set, we randomly select 90% images for training, and the rest (10%) for evaluation (see [code](/tools/dataset_converters/parse_deepposekit_dataset.py)). - -## Desert Locust - - - -
    -Desert Locust (Elife'2019) - -```bibtex -@article{graving2019deepposekit, - title={DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning}, - author={Graving, Jacob M and Chae, Daniel and Naik, Hemal and Li, Liang and Koger, Benjamin and Costelloe, Blair R and Couzin, Iain D}, - journal={Elife}, - volume={8}, - pages={e47994}, - year={2019}, - publisher={eLife Sciences Publications Limited} -} -``` - -
    - -For [Desert Locust](https://github.com/jgraving/DeepPoseKit-Data) dataset, images can be downloaded from [locust_images](https://download.openmmlab.com/mmpose/datasets/locust_images.tar). -Please download the annotation files from [locust_annotations](https://download.openmmlab.com/mmpose/datasets/locust_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── locust - │-- annotations - │ │-- locust_train.json - │ |-- locust_test.json - │-- images - │ │-- 0.jpg - │ │-- 1.jpg - │ │-- 2.jpg - │ │-- 3.jpg - │ │-- ... - -``` - -Since the official dataset does not provide the test set, we randomly select 90% images for training, and the rest (10%) for evaluation (see [code](/tools/dataset_converters/parse_deepposekit_dataset.py)). - -## Grévy’s Zebra - - - -
    -Grévy’s Zebra (Elife'2019) - -```bibtex -@article{graving2019deepposekit, - title={DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning}, - author={Graving, Jacob M and Chae, Daniel and Naik, Hemal and Li, Liang and Koger, Benjamin and Costelloe, Blair R and Couzin, Iain D}, - journal={Elife}, - volume={8}, - pages={e47994}, - year={2019}, - publisher={eLife Sciences Publications Limited} -} -``` - -
    - -For [Grévy’s Zebra](https://github.com/jgraving/DeepPoseKit-Data) dataset, images can be downloaded from [zebra_images](https://download.openmmlab.com/mmpose/datasets/zebra_images.tar). -Please download the annotation files from [zebra_annotations](https://download.openmmlab.com/mmpose/datasets/zebra_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── zebra - │-- annotations - │ │-- zebra_train.json - │ |-- zebra_test.json - │-- images - │ │-- 0.jpg - │ │-- 1.jpg - │ │-- 2.jpg - │ │-- 3.jpg - │ │-- ... - -``` - -Since the official dataset does not provide the test set, we randomly select 90% images for training, and the rest (10%) for evaluation (see [code](/tools/dataset_converters/parse_deepposekit_dataset.py)). - -## ATRW - - - -
    -ATRW (ACM MM'2020) - -```bibtex -@inproceedings{li2020atrw, - title={ATRW: A Benchmark for Amur Tiger Re-identification in the Wild}, - author={Li, Shuyuan and Li, Jianguo and Tang, Hanlin and Qian, Rui and Lin, Weiyao}, - booktitle={Proceedings of the 28th ACM International Conference on Multimedia}, - pages={2590--2598}, - year={2020} -} -``` - -
    - -ATRW captures images of the Amur tiger (also known as Siberian tiger, Northeast-China tiger) in the wild. -For [ATRW](https://cvwc2019.github.io/challenge.html) dataset, please download images from -[Pose_train](https://lilablobssc.blob.core.windows.net/cvwc2019/train/atrw_pose_train.tar.gz), -[Pose_val](https://lilablobssc.blob.core.windows.net/cvwc2019/train/atrw_pose_val.tar.gz), and -[Pose_test](https://lilablobssc.blob.core.windows.net/cvwc2019/test/atrw_pose_test.tar.gz). -Note that in the ATRW official annotation files, the key "file_name" is written as "filename". To make it compatible with -other coco-type json files, we have modified this key. -Please download the modified annotation files from [atrw_annotations](https://download.openmmlab.com/mmpose/datasets/atrw_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── atrw - │-- annotations - │ │-- keypoint_train.json - │ │-- keypoint_val.json - │ │-- keypoint_trainval.json - │-- images - │ │-- train - │ │ │-- 000002.jpg - │ │ │-- 000003.jpg - │ │ │-- ... - │ │-- val - │ │ │-- 000001.jpg - │ │ │-- 000013.jpg - │ │ │-- ... - │ │-- test - │ │ │-- 000000.jpg - │ │ │-- 000004.jpg - │ │ │-- ... - -``` - -## Animal Kingdom - -
    -Animal Kingdom (CVPR'2022) -
    - -```bibtex -@inproceedings{Ng_2022_CVPR, - author = {Ng, Xun Long and Ong, Kian Eng and Zheng, Qichen and Ni, Yun and Yeo, Si Yong and Liu, Jun}, - title = {Animal Kingdom: A Large and Diverse Dataset for Animal Behavior Understanding}, - booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, - month = {June}, - year = {2022}, - pages = {19023-19034} - } -``` - -For [Animal Kingdom](https://github.com/sutdcv/Animal-Kingdom) dataset, images can be downloaded from [here](https://forms.office.com/pages/responsepage.aspx?id=drd2NJDpck-5UGJImDFiPVRYpnTEMixKqPJ1FxwK6VZUQkNTSkRISTNORUI2TDBWMUpZTlQ5WUlaSyQlQCN0PWcu). -Please Extract dataset under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── ak - |--annotations - │ │-- ak_P1 - │ │ │-- train.json - │ │ │-- test.json - │ │-- ak_P2 - │ │ │-- train.json - │ │ │-- test.json - │ │-- ak_P3_amphibian - │ │ │-- train.json - │ │ │-- test.json - │ │-- ak_P3_bird - │ │ │-- train.json - │ │ │-- test.json - │ │-- ak_P3_fish - │ │ │-- train.json - │ │ │-- test.json - │ │-- ak_P3_mammal - │ │ │-- train.json - │ │ │-- test.json - │ │-- ak_P3_reptile - │ │-- train.json - │ │-- test.json - │-- images - │ │-- AAACXZTV - │ │ │--AAACXZTV_f000059.jpg - │ │ │--... - │ │-- AAAUILHH - │ │ │--AAAUILHH_f000098.jpg - │ │ │--... - │ │-- ... -``` +# 2D Animal Keypoint Dataset + +It is recommended to symlink the dataset root to `$MMPOSE/data`. +If your folder structure is different, you may need to change the corresponding paths in config files. + +MMPose supported datasets: + +- [Animal-Pose](#animal-pose) \[ [Homepage](https://sites.google.com/view/animal-pose/) \] +- [AP-10K](#ap-10k) \[ [Homepage](https://github.com/AlexTheBad/AP-10K/) \] +- [Horse-10](#horse-10) \[ [Homepage](http://www.mackenziemathislab.org/horse10) \] +- [MacaquePose](#macaquepose) \[ [Homepage](http://www.pri.kyoto-u.ac.jp/datasets/macaquepose/index.html) \] +- [Vinegar Fly](#vinegar-fly) \[ [Homepage](https://github.com/jgraving/DeepPoseKit-Data) \] +- [Desert Locust](#desert-locust) \[ [Homepage](https://github.com/jgraving/DeepPoseKit-Data) \] +- [Grévy’s Zebra](#grvys-zebra) \[ [Homepage](https://github.com/jgraving/DeepPoseKit-Data) \] +- [ATRW](#atrw) \[ [Homepage](https://cvwc2019.github.io/challenge.html) \] +- [Animal Kingdom](#Animal-Kindom) \[ [Homepage](https://openaccess.thecvf.com/content/CVPR2022/html/Ng_Animal_Kingdom_A_Large_and_Diverse_Dataset_for_Animal_Behavior_CVPR_2022_paper.html) \] + +## Animal-Pose + + + +
    +Animal-Pose (ICCV'2019) + +```bibtex +@InProceedings{Cao_2019_ICCV, + author = {Cao, Jinkun and Tang, Hongyang and Fang, Hao-Shu and Shen, Xiaoyong and Lu, Cewu and Tai, Yu-Wing}, + title = {Cross-Domain Adaptation for Animal Pose Estimation}, + booktitle = {The IEEE International Conference on Computer Vision (ICCV)}, + month = {October}, + year = {2019} +} +``` + +
    + +For [Animal-Pose](https://sites.google.com/view/animal-pose/) dataset, we prepare the dataset as follows: + +1. Download the images of [PASCAL VOC2012](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/#data), especially the five categories (dog, cat, sheep, cow, horse), which we use as trainval dataset. +2. Download the [test-set](https://drive.google.com/drive/folders/1DwhQobZlGntOXxdm7vQsE4bqbFmN3b9y?usp=sharing) images with raw annotations (1000 images, 5 categories). +3. We have pre-processed the annotations to make it compatible with MMPose. Please download the annotation files from [annotations](https://download.openmmlab.com/mmpose/datasets/animalpose_annotations.tar). If you would like to generate the annotations by yourself, please check our dataset parsing [codes](/tools/dataset_converters/parse_animalpose_dataset.py). + +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── animalpose + │ + │-- VOC2012 + │ │-- Annotations + │ │-- ImageSets + │ │-- JPEGImages + │ │-- SegmentationClass + │ │-- SegmentationObject + │ + │-- animalpose_image_part2 + │ │-- cat + │ │-- cow + │ │-- dog + │ │-- horse + │ │-- sheep + │ + │-- annotations + │ │-- animalpose_train.json + │ |-- animalpose_val.json + │ |-- animalpose_trainval.json + │ │-- animalpose_test.json + │ + │-- PASCAL2011_animal_annotation + │ │-- cat + │ │ |-- 2007_000528_1.xml + │ │ |-- 2007_000549_1.xml + │ │ │-- ... + │ │-- cow + │ │-- dog + │ │-- horse + │ │-- sheep + │ + │-- annimalpose_anno2 + │ │-- cat + │ │ |-- ca1.xml + │ │ |-- ca2.xml + │ │ │-- ... + │ │-- cow + │ │-- dog + │ │-- horse + │ │-- sheep + +``` + +The official dataset does not provide the official train/val/test set split. +We choose the images from PascalVOC for train & val. In total, we have 3608 images and 5117 annotations for train+val, where +2798 images with 4000 annotations are used for training, and 810 images with 1117 annotations are used for validation. +Those images from other sources (1000 images with 1000 annotations) are used for testing. + +## AP-10K + + + +
    +AP-10K (NeurIPS'2021) + +```bibtex +@misc{yu2021ap10k, + title={AP-10K: A Benchmark for Animal Pose Estimation in the Wild}, + author={Hang Yu and Yufei Xu and Jing Zhang and Wei Zhao and Ziyu Guan and Dacheng Tao}, + year={2021}, + eprint={2108.12617}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
    + +For [AP-10K](https://github.com/AlexTheBad/AP-10K/) dataset, images and annotations can be downloaded from [download](https://drive.google.com/file/d/1-FNNGcdtAQRehYYkGY1y4wzFNg4iWNad/view?usp=sharing). +Note, this data and annotation data is for non-commercial use only. + +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── ap10k + │-- annotations + │ │-- ap10k-train-split1.json + │ |-- ap10k-train-split2.json + │ |-- ap10k-train-split3.json + │ │-- ap10k-val-split1.json + │ |-- ap10k-val-split2.json + │ |-- ap10k-val-split3.json + │ |-- ap10k-test-split1.json + │ |-- ap10k-test-split2.json + │ |-- ap10k-test-split3.json + │-- data + │ │-- 000000000001.jpg + │ │-- 000000000002.jpg + │ │-- ... + +``` + +The annotation files in 'annotation' folder contains 50 labeled animal species. There are total 10,015 labeled images with 13,028 instances in the AP-10K dataset. We randonly split them into train, val, and test set following the ratio of 7:1:2. + +## Horse-10 + + + +
    +Horse-10 (WACV'2021) + +```bibtex +@inproceedings{mathis2021pretraining, + title={Pretraining boosts out-of-domain robustness for pose estimation}, + author={Mathis, Alexander and Biasi, Thomas and Schneider, Steffen and Yuksekgonul, Mert and Rogers, Byron and Bethge, Matthias and Mathis, Mackenzie W}, + booktitle={Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision}, + pages={1859--1868}, + year={2021} +} +``` + +
    + +For [Horse-10](http://www.mackenziemathislab.org/horse10) dataset, images can be downloaded from [download](http://www.mackenziemathislab.org/horse10). +Please download the annotation files from [horse10_annotations](https://download.openmmlab.com/mmpose/datasets/horse10_annotations.tar). Note, this data and annotation data is for non-commercial use only, per the authors (see http://horse10.deeplabcut.org for more information). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── horse10 + │-- annotations + │ │-- horse10-train-split1.json + │ |-- horse10-train-split2.json + │ |-- horse10-train-split3.json + │ │-- horse10-test-split1.json + │ |-- horse10-test-split2.json + │ |-- horse10-test-split3.json + │-- labeled-data + │ │-- BrownHorseinShadow + │ │-- BrownHorseintoshadow + │ │-- ... + +``` + +## MacaquePose + + + +
    +MacaquePose (bioRxiv'2020) + +```bibtex +@article{labuguen2020macaquepose, + title={MacaquePose: A novel ‘in the wild’macaque monkey pose dataset for markerless motion capture}, + author={Labuguen, Rollyn and Matsumoto, Jumpei and Negrete, Salvador and Nishimaru, Hiroshi and Nishijo, Hisao and Takada, Masahiko and Go, Yasuhiro and Inoue, Ken-ichi and Shibata, Tomohiro}, + journal={bioRxiv}, + year={2020}, + publisher={Cold Spring Harbor Laboratory} +} +``` + +
    + +For [MacaquePose](http://www.pri.kyoto-u.ac.jp/datasets/macaquepose/index.html) dataset, images can be downloaded from [download](http://www.pri.kyoto-u.ac.jp/datasets/macaquepose/index.html). +Please download the annotation files from [macaque_annotations](https://download.openmmlab.com/mmpose/datasets/macaque_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── macaque + │-- annotations + │ │-- macaque_train.json + │ |-- macaque_test.json + │-- images + │ │-- 01418849d54b3005.jpg + │ │-- 0142d1d1a6904a70.jpg + │ │-- 01ef2c4c260321b7.jpg + │ │-- 020a1c75c8c85238.jpg + │ │-- 020b1506eef2557d.jpg + │ │-- ... + +``` + +Since the official dataset does not provide the test set, we randomly select 12500 images for training, and the rest for evaluation (see [code](/tools/dataset/parse_macaquepose_dataset.py)). + +## Vinegar Fly + + + +
    +Vinegar Fly (Nature Methods'2019) + +```bibtex +@article{pereira2019fast, + title={Fast animal pose estimation using deep neural networks}, + author={Pereira, Talmo D and Aldarondo, Diego E and Willmore, Lindsay and Kislin, Mikhail and Wang, Samuel S-H and Murthy, Mala and Shaevitz, Joshua W}, + journal={Nature methods}, + volume={16}, + number={1}, + pages={117--125}, + year={2019}, + publisher={Nature Publishing Group} +} +``` + +
    + +For [Vinegar Fly](https://github.com/jgraving/DeepPoseKit-Data) dataset, images can be downloaded from [vinegar_fly_images](https://download.openmmlab.com/mmpose/datasets/vinegar_fly_images.tar). +Please download the annotation files from [vinegar_fly_annotations](https://download.openmmlab.com/mmpose/datasets/vinegar_fly_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── fly + │-- annotations + │ │-- fly_train.json + │ |-- fly_test.json + │-- images + │ │-- 0.jpg + │ │-- 1.jpg + │ │-- 2.jpg + │ │-- 3.jpg + │ │-- ... + +``` + +Since the official dataset does not provide the test set, we randomly select 90% images for training, and the rest (10%) for evaluation (see [code](/tools/dataset_converters/parse_deepposekit_dataset.py)). + +## Desert Locust + + + +
    +Desert Locust (Elife'2019) + +```bibtex +@article{graving2019deepposekit, + title={DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning}, + author={Graving, Jacob M and Chae, Daniel and Naik, Hemal and Li, Liang and Koger, Benjamin and Costelloe, Blair R and Couzin, Iain D}, + journal={Elife}, + volume={8}, + pages={e47994}, + year={2019}, + publisher={eLife Sciences Publications Limited} +} +``` + +
    + +For [Desert Locust](https://github.com/jgraving/DeepPoseKit-Data) dataset, images can be downloaded from [locust_images](https://download.openmmlab.com/mmpose/datasets/locust_images.tar). +Please download the annotation files from [locust_annotations](https://download.openmmlab.com/mmpose/datasets/locust_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── locust + │-- annotations + │ │-- locust_train.json + │ |-- locust_test.json + │-- images + │ │-- 0.jpg + │ │-- 1.jpg + │ │-- 2.jpg + │ │-- 3.jpg + │ │-- ... + +``` + +Since the official dataset does not provide the test set, we randomly select 90% images for training, and the rest (10%) for evaluation (see [code](/tools/dataset_converters/parse_deepposekit_dataset.py)). + +## Grévy’s Zebra + + + +
    +Grévy’s Zebra (Elife'2019) + +```bibtex +@article{graving2019deepposekit, + title={DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning}, + author={Graving, Jacob M and Chae, Daniel and Naik, Hemal and Li, Liang and Koger, Benjamin and Costelloe, Blair R and Couzin, Iain D}, + journal={Elife}, + volume={8}, + pages={e47994}, + year={2019}, + publisher={eLife Sciences Publications Limited} +} +``` + +
    + +For [Grévy’s Zebra](https://github.com/jgraving/DeepPoseKit-Data) dataset, images can be downloaded from [zebra_images](https://download.openmmlab.com/mmpose/datasets/zebra_images.tar). +Please download the annotation files from [zebra_annotations](https://download.openmmlab.com/mmpose/datasets/zebra_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── zebra + │-- annotations + │ │-- zebra_train.json + │ |-- zebra_test.json + │-- images + │ │-- 0.jpg + │ │-- 1.jpg + │ │-- 2.jpg + │ │-- 3.jpg + │ │-- ... + +``` + +Since the official dataset does not provide the test set, we randomly select 90% images for training, and the rest (10%) for evaluation (see [code](/tools/dataset_converters/parse_deepposekit_dataset.py)). + +## ATRW + + + +
    +ATRW (ACM MM'2020) + +```bibtex +@inproceedings{li2020atrw, + title={ATRW: A Benchmark for Amur Tiger Re-identification in the Wild}, + author={Li, Shuyuan and Li, Jianguo and Tang, Hanlin and Qian, Rui and Lin, Weiyao}, + booktitle={Proceedings of the 28th ACM International Conference on Multimedia}, + pages={2590--2598}, + year={2020} +} +``` + +
    + +ATRW captures images of the Amur tiger (also known as Siberian tiger, Northeast-China tiger) in the wild. +For [ATRW](https://cvwc2019.github.io/challenge.html) dataset, please download images from +[Pose_train](https://lilablobssc.blob.core.windows.net/cvwc2019/train/atrw_pose_train.tar.gz), +[Pose_val](https://lilablobssc.blob.core.windows.net/cvwc2019/train/atrw_pose_val.tar.gz), and +[Pose_test](https://lilablobssc.blob.core.windows.net/cvwc2019/test/atrw_pose_test.tar.gz). +Note that in the ATRW official annotation files, the key "file_name" is written as "filename". To make it compatible with +other coco-type json files, we have modified this key. +Please download the modified annotation files from [atrw_annotations](https://download.openmmlab.com/mmpose/datasets/atrw_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── atrw + │-- annotations + │ │-- keypoint_train.json + │ │-- keypoint_val.json + │ │-- keypoint_trainval.json + │-- images + │ │-- train + │ │ │-- 000002.jpg + │ │ │-- 000003.jpg + │ │ │-- ... + │ │-- val + │ │ │-- 000001.jpg + │ │ │-- 000013.jpg + │ │ │-- ... + │ │-- test + │ │ │-- 000000.jpg + │ │ │-- 000004.jpg + │ │ │-- ... + +``` + +## Animal Kingdom + +
    +Animal Kingdom (CVPR'2022) +
    + +```bibtex +@inproceedings{Ng_2022_CVPR, + author = {Ng, Xun Long and Ong, Kian Eng and Zheng, Qichen and Ni, Yun and Yeo, Si Yong and Liu, Jun}, + title = {Animal Kingdom: A Large and Diverse Dataset for Animal Behavior Understanding}, + booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2022}, + pages = {19023-19034} + } +``` + +For [Animal Kingdom](https://github.com/sutdcv/Animal-Kingdom) dataset, images can be downloaded from [here](https://forms.office.com/pages/responsepage.aspx?id=drd2NJDpck-5UGJImDFiPVRYpnTEMixKqPJ1FxwK6VZUQkNTSkRISTNORUI2TDBWMUpZTlQ5WUlaSyQlQCN0PWcu). +Please Extract dataset under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── ak + |--annotations + │ │-- ak_P1 + │ │ │-- train.json + │ │ │-- test.json + │ │-- ak_P2 + │ │ │-- train.json + │ │ │-- test.json + │ │-- ak_P3_amphibian + │ │ │-- train.json + │ │ │-- test.json + │ │-- ak_P3_bird + │ │ │-- train.json + │ │ │-- test.json + │ │-- ak_P3_fish + │ │ │-- train.json + │ │ │-- test.json + │ │-- ak_P3_mammal + │ │ │-- train.json + │ │ │-- test.json + │ │-- ak_P3_reptile + │ │-- train.json + │ │-- test.json + │-- images + │ │-- AAACXZTV + │ │ │--AAACXZTV_f000059.jpg + │ │ │--... + │ │-- AAAUILHH + │ │ │--AAAUILHH_f000098.jpg + │ │ │--... + │ │-- ... +``` diff --git a/docs/zh_cn/dataset_zoo/2d_body_keypoint.md b/docs/zh_cn/dataset_zoo/2d_body_keypoint.md index 4448ebe8f4..3c68b1affc 100644 --- a/docs/zh_cn/dataset_zoo/2d_body_keypoint.md +++ b/docs/zh_cn/dataset_zoo/2d_body_keypoint.md @@ -1,588 +1,588 @@ -# 2D Body Keypoint Datasets - -It is recommended to symlink the dataset root to `$MMPOSE/data`. -If your folder structure is different, you may need to change the corresponding paths in config files. - -MMPose supported datasets: - -- Images - - [COCO](#coco) \[ [Homepage](http://cocodataset.org/) \] - - [MPII](#mpii) \[ [Homepage](http://human-pose.mpi-inf.mpg.de/) \] - - [MPII-TRB](#mpii-trb) \[ [Homepage](https://github.com/kennymckormick/Triplet-Representation-of-human-Body) \] - - [AI Challenger](#aic) \[ [Homepage](https://github.com/AIChallenger/AI_Challenger_2017) \] - - [CrowdPose](#crowdpose) \[ [Homepage](https://github.com/Jeff-sjtu/CrowdPose) \] - - [OCHuman](#ochuman) \[ [Homepage](https://github.com/liruilong940607/OCHumanApi) \] - - [MHP](#mhp) \[ [Homepage](https://lv-mhp.github.io/dataset) \] - - [Human-Art](#humanart) \[ [Homepage](https://idea-research.github.io/HumanArt/) \] -- Videos - - [PoseTrack18](#posetrack18) \[ [Homepage](https://posetrack.net/users/download.php) \] - - [sub-JHMDB](#sub-jhmdb-dataset) \[ [Homepage](http://jhmdb.is.tue.mpg.de/dataset) \] - -## COCO - - - -
    -COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
    - -For [COCO](http://cocodataset.org/) data, please download from [COCO download](http://cocodataset.org/#download), 2017 Train/Val is needed for COCO keypoints training and validation. -[HRNet-Human-Pose-Estimation](https://github.com/HRNet/HRNet-Human-Pose-Estimation) provides person detection result of COCO val2017 to reproduce our multi-person pose estimation results. -Please download from [OneDrive](https://1drv.ms/f/s!AhIXJn_J-blWzzDXoz5BeFl8sWM-) or [GoogleDrive](https://drive.google.com/drive/folders/1fRUDNUDxe9fjqcRZ2bnF_TKMlO0nB_dk?usp=sharing). -Optionally, to evaluate on COCO'2017 test-dev, please download the [image-info](https://download.openmmlab.com/mmpose/datasets/person_keypoints_test-dev-2017.json). -Download and extract them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── coco - │-- annotations - │ │-- person_keypoints_train2017.json - │ |-- person_keypoints_val2017.json - │ |-- person_keypoints_test-dev-2017.json - |-- person_detection_results - | |-- COCO_val2017_detections_AP_H_56_person.json - | |-- COCO_test-dev2017_detections_AP_H_609_person.json - │-- train2017 - │ │-- 000000000009.jpg - │ │-- 000000000025.jpg - │ │-- 000000000030.jpg - │ │-- ... - `-- val2017 - │-- 000000000139.jpg - │-- 000000000285.jpg - │-- 000000000632.jpg - │-- ... - -``` - -## MPII - - - -
    -MPII (CVPR'2014) - -```bibtex -@inproceedings{andriluka14cvpr, - author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, - title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, - booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2014}, - month = {June} -} -``` - -
    - -For [MPII](http://human-pose.mpi-inf.mpg.de/) data, please download from [MPII Human Pose Dataset](http://human-pose.mpi-inf.mpg.de/). -We have converted the original annotation files into json format, please download them from [mpii_annotations](https://download.openmmlab.com/mmpose/datasets/mpii_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── mpii - |── annotations - | |── mpii_gt_val.mat - | |── mpii_test.json - | |── mpii_train.json - | |── mpii_trainval.json - | `── mpii_val.json - `── images - |── 000001163.jpg - |── 000003072.jpg - -``` - -During training and inference, the prediction result will be saved as '.mat' format by default. We also provide a tool to convert this '.mat' to more readable '.json' format. - -```shell -python tools/dataset/mat2json ${PRED_MAT_FILE} ${GT_JSON_FILE} ${OUTPUT_PRED_JSON_FILE} -``` - -For example, - -```shell -python tools/dataset/mat2json work_dirs/res50_mpii_256x256/pred.mat data/mpii/annotations/mpii_val.json pred.json -``` - -## MPII-TRB - - - -
    -MPII-TRB (ICCV'2019) - -```bibtex -@inproceedings{duan2019trb, - title={TRB: A Novel Triplet Representation for Understanding 2D Human Body}, - author={Duan, Haodong and Lin, Kwan-Yee and Jin, Sheng and Liu, Wentao and Qian, Chen and Ouyang, Wanli}, - booktitle={Proceedings of the IEEE International Conference on Computer Vision}, - pages={9479--9488}, - year={2019} -} -``` - -
    - -For [MPII-TRB](https://github.com/kennymckormick/Triplet-Representation-of-human-Body) data, please download from [MPII Human Pose Dataset](http://human-pose.mpi-inf.mpg.de/). -Please download the annotation files from [mpii_trb_annotations](https://download.openmmlab.com/mmpose/datasets/mpii_trb_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── mpii - |── annotations - | |── mpii_trb_train.json - | |── mpii_trb_val.json - `── images - |── 000001163.jpg - |── 000003072.jpg - -``` - -## AIC - - - -
    -AI Challenger (ArXiv'2017) - -```bibtex -@article{wu2017ai, - title={Ai challenger: A large-scale dataset for going deeper in image understanding}, - author={Wu, Jiahong and Zheng, He and Zhao, Bo and Li, Yixin and Yan, Baoming and Liang, Rui and Wang, Wenjia and Zhou, Shipei and Lin, Guosen and Fu, Yanwei and others}, - journal={arXiv preprint arXiv:1711.06475}, - year={2017} -} -``` - -
    - -
    - -
    - -For [AIC](https://github.com/AIChallenger/AI_Challenger_2017) data, please download from [AI Challenger 2017](https://github.com/AIChallenger/AI_Challenger_2017), 2017 Train/Val is needed for keypoints training and validation. -Please download the annotation files from [aic_annotations](https://download.openmmlab.com/mmpose/datasets/aic_annotations.tar). -Download and extract them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── aic - │-- annotations - │ │-- aic_train.json - │ |-- aic_val.json - │-- ai_challenger_keypoint_train_20170902 - │ │-- keypoint_train_images_20170902 - │ │ │-- 0000252aea98840a550dac9a78c476ecb9f47ffa.jpg - │ │ │-- 000050f770985ac9653198495ef9b5c82435d49c.jpg - │ │ │-- ... - `-- ai_challenger_keypoint_validation_20170911 - │-- keypoint_validation_images_20170911 - │-- 0002605c53fb92109a3f2de4fc3ce06425c3b61f.jpg - │-- 0003b55a2c991223e6d8b4b820045bd49507bf6d.jpg - │-- ... -``` - -## CrowdPose - - - -
    -CrowdPose (CVPR'2019) - -```bibtex -@article{li2018crowdpose, - title={CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark}, - author={Li, Jiefeng and Wang, Can and Zhu, Hao and Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu}, - journal={arXiv preprint arXiv:1812.00324}, - year={2018} -} -``` - -
    - -
    - -
    - -For [CrowdPose](https://github.com/Jeff-sjtu/CrowdPose) data, please download from [CrowdPose](https://github.com/Jeff-sjtu/CrowdPose). -Please download the annotation files and human detection results from [crowdpose_annotations](https://download.openmmlab.com/mmpose/datasets/crowdpose_annotations.tar). -For top-down approaches, we follow [CrowdPose](https://arxiv.org/abs/1812.00324) to use the [pre-trained weights](https://pjreddie.com/media/files/yolov3.weights) of [YOLOv3](https://github.com/eriklindernoren/PyTorch-YOLOv3) to generate the detected human bounding boxes. -For model training, we follow [HigherHRNet](https://github.com/HRNet/HigherHRNet-Human-Pose-Estimation) to train models on CrowdPose train/val dataset, and evaluate models on CrowdPose test dataset. -Download and extract them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── crowdpose - │-- annotations - │ │-- mmpose_crowdpose_train.json - │ │-- mmpose_crowdpose_val.json - │ │-- mmpose_crowdpose_trainval.json - │ │-- mmpose_crowdpose_test.json - │ │-- det_for_crowd_test_0.1_0.5.json - │-- images - │-- 100000.jpg - │-- 100001.jpg - │-- 100002.jpg - │-- ... -``` - -## OCHuman - - - -
    -OCHuman (CVPR'2019) - -```bibtex -@inproceedings{zhang2019pose2seg, - title={Pose2seg: Detection free human instance segmentation}, - author={Zhang, Song-Hai and Li, Ruilong and Dong, Xin and Rosin, Paul and Cai, Zixi and Han, Xi and Yang, Dingcheng and Huang, Haozhi and Hu, Shi-Min}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={889--898}, - year={2019} -} -``` - -
    - -
    - -
    - -For [OCHuman](https://github.com/liruilong940607/OCHumanApi) data, please download the images and annotations from [OCHuman](https://github.com/liruilong940607/OCHumanApi), -Move them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── ochuman - │-- annotations - │ │-- ochuman_coco_format_val_range_0.00_1.00.json - │ |-- ochuman_coco_format_test_range_0.00_1.00.json - |-- images - │-- 000001.jpg - │-- 000002.jpg - │-- 000003.jpg - │-- ... - -``` - -## MHP - - - -
    -MHP (ACM MM'2018) - -```bibtex -@inproceedings{zhao2018understanding, - title={Understanding humans in crowded scenes: Deep nested adversarial learning and a new benchmark for multi-human parsing}, - author={Zhao, Jian and Li, Jianshu and Cheng, Yu and Sim, Terence and Yan, Shuicheng and Feng, Jiashi}, - booktitle={Proceedings of the 26th ACM international conference on Multimedia}, - pages={792--800}, - year={2018} -} -``` - -
    - -
    - -
    - -For [MHP](https://lv-mhp.github.io/dataset) data, please download from [MHP](https://lv-mhp.github.io/dataset). -Please download the annotation files from [mhp_annotations](https://download.openmmlab.com/mmpose/datasets/mhp_annotations.tar.gz). -Please download and extract them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── mhp - │-- annotations - │ │-- mhp_train.json - │ │-- mhp_val.json - │ - `-- train - │ │-- images - │ │ │-- 1004.jpg - │ │ │-- 10050.jpg - │ │ │-- ... - │ - `-- val - │ │-- images - │ │ │-- 10059.jpg - │ │ │-- 10068.jpg - │ │ │-- ... - │ - `-- test - │ │-- images - │ │ │-- 1005.jpg - │ │ │-- 10052.jpg - │ │ │-- ...~~~~ -``` - -## Human-Art dataset - - - -
    -Human-Art (CVPR'2023) - -```bibtex -@inproceedings{ju2023humanart, - title={Human-Art: A Versatile Human-Centric Dataset Bridging Natural and Artificial Scenes}, - author={Ju, Xuan and Zeng, Ailing and Jianan, Wang and Qiang, Xu and Lei, Zhang}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), - year={2023}} -``` - -
    - -
    - -
    - -For [Human-Art](https://idea-research.github.io/HumanArt/) data, please download the images and annotation files from [its website](https://idea-research.github.io/HumanArt/). You need to fill in the [data form](https://docs.google.com/forms/d/e/1FAIpQLScroT_jvw6B9U2Qca1_cl5Kmmu1ceKtlh6DJNmWLte8xNEhEw/viewform) to get access to the data. -Move them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -|── data - │── HumanArt - │-- images - │ │-- 2D_virtual_human - │ │ |-- cartoon - │ │ | |-- 000000000000.jpg - │ │ | |-- ... - │ │ |-- digital_art - │ │ |-- ... - │ |-- 3D_virtual_human - │ |-- real_human - |-- annotations - │ │-- validation_humanart.json - │ │-- training_humanart_coco.json - |-- person_detection_results - │ │-- HumanArt_validation_detections_AP_H_56_person.json -``` - -You can choose whether to download other annotation files in Human-Art. If you want to use additional annotation files (e.g. validation set of cartoon), you need to edit the corresponding code in config file. - -## PoseTrack18 - - - -
    -PoseTrack18 (CVPR'2018) - -```bibtex -@inproceedings{andriluka2018posetrack, - title={Posetrack: A benchmark for human pose estimation and tracking}, - author={Andriluka, Mykhaylo and Iqbal, Umar and Insafutdinov, Eldar and Pishchulin, Leonid and Milan, Anton and Gall, Juergen and Schiele, Bernt}, - booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, - pages={5167--5176}, - year={2018} -} -``` - -
    - -
    - -
    - -For [PoseTrack18](https://posetrack.net/users/download.php) data, please download from [PoseTrack18](https://posetrack.net/users/download.php). -Please download the annotation files from [posetrack18_annotations](https://download.openmmlab.com/mmpose/datasets/posetrack18_annotations.tar). -We have merged the video-wise separated official annotation files into two json files (posetrack18_train & posetrack18_val.json). We also generate the [mask files](https://download.openmmlab.com/mmpose/datasets/posetrack18_mask.tar) to speed up training. -For top-down approaches, we use [MMDetection](https://github.com/open-mmlab/mmdetection) pre-trained [Cascade R-CNN](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco/cascade_rcnn_x101_64x4d_fpn_20e_coco_20200509_224357-051557b1.pth) (X-101-64x4d-FPN) to generate the detected human bounding boxes. -Please download and extract them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── posetrack18 - │-- annotations - │ │-- posetrack18_train.json - │ │-- posetrack18_val.json - │ │-- posetrack18_val_human_detections.json - │ │-- train - │ │ │-- 000001_bonn_train.json - │ │ │-- 000002_bonn_train.json - │ │ │-- ... - │ │-- val - │ │ │-- 000342_mpii_test.json - │ │ │-- 000522_mpii_test.json - │ │ │-- ... - │ `-- test - │ │-- 000001_mpiinew_test.json - │ │-- 000002_mpiinew_test.json - │ │-- ... - │ - `-- images - │ │-- train - │ │ │-- 000001_bonn_train - │ │ │ │-- 000000.jpg - │ │ │ │-- 000001.jpg - │ │ │ │-- ... - │ │ │-- ... - │ │-- val - │ │ │-- 000342_mpii_test - │ │ │ │-- 000000.jpg - │ │ │ │-- 000001.jpg - │ │ │ │-- ... - │ │ │-- ... - │ `-- test - │ │-- 000001_mpiinew_test - │ │ │-- 000000.jpg - │ │ │-- 000001.jpg - │ │ │-- ... - │ │-- ... - `-- mask - │-- train - │ │-- 000002_bonn_train - │ │ │-- 000000.jpg - │ │ │-- 000001.jpg - │ │ │-- ... - │ │-- ... - `-- val - │-- 000522_mpii_test - │ │-- 000000.jpg - │ │-- 000001.jpg - │ │-- ... - │-- ... -``` - -The official evaluation tool for PoseTrack should be installed from GitHub. - -```shell -pip install git+https://github.com/svenkreiss/poseval.git -``` - -## sub-JHMDB dataset - - - -
    -RSN (ECCV'2020) - -```bibtex -@misc{cai2020learning, - title={Learning Delicate Local Representations for Multi-Person Pose Estimation}, - author={Yuanhao Cai and Zhicheng Wang and Zhengxiong Luo and Binyi Yin and Angang Du and Haoqian Wang and Xinyu Zhou and Erjin Zhou and Xiangyu Zhang and Jian Sun}, - year={2020}, - eprint={2003.04030}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -
    - -
    - -
    - -For [sub-JHMDB](http://jhmdb.is.tue.mpg.de/dataset) data, please download the [images](<(http://files.is.tue.mpg.de/jhmdb/Rename_Images.tar.gz)>) from [JHMDB](http://jhmdb.is.tue.mpg.de/dataset), -Please download the annotation files from [jhmdb_annotations](https://download.openmmlab.com/mmpose/datasets/jhmdb_annotations.tar). -Move them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── jhmdb - │-- annotations - │ │-- Sub1_train.json - │ |-- Sub1_test.json - │ │-- Sub2_train.json - │ |-- Sub2_test.json - │ │-- Sub3_train.json - │ |-- Sub3_test.json - |-- Rename_Images - │-- brush_hair - │ │--April_09_brush_hair_u_nm_np1_ba_goo_0 - | │ │--00001.png - | │ │--00002.png - │-- catch - │-- ... - -``` +# 2D Body Keypoint Datasets + +It is recommended to symlink the dataset root to `$MMPOSE/data`. +If your folder structure is different, you may need to change the corresponding paths in config files. + +MMPose supported datasets: + +- Images + - [COCO](#coco) \[ [Homepage](http://cocodataset.org/) \] + - [MPII](#mpii) \[ [Homepage](http://human-pose.mpi-inf.mpg.de/) \] + - [MPII-TRB](#mpii-trb) \[ [Homepage](https://github.com/kennymckormick/Triplet-Representation-of-human-Body) \] + - [AI Challenger](#aic) \[ [Homepage](https://github.com/AIChallenger/AI_Challenger_2017) \] + - [CrowdPose](#crowdpose) \[ [Homepage](https://github.com/Jeff-sjtu/CrowdPose) \] + - [OCHuman](#ochuman) \[ [Homepage](https://github.com/liruilong940607/OCHumanApi) \] + - [MHP](#mhp) \[ [Homepage](https://lv-mhp.github.io/dataset) \] + - [Human-Art](#humanart) \[ [Homepage](https://idea-research.github.io/HumanArt/) \] +- Videos + - [PoseTrack18](#posetrack18) \[ [Homepage](https://posetrack.net/users/download.php) \] + - [sub-JHMDB](#sub-jhmdb-dataset) \[ [Homepage](http://jhmdb.is.tue.mpg.de/dataset) \] + +## COCO + + + +
    +COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
    + +
    + +
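+
+If you keep the raw dataset elsewhere, the recommended symlink into `$MMPOSE/data` and a quick sanity check of the COCO layout described below can be scripted. This is a minimal sketch only; the source path `/path/to/raw/coco` is a placeholder for wherever you actually store the data.
+
+```python
+import os
+
+RAW_COCO = "/path/to/raw/coco"  # placeholder: your actual COCO location
+DATA_ROOT = "data/coco"         # run from the $MMPOSE repository root
+
+# Symlink the dataset root into $MMPOSE/data, as recommended above.
+os.makedirs("data", exist_ok=True)
+if not os.path.exists(DATA_ROOT):
+    os.symlink(RAW_COCO, DATA_ROOT)
+
+# Spot-check a few files from the directory tree shown below.
+expected = [
+    "annotations/person_keypoints_train2017.json",
+    "annotations/person_keypoints_val2017.json",
+    "person_detection_results/COCO_val2017_detections_AP_H_56_person.json",
+    "train2017",
+    "val2017",
+]
+for rel_path in expected:
+    status = "OK" if os.path.exists(os.path.join(DATA_ROOT, rel_path)) else "MISSING"
+    print(f"{rel_path}: {status}")
+```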
    + +For [COCO](http://cocodataset.org/) data, please download from [COCO download](http://cocodataset.org/#download), 2017 Train/Val is needed for COCO keypoints training and validation. +[HRNet-Human-Pose-Estimation](https://github.com/HRNet/HRNet-Human-Pose-Estimation) provides person detection result of COCO val2017 to reproduce our multi-person pose estimation results. +Please download from [OneDrive](https://1drv.ms/f/s!AhIXJn_J-blWzzDXoz5BeFl8sWM-) or [GoogleDrive](https://drive.google.com/drive/folders/1fRUDNUDxe9fjqcRZ2bnF_TKMlO0nB_dk?usp=sharing). +Optionally, to evaluate on COCO'2017 test-dev, please download the [image-info](https://download.openmmlab.com/mmpose/datasets/person_keypoints_test-dev-2017.json). +Download and extract them under $MMPOSE/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── coco + │-- annotations + │ │-- person_keypoints_train2017.json + │ |-- person_keypoints_val2017.json + │ |-- person_keypoints_test-dev-2017.json + |-- person_detection_results + | |-- COCO_val2017_detections_AP_H_56_person.json + | |-- COCO_test-dev2017_detections_AP_H_609_person.json + │-- train2017 + │ │-- 000000000009.jpg + │ │-- 000000000025.jpg + │ │-- 000000000030.jpg + │ │-- ... + `-- val2017 + │-- 000000000139.jpg + │-- 000000000285.jpg + │-- 000000000632.jpg + │-- ... + +``` + +## MPII + + + +
    +MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
    + +
    + +
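+
+The `tools/dataset/mat2json` command shown later in this section converts the default `.mat` predictions into `.json`. For reference, the conversion can be sketched with `scipy`; the variable name `'preds'` inside the prediction file is an assumption here, so prefer the bundled script.
+
+```python
+import json
+
+from scipy.io import loadmat
+
+# Paths follow the example command in this section; adjust as needed.
+pred_mat_file = "work_dirs/res50_mpii_256x256/pred.mat"
+output_json = "pred.json"
+
+mat = loadmat(pred_mat_file)
+# 'preds' (num_samples x num_joints x 2 coordinates) is assumed here;
+# inspect mat.keys() if your file stores predictions under another name.
+preds = mat["preds"]
+
+results = [{"keypoints": person.tolist()} for person in preds]
+with open(output_json, "w") as f:
+    json.dump(results, f)
+```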
    + +For [MPII](http://human-pose.mpi-inf.mpg.de/) data, please download from [MPII Human Pose Dataset](http://human-pose.mpi-inf.mpg.de/). +We have converted the original annotation files into json format, please download them from [mpii_annotations](https://download.openmmlab.com/mmpose/datasets/mpii_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── mpii + |── annotations + | |── mpii_gt_val.mat + | |── mpii_test.json + | |── mpii_train.json + | |── mpii_trainval.json + | `── mpii_val.json + `── images + |── 000001163.jpg + |── 000003072.jpg + +``` + +During training and inference, the prediction result will be saved as '.mat' format by default. We also provide a tool to convert this '.mat' to more readable '.json' format. + +```shell +python tools/dataset/mat2json ${PRED_MAT_FILE} ${GT_JSON_FILE} ${OUTPUT_PRED_JSON_FILE} +``` + +For example, + +```shell +python tools/dataset/mat2json work_dirs/res50_mpii_256x256/pred.mat data/mpii/annotations/mpii_val.json pred.json +``` + +## MPII-TRB + + + +
    +MPII-TRB (ICCV'2019) + +```bibtex +@inproceedings{duan2019trb, + title={TRB: A Novel Triplet Representation for Understanding 2D Human Body}, + author={Duan, Haodong and Lin, Kwan-Yee and Jin, Sheng and Liu, Wentao and Qian, Chen and Ouyang, Wanli}, + booktitle={Proceedings of the IEEE International Conference on Computer Vision}, + pages={9479--9488}, + year={2019} +} +``` + +
    + +
    + +
    + +For [MPII-TRB](https://github.com/kennymckormick/Triplet-Representation-of-human-Body) data, please download from [MPII Human Pose Dataset](http://human-pose.mpi-inf.mpg.de/). +Please download the annotation files from [mpii_trb_annotations](https://download.openmmlab.com/mmpose/datasets/mpii_trb_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── mpii + |── annotations + | |── mpii_trb_train.json + | |── mpii_trb_val.json + `── images + |── 000001163.jpg + |── 000003072.jpg + +``` + +## AIC + + + +
    +AI Challenger (ArXiv'2017) + +```bibtex +@article{wu2017ai, + title={Ai challenger: A large-scale dataset for going deeper in image understanding}, + author={Wu, Jiahong and Zheng, He and Zhao, Bo and Li, Yixin and Yan, Baoming and Liang, Rui and Wang, Wenjia and Zhou, Shipei and Lin, Guosen and Fu, Yanwei and others}, + journal={arXiv preprint arXiv:1711.06475}, + year={2017} +} +``` + +
    + +
    + +
    + +For [AIC](https://github.com/AIChallenger/AI_Challenger_2017) data, please download from [AI Challenger 2017](https://github.com/AIChallenger/AI_Challenger_2017), 2017 Train/Val is needed for keypoints training and validation. +Please download the annotation files from [aic_annotations](https://download.openmmlab.com/mmpose/datasets/aic_annotations.tar). +Download and extract them under $MMPOSE/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── aic + │-- annotations + │ │-- aic_train.json + │ |-- aic_val.json + │-- ai_challenger_keypoint_train_20170902 + │ │-- keypoint_train_images_20170902 + │ │ │-- 0000252aea98840a550dac9a78c476ecb9f47ffa.jpg + │ │ │-- 000050f770985ac9653198495ef9b5c82435d49c.jpg + │ │ │-- ... + `-- ai_challenger_keypoint_validation_20170911 + │-- keypoint_validation_images_20170911 + │-- 0002605c53fb92109a3f2de4fc3ce06425c3b61f.jpg + │-- 0003b55a2c991223e6d8b4b820045bd49507bf6d.jpg + │-- ... +``` + +## CrowdPose + + + +
    +CrowdPose (CVPR'2019) + +```bibtex +@article{li2018crowdpose, + title={CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark}, + author={Li, Jiefeng and Wang, Can and Zhu, Hao and Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu}, + journal={arXiv preprint arXiv:1812.00324}, + year={2018} +} +``` + +
    + +
    + +
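+
+For top-down models, the human bounding boxes come from the detection-result file described below. The snippet is only a sketch of how such a file can be inspected; the exact fields are assumed to follow the usual COCO-style detection format (`image_id`, `bbox` as `[x, y, w, h]`, `score`).
+
+```python
+import json
+
+# Detection results used for the CrowdPose test set, as described below.
+det_file = "data/crowdpose/annotations/det_for_crowd_test_0.1_0.5.json"
+
+with open(det_file) as f:
+    detections = json.load(f)
+
+# Assumed COCO-style entries: image_id, category_id, bbox [x, y, w, h], score.
+print("number of detected boxes:", len(detections))
+print("first entry:", detections[0])
+```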
    + +For [CrowdPose](https://github.com/Jeff-sjtu/CrowdPose) data, please download from [CrowdPose](https://github.com/Jeff-sjtu/CrowdPose). +Please download the annotation files and human detection results from [crowdpose_annotations](https://download.openmmlab.com/mmpose/datasets/crowdpose_annotations.tar). +For top-down approaches, we follow [CrowdPose](https://arxiv.org/abs/1812.00324) to use the [pre-trained weights](https://pjreddie.com/media/files/yolov3.weights) of [YOLOv3](https://github.com/eriklindernoren/PyTorch-YOLOv3) to generate the detected human bounding boxes. +For model training, we follow [HigherHRNet](https://github.com/HRNet/HigherHRNet-Human-Pose-Estimation) to train models on CrowdPose train/val dataset, and evaluate models on CrowdPose test dataset. +Download and extract them under $MMPOSE/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── crowdpose + │-- annotations + │ │-- mmpose_crowdpose_train.json + │ │-- mmpose_crowdpose_val.json + │ │-- mmpose_crowdpose_trainval.json + │ │-- mmpose_crowdpose_test.json + │ │-- det_for_crowd_test_0.1_0.5.json + │-- images + │-- 100000.jpg + │-- 100001.jpg + │-- 100002.jpg + │-- ... +``` + +## OCHuman + + + +
    +OCHuman (CVPR'2019) + +```bibtex +@inproceedings{zhang2019pose2seg, + title={Pose2seg: Detection free human instance segmentation}, + author={Zhang, Song-Hai and Li, Ruilong and Dong, Xin and Rosin, Paul and Cai, Zixi and Han, Xi and Yang, Dingcheng and Huang, Haozhi and Hu, Shi-Min}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={889--898}, + year={2019} +} +``` + +
    + +
    + +
+
+For [OCHuman](https://github.com/liruilong940607/OCHumanApi) data, please download the images and annotations from [OCHuman](https://github.com/liruilong940607/OCHumanApi).
+Move them under $MMPOSE/data, and make them look like this:
+
+```text
+mmpose
+├── mmpose
+├── docs
+├── tests
+├── tools
+├── configs
+`── data
+    │── ochuman
+        │-- annotations
+        │   │-- ochuman_coco_format_val_range_0.00_1.00.json
+        │   |-- ochuman_coco_format_test_range_0.00_1.00.json
+        |-- images
+            │-- 000001.jpg
+            │-- 000002.jpg
+            │-- 000003.jpg
+            │-- ...
+
+```
+
+## MHP
+
+
+
    +MHP (ACM MM'2018) + +```bibtex +@inproceedings{zhao2018understanding, + title={Understanding humans in crowded scenes: Deep nested adversarial learning and a new benchmark for multi-human parsing}, + author={Zhao, Jian and Li, Jianshu and Cheng, Yu and Sim, Terence and Yan, Shuicheng and Feng, Jiashi}, + booktitle={Proceedings of the 26th ACM international conference on Multimedia}, + pages={792--800}, + year={2018} +} +``` + +
    + +
    + +
+
+For [MHP](https://lv-mhp.github.io/dataset) data, please download from [MHP](https://lv-mhp.github.io/dataset).
+Please download the annotation files from [mhp_annotations](https://download.openmmlab.com/mmpose/datasets/mhp_annotations.tar.gz).
+Please download and extract them under $MMPOSE/data, and make them look like this:
+
+```text
+mmpose
+├── mmpose
+├── docs
+├── tests
+├── tools
+├── configs
+`── data
+    │── mhp
+        │-- annotations
+        │   │-- mhp_train.json
+        │   │-- mhp_val.json
+        │
+        `-- train
+        │   │-- images
+        │   │   │-- 1004.jpg
+        │   │   │-- 10050.jpg
+        │   │   │-- ...
+        │
+        `-- val
+        │   │-- images
+        │   │   │-- 10059.jpg
+        │   │   │-- 10068.jpg
+        │   │   │-- ...
+        │
+        `-- test
+        │   │-- images
+        │   │   │-- 1005.jpg
+        │   │   │-- 10052.jpg
+        │   │   │-- ...
+```
+
+## Human-Art dataset
+
+
+
+Human-Art (CVPR'2023)
+
+```bibtex
+@inproceedings{ju2023humanart,
+  title={Human-Art: A Versatile Human-Centric Dataset Bridging Natural and Artificial Scenes},
+  author={Ju, Xuan and Zeng, Ailing and Wang, Jianan and Xu, Qiang and Zhang, Lei},
+  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
+  year={2023}
+}
+```
+
    + +
    + +
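+
+As noted after the directory tree below, using one of the optional category-specific annotation files only requires pointing the dataset config at a different json. A sketch of the relevant fragment of an MMPose config is shown here; the dataset type and field names follow common MMPose configs and may differ in your version, and `validation_humanart_cartoon.json` is a hypothetical file name, so adapt both to your setup.
+
+```python
+# Sketch of the dataset part of an MMPose config (not a complete config).
+val_dataloader = dict(
+    dataset=dict(
+        type='HumanArtDataset',
+        data_root='data/',
+        data_mode='topdown',
+        # Hypothetical category-specific annotation file; replace it with the
+        # annotation json you actually downloaded (e.g. the cartoon split).
+        ann_file='HumanArt/annotations/validation_humanart_cartoon.json',
+        data_prefix=dict(img=''),
+    )
+)
+```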
+
+For [Human-Art](https://idea-research.github.io/HumanArt/) data, please download the images and annotation files from [its website](https://idea-research.github.io/HumanArt/). You need to fill in the [data form](https://docs.google.com/forms/d/e/1FAIpQLScroT_jvw6B9U2Qca1_cl5Kmmu1ceKtlh6DJNmWLte8xNEhEw/viewform) to get access to the data.
+Move them under $MMPOSE/data, and make them look like this:
+
+```text
+mmpose
+├── mmpose
+├── docs
+├── tests
+├── tools
+├── configs
+|── data
+    │── HumanArt
+        │-- images
+        │   │-- 2D_virtual_human
+        │   │   |-- cartoon
+        │   │   |   |-- 000000000000.jpg
+        │   │   |   |-- ...
+        │   │   |-- digital_art
+        │   │   |-- ...
+        │   |-- 3D_virtual_human
+        │   |-- real_human
+        |-- annotations
+        │   │-- validation_humanart.json
+        │   │-- training_humanart_coco.json
+        |-- person_detection_results
+        │   │-- HumanArt_validation_detections_AP_H_56_person.json
+```
+
+Downloading the other annotation files of Human-Art is optional. If you want to use an additional annotation file (e.g. the validation set of the cartoon category), you need to edit the corresponding path in the config file.
+
+## PoseTrack18
+
+
+
    +PoseTrack18 (CVPR'2018) + +```bibtex +@inproceedings{andriluka2018posetrack, + title={Posetrack: A benchmark for human pose estimation and tracking}, + author={Andriluka, Mykhaylo and Iqbal, Umar and Insafutdinov, Eldar and Pishchulin, Leonid and Milan, Anton and Gall, Juergen and Schiele, Bernt}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + pages={5167--5176}, + year={2018} +} +``` + +
    + +
    + +
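+
+The merged `posetrack18_train.json` / `posetrack18_val.json` files mentioned below are already provided. If you ever need to redo the merge from the per-video files, a rough sketch looks like this; it assumes each per-video json uses the COCO-style `images` / `annotations` / `categories` keys and ignores potential id clashes across videos:
+
+```python
+import glob
+import json
+
+merged = {"images": [], "annotations": [], "categories": []}
+for path in sorted(glob.glob("data/posetrack18/annotations/val/*.json")):
+    with open(path) as f:
+        video_ann = json.load(f)
+    # Concatenate the COCO-style sections of every per-video file.
+    merged["images"].extend(video_ann["images"])
+    merged["annotations"].extend(video_ann["annotations"])
+    merged["categories"] = video_ann["categories"]
+
+with open("posetrack18_val_merged.json", "w") as f:
+    json.dump(merged, f)
+```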
    + +For [PoseTrack18](https://posetrack.net/users/download.php) data, please download from [PoseTrack18](https://posetrack.net/users/download.php). +Please download the annotation files from [posetrack18_annotations](https://download.openmmlab.com/mmpose/datasets/posetrack18_annotations.tar). +We have merged the video-wise separated official annotation files into two json files (posetrack18_train & posetrack18_val.json). We also generate the [mask files](https://download.openmmlab.com/mmpose/datasets/posetrack18_mask.tar) to speed up training. +For top-down approaches, we use [MMDetection](https://github.com/open-mmlab/mmdetection) pre-trained [Cascade R-CNN](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco/cascade_rcnn_x101_64x4d_fpn_20e_coco_20200509_224357-051557b1.pth) (X-101-64x4d-FPN) to generate the detected human bounding boxes. +Please download and extract them under $MMPOSE/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── posetrack18 + │-- annotations + │ │-- posetrack18_train.json + │ │-- posetrack18_val.json + │ │-- posetrack18_val_human_detections.json + │ │-- train + │ │ │-- 000001_bonn_train.json + │ │ │-- 000002_bonn_train.json + │ │ │-- ... + │ │-- val + │ │ │-- 000342_mpii_test.json + │ │ │-- 000522_mpii_test.json + │ │ │-- ... + │ `-- test + │ │-- 000001_mpiinew_test.json + │ │-- 000002_mpiinew_test.json + │ │-- ... + │ + `-- images + │ │-- train + │ │ │-- 000001_bonn_train + │ │ │ │-- 000000.jpg + │ │ │ │-- 000001.jpg + │ │ │ │-- ... + │ │ │-- ... + │ │-- val + │ │ │-- 000342_mpii_test + │ │ │ │-- 000000.jpg + │ │ │ │-- 000001.jpg + │ │ │ │-- ... + │ │ │-- ... + │ `-- test + │ │-- 000001_mpiinew_test + │ │ │-- 000000.jpg + │ │ │-- 000001.jpg + │ │ │-- ... + │ │-- ... + `-- mask + │-- train + │ │-- 000002_bonn_train + │ │ │-- 000000.jpg + │ │ │-- 000001.jpg + │ │ │-- ... + │ │-- ... + `-- val + │-- 000522_mpii_test + │ │-- 000000.jpg + │ │-- 000001.jpg + │ │-- ... + │-- ... +``` + +The official evaluation tool for PoseTrack should be installed from GitHub. + +```shell +pip install git+https://github.com/svenkreiss/poseval.git +``` + +## sub-JHMDB dataset + + + +
    +RSN (ECCV'2020) + +```bibtex +@misc{cai2020learning, + title={Learning Delicate Local Representations for Multi-Person Pose Estimation}, + author={Yuanhao Cai and Zhicheng Wang and Zhengxiong Luo and Binyi Yin and Angang Du and Haoqian Wang and Xinyu Zhou and Erjin Zhou and Xiangyu Zhang and Jian Sun}, + year={2020}, + eprint={2003.04030}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +
    + +
    + +
+
+For [sub-JHMDB](http://jhmdb.is.tue.mpg.de/dataset) data, please download the [images](http://files.is.tue.mpg.de/jhmdb/Rename_Images.tar.gz) from [JHMDB](http://jhmdb.is.tue.mpg.de/dataset).
+Please download the annotation files from [jhmdb_annotations](https://download.openmmlab.com/mmpose/datasets/jhmdb_annotations.tar).
+Move them under $MMPOSE/data, and make them look like this:
+
+```text
+mmpose
+├── mmpose
+├── docs
+├── tests
+├── tools
+├── configs
+`── data
+    │── jhmdb
+        │-- annotations
+        │   │-- Sub1_train.json
+        │   |-- Sub1_test.json
+        │   │-- Sub2_train.json
+        │   |-- Sub2_test.json
+        │   │-- Sub3_train.json
+        │   |-- Sub3_test.json
+        |-- Rename_Images
+            │-- brush_hair
+            │   │--April_09_brush_hair_u_nm_np1_ba_goo_0
+            |   │   │--00001.png
+            |   │   │--00002.png
+            │-- catch
+            │-- ...
+
+```
diff --git a/docs/zh_cn/dataset_zoo/2d_face_keypoint.md b/docs/zh_cn/dataset_zoo/2d_face_keypoint.md
index 62f66bd82b..13bbb5dec4 100644
--- a/docs/zh_cn/dataset_zoo/2d_face_keypoint.md
+++ b/docs/zh_cn/dataset_zoo/2d_face_keypoint.md
@@ -1,384 +1,384 @@
-# 2D Face Keypoint Datasets
-
-It is recommended to symlink the dataset root to `$MMPOSE/data`.
-If your folder structure is different, you may need to change the corresponding paths in config files.
-
-MMPose supported datasets:
-
-- [300W](#300w-dataset) \[ [Homepage](https://ibug.doc.ic.ac.uk/resources/300-W/) \]
-- [WFLW](#wflw-dataset) \[ [Homepage](https://wywu.github.io/projects/LAB/WFLW.html) \]
-- [AFLW](#aflw-dataset) \[ [Homepage](https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/aflw/) \]
-- [COFW](#cofw-dataset) \[ [Homepage](http://www.vision.caltech.edu/xpburgos/ICCV13/) \]
-- [COCO-WholeBody-Face](#coco-wholebody-face) \[ [Homepage](https://github.com/jin-s13/COCO-WholeBody/) \]
-- [LaPa](#lapa-dataset) \[ [Homepage](https://github.com/JDAI-CV/lapa-dataset) \]
-
-## 300W Dataset
-
-
-
    -300W (IMAVIS'2016) - -```bibtex -@article{sagonas2016300, - title={300 faces in-the-wild challenge: Database and results}, - author={Sagonas, Christos and Antonakos, Epameinondas and Tzimiropoulos, Georgios and Zafeiriou, Stefanos and Pantic, Maja}, - journal={Image and vision computing}, - volume={47}, - pages={3--18}, - year={2016}, - publisher={Elsevier} -} -``` - -
    - -
    - -
    - -For 300W data, please download images from [300W Dataset](https://ibug.doc.ic.ac.uk/resources/300-W/). -Please download the annotation files from [300w_annotations](https://download.openmmlab.com/mmpose/datasets/300w_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── 300w - |── annotations - | |── face_landmarks_300w_train.json - | |── face_landmarks_300w_valid.json - | |── face_landmarks_300w_valid_common.json - | |── face_landmarks_300w_valid_challenge.json - | |── face_landmarks_300w_test.json - `── images - |── afw - | |── 1051618982_1.jpg - | |── 111076519_1.jpg - | ... - |── helen - | |── trainset - | | |── 100032540_1.jpg - | | |── 100040721_1.jpg - | | ... - | |── testset - | | |── 296814969_3.jpg - | | |── 2968560214_1.jpg - | | ... - |── ibug - | |── image_003_1.jpg - | |── image_004_1.jpg - | ... - |── lfpw - | |── trainset - | | |── image_0001.png - | | |── image_0002.png - | | ... - | |── testset - | | |── image_0001.png - | | |── image_0002.png - | | ... - `── Test - |── 01_Indoor - | |── indoor_001.png - | |── indoor_002.png - | ... - `── 02_Outdoor - |── outdoor_001.png - |── outdoor_002.png - ... -``` - -## WFLW Dataset - - - -
    -WFLW (CVPR'2018) - -```bibtex -@inproceedings{wu2018look, - title={Look at boundary: A boundary-aware face alignment algorithm}, - author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={2129--2138}, - year={2018} -} -``` - -
    - -
    - -
    - -For WFLW data, please download images from [WFLW Dataset](https://wywu.github.io/projects/LAB/WFLW.html). -Please download the annotation files from [wflw_annotations](https://download.openmmlab.com/mmpose/datasets/wflw_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── wflw - |── annotations - | |── face_landmarks_wflw_train.json - | |── face_landmarks_wflw_test.json - | |── face_landmarks_wflw_test_blur.json - | |── face_landmarks_wflw_test_occlusion.json - | |── face_landmarks_wflw_test_expression.json - | |── face_landmarks_wflw_test_largepose.json - | |── face_landmarks_wflw_test_illumination.json - | |── face_landmarks_wflw_test_makeup.json - | - `── images - |── 0--Parade - | |── 0_Parade_marchingband_1_1015.jpg - | |── 0_Parade_marchingband_1_1031.jpg - | ... - |── 1--Handshaking - | |── 1_Handshaking_Handshaking_1_105.jpg - | |── 1_Handshaking_Handshaking_1_107.jpg - | ... - ... -``` - -## AFLW Dataset - - - -
    -AFLW (ICCVW'2011) - -```bibtex -@inproceedings{koestinger2011annotated, - title={Annotated facial landmarks in the wild: A large-scale, real-world database for facial landmark localization}, - author={Koestinger, Martin and Wohlhart, Paul and Roth, Peter M and Bischof, Horst}, - booktitle={2011 IEEE international conference on computer vision workshops (ICCV workshops)}, - pages={2144--2151}, - year={2011}, - organization={IEEE} -} -``` - -
    - -For AFLW data, please download images from [AFLW Dataset](https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/aflw/). -Please download the annotation files from [aflw_annotations](https://download.openmmlab.com/mmpose/datasets/aflw_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── aflw - |── annotations - | |── face_landmarks_aflw_train.json - | |── face_landmarks_aflw_test_frontal.json - | |── face_landmarks_aflw_test.json - `── images - |── flickr - |── 0 - | |── image00002.jpg - | |── image00013.jpg - | ... - |── 2 - | |── image00004.jpg - | |── image00006.jpg - | ... - `── 3 - |── image00032.jpg - |── image00035.jpg - ... -``` - -## COFW Dataset - - - -
    -COFW (ICCV'2013) - -```bibtex -@inproceedings{burgos2013robust, - title={Robust face landmark estimation under occlusion}, - author={Burgos-Artizzu, Xavier P and Perona, Pietro and Doll{\'a}r, Piotr}, - booktitle={Proceedings of the IEEE international conference on computer vision}, - pages={1513--1520}, - year={2013} -} -``` - -
    - -
    - -
    - -For COFW data, please download from [COFW Dataset (Color Images)](http://www.vision.caltech.edu/xpburgos/ICCV13/Data/COFW_color.zip). -Move `COFW_train_color.mat` and `COFW_test_color.mat` to `data/cofw/` and make them look like: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── cofw - |── COFW_train_color.mat - |── COFW_test_color.mat -``` - -Run the following script under `{MMPose}/data` - -`python tools/dataset_converters/parse_cofw_dataset.py` - -And you will get - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── cofw - |── COFW_train_color.mat - |── COFW_test_color.mat - |── annotations - | |── cofw_train.json - | |── cofw_test.json - |── images - |── 000001.jpg - |── 000002.jpg -``` - -## COCO-WholeBody (Face) - - - -
    -COCO-WholeBody-Face (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
    - -
    - -
    - -For [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody/) dataset, images can be downloaded from [COCO download](http://cocodataset.org/#download), 2017 Train/Val is needed for COCO keypoints training and validation. -Download COCO-WholeBody annotations for COCO-WholeBody annotations for [Train](https://drive.google.com/file/d/1thErEToRbmM9uLNi1JXXfOsaS5VK2FXf/view?usp=sharing) / [Validation](https://drive.google.com/file/d/1N6VgwKnj8DeyGXCvp1eYgNbRmw6jdfrb/view?usp=sharing) (Google Drive). -Download person detection result of COCO val2017 from [OneDrive](https://1drv.ms/f/s!AhIXJn_J-blWzzDXoz5BeFl8sWM-) or [GoogleDrive](https://drive.google.com/drive/folders/1fRUDNUDxe9fjqcRZ2bnF_TKMlO0nB_dk?usp=sharing). -Download and extract them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── coco - │-- annotations - │ │-- coco_wholebody_train_v1.0.json - │ |-- coco_wholebody_val_v1.0.json - |-- person_detection_results - | |-- COCO_val2017_detections_AP_H_56_person.json - │-- train2017 - │ │-- 000000000009.jpg - │ │-- 000000000025.jpg - │ │-- 000000000030.jpg - │ │-- ... - `-- val2017 - │-- 000000000139.jpg - │-- 000000000285.jpg - │-- 000000000632.jpg - │-- ... - -``` - -Please also install the latest version of [Extended COCO API](https://github.com/jin-s13/xtcocoapi) to support COCO-WholeBody evaluation: - -`pip install xtcocotools` - -## LaPa - - - -
    -LaPa (AAAI'2020) - -```bibtex -@inproceedings{liu2020new, - title={A New Dataset and Boundary-Attention Semantic Segmentation for Face Parsing.}, - author={Liu, Yinglu and Shi, Hailin and Shen, Hao and Si, Yue and Wang, Xiaobo and Mei, Tao}, - booktitle={AAAI}, - pages={11637--11644}, - year={2020} -} -``` - -
    - -
    - -
    - -For [LaPa](https://github.com/JDAI-CV/lapa-dataset) dataset, images can be downloaded from [their github page](https://github.com/JDAI-CV/lapa-dataset). - -Download and extract them under $MMPOSE/data, and use our `tools/dataset_converters/lapa2coco.py` to make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── LaPa - │-- annotations - │ │-- lapa_train.json - │ |-- lapa_val.json - │ |-- lapa_test.json - | |-- lapa_trainval.json - │-- train - │ │-- images - │ │-- labels - │ │-- landmarks - │-- val - │ │-- images - │ │-- labels - │ │-- landmarks - `-- test - │ │-- images - │ │-- labels - │ │-- landmarks - -``` +# 2D Face Keypoint Datasets + +It is recommended to symlink the dataset root to `$MMPOSE/data`. +If your folder structure is different, you may need to change the corresponding paths in config files. + +MMPose supported datasets: + +- [300W](#300w-dataset) \[ [Homepage](https://ibug.doc.ic.ac.uk/resources/300-W/) \] +- [WFLW](#wflw-dataset) \[ [Homepage](https://wywu.github.io/projects/LAB/WFLW.html) \] +- [AFLW](#aflw-dataset) \[ [Homepage](https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/aflw/) \] +- [COFW](#cofw-dataset) \[ [Homepage](http://www.vision.caltech.edu/xpburgos/ICCV13/) \] +- [COCO-WholeBody-Face](#coco-wholebody-face) \[ [Homepage](https://github.com/jin-s13/COCO-WholeBody/) \] +- [LaPa](#lapa-dataset) \[ [Homepage](https://github.com/JDAI-CV/lapa-dataset) \] + +## 300W Dataset + + + +
    +300W (IMAVIS'2016) + +```bibtex +@article{sagonas2016300, + title={300 faces in-the-wild challenge: Database and results}, + author={Sagonas, Christos and Antonakos, Epameinondas and Tzimiropoulos, Georgios and Zafeiriou, Stefanos and Pantic, Maja}, + journal={Image and vision computing}, + volume={47}, + pages={3--18}, + year={2016}, + publisher={Elsevier} +} +``` + +
    + +
    + +
    + +For 300W data, please download images from [300W Dataset](https://ibug.doc.ic.ac.uk/resources/300-W/). +Please download the annotation files from [300w_annotations](https://download.openmmlab.com/mmpose/datasets/300w_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── 300w + |── annotations + | |── face_landmarks_300w_train.json + | |── face_landmarks_300w_valid.json + | |── face_landmarks_300w_valid_common.json + | |── face_landmarks_300w_valid_challenge.json + | |── face_landmarks_300w_test.json + `── images + |── afw + | |── 1051618982_1.jpg + | |── 111076519_1.jpg + | ... + |── helen + | |── trainset + | | |── 100032540_1.jpg + | | |── 100040721_1.jpg + | | ... + | |── testset + | | |── 296814969_3.jpg + | | |── 2968560214_1.jpg + | | ... + |── ibug + | |── image_003_1.jpg + | |── image_004_1.jpg + | ... + |── lfpw + | |── trainset + | | |── image_0001.png + | | |── image_0002.png + | | ... + | |── testset + | | |── image_0001.png + | | |── image_0002.png + | | ... + `── Test + |── 01_Indoor + | |── indoor_001.png + | |── indoor_002.png + | ... + `── 02_Outdoor + |── outdoor_001.png + |── outdoor_002.png + ... +``` + +## WFLW Dataset + + + +
    +WFLW (CVPR'2018) + +```bibtex +@inproceedings{wu2018look, + title={Look at boundary: A boundary-aware face alignment algorithm}, + author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={2129--2138}, + year={2018} +} +``` + +
    + +
    + +
    + +For WFLW data, please download images from [WFLW Dataset](https://wywu.github.io/projects/LAB/WFLW.html). +Please download the annotation files from [wflw_annotations](https://download.openmmlab.com/mmpose/datasets/wflw_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── wflw + |── annotations + | |── face_landmarks_wflw_train.json + | |── face_landmarks_wflw_test.json + | |── face_landmarks_wflw_test_blur.json + | |── face_landmarks_wflw_test_occlusion.json + | |── face_landmarks_wflw_test_expression.json + | |── face_landmarks_wflw_test_largepose.json + | |── face_landmarks_wflw_test_illumination.json + | |── face_landmarks_wflw_test_makeup.json + | + `── images + |── 0--Parade + | |── 0_Parade_marchingband_1_1015.jpg + | |── 0_Parade_marchingband_1_1031.jpg + | ... + |── 1--Handshaking + | |── 1_Handshaking_Handshaking_1_105.jpg + | |── 1_Handshaking_Handshaking_1_107.jpg + | ... + ... +``` + +## AFLW Dataset + + + +
    +AFLW (ICCVW'2011) + +```bibtex +@inproceedings{koestinger2011annotated, + title={Annotated facial landmarks in the wild: A large-scale, real-world database for facial landmark localization}, + author={Koestinger, Martin and Wohlhart, Paul and Roth, Peter M and Bischof, Horst}, + booktitle={2011 IEEE international conference on computer vision workshops (ICCV workshops)}, + pages={2144--2151}, + year={2011}, + organization={IEEE} +} +``` + +
    + +For AFLW data, please download images from [AFLW Dataset](https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/aflw/). +Please download the annotation files from [aflw_annotations](https://download.openmmlab.com/mmpose/datasets/aflw_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── aflw + |── annotations + | |── face_landmarks_aflw_train.json + | |── face_landmarks_aflw_test_frontal.json + | |── face_landmarks_aflw_test.json + `── images + |── flickr + |── 0 + | |── image00002.jpg + | |── image00013.jpg + | ... + |── 2 + | |── image00004.jpg + | |── image00006.jpg + | ... + `── 3 + |── image00032.jpg + |── image00035.jpg + ... +``` + +## COFW Dataset + + + +
    +COFW (ICCV'2013) + +```bibtex +@inproceedings{burgos2013robust, + title={Robust face landmark estimation under occlusion}, + author={Burgos-Artizzu, Xavier P and Perona, Pietro and Doll{\'a}r, Piotr}, + booktitle={Proceedings of the IEEE international conference on computer vision}, + pages={1513--1520}, + year={2013} +} +``` + +
    + +
    + +
    + +For COFW data, please download from [COFW Dataset (Color Images)](http://www.vision.caltech.edu/xpburgos/ICCV13/Data/COFW_color.zip). +Move `COFW_train_color.mat` and `COFW_test_color.mat` to `data/cofw/` and make them look like: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── cofw + |── COFW_train_color.mat + |── COFW_test_color.mat +``` + +Run the following script under `{MMPose}/data` + +`python tools/dataset_converters/parse_cofw_dataset.py` + +And you will get + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── cofw + |── COFW_train_color.mat + |── COFW_test_color.mat + |── annotations + | |── cofw_train.json + | |── cofw_test.json + |── images + |── 000001.jpg + |── 000002.jpg +``` + +## COCO-WholeBody (Face) + + + +
    +COCO-WholeBody-Face (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
    + +
    + +
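+
+Once the layout below is in place and the Extended COCO API mentioned at the end of this section is installed, the whole-body annotations can be loaded much like regular COCO files. A minimal sketch (the `face_kpts` field name follows the COCO-WholeBody annotation format):
+
+```python
+from xtcocotools.coco import COCO
+
+# COCO-WholeBody validation annotations, as laid out in the tree below.
+coco = COCO('data/coco/annotations/coco_wholebody_val_v1.0.json')
+
+img_id = coco.getImgIds()[0]
+anns = coco.loadAnns(coco.getAnnIds(imgIds=img_id))
+# Whole-body annotations carry extra keypoint fields such as 'face_kpts'
+# alongside the standard body 'keypoints'.
+print(sorted(anns[0].keys()))
+```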
+
+For the [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody/) dataset, images can be downloaded from [COCO download](http://cocodataset.org/#download); 2017 Train/Val is needed for COCO keypoints training and validation.
+Download the COCO-WholeBody annotations for [Train](https://drive.google.com/file/d/1thErEToRbmM9uLNi1JXXfOsaS5VK2FXf/view?usp=sharing) / [Validation](https://drive.google.com/file/d/1N6VgwKnj8DeyGXCvp1eYgNbRmw6jdfrb/view?usp=sharing) (Google Drive).
+Download the person detection results of COCO val2017 from [OneDrive](https://1drv.ms/f/s!AhIXJn_J-blWzzDXoz5BeFl8sWM-) or [GoogleDrive](https://drive.google.com/drive/folders/1fRUDNUDxe9fjqcRZ2bnF_TKMlO0nB_dk?usp=sharing).
+Download and extract them under $MMPOSE/data, and make them look like this:
+
+```text
+mmpose
+├── mmpose
+├── docs
+├── tests
+├── tools
+├── configs
+`── data
+    │── coco
+        │-- annotations
+        │   │-- coco_wholebody_train_v1.0.json
+        │   |-- coco_wholebody_val_v1.0.json
+        |-- person_detection_results
+        |   |-- COCO_val2017_detections_AP_H_56_person.json
+        │-- train2017
+        │   │-- 000000000009.jpg
+        │   │-- 000000000025.jpg
+        │   │-- 000000000030.jpg
+        │   │-- ...
+        `-- val2017
+            │-- 000000000139.jpg
+            │-- 000000000285.jpg
+            │-- 000000000632.jpg
+            │-- ...
+
+```
+
+Please also install the latest version of [Extended COCO API](https://github.com/jin-s13/xtcocoapi) to support COCO-WholeBody evaluation:
+
+`pip install xtcocotools`
+
+## LaPa
+
+
+
    +LaPa (AAAI'2020) + +```bibtex +@inproceedings{liu2020new, + title={A New Dataset and Boundary-Attention Semantic Segmentation for Face Parsing.}, + author={Liu, Yinglu and Shi, Hailin and Shen, Hao and Si, Yue and Wang, Xiaobo and Mei, Tao}, + booktitle={AAAI}, + pages={11637--11644}, + year={2020} +} +``` + +
    + +
    + +
    + +For [LaPa](https://github.com/JDAI-CV/lapa-dataset) dataset, images can be downloaded from [their github page](https://github.com/JDAI-CV/lapa-dataset). + +Download and extract them under $MMPOSE/data, and use our `tools/dataset_converters/lapa2coco.py` to make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── LaPa + │-- annotations + │ │-- lapa_train.json + │ |-- lapa_val.json + │ |-- lapa_test.json + | |-- lapa_trainval.json + │-- train + │ │-- images + │ │-- labels + │ │-- landmarks + │-- val + │ │-- images + │ │-- labels + │ │-- landmarks + `-- test + │ │-- images + │ │-- labels + │ │-- landmarks + +``` diff --git a/docs/zh_cn/dataset_zoo/2d_fashion_landmark.md b/docs/zh_cn/dataset_zoo/2d_fashion_landmark.md index 25b7fd7c64..bae782e515 100644 --- a/docs/zh_cn/dataset_zoo/2d_fashion_landmark.md +++ b/docs/zh_cn/dataset_zoo/2d_fashion_landmark.md @@ -1,3 +1,3 @@ -# 2D服装关键点数据集 - -内容建设中…… +# 2D服装关键点数据集 + +内容建设中…… diff --git a/docs/zh_cn/dataset_zoo/2d_hand_keypoint.md b/docs/zh_cn/dataset_zoo/2d_hand_keypoint.md index aade35850c..825fe87742 100644 --- a/docs/zh_cn/dataset_zoo/2d_hand_keypoint.md +++ b/docs/zh_cn/dataset_zoo/2d_hand_keypoint.md @@ -1,348 +1,348 @@ -# 2D Hand Keypoint Datasets - -It is recommended to symlink the dataset root to `$MMPOSE/data`. -If your folder structure is different, you may need to change the corresponding paths in config files. - -MMPose supported datasets: - -- [OneHand10K](#onehand10k) \[ [Homepage](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html) \] -- [FreiHand](#freihand-dataset) \[ [Homepage](https://lmb.informatik.uni-freiburg.de/projects/freihand/) \] -- [CMU Panoptic HandDB](#cmu-panoptic-handdb) \[ [Homepage](http://domedb.perception.cs.cmu.edu/handdb.html) \] -- [InterHand2.6M](#interhand26m) \[ [Homepage](https://mks0601.github.io/InterHand2.6M/) \] -- [RHD](#rhd-dataset) \[ [Homepage](https://lmb.informatik.uni-freiburg.de/resources/datasets/RenderedHandposeDataset.en.html) \] -- [COCO-WholeBody-Hand](#coco-wholebody-hand) \[ [Homepage](https://github.com/jin-s13/COCO-WholeBody/) \] - -## OneHand10K - - - -
    -OneHand10K (TCSVT'2019) - -```bibtex -@article{wang2018mask, - title={Mask-pose cascaded cnn for 2d hand pose estimation from single color image}, - author={Wang, Yangang and Peng, Cong and Liu, Yebin}, - journal={IEEE Transactions on Circuits and Systems for Video Technology}, - volume={29}, - number={11}, - pages={3258--3268}, - year={2018}, - publisher={IEEE} -} -``` - -
    - -
    - -
    - -For [OneHand10K](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html) data, please download from [OneHand10K Dataset](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html). -Please download the annotation files from [onehand10k_annotations](https://download.openmmlab.com/mmpose/datasets/onehand10k_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── onehand10k - |── annotations - | |── onehand10k_train.json - | |── onehand10k_test.json - `── Train - | |── source - | |── 0.jpg - | |── 1.jpg - | ... - `── Test - |── source - |── 0.jpg - |── 1.jpg - -``` - -## FreiHAND Dataset - - - -
    -FreiHand (ICCV'2019) - -```bibtex -@inproceedings{zimmermann2019freihand, - title={Freihand: A dataset for markerless capture of hand pose and shape from single rgb images}, - author={Zimmermann, Christian and Ceylan, Duygu and Yang, Jimei and Russell, Bryan and Argus, Max and Brox, Thomas}, - booktitle={Proceedings of the IEEE International Conference on Computer Vision}, - pages={813--822}, - year={2019} -} -``` - -
    - -
    - -
    - -For [FreiHAND](https://lmb.informatik.uni-freiburg.de/projects/freihand/) data, please download from [FreiHand Dataset](https://lmb.informatik.uni-freiburg.de/resources/datasets/FreihandDataset.en.html). -Since the official dataset does not provide validation set, we randomly split the training data into 8:1:1 for train/val/test. -Please download the annotation files from [freihand_annotations](https://download.openmmlab.com/mmpose/datasets/frei_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── freihand - |── annotations - | |── freihand_train.json - | |── freihand_val.json - | |── freihand_test.json - `── training - |── rgb - | |── 00000000.jpg - | |── 00000001.jpg - | ... - |── mask - |── 00000000.jpg - |── 00000001.jpg - ... -``` - -## CMU Panoptic HandDB - - - -
    -CMU Panoptic HandDB (CVPR'2017) - -```bibtex -@inproceedings{simon2017hand, - title={Hand keypoint detection in single images using multiview bootstrapping}, - author={Simon, Tomas and Joo, Hanbyul and Matthews, Iain and Sheikh, Yaser}, - booktitle={Proceedings of the IEEE conference on Computer Vision and Pattern Recognition}, - pages={1145--1153}, - year={2017} -} -``` - -
    - -
    - -
    - -For [CMU Panoptic HandDB](http://domedb.perception.cs.cmu.edu/handdb.html), please download from [CMU Panoptic HandDB](http://domedb.perception.cs.cmu.edu/handdb.html). -Following [Simon et al](https://arxiv.org/abs/1704.07809), panoptic images (hand143_panopticdb) and MPII & NZSL training sets (manual_train) are used for training, while MPII & NZSL test set (manual_test) for testing. -Please download the annotation files from [panoptic_annotations](https://download.openmmlab.com/mmpose/datasets/panoptic_annotations.tar). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── panoptic - |── annotations - | |── panoptic_train.json - | |── panoptic_test.json - | - `── hand143_panopticdb - | |── imgs - | | |── 00000000.jpg - | | |── 00000001.jpg - | | ... - | - `── hand_labels - |── manual_train - | |── 000015774_01_l.jpg - | |── 000015774_01_r.jpg - | ... - | - `── manual_test - |── 000648952_02_l.jpg - |── 000835470_01_l.jpg - ... -``` - -## InterHand2.6M - - - -
    -InterHand2.6M (ECCV'2020) - -```bibtex -@InProceedings{Moon_2020_ECCV_InterHand2.6M, -author = {Moon, Gyeongsik and Yu, Shoou-I and Wen, He and Shiratori, Takaaki and Lee, Kyoung Mu}, -title = {InterHand2.6M: A Dataset and Baseline for 3D Interacting Hand Pose Estimation from a Single RGB Image}, -booktitle = {European Conference on Computer Vision (ECCV)}, -year = {2020} -} -``` - -
    - -
    - -
    - -For [InterHand2.6M](https://mks0601.github.io/InterHand2.6M/), please download from [InterHand2.6M](https://mks0601.github.io/InterHand2.6M/). -Please download the annotation files from [annotations](https://drive.google.com/drive/folders/1pWXhdfaka-J0fSAze0MsajN0VpZ8e8tO). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── interhand2.6m - |── annotations - | |── all - | |── human_annot - | |── machine_annot - | |── skeleton.txt - | |── subject.txt - | - `── images - | |── train - | | |-- Capture0 ~ Capture26 - | |── val - | | |-- Capture0 - | |── test - | | |-- Capture0 ~ Capture7 -``` - -## RHD Dataset - - - -
    -RHD (ICCV'2017) - -```bibtex -@TechReport{zb2017hand, - author={Christian Zimmermann and Thomas Brox}, - title={Learning to Estimate 3D Hand Pose from Single RGB Images}, - institution={arXiv:1705.01389}, - year={2017}, - note="https://arxiv.org/abs/1705.01389", - url="https://lmb.informatik.uni-freiburg.de/projects/hand3d/" -} -``` - -
    - -
    - -
    - -For [RHD Dataset](https://lmb.informatik.uni-freiburg.de/resources/datasets/RenderedHandposeDataset.en.html), please download from [RHD Dataset](https://lmb.informatik.uni-freiburg.de/resources/datasets/RenderedHandposeDataset.en.html). -Please download the annotation files from [rhd_annotations](https://download.openmmlab.com/mmpose/datasets/rhd_annotations.zip). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── rhd - |── annotations - | |── rhd_train.json - | |── rhd_test.json - `── training - | |── color - | | |── 00000.jpg - | | |── 00001.jpg - | |── depth - | | |── 00000.jpg - | | |── 00001.jpg - | |── mask - | | |── 00000.jpg - | | |── 00001.jpg - `── evaluation - | |── color - | | |── 00000.jpg - | | |── 00001.jpg - | |── depth - | | |── 00000.jpg - | | |── 00001.jpg - | |── mask - | | |── 00000.jpg - | | |── 00001.jpg -``` - -## COCO-WholeBody (Hand) - - - -
    -COCO-WholeBody-Hand (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
    - -
    - -
    - -For [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody/) dataset, images can be downloaded from [COCO download](http://cocodataset.org/#download), 2017 Train/Val is needed for COCO keypoints training and validation. -Download COCO-WholeBody annotations for COCO-WholeBody annotations for [Train](https://drive.google.com/file/d/1thErEToRbmM9uLNi1JXXfOsaS5VK2FXf/view?usp=sharing) / [Validation](https://drive.google.com/file/d/1N6VgwKnj8DeyGXCvp1eYgNbRmw6jdfrb/view?usp=sharing) (Google Drive). -Download person detection result of COCO val2017 from [OneDrive](https://1drv.ms/f/s!AhIXJn_J-blWzzDXoz5BeFl8sWM-) or [GoogleDrive](https://drive.google.com/drive/folders/1fRUDNUDxe9fjqcRZ2bnF_TKMlO0nB_dk?usp=sharing). -Download and extract them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── coco - │-- annotations - │ │-- coco_wholebody_train_v1.0.json - │ |-- coco_wholebody_val_v1.0.json - |-- person_detection_results - | |-- COCO_val2017_detections_AP_H_56_person.json - │-- train2017 - │ │-- 000000000009.jpg - │ │-- 000000000025.jpg - │ │-- 000000000030.jpg - │ │-- ... - `-- val2017 - │-- 000000000139.jpg - │-- 000000000285.jpg - │-- 000000000632.jpg - │-- ... -``` - -Please also install the latest version of [Extended COCO API](https://github.com/jin-s13/xtcocoapi) to support COCO-WholeBody evaluation: - -`pip install xtcocotools` +# 2D Hand Keypoint Datasets + +It is recommended to symlink the dataset root to `$MMPOSE/data`. +If your folder structure is different, you may need to change the corresponding paths in config files. + +MMPose supported datasets: + +- [OneHand10K](#onehand10k) \[ [Homepage](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html) \] +- [FreiHand](#freihand-dataset) \[ [Homepage](https://lmb.informatik.uni-freiburg.de/projects/freihand/) \] +- [CMU Panoptic HandDB](#cmu-panoptic-handdb) \[ [Homepage](http://domedb.perception.cs.cmu.edu/handdb.html) \] +- [InterHand2.6M](#interhand26m) \[ [Homepage](https://mks0601.github.io/InterHand2.6M/) \] +- [RHD](#rhd-dataset) \[ [Homepage](https://lmb.informatik.uni-freiburg.de/resources/datasets/RenderedHandposeDataset.en.html) \] +- [COCO-WholeBody-Hand](#coco-wholebody-hand) \[ [Homepage](https://github.com/jin-s13/COCO-WholeBody/) \] + +## OneHand10K + + + +
    +OneHand10K (TCSVT'2019) + +```bibtex +@article{wang2018mask, + title={Mask-pose cascaded cnn for 2d hand pose estimation from single color image}, + author={Wang, Yangang and Peng, Cong and Liu, Yebin}, + journal={IEEE Transactions on Circuits and Systems for Video Technology}, + volume={29}, + number={11}, + pages={3258--3268}, + year={2018}, + publisher={IEEE} +} +``` + +
    + +
    + +
    + +For [OneHand10K](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html) data, please download from [OneHand10K Dataset](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html). +Please download the annotation files from [onehand10k_annotations](https://download.openmmlab.com/mmpose/datasets/onehand10k_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── onehand10k + |── annotations + | |── onehand10k_train.json + | |── onehand10k_test.json + `── Train + | |── source + | |── 0.jpg + | |── 1.jpg + | ... + `── Test + |── source + |── 0.jpg + |── 1.jpg + +``` + +## FreiHAND Dataset + + + +
    +FreiHand (ICCV'2019) + +```bibtex +@inproceedings{zimmermann2019freihand, + title={Freihand: A dataset for markerless capture of hand pose and shape from single rgb images}, + author={Zimmermann, Christian and Ceylan, Duygu and Yang, Jimei and Russell, Bryan and Argus, Max and Brox, Thomas}, + booktitle={Proceedings of the IEEE International Conference on Computer Vision}, + pages={813--822}, + year={2019} +} +``` + +
    + +
    + +
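+Before the detailed steps below, a brief sketch of fetching the pre-split annotations (the URL is the one given in those steps; the train/val/test split is already baked into the json files):
+
+```shell
+# sketch: annotations only; the FreiHAND images are downloaded separately
+# from the official site and extracted to data/freihand/training/
+mkdir -p data/freihand && cd data/freihand
+wget https://download.openmmlab.com/mmpose/datasets/frei_annotations.tar
+tar -xf frei_annotations.tar
+```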
    + +For [FreiHAND](https://lmb.informatik.uni-freiburg.de/projects/freihand/) data, please download from [FreiHand Dataset](https://lmb.informatik.uni-freiburg.de/resources/datasets/FreihandDataset.en.html). +Since the official dataset does not provide validation set, we randomly split the training data into 8:1:1 for train/val/test. +Please download the annotation files from [freihand_annotations](https://download.openmmlab.com/mmpose/datasets/frei_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── freihand + |── annotations + | |── freihand_train.json + | |── freihand_val.json + | |── freihand_test.json + `── training + |── rgb + | |── 00000000.jpg + | |── 00000001.jpg + | ... + |── mask + |── 00000000.jpg + |── 00000001.jpg + ... +``` + +## CMU Panoptic HandDB + + + +
    +CMU Panoptic HandDB (CVPR'2017) + +```bibtex +@inproceedings{simon2017hand, + title={Hand keypoint detection in single images using multiview bootstrapping}, + author={Simon, Tomas and Joo, Hanbyul and Matthews, Iain and Sheikh, Yaser}, + booktitle={Proceedings of the IEEE conference on Computer Vision and Pattern Recognition}, + pages={1145--1153}, + year={2017} +} +``` + +
    + +
    + +
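+A compact sketch of the annotation setup described below (the image folders `hand143_panopticdb` and `hand_labels` are assumed to be downloaded and extracted manually from the official site):
+
+```shell
+# sketch: fetch the pre-converted json annotations into $MMPOSE/data/panoptic
+mkdir -p data/panoptic && cd data/panoptic
+wget https://download.openmmlab.com/mmpose/datasets/panoptic_annotations.tar
+tar -xf panoptic_annotations.tar
+```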
    + +For [CMU Panoptic HandDB](http://domedb.perception.cs.cmu.edu/handdb.html), please download from [CMU Panoptic HandDB](http://domedb.perception.cs.cmu.edu/handdb.html). +Following [Simon et al](https://arxiv.org/abs/1704.07809), panoptic images (hand143_panopticdb) and MPII & NZSL training sets (manual_train) are used for training, while MPII & NZSL test set (manual_test) for testing. +Please download the annotation files from [panoptic_annotations](https://download.openmmlab.com/mmpose/datasets/panoptic_annotations.tar). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── panoptic + |── annotations + | |── panoptic_train.json + | |── panoptic_test.json + | + `── hand143_panopticdb + | |── imgs + | | |── 00000000.jpg + | | |── 00000001.jpg + | | ... + | + `── hand_labels + |── manual_train + | |── 000015774_01_l.jpg + | |── 000015774_01_r.jpg + | ... + | + `── manual_test + |── 000648952_02_l.jpg + |── 000835470_01_l.jpg + ... +``` + +## InterHand2.6M + + + +
    +InterHand2.6M (ECCV'2020) + +```bibtex +@InProceedings{Moon_2020_ECCV_InterHand2.6M, +author = {Moon, Gyeongsik and Yu, Shoou-I and Wen, He and Shiratori, Takaaki and Lee, Kyoung Mu}, +title = {InterHand2.6M: A Dataset and Baseline for 3D Interacting Hand Pose Estimation from a Single RGB Image}, +booktitle = {European Conference on Computer Vision (ECCV)}, +year = {2020} +} +``` + +
    + +
    + +
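+A hedged sketch of placing the annotations described below (the archive name `annotations.zip` is an assumption; the Google Drive download may be named differently):
+
+```shell
+# sketch: unpack the annotation download and check the expected layout
+mkdir -p data/interhand2.6m
+unzip annotations.zip -d data/interhand2.6m/
+ls data/interhand2.6m/annotations   # expect: all/ human_annot/ machine_annot/ skeleton.txt subject.txt
+```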
    + +For [InterHand2.6M](https://mks0601.github.io/InterHand2.6M/), please download from [InterHand2.6M](https://mks0601.github.io/InterHand2.6M/). +Please download the annotation files from [annotations](https://drive.google.com/drive/folders/1pWXhdfaka-J0fSAze0MsajN0VpZ8e8tO). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── interhand2.6m + |── annotations + | |── all + | |── human_annot + | |── machine_annot + | |── skeleton.txt + | |── subject.txt + | + `── images + | |── train + | | |-- Capture0 ~ Capture26 + | |── val + | | |-- Capture0 + | |── test + | | |-- Capture0 ~ Capture7 +``` + +## RHD Dataset + + + +
    +RHD (ICCV'2017) + +```bibtex +@TechReport{zb2017hand, + author={Christian Zimmermann and Thomas Brox}, + title={Learning to Estimate 3D Hand Pose from Single RGB Images}, + institution={arXiv:1705.01389}, + year={2017}, + note="https://arxiv.org/abs/1705.01389", + url="https://lmb.informatik.uni-freiburg.de/projects/hand3d/" +} +``` + +
    + +
    + +
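+A short sketch of the annotation download covered below (the zip URL is the one given in the instructions; the RHD images come from the official site):
+
+```shell
+# sketch: pre-converted json annotations for RHD
+mkdir -p data/rhd && cd data/rhd
+wget https://download.openmmlab.com/mmpose/datasets/rhd_annotations.zip
+unzip rhd_annotations.zip
+```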
    + +For [RHD Dataset](https://lmb.informatik.uni-freiburg.de/resources/datasets/RenderedHandposeDataset.en.html), please download from [RHD Dataset](https://lmb.informatik.uni-freiburg.de/resources/datasets/RenderedHandposeDataset.en.html). +Please download the annotation files from [rhd_annotations](https://download.openmmlab.com/mmpose/datasets/rhd_annotations.zip). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── rhd + |── annotations + | |── rhd_train.json + | |── rhd_test.json + `── training + | |── color + | | |── 00000.jpg + | | |── 00001.jpg + | |── depth + | | |── 00000.jpg + | | |── 00001.jpg + | |── mask + | | |── 00000.jpg + | | |── 00001.jpg + `── evaluation + | |── color + | | |── 00000.jpg + | | |── 00001.jpg + | |── depth + | | |── 00000.jpg + | | |── 00001.jpg + | |── mask + | | |── 00000.jpg + | | |── 00001.jpg +``` + +## COCO-WholeBody (Hand) + + + +
    +COCO-WholeBody-Hand (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
    + +
    + +
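+A rough sketch of the image download covered below (`train2017.zip` and `val2017.zip` are the standard archive names on the COCO download page, an assumption here; the COCO-WholeBody json files and the detection results still have to be fetched manually from the Google Drive / OneDrive links):
+
+```shell
+# sketch: COCO 2017 images plus the Extended COCO API used for evaluation
+mkdir -p data/coco && cd data/coco
+wget http://images.cocodataset.org/zips/train2017.zip && unzip -q train2017.zip
+wget http://images.cocodataset.org/zips/val2017.zip && unzip -q val2017.zip
+pip install xtcocotools
+```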
+
+For the [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody/) dataset, images can be downloaded from [COCO download](http://cocodataset.org/#download); 2017 Train/Val is needed for COCO keypoints training and validation.
+Download the COCO-WholeBody annotations for [Train](https://drive.google.com/file/d/1thErEToRbmM9uLNi1JXXfOsaS5VK2FXf/view?usp=sharing) / [Validation](https://drive.google.com/file/d/1N6VgwKnj8DeyGXCvp1eYgNbRmw6jdfrb/view?usp=sharing) (Google Drive).
+Download the person detection results of COCO val2017 from [OneDrive](https://1drv.ms/f/s!AhIXJn_J-blWzzDXoz5BeFl8sWM-) or [GoogleDrive](https://drive.google.com/drive/folders/1fRUDNUDxe9fjqcRZ2bnF_TKMlO0nB_dk?usp=sharing).
+Download and extract them under $MMPOSE/data, and make them look like this:
+
+```text
+mmpose
+├── mmpose
+├── docs
+├── tests
+├── tools
+├── configs
+`── data
+    │── coco
+        │-- annotations
+        │   │-- coco_wholebody_train_v1.0.json
+        │   |-- coco_wholebody_val_v1.0.json
+        |-- person_detection_results
+        |   |-- COCO_val2017_detections_AP_H_56_person.json
+        │-- train2017
+        │   │-- 000000000009.jpg
+        │   │-- 000000000025.jpg
+        │   │-- 000000000030.jpg
+        │   │-- ...
+        `-- val2017
+            │-- 000000000139.jpg
+            │-- 000000000285.jpg
+            │-- 000000000632.jpg
+            │-- ...
+```
+
+Please also install the latest version of the [Extended COCO API](https://github.com/jin-s13/xtcocoapi) to support COCO-WholeBody evaluation:
+
+`pip install xtcocotools`
diff --git a/docs/zh_cn/dataset_zoo/2d_wholebody_keypoint.md b/docs/zh_cn/dataset_zoo/2d_wholebody_keypoint.md
index a082c657c6..55a76139df 100644
--- a/docs/zh_cn/dataset_zoo/2d_wholebody_keypoint.md
+++ b/docs/zh_cn/dataset_zoo/2d_wholebody_keypoint.md
@@ -1,133 +1,133 @@
-# 2D Wholebody Keypoint Datasets
-
-It is recommended to symlink the dataset root to `$MMPOSE/data`.
-If your folder structure is different, you may need to change the corresponding paths in config files.
-
-MMPose supported datasets:
-
-- [COCO-WholeBody](#coco-wholebody) \[ [Homepage](https://github.com/jin-s13/COCO-WholeBody/) \]
-- [Halpe](#halpe) \[ [Homepage](https://github.com/Fang-Haoshu/Halpe-FullBody/) \]
-
-## COCO-WholeBody
-
-
-
    -COCO-WholeBody (ECCV'2020) - -```bibtex -@inproceedings{jin2020whole, - title={Whole-Body Human Pose Estimation in the Wild}, - author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2020} -} -``` - -
    - -
    - -
    - -For [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody/) dataset, images can be downloaded from [COCO download](http://cocodataset.org/#download), 2017 Train/Val is needed for COCO keypoints training and validation. -Download COCO-WholeBody annotations for COCO-WholeBody annotations for [Train](https://drive.google.com/file/d/1thErEToRbmM9uLNi1JXXfOsaS5VK2FXf/view?usp=sharing) / [Validation](https://drive.google.com/file/d/1N6VgwKnj8DeyGXCvp1eYgNbRmw6jdfrb/view?usp=sharing) (Google Drive). -Download person detection result of COCO val2017 from [OneDrive](https://1drv.ms/f/s!AhIXJn_J-blWzzDXoz5BeFl8sWM-) or [GoogleDrive](https://drive.google.com/drive/folders/1fRUDNUDxe9fjqcRZ2bnF_TKMlO0nB_dk?usp=sharing). -Download and extract them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── coco - │-- annotations - │ │-- coco_wholebody_train_v1.0.json - │ |-- coco_wholebody_val_v1.0.json - |-- person_detection_results - | |-- COCO_val2017_detections_AP_H_56_person.json - │-- train2017 - │ │-- 000000000009.jpg - │ │-- 000000000025.jpg - │ │-- 000000000030.jpg - │ │-- ... - `-- val2017 - │-- 000000000139.jpg - │-- 000000000285.jpg - │-- 000000000632.jpg - │-- ... - -``` - -Please also install the latest version of [Extended COCO API](https://github.com/jin-s13/xtcocoapi) (version>=1.5) to support COCO-WholeBody evaluation: - -`pip install xtcocotools` - -## Halpe - - - -
    -Halpe (CVPR'2020) - -```bibtex -@inproceedings{li2020pastanet, - title={PaStaNet: Toward Human Activity Knowledge Engine}, - author={Li, Yong-Lu and Xu, Liang and Liu, Xinpeng and Huang, Xijie and Xu, Yue and Wang, Shiyi and Fang, Hao-Shu and Ma, Ze and Chen, Mingyang and Lu, Cewu}, - booktitle={CVPR}, - year={2020} -} -``` - -
    - -
    - -
    - -For [Halpe](https://github.com/Fang-Haoshu/Halpe-FullBody/) dataset, please download images and annotations from [Halpe download](https://github.com/Fang-Haoshu/Halpe-FullBody). -The images of the training set are from [HICO-Det](https://drive.google.com/open?id=1QZcJmGVlF9f4h-XLWe9Gkmnmj2z1gSnk) and those of the validation set are from [COCO](http://images.cocodataset.org/zips/val2017.zip). -Download person detection result of COCO val2017 from [OneDrive](https://1drv.ms/f/s!AhIXJn_J-blWzzDXoz5BeFl8sWM-) or [GoogleDrive](https://drive.google.com/drive/folders/1fRUDNUDxe9fjqcRZ2bnF_TKMlO0nB_dk?usp=sharing). -Download and extract them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── halpe - │-- annotations - │ │-- halpe_train_v1.json - │ |-- halpe_val_v1.json - |-- person_detection_results - | |-- COCO_val2017_detections_AP_H_56_person.json - │-- hico_20160224_det - │ │-- anno_bbox.mat - │ │-- anno.mat - │ │-- README - │ │-- images - │ │ │-- train2015 - │ │ │ │-- HICO_train2015_00000001.jpg - │ │ │ │-- HICO_train2015_00000002.jpg - │ │ │ │-- HICO_train2015_00000003.jpg - │ │ │ │-- ... - │ │ │-- test2015 - │ │-- tools - │ │-- ... - `-- val2017 - │-- 000000000139.jpg - │-- 000000000285.jpg - │-- 000000000632.jpg - │-- ... - -``` - -Please also install the latest version of [Extended COCO API](https://github.com/jin-s13/xtcocoapi) (version>=1.5) to support Halpe evaluation: - -`pip install xtcocotools` +# 2D Wholebody Keypoint Datasets + +It is recommended to symlink the dataset root to `$MMPOSE/data`. +If your folder structure is different, you may need to change the corresponding paths in config files. + +MMPose supported datasets: + +- [COCO-WholeBody](#coco-wholebody) \[ [Homepage](https://github.com/jin-s13/COCO-WholeBody/) \] +- [Halpe](#halpe) \[ [Homepage](https://github.com/Fang-Haoshu/Halpe-FullBody/) \] + +## COCO-WholeBody + + + +
    +COCO-WholeBody (ECCV'2020) + +```bibtex +@inproceedings{jin2020whole, + title={Whole-Body Human Pose Estimation in the Wild}, + author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2020} +} +``` + +
    + +
    + +
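+Once everything is in place following the steps below, a quick sanity check of the layout and the evaluation dependency might look like this (paths follow the tree shown below):
+
+```shell
+# sketch: verify the annotation and detection files, then install the Extended COCO API
+ls data/coco/annotations/coco_wholebody_train_v1.0.json \
+   data/coco/annotations/coco_wholebody_val_v1.0.json \
+   data/coco/person_detection_results/COCO_val2017_detections_AP_H_56_person.json
+pip install "xtcocotools>=1.5"
+```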
+
+For the [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody/) dataset, images can be downloaded from [COCO download](http://cocodataset.org/#download); 2017 Train/Val is needed for COCO keypoints training and validation.
+Download the COCO-WholeBody annotations for [Train](https://drive.google.com/file/d/1thErEToRbmM9uLNi1JXXfOsaS5VK2FXf/view?usp=sharing) / [Validation](https://drive.google.com/file/d/1N6VgwKnj8DeyGXCvp1eYgNbRmw6jdfrb/view?usp=sharing) (Google Drive).
+Download the person detection results of COCO val2017 from [OneDrive](https://1drv.ms/f/s!AhIXJn_J-blWzzDXoz5BeFl8sWM-) or [GoogleDrive](https://drive.google.com/drive/folders/1fRUDNUDxe9fjqcRZ2bnF_TKMlO0nB_dk?usp=sharing).
+Download and extract them under $MMPOSE/data, and make them look like this:
+
+```text
+mmpose
+├── mmpose
+├── docs
+├── tests
+├── tools
+├── configs
+`── data
+    │── coco
+        │-- annotations
+        │   │-- coco_wholebody_train_v1.0.json
+        │   |-- coco_wholebody_val_v1.0.json
+        |-- person_detection_results
+        |   |-- COCO_val2017_detections_AP_H_56_person.json
+        │-- train2017
+        │   │-- 000000000009.jpg
+        │   │-- 000000000025.jpg
+        │   │-- 000000000030.jpg
+        │   │-- ...
+        `-- val2017
+            │-- 000000000139.jpg
+            │-- 000000000285.jpg
+            │-- 000000000632.jpg
+            │-- ...
+
+```
+
+Please also install the latest version of the [Extended COCO API](https://github.com/jin-s13/xtcocoapi) (version>=1.5) to support COCO-WholeBody evaluation:
+
+`pip install xtcocotools`
+
+## Halpe
+
+
    +Halpe (CVPR'2020) + +```bibtex +@inproceedings{li2020pastanet, + title={PaStaNet: Toward Human Activity Knowledge Engine}, + author={Li, Yong-Lu and Xu, Liang and Liu, Xinpeng and Huang, Xijie and Xu, Yue and Wang, Shiyi and Fang, Hao-Shu and Ma, Ze and Chen, Mingyang and Lu, Cewu}, + booktitle={CVPR}, + year={2020} +} +``` + +
    + +
    + +
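+As a partial sketch of the preparation described below (only the COCO val2017 images can be fetched directly; the HICO-DET images and the Halpe json files come from the Google Drive / GitHub links and are placed manually):
+
+```shell
+# sketch: validation images and the evaluation dependency
+mkdir -p data/halpe && cd data/halpe
+wget http://images.cocodataset.org/zips/val2017.zip && unzip -q val2017.zip
+pip install "xtcocotools>=1.5"
+```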
    + +For [Halpe](https://github.com/Fang-Haoshu/Halpe-FullBody/) dataset, please download images and annotations from [Halpe download](https://github.com/Fang-Haoshu/Halpe-FullBody). +The images of the training set are from [HICO-Det](https://drive.google.com/open?id=1QZcJmGVlF9f4h-XLWe9Gkmnmj2z1gSnk) and those of the validation set are from [COCO](http://images.cocodataset.org/zips/val2017.zip). +Download person detection result of COCO val2017 from [OneDrive](https://1drv.ms/f/s!AhIXJn_J-blWzzDXoz5BeFl8sWM-) or [GoogleDrive](https://drive.google.com/drive/folders/1fRUDNUDxe9fjqcRZ2bnF_TKMlO0nB_dk?usp=sharing). +Download and extract them under $MMPOSE/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── halpe + │-- annotations + │ │-- halpe_train_v1.json + │ |-- halpe_val_v1.json + |-- person_detection_results + | |-- COCO_val2017_detections_AP_H_56_person.json + │-- hico_20160224_det + │ │-- anno_bbox.mat + │ │-- anno.mat + │ │-- README + │ │-- images + │ │ │-- train2015 + │ │ │ │-- HICO_train2015_00000001.jpg + │ │ │ │-- HICO_train2015_00000002.jpg + │ │ │ │-- HICO_train2015_00000003.jpg + │ │ │ │-- ... + │ │ │-- test2015 + │ │-- tools + │ │-- ... + `-- val2017 + │-- 000000000139.jpg + │-- 000000000285.jpg + │-- 000000000632.jpg + │-- ... + +``` + +Please also install the latest version of [Extended COCO API](https://github.com/jin-s13/xtcocoapi) (version>=1.5) to support Halpe evaluation: + +`pip install xtcocotools` diff --git a/docs/zh_cn/dataset_zoo/3d_body_keypoint.md b/docs/zh_cn/dataset_zoo/3d_body_keypoint.md index 82e21010fc..25b1d8415c 100644 --- a/docs/zh_cn/dataset_zoo/3d_body_keypoint.md +++ b/docs/zh_cn/dataset_zoo/3d_body_keypoint.md @@ -1,199 +1,199 @@ -# 3D Body Keypoint Datasets - -It is recommended to symlink the dataset root to `$MMPOSE/data`. -If your folder structure is different, you may need to change the corresponding paths in config files. - -MMPose supported datasets: - -- [Human3.6M](#human36m) \[ [Homepage](http://vision.imar.ro/human3.6m/description.php) \] -- [CMU Panoptic](#cmu-panoptic) \[ [Homepage](http://domedb.perception.cs.cmu.edu/) \] -- [Campus/Shelf](#campus-and-shelf) \[ [Homepage](http://campar.in.tum.de/Chair/MultiHumanPose) \] - -## Human3.6M - - - -
    -Human3.6M (TPAMI'2014) - -```bibtex -@article{h36m_pami, - author = {Ionescu, Catalin and Papava, Dragos and Olaru, Vlad and Sminchisescu, Cristian}, - title = {Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments}, - journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, - publisher = {IEEE Computer Society}, - volume = {36}, - number = {7}, - pages = {1325-1339}, - month = {jul}, - year = {2014} -} -``` - -
    - -
    - -
    - -For [Human3.6M](http://vision.imar.ro/human3.6m/description.php), please download from the official website and run the [preprocessing script](/tools/dataset_converters/preprocess_h36m.py), which will extract camera parameters and pose annotations at full framerate (50 FPS) and downsampled framerate (10 FPS). The processed data should have the following structure: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - ├── h36m - ├── annotation_body3d - | ├── cameras.pkl - | ├── fps50 - | | ├── h36m_test.npz - | | ├── h36m_train.npz - | | ├── joint2d_rel_stats.pkl - | | ├── joint2d_stats.pkl - | | ├── joint3d_rel_stats.pkl - | | `── joint3d_stats.pkl - | `── fps10 - | ├── h36m_test.npz - | ├── h36m_train.npz - | ├── joint2d_rel_stats.pkl - | ├── joint2d_stats.pkl - | ├── joint3d_rel_stats.pkl - | `── joint3d_stats.pkl - `── images - ├── S1 - | ├── S1_Directions_1.54138969 - | | ├── S1_Directions_1.54138969_00001.jpg - | | ├── S1_Directions_1.54138969_00002.jpg - | | ├── ... - | ├── ... - ├── S5 - ├── S6 - ├── S7 - ├── S8 - ├── S9 - `── S11 -``` - -## CMU Panoptic - -
    -CMU Panoptic (ICCV'2015) - -```bibtex -@Article = {joo_iccv_2015, -author = {Hanbyul Joo, Hao Liu, Lei Tan, Lin Gui, Bart Nabbe, Iain Matthews, Takeo Kanade, Shohei Nobuhara, and Yaser Sheikh}, -title = {Panoptic Studio: A Massively Multiview System for Social Motion Capture}, -booktitle = {ICCV}, -year = {2015} -} -``` - -
    - -
    - -
    - -Please follow [voxelpose-pytorch](https://github.com/microsoft/voxelpose-pytorch) to prepare this dataset. - -1. Download the dataset by following the instructions in [panoptic-toolbox](https://github.com/CMU-Perceptual-Computing-Lab/panoptic-toolbox) and extract them under `$MMPOSE/data/panoptic`. - -2. Only download those sequences that are needed. You can also just download a subset of camera views by specifying the number of views (HD_Video_Number) and changing the camera order in `./scripts/getData.sh`. The used sequences and camera views can be found in [VoxelPose](https://arxiv.org/abs/2004.06239). Note that the sequence "160906_band3" might not be available due to errors on the server of CMU Panoptic. - -3. Note that we only use HD videos, calibration data, and 3D Body Keypoint in the codes. You can comment out other irrelevant codes such as downloading 3D Face data in `./scripts/getData.sh`. - -The directory tree should be like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - ├── panoptic - ├── 16060224_haggling1 - | | ├── hdImgs - | | ├── hdvideos - | | ├── hdPose3d_stage1_coco19 - | | ├── calibration_160224_haggling1.json - ├── 160226_haggling1 - ├── ... -``` - -## Campus and Shelf - -
    -Campus and Shelf (CVPR'2014) - -```bibtex -@inproceedings {belagian14multi, - title = {{3D} Pictorial Structures for Multiple Human Pose Estimation}, - author = {Belagiannis, Vasileios and Amin, Sikandar and Andriluka, Mykhaylo and Schiele, Bernt and Navab - Nassir and Ilic, Slobo - booktitle = {IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2014}, - month = {June}, - organization={IEEE} -} -``` - -
    - -
    - -
    - -Please follow [voxelpose-pytorch](https://github.com/microsoft/voxelpose-pytorch) to prepare these two datasets. - -1. Please download the datasets from the [official website](http://campar.in.tum.de/Chair/MultiHumanPose) and extract them under `$MMPOSE/data/campus` and `$MMPOSE/data/shelf`, respectively. The original data include images as well as the ground truth pose file `actorsGT.mat`. - -2. We directly use the processed camera parameters from [voxelpose-pytorch](https://github.com/microsoft/voxelpose-pytorch). You can download them from this repository and place in under `$MMPOSE/data/campus/calibration_campus.json` and `$MMPOSE/data/shelf/calibration_shelf.json`, respectively. - -3. Like [Voxelpose](https://github.com/microsoft/voxelpose-pytorch), due to the limited and incomplete annotations of the two datasets, we don't train the model using this dataset. Instead, we directly use the 2D pose estimator trained on COCO, and use independent 3D human poses from the CMU Panoptic dataset to train our 3D model. It lies in `${MMPOSE}/data/panoptic_training_pose.pkl`. - -4. Like [Voxelpose](https://github.com/microsoft/voxelpose-pytorch), for testing, we first estimate 2D poses and generate 2D heatmaps for these two datasets. You can download the predicted poses from [voxelpose-pytorch](https://github.com/microsoft/voxelpose-pytorch) and place them in `$MMPOSE/data/campus/pred_campus_maskrcnn_hrnet_coco.pkl` and `$MMPOSE/data/shelf/pred_shelf_maskrcnn_hrnet_coco.pkl`, respectively. You can also use the models trained on COCO dataset (like HigherHRNet) to generate 2D heatmaps directly. - -The directory tree should be like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - ├── panoptic_training_pose.pkl - ├── campus - | ├── Camera0 - | | | ├── campus4-c0-00000.png - | | | ├── ... - | | | ├── campus4-c0-01999.png - | ... - | ├── Camera2 - | | | ├── campus4-c2-00000.png - | | | ├── ... - | | | ├── campus4-c2-01999.png - | ├── calibration_campus.json - | ├── pred_campus_maskrcnn_hrnet_coco.pkl - | ├── actorsGT.mat - ├── shelf - | ├── Camera0 - | | | ├── img_000000.png - | | | ├── ... - | | | ├── img_003199.png - | ... - | ├── Camera4 - | | | ├── img_000000.png - | | | ├── ... - | | | ├── img_003199.png - | ├── calibration_shelf.json - | ├── pred_shelf_maskrcnn_hrnet_coco.pkl - | ├── actorsGT.mat -``` +# 3D Body Keypoint Datasets + +It is recommended to symlink the dataset root to `$MMPOSE/data`. +If your folder structure is different, you may need to change the corresponding paths in config files. + +MMPose supported datasets: + +- [Human3.6M](#human36m) \[ [Homepage](http://vision.imar.ro/human3.6m/description.php) \] +- [CMU Panoptic](#cmu-panoptic) \[ [Homepage](http://domedb.perception.cs.cmu.edu/) \] +- [Campus/Shelf](#campus-and-shelf) \[ [Homepage](http://campar.in.tum.de/Chair/MultiHumanPose) \] + +## Human3.6M + + + +
    +Human3.6M (TPAMI'2014) + +```bibtex +@article{h36m_pami, + author = {Ionescu, Catalin and Papava, Dragos and Olaru, Vlad and Sminchisescu, Cristian}, + title = {Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments}, + journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, + publisher = {IEEE Computer Society}, + volume = {36}, + number = {7}, + pages = {1325-1339}, + month = {jul}, + year = {2014} +} +``` + +
    + +
    + +
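+The preprocessing step mentioned below can be invoked like this (assuming the raw Human3.6M release has already been downloaded to `data/h36m` and that `metadata.xml` from the official toolbox is available locally):
+
+```shell
+# sketch: extract camera parameters and pose annotations at 50 FPS and 10 FPS
+python tools/dataset_converters/preprocess_h36m.py \
+    --metadata /path/to/metadata.xml \
+    --original data/h36m
+```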
    + +For [Human3.6M](http://vision.imar.ro/human3.6m/description.php), please download from the official website and run the [preprocessing script](/tools/dataset_converters/preprocess_h36m.py), which will extract camera parameters and pose annotations at full framerate (50 FPS) and downsampled framerate (10 FPS). The processed data should have the following structure: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + ├── h36m + ├── annotation_body3d + | ├── cameras.pkl + | ├── fps50 + | | ├── h36m_test.npz + | | ├── h36m_train.npz + | | ├── joint2d_rel_stats.pkl + | | ├── joint2d_stats.pkl + | | ├── joint3d_rel_stats.pkl + | | `── joint3d_stats.pkl + | `── fps10 + | ├── h36m_test.npz + | ├── h36m_train.npz + | ├── joint2d_rel_stats.pkl + | ├── joint2d_stats.pkl + | ├── joint3d_rel_stats.pkl + | `── joint3d_stats.pkl + `── images + ├── S1 + | ├── S1_Directions_1.54138969 + | | ├── S1_Directions_1.54138969_00001.jpg + | | ├── S1_Directions_1.54138969_00002.jpg + | | ├── ... + | ├── ... + ├── S5 + ├── S6 + ├── S7 + ├── S8 + ├── S9 + `── S11 +``` + +## CMU Panoptic + +
    +CMU Panoptic (ICCV'2015) + +```bibtex +@Article = {joo_iccv_2015, +author = {Hanbyul Joo, Hao Liu, Lei Tan, Lin Gui, Bart Nabbe, Iain Matthews, Takeo Kanade, Shohei Nobuhara, and Yaser Sheikh}, +title = {Panoptic Studio: A Massively Multiview System for Social Motion Capture}, +booktitle = {ICCV}, +year = {2015} +} +``` + +
    + +
    + +
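+A hedged sketch of the toolbox-based download described in the steps below (the exact arguments of `getData.sh` and the extraction helper are assumptions; please check the panoptic-toolbox README for the authoritative usage):
+
+```shell
+# sketch: fetch one sequence with HD views only, then unpack frames and 3D keypoints
+git clone https://github.com/CMU-Perceptual-Computing-Lab/panoptic-toolbox.git
+cd panoptic-toolbox
+./scripts/getData.sh 160226_haggling1 0 31    # assumed signature: <sequence> <#VGA views> <#HD views>
+./scripts/extractAll.sh 160226_haggling1      # assumed helper that unpacks hdImgs/ and hdPose3d_stage1_coco19/
+```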
+
+Please follow [voxelpose-pytorch](https://github.com/microsoft/voxelpose-pytorch) to prepare this dataset.
+
+1. Download the dataset by following the instructions in [panoptic-toolbox](https://github.com/CMU-Perceptual-Computing-Lab/panoptic-toolbox) and extract it under `$MMPOSE/data/panoptic`.
+
+2. Only download the sequences that are needed. You can also download just a subset of camera views by specifying the number of views (HD_Video_Number) and changing the camera order in `./scripts/getData.sh`. The used sequences and camera views can be found in [VoxelPose](https://arxiv.org/abs/2004.06239). Note that the sequence "160906_band3" might not be available due to server errors on the CMU Panoptic side.
+
+3. Note that only the HD videos, calibration data, and 3D body keypoints are used in our code. You can comment out the irrelevant parts of `./scripts/getData.sh`, such as downloading the 3D face data.
+
+The directory tree should look like this:
+
+```text
+mmpose
+├── mmpose
+├── docs
+├── tests
+├── tools
+├── configs
+`── data
+    ├── panoptic
+        ├── 160224_haggling1
+        |   |   ├── hdImgs
+        |   |   ├── hdvideos
+        |   |   ├── hdPose3d_stage1_coco19
+        |   |   ├── calibration_160224_haggling1.json
+        ├── 160226_haggling1
+        ├── ...
+```
+
+## Campus and Shelf
+
+
    +Campus and Shelf (CVPR'2014) + +```bibtex +@inproceedings {belagian14multi, + title = {{3D} Pictorial Structures for Multiple Human Pose Estimation}, + author = {Belagiannis, Vasileios and Amin, Sikandar and Andriluka, Mykhaylo and Schiele, Bernt and Navab + Nassir and Ilic, Slobo + booktitle = {IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June}, + organization={IEEE} +} +``` + +
    + +
    + +
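+Since most pieces of these two datasets are downloaded manually (see the steps below), a layout check after everything is in place might look like this (file names follow the tree shown below):
+
+```shell
+# sketch: confirm the calibration files, predicted 2D poses and Panoptic training poses are in place
+ls data/panoptic_training_pose.pkl
+ls data/campus/calibration_campus.json data/campus/pred_campus_maskrcnn_hrnet_coco.pkl
+ls data/shelf/calibration_shelf.json data/shelf/pred_shelf_maskrcnn_hrnet_coco.pkl
+```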
    + +Please follow [voxelpose-pytorch](https://github.com/microsoft/voxelpose-pytorch) to prepare these two datasets. + +1. Please download the datasets from the [official website](http://campar.in.tum.de/Chair/MultiHumanPose) and extract them under `$MMPOSE/data/campus` and `$MMPOSE/data/shelf`, respectively. The original data include images as well as the ground truth pose file `actorsGT.mat`. + +2. We directly use the processed camera parameters from [voxelpose-pytorch](https://github.com/microsoft/voxelpose-pytorch). You can download them from this repository and place in under `$MMPOSE/data/campus/calibration_campus.json` and `$MMPOSE/data/shelf/calibration_shelf.json`, respectively. + +3. Like [Voxelpose](https://github.com/microsoft/voxelpose-pytorch), due to the limited and incomplete annotations of the two datasets, we don't train the model using this dataset. Instead, we directly use the 2D pose estimator trained on COCO, and use independent 3D human poses from the CMU Panoptic dataset to train our 3D model. It lies in `${MMPOSE}/data/panoptic_training_pose.pkl`. + +4. Like [Voxelpose](https://github.com/microsoft/voxelpose-pytorch), for testing, we first estimate 2D poses and generate 2D heatmaps for these two datasets. You can download the predicted poses from [voxelpose-pytorch](https://github.com/microsoft/voxelpose-pytorch) and place them in `$MMPOSE/data/campus/pred_campus_maskrcnn_hrnet_coco.pkl` and `$MMPOSE/data/shelf/pred_shelf_maskrcnn_hrnet_coco.pkl`, respectively. You can also use the models trained on COCO dataset (like HigherHRNet) to generate 2D heatmaps directly. + +The directory tree should be like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + ├── panoptic_training_pose.pkl + ├── campus + | ├── Camera0 + | | | ├── campus4-c0-00000.png + | | | ├── ... + | | | ├── campus4-c0-01999.png + | ... + | ├── Camera2 + | | | ├── campus4-c2-00000.png + | | | ├── ... + | | | ├── campus4-c2-01999.png + | ├── calibration_campus.json + | ├── pred_campus_maskrcnn_hrnet_coco.pkl + | ├── actorsGT.mat + ├── shelf + | ├── Camera0 + | | | ├── img_000000.png + | | | ├── ... + | | | ├── img_003199.png + | ... + | ├── Camera4 + | | | ├── img_000000.png + | | | ├── ... + | | | ├── img_003199.png + | ├── calibration_shelf.json + | ├── pred_shelf_maskrcnn_hrnet_coco.pkl + | ├── actorsGT.mat +``` diff --git a/docs/zh_cn/dataset_zoo/3d_body_mesh.md b/docs/zh_cn/dataset_zoo/3d_body_mesh.md index aced63c802..25a08fd676 100644 --- a/docs/zh_cn/dataset_zoo/3d_body_mesh.md +++ b/docs/zh_cn/dataset_zoo/3d_body_mesh.md @@ -1,342 +1,342 @@ -# 3D Body Mesh Recovery Datasets - -It is recommended to symlink the dataset root to `$MMPOSE/data`. -If your folder structure is different, you may need to change the corresponding paths in config files. - -To achieve high-quality human mesh estimation, we use multiple datasets for training. -The following items should be prepared for human mesh training: - - - -- [3D Body Mesh Recovery Datasets](#3d-body-mesh-recovery-datasets) - - [Notes](#notes) - - [Annotation Files for Human Mesh Estimation](#annotation-files-for-human-mesh-estimation) - - [SMPL Model](#smpl-model) - - [COCO](#coco) - - [Human3.6M](#human36m) - - [MPI-INF-3DHP](#mpi-inf-3dhp) - - [LSP](#lsp) - - [LSPET](#lspet) - - [CMU MoShed Data](#cmu-moshed-data) - - - -## Notes - -### Annotation Files for Human Mesh Estimation - -For human mesh estimation, we use multiple datasets for training. 
-The annotation of different datasets are preprocessed to the same format. Please -follow the [preprocess procedure](https://github.com/nkolot/SPIN/tree/master/datasets/preprocess) -of SPIN to generate the annotation files or download the processed files from -[here](https://download.openmmlab.com/mmpose/datasets/mesh_annotation_files.zip), -and make it look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── mesh_annotation_files - ├── coco_2014_train.npz - ├── h36m_valid_protocol1.npz - ├── h36m_valid_protocol2.npz - ├── hr-lspet_train.npz - ├── lsp_dataset_original_train.npz - ├── mpi_inf_3dhp_train.npz - └── mpii_train.npz -``` - -### SMPL Model - -```bibtex -@article{loper2015smpl, - title={SMPL: A skinned multi-person linear model}, - author={Loper, Matthew and Mahmood, Naureen and Romero, Javier and Pons-Moll, Gerard and Black, Michael J}, - journal={ACM transactions on graphics (TOG)}, - volume={34}, - number={6}, - pages={1--16}, - year={2015}, - publisher={ACM New York, NY, USA} -} -``` - -For human mesh estimation, SMPL model is used to generate the human mesh. -Please download the [gender neutral SMPL model](http://smplify.is.tue.mpg.de/), -[joints regressor](https://download.openmmlab.com/mmpose/datasets/joints_regressor_cmr.npy) -and [mean parameters](https://download.openmmlab.com/mmpose/datasets/smpl_mean_params.npz) -under `$MMPOSE/models/smpl`, and make it look like this: - -```text -mmpose -├── mmpose -├── ... -├── models - │── smpl - ├── joints_regressor_cmr.npy - ├── smpl_mean_params.npz - └── SMPL_NEUTRAL.pkl -``` - -## COCO - - - -
    -COCO (ECCV'2014) - -```bibtex -@inproceedings{lin2014microsoft, - title={Microsoft coco: Common objects in context}, - author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, - booktitle={European conference on computer vision}, - pages={740--755}, - year={2014}, - organization={Springer} -} -``` - -
    - -For [COCO](http://cocodataset.org/) data, please download from [COCO download](http://cocodataset.org/#download). COCO'2014 Train is needed for human mesh estimation training. -Download and extract them under $MMPOSE/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── coco - │-- train2014 - │ ├── COCO_train2014_000000000009.jpg - │ ├── COCO_train2014_000000000025.jpg - │ ├── COCO_train2014_000000000030.jpg - | │-- ... - -``` - -## Human3.6M - - - -
    -Human3.6M (TPAMI'2014) - -```bibtex -@article{h36m_pami, - author = {Ionescu, Catalin and Papava, Dragos and Olaru, Vlad and Sminchisescu, Cristian}, - title = {Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments}, - journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, - publisher = {IEEE Computer Society}, - volume = {36}, - number = {7}, - pages = {1325-1339}, - month = {jul}, - year = {2014} -} -``` - -
    - -For [Human3.6M](http://vision.imar.ro/human3.6m/description.php), we use the MoShed data provided in [HMR](https://github.com/akanazawa/hmr) for training. -However, due to license limitations, we are not allowed to redistribute the MoShed data. - -For the evaluation on Human3.6M dataset, please follow the -[preprocess procedure](https://github.com/nkolot/SPIN/tree/master/datasets/preprocess) -of SPIN to extract test images from -[Human3.6M](http://vision.imar.ro/human3.6m/description.php) original videos, -and make it look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── Human3.6M - ├── images -    ├── S11_Directions_1.54138969_000001.jpg -    ├── S11_Directions_1.54138969_000006.jpg -    ├── S11_Directions_1.54138969_000011.jpg -    ├── ... -``` - -The download of Human3.6M dataset is quite difficult, you can also download the -[zip file](https://drive.google.com/file/d/1WnRJD9FS3NUf7MllwgLRJJC-JgYFr8oi/view?usp=sharing) -of the test images. However, due to the license limitations, we are not allowed to -redistribute the images either. So the users need to download the original video and -extract the images by themselves. - -## MPI-INF-3DHP - - - -```bibtex -@inproceedings{mono-3dhp2017, - author = {Mehta, Dushyant and Rhodin, Helge and Casas, Dan and Fua, Pascal and Sotnychenko, Oleksandr and Xu, Weipeng and Theobalt, Christian}, - title = {Monocular 3D Human Pose Estimation In The Wild Using Improved CNN Supervision}, - booktitle = {3D Vision (3DV), 2017 Fifth International Conference on}, - url = {http://gvv.mpi-inf.mpg.de/3dhp_dataset}, - year = {2017}, - organization={IEEE}, - doi={10.1109/3dv.2017.00064}, -} -``` - -For [MPI-INF-3DHP](http://gvv.mpi-inf.mpg.de/3dhp-dataset/), please follow the -[preprocess procedure](https://github.com/nkolot/SPIN/tree/master/datasets/preprocess) -of SPIN to sample images, and make them like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - ├── mpi_inf_3dhp_test_set - │   ├── TS1 - │   ├── TS2 - │   ├── TS3 - │   ├── TS4 - │   ├── TS5 - │   └── TS6 - ├── S1 - │   ├── Seq1 - │   └── Seq2 - ├── S2 - │   ├── Seq1 - │   └── Seq2 - ├── S3 - │   ├── Seq1 - │   └── Seq2 - ├── S4 - │   ├── Seq1 - │   └── Seq2 - ├── S5 - │   ├── Seq1 - │   └── Seq2 - ├── S6 - │   ├── Seq1 - │   └── Seq2 - ├── S7 - │   ├── Seq1 - │   └── Seq2 - └── S8 - ├── Seq1 - └── Seq2 -``` - -## LSP - - - -```bibtex -@inproceedings{johnson2010clustered, - title={Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation.}, - author={Johnson, Sam and Everingham, Mark}, - booktitle={bmvc}, - volume={2}, - number={4}, - pages={5}, - year={2010}, - organization={Citeseer} -} -``` - -For [LSP](https://sam.johnson.io/research/lsp.html), please download the high resolution version -[LSP dataset original](http://sam.johnson.io/research/lsp_dataset_original.zip). -Extract them under `$MMPOSE/data`, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── lsp_dataset_original - ├── images -    ├── im0001.jpg -    ├── im0002.jpg -    └── ... 
-``` - -## LSPET - - - -```bibtex -@inproceedings{johnson2011learning, - title={Learning effective human pose estimation from inaccurate annotation}, - author={Johnson, Sam and Everingham, Mark}, - booktitle={CVPR 2011}, - pages={1465--1472}, - year={2011}, - organization={IEEE} -} -``` - -For [LSPET](https://sam.johnson.io/research/lspet.html), please download its high resolution form -[HR-LSPET](http://datasets.d2.mpi-inf.mpg.de/hr-lspet/hr-lspet.zip). -Extract them under `$MMPOSE/data`, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── lspet_dataset - ├── images - │   ├── im00001.jpg - │   ├── im00002.jpg - │   ├── im00003.jpg - │   └── ... - └── joints.mat -``` - -## CMU MoShed Data - - - -```bibtex -@inproceedings{kanazawa2018end, - title={End-to-end recovery of human shape and pose}, - author={Kanazawa, Angjoo and Black, Michael J and Jacobs, David W and Malik, Jitendra}, - booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, - pages={7122--7131}, - year={2018} -} -``` - -Real-world SMPL parameters are used for the adversarial training in human mesh estimation. -The MoShed data provided in [HMR](https://github.com/akanazawa/hmr) is included in this -[zip file](https://download.openmmlab.com/mmpose/datasets/mesh_annotation_files.zip). -Please download and extract it under `$MMPOSE/data`, and make it look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── mesh_annotation_files - ├── CMU_mosh.npz - └── ... -``` +# 3D Body Mesh Recovery Datasets + +It is recommended to symlink the dataset root to `$MMPOSE/data`. +If your folder structure is different, you may need to change the corresponding paths in config files. + +To achieve high-quality human mesh estimation, we use multiple datasets for training. +The following items should be prepared for human mesh training: + + + +- [3D Body Mesh Recovery Datasets](#3d-body-mesh-recovery-datasets) + - [Notes](#notes) + - [Annotation Files for Human Mesh Estimation](#annotation-files-for-human-mesh-estimation) + - [SMPL Model](#smpl-model) + - [COCO](#coco) + - [Human3.6M](#human36m) + - [MPI-INF-3DHP](#mpi-inf-3dhp) + - [LSP](#lsp) + - [LSPET](#lspet) + - [CMU MoShed Data](#cmu-moshed-data) + + + +## Notes + +### Annotation Files for Human Mesh Estimation + +For human mesh estimation, we use multiple datasets for training. +The annotation of different datasets are preprocessed to the same format. 
Please +follow the [preprocess procedure](https://github.com/nkolot/SPIN/tree/master/datasets/preprocess) +of SPIN to generate the annotation files or download the processed files from +[here](https://download.openmmlab.com/mmpose/datasets/mesh_annotation_files.zip), +and make it look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── mesh_annotation_files + ├── coco_2014_train.npz + ├── h36m_valid_protocol1.npz + ├── h36m_valid_protocol2.npz + ├── hr-lspet_train.npz + ├── lsp_dataset_original_train.npz + ├── mpi_inf_3dhp_train.npz + └── mpii_train.npz +``` + +### SMPL Model + +```bibtex +@article{loper2015smpl, + title={SMPL: A skinned multi-person linear model}, + author={Loper, Matthew and Mahmood, Naureen and Romero, Javier and Pons-Moll, Gerard and Black, Michael J}, + journal={ACM transactions on graphics (TOG)}, + volume={34}, + number={6}, + pages={1--16}, + year={2015}, + publisher={ACM New York, NY, USA} +} +``` + +For human mesh estimation, SMPL model is used to generate the human mesh. +Please download the [gender neutral SMPL model](http://smplify.is.tue.mpg.de/), +[joints regressor](https://download.openmmlab.com/mmpose/datasets/joints_regressor_cmr.npy) +and [mean parameters](https://download.openmmlab.com/mmpose/datasets/smpl_mean_params.npz) +under `$MMPOSE/models/smpl`, and make it look like this: + +```text +mmpose +├── mmpose +├── ... +├── models + │── smpl + ├── joints_regressor_cmr.npy + ├── smpl_mean_params.npz + └── SMPL_NEUTRAL.pkl +``` + +## COCO + + + +
    +COCO (ECCV'2014) + +```bibtex +@inproceedings{lin2014microsoft, + title={Microsoft coco: Common objects in context}, + author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, + booktitle={European conference on computer vision}, + pages={740--755}, + year={2014}, + organization={Springer} +} +``` + +
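+A brief sketch of fetching the images required below (`train2014.zip` is the usual archive name on the COCO download page; treat it as an assumption and follow the link below if it differs):
+
+```shell
+# sketch: COCO 2014 training images used for mesh training
+mkdir -p data/coco && cd data/coco
+wget http://images.cocodataset.org/zips/train2014.zip
+unzip -q train2014.zip   # creates train2014/ with COCO_train2014_*.jpg
+```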
    + +For [COCO](http://cocodataset.org/) data, please download from [COCO download](http://cocodataset.org/#download). COCO'2014 Train is needed for human mesh estimation training. +Download and extract them under $MMPOSE/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── coco + │-- train2014 + │ ├── COCO_train2014_000000000009.jpg + │ ├── COCO_train2014_000000000025.jpg + │ ├── COCO_train2014_000000000030.jpg + | │-- ... + +``` + +## Human3.6M + + + +
    +Human3.6M (TPAMI'2014) + +```bibtex +@article{h36m_pami, + author = {Ionescu, Catalin and Papava, Dragos and Olaru, Vlad and Sminchisescu, Cristian}, + title = {Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments}, + journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, + publisher = {IEEE Computer Society}, + volume = {36}, + number = {7}, + pages = {1325-1339}, + month = {jul}, + year = {2014} +} +``` + +
    + +For [Human3.6M](http://vision.imar.ro/human3.6m/description.php), we use the MoShed data provided in [HMR](https://github.com/akanazawa/hmr) for training. +However, due to license limitations, we are not allowed to redistribute the MoShed data. + +For the evaluation on Human3.6M dataset, please follow the +[preprocess procedure](https://github.com/nkolot/SPIN/tree/master/datasets/preprocess) +of SPIN to extract test images from +[Human3.6M](http://vision.imar.ro/human3.6m/description.php) original videos, +and make it look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── Human3.6M + ├── images +    ├── S11_Directions_1.54138969_000001.jpg +    ├── S11_Directions_1.54138969_000006.jpg +    ├── S11_Directions_1.54138969_000011.jpg +    ├── ... +``` + +The download of Human3.6M dataset is quite difficult, you can also download the +[zip file](https://drive.google.com/file/d/1WnRJD9FS3NUf7MllwgLRJJC-JgYFr8oi/view?usp=sharing) +of the test images. However, due to the license limitations, we are not allowed to +redistribute the images either. So the users need to download the original video and +extract the images by themselves. + +## MPI-INF-3DHP + + + +```bibtex +@inproceedings{mono-3dhp2017, + author = {Mehta, Dushyant and Rhodin, Helge and Casas, Dan and Fua, Pascal and Sotnychenko, Oleksandr and Xu, Weipeng and Theobalt, Christian}, + title = {Monocular 3D Human Pose Estimation In The Wild Using Improved CNN Supervision}, + booktitle = {3D Vision (3DV), 2017 Fifth International Conference on}, + url = {http://gvv.mpi-inf.mpg.de/3dhp_dataset}, + year = {2017}, + organization={IEEE}, + doi={10.1109/3dv.2017.00064}, +} +``` + +For [MPI-INF-3DHP](http://gvv.mpi-inf.mpg.de/3dhp-dataset/), please follow the +[preprocess procedure](https://github.com/nkolot/SPIN/tree/master/datasets/preprocess) +of SPIN to sample images, and make them like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + ├── mpi_inf_3dhp_test_set + │   ├── TS1 + │   ├── TS2 + │   ├── TS3 + │   ├── TS4 + │   ├── TS5 + │   └── TS6 + ├── S1 + │   ├── Seq1 + │   └── Seq2 + ├── S2 + │   ├── Seq1 + │   └── Seq2 + ├── S3 + │   ├── Seq1 + │   └── Seq2 + ├── S4 + │   ├── Seq1 + │   └── Seq2 + ├── S5 + │   ├── Seq1 + │   └── Seq2 + ├── S6 + │   ├── Seq1 + │   └── Seq2 + ├── S7 + │   ├── Seq1 + │   └── Seq2 + └── S8 + ├── Seq1 + └── Seq2 +``` + +## LSP + + + +```bibtex +@inproceedings{johnson2010clustered, + title={Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation.}, + author={Johnson, Sam and Everingham, Mark}, + booktitle={bmvc}, + volume={2}, + number={4}, + pages={5}, + year={2010}, + organization={Citeseer} +} +``` + +For [LSP](https://sam.johnson.io/research/lsp.html), please download the high resolution version +[LSP dataset original](http://sam.johnson.io/research/lsp_dataset_original.zip). +Extract them under `$MMPOSE/data`, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── lsp_dataset_original + ├── images +    ├── im0001.jpg +    ├── im0002.jpg +    └── ... 
+``` + +## LSPET + + + +```bibtex +@inproceedings{johnson2011learning, + title={Learning effective human pose estimation from inaccurate annotation}, + author={Johnson, Sam and Everingham, Mark}, + booktitle={CVPR 2011}, + pages={1465--1472}, + year={2011}, + organization={IEEE} +} +``` + +For [LSPET](https://sam.johnson.io/research/lspet.html), please download its high resolution form +[HR-LSPET](http://datasets.d2.mpi-inf.mpg.de/hr-lspet/hr-lspet.zip). +Extract them under `$MMPOSE/data`, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── lspet_dataset + ├── images + │   ├── im00001.jpg + │   ├── im00002.jpg + │   ├── im00003.jpg + │   └── ... + └── joints.mat +``` + +## CMU MoShed Data + + + +```bibtex +@inproceedings{kanazawa2018end, + title={End-to-end recovery of human shape and pose}, + author={Kanazawa, Angjoo and Black, Michael J and Jacobs, David W and Malik, Jitendra}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + pages={7122--7131}, + year={2018} +} +``` + +Real-world SMPL parameters are used for the adversarial training in human mesh estimation. +The MoShed data provided in [HMR](https://github.com/akanazawa/hmr) is included in this +[zip file](https://download.openmmlab.com/mmpose/datasets/mesh_annotation_files.zip). +Please download and extract it under `$MMPOSE/data`, and make it look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── mesh_annotation_files + ├── CMU_mosh.npz + └── ... +``` diff --git a/docs/zh_cn/dataset_zoo/3d_hand_keypoint.md b/docs/zh_cn/dataset_zoo/3d_hand_keypoint.md index 2b1f4d3923..8a3c398c04 100644 --- a/docs/zh_cn/dataset_zoo/3d_hand_keypoint.md +++ b/docs/zh_cn/dataset_zoo/3d_hand_keypoint.md @@ -1,59 +1,59 @@ -# 3D Hand Keypoint Datasets - -It is recommended to symlink the dataset root to `$MMPOSE/data`. -If your folder structure is different, you may need to change the corresponding paths in config files. - -MMPose supported datasets: - -- [InterHand2.6M](#interhand26m) \[ [Homepage](https://mks0601.github.io/InterHand2.6M/) \] - -## InterHand2.6M - - - -
    -InterHand2.6M (ECCV'2020) - -```bibtex -@InProceedings{Moon_2020_ECCV_InterHand2.6M, -author = {Moon, Gyeongsik and Yu, Shoou-I and Wen, He and Shiratori, Takaaki and Lee, Kyoung Mu}, -title = {InterHand2.6M: A Dataset and Baseline for 3D Interacting Hand Pose Estimation from a Single RGB Image}, -booktitle = {European Conference on Computer Vision (ECCV)}, -year = {2020} -} -``` - -
    - -
    - -
    - -For [InterHand2.6M](https://mks0601.github.io/InterHand2.6M/), please download from [InterHand2.6M](https://mks0601.github.io/InterHand2.6M/). -Please download the annotation files from [annotations](https://drive.google.com/drive/folders/1pWXhdfaka-J0fSAze0MsajN0VpZ8e8tO). -Extract them under {MMPose}/data, and make them look like this: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── interhand2.6m - |── annotations - | |── all - | |── human_annot - | |── machine_annot - | |── skeleton.txt - | |── subject.txt - | - `── images - | |── train - | | |-- Capture0 ~ Capture26 - | |── val - | | |-- Capture0 - | |── test - | | |-- Capture0 ~ Capture7 -``` +# 3D Hand Keypoint Datasets + +It is recommended to symlink the dataset root to `$MMPOSE/data`. +If your folder structure is different, you may need to change the corresponding paths in config files. + +MMPose supported datasets: + +- [InterHand2.6M](#interhand26m) \[ [Homepage](https://mks0601.github.io/InterHand2.6M/) \] + +## InterHand2.6M + + + +
    +InterHand2.6M (ECCV'2020) + +```bibtex +@InProceedings{Moon_2020_ECCV_InterHand2.6M, +author = {Moon, Gyeongsik and Yu, Shoou-I and Wen, He and Shiratori, Takaaki and Lee, Kyoung Mu}, +title = {InterHand2.6M: A Dataset and Baseline for 3D Interacting Hand Pose Estimation from a Single RGB Image}, +booktitle = {European Conference on Computer Vision (ECCV)}, +year = {2020} +} +``` + +
    + +
    + +
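+After the downloads described below are unpacked, a quick check of the expected layout might look like this (paths follow the tree shown below):
+
+```shell
+# sketch: verify annotations and image splits are where the configs expect them
+ls data/interhand2.6m/annotations/all \
+   data/interhand2.6m/annotations/human_annot \
+   data/interhand2.6m/annotations/machine_annot
+ls data/interhand2.6m/images/train data/interhand2.6m/images/val data/interhand2.6m/images/test
+```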
    + +For [InterHand2.6M](https://mks0601.github.io/InterHand2.6M/), please download from [InterHand2.6M](https://mks0601.github.io/InterHand2.6M/). +Please download the annotation files from [annotations](https://drive.google.com/drive/folders/1pWXhdfaka-J0fSAze0MsajN0VpZ8e8tO). +Extract them under {MMPose}/data, and make them look like this: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── interhand2.6m + |── annotations + | |── all + | |── human_annot + | |── machine_annot + | |── skeleton.txt + | |── subject.txt + | + `── images + | |── train + | | |-- Capture0 ~ Capture26 + | |── val + | | |-- Capture0 + | |── test + | | |-- Capture0 ~ Capture7 +``` diff --git a/docs/zh_cn/dataset_zoo/dataset_tools.md b/docs/zh_cn/dataset_zoo/dataset_tools.md index a2e6d01d97..358f0788e3 100644 --- a/docs/zh_cn/dataset_zoo/dataset_tools.md +++ b/docs/zh_cn/dataset_zoo/dataset_tools.md @@ -1,413 +1,413 @@ -# 数据集格式转换脚本 - -MMPose 提供了一些工具来帮助用户处理数据集。 - -## Animal Pose 数据集 - -
    -Animal-Pose (ICCV'2019) - -```bibtex -@InProceedings{Cao_2019_ICCV, - author = {Cao, Jinkun and Tang, Hongyang and Fang, Hao-Shu and Shen, Xiaoyong and Lu, Cewu and Tai, Yu-Wing}, - title = {Cross-Domain Adaptation for Animal Pose Estimation}, - booktitle = {The IEEE International Conference on Computer Vision (ICCV)}, - month = {October}, - year = {2019} -} -``` - -
    - -对于 [Animal-Pose](https://sites.google.com/view/animal-pose/),可以从[官方网站](https://sites.google.com/view/animal-pose/)下载图像和标注。脚本 `tools/dataset_converters/parse_animalpose_dataset.py` 将原始标注转换为 MMPose 兼容的格式。预处理的[标注文件](https://download.openmmlab.com/mmpose/datasets/animalpose_annotations.tar)可用。如果您想自己生成标注,请按照以下步骤操作: - -1. 下载图片与标注信息并解压到 `$MMPOSE/data`,按照以下格式组织: - - ```text - mmpose - ├── mmpose - ├── docs - ├── tests - ├── tools - ├── configs - `── data - │── animalpose - │ - │-- VOC2012 - │ │-- Annotations - │ │-- ImageSets - │ │-- JPEGImages - │ │-- SegmentationClass - │ │-- SegmentationObject - │ - │-- animalpose_image_part2 - │ │-- cat - │ │-- cow - │ │-- dog - │ │-- horse - │ │-- sheep - │ - │-- PASCAL2011_animal_annotation - │ │-- cat - │ │ |-- 2007_000528_1.xml - │ │ |-- 2007_000549_1.xml - │ │ │-- ... - │ │-- cow - │ │-- dog - │ │-- horse - │ │-- sheep - │ - │-- annimalpose_anno2 - │ │-- cat - │ │ |-- ca1.xml - │ │ |-- ca2.xml - │ │ │-- ... - │ │-- cow - │ │-- dog - │ │-- horse - │ │-- sheep - ``` - -2. 运行脚本 - - ```bash - python tools/dataset_converters/parse_animalpose_dataset.py - ``` - - 生成的标注文件将保存在 `$MMPOSE/data/animalpose/annotations` 中。 - -开源作者没有提供官方的 train/val/test 划分,我们选择来自 PascalVOC 的图片作为 train & val,train+val 一共 3600 张图片,5117 个标注。其中 2798 张图片,4000 个标注用于训练,810 张图片,1117 个标注用于验证。测试集包含 1000 张图片,1000 个标注用于评估。 - -## COFW 数据集 - -
    -COFW (ICCV'2013) - -```bibtex -@inproceedings{burgos2013robust, - title={Robust face landmark estimation under occlusion}, - author={Burgos-Artizzu, Xavier P and Perona, Pietro and Doll{\'a}r, Piotr}, - booktitle={Proceedings of the IEEE international conference on computer vision}, - pages={1513--1520}, - year={2013} -} -``` - -
    - -对于 COFW 数据集,请从 [COFW Dataset (Color Images)](https://data.caltech.edu/records/20099) 进行下载。 - -将 `COFW_train_color.mat` 和 `COFW_test_color.mat` 移动到 `$MMPOSE/data/cofw/`,确保它们按照以下格式组织: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── cofw - |── COFW_train_color.mat - |── COFW_test_color.mat -``` - -运行 `pip install h5py` 安装依赖,然后在 `$MMPOSE` 下运行脚本: - -```bash -python tools/dataset_converters/parse_cofw_dataset.py -``` - -最终结果为: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - │── cofw - |── COFW_train_color.mat - |── COFW_test_color.mat - |── annotations - | |── cofw_train.json - | |── cofw_test.json - |── images - |── 000001.jpg - |── 000002.jpg -``` - -## DeepposeKit 数据集 - -
    -Desert Locust (Elife'2019) - -```bibtex -@article{graving2019deepposekit, - title={DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning}, - author={Graving, Jacob M and Chae, Daniel and Naik, Hemal and Li, Liang and Koger, Benjamin and Costelloe, Blair R and Couzin, Iain D}, - journal={Elife}, - volume={8}, - pages={e47994}, - year={2019}, - publisher={eLife Sciences Publications Limited} -} -``` - -
    - -对于 [Vinegar Fly](https://github.com/jgraving/DeepPoseKit-Data),[Desert Locust](https://github.com/jgraving/DeepPoseKit-Data), 和 [Grévy’s Zebra](https://github.com/jgraving/DeepPoseKit-Data) 数据集,请从 [DeepPoseKit-Data](https://github.com/jgraving/DeepPoseKit-Data) 下载数据。 - -`tools/dataset_converters/parse_deepposekit_dataset.py` 脚本可以将原始标注转换为 MMPose 支持的格式。我们已经转换好的标注文件可以在这里下载: - -- [vinegar_fly_annotations](https://download.openmmlab.com/mmpose/datasets/vinegar_fly_annotations.tar) -- [locust_annotations](https://download.openmmlab.com/mmpose/datasets/locust_annotations.tar) -- [zebra_annotations](https://download.openmmlab.com/mmpose/datasets/zebra_annotations.tar) - -如果你希望自己转换数据,请按照以下步骤操作: - -1. 下载原始图片和标注,并解压到 `$MMPOSE/data`,将它们按照以下格式组织: - - ```text - mmpose - ├── mmpose - ├── docs - ├── tests - ├── tools - ├── configs - `── data - | - |── DeepPoseKit-Data - | `── datasets - | |── fly - | | |── annotation_data_release.h5 - | | |── skeleton.csv - | | |── ... - | | - | |── locust - | | |── annotation_data_release.h5 - | | |── skeleton.csv - | | |── ... - | | - | `── zebra - | |── annotation_data_release.h5 - | |── skeleton.csv - | |── ... - | - │── fly - `-- images - │-- 0.jpg - │-- 1.jpg - │-- ... - ``` - - 图片也可以在 [vinegar_fly_images](https://download.openmmlab.com/mmpose/datasets/vinegar_fly_images.tar),[locust_images](https://download.openmmlab.com/mmpose/datasets/locust_images.tar) 和[zebra_images](https://download.openmmlab.com/mmpose/datasets/zebra_images.tar) 下载。 - -2. 运行脚本: - - ```bash - python tools/dataset_converters/parse_deepposekit_dataset.py - ``` - - 生成的标注文件将保存在 $MMPOSE/data/fly/annotations`,`$MMPOSE/data/locust/annotations`和`$MMPOSE/data/zebra/annotations\` 中。 - -由于官方数据集中没有提供测试集,我们随机选择了 90% 的图片用于训练,剩下的 10% 用于测试。 - -## Macaque 数据集 - -
    -MacaquePose (bioRxiv'2020) - -```bibtex -@article{labuguen2020macaquepose, - title={MacaquePose: A novel ‘in the wild’macaque monkey pose dataset for markerless motion capture}, - author={Labuguen, Rollyn and Matsumoto, Jumpei and Negrete, Salvador and Nishimaru, Hiroshi and Nishijo, Hisao and Takada, Masahiko and Go, Yasuhiro and Inoue, Ken-ichi and Shibata, Tomohiro}, - journal={bioRxiv}, - year={2020}, - publisher={Cold Spring Harbor Laboratory} -} -``` - -
    - -对于 [MacaquePose](http://www2.ehub.kyoto-u.ac.jp/datasets/macaquepose/index.html) 数据集,请从 [这里](http://www2.ehub.kyoto-u.ac.jp/datasets/macaquepose/index.html) 下载数据。 - -`tools/dataset_converters/parse_macaquepose_dataset.py` 脚本可以将原始标注转换为 MMPose 支持的格式。我们已经转换好的标注文件可以在 [这里](https://download.openmmlab.com/mmpose/datasets/macaque_annotations.tar) 下载。 - -如果你希望自己转换数据,请按照以下步骤操作: - -1. 下载原始图片和标注,并解压到 `$MMPOSE/data`,将它们按照以下格式组织: - - ```text - mmpose - ├── mmpose - ├── docs - ├── tests - ├── tools - ├── configs - `── data - │── macaque - │-- annotations.csv - │-- images - │ │-- 01418849d54b3005.jpg - │ │-- 0142d1d1a6904a70.jpg - │ │-- 01ef2c4c260321b7.jpg - │ │-- 020a1c75c8c85238.jpg - │ │-- 020b1506eef2557d.jpg - │ │-- ... - ``` - -2. 运行脚本: - - ```bash - python tools/dataset_converters/parse_macaquepose_dataset.py - ``` - - 生成的标注文件将保存在 `$MMPOSE/data/macaque/annotations` 中。 - -由于官方数据集中没有提供测试集,我们随机选择了 90% 的图片用于训练,剩下的 10% 用于测试。 - -## Human3.6M 数据集 - -
    -Human3.6M (TPAMI'2014) - -```bibtex -@article{h36m_pami, - author = {Ionescu, Catalin and Papava, Dragos and Olaru, Vlad and Sminchisescu, Cristian}, - title = {Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments}, - journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, - publisher = {IEEE Computer Society}, - volume = {36}, - number = {7}, - pages = {1325-1339}, - month = {jul}, - year = {2014} -} -``` - -
    - -对于 [Human3.6M](http://vision.imar.ro/human3.6m/description.php) 数据集,请从官网下载数据,放置到 `$MMPOSE/data/h36m` 下。 - -然后执行 [预处理脚本](/tools/dataset_converters/preprocess_h36m.py)。 - -```bash -python tools/dataset_converters/preprocess_h36m.py --metadata {path to metadata.xml} --original data/h36m -``` - -这将在全帧率(50 FPS)和降频帧率(10 FPS)下提取相机参数和姿势注释。处理后的数据应具有以下结构: - -```text -mmpose -├── mmpose -├── docs -├── tests -├── tools -├── configs -`── data - ├── h36m - ├── annotation_body3d - | ├── cameras.pkl - | ├── fps50 - | | ├── h36m_test.npz - | | ├── h36m_train.npz - | | ├── joint2d_rel_stats.pkl - | | ├── joint2d_stats.pkl - | | ├── joint3d_rel_stats.pkl - | | `── joint3d_stats.pkl - | `── fps10 - | ├── h36m_test.npz - | ├── h36m_train.npz - | ├── joint2d_rel_stats.pkl - | ├── joint2d_stats.pkl - | ├── joint3d_rel_stats.pkl - | `── joint3d_stats.pkl - `── images - ├── S1 - | ├── S1_Directions_1.54138969 - | | ├── S1_Directions_1.54138969_00001.jpg - | | ├── S1_Directions_1.54138969_00002.jpg - | | ├── ... - | ├── ... - ├── S5 - ├── S6 - ├── S7 - ├── S8 - ├── S9 - `── S11 -``` - -然后,标注信息需要转换为 MMPose 支持的 COCO 格式。这可以通过运行以下命令完成: - -```bash -python tools/dataset_converters/h36m_to_coco.py -``` - -## MPII 数据集 - -
    -MPII (CVPR'2014) - -```bibtex -@inproceedings{andriluka14cvpr, - author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, - title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, - booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2014}, - month = {June} -} -``` - -
    - -对于 [MPII](http://human-pose.mpi-inf.mpg.de/) 数据集,请从官网下载数据,放置到 `$MMPOSE/data/mpii` 下。 - -我们提供了一个脚本来将 `.mat` 格式的标注文件转换为 `.json` 格式。这可以通过运行以下命令完成: - -```shell -python tools/dataset_converters/mat2json ${PRED_MAT_FILE} ${GT_JSON_FILE} ${OUTPUT_PRED_JSON_FILE} -``` - -例如: - -```shell -python tools/dataset/mat2json work_dirs/res50_mpii_256x256/pred.mat data/mpii/annotations/mpii_val.json pred.json -``` - -## Label Studio 数据集 - -
    -Label Studio - -```bibtex -@misc{Label Studio, - title={{Label Studio}: Data labeling software}, - url={https://github.com/heartexlabs/label-studio}, - note={Open source software available from https://github.com/heartexlabs/label-studio}, - author={ - Maxim Tkachenko and - Mikhail Malyuk and - Andrey Holmanyuk and - Nikolai Liubimov}, - year={2020-2022}, -} -``` - -
    - -对于 [Label Studio](https://github.com/heartexlabs/label-studio/) 用户,请依照 [Label Studio 转换工具文档](./label_studio.md) 中的方法进行标注,并将结果导出为 Label Studio 标准的 `.json` 文件,将 `Labeling Interface` 中的 `Code` 保存为 `.xml` 文件。 - -我们提供了一个脚本来将 Label Studio 标准的 `.json` 格式标注文件转换为 COCO 标准的 `.json` 格式。这可以通过运行以下命令完成: - -```shell -python tools/dataset_converters/labelstudio2coco.py ${LS_JSON_FILE} ${LS_XML_FILE} ${OUTPUT_COCO_JSON_FILE} -``` - -例如: - -```shell -python tools/dataset_converters/labelstudio2coco.py config.xml project-1-at-2023-05-13-09-22-91b53efa.json output/result.json -``` +# 数据集格式转换脚本 + +MMPose 提供了一些工具来帮助用户处理数据集。 + +## Animal Pose 数据集 + +
    +Animal-Pose (ICCV'2019) + +```bibtex +@InProceedings{Cao_2019_ICCV, + author = {Cao, Jinkun and Tang, Hongyang and Fang, Hao-Shu and Shen, Xiaoyong and Lu, Cewu and Tai, Yu-Wing}, + title = {Cross-Domain Adaptation for Animal Pose Estimation}, + booktitle = {The IEEE International Conference on Computer Vision (ICCV)}, + month = {October}, + year = {2019} +} +``` + +
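+
+按照本节下面的步骤完成转换后,可以用类似下面的示意片段统计生成的 COCO 风格标注文件中的图片数与实例数,并与本节末尾给出的划分统计进行对照(这里只依赖下文提到的输出目录 `data/animalpose/annotations`,不假设具体文件名):
+
+```python
+import glob
+import json
+
+# 遍历转换脚本生成的所有 COCO 风格标注文件,统计图片数与实例标注数
+for path in sorted(glob.glob('data/animalpose/annotations/*.json')):
+    with open(path) as f:
+        coco = json.load(f)
+    print(path, 'images:', len(coco['images']), 'annotations:', len(coco['annotations']))
+```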
    + +对于 [Animal-Pose](https://sites.google.com/view/animal-pose/),可以从[官方网站](https://sites.google.com/view/animal-pose/)下载图像和标注。脚本 `tools/dataset_converters/parse_animalpose_dataset.py` 将原始标注转换为 MMPose 兼容的格式。预处理的[标注文件](https://download.openmmlab.com/mmpose/datasets/animalpose_annotations.tar)可用。如果您想自己生成标注,请按照以下步骤操作: + +1. 下载图片与标注信息并解压到 `$MMPOSE/data`,按照以下格式组织: + + ```text + mmpose + ├── mmpose + ├── docs + ├── tests + ├── tools + ├── configs + `── data + │── animalpose + │ + │-- VOC2012 + │ │-- Annotations + │ │-- ImageSets + │ │-- JPEGImages + │ │-- SegmentationClass + │ │-- SegmentationObject + │ + │-- animalpose_image_part2 + │ │-- cat + │ │-- cow + │ │-- dog + │ │-- horse + │ │-- sheep + │ + │-- PASCAL2011_animal_annotation + │ │-- cat + │ │ |-- 2007_000528_1.xml + │ │ |-- 2007_000549_1.xml + │ │ │-- ... + │ │-- cow + │ │-- dog + │ │-- horse + │ │-- sheep + │ + │-- annimalpose_anno2 + │ │-- cat + │ │ |-- ca1.xml + │ │ |-- ca2.xml + │ │ │-- ... + │ │-- cow + │ │-- dog + │ │-- horse + │ │-- sheep + ``` + +2. 运行脚本 + + ```bash + python tools/dataset_converters/parse_animalpose_dataset.py + ``` + + 生成的标注文件将保存在 `$MMPOSE/data/animalpose/annotations` 中。 + +开源作者没有提供官方的 train/val/test 划分,我们选择来自 PascalVOC 的图片作为 train & val,train+val 一共 3600 张图片,5117 个标注。其中 2798 张图片,4000 个标注用于训练,810 张图片,1117 个标注用于验证。测试集包含 1000 张图片,1000 个标注用于评估。 + +## COFW 数据集 + +
    +COFW (ICCV'2013) + +```bibtex +@inproceedings{burgos2013robust, + title={Robust face landmark estimation under occlusion}, + author={Burgos-Artizzu, Xavier P and Perona, Pietro and Doll{\'a}r, Piotr}, + booktitle={Proceedings of the IEEE international conference on computer vision}, + pages={1513--1520}, + year={2013} +} +``` + +
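+
+完成本节下述转换后,可以借助 COCO API 快速检查生成的标注文件能否被正常读取(示意片段,假设已安装 MMPose 的依赖 `xtcocotools`):
+
+```python
+from xtcocotools.coco import COCO
+
+# 读取转换得到的训练集标注,打印图片与标注数量
+coco = COCO('data/cofw/annotations/cofw_train.json')
+print('images:', len(coco.getImgIds()))
+print('annotations:', len(coco.getAnnIds()))
+```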
    + +对于 COFW 数据集,请从 [COFW Dataset (Color Images)](https://data.caltech.edu/records/20099) 进行下载。 + +将 `COFW_train_color.mat` 和 `COFW_test_color.mat` 移动到 `$MMPOSE/data/cofw/`,确保它们按照以下格式组织: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── cofw + |── COFW_train_color.mat + |── COFW_test_color.mat +``` + +运行 `pip install h5py` 安装依赖,然后在 `$MMPOSE` 下运行脚本: + +```bash +python tools/dataset_converters/parse_cofw_dataset.py +``` + +最终结果为: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + │── cofw + |── COFW_train_color.mat + |── COFW_test_color.mat + |── annotations + | |── cofw_train.json + | |── cofw_test.json + |── images + |── 000001.jpg + |── 000002.jpg +``` + +## DeepposeKit 数据集 + +
    +Desert Locust (Elife'2019) + +```bibtex +@article{graving2019deepposekit, + title={DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning}, + author={Graving, Jacob M and Chae, Daniel and Naik, Hemal and Li, Liang and Koger, Benjamin and Costelloe, Blair R and Couzin, Iain D}, + journal={Elife}, + volume={8}, + pages={e47994}, + year={2019}, + publisher={eLife Sciences Publications Limited} +} +``` + +
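+
+下载本节所述的原始数据后,如果想先了解 HDF5 标注文件的大致结构,可以用 `h5py` 查看其顶层字段(示意片段,仅列出字段名,不假设具体内容;需要先 `pip install h5py`):
+
+```python
+import h5py
+
+# 查看 DeepPoseKit 原始标注文件的顶层结构
+with h5py.File('data/DeepPoseKit-Data/datasets/fly/annotation_data_release.h5', 'r') as f:
+    print(list(f.keys()))
+```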
    + +对于 [Vinegar Fly](https://github.com/jgraving/DeepPoseKit-Data),[Desert Locust](https://github.com/jgraving/DeepPoseKit-Data), 和 [Grévy’s Zebra](https://github.com/jgraving/DeepPoseKit-Data) 数据集,请从 [DeepPoseKit-Data](https://github.com/jgraving/DeepPoseKit-Data) 下载数据。 + +`tools/dataset_converters/parse_deepposekit_dataset.py` 脚本可以将原始标注转换为 MMPose 支持的格式。我们已经转换好的标注文件可以在这里下载: + +- [vinegar_fly_annotations](https://download.openmmlab.com/mmpose/datasets/vinegar_fly_annotations.tar) +- [locust_annotations](https://download.openmmlab.com/mmpose/datasets/locust_annotations.tar) +- [zebra_annotations](https://download.openmmlab.com/mmpose/datasets/zebra_annotations.tar) + +如果你希望自己转换数据,请按照以下步骤操作: + +1. 下载原始图片和标注,并解压到 `$MMPOSE/data`,将它们按照以下格式组织: + + ```text + mmpose + ├── mmpose + ├── docs + ├── tests + ├── tools + ├── configs + `── data + | + |── DeepPoseKit-Data + | `── datasets + | |── fly + | | |── annotation_data_release.h5 + | | |── skeleton.csv + | | |── ... + | | + | |── locust + | | |── annotation_data_release.h5 + | | |── skeleton.csv + | | |── ... + | | + | `── zebra + | |── annotation_data_release.h5 + | |── skeleton.csv + | |── ... + | + │── fly + `-- images + │-- 0.jpg + │-- 1.jpg + │-- ... + ``` + + 图片也可以在 [vinegar_fly_images](https://download.openmmlab.com/mmpose/datasets/vinegar_fly_images.tar),[locust_images](https://download.openmmlab.com/mmpose/datasets/locust_images.tar) 和[zebra_images](https://download.openmmlab.com/mmpose/datasets/zebra_images.tar) 下载。 + +2. 运行脚本: + + ```bash + python tools/dataset_converters/parse_deepposekit_dataset.py + ``` + + 生成的标注文件将保存在 $MMPOSE/data/fly/annotations`,`$MMPOSE/data/locust/annotations`和`$MMPOSE/data/zebra/annotations\` 中。 + +由于官方数据集中没有提供测试集,我们随机选择了 90% 的图片用于训练,剩下的 10% 用于测试。 + +## Macaque 数据集 + +
    +MacaquePose (bioRxiv'2020) + +```bibtex +@article{labuguen2020macaquepose, + title={MacaquePose: A novel ‘in the wild’macaque monkey pose dataset for markerless motion capture}, + author={Labuguen, Rollyn and Matsumoto, Jumpei and Negrete, Salvador and Nishimaru, Hiroshi and Nishijo, Hisao and Takada, Masahiko and Go, Yasuhiro and Inoue, Ken-ichi and Shibata, Tomohiro}, + journal={bioRxiv}, + year={2020}, + publisher={Cold Spring Harbor Laboratory} +} +``` + +
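+
+MacaquePose 的原始标注以 CSV 形式提供(见下文目录结构中的 `annotations.csv`)。在运行转换脚本之前,可以用下面的示意片段查看其表头和第一条记录(假设该文件带有表头行,不假设具体字段名):
+
+```python
+import csv
+
+with open('data/macaque/annotations.csv', newline='') as f:
+    reader = csv.reader(f)
+    header = next(reader)     # 表头行
+    first_row = next(reader)  # 第一条原始标注
+
+print(header)
+print(first_row[:3])
+```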
    + +对于 [MacaquePose](http://www2.ehub.kyoto-u.ac.jp/datasets/macaquepose/index.html) 数据集,请从 [这里](http://www2.ehub.kyoto-u.ac.jp/datasets/macaquepose/index.html) 下载数据。 + +`tools/dataset_converters/parse_macaquepose_dataset.py` 脚本可以将原始标注转换为 MMPose 支持的格式。我们已经转换好的标注文件可以在 [这里](https://download.openmmlab.com/mmpose/datasets/macaque_annotations.tar) 下载。 + +如果你希望自己转换数据,请按照以下步骤操作: + +1. 下载原始图片和标注,并解压到 `$MMPOSE/data`,将它们按照以下格式组织: + + ```text + mmpose + ├── mmpose + ├── docs + ├── tests + ├── tools + ├── configs + `── data + │── macaque + │-- annotations.csv + │-- images + │ │-- 01418849d54b3005.jpg + │ │-- 0142d1d1a6904a70.jpg + │ │-- 01ef2c4c260321b7.jpg + │ │-- 020a1c75c8c85238.jpg + │ │-- 020b1506eef2557d.jpg + │ │-- ... + ``` + +2. 运行脚本: + + ```bash + python tools/dataset_converters/parse_macaquepose_dataset.py + ``` + + 生成的标注文件将保存在 `$MMPOSE/data/macaque/annotations` 中。 + +由于官方数据集中没有提供测试集,我们随机选择了 90% 的图片用于训练,剩下的 10% 用于测试。 + +## Human3.6M 数据集 + +
    +Human3.6M (TPAMI'2014) + +```bibtex +@article{h36m_pami, + author = {Ionescu, Catalin and Papava, Dragos and Olaru, Vlad and Sminchisescu, Cristian}, + title = {Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments}, + journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, + publisher = {IEEE Computer Society}, + volume = {36}, + number = {7}, + pages = {1325-1339}, + month = {jul}, + year = {2014} +} +``` + +
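+
+按照本节后面的步骤完成预处理后,可以用下面的示意片段检查生成的标注与相机参数文件能否正常读取(只打印字段名和类型,不假设具体内容):
+
+```python
+import pickle
+
+import numpy as np
+
+# 查看 50 FPS 标注文件中包含的字段
+ann = np.load('data/h36m/annotation_body3d/fps50/h36m_train.npz')
+print(ann.files)
+
+# 查看相机参数文件能否正常反序列化
+with open('data/h36m/annotation_body3d/cameras.pkl', 'rb') as f:
+    cameras = pickle.load(f)
+print(type(cameras))
+```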
    + +对于 [Human3.6M](http://vision.imar.ro/human3.6m/description.php) 数据集,请从官网下载数据,放置到 `$MMPOSE/data/h36m` 下。 + +然后执行 [预处理脚本](/tools/dataset_converters/preprocess_h36m.py)。 + +```bash +python tools/dataset_converters/preprocess_h36m.py --metadata {path to metadata.xml} --original data/h36m +``` + +这将在全帧率(50 FPS)和降频帧率(10 FPS)下提取相机参数和姿势注释。处理后的数据应具有以下结构: + +```text +mmpose +├── mmpose +├── docs +├── tests +├── tools +├── configs +`── data + ├── h36m + ├── annotation_body3d + | ├── cameras.pkl + | ├── fps50 + | | ├── h36m_test.npz + | | ├── h36m_train.npz + | | ├── joint2d_rel_stats.pkl + | | ├── joint2d_stats.pkl + | | ├── joint3d_rel_stats.pkl + | | `── joint3d_stats.pkl + | `── fps10 + | ├── h36m_test.npz + | ├── h36m_train.npz + | ├── joint2d_rel_stats.pkl + | ├── joint2d_stats.pkl + | ├── joint3d_rel_stats.pkl + | `── joint3d_stats.pkl + `── images + ├── S1 + | ├── S1_Directions_1.54138969 + | | ├── S1_Directions_1.54138969_00001.jpg + | | ├── S1_Directions_1.54138969_00002.jpg + | | ├── ... + | ├── ... + ├── S5 + ├── S6 + ├── S7 + ├── S8 + ├── S9 + `── S11 +``` + +然后,标注信息需要转换为 MMPose 支持的 COCO 格式。这可以通过运行以下命令完成: + +```bash +python tools/dataset_converters/h36m_to_coco.py +``` + +## MPII 数据集 + +
    +MPII (CVPR'2014) + +```bibtex +@inproceedings{andriluka14cvpr, + author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Schiele, Bernt}, + title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2014}, + month = {June} +} +``` + +
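+
+在使用本节下面介绍的 `mat2json.py` 脚本之前,如果想确认 `.mat` 预测文件的内容,可以用 `scipy` 查看其中的变量名(示意片段,不假设具体字段):
+
+```python
+from scipy.io import loadmat
+
+pred = loadmat('work_dirs/res50_mpii_256x256/pred.mat')
+# 过滤掉 scipy 自动添加的元信息字段,打印实际变量名
+print([k for k in pred.keys() if not k.startswith('__')])
+```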
+
+对于 [MPII](http://human-pose.mpi-inf.mpg.de/) 数据集,请从官网下载数据,放置到 `$MMPOSE/data/mpii` 下。
+
+我们提供了一个脚本来将 `.mat` 格式的预测结果文件转换为 `.json` 格式。这可以通过运行以下命令完成:
+
+```shell
+python tools/dataset_converters/mat2json.py ${PRED_MAT_FILE} ${GT_JSON_FILE} ${OUTPUT_PRED_JSON_FILE}
+```
+
+例如:
+
+```shell
+python tools/dataset_converters/mat2json.py work_dirs/res50_mpii_256x256/pred.mat data/mpii/annotations/mpii_val.json pred.json
+```
+
+## Label Studio 数据集
+
+
    +Label Studio + +```bibtex +@misc{Label Studio, + title={{Label Studio}: Data labeling software}, + url={https://github.com/heartexlabs/label-studio}, + note={Open source software available from https://github.com/heartexlabs/label-studio}, + author={ + Maxim Tkachenko and + Mikhail Malyuk and + Andrey Holmanyuk and + Nikolai Liubimov}, + year={2020-2022}, +} +``` + +
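+
+按照本节后面的命令完成转换后,可以用下面的示意片段确认输出文件包含 COCO 格式的基本字段:
+
+```python
+import json
+
+with open('output/result.json') as f:
+    coco = json.load(f)
+
+# COCO 格式标注应包含 images、annotations、categories 等顶层字段
+print(sorted(coco.keys()))
+```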
    + +对于 [Label Studio](https://github.com/heartexlabs/label-studio/) 用户,请依照 [Label Studio 转换工具文档](./label_studio.md) 中的方法进行标注,并将结果导出为 Label Studio 标准的 `.json` 文件,将 `Labeling Interface` 中的 `Code` 保存为 `.xml` 文件。 + +我们提供了一个脚本来将 Label Studio 标准的 `.json` 格式标注文件转换为 COCO 标准的 `.json` 格式。这可以通过运行以下命令完成: + +```shell +python tools/dataset_converters/labelstudio2coco.py ${LS_JSON_FILE} ${LS_XML_FILE} ${OUTPUT_COCO_JSON_FILE} +``` + +例如: + +```shell +python tools/dataset_converters/labelstudio2coco.py config.xml project-1-at-2023-05-13-09-22-91b53efa.json output/result.json +``` diff --git a/docs/zh_cn/dataset_zoo/label_studio.md b/docs/zh_cn/dataset_zoo/label_studio.md index 94cbd6418c..8ae69a9c51 100644 --- a/docs/zh_cn/dataset_zoo/label_studio.md +++ b/docs/zh_cn/dataset_zoo/label_studio.md @@ -1,76 +1,76 @@ -# Label Studio 标注工具转COCO脚本 - -[Label Studio](https://labelstud.io/) 是一款广受欢迎的深度学习标注工具,可以对多种任务进行标注,然而对于关键点标注,Label Studio 无法直接导出成 MMPose 所需要的 COCO 格式。本文将介绍如何使用Label Studio 标注关键点数据,并利用 [labelstudio2coco.py](../../../tools/dataset_converters/labelstudio2coco.py) 工具将其转换为训练所需的格式。 - -## Label Studio 标注要求 - -根据 COCO 格式的要求,每个标注的实例中都需要包含关键点、分割和 bbox 的信息,然而 Label Studio 在标注时会将这些信息分散在不同的实例中,因此需要按一定规则进行标注,才能正常使用后续的脚本。 - -1. 标签接口设置 - -对于一个新建的 Label Studio 项目,首先要设置它的标签接口。这里需要有三种类型的标注:`KeyPointLabels`、`PolygonLabels`、`RectangleLabels`,分别对应 COCO 格式中的`keypoints`、`segmentation`、`bbox`。以下是一个标签接口的示例,可以在项目的`Settings`中找到`Labeling Interface`,点击`Code`,粘贴使用该示例。 - -```xml - - - - - - - - - -``` - -2. 标注顺序 - -由于需要将多个标注实例中的不同类型标注组合到一个实例中,因此采取了按特定顺序标注的方式,以此来判断各标注是否位于同一个实例。标注时须按照 **KeyPointLabels -> PolygonLabels/RectangleLabels** 的顺序标注,其中 KeyPointLabels 的顺序和数量要与 MMPose 配置文件中的`dataset_info`的关键点顺序和数量一致, PolygonLabels 和 RectangleLabels 的标注顺序可以互换,且可以只标注其中一个,只要保证一个实例的标注中,以关键点开始,以非关键点结束即可。下图为标注的示例: - -*注:bbox 和 area 会根据靠后的 PolygonLabels/RectangleLabels 来计算,如若先标 PolygonLabels,那么bbox会是靠后的 RectangleLabels 的范围,面积为矩形的面积,反之则是多边形外接矩形和多边形的面积* - -![image](https://github.com/open-mmlab/mmpose/assets/15847281/b2d004d0-8361-42c5-9180-cfbac0373a94) - -3. 导出标注 - -上述标注完成后,需要将标注进行导出。选择项目界面的`Export`按钮,选择`JSON`格式,再点击`Export`即可下载包含标签的 JSON 格式文件。 - -*注:上述文件中仅仅包含标签,不包含原始图片,因此需要额外提供标注对应的图片。由于 Label Studio 会对过长的文件名进行截断,因此不建议直接使用上传的文件,而是使用`Export`功能中的导出 COCO 格式工具,使用压缩包内的图片文件夹。* - -![image](https://github.com/open-mmlab/mmpose/assets/15847281/9f54ca3d-8cdd-4d7f-8ed6-494badcfeaf2) - -## 转换工具脚本的使用 - -转换工具脚本位于`tools/dataset_converters/labelstudio2coco.py`,使用方式如下: - -```bash -python tools/dataset_converters/labelstudio2coco.py config.xml project-1-at-2023-05-13-09-22-91b53efa.json output/result.json -``` - -其中`config.xml`的内容为标签接口设置中提到的`Labeling Interface`中的`Code`,`project-1-at-2023-05-13-09-22-91b53efa.json`即为导出标注时导出的 Label Studio 格式的 JSON 文件,`output/result.json`为转换后得到的 COCO 格式的 JSON 文件路径,若路径不存在,该脚本会自动创建路径。 - -随后,将图片的文件夹放置在输出目录下,即可完成 COCO 数据集的转换。目录结构示例如下: - -```bash -. 
-├── images -│   ├── 38b480f2.jpg -│   └── aeb26f04.jpg -└── result.json - -``` - -若想在 MMPose 中使用该数据集,可以进行类似如下的修改: - -```python -dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='result.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, -) -``` +# Label Studio 标注工具转COCO脚本 + +[Label Studio](https://labelstud.io/) 是一款广受欢迎的深度学习标注工具,可以对多种任务进行标注,然而对于关键点标注,Label Studio 无法直接导出成 MMPose 所需要的 COCO 格式。本文将介绍如何使用Label Studio 标注关键点数据,并利用 [labelstudio2coco.py](../../../tools/dataset_converters/labelstudio2coco.py) 工具将其转换为训练所需的格式。 + +## Label Studio 标注要求 + +根据 COCO 格式的要求,每个标注的实例中都需要包含关键点、分割和 bbox 的信息,然而 Label Studio 在标注时会将这些信息分散在不同的实例中,因此需要按一定规则进行标注,才能正常使用后续的脚本。 + +1. 标签接口设置 + +对于一个新建的 Label Studio 项目,首先要设置它的标签接口。这里需要有三种类型的标注:`KeyPointLabels`、`PolygonLabels`、`RectangleLabels`,分别对应 COCO 格式中的`keypoints`、`segmentation`、`bbox`。以下是一个标签接口的示例,可以在项目的`Settings`中找到`Labeling Interface`,点击`Code`,粘贴使用该示例。 + +```xml + + + + + + + + + +``` + +2. 标注顺序 + +由于需要将多个标注实例中的不同类型标注组合到一个实例中,因此采取了按特定顺序标注的方式,以此来判断各标注是否位于同一个实例。标注时须按照 **KeyPointLabels -> PolygonLabels/RectangleLabels** 的顺序标注,其中 KeyPointLabels 的顺序和数量要与 MMPose 配置文件中的`dataset_info`的关键点顺序和数量一致, PolygonLabels 和 RectangleLabels 的标注顺序可以互换,且可以只标注其中一个,只要保证一个实例的标注中,以关键点开始,以非关键点结束即可。下图为标注的示例: + +*注:bbox 和 area 会根据靠后的 PolygonLabels/RectangleLabels 来计算,如若先标 PolygonLabels,那么bbox会是靠后的 RectangleLabels 的范围,面积为矩形的面积,反之则是多边形外接矩形和多边形的面积* + +![image](https://github.com/open-mmlab/mmpose/assets/15847281/b2d004d0-8361-42c5-9180-cfbac0373a94) + +3. 导出标注 + +上述标注完成后,需要将标注进行导出。选择项目界面的`Export`按钮,选择`JSON`格式,再点击`Export`即可下载包含标签的 JSON 格式文件。 + +*注:上述文件中仅仅包含标签,不包含原始图片,因此需要额外提供标注对应的图片。由于 Label Studio 会对过长的文件名进行截断,因此不建议直接使用上传的文件,而是使用`Export`功能中的导出 COCO 格式工具,使用压缩包内的图片文件夹。* + +![image](https://github.com/open-mmlab/mmpose/assets/15847281/9f54ca3d-8cdd-4d7f-8ed6-494badcfeaf2) + +## 转换工具脚本的使用 + +转换工具脚本位于`tools/dataset_converters/labelstudio2coco.py`,使用方式如下: + +```bash +python tools/dataset_converters/labelstudio2coco.py config.xml project-1-at-2023-05-13-09-22-91b53efa.json output/result.json +``` + +其中`config.xml`的内容为标签接口设置中提到的`Labeling Interface`中的`Code`,`project-1-at-2023-05-13-09-22-91b53efa.json`即为导出标注时导出的 Label Studio 格式的 JSON 文件,`output/result.json`为转换后得到的 COCO 格式的 JSON 文件路径,若路径不存在,该脚本会自动创建路径。 + +随后,将图片的文件夹放置在输出目录下,即可完成 COCO 数据集的转换。目录结构示例如下: + +```bash +. +├── images +│   ├── 38b480f2.jpg +│   └── aeb26f04.jpg +└── result.json + +``` + +若想在 MMPose 中使用该数据集,可以进行类似如下的修改: + +```python +dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='result.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, +) +``` diff --git a/docs/zh_cn/faq.md b/docs/zh_cn/faq.md index b1e6998396..c87ec2de6f 100644 --- a/docs/zh_cn/faq.md +++ b/docs/zh_cn/faq.md @@ -1,148 +1,148 @@ -# FAQ - -We list some common issues faced by many users and their corresponding solutions here. -Feel free to enrich the list if you find any frequent issues and have ways to help others to solve them. -If the contents here do not cover your issue, please create an issue using the [provided templates](/.github/ISSUE_TEMPLATE/error-report.md) and make sure you fill in all required information in the template. - -## Installation - -Compatibility issue between MMCV and MMPose; "AssertionError: MMCV==xxx is used but incompatible. Please install mmcv>=xxx, \<=xxx." 
- -Here are the version correspondences between `mmdet`, `mmcv` and `mmpose`: - -- mmdet 2.x \<=> mmpose 0.x \<=> mmcv 1.x -- mmdet 3.x \<=> mmpose 1.x \<=> mmcv 2.x - -Detailed compatible MMPose and MMCV versions are shown as below. Please choose the correct version of MMCV to avoid installation issues. - -### MMPose 1.x - -| MMPose version | MMCV/MMEngine version | -| :------------: | :-----------------------------: | -| 1.1.0 | mmcv>=2.0.1, mmengine>=0.8.0 | -| 1.0.0 | mmcv>=2.0.0, mmengine>=0.7.0 | -| 1.0.0rc1 | mmcv>=2.0.0rc4, mmengine>=0.6.0 | -| 1.0.0rc0 | mmcv>=2.0.0rc0, mmengine>=0.0.1 | -| 1.0.0b0 | mmcv>=2.0.0rc0, mmengine>=0.0.1 | - -### MMPose 0.x - -| MMPose version | MMCV version | -| :------------: | :-----------------------: | -| 0.x | mmcv-full>=1.3.8, \<1.8.0 | -| 0.29.0 | mmcv-full>=1.3.8, \<1.7.0 | -| 0.28.1 | mmcv-full>=1.3.8, \<1.7.0 | -| 0.28.0 | mmcv-full>=1.3.8, \<1.6.0 | -| 0.27.0 | mmcv-full>=1.3.8, \<1.6.0 | -| 0.26.0 | mmcv-full>=1.3.8, \<1.6.0 | -| 0.25.1 | mmcv-full>=1.3.8, \<1.6.0 | -| 0.25.0 | mmcv-full>=1.3.8, \<1.5.0 | -| 0.24.0 | mmcv-full>=1.3.8, \<1.5.0 | -| 0.23.0 | mmcv-full>=1.3.8, \<1.5.0 | -| 0.22.0 | mmcv-full>=1.3.8, \<1.5.0 | -| 0.21.0 | mmcv-full>=1.3.8, \<1.5.0 | -| 0.20.0 | mmcv-full>=1.3.8, \<1.4.0 | -| 0.19.0 | mmcv-full>=1.3.8, \<1.4.0 | -| 0.18.0 | mmcv-full>=1.3.8, \<1.4.0 | -| 0.17.0 | mmcv-full>=1.3.8, \<1.4.0 | -| 0.16.0 | mmcv-full>=1.3.8, \<1.4.0 | -| 0.14.0 | mmcv-full>=1.1.3, \<1.4.0 | -| 0.13.0 | mmcv-full>=1.1.3, \<1.4.0 | -| 0.12.0 | mmcv-full>=1.1.3, \<1.3 | -| 0.11.0 | mmcv-full>=1.1.3, \<1.3 | -| 0.10.0 | mmcv-full>=1.1.3, \<1.3 | -| 0.9.0 | mmcv-full>=1.1.3, \<1.3 | -| 0.8.0 | mmcv-full>=1.1.1, \<1.2 | -| 0.7.0 | mmcv-full>=1.1.1, \<1.2 | - -- **Unable to install xtcocotools** - - 1. Try to install it using pypi manually `pip install xtcocotools`. - 2. If step1 does not work. Try to install it from [source](https://github.com/jin-s13/xtcocoapi). - - ``` - git clone https://github.com/jin-s13/xtcocoapi - cd xtcocoapi - python setup.py install - ``` - -- **No matching distribution found for xtcocotools>=1.6** - - 1. Install cython by `pip install cython`. - 2. Install xtcocotools from [source](https://github.com/jin-s13/xtcocoapi). - - ``` - git clone https://github.com/jin-s13/xtcocoapi - cd xtcocoapi - python setup.py install - ``` - -- **"No module named 'mmcv.ops'"; "No module named 'mmcv.\_ext'"** - - 1. Uninstall existing mmcv in the environment using `pip uninstall mmcv`. - 2. Install mmcv-full following the [installation instruction](https://mmcv.readthedocs.io/en/latest/#installation). - -## Data - -- **What if my custom dataset does not have bounding box label?** - - We can estimate the bounding box of a person as the minimal box that tightly bounds all the keypoints. - -- **What is `COCO_val2017_detections_AP_H_56_person.json`? Can I train pose models without it?** - - "COCO_val2017_detections_AP_H_56_person.json" contains the "detected" human bounding boxes for COCO validation set, which are generated by FasterRCNN. - One can choose to use gt bounding boxes to evaluate models, by setting `bbox_file=None''` in `val_dataloader.dataset` in config. Or one can use detected boxes to evaluate - the generalizability of models, by setting `bbox_file='COCO_val2017_detections_AP_H_56_person.json'`. - -## Training - -- **RuntimeError: Address already in use** - - Set the environment variables `MASTER_PORT=XXX`. 
For example, - `MASTER_PORT=29517 GPUS=16 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh Test res50 configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192.py work_dirs/res50_coco_256x192` - -- **"Unexpected keys in source state dict" when loading pre-trained weights** - - It's normal that some layers in the pretrained model are not used in the pose model. ImageNet-pretrained classification network and the pose network may have different architectures (e.g. no classification head). So some unexpected keys in source state dict is actually expected. - -- **How to use trained models for backbone pre-training ?** - - Refer to [Migration - Step3: Model - Backbone](../migration.md). - - When training, the unexpected keys will be ignored. - -- **How to visualize the training accuracy/loss curves in real-time ?** - - Use `TensorboardLoggerHook` in `log_config` like - - ```python - log_config=dict(interval=20, hooks=[dict(type='TensorboardLoggerHook')]) - ``` - - You can refer to [user_guides/visualization.md](../user_guides/visualization.md). - -- **Log info is NOT printed** - - Use smaller log interval. For example, change `interval=50` to `interval=1` in the config. - -## Evaluation - -- **How to evaluate on MPII test dataset?** - Since we do not have the ground-truth for test dataset, we cannot evaluate it 'locally'. - If you would like to evaluate the performance on test set, you have to upload the pred.mat (which is generated during testing) to the official server via email, according to [the MPII guideline](http://human-pose.mpi-inf.mpg.de/#evaluation). - -- **For top-down 2d pose estimation, why predicted joint coordinates can be out of the bounding box (bbox)?** - We do not directly use the bbox to crop the image. bbox will be first transformed to center & scale, and the scale will be multiplied by a factor (1.25) to include some context. If the ratio of width/height is different from that of model input (possibly 192/256), we will adjust the bbox. - -## Inference - -- **How to run mmpose on CPU?** - - Run demos with `--device=cpu`. - -- **How to speed up inference?** - - For top-down models, try to edit the config file. For example, - - 1. set `flip_test=False` in `init_cfg` in the config file. - 2. use faster human bounding box detector, see [MMDetection](https://mmdetection.readthedocs.io/zh_CN/3.x/model_zoo.html). +# FAQ + +We list some common issues faced by many users and their corresponding solutions here. +Feel free to enrich the list if you find any frequent issues and have ways to help others to solve them. +If the contents here do not cover your issue, please create an issue using the [provided templates](/.github/ISSUE_TEMPLATE/error-report.md) and make sure you fill in all required information in the template. + +## Installation + +Compatibility issue between MMCV and MMPose; "AssertionError: MMCV==xxx is used but incompatible. Please install mmcv>=xxx, \<=xxx." + +Here are the version correspondences between `mmdet`, `mmcv` and `mmpose`: + +- mmdet 2.x \<=> mmpose 0.x \<=> mmcv 1.x +- mmdet 3.x \<=> mmpose 1.x \<=> mmcv 2.x + +Detailed compatible MMPose and MMCV versions are shown as below. Please choose the correct version of MMCV to avoid installation issues. 
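+
+If you are not sure which versions are installed in your current environment, a quick way to check (assuming the packages can be imported) is:
+
+```python
+import mmcv
+import mmengine
+import mmpose
+
+# Print the installed versions and compare them with the tables below
+print('mmcv:', mmcv.__version__)
+print('mmengine:', mmengine.__version__)
+print('mmpose:', mmpose.__version__)
+```
+
+For MMPose 0.x, `mmengine` is not required, so that import can simply be skipped.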
+ +### MMPose 1.x + +| MMPose version | MMCV/MMEngine version | +| :------------: | :-----------------------------: | +| 1.1.0 | mmcv>=2.0.1, mmengine>=0.8.0 | +| 1.0.0 | mmcv>=2.0.0, mmengine>=0.7.0 | +| 1.0.0rc1 | mmcv>=2.0.0rc4, mmengine>=0.6.0 | +| 1.0.0rc0 | mmcv>=2.0.0rc0, mmengine>=0.0.1 | +| 1.0.0b0 | mmcv>=2.0.0rc0, mmengine>=0.0.1 | + +### MMPose 0.x + +| MMPose version | MMCV version | +| :------------: | :-----------------------: | +| 0.x | mmcv-full>=1.3.8, \<1.8.0 | +| 0.29.0 | mmcv-full>=1.3.8, \<1.7.0 | +| 0.28.1 | mmcv-full>=1.3.8, \<1.7.0 | +| 0.28.0 | mmcv-full>=1.3.8, \<1.6.0 | +| 0.27.0 | mmcv-full>=1.3.8, \<1.6.0 | +| 0.26.0 | mmcv-full>=1.3.8, \<1.6.0 | +| 0.25.1 | mmcv-full>=1.3.8, \<1.6.0 | +| 0.25.0 | mmcv-full>=1.3.8, \<1.5.0 | +| 0.24.0 | mmcv-full>=1.3.8, \<1.5.0 | +| 0.23.0 | mmcv-full>=1.3.8, \<1.5.0 | +| 0.22.0 | mmcv-full>=1.3.8, \<1.5.0 | +| 0.21.0 | mmcv-full>=1.3.8, \<1.5.0 | +| 0.20.0 | mmcv-full>=1.3.8, \<1.4.0 | +| 0.19.0 | mmcv-full>=1.3.8, \<1.4.0 | +| 0.18.0 | mmcv-full>=1.3.8, \<1.4.0 | +| 0.17.0 | mmcv-full>=1.3.8, \<1.4.0 | +| 0.16.0 | mmcv-full>=1.3.8, \<1.4.0 | +| 0.14.0 | mmcv-full>=1.1.3, \<1.4.0 | +| 0.13.0 | mmcv-full>=1.1.3, \<1.4.0 | +| 0.12.0 | mmcv-full>=1.1.3, \<1.3 | +| 0.11.0 | mmcv-full>=1.1.3, \<1.3 | +| 0.10.0 | mmcv-full>=1.1.3, \<1.3 | +| 0.9.0 | mmcv-full>=1.1.3, \<1.3 | +| 0.8.0 | mmcv-full>=1.1.1, \<1.2 | +| 0.7.0 | mmcv-full>=1.1.1, \<1.2 | + +- **Unable to install xtcocotools** + + 1. Try to install it using pypi manually `pip install xtcocotools`. + 2. If step1 does not work. Try to install it from [source](https://github.com/jin-s13/xtcocoapi). + + ``` + git clone https://github.com/jin-s13/xtcocoapi + cd xtcocoapi + python setup.py install + ``` + +- **No matching distribution found for xtcocotools>=1.6** + + 1. Install cython by `pip install cython`. + 2. Install xtcocotools from [source](https://github.com/jin-s13/xtcocoapi). + + ``` + git clone https://github.com/jin-s13/xtcocoapi + cd xtcocoapi + python setup.py install + ``` + +- **"No module named 'mmcv.ops'"; "No module named 'mmcv.\_ext'"** + + 1. Uninstall existing mmcv in the environment using `pip uninstall mmcv`. + 2. Install mmcv-full following the [installation instruction](https://mmcv.readthedocs.io/en/latest/#installation). + +## Data + +- **What if my custom dataset does not have bounding box label?** + + We can estimate the bounding box of a person as the minimal box that tightly bounds all the keypoints. + +- **What is `COCO_val2017_detections_AP_H_56_person.json`? Can I train pose models without it?** + + "COCO_val2017_detections_AP_H_56_person.json" contains the "detected" human bounding boxes for COCO validation set, which are generated by FasterRCNN. + One can choose to use gt bounding boxes to evaluate models, by setting `bbox_file=None''` in `val_dataloader.dataset` in config. Or one can use detected boxes to evaluate + the generalizability of models, by setting `bbox_file='COCO_val2017_detections_AP_H_56_person.json'`. + +## Training + +- **RuntimeError: Address already in use** + + Set the environment variables `MASTER_PORT=XXX`. For example, + `MASTER_PORT=29517 GPUS=16 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh Test res50 configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_8xb64-210e_coco-256x192.py work_dirs/res50_coco_256x192` + +- **"Unexpected keys in source state dict" when loading pre-trained weights** + + It's normal that some layers in the pretrained model are not used in the pose model. 
ImageNet-pretrained classification network and the pose network may have different architectures (e.g. no classification head). So some unexpected keys in source state dict is actually expected. + +- **How to use trained models for backbone pre-training ?** + + Refer to [Migration - Step3: Model - Backbone](../migration.md). + + When training, the unexpected keys will be ignored. + +- **How to visualize the training accuracy/loss curves in real-time ?** + + Use `TensorboardLoggerHook` in `log_config` like + + ```python + log_config=dict(interval=20, hooks=[dict(type='TensorboardLoggerHook')]) + ``` + + You can refer to [user_guides/visualization.md](../user_guides/visualization.md). + +- **Log info is NOT printed** + + Use smaller log interval. For example, change `interval=50` to `interval=1` in the config. + +## Evaluation + +- **How to evaluate on MPII test dataset?** + Since we do not have the ground-truth for test dataset, we cannot evaluate it 'locally'. + If you would like to evaluate the performance on test set, you have to upload the pred.mat (which is generated during testing) to the official server via email, according to [the MPII guideline](http://human-pose.mpi-inf.mpg.de/#evaluation). + +- **For top-down 2d pose estimation, why predicted joint coordinates can be out of the bounding box (bbox)?** + We do not directly use the bbox to crop the image. bbox will be first transformed to center & scale, and the scale will be multiplied by a factor (1.25) to include some context. If the ratio of width/height is different from that of model input (possibly 192/256), we will adjust the bbox. + +## Inference + +- **How to run mmpose on CPU?** + + Run demos with `--device=cpu`. + +- **How to speed up inference?** + + For top-down models, try to edit the config file. For example, + + 1. set `flip_test=False` in `init_cfg` in the config file. + 2. use faster human bounding box detector, see [MMDetection](https://mmdetection.readthedocs.io/zh_CN/3.x/model_zoo.html). diff --git a/docs/zh_cn/guide_to_framework.md b/docs/zh_cn/guide_to_framework.md index 349abf2358..c8184a91c5 100644 --- a/docs/zh_cn/guide_to_framework.md +++ b/docs/zh_cn/guide_to_framework.md @@ -1,682 +1,682 @@ -# 20 分钟了解 MMPose 架构设计 - -MMPose 1.0 与之前的版本有较大改动,对部分模块进行了重新设计和组织,降低代码冗余度,提升运行效率,降低学习难度。 - -MMPose 1.0 采用了全新的模块结构设计以精简代码,提升运行效率,降低学习难度。对于有一定深度学习基础的用户,本章节提供了对 MMPose 架构设计的总体介绍。不论你是**旧版 MMPose 的用户**,还是**希望直接从 MMPose 1.0 上手的新用户**,都可以通过本教程了解如何构建一个基于 MMPose 1.0 的项目。 - -```{note} -本教程包含了使用 MMPose 1.0 时开发者会关心的内容: - -- 整体代码架构与设计逻辑 - -- 如何用config文件管理模块 - -- 如何使用自定义数据集 - -- 如何添加新的模块(骨干网络、模型头部、损失函数等) -``` - -以下是这篇教程的目录: - -- [20 分钟了解 MMPose 架构设计](#20-分钟了解-mmpose-架构设计) - - [总览](#总览) - - [Step1:配置文件](#step1配置文件) - - [Step2:数据](#step2数据) - - [数据集元信息](#数据集元信息) - - [数据集](#数据集) - - [数据流水线](#数据流水线) - - [i. 数据增强](#i-数据增强) - - [ii. 数据变换](#ii-数据变换) - - [iii. 数据编码](#iii-数据编码) - - [iv. 
数据打包](#iv-数据打包) - - [Step3: 模型](#step3-模型) - - [前处理器(DataPreprocessor)](#前处理器datapreprocessor) - - [主干网络(Backbone)](#主干网络backbone) - - [颈部模块(Neck)](#颈部模块neck) - - [预测头(Head)](#预测头head) - -## 总览 - -![overall-cn](https://user-images.githubusercontent.com/13503330/187830967-f2d7bf40-6261-42f3-91a5-ae045fa0dc0c.png) - -一般来说,开发者在项目开发过程中经常接触内容的主要有**五个**方面: - -- **通用**:环境、钩子(Hook)、模型权重存取(Checkpoint)、日志(Logger)等 - -- **数据**:数据集、数据读取(Dataloader)、数据增强等 - -- **训练**:优化器、学习率调整等 - -- **模型**:主干网络、颈部模块(Neck)、预测头模块(Head)、损失函数等 - -- **评测**:评测指标(Metric)、评测器(Evaluator)等 - -其中**通用**、**训练**和**评测**相关的模块往往由训练框架提供,开发者只需要调用和调整参数,不需要自行实现,开发者主要实现的是**数据**和**模型**部分。 - -## Step1:配置文件 - -在MMPose中,我们通常 python 格式的配置文件,用于整个项目的定义、参数管理,因此我们强烈建议第一次接触 MMPose 的开发者,查阅 [配置文件](./user_guides/configs.md) 学习配置文件的定义。 - -需要注意的是,所有新增的模块都需要使用注册器(Registry)进行注册,并在对应目录的 `__init__.py` 中进行 `import`,以便能够使用配置文件构建其实例。 - -## Step2:数据 - -MMPose 数据的组织主要包含三个方面: - -- 数据集元信息 - -- 数据集 - -- 数据流水线 - -### 数据集元信息 - -元信息指具体标注之外的数据集信息。姿态估计数据集的元信息通常包括:关键点和骨骼连接的定义、对称性、关键点性质(如关键点权重、标注标准差、所属上下半身)等。这些信息在数据在数据处理、模型训练和测试中有重要作用。在 MMPose 中,数据集的元信息使用 python 格式的配置文件保存,位于 `$MMPOSE/configs/_base_/datasets` 目录下。 - -在 MMPose 中使用自定义数据集时,你需要增加对应的元信息配置文件。以 MPII 数据集(`$MMPOSE/configs/_base_/datasets/mpii.py`)为例: - -```Python -dataset_info = dict( - dataset_name='mpii', - paper_info=dict( - author='Mykhaylo Andriluka and Leonid Pishchulin and ' - 'Peter Gehler and Schiele, Bernt', - title='2D Human Pose Estimation: New Benchmark and ' - 'State of the Art Analysis', - container='IEEE Conference on Computer Vision and ' - 'Pattern Recognition (CVPR)', - year='2014', - homepage='http://human-pose.mpi-inf.mpg.de/', - ), - keypoint_info={ - 0: - dict( - name='right_ankle', - id=0, - color=[255, 128, 0], - type='lower', - swap='left_ankle'), - ## 内容省略 - }, - skeleton_info={ - 0: - dict(link=('right_ankle', 'right_knee'), id=0, color=[255, 128, 0]), - ## 内容省略 - }, - joint_weights=[ - 1.5, 1.2, 1., 1., 1.2, 1.5, 1., 1., 1., 1., 1.5, 1.2, 1., 1., 1.2, 1.5 - ], - # 使用 COCO 数据集中提供的 sigmas 值 - sigmas=[ - 0.089, 0.083, 0.107, 0.107, 0.083, 0.089, 0.026, 0.026, 0.026, 0.026, - 0.062, 0.072, 0.179, 0.179, 0.072, 0.062 - ]) -``` - -在模型配置文件中,你需要为自定义数据集指定对应的元信息配置文件。假如该元信息配置文件路径为 `$MMPOSE/configs/_base_/datasets/custom.py`,指定方式如下: - -```python -# dataset and dataloader settings -dataset_type = 'MyCustomDataset' # or 'CocoDataset' -train_dataloader = dict( - batch_size=2, - dataset=dict( - type=dataset_type, - data_root='root/of/your/train/data', - ann_file='path/to/your/train/json', - data_prefix=dict(img='path/to/your/train/img'), - # 指定对应的元信息配置文件 - metainfo=dict(from_file='configs/_base_/datasets/custom.py'), - ...), - ) -val_dataloader = dict( - batch_size=2, - dataset=dict( - type=dataset_type, - data_root='root/of/your/val/data', - ann_file='path/to/your/val/json', - data_prefix=dict(img='path/to/your/val/img'), - # 指定对应的元信息配置文件 - metainfo=dict(from_file='configs/_base_/datasets/custom.py'), - ...), - ) -test_dataloader = val_dataloader -``` - -### 数据集 - -在 MMPose 中使用自定义数据集时,我们推荐将数据转化为已支持的格式(如 COCO 或 MPII),并直接使用我们提供的对应数据集实现。如果这种方式不可行,则用户需要实现自己的数据集类。 - -MMPose 中的大部分 2D 关键点数据集**以 COCO 形式组织**,为此我们提供了基类 [BaseCocoStyleDataset](/mmpose/datasets/datasets/base/base_coco_style_dataset.py)。我们推荐用户继承该基类,并按需重写它的方法(通常是 `__init__()` 和 `_load_annotations()` 方法),以扩展到新的 2D 关键点数据集。 - -```{note} -关于COCO数据格式的详细说明请参考 [COCO](./dataset_zoo/2d_body_keypoint.md) 。 -``` - -```{note} -在 MMPose 中 bbox 的数据格式采用 `xyxy`,而不是 `xywh`,这与 [MMDetection](https://github.com/open-mmlab/mmdetection) 等其他 OpenMMLab 成员保持一致。为了实现不同 bbox 
格式之间的转换,我们提供了丰富的函数:`bbox_xyxy2xywh`、`bbox_xywh2xyxy`、`bbox_xyxy2cs`等。这些函数定义在`$MMPOSE/mmpose/structures/bbox/transforms.py`。 -``` - -下面我们以MPII数据集的实现(`$MMPOSE/mmpose/datasets/datasets/body/mpii_dataset.py`)为例: - -```Python -@DATASETS.register_module() -class MpiiDataset(BaseCocoStyleDataset): - METAINFO: dict = dict(from_file='configs/_base_/datasets/mpii.py') - - def __init__(self, - ## 内容省略 - headbox_file: Optional[str] = None, - ## 内容省略): - - if headbox_file: - if data_mode != 'topdown': - raise ValueError( - f'{self.__class__.__name__} is set to {data_mode}: ' - 'mode, while "headbox_file" is only ' - 'supported in topdown mode.') - - if not test_mode: - raise ValueError( - f'{self.__class__.__name__} has `test_mode==False` ' - 'while "headbox_file" is only ' - 'supported when `test_mode==True`.') - - headbox_file_type = headbox_file[-3:] - allow_headbox_file_type = ['mat'] - if headbox_file_type not in allow_headbox_file_type: - raise KeyError( - f'The head boxes file type {headbox_file_type} is not ' - f'supported. Should be `mat` but got {headbox_file_type}.') - self.headbox_file = headbox_file - - super().__init__( - ## 内容省略 - ) - - def _load_annotations(self) -> List[dict]: - """Load data from annotations in MPII format.""" - check_file_exist(self.ann_file) - with open(self.ann_file) as anno_file: - anns = json.load(anno_file) - - if self.headbox_file: - check_file_exist(self.headbox_file) - headbox_dict = loadmat(self.headbox_file) - headboxes_src = np.transpose(headbox_dict['headboxes_src'], - [2, 0, 1]) - SC_BIAS = 0.6 - - data_list = [] - ann_id = 0 - - # mpii bbox scales are normalized with factor 200. - pixel_std = 200. - - for idx, ann in enumerate(anns): - center = np.array(ann['center'], dtype=np.float32) - scale = np.array([ann['scale'], ann['scale']], - dtype=np.float32) * pixel_std - - # Adjust center/scale slightly to avoid cropping limbs - if center[0] != -1: - center[1] = center[1] + 15. 
/ pixel_std * scale[1] - - # MPII uses matlab format, index is 1-based, - # we should first convert to 0-based index - center = center - 1 - - # unify shape with coco datasets - center = center.reshape(1, -1) - scale = scale.reshape(1, -1) - bbox = bbox_cs2xyxy(center, scale) - - # load keypoints in shape [1, K, 2] and keypoints_visible in [1, K] - keypoints = np.array(ann['joints']).reshape(1, -1, 2) - keypoints_visible = np.array(ann['joints_vis']).reshape(1, -1) - - data_info = { - 'id': ann_id, - 'img_id': int(ann['image'].split('.')[0]), - 'img_path': osp.join(self.data_prefix['img'], ann['image']), - 'bbox_center': center, - 'bbox_scale': scale, - 'bbox': bbox, - 'bbox_score': np.ones(1, dtype=np.float32), - 'keypoints': keypoints, - 'keypoints_visible': keypoints_visible, - } - - if self.headbox_file: - # calculate the diagonal length of head box as norm_factor - headbox = headboxes_src[idx] - head_size = np.linalg.norm(headbox[1] - headbox[0], axis=0) - head_size *= SC_BIAS - data_info['head_size'] = head_size.reshape(1, -1) - - data_list.append(data_info) - ann_id = ann_id + 1 - - return data_list -``` - -在对MPII数据集进行支持时,由于MPII需要读入 `head_size` 信息来计算 `PCKh`,因此我们在`__init__()`中增加了 `headbox_file`,并重载了 `_load_annotations()` 来完成数据组织。 - -如果自定义数据集无法被 `BaseCocoStyleDataset` 支持,你需要直接继承 [MMEngine](https://github.com/open-mmlab/mmengine) 中提供的 `BaseDataset` 基类。具体方法请参考相关[文档](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/basedataset.html)。 - -### 数据流水线 - -一个典型的数据流水线配置如下: - -```Python -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -test_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] -``` - -在关键点检测任务中,数据一般会在三个尺度空间中变换: - -- **原始图片空间**:图片存储时的原始空间,不同图片的尺寸不一定相同 - -- **输入图片空间**:模型输入的图片尺度空间,所有**图片**和**标注**被缩放到输入尺度,如 `256x256`,`256x192` 等 - -- **输出尺度空间**:模型输出和训练监督信息所在的尺度空间,如`64x64(热力图)`,`1x1(回归坐标值)`等 - -数据在三个空间中变换的流程如图所示: - -![migration-cn](https://user-images.githubusercontent.com/13503330/187831574-13804daf-f498-47c2-ba43-64b8e6ffe3dd.png) - -在MMPose中,数据变换所需要的模块在`$MMPOSE/mmpose/datasets/transforms`目录下,它们的工作流程如图所示: - -![transforms-cn](https://user-images.githubusercontent.com/13503330/187831611-8db89e20-95c7-42bc-8b0d-700fadf60328.png) - -#### i. 数据增强 - -数据增强中常用的变换存放在 `$MMPOSE/mmpose/datasets/transforms/common_transforms.py` 中,如 `RandomFlip`、`RandomHalfBody` 等。 - -对于 top-down 方法,`Shift`、`Rotate`、`Resize` 操作由 `RandomBBoxTransform`来实现;对于 bottom-up 方法,这些则是由 `BottomupRandomAffine` 实现。 - -```{note} -值得注意的是,大部分数据变换都依赖于 `bbox_center` 和 `bbox_scale`,它们可以通过 `GetBBoxCenterScale` 来得到。 -``` - -#### ii. 数据变换 - -我们使用仿射变换,将图像和坐标标注从原始图片空间变换到输入图片空间。这一操作在 top-down 方法中由 `TopdownAffine` 完成,在 bottom-up 方法中则由 `BottomupRandomAffine` 完成。 - -#### iii. 
数据编码 - -在模型训练时,数据从原始空间变换到输入图片空间后,需要使用 `GenerateTarget` 来生成训练所需的监督目标(比如用坐标值生成高斯热图),我们将这一过程称为编码(Encode),反之,通过高斯热图得到对应坐标值的过程称为解码(Decode)。 - -在 MMPose 中,我们将编码和解码过程集合成一个编解码器(Codec),在其中实现 `encode()` 和 `decode()`。 - -目前 MMPose 支持生成以下类型的监督目标: - -- `heatmap`: 高斯热图 - -- `keypoint_label`: 关键点标签(如归一化的坐标值) - -- `keypoint_xy_label`: 单个坐标轴关键点标签 - -- `heatmap+keypoint_label`: 同时生成高斯热图和关键点标签 - -- `multiscale_heatmap`: 多尺度高斯热图 - -生成的监督目标会按以下关键字进行封装: - -- `heatmaps`:高斯热图 - -- `keypoint_labels`:关键点标签(如归一化的坐标值) - -- `keypoint_x_labels`:x 轴关键点标签 - -- `keypoint_y_labels`:y 轴关键点标签 - -- `keypoint_weights`:关键点权重 - -```Python -@TRANSFORMS.register_module() -class GenerateTarget(BaseTransform): - """Encode keypoints into Target. - - Added Keys (depends on the args): - - heatmaps - - keypoint_labels - - keypoint_x_labels - - keypoint_y_labels - - keypoint_weights - """ -``` - -值得注意的是,我们对 top-down 和 bottom-up 的数据格式进行了统一,这意味着标注信息中会新增一个维度来代表同一张图里的不同目标(如人),格式为: - -```Python -[batch_size, num_instances, num_keypoints, dim_coordinates] -``` - -- top-down:`[B, 1, K, D]` - -- Bottom-up: `[B, N, K, D]` - -当前已经支持的编解码器定义在 `$MMPOSE/mmpose/codecs` 目录下,如果你需要自定新的编解码器,可以前往[编解码器](./user_guides/codecs.md)了解更多详情。 - -#### iv. 数据打包 - -数据经过前处理变换后,最终需要通过 `PackPoseInputs` 打包成数据样本。该操作定义在 `$MMPOSE/mmpose/datasets/transforms/formatting.py` 中。 - -打包过程会将数据流水线中用字典 `results` 存储的数据转换成用 MMPose 所需的标准数据结构, 如 `InstanceData`,`PixelData`,`PoseDataSample` 等。 - -具体而言,我们将数据样本内容分为 `gt`(标注真值) 和 `pred`(模型预测)两部分,它们都包含以下数据项: - -- **instances**(numpy.array):实例级别的原始标注或预测结果,属于原始尺度空间 - -- **instance_labels**(torch.tensor):实例级别的训练标签(如归一化的坐标值、关键点可见性),属于输出尺度空间 - -- **fields**(torch.tensor):像素级别的训练标签(如高斯热图)或预测结果,属于输出尺度空间 - -下面是 `PoseDataSample` 底层实现的例子: - -```Python -def get_pose_data_sample(self): - # meta - pose_meta = dict( - img_shape=(600, 900), # [h, w, c] - crop_size=(256, 192), # [h, w] - heatmap_size=(64, 48), # [h, w] - ) - - # gt_instances - gt_instances = InstanceData() - gt_instances.bboxes = np.random.rand(1, 4) - gt_instances.keypoints = np.random.rand(1, 17, 2) - - # gt_instance_labels - gt_instance_labels = InstanceData() - gt_instance_labels.keypoint_labels = torch.rand(1, 17, 2) - gt_instance_labels.keypoint_weights = torch.rand(1, 17) - - # pred_instances - pred_instances = InstanceData() - pred_instances.keypoints = np.random.rand(1, 17, 2) - pred_instances.keypoint_scores = np.random.rand(1, 17) - - # gt_fields - gt_fields = PixelData() - gt_fields.heatmaps = torch.rand(17, 64, 48) - - # pred_fields - pred_fields = PixelData() - pred_fields.heatmaps = torch.rand(17, 64, 48) - data_sample = PoseDataSample( - gt_instances=gt_instances, - pred_instances=pred_instances, - gt_fields=gt_fields, - pred_fields=pred_fields, - metainfo=pose_meta) - - return data_sample -``` - -## Step3: 模型 - -在 MMPose 1.0中,模型由以下几部分构成: - -- **预处理器(DataPreprocessor)**:完成图像归一化和通道转换等前处理 - -- **主干网络 (Backbone)**:用于特征提取 - -- **颈部模块(Neck)**:GAP,FPN 等可选项 - -- **预测头(Head)**:用于实现核心算法功能和损失函数定义 - -我们在 `$MMPOSE/models/pose_estimators/base.py` 下为姿态估计模型定义了一个基类 `BasePoseEstimator`,所有的模型(如 `TopdownPoseEstimator`)都需要继承这个基类,并重载对应的方法。 - -在模型的 `forward()` 方法中提供了三种不同的模式: - -- `mode == 'loss'`:返回损失函数计算的结果,用于模型训练 - -- `mode == 'predict'`:返回输入尺度下的预测结果,用于模型推理 - -- `mode == 'tensor'`:返回输出尺度下的模型输出,即只进行模型前向传播,用于模型导出 - -开发者需要在 `PoseEstimator` 中按照模型结构调用对应的 `Registry` ,对模块进行实例化。以 top-down 模型为例: - -```Python -@MODELS.register_module() -class TopdownPoseEstimator(BasePoseEstimator): - def __init__(self, - backbone: ConfigType, - neck: OptConfigType = None, - head: OptConfigType = None, - train_cfg: 
OptConfigType = None, - test_cfg: OptConfigType = None, - data_preprocessor: OptConfigType = None, - init_cfg: OptMultiConfig = None): - super().__init__(data_preprocessor, init_cfg) - - self.backbone = MODELS.build(backbone) - - if neck is not None: - self.neck = MODELS.build(neck) - - if head is not None: - self.head = MODELS.build(head) -``` - -### 前处理器(DataPreprocessor) - -从 MMPose 1.0 开始,我们在模型中添加了新的前处理器模块,用以完成图像归一化、通道顺序变换等操作。这样做的好处是可以利用 GPU 等设备的计算能力加快计算,并使模型在导出和部署时更具完整性。 - -在配置文件中,一个常见的 `data_preprocessor` 如下: - -```Python -data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), -``` - -它会将输入图片的通道顺序从 `bgr` 转换为 `rgb`,并根据 `mean` 和 `std` 进行数据归一化。 - -### 主干网络(Backbone) - -MMPose 实现的主干网络存放在 `$MMPOSE/mmpose/models/backbones` 目录下。 - -在实际开发中,开发者经常会使用预训练的网络权重进行迁移学习,这能有效提升模型在小数据集上的性能。 在 MMPose 中,只需要在配置文件 `backbone` 的 `init_cfg` 中设置: - -```Python -init_cfg=dict( - type='Pretrained', - checkpoint='PATH/TO/YOUR_MODEL_WEIGHTS.pth'), -``` - -如果你想只加载一个训练好的 checkpoint 的 backbone 部分,你需要指明一下前缀 `prefix`: - -```Python -init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='PATH/TO/YOUR_CHECKPOINT.pth'), -``` - -其中 `checkpoint` 既可以是本地路径,也可以是下载链接。因此,如果你想使用 Torchvision 提供的预训练模型(比如ResNet50),可以使用: - -```Python -init_cfg=dict( - type='Pretrained', - checkpoint='torchvision://resnet50') -``` - -除了这些常用的主干网络以外,你还可以从 MMClassification 等其他 OpenMMLab 项目中方便地迁移主干网络,它们都遵循同一套配置文件格式,并提供了预训练权重可供使用。 - -需要强调的是,如果你加入了新的主干网络,需要在模型定义时进行注册: - -```Python -@MODELS.register_module() -class YourBackbone(BaseBackbone): -``` - -同时在 `$MMPOSE/mmpose/models/backbones/__init__.py` 下进行 `import`,并加入到 `__all__` 中,才能被配置文件正确地调用。 - -### 颈部模块(Neck) - -颈部模块通常是介于主干网络和预测头之间的模块,在部分模型算法中会用到,常见的颈部模块有: - -- Global Average Pooling (GAP) - -- Feature Pyramid Networks (FPN) - -- Feature Map Processor (FMP) - - `FeatureMapProcessor` 是一个通用的 PyTorch 模块,旨在通过选择、拼接和缩放等非参数变换将主干网络输出的特征图转换成适合预测头的格式。以下是一些操作的配置方式及效果示意图: - - - 选择操作 - - ```python - neck=dict(type='FeatureMapProcessor', select_index=0) - ``` - -
    - - - 拼接操作 - - ```python - neck=dict(type='FeatureMapProcessor', concat=True) - ``` - -
    - - 拼接之前,其它特征图会被缩放到和序号为 0 的特征图相同的尺寸。 - - - 缩放操作 - - ```python - neck=dict(type='FeatureMapProcessor', scale_factor=2.0) - ``` - -
    - -### 预测头(Head) - -通常来说,预测头是模型算法实现的核心,用于控制模型的输出,并进行损失函数计算。 - -MMPose 中 Head 相关的模块定义在 `$MMPOSE/mmpose/models/heads` 目录下,开发者在自定义预测头时需要继承我们提供的基类 `BaseHead`,并重载以下三个方法对应模型推理的三种模式: - -- forward() - -- predict() - -- loss() - -具体而言,`predict()` 返回的应是输入图片尺度下的结果,因此需要调用 `self.decode()` 对网络输出进行解码,这一过程实现在 `BaseHead` 中已经实现,它会调用编解码器提供的 `decode()` 方法来完成解码。 - -另一方面,我们会在 `predict()` 中进行测试时增强。在进行预测时,一个常见的测试时增强技巧是进行翻转集成。即,将一张图片先进行一次推理,再将图片水平翻转进行一次推理,推理的结果再次水平翻转回去,对两次推理的结果进行平均。这个技巧能有效提升模型的预测稳定性。 - -下面是在 `RegressionHead` 中定义 `predict()` 的例子: - -```Python -def predict(self, - feats: Tuple[Tensor], - batch_data_samples: OptSampleList, - test_cfg: ConfigType = {}) -> Predictions: - """Predict results from outputs.""" - - if test_cfg.get('flip_test', False): - # TTA: flip test -> feats = [orig, flipped] - assert isinstance(feats, list) and len(feats) == 2 - flip_indices = batch_data_samples[0].metainfo['flip_indices'] - input_size = batch_data_samples[0].metainfo['input_size'] - _feats, _feats_flip = feats - _batch_coords = self.forward(_feats) - _batch_coords_flip = flip_coordinates( - self.forward(_feats_flip), - flip_indices=flip_indices, - shift_coords=test_cfg.get('shift_coords', True), - input_size=input_size) - batch_coords = (_batch_coords + _batch_coords_flip) * 0.5 - else: - batch_coords = self.forward(feats) # (B, K, D) - - batch_coords.unsqueeze_(dim=1) # (B, N, K, D) - preds = self.decode(batch_coords) -``` - -`loss()`除了进行损失函数的计算,还会进行 accuracy 等训练时指标的计算,并通过一个字典 `losses` 来传递: - -```Python - # calculate accuracy -_, avg_acc, _ = keypoint_pck_accuracy( - pred=to_numpy(pred_coords), - gt=to_numpy(keypoint_labels), - mask=to_numpy(keypoint_weights) > 0, - thr=0.05, - norm_factor=np.ones((pred_coords.size(0), 2), dtype=np.float32)) - -acc_pose = torch.tensor(avg_acc, device=keypoint_labels.device) -losses.update(acc_pose=acc_pose) -``` - -每个 batch 的数据都打包成了 `batch_data_samples`。以 Regression-based 方法为例,训练所需的归一化的坐标值和关键点权重可以用如下方式获取: - -```Python -keypoint_labels = torch.cat( - [d.gt_instance_labels.keypoint_labels for d in batch_data_samples]) -keypoint_weights = torch.cat([ - d.gt_instance_labels.keypoint_weights for d in batch_data_samples -]) -``` - -以下为 `RegressionHead` 中完整的 `loss()` 实现: - -```Python -def loss(self, - inputs: Tuple[Tensor], - batch_data_samples: OptSampleList, - train_cfg: ConfigType = {}) -> dict: - """Calculate losses from a batch of inputs and data samples.""" - - pred_outputs = self.forward(inputs) - - keypoint_labels = torch.cat( - [d.gt_instance_labels.keypoint_labels for d in batch_data_samples]) - keypoint_weights = torch.cat([ - d.gt_instance_labels.keypoint_weights for d in batch_data_samples - ]) - - # calculate losses - losses = dict() - loss = self.loss_module(pred_outputs, keypoint_labels, - keypoint_weights.unsqueeze(-1)) - - if isinstance(loss, dict): - losses.update(loss) - else: - losses.update(loss_kpt=loss) - - # calculate accuracy - _, avg_acc, _ = keypoint_pck_accuracy( - pred=to_numpy(pred_outputs), - gt=to_numpy(keypoint_labels), - mask=to_numpy(keypoint_weights) > 0, - thr=0.05, - norm_factor=np.ones((pred_outputs.size(0), 2), dtype=np.float32)) - acc_pose = torch.tensor(avg_acc, device=keypoint_labels.device) - losses.update(acc_pose=acc_pose) - - return losses -``` +# 20 分钟了解 MMPose 架构设计 + +MMPose 1.0 与之前的版本有较大改动,对部分模块进行了重新设计和组织,降低代码冗余度,提升运行效率,降低学习难度。 + +MMPose 1.0 采用了全新的模块结构设计以精简代码,提升运行效率,降低学习难度。对于有一定深度学习基础的用户,本章节提供了对 MMPose 架构设计的总体介绍。不论你是**旧版 MMPose 的用户**,还是**希望直接从 MMPose 1.0 上手的新用户**,都可以通过本教程了解如何构建一个基于 MMPose 1.0 的项目。 + +```{note} +本教程包含了使用 MMPose 1.0 
时开发者会关心的内容: + +- 整体代码架构与设计逻辑 + +- 如何用config文件管理模块 + +- 如何使用自定义数据集 + +- 如何添加新的模块(骨干网络、模型头部、损失函数等) +``` + +以下是这篇教程的目录: + +- [20 分钟了解 MMPose 架构设计](#20-分钟了解-mmpose-架构设计) + - [总览](#总览) + - [Step1:配置文件](#step1配置文件) + - [Step2:数据](#step2数据) + - [数据集元信息](#数据集元信息) + - [数据集](#数据集) + - [数据流水线](#数据流水线) + - [i. 数据增强](#i-数据增强) + - [ii. 数据变换](#ii-数据变换) + - [iii. 数据编码](#iii-数据编码) + - [iv. 数据打包](#iv-数据打包) + - [Step3: 模型](#step3-模型) + - [前处理器(DataPreprocessor)](#前处理器datapreprocessor) + - [主干网络(Backbone)](#主干网络backbone) + - [颈部模块(Neck)](#颈部模块neck) + - [预测头(Head)](#预测头head) + +## 总览 + +![overall-cn](https://user-images.githubusercontent.com/13503330/187830967-f2d7bf40-6261-42f3-91a5-ae045fa0dc0c.png) + +一般来说,开发者在项目开发过程中经常接触内容的主要有**五个**方面: + +- **通用**:环境、钩子(Hook)、模型权重存取(Checkpoint)、日志(Logger)等 + +- **数据**:数据集、数据读取(Dataloader)、数据增强等 + +- **训练**:优化器、学习率调整等 + +- **模型**:主干网络、颈部模块(Neck)、预测头模块(Head)、损失函数等 + +- **评测**:评测指标(Metric)、评测器(Evaluator)等 + +其中**通用**、**训练**和**评测**相关的模块往往由训练框架提供,开发者只需要调用和调整参数,不需要自行实现,开发者主要实现的是**数据**和**模型**部分。 + +## Step1:配置文件 + +在MMPose中,我们通常 python 格式的配置文件,用于整个项目的定义、参数管理,因此我们强烈建议第一次接触 MMPose 的开发者,查阅 [配置文件](./user_guides/configs.md) 学习配置文件的定义。 + +需要注意的是,所有新增的模块都需要使用注册器(Registry)进行注册,并在对应目录的 `__init__.py` 中进行 `import`,以便能够使用配置文件构建其实例。 + +## Step2:数据 + +MMPose 数据的组织主要包含三个方面: + +- 数据集元信息 + +- 数据集 + +- 数据流水线 + +### 数据集元信息 + +元信息指具体标注之外的数据集信息。姿态估计数据集的元信息通常包括:关键点和骨骼连接的定义、对称性、关键点性质(如关键点权重、标注标准差、所属上下半身)等。这些信息在数据在数据处理、模型训练和测试中有重要作用。在 MMPose 中,数据集的元信息使用 python 格式的配置文件保存,位于 `$MMPOSE/configs/_base_/datasets` 目录下。 + +在 MMPose 中使用自定义数据集时,你需要增加对应的元信息配置文件。以 MPII 数据集(`$MMPOSE/configs/_base_/datasets/mpii.py`)为例: + +```Python +dataset_info = dict( + dataset_name='mpii', + paper_info=dict( + author='Mykhaylo Andriluka and Leonid Pishchulin and ' + 'Peter Gehler and Schiele, Bernt', + title='2D Human Pose Estimation: New Benchmark and ' + 'State of the Art Analysis', + container='IEEE Conference on Computer Vision and ' + 'Pattern Recognition (CVPR)', + year='2014', + homepage='http://human-pose.mpi-inf.mpg.de/', + ), + keypoint_info={ + 0: + dict( + name='right_ankle', + id=0, + color=[255, 128, 0], + type='lower', + swap='left_ankle'), + ## 内容省略 + }, + skeleton_info={ + 0: + dict(link=('right_ankle', 'right_knee'), id=0, color=[255, 128, 0]), + ## 内容省略 + }, + joint_weights=[ + 1.5, 1.2, 1., 1., 1.2, 1.5, 1., 1., 1., 1., 1.5, 1.2, 1., 1., 1.2, 1.5 + ], + # 使用 COCO 数据集中提供的 sigmas 值 + sigmas=[ + 0.089, 0.083, 0.107, 0.107, 0.083, 0.089, 0.026, 0.026, 0.026, 0.026, + 0.062, 0.072, 0.179, 0.179, 0.072, 0.062 + ]) +``` + +在模型配置文件中,你需要为自定义数据集指定对应的元信息配置文件。假如该元信息配置文件路径为 `$MMPOSE/configs/_base_/datasets/custom.py`,指定方式如下: + +```python +# dataset and dataloader settings +dataset_type = 'MyCustomDataset' # or 'CocoDataset' +train_dataloader = dict( + batch_size=2, + dataset=dict( + type=dataset_type, + data_root='root/of/your/train/data', + ann_file='path/to/your/train/json', + data_prefix=dict(img='path/to/your/train/img'), + # 指定对应的元信息配置文件 + metainfo=dict(from_file='configs/_base_/datasets/custom.py'), + ...), + ) +val_dataloader = dict( + batch_size=2, + dataset=dict( + type=dataset_type, + data_root='root/of/your/val/data', + ann_file='path/to/your/val/json', + data_prefix=dict(img='path/to/your/val/img'), + # 指定对应的元信息配置文件 + metainfo=dict(from_file='configs/_base_/datasets/custom.py'), + ...), + ) +test_dataloader = val_dataloader +``` + +### 数据集 + +在 MMPose 中使用自定义数据集时,我们推荐将数据转化为已支持的格式(如 COCO 或 MPII),并直接使用我们提供的对应数据集实现。如果这种方式不可行,则用户需要实现自己的数据集类。 + +MMPose 中的大部分 2D 关键点数据集**以 COCO 形式组织**,为此我们提供了基类 
[BaseCocoStyleDataset](/mmpose/datasets/datasets/base/base_coco_style_dataset.py)。我们推荐用户继承该基类,并按需重写它的方法(通常是 `__init__()` 和 `_load_annotations()` 方法),以扩展到新的 2D 关键点数据集。 + +```{note} +关于COCO数据格式的详细说明请参考 [COCO](./dataset_zoo/2d_body_keypoint.md) 。 +``` + +```{note} +在 MMPose 中 bbox 的数据格式采用 `xyxy`,而不是 `xywh`,这与 [MMDetection](https://github.com/open-mmlab/mmdetection) 等其他 OpenMMLab 成员保持一致。为了实现不同 bbox 格式之间的转换,我们提供了丰富的函数:`bbox_xyxy2xywh`、`bbox_xywh2xyxy`、`bbox_xyxy2cs`等。这些函数定义在`$MMPOSE/mmpose/structures/bbox/transforms.py`。 +``` + +下面我们以MPII数据集的实现(`$MMPOSE/mmpose/datasets/datasets/body/mpii_dataset.py`)为例: + +```Python +@DATASETS.register_module() +class MpiiDataset(BaseCocoStyleDataset): + METAINFO: dict = dict(from_file='configs/_base_/datasets/mpii.py') + + def __init__(self, + ## 内容省略 + headbox_file: Optional[str] = None, + ## 内容省略): + + if headbox_file: + if data_mode != 'topdown': + raise ValueError( + f'{self.__class__.__name__} is set to {data_mode}: ' + 'mode, while "headbox_file" is only ' + 'supported in topdown mode.') + + if not test_mode: + raise ValueError( + f'{self.__class__.__name__} has `test_mode==False` ' + 'while "headbox_file" is only ' + 'supported when `test_mode==True`.') + + headbox_file_type = headbox_file[-3:] + allow_headbox_file_type = ['mat'] + if headbox_file_type not in allow_headbox_file_type: + raise KeyError( + f'The head boxes file type {headbox_file_type} is not ' + f'supported. Should be `mat` but got {headbox_file_type}.') + self.headbox_file = headbox_file + + super().__init__( + ## 内容省略 + ) + + def _load_annotations(self) -> List[dict]: + """Load data from annotations in MPII format.""" + check_file_exist(self.ann_file) + with open(self.ann_file) as anno_file: + anns = json.load(anno_file) + + if self.headbox_file: + check_file_exist(self.headbox_file) + headbox_dict = loadmat(self.headbox_file) + headboxes_src = np.transpose(headbox_dict['headboxes_src'], + [2, 0, 1]) + SC_BIAS = 0.6 + + data_list = [] + ann_id = 0 + + # mpii bbox scales are normalized with factor 200. + pixel_std = 200. + + for idx, ann in enumerate(anns): + center = np.array(ann['center'], dtype=np.float32) + scale = np.array([ann['scale'], ann['scale']], + dtype=np.float32) * pixel_std + + # Adjust center/scale slightly to avoid cropping limbs + if center[0] != -1: + center[1] = center[1] + 15. 
/ pixel_std * scale[1] + + # MPII uses matlab format, index is 1-based, + # we should first convert to 0-based index + center = center - 1 + + # unify shape with coco datasets + center = center.reshape(1, -1) + scale = scale.reshape(1, -1) + bbox = bbox_cs2xyxy(center, scale) + + # load keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + keypoints = np.array(ann['joints']).reshape(1, -1, 2) + keypoints_visible = np.array(ann['joints_vis']).reshape(1, -1) + + data_info = { + 'id': ann_id, + 'img_id': int(ann['image'].split('.')[0]), + 'img_path': osp.join(self.data_prefix['img'], ann['image']), + 'bbox_center': center, + 'bbox_scale': scale, + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + } + + if self.headbox_file: + # calculate the diagonal length of head box as norm_factor + headbox = headboxes_src[idx] + head_size = np.linalg.norm(headbox[1] - headbox[0], axis=0) + head_size *= SC_BIAS + data_info['head_size'] = head_size.reshape(1, -1) + + data_list.append(data_info) + ann_id = ann_id + 1 + + return data_list +``` + +在对MPII数据集进行支持时,由于MPII需要读入 `head_size` 信息来计算 `PCKh`,因此我们在`__init__()`中增加了 `headbox_file`,并重载了 `_load_annotations()` 来完成数据组织。 + +如果自定义数据集无法被 `BaseCocoStyleDataset` 支持,你需要直接继承 [MMEngine](https://github.com/open-mmlab/mmengine) 中提供的 `BaseDataset` 基类。具体方法请参考相关[文档](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/basedataset.html)。 + +### 数据流水线 + +一个典型的数据流水线配置如下: + +```Python +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +test_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] +``` + +在关键点检测任务中,数据一般会在三个尺度空间中变换: + +- **原始图片空间**:图片存储时的原始空间,不同图片的尺寸不一定相同 + +- **输入图片空间**:模型输入的图片尺度空间,所有**图片**和**标注**被缩放到输入尺度,如 `256x256`,`256x192` 等 + +- **输出尺度空间**:模型输出和训练监督信息所在的尺度空间,如`64x64(热力图)`,`1x1(回归坐标值)`等 + +数据在三个空间中变换的流程如图所示: + +![migration-cn](https://user-images.githubusercontent.com/13503330/187831574-13804daf-f498-47c2-ba43-64b8e6ffe3dd.png) + +在MMPose中,数据变换所需要的模块在`$MMPOSE/mmpose/datasets/transforms`目录下,它们的工作流程如图所示: + +![transforms-cn](https://user-images.githubusercontent.com/13503330/187831611-8db89e20-95c7-42bc-8b0d-700fadf60328.png) + +#### i. 数据增强 + +数据增强中常用的变换存放在 `$MMPOSE/mmpose/datasets/transforms/common_transforms.py` 中,如 `RandomFlip`、`RandomHalfBody` 等。 + +对于 top-down 方法,`Shift`、`Rotate`、`Resize` 操作由 `RandomBBoxTransform`来实现;对于 bottom-up 方法,这些则是由 `BottomupRandomAffine` 实现。 + +```{note} +值得注意的是,大部分数据变换都依赖于 `bbox_center` 和 `bbox_scale`,它们可以通过 `GetBBoxCenterScale` 来得到。 +``` + +#### ii. 数据变换 + +我们使用仿射变换,将图像和坐标标注从原始图片空间变换到输入图片空间。这一操作在 top-down 方法中由 `TopdownAffine` 完成,在 bottom-up 方法中则由 `BottomupRandomAffine` 完成。 + +#### iii. 
数据编码 + +在模型训练时,数据从原始空间变换到输入图片空间后,需要使用 `GenerateTarget` 来生成训练所需的监督目标(比如用坐标值生成高斯热图),我们将这一过程称为编码(Encode),反之,通过高斯热图得到对应坐标值的过程称为解码(Decode)。 + +在 MMPose 中,我们将编码和解码过程集合成一个编解码器(Codec),在其中实现 `encode()` 和 `decode()`。 + +目前 MMPose 支持生成以下类型的监督目标: + +- `heatmap`: 高斯热图 + +- `keypoint_label`: 关键点标签(如归一化的坐标值) + +- `keypoint_xy_label`: 单个坐标轴关键点标签 + +- `heatmap+keypoint_label`: 同时生成高斯热图和关键点标签 + +- `multiscale_heatmap`: 多尺度高斯热图 + +生成的监督目标会按以下关键字进行封装: + +- `heatmaps`:高斯热图 + +- `keypoint_labels`:关键点标签(如归一化的坐标值) + +- `keypoint_x_labels`:x 轴关键点标签 + +- `keypoint_y_labels`:y 轴关键点标签 + +- `keypoint_weights`:关键点权重 + +```Python +@TRANSFORMS.register_module() +class GenerateTarget(BaseTransform): + """Encode keypoints into Target. + + Added Keys (depends on the args): + - heatmaps + - keypoint_labels + - keypoint_x_labels + - keypoint_y_labels + - keypoint_weights + """ +``` + +值得注意的是,我们对 top-down 和 bottom-up 的数据格式进行了统一,这意味着标注信息中会新增一个维度来代表同一张图里的不同目标(如人),格式为: + +```Python +[batch_size, num_instances, num_keypoints, dim_coordinates] +``` + +- top-down:`[B, 1, K, D]` + +- Bottom-up: `[B, N, K, D]` + +当前已经支持的编解码器定义在 `$MMPOSE/mmpose/codecs` 目录下,如果你需要自定新的编解码器,可以前往[编解码器](./user_guides/codecs.md)了解更多详情。 + +#### iv. 数据打包 + +数据经过前处理变换后,最终需要通过 `PackPoseInputs` 打包成数据样本。该操作定义在 `$MMPOSE/mmpose/datasets/transforms/formatting.py` 中。 + +打包过程会将数据流水线中用字典 `results` 存储的数据转换成用 MMPose 所需的标准数据结构, 如 `InstanceData`,`PixelData`,`PoseDataSample` 等。 + +具体而言,我们将数据样本内容分为 `gt`(标注真值) 和 `pred`(模型预测)两部分,它们都包含以下数据项: + +- **instances**(numpy.array):实例级别的原始标注或预测结果,属于原始尺度空间 + +- **instance_labels**(torch.tensor):实例级别的训练标签(如归一化的坐标值、关键点可见性),属于输出尺度空间 + +- **fields**(torch.tensor):像素级别的训练标签(如高斯热图)或预测结果,属于输出尺度空间 + +下面是 `PoseDataSample` 底层实现的例子: + +```Python +def get_pose_data_sample(self): + # meta + pose_meta = dict( + img_shape=(600, 900), # [h, w, c] + crop_size=(256, 192), # [h, w] + heatmap_size=(64, 48), # [h, w] + ) + + # gt_instances + gt_instances = InstanceData() + gt_instances.bboxes = np.random.rand(1, 4) + gt_instances.keypoints = np.random.rand(1, 17, 2) + + # gt_instance_labels + gt_instance_labels = InstanceData() + gt_instance_labels.keypoint_labels = torch.rand(1, 17, 2) + gt_instance_labels.keypoint_weights = torch.rand(1, 17) + + # pred_instances + pred_instances = InstanceData() + pred_instances.keypoints = np.random.rand(1, 17, 2) + pred_instances.keypoint_scores = np.random.rand(1, 17) + + # gt_fields + gt_fields = PixelData() + gt_fields.heatmaps = torch.rand(17, 64, 48) + + # pred_fields + pred_fields = PixelData() + pred_fields.heatmaps = torch.rand(17, 64, 48) + data_sample = PoseDataSample( + gt_instances=gt_instances, + pred_instances=pred_instances, + gt_fields=gt_fields, + pred_fields=pred_fields, + metainfo=pose_meta) + + return data_sample +``` + +## Step3: 模型 + +在 MMPose 1.0中,模型由以下几部分构成: + +- **预处理器(DataPreprocessor)**:完成图像归一化和通道转换等前处理 + +- **主干网络 (Backbone)**:用于特征提取 + +- **颈部模块(Neck)**:GAP,FPN 等可选项 + +- **预测头(Head)**:用于实现核心算法功能和损失函数定义 + +我们在 `$MMPOSE/models/pose_estimators/base.py` 下为姿态估计模型定义了一个基类 `BasePoseEstimator`,所有的模型(如 `TopdownPoseEstimator`)都需要继承这个基类,并重载对应的方法。 + +在模型的 `forward()` 方法中提供了三种不同的模式: + +- `mode == 'loss'`:返回损失函数计算的结果,用于模型训练 + +- `mode == 'predict'`:返回输入尺度下的预测结果,用于模型推理 + +- `mode == 'tensor'`:返回输出尺度下的模型输出,即只进行模型前向传播,用于模型导出 + +开发者需要在 `PoseEstimator` 中按照模型结构调用对应的 `Registry` ,对模块进行实例化。以 top-down 模型为例: + +```Python +@MODELS.register_module() +class TopdownPoseEstimator(BasePoseEstimator): + def __init__(self, + backbone: ConfigType, + neck: OptConfigType = None, + head: OptConfigType = None, + train_cfg: 
OptConfigType = None, + test_cfg: OptConfigType = None, + data_preprocessor: OptConfigType = None, + init_cfg: OptMultiConfig = None): + super().__init__(data_preprocessor, init_cfg) + + self.backbone = MODELS.build(backbone) + + if neck is not None: + self.neck = MODELS.build(neck) + + if head is not None: + self.head = MODELS.build(head) +``` + +### 前处理器(DataPreprocessor) + +从 MMPose 1.0 开始,我们在模型中添加了新的前处理器模块,用以完成图像归一化、通道顺序变换等操作。这样做的好处是可以利用 GPU 等设备的计算能力加快计算,并使模型在导出和部署时更具完整性。 + +在配置文件中,一个常见的 `data_preprocessor` 如下: + +```Python +data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), +``` + +它会将输入图片的通道顺序从 `bgr` 转换为 `rgb`,并根据 `mean` 和 `std` 进行数据归一化。 + +### 主干网络(Backbone) + +MMPose 实现的主干网络存放在 `$MMPOSE/mmpose/models/backbones` 目录下。 + +在实际开发中,开发者经常会使用预训练的网络权重进行迁移学习,这能有效提升模型在小数据集上的性能。 在 MMPose 中,只需要在配置文件 `backbone` 的 `init_cfg` 中设置: + +```Python +init_cfg=dict( + type='Pretrained', + checkpoint='PATH/TO/YOUR_MODEL_WEIGHTS.pth'), +``` + +如果你想只加载一个训练好的 checkpoint 的 backbone 部分,你需要指明一下前缀 `prefix`: + +```Python +init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='PATH/TO/YOUR_CHECKPOINT.pth'), +``` + +其中 `checkpoint` 既可以是本地路径,也可以是下载链接。因此,如果你想使用 Torchvision 提供的预训练模型(比如ResNet50),可以使用: + +```Python +init_cfg=dict( + type='Pretrained', + checkpoint='torchvision://resnet50') +``` + +除了这些常用的主干网络以外,你还可以从 MMClassification 等其他 OpenMMLab 项目中方便地迁移主干网络,它们都遵循同一套配置文件格式,并提供了预训练权重可供使用。 + +需要强调的是,如果你加入了新的主干网络,需要在模型定义时进行注册: + +```Python +@MODELS.register_module() +class YourBackbone(BaseBackbone): +``` + +同时在 `$MMPOSE/mmpose/models/backbones/__init__.py` 下进行 `import`,并加入到 `__all__` 中,才能被配置文件正确地调用。 + +### 颈部模块(Neck) + +颈部模块通常是介于主干网络和预测头之间的模块,在部分模型算法中会用到,常见的颈部模块有: + +- Global Average Pooling (GAP) + +- Feature Pyramid Networks (FPN) + +- Feature Map Processor (FMP) + + `FeatureMapProcessor` 是一个通用的 PyTorch 模块,旨在通过选择、拼接和缩放等非参数变换将主干网络输出的特征图转换成适合预测头的格式。以下是一些操作的配置方式及效果示意图: + + - 选择操作 + + ```python + neck=dict(type='FeatureMapProcessor', select_index=0) + ``` + +
+
+  - 拼接操作
+
+    ```python
+    neck=dict(type='FeatureMapProcessor', concat=True)
+    ```
+
+
+    拼接之前,其它特征图会被缩放到和序号为 0 的特征图相同的尺寸。
+
+  - 缩放操作
+
+    ```python
+    neck=dict(type='FeatureMapProcessor', scale_factor=2.0)
+    ```
+
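+上面的选择、拼接和缩放操作也可以在同一个 `FeatureMapProcessor` 中组合使用。下面给出一个组合配置的示意(仅供参考:其中"主干网络输出 4 个尺度特征图"以及各参数取值均为假设,实际支持的参数请以 `$MMPOSE/mmpose/models/necks` 下的实现为准):
+
+```python
+# 组合示意(假设主干网络输出 4 个尺度的特征图):
+# 先选出最后两个特征图并进行拼接(拼接前会先统一特征图尺寸),
+# 再整体放大 2 倍后送入预测头
+neck=dict(
+    type='FeatureMapProcessor',
+    select_index=(2, 3),
+    concat=True,
+    scale_factor=2.0)
+```
+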
    + +### 预测头(Head) + +通常来说,预测头是模型算法实现的核心,用于控制模型的输出,并进行损失函数计算。 + +MMPose 中 Head 相关的模块定义在 `$MMPOSE/mmpose/models/heads` 目录下,开发者在自定义预测头时需要继承我们提供的基类 `BaseHead`,并重载以下三个方法对应模型推理的三种模式: + +- forward() + +- predict() + +- loss() + +具体而言,`predict()` 返回的应是输入图片尺度下的结果,因此需要调用 `self.decode()` 对网络输出进行解码,这一过程实现在 `BaseHead` 中已经实现,它会调用编解码器提供的 `decode()` 方法来完成解码。 + +另一方面,我们会在 `predict()` 中进行测试时增强。在进行预测时,一个常见的测试时增强技巧是进行翻转集成。即,将一张图片先进行一次推理,再将图片水平翻转进行一次推理,推理的结果再次水平翻转回去,对两次推理的结果进行平均。这个技巧能有效提升模型的预测稳定性。 + +下面是在 `RegressionHead` 中定义 `predict()` 的例子: + +```Python +def predict(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from outputs.""" + + if test_cfg.get('flip_test', False): + # TTA: flip test -> feats = [orig, flipped] + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + input_size = batch_data_samples[0].metainfo['input_size'] + _feats, _feats_flip = feats + _batch_coords = self.forward(_feats) + _batch_coords_flip = flip_coordinates( + self.forward(_feats_flip), + flip_indices=flip_indices, + shift_coords=test_cfg.get('shift_coords', True), + input_size=input_size) + batch_coords = (_batch_coords + _batch_coords_flip) * 0.5 + else: + batch_coords = self.forward(feats) # (B, K, D) + + batch_coords.unsqueeze_(dim=1) # (B, N, K, D) + preds = self.decode(batch_coords) +``` + +`loss()`除了进行损失函数的计算,还会进行 accuracy 等训练时指标的计算,并通过一个字典 `losses` 来传递: + +```Python + # calculate accuracy +_, avg_acc, _ = keypoint_pck_accuracy( + pred=to_numpy(pred_coords), + gt=to_numpy(keypoint_labels), + mask=to_numpy(keypoint_weights) > 0, + thr=0.05, + norm_factor=np.ones((pred_coords.size(0), 2), dtype=np.float32)) + +acc_pose = torch.tensor(avg_acc, device=keypoint_labels.device) +losses.update(acc_pose=acc_pose) +``` + +每个 batch 的数据都打包成了 `batch_data_samples`。以 Regression-based 方法为例,训练所需的归一化的坐标值和关键点权重可以用如下方式获取: + +```Python +keypoint_labels = torch.cat( + [d.gt_instance_labels.keypoint_labels for d in batch_data_samples]) +keypoint_weights = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples +]) +``` + +以下为 `RegressionHead` 中完整的 `loss()` 实现: + +```Python +def loss(self, + inputs: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + pred_outputs = self.forward(inputs) + + keypoint_labels = torch.cat( + [d.gt_instance_labels.keypoint_labels for d in batch_data_samples]) + keypoint_weights = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples + ]) + + # calculate losses + losses = dict() + loss = self.loss_module(pred_outputs, keypoint_labels, + keypoint_weights.unsqueeze(-1)) + + if isinstance(loss, dict): + losses.update(loss) + else: + losses.update(loss_kpt=loss) + + # calculate accuracy + _, avg_acc, _ = keypoint_pck_accuracy( + pred=to_numpy(pred_outputs), + gt=to_numpy(keypoint_labels), + mask=to_numpy(keypoint_weights) > 0, + thr=0.05, + norm_factor=np.ones((pred_outputs.size(0), 2), dtype=np.float32)) + acc_pose = torch.tensor(avg_acc, device=keypoint_labels.device) + losses.update(acc_pose=acc_pose) + + return losses +``` diff --git a/docs/zh_cn/index.rst b/docs/zh_cn/index.rst index 2431d82e4d..67a68f732a 100644 --- a/docs/zh_cn/index.rst +++ b/docs/zh_cn/index.rst @@ -1,116 +1,116 @@ -欢迎来到 MMPose 中文文档! 
-================================== - -您可以在页面左下角切换文档语言。 - -You can change the documentation language at the lower-left corner of the page. - -.. toctree:: - :maxdepth: 1 - :caption: 开启 MMPose 之旅 - - overview.md - installation.md - guide_to_framework.md - demos.md - contribution_guide.md - faq.md - -.. toctree:: - :maxdepth: 1 - :caption: 用户教程 - - user_guides/inference.md - user_guides/configs.md - user_guides/prepare_datasets.md - user_guides/train_and_test.md - -.. toctree:: - :maxdepth: 1 - :caption: 进阶教程 - - advanced_guides/codecs.md - advanced_guides/dataflow.md - advanced_guides/implement_new_models.md - advanced_guides/customize_datasets.md - advanced_guides/customize_transforms.md - advanced_guides/customize_optimizer.md - advanced_guides/customize_logging.md - advanced_guides/how_to_deploy.md - advanced_guides/model_analysis.md - -.. toctree:: - :maxdepth: 1 - :caption: 1.x 版本迁移指南 - - migration.md - -.. toctree:: - :maxdepth: 2 - :caption: 模型库 - - model_zoo.txt - model_zoo/body_2d_keypoint.md - model_zoo/body_3d_keypoint.md - model_zoo/face_2d_keypoint.md - model_zoo/hand_2d_keypoint.md - model_zoo/wholebody_2d_keypoint.md - model_zoo/animal_2d_keypoint.md - -.. toctree:: - :maxdepth: 2 - :caption: 模型库(按论文整理) - - model_zoo_papers/algorithms.md - model_zoo_papers/backbones.md - model_zoo_papers/techniques.md - model_zoo_papers/datasets.md - -.. toctree:: - :maxdepth: 2 - :caption: 数据集 - - dataset_zoo.md - dataset_zoo/2d_body_keypoint.md - dataset_zoo/2d_wholebody_keypoint.md - dataset_zoo/2d_face_keypoint.md - dataset_zoo/2d_hand_keypoint.md - dataset_zoo/2d_fashion_landmark.md - dataset_zoo/2d_animal_keypoint.md - dataset_zoo/3d_body_keypoint.md - dataset_zoo/3d_hand_keypoint.md - dataset_zoo/dataset_tools.md - -.. toctree:: - :maxdepth: 1 - :caption: 相关项目 - - projects/community_projects.md - -.. toctree:: - :maxdepth: 1 - :caption: 其他说明 - - notes/ecosystem.md - notes/changelog.md - notes/benchmark.md - notes/pytorch_2.md - -.. toctree:: - :maxdepth: 1 - :caption: API 参考文档 - - api.rst - -.. toctree:: - :caption: 切换语言 - - switch_language.md - - - -索引与表格 -================== - -* :ref:`genindex` -* :ref:`search` +欢迎来到 MMPose 中文文档! +================================== + +您可以在页面左下角切换文档语言。 + +You can change the documentation language at the lower-left corner of the page. + +.. toctree:: + :maxdepth: 1 + :caption: 开启 MMPose 之旅 + + overview.md + installation.md + guide_to_framework.md + demos.md + contribution_guide.md + faq.md + +.. toctree:: + :maxdepth: 1 + :caption: 用户教程 + + user_guides/inference.md + user_guides/configs.md + user_guides/prepare_datasets.md + user_guides/train_and_test.md + +.. toctree:: + :maxdepth: 1 + :caption: 进阶教程 + + advanced_guides/codecs.md + advanced_guides/dataflow.md + advanced_guides/implement_new_models.md + advanced_guides/customize_datasets.md + advanced_guides/customize_transforms.md + advanced_guides/customize_optimizer.md + advanced_guides/customize_logging.md + advanced_guides/how_to_deploy.md + advanced_guides/model_analysis.md + +.. toctree:: + :maxdepth: 1 + :caption: 1.x 版本迁移指南 + + migration.md + +.. toctree:: + :maxdepth: 2 + :caption: 模型库 + + model_zoo.txt + model_zoo/body_2d_keypoint.md + model_zoo/body_3d_keypoint.md + model_zoo/face_2d_keypoint.md + model_zoo/hand_2d_keypoint.md + model_zoo/wholebody_2d_keypoint.md + model_zoo/animal_2d_keypoint.md + +.. toctree:: + :maxdepth: 2 + :caption: 模型库(按论文整理) + + model_zoo_papers/algorithms.md + model_zoo_papers/backbones.md + model_zoo_papers/techniques.md + model_zoo_papers/datasets.md + +.. 
toctree:: + :maxdepth: 2 + :caption: 数据集 + + dataset_zoo.md + dataset_zoo/2d_body_keypoint.md + dataset_zoo/2d_wholebody_keypoint.md + dataset_zoo/2d_face_keypoint.md + dataset_zoo/2d_hand_keypoint.md + dataset_zoo/2d_fashion_landmark.md + dataset_zoo/2d_animal_keypoint.md + dataset_zoo/3d_body_keypoint.md + dataset_zoo/3d_hand_keypoint.md + dataset_zoo/dataset_tools.md + +.. toctree:: + :maxdepth: 1 + :caption: 相关项目 + + projects/community_projects.md + +.. toctree:: + :maxdepth: 1 + :caption: 其他说明 + + notes/ecosystem.md + notes/changelog.md + notes/benchmark.md + notes/pytorch_2.md + +.. toctree:: + :maxdepth: 1 + :caption: API 参考文档 + + api.rst + +.. toctree:: + :caption: 切换语言 + + switch_language.md + + + +索引与表格 +================== + +* :ref:`genindex` +* :ref:`search` diff --git a/docs/zh_cn/installation.md b/docs/zh_cn/installation.md index ef515c8030..9df63595a8 100644 --- a/docs/zh_cn/installation.md +++ b/docs/zh_cn/installation.md @@ -1,248 +1,248 @@ -# 安装 - -我们推荐用户按照我们的最佳实践来安装 MMPose。但除此之外,如果您想根据 -您的习惯完成安装流程,也可以参见 [自定义安装](#自定义安装) 一节来获取更多信息。 - -- [安装](#安装) - - [依赖环境](#依赖环境) - - [最佳实践](#最佳实践) - - [从源码安装 MMPose](#从源码安装-mmpose) - - [作为 Python 包安装](#作为-python-包安装) - - [验证安装](#验证安装) - - [自定义安装](#自定义安装) - - [CUDA 版本](#cuda-版本) - - [不使用 MIM 安装 MMEngine](#不使用-mim-安装-mmengine) - - [在 CPU 环境中安装](#在-cpu-环境中安装) - - [在 Google Colab 中安装](#在-google-colab-中安装) - - [通过 Docker 使用 MMPose](#通过-docker-使用-mmpose) - - [故障解决](#故障解决) - -## 依赖环境 - -在本节中,我们将演示如何准备 PyTorch 相关的依赖环境。 - -MMPose 适用于 Linux、Windows 和 macOS。它需要 Python 3.7+、CUDA 9.2+ 和 PyTorch 1.8+。 - -如果您对配置 PyTorch 环境已经很熟悉,并且已经完成了配置,可以直接进入下一节:[安装](#安装-mmpose)。否则,请依照以下步骤完成配置。 - -**第 1 步** 从[官网](https://docs.conda.io/en/latest/miniconda.html) 下载并安装 Miniconda。 - -**第 2 步** 创建一个 conda 虚拟环境并激活它。 - -```shell -conda create --name openmmlab python=3.8 -y -conda activate openmmlab -``` - -**第 3 步** 按照[官方指南](https://pytorch.org/get-started/locally/) 安装 PyTorch。例如: - -在 GPU 平台: - -```shell -conda install pytorch torchvision -c pytorch -``` - -```{warning} -以上命令会自动安装最新版的 PyTorch 与对应的 cudatoolkit,请检查它们是否与您的环境匹配。 -``` - -在 CPU 平台: - -```shell -conda install pytorch torchvision cpuonly -c pytorch -``` - -**第 4 步** 使用 [MIM](https://github.com/open-mmlab/mim) 安装 [MMEngine](https://github.com/open-mmlab/mmengine) 和 [MMCV](https://github.com/open-mmlab/mmcv/tree/2.x) - -```shell -pip install -U openmim -mim install mmengine -mim install "mmcv>=2.0.1" -``` - -请注意,MMPose 中的一些推理示例脚本需要使用 [MMDetection](https://github.com/open-mmlab/mmdetection) (mmdet) 检测人体。如果您想运行这些示例脚本,可以通过运行以下命令安装 mmdet: - -```shell -mim install "mmdet>=3.1.0" -``` - -## 最佳实践 - -根据具体需求,我们支持两种安装模式: 从源码安装(推荐)和作为 Python 包安装 - -### 从源码安装(推荐) - -如果基于 MMPose 框架开发自己的任务,需要添加新的功能,比如新的模型或是数据集,或者使用我们提供的各种工具。从源码按如下方式安装 mmpose: - -```shell -git clone https://github.com/open-mmlab/mmpose.git -cd mmpose -pip install -r requirements.txt -pip install -v -e . -# "-v" 表示输出更多安装相关的信息 -# "-e" 表示以可编辑形式安装,这样可以在不重新安装的情况下,让本地修改直接生效 -``` - -### 作为 Python 包安装 - -如果只是希望调用 MMPose 的接口,或者在自己的项目中导入 MMPose 中的模块。直接使用 mim 安装即可。 - -```shell -mim install "mmpose>=1.1.0" -``` - -## 验证安装 - -为了验证 MMPose 是否安装正确,您可以通过以下步骤运行模型推理。 - -**第 1 步** 我们需要下载配置文件和模型权重文件 - -```shell -mim download mmpose --config td-hm_hrnet-w48_8xb32-210e_coco-256x192 --dest . 
-``` - -下载过程往往需要几秒或更多的时间,这取决于您的网络环境。完成之后,您会在当前目录下找到这两个文件:`td-hm_hrnet-w48_8xb32-210e_coco-256x192.py` 和 `hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth`, 分别是配置文件和对应的模型权重文件。 - -**第 2 步** 验证推理示例 - -如果您是**从源码安装**的 mmpose,可以直接运行以下命令进行验证: - -```shell -python demo/image_demo.py \ - tests/data/coco/000000000785.jpg \ - td-hm_hrnet-w48_8xb32-210e_coco-256x192.py \ - hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth \ - --out-file vis_results.jpg \ - --draw-heatmap -``` - -如果一切顺利,您将会得到这样的可视化结果: - -![image](https://user-images.githubusercontent.com/87690686/187824033-2cce0f55-034a-4127-82e2-52744178bc32.jpg) - -代码会将预测的关键点和热图绘制在图像中的人体上,并保存到当前文件夹下的 `vis_results.jpg`。 - -如果您是**作为 Python 包安装**,可以打开您的 Python 解释器,复制并粘贴如下代码: - -```python -from mmpose.apis import inference_topdown, init_model -from mmpose.utils import register_all_modules - -register_all_modules() - -config_file = 'td-hm_hrnet-w48_8xb32-210e_coco-256x192.py' -checkpoint_file = 'hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth' -model = init_model(config_file, checkpoint_file, device='cpu') # or device='cuda:0' - -# 请准备好一张带有人体的图片 -results = inference_topdown(model, 'demo.jpg') -``` - -示例图片 `demo.jpg` 可以从 [Github](https://raw.githubusercontent.com/open-mmlab/mmpose/main/tests/data/coco/000000000785.jpg) 下载。 -推理结果是一个 `PoseDataSample` 列表,预测结果将会保存在 `pred_instances` 中,包括检测到的关键点位置和置信度。 - -## 自定义安装 - -### CUDA 版本 - -安装 PyTorch 时,需要指定 CUDA 版本。如果您不清楚选择哪个,请遵循我们的建议: - -- 对于 Ampere 架构的 NVIDIA GPU,例如 GeForce 30 系列 以及 NVIDIA A100,CUDA 11 是必需的。 -- 对于更早的 NVIDIA GPU,CUDA 11 是向后兼容 (backward compatible) 的,但 CUDA 10.2 能够提供更好的兼容性,也更加轻量。 - -请确保您的 GPU 驱动版本满足最低的版本需求,参阅[这张表](https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cuda-major-component-versions__table-cuda-toolkit-driver-versions)。 - -```{note} -如果按照我们的最佳实践进行安装,CUDA 运行时库就足够了,因为我们提供相关 CUDA 代码的预编译,您不需要进行本地编译。 -但如果您希望从源码进行 MMCV 的编译,或是进行其他 CUDA 算子的开发,那么就必须安装完整的 CUDA 工具链,参见 -[NVIDIA 官网](https://developer.nvidia.com/cuda-downloads),另外还需要确保该 CUDA 工具链的版本与 PyTorch 安装时 -的配置相匹配(如用 `conda install` 安装 PyTorch 时指定的 cudatoolkit 版本)。 -``` - -### 不使用 MIM 安装 MMEngine - -若不使用 mim 安装 MMEngine,请遵循 [ MMEngine 安装指南](https://mmengine.readthedocs.io/zh_CN/latest/get_started/installation.html). - -例如,您可以通过以下命令安装 MMEngine: - -```shell -pip install mmengine -``` - -### 不使用 MIM 安装 MMCV - -MMCV 包含 C++ 和 CUDA 扩展,因此其对 PyTorch 的依赖比较复杂。MIM 会自动解析这些 -依赖,选择合适的 MMCV 预编译包,使安装更简单,但它并不是必需的。 - -若不使用 mim 来安装 MMCV,请遵照 [MMCV 安装指南](https://mmcv.readthedocs.io/zh_CN/2.x/get_started/installation.html)。 -它需要您用指定 url 的形式手动指定对应的 PyTorch 和 CUDA 版本。 - -举个例子,如下命令将会安装基于 PyTorch 1.10.x 和 CUDA 11.3 编译的 mmcv。 - -```shell -pip install 'mmcv>=2.0.1' -f https://download.openmmlab.com/mmcv/dist/cu113/torch1.10/index.html -``` - -### 在 CPU 环境中安装 - -MMPose 可以仅在 CPU 环境中安装,在 CPU 模式下,您可以完成训练、测试和模型推理等所有操作。 - -在 CPU 模式下,MMCV 的部分功能将不可用,通常是一些 GPU 编译的算子,如 `Deformable Convolution`。MMPose 中大部分的模型都不会依赖这些算子,但是如果您尝试使用包含这些算子的模型来运行训练、测试或推理,将会报错。 - -### 在 Google Colab 中安装 - -[Google Colab](https://colab.research.google.com/) 通常已经包含了 PyTorch 环境,因此我们只需要安装 MMEngine, MMCV 和 MMPose 即可,命令如下: - -**第 1 步** 使用 [MIM](https://github.com/open-mmlab/mim) 安装 [MMEngine](https://github.com/open-mmlab/mmengine) 和 [MMCV](https://github.com/open-mmlab/mmcv/tree/2.x) - -```shell -!pip3 install openmim -!mim install mmengine -!mim install "mmcv>=2.0.1" -``` - -**第 2 步** 从源码安装 mmpose - -```shell -!git clone https://github.com/open-mmlab/mmpose.git -%cd mmpose -!pip install -e . 
-``` - -**第 3 步** 验证 - -```python -import mmpose -print(mmpose.__version__) -# 预期输出: 1.1.0 -``` - -```{note} -在 Jupyter 中,感叹号 `!` 用于执行外部命令,而 `%cd` 是一个[魔术命令](https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-cd),用于切换 Python 的工作路径。 -``` - -### 通过 Docker 使用 MMPose - -MMPose 提供 [Dockerfile](https://github.com/open-mmlab/mmpose/blob/master/docker/Dockerfile) -用于构建镜像。请确保您的 [Docker 版本](https://docs.docker.com/engine/install/) >=19.03。 - -```shell -# 构建默认的 PyTorch 1.8.0,CUDA 10.1 版本镜像 -# 如果您希望使用其他版本,请修改 Dockerfile -docker build -t mmpose docker/ -``` - -**注意**:请确保您已经安装了 [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker)。 - -用以下命令运行 Docker 镜像: - -```shell -docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/mmpose/data mmpose -``` - -`{DATA_DIR}` 是您本地存放用于 MMPose 训练、测试、推理等流程的数据目录。 - -## 故障解决 - -如果您在安装过程中遇到了什么问题,请先查阅[常见问题](faq.md)。如果没有找到解决方法,可以在 GitHub -上[提出 issue](https://github.com/open-mmlab/mmpose/issues/new/choose)。 +# 安装 + +我们推荐用户按照我们的最佳实践来安装 MMPose。但除此之外,如果您想根据 +您的习惯完成安装流程,也可以参见 [自定义安装](#自定义安装) 一节来获取更多信息。 + +- [安装](#安装) + - [依赖环境](#依赖环境) + - [最佳实践](#最佳实践) + - [从源码安装 MMPose](#从源码安装-mmpose) + - [作为 Python 包安装](#作为-python-包安装) + - [验证安装](#验证安装) + - [自定义安装](#自定义安装) + - [CUDA 版本](#cuda-版本) + - [不使用 MIM 安装 MMEngine](#不使用-mim-安装-mmengine) + - [在 CPU 环境中安装](#在-cpu-环境中安装) + - [在 Google Colab 中安装](#在-google-colab-中安装) + - [通过 Docker 使用 MMPose](#通过-docker-使用-mmpose) + - [故障解决](#故障解决) + +## 依赖环境 + +在本节中,我们将演示如何准备 PyTorch 相关的依赖环境。 + +MMPose 适用于 Linux、Windows 和 macOS。它需要 Python 3.7+、CUDA 9.2+ 和 PyTorch 1.8+。 + +如果您对配置 PyTorch 环境已经很熟悉,并且已经完成了配置,可以直接进入下一节:[安装](#安装-mmpose)。否则,请依照以下步骤完成配置。 + +**第 1 步** 从[官网](https://docs.conda.io/en/latest/miniconda.html) 下载并安装 Miniconda。 + +**第 2 步** 创建一个 conda 虚拟环境并激活它。 + +```shell +conda create --name openmmlab python=3.8 -y +conda activate openmmlab +``` + +**第 3 步** 按照[官方指南](https://pytorch.org/get-started/locally/) 安装 PyTorch。例如: + +在 GPU 平台: + +```shell +conda install pytorch torchvision -c pytorch +``` + +```{warning} +以上命令会自动安装最新版的 PyTorch 与对应的 cudatoolkit,请检查它们是否与您的环境匹配。 +``` + +在 CPU 平台: + +```shell +conda install pytorch torchvision cpuonly -c pytorch +``` + +**第 4 步** 使用 [MIM](https://github.com/open-mmlab/mim) 安装 [MMEngine](https://github.com/open-mmlab/mmengine) 和 [MMCV](https://github.com/open-mmlab/mmcv/tree/2.x) + +```shell +pip install -U openmim +mim install mmengine +mim install "mmcv>=2.0.1" +``` + +请注意,MMPose 中的一些推理示例脚本需要使用 [MMDetection](https://github.com/open-mmlab/mmdetection) (mmdet) 检测人体。如果您想运行这些示例脚本,可以通过运行以下命令安装 mmdet: + +```shell +mim install "mmdet>=3.1.0" +``` + +## 最佳实践 + +根据具体需求,我们支持两种安装模式: 从源码安装(推荐)和作为 Python 包安装 + +### 从源码安装(推荐) + +如果基于 MMPose 框架开发自己的任务,需要添加新的功能,比如新的模型或是数据集,或者使用我们提供的各种工具。从源码按如下方式安装 mmpose: + +```shell +git clone https://github.com/open-mmlab/mmpose.git +cd mmpose +pip install -r requirements.txt +pip install -v -e . +# "-v" 表示输出更多安装相关的信息 +# "-e" 表示以可编辑形式安装,这样可以在不重新安装的情况下,让本地修改直接生效 +``` + +### 作为 Python 包安装 + +如果只是希望调用 MMPose 的接口,或者在自己的项目中导入 MMPose 中的模块。直接使用 mim 安装即可。 + +```shell +mim install "mmpose>=1.1.0" +``` + +## 验证安装 + +为了验证 MMPose 是否安装正确,您可以通过以下步骤运行模型推理。 + +**第 1 步** 我们需要下载配置文件和模型权重文件 + +```shell +mim download mmpose --config td-hm_hrnet-w48_8xb32-210e_coco-256x192 --dest . 
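+# 注:该命令会把配置文件和对应的模型权重文件一并下载到当前目录(--dest .)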
+``` + +下载过程往往需要几秒或更多的时间,这取决于您的网络环境。完成之后,您会在当前目录下找到这两个文件:`td-hm_hrnet-w48_8xb32-210e_coco-256x192.py` 和 `hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth`, 分别是配置文件和对应的模型权重文件。 + +**第 2 步** 验证推理示例 + +如果您是**从源码安装**的 mmpose,可以直接运行以下命令进行验证: + +```shell +python demo/image_demo.py \ + tests/data/coco/000000000785.jpg \ + td-hm_hrnet-w48_8xb32-210e_coco-256x192.py \ + hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth \ + --out-file vis_results.jpg \ + --draw-heatmap +``` + +如果一切顺利,您将会得到这样的可视化结果: + +![image](https://user-images.githubusercontent.com/87690686/187824033-2cce0f55-034a-4127-82e2-52744178bc32.jpg) + +代码会将预测的关键点和热图绘制在图像中的人体上,并保存到当前文件夹下的 `vis_results.jpg`。 + +如果您是**作为 Python 包安装**,可以打开您的 Python 解释器,复制并粘贴如下代码: + +```python +from mmpose.apis import inference_topdown, init_model +from mmpose.utils import register_all_modules + +register_all_modules() + +config_file = 'td-hm_hrnet-w48_8xb32-210e_coco-256x192.py' +checkpoint_file = 'hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth' +model = init_model(config_file, checkpoint_file, device='cpu') # or device='cuda:0' + +# 请准备好一张带有人体的图片 +results = inference_topdown(model, 'demo.jpg') +``` + +示例图片 `demo.jpg` 可以从 [Github](https://raw.githubusercontent.com/open-mmlab/mmpose/main/tests/data/coco/000000000785.jpg) 下载。 +推理结果是一个 `PoseDataSample` 列表,预测结果将会保存在 `pred_instances` 中,包括检测到的关键点位置和置信度。 + +## 自定义安装 + +### CUDA 版本 + +安装 PyTorch 时,需要指定 CUDA 版本。如果您不清楚选择哪个,请遵循我们的建议: + +- 对于 Ampere 架构的 NVIDIA GPU,例如 GeForce 30 系列 以及 NVIDIA A100,CUDA 11 是必需的。 +- 对于更早的 NVIDIA GPU,CUDA 11 是向后兼容 (backward compatible) 的,但 CUDA 10.2 能够提供更好的兼容性,也更加轻量。 + +请确保您的 GPU 驱动版本满足最低的版本需求,参阅[这张表](https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cuda-major-component-versions__table-cuda-toolkit-driver-versions)。 + +```{note} +如果按照我们的最佳实践进行安装,CUDA 运行时库就足够了,因为我们提供相关 CUDA 代码的预编译,您不需要进行本地编译。 +但如果您希望从源码进行 MMCV 的编译,或是进行其他 CUDA 算子的开发,那么就必须安装完整的 CUDA 工具链,参见 +[NVIDIA 官网](https://developer.nvidia.com/cuda-downloads),另外还需要确保该 CUDA 工具链的版本与 PyTorch 安装时 +的配置相匹配(如用 `conda install` 安装 PyTorch 时指定的 cudatoolkit 版本)。 +``` + +### 不使用 MIM 安装 MMEngine + +若不使用 mim 安装 MMEngine,请遵循 [ MMEngine 安装指南](https://mmengine.readthedocs.io/zh_CN/latest/get_started/installation.html). + +例如,您可以通过以下命令安装 MMEngine: + +```shell +pip install mmengine +``` + +### 不使用 MIM 安装 MMCV + +MMCV 包含 C++ 和 CUDA 扩展,因此其对 PyTorch 的依赖比较复杂。MIM 会自动解析这些 +依赖,选择合适的 MMCV 预编译包,使安装更简单,但它并不是必需的。 + +若不使用 mim 来安装 MMCV,请遵照 [MMCV 安装指南](https://mmcv.readthedocs.io/zh_CN/2.x/get_started/installation.html)。 +它需要您用指定 url 的形式手动指定对应的 PyTorch 和 CUDA 版本。 + +举个例子,如下命令将会安装基于 PyTorch 1.10.x 和 CUDA 11.3 编译的 mmcv。 + +```shell +pip install 'mmcv>=2.0.1' -f https://download.openmmlab.com/mmcv/dist/cu113/torch1.10/index.html +``` + +### 在 CPU 环境中安装 + +MMPose 可以仅在 CPU 环境中安装,在 CPU 模式下,您可以完成训练、测试和模型推理等所有操作。 + +在 CPU 模式下,MMCV 的部分功能将不可用,通常是一些 GPU 编译的算子,如 `Deformable Convolution`。MMPose 中大部分的模型都不会依赖这些算子,但是如果您尝试使用包含这些算子的模型来运行训练、测试或推理,将会报错。 + +### 在 Google Colab 中安装 + +[Google Colab](https://colab.research.google.com/) 通常已经包含了 PyTorch 环境,因此我们只需要安装 MMEngine, MMCV 和 MMPose 即可,命令如下: + +**第 1 步** 使用 [MIM](https://github.com/open-mmlab/mim) 安装 [MMEngine](https://github.com/open-mmlab/mmengine) 和 [MMCV](https://github.com/open-mmlab/mmcv/tree/2.x) + +```shell +!pip3 install openmim +!mim install mmengine +!mim install "mmcv>=2.0.1" +``` + +**第 2 步** 从源码安装 mmpose + +```shell +!git clone https://github.com/open-mmlab/mmpose.git +%cd mmpose +!pip install -e . 
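+# 以可编辑模式(-e)安装,本地修改无需重新安装即可生效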
+``` + +**第 3 步** 验证 + +```python +import mmpose +print(mmpose.__version__) +# 预期输出: 1.1.0 +``` + +```{note} +在 Jupyter 中,感叹号 `!` 用于执行外部命令,而 `%cd` 是一个[魔术命令](https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-cd),用于切换 Python 的工作路径。 +``` + +### 通过 Docker 使用 MMPose + +MMPose 提供 [Dockerfile](https://github.com/open-mmlab/mmpose/blob/master/docker/Dockerfile) +用于构建镜像。请确保您的 [Docker 版本](https://docs.docker.com/engine/install/) >=19.03。 + +```shell +# 构建默认的 PyTorch 1.8.0,CUDA 10.1 版本镜像 +# 如果您希望使用其他版本,请修改 Dockerfile +docker build -t mmpose docker/ +``` + +**注意**:请确保您已经安装了 [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker)。 + +用以下命令运行 Docker 镜像: + +```shell +docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/mmpose/data mmpose +``` + +`{DATA_DIR}` 是您本地存放用于 MMPose 训练、测试、推理等流程的数据目录。 + +## 故障解决 + +如果您在安装过程中遇到了什么问题,请先查阅[常见问题](faq.md)。如果没有找到解决方法,可以在 GitHub +上[提出 issue](https://github.com/open-mmlab/mmpose/issues/new/choose)。 diff --git a/docs/zh_cn/make.bat b/docs/zh_cn/make.bat index 922152e96a..2119f51099 100644 --- a/docs/zh_cn/make.bat +++ b/docs/zh_cn/make.bat @@ -1,35 +1,35 @@ -@ECHO OFF - -pushd %~dp0 - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set SOURCEDIR=. -set BUILDDIR=_build - -if "%1" == "" goto help - -%SPHINXBUILD% >NUL 2>NUL -if errorlevel 9009 ( - echo. - echo.The 'sphinx-build' command was not found. Make sure you have Sphinx - echo.installed, then set the SPHINXBUILD environment variable to point - echo.to the full path of the 'sphinx-build' executable. Alternatively you - echo.may add the Sphinx directory to PATH. - echo. - echo.If you don't have Sphinx installed, grab it from - echo.http://sphinx-doc.org/ - exit /b 1 -) - -%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% -goto end - -:help -%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% - -:end -popd +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=_build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/docs/zh_cn/merge_docs.sh b/docs/zh_cn/merge_docs.sh index 258141d5f8..1f207abdbd 100644 --- a/docs/zh_cn/merge_docs.sh +++ b/docs/zh_cn/merge_docs.sh @@ -1,31 +1,31 @@ -#!/usr/bin/env bash -# Copyright (c) OpenMMLab. All rights reserved. 
- -sed -i '$a\\n' ../../demo/docs/zh_cn/*_demo.md -cat ../../demo/docs/zh_cn/*_demo.md | sed "s/^## 2D\(.*\)Demo/##\1Estimation/" | sed "s/md###t/html#t/g" | sed '1i\# Demos\n' | sed 's=](/docs/en/=](/=g' | sed 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' >demos.md - - # remove /docs/ for link used in doc site -sed -i 's=](/docs/zh_cn/=](=g' overview.md -sed -i 's=](/docs/zh_cn/=](=g' installation.md -sed -i 's=](/docs/zh_cn/=](=g' quick_run.md -sed -i 's=](/docs/zh_cn/=](=g' migration.md -sed -i 's=](/docs/zh_cn/=](=g' ./model_zoo/*.md -sed -i 's=](/docs/zh_cn/=](=g' ./model_zoo_papers/*.md -sed -i 's=](/docs/zh_cn/=](=g' ./user_guides/*.md -sed -i 's=](/docs/zh_cn/=](=g' ./advanced_guides/*.md -sed -i 's=](/docs/zh_cn/=](=g' ./dataset_zoo/*.md -sed -i 's=](/docs/zh_cn/=](=g' ./notes/*.md -sed -i 's=](/docs/zh_cn/=](=g' ./projects/*.md - - -sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' overview.md -sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' installation.md -sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' quick_run.md -sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' migration.md -sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./advanced_guides/*.md -sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./model_zoo/*.md -sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./model_zoo_papers/*.md -sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./user_guides/*.md -sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./dataset_zoo/*.md -sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./notes/*.md -sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./projects/*.md +#!/usr/bin/env bash +# Copyright (c) OpenMMLab. All rights reserved. 
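+#
+# 以下命令对文档 Markdown 的链接与标题做批量改写:
+#   1. 将各 demo 文档合并为 demos.md,并调整其中的标题与锚点;
+#   2. 去除站内链接中的 /docs/zh_cn/ 前缀;
+#   3. 将指向仓库根目录的链接改写为 GitHub dev-1.x 分支上的绝对链接。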
+ +sed -i '$a\\n' ../../demo/docs/zh_cn/*_demo.md +cat ../../demo/docs/zh_cn/*_demo.md | sed "s/^## 2D\(.*\)Demo/##\1Estimation/" | sed "s/md###t/html#t/g" | sed '1i\# Demos\n' | sed 's=](/docs/en/=](/=g' | sed 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' >demos.md + + # remove /docs/ for link used in doc site +sed -i 's=](/docs/zh_cn/=](=g' overview.md +sed -i 's=](/docs/zh_cn/=](=g' installation.md +sed -i 's=](/docs/zh_cn/=](=g' quick_run.md +sed -i 's=](/docs/zh_cn/=](=g' migration.md +sed -i 's=](/docs/zh_cn/=](=g' ./model_zoo/*.md +sed -i 's=](/docs/zh_cn/=](=g' ./model_zoo_papers/*.md +sed -i 's=](/docs/zh_cn/=](=g' ./user_guides/*.md +sed -i 's=](/docs/zh_cn/=](=g' ./advanced_guides/*.md +sed -i 's=](/docs/zh_cn/=](=g' ./dataset_zoo/*.md +sed -i 's=](/docs/zh_cn/=](=g' ./notes/*.md +sed -i 's=](/docs/zh_cn/=](=g' ./projects/*.md + + +sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' overview.md +sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' installation.md +sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' quick_run.md +sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' migration.md +sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./advanced_guides/*.md +sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./model_zoo/*.md +sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./model_zoo_papers/*.md +sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./user_guides/*.md +sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./dataset_zoo/*.md +sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./notes/*.md +sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./projects/*.md diff --git a/docs/zh_cn/migration.md b/docs/zh_cn/migration.md index 9a591dfcc9..934203ddb7 100644 --- a/docs/zh_cn/migration.md +++ b/docs/zh_cn/migration.md @@ -1,201 +1,201 @@ -# MMPose 0.X 兼容性说明 - -MMPose 1.0 经过了大规模重构并解决了许多遗留问题,对于 0.x 版本的大部分代码 MMPose 1.0 将不兼容。 - -## 数据变换 - -### 平移、旋转和缩放 - -旧版的数据变换方法 `TopDownRandomShiftBboxCenter` 和 `TopDownGetRandomScaleRotation`,将被合并为 `RandomBBoxTransform`: - -```Python -@TRANSFORMS.register_module() -class RandomBBoxTransform(BaseTransform): - r"""Rnadomly shift, resize and rotate the bounding boxes. - - Required Keys: - - - bbox_center - - bbox_scale - - Modified Keys: - - - bbox_center - - bbox_scale - - Added Keys: - - bbox_rotation - - Args: - shift_factor (float): Randomly shift the bbox in range - :math:`[-dx, dx]` and :math:`[-dy, dy]` in X and Y directions, - where :math:`dx(y) = x(y)_scale \cdot shift_factor` in pixels. - Defaults to 0.16 - shift_prob (float): Probability of applying random shift. Defaults to - 0.3 - scale_factor (Tuple[float, float]): Randomly resize the bbox in range - :math:`[scale_factor[0], scale_factor[1]]`. Defaults to (0.5, 1.5) - scale_prob (float): Probability of applying random resizing. Defaults - to 1.0 - rotate_factor (float): Randomly rotate the bbox in - :math:`[-rotate_factor, rotate_factor]` in degrees. Defaults - to 80.0 - rotate_prob (float): Probability of applying random rotation. 
Defaults - to 0.6 - """ - - def __init__(self, - shift_factor: float = 0.16, - shift_prob: float = 0.3, - scale_factor: Tuple[float, float] = (0.5, 1.5), - scale_prob: float = 1.0, - rotate_factor: float = 80.0, - rotate_prob: float = 0.6) -> None: -``` - -### 标签生成 - -旧版用于训练标签生成的方法 `TopDownGenerateTarget` 、`TopDownGenerateTargetRegression`、`BottomUpGenerateHeatmapTarget`、`BottomUpGenerateTarget` 等将被合并为 `GenerateTarget`,而实际的生成方法由[编解码器](./user_guides/codecs.md) 提供: - -```Python -@TRANSFORMS.register_module() -class GenerateTarget(BaseTransform): - """Encode keypoints into Target. - - The generated target is usually the supervision signal of the model - learning, e.g. heatmaps or regression labels. - - Required Keys: - - - keypoints - - keypoints_visible - - dataset_keypoint_weights - - Added Keys: - - - The keys of the encoded items from the codec will be updated into - the results, e.g. ``'heatmaps'`` or ``'keypoint_weights'``. See - the specific codec for more details. - - Args: - encoder (dict | list[dict]): The codec config for keypoint encoding. - Both single encoder and multiple encoders (given as a list) are - supported - multilevel (bool): Determine the method to handle multiple encoders. - If ``multilevel==True``, generate multilevel targets from a group - of encoders of the same type (e.g. multiple :class:`MSRAHeatmap` - encoders with different sigma values); If ``multilevel==False``, - generate combined targets from a group of different encoders. This - argument will have no effect in case of single encoder. Defaults - to ``False`` - use_dataset_keypoint_weights (bool): Whether use the keypoint weights - from the dataset meta information. Defaults to ``False`` - """ - - def __init__(self, - encoder: MultiConfig, - multilevel: bool = False, - use_dataset_keypoint_weights: bool = False) -> None: -``` - -### 数据归一化 - -旧版的数据归一化操作 `NormalizeTensor` 和 `ToTensor` 方法将由 **DataPreprocessor** 模块替代,不再作为流水线的一部分,而是作为模块加入到模型前向传播中。 - -## 模型兼容 - -我们对 model zoo 提供的模型权重进行了兼容性处理,确保相同的模型权重测试精度能够与 0.x 版本保持同等水平,但由于在这两个版本中存在大量处理细节的差异,推理结果可能会产生轻微的不同(精度误差小于 0.05%)。 - -对于使用 0.x 版本训练保存的模型权重,我们在预测头中提供了 `_load_state_dict_pre_hook()` 方法来将旧版的权重字典替换为新版,如果你希望将在旧版上开发的模型兼容到新版,可以参考我们的实现。 - -```Python -@MODELS.register_module() -class YourHead(BaseHead): -def __init__(self): - - ## omitted - - # Register the hook to automatically convert old version state dicts - self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) -``` - -### Heatmap-based 方法 - -对于基于SimpleBaseline方法的模型,主要需要注意最后一层卷积层的兼容: - -```Python -def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, - **kwargs): - version = local_meta.get('version', None) - - if version and version >= self._version: - return - - # convert old-version state dict - keys = list(state_dict.keys()) - for _k in keys: - if not _k.startswith(prefix): - continue - v = state_dict.pop(_k) - k = _k[len(prefix):] - # In old version, "final_layer" includes both intermediate - # conv layers (new "conv_layers") and final conv layers (new - # "final_layer"). 
- # - # If there is no intermediate conv layer, old "final_layer" will - # have keys like "final_layer.xxx", which should be still - # named "final_layer.xxx"; - # - # If there are intermediate conv layers, old "final_layer" will - # have keys like "final_layer.n.xxx", where the weights of the last - # one should be renamed "final_layer.xxx", and others should be - # renamed "conv_layers.n.xxx" - k_parts = k.split('.') - if k_parts[0] == 'final_layer': - if len(k_parts) == 3: - assert isinstance(self.conv_layers, nn.Sequential) - idx = int(k_parts[1]) - if idx < len(self.conv_layers): - # final_layer.n.xxx -> conv_layers.n.xxx - k_new = 'conv_layers.' + '.'.join(k_parts[1:]) - else: - # final_layer.n.xxx -> final_layer.xxx - k_new = 'final_layer.' + k_parts[2] - else: - # final_layer.xxx remains final_layer.xxx - k_new = k - else: - k_new = k - - state_dict[prefix + k_new] = v -``` - -### RLE-based 方法 - -对于基于 RLE 的模型,由于新版的 `loss` 模块更名为 `loss_module`,且 flow 模型归属在 `loss` 模块下,因此需要对权重字典中 `loss` 字段进行更改: - -```Python -def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, - **kwargs): - - version = local_meta.get('version', None) - - if version and version >= self._version: - return - - # convert old-version state dict - keys = list(state_dict.keys()) - for _k in keys: - v = state_dict.pop(_k) - k = _k.lstrip(prefix) - # In old version, "loss" includes the instances of loss, - # now it should be renamed "loss_module" - k_parts = k.split('.') - if k_parts[0] == 'loss': - # loss.xxx -> loss_module.xxx - k_new = prefix + 'loss_module.' + '.'.join(k_parts[1:]) - else: - k_new = _k - - state_dict[k_new] = v -``` +# MMPose 0.X 兼容性说明 + +MMPose 1.0 经过了大规模重构并解决了许多遗留问题,对于 0.x 版本的大部分代码 MMPose 1.0 将不兼容。 + +## 数据变换 + +### 平移、旋转和缩放 + +旧版的数据变换方法 `TopDownRandomShiftBboxCenter` 和 `TopDownGetRandomScaleRotation`,将被合并为 `RandomBBoxTransform`: + +```Python +@TRANSFORMS.register_module() +class RandomBBoxTransform(BaseTransform): + r"""Rnadomly shift, resize and rotate the bounding boxes. + + Required Keys: + + - bbox_center + - bbox_scale + + Modified Keys: + + - bbox_center + - bbox_scale + + Added Keys: + - bbox_rotation + + Args: + shift_factor (float): Randomly shift the bbox in range + :math:`[-dx, dx]` and :math:`[-dy, dy]` in X and Y directions, + where :math:`dx(y) = x(y)_scale \cdot shift_factor` in pixels. + Defaults to 0.16 + shift_prob (float): Probability of applying random shift. Defaults to + 0.3 + scale_factor (Tuple[float, float]): Randomly resize the bbox in range + :math:`[scale_factor[0], scale_factor[1]]`. Defaults to (0.5, 1.5) + scale_prob (float): Probability of applying random resizing. Defaults + to 1.0 + rotate_factor (float): Randomly rotate the bbox in + :math:`[-rotate_factor, rotate_factor]` in degrees. Defaults + to 80.0 + rotate_prob (float): Probability of applying random rotation. Defaults + to 0.6 + """ + + def __init__(self, + shift_factor: float = 0.16, + shift_prob: float = 0.3, + scale_factor: Tuple[float, float] = (0.5, 1.5), + scale_prob: float = 1.0, + rotate_factor: float = 80.0, + rotate_prob: float = 0.6) -> None: +``` + +### 标签生成 + +旧版用于训练标签生成的方法 `TopDownGenerateTarget` 、`TopDownGenerateTargetRegression`、`BottomUpGenerateHeatmapTarget`、`BottomUpGenerateTarget` 等将被合并为 `GenerateTarget`,而实际的生成方法由[编解码器](./user_guides/codecs.md) 提供: + +```Python +@TRANSFORMS.register_module() +class GenerateTarget(BaseTransform): + """Encode keypoints into Target. + + The generated target is usually the supervision signal of the model + learning, e.g. 
heatmaps or regression labels. + + Required Keys: + + - keypoints + - keypoints_visible + - dataset_keypoint_weights + + Added Keys: + + - The keys of the encoded items from the codec will be updated into + the results, e.g. ``'heatmaps'`` or ``'keypoint_weights'``. See + the specific codec for more details. + + Args: + encoder (dict | list[dict]): The codec config for keypoint encoding. + Both single encoder and multiple encoders (given as a list) are + supported + multilevel (bool): Determine the method to handle multiple encoders. + If ``multilevel==True``, generate multilevel targets from a group + of encoders of the same type (e.g. multiple :class:`MSRAHeatmap` + encoders with different sigma values); If ``multilevel==False``, + generate combined targets from a group of different encoders. This + argument will have no effect in case of single encoder. Defaults + to ``False`` + use_dataset_keypoint_weights (bool): Whether use the keypoint weights + from the dataset meta information. Defaults to ``False`` + """ + + def __init__(self, + encoder: MultiConfig, + multilevel: bool = False, + use_dataset_keypoint_weights: bool = False) -> None: +``` + +### 数据归一化 + +旧版的数据归一化操作 `NormalizeTensor` 和 `ToTensor` 方法将由 **DataPreprocessor** 模块替代,不再作为流水线的一部分,而是作为模块加入到模型前向传播中。 + +## 模型兼容 + +我们对 model zoo 提供的模型权重进行了兼容性处理,确保相同的模型权重测试精度能够与 0.x 版本保持同等水平,但由于在这两个版本中存在大量处理细节的差异,推理结果可能会产生轻微的不同(精度误差小于 0.05%)。 + +对于使用 0.x 版本训练保存的模型权重,我们在预测头中提供了 `_load_state_dict_pre_hook()` 方法来将旧版的权重字典替换为新版,如果你希望将在旧版上开发的模型兼容到新版,可以参考我们的实现。 + +```Python +@MODELS.register_module() +class YourHead(BaseHead): +def __init__(self): + + ## omitted + + # Register the hook to automatically convert old version state dicts + self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) +``` + +### Heatmap-based 方法 + +对于基于SimpleBaseline方法的模型,主要需要注意最后一层卷积层的兼容: + +```Python +def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, + **kwargs): + version = local_meta.get('version', None) + + if version and version >= self._version: + return + + # convert old-version state dict + keys = list(state_dict.keys()) + for _k in keys: + if not _k.startswith(prefix): + continue + v = state_dict.pop(_k) + k = _k[len(prefix):] + # In old version, "final_layer" includes both intermediate + # conv layers (new "conv_layers") and final conv layers (new + # "final_layer"). + # + # If there is no intermediate conv layer, old "final_layer" will + # have keys like "final_layer.xxx", which should be still + # named "final_layer.xxx"; + # + # If there are intermediate conv layers, old "final_layer" will + # have keys like "final_layer.n.xxx", where the weights of the last + # one should be renamed "final_layer.xxx", and others should be + # renamed "conv_layers.n.xxx" + k_parts = k.split('.') + if k_parts[0] == 'final_layer': + if len(k_parts) == 3: + assert isinstance(self.conv_layers, nn.Sequential) + idx = int(k_parts[1]) + if idx < len(self.conv_layers): + # final_layer.n.xxx -> conv_layers.n.xxx + k_new = 'conv_layers.' + '.'.join(k_parts[1:]) + else: + # final_layer.n.xxx -> final_layer.xxx + k_new = 'final_layer.' 
+ k_parts[2] + else: + # final_layer.xxx remains final_layer.xxx + k_new = k + else: + k_new = k + + state_dict[prefix + k_new] = v +``` + +### RLE-based 方法 + +对于基于 RLE 的模型,由于新版的 `loss` 模块更名为 `loss_module`,且 flow 模型归属在 `loss` 模块下,因此需要对权重字典中 `loss` 字段进行更改: + +```Python +def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, + **kwargs): + + version = local_meta.get('version', None) + + if version and version >= self._version: + return + + # convert old-version state dict + keys = list(state_dict.keys()) + for _k in keys: + v = state_dict.pop(_k) + k = _k.lstrip(prefix) + # In old version, "loss" includes the instances of loss, + # now it should be renamed "loss_module" + k_parts = k.split('.') + if k_parts[0] == 'loss': + # loss.xxx -> loss_module.xxx + k_new = prefix + 'loss_module.' + '.'.join(k_parts[1:]) + else: + k_new = _k + + state_dict[k_new] = v +``` diff --git a/docs/zh_cn/notes/changelog.md b/docs/zh_cn/notes/changelog.md index 68beeeb069..41c58f0256 100644 --- a/docs/zh_cn/notes/changelog.md +++ b/docs/zh_cn/notes/changelog.md @@ -1,1316 +1,1316 @@ -# Changelog - -## **v1.0.0rc1 (14/10/2022)** - -**Highlights** - -- Release RTMPose, a high-performance real-time pose estimation algorithm with cross-platform deployment and inference support. See details at the [project page](/projects/rtmpose/) -- Support several new algorithms: ViTPose (arXiv'2022), CID (CVPR'2022), DEKR (CVPR'2021) -- Add Inferencer, a convenient inference interface that perform pose estimation and visualization on images, videos and webcam streams with only one line of code -- Introduce *Project*, a new form for rapid and easy implementation of new algorithms and features in MMPose, which is more handy for community contributors - -**New Features** - -- Support RTMPose ([#1971](https://github.com/open-mmlab/mmpose/pull/1971), [#2024](https://github.com/open-mmlab/mmpose/pull/2024), [#2028](https://github.com/open-mmlab/mmpose/pull/2028), [#2030](https://github.com/open-mmlab/mmpose/pull/2030), [#2040](https://github.com/open-mmlab/mmpose/pull/2040), [#2057](https://github.com/open-mmlab/mmpose/pull/2057)) -- Support Inferencer ([#1969](https://github.com/open-mmlab/mmpose/pull/1969)) -- Support ViTPose ([#1876](https://github.com/open-mmlab/mmpose/pull/1876), [#2056](https://github.com/open-mmlab/mmpose/pull/2056), [#2058](https://github.com/open-mmlab/mmpose/pull/2058), [#2065](https://github.com/open-mmlab/mmpose/pull/2065)) -- Support CID ([#1907](https://github.com/open-mmlab/mmpose/pull/1907)) -- Support DEKR ([#1834](https://github.com/open-mmlab/mmpose/pull/1834), [#1901](https://github.com/open-mmlab/mmpose/pull/1901)) -- Support training with multiple datasets ([#1767](https://github.com/open-mmlab/mmpose/pull/1767), [#1930](https://github.com/open-mmlab/mmpose/pull/1930), [#1938](https://github.com/open-mmlab/mmpose/pull/1938), [#2025](https://github.com/open-mmlab/mmpose/pull/2025)) -- Add *project* to allow rapid and easy implementation of new models and features ([#1914](https://github.com/open-mmlab/mmpose/pull/1914)) - -**Improvements** - -- Improve documentation quality ([#1846](https://github.com/open-mmlab/mmpose/pull/1846), [#1858](https://github.com/open-mmlab/mmpose/pull/1858), [#1872](https://github.com/open-mmlab/mmpose/pull/1872), [#1899](https://github.com/open-mmlab/mmpose/pull/1899), [#1925](https://github.com/open-mmlab/mmpose/pull/1925), [#1945](https://github.com/open-mmlab/mmpose/pull/1945), [#1952](https://github.com/open-mmlab/mmpose/pull/1952), 
[#1990](https://github.com/open-mmlab/mmpose/pull/1990), [#2023](https://github.com/open-mmlab/mmpose/pull/2023), [#2042](https://github.com/open-mmlab/mmpose/pull/2042)) -- Support visualizing keypoint indices ([#2051](https://github.com/open-mmlab/mmpose/pull/2051)) -- Support OpenPose style visualization ([#2055](https://github.com/open-mmlab/mmpose/pull/2055)) -- Accelerate image transpose in data pipelines with tensor operation ([#1976](https://github.com/open-mmlab/mmpose/pull/1976)) -- Support auto-import modules from registry ([#1961](https://github.com/open-mmlab/mmpose/pull/1961)) -- Support keypoint partition metric ([#1944](https://github.com/open-mmlab/mmpose/pull/1944)) -- Support SimCC 1D-heatmap visualization ([#1912](https://github.com/open-mmlab/mmpose/pull/1912)) -- Support saving predictions and data metainfo in demos ([#1814](https://github.com/open-mmlab/mmpose/pull/1814), [#1879](https://github.com/open-mmlab/mmpose/pull/1879)) -- Support SimCC with DARK ([#1870](https://github.com/open-mmlab/mmpose/pull/1870)) -- Remove Gaussian blur for offset maps in UDP-regress ([#1815](https://github.com/open-mmlab/mmpose/pull/1815)) -- Refactor encoding interface of Codec for better extendibility and easier configuration ([#1781](https://github.com/open-mmlab/mmpose/pull/1781)) -- Support evaluating CocoMetric without annotation file ([#1722](https://github.com/open-mmlab/mmpose/pull/1722)) -- Improve unit tests ([#1765](https://github.com/open-mmlab/mmpose/pull/1765)) - -**Bug Fixes** - -- Fix repeated warnings from different ranks ([#2053](https://github.com/open-mmlab/mmpose/pull/2053)) -- Avoid frequent scope switching when using mmdet inference api ([#2039](https://github.com/open-mmlab/mmpose/pull/2039)) -- Remove EMA parameters and message hub data when publishing model checkpoints ([#2036](https://github.com/open-mmlab/mmpose/pull/2036)) -- Fix metainfo copying in dataset class ([#2017](https://github.com/open-mmlab/mmpose/pull/2017)) -- Fix top-down demo bug when there is no object detected ([#2007](https://github.com/open-mmlab/mmpose/pull/2007)) -- Fix config errors ([#1882](https://github.com/open-mmlab/mmpose/pull/1882), [#1906](https://github.com/open-mmlab/mmpose/pull/1906), [#1995](https://github.com/open-mmlab/mmpose/pull/1995)) -- Fix image demo failure when GUI is unavailable ([#1968](https://github.com/open-mmlab/mmpose/pull/1968)) -- Fix bug in AdaptiveWingLoss ([#1953](https://github.com/open-mmlab/mmpose/pull/1953)) -- Fix incorrect importing of RepeatDataset which is deprecated ([#1943](https://github.com/open-mmlab/mmpose/pull/1943)) -- Fix bug in bottom-up datasets that ignores images without instances ([#1752](https://github.com/open-mmlab/mmpose/pull/1752), [#1936](https://github.com/open-mmlab/mmpose/pull/1936)) -- Fix upstream dependency issues ([#1867](https://github.com/open-mmlab/mmpose/pull/1867), [#1921](https://github.com/open-mmlab/mmpose/pull/1921)) -- Fix evaluation issues and update results ([#1763](https://github.com/open-mmlab/mmpose/pull/1763), [#1773](https://github.com/open-mmlab/mmpose/pull/1773), [#1780](https://github.com/open-mmlab/mmpose/pull/1780), [#1850](https://github.com/open-mmlab/mmpose/pull/1850), [#1868](https://github.com/open-mmlab/mmpose/pull/1868)) -- Fix local registry missing warnings ([#1849](https://github.com/open-mmlab/mmpose/pull/1849)) -- Remove deprecated scripts for model deployment ([#1845](https://github.com/open-mmlab/mmpose/pull/1845)) -- Fix a bug in input transformation in BaseHead 
([#1843](https://github.com/open-mmlab/mmpose/pull/1843)) -- Fix an interface mismatch with MMDetection in webcam demo ([#1813](https://github.com/open-mmlab/mmpose/pull/1813)) -- Fix a bug in heatmap visualization that causes incorrect scale ([#1800](https://github.com/open-mmlab/mmpose/pull/1800)) -- Add model metafiles ([#1768](https://github.com/open-mmlab/mmpose/pull/1768)) - -## **v1.0.0rc0 (14/10/2022)** - -**New Features** - -- Support 4 light-weight pose estimation algorithms: [SimCC](https://doi.org/10.48550/arxiv.2107.03332) (ECCV'2022), [Debias-IPR](https://openaccess.thecvf.com/content/ICCV2021/papers/Gu_Removing_the_Bias_of_Integral_Pose_Regression_ICCV_2021_paper.pdf) (ICCV'2021), [IPR](https://arxiv.org/abs/1711.08229) (ECCV'2018), and [DSNT](https://arxiv.org/abs/1801.07372v2) (ArXiv'2018) ([#1628](https://github.com/open-mmlab/mmpose/pull/1628)) - -**Migrations** - -- Add Webcam API in MMPose 1.0 ([#1638](https://github.com/open-mmlab/mmpose/pull/1638), [#1662](https://github.com/open-mmlab/mmpose/pull/1662)) @Ben-Louis -- Add codec for Associative Embedding (beta) ([#1603](https://github.com/open-mmlab/mmpose/pull/1603)) @ly015 - -**Improvements** - -- Add a colab tutorial for MMPose 1.0 ([#1660](https://github.com/open-mmlab/mmpose/pull/1660)) @Tau-J -- Add model index in config folder ([#1710](https://github.com/open-mmlab/mmpose/pull/1710), [#1709](https://github.com/open-mmlab/mmpose/pull/1709), [#1627](https://github.com/open-mmlab/mmpose/pull/1627)) @ly015, @Tau-J, @Ben-Louis -- Update and improve documentation ([#1692](https://github.com/open-mmlab/mmpose/pull/1692), [#1656](https://github.com/open-mmlab/mmpose/pull/1656), [#1681](https://github.com/open-mmlab/mmpose/pull/1681), [#1677](https://github.com/open-mmlab/mmpose/pull/1677), [#1664](https://github.com/open-mmlab/mmpose/pull/1664), [#1659](https://github.com/open-mmlab/mmpose/pull/1659)) @Tau-J, @Ben-Louis, @liqikai9 -- Improve config structures and formats ([#1651](https://github.com/open-mmlab/mmpose/pull/1651)) @liqikai9 - -**Bug Fixes** - -- Update mmengine version requirements ([#1715](https://github.com/open-mmlab/mmpose/pull/1715)) @Ben-Louis -- Update dependencies of pre-commit hooks ([#1705](https://github.com/open-mmlab/mmpose/pull/1705)) @Ben-Louis -- Fix mmcv version in DockerFile ([#1704](https://github.com/open-mmlab/mmpose/pull/1704)) -- Fix a bug in setting dataset metainfo in configs ([#1684](https://github.com/open-mmlab/mmpose/pull/1684)) @ly015 -- Fix a bug in UDP training ([#1682](https://github.com/open-mmlab/mmpose/pull/1682)) @liqikai9 -- Fix a bug in Dark decoding ([#1676](https://github.com/open-mmlab/mmpose/pull/1676)) @liqikai9 -- Fix bugs in visualization ([#1671](https://github.com/open-mmlab/mmpose/pull/1671), [#1668](https://github.com/open-mmlab/mmpose/pull/1668), [#1657](https://github.com/open-mmlab/mmpose/pull/1657)) @liqikai9, @Ben-Louis -- Fix incorrect flops calculation ([#1669](https://github.com/open-mmlab/mmpose/pull/1669)) @liqikai9 -- Fix `tensor.tile` compatibility issue for pytorch 1.6 ([#1658](https://github.com/open-mmlab/mmpose/pull/1658)) @ly015 -- Fix compatibility with `MultilevelPixelData` ([#1647](https://github.com/open-mmlab/mmpose/pull/1647)) @liqikai9 - -## **v1.0.0beta (1/09/2022)** - -We are excited to announce the release of MMPose 1.0.0beta. -MMPose 1.0.0beta is the first version of MMPose 1.x, a part of the OpenMMLab 2.0 projects. 
-Built upon the new [training engine](https://github.com/open-mmlab/mmengine), -MMPose 1.x unifies the interfaces of dataset, models, evaluation, and visualization with faster training and testing speed. -It also provide a general semi-supervised object detection framework, and more strong baselines. - -**Highlights** - -- **New engines**. MMPose 1.x is based on [MMEngine](https://github.com/open-mmlab/mmengine), which provides a general and powerful runner that allows more flexible customizations and significantly simplifies the entrypoints of high-level interfaces. - -- **Unified interfaces**. As a part of the OpenMMLab 2.0 projects, MMPose 1.x unifies and refactors the interfaces and internal logics of train, testing, datasets, models, evaluation, and visualization. All the OpenMMLab 2.0 projects share the same design in those interfaces and logics to allow the emergence of multi-task/modality algorithms. - -- **More documentation and tutorials**. We add a bunch of documentation and tutorials to help users get started more smoothly. Read it [here](https://mmpose.readthedocs.io/en/latest/). - -**Breaking Changes** - -In this release, we made lots of major refactoring and modifications. Please refer to the [migration guide](../migration.md) for details and migration instructions. - -## **v0.28.1 (28/07/2022)** - -This release is meant to fix the compatibility with the latest mmcv v1.6.1 - -## **v0.28.0 (06/07/2022)** - -**Highlights** - -- Support [TCFormer](https://openaccess.thecvf.com/content/CVPR2022/html/Zeng_Not_All_Tokens_Are_Equal_Human-Centric_Visual_Analysis_via_Token_CVPR_2022_paper.html) backbone, CVPR'2022 ([#1447](https://github.com/open-mmlab/mmpose/pull/1447), [#1452](https://github.com/open-mmlab/mmpose/pull/1452)) @zengwang430521 - -- Add [RLE](https://arxiv.org/abs/2107.11291) models on COCO dataset ([#1424](https://github.com/open-mmlab/mmpose/pull/1424)) @Indigo6, @Ben-Louis, @ly015 - -- Update swin models with better performance ([#1467](https://github.com/open-mmlab/mmpose/pull/1434)) @jin-s13 - -**New Features** - -- Support [TCFormer](https://openaccess.thecvf.com/content/CVPR2022/html/Zeng_Not_All_Tokens_Are_Equal_Human-Centric_Visual_Analysis_via_Token_CVPR_2022_paper.html) backbone, CVPR'2022 ([#1447](https://github.com/open-mmlab/mmpose/pull/1447), [#1452](https://github.com/open-mmlab/mmpose/pull/1452)) @zengwang430521 - -- Add [RLE](https://arxiv.org/abs/2107.11291) models on COCO dataset ([#1424](https://github.com/open-mmlab/mmpose/pull/1424)) @Indigo6, @Ben-Louis, @ly015 - -- Support layer decay optimizer constructor and learning rate decay optimizer constructor ([#1423](https://github.com/open-mmlab/mmpose/pull/1423)) @jin-s13 - -**Improvements** - -- Improve documentation quality ([#1416](https://github.com/open-mmlab/mmpose/pull/1416), [#1421](https://github.com/open-mmlab/mmpose/pull/1421), [#1423](https://github.com/open-mmlab/mmpose/pull/1423), [#1426](https://github.com/open-mmlab/mmpose/pull/1426), [#1458](https://github.com/open-mmlab/mmpose/pull/1458), [#1463](https://github.com/open-mmlab/mmpose/pull/1463)) @ly015, @liqikai9 - -- Support installation by [mim](https://github.com/open-mmlab/mim) ([#1425](https://github.com/open-mmlab/mmpose/pull/1425)) @liqikai9 - -- Support PAVI logger ([#1434](https://github.com/open-mmlab/mmpose/pull/1434)) @EvelynWang-0423 - -- Add progress bar for some demos ([#1454](https://github.com/open-mmlab/mmpose/pull/1454)) @liqikai9 - -- Webcam API supports quick device setting in terminal commands 
([#1466](https://github.com/open-mmlab/mmpose/pull/1466)) @ly015 - -- Update swin models with better performance ([#1467](https://github.com/open-mmlab/mmpose/pull/1434)) @jin-s13 - -**Bug Fixes** - -- Rename `custom_hooks_config` to `custom_hooks` in configs to align with the documentation ([#1427](https://github.com/open-mmlab/mmpose/pull/1427)) @ly015 - -- Fix deadlock issue in Webcam API ([#1430](https://github.com/open-mmlab/mmpose/pull/1430)) @ly015 - -- Fix smoother configs in video 3D demo ([#1457](https://github.com/open-mmlab/mmpose/pull/1457)) @ly015 - -## **v0.27.0 (07/06/2022)** - -**Highlights** - -- Support hand gesture recognition - - - Try the demo for gesture recognition - - Learn more about the algorithm, dataset and experiment results - -- Major upgrade to the Webcam API - - - Tutorials (EN|zh_CN) - - [API Reference](https://mmpose.readthedocs.io/en/latest/api.html#mmpose-apis-webcam) - - Demo - -**New Features** - -- Support gesture recognition algorithm [MTUT](https://openaccess.thecvf.com/content_CVPR_2019/html/Abavisani_Improving_the_Performance_of_Unimodal_Dynamic_Hand-Gesture_Recognition_With_Multimodal_CVPR_2019_paper.html) CVPR'2019 and dataset [NVGesture](https://openaccess.thecvf.com/content_cvpr_2016/html/Molchanov_Online_Detection_and_CVPR_2016_paper.html) CVPR'2016 ([#1380](https://github.com/open-mmlab/mmpose/pull/1380)) @Ben-Louis - -**Improvements** - -- Upgrade Webcam API and related documents ([#1393](https://github.com/open-mmlab/mmpose/pull/1393), [#1404](https://github.com/open-mmlab/mmpose/pull/1404), [#1413](https://github.com/open-mmlab/mmpose/pull/1413)) @ly015 - -- Support exporting COCO inference result without the annotation file ([#1368](https://github.com/open-mmlab/mmpose/pull/1368)) @liqikai9 - -- Replace markdownlint with mdformat in CI to avoid the dependence on ruby [#1382](https://github.com/open-mmlab/mmpose/pull/1382) @ly015 - -- Improve documentation quality ([#1385](https://github.com/open-mmlab/mmpose/pull/1385), [#1394](https://github.com/open-mmlab/mmpose/pull/1394), [#1395](https://github.com/open-mmlab/mmpose/pull/1395), [#1408](https://github.com/open-mmlab/mmpose/pull/1408)) @chubei-oppen, @ly015, @liqikai9 - -**Bug Fixes** - -- Fix xywh->xyxy bbox conversion in dataset sanity check ([#1367](https://github.com/open-mmlab/mmpose/pull/1367)) @jin-s13 - -- Fix a bug in two-stage 3D keypoint demo ([#1373](https://github.com/open-mmlab/mmpose/pull/1373)) @ly015 - -- Fix out-dated settings in PVT configs ([#1376](https://github.com/open-mmlab/mmpose/pull/1376)) @ly015 - -- Fix myst settings for document compiling ([#1381](https://github.com/open-mmlab/mmpose/pull/1381)) @ly015 - -- Fix a bug in bbox transform ([#1384](https://github.com/open-mmlab/mmpose/pull/1384)) @ly015 - -- Fix inaccurate description of `min_keypoints` in tracking apis ([#1398](https://github.com/open-mmlab/mmpose/pull/1398)) @pallgeuer - -- Fix warning with `torch.meshgrid` ([#1402](https://github.com/open-mmlab/mmpose/pull/1402)) @pallgeuer - -- Remove redundant transformer modules from `mmpose.datasets.backbones.utils` ([#1405](https://github.com/open-mmlab/mmpose/pull/1405)) @ly015 - -## **v0.26.0 (05/05/2022)** - -**Highlights** - -- Support [RLE (Residual Log-likelihood Estimation)](https://arxiv.org/abs/2107.11291), ICCV'2021 ([#1259](https://github.com/open-mmlab/mmpose/pull/1259)) @Indigo6, @ly015 - -- Support [Swin Transformer](https://arxiv.org/abs/2103.14030), ICCV'2021 ([#1300](https://github.com/open-mmlab/mmpose/pull/1300)) @yumendecc, @ly015 - 
- Support [PVT](https://arxiv.org/abs/2102.12122), ICCV'2021 and [PVTv2](https://arxiv.org/abs/2106.13797), CVMJ'2022 ([#1343](https://github.com/open-mmlab/mmpose/pull/1343)) @zengwang430521

- Speed up inference and reduce CPU usage by optimizing the pre-processing pipeline ([#1320](https://github.com/open-mmlab/mmpose/pull/1320)) @chenxinfeng4, @liqikai9

**New Features**

- Support [RLE (Residual Log-likelihood Estimation)](https://arxiv.org/abs/2107.11291), ICCV'2021 ([#1259](https://github.com/open-mmlab/mmpose/pull/1259)) @Indigo6, @ly015

- Support [Swin Transformer](https://arxiv.org/abs/2103.14030), ICCV'2021 ([#1300](https://github.com/open-mmlab/mmpose/pull/1300)) @yumendecc, @ly015

- Support [PVT](https://arxiv.org/abs/2102.12122), ICCV'2021 and [PVTv2](https://arxiv.org/abs/2106.13797), CVMJ'2022 ([#1343](https://github.com/open-mmlab/mmpose/pull/1343)) @zengwang430521

- Support [FPN](https://openaccess.thecvf.com/content_cvpr_2017/html/Lin_Feature_Pyramid_Networks_CVPR_2017_paper.html), CVPR'2017 ([#1300](https://github.com/open-mmlab/mmpose/pull/1300)) @yumendecc, @ly015

**Improvements**

- Speed up inference and reduce CPU usage by optimizing the pre-processing pipeline ([#1320](https://github.com/open-mmlab/mmpose/pull/1320)) @chenxinfeng4, @liqikai9

- Video demo supports models that require multi-frame inputs ([#1300](https://github.com/open-mmlab/mmpose/pull/1300)) @liqikai9, @jin-s13

- Update benchmark regression list ([#1328](https://github.com/open-mmlab/mmpose/pull/1328)) @ly015, @liqikai9

- Remove unnecessary warnings in `TopDownPoseTrack18VideoDataset` ([#1335](https://github.com/open-mmlab/mmpose/pull/1335)) @liqikai9

- Improve documentation quality ([#1313](https://github.com/open-mmlab/mmpose/pull/1313), [#1305](https://github.com/open-mmlab/mmpose/pull/1305)) @Ben-Louis, @ly015

- Update deprecated settings in configs ([#1317](https://github.com/open-mmlab/mmpose/pull/1317)) @ly015

**Bug Fixes**

- Fix a bug in human skeleton grouping that may skip the matching process unexpectedly when `ignore_to_much` is True ([#1341](https://github.com/open-mmlab/mmpose/pull/1341)) @daixinghome

- Fix a GPG key error that leads to CI failure ([#1354](https://github.com/open-mmlab/mmpose/pull/1354)) @ly015

- Fix bugs in distributed training script ([#1338](https://github.com/open-mmlab/mmpose/pull/1338), [#1298](https://github.com/open-mmlab/mmpose/pull/1298)) @ly015

- Fix an upstream bug in xtcocotools that causes incorrect AP(M) results ([#1308](https://github.com/open-mmlab/mmpose/pull/1308)) @jin-s13, @ly015

- Fix indentation errors in the colab tutorial ([#1298](https://github.com/open-mmlab/mmpose/pull/1298)) @YuanZi1501040205

- Fix incompatible model weight initialization with other OpenMMLab codebases ([#1329](https://github.com/open-mmlab/mmpose/pull/1329)) @274869388

- Fix HRNet FP16 checkpoints download URL ([#1309](https://github.com/open-mmlab/mmpose/pull/1309)) @YinAoXiong

- Fix typos in `body3d_two_stage_video_demo.py` ([#1295](https://github.com/open-mmlab/mmpose/pull/1295)) @mucozcan

**Breaking Changes**

- Refactor bbox processing in datasets and pipelines ([#1311](https://github.com/open-mmlab/mmpose/pull/1311)) @ly015, @Ben-Louis

- The bbox format conversion (xywh to center-scale) and random translation are moved from the dataset to the pipeline. The comparison between the new and old versions is shown below:

**Dataset**
(e.g. [TopDownCOCODataset](https://github.com/open-mmlab/mmpose/blob/master/mmpose/datasets/datasets/top_down/topdown_coco_dataset.py))

v0.26.0 (the data sample only contains the bbox):

```python
...
# Data sample only contains bbox
rec.append({
    'bbox': obj['clean_bbox'][:4],
    ...
})
```

v0.25.0 (the bbox is converted to center and scale inside the dataset):

```python
...
# Convert bbox from xywh to center-scale
center, scale = self._xywh2cs(*obj['clean_bbox'][:4])
# Data sample contains center and scale
rec.append({
    'bbox': obj['clean_bbox'][:4],
    'center': center,
    'scale': scale,
    ...
})
```

**Pipeline Config**
(e.g. [HRNet+COCO](https://github.com/open-mmlab/mmpose/blob/master/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/hrnet_w32_coco_256x192.py))

v0.26.0 (conversion and random shift are pipeline steps):

```python
...
train_pipeline = [
    dict(type='LoadImageFromFile'),
    # Convert bbox from xywh to center-scale
    dict(type='TopDownGetBboxCenterScale', padding=1.25),
    # Randomly shift bbox center
    dict(type='TopDownRandomShiftBboxCenter', shift_factor=0.16, prob=0.3),
    ...
]
```

v0.25.0:

```python
...
train_pipeline = [
    dict(type='LoadImageFromFile'),
    ...
]
```

**Advantages**

- Simpler data sample content
- Flexible bbox format conversion and augmentation
- Apply bbox random translation every epoch (instead of only applying once at the annotation loading)

**BC Breaking**

The method `_xywh2cs` of dataset base classes (e.g. [Kpt2dSviewRgbImgTopDownDataset](https://github.com/open-mmlab/mmpose/blob/master/mmpose/datasets/datasets/base/kpt_2d_sview_rgb_img_top_down_dataset.py)) will be deprecated in the future. Custom datasets will need modifications to move the bbox format conversion to pipelines.

## **v0.25.0 (02/04/2022)**

**Highlights**

- Support Shelf and Campus datasets with pre-trained VoxelPose models, ["3D Pictorial Structures for Multiple Human Pose Estimation"](http://campar.in.tum.de/pub/belagiannis2014cvpr/belagiannis2014cvpr.pdf), CVPR'2014 ([#1225](https://github.com/open-mmlab/mmpose/pull/1225)) @liqikai9, @wusize

- Add `Smoother` module for temporal smoothing of the pose estimation with configurable filters ([#1127](https://github.com/open-mmlab/mmpose/pull/1127)) @ailingzengzzz, @ly015

- Support SmoothNet for pose smoothing, ["SmoothNet: A Plug-and-Play Network for Refining Human Poses in Videos"](https://arxiv.org/abs/2112.13715), arXiv'2021 ([#1279](https://github.com/open-mmlab/mmpose/pull/1279)) @ailingzengzzz, @ly015

- Add multiview 3D pose estimation demo ([#1270](https://github.com/open-mmlab/mmpose/pull/1270)) @wusize

**New Features**

- Support Shelf and Campus datasets with pre-trained VoxelPose models, ["3D Pictorial Structures for Multiple Human Pose Estimation"](http://campar.in.tum.de/pub/belagiannis2014cvpr/belagiannis2014cvpr.pdf), CVPR'2014 ([#1225](https://github.com/open-mmlab/mmpose/pull/1225)) @liqikai9, @wusize

- Add `Smoother` module for temporal smoothing of the pose estimation with configurable filters ([#1127](https://github.com/open-mmlab/mmpose/pull/1127)) @ailingzengzzz, @ly015

- Support SmoothNet for pose smoothing, ["SmoothNet: A Plug-and-Play Network for Refining Human Poses in Videos"](https://arxiv.org/abs/2112.13715), arXiv'2021 ([#1279](https://github.com/open-mmlab/mmpose/pull/1279)) @ailingzengzzz, @ly015

- Add multiview 3D pose estimation demo ([#1270](https://github.com/open-mmlab/mmpose/pull/1270)) @wusize

- Support multi-machine distributed training ([#1248](https://github.com/open-mmlab/mmpose/pull/1248)) @ly015

**Improvements**

- Update HRFormer configs and checkpoints with relative position bias ([#1245](https://github.com/open-mmlab/mmpose/pull/1245)) @zengwang430521

- Support using different random seed for each distributed node ([#1257](https://github.com/open-mmlab/mmpose/pull/1257), [#1229](https://github.com/open-mmlab/mmpose/pull/1229)) @ly015

- Improve documentation quality ([#1275](https://github.com/open-mmlab/mmpose/pull/1275), [#1255](https://github.com/open-mmlab/mmpose/pull/1255), [#1258](https://github.com/open-mmlab/mmpose/pull/1258), [#1249](https://github.com/open-mmlab/mmpose/pull/1249), [#1247](https://github.com/open-mmlab/mmpose/pull/1247), [#1240](https://github.com/open-mmlab/mmpose/pull/1240), [#1235](https://github.com/open-mmlab/mmpose/pull/1235)) @ly015, @jin-s13, @YoniChechik

**Bug Fixes**

- Fix keypoint index in RHD dataset meta information ([#1265](https://github.com/open-mmlab/mmpose/pull/1265)) @liqikai9

- Fix pre-commit hook unexpected behavior on Windows ([#1282](https://github.com/open-mmlab/mmpose/pull/1282)) @liqikai9

- Remove python-dev installation in CI ([#1276](https://github.com/open-mmlab/mmpose/pull/1276)) @ly015

- Unify hyphens in argument
names in tools and demos ([#1271](https://github.com/open-mmlab/mmpose/pull/1271)) @ly015 - -- Fix ambiguous channel size in `channel_shuffle` that may cause exporting failure (#1242) @PINTO0309 - -- Fix a bug in Webcam API that causes single-class detectors fail ([#1239](https://github.com/open-mmlab/mmpose/pull/1239)) @674106399 - -- Fix the issue that `custom_hook` can not be set in configs ([#1236](https://github.com/open-mmlab/mmpose/pull/1236)) @bladrome - -- Fix incompatible MMCV version in DockerFile ([#raykindle](https://github.com/open-mmlab/mmpose/pull/raykindle)) - -- Skip invisible joints in visualization ([#1228](https://github.com/open-mmlab/mmpose/pull/1228)) @womeier - -## **v0.24.0 (07/03/2022)** - -**Highlights** - -- Support HRFormer ["HRFormer: High-Resolution Vision Transformer for Dense Predict"](https://proceedings.neurips.cc/paper/2021/hash/3bbfdde8842a5c44a0323518eec97cbe-Abstract.html), NeurIPS'2021 ([#1203](https://github.com/open-mmlab/mmpose/pull/1203)) @zengwang430521 - -- Support Windows installation with pip ([#1213](https://github.com/open-mmlab/mmpose/pull/1213)) @jin-s13, @ly015 - -- Add WebcamAPI documents ([#1187](https://github.com/open-mmlab/mmpose/pull/1187)) @ly015 - -**New Features** - -- Support HRFormer ["HRFormer: High-Resolution Vision Transformer for Dense Predict"](https://proceedings.neurips.cc/paper/2021/hash/3bbfdde8842a5c44a0323518eec97cbe-Abstract.html), NeurIPS'2021 ([#1203](https://github.com/open-mmlab/mmpose/pull/1203)) @zengwang430521 - -- Support Windows installation with pip ([#1213](https://github.com/open-mmlab/mmpose/pull/1213)) @jin-s13, @ly015 - -- Support CPU training with mmcv \< v1.4.4 ([#1161](https://github.com/open-mmlab/mmpose/pull/1161)) @EasonQYS, @ly015 - -- Add "Valentine Magic" demo with WebcamAPI ([#1189](https://github.com/open-mmlab/mmpose/pull/1189), [#1191](https://github.com/open-mmlab/mmpose/pull/1191)) @liqikai9 - -**Improvements** - -- Refactor multi-view 3D pose estimation framework towards better modularization and expansibility ([#1196](https://github.com/open-mmlab/mmpose/pull/1196)) @wusize - -- Add WebcamAPI documents and tutorials ([#1187](https://github.com/open-mmlab/mmpose/pull/1187)) @ly015 - -- Refactor dataset evaluation interface to align with other OpenMMLab codebases ([#1209](https://github.com/open-mmlab/mmpose/pull/1209)) @ly015 - -- Add deprecation message for deploy tools since [MMDeploy](https://github.com/open-mmlab/mmdeploy) has supported MMPose ([#1207](https://github.com/open-mmlab/mmpose/pull/1207)) @QwQ2000 - -- Improve documentation quality ([#1206](https://github.com/open-mmlab/mmpose/pull/1206), [#1161](https://github.com/open-mmlab/mmpose/pull/1161)) @ly015 - -- Switch to OpenMMLab official pre-commit-hook for copyright check ([#1214](https://github.com/open-mmlab/mmpose/pull/1214)) @ly015 - -**Bug Fixes** - -- Fix hard-coded data collating and scattering in inference ([#1175](https://github.com/open-mmlab/mmpose/pull/1175)) @ly015 - -- Fix model configs on JHMDB dataset ([#1188](https://github.com/open-mmlab/mmpose/pull/1188)) @jin-s13 - -- Fix area calculation in pose tracking inference ([#1197](https://github.com/open-mmlab/mmpose/pull/1197)) @pallgeuer - -- Fix registry scope conflict of module wrapper ([#1204](https://github.com/open-mmlab/mmpose/pull/1204)) @ly015 - -- Update MMCV installation in CI and documents ([#1205](https://github.com/open-mmlab/mmpose/pull/1205)) - -- Fix incorrect color channel order in visualization functions 
([#1212](https://github.com/open-mmlab/mmpose/pull/1212)) @ly015 - -## **v0.23.0 (11/02/2022)** - -**Highlights** - -- Add [MMPose Webcam API](https://github.com/open-mmlab/mmpose/tree/master/tools/webcam): A simple yet powerful tools to develop interactive webcam applications with MMPose functions. ([#1178](https://github.com/open-mmlab/mmpose/pull/1178), [#1173](https://github.com/open-mmlab/mmpose/pull/1173), [#1173](https://github.com/open-mmlab/mmpose/pull/1173), [#1143](https://github.com/open-mmlab/mmpose/pull/1143), [#1094](https://github.com/open-mmlab/mmpose/pull/1094), [#1133](https://github.com/open-mmlab/mmpose/pull/1133), [#1098](https://github.com/open-mmlab/mmpose/pull/1098), [#1160](https://github.com/open-mmlab/mmpose/pull/1160)) @ly015, @jin-s13, @liqikai9, @wusize, @luminxu, @zengwang430521 @mzr1996 - -**New Features** - -- Add [MMPose Webcam API](https://github.com/open-mmlab/mmpose/tree/master/tools/webcam): A simple yet powerful tools to develop interactive webcam applications with MMPose functions. ([#1178](https://github.com/open-mmlab/mmpose/pull/1178), [#1173](https://github.com/open-mmlab/mmpose/pull/1173), [#1173](https://github.com/open-mmlab/mmpose/pull/1173), [#1143](https://github.com/open-mmlab/mmpose/pull/1143), [#1094](https://github.com/open-mmlab/mmpose/pull/1094), [#1133](https://github.com/open-mmlab/mmpose/pull/1133), [#1098](https://github.com/open-mmlab/mmpose/pull/1098), [#1160](https://github.com/open-mmlab/mmpose/pull/1160)) @ly015, @jin-s13, @liqikai9, @wusize, @luminxu, @zengwang430521 @mzr1996 - -- Support ConcatDataset ([#1139](https://github.com/open-mmlab/mmpose/pull/1139)) @Canwang-sjtu - -- Support CPU training and testing ([#1157](https://github.com/open-mmlab/mmpose/pull/1157)) @ly015 - -**Improvements** - -- Add multi-processing configurations to speed up distributed training and testing ([#1146](https://github.com/open-mmlab/mmpose/pull/1146)) @ly015 - -- Add default runtime config ([#1145](https://github.com/open-mmlab/mmpose/pull/1145)) - -- Upgrade isort in pre-commit hook ([#1179](https://github.com/open-mmlab/mmpose/pull/1179)) @liqikai9 - -- Update README and documents ([#1171](https://github.com/open-mmlab/mmpose/pull/1171), [#1167](https://github.com/open-mmlab/mmpose/pull/1167), [#1153](https://github.com/open-mmlab/mmpose/pull/1153), [#1149](https://github.com/open-mmlab/mmpose/pull/1149), [#1148](https://github.com/open-mmlab/mmpose/pull/1148), [#1147](https://github.com/open-mmlab/mmpose/pull/1147), [#1140](https://github.com/open-mmlab/mmpose/pull/1140)) @jin-s13, @wusize, @TommyZihao, @ly015 - -**Bug Fixes** - -- Fix undeterministic behavior in pre-commit hooks ([#1136](https://github.com/open-mmlab/mmpose/pull/1136)) @jin-s13 - -- Deprecate the support for "python setup.py test" ([#1179](https://github.com/open-mmlab/mmpose/pull/1179)) @ly015 - -- Fix incompatible settings with MMCV on HSigmoid default parameters ([#1132](https://github.com/open-mmlab/mmpose/pull/1132)) @ly015 - -- Fix albumentation installation ([#1184](https://github.com/open-mmlab/mmpose/pull/1184)) @BIGWangYuDong - -## **v0.22.0 (04/01/2022)** - -**Highlights** - -- Support VoxelPose ["VoxelPose: Towards Multi-Camera 3D Human Pose Estimation in Wild Environment"](https://arxiv.org/abs/2004.06239), ECCV'2020 ([#1050](https://github.com/open-mmlab/mmpose/pull/1050)) @wusize - -- Support Soft Wing loss ["Structure-Coherent Deep Feature Learning for Robust Face Alignment"](https://linchunze.github.io/papers/TIP21_Structure_coherent_FA.pdf), TIP'2021 
([#1077](https://github.com/open-mmlab/mmpose/pull/1077)) @jin-s13 - -- Support Adaptive Wing loss ["Adaptive Wing Loss for Robust Face Alignment via Heatmap Regression"](https://arxiv.org/abs/1904.07399), ICCV'2019 ([#1072](https://github.com/open-mmlab/mmpose/pull/1072)) @jin-s13 - -**New Features** - -- Support VoxelPose ["VoxelPose: Towards Multi-Camera 3D Human Pose Estimation in Wild Environment"](https://arxiv.org/abs/2004.06239), ECCV'2020 ([#1050](https://github.com/open-mmlab/mmpose/pull/1050)) @wusize - -- Support Soft Wing loss ["Structure-Coherent Deep Feature Learning for Robust Face Alignment"](https://linchunze.github.io/papers/TIP21_Structure_coherent_FA.pdf), TIP'2021 ([#1077](https://github.com/open-mmlab/mmpose/pull/1077)) @jin-s13 - -- Support Adaptive Wing loss ["Adaptive Wing Loss for Robust Face Alignment via Heatmap Regression"](https://arxiv.org/abs/1904.07399), ICCV'2019 ([#1072](https://github.com/open-mmlab/mmpose/pull/1072)) @jin-s13 - -- Add LiteHRNet-18 Checkpoints trained on COCO. ([#1120](https://github.com/open-mmlab/mmpose/pull/1120)) @jin-s13 - -**Improvements** - -- Improve documentation quality ([#1115](https://github.com/open-mmlab/mmpose/pull/1115), [#1111](https://github.com/open-mmlab/mmpose/pull/1111), [#1105](https://github.com/open-mmlab/mmpose/pull/1105), [#1087](https://github.com/open-mmlab/mmpose/pull/1087), [#1086](https://github.com/open-mmlab/mmpose/pull/1086), [#1085](https://github.com/open-mmlab/mmpose/pull/1085), [#1084](https://github.com/open-mmlab/mmpose/pull/1084), [#1083](https://github.com/open-mmlab/mmpose/pull/1083), [#1124](https://github.com/open-mmlab/mmpose/pull/1124), [#1070](https://github.com/open-mmlab/mmpose/pull/1070), [#1068](https://github.com/open-mmlab/mmpose/pull/1068)) @jin-s13, @liqikai9, @ly015 - -- Support CircleCI ([#1074](https://github.com/open-mmlab/mmpose/pull/1074)) @ly015 - -- Skip unit tests in CI when only document files were changed ([#1074](https://github.com/open-mmlab/mmpose/pull/1074), [#1041](https://github.com/open-mmlab/mmpose/pull/1041)) @QwQ2000, @ly015 - -- Support file_client_args in LoadImageFromFile ([#1076](https://github.com/open-mmlab/mmpose/pull/1076)) @jin-s13 - -**Bug Fixes** - -- Fix a bug in Dark UDP postprocessing that causes error when the channel number is large. 
([#1079](https://github.com/open-mmlab/mmpose/pull/1079), [#1116](https://github.com/open-mmlab/mmpose/pull/1116)) @X00123, @jin-s13 - -- Fix hard-coded `sigmas` in bottom-up image demo ([#1107](https://github.com/open-mmlab/mmpose/pull/1107), [#1101](https://github.com/open-mmlab/mmpose/pull/1101)) @chenxinfeng4, @liqikai9 - -- Fix unstable checks in unit tests ([#1112](https://github.com/open-mmlab/mmpose/pull/1112)) @ly015 - -- Do not destroy NULL windows if `args.show==False` in demo scripts ([#1104](https://github.com/open-mmlab/mmpose/pull/1104)) @bladrome - -## **v0.21.0 (06/12/2021)** - -**Highlights** - -- Support ["Learning Temporal Pose Estimation from Sparsely-Labeled Videos"](https://arxiv.org/abs/1906.04016), NeurIPS'2019 ([#932](https://github.com/open-mmlab/mmpose/pull/932), [#1006](https://github.com/open-mmlab/mmpose/pull/1006), [#1036](https://github.com/open-mmlab/mmpose/pull/1036), [#1060](https://github.com/open-mmlab/mmpose/pull/1060)) @liqikai9 - -- Add ViPNAS-MobileNetV3 models ([#1025](https://github.com/open-mmlab/mmpose/pull/1025)) @luminxu, @jin-s13 - -- Add inference speed benchmark ([#1028](https://github.com/open-mmlab/mmpose/pull/1028), [#1034](https://github.com/open-mmlab/mmpose/pull/1034), [#1044](https://github.com/open-mmlab/mmpose/pull/1044)) @liqikai9 - -**New Features** - -- Support ["Learning Temporal Pose Estimation from Sparsely-Labeled Videos"](https://arxiv.org/abs/1906.04016), NeurIPS'2019 ([#932](https://github.com/open-mmlab/mmpose/pull/932), [#1006](https://github.com/open-mmlab/mmpose/pull/1006), [#1036](https://github.com/open-mmlab/mmpose/pull/1036)) @liqikai9 - -- Add ViPNAS-MobileNetV3 models ([#1025](https://github.com/open-mmlab/mmpose/pull/1025)) @luminxu, @jin-s13 - -- Add light-weight top-down models for whole-body keypoint detection ([#1009](https://github.com/open-mmlab/mmpose/pull/1009), [#1020](https://github.com/open-mmlab/mmpose/pull/1020), [#1055](https://github.com/open-mmlab/mmpose/pull/1055)) @luminxu, @ly015 - -- Add HRNet checkpoints with various settings on PoseTrack18 ([#1035](https://github.com/open-mmlab/mmpose/pull/1035)) @liqikai9 - -**Improvements** - -- Add inference speed benchmark ([#1028](https://github.com/open-mmlab/mmpose/pull/1028), [#1034](https://github.com/open-mmlab/mmpose/pull/1034), [#1044](https://github.com/open-mmlab/mmpose/pull/1044)) @liqikai9 - -- Update model metafile format ([#1001](https://github.com/open-mmlab/mmpose/pull/1001)) @ly015 - -- Support minus output feature index in mobilenet_v3 ([#1005](https://github.com/open-mmlab/mmpose/pull/1005)) @luminxu - -- Improve documentation quality ([#1018](https://github.com/open-mmlab/mmpose/pull/1018), [#1026](https://github.com/open-mmlab/mmpose/pull/1026), [#1027](https://github.com/open-mmlab/mmpose/pull/1027), [#1031](https://github.com/open-mmlab/mmpose/pull/1031), [#1038](https://github.com/open-mmlab/mmpose/pull/1038), [#1046](https://github.com/open-mmlab/mmpose/pull/1046), [#1056](https://github.com/open-mmlab/mmpose/pull/1056), [#1057](https://github.com/open-mmlab/mmpose/pull/1057)) @edybk, @luminxu, @ly015, @jin-s13 - -- Set default random seed in training initialization ([#1030](https://github.com/open-mmlab/mmpose/pull/1030)) @ly015 - -- Skip CI when only specific files changed ([#1041](https://github.com/open-mmlab/mmpose/pull/1041), [#1059](https://github.com/open-mmlab/mmpose/pull/1059)) @QwQ2000, @ly015 - -- Automatically cancel uncompleted action runs when new commit arrives 
([#1053](https://github.com/open-mmlab/mmpose/pull/1053)) @ly015 - -**Bug Fixes** - -- Update pose tracking demo to be compatible with latest mmtracking ([#1014](https://github.com/open-mmlab/mmpose/pull/1014)) @jin-s13 - -- Fix symlink creation failure when installed in Windows environments ([#1039](https://github.com/open-mmlab/mmpose/pull/1039)) @QwQ2000 - -- Fix AP-10K dataset sigmas ([#1040](https://github.com/open-mmlab/mmpose/pull/1040)) @jin-s13 - -## **v0.20.0 (01/11/2021)** - -**Highlights** - -- Add AP-10K dataset for animal pose estimation ([#987](https://github.com/open-mmlab/mmpose/pull/987)) @Annbless, @AlexTheBad, @jin-s13, @ly015 - -- Support TorchServe ([#979](https://github.com/open-mmlab/mmpose/pull/979)) @ly015 - -**New Features** - -- Add AP-10K dataset for animal pose estimation ([#987](https://github.com/open-mmlab/mmpose/pull/987)) @Annbless, @AlexTheBad, @jin-s13, @ly015 - -- Add HRNetv2 checkpoints on 300W and COFW datasets ([#980](https://github.com/open-mmlab/mmpose/pull/980)) @jin-s13 - -- Support TorchServe ([#979](https://github.com/open-mmlab/mmpose/pull/979)) @ly015 - -**Bug Fixes** - -- Fix some deprecated or risky settings in configs ([#963](https://github.com/open-mmlab/mmpose/pull/963), [#976](https://github.com/open-mmlab/mmpose/pull/976), [#992](https://github.com/open-mmlab/mmpose/pull/992)) @jin-s13, @wusize - -- Fix issues of default arguments of training and testing scripts ([#970](https://github.com/open-mmlab/mmpose/pull/970), [#985](https://github.com/open-mmlab/mmpose/pull/985)) @liqikai9, @wusize - -- Fix heatmap and tag size mismatch in bottom-up with UDP ([#994](https://github.com/open-mmlab/mmpose/pull/994)) @wusize - -- Fix python3.9 installation in CI ([#983](https://github.com/open-mmlab/mmpose/pull/983)) @ly015 - -- Fix model zoo document integrity issue ([#990](https://github.com/open-mmlab/mmpose/pull/990)) @jin-s13 - -**Improvements** - -- Support non-square input shape for bottom-up ([#991](https://github.com/open-mmlab/mmpose/pull/991)) @wusize - -- Add image and video resources for demo ([#971](https://github.com/open-mmlab/mmpose/pull/971)) @liqikai9 - -- Use CUDA docker images to accelerate CI ([#973](https://github.com/open-mmlab/mmpose/pull/973)) @ly015 - -- Add codespell hook and fix detected typos ([#977](https://github.com/open-mmlab/mmpose/pull/977)) @ly015 - -## **v0.19.0 (08/10/2021)** - -**Highlights** - -- Add models for Associative Embedding with Hourglass network backbone ([#906](https://github.com/open-mmlab/mmpose/pull/906), [#955](https://github.com/open-mmlab/mmpose/pull/955)) @jin-s13, @luminxu - -- Support COCO-Wholebody-Face and COCO-Wholebody-Hand datasets ([#813](https://github.com/open-mmlab/mmpose/pull/813)) @jin-s13, @innerlee, @luminxu - -- Upgrade dataset interface ([#901](https://github.com/open-mmlab/mmpose/pull/901), [#924](https://github.com/open-mmlab/mmpose/pull/924)) @jin-s13, @innerlee, @ly015, @liqikai9 - -- New style of documentation ([#945](https://github.com/open-mmlab/mmpose/pull/945)) @ly015 - -**New Features** - -- Add models for Associative Embedding with Hourglass network backbone ([#906](https://github.com/open-mmlab/mmpose/pull/906), [#955](https://github.com/open-mmlab/mmpose/pull/955)) @jin-s13, @luminxu - -- Support COCO-Wholebody-Face and COCO-Wholebody-Hand datasets ([#813](https://github.com/open-mmlab/mmpose/pull/813)) @jin-s13, @innerlee, @luminxu - -- Add pseudo-labeling tool to generate COCO style keypoint annotations with given bounding boxes 
([#928](https://github.com/open-mmlab/mmpose/pull/928)) @soltkreig - -- New style of documentation ([#945](https://github.com/open-mmlab/mmpose/pull/945)) @ly015 - -**Bug Fixes** - -- Fix segmentation parsing in Macaque dataset preprocessing ([#948](https://github.com/open-mmlab/mmpose/pull/948)) @jin-s13 - -- Fix dependencies that may lead to CI failure in downstream projects ([#936](https://github.com/open-mmlab/mmpose/pull/936), [#953](https://github.com/open-mmlab/mmpose/pull/953)) @RangiLyu, @ly015 - -- Fix keypoint order in Human3.6M dataset ([#940](https://github.com/open-mmlab/mmpose/pull/940)) @ttxskk - -- Fix unstable image loading for Interhand2.6M ([#913](https://github.com/open-mmlab/mmpose/pull/913)) @zengwang430521 - -**Improvements** - -- Upgrade dataset interface ([#901](https://github.com/open-mmlab/mmpose/pull/901), [#924](https://github.com/open-mmlab/mmpose/pull/924)) @jin-s13, @innerlee, @ly015, @liqikai9 - -- Improve demo usability and stability ([#908](https://github.com/open-mmlab/mmpose/pull/908), [#934](https://github.com/open-mmlab/mmpose/pull/934)) @ly015 - -- Standardize model metafile format ([#941](https://github.com/open-mmlab/mmpose/pull/941)) @ly015 - -- Support `persistent_worker` and several other arguments in configs ([#946](https://github.com/open-mmlab/mmpose/pull/946)) @jin-s13 - -- Use MMCV root model registry to enable cross-project module building ([#935](https://github.com/open-mmlab/mmpose/pull/935)) @RangiLyu - -- Improve the document quality ([#916](https://github.com/open-mmlab/mmpose/pull/916), [#909](https://github.com/open-mmlab/mmpose/pull/909), [#942](https://github.com/open-mmlab/mmpose/pull/942), [#913](https://github.com/open-mmlab/mmpose/pull/913), [#956](https://github.com/open-mmlab/mmpose/pull/956)) @jin-s13, @ly015, @bit-scientist, @zengwang430521 - -- Improve pull request template ([#952](https://github.com/open-mmlab/mmpose/pull/952), [#954](https://github.com/open-mmlab/mmpose/pull/954)) @ly015 - -**Breaking Changes** - -- Upgrade dataset interface ([#901](https://github.com/open-mmlab/mmpose/pull/901)) @jin-s13, @innerlee, @ly015 - -## **v0.18.0 (01/09/2021)** - -**Bug Fixes** - -- Fix redundant model weight loading in pytorch-to-onnx conversion ([#850](https://github.com/open-mmlab/mmpose/pull/850)) @ly015 - -- Fix a bug in update_model_index.py that may cause pre-commit hook failure([#866](https://github.com/open-mmlab/mmpose/pull/866)) @ly015 - -- Fix a bug in interhand_3d_head ([#890](https://github.com/open-mmlab/mmpose/pull/890)) @zengwang430521 - -- Fix pose tracking demo failure caused by out-of-date configs ([#891](https://github.com/open-mmlab/mmpose/pull/891)) - -**Improvements** - -- Add automatic benchmark regression tools ([#849](https://github.com/open-mmlab/mmpose/pull/849), [#880](https://github.com/open-mmlab/mmpose/pull/880), [#885](https://github.com/open-mmlab/mmpose/pull/885)) @liqikai9, @ly015 - -- Add copyright information and checking hook ([#872](https://github.com/open-mmlab/mmpose/pull/872)) - -- Add PR template ([#875](https://github.com/open-mmlab/mmpose/pull/875)) @ly015 - -- Add citation information ([#876](https://github.com/open-mmlab/mmpose/pull/876)) @ly015 - -- Add python3.9 in CI ([#877](https://github.com/open-mmlab/mmpose/pull/877), [#883](https://github.com/open-mmlab/mmpose/pull/883)) @ly015 - -- Improve the quality of the documents ([#845](https://github.com/open-mmlab/mmpose/pull/845), [#845](https://github.com/open-mmlab/mmpose/pull/845), 
[#848](https://github.com/open-mmlab/mmpose/pull/848), [#867](https://github.com/open-mmlab/mmpose/pull/867), [#870](https://github.com/open-mmlab/mmpose/pull/870), [#873](https://github.com/open-mmlab/mmpose/pull/873), [#896](https://github.com/open-mmlab/mmpose/pull/896)) @jin-s13, @ly015, @zhiqwang - -## **v0.17.0 (06/08/2021)** - -**Highlights** - -1. Support ["Lite-HRNet: A Lightweight High-Resolution Network"](https://arxiv.org/abs/2104.06403) CVPR'2021 ([#733](https://github.com/open-mmlab/mmpose/pull/733),[#800](https://github.com/open-mmlab/mmpose/pull/800)) @jin-s13 - -2. Add 3d body mesh demo ([#771](https://github.com/open-mmlab/mmpose/pull/771)) @zengwang430521 - -3. Add Chinese documentation ([#787](https://github.com/open-mmlab/mmpose/pull/787), [#798](https://github.com/open-mmlab/mmpose/pull/798), [#799](https://github.com/open-mmlab/mmpose/pull/799), [#802](https://github.com/open-mmlab/mmpose/pull/802), [#804](https://github.com/open-mmlab/mmpose/pull/804), [#805](https://github.com/open-mmlab/mmpose/pull/805), [#815](https://github.com/open-mmlab/mmpose/pull/815), [#816](https://github.com/open-mmlab/mmpose/pull/816), [#817](https://github.com/open-mmlab/mmpose/pull/817), [#819](https://github.com/open-mmlab/mmpose/pull/819), [#839](https://github.com/open-mmlab/mmpose/pull/839)) @ly015, @luminxu, @jin-s13, @liqikai9, @zengwang430521 - -4. Add Colab Tutorial ([#834](https://github.com/open-mmlab/mmpose/pull/834)) @ly015 - -**New Features** - -- Support ["Lite-HRNet: A Lightweight High-Resolution Network"](https://arxiv.org/abs/2104.06403) CVPR'2021 ([#733](https://github.com/open-mmlab/mmpose/pull/733),[#800](https://github.com/open-mmlab/mmpose/pull/800)) @jin-s13 - -- Add 3d body mesh demo ([#771](https://github.com/open-mmlab/mmpose/pull/771)) @zengwang430521 - -- Add Chinese documentation ([#787](https://github.com/open-mmlab/mmpose/pull/787), [#798](https://github.com/open-mmlab/mmpose/pull/798), [#799](https://github.com/open-mmlab/mmpose/pull/799), [#802](https://github.com/open-mmlab/mmpose/pull/802), [#804](https://github.com/open-mmlab/mmpose/pull/804), [#805](https://github.com/open-mmlab/mmpose/pull/805), [#815](https://github.com/open-mmlab/mmpose/pull/815), [#816](https://github.com/open-mmlab/mmpose/pull/816), [#817](https://github.com/open-mmlab/mmpose/pull/817), [#819](https://github.com/open-mmlab/mmpose/pull/819), [#839](https://github.com/open-mmlab/mmpose/pull/839)) @ly015, @luminxu, @jin-s13, @liqikai9, @zengwang430521 - -- Add Colab Tutorial ([#834](https://github.com/open-mmlab/mmpose/pull/834)) @ly015 - -- Support training for InterHand v1.0 dataset ([#761](https://github.com/open-mmlab/mmpose/pull/761)) @zengwang430521 - -**Bug Fixes** - -- Fix mpii pckh@0.1 index ([#773](https://github.com/open-mmlab/mmpose/pull/773)) @jin-s13 - -- Fix multi-node distributed test ([#818](https://github.com/open-mmlab/mmpose/pull/818)) @ly015 - -- Fix docstring and init_weights error of ShuffleNetV1 ([#814](https://github.com/open-mmlab/mmpose/pull/814)) @Junjun2016 - -- Fix imshow_bbox error when input bboxes is empty ([#796](https://github.com/open-mmlab/mmpose/pull/796)) @ly015 - -- Fix model zoo doc generation ([#778](https://github.com/open-mmlab/mmpose/pull/778)) @ly015 - -- Fix typo ([#767](https://github.com/open-mmlab/mmpose/pull/767)), ([#780](https://github.com/open-mmlab/mmpose/pull/780), [#782](https://github.com/open-mmlab/mmpose/pull/782)) @ly015, @jin-s13 - -**Breaking Changes** - -- Use MMCV EvalHook 
([#686](https://github.com/open-mmlab/mmpose/pull/686)) @ly015 - -**Improvements** - -- Add pytest.ini and fix docstring ([#812](https://github.com/open-mmlab/mmpose/pull/812)) @jin-s13 - -- Update MSELoss ([#829](https://github.com/open-mmlab/mmpose/pull/829)) @Ezra-Yu - -- Move process_mmdet_results into inference.py ([#831](https://github.com/open-mmlab/mmpose/pull/831)) @ly015 - -- Update resource limit ([#783](https://github.com/open-mmlab/mmpose/pull/783)) @jin-s13 - -- Use COCO 2D pose model in 3D demo examples ([#785](https://github.com/open-mmlab/mmpose/pull/785)) @ly015 - -- Change model zoo titles in the doc from center-aligned to left-aligned ([#792](https://github.com/open-mmlab/mmpose/pull/792), [#797](https://github.com/open-mmlab/mmpose/pull/797)) @ly015 - -- Support MIM ([#706](https://github.com/open-mmlab/mmpose/pull/706), [#794](https://github.com/open-mmlab/mmpose/pull/794)) @ly015 - -- Update out-of-date configs ([#827](https://github.com/open-mmlab/mmpose/pull/827)) @jin-s13 - -- Remove opencv-python-headless dependency by albumentations ([#833](https://github.com/open-mmlab/mmpose/pull/833)) @ly015 - -- Update QQ QR code in README_CN.md ([#832](https://github.com/open-mmlab/mmpose/pull/832)) @ly015 - -## **v0.16.0 (02/07/2021)** - -**Highlights** - -1. Support ["ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search"](https://arxiv.org/abs/2105.10154) CVPR'2021 ([#742](https://github.com/open-mmlab/mmpose/pull/742),[#755](https://github.com/open-mmlab/mmpose/pull/755)). - -2. Support MPI-INF-3DHP dataset ([#683](https://github.com/open-mmlab/mmpose/pull/683),[#746](https://github.com/open-mmlab/mmpose/pull/746),[#751](https://github.com/open-mmlab/mmpose/pull/751)). - -3. Add webcam demo tool ([#729](https://github.com/open-mmlab/mmpose/pull/729)) - -4. Add 3d body and hand pose estimation demo ([#704](https://github.com/open-mmlab/mmpose/pull/704), [#727](https://github.com/open-mmlab/mmpose/pull/727)). 
- -**New Features** - -- Support ["ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search"](https://arxiv.org/abs/2105.10154) CVPR'2021 ([#742](https://github.com/open-mmlab/mmpose/pull/742),[#755](https://github.com/open-mmlab/mmpose/pull/755)) - -- Support MPI-INF-3DHP dataset ([#683](https://github.com/open-mmlab/mmpose/pull/683),[#746](https://github.com/open-mmlab/mmpose/pull/746),[#751](https://github.com/open-mmlab/mmpose/pull/751)) - -- Support Webcam demo ([#729](https://github.com/open-mmlab/mmpose/pull/729)) - -- Support Interhand 3d demo ([#704](https://github.com/open-mmlab/mmpose/pull/704)) - -- Support 3d pose video demo ([#727](https://github.com/open-mmlab/mmpose/pull/727)) - -- Support H36m dataset for 2d pose estimation ([#709](https://github.com/open-mmlab/mmpose/pull/709), [#735](https://github.com/open-mmlab/mmpose/pull/735)) - -- Add scripts to generate mim metafile ([#749](https://github.com/open-mmlab/mmpose/pull/749)) - -**Bug Fixes** - -- Fix typos ([#692](https://github.com/open-mmlab/mmpose/pull/692),[#696](https://github.com/open-mmlab/mmpose/pull/696),[#697](https://github.com/open-mmlab/mmpose/pull/697),[#698](https://github.com/open-mmlab/mmpose/pull/698),[#712](https://github.com/open-mmlab/mmpose/pull/712),[#718](https://github.com/open-mmlab/mmpose/pull/718),[#728](https://github.com/open-mmlab/mmpose/pull/728)) - -- Change model download links from `http` to `https` ([#716](https://github.com/open-mmlab/mmpose/pull/716)) - -**Breaking Changes** - -- Switch to MMCV MODEL_REGISTRY ([#669](https://github.com/open-mmlab/mmpose/pull/669)) - -**Improvements** - -- Refactor MeshMixDataset ([#752](https://github.com/open-mmlab/mmpose/pull/752)) - -- Rename 'GaussianHeatMap' to 'GaussianHeatmap' ([#745](https://github.com/open-mmlab/mmpose/pull/745)) - -- Update out-of-date configs ([#734](https://github.com/open-mmlab/mmpose/pull/734)) - -- Improve compatibility for breaking changes ([#731](https://github.com/open-mmlab/mmpose/pull/731)) - -- Enable to control radius and thickness in visualization ([#722](https://github.com/open-mmlab/mmpose/pull/722)) - -- Add regex dependency ([#720](https://github.com/open-mmlab/mmpose/pull/720)) - -## **v0.15.0 (02/06/2021)** - -**Highlights** - -1. Support 3d video pose estimation (VideoPose3D). - -2. Support 3d hand pose estimation (InterNet). - -3. Improve presentation of modelzoo. 
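
For orientation, the first highlight above (VideoPose3D-style 3D video pose estimation) lifts a temporal window of 2D keypoints to a 3D pose with dilated temporal convolutions. The snippet below is only a minimal, illustrative sketch of one such temporal block; the class name and channel sizes are ours, not MMPose's actual implementation.

```python
import torch
from torch import nn


class TemporalBlock(nn.Module):
    """A dilated 1D convolution over a sequence of flattened 2D keypoints."""

    def __init__(self, in_channels, out_channels, dilation):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv1d(in_channels, out_channels, kernel_size=3, dilation=dilation),
            nn.BatchNorm1d(out_channels),
            nn.ReLU(),
            nn.Dropout(0.25),
        )

    def forward(self, x):  # x: (batch, channels, frames)
        return self.net(x)


# 17 joints x 2 coords per frame, over a 27-frame window
seq = torch.randn(1, 34, 27)
out = TemporalBlock(34, 1024, dilation=1)(seq)  # temporal length shrinks as the receptive field grows
```
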
- -**New Features** - -- Support "InterHand2.6M: A Dataset and Baseline for 3D Interacting Hand Pose Estimation from a Single RGB Image" (ECCV‘20) ([#624](https://github.com/open-mmlab/mmpose/pull/624)) - -- Support "3D human pose estimation in video with temporal convolutions and semi-supervised training" (CVPR'19) ([#602](https://github.com/open-mmlab/mmpose/pull/602), [#681](https://github.com/open-mmlab/mmpose/pull/681)) - -- Support 3d pose estimation demo ([#653](https://github.com/open-mmlab/mmpose/pull/653), [#670](https://github.com/open-mmlab/mmpose/pull/670)) - -- Support bottom-up whole-body pose estimation ([#689](https://github.com/open-mmlab/mmpose/pull/689)) - -- Support mmcli ([#634](https://github.com/open-mmlab/mmpose/pull/634)) - -**Bug Fixes** - -- Fix opencv compatibility ([#635](https://github.com/open-mmlab/mmpose/pull/635)) - -- Fix demo with UDP ([#637](https://github.com/open-mmlab/mmpose/pull/637)) - -- Fix bottom-up model onnx conversion ([#680](https://github.com/open-mmlab/mmpose/pull/680)) - -- Fix `GPU_IDS` in distributed training ([#668](https://github.com/open-mmlab/mmpose/pull/668)) - -- Fix MANIFEST.in ([#641](https://github.com/open-mmlab/mmpose/pull/641), [#657](https://github.com/open-mmlab/mmpose/pull/657)) - -- Fix docs ([#643](https://github.com/open-mmlab/mmpose/pull/643),[#684](https://github.com/open-mmlab/mmpose/pull/684),[#688](https://github.com/open-mmlab/mmpose/pull/688),[#690](https://github.com/open-mmlab/mmpose/pull/690),[#692](https://github.com/open-mmlab/mmpose/pull/692)) - -**Breaking Changes** - -- Reorganize configs by tasks, algorithms, datasets, and techniques ([#647](https://github.com/open-mmlab/mmpose/pull/647)) - -- Rename heads and detectors ([#667](https://github.com/open-mmlab/mmpose/pull/667)) - -**Improvements** - -- Add `radius` and `thickness` parameters in visualization ([#638](https://github.com/open-mmlab/mmpose/pull/638)) - -- Add `trans_prob` parameter in `TopDownRandomTranslation` ([#650](https://github.com/open-mmlab/mmpose/pull/650)) - -- Switch to `MMCV MODEL_REGISTRY` ([#669](https://github.com/open-mmlab/mmpose/pull/669)) - -- Update dependencies ([#674](https://github.com/open-mmlab/mmpose/pull/674), [#676](https://github.com/open-mmlab/mmpose/pull/676)) - -## **v0.14.0 (06/05/2021)** - -**Highlights** - -1. Support animal pose estimation with 7 popular datasets. - -2. Support "A simple yet effective baseline for 3d human pose estimation" (ICCV'17). 
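
For readers new to the second highlight, that baseline lifts detected 2D keypoints to 3D joints with a small fully connected residual network. The sketch below only illustrates the idea (the layer width and block count follow the paper's defaults); all names are illustrative and this is not MMPose's implementation.

```python
import torch
from torch import nn


class LinearResBlock(nn.Module):
    """One residual block of the simple 3D lifting baseline."""

    def __init__(self, dim=1024, dropout=0.5):
        super().__init__()
        self.block = nn.Sequential(
            nn.Linear(dim, dim), nn.BatchNorm1d(dim), nn.ReLU(), nn.Dropout(dropout),
            nn.Linear(dim, dim), nn.BatchNorm1d(dim), nn.ReLU(), nn.Dropout(dropout),
        )

    def forward(self, x):
        return x + self.block(x)


class SimpleBaseline3D(nn.Module):
    """Lift 2D keypoints (K x 2) to 3D joints (K x 3) with a residual MLP."""

    def __init__(self, num_keypoints=16, dim=1024, num_blocks=2):
        super().__init__()
        self.stem = nn.Linear(num_keypoints * 2, dim)
        self.blocks = nn.Sequential(*[LinearResBlock(dim) for _ in range(num_blocks)])
        self.head = nn.Linear(dim, num_keypoints * 3)

    def forward(self, kpts_2d):  # kpts_2d: (batch, K, 2)
        x = self.stem(kpts_2d.flatten(1))
        x = self.blocks(x)
        return self.head(x).reshape(kpts_2d.shape[0], -1, 3)  # (batch, K, 3)


# Lift a batch of 16-joint 2D poses to 3D
poses_3d = SimpleBaseline3D()(torch.randn(4, 16, 2))
```
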
- -**New Features** - -- Support "A simple yet effective baseline for 3d human pose estimation" (ICCV'17) ([#554](https://github.com/open-mmlab/mmpose/pull/554),[#558](https://github.com/open-mmlab/mmpose/pull/558),[#566](https://github.com/open-mmlab/mmpose/pull/566),[#570](https://github.com/open-mmlab/mmpose/pull/570),[#589](https://github.com/open-mmlab/mmpose/pull/589)) - -- Support animal pose estimation ([#559](https://github.com/open-mmlab/mmpose/pull/559),[#561](https://github.com/open-mmlab/mmpose/pull/561),[#563](https://github.com/open-mmlab/mmpose/pull/563),[#571](https://github.com/open-mmlab/mmpose/pull/571),[#603](https://github.com/open-mmlab/mmpose/pull/603),[#605](https://github.com/open-mmlab/mmpose/pull/605)) - -- Support Horse-10 dataset ([#561](https://github.com/open-mmlab/mmpose/pull/561)), MacaquePose dataset ([#561](https://github.com/open-mmlab/mmpose/pull/561)), Vinegar Fly dataset ([#561](https://github.com/open-mmlab/mmpose/pull/561)), Desert Locust dataset ([#561](https://github.com/open-mmlab/mmpose/pull/561)), Grevy's Zebra dataset ([#561](https://github.com/open-mmlab/mmpose/pull/561)), ATRW dataset ([#571](https://github.com/open-mmlab/mmpose/pull/571)), and Animal-Pose dataset ([#603](https://github.com/open-mmlab/mmpose/pull/603)) - -- Support bottom-up pose tracking demo ([#574](https://github.com/open-mmlab/mmpose/pull/574)) - -- Support FP16 training ([#584](https://github.com/open-mmlab/mmpose/pull/584),[#616](https://github.com/open-mmlab/mmpose/pull/616),[#626](https://github.com/open-mmlab/mmpose/pull/626)) - -- Support NMS for bottom-up ([#609](https://github.com/open-mmlab/mmpose/pull/609)) - -**Bug Fixes** - -- Fix bugs in the top-down demo, when there are no people in the images ([#569](https://github.com/open-mmlab/mmpose/pull/569)). - -- Fix the links in the doc ([#612](https://github.com/open-mmlab/mmpose/pull/612)) - -**Improvements** - -- Speed up top-down inference ([#560](https://github.com/open-mmlab/mmpose/pull/560)) - -- Update github CI ([#562](https://github.com/open-mmlab/mmpose/pull/562), [#564](https://github.com/open-mmlab/mmpose/pull/564)) - -- Update Readme ([#578](https://github.com/open-mmlab/mmpose/pull/578),[#579](https://github.com/open-mmlab/mmpose/pull/579),[#580](https://github.com/open-mmlab/mmpose/pull/580),[#592](https://github.com/open-mmlab/mmpose/pull/592),[#599](https://github.com/open-mmlab/mmpose/pull/599),[#600](https://github.com/open-mmlab/mmpose/pull/600),[#607](https://github.com/open-mmlab/mmpose/pull/607)) - -- Update Faq ([#587](https://github.com/open-mmlab/mmpose/pull/587), [#610](https://github.com/open-mmlab/mmpose/pull/610)) - -## **v0.13.0 (31/03/2021)** - -**Highlights** - -1. Support Wingloss. - -2. Support RHD hand dataset. 
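
As a quick reference for the first highlight, Wing loss penalizes small keypoint regression errors on a logarithmic curve and falls back to L1 for large errors, which keeps gradients informative for small and medium errors. The snippet below is a minimal sketch of that formulation only; the function name is ours, the hyper-parameter names `omega` and `epsilon` follow the paper, and this is not the exact implementation shipped in MMPose.

```python
import math

import torch


def wing_loss(pred, target, omega=10.0, epsilon=2.0):
    """Element-wise Wing loss between predicted and target keypoint coordinates."""
    diff = (pred - target).abs()
    # Constant that joins the log region and the L1 region continuously at |x| == omega
    c = omega - omega * math.log(1.0 + omega / epsilon)
    loss = torch.where(
        diff < omega,
        omega * torch.log(1.0 + diff / epsilon),
        diff - c,
    )
    return loss.mean()


# Regression loss between predicted and ground-truth hand keypoints (21 joints)
loss = wing_loss(torch.rand(2, 21, 2), torch.rand(2, 21, 2))
```
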
- -**New Features** - -- Support Wingloss ([#482](https://github.com/open-mmlab/mmpose/pull/482)) - -- Support RHD hand dataset ([#523](https://github.com/open-mmlab/mmpose/pull/523), [#551](https://github.com/open-mmlab/mmpose/pull/551)) - -- Support Human3.6m dataset for 3d keypoint detection ([#518](https://github.com/open-mmlab/mmpose/pull/518), [#527](https://github.com/open-mmlab/mmpose/pull/527)) - -- Support TCN model for 3d keypoint detection ([#521](https://github.com/open-mmlab/mmpose/pull/521), [#522](https://github.com/open-mmlab/mmpose/pull/522)) - -- Support Interhand3D model for 3d hand detection ([#536](https://github.com/open-mmlab/mmpose/pull/536)) - -- Support Multi-task detector ([#480](https://github.com/open-mmlab/mmpose/pull/480)) - -**Bug Fixes** - -- Fix PCKh@0.1 calculation ([#516](https://github.com/open-mmlab/mmpose/pull/516)) - -- Fix unittest ([#529](https://github.com/open-mmlab/mmpose/pull/529)) - -- Fix circular importing ([#542](https://github.com/open-mmlab/mmpose/pull/542)) - -- Fix bugs in bottom-up keypoint score ([#548](https://github.com/open-mmlab/mmpose/pull/548)) - -**Improvements** - -- Update config & checkpoints ([#525](https://github.com/open-mmlab/mmpose/pull/525), [#546](https://github.com/open-mmlab/mmpose/pull/546)) - -- Fix typos ([#514](https://github.com/open-mmlab/mmpose/pull/514), [#519](https://github.com/open-mmlab/mmpose/pull/519), [#532](https://github.com/open-mmlab/mmpose/pull/532), [#537](https://github.com/open-mmlab/mmpose/pull/537), ) - -- Speed up post processing ([#535](https://github.com/open-mmlab/mmpose/pull/535)) - -- Update mmcv version dependency ([#544](https://github.com/open-mmlab/mmpose/pull/544)) - -## **v0.12.0 (28/02/2021)** - -**Highlights** - -1. Support DeepPose algorithm. 
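
As background for this highlight, DeepPose regresses keypoint coordinates directly from image features instead of predicting heatmaps. The sketch below illustrates the idea with a generic torchvision backbone (assuming a recent torchvision); the class name and the smooth-L1 loss are illustrative choices, not MMPose's implementation.

```python
import torch
from torch import nn
from torchvision.models import resnet18


class DeepPoseSketch(nn.Module):
    """Direct coordinate regression: backbone -> global average pooling -> K x 2 outputs."""

    def __init__(self, num_keypoints=17):
        super().__init__()
        backbone = resnet18(weights=None)
        # Keep everything up to (and including) the global average pooling, drop the classifier
        self.features = nn.Sequential(*list(backbone.children())[:-1])
        self.regressor = nn.Linear(512, num_keypoints * 2)

    def forward(self, img):  # img: (batch, 3, H, W)
        feat = self.features(img).flatten(1)  # (batch, 512)
        return self.regressor(feat).reshape(img.shape[0], -1, 2)  # predicted (x, y) per keypoint


model = DeepPoseSketch()
pred = model(torch.randn(2, 3, 256, 192))  # (2, 17, 2)
loss = nn.functional.smooth_l1_loss(pred, torch.rand(2, 17, 2))
```
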
- -**New Features** - -- Support DeepPose algorithm ([#446](https://github.com/open-mmlab/mmpose/pull/446), [#461](https://github.com/open-mmlab/mmpose/pull/461)) - -- Support interhand3d dataset ([#468](https://github.com/open-mmlab/mmpose/pull/468)) - -- Support Albumentation pipeline ([#469](https://github.com/open-mmlab/mmpose/pull/469)) - -- Support PhotometricDistortion pipeline ([#485](https://github.com/open-mmlab/mmpose/pull/485)) - -- Set seed option for training ([#493](https://github.com/open-mmlab/mmpose/pull/493)) - -- Add demos for face keypoint detection ([#502](https://github.com/open-mmlab/mmpose/pull/502)) - -**Bug Fixes** - -- Change channel order according to configs ([#504](https://github.com/open-mmlab/mmpose/pull/504)) - -- Fix `num_factors` in UDP encoding ([#495](https://github.com/open-mmlab/mmpose/pull/495)) - -- Fix configs ([#456](https://github.com/open-mmlab/mmpose/pull/456)) - -**Breaking Changes** - -- Refactor configs for wholebody pose estimation ([#487](https://github.com/open-mmlab/mmpose/pull/487), [#491](https://github.com/open-mmlab/mmpose/pull/491)) - -- Rename `decode` function for heads ([#481](https://github.com/open-mmlab/mmpose/pull/481)) - -**Improvements** - -- Update config & checkpoints ([#453](https://github.com/open-mmlab/mmpose/pull/453),[#484](https://github.com/open-mmlab/mmpose/pull/484),[#487](https://github.com/open-mmlab/mmpose/pull/487)) - -- Add README in Chinese ([#462](https://github.com/open-mmlab/mmpose/pull/462)) - -- Add tutorials about configs ([#465](https://github.com/open-mmlab/mmpose/pull/465)) - -- Add demo videos for various tasks ([#499](https://github.com/open-mmlab/mmpose/pull/499), [#503](https://github.com/open-mmlab/mmpose/pull/503)) - -- Update docs about MMPose installation ([#467](https://github.com/open-mmlab/mmpose/pull/467), [#505](https://github.com/open-mmlab/mmpose/pull/505)) - -- Rename `stat.py` to `stats.py` ([#483](https://github.com/open-mmlab/mmpose/pull/483)) - -- Fix typos ([#463](https://github.com/open-mmlab/mmpose/pull/463), [#464](https://github.com/open-mmlab/mmpose/pull/464), [#477](https://github.com/open-mmlab/mmpose/pull/477), [#481](https://github.com/open-mmlab/mmpose/pull/481)) - -- latex to bibtex ([#471](https://github.com/open-mmlab/mmpose/pull/471)) - -- Update FAQ ([#466](https://github.com/open-mmlab/mmpose/pull/466)) - -## **v0.11.0 (31/01/2021)** - -**Highlights** - -1. Support fashion landmark detection. - -2. Support face keypoint detection. - -3. Support pose tracking with MMTracking. 
- -**New Features** - -- Support fashion landmark detection (DeepFashion) ([#413](https://github.com/open-mmlab/mmpose/pull/413)) - -- Support face keypoint detection (300W, AFLW, COFW, WFLW) ([#367](https://github.com/open-mmlab/mmpose/pull/367)) - -- Support pose tracking demo with MMTracking ([#427](https://github.com/open-mmlab/mmpose/pull/427)) - -- Support face demo ([#443](https://github.com/open-mmlab/mmpose/pull/443)) - -- Support AIC dataset for bottom-up methods ([#438](https://github.com/open-mmlab/mmpose/pull/438), [#449](https://github.com/open-mmlab/mmpose/pull/449)) - -**Bug Fixes** - -- Fix multi-batch training ([#434](https://github.com/open-mmlab/mmpose/pull/434)) - -- Fix sigmas in AIC dataset ([#441](https://github.com/open-mmlab/mmpose/pull/441)) - -- Fix config file ([#420](https://github.com/open-mmlab/mmpose/pull/420)) - -**Breaking Changes** - -- Refactor Heads ([#382](https://github.com/open-mmlab/mmpose/pull/382)) - -**Improvements** - -- Update readme ([#409](https://github.com/open-mmlab/mmpose/pull/409), [#412](https://github.com/open-mmlab/mmpose/pull/412), [#415](https://github.com/open-mmlab/mmpose/pull/415), [#416](https://github.com/open-mmlab/mmpose/pull/416), [#419](https://github.com/open-mmlab/mmpose/pull/419), [#421](https://github.com/open-mmlab/mmpose/pull/421), [#422](https://github.com/open-mmlab/mmpose/pull/422), [#424](https://github.com/open-mmlab/mmpose/pull/424), [#425](https://github.com/open-mmlab/mmpose/pull/425), [#435](https://github.com/open-mmlab/mmpose/pull/435), [#436](https://github.com/open-mmlab/mmpose/pull/436), [#437](https://github.com/open-mmlab/mmpose/pull/437), [#444](https://github.com/open-mmlab/mmpose/pull/444), [#445](https://github.com/open-mmlab/mmpose/pull/445)) - -- Add GAP (global average pooling) neck ([#414](https://github.com/open-mmlab/mmpose/pull/414)) - -- Speed up ([#411](https://github.com/open-mmlab/mmpose/pull/411), [#423](https://github.com/open-mmlab/mmpose/pull/423)) - -- Support COCO test-dev test ([#433](https://github.com/open-mmlab/mmpose/pull/433)) - -## **v0.10.0 (31/12/2020)** - -**Highlights** - -1. Support more human pose estimation methods. - - 1. [UDP](https://arxiv.org/abs/1911.07524) - -2. Support pose tracking. - -3. Support multi-batch inference. - -4. Add some useful tools, including `analyze_logs`, `get_flops`, `print_config`. - -5. Support more backbone networks. - - 1. [ResNest](https://arxiv.org/pdf/2004.08955.pdf) - 2. 
[VGG](https://arxiv.org/abs/1409.1556) - -**New Features** - -- Support UDP ([#353](https://github.com/open-mmlab/mmpose/pull/353), [#371](https://github.com/open-mmlab/mmpose/pull/371), [#402](https://github.com/open-mmlab/mmpose/pull/402)) - -- Support multi-batch inference ([#390](https://github.com/open-mmlab/mmpose/pull/390)) - -- Support MHP dataset ([#386](https://github.com/open-mmlab/mmpose/pull/386)) - -- Support pose tracking demo ([#380](https://github.com/open-mmlab/mmpose/pull/380)) - -- Support mpii-trb demo ([#372](https://github.com/open-mmlab/mmpose/pull/372)) - -- Support mobilenet for hand pose estimation ([#377](https://github.com/open-mmlab/mmpose/pull/377)) - -- Support ResNest backbone ([#370](https://github.com/open-mmlab/mmpose/pull/370)) - -- Support VGG backbone ([#370](https://github.com/open-mmlab/mmpose/pull/370)) - -- Add some useful tools, including `analyze_logs`, `get_flops`, `print_config` ([#324](https://github.com/open-mmlab/mmpose/pull/324)) - -**Bug Fixes** - -- Fix bugs in pck evaluation ([#328](https://github.com/open-mmlab/mmpose/pull/328)) - -- Fix model download links in README ([#396](https://github.com/open-mmlab/mmpose/pull/396), [#397](https://github.com/open-mmlab/mmpose/pull/397)) - -- Fix CrowdPose annotations and update benchmarks ([#384](https://github.com/open-mmlab/mmpose/pull/384)) - -- Fix modelzoo stat ([#354](https://github.com/open-mmlab/mmpose/pull/354), [#360](https://github.com/open-mmlab/mmpose/pull/360), [#362](https://github.com/open-mmlab/mmpose/pull/362)) - -- Fix config files for aic datasets ([#340](https://github.com/open-mmlab/mmpose/pull/340)) - -**Breaking Changes** - -- Rename `image_thr` to `det_bbox_thr` for top-down methods. - -**Improvements** - -- Organize the readme files ([#398](https://github.com/open-mmlab/mmpose/pull/398), [#399](https://github.com/open-mmlab/mmpose/pull/399), [#400](https://github.com/open-mmlab/mmpose/pull/400)) - -- Check linting for markdown ([#379](https://github.com/open-mmlab/mmpose/pull/379)) - -- Add faq.md ([#350](https://github.com/open-mmlab/mmpose/pull/350)) - -- Remove PyTorch 1.4 in CI ([#338](https://github.com/open-mmlab/mmpose/pull/338)) - -- Add pypi badge in readme ([#329](https://github.com/open-mmlab/mmpose/pull/329)) - -## **v0.9.0 (30/11/2020)** - -**Highlights** - -1. Support more human pose estimation methods. - - 1. [MSPN](https://arxiv.org/abs/1901.00148) - 2. [RSN](https://arxiv.org/abs/2003.04030) - -2. Support video pose estimation datasets. - - 1. [sub-JHMDB](http://jhmdb.is.tue.mpg.de/dataset) - -3. Support Onnx model conversion. - -**New Features** - -- Support MSPN ([#278](https://github.com/open-mmlab/mmpose/pull/278)) - -- Support RSN ([#221](https://github.com/open-mmlab/mmpose/pull/221), [#318](https://github.com/open-mmlab/mmpose/pull/318)) - -- Support new post-processing method for MSPN & RSN ([#288](https://github.com/open-mmlab/mmpose/pull/288)) - -- Support sub-JHMDB dataset ([#292](https://github.com/open-mmlab/mmpose/pull/292)) - -- Support urls for pre-trained models in config files ([#232](https://github.com/open-mmlab/mmpose/pull/232)) - -- Support Onnx ([#305](https://github.com/open-mmlab/mmpose/pull/305)) - -**Bug Fixes** - -- Fix model download links in README ([#255](https://github.com/open-mmlab/mmpose/pull/255), [#315](https://github.com/open-mmlab/mmpose/pull/315)) - -**Breaking Changes** - -- `post_process=True|False` and `unbiased_decoding=True|False` are deprecated, use `post_process=None|default|unbiased` etc. 
instead ([#288](https://github.com/open-mmlab/mmpose/pull/288)) - -**Improvements** - -- Enrich the model zoo ([#256](https://github.com/open-mmlab/mmpose/pull/256), [#320](https://github.com/open-mmlab/mmpose/pull/320)) - -- Set the default map_location as 'cpu' to reduce gpu memory cost ([#227](https://github.com/open-mmlab/mmpose/pull/227)) - -- Support return heatmaps and backbone features for bottom-up models ([#229](https://github.com/open-mmlab/mmpose/pull/229)) - -- Upgrade mmcv maximum & minimum version ([#269](https://github.com/open-mmlab/mmpose/pull/269), [#313](https://github.com/open-mmlab/mmpose/pull/313)) - -- Automatically add modelzoo statistics to readthedocs ([#252](https://github.com/open-mmlab/mmpose/pull/252)) - -- Fix Pylint issues ([#258](https://github.com/open-mmlab/mmpose/pull/258), [#259](https://github.com/open-mmlab/mmpose/pull/259), [#260](https://github.com/open-mmlab/mmpose/pull/260), [#262](https://github.com/open-mmlab/mmpose/pull/262), [#265](https://github.com/open-mmlab/mmpose/pull/265), [#267](https://github.com/open-mmlab/mmpose/pull/267), [#268](https://github.com/open-mmlab/mmpose/pull/268), [#270](https://github.com/open-mmlab/mmpose/pull/270), [#271](https://github.com/open-mmlab/mmpose/pull/271), [#272](https://github.com/open-mmlab/mmpose/pull/272), [#273](https://github.com/open-mmlab/mmpose/pull/273), [#275](https://github.com/open-mmlab/mmpose/pull/275), [#276](https://github.com/open-mmlab/mmpose/pull/276), [#283](https://github.com/open-mmlab/mmpose/pull/283), [#285](https://github.com/open-mmlab/mmpose/pull/285), [#293](https://github.com/open-mmlab/mmpose/pull/293), [#294](https://github.com/open-mmlab/mmpose/pull/294), [#295](https://github.com/open-mmlab/mmpose/pull/295)) - -- Improve README ([#226](https://github.com/open-mmlab/mmpose/pull/226), [#257](https://github.com/open-mmlab/mmpose/pull/257), [#264](https://github.com/open-mmlab/mmpose/pull/264), [#280](https://github.com/open-mmlab/mmpose/pull/280), [#296](https://github.com/open-mmlab/mmpose/pull/296)) - -- Support PyTorch 1.7 in CI ([#274](https://github.com/open-mmlab/mmpose/pull/274)) - -- Add docs/tutorials for running demos ([#263](https://github.com/open-mmlab/mmpose/pull/263)) - -## **v0.8.0 (31/10/2020)** - -**Highlights** - -1. Support more human pose estimation datasets. - - 1. [CrowdPose](https://github.com/Jeff-sjtu/CrowdPose) - 2. [PoseTrack18](https://posetrack.net/) - -2. Support more 2D hand keypoint estimation datasets. - - 1. [InterHand2.6](https://github.com/facebookresearch/InterHand2.6M) - -3. Support adversarial training for 3D human shape recovery. - -4. Support multi-stage losses. - -5. Support mpii demo. 
- -**New Features** - -- Support [CrowdPose](https://github.com/Jeff-sjtu/CrowdPose) dataset ([#195](https://github.com/open-mmlab/mmpose/pull/195)) - -- Support [PoseTrack18](https://posetrack.net/) dataset ([#220](https://github.com/open-mmlab/mmpose/pull/220)) - -- Support [InterHand2.6](https://github.com/facebookresearch/InterHand2.6M) dataset ([#202](https://github.com/open-mmlab/mmpose/pull/202)) - -- Support adversarial training for 3D human shape recovery ([#192](https://github.com/open-mmlab/mmpose/pull/192)) - -- Support multi-stage losses ([#204](https://github.com/open-mmlab/mmpose/pull/204)) - -**Bug Fixes** - -- Fix config files ([#190](https://github.com/open-mmlab/mmpose/pull/190)) - -**Improvements** - -- Add mpii demo ([#216](https://github.com/open-mmlab/mmpose/pull/216)) - -- Improve README ([#181](https://github.com/open-mmlab/mmpose/pull/181), [#183](https://github.com/open-mmlab/mmpose/pull/183), [#208](https://github.com/open-mmlab/mmpose/pull/208)) - -- Support return heatmaps and backbone features ([#196](https://github.com/open-mmlab/mmpose/pull/196), [#212](https://github.com/open-mmlab/mmpose/pull/212)) - -- Support different return formats of mmdetection models ([#217](https://github.com/open-mmlab/mmpose/pull/217)) - -## **v0.7.0 (30/9/2020)** - -**Highlights** - -1. Support HMR for 3D human shape recovery. - -2. Support WholeBody human pose estimation. - - 1. [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody) - -3. Support more 2D hand keypoint estimation datasets. - - 1. [Frei-hand](https://lmb.informatik.uni-freiburg.de/projects/freihand/) - 2. [CMU Panoptic HandDB](http://domedb.perception.cs.cmu.edu/handdb.html) - -4. Add more popular backbones & enrich the [modelzoo](https://mmpose.readthedocs.io/en/latest/model_zoo.html) - - 1. ShuffleNetv2 - -5. Support hand demo and whole-body demo. 
- -**New Features** - -- Support HMR for 3D human shape recovery ([#157](https://github.com/open-mmlab/mmpose/pull/157), [#160](https://github.com/open-mmlab/mmpose/pull/160), [#161](https://github.com/open-mmlab/mmpose/pull/161), [#162](https://github.com/open-mmlab/mmpose/pull/162)) - -- Support [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody) dataset ([#133](https://github.com/open-mmlab/mmpose/pull/133)) - -- Support [Frei-hand](https://lmb.informatik.uni-freiburg.de/projects/freihand/) dataset ([#125](https://github.com/open-mmlab/mmpose/pull/125)) - -- Support [CMU Panoptic HandDB](http://domedb.perception.cs.cmu.edu/handdb.html) dataset ([#144](https://github.com/open-mmlab/mmpose/pull/144)) - -- Support H36M dataset ([#159](https://github.com/open-mmlab/mmpose/pull/159)) - -- Support ShuffleNetv2 ([#139](https://github.com/open-mmlab/mmpose/pull/139)) - -- Support saving best models based on key indicator ([#127](https://github.com/open-mmlab/mmpose/pull/127)) - -**Bug Fixes** - -- Fix typos in docs ([#121](https://github.com/open-mmlab/mmpose/pull/121)) - -- Fix assertion ([#142](https://github.com/open-mmlab/mmpose/pull/142)) - -**Improvements** - -- Add tools to transform .mat format to .json format ([#126](https://github.com/open-mmlab/mmpose/pull/126)) - -- Add hand demo ([#115](https://github.com/open-mmlab/mmpose/pull/115)) - -- Add whole-body demo ([#163](https://github.com/open-mmlab/mmpose/pull/163)) - -- Reuse mmcv utility function and update version files ([#135](https://github.com/open-mmlab/mmpose/pull/135), [#137](https://github.com/open-mmlab/mmpose/pull/137)) - -- Enrich the modelzoo ([#147](https://github.com/open-mmlab/mmpose/pull/147), [#169](https://github.com/open-mmlab/mmpose/pull/169)) - -- Improve docs ([#174](https://github.com/open-mmlab/mmpose/pull/174), [#175](https://github.com/open-mmlab/mmpose/pull/175), [#178](https://github.com/open-mmlab/mmpose/pull/178)) - -- Improve README ([#176](https://github.com/open-mmlab/mmpose/pull/176)) - -- Improve version.py ([#173](https://github.com/open-mmlab/mmpose/pull/173)) - -## **v0.6.0 (31/8/2020)** - -**Highlights** - -1. Add more popular backbones & enrich the [modelzoo](https://mmpose.readthedocs.io/en/latest/model_zoo.html) - - 1. ResNext - 2. SEResNet - 3. ResNetV1D - 4. MobileNetv2 - 5. ShuffleNetv1 - 6. CPM (Convolutional Pose Machine) - -2. Add more popular datasets: - - 1. [AIChallenger](https://arxiv.org/abs/1711.06475?context=cs.CV) - 2. [MPII](http://human-pose.mpi-inf.mpg.de/) - 3. [MPII-TRB](https://github.com/kennymckormick/Triplet-Representation-of-human-Body) - 4. [OCHuman](http://www.liruilong.cn/projects/pose2seg/index.html) - -3. Support 2d hand keypoint estimation. - - 1. [OneHand10K](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html) - -4. Support bottom-up inference. 
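The bottom-up inference mentioned in the highlights above is exposed through the 0.x Python API in `mmpose.apis`. A rough sketch of typical usage; the config/checkpoint paths are hypothetical, and the exact signature and return value of `inference_bottom_up_pose_model` changed slightly across 0.x versions:

```python
from mmpose.apis import (inference_bottom_up_pose_model, init_pose_model,
                         vis_pose_result)

# Hypothetical paths; substitute any bottom-up (e.g. associative embedding) model.
pose_model = init_pose_model(
    'associative_embedding_hrnet_w32_coco_512x512.py',
    'hrnet_w32_coco_512x512.pth',
    device='cuda:0')

outputs = inference_bottom_up_pose_model(pose_model, 'demo.jpg')
# Later 0.x versions return (pose_results, intermediate_outputs);
# earlier ones return only pose_results.
pose_results = outputs[0] if isinstance(outputs, tuple) else outputs

vis_pose_result(pose_model, 'demo.jpg', pose_results, out_file='vis_demo.jpg')
```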
- -**New Features** - -- Support [OneHand10K](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html) dataset ([#52](https://github.com/open-mmlab/mmpose/pull/52)) - -- Support [MPII](http://human-pose.mpi-inf.mpg.de/) dataset ([#55](https://github.com/open-mmlab/mmpose/pull/55)) - -- Support [MPII-TRB](https://github.com/kennymckormick/Triplet-Representation-of-human-Body) dataset ([#19](https://github.com/open-mmlab/mmpose/pull/19), [#47](https://github.com/open-mmlab/mmpose/pull/47), [#48](https://github.com/open-mmlab/mmpose/pull/48)) - -- Support [OCHuman](http://www.liruilong.cn/projects/pose2seg/index.html) dataset ([#70](https://github.com/open-mmlab/mmpose/pull/70)) - -- Support [AIChallenger](https://arxiv.org/abs/1711.06475?context=cs.CV) dataset ([#87](https://github.com/open-mmlab/mmpose/pull/87)) - -- Support multiple backbones ([#26](https://github.com/open-mmlab/mmpose/pull/26)) - -- Support CPM model ([#56](https://github.com/open-mmlab/mmpose/pull/56)) - -**Bug Fixes** - -- Fix configs for MPII & MPII-TRB datasets ([#93](https://github.com/open-mmlab/mmpose/pull/93)) - -- Fix the bug of missing `test_pipeline` in configs ([#14](https://github.com/open-mmlab/mmpose/pull/14)) - -- Fix typos ([#27](https://github.com/open-mmlab/mmpose/pull/27), [#28](https://github.com/open-mmlab/mmpose/pull/28), [#50](https://github.com/open-mmlab/mmpose/pull/50), [#53](https://github.com/open-mmlab/mmpose/pull/53), [#63](https://github.com/open-mmlab/mmpose/pull/63)) - -**Improvements** - -- Update benchmark ([#93](https://github.com/open-mmlab/mmpose/pull/93)) - -- Add Dockerfile ([#44](https://github.com/open-mmlab/mmpose/pull/44)) - -- Improve unittest coverage and minor fix ([#18](https://github.com/open-mmlab/mmpose/pull/18)) - -- Support CPUs for train/val/demo ([#34](https://github.com/open-mmlab/mmpose/pull/34)) - -- Support bottom-up demo ([#69](https://github.com/open-mmlab/mmpose/pull/69)) - -- Add tools to publish model ([#62](https://github.com/open-mmlab/mmpose/pull/62)) - -- Enrich the modelzoo ([#64](https://github.com/open-mmlab/mmpose/pull/64), [#68](https://github.com/open-mmlab/mmpose/pull/68), [#82](https://github.com/open-mmlab/mmpose/pull/82)) - -## **v0.5.0 (21/7/2020)** - -**Highlights** - -- MMPose is released. - -**Main Features** - -- Support both top-down and bottom-up pose estimation approaches. - -- Achieve higher training efficiency and higher accuracy than other popular codebases (e.g. AlphaPose, HRNet) - -- Support various backbone models: ResNet, HRNet, SCNet, Houglass and HigherHRNet. +# Changelog + +## **v1.0.0rc1 (14/10/2022)** + +**Highlights** + +- Release RTMPose, a high-performance real-time pose estimation algorithm with cross-platform deployment and inference support. 
See details at the [project page](/projects/rtmpose/)
- Support several new algorithms: ViTPose (arXiv'2022), CID (CVPR'2022), DEKR (CVPR'2021)
- Add Inferencer, a convenient inference interface that performs pose estimation and visualization on images, videos and webcam streams with only one line of code
- Introduce *Project*, a new way to rapidly and easily implement new algorithms and features in MMPose, which is handier for community contributors

**New Features**

- Support RTMPose ([#1971](https://github.com/open-mmlab/mmpose/pull/1971), [#2024](https://github.com/open-mmlab/mmpose/pull/2024), [#2028](https://github.com/open-mmlab/mmpose/pull/2028), [#2030](https://github.com/open-mmlab/mmpose/pull/2030), [#2040](https://github.com/open-mmlab/mmpose/pull/2040), [#2057](https://github.com/open-mmlab/mmpose/pull/2057))
- Support Inferencer ([#1969](https://github.com/open-mmlab/mmpose/pull/1969))
- Support ViTPose ([#1876](https://github.com/open-mmlab/mmpose/pull/1876), [#2056](https://github.com/open-mmlab/mmpose/pull/2056), [#2058](https://github.com/open-mmlab/mmpose/pull/2058), [#2065](https://github.com/open-mmlab/mmpose/pull/2065))
- Support CID ([#1907](https://github.com/open-mmlab/mmpose/pull/1907))
- Support DEKR ([#1834](https://github.com/open-mmlab/mmpose/pull/1834), [#1901](https://github.com/open-mmlab/mmpose/pull/1901))
- Support training with multiple datasets ([#1767](https://github.com/open-mmlab/mmpose/pull/1767), [#1930](https://github.com/open-mmlab/mmpose/pull/1930), [#1938](https://github.com/open-mmlab/mmpose/pull/1938), [#2025](https://github.com/open-mmlab/mmpose/pull/2025))
- Add *project* to allow rapid and easy implementation of new models and features ([#1914](https://github.com/open-mmlab/mmpose/pull/1914))

**Improvements**

- Improve documentation quality ([#1846](https://github.com/open-mmlab/mmpose/pull/1846), [#1858](https://github.com/open-mmlab/mmpose/pull/1858), [#1872](https://github.com/open-mmlab/mmpose/pull/1872), [#1899](https://github.com/open-mmlab/mmpose/pull/1899), [#1925](https://github.com/open-mmlab/mmpose/pull/1925), [#1945](https://github.com/open-mmlab/mmpose/pull/1945), [#1952](https://github.com/open-mmlab/mmpose/pull/1952), [#1990](https://github.com/open-mmlab/mmpose/pull/1990), [#2023](https://github.com/open-mmlab/mmpose/pull/2023), [#2042](https://github.com/open-mmlab/mmpose/pull/2042))
- Support visualizing keypoint indices ([#2051](https://github.com/open-mmlab/mmpose/pull/2051))
- Support OpenPose style visualization ([#2055](https://github.com/open-mmlab/mmpose/pull/2055))
- Accelerate image transpose in data pipelines with tensor operations ([#1976](https://github.com/open-mmlab/mmpose/pull/1976))
- Support auto-import modules from registry ([#1961](https://github.com/open-mmlab/mmpose/pull/1961))
- Support keypoint partition metric ([#1944](https://github.com/open-mmlab/mmpose/pull/1944))
- Support SimCC 1D-heatmap visualization ([#1912](https://github.com/open-mmlab/mmpose/pull/1912))
- Support saving predictions and data metainfo in demos ([#1814](https://github.com/open-mmlab/mmpose/pull/1814), [#1879](https://github.com/open-mmlab/mmpose/pull/1879))
- Support SimCC with DARK ([#1870](https://github.com/open-mmlab/mmpose/pull/1870))
- Remove Gaussian blur for offset maps in UDP-regress ([#1815](https://github.com/open-mmlab/mmpose/pull/1815))
- Refactor the encoding interface of Codec for better extensibility and easier configuration
([#1781](https://github.com/open-mmlab/mmpose/pull/1781)) +- Support evaluating CocoMetric without annotation file ([#1722](https://github.com/open-mmlab/mmpose/pull/1722)) +- Improve unit tests ([#1765](https://github.com/open-mmlab/mmpose/pull/1765)) + +**Bug Fixes** + +- Fix repeated warnings from different ranks ([#2053](https://github.com/open-mmlab/mmpose/pull/2053)) +- Avoid frequent scope switching when using mmdet inference api ([#2039](https://github.com/open-mmlab/mmpose/pull/2039)) +- Remove EMA parameters and message hub data when publishing model checkpoints ([#2036](https://github.com/open-mmlab/mmpose/pull/2036)) +- Fix metainfo copying in dataset class ([#2017](https://github.com/open-mmlab/mmpose/pull/2017)) +- Fix top-down demo bug when there is no object detected ([#2007](https://github.com/open-mmlab/mmpose/pull/2007)) +- Fix config errors ([#1882](https://github.com/open-mmlab/mmpose/pull/1882), [#1906](https://github.com/open-mmlab/mmpose/pull/1906), [#1995](https://github.com/open-mmlab/mmpose/pull/1995)) +- Fix image demo failure when GUI is unavailable ([#1968](https://github.com/open-mmlab/mmpose/pull/1968)) +- Fix bug in AdaptiveWingLoss ([#1953](https://github.com/open-mmlab/mmpose/pull/1953)) +- Fix incorrect importing of RepeatDataset which is deprecated ([#1943](https://github.com/open-mmlab/mmpose/pull/1943)) +- Fix bug in bottom-up datasets that ignores images without instances ([#1752](https://github.com/open-mmlab/mmpose/pull/1752), [#1936](https://github.com/open-mmlab/mmpose/pull/1936)) +- Fix upstream dependency issues ([#1867](https://github.com/open-mmlab/mmpose/pull/1867), [#1921](https://github.com/open-mmlab/mmpose/pull/1921)) +- Fix evaluation issues and update results ([#1763](https://github.com/open-mmlab/mmpose/pull/1763), [#1773](https://github.com/open-mmlab/mmpose/pull/1773), [#1780](https://github.com/open-mmlab/mmpose/pull/1780), [#1850](https://github.com/open-mmlab/mmpose/pull/1850), [#1868](https://github.com/open-mmlab/mmpose/pull/1868)) +- Fix local registry missing warnings ([#1849](https://github.com/open-mmlab/mmpose/pull/1849)) +- Remove deprecated scripts for model deployment ([#1845](https://github.com/open-mmlab/mmpose/pull/1845)) +- Fix a bug in input transformation in BaseHead ([#1843](https://github.com/open-mmlab/mmpose/pull/1843)) +- Fix an interface mismatch with MMDetection in webcam demo ([#1813](https://github.com/open-mmlab/mmpose/pull/1813)) +- Fix a bug in heatmap visualization that causes incorrect scale ([#1800](https://github.com/open-mmlab/mmpose/pull/1800)) +- Add model metafiles ([#1768](https://github.com/open-mmlab/mmpose/pull/1768)) + +## **v1.0.0rc0 (14/10/2022)** + +**New Features** + +- Support 4 light-weight pose estimation algorithms: [SimCC](https://doi.org/10.48550/arxiv.2107.03332) (ECCV'2022), [Debias-IPR](https://openaccess.thecvf.com/content/ICCV2021/papers/Gu_Removing_the_Bias_of_Integral_Pose_Regression_ICCV_2021_paper.pdf) (ICCV'2021), [IPR](https://arxiv.org/abs/1711.08229) (ECCV'2018), and [DSNT](https://arxiv.org/abs/1801.07372v2) (ArXiv'2018) ([#1628](https://github.com/open-mmlab/mmpose/pull/1628)) + +**Migrations** + +- Add Webcam API in MMPose 1.0 ([#1638](https://github.com/open-mmlab/mmpose/pull/1638), [#1662](https://github.com/open-mmlab/mmpose/pull/1662)) @Ben-Louis +- Add codec for Associative Embedding (beta) ([#1603](https://github.com/open-mmlab/mmpose/pull/1603)) @ly015 + +**Improvements** + +- Add a colab tutorial for MMPose 1.0 
([#1660](https://github.com/open-mmlab/mmpose/pull/1660)) @Tau-J
- Add model index in config folder ([#1710](https://github.com/open-mmlab/mmpose/pull/1710), [#1709](https://github.com/open-mmlab/mmpose/pull/1709), [#1627](https://github.com/open-mmlab/mmpose/pull/1627)) @ly015, @Tau-J, @Ben-Louis
- Update and improve documentation ([#1692](https://github.com/open-mmlab/mmpose/pull/1692), [#1656](https://github.com/open-mmlab/mmpose/pull/1656), [#1681](https://github.com/open-mmlab/mmpose/pull/1681), [#1677](https://github.com/open-mmlab/mmpose/pull/1677), [#1664](https://github.com/open-mmlab/mmpose/pull/1664), [#1659](https://github.com/open-mmlab/mmpose/pull/1659)) @Tau-J, @Ben-Louis, @liqikai9
- Improve config structures and formats ([#1651](https://github.com/open-mmlab/mmpose/pull/1651)) @liqikai9

**Bug Fixes**

- Update mmengine version requirements ([#1715](https://github.com/open-mmlab/mmpose/pull/1715)) @Ben-Louis
- Update dependencies of pre-commit hooks ([#1705](https://github.com/open-mmlab/mmpose/pull/1705)) @Ben-Louis
- Fix mmcv version in DockerFile ([#1704](https://github.com/open-mmlab/mmpose/pull/1704))
- Fix a bug in setting dataset metainfo in configs ([#1684](https://github.com/open-mmlab/mmpose/pull/1684)) @ly015
- Fix a bug in UDP training ([#1682](https://github.com/open-mmlab/mmpose/pull/1682)) @liqikai9
- Fix a bug in Dark decoding ([#1676](https://github.com/open-mmlab/mmpose/pull/1676)) @liqikai9
- Fix bugs in visualization ([#1671](https://github.com/open-mmlab/mmpose/pull/1671), [#1668](https://github.com/open-mmlab/mmpose/pull/1668), [#1657](https://github.com/open-mmlab/mmpose/pull/1657)) @liqikai9, @Ben-Louis
- Fix incorrect flops calculation ([#1669](https://github.com/open-mmlab/mmpose/pull/1669)) @liqikai9
- Fix `tensor.tile` compatibility issue for pytorch 1.6 ([#1658](https://github.com/open-mmlab/mmpose/pull/1658)) @ly015
- Fix compatibility with `MultilevelPixelData` ([#1647](https://github.com/open-mmlab/mmpose/pull/1647)) @liqikai9

## **v1.0.0beta (01/09/2022)**

We are excited to announce the release of MMPose 1.0.0beta, the first version of MMPose 1.x and a part of the OpenMMLab 2.0 projects.
Built upon the new [training engine](https://github.com/open-mmlab/mmengine),
MMPose 1.x unifies the interfaces of datasets, models, evaluation, and visualization with faster training and testing speed.
It also provides stronger baselines.

**Highlights**

- **New engine**. MMPose 1.x is based on [MMEngine](https://github.com/open-mmlab/mmengine), which provides a general and powerful runner that allows more flexible customizations and significantly simplifies the entry points of high-level interfaces.

- **Unified interfaces**. As a part of the OpenMMLab 2.0 projects, MMPose 1.x unifies and refactors the interfaces and internal logic of training, testing, datasets, models, evaluation, and visualization. All the OpenMMLab 2.0 projects share the same design in these interfaces and logic to allow the emergence of multi-task/multi-modality algorithms.

- **More documentation and tutorials**. We have added a wealth of documentation and tutorials to help users get started more smoothly. Read them [here](https://mmpose.readthedocs.io/en/latest/).

**Breaking Changes**

In this release, we made lots of major refactoring and modifications. Please refer to the [migration guide](../migration.md) for details and migration instructions.
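In practice, the MMEngine-based design means that training and testing are driven by a single `Runner` built from the config. A minimal sketch; the config path below is illustrative, and any MMPose 1.x config is handled the same way:

```python
from mmengine.config import Config
from mmengine.runner import Runner

# Illustrative config path; any MMPose 1.x config follows the same pattern.
cfg = Config.fromfile(
    'configs/body_2d_keypoint/topdown_heatmap/coco/'
    'td-hm_hrnet-w32_8xb64-210e_coco-256x192.py')
cfg.work_dir = 'work_dirs/hrnet_w32_coco'

# MMEngine builds the model, dataloaders, optimizer, hooks and loops from the config.
runner = Runner.from_cfg(cfg)
runner.train()  # or runner.test()
```

For inference, the Inferencer introduced in v1.0.0rc1 above wraps model construction, prediction and visualization behind a single `MMPoseInferencer` object, which is what makes the "one line of code" usage possible.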
+ +## **v0.28.1 (28/07/2022)** + +This release is meant to fix the compatibility with the latest mmcv v1.6.1 + +## **v0.28.0 (06/07/2022)** + +**Highlights** + +- Support [TCFormer](https://openaccess.thecvf.com/content/CVPR2022/html/Zeng_Not_All_Tokens_Are_Equal_Human-Centric_Visual_Analysis_via_Token_CVPR_2022_paper.html) backbone, CVPR'2022 ([#1447](https://github.com/open-mmlab/mmpose/pull/1447), [#1452](https://github.com/open-mmlab/mmpose/pull/1452)) @zengwang430521 + +- Add [RLE](https://arxiv.org/abs/2107.11291) models on COCO dataset ([#1424](https://github.com/open-mmlab/mmpose/pull/1424)) @Indigo6, @Ben-Louis, @ly015 + +- Update swin models with better performance ([#1467](https://github.com/open-mmlab/mmpose/pull/1434)) @jin-s13 + +**New Features** + +- Support [TCFormer](https://openaccess.thecvf.com/content/CVPR2022/html/Zeng_Not_All_Tokens_Are_Equal_Human-Centric_Visual_Analysis_via_Token_CVPR_2022_paper.html) backbone, CVPR'2022 ([#1447](https://github.com/open-mmlab/mmpose/pull/1447), [#1452](https://github.com/open-mmlab/mmpose/pull/1452)) @zengwang430521 + +- Add [RLE](https://arxiv.org/abs/2107.11291) models on COCO dataset ([#1424](https://github.com/open-mmlab/mmpose/pull/1424)) @Indigo6, @Ben-Louis, @ly015 + +- Support layer decay optimizer constructor and learning rate decay optimizer constructor ([#1423](https://github.com/open-mmlab/mmpose/pull/1423)) @jin-s13 + +**Improvements** + +- Improve documentation quality ([#1416](https://github.com/open-mmlab/mmpose/pull/1416), [#1421](https://github.com/open-mmlab/mmpose/pull/1421), [#1423](https://github.com/open-mmlab/mmpose/pull/1423), [#1426](https://github.com/open-mmlab/mmpose/pull/1426), [#1458](https://github.com/open-mmlab/mmpose/pull/1458), [#1463](https://github.com/open-mmlab/mmpose/pull/1463)) @ly015, @liqikai9 + +- Support installation by [mim](https://github.com/open-mmlab/mim) ([#1425](https://github.com/open-mmlab/mmpose/pull/1425)) @liqikai9 + +- Support PAVI logger ([#1434](https://github.com/open-mmlab/mmpose/pull/1434)) @EvelynWang-0423 + +- Add progress bar for some demos ([#1454](https://github.com/open-mmlab/mmpose/pull/1454)) @liqikai9 + +- Webcam API supports quick device setting in terminal commands ([#1466](https://github.com/open-mmlab/mmpose/pull/1466)) @ly015 + +- Update swin models with better performance ([#1467](https://github.com/open-mmlab/mmpose/pull/1434)) @jin-s13 + +**Bug Fixes** + +- Rename `custom_hooks_config` to `custom_hooks` in configs to align with the documentation ([#1427](https://github.com/open-mmlab/mmpose/pull/1427)) @ly015 + +- Fix deadlock issue in Webcam API ([#1430](https://github.com/open-mmlab/mmpose/pull/1430)) @ly015 + +- Fix smoother configs in video 3D demo ([#1457](https://github.com/open-mmlab/mmpose/pull/1457)) @ly015 + +## **v0.27.0 (07/06/2022)** + +**Highlights** + +- Support hand gesture recognition + + - Try the demo for gesture recognition + - Learn more about the algorithm, dataset and experiment results + +- Major upgrade to the Webcam API + + - Tutorials (EN|zh_CN) + - [API Reference](https://mmpose.readthedocs.io/en/latest/api.html#mmpose-apis-webcam) + - Demo + +**New Features** + +- Support gesture recognition algorithm [MTUT](https://openaccess.thecvf.com/content_CVPR_2019/html/Abavisani_Improving_the_Performance_of_Unimodal_Dynamic_Hand-Gesture_Recognition_With_Multimodal_CVPR_2019_paper.html) CVPR'2019 and dataset [NVGesture](https://openaccess.thecvf.com/content_cvpr_2016/html/Molchanov_Online_Detection_and_CVPR_2016_paper.html) CVPR'2016 
([#1380](https://github.com/open-mmlab/mmpose/pull/1380)) @Ben-Louis + +**Improvements** + +- Upgrade Webcam API and related documents ([#1393](https://github.com/open-mmlab/mmpose/pull/1393), [#1404](https://github.com/open-mmlab/mmpose/pull/1404), [#1413](https://github.com/open-mmlab/mmpose/pull/1413)) @ly015 + +- Support exporting COCO inference result without the annotation file ([#1368](https://github.com/open-mmlab/mmpose/pull/1368)) @liqikai9 + +- Replace markdownlint with mdformat in CI to avoid the dependence on ruby [#1382](https://github.com/open-mmlab/mmpose/pull/1382) @ly015 + +- Improve documentation quality ([#1385](https://github.com/open-mmlab/mmpose/pull/1385), [#1394](https://github.com/open-mmlab/mmpose/pull/1394), [#1395](https://github.com/open-mmlab/mmpose/pull/1395), [#1408](https://github.com/open-mmlab/mmpose/pull/1408)) @chubei-oppen, @ly015, @liqikai9 + +**Bug Fixes** + +- Fix xywh->xyxy bbox conversion in dataset sanity check ([#1367](https://github.com/open-mmlab/mmpose/pull/1367)) @jin-s13 + +- Fix a bug in two-stage 3D keypoint demo ([#1373](https://github.com/open-mmlab/mmpose/pull/1373)) @ly015 + +- Fix out-dated settings in PVT configs ([#1376](https://github.com/open-mmlab/mmpose/pull/1376)) @ly015 + +- Fix myst settings for document compiling ([#1381](https://github.com/open-mmlab/mmpose/pull/1381)) @ly015 + +- Fix a bug in bbox transform ([#1384](https://github.com/open-mmlab/mmpose/pull/1384)) @ly015 + +- Fix inaccurate description of `min_keypoints` in tracking apis ([#1398](https://github.com/open-mmlab/mmpose/pull/1398)) @pallgeuer + +- Fix warning with `torch.meshgrid` ([#1402](https://github.com/open-mmlab/mmpose/pull/1402)) @pallgeuer + +- Remove redundant transformer modules from `mmpose.datasets.backbones.utils` ([#1405](https://github.com/open-mmlab/mmpose/pull/1405)) @ly015 + +## **v0.26.0 (05/05/2022)** + +**Highlights** + +- Support [RLE (Residual Log-likelihood Estimation)](https://arxiv.org/abs/2107.11291), ICCV'2021 ([#1259](https://github.com/open-mmlab/mmpose/pull/1259)) @Indigo6, @ly015 + +- Support [Swin Transformer](https://arxiv.org/abs/2103.14030), ICCV'2021 ([#1300](https://github.com/open-mmlab/mmpose/pull/1300)) @yumendecc, @ly015 + +- Support [PVT](https://arxiv.org/abs/2102.12122), ICCV'2021 and [PVTv2](https://arxiv.org/abs/2106.13797), CVMJ'2022 ([#1343](https://github.com/open-mmlab/mmpose/pull/1343)) @zengwang430521 + +- Speed up inference and reduce CPU usage by optimizing the pre-processing pipeline ([#1320](https://github.com/open-mmlab/mmpose/pull/1320)) @chenxinfeng4, @liqikai9 + +**New Features** + +- Support [RLE (Residual Log-likelihood Estimation)](https://arxiv.org/abs/2107.11291), ICCV'2021 ([#1259](https://github.com/open-mmlab/mmpose/pull/1259)) @Indigo6, @ly015 + +- Support [Swin Transformer](https://arxiv.org/abs/2103.14030), ICCV'2021 ([#1300](https://github.com/open-mmlab/mmpose/pull/1300)) @yumendecc, @ly015 + +- Support [PVT](https://arxiv.org/abs/2102.12122), ICCV'2021 and [PVTv2](https://arxiv.org/abs/2106.13797), CVMJ'2022 ([#1343](https://github.com/open-mmlab/mmpose/pull/1343)) @zengwang430521 + +- Support [FPN](https://openaccess.thecvf.com/content_cvpr_2017/html/Lin_Feature_Pyramid_Networks_CVPR_2017_paper.html), CVPR'2017 ([#1300](https://github.com/open-mmlab/mmpose/pull/1300)) @yumendecc, @ly015 + +**Improvements** + +- Speed up inference and reduce CPU usage by optimizing the pre-processing pipeline ([#1320](https://github.com/open-mmlab/mmpose/pull/1320)) @chenxinfeng4, @liqikai9 + +- 
Video demo supports models that require multi-frame inputs ([#1300](https://github.com/open-mmlab/mmpose/pull/1300)) @liqikai9, @jin-s13

- Update benchmark regression list ([#1328](https://github.com/open-mmlab/mmpose/pull/1328)) @ly015, @liqikai9

- Remove unnecessary warnings in `TopDownPoseTrack18VideoDataset` ([#1335](https://github.com/open-mmlab/mmpose/pull/1335)) @liqikai9

- Improve documentation quality ([#1313](https://github.com/open-mmlab/mmpose/pull/1313), [#1305](https://github.com/open-mmlab/mmpose/pull/1305)) @Ben-Louis, @ly015

- Update deprecated settings in configs ([#1317](https://github.com/open-mmlab/mmpose/pull/1317)) @ly015

**Bug Fixes**

- Fix a bug in human skeleton grouping that may skip the matching process unexpectedly when `ignore_to_much` is True ([#1341](https://github.com/open-mmlab/mmpose/pull/1341)) @daixinghome

- Fix a GPG key error that leads to CI failure ([#1354](https://github.com/open-mmlab/mmpose/pull/1354)) @ly015

- Fix bugs in distributed training script ([#1338](https://github.com/open-mmlab/mmpose/pull/1338), [#1298](https://github.com/open-mmlab/mmpose/pull/1298)) @ly015

- Fix an upstream bug in xtcocotools that causes incorrect AP(M) results ([#1308](https://github.com/open-mmlab/mmpose/pull/1308)) @jin-s13, @ly015

- Fix indentation errors in the colab tutorial ([#1298](https://github.com/open-mmlab/mmpose/pull/1298)) @YuanZi1501040205

- Fix incompatible model weight initialization with other OpenMMLab codebases ([#1329](https://github.com/open-mmlab/mmpose/pull/1329)) @274869388

- Fix HRNet FP16 checkpoints download URL ([#1309](https://github.com/open-mmlab/mmpose/pull/1309)) @YinAoXiong

- Fix typos in `body3d_two_stage_video_demo.py` ([#1295](https://github.com/open-mmlab/mmpose/pull/1295)) @mucozcan

**Breaking Changes**

- Refactor bbox processing in datasets and pipelines ([#1311](https://github.com/open-mmlab/mmpose/pull/1311)) @ly015, @Ben-Louis

- The bbox format conversion (xywh to center-scale) and the random translation are moved from the dataset to the pipeline. The comparison between the new and old versions is shown below:

  **Dataset** (e.g. [TopDownCOCODataset](https://github.com/open-mmlab/mmpose/blob/master/mmpose/datasets/datasets/top_down/topdown_coco_dataset.py))

  In v0.26.0, the data sample only contains the bbox:

  ```python
  # Data sample only contains bbox
  rec.append({
      'bbox': obj['clean_bbox'][:4],
      ...
  })
  ```

  In v0.25.0, the dataset converted the bbox to center and scale:

  ```python
  # Convert bbox from xywh to center-scale
  center, scale = self._xywh2cs(*obj['clean_bbox'][:4])
  # Data sample contains center and scale
  rec.append({
      'bbox': obj['clean_bbox'][:4],
      'center': center,
      'scale': scale,
      ...
  })
  ```

  **Pipeline Config** (e.g. [HRNet+COCO](https://github.com/open-mmlab/mmpose/blob/master/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/hrnet_w32_coco_256x192.py))

  In v0.26.0, the conversion and augmentation are handled by the pipeline:

  ```python
  ...
  train_pipeline = [
      dict(type='LoadImageFromFile'),
      # Convert bbox from xywh to center-scale
      dict(type='TopDownGetBboxCenterScale', padding=1.25),
      # Randomly shift bbox center
      dict(type='TopDownRandomShiftBboxCenter', shift_factor=0.16, prob=0.3),
      ...
  ]
  ```

  In v0.25.0, the pipeline did not contain these steps:

  ```python
  ...
  train_pipeline = [
      dict(type='LoadImageFromFile'),
      ...
  ]
  ```

  **Advantage**

  - Simpler data sample content
  - Flexible bbox format conversion and augmentation
  - Apply bbox random translation every epoch (instead of only applying once at the annotation loading)

  **BC Breaking**

  The method `_xywh2cs` of dataset base classes (e.g. [Kpt2dSviewRgbImgTopDownDataset](https://github.com/open-mmlab/mmpose/blob/master/mmpose/datasets/datasets/base/kpt_2d_sview_rgb_img_top_down_dataset.py)) will be deprecated in the future. Custom datasets will need modifications to move the bbox format conversion to pipelines.

## **v0.25.0 (02/04/2022)**

**Highlights**

- Support Shelf and Campus datasets with pre-trained VoxelPose models, ["3D Pictorial Structures for Multiple Human Pose Estimation"](http://campar.in.tum.de/pub/belagiannis2014cvpr/belagiannis2014cvpr.pdf), CVPR'2014 ([#1225](https://github.com/open-mmlab/mmpose/pull/1225)) @liqikai9, @wusize

- Add `Smoother` module for temporal smoothing of the pose estimation with configurable filters ([#1127](https://github.com/open-mmlab/mmpose/pull/1127)) @ailingzengzzz, @ly015

- Support SmoothNet for pose smoothing, ["SmoothNet: A Plug-and-Play Network for Refining Human Poses in Videos"](https://arxiv.org/abs/2112.13715), arXiv'2021 ([#1279](https://github.com/open-mmlab/mmpose/pull/1279)) @ailingzengzzz, @ly015

- Add multiview 3D pose estimation demo ([#1270](https://github.com/open-mmlab/mmpose/pull/1270)) @wusize

**New Features**

- Support Shelf and Campus datasets with pre-trained VoxelPose models, ["3D Pictorial Structures for Multiple Human Pose Estimation"](http://campar.in.tum.de/pub/belagiannis2014cvpr/belagiannis2014cvpr.pdf), CVPR'2014 ([#1225](https://github.com/open-mmlab/mmpose/pull/1225)) @liqikai9, @wusize

- Add `Smoother` module for temporal smoothing of the pose estimation with configurable filters ([#1127](https://github.com/open-mmlab/mmpose/pull/1127)) @ailingzengzzz, @ly015

- Support SmoothNet for pose smoothing, ["SmoothNet: A Plug-and-Play Network for Refining Human Poses in Videos"](https://arxiv.org/abs/2112.13715), arXiv'2021 ([#1279](https://github.com/open-mmlab/mmpose/pull/1279)) @ailingzengzzz, @ly015

- Add multiview 3D pose estimation demo ([#1270](https://github.com/open-mmlab/mmpose/pull/1270)) @wusize

- Support multi-machine distributed training ([#1248](https://github.com/open-mmlab/mmpose/pull/1248)) @ly015

**Improvements**

- Update HRFormer configs and checkpoints with relative position bias ([#1245](https://github.com/open-mmlab/mmpose/pull/1245)) @zengwang430521

- Support using different random seed for each distributed node ([#1257](https://github.com/open-mmlab/mmpose/pull/1257), [#1229](https://github.com/open-mmlab/mmpose/pull/1229)) @ly015

- Improve documentation quality ([#1275](https://github.com/open-mmlab/mmpose/pull/1275), [#1255](https://github.com/open-mmlab/mmpose/pull/1255), [#1258](https://github.com/open-mmlab/mmpose/pull/1258), [#1249](https://github.com/open-mmlab/mmpose/pull/1249), [#1247](https://github.com/open-mmlab/mmpose/pull/1247), [#1240](https://github.com/open-mmlab/mmpose/pull/1240), [#1235](https://github.com/open-mmlab/mmpose/pull/1235)) @ly015, @jin-s13, @YoniChechik

**Bug Fixes**

- Fix keypoint index in RHD dataset meta information ([#1265](https://github.com/open-mmlab/mmpose/pull/1265)) @liqikai9

- Fix pre-commit hook unexpected behavior on Windows ([#1282](https://github.com/open-mmlab/mmpose/pull/1282)) @liqikai9

- Remove python-dev installation in CI ([#1276](https://github.com/open-mmlab/mmpose/pull/1276)) @ly015

- Unify hyphens in argument
names in tools and demos ([#1271](https://github.com/open-mmlab/mmpose/pull/1271)) @ly015 + +- Fix ambiguous channel size in `channel_shuffle` that may cause exporting failure (#1242) @PINTO0309 + +- Fix a bug in Webcam API that causes single-class detectors fail ([#1239](https://github.com/open-mmlab/mmpose/pull/1239)) @674106399 + +- Fix the issue that `custom_hook` can not be set in configs ([#1236](https://github.com/open-mmlab/mmpose/pull/1236)) @bladrome + +- Fix incompatible MMCV version in DockerFile ([#raykindle](https://github.com/open-mmlab/mmpose/pull/raykindle)) + +- Skip invisible joints in visualization ([#1228](https://github.com/open-mmlab/mmpose/pull/1228)) @womeier + +## **v0.24.0 (07/03/2022)** + +**Highlights** + +- Support HRFormer ["HRFormer: High-Resolution Vision Transformer for Dense Predict"](https://proceedings.neurips.cc/paper/2021/hash/3bbfdde8842a5c44a0323518eec97cbe-Abstract.html), NeurIPS'2021 ([#1203](https://github.com/open-mmlab/mmpose/pull/1203)) @zengwang430521 + +- Support Windows installation with pip ([#1213](https://github.com/open-mmlab/mmpose/pull/1213)) @jin-s13, @ly015 + +- Add WebcamAPI documents ([#1187](https://github.com/open-mmlab/mmpose/pull/1187)) @ly015 + +**New Features** + +- Support HRFormer ["HRFormer: High-Resolution Vision Transformer for Dense Predict"](https://proceedings.neurips.cc/paper/2021/hash/3bbfdde8842a5c44a0323518eec97cbe-Abstract.html), NeurIPS'2021 ([#1203](https://github.com/open-mmlab/mmpose/pull/1203)) @zengwang430521 + +- Support Windows installation with pip ([#1213](https://github.com/open-mmlab/mmpose/pull/1213)) @jin-s13, @ly015 + +- Support CPU training with mmcv \< v1.4.4 ([#1161](https://github.com/open-mmlab/mmpose/pull/1161)) @EasonQYS, @ly015 + +- Add "Valentine Magic" demo with WebcamAPI ([#1189](https://github.com/open-mmlab/mmpose/pull/1189), [#1191](https://github.com/open-mmlab/mmpose/pull/1191)) @liqikai9 + +**Improvements** + +- Refactor multi-view 3D pose estimation framework towards better modularization and expansibility ([#1196](https://github.com/open-mmlab/mmpose/pull/1196)) @wusize + +- Add WebcamAPI documents and tutorials ([#1187](https://github.com/open-mmlab/mmpose/pull/1187)) @ly015 + +- Refactor dataset evaluation interface to align with other OpenMMLab codebases ([#1209](https://github.com/open-mmlab/mmpose/pull/1209)) @ly015 + +- Add deprecation message for deploy tools since [MMDeploy](https://github.com/open-mmlab/mmdeploy) has supported MMPose ([#1207](https://github.com/open-mmlab/mmpose/pull/1207)) @QwQ2000 + +- Improve documentation quality ([#1206](https://github.com/open-mmlab/mmpose/pull/1206), [#1161](https://github.com/open-mmlab/mmpose/pull/1161)) @ly015 + +- Switch to OpenMMLab official pre-commit-hook for copyright check ([#1214](https://github.com/open-mmlab/mmpose/pull/1214)) @ly015 + +**Bug Fixes** + +- Fix hard-coded data collating and scattering in inference ([#1175](https://github.com/open-mmlab/mmpose/pull/1175)) @ly015 + +- Fix model configs on JHMDB dataset ([#1188](https://github.com/open-mmlab/mmpose/pull/1188)) @jin-s13 + +- Fix area calculation in pose tracking inference ([#1197](https://github.com/open-mmlab/mmpose/pull/1197)) @pallgeuer + +- Fix registry scope conflict of module wrapper ([#1204](https://github.com/open-mmlab/mmpose/pull/1204)) @ly015 + +- Update MMCV installation in CI and documents ([#1205](https://github.com/open-mmlab/mmpose/pull/1205)) + +- Fix incorrect color channel order in visualization functions 
([#1212](https://github.com/open-mmlab/mmpose/pull/1212)) @ly015 + +## **v0.23.0 (11/02/2022)** + +**Highlights** + +- Add [MMPose Webcam API](https://github.com/open-mmlab/mmpose/tree/master/tools/webcam): A simple yet powerful tools to develop interactive webcam applications with MMPose functions. ([#1178](https://github.com/open-mmlab/mmpose/pull/1178), [#1173](https://github.com/open-mmlab/mmpose/pull/1173), [#1173](https://github.com/open-mmlab/mmpose/pull/1173), [#1143](https://github.com/open-mmlab/mmpose/pull/1143), [#1094](https://github.com/open-mmlab/mmpose/pull/1094), [#1133](https://github.com/open-mmlab/mmpose/pull/1133), [#1098](https://github.com/open-mmlab/mmpose/pull/1098), [#1160](https://github.com/open-mmlab/mmpose/pull/1160)) @ly015, @jin-s13, @liqikai9, @wusize, @luminxu, @zengwang430521 @mzr1996 + +**New Features** + +- Add [MMPose Webcam API](https://github.com/open-mmlab/mmpose/tree/master/tools/webcam): A simple yet powerful tools to develop interactive webcam applications with MMPose functions. ([#1178](https://github.com/open-mmlab/mmpose/pull/1178), [#1173](https://github.com/open-mmlab/mmpose/pull/1173), [#1173](https://github.com/open-mmlab/mmpose/pull/1173), [#1143](https://github.com/open-mmlab/mmpose/pull/1143), [#1094](https://github.com/open-mmlab/mmpose/pull/1094), [#1133](https://github.com/open-mmlab/mmpose/pull/1133), [#1098](https://github.com/open-mmlab/mmpose/pull/1098), [#1160](https://github.com/open-mmlab/mmpose/pull/1160)) @ly015, @jin-s13, @liqikai9, @wusize, @luminxu, @zengwang430521 @mzr1996 + +- Support ConcatDataset ([#1139](https://github.com/open-mmlab/mmpose/pull/1139)) @Canwang-sjtu + +- Support CPU training and testing ([#1157](https://github.com/open-mmlab/mmpose/pull/1157)) @ly015 + +**Improvements** + +- Add multi-processing configurations to speed up distributed training and testing ([#1146](https://github.com/open-mmlab/mmpose/pull/1146)) @ly015 + +- Add default runtime config ([#1145](https://github.com/open-mmlab/mmpose/pull/1145)) + +- Upgrade isort in pre-commit hook ([#1179](https://github.com/open-mmlab/mmpose/pull/1179)) @liqikai9 + +- Update README and documents ([#1171](https://github.com/open-mmlab/mmpose/pull/1171), [#1167](https://github.com/open-mmlab/mmpose/pull/1167), [#1153](https://github.com/open-mmlab/mmpose/pull/1153), [#1149](https://github.com/open-mmlab/mmpose/pull/1149), [#1148](https://github.com/open-mmlab/mmpose/pull/1148), [#1147](https://github.com/open-mmlab/mmpose/pull/1147), [#1140](https://github.com/open-mmlab/mmpose/pull/1140)) @jin-s13, @wusize, @TommyZihao, @ly015 + +**Bug Fixes** + +- Fix undeterministic behavior in pre-commit hooks ([#1136](https://github.com/open-mmlab/mmpose/pull/1136)) @jin-s13 + +- Deprecate the support for "python setup.py test" ([#1179](https://github.com/open-mmlab/mmpose/pull/1179)) @ly015 + +- Fix incompatible settings with MMCV on HSigmoid default parameters ([#1132](https://github.com/open-mmlab/mmpose/pull/1132)) @ly015 + +- Fix albumentation installation ([#1184](https://github.com/open-mmlab/mmpose/pull/1184)) @BIGWangYuDong + +## **v0.22.0 (04/01/2022)** + +**Highlights** + +- Support VoxelPose ["VoxelPose: Towards Multi-Camera 3D Human Pose Estimation in Wild Environment"](https://arxiv.org/abs/2004.06239), ECCV'2020 ([#1050](https://github.com/open-mmlab/mmpose/pull/1050)) @wusize + +- Support Soft Wing loss ["Structure-Coherent Deep Feature Learning for Robust Face Alignment"](https://linchunze.github.io/papers/TIP21_Structure_coherent_FA.pdf), TIP'2021 
([#1077](https://github.com/open-mmlab/mmpose/pull/1077)) @jin-s13 + +- Support Adaptive Wing loss ["Adaptive Wing Loss for Robust Face Alignment via Heatmap Regression"](https://arxiv.org/abs/1904.07399), ICCV'2019 ([#1072](https://github.com/open-mmlab/mmpose/pull/1072)) @jin-s13 + +**New Features** + +- Support VoxelPose ["VoxelPose: Towards Multi-Camera 3D Human Pose Estimation in Wild Environment"](https://arxiv.org/abs/2004.06239), ECCV'2020 ([#1050](https://github.com/open-mmlab/mmpose/pull/1050)) @wusize + +- Support Soft Wing loss ["Structure-Coherent Deep Feature Learning for Robust Face Alignment"](https://linchunze.github.io/papers/TIP21_Structure_coherent_FA.pdf), TIP'2021 ([#1077](https://github.com/open-mmlab/mmpose/pull/1077)) @jin-s13 + +- Support Adaptive Wing loss ["Adaptive Wing Loss for Robust Face Alignment via Heatmap Regression"](https://arxiv.org/abs/1904.07399), ICCV'2019 ([#1072](https://github.com/open-mmlab/mmpose/pull/1072)) @jin-s13 + +- Add LiteHRNet-18 Checkpoints trained on COCO. ([#1120](https://github.com/open-mmlab/mmpose/pull/1120)) @jin-s13 + +**Improvements** + +- Improve documentation quality ([#1115](https://github.com/open-mmlab/mmpose/pull/1115), [#1111](https://github.com/open-mmlab/mmpose/pull/1111), [#1105](https://github.com/open-mmlab/mmpose/pull/1105), [#1087](https://github.com/open-mmlab/mmpose/pull/1087), [#1086](https://github.com/open-mmlab/mmpose/pull/1086), [#1085](https://github.com/open-mmlab/mmpose/pull/1085), [#1084](https://github.com/open-mmlab/mmpose/pull/1084), [#1083](https://github.com/open-mmlab/mmpose/pull/1083), [#1124](https://github.com/open-mmlab/mmpose/pull/1124), [#1070](https://github.com/open-mmlab/mmpose/pull/1070), [#1068](https://github.com/open-mmlab/mmpose/pull/1068)) @jin-s13, @liqikai9, @ly015 + +- Support CircleCI ([#1074](https://github.com/open-mmlab/mmpose/pull/1074)) @ly015 + +- Skip unit tests in CI when only document files were changed ([#1074](https://github.com/open-mmlab/mmpose/pull/1074), [#1041](https://github.com/open-mmlab/mmpose/pull/1041)) @QwQ2000, @ly015 + +- Support file_client_args in LoadImageFromFile ([#1076](https://github.com/open-mmlab/mmpose/pull/1076)) @jin-s13 + +**Bug Fixes** + +- Fix a bug in Dark UDP postprocessing that causes error when the channel number is large. 
([#1079](https://github.com/open-mmlab/mmpose/pull/1079), [#1116](https://github.com/open-mmlab/mmpose/pull/1116)) @X00123, @jin-s13 + +- Fix hard-coded `sigmas` in bottom-up image demo ([#1107](https://github.com/open-mmlab/mmpose/pull/1107), [#1101](https://github.com/open-mmlab/mmpose/pull/1101)) @chenxinfeng4, @liqikai9 + +- Fix unstable checks in unit tests ([#1112](https://github.com/open-mmlab/mmpose/pull/1112)) @ly015 + +- Do not destroy NULL windows if `args.show==False` in demo scripts ([#1104](https://github.com/open-mmlab/mmpose/pull/1104)) @bladrome + +## **v0.21.0 (06/12/2021)** + +**Highlights** + +- Support ["Learning Temporal Pose Estimation from Sparsely-Labeled Videos"](https://arxiv.org/abs/1906.04016), NeurIPS'2019 ([#932](https://github.com/open-mmlab/mmpose/pull/932), [#1006](https://github.com/open-mmlab/mmpose/pull/1006), [#1036](https://github.com/open-mmlab/mmpose/pull/1036), [#1060](https://github.com/open-mmlab/mmpose/pull/1060)) @liqikai9 + +- Add ViPNAS-MobileNetV3 models ([#1025](https://github.com/open-mmlab/mmpose/pull/1025)) @luminxu, @jin-s13 + +- Add inference speed benchmark ([#1028](https://github.com/open-mmlab/mmpose/pull/1028), [#1034](https://github.com/open-mmlab/mmpose/pull/1034), [#1044](https://github.com/open-mmlab/mmpose/pull/1044)) @liqikai9 + +**New Features** + +- Support ["Learning Temporal Pose Estimation from Sparsely-Labeled Videos"](https://arxiv.org/abs/1906.04016), NeurIPS'2019 ([#932](https://github.com/open-mmlab/mmpose/pull/932), [#1006](https://github.com/open-mmlab/mmpose/pull/1006), [#1036](https://github.com/open-mmlab/mmpose/pull/1036)) @liqikai9 + +- Add ViPNAS-MobileNetV3 models ([#1025](https://github.com/open-mmlab/mmpose/pull/1025)) @luminxu, @jin-s13 + +- Add light-weight top-down models for whole-body keypoint detection ([#1009](https://github.com/open-mmlab/mmpose/pull/1009), [#1020](https://github.com/open-mmlab/mmpose/pull/1020), [#1055](https://github.com/open-mmlab/mmpose/pull/1055)) @luminxu, @ly015 + +- Add HRNet checkpoints with various settings on PoseTrack18 ([#1035](https://github.com/open-mmlab/mmpose/pull/1035)) @liqikai9 + +**Improvements** + +- Add inference speed benchmark ([#1028](https://github.com/open-mmlab/mmpose/pull/1028), [#1034](https://github.com/open-mmlab/mmpose/pull/1034), [#1044](https://github.com/open-mmlab/mmpose/pull/1044)) @liqikai9 + +- Update model metafile format ([#1001](https://github.com/open-mmlab/mmpose/pull/1001)) @ly015 + +- Support minus output feature index in mobilenet_v3 ([#1005](https://github.com/open-mmlab/mmpose/pull/1005)) @luminxu + +- Improve documentation quality ([#1018](https://github.com/open-mmlab/mmpose/pull/1018), [#1026](https://github.com/open-mmlab/mmpose/pull/1026), [#1027](https://github.com/open-mmlab/mmpose/pull/1027), [#1031](https://github.com/open-mmlab/mmpose/pull/1031), [#1038](https://github.com/open-mmlab/mmpose/pull/1038), [#1046](https://github.com/open-mmlab/mmpose/pull/1046), [#1056](https://github.com/open-mmlab/mmpose/pull/1056), [#1057](https://github.com/open-mmlab/mmpose/pull/1057)) @edybk, @luminxu, @ly015, @jin-s13 + +- Set default random seed in training initialization ([#1030](https://github.com/open-mmlab/mmpose/pull/1030)) @ly015 + +- Skip CI when only specific files changed ([#1041](https://github.com/open-mmlab/mmpose/pull/1041), [#1059](https://github.com/open-mmlab/mmpose/pull/1059)) @QwQ2000, @ly015 + +- Automatically cancel uncompleted action runs when new commit arrives 
([#1053](https://github.com/open-mmlab/mmpose/pull/1053)) @ly015 + +**Bug Fixes** + +- Update pose tracking demo to be compatible with latest mmtracking ([#1014](https://github.com/open-mmlab/mmpose/pull/1014)) @jin-s13 + +- Fix symlink creation failure when installed in Windows environments ([#1039](https://github.com/open-mmlab/mmpose/pull/1039)) @QwQ2000 + +- Fix AP-10K dataset sigmas ([#1040](https://github.com/open-mmlab/mmpose/pull/1040)) @jin-s13 + +## **v0.20.0 (01/11/2021)** + +**Highlights** + +- Add AP-10K dataset for animal pose estimation ([#987](https://github.com/open-mmlab/mmpose/pull/987)) @Annbless, @AlexTheBad, @jin-s13, @ly015 + +- Support TorchServe ([#979](https://github.com/open-mmlab/mmpose/pull/979)) @ly015 + +**New Features** + +- Add AP-10K dataset for animal pose estimation ([#987](https://github.com/open-mmlab/mmpose/pull/987)) @Annbless, @AlexTheBad, @jin-s13, @ly015 + +- Add HRNetv2 checkpoints on 300W and COFW datasets ([#980](https://github.com/open-mmlab/mmpose/pull/980)) @jin-s13 + +- Support TorchServe ([#979](https://github.com/open-mmlab/mmpose/pull/979)) @ly015 + +**Bug Fixes** + +- Fix some deprecated or risky settings in configs ([#963](https://github.com/open-mmlab/mmpose/pull/963), [#976](https://github.com/open-mmlab/mmpose/pull/976), [#992](https://github.com/open-mmlab/mmpose/pull/992)) @jin-s13, @wusize + +- Fix issues of default arguments of training and testing scripts ([#970](https://github.com/open-mmlab/mmpose/pull/970), [#985](https://github.com/open-mmlab/mmpose/pull/985)) @liqikai9, @wusize + +- Fix heatmap and tag size mismatch in bottom-up with UDP ([#994](https://github.com/open-mmlab/mmpose/pull/994)) @wusize + +- Fix python3.9 installation in CI ([#983](https://github.com/open-mmlab/mmpose/pull/983)) @ly015 + +- Fix model zoo document integrity issue ([#990](https://github.com/open-mmlab/mmpose/pull/990)) @jin-s13 + +**Improvements** + +- Support non-square input shape for bottom-up ([#991](https://github.com/open-mmlab/mmpose/pull/991)) @wusize + +- Add image and video resources for demo ([#971](https://github.com/open-mmlab/mmpose/pull/971)) @liqikai9 + +- Use CUDA docker images to accelerate CI ([#973](https://github.com/open-mmlab/mmpose/pull/973)) @ly015 + +- Add codespell hook and fix detected typos ([#977](https://github.com/open-mmlab/mmpose/pull/977)) @ly015 + +## **v0.19.0 (08/10/2021)** + +**Highlights** + +- Add models for Associative Embedding with Hourglass network backbone ([#906](https://github.com/open-mmlab/mmpose/pull/906), [#955](https://github.com/open-mmlab/mmpose/pull/955)) @jin-s13, @luminxu + +- Support COCO-Wholebody-Face and COCO-Wholebody-Hand datasets ([#813](https://github.com/open-mmlab/mmpose/pull/813)) @jin-s13, @innerlee, @luminxu + +- Upgrade dataset interface ([#901](https://github.com/open-mmlab/mmpose/pull/901), [#924](https://github.com/open-mmlab/mmpose/pull/924)) @jin-s13, @innerlee, @ly015, @liqikai9 + +- New style of documentation ([#945](https://github.com/open-mmlab/mmpose/pull/945)) @ly015 + +**New Features** + +- Add models for Associative Embedding with Hourglass network backbone ([#906](https://github.com/open-mmlab/mmpose/pull/906), [#955](https://github.com/open-mmlab/mmpose/pull/955)) @jin-s13, @luminxu + +- Support COCO-Wholebody-Face and COCO-Wholebody-Hand datasets ([#813](https://github.com/open-mmlab/mmpose/pull/813)) @jin-s13, @innerlee, @luminxu + +- Add pseudo-labeling tool to generate COCO style keypoint annotations with given bounding boxes 
([#928](https://github.com/open-mmlab/mmpose/pull/928)) @soltkreig + +- New style of documentation ([#945](https://github.com/open-mmlab/mmpose/pull/945)) @ly015 + +**Bug Fixes** + +- Fix segmentation parsing in Macaque dataset preprocessing ([#948](https://github.com/open-mmlab/mmpose/pull/948)) @jin-s13 + +- Fix dependencies that may lead to CI failure in downstream projects ([#936](https://github.com/open-mmlab/mmpose/pull/936), [#953](https://github.com/open-mmlab/mmpose/pull/953)) @RangiLyu, @ly015 + +- Fix keypoint order in Human3.6M dataset ([#940](https://github.com/open-mmlab/mmpose/pull/940)) @ttxskk + +- Fix unstable image loading for Interhand2.6M ([#913](https://github.com/open-mmlab/mmpose/pull/913)) @zengwang430521 + +**Improvements** + +- Upgrade dataset interface ([#901](https://github.com/open-mmlab/mmpose/pull/901), [#924](https://github.com/open-mmlab/mmpose/pull/924)) @jin-s13, @innerlee, @ly015, @liqikai9 + +- Improve demo usability and stability ([#908](https://github.com/open-mmlab/mmpose/pull/908), [#934](https://github.com/open-mmlab/mmpose/pull/934)) @ly015 + +- Standardize model metafile format ([#941](https://github.com/open-mmlab/mmpose/pull/941)) @ly015 + +- Support `persistent_worker` and several other arguments in configs ([#946](https://github.com/open-mmlab/mmpose/pull/946)) @jin-s13 + +- Use MMCV root model registry to enable cross-project module building ([#935](https://github.com/open-mmlab/mmpose/pull/935)) @RangiLyu + +- Improve the document quality ([#916](https://github.com/open-mmlab/mmpose/pull/916), [#909](https://github.com/open-mmlab/mmpose/pull/909), [#942](https://github.com/open-mmlab/mmpose/pull/942), [#913](https://github.com/open-mmlab/mmpose/pull/913), [#956](https://github.com/open-mmlab/mmpose/pull/956)) @jin-s13, @ly015, @bit-scientist, @zengwang430521 + +- Improve pull request template ([#952](https://github.com/open-mmlab/mmpose/pull/952), [#954](https://github.com/open-mmlab/mmpose/pull/954)) @ly015 + +**Breaking Changes** + +- Upgrade dataset interface ([#901](https://github.com/open-mmlab/mmpose/pull/901)) @jin-s13, @innerlee, @ly015 + +## **v0.18.0 (01/09/2021)** + +**Bug Fixes** + +- Fix redundant model weight loading in pytorch-to-onnx conversion ([#850](https://github.com/open-mmlab/mmpose/pull/850)) @ly015 + +- Fix a bug in update_model_index.py that may cause pre-commit hook failure([#866](https://github.com/open-mmlab/mmpose/pull/866)) @ly015 + +- Fix a bug in interhand_3d_head ([#890](https://github.com/open-mmlab/mmpose/pull/890)) @zengwang430521 + +- Fix pose tracking demo failure caused by out-of-date configs ([#891](https://github.com/open-mmlab/mmpose/pull/891)) + +**Improvements** + +- Add automatic benchmark regression tools ([#849](https://github.com/open-mmlab/mmpose/pull/849), [#880](https://github.com/open-mmlab/mmpose/pull/880), [#885](https://github.com/open-mmlab/mmpose/pull/885)) @liqikai9, @ly015 + +- Add copyright information and checking hook ([#872](https://github.com/open-mmlab/mmpose/pull/872)) + +- Add PR template ([#875](https://github.com/open-mmlab/mmpose/pull/875)) @ly015 + +- Add citation information ([#876](https://github.com/open-mmlab/mmpose/pull/876)) @ly015 + +- Add python3.9 in CI ([#877](https://github.com/open-mmlab/mmpose/pull/877), [#883](https://github.com/open-mmlab/mmpose/pull/883)) @ly015 + +- Improve the quality of the documents ([#845](https://github.com/open-mmlab/mmpose/pull/845), [#845](https://github.com/open-mmlab/mmpose/pull/845), 
[#848](https://github.com/open-mmlab/mmpose/pull/848), [#867](https://github.com/open-mmlab/mmpose/pull/867), [#870](https://github.com/open-mmlab/mmpose/pull/870), [#873](https://github.com/open-mmlab/mmpose/pull/873), [#896](https://github.com/open-mmlab/mmpose/pull/896)) @jin-s13, @ly015, @zhiqwang + +## **v0.17.0 (06/08/2021)** + +**Highlights** + +1. Support ["Lite-HRNet: A Lightweight High-Resolution Network"](https://arxiv.org/abs/2104.06403) CVPR'2021 ([#733](https://github.com/open-mmlab/mmpose/pull/733),[#800](https://github.com/open-mmlab/mmpose/pull/800)) @jin-s13 + +2. Add 3d body mesh demo ([#771](https://github.com/open-mmlab/mmpose/pull/771)) @zengwang430521 + +3. Add Chinese documentation ([#787](https://github.com/open-mmlab/mmpose/pull/787), [#798](https://github.com/open-mmlab/mmpose/pull/798), [#799](https://github.com/open-mmlab/mmpose/pull/799), [#802](https://github.com/open-mmlab/mmpose/pull/802), [#804](https://github.com/open-mmlab/mmpose/pull/804), [#805](https://github.com/open-mmlab/mmpose/pull/805), [#815](https://github.com/open-mmlab/mmpose/pull/815), [#816](https://github.com/open-mmlab/mmpose/pull/816), [#817](https://github.com/open-mmlab/mmpose/pull/817), [#819](https://github.com/open-mmlab/mmpose/pull/819), [#839](https://github.com/open-mmlab/mmpose/pull/839)) @ly015, @luminxu, @jin-s13, @liqikai9, @zengwang430521 + +4. Add Colab Tutorial ([#834](https://github.com/open-mmlab/mmpose/pull/834)) @ly015 + +**New Features** + +- Support ["Lite-HRNet: A Lightweight High-Resolution Network"](https://arxiv.org/abs/2104.06403) CVPR'2021 ([#733](https://github.com/open-mmlab/mmpose/pull/733),[#800](https://github.com/open-mmlab/mmpose/pull/800)) @jin-s13 + +- Add 3d body mesh demo ([#771](https://github.com/open-mmlab/mmpose/pull/771)) @zengwang430521 + +- Add Chinese documentation ([#787](https://github.com/open-mmlab/mmpose/pull/787), [#798](https://github.com/open-mmlab/mmpose/pull/798), [#799](https://github.com/open-mmlab/mmpose/pull/799), [#802](https://github.com/open-mmlab/mmpose/pull/802), [#804](https://github.com/open-mmlab/mmpose/pull/804), [#805](https://github.com/open-mmlab/mmpose/pull/805), [#815](https://github.com/open-mmlab/mmpose/pull/815), [#816](https://github.com/open-mmlab/mmpose/pull/816), [#817](https://github.com/open-mmlab/mmpose/pull/817), [#819](https://github.com/open-mmlab/mmpose/pull/819), [#839](https://github.com/open-mmlab/mmpose/pull/839)) @ly015, @luminxu, @jin-s13, @liqikai9, @zengwang430521 + +- Add Colab Tutorial ([#834](https://github.com/open-mmlab/mmpose/pull/834)) @ly015 + +- Support training for InterHand v1.0 dataset ([#761](https://github.com/open-mmlab/mmpose/pull/761)) @zengwang430521 + +**Bug Fixes** + +- Fix mpii pckh@0.1 index ([#773](https://github.com/open-mmlab/mmpose/pull/773)) @jin-s13 + +- Fix multi-node distributed test ([#818](https://github.com/open-mmlab/mmpose/pull/818)) @ly015 + +- Fix docstring and init_weights error of ShuffleNetV1 ([#814](https://github.com/open-mmlab/mmpose/pull/814)) @Junjun2016 + +- Fix imshow_bbox error when input bboxes is empty ([#796](https://github.com/open-mmlab/mmpose/pull/796)) @ly015 + +- Fix model zoo doc generation ([#778](https://github.com/open-mmlab/mmpose/pull/778)) @ly015 + +- Fix typo ([#767](https://github.com/open-mmlab/mmpose/pull/767)), ([#780](https://github.com/open-mmlab/mmpose/pull/780), [#782](https://github.com/open-mmlab/mmpose/pull/782)) @ly015, @jin-s13 + +**Breaking Changes** + +- Use MMCV EvalHook 
([#686](https://github.com/open-mmlab/mmpose/pull/686)) @ly015 + +**Improvements** + +- Add pytest.ini and fix docstring ([#812](https://github.com/open-mmlab/mmpose/pull/812)) @jin-s13 + +- Update MSELoss ([#829](https://github.com/open-mmlab/mmpose/pull/829)) @Ezra-Yu + +- Move process_mmdet_results into inference.py ([#831](https://github.com/open-mmlab/mmpose/pull/831)) @ly015 + +- Update resource limit ([#783](https://github.com/open-mmlab/mmpose/pull/783)) @jin-s13 + +- Use COCO 2D pose model in 3D demo examples ([#785](https://github.com/open-mmlab/mmpose/pull/785)) @ly015 + +- Change model zoo titles in the doc from center-aligned to left-aligned ([#792](https://github.com/open-mmlab/mmpose/pull/792), [#797](https://github.com/open-mmlab/mmpose/pull/797)) @ly015 + +- Support MIM ([#706](https://github.com/open-mmlab/mmpose/pull/706), [#794](https://github.com/open-mmlab/mmpose/pull/794)) @ly015 + +- Update out-of-date configs ([#827](https://github.com/open-mmlab/mmpose/pull/827)) @jin-s13 + +- Remove opencv-python-headless dependency by albumentations ([#833](https://github.com/open-mmlab/mmpose/pull/833)) @ly015 + +- Update QQ QR code in README_CN.md ([#832](https://github.com/open-mmlab/mmpose/pull/832)) @ly015 + +## **v0.16.0 (02/07/2021)** + +**Highlights** + +1. Support ["ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search"](https://arxiv.org/abs/2105.10154) CVPR'2021 ([#742](https://github.com/open-mmlab/mmpose/pull/742),[#755](https://github.com/open-mmlab/mmpose/pull/755)). + +2. Support MPI-INF-3DHP dataset ([#683](https://github.com/open-mmlab/mmpose/pull/683),[#746](https://github.com/open-mmlab/mmpose/pull/746),[#751](https://github.com/open-mmlab/mmpose/pull/751)). + +3. Add webcam demo tool ([#729](https://github.com/open-mmlab/mmpose/pull/729)) + +4. Add 3d body and hand pose estimation demo ([#704](https://github.com/open-mmlab/mmpose/pull/704), [#727](https://github.com/open-mmlab/mmpose/pull/727)). 
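Most of the image/video demos referenced in these notes (including the webcam demo above) follow the same 0.x top-down pattern: detect people with an MMDetection model, convert the detections with `process_mmdet_results` (moved into `inference.py` in v0.17.0 above), and run the pose model on the resulting boxes. A rough sketch under the 0.x API; the config/checkpoint paths are hypothetical, and `cat_id=1` assumes the detector's person category:

```python
from mmdet.apis import inference_detector, init_detector
from mmpose.apis import (inference_top_down_pose_model, init_pose_model,
                         process_mmdet_results, vis_pose_result)

# Hypothetical paths; any detector + top-down pose model pair works the same way.
det_model = init_detector('faster_rcnn_coco.py', 'faster_rcnn_coco.pth', device='cuda:0')
pose_model = init_pose_model('hrnet_w32_coco_256x192.py', 'hrnet_w32_coco_256x192.pth', device='cuda:0')

img = 'demo.jpg'
mmdet_results = inference_detector(det_model, img)
person_results = process_mmdet_results(mmdet_results, cat_id=1)  # keep person detections

pose_results, _ = inference_top_down_pose_model(
    pose_model, img, person_results, bbox_thr=0.3, format='xyxy')
vis_pose_result(pose_model, img, pose_results, out_file='vis_demo.jpg')
```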
+ +**New Features** + +- Support ["ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search"](https://arxiv.org/abs/2105.10154) CVPR'2021 ([#742](https://github.com/open-mmlab/mmpose/pull/742),[#755](https://github.com/open-mmlab/mmpose/pull/755)) + +- Support MPI-INF-3DHP dataset ([#683](https://github.com/open-mmlab/mmpose/pull/683),[#746](https://github.com/open-mmlab/mmpose/pull/746),[#751](https://github.com/open-mmlab/mmpose/pull/751)) + +- Support Webcam demo ([#729](https://github.com/open-mmlab/mmpose/pull/729)) + +- Support Interhand 3d demo ([#704](https://github.com/open-mmlab/mmpose/pull/704)) + +- Support 3d pose video demo ([#727](https://github.com/open-mmlab/mmpose/pull/727)) + +- Support H36m dataset for 2d pose estimation ([#709](https://github.com/open-mmlab/mmpose/pull/709), [#735](https://github.com/open-mmlab/mmpose/pull/735)) + +- Add scripts to generate mim metafile ([#749](https://github.com/open-mmlab/mmpose/pull/749)) + +**Bug Fixes** + +- Fix typos ([#692](https://github.com/open-mmlab/mmpose/pull/692),[#696](https://github.com/open-mmlab/mmpose/pull/696),[#697](https://github.com/open-mmlab/mmpose/pull/697),[#698](https://github.com/open-mmlab/mmpose/pull/698),[#712](https://github.com/open-mmlab/mmpose/pull/712),[#718](https://github.com/open-mmlab/mmpose/pull/718),[#728](https://github.com/open-mmlab/mmpose/pull/728)) + +- Change model download links from `http` to `https` ([#716](https://github.com/open-mmlab/mmpose/pull/716)) + +**Breaking Changes** + +- Switch to MMCV MODEL_REGISTRY ([#669](https://github.com/open-mmlab/mmpose/pull/669)) + +**Improvements** + +- Refactor MeshMixDataset ([#752](https://github.com/open-mmlab/mmpose/pull/752)) + +- Rename 'GaussianHeatMap' to 'GaussianHeatmap' ([#745](https://github.com/open-mmlab/mmpose/pull/745)) + +- Update out-of-date configs ([#734](https://github.com/open-mmlab/mmpose/pull/734)) + +- Improve compatibility for breaking changes ([#731](https://github.com/open-mmlab/mmpose/pull/731)) + +- Enable to control radius and thickness in visualization ([#722](https://github.com/open-mmlab/mmpose/pull/722)) + +- Add regex dependency ([#720](https://github.com/open-mmlab/mmpose/pull/720)) + +## **v0.15.0 (02/06/2021)** + +**Highlights** + +1. Support 3d video pose estimation (VideoPose3D). + +2. Support 3d hand pose estimation (InterNet). + +3. Improve presentation of modelzoo. 
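For readers new to the 3d video pose estimation support highlighted above: VideoPose3D-style models lift a temporal window of 2D keypoints to a 3D pose using dilated 1D convolutions over time. The snippet below is only a toy illustration of that idea, not MMPose's implementation; the layer widths, dilations and window length are arbitrary choices for the example.

```Python
import torch
import torch.nn as nn

# Toy temporal-convolution lifter: consume a window of 2D keypoints over time
# and predict the 3D pose of the centre frame. Purely illustrative.
num_joints, window = 17, 27
lifter = nn.Sequential(
    nn.Conv1d(num_joints * 2, 256, kernel_size=3, dilation=1),  # 27 -> 25 frames
    nn.ReLU(),
    nn.Conv1d(256, 256, kernel_size=3, dilation=3),             # 25 -> 19 frames
    nn.ReLU(),
    nn.Conv1d(256, num_joints * 3, kernel_size=19),             # 19 -> 1 frame
)

kpts_2d = torch.randn(1, window, num_joints, 2)                  # (N, T, K, 2)
x = kpts_2d.permute(0, 2, 3, 1).reshape(1, num_joints * 2, window)
pose_3d = lifter(x).view(1, num_joints, 3)                       # (N, K, 3)
print(pose_3d.shape)
```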
+ +**New Features** + +- Support "InterHand2.6M: A Dataset and Baseline for 3D Interacting Hand Pose Estimation from a Single RGB Image" (ECCV‘20) ([#624](https://github.com/open-mmlab/mmpose/pull/624)) + +- Support "3D human pose estimation in video with temporal convolutions and semi-supervised training" (CVPR'19) ([#602](https://github.com/open-mmlab/mmpose/pull/602), [#681](https://github.com/open-mmlab/mmpose/pull/681)) + +- Support 3d pose estimation demo ([#653](https://github.com/open-mmlab/mmpose/pull/653), [#670](https://github.com/open-mmlab/mmpose/pull/670)) + +- Support bottom-up whole-body pose estimation ([#689](https://github.com/open-mmlab/mmpose/pull/689)) + +- Support mmcli ([#634](https://github.com/open-mmlab/mmpose/pull/634)) + +**Bug Fixes** + +- Fix opencv compatibility ([#635](https://github.com/open-mmlab/mmpose/pull/635)) + +- Fix demo with UDP ([#637](https://github.com/open-mmlab/mmpose/pull/637)) + +- Fix bottom-up model onnx conversion ([#680](https://github.com/open-mmlab/mmpose/pull/680)) + +- Fix `GPU_IDS` in distributed training ([#668](https://github.com/open-mmlab/mmpose/pull/668)) + +- Fix MANIFEST.in ([#641](https://github.com/open-mmlab/mmpose/pull/641), [#657](https://github.com/open-mmlab/mmpose/pull/657)) + +- Fix docs ([#643](https://github.com/open-mmlab/mmpose/pull/643),[#684](https://github.com/open-mmlab/mmpose/pull/684),[#688](https://github.com/open-mmlab/mmpose/pull/688),[#690](https://github.com/open-mmlab/mmpose/pull/690),[#692](https://github.com/open-mmlab/mmpose/pull/692)) + +**Breaking Changes** + +- Reorganize configs by tasks, algorithms, datasets, and techniques ([#647](https://github.com/open-mmlab/mmpose/pull/647)) + +- Rename heads and detectors ([#667](https://github.com/open-mmlab/mmpose/pull/667)) + +**Improvements** + +- Add `radius` and `thickness` parameters in visualization ([#638](https://github.com/open-mmlab/mmpose/pull/638)) + +- Add `trans_prob` parameter in `TopDownRandomTranslation` ([#650](https://github.com/open-mmlab/mmpose/pull/650)) + +- Switch to `MMCV MODEL_REGISTRY` ([#669](https://github.com/open-mmlab/mmpose/pull/669)) + +- Update dependencies ([#674](https://github.com/open-mmlab/mmpose/pull/674), [#676](https://github.com/open-mmlab/mmpose/pull/676)) + +## **v0.14.0 (06/05/2021)** + +**Highlights** + +1. Support animal pose estimation with 7 popular datasets. + +2. Support "A simple yet effective baseline for 3d human pose estimation" (ICCV'17). 
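As a usage note for the animal pose support highlighted above (this aside is not part of the release notes): animal models are driven through the same top-down Python API as human models, except that you supply animal bounding boxes and the matching dataset type yourself. The sketch below assumes the `mmpose.apis` helpers keep their 0.x interfaces; the config, checkpoint, image path and bounding box are placeholders to be replaced with real entries from the animal model zoo.

```Python
from mmpose.apis import (inference_top_down_pose_model, init_pose_model,
                         vis_pose_result)

# Placeholders -- substitute a real animal config/checkpoint from the model zoo.
config_file = 'configs/animal/.../some_animal_config.py'
checkpoint_file = 'https://download.openmmlab.com/mmpose/.../some_animal_model.pth'
image_file = 'path/to/an_animal_image.jpg'  # placeholder image

pose_model = init_pose_model(config_file, checkpoint_file, device='cpu')

# One bounding box per animal instance, in xywh format.
animal_results = [{'bbox': [50, 50, 220, 180]}]

pose_results, _ = inference_top_down_pose_model(
    pose_model,
    image_file,
    animal_results,
    format='xywh',
    dataset=pose_model.cfg.data['test']['type'])  # reuse the dataset type from the config

vis_pose_result(pose_model, image_file, pose_results,
                dataset=pose_model.cfg.data['test']['type'],
                out_file='vis_result.jpg')
```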
+ +**New Features** + +- Support "A simple yet effective baseline for 3d human pose estimation" (ICCV'17) ([#554](https://github.com/open-mmlab/mmpose/pull/554),[#558](https://github.com/open-mmlab/mmpose/pull/558),[#566](https://github.com/open-mmlab/mmpose/pull/566),[#570](https://github.com/open-mmlab/mmpose/pull/570),[#589](https://github.com/open-mmlab/mmpose/pull/589)) + +- Support animal pose estimation ([#559](https://github.com/open-mmlab/mmpose/pull/559),[#561](https://github.com/open-mmlab/mmpose/pull/561),[#563](https://github.com/open-mmlab/mmpose/pull/563),[#571](https://github.com/open-mmlab/mmpose/pull/571),[#603](https://github.com/open-mmlab/mmpose/pull/603),[#605](https://github.com/open-mmlab/mmpose/pull/605)) + +- Support Horse-10 dataset ([#561](https://github.com/open-mmlab/mmpose/pull/561)), MacaquePose dataset ([#561](https://github.com/open-mmlab/mmpose/pull/561)), Vinegar Fly dataset ([#561](https://github.com/open-mmlab/mmpose/pull/561)), Desert Locust dataset ([#561](https://github.com/open-mmlab/mmpose/pull/561)), Grevy's Zebra dataset ([#561](https://github.com/open-mmlab/mmpose/pull/561)), ATRW dataset ([#571](https://github.com/open-mmlab/mmpose/pull/571)), and Animal-Pose dataset ([#603](https://github.com/open-mmlab/mmpose/pull/603)) + +- Support bottom-up pose tracking demo ([#574](https://github.com/open-mmlab/mmpose/pull/574)) + +- Support FP16 training ([#584](https://github.com/open-mmlab/mmpose/pull/584),[#616](https://github.com/open-mmlab/mmpose/pull/616),[#626](https://github.com/open-mmlab/mmpose/pull/626)) + +- Support NMS for bottom-up ([#609](https://github.com/open-mmlab/mmpose/pull/609)) + +**Bug Fixes** + +- Fix bugs in the top-down demo, when there are no people in the images ([#569](https://github.com/open-mmlab/mmpose/pull/569)). + +- Fix the links in the doc ([#612](https://github.com/open-mmlab/mmpose/pull/612)) + +**Improvements** + +- Speed up top-down inference ([#560](https://github.com/open-mmlab/mmpose/pull/560)) + +- Update github CI ([#562](https://github.com/open-mmlab/mmpose/pull/562), [#564](https://github.com/open-mmlab/mmpose/pull/564)) + +- Update Readme ([#578](https://github.com/open-mmlab/mmpose/pull/578),[#579](https://github.com/open-mmlab/mmpose/pull/579),[#580](https://github.com/open-mmlab/mmpose/pull/580),[#592](https://github.com/open-mmlab/mmpose/pull/592),[#599](https://github.com/open-mmlab/mmpose/pull/599),[#600](https://github.com/open-mmlab/mmpose/pull/600),[#607](https://github.com/open-mmlab/mmpose/pull/607)) + +- Update Faq ([#587](https://github.com/open-mmlab/mmpose/pull/587), [#610](https://github.com/open-mmlab/mmpose/pull/610)) + +## **v0.13.0 (31/03/2021)** + +**Highlights** + +1. Support Wingloss. + +2. Support RHD hand dataset. 
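For context on the Wing loss highlighted above (an illustrative aside, not part of the release notes): Wing loss (Feng et al., CVPR 2018) behaves like a scaled logarithm for small keypoint regression errors and like L1 for large ones, which is why it is popular for face and hand landmark regression. Below is a toy re-implementation of the formula for illustration only; in MMPose the registered `WingLoss` module should be used for real training.

```Python
import math

import torch


def wing_loss(pred, target, omega=10.0, epsilon=2.0):
    """Toy Wing loss: omega * log(1 + |x| / epsilon) for |x| < omega,
    |x| - C otherwise, with C chosen so the two branches connect continuously."""
    diff = (pred - target).abs()
    c = omega - omega * math.log(1.0 + omega / epsilon)
    losses = torch.where(diff < omega,
                         omega * torch.log(1.0 + diff / epsilon),
                         diff - c)
    return losses.mean()


# pred / target: (batch, num_keypoints, 2) coordinates
print(wing_loss(torch.rand(2, 21, 2), torch.rand(2, 21, 2)))
```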
+ +**New Features** + +- Support Wingloss ([#482](https://github.com/open-mmlab/mmpose/pull/482)) + +- Support RHD hand dataset ([#523](https://github.com/open-mmlab/mmpose/pull/523), [#551](https://github.com/open-mmlab/mmpose/pull/551)) + +- Support Human3.6m dataset for 3d keypoint detection ([#518](https://github.com/open-mmlab/mmpose/pull/518), [#527](https://github.com/open-mmlab/mmpose/pull/527)) + +- Support TCN model for 3d keypoint detection ([#521](https://github.com/open-mmlab/mmpose/pull/521), [#522](https://github.com/open-mmlab/mmpose/pull/522)) + +- Support Interhand3D model for 3d hand detection ([#536](https://github.com/open-mmlab/mmpose/pull/536)) + +- Support Multi-task detector ([#480](https://github.com/open-mmlab/mmpose/pull/480)) + +**Bug Fixes** + +- Fix PCKh@0.1 calculation ([#516](https://github.com/open-mmlab/mmpose/pull/516)) + +- Fix unittest ([#529](https://github.com/open-mmlab/mmpose/pull/529)) + +- Fix circular importing ([#542](https://github.com/open-mmlab/mmpose/pull/542)) + +- Fix bugs in bottom-up keypoint score ([#548](https://github.com/open-mmlab/mmpose/pull/548)) + +**Improvements** + +- Update config & checkpoints ([#525](https://github.com/open-mmlab/mmpose/pull/525), [#546](https://github.com/open-mmlab/mmpose/pull/546)) + +- Fix typos ([#514](https://github.com/open-mmlab/mmpose/pull/514), [#519](https://github.com/open-mmlab/mmpose/pull/519), [#532](https://github.com/open-mmlab/mmpose/pull/532), [#537](https://github.com/open-mmlab/mmpose/pull/537), ) + +- Speed up post processing ([#535](https://github.com/open-mmlab/mmpose/pull/535)) + +- Update mmcv version dependency ([#544](https://github.com/open-mmlab/mmpose/pull/544)) + +## **v0.12.0 (28/02/2021)** + +**Highlights** + +1. Support DeepPose algorithm. 
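To clarify what the DeepPose support adds (an illustrative aside, not part of the release notes): unlike the heatmap-based heads, DeepPose regresses keypoint coordinates directly from pooled backbone features. The self-contained toy head below shows the idea; it is not the MMPose implementation.

```Python
import torch
import torch.nn as nn


class ToyRegressionHead(nn.Module):
    """DeepPose-style head: global average pooling followed by a fully
    connected layer that outputs (x, y) for every keypoint. Toy code only."""

    def __init__(self, in_channels=2048, num_joints=17):
        super().__init__()
        self.num_joints = num_joints
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(in_channels, num_joints * 2)

    def forward(self, feats):                 # feats: (N, C, H, W) backbone output
        x = self.pool(feats).flatten(1)       # (N, C)
        return self.fc(x).view(-1, self.num_joints, 2)


head = ToyRegressionHead()
print(head(torch.randn(2, 2048, 8, 6)).shape)  # torch.Size([2, 17, 2])
```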
+ +**New Features** + +- Support DeepPose algorithm ([#446](https://github.com/open-mmlab/mmpose/pull/446), [#461](https://github.com/open-mmlab/mmpose/pull/461)) + +- Support interhand3d dataset ([#468](https://github.com/open-mmlab/mmpose/pull/468)) + +- Support Albumentation pipeline ([#469](https://github.com/open-mmlab/mmpose/pull/469)) + +- Support PhotometricDistortion pipeline ([#485](https://github.com/open-mmlab/mmpose/pull/485)) + +- Set seed option for training ([#493](https://github.com/open-mmlab/mmpose/pull/493)) + +- Add demos for face keypoint detection ([#502](https://github.com/open-mmlab/mmpose/pull/502)) + +**Bug Fixes** + +- Change channel order according to configs ([#504](https://github.com/open-mmlab/mmpose/pull/504)) + +- Fix `num_factors` in UDP encoding ([#495](https://github.com/open-mmlab/mmpose/pull/495)) + +- Fix configs ([#456](https://github.com/open-mmlab/mmpose/pull/456)) + +**Breaking Changes** + +- Refactor configs for wholebody pose estimation ([#487](https://github.com/open-mmlab/mmpose/pull/487), [#491](https://github.com/open-mmlab/mmpose/pull/491)) + +- Rename `decode` function for heads ([#481](https://github.com/open-mmlab/mmpose/pull/481)) + +**Improvements** + +- Update config & checkpoints ([#453](https://github.com/open-mmlab/mmpose/pull/453),[#484](https://github.com/open-mmlab/mmpose/pull/484),[#487](https://github.com/open-mmlab/mmpose/pull/487)) + +- Add README in Chinese ([#462](https://github.com/open-mmlab/mmpose/pull/462)) + +- Add tutorials about configs ([#465](https://github.com/open-mmlab/mmpose/pull/465)) + +- Add demo videos for various tasks ([#499](https://github.com/open-mmlab/mmpose/pull/499), [#503](https://github.com/open-mmlab/mmpose/pull/503)) + +- Update docs about MMPose installation ([#467](https://github.com/open-mmlab/mmpose/pull/467), [#505](https://github.com/open-mmlab/mmpose/pull/505)) + +- Rename `stat.py` to `stats.py` ([#483](https://github.com/open-mmlab/mmpose/pull/483)) + +- Fix typos ([#463](https://github.com/open-mmlab/mmpose/pull/463), [#464](https://github.com/open-mmlab/mmpose/pull/464), [#477](https://github.com/open-mmlab/mmpose/pull/477), [#481](https://github.com/open-mmlab/mmpose/pull/481)) + +- latex to bibtex ([#471](https://github.com/open-mmlab/mmpose/pull/471)) + +- Update FAQ ([#466](https://github.com/open-mmlab/mmpose/pull/466)) + +## **v0.11.0 (31/01/2021)** + +**Highlights** + +1. Support fashion landmark detection. + +2. Support face keypoint detection. + +3. Support pose tracking with MMTracking. 
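A note on the pose tracking highlighted above (not part of the release notes): the tracking demos essentially run per-frame pose estimation and then carry person identities across frames, for example by matching bounding boxes. The toy matcher below illustrates the idea with greedy IoU matching; the actual demos, and MMTracking itself, are considerably more robust.

```Python
def bbox_iou(a, b):
    """IoU of two boxes in (x1, y1, x2, y2) format."""
    x1, y1 = max(a[0], b[0]), max(a[1], b[1])
    x2, y2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    union = ((a[2] - a[0]) * (a[3] - a[1]) +
             (b[2] - b[0]) * (b[3] - b[1]) - inter)
    return inter / (union + 1e-9)


def assign_track_ids(prev_results, cur_results, next_id, iou_thr=0.3):
    """Greedily reuse the track id of the best-overlapping previous box;
    otherwise start a new track. Returns the updated next_id."""
    for cur in cur_results:
        best = max(prev_results, default=None,
                   key=lambda p: bbox_iou(p['bbox'], cur['bbox']))
        if best is not None and bbox_iou(best['bbox'], cur['bbox']) > iou_thr:
            cur['track_id'] = best['track_id']
        else:
            cur['track_id'] = next_id
            next_id += 1
    return next_id


prev = [{'bbox': [0, 0, 100, 200], 'track_id': 0}]
cur = [{'bbox': [5, 5, 105, 205]}, {'bbox': [300, 50, 380, 220]}]
print(assign_track_ids(prev, cur, next_id=1), cur)
```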
+ +**New Features** + +- Support fashion landmark detection (DeepFashion) ([#413](https://github.com/open-mmlab/mmpose/pull/413)) + +- Support face keypoint detection (300W, AFLW, COFW, WFLW) ([#367](https://github.com/open-mmlab/mmpose/pull/367)) + +- Support pose tracking demo with MMTracking ([#427](https://github.com/open-mmlab/mmpose/pull/427)) + +- Support face demo ([#443](https://github.com/open-mmlab/mmpose/pull/443)) + +- Support AIC dataset for bottom-up methods ([#438](https://github.com/open-mmlab/mmpose/pull/438), [#449](https://github.com/open-mmlab/mmpose/pull/449)) + +**Bug Fixes** + +- Fix multi-batch training ([#434](https://github.com/open-mmlab/mmpose/pull/434)) + +- Fix sigmas in AIC dataset ([#441](https://github.com/open-mmlab/mmpose/pull/441)) + +- Fix config file ([#420](https://github.com/open-mmlab/mmpose/pull/420)) + +**Breaking Changes** + +- Refactor Heads ([#382](https://github.com/open-mmlab/mmpose/pull/382)) + +**Improvements** + +- Update readme ([#409](https://github.com/open-mmlab/mmpose/pull/409), [#412](https://github.com/open-mmlab/mmpose/pull/412), [#415](https://github.com/open-mmlab/mmpose/pull/415), [#416](https://github.com/open-mmlab/mmpose/pull/416), [#419](https://github.com/open-mmlab/mmpose/pull/419), [#421](https://github.com/open-mmlab/mmpose/pull/421), [#422](https://github.com/open-mmlab/mmpose/pull/422), [#424](https://github.com/open-mmlab/mmpose/pull/424), [#425](https://github.com/open-mmlab/mmpose/pull/425), [#435](https://github.com/open-mmlab/mmpose/pull/435), [#436](https://github.com/open-mmlab/mmpose/pull/436), [#437](https://github.com/open-mmlab/mmpose/pull/437), [#444](https://github.com/open-mmlab/mmpose/pull/444), [#445](https://github.com/open-mmlab/mmpose/pull/445)) + +- Add GAP (global average pooling) neck ([#414](https://github.com/open-mmlab/mmpose/pull/414)) + +- Speed up ([#411](https://github.com/open-mmlab/mmpose/pull/411), [#423](https://github.com/open-mmlab/mmpose/pull/423)) + +- Support COCO test-dev test ([#433](https://github.com/open-mmlab/mmpose/pull/433)) + +## **v0.10.0 (31/12/2020)** + +**Highlights** + +1. Support more human pose estimation methods. + + 1. [UDP](https://arxiv.org/abs/1911.07524) + +2. Support pose tracking. + +3. Support multi-batch inference. + +4. Add some useful tools, including `analyze_logs`, `get_flops`, `print_config`. + +5. Support more backbone networks. + + 1. [ResNest](https://arxiv.org/pdf/2004.08955.pdf) + 2. 
[VGG](https://arxiv.org/abs/1409.1556) + +**New Features** + +- Support UDP ([#353](https://github.com/open-mmlab/mmpose/pull/353), [#371](https://github.com/open-mmlab/mmpose/pull/371), [#402](https://github.com/open-mmlab/mmpose/pull/402)) + +- Support multi-batch inference ([#390](https://github.com/open-mmlab/mmpose/pull/390)) + +- Support MHP dataset ([#386](https://github.com/open-mmlab/mmpose/pull/386)) + +- Support pose tracking demo ([#380](https://github.com/open-mmlab/mmpose/pull/380)) + +- Support mpii-trb demo ([#372](https://github.com/open-mmlab/mmpose/pull/372)) + +- Support mobilenet for hand pose estimation ([#377](https://github.com/open-mmlab/mmpose/pull/377)) + +- Support ResNest backbone ([#370](https://github.com/open-mmlab/mmpose/pull/370)) + +- Support VGG backbone ([#370](https://github.com/open-mmlab/mmpose/pull/370)) + +- Add some useful tools, including `analyze_logs`, `get_flops`, `print_config` ([#324](https://github.com/open-mmlab/mmpose/pull/324)) + +**Bug Fixes** + +- Fix bugs in pck evaluation ([#328](https://github.com/open-mmlab/mmpose/pull/328)) + +- Fix model download links in README ([#396](https://github.com/open-mmlab/mmpose/pull/396), [#397](https://github.com/open-mmlab/mmpose/pull/397)) + +- Fix CrowdPose annotations and update benchmarks ([#384](https://github.com/open-mmlab/mmpose/pull/384)) + +- Fix modelzoo stat ([#354](https://github.com/open-mmlab/mmpose/pull/354), [#360](https://github.com/open-mmlab/mmpose/pull/360), [#362](https://github.com/open-mmlab/mmpose/pull/362)) + +- Fix config files for aic datasets ([#340](https://github.com/open-mmlab/mmpose/pull/340)) + +**Breaking Changes** + +- Rename `image_thr` to `det_bbox_thr` for top-down methods. + +**Improvements** + +- Organize the readme files ([#398](https://github.com/open-mmlab/mmpose/pull/398), [#399](https://github.com/open-mmlab/mmpose/pull/399), [#400](https://github.com/open-mmlab/mmpose/pull/400)) + +- Check linting for markdown ([#379](https://github.com/open-mmlab/mmpose/pull/379)) + +- Add faq.md ([#350](https://github.com/open-mmlab/mmpose/pull/350)) + +- Remove PyTorch 1.4 in CI ([#338](https://github.com/open-mmlab/mmpose/pull/338)) + +- Add pypi badge in readme ([#329](https://github.com/open-mmlab/mmpose/pull/329)) + +## **v0.9.0 (30/11/2020)** + +**Highlights** + +1. Support more human pose estimation methods. + + 1. [MSPN](https://arxiv.org/abs/1901.00148) + 2. [RSN](https://arxiv.org/abs/2003.04030) + +2. Support video pose estimation datasets. + + 1. [sub-JHMDB](http://jhmdb.is.tue.mpg.de/dataset) + +3. Support Onnx model conversion. + +**New Features** + +- Support MSPN ([#278](https://github.com/open-mmlab/mmpose/pull/278)) + +- Support RSN ([#221](https://github.com/open-mmlab/mmpose/pull/221), [#318](https://github.com/open-mmlab/mmpose/pull/318)) + +- Support new post-processing method for MSPN & RSN ([#288](https://github.com/open-mmlab/mmpose/pull/288)) + +- Support sub-JHMDB dataset ([#292](https://github.com/open-mmlab/mmpose/pull/292)) + +- Support urls for pre-trained models in config files ([#232](https://github.com/open-mmlab/mmpose/pull/232)) + +- Support Onnx ([#305](https://github.com/open-mmlab/mmpose/pull/305)) + +**Bug Fixes** + +- Fix model download links in README ([#255](https://github.com/open-mmlab/mmpose/pull/255), [#315](https://github.com/open-mmlab/mmpose/pull/315)) + +**Breaking Changes** + +- `post_process=True|False` and `unbiased_decoding=True|False` are deprecated, use `post_process=None|default|unbiased` etc. 
instead ([#288](https://github.com/open-mmlab/mmpose/pull/288)) + +**Improvements** + +- Enrich the model zoo ([#256](https://github.com/open-mmlab/mmpose/pull/256), [#320](https://github.com/open-mmlab/mmpose/pull/320)) + +- Set the default map_location as 'cpu' to reduce gpu memory cost ([#227](https://github.com/open-mmlab/mmpose/pull/227)) + +- Support return heatmaps and backbone features for bottom-up models ([#229](https://github.com/open-mmlab/mmpose/pull/229)) + +- Upgrade mmcv maximum & minimum version ([#269](https://github.com/open-mmlab/mmpose/pull/269), [#313](https://github.com/open-mmlab/mmpose/pull/313)) + +- Automatically add modelzoo statistics to readthedocs ([#252](https://github.com/open-mmlab/mmpose/pull/252)) + +- Fix Pylint issues ([#258](https://github.com/open-mmlab/mmpose/pull/258), [#259](https://github.com/open-mmlab/mmpose/pull/259), [#260](https://github.com/open-mmlab/mmpose/pull/260), [#262](https://github.com/open-mmlab/mmpose/pull/262), [#265](https://github.com/open-mmlab/mmpose/pull/265), [#267](https://github.com/open-mmlab/mmpose/pull/267), [#268](https://github.com/open-mmlab/mmpose/pull/268), [#270](https://github.com/open-mmlab/mmpose/pull/270), [#271](https://github.com/open-mmlab/mmpose/pull/271), [#272](https://github.com/open-mmlab/mmpose/pull/272), [#273](https://github.com/open-mmlab/mmpose/pull/273), [#275](https://github.com/open-mmlab/mmpose/pull/275), [#276](https://github.com/open-mmlab/mmpose/pull/276), [#283](https://github.com/open-mmlab/mmpose/pull/283), [#285](https://github.com/open-mmlab/mmpose/pull/285), [#293](https://github.com/open-mmlab/mmpose/pull/293), [#294](https://github.com/open-mmlab/mmpose/pull/294), [#295](https://github.com/open-mmlab/mmpose/pull/295)) + +- Improve README ([#226](https://github.com/open-mmlab/mmpose/pull/226), [#257](https://github.com/open-mmlab/mmpose/pull/257), [#264](https://github.com/open-mmlab/mmpose/pull/264), [#280](https://github.com/open-mmlab/mmpose/pull/280), [#296](https://github.com/open-mmlab/mmpose/pull/296)) + +- Support PyTorch 1.7 in CI ([#274](https://github.com/open-mmlab/mmpose/pull/274)) + +- Add docs/tutorials for running demos ([#263](https://github.com/open-mmlab/mmpose/pull/263)) + +## **v0.8.0 (31/10/2020)** + +**Highlights** + +1. Support more human pose estimation datasets. + + 1. [CrowdPose](https://github.com/Jeff-sjtu/CrowdPose) + 2. [PoseTrack18](https://posetrack.net/) + +2. Support more 2D hand keypoint estimation datasets. + + 1. [InterHand2.6](https://github.com/facebookresearch/InterHand2.6M) + +3. Support adversarial training for 3D human shape recovery. + +4. Support multi-stage losses. + +5. Support mpii demo. 
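To illustrate what the multi-stage loss support means in practice (an aside, not part of the release notes): heads with intermediate supervision, such as CPM or Hourglass-style models, can take a list of loss configs, one per stage. The sketch below follows the 0.x config conventions from memory; the head type name and field layout are assumptions, so check the released configs for the exact spelling.

```Python
# Hedged sketch of a multi-stage head config; `TopDownMultiStageHead` and the
# exact fields are assumptions -- consult the released configs for the real names.
num_stages = 6
keypoint_head = dict(
    type='TopDownMultiStageHead',
    in_channels=512,
    out_channels=17,
    num_stages=num_stages,
    # One loss entry per stage; intermediate stages could also use smaller weights.
    loss_keypoint=[
        dict(type='JointsMSELoss', use_target_weight=True, loss_weight=1.0)
        for _ in range(num_stages)
    ])
```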
+ +**New Features** + +- Support [CrowdPose](https://github.com/Jeff-sjtu/CrowdPose) dataset ([#195](https://github.com/open-mmlab/mmpose/pull/195)) + +- Support [PoseTrack18](https://posetrack.net/) dataset ([#220](https://github.com/open-mmlab/mmpose/pull/220)) + +- Support [InterHand2.6](https://github.com/facebookresearch/InterHand2.6M) dataset ([#202](https://github.com/open-mmlab/mmpose/pull/202)) + +- Support adversarial training for 3D human shape recovery ([#192](https://github.com/open-mmlab/mmpose/pull/192)) + +- Support multi-stage losses ([#204](https://github.com/open-mmlab/mmpose/pull/204)) + +**Bug Fixes** + +- Fix config files ([#190](https://github.com/open-mmlab/mmpose/pull/190)) + +**Improvements** + +- Add mpii demo ([#216](https://github.com/open-mmlab/mmpose/pull/216)) + +- Improve README ([#181](https://github.com/open-mmlab/mmpose/pull/181), [#183](https://github.com/open-mmlab/mmpose/pull/183), [#208](https://github.com/open-mmlab/mmpose/pull/208)) + +- Support return heatmaps and backbone features ([#196](https://github.com/open-mmlab/mmpose/pull/196), [#212](https://github.com/open-mmlab/mmpose/pull/212)) + +- Support different return formats of mmdetection models ([#217](https://github.com/open-mmlab/mmpose/pull/217)) + +## **v0.7.0 (30/9/2020)** + +**Highlights** + +1. Support HMR for 3D human shape recovery. + +2. Support WholeBody human pose estimation. + + 1. [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody) + +3. Support more 2D hand keypoint estimation datasets. + + 1. [Frei-hand](https://lmb.informatik.uni-freiburg.de/projects/freihand/) + 2. [CMU Panoptic HandDB](http://domedb.perception.cs.cmu.edu/handdb.html) + +4. Add more popular backbones & enrich the [modelzoo](https://mmpose.readthedocs.io/en/latest/model_zoo.html) + + 1. ShuffleNetv2 + +5. Support hand demo and whole-body demo. 
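A small post-processing note for the COCO-WholeBody support highlighted above (not part of the release notes): whole-body models predict 133 keypoints per person, which downstream code often splits back into parts. The slice boundaries below follow the COCO-WholeBody definition (17 body, 6 foot, 68 face and 21 + 21 hand keypoints).

```Python
import numpy as np


def split_wholebody_keypoints(kpts):
    """Split a (133, 3) array of (x, y, score) whole-body keypoints by part."""
    return {
        'body': kpts[:17],
        'foot': kpts[17:23],
        'face': kpts[23:91],
        'left_hand': kpts[91:112],
        'right_hand': kpts[112:133],
    }


parts = split_wholebody_keypoints(np.zeros((133, 3)))
print({name: part.shape for name, part in parts.items()})
```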
+ +**New Features** + +- Support HMR for 3D human shape recovery ([#157](https://github.com/open-mmlab/mmpose/pull/157), [#160](https://github.com/open-mmlab/mmpose/pull/160), [#161](https://github.com/open-mmlab/mmpose/pull/161), [#162](https://github.com/open-mmlab/mmpose/pull/162)) + +- Support [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody) dataset ([#133](https://github.com/open-mmlab/mmpose/pull/133)) + +- Support [Frei-hand](https://lmb.informatik.uni-freiburg.de/projects/freihand/) dataset ([#125](https://github.com/open-mmlab/mmpose/pull/125)) + +- Support [CMU Panoptic HandDB](http://domedb.perception.cs.cmu.edu/handdb.html) dataset ([#144](https://github.com/open-mmlab/mmpose/pull/144)) + +- Support H36M dataset ([#159](https://github.com/open-mmlab/mmpose/pull/159)) + +- Support ShuffleNetv2 ([#139](https://github.com/open-mmlab/mmpose/pull/139)) + +- Support saving best models based on key indicator ([#127](https://github.com/open-mmlab/mmpose/pull/127)) + +**Bug Fixes** + +- Fix typos in docs ([#121](https://github.com/open-mmlab/mmpose/pull/121)) + +- Fix assertion ([#142](https://github.com/open-mmlab/mmpose/pull/142)) + +**Improvements** + +- Add tools to transform .mat format to .json format ([#126](https://github.com/open-mmlab/mmpose/pull/126)) + +- Add hand demo ([#115](https://github.com/open-mmlab/mmpose/pull/115)) + +- Add whole-body demo ([#163](https://github.com/open-mmlab/mmpose/pull/163)) + +- Reuse mmcv utility function and update version files ([#135](https://github.com/open-mmlab/mmpose/pull/135), [#137](https://github.com/open-mmlab/mmpose/pull/137)) + +- Enrich the modelzoo ([#147](https://github.com/open-mmlab/mmpose/pull/147), [#169](https://github.com/open-mmlab/mmpose/pull/169)) + +- Improve docs ([#174](https://github.com/open-mmlab/mmpose/pull/174), [#175](https://github.com/open-mmlab/mmpose/pull/175), [#178](https://github.com/open-mmlab/mmpose/pull/178)) + +- Improve README ([#176](https://github.com/open-mmlab/mmpose/pull/176)) + +- Improve version.py ([#173](https://github.com/open-mmlab/mmpose/pull/173)) + +## **v0.6.0 (31/8/2020)** + +**Highlights** + +1. Add more popular backbones & enrich the [modelzoo](https://mmpose.readthedocs.io/en/latest/model_zoo.html) + + 1. ResNext + 2. SEResNet + 3. ResNetV1D + 4. MobileNetv2 + 5. ShuffleNetv1 + 6. CPM (Convolutional Pose Machine) + +2. Add more popular datasets: + + 1. [AIChallenger](https://arxiv.org/abs/1711.06475?context=cs.CV) + 2. [MPII](http://human-pose.mpi-inf.mpg.de/) + 3. [MPII-TRB](https://github.com/kennymckormick/Triplet-Representation-of-human-Body) + 4. [OCHuman](http://www.liruilong.cn/projects/pose2seg/index.html) + +3. Support 2d hand keypoint estimation. + + 1. [OneHand10K](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html) + +4. Support bottom-up inference. 
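For the bottom-up inference support highlighted above, a rough usage sketch (not part of the release notes): bottom-up models consume the whole image and return one pose per detected person, so no bounding boxes are required. This assumes the `mmpose.apis` helpers keep the names and return values they have in later 0.x releases; the config and checkpoint paths are placeholders.

```Python
from mmpose.apis import (inference_bottom_up_pose_model, init_pose_model,
                         vis_pose_result)

# Placeholders -- substitute a real bottom-up config/checkpoint from the model zoo.
config_file = 'configs/bottom_up/.../some_bottom_up_config.py'
checkpoint_file = 'https://download.openmmlab.com/mmpose/.../some_bottom_up_model.pth'
image_file = 'tests/data/coco/000000000785.jpg'

pose_model = init_pose_model(config_file, checkpoint_file, device='cpu')

# The whole image goes in; each returned dict holds the keypoints of one person.
pose_results, _ = inference_bottom_up_pose_model(pose_model, image_file)

vis_pose_result(pose_model, image_file, pose_results,
                dataset='BottomUpCocoDataset', out_file='vis_result.jpg')
```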
+ +**New Features** + +- Support [OneHand10K](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html) dataset ([#52](https://github.com/open-mmlab/mmpose/pull/52)) + +- Support [MPII](http://human-pose.mpi-inf.mpg.de/) dataset ([#55](https://github.com/open-mmlab/mmpose/pull/55)) + +- Support [MPII-TRB](https://github.com/kennymckormick/Triplet-Representation-of-human-Body) dataset ([#19](https://github.com/open-mmlab/mmpose/pull/19), [#47](https://github.com/open-mmlab/mmpose/pull/47), [#48](https://github.com/open-mmlab/mmpose/pull/48)) + +- Support [OCHuman](http://www.liruilong.cn/projects/pose2seg/index.html) dataset ([#70](https://github.com/open-mmlab/mmpose/pull/70)) + +- Support [AIChallenger](https://arxiv.org/abs/1711.06475?context=cs.CV) dataset ([#87](https://github.com/open-mmlab/mmpose/pull/87)) + +- Support multiple backbones ([#26](https://github.com/open-mmlab/mmpose/pull/26)) + +- Support CPM model ([#56](https://github.com/open-mmlab/mmpose/pull/56)) + +**Bug Fixes** + +- Fix configs for MPII & MPII-TRB datasets ([#93](https://github.com/open-mmlab/mmpose/pull/93)) + +- Fix the bug of missing `test_pipeline` in configs ([#14](https://github.com/open-mmlab/mmpose/pull/14)) + +- Fix typos ([#27](https://github.com/open-mmlab/mmpose/pull/27), [#28](https://github.com/open-mmlab/mmpose/pull/28), [#50](https://github.com/open-mmlab/mmpose/pull/50), [#53](https://github.com/open-mmlab/mmpose/pull/53), [#63](https://github.com/open-mmlab/mmpose/pull/63)) + +**Improvements** + +- Update benchmark ([#93](https://github.com/open-mmlab/mmpose/pull/93)) + +- Add Dockerfile ([#44](https://github.com/open-mmlab/mmpose/pull/44)) + +- Improve unittest coverage and minor fix ([#18](https://github.com/open-mmlab/mmpose/pull/18)) + +- Support CPUs for train/val/demo ([#34](https://github.com/open-mmlab/mmpose/pull/34)) + +- Support bottom-up demo ([#69](https://github.com/open-mmlab/mmpose/pull/69)) + +- Add tools to publish model ([#62](https://github.com/open-mmlab/mmpose/pull/62)) + +- Enrich the modelzoo ([#64](https://github.com/open-mmlab/mmpose/pull/64), [#68](https://github.com/open-mmlab/mmpose/pull/68), [#82](https://github.com/open-mmlab/mmpose/pull/82)) + +## **v0.5.0 (21/7/2020)** + +**Highlights** + +- MMPose is released. + +**Main Features** + +- Support both top-down and bottom-up pose estimation approaches. + +- Achieve higher training efficiency and higher accuracy than other popular codebases (e.g. AlphaPose, HRNet) + +- Support various backbone models: ResNet, HRNet, SCNet, Houglass and HigherHRNet. diff --git a/docs/zh_cn/notes/ecosystem.md b/docs/zh_cn/notes/ecosystem.md index b0027cfa53..6ae3dd5aa6 100644 --- a/docs/zh_cn/notes/ecosystem.md +++ b/docs/zh_cn/notes/ecosystem.md @@ -1,3 +1,3 @@ -# Ecosystem - -Coming soon. +# Ecosystem + +Coming soon. diff --git a/docs/zh_cn/notes/projects.md b/docs/zh_cn/notes/projects.md index 460d8583bd..599c54055f 100644 --- a/docs/zh_cn/notes/projects.md +++ b/docs/zh_cn/notes/projects.md @@ -1,20 +1,20 @@ -# Projects based on MMPose - -There are many projects built upon MMPose. We list some of them as examples of how to extend MMPose for your own projects. As the page might not be completed, please feel free to create a PR to update this page. - -## Projects as an extension - -Some projects extend the boundary of MMPose for deployment or other research fields. They reveal the potential of what MMPose can do. We list several of them as below. 
- -- [Anime Face Detector](https://github.com/hysts/anime-face-detector): An anime face landmark detection toolbox. -- [PosePipeline](https://github.com/peabody124/PosePipeline): Open-Source Human Pose Estimation Pipeline for Clinical Research - -## Projects of papers - -There are also projects released with papers. Some of the papers are published in top-tier conferences (CVPR, ICCV, and ECCV), the others are also highly influential. We list some of these works as a reference for the community to develop and compare new pose estimation algorithms. Methods already supported and maintained by MMPose are not listed. - -- Pose for Everything: Towards Category-Agnostic Pose Estimation, ECCV 2022. [\[paper\]](https://arxiv.org/abs/2207.10387)[\[github\]](https://github.com/luminxu/Pose-for-Everything) -- UniFormer: Unified Transformer for Efficient Spatiotemporal Representation Learning, ICLR 2022. [\[paper\]](https://arxiv.org/abs/2201.04676)[\[github\]](https://github.com/Sense-X/UniFormer) -- Poseur:Direct Human Pose Regression with Transformers, ECCV 2022. [\[paper\]](https://arxiv.org/abs/2201.07412)[\[github\]](https://github.com/aim-uofa/Poseur) -- ViTAEv2: Vision Transformer Advanced by Exploring Inductive Bias for Image Recognition and Beyond, NeurIPS 2022. [\[paper\]](https://arxiv.org/abs/2106.03348)[\[github\]](https://github.com/ViTAE-Transformer/ViTAE-Transformer) -- Dite-HRNet:Dynamic Lightweight High-Resolution Network for Human Pose Estimation, IJCAI-ECAI 2021. [\[paper\]](https://arxiv.org/abs/2204.10762)[\[github\]](https://github.com/ZiyiZhang27/Dite-HRNet) +# Projects based on MMPose + +There are many projects built upon MMPose. We list some of them as examples of how to extend MMPose for your own projects. As the page might not be completed, please feel free to create a PR to update this page. + +## Projects as an extension + +Some projects extend the boundary of MMPose for deployment or other research fields. They reveal the potential of what MMPose can do. We list several of them as below. + +- [Anime Face Detector](https://github.com/hysts/anime-face-detector): An anime face landmark detection toolbox. +- [PosePipeline](https://github.com/peabody124/PosePipeline): Open-Source Human Pose Estimation Pipeline for Clinical Research + +## Projects of papers + +There are also projects released with papers. Some of the papers are published in top-tier conferences (CVPR, ICCV, and ECCV), the others are also highly influential. We list some of these works as a reference for the community to develop and compare new pose estimation algorithms. Methods already supported and maintained by MMPose are not listed. + +- Pose for Everything: Towards Category-Agnostic Pose Estimation, ECCV 2022. [\[paper\]](https://arxiv.org/abs/2207.10387)[\[github\]](https://github.com/luminxu/Pose-for-Everything) +- UniFormer: Unified Transformer for Efficient Spatiotemporal Representation Learning, ICLR 2022. [\[paper\]](https://arxiv.org/abs/2201.04676)[\[github\]](https://github.com/Sense-X/UniFormer) +- Poseur:Direct Human Pose Regression with Transformers, ECCV 2022. [\[paper\]](https://arxiv.org/abs/2201.07412)[\[github\]](https://github.com/aim-uofa/Poseur) +- ViTAEv2: Vision Transformer Advanced by Exploring Inductive Bias for Image Recognition and Beyond, NeurIPS 2022. [\[paper\]](https://arxiv.org/abs/2106.03348)[\[github\]](https://github.com/ViTAE-Transformer/ViTAE-Transformer) +- Dite-HRNet:Dynamic Lightweight High-Resolution Network for Human Pose Estimation, IJCAI-ECAI 2021. 
[\[paper\]](https://arxiv.org/abs/2204.10762)[\[github\]](https://github.com/ZiyiZhang27/Dite-HRNet) diff --git a/docs/zh_cn/notes/pytorch_2.md b/docs/zh_cn/notes/pytorch_2.md index 4892e554a5..932f9b0734 100644 --- a/docs/zh_cn/notes/pytorch_2.md +++ b/docs/zh_cn/notes/pytorch_2.md @@ -1,14 +1,14 @@ -# PyTorch 2.0 Compatibility and Benchmarks - -MMPose 1.0.0 is now compatible with PyTorch 2.0, ensuring that users can leverage the latest features and performance improvements offered by the PyTorch 2.0 framework when using MMPose. With the integration of inductor, users can expect faster model speeds. The table below shows several example models: - -| Model | Training Speed | Memory | -| :-------- | :---------------------: | :-----------: | -| ViTPose-B | 29.6% ↑ (0.931 → 0.655) | 10586 → 10663 | -| ViTPose-S | 33.7% ↑ (0.563 → 0.373) | 6091 → 6170 | -| HRNet-w32 | 12.8% ↑ (0.553 → 0.482) | 9849 → 10145 | -| HRNet-w48 | 37.1% ↑ (0.437 → 0.275) | 7319 → 7394 | -| RTMPose-t | 6.3% ↑ (1.533 → 1.437) | 6292 → 6489 | -| RTMPose-s | 13.1% ↑ (1.645 → 1.430) | 9013 → 9208 | - -- Pytorch 2.0 test, add projects doc and refactor by @LareinaM in [PR#2136](https://github.com/open-mmlab/mmpose/pull/2136) +# PyTorch 2.0 Compatibility and Benchmarks + +MMPose 1.0.0 is now compatible with PyTorch 2.0, ensuring that users can leverage the latest features and performance improvements offered by the PyTorch 2.0 framework when using MMPose. With the integration of inductor, users can expect faster model speeds. The table below shows several example models: + +| Model | Training Speed | Memory | +| :-------- | :---------------------: | :-----------: | +| ViTPose-B | 29.6% ↑ (0.931 → 0.655) | 10586 → 10663 | +| ViTPose-S | 33.7% ↑ (0.563 → 0.373) | 6091 → 6170 | +| HRNet-w32 | 12.8% ↑ (0.553 → 0.482) | 9849 → 10145 | +| HRNet-w48 | 37.1% ↑ (0.437 → 0.275) | 7319 → 7394 | +| RTMPose-t | 6.3% ↑ (1.533 → 1.437) | 6292 → 6489 | +| RTMPose-s | 13.1% ↑ (1.645 → 1.430) | 9013 → 9208 | + +- Pytorch 2.0 test, add projects doc and refactor by @LareinaM in [PR#2136](https://github.com/open-mmlab/mmpose/pull/2136) diff --git a/docs/zh_cn/overview.md b/docs/zh_cn/overview.md index a790cd3be2..f6b75ffd48 100644 --- a/docs/zh_cn/overview.md +++ b/docs/zh_cn/overview.md @@ -1,76 +1,76 @@ -# 概述 - -本章将向你介绍 MMPose 的整体框架,并提供详细的教程链接。 - -## 什么是 MMPose - -![overview](https://user-images.githubusercontent.com/13503330/191004511-508d3ec6-9ead-4c52-a522-4d9aa1f26027.png) - -MMPose 是一款基于 Pytorch 的姿态估计开源工具箱,是 OpenMMLab 项目的成员之一,包含了丰富的 2D 多人姿态估计、2D 手部姿态估计、2D 人脸关键点检测、133关键点全身人体姿态估计、动物关键点检测、服饰关键点检测等算法以及相关的组件和模块,下面是它的整体框架: - -MMPose 由 **8** 个主要部分组成,apis、structures、datasets、codecs、models、engine、evaluation 和 visualization。 - -- **apis** 提供用于模型推理的高级 API - -- **structures** 提供 bbox、keypoint 和 PoseDataSample 等数据结构 - -- **datasets** 支持用于姿态估计的各种数据集 - - - **transforms** 包含各种数据增强变换 - -- **codecs** 提供姿态编解码器:编码器用于将姿态信息(通常为关键点坐标)编码为模型学习目标(如热力图),解码器则用于将模型输出解码为姿态估计结果 - -- **models** 以模块化结构提供了姿态估计模型的各类组件 - - - **pose_estimators** 定义了所有姿态估计模型类 - - **data_preprocessors** 用于预处理模型的输入数据 - - **backbones** 包含各种骨干网络 - - **necks** 包含各种模型颈部组件 - - **heads** 包含各种模型头部 - - **losses** 包含各种损失函数 - -- **engine** 包含与姿态估计任务相关的运行时组件 - - - **hooks** 提供运行时的各种钩子 - -- **evaluation** 提供各种评估模型性能的指标 - -- **visualization** 用于可视化关键点骨架和热力图等信息 - -## 如何使用本指南 - -针对不同类型的用户,我们准备了详细的指南: - -1. 安装说明: - - - [安装](./installation.md) - -2. 
MMPose 的基本使用方法: - - - [20 分钟上手教程](./guide_to_framework.md) - - [Demos](./demos.md) - - [模型推理](./user_guides/inference.md) - - [配置文件](./user_guides/configs.md) - - [准备数据集](./user_guides/prepare_datasets.md) - - [训练与测试](./user_guides/train_and_test.md) - -3. 对于希望基于 MMPose 进行开发的研究者和开发者: - - - [编解码器](./advanced_guides/codecs.md) - - [数据流](./advanced_guides/dataflow.md) - - [实现新模型](./advanced_guides/implement_new_models.md) - - [自定义数据集](./advanced_guides/customize_datasets.md) - - [自定义数据变换](./advanced_guides/customize_transforms.md) - - [自定义优化器](./advanced_guides/customize_optimizer.md) - - [自定义日志](./advanced_guides/customize_logging.md) - - [模型部署](./advanced_guides/how_to_deploy.md) - - [模型分析工具](./advanced_guides/model_analysis.md) - - [迁移指南](./migration.md) - -4. 对于希望加入开源社区,向 MMPose 贡献代码的研究者和开发者: - - - [参与贡献代码](./contribution_guide.md) - -5. 对于使用过程中的常见问题: - - - [FAQ](./faq.md) +# 概述 + +本章将向你介绍 MMPose 的整体框架,并提供详细的教程链接。 + +## 什么是 MMPose + +![overview](https://user-images.githubusercontent.com/13503330/191004511-508d3ec6-9ead-4c52-a522-4d9aa1f26027.png) + +MMPose 是一款基于 Pytorch 的姿态估计开源工具箱,是 OpenMMLab 项目的成员之一,包含了丰富的 2D 多人姿态估计、2D 手部姿态估计、2D 人脸关键点检测、133关键点全身人体姿态估计、动物关键点检测、服饰关键点检测等算法以及相关的组件和模块,下面是它的整体框架: + +MMPose 由 **8** 个主要部分组成,apis、structures、datasets、codecs、models、engine、evaluation 和 visualization。 + +- **apis** 提供用于模型推理的高级 API + +- **structures** 提供 bbox、keypoint 和 PoseDataSample 等数据结构 + +- **datasets** 支持用于姿态估计的各种数据集 + + - **transforms** 包含各种数据增强变换 + +- **codecs** 提供姿态编解码器:编码器用于将姿态信息(通常为关键点坐标)编码为模型学习目标(如热力图),解码器则用于将模型输出解码为姿态估计结果 + +- **models** 以模块化结构提供了姿态估计模型的各类组件 + + - **pose_estimators** 定义了所有姿态估计模型类 + - **data_preprocessors** 用于预处理模型的输入数据 + - **backbones** 包含各种骨干网络 + - **necks** 包含各种模型颈部组件 + - **heads** 包含各种模型头部 + - **losses** 包含各种损失函数 + +- **engine** 包含与姿态估计任务相关的运行时组件 + + - **hooks** 提供运行时的各种钩子 + +- **evaluation** 提供各种评估模型性能的指标 + +- **visualization** 用于可视化关键点骨架和热力图等信息 + +## 如何使用本指南 + +针对不同类型的用户,我们准备了详细的指南: + +1. 安装说明: + + - [安装](./installation.md) + +2. MMPose 的基本使用方法: + + - [20 分钟上手教程](./guide_to_framework.md) + - [Demos](./demos.md) + - [模型推理](./user_guides/inference.md) + - [配置文件](./user_guides/configs.md) + - [准备数据集](./user_guides/prepare_datasets.md) + - [训练与测试](./user_guides/train_and_test.md) + +3. 对于希望基于 MMPose 进行开发的研究者和开发者: + + - [编解码器](./advanced_guides/codecs.md) + - [数据流](./advanced_guides/dataflow.md) + - [实现新模型](./advanced_guides/implement_new_models.md) + - [自定义数据集](./advanced_guides/customize_datasets.md) + - [自定义数据变换](./advanced_guides/customize_transforms.md) + - [自定义优化器](./advanced_guides/customize_optimizer.md) + - [自定义日志](./advanced_guides/customize_logging.md) + - [模型部署](./advanced_guides/how_to_deploy.md) + - [模型分析工具](./advanced_guides/model_analysis.md) + - [迁移指南](./migration.md) + +4. 对于希望加入开源社区,向 MMPose 贡献代码的研究者和开发者: + + - [参与贡献代码](./contribution_guide.md) + +5. 对于使用过程中的常见问题: + + - [FAQ](./faq.md) diff --git a/docs/zh_cn/quick_run.md b/docs/zh_cn/quick_run.md index 55c2d63b20..b81b3652f6 100644 --- a/docs/zh_cn/quick_run.md +++ b/docs/zh_cn/quick_run.md @@ -1,188 +1,188 @@ -# 快速上手 - -在这一章里,我们将带领你走过MMPose工作流程中关键的七个步骤,帮助你快速上手: - -1. 使用预训练模型进行推理 -2. 准备数据集 -3. 准备配置文件 -4. 可视化训练图片 -5. 训练 -6. 测试 -7. 
可视化 - -## 安装 - -请查看[安装指南](./installation.md),以了解完整步骤。 - -## 快速开始 - -### 使用预训练模型进行推理 - -你可以通过以下命令来使用预训练模型对单张图片进行识别: - -```Bash -python demo/image_demo.py \ - tests/data/coco/000000000785.jpg \ - configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py\ - https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res50_coco_256x192_rle-2ea9bb4a_20220616.pth -``` - -该命令中用到了测试图片、完整的配置文件、预训练模型,如果MMPose安装无误,将会弹出一个新窗口,对检测结果进行可视化显示: - -![inference_demo](https://user-images.githubusercontent.com/13503330/187112344-0c5062f2-689c-445c-a259-d5d4311e2497.png) - -更多演示脚本的详细参数说明可以在 [模型推理](./user_guides/inference.md) 中找到。 - -### 准备数据集 - -MMPose支持各种不同的任务,我们提供了对应的数据集准备教程。 - -- [2D人体关键点](./dataset_zoo/2d_body_keypoint.md) - -- [3D人体关键点](./dataset_zoo/3d_body_keypoint.md) - -- [2D人手关键点](./dataset_zoo/2d_hand_keypoint.md) - -- [3D人手关键点](./dataset_zoo/3d_hand_keypoint.md) - -- [2D人脸关键点](./dataset_zoo/2d_face_keypoint.md) - -- [2D全身人体关键点](./dataset_zoo/2d_wholebody_keypoint.md) - -- [2D服饰关键点](./dataset_zoo/2d_fashion_landmark.md) - -- [2D动物关键点](./dataset_zoo/2d_animal_keypoint.md) - -你可以在【2D人体关键点数据集】>【COCO】下找到COCO数据集的准备教程,并按照教程完成数据集的下载和整理。 - -```{note} -在MMPose中,我们建议将COCO数据集存放到新建的 `$MMPOSE/data` 目录下。 -``` - -### 准备配置文件 - -MMPose拥有一套强大的配置系统,用于管理训练所需的一系列必要参数: - -- **通用**:环境、Hook、Checkpoint、Logger、Timer等 - -- **数据**:Dataset、Dataloader、数据增强等 - -- **训练**:优化器、学习率调整等 - -- **模型**:Backbone、Neck、Head、损失函数等 - -- **评测**:Metrics - -在`$MMPOSE/configs`目录下,我们提供了大量前沿论文方法的配置文件,可供直接使用和参考。 - -要在COCO数据集上训练基于ResNet50的RLE模型时,所需的配置文件为: - -```Bash -$MMPOSE/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py -``` - -我们需要将配置文件中的 data_root 变量修改为COCO数据集存放路径: - -```Python -data_root = 'data/coco' -``` - -```{note} -感兴趣的读者也可以查阅 [配置文件](./user_guides/configs.md) 来进一步学习MMPose所使用的配置系统。 -``` - -### 可视化训练图片 - -在开始训练之前,我们还可以对训练图片进行可视化,检查训练图片是否正确进行了数据增强。 - -我们提供了相应的可视化脚本: - -```Bash -python tools/misc/browse_dastaset.py \ - configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py \ - --mode transformed -``` - -![transformed_training_img](https://user-images.githubusercontent.com/13503330/187112376-e604edcb-46cc-4995-807b-e8f204f991b0.png) - -### 训练 - -确定数据无误后,运行以下命令启动训练: - -```Bash -python tools/train.py configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py -``` - -```{note} -MMPose中集成了大量实用训练trick和功能: - -- 学习率warmup和scheduling - -- ImageNet预训练权重 - -- 自动学习率缩放、自动batch size缩放 - -- CPU训练、多机多卡训练、集群训练 - -- HardDisk、LMDB、Petrel、HTTP等不同数据后端 - -- 混合精度浮点训练 - -- TensorBoard -``` - -### 测试 - -在不指定额外参数时,训练的权重和日志信息会默认存储到`$MMPOSE/work_dirs`目录下,最优的模型权重存放在`$MMPOSE/work_dir/best_coco`目录下。 - -我们可以通过如下指令测试模型在COCO验证集上的精度: - -```Bash -python tools/test.py \ - configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py \ - work_dir/best_coco/AP_epoch_20.pth -``` - -在COCO验证集上评测结果样例如下: - -```Bash - Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets= 20 ] = 0.704 - Average Precision (AP) @[ IoU=0.50 | area= all | maxDets= 20 ] = 0.883 - Average Precision (AP) @[ IoU=0.75 | area= all | maxDets= 20 ] = 0.777 - Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets= 20 ] = 0.667 - Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets= 20 ] = 0.769 - Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 20 ] = 0.751 - Average Recall (AR) @[ IoU=0.50 | area= all | maxDets= 20 ] = 0.920 - Average Recall (AR) @[ IoU=0.75 | area= all | maxDets= 20 ] = 0.815 - 
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets= 20 ] = 0.709 - Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets= 20 ] = 0.811 -08/23 12:04:42 - mmengine - INFO - Epoch(test) [3254/3254] coco/AP: 0.704168 coco/AP .5: 0.883134 coco/AP .75: 0.777015 coco/AP (M): 0.667207 coco/AP (L): 0.768644 coco/AR: 0.750913 coco/AR .5: 0.919710 coco/AR .75: 0.815334 coco/AR (M): 0.709232 coco/AR (L): 0.811334 -``` - -```{note} -如果需要测试模型在其他数据集上的表现,可以前往 [训练与测试](./user_guides/train_and_test.md) 查看。 -``` - -### 可视化 - -除了对关键点骨架的可视化以外,我们还支持对热度图进行可视化,你只需要在配置文件中设置`output_heatmap=True`: - -```Python -model = dict( - ## 内容省略 - test_cfg = dict( - ## 内容省略 - output_heatmaps=True - ) -) -``` - -或在命令行中添加`--cfg-options='model.test_cfg.output_heatmaps=True'`。 - -可视化效果如下: - -![vis_pred](https://user-images.githubusercontent.com/26127467/187578902-30ef7bb0-9a93-4e03-bae0-02aeccf7f689.jpg) - -```{note} -如果你希望深入地学习MMPose,将其应用到自己的项目当中,我们准备了一份详细的 [迁移指南](./migration.md) 。 -``` +# 快速上手 + +在这一章里,我们将带领你走过MMPose工作流程中关键的七个步骤,帮助你快速上手: + +1. 使用预训练模型进行推理 +2. 准备数据集 +3. 准备配置文件 +4. 可视化训练图片 +5. 训练 +6. 测试 +7. 可视化 + +## 安装 + +请查看[安装指南](./installation.md),以了解完整步骤。 + +## 快速开始 + +### 使用预训练模型进行推理 + +你可以通过以下命令来使用预训练模型对单张图片进行识别: + +```Bash +python demo/image_demo.py \ + tests/data/coco/000000000785.jpg \ + configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py\ + https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res50_coco_256x192_rle-2ea9bb4a_20220616.pth +``` + +该命令中用到了测试图片、完整的配置文件、预训练模型,如果MMPose安装无误,将会弹出一个新窗口,对检测结果进行可视化显示: + +![inference_demo](https://user-images.githubusercontent.com/13503330/187112344-0c5062f2-689c-445c-a259-d5d4311e2497.png) + +更多演示脚本的详细参数说明可以在 [模型推理](./user_guides/inference.md) 中找到。 + +### 准备数据集 + +MMPose支持各种不同的任务,我们提供了对应的数据集准备教程。 + +- [2D人体关键点](./dataset_zoo/2d_body_keypoint.md) + +- [3D人体关键点](./dataset_zoo/3d_body_keypoint.md) + +- [2D人手关键点](./dataset_zoo/2d_hand_keypoint.md) + +- [3D人手关键点](./dataset_zoo/3d_hand_keypoint.md) + +- [2D人脸关键点](./dataset_zoo/2d_face_keypoint.md) + +- [2D全身人体关键点](./dataset_zoo/2d_wholebody_keypoint.md) + +- [2D服饰关键点](./dataset_zoo/2d_fashion_landmark.md) + +- [2D动物关键点](./dataset_zoo/2d_animal_keypoint.md) + +你可以在【2D人体关键点数据集】>【COCO】下找到COCO数据集的准备教程,并按照教程完成数据集的下载和整理。 + +```{note} +在MMPose中,我们建议将COCO数据集存放到新建的 `$MMPOSE/data` 目录下。 +``` + +### 准备配置文件 + +MMPose拥有一套强大的配置系统,用于管理训练所需的一系列必要参数: + +- **通用**:环境、Hook、Checkpoint、Logger、Timer等 + +- **数据**:Dataset、Dataloader、数据增强等 + +- **训练**:优化器、学习率调整等 + +- **模型**:Backbone、Neck、Head、损失函数等 + +- **评测**:Metrics + +在`$MMPOSE/configs`目录下,我们提供了大量前沿论文方法的配置文件,可供直接使用和参考。 + +要在COCO数据集上训练基于ResNet50的RLE模型时,所需的配置文件为: + +```Bash +$MMPOSE/configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py +``` + +我们需要将配置文件中的 data_root 变量修改为COCO数据集存放路径: + +```Python +data_root = 'data/coco' +``` + +```{note} +感兴趣的读者也可以查阅 [配置文件](./user_guides/configs.md) 来进一步学习MMPose所使用的配置系统。 +``` + +### 可视化训练图片 + +在开始训练之前,我们还可以对训练图片进行可视化,检查训练图片是否正确进行了数据增强。 + +我们提供了相应的可视化脚本: + +```Bash +python tools/misc/browse_dastaset.py \ + configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py \ + --mode transformed +``` + +![transformed_training_img](https://user-images.githubusercontent.com/13503330/187112376-e604edcb-46cc-4995-807b-e8f204f991b0.png) + +### 训练 + +确定数据无误后,运行以下命令启动训练: + +```Bash +python tools/train.py configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py +``` + +```{note} +MMPose中集成了大量实用训练trick和功能: + +- 
学习率warmup和scheduling + +- ImageNet预训练权重 + +- 自动学习率缩放、自动batch size缩放 + +- CPU训练、多机多卡训练、集群训练 + +- HardDisk、LMDB、Petrel、HTTP等不同数据后端 + +- 混合精度浮点训练 + +- TensorBoard +``` + +### 测试 + +在不指定额外参数时,训练的权重和日志信息会默认存储到`$MMPOSE/work_dirs`目录下,最优的模型权重存放在`$MMPOSE/work_dir/best_coco`目录下。 + +我们可以通过如下指令测试模型在COCO验证集上的精度: + +```Bash +python tools/test.py \ + configs/body_2d_keypoint/topdown_regression/coco/td-reg_res50_rle-8xb64-210e_coco-256x192.py \ + work_dir/best_coco/AP_epoch_20.pth +``` + +在COCO验证集上评测结果样例如下: + +```Bash + Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets= 20 ] = 0.704 + Average Precision (AP) @[ IoU=0.50 | area= all | maxDets= 20 ] = 0.883 + Average Precision (AP) @[ IoU=0.75 | area= all | maxDets= 20 ] = 0.777 + Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets= 20 ] = 0.667 + Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets= 20 ] = 0.769 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 20 ] = 0.751 + Average Recall (AR) @[ IoU=0.50 | area= all | maxDets= 20 ] = 0.920 + Average Recall (AR) @[ IoU=0.75 | area= all | maxDets= 20 ] = 0.815 + Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets= 20 ] = 0.709 + Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets= 20 ] = 0.811 +08/23 12:04:42 - mmengine - INFO - Epoch(test) [3254/3254] coco/AP: 0.704168 coco/AP .5: 0.883134 coco/AP .75: 0.777015 coco/AP (M): 0.667207 coco/AP (L): 0.768644 coco/AR: 0.750913 coco/AR .5: 0.919710 coco/AR .75: 0.815334 coco/AR (M): 0.709232 coco/AR (L): 0.811334 +``` + +```{note} +如果需要测试模型在其他数据集上的表现,可以前往 [训练与测试](./user_guides/train_and_test.md) 查看。 +``` + +### 可视化 + +除了对关键点骨架的可视化以外,我们还支持对热度图进行可视化,你只需要在配置文件中设置`output_heatmap=True`: + +```Python +model = dict( + ## 内容省略 + test_cfg = dict( + ## 内容省略 + output_heatmaps=True + ) +) +``` + +或在命令行中添加`--cfg-options='model.test_cfg.output_heatmaps=True'`。 + +可视化效果如下: + +![vis_pred](https://user-images.githubusercontent.com/26127467/187578902-30ef7bb0-9a93-4e03-bae0-02aeccf7f689.jpg) + +```{note} +如果你希望深入地学习MMPose,将其应用到自己的项目当中,我们准备了一份详细的 [迁移指南](./migration.md) 。 +``` diff --git a/docs/zh_cn/stats.py b/docs/zh_cn/stats.py index 218d23f5b0..5b2b4af1a0 100644 --- a/docs/zh_cn/stats.py +++ b/docs/zh_cn/stats.py @@ -1,176 +1,176 @@ -#!/usr/bin/env python -# Copyright (c) OpenMMLab. All rights reserved. 
-import functools as func -import glob -import re -from os.path import basename, splitext - -import numpy as np -import titlecase - - -def anchor(name): - return re.sub(r'-+', '-', re.sub(r'[^a-zA-Z0-9]', '-', - name.strip().lower())).strip('-') - - -# Count algorithms - -files = sorted(glob.glob('model_zoo/*.md')) - -stats = [] - -for f in files: - with open(f, 'r') as content_file: - content = content_file.read() - - # title - title = content.split('\n')[0].replace('#', '') - - # count papers - papers = set( - (papertype, titlecase.titlecase(paper.lower().strip())) - for (papertype, paper) in re.findall( - r'\s*\n.*?\btitle\s*=\s*{(.*?)}', - content, re.DOTALL)) - # paper links - revcontent = '\n'.join(list(reversed(content.splitlines()))) - paperlinks = {} - for _, p in papers: - # print(p) - paperlinks[p] = ', '.join( - ((f'[{paperlink} ⇨]' - f'(model_zoo/{splitext(basename(f))[0]}.html#' - f'{anchor(paperlink)})') for paperlink in re.findall( - rf'\btitle\s*=\s*{{\s*{p}\s*}}.*?\n### (.*?)\s*[,;]?\s*\n', - revcontent, re.DOTALL | re.IGNORECASE))) - # print(' ', paperlinks[p]) - paperlist = '\n'.join( - sorted(f' - [{t}] {x} ({paperlinks[x]})' for t, x in papers)) - # count configs - configs = set(x.lower().strip() - for x in re.findall(r'.*configs/.*\.py', content)) - - # count ckpts - ckpts = set(x.lower().strip() - for x in re.findall(r'https://download.*\.pth', content) - if 'mmpose' in x) - - statsmsg = f""" -## [{title}]({f}) - -* 模型权重文件数量: {len(ckpts)} -* 配置文件数量: {len(configs)} -* 论文数量: {len(papers)} -{paperlist} - - """ - - stats.append((papers, configs, ckpts, statsmsg)) - -allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _, _ in stats]) -allconfigs = func.reduce(lambda a, b: a.union(b), [c for _, c, _, _ in stats]) -allckpts = func.reduce(lambda a, b: a.union(b), [c for _, _, c, _ in stats]) - -# Summarize - -msglist = '\n'.join(x for _, _, _, x in stats) -papertypes, papercounts = np.unique([t for t, _ in allpapers], - return_counts=True) -countstr = '\n'.join( - [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) - -modelzoo = f""" -# 概览 - -* 模型权重文件数量: {len(allckpts)} -* 配置文件数量: {len(allconfigs)} -* 论文数量: {len(allpapers)} -{countstr} - -已支持的数据集详细信息请见 [数据集](dataset_zoo.md). 
- -{msglist} - -""" - -with open('model_zoo.md', 'w') as f: - f.write(modelzoo) - -# Count datasets - -files = sorted(glob.glob('model_zoo/*.md')) -# files = sorted(glob.glob('docs/tasks/*.md')) - -datastats = [] - -for f in files: - with open(f, 'r') as content_file: - content = content_file.read() - - # title - title = content.split('\n')[0].replace('#', '') - - # count papers - papers = set( - (papertype, titlecase.titlecase(paper.lower().strip())) - for (papertype, paper) in re.findall( - r'\s*\n.*?\btitle\s*=\s*{(.*?)}', - content, re.DOTALL)) - # paper links - revcontent = '\n'.join(list(reversed(content.splitlines()))) - paperlinks = {} - for _, p in papers: - # print(p) - paperlinks[p] = ', '.join( - (f'[{p} ⇨](model_zoo/{splitext(basename(f))[0]}.html#' - f'{anchor(p)})' for p in re.findall( - rf'\btitle\s*=\s*{{\s*{p}\s*}}.*?\n## (.*?)\s*[,;]?\s*\n', - revcontent, re.DOTALL | re.IGNORECASE))) - # print(' ', paperlinks[p]) - paperlist = '\n'.join( - sorted(f' - [{t}] {x} ({paperlinks[x]})' for t, x in papers)) - # count configs - configs = set(x.lower().strip() - for x in re.findall(r'https.*configs/.*\.py', content)) - - # count ckpts - ckpts = set(x.lower().strip() - for x in re.findall(r'https://download.*\.pth', content) - if 'mmpose' in x) - - statsmsg = f""" -## [{title}]({f}) - -* 论文数量: {len(papers)} -{paperlist} - - """ - - datastats.append((papers, configs, ckpts, statsmsg)) - -alldatapapers = func.reduce(lambda a, b: a.union(b), - [p for p, _, _, _ in datastats]) - -# Summarize - -msglist = '\n'.join(x for _, _, _, x in stats) -datamsglist = '\n'.join(x for _, _, _, x in datastats) -papertypes, papercounts = np.unique([t for t, _ in alldatapapers], - return_counts=True) -countstr = '\n'.join( - [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) - -dataset_zoo = f""" -# 概览 - -* 论文数量: {len(alldatapapers)} -{countstr} - -已支持的算法详细信息请见 [模型池](model_zoo.md). - -{datamsglist} -""" - -with open('dataset_zoo.md', 'w') as f: - f.write(dataset_zoo) +#!/usr/bin/env python +# Copyright (c) OpenMMLab. All rights reserved. 
+import functools as func +import glob +import re +from os.path import basename, splitext + +import numpy as np +import titlecase + + +def anchor(name): + return re.sub(r'-+', '-', re.sub(r'[^a-zA-Z0-9]', '-', + name.strip().lower())).strip('-') + + +# Count algorithms + +files = sorted(glob.glob('model_zoo/*.md')) + +stats = [] + +for f in files: + with open(f, 'r') as content_file: + content = content_file.read() + + # title + title = content.split('\n')[0].replace('#', '') + + # count papers + papers = set( + (papertype, titlecase.titlecase(paper.lower().strip())) + for (papertype, paper) in re.findall( + r'\s*\n.*?\btitle\s*=\s*{(.*?)}', + content, re.DOTALL)) + # paper links + revcontent = '\n'.join(list(reversed(content.splitlines()))) + paperlinks = {} + for _, p in papers: + # print(p) + paperlinks[p] = ', '.join( + ((f'[{paperlink} ⇨]' + f'(model_zoo/{splitext(basename(f))[0]}.html#' + f'{anchor(paperlink)})') for paperlink in re.findall( + rf'\btitle\s*=\s*{{\s*{p}\s*}}.*?\n### (.*?)\s*[,;]?\s*\n', + revcontent, re.DOTALL | re.IGNORECASE))) + # print(' ', paperlinks[p]) + paperlist = '\n'.join( + sorted(f' - [{t}] {x} ({paperlinks[x]})' for t, x in papers)) + # count configs + configs = set(x.lower().strip() + for x in re.findall(r'.*configs/.*\.py', content)) + + # count ckpts + ckpts = set(x.lower().strip() + for x in re.findall(r'https://download.*\.pth', content) + if 'mmpose' in x) + + statsmsg = f""" +## [{title}]({f}) + +* 模型权重文件数量: {len(ckpts)} +* 配置文件数量: {len(configs)} +* 论文数量: {len(papers)} +{paperlist} + + """ + + stats.append((papers, configs, ckpts, statsmsg)) + +allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _, _ in stats]) +allconfigs = func.reduce(lambda a, b: a.union(b), [c for _, c, _, _ in stats]) +allckpts = func.reduce(lambda a, b: a.union(b), [c for _, _, c, _ in stats]) + +# Summarize + +msglist = '\n'.join(x for _, _, _, x in stats) +papertypes, papercounts = np.unique([t for t, _ in allpapers], + return_counts=True) +countstr = '\n'.join( + [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) + +modelzoo = f""" +# 概览 + +* 模型权重文件数量: {len(allckpts)} +* 配置文件数量: {len(allconfigs)} +* 论文数量: {len(allpapers)} +{countstr} + +已支持的数据集详细信息请见 [数据集](dataset_zoo.md). 
+ +{msglist} + +""" + +with open('model_zoo.md', 'w') as f: + f.write(modelzoo) + +# Count datasets + +files = sorted(glob.glob('model_zoo/*.md')) +# files = sorted(glob.glob('docs/tasks/*.md')) + +datastats = [] + +for f in files: + with open(f, 'r') as content_file: + content = content_file.read() + + # title + title = content.split('\n')[0].replace('#', '') + + # count papers + papers = set( + (papertype, titlecase.titlecase(paper.lower().strip())) + for (papertype, paper) in re.findall( + r'\s*\n.*?\btitle\s*=\s*{(.*?)}', + content, re.DOTALL)) + # paper links + revcontent = '\n'.join(list(reversed(content.splitlines()))) + paperlinks = {} + for _, p in papers: + # print(p) + paperlinks[p] = ', '.join( + (f'[{p} ⇨](model_zoo/{splitext(basename(f))[0]}.html#' + f'{anchor(p)})' for p in re.findall( + rf'\btitle\s*=\s*{{\s*{p}\s*}}.*?\n## (.*?)\s*[,;]?\s*\n', + revcontent, re.DOTALL | re.IGNORECASE))) + # print(' ', paperlinks[p]) + paperlist = '\n'.join( + sorted(f' - [{t}] {x} ({paperlinks[x]})' for t, x in papers)) + # count configs + configs = set(x.lower().strip() + for x in re.findall(r'https.*configs/.*\.py', content)) + + # count ckpts + ckpts = set(x.lower().strip() + for x in re.findall(r'https://download.*\.pth', content) + if 'mmpose' in x) + + statsmsg = f""" +## [{title}]({f}) + +* 论文数量: {len(papers)} +{paperlist} + + """ + + datastats.append((papers, configs, ckpts, statsmsg)) + +alldatapapers = func.reduce(lambda a, b: a.union(b), + [p for p, _, _, _ in datastats]) + +# Summarize + +msglist = '\n'.join(x for _, _, _, x in stats) +datamsglist = '\n'.join(x for _, _, _, x in datastats) +papertypes, papercounts = np.unique([t for t, _ in alldatapapers], + return_counts=True) +countstr = '\n'.join( + [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) + +dataset_zoo = f""" +# 概览 + +* 论文数量: {len(alldatapapers)} +{countstr} + +已支持的算法详细信息请见 [模型池](model_zoo.md). 
+ +{datamsglist} +""" + +with open('dataset_zoo.md', 'w') as f: + f.write(dataset_zoo) diff --git a/docs/zh_cn/switch_language.md b/docs/zh_cn/switch_language.md index 05688a9530..bfb2ae0d51 100644 --- a/docs/zh_cn/switch_language.md +++ b/docs/zh_cn/switch_language.md @@ -1,3 +1,3 @@ -## 简体中文 - -## English +## 简体中文 + +## English diff --git a/docs/zh_cn/user_guides/advanced_training.md b/docs/zh_cn/user_guides/advanced_training.md index dd02a7661f..0e2dbb9c7d 100644 --- a/docs/zh_cn/user_guides/advanced_training.md +++ b/docs/zh_cn/user_guides/advanced_training.md @@ -1,104 +1,104 @@ -# 高级训练设置 - -## 恢复训练 - -恢复训练是指从之前某次训练保存下来的状态开始继续训练,这里的状态包括模型的权重、优化器和优化器参数调整策略的状态。 - -### 自动恢复训练 - -用户可以在训练命令最后加上 `--resume` 恢复训练,程序会自动从 `work_dirs` 中加载最新的权重文件恢复训练。如果 `work_dir` 中有最新的 `checkpoint`(例如该训练在上一次训练时被中断),则会从该 `checkpoint` 恢复训练,否则(例如上一次训练还没来得及保存 `checkpoint` 或者启动了新的训练任务)会重新开始训练。 - -下面是一个恢复训练的示例: - -```shell -python tools/train.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py --resume -``` - -### 指定 Checkpoint 恢复训练 - -你也可以对 `--resume` 指定 `checkpoint` 路径,MMPose 会自动读取该 `checkpoint` 并从中恢复训练,命令如下: - -```shell -python tools/train.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py \ - --resume work_dirs/td-hm_res50_8xb64-210e_coco-256x192/latest.pth -``` - -如果你希望手动在配置文件中指定 `checkpoint` 路径,除了设置 `resume=True`,还需要设置 `load_from` 参数。需要注意的是,如果只设置了 `load_from` 而没有设置 `resume=True`,则只会加载 `checkpoint` 中的权重并重新开始训练,而不是接着之前的状态继续训练。 - -下面的例子与上面指定 `--resume` 参数的例子等价: - -```python -resume = True -load_from = 'work_dirs/td-hm_res50_8xb64-210e_coco-256x192/latest.pth' -# model settings -model = dict( - ## 内容省略 ## - ) -``` - -## 自动混合精度(AMP)训练 - -混合精度训练在不改变模型、不降低模型训练精度的前提下,可以缩短训练时间,降低存储需求,因而能支持更大的 batch size、更大模型和尺寸更大的输入的训练。 - -如果要开启自动混合精度(AMP)训练,在训练命令最后加上 --amp 即可, 命令如下: - -```shell -python tools/train.py ${CONFIG_FILE} --amp -``` - -具体例子如下: - -```shell -python tools/train.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py --amp -``` - -## 设置随机种子 - -如果想要在训练时指定随机种子,可以使用以下命令: - -```shell -python ./tools/train.py \ - ${CONFIG} \ # 配置文件路径 - --cfg-options randomness.seed=2023 \ # 设置随机种子为 2023 - [randomness.diff_rank_seed=True] \ # 根据 rank 来设置不同的种子。 - [randomness.deterministic=True] # 把 cuDNN 后端确定性选项设置为 True -# [] 代表可选参数,实际输入命令行时,不用输入 [] -``` - -randomness 有三个参数可设置,具体含义如下: - -- `randomness.seed=2023` ,设置随机种子为 `2023`。 - -- `randomness.diff_rank_seed=True`,根据 `rank` 来设置不同的种子,`diff_rank_seed` 默认为 `False`。 - -- `randomness.deterministic=True`,把 `cuDNN` 后端确定性选项设置为 `True`,即把 `torch.backends.cudnn.deterministic` 设为 `True`,把 `torch.backends.cudnn.benchmark` 设为 `False`。`deterministic` 默认为 `False`。更多细节见 [Pytorch Randomness](https://pytorch.org/docs/stable/notes/randomness.html)。 - -如果你希望手动在配置文件中指定随机种子,可以在配置文件中设置 `random_seed` 参数,具体如下: - -```python -randomness = dict(seed=2023) -# model settings -model = dict( - ## 内容省略 ## - ) -``` - -## 使用 Tensorboard 可视化训练过程 - -安装 Tensorboard 环境 - -```shell -pip install tensorboard -``` - -在 config 文件中添加 tensorboard 配置 - -```python -visualizer = dict(vis_backends=[dict(type='LocalVisBackend'),dict(type='TensorboardVisBackend')]) -``` - -运行训练命令后,tensorboard 文件会生成在可视化文件夹 `work_dir/${CONFIG}/${TIMESTAMP}/vis_data` 下,运行下面的命令就可以在网页链接使用 tensorboard 查看 loss、学习率和精度等信息。 - -```shell -tensorboard --logdir work_dir/${CONFIG}/${TIMESTAMP}/vis_data -``` +# 高级训练设置 + +## 恢复训练 + +恢复训练是指从之前某次训练保存下来的状态开始继续训练,这里的状态包括模型的权重、优化器和优化器参数调整策略的状态。 + +### 自动恢复训练 + +用户可以在训练命令最后加上 `--resume` 恢复训练,程序会自动从 `work_dirs` 
中加载最新的权重文件恢复训练。如果 `work_dir` 中有最新的 `checkpoint`(例如该训练在上一次训练时被中断),则会从该 `checkpoint` 恢复训练,否则(例如上一次训练还没来得及保存 `checkpoint` 或者启动了新的训练任务)会重新开始训练。 + +下面是一个恢复训练的示例: + +```shell +python tools/train.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py --resume +``` + +### 指定 Checkpoint 恢复训练 + +你也可以对 `--resume` 指定 `checkpoint` 路径,MMPose 会自动读取该 `checkpoint` 并从中恢复训练,命令如下: + +```shell +python tools/train.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py \ + --resume work_dirs/td-hm_res50_8xb64-210e_coco-256x192/latest.pth +``` + +如果你希望手动在配置文件中指定 `checkpoint` 路径,除了设置 `resume=True`,还需要设置 `load_from` 参数。需要注意的是,如果只设置了 `load_from` 而没有设置 `resume=True`,则只会加载 `checkpoint` 中的权重并重新开始训练,而不是接着之前的状态继续训练。 + +下面的例子与上面指定 `--resume` 参数的例子等价: + +```python +resume = True +load_from = 'work_dirs/td-hm_res50_8xb64-210e_coco-256x192/latest.pth' +# model settings +model = dict( + ## 内容省略 ## + ) +``` + +## 自动混合精度(AMP)训练 + +混合精度训练在不改变模型、不降低模型训练精度的前提下,可以缩短训练时间,降低存储需求,因而能支持更大的 batch size、更大模型和尺寸更大的输入的训练。 + +如果要开启自动混合精度(AMP)训练,在训练命令最后加上 --amp 即可, 命令如下: + +```shell +python tools/train.py ${CONFIG_FILE} --amp +``` + +具体例子如下: + +```shell +python tools/train.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py --amp +``` + +## 设置随机种子 + +如果想要在训练时指定随机种子,可以使用以下命令: + +```shell +python ./tools/train.py \ + ${CONFIG} \ # 配置文件路径 + --cfg-options randomness.seed=2023 \ # 设置随机种子为 2023 + [randomness.diff_rank_seed=True] \ # 根据 rank 来设置不同的种子。 + [randomness.deterministic=True] # 把 cuDNN 后端确定性选项设置为 True +# [] 代表可选参数,实际输入命令行时,不用输入 [] +``` + +randomness 有三个参数可设置,具体含义如下: + +- `randomness.seed=2023` ,设置随机种子为 `2023`。 + +- `randomness.diff_rank_seed=True`,根据 `rank` 来设置不同的种子,`diff_rank_seed` 默认为 `False`。 + +- `randomness.deterministic=True`,把 `cuDNN` 后端确定性选项设置为 `True`,即把 `torch.backends.cudnn.deterministic` 设为 `True`,把 `torch.backends.cudnn.benchmark` 设为 `False`。`deterministic` 默认为 `False`。更多细节见 [Pytorch Randomness](https://pytorch.org/docs/stable/notes/randomness.html)。 + +如果你希望手动在配置文件中指定随机种子,可以在配置文件中设置 `random_seed` 参数,具体如下: + +```python +randomness = dict(seed=2023) +# model settings +model = dict( + ## 内容省略 ## + ) +``` + +## 使用 Tensorboard 可视化训练过程 + +安装 Tensorboard 环境 + +```shell +pip install tensorboard +``` + +在 config 文件中添加 tensorboard 配置 + +```python +visualizer = dict(vis_backends=[dict(type='LocalVisBackend'),dict(type='TensorboardVisBackend')]) +``` + +运行训练命令后,tensorboard 文件会生成在可视化文件夹 `work_dir/${CONFIG}/${TIMESTAMP}/vis_data` 下,运行下面的命令就可以在网页链接使用 tensorboard 查看 loss、学习率和精度等信息。 + +```shell +tensorboard --logdir work_dir/${CONFIG}/${TIMESTAMP}/vis_data +``` diff --git a/docs/zh_cn/user_guides/configs.md b/docs/zh_cn/user_guides/configs.md index 0bcb7aa1a8..6f83b629f3 100644 --- a/docs/zh_cn/user_guides/configs.md +++ b/docs/zh_cn/user_guides/configs.md @@ -1,466 +1,466 @@ -# 配置文件 - -MMPose 使用 Python 文件作为配置文件,将模块化设计和继承设计结合到配置系统中,便于进行各种实验。 - -## 简介 - -MMPose 拥有一套强大的配置系统,在注册器的配合下,用户可以通过一个配置文件来定义整个项目需要用到的所有内容,以 Python 字典形式组织配置信息,传递给注册器完成对应模块的实例化。 - -下面是一个常见的 Pytorch 模块定义的例子: - -```Python -# 在loss_a.py中定义Loss_A类 -Class Loss_A(nn.Module): - def __init__(self, param1, param2): - self.param1 = param1 - self.param2 = param2 - def forward(self, x): - return x - -# 在需要的地方进行实例化 -loss = Loss_A(param1=1.0, param2=True) -``` - -只需要通过一行代码对这个类进行注册: - -```Python -# 在loss_a.py中定义Loss_A类 -from mmpose.registry import MODELS - -@MODELS.register_module() # 注册该类到 MODELS 下 -Class Loss_A(nn.Module): - def __init__(self, param1, param2): - self.param1 = param1 - 
self.param2 = param2 - def forward(self, x): - return x -``` - -并在对应目录下的 `__init__.py` 中进行 `import`: - -```Python -# __init__.py of mmpose/models/losses -from .loss_a.py import Loss_A - -__all__ = ['Loss_A'] -``` - -我们就可以通过如下方式来从配置文件定义并进行实例化: - -```Python -# 在config_file.py中定义 -loss_cfg = dict( - type='Loss_A', # 通过type指定类名 - param1=1.0, # 传递__init__所需的参数 - param2=True -) - -# 在需要的地方进行实例化 -loss = MODELS.build(loss_cfg) # 等价于 loss = Loss_A(param1=1.0, param2=True) -``` - -MMPose 预定义的 Registry 在 `$MMPOSE/mmpose/registry.py` 中,目前支持的有: - -- `DATASETS`:数据集 - -- `TRANSFORMS`:数据变换 - -- `MODELS`:模型模块(Backbone、Neck、Head、Loss等) - -- `VISUALIZERS`:可视化工具 - -- `VISBACKENDS`:可视化后端 - -- `METRICS`:评测指标 - -- `KEYPOINT_CODECS`:编解码器 - -- `HOOKS`:钩子类 - -```{note} -需要注意的是,所有新增的模块都需要使用注册器(Registry)进行注册,并在对应目录的 `__init__.py` 中进行 `import`,以便能够使用配置文件构建其实例。 -``` - -## 配置系统 - -具体而言,一个配置文件主要包含如下五个部分: - -- 通用配置:与训练或测试无关的通用配置,如时间统计,模型存储与加载,可视化等相关 Hook,以及一些分布式相关的环境配置 - -- 数据配置:数据增强策略,Dataset和Dataloader相关配置 - -- 训练配置:断点恢复、模型权重加载、优化器、学习率调整、训练轮数和测试间隔等 - -- 模型配置:模型模块、参数、损失函数等 - -- 评测配置:模型性能评测指标 - -你可以在 `$MMPOSE/configs` 下找到我们提供的配置文件,配置文件之间通过继承来避免冗余。为了保持配置文件简洁易读,我们将一些必要但不常改动的配置存放到了 `$MMPOSE/configs/_base_` 目录下,如果希望查阅完整的配置信息,你可以运行如下指令: - -```Bash -python tools/analysis/print_config.py /PATH/TO/CONFIG -``` - -### 通用配置 - -通用配置指与训练或测试无关的必要配置,主要包括: - -- **默认Hook**:迭代时间统计,训练日志,参数更新,checkpoint 等 - -- **环境配置**:分布式后端,cudnn,多进程配置等 - -- **可视化器**:可视化后端和策略设置 - -- **日志配置**:日志等级,格式,打印和记录间隔等 - -下面是通用配置的样例说明: - -```Python -# 通用配置 -default_scope = 'mmpose' -default_hooks = dict( - timer=dict(type='IterTimerHook'), # 迭代时间统计,包括数据耗时和模型耗时 - logger=dict(type='LoggerHook', interval=50), # 日志打印间隔 - param_scheduler=dict(type='ParamSchedulerHook'), # 用于调度学习率更新 - checkpoint=dict( - type='CheckpointHook', interval=1, save_best='coco/AP', # ckpt保存间隔,最优ckpt参考指标 - rule='greater'), # 最优ckpt指标评价规则 - sampler_seed=dict(type='DistSamplerSeedHook')) # 分布式随机种子设置 -env_cfg = dict( - cudnn_benchmark=False, # cudnn benchmark开关 - mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), # opencv多线程配置 - dist_cfg=dict(backend='nccl')) # 分布式训练后端设置 -vis_backends = [dict(type='LocalVisBackend')] # 可视化器后端设置 -visualizer = dict( # 可视化器设置 - type='PoseLocalVisualizer', - vis_backends=[dict(type='LocalVisBackend')], - name='visualizer') -log_processor = dict( # 训练日志格式、间隔 - type='LogProcessor', window_size=50, by_epoch=True, num_digits=6) -log_level = 'INFO' # 日志记录等级 -``` - -通用配置一般单独存放到`$MMPOSE/configs/_base_`目录下,通过如下方式进行继承: - -```Python -_base_ = ['../../../_base_/default_runtime.py'] # 以运行时的config文件位置为相对路径起点 -``` - -```{note} -CheckpointHook: - -- save_best: `'coco/AP'` 用于 `CocoMetric`, `'PCK'` 用于 `PCKAccuracy` -- max_keep_ckpts: 最大保留ckpt数量,默认为-1,代表不限制 - -样例: - -`default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater', max_keep_ckpts=1))` -``` - -### 数据配置 - -数据配置指数据处理相关的配置,主要包括: - -- **数据后端**:数据供给后端设置,默认为本地硬盘,我们也支持从 LMDB,S3 Bucket 等加载 - -- **数据集**:图像与标注文件路径 - -- **加载**:加载策略,批量大小等 - -- **流水线**:数据增强策略 - -- **编码器**:根据标注生成特定格式的监督信息 - -下面是数据配置的样例说明: - -```Python -backend_args = dict(backend='local') # 数据加载后端设置,默认从本地硬盘加载 -dataset_type = 'CocoDataset' # 数据集类名 -data_mode = 'topdown' # 算法结构类型,用于指定标注信息加载策略 -data_root = 'data/coco/' # 数据存放路径 - # 定义数据编解码器,用于生成target和对pred进行解码,同时包含了输入图片和输出heatmap尺寸等信息 -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) -train_pipeline = [ # 训练时数据增强 - dict(type='LoadImage', backend_args=backend_args, # 加载图片 - dict(type='GetBBoxCenterScale'), # 根据bbox获取center和scale - dict(type='RandomBBoxTransform'), # 
生成随机位移、缩放、旋转变换矩阵 - dict(type='RandomFlip', direction='horizontal'), # 生成随机翻转变换矩阵 - dict(type='RandomHalfBody'), # 随机半身增强 - dict(type='TopdownAffine', input_size=codec['input_size']), # 根据变换矩阵更新目标数据 - dict( - type='GenerateTarget', # 根据目标数据生成监督信息 - # 监督信息类型 - encoder=codec, # 传入编解码器,用于数据编码,生成特定格式的监督信息 - dict(type='PackPoseInputs') # 对target进行打包用于训练 -] -test_pipeline = [ # 测试时数据增强 - dict(type='LoadImage', backend_args=backend_args), # 加载图片 - dict(type='GetBBoxCenterScale'), # 根据bbox获取center和scale - dict(type='TopdownAffine', input_size=codec['input_size']), # 根据变换矩阵更新目标数据 - dict(type='PackPoseInputs') # 对target进行打包用于训练 -] -train_dataloader = dict( # 训练数据加载 - batch_size=64, # 批次大小 - num_workers=2, # 数据加载进程数 - persistent_workers=True, # 在不活跃时维持进程不终止,避免反复启动进程的开销 - sampler=dict(type='DefaultSampler', shuffle=True), # 采样策略,打乱数据 - dataset=dict( - type=dataset_type , # 数据集类名 - data_root=data_root, # 数据集路径 - data_mode=data_mode, # 算法类型 - ann_file='annotations/person_keypoints_train2017.json', # 标注文件路径 - data_prefix=dict(img='train2017/'), # 图像路径 - pipeline=train_pipeline # 数据流水线 - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, # 在不活跃时维持进程不终止,避免反复启动进程的开销 - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False), # 采样策略,不进行打乱 - dataset=dict( - type=dataset_type , # 数据集类名 - data_root=data_root, # 数据集路径 - data_mode=data_mode, # 算法类型 - ann_file='annotations/person_keypoints_val2017.json', # 标注文件路径 - bbox_file= - 'data/coco/person_detection_results/COCO_val2017_detections_AP_H_56_person.json', # 检测框标注文件,topdown方法专用 - data_prefix=dict(img='val2017/'), # 图像路径 - test_mode=True, # 测试模式开关 - pipeline=test_pipeline # 数据流水线 - )) -test_dataloader = val_dataloader # 默认情况下不区分验证集和测试集,用户根据需要来自行定义 -``` - -```{note} - -常用功能可以参考以下教程: -- [恢复训练](../common_usages/resume_training.md) -- [自动混合精度训练](../common_usages/amp_training.md) -- [设置随机种子](../common_usages/set_random_seed.md) - -``` - -### 训练配置 - -训练配置指训练策略相关的配置,主要包括: - -- 从断点恢复训练 - -- 模型权重加载 - -- 训练轮数和测试间隔 - -- 学习率调整策略,如 warmup,scheduler - -- 优化器和学习率 - -- 高级训练策略设置,如自动学习率缩放 - -下面是训练配置的样例说明: - -```Python -resume = False # 断点恢复 -load_from = None # 模型权重加载 -train_cfg = dict(by_epoch=True, max_epochs=210, val_interval=10) # 训练轮数,测试间隔 -param_scheduler = [ - dict( # warmup策略 - type='LinearLR', begin=0, end=500, start_factor=0.001, by_epoch=False), - dict( # scheduler - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] -optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005)) # 优化器和学习率 -auto_scale_lr = dict(base_batch_size=512) # 根据batch_size自动缩放学习率 -``` - -### 模型配置 - -模型配置指模型训练和推理相关的配置,主要包括: - -- 模型结构 - -- 损失函数 - -- 数据解码策略 - -- 测试时增强策略 - -下面是模型配置的样例说明,定义了一个基于 HRNetw32 的 Top-down Heatmap-based 模型: - -```Python -# 定义数据编解码器,如果在数据配置部分已经定义过则无需重复定义 -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) -# 模型配置 -model = dict( - type='TopdownPoseEstimator', # 模型结构决定了算法流程 - data_preprocessor=dict( # 数据归一化和通道顺序调整,作为模型的一部分 - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( # 骨干网络定义 - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 
128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', # 预训练参数,只加载backbone权重用于迁移学习 - checkpoint='https://download.openmmlab.com/mmpose' - '/pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( # 模型头部 - type='HeatmapHead', - in_channels=32, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), # 损失函数 - decoder=codec), # 解码器,将heatmap解码成坐标值 - test_cfg=dict( - flip_test=True, # 开启测试时水平翻转集成 - flip_mode='heatmap', # 对heatmap进行翻转 - shift_heatmap=True, # 对翻转后的结果进行平移提高精度 - )) -``` - -### 评测配置 - -评测配置指公开数据集中关键点检测任务常用的评测指标,主要包括: - -- AR, AP and mAP - -- PCK, PCKh, tPCK - -- AUC - -- EPE - -- NME - -下面是评测配置的样例说明,定义了一个COCO指标评测器: - -```Python -val_evaluator = dict( - type='CocoMetric', # coco 评测指标 - ann_file=data_root + 'annotations/person_keypoints_val2017.json') # 加载评测标注数据 -test_evaluator = val_evaluator # 默认情况下不区分验证集和测试集,用户根据需要来自行定义 -``` - -## 配置文件命名规则 - -MMPose 配置文件命名风格如下: - -```Python -{{算法信息}}_{{模块信息}}_{{训练信息}}_{{数据信息}}.py -``` - -文件名总体分为四部分:算法信息,模块信息,训练信息和数据信息。不同部分的单词之间用下划线 `'_'` 连接,同一部分有多个单词用短横线 `'-'` 连接。 - -- **算法信息**:算法名称,如 `topdown-heatmap`,`topdown-rle` 等 - -- **模块信息**:按照数据流的顺序列举一些中间的模块,其内容依赖于算法任务,如 `res101`,`hrnet-w48`等 - -- **训练信息**:训练策略的一些设置,包括 `batch size`,`schedule` 等,如 `8xb64-210e` - -- **数据信息**:数据集名称、模态、输入尺寸等,如 `ap10k-256x256`,`zebra-160x160` 等 - -有时为了避免文件名过长,会省略模型信息中一些强相关的模块,只保留关键信息,如RLE-based算法中的`GAP`,Heatmap-based算法中的 `deconv` 等。 - -如果你希望向MMPose添加新的方法,你的配置文件同样需要遵守该命名规则。 - -## 常见用法 - -### 配置文件的继承 - -该用法常用于隐藏一些必要但不需要修改的配置,以提高配置文件的可读性。假如有如下两个配置文件: - -`optimizer_cfg.py`: - -```Python -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) -``` - -`resnet50.py`: - -```Python -_base_ = ['optimizer_cfg.py'] -model = dict(type='ResNet', depth=50) -``` - -虽然我们在 `resnet50.py` 中没有定义 optimizer 字段,但由于我们写了 `_base_ = ['optimizer_cfg.py']`,会使这个配置文件获得 `optimizer_cfg.py` 中的所有字段: - -```Python -cfg = Config.fromfile('resnet50.py') -cfg.optimizer # ConfigDict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) -``` - -### 继承字段的修改 - -对于继承过来的已经定义好的字典,可以直接指定对应字段进行修改,而不需要重新定义完整的字典: - -`resnet50_lr0.01.py`: - -```Python -_base_ = ['optimizer_cfg.py'] -model = dict(type='ResNet', depth=50) -optimizer = dict(lr=0.01) # 直接修改对应字段 -``` - -这个配置文件只修改了对应字段`lr`的信息: - -```Python -cfg = Config.fromfile('resnet50_lr0.01.py') -cfg.optimizer # ConfigDict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) -``` - -### 删除字典中的字段 - -如果不仅是需要修改某些字段,还需要删除已定义的一些字段,需要在重新定义这个字典时指定`_delete_=True`,表示将没有在新定义中出现的字段全部删除: - -`resnet50.py`: - -```Python -_base_ = ['optimizer_cfg.py', 'runtime_cfg.py'] -model = dict(type='ResNet', depth=50) -optimizer = dict(_delete_=True, type='SGD', lr=0.01) # 重新定义字典 -``` - -此时字典中除了 `type` 和 `lr` 以外的内容(`momentum`和`weight_decay`)将被全部删除: - -```Python -cfg = Config.fromfile('resnet50_lr0.01.py') -cfg.optimizer # ConfigDict(type='SGD', lr=0.01) -``` - -```{note} -如果你希望更深入地了解配置系统的高级用法,可以查看 [MMEngine 教程](https://mmengine.readthedocs.io/zh_CN/latest/tutorials/config.html)。 -``` +# 配置文件 + +MMPose 使用 Python 文件作为配置文件,将模块化设计和继承设计结合到配置系统中,便于进行各种实验。 + +## 简介 + +MMPose 拥有一套强大的配置系统,在注册器的配合下,用户可以通过一个配置文件来定义整个项目需要用到的所有内容,以 Python 字典形式组织配置信息,传递给注册器完成对应模块的实例化。 + +下面是一个常见的 Pytorch 模块定义的例子: + +```Python +# 在loss_a.py中定义Loss_A类 +Class Loss_A(nn.Module): + def __init__(self, param1, param2): + self.param1 = param1 + self.param2 = param2 + def forward(self, x): + return x + +# 在需要的地方进行实例化 +loss = Loss_A(param1=1.0, 
param2=True) +``` + +只需要通过一行代码对这个类进行注册: + +```Python +# 在loss_a.py中定义Loss_A类 +from mmpose.registry import MODELS + +@MODELS.register_module() # 注册该类到 MODELS 下 +Class Loss_A(nn.Module): + def __init__(self, param1, param2): + self.param1 = param1 + self.param2 = param2 + def forward(self, x): + return x +``` + +并在对应目录下的 `__init__.py` 中进行 `import`: + +```Python +# __init__.py of mmpose/models/losses +from .loss_a.py import Loss_A + +__all__ = ['Loss_A'] +``` + +我们就可以通过如下方式来从配置文件定义并进行实例化: + +```Python +# 在config_file.py中定义 +loss_cfg = dict( + type='Loss_A', # 通过type指定类名 + param1=1.0, # 传递__init__所需的参数 + param2=True +) + +# 在需要的地方进行实例化 +loss = MODELS.build(loss_cfg) # 等价于 loss = Loss_A(param1=1.0, param2=True) +``` + +MMPose 预定义的 Registry 在 `$MMPOSE/mmpose/registry.py` 中,目前支持的有: + +- `DATASETS`:数据集 + +- `TRANSFORMS`:数据变换 + +- `MODELS`:模型模块(Backbone、Neck、Head、Loss等) + +- `VISUALIZERS`:可视化工具 + +- `VISBACKENDS`:可视化后端 + +- `METRICS`:评测指标 + +- `KEYPOINT_CODECS`:编解码器 + +- `HOOKS`:钩子类 + +```{note} +需要注意的是,所有新增的模块都需要使用注册器(Registry)进行注册,并在对应目录的 `__init__.py` 中进行 `import`,以便能够使用配置文件构建其实例。 +``` + +## 配置系统 + +具体而言,一个配置文件主要包含如下五个部分: + +- 通用配置:与训练或测试无关的通用配置,如时间统计,模型存储与加载,可视化等相关 Hook,以及一些分布式相关的环境配置 + +- 数据配置:数据增强策略,Dataset和Dataloader相关配置 + +- 训练配置:断点恢复、模型权重加载、优化器、学习率调整、训练轮数和测试间隔等 + +- 模型配置:模型模块、参数、损失函数等 + +- 评测配置:模型性能评测指标 + +你可以在 `$MMPOSE/configs` 下找到我们提供的配置文件,配置文件之间通过继承来避免冗余。为了保持配置文件简洁易读,我们将一些必要但不常改动的配置存放到了 `$MMPOSE/configs/_base_` 目录下,如果希望查阅完整的配置信息,你可以运行如下指令: + +```Bash +python tools/analysis/print_config.py /PATH/TO/CONFIG +``` + +### 通用配置 + +通用配置指与训练或测试无关的必要配置,主要包括: + +- **默认Hook**:迭代时间统计,训练日志,参数更新,checkpoint 等 + +- **环境配置**:分布式后端,cudnn,多进程配置等 + +- **可视化器**:可视化后端和策略设置 + +- **日志配置**:日志等级,格式,打印和记录间隔等 + +下面是通用配置的样例说明: + +```Python +# 通用配置 +default_scope = 'mmpose' +default_hooks = dict( + timer=dict(type='IterTimerHook'), # 迭代时间统计,包括数据耗时和模型耗时 + logger=dict(type='LoggerHook', interval=50), # 日志打印间隔 + param_scheduler=dict(type='ParamSchedulerHook'), # 用于调度学习率更新 + checkpoint=dict( + type='CheckpointHook', interval=1, save_best='coco/AP', # ckpt保存间隔,最优ckpt参考指标 + rule='greater'), # 最优ckpt指标评价规则 + sampler_seed=dict(type='DistSamplerSeedHook')) # 分布式随机种子设置 +env_cfg = dict( + cudnn_benchmark=False, # cudnn benchmark开关 + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), # opencv多线程配置 + dist_cfg=dict(backend='nccl')) # 分布式训练后端设置 +vis_backends = [dict(type='LocalVisBackend')] # 可视化器后端设置 +visualizer = dict( # 可视化器设置 + type='PoseLocalVisualizer', + vis_backends=[dict(type='LocalVisBackend')], + name='visualizer') +log_processor = dict( # 训练日志格式、间隔 + type='LogProcessor', window_size=50, by_epoch=True, num_digits=6) +log_level = 'INFO' # 日志记录等级 +``` + +通用配置一般单独存放到`$MMPOSE/configs/_base_`目录下,通过如下方式进行继承: + +```Python +_base_ = ['../../../_base_/default_runtime.py'] # 以运行时的config文件位置为相对路径起点 +``` + +```{note} +CheckpointHook: + +- save_best: `'coco/AP'` 用于 `CocoMetric`, `'PCK'` 用于 `PCKAccuracy` +- max_keep_ckpts: 最大保留ckpt数量,默认为-1,代表不限制 + +样例: + +`default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater', max_keep_ckpts=1))` +``` + +### 数据配置 + +数据配置指数据处理相关的配置,主要包括: + +- **数据后端**:数据供给后端设置,默认为本地硬盘,我们也支持从 LMDB,S3 Bucket 等加载 + +- **数据集**:图像与标注文件路径 + +- **加载**:加载策略,批量大小等 + +- **流水线**:数据增强策略 + +- **编码器**:根据标注生成特定格式的监督信息 + +下面是数据配置的样例说明: + +```Python +backend_args = dict(backend='local') # 数据加载后端设置,默认从本地硬盘加载 +dataset_type = 'CocoDataset' # 数据集类名 +data_mode = 'topdown' # 算法结构类型,用于指定标注信息加载策略 +data_root = 'data/coco/' # 数据存放路径 + # 定义数据编解码器,用于生成target和对pred进行解码,同时包含了输入图片和输出heatmap尺寸等信息 +codec = dict( + 
type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) +train_pipeline = [ # 训练时数据增强 + dict(type='LoadImage', backend_args=backend_args, # 加载图片 + dict(type='GetBBoxCenterScale'), # 根据bbox获取center和scale + dict(type='RandomBBoxTransform'), # 生成随机位移、缩放、旋转变换矩阵 + dict(type='RandomFlip', direction='horizontal'), # 生成随机翻转变换矩阵 + dict(type='RandomHalfBody'), # 随机半身增强 + dict(type='TopdownAffine', input_size=codec['input_size']), # 根据变换矩阵更新目标数据 + dict( + type='GenerateTarget', # 根据目标数据生成监督信息 + # 监督信息类型 + encoder=codec, # 传入编解码器,用于数据编码,生成特定格式的监督信息 + dict(type='PackPoseInputs') # 对target进行打包用于训练 +] +test_pipeline = [ # 测试时数据增强 + dict(type='LoadImage', backend_args=backend_args), # 加载图片 + dict(type='GetBBoxCenterScale'), # 根据bbox获取center和scale + dict(type='TopdownAffine', input_size=codec['input_size']), # 根据变换矩阵更新目标数据 + dict(type='PackPoseInputs') # 对target进行打包用于训练 +] +train_dataloader = dict( # 训练数据加载 + batch_size=64, # 批次大小 + num_workers=2, # 数据加载进程数 + persistent_workers=True, # 在不活跃时维持进程不终止,避免反复启动进程的开销 + sampler=dict(type='DefaultSampler', shuffle=True), # 采样策略,打乱数据 + dataset=dict( + type=dataset_type , # 数据集类名 + data_root=data_root, # 数据集路径 + data_mode=data_mode, # 算法类型 + ann_file='annotations/person_keypoints_train2017.json', # 标注文件路径 + data_prefix=dict(img='train2017/'), # 图像路径 + pipeline=train_pipeline # 数据流水线 + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, # 在不活跃时维持进程不终止,避免反复启动进程的开销 + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), # 采样策略,不进行打乱 + dataset=dict( + type=dataset_type , # 数据集类名 + data_root=data_root, # 数据集路径 + data_mode=data_mode, # 算法类型 + ann_file='annotations/person_keypoints_val2017.json', # 标注文件路径 + bbox_file= + 'data/coco/person_detection_results/COCO_val2017_detections_AP_H_56_person.json', # 检测框标注文件,topdown方法专用 + data_prefix=dict(img='val2017/'), # 图像路径 + test_mode=True, # 测试模式开关 + pipeline=test_pipeline # 数据流水线 + )) +test_dataloader = val_dataloader # 默认情况下不区分验证集和测试集,用户根据需要来自行定义 +``` + +```{note} + +常用功能可以参考以下教程: +- [恢复训练](../common_usages/resume_training.md) +- [自动混合精度训练](../common_usages/amp_training.md) +- [设置随机种子](../common_usages/set_random_seed.md) + +``` + +### 训练配置 + +训练配置指训练策略相关的配置,主要包括: + +- 从断点恢复训练 + +- 模型权重加载 + +- 训练轮数和测试间隔 + +- 学习率调整策略,如 warmup,scheduler + +- 优化器和学习率 + +- 高级训练策略设置,如自动学习率缩放 + +下面是训练配置的样例说明: + +```Python +resume = False # 断点恢复 +load_from = None # 模型权重加载 +train_cfg = dict(by_epoch=True, max_epochs=210, val_interval=10) # 训练轮数,测试间隔 +param_scheduler = [ + dict( # warmup策略 + type='LinearLR', begin=0, end=500, start_factor=0.001, by_epoch=False), + dict( # scheduler + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] +optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005)) # 优化器和学习率 +auto_scale_lr = dict(base_batch_size=512) # 根据batch_size自动缩放学习率 +``` + +### 模型配置 + +模型配置指模型训练和推理相关的配置,主要包括: + +- 模型结构 + +- 损失函数 + +- 数据解码策略 + +- 测试时增强策略 + +下面是模型配置的样例说明,定义了一个基于 HRNetw32 的 Top-down Heatmap-based 模型: + +```Python +# 定义数据编解码器,如果在数据配置部分已经定义过则无需重复定义 +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) +# 模型配置 +model = dict( + type='TopdownPoseEstimator', # 模型结构决定了算法流程 + data_preprocessor=dict( # 数据归一化和通道顺序调整,作为模型的一部分 + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( # 骨干网络定义 + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + 
num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', # 预训练参数,只加载backbone权重用于迁移学习 + checkpoint='https://download.openmmlab.com/mmpose' + '/pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( # 模型头部 + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), # 损失函数 + decoder=codec), # 解码器,将heatmap解码成坐标值 + test_cfg=dict( + flip_test=True, # 开启测试时水平翻转集成 + flip_mode='heatmap', # 对heatmap进行翻转 + shift_heatmap=True, # 对翻转后的结果进行平移提高精度 + )) +``` + +### 评测配置 + +评测配置指公开数据集中关键点检测任务常用的评测指标,主要包括: + +- AR, AP and mAP + +- PCK, PCKh, tPCK + +- AUC + +- EPE + +- NME + +下面是评测配置的样例说明,定义了一个COCO指标评测器: + +```Python +val_evaluator = dict( + type='CocoMetric', # coco 评测指标 + ann_file=data_root + 'annotations/person_keypoints_val2017.json') # 加载评测标注数据 +test_evaluator = val_evaluator # 默认情况下不区分验证集和测试集,用户根据需要来自行定义 +``` + +## 配置文件命名规则 + +MMPose 配置文件命名风格如下: + +```Python +{{算法信息}}_{{模块信息}}_{{训练信息}}_{{数据信息}}.py +``` + +文件名总体分为四部分:算法信息,模块信息,训练信息和数据信息。不同部分的单词之间用下划线 `'_'` 连接,同一部分有多个单词用短横线 `'-'` 连接。 + +- **算法信息**:算法名称,如 `topdown-heatmap`,`topdown-rle` 等 + +- **模块信息**:按照数据流的顺序列举一些中间的模块,其内容依赖于算法任务,如 `res101`,`hrnet-w48`等 + +- **训练信息**:训练策略的一些设置,包括 `batch size`,`schedule` 等,如 `8xb64-210e` + +- **数据信息**:数据集名称、模态、输入尺寸等,如 `ap10k-256x256`,`zebra-160x160` 等 + +有时为了避免文件名过长,会省略模型信息中一些强相关的模块,只保留关键信息,如RLE-based算法中的`GAP`,Heatmap-based算法中的 `deconv` 等。 + +如果你希望向MMPose添加新的方法,你的配置文件同样需要遵守该命名规则。 + +## 常见用法 + +### 配置文件的继承 + +该用法常用于隐藏一些必要但不需要修改的配置,以提高配置文件的可读性。假如有如下两个配置文件: + +`optimizer_cfg.py`: + +```Python +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +``` + +`resnet50.py`: + +```Python +_base_ = ['optimizer_cfg.py'] +model = dict(type='ResNet', depth=50) +``` + +虽然我们在 `resnet50.py` 中没有定义 optimizer 字段,但由于我们写了 `_base_ = ['optimizer_cfg.py']`,会使这个配置文件获得 `optimizer_cfg.py` 中的所有字段: + +```Python +cfg = Config.fromfile('resnet50.py') +cfg.optimizer # ConfigDict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +``` + +### 继承字段的修改 + +对于继承过来的已经定义好的字典,可以直接指定对应字段进行修改,而不需要重新定义完整的字典: + +`resnet50_lr0.01.py`: + +```Python +_base_ = ['optimizer_cfg.py'] +model = dict(type='ResNet', depth=50) +optimizer = dict(lr=0.01) # 直接修改对应字段 +``` + +这个配置文件只修改了对应字段`lr`的信息: + +```Python +cfg = Config.fromfile('resnet50_lr0.01.py') +cfg.optimizer # ConfigDict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +``` + +### 删除字典中的字段 + +如果不仅是需要修改某些字段,还需要删除已定义的一些字段,需要在重新定义这个字典时指定`_delete_=True`,表示将没有在新定义中出现的字段全部删除: + +`resnet50.py`: + +```Python +_base_ = ['optimizer_cfg.py', 'runtime_cfg.py'] +model = dict(type='ResNet', depth=50) +optimizer = dict(_delete_=True, type='SGD', lr=0.01) # 重新定义字典 +``` + +此时字典中除了 `type` 和 `lr` 以外的内容(`momentum`和`weight_decay`)将被全部删除: + +```Python +cfg = Config.fromfile('resnet50_lr0.01.py') +cfg.optimizer # ConfigDict(type='SGD', lr=0.01) +``` + +```{note} +如果你希望更深入地了解配置系统的高级用法,可以查看 [MMEngine 教程](https://mmengine.readthedocs.io/zh_CN/latest/tutorials/config.html)。 +``` diff --git a/docs/zh_cn/user_guides/inference.md b/docs/zh_cn/user_guides/inference.md index 0844bc611f..ca4ea06f7e 100644 --- a/docs/zh_cn/user_guides/inference.md 
+++ b/docs/zh_cn/user_guides/inference.md @@ -1,267 +1,267 @@ -# 使用现有模型进行推理 - -MMPose为姿态估计提供了大量可以从[模型库](https://mmpose.readthedocs.io/en/latest/model_zoo.html)中找到的预测训练模型。本指南将演示**如何执行推理**,或使用训练过的模型对提供的图像或视频运行姿态估计。 - -有关在标准数据集上测试现有模型的说明,请参阅本指南。 - -在MMPose,模型由配置文件定义,而其已计算好的参数存储在权重文件(checkpoint file)中。您可以在[模型库](https://mmpose.readthedocs.io/en/latest/model_zoo.html)中找到模型配置文件和相应的权重文件的URL。我们建议从使用HRNet模型的[配置文件](https://github.com/open-mmlab/mmpose/blob/main/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py)和[权重文件](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth)开始。 - -## 推理器:统一的推理接口 - -MMPose提供了一个被称为`MMPoseInferencer`的、全面的推理API。这个API使得用户得以使用所有MMPose支持的模型来对图像和视频进行模型推理。此外,该API可以完成推理结果自动化,并方便用户保存预测结果。 - -### 基本用法 - -`MMPoseInferencer`可以在任何Python程序中被用来执行姿态估计任务。以下是在一个在Python Shell中使用预训练的人体姿态模型对给定图像进行推理的示例。 - -```python -from mmpose.apis import MMPoseInferencer - -img_path = 'tests/data/coco/000000000785.jpg' # 将img_path替换给你自己的路径 - -# 使用模型别名创建推断器 -inferencer = MMPoseInferencer('human') - -# MMPoseInferencer采用了惰性推断方法,在给定输入时创建一个预测生成器 -result_generator = inferencer(img_path, show=True) -result = next(result_generator) -``` - -如果一切正常,你将在一个新窗口中看到下图: - -![inferencer_result_coco](https://user-images.githubusercontent.com/26127467/220008302-4a57fd44-0978-408e-8351-600e5513316a.jpg) - -`result` 变量是一个包含两个键值 `'visualization'` 和 `'predictions'` 的字典。 - -- `'visualization'` 键对应的值是一个列表,该列表: - - 包含可视化结果,例如输入图像、估计姿态的标记,以及可选的预测热图。 - - 如果没有指定 `return_vis` 参数,该列表将保持为空。 -- `'predictions'` 键对应的值是: - - 一个包含每个检测实例的预估关键点的列表。 - -`result` 字典的结构如下所示: - -```python -result = { - 'visualization': [ - # 元素数量:batch_size(默认为1) - vis_image_1, - ... - ], - 'predictions': [ - # 每张图像的姿态估计结果 - # 元素数量:batch_size(默认为1) - [ - # 每个检测到的实例的姿态信息 - # 元素数量:检测到的实例数 - {'keypoints': ..., # 实例 1 - 'keypoint_scores': ..., - ... - }, - {'keypoints': ..., # 实例 2 - 'keypoint_scores': ..., - ... - }, - ] - ... 
- ] -} -``` - -还可以使用用于用于推断的**命令行界面工具**(CLI, command-line interface): `demo/inferencer_demo.py`。这个工具允许用户使用以下命令使用相同的模型和输入执行推理: - -```python -python demo/inferencer_demo.py 'tests/data/coco/000000000785.jpg' \ - --pose2d 'human' --show --pred-out-dir 'predictions' -``` - -预测结果将被保存在路径`predictions/000000000785.json`。作为一个API,`inferencer_demo.py`的输入参数与`MMPoseInferencer`的相同。前者能够处理一系列输入类型,包括以下内容: - -- 图像路径 - -- 视频路径 - -- 文件夹路径(这会导致该文件夹中的所有图像都被推断出来) - -- 表示图像的 numpy array (在命令行界面工具中未支持) - -- 表示图像的 numpy array 列表 (在命令行界面工具中未支持) - -- 摄像头(在这种情况下,输入参数应该设置为`webcam`或`webcam:{CAMERA_ID}`) - -当输入对应于多个图像时,例如输入为**视频**或**文件夹**路径时,推理生成器必须被遍历,以便推理器对视频/文件夹中的所有帧/图像进行推理。以下是一个示例: - -```python -folder_path = 'tests/data/coco' - -result_generator = inferencer(folder_path, show=True) -results = [result for result in result_generator] -``` - -在这个示例中,`inferencer` 接受 `folder_path` 作为输入,并返回一个生成器对象(`result_generator`),用于生成推理结果。通过遍历 `result_generator` 并将每个结果存储在 `results` 列表中,您可以获得视频/文件夹中所有帧/图像的推理结果。 - -### 自定义姿态估计模型 - -`MMPoseInferencer`提供了几种可用于自定义所使用的模型的方法: - -```python -# 使用模型别名构建推断器 -inferencer = MMPoseInferencer('human') - -# 使用模型配置名构建推断器 -inferencer = MMPoseInferencer('td-hm_hrnet-w32_8xb64-210e_coco-256x192') - -# 使用模型配置文件和权重文件的路径或 URL 构建推断器 -inferencer = MMPoseInferencer( - pose2d='configs/body_2d_keypoint/topdown_heatmap/coco/' \ - 'td-hm_hrnet-w32_8xb64-210e_coco-256x192.py', - pose2d_weights='https://download.openmmlab.com/mmpose/top_down/' \ - 'hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth' -) -``` - -模型别名的完整列表可以在模型别名部分中找到。 - -此外,自顶向下的姿态估计器还需要一个对象检测模型。`MMPoseInferencer`能够推断用MMPose支持的数据集训练的模型的实例类型,然后构建必要的对象检测模型。用户也可以通过以下方式手动指定检测模型: - -```python -# 通过别名指定检测模型 -# 可用的别名包括“human”、“hand”、“face”、“animal”、 -# 以及mmdet中定义的任何其他别名 -inferencer = MMPoseInferencer( - # 假设姿态估计器是在自定义数据集上训练的 - pose2d='custom_human_pose_estimator.py', - pose2d_weights='custom_human_pose_estimator.pth', - det_model='human' -) - -# 使用模型配置名称指定检测模型 -inferencer = MMPoseInferencer( - pose2d='human', - det_model='yolox_l_8x8_300e_coco', - det_cat_ids=[0], # 指定'human'类的类别id -) - -# 使用模型配置文件和权重文件的路径或URL构建推断器 -inferencer = MMPoseInferencer( - pose2d='human', - det_model=f'{PATH_TO_MMDET}/configs/yolox/yolox_l_8x8_300e_coco.py', - det_weights='https://download.openmmlab.com/mmdetection/v2.0/' \ - 'yolox/yolox_l_8x8_300e_coco/' \ - 'yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth', - det_cat_ids=[0], # 指定'human'类的类别id -) -``` - -### 转储结果 - -在执行姿态估计推理任务之后,您可能希望保存结果以供进一步分析或处理。本节将指导您将预测的关键点和可视化结果保存到本地。 - -要将预测保存在JSON文件中,在运行`MMPoseInferencer`的实例`inferencer`时使用`pred_out_dir`参数: - -```python -result_generator = inferencer(img_path, pred_out_dir='predictions') -result = next(result_generator) -``` - -预测结果将以JSON格式保存在`predictions/`文件夹中,每个文件以相应的输入图像或视频的名称命名。 - -对于更高级的场景,还可以直接从`inferencer`返回的`result`字典中访问预测结果。其中,`predictions`包含输入图像或视频中每个单独实例的预测关键点列表。然后,您可以使用您喜欢的方法操作或存储这些结果。 - -请记住,如果你想将可视化图像和预测文件保存在一个文件夹中,你可以使用`out_dir`参数: - -```python -result_generator = inferencer(img_path, out_dir='output') -result = next(result_generator) -``` - -在这种情况下,可视化图像将保存在`output/visualization/`文件夹中,而预测将存储在`output/forecasts/`文件夹中。 - -### 可视化 - -推理器`inferencer`可以自动对输入的图像或视频进行预测。可视化结果可以显示在一个新的窗口中,并保存在本地。 - -要在新窗口中查看可视化结果,请使用以下代码: - -请注意: - -- 如果输入视频来自网络摄像头,默认情况下将在新窗口中显示可视化结果,以此让用户看到输入 - -- 如果平台上没有GUI,这个步骤可能会卡住 - -要将可视化结果保存在本地,可以像这样指定`vis_out_dir`参数: - -```python -result_generator = inferencer(img_path, vis_out_dir='vis_results') -result = next(result_generator) -``` - -输入图片或视频的可视化预测结果将保存在`vis_results/`文件夹中 - 
-在开头展示的滑雪图中,姿态的可视化估计结果由关键点(用实心圆描绘)和骨架(用线条表示)组成。这些视觉元素的默认大小可能不会产生令人满意的结果。用户可以使用`radius`和`thickness`参数来调整圆的大小和线的粗细,如下所示: - -```python -result_generator = inferencer(img_path, show=True, radius=4, thickness=2) -result = next(result_generator) -``` - -### 推理器参数 - -`MMPoseInferencer`提供了各种自定义姿态估计、可视化和保存预测结果的参数。下面是初始化推断器时可用的参数列表及对这些参数的描述: - -| Argument | Description | -| ---------------- | ------------------------------------------------------------ | -| `pose2d` | 指定 2D 姿态估计模型的模型别名、配置文件名称或配置文件路径。 | -| `pose2d_weights` | 指定 2D 姿态估计模型权重文件的URL或本地路径。 | -| `pose3d` | 指定 3D 姿态估计模型的模型别名、配置文件名称或配置文件路径。 | -| `pose3d_weights` | 指定 3D 姿态估计模型权重文件的URL或本地路径。 | -| `det_model` | 指定对象检测模型的模型别名、配置文件名或配置文件路径。 | -| `det_weights` | 指定对象检测模型权重文件的 URL 或本地路径。 | -| `det_cat_ids` | 指定与要检测的对象类对应的类别 id 列表。 | -| `device` | 执行推理的设备。如果为 `None`,推理器将选择最合适的一个。 | -| `scope` | 定义模型模块的名称空间 | - -推理器被设计用于可视化和保存预测。以下表格列出了在使用 `MMPoseInferencer` 进行推断时可用的参数列表,以及它们与 2D 和 3D 推理器的兼容性: - -| 参数 | 描述 | 2D | 3D | -| ------------------------ | -------------------------------------------------------------------------------------------------------------------------- | --- | --- | -| `show` | 控制是否在弹出窗口中显示图像或视频。 | ✔️ | ✔️ | -| `radius` | 设置可视化关键点的半径。 | ✔️ | ✔️ | -| `thickness` | 确定可视化链接的厚度。 | ✔️ | ✔️ | -| `kpt_thr` | 设置关键点分数阈值。分数超过此阈值的关键点将被显示。 | ✔️ | ✔️ | -| `draw_bbox` | 决定是否显示实例的边界框。 | ✔️ | ✔️ | -| `draw_heatmap` | 决定是否绘制预测的热图。 | ✔️ | ❌ | -| `black_background` | 决定是否在黑色背景上显示预估的姿势。 | ✔️ | ❌ | -| `skeleton_style` | 设置骨架样式。可选项包括 'mmpose'(默认)和 'openpose'。 | ✔️ | ❌ | -| `use_oks_tracking` | 决定是否在追踪中使用OKS作为相似度测量。 | ❌ | ✔️ | -| `tracking_thr` | 设置追踪的相似度阈值。 | ❌ | ✔️ | -| `norm_pose_2d` | 决定是否将边界框缩放至数据集的平均边界框尺寸,并将边界框移至数据集的平均边界框中心。 | ❌ | ✔️ | -| `rebase_keypoint_height` | 决定是否将最低关键点的高度置为 0。 | ❌ | ✔️ | -| `return_vis` | 决定是否在结果中包含可视化图像。 | ✔️ | ✔️ | -| `vis_out_dir` | 定义保存可视化图像的文件夹路径。如果未设置,将不保存可视化图像。 | ✔️ | ✔️ | -| `return_datasample` | 决定是否以 `PoseDataSample` 格式返回预测。 | ✔️ | ✔️ | -| `pred_out_dir` | 指定保存预测的文件夹路径。如果未设置,将不保存预测。 | ✔️ | ✔️ | -| `out_dir` | 如果 `vis_out_dir` 或 `pred_out_dir` 未设置,它们将分别设置为 `f'{out_dir}/visualization'` 或 `f'{out_dir}/predictions'`。 | ✔️ | ✔️ | - -### 模型别名 - -MMPose为常用模型提供了一组预定义的别名。在初始化 `MMPoseInferencer` 时,这些别名可以用作简略的表达方式,而不是指定完整的模型配置名称。下面是可用的模型别名及其对应的配置名称的列表: - -| 别名 | 配置文件名称 | 对应任务 | 姿态估计模型 | 检测模型 | -| --------- | -------------------------------------------------- | ------------------------------- | ------------- | ------------------- | -| animal | rtmpose-m_8xb64-210e_ap10k-256x256 | Animal pose estimation | RTMPose-m | RTMDet-m | -| human | rtmpose-m_8xb256-420e_aic-coco-256x192 | Human pose estimation | RTMPose-m | RTMDet-m | -| face | rtmpose-m_8xb64-60e_wflw-256x256 | Face keypoint detection | RTMPose-m | yolox-s | -| hand | rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256 | Hand keypoint detection | RTMPose-m | ssdlite_mobilenetv2 | -| wholebody | rtmpose-m_8xb64-270e_coco-wholebody-256x192 | Human wholebody pose estimation | RTMPose-m | RTMDet-m | -| vitpose | td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192 | Human pose estimation | ViTPose-base | RTMDet-m | -| vitpose-s | td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192 | Human pose estimation | ViTPose-small | RTMDet-m | -| vitpose-b | td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192 | Human pose estimation | ViTPose-base | RTMDet-m | -| vitpose-l | td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192 | Human pose estimation | ViTPose-large | RTMDet-m | -| vitpose-h | td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192 | Human pose estimation | ViTPose-huge | RTMDet-m | - 
-此外,用户可以使用命令行界面工具显示所有可用的别名,使用以下命令: - -```shell -python demo/inferencer_demo.py --show-alias -``` +# 使用现有模型进行推理 + +MMPose为姿态估计提供了大量可以从[模型库](https://mmpose.readthedocs.io/en/latest/model_zoo.html)中找到的预测训练模型。本指南将演示**如何执行推理**,或使用训练过的模型对提供的图像或视频运行姿态估计。 + +有关在标准数据集上测试现有模型的说明,请参阅本指南。 + +在MMPose,模型由配置文件定义,而其已计算好的参数存储在权重文件(checkpoint file)中。您可以在[模型库](https://mmpose.readthedocs.io/en/latest/model_zoo.html)中找到模型配置文件和相应的权重文件的URL。我们建议从使用HRNet模型的[配置文件](https://github.com/open-mmlab/mmpose/blob/main/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py)和[权重文件](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth)开始。 + +## 推理器:统一的推理接口 + +MMPose提供了一个被称为`MMPoseInferencer`的、全面的推理API。这个API使得用户得以使用所有MMPose支持的模型来对图像和视频进行模型推理。此外,该API可以完成推理结果自动化,并方便用户保存预测结果。 + +### 基本用法 + +`MMPoseInferencer`可以在任何Python程序中被用来执行姿态估计任务。以下是在一个在Python Shell中使用预训练的人体姿态模型对给定图像进行推理的示例。 + +```python +from mmpose.apis import MMPoseInferencer + +img_path = 'tests/data/coco/000000000785.jpg' # 将img_path替换给你自己的路径 + +# 使用模型别名创建推断器 +inferencer = MMPoseInferencer('human') + +# MMPoseInferencer采用了惰性推断方法,在给定输入时创建一个预测生成器 +result_generator = inferencer(img_path, show=True) +result = next(result_generator) +``` + +如果一切正常,你将在一个新窗口中看到下图: + +![inferencer_result_coco](https://user-images.githubusercontent.com/26127467/220008302-4a57fd44-0978-408e-8351-600e5513316a.jpg) + +`result` 变量是一个包含两个键值 `'visualization'` 和 `'predictions'` 的字典。 + +- `'visualization'` 键对应的值是一个列表,该列表: + - 包含可视化结果,例如输入图像、估计姿态的标记,以及可选的预测热图。 + - 如果没有指定 `return_vis` 参数,该列表将保持为空。 +- `'predictions'` 键对应的值是: + - 一个包含每个检测实例的预估关键点的列表。 + +`result` 字典的结构如下所示: + +```python +result = { + 'visualization': [ + # 元素数量:batch_size(默认为1) + vis_image_1, + ... + ], + 'predictions': [ + # 每张图像的姿态估计结果 + # 元素数量:batch_size(默认为1) + [ + # 每个检测到的实例的姿态信息 + # 元素数量:检测到的实例数 + {'keypoints': ..., # 实例 1 + 'keypoint_scores': ..., + ... + }, + {'keypoints': ..., # 实例 2 + 'keypoint_scores': ..., + ... + }, + ] + ... 
+ ] +} +``` + +还可以使用用于用于推断的**命令行界面工具**(CLI, command-line interface): `demo/inferencer_demo.py`。这个工具允许用户使用以下命令使用相同的模型和输入执行推理: + +```python +python demo/inferencer_demo.py 'tests/data/coco/000000000785.jpg' \ + --pose2d 'human' --show --pred-out-dir 'predictions' +``` + +预测结果将被保存在路径`predictions/000000000785.json`。作为一个API,`inferencer_demo.py`的输入参数与`MMPoseInferencer`的相同。前者能够处理一系列输入类型,包括以下内容: + +- 图像路径 + +- 视频路径 + +- 文件夹路径(这会导致该文件夹中的所有图像都被推断出来) + +- 表示图像的 numpy array (在命令行界面工具中未支持) + +- 表示图像的 numpy array 列表 (在命令行界面工具中未支持) + +- 摄像头(在这种情况下,输入参数应该设置为`webcam`或`webcam:{CAMERA_ID}`) + +当输入对应于多个图像时,例如输入为**视频**或**文件夹**路径时,推理生成器必须被遍历,以便推理器对视频/文件夹中的所有帧/图像进行推理。以下是一个示例: + +```python +folder_path = 'tests/data/coco' + +result_generator = inferencer(folder_path, show=True) +results = [result for result in result_generator] +``` + +在这个示例中,`inferencer` 接受 `folder_path` 作为输入,并返回一个生成器对象(`result_generator`),用于生成推理结果。通过遍历 `result_generator` 并将每个结果存储在 `results` 列表中,您可以获得视频/文件夹中所有帧/图像的推理结果。 + +### 自定义姿态估计模型 + +`MMPoseInferencer`提供了几种可用于自定义所使用的模型的方法: + +```python +# 使用模型别名构建推断器 +inferencer = MMPoseInferencer('human') + +# 使用模型配置名构建推断器 +inferencer = MMPoseInferencer('td-hm_hrnet-w32_8xb64-210e_coco-256x192') + +# 使用模型配置文件和权重文件的路径或 URL 构建推断器 +inferencer = MMPoseInferencer( + pose2d='configs/body_2d_keypoint/topdown_heatmap/coco/' \ + 'td-hm_hrnet-w32_8xb64-210e_coco-256x192.py', + pose2d_weights='https://download.openmmlab.com/mmpose/top_down/' \ + 'hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth' +) +``` + +模型别名的完整列表可以在模型别名部分中找到。 + +此外,自顶向下的姿态估计器还需要一个对象检测模型。`MMPoseInferencer`能够推断用MMPose支持的数据集训练的模型的实例类型,然后构建必要的对象检测模型。用户也可以通过以下方式手动指定检测模型: + +```python +# 通过别名指定检测模型 +# 可用的别名包括“human”、“hand”、“face”、“animal”、 +# 以及mmdet中定义的任何其他别名 +inferencer = MMPoseInferencer( + # 假设姿态估计器是在自定义数据集上训练的 + pose2d='custom_human_pose_estimator.py', + pose2d_weights='custom_human_pose_estimator.pth', + det_model='human' +) + +# 使用模型配置名称指定检测模型 +inferencer = MMPoseInferencer( + pose2d='human', + det_model='yolox_l_8x8_300e_coco', + det_cat_ids=[0], # 指定'human'类的类别id +) + +# 使用模型配置文件和权重文件的路径或URL构建推断器 +inferencer = MMPoseInferencer( + pose2d='human', + det_model=f'{PATH_TO_MMDET}/configs/yolox/yolox_l_8x8_300e_coco.py', + det_weights='https://download.openmmlab.com/mmdetection/v2.0/' \ + 'yolox/yolox_l_8x8_300e_coco/' \ + 'yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth', + det_cat_ids=[0], # 指定'human'类的类别id +) +``` + +### 转储结果 + +在执行姿态估计推理任务之后,您可能希望保存结果以供进一步分析或处理。本节将指导您将预测的关键点和可视化结果保存到本地。 + +要将预测保存在JSON文件中,在运行`MMPoseInferencer`的实例`inferencer`时使用`pred_out_dir`参数: + +```python +result_generator = inferencer(img_path, pred_out_dir='predictions') +result = next(result_generator) +``` + +预测结果将以JSON格式保存在`predictions/`文件夹中,每个文件以相应的输入图像或视频的名称命名。 + +对于更高级的场景,还可以直接从`inferencer`返回的`result`字典中访问预测结果。其中,`predictions`包含输入图像或视频中每个单独实例的预测关键点列表。然后,您可以使用您喜欢的方法操作或存储这些结果。 + +请记住,如果你想将可视化图像和预测文件保存在一个文件夹中,你可以使用`out_dir`参数: + +```python +result_generator = inferencer(img_path, out_dir='output') +result = next(result_generator) +``` + +在这种情况下,可视化图像将保存在`output/visualization/`文件夹中,而预测将存储在`output/forecasts/`文件夹中。 + +### 可视化 + +推理器`inferencer`可以自动对输入的图像或视频进行预测。可视化结果可以显示在一个新的窗口中,并保存在本地。 + +要在新窗口中查看可视化结果,请使用以下代码: + +请注意: + +- 如果输入视频来自网络摄像头,默认情况下将在新窗口中显示可视化结果,以此让用户看到输入 + +- 如果平台上没有GUI,这个步骤可能会卡住 + +要将可视化结果保存在本地,可以像这样指定`vis_out_dir`参数: + +```python +result_generator = inferencer(img_path, vis_out_dir='vis_results') +result = next(result_generator) +``` + +输入图片或视频的可视化预测结果将保存在`vis_results/`文件夹中 + 
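+如果输入是视频或文件夹,也可以用同样的方式指定 `vis_out_dir`;与前文所述一致,此时需要完整遍历生成器,推理器才会处理所有帧。下面是一个简单的示意(其中 `video_path` 为假设的占位路径,请替换为实际文件):
+
+```python
+from mmpose.apis import MMPoseInferencer
+
+inferencer = MMPoseInferencer('human')  # 使用模型别名构建推理器
+video_path = 'path/to/video.mp4'  # 假设的视频路径,仅作示例
+
+# 对视频进行推理,并将可视化结果保存到 vis_results/ 文件夹
+result_generator = inferencer(video_path, vis_out_dir='vis_results')
+# 视频/文件夹输入需要遍历生成器,才能对所有帧完成推理
+results = [result for result in result_generator]
+```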
+在开头展示的滑雪图中,姿态的可视化估计结果由关键点(用实心圆描绘)和骨架(用线条表示)组成。这些视觉元素的默认大小可能不会产生令人满意的结果。用户可以使用`radius`和`thickness`参数来调整圆的大小和线的粗细,如下所示: + +```python +result_generator = inferencer(img_path, show=True, radius=4, thickness=2) +result = next(result_generator) +``` + +### 推理器参数 + +`MMPoseInferencer`提供了各种自定义姿态估计、可视化和保存预测结果的参数。下面是初始化推断器时可用的参数列表及对这些参数的描述: + +| Argument | Description | +| ---------------- | ------------------------------------------------------------ | +| `pose2d` | 指定 2D 姿态估计模型的模型别名、配置文件名称或配置文件路径。 | +| `pose2d_weights` | 指定 2D 姿态估计模型权重文件的URL或本地路径。 | +| `pose3d` | 指定 3D 姿态估计模型的模型别名、配置文件名称或配置文件路径。 | +| `pose3d_weights` | 指定 3D 姿态估计模型权重文件的URL或本地路径。 | +| `det_model` | 指定对象检测模型的模型别名、配置文件名或配置文件路径。 | +| `det_weights` | 指定对象检测模型权重文件的 URL 或本地路径。 | +| `det_cat_ids` | 指定与要检测的对象类对应的类别 id 列表。 | +| `device` | 执行推理的设备。如果为 `None`,推理器将选择最合适的一个。 | +| `scope` | 定义模型模块的名称空间 | + +推理器被设计用于可视化和保存预测。以下表格列出了在使用 `MMPoseInferencer` 进行推断时可用的参数列表,以及它们与 2D 和 3D 推理器的兼容性: + +| 参数 | 描述 | 2D | 3D | +| ------------------------ | -------------------------------------------------------------------------------------------------------------------------- | --- | --- | +| `show` | 控制是否在弹出窗口中显示图像或视频。 | ✔️ | ✔️ | +| `radius` | 设置可视化关键点的半径。 | ✔️ | ✔️ | +| `thickness` | 确定可视化链接的厚度。 | ✔️ | ✔️ | +| `kpt_thr` | 设置关键点分数阈值。分数超过此阈值的关键点将被显示。 | ✔️ | ✔️ | +| `draw_bbox` | 决定是否显示实例的边界框。 | ✔️ | ✔️ | +| `draw_heatmap` | 决定是否绘制预测的热图。 | ✔️ | ❌ | +| `black_background` | 决定是否在黑色背景上显示预估的姿势。 | ✔️ | ❌ | +| `skeleton_style` | 设置骨架样式。可选项包括 'mmpose'(默认)和 'openpose'。 | ✔️ | ❌ | +| `use_oks_tracking` | 决定是否在追踪中使用OKS作为相似度测量。 | ❌ | ✔️ | +| `tracking_thr` | 设置追踪的相似度阈值。 | ❌ | ✔️ | +| `norm_pose_2d` | 决定是否将边界框缩放至数据集的平均边界框尺寸,并将边界框移至数据集的平均边界框中心。 | ❌ | ✔️ | +| `rebase_keypoint_height` | 决定是否将最低关键点的高度置为 0。 | ❌ | ✔️ | +| `return_vis` | 决定是否在结果中包含可视化图像。 | ✔️ | ✔️ | +| `vis_out_dir` | 定义保存可视化图像的文件夹路径。如果未设置,将不保存可视化图像。 | ✔️ | ✔️ | +| `return_datasample` | 决定是否以 `PoseDataSample` 格式返回预测。 | ✔️ | ✔️ | +| `pred_out_dir` | 指定保存预测的文件夹路径。如果未设置,将不保存预测。 | ✔️ | ✔️ | +| `out_dir` | 如果 `vis_out_dir` 或 `pred_out_dir` 未设置,它们将分别设置为 `f'{out_dir}/visualization'` 或 `f'{out_dir}/predictions'`。 | ✔️ | ✔️ | + +### 模型别名 + +MMPose为常用模型提供了一组预定义的别名。在初始化 `MMPoseInferencer` 时,这些别名可以用作简略的表达方式,而不是指定完整的模型配置名称。下面是可用的模型别名及其对应的配置名称的列表: + +| 别名 | 配置文件名称 | 对应任务 | 姿态估计模型 | 检测模型 | +| --------- | -------------------------------------------------- | ------------------------------- | ------------- | ------------------- | +| animal | rtmpose-m_8xb64-210e_ap10k-256x256 | Animal pose estimation | RTMPose-m | RTMDet-m | +| human | rtmpose-m_8xb256-420e_aic-coco-256x192 | Human pose estimation | RTMPose-m | RTMDet-m | +| face | rtmpose-m_8xb64-60e_wflw-256x256 | Face keypoint detection | RTMPose-m | yolox-s | +| hand | rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256 | Hand keypoint detection | RTMPose-m | ssdlite_mobilenetv2 | +| wholebody | rtmpose-m_8xb64-270e_coco-wholebody-256x192 | Human wholebody pose estimation | RTMPose-m | RTMDet-m | +| vitpose | td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192 | Human pose estimation | ViTPose-base | RTMDet-m | +| vitpose-s | td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192 | Human pose estimation | ViTPose-small | RTMDet-m | +| vitpose-b | td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192 | Human pose estimation | ViTPose-base | RTMDet-m | +| vitpose-l | td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192 | Human pose estimation | ViTPose-large | RTMDet-m | +| vitpose-h | td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192 | Human pose estimation | ViTPose-huge | RTMDet-m | + 
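+上表中的别名可以直接替换前文示例中的 `'human'`。下面是一个简单的示意(以表中的 `wholebody` 别名为例,图片路径沿用前文的示例图片,可替换为自己的图片):
+
+```python
+from mmpose.apis import MMPoseInferencer
+
+# 使用 'wholebody' 别名构建全身姿态估计推理器
+inferencer = MMPoseInferencer('wholebody')
+
+# 对示例图片进行推理
+result_generator = inferencer('tests/data/coco/000000000785.jpg', show=True)
+result = next(result_generator)
+```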
+此外,用户可以使用命令行界面工具显示所有可用的别名,使用以下命令: + +```shell +python demo/inferencer_demo.py --show-alias +``` diff --git a/docs/zh_cn/user_guides/mixed_datasets.md b/docs/zh_cn/user_guides/mixed_datasets.md index fac38e3338..1964d33b0d 100644 --- a/docs/zh_cn/user_guides/mixed_datasets.md +++ b/docs/zh_cn/user_guides/mixed_datasets.md @@ -1,159 +1,159 @@ -# 混合数据集训练 - -MMPose 提供了一个灵活、便捷的工具 `CombinedDataset` 来进行混合数据集训练。它作为一个封装器,可以包含多个子数据集,并将来自不同子数据集的数据转换成一个统一的格式,以用于模型训练。使用 `CombinedDataset` 的数据处理流程如下图所示。 - -![combined_dataset_pipeline](https://user-images.githubusercontent.com/26127467/223333154-fb88e511-810a-423c-b755-c791d296bc43.jpg) - -本篇教程的后续部分将通过一个结合 COCO 和 AI Challenger (AIC) 数据集的例子详细介绍如何配置 `CombinedDataset`。 - -## COCO & AIC 数据集混合案例 - -COCO 和 AIC 都是 2D 人体姿态数据集。但是,这两个数据集在关键点的数量和排列顺序上有所不同。下面是分别来自这两个数据集的图片及关键点: - -
    - -有些关键点(例如“左手”)在两个数据集中都有定义,但它们具有不同的序号。具体来说,“左手”关键点在 COCO 数据集中的序号为 9,在AIC数据集中的序号为 5。此外,每个数据集都包含独特的关键点,另一个数据集中不存在。例如,面部关键点(序号为0〜4)仅在 COCO 数据集中定义,而“头顶”(序号为 12)和“颈部”(序号为 13)关键点仅在 AIC 数据集中存在。以下的维恩图显示了两个数据集中关键点之间的关系。 - -
    - -接下来,我们会介绍两种混合数据集的方式: - -- [将 AIC 合入 COCO 数据集](#将-aic-合入-coco-数据集) -- [合并 AIC 和 COCO 数据集](#合并-aic-和-coco-数据集) - -### 将 AIC 合入 COCO 数据集 - -如果用户想提高其模型在 COCO 或类似数据集上的性能,可以将 AIC 数据集作为辅助数据。此时应该仅选择 AIC 数据集中与 COCO 数据集共享的关键点,忽略其余关键点。此外,还需要将这些被选择的关键点在 AIC 数据集中的序号进行转换,以匹配在 COCO 数据集中对应关键点的序号。 - -
    - -在这种情况下,来自 COCO 的数据不需要进行转换。此时 COCO 数据集可通过如下方式配置: - -```python -dataset_coco = dict( - type='CocoDataset', - data_root='data/coco/', - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=[], # `pipeline` 应为空列表,因为 COCO 数据不需要转换 -) -``` - -对于 AIC 数据集,需要转换关键点的顺序。MMPose 提供了一个 `KeypointConverter` 转换器来实现这一点。以下是配置 AIC 子数据集的示例: - -```python -dataset_aic = dict( - type='AicDataset', - data_root='data/aic/', - ann_file='annotations/aic_train.json', - data_prefix=dict(img='ai_challenger_keypoint_train_20170902/' - 'keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=17, # 与 COCO 数据集关键点数一致 - mapping=[ # 需要列出所有带转换关键点的序号 - (0, 6), # 0 (AIC 中的序号) -> 6 (COCO 中的序号) - (1, 8), - (2, 10), - (3, 5), - (4, 7), - (5, 9), - (6, 12), - (7, 14), - (8, 16), - (9, 11), - (10, 13), - (11, 15), - ]) - ], -) -``` - -`KeypointConverter` 会将原序号在 0 到 11 之间的关键点的序号转换为在 5 到 16 之间的对应序号。同时,在 AIC 中序号为为 12 和 13 的关键点将被删除。另外,目标序号在 0 到 4 之间的关键点在 `mapping` 参数中没有定义,这些点将被设为不可见,并且不会在训练中使用。 - -子数据集都完成配置后, 混合数据集 `CombinedDataset` 可以通过如下方式配置: - -```python -dataset = dict( - type='CombinedDataset', - # 混合数据集关键点顺序和 COCO 数据集相同, - # 所以使用 COCO 数据集的描述信息 - metainfo=dict(from_file='configs/_base_/datasets/coco.py'), - datasets=[dataset_coco, dataset_aic], - # `train_pipeline` 包含了常用的数据预处理, - # 比如图片读取、数据增广等 - pipeline=train_pipeline, -) -``` - -MMPose 提供了一份完整的 [配置文件](https://github.com/open-mmlab/mmpose/blob/dev-1.x/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge.py) 来将 AIC 合入 COCO 数据集并用于训练网络。用户可以查阅这个文件以获取更多细节,或者参考这个文件来构建新的混合数据集。 - -### 合并 AIC 和 COCO 数据集 - -将 AIC 合入 COCO 数据集的过程中丢弃了部分 AIC 数据集中的标注信息。如果用户想要使用两个数据集中的所有信息,可以将两个数据集合并,即在两个数据集中取关键点的并集。 - -
    - -在这种情况下,COCO 和 AIC 数据集都需要使用 `KeypointConverter` 来调整它们关键点的顺序: - -```python -dataset_coco = dict( - type='CocoDataset', - data_root='data/coco/', - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=19, # 并集中有 19 个关键点 - mapping=[ - (0, 0), - (1, 1), - # 省略 - (16, 16), - ]) - ]) - -dataset_aic = dict( - type='AicDataset', - data_root='data/aic/', - ann_file='annotations/aic_train.json', - data_prefix=dict(img='ai_challenger_keypoint_train_20170902/' - 'keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=19, # 并集中有 19 个关键点 - mapping=[ - (0, 6), - # 省略 - (12, 17), - (13, 18), - ]) - ], -) -``` - -合并后的数据集有 19 个关键点,这与 COCO 或 AIC 数据集都不同,因此需要一个新的数据集描述信息文件。[coco_aic.py](https://github.com/open-mmlab/mmpose/blob/dev-1.x/configs/_base_/datasets/coco_aic.py) 是一个描述信息文件的示例,它基于 [coco.py](https://github.com/open-mmlab/mmpose/blob/dev-1.x/configs/_base_/datasets/coco.py) 并进行了以下几点修改: - -- 添加了 AIC 数据集的文章信息; -- 在 `keypoint_info` 中添加了“头顶”和“颈部”这两个只在 AIC 中定义的关键点; -- 在 `skeleton_info` 中添加了“头顶”和“颈部”间的连线; -- 拓展 `joint_weights` 和 `sigmas` 以添加新增关键点的信息。 - -完成以上步骤后,合并数据集 `CombinedDataset` 可以通过以下方式配置: - -```python -dataset = dict( - type='CombinedDataset', - # 使用新的描述信息文件 - metainfo=dict(from_file='configs/_base_/datasets/coco_aic.py'), - datasets=[dataset_coco, dataset_aic], - # `train_pipeline` 包含了常用的数据预处理, - # 比如图片读取、数据增广等 - pipeline=train_pipeline, -) -``` - -此外,在使用混合数据集时,由于关键点数量的变化,模型的输出通道数也要做相应调整。如果用户用混合数据集训练了模型,但是要在 COCO 数据集上评估模型,就需要从模型输出的关键点中取出一个子集来匹配 COCO 中的关键点格式。可以通过 `test_cfg` 中的 `output_keypoint_indices` 参数自定义此子集。这个 [配置文件](https://github.com/open-mmlab/mmpose/blob/dev-1.x/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine.py) 展示了如何用 AIC 和 COCO 合并后的数据集训练模型并在 COCO 数据集上进行测试。用户可以查阅这个文件以获取更多细节,或者参考这个文件来构建新的混合数据集。 +# 混合数据集训练 + +MMPose 提供了一个灵活、便捷的工具 `CombinedDataset` 来进行混合数据集训练。它作为一个封装器,可以包含多个子数据集,并将来自不同子数据集的数据转换成一个统一的格式,以用于模型训练。使用 `CombinedDataset` 的数据处理流程如下图所示。 + +![combined_dataset_pipeline](https://user-images.githubusercontent.com/26127467/223333154-fb88e511-810a-423c-b755-c791d296bc43.jpg) + +本篇教程的后续部分将通过一个结合 COCO 和 AI Challenger (AIC) 数据集的例子详细介绍如何配置 `CombinedDataset`。 + +## COCO & AIC 数据集混合案例 + +COCO 和 AIC 都是 2D 人体姿态数据集。但是,这两个数据集在关键点的数量和排列顺序上有所不同。下面是分别来自这两个数据集的图片及关键点: + +
    + +有些关键点(例如“左手”)在两个数据集中都有定义,但它们具有不同的序号。具体来说,“左手”关键点在 COCO 数据集中的序号为 9,在AIC数据集中的序号为 5。此外,每个数据集都包含独特的关键点,另一个数据集中不存在。例如,面部关键点(序号为0〜4)仅在 COCO 数据集中定义,而“头顶”(序号为 12)和“颈部”(序号为 13)关键点仅在 AIC 数据集中存在。以下的维恩图显示了两个数据集中关键点之间的关系。 + +
    + +接下来,我们会介绍两种混合数据集的方式: + +- [将 AIC 合入 COCO 数据集](#将-aic-合入-coco-数据集) +- [合并 AIC 和 COCO 数据集](#合并-aic-和-coco-数据集) + +### 将 AIC 合入 COCO 数据集 + +如果用户想提高其模型在 COCO 或类似数据集上的性能,可以将 AIC 数据集作为辅助数据。此时应该仅选择 AIC 数据集中与 COCO 数据集共享的关键点,忽略其余关键点。此外,还需要将这些被选择的关键点在 AIC 数据集中的序号进行转换,以匹配在 COCO 数据集中对应关键点的序号。 + +
    + +在这种情况下,来自 COCO 的数据不需要进行转换。此时 COCO 数据集可通过如下方式配置: + +```python +dataset_coco = dict( + type='CocoDataset', + data_root='data/coco/', + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=[], # `pipeline` 应为空列表,因为 COCO 数据不需要转换 +) +``` + +对于 AIC 数据集,需要转换关键点的顺序。MMPose 提供了一个 `KeypointConverter` 转换器来实现这一点。以下是配置 AIC 子数据集的示例: + +```python +dataset_aic = dict( + type='AicDataset', + data_root='data/aic/', + ann_file='annotations/aic_train.json', + data_prefix=dict(img='ai_challenger_keypoint_train_20170902/' + 'keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=17, # 与 COCO 数据集关键点数一致 + mapping=[ # 需要列出所有带转换关键点的序号 + (0, 6), # 0 (AIC 中的序号) -> 6 (COCO 中的序号) + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + ]) + ], +) +``` + +`KeypointConverter` 会将原序号在 0 到 11 之间的关键点的序号转换为在 5 到 16 之间的对应序号。同时,在 AIC 中序号为为 12 和 13 的关键点将被删除。另外,目标序号在 0 到 4 之间的关键点在 `mapping` 参数中没有定义,这些点将被设为不可见,并且不会在训练中使用。 + +子数据集都完成配置后, 混合数据集 `CombinedDataset` 可以通过如下方式配置: + +```python +dataset = dict( + type='CombinedDataset', + # 混合数据集关键点顺序和 COCO 数据集相同, + # 所以使用 COCO 数据集的描述信息 + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[dataset_coco, dataset_aic], + # `train_pipeline` 包含了常用的数据预处理, + # 比如图片读取、数据增广等 + pipeline=train_pipeline, +) +``` + +MMPose 提供了一份完整的 [配置文件](https://github.com/open-mmlab/mmpose/blob/dev-1.x/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge.py) 来将 AIC 合入 COCO 数据集并用于训练网络。用户可以查阅这个文件以获取更多细节,或者参考这个文件来构建新的混合数据集。 + +### 合并 AIC 和 COCO 数据集 + +将 AIC 合入 COCO 数据集的过程中丢弃了部分 AIC 数据集中的标注信息。如果用户想要使用两个数据集中的所有信息,可以将两个数据集合并,即在两个数据集中取关键点的并集。 + +
    + +在这种情况下,COCO 和 AIC 数据集都需要使用 `KeypointConverter` 来调整它们关键点的顺序: + +```python +dataset_coco = dict( + type='CocoDataset', + data_root='data/coco/', + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=19, # 并集中有 19 个关键点 + mapping=[ + (0, 0), + (1, 1), + # 省略 + (16, 16), + ]) + ]) + +dataset_aic = dict( + type='AicDataset', + data_root='data/aic/', + ann_file='annotations/aic_train.json', + data_prefix=dict(img='ai_challenger_keypoint_train_20170902/' + 'keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=19, # 并集中有 19 个关键点 + mapping=[ + (0, 6), + # 省略 + (12, 17), + (13, 18), + ]) + ], +) +``` + +合并后的数据集有 19 个关键点,这与 COCO 或 AIC 数据集都不同,因此需要一个新的数据集描述信息文件。[coco_aic.py](https://github.com/open-mmlab/mmpose/blob/dev-1.x/configs/_base_/datasets/coco_aic.py) 是一个描述信息文件的示例,它基于 [coco.py](https://github.com/open-mmlab/mmpose/blob/dev-1.x/configs/_base_/datasets/coco.py) 并进行了以下几点修改: + +- 添加了 AIC 数据集的文章信息; +- 在 `keypoint_info` 中添加了“头顶”和“颈部”这两个只在 AIC 中定义的关键点; +- 在 `skeleton_info` 中添加了“头顶”和“颈部”间的连线; +- 拓展 `joint_weights` 和 `sigmas` 以添加新增关键点的信息。 + +完成以上步骤后,合并数据集 `CombinedDataset` 可以通过以下方式配置: + +```python +dataset = dict( + type='CombinedDataset', + # 使用新的描述信息文件 + metainfo=dict(from_file='configs/_base_/datasets/coco_aic.py'), + datasets=[dataset_coco, dataset_aic], + # `train_pipeline` 包含了常用的数据预处理, + # 比如图片读取、数据增广等 + pipeline=train_pipeline, +) +``` + +此外,在使用混合数据集时,由于关键点数量的变化,模型的输出通道数也要做相应调整。如果用户用混合数据集训练了模型,但是要在 COCO 数据集上评估模型,就需要从模型输出的关键点中取出一个子集来匹配 COCO 中的关键点格式。可以通过 `test_cfg` 中的 `output_keypoint_indices` 参数自定义此子集。这个 [配置文件](https://github.com/open-mmlab/mmpose/blob/dev-1.x/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine.py) 展示了如何用 AIC 和 COCO 合并后的数据集训练模型并在 COCO 数据集上进行测试。用户可以查阅这个文件以获取更多细节,或者参考这个文件来构建新的混合数据集。 diff --git a/docs/zh_cn/user_guides/prepare_datasets.md b/docs/zh_cn/user_guides/prepare_datasets.md index 8b7d651e88..12c92bafd3 100644 --- a/docs/zh_cn/user_guides/prepare_datasets.md +++ b/docs/zh_cn/user_guides/prepare_datasets.md @@ -1,221 +1,221 @@ -# 准备数据集 - -在这份文档将指导如何为 MMPose 准备数据集,包括使用内置数据集、创建自定义数据集、结合数据集进行训练、浏览和下载数据集。 - -## 使用内置数据集 - -**步骤一**: 准备数据 - -MMPose 支持多种任务和相应的数据集。你可以在 [数据集仓库](https://mmpose.readthedocs.io/en/latest/dataset_zoo.html) 中找到它们。为了正确准备你的数据,请按照你选择的数据集的指南进行操作。 - -**步骤二**: 在配置文件中进行数据集设置 - -在开始训练或评估模型之前,你必须配置数据集设置。以 [`td-hm_hrnet-w32_8xb64-210e_coco-256x192.py`](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py) 为例,它可以用于在 COCO 数据集上训练或评估 HRNet 姿态估计器。下面我们浏览一下数据集配置: - -- 基础数据集参数 - - ```python - # base dataset settings - dataset_type = 'CocoDataset' - data_mode = 'topdown' - data_root = 'data/coco/' - ``` - - - `dataset_type` 指定数据集的类名。用户可以参考 [数据集 API](https://mmpose.readthedocs.io/en/latest/api.html#datasets) 来找到他们想要的数据集的类名。 - - `data_mode` 决定了数据集的输出格式,有两个选项可用:`'topdown'` 和 `'bottomup'`。如果 `data_mode='topdown'`,数据元素表示一个实例及其姿态;否则,一个数据元素代表一张图像,包含多个实例和姿态。 - - `data_root` 指定数据集的根目录。 - -- 数据处理流程 - - ```python - # pipelines - train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') - ] - val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - 
dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') - ] - ``` - - `train_pipeline` 和 `val_pipeline` 分别定义了训练和评估阶段处理数据元素的步骤。除了加载图像和打包输入之外,`train_pipeline` 主要包含数据增强技术和目标生成器,而 `val_pipeline` 则专注于将数据元素转换为统一的格式。 - -- 数据加载器 - - ```python - # data loaders - train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) - val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) - test_dataloader = val_dataloader - ``` - - 这个部分是配置数据集的关键。除了前面讨论过的基础数据集参数和数据处理流程之外,这里还定义了其他重要的参数。`batch_size` 决定了每个 GPU 的 batch size;`ann_file` 指定了数据集的注释文件;`data_prefix` 指定了图像文件夹。`bbox_file` 仅在 top-down 数据集的 val/test 数据加载器中使用,用于提供检测到的边界框信息。 - -我们推荐从使用相同数据集的配置文件中复制数据集配置,而不是从头开始编写,以最小化潜在的错误。通过这样做,用户可以根据需要进行必要的修改,从而确保更可靠和高效的设置过程。 - -## 使用自定义数据集 - -[自定义数据集](../advanced_guides/customize_datasets.md) 指南提供了如何构建自定义数据集的详细信息。在本节中,我们将强调一些使用和配置自定义数据集的关键技巧。 - -- 确定数据集类名。如果你将数据集重组为 COCO 格式,你可以简单地使用 `CocoDataset` 作为 `dataset_type` 的值。否则,你将需要使用你添加的自定义数据集类的名称。 - -- 指定元信息配置文件。MMPose 1.x 采用了与 MMPose 0.x 不同的策略来指定元信息。在 MMPose 1.x 中,用户可以按照以下方式指定元信息配置文件: - - ```python - train_dataloader = dict( - ... 
- dataset=dict( - type=dataset_type, - data_root='root/of/your/train/data', - ann_file='path/to/your/train/json', - data_prefix=dict(img='path/to/your/train/img'), - # specify dataset meta information - metainfo=dict(from_file='configs/_base_/datasets/custom.py'), - ...), - ) - ``` - - 注意,`metainfo` 参数必须在 val/test 数据加载器中指定。 - -## 使用混合数据集进行训练 - -MMPose 提供了一个方便且多功能的解决方案,用于训练混合数据集。请参考[混合数据集训练](./mixed_datasets.md)。 - -## 浏览数据集 - -`tools/analysis_tools/browse_dataset.py` 帮助用户可视化地浏览姿态数据集,或将图像保存到指定的目录。 - -```shell -python tools/misc/browse_dataset.py ${CONFIG} [-h] [--output-dir ${OUTPUT_DIR}] [--not-show] [--phase ${PHASE}] [--mode ${MODE}] [--show-interval ${SHOW_INTERVAL}] -``` - -| ARGS | Description | -| -------------------------------- | ---------------------------------------------------------------------------------------------------------- | -| `CONFIG` | 配置文件的路径 | -| `--output-dir OUTPUT_DIR` | 保存可视化结果的目标文件夹。如果不指定,可视化的结果将不会被保存 | -| `--not-show` | 不适用外部窗口显示可视化的结果 | -| `--phase {train, val, test}` | 数据集选项 | -| `--mode {original, transformed}` | 指定可视化图片类型。 `original` 为不使用数据增强的原始图片及标注可视化; `transformed` 为经过增强后的可视化 | -| `--show-interval SHOW_INTERVAL` | 显示图片的时间间隔 | - -例如,用户想要可视化 COCO 数据集中的图像和标注,可以使用: - -```shell -python tools/misc/browse_dataset.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-e210_coco-256x192.py --mode original -``` - -检测框和关键点将被绘制在原始图像上。下面是一个例子: -![original_coco](https://user-images.githubusercontent.com/26127467/187383698-7e518f21-b4cc-4712-9e97-99ddd8f0e437.jpg) - -原始图像在被输入模型之前需要被处理。为了可视化预处理后的图像和标注,用户需要将参数 `mode` 修改为 `transformed`。例如: - -```shell -python tools/misc/browse_dataset.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-e210_coco-256x192.py --mode transformed -``` - -这是一个处理后的样本: - -![transformed_coco](https://user-images.githubusercontent.com/26127467/187386652-bd47335d-797c-4e8c-b823-2a4915f9812f.jpg) - -热图目标将与之一起可视化,如果它是在 pipeline 中生成的。 - -## 用 MIM 下载数据集 - -通过使用 [OpenDataLab](https://opendatalab.com/),您可以直接下载开源数据集。通过平台的搜索功能,您可以快速轻松地找到他们正在寻找的数据集。使用平台上的格式化数据集,您可以高效地跨数据集执行任务。 - -如果您使用 MIM 下载,请确保版本大于 v0.3.8。您可以使用以下命令进行更新、安装、登录和数据集下载: - -```shell -# upgrade your MIM -pip install -U openmim - -# install OpenDataLab CLI tools -pip install -U opendatalab -# log in OpenDataLab, registry -odl login - -# download coco2017 and preprocess by MIM -mim download mmpose --dataset coco2017 -``` - -### 已支持的数据集 - -下面是支持的数据集列表,更多数据集将在之后持续更新: - -#### 人体数据集 - -| Dataset name | Download command | -| ------------- | ----------------------------------------- | -| COCO 2017 | `mim download mmpose --dataset coco2017` | -| MPII | `mim download mmpose --dataset mpii` | -| AI Challenger | `mim download mmpose --dataset aic` | -| CrowdPose | `mim download mmpose --dataset crowdpose` | - -#### 人脸数据集 - -| Dataset name | Download command | -| ------------ | ------------------------------------ | -| LaPa | `mim download mmpose --dataset lapa` | -| 300W | `mim download mmpose --dataset 300w` | -| WFLW | `mim download mmpose --dataset wflw` | - -#### 手部数据集 - -| Dataset name | Download command | -| ------------ | ------------------------------------------ | -| OneHand10K | `mim download mmpose --dataset onehand10k` | -| FreiHand | `mim download mmpose --dataset freihand` | -| HaGRID | `mim download mmpose --dataset hagrid` | - -#### 全身数据集 - -| Dataset name | Download command | -| ------------ | ------------------------------------- | -| Halpe | `mim download mmpose --dataset halpe` | - -#### 动物数据集 - -| Dataset name | Download command | -| ------------ | 
------------------------------------- | -| AP-10K | `mim download mmpose --dataset ap10k` | - -#### 服装数据集 - -Coming Soon +# 准备数据集 + +在这份文档将指导如何为 MMPose 准备数据集,包括使用内置数据集、创建自定义数据集、结合数据集进行训练、浏览和下载数据集。 + +## 使用内置数据集 + +**步骤一**: 准备数据 + +MMPose 支持多种任务和相应的数据集。你可以在 [数据集仓库](https://mmpose.readthedocs.io/en/latest/dataset_zoo.html) 中找到它们。为了正确准备你的数据,请按照你选择的数据集的指南进行操作。 + +**步骤二**: 在配置文件中进行数据集设置 + +在开始训练或评估模型之前,你必须配置数据集设置。以 [`td-hm_hrnet-w32_8xb64-210e_coco-256x192.py`](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py) 为例,它可以用于在 COCO 数据集上训练或评估 HRNet 姿态估计器。下面我们浏览一下数据集配置: + +- 基础数据集参数 + + ```python + # base dataset settings + dataset_type = 'CocoDataset' + data_mode = 'topdown' + data_root = 'data/coco/' + ``` + + - `dataset_type` 指定数据集的类名。用户可以参考 [数据集 API](https://mmpose.readthedocs.io/en/latest/api.html#datasets) 来找到他们想要的数据集的类名。 + - `data_mode` 决定了数据集的输出格式,有两个选项可用:`'topdown'` 和 `'bottomup'`。如果 `data_mode='topdown'`,数据元素表示一个实例及其姿态;否则,一个数据元素代表一张图像,包含多个实例和姿态。 + - `data_root` 指定数据集的根目录。 + +- 数据处理流程 + + ```python + # pipelines + train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') + ] + val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') + ] + ``` + + `train_pipeline` 和 `val_pipeline` 分别定义了训练和评估阶段处理数据元素的步骤。除了加载图像和打包输入之外,`train_pipeline` 主要包含数据增强技术和目标生成器,而 `val_pipeline` 则专注于将数据元素转换为统一的格式。 + +- 数据加载器 + + ```python + # data loaders + train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) + val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) + test_dataloader = val_dataloader + ``` + + 这个部分是配置数据集的关键。除了前面讨论过的基础数据集参数和数据处理流程之外,这里还定义了其他重要的参数。`batch_size` 决定了每个 GPU 的 batch size;`ann_file` 指定了数据集的注释文件;`data_prefix` 指定了图像文件夹。`bbox_file` 仅在 top-down 数据集的 val/test 数据加载器中使用,用于提供检测到的边界框信息。 + +我们推荐从使用相同数据集的配置文件中复制数据集配置,而不是从头开始编写,以最小化潜在的错误。通过这样做,用户可以根据需要进行必要的修改,从而确保更可靠和高效的设置过程。 + +## 使用自定义数据集 + +[自定义数据集](../advanced_guides/customize_datasets.md) 指南提供了如何构建自定义数据集的详细信息。在本节中,我们将强调一些使用和配置自定义数据集的关键技巧。 + +- 确定数据集类名。如果你将数据集重组为 COCO 格式,你可以简单地使用 `CocoDataset` 作为 `dataset_type` 的值。否则,你将需要使用你添加的自定义数据集类的名称。 + +- 指定元信息配置文件。MMPose 1.x 采用了与 MMPose 0.x 不同的策略来指定元信息。在 MMPose 1.x 中,用户可以按照以下方式指定元信息配置文件: + + ```python + train_dataloader = dict( + ... 
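      # Editor's note (lightly hedged): the file referenced by ``metainfo`` below
      # typically defines a ``dataset_info`` dict describing the keypoints
      # (dataset name, keypoint_info, skeleton_info, joint_weights, sigmas);
      # see the custom-dataset guide linked above for the exact schema.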
+ dataset=dict( + type=dataset_type, + data_root='root/of/your/train/data', + ann_file='path/to/your/train/json', + data_prefix=dict(img='path/to/your/train/img'), + # specify dataset meta information + metainfo=dict(from_file='configs/_base_/datasets/custom.py'), + ...), + ) + ``` + + 注意,`metainfo` 参数必须在 val/test 数据加载器中指定。 + +## 使用混合数据集进行训练 + +MMPose 提供了一个方便且多功能的解决方案,用于训练混合数据集。请参考[混合数据集训练](./mixed_datasets.md)。 + +## 浏览数据集 + +`tools/analysis_tools/browse_dataset.py` 帮助用户可视化地浏览姿态数据集,或将图像保存到指定的目录。 + +```shell +python tools/misc/browse_dataset.py ${CONFIG} [-h] [--output-dir ${OUTPUT_DIR}] [--not-show] [--phase ${PHASE}] [--mode ${MODE}] [--show-interval ${SHOW_INTERVAL}] +``` + +| ARGS | Description | +| -------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `CONFIG` | 配置文件的路径 | +| `--output-dir OUTPUT_DIR` | 保存可视化结果的目标文件夹。如果不指定,可视化的结果将不会被保存 | +| `--not-show` | 不适用外部窗口显示可视化的结果 | +| `--phase {train, val, test}` | 数据集选项 | +| `--mode {original, transformed}` | 指定可视化图片类型。 `original` 为不使用数据增强的原始图片及标注可视化; `transformed` 为经过增强后的可视化 | +| `--show-interval SHOW_INTERVAL` | 显示图片的时间间隔 | + +例如,用户想要可视化 COCO 数据集中的图像和标注,可以使用: + +```shell +python tools/misc/browse_dataset.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-e210_coco-256x192.py --mode original +``` + +检测框和关键点将被绘制在原始图像上。下面是一个例子: +![original_coco](https://user-images.githubusercontent.com/26127467/187383698-7e518f21-b4cc-4712-9e97-99ddd8f0e437.jpg) + +原始图像在被输入模型之前需要被处理。为了可视化预处理后的图像和标注,用户需要将参数 `mode` 修改为 `transformed`。例如: + +```shell +python tools/misc/browse_dataset.py configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-e210_coco-256x192.py --mode transformed +``` + +这是一个处理后的样本: + +![transformed_coco](https://user-images.githubusercontent.com/26127467/187386652-bd47335d-797c-4e8c-b823-2a4915f9812f.jpg) + +热图目标将与之一起可视化,如果它是在 pipeline 中生成的。 + +## 用 MIM 下载数据集 + +通过使用 [OpenDataLab](https://opendatalab.com/),您可以直接下载开源数据集。通过平台的搜索功能,您可以快速轻松地找到他们正在寻找的数据集。使用平台上的格式化数据集,您可以高效地跨数据集执行任务。 + +如果您使用 MIM 下载,请确保版本大于 v0.3.8。您可以使用以下命令进行更新、安装、登录和数据集下载: + +```shell +# upgrade your MIM +pip install -U openmim + +# install OpenDataLab CLI tools +pip install -U opendatalab +# log in OpenDataLab, registry +odl login + +# download coco2017 and preprocess by MIM +mim download mmpose --dataset coco2017 +``` + +### 已支持的数据集 + +下面是支持的数据集列表,更多数据集将在之后持续更新: + +#### 人体数据集 + +| Dataset name | Download command | +| ------------- | ----------------------------------------- | +| COCO 2017 | `mim download mmpose --dataset coco2017` | +| MPII | `mim download mmpose --dataset mpii` | +| AI Challenger | `mim download mmpose --dataset aic` | +| CrowdPose | `mim download mmpose --dataset crowdpose` | + +#### 人脸数据集 + +| Dataset name | Download command | +| ------------ | ------------------------------------ | +| LaPa | `mim download mmpose --dataset lapa` | +| 300W | `mim download mmpose --dataset 300w` | +| WFLW | `mim download mmpose --dataset wflw` | + +#### 手部数据集 + +| Dataset name | Download command | +| ------------ | ------------------------------------------ | +| OneHand10K | `mim download mmpose --dataset onehand10k` | +| FreiHand | `mim download mmpose --dataset freihand` | +| HaGRID | `mim download mmpose --dataset hagrid` | + +#### 全身数据集 + +| Dataset name | Download command | +| ------------ | ------------------------------------- | +| Halpe | `mim download mmpose --dataset halpe` | + +#### 动物数据集 + +| Dataset name | Download command | +| ------------ | 
------------------------------------- | +| AP-10K | `mim download mmpose --dataset ap10k` | + +#### 服装数据集 + +Coming Soon diff --git a/docs/zh_cn/user_guides/train_and_test.md b/docs/zh_cn/user_guides/train_and_test.md index 452eddc928..bf5729bfc9 100644 --- a/docs/zh_cn/user_guides/train_and_test.md +++ b/docs/zh_cn/user_guides/train_and_test.md @@ -1,5 +1,5 @@ -# 训练与测试 - -中文内容建设中,暂时请查阅[英文版文档](../../en/user_guides/train_and_test.md) - -如果您愿意参与中文文档的翻译与维护,我们团队将十分感谢您的贡献!欢迎加入我们的社区群与我们取得联系,或直接按照 [如何给 MMPose 贡献代码](../contribution_guide.md) 在 GitHub 上提交 Pull Request。 +# 训练与测试 + +中文内容建设中,暂时请查阅[英文版文档](../../en/user_guides/train_and_test.md) + +如果您愿意参与中文文档的翻译与维护,我们团队将十分感谢您的贡献!欢迎加入我们的社区群与我们取得联系,或直接按照 [如何给 MMPose 贡献代码](../contribution_guide.md) 在 GitHub 上提交 Pull Request。 diff --git a/docs/zh_cn/user_guides/useful_tools.md b/docs/zh_cn/user_guides/useful_tools.md index f2ceb771b7..c93d9aebda 100644 --- a/docs/zh_cn/user_guides/useful_tools.md +++ b/docs/zh_cn/user_guides/useful_tools.md @@ -1,5 +1,5 @@ -# 常用工具 - -中文内容建设中,暂时请查阅[英文版文档](../../en/user_guides/useful_tools.md) - -如果您愿意参与中文文档的翻译与维护,我们团队将十分感谢您的贡献!欢迎加入我们的社区群与我们取得联系,或直接按照 [如何给 MMPose 贡献代码](../contribution_guide.md) 在 GitHub 上提交 Pull Request。 +# 常用工具 + +中文内容建设中,暂时请查阅[英文版文档](../../en/user_guides/useful_tools.md) + +如果您愿意参与中文文档的翻译与维护,我们团队将十分感谢您的贡献!欢迎加入我们的社区群与我们取得联系,或直接按照 [如何给 MMPose 贡献代码](../contribution_guide.md) 在 GitHub 上提交 Pull Request。 diff --git a/docs/zh_cn/user_guides/visualization.md b/docs/zh_cn/user_guides/visualization.md index a584eb450e..7d86767e00 100644 --- a/docs/zh_cn/user_guides/visualization.md +++ b/docs/zh_cn/user_guides/visualization.md @@ -1,5 +1,5 @@ -# 可视化 - -中文内容建设中,暂时请查阅[英文版文档](../../en/user_guides/visualization.md) - -如果您愿意参与中文文档的翻译与维护,我们团队将十分感谢您的贡献!欢迎加入我们的社区群与我们取得联系,或直接按照 [如何给 MMPose 贡献代码](../contribution_guide.md) 在 GitHub 上提交 Pull Request。 +# 可视化 + +中文内容建设中,暂时请查阅[英文版文档](../../en/user_guides/visualization.md) + +如果您愿意参与中文文档的翻译与维护,我们团队将十分感谢您的贡献!欢迎加入我们的社区群与我们取得联系,或直接按照 [如何给 MMPose 贡献代码](../contribution_guide.md) 在 GitHub 上提交 Pull Request。 diff --git a/mmpose/__init__.py b/mmpose/__init__.py index ad7946470d..e932f2e678 100644 --- a/mmpose/__init__.py +++ b/mmpose/__init__.py @@ -1,27 +1,27 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import mmengine -from mmengine.utils import digit_version - -from .version import __version__, short_version - -mmcv_minimum_version = '2.0.0rc4' -mmcv_maximum_version = '2.1.0' -mmcv_version = digit_version(mmcv.__version__) - -mmengine_minimum_version = '0.6.0' -mmengine_maximum_version = '1.0.0' -mmengine_version = digit_version(mmengine.__version__) - -assert (mmcv_version >= digit_version(mmcv_minimum_version) - and mmcv_version <= digit_version(mmcv_maximum_version)), \ - f'MMCV=={mmcv.__version__} is used but incompatible. ' \ - f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.' - -assert (mmengine_version >= digit_version(mmengine_minimum_version) - and mmengine_version <= digit_version(mmengine_maximum_version)), \ - f'MMEngine=={mmengine.__version__} is used but incompatible. ' \ - f'Please install mmengine>={mmengine_minimum_version}, ' \ - f'<={mmengine_maximum_version}.' - -__all__ = ['__version__', 'short_version'] +# Copyright (c) OpenMMLab. All rights reserved. 
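# Descriptive note: the assertions below check at import time that the
# installed mmcv and mmengine versions fall within the supported ranges
# declared in this module, raising an AssertionError otherwise.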
+import mmcv +import mmengine +from mmengine.utils import digit_version + +from .version import __version__, short_version + +mmcv_minimum_version = '2.0.0rc4' +mmcv_maximum_version = '2.1.0' +mmcv_version = digit_version(mmcv.__version__) + +mmengine_minimum_version = '0.6.0' +mmengine_maximum_version = '1.0.0' +mmengine_version = digit_version(mmengine.__version__) + +assert (mmcv_version >= digit_version(mmcv_minimum_version) + and mmcv_version <= digit_version(mmcv_maximum_version)), \ + f'MMCV=={mmcv.__version__} is used but incompatible. ' \ + f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.' + +assert (mmengine_version >= digit_version(mmengine_minimum_version) + and mmengine_version <= digit_version(mmengine_maximum_version)), \ + f'MMEngine=={mmengine.__version__} is used but incompatible. ' \ + f'Please install mmengine>={mmengine_minimum_version}, ' \ + f'<={mmengine_maximum_version}.' + +__all__ = ['__version__', 'short_version'] diff --git a/mmpose/apis/__init__.py b/mmpose/apis/__init__.py index 0c44f7a3f8..f0947da6ee 100644 --- a/mmpose/apis/__init__.py +++ b/mmpose/apis/__init__.py @@ -1,15 +1,15 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .inference import (collect_multi_frames, inference_bottomup, - inference_topdown, init_model) -from .inference_3d import (collate_pose_sequence, convert_keypoint_definition, - extract_pose_sequence, inference_pose_lifter_model) -from .inference_tracking import _compute_iou, _track_by_iou, _track_by_oks -from .inferencers import MMPoseInferencer, Pose2DInferencer - -__all__ = [ - 'init_model', 'inference_topdown', 'inference_bottomup', - 'collect_multi_frames', 'Pose2DInferencer', 'MMPoseInferencer', - '_track_by_iou', '_track_by_oks', '_compute_iou', - 'inference_pose_lifter_model', 'extract_pose_sequence', - 'convert_keypoint_definition', 'collate_pose_sequence' -] +# Copyright (c) OpenMMLab. All rights reserved. +from .inference import (collect_multi_frames, inference_bottomup, + inference_topdown, init_model) +from .inference_3d import (collate_pose_sequence, convert_keypoint_definition, + extract_pose_sequence, inference_pose_lifter_model) +from .inference_tracking import _compute_iou, _track_by_iou, _track_by_oks +from .inferencers import MMPoseInferencer, Pose2DInferencer + +__all__ = [ + 'init_model', 'inference_topdown', 'inference_bottomup', + 'collect_multi_frames', 'Pose2DInferencer', 'MMPoseInferencer', + '_track_by_iou', '_track_by_oks', '_compute_iou', + 'inference_pose_lifter_model', 'extract_pose_sequence', + 'convert_keypoint_definition', 'collate_pose_sequence' +] diff --git a/mmpose/apis/inference.py b/mmpose/apis/inference.py index 772ef17b7c..370630e079 100644 --- a/mmpose/apis/inference.py +++ b/mmpose/apis/inference.py @@ -1,262 +1,262 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
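# Descriptive note: this module provides the high-level single-image inference
# helpers -- dataset_meta_from_config, init_model, inference_topdown,
# inference_bottomup and collect_multi_frames.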
-import warnings -from pathlib import Path -from typing import List, Optional, Union - -import numpy as np -import torch -import torch.nn as nn -from mmengine.config import Config -from mmengine.dataset import Compose, pseudo_collate -from mmengine.model.utils import revert_sync_batchnorm -from mmengine.registry import init_default_scope -from mmengine.runner import load_checkpoint -from PIL import Image - -from mmpose.datasets.datasets.utils import parse_pose_metainfo -from mmpose.models.builder import build_pose_estimator -from mmpose.structures import PoseDataSample -from mmpose.structures.bbox import bbox_xywh2xyxy - - -def dataset_meta_from_config(config: Config, - dataset_mode: str = 'train') -> Optional[dict]: - """Get dataset metainfo from the model config. - - Args: - config (str, :obj:`Path`, or :obj:`mmengine.Config`): Config file path, - :obj:`Path`, or the config object. - dataset_mode (str): Specify the dataset of which to get the metainfo. - Options are ``'train'``, ``'val'`` and ``'test'``. Defaults to - ``'train'`` - - Returns: - dict, optional: The dataset metainfo. See - ``mmpose.datasets.datasets.utils.parse_pose_metainfo`` for details. - Return ``None`` if failing to get dataset metainfo from the config. - """ - try: - if dataset_mode == 'train': - dataset_cfg = config.train_dataloader.dataset - elif dataset_mode == 'val': - dataset_cfg = config.val_dataloader.dataset - elif dataset_mode == 'test': - dataset_cfg = config.test_dataloader.dataset - else: - raise ValueError( - f'Invalid dataset {dataset_mode} to get metainfo. ' - 'Should be one of "train", "val", or "test".') - - if 'metainfo' in dataset_cfg: - metainfo = dataset_cfg.metainfo - else: - import mmpose.datasets.datasets # noqa: F401, F403 - from mmpose.registry import DATASETS - - dataset_class = DATASETS.get(dataset_cfg.type) - metainfo = dataset_class.METAINFO - - metainfo = parse_pose_metainfo(metainfo) - - except AttributeError: - metainfo = None - - return metainfo - - -def init_model(config: Union[str, Path, Config], - checkpoint: Optional[str] = None, - device: str = 'cuda:0', - cfg_options: Optional[dict] = None) -> nn.Module: - """Initialize a pose estimator from a config file. - - Args: - config (str, :obj:`Path`, or :obj:`mmengine.Config`): Config file path, - :obj:`Path`, or the config object. - checkpoint (str, optional): Checkpoint path. If left as None, the model - will not load any weights. Defaults to ``None`` - device (str): The device where the anchors will be put on. - Defaults to ``'cuda:0'``. - cfg_options (dict, optional): Options to override some settings in - the used config. Defaults to ``None`` - - Returns: - nn.Module: The constructed pose estimator. 
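
    Example::

        >>> # Minimal usage sketch; with ``checkpoint=None`` the estimator
        >>> # keeps its randomly initialized weights.
        >>> from mmpose.apis import init_model
        >>> cfg = ('configs/body_2d_keypoint/topdown_heatmap/coco/'
        ...        'td-hm_hrnet-w32_8xb64-210e_coco-256x192.py')
        >>> model = init_model(cfg, checkpoint=None, device='cpu')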
- """ - - if isinstance(config, (str, Path)): - config = Config.fromfile(config) - elif not isinstance(config, Config): - raise TypeError('config must be a filename or Config object, ' - f'but got {type(config)}') - if cfg_options is not None: - config.merge_from_dict(cfg_options) - elif 'init_cfg' in config.model.backbone: - config.model.backbone.init_cfg = None - config.model.train_cfg = None - - # register all modules in mmpose into the registries - scope = config.get('default_scope', 'mmpose') - if scope is not None: - init_default_scope(scope) - - model = build_pose_estimator(config.model) - model = revert_sync_batchnorm(model) - # get dataset_meta in this priority: checkpoint > config > default (COCO) - dataset_meta = None - - if checkpoint is not None: - ckpt = load_checkpoint(model, checkpoint, map_location='cpu') - - if 'dataset_meta' in ckpt.get('meta', {}): - # checkpoint from mmpose 1.x - dataset_meta = ckpt['meta']['dataset_meta'] - - if dataset_meta is None: - dataset_meta = dataset_meta_from_config(config, dataset_mode='train') - - if dataset_meta is None: - warnings.simplefilter('once') - warnings.warn('Can not load dataset_meta from the checkpoint or the ' - 'model config. Use COCO metainfo by default.') - dataset_meta = parse_pose_metainfo( - dict(from_file='configs/_base_/datasets/coco.py')) - - model.dataset_meta = dataset_meta - - model.cfg = config # save the config in the model for convenience - model.to(device) - model.eval() - return model - - -def inference_topdown(model: nn.Module, - img: Union[np.ndarray, str], - bboxes: Optional[Union[List, np.ndarray]] = None, - bbox_format: str = 'xyxy') -> List[PoseDataSample]: - """Inference image with a top-down pose estimator. - - Args: - model (nn.Module): The top-down pose estimator - img (np.ndarray | str): The loaded image or image file to inference - bboxes (np.ndarray, optional): The bboxes in shape (N, 4), each row - represents a bbox. If not given, the entire image will be regarded - as a single bbox area. Defaults to ``None`` - bbox_format (str): The bbox format indicator. Options are ``'xywh'`` - and ``'xyxy'``. Defaults to ``'xyxy'`` - - Returns: - List[:obj:`PoseDataSample`]: The inference results. Specifically, the - predicted keypoints and scores are saved at - ``data_sample.pred_instances.keypoints`` and - ``data_sample.pred_instances.keypoint_scores``. - """ - scope = model.cfg.get('default_scope', 'mmpose') - if scope is not None: - init_default_scope(scope) - pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline) - - if bboxes is None or len(bboxes) == 0: - # get bbox from the image size - if isinstance(img, str): - w, h = Image.open(img).size - else: - h, w = img.shape[:2] - - bboxes = np.array([[0, 0, w, h]], dtype=np.float32) - else: - if isinstance(bboxes, list): - bboxes = np.array(bboxes) - - assert bbox_format in {'xyxy', 'xywh'}, \ - f'Invalid bbox_format "{bbox_format}".' 
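    # Boxes given as 'xywh' (x, y, width, height) are converted to 'xyxy'
    # (x1, y1, x2, y2) here, so the rest of the function works with a single
    # corner-based format.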
- - if bbox_format == 'xywh': - bboxes = bbox_xywh2xyxy(bboxes) - - # construct batch data samples - data_list = [] - for bbox in bboxes: - if isinstance(img, str): - data_info = dict(img_path=img) - else: - data_info = dict(img=img) - data_info['bbox'] = bbox[None] # shape (1, 4) - data_info['bbox_score'] = np.ones(1, dtype=np.float32) # shape (1,) - data_info.update(model.dataset_meta) - data_list.append(pipeline(data_info)) - - if data_list: - # collate data list into a batch, which is a dict with following keys: - # batch['inputs']: a list of input images - # batch['data_samples']: a list of :obj:`PoseDataSample` - batch = pseudo_collate(data_list) - with torch.no_grad(): - results = model.test_step(batch) - else: - results = [] - - return results - - -def inference_bottomup(model: nn.Module, img: Union[np.ndarray, str]): - """Inference image with a bottom-up pose estimator. - - Args: - model (nn.Module): The bottom-up pose estimator - img (np.ndarray | str): The loaded image or image file to inference - - Returns: - List[:obj:`PoseDataSample`]: The inference results. Specifically, the - predicted keypoints and scores are saved at - ``data_sample.pred_instances.keypoints`` and - ``data_sample.pred_instances.keypoint_scores``. - """ - pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline) - - # prepare data batch - if isinstance(img, str): - data_info = dict(img_path=img) - else: - data_info = dict(img=img) - data_info.update(model.dataset_meta) - data = pipeline(data_info) - batch = pseudo_collate([data]) - - with torch.no_grad(): - results = model.test_step(batch) - - return results - - -def collect_multi_frames(video, frame_id, indices, online=False): - """Collect multi frames from the video. - - Args: - video (mmcv.VideoReader): A VideoReader of the input video file. - frame_id (int): index of the current frame - indices (list(int)): index offsets of the frames to collect - online (bool): inference mode, if set to True, can not use future - frame information. - - Returns: - list(ndarray): multi frames collected from the input video file. - """ - num_frames = len(video) - frames = [] - # put the current frame at first - frames.append(video[frame_id]) - # use multi frames for inference - for idx in indices: - # skip current frame - if idx == 0: - continue - support_idx = frame_id + idx - # online mode, can not use future frame information - if online: - support_idx = np.clip(support_idx, 0, frame_id) - else: - support_idx = np.clip(support_idx, 0, num_frames - 1) - frames.append(video[support_idx]) - - return frames +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from pathlib import Path +from typing import List, Optional, Union + +import numpy as np +import torch +import torch.nn as nn +from mmengine.config import Config +from mmengine.dataset import Compose, pseudo_collate +from mmengine.model.utils import revert_sync_batchnorm +from mmengine.registry import init_default_scope +from mmengine.runner import load_checkpoint +from PIL import Image + +from mmpose.datasets.datasets.utils import parse_pose_metainfo +from mmpose.models.builder import build_pose_estimator +from mmpose.structures import PoseDataSample +from mmpose.structures.bbox import bbox_xywh2xyxy + + +def dataset_meta_from_config(config: Config, + dataset_mode: str = 'train') -> Optional[dict]: + """Get dataset metainfo from the model config. + + Args: + config (str, :obj:`Path`, or :obj:`mmengine.Config`): Config file path, + :obj:`Path`, or the config object. 
+ dataset_mode (str): Specify the dataset of which to get the metainfo. + Options are ``'train'``, ``'val'`` and ``'test'``. Defaults to + ``'train'`` + + Returns: + dict, optional: The dataset metainfo. See + ``mmpose.datasets.datasets.utils.parse_pose_metainfo`` for details. + Return ``None`` if failing to get dataset metainfo from the config. + """ + try: + if dataset_mode == 'train': + dataset_cfg = config.train_dataloader.dataset + elif dataset_mode == 'val': + dataset_cfg = config.val_dataloader.dataset + elif dataset_mode == 'test': + dataset_cfg = config.test_dataloader.dataset + else: + raise ValueError( + f'Invalid dataset {dataset_mode} to get metainfo. ' + 'Should be one of "train", "val", or "test".') + + if 'metainfo' in dataset_cfg: + metainfo = dataset_cfg.metainfo + else: + import mmpose.datasets.datasets # noqa: F401, F403 + from mmpose.registry import DATASETS + + dataset_class = DATASETS.get(dataset_cfg.type) + metainfo = dataset_class.METAINFO + + metainfo = parse_pose_metainfo(metainfo) + + except AttributeError: + metainfo = None + + return metainfo + + +def init_model(config: Union[str, Path, Config], + checkpoint: Optional[str] = None, + device: str = 'cuda:0', + cfg_options: Optional[dict] = None) -> nn.Module: + """Initialize a pose estimator from a config file. + + Args: + config (str, :obj:`Path`, or :obj:`mmengine.Config`): Config file path, + :obj:`Path`, or the config object. + checkpoint (str, optional): Checkpoint path. If left as None, the model + will not load any weights. Defaults to ``None`` + device (str): The device where the anchors will be put on. + Defaults to ``'cuda:0'``. + cfg_options (dict, optional): Options to override some settings in + the used config. Defaults to ``None`` + + Returns: + nn.Module: The constructed pose estimator. + """ + + if isinstance(config, (str, Path)): + config = Config.fromfile(config) + elif not isinstance(config, Config): + raise TypeError('config must be a filename or Config object, ' + f'but got {type(config)}') + if cfg_options is not None: + config.merge_from_dict(cfg_options) + elif 'init_cfg' in config.model.backbone: + config.model.backbone.init_cfg = None + config.model.train_cfg = None + + # register all modules in mmpose into the registries + scope = config.get('default_scope', 'mmpose') + if scope is not None: + init_default_scope(scope) + + model = build_pose_estimator(config.model) + model = revert_sync_batchnorm(model) + # get dataset_meta in this priority: checkpoint > config > default (COCO) + dataset_meta = None + + if checkpoint is not None: + ckpt = load_checkpoint(model, checkpoint, map_location='cpu') + + if 'dataset_meta' in ckpt.get('meta', {}): + # checkpoint from mmpose 1.x + dataset_meta = ckpt['meta']['dataset_meta'] + + if dataset_meta is None: + dataset_meta = dataset_meta_from_config(config, dataset_mode='train') + + if dataset_meta is None: + warnings.simplefilter('once') + warnings.warn('Can not load dataset_meta from the checkpoint or the ' + 'model config. Use COCO metainfo by default.') + dataset_meta = parse_pose_metainfo( + dict(from_file='configs/_base_/datasets/coco.py')) + + model.dataset_meta = dataset_meta + + model.cfg = config # save the config in the model for convenience + model.to(device) + model.eval() + return model + + +def inference_topdown(model: nn.Module, + img: Union[np.ndarray, str], + bboxes: Optional[Union[List, np.ndarray]] = None, + bbox_format: str = 'xyxy') -> List[PoseDataSample]: + """Inference image with a top-down pose estimator. 
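
    A typical call passes detector outputs as ``bboxes`` (usage sketch; the
    image path and box values are only illustrative)::

        >>> results = inference_topdown(
        ...     model, 'demo.jpg',
        ...     bboxes=[[50, 60, 200, 320]], bbox_format='xyxy')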
+ + Args: + model (nn.Module): The top-down pose estimator + img (np.ndarray | str): The loaded image or image file to inference + bboxes (np.ndarray, optional): The bboxes in shape (N, 4), each row + represents a bbox. If not given, the entire image will be regarded + as a single bbox area. Defaults to ``None`` + bbox_format (str): The bbox format indicator. Options are ``'xywh'`` + and ``'xyxy'``. Defaults to ``'xyxy'`` + + Returns: + List[:obj:`PoseDataSample`]: The inference results. Specifically, the + predicted keypoints and scores are saved at + ``data_sample.pred_instances.keypoints`` and + ``data_sample.pred_instances.keypoint_scores``. + """ + scope = model.cfg.get('default_scope', 'mmpose') + if scope is not None: + init_default_scope(scope) + pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline) + + if bboxes is None or len(bboxes) == 0: + # get bbox from the image size + if isinstance(img, str): + w, h = Image.open(img).size + else: + h, w = img.shape[:2] + + bboxes = np.array([[0, 0, w, h]], dtype=np.float32) + else: + if isinstance(bboxes, list): + bboxes = np.array(bboxes) + + assert bbox_format in {'xyxy', 'xywh'}, \ + f'Invalid bbox_format "{bbox_format}".' + + if bbox_format == 'xywh': + bboxes = bbox_xywh2xyxy(bboxes) + + # construct batch data samples + data_list = [] + for bbox in bboxes: + if isinstance(img, str): + data_info = dict(img_path=img) + else: + data_info = dict(img=img) + data_info['bbox'] = bbox[None] # shape (1, 4) + data_info['bbox_score'] = np.ones(1, dtype=np.float32) # shape (1,) + data_info.update(model.dataset_meta) + data_list.append(pipeline(data_info)) + + if data_list: + # collate data list into a batch, which is a dict with following keys: + # batch['inputs']: a list of input images + # batch['data_samples']: a list of :obj:`PoseDataSample` + batch = pseudo_collate(data_list) + with torch.no_grad(): + results = model.test_step(batch) + else: + results = [] + + return results + + +def inference_bottomup(model: nn.Module, img: Union[np.ndarray, str]): + """Inference image with a bottom-up pose estimator. + + Args: + model (nn.Module): The bottom-up pose estimator + img (np.ndarray | str): The loaded image or image file to inference + + Returns: + List[:obj:`PoseDataSample`]: The inference results. Specifically, the + predicted keypoints and scores are saved at + ``data_sample.pred_instances.keypoints`` and + ``data_sample.pred_instances.keypoint_scores``. + """ + pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline) + + # prepare data batch + if isinstance(img, str): + data_info = dict(img_path=img) + else: + data_info = dict(img=img) + data_info.update(model.dataset_meta) + data = pipeline(data_info) + batch = pseudo_collate([data]) + + with torch.no_grad(): + results = model.test_step(batch) + + return results + + +def collect_multi_frames(video, frame_id, indices, online=False): + """Collect multi frames from the video. + + Args: + video (mmcv.VideoReader): A VideoReader of the input video file. + frame_id (int): index of the current frame + indices (list(int)): index offsets of the frames to collect + online (bool): inference mode, if set to True, can not use future + frame information. + + Returns: + list(ndarray): multi frames collected from the input video file. 
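
    Note:
        The current frame is always placed first in the returned list. In
        online mode, offsets that point to future frames are clamped back to
        the current frame index.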
+ """ + num_frames = len(video) + frames = [] + # put the current frame at first + frames.append(video[frame_id]) + # use multi frames for inference + for idx in indices: + # skip current frame + if idx == 0: + continue + support_idx = frame_id + idx + # online mode, can not use future frame information + if online: + support_idx = np.clip(support_idx, 0, frame_id) + else: + support_idx = np.clip(support_idx, 0, num_frames - 1) + frames.append(video[support_idx]) + + return frames diff --git a/mmpose/apis/inference_3d.py b/mmpose/apis/inference_3d.py index d5bb753945..5592c67f9f 100644 --- a/mmpose/apis/inference_3d.py +++ b/mmpose/apis/inference_3d.py @@ -1,339 +1,339 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numpy as np -import torch -from mmengine.dataset import Compose, pseudo_collate -from mmengine.registry import init_default_scope -from mmengine.structures import InstanceData - -from mmpose.structures import PoseDataSample - - -def convert_keypoint_definition(keypoints, pose_det_dataset, - pose_lift_dataset): - """Convert pose det dataset keypoints definition to pose lifter dataset - keypoints definition, so that they are compatible with the definitions - required for 3D pose lifting. - - Args: - keypoints (ndarray[N, K, 2 or 3]): 2D keypoints to be transformed. - pose_det_dataset, (str): Name of the dataset for 2D pose detector. - pose_lift_dataset (str): Name of the dataset for pose lifter model. - - Returns: - ndarray[K, 2 or 3]: the transformed 2D keypoints. - """ - assert pose_lift_dataset in [ - 'Human36mDataset'], '`pose_lift_dataset` should be ' \ - f'`Human36mDataset`, but got {pose_lift_dataset}.' - - coco_style_datasets = [ - 'CocoDataset', 'PoseTrack18VideoDataset', 'PoseTrack18Dataset' - ] - keypoints_new = np.zeros((keypoints.shape[0], 17, keypoints.shape[2]), - dtype=keypoints.dtype) - if pose_lift_dataset == 'Human36mDataset': - if pose_det_dataset in ['Human36mDataset']: - keypoints_new = keypoints - elif pose_det_dataset in coco_style_datasets: - # pelvis (root) is in the middle of l_hip and r_hip - keypoints_new[:, 0] = (keypoints[:, 11] + keypoints[:, 12]) / 2 - # thorax is in the middle of l_shoulder and r_shoulder - keypoints_new[:, 8] = (keypoints[:, 5] + keypoints[:, 6]) / 2 - # spine is in the middle of thorax and pelvis - keypoints_new[:, - 7] = (keypoints_new[:, 0] + keypoints_new[:, 8]) / 2 - # in COCO, head is in the middle of l_eye and r_eye - # in PoseTrack18, head is in the middle of head_bottom and head_top - keypoints_new[:, 10] = (keypoints[:, 1] + keypoints[:, 2]) / 2 - # rearrange other keypoints - keypoints_new[:, [1, 2, 3, 4, 5, 6, 9, 11, 12, 13, 14, 15, 16]] = \ - keypoints[:, [12, 14, 16, 11, 13, 15, 0, 5, 7, 9, 6, 8, 10]] - elif pose_det_dataset in ['AicDataset']: - # pelvis (root) is in the middle of l_hip and r_hip - keypoints_new[:, 0] = (keypoints[:, 9] + keypoints[:, 6]) / 2 - # thorax is in the middle of l_shoulder and r_shoulder - keypoints_new[:, 8] = (keypoints[:, 3] + keypoints[:, 0]) / 2 - # spine is in the middle of thorax and pelvis - keypoints_new[:, - 7] = (keypoints_new[:, 0] + keypoints_new[:, 8]) / 2 - # neck base (top end of neck) is 1/4 the way from - # neck (bottom end of neck) to head top - keypoints_new[:, 9] = (3 * keypoints[:, 13] + keypoints[:, 12]) / 4 - # head (spherical centre of head) is 7/12 the way from - # neck (bottom end of neck) to head top - keypoints_new[:, 10] = (5 * keypoints[:, 13] + - 7 * keypoints[:, 12]) / 12 - - keypoints_new[:, [1, 2, 3, 4, 5, 6, 11, 12, 13, 14, 15, 16]] = \ - 
keypoints[:, [6, 7, 8, 9, 10, 11, 3, 4, 5, 0, 1, 2]] - elif pose_det_dataset in ['CrowdPoseDataset']: - # pelvis (root) is in the middle of l_hip and r_hip - keypoints_new[:, 0] = (keypoints[:, 6] + keypoints[:, 7]) / 2 - # thorax is in the middle of l_shoulder and r_shoulder - keypoints_new[:, 8] = (keypoints[:, 0] + keypoints[:, 1]) / 2 - # spine is in the middle of thorax and pelvis - keypoints_new[:, - 7] = (keypoints_new[:, 0] + keypoints_new[:, 8]) / 2 - # neck base (top end of neck) is 1/4 the way from - # neck (bottom end of neck) to head top - keypoints_new[:, 9] = (3 * keypoints[:, 13] + keypoints[:, 12]) / 4 - # head (spherical centre of head) is 7/12 the way from - # neck (bottom end of neck) to head top - keypoints_new[:, 10] = (5 * keypoints[:, 13] + - 7 * keypoints[:, 12]) / 12 - - keypoints_new[:, [1, 2, 3, 4, 5, 6, 11, 12, 13, 14, 15, 16]] = \ - keypoints[:, [7, 9, 11, 6, 8, 10, 0, 2, 4, 1, 3, 5]] - else: - raise NotImplementedError( - f'unsupported conversion between {pose_lift_dataset} and ' - f'{pose_det_dataset}') - - return keypoints_new - - -def extract_pose_sequence(pose_results, frame_idx, causal, seq_len, step=1): - """Extract the target frame from 2D pose results, and pad the sequence to a - fixed length. - - Args: - pose_results (List[List[:obj:`PoseDataSample`]]): Multi-frame pose - detection results stored in a list. - frame_idx (int): The index of the frame in the original video. - causal (bool): If True, the target frame is the last frame in - a sequence. Otherwise, the target frame is in the middle of - a sequence. - seq_len (int): The number of frames in the input sequence. - step (int): Step size to extract frames from the video. - - Returns: - List[List[:obj:`PoseDataSample`]]: Multi-frame pose detection results - stored in a nested list with a length of seq_len. - """ - if causal: - frames_left = seq_len - 1 - frames_right = 0 - else: - frames_left = (seq_len - 1) // 2 - frames_right = frames_left - num_frames = len(pose_results) - - # get the padded sequence - pad_left = max(0, frames_left - frame_idx // step) - pad_right = max(0, frames_right - (num_frames - 1 - frame_idx) // step) - start = max(frame_idx % step, frame_idx - frames_left * step) - end = min(num_frames - (num_frames - 1 - frame_idx) % step, - frame_idx + frames_right * step + 1) - pose_results_seq = [pose_results[0]] * pad_left + \ - pose_results[start:end:step] + [pose_results[-1]] * pad_right - return pose_results_seq - - -def collate_pose_sequence(pose_results_2d, - with_track_id=True, - target_frame=-1): - """Reorganize multi-frame pose detection results into individual pose - sequences. - - Note: - - The temporal length of the pose detection results: T - - The number of the person instances: N - - The number of the keypoints: K - - The channel number of each keypoint: C - - Args: - pose_results_2d (List[List[:obj:`PoseDataSample`]]): Multi-frame pose - detection results stored in a nested list. Each element of the - outer list is the pose detection results of a single frame, and - each element of the inner list is the pose information of one - person, which contains: - - - keypoints (ndarray[K, 2 or 3]): x, y, [score] - - track_id (int): unique id of each person, required when - ``with_track_id==True``` - - with_track_id (bool): If True, the element in pose_results is expected - to contain "track_id", which will be used to gather the pose - sequence of a person from multiple frames. 
Otherwise, the pose - results in each frame are expected to have a consistent number and - order of identities. Default is True. - target_frame (int): The index of the target frame. Default: -1. - - Returns: - List[:obj:`PoseDataSample`]: Indivisual pose sequence in with length N. - """ - T = len(pose_results_2d) - assert T > 0 - - target_frame = (T + target_frame) % T # convert negative index to positive - - N = len( - pose_results_2d[target_frame]) # use identities in the target frame - if N == 0: - return [] - - B, K, C = pose_results_2d[target_frame][0].pred_instances.keypoints.shape - - track_ids = None - if with_track_id: - track_ids = [res.track_id for res in pose_results_2d[target_frame]] - - pose_sequences = [] - for idx in range(N): - pose_seq = PoseDataSample() - gt_instances = InstanceData() - pred_instances = InstanceData() - - for k in pose_results_2d[target_frame][idx].gt_instances.keys(): - gt_instances.set_field( - pose_results_2d[target_frame][idx].gt_instances[k], k) - for k in pose_results_2d[target_frame][idx].pred_instances.keys(): - if k != 'keypoints': - pred_instances.set_field( - pose_results_2d[target_frame][idx].pred_instances[k], k) - pose_seq.pred_instances = pred_instances - pose_seq.gt_instances = gt_instances - - if not with_track_id: - pose_seq.pred_instances.keypoints = np.stack([ - frame[idx].pred_instances.keypoints - for frame in pose_results_2d - ], - axis=1) - else: - keypoints = np.zeros((B, T, K, C), dtype=np.float32) - keypoints[:, target_frame] = pose_results_2d[target_frame][ - idx].pred_instances.keypoints - # find the left most frame containing track_ids[idx] - for frame_idx in range(target_frame - 1, -1, -1): - contains_idx = False - for res in pose_results_2d[frame_idx]: - if res.track_id == track_ids[idx]: - keypoints[:, frame_idx] = res.pred_instances.keypoints - contains_idx = True - break - if not contains_idx: - # replicate the left most frame - keypoints[:, :frame_idx + 1] = keypoints[:, frame_idx + 1] - break - # find the right most frame containing track_idx[idx] - for frame_idx in range(target_frame + 1, T): - contains_idx = False - for res in pose_results_2d[frame_idx]: - if res.track_id == track_ids[idx]: - keypoints[:, frame_idx] = res.pred_instances.keypoints - contains_idx = True - break - if not contains_idx: - # replicate the right most frame - keypoints[:, frame_idx + 1:] = keypoints[:, frame_idx] - break - pose_seq.pred_instances.keypoints = keypoints - pose_sequences.append(pose_seq) - - return pose_sequences - - -def inference_pose_lifter_model(model, - pose_results_2d, - with_track_id=True, - image_size=None, - norm_pose_2d=False): - """Inference 3D pose from 2D pose sequences using a pose lifter model. - - Args: - model (nn.Module): The loaded pose lifter model - pose_results_2d (List[List[:obj:`PoseDataSample`]]): The 2D pose - sequences stored in a nested list. - with_track_id: If True, the element in pose_results_2d is expected to - contain "track_id", which will be used to gather the pose sequence - of a person from multiple frames. Otherwise, the pose results in - each frame are expected to have a consistent number and order of - identities. Default is True. - image_size (tuple|list): image width, image height. If None, image size - will not be contained in dict ``data``. - norm_pose_2d (bool): If True, scale the bbox (along with the 2D - pose) to the average bbox scale of the dataset, and move the bbox - (along with the 2D pose) to the average bbox center of the dataset. 
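
    Note:
        When ``norm_pose_2d`` is True, the model's ``dataset_meta`` is expected
        to contain ``stats_info`` with ``bbox_center`` and ``bbox_scale``, which
        provide the dataset-average bbox used for the normalization.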
- - Returns: - List[:obj:`PoseDataSample`]: 3D pose inference results. Specifically, - the predicted keypoints and scores are saved at - ``data_sample.pred_instances.keypoints_3d``. - """ - init_default_scope(model.cfg.get('default_scope', 'mmpose')) - pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline) - - causal = model.cfg.test_dataloader.dataset.get('causal', False) - target_idx = -1 if causal else len(pose_results_2d) // 2 - - dataset_info = model.dataset_meta - if dataset_info is not None: - if 'stats_info' in dataset_info: - bbox_center = dataset_info['stats_info']['bbox_center'] - bbox_scale = dataset_info['stats_info']['bbox_scale'] - else: - bbox_center = None - bbox_scale = None - - for i, pose_res in enumerate(pose_results_2d): - for j, data_sample in enumerate(pose_res): - kpts = data_sample.pred_instances.keypoints - bboxes = data_sample.pred_instances.bboxes - keypoints = [] - for k in range(len(kpts)): - kpt = kpts[k] - if norm_pose_2d: - bbox = bboxes[k] - center = np.array([[(bbox[0] + bbox[2]) / 2, - (bbox[1] + bbox[3]) / 2]]) - scale = max(bbox[2] - bbox[0], bbox[3] - bbox[1]) - keypoints.append((kpt[:, :2] - center) / scale * - bbox_scale + bbox_center) - else: - keypoints.append(kpt[:, :2]) - pose_results_2d[i][j].pred_instances.keypoints = np.array( - keypoints) - - pose_sequences_2d = collate_pose_sequence(pose_results_2d, with_track_id, - target_idx) - - if not pose_sequences_2d: - return [] - - data_list = [] - for i, pose_seq in enumerate(pose_sequences_2d): - data_info = dict() - - keypoints_2d = pose_seq.pred_instances.keypoints - keypoints_2d = np.squeeze( - keypoints_2d, axis=0) if keypoints_2d.ndim == 4 else keypoints_2d - - T, K, C = keypoints_2d.shape - - data_info['keypoints'] = keypoints_2d - data_info['keypoints_visible'] = np.ones(( - T, - K, - ), dtype=np.float32) - data_info['lifting_target'] = np.zeros((K, 3), dtype=np.float32) - data_info['lifting_target_visible'] = np.ones((K, 1), dtype=np.float32) - - if image_size is not None: - assert len(image_size) == 2 - data_info['camera_param'] = dict(w=image_size[0], h=image_size[1]) - - data_info.update(model.dataset_meta) - data_list.append(pipeline(data_info)) - - if data_list: - # collate data list into a batch, which is a dict with following keys: - # batch['inputs']: a list of input images - # batch['data_samples']: a list of :obj:`PoseDataSample` - batch = pseudo_collate(data_list) - with torch.no_grad(): - results = model.test_step(batch) - else: - results = [] - - return results +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +from mmengine.dataset import Compose, pseudo_collate +from mmengine.registry import init_default_scope +from mmengine.structures import InstanceData + +from mmpose.structures import PoseDataSample + + +def convert_keypoint_definition(keypoints, pose_det_dataset, + pose_lift_dataset): + """Convert pose det dataset keypoints definition to pose lifter dataset + keypoints definition, so that they are compatible with the definitions + required for 3D pose lifting. + + Args: + keypoints (ndarray[N, K, 2 or 3]): 2D keypoints to be transformed. + pose_det_dataset, (str): Name of the dataset for 2D pose detector. + pose_lift_dataset (str): Name of the dataset for pose lifter model. + + Returns: + ndarray[K, 2 or 3]: the transformed 2D keypoints. + """ + assert pose_lift_dataset in [ + 'Human36mDataset'], '`pose_lift_dataset` should be ' \ + f'`Human36mDataset`, but got {pose_lift_dataset}.' 
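    # The branches below map COCO-style, AIC and CrowdPose keypoint layouts
    # onto the 17-joint Human3.6M layout, synthesizing joints without a direct
    # counterpart (pelvis, thorax, spine, head/neck base) from neighboring
    # keypoints before re-ordering the rest.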
+ + coco_style_datasets = [ + 'CocoDataset', 'PoseTrack18VideoDataset', 'PoseTrack18Dataset' + ] + keypoints_new = np.zeros((keypoints.shape[0], 17, keypoints.shape[2]), + dtype=keypoints.dtype) + if pose_lift_dataset == 'Human36mDataset': + if pose_det_dataset in ['Human36mDataset']: + keypoints_new = keypoints + elif pose_det_dataset in coco_style_datasets: + # pelvis (root) is in the middle of l_hip and r_hip + keypoints_new[:, 0] = (keypoints[:, 11] + keypoints[:, 12]) / 2 + # thorax is in the middle of l_shoulder and r_shoulder + keypoints_new[:, 8] = (keypoints[:, 5] + keypoints[:, 6]) / 2 + # spine is in the middle of thorax and pelvis + keypoints_new[:, + 7] = (keypoints_new[:, 0] + keypoints_new[:, 8]) / 2 + # in COCO, head is in the middle of l_eye and r_eye + # in PoseTrack18, head is in the middle of head_bottom and head_top + keypoints_new[:, 10] = (keypoints[:, 1] + keypoints[:, 2]) / 2 + # rearrange other keypoints + keypoints_new[:, [1, 2, 3, 4, 5, 6, 9, 11, 12, 13, 14, 15, 16]] = \ + keypoints[:, [12, 14, 16, 11, 13, 15, 0, 5, 7, 9, 6, 8, 10]] + elif pose_det_dataset in ['AicDataset']: + # pelvis (root) is in the middle of l_hip and r_hip + keypoints_new[:, 0] = (keypoints[:, 9] + keypoints[:, 6]) / 2 + # thorax is in the middle of l_shoulder and r_shoulder + keypoints_new[:, 8] = (keypoints[:, 3] + keypoints[:, 0]) / 2 + # spine is in the middle of thorax and pelvis + keypoints_new[:, + 7] = (keypoints_new[:, 0] + keypoints_new[:, 8]) / 2 + # neck base (top end of neck) is 1/4 the way from + # neck (bottom end of neck) to head top + keypoints_new[:, 9] = (3 * keypoints[:, 13] + keypoints[:, 12]) / 4 + # head (spherical centre of head) is 7/12 the way from + # neck (bottom end of neck) to head top + keypoints_new[:, 10] = (5 * keypoints[:, 13] + + 7 * keypoints[:, 12]) / 12 + + keypoints_new[:, [1, 2, 3, 4, 5, 6, 11, 12, 13, 14, 15, 16]] = \ + keypoints[:, [6, 7, 8, 9, 10, 11, 3, 4, 5, 0, 1, 2]] + elif pose_det_dataset in ['CrowdPoseDataset']: + # pelvis (root) is in the middle of l_hip and r_hip + keypoints_new[:, 0] = (keypoints[:, 6] + keypoints[:, 7]) / 2 + # thorax is in the middle of l_shoulder and r_shoulder + keypoints_new[:, 8] = (keypoints[:, 0] + keypoints[:, 1]) / 2 + # spine is in the middle of thorax and pelvis + keypoints_new[:, + 7] = (keypoints_new[:, 0] + keypoints_new[:, 8]) / 2 + # neck base (top end of neck) is 1/4 the way from + # neck (bottom end of neck) to head top + keypoints_new[:, 9] = (3 * keypoints[:, 13] + keypoints[:, 12]) / 4 + # head (spherical centre of head) is 7/12 the way from + # neck (bottom end of neck) to head top + keypoints_new[:, 10] = (5 * keypoints[:, 13] + + 7 * keypoints[:, 12]) / 12 + + keypoints_new[:, [1, 2, 3, 4, 5, 6, 11, 12, 13, 14, 15, 16]] = \ + keypoints[:, [7, 9, 11, 6, 8, 10, 0, 2, 4, 1, 3, 5]] + else: + raise NotImplementedError( + f'unsupported conversion between {pose_lift_dataset} and ' + f'{pose_det_dataset}') + + return keypoints_new + + +def extract_pose_sequence(pose_results, frame_idx, causal, seq_len, step=1): + """Extract the target frame from 2D pose results, and pad the sequence to a + fixed length. + + Args: + pose_results (List[List[:obj:`PoseDataSample`]]): Multi-frame pose + detection results stored in a list. + frame_idx (int): The index of the frame in the original video. + causal (bool): If True, the target frame is the last frame in + a sequence. Otherwise, the target frame is in the middle of + a sequence. + seq_len (int): The number of frames in the input sequence. 
+ step (int): Step size to extract frames from the video. + + Returns: + List[List[:obj:`PoseDataSample`]]: Multi-frame pose detection results + stored in a nested list with a length of seq_len. + """ + if causal: + frames_left = seq_len - 1 + frames_right = 0 + else: + frames_left = (seq_len - 1) // 2 + frames_right = frames_left + num_frames = len(pose_results) + + # get the padded sequence + pad_left = max(0, frames_left - frame_idx // step) + pad_right = max(0, frames_right - (num_frames - 1 - frame_idx) // step) + start = max(frame_idx % step, frame_idx - frames_left * step) + end = min(num_frames - (num_frames - 1 - frame_idx) % step, + frame_idx + frames_right * step + 1) + pose_results_seq = [pose_results[0]] * pad_left + \ + pose_results[start:end:step] + [pose_results[-1]] * pad_right + return pose_results_seq + + +def collate_pose_sequence(pose_results_2d, + with_track_id=True, + target_frame=-1): + """Reorganize multi-frame pose detection results into individual pose + sequences. + + Note: + - The temporal length of the pose detection results: T + - The number of the person instances: N + - The number of the keypoints: K + - The channel number of each keypoint: C + + Args: + pose_results_2d (List[List[:obj:`PoseDataSample`]]): Multi-frame pose + detection results stored in a nested list. Each element of the + outer list is the pose detection results of a single frame, and + each element of the inner list is the pose information of one + person, which contains: + + - keypoints (ndarray[K, 2 or 3]): x, y, [score] + - track_id (int): unique id of each person, required when + ``with_track_id==True``` + + with_track_id (bool): If True, the element in pose_results is expected + to contain "track_id", which will be used to gather the pose + sequence of a person from multiple frames. Otherwise, the pose + results in each frame are expected to have a consistent number and + order of identities. Default is True. + target_frame (int): The index of the target frame. Default: -1. + + Returns: + List[:obj:`PoseDataSample`]: Indivisual pose sequence in with length N. 
+ """ + T = len(pose_results_2d) + assert T > 0 + + target_frame = (T + target_frame) % T # convert negative index to positive + + N = len( + pose_results_2d[target_frame]) # use identities in the target frame + if N == 0: + return [] + + B, K, C = pose_results_2d[target_frame][0].pred_instances.keypoints.shape + + track_ids = None + if with_track_id: + track_ids = [res.track_id for res in pose_results_2d[target_frame]] + + pose_sequences = [] + for idx in range(N): + pose_seq = PoseDataSample() + gt_instances = InstanceData() + pred_instances = InstanceData() + + for k in pose_results_2d[target_frame][idx].gt_instances.keys(): + gt_instances.set_field( + pose_results_2d[target_frame][idx].gt_instances[k], k) + for k in pose_results_2d[target_frame][idx].pred_instances.keys(): + if k != 'keypoints': + pred_instances.set_field( + pose_results_2d[target_frame][idx].pred_instances[k], k) + pose_seq.pred_instances = pred_instances + pose_seq.gt_instances = gt_instances + + if not with_track_id: + pose_seq.pred_instances.keypoints = np.stack([ + frame[idx].pred_instances.keypoints + for frame in pose_results_2d + ], + axis=1) + else: + keypoints = np.zeros((B, T, K, C), dtype=np.float32) + keypoints[:, target_frame] = pose_results_2d[target_frame][ + idx].pred_instances.keypoints + # find the left most frame containing track_ids[idx] + for frame_idx in range(target_frame - 1, -1, -1): + contains_idx = False + for res in pose_results_2d[frame_idx]: + if res.track_id == track_ids[idx]: + keypoints[:, frame_idx] = res.pred_instances.keypoints + contains_idx = True + break + if not contains_idx: + # replicate the left most frame + keypoints[:, :frame_idx + 1] = keypoints[:, frame_idx + 1] + break + # find the right most frame containing track_idx[idx] + for frame_idx in range(target_frame + 1, T): + contains_idx = False + for res in pose_results_2d[frame_idx]: + if res.track_id == track_ids[idx]: + keypoints[:, frame_idx] = res.pred_instances.keypoints + contains_idx = True + break + if not contains_idx: + # replicate the right most frame + keypoints[:, frame_idx + 1:] = keypoints[:, frame_idx] + break + pose_seq.pred_instances.keypoints = keypoints + pose_sequences.append(pose_seq) + + return pose_sequences + + +def inference_pose_lifter_model(model, + pose_results_2d, + with_track_id=True, + image_size=None, + norm_pose_2d=False): + """Inference 3D pose from 2D pose sequences using a pose lifter model. + + Args: + model (nn.Module): The loaded pose lifter model + pose_results_2d (List[List[:obj:`PoseDataSample`]]): The 2D pose + sequences stored in a nested list. + with_track_id: If True, the element in pose_results_2d is expected to + contain "track_id", which will be used to gather the pose sequence + of a person from multiple frames. Otherwise, the pose results in + each frame are expected to have a consistent number and order of + identities. Default is True. + image_size (tuple|list): image width, image height. If None, image size + will not be contained in dict ``data``. + norm_pose_2d (bool): If True, scale the bbox (along with the 2D + pose) to the average bbox scale of the dataset, and move the bbox + (along with the 2D pose) to the average bbox center of the dataset. + + Returns: + List[:obj:`PoseDataSample`]: 3D pose inference results. Specifically, + the predicted keypoints and scores are saved at + ``data_sample.pred_instances.keypoints_3d``. 
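
    Example::

        >>> # Usage sketch: ``lifter`` is a loaded pose lifter and
        >>> # ``pose_results_2d`` holds per-frame 2D results whose keypoint
        >>> # definition already matches the lifter's dataset.
        >>> results_3d = inference_pose_lifter_model(
        ...     lifter, pose_results_2d, with_track_id=True,
        ...     image_size=(1920, 1080), norm_pose_2d=False)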
+ """ + init_default_scope(model.cfg.get('default_scope', 'mmpose')) + pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline) + + causal = model.cfg.test_dataloader.dataset.get('causal', False) + target_idx = -1 if causal else len(pose_results_2d) // 2 + + dataset_info = model.dataset_meta + if dataset_info is not None: + if 'stats_info' in dataset_info: + bbox_center = dataset_info['stats_info']['bbox_center'] + bbox_scale = dataset_info['stats_info']['bbox_scale'] + else: + bbox_center = None + bbox_scale = None + + for i, pose_res in enumerate(pose_results_2d): + for j, data_sample in enumerate(pose_res): + kpts = data_sample.pred_instances.keypoints + bboxes = data_sample.pred_instances.bboxes + keypoints = [] + for k in range(len(kpts)): + kpt = kpts[k] + if norm_pose_2d: + bbox = bboxes[k] + center = np.array([[(bbox[0] + bbox[2]) / 2, + (bbox[1] + bbox[3]) / 2]]) + scale = max(bbox[2] - bbox[0], bbox[3] - bbox[1]) + keypoints.append((kpt[:, :2] - center) / scale * + bbox_scale + bbox_center) + else: + keypoints.append(kpt[:, :2]) + pose_results_2d[i][j].pred_instances.keypoints = np.array( + keypoints) + + pose_sequences_2d = collate_pose_sequence(pose_results_2d, with_track_id, + target_idx) + + if not pose_sequences_2d: + return [] + + data_list = [] + for i, pose_seq in enumerate(pose_sequences_2d): + data_info = dict() + + keypoints_2d = pose_seq.pred_instances.keypoints + keypoints_2d = np.squeeze( + keypoints_2d, axis=0) if keypoints_2d.ndim == 4 else keypoints_2d + + T, K, C = keypoints_2d.shape + + data_info['keypoints'] = keypoints_2d + data_info['keypoints_visible'] = np.ones(( + T, + K, + ), dtype=np.float32) + data_info['lifting_target'] = np.zeros((K, 3), dtype=np.float32) + data_info['lifting_target_visible'] = np.ones((K, 1), dtype=np.float32) + + if image_size is not None: + assert len(image_size) == 2 + data_info['camera_param'] = dict(w=image_size[0], h=image_size[1]) + + data_info.update(model.dataset_meta) + data_list.append(pipeline(data_info)) + + if data_list: + # collate data list into a batch, which is a dict with following keys: + # batch['inputs']: a list of input images + # batch['data_samples']: a list of :obj:`PoseDataSample` + batch = pseudo_collate(data_list) + with torch.no_grad(): + results = model.test_step(batch) + else: + results = [] + + return results diff --git a/mmpose/apis/inference_tracking.py b/mmpose/apis/inference_tracking.py index c823adcfc7..8e8ba5e712 100644 --- a/mmpose/apis/inference_tracking.py +++ b/mmpose/apis/inference_tracking.py @@ -1,103 +1,103 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import numpy as np - -from mmpose.evaluation.functional.nms import oks_iou - - -def _compute_iou(bboxA, bboxB): - """Compute the Intersection over Union (IoU) between two boxes . - - Args: - bboxA (list): The first bbox info (left, top, right, bottom, score). - bboxB (list): The second bbox info (left, top, right, bottom, score). - - Returns: - float: The IoU value. 
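
    Example::

        >>> # Two 2x2 boxes overlapping on half of their area (the trailing
        >>> # score element is ignored by this helper):
        >>> round(_compute_iou([0, 0, 2, 2, 1.0], [1, 0, 3, 2, 1.0]), 3)
        0.333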
- """ - - x1 = max(bboxA[0], bboxB[0]) - y1 = max(bboxA[1], bboxB[1]) - x2 = min(bboxA[2], bboxB[2]) - y2 = min(bboxA[3], bboxB[3]) - - inter_area = max(0, x2 - x1) * max(0, y2 - y1) - - bboxA_area = (bboxA[2] - bboxA[0]) * (bboxA[3] - bboxA[1]) - bboxB_area = (bboxB[2] - bboxB[0]) * (bboxB[3] - bboxB[1]) - union_area = float(bboxA_area + bboxB_area - inter_area) - if union_area == 0: - union_area = 1e-5 - warnings.warn('union_area=0 is unexpected') - - iou = inter_area / union_area - - return iou - - -def _track_by_iou(res, results_last, thr): - """Get track id using IoU tracking greedily.""" - - bbox = list(np.squeeze(res.pred_instances.bboxes, axis=0)) - - max_iou_score = -1 - max_index = -1 - match_result = {} - for index, res_last in enumerate(results_last): - bbox_last = list(np.squeeze(res_last.pred_instances.bboxes, axis=0)) - - iou_score = _compute_iou(bbox, bbox_last) - if iou_score > max_iou_score: - max_iou_score = iou_score - max_index = index - - if max_iou_score > thr: - track_id = results_last[max_index].track_id - match_result = results_last[max_index] - del results_last[max_index] - else: - track_id = -1 - - return track_id, results_last, match_result - - -def _track_by_oks(res, results_last, thr, sigmas=None): - """Get track id using OKS tracking greedily.""" - keypoint = np.concatenate((res.pred_instances.keypoints, - res.pred_instances.keypoint_scores[:, :, None]), - axis=2) - keypoint = np.squeeze(keypoint, axis=0).reshape((-1)) - area = np.squeeze(res.pred_instances.areas, axis=0) - max_index = -1 - match_result = {} - - if len(results_last) == 0: - return -1, results_last, match_result - - keypoints_last = np.array([ - np.squeeze( - np.concatenate( - (res_last.pred_instances.keypoints, - res_last.pred_instances.keypoint_scores[:, :, None]), - axis=2), - axis=0).reshape((-1)) for res_last in results_last - ]) - area_last = np.array([ - np.squeeze(res_last.pred_instances.areas, axis=0) - for res_last in results_last - ]) - - oks_score = oks_iou( - keypoint, keypoints_last, area, area_last, sigmas=sigmas) - - max_index = np.argmax(oks_score) - - if oks_score[max_index] > thr: - track_id = results_last[max_index].track_id - match_result = results_last[max_index] - del results_last[max_index] - else: - track_id = -1 - - return track_id, results_last, match_result +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import numpy as np + +from mmpose.evaluation.functional.nms import oks_iou + + +def _compute_iou(bboxA, bboxB): + """Compute the Intersection over Union (IoU) between two boxes . + + Args: + bboxA (list): The first bbox info (left, top, right, bottom, score). + bboxB (list): The second bbox info (left, top, right, bottom, score). + + Returns: + float: The IoU value. 
+ """ + + x1 = max(bboxA[0], bboxB[0]) + y1 = max(bboxA[1], bboxB[1]) + x2 = min(bboxA[2], bboxB[2]) + y2 = min(bboxA[3], bboxB[3]) + + inter_area = max(0, x2 - x1) * max(0, y2 - y1) + + bboxA_area = (bboxA[2] - bboxA[0]) * (bboxA[3] - bboxA[1]) + bboxB_area = (bboxB[2] - bboxB[0]) * (bboxB[3] - bboxB[1]) + union_area = float(bboxA_area + bboxB_area - inter_area) + if union_area == 0: + union_area = 1e-5 + warnings.warn('union_area=0 is unexpected') + + iou = inter_area / union_area + + return iou + + +def _track_by_iou(res, results_last, thr): + """Get track id using IoU tracking greedily.""" + + bbox = list(np.squeeze(res.pred_instances.bboxes, axis=0)) + + max_iou_score = -1 + max_index = -1 + match_result = {} + for index, res_last in enumerate(results_last): + bbox_last = list(np.squeeze(res_last.pred_instances.bboxes, axis=0)) + + iou_score = _compute_iou(bbox, bbox_last) + if iou_score > max_iou_score: + max_iou_score = iou_score + max_index = index + + if max_iou_score > thr: + track_id = results_last[max_index].track_id + match_result = results_last[max_index] + del results_last[max_index] + else: + track_id = -1 + + return track_id, results_last, match_result + + +def _track_by_oks(res, results_last, thr, sigmas=None): + """Get track id using OKS tracking greedily.""" + keypoint = np.concatenate((res.pred_instances.keypoints, + res.pred_instances.keypoint_scores[:, :, None]), + axis=2) + keypoint = np.squeeze(keypoint, axis=0).reshape((-1)) + area = np.squeeze(res.pred_instances.areas, axis=0) + max_index = -1 + match_result = {} + + if len(results_last) == 0: + return -1, results_last, match_result + + keypoints_last = np.array([ + np.squeeze( + np.concatenate( + (res_last.pred_instances.keypoints, + res_last.pred_instances.keypoint_scores[:, :, None]), + axis=2), + axis=0).reshape((-1)) for res_last in results_last + ]) + area_last = np.array([ + np.squeeze(res_last.pred_instances.areas, axis=0) + for res_last in results_last + ]) + + oks_score = oks_iou( + keypoint, keypoints_last, area, area_last, sigmas=sigmas) + + max_index = np.argmax(oks_score) + + if oks_score[max_index] > thr: + track_id = results_last[max_index].track_id + match_result = results_last[max_index] + del results_last[max_index] + else: + track_id = -1 + + return track_id, results_last, match_result diff --git a/mmpose/apis/inferencers/__init__.py b/mmpose/apis/inferencers/__init__.py index 5955d79da9..f42179a481 100644 --- a/mmpose/apis/inferencers/__init__.py +++ b/mmpose/apis/inferencers/__init__.py @@ -1,10 +1,10 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .mmpose_inferencer import MMPoseInferencer -from .pose2d_inferencer import Pose2DInferencer -from .pose3d_inferencer import Pose3DInferencer -from .utils import get_model_aliases - -__all__ = [ - 'Pose2DInferencer', 'MMPoseInferencer', 'get_model_aliases', - 'Pose3DInferencer' -] +# Copyright (c) OpenMMLab. All rights reserved. +from .mmpose_inferencer import MMPoseInferencer +from .pose2d_inferencer import Pose2DInferencer +from .pose3d_inferencer import Pose3DInferencer +from .utils import get_model_aliases + +__all__ = [ + 'Pose2DInferencer', 'MMPoseInferencer', 'get_model_aliases', + 'Pose3DInferencer' +] diff --git a/mmpose/apis/inferencers/base_mmpose_inferencer.py b/mmpose/apis/inferencers/base_mmpose_inferencer.py index bed28b90d7..c58f5ba5a4 100644 --- a/mmpose/apis/inferencers/base_mmpose_inferencer.py +++ b/mmpose/apis/inferencers/base_mmpose_inferencer.py @@ -1,469 +1,469 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import mimetypes -import os -import warnings -from collections import defaultdict -from typing import (Callable, Dict, Generator, Iterable, List, Optional, - Sequence, Union) - -import cv2 -import mmcv -import mmengine -import numpy as np -import torch.nn as nn -from mmengine.config import Config, ConfigDict -from mmengine.dataset import Compose -from mmengine.fileio import (get_file_backend, isdir, join_path, - list_dir_or_file) -from mmengine.infer.infer import BaseInferencer -from mmengine.registry import init_default_scope -from mmengine.runner.checkpoint import _load_checkpoint_to_model -from mmengine.structures import InstanceData -from mmengine.utils import mkdir_or_exist - -from mmpose.apis.inference import dataset_meta_from_config -from mmpose.structures import PoseDataSample, split_instances - -InstanceList = List[InstanceData] -InputType = Union[str, np.ndarray] -InputsType = Union[InputType, Sequence[InputType]] -PredType = Union[InstanceData, InstanceList] -ImgType = Union[np.ndarray, Sequence[np.ndarray]] -ConfigType = Union[Config, ConfigDict] -ResType = Union[Dict, List[Dict], InstanceData, List[InstanceData]] - - -class BaseMMPoseInferencer(BaseInferencer): - """The base class for MMPose inferencers.""" - - preprocess_kwargs: set = {'bbox_thr', 'nms_thr', 'bboxes'} - forward_kwargs: set = set() - visualize_kwargs: set = { - 'return_vis', 'show', 'wait_time', 'draw_bbox', 'radius', 'thickness', - 'kpt_thr', 'vis_out_dir', 'black_background' - } - postprocess_kwargs: set = {'pred_out_dir'} - - def _load_weights_to_model(self, model: nn.Module, - checkpoint: Optional[dict], - cfg: Optional[ConfigType]) -> None: - """Loading model weights and meta information from cfg and checkpoint. - - Subclasses could override this method to load extra meta information - from ``checkpoint`` and ``cfg`` to model. - - Args: - model (nn.Module): Model to load weights and meta information. - checkpoint (dict, optional): The loaded checkpoint. - cfg (Config or ConfigDict, optional): The loaded config. - """ - if checkpoint is not None: - _load_checkpoint_to_model(model, checkpoint) - checkpoint_meta = checkpoint.get('meta', {}) - # save the dataset_meta in the model for convenience - if 'dataset_meta' in checkpoint_meta: - # mmpose 1.x - model.dataset_meta = checkpoint_meta['dataset_meta'] - else: - warnings.warn( - 'dataset_meta are not saved in the checkpoint\'s ' - 'meta data, load via config.') - model.dataset_meta = dataset_meta_from_config( - cfg, dataset_mode='train') - else: - warnings.warn('Checkpoint is not loaded, and the inference ' - 'result is calculated by the randomly initialized ' - 'model!') - model.dataset_meta = dataset_meta_from_config( - cfg, dataset_mode='train') - - def _inputs_to_list(self, inputs: InputsType) -> Iterable: - """Preprocess the inputs to a list. - - Preprocess inputs to a list according to its type: - - - list or tuple: return inputs - - str: - - Directory path: return all files in the directory - - other cases: return a list containing the string. The string - could be a path to file, a url or other types of string - according to the task. - - Args: - inputs (InputsType): Inputs for the inferencer. - - Returns: - list: List of input for the :meth:`preprocess`. 
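        For example (an illustrative sketch, assuming the paths exist):

        - a directory such as ``'tests/data/coco/'`` yields a sorted list of
          the image files it contains
        - a video path such as ``'demo.mp4'`` yields an ``mmcv.VideoReader``
          and sets ``self._video_input`` to ``True``
        - a single ``np.ndarray`` is wrapped into a one-element list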
- """ - self._video_input = False - - if isinstance(inputs, str): - backend = get_file_backend(inputs) - if hasattr(backend, 'isdir') and isdir(inputs): - # Backends like HttpsBackend do not implement `isdir`, so only - # those backends that implement `isdir` could accept the - # inputs as a directory - filepath_list = [ - join_path(inputs, fname) - for fname in list_dir_or_file(inputs, list_dir=False) - ] - inputs = [] - for filepath in filepath_list: - input_type = mimetypes.guess_type(filepath)[0].split( - '/')[0] - if input_type == 'image': - inputs.append(filepath) - inputs.sort() - else: - # if inputs is a path to a video file, it will be converted - # to a list containing separated frame filenames - input_type = mimetypes.guess_type(inputs)[0].split('/')[0] - if input_type == 'video': - self._video_input = True - video = mmcv.VideoReader(inputs) - self.video_info = dict( - fps=video.fps, - name=os.path.basename(inputs), - writer=None, - width=video.width, - height=video.height, - predictions=[]) - inputs = video - elif input_type == 'image': - inputs = [inputs] - else: - raise ValueError(f'Expected input to be an image, video, ' - f'or folder, but received {inputs} of ' - f'type {input_type}.') - - elif isinstance(inputs, np.ndarray): - inputs = [inputs] - - return inputs - - def _get_webcam_inputs(self, inputs: str) -> Generator: - """Sets up and returns a generator function that reads frames from a - webcam input. The generator function returns a new frame each time it - is iterated over. - - Args: - inputs (str): A string describing the webcam input, in the format - "webcam:id". - - Returns: - A generator function that yields frames from the webcam input. - - Raises: - ValueError: If the inputs string is not in the expected format. - """ - - # Ensure the inputs string is in the expected format. - inputs = inputs.lower() - assert inputs.startswith('webcam'), f'Expected input to start with ' \ - f'"webcam", but got "{inputs}"' - - # Parse the camera ID from the inputs string. - inputs_ = inputs.split(':') - if len(inputs_) == 1: - camera_id = 0 - elif len(inputs_) == 2 and str.isdigit(inputs_[1]): - camera_id = int(inputs_[1]) - else: - raise ValueError( - f'Expected webcam input to have format "webcam:id", ' - f'but got "{inputs}"') - - # Attempt to open the video capture object. - vcap = cv2.VideoCapture(camera_id) - if not vcap.isOpened(): - warnings.warn(f'Cannot open camera (ID={camera_id})') - return [] - - # Set video input flag and metadata. - self._video_input = True - (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.') - if int(major_ver) < 3: - fps = vcap.get(cv2.cv.CV_CAP_PROP_FPS) - width = vcap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH) - height = vcap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT) - else: - fps = vcap.get(cv2.CAP_PROP_FPS) - width = vcap.get(cv2.CAP_PROP_FRAME_WIDTH) - height = vcap.get(cv2.CAP_PROP_FRAME_HEIGHT) - self.video_info = dict( - fps=fps, - name='webcam.mp4', - writer=None, - width=width, - height=height, - predictions=[]) - - def _webcam_reader() -> Generator: - while True: - if cv2.waitKey(5) & 0xFF == 27: - vcap.release() - break - - ret_val, frame = vcap.read() - if not ret_val: - break - - yield frame - - return _webcam_reader() - - def _init_pipeline(self, cfg: ConfigType) -> Callable: - """Initialize the test pipeline. - - Args: - cfg (ConfigType): model config path or dict - - Returns: - A pipeline to handle various input data, such as ``str``, - ``np.ndarray``. The returned pipeline will be used to process - a single data. 
- """ - scope = cfg.get('default_scope', 'mmpose') - if scope is not None: - init_default_scope(scope) - return Compose(cfg.test_dataloader.dataset.pipeline) - - def update_model_visualizer_settings(self, **kwargs): - """Update the settings of models and visualizer according to inference - arguments.""" - - pass - - def preprocess(self, - inputs: InputsType, - batch_size: int = 1, - bboxes: Optional[List] = None, - **kwargs): - """Process the inputs into a model-feedable format. - - Args: - inputs (InputsType): Inputs given by user. - batch_size (int): batch size. Defaults to 1. - - Yields: - Any: Data processed by the ``pipeline`` and ``collate_fn``. - List[str or np.ndarray]: List of original inputs in the batch - """ - - for i, input in enumerate(inputs): - bbox = bboxes[i] if bboxes else [] - data_infos = self.preprocess_single( - input, index=i, bboxes=bbox, **kwargs) - # only supports inference with batch size 1 - yield self.collate_fn(data_infos), [input] - - def visualize(self, - inputs: list, - preds: List[PoseDataSample], - return_vis: bool = False, - show: bool = False, - draw_bbox: bool = False, - wait_time: float = 0, - radius: int = 3, - thickness: int = 1, - kpt_thr: float = 0.3, - vis_out_dir: str = '', - window_name: str = '', - black_background: bool = False, - **kwargs) -> List[np.ndarray]: - """Visualize predictions. - - Args: - inputs (list): Inputs preprocessed by :meth:`_inputs_to_list`. - preds (Any): Predictions of the model. - return_vis (bool): Whether to return images with predicted results. - show (bool): Whether to display the image in a popup window. - Defaults to False. - wait_time (float): The interval of show (ms). Defaults to 0 - draw_bbox (bool): Whether to draw the bounding boxes. - Defaults to False - radius (int): Keypoint radius for visualization. Defaults to 3 - thickness (int): Link thickness for visualization. Defaults to 1 - kpt_thr (float): The threshold to visualize the keypoints. - Defaults to 0.3 - vis_out_dir (str, optional): Directory to save visualization - results w/o predictions. If left as empty, no file will - be saved. Defaults to ''. - window_name (str, optional): Title of display window. - black_background (bool, optional): Whether to plot keypoints on a - black image instead of the input image. Defaults to False. - - Returns: - List[np.ndarray]: Visualization results. - """ - if (not return_vis) and (not show) and (not vis_out_dir): - return - - if getattr(self, 'visualizer', None) is None: - raise ValueError('Visualization needs the "visualizer" term' - 'defined in the config, but got None.') - - self.visualizer.radius = radius - self.visualizer.line_width = thickness - - results = [] - - for single_input, pred in zip(inputs, preds): - if isinstance(single_input, str): - img = mmcv.imread(single_input, channel_order='rgb') - elif isinstance(single_input, np.ndarray): - img = mmcv.bgr2rgb(single_input) - else: - raise ValueError('Unsupported input type: ' - f'{type(single_input)}') - if black_background: - img = img * 0 - - img_name = os.path.basename(pred.metainfo['img_path']) - window_name = window_name if window_name else img_name - - # since visualization and inference utilize the same process, - # the wait time is reduced when a video input is utilized, - # thereby eliminating the issue of inference getting stuck. 
- wait_time = 1e-5 if self._video_input else wait_time - - visualization = self.visualizer.add_datasample( - window_name, - img, - pred, - draw_gt=False, - draw_bbox=draw_bbox, - show=show, - wait_time=wait_time, - kpt_thr=kpt_thr, - **kwargs) - results.append(visualization) - - if vis_out_dir: - out_img = mmcv.rgb2bgr(visualization) - _, file_extension = os.path.splitext(vis_out_dir) - if file_extension: - dir_name = os.path.dirname(vis_out_dir) - file_name = os.path.basename(vis_out_dir) - else: - dir_name = vis_out_dir - file_name = None - mkdir_or_exist(dir_name) - - if self._video_input: - - if self.video_info['writer'] is None: - fourcc = cv2.VideoWriter_fourcc(*'mp4v') - if file_name is None: - file_name = os.path.basename( - self.video_info['name']) - out_file = join_path(dir_name, file_name) - self.video_info['writer'] = cv2.VideoWriter( - out_file, fourcc, self.video_info['fps'], - (visualization.shape[1], visualization.shape[0])) - self.video_info['writer'].write(out_img) - - else: - file_name = file_name if file_name else img_name - out_file = join_path(dir_name, file_name) - mmcv.imwrite(out_img, out_file) - - if return_vis: - return results - else: - return [] - - def postprocess( - self, - preds: List[PoseDataSample], - visualization: List[np.ndarray], - return_datasample=False, - pred_out_dir: str = '', - ) -> dict: - """Process the predictions and visualization results from ``forward`` - and ``visualize``. - - This method should be responsible for the following tasks: - - 1. Convert datasamples into a json-serializable dict if needed. - 2. Pack the predictions and visualization results and return them. - 3. Dump or log the predictions. - - Args: - preds (List[Dict]): Predictions of the model. - visualization (np.ndarray): Visualized predictions. - return_datasample (bool): Whether to return results as - datasamples. Defaults to False. - pred_out_dir (str): Directory to save the inference results w/o - visualization. If left as empty, no file will be saved. - Defaults to ''. - - Returns: - dict: Inference and visualization results with key ``predictions`` - and ``visualization`` - - - ``visualization (Any)``: Returned by :meth:`visualize` - - ``predictions`` (dict or DataSample): Returned by - :meth:`forward` and processed in :meth:`postprocess`. - If ``return_datasample=False``, it usually should be a - json-serializable dict containing only basic data elements such - as strings and numbers. - """ - - result_dict = defaultdict(list) - - result_dict['visualization'] = visualization - for pred in preds: - if not return_datasample: - # convert datasamples to list of instance predictions - pred = split_instances(pred.pred_instances) - result_dict['predictions'].append(pred) - - if pred_out_dir != '': - for pred, data_sample in zip(result_dict['predictions'], preds): - if self._video_input: - # For video or webcam input, predictions for each frame - # are gathered in the 'predictions' key of 'video_info' - # dictionary. All frame predictions are then stored into - # a single file after processing all frames. - self.video_info['predictions'].append(pred) - else: - # For non-video inputs, predictions are stored in separate - # JSON files. The filename is determined by the basename - # of the input image path with a '.json' extension. The - # predictions are then dumped into this file. 
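                    # e.g. a (hypothetical) input image 'demo/image_001.jpg'
                    # produces 'image_001.json' under `pred_out_dir`.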
- fname = os.path.splitext( - os.path.basename( - data_sample.metainfo['img_path']))[0] + '.json' - mmengine.dump( - pred, join_path(pred_out_dir, fname), indent=' ') - - return result_dict - - def _finalize_video_processing( - self, - pred_out_dir: str = '', - ): - """Finalize video processing by releasing the video writer and saving - predictions to a file. - - This method should be called after completing the video processing. It - releases the video writer, if it exists, and saves the predictions to a - JSON file if a prediction output directory is provided. - """ - - # Release the video writer if it exists - if self.video_info['writer'] is not None: - self.video_info['writer'].release() - - # Save predictions - if pred_out_dir: - fname = os.path.splitext( - os.path.basename(self.video_info['name']))[0] + '.json' - predictions = [ - dict(frame_id=i, instances=pred) - for i, pred in enumerate(self.video_info['predictions']) - ] - - mmengine.dump( - predictions, join_path(pred_out_dir, fname), indent=' ') +# Copyright (c) OpenMMLab. All rights reserved. +import mimetypes +import os +import warnings +from collections import defaultdict +from typing import (Callable, Dict, Generator, Iterable, List, Optional, + Sequence, Union) + +import cv2 +import mmcv +import mmengine +import numpy as np +import torch.nn as nn +from mmengine.config import Config, ConfigDict +from mmengine.dataset import Compose +from mmengine.fileio import (get_file_backend, isdir, join_path, + list_dir_or_file) +from mmengine.infer.infer import BaseInferencer +from mmengine.registry import init_default_scope +from mmengine.runner.checkpoint import _load_checkpoint_to_model +from mmengine.structures import InstanceData +from mmengine.utils import mkdir_or_exist + +from mmpose.apis.inference import dataset_meta_from_config +from mmpose.structures import PoseDataSample, split_instances + +InstanceList = List[InstanceData] +InputType = Union[str, np.ndarray] +InputsType = Union[InputType, Sequence[InputType]] +PredType = Union[InstanceData, InstanceList] +ImgType = Union[np.ndarray, Sequence[np.ndarray]] +ConfigType = Union[Config, ConfigDict] +ResType = Union[Dict, List[Dict], InstanceData, List[InstanceData]] + + +class BaseMMPoseInferencer(BaseInferencer): + """The base class for MMPose inferencers.""" + + preprocess_kwargs: set = {'bbox_thr', 'nms_thr', 'bboxes'} + forward_kwargs: set = set() + visualize_kwargs: set = { + 'return_vis', 'show', 'wait_time', 'draw_bbox', 'radius', 'thickness', + 'kpt_thr', 'vis_out_dir', 'black_background' + } + postprocess_kwargs: set = {'pred_out_dir'} + + def _load_weights_to_model(self, model: nn.Module, + checkpoint: Optional[dict], + cfg: Optional[ConfigType]) -> None: + """Loading model weights and meta information from cfg and checkpoint. + + Subclasses could override this method to load extra meta information + from ``checkpoint`` and ``cfg`` to model. + + Args: + model (nn.Module): Model to load weights and meta information. + checkpoint (dict, optional): The loaded checkpoint. + cfg (Config or ConfigDict, optional): The loaded config. 
+ """ + if checkpoint is not None: + _load_checkpoint_to_model(model, checkpoint) + checkpoint_meta = checkpoint.get('meta', {}) + # save the dataset_meta in the model for convenience + if 'dataset_meta' in checkpoint_meta: + # mmpose 1.x + model.dataset_meta = checkpoint_meta['dataset_meta'] + else: + warnings.warn( + 'dataset_meta are not saved in the checkpoint\'s ' + 'meta data, load via config.') + model.dataset_meta = dataset_meta_from_config( + cfg, dataset_mode='train') + else: + warnings.warn('Checkpoint is not loaded, and the inference ' + 'result is calculated by the randomly initialized ' + 'model!') + model.dataset_meta = dataset_meta_from_config( + cfg, dataset_mode='train') + + def _inputs_to_list(self, inputs: InputsType) -> Iterable: + """Preprocess the inputs to a list. + + Preprocess inputs to a list according to its type: + + - list or tuple: return inputs + - str: + - Directory path: return all files in the directory + - other cases: return a list containing the string. The string + could be a path to file, a url or other types of string + according to the task. + + Args: + inputs (InputsType): Inputs for the inferencer. + + Returns: + list: List of input for the :meth:`preprocess`. + """ + self._video_input = False + + if isinstance(inputs, str): + backend = get_file_backend(inputs) + if hasattr(backend, 'isdir') and isdir(inputs): + # Backends like HttpsBackend do not implement `isdir`, so only + # those backends that implement `isdir` could accept the + # inputs as a directory + filepath_list = [ + join_path(inputs, fname) + for fname in list_dir_or_file(inputs, list_dir=False) + ] + inputs = [] + for filepath in filepath_list: + input_type = mimetypes.guess_type(filepath)[0].split( + '/')[0] + if input_type == 'image': + inputs.append(filepath) + inputs.sort() + else: + # if inputs is a path to a video file, it will be converted + # to a list containing separated frame filenames + input_type = mimetypes.guess_type(inputs)[0].split('/')[0] + if input_type == 'video': + self._video_input = True + video = mmcv.VideoReader(inputs) + self.video_info = dict( + fps=video.fps, + name=os.path.basename(inputs), + writer=None, + width=video.width, + height=video.height, + predictions=[]) + inputs = video + elif input_type == 'image': + inputs = [inputs] + else: + raise ValueError(f'Expected input to be an image, video, ' + f'or folder, but received {inputs} of ' + f'type {input_type}.') + + elif isinstance(inputs, np.ndarray): + inputs = [inputs] + + return inputs + + def _get_webcam_inputs(self, inputs: str) -> Generator: + """Sets up and returns a generator function that reads frames from a + webcam input. The generator function returns a new frame each time it + is iterated over. + + Args: + inputs (str): A string describing the webcam input, in the format + "webcam:id". + + Returns: + A generator function that yields frames from the webcam input. + + Raises: + ValueError: If the inputs string is not in the expected format. + """ + + # Ensure the inputs string is in the expected format. + inputs = inputs.lower() + assert inputs.startswith('webcam'), f'Expected input to start with ' \ + f'"webcam", but got "{inputs}"' + + # Parse the camera ID from the inputs string. + inputs_ = inputs.split(':') + if len(inputs_) == 1: + camera_id = 0 + elif len(inputs_) == 2 and str.isdigit(inputs_[1]): + camera_id = int(inputs_[1]) + else: + raise ValueError( + f'Expected webcam input to have format "webcam:id", ' + f'but got "{inputs}"') + + # Attempt to open the video capture object. 
+ vcap = cv2.VideoCapture(camera_id) + if not vcap.isOpened(): + warnings.warn(f'Cannot open camera (ID={camera_id})') + return [] + + # Set video input flag and metadata. + self._video_input = True + (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.') + if int(major_ver) < 3: + fps = vcap.get(cv2.cv.CV_CAP_PROP_FPS) + width = vcap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH) + height = vcap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT) + else: + fps = vcap.get(cv2.CAP_PROP_FPS) + width = vcap.get(cv2.CAP_PROP_FRAME_WIDTH) + height = vcap.get(cv2.CAP_PROP_FRAME_HEIGHT) + self.video_info = dict( + fps=fps, + name='webcam.mp4', + writer=None, + width=width, + height=height, + predictions=[]) + + def _webcam_reader() -> Generator: + while True: + if cv2.waitKey(5) & 0xFF == 27: + vcap.release() + break + + ret_val, frame = vcap.read() + if not ret_val: + break + + yield frame + + return _webcam_reader() + + def _init_pipeline(self, cfg: ConfigType) -> Callable: + """Initialize the test pipeline. + + Args: + cfg (ConfigType): model config path or dict + + Returns: + A pipeline to handle various input data, such as ``str``, + ``np.ndarray``. The returned pipeline will be used to process + a single data. + """ + scope = cfg.get('default_scope', 'mmpose') + if scope is not None: + init_default_scope(scope) + return Compose(cfg.test_dataloader.dataset.pipeline) + + def update_model_visualizer_settings(self, **kwargs): + """Update the settings of models and visualizer according to inference + arguments.""" + + pass + + def preprocess(self, + inputs: InputsType, + batch_size: int = 1, + bboxes: Optional[List] = None, + **kwargs): + """Process the inputs into a model-feedable format. + + Args: + inputs (InputsType): Inputs given by user. + batch_size (int): batch size. Defaults to 1. + + Yields: + Any: Data processed by the ``pipeline`` and ``collate_fn``. + List[str or np.ndarray]: List of original inputs in the batch + """ + + for i, input in enumerate(inputs): + bbox = bboxes[i] if bboxes else [] + data_infos = self.preprocess_single( + input, index=i, bboxes=bbox, **kwargs) + # only supports inference with batch size 1 + yield self.collate_fn(data_infos), [input] + + def visualize(self, + inputs: list, + preds: List[PoseDataSample], + return_vis: bool = False, + show: bool = False, + draw_bbox: bool = False, + wait_time: float = 0, + radius: int = 3, + thickness: int = 1, + kpt_thr: float = 0.3, + vis_out_dir: str = '', + window_name: str = '', + black_background: bool = False, + **kwargs) -> List[np.ndarray]: + """Visualize predictions. + + Args: + inputs (list): Inputs preprocessed by :meth:`_inputs_to_list`. + preds (Any): Predictions of the model. + return_vis (bool): Whether to return images with predicted results. + show (bool): Whether to display the image in a popup window. + Defaults to False. + wait_time (float): The interval of show (ms). Defaults to 0 + draw_bbox (bool): Whether to draw the bounding boxes. + Defaults to False + radius (int): Keypoint radius for visualization. Defaults to 3 + thickness (int): Link thickness for visualization. Defaults to 1 + kpt_thr (float): The threshold to visualize the keypoints. + Defaults to 0.3 + vis_out_dir (str, optional): Directory to save visualization + results w/o predictions. If left as empty, no file will + be saved. Defaults to ''. + window_name (str, optional): Title of display window. + black_background (bool, optional): Whether to plot keypoints on a + black image instead of the input image. Defaults to False. 
+ + Returns: + List[np.ndarray]: Visualization results. + """ + if (not return_vis) and (not show) and (not vis_out_dir): + return + + if getattr(self, 'visualizer', None) is None: + raise ValueError('Visualization needs the "visualizer" term' + 'defined in the config, but got None.') + + self.visualizer.radius = radius + self.visualizer.line_width = thickness + + results = [] + + for single_input, pred in zip(inputs, preds): + if isinstance(single_input, str): + img = mmcv.imread(single_input, channel_order='rgb') + elif isinstance(single_input, np.ndarray): + img = mmcv.bgr2rgb(single_input) + else: + raise ValueError('Unsupported input type: ' + f'{type(single_input)}') + if black_background: + img = img * 0 + + img_name = os.path.basename(pred.metainfo['img_path']) + window_name = window_name if window_name else img_name + + # since visualization and inference utilize the same process, + # the wait time is reduced when a video input is utilized, + # thereby eliminating the issue of inference getting stuck. + wait_time = 1e-5 if self._video_input else wait_time + + visualization = self.visualizer.add_datasample( + window_name, + img, + pred, + draw_gt=False, + draw_bbox=draw_bbox, + show=show, + wait_time=wait_time, + kpt_thr=kpt_thr, + **kwargs) + results.append(visualization) + + if vis_out_dir: + out_img = mmcv.rgb2bgr(visualization) + _, file_extension = os.path.splitext(vis_out_dir) + if file_extension: + dir_name = os.path.dirname(vis_out_dir) + file_name = os.path.basename(vis_out_dir) + else: + dir_name = vis_out_dir + file_name = None + mkdir_or_exist(dir_name) + + if self._video_input: + + if self.video_info['writer'] is None: + fourcc = cv2.VideoWriter_fourcc(*'mp4v') + if file_name is None: + file_name = os.path.basename( + self.video_info['name']) + out_file = join_path(dir_name, file_name) + self.video_info['writer'] = cv2.VideoWriter( + out_file, fourcc, self.video_info['fps'], + (visualization.shape[1], visualization.shape[0])) + self.video_info['writer'].write(out_img) + + else: + file_name = file_name if file_name else img_name + out_file = join_path(dir_name, file_name) + mmcv.imwrite(out_img, out_file) + + if return_vis: + return results + else: + return [] + + def postprocess( + self, + preds: List[PoseDataSample], + visualization: List[np.ndarray], + return_datasample=False, + pred_out_dir: str = '', + ) -> dict: + """Process the predictions and visualization results from ``forward`` + and ``visualize``. + + This method should be responsible for the following tasks: + + 1. Convert datasamples into a json-serializable dict if needed. + 2. Pack the predictions and visualization results and return them. + 3. Dump or log the predictions. + + Args: + preds (List[Dict]): Predictions of the model. + visualization (np.ndarray): Visualized predictions. + return_datasample (bool): Whether to return results as + datasamples. Defaults to False. + pred_out_dir (str): Directory to save the inference results w/o + visualization. If left as empty, no file will be saved. + Defaults to ''. + + Returns: + dict: Inference and visualization results with key ``predictions`` + and ``visualization`` + + - ``visualization (Any)``: Returned by :meth:`visualize` + - ``predictions`` (dict or DataSample): Returned by + :meth:`forward` and processed in :meth:`postprocess`. + If ``return_datasample=False``, it usually should be a + json-serializable dict containing only basic data elements such + as strings and numbers. 
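            An illustrative sketch of the returned structure (the keys inside
            each instance dict depend on the model outputs)::

                {'visualization': [...],
                 'predictions': [[{'keypoints': ..., 'keypoint_scores': ...},
                                  ...]]}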
+ """ + + result_dict = defaultdict(list) + + result_dict['visualization'] = visualization + for pred in preds: + if not return_datasample: + # convert datasamples to list of instance predictions + pred = split_instances(pred.pred_instances) + result_dict['predictions'].append(pred) + + if pred_out_dir != '': + for pred, data_sample in zip(result_dict['predictions'], preds): + if self._video_input: + # For video or webcam input, predictions for each frame + # are gathered in the 'predictions' key of 'video_info' + # dictionary. All frame predictions are then stored into + # a single file after processing all frames. + self.video_info['predictions'].append(pred) + else: + # For non-video inputs, predictions are stored in separate + # JSON files. The filename is determined by the basename + # of the input image path with a '.json' extension. The + # predictions are then dumped into this file. + fname = os.path.splitext( + os.path.basename( + data_sample.metainfo['img_path']))[0] + '.json' + mmengine.dump( + pred, join_path(pred_out_dir, fname), indent=' ') + + return result_dict + + def _finalize_video_processing( + self, + pred_out_dir: str = '', + ): + """Finalize video processing by releasing the video writer and saving + predictions to a file. + + This method should be called after completing the video processing. It + releases the video writer, if it exists, and saves the predictions to a + JSON file if a prediction output directory is provided. + """ + + # Release the video writer if it exists + if self.video_info['writer'] is not None: + self.video_info['writer'].release() + + # Save predictions + if pred_out_dir: + fname = os.path.splitext( + os.path.basename(self.video_info['name']))[0] + '.json' + predictions = [ + dict(frame_id=i, instances=pred) + for i, pred in enumerate(self.video_info['predictions']) + ] + + mmengine.dump( + predictions, join_path(pred_out_dir, fname), indent=' ') diff --git a/mmpose/apis/inferencers/mmpose_inferencer.py b/mmpose/apis/inferencers/mmpose_inferencer.py index b44361bba8..d774618de7 100644 --- a/mmpose/apis/inferencers/mmpose_inferencer.py +++ b/mmpose/apis/inferencers/mmpose_inferencer.py @@ -1,239 +1,239 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings -from typing import Dict, List, Optional, Sequence, Union - -import numpy as np -import torch -from mmengine.config import Config, ConfigDict -from mmengine.infer.infer import ModelType -from mmengine.structures import InstanceData - -from .base_mmpose_inferencer import BaseMMPoseInferencer -from .pose2d_inferencer import Pose2DInferencer -from .pose3d_inferencer import Pose3DInferencer - -InstanceList = List[InstanceData] -InputType = Union[str, np.ndarray] -InputsType = Union[InputType, Sequence[InputType]] -PredType = Union[InstanceData, InstanceList] -ImgType = Union[np.ndarray, Sequence[np.ndarray]] -ConfigType = Union[Config, ConfigDict] -ResType = Union[Dict, List[Dict], InstanceData, List[InstanceData]] - - -class MMPoseInferencer(BaseMMPoseInferencer): - """MMPose Inferencer. It's a unified inferencer interface for pose - estimation task, currently including: Pose2D. and it can be used to perform - 2D keypoint detection. - - Args: - pose2d (str, optional): Pretrained 2D pose estimation algorithm. - It's the path to the config file or the model name defined in - metafile. For example, it could be: - - - model alias, e.g. ``'body'``, - - config name, e.g. ``'simcc_res50_8xb64-210e_coco-256x192'``, - - config path - - Defaults to ``None``. 
- pose2d_weights (str, optional): Path to the custom checkpoint file of - the selected pose2d model. If it is not specified and "pose2d" is - a model name of metafile, the weights will be loaded from - metafile. Defaults to None. - device (str, optional): Device to run inference. If None, the - available device will be automatically used. Defaults to None. - scope (str, optional): The scope of the model. Defaults to "mmpose". - det_model(str, optional): Config path or alias of detection model. - Defaults to None. - det_weights(str, optional): Path to the checkpoints of detection - model. Defaults to None. - det_cat_ids(int or list[int], optional): Category id for - detection model. Defaults to None. - output_heatmaps (bool, optional): Flag to visualize predicted - heatmaps. If set to None, the default setting from the model - config will be used. Default is None. - """ - - preprocess_kwargs: set = { - 'bbox_thr', 'nms_thr', 'bboxes', 'use_oks_tracking', 'tracking_thr', - 'norm_pose_2d' - } - forward_kwargs: set = {'rebase_keypoint_height'} - visualize_kwargs: set = { - 'return_vis', 'show', 'wait_time', 'draw_bbox', 'radius', 'thickness', - 'kpt_thr', 'vis_out_dir', 'skeleton_style', 'draw_heatmap', - 'black_background' - } - postprocess_kwargs: set = {'pred_out_dir'} - - def __init__(self, - pose2d: Optional[str] = None, - pose2d_weights: Optional[str] = None, - pose3d: Optional[str] = None, - pose3d_weights: Optional[str] = None, - device: Optional[str] = None, - scope: str = 'mmpose', - det_model: Optional[Union[ModelType, str]] = None, - det_weights: Optional[str] = None, - det_cat_ids: Optional[Union[int, List]] = None) -> None: - - self.visualizer = None - if pose3d is not None: - self.inferencer = Pose3DInferencer(pose3d, pose3d_weights, pose2d, - pose2d_weights, device, scope, - det_model, det_weights, - det_cat_ids) - elif pose2d is not None: - self.inferencer = Pose2DInferencer(pose2d, pose2d_weights, device, - scope, det_model, det_weights, - det_cat_ids) - else: - raise ValueError('Either 2d or 3d pose estimation algorithm ' - 'should be provided.') - - def preprocess(self, inputs: InputsType, batch_size: int = 1, **kwargs): - """Process the inputs into a model-feedable format. - - Args: - inputs (InputsType): Inputs given by user. - batch_size (int): batch size. Defaults to 1. - - Yields: - Any: Data processed by the ``pipeline`` and ``collate_fn``. - List[str or np.ndarray]: List of original inputs in the batch - """ - - for i, input in enumerate(inputs): - data_batch = {} - data_infos = self.inferencer.preprocess_single( - input, index=i, **kwargs) - data_batch = self.inferencer.collate_fn(data_infos) - # only supports inference with batch size 1 - yield data_batch, [input] - - @torch.no_grad() - def forward(self, inputs: InputType, **forward_kwargs) -> PredType: - """Forward the inputs to the model. - - Args: - inputs (InputsType): The inputs to be forwarded. - - Returns: - Dict: The prediction results. Possibly with keys "pose2d". - """ - return self.inferencer.forward(inputs, **forward_kwargs) - - def __call__( - self, - inputs: InputsType, - return_datasample: bool = False, - batch_size: int = 1, - out_dir: Optional[str] = None, - **kwargs, - ) -> dict: - """Call the inferencer. - - Args: - inputs (InputsType): Inputs for the inferencer. - return_datasample (bool): Whether to return results as - :obj:`BaseDataElement`. Defaults to False. - batch_size (int): Batch size. Defaults to 1. - out_dir (str, optional): directory to save visualization - results and predictions. 
Will be overoden if vis_out_dir or - pred_out_dir are given. Defaults to None - **kwargs: Key words arguments passed to :meth:`preprocess`, - :meth:`forward`, :meth:`visualize` and :meth:`postprocess`. - Each key in kwargs should be in the corresponding set of - ``preprocess_kwargs``, ``forward_kwargs``, - ``visualize_kwargs`` and ``postprocess_kwargs``. - - Returns: - dict: Inference and visualization results. - """ - if out_dir is not None: - if 'vis_out_dir' not in kwargs: - kwargs['vis_out_dir'] = f'{out_dir}/visualizations' - if 'pred_out_dir' not in kwargs: - kwargs['pred_out_dir'] = f'{out_dir}/predictions' - - kwargs = { - key: value - for key, value in kwargs.items() - if key in set.union(self.inferencer.preprocess_kwargs, - self.inferencer.forward_kwargs, - self.inferencer.visualize_kwargs, - self.inferencer.postprocess_kwargs) - } - ( - preprocess_kwargs, - forward_kwargs, - visualize_kwargs, - postprocess_kwargs, - ) = self._dispatch_kwargs(**kwargs) - - self.inferencer.update_model_visualizer_settings(**kwargs) - - # preprocessing - if isinstance(inputs, str) and inputs.startswith('webcam'): - inputs = self.inferencer._get_webcam_inputs(inputs) - batch_size = 1 - if not visualize_kwargs.get('show', False): - warnings.warn('The display mode is closed when using webcam ' - 'input. It will be turned on automatically.') - visualize_kwargs['show'] = True - else: - inputs = self.inferencer._inputs_to_list(inputs) - self._video_input = self.inferencer._video_input - if self._video_input: - self.video_info = self.inferencer.video_info - - inputs = self.preprocess( - inputs, batch_size=batch_size, **preprocess_kwargs) - - # forward - if 'bbox_thr' in self.inferencer.forward_kwargs: - forward_kwargs['bbox_thr'] = preprocess_kwargs.get('bbox_thr', -1) - - preds = [] - - for proc_inputs, ori_inputs in inputs: - preds = self.forward(proc_inputs, **forward_kwargs) - - visualization = self.visualize(ori_inputs, preds, - **visualize_kwargs) - results = self.postprocess(preds, visualization, return_datasample, - **postprocess_kwargs) - yield results - - if self._video_input: - self._finalize_video_processing( - postprocess_kwargs.get('pred_out_dir', '')) - - def visualize(self, inputs: InputsType, preds: PredType, - **kwargs) -> List[np.ndarray]: - """Visualize predictions. - - Args: - inputs (list): Inputs preprocessed by :meth:`_inputs_to_list`. - preds (Any): Predictions of the model. - return_vis (bool): Whether to return images with predicted results. - show (bool): Whether to display the image in a popup window. - Defaults to False. - show_interval (int): The interval of show (s). Defaults to 0 - radius (int): Keypoint radius for visualization. Defaults to 3 - thickness (int): Link thickness for visualization. Defaults to 1 - kpt_thr (float): The threshold to visualize the keypoints. - Defaults to 0.3 - vis_out_dir (str, optional): directory to save visualization - results w/o predictions. If left as empty, no file will - be saved. Defaults to ''. - - Returns: - List[np.ndarray]: Visualization results. - """ - window_name = '' - if self.inferencer._video_input: - window_name = self.inferencer.video_info['name'] - - return self.inferencer.visualize( - inputs, preds, window_name=window_name, **kwargs) +# Copyright (c) OpenMMLab. All rights reserved. 
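# A minimal usage sketch for the MMPoseInferencer defined in this file. The
# alias 'body' follows the docstring example and the image path is
# hypothetical; since __call__ is a generator, results are consumed by
# iterating it.
from mmpose.apis.inferencers import MMPoseInferencer

inferencer = MMPoseInferencer(pose2d='body')  # alias, config name or path
result_generator = inferencer('demo.jpg', out_dir='output')
result = next(result_generator)  # dict with 'visualization' and 'predictions'
# Visualizations are written to output/visualizations and per-image JSON
# predictions to output/predictions, as set up in __call__.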
+import warnings +from typing import Dict, List, Optional, Sequence, Union + +import numpy as np +import torch +from mmengine.config import Config, ConfigDict +from mmengine.infer.infer import ModelType +from mmengine.structures import InstanceData + +from .base_mmpose_inferencer import BaseMMPoseInferencer +from .pose2d_inferencer import Pose2DInferencer +from .pose3d_inferencer import Pose3DInferencer + +InstanceList = List[InstanceData] +InputType = Union[str, np.ndarray] +InputsType = Union[InputType, Sequence[InputType]] +PredType = Union[InstanceData, InstanceList] +ImgType = Union[np.ndarray, Sequence[np.ndarray]] +ConfigType = Union[Config, ConfigDict] +ResType = Union[Dict, List[Dict], InstanceData, List[InstanceData]] + + +class MMPoseInferencer(BaseMMPoseInferencer): + """MMPose Inferencer. It's a unified inferencer interface for pose + estimation task, currently including: Pose2D. and it can be used to perform + 2D keypoint detection. + + Args: + pose2d (str, optional): Pretrained 2D pose estimation algorithm. + It's the path to the config file or the model name defined in + metafile. For example, it could be: + + - model alias, e.g. ``'body'``, + - config name, e.g. ``'simcc_res50_8xb64-210e_coco-256x192'``, + - config path + + Defaults to ``None``. + pose2d_weights (str, optional): Path to the custom checkpoint file of + the selected pose2d model. If it is not specified and "pose2d" is + a model name of metafile, the weights will be loaded from + metafile. Defaults to None. + device (str, optional): Device to run inference. If None, the + available device will be automatically used. Defaults to None. + scope (str, optional): The scope of the model. Defaults to "mmpose". + det_model(str, optional): Config path or alias of detection model. + Defaults to None. + det_weights(str, optional): Path to the checkpoints of detection + model. Defaults to None. + det_cat_ids(int or list[int], optional): Category id for + detection model. Defaults to None. + output_heatmaps (bool, optional): Flag to visualize predicted + heatmaps. If set to None, the default setting from the model + config will be used. Default is None. + """ + + preprocess_kwargs: set = { + 'bbox_thr', 'nms_thr', 'bboxes', 'use_oks_tracking', 'tracking_thr', + 'norm_pose_2d' + } + forward_kwargs: set = {'rebase_keypoint_height'} + visualize_kwargs: set = { + 'return_vis', 'show', 'wait_time', 'draw_bbox', 'radius', 'thickness', + 'kpt_thr', 'vis_out_dir', 'skeleton_style', 'draw_heatmap', + 'black_background' + } + postprocess_kwargs: set = {'pred_out_dir'} + + def __init__(self, + pose2d: Optional[str] = None, + pose2d_weights: Optional[str] = None, + pose3d: Optional[str] = None, + pose3d_weights: Optional[str] = None, + device: Optional[str] = None, + scope: str = 'mmpose', + det_model: Optional[Union[ModelType, str]] = None, + det_weights: Optional[str] = None, + det_cat_ids: Optional[Union[int, List]] = None) -> None: + + self.visualizer = None + if pose3d is not None: + self.inferencer = Pose3DInferencer(pose3d, pose3d_weights, pose2d, + pose2d_weights, device, scope, + det_model, det_weights, + det_cat_ids) + elif pose2d is not None: + self.inferencer = Pose2DInferencer(pose2d, pose2d_weights, device, + scope, det_model, det_weights, + det_cat_ids) + else: + raise ValueError('Either 2d or 3d pose estimation algorithm ' + 'should be provided.') + + def preprocess(self, inputs: InputsType, batch_size: int = 1, **kwargs): + """Process the inputs into a model-feedable format. 
+ + Args: + inputs (InputsType): Inputs given by user. + batch_size (int): batch size. Defaults to 1. + + Yields: + Any: Data processed by the ``pipeline`` and ``collate_fn``. + List[str or np.ndarray]: List of original inputs in the batch + """ + + for i, input in enumerate(inputs): + data_batch = {} + data_infos = self.inferencer.preprocess_single( + input, index=i, **kwargs) + data_batch = self.inferencer.collate_fn(data_infos) + # only supports inference with batch size 1 + yield data_batch, [input] + + @torch.no_grad() + def forward(self, inputs: InputType, **forward_kwargs) -> PredType: + """Forward the inputs to the model. + + Args: + inputs (InputsType): The inputs to be forwarded. + + Returns: + Dict: The prediction results. Possibly with keys "pose2d". + """ + return self.inferencer.forward(inputs, **forward_kwargs) + + def __call__( + self, + inputs: InputsType, + return_datasample: bool = False, + batch_size: int = 1, + out_dir: Optional[str] = None, + **kwargs, + ) -> dict: + """Call the inferencer. + + Args: + inputs (InputsType): Inputs for the inferencer. + return_datasample (bool): Whether to return results as + :obj:`BaseDataElement`. Defaults to False. + batch_size (int): Batch size. Defaults to 1. + out_dir (str, optional): directory to save visualization + results and predictions. Will be overoden if vis_out_dir or + pred_out_dir are given. Defaults to None + **kwargs: Key words arguments passed to :meth:`preprocess`, + :meth:`forward`, :meth:`visualize` and :meth:`postprocess`. + Each key in kwargs should be in the corresponding set of + ``preprocess_kwargs``, ``forward_kwargs``, + ``visualize_kwargs`` and ``postprocess_kwargs``. + + Returns: + dict: Inference and visualization results. + """ + if out_dir is not None: + if 'vis_out_dir' not in kwargs: + kwargs['vis_out_dir'] = f'{out_dir}/visualizations' + if 'pred_out_dir' not in kwargs: + kwargs['pred_out_dir'] = f'{out_dir}/predictions' + + kwargs = { + key: value + for key, value in kwargs.items() + if key in set.union(self.inferencer.preprocess_kwargs, + self.inferencer.forward_kwargs, + self.inferencer.visualize_kwargs, + self.inferencer.postprocess_kwargs) + } + ( + preprocess_kwargs, + forward_kwargs, + visualize_kwargs, + postprocess_kwargs, + ) = self._dispatch_kwargs(**kwargs) + + self.inferencer.update_model_visualizer_settings(**kwargs) + + # preprocessing + if isinstance(inputs, str) and inputs.startswith('webcam'): + inputs = self.inferencer._get_webcam_inputs(inputs) + batch_size = 1 + if not visualize_kwargs.get('show', False): + warnings.warn('The display mode is closed when using webcam ' + 'input. 
It will be turned on automatically.') + visualize_kwargs['show'] = True + else: + inputs = self.inferencer._inputs_to_list(inputs) + self._video_input = self.inferencer._video_input + if self._video_input: + self.video_info = self.inferencer.video_info + + inputs = self.preprocess( + inputs, batch_size=batch_size, **preprocess_kwargs) + + # forward + if 'bbox_thr' in self.inferencer.forward_kwargs: + forward_kwargs['bbox_thr'] = preprocess_kwargs.get('bbox_thr', -1) + + preds = [] + + for proc_inputs, ori_inputs in inputs: + preds = self.forward(proc_inputs, **forward_kwargs) + + visualization = self.visualize(ori_inputs, preds, + **visualize_kwargs) + results = self.postprocess(preds, visualization, return_datasample, + **postprocess_kwargs) + yield results + + if self._video_input: + self._finalize_video_processing( + postprocess_kwargs.get('pred_out_dir', '')) + + def visualize(self, inputs: InputsType, preds: PredType, + **kwargs) -> List[np.ndarray]: + """Visualize predictions. + + Args: + inputs (list): Inputs preprocessed by :meth:`_inputs_to_list`. + preds (Any): Predictions of the model. + return_vis (bool): Whether to return images with predicted results. + show (bool): Whether to display the image in a popup window. + Defaults to False. + show_interval (int): The interval of show (s). Defaults to 0 + radius (int): Keypoint radius for visualization. Defaults to 3 + thickness (int): Link thickness for visualization. Defaults to 1 + kpt_thr (float): The threshold to visualize the keypoints. + Defaults to 0.3 + vis_out_dir (str, optional): directory to save visualization + results w/o predictions. If left as empty, no file will + be saved. Defaults to ''. + + Returns: + List[np.ndarray]: Visualization results. + """ + window_name = '' + if self.inferencer._video_input: + window_name = self.inferencer.video_info['name'] + + return self.inferencer.visualize( + inputs, preds, window_name=window_name, **kwargs) diff --git a/mmpose/apis/inferencers/pose2d_inferencer.py b/mmpose/apis/inferencers/pose2d_inferencer.py index 3f1f20fdc0..90530dbc02 100644 --- a/mmpose/apis/inferencers/pose2d_inferencer.py +++ b/mmpose/apis/inferencers/pose2d_inferencer.py @@ -1,327 +1,327 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os -import warnings -from typing import Dict, List, Optional, Sequence, Tuple, Union - -import mmcv -import numpy as np -import torch -from mmengine.config import Config, ConfigDict -from mmengine.infer.infer import ModelType -from mmengine.model import revert_sync_batchnorm -from mmengine.registry import init_default_scope -from mmengine.structures import InstanceData - -from mmpose.evaluation.functional import nms -from mmpose.registry import DATASETS, INFERENCERS -from mmpose.structures import merge_data_samples -from .base_mmpose_inferencer import BaseMMPoseInferencer -from .utils import default_det_models - -try: - from mmdet.apis.det_inferencer import DetInferencer - has_mmdet = True -except (ImportError, ModuleNotFoundError): - has_mmdet = False - -InstanceList = List[InstanceData] -InputType = Union[str, np.ndarray] -InputsType = Union[InputType, Sequence[InputType]] -PredType = Union[InstanceData, InstanceList] -ImgType = Union[np.ndarray, Sequence[np.ndarray]] -ConfigType = Union[Config, ConfigDict] -ResType = Union[Dict, List[Dict], InstanceData, List[InstanceData]] - - -@INFERENCERS.register_module(name='pose-estimation') -@INFERENCERS.register_module() -class Pose2DInferencer(BaseMMPoseInferencer): - """The inferencer for 2D pose estimation. 
- - Args: - model (str, optional): Pretrained 2D pose estimation algorithm. - It's the path to the config file or the model name defined in - metafile. For example, it could be: - - - model alias, e.g. ``'body'``, - - config name, e.g. ``'simcc_res50_8xb64-210e_coco-256x192'``, - - config path - - Defaults to ``None``. - weights (str, optional): Path to the checkpoint. If it is not - specified and "model" is a model name of metafile, the weights - will be loaded from metafile. Defaults to None. - device (str, optional): Device to run inference. If None, the - available device will be automatically used. Defaults to None. - scope (str, optional): The scope of the model. Defaults to "mmpose". - det_model (str, optional): Config path or alias of detection model. - Defaults to None. - det_weights (str, optional): Path to the checkpoints of detection - model. Defaults to None. - det_cat_ids (int or list[int], optional): Category id for - detection model. Defaults to None. - """ - - preprocess_kwargs: set = {'bbox_thr', 'nms_thr', 'bboxes'} - forward_kwargs: set = {'merge_results'} - visualize_kwargs: set = { - 'return_vis', - 'show', - 'wait_time', - 'draw_bbox', - 'radius', - 'thickness', - 'kpt_thr', - 'vis_out_dir', - 'skeleton_style', - 'draw_heatmap', - 'black_background', - } - postprocess_kwargs: set = {'pred_out_dir'} - - def __init__(self, - model: Union[ModelType, str], - weights: Optional[str] = None, - device: Optional[str] = None, - scope: Optional[str] = 'mmpose', - det_model: Optional[Union[ModelType, str]] = None, - det_weights: Optional[str] = None, - det_cat_ids: Optional[Union[int, Tuple]] = None) -> None: - - init_default_scope(scope) - super().__init__( - model=model, weights=weights, device=device, scope=scope) - self.model = revert_sync_batchnorm(self.model) - - # assign dataset metainfo to self.visualizer - self.visualizer.set_dataset_meta(self.model.dataset_meta) - - # initialize detector for top-down models - if self.cfg.data_mode == 'topdown': - object_type = DATASETS.get(self.cfg.dataset_type).__module__.split( - 'datasets.')[-1].split('.')[0].lower() - - if det_model in ('whole_image', 'whole-image') or \ - (det_model is None and - object_type not in default_det_models): - self.detector = None - - else: - det_scope = 'mmdet' - if det_model is None: - det_info = default_det_models[object_type] - det_model, det_weights, det_cat_ids = det_info[ - 'model'], det_info['weights'], det_info['cat_ids'] - elif os.path.exists(det_model): - det_cfg = Config.fromfile(det_model) - det_scope = det_cfg.default_scope - - if has_mmdet: - self.detector = DetInferencer( - det_model, det_weights, device=device, scope=det_scope) - else: - raise RuntimeError( - 'MMDetection (v3.0.0 or above) is required to build ' - 'inferencers for top-down pose estimation models.') - - if isinstance(det_cat_ids, (tuple, list)): - self.det_cat_ids = det_cat_ids - else: - self.det_cat_ids = (det_cat_ids, ) - - self._video_input = False - - def update_model_visualizer_settings(self, - draw_heatmap: bool = False, - skeleton_style: str = 'mmpose', - **kwargs) -> None: - """Update the settings of models and visualizer according to inference - arguments. - - Args: - draw_heatmaps (bool, optional): Flag to visualize predicted - heatmaps. If not provided, it defaults to False. - skeleton_style (str, optional): Skeleton style selection. Valid - options are 'mmpose' and 'openpose'. Defaults to 'mmpose'. 
- """ - self.model.test_cfg['output_heatmaps'] = draw_heatmap - - if skeleton_style not in ['mmpose', 'openpose']: - raise ValueError('`skeleton_style` must be either \'mmpose\' ' - 'or \'openpose\'') - - if skeleton_style == 'openpose': - self.visualizer.set_dataset_meta(self.model.dataset_meta, - skeleton_style) - - def preprocess_single(self, - input: InputType, - index: int, - bbox_thr: float = 0.3, - nms_thr: float = 0.3, - bboxes: Union[List[List], List[np.ndarray], - np.ndarray] = []): - """Process a single input into a model-feedable format. - - Args: - input (InputType): Input given by user. - index (int): index of the input - bbox_thr (float): threshold for bounding box detection. - Defaults to 0.3. - nms_thr (float): IoU threshold for bounding box NMS. - Defaults to 0.3. - - Yields: - Any: Data processed by the ``pipeline`` and ``collate_fn``. - """ - - if isinstance(input, str): - data_info = dict(img_path=input) - else: - data_info = dict(img=input, img_path=f'{index}.jpg'.rjust(10, '0')) - data_info.update(self.model.dataset_meta) - - if self.cfg.data_mode == 'topdown': - if self.detector is not None: - det_results = self.detector( - input, return_datasample=True)['predictions'] - pred_instance = det_results[0].pred_instances.cpu().numpy() - bboxes = np.concatenate( - (pred_instance.bboxes, pred_instance.scores[:, None]), - axis=1) - - label_mask = np.zeros(len(bboxes), dtype=np.uint8) - for cat_id in self.det_cat_ids: - label_mask = np.logical_or(label_mask, - pred_instance.labels == cat_id) - - bboxes = bboxes[np.logical_and( - label_mask, pred_instance.scores > bbox_thr)] - bboxes = bboxes[nms(bboxes, nms_thr)] - - data_infos = [] - if len(bboxes) > 0: - for bbox in bboxes: - inst = data_info.copy() - inst['bbox'] = bbox[None, :4] - inst['bbox_score'] = bbox[4:5] - data_infos.append(self.pipeline(inst)) - else: - inst = data_info.copy() - - # get bbox from the image size - if isinstance(input, str): - input = mmcv.imread(input) - h, w = input.shape[:2] - - inst['bbox'] = np.array([[0, 0, w, h]], dtype=np.float32) - inst['bbox_score'] = np.ones(1, dtype=np.float32) - data_infos.append(self.pipeline(inst)) - - else: # bottom-up - data_infos = [self.pipeline(data_info)] - - return data_infos - - @torch.no_grad() - def forward(self, - inputs: Union[dict, tuple], - merge_results: bool = True, - bbox_thr: float = -1): - """Performs a forward pass through the model. - - Args: - inputs (Union[dict, tuple]): The input data to be processed. Can - be either a dictionary or a tuple. - merge_results (bool, optional): Whether to merge data samples, - default to True. This is only applicable when the data_mode - is 'topdown'. - bbox_thr (float, optional): A threshold for the bounding box - scores. Bounding boxes with scores greater than this value - will be retained. Default value is -1 which retains all - bounding boxes. - - Returns: - A list of data samples with prediction instances. - """ - data_samples = self.model.test_step(inputs) - if self.cfg.data_mode == 'topdown' and merge_results: - data_samples = [merge_data_samples(data_samples)] - if bbox_thr > 0: - for ds in data_samples: - if 'bbox_scores' in ds.pred_instances: - ds.pred_instances = ds.pred_instances[ - ds.pred_instances.bbox_scores > bbox_thr] - return data_samples - - def __call__( - self, - inputs: InputsType, - return_datasample: bool = False, - batch_size: int = 1, - out_dir: Optional[str] = None, - **kwargs, - ) -> dict: - """Call the inferencer. - - Args: - inputs (InputsType): Inputs for the inferencer. 
- return_datasample (bool): Whether to return results as - :obj:`BaseDataElement`. Defaults to False. - batch_size (int): Batch size. Defaults to 1. - out_dir (str, optional): directory to save visualization - results and predictions. Will be overoden if vis_out_dir or - pred_out_dir are given. Defaults to None - **kwargs: Key words arguments passed to :meth:`preprocess`, - :meth:`forward`, :meth:`visualize` and :meth:`postprocess`. - Each key in kwargs should be in the corresponding set of - ``preprocess_kwargs``, ``forward_kwargs``, - ``visualize_kwargs`` and ``postprocess_kwargs``. - - Returns: - dict: Inference and visualization results. - """ - if out_dir is not None: - if 'vis_out_dir' not in kwargs: - kwargs['vis_out_dir'] = f'{out_dir}/visualizations' - if 'pred_out_dir' not in kwargs: - kwargs['pred_out_dir'] = f'{out_dir}/predictions' - - ( - preprocess_kwargs, - forward_kwargs, - visualize_kwargs, - postprocess_kwargs, - ) = self._dispatch_kwargs(**kwargs) - - self.update_model_visualizer_settings(**kwargs) - - # preprocessing - if isinstance(inputs, str) and inputs.startswith('webcam'): - inputs = self._get_webcam_inputs(inputs) - batch_size = 1 - if not visualize_kwargs.get('show', False): - warnings.warn('The display mode is closed when using webcam ' - 'input. It will be turned on automatically.') - visualize_kwargs['show'] = True - else: - inputs = self._inputs_to_list(inputs) - - forward_kwargs['bbox_thr'] = preprocess_kwargs.get('bbox_thr', -1) - inputs = self.preprocess( - inputs, batch_size=batch_size, **preprocess_kwargs) - - preds = [] - - for proc_inputs, ori_inputs in inputs: - preds = self.forward(proc_inputs, **forward_kwargs) - - visualization = self.visualize(ori_inputs, preds, - **visualize_kwargs) - results = self.postprocess(preds, visualization, return_datasample, - **postprocess_kwargs) - yield results - - if self._video_input: - self._finalize_video_processing( - postprocess_kwargs.get('pred_out_dir', '')) +# Copyright (c) OpenMMLab. All rights reserved. +import os +import warnings +from typing import Dict, List, Optional, Sequence, Tuple, Union + +import mmcv +import numpy as np +import torch +from mmengine.config import Config, ConfigDict +from mmengine.infer.infer import ModelType +from mmengine.model import revert_sync_batchnorm +from mmengine.registry import init_default_scope +from mmengine.structures import InstanceData + +from mmpose.evaluation.functional import nms +from mmpose.registry import DATASETS, INFERENCERS +from mmpose.structures import merge_data_samples +from .base_mmpose_inferencer import BaseMMPoseInferencer +from .utils import default_det_models + +try: + from mmdet.apis.det_inferencer import DetInferencer + has_mmdet = True +except (ImportError, ModuleNotFoundError): + has_mmdet = False + +InstanceList = List[InstanceData] +InputType = Union[str, np.ndarray] +InputsType = Union[InputType, Sequence[InputType]] +PredType = Union[InstanceData, InstanceList] +ImgType = Union[np.ndarray, Sequence[np.ndarray]] +ConfigType = Union[Config, ConfigDict] +ResType = Union[Dict, List[Dict], InstanceData, List[InstanceData]] + + +@INFERENCERS.register_module(name='pose-estimation') +@INFERENCERS.register_module() +class Pose2DInferencer(BaseMMPoseInferencer): + """The inferencer for 2D pose estimation. + + Args: + model (str, optional): Pretrained 2D pose estimation algorithm. + It's the path to the config file or the model name defined in + metafile. For example, it could be: + + - model alias, e.g. ``'body'``, + - config name, e.g. 
``'simcc_res50_8xb64-210e_coco-256x192'``, + - config path + + Defaults to ``None``. + weights (str, optional): Path to the checkpoint. If it is not + specified and "model" is a model name of metafile, the weights + will be loaded from metafile. Defaults to None. + device (str, optional): Device to run inference. If None, the + available device will be automatically used. Defaults to None. + scope (str, optional): The scope of the model. Defaults to "mmpose". + det_model (str, optional): Config path or alias of detection model. + Defaults to None. + det_weights (str, optional): Path to the checkpoints of detection + model. Defaults to None. + det_cat_ids (int or list[int], optional): Category id for + detection model. Defaults to None. + """ + + preprocess_kwargs: set = {'bbox_thr', 'nms_thr', 'bboxes'} + forward_kwargs: set = {'merge_results'} + visualize_kwargs: set = { + 'return_vis', + 'show', + 'wait_time', + 'draw_bbox', + 'radius', + 'thickness', + 'kpt_thr', + 'vis_out_dir', + 'skeleton_style', + 'draw_heatmap', + 'black_background', + } + postprocess_kwargs: set = {'pred_out_dir'} + + def __init__(self, + model: Union[ModelType, str], + weights: Optional[str] = None, + device: Optional[str] = None, + scope: Optional[str] = 'mmpose', + det_model: Optional[Union[ModelType, str]] = None, + det_weights: Optional[str] = None, + det_cat_ids: Optional[Union[int, Tuple]] = None) -> None: + + init_default_scope(scope) + super().__init__( + model=model, weights=weights, device=device, scope=scope) + self.model = revert_sync_batchnorm(self.model) + + # assign dataset metainfo to self.visualizer + self.visualizer.set_dataset_meta(self.model.dataset_meta) + + # initialize detector for top-down models + if self.cfg.data_mode == 'topdown': + object_type = DATASETS.get(self.cfg.dataset_type).__module__.split( + 'datasets.')[-1].split('.')[0].lower() + + if det_model in ('whole_image', 'whole-image') or \ + (det_model is None and + object_type not in default_det_models): + self.detector = None + + else: + det_scope = 'mmdet' + if det_model is None: + det_info = default_det_models[object_type] + det_model, det_weights, det_cat_ids = det_info[ + 'model'], det_info['weights'], det_info['cat_ids'] + elif os.path.exists(det_model): + det_cfg = Config.fromfile(det_model) + det_scope = det_cfg.default_scope + + if has_mmdet: + self.detector = DetInferencer( + det_model, det_weights, device=device, scope=det_scope) + else: + raise RuntimeError( + 'MMDetection (v3.0.0 or above) is required to build ' + 'inferencers for top-down pose estimation models.') + + if isinstance(det_cat_ids, (tuple, list)): + self.det_cat_ids = det_cat_ids + else: + self.det_cat_ids = (det_cat_ids, ) + + self._video_input = False + + def update_model_visualizer_settings(self, + draw_heatmap: bool = False, + skeleton_style: str = 'mmpose', + **kwargs) -> None: + """Update the settings of models and visualizer according to inference + arguments. + + Args: + draw_heatmaps (bool, optional): Flag to visualize predicted + heatmaps. If not provided, it defaults to False. + skeleton_style (str, optional): Skeleton style selection. Valid + options are 'mmpose' and 'openpose'. Defaults to 'mmpose'. 
+ """ + self.model.test_cfg['output_heatmaps'] = draw_heatmap + + if skeleton_style not in ['mmpose', 'openpose']: + raise ValueError('`skeleton_style` must be either \'mmpose\' ' + 'or \'openpose\'') + + if skeleton_style == 'openpose': + self.visualizer.set_dataset_meta(self.model.dataset_meta, + skeleton_style) + + def preprocess_single(self, + input: InputType, + index: int, + bbox_thr: float = 0.3, + nms_thr: float = 0.3, + bboxes: Union[List[List], List[np.ndarray], + np.ndarray] = []): + """Process a single input into a model-feedable format. + + Args: + input (InputType): Input given by user. + index (int): index of the input + bbox_thr (float): threshold for bounding box detection. + Defaults to 0.3. + nms_thr (float): IoU threshold for bounding box NMS. + Defaults to 0.3. + + Yields: + Any: Data processed by the ``pipeline`` and ``collate_fn``. + """ + + if isinstance(input, str): + data_info = dict(img_path=input) + else: + data_info = dict(img=input, img_path=f'{index}.jpg'.rjust(10, '0')) + data_info.update(self.model.dataset_meta) + + if self.cfg.data_mode == 'topdown': + if self.detector is not None: + det_results = self.detector( + input, return_datasample=True)['predictions'] + pred_instance = det_results[0].pred_instances.cpu().numpy() + bboxes = np.concatenate( + (pred_instance.bboxes, pred_instance.scores[:, None]), + axis=1) + + label_mask = np.zeros(len(bboxes), dtype=np.uint8) + for cat_id in self.det_cat_ids: + label_mask = np.logical_or(label_mask, + pred_instance.labels == cat_id) + + bboxes = bboxes[np.logical_and( + label_mask, pred_instance.scores > bbox_thr)] + bboxes = bboxes[nms(bboxes, nms_thr)] + + data_infos = [] + if len(bboxes) > 0: + for bbox in bboxes: + inst = data_info.copy() + inst['bbox'] = bbox[None, :4] + inst['bbox_score'] = bbox[4:5] + data_infos.append(self.pipeline(inst)) + else: + inst = data_info.copy() + + # get bbox from the image size + if isinstance(input, str): + input = mmcv.imread(input) + h, w = input.shape[:2] + + inst['bbox'] = np.array([[0, 0, w, h]], dtype=np.float32) + inst['bbox_score'] = np.ones(1, dtype=np.float32) + data_infos.append(self.pipeline(inst)) + + else: # bottom-up + data_infos = [self.pipeline(data_info)] + + return data_infos + + @torch.no_grad() + def forward(self, + inputs: Union[dict, tuple], + merge_results: bool = True, + bbox_thr: float = -1): + """Performs a forward pass through the model. + + Args: + inputs (Union[dict, tuple]): The input data to be processed. Can + be either a dictionary or a tuple. + merge_results (bool, optional): Whether to merge data samples, + default to True. This is only applicable when the data_mode + is 'topdown'. + bbox_thr (float, optional): A threshold for the bounding box + scores. Bounding boxes with scores greater than this value + will be retained. Default value is -1 which retains all + bounding boxes. + + Returns: + A list of data samples with prediction instances. + """ + data_samples = self.model.test_step(inputs) + if self.cfg.data_mode == 'topdown' and merge_results: + data_samples = [merge_data_samples(data_samples)] + if bbox_thr > 0: + for ds in data_samples: + if 'bbox_scores' in ds.pred_instances: + ds.pred_instances = ds.pred_instances[ + ds.pred_instances.bbox_scores > bbox_thr] + return data_samples + + def __call__( + self, + inputs: InputsType, + return_datasample: bool = False, + batch_size: int = 1, + out_dir: Optional[str] = None, + **kwargs, + ) -> dict: + """Call the inferencer. + + Args: + inputs (InputsType): Inputs for the inferencer. 
+ return_datasample (bool): Whether to return results as + :obj:`BaseDataElement`. Defaults to False. + batch_size (int): Batch size. Defaults to 1. + out_dir (str, optional): directory to save visualization + results and predictions. Will be overoden if vis_out_dir or + pred_out_dir are given. Defaults to None + **kwargs: Key words arguments passed to :meth:`preprocess`, + :meth:`forward`, :meth:`visualize` and :meth:`postprocess`. + Each key in kwargs should be in the corresponding set of + ``preprocess_kwargs``, ``forward_kwargs``, + ``visualize_kwargs`` and ``postprocess_kwargs``. + + Returns: + dict: Inference and visualization results. + """ + if out_dir is not None: + if 'vis_out_dir' not in kwargs: + kwargs['vis_out_dir'] = f'{out_dir}/visualizations' + if 'pred_out_dir' not in kwargs: + kwargs['pred_out_dir'] = f'{out_dir}/predictions' + + ( + preprocess_kwargs, + forward_kwargs, + visualize_kwargs, + postprocess_kwargs, + ) = self._dispatch_kwargs(**kwargs) + + self.update_model_visualizer_settings(**kwargs) + + # preprocessing + if isinstance(inputs, str) and inputs.startswith('webcam'): + inputs = self._get_webcam_inputs(inputs) + batch_size = 1 + if not visualize_kwargs.get('show', False): + warnings.warn('The display mode is closed when using webcam ' + 'input. It will be turned on automatically.') + visualize_kwargs['show'] = True + else: + inputs = self._inputs_to_list(inputs) + + forward_kwargs['bbox_thr'] = preprocess_kwargs.get('bbox_thr', -1) + inputs = self.preprocess( + inputs, batch_size=batch_size, **preprocess_kwargs) + + preds = [] + + for proc_inputs, ori_inputs in inputs: + preds = self.forward(proc_inputs, **forward_kwargs) + + visualization = self.visualize(ori_inputs, preds, + **visualize_kwargs) + results = self.postprocess(preds, visualization, return_datasample, + **postprocess_kwargs) + yield results + + if self._video_input: + self._finalize_video_processing( + postprocess_kwargs.get('pred_out_dir', '')) diff --git a/mmpose/apis/inferencers/pose3d_inferencer.py b/mmpose/apis/inferencers/pose3d_inferencer.py index 0fe66ac72b..a2eb8f3935 100644 --- a/mmpose/apis/inferencers/pose3d_inferencer.py +++ b/mmpose/apis/inferencers/pose3d_inferencer.py @@ -1,518 +1,518 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
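# --- Editor's note: illustrative usage sketch, not part of the patch above. ---
# A minimal example of driving the Pose2DInferencer added in the hunk above. It
# assumes an MMPose installation where the 'human' alias resolves through the
# metafile and MMDetection (>= 3.0.0) is installed for the top-down detector;
# 'demo.jpg' and 'outputs' are placeholder paths.
from mmpose.apis.inferencers.pose2d_inferencer import Pose2DInferencer

inferencer = Pose2DInferencer(model='human')   # weights resolved from the metafile when omitted
results = inferencer(
    'demo.jpg',
    out_dir='outputs',   # expands to outputs/visualizations and outputs/predictions
    bbox_thr=0.3,        # preprocess kwarg, also reused as the forward-pass bbox_thr
    draw_bbox=True,      # visualize kwarg
)
# __call__ is a generator: it yields one result dict per batch, each carrying
# the pose predictions under the 'predictions' key.
for result in results:
    print(result['predictions'])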
-import os -import warnings -from collections import defaultdict -from functools import partial -from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union - -import cv2 -import mmcv -import numpy as np -import torch -from mmengine.config import Config, ConfigDict -from mmengine.fileio import join_path -from mmengine.infer.infer import ModelType -from mmengine.model import revert_sync_batchnorm -from mmengine.registry import init_default_scope -from mmengine.structures import InstanceData -from mmengine.utils import mkdir_or_exist - -from mmpose.apis import (_track_by_iou, _track_by_oks, collate_pose_sequence, - convert_keypoint_definition, extract_pose_sequence) -from mmpose.registry import INFERENCERS -from mmpose.structures import PoseDataSample, merge_data_samples -from .base_mmpose_inferencer import BaseMMPoseInferencer -from .pose2d_inferencer import Pose2DInferencer - -InstanceList = List[InstanceData] -InputType = Union[str, np.ndarray] -InputsType = Union[InputType, Sequence[InputType]] -PredType = Union[InstanceData, InstanceList] -ImgType = Union[np.ndarray, Sequence[np.ndarray]] -ConfigType = Union[Config, ConfigDict] -ResType = Union[Dict, List[Dict], InstanceData, List[InstanceData]] - - -@INFERENCERS.register_module(name='pose-estimation-3d') -@INFERENCERS.register_module() -class Pose3DInferencer(BaseMMPoseInferencer): - """The inferencer for 3D pose estimation. - - Args: - model (str, optional): Pretrained 2D pose estimation algorithm. - It's the path to the config file or the model name defined in - metafile. For example, it could be: - - - model alias, e.g. ``'body'``, - - config name, e.g. ``'simcc_res50_8xb64-210e_coco-256x192'``, - - config path - - Defaults to ``None``. - weights (str, optional): Path to the checkpoint. If it is not - specified and "model" is a model name of metafile, the weights - will be loaded from metafile. Defaults to None. - device (str, optional): Device to run inference. If None, the - available device will be automatically used. Defaults to None. - scope (str, optional): The scope of the model. Defaults to "mmpose". - det_model (str, optional): Config path or alias of detection model. - Defaults to None. - det_weights (str, optional): Path to the checkpoints of detection - model. Defaults to None. - det_cat_ids (int or list[int], optional): Category id for - detection model. Defaults to None. - output_heatmaps (bool, optional): Flag to visualize predicted - heatmaps. If set to None, the default setting from the model - config will be used. Default is None. 
- """ - - preprocess_kwargs: set = { - 'bbox_thr', 'nms_thr', 'bboxes', 'use_oks_tracking', 'tracking_thr', - 'norm_pose_2d' - } - forward_kwargs: set = {'rebase_keypoint_height'} - visualize_kwargs: set = { - 'return_vis', - 'show', - 'wait_time', - 'draw_bbox', - 'radius', - 'thickness', - 'kpt_thr', - 'vis_out_dir', - } - postprocess_kwargs: set = {'pred_out_dir'} - - def __init__(self, - model: Union[ModelType, str], - weights: Optional[str] = None, - pose2d_model: Optional[Union[ModelType, str]] = None, - pose2d_weights: Optional[str] = None, - device: Optional[str] = None, - scope: Optional[str] = 'mmpose', - det_model: Optional[Union[ModelType, str]] = None, - det_weights: Optional[str] = None, - det_cat_ids: Optional[Union[int, Tuple]] = None) -> None: - - init_default_scope(scope) - super().__init__( - model=model, weights=weights, device=device, scope=scope) - self.model = revert_sync_batchnorm(self.model) - - # assign dataset metainfo to self.visualizer - self.visualizer.set_dataset_meta(self.model.dataset_meta) - - # initialize 2d pose estimator - self.pose2d_model = Pose2DInferencer( - pose2d_model if pose2d_model else 'human', pose2d_weights, device, - scope, det_model, det_weights, det_cat_ids) - - # helper functions - self._keypoint_converter = partial( - convert_keypoint_definition, - pose_det_dataset=self.pose2d_model.cfg.test_dataloader. - dataset['type'], - pose_lift_dataset=self.cfg.test_dataloader.dataset['type'], - ) - - self._pose_seq_extractor = partial( - extract_pose_sequence, - causal=self.cfg.test_dataloader.dataset.get('causal', False), - seq_len=self.cfg.test_dataloader.dataset.get('seq_len', 1), - step=self.cfg.test_dataloader.dataset.get('seq_step', 1)) - - self._video_input = False - self._buffer = defaultdict(list) - - def preprocess_single(self, - input: InputType, - index: int, - bbox_thr: float = 0.3, - nms_thr: float = 0.3, - bboxes: Union[List[List], List[np.ndarray], - np.ndarray] = [], - use_oks_tracking: bool = False, - tracking_thr: float = 0.3, - norm_pose_2d: bool = False): - """Process a single input into a model-feedable format. - - Args: - input (InputType): The input provided by the user. - index (int): The index of the input. - bbox_thr (float, optional): The threshold for bounding box - detection. Defaults to 0.3. - nms_thr (float, optional): The Intersection over Union (IoU) - threshold for bounding box Non-Maximum Suppression (NMS). - Defaults to 0.3. - bboxes (Union[List[List], List[np.ndarray], np.ndarray]): - The bounding boxes to use. Defaults to []. - use_oks_tracking (bool, optional): A flag that indicates - whether OKS-based tracking should be used. Defaults to False. - tracking_thr (float, optional): The threshold for tracking. - Defaults to 0.3. - norm_pose_2d (bool, optional): A flag that indicates whether 2D - pose normalization should be used. Defaults to False. - - Yields: - Any: The data processed by the pipeline and collate_fn. - - This method first calculates 2D keypoints using the provided - pose2d_model. The method also performs instance matching, which - can use either OKS-based tracking or IOU-based tracking. 
- """ - - # calculate 2d keypoints - results_pose2d = next( - self.pose2d_model( - input, - bbox_thr=bbox_thr, - nms_thr=nms_thr, - bboxes=bboxes, - merge_results=False, - return_datasample=True))['predictions'] - - for ds in results_pose2d: - ds.pred_instances.set_field( - (ds.pred_instances.bboxes[..., 2:] - - ds.pred_instances.bboxes[..., :2]).prod(-1), 'areas') - - if not self._video_input: - height, width = results_pose2d[0].metainfo['ori_shape'] - - # Clear the buffer if inputs are individual images to prevent - # carryover effects from previous images - self._buffer.clear() - - else: - height = self.video_info['height'] - width = self.video_info['width'] - img_path = results_pose2d[0].metainfo['img_path'] - - # instance matching - if use_oks_tracking: - _track = partial(_track_by_oks) - else: - _track = _track_by_iou - - for result in results_pose2d: - track_id, self._buffer['results_pose2d_last'], _ = _track( - result, self._buffer['results_pose2d_last'], tracking_thr) - if track_id == -1: - pred_instances = result.pred_instances.cpu().numpy() - keypoints = pred_instances.keypoints - if np.count_nonzero(keypoints[:, :, 1]) >= 3: - next_id = self._buffer.get('next_id', 0) - result.set_field(next_id, 'track_id') - self._buffer['next_id'] = next_id + 1 - else: - # If the number of keypoints detected is small, - # delete that person instance. - result.pred_instances.keypoints[..., 1] = -10 - result.pred_instances.bboxes *= 0 - result.set_field(-1, 'track_id') - else: - result.set_field(track_id, 'track_id') - self._buffer['pose2d_results'] = merge_data_samples(results_pose2d) - - # convert keypoints - results_pose2d_converted = [ds.cpu().numpy() for ds in results_pose2d] - for ds in results_pose2d_converted: - ds.pred_instances.keypoints = self._keypoint_converter( - ds.pred_instances.keypoints) - self._buffer['pose_est_results_list'].append(results_pose2d_converted) - - # extract and pad input pose2d sequence - pose_results_2d = self._pose_seq_extractor( - self._buffer['pose_est_results_list'], - frame_idx=index if self._video_input else 0) - causal = self.cfg.test_dataloader.dataset.get('causal', False) - target_idx = -1 if causal else len(pose_results_2d) // 2 - - stats_info = self.model.dataset_meta.get('stats_info', {}) - bbox_center = stats_info.get('bbox_center', None) - bbox_scale = stats_info.get('bbox_scale', None) - - for i, pose_res in enumerate(pose_results_2d): - for j, data_sample in enumerate(pose_res): - kpts = data_sample.pred_instances.keypoints - bboxes = data_sample.pred_instances.bboxes - keypoints = [] - for k in range(len(kpts)): - kpt = kpts[k] - if norm_pose_2d: - bbox = bboxes[k] - center = np.array([[(bbox[0] + bbox[2]) / 2, - (bbox[1] + bbox[3]) / 2]]) - scale = max(bbox[2] - bbox[0], bbox[3] - bbox[1]) - keypoints.append((kpt[:, :2] - center) / scale * - bbox_scale + bbox_center) - else: - keypoints.append(kpt[:, :2]) - pose_results_2d[i][j].pred_instances.keypoints = np.array( - keypoints) - pose_sequences_2d = collate_pose_sequence(pose_results_2d, True, - target_idx) - if not pose_sequences_2d: - return [] - - data_list = [] - for i, pose_seq in enumerate(pose_sequences_2d): - data_info = dict() - - keypoints_2d = pose_seq.pred_instances.keypoints - keypoints_2d = np.squeeze( - keypoints_2d, - axis=0) if keypoints_2d.ndim == 4 else keypoints_2d - - T, K, C = keypoints_2d.shape - - data_info['keypoints'] = keypoints_2d - data_info['keypoints_visible'] = np.ones(( - T, - K, - ), - dtype=np.float32) - data_info['lifting_target'] = np.zeros((K, 3), 
dtype=np.float32) - data_info['lifting_target_visible'] = np.ones((K, 1), - dtype=np.float32) - data_info['camera_param'] = dict(w=width, h=height) - - data_info.update(self.model.dataset_meta) - data_info = self.pipeline(data_info) - data_info['data_samples'].set_field( - img_path, 'img_path', field_type='metainfo') - data_list.append(data_info) - - return data_list - - @torch.no_grad() - def forward(self, - inputs: Union[dict, tuple], - rebase_keypoint_height: bool = False): - """Perform forward pass through the model and process the results. - - Args: - inputs (Union[dict, tuple]): The inputs for the model. - rebase_keypoint_height (bool, optional): Flag to rebase the - height of the keypoints (z-axis). Defaults to False. - - Returns: - list: A list of data samples, each containing the model's output - results. - """ - - pose_lift_results = self.model.test_step(inputs) - - # Post-processing of pose estimation results - pose_est_results_converted = self._buffer['pose_est_results_list'][-1] - for idx, pose_lift_res in enumerate(pose_lift_results): - # Update track_id from the pose estimation results - pose_lift_res.track_id = pose_est_results_converted[idx].get( - 'track_id', 1e4) - - # Invert x and z values of the keypoints - keypoints = pose_lift_res.pred_instances.keypoints - keypoints = keypoints[..., [0, 2, 1]] - keypoints[..., 0] = -keypoints[..., 0] - keypoints[..., 2] = -keypoints[..., 2] - - # If rebase_keypoint_height is True, adjust z-axis values - if rebase_keypoint_height: - keypoints[..., 2] -= np.min( - keypoints[..., 2], axis=-1, keepdims=True) - - pose_lift_results[idx].pred_instances.keypoints = keypoints - - pose_lift_results = sorted( - pose_lift_results, key=lambda x: x.get('track_id', 1e4)) - - data_samples = [merge_data_samples(pose_lift_results)] - return data_samples - - def __call__( - self, - inputs: InputsType, - return_datasample: bool = False, - batch_size: int = 1, - out_dir: Optional[str] = None, - **kwargs, - ) -> dict: - """Call the inferencer. - - Args: - inputs (InputsType): Inputs for the inferencer. - return_datasample (bool): Whether to return results as - :obj:`BaseDataElement`. Defaults to False. - batch_size (int): Batch size. Defaults to 1. - out_dir (str, optional): directory to save visualization - results and predictions. Will be overoden if vis_out_dir or - pred_out_dir are given. Defaults to None - **kwargs: Key words arguments passed to :meth:`preprocess`, - :meth:`forward`, :meth:`visualize` and :meth:`postprocess`. - Each key in kwargs should be in the corresponding set of - ``preprocess_kwargs``, ``forward_kwargs``, - ``visualize_kwargs`` and ``postprocess_kwargs``. - - Returns: - dict: Inference and visualization results. - """ - if out_dir is not None: - if 'vis_out_dir' not in kwargs: - kwargs['vis_out_dir'] = f'{out_dir}/visualizations' - if 'pred_out_dir' not in kwargs: - kwargs['pred_out_dir'] = f'{out_dir}/predictions' - - ( - preprocess_kwargs, - forward_kwargs, - visualize_kwargs, - postprocess_kwargs, - ) = self._dispatch_kwargs(**kwargs) - - self.update_model_visualizer_settings(**kwargs) - - # preprocessing - if isinstance(inputs, str) and inputs.startswith('webcam'): - inputs = self._get_webcam_inputs(inputs) - batch_size = 1 - if not visualize_kwargs.get('show', False): - warnings.warn('The display mode is closed when using webcam ' - 'input. 
It will be turned on automatically.') - visualize_kwargs['show'] = True - else: - inputs = self._inputs_to_list(inputs) - - inputs = self.preprocess( - inputs, batch_size=batch_size, **preprocess_kwargs) - - preds = [] - - for proc_inputs, ori_inputs in inputs: - preds = self.forward(proc_inputs, **forward_kwargs) - - visualization = self.visualize(ori_inputs, preds, - **visualize_kwargs) - results = self.postprocess(preds, visualization, return_datasample, - **postprocess_kwargs) - yield results - - if self._video_input: - self._finalize_video_processing( - postprocess_kwargs.get('pred_out_dir', '')) - self._buffer.clear() - - def visualize(self, - inputs: list, - preds: List[PoseDataSample], - return_vis: bool = False, - show: bool = False, - draw_bbox: bool = False, - wait_time: float = 0, - radius: int = 3, - thickness: int = 1, - kpt_thr: float = 0.3, - vis_out_dir: str = '', - window_name: str = '', - window_close_event_handler: Optional[Callable] = None - ) -> List[np.ndarray]: - """Visualize predictions. - - Args: - inputs (list): Inputs preprocessed by :meth:`_inputs_to_list`. - preds (Any): Predictions of the model. - return_vis (bool): Whether to return images with predicted results. - show (bool): Whether to display the image in a popup window. - Defaults to False. - wait_time (float): The interval of show (ms). Defaults to 0 - draw_bbox (bool): Whether to draw the bounding boxes. - Defaults to False - radius (int): Keypoint radius for visualization. Defaults to 3 - thickness (int): Link thickness for visualization. Defaults to 1 - kpt_thr (float): The threshold to visualize the keypoints. - Defaults to 0.3 - vis_out_dir (str, optional): Directory to save visualization - results w/o predictions. If left as empty, no file will - be saved. Defaults to ''. - window_name (str, optional): Title of display window. - window_close_event_handler (callable, optional): - - Returns: - List[np.ndarray]: Visualization results. - """ - if (not return_vis) and (not show) and (not vis_out_dir): - return - - if getattr(self, 'visualizer', None) is None: - raise ValueError('Visualization needs the "visualizer" term' - 'defined in the config, but got None.') - - self.visualizer.radius = radius - self.visualizer.line_width = thickness - det_kpt_color = self.pose2d_model.visualizer.kpt_color - det_dataset_skeleton = self.pose2d_model.visualizer.skeleton - det_dataset_link_color = self.pose2d_model.visualizer.link_color - self.visualizer.det_kpt_color = det_kpt_color - self.visualizer.det_dataset_skeleton = det_dataset_skeleton - self.visualizer.det_dataset_link_color = det_dataset_link_color - - results = [] - - for single_input, pred in zip(inputs, preds): - if isinstance(single_input, str): - img = mmcv.imread(single_input, channel_order='rgb') - elif isinstance(single_input, np.ndarray): - img = mmcv.bgr2rgb(single_input) - else: - raise ValueError('Unsupported input type: ' - f'{type(single_input)}') - - # since visualization and inference utilize the same process, - # the wait time is reduced when a video input is utilized, - # thereby eliminating the issue of inference getting stuck. 
- wait_time = 1e-5 if self._video_input else wait_time - - visualization = self.visualizer.add_datasample( - window_name, - img, - data_sample=pred, - det_data_sample=self._buffer['pose2d_results'], - draw_gt=False, - draw_bbox=draw_bbox, - show=show, - wait_time=wait_time, - kpt_thr=kpt_thr) - results.append(visualization) - - if vis_out_dir: - out_img = mmcv.rgb2bgr(visualization) - _, file_extension = os.path.splitext(vis_out_dir) - if file_extension: - dir_name = os.path.dirname(vis_out_dir) - file_name = os.path.basename(vis_out_dir) - else: - dir_name = vis_out_dir - file_name = None - mkdir_or_exist(dir_name) - - if self._video_input: - - if self.video_info['writer'] is None: - fourcc = cv2.VideoWriter_fourcc(*'mp4v') - if file_name is None: - file_name = os.path.basename( - self.video_info['name']) - out_file = join_path(dir_name, file_name) - self.video_info['writer'] = cv2.VideoWriter( - out_file, fourcc, self.video_info['fps'], - (visualization.shape[1], visualization.shape[0])) - self.video_info['writer'].write(out_img) - - else: - img_name = os.path.basename(pred.metainfo['img_path']) - file_name = file_name if file_name else img_name - out_file = join_path(dir_name, file_name) - mmcv.imwrite(out_img, out_file) - - if return_vis: - return results - else: - return [] +# Copyright (c) OpenMMLab. All rights reserved. +import os +import warnings +from collections import defaultdict +from functools import partial +from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union + +import cv2 +import mmcv +import numpy as np +import torch +from mmengine.config import Config, ConfigDict +from mmengine.fileio import join_path +from mmengine.infer.infer import ModelType +from mmengine.model import revert_sync_batchnorm +from mmengine.registry import init_default_scope +from mmengine.structures import InstanceData +from mmengine.utils import mkdir_or_exist + +from mmpose.apis import (_track_by_iou, _track_by_oks, collate_pose_sequence, + convert_keypoint_definition, extract_pose_sequence) +from mmpose.registry import INFERENCERS +from mmpose.structures import PoseDataSample, merge_data_samples +from .base_mmpose_inferencer import BaseMMPoseInferencer +from .pose2d_inferencer import Pose2DInferencer + +InstanceList = List[InstanceData] +InputType = Union[str, np.ndarray] +InputsType = Union[InputType, Sequence[InputType]] +PredType = Union[InstanceData, InstanceList] +ImgType = Union[np.ndarray, Sequence[np.ndarray]] +ConfigType = Union[Config, ConfigDict] +ResType = Union[Dict, List[Dict], InstanceData, List[InstanceData]] + + +@INFERENCERS.register_module(name='pose-estimation-3d') +@INFERENCERS.register_module() +class Pose3DInferencer(BaseMMPoseInferencer): + """The inferencer for 3D pose estimation. + + Args: + model (str, optional): Pretrained 2D pose estimation algorithm. + It's the path to the config file or the model name defined in + metafile. For example, it could be: + + - model alias, e.g. ``'body'``, + - config name, e.g. ``'simcc_res50_8xb64-210e_coco-256x192'``, + - config path + + Defaults to ``None``. + weights (str, optional): Path to the checkpoint. If it is not + specified and "model" is a model name of metafile, the weights + will be loaded from metafile. Defaults to None. + device (str, optional): Device to run inference. If None, the + available device will be automatically used. Defaults to None. + scope (str, optional): The scope of the model. Defaults to "mmpose". + det_model (str, optional): Config path or alias of detection model. + Defaults to None. 
+ det_weights (str, optional): Path to the checkpoints of detection + model. Defaults to None. + det_cat_ids (int or list[int], optional): Category id for + detection model. Defaults to None. + output_heatmaps (bool, optional): Flag to visualize predicted + heatmaps. If set to None, the default setting from the model + config will be used. Default is None. + """ + + preprocess_kwargs: set = { + 'bbox_thr', 'nms_thr', 'bboxes', 'use_oks_tracking', 'tracking_thr', + 'norm_pose_2d' + } + forward_kwargs: set = {'rebase_keypoint_height'} + visualize_kwargs: set = { + 'return_vis', + 'show', + 'wait_time', + 'draw_bbox', + 'radius', + 'thickness', + 'kpt_thr', + 'vis_out_dir', + } + postprocess_kwargs: set = {'pred_out_dir'} + + def __init__(self, + model: Union[ModelType, str], + weights: Optional[str] = None, + pose2d_model: Optional[Union[ModelType, str]] = None, + pose2d_weights: Optional[str] = None, + device: Optional[str] = None, + scope: Optional[str] = 'mmpose', + det_model: Optional[Union[ModelType, str]] = None, + det_weights: Optional[str] = None, + det_cat_ids: Optional[Union[int, Tuple]] = None) -> None: + + init_default_scope(scope) + super().__init__( + model=model, weights=weights, device=device, scope=scope) + self.model = revert_sync_batchnorm(self.model) + + # assign dataset metainfo to self.visualizer + self.visualizer.set_dataset_meta(self.model.dataset_meta) + + # initialize 2d pose estimator + self.pose2d_model = Pose2DInferencer( + pose2d_model if pose2d_model else 'human', pose2d_weights, device, + scope, det_model, det_weights, det_cat_ids) + + # helper functions + self._keypoint_converter = partial( + convert_keypoint_definition, + pose_det_dataset=self.pose2d_model.cfg.test_dataloader. + dataset['type'], + pose_lift_dataset=self.cfg.test_dataloader.dataset['type'], + ) + + self._pose_seq_extractor = partial( + extract_pose_sequence, + causal=self.cfg.test_dataloader.dataset.get('causal', False), + seq_len=self.cfg.test_dataloader.dataset.get('seq_len', 1), + step=self.cfg.test_dataloader.dataset.get('seq_step', 1)) + + self._video_input = False + self._buffer = defaultdict(list) + + def preprocess_single(self, + input: InputType, + index: int, + bbox_thr: float = 0.3, + nms_thr: float = 0.3, + bboxes: Union[List[List], List[np.ndarray], + np.ndarray] = [], + use_oks_tracking: bool = False, + tracking_thr: float = 0.3, + norm_pose_2d: bool = False): + """Process a single input into a model-feedable format. + + Args: + input (InputType): The input provided by the user. + index (int): The index of the input. + bbox_thr (float, optional): The threshold for bounding box + detection. Defaults to 0.3. + nms_thr (float, optional): The Intersection over Union (IoU) + threshold for bounding box Non-Maximum Suppression (NMS). + Defaults to 0.3. + bboxes (Union[List[List], List[np.ndarray], np.ndarray]): + The bounding boxes to use. Defaults to []. + use_oks_tracking (bool, optional): A flag that indicates + whether OKS-based tracking should be used. Defaults to False. + tracking_thr (float, optional): The threshold for tracking. + Defaults to 0.3. + norm_pose_2d (bool, optional): A flag that indicates whether 2D + pose normalization should be used. Defaults to False. + + Yields: + Any: The data processed by the pipeline and collate_fn. + + This method first calculates 2D keypoints using the provided + pose2d_model. The method also performs instance matching, which + can use either OKS-based tracking or IOU-based tracking. 
+ """ + + # calculate 2d keypoints + results_pose2d = next( + self.pose2d_model( + input, + bbox_thr=bbox_thr, + nms_thr=nms_thr, + bboxes=bboxes, + merge_results=False, + return_datasample=True))['predictions'] + + for ds in results_pose2d: + ds.pred_instances.set_field( + (ds.pred_instances.bboxes[..., 2:] - + ds.pred_instances.bboxes[..., :2]).prod(-1), 'areas') + + if not self._video_input: + height, width = results_pose2d[0].metainfo['ori_shape'] + + # Clear the buffer if inputs are individual images to prevent + # carryover effects from previous images + self._buffer.clear() + + else: + height = self.video_info['height'] + width = self.video_info['width'] + img_path = results_pose2d[0].metainfo['img_path'] + + # instance matching + if use_oks_tracking: + _track = partial(_track_by_oks) + else: + _track = _track_by_iou + + for result in results_pose2d: + track_id, self._buffer['results_pose2d_last'], _ = _track( + result, self._buffer['results_pose2d_last'], tracking_thr) + if track_id == -1: + pred_instances = result.pred_instances.cpu().numpy() + keypoints = pred_instances.keypoints + if np.count_nonzero(keypoints[:, :, 1]) >= 3: + next_id = self._buffer.get('next_id', 0) + result.set_field(next_id, 'track_id') + self._buffer['next_id'] = next_id + 1 + else: + # If the number of keypoints detected is small, + # delete that person instance. + result.pred_instances.keypoints[..., 1] = -10 + result.pred_instances.bboxes *= 0 + result.set_field(-1, 'track_id') + else: + result.set_field(track_id, 'track_id') + self._buffer['pose2d_results'] = merge_data_samples(results_pose2d) + + # convert keypoints + results_pose2d_converted = [ds.cpu().numpy() for ds in results_pose2d] + for ds in results_pose2d_converted: + ds.pred_instances.keypoints = self._keypoint_converter( + ds.pred_instances.keypoints) + self._buffer['pose_est_results_list'].append(results_pose2d_converted) + + # extract and pad input pose2d sequence + pose_results_2d = self._pose_seq_extractor( + self._buffer['pose_est_results_list'], + frame_idx=index if self._video_input else 0) + causal = self.cfg.test_dataloader.dataset.get('causal', False) + target_idx = -1 if causal else len(pose_results_2d) // 2 + + stats_info = self.model.dataset_meta.get('stats_info', {}) + bbox_center = stats_info.get('bbox_center', None) + bbox_scale = stats_info.get('bbox_scale', None) + + for i, pose_res in enumerate(pose_results_2d): + for j, data_sample in enumerate(pose_res): + kpts = data_sample.pred_instances.keypoints + bboxes = data_sample.pred_instances.bboxes + keypoints = [] + for k in range(len(kpts)): + kpt = kpts[k] + if norm_pose_2d: + bbox = bboxes[k] + center = np.array([[(bbox[0] + bbox[2]) / 2, + (bbox[1] + bbox[3]) / 2]]) + scale = max(bbox[2] - bbox[0], bbox[3] - bbox[1]) + keypoints.append((kpt[:, :2] - center) / scale * + bbox_scale + bbox_center) + else: + keypoints.append(kpt[:, :2]) + pose_results_2d[i][j].pred_instances.keypoints = np.array( + keypoints) + pose_sequences_2d = collate_pose_sequence(pose_results_2d, True, + target_idx) + if not pose_sequences_2d: + return [] + + data_list = [] + for i, pose_seq in enumerate(pose_sequences_2d): + data_info = dict() + + keypoints_2d = pose_seq.pred_instances.keypoints + keypoints_2d = np.squeeze( + keypoints_2d, + axis=0) if keypoints_2d.ndim == 4 else keypoints_2d + + T, K, C = keypoints_2d.shape + + data_info['keypoints'] = keypoints_2d + data_info['keypoints_visible'] = np.ones(( + T, + K, + ), + dtype=np.float32) + data_info['lifting_target'] = np.zeros((K, 3), 
dtype=np.float32) + data_info['lifting_target_visible'] = np.ones((K, 1), + dtype=np.float32) + data_info['camera_param'] = dict(w=width, h=height) + + data_info.update(self.model.dataset_meta) + data_info = self.pipeline(data_info) + data_info['data_samples'].set_field( + img_path, 'img_path', field_type='metainfo') + data_list.append(data_info) + + return data_list + + @torch.no_grad() + def forward(self, + inputs: Union[dict, tuple], + rebase_keypoint_height: bool = False): + """Perform forward pass through the model and process the results. + + Args: + inputs (Union[dict, tuple]): The inputs for the model. + rebase_keypoint_height (bool, optional): Flag to rebase the + height of the keypoints (z-axis). Defaults to False. + + Returns: + list: A list of data samples, each containing the model's output + results. + """ + + pose_lift_results = self.model.test_step(inputs) + + # Post-processing of pose estimation results + pose_est_results_converted = self._buffer['pose_est_results_list'][-1] + for idx, pose_lift_res in enumerate(pose_lift_results): + # Update track_id from the pose estimation results + pose_lift_res.track_id = pose_est_results_converted[idx].get( + 'track_id', 1e4) + + # Invert x and z values of the keypoints + keypoints = pose_lift_res.pred_instances.keypoints + keypoints = keypoints[..., [0, 2, 1]] + keypoints[..., 0] = -keypoints[..., 0] + keypoints[..., 2] = -keypoints[..., 2] + + # If rebase_keypoint_height is True, adjust z-axis values + if rebase_keypoint_height: + keypoints[..., 2] -= np.min( + keypoints[..., 2], axis=-1, keepdims=True) + + pose_lift_results[idx].pred_instances.keypoints = keypoints + + pose_lift_results = sorted( + pose_lift_results, key=lambda x: x.get('track_id', 1e4)) + + data_samples = [merge_data_samples(pose_lift_results)] + return data_samples + + def __call__( + self, + inputs: InputsType, + return_datasample: bool = False, + batch_size: int = 1, + out_dir: Optional[str] = None, + **kwargs, + ) -> dict: + """Call the inferencer. + + Args: + inputs (InputsType): Inputs for the inferencer. + return_datasample (bool): Whether to return results as + :obj:`BaseDataElement`. Defaults to False. + batch_size (int): Batch size. Defaults to 1. + out_dir (str, optional): directory to save visualization + results and predictions. Will be overoden if vis_out_dir or + pred_out_dir are given. Defaults to None + **kwargs: Key words arguments passed to :meth:`preprocess`, + :meth:`forward`, :meth:`visualize` and :meth:`postprocess`. + Each key in kwargs should be in the corresponding set of + ``preprocess_kwargs``, ``forward_kwargs``, + ``visualize_kwargs`` and ``postprocess_kwargs``. + + Returns: + dict: Inference and visualization results. + """ + if out_dir is not None: + if 'vis_out_dir' not in kwargs: + kwargs['vis_out_dir'] = f'{out_dir}/visualizations' + if 'pred_out_dir' not in kwargs: + kwargs['pred_out_dir'] = f'{out_dir}/predictions' + + ( + preprocess_kwargs, + forward_kwargs, + visualize_kwargs, + postprocess_kwargs, + ) = self._dispatch_kwargs(**kwargs) + + self.update_model_visualizer_settings(**kwargs) + + # preprocessing + if isinstance(inputs, str) and inputs.startswith('webcam'): + inputs = self._get_webcam_inputs(inputs) + batch_size = 1 + if not visualize_kwargs.get('show', False): + warnings.warn('The display mode is closed when using webcam ' + 'input. 
It will be turned on automatically.') + visualize_kwargs['show'] = True + else: + inputs = self._inputs_to_list(inputs) + + inputs = self.preprocess( + inputs, batch_size=batch_size, **preprocess_kwargs) + + preds = [] + + for proc_inputs, ori_inputs in inputs: + preds = self.forward(proc_inputs, **forward_kwargs) + + visualization = self.visualize(ori_inputs, preds, + **visualize_kwargs) + results = self.postprocess(preds, visualization, return_datasample, + **postprocess_kwargs) + yield results + + if self._video_input: + self._finalize_video_processing( + postprocess_kwargs.get('pred_out_dir', '')) + self._buffer.clear() + + def visualize(self, + inputs: list, + preds: List[PoseDataSample], + return_vis: bool = False, + show: bool = False, + draw_bbox: bool = False, + wait_time: float = 0, + radius: int = 3, + thickness: int = 1, + kpt_thr: float = 0.3, + vis_out_dir: str = '', + window_name: str = '', + window_close_event_handler: Optional[Callable] = None + ) -> List[np.ndarray]: + """Visualize predictions. + + Args: + inputs (list): Inputs preprocessed by :meth:`_inputs_to_list`. + preds (Any): Predictions of the model. + return_vis (bool): Whether to return images with predicted results. + show (bool): Whether to display the image in a popup window. + Defaults to False. + wait_time (float): The interval of show (ms). Defaults to 0 + draw_bbox (bool): Whether to draw the bounding boxes. + Defaults to False + radius (int): Keypoint radius for visualization. Defaults to 3 + thickness (int): Link thickness for visualization. Defaults to 1 + kpt_thr (float): The threshold to visualize the keypoints. + Defaults to 0.3 + vis_out_dir (str, optional): Directory to save visualization + results w/o predictions. If left as empty, no file will + be saved. Defaults to ''. + window_name (str, optional): Title of display window. + window_close_event_handler (callable, optional): + + Returns: + List[np.ndarray]: Visualization results. + """ + if (not return_vis) and (not show) and (not vis_out_dir): + return + + if getattr(self, 'visualizer', None) is None: + raise ValueError('Visualization needs the "visualizer" term' + 'defined in the config, but got None.') + + self.visualizer.radius = radius + self.visualizer.line_width = thickness + det_kpt_color = self.pose2d_model.visualizer.kpt_color + det_dataset_skeleton = self.pose2d_model.visualizer.skeleton + det_dataset_link_color = self.pose2d_model.visualizer.link_color + self.visualizer.det_kpt_color = det_kpt_color + self.visualizer.det_dataset_skeleton = det_dataset_skeleton + self.visualizer.det_dataset_link_color = det_dataset_link_color + + results = [] + + for single_input, pred in zip(inputs, preds): + if isinstance(single_input, str): + img = mmcv.imread(single_input, channel_order='rgb') + elif isinstance(single_input, np.ndarray): + img = mmcv.bgr2rgb(single_input) + else: + raise ValueError('Unsupported input type: ' + f'{type(single_input)}') + + # since visualization and inference utilize the same process, + # the wait time is reduced when a video input is utilized, + # thereby eliminating the issue of inference getting stuck. 
+ wait_time = 1e-5 if self._video_input else wait_time + + visualization = self.visualizer.add_datasample( + window_name, + img, + data_sample=pred, + det_data_sample=self._buffer['pose2d_results'], + draw_gt=False, + draw_bbox=draw_bbox, + show=show, + wait_time=wait_time, + kpt_thr=kpt_thr) + results.append(visualization) + + if vis_out_dir: + out_img = mmcv.rgb2bgr(visualization) + _, file_extension = os.path.splitext(vis_out_dir) + if file_extension: + dir_name = os.path.dirname(vis_out_dir) + file_name = os.path.basename(vis_out_dir) + else: + dir_name = vis_out_dir + file_name = None + mkdir_or_exist(dir_name) + + if self._video_input: + + if self.video_info['writer'] is None: + fourcc = cv2.VideoWriter_fourcc(*'mp4v') + if file_name is None: + file_name = os.path.basename( + self.video_info['name']) + out_file = join_path(dir_name, file_name) + self.video_info['writer'] = cv2.VideoWriter( + out_file, fourcc, self.video_info['fps'], + (visualization.shape[1], visualization.shape[0])) + self.video_info['writer'].write(out_img) + + else: + img_name = os.path.basename(pred.metainfo['img_path']) + file_name = file_name if file_name else img_name + out_file = join_path(dir_name, file_name) + mmcv.imwrite(out_img, out_file) + + if return_vis: + return results + else: + return [] diff --git a/mmpose/apis/inferencers/utils/__init__.py b/mmpose/apis/inferencers/utils/__init__.py index 5cc40535b0..654685dbd0 100644 --- a/mmpose/apis/inferencers/utils/__init__.py +++ b/mmpose/apis/inferencers/utils/__init__.py @@ -1,5 +1,5 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .default_det_models import default_det_models -from .get_model_alias import get_model_aliases - -__all__ = ['default_det_models', 'get_model_aliases'] +# Copyright (c) OpenMMLab. All rights reserved. +from .default_det_models import default_det_models +from .get_model_alias import get_model_aliases + +__all__ = ['default_det_models', 'get_model_aliases'] diff --git a/mmpose/apis/inferencers/utils/default_det_models.py b/mmpose/apis/inferencers/utils/default_det_models.py index 93b759c879..b7749318cf 100644 --- a/mmpose/apis/inferencers/utils/default_det_models.py +++ b/mmpose/apis/inferencers/utils/default_det_models.py @@ -1,31 +1,31 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp - -from mmengine.config.utils import MODULE2PACKAGE -from mmengine.utils import get_installed_path - -mmpose_path = get_installed_path(MODULE2PACKAGE['mmpose']) - -default_det_models = dict( - human=dict(model='rtmdet-m', weights=None, cat_ids=(0, )), - face=dict( - model=osp.join(mmpose_path, '.mim', - 'demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py'), - weights='https://download.openmmlab.com/mmpose/mmdet_pretrained/' - 'yolo-x_8xb8-300e_coco-face_13274d7c.pth', - cat_ids=(0, )), - hand=dict( - model=osp.join( - mmpose_path, '.mim', 'demo/mmdetection_cfg/' - 'ssdlite_mobilenetv2_scratch_600e_onehand.py'), - weights='https://download.openmmlab.com/mmpose/mmdet_pretrained/' - 'ssdlite_mobilenetv2_scratch_600e_onehand-4f9f8686_20220523.pth', - cat_ids=(0, )), - animal=dict( - model='rtmdet-m', - weights=None, - cat_ids=(15, 16, 17, 18, 19, 20, 21, 22, 23)), -) - -default_det_models['body'] = default_det_models['human'] -default_det_models['wholebody'] = default_det_models['human'] +# Copyright (c) OpenMMLab. All rights reserved. 
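# --- Editor's note: illustrative usage sketch, not part of the patch above. ---
# A minimal sketch of the Pose3DInferencer added in the hunk above. The lifter
# config and checkpoint paths are placeholders, not real file names; the 2D
# stage falls back to the 'human' alias as in __init__. Video files (or a
# 'webcam:<id>' string) are consumed frame by frame through the same generator
# interface as the 2D inferencer.
from mmpose.apis.inferencers.pose3d_inferencer import Pose3DInferencer

inferencer = Pose3DInferencer(
    model='<path/to/pose-lifter-config.py>',         # placeholder
    weights='<path/to/pose-lifter-checkpoint.pth>',  # placeholder
)
for result in inferencer(
        'demo_video.mp4',                # placeholder input
        out_dir='outputs_3d',
        use_oks_tracking=True,           # preprocess kwarg: OKS-based instance tracking
        rebase_keypoint_height=True,     # forward kwarg: rebase z values to the lowest keypoint
):
    # Each yielded dict carries the lifted 3D predictions for one frame/batch.
    print(result['predictions'])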
+import os.path as osp + +from mmengine.config.utils import MODULE2PACKAGE +from mmengine.utils import get_installed_path + +mmpose_path = get_installed_path(MODULE2PACKAGE['mmpose']) + +default_det_models = dict( + human=dict(model='rtmdet-m', weights=None, cat_ids=(0, )), + face=dict( + model=osp.join(mmpose_path, '.mim', + 'demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py'), + weights='https://download.openmmlab.com/mmpose/mmdet_pretrained/' + 'yolo-x_8xb8-300e_coco-face_13274d7c.pth', + cat_ids=(0, )), + hand=dict( + model=osp.join( + mmpose_path, '.mim', 'demo/mmdetection_cfg/' + 'ssdlite_mobilenetv2_scratch_600e_onehand.py'), + weights='https://download.openmmlab.com/mmpose/mmdet_pretrained/' + 'ssdlite_mobilenetv2_scratch_600e_onehand-4f9f8686_20220523.pth', + cat_ids=(0, )), + animal=dict( + model='rtmdet-m', + weights=None, + cat_ids=(15, 16, 17, 18, 19, 20, 21, 22, 23)), +) + +default_det_models['body'] = default_det_models['human'] +default_det_models['wholebody'] = default_det_models['human'] diff --git a/mmpose/apis/inferencers/utils/get_model_alias.py b/mmpose/apis/inferencers/utils/get_model_alias.py index 49de6528d6..9a27cee54c 100644 --- a/mmpose/apis/inferencers/utils/get_model_alias.py +++ b/mmpose/apis/inferencers/utils/get_model_alias.py @@ -1,37 +1,37 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Dict - -from mmengine.infer import BaseInferencer - - -def get_model_aliases(scope: str = 'mmpose') -> Dict[str, str]: - """Retrieve model aliases and their corresponding configuration names. - - Args: - scope (str, optional): The scope for the model aliases. Defaults - to 'mmpose'. - - Returns: - Dict[str, str]: A dictionary containing model aliases as keys and - their corresponding configuration names as values. - """ - - # Get a list of model configurations from the metafile - repo_or_mim_dir = BaseInferencer._get_repo_or_mim_dir(scope) - model_cfgs = BaseInferencer._get_models_from_metafile(repo_or_mim_dir) - - model_alias_dict = dict() - for model_cfg in model_cfgs: - if 'Alias' in model_cfg: - if isinstance(model_cfg['Alias'], str): - model_alias_dict[model_cfg['Alias']] = model_cfg['Name'] - elif isinstance(model_cfg['Alias'], list): - for alias in model_cfg['Alias']: - model_alias_dict[alias] = model_cfg['Name'] - else: - raise ValueError( - 'encounter an unexpected alias type. Please raise an ' - 'issue at https://github.com/open-mmlab/mmpose/issues ' - 'to announce us') - - return model_alias_dict +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict + +from mmengine.infer import BaseInferencer + + +def get_model_aliases(scope: str = 'mmpose') -> Dict[str, str]: + """Retrieve model aliases and their corresponding configuration names. + + Args: + scope (str, optional): The scope for the model aliases. Defaults + to 'mmpose'. + + Returns: + Dict[str, str]: A dictionary containing model aliases as keys and + their corresponding configuration names as values. 
+ """ + + # Get a list of model configurations from the metafile + repo_or_mim_dir = BaseInferencer._get_repo_or_mim_dir(scope) + model_cfgs = BaseInferencer._get_models_from_metafile(repo_or_mim_dir) + + model_alias_dict = dict() + for model_cfg in model_cfgs: + if 'Alias' in model_cfg: + if isinstance(model_cfg['Alias'], str): + model_alias_dict[model_cfg['Alias']] = model_cfg['Name'] + elif isinstance(model_cfg['Alias'], list): + for alias in model_cfg['Alias']: + model_alias_dict[alias] = model_cfg['Name'] + else: + raise ValueError( + 'encounter an unexpected alias type. Please raise an ' + 'issue at https://github.com/open-mmlab/mmpose/issues ' + 'to announce us') + + return model_alias_dict diff --git a/mmpose/codecs/__init__.py b/mmpose/codecs/__init__.py index cdbd8feb0c..16f3924a7d 100644 --- a/mmpose/codecs/__init__.py +++ b/mmpose/codecs/__init__.py @@ -1,18 +1,18 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .associative_embedding import AssociativeEmbedding -from .decoupled_heatmap import DecoupledHeatmap -from .image_pose_lifting import ImagePoseLifting -from .integral_regression_label import IntegralRegressionLabel -from .megvii_heatmap import MegviiHeatmap -from .msra_heatmap import MSRAHeatmap -from .regression_label import RegressionLabel -from .simcc_label import SimCCLabel -from .spr import SPR -from .udp_heatmap import UDPHeatmap -from .video_pose_lifting import VideoPoseLifting - -__all__ = [ - 'MSRAHeatmap', 'MegviiHeatmap', 'UDPHeatmap', 'RegressionLabel', - 'SimCCLabel', 'IntegralRegressionLabel', 'AssociativeEmbedding', 'SPR', - 'DecoupledHeatmap', 'VideoPoseLifting', 'ImagePoseLifting' -] +# Copyright (c) OpenMMLab. All rights reserved. +from .associative_embedding import AssociativeEmbedding +from .decoupled_heatmap import DecoupledHeatmap +from .image_pose_lifting import ImagePoseLifting +from .integral_regression_label import IntegralRegressionLabel +from .megvii_heatmap import MegviiHeatmap +from .msra_heatmap import MSRAHeatmap +from .regression_label import RegressionLabel +from .simcc_label import SimCCLabel +from .spr import SPR +from .udp_heatmap import UDPHeatmap +from .video_pose_lifting import VideoPoseLifting + +__all__ = [ + 'MSRAHeatmap', 'MegviiHeatmap', 'UDPHeatmap', 'RegressionLabel', + 'SimCCLabel', 'IntegralRegressionLabel', 'AssociativeEmbedding', 'SPR', + 'DecoupledHeatmap', 'VideoPoseLifting', 'ImagePoseLifting' +] diff --git a/mmpose/codecs/associative_embedding.py b/mmpose/codecs/associative_embedding.py index 7e080f1657..f9f6e5da8a 100644 --- a/mmpose/codecs/associative_embedding.py +++ b/mmpose/codecs/associative_embedding.py @@ -1,512 +1,512 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from collections import namedtuple -from itertools import product -from typing import Any, List, Optional, Tuple - -import numpy as np -import torch -from munkres import Munkres -from torch import Tensor - -from mmpose.registry import KEYPOINT_CODECS -from mmpose.utils.tensor_utils import to_numpy -from .base import BaseKeypointCodec -from .utils import (batch_heatmap_nms, generate_gaussian_heatmaps, - generate_udp_gaussian_heatmaps, refine_keypoints, - refine_keypoints_dark_udp) - - -def _group_keypoints_by_tags(vals: np.ndarray, - tags: np.ndarray, - locs: np.ndarray, - keypoint_order: List[int], - val_thr: float, - tag_thr: float = 1.0, - max_groups: Optional[int] = None) -> np.ndarray: - """Group the keypoints by tags using Munkres algorithm. 
- - Note: - - - keypoint number: K - - candidate number: M - - tag dimenssion: L - - coordinate dimension: D - - group number: G - - Args: - vals (np.ndarray): The heatmap response values of keypoints in shape - (K, M) - tags (np.ndarray): The tags of the keypoint candidates in shape - (K, M, L) - locs (np.ndarray): The locations of the keypoint candidates in shape - (K, M, D) - keypoint_order (List[int]): The grouping order of the keypoints. - The groupping usually starts from a keypoints around the head and - torso, and gruadually moves out to the limbs - val_thr (float): The threshold of the keypoint response value - tag_thr (float): The maximum allowed tag distance when matching a - keypoint to a group. A keypoint with larger tag distance to any - of the existing groups will initializes a new group - max_groups (int, optional): The maximum group number. ``None`` means - no limitation. Defaults to ``None`` - - Returns: - np.ndarray: grouped keypoints in shape (G, K, D+1), where the last - dimenssion is the concatenated keypoint coordinates and scores. - """ - K, M, D = locs.shape - assert vals.shape == tags.shape[:2] == (K, M) - assert len(keypoint_order) == K - - # Build Munkres instance - munkres = Munkres() - - # Build a group pool, each group contains the keypoints of an instance - groups = [] - - Group = namedtuple('Group', field_names=['kpts', 'scores', 'tag_list']) - - def _init_group(): - """Initialize a group, which is composed of the keypoints, keypoint - scores and the tag of each keypoint.""" - _group = Group( - kpts=np.zeros((K, D), dtype=np.float32), - scores=np.zeros(K, dtype=np.float32), - tag_list=[]) - return _group - - for i in keypoint_order: - # Get all valid candidate of the i-th keypoints - valid = vals[i] > val_thr - if not valid.any(): - continue - - tags_i = tags[i, valid] # (M', L) - vals_i = vals[i, valid] # (M',) - locs_i = locs[i, valid] # (M', D) - - if len(groups) == 0: # Initialize the group pool - for tag, val, loc in zip(tags_i, vals_i, locs_i): - group = _init_group() - group.kpts[i] = loc - group.scores[i] = val - group.tag_list.append(tag) - - groups.append(group) - - else: # Match keypoints to existing groups - groups = groups[:max_groups] - group_tags = [np.mean(g.tag_list, axis=0) for g in groups] - - # Calculate distance matrix between group tags and tag candidates - # of the i-th keypoint - # Shape: (M', 1, L) , (1, G, L) -> (M', G, L) - diff = tags_i[:, None] - np.array(group_tags)[None] - dists = np.linalg.norm(diff, ord=2, axis=2) - num_kpts, num_groups = dists.shape[:2] - - # Experimental cost function for keypoint-group matching - costs = np.round(dists) * 100 - vals_i[..., None] - if num_kpts > num_groups: - padding = np.full((num_kpts, num_kpts - num_groups), - 1e10, - dtype=np.float32) - costs = np.concatenate((costs, padding), axis=1) - - # Match keypoints and groups by Munkres algorithm - matches = munkres.compute(costs) - for kpt_idx, group_idx in matches: - if group_idx < num_groups and dists[kpt_idx, - group_idx] < tag_thr: - # Add the keypoint to the matched group - group = groups[group_idx] - else: - # Initialize a new group with unmatched keypoint - group = _init_group() - groups.append(group) - - group.kpts[i] = locs_i[kpt_idx] - group.scores[i] = vals_i[kpt_idx] - group.tag_list.append(tags_i[kpt_idx]) - - groups = groups[:max_groups] - if groups: - grouped_keypoints = np.stack( - [np.r_['1', g.kpts, g.scores[:, None]] for g in groups]) - else: - grouped_keypoints = np.empty((0, K, D + 1)) - - return grouped_keypoints - - 
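# --- Editor's note: illustrative sketch, not part of the patch above. ---
# A toy call to the private grouping helper defined above, just to make its
# shape contract concrete: two candidates (M=2) for each of K=3 keypoints, 1-D
# embedding tags (L=1) and 2-D coordinates (D=2). Candidates with similar tags
# are assigned to the same instance, so the result has shape (G, K, D + 1) with
# G == 2 here. Requires numpy and munkres; the helper is private and may change.
import numpy as np

from mmpose.codecs.associative_embedding import _group_keypoints_by_tags

K, M, L, D = 3, 2, 1, 2
vals = np.full((K, M), 0.9, dtype=np.float32)       # response values, all above val_thr
tags = np.tile(np.array([[0.0], [10.0]], dtype=np.float32), (K, 1, 1))  # instance A vs B
locs = np.random.rand(K, M, D).astype(np.float32)   # candidate coordinates

grouped = _group_keypoints_by_tags(
    vals, tags, locs, keypoint_order=list(range(K)), val_thr=0.1)
print(grouped.shape)   # (2, 3, 3): 2 instances, 3 keypoints, (x, y, score)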
-@KEYPOINT_CODECS.register_module() -class AssociativeEmbedding(BaseKeypointCodec): - """Encode/decode keypoints with the method introduced in "Associative - Embedding". This is an asymmetric codec, where the keypoints are - represented as gaussian heatmaps and position indices during encoding, and - restored from predicted heatmaps and group tags. - - See the paper `Associative Embedding: End-to-End Learning for Joint - Detection and Grouping`_ by Newell et al (2017) for details - - Note: - - - instance number: N - - keypoint number: K - - keypoint dimension: D - - embedding tag dimension: L - - image size: [w, h] - - heatmap size: [W, H] - - Encoded: - - - heatmaps (np.ndarray): The generated heatmap in shape (K, H, W) - where [W, H] is the `heatmap_size` - - keypoint_indices (np.ndarray): The keypoint position indices in shape - (N, K, 2). Each keypoint's index is [i, v], where i is the position - index in the heatmap (:math:`i=y*w+x`) and v is the visibility - - keypoint_weights (np.ndarray): The target weights in shape (N, K) - - Args: - input_size (tuple): Image size in [w, h] - heatmap_size (tuple): Heatmap size in [W, H] - sigma (float): The sigma value of the Gaussian heatmap - use_udp (bool): Whether use unbiased data processing. See - `UDP (CVPR 2020)`_ for details. Defaults to ``False`` - decode_keypoint_order (List[int]): The grouping order of the - keypoint indices. The groupping usually starts from a keypoints - around the head and torso, and gruadually moves out to the limbs - decode_keypoint_thr (float): The threshold of keypoint response value - in heatmaps. Defaults to 0.1 - decode_tag_thr (float): The maximum allowed tag distance when matching - a keypoint to a group. A keypoint with larger tag distance to any - of the existing groups will initializes a new group. Defaults to - 1.0 - decode_nms_kernel (int): The kernel size of the NMS during decoding, - which should be an odd integer. Defaults to 5 - decode_gaussian_kernel (int): The kernel size of the Gaussian blur - during decoding, which should be an odd integer. It is only used - when ``self.use_udp==True``. Defaults to 3 - decode_topk (int): The number top-k candidates of each keypoints that - will be retrieved from the heatmaps during dedocding. Defaults to - 20 - decode_max_instances (int, optional): The maximum number of instances - to decode. ``None`` means no limitation to the instance number. - Defaults to ``None`` - - .. _`Associative Embedding: End-to-End Learning for Joint Detection and - Grouping`: https://arxiv.org/abs/1611.05424 - .. 
_`UDP (CVPR 2020)`: https://arxiv.org/abs/1911.07524 - """ - - def __init__( - self, - input_size: Tuple[int, int], - heatmap_size: Tuple[int, int], - sigma: Optional[float] = None, - use_udp: bool = False, - decode_keypoint_order: List[int] = [], - decode_nms_kernel: int = 5, - decode_gaussian_kernel: int = 3, - decode_keypoint_thr: float = 0.1, - decode_tag_thr: float = 1.0, - decode_topk: int = 20, - decode_max_instances: Optional[int] = None, - ) -> None: - super().__init__() - self.input_size = input_size - self.heatmap_size = heatmap_size - self.use_udp = use_udp - self.decode_nms_kernel = decode_nms_kernel - self.decode_gaussian_kernel = decode_gaussian_kernel - self.decode_keypoint_thr = decode_keypoint_thr - self.decode_tag_thr = decode_tag_thr - self.decode_topk = decode_topk - self.decode_max_instances = decode_max_instances - self.dedecode_keypoint_order = decode_keypoint_order.copy() - - if self.use_udp: - self.scale_factor = ((np.array(input_size) - 1) / - (np.array(heatmap_size) - 1)).astype( - np.float32) - else: - self.scale_factor = (np.array(input_size) / - heatmap_size).astype(np.float32) - - if sigma is None: - sigma = (heatmap_size[0] * heatmap_size[1])**0.5 / 64 - self.sigma = sigma - - def encode( - self, - keypoints: np.ndarray, - keypoints_visible: Optional[np.ndarray] = None - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: - """Encode keypoints into heatmaps and position indices. Note that the - original keypoint coordinates should be in the input image space. - - Args: - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) - keypoints_visible (np.ndarray): Keypoint visibilities in shape - (N, K) - - Returns: - dict: - - heatmaps (np.ndarray): The generated heatmap in shape - (K, H, W) where [W, H] is the `heatmap_size` - - keypoint_indices (np.ndarray): The keypoint position indices - in shape (N, K, 2). 
Each keypoint's index is [i, v], where i - is the position index in the heatmap (:math:`i=y*w+x`) and v - is the visibility - - keypoint_weights (np.ndarray): The target weights in shape - (N, K) - """ - - if keypoints_visible is None: - keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) - - # keypoint coordinates in heatmap - _keypoints = keypoints / self.scale_factor - - if self.use_udp: - heatmaps, keypoint_weights = generate_udp_gaussian_heatmaps( - heatmap_size=self.heatmap_size, - keypoints=_keypoints, - keypoints_visible=keypoints_visible, - sigma=self.sigma) - else: - heatmaps, keypoint_weights = generate_gaussian_heatmaps( - heatmap_size=self.heatmap_size, - keypoints=_keypoints, - keypoints_visible=keypoints_visible, - sigma=self.sigma) - - keypoint_indices = self._encode_keypoint_indices( - heatmap_size=self.heatmap_size, - keypoints=_keypoints, - keypoints_visible=keypoints_visible) - - encoded = dict( - heatmaps=heatmaps, - keypoint_indices=keypoint_indices, - keypoint_weights=keypoint_weights) - - return encoded - - def _encode_keypoint_indices(self, heatmap_size: Tuple[int, int], - keypoints: np.ndarray, - keypoints_visible: np.ndarray) -> np.ndarray: - w, h = heatmap_size - N, K, _ = keypoints.shape - keypoint_indices = np.zeros((N, K, 2), dtype=np.int64) - - for n, k in product(range(N), range(K)): - x, y = (keypoints[n, k] + 0.5).astype(np.int64) - index = y * w + x - vis = (keypoints_visible[n, k] > 0.5 and 0 <= x < w and 0 <= y < h) - keypoint_indices[n, k] = [index, vis] - - return keypoint_indices - - def decode(self, encoded: Any) -> Tuple[np.ndarray, np.ndarray]: - raise NotImplementedError() - - def _get_batch_topk(self, batch_heatmaps: Tensor, batch_tags: Tensor, - k: int): - """Get top-k response values from the heatmaps and corresponding tag - values from the tagging heatmaps. - - Args: - batch_heatmaps (Tensor): Keypoint detection heatmaps in shape - (B, K, H, W) - batch_tags (Tensor): Tagging heatmaps in shape (B, C, H, W), where - the tag dim C is 2*K when using flip testing, or K otherwise - k (int): The number of top responses to get - - Returns: - tuple: - - topk_vals (Tensor): Top-k response values of each heatmap in - shape (B, K, Topk) - - topk_tags (Tensor): The corresponding embedding tags of the - top-k responses, in shape (B, K, Topk, L) - - topk_locs (Tensor): The location of the top-k responses in each - heatmap, in shape (B, K, Topk, 2) where last dimension - represents x and y coordinates - """ - B, K, H, W = batch_heatmaps.shape - L = batch_tags.shape[1] // K - - # shape of topk_val, top_indices: (B, K, TopK) - topk_vals, topk_indices = batch_heatmaps.flatten(-2, -1).topk( - k, dim=-1) - - topk_tags_per_kpts = [ - torch.gather(_tag, dim=2, index=topk_indices) - for _tag in torch.unbind(batch_tags.view(B, L, K, H * W), dim=1) - ] - - topk_tags = torch.stack(topk_tags_per_kpts, dim=-1) # (B, K, TopK, L) - topk_locs = torch.stack([topk_indices % W, topk_indices // W], - dim=-1) # (B, K, TopK, 2) - - return topk_vals, topk_tags, topk_locs - - def _group_keypoints(self, batch_vals: np.ndarray, batch_tags: np.ndarray, - batch_locs: np.ndarray): - """Group keypoints into groups (each represents an instance) by tags. 
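# A tiny sketch of the flattened-index convention used by
# `_encode_keypoint_indices` above (and by the top-k lookup below): a pixel
# (x, y) of a heatmap with width w is stored as i = y * w + x, and the
# coordinates are recovered with i % w and i // w. Toy numbers only.
import numpy as np

w, h = 128, 96
x, y = 40, 30

index = y * w + x                        # 3880
assert (index % w, index // w) == (x, y)

# The same convention applies to indices taken from a flattened (h, w) heatmap:
heatmap = np.zeros((h, w), dtype=np.float32)
heatmap[y, x] = 1.0
flat_idx = int(np.argmax(heatmap.reshape(-1)))
print(flat_idx % w, flat_idx // w)       # 40 30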
- - Args: - batch_vals (Tensor): Heatmap response values of keypoint - candidates in shape (B, K, Topk) - batch_tags (Tensor): Tags of keypoint candidates in shape - (B, K, Topk, L) - batch_locs (Tensor): Locations of keypoint candidates in shape - (B, K, Topk, 2) - - Returns: - List[np.ndarray]: Grouping results of a batch, each element is a - np.ndarray (in shape [N, K, D+1]) that contains the groups - detected in an image, including both keypoint coordinates and - scores. - """ - - def _group_func(inputs: Tuple): - vals, tags, locs = inputs - return _group_keypoints_by_tags( - vals, - tags, - locs, - keypoint_order=self.dedecode_keypoint_order, - val_thr=self.decode_keypoint_thr, - tag_thr=self.decode_tag_thr, - max_groups=self.decode_max_instances) - - _results = map(_group_func, zip(batch_vals, batch_tags, batch_locs)) - results = list(_results) - return results - - def _fill_missing_keypoints(self, keypoints: np.ndarray, - keypoint_scores: np.ndarray, - heatmaps: np.ndarray, tags: np.ndarray): - """Fill the missing keypoints in the initial predictions. - - Args: - keypoints (np.ndarray): Keypoint predictions in shape (N, K, D) - keypoint_scores (np.ndarray): Keypint score predictions in shape - (N, K), in which 0 means the corresponding keypoint is - missing in the initial prediction - heatmaps (np.ndarry): Heatmaps in shape (K, H, W) - tags (np.ndarray): Tagging heatmaps in shape (C, H, W) where - C=L*K - - Returns: - tuple: - - keypoints (np.ndarray): Keypoint predictions with missing - ones filled - - keypoint_scores (np.ndarray): Keypoint score predictions with - missing ones filled - """ - - N, K = keypoints.shape[:2] - H, W = heatmaps.shape[1:] - L = tags.shape[0] // K - keypoint_tags = [tags[k::K] for k in range(K)] - - for n in range(N): - # Calculate the instance tag (mean tag of detected keypoints) - _tag = [] - for k in range(K): - if keypoint_scores[n, k] > 0: - x, y = keypoints[n, k, :2].astype(np.int64) - x = np.clip(x, 0, W - 1) - y = np.clip(y, 0, H - 1) - _tag.append(keypoint_tags[k][:, y, x]) - - tag = np.mean(_tag, axis=0) - tag = tag.reshape(L, 1, 1) - # Search maximum response of the missing keypoints - for k in range(K): - if keypoint_scores[n, k] > 0: - continue - dist_map = np.linalg.norm( - keypoint_tags[k] - tag, ord=2, axis=0) - cost_map = np.round(dist_map) * 100 - heatmaps[k] # H, W - y, x = np.unravel_index(np.argmin(cost_map), shape=(H, W)) - keypoints[n, k] = [x, y] - keypoint_scores[n, k] = heatmaps[k, y, x] - - return keypoints, keypoint_scores - - def batch_decode(self, batch_heatmaps: Tensor, batch_tags: Tensor - ) -> Tuple[List[np.ndarray], List[np.ndarray]]: - """Decode the keypoint coordinates from a batch of heatmaps and tagging - heatmaps. The decoded keypoint coordinates are in the input image - space. - - Args: - batch_heatmaps (Tensor): Keypoint detection heatmaps in shape - (B, K, H, W) - batch_tags (Tensor): Tagging heatmaps in shape (B, C, H, W), where - :math:`C=L*K` - - Returns: - tuple: - - batch_keypoints (List[np.ndarray]): Decoded keypoint coordinates - of the batch, each is in shape (N, K, D) - - batch_scores (List[np.ndarray]): Decoded keypoint scores of the - batch, each is in shape (N, K). 
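# A condensed numpy sketch of the missing-keypoint search in
# `_fill_missing_keypoints` above: each pixel's cost combines the rounded,
# scaled distance between its tag and the instance tag with the negated
# heatmap response, and the cheapest pixel is taken. Toy data with one
# keypoint type and a 1-D tag.
import numpy as np

H, W, L = 64, 64, 1
heatmap = np.random.rand(H, W).astype(np.float32)      # (H, W) response of keypoint k
tag_map = np.random.randn(L, H, W).astype(np.float32)  # (L, H, W) tags of keypoint k
instance_tag = np.zeros((L, 1, 1), dtype=np.float32)   # mean tag of the detected keypoints

dist_map = np.linalg.norm(tag_map - instance_tag, ord=2, axis=0)  # (H, W)
cost_map = np.round(dist_map) * 100 - heatmap
y, x = np.unravel_index(np.argmin(cost_map), shape=(H, W))
print('filled keypoint at', (x, y), 'with score', heatmap[y, x])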
It usually represents the - confidience of the keypoint prediction - """ - B, _, H, W = batch_heatmaps.shape - assert batch_tags.shape[0] == B and batch_tags.shape[2:4] == (H, W), ( - f'Mismatched shapes of heatmap ({batch_heatmaps.shape}) and ' - f'tagging map ({batch_tags.shape})') - - # Heatmap NMS - batch_heatmaps = batch_heatmap_nms(batch_heatmaps, - self.decode_nms_kernel) - - # Get top-k in each heatmap and and convert to numpy - batch_topk_vals, batch_topk_tags, batch_topk_locs = to_numpy( - self._get_batch_topk( - batch_heatmaps, batch_tags, k=self.decode_topk)) - - # Group keypoint candidates into groups (instances) - batch_groups = self._group_keypoints(batch_topk_vals, batch_topk_tags, - batch_topk_locs) - - # Convert to numpy - batch_heatmaps_np = to_numpy(batch_heatmaps) - batch_tags_np = to_numpy(batch_tags) - - # Refine the keypoint prediction - batch_keypoints = [] - batch_keypoint_scores = [] - for i, (groups, heatmaps, tags) in enumerate( - zip(batch_groups, batch_heatmaps_np, batch_tags_np)): - - keypoints, scores = groups[..., :-1], groups[..., -1] - - if keypoints.size > 0: - # identify missing keypoints - keypoints, scores = self._fill_missing_keypoints( - keypoints, scores, heatmaps, tags) - - # refine keypoint coordinates according to heatmap distribution - if self.use_udp: - keypoints = refine_keypoints_dark_udp( - keypoints, - heatmaps, - blur_kernel_size=self.decode_gaussian_kernel) - else: - keypoints = refine_keypoints(keypoints, heatmaps) - - batch_keypoints.append(keypoints) - batch_keypoint_scores.append(scores) - - # restore keypoint scale - batch_keypoints = [ - kpts * self.scale_factor for kpts in batch_keypoints - ] - - return batch_keypoints, batch_keypoint_scores +# Copyright (c) OpenMMLab. All rights reserved. +from collections import namedtuple +from itertools import product +from typing import Any, List, Optional, Tuple + +import numpy as np +import torch +from munkres import Munkres +from torch import Tensor + +from mmpose.registry import KEYPOINT_CODECS +from mmpose.utils.tensor_utils import to_numpy +from .base import BaseKeypointCodec +from .utils import (batch_heatmap_nms, generate_gaussian_heatmaps, + generate_udp_gaussian_heatmaps, refine_keypoints, + refine_keypoints_dark_udp) + + +def _group_keypoints_by_tags(vals: np.ndarray, + tags: np.ndarray, + locs: np.ndarray, + keypoint_order: List[int], + val_thr: float, + tag_thr: float = 1.0, + max_groups: Optional[int] = None) -> np.ndarray: + """Group the keypoints by tags using Munkres algorithm. + + Note: + + - keypoint number: K + - candidate number: M + - tag dimenssion: L + - coordinate dimension: D + - group number: G + + Args: + vals (np.ndarray): The heatmap response values of keypoints in shape + (K, M) + tags (np.ndarray): The tags of the keypoint candidates in shape + (K, M, L) + locs (np.ndarray): The locations of the keypoint candidates in shape + (K, M, D) + keypoint_order (List[int]): The grouping order of the keypoints. + The groupping usually starts from a keypoints around the head and + torso, and gruadually moves out to the limbs + val_thr (float): The threshold of the keypoint response value + tag_thr (float): The maximum allowed tag distance when matching a + keypoint to a group. A keypoint with larger tag distance to any + of the existing groups will initializes a new group + max_groups (int, optional): The maximum group number. ``None`` means + no limitation. 
Defaults to ``None`` + + Returns: + np.ndarray: grouped keypoints in shape (G, K, D+1), where the last + dimenssion is the concatenated keypoint coordinates and scores. + """ + K, M, D = locs.shape + assert vals.shape == tags.shape[:2] == (K, M) + assert len(keypoint_order) == K + + # Build Munkres instance + munkres = Munkres() + + # Build a group pool, each group contains the keypoints of an instance + groups = [] + + Group = namedtuple('Group', field_names=['kpts', 'scores', 'tag_list']) + + def _init_group(): + """Initialize a group, which is composed of the keypoints, keypoint + scores and the tag of each keypoint.""" + _group = Group( + kpts=np.zeros((K, D), dtype=np.float32), + scores=np.zeros(K, dtype=np.float32), + tag_list=[]) + return _group + + for i in keypoint_order: + # Get all valid candidate of the i-th keypoints + valid = vals[i] > val_thr + if not valid.any(): + continue + + tags_i = tags[i, valid] # (M', L) + vals_i = vals[i, valid] # (M',) + locs_i = locs[i, valid] # (M', D) + + if len(groups) == 0: # Initialize the group pool + for tag, val, loc in zip(tags_i, vals_i, locs_i): + group = _init_group() + group.kpts[i] = loc + group.scores[i] = val + group.tag_list.append(tag) + + groups.append(group) + + else: # Match keypoints to existing groups + groups = groups[:max_groups] + group_tags = [np.mean(g.tag_list, axis=0) for g in groups] + + # Calculate distance matrix between group tags and tag candidates + # of the i-th keypoint + # Shape: (M', 1, L) , (1, G, L) -> (M', G, L) + diff = tags_i[:, None] - np.array(group_tags)[None] + dists = np.linalg.norm(diff, ord=2, axis=2) + num_kpts, num_groups = dists.shape[:2] + + # Experimental cost function for keypoint-group matching + costs = np.round(dists) * 100 - vals_i[..., None] + if num_kpts > num_groups: + padding = np.full((num_kpts, num_kpts - num_groups), + 1e10, + dtype=np.float32) + costs = np.concatenate((costs, padding), axis=1) + + # Match keypoints and groups by Munkres algorithm + matches = munkres.compute(costs) + for kpt_idx, group_idx in matches: + if group_idx < num_groups and dists[kpt_idx, + group_idx] < tag_thr: + # Add the keypoint to the matched group + group = groups[group_idx] + else: + # Initialize a new group with unmatched keypoint + group = _init_group() + groups.append(group) + + group.kpts[i] = locs_i[kpt_idx] + group.scores[i] = vals_i[kpt_idx] + group.tag_list.append(tags_i[kpt_idx]) + + groups = groups[:max_groups] + if groups: + grouped_keypoints = np.stack( + [np.r_['1', g.kpts, g.scores[:, None]] for g in groups]) + else: + grouped_keypoints = np.empty((0, K, D + 1)) + + return grouped_keypoints + + +@KEYPOINT_CODECS.register_module() +class AssociativeEmbedding(BaseKeypointCodec): + """Encode/decode keypoints with the method introduced in "Associative + Embedding". This is an asymmetric codec, where the keypoints are + represented as gaussian heatmaps and position indices during encoding, and + restored from predicted heatmaps and group tags. + + See the paper `Associative Embedding: End-to-End Learning for Joint + Detection and Grouping`_ by Newell et al (2017) for details + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - embedding tag dimension: L + - image size: [w, h] + - heatmap size: [W, H] + + Encoded: + + - heatmaps (np.ndarray): The generated heatmap in shape (K, H, W) + where [W, H] is the `heatmap_size` + - keypoint_indices (np.ndarray): The keypoint position indices in shape + (N, K, 2). 
Each keypoint's index is [i, v], where i is the position + index in the heatmap (:math:`i=y*w+x`) and v is the visibility + - keypoint_weights (np.ndarray): The target weights in shape (N, K) + + Args: + input_size (tuple): Image size in [w, h] + heatmap_size (tuple): Heatmap size in [W, H] + sigma (float): The sigma value of the Gaussian heatmap + use_udp (bool): Whether use unbiased data processing. See + `UDP (CVPR 2020)`_ for details. Defaults to ``False`` + decode_keypoint_order (List[int]): The grouping order of the + keypoint indices. The groupping usually starts from a keypoints + around the head and torso, and gruadually moves out to the limbs + decode_keypoint_thr (float): The threshold of keypoint response value + in heatmaps. Defaults to 0.1 + decode_tag_thr (float): The maximum allowed tag distance when matching + a keypoint to a group. A keypoint with larger tag distance to any + of the existing groups will initializes a new group. Defaults to + 1.0 + decode_nms_kernel (int): The kernel size of the NMS during decoding, + which should be an odd integer. Defaults to 5 + decode_gaussian_kernel (int): The kernel size of the Gaussian blur + during decoding, which should be an odd integer. It is only used + when ``self.use_udp==True``. Defaults to 3 + decode_topk (int): The number top-k candidates of each keypoints that + will be retrieved from the heatmaps during dedocding. Defaults to + 20 + decode_max_instances (int, optional): The maximum number of instances + to decode. ``None`` means no limitation to the instance number. + Defaults to ``None`` + + .. _`Associative Embedding: End-to-End Learning for Joint Detection and + Grouping`: https://arxiv.org/abs/1611.05424 + .. _`UDP (CVPR 2020)`: https://arxiv.org/abs/1911.07524 + """ + + def __init__( + self, + input_size: Tuple[int, int], + heatmap_size: Tuple[int, int], + sigma: Optional[float] = None, + use_udp: bool = False, + decode_keypoint_order: List[int] = [], + decode_nms_kernel: int = 5, + decode_gaussian_kernel: int = 3, + decode_keypoint_thr: float = 0.1, + decode_tag_thr: float = 1.0, + decode_topk: int = 20, + decode_max_instances: Optional[int] = None, + ) -> None: + super().__init__() + self.input_size = input_size + self.heatmap_size = heatmap_size + self.use_udp = use_udp + self.decode_nms_kernel = decode_nms_kernel + self.decode_gaussian_kernel = decode_gaussian_kernel + self.decode_keypoint_thr = decode_keypoint_thr + self.decode_tag_thr = decode_tag_thr + self.decode_topk = decode_topk + self.decode_max_instances = decode_max_instances + self.dedecode_keypoint_order = decode_keypoint_order.copy() + + if self.use_udp: + self.scale_factor = ((np.array(input_size) - 1) / + (np.array(heatmap_size) - 1)).astype( + np.float32) + else: + self.scale_factor = (np.array(input_size) / + heatmap_size).astype(np.float32) + + if sigma is None: + sigma = (heatmap_size[0] * heatmap_size[1])**0.5 / 64 + self.sigma = sigma + + def encode( + self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """Encode keypoints into heatmaps and position indices. Note that the + original keypoint coordinates should be in the input image space. 
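# A small numeric illustration (toy sizes) of the two image-to-heatmap scale
# factors set in `__init__` above, which `encode` uses to map input-space
# coordinates into heatmap space: with UDP the corner pixels of the input and
# the heatmap are aligned, so the ratio uses (size - 1); otherwise a plain
# size ratio is used. The default sigma is derived from the heatmap area.
import numpy as np

input_size, heatmap_size = (512, 512), (128, 128)

scale_plain = np.array(input_size) / np.array(heatmap_size)            # [4. 4.]
scale_udp = (np.array(input_size) - 1) / (np.array(heatmap_size) - 1)  # ~[4.024 4.024]
sigma_default = (heatmap_size[0] * heatmap_size[1]) ** 0.5 / 64        # 2.0

# A keypoint at the bottom-right input corner lands exactly on the heatmap
# corner only under the UDP convention:
corner = np.array([511.0, 511.0])
print(corner / scale_plain)  # [127.75 127.75]
print(corner / scale_udp)    # [127. 127.]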
+ + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + + Returns: + dict: + - heatmaps (np.ndarray): The generated heatmap in shape + (K, H, W) where [W, H] is the `heatmap_size` + - keypoint_indices (np.ndarray): The keypoint position indices + in shape (N, K, 2). Each keypoint's index is [i, v], where i + is the position index in the heatmap (:math:`i=y*w+x`) and v + is the visibility + - keypoint_weights (np.ndarray): The target weights in shape + (N, K) + """ + + if keypoints_visible is None: + keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) + + # keypoint coordinates in heatmap + _keypoints = keypoints / self.scale_factor + + if self.use_udp: + heatmaps, keypoint_weights = generate_udp_gaussian_heatmaps( + heatmap_size=self.heatmap_size, + keypoints=_keypoints, + keypoints_visible=keypoints_visible, + sigma=self.sigma) + else: + heatmaps, keypoint_weights = generate_gaussian_heatmaps( + heatmap_size=self.heatmap_size, + keypoints=_keypoints, + keypoints_visible=keypoints_visible, + sigma=self.sigma) + + keypoint_indices = self._encode_keypoint_indices( + heatmap_size=self.heatmap_size, + keypoints=_keypoints, + keypoints_visible=keypoints_visible) + + encoded = dict( + heatmaps=heatmaps, + keypoint_indices=keypoint_indices, + keypoint_weights=keypoint_weights) + + return encoded + + def _encode_keypoint_indices(self, heatmap_size: Tuple[int, int], + keypoints: np.ndarray, + keypoints_visible: np.ndarray) -> np.ndarray: + w, h = heatmap_size + N, K, _ = keypoints.shape + keypoint_indices = np.zeros((N, K, 2), dtype=np.int64) + + for n, k in product(range(N), range(K)): + x, y = (keypoints[n, k] + 0.5).astype(np.int64) + index = y * w + x + vis = (keypoints_visible[n, k] > 0.5 and 0 <= x < w and 0 <= y < h) + keypoint_indices[n, k] = [index, vis] + + return keypoint_indices + + def decode(self, encoded: Any) -> Tuple[np.ndarray, np.ndarray]: + raise NotImplementedError() + + def _get_batch_topk(self, batch_heatmaps: Tensor, batch_tags: Tensor, + k: int): + """Get top-k response values from the heatmaps and corresponding tag + values from the tagging heatmaps. 
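# A short usage sketch of the codec defined above, assuming an environment
# with mmpose installed; shapes follow the notation in the class docstring and
# the COCO-style 17-keypoint layout is only an example.
import numpy as np
from mmpose.codecs import AssociativeEmbedding

codec = AssociativeEmbedding(
    input_size=(512, 512),
    heatmap_size=(128, 128),
    decode_keypoint_order=list(range(17)))

keypoints = np.random.rand(2, 17, 2).astype(np.float32) * 512  # (N=2, K=17, D=2)
keypoints_visible = np.ones((2, 17), dtype=np.float32)

encoded = codec.encode(keypoints, keypoints_visible)
print(encoded['heatmaps'].shape)          # (17, 128, 128)
print(encoded['keypoint_indices'].shape)  # (2, 17, 2)
print(encoded['keypoint_weights'].shape)  # (2, 17)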
+ + Args: + batch_heatmaps (Tensor): Keypoint detection heatmaps in shape + (B, K, H, W) + batch_tags (Tensor): Tagging heatmaps in shape (B, C, H, W), where + the tag dim C is 2*K when using flip testing, or K otherwise + k (int): The number of top responses to get + + Returns: + tuple: + - topk_vals (Tensor): Top-k response values of each heatmap in + shape (B, K, Topk) + - topk_tags (Tensor): The corresponding embedding tags of the + top-k responses, in shape (B, K, Topk, L) + - topk_locs (Tensor): The location of the top-k responses in each + heatmap, in shape (B, K, Topk, 2) where last dimension + represents x and y coordinates + """ + B, K, H, W = batch_heatmaps.shape + L = batch_tags.shape[1] // K + + # shape of topk_val, top_indices: (B, K, TopK) + topk_vals, topk_indices = batch_heatmaps.flatten(-2, -1).topk( + k, dim=-1) + + topk_tags_per_kpts = [ + torch.gather(_tag, dim=2, index=topk_indices) + for _tag in torch.unbind(batch_tags.view(B, L, K, H * W), dim=1) + ] + + topk_tags = torch.stack(topk_tags_per_kpts, dim=-1) # (B, K, TopK, L) + topk_locs = torch.stack([topk_indices % W, topk_indices // W], + dim=-1) # (B, K, TopK, 2) + + return topk_vals, topk_tags, topk_locs + + def _group_keypoints(self, batch_vals: np.ndarray, batch_tags: np.ndarray, + batch_locs: np.ndarray): + """Group keypoints into groups (each represents an instance) by tags. + + Args: + batch_vals (Tensor): Heatmap response values of keypoint + candidates in shape (B, K, Topk) + batch_tags (Tensor): Tags of keypoint candidates in shape + (B, K, Topk, L) + batch_locs (Tensor): Locations of keypoint candidates in shape + (B, K, Topk, 2) + + Returns: + List[np.ndarray]: Grouping results of a batch, each element is a + np.ndarray (in shape [N, K, D+1]) that contains the groups + detected in an image, including both keypoint coordinates and + scores. + """ + + def _group_func(inputs: Tuple): + vals, tags, locs = inputs + return _group_keypoints_by_tags( + vals, + tags, + locs, + keypoint_order=self.dedecode_keypoint_order, + val_thr=self.decode_keypoint_thr, + tag_thr=self.decode_tag_thr, + max_groups=self.decode_max_instances) + + _results = map(_group_func, zip(batch_vals, batch_tags, batch_locs)) + results = list(_results) + return results + + def _fill_missing_keypoints(self, keypoints: np.ndarray, + keypoint_scores: np.ndarray, + heatmaps: np.ndarray, tags: np.ndarray): + """Fill the missing keypoints in the initial predictions. 
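# A stripped-down torch sketch of the top-k lookup in `_get_batch_topk` above:
# flatten each heatmap, take the k highest responses, gather the matching tag
# values, and turn the flat indices back into (x, y) with % W and // W.
# Toy shapes only.
import torch

B, K, H, W, L, k = 1, 2, 8, 8, 1, 3
batch_heatmaps = torch.rand(B, K, H, W)
batch_tags = torch.rand(B, K * L, H, W)

topk_vals, topk_indices = batch_heatmaps.flatten(-2, -1).topk(k, dim=-1)  # (B, K, k)

topk_tags_per_kpts = [
    torch.gather(_tag, dim=2, index=topk_indices)
    for _tag in torch.unbind(batch_tags.view(B, L, K, H * W), dim=1)
]
topk_tags = torch.stack(topk_tags_per_kpts, dim=-1)                     # (B, K, k, L)
topk_locs = torch.stack([topk_indices % W, topk_indices // W], dim=-1)  # (B, K, k, 2)

print(topk_vals.shape, topk_tags.shape, topk_locs.shape)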
+ + Args: + keypoints (np.ndarray): Keypoint predictions in shape (N, K, D) + keypoint_scores (np.ndarray): Keypint score predictions in shape + (N, K), in which 0 means the corresponding keypoint is + missing in the initial prediction + heatmaps (np.ndarry): Heatmaps in shape (K, H, W) + tags (np.ndarray): Tagging heatmaps in shape (C, H, W) where + C=L*K + + Returns: + tuple: + - keypoints (np.ndarray): Keypoint predictions with missing + ones filled + - keypoint_scores (np.ndarray): Keypoint score predictions with + missing ones filled + """ + + N, K = keypoints.shape[:2] + H, W = heatmaps.shape[1:] + L = tags.shape[0] // K + keypoint_tags = [tags[k::K] for k in range(K)] + + for n in range(N): + # Calculate the instance tag (mean tag of detected keypoints) + _tag = [] + for k in range(K): + if keypoint_scores[n, k] > 0: + x, y = keypoints[n, k, :2].astype(np.int64) + x = np.clip(x, 0, W - 1) + y = np.clip(y, 0, H - 1) + _tag.append(keypoint_tags[k][:, y, x]) + + tag = np.mean(_tag, axis=0) + tag = tag.reshape(L, 1, 1) + # Search maximum response of the missing keypoints + for k in range(K): + if keypoint_scores[n, k] > 0: + continue + dist_map = np.linalg.norm( + keypoint_tags[k] - tag, ord=2, axis=0) + cost_map = np.round(dist_map) * 100 - heatmaps[k] # H, W + y, x = np.unravel_index(np.argmin(cost_map), shape=(H, W)) + keypoints[n, k] = [x, y] + keypoint_scores[n, k] = heatmaps[k, y, x] + + return keypoints, keypoint_scores + + def batch_decode(self, batch_heatmaps: Tensor, batch_tags: Tensor + ) -> Tuple[List[np.ndarray], List[np.ndarray]]: + """Decode the keypoint coordinates from a batch of heatmaps and tagging + heatmaps. The decoded keypoint coordinates are in the input image + space. + + Args: + batch_heatmaps (Tensor): Keypoint detection heatmaps in shape + (B, K, H, W) + batch_tags (Tensor): Tagging heatmaps in shape (B, C, H, W), where + :math:`C=L*K` + + Returns: + tuple: + - batch_keypoints (List[np.ndarray]): Decoded keypoint coordinates + of the batch, each is in shape (N, K, D) + - batch_scores (List[np.ndarray]): Decoded keypoint scores of the + batch, each is in shape (N, K). 
It usually represents the + confidience of the keypoint prediction + """ + B, _, H, W = batch_heatmaps.shape + assert batch_tags.shape[0] == B and batch_tags.shape[2:4] == (H, W), ( + f'Mismatched shapes of heatmap ({batch_heatmaps.shape}) and ' + f'tagging map ({batch_tags.shape})') + + # Heatmap NMS + batch_heatmaps = batch_heatmap_nms(batch_heatmaps, + self.decode_nms_kernel) + + # Get top-k in each heatmap and and convert to numpy + batch_topk_vals, batch_topk_tags, batch_topk_locs = to_numpy( + self._get_batch_topk( + batch_heatmaps, batch_tags, k=self.decode_topk)) + + # Group keypoint candidates into groups (instances) + batch_groups = self._group_keypoints(batch_topk_vals, batch_topk_tags, + batch_topk_locs) + + # Convert to numpy + batch_heatmaps_np = to_numpy(batch_heatmaps) + batch_tags_np = to_numpy(batch_tags) + + # Refine the keypoint prediction + batch_keypoints = [] + batch_keypoint_scores = [] + for i, (groups, heatmaps, tags) in enumerate( + zip(batch_groups, batch_heatmaps_np, batch_tags_np)): + + keypoints, scores = groups[..., :-1], groups[..., -1] + + if keypoints.size > 0: + # identify missing keypoints + keypoints, scores = self._fill_missing_keypoints( + keypoints, scores, heatmaps, tags) + + # refine keypoint coordinates according to heatmap distribution + if self.use_udp: + keypoints = refine_keypoints_dark_udp( + keypoints, + heatmaps, + blur_kernel_size=self.decode_gaussian_kernel) + else: + keypoints = refine_keypoints(keypoints, heatmaps) + + batch_keypoints.append(keypoints) + batch_keypoint_scores.append(scores) + + # restore keypoint scale + batch_keypoints = [ + kpts * self.scale_factor for kpts in batch_keypoints + ] + + return batch_keypoints, batch_keypoint_scores diff --git a/mmpose/codecs/base.py b/mmpose/codecs/base.py index d8479fdf1e..945c957cfe 100644 --- a/mmpose/codecs/base.py +++ b/mmpose/codecs/base.py @@ -1,77 +1,77 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from abc import ABCMeta, abstractmethod -from typing import Any, List, Optional, Tuple - -import numpy as np -from mmengine.utils import is_method_overridden - - -class BaseKeypointCodec(metaclass=ABCMeta): - """The base class of the keypoint codec. - - A keypoint codec is a module to encode keypoint coordinates to specific - representation (e.g. heatmap) and vice versa. A subclass should implement - the methods :meth:`encode` and :meth:`decode`. - """ - - # pass additional encoding arguments to the `encode` method, beyond the - # mandatory `keypoints` and `keypoints_visible` arguments. - auxiliary_encode_keys = set() - - @abstractmethod - def encode(self, - keypoints: np.ndarray, - keypoints_visible: Optional[np.ndarray] = None) -> dict: - """Encode keypoints. - - Note: - - - instance number: N - - keypoint number: K - - keypoint dimension: D - - Args: - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) - keypoints_visible (np.ndarray): Keypoint visibility in shape - (N, K, D) - - Returns: - dict: Encoded items. - """ - - @abstractmethod - def decode(self, encoded: Any) -> Tuple[np.ndarray, np.ndarray]: - """Decode keypoints. - - Args: - encoded (any): Encoded keypoint representation using the codec - - Returns: - tuple: - - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) - - keypoints_visible (np.ndarray): Keypoint visibility in shape - (N, K, D) - """ - - def batch_decode(self, batch_encoded: Any - ) -> Tuple[List[np.ndarray], List[np.ndarray]]: - """Decode keypoints. 
- - Args: - batch_encoded (any): A batch of encoded keypoint - representations - - Returns: - tuple: - - batch_keypoints (List[np.ndarray]): Each element is keypoint - coordinates in shape (N, K, D) - - batch_keypoints (List[np.ndarray]): Each element is keypoint - visibility in shape (N, K) - """ - raise NotImplementedError() - - @property - def support_batch_decoding(self) -> bool: - """Return whether the codec support decoding from batch data.""" - return is_method_overridden('batch_decode', BaseKeypointCodec, - self.__class__) +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod +from typing import Any, List, Optional, Tuple + +import numpy as np +from mmengine.utils import is_method_overridden + + +class BaseKeypointCodec(metaclass=ABCMeta): + """The base class of the keypoint codec. + + A keypoint codec is a module to encode keypoint coordinates to specific + representation (e.g. heatmap) and vice versa. A subclass should implement + the methods :meth:`encode` and :meth:`decode`. + """ + + # pass additional encoding arguments to the `encode` method, beyond the + # mandatory `keypoints` and `keypoints_visible` arguments. + auxiliary_encode_keys = set() + + @abstractmethod + def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None) -> dict: + """Encode keypoints. + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibility in shape + (N, K, D) + + Returns: + dict: Encoded items. + """ + + @abstractmethod + def decode(self, encoded: Any) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoints. + + Args: + encoded (any): Encoded keypoint representation using the codec + + Returns: + tuple: + - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + - keypoints_visible (np.ndarray): Keypoint visibility in shape + (N, K, D) + """ + + def batch_decode(self, batch_encoded: Any + ) -> Tuple[List[np.ndarray], List[np.ndarray]]: + """Decode keypoints. + + Args: + batch_encoded (any): A batch of encoded keypoint + representations + + Returns: + tuple: + - batch_keypoints (List[np.ndarray]): Each element is keypoint + coordinates in shape (N, K, D) + - batch_keypoints (List[np.ndarray]): Each element is keypoint + visibility in shape (N, K) + """ + raise NotImplementedError() + + @property + def support_batch_decoding(self) -> bool: + """Return whether the codec support decoding from batch data.""" + return is_method_overridden('batch_decode', BaseKeypointCodec, + self.__class__) diff --git a/mmpose/codecs/decoupled_heatmap.py b/mmpose/codecs/decoupled_heatmap.py index da38a4ce2c..721f71ab58 100644 --- a/mmpose/codecs/decoupled_heatmap.py +++ b/mmpose/codecs/decoupled_heatmap.py @@ -1,265 +1,265 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import random -from typing import Optional, Tuple - -import numpy as np - -from mmpose.registry import KEYPOINT_CODECS -from .base import BaseKeypointCodec -from .utils import (generate_gaussian_heatmaps, get_diagonal_lengths, - get_instance_bbox, get_instance_root) -from .utils.post_processing import get_heatmap_maximum -from .utils.refinement import refine_keypoints - - -@KEYPOINT_CODECS.register_module() -class DecoupledHeatmap(BaseKeypointCodec): - """Encode/decode keypoints with the method introduced in the paper CID. 
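# A dependency-free sketch of the idea behind the `support_batch_decoding`
# property of the base codec above: a subclass "supports" batch decoding
# exactly when its `batch_decode` attribute differs from the one inherited
# from the base class. `is_method_overridden` from mmengine implements this
# check; the classes below are made up purely for illustration.
class Base:
    def batch_decode(self, batch_encoded):
        raise NotImplementedError()


class WithBatch(Base):
    def batch_decode(self, batch_encoded):
        return [x[::-1] for x in batch_encoded]


class WithoutBatch(Base):
    pass


def supports_batch_decoding(cls) -> bool:
    return cls.batch_decode is not Base.batch_decode


print(supports_batch_decoding(WithBatch))     # True
print(supports_batch_decoding(WithoutBatch))  # False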
- - See the paper Contextual Instance Decoupling for Robust Multi-Person - Pose Estimation`_ by Wang et al (2022) for details - - Note: - - - instance number: N - - keypoint number: K - - keypoint dimension: D - - image size: [w, h] - - heatmap size: [W, H] - - Encoded: - - heatmaps (np.ndarray): The coupled heatmap in shape - (1+K, H, W) where [W, H] is the `heatmap_size`. - - instance_heatmaps (np.ndarray): The decoupled heatmap in shape - (M*K, H, W) where M is the number of instances. - - keypoint_weights (np.ndarray): The weight for heatmaps in shape - (M*K). - - instance_coords (np.ndarray): The coordinates of instance roots - in shape (M, 2) - - Args: - input_size (tuple): Image size in [w, h] - heatmap_size (tuple): Heatmap size in [W, H] - root_type (str): The method to generate the instance root. Options - are: - - - ``'kpt_center'``: Average coordinate of all visible keypoints. - - ``'bbox_center'``: Center point of bounding boxes outlined by - all visible keypoints. - - Defaults to ``'kpt_center'`` - - heatmap_min_overlap (float): Minimum overlap rate among instances. - Used when calculating sigmas for instances. Defaults to 0.7 - background_weight (float): Loss weight of background pixels. - Defaults to 0.1 - encode_max_instances (int): The maximum number of instances - to encode for each sample. Defaults to 30 - - .. _`CID`: https://openaccess.thecvf.com/content/CVPR2022/html/Wang_ - Contextual_Instance_Decoupling_for_Robust_Multi-Person_Pose_Estimation_ - CVPR_2022_paper.html - """ - - # DecoupledHeatmap requires bounding boxes to determine the size of each - # instance, so that it can assign varying sigmas based on their size - auxiliary_encode_keys = {'bbox'} - - def __init__( - self, - input_size: Tuple[int, int], - heatmap_size: Tuple[int, int], - root_type: str = 'kpt_center', - heatmap_min_overlap: float = 0.7, - encode_max_instances: int = 30, - ): - super().__init__() - - self.input_size = input_size - self.heatmap_size = heatmap_size - self.root_type = root_type - self.encode_max_instances = encode_max_instances - self.heatmap_min_overlap = heatmap_min_overlap - - self.scale_factor = (np.array(input_size) / - heatmap_size).astype(np.float32) - - def _get_instance_wise_sigmas( - self, - bbox: np.ndarray, - ) -> np.ndarray: - """Get sigma values for each instance according to their size. - - Args: - bbox (np.ndarray): Bounding box in shape (N, 4, 2) - - Returns: - np.ndarray: Array containing the sigma values for each instance. 
- """ - sigmas = np.zeros((bbox.shape[0], ), dtype=np.float32) - - heights = np.sqrt(np.power(bbox[:, 0] - bbox[:, 1], 2).sum(axis=-1)) - widths = np.sqrt(np.power(bbox[:, 0] - bbox[:, 2], 2).sum(axis=-1)) - - for i in range(bbox.shape[0]): - h, w = heights[i], widths[i] - - # compute sigma for each instance - # condition 1 - a1, b1 = 1, h + w - c1 = w * h * (1 - self.heatmap_min_overlap) / ( - 1 + self.heatmap_min_overlap) - sq1 = np.sqrt(b1**2 - 4 * a1 * c1) - r1 = (b1 + sq1) / 2 - - # condition 2 - a2 = 4 - b2 = 2 * (h + w) - c2 = (1 - self.heatmap_min_overlap) * w * h - sq2 = np.sqrt(b2**2 - 4 * a2 * c2) - r2 = (b2 + sq2) / 2 - - # condition 3 - a3 = 4 * self.heatmap_min_overlap - b3 = -2 * self.heatmap_min_overlap * (h + w) - c3 = (self.heatmap_min_overlap - 1) * w * h - sq3 = np.sqrt(b3**2 - 4 * a3 * c3) - r3 = (b3 + sq3) / 2 - - sigmas[i] = min(r1, r2, r3) / 3 - - return sigmas - - def encode(self, - keypoints: np.ndarray, - keypoints_visible: Optional[np.ndarray] = None, - bbox: Optional[np.ndarray] = None) -> dict: - """Encode keypoints into heatmaps. - - Args: - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) - keypoints_visible (np.ndarray): Keypoint visibilities in shape - (N, K) - bbox (np.ndarray): Bounding box in shape (N, 8) which includes - coordinates of 4 corners. - - Returns: - dict: - - heatmaps (np.ndarray): The coupled heatmap in shape - (1+K, H, W) where [W, H] is the `heatmap_size`. - - instance_heatmaps (np.ndarray): The decoupled heatmap in shape - (N*K, H, W) where M is the number of instances. - - keypoint_weights (np.ndarray): The weight for heatmaps in shape - (N*K). - - instance_coords (np.ndarray): The coordinates of instance roots - in shape (N, 2) - """ - - if keypoints_visible is None: - keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) - if bbox is None: - # generate pseudo bbox via visible keypoints - bbox = get_instance_bbox(keypoints, keypoints_visible) - bbox = np.tile(bbox, 2).reshape(-1, 4, 2) - # corner order: left_top, left_bottom, right_top, right_bottom - bbox[:, 1:3, 0] = bbox[:, 0:2, 0] - - # keypoint coordinates in heatmap - _keypoints = keypoints / self.scale_factor - _bbox = bbox.reshape(-1, 4, 2) / self.scale_factor - - # compute the root and scale of each instance - roots, roots_visible = get_instance_root(_keypoints, keypoints_visible, - self.root_type) - - sigmas = self._get_instance_wise_sigmas(_bbox) - - # generate global heatmaps - heatmaps, keypoint_weights = generate_gaussian_heatmaps( - heatmap_size=self.heatmap_size, - keypoints=np.concatenate((_keypoints, roots[:, None]), axis=1), - keypoints_visible=np.concatenate( - (keypoints_visible, roots_visible[:, None]), axis=1), - sigma=sigmas) - roots_visible = keypoint_weights[:, -1] - - # select instances - inst_roots, inst_indices = [], [] - diagonal_lengths = get_diagonal_lengths(_keypoints, keypoints_visible) - for i in np.argsort(diagonal_lengths): - if roots_visible[i] < 1: - continue - # rand root point in 3x3 grid - x, y = roots[i] + np.random.randint(-1, 2, (2, )) - x = max(0, min(x, self.heatmap_size[0] - 1)) - y = max(0, min(y, self.heatmap_size[1] - 1)) - if (x, y) not in inst_roots: - inst_roots.append((x, y)) - inst_indices.append(i) - if len(inst_indices) > self.encode_max_instances: - rand_indices = random.sample( - range(len(inst_indices)), self.encode_max_instances) - inst_roots = [inst_roots[i] for i in rand_indices] - inst_indices = [inst_indices[i] for i in rand_indices] - - # generate instance-wise heatmaps - inst_heatmaps, 
inst_heatmap_weights = [], [] - for i in inst_indices: - inst_heatmap, inst_heatmap_weight = generate_gaussian_heatmaps( - heatmap_size=self.heatmap_size, - keypoints=_keypoints[i:i + 1], - keypoints_visible=keypoints_visible[i:i + 1], - sigma=sigmas[i].item()) - inst_heatmaps.append(inst_heatmap) - inst_heatmap_weights.append(inst_heatmap_weight) - - if len(inst_indices) > 0: - inst_heatmaps = np.concatenate(inst_heatmaps) - inst_heatmap_weights = np.concatenate(inst_heatmap_weights) - inst_roots = np.array(inst_roots, dtype=np.int32) - else: - inst_heatmaps = np.empty((0, *self.heatmap_size[::-1])) - inst_heatmap_weights = np.empty((0, )) - inst_roots = np.empty((0, 2), dtype=np.int32) - - encoded = dict( - heatmaps=heatmaps, - instance_heatmaps=inst_heatmaps, - keypoint_weights=inst_heatmap_weights, - instance_coords=inst_roots) - - return encoded - - def decode(self, instance_heatmaps: np.ndarray, - instance_scores: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: - """Decode keypoint coordinates from decoupled heatmaps. The decoded - keypoint coordinates are in the input image space. - - Args: - instance_heatmaps (np.ndarray): Heatmaps in shape (N, K, H, W) - instance_scores (np.ndarray): Confidence of instance roots - prediction in shape (N, 1) - - Returns: - tuple: - - keypoints (np.ndarray): Decoded keypoint coordinates in shape - (N, K, D) - - scores (np.ndarray): The keypoint scores in shape (N, K). It - usually represents the confidence of the keypoint prediction - """ - keypoints, keypoint_scores = [], [] - - for i in range(instance_heatmaps.shape[0]): - heatmaps = instance_heatmaps[i].copy() - kpts, scores = get_heatmap_maximum(heatmaps) - keypoints.append(refine_keypoints(kpts[None], heatmaps)) - keypoint_scores.append(scores[None]) - - keypoints = np.concatenate(keypoints) - # Restore the keypoint scale - keypoints = keypoints * self.scale_factor - - keypoint_scores = np.concatenate(keypoint_scores) - keypoint_scores *= instance_scores - - return keypoints, keypoint_scores +# Copyright (c) OpenMMLab. All rights reserved. +import random +from typing import Optional, Tuple + +import numpy as np + +from mmpose.registry import KEYPOINT_CODECS +from .base import BaseKeypointCodec +from .utils import (generate_gaussian_heatmaps, get_diagonal_lengths, + get_instance_bbox, get_instance_root) +from .utils.post_processing import get_heatmap_maximum +from .utils.refinement import refine_keypoints + + +@KEYPOINT_CODECS.register_module() +class DecoupledHeatmap(BaseKeypointCodec): + """Encode/decode keypoints with the method introduced in the paper CID. + + See the paper Contextual Instance Decoupling for Robust Multi-Person + Pose Estimation`_ by Wang et al (2022) for details + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - image size: [w, h] + - heatmap size: [W, H] + + Encoded: + - heatmaps (np.ndarray): The coupled heatmap in shape + (1+K, H, W) where [W, H] is the `heatmap_size`. + - instance_heatmaps (np.ndarray): The decoupled heatmap in shape + (M*K, H, W) where M is the number of instances. + - keypoint_weights (np.ndarray): The weight for heatmaps in shape + (M*K). + - instance_coords (np.ndarray): The coordinates of instance roots + in shape (M, 2) + + Args: + input_size (tuple): Image size in [w, h] + heatmap_size (tuple): Heatmap size in [W, H] + root_type (str): The method to generate the instance root. Options + are: + + - ``'kpt_center'``: Average coordinate of all visible keypoints. 
+ - ``'bbox_center'``: Center point of bounding boxes outlined by + all visible keypoints. + + Defaults to ``'kpt_center'`` + + heatmap_min_overlap (float): Minimum overlap rate among instances. + Used when calculating sigmas for instances. Defaults to 0.7 + background_weight (float): Loss weight of background pixels. + Defaults to 0.1 + encode_max_instances (int): The maximum number of instances + to encode for each sample. Defaults to 30 + + .. _`CID`: https://openaccess.thecvf.com/content/CVPR2022/html/Wang_ + Contextual_Instance_Decoupling_for_Robust_Multi-Person_Pose_Estimation_ + CVPR_2022_paper.html + """ + + # DecoupledHeatmap requires bounding boxes to determine the size of each + # instance, so that it can assign varying sigmas based on their size + auxiliary_encode_keys = {'bbox'} + + def __init__( + self, + input_size: Tuple[int, int], + heatmap_size: Tuple[int, int], + root_type: str = 'kpt_center', + heatmap_min_overlap: float = 0.7, + encode_max_instances: int = 30, + ): + super().__init__() + + self.input_size = input_size + self.heatmap_size = heatmap_size + self.root_type = root_type + self.encode_max_instances = encode_max_instances + self.heatmap_min_overlap = heatmap_min_overlap + + self.scale_factor = (np.array(input_size) / + heatmap_size).astype(np.float32) + + def _get_instance_wise_sigmas( + self, + bbox: np.ndarray, + ) -> np.ndarray: + """Get sigma values for each instance according to their size. + + Args: + bbox (np.ndarray): Bounding box in shape (N, 4, 2) + + Returns: + np.ndarray: Array containing the sigma values for each instance. + """ + sigmas = np.zeros((bbox.shape[0], ), dtype=np.float32) + + heights = np.sqrt(np.power(bbox[:, 0] - bbox[:, 1], 2).sum(axis=-1)) + widths = np.sqrt(np.power(bbox[:, 0] - bbox[:, 2], 2).sum(axis=-1)) + + for i in range(bbox.shape[0]): + h, w = heights[i], widths[i] + + # compute sigma for each instance + # condition 1 + a1, b1 = 1, h + w + c1 = w * h * (1 - self.heatmap_min_overlap) / ( + 1 + self.heatmap_min_overlap) + sq1 = np.sqrt(b1**2 - 4 * a1 * c1) + r1 = (b1 + sq1) / 2 + + # condition 2 + a2 = 4 + b2 = 2 * (h + w) + c2 = (1 - self.heatmap_min_overlap) * w * h + sq2 = np.sqrt(b2**2 - 4 * a2 * c2) + r2 = (b2 + sq2) / 2 + + # condition 3 + a3 = 4 * self.heatmap_min_overlap + b3 = -2 * self.heatmap_min_overlap * (h + w) + c3 = (self.heatmap_min_overlap - 1) * w * h + sq3 = np.sqrt(b3**2 - 4 * a3 * c3) + r3 = (b3 + sq3) / 2 + + sigmas[i] = min(r1, r2, r3) / 3 + + return sigmas + + def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None, + bbox: Optional[np.ndarray] = None) -> dict: + """Encode keypoints into heatmaps. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + bbox (np.ndarray): Bounding box in shape (N, 8) which includes + coordinates of 4 corners. + + Returns: + dict: + - heatmaps (np.ndarray): The coupled heatmap in shape + (1+K, H, W) where [W, H] is the `heatmap_size`. + - instance_heatmaps (np.ndarray): The decoupled heatmap in shape + (N*K, H, W) where M is the number of instances. + - keypoint_weights (np.ndarray): The weight for heatmaps in shape + (N*K). 
+ - instance_coords (np.ndarray): The coordinates of instance roots + in shape (N, 2) + """ + + if keypoints_visible is None: + keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) + if bbox is None: + # generate pseudo bbox via visible keypoints + bbox = get_instance_bbox(keypoints, keypoints_visible) + bbox = np.tile(bbox, 2).reshape(-1, 4, 2) + # corner order: left_top, left_bottom, right_top, right_bottom + bbox[:, 1:3, 0] = bbox[:, 0:2, 0] + + # keypoint coordinates in heatmap + _keypoints = keypoints / self.scale_factor + _bbox = bbox.reshape(-1, 4, 2) / self.scale_factor + + # compute the root and scale of each instance + roots, roots_visible = get_instance_root(_keypoints, keypoints_visible, + self.root_type) + + sigmas = self._get_instance_wise_sigmas(_bbox) + + # generate global heatmaps + heatmaps, keypoint_weights = generate_gaussian_heatmaps( + heatmap_size=self.heatmap_size, + keypoints=np.concatenate((_keypoints, roots[:, None]), axis=1), + keypoints_visible=np.concatenate( + (keypoints_visible, roots_visible[:, None]), axis=1), + sigma=sigmas) + roots_visible = keypoint_weights[:, -1] + + # select instances + inst_roots, inst_indices = [], [] + diagonal_lengths = get_diagonal_lengths(_keypoints, keypoints_visible) + for i in np.argsort(diagonal_lengths): + if roots_visible[i] < 1: + continue + # rand root point in 3x3 grid + x, y = roots[i] + np.random.randint(-1, 2, (2, )) + x = max(0, min(x, self.heatmap_size[0] - 1)) + y = max(0, min(y, self.heatmap_size[1] - 1)) + if (x, y) not in inst_roots: + inst_roots.append((x, y)) + inst_indices.append(i) + if len(inst_indices) > self.encode_max_instances: + rand_indices = random.sample( + range(len(inst_indices)), self.encode_max_instances) + inst_roots = [inst_roots[i] for i in rand_indices] + inst_indices = [inst_indices[i] for i in rand_indices] + + # generate instance-wise heatmaps + inst_heatmaps, inst_heatmap_weights = [], [] + for i in inst_indices: + inst_heatmap, inst_heatmap_weight = generate_gaussian_heatmaps( + heatmap_size=self.heatmap_size, + keypoints=_keypoints[i:i + 1], + keypoints_visible=keypoints_visible[i:i + 1], + sigma=sigmas[i].item()) + inst_heatmaps.append(inst_heatmap) + inst_heatmap_weights.append(inst_heatmap_weight) + + if len(inst_indices) > 0: + inst_heatmaps = np.concatenate(inst_heatmaps) + inst_heatmap_weights = np.concatenate(inst_heatmap_weights) + inst_roots = np.array(inst_roots, dtype=np.int32) + else: + inst_heatmaps = np.empty((0, *self.heatmap_size[::-1])) + inst_heatmap_weights = np.empty((0, )) + inst_roots = np.empty((0, 2), dtype=np.int32) + + encoded = dict( + heatmaps=heatmaps, + instance_heatmaps=inst_heatmaps, + keypoint_weights=inst_heatmap_weights, + instance_coords=inst_roots) + + return encoded + + def decode(self, instance_heatmaps: np.ndarray, + instance_scores: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoint coordinates from decoupled heatmaps. The decoded + keypoint coordinates are in the input image space. + + Args: + instance_heatmaps (np.ndarray): Heatmaps in shape (N, K, H, W) + instance_scores (np.ndarray): Confidence of instance roots + prediction in shape (N, 1) + + Returns: + tuple: + - keypoints (np.ndarray): Decoded keypoint coordinates in shape + (N, K, D) + - scores (np.ndarray): The keypoint scores in shape (N, K). 
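# A compact numeric sketch of the per-instance sigma used above: build the
# 4-corner pseudo-bbox the way `encode` does for an xyxy box, then solve the
# same three quadratics (a CornerNet-style minimum-overlap radius) and divide
# by 3. Toy box, default overlap of 0.7.
import numpy as np

min_overlap = 0.7
xyxy = np.array([[10., 20., 110., 220.]])     # (N=1, 4): x1, y1, x2, y2

corners = np.tile(xyxy, 2).reshape(-1, 4, 2)  # x1y1, x2y2, x1y1, x2y2
corners[:, 1:3, 0] = corners[:, 0:2, 0]       # -> left_top, left_bottom, right_top, right_bottom

h = np.linalg.norm(corners[0, 0] - corners[0, 1])  # 200.0
w = np.linalg.norm(corners[0, 0] - corners[0, 2])  # 100.0

a1, b1 = 1, h + w
c1 = w * h * (1 - min_overlap) / (1 + min_overlap)
r1 = (b1 + np.sqrt(b1 ** 2 - 4 * a1 * c1)) / 2

a2, b2, c2 = 4, 2 * (h + w), (1 - min_overlap) * w * h
r2 = (b2 + np.sqrt(b2 ** 2 - 4 * a2 * c2)) / 2

a3, b3 = 4 * min_overlap, -2 * min_overlap * (h + w)
c3 = (min_overlap - 1) * w * h
r3 = (b3 + np.sqrt(b3 ** 2 - 4 * a3 * c3)) / 2

sigma = min(r1, r2, r3) / 3
print(round(float(sigma), 2))  # ~12.26 for this box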
It + usually represents the confidence of the keypoint prediction + """ + keypoints, keypoint_scores = [], [] + + for i in range(instance_heatmaps.shape[0]): + heatmaps = instance_heatmaps[i].copy() + kpts, scores = get_heatmap_maximum(heatmaps) + keypoints.append(refine_keypoints(kpts[None], heatmaps)) + keypoint_scores.append(scores[None]) + + keypoints = np.concatenate(keypoints) + # Restore the keypoint scale + keypoints = keypoints * self.scale_factor + + keypoint_scores = np.concatenate(keypoint_scores) + keypoint_scores *= instance_scores + + return keypoints, keypoint_scores diff --git a/mmpose/codecs/image_pose_lifting.py b/mmpose/codecs/image_pose_lifting.py index 64bf925997..e43d9abb9f 100644 --- a/mmpose/codecs/image_pose_lifting.py +++ b/mmpose/codecs/image_pose_lifting.py @@ -1,203 +1,203 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Optional, Tuple - -import numpy as np - -from mmpose.registry import KEYPOINT_CODECS -from .base import BaseKeypointCodec - - -@KEYPOINT_CODECS.register_module() -class ImagePoseLifting(BaseKeypointCodec): - r"""Generate keypoint coordinates for pose lifter. - - Note: - - - instance number: N - - keypoint number: K - - keypoint dimension: D - - pose-lifitng target dimension: C - - Args: - num_keypoints (int): The number of keypoints in the dataset. - root_index (int): Root keypoint index in the pose. - remove_root (bool): If true, remove the root keypoint from the pose. - Default: ``False``. - save_index (bool): If true, store the root position separated from the - original pose. Default: ``False``. - keypoints_mean (np.ndarray, optional): Mean values of keypoints - coordinates in shape (K, D). - keypoints_std (np.ndarray, optional): Std values of keypoints - coordinates in shape (K, D). - target_mean (np.ndarray, optional): Mean values of pose-lifitng target - coordinates in shape (K, C). - target_std (np.ndarray, optional): Std values of pose-lifitng target - coordinates in shape (K, C). - """ - - auxiliary_encode_keys = {'lifting_target', 'lifting_target_visible'} - - def __init__(self, - num_keypoints: int, - root_index: int, - remove_root: bool = False, - save_index: bool = False, - keypoints_mean: Optional[np.ndarray] = None, - keypoints_std: Optional[np.ndarray] = None, - target_mean: Optional[np.ndarray] = None, - target_std: Optional[np.ndarray] = None): - super().__init__() - - self.num_keypoints = num_keypoints - self.root_index = root_index - self.remove_root = remove_root - self.save_index = save_index - if keypoints_mean is not None and keypoints_std is not None: - assert keypoints_mean.shape == keypoints_std.shape - if target_mean is not None and target_std is not None: - assert target_mean.shape == target_std.shape - self.keypoints_mean = keypoints_mean - self.keypoints_std = keypoints_std - self.target_mean = target_mean - self.target_std = target_std - - def encode(self, - keypoints: np.ndarray, - keypoints_visible: Optional[np.ndarray] = None, - lifting_target: Optional[np.ndarray] = None, - lifting_target_visible: Optional[np.ndarray] = None) -> dict: - """Encoding keypoints from input image space to normalized space. - - Args: - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D). - keypoints_visible (np.ndarray, optional): Keypoint visibilities in - shape (N, K). - lifting_target (np.ndarray, optional): 3d target coordinate in - shape (K, C). - lifting_target_visible (np.ndarray, optional): Target coordinate in - shape (K, ). 
- - Returns: - encoded (dict): Contains the following items: - - - keypoint_labels (np.ndarray): The processed keypoints in - shape (K * D, N) where D is 2 for 2d coordinates. - - lifting_target_label: The processed target coordinate in - shape (K, C) or (K-1, C). - - lifting_target_weights (np.ndarray): The target weights in - shape (K, ) or (K-1, ). - - trajectory_weights (np.ndarray): The trajectory weights in - shape (K, ). - - target_root (np.ndarray): The root coordinate of target in - shape (C, ). - - In addition, there are some optional items it may contain: - - - target_root_removed (bool): Indicate whether the root of - pose lifting target is removed. Added if ``self.remove_root`` - is ``True``. - - target_root_index (int): An integer indicating the index of - root. Added if ``self.remove_root`` and ``self.save_index`` - are ``True``. - """ - if keypoints_visible is None: - keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) - - if lifting_target is None: - lifting_target = keypoints[0] - - # set initial value for `lifting_target_weights` - # and `trajectory_weights` - if lifting_target_visible is None: - lifting_target_visible = np.ones( - lifting_target.shape[:-1], dtype=np.float32) - lifting_target_weights = lifting_target_visible - trajectory_weights = (1 / lifting_target[:, 2]) - else: - valid = lifting_target_visible > 0.5 - lifting_target_weights = np.where(valid, 1., 0.).astype(np.float32) - trajectory_weights = lifting_target_weights - - encoded = dict() - - # Zero-center the target pose around a given root keypoint - assert (lifting_target.ndim >= 2 and - lifting_target.shape[-2] > self.root_index), \ - f'Got invalid joint shape {lifting_target.shape}' - - root = lifting_target[..., self.root_index, :] - lifting_target_label = lifting_target - root - - if self.remove_root: - lifting_target_label = np.delete( - lifting_target_label, self.root_index, axis=-2) - assert lifting_target_weights.ndim in {1, 2} - axis_to_remove = -2 if lifting_target_weights.ndim == 2 else -1 - lifting_target_weights = np.delete( - lifting_target_weights, self.root_index, axis=axis_to_remove) - # Add a flag to avoid latter transforms that rely on the root - # joint or the original joint index - encoded['target_root_removed'] = True - - # Save the root index which is necessary to restore the global pose - if self.save_index: - encoded['target_root_index'] = self.root_index - - # Normalize the 2D keypoint coordinate with mean and std - keypoint_labels = keypoints.copy() - if self.keypoints_mean is not None and self.keypoints_std is not None: - keypoints_shape = keypoints.shape - assert self.keypoints_mean.shape == keypoints_shape[1:] - - keypoint_labels = (keypoint_labels - - self.keypoints_mean) / self.keypoints_std - if self.target_mean is not None and self.target_std is not None: - target_shape = lifting_target_label.shape - assert self.target_mean.shape == target_shape - - lifting_target_label = (lifting_target_label - - self.target_mean) / self.target_std - - # Generate reshaped keypoint coordinates - assert keypoint_labels.ndim in {2, 3} - if keypoint_labels.ndim == 2: - keypoint_labels = keypoint_labels[None, ...] 
- - encoded['keypoint_labels'] = keypoint_labels - encoded['lifting_target_label'] = lifting_target_label - encoded['lifting_target_weights'] = lifting_target_weights - encoded['trajectory_weights'] = trajectory_weights - encoded['target_root'] = root - - return encoded - - def decode(self, - encoded: np.ndarray, - target_root: Optional[np.ndarray] = None - ) -> Tuple[np.ndarray, np.ndarray]: - """Decode keypoint coordinates from normalized space to input image - space. - - Args: - encoded (np.ndarray): Coordinates in shape (N, K, C). - target_root (np.ndarray, optional): The target root coordinate. - Default: ``None``. - - Returns: - keypoints (np.ndarray): Decoded coordinates in shape (N, K, C). - scores (np.ndarray): The keypoint scores in shape (N, K). - """ - keypoints = encoded.copy() - - if self.target_mean is not None and self.target_std is not None: - assert self.target_mean.shape == keypoints.shape[1:] - keypoints = keypoints * self.target_std + self.target_mean - - if target_root.size > 0: - keypoints = keypoints + np.expand_dims(target_root, axis=0) - if self.remove_root: - keypoints = np.insert( - keypoints, self.root_index, target_root, axis=1) - scores = np.ones(keypoints.shape[:-1], dtype=np.float32) - - return keypoints, scores +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Tuple + +import numpy as np + +from mmpose.registry import KEYPOINT_CODECS +from .base import BaseKeypointCodec + + +@KEYPOINT_CODECS.register_module() +class ImagePoseLifting(BaseKeypointCodec): + r"""Generate keypoint coordinates for pose lifter. + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - pose-lifitng target dimension: C + + Args: + num_keypoints (int): The number of keypoints in the dataset. + root_index (int): Root keypoint index in the pose. + remove_root (bool): If true, remove the root keypoint from the pose. + Default: ``False``. + save_index (bool): If true, store the root position separated from the + original pose. Default: ``False``. + keypoints_mean (np.ndarray, optional): Mean values of keypoints + coordinates in shape (K, D). + keypoints_std (np.ndarray, optional): Std values of keypoints + coordinates in shape (K, D). + target_mean (np.ndarray, optional): Mean values of pose-lifitng target + coordinates in shape (K, C). + target_std (np.ndarray, optional): Std values of pose-lifitng target + coordinates in shape (K, C). 
+ """ + + auxiliary_encode_keys = {'lifting_target', 'lifting_target_visible'} + + def __init__(self, + num_keypoints: int, + root_index: int, + remove_root: bool = False, + save_index: bool = False, + keypoints_mean: Optional[np.ndarray] = None, + keypoints_std: Optional[np.ndarray] = None, + target_mean: Optional[np.ndarray] = None, + target_std: Optional[np.ndarray] = None): + super().__init__() + + self.num_keypoints = num_keypoints + self.root_index = root_index + self.remove_root = remove_root + self.save_index = save_index + if keypoints_mean is not None and keypoints_std is not None: + assert keypoints_mean.shape == keypoints_std.shape + if target_mean is not None and target_std is not None: + assert target_mean.shape == target_std.shape + self.keypoints_mean = keypoints_mean + self.keypoints_std = keypoints_std + self.target_mean = target_mean + self.target_std = target_std + + def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None, + lifting_target: Optional[np.ndarray] = None, + lifting_target_visible: Optional[np.ndarray] = None) -> dict: + """Encoding keypoints from input image space to normalized space. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D). + keypoints_visible (np.ndarray, optional): Keypoint visibilities in + shape (N, K). + lifting_target (np.ndarray, optional): 3d target coordinate in + shape (K, C). + lifting_target_visible (np.ndarray, optional): Target coordinate in + shape (K, ). + + Returns: + encoded (dict): Contains the following items: + + - keypoint_labels (np.ndarray): The processed keypoints in + shape (K * D, N) where D is 2 for 2d coordinates. + - lifting_target_label: The processed target coordinate in + shape (K, C) or (K-1, C). + - lifting_target_weights (np.ndarray): The target weights in + shape (K, ) or (K-1, ). + - trajectory_weights (np.ndarray): The trajectory weights in + shape (K, ). + - target_root (np.ndarray): The root coordinate of target in + shape (C, ). + + In addition, there are some optional items it may contain: + + - target_root_removed (bool): Indicate whether the root of + pose lifting target is removed. Added if ``self.remove_root`` + is ``True``. + - target_root_index (int): An integer indicating the index of + root. Added if ``self.remove_root`` and ``self.save_index`` + are ``True``. 
+ """ + if keypoints_visible is None: + keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) + + if lifting_target is None: + lifting_target = keypoints[0] + + # set initial value for `lifting_target_weights` + # and `trajectory_weights` + if lifting_target_visible is None: + lifting_target_visible = np.ones( + lifting_target.shape[:-1], dtype=np.float32) + lifting_target_weights = lifting_target_visible + trajectory_weights = (1 / lifting_target[:, 2]) + else: + valid = lifting_target_visible > 0.5 + lifting_target_weights = np.where(valid, 1., 0.).astype(np.float32) + trajectory_weights = lifting_target_weights + + encoded = dict() + + # Zero-center the target pose around a given root keypoint + assert (lifting_target.ndim >= 2 and + lifting_target.shape[-2] > self.root_index), \ + f'Got invalid joint shape {lifting_target.shape}' + + root = lifting_target[..., self.root_index, :] + lifting_target_label = lifting_target - root + + if self.remove_root: + lifting_target_label = np.delete( + lifting_target_label, self.root_index, axis=-2) + assert lifting_target_weights.ndim in {1, 2} + axis_to_remove = -2 if lifting_target_weights.ndim == 2 else -1 + lifting_target_weights = np.delete( + lifting_target_weights, self.root_index, axis=axis_to_remove) + # Add a flag to avoid latter transforms that rely on the root + # joint or the original joint index + encoded['target_root_removed'] = True + + # Save the root index which is necessary to restore the global pose + if self.save_index: + encoded['target_root_index'] = self.root_index + + # Normalize the 2D keypoint coordinate with mean and std + keypoint_labels = keypoints.copy() + if self.keypoints_mean is not None and self.keypoints_std is not None: + keypoints_shape = keypoints.shape + assert self.keypoints_mean.shape == keypoints_shape[1:] + + keypoint_labels = (keypoint_labels - + self.keypoints_mean) / self.keypoints_std + if self.target_mean is not None and self.target_std is not None: + target_shape = lifting_target_label.shape + assert self.target_mean.shape == target_shape + + lifting_target_label = (lifting_target_label - + self.target_mean) / self.target_std + + # Generate reshaped keypoint coordinates + assert keypoint_labels.ndim in {2, 3} + if keypoint_labels.ndim == 2: + keypoint_labels = keypoint_labels[None, ...] + + encoded['keypoint_labels'] = keypoint_labels + encoded['lifting_target_label'] = lifting_target_label + encoded['lifting_target_weights'] = lifting_target_weights + encoded['trajectory_weights'] = trajectory_weights + encoded['target_root'] = root + + return encoded + + def decode(self, + encoded: np.ndarray, + target_root: Optional[np.ndarray] = None + ) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoint coordinates from normalized space to input image + space. + + Args: + encoded (np.ndarray): Coordinates in shape (N, K, C). + target_root (np.ndarray, optional): The target root coordinate. + Default: ``None``. + + Returns: + keypoints (np.ndarray): Decoded coordinates in shape (N, K, C). + scores (np.ndarray): The keypoint scores in shape (N, K). 
+ """ + keypoints = encoded.copy() + + if self.target_mean is not None and self.target_std is not None: + assert self.target_mean.shape == keypoints.shape[1:] + keypoints = keypoints * self.target_std + self.target_mean + + if target_root.size > 0: + keypoints = keypoints + np.expand_dims(target_root, axis=0) + if self.remove_root: + keypoints = np.insert( + keypoints, self.root_index, target_root, axis=1) + scores = np.ones(keypoints.shape[:-1], dtype=np.float32) + + return keypoints, scores diff --git a/mmpose/codecs/integral_regression_label.py b/mmpose/codecs/integral_regression_label.py index ed8e72cb10..08f93f91c8 100644 --- a/mmpose/codecs/integral_regression_label.py +++ b/mmpose/codecs/integral_regression_label.py @@ -1,115 +1,115 @@ -# Copyright (c) OpenMMLab. All rights reserved. - -from typing import Optional, Tuple - -import numpy as np - -from mmpose.registry import KEYPOINT_CODECS -from .base import BaseKeypointCodec -from .msra_heatmap import MSRAHeatmap -from .regression_label import RegressionLabel - - -@KEYPOINT_CODECS.register_module() -class IntegralRegressionLabel(BaseKeypointCodec): - """Generate keypoint coordinates and normalized heatmaps. See the paper: - `DSNT`_ by Nibali et al(2018). - - Note: - - - instance number: N - - keypoint number: K - - keypoint dimension: D - - image size: [w, h] - - Encoded: - - - keypoint_labels (np.ndarray): The normalized regression labels in - shape (N, K, D) where D is 2 for 2d coordinates - - heatmaps (np.ndarray): The generated heatmap in shape (K, H, W) where - [W, H] is the `heatmap_size` - - keypoint_weights (np.ndarray): The target weights in shape (N, K) - - Args: - input_size (tuple): Input image size in [w, h] - heatmap_size (tuple): Heatmap size in [W, H] - sigma (float): The sigma value of the Gaussian heatmap - unbiased (bool): Whether use unbiased method (DarkPose) in ``'msra'`` - encoding. See `Dark Pose`_ for details. Defaults to ``False`` - blur_kernel_size (int): The Gaussian blur kernel size of the heatmap - modulation in DarkPose. The kernel size and sigma should follow - the expirical formula :math:`sigma = 0.3*((ks-1)*0.5-1)+0.8`. - Defaults to 11 - normalize (bool): Whether to normalize the heatmaps. Defaults to True. - - .. _`DSNT`: https://arxiv.org/abs/1801.07372 - """ - - def __init__(self, - input_size: Tuple[int, int], - heatmap_size: Tuple[int, int], - sigma: float, - unbiased: bool = False, - blur_kernel_size: int = 11, - normalize: bool = True) -> None: - super().__init__() - - self.heatmap_codec = MSRAHeatmap(input_size, heatmap_size, sigma, - unbiased, blur_kernel_size) - self.keypoint_codec = RegressionLabel(input_size) - self.normalize = normalize - - def encode(self, - keypoints: np.ndarray, - keypoints_visible: Optional[np.ndarray] = None) -> dict: - """Encoding keypoints to regression labels and heatmaps. 
- - Args: - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) - keypoints_visible (np.ndarray): Keypoint visibilities in shape - (N, K) - - Returns: - dict: - - keypoint_labels (np.ndarray): The normalized regression labels in - shape (N, K, D) where D is 2 for 2d coordinates - - heatmaps (np.ndarray): The generated heatmap in shape - (K, H, W) where [W, H] is the `heatmap_size` - - keypoint_weights (np.ndarray): The target weights in shape - (N, K) - """ - encoded_hm = self.heatmap_codec.encode(keypoints, keypoints_visible) - encoded_kp = self.keypoint_codec.encode(keypoints, keypoints_visible) - - heatmaps = encoded_hm['heatmaps'] - keypoint_labels = encoded_kp['keypoint_labels'] - keypoint_weights = encoded_kp['keypoint_weights'] - - if self.normalize: - val_sum = heatmaps.sum(axis=(-1, -2)).reshape(-1, 1, 1) + 1e-24 - heatmaps = heatmaps / val_sum - - encoded = dict( - keypoint_labels=keypoint_labels, - heatmaps=heatmaps, - keypoint_weights=keypoint_weights) - - return encoded - - def decode(self, encoded: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: - """Decode keypoint coordinates from normalized space to input image - space. - - Args: - encoded (np.ndarray): Coordinates in shape (N, K, D) - - Returns: - tuple: - - keypoints (np.ndarray): Decoded coordinates in shape (N, K, D) - - socres (np.ndarray): The keypoint scores in shape (N, K). - It usually represents the confidence of the keypoint prediction - """ - - keypoints, scores = self.keypoint_codec.decode(encoded) - - return keypoints, scores +# Copyright (c) OpenMMLab. All rights reserved. + +from typing import Optional, Tuple + +import numpy as np + +from mmpose.registry import KEYPOINT_CODECS +from .base import BaseKeypointCodec +from .msra_heatmap import MSRAHeatmap +from .regression_label import RegressionLabel + + +@KEYPOINT_CODECS.register_module() +class IntegralRegressionLabel(BaseKeypointCodec): + """Generate keypoint coordinates and normalized heatmaps. See the paper: + `DSNT`_ by Nibali et al(2018). + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - image size: [w, h] + + Encoded: + + - keypoint_labels (np.ndarray): The normalized regression labels in + shape (N, K, D) where D is 2 for 2d coordinates + - heatmaps (np.ndarray): The generated heatmap in shape (K, H, W) where + [W, H] is the `heatmap_size` + - keypoint_weights (np.ndarray): The target weights in shape (N, K) + + Args: + input_size (tuple): Input image size in [w, h] + heatmap_size (tuple): Heatmap size in [W, H] + sigma (float): The sigma value of the Gaussian heatmap + unbiased (bool): Whether use unbiased method (DarkPose) in ``'msra'`` + encoding. See `Dark Pose`_ for details. Defaults to ``False`` + blur_kernel_size (int): The Gaussian blur kernel size of the heatmap + modulation in DarkPose. The kernel size and sigma should follow + the expirical formula :math:`sigma = 0.3*((ks-1)*0.5-1)+0.8`. + Defaults to 11 + normalize (bool): Whether to normalize the heatmaps. Defaults to True. + + .. 
_`DSNT`: https://arxiv.org/abs/1801.07372 + """ + + def __init__(self, + input_size: Tuple[int, int], + heatmap_size: Tuple[int, int], + sigma: float, + unbiased: bool = False, + blur_kernel_size: int = 11, + normalize: bool = True) -> None: + super().__init__() + + self.heatmap_codec = MSRAHeatmap(input_size, heatmap_size, sigma, + unbiased, blur_kernel_size) + self.keypoint_codec = RegressionLabel(input_size) + self.normalize = normalize + + def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None) -> dict: + """Encoding keypoints to regression labels and heatmaps. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + + Returns: + dict: + - keypoint_labels (np.ndarray): The normalized regression labels in + shape (N, K, D) where D is 2 for 2d coordinates + - heatmaps (np.ndarray): The generated heatmap in shape + (K, H, W) where [W, H] is the `heatmap_size` + - keypoint_weights (np.ndarray): The target weights in shape + (N, K) + """ + encoded_hm = self.heatmap_codec.encode(keypoints, keypoints_visible) + encoded_kp = self.keypoint_codec.encode(keypoints, keypoints_visible) + + heatmaps = encoded_hm['heatmaps'] + keypoint_labels = encoded_kp['keypoint_labels'] + keypoint_weights = encoded_kp['keypoint_weights'] + + if self.normalize: + val_sum = heatmaps.sum(axis=(-1, -2)).reshape(-1, 1, 1) + 1e-24 + heatmaps = heatmaps / val_sum + + encoded = dict( + keypoint_labels=keypoint_labels, + heatmaps=heatmaps, + keypoint_weights=keypoint_weights) + + return encoded + + def decode(self, encoded: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoint coordinates from normalized space to input image + space. + + Args: + encoded (np.ndarray): Coordinates in shape (N, K, D) + + Returns: + tuple: + - keypoints (np.ndarray): Decoded coordinates in shape (N, K, D) + - socres (np.ndarray): The keypoint scores in shape (N, K). + It usually represents the confidence of the keypoint prediction + """ + + keypoints, scores = self.keypoint_codec.decode(encoded) + + return keypoints, scores diff --git a/mmpose/codecs/megvii_heatmap.py b/mmpose/codecs/megvii_heatmap.py index e898004637..946bcb5e32 100644 --- a/mmpose/codecs/megvii_heatmap.py +++ b/mmpose/codecs/megvii_heatmap.py @@ -1,144 +1,144 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from itertools import product -from typing import Optional, Tuple - -import cv2 -import numpy as np - -from mmpose.registry import KEYPOINT_CODECS -from .base import BaseKeypointCodec -from .utils import gaussian_blur, get_heatmap_maximum - - -@KEYPOINT_CODECS.register_module() -class MegviiHeatmap(BaseKeypointCodec): - """Represent keypoints as heatmaps via "Megvii" approach. See `MSPN`_ - (2019) and `CPN`_ (2018) for details. - - Note: - - - instance number: N - - keypoint number: K - - keypoint dimension: D - - image size: [w, h] - - heatmap size: [W, H] - - Encoded: - - - heatmaps (np.ndarray): The generated heatmap in shape (K, H, W) - where [W, H] is the `heatmap_size` - - keypoint_weights (np.ndarray): The target weights in shape (N, K) - - Args: - input_size (tuple): Image size in [w, h] - heatmap_size (tuple): Heatmap size in [W, H] - kernel_size (tuple): The kernel size of the heatmap gaussian in - [ks_x, ks_y] - - .. _`MSPN`: https://arxiv.org/abs/1901.00148 - .. 
_`CPN`: https://arxiv.org/abs/1711.07319 - """ - - def __init__( - self, - input_size: Tuple[int, int], - heatmap_size: Tuple[int, int], - kernel_size: int, - ) -> None: - - super().__init__() - self.input_size = input_size - self.heatmap_size = heatmap_size - self.kernel_size = kernel_size - self.scale_factor = (np.array(input_size) / - heatmap_size).astype(np.float32) - - def encode(self, - keypoints: np.ndarray, - keypoints_visible: Optional[np.ndarray] = None) -> dict: - """Encode keypoints into heatmaps. Note that the original keypoint - coordinates should be in the input image space. - - Args: - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) - keypoints_visible (np.ndarray): Keypoint visibilities in shape - (N, K) - - Returns: - dict: - - heatmaps (np.ndarray): The generated heatmap in shape - (K, H, W) where [W, H] is the `heatmap_size` - - keypoint_weights (np.ndarray): The target weights in shape - (N, K) - """ - - N, K, _ = keypoints.shape - W, H = self.heatmap_size - - assert N == 1, ( - f'{self.__class__.__name__} only support single-instance ' - 'keypoint encoding') - - heatmaps = np.zeros((K, H, W), dtype=np.float32) - keypoint_weights = keypoints_visible.copy() - - for n, k in product(range(N), range(K)): - # skip unlabled keypoints - if keypoints_visible[n, k] < 0.5: - continue - - # get center coordinates - kx, ky = (keypoints[n, k] / self.scale_factor).astype(np.int64) - if kx < 0 or kx >= W or ky < 0 or ky >= H: - keypoint_weights[n, k] = 0 - continue - - heatmaps[k, ky, kx] = 1. - kernel_size = (self.kernel_size, self.kernel_size) - heatmaps[k] = cv2.GaussianBlur(heatmaps[k], kernel_size, 0) - - # normalize the heatmap - heatmaps[k] = heatmaps[k] / heatmaps[k, ky, kx] * 255. - - encoded = dict(heatmaps=heatmaps, keypoint_weights=keypoint_weights) - - return encoded - - def decode(self, encoded: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: - """Decode keypoint coordinates from heatmaps. The decoded keypoint - coordinates are in the input image space. - - Args: - encoded (np.ndarray): Heatmaps in shape (K, H, W) - - Returns: - tuple: - - keypoints (np.ndarray): Decoded keypoint coordinates in shape - (K, D) - - scores (np.ndarray): The keypoint scores in shape (K,). It - usually represents the confidence of the keypoint prediction - """ - heatmaps = gaussian_blur(encoded.copy(), self.kernel_size) - K, H, W = heatmaps.shape - - keypoints, scores = get_heatmap_maximum(heatmaps) - - for k in range(K): - heatmap = heatmaps[k] - px = int(keypoints[k, 0]) - py = int(keypoints[k, 1]) - if 1 < px < W - 1 and 1 < py < H - 1: - diff = np.array([ - heatmap[py][px + 1] - heatmap[py][px - 1], - heatmap[py + 1][px] - heatmap[py - 1][px] - ]) - keypoints[k] += (np.sign(diff) * 0.25 + 0.5) - - scores = scores / 255.0 + 0.5 - - # Unsqueeze the instance dimension for single-instance results - # and restore the keypoint scales - keypoints = keypoints[None] * self.scale_factor - scores = scores[None] - - return keypoints, scores +# Copyright (c) OpenMMLab. All rights reserved. +from itertools import product +from typing import Optional, Tuple + +import cv2 +import numpy as np + +from mmpose.registry import KEYPOINT_CODECS +from .base import BaseKeypointCodec +from .utils import gaussian_blur, get_heatmap_maximum + + +@KEYPOINT_CODECS.register_module() +class MegviiHeatmap(BaseKeypointCodec): + """Represent keypoints as heatmaps via "Megvii" approach. See `MSPN`_ + (2019) and `CPN`_ (2018) for details. 
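A round-trip sketch for the Megvii-style codec (editorial addition, not part of the patch; the `mmpose.codecs` import path is assumed). Note that, as the type hint shows, `kernel_size` is a single odd integer in this implementation:

import numpy as np
from mmpose.codecs import MegviiHeatmap

codec = MegviiHeatmap(input_size=(192, 256), heatmap_size=(48, 64), kernel_size=11)

keypoints = (np.random.rand(1, 17, 2) * [192, 256]).astype(np.float32)
visible = np.ones((1, 17), dtype=np.float32)

encoded = codec.encode(keypoints, visible)            # heatmaps: (17, 64, 48)
decoded, scores = codec.decode(encoded['heatmaps'])   # back in 192 x 256 space
# Each heatmap is a blurred delta rescaled so its peak equals 255, hence the
# `scores / 255 + 0.5` mapping applied during decoding.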
+ + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - image size: [w, h] + - heatmap size: [W, H] + + Encoded: + + - heatmaps (np.ndarray): The generated heatmap in shape (K, H, W) + where [W, H] is the `heatmap_size` + - keypoint_weights (np.ndarray): The target weights in shape (N, K) + + Args: + input_size (tuple): Image size in [w, h] + heatmap_size (tuple): Heatmap size in [W, H] + kernel_size (tuple): The kernel size of the heatmap gaussian in + [ks_x, ks_y] + + .. _`MSPN`: https://arxiv.org/abs/1901.00148 + .. _`CPN`: https://arxiv.org/abs/1711.07319 + """ + + def __init__( + self, + input_size: Tuple[int, int], + heatmap_size: Tuple[int, int], + kernel_size: int, + ) -> None: + + super().__init__() + self.input_size = input_size + self.heatmap_size = heatmap_size + self.kernel_size = kernel_size + self.scale_factor = (np.array(input_size) / + heatmap_size).astype(np.float32) + + def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None) -> dict: + """Encode keypoints into heatmaps. Note that the original keypoint + coordinates should be in the input image space. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + + Returns: + dict: + - heatmaps (np.ndarray): The generated heatmap in shape + (K, H, W) where [W, H] is the `heatmap_size` + - keypoint_weights (np.ndarray): The target weights in shape + (N, K) + """ + + N, K, _ = keypoints.shape + W, H = self.heatmap_size + + assert N == 1, ( + f'{self.__class__.__name__} only support single-instance ' + 'keypoint encoding') + + heatmaps = np.zeros((K, H, W), dtype=np.float32) + keypoint_weights = keypoints_visible.copy() + + for n, k in product(range(N), range(K)): + # skip unlabled keypoints + if keypoints_visible[n, k] < 0.5: + continue + + # get center coordinates + kx, ky = (keypoints[n, k] / self.scale_factor).astype(np.int64) + if kx < 0 or kx >= W or ky < 0 or ky >= H: + keypoint_weights[n, k] = 0 + continue + + heatmaps[k, ky, kx] = 1. + kernel_size = (self.kernel_size, self.kernel_size) + heatmaps[k] = cv2.GaussianBlur(heatmaps[k], kernel_size, 0) + + # normalize the heatmap + heatmaps[k] = heatmaps[k] / heatmaps[k, ky, kx] * 255. + + encoded = dict(heatmaps=heatmaps, keypoint_weights=keypoint_weights) + + return encoded + + def decode(self, encoded: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoint coordinates from heatmaps. The decoded keypoint + coordinates are in the input image space. + + Args: + encoded (np.ndarray): Heatmaps in shape (K, H, W) + + Returns: + tuple: + - keypoints (np.ndarray): Decoded keypoint coordinates in shape + (K, D) + - scores (np.ndarray): The keypoint scores in shape (K,). 
It + usually represents the confidence of the keypoint prediction + """ + heatmaps = gaussian_blur(encoded.copy(), self.kernel_size) + K, H, W = heatmaps.shape + + keypoints, scores = get_heatmap_maximum(heatmaps) + + for k in range(K): + heatmap = heatmaps[k] + px = int(keypoints[k, 0]) + py = int(keypoints[k, 1]) + if 1 < px < W - 1 and 1 < py < H - 1: + diff = np.array([ + heatmap[py][px + 1] - heatmap[py][px - 1], + heatmap[py + 1][px] - heatmap[py - 1][px] + ]) + keypoints[k] += (np.sign(diff) * 0.25 + 0.5) + + scores = scores / 255.0 + 0.5 + + # Unsqueeze the instance dimension for single-instance results + # and restore the keypoint scales + keypoints = keypoints[None] * self.scale_factor + scores = scores[None] + + return keypoints, scores diff --git a/mmpose/codecs/msra_heatmap.py b/mmpose/codecs/msra_heatmap.py index 63ba292e4d..69071f779e 100644 --- a/mmpose/codecs/msra_heatmap.py +++ b/mmpose/codecs/msra_heatmap.py @@ -1,150 +1,150 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Optional, Tuple - -import numpy as np - -from mmpose.registry import KEYPOINT_CODECS -from .base import BaseKeypointCodec -from .utils.gaussian_heatmap import (generate_gaussian_heatmaps, - generate_unbiased_gaussian_heatmaps) -from .utils.post_processing import get_heatmap_maximum -from .utils.refinement import refine_keypoints, refine_keypoints_dark - - -@KEYPOINT_CODECS.register_module() -class MSRAHeatmap(BaseKeypointCodec): - """Represent keypoints as heatmaps via "MSRA" approach. See the paper: - `Simple Baselines for Human Pose Estimation and Tracking`_ by Xiao et al - (2018) for details. - - Note: - - - instance number: N - - keypoint number: K - - keypoint dimension: D - - image size: [w, h] - - heatmap size: [W, H] - - Encoded: - - - heatmaps (np.ndarray): The generated heatmap in shape (K, H, W) - where [W, H] is the `heatmap_size` - - keypoint_weights (np.ndarray): The target weights in shape (N, K) - - Args: - input_size (tuple): Image size in [w, h] - heatmap_size (tuple): Heatmap size in [W, H] - sigma (float): The sigma value of the Gaussian heatmap - unbiased (bool): Whether use unbiased method (DarkPose) in ``'msra'`` - encoding. See `Dark Pose`_ for details. Defaults to ``False`` - blur_kernel_size (int): The Gaussian blur kernel size of the heatmap - modulation in DarkPose. The kernel size and sigma should follow - the expirical formula :math:`sigma = 0.3*((ks-1)*0.5-1)+0.8`. - Defaults to 11 - - .. _`Simple Baselines for Human Pose Estimation and Tracking`: - https://arxiv.org/abs/1804.06208 - .. _`Dark Pose`: https://arxiv.org/abs/1910.06278 - """ - - def __init__(self, - input_size: Tuple[int, int], - heatmap_size: Tuple[int, int], - sigma: float, - unbiased: bool = False, - blur_kernel_size: int = 11) -> None: - super().__init__() - self.input_size = input_size - self.heatmap_size = heatmap_size - self.sigma = sigma - self.unbiased = unbiased - - # The Gaussian blur kernel size of the heatmap modulation - # in DarkPose and the sigma value follows the expirical - # formula :math:`sigma = 0.3*((ks-1)*0.5-1)+0.8` - # which gives: - # sigma~=3 if ks=17 - # sigma=2 if ks=11; - # sigma~=1.5 if ks=7; - # sigma~=1 if ks=3; - self.blur_kernel_size = blur_kernel_size - self.scale_factor = (np.array(input_size) / - heatmap_size).astype(np.float32) - - def encode(self, - keypoints: np.ndarray, - keypoints_visible: Optional[np.ndarray] = None) -> dict: - """Encode keypoints into heatmaps. Note that the original keypoint - coordinates should be in the input image space. 
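A usage sketch for the MSRA / SimpleBaseline codec described above (editorial addition, not part of the patch; import path assumed to be `mmpose.codecs`). Setting `unbiased=True` would switch both encoding and decoding to the DarkPose variant:

import numpy as np
from mmpose.codecs import MSRAHeatmap

codec = MSRAHeatmap(input_size=(192, 256), heatmap_size=(48, 64),
                    sigma=2.0, unbiased=False, blur_kernel_size=11)

keypoints = (np.random.rand(1, 17, 2) * [192, 256]).astype(np.float32)
encoded = codec.encode(keypoints)                     # heatmaps: (17, 64, 48)
decoded, scores = codec.decode(encoded['heatmaps'])   # (1, 17, 2) and (1, 17)
# Decoding finds the per-channel maximum, applies sub-pixel refinement, then
# multiplies by input_size / heatmap_size to return to image coordinates.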
- - Args: - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) - keypoints_visible (np.ndarray): Keypoint visibilities in shape - (N, K) - - Returns: - dict: - - heatmaps (np.ndarray): The generated heatmap in shape - (K, H, W) where [W, H] is the `heatmap_size` - - keypoint_weights (np.ndarray): The target weights in shape - (N, K) - """ - - assert keypoints.shape[0] == 1, ( - f'{self.__class__.__name__} only support single-instance ' - 'keypoint encoding') - - if keypoints_visible is None: - keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) - - if self.unbiased: - heatmaps, keypoint_weights = generate_unbiased_gaussian_heatmaps( - heatmap_size=self.heatmap_size, - keypoints=keypoints / self.scale_factor, - keypoints_visible=keypoints_visible, - sigma=self.sigma) - else: - heatmaps, keypoint_weights = generate_gaussian_heatmaps( - heatmap_size=self.heatmap_size, - keypoints=keypoints / self.scale_factor, - keypoints_visible=keypoints_visible, - sigma=self.sigma) - - encoded = dict(heatmaps=heatmaps, keypoint_weights=keypoint_weights) - - return encoded - - def decode(self, encoded: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: - """Decode keypoint coordinates from heatmaps. The decoded keypoint - coordinates are in the input image space. - - Args: - encoded (np.ndarray): Heatmaps in shape (K, H, W) - - Returns: - tuple: - - keypoints (np.ndarray): Decoded keypoint coordinates in shape - (N, K, D) - - scores (np.ndarray): The keypoint scores in shape (N, K). It - usually represents the confidence of the keypoint prediction - """ - heatmaps = encoded.copy() - K, H, W = heatmaps.shape - - keypoints, scores = get_heatmap_maximum(heatmaps) - - # Unsqueeze the instance dimension for single-instance results - keypoints, scores = keypoints[None], scores[None] - - if self.unbiased: - # Alleviate biased coordinate - keypoints = refine_keypoints_dark( - keypoints, heatmaps, blur_kernel_size=self.blur_kernel_size) - - else: - keypoints = refine_keypoints(keypoints, heatmaps) - - # Restore the keypoint scale - keypoints = keypoints * self.scale_factor - - return keypoints, scores +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Tuple + +import numpy as np + +from mmpose.registry import KEYPOINT_CODECS +from .base import BaseKeypointCodec +from .utils.gaussian_heatmap import (generate_gaussian_heatmaps, + generate_unbiased_gaussian_heatmaps) +from .utils.post_processing import get_heatmap_maximum +from .utils.refinement import refine_keypoints, refine_keypoints_dark + + +@KEYPOINT_CODECS.register_module() +class MSRAHeatmap(BaseKeypointCodec): + """Represent keypoints as heatmaps via "MSRA" approach. See the paper: + `Simple Baselines for Human Pose Estimation and Tracking`_ by Xiao et al + (2018) for details. + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - image size: [w, h] + - heatmap size: [W, H] + + Encoded: + + - heatmaps (np.ndarray): The generated heatmap in shape (K, H, W) + where [W, H] is the `heatmap_size` + - keypoint_weights (np.ndarray): The target weights in shape (N, K) + + Args: + input_size (tuple): Image size in [w, h] + heatmap_size (tuple): Heatmap size in [W, H] + sigma (float): The sigma value of the Gaussian heatmap + unbiased (bool): Whether use unbiased method (DarkPose) in ``'msra'`` + encoding. See `Dark Pose`_ for details. Defaults to ``False`` + blur_kernel_size (int): The Gaussian blur kernel size of the heatmap + modulation in DarkPose. 
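A quick numerical check of the empirical kernel/sigma relation quoted in this codec's documentation (editorial note, not part of the patch):

# sigma = 0.3 * ((ks - 1) * 0.5 - 1) + 0.8
for ks in (3, 7, 11, 17):
    sigma = 0.3 * ((ks - 1) * 0.5 - 1) + 0.8
    print(ks, round(sigma, 2))  # 3 -> 0.8, 7 -> 1.4, 11 -> 2.0, 17 -> 2.9

These values are consistent with the "sigma ~= 1 / 1.5 / 2 / 3" guidance in the `__init__` comment, so the default `blur_kernel_size=11` pairs with `sigma=2.0`.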
The kernel size and sigma should follow + the expirical formula :math:`sigma = 0.3*((ks-1)*0.5-1)+0.8`. + Defaults to 11 + + .. _`Simple Baselines for Human Pose Estimation and Tracking`: + https://arxiv.org/abs/1804.06208 + .. _`Dark Pose`: https://arxiv.org/abs/1910.06278 + """ + + def __init__(self, + input_size: Tuple[int, int], + heatmap_size: Tuple[int, int], + sigma: float, + unbiased: bool = False, + blur_kernel_size: int = 11) -> None: + super().__init__() + self.input_size = input_size + self.heatmap_size = heatmap_size + self.sigma = sigma + self.unbiased = unbiased + + # The Gaussian blur kernel size of the heatmap modulation + # in DarkPose and the sigma value follows the expirical + # formula :math:`sigma = 0.3*((ks-1)*0.5-1)+0.8` + # which gives: + # sigma~=3 if ks=17 + # sigma=2 if ks=11; + # sigma~=1.5 if ks=7; + # sigma~=1 if ks=3; + self.blur_kernel_size = blur_kernel_size + self.scale_factor = (np.array(input_size) / + heatmap_size).astype(np.float32) + + def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None) -> dict: + """Encode keypoints into heatmaps. Note that the original keypoint + coordinates should be in the input image space. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + + Returns: + dict: + - heatmaps (np.ndarray): The generated heatmap in shape + (K, H, W) where [W, H] is the `heatmap_size` + - keypoint_weights (np.ndarray): The target weights in shape + (N, K) + """ + + assert keypoints.shape[0] == 1, ( + f'{self.__class__.__name__} only support single-instance ' + 'keypoint encoding') + + if keypoints_visible is None: + keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) + + if self.unbiased: + heatmaps, keypoint_weights = generate_unbiased_gaussian_heatmaps( + heatmap_size=self.heatmap_size, + keypoints=keypoints / self.scale_factor, + keypoints_visible=keypoints_visible, + sigma=self.sigma) + else: + heatmaps, keypoint_weights = generate_gaussian_heatmaps( + heatmap_size=self.heatmap_size, + keypoints=keypoints / self.scale_factor, + keypoints_visible=keypoints_visible, + sigma=self.sigma) + + encoded = dict(heatmaps=heatmaps, keypoint_weights=keypoint_weights) + + return encoded + + def decode(self, encoded: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoint coordinates from heatmaps. The decoded keypoint + coordinates are in the input image space. + + Args: + encoded (np.ndarray): Heatmaps in shape (K, H, W) + + Returns: + tuple: + - keypoints (np.ndarray): Decoded keypoint coordinates in shape + (N, K, D) + - scores (np.ndarray): The keypoint scores in shape (N, K). 
It + usually represents the confidence of the keypoint prediction + """ + heatmaps = encoded.copy() + K, H, W = heatmaps.shape + + keypoints, scores = get_heatmap_maximum(heatmaps) + + # Unsqueeze the instance dimension for single-instance results + keypoints, scores = keypoints[None], scores[None] + + if self.unbiased: + # Alleviate biased coordinate + keypoints = refine_keypoints_dark( + keypoints, heatmaps, blur_kernel_size=self.blur_kernel_size) + + else: + keypoints = refine_keypoints(keypoints, heatmaps) + + # Restore the keypoint scale + keypoints = keypoints * self.scale_factor + + return keypoints, scores diff --git a/mmpose/codecs/regression_label.py b/mmpose/codecs/regression_label.py index f79195beb4..179c0113d7 100644 --- a/mmpose/codecs/regression_label.py +++ b/mmpose/codecs/regression_label.py @@ -1,103 +1,103 @@ -# Copyright (c) OpenMMLab. All rights reserved. - -from typing import Optional, Tuple - -import numpy as np - -from mmpose.registry import KEYPOINT_CODECS -from .base import BaseKeypointCodec - - -@KEYPOINT_CODECS.register_module() -class RegressionLabel(BaseKeypointCodec): - r"""Generate keypoint coordinates. - - Note: - - - instance number: N - - keypoint number: K - - keypoint dimension: D - - image size: [w, h] - - Encoded: - - - keypoint_labels (np.ndarray): The normalized regression labels in - shape (N, K, D) where D is 2 for 2d coordinates - - keypoint_weights (np.ndarray): The target weights in shape (N, K) - - Args: - input_size (tuple): Input image size in [w, h] - - """ - - def __init__(self, input_size: Tuple[int, int]) -> None: - super().__init__() - - self.input_size = input_size - - def encode(self, - keypoints: np.ndarray, - keypoints_visible: Optional[np.ndarray] = None) -> dict: - """Encoding keypoints from input image space to normalized space. - - Args: - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) - keypoints_visible (np.ndarray): Keypoint visibilities in shape - (N, K) - - Returns: - dict: - - keypoint_labels (np.ndarray): The normalized regression labels in - shape (N, K, D) where D is 2 for 2d coordinates - - keypoint_weights (np.ndarray): The target weights in shape - (N, K) - """ - if keypoints_visible is None: - keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) - - w, h = self.input_size - valid = ((keypoints >= 0) & - (keypoints <= [w - 1, h - 1])).all(axis=-1) & ( - keypoints_visible > 0.5) - - keypoint_labels = (keypoints / np.array([w, h])).astype(np.float32) - keypoint_weights = np.where(valid, 1., 0.).astype(np.float32) - - encoded = dict( - keypoint_labels=keypoint_labels, keypoint_weights=keypoint_weights) - - return encoded - - def decode(self, encoded: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: - """Decode keypoint coordinates from normalized space to input image - space. - - Args: - encoded (np.ndarray): Coordinates in shape (N, K, D) - - Returns: - tuple: - - keypoints (np.ndarray): Decoded coordinates in shape (N, K, D) - - scores (np.ndarray): The keypoint scores in shape (N, K). 
- It usually represents the confidence of the keypoint prediction - """ - - if encoded.shape[-1] == 2: - N, K, _ = encoded.shape - normalized_coords = encoded.copy() - scores = np.ones((N, K), dtype=np.float32) - elif encoded.shape[-1] == 4: - # split coords and sigma if outputs contain output_sigma - normalized_coords = encoded[..., :2].copy() - output_sigma = encoded[..., 2:4].copy() - - scores = (1 - output_sigma).mean(axis=-1) - else: - raise ValueError( - 'Keypoint dimension should be 2 or 4 (with sigma), ' - f'but got {encoded.shape[-1]}') - - w, h = self.input_size - keypoints = normalized_coords * np.array([w, h]) - - return keypoints, scores +# Copyright (c) OpenMMLab. All rights reserved. + +from typing import Optional, Tuple + +import numpy as np + +from mmpose.registry import KEYPOINT_CODECS +from .base import BaseKeypointCodec + + +@KEYPOINT_CODECS.register_module() +class RegressionLabel(BaseKeypointCodec): + r"""Generate keypoint coordinates. + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - image size: [w, h] + + Encoded: + + - keypoint_labels (np.ndarray): The normalized regression labels in + shape (N, K, D) where D is 2 for 2d coordinates + - keypoint_weights (np.ndarray): The target weights in shape (N, K) + + Args: + input_size (tuple): Input image size in [w, h] + + """ + + def __init__(self, input_size: Tuple[int, int]) -> None: + super().__init__() + + self.input_size = input_size + + def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None) -> dict: + """Encoding keypoints from input image space to normalized space. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + + Returns: + dict: + - keypoint_labels (np.ndarray): The normalized regression labels in + shape (N, K, D) where D is 2 for 2d coordinates + - keypoint_weights (np.ndarray): The target weights in shape + (N, K) + """ + if keypoints_visible is None: + keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) + + w, h = self.input_size + valid = ((keypoints >= 0) & + (keypoints <= [w - 1, h - 1])).all(axis=-1) & ( + keypoints_visible > 0.5) + + keypoint_labels = (keypoints / np.array([w, h])).astype(np.float32) + keypoint_weights = np.where(valid, 1., 0.).astype(np.float32) + + encoded = dict( + keypoint_labels=keypoint_labels, keypoint_weights=keypoint_weights) + + return encoded + + def decode(self, encoded: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoint coordinates from normalized space to input image + space. + + Args: + encoded (np.ndarray): Coordinates in shape (N, K, D) + + Returns: + tuple: + - keypoints (np.ndarray): Decoded coordinates in shape (N, K, D) + - scores (np.ndarray): The keypoint scores in shape (N, K). 
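A small sketch of the two decode paths described above (editorial addition, not part of the patch): plain (x, y) outputs receive unit scores, while (x, y, sigma_x, sigma_y) outputs, e.g. from a head that also predicts per-coordinate uncertainty, are scored as 1 minus the mean predicted sigma:

import numpy as np
from mmpose.codecs import RegressionLabel

codec = RegressionLabel(input_size=(192, 256))

coords = np.random.rand(1, 17, 2).astype(np.float32)   # normalized to [0, 1]
kpts, scores = codec.decode(coords)                     # scores are all 1.0

coords_sigma = np.concatenate(
    [coords, np.full((1, 17, 2), 0.2, dtype=np.float32)], axis=-1)
kpts2, scores2 = codec.decode(coords_sigma)             # scores2 == 1 - 0.2 = 0.8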
+ It usually represents the confidence of the keypoint prediction + """ + + if encoded.shape[-1] == 2: + N, K, _ = encoded.shape + normalized_coords = encoded.copy() + scores = np.ones((N, K), dtype=np.float32) + elif encoded.shape[-1] == 4: + # split coords and sigma if outputs contain output_sigma + normalized_coords = encoded[..., :2].copy() + output_sigma = encoded[..., 2:4].copy() + + scores = (1 - output_sigma).mean(axis=-1) + else: + raise ValueError( + 'Keypoint dimension should be 2 or 4 (with sigma), ' + f'but got {encoded.shape[-1]}') + + w, h = self.input_size + keypoints = normalized_coords * np.array([w, h]) + + return keypoints, scores diff --git a/mmpose/codecs/simcc_label.py b/mmpose/codecs/simcc_label.py index a22498c352..ee2c31aa9b 100644 --- a/mmpose/codecs/simcc_label.py +++ b/mmpose/codecs/simcc_label.py @@ -1,286 +1,286 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from itertools import product -from typing import Optional, Tuple, Union - -import numpy as np - -from mmpose.codecs.utils import get_simcc_maximum -from mmpose.codecs.utils.refinement import refine_simcc_dark -from mmpose.registry import KEYPOINT_CODECS -from .base import BaseKeypointCodec - - -@KEYPOINT_CODECS.register_module() -class SimCCLabel(BaseKeypointCodec): - r"""Generate keypoint representation via "SimCC" approach. - See the paper: `SimCC: a Simple Coordinate Classification Perspective for - Human Pose Estimation`_ by Li et al (2022) for more details. - Old name: SimDR - - Note: - - - instance number: N - - keypoint number: K - - keypoint dimension: D - - image size: [w, h] - - Encoded: - - - keypoint_x_labels (np.ndarray): The generated SimCC label for x-axis. - The label shape is (N, K, Wx) if ``smoothing_type=='gaussian'`` - and (N, K) if `smoothing_type=='standard'``, where - :math:`Wx=w*simcc_split_ratio` - - keypoint_y_labels (np.ndarray): The generated SimCC label for y-axis. - The label shape is (N, K, Wy) if ``smoothing_type=='gaussian'`` - and (N, K) if `smoothing_type=='standard'``, where - :math:`Wy=h*simcc_split_ratio` - - keypoint_weights (np.ndarray): The target weights in shape (N, K) - - Args: - input_size (tuple): Input image size in [w, h] - smoothing_type (str): The SimCC label smoothing strategy. Options are - ``'gaussian'`` and ``'standard'``. Defaults to ``'gaussian'`` - sigma (float | int | tuple): The sigma value in the Gaussian SimCC - label. Defaults to 6.0 - simcc_split_ratio (float): The ratio of the label size to the input - size. For example, if the input width is ``w``, the x label size - will be :math:`w*simcc_split_ratio`. Defaults to 2.0 - label_smooth_weight (float): Label Smoothing weight. Defaults to 0.0 - normalize (bool): Whether to normalize the heatmaps. Defaults to True. - - .. 
_`SimCC: a Simple Coordinate Classification Perspective for Human Pose - Estimation`: https://arxiv.org/abs/2107.03332 - """ - - def __init__(self, - input_size: Tuple[int, int], - smoothing_type: str = 'gaussian', - sigma: Union[float, int, Tuple[float]] = 6.0, - simcc_split_ratio: float = 2.0, - label_smooth_weight: float = 0.0, - normalize: bool = True, - use_dark: bool = False) -> None: - super().__init__() - - self.input_size = input_size - self.smoothing_type = smoothing_type - self.simcc_split_ratio = simcc_split_ratio - self.label_smooth_weight = label_smooth_weight - self.normalize = normalize - self.use_dark = use_dark - - if isinstance(sigma, (float, int)): - self.sigma = np.array([sigma, sigma]) - else: - self.sigma = np.array(sigma) - - if self.smoothing_type not in {'gaussian', 'standard'}: - raise ValueError( - f'{self.__class__.__name__} got invalid `smoothing_type` value' - f'{self.smoothing_type}. Should be one of ' - '{"gaussian", "standard"}') - - if self.smoothing_type == 'gaussian' and self.label_smooth_weight > 0: - raise ValueError('Attribute `label_smooth_weight` is only ' - 'used for `standard` mode.') - - if self.label_smooth_weight < 0.0 or self.label_smooth_weight > 1.0: - raise ValueError('`label_smooth_weight` should be in range [0, 1]') - - def encode(self, - keypoints: np.ndarray, - keypoints_visible: Optional[np.ndarray] = None) -> dict: - """Encoding keypoints into SimCC labels. Note that the original - keypoint coordinates should be in the input image space. - - Args: - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) - keypoints_visible (np.ndarray): Keypoint visibilities in shape - (N, K) - - Returns: - dict: - - keypoint_x_labels (np.ndarray): The generated SimCC label for - x-axis. - The label shape is (N, K, Wx) if ``smoothing_type=='gaussian'`` - and (N, K) if `smoothing_type=='standard'``, where - :math:`Wx=w*simcc_split_ratio` - - keypoint_y_labels (np.ndarray): The generated SimCC label for - y-axis. - The label shape is (N, K, Wy) if ``smoothing_type=='gaussian'`` - and (N, K) if `smoothing_type=='standard'``, where - :math:`Wy=h*simcc_split_ratio` - - keypoint_weights (np.ndarray): The target weights in shape - (N, K) - """ - if keypoints_visible is None: - keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) - - if self.smoothing_type == 'gaussian': - x_labels, y_labels, keypoint_weights = self._generate_gaussian( - keypoints, keypoints_visible) - elif self.smoothing_type == 'standard': - x_labels, y_labels, keypoint_weights = self._generate_standard( - keypoints, keypoints_visible) - else: - raise ValueError( - f'{self.__class__.__name__} got invalid `smoothing_type` value' - f'{self.smoothing_type}. Should be one of ' - '{"gaussian", "standard"}') - - encoded = dict( - keypoint_x_labels=x_labels, - keypoint_y_labels=y_labels, - keypoint_weights=keypoint_weights) - - return encoded - - def decode(self, simcc_x: np.ndarray, - simcc_y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: - """Decode keypoint coordinates from SimCC representations. The decoded - coordinates are in the input image space. - - Args: - encoded (Tuple[np.ndarray, np.ndarray]): SimCC labels for x-axis - and y-axis - simcc_x (np.ndarray): SimCC label for x-axis - simcc_y (np.ndarray): SimCC label for y-axis - - Returns: - tuple: - - keypoints (np.ndarray): Decoded coordinates in shape (N, K, D) - - socres (np.ndarray): The keypoint scores in shape (N, K). 
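To make the label layout above concrete, here is a standalone sketch of the 1-D Gaussian x-label the codec builds for a single keypoint (editorial addition, not part of the patch; the numbers are illustrative):

import numpy as np

w, simcc_split_ratio, sigma = 192, 2.0, 6.0
Wx = int(np.around(w * simcc_split_ratio))        # 384 bins along x
mu_x = int(np.around(100 * simcc_split_ratio))    # keypoint at x = 100 px -> bin 200

x = np.arange(Wx, dtype=np.float32)
target_x = np.exp(-((x - mu_x) ** 2) / (2 * sigma ** 2))
# With `normalize=True`, the label is further divided by sigma * sqrt(2 * pi).
target_x /= sigma * np.sqrt(2 * np.pi)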
- It usually represents the confidence of the keypoint prediction - """ - - keypoints, scores = get_simcc_maximum(simcc_x, simcc_y) - - # Unsqueeze the instance dimension for single-instance results - if keypoints.ndim == 2: - keypoints = keypoints[None, :] - scores = scores[None, :] - - if self.use_dark: - x_blur = int((self.sigma[0] * 20 - 7) // 3) - y_blur = int((self.sigma[1] * 20 - 7) // 3) - x_blur -= int((x_blur % 2) == 0) - y_blur -= int((y_blur % 2) == 0) - keypoints[:, :, 0] = refine_simcc_dark(keypoints[:, :, 0], simcc_x, - x_blur) - keypoints[:, :, 1] = refine_simcc_dark(keypoints[:, :, 1], simcc_y, - y_blur) - - keypoints /= self.simcc_split_ratio - - return keypoints, scores - - def _map_coordinates( - self, - keypoints: np.ndarray, - keypoints_visible: Optional[np.ndarray] = None - ) -> Tuple[np.ndarray, np.ndarray]: - """Mapping keypoint coordinates into SimCC space.""" - - keypoints_split = keypoints.copy() - keypoints_split = np.around(keypoints_split * self.simcc_split_ratio) - keypoints_split = keypoints_split.astype(np.int64) - keypoint_weights = keypoints_visible.copy() - - return keypoints_split, keypoint_weights - - def _generate_standard( - self, - keypoints: np.ndarray, - keypoints_visible: Optional[np.ndarray] = None - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: - """Encoding keypoints into SimCC labels with Standard Label Smoothing - strategy. - - Labels will be one-hot vectors if self.label_smooth_weight==0.0 - """ - - N, K, _ = keypoints.shape - w, h = self.input_size - W = np.around(w * self.simcc_split_ratio).astype(int) - H = np.around(h * self.simcc_split_ratio).astype(int) - - keypoints_split, keypoint_weights = self._map_coordinates( - keypoints, keypoints_visible) - - target_x = np.zeros((N, K, W), dtype=np.float32) - target_y = np.zeros((N, K, H), dtype=np.float32) - - for n, k in product(range(N), range(K)): - # skip unlabled keypoints - if keypoints_visible[n, k] < 0.5: - continue - - # get center coordinates - mu_x, mu_y = keypoints_split[n, k].astype(np.int64) - - # detect abnormal coords and assign the weight 0 - if mu_x >= W or mu_y >= H or mu_x < 0 or mu_y < 0: - keypoint_weights[n, k] = 0 - continue - - if self.label_smooth_weight > 0: - target_x[n, k] = self.label_smooth_weight / (W - 1) - target_y[n, k] = self.label_smooth_weight / (H - 1) - - target_x[n, k, mu_x] = 1.0 - self.label_smooth_weight - target_y[n, k, mu_y] = 1.0 - self.label_smooth_weight - - return target_x, target_y, keypoint_weights - - def _generate_gaussian( - self, - keypoints: np.ndarray, - keypoints_visible: Optional[np.ndarray] = None - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: - """Encoding keypoints into SimCC labels with Gaussian Label Smoothing - strategy.""" - - N, K, _ = keypoints.shape - w, h = self.input_size - W = np.around(w * self.simcc_split_ratio).astype(int) - H = np.around(h * self.simcc_split_ratio).astype(int) - - keypoints_split, keypoint_weights = self._map_coordinates( - keypoints, keypoints_visible) - - target_x = np.zeros((N, K, W), dtype=np.float32) - target_y = np.zeros((N, K, H), dtype=np.float32) - - # 3-sigma rule - radius = self.sigma * 3 - - # xy grid - x = np.arange(0, W, 1, dtype=np.float32) - y = np.arange(0, H, 1, dtype=np.float32) - - for n, k in product(range(N), range(K)): - # skip unlabled keypoints - if keypoints_visible[n, k] < 0.5: - continue - - mu = keypoints_split[n, k] - - # check that the gaussian has in-bounds part - left, top = mu - radius - right, bottom = mu + radius + 1 - - if left >= W or top >= H or right < 0 
or bottom < 0: - keypoint_weights[n, k] = 0 - continue - - mu_x, mu_y = mu - - target_x[n, k] = np.exp(-((x - mu_x)**2) / (2 * self.sigma[0]**2)) - target_y[n, k] = np.exp(-((y - mu_y)**2) / (2 * self.sigma[1]**2)) - - if self.normalize: - norm_value = self.sigma * np.sqrt(np.pi * 2) - target_x /= norm_value[0] - target_y /= norm_value[1] - - return target_x, target_y, keypoint_weights +# Copyright (c) OpenMMLab. All rights reserved. +from itertools import product +from typing import Optional, Tuple, Union + +import numpy as np + +from mmpose.codecs.utils import get_simcc_maximum +from mmpose.codecs.utils.refinement import refine_simcc_dark +from mmpose.registry import KEYPOINT_CODECS +from .base import BaseKeypointCodec + + +@KEYPOINT_CODECS.register_module() +class SimCCLabel(BaseKeypointCodec): + r"""Generate keypoint representation via "SimCC" approach. + See the paper: `SimCC: a Simple Coordinate Classification Perspective for + Human Pose Estimation`_ by Li et al (2022) for more details. + Old name: SimDR + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - image size: [w, h] + + Encoded: + + - keypoint_x_labels (np.ndarray): The generated SimCC label for x-axis. + The label shape is (N, K, Wx) if ``smoothing_type=='gaussian'`` + and (N, K) if `smoothing_type=='standard'``, where + :math:`Wx=w*simcc_split_ratio` + - keypoint_y_labels (np.ndarray): The generated SimCC label for y-axis. + The label shape is (N, K, Wy) if ``smoothing_type=='gaussian'`` + and (N, K) if `smoothing_type=='standard'``, where + :math:`Wy=h*simcc_split_ratio` + - keypoint_weights (np.ndarray): The target weights in shape (N, K) + + Args: + input_size (tuple): Input image size in [w, h] + smoothing_type (str): The SimCC label smoothing strategy. Options are + ``'gaussian'`` and ``'standard'``. Defaults to ``'gaussian'`` + sigma (float | int | tuple): The sigma value in the Gaussian SimCC + label. Defaults to 6.0 + simcc_split_ratio (float): The ratio of the label size to the input + size. For example, if the input width is ``w``, the x label size + will be :math:`w*simcc_split_ratio`. Defaults to 2.0 + label_smooth_weight (float): Label Smoothing weight. Defaults to 0.0 + normalize (bool): Whether to normalize the heatmaps. Defaults to True. + + .. _`SimCC: a Simple Coordinate Classification Perspective for Human Pose + Estimation`: https://arxiv.org/abs/2107.03332 + """ + + def __init__(self, + input_size: Tuple[int, int], + smoothing_type: str = 'gaussian', + sigma: Union[float, int, Tuple[float]] = 6.0, + simcc_split_ratio: float = 2.0, + label_smooth_weight: float = 0.0, + normalize: bool = True, + use_dark: bool = False) -> None: + super().__init__() + + self.input_size = input_size + self.smoothing_type = smoothing_type + self.simcc_split_ratio = simcc_split_ratio + self.label_smooth_weight = label_smooth_weight + self.normalize = normalize + self.use_dark = use_dark + + if isinstance(sigma, (float, int)): + self.sigma = np.array([sigma, sigma]) + else: + self.sigma = np.array(sigma) + + if self.smoothing_type not in {'gaussian', 'standard'}: + raise ValueError( + f'{self.__class__.__name__} got invalid `smoothing_type` value' + f'{self.smoothing_type}. 
Should be one of ' + '{"gaussian", "standard"}') + + if self.smoothing_type == 'gaussian' and self.label_smooth_weight > 0: + raise ValueError('Attribute `label_smooth_weight` is only ' + 'used for `standard` mode.') + + if self.label_smooth_weight < 0.0 or self.label_smooth_weight > 1.0: + raise ValueError('`label_smooth_weight` should be in range [0, 1]') + + def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None) -> dict: + """Encoding keypoints into SimCC labels. Note that the original + keypoint coordinates should be in the input image space. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + + Returns: + dict: + - keypoint_x_labels (np.ndarray): The generated SimCC label for + x-axis. + The label shape is (N, K, Wx) if ``smoothing_type=='gaussian'`` + and (N, K) if `smoothing_type=='standard'``, where + :math:`Wx=w*simcc_split_ratio` + - keypoint_y_labels (np.ndarray): The generated SimCC label for + y-axis. + The label shape is (N, K, Wy) if ``smoothing_type=='gaussian'`` + and (N, K) if `smoothing_type=='standard'``, where + :math:`Wy=h*simcc_split_ratio` + - keypoint_weights (np.ndarray): The target weights in shape + (N, K) + """ + if keypoints_visible is None: + keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) + + if self.smoothing_type == 'gaussian': + x_labels, y_labels, keypoint_weights = self._generate_gaussian( + keypoints, keypoints_visible) + elif self.smoothing_type == 'standard': + x_labels, y_labels, keypoint_weights = self._generate_standard( + keypoints, keypoints_visible) + else: + raise ValueError( + f'{self.__class__.__name__} got invalid `smoothing_type` value' + f'{self.smoothing_type}. Should be one of ' + '{"gaussian", "standard"}') + + encoded = dict( + keypoint_x_labels=x_labels, + keypoint_y_labels=y_labels, + keypoint_weights=keypoint_weights) + + return encoded + + def decode(self, simcc_x: np.ndarray, + simcc_y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoint coordinates from SimCC representations. The decoded + coordinates are in the input image space. + + Args: + encoded (Tuple[np.ndarray, np.ndarray]): SimCC labels for x-axis + and y-axis + simcc_x (np.ndarray): SimCC label for x-axis + simcc_y (np.ndarray): SimCC label for y-axis + + Returns: + tuple: + - keypoints (np.ndarray): Decoded coordinates in shape (N, K, D) + - socres (np.ndarray): The keypoint scores in shape (N, K). 
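An encode/decode round-trip sketch for this codec (editorial addition, not part of the patch; the `mmpose.codecs` import path is assumed):

import numpy as np
from mmpose.codecs import SimCCLabel

codec = SimCCLabel(input_size=(192, 256), smoothing_type='gaussian',
                   sigma=6.0, simcc_split_ratio=2.0, normalize=True)

keypoints = (np.random.rand(1, 17, 2) * [192, 256]).astype(np.float32)
encoded = codec.encode(keypoints)
# keypoint_x_labels: (1, 17, 384), keypoint_y_labels: (1, 17, 512)
decoded, scores = codec.decode(encoded['keypoint_x_labels'],
                               encoded['keypoint_y_labels'])
# Decoded coordinates are divided by `simcc_split_ratio`, i.e. they are back in
# the 192 x 256 input space up to the bin quantization error.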
+ It usually represents the confidence of the keypoint prediction + """ + + keypoints, scores = get_simcc_maximum(simcc_x, simcc_y) + + # Unsqueeze the instance dimension for single-instance results + if keypoints.ndim == 2: + keypoints = keypoints[None, :] + scores = scores[None, :] + + if self.use_dark: + x_blur = int((self.sigma[0] * 20 - 7) // 3) + y_blur = int((self.sigma[1] * 20 - 7) // 3) + x_blur -= int((x_blur % 2) == 0) + y_blur -= int((y_blur % 2) == 0) + keypoints[:, :, 0] = refine_simcc_dark(keypoints[:, :, 0], simcc_x, + x_blur) + keypoints[:, :, 1] = refine_simcc_dark(keypoints[:, :, 1], simcc_y, + y_blur) + + keypoints /= self.simcc_split_ratio + + return keypoints, scores + + def _map_coordinates( + self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None + ) -> Tuple[np.ndarray, np.ndarray]: + """Mapping keypoint coordinates into SimCC space.""" + + keypoints_split = keypoints.copy() + keypoints_split = np.around(keypoints_split * self.simcc_split_ratio) + keypoints_split = keypoints_split.astype(np.int64) + keypoint_weights = keypoints_visible.copy() + + return keypoints_split, keypoint_weights + + def _generate_standard( + self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """Encoding keypoints into SimCC labels with Standard Label Smoothing + strategy. + + Labels will be one-hot vectors if self.label_smooth_weight==0.0 + """ + + N, K, _ = keypoints.shape + w, h = self.input_size + W = np.around(w * self.simcc_split_ratio).astype(int) + H = np.around(h * self.simcc_split_ratio).astype(int) + + keypoints_split, keypoint_weights = self._map_coordinates( + keypoints, keypoints_visible) + + target_x = np.zeros((N, K, W), dtype=np.float32) + target_y = np.zeros((N, K, H), dtype=np.float32) + + for n, k in product(range(N), range(K)): + # skip unlabled keypoints + if keypoints_visible[n, k] < 0.5: + continue + + # get center coordinates + mu_x, mu_y = keypoints_split[n, k].astype(np.int64) + + # detect abnormal coords and assign the weight 0 + if mu_x >= W or mu_y >= H or mu_x < 0 or mu_y < 0: + keypoint_weights[n, k] = 0 + continue + + if self.label_smooth_weight > 0: + target_x[n, k] = self.label_smooth_weight / (W - 1) + target_y[n, k] = self.label_smooth_weight / (H - 1) + + target_x[n, k, mu_x] = 1.0 - self.label_smooth_weight + target_y[n, k, mu_y] = 1.0 - self.label_smooth_weight + + return target_x, target_y, keypoint_weights + + def _generate_gaussian( + self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """Encoding keypoints into SimCC labels with Gaussian Label Smoothing + strategy.""" + + N, K, _ = keypoints.shape + w, h = self.input_size + W = np.around(w * self.simcc_split_ratio).astype(int) + H = np.around(h * self.simcc_split_ratio).astype(int) + + keypoints_split, keypoint_weights = self._map_coordinates( + keypoints, keypoints_visible) + + target_x = np.zeros((N, K, W), dtype=np.float32) + target_y = np.zeros((N, K, H), dtype=np.float32) + + # 3-sigma rule + radius = self.sigma * 3 + + # xy grid + x = np.arange(0, W, 1, dtype=np.float32) + y = np.arange(0, H, 1, dtype=np.float32) + + for n, k in product(range(N), range(K)): + # skip unlabled keypoints + if keypoints_visible[n, k] < 0.5: + continue + + mu = keypoints_split[n, k] + + # check that the gaussian has in-bounds part + left, top = mu - radius + right, bottom = mu + radius + 1 + + if left >= W or top >= H or right < 0 
or bottom < 0: + keypoint_weights[n, k] = 0 + continue + + mu_x, mu_y = mu + + target_x[n, k] = np.exp(-((x - mu_x)**2) / (2 * self.sigma[0]**2)) + target_y[n, k] = np.exp(-((y - mu_y)**2) / (2 * self.sigma[1]**2)) + + if self.normalize: + norm_value = self.sigma * np.sqrt(np.pi * 2) + target_x /= norm_value[0] + target_y /= norm_value[1] + + return target_x, target_y, keypoint_weights diff --git a/mmpose/codecs/spr.py b/mmpose/codecs/spr.py index add6f5715b..6104e306b8 100644 --- a/mmpose/codecs/spr.py +++ b/mmpose/codecs/spr.py @@ -1,299 +1,299 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Optional, Tuple, Union - -import numpy as np -import torch -from torch import Tensor - -from mmpose.registry import KEYPOINT_CODECS -from .base import BaseKeypointCodec -from .utils import (batch_heatmap_nms, generate_displacement_heatmap, - generate_gaussian_heatmaps, get_diagonal_lengths, - get_instance_root) - - -@KEYPOINT_CODECS.register_module() -class SPR(BaseKeypointCodec): - """Encode/decode keypoints with Structured Pose Representation (SPR). - - See the paper `Single-stage multi-person pose machines`_ - by Nie et al (2017) for details - - Note: - - - instance number: N - - keypoint number: K - - keypoint dimension: D - - image size: [w, h] - - heatmap size: [W, H] - - Encoded: - - - heatmaps (np.ndarray): The generated heatmap in shape (1, H, W) - where [W, H] is the `heatmap_size`. If the keypoint heatmap is - generated together, the output heatmap shape is (K+1, H, W) - - heatmap_weights (np.ndarray): The target weights for heatmaps which - has same shape with heatmaps. - - displacements (np.ndarray): The dense keypoint displacement in - shape (K*2, H, W). - - displacement_weights (np.ndarray): The target weights for heatmaps - which has same shape with displacements. - - Args: - input_size (tuple): Image size in [w, h] - heatmap_size (tuple): Heatmap size in [W, H] - sigma (float or tuple, optional): The sigma values of the Gaussian - heatmaps. If sigma is a tuple, it includes both sigmas for root - and keypoint heatmaps. ``None`` means the sigmas are computed - automatically from the heatmap size. Defaults to ``None`` - generate_keypoint_heatmaps (bool): Whether to generate Gaussian - heatmaps for each keypoint. Defaults to ``False`` - root_type (str): The method to generate the instance root. Options - are: - - - ``'kpt_center'``: Average coordinate of all visible keypoints. - - ``'bbox_center'``: Center point of bounding boxes outlined by - all visible keypoints. - - Defaults to ``'kpt_center'`` - - minimal_diagonal_length (int or float): The threshold of diagonal - length of instance bounding box. Small instances will not be - used in training. Defaults to 32 - background_weight (float): Loss weight of background pixels. - Defaults to 0.1 - decode_thr (float): The threshold of keypoint response value in - heatmaps. Defaults to 0.01 - decode_nms_kernel (int): The kernel size of the NMS during decoding, - which should be an odd integer. Defaults to 5 - decode_max_instances (int): The maximum number of instances - to decode. Defaults to 30 - - .. 
_`Single-stage multi-person pose machines`: - https://arxiv.org/abs/1908.09220 - """ - - def __init__( - self, - input_size: Tuple[int, int], - heatmap_size: Tuple[int, int], - sigma: Optional[Union[float, Tuple[float]]] = None, - generate_keypoint_heatmaps: bool = False, - root_type: str = 'kpt_center', - minimal_diagonal_length: Union[int, float] = 5, - background_weight: float = 0.1, - decode_nms_kernel: int = 5, - decode_max_instances: int = 30, - decode_thr: float = 0.01, - ): - super().__init__() - - self.input_size = input_size - self.heatmap_size = heatmap_size - self.generate_keypoint_heatmaps = generate_keypoint_heatmaps - self.root_type = root_type - self.minimal_diagonal_length = minimal_diagonal_length - self.background_weight = background_weight - self.decode_nms_kernel = decode_nms_kernel - self.decode_max_instances = decode_max_instances - self.decode_thr = decode_thr - - self.scale_factor = (np.array(input_size) / - heatmap_size).astype(np.float32) - - if sigma is None: - sigma = (heatmap_size[0] * heatmap_size[1])**0.5 / 32 - if generate_keypoint_heatmaps: - # sigma for root heatmap and keypoint heatmaps - self.sigma = (sigma, sigma // 2) - else: - self.sigma = (sigma, ) - else: - if not isinstance(sigma, (tuple, list)): - sigma = (sigma, ) - if generate_keypoint_heatmaps: - assert len(sigma) == 2, 'sigma for keypoints must be given ' \ - 'if `generate_keypoint_heatmaps` ' \ - 'is True. e.g. sigma=(4, 2)' - self.sigma = sigma - - def _get_heatmap_weights(self, - heatmaps, - fg_weight: float = 1, - bg_weight: float = 0): - """Generate weight array for heatmaps. - - Args: - heatmaps (np.ndarray): Root and keypoint (optional) heatmaps - fg_weight (float): Weight for foreground pixels. Defaults to 1.0 - bg_weight (float): Weight for background pixels. Defaults to 0.0 - - Returns: - np.ndarray: Heatmap weight array in the same shape with heatmaps - """ - heatmap_weights = np.ones(heatmaps.shape) * bg_weight - heatmap_weights[heatmaps > 0] = fg_weight - return heatmap_weights - - def encode(self, - keypoints: np.ndarray, - keypoints_visible: Optional[np.ndarray] = None) -> dict: - """Encode keypoints into root heatmaps and keypoint displacement - fields. Note that the original keypoint coordinates should be in the - input image space. - - Args: - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) - keypoints_visible (np.ndarray): Keypoint visibilities in shape - (N, K) - - Returns: - dict: - - heatmaps (np.ndarray): The generated heatmap in shape - (1, H, W) where [W, H] is the `heatmap_size`. If keypoint - heatmaps are generated together, the shape is (K+1, H, W) - - heatmap_weights (np.ndarray): The pixel-wise weight for heatmaps - which has same shape with `heatmaps` - - displacements (np.ndarray): The generated displacement fields in - shape (K*D, H, W). The vector on each pixels represents the - displacement of keypoints belong to the associated instance - from this pixel. 
- - displacement_weights (np.ndarray): The pixel-wise weight for - displacements which has same shape with `displacements` - """ - - if keypoints_visible is None: - keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) - - # keypoint coordinates in heatmap - _keypoints = keypoints / self.scale_factor - - # compute the root and scale of each instance - roots, roots_visible = get_instance_root(_keypoints, keypoints_visible, - self.root_type) - diagonal_lengths = get_diagonal_lengths(_keypoints, keypoints_visible) - - # discard the small instances - roots_visible[diagonal_lengths < self.minimal_diagonal_length] = 0 - - # generate heatmaps - heatmaps, _ = generate_gaussian_heatmaps( - heatmap_size=self.heatmap_size, - keypoints=roots[:, None], - keypoints_visible=roots_visible[:, None], - sigma=self.sigma[0]) - heatmap_weights = self._get_heatmap_weights( - heatmaps, bg_weight=self.background_weight) - - if self.generate_keypoint_heatmaps: - keypoint_heatmaps, _ = generate_gaussian_heatmaps( - heatmap_size=self.heatmap_size, - keypoints=_keypoints, - keypoints_visible=keypoints_visible, - sigma=self.sigma[1]) - - keypoint_heatmaps_weights = self._get_heatmap_weights( - keypoint_heatmaps, bg_weight=self.background_weight) - - heatmaps = np.concatenate((keypoint_heatmaps, heatmaps), axis=0) - heatmap_weights = np.concatenate( - (keypoint_heatmaps_weights, heatmap_weights), axis=0) - - # generate displacements - displacements, displacement_weights = \ - generate_displacement_heatmap( - self.heatmap_size, - _keypoints, - keypoints_visible, - roots, - roots_visible, - diagonal_lengths, - self.sigma[0], - ) - - encoded = dict( - heatmaps=heatmaps, - heatmap_weights=heatmap_weights, - displacements=displacements, - displacement_weights=displacement_weights) - - return encoded - - def decode(self, heatmaps: Tensor, - displacements: Tensor) -> Tuple[np.ndarray, np.ndarray]: - """Decode the keypoint coordinates from heatmaps and displacements. The - decoded keypoint coordinates are in the input image space. - - Args: - heatmaps (Tensor): Encoded root and keypoints (optional) heatmaps - in shape (1, H, W) or (K+1, H, W) - displacements (Tensor): Encoded keypoints displacement fields - in shape (K*D, H, W) - - Returns: - tuple: - - keypoints (Tensor): Decoded keypoint coordinates in shape - (N, K, D) - - scores (tuple): - - root_scores (Tensor): The root scores in shape (N, ) - - keypoint_scores (Tensor): The keypoint scores in - shape (N, K). 
If keypoint heatmaps are not generated, - `keypoint_scores` will be `None` - """ - # heatmaps, displacements = encoded - _k, h, w = displacements.shape - k = _k // 2 - displacements = displacements.view(k, 2, h, w) - - # convert displacements to a dense keypoint prediction - y, x = torch.meshgrid(torch.arange(h), torch.arange(w)) - regular_grid = torch.stack([x, y], dim=0).to(displacements) - posemaps = (regular_grid[None] + displacements).flatten(2) - - # find local maximum on root heatmap - root_heatmap_peaks = batch_heatmap_nms(heatmaps[None, -1:], - self.decode_nms_kernel) - root_scores, pos_idx = root_heatmap_peaks.flatten().topk( - self.decode_max_instances) - mask = root_scores > self.decode_thr - root_scores, pos_idx = root_scores[mask], pos_idx[mask] - - keypoints = posemaps[:, :, pos_idx].permute(2, 0, 1).contiguous() - - if self.generate_keypoint_heatmaps and heatmaps.shape[0] == 1 + k: - # compute scores for each keypoint - keypoint_scores = self.get_keypoint_scores(heatmaps[:k], keypoints) - else: - keypoint_scores = None - - keypoints = torch.cat([ - kpt * self.scale_factor[i] - for i, kpt in enumerate(keypoints.split(1, -1)) - ], - dim=-1) - return keypoints, (root_scores, keypoint_scores) - - def get_keypoint_scores(self, heatmaps: Tensor, keypoints: Tensor): - """Calculate the keypoint scores with keypoints heatmaps and - coordinates. - - Args: - heatmaps (Tensor): Keypoint heatmaps in shape (K, H, W) - keypoints (Tensor): Keypoint coordinates in shape (N, K, D) - - Returns: - Tensor: Keypoint scores in [N, K] - """ - k, h, w = heatmaps.shape - keypoints = torch.stack(( - keypoints[..., 0] / (w - 1) * 2 - 1, - keypoints[..., 1] / (h - 1) * 2 - 1, - ), - dim=-1) - keypoints = keypoints.transpose(0, 1).unsqueeze(1).contiguous() - - keypoint_scores = torch.nn.functional.grid_sample( - heatmaps.unsqueeze(1), keypoints, - padding_mode='border').view(k, -1).transpose(0, 1).contiguous() - - return keypoint_scores +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Tuple, Union + +import numpy as np +import torch +from torch import Tensor + +from mmpose.registry import KEYPOINT_CODECS +from .base import BaseKeypointCodec +from .utils import (batch_heatmap_nms, generate_displacement_heatmap, + generate_gaussian_heatmaps, get_diagonal_lengths, + get_instance_root) + + +@KEYPOINT_CODECS.register_module() +class SPR(BaseKeypointCodec): + """Encode/decode keypoints with Structured Pose Representation (SPR). + + See the paper `Single-stage multi-person pose machines`_ + by Nie et al (2017) for details + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - image size: [w, h] + - heatmap size: [W, H] + + Encoded: + + - heatmaps (np.ndarray): The generated heatmap in shape (1, H, W) + where [W, H] is the `heatmap_size`. If the keypoint heatmap is + generated together, the output heatmap shape is (K+1, H, W) + - heatmap_weights (np.ndarray): The target weights for heatmaps which + has same shape with heatmaps. + - displacements (np.ndarray): The dense keypoint displacement in + shape (K*2, H, W). + - displacement_weights (np.ndarray): The target weights for heatmaps + which has same shape with displacements. + + Args: + input_size (tuple): Image size in [w, h] + heatmap_size (tuple): Heatmap size in [W, H] + sigma (float or tuple, optional): The sigma values of the Gaussian + heatmaps. If sigma is a tuple, it includes both sigmas for root + and keypoint heatmaps. 
``None`` means the sigmas are computed + automatically from the heatmap size. Defaults to ``None`` + generate_keypoint_heatmaps (bool): Whether to generate Gaussian + heatmaps for each keypoint. Defaults to ``False`` + root_type (str): The method to generate the instance root. Options + are: + + - ``'kpt_center'``: Average coordinate of all visible keypoints. + - ``'bbox_center'``: Center point of bounding boxes outlined by + all visible keypoints. + + Defaults to ``'kpt_center'`` + + minimal_diagonal_length (int or float): The threshold of diagonal + length of instance bounding box. Small instances will not be + used in training. Defaults to 32 + background_weight (float): Loss weight of background pixels. + Defaults to 0.1 + decode_thr (float): The threshold of keypoint response value in + heatmaps. Defaults to 0.01 + decode_nms_kernel (int): The kernel size of the NMS during decoding, + which should be an odd integer. Defaults to 5 + decode_max_instances (int): The maximum number of instances + to decode. Defaults to 30 + + .. _`Single-stage multi-person pose machines`: + https://arxiv.org/abs/1908.09220 + """ + + def __init__( + self, + input_size: Tuple[int, int], + heatmap_size: Tuple[int, int], + sigma: Optional[Union[float, Tuple[float]]] = None, + generate_keypoint_heatmaps: bool = False, + root_type: str = 'kpt_center', + minimal_diagonal_length: Union[int, float] = 5, + background_weight: float = 0.1, + decode_nms_kernel: int = 5, + decode_max_instances: int = 30, + decode_thr: float = 0.01, + ): + super().__init__() + + self.input_size = input_size + self.heatmap_size = heatmap_size + self.generate_keypoint_heatmaps = generate_keypoint_heatmaps + self.root_type = root_type + self.minimal_diagonal_length = minimal_diagonal_length + self.background_weight = background_weight + self.decode_nms_kernel = decode_nms_kernel + self.decode_max_instances = decode_max_instances + self.decode_thr = decode_thr + + self.scale_factor = (np.array(input_size) / + heatmap_size).astype(np.float32) + + if sigma is None: + sigma = (heatmap_size[0] * heatmap_size[1])**0.5 / 32 + if generate_keypoint_heatmaps: + # sigma for root heatmap and keypoint heatmaps + self.sigma = (sigma, sigma // 2) + else: + self.sigma = (sigma, ) + else: + if not isinstance(sigma, (tuple, list)): + sigma = (sigma, ) + if generate_keypoint_heatmaps: + assert len(sigma) == 2, 'sigma for keypoints must be given ' \ + 'if `generate_keypoint_heatmaps` ' \ + 'is True. e.g. sigma=(4, 2)' + self.sigma = sigma + + def _get_heatmap_weights(self, + heatmaps, + fg_weight: float = 1, + bg_weight: float = 0): + """Generate weight array for heatmaps. + + Args: + heatmaps (np.ndarray): Root and keypoint (optional) heatmaps + fg_weight (float): Weight for foreground pixels. Defaults to 1.0 + bg_weight (float): Weight for background pixels. Defaults to 0.0 + + Returns: + np.ndarray: Heatmap weight array in the same shape with heatmaps + """ + heatmap_weights = np.ones(heatmaps.shape) * bg_weight + heatmap_weights[heatmaps > 0] = fg_weight + return heatmap_weights + + def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None) -> dict: + """Encode keypoints into root heatmaps and keypoint displacement + fields. Note that the original keypoint coordinates should be in the + input image space. 
+ + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + + Returns: + dict: + - heatmaps (np.ndarray): The generated heatmap in shape + (1, H, W) where [W, H] is the `heatmap_size`. If keypoint + heatmaps are generated together, the shape is (K+1, H, W) + - heatmap_weights (np.ndarray): The pixel-wise weight for heatmaps + which has same shape with `heatmaps` + - displacements (np.ndarray): The generated displacement fields in + shape (K*D, H, W). The vector on each pixels represents the + displacement of keypoints belong to the associated instance + from this pixel. + - displacement_weights (np.ndarray): The pixel-wise weight for + displacements which has same shape with `displacements` + """ + + if keypoints_visible is None: + keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) + + # keypoint coordinates in heatmap + _keypoints = keypoints / self.scale_factor + + # compute the root and scale of each instance + roots, roots_visible = get_instance_root(_keypoints, keypoints_visible, + self.root_type) + diagonal_lengths = get_diagonal_lengths(_keypoints, keypoints_visible) + + # discard the small instances + roots_visible[diagonal_lengths < self.minimal_diagonal_length] = 0 + + # generate heatmaps + heatmaps, _ = generate_gaussian_heatmaps( + heatmap_size=self.heatmap_size, + keypoints=roots[:, None], + keypoints_visible=roots_visible[:, None], + sigma=self.sigma[0]) + heatmap_weights = self._get_heatmap_weights( + heatmaps, bg_weight=self.background_weight) + + if self.generate_keypoint_heatmaps: + keypoint_heatmaps, _ = generate_gaussian_heatmaps( + heatmap_size=self.heatmap_size, + keypoints=_keypoints, + keypoints_visible=keypoints_visible, + sigma=self.sigma[1]) + + keypoint_heatmaps_weights = self._get_heatmap_weights( + keypoint_heatmaps, bg_weight=self.background_weight) + + heatmaps = np.concatenate((keypoint_heatmaps, heatmaps), axis=0) + heatmap_weights = np.concatenate( + (keypoint_heatmaps_weights, heatmap_weights), axis=0) + + # generate displacements + displacements, displacement_weights = \ + generate_displacement_heatmap( + self.heatmap_size, + _keypoints, + keypoints_visible, + roots, + roots_visible, + diagonal_lengths, + self.sigma[0], + ) + + encoded = dict( + heatmaps=heatmaps, + heatmap_weights=heatmap_weights, + displacements=displacements, + displacement_weights=displacement_weights) + + return encoded + + def decode(self, heatmaps: Tensor, + displacements: Tensor) -> Tuple[np.ndarray, np.ndarray]: + """Decode the keypoint coordinates from heatmaps and displacements. The + decoded keypoint coordinates are in the input image space. + + Args: + heatmaps (Tensor): Encoded root and keypoints (optional) heatmaps + in shape (1, H, W) or (K+1, H, W) + displacements (Tensor): Encoded keypoints displacement fields + in shape (K*D, H, W) + + Returns: + tuple: + - keypoints (Tensor): Decoded keypoint coordinates in shape + (N, K, D) + - scores (tuple): + - root_scores (Tensor): The root scores in shape (N, ) + - keypoint_scores (Tensor): The keypoint scores in + shape (N, K). 
If keypoint heatmaps are not generated, + `keypoint_scores` will be `None` + """ + # heatmaps, displacements = encoded + _k, h, w = displacements.shape + k = _k // 2 + displacements = displacements.view(k, 2, h, w) + + # convert displacements to a dense keypoint prediction + y, x = torch.meshgrid(torch.arange(h), torch.arange(w)) + regular_grid = torch.stack([x, y], dim=0).to(displacements) + posemaps = (regular_grid[None] + displacements).flatten(2) + + # find local maximum on root heatmap + root_heatmap_peaks = batch_heatmap_nms(heatmaps[None, -1:], + self.decode_nms_kernel) + root_scores, pos_idx = root_heatmap_peaks.flatten().topk( + self.decode_max_instances) + mask = root_scores > self.decode_thr + root_scores, pos_idx = root_scores[mask], pos_idx[mask] + + keypoints = posemaps[:, :, pos_idx].permute(2, 0, 1).contiguous() + + if self.generate_keypoint_heatmaps and heatmaps.shape[0] == 1 + k: + # compute scores for each keypoint + keypoint_scores = self.get_keypoint_scores(heatmaps[:k], keypoints) + else: + keypoint_scores = None + + keypoints = torch.cat([ + kpt * self.scale_factor[i] + for i, kpt in enumerate(keypoints.split(1, -1)) + ], + dim=-1) + return keypoints, (root_scores, keypoint_scores) + + def get_keypoint_scores(self, heatmaps: Tensor, keypoints: Tensor): + """Calculate the keypoint scores with keypoints heatmaps and + coordinates. + + Args: + heatmaps (Tensor): Keypoint heatmaps in shape (K, H, W) + keypoints (Tensor): Keypoint coordinates in shape (N, K, D) + + Returns: + Tensor: Keypoint scores in [N, K] + """ + k, h, w = heatmaps.shape + keypoints = torch.stack(( + keypoints[..., 0] / (w - 1) * 2 - 1, + keypoints[..., 1] / (h - 1) * 2 - 1, + ), + dim=-1) + keypoints = keypoints.transpose(0, 1).unsqueeze(1).contiguous() + + keypoint_scores = torch.nn.functional.grid_sample( + heatmaps.unsqueeze(1), keypoints, + padding_mode='border').view(k, -1).transpose(0, 1).contiguous() + + return keypoint_scores diff --git a/mmpose/codecs/udp_heatmap.py b/mmpose/codecs/udp_heatmap.py index c38ea17be4..df95b50240 100644 --- a/mmpose/codecs/udp_heatmap.py +++ b/mmpose/codecs/udp_heatmap.py @@ -1,185 +1,185 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Optional, Tuple - -import cv2 -import numpy as np - -from mmpose.registry import KEYPOINT_CODECS -from .base import BaseKeypointCodec -from .utils import (generate_offset_heatmap, generate_udp_gaussian_heatmaps, - get_heatmap_maximum, refine_keypoints_dark_udp) - - -@KEYPOINT_CODECS.register_module() -class UDPHeatmap(BaseKeypointCodec): - r"""Generate keypoint heatmaps by Unbiased Data Processing (UDP). - See the paper: `The Devil is in the Details: Delving into Unbiased Data - Processing for Human Pose Estimation`_ by Huang et al (2020) for details. - - Note: - - - instance number: N - - keypoint number: K - - keypoint dimension: D - - image size: [w, h] - - heatmap size: [W, H] - - Encoded: - - - heatmap (np.ndarray): The generated heatmap in shape (C_out, H, W) - where [W, H] is the `heatmap_size`, and the C_out is the output - channel number which depends on the `heatmap_type`. If - `heatmap_type=='gaussian'`, C_out equals to keypoint number K; - if `heatmap_type=='combined'`, C_out equals to K*3 - (x_offset, y_offset and class label) - - keypoint_weights (np.ndarray): The target weights in shape (K,) - - Args: - input_size (tuple): Image size in [w, h] - heatmap_size (tuple): Heatmap size in [W, H] - heatmap_type (str): The heatmap type to encode the keypoitns. 
Options - are: - - - ``'gaussian'``: Gaussian heatmap - - ``'combined'``: Combination of a binary label map and offset - maps for X and Y axes. - - sigma (float): The sigma value of the Gaussian heatmap when - ``heatmap_type=='gaussian'``. Defaults to 2.0 - radius_factor (float): The radius factor of the binary label - map when ``heatmap_type=='combined'``. The positive region is - defined as the neighbor of the keypoit with the radius - :math:`r=radius_factor*max(W, H)`. Defaults to 0.0546875 - blur_kernel_size (int): The Gaussian blur kernel size of the heatmap - modulation in DarkPose. Defaults to 11 - - .. _`The Devil is in the Details: Delving into Unbiased Data Processing for - Human Pose Estimation`: https://arxiv.org/abs/1911.07524 - """ - - def __init__(self, - input_size: Tuple[int, int], - heatmap_size: Tuple[int, int], - heatmap_type: str = 'gaussian', - sigma: float = 2., - radius_factor: float = 0.0546875, - blur_kernel_size: int = 11) -> None: - super().__init__() - self.input_size = input_size - self.heatmap_size = heatmap_size - self.sigma = sigma - self.radius_factor = radius_factor - self.heatmap_type = heatmap_type - self.blur_kernel_size = blur_kernel_size - self.scale_factor = ((np.array(input_size) - 1) / - (np.array(heatmap_size) - 1)).astype(np.float32) - - if self.heatmap_type not in {'gaussian', 'combined'}: - raise ValueError( - f'{self.__class__.__name__} got invalid `heatmap_type` value' - f'{self.heatmap_type}. Should be one of ' - '{"gaussian", "combined"}') - - def encode(self, - keypoints: np.ndarray, - keypoints_visible: Optional[np.ndarray] = None) -> dict: - """Encode keypoints into heatmaps. Note that the original keypoint - coordinates should be in the input image space. - - Args: - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) - keypoints_visible (np.ndarray): Keypoint visibilities in shape - (N, K) - - Returns: - dict: - - heatmap (np.ndarray): The generated heatmap in shape - (C_out, H, W) where [W, H] is the `heatmap_size`, and the - C_out is the output channel number which depends on the - `heatmap_type`. If `heatmap_type=='gaussian'`, C_out equals to - keypoint number K; if `heatmap_type=='combined'`, C_out - equals to K*3 (x_offset, y_offset and class label) - - keypoint_weights (np.ndarray): The target weights in shape - (K,) - """ - assert keypoints.shape[0] == 1, ( - f'{self.__class__.__name__} only support single-instance ' - 'keypoint encoding') - - if keypoints_visible is None: - keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) - - if self.heatmap_type == 'gaussian': - heatmaps, keypoint_weights = generate_udp_gaussian_heatmaps( - heatmap_size=self.heatmap_size, - keypoints=keypoints / self.scale_factor, - keypoints_visible=keypoints_visible, - sigma=self.sigma) - elif self.heatmap_type == 'combined': - heatmaps, keypoint_weights = generate_offset_heatmap( - heatmap_size=self.heatmap_size, - keypoints=keypoints / self.scale_factor, - keypoints_visible=keypoints_visible, - radius_factor=self.radius_factor) - else: - raise ValueError( - f'{self.__class__.__name__} got invalid `heatmap_type` value' - f'{self.heatmap_type}. Should be one of ' - '{"gaussian", "combined"}') - - encoded = dict(heatmaps=heatmaps, keypoint_weights=keypoint_weights) - - return encoded - - def decode(self, encoded: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: - """Decode keypoint coordinates from heatmaps. The decoded keypoint - coordinates are in the input image space. 
- - Args: - encoded (np.ndarray): Heatmaps in shape (K, H, W) - - Returns: - tuple: - - keypoints (np.ndarray): Decoded keypoint coordinates in shape - (N, K, D) - - scores (np.ndarray): The keypoint scores in shape (N, K). It - usually represents the confidence of the keypoint prediction - """ - heatmaps = encoded.copy() - - if self.heatmap_type == 'gaussian': - keypoints, scores = get_heatmap_maximum(heatmaps) - # unsqueeze the instance dimension for single-instance results - keypoints = keypoints[None] - scores = scores[None] - - keypoints = refine_keypoints_dark_udp( - keypoints, heatmaps, blur_kernel_size=self.blur_kernel_size) - - elif self.heatmap_type == 'combined': - _K, H, W = heatmaps.shape - K = _K // 3 - - for cls_heatmap in heatmaps[::3]: - # Apply Gaussian blur on classification maps - ks = 2 * self.blur_kernel_size + 1 - cv2.GaussianBlur(cls_heatmap, (ks, ks), 0, cls_heatmap) - - # valid radius - radius = self.radius_factor * max(W, H) - - x_offset = heatmaps[1::3].flatten() * radius - y_offset = heatmaps[2::3].flatten() * radius - keypoints, scores = get_heatmap_maximum(heatmaps=heatmaps[::3]) - index = (keypoints[..., 0] + keypoints[..., 1] * W).flatten() - index += W * H * np.arange(0, K) - index = index.astype(int) - keypoints += np.stack((x_offset[index], y_offset[index]), axis=-1) - # unsqueeze the instance dimension for single-instance results - keypoints = keypoints[None].astype(np.float32) - scores = scores[None] - - W, H = self.heatmap_size - keypoints = keypoints / [W - 1, H - 1] * self.input_size - - return keypoints, scores +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Tuple + +import cv2 +import numpy as np + +from mmpose.registry import KEYPOINT_CODECS +from .base import BaseKeypointCodec +from .utils import (generate_offset_heatmap, generate_udp_gaussian_heatmaps, + get_heatmap_maximum, refine_keypoints_dark_udp) + + +@KEYPOINT_CODECS.register_module() +class UDPHeatmap(BaseKeypointCodec): + r"""Generate keypoint heatmaps by Unbiased Data Processing (UDP). + See the paper: `The Devil is in the Details: Delving into Unbiased Data + Processing for Human Pose Estimation`_ by Huang et al (2020) for details. + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - image size: [w, h] + - heatmap size: [W, H] + + Encoded: + + - heatmap (np.ndarray): The generated heatmap in shape (C_out, H, W) + where [W, H] is the `heatmap_size`, and the C_out is the output + channel number which depends on the `heatmap_type`. If + `heatmap_type=='gaussian'`, C_out equals to keypoint number K; + if `heatmap_type=='combined'`, C_out equals to K*3 + (x_offset, y_offset and class label) + - keypoint_weights (np.ndarray): The target weights in shape (K,) + + Args: + input_size (tuple): Image size in [w, h] + heatmap_size (tuple): Heatmap size in [W, H] + heatmap_type (str): The heatmap type to encode the keypoitns. Options + are: + + - ``'gaussian'``: Gaussian heatmap + - ``'combined'``: Combination of a binary label map and offset + maps for X and Y axes. + + sigma (float): The sigma value of the Gaussian heatmap when + ``heatmap_type=='gaussian'``. Defaults to 2.0 + radius_factor (float): The radius factor of the binary label + map when ``heatmap_type=='combined'``. The positive region is + defined as the neighbor of the keypoit with the radius + :math:`r=radius_factor*max(W, H)`. Defaults to 0.0546875 + blur_kernel_size (int): The Gaussian blur kernel size of the heatmap + modulation in DarkPose. Defaults to 11 + + .. 
_`The Devil is in the Details: Delving into Unbiased Data Processing for + Human Pose Estimation`: https://arxiv.org/abs/1911.07524 + """ + + def __init__(self, + input_size: Tuple[int, int], + heatmap_size: Tuple[int, int], + heatmap_type: str = 'gaussian', + sigma: float = 2., + radius_factor: float = 0.0546875, + blur_kernel_size: int = 11) -> None: + super().__init__() + self.input_size = input_size + self.heatmap_size = heatmap_size + self.sigma = sigma + self.radius_factor = radius_factor + self.heatmap_type = heatmap_type + self.blur_kernel_size = blur_kernel_size + self.scale_factor = ((np.array(input_size) - 1) / + (np.array(heatmap_size) - 1)).astype(np.float32) + + if self.heatmap_type not in {'gaussian', 'combined'}: + raise ValueError( + f'{self.__class__.__name__} got invalid `heatmap_type` value' + f'{self.heatmap_type}. Should be one of ' + '{"gaussian", "combined"}') + + def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None) -> dict: + """Encode keypoints into heatmaps. Note that the original keypoint + coordinates should be in the input image space. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + + Returns: + dict: + - heatmap (np.ndarray): The generated heatmap in shape + (C_out, H, W) where [W, H] is the `heatmap_size`, and the + C_out is the output channel number which depends on the + `heatmap_type`. If `heatmap_type=='gaussian'`, C_out equals to + keypoint number K; if `heatmap_type=='combined'`, C_out + equals to K*3 (x_offset, y_offset and class label) + - keypoint_weights (np.ndarray): The target weights in shape + (K,) + """ + assert keypoints.shape[0] == 1, ( + f'{self.__class__.__name__} only support single-instance ' + 'keypoint encoding') + + if keypoints_visible is None: + keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) + + if self.heatmap_type == 'gaussian': + heatmaps, keypoint_weights = generate_udp_gaussian_heatmaps( + heatmap_size=self.heatmap_size, + keypoints=keypoints / self.scale_factor, + keypoints_visible=keypoints_visible, + sigma=self.sigma) + elif self.heatmap_type == 'combined': + heatmaps, keypoint_weights = generate_offset_heatmap( + heatmap_size=self.heatmap_size, + keypoints=keypoints / self.scale_factor, + keypoints_visible=keypoints_visible, + radius_factor=self.radius_factor) + else: + raise ValueError( + f'{self.__class__.__name__} got invalid `heatmap_type` value' + f'{self.heatmap_type}. Should be one of ' + '{"gaussian", "combined"}') + + encoded = dict(heatmaps=heatmaps, keypoint_weights=keypoint_weights) + + return encoded + + def decode(self, encoded: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoint coordinates from heatmaps. The decoded keypoint + coordinates are in the input image space. + + Args: + encoded (np.ndarray): Heatmaps in shape (K, H, W) + + Returns: + tuple: + - keypoints (np.ndarray): Decoded keypoint coordinates in shape + (N, K, D) + - scores (np.ndarray): The keypoint scores in shape (N, K). 
It + usually represents the confidence of the keypoint prediction + """ + heatmaps = encoded.copy() + + if self.heatmap_type == 'gaussian': + keypoints, scores = get_heatmap_maximum(heatmaps) + # unsqueeze the instance dimension for single-instance results + keypoints = keypoints[None] + scores = scores[None] + + keypoints = refine_keypoints_dark_udp( + keypoints, heatmaps, blur_kernel_size=self.blur_kernel_size) + + elif self.heatmap_type == 'combined': + _K, H, W = heatmaps.shape + K = _K // 3 + + for cls_heatmap in heatmaps[::3]: + # Apply Gaussian blur on classification maps + ks = 2 * self.blur_kernel_size + 1 + cv2.GaussianBlur(cls_heatmap, (ks, ks), 0, cls_heatmap) + + # valid radius + radius = self.radius_factor * max(W, H) + + x_offset = heatmaps[1::3].flatten() * radius + y_offset = heatmaps[2::3].flatten() * radius + keypoints, scores = get_heatmap_maximum(heatmaps=heatmaps[::3]) + index = (keypoints[..., 0] + keypoints[..., 1] * W).flatten() + index += W * H * np.arange(0, K) + index = index.astype(int) + keypoints += np.stack((x_offset[index], y_offset[index]), axis=-1) + # unsqueeze the instance dimension for single-instance results + keypoints = keypoints[None].astype(np.float32) + scores = scores[None] + + W, H = self.heatmap_size + keypoints = keypoints / [W - 1, H - 1] * self.input_size + + return keypoints, scores diff --git a/mmpose/codecs/utils/__init__.py b/mmpose/codecs/utils/__init__.py index eaa093f12b..f1f87d21ab 100644 --- a/mmpose/codecs/utils/__init__.py +++ b/mmpose/codecs/utils/__init__.py @@ -1,23 +1,23 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .gaussian_heatmap import (generate_gaussian_heatmaps, - generate_udp_gaussian_heatmaps, - generate_unbiased_gaussian_heatmaps) -from .instance_property import (get_diagonal_lengths, get_instance_bbox, - get_instance_root) -from .offset_heatmap import (generate_displacement_heatmap, - generate_offset_heatmap) -from .post_processing import (batch_heatmap_nms, gaussian_blur, - gaussian_blur1d, get_heatmap_maximum, - get_simcc_maximum, get_simcc_normalized) -from .refinement import (refine_keypoints, refine_keypoints_dark, - refine_keypoints_dark_udp, refine_simcc_dark) - -__all__ = [ - 'generate_gaussian_heatmaps', 'generate_udp_gaussian_heatmaps', - 'generate_unbiased_gaussian_heatmaps', 'gaussian_blur', - 'get_heatmap_maximum', 'get_simcc_maximum', 'generate_offset_heatmap', - 'batch_heatmap_nms', 'refine_keypoints', 'refine_keypoints_dark', - 'refine_keypoints_dark_udp', 'generate_displacement_heatmap', - 'refine_simcc_dark', 'gaussian_blur1d', 'get_diagonal_lengths', - 'get_instance_root', 'get_instance_bbox', 'get_simcc_normalized' -] +# Copyright (c) OpenMMLab. All rights reserved. 
+from .gaussian_heatmap import (generate_gaussian_heatmaps, + generate_udp_gaussian_heatmaps, + generate_unbiased_gaussian_heatmaps) +from .instance_property import (get_diagonal_lengths, get_instance_bbox, + get_instance_root) +from .offset_heatmap import (generate_displacement_heatmap, + generate_offset_heatmap) +from .post_processing import (batch_heatmap_nms, gaussian_blur, + gaussian_blur1d, get_heatmap_maximum, + get_simcc_maximum, get_simcc_normalized) +from .refinement import (refine_keypoints, refine_keypoints_dark, + refine_keypoints_dark_udp, refine_simcc_dark) + +__all__ = [ + 'generate_gaussian_heatmaps', 'generate_udp_gaussian_heatmaps', + 'generate_unbiased_gaussian_heatmaps', 'gaussian_blur', + 'get_heatmap_maximum', 'get_simcc_maximum', 'generate_offset_heatmap', + 'batch_heatmap_nms', 'refine_keypoints', 'refine_keypoints_dark', + 'refine_keypoints_dark_udp', 'generate_displacement_heatmap', + 'refine_simcc_dark', 'gaussian_blur1d', 'get_diagonal_lengths', + 'get_instance_root', 'get_instance_bbox', 'get_simcc_normalized' +] diff --git a/mmpose/codecs/utils/gaussian_heatmap.py b/mmpose/codecs/utils/gaussian_heatmap.py index 91e08c2cdd..fe3cae3b4f 100644 --- a/mmpose/codecs/utils/gaussian_heatmap.py +++ b/mmpose/codecs/utils/gaussian_heatmap.py @@ -1,227 +1,227 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from itertools import product -from typing import Tuple, Union - -import numpy as np - - -def generate_gaussian_heatmaps( - heatmap_size: Tuple[int, int], - keypoints: np.ndarray, - keypoints_visible: np.ndarray, - sigma: Union[float, Tuple[float], np.ndarray], -) -> Tuple[np.ndarray, np.ndarray]: - """Generate gaussian heatmaps of keypoints. - - Args: - heatmap_size (Tuple[int, int]): Heatmap size in [W, H] - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) - keypoints_visible (np.ndarray): Keypoint visibilities in shape - (N, K) - sigma (float or List[float]): A list of sigma values of the Gaussian - heatmap for each instance. 
If sigma is given as a single float - value, it will be expanded into a tuple - - Returns: - tuple: - - heatmaps (np.ndarray): The generated heatmap in shape - (K, H, W) where [W, H] is the `heatmap_size` - - keypoint_weights (np.ndarray): The target weights in shape - (N, K) - """ - - N, K, _ = keypoints.shape - W, H = heatmap_size - - heatmaps = np.zeros((K, H, W), dtype=np.float32) - keypoint_weights = keypoints_visible.copy() - - if isinstance(sigma, (int, float)): - sigma = (sigma, ) * N - - for n in range(N): - # 3-sigma rule - radius = sigma[n] * 3 - - # xy grid - gaussian_size = 2 * radius + 1 - x = np.arange(0, gaussian_size, 1, dtype=np.float32) - y = x[:, None] - x0 = y0 = gaussian_size // 2 - - for k in range(K): - # skip unlabled keypoints - if keypoints_visible[n, k] < 0.5: - continue - - # get gaussian center coordinates - mu = (keypoints[n, k] + 0.5).astype(np.int64) - - # check that the gaussian has in-bounds part - left, top = (mu - radius).astype(np.int64) - right, bottom = (mu + radius + 1).astype(np.int64) - - if left >= W or top >= H or right < 0 or bottom < 0: - keypoint_weights[n, k] = 0 - continue - - # The gaussian is not normalized, - # we want the center value to equal 1 - gaussian = np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * sigma[n]**2)) - - # valid range in gaussian - g_x1 = max(0, -left) - g_x2 = min(W, right) - left - g_y1 = max(0, -top) - g_y2 = min(H, bottom) - top - - # valid range in heatmap - h_x1 = max(0, left) - h_x2 = min(W, right) - h_y1 = max(0, top) - h_y2 = min(H, bottom) - - heatmap_region = heatmaps[k, h_y1:h_y2, h_x1:h_x2] - gaussian_regsion = gaussian[g_y1:g_y2, g_x1:g_x2] - - _ = np.maximum( - heatmap_region, gaussian_regsion, out=heatmap_region) - - return heatmaps, keypoint_weights - - -def generate_unbiased_gaussian_heatmaps( - heatmap_size: Tuple[int, int], - keypoints: np.ndarray, - keypoints_visible: np.ndarray, - sigma: float, -) -> Tuple[np.ndarray, np.ndarray]: - """Generate gaussian heatmaps of keypoints using `Dark Pose`_. - - Args: - heatmap_size (Tuple[int, int]): Heatmap size in [W, H] - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) - keypoints_visible (np.ndarray): Keypoint visibilities in shape - (N, K) - - Returns: - tuple: - - heatmaps (np.ndarray): The generated heatmap in shape - (K, H, W) where [W, H] is the `heatmap_size` - - keypoint_weights (np.ndarray): The target weights in shape - (N, K) - - .. _`Dark Pose`: https://arxiv.org/abs/1910.06278 - """ - - N, K, _ = keypoints.shape - W, H = heatmap_size - - heatmaps = np.zeros((K, H, W), dtype=np.float32) - keypoint_weights = keypoints_visible.copy() - - # 3-sigma rule - radius = sigma * 3 - - # xy grid - x = np.arange(0, W, 1, dtype=np.float32) - y = np.arange(0, H, 1, dtype=np.float32)[:, None] - - for n, k in product(range(N), range(K)): - # skip unlabled keypoints - if keypoints_visible[n, k] < 0.5: - continue - - mu = keypoints[n, k] - # check that the gaussian has in-bounds part - left, top = mu - radius - right, bottom = mu + radius + 1 - - if left >= W or top >= H or right < 0 or bottom < 0: - keypoint_weights[n, k] = 0 - continue - - gaussian = np.exp(-((x - mu[0])**2 + (y - mu[1])**2) / (2 * sigma**2)) - - _ = np.maximum(gaussian, heatmaps[k], out=heatmaps[k]) - - return heatmaps, keypoint_weights - - -def generate_udp_gaussian_heatmaps( - heatmap_size: Tuple[int, int], - keypoints: np.ndarray, - keypoints_visible: np.ndarray, - sigma: float, -) -> Tuple[np.ndarray, np.ndarray]: - """Generate gaussian heatmaps of keypoints using `UDP`_. 
- - Args: - heatmap_size (Tuple[int, int]): Heatmap size in [W, H] - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) - keypoints_visible (np.ndarray): Keypoint visibilities in shape - (N, K) - sigma (float): The sigma value of the Gaussian heatmap - - Returns: - tuple: - - heatmaps (np.ndarray): The generated heatmap in shape - (K, H, W) where [W, H] is the `heatmap_size` - - keypoint_weights (np.ndarray): The target weights in shape - (N, K) - - .. _`UDP`: https://arxiv.org/abs/1911.07524 - """ - - N, K, _ = keypoints.shape - W, H = heatmap_size - - heatmaps = np.zeros((K, H, W), dtype=np.float32) - keypoint_weights = keypoints_visible.copy() - - # 3-sigma rule - radius = sigma * 3 - - # xy grid - gaussian_size = 2 * radius + 1 - x = np.arange(0, gaussian_size, 1, dtype=np.float32) - y = x[:, None] - - for n, k in product(range(N), range(K)): - # skip unlabled keypoints - if keypoints_visible[n, k] < 0.5: - continue - - mu = (keypoints[n, k] + 0.5).astype(np.int64) - # check that the gaussian has in-bounds part - left, top = (mu - radius).astype(np.int64) - right, bottom = (mu + radius + 1).astype(np.int64) - - if left >= W or top >= H or right < 0 or bottom < 0: - keypoint_weights[n, k] = 0 - continue - - mu_ac = keypoints[n, k] - x0 = y0 = gaussian_size // 2 - x0 += mu_ac[0] - mu[0] - y0 += mu_ac[1] - mu[1] - gaussian = np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * sigma**2)) - - # valid range in gaussian - g_x1 = max(0, -left) - g_x2 = min(W, right) - left - g_y1 = max(0, -top) - g_y2 = min(H, bottom) - top - - # valid range in heatmap - h_x1 = max(0, left) - h_x2 = min(W, right) - h_y1 = max(0, top) - h_y2 = min(H, bottom) - - heatmap_region = heatmaps[k, h_y1:h_y2, h_x1:h_x2] - gaussian_regsion = gaussian[g_y1:g_y2, g_x1:g_x2] - - _ = np.maximum(heatmap_region, gaussian_regsion, out=heatmap_region) - - return heatmaps, keypoint_weights +# Copyright (c) OpenMMLab. All rights reserved. +from itertools import product +from typing import Tuple, Union + +import numpy as np + + +def generate_gaussian_heatmaps( + heatmap_size: Tuple[int, int], + keypoints: np.ndarray, + keypoints_visible: np.ndarray, + sigma: Union[float, Tuple[float], np.ndarray], +) -> Tuple[np.ndarray, np.ndarray]: + """Generate gaussian heatmaps of keypoints. + + Args: + heatmap_size (Tuple[int, int]): Heatmap size in [W, H] + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + sigma (float or List[float]): A list of sigma values of the Gaussian + heatmap for each instance. 
If sigma is given as a single float + value, it will be expanded into a tuple + + Returns: + tuple: + - heatmaps (np.ndarray): The generated heatmap in shape + (K, H, W) where [W, H] is the `heatmap_size` + - keypoint_weights (np.ndarray): The target weights in shape + (N, K) + """ + + N, K, _ = keypoints.shape + W, H = heatmap_size + + heatmaps = np.zeros((K, H, W), dtype=np.float32) + keypoint_weights = keypoints_visible.copy() + + if isinstance(sigma, (int, float)): + sigma = (sigma, ) * N + + for n in range(N): + # 3-sigma rule + radius = sigma[n] * 3 + + # xy grid + gaussian_size = 2 * radius + 1 + x = np.arange(0, gaussian_size, 1, dtype=np.float32) + y = x[:, None] + x0 = y0 = gaussian_size // 2 + + for k in range(K): + # skip unlabled keypoints + if keypoints_visible[n, k] < 0.5: + continue + + # get gaussian center coordinates + mu = (keypoints[n, k] + 0.5).astype(np.int64) + + # check that the gaussian has in-bounds part + left, top = (mu - radius).astype(np.int64) + right, bottom = (mu + radius + 1).astype(np.int64) + + if left >= W or top >= H or right < 0 or bottom < 0: + keypoint_weights[n, k] = 0 + continue + + # The gaussian is not normalized, + # we want the center value to equal 1 + gaussian = np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * sigma[n]**2)) + + # valid range in gaussian + g_x1 = max(0, -left) + g_x2 = min(W, right) - left + g_y1 = max(0, -top) + g_y2 = min(H, bottom) - top + + # valid range in heatmap + h_x1 = max(0, left) + h_x2 = min(W, right) + h_y1 = max(0, top) + h_y2 = min(H, bottom) + + heatmap_region = heatmaps[k, h_y1:h_y2, h_x1:h_x2] + gaussian_regsion = gaussian[g_y1:g_y2, g_x1:g_x2] + + _ = np.maximum( + heatmap_region, gaussian_regsion, out=heatmap_region) + + return heatmaps, keypoint_weights + + +def generate_unbiased_gaussian_heatmaps( + heatmap_size: Tuple[int, int], + keypoints: np.ndarray, + keypoints_visible: np.ndarray, + sigma: float, +) -> Tuple[np.ndarray, np.ndarray]: + """Generate gaussian heatmaps of keypoints using `Dark Pose`_. + + Args: + heatmap_size (Tuple[int, int]): Heatmap size in [W, H] + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + + Returns: + tuple: + - heatmaps (np.ndarray): The generated heatmap in shape + (K, H, W) where [W, H] is the `heatmap_size` + - keypoint_weights (np.ndarray): The target weights in shape + (N, K) + + .. _`Dark Pose`: https://arxiv.org/abs/1910.06278 + """ + + N, K, _ = keypoints.shape + W, H = heatmap_size + + heatmaps = np.zeros((K, H, W), dtype=np.float32) + keypoint_weights = keypoints_visible.copy() + + # 3-sigma rule + radius = sigma * 3 + + # xy grid + x = np.arange(0, W, 1, dtype=np.float32) + y = np.arange(0, H, 1, dtype=np.float32)[:, None] + + for n, k in product(range(N), range(K)): + # skip unlabled keypoints + if keypoints_visible[n, k] < 0.5: + continue + + mu = keypoints[n, k] + # check that the gaussian has in-bounds part + left, top = mu - radius + right, bottom = mu + radius + 1 + + if left >= W or top >= H or right < 0 or bottom < 0: + keypoint_weights[n, k] = 0 + continue + + gaussian = np.exp(-((x - mu[0])**2 + (y - mu[1])**2) / (2 * sigma**2)) + + _ = np.maximum(gaussian, heatmaps[k], out=heatmaps[k]) + + return heatmaps, keypoint_weights + + +def generate_udp_gaussian_heatmaps( + heatmap_size: Tuple[int, int], + keypoints: np.ndarray, + keypoints_visible: np.ndarray, + sigma: float, +) -> Tuple[np.ndarray, np.ndarray]: + """Generate gaussian heatmaps of keypoints using `UDP`_. 
+ + Args: + heatmap_size (Tuple[int, int]): Heatmap size in [W, H] + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + sigma (float): The sigma value of the Gaussian heatmap + + Returns: + tuple: + - heatmaps (np.ndarray): The generated heatmap in shape + (K, H, W) where [W, H] is the `heatmap_size` + - keypoint_weights (np.ndarray): The target weights in shape + (N, K) + + .. _`UDP`: https://arxiv.org/abs/1911.07524 + """ + + N, K, _ = keypoints.shape + W, H = heatmap_size + + heatmaps = np.zeros((K, H, W), dtype=np.float32) + keypoint_weights = keypoints_visible.copy() + + # 3-sigma rule + radius = sigma * 3 + + # xy grid + gaussian_size = 2 * radius + 1 + x = np.arange(0, gaussian_size, 1, dtype=np.float32) + y = x[:, None] + + for n, k in product(range(N), range(K)): + # skip unlabled keypoints + if keypoints_visible[n, k] < 0.5: + continue + + mu = (keypoints[n, k] + 0.5).astype(np.int64) + # check that the gaussian has in-bounds part + left, top = (mu - radius).astype(np.int64) + right, bottom = (mu + radius + 1).astype(np.int64) + + if left >= W or top >= H or right < 0 or bottom < 0: + keypoint_weights[n, k] = 0 + continue + + mu_ac = keypoints[n, k] + x0 = y0 = gaussian_size // 2 + x0 += mu_ac[0] - mu[0] + y0 += mu_ac[1] - mu[1] + gaussian = np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * sigma**2)) + + # valid range in gaussian + g_x1 = max(0, -left) + g_x2 = min(W, right) - left + g_y1 = max(0, -top) + g_y2 = min(H, bottom) - top + + # valid range in heatmap + h_x1 = max(0, left) + h_x2 = min(W, right) + h_y1 = max(0, top) + h_y2 = min(H, bottom) + + heatmap_region = heatmaps[k, h_y1:h_y2, h_x1:h_x2] + gaussian_regsion = gaussian[g_y1:g_y2, g_x1:g_x2] + + _ = np.maximum(heatmap_region, gaussian_regsion, out=heatmap_region) + + return heatmaps, keypoint_weights diff --git a/mmpose/codecs/utils/instance_property.py b/mmpose/codecs/utils/instance_property.py index 15ae30aef0..b592297fff 100644 --- a/mmpose/codecs/utils/instance_property.py +++ b/mmpose/codecs/utils/instance_property.py @@ -1,111 +1,111 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Optional - -import numpy as np - - -def get_instance_root(keypoints: np.ndarray, - keypoints_visible: Optional[np.ndarray] = None, - root_type: str = 'kpt_center') -> np.ndarray: - """Calculate the coordinates and visibility of instance roots. 
- - Args: - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) - keypoints_visible (np.ndarray): Keypoint visibilities in shape - (N, K) - root_type (str): Calculation of instance roots which should - be one of the following options: - - - ``'kpt_center'``: The roots' coordinates are the mean - coordinates of visible keypoints - - ``'bbox_center'``: The roots' are the center of bounding - boxes outlined by visible keypoints - - Defaults to ``'kpt_center'`` - - Returns: - tuple - - roots_coordinate(np.ndarray): Coordinates of instance roots in - shape [N, D] - - roots_visible(np.ndarray): Visibility of instance roots in - shape [N] - """ - - roots_coordinate = np.zeros((keypoints.shape[0], 2), dtype=np.float32) - roots_visible = np.ones((keypoints.shape[0]), dtype=np.float32) * 2 - - for i in range(keypoints.shape[0]): - - # collect visible keypoints - if keypoints_visible is not None: - visible_keypoints = keypoints[i][keypoints_visible[i] > 0] - else: - visible_keypoints = keypoints[i] - if visible_keypoints.size == 0: - roots_visible[i] = 0 - continue - - # compute the instance root with visible keypoints - if root_type == 'kpt_center': - roots_coordinate[i] = visible_keypoints.mean(axis=0) - roots_visible[i] = 1 - elif root_type == 'bbox_center': - roots_coordinate[i] = (visible_keypoints.max(axis=0) + - visible_keypoints.min(axis=0)) / 2.0 - roots_visible[i] = 1 - else: - raise ValueError( - f'the value of `root_type` must be \'kpt_center\' or ' - f'\'bbox_center\', but got \'{root_type}\'') - - return roots_coordinate, roots_visible - - -def get_instance_bbox(keypoints: np.ndarray, - keypoints_visible: Optional[np.ndarray] = None - ) -> np.ndarray: - """Calculate the pseudo instance bounding box from visible keypoints. The - bounding boxes are in the xyxy format. - - Args: - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) - keypoints_visible (np.ndarray): Keypoint visibilities in shape - (N, K) - - Returns: - np.ndarray: bounding boxes in [N, 4] - """ - bbox = np.zeros((keypoints.shape[0], 4), dtype=np.float32) - for i in range(keypoints.shape[0]): - if keypoints_visible is not None: - visible_keypoints = keypoints[i][keypoints_visible[i] > 0] - else: - visible_keypoints = keypoints[i] - if visible_keypoints.size == 0: - continue - - bbox[i, :2] = visible_keypoints.min(axis=0) - bbox[i, 2:] = visible_keypoints.max(axis=0) - return bbox - - -def get_diagonal_lengths(keypoints: np.ndarray, - keypoints_visible: Optional[np.ndarray] = None - ) -> np.ndarray: - """Calculate the diagonal length of instance bounding box from visible - keypoints. - - Args: - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) - keypoints_visible (np.ndarray): Keypoint visibilities in shape - (N, K) - - Returns: - np.ndarray: bounding box diagonal length in [N] - """ - pseudo_bbox = get_instance_bbox(keypoints, keypoints_visible) - pseudo_bbox = pseudo_bbox.reshape(-1, 2, 2) - h_w_diff = pseudo_bbox[:, 1] - pseudo_bbox[:, 0] - diagonal_length = np.sqrt(np.power(h_w_diff, 2).sum(axis=1)) - - return diagonal_length +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional + +import numpy as np + + +def get_instance_root(keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None, + root_type: str = 'kpt_center') -> np.ndarray: + """Calculate the coordinates and visibility of instance roots. 
+ + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + root_type (str): Calculation of instance roots which should + be one of the following options: + + - ``'kpt_center'``: The roots' coordinates are the mean + coordinates of visible keypoints + - ``'bbox_center'``: The roots' are the center of bounding + boxes outlined by visible keypoints + + Defaults to ``'kpt_center'`` + + Returns: + tuple + - roots_coordinate(np.ndarray): Coordinates of instance roots in + shape [N, D] + - roots_visible(np.ndarray): Visibility of instance roots in + shape [N] + """ + + roots_coordinate = np.zeros((keypoints.shape[0], 2), dtype=np.float32) + roots_visible = np.ones((keypoints.shape[0]), dtype=np.float32) * 2 + + for i in range(keypoints.shape[0]): + + # collect visible keypoints + if keypoints_visible is not None: + visible_keypoints = keypoints[i][keypoints_visible[i] > 0] + else: + visible_keypoints = keypoints[i] + if visible_keypoints.size == 0: + roots_visible[i] = 0 + continue + + # compute the instance root with visible keypoints + if root_type == 'kpt_center': + roots_coordinate[i] = visible_keypoints.mean(axis=0) + roots_visible[i] = 1 + elif root_type == 'bbox_center': + roots_coordinate[i] = (visible_keypoints.max(axis=0) + + visible_keypoints.min(axis=0)) / 2.0 + roots_visible[i] = 1 + else: + raise ValueError( + f'the value of `root_type` must be \'kpt_center\' or ' + f'\'bbox_center\', but got \'{root_type}\'') + + return roots_coordinate, roots_visible + + +def get_instance_bbox(keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None + ) -> np.ndarray: + """Calculate the pseudo instance bounding box from visible keypoints. The + bounding boxes are in the xyxy format. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + + Returns: + np.ndarray: bounding boxes in [N, 4] + """ + bbox = np.zeros((keypoints.shape[0], 4), dtype=np.float32) + for i in range(keypoints.shape[0]): + if keypoints_visible is not None: + visible_keypoints = keypoints[i][keypoints_visible[i] > 0] + else: + visible_keypoints = keypoints[i] + if visible_keypoints.size == 0: + continue + + bbox[i, :2] = visible_keypoints.min(axis=0) + bbox[i, 2:] = visible_keypoints.max(axis=0) + return bbox + + +def get_diagonal_lengths(keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None + ) -> np.ndarray: + """Calculate the diagonal length of instance bounding box from visible + keypoints. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + + Returns: + np.ndarray: bounding box diagonal length in [N] + """ + pseudo_bbox = get_instance_bbox(keypoints, keypoints_visible) + pseudo_bbox = pseudo_bbox.reshape(-1, 2, 2) + h_w_diff = pseudo_bbox[:, 1] - pseudo_bbox[:, 0] + diagonal_length = np.sqrt(np.power(h_w_diff, 2).sum(axis=1)) + + return diagonal_length diff --git a/mmpose/codecs/utils/offset_heatmap.py b/mmpose/codecs/utils/offset_heatmap.py index c3c1c32ed3..94a6cd511d 100644 --- a/mmpose/codecs/utils/offset_heatmap.py +++ b/mmpose/codecs/utils/offset_heatmap.py @@ -1,143 +1,143 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
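For reference, a small sketch of the instance-property helpers diffed above (`get_instance_root`, `get_diagonal_lengths`), which the SPR codec uses to place root heatmaps and to filter out small instances. Illustrative only; the coordinates are arbitrary example values.

# Illustrative sketch (not part of the patch)
import numpy as np
from mmpose.codecs.utils import get_diagonal_lengths, get_instance_root

# Two instances with three keypoints each; the last keypoint of the second
# instance is unlabeled (visibility 0) and is ignored by both helpers.
keypoints = np.array([[[10., 10.], [30., 50.], [20., 40.]],
                      [[5., 5.], [6., 6.], [0., 0.]]], dtype=np.float32)
keypoints_visible = np.array([[1., 1., 1.], [1., 1., 0.]], dtype=np.float32)

roots, roots_visible = get_instance_root(
    keypoints, keypoints_visible, root_type='kpt_center')
# roots[0] is the mean of instance 0's visible keypoints -> (20.0, 33.33...)

diagonals = get_diagonal_lengths(keypoints, keypoints_visible)
# diagonal of the tight box around the visible keypoints, e.g. for instance 0:
# sqrt((30 - 10) ** 2 + (50 - 10) ** 2) ~= 44.7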
-from itertools import product -from typing import Tuple - -import numpy as np - - -def generate_offset_heatmap( - heatmap_size: Tuple[int, int], - keypoints: np.ndarray, - keypoints_visible: np.ndarray, - radius_factor: float, -) -> Tuple[np.ndarray, np.ndarray]: - """Generate offset heatmaps of keypoints, where each keypoint is - represented by 3 maps: one pixel-level class label map (1 for keypoint and - 0 for non-keypoint) and 2 pixel-level offset maps for x and y directions - respectively. - - Args: - heatmap_size (Tuple[int, int]): Heatmap size in [W, H] - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) - keypoints_visible (np.ndarray): Keypoint visibilities in shape - (N, K) - radius_factor (float): The radius factor of the binary label - map. The positive region is defined as the neighbor of the - keypoint with the radius :math:`r=radius_factor*max(W, H)` - - Returns: - tuple: - - heatmap (np.ndarray): The generated heatmap in shape - (K*3, H, W) where [W, H] is the `heatmap_size` - - keypoint_weights (np.ndarray): The target weights in shape - (K,) - """ - - N, K, _ = keypoints.shape - W, H = heatmap_size - - heatmaps = np.zeros((K, 3, H, W), dtype=np.float32) - keypoint_weights = keypoints_visible.copy() - - # xy grid - x = np.arange(0, W, 1) - y = np.arange(0, H, 1)[:, None] - - # positive area radius in the classification map - radius = radius_factor * max(W, H) - - for n, k in product(range(N), range(K)): - if keypoints_visible[n, k] < 0.5: - continue - - mu = keypoints[n, k] - - x_offset = (mu[0] - x) / radius - y_offset = (mu[1] - y) / radius - - heatmaps[k, 0] = np.where(x_offset**2 + y_offset**2 <= 1, 1., 0.) - heatmaps[k, 1] = x_offset - heatmaps[k, 2] = y_offset - - heatmaps = heatmaps.reshape(K * 3, H, W) - - return heatmaps, keypoint_weights - - -def generate_displacement_heatmap( - heatmap_size: Tuple[int, int], - keypoints: np.ndarray, - keypoints_visible: np.ndarray, - roots: np.ndarray, - roots_visible: np.ndarray, - diagonal_lengths: np.ndarray, - radius: float, -): - """Generate displacement heatmaps of keypoints, where each keypoint is - represented by 3 maps: one pixel-level class label map (1 for keypoint and - 0 for non-keypoint) and 2 pixel-level offset maps for x and y directions - respectively. - - Args: - heatmap_size (Tuple[int, int]): Heatmap size in [W, H] - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) - keypoints_visible (np.ndarray): Keypoint visibilities in shape - (N, K) - roots (np.ndarray): Coordinates of instance centers in shape (N, D). - The displacement fields of each instance will locate around its - center. - roots_visible (np.ndarray): Roots visibilities in shape (N,) - diagonal_lengths (np.ndarray): Diaginal length of the bounding boxes - of each instance in shape (N,) - radius (float): The radius factor of the binary label - map. 
The positive region is defined as the neighbor of the - keypoint with the radius :math:`r=radius_factor*max(W, H)` - - Returns: - tuple: - - displacements (np.ndarray): The generated displacement map in - shape (K*2, H, W) where [W, H] is the `heatmap_size` - - displacement_weights (np.ndarray): The target weights in shape - (K*2, H, W) - """ - N, K, _ = keypoints.shape - W, H = heatmap_size - - displacements = np.zeros((K * 2, H, W), dtype=np.float32) - displacement_weights = np.zeros((K * 2, H, W), dtype=np.float32) - instance_size_map = np.zeros((H, W), dtype=np.float32) - - for n in range(N): - if (roots_visible[n] < 1 or (roots[n, 0] < 0 or roots[n, 1] < 0) - or (roots[n, 0] >= W or roots[n, 1] >= H)): - continue - - diagonal_length = diagonal_lengths[n] - - for k in range(K): - if keypoints_visible[n, k] < 1 or keypoints[n, k, 0] < 0 \ - or keypoints[n, k, 1] < 0 or keypoints[n, k, 0] >= W \ - or keypoints[n, k, 1] >= H: - continue - - start_x = max(int(roots[n, 0] - radius), 0) - start_y = max(int(roots[n, 1] - radius), 0) - end_x = min(int(roots[n, 0] + radius), W) - end_y = min(int(roots[n, 1] + radius), H) - - for x in range(start_x, end_x): - for y in range(start_y, end_y): - if displacements[2 * k, y, - x] != 0 or displacements[2 * k + 1, y, - x] != 0: - if diagonal_length > instance_size_map[y, x]: - # keep the gt displacement of smaller instance - continue - - displacement_weights[2 * k:2 * k + 2, y, - x] = 1 / diagonal_length - displacements[2 * k:2 * k + 2, y, - x] = keypoints[n, k] - [x, y] - instance_size_map[y, x] = diagonal_length - - return displacements, displacement_weights +# Copyright (c) OpenMMLab. All rights reserved. +from itertools import product +from typing import Tuple + +import numpy as np + + +def generate_offset_heatmap( + heatmap_size: Tuple[int, int], + keypoints: np.ndarray, + keypoints_visible: np.ndarray, + radius_factor: float, +) -> Tuple[np.ndarray, np.ndarray]: + """Generate offset heatmaps of keypoints, where each keypoint is + represented by 3 maps: one pixel-level class label map (1 for keypoint and + 0 for non-keypoint) and 2 pixel-level offset maps for x and y directions + respectively. + + Args: + heatmap_size (Tuple[int, int]): Heatmap size in [W, H] + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + radius_factor (float): The radius factor of the binary label + map. The positive region is defined as the neighbor of the + keypoint with the radius :math:`r=radius_factor*max(W, H)` + + Returns: + tuple: + - heatmap (np.ndarray): The generated heatmap in shape + (K*3, H, W) where [W, H] is the `heatmap_size` + - keypoint_weights (np.ndarray): The target weights in shape + (K,) + """ + + N, K, _ = keypoints.shape + W, H = heatmap_size + + heatmaps = np.zeros((K, 3, H, W), dtype=np.float32) + keypoint_weights = keypoints_visible.copy() + + # xy grid + x = np.arange(0, W, 1) + y = np.arange(0, H, 1)[:, None] + + # positive area radius in the classification map + radius = radius_factor * max(W, H) + + for n, k in product(range(N), range(K)): + if keypoints_visible[n, k] < 0.5: + continue + + mu = keypoints[n, k] + + x_offset = (mu[0] - x) / radius + y_offset = (mu[1] - y) / radius + + heatmaps[k, 0] = np.where(x_offset**2 + y_offset**2 <= 1, 1., 0.) 
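+        # channels 1 and 2 below store the x/y offsets from each pixel to
+        # the keypoint, normalized by ``radius``, so a decoder can recover
+        # sub-pixel keypoint positions inside the positive disc of channel 0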
+ heatmaps[k, 1] = x_offset + heatmaps[k, 2] = y_offset + + heatmaps = heatmaps.reshape(K * 3, H, W) + + return heatmaps, keypoint_weights + + +def generate_displacement_heatmap( + heatmap_size: Tuple[int, int], + keypoints: np.ndarray, + keypoints_visible: np.ndarray, + roots: np.ndarray, + roots_visible: np.ndarray, + diagonal_lengths: np.ndarray, + radius: float, +): + """Generate displacement heatmaps of keypoints, where each keypoint is + represented by 3 maps: one pixel-level class label map (1 for keypoint and + 0 for non-keypoint) and 2 pixel-level offset maps for x and y directions + respectively. + + Args: + heatmap_size (Tuple[int, int]): Heatmap size in [W, H] + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + roots (np.ndarray): Coordinates of instance centers in shape (N, D). + The displacement fields of each instance will locate around its + center. + roots_visible (np.ndarray): Roots visibilities in shape (N,) + diagonal_lengths (np.ndarray): Diaginal length of the bounding boxes + of each instance in shape (N,) + radius (float): The radius factor of the binary label + map. The positive region is defined as the neighbor of the + keypoint with the radius :math:`r=radius_factor*max(W, H)` + + Returns: + tuple: + - displacements (np.ndarray): The generated displacement map in + shape (K*2, H, W) where [W, H] is the `heatmap_size` + - displacement_weights (np.ndarray): The target weights in shape + (K*2, H, W) + """ + N, K, _ = keypoints.shape + W, H = heatmap_size + + displacements = np.zeros((K * 2, H, W), dtype=np.float32) + displacement_weights = np.zeros((K * 2, H, W), dtype=np.float32) + instance_size_map = np.zeros((H, W), dtype=np.float32) + + for n in range(N): + if (roots_visible[n] < 1 or (roots[n, 0] < 0 or roots[n, 1] < 0) + or (roots[n, 0] >= W or roots[n, 1] >= H)): + continue + + diagonal_length = diagonal_lengths[n] + + for k in range(K): + if keypoints_visible[n, k] < 1 or keypoints[n, k, 0] < 0 \ + or keypoints[n, k, 1] < 0 or keypoints[n, k, 0] >= W \ + or keypoints[n, k, 1] >= H: + continue + + start_x = max(int(roots[n, 0] - radius), 0) + start_y = max(int(roots[n, 1] - radius), 0) + end_x = min(int(roots[n, 0] + radius), W) + end_y = min(int(roots[n, 1] + radius), H) + + for x in range(start_x, end_x): + for y in range(start_y, end_y): + if displacements[2 * k, y, + x] != 0 or displacements[2 * k + 1, y, + x] != 0: + if diagonal_length > instance_size_map[y, x]: + # keep the gt displacement of smaller instance + continue + + displacement_weights[2 * k:2 * k + 2, y, + x] = 1 / diagonal_length + displacements[2 * k:2 * k + 2, y, + x] = keypoints[n, k] - [x, y] + instance_size_map[y, x] = diagonal_length + + return displacements, displacement_weights diff --git a/mmpose/codecs/utils/post_processing.py b/mmpose/codecs/utils/post_processing.py index 75356388dc..36990d1241 100644 --- a/mmpose/codecs/utils/post_processing.py +++ b/mmpose/codecs/utils/post_processing.py @@ -1,227 +1,227 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from itertools import product -from typing import Tuple - -import cv2 -import numpy as np -import torch -import torch.nn.functional as F -from torch import Tensor - - -def get_simcc_normalized(batch_pred_simcc, sigma=None): - """Normalize the predicted SimCC. - - Args: - batch_pred_simcc (torch.Tensor): The predicted SimCC. - sigma (float): The sigma of the Gaussian distribution. - - Returns: - torch.Tensor: The normalized SimCC. 
- """ - B, K, _ = batch_pred_simcc.shape - - # Scale and clamp the tensor - if sigma is not None: - batch_pred_simcc = batch_pred_simcc / (sigma * np.sqrt(np.pi * 2)) - batch_pred_simcc = batch_pred_simcc.clamp(min=0) - - # Compute the binary mask - mask = (batch_pred_simcc.amax(dim=-1) > 1).reshape(B, K, 1) - - # Normalize the tensor using the maximum value - norm = (batch_pred_simcc / batch_pred_simcc.amax(dim=-1).reshape(B, K, 1)) - - # Apply normalization - batch_pred_simcc = torch.where(mask, norm, batch_pred_simcc) - - return batch_pred_simcc - - -def get_simcc_maximum(simcc_x: np.ndarray, - simcc_y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: - """Get maximum response location and value from simcc representations. - - Note: - instance number: N - num_keypoints: K - heatmap height: H - heatmap width: W - - Args: - simcc_x (np.ndarray): x-axis SimCC in shape (K, Wx) or (N, K, Wx) - simcc_y (np.ndarray): y-axis SimCC in shape (K, Wy) or (N, K, Wy) - - Returns: - tuple: - - locs (np.ndarray): locations of maximum heatmap responses in shape - (K, 2) or (N, K, 2) - - vals (np.ndarray): values of maximum heatmap responses in shape - (K,) or (N, K) - """ - - assert isinstance(simcc_x, np.ndarray), ('simcc_x should be numpy.ndarray') - assert isinstance(simcc_y, np.ndarray), ('simcc_y should be numpy.ndarray') - assert simcc_x.ndim == 2 or simcc_x.ndim == 3, ( - f'Invalid shape {simcc_x.shape}') - assert simcc_y.ndim == 2 or simcc_y.ndim == 3, ( - f'Invalid shape {simcc_y.shape}') - assert simcc_x.ndim == simcc_y.ndim, ( - f'{simcc_x.shape} != {simcc_y.shape}') - - if simcc_x.ndim == 3: - N, K, Wx = simcc_x.shape - simcc_x = simcc_x.reshape(N * K, -1) - simcc_y = simcc_y.reshape(N * K, -1) - else: - N = None - - x_locs = np.argmax(simcc_x, axis=1) - y_locs = np.argmax(simcc_y, axis=1) - locs = np.stack((x_locs, y_locs), axis=-1).astype(np.float32) - max_val_x = np.amax(simcc_x, axis=1) - max_val_y = np.amax(simcc_y, axis=1) - - mask = max_val_x > max_val_y - max_val_x[mask] = max_val_y[mask] - vals = max_val_x - locs[vals <= 0.] = -1 - - if N: - locs = locs.reshape(N, K, 2) - vals = vals.reshape(N, K) - - return locs, vals - - -def get_heatmap_maximum(heatmaps: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: - """Get maximum response location and value from heatmaps. - - Note: - batch_size: B - num_keypoints: K - heatmap height: H - heatmap width: W - - Args: - heatmaps (np.ndarray): Heatmaps in shape (K, H, W) or (B, K, H, W) - - Returns: - tuple: - - locs (np.ndarray): locations of maximum heatmap responses in shape - (K, 2) or (B, K, 2) - - vals (np.ndarray): values of maximum heatmap responses in shape - (K,) or (B, K) - """ - assert isinstance(heatmaps, - np.ndarray), ('heatmaps should be numpy.ndarray') - assert heatmaps.ndim == 3 or heatmaps.ndim == 4, ( - f'Invalid shape {heatmaps.shape}') - - if heatmaps.ndim == 3: - K, H, W = heatmaps.shape - B = None - heatmaps_flatten = heatmaps.reshape(K, -1) - else: - B, K, H, W = heatmaps.shape - heatmaps_flatten = heatmaps.reshape(B * K, -1) - - y_locs, x_locs = np.unravel_index( - np.argmax(heatmaps_flatten, axis=1), shape=(H, W)) - locs = np.stack((x_locs, y_locs), axis=-1).astype(np.float32) - vals = np.amax(heatmaps_flatten, axis=1) - locs[vals <= 0.] = -1 - - if B: - locs = locs.reshape(B, K, 2) - vals = vals.reshape(B, K) - - return locs, vals - - -def gaussian_blur(heatmaps: np.ndarray, kernel: int = 11) -> np.ndarray: - """Modulate heatmap distribution with Gaussian. 
- - Note: - - num_keypoints: K - - heatmap height: H - - heatmap width: W - - Args: - heatmaps (np.ndarray[K, H, W]): model predicted heatmaps. - kernel (int): Gaussian kernel size (K) for modulation, which should - match the heatmap gaussian sigma when training. - K=17 for sigma=3 and k=11 for sigma=2. - - Returns: - np.ndarray ([K, H, W]): Modulated heatmap distribution. - """ - assert kernel % 2 == 1 - - border = (kernel - 1) // 2 - K, H, W = heatmaps.shape - - for k in range(K): - origin_max = np.max(heatmaps[k]) - dr = np.zeros((H + 2 * border, W + 2 * border), dtype=np.float32) - dr[border:-border, border:-border] = heatmaps[k].copy() - dr = cv2.GaussianBlur(dr, (kernel, kernel), 0) - heatmaps[k] = dr[border:-border, border:-border].copy() - heatmaps[k] *= origin_max / np.max(heatmaps[k]) - return heatmaps - - -def gaussian_blur1d(simcc: np.ndarray, kernel: int = 11) -> np.ndarray: - """Modulate simcc distribution with Gaussian. - - Note: - - num_keypoints: K - - simcc length: Wx - - Args: - simcc (np.ndarray[K, Wx]): model predicted simcc. - kernel (int): Gaussian kernel size (K) for modulation, which should - match the simcc gaussian sigma when training. - K=17 for sigma=3 and k=11 for sigma=2. - - Returns: - np.ndarray ([K, Wx]): Modulated simcc distribution. - """ - assert kernel % 2 == 1 - - border = (kernel - 1) // 2 - N, K, Wx = simcc.shape - - for n, k in product(range(N), range(K)): - origin_max = np.max(simcc[n, k]) - dr = np.zeros((1, Wx + 2 * border), dtype=np.float32) - dr[0, border:-border] = simcc[n, k].copy() - dr = cv2.GaussianBlur(dr, (kernel, 1), 0) - simcc[n, k] = dr[0, border:-border].copy() - simcc[n, k] *= origin_max / np.max(simcc[n, k]) - return simcc - - -def batch_heatmap_nms(batch_heatmaps: Tensor, kernel_size: int = 5): - """Apply NMS on a batch of heatmaps. - - Args: - batch_heatmaps (Tensor): batch heatmaps in shape (B, K, H, W) - kernel_size (int): The kernel size of the NMS which should be - a odd integer. Defaults to 5 - - Returns: - Tensor: The batch heatmaps after NMS. - """ - - assert isinstance(kernel_size, int) and kernel_size % 2 == 1, \ - f'The kernel_size should be an odd integer, got {kernel_size}' - - padding = (kernel_size - 1) // 2 - - maximum = F.max_pool2d( - batch_heatmaps, kernel_size, stride=1, padding=padding) - maximum_indicator = torch.eq(batch_heatmaps, maximum) - batch_heatmaps = batch_heatmaps * maximum_indicator.float() - - return batch_heatmaps +# Copyright (c) OpenMMLab. All rights reserved. +from itertools import product +from typing import Tuple + +import cv2 +import numpy as np +import torch +import torch.nn.functional as F +from torch import Tensor + + +def get_simcc_normalized(batch_pred_simcc, sigma=None): + """Normalize the predicted SimCC. + + Args: + batch_pred_simcc (torch.Tensor): The predicted SimCC. + sigma (float): The sigma of the Gaussian distribution. + + Returns: + torch.Tensor: The normalized SimCC. 
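+
+    Example (illustrative; the import path mirrors this module)::
+
+        >>> import torch
+        >>> from mmpose.codecs.utils.post_processing import get_simcc_normalized
+        >>> simcc = torch.rand(2, 17, 384) * 3.0  # (B, K, Wx)
+        >>> out = get_simcc_normalized(simcc)  # peaks above 1 are rescaled
+        >>> out.shape
+        torch.Size([2, 17, 384])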
+ """ + B, K, _ = batch_pred_simcc.shape + + # Scale and clamp the tensor + if sigma is not None: + batch_pred_simcc = batch_pred_simcc / (sigma * np.sqrt(np.pi * 2)) + batch_pred_simcc = batch_pred_simcc.clamp(min=0) + + # Compute the binary mask + mask = (batch_pred_simcc.amax(dim=-1) > 1).reshape(B, K, 1) + + # Normalize the tensor using the maximum value + norm = (batch_pred_simcc / batch_pred_simcc.amax(dim=-1).reshape(B, K, 1)) + + # Apply normalization + batch_pred_simcc = torch.where(mask, norm, batch_pred_simcc) + + return batch_pred_simcc + + +def get_simcc_maximum(simcc_x: np.ndarray, + simcc_y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Get maximum response location and value from simcc representations. + + Note: + instance number: N + num_keypoints: K + heatmap height: H + heatmap width: W + + Args: + simcc_x (np.ndarray): x-axis SimCC in shape (K, Wx) or (N, K, Wx) + simcc_y (np.ndarray): y-axis SimCC in shape (K, Wy) or (N, K, Wy) + + Returns: + tuple: + - locs (np.ndarray): locations of maximum heatmap responses in shape + (K, 2) or (N, K, 2) + - vals (np.ndarray): values of maximum heatmap responses in shape + (K,) or (N, K) + """ + + assert isinstance(simcc_x, np.ndarray), ('simcc_x should be numpy.ndarray') + assert isinstance(simcc_y, np.ndarray), ('simcc_y should be numpy.ndarray') + assert simcc_x.ndim == 2 or simcc_x.ndim == 3, ( + f'Invalid shape {simcc_x.shape}') + assert simcc_y.ndim == 2 or simcc_y.ndim == 3, ( + f'Invalid shape {simcc_y.shape}') + assert simcc_x.ndim == simcc_y.ndim, ( + f'{simcc_x.shape} != {simcc_y.shape}') + + if simcc_x.ndim == 3: + N, K, Wx = simcc_x.shape + simcc_x = simcc_x.reshape(N * K, -1) + simcc_y = simcc_y.reshape(N * K, -1) + else: + N = None + + x_locs = np.argmax(simcc_x, axis=1) + y_locs = np.argmax(simcc_y, axis=1) + locs = np.stack((x_locs, y_locs), axis=-1).astype(np.float32) + max_val_x = np.amax(simcc_x, axis=1) + max_val_y = np.amax(simcc_y, axis=1) + + mask = max_val_x > max_val_y + max_val_x[mask] = max_val_y[mask] + vals = max_val_x + locs[vals <= 0.] = -1 + + if N: + locs = locs.reshape(N, K, 2) + vals = vals.reshape(N, K) + + return locs, vals + + +def get_heatmap_maximum(heatmaps: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Get maximum response location and value from heatmaps. + + Note: + batch_size: B + num_keypoints: K + heatmap height: H + heatmap width: W + + Args: + heatmaps (np.ndarray): Heatmaps in shape (K, H, W) or (B, K, H, W) + + Returns: + tuple: + - locs (np.ndarray): locations of maximum heatmap responses in shape + (K, 2) or (B, K, 2) + - vals (np.ndarray): values of maximum heatmap responses in shape + (K,) or (B, K) + """ + assert isinstance(heatmaps, + np.ndarray), ('heatmaps should be numpy.ndarray') + assert heatmaps.ndim == 3 or heatmaps.ndim == 4, ( + f'Invalid shape {heatmaps.shape}') + + if heatmaps.ndim == 3: + K, H, W = heatmaps.shape + B = None + heatmaps_flatten = heatmaps.reshape(K, -1) + else: + B, K, H, W = heatmaps.shape + heatmaps_flatten = heatmaps.reshape(B * K, -1) + + y_locs, x_locs = np.unravel_index( + np.argmax(heatmaps_flatten, axis=1), shape=(H, W)) + locs = np.stack((x_locs, y_locs), axis=-1).astype(np.float32) + vals = np.amax(heatmaps_flatten, axis=1) + locs[vals <= 0.] = -1 + + if B: + locs = locs.reshape(B, K, 2) + vals = vals.reshape(B, K) + + return locs, vals + + +def gaussian_blur(heatmaps: np.ndarray, kernel: int = 11) -> np.ndarray: + """Modulate heatmap distribution with Gaussian. 
+ + Note: + - num_keypoints: K + - heatmap height: H + - heatmap width: W + + Args: + heatmaps (np.ndarray[K, H, W]): model predicted heatmaps. + kernel (int): Gaussian kernel size (K) for modulation, which should + match the heatmap gaussian sigma when training. + K=17 for sigma=3 and k=11 for sigma=2. + + Returns: + np.ndarray ([K, H, W]): Modulated heatmap distribution. + """ + assert kernel % 2 == 1 + + border = (kernel - 1) // 2 + K, H, W = heatmaps.shape + + for k in range(K): + origin_max = np.max(heatmaps[k]) + dr = np.zeros((H + 2 * border, W + 2 * border), dtype=np.float32) + dr[border:-border, border:-border] = heatmaps[k].copy() + dr = cv2.GaussianBlur(dr, (kernel, kernel), 0) + heatmaps[k] = dr[border:-border, border:-border].copy() + heatmaps[k] *= origin_max / np.max(heatmaps[k]) + return heatmaps + + +def gaussian_blur1d(simcc: np.ndarray, kernel: int = 11) -> np.ndarray: + """Modulate simcc distribution with Gaussian. + + Note: + - num_keypoints: K + - simcc length: Wx + + Args: + simcc (np.ndarray[K, Wx]): model predicted simcc. + kernel (int): Gaussian kernel size (K) for modulation, which should + match the simcc gaussian sigma when training. + K=17 for sigma=3 and k=11 for sigma=2. + + Returns: + np.ndarray ([K, Wx]): Modulated simcc distribution. + """ + assert kernel % 2 == 1 + + border = (kernel - 1) // 2 + N, K, Wx = simcc.shape + + for n, k in product(range(N), range(K)): + origin_max = np.max(simcc[n, k]) + dr = np.zeros((1, Wx + 2 * border), dtype=np.float32) + dr[0, border:-border] = simcc[n, k].copy() + dr = cv2.GaussianBlur(dr, (kernel, 1), 0) + simcc[n, k] = dr[0, border:-border].copy() + simcc[n, k] *= origin_max / np.max(simcc[n, k]) + return simcc + + +def batch_heatmap_nms(batch_heatmaps: Tensor, kernel_size: int = 5): + """Apply NMS on a batch of heatmaps. + + Args: + batch_heatmaps (Tensor): batch heatmaps in shape (B, K, H, W) + kernel_size (int): The kernel size of the NMS which should be + a odd integer. Defaults to 5 + + Returns: + Tensor: The batch heatmaps after NMS. + """ + + assert isinstance(kernel_size, int) and kernel_size % 2 == 1, \ + f'The kernel_size should be an odd integer, got {kernel_size}' + + padding = (kernel_size - 1) // 2 + + maximum = F.max_pool2d( + batch_heatmaps, kernel_size, stride=1, padding=padding) + maximum_indicator = torch.eq(batch_heatmaps, maximum) + batch_heatmaps = batch_heatmaps * maximum_indicator.float() + + return batch_heatmaps diff --git a/mmpose/codecs/utils/refinement.py b/mmpose/codecs/utils/refinement.py index 3495f37d0a..2dd94feb71 100644 --- a/mmpose/codecs/utils/refinement.py +++ b/mmpose/codecs/utils/refinement.py @@ -1,215 +1,215 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from itertools import product - -import numpy as np - -from .post_processing import gaussian_blur, gaussian_blur1d - - -def refine_keypoints(keypoints: np.ndarray, - heatmaps: np.ndarray) -> np.ndarray: - """Refine keypoint predictions by moving from the maximum towards the - second maximum by 0.25 pixel. The operation is in-place. 
- - Note: - - - instance number: N - - keypoint number: K - - keypoint dimension: D - - heatmap size: [W, H] - - Args: - keypoints (np.ndarray): The keypoint coordinates in shape (N, K, D) - heatmaps (np.ndarray): The heatmaps in shape (K, H, W) - - Returns: - np.ndarray: Refine keypoint coordinates in shape (N, K, D) - """ - N, K = keypoints.shape[:2] - H, W = heatmaps.shape[1:] - - for n, k in product(range(N), range(K)): - x, y = keypoints[n, k, :2].astype(int) - - if 1 < x < W - 1 and 0 < y < H: - dx = heatmaps[k, y, x + 1] - heatmaps[k, y, x - 1] - else: - dx = 0. - - if 1 < y < H - 1 and 0 < x < W: - dy = heatmaps[k, y + 1, x] - heatmaps[k, y - 1, x] - else: - dy = 0. - - keypoints[n, k] += np.sign([dx, dy], dtype=np.float32) * 0.25 - - return keypoints - - -def refine_keypoints_dark(keypoints: np.ndarray, heatmaps: np.ndarray, - blur_kernel_size: int) -> np.ndarray: - """Refine keypoint predictions using distribution aware coordinate - decoding. See `Dark Pose`_ for details. The operation is in-place. - - Note: - - - instance number: N - - keypoint number: K - - keypoint dimension: D - - heatmap size: [W, H] - - Args: - keypoints (np.ndarray): The keypoint coordinates in shape (N, K, D) - heatmaps (np.ndarray): The heatmaps in shape (K, H, W) - blur_kernel_size (int): The Gaussian blur kernel size of the heatmap - modulation - - Returns: - np.ndarray: Refine keypoint coordinates in shape (N, K, D) - - .. _`Dark Pose`: https://arxiv.org/abs/1910.06278 - """ - N, K = keypoints.shape[:2] - H, W = heatmaps.shape[1:] - - # modulate heatmaps - heatmaps = gaussian_blur(heatmaps, blur_kernel_size) - np.maximum(heatmaps, 1e-10, heatmaps) - np.log(heatmaps, heatmaps) - - for n, k in product(range(N), range(K)): - x, y = keypoints[n, k, :2].astype(int) - if 1 < x < W - 2 and 1 < y < H - 2: - dx = 0.5 * (heatmaps[k, y, x + 1] - heatmaps[k, y, x - 1]) - dy = 0.5 * (heatmaps[k, y + 1, x] - heatmaps[k, y - 1, x]) - - dxx = 0.25 * ( - heatmaps[k, y, x + 2] - 2 * heatmaps[k, y, x] + - heatmaps[k, y, x - 2]) - dxy = 0.25 * ( - heatmaps[k, y + 1, x + 1] - heatmaps[k, y - 1, x + 1] - - heatmaps[k, y + 1, x - 1] + heatmaps[k, y - 1, x - 1]) - dyy = 0.25 * ( - heatmaps[k, y + 2, x] - 2 * heatmaps[k, y, x] + - heatmaps[k, y - 2, x]) - derivative = np.array([[dx], [dy]]) - hessian = np.array([[dxx, dxy], [dxy, dyy]]) - if dxx * dyy - dxy**2 != 0: - hessianinv = np.linalg.inv(hessian) - offset = -hessianinv @ derivative - offset = np.squeeze(np.array(offset.T), axis=0) - keypoints[n, k, :2] += offset - return keypoints - - -def refine_keypoints_dark_udp(keypoints: np.ndarray, heatmaps: np.ndarray, - blur_kernel_size: int) -> np.ndarray: - """Refine keypoint predictions using distribution aware coordinate decoding - for UDP. See `UDP`_ for details. The operation is in-place. - - Note: - - - instance number: N - - keypoint number: K - - keypoint dimension: D - - heatmap size: [W, H] - - Args: - keypoints (np.ndarray): The keypoint coordinates in shape (N, K, D) - heatmaps (np.ndarray): The heatmaps in shape (K, H, W) - blur_kernel_size (int): The Gaussian blur kernel size of the heatmap - modulation - - Returns: - np.ndarray: Refine keypoint coordinates in shape (N, K, D) - - .. 
_`UDP`: https://arxiv.org/abs/1911.07524 - """ - N, K = keypoints.shape[:2] - H, W = heatmaps.shape[1:] - - # modulate heatmaps - heatmaps = gaussian_blur(heatmaps, blur_kernel_size) - np.clip(heatmaps, 1e-3, 50., heatmaps) - np.log(heatmaps, heatmaps) - - heatmaps_pad = np.pad( - heatmaps, ((0, 0), (1, 1), (1, 1)), mode='edge').flatten() - - for n in range(N): - index = keypoints[n, :, 0] + 1 + (keypoints[n, :, 1] + 1) * (W + 2) - index += (W + 2) * (H + 2) * np.arange(0, K) - index = index.astype(int).reshape(-1, 1) - i_ = heatmaps_pad[index] - ix1 = heatmaps_pad[index + 1] - iy1 = heatmaps_pad[index + W + 2] - ix1y1 = heatmaps_pad[index + W + 3] - ix1_y1_ = heatmaps_pad[index - W - 3] - ix1_ = heatmaps_pad[index - 1] - iy1_ = heatmaps_pad[index - 2 - W] - - dx = 0.5 * (ix1 - ix1_) - dy = 0.5 * (iy1 - iy1_) - derivative = np.concatenate([dx, dy], axis=1) - derivative = derivative.reshape(K, 2, 1) - - dxx = ix1 - 2 * i_ + ix1_ - dyy = iy1 - 2 * i_ + iy1_ - dxy = 0.5 * (ix1y1 - ix1 - iy1 + i_ + i_ - ix1_ - iy1_ + ix1_y1_) - hessian = np.concatenate([dxx, dxy, dxy, dyy], axis=1) - hessian = hessian.reshape(K, 2, 2) - hessian = np.linalg.inv(hessian + np.finfo(np.float32).eps * np.eye(2)) - keypoints[n] -= np.einsum('imn,ink->imk', hessian, - derivative).squeeze() - - return keypoints - - -def refine_simcc_dark(keypoints: np.ndarray, simcc: np.ndarray, - blur_kernel_size: int) -> np.ndarray: - """SimCC version. Refine keypoint predictions using distribution aware - coordinate decoding for UDP. See `UDP`_ for details. The operation is in- - place. - - Note: - - - instance number: N - - keypoint number: K - - keypoint dimension: D - - Args: - keypoints (np.ndarray): The keypoint coordinates in shape (N, K, D) - simcc (np.ndarray): The heatmaps in shape (N, K, Wx) - blur_kernel_size (int): The Gaussian blur kernel size of the heatmap - modulation - - Returns: - np.ndarray: Refine keypoint coordinates in shape (N, K, D) - - .. _`UDP`: https://arxiv.org/abs/1911.07524 - """ - N = simcc.shape[0] - - # modulate simcc - simcc = gaussian_blur1d(simcc, blur_kernel_size) - np.clip(simcc, 1e-3, 50., simcc) - np.log(simcc, simcc) - - simcc = np.pad(simcc, ((0, 0), (0, 0), (2, 2)), 'edge') - - for n in range(N): - px = (keypoints[n] + 2.5).astype(np.int64).reshape(-1, 1) # K, 1 - - dx0 = np.take_along_axis(simcc[n], px, axis=1) # K, 1 - dx1 = np.take_along_axis(simcc[n], px + 1, axis=1) - dx_1 = np.take_along_axis(simcc[n], px - 1, axis=1) - dx2 = np.take_along_axis(simcc[n], px + 2, axis=1) - dx_2 = np.take_along_axis(simcc[n], px - 2, axis=1) - - dx = 0.5 * (dx1 - dx_1) - dxx = 1e-9 + 0.25 * (dx2 - 2 * dx0 + dx_2) - - offset = dx / dxx - keypoints[n] -= offset.reshape(-1) - - return keypoints +# Copyright (c) OpenMMLab. All rights reserved. +from itertools import product + +import numpy as np + +from .post_processing import gaussian_blur, gaussian_blur1d + + +def refine_keypoints(keypoints: np.ndarray, + heatmaps: np.ndarray) -> np.ndarray: + """Refine keypoint predictions by moving from the maximum towards the + second maximum by 0.25 pixel. The operation is in-place. 
+ + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - heatmap size: [W, H] + + Args: + keypoints (np.ndarray): The keypoint coordinates in shape (N, K, D) + heatmaps (np.ndarray): The heatmaps in shape (K, H, W) + + Returns: + np.ndarray: Refine keypoint coordinates in shape (N, K, D) + """ + N, K = keypoints.shape[:2] + H, W = heatmaps.shape[1:] + + for n, k in product(range(N), range(K)): + x, y = keypoints[n, k, :2].astype(int) + + if 1 < x < W - 1 and 0 < y < H: + dx = heatmaps[k, y, x + 1] - heatmaps[k, y, x - 1] + else: + dx = 0. + + if 1 < y < H - 1 and 0 < x < W: + dy = heatmaps[k, y + 1, x] - heatmaps[k, y - 1, x] + else: + dy = 0. + + keypoints[n, k] += np.sign([dx, dy], dtype=np.float32) * 0.25 + + return keypoints + + +def refine_keypoints_dark(keypoints: np.ndarray, heatmaps: np.ndarray, + blur_kernel_size: int) -> np.ndarray: + """Refine keypoint predictions using distribution aware coordinate + decoding. See `Dark Pose`_ for details. The operation is in-place. + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - heatmap size: [W, H] + + Args: + keypoints (np.ndarray): The keypoint coordinates in shape (N, K, D) + heatmaps (np.ndarray): The heatmaps in shape (K, H, W) + blur_kernel_size (int): The Gaussian blur kernel size of the heatmap + modulation + + Returns: + np.ndarray: Refine keypoint coordinates in shape (N, K, D) + + .. _`Dark Pose`: https://arxiv.org/abs/1910.06278 + """ + N, K = keypoints.shape[:2] + H, W = heatmaps.shape[1:] + + # modulate heatmaps + heatmaps = gaussian_blur(heatmaps, blur_kernel_size) + np.maximum(heatmaps, 1e-10, heatmaps) + np.log(heatmaps, heatmaps) + + for n, k in product(range(N), range(K)): + x, y = keypoints[n, k, :2].astype(int) + if 1 < x < W - 2 and 1 < y < H - 2: + dx = 0.5 * (heatmaps[k, y, x + 1] - heatmaps[k, y, x - 1]) + dy = 0.5 * (heatmaps[k, y + 1, x] - heatmaps[k, y - 1, x]) + + dxx = 0.25 * ( + heatmaps[k, y, x + 2] - 2 * heatmaps[k, y, x] + + heatmaps[k, y, x - 2]) + dxy = 0.25 * ( + heatmaps[k, y + 1, x + 1] - heatmaps[k, y - 1, x + 1] - + heatmaps[k, y + 1, x - 1] + heatmaps[k, y - 1, x - 1]) + dyy = 0.25 * ( + heatmaps[k, y + 2, x] - 2 * heatmaps[k, y, x] + + heatmaps[k, y - 2, x]) + derivative = np.array([[dx], [dy]]) + hessian = np.array([[dxx, dxy], [dxy, dyy]]) + if dxx * dyy - dxy**2 != 0: + hessianinv = np.linalg.inv(hessian) + offset = -hessianinv @ derivative + offset = np.squeeze(np.array(offset.T), axis=0) + keypoints[n, k, :2] += offset + return keypoints + + +def refine_keypoints_dark_udp(keypoints: np.ndarray, heatmaps: np.ndarray, + blur_kernel_size: int) -> np.ndarray: + """Refine keypoint predictions using distribution aware coordinate decoding + for UDP. See `UDP`_ for details. The operation is in-place. + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - heatmap size: [W, H] + + Args: + keypoints (np.ndarray): The keypoint coordinates in shape (N, K, D) + heatmaps (np.ndarray): The heatmaps in shape (K, H, W) + blur_kernel_size (int): The Gaussian blur kernel size of the heatmap + modulation + + Returns: + np.ndarray: Refine keypoint coordinates in shape (N, K, D) + + .. 
_`UDP`: https://arxiv.org/abs/1911.07524 + """ + N, K = keypoints.shape[:2] + H, W = heatmaps.shape[1:] + + # modulate heatmaps + heatmaps = gaussian_blur(heatmaps, blur_kernel_size) + np.clip(heatmaps, 1e-3, 50., heatmaps) + np.log(heatmaps, heatmaps) + + heatmaps_pad = np.pad( + heatmaps, ((0, 0), (1, 1), (1, 1)), mode='edge').flatten() + + for n in range(N): + index = keypoints[n, :, 0] + 1 + (keypoints[n, :, 1] + 1) * (W + 2) + index += (W + 2) * (H + 2) * np.arange(0, K) + index = index.astype(int).reshape(-1, 1) + i_ = heatmaps_pad[index] + ix1 = heatmaps_pad[index + 1] + iy1 = heatmaps_pad[index + W + 2] + ix1y1 = heatmaps_pad[index + W + 3] + ix1_y1_ = heatmaps_pad[index - W - 3] + ix1_ = heatmaps_pad[index - 1] + iy1_ = heatmaps_pad[index - 2 - W] + + dx = 0.5 * (ix1 - ix1_) + dy = 0.5 * (iy1 - iy1_) + derivative = np.concatenate([dx, dy], axis=1) + derivative = derivative.reshape(K, 2, 1) + + dxx = ix1 - 2 * i_ + ix1_ + dyy = iy1 - 2 * i_ + iy1_ + dxy = 0.5 * (ix1y1 - ix1 - iy1 + i_ + i_ - ix1_ - iy1_ + ix1_y1_) + hessian = np.concatenate([dxx, dxy, dxy, dyy], axis=1) + hessian = hessian.reshape(K, 2, 2) + hessian = np.linalg.inv(hessian + np.finfo(np.float32).eps * np.eye(2)) + keypoints[n] -= np.einsum('imn,ink->imk', hessian, + derivative).squeeze() + + return keypoints + + +def refine_simcc_dark(keypoints: np.ndarray, simcc: np.ndarray, + blur_kernel_size: int) -> np.ndarray: + """SimCC version. Refine keypoint predictions using distribution aware + coordinate decoding for UDP. See `UDP`_ for details. The operation is in- + place. + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + + Args: + keypoints (np.ndarray): The keypoint coordinates in shape (N, K, D) + simcc (np.ndarray): The heatmaps in shape (N, K, Wx) + blur_kernel_size (int): The Gaussian blur kernel size of the heatmap + modulation + + Returns: + np.ndarray: Refine keypoint coordinates in shape (N, K, D) + + .. _`UDP`: https://arxiv.org/abs/1911.07524 + """ + N = simcc.shape[0] + + # modulate simcc + simcc = gaussian_blur1d(simcc, blur_kernel_size) + np.clip(simcc, 1e-3, 50., simcc) + np.log(simcc, simcc) + + simcc = np.pad(simcc, ((0, 0), (0, 0), (2, 2)), 'edge') + + for n in range(N): + px = (keypoints[n] + 2.5).astype(np.int64).reshape(-1, 1) # K, 1 + + dx0 = np.take_along_axis(simcc[n], px, axis=1) # K, 1 + dx1 = np.take_along_axis(simcc[n], px + 1, axis=1) + dx_1 = np.take_along_axis(simcc[n], px - 1, axis=1) + dx2 = np.take_along_axis(simcc[n], px + 2, axis=1) + dx_2 = np.take_along_axis(simcc[n], px - 2, axis=1) + + dx = 0.5 * (dx1 - dx_1) + dxx = 1e-9 + 0.25 * (dx2 - 2 * dx0 + dx_2) + + offset = dx / dxx + keypoints[n] -= offset.reshape(-1) + + return keypoints diff --git a/mmpose/codecs/video_pose_lifting.py b/mmpose/codecs/video_pose_lifting.py index 56cf35fa2d..cdc17aa111 100644 --- a/mmpose/codecs/video_pose_lifting.py +++ b/mmpose/codecs/video_pose_lifting.py @@ -1,202 +1,202 @@ -# Copyright (c) OpenMMLab. All rights reserved. - -from copy import deepcopy -from typing import Optional, Tuple - -import numpy as np - -from mmpose.registry import KEYPOINT_CODECS -from .base import BaseKeypointCodec - - -@KEYPOINT_CODECS.register_module() -class VideoPoseLifting(BaseKeypointCodec): - r"""Generate keypoint coordinates for pose lifter. - - Note: - - - instance number: N - - keypoint number: K - - keypoint dimension: D - - pose-lifitng target dimension: C - - Args: - num_keypoints (int): The number of keypoints in the dataset. 
- zero_center: Whether to zero-center the target around root. Default: - ``True``. - root_index (int): Root keypoint index in the pose. Default: 0. - remove_root (bool): If true, remove the root keypoint from the pose. - Default: ``False``. - save_index (bool): If true, store the root position separated from the - original pose, only takes effect if ``remove_root`` is ``True``. - Default: ``False``. - normalize_camera (bool): Whether to normalize camera intrinsics. - Default: ``False``. - """ - - auxiliary_encode_keys = { - 'lifting_target', 'lifting_target_visible', 'camera_param' - } - - def __init__(self, - num_keypoints: int, - zero_center: bool = True, - root_index: int = 0, - remove_root: bool = False, - save_index: bool = False, - normalize_camera: bool = False): - super().__init__() - - self.num_keypoints = num_keypoints - self.zero_center = zero_center - self.root_index = root_index - self.remove_root = remove_root - self.save_index = save_index - self.normalize_camera = normalize_camera - - def encode(self, - keypoints: np.ndarray, - keypoints_visible: Optional[np.ndarray] = None, - lifting_target: Optional[np.ndarray] = None, - lifting_target_visible: Optional[np.ndarray] = None, - camera_param: Optional[dict] = None) -> dict: - """Encoding keypoints from input image space to normalized space. - - Args: - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D). - keypoints_visible (np.ndarray, optional): Keypoint visibilities in - shape (N, K). - lifting_target (np.ndarray, optional): 3d target coordinate in - shape (K, C). - lifting_target_visible (np.ndarray, optional): Target coordinate in - shape (K, ). - camera_param (dict, optional): The camera parameter dictionary. - - Returns: - encoded (dict): Contains the following items: - - - keypoint_labels (np.ndarray): The processed keypoints in - shape (K * D, N) where D is 2 for 2d coordinates. - - lifting_target_label: The processed target coordinate in - shape (K, C) or (K-1, C). - - lifting_target_weights (np.ndarray): The target weights in - shape (K, ) or (K-1, ). - - trajectory_weights (np.ndarray): The trajectory weights in - shape (K, ). - - In addition, there are some optional items it may contain: - - - target_root (np.ndarray): The root coordinate of target in - shape (C, ). Exists if ``self.zero_center`` is ``True``. - - target_root_removed (bool): Indicate whether the root of - pose-lifitng target is removed. Exists if - ``self.remove_root`` is ``True``. - - target_root_index (int): An integer indicating the index of - root. Exists if ``self.remove_root`` and ``self.save_index`` - are ``True``. - - camera_param (dict): The updated camera parameter dictionary. - Exists if ``self.normalize_camera`` is ``True``. 
- """ - if keypoints_visible is None: - keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) - - if lifting_target is None: - lifting_target = keypoints[0] - - # set initial value for `lifting_target_weights` - # and `trajectory_weights` - if lifting_target_visible is None: - lifting_target_visible = np.ones( - lifting_target.shape[:-1], dtype=np.float32) - lifting_target_weights = lifting_target_visible - trajectory_weights = (1 / lifting_target[:, 2]) - else: - valid = lifting_target_visible > 0.5 - lifting_target_weights = np.where(valid, 1., 0.).astype(np.float32) - trajectory_weights = lifting_target_weights - - if camera_param is None: - camera_param = dict() - - encoded = dict() - - lifting_target_label = lifting_target.copy() - # Zero-center the target pose around a given root keypoint - if self.zero_center: - assert (lifting_target.ndim >= 2 and - lifting_target.shape[-2] > self.root_index), \ - f'Got invalid joint shape {lifting_target.shape}' - - root = lifting_target[..., self.root_index, :] - lifting_target_label = lifting_target_label - root - encoded['target_root'] = root - - if self.remove_root: - lifting_target_label = np.delete( - lifting_target_label, self.root_index, axis=-2) - assert lifting_target_weights.ndim in {1, 2} - axis_to_remove = -2 if lifting_target_weights.ndim == 2 else -1 - lifting_target_weights = np.delete( - lifting_target_weights, - self.root_index, - axis=axis_to_remove) - # Add a flag to avoid latter transforms that rely on the root - # joint or the original joint index - encoded['target_root_removed'] = True - - # Save the root index for restoring the global pose - if self.save_index: - encoded['target_root_index'] = self.root_index - - # Normalize the 2D keypoint coordinate with image width and height - _camera_param = deepcopy(camera_param) - assert 'w' in _camera_param and 'h' in _camera_param - center = np.array([0.5 * _camera_param['w'], 0.5 * _camera_param['h']], - dtype=np.float32) - scale = np.array(0.5 * _camera_param['w'], dtype=np.float32) - - keypoint_labels = (keypoints - center) / scale - - assert keypoint_labels.ndim in {2, 3} - if keypoint_labels.ndim == 2: - keypoint_labels = keypoint_labels[None, ...] - - if self.normalize_camera: - assert 'f' in _camera_param and 'c' in _camera_param - _camera_param['f'] = _camera_param['f'] / scale - _camera_param['c'] = (_camera_param['c'] - center[:, None]) / scale - encoded['camera_param'] = _camera_param - - encoded['keypoint_labels'] = keypoint_labels - encoded['lifting_target_label'] = lifting_target_label - encoded['lifting_target_weights'] = lifting_target_weights - encoded['trajectory_weights'] = trajectory_weights - - return encoded - - def decode(self, - encoded: np.ndarray, - target_root: Optional[np.ndarray] = None - ) -> Tuple[np.ndarray, np.ndarray]: - """Decode keypoint coordinates from normalized space to input image - space. - - Args: - encoded (np.ndarray): Coordinates in shape (N, K, C). - target_root (np.ndarray, optional): The pose-lifitng target root - coordinate. Default: ``None``. - - Returns: - keypoints (np.ndarray): Decoded coordinates in shape (N, K, C). - scores (np.ndarray): The keypoint scores in shape (N, K). - """ - keypoints = encoded.copy() - - if target_root.size > 0: - keypoints = keypoints + np.expand_dims(target_root, axis=0) - if self.remove_root: - keypoints = np.insert( - keypoints, self.root_index, target_root, axis=1) - scores = np.ones(keypoints.shape[:-1], dtype=np.float32) - - return keypoints, scores +# Copyright (c) OpenMMLab. 
All rights reserved. + +from copy import deepcopy +from typing import Optional, Tuple + +import numpy as np + +from mmpose.registry import KEYPOINT_CODECS +from .base import BaseKeypointCodec + + +@KEYPOINT_CODECS.register_module() +class VideoPoseLifting(BaseKeypointCodec): + r"""Generate keypoint coordinates for pose lifter. + + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - pose-lifitng target dimension: C + + Args: + num_keypoints (int): The number of keypoints in the dataset. + zero_center: Whether to zero-center the target around root. Default: + ``True``. + root_index (int): Root keypoint index in the pose. Default: 0. + remove_root (bool): If true, remove the root keypoint from the pose. + Default: ``False``. + save_index (bool): If true, store the root position separated from the + original pose, only takes effect if ``remove_root`` is ``True``. + Default: ``False``. + normalize_camera (bool): Whether to normalize camera intrinsics. + Default: ``False``. + """ + + auxiliary_encode_keys = { + 'lifting_target', 'lifting_target_visible', 'camera_param' + } + + def __init__(self, + num_keypoints: int, + zero_center: bool = True, + root_index: int = 0, + remove_root: bool = False, + save_index: bool = False, + normalize_camera: bool = False): + super().__init__() + + self.num_keypoints = num_keypoints + self.zero_center = zero_center + self.root_index = root_index + self.remove_root = remove_root + self.save_index = save_index + self.normalize_camera = normalize_camera + + def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None, + lifting_target: Optional[np.ndarray] = None, + lifting_target_visible: Optional[np.ndarray] = None, + camera_param: Optional[dict] = None) -> dict: + """Encoding keypoints from input image space to normalized space. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D). + keypoints_visible (np.ndarray, optional): Keypoint visibilities in + shape (N, K). + lifting_target (np.ndarray, optional): 3d target coordinate in + shape (K, C). + lifting_target_visible (np.ndarray, optional): Target coordinate in + shape (K, ). + camera_param (dict, optional): The camera parameter dictionary. + + Returns: + encoded (dict): Contains the following items: + + - keypoint_labels (np.ndarray): The processed keypoints in + shape (K * D, N) where D is 2 for 2d coordinates. + - lifting_target_label: The processed target coordinate in + shape (K, C) or (K-1, C). + - lifting_target_weights (np.ndarray): The target weights in + shape (K, ) or (K-1, ). + - trajectory_weights (np.ndarray): The trajectory weights in + shape (K, ). + + In addition, there are some optional items it may contain: + + - target_root (np.ndarray): The root coordinate of target in + shape (C, ). Exists if ``self.zero_center`` is ``True``. + - target_root_removed (bool): Indicate whether the root of + pose-lifitng target is removed. Exists if + ``self.remove_root`` is ``True``. + - target_root_index (int): An integer indicating the index of + root. Exists if ``self.remove_root`` and ``self.save_index`` + are ``True``. + - camera_param (dict): The updated camera parameter dictionary. + Exists if ``self.normalize_camera`` is ``True``. 
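+
+        Example (illustrative; the codec is assumed to be exported from
+        ``mmpose.codecs``)::
+
+            >>> import numpy as np
+            >>> from mmpose.codecs import VideoPoseLifting
+            >>> codec = VideoPoseLifting(num_keypoints=17)
+            >>> kpts_2d = np.random.rand(1, 17, 2).astype(np.float32)
+            >>> target_3d = np.ones((17, 3), dtype=np.float32)
+            >>> encoded = codec.encode(
+            ...     kpts_2d,
+            ...     lifting_target=target_3d,
+            ...     camera_param=dict(w=1000, h=1000))
+            >>> encoded['keypoint_labels'].shape
+            (1, 17, 2)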
+ """ + if keypoints_visible is None: + keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) + + if lifting_target is None: + lifting_target = keypoints[0] + + # set initial value for `lifting_target_weights` + # and `trajectory_weights` + if lifting_target_visible is None: + lifting_target_visible = np.ones( + lifting_target.shape[:-1], dtype=np.float32) + lifting_target_weights = lifting_target_visible + trajectory_weights = (1 / lifting_target[:, 2]) + else: + valid = lifting_target_visible > 0.5 + lifting_target_weights = np.where(valid, 1., 0.).astype(np.float32) + trajectory_weights = lifting_target_weights + + if camera_param is None: + camera_param = dict() + + encoded = dict() + + lifting_target_label = lifting_target.copy() + # Zero-center the target pose around a given root keypoint + if self.zero_center: + assert (lifting_target.ndim >= 2 and + lifting_target.shape[-2] > self.root_index), \ + f'Got invalid joint shape {lifting_target.shape}' + + root = lifting_target[..., self.root_index, :] + lifting_target_label = lifting_target_label - root + encoded['target_root'] = root + + if self.remove_root: + lifting_target_label = np.delete( + lifting_target_label, self.root_index, axis=-2) + assert lifting_target_weights.ndim in {1, 2} + axis_to_remove = -2 if lifting_target_weights.ndim == 2 else -1 + lifting_target_weights = np.delete( + lifting_target_weights, + self.root_index, + axis=axis_to_remove) + # Add a flag to avoid latter transforms that rely on the root + # joint or the original joint index + encoded['target_root_removed'] = True + + # Save the root index for restoring the global pose + if self.save_index: + encoded['target_root_index'] = self.root_index + + # Normalize the 2D keypoint coordinate with image width and height + _camera_param = deepcopy(camera_param) + assert 'w' in _camera_param and 'h' in _camera_param + center = np.array([0.5 * _camera_param['w'], 0.5 * _camera_param['h']], + dtype=np.float32) + scale = np.array(0.5 * _camera_param['w'], dtype=np.float32) + + keypoint_labels = (keypoints - center) / scale + + assert keypoint_labels.ndim in {2, 3} + if keypoint_labels.ndim == 2: + keypoint_labels = keypoint_labels[None, ...] + + if self.normalize_camera: + assert 'f' in _camera_param and 'c' in _camera_param + _camera_param['f'] = _camera_param['f'] / scale + _camera_param['c'] = (_camera_param['c'] - center[:, None]) / scale + encoded['camera_param'] = _camera_param + + encoded['keypoint_labels'] = keypoint_labels + encoded['lifting_target_label'] = lifting_target_label + encoded['lifting_target_weights'] = lifting_target_weights + encoded['trajectory_weights'] = trajectory_weights + + return encoded + + def decode(self, + encoded: np.ndarray, + target_root: Optional[np.ndarray] = None + ) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoint coordinates from normalized space to input image + space. + + Args: + encoded (np.ndarray): Coordinates in shape (N, K, C). + target_root (np.ndarray, optional): The pose-lifitng target root + coordinate. Default: ``None``. + + Returns: + keypoints (np.ndarray): Decoded coordinates in shape (N, K, C). + scores (np.ndarray): The keypoint scores in shape (N, K). 
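+
+        Example (illustrative; the codec is assumed to be exported from
+        ``mmpose.codecs``)::
+
+            >>> import numpy as np
+            >>> from mmpose.codecs import VideoPoseLifting
+            >>> codec = VideoPoseLifting(num_keypoints=17)
+            >>> pose = np.zeros((1, 17, 3), dtype=np.float32)
+            >>> root = np.ones(3, dtype=np.float32)
+            >>> kpts, scores = codec.decode(pose, target_root=root)
+            >>> kpts.shape, scores.shape
+            ((1, 17, 3), (1, 17))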
+ """ + keypoints = encoded.copy() + + if target_root.size > 0: + keypoints = keypoints + np.expand_dims(target_root, axis=0) + if self.remove_root: + keypoints = np.insert( + keypoints, self.root_index, target_root, axis=1) + scores = np.ones(keypoints.shape[:-1], dtype=np.float32) + + return keypoints, scores diff --git a/mmpose/configs/_base_/default_runtime.py b/mmpose/configs/_base_/default_runtime.py index 349ecf4b17..5df17b0db4 100644 --- a/mmpose/configs/_base_/default_runtime.py +++ b/mmpose/configs/_base_/default_runtime.py @@ -1,54 +1,54 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook, - LoggerHook, ParamSchedulerHook, SyncBuffersHook) -from mmengine.runner import LogProcessor -from mmengine.visualization import LocalVisBackend - -from mmpose.engine.hooks import PoseVisualizationHook -from mmpose.visualization import PoseLocalVisualizer - -default_scope = None - -# hooks -default_hooks = dict( - timer=dict(type=IterTimerHook), - logger=dict(type=LoggerHook, interval=50), - param_scheduler=dict(type=ParamSchedulerHook), - checkpoint=dict(type=CheckpointHook, interval=10), - sampler_seed=dict(type=DistSamplerSeedHook), - visualization=dict(type=PoseVisualizationHook, enable=False), -) - -# custom hooks -custom_hooks = [ - # Synchronize model buffers such as running_mean and running_var in BN - # at the end of each epoch - dict(type=SyncBuffersHook) -] - -# multi-processing backend -env_cfg = dict( - cudnn_benchmark=False, - mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), - dist_cfg=dict(backend='nccl'), -) - -# visualizer -vis_backends = [dict(type=LocalVisBackend)] -visualizer = dict( - type=PoseLocalVisualizer, vis_backends=vis_backends, name='visualizer') - -# logger -log_processor = dict( - type=LogProcessor, window_size=50, by_epoch=True, num_digits=6) -log_level = 'INFO' -load_from = None -resume = False - -# file I/O backend -backend_args = dict(backend='local') - -# training/validation/testing progress -train_cfg = dict(by_epoch=True) -val_cfg = dict() -test_cfg = dict() +# Copyright (c) OpenMMLab. All rights reserved. 
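+# Downstream configs pull these runtime defaults in with ``read_base()`` and
+# then override individual fields in place. A minimal sketch of that pattern
+# (it matches the RTMPose configs under mmpose/configs/body_2d_keypoint/):
+#
+#     with read_base():
+#         from mmpose.configs._base_.default_runtime import *
+#
+#     train_cfg.update(max_epochs=420, val_interval=10)
+#     default_hooks.update(
+#         checkpoint=dict(save_best='coco/AP', rule='greater'))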
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook, + LoggerHook, ParamSchedulerHook, SyncBuffersHook) +from mmengine.runner import LogProcessor +from mmengine.visualization import LocalVisBackend + +from mmpose.engine.hooks import PoseVisualizationHook +from mmpose.visualization import PoseLocalVisualizer + +default_scope = None + +# hooks +default_hooks = dict( + timer=dict(type=IterTimerHook), + logger=dict(type=LoggerHook, interval=50), + param_scheduler=dict(type=ParamSchedulerHook), + checkpoint=dict(type=CheckpointHook, interval=10), + sampler_seed=dict(type=DistSamplerSeedHook), + visualization=dict(type=PoseVisualizationHook, enable=False), +) + +# custom hooks +custom_hooks = [ + # Synchronize model buffers such as running_mean and running_var in BN + # at the end of each epoch + dict(type=SyncBuffersHook) +] + +# multi-processing backend +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl'), +) + +# visualizer +vis_backends = [dict(type=LocalVisBackend)] +visualizer = dict( + type=PoseLocalVisualizer, vis_backends=vis_backends, name='visualizer') + +# logger +log_processor = dict( + type=LogProcessor, window_size=50, by_epoch=True, num_digits=6) +log_level = 'INFO' +load_from = None +resume = False + +# file I/O backend +backend_args = dict(backend='local') + +# training/validation/testing progress +train_cfg = dict(by_epoch=True) +val_cfg = dict() +test_cfg = dict() diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose_m_8xb256-420e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose_m_8xb256-420e_coco-256x192.py index af102ec20e..8fdd6929cc 100644 --- a/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose_m_8xb256-420e_coco-256x192.py +++ b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose_m_8xb256-420e_coco-256x192.py @@ -1,253 +1,253 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from mmengine.config import read_base - -with read_base(): - from mmpose.configs._base_.default_runtime import * - -from albumentations.augmentations import Blur, CoarseDropout, MedianBlur -from mmdet.datasets.transforms import YOLOXHSVRandomAug -from mmdet.engine.hooks import PipelineSwitchHook -from mmdet.models import CSPNeXt -from mmengine.dataset import DefaultSampler -from mmengine.hooks import EMAHook -from mmengine.model import PretrainedInit -from mmengine.optim import CosineAnnealingLR, LinearLR, OptimWrapper -from torch.nn import SiLU, SyncBatchNorm -from torch.optim import AdamW - -from mmpose.codecs import SimCCLabel -from mmpose.datasets import (CocoDataset, GenerateTarget, GetBBoxCenterScale, - LoadImage, PackPoseInputs, RandomFlip, - RandomHalfBody, TopdownAffine) -from mmpose.datasets.transforms.common_transforms import (Albumentation, - RandomBBoxTransform) -from mmpose.engine.hooks import ExpMomentumEMA -from mmpose.evaluation import CocoMetric -from mmpose.models import (KLDiscretLoss, PoseDataPreprocessor, RTMCCHead, - TopdownPoseEstimator) - -# runtime -max_epochs = 420 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg.update(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type=OptimWrapper, - optimizer=dict(type=AdamW, lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type=LinearLR, start_factor=1.0e-5, by_epoch=False, begin=0, end=1000), - dict( - # use cosine lr from 210 to 420 epoch - type=CosineAnnealingLR, - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type=SimCCLabel, - input_size=(192, 256), - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type=TopdownPoseEstimator, - data_preprocessor=dict( - type=PoseDataPreprocessor, - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type=CSPNeXt, - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type=SyncBatchNorm), - act_cfg=dict(type=SiLU), - init_cfg=dict( - type=PretrainedInit, - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmpose/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa - )), - head=dict( - type=RTMCCHead, - in_channels=768, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=(6, 8), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type=KLDiscretLoss, - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = CocoDataset -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - 
-# pipelines -train_pipeline = [ - dict(type=LoadImage, backend_args=backend_args), - dict(type=GetBBoxCenterScale), - dict(type=RandomFlip, direction='horizontal'), - dict(type=RandomHalfBody), - dict(type=RandomBBoxTransform, scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type=TopdownAffine, input_size=codec['input_size']), - dict(type=YOLOXHSVRandomAug), - dict( - type=Albumentation, - transforms=[ - dict(type=Blur, p=0.1), - dict(type=MedianBlur, p=0.1), - dict( - type=CoarseDropout, - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type=GenerateTarget, encoder=codec), - dict(type=PackPoseInputs) -] -val_pipeline = [ - dict(type=LoadImage, backend_args=backend_args), - dict(type=GetBBoxCenterScale), - dict(type=TopdownAffine, input_size=codec['input_size']), - dict(type=PackPoseInputs) -] - -train_pipeline_stage2 = [ - dict(type=LoadImage, backend_args=backend_args), - dict(type=GetBBoxCenterScale), - dict(type=RandomFlip, direction='horizontal'), - dict(type=RandomHalfBody), - dict( - type=RandomBBoxTransform, - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type=TopdownAffine, input_size=codec['input_size']), - dict(type=YOLOXHSVRandomAug), - dict( - type=Albumentation, - transforms=[ - dict(type=Blur, p=0.1), - dict(type=MedianBlur, p=0.1), - dict( - type=CoarseDropout, - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type=GenerateTarget, encoder=codec), - dict(type=PackPoseInputs) -] - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - drop_last=True, - sampler=dict(type=DefaultSampler, shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type=DefaultSampler, shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - # bbox_file=f'{data_root}person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks.update( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type=EMAHook, - ema_type=ExpMomentumEMA, - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type=PipelineSwitchHook, - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type=CocoMetric, - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +# Copyright (c) OpenMMLab. All rights reserved. 
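+# Illustrative launch command (an assumption based on the standard MMPose
+# tools/ layout; "8xb256" in the file name denotes 8 GPUs x batch size 256):
+#
+#     bash tools/dist_train.sh \
+#         mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose_m_8xb256-420e_coco-256x192.py 8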
+from mmengine.config import read_base + +with read_base(): + from mmpose.configs._base_.default_runtime import * + +from albumentations.augmentations import Blur, CoarseDropout, MedianBlur +from mmdet.datasets.transforms import YOLOXHSVRandomAug +from mmdet.engine.hooks import PipelineSwitchHook +from mmdet.models import CSPNeXt +from mmengine.dataset import DefaultSampler +from mmengine.hooks import EMAHook +from mmengine.model import PretrainedInit +from mmengine.optim import CosineAnnealingLR, LinearLR, OptimWrapper +from torch.nn import SiLU, SyncBatchNorm +from torch.optim import AdamW + +from mmpose.codecs import SimCCLabel +from mmpose.datasets import (CocoDataset, GenerateTarget, GetBBoxCenterScale, + LoadImage, PackPoseInputs, RandomFlip, + RandomHalfBody, TopdownAffine) +from mmpose.datasets.transforms.common_transforms import (Albumentation, + RandomBBoxTransform) +from mmpose.engine.hooks import ExpMomentumEMA +from mmpose.evaluation import CocoMetric +from mmpose.models import (KLDiscretLoss, PoseDataPreprocessor, RTMCCHead, + TopdownPoseEstimator) + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg.update(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type=OptimWrapper, + optimizer=dict(type=AdamW, lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type=LinearLR, start_factor=1.0e-5, by_epoch=False, begin=0, end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type=CosineAnnealingLR, + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type=SimCCLabel, + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type=TopdownPoseEstimator, + data_preprocessor=dict( + type=PoseDataPreprocessor, + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type=CSPNeXt, + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type=SyncBatchNorm), + act_cfg=dict(type=SiLU), + init_cfg=dict( + type=PretrainedInit, + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmpose/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type=RTMCCHead, + in_channels=768, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=(6, 8), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type=KLDiscretLoss, + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = CocoDataset +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + 
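+# Two training pipelines are defined below: ``train_pipeline`` for the first
+# 390 epochs and ``train_pipeline_stage2`` (no bbox shift, milder
+# scale/rotate and CoarseDropout with p=0.5) for the last
+# ``stage2_num_epochs = 30`` epochs. The PipelineSwitchHook in
+# ``custom_hooks`` swaps them at epoch ``max_epochs - stage2_num_epochs``.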
+# pipelines +train_pipeline = [ + dict(type=LoadImage, backend_args=backend_args), + dict(type=GetBBoxCenterScale), + dict(type=RandomFlip, direction='horizontal'), + dict(type=RandomHalfBody), + dict(type=RandomBBoxTransform, scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type=TopdownAffine, input_size=codec['input_size']), + dict(type=YOLOXHSVRandomAug), + dict( + type=Albumentation, + transforms=[ + dict(type=Blur, p=0.1), + dict(type=MedianBlur, p=0.1), + dict( + type=CoarseDropout, + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type=GenerateTarget, encoder=codec), + dict(type=PackPoseInputs) +] +val_pipeline = [ + dict(type=LoadImage, backend_args=backend_args), + dict(type=GetBBoxCenterScale), + dict(type=TopdownAffine, input_size=codec['input_size']), + dict(type=PackPoseInputs) +] + +train_pipeline_stage2 = [ + dict(type=LoadImage, backend_args=backend_args), + dict(type=GetBBoxCenterScale), + dict(type=RandomFlip, direction='horizontal'), + dict(type=RandomHalfBody), + dict( + type=RandomBBoxTransform, + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type=TopdownAffine, input_size=codec['input_size']), + dict(type=YOLOXHSVRandomAug), + dict( + type=Albumentation, + transforms=[ + dict(type=Blur, p=0.1), + dict(type=MedianBlur, p=0.1), + dict( + type=CoarseDropout, + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type=GenerateTarget, encoder=codec), + dict(type=PackPoseInputs) +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + drop_last=True, + sampler=dict(type=DefaultSampler, shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type=DefaultSampler, shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + # bbox_file=f'{data_root}person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks.update( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type=EMAHook, + ema_type=ExpMomentumEMA, + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type=PipelineSwitchHook, + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type=CocoMetric, + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose_s_8xb256_420e_aic_coco_256x192.py b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose_s_8xb256_420e_aic_coco_256x192.py index 6fc5ec0abe..563552ad0d 100644 --- a/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose_s_8xb256_420e_aic_coco_256x192.py +++ b/mmpose/configs/body_2d_keypoint/rtmpose/coco/rtmpose_s_8xb256_420e_aic_coco_256x192.py @@ -1,294 +1,294 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from mmengine.config import read_base - -with read_base(): - from mmpose.configs._base_.default_runtime import * - -from albumentations.augmentations import Blur, CoarseDropout, MedianBlur -from mmdet.datasets.transforms import YOLOXHSVRandomAug -from mmdet.engine.hooks import PipelineSwitchHook -from mmdet.models import CSPNeXt -from mmengine.dataset import DefaultSampler, RepeatDataset -from mmengine.hooks import EMAHook -from mmengine.model import PretrainedInit -from mmengine.optim import CosineAnnealingLR, LinearLR, OptimWrapper -from torch.nn import SiLU, SyncBatchNorm -from torch.optim import AdamW - -from mmpose.codecs import SimCCLabel -from mmpose.datasets import (AicDataset, CocoDataset, CombinedDataset, - GenerateTarget, GetBBoxCenterScale, - KeypointConverter, LoadImage, PackPoseInputs, - RandomFlip, RandomHalfBody, TopdownAffine) -from mmpose.datasets.transforms.common_transforms import (Albumentation, - RandomBBoxTransform) -from mmpose.engine.hooks import ExpMomentumEMA -from mmpose.evaluation import CocoMetric -from mmpose.models import (KLDiscretLoss, PoseDataPreprocessor, RTMCCHead, - TopdownPoseEstimator) - -# runtime -max_epochs = 420 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg.update(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type=OptimWrapper, - optimizer=dict(type=AdamW, lr=base_lr, weight_decay=0.0), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type=LinearLR, start_factor=1.0e-5, by_epoch=False, begin=0, end=1000), - dict( - # use cosine lr from 210 to 420 epoch - type=CosineAnnealingLR, - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type=SimCCLabel, - input_size=(192, 256), - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type=TopdownPoseEstimator, - data_preprocessor=dict( - type=PoseDataPreprocessor, - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type=CSPNeXt, - arch='P5', - expand_ratio=0.5, - deepen_factor=0.33, - widen_factor=0.5, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type=SyncBatchNorm), - act_cfg=dict(type=SiLU), - init_cfg=dict( - type=PretrainedInit, - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth' # noqa - )), - head=dict( - type=RTMCCHead, - in_channels=512, - out_channels=17, - input_size=codec['input_size'], - in_featuremap_size=(6, 8), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type=KLDiscretLoss, - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = CocoDataset -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/', -# 
f'{data_root}': 's3://openmmlab/datasets/' -# })) - -# pipelines -train_pipeline = [ - dict(type=LoadImage, backend_args=backend_args), - dict(type=GetBBoxCenterScale), - dict(type=RandomFlip, direction='horizontal'), - dict(type=RandomHalfBody), - dict(type=RandomBBoxTransform, scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type=TopdownAffine, input_size=codec['input_size']), - dict(type=YOLOXHSVRandomAug), - dict( - type=Albumentation, - transforms=[ - dict(type=Blur, p=0.1), - dict(type=MedianBlur, p=0.1), - dict( - type=CoarseDropout, - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type=GenerateTarget, encoder=codec), - dict(type=PackPoseInputs) -] -val_pipeline = [ - dict(type=LoadImage, backend_args=backend_args), - dict(type=GetBBoxCenterScale), - dict(type=TopdownAffine, input_size=codec['input_size']), - dict(type=PackPoseInputs) -] - -train_pipeline_stage2 = [ - dict(type=LoadImage, backend_args=backend_args), - dict(type=GetBBoxCenterScale), - dict(type=RandomFlip, direction='horizontal'), - dict(type=RandomHalfBody), - dict( - type=RandomBBoxTransform, - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type=TopdownAffine, input_size=codec['input_size']), - dict(type=YOLOXHSVRandomAug), - dict( - type=Albumentation, - transforms=[ - dict(type=Blur, p=0.1), - dict(type=MedianBlur, p=0.1), - dict( - type=CoarseDropout, - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type=GenerateTarget, encoder=codec), - dict(type=PackPoseInputs) -] - -# train datasets -dataset_coco = dict( - type=RepeatDataset, - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_train2017.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[], - ), - times=3) - -dataset_aic = dict( - type=AicDataset, - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict( - type=KeypointConverter, - num_keypoints=17, - mapping=[ - (0, 6), - (1, 8), - (2, 10), - (3, 5), - (4, 7), - (5, 9), - (6, 12), - (7, 14), - (8, 16), - (9, 11), - (10, 13), - (11, 15), - ]) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=128 * 2, - num_workers=10, - persistent_workers=True, - sampler=dict(type=DefaultSampler, shuffle=True), - dataset=dict( - type=CombinedDataset, - metainfo=dict(from_file='configs/_base_/datasets/coco.py'), - datasets=[dataset_coco, dataset_aic], - pipeline=train_pipeline, - test_mode=False, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type=DefaultSampler, shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/person_keypoints_val2017.json', - # bbox_file='data/coco/person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='detection/coco/val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks.update( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type=EMAHook, - ema_type=ExpMomentumEMA, - momentum=0.0002, - update_buffers=True, - 
priority=49), - dict( - type=PipelineSwitchHook, - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type=CocoMetric, - ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +# Copyright (c) OpenMMLab. All rights reserved. +from mmengine.config import read_base + +with read_base(): + from mmpose.configs._base_.default_runtime import * + +from albumentations.augmentations import Blur, CoarseDropout, MedianBlur +from mmdet.datasets.transforms import YOLOXHSVRandomAug +from mmdet.engine.hooks import PipelineSwitchHook +from mmdet.models import CSPNeXt +from mmengine.dataset import DefaultSampler, RepeatDataset +from mmengine.hooks import EMAHook +from mmengine.model import PretrainedInit +from mmengine.optim import CosineAnnealingLR, LinearLR, OptimWrapper +from torch.nn import SiLU, SyncBatchNorm +from torch.optim import AdamW + +from mmpose.codecs import SimCCLabel +from mmpose.datasets import (AicDataset, CocoDataset, CombinedDataset, + GenerateTarget, GetBBoxCenterScale, + KeypointConverter, LoadImage, PackPoseInputs, + RandomFlip, RandomHalfBody, TopdownAffine) +from mmpose.datasets.transforms.common_transforms import (Albumentation, + RandomBBoxTransform) +from mmpose.engine.hooks import ExpMomentumEMA +from mmpose.evaluation import CocoMetric +from mmpose.models import (KLDiscretLoss, PoseDataPreprocessor, RTMCCHead, + TopdownPoseEstimator) + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg.update(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type=OptimWrapper, + optimizer=dict(type=AdamW, lr=base_lr, weight_decay=0.0), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type=LinearLR, start_factor=1.0e-5, by_epoch=False, begin=0, end=1000), + dict( + # use cosine lr from 210 to 420 epoch + type=CosineAnnealingLR, + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type=SimCCLabel, + input_size=(192, 256), + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type=TopdownPoseEstimator, + data_preprocessor=dict( + type=PoseDataPreprocessor, + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type=CSPNeXt, + arch='P5', + expand_ratio=0.5, + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type=SyncBatchNorm), + act_cfg=dict(type=SiLU), + init_cfg=dict( + type=PretrainedInit, + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth' # noqa + )), + head=dict( + type=RTMCCHead, + in_channels=512, + out_channels=17, + input_size=codec['input_size'], + in_featuremap_size=(6, 8), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type=KLDiscretLoss, + 
use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = CocoDataset +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/', +# f'{data_root}': 's3://openmmlab/datasets/' +# })) + +# pipelines +train_pipeline = [ + dict(type=LoadImage, backend_args=backend_args), + dict(type=GetBBoxCenterScale), + dict(type=RandomFlip, direction='horizontal'), + dict(type=RandomHalfBody), + dict(type=RandomBBoxTransform, scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type=TopdownAffine, input_size=codec['input_size']), + dict(type=YOLOXHSVRandomAug), + dict( + type=Albumentation, + transforms=[ + dict(type=Blur, p=0.1), + dict(type=MedianBlur, p=0.1), + dict( + type=CoarseDropout, + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type=GenerateTarget, encoder=codec), + dict(type=PackPoseInputs) +] +val_pipeline = [ + dict(type=LoadImage, backend_args=backend_args), + dict(type=GetBBoxCenterScale), + dict(type=TopdownAffine, input_size=codec['input_size']), + dict(type=PackPoseInputs) +] + +train_pipeline_stage2 = [ + dict(type=LoadImage, backend_args=backend_args), + dict(type=GetBBoxCenterScale), + dict(type=RandomFlip, direction='horizontal'), + dict(type=RandomHalfBody), + dict( + type=RandomBBoxTransform, + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type=TopdownAffine, input_size=codec['input_size']), + dict(type=YOLOXHSVRandomAug), + dict( + type=Albumentation, + transforms=[ + dict(type=Blur, p=0.1), + dict(type=MedianBlur, p=0.1), + dict( + type=CoarseDropout, + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type=GenerateTarget, encoder=codec), + dict(type=PackPoseInputs) +] + +# train datasets +dataset_coco = dict( + type=RepeatDataset, + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_train2017.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[], + ), + times=3) + +dataset_aic = dict( + type=AicDataset, + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type=KeypointConverter, + num_keypoints=17, + mapping=[ + (0, 6), + (1, 8), + (2, 10), + (3, 5), + (4, 7), + (5, 9), + (6, 12), + (7, 14), + (8, 16), + (9, 11), + (10, 13), + (11, 15), + ]) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=128 * 2, + num_workers=10, + persistent_workers=True, + sampler=dict(type=DefaultSampler, shuffle=True), + dataset=dict( + type=CombinedDataset, + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[dataset_coco, dataset_aic], + pipeline=train_pipeline, + test_mode=False, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type=DefaultSampler, shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 
'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='detection/coco/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks.update( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type=EMAHook, + ema_type=ExpMomentumEMA, + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type=PipelineSwitchHook, + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type=CocoMetric, + ann_file=data_root + 'coco/annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192.py b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192.py index 1ecf3a704e..92ac486d0f 100644 --- a/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192.py +++ b/mmpose/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192.py @@ -1,169 +1,169 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmengine.config import read_base - -with read_base(): - from mmpose.configs._base_.default_runtime import * - -from mmengine.dataset import DefaultSampler -from mmengine.model import PretrainedInit -from mmengine.optim import LinearLR, MultiStepLR -from torch.optim import Adam - -from mmpose.codecs import UDPHeatmap -from mmpose.datasets import (CocoDataset, GenerateTarget, GetBBoxCenterScale, - LoadImage, PackPoseInputs, RandomFlip, - RandomHalfBody, TopdownAffine) -from mmpose.datasets.transforms.common_transforms import RandomBBoxTransform -from mmpose.evaluation import CocoMetric -from mmpose.models import (HeatmapHead, HRNet, KeypointMSELoss, - PoseDataPreprocessor, TopdownPoseEstimator) - -# runtime -train_cfg.update(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type=Adam, - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict(type=LinearLR, begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type=MultiStepLR, - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks.update(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type=UDPHeatmap, input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type=TopdownPoseEstimator, - data_preprocessor=dict( - type=PoseDataPreprocessor, - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type=HRNet, - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(48, 96)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(48, 96, 192)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(48, 96, 192, 384))), - init_cfg=dict( - type=PretrainedInit, - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w48-8ef0771d.pth'), - ), - head=dict( - 
type=HeatmapHead, - in_channels=48, - out_channels=17, - deconv_out_channels=None, - loss=dict(type=KeypointMSELoss, use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = CocoDataset -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type=LoadImage, backend_args=backend_args), - dict(type=GetBBoxCenterScale), - dict(type=RandomFlip, direction='horizontal'), - dict(type=RandomHalfBody), - dict(type=RandomBBoxTransform), - dict(type=TopdownAffine, input_size=codec['input_size'], use_udp=True), - dict(type=GenerateTarget, encoder=codec), - dict(type=PackPoseInputs) -] -val_pipeline = [ - dict(type=LoadImage, backend_args=backend_args), - dict(type=GetBBoxCenterScale), - dict(type=TopdownAffine, input_size=codec['input_size'], use_udp=True), - dict(type=PackPoseInputs) -] - -# data loaders -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type=DefaultSampler, shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type=DefaultSampler, shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type=CocoMetric, - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +# Copyright (c) OpenMMLab. All rights reserved. 
+from mmengine.config import read_base + +with read_base(): + from mmpose.configs._base_.default_runtime import * + +from mmengine.dataset import DefaultSampler +from mmengine.model import PretrainedInit +from mmengine.optim import LinearLR, MultiStepLR +from torch.optim import Adam + +from mmpose.codecs import UDPHeatmap +from mmpose.datasets import (CocoDataset, GenerateTarget, GetBBoxCenterScale, + LoadImage, PackPoseInputs, RandomFlip, + RandomHalfBody, TopdownAffine) +from mmpose.datasets.transforms.common_transforms import RandomBBoxTransform +from mmpose.evaluation import CocoMetric +from mmpose.models import (HeatmapHead, HRNet, KeypointMSELoss, + PoseDataPreprocessor, TopdownPoseEstimator) + +# runtime +train_cfg.update(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type=Adam, + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict(type=LinearLR, begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type=MultiStepLR, + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks.update(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type=UDPHeatmap, input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type=TopdownPoseEstimator, + data_preprocessor=dict( + type=PoseDataPreprocessor, + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type=HRNet, + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type=PretrainedInit, + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w48-8ef0771d.pth'), + ), + head=dict( + type=HeatmapHead, + in_channels=48, + out_channels=17, + deconv_out_channels=None, + loss=dict(type=KeypointMSELoss, use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = CocoDataset +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type=LoadImage, backend_args=backend_args), + dict(type=GetBBoxCenterScale), + dict(type=RandomFlip, direction='horizontal'), + dict(type=RandomHalfBody), + dict(type=RandomBBoxTransform), + dict(type=TopdownAffine, input_size=codec['input_size'], use_udp=True), + dict(type=GenerateTarget, encoder=codec), + dict(type=PackPoseInputs) +] +val_pipeline = [ + dict(type=LoadImage, backend_args=backend_args), + dict(type=GetBBoxCenterScale), + dict(type=TopdownAffine, input_size=codec['input_size'], use_udp=True), + dict(type=PackPoseInputs) +] + +# data loaders +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type=DefaultSampler, shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + 
ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type=DefaultSampler, shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type=CocoMetric, + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/mmpose/datasets/__init__.py b/mmpose/datasets/__init__.py index b90a12db49..0c8b91752e 100644 --- a/mmpose/datasets/__init__.py +++ b/mmpose/datasets/__init__.py @@ -1,8 +1,8 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .builder import build_dataset -from .dataset_wrappers import CombinedDataset -from .datasets import * # noqa -from .samplers import MultiSourceSampler -from .transforms import * # noqa - -__all__ = ['build_dataset', 'CombinedDataset', 'MultiSourceSampler'] +# Copyright (c) OpenMMLab. All rights reserved. +from .builder import build_dataset +from .dataset_wrappers import CombinedDataset +from .datasets import * # noqa +from .samplers import MultiSourceSampler +from .transforms import * # noqa + +__all__ = ['build_dataset', 'CombinedDataset', 'MultiSourceSampler'] diff --git a/mmpose/datasets/builder.py b/mmpose/datasets/builder.py index 2e5a236ff4..eaaf888c5c 100644 --- a/mmpose/datasets/builder.py +++ b/mmpose/datasets/builder.py @@ -1,90 +1,90 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -import platform -import random - -import numpy as np -import torch -from mmengine import build_from_cfg, is_seq_of -from mmengine.dataset import ConcatDataset, RepeatDataset - -from mmpose.registry import DATASETS - -if platform.system() != 'Windows': - # https://github.com/pytorch/pytorch/issues/973 - import resource - rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) - base_soft_limit = rlimit[0] - hard_limit = rlimit[1] - soft_limit = min(max(4096, base_soft_limit), hard_limit) - resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit)) - - -def _concat_dataset(cfg, default_args=None): - types = cfg['type'] - ann_files = cfg['ann_file'] - img_prefixes = cfg.get('img_prefix', None) - dataset_infos = cfg.get('dataset_info', None) - - num_joints = cfg['data_cfg'].get('num_joints', None) - dataset_channel = cfg['data_cfg'].get('dataset_channel', None) - - datasets = [] - num_dset = len(ann_files) - for i in range(num_dset): - cfg_copy = copy.deepcopy(cfg) - cfg_copy['ann_file'] = ann_files[i] - - if isinstance(types, (list, tuple)): - cfg_copy['type'] = types[i] - if isinstance(img_prefixes, (list, tuple)): - cfg_copy['img_prefix'] = img_prefixes[i] - if isinstance(dataset_infos, (list, tuple)): - cfg_copy['dataset_info'] = dataset_infos[i] - - if isinstance(num_joints, (list, tuple)): - cfg_copy['data_cfg']['num_joints'] = num_joints[i] - - if is_seq_of(dataset_channel, list): - cfg_copy['data_cfg']['dataset_channel'] = dataset_channel[i] - - datasets.append(build_dataset(cfg_copy, default_args)) - - return ConcatDataset(datasets) - - -def build_dataset(cfg, default_args=None): - """Build a dataset from config dict. 
- - Args: - cfg (dict): Config dict. It should at least contain the key "type". - default_args (dict, optional): Default initialization arguments. - Default: None. - - Returns: - Dataset: The constructed dataset. - """ - - if isinstance(cfg, (list, tuple)): - dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg]) - elif cfg['type'] == 'ConcatDataset': - dataset = ConcatDataset( - [build_dataset(c, default_args) for c in cfg['datasets']]) - elif cfg['type'] == 'RepeatDataset': - dataset = RepeatDataset( - build_dataset(cfg['dataset'], default_args), cfg['times']) - elif isinstance(cfg.get('ann_file'), (list, tuple)): - dataset = _concat_dataset(cfg, default_args) - else: - dataset = build_from_cfg(cfg, DATASETS, default_args) - return dataset - - -def worker_init_fn(worker_id, num_workers, rank, seed): - """Init the random seed for various workers.""" - # The seed of each worker equals to - # num_worker * rank + worker_id + user_seed - worker_seed = num_workers * rank + worker_id + seed - np.random.seed(worker_seed) - random.seed(worker_seed) - torch.manual_seed(worker_seed) +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import platform +import random + +import numpy as np +import torch +from mmengine import build_from_cfg, is_seq_of +from mmengine.dataset import ConcatDataset, RepeatDataset + +from mmpose.registry import DATASETS + +if platform.system() != 'Windows': + # https://github.com/pytorch/pytorch/issues/973 + import resource + rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) + base_soft_limit = rlimit[0] + hard_limit = rlimit[1] + soft_limit = min(max(4096, base_soft_limit), hard_limit) + resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit)) + + +def _concat_dataset(cfg, default_args=None): + types = cfg['type'] + ann_files = cfg['ann_file'] + img_prefixes = cfg.get('img_prefix', None) + dataset_infos = cfg.get('dataset_info', None) + + num_joints = cfg['data_cfg'].get('num_joints', None) + dataset_channel = cfg['data_cfg'].get('dataset_channel', None) + + datasets = [] + num_dset = len(ann_files) + for i in range(num_dset): + cfg_copy = copy.deepcopy(cfg) + cfg_copy['ann_file'] = ann_files[i] + + if isinstance(types, (list, tuple)): + cfg_copy['type'] = types[i] + if isinstance(img_prefixes, (list, tuple)): + cfg_copy['img_prefix'] = img_prefixes[i] + if isinstance(dataset_infos, (list, tuple)): + cfg_copy['dataset_info'] = dataset_infos[i] + + if isinstance(num_joints, (list, tuple)): + cfg_copy['data_cfg']['num_joints'] = num_joints[i] + + if is_seq_of(dataset_channel, list): + cfg_copy['data_cfg']['dataset_channel'] = dataset_channel[i] + + datasets.append(build_dataset(cfg_copy, default_args)) + + return ConcatDataset(datasets) + + +def build_dataset(cfg, default_args=None): + """Build a dataset from config dict. + + Args: + cfg (dict): Config dict. It should at least contain the key "type". + default_args (dict, optional): Default initialization arguments. + Default: None. + + Returns: + Dataset: The constructed dataset. 
+ """ + + if isinstance(cfg, (list, tuple)): + dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg]) + elif cfg['type'] == 'ConcatDataset': + dataset = ConcatDataset( + [build_dataset(c, default_args) for c in cfg['datasets']]) + elif cfg['type'] == 'RepeatDataset': + dataset = RepeatDataset( + build_dataset(cfg['dataset'], default_args), cfg['times']) + elif isinstance(cfg.get('ann_file'), (list, tuple)): + dataset = _concat_dataset(cfg, default_args) + else: + dataset = build_from_cfg(cfg, DATASETS, default_args) + return dataset + + +def worker_init_fn(worker_id, num_workers, rank, seed): + """Init the random seed for various workers.""" + # The seed of each worker equals to + # num_worker * rank + worker_id + user_seed + worker_seed = num_workers * rank + worker_id + seed + np.random.seed(worker_seed) + random.seed(worker_seed) + torch.manual_seed(worker_seed) diff --git a/mmpose/datasets/dataset_wrappers.py b/mmpose/datasets/dataset_wrappers.py index 28eeac9945..7bf488d863 100644 --- a/mmpose/datasets/dataset_wrappers.py +++ b/mmpose/datasets/dataset_wrappers.py @@ -1,122 +1,122 @@ -# Copyright (c) OpenMMLab. All rights reserved. - -from copy import deepcopy -from typing import Any, Callable, List, Tuple, Union - -from mmengine.dataset import BaseDataset -from mmengine.registry import build_from_cfg - -from mmpose.registry import DATASETS -from .datasets.utils import parse_pose_metainfo - - -@DATASETS.register_module() -class CombinedDataset(BaseDataset): - """A wrapper of combined dataset. - - Args: - metainfo (dict): The meta information of combined dataset. - datasets (list): The configs of datasets to be combined. - pipeline (list, optional): Processing pipeline. Defaults to []. - """ - - def __init__(self, - metainfo: dict, - datasets: list, - pipeline: List[Union[dict, Callable]] = [], - **kwargs): - - self.datasets = [] - - for cfg in datasets: - dataset = build_from_cfg(cfg, DATASETS) - self.datasets.append(dataset) - - self._lens = [len(dataset) for dataset in self.datasets] - self._len = sum(self._lens) - - super(CombinedDataset, self).__init__(pipeline=pipeline, **kwargs) - self._metainfo = parse_pose_metainfo(metainfo) - - @property - def metainfo(self): - return deepcopy(self._metainfo) - - def __len__(self): - return self._len - - def _get_subset_index(self, index: int) -> Tuple[int, int]: - """Given a data sample's global index, return the index of the sub- - dataset the data sample belongs to, and the local index within that - sub-dataset. - - Args: - index (int): The global data sample index - - Returns: - tuple[int, int]: - - subset_index (int): The index of the sub-dataset - - local_index (int): The index of the data sample within - the sub-dataset - """ - if index >= len(self) or index < -len(self): - raise ValueError( - f'index({index}) is out of bounds for dataset with ' - f'length({len(self)}).') - - if index < 0: - index = index + len(self) - - subset_index = 0 - while index >= self._lens[subset_index]: - index -= self._lens[subset_index] - subset_index += 1 - return subset_index, index - - def prepare_data(self, idx: int) -> Any: - """Get data processed by ``self.pipeline``.The source dataset is - depending on the index. - - Args: - idx (int): The index of ``data_info``. - - Returns: - Any: Depends on ``self.pipeline``. - """ - - data_info = self.get_data_info(idx) - - return self.pipeline(data_info) - - def get_data_info(self, idx: int) -> dict: - """Get annotation by index. - - Args: - idx (int): Global index of ``CombinedDataset``. 
- Returns: - dict: The idx-th annotation of the datasets. - """ - subset_idx, sample_idx = self._get_subset_index(idx) - # Get data sample processed by ``subset.pipeline`` - data_info = self.datasets[subset_idx][sample_idx] - - # Add metainfo items that are required in the pipeline and the model - metainfo_keys = [ - 'upper_body_ids', 'lower_body_ids', 'flip_pairs', - 'dataset_keypoint_weights', 'flip_indices' - ] - - for key in metainfo_keys: - data_info[key] = deepcopy(self._metainfo[key]) - - return data_info - - def full_init(self): - """Fully initialize all sub datasets.""" - - if self._fully_initialized: - return - - for dataset in self.datasets: - dataset.full_init() - self._fully_initialized = True +# Copyright (c) OpenMMLab. All rights reserved. + +from copy import deepcopy +from typing import Any, Callable, List, Tuple, Union + +from mmengine.dataset import BaseDataset +from mmengine.registry import build_from_cfg + +from mmpose.registry import DATASETS +from .datasets.utils import parse_pose_metainfo + + +@DATASETS.register_module() +class CombinedDataset(BaseDataset): + """A wrapper of combined dataset. + + Args: + metainfo (dict): The meta information of combined dataset. + datasets (list): The configs of datasets to be combined. + pipeline (list, optional): Processing pipeline. Defaults to []. + """ + + def __init__(self, + metainfo: dict, + datasets: list, + pipeline: List[Union[dict, Callable]] = [], + **kwargs): + + self.datasets = [] + + for cfg in datasets: + dataset = build_from_cfg(cfg, DATASETS) + self.datasets.append(dataset) + + self._lens = [len(dataset) for dataset in self.datasets] + self._len = sum(self._lens) + + super(CombinedDataset, self).__init__(pipeline=pipeline, **kwargs) + self._metainfo = parse_pose_metainfo(metainfo) + + @property + def metainfo(self): + return deepcopy(self._metainfo) + + def __len__(self): + return self._len + + def _get_subset_index(self, index: int) -> Tuple[int, int]: + """Given a data sample's global index, return the index of the sub- + dataset the data sample belongs to, and the local index within that + sub-dataset. + + Args: + index (int): The global data sample index + + Returns: + tuple[int, int]: + - subset_index (int): The index of the sub-dataset + - local_index (int): The index of the data sample within + the sub-dataset + """ + if index >= len(self) or index < -len(self): + raise ValueError( + f'index({index}) is out of bounds for dataset with ' + f'length({len(self)}).') + + if index < 0: + index = index + len(self) + + subset_index = 0 + while index >= self._lens[subset_index]: + index -= self._lens[subset_index] + subset_index += 1 + return subset_index, index + + def prepare_data(self, idx: int) -> Any: + """Get data processed by ``self.pipeline``.The source dataset is + depending on the index. + + Args: + idx (int): The index of ``data_info``. + + Returns: + Any: Depends on ``self.pipeline``. + """ + + data_info = self.get_data_info(idx) + + return self.pipeline(data_info) + + def get_data_info(self, idx: int) -> dict: + """Get annotation by index. + + Args: + idx (int): Global index of ``CombinedDataset``. + Returns: + dict: The idx-th annotation of the datasets. 
+ """ + subset_idx, sample_idx = self._get_subset_index(idx) + # Get data sample processed by ``subset.pipeline`` + data_info = self.datasets[subset_idx][sample_idx] + + # Add metainfo items that are required in the pipeline and the model + metainfo_keys = [ + 'upper_body_ids', 'lower_body_ids', 'flip_pairs', + 'dataset_keypoint_weights', 'flip_indices' + ] + + for key in metainfo_keys: + data_info[key] = deepcopy(self._metainfo[key]) + + return data_info + + def full_init(self): + """Fully initialize all sub datasets.""" + + if self._fully_initialized: + return + + for dataset in self.datasets: + dataset.full_init() + self._fully_initialized = True diff --git a/mmpose/datasets/datasets/__init__.py b/mmpose/datasets/datasets/__init__.py index 0050716d73..2137b4ca0f 100644 --- a/mmpose/datasets/datasets/__init__.py +++ b/mmpose/datasets/datasets/__init__.py @@ -1,11 +1,11 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .animal import * # noqa: F401, F403 -from .base import * # noqa: F401, F403 -from .body import * # noqa: F401, F403 -from .body3d import * # noqa: F401, F403 -from .face import * # noqa: F401, F403 -from .fashion import * # noqa: F401, F403 -from .hand import * # noqa: F401, F403 -from .wholebody import * # noqa: F401, F403 - -from .oct import * +# Copyright (c) OpenMMLab. All rights reserved. +from .animal import * # noqa: F401, F403 +from .base import * # noqa: F401, F403 +from .body import * # noqa: F401, F403 +from .body3d import * # noqa: F401, F403 +from .face import * # noqa: F401, F403 +from .fashion import * # noqa: F401, F403 +from .hand import * # noqa: F401, F403 +from .wholebody import * # noqa: F401, F403 + +from .oct import * diff --git a/mmpose/datasets/datasets/animal/__init__.py b/mmpose/datasets/datasets/animal/__init__.py index 669f08cddd..eb7d510ea6 100644 --- a/mmpose/datasets/datasets/animal/__init__.py +++ b/mmpose/datasets/datasets/animal/__init__.py @@ -1,16 +1,16 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .animalkingdom_dataset import AnimalKingdomDataset -from .animalpose_dataset import AnimalPoseDataset -from .ap10k_dataset import AP10KDataset -from .atrw_dataset import ATRWDataset -from .fly_dataset import FlyDataset -from .horse10_dataset import Horse10Dataset -from .locust_dataset import LocustDataset -from .macaque_dataset import MacaqueDataset -from .zebra_dataset import ZebraDataset - -__all__ = [ - 'AnimalPoseDataset', 'AP10KDataset', 'Horse10Dataset', 'MacaqueDataset', - 'FlyDataset', 'LocustDataset', 'ZebraDataset', 'ATRWDataset', - 'AnimalKingdomDataset' -] +# Copyright (c) OpenMMLab. All rights reserved. +from .animalkingdom_dataset import AnimalKingdomDataset +from .animalpose_dataset import AnimalPoseDataset +from .ap10k_dataset import AP10KDataset +from .atrw_dataset import ATRWDataset +from .fly_dataset import FlyDataset +from .horse10_dataset import Horse10Dataset +from .locust_dataset import LocustDataset +from .macaque_dataset import MacaqueDataset +from .zebra_dataset import ZebraDataset + +__all__ = [ + 'AnimalPoseDataset', 'AP10KDataset', 'Horse10Dataset', 'MacaqueDataset', + 'FlyDataset', 'LocustDataset', 'ZebraDataset', 'ATRWDataset', + 'AnimalKingdomDataset' +] diff --git a/mmpose/datasets/datasets/animal/animalkingdom_dataset.py b/mmpose/datasets/datasets/animal/animalkingdom_dataset.py index 35ccb8b67a..f38e90f040 100644 --- a/mmpose/datasets/datasets/animal/animalkingdom_dataset.py +++ b/mmpose/datasets/datasets/animal/animalkingdom_dataset.py @@ -1,86 +1,86 @@ -# Copyright (c) OpenMMLab. 
All rights reserved. -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class AnimalKingdomDataset(BaseCocoStyleDataset): - """Animal Kingdom dataset for animal pose estimation. - - "[CVPR2022] Animal Kingdom: - A Large and Diverse Dataset for Animal Behavior Understanding" - More details can be found in the `paper - `__ . - - Website: - - The dataset loads raw features and apply specified transforms - to return a dict containing the image tensors and other information. - - Animal Kingdom keypoint indexes:: - - 0: 'Head_Mid_Top', - 1: 'Eye_Left', - 2: 'Eye_Right', - 3: 'Mouth_Front_Top', - 4: 'Mouth_Back_Left', - 5: 'Mouth_Back_Right', - 6: 'Mouth_Front_Bottom', - 7: 'Shoulder_Left', - 8: 'Shoulder_Right', - 9: 'Elbow_Left', - 10: 'Elbow_Right', - 11: 'Wrist_Left', - 12: 'Wrist_Right', - 13: 'Torso_Mid_Back', - 14: 'Hip_Left', - 15: 'Hip_Right', - 16: 'Knee_Left', - 17: 'Knee_Right', - 18: 'Ankle_Left ', - 19: 'Ankle_Right', - 20: 'Tail_Top_Back', - 21: 'Tail_Mid_Back', - 22: 'Tail_End_Back - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/ak.py') +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class AnimalKingdomDataset(BaseCocoStyleDataset): + """Animal Kingdom dataset for animal pose estimation. 
+ + "[CVPR2022] Animal Kingdom: + A Large and Diverse Dataset for Animal Behavior Understanding" + More details can be found in the `paper + `__ . + + Website: + + The dataset loads raw features and apply specified transforms + to return a dict containing the image tensors and other information. + + Animal Kingdom keypoint indexes:: + + 0: 'Head_Mid_Top', + 1: 'Eye_Left', + 2: 'Eye_Right', + 3: 'Mouth_Front_Top', + 4: 'Mouth_Back_Left', + 5: 'Mouth_Back_Right', + 6: 'Mouth_Front_Bottom', + 7: 'Shoulder_Left', + 8: 'Shoulder_Right', + 9: 'Elbow_Left', + 10: 'Elbow_Right', + 11: 'Wrist_Left', + 12: 'Wrist_Right', + 13: 'Torso_Mid_Back', + 14: 'Hip_Left', + 15: 'Hip_Right', + 16: 'Knee_Left', + 17: 'Knee_Right', + 18: 'Ankle_Left ', + 19: 'Ankle_Right', + 20: 'Tail_Top_Back', + 21: 'Tail_Mid_Back', + 22: 'Tail_End_Back + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/ak.py') diff --git a/mmpose/datasets/datasets/animal/animalpose_dataset.py b/mmpose/datasets/datasets/animal/animalpose_dataset.py index 0279cf9de0..0293afe50c 100644 --- a/mmpose/datasets/datasets/animal/animalpose_dataset.py +++ b/mmpose/datasets/datasets/animal/animalpose_dataset.py @@ -1,75 +1,75 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class AnimalPoseDataset(BaseCocoStyleDataset): - """Animal-Pose dataset for animal pose estimation. 
- - "Cross-domain Adaptation For Animal Pose Estimation" ICCV'2019 - More details can be found in the `paper - `__ . - - Animal-Pose keypoints:: - - 0: 'L_Eye', - 1: 'R_Eye', - 2: 'L_EarBase', - 3: 'R_EarBase', - 4: 'Nose', - 5: 'Throat', - 6: 'TailBase', - 7: 'Withers', - 8: 'L_F_Elbow', - 9: 'R_F_Elbow', - 10: 'L_B_Elbow', - 11: 'R_B_Elbow', - 12: 'L_F_Knee', - 13: 'R_F_Knee', - 14: 'L_B_Knee', - 15: 'R_B_Knee', - 16: 'L_F_Paw', - 17: 'R_F_Paw', - 18: 'L_B_Paw', - 19: 'R_B_Paw' - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/animalpose.py') +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class AnimalPoseDataset(BaseCocoStyleDataset): + """Animal-Pose dataset for animal pose estimation. + + "Cross-domain Adaptation For Animal Pose Estimation" ICCV'2019 + More details can be found in the `paper + `__ . + + Animal-Pose keypoints:: + + 0: 'L_Eye', + 1: 'R_Eye', + 2: 'L_EarBase', + 3: 'R_EarBase', + 4: 'Nose', + 5: 'Throat', + 6: 'TailBase', + 7: 'Withers', + 8: 'L_F_Elbow', + 9: 'R_F_Elbow', + 10: 'L_B_Elbow', + 11: 'R_B_Elbow', + 12: 'L_F_Knee', + 13: 'R_F_Knee', + 14: 'L_B_Knee', + 15: 'R_B_Knee', + 16: 'L_F_Paw', + 17: 'R_F_Paw', + 18: 'L_B_Paw', + 19: 'R_B_Paw' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. 
If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/animalpose.py') diff --git a/mmpose/datasets/datasets/animal/ap10k_dataset.py b/mmpose/datasets/datasets/animal/ap10k_dataset.py index de1efbc67f..f844379a0d 100644 --- a/mmpose/datasets/datasets/animal/ap10k_dataset.py +++ b/mmpose/datasets/datasets/animal/ap10k_dataset.py @@ -1,73 +1,73 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class AP10KDataset(BaseCocoStyleDataset): - """AP-10K dataset for animal pose estimation. - - "AP-10K: A Benchmark for Animal Pose Estimation in the Wild" - Neurips Dataset Track'2021. - More details can be found in the `paper - `__ . - - AP-10K keypoints:: - - 0: 'L_Eye', - 1: 'R_Eye', - 2: 'Nose', - 3: 'Neck', - 4: 'root of tail', - 5: 'L_Shoulder', - 6: 'L_Elbow', - 7: 'L_F_Paw', - 8: 'R_Shoulder', - 9: 'R_Elbow', - 10: 'R_F_Paw, - 11: 'L_Hip', - 12: 'L_Knee', - 13: 'L_B_Paw', - 14: 'R_Hip', - 15: 'R_Knee', - 16: 'R_B_Paw' - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. 
Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/ap10k.py') +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class AP10KDataset(BaseCocoStyleDataset): + """AP-10K dataset for animal pose estimation. + + "AP-10K: A Benchmark for Animal Pose Estimation in the Wild" + Neurips Dataset Track'2021. + More details can be found in the `paper + `__ . + + AP-10K keypoints:: + + 0: 'L_Eye', + 1: 'R_Eye', + 2: 'Nose', + 3: 'Neck', + 4: 'root of tail', + 5: 'L_Shoulder', + 6: 'L_Elbow', + 7: 'L_F_Paw', + 8: 'R_Shoulder', + 9: 'R_Elbow', + 10: 'R_F_Paw, + 11: 'L_Hip', + 12: 'L_Knee', + 13: 'L_B_Paw', + 14: 'R_Hip', + 15: 'R_Knee', + 16: 'R_B_Paw' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. 
+ serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/ap10k.py') diff --git a/mmpose/datasets/datasets/animal/atrw_dataset.py b/mmpose/datasets/datasets/animal/atrw_dataset.py index de5b1a09a0..2669c8dd03 100644 --- a/mmpose/datasets/datasets/animal/atrw_dataset.py +++ b/mmpose/datasets/datasets/animal/atrw_dataset.py @@ -1,71 +1,71 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class ATRWDataset(BaseCocoStyleDataset): - """ATRW dataset for animal pose estimation. - - "ATRW: A Benchmark for Amur Tiger Re-identification in the Wild" - ACM MM'2020. - More details can be found in the `paper - `__ . - - ATRW keypoints:: - - 0: "left_ear", - 1: "right_ear", - 2: "nose", - 3: "right_shoulder", - 4: "right_front_paw", - 5: "left_shoulder", - 6: "left_front_paw", - 7: "right_hip", - 8: "right_knee", - 9: "right_back_paw", - 10: "left_hip", - 11: "left_knee", - 12: "left_back_paw", - 13: "tail", - 14: "center" - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. 
In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/atrw.py') +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class ATRWDataset(BaseCocoStyleDataset): + """ATRW dataset for animal pose estimation. + + "ATRW: A Benchmark for Amur Tiger Re-identification in the Wild" + ACM MM'2020. + More details can be found in the `paper + `__ . + + ATRW keypoints:: + + 0: "left_ear", + 1: "right_ear", + 2: "nose", + 3: "right_shoulder", + 4: "right_front_paw", + 5: "left_shoulder", + 6: "left_front_paw", + 7: "right_hip", + 8: "right_knee", + 9: "right_back_paw", + 10: "left_hip", + 11: "left_knee", + 12: "left_back_paw", + 13: "tail", + 14: "center" + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. 
+ """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/atrw.py') diff --git a/mmpose/datasets/datasets/animal/fly_dataset.py b/mmpose/datasets/datasets/animal/fly_dataset.py index b614d9b9f7..a1605e61f8 100644 --- a/mmpose/datasets/datasets/animal/fly_dataset.py +++ b/mmpose/datasets/datasets/animal/fly_dataset.py @@ -1,88 +1,88 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class FlyDataset(BaseCocoStyleDataset): - """FlyDataset for animal pose estimation. - - "Fast animal pose estimation using deep neural networks" - Nature methods'2019. More details can be found in the `paper - `__ . - - Vinegar Fly keypoints:: - - 0: "head", - 1: "eyeL", - 2: "eyeR", - 3: "neck", - 4: "thorax", - 5: "abdomen", - 6: "forelegR1", - 7: "forelegR2", - 8: "forelegR3", - 9: "forelegR4", - 10: "midlegR1", - 11: "midlegR2", - 12: "midlegR3", - 13: "midlegR4", - 14: "hindlegR1", - 15: "hindlegR2", - 16: "hindlegR3", - 17: "hindlegR4", - 18: "forelegL1", - 19: "forelegL2", - 20: "forelegL3", - 21: "forelegL4", - 22: "midlegL1", - 23: "midlegL2", - 24: "midlegL3", - 25: "midlegL4", - 26: "hindlegL1", - 27: "hindlegL2", - 28: "hindlegL3", - 29: "hindlegL4", - 30: "wingL", - 31: "wingR" - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/fly.py') +# Copyright (c) OpenMMLab. All rights reserved. 
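Reviewer note on the animal dataset classes touched so far (AnimalPose, AP-10K, ATRW): each one only overrides ``METAINFO``, so they are all constructed the same way through the ``DATASETS`` registry. A minimal, hypothetical sketch of building one of them from a config dict; the paths and the empty pipeline are placeholders, not values taken from this diff::

    from mmpose.registry import DATASETS

    # illustrative top-down config; adapt data_root/ann_file to the local data
    cfg = dict(
        type='ATRWDataset',
        data_root='data/atrw/',             # placeholder
        data_mode='topdown',
        ann_file='annotations/train.json',  # placeholder
        data_prefix=dict(img='images/'),    # placeholder
        pipeline=[],                        # a real config lists transforms here
    )
    dataset = DATASETS.build(cfg)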
+from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class FlyDataset(BaseCocoStyleDataset): + """FlyDataset for animal pose estimation. + + "Fast animal pose estimation using deep neural networks" + Nature methods'2019. More details can be found in the `paper + `__ . + + Vinegar Fly keypoints:: + + 0: "head", + 1: "eyeL", + 2: "eyeR", + 3: "neck", + 4: "thorax", + 5: "abdomen", + 6: "forelegR1", + 7: "forelegR2", + 8: "forelegR3", + 9: "forelegR4", + 10: "midlegR1", + 11: "midlegR2", + 12: "midlegR3", + 13: "midlegR4", + 14: "hindlegR1", + 15: "hindlegR2", + 16: "hindlegR3", + 17: "hindlegR4", + 18: "forelegL1", + 19: "forelegL2", + 20: "forelegL3", + 21: "forelegL4", + 22: "midlegL1", + 23: "midlegL2", + 24: "midlegL3", + 25: "midlegL4", + 26: "hindlegL1", + 27: "hindlegL2", + 28: "hindlegL3", + 29: "hindlegL4", + 30: "wingL", + 31: "wingR" + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/fly.py') diff --git a/mmpose/datasets/datasets/animal/horse10_dataset.py b/mmpose/datasets/datasets/animal/horse10_dataset.py index 0c25dba6a7..47f91624aa 100644 --- a/mmpose/datasets/datasets/animal/horse10_dataset.py +++ b/mmpose/datasets/datasets/animal/horse10_dataset.py @@ -1,77 +1,77 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
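As every docstring in this file notes, ``lazy_init`` lets callers skip annotation loading when only the meta information is needed (e.g. for visualization). A small sketch, assuming MMPose is installed and run from a checkout so the relative ``METAINFO`` config path resolves; the annotation path is a placeholder and is not read here::

    from mmpose.datasets.datasets.animal.fly_dataset import FlyDataset

    # defer loading the annotation file; metainfo is still parsed from the
    # configs/_base_/datasets/fly.py file referenced by METAINFO
    dataset = FlyDataset(
        ann_file='annotations/fly_train.json',  # placeholder
        data_mode='topdown',
        lazy_init=True,
    )
    print(dataset.metainfo['num_keypoints'])  # expected to match the 32 names above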
-from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class Horse10Dataset(BaseCocoStyleDataset): - """Horse10Dataset for animal pose estimation. - - "Pretraining boosts out-of-domain robustness for pose estimation" - WACV'2021. More details can be found in the `paper - `__ . - - Horse-10 keypoints:: - - 0: 'Nose', - 1: 'Eye', - 2: 'Nearknee', - 3: 'Nearfrontfetlock', - 4: 'Nearfrontfoot', - 5: 'Offknee', - 6: 'Offfrontfetlock', - 7: 'Offfrontfoot', - 8: 'Shoulder', - 9: 'Midshoulder', - 10: 'Elbow', - 11: 'Girth', - 12: 'Wither', - 13: 'Nearhindhock', - 14: 'Nearhindfetlock', - 15: 'Nearhindfoot', - 16: 'Hip', - 17: 'Stifle', - 18: 'Offhindhock', - 19: 'Offhindfetlock', - 20: 'Offhindfoot', - 21: 'Ischium' - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/horse10.py') +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class Horse10Dataset(BaseCocoStyleDataset): + """Horse10Dataset for animal pose estimation. + + "Pretraining boosts out-of-domain robustness for pose estimation" + WACV'2021. More details can be found in the `paper + `__ . 
+ + Horse-10 keypoints:: + + 0: 'Nose', + 1: 'Eye', + 2: 'Nearknee', + 3: 'Nearfrontfetlock', + 4: 'Nearfrontfoot', + 5: 'Offknee', + 6: 'Offfrontfetlock', + 7: 'Offfrontfoot', + 8: 'Shoulder', + 9: 'Midshoulder', + 10: 'Elbow', + 11: 'Girth', + 12: 'Wither', + 13: 'Nearhindhock', + 14: 'Nearhindfetlock', + 15: 'Nearhindfoot', + 16: 'Hip', + 17: 'Stifle', + 18: 'Offhindhock', + 19: 'Offhindfetlock', + 20: 'Offhindfoot', + 21: 'Ischium' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/horse10.py') diff --git a/mmpose/datasets/datasets/animal/locust_dataset.py b/mmpose/datasets/datasets/animal/locust_dataset.py index 3ada76034d..2957b64019 100644 --- a/mmpose/datasets/datasets/animal/locust_dataset.py +++ b/mmpose/datasets/datasets/animal/locust_dataset.py @@ -1,140 +1,140 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -from typing import Optional - -import numpy as np - -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class LocustDataset(BaseCocoStyleDataset): - """LocustDataset for animal pose estimation. - - "DeepPoseKit, a software toolkit for fast and robust animal - pose estimation using deep learning" Elife'2019. - More details can be found in the `paper - `__ . 
- - Desert Locust keypoints:: - - 0: "head", - 1: "neck", - 2: "thorax", - 3: "abdomen1", - 4: "abdomen2", - 5: "anttipL", - 6: "antbaseL", - 7: "eyeL", - 8: "forelegL1", - 9: "forelegL2", - 10: "forelegL3", - 11: "forelegL4", - 12: "midlegL1", - 13: "midlegL2", - 14: "midlegL3", - 15: "midlegL4", - 16: "hindlegL1", - 17: "hindlegL2", - 18: "hindlegL3", - 19: "hindlegL4", - 20: "anttipR", - 21: "antbaseR", - 22: "eyeR", - 23: "forelegR1", - 24: "forelegR2", - 25: "forelegR3", - 26: "forelegR4", - 27: "midlegR1", - 28: "midlegR2", - 29: "midlegR3", - 30: "midlegR4", - 31: "hindlegR1", - 32: "hindlegR2", - 33: "hindlegR3", - 34: "hindlegR4" - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/locust.py') - - def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: - """Parse raw Locust annotation of an instance. - - Args: - raw_data_info (dict): Raw data information loaded from - ``ann_file``. 
It should have following contents: - - - ``'raw_ann_info'``: Raw annotation of an instance - - ``'raw_img_info'``: Raw information of the image that - contains the instance - - Returns: - dict: Parsed instance annotation - """ - - ann = raw_data_info['raw_ann_info'] - img = raw_data_info['raw_img_info'] - - img_path = osp.join(self.data_prefix['img'], img['file_name']) - - # get bbox in shape [1, 4], formatted as xywh - # use the entire image which is 160x160 - bbox = np.array([0, 0, 160, 160], dtype=np.float32).reshape(1, 4) - - # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] - _keypoints = np.array( - ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) - keypoints = _keypoints[..., :2] - keypoints_visible = np.minimum(1, _keypoints[..., 2]) - - data_info = { - 'img_id': ann['image_id'], - 'img_path': img_path, - 'bbox': bbox, - 'bbox_score': np.ones(1, dtype=np.float32), - 'num_keypoints': ann['num_keypoints'], - 'keypoints': keypoints, - 'keypoints_visible': keypoints_visible, - 'iscrowd': ann['iscrowd'], - 'id': ann['id'], - } - - return data_info +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import Optional + +import numpy as np + +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class LocustDataset(BaseCocoStyleDataset): + """LocustDataset for animal pose estimation. + + "DeepPoseKit, a software toolkit for fast and robust animal + pose estimation using deep learning" Elife'2019. + More details can be found in the `paper + `__ . + + Desert Locust keypoints:: + + 0: "head", + 1: "neck", + 2: "thorax", + 3: "abdomen1", + 4: "abdomen2", + 5: "anttipL", + 6: "antbaseL", + 7: "eyeL", + 8: "forelegL1", + 9: "forelegL2", + 10: "forelegL3", + 11: "forelegL4", + 12: "midlegL1", + 13: "midlegL2", + 14: "midlegL3", + 15: "midlegL4", + 16: "hindlegL1", + 17: "hindlegL2", + 18: "hindlegL3", + 19: "hindlegL4", + 20: "anttipR", + 21: "antbaseR", + 22: "eyeR", + 23: "forelegR1", + 24: "forelegR2", + 25: "forelegR3", + 26: "forelegR4", + 27: "midlegR1", + 28: "midlegR2", + 29: "midlegR3", + 30: "midlegR4", + 31: "hindlegR1", + 32: "hindlegR2", + 33: "hindlegR3", + 34: "hindlegR4" + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. 
+ serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/locust.py') + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw Locust annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + img_path = osp.join(self.data_prefix['img'], img['file_name']) + + # get bbox in shape [1, 4], formatted as xywh + # use the entire image which is 160x160 + bbox = np.array([0, 0, 160, 160], dtype=np.float32).reshape(1, 4) + + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + _keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + data_info = { + 'img_id': ann['image_id'], + 'img_path': img_path, + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': ann['num_keypoints'], + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'iscrowd': ann['iscrowd'], + 'id': ann['id'], + } + + return data_info diff --git a/mmpose/datasets/datasets/animal/macaque_dataset.py b/mmpose/datasets/datasets/animal/macaque_dataset.py index 08da981a1a..4947327c93 100644 --- a/mmpose/datasets/datasets/animal/macaque_dataset.py +++ b/mmpose/datasets/datasets/animal/macaque_dataset.py @@ -1,74 +1,74 @@ -# Copyright (c) OpenMMLab. All rights reserved. - -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class MacaqueDataset(BaseCocoStyleDataset): - """MacaquePose dataset for animal pose estimation. - - "MacaquePose: A novel 'in the wild' macaque monkey pose dataset - for markerless motion capture" bioRxiv'2020. - More details can be found in the `paper - `__ . - - Macaque keypoints:: - - 0: 'nose', - 1: 'left_eye', - 2: 'right_eye', - 3: 'left_ear', - 4: 'right_ear', - 5: 'left_shoulder', - 6: 'right_shoulder', - 7: 'left_elbow', - 8: 'right_elbow', - 9: 'left_wrist', - 10: 'right_wrist', - 11: 'left_hip', - 12: 'right_hip', - 13: 'left_knee', - 14: 'right_knee', - 15: 'left_ankle', - 16: 'right_ankle' - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. 
This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/macaque.py') +# Copyright (c) OpenMMLab. All rights reserved. + +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class MacaqueDataset(BaseCocoStyleDataset): + """MacaquePose dataset for animal pose estimation. + + "MacaquePose: A novel 'in the wild' macaque monkey pose dataset + for markerless motion capture" bioRxiv'2020. + More details can be found in the `paper + `__ . + + Macaque keypoints:: + + 0: 'nose', + 1: 'left_eye', + 2: 'right_eye', + 3: 'left_ear', + 4: 'right_ear', + 5: 'left_shoulder', + 6: 'right_shoulder', + 7: 'left_elbow', + 8: 'right_elbow', + 9: 'left_wrist', + 10: 'right_wrist', + 11: 'left_hip', + 12: 'right_hip', + 13: 'left_knee', + 14: 'right_knee', + 15: 'left_ankle', + 16: 'right_ankle' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. 
Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/macaque.py') diff --git a/mmpose/datasets/datasets/animal/zebra_dataset.py b/mmpose/datasets/datasets/animal/zebra_dataset.py index b399a8479b..194a78826e 100644 --- a/mmpose/datasets/datasets/animal/zebra_dataset.py +++ b/mmpose/datasets/datasets/animal/zebra_dataset.py @@ -1,116 +1,116 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -from typing import Optional - -import numpy as np - -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class ZebraDataset(BaseCocoStyleDataset): - """ZebraDataset for animal pose estimation. - - "DeepPoseKit, a software toolkit for fast and robust animal - pose estimation using deep learning" Elife'2019. - More details can be found in the `paper - `__ . - - Zebra keypoints:: - - 0: "snout", - 1: "head", - 2: "neck", - 3: "forelegL1", - 4: "forelegR1", - 5: "hindlegL1", - 6: "hindlegR1", - 7: "tailbase", - 8: "tailtip" - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. 
- serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/zebra.py') - - def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: - """Parse raw Zebra annotation of an instance. - - Args: - raw_data_info (dict): Raw data information loaded from - ``ann_file``. It should have following contents: - - - ``'raw_ann_info'``: Raw annotation of an instance - - ``'raw_img_info'``: Raw information of the image that - contains the instance - - Returns: - dict: Parsed instance annotation - """ - - ann = raw_data_info['raw_ann_info'] - img = raw_data_info['raw_img_info'] - - img_path = osp.join(self.data_prefix['img'], img['file_name']) - - # get bbox in shape [1, 4], formatted as xywh - # use the entire image which is 160x160 - bbox = np.array([0, 0, 160, 160], dtype=np.float32).reshape(1, 4) - - # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] - _keypoints = np.array( - ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) - keypoints = _keypoints[..., :2] - keypoints_visible = np.minimum(1, _keypoints[..., 2]) - - num_keypoints = ann['num_keypoints'] - - data_info = { - 'img_id': ann['image_id'], - 'img_path': img_path, - 'bbox': bbox, - 'bbox_score': np.ones(1, dtype=np.float32), - 'num_keypoints': num_keypoints, - 'keypoints': keypoints, - 'keypoints_visible': keypoints_visible, - 'iscrowd': ann['iscrowd'], - 'id': ann['id'], - } - - return data_info +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import Optional + +import numpy as np + +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class ZebraDataset(BaseCocoStyleDataset): + """ZebraDataset for animal pose estimation. + + "DeepPoseKit, a software toolkit for fast and robust animal + pose estimation using deep learning" Elife'2019. + More details can be found in the `paper + `__ . + + Zebra keypoints:: + + 0: "snout", + 1: "head", + 2: "neck", + 3: "forelegL1", + 4: "forelegR1", + 5: "hindlegL1", + 6: "hindlegR1", + 7: "tailbase", + 8: "tailtip" + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. 
Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/zebra.py') + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw Zebra annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + img_path = osp.join(self.data_prefix['img'], img['file_name']) + + # get bbox in shape [1, 4], formatted as xywh + # use the entire image which is 160x160 + bbox = np.array([0, 0, 160, 160], dtype=np.float32).reshape(1, 4) + + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + _keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + num_keypoints = ann['num_keypoints'] + + data_info = { + 'img_id': ann['image_id'], + 'img_path': img_path, + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': num_keypoints, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'iscrowd': ann['iscrowd'], + 'id': ann['id'], + } + + return data_info diff --git a/mmpose/datasets/datasets/base/__init__.py b/mmpose/datasets/datasets/base/__init__.py index 810440530e..aa682ddeb6 100644 --- a/mmpose/datasets/datasets/base/__init__.py +++ b/mmpose/datasets/datasets/base/__init__.py @@ -1,5 +1,5 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .base_coco_style_dataset import BaseCocoStyleDataset -from .base_mocap_dataset import BaseMocapDataset - -__all__ = ['BaseCocoStyleDataset', 'BaseMocapDataset'] +# Copyright (c) OpenMMLab. All rights reserved. 
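The ``parse_data_info`` overrides in the Locust and Zebra datasets above share the same keypoint handling as the base class further down; the toy snippet below only restates that transformation to make the array shapes explicit (the keypoint values are invented)::

    import numpy as np

    # COCO-style flat triplets (x, y, visibility) for K=2 made-up keypoints
    ann_keypoints = [10.0, 20.0, 2, 0.0, 0.0, 0]

    _keypoints = np.array(ann_keypoints, dtype=np.float32).reshape(1, -1, 3)
    keypoints = _keypoints[..., :2]                         # shape (1, K, 2)
    keypoints_visible = np.minimum(1, _keypoints[..., 2])   # shape (1, K)

    assert keypoints.shape == (1, 2, 2)
    assert keypoints_visible.tolist() == [[1.0, 0.0]]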
+from .base_coco_style_dataset import BaseCocoStyleDataset +from .base_mocap_dataset import BaseMocapDataset + +__all__ = ['BaseCocoStyleDataset', 'BaseMocapDataset'] diff --git a/mmpose/datasets/datasets/base/base_coco_style_dataset.py b/mmpose/datasets/datasets/base/base_coco_style_dataset.py index 3b592813d8..36b127028f 100644 --- a/mmpose/datasets/datasets/base/base_coco_style_dataset.py +++ b/mmpose/datasets/datasets/base/base_coco_style_dataset.py @@ -1,458 +1,458 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -import os.path as osp -from copy import deepcopy -from itertools import filterfalse, groupby -from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union - -import numpy as np -from mmengine.dataset import BaseDataset, force_full_init -from mmengine.fileio import exists, get_local_path, load -from mmengine.utils import is_list_of -from xtcocotools.coco import COCO - -from mmpose.registry import DATASETS -from mmpose.structures.bbox import bbox_xywh2xyxy -from ..utils import parse_pose_metainfo - - -@DATASETS.register_module() -class BaseCocoStyleDataset(BaseDataset): - """Base class for COCO-style datasets. - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. - Default: ``dict(img='')``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. 
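Reviewer note on the constructor arguments documented above: the ``__init__`` a few lines below validates ``data_mode`` and the ``bbox_file``/``test_mode`` combination before anything is loaded, so misconfigurations fail fast. A small sketch (assuming MMPose is installed; no files are read before the checks trigger, and the file name is a placeholder)::

    from mmpose.datasets.datasets.base import BaseCocoStyleDataset

    try:
        BaseCocoStyleDataset(data_mode='3d')        # neither topdown nor bottomup
    except ValueError as e:
        print(e)

    try:
        BaseCocoStyleDataset(bbox_file='dets.json',  # placeholder name
                             test_mode=False)        # bbox_file needs test_mode=True
    except ValueError as e:
        print(e)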
- """ - - METAINFO: dict = dict() - - def __init__(self, - ann_file: str = '', - bbox_file: Optional[str] = None, - data_mode: str = 'topdown', - metainfo: Optional[dict] = None, - data_root: Optional[str] = None, - data_prefix: dict = dict(img=''), - filter_cfg: Optional[dict] = None, - indices: Optional[Union[int, Sequence[int]]] = None, - serialize_data: bool = True, - pipeline: List[Union[dict, Callable]] = [], - test_mode: bool = False, - lazy_init: bool = False, - max_refetch: int = 1000): - - if data_mode not in {'topdown', 'bottomup'}: - raise ValueError( - f'{self.__class__.__name__} got invalid data_mode: ' - f'{data_mode}. Should be "topdown" or "bottomup".') - self.data_mode = data_mode - - if bbox_file: - if self.data_mode != 'topdown': - raise ValueError( - f'{self.__class__.__name__} is set to {self.data_mode}: ' - 'mode, while "bbox_file" is only ' - 'supported in topdown mode.') - - if not test_mode: - raise ValueError( - f'{self.__class__.__name__} has `test_mode==False` ' - 'while "bbox_file" is only ' - 'supported when `test_mode==True`.') - self.bbox_file = bbox_file - - super().__init__( - ann_file=ann_file, - metainfo=metainfo, - data_root=data_root, - data_prefix=data_prefix, - filter_cfg=filter_cfg, - indices=indices, - serialize_data=serialize_data, - pipeline=pipeline, - test_mode=test_mode, - lazy_init=lazy_init, - max_refetch=max_refetch) - - @classmethod - def _load_metainfo(cls, metainfo: dict = None) -> dict: - """Collect meta information from the dictionary of meta. - - Args: - metainfo (dict): Raw data of pose meta information. - - Returns: - dict: Parsed meta information. - """ - - if metainfo is None: - metainfo = deepcopy(cls.METAINFO) - - if not isinstance(metainfo, dict): - raise TypeError( - f'metainfo should be a dict, but got {type(metainfo)}') - - # parse pose metainfo if it has been assigned - if metainfo: - metainfo = parse_pose_metainfo(metainfo) - return metainfo - - @force_full_init - def prepare_data(self, idx) -> Any: - """Get data processed by ``self.pipeline``. - - :class:`BaseCocoStyleDataset` overrides this method from - :class:`mmengine.dataset.BaseDataset` to add the metainfo into - the ``data_info`` before it is passed to the pipeline. - - Args: - idx (int): The index of ``data_info``. - - Returns: - Any: Depends on ``self.pipeline``. - """ - data_info = self.get_data_info(idx) - - return self.pipeline(data_info) - - def get_data_info(self, idx: int) -> dict: - """Get data info by index. - - Args: - idx (int): Index of data info. - - Returns: - dict: Data info. 
- """ - data_info = super().get_data_info(idx) - - # Add metainfo items that are required in the pipeline and the model - metainfo_keys = [ - 'upper_body_ids', 'lower_body_ids', 'flip_pairs', - 'dataset_keypoint_weights', 'flip_indices', 'skeleton_links' - ] - - for key in metainfo_keys: - assert key not in data_info, ( - f'"{key}" is a reserved key for `metainfo`, but already ' - 'exists in the `data_info`.') - - data_info[key] = deepcopy(self._metainfo[key]) - - return data_info - - def load_data_list(self) -> List[dict]: - """Load data list from COCO annotation file or person detection result - file.""" - - if self.bbox_file: - data_list = self._load_detection_results() - else: - instance_list, image_list = self._load_annotations() - - if self.data_mode == 'topdown': - data_list = self._get_topdown_data_infos(instance_list) - else: - data_list = self._get_bottomup_data_infos( - instance_list, image_list) - - return data_list - - def _load_annotations(self) -> Tuple[List[dict], List[dict]]: - """Load data from annotations in COCO format.""" - - assert exists(self.ann_file), 'Annotation file does not exist' - - with get_local_path(self.ann_file) as local_path: - self.coco = COCO(local_path) - # set the metainfo about categories, which is a list of dict - # and each dict contains the 'id', 'name', etc. about this category - self._metainfo['CLASSES'] = self.coco.loadCats(self.coco.getCatIds()) - - instance_list = [] - image_list = [] - - for img_id in self.coco.getImgIds(): - img = self.coco.loadImgs(img_id)[0] - img.update({ - 'img_id': - img_id, - 'img_path': - osp.join(self.data_prefix['img'], img['file_name']), - }) - image_list.append(img) - - ann_ids = self.coco.getAnnIds(imgIds=img_id) - for ann in self.coco.loadAnns(ann_ids): - - instance_info = self.parse_data_info( - dict(raw_ann_info=ann, raw_img_info=img)) - - # skip invalid instance annotation. - if not instance_info: - continue - - instance_list.append(instance_info) - return instance_list, image_list - - def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: - """Parse raw COCO annotation of an instance. - - Args: - raw_data_info (dict): Raw data information loaded from - ``ann_file``. 
It should have following contents: - - - ``'raw_ann_info'``: Raw annotation of an instance - - ``'raw_img_info'``: Raw information of the image that - contains the instance - - Returns: - dict | None: Parsed instance annotation - """ - - ann = raw_data_info['raw_ann_info'] - img = raw_data_info['raw_img_info'] - - # filter invalid instance - if 'bbox' not in ann or 'keypoints' not in ann: - return None - - img_w, img_h = img['width'], img['height'] - - # get bbox in shape [1, 4], formatted as xywh - x, y, w, h = ann['bbox'] - x1 = np.clip(x, 0, img_w - 1) - y1 = np.clip(y, 0, img_h - 1) - x2 = np.clip(x + w, 0, img_w - 1) - y2 = np.clip(y + h, 0, img_h - 1) - - bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4) - - # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] - _keypoints = np.array( - ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) - keypoints = _keypoints[..., :2] - keypoints_visible = np.minimum(1, _keypoints[..., 2]) - - if 'num_keypoints' in ann: - num_keypoints = ann['num_keypoints'] - else: - num_keypoints = np.count_nonzero(keypoints.max(axis=2)) - - data_info = { - 'img_id': ann['image_id'], - 'img_path': img['img_path'], - 'bbox': bbox, - 'bbox_score': np.ones(1, dtype=np.float32), - 'num_keypoints': num_keypoints, - 'keypoints': keypoints, - 'keypoints_visible': keypoints_visible, - 'iscrowd': ann.get('iscrowd', 0), - 'segmentation': ann.get('segmentation', None), - 'id': ann['id'], - 'category_id': ann['category_id'], - # store the raw annotation of the instance - # it is useful for evaluation without providing ann_file - 'raw_ann_info': copy.deepcopy(ann), - } - - if 'crowdIndex' in img: - data_info['crowd_index'] = img['crowdIndex'] - - return data_info - - @staticmethod - def _is_valid_instance(data_info: Dict) -> bool: - """Check a data info is an instance with valid bbox and keypoint - annotations.""" - # crowd annotation - if 'iscrowd' in data_info and data_info['iscrowd']: - return False - # invalid keypoints - if 'num_keypoints' in data_info and data_info['num_keypoints'] == 0: - return False - # invalid bbox - if 'bbox' in data_info: - bbox = data_info['bbox'][0] - w, h = bbox[2:4] - bbox[:2] - if w <= 0 or h <= 0: - return False - # invalid keypoints - if 'keypoints' in data_info: - if np.max(data_info['keypoints']) <= 0: - return False - return True - - def _get_topdown_data_infos(self, instance_list: List[Dict]) -> List[Dict]: - """Organize the data list in top-down mode.""" - # sanitize data samples - data_list_tp = list(filter(self._is_valid_instance, instance_list)) - - return data_list_tp - - def _get_bottomup_data_infos(self, instance_list: List[Dict], - image_list: List[Dict]) -> List[Dict]: - """Organize the data list in bottom-up mode.""" - - # bottom-up data list - data_list_bu = [] - - used_img_ids = set() - - # group instances by img_id - for img_id, data_infos in groupby(instance_list, - lambda x: x['img_id']): - used_img_ids.add(img_id) - data_infos = list(data_infos) - - # image data - img_path = data_infos[0]['img_path'] - data_info_bu = { - 'img_id': img_id, - 'img_path': img_path, - } - - for key in data_infos[0].keys(): - if key not in data_info_bu: - seq = [d[key] for d in data_infos] - if isinstance(seq[0], np.ndarray): - seq = np.concatenate(seq, axis=0) - data_info_bu[key] = seq - - # The segmentation annotation of invalid objects will be used - # to generate valid region mask in the pipeline. 
- invalid_segs = [] - for data_info_invalid in filterfalse(self._is_valid_instance, - data_infos): - if 'segmentation' in data_info_invalid: - invalid_segs.append(data_info_invalid['segmentation']) - data_info_bu['invalid_segs'] = invalid_segs - - data_list_bu.append(data_info_bu) - - # add images without instance for evaluation - if self.test_mode: - for img_info in image_list: - if img_info['img_id'] not in used_img_ids: - data_info_bu = { - 'img_id': img_info['img_id'], - 'img_path': img_info['img_path'], - 'id': list(), - 'raw_ann_info': None, - } - data_list_bu.append(data_info_bu) - - return data_list_bu - - def _load_detection_results(self) -> List[dict]: - """Load data from detection results with dummy keypoint annotations.""" - - assert exists(self.ann_file), 'Annotation file does not exist' - assert exists(self.bbox_file), 'Bbox file does not exist' - # load detection results - det_results = load(self.bbox_file) - assert is_list_of(det_results, dict) - - # load coco annotations to build image id-to-name index - with get_local_path(self.ann_file) as local_path: - self.coco = COCO(local_path) - # set the metainfo about categories, which is a list of dict - # and each dict contains the 'id', 'name', etc. about this category - self._metainfo['CLASSES'] = self.coco.loadCats(self.coco.getCatIds()) - - num_keypoints = self.metainfo['num_keypoints'] - data_list = [] - id_ = 0 - for det in det_results: - # remove non-human instances - if det['category_id'] != 1: - continue - - img = self.coco.loadImgs(det['image_id'])[0] - - img_path = osp.join(self.data_prefix['img'], img['file_name']) - bbox_xywh = np.array( - det['bbox'][:4], dtype=np.float32).reshape(1, 4) - bbox = bbox_xywh2xyxy(bbox_xywh) - bbox_score = np.array(det['score'], dtype=np.float32).reshape(1) - - # use dummy keypoint location and visibility - keypoints = np.zeros((1, num_keypoints, 2), dtype=np.float32) - keypoints_visible = np.ones((1, num_keypoints), dtype=np.float32) - - data_list.append({ - 'img_id': det['image_id'], - 'img_path': img_path, - 'img_shape': (img['height'], img['width']), - 'bbox': bbox, - 'bbox_score': bbox_score, - 'keypoints': keypoints, - 'keypoints_visible': keypoints_visible, - 'id': id_, - }) - - id_ += 1 - - return data_list - - def filter_data(self) -> List[dict]: - """Filter annotations according to filter_cfg. Defaults return full - ``data_list``. - - If 'bbox_score_thr` in filter_cfg, the annotation with bbox_score below - the threshold `bbox_score_thr` will be filtered out. - """ - - data_list = self.data_list - - if self.filter_cfg is None: - return data_list - - # filter out annotations with a bbox_score below the threshold - if 'bbox_score_thr' in self.filter_cfg: - - if self.data_mode != 'topdown': - raise ValueError( - f'{self.__class__.__name__} is set to {self.data_mode} ' - 'mode, while "bbox_score_thr" is only supported in ' - 'topdown mode.') - - thr = self.filter_cfg['bbox_score_thr'] - data_list = list( - filterfalse(lambda ann: ann['bbox_score'] < thr, data_list)) - - return data_list +# Copyright (c) OpenMMLab. All rights reserved. 
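A usage sketch of the two evaluation-only options handled above (all paths are hypothetical): bbox_file supplies detected person boxes in place of ground-truth boxes and is only accepted in topdown mode with test_mode=True (the constructor raises a ValueError otherwise), while filter_cfg=dict(bbox_score_thr=...) makes filter_data() drop low-scoring detections.

val_dataset = dict(
    type='CocoDataset',
    data_root='data/coco',                                   # hypothetical path
    ann_file='annotations/person_keypoints_val2017.json',    # hypothetical path
    bbox_file='person_detection_results/COCO_val2017_detections.json',  # hypothetical path
    filter_cfg=dict(bbox_score_thr=0.3),                     # assumed threshold
    data_mode='topdown',
    data_prefix=dict(img='val2017/'),
    test_mode=True,
    pipeline=[])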
+import copy +import os.path as osp +from copy import deepcopy +from itertools import filterfalse, groupby +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union + +import numpy as np +from mmengine.dataset import BaseDataset, force_full_init +from mmengine.fileio import exists, get_local_path, load +from mmengine.utils import is_list_of +from xtcocotools.coco import COCO + +from mmpose.registry import DATASETS +from mmpose.structures.bbox import bbox_xywh2xyxy +from ..utils import parse_pose_metainfo + + +@DATASETS.register_module() +class BaseCocoStyleDataset(BaseDataset): + """Base class for COCO-style datasets. + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. + Default: ``dict(img='')``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict() + + def __init__(self, + ann_file: str = '', + bbox_file: Optional[str] = None, + data_mode: str = 'topdown', + metainfo: Optional[dict] = None, + data_root: Optional[str] = None, + data_prefix: dict = dict(img=''), + filter_cfg: Optional[dict] = None, + indices: Optional[Union[int, Sequence[int]]] = None, + serialize_data: bool = True, + pipeline: List[Union[dict, Callable]] = [], + test_mode: bool = False, + lazy_init: bool = False, + max_refetch: int = 1000): + + if data_mode not in {'topdown', 'bottomup'}: + raise ValueError( + f'{self.__class__.__name__} got invalid data_mode: ' + f'{data_mode}. 
Should be "topdown" or "bottomup".') + self.data_mode = data_mode + + if bbox_file: + if self.data_mode != 'topdown': + raise ValueError( + f'{self.__class__.__name__} is set to {self.data_mode}: ' + 'mode, while "bbox_file" is only ' + 'supported in topdown mode.') + + if not test_mode: + raise ValueError( + f'{self.__class__.__name__} has `test_mode==False` ' + 'while "bbox_file" is only ' + 'supported when `test_mode==True`.') + self.bbox_file = bbox_file + + super().__init__( + ann_file=ann_file, + metainfo=metainfo, + data_root=data_root, + data_prefix=data_prefix, + filter_cfg=filter_cfg, + indices=indices, + serialize_data=serialize_data, + pipeline=pipeline, + test_mode=test_mode, + lazy_init=lazy_init, + max_refetch=max_refetch) + + @classmethod + def _load_metainfo(cls, metainfo: dict = None) -> dict: + """Collect meta information from the dictionary of meta. + + Args: + metainfo (dict): Raw data of pose meta information. + + Returns: + dict: Parsed meta information. + """ + + if metainfo is None: + metainfo = deepcopy(cls.METAINFO) + + if not isinstance(metainfo, dict): + raise TypeError( + f'metainfo should be a dict, but got {type(metainfo)}') + + # parse pose metainfo if it has been assigned + if metainfo: + metainfo = parse_pose_metainfo(metainfo) + return metainfo + + @force_full_init + def prepare_data(self, idx) -> Any: + """Get data processed by ``self.pipeline``. + + :class:`BaseCocoStyleDataset` overrides this method from + :class:`mmengine.dataset.BaseDataset` to add the metainfo into + the ``data_info`` before it is passed to the pipeline. + + Args: + idx (int): The index of ``data_info``. + + Returns: + Any: Depends on ``self.pipeline``. + """ + data_info = self.get_data_info(idx) + + return self.pipeline(data_info) + + def get_data_info(self, idx: int) -> dict: + """Get data info by index. + + Args: + idx (int): Index of data info. + + Returns: + dict: Data info. + """ + data_info = super().get_data_info(idx) + + # Add metainfo items that are required in the pipeline and the model + metainfo_keys = [ + 'upper_body_ids', 'lower_body_ids', 'flip_pairs', + 'dataset_keypoint_weights', 'flip_indices', 'skeleton_links' + ] + + for key in metainfo_keys: + assert key not in data_info, ( + f'"{key}" is a reserved key for `metainfo`, but already ' + 'exists in the `data_info`.') + + data_info[key] = deepcopy(self._metainfo[key]) + + return data_info + + def load_data_list(self) -> List[dict]: + """Load data list from COCO annotation file or person detection result + file.""" + + if self.bbox_file: + data_list = self._load_detection_results() + else: + instance_list, image_list = self._load_annotations() + + if self.data_mode == 'topdown': + data_list = self._get_topdown_data_infos(instance_list) + else: + data_list = self._get_bottomup_data_infos( + instance_list, image_list) + + return data_list + + def _load_annotations(self) -> Tuple[List[dict], List[dict]]: + """Load data from annotations in COCO format.""" + + assert exists(self.ann_file), 'Annotation file does not exist' + + with get_local_path(self.ann_file) as local_path: + self.coco = COCO(local_path) + # set the metainfo about categories, which is a list of dict + # and each dict contains the 'id', 'name', etc. 
about this category + self._metainfo['CLASSES'] = self.coco.loadCats(self.coco.getCatIds()) + + instance_list = [] + image_list = [] + + for img_id in self.coco.getImgIds(): + img = self.coco.loadImgs(img_id)[0] + img.update({ + 'img_id': + img_id, + 'img_path': + osp.join(self.data_prefix['img'], img['file_name']), + }) + image_list.append(img) + + ann_ids = self.coco.getAnnIds(imgIds=img_id) + for ann in self.coco.loadAnns(ann_ids): + + instance_info = self.parse_data_info( + dict(raw_ann_info=ann, raw_img_info=img)) + + # skip invalid instance annotation. + if not instance_info: + continue + + instance_list.append(instance_info) + return instance_list, image_list + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw COCO annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict | None: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + # filter invalid instance + if 'bbox' not in ann or 'keypoints' not in ann: + return None + + img_w, img_h = img['width'], img['height'] + + # get bbox in shape [1, 4], formatted as xywh + x, y, w, h = ann['bbox'] + x1 = np.clip(x, 0, img_w - 1) + y1 = np.clip(y, 0, img_h - 1) + x2 = np.clip(x + w, 0, img_w - 1) + y2 = np.clip(y + h, 0, img_h - 1) + + bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4) + + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + _keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + if 'num_keypoints' in ann: + num_keypoints = ann['num_keypoints'] + else: + num_keypoints = np.count_nonzero(keypoints.max(axis=2)) + + data_info = { + 'img_id': ann['image_id'], + 'img_path': img['img_path'], + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': num_keypoints, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'iscrowd': ann.get('iscrowd', 0), + 'segmentation': ann.get('segmentation', None), + 'id': ann['id'], + 'category_id': ann['category_id'], + # store the raw annotation of the instance + # it is useful for evaluation without providing ann_file + 'raw_ann_info': copy.deepcopy(ann), + } + + if 'crowdIndex' in img: + data_info['crowd_index'] = img['crowdIndex'] + + return data_info + + @staticmethod + def _is_valid_instance(data_info: Dict) -> bool: + """Check a data info is an instance with valid bbox and keypoint + annotations.""" + # crowd annotation + if 'iscrowd' in data_info and data_info['iscrowd']: + return False + # invalid keypoints + if 'num_keypoints' in data_info and data_info['num_keypoints'] == 0: + return False + # invalid bbox + if 'bbox' in data_info: + bbox = data_info['bbox'][0] + w, h = bbox[2:4] - bbox[:2] + if w <= 0 or h <= 0: + return False + # invalid keypoints + if 'keypoints' in data_info: + if np.max(data_info['keypoints']) <= 0: + return False + return True + + def _get_topdown_data_infos(self, instance_list: List[Dict]) -> List[Dict]: + """Organize the data list in top-down mode.""" + # sanitize data samples + data_list_tp = list(filter(self._is_valid_instance, instance_list)) + + return data_list_tp + + def _get_bottomup_data_infos(self, instance_list: List[Dict], + 
image_list: List[Dict]) -> List[Dict]: + """Organize the data list in bottom-up mode.""" + + # bottom-up data list + data_list_bu = [] + + used_img_ids = set() + + # group instances by img_id + for img_id, data_infos in groupby(instance_list, + lambda x: x['img_id']): + used_img_ids.add(img_id) + data_infos = list(data_infos) + + # image data + img_path = data_infos[0]['img_path'] + data_info_bu = { + 'img_id': img_id, + 'img_path': img_path, + } + + for key in data_infos[0].keys(): + if key not in data_info_bu: + seq = [d[key] for d in data_infos] + if isinstance(seq[0], np.ndarray): + seq = np.concatenate(seq, axis=0) + data_info_bu[key] = seq + + # The segmentation annotation of invalid objects will be used + # to generate valid region mask in the pipeline. + invalid_segs = [] + for data_info_invalid in filterfalse(self._is_valid_instance, + data_infos): + if 'segmentation' in data_info_invalid: + invalid_segs.append(data_info_invalid['segmentation']) + data_info_bu['invalid_segs'] = invalid_segs + + data_list_bu.append(data_info_bu) + + # add images without instance for evaluation + if self.test_mode: + for img_info in image_list: + if img_info['img_id'] not in used_img_ids: + data_info_bu = { + 'img_id': img_info['img_id'], + 'img_path': img_info['img_path'], + 'id': list(), + 'raw_ann_info': None, + } + data_list_bu.append(data_info_bu) + + return data_list_bu + + def _load_detection_results(self) -> List[dict]: + """Load data from detection results with dummy keypoint annotations.""" + + assert exists(self.ann_file), 'Annotation file does not exist' + assert exists(self.bbox_file), 'Bbox file does not exist' + # load detection results + det_results = load(self.bbox_file) + assert is_list_of(det_results, dict) + + # load coco annotations to build image id-to-name index + with get_local_path(self.ann_file) as local_path: + self.coco = COCO(local_path) + # set the metainfo about categories, which is a list of dict + # and each dict contains the 'id', 'name', etc. about this category + self._metainfo['CLASSES'] = self.coco.loadCats(self.coco.getCatIds()) + + num_keypoints = self.metainfo['num_keypoints'] + data_list = [] + id_ = 0 + for det in det_results: + # remove non-human instances + if det['category_id'] != 1: + continue + + img = self.coco.loadImgs(det['image_id'])[0] + + img_path = osp.join(self.data_prefix['img'], img['file_name']) + bbox_xywh = np.array( + det['bbox'][:4], dtype=np.float32).reshape(1, 4) + bbox = bbox_xywh2xyxy(bbox_xywh) + bbox_score = np.array(det['score'], dtype=np.float32).reshape(1) + + # use dummy keypoint location and visibility + keypoints = np.zeros((1, num_keypoints, 2), dtype=np.float32) + keypoints_visible = np.ones((1, num_keypoints), dtype=np.float32) + + data_list.append({ + 'img_id': det['image_id'], + 'img_path': img_path, + 'img_shape': (img['height'], img['width']), + 'bbox': bbox, + 'bbox_score': bbox_score, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'id': id_, + }) + + id_ += 1 + + return data_list + + def filter_data(self) -> List[dict]: + """Filter annotations according to filter_cfg. Defaults return full + ``data_list``. + + If 'bbox_score_thr` in filter_cfg, the annotation with bbox_score below + the threshold `bbox_score_thr` will be filtered out. 
+ """ + + data_list = self.data_list + + if self.filter_cfg is None: + return data_list + + # filter out annotations with a bbox_score below the threshold + if 'bbox_score_thr' in self.filter_cfg: + + if self.data_mode != 'topdown': + raise ValueError( + f'{self.__class__.__name__} is set to {self.data_mode} ' + 'mode, while "bbox_score_thr" is only supported in ' + 'topdown mode.') + + thr = self.filter_cfg['bbox_score_thr'] + data_list = list( + filterfalse(lambda ann: ann['bbox_score'] < thr, data_list)) + + return data_list diff --git a/mmpose/datasets/datasets/base/base_mocap_dataset.py b/mmpose/datasets/datasets/base/base_mocap_dataset.py index d671a6ae94..eafff4f2b7 100644 --- a/mmpose/datasets/datasets/base/base_mocap_dataset.py +++ b/mmpose/datasets/datasets/base/base_mocap_dataset.py @@ -1,403 +1,403 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -from copy import deepcopy -from itertools import filterfalse, groupby -from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union - -import numpy as np -from mmengine.dataset import BaseDataset, force_full_init -from mmengine.fileio import exists, get_local_path, load -from mmengine.utils import is_abs -from PIL import Image - -from mmpose.registry import DATASETS -from ..utils import parse_pose_metainfo - - -@DATASETS.register_module() -class BaseMocapDataset(BaseDataset): - """Base class for 3d body datasets. - - Args: - ann_file (str): Annotation file path. Default: ''. - seq_len (int): Number of frames in a sequence. Default: 1. - causal (bool): If set to ``True``, the rightmost input frame will be - the target frame. Otherwise, the middle input frame will be the - target frame. Default: ``True``. - subset_frac (float): The fraction to reduce dataset size. If set to 1, - the dataset size is not reduced. Default: 1. - camera_param_file (str): Cameras' parameters file. Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. - Default: ``dict(img='')``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. 
The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict() - - def __init__(self, - ann_file: str = '', - seq_len: int = 1, - causal: bool = True, - subset_frac: float = 1.0, - camera_param_file: Optional[str] = None, - data_mode: str = 'topdown', - metainfo: Optional[dict] = None, - data_root: Optional[str] = None, - data_prefix: dict = dict(img=''), - filter_cfg: Optional[dict] = None, - indices: Optional[Union[int, Sequence[int]]] = None, - serialize_data: bool = True, - pipeline: List[Union[dict, Callable]] = [], - test_mode: bool = False, - lazy_init: bool = False, - max_refetch: int = 1000): - - if data_mode not in {'topdown', 'bottomup'}: - raise ValueError( - f'{self.__class__.__name__} got invalid data_mode: ' - f'{data_mode}. Should be "topdown" or "bottomup".') - self.data_mode = data_mode - - _ann_file = ann_file - if not is_abs(_ann_file): - _ann_file = osp.join(data_root, _ann_file) - assert exists(_ann_file), 'Annotation file does not exist.' - with get_local_path(_ann_file) as local_path: - self.ann_data = np.load(local_path) - - self.camera_param_file = camera_param_file - if self.camera_param_file: - if not is_abs(self.camera_param_file): - self.camera_param_file = osp.join(data_root, - self.camera_param_file) - assert exists(self.camera_param_file) - self.camera_param = load(self.camera_param_file) - - self.seq_len = seq_len - self.causal = causal - - assert 0 < subset_frac <= 1, ( - f'Unsupported `subset_frac` {subset_frac}. Supported range ' - 'is (0, 1].') - self.subset_frac = subset_frac - - self.sequence_indices = self.get_sequence_indices() - - super().__init__( - ann_file=ann_file, - metainfo=metainfo, - data_root=data_root, - data_prefix=data_prefix, - filter_cfg=filter_cfg, - indices=indices, - serialize_data=serialize_data, - pipeline=pipeline, - test_mode=test_mode, - lazy_init=lazy_init, - max_refetch=max_refetch) - - @classmethod - def _load_metainfo(cls, metainfo: dict = None) -> dict: - """Collect meta information from the dictionary of meta. - - Args: - metainfo (dict): Raw data of pose meta information. - - Returns: - dict: Parsed meta information. - """ - - if metainfo is None: - metainfo = deepcopy(cls.METAINFO) - - if not isinstance(metainfo, dict): - raise TypeError( - f'metainfo should be a dict, but got {type(metainfo)}') - - # parse pose metainfo if it has been assigned - if metainfo: - metainfo = parse_pose_metainfo(metainfo) - return metainfo - - @force_full_init - def prepare_data(self, idx) -> Any: - """Get data processed by ``self.pipeline``. - - :class:`BaseCocoStyleDataset` overrides this method from - :class:`mmengine.dataset.BaseDataset` to add the metainfo into - the ``data_info`` before it is passed to the pipeline. - - Args: - idx (int): The index of ``data_info``. - - Returns: - Any: Depends on ``self.pipeline``. - """ - data_info = self.get_data_info(idx) - - return self.pipeline(data_info) - - def get_data_info(self, idx: int) -> dict: - """Get data info by index. - - Args: - idx (int): Index of data info. - - Returns: - dict: Data info. 
- """ - data_info = super().get_data_info(idx) - - # Add metainfo items that are required in the pipeline and the model - metainfo_keys = [ - 'upper_body_ids', 'lower_body_ids', 'flip_pairs', - 'dataset_keypoint_weights', 'flip_indices', 'skeleton_links' - ] - - for key in metainfo_keys: - assert key not in data_info, ( - f'"{key}" is a reserved key for `metainfo`, but already ' - 'exists in the `data_info`.') - - data_info[key] = deepcopy(self._metainfo[key]) - - return data_info - - def load_data_list(self) -> List[dict]: - """Load data list from COCO annotation file or person detection result - file.""" - - instance_list, image_list = self._load_annotations() - - if self.data_mode == 'topdown': - data_list = self._get_topdown_data_infos(instance_list) - else: - data_list = self._get_bottomup_data_infos(instance_list, - image_list) - - return data_list - - def get_img_info(self, img_idx, img_name): - try: - with get_local_path(osp.join(self.data_prefix['img'], - img_name)) as local_path: - im = Image.open(local_path) - w, h = im.size - im.close() - except: # noqa: E722 - return None - - img = { - 'file_name': img_name, - 'height': h, - 'width': w, - 'id': img_idx, - 'img_id': img_idx, - 'img_path': osp.join(self.data_prefix['img'], img_name), - } - return img - - def get_sequence_indices(self) -> List[List[int]]: - """Build sequence indices. - - The default method creates sample indices that each sample is a single - frame (i.e. seq_len=1). Override this method in the subclass to define - how frames are sampled to form data samples. - - Outputs: - sample_indices: the frame indices of each sample. - For a sample, all frames will be treated as an input sequence, - and the ground-truth pose of the last frame will be the target. - """ - sequence_indices = [] - if self.seq_len == 1: - num_imgs = len(self.ann_data['imgname']) - sequence_indices = [[idx] for idx in range(num_imgs)] - else: - raise NotImplementedError('Multi-frame data sample unsupported!') - return sequence_indices - - def _load_annotations(self) -> Tuple[List[dict], List[dict]]: - """Load data from annotations in COCO format.""" - num_keypoints = self.metainfo['num_keypoints'] - - img_names = self.ann_data['imgname'] - num_imgs = len(img_names) - - if 'S' in self.ann_data.keys(): - kpts_3d = self.ann_data['S'] - else: - kpts_3d = np.zeros((num_imgs, num_keypoints, 4), dtype=np.float32) - - if 'part' in self.ann_data.keys(): - kpts_2d = self.ann_data['part'] - else: - kpts_2d = np.zeros((num_imgs, num_keypoints, 3), dtype=np.float32) - - if 'center' in self.ann_data.keys(): - centers = self.ann_data['center'] - else: - centers = np.zeros((num_imgs, 2), dtype=np.float32) - - if 'scale' in self.ann_data.keys(): - scales = self.ann_data['scale'].astype(np.float32) - else: - scales = np.zeros(num_imgs, dtype=np.float32) - - instance_list = [] - image_list = [] - - for idx, frame_ids in enumerate(self.sequence_indices): - assert len(frame_ids) == self.seq_len - - _img_names = img_names[frame_ids] - - _keypoints = kpts_2d[frame_ids].astype(np.float32) - keypoints = _keypoints[..., :2] - keypoints_visible = _keypoints[..., 2] - - _keypoints_3d = kpts_3d[frame_ids].astype(np.float32) - keypoints_3d = _keypoints_3d[..., :3] - keypoints_3d_visible = _keypoints_3d[..., 3] - - target_idx = -1 if self.causal else int(self.seq_len) // 2 - - instance_info = { - 'num_keypoints': num_keypoints, - 'keypoints': keypoints, - 'keypoints_visible': keypoints_visible, - 'keypoints_3d': keypoints_3d, - 'keypoints_3d_visible': keypoints_3d_visible, - 
'scale': scales[idx], - 'center': centers[idx].astype(np.float32).reshape(1, -1), - 'id': idx, - 'category_id': 1, - 'iscrowd': 0, - 'img_paths': list(_img_names), - 'img_ids': frame_ids, - 'lifting_target': keypoints_3d[target_idx], - 'lifting_target_visible': keypoints_3d_visible[target_idx], - 'target_img_path': _img_names[target_idx], - } - - if self.camera_param_file: - _cam_param = self.get_camera_param(_img_names[0]) - instance_info['camera_param'] = _cam_param - - instance_list.append(instance_info) - - for idx, imgname in enumerate(img_names): - img_info = self.get_img_info(idx, imgname) - image_list.append(img_info) - - return instance_list, image_list - - def get_camera_param(self, imgname): - """Get camera parameters of a frame by its image name. - - Override this method to specify how to get camera parameters. - """ - raise NotImplementedError - - @staticmethod - def _is_valid_instance(data_info: Dict) -> bool: - """Check a data info is an instance with valid bbox and keypoint - annotations.""" - # crowd annotation - if 'iscrowd' in data_info and data_info['iscrowd']: - return False - # invalid keypoints - if 'num_keypoints' in data_info and data_info['num_keypoints'] == 0: - return False - # invalid keypoints - if 'keypoints' in data_info: - if np.max(data_info['keypoints']) <= 0: - return False - return True - - def _get_topdown_data_infos(self, instance_list: List[Dict]) -> List[Dict]: - """Organize the data list in top-down mode.""" - # sanitize data samples - data_list_tp = list(filter(self._is_valid_instance, instance_list)) - - return data_list_tp - - def _get_bottomup_data_infos(self, instance_list: List[Dict], - image_list: List[Dict]) -> List[Dict]: - """Organize the data list in bottom-up mode.""" - - # bottom-up data list - data_list_bu = [] - - used_img_ids = set() - - # group instances by img_id - for img_ids, data_infos in groupby(instance_list, - lambda x: x['img_ids']): - for img_id in img_ids: - used_img_ids.add(img_id) - data_infos = list(data_infos) - - # image data - img_paths = data_infos[0]['img_paths'] - data_info_bu = { - 'img_ids': img_ids, - 'img_paths': img_paths, - } - - for key in data_infos[0].keys(): - if key not in data_info_bu: - seq = [d[key] for d in data_infos] - if isinstance(seq[0], np.ndarray): - seq = np.concatenate(seq, axis=0) - data_info_bu[key] = seq - - # The segmentation annotation of invalid objects will be used - # to generate valid region mask in the pipeline. - invalid_segs = [] - for data_info_invalid in filterfalse(self._is_valid_instance, - data_infos): - if 'segmentation' in data_info_invalid: - invalid_segs.append(data_info_invalid['segmentation']) - data_info_bu['invalid_segs'] = invalid_segs - - data_list_bu.append(data_info_bu) - - # add images without instance for evaluation - if self.test_mode: - for img_info in image_list: - if img_info['img_id'] not in used_img_ids: - data_info_bu = { - 'img_ids': [img_info['img_id']], - 'img_path': [img_info['img_path']], - 'id': list(), - } - data_list_bu.append(data_info_bu) - - return data_list_bu +# Copyright (c) OpenMMLab. All rights reserved. 
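Since get_camera_param() above is left unimplemented, a subclass supplies the camera lookup. A hedged sketch with a hypothetical class name and key convention, assuming BaseMocapDataset is exported from the base package the same way BaseCocoStyleDataset is, and that the referenced metainfo config file exists:

from mmpose.datasets.datasets.base import BaseMocapDataset
from mmpose.registry import DATASETS


@DATASETS.register_module()
class ToyMocapDataset(BaseMocapDataset):          # hypothetical subclass
    METAINFO = dict(from_file='configs/_base_/datasets/h36m.py')  # assumed metainfo file

    def get_camera_param(self, imgname):
        # self.camera_param is loaded from camera_param_file in __init__;
        # keying it directly by image name is a made-up convention here.
        return self.camera_param[imgname]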
+import os.path as osp +from copy import deepcopy +from itertools import filterfalse, groupby +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union + +import numpy as np +from mmengine.dataset import BaseDataset, force_full_init +from mmengine.fileio import exists, get_local_path, load +from mmengine.utils import is_abs +from PIL import Image + +from mmpose.registry import DATASETS +from ..utils import parse_pose_metainfo + + +@DATASETS.register_module() +class BaseMocapDataset(BaseDataset): + """Base class for 3d body datasets. + + Args: + ann_file (str): Annotation file path. Default: ''. + seq_len (int): Number of frames in a sequence. Default: 1. + causal (bool): If set to ``True``, the rightmost input frame will be + the target frame. Otherwise, the middle input frame will be the + target frame. Default: ``True``. + subset_frac (float): The fraction to reduce dataset size. If set to 1, + the dataset size is not reduced. Default: 1. + camera_param_file (str): Cameras' parameters file. Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. + Default: ``dict(img='')``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict() + + def __init__(self, + ann_file: str = '', + seq_len: int = 1, + causal: bool = True, + subset_frac: float = 1.0, + camera_param_file: Optional[str] = None, + data_mode: str = 'topdown', + metainfo: Optional[dict] = None, + data_root: Optional[str] = None, + data_prefix: dict = dict(img=''), + filter_cfg: Optional[dict] = None, + indices: Optional[Union[int, Sequence[int]]] = None, + serialize_data: bool = True, + pipeline: List[Union[dict, Callable]] = [], + test_mode: bool = False, + lazy_init: bool = False, + max_refetch: int = 1000): + + if data_mode not in {'topdown', 'bottomup'}: + raise ValueError( + f'{self.__class__.__name__} got invalid data_mode: ' + f'{data_mode}. 
Should be "topdown" or "bottomup".') + self.data_mode = data_mode + + _ann_file = ann_file + if not is_abs(_ann_file): + _ann_file = osp.join(data_root, _ann_file) + assert exists(_ann_file), 'Annotation file does not exist.' + with get_local_path(_ann_file) as local_path: + self.ann_data = np.load(local_path) + + self.camera_param_file = camera_param_file + if self.camera_param_file: + if not is_abs(self.camera_param_file): + self.camera_param_file = osp.join(data_root, + self.camera_param_file) + assert exists(self.camera_param_file) + self.camera_param = load(self.camera_param_file) + + self.seq_len = seq_len + self.causal = causal + + assert 0 < subset_frac <= 1, ( + f'Unsupported `subset_frac` {subset_frac}. Supported range ' + 'is (0, 1].') + self.subset_frac = subset_frac + + self.sequence_indices = self.get_sequence_indices() + + super().__init__( + ann_file=ann_file, + metainfo=metainfo, + data_root=data_root, + data_prefix=data_prefix, + filter_cfg=filter_cfg, + indices=indices, + serialize_data=serialize_data, + pipeline=pipeline, + test_mode=test_mode, + lazy_init=lazy_init, + max_refetch=max_refetch) + + @classmethod + def _load_metainfo(cls, metainfo: dict = None) -> dict: + """Collect meta information from the dictionary of meta. + + Args: + metainfo (dict): Raw data of pose meta information. + + Returns: + dict: Parsed meta information. + """ + + if metainfo is None: + metainfo = deepcopy(cls.METAINFO) + + if not isinstance(metainfo, dict): + raise TypeError( + f'metainfo should be a dict, but got {type(metainfo)}') + + # parse pose metainfo if it has been assigned + if metainfo: + metainfo = parse_pose_metainfo(metainfo) + return metainfo + + @force_full_init + def prepare_data(self, idx) -> Any: + """Get data processed by ``self.pipeline``. + + :class:`BaseCocoStyleDataset` overrides this method from + :class:`mmengine.dataset.BaseDataset` to add the metainfo into + the ``data_info`` before it is passed to the pipeline. + + Args: + idx (int): The index of ``data_info``. + + Returns: + Any: Depends on ``self.pipeline``. + """ + data_info = self.get_data_info(idx) + + return self.pipeline(data_info) + + def get_data_info(self, idx: int) -> dict: + """Get data info by index. + + Args: + idx (int): Index of data info. + + Returns: + dict: Data info. 
+ """ + data_info = super().get_data_info(idx) + + # Add metainfo items that are required in the pipeline and the model + metainfo_keys = [ + 'upper_body_ids', 'lower_body_ids', 'flip_pairs', + 'dataset_keypoint_weights', 'flip_indices', 'skeleton_links' + ] + + for key in metainfo_keys: + assert key not in data_info, ( + f'"{key}" is a reserved key for `metainfo`, but already ' + 'exists in the `data_info`.') + + data_info[key] = deepcopy(self._metainfo[key]) + + return data_info + + def load_data_list(self) -> List[dict]: + """Load data list from COCO annotation file or person detection result + file.""" + + instance_list, image_list = self._load_annotations() + + if self.data_mode == 'topdown': + data_list = self._get_topdown_data_infos(instance_list) + else: + data_list = self._get_bottomup_data_infos(instance_list, + image_list) + + return data_list + + def get_img_info(self, img_idx, img_name): + try: + with get_local_path(osp.join(self.data_prefix['img'], + img_name)) as local_path: + im = Image.open(local_path) + w, h = im.size + im.close() + except: # noqa: E722 + return None + + img = { + 'file_name': img_name, + 'height': h, + 'width': w, + 'id': img_idx, + 'img_id': img_idx, + 'img_path': osp.join(self.data_prefix['img'], img_name), + } + return img + + def get_sequence_indices(self) -> List[List[int]]: + """Build sequence indices. + + The default method creates sample indices that each sample is a single + frame (i.e. seq_len=1). Override this method in the subclass to define + how frames are sampled to form data samples. + + Outputs: + sample_indices: the frame indices of each sample. + For a sample, all frames will be treated as an input sequence, + and the ground-truth pose of the last frame will be the target. + """ + sequence_indices = [] + if self.seq_len == 1: + num_imgs = len(self.ann_data['imgname']) + sequence_indices = [[idx] for idx in range(num_imgs)] + else: + raise NotImplementedError('Multi-frame data sample unsupported!') + return sequence_indices + + def _load_annotations(self) -> Tuple[List[dict], List[dict]]: + """Load data from annotations in COCO format.""" + num_keypoints = self.metainfo['num_keypoints'] + + img_names = self.ann_data['imgname'] + num_imgs = len(img_names) + + if 'S' in self.ann_data.keys(): + kpts_3d = self.ann_data['S'] + else: + kpts_3d = np.zeros((num_imgs, num_keypoints, 4), dtype=np.float32) + + if 'part' in self.ann_data.keys(): + kpts_2d = self.ann_data['part'] + else: + kpts_2d = np.zeros((num_imgs, num_keypoints, 3), dtype=np.float32) + + if 'center' in self.ann_data.keys(): + centers = self.ann_data['center'] + else: + centers = np.zeros((num_imgs, 2), dtype=np.float32) + + if 'scale' in self.ann_data.keys(): + scales = self.ann_data['scale'].astype(np.float32) + else: + scales = np.zeros(num_imgs, dtype=np.float32) + + instance_list = [] + image_list = [] + + for idx, frame_ids in enumerate(self.sequence_indices): + assert len(frame_ids) == self.seq_len + + _img_names = img_names[frame_ids] + + _keypoints = kpts_2d[frame_ids].astype(np.float32) + keypoints = _keypoints[..., :2] + keypoints_visible = _keypoints[..., 2] + + _keypoints_3d = kpts_3d[frame_ids].astype(np.float32) + keypoints_3d = _keypoints_3d[..., :3] + keypoints_3d_visible = _keypoints_3d[..., 3] + + target_idx = -1 if self.causal else int(self.seq_len) // 2 + + instance_info = { + 'num_keypoints': num_keypoints, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'keypoints_3d': keypoints_3d, + 'keypoints_3d_visible': keypoints_3d_visible, + 
'scale': scales[idx], + 'center': centers[idx].astype(np.float32).reshape(1, -1), + 'id': idx, + 'category_id': 1, + 'iscrowd': 0, + 'img_paths': list(_img_names), + 'img_ids': frame_ids, + 'lifting_target': keypoints_3d[target_idx], + 'lifting_target_visible': keypoints_3d_visible[target_idx], + 'target_img_path': _img_names[target_idx], + } + + if self.camera_param_file: + _cam_param = self.get_camera_param(_img_names[0]) + instance_info['camera_param'] = _cam_param + + instance_list.append(instance_info) + + for idx, imgname in enumerate(img_names): + img_info = self.get_img_info(idx, imgname) + image_list.append(img_info) + + return instance_list, image_list + + def get_camera_param(self, imgname): + """Get camera parameters of a frame by its image name. + + Override this method to specify how to get camera parameters. + """ + raise NotImplementedError + + @staticmethod + def _is_valid_instance(data_info: Dict) -> bool: + """Check a data info is an instance with valid bbox and keypoint + annotations.""" + # crowd annotation + if 'iscrowd' in data_info and data_info['iscrowd']: + return False + # invalid keypoints + if 'num_keypoints' in data_info and data_info['num_keypoints'] == 0: + return False + # invalid keypoints + if 'keypoints' in data_info: + if np.max(data_info['keypoints']) <= 0: + return False + return True + + def _get_topdown_data_infos(self, instance_list: List[Dict]) -> List[Dict]: + """Organize the data list in top-down mode.""" + # sanitize data samples + data_list_tp = list(filter(self._is_valid_instance, instance_list)) + + return data_list_tp + + def _get_bottomup_data_infos(self, instance_list: List[Dict], + image_list: List[Dict]) -> List[Dict]: + """Organize the data list in bottom-up mode.""" + + # bottom-up data list + data_list_bu = [] + + used_img_ids = set() + + # group instances by img_id + for img_ids, data_infos in groupby(instance_list, + lambda x: x['img_ids']): + for img_id in img_ids: + used_img_ids.add(img_id) + data_infos = list(data_infos) + + # image data + img_paths = data_infos[0]['img_paths'] + data_info_bu = { + 'img_ids': img_ids, + 'img_paths': img_paths, + } + + for key in data_infos[0].keys(): + if key not in data_info_bu: + seq = [d[key] for d in data_infos] + if isinstance(seq[0], np.ndarray): + seq = np.concatenate(seq, axis=0) + data_info_bu[key] = seq + + # The segmentation annotation of invalid objects will be used + # to generate valid region mask in the pipeline. + invalid_segs = [] + for data_info_invalid in filterfalse(self._is_valid_instance, + data_infos): + if 'segmentation' in data_info_invalid: + invalid_segs.append(data_info_invalid['segmentation']) + data_info_bu['invalid_segs'] = invalid_segs + + data_list_bu.append(data_info_bu) + + # add images without instance for evaluation + if self.test_mode: + for img_info in image_list: + if img_info['img_id'] not in used_img_ids: + data_info_bu = { + 'img_ids': [img_info['img_id']], + 'img_path': [img_info['img_path']], + 'id': list(), + } + data_list_bu.append(data_info_bu) + + return data_list_bu diff --git a/mmpose/datasets/datasets/body/__init__.py b/mmpose/datasets/datasets/body/__init__.py index 1405b0d675..93fdaa4e81 100644 --- a/mmpose/datasets/datasets/body/__init__.py +++ b/mmpose/datasets/datasets/body/__init__.py @@ -1,18 +1,18 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from .aic_dataset import AicDataset -from .coco_dataset import CocoDataset -from .crowdpose_dataset import CrowdPoseDataset -from .humanart_dataset import HumanArtDataset -from .jhmdb_dataset import JhmdbDataset -from .mhp_dataset import MhpDataset -from .mpii_dataset import MpiiDataset -from .mpii_trb_dataset import MpiiTrbDataset -from .ochuman_dataset import OCHumanDataset -from .posetrack18_dataset import PoseTrack18Dataset -from .posetrack18_video_dataset import PoseTrack18VideoDataset - -__all__ = [ - 'CocoDataset', 'MpiiDataset', 'MpiiTrbDataset', 'AicDataset', - 'CrowdPoseDataset', 'OCHumanDataset', 'MhpDataset', 'PoseTrack18Dataset', - 'JhmdbDataset', 'PoseTrack18VideoDataset', 'HumanArtDataset' -] +# Copyright (c) OpenMMLab. All rights reserved. +from .aic_dataset import AicDataset +from .coco_dataset import CocoDataset +from .crowdpose_dataset import CrowdPoseDataset +from .humanart_dataset import HumanArtDataset +from .jhmdb_dataset import JhmdbDataset +from .mhp_dataset import MhpDataset +from .mpii_dataset import MpiiDataset +from .mpii_trb_dataset import MpiiTrbDataset +from .ochuman_dataset import OCHumanDataset +from .posetrack18_dataset import PoseTrack18Dataset +from .posetrack18_video_dataset import PoseTrack18VideoDataset + +__all__ = [ + 'CocoDataset', 'MpiiDataset', 'MpiiTrbDataset', 'AicDataset', + 'CrowdPoseDataset', 'OCHumanDataset', 'MhpDataset', 'PoseTrack18Dataset', + 'JhmdbDataset', 'PoseTrack18VideoDataset', 'HumanArtDataset' +] diff --git a/mmpose/datasets/datasets/body/aic_dataset.py b/mmpose/datasets/datasets/body/aic_dataset.py index b9c7cccc76..5b3ab5353e 100644 --- a/mmpose/datasets/datasets/body/aic_dataset.py +++ b/mmpose/datasets/datasets/body/aic_dataset.py @@ -1,70 +1,70 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class AicDataset(BaseCocoStyleDataset): - """AIC dataset for pose estimation. - - "AI Challenger : A Large-scale Dataset for Going Deeper - in Image Understanding", arXiv'2017. - More details can be found in the `paper - `__ - - AIC keypoints:: - - 0: "right_shoulder", - 1: "right_elbow", - 2: "right_wrist", - 3: "left_shoulder", - 4: "left_elbow", - 5: "left_wrist", - 6: "right_hip", - 7: "right_knee", - 8: "right_ankle", - 9: "left_hip", - 10: "left_knee", - 11: "left_ankle", - 12: "head_top", - 13: "neck" - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. 
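Because every class listed in __all__ above is registered in DATASETS, a dataset is normally built from a plain config dict rather than imported directly. A minimal sketch with hypothetical paths (building triggers annotation loading, so the files must exist):

from mmpose.registry import DATASETS

cfg = dict(
    type='CocoDataset',
    data_root='data/coco',                                  # hypothetical path
    ann_file='annotations/person_keypoints_val2017.json',   # hypothetical path
    data_prefix=dict(img='val2017/'),
    data_mode='topdown',
    pipeline=[])
dataset = DATASETS.build(cfg)
print(len(dataset), dataset.metainfo['num_keypoints'])      # e.g. "<num samples> 17"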
- indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/aic.py') +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class AicDataset(BaseCocoStyleDataset): + """AIC dataset for pose estimation. + + "AI Challenger : A Large-scale Dataset for Going Deeper + in Image Understanding", arXiv'2017. + More details can be found in the `paper + `__ + + AIC keypoints:: + + 0: "right_shoulder", + 1: "right_elbow", + 2: "right_wrist", + 3: "left_shoulder", + 4: "left_elbow", + 5: "left_wrist", + 6: "right_hip", + 7: "right_knee", + 8: "right_ankle", + 9: "left_hip", + 10: "left_knee", + 11: "left_ankle", + 12: "head_top", + 13: "neck" + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. 
In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/aic.py') diff --git a/mmpose/datasets/datasets/body/coco_dataset.py b/mmpose/datasets/datasets/body/coco_dataset.py index 7cc971f91f..789a1f0561 100644 --- a/mmpose/datasets/datasets/body/coco_dataset.py +++ b/mmpose/datasets/datasets/body/coco_dataset.py @@ -1,72 +1,72 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class CocoDataset(BaseCocoStyleDataset): - """COCO dataset for pose estimation. - - "Microsoft COCO: Common Objects in Context", ECCV'2014. - More details can be found in the `paper - `__ . - - COCO keypoints:: - - 0: 'nose', - 1: 'left_eye', - 2: 'right_eye', - 3: 'left_ear', - 4: 'right_ear', - 5: 'left_shoulder', - 6: 'right_shoulder', - 7: 'left_elbow', - 8: 'right_elbow', - 9: 'left_wrist', - 10: 'right_wrist', - 11: 'left_hip', - 12: 'right_hip', - 13: 'left_knee', - 14: 'right_knee', - 15: 'left_ankle', - 16: 'right_ankle' - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. 
- """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/coco.py') +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class CocoDataset(BaseCocoStyleDataset): + """COCO dataset for pose estimation. + + "Microsoft COCO: Common Objects in Context", ECCV'2014. + More details can be found in the `paper + `__ . + + COCO keypoints:: + + 0: 'nose', + 1: 'left_eye', + 2: 'right_eye', + 3: 'left_ear', + 4: 'right_ear', + 5: 'left_shoulder', + 6: 'right_shoulder', + 7: 'left_elbow', + 8: 'right_elbow', + 9: 'left_wrist', + 10: 'right_wrist', + 11: 'left_hip', + 12: 'right_hip', + 13: 'left_knee', + 14: 'right_knee', + 15: 'left_ankle', + 16: 'right_ankle' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/coco.py') diff --git a/mmpose/datasets/datasets/body/crowdpose_dataset.py b/mmpose/datasets/datasets/body/crowdpose_dataset.py index 4218708ff2..56ca02cf63 100644 --- a/mmpose/datasets/datasets/body/crowdpose_dataset.py +++ b/mmpose/datasets/datasets/body/crowdpose_dataset.py @@ -1,70 +1,70 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class CrowdPoseDataset(BaseCocoStyleDataset): - """CrowdPose dataset for pose estimation. - - "CrowdPose: Efficient Crowded Scenes Pose Estimation and - A New Benchmark", CVPR'2019. 
- More details can be found in the `paper - `__. - - CrowdPose keypoints:: - - 0: 'left_shoulder', - 1: 'right_shoulder', - 2: 'left_elbow', - 3: 'right_elbow', - 4: 'left_wrist', - 5: 'right_wrist', - 6: 'left_hip', - 7: 'right_hip', - 8: 'left_knee', - 9: 'right_knee', - 10: 'left_ankle', - 11: 'right_ankle', - 12: 'top_head', - 13: 'neck' - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/crowdpose.py') +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class CrowdPoseDataset(BaseCocoStyleDataset): + """CrowdPose dataset for pose estimation. + + "CrowdPose: Efficient Crowded Scenes Pose Estimation and + A New Benchmark", CVPR'2019. + More details can be found in the `paper + `__. + + CrowdPose keypoints:: + + 0: 'left_shoulder', + 1: 'right_shoulder', + 2: 'left_elbow', + 3: 'right_elbow', + 4: 'left_wrist', + 5: 'right_wrist', + 6: 'left_hip', + 7: 'right_hip', + 8: 'left_knee', + 9: 'right_knee', + 10: 'left_ankle', + 11: 'right_ankle', + 12: 'top_head', + 13: 'neck' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. 
+ data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/crowdpose.py') diff --git a/mmpose/datasets/datasets/body/humanart_dataset.py b/mmpose/datasets/datasets/body/humanart_dataset.py index 719f35fc9e..9af5e4e5a2 100644 --- a/mmpose/datasets/datasets/body/humanart_dataset.py +++ b/mmpose/datasets/datasets/body/humanart_dataset.py @@ -1,73 +1,73 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class HumanArtDataset(BaseCocoStyleDataset): - """Human-Art dataset for pose estimation. - - "Human-Art: A Versatile Human-Centric Dataset - Bridging Natural and Artificial Scenes", CVPR'2023. - More details can be found in the `paper - `__ . - - Human-Art keypoints:: - - 0: 'nose', - 1: 'left_eye', - 2: 'right_eye', - 3: 'left_ear', - 4: 'right_ear', - 5: 'left_shoulder', - 6: 'right_shoulder', - 7: 'left_elbow', - 8: 'right_elbow', - 9: 'left_wrist', - 10: 'right_wrist', - 11: 'left_hip', - 12: 'right_hip', - 13: 'left_knee', - 14: 'right_knee', - 15: 'left_ankle', - 16: 'right_ankle' - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. 
- data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/humanart.py') +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class HumanArtDataset(BaseCocoStyleDataset): + """Human-Art dataset for pose estimation. + + "Human-Art: A Versatile Human-Centric Dataset + Bridging Natural and Artificial Scenes", CVPR'2023. + More details can be found in the `paper + `__ . + + Human-Art keypoints:: + + 0: 'nose', + 1: 'left_eye', + 2: 'right_eye', + 3: 'left_ear', + 4: 'right_ear', + 5: 'left_shoulder', + 6: 'right_shoulder', + 7: 'left_elbow', + 8: 'right_elbow', + 9: 'left_wrist', + 10: 'right_wrist', + 11: 'left_hip', + 12: 'right_hip', + 13: 'left_knee', + 14: 'right_knee', + 15: 'left_ankle', + 16: 'right_ankle' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. 
+ Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/humanart.py') diff --git a/mmpose/datasets/datasets/body/jhmdb_dataset.py b/mmpose/datasets/datasets/body/jhmdb_dataset.py index 7d72a7ddc5..e76dec4de8 100644 --- a/mmpose/datasets/datasets/body/jhmdb_dataset.py +++ b/mmpose/datasets/datasets/body/jhmdb_dataset.py @@ -1,135 +1,135 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -from typing import Optional - -import numpy as np - -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class JhmdbDataset(BaseCocoStyleDataset): - """JhmdbDataset dataset for pose estimation. - - "Towards understanding action recognition", ICCV'2013. - More details can be found in the `paper - `__ - - sub-JHMDB keypoints:: - - 0: "neck", - 1: "belly", - 2: "head", - 3: "right_shoulder", - 4: "left_shoulder", - 5: "right_hip", - 6: "left_hip", - 7: "right_elbow", - 8: "left_elbow", - 9: "right_knee", - 10: "left_knee", - 11: "right_wrist", - 12: "left_wrist", - 13: "right_ankle", - 14: "left_ankle" - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. 
``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/jhmdb.py') - - def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: - """Parse raw COCO annotation of an instance. - - Args: - raw_data_info (dict): Raw data information loaded from - ``ann_file``. It should have following contents: - - - ``'raw_ann_info'``: Raw annotation of an instance - - ``'raw_img_info'``: Raw information of the image that - contains the instance - - Returns: - dict: Parsed instance annotation - """ - - ann = raw_data_info['raw_ann_info'] - img = raw_data_info['raw_img_info'] - - img_path = osp.join(self.data_prefix['img'], img['file_name']) - img_w, img_h = img['width'], img['height'] - - # get bbox in shape [1, 4], formatted as xywh - x, y, w, h = ann['bbox'] - # JHMDB uses matlab format, index is 1-based, - # we should first convert to 0-based index - x -= 1 - y -= 1 - x1 = np.clip(x, 0, img_w - 1) - y1 = np.clip(y, 0, img_h - 1) - x2 = np.clip(x + w, 0, img_w - 1) - y2 = np.clip(y + h, 0, img_h - 1) - - bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4) - - # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] - _keypoints = np.array( - ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) - # JHMDB uses matlab format, index is 1-based, - # we should first convert to 0-based index - keypoints = _keypoints[..., :2] - 1 - keypoints_visible = np.minimum(1, _keypoints[..., 2]) - - num_keypoints = np.count_nonzero(keypoints.max(axis=2)) - - data_info = { - 'img_id': ann['image_id'], - 'img_path': img_path, - 'bbox': bbox, - 'bbox_score': np.ones(1, dtype=np.float32), - 'num_keypoints': num_keypoints, - 'keypoints': keypoints, - 'keypoints_visible': keypoints_visible, - 'iscrowd': ann.get('iscrowd', 0), - 'segmentation': ann.get('segmentation', None), - 'id': ann['id'], - } - - return data_info +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import Optional + +import numpy as np + +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class JhmdbDataset(BaseCocoStyleDataset): + """JhmdbDataset dataset for pose estimation. + + "Towards understanding action recognition", ICCV'2013. + More details can be found in the `paper + `__ + + sub-JHMDB keypoints:: + + 0: "neck", + 1: "belly", + 2: "head", + 3: "right_shoulder", + 4: "left_shoulder", + 5: "right_hip", + 6: "left_hip", + 7: "right_elbow", + 8: "left_elbow", + 9: "right_knee", + 10: "left_knee", + 11: "right_wrist", + 12: "left_wrist", + 13: "right_ankle", + 14: "left_ankle" + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. 
Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/jhmdb.py') + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw COCO annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + img_path = osp.join(self.data_prefix['img'], img['file_name']) + img_w, img_h = img['width'], img['height'] + + # get bbox in shape [1, 4], formatted as xywh + x, y, w, h = ann['bbox'] + # JHMDB uses matlab format, index is 1-based, + # we should first convert to 0-based index + x -= 1 + y -= 1 + x1 = np.clip(x, 0, img_w - 1) + y1 = np.clip(y, 0, img_h - 1) + x2 = np.clip(x + w, 0, img_w - 1) + y2 = np.clip(y + h, 0, img_h - 1) + + bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4) + + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + _keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + # JHMDB uses matlab format, index is 1-based, + # we should first convert to 0-based index + keypoints = _keypoints[..., :2] - 1 + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + num_keypoints = np.count_nonzero(keypoints.max(axis=2)) + + data_info = { + 'img_id': ann['image_id'], + 'img_path': img_path, + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': num_keypoints, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'iscrowd': ann.get('iscrowd', 0), + 'segmentation': ann.get('segmentation', None), + 'id': ann['id'], + } + + return data_info diff --git a/mmpose/datasets/datasets/body/mhp_dataset.py b/mmpose/datasets/datasets/body/mhp_dataset.py index 55d3360253..e8d5410f73 100644 --- a/mmpose/datasets/datasets/body/mhp_dataset.py +++ b/mmpose/datasets/datasets/body/mhp_dataset.py @@ -1,72 +1,72 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
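# --- reviewer sketch (not part of the diff) ---------------------------------
# Standalone illustration of the JHMDB annotation parsing shown above in
# parse_data_info: the raw bbox and keypoints use MATLAB's 1-based indexing,
# so they are shifted to 0-based, the bbox is clipped to the image and turned
# from xywh into xyxy, and the third keypoint channel is squashed to a 0/1
# visibility flag. All numbers below are made up for illustration.
import numpy as np

img_w, img_h = 320, 240
x, y, w, h = 10.0, 20.0, 100.0, 150.0            # raw xywh, 1-based

x, y = x - 1, y - 1                               # 1-based -> 0-based
x1, y1 = np.clip(x, 0, img_w - 1), np.clip(y, 0, img_h - 1)
x2, y2 = np.clip(x + w, 0, img_w - 1), np.clip(y + h, 0, img_h - 1)
bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4)

raw_kpts = np.array([[31.0, 41.0, 2.0]], dtype=np.float32).reshape(1, -1, 3)
keypoints = raw_kpts[..., :2] - 1                 # 1-based -> 0-based
keypoints_visible = np.minimum(1, raw_kpts[..., 2])   # {0,1,2} -> {0,1}

print(bbox)               # [[  9.  19. 109. 169.]]
print(keypoints)          # [[[30. 40.]]]
print(keypoints_visible)  # [[1.]]
# -----------------------------------------------------------------------------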
-from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class MhpDataset(BaseCocoStyleDataset): - """MHPv2.0 dataset for pose estimation. - - "Understanding Humans in Crowded Scenes: Deep Nested Adversarial - Learning and A New Benchmark for Multi-Human Parsing", ACM MM'2018. - More details can be found in the `paper - `__ - - MHP keypoints:: - - 0: "right ankle", - 1: "right knee", - 2: "right hip", - 3: "left hip", - 4: "left knee", - 5: "left ankle", - 6: "pelvis", - 7: "thorax", - 8: "upper neck", - 9: "head top", - 10: "right wrist", - 11: "right elbow", - 12: "right shoulder", - 13: "left shoulder", - 14: "left elbow", - 15: "left wrist", - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/mhp.py') +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class MhpDataset(BaseCocoStyleDataset): + """MHPv2.0 dataset for pose estimation. + + "Understanding Humans in Crowded Scenes: Deep Nested Adversarial + Learning and A New Benchmark for Multi-Human Parsing", ACM MM'2018. 
+ More details can be found in the `paper + `__ + + MHP keypoints:: + + 0: "right ankle", + 1: "right knee", + 2: "right hip", + 3: "left hip", + 4: "left knee", + 5: "left ankle", + 6: "pelvis", + 7: "thorax", + 8: "upper neck", + 9: "head top", + 10: "right wrist", + 11: "right elbow", + 12: "right shoulder", + 13: "left shoulder", + 14: "left elbow", + 15: "left wrist", + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/mhp.py') diff --git a/mmpose/datasets/datasets/body/mpii_dataset.py b/mmpose/datasets/datasets/body/mpii_dataset.py index 237f1ab2b6..c90abd6003 100644 --- a/mmpose/datasets/datasets/body/mpii_dataset.py +++ b/mmpose/datasets/datasets/body/mpii_dataset.py @@ -1,212 +1,212 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import json -import os.path as osp -from typing import Callable, List, Optional, Sequence, Tuple, Union - -import numpy as np -from mmengine.fileio import exists, get_local_path -from scipy.io import loadmat - -from mmpose.registry import DATASETS -from mmpose.structures.bbox import bbox_cs2xyxy -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class MpiiDataset(BaseCocoStyleDataset): - """MPII Dataset for pose estimation. - - "2D Human Pose Estimation: New Benchmark and State of the Art Analysis" - ,CVPR'2014. More details can be found in the `paper - `__ . 
- - MPII keypoints:: - - 0: 'right_ankle' - 1: 'right_knee', - 2: 'right_hip', - 3: 'left_hip', - 4: 'left_knee', - 5: 'left_ankle', - 6: 'pelvis', - 7: 'thorax', - 8: 'upper_neck', - 9: 'head_top', - 10: 'right_wrist', - 11: 'right_elbow', - 12: 'right_shoulder', - 13: 'left_shoulder', - 14: 'left_elbow', - 15: 'left_wrist' - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - headbox_file (str, optional): The path of ``mpii_gt_val.mat`` which - provides the headboxes information used for ``PCKh``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. 
- """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/mpii.py') - - def __init__(self, - ann_file: str = '', - bbox_file: Optional[str] = None, - headbox_file: Optional[str] = None, - data_mode: str = 'topdown', - metainfo: Optional[dict] = None, - data_root: Optional[str] = None, - data_prefix: dict = dict(img=''), - filter_cfg: Optional[dict] = None, - indices: Optional[Union[int, Sequence[int]]] = None, - serialize_data: bool = True, - pipeline: List[Union[dict, Callable]] = [], - test_mode: bool = False, - lazy_init: bool = False, - max_refetch: int = 1000): - - if headbox_file: - if data_mode != 'topdown': - raise ValueError( - f'{self.__class__.__name__} is set to {data_mode}: ' - 'mode, while "headbox_file" is only ' - 'supported in topdown mode.') - - if not test_mode: - raise ValueError( - f'{self.__class__.__name__} has `test_mode==False` ' - 'while "headbox_file" is only ' - 'supported when `test_mode==True`.') - - headbox_file_type = headbox_file[-3:] - allow_headbox_file_type = ['mat'] - if headbox_file_type not in allow_headbox_file_type: - raise KeyError( - f'The head boxes file type {headbox_file_type} is not ' - f'supported. Should be `mat` but got {headbox_file_type}.') - self.headbox_file = headbox_file - - super().__init__( - ann_file=ann_file, - bbox_file=bbox_file, - data_mode=data_mode, - metainfo=metainfo, - data_root=data_root, - data_prefix=data_prefix, - filter_cfg=filter_cfg, - indices=indices, - serialize_data=serialize_data, - pipeline=pipeline, - test_mode=test_mode, - lazy_init=lazy_init, - max_refetch=max_refetch) - - def _load_annotations(self) -> Tuple[List[dict], List[dict]]: - """Load data from annotations in MPII format.""" - - assert exists(self.ann_file), 'Annotation file does not exist' - with get_local_path(self.ann_file) as local_path: - with open(local_path) as anno_file: - self.anns = json.load(anno_file) - - if self.headbox_file: - assert exists(self.headbox_file), 'Headbox file does not exist' - with get_local_path(self.headbox_file) as local_path: - self.headbox_dict = loadmat(local_path) - headboxes_src = np.transpose(self.headbox_dict['headboxes_src'], - [2, 0, 1]) - SC_BIAS = 0.6 - - instance_list = [] - image_list = [] - used_img_ids = set() - ann_id = 0 - - # mpii bbox scales are normalized with factor 200. - pixel_std = 200. - - for idx, ann in enumerate(self.anns): - center = np.array(ann['center'], dtype=np.float32) - scale = np.array([ann['scale'], ann['scale']], - dtype=np.float32) * pixel_std - - # Adjust center/scale slightly to avoid cropping limbs - if center[0] != -1: - center[1] = center[1] + 15. 
/ pixel_std * scale[1] - - # MPII uses matlab format, index is 1-based, - # we should first convert to 0-based index - center = center - 1 - - # unify shape with coco datasets - center = center.reshape(1, -1) - scale = scale.reshape(1, -1) - bbox = bbox_cs2xyxy(center, scale) - - # load keypoints in shape [1, K, 2] and keypoints_visible in [1, K] - keypoints = np.array(ann['joints']).reshape(1, -1, 2) - keypoints_visible = np.array(ann['joints_vis']).reshape(1, -1) - - instance_info = { - 'id': ann_id, - 'img_id': int(ann['image'].split('.')[0]), - 'img_path': osp.join(self.data_prefix['img'], ann['image']), - 'bbox_center': center, - 'bbox_scale': scale, - 'bbox': bbox, - 'bbox_score': np.ones(1, dtype=np.float32), - 'keypoints': keypoints, - 'keypoints_visible': keypoints_visible, - } - - if self.headbox_file: - # calculate the diagonal length of head box as norm_factor - headbox = headboxes_src[idx] - head_size = np.linalg.norm(headbox[1] - headbox[0], axis=0) - head_size *= SC_BIAS - instance_info['head_size'] = head_size.reshape(1, -1) - - if instance_info['img_id'] not in used_img_ids: - used_img_ids.add(instance_info['img_id']) - image_list.append({ - 'img_id': instance_info['img_id'], - 'img_path': instance_info['img_path'], - }) - - instance_list.append(instance_info) - ann_id = ann_id + 1 - - return instance_list, image_list +# Copyright (c) OpenMMLab. All rights reserved. +import json +import os.path as osp +from typing import Callable, List, Optional, Sequence, Tuple, Union + +import numpy as np +from mmengine.fileio import exists, get_local_path +from scipy.io import loadmat + +from mmpose.registry import DATASETS +from mmpose.structures.bbox import bbox_cs2xyxy +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class MpiiDataset(BaseCocoStyleDataset): + """MPII Dataset for pose estimation. + + "2D Human Pose Estimation: New Benchmark and State of the Art Analysis" + ,CVPR'2014. More details can be found in the `paper + `__ . + + MPII keypoints:: + + 0: 'right_ankle' + 1: 'right_knee', + 2: 'right_hip', + 3: 'left_hip', + 4: 'left_knee', + 5: 'left_ankle', + 6: 'pelvis', + 7: 'thorax', + 8: 'upper_neck', + 9: 'head_top', + 10: 'right_wrist', + 11: 'right_elbow', + 12: 'right_shoulder', + 13: 'left_shoulder', + 14: 'left_elbow', + 15: 'left_wrist' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + headbox_file (str, optional): The path of ``mpii_gt_val.mat`` which + provides the headboxes information used for ``PCKh``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. 
+ indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/mpii.py') + + def __init__(self, + ann_file: str = '', + bbox_file: Optional[str] = None, + headbox_file: Optional[str] = None, + data_mode: str = 'topdown', + metainfo: Optional[dict] = None, + data_root: Optional[str] = None, + data_prefix: dict = dict(img=''), + filter_cfg: Optional[dict] = None, + indices: Optional[Union[int, Sequence[int]]] = None, + serialize_data: bool = True, + pipeline: List[Union[dict, Callable]] = [], + test_mode: bool = False, + lazy_init: bool = False, + max_refetch: int = 1000): + + if headbox_file: + if data_mode != 'topdown': + raise ValueError( + f'{self.__class__.__name__} is set to {data_mode}: ' + 'mode, while "headbox_file" is only ' + 'supported in topdown mode.') + + if not test_mode: + raise ValueError( + f'{self.__class__.__name__} has `test_mode==False` ' + 'while "headbox_file" is only ' + 'supported when `test_mode==True`.') + + headbox_file_type = headbox_file[-3:] + allow_headbox_file_type = ['mat'] + if headbox_file_type not in allow_headbox_file_type: + raise KeyError( + f'The head boxes file type {headbox_file_type} is not ' + f'supported. Should be `mat` but got {headbox_file_type}.') + self.headbox_file = headbox_file + + super().__init__( + ann_file=ann_file, + bbox_file=bbox_file, + data_mode=data_mode, + metainfo=metainfo, + data_root=data_root, + data_prefix=data_prefix, + filter_cfg=filter_cfg, + indices=indices, + serialize_data=serialize_data, + pipeline=pipeline, + test_mode=test_mode, + lazy_init=lazy_init, + max_refetch=max_refetch) + + def _load_annotations(self) -> Tuple[List[dict], List[dict]]: + """Load data from annotations in MPII format.""" + + assert exists(self.ann_file), 'Annotation file does not exist' + with get_local_path(self.ann_file) as local_path: + with open(local_path) as anno_file: + self.anns = json.load(anno_file) + + if self.headbox_file: + assert exists(self.headbox_file), 'Headbox file does not exist' + with get_local_path(self.headbox_file) as local_path: + self.headbox_dict = loadmat(local_path) + headboxes_src = np.transpose(self.headbox_dict['headboxes_src'], + [2, 0, 1]) + SC_BIAS = 0.6 + + instance_list = [] + image_list = [] + used_img_ids = set() + ann_id = 0 + + # mpii bbox scales are normalized with factor 200. + pixel_std = 200. 
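# --- reviewer sketch (not part of the diff) ---------------------------------
# Standalone illustration of the MPII center/scale handling performed in the
# per-annotation loop that follows: the MPII 'scale' is given in units of
# 200 pixels, the center is nudged down by 15/200 of the box height to avoid
# cropping limbs, shifted from 1-based to 0-based indices, and then converted
# to an xyxy bbox with bbox_cs2xyxy. The numbers are made up for illustration.
import numpy as np
from mmpose.structures.bbox import bbox_cs2xyxy

pixel_std = 200.
center = np.array([300.0, 250.0], dtype=np.float32)          # raw MPII center
scale = np.array([2.0, 2.0], dtype=np.float32) * pixel_std   # -> 400 x 400 px

if center[0] != -1:
    center[1] = center[1] + 15. / pixel_std * scale[1]       # 250 + 30 = 280
center = center - 1                                          # 1-based -> 0-based

center = center.reshape(1, -1)
scale = scale.reshape(1, -1)
bbox = bbox_cs2xyxy(center, scale)                           # shape (1, 4)
print(bbox)   # roughly [[ 99.  79. 499. 479.]]
# -----------------------------------------------------------------------------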
+ + for idx, ann in enumerate(self.anns): + center = np.array(ann['center'], dtype=np.float32) + scale = np.array([ann['scale'], ann['scale']], + dtype=np.float32) * pixel_std + + # Adjust center/scale slightly to avoid cropping limbs + if center[0] != -1: + center[1] = center[1] + 15. / pixel_std * scale[1] + + # MPII uses matlab format, index is 1-based, + # we should first convert to 0-based index + center = center - 1 + + # unify shape with coco datasets + center = center.reshape(1, -1) + scale = scale.reshape(1, -1) + bbox = bbox_cs2xyxy(center, scale) + + # load keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + keypoints = np.array(ann['joints']).reshape(1, -1, 2) + keypoints_visible = np.array(ann['joints_vis']).reshape(1, -1) + + instance_info = { + 'id': ann_id, + 'img_id': int(ann['image'].split('.')[0]), + 'img_path': osp.join(self.data_prefix['img'], ann['image']), + 'bbox_center': center, + 'bbox_scale': scale, + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + } + + if self.headbox_file: + # calculate the diagonal length of head box as norm_factor + headbox = headboxes_src[idx] + head_size = np.linalg.norm(headbox[1] - headbox[0], axis=0) + head_size *= SC_BIAS + instance_info['head_size'] = head_size.reshape(1, -1) + + if instance_info['img_id'] not in used_img_ids: + used_img_ids.add(instance_info['img_id']) + image_list.append({ + 'img_id': instance_info['img_id'], + 'img_path': instance_info['img_path'], + }) + + instance_list.append(instance_info) + ann_id = ann_id + 1 + + return instance_list, image_list diff --git a/mmpose/datasets/datasets/body/mpii_trb_dataset.py b/mmpose/datasets/datasets/body/mpii_trb_dataset.py index bb96ad876f..3a46cf9e1b 100644 --- a/mmpose/datasets/datasets/body/mpii_trb_dataset.py +++ b/mmpose/datasets/datasets/body/mpii_trb_dataset.py @@ -1,169 +1,169 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import json -import os.path as osp -from typing import List, Tuple - -import numpy as np -from mmengine.fileio import exists, get_local_path - -from mmpose.registry import DATASETS -from mmpose.structures.bbox import bbox_cs2xyxy -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class MpiiTrbDataset(BaseCocoStyleDataset): - """MPII-TRB Dataset dataset for pose estimation. - - "TRB: A Novel Triplet Representation for Understanding 2D Human Body", - ICCV'2019. More details can be found in the `paper - `__ . - - MPII-TRB keypoints:: - - 0: 'left_shoulder' - 1: 'right_shoulder' - 2: 'left_elbow' - 3: 'right_elbow' - 4: 'left_wrist' - 5: 'right_wrist' - 6: 'left_hip' - 7: 'right_hip' - 8: 'left_knee' - 9: 'right_knee' - 10: 'left_ankle' - 11: 'right_ankle' - 12: 'head' - 13: 'neck' - - 14: 'right_neck' - 15: 'left_neck' - 16: 'medial_right_shoulder' - 17: 'lateral_right_shoulder' - 18: 'medial_right_bow' - 19: 'lateral_right_bow' - 20: 'medial_right_wrist' - 21: 'lateral_right_wrist' - 22: 'medial_left_shoulder' - 23: 'lateral_left_shoulder' - 24: 'medial_left_bow' - 25: 'lateral_left_bow' - 26: 'medial_left_wrist' - 27: 'lateral_left_wrist' - 28: 'medial_right_hip' - 29: 'lateral_right_hip' - 30: 'medial_right_knee' - 31: 'lateral_right_knee' - 32: 'medial_right_ankle' - 33: 'lateral_right_ankle' - 34: 'medial_left_hip' - 35: 'lateral_left_hip' - 36: 'medial_left_knee' - 37: 'lateral_left_knee' - 38: 'medial_left_ankle' - 39: 'lateral_left_ankle' - - Args: - ann_file (str): Annotation file path. Default: ''. 
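# --- reviewer sketch (not part of the diff) ---------------------------------
# Illustration of the head-size normalisation computed above when
# headbox_file is given: the diagonal of the ground-truth head box is scaled
# by SC_BIAS = 0.6 and stored per instance as 'head_size' for PCKh
# evaluation. The corner coordinates below are made up for illustration.
import numpy as np

SC_BIAS = 0.6
headbox = np.array([[100.0, 50.0],      # top-left corner (x1, y1)
                    [160.0, 130.0]])    # bottom-right corner (x2, y2)

head_size = np.linalg.norm(headbox[1] - headbox[0], axis=0)   # diagonal = 100
head_size *= SC_BIAS                                          # -> 60.0
print(head_size.reshape(1, -1))                               # [[60.]]
# -----------------------------------------------------------------------------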
- bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/mpii_trb.py') - - def _load_annotations(self) -> Tuple[List[dict], List[dict]]: - """Load data from annotations in MPII-TRB format.""" - - assert exists(self.ann_file), 'Annotation file does not exist' - with get_local_path(self.ann_file) as local_path: - with open(local_path) as anno_file: - self.data = json.load(anno_file) - - imgid2info = {img['id']: img for img in self.data['images']} - - instance_list = [] - image_list = [] - used_img_ids = set() - - # mpii-trb bbox scales are normalized with factor 200. - pixel_std = 200. 
- - for ann in self.data['annotations']: - img_id = ann['image_id'] - - # center, scale in shape [1, 2] and bbox in [1, 4] - center = np.array([ann['center']], dtype=np.float32) - scale = np.array([[ann['scale'], ann['scale']]], - dtype=np.float32) * pixel_std - bbox = bbox_cs2xyxy(center, scale) - - # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] - _keypoints = np.array( - ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) - keypoints = _keypoints[..., :2] - keypoints_visible = np.minimum(1, _keypoints[..., 2]) - - img_path = osp.join(self.data_prefix['img'], - imgid2info[img_id]['file_name']) - - instance_info = { - 'id': ann['id'], - 'img_id': img_id, - 'img_path': img_path, - 'bbox_center': center, - 'bbox_scale': scale, - 'bbox': bbox, - 'bbox_score': np.ones(1, dtype=np.float32), - 'num_keypoints': ann['num_joints'], - 'keypoints': keypoints, - 'keypoints_visible': keypoints_visible, - 'iscrowd': ann['iscrowd'], - } - - # val set - if 'headbox' in ann: - instance_info['headbox'] = np.array( - ann['headbox'], dtype=np.float32) - - instance_list.append(instance_info) - if instance_info['img_id'] not in used_img_ids: - used_img_ids.add(instance_info['img_id']) - image_list.append({ - 'img_id': instance_info['img_id'], - 'img_path': instance_info['img_path'], - }) - - instance_list = sorted(instance_list, key=lambda x: x['id']) - return instance_list, image_list +# Copyright (c) OpenMMLab. All rights reserved. +import json +import os.path as osp +from typing import List, Tuple + +import numpy as np +from mmengine.fileio import exists, get_local_path + +from mmpose.registry import DATASETS +from mmpose.structures.bbox import bbox_cs2xyxy +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class MpiiTrbDataset(BaseCocoStyleDataset): + """MPII-TRB Dataset dataset for pose estimation. + + "TRB: A Novel Triplet Representation for Understanding 2D Human Body", + ICCV'2019. More details can be found in the `paper + `__ . + + MPII-TRB keypoints:: + + 0: 'left_shoulder' + 1: 'right_shoulder' + 2: 'left_elbow' + 3: 'right_elbow' + 4: 'left_wrist' + 5: 'right_wrist' + 6: 'left_hip' + 7: 'right_hip' + 8: 'left_knee' + 9: 'right_knee' + 10: 'left_ankle' + 11: 'right_ankle' + 12: 'head' + 13: 'neck' + + 14: 'right_neck' + 15: 'left_neck' + 16: 'medial_right_shoulder' + 17: 'lateral_right_shoulder' + 18: 'medial_right_bow' + 19: 'lateral_right_bow' + 20: 'medial_right_wrist' + 21: 'lateral_right_wrist' + 22: 'medial_left_shoulder' + 23: 'lateral_left_shoulder' + 24: 'medial_left_bow' + 25: 'lateral_left_bow' + 26: 'medial_left_wrist' + 27: 'lateral_left_wrist' + 28: 'medial_right_hip' + 29: 'lateral_right_hip' + 30: 'medial_right_knee' + 31: 'lateral_right_knee' + 32: 'medial_right_ankle' + 33: 'lateral_right_ankle' + 34: 'medial_left_hip' + 35: 'lateral_left_hip' + 36: 'medial_left_knee' + 37: 'lateral_left_knee' + 38: 'medial_left_ankle' + 39: 'lateral_left_ankle' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. 
Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/mpii_trb.py') + + def _load_annotations(self) -> Tuple[List[dict], List[dict]]: + """Load data from annotations in MPII-TRB format.""" + + assert exists(self.ann_file), 'Annotation file does not exist' + with get_local_path(self.ann_file) as local_path: + with open(local_path) as anno_file: + self.data = json.load(anno_file) + + imgid2info = {img['id']: img for img in self.data['images']} + + instance_list = [] + image_list = [] + used_img_ids = set() + + # mpii-trb bbox scales are normalized with factor 200. + pixel_std = 200. 
+ + for ann in self.data['annotations']: + img_id = ann['image_id'] + + # center, scale in shape [1, 2] and bbox in [1, 4] + center = np.array([ann['center']], dtype=np.float32) + scale = np.array([[ann['scale'], ann['scale']]], + dtype=np.float32) * pixel_std + bbox = bbox_cs2xyxy(center, scale) + + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + _keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + img_path = osp.join(self.data_prefix['img'], + imgid2info[img_id]['file_name']) + + instance_info = { + 'id': ann['id'], + 'img_id': img_id, + 'img_path': img_path, + 'bbox_center': center, + 'bbox_scale': scale, + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': ann['num_joints'], + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'iscrowd': ann['iscrowd'], + } + + # val set + if 'headbox' in ann: + instance_info['headbox'] = np.array( + ann['headbox'], dtype=np.float32) + + instance_list.append(instance_info) + if instance_info['img_id'] not in used_img_ids: + used_img_ids.add(instance_info['img_id']) + image_list.append({ + 'img_id': instance_info['img_id'], + 'img_path': instance_info['img_path'], + }) + + instance_list = sorted(instance_list, key=lambda x: x['id']) + return instance_list, image_list diff --git a/mmpose/datasets/datasets/body/ochuman_dataset.py b/mmpose/datasets/datasets/body/ochuman_dataset.py index 695d090ea9..df20d8f570 100644 --- a/mmpose/datasets/datasets/body/ochuman_dataset.py +++ b/mmpose/datasets/datasets/body/ochuman_dataset.py @@ -1,78 +1,78 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class OCHumanDataset(BaseCocoStyleDataset): - """OChuman dataset for pose estimation. - - "Pose2Seg: Detection Free Human Instance Segmentation", CVPR'2019. - More details can be found in the `paper - `__ . - - "Occluded Human (OCHuman)" dataset contains 8110 heavily occluded - human instances within 4731 images. OCHuman dataset is designed for - validation and testing. To evaluate on OCHuman, the model should be - trained on COCO training set, and then test the robustness of the - model to occlusion using OCHuman. - - OCHuman keypoints (same as COCO):: - - 0: 'nose', - 1: 'left_eye', - 2: 'right_eye', - 3: 'left_ear', - 4: 'right_ear', - 5: 'left_shoulder', - 6: 'right_shoulder', - 7: 'left_elbow', - 8: 'right_elbow', - 9: 'left_wrist', - 10: 'right_wrist', - 11: 'left_hip', - 12: 'right_hip', - 13: 'left_knee', - 14: 'right_knee', - 15: 'left_ankle', - 16: 'right_ankle' - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. 
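# --- reviewer sketch (not part of the diff) ---------------------------------
# Illustration of the instance/image bookkeeping used by the loaders above:
# several instances can share one image, so an image entry is appended only
# the first time its img_id is seen, while every instance is kept and the
# instance list is finally sorted by annotation id. The annotations below
# are made up for illustration.
anns = [
    dict(id=3, image_id=10, file_name='000010.jpg'),
    dict(id=1, image_id=10, file_name='000010.jpg'),
    dict(id=2, image_id=11, file_name='000011.jpg'),
]

instance_list, image_list, used_img_ids = [], [], set()
for ann in anns:
    instance_list.append({'id': ann['id'], 'img_id': ann['image_id']})
    if ann['image_id'] not in used_img_ids:
        used_img_ids.add(ann['image_id'])
        image_list.append({'img_id': ann['image_id'],
                           'img_path': ann['file_name']})

instance_list = sorted(instance_list, key=lambda x: x['id'])
print(len(instance_list), len(image_list))   # 3 2
# -----------------------------------------------------------------------------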
- data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/ochuman.py') +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class OCHumanDataset(BaseCocoStyleDataset): + """OChuman dataset for pose estimation. + + "Pose2Seg: Detection Free Human Instance Segmentation", CVPR'2019. + More details can be found in the `paper + `__ . + + "Occluded Human (OCHuman)" dataset contains 8110 heavily occluded + human instances within 4731 images. OCHuman dataset is designed for + validation and testing. To evaluate on OCHuman, the model should be + trained on COCO training set, and then test the robustness of the + model to occlusion using OCHuman. + + OCHuman keypoints (same as COCO):: + + 0: 'nose', + 1: 'left_eye', + 2: 'right_eye', + 3: 'left_ear', + 4: 'right_ear', + 5: 'left_shoulder', + 6: 'right_shoulder', + 7: 'left_elbow', + 8: 'right_elbow', + 9: 'left_wrist', + 10: 'right_wrist', + 11: 'left_hip', + 12: 'right_hip', + 13: 'left_knee', + 14: 'right_knee', + 15: 'left_ankle', + 16: 'right_ankle' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. 
+ serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/ochuman.py') diff --git a/mmpose/datasets/datasets/body/posetrack18_dataset.py b/mmpose/datasets/datasets/body/posetrack18_dataset.py index b8110c107f..45b0d38667 100644 --- a/mmpose/datasets/datasets/body/posetrack18_dataset.py +++ b/mmpose/datasets/datasets/body/posetrack18_dataset.py @@ -1,72 +1,72 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class PoseTrack18Dataset(BaseCocoStyleDataset): - """PoseTrack18 dataset for pose estimation. - - "Posetrack: A benchmark for human pose estimation and tracking", CVPR'2018. - More details can be found in the `paper - `__ . - - PoseTrack2018 keypoints:: - - 0: 'nose', - 1: 'head_bottom', - 2: 'head_top', - 3: 'left_ear', - 4: 'right_ear', - 5: 'left_shoulder', - 6: 'right_shoulder', - 7: 'left_elbow', - 8: 'right_elbow', - 9: 'left_wrist', - 10: 'right_wrist', - 11: 'left_hip', - 12: 'right_hip', - 13: 'left_knee', - 14: 'right_knee', - 15: 'left_ankle', - 16: 'right_ankle' - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. 
- lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/posetrack18.py') +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class PoseTrack18Dataset(BaseCocoStyleDataset): + """PoseTrack18 dataset for pose estimation. + + "Posetrack: A benchmark for human pose estimation and tracking", CVPR'2018. + More details can be found in the `paper + `__ . + + PoseTrack2018 keypoints:: + + 0: 'nose', + 1: 'head_bottom', + 2: 'head_top', + 3: 'left_ear', + 4: 'right_ear', + 5: 'left_shoulder', + 6: 'right_shoulder', + 7: 'left_elbow', + 8: 'right_elbow', + 9: 'left_wrist', + 10: 'right_wrist', + 11: 'left_hip', + 12: 'right_hip', + 13: 'left_knee', + 14: 'right_knee', + 15: 'left_ankle', + 16: 'right_ankle' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. 
+ """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/posetrack18.py') diff --git a/mmpose/datasets/datasets/body/posetrack18_video_dataset.py b/mmpose/datasets/datasets/body/posetrack18_video_dataset.py index cc5fe8646c..029484cf4d 100644 --- a/mmpose/datasets/datasets/body/posetrack18_video_dataset.py +++ b/mmpose/datasets/datasets/body/posetrack18_video_dataset.py @@ -1,389 +1,389 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -from typing import Callable, List, Optional, Sequence, Union - -import numpy as np -from mmengine.fileio import exists, get_local_path, load -from mmengine.utils import is_list_of -from xtcocotools.coco import COCO - -from mmpose.registry import DATASETS -from mmpose.structures.bbox import bbox_xywh2xyxy -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class PoseTrack18VideoDataset(BaseCocoStyleDataset): - """PoseTrack18 dataset for video pose estimation. - - "Posetrack: A benchmark for human pose estimation and tracking", CVPR'2018. - More details can be found in the `paper - `__ . - - PoseTrack2018 keypoints:: - - 0: 'nose', - 1: 'head_bottom', - 2: 'head_top', - 3: 'left_ear', - 4: 'right_ear', - 5: 'left_shoulder', - 6: 'right_shoulder', - 7: 'left_elbow', - 8: 'right_elbow', - 9: 'left_wrist', - 10: 'right_wrist', - 11: 'left_hip', - 12: 'right_hip', - 13: 'left_knee', - 14: 'right_knee', - 15: 'left_ankle', - 16: 'right_ankle' - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - frame_weights (List[Union[int, float]] ): The weight of each frame - for aggregation. The first weight is for the center frame, then on - ascending order of frame indices. Note that the length of - ``frame_weights`` should be consistent with the number of sampled - frames. Default: [0.0, 1.0] - frame_sampler_mode (str): Specifies the mode of frame sampler: - ``'fixed'`` or ``'random'``. In ``'fixed'`` mode, each frame - index relative to the center frame is fixed, specified by - ``frame_indices``, while in ``'random'`` mode, each frame index - relative to the center frame is sampled from ``frame_range`` - with certain randomness. Default: ``'random'``. - frame_range (int | List[int], optional): The sampling range of - supporting frames in the same video for center frame. - Only valid when ``frame_sampler_mode`` is ``'random'``. - Default: ``None``. - num_sampled_frame(int, optional): The number of sampled frames, except - the center frame. Only valid when ``frame_sampler_mode`` is - ``'random'``. Default: 1. - frame_indices (Sequence[int], optional): The sampled frame indices, - including the center frame indicated by 0. Only valid when - ``frame_sampler_mode`` is ``'fixed'``. Default: ``None``. - ph_fill_len (int): The length of the placeholder to fill in the - image filenames. Default: 6 - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. 
- data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img='')``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/posetrack18.py') - - def __init__(self, - ann_file: str = '', - bbox_file: Optional[str] = None, - data_mode: str = 'topdown', - frame_weights: List[Union[int, float]] = [0.0, 1.0], - frame_sampler_mode: str = 'random', - frame_range: Optional[Union[int, List[int]]] = None, - num_sampled_frame: Optional[int] = None, - frame_indices: Optional[Sequence[int]] = None, - ph_fill_len: int = 6, - metainfo: Optional[dict] = None, - data_root: Optional[str] = None, - data_prefix: dict = dict(img=''), - filter_cfg: Optional[dict] = None, - indices: Optional[Union[int, Sequence[int]]] = None, - serialize_data: bool = True, - pipeline: List[Union[dict, Callable]] = [], - test_mode: bool = False, - lazy_init: bool = False, - max_refetch: int = 1000): - assert sum(frame_weights) == 1, 'Invalid `frame_weights`: should sum'\ - f' to 1.0, but got {frame_weights}.' - for weight in frame_weights: - assert weight >= 0, 'frame_weight can not be a negative value.' - self.frame_weights = np.array(frame_weights) - - if frame_sampler_mode not in {'fixed', 'random'}: - raise ValueError( - f'{self.__class__.__name__} got invalid frame_sampler_mode: ' - f'{frame_sampler_mode}. Should be `"fixed"` or `"random"`.') - self.frame_sampler_mode = frame_sampler_mode - - if frame_sampler_mode == 'random': - assert frame_range is not None, \ - '`frame_sampler_mode` is set as `random`, ' \ - 'please specify the `frame_range`.' - - if isinstance(frame_range, int): - assert frame_range >= 0, \ - 'frame_range can not be a negative value.' - self.frame_range = [-frame_range, frame_range] - - elif isinstance(frame_range, Sequence): - assert len(frame_range) == 2, 'The length must be 2.' - assert frame_range[0] <= 0 and frame_range[ - 1] >= 0 and frame_range[1] > frame_range[ - 0], 'Invalid `frame_range`' - for i in frame_range: - assert isinstance(i, int), 'Each element must be int.' - self.frame_range = frame_range - else: - raise TypeError( - f'The type of `frame_range` must be int or Sequence, ' - f'but got {type(frame_range)}.') - - assert num_sampled_frame is not None, \ - '`frame_sampler_mode` is set as `random`, please specify ' \ - '`num_sampled_frame`, e.g. 
the number of sampled frames.' - - assert len(frame_weights) == num_sampled_frame + 1, \ - f'the length of frame_weights({len(frame_weights)}) '\ - f'does not match the number of sampled adjacent '\ - f'frames({num_sampled_frame})' - self.frame_indices = None - self.num_sampled_frame = num_sampled_frame - - if frame_sampler_mode == 'fixed': - assert frame_indices is not None, \ - '`frame_sampler_mode` is set as `fixed`, ' \ - 'please specify the `frame_indices`.' - assert len(frame_weights) == len(frame_indices), \ - f'the length of frame_weights({len(frame_weights)}) does not '\ - f'match the length of frame_indices({len(frame_indices)}).' - frame_indices.sort() - self.frame_indices = frame_indices - self.frame_range = None - self.num_sampled_frame = None - - self.ph_fill_len = ph_fill_len - - super().__init__( - ann_file=ann_file, - bbox_file=bbox_file, - data_mode=data_mode, - metainfo=metainfo, - data_root=data_root, - data_prefix=data_prefix, - filter_cfg=filter_cfg, - indices=indices, - serialize_data=serialize_data, - pipeline=pipeline, - test_mode=test_mode, - lazy_init=lazy_init, - max_refetch=max_refetch) - - def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: - """Parse raw annotation of an instance. - - Args: - raw_data_info (dict): Raw data information loaded from - ``ann_file``. It should have following contents: - - - ``'raw_ann_info'``: Raw annotation of an instance - - ``'raw_img_info'``: Raw information of the image that - contains the instance - - Returns: - dict: Parsed instance annotation - """ - - ann = raw_data_info['raw_ann_info'] - img = raw_data_info['raw_img_info'] - - # filter invalid instance - if 'bbox' not in ann or 'keypoints' not in ann or max( - ann['keypoints']) == 0: - return None - - img_w, img_h = img['width'], img['height'] - # get the bbox of the center frame - # get bbox in shape [1, 4], formatted as xywh - x, y, w, h = ann['bbox'] - x1 = np.clip(x, 0, img_w - 1) - y1 = np.clip(y, 0, img_h - 1) - x2 = np.clip(x + w, 0, img_w - 1) - y2 = np.clip(y + h, 0, img_h - 1) - - bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4) - - # get the keypoints of the center frame - # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] - _keypoints = np.array( - ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) - keypoints = _keypoints[..., :2] - keypoints_visible = np.minimum(1, _keypoints[..., 2]) - - # deal with multiple image paths - img_paths: list = [] - # get the image path of the center frame - center_img_path = osp.join(self.data_prefix['img'], img['file_name']) - # append the center image path first - img_paths.append(center_img_path) - - # select the frame indices - if self.frame_sampler_mode == 'fixed': - indices = self.frame_indices - else: # self.frame_sampler_mode == 'random': - low, high = self.frame_range - indices = np.random.randint(low, high + 1, self.num_sampled_frame) - - nframes = int(img['nframes']) - file_name = img['file_name'] - ref_idx = int(osp.splitext(osp.basename(file_name))[0]) - - for idx in indices: - if self.test_mode and idx == 0: - continue - # the supporting frame index - support_idx = ref_idx + idx - # clip the frame index to make sure that it does not exceed - # the boundings of frame indices - support_idx = np.clip(support_idx, 0, nframes - 1) - sup_img_path = osp.join( - osp.dirname(center_img_path), - str(support_idx).zfill(self.ph_fill_len) + '.jpg') - - img_paths.append(sup_img_path) - - data_info = { - 'img_id': int(img['frame_id']), - 'img_path': img_paths, - 'bbox': bbox, - 
'bbox_score': np.ones(1, dtype=np.float32), - 'num_keypoints': ann['num_keypoints'], - 'keypoints': keypoints, - 'keypoints_visible': keypoints_visible, - 'frame_weights': self.frame_weights, - 'id': ann['id'], - } - - return data_info - - def _load_detection_results(self) -> List[dict]: - """Load data from detection results with dummy keypoint annotations.""" - assert exists(self.ann_file), 'Annotation file does not exist' - assert exists(self.bbox_file), 'Bbox file does not exist' - - # load detection results - det_results = load(self.bbox_file) - assert is_list_of(det_results, dict) - - # load coco annotations to build image id-to-name index - with get_local_path(self.ann_file) as local_path: - self.coco = COCO(local_path) - - # mapping image name to id - name2id = {} - # mapping image id to name - id2name = {} - for img_id, image in self.coco.imgs.items(): - file_name = image['file_name'] - id2name[img_id] = file_name - name2id[file_name] = img_id - - num_keypoints = self.metainfo['num_keypoints'] - data_list = [] - id_ = 0 - for det in det_results: - # remove non-human instances - if det['category_id'] != 1: - continue - - # get the predicted bbox and bbox_score - bbox_xywh = np.array( - det['bbox'][:4], dtype=np.float32).reshape(1, 4) - bbox = bbox_xywh2xyxy(bbox_xywh) - bbox_score = np.array(det['score'], dtype=np.float32).reshape(1) - - # use dummy keypoint location and visibility - keypoints = np.zeros((1, num_keypoints, 2), dtype=np.float32) - keypoints_visible = np.ones((1, num_keypoints), dtype=np.float32) - - # deal with different bbox file formats - if 'nframes' in det: - nframes = int(det['nframes']) - else: - if 'image_name' in det: - img_id = name2id[det['image_name']] - else: - img_id = det['image_id'] - img_ann = self.coco.loadImgs(img_id)[0] - nframes = int(img_ann['nframes']) - - # deal with multiple image paths - img_paths: list = [] - if 'image_name' in det: - image_name = det['image_name'] - else: - image_name = id2name[det['image_id']] - # get the image path of the center frame - center_img_path = osp.join(self.data_prefix['img'], image_name) - # append the center image path first - img_paths.append(center_img_path) - - # "images/val/012834_mpii_test/000000.jpg" -->> "000000.jpg" - center_image_name = image_name.split('/')[-1] - ref_idx = int(center_image_name.replace('.jpg', '')) - - # select the frame indices - if self.frame_sampler_mode == 'fixed': - indices = self.frame_indices - else: # self.frame_sampler_mode == 'random': - low, high = self.frame_range - indices = np.random.randint(low, high + 1, - self.num_sampled_frame) - - for idx in indices: - if self.test_mode and idx == 0: - continue - # the supporting frame index - support_idx = ref_idx + idx - # clip the frame index to make sure that it does not exceed - # the boundings of frame indices - support_idx = np.clip(support_idx, 0, nframes - 1) - sup_img_path = center_img_path.replace( - center_image_name, - str(support_idx).zfill(self.ph_fill_len) + '.jpg') - - img_paths.append(sup_img_path) - - data_list.append({ - 'img_id': det['image_id'], - 'img_path': img_paths, - 'frame_weights': self.frame_weights, - 'bbox': bbox, - 'bbox_score': bbox_score, - 'keypoints': keypoints, - 'keypoints_visible': keypoints_visible, - 'id': id_, - }) - - id_ += 1 - - return data_list +# Copyright (c) OpenMMLab. All rights reserved. 
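# --- Illustrative sketch (not part of the patch) ---------------------------
# The supporting-frame logic of PoseTrack18VideoDataset.parse_data_info shown
# in the hunk above boils down to: draw offsets around the center frame, clip
# them to the video length, and rebuild the zero-padded file names. A minimal
# standalone re-implementation with hypothetical inputs (the path, nframes and
# the frame settings below are example values, not taken from the patch):
import os.path as osp

import numpy as np

center_img_path = 'images/val/012834_mpii_test/000010.jpg'  # hypothetical
nframes = 100          # total frames in the video (hypothetical)
frame_range = [-2, 2]  # sampling window around the center frame
num_sampled_frame = 1  # number of supporting frames to draw
ph_fill_len = 6        # zero-padding length of PoseTrack file names

ref_idx = int(osp.splitext(osp.basename(center_img_path))[0])  # -> 10
offsets = np.random.randint(frame_range[0], frame_range[1] + 1,
                            num_sampled_frame)

img_paths = [center_img_path]
for off in offsets:
    # clip so the supporting index never leaves the video
    support_idx = int(np.clip(ref_idx + off, 0, nframes - 1))
    img_paths.append(
        osp.join(osp.dirname(center_img_path),
                 str(support_idx).zfill(ph_fill_len) + '.jpg'))
# e.g. ['images/val/012834_mpii_test/000010.jpg',
#       'images/val/012834_mpii_test/000011.jpg']
# ---------------------------------------------------------------------------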
+import os.path as osp +from typing import Callable, List, Optional, Sequence, Union + +import numpy as np +from mmengine.fileio import exists, get_local_path, load +from mmengine.utils import is_list_of +from xtcocotools.coco import COCO + +from mmpose.registry import DATASETS +from mmpose.structures.bbox import bbox_xywh2xyxy +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class PoseTrack18VideoDataset(BaseCocoStyleDataset): + """PoseTrack18 dataset for video pose estimation. + + "Posetrack: A benchmark for human pose estimation and tracking", CVPR'2018. + More details can be found in the `paper + `__ . + + PoseTrack2018 keypoints:: + + 0: 'nose', + 1: 'head_bottom', + 2: 'head_top', + 3: 'left_ear', + 4: 'right_ear', + 5: 'left_shoulder', + 6: 'right_shoulder', + 7: 'left_elbow', + 8: 'right_elbow', + 9: 'left_wrist', + 10: 'right_wrist', + 11: 'left_hip', + 12: 'right_hip', + 13: 'left_knee', + 14: 'right_knee', + 15: 'left_ankle', + 16: 'right_ankle' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + frame_weights (List[Union[int, float]] ): The weight of each frame + for aggregation. The first weight is for the center frame, then on + ascending order of frame indices. Note that the length of + ``frame_weights`` should be consistent with the number of sampled + frames. Default: [0.0, 1.0] + frame_sampler_mode (str): Specifies the mode of frame sampler: + ``'fixed'`` or ``'random'``. In ``'fixed'`` mode, each frame + index relative to the center frame is fixed, specified by + ``frame_indices``, while in ``'random'`` mode, each frame index + relative to the center frame is sampled from ``frame_range`` + with certain randomness. Default: ``'random'``. + frame_range (int | List[int], optional): The sampling range of + supporting frames in the same video for center frame. + Only valid when ``frame_sampler_mode`` is ``'random'``. + Default: ``None``. + num_sampled_frame(int, optional): The number of sampled frames, except + the center frame. Only valid when ``frame_sampler_mode`` is + ``'random'``. Default: 1. + frame_indices (Sequence[int], optional): The sampled frame indices, + including the center frame indicated by 0. Only valid when + ``frame_sampler_mode`` is ``'fixed'``. Default: ``None``. + ph_fill_len (int): The length of the placeholder to fill in the + image filenames. Default: 6 + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img='')``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. 
+ serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/posetrack18.py') + + def __init__(self, + ann_file: str = '', + bbox_file: Optional[str] = None, + data_mode: str = 'topdown', + frame_weights: List[Union[int, float]] = [0.0, 1.0], + frame_sampler_mode: str = 'random', + frame_range: Optional[Union[int, List[int]]] = None, + num_sampled_frame: Optional[int] = None, + frame_indices: Optional[Sequence[int]] = None, + ph_fill_len: int = 6, + metainfo: Optional[dict] = None, + data_root: Optional[str] = None, + data_prefix: dict = dict(img=''), + filter_cfg: Optional[dict] = None, + indices: Optional[Union[int, Sequence[int]]] = None, + serialize_data: bool = True, + pipeline: List[Union[dict, Callable]] = [], + test_mode: bool = False, + lazy_init: bool = False, + max_refetch: int = 1000): + assert sum(frame_weights) == 1, 'Invalid `frame_weights`: should sum'\ + f' to 1.0, but got {frame_weights}.' + for weight in frame_weights: + assert weight >= 0, 'frame_weight can not be a negative value.' + self.frame_weights = np.array(frame_weights) + + if frame_sampler_mode not in {'fixed', 'random'}: + raise ValueError( + f'{self.__class__.__name__} got invalid frame_sampler_mode: ' + f'{frame_sampler_mode}. Should be `"fixed"` or `"random"`.') + self.frame_sampler_mode = frame_sampler_mode + + if frame_sampler_mode == 'random': + assert frame_range is not None, \ + '`frame_sampler_mode` is set as `random`, ' \ + 'please specify the `frame_range`.' + + if isinstance(frame_range, int): + assert frame_range >= 0, \ + 'frame_range can not be a negative value.' + self.frame_range = [-frame_range, frame_range] + + elif isinstance(frame_range, Sequence): + assert len(frame_range) == 2, 'The length must be 2.' + assert frame_range[0] <= 0 and frame_range[ + 1] >= 0 and frame_range[1] > frame_range[ + 0], 'Invalid `frame_range`' + for i in frame_range: + assert isinstance(i, int), 'Each element must be int.' + self.frame_range = frame_range + else: + raise TypeError( + f'The type of `frame_range` must be int or Sequence, ' + f'but got {type(frame_range)}.') + + assert num_sampled_frame is not None, \ + '`frame_sampler_mode` is set as `random`, please specify ' \ + '`num_sampled_frame`, e.g. the number of sampled frames.' + + assert len(frame_weights) == num_sampled_frame + 1, \ + f'the length of frame_weights({len(frame_weights)}) '\ + f'does not match the number of sampled adjacent '\ + f'frames({num_sampled_frame})' + self.frame_indices = None + self.num_sampled_frame = num_sampled_frame + + if frame_sampler_mode == 'fixed': + assert frame_indices is not None, \ + '`frame_sampler_mode` is set as `fixed`, ' \ + 'please specify the `frame_indices`.' 
+ assert len(frame_weights) == len(frame_indices), \ + f'the length of frame_weights({len(frame_weights)}) does not '\ + f'match the length of frame_indices({len(frame_indices)}).' + frame_indices.sort() + self.frame_indices = frame_indices + self.frame_range = None + self.num_sampled_frame = None + + self.ph_fill_len = ph_fill_len + + super().__init__( + ann_file=ann_file, + bbox_file=bbox_file, + data_mode=data_mode, + metainfo=metainfo, + data_root=data_root, + data_prefix=data_prefix, + filter_cfg=filter_cfg, + indices=indices, + serialize_data=serialize_data, + pipeline=pipeline, + test_mode=test_mode, + lazy_init=lazy_init, + max_refetch=max_refetch) + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + # filter invalid instance + if 'bbox' not in ann or 'keypoints' not in ann or max( + ann['keypoints']) == 0: + return None + + img_w, img_h = img['width'], img['height'] + # get the bbox of the center frame + # get bbox in shape [1, 4], formatted as xywh + x, y, w, h = ann['bbox'] + x1 = np.clip(x, 0, img_w - 1) + y1 = np.clip(y, 0, img_h - 1) + x2 = np.clip(x + w, 0, img_w - 1) + y2 = np.clip(y + h, 0, img_h - 1) + + bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4) + + # get the keypoints of the center frame + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + _keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + # deal with multiple image paths + img_paths: list = [] + # get the image path of the center frame + center_img_path = osp.join(self.data_prefix['img'], img['file_name']) + # append the center image path first + img_paths.append(center_img_path) + + # select the frame indices + if self.frame_sampler_mode == 'fixed': + indices = self.frame_indices + else: # self.frame_sampler_mode == 'random': + low, high = self.frame_range + indices = np.random.randint(low, high + 1, self.num_sampled_frame) + + nframes = int(img['nframes']) + file_name = img['file_name'] + ref_idx = int(osp.splitext(osp.basename(file_name))[0]) + + for idx in indices: + if self.test_mode and idx == 0: + continue + # the supporting frame index + support_idx = ref_idx + idx + # clip the frame index to make sure that it does not exceed + # the boundings of frame indices + support_idx = np.clip(support_idx, 0, nframes - 1) + sup_img_path = osp.join( + osp.dirname(center_img_path), + str(support_idx).zfill(self.ph_fill_len) + '.jpg') + + img_paths.append(sup_img_path) + + data_info = { + 'img_id': int(img['frame_id']), + 'img_path': img_paths, + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': ann['num_keypoints'], + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'frame_weights': self.frame_weights, + 'id': ann['id'], + } + + return data_info + + def _load_detection_results(self) -> List[dict]: + """Load data from detection results with dummy keypoint annotations.""" + assert exists(self.ann_file), 'Annotation file does not exist' + assert exists(self.bbox_file), 'Bbox file 
does not exist' + + # load detection results + det_results = load(self.bbox_file) + assert is_list_of(det_results, dict) + + # load coco annotations to build image id-to-name index + with get_local_path(self.ann_file) as local_path: + self.coco = COCO(local_path) + + # mapping image name to id + name2id = {} + # mapping image id to name + id2name = {} + for img_id, image in self.coco.imgs.items(): + file_name = image['file_name'] + id2name[img_id] = file_name + name2id[file_name] = img_id + + num_keypoints = self.metainfo['num_keypoints'] + data_list = [] + id_ = 0 + for det in det_results: + # remove non-human instances + if det['category_id'] != 1: + continue + + # get the predicted bbox and bbox_score + bbox_xywh = np.array( + det['bbox'][:4], dtype=np.float32).reshape(1, 4) + bbox = bbox_xywh2xyxy(bbox_xywh) + bbox_score = np.array(det['score'], dtype=np.float32).reshape(1) + + # use dummy keypoint location and visibility + keypoints = np.zeros((1, num_keypoints, 2), dtype=np.float32) + keypoints_visible = np.ones((1, num_keypoints), dtype=np.float32) + + # deal with different bbox file formats + if 'nframes' in det: + nframes = int(det['nframes']) + else: + if 'image_name' in det: + img_id = name2id[det['image_name']] + else: + img_id = det['image_id'] + img_ann = self.coco.loadImgs(img_id)[0] + nframes = int(img_ann['nframes']) + + # deal with multiple image paths + img_paths: list = [] + if 'image_name' in det: + image_name = det['image_name'] + else: + image_name = id2name[det['image_id']] + # get the image path of the center frame + center_img_path = osp.join(self.data_prefix['img'], image_name) + # append the center image path first + img_paths.append(center_img_path) + + # "images/val/012834_mpii_test/000000.jpg" -->> "000000.jpg" + center_image_name = image_name.split('/')[-1] + ref_idx = int(center_image_name.replace('.jpg', '')) + + # select the frame indices + if self.frame_sampler_mode == 'fixed': + indices = self.frame_indices + else: # self.frame_sampler_mode == 'random': + low, high = self.frame_range + indices = np.random.randint(low, high + 1, + self.num_sampled_frame) + + for idx in indices: + if self.test_mode and idx == 0: + continue + # the supporting frame index + support_idx = ref_idx + idx + # clip the frame index to make sure that it does not exceed + # the boundings of frame indices + support_idx = np.clip(support_idx, 0, nframes - 1) + sup_img_path = center_img_path.replace( + center_image_name, + str(support_idx).zfill(self.ph_fill_len) + '.jpg') + + img_paths.append(sup_img_path) + + data_list.append({ + 'img_id': det['image_id'], + 'img_path': img_paths, + 'frame_weights': self.frame_weights, + 'bbox': bbox, + 'bbox_score': bbox_score, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'id': id_, + }) + + id_ += 1 + + return data_list diff --git a/mmpose/datasets/datasets/body3d/__init__.py b/mmpose/datasets/datasets/body3d/__init__.py index d5afeca578..e844bc46e0 100644 --- a/mmpose/datasets/datasets/body3d/__init__.py +++ b/mmpose/datasets/datasets/body3d/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .h36m_dataset import Human36mDataset - -__all__ = ['Human36mDataset'] +# Copyright (c) OpenMMLab. All rights reserved. 
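# --- Illustrative sketch (not part of the patch) ---------------------------
# A minimal dataset config for PoseTrack18VideoDataset (defined above), in the
# dict style used by MMPose config files. All paths and the empty pipeline are
# placeholders; the frame settings are chosen to satisfy the constructor
# checks shown above (sum(frame_weights) == 1 and
# len(frame_weights) == num_sampled_frame + 1 in 'random' mode).
train_dataset = dict(
    type='PoseTrack18VideoDataset',
    data_root='data/posetrack18/',                    # placeholder
    ann_file='annotations/posetrack18_train.json',    # placeholder
    data_prefix=dict(img=''),
    data_mode='topdown',
    frame_sampler_mode='random',
    frame_range=2,             # sample supporting frames within +/- 2
    num_sampled_frame=1,       # one supporting frame besides the center frame
    frame_weights=[0.0, 1.0],  # first weight belongs to the center frame
    pipeline=[],               # placeholder; real configs add transforms here
)
# ---------------------------------------------------------------------------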
+from .h36m_dataset import Human36mDataset + +__all__ = ['Human36mDataset'] diff --git a/mmpose/datasets/datasets/body3d/h36m_dataset.py b/mmpose/datasets/datasets/body3d/h36m_dataset.py index 60094aa254..d2cb9010be 100644 --- a/mmpose/datasets/datasets/body3d/h36m_dataset.py +++ b/mmpose/datasets/datasets/body3d/h36m_dataset.py @@ -1,259 +1,259 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -from collections import defaultdict -from typing import Callable, List, Optional, Sequence, Tuple, Union - -import numpy as np -from mmengine.fileio import exists, get_local_path -from mmengine.utils import is_abs - -from mmpose.datasets.datasets import BaseMocapDataset -from mmpose.registry import DATASETS - - -@DATASETS.register_module() -class Human36mDataset(BaseMocapDataset): - """Human3.6M dataset for 3D human pose estimation. - - "Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human - Sensing in Natural Environments", TPAMI`2014. - More details can be found in the `paper - `__. - - Human3.6M keypoint indexes:: - - 0: 'root (pelvis)', - 1: 'right_hip', - 2: 'right_knee', - 3: 'right_foot', - 4: 'left_hip', - 5: 'left_knee', - 6: 'left_foot', - 7: 'spine', - 8: 'thorax', - 9: 'neck_base', - 10: 'head', - 11: 'left_shoulder', - 12: 'left_elbow', - 13: 'left_wrist', - 14: 'right_shoulder', - 15: 'right_elbow', - 16: 'right_wrist' - - Args: - ann_file (str): Annotation file path. Default: ''. - seq_len (int): Number of frames in a sequence. Default: 1. - seq_step (int): The interval for extracting frames from the video. - Default: 1. - pad_video_seq (bool): Whether to pad the video so that poses will be - predicted for every frame in the video. Default: ``False``. - causal (bool): If set to ``True``, the rightmost input frame will be - the target frame. Otherwise, the middle input frame will be the - target frame. Default: ``True``. - subset_frac (float): The fraction to reduce dataset size. If set to 1, - the dataset size is not reduced. Default: 1. - keypoint_2d_src (str): Specifies 2D keypoint information options, which - should be one of the following options: - - - ``'gt'``: load from the annotation file - - ``'detection'``: load from a detection - result file of 2D keypoint - - 'pipeline': the information will be generated by the pipeline - - Default: ``'gt'``. - keypoint_2d_det_file (str, optional): The 2D keypoint detection file. - If set, 2d keypoint loaded from this file will be used instead of - ground-truth keypoints. This setting is only when - ``keypoint_2d_src`` is ``'detection'``. Default: ``None``. - camera_param_file (str): Cameras' parameters file. Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. - Default: ``dict(img='')``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. 
- serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/h36m.py') - SUPPORTED_keypoint_2d_src = {'gt', 'detection', 'pipeline'} - - def __init__(self, - ann_file: str = '', - seq_len: int = 1, - seq_step: int = 1, - pad_video_seq: bool = False, - causal: bool = True, - subset_frac: float = 1.0, - keypoint_2d_src: str = 'gt', - keypoint_2d_det_file: Optional[str] = None, - camera_param_file: Optional[str] = None, - data_mode: str = 'topdown', - metainfo: Optional[dict] = None, - data_root: Optional[str] = None, - data_prefix: dict = dict(img=''), - filter_cfg: Optional[dict] = None, - indices: Optional[Union[int, Sequence[int]]] = None, - serialize_data: bool = True, - pipeline: List[Union[dict, Callable]] = [], - test_mode: bool = False, - lazy_init: bool = False, - max_refetch: int = 1000): - # check keypoint_2d_src - self.keypoint_2d_src = keypoint_2d_src - if self.keypoint_2d_src not in self.SUPPORTED_keypoint_2d_src: - raise ValueError( - f'Unsupported `keypoint_2d_src` "{self.keypoint_2d_src}". ' - f'Supported options are {self.SUPPORTED_keypoint_2d_src}') - - if keypoint_2d_det_file: - if not is_abs(keypoint_2d_det_file): - self.keypoint_2d_det_file = osp.join(data_root, - keypoint_2d_det_file) - else: - self.keypoint_2d_det_file = keypoint_2d_det_file - - self.seq_step = seq_step - self.pad_video_seq = pad_video_seq - - super().__init__( - ann_file=ann_file, - seq_len=seq_len, - causal=causal, - subset_frac=subset_frac, - camera_param_file=camera_param_file, - data_mode=data_mode, - metainfo=metainfo, - data_root=data_root, - data_prefix=data_prefix, - filter_cfg=filter_cfg, - indices=indices, - serialize_data=serialize_data, - pipeline=pipeline, - test_mode=test_mode, - lazy_init=lazy_init, - max_refetch=max_refetch) - - def get_sequence_indices(self) -> List[List[int]]: - """Split original videos into sequences and build frame indices. - - This method overrides the default one in the base class. - """ - imgnames = self.ann_data['imgname'] - video_frames = defaultdict(list) - for idx, imgname in enumerate(imgnames): - subj, action, camera = self._parse_h36m_imgname(imgname) - video_frames[(subj, action, camera)].append(idx) - - # build sample indices - sequence_indices = [] - _len = (self.seq_len - 1) * self.seq_step + 1 - _step = self.seq_step - for _, _indices in sorted(video_frames.items()): - n_frame = len(_indices) - - if self.pad_video_seq: - # Pad the sequence so that every frame in the sequence will be - # predicted. 
- if self.causal: - frames_left = self.seq_len - 1 - frames_right = 0 - else: - frames_left = (self.seq_len - 1) // 2 - frames_right = frames_left - for i in range(n_frame): - pad_left = max(0, frames_left - i // _step) - pad_right = max(0, - frames_right - (n_frame - 1 - i) // _step) - start = max(i % _step, i - frames_left * _step) - end = min(n_frame - (n_frame - 1 - i) % _step, - i + frames_right * _step + 1) - sequence_indices.append([_indices[0]] * pad_left + - _indices[start:end:_step] + - [_indices[-1]] * pad_right) - else: - seqs_from_video = [ - _indices[i:(i + _len):_step] - for i in range(0, n_frame - _len + 1) - ] - sequence_indices.extend(seqs_from_video) - - # reduce dataset size if needed - subset_size = int(len(sequence_indices) * self.subset_frac) - start = np.random.randint(0, len(sequence_indices) - subset_size + 1) - end = start + subset_size - - return sequence_indices[start:end] - - def _load_annotations(self) -> Tuple[List[dict], List[dict]]: - instance_list, image_list = super()._load_annotations() - - h36m_data = self.ann_data - kpts_3d = h36m_data['S'] - - if self.keypoint_2d_src == 'detection': - assert exists(self.keypoint_2d_det_file) - kpts_2d = self._load_keypoint_2d_detection( - self.keypoint_2d_det_file) - assert kpts_2d.shape[0] == kpts_3d.shape[0] - assert kpts_2d.shape[2] == 3 - - for idx, frame_ids in enumerate(self.sequence_indices): - kpt_2d = kpts_2d[frame_ids].astype(np.float32) - keypoints = kpt_2d[..., :2] - keypoints_visible = kpt_2d[..., 2] - instance_list[idx].update({ - 'keypoints': - keypoints, - 'keypoints_visible': - keypoints_visible - }) - - return instance_list, image_list - - @staticmethod - def _parse_h36m_imgname(imgname) -> Tuple[str, str, str]: - """Parse imgname to get information of subject, action and camera. - - A typical h36m image filename is like: - S1_Directions_1.54138969_000001.jpg - """ - subj, rest = osp.basename(imgname).split('_', 1) - action, rest = rest.split('.', 1) - camera, rest = rest.split('_', 1) - return subj, action, camera - - def get_camera_param(self, imgname) -> dict: - """Get camera parameters of a frame by its image name.""" - assert hasattr(self, 'camera_param') - subj, _, camera = self._parse_h36m_imgname(imgname) - return self.camera_param[(subj, camera)] - - def _load_keypoint_2d_detection(self, det_file): - """"Load 2D joint detection results from file.""" - with get_local_path(det_file) as local_path: - kpts_2d = np.load(local_path).astype(np.float32) - - return kpts_2d +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from collections import defaultdict +from typing import Callable, List, Optional, Sequence, Tuple, Union + +import numpy as np +from mmengine.fileio import exists, get_local_path +from mmengine.utils import is_abs + +from mmpose.datasets.datasets import BaseMocapDataset +from mmpose.registry import DATASETS + + +@DATASETS.register_module() +class Human36mDataset(BaseMocapDataset): + """Human3.6M dataset for 3D human pose estimation. + + "Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human + Sensing in Natural Environments", TPAMI`2014. + More details can be found in the `paper + `__. 
+ + Human3.6M keypoint indexes:: + + 0: 'root (pelvis)', + 1: 'right_hip', + 2: 'right_knee', + 3: 'right_foot', + 4: 'left_hip', + 5: 'left_knee', + 6: 'left_foot', + 7: 'spine', + 8: 'thorax', + 9: 'neck_base', + 10: 'head', + 11: 'left_shoulder', + 12: 'left_elbow', + 13: 'left_wrist', + 14: 'right_shoulder', + 15: 'right_elbow', + 16: 'right_wrist' + + Args: + ann_file (str): Annotation file path. Default: ''. + seq_len (int): Number of frames in a sequence. Default: 1. + seq_step (int): The interval for extracting frames from the video. + Default: 1. + pad_video_seq (bool): Whether to pad the video so that poses will be + predicted for every frame in the video. Default: ``False``. + causal (bool): If set to ``True``, the rightmost input frame will be + the target frame. Otherwise, the middle input frame will be the + target frame. Default: ``True``. + subset_frac (float): The fraction to reduce dataset size. If set to 1, + the dataset size is not reduced. Default: 1. + keypoint_2d_src (str): Specifies 2D keypoint information options, which + should be one of the following options: + + - ``'gt'``: load from the annotation file + - ``'detection'``: load from a detection + result file of 2D keypoint + - 'pipeline': the information will be generated by the pipeline + + Default: ``'gt'``. + keypoint_2d_det_file (str, optional): The 2D keypoint detection file. + If set, 2d keypoint loaded from this file will be used instead of + ground-truth keypoints. This setting is only when + ``keypoint_2d_src`` is ``'detection'``. Default: ``None``. + camera_param_file (str): Cameras' parameters file. Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. + Default: ``dict(img='')``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. 
+ """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/h36m.py') + SUPPORTED_keypoint_2d_src = {'gt', 'detection', 'pipeline'} + + def __init__(self, + ann_file: str = '', + seq_len: int = 1, + seq_step: int = 1, + pad_video_seq: bool = False, + causal: bool = True, + subset_frac: float = 1.0, + keypoint_2d_src: str = 'gt', + keypoint_2d_det_file: Optional[str] = None, + camera_param_file: Optional[str] = None, + data_mode: str = 'topdown', + metainfo: Optional[dict] = None, + data_root: Optional[str] = None, + data_prefix: dict = dict(img=''), + filter_cfg: Optional[dict] = None, + indices: Optional[Union[int, Sequence[int]]] = None, + serialize_data: bool = True, + pipeline: List[Union[dict, Callable]] = [], + test_mode: bool = False, + lazy_init: bool = False, + max_refetch: int = 1000): + # check keypoint_2d_src + self.keypoint_2d_src = keypoint_2d_src + if self.keypoint_2d_src not in self.SUPPORTED_keypoint_2d_src: + raise ValueError( + f'Unsupported `keypoint_2d_src` "{self.keypoint_2d_src}". ' + f'Supported options are {self.SUPPORTED_keypoint_2d_src}') + + if keypoint_2d_det_file: + if not is_abs(keypoint_2d_det_file): + self.keypoint_2d_det_file = osp.join(data_root, + keypoint_2d_det_file) + else: + self.keypoint_2d_det_file = keypoint_2d_det_file + + self.seq_step = seq_step + self.pad_video_seq = pad_video_seq + + super().__init__( + ann_file=ann_file, + seq_len=seq_len, + causal=causal, + subset_frac=subset_frac, + camera_param_file=camera_param_file, + data_mode=data_mode, + metainfo=metainfo, + data_root=data_root, + data_prefix=data_prefix, + filter_cfg=filter_cfg, + indices=indices, + serialize_data=serialize_data, + pipeline=pipeline, + test_mode=test_mode, + lazy_init=lazy_init, + max_refetch=max_refetch) + + def get_sequence_indices(self) -> List[List[int]]: + """Split original videos into sequences and build frame indices. + + This method overrides the default one in the base class. + """ + imgnames = self.ann_data['imgname'] + video_frames = defaultdict(list) + for idx, imgname in enumerate(imgnames): + subj, action, camera = self._parse_h36m_imgname(imgname) + video_frames[(subj, action, camera)].append(idx) + + # build sample indices + sequence_indices = [] + _len = (self.seq_len - 1) * self.seq_step + 1 + _step = self.seq_step + for _, _indices in sorted(video_frames.items()): + n_frame = len(_indices) + + if self.pad_video_seq: + # Pad the sequence so that every frame in the sequence will be + # predicted. 
+ if self.causal: + frames_left = self.seq_len - 1 + frames_right = 0 + else: + frames_left = (self.seq_len - 1) // 2 + frames_right = frames_left + for i in range(n_frame): + pad_left = max(0, frames_left - i // _step) + pad_right = max(0, + frames_right - (n_frame - 1 - i) // _step) + start = max(i % _step, i - frames_left * _step) + end = min(n_frame - (n_frame - 1 - i) % _step, + i + frames_right * _step + 1) + sequence_indices.append([_indices[0]] * pad_left + + _indices[start:end:_step] + + [_indices[-1]] * pad_right) + else: + seqs_from_video = [ + _indices[i:(i + _len):_step] + for i in range(0, n_frame - _len + 1) + ] + sequence_indices.extend(seqs_from_video) + + # reduce dataset size if needed + subset_size = int(len(sequence_indices) * self.subset_frac) + start = np.random.randint(0, len(sequence_indices) - subset_size + 1) + end = start + subset_size + + return sequence_indices[start:end] + + def _load_annotations(self) -> Tuple[List[dict], List[dict]]: + instance_list, image_list = super()._load_annotations() + + h36m_data = self.ann_data + kpts_3d = h36m_data['S'] + + if self.keypoint_2d_src == 'detection': + assert exists(self.keypoint_2d_det_file) + kpts_2d = self._load_keypoint_2d_detection( + self.keypoint_2d_det_file) + assert kpts_2d.shape[0] == kpts_3d.shape[0] + assert kpts_2d.shape[2] == 3 + + for idx, frame_ids in enumerate(self.sequence_indices): + kpt_2d = kpts_2d[frame_ids].astype(np.float32) + keypoints = kpt_2d[..., :2] + keypoints_visible = kpt_2d[..., 2] + instance_list[idx].update({ + 'keypoints': + keypoints, + 'keypoints_visible': + keypoints_visible + }) + + return instance_list, image_list + + @staticmethod + def _parse_h36m_imgname(imgname) -> Tuple[str, str, str]: + """Parse imgname to get information of subject, action and camera. + + A typical h36m image filename is like: + S1_Directions_1.54138969_000001.jpg + """ + subj, rest = osp.basename(imgname).split('_', 1) + action, rest = rest.split('.', 1) + camera, rest = rest.split('_', 1) + return subj, action, camera + + def get_camera_param(self, imgname) -> dict: + """Get camera parameters of a frame by its image name.""" + assert hasattr(self, 'camera_param') + subj, _, camera = self._parse_h36m_imgname(imgname) + return self.camera_param[(subj, camera)] + + def _load_keypoint_2d_detection(self, det_file): + """"Load 2D joint detection results from file.""" + with get_local_path(det_file) as local_path: + kpts_2d = np.load(local_path).astype(np.float32) + + return kpts_2d diff --git a/mmpose/datasets/datasets/face/__init__.py b/mmpose/datasets/datasets/face/__init__.py index 700cb605f7..1f8f86ec83 100644 --- a/mmpose/datasets/datasets/face/__init__.py +++ b/mmpose/datasets/datasets/face/__init__.py @@ -1,12 +1,12 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .aflw_dataset import AFLWDataset -from .coco_wholebody_face_dataset import CocoWholeBodyFaceDataset -from .cofw_dataset import COFWDataset -from .face_300w_dataset import Face300WDataset -from .lapa_dataset import LapaDataset -from .wflw_dataset import WFLWDataset - -__all__ = [ - 'Face300WDataset', 'WFLWDataset', 'AFLWDataset', 'COFWDataset', - 'CocoWholeBodyFaceDataset', 'LapaDataset' -] +# Copyright (c) OpenMMLab. All rights reserved. 
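# --- Illustrative sketch (not part of the patch) ---------------------------
# How Human36mDataset.get_sequence_indices() above splits one video into input
# sequences when pad_video_seq=False: with seq_len=3 and seq_step=2, each
# sequence spans (seq_len - 1) * seq_step + 1 = 5 stored frames, strided by
# seq_step. The frame indices below are hypothetical and stand in for one
# (subject, action, camera) group.
seq_len, seq_step = 3, 2
_len = (seq_len - 1) * seq_step + 1          # 5
frames = list(range(8))                      # one video with 8 frames

seqs_from_video = [
    frames[i:(i + _len):seq_step] for i in range(0, len(frames) - _len + 1)
]
print(seqs_from_video)
# [[0, 2, 4], [1, 3, 5], [2, 4, 6], [3, 5, 7]]
# ---------------------------------------------------------------------------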
+from .aflw_dataset import AFLWDataset +from .coco_wholebody_face_dataset import CocoWholeBodyFaceDataset +from .cofw_dataset import COFWDataset +from .face_300w_dataset import Face300WDataset +from .lapa_dataset import LapaDataset +from .wflw_dataset import WFLWDataset + +__all__ = [ + 'Face300WDataset', 'WFLWDataset', 'AFLWDataset', 'COFWDataset', + 'CocoWholeBodyFaceDataset', 'LapaDataset' +] diff --git a/mmpose/datasets/datasets/face/aflw_dataset.py b/mmpose/datasets/datasets/face/aflw_dataset.py index deda0974bb..33927a3a7f 100644 --- a/mmpose/datasets/datasets/face/aflw_dataset.py +++ b/mmpose/datasets/datasets/face/aflw_dataset.py @@ -1,122 +1,122 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -from typing import Optional - -import numpy as np - -from mmpose.registry import DATASETS -from mmpose.structures.bbox import bbox_cs2xyxy -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class AFLWDataset(BaseCocoStyleDataset): - """AFLW dataset for face keypoint localization. - - "Annotated Facial Landmarks in the Wild: A Large-scale, - Real-world Database for Facial Landmark Localization". - In Proc. First IEEE International Workshop on Benchmarking - Facial Image Analysis Technologies, 2011. - - The landmark annotations follow the 19 points mark-up. The definition - can be found in `https://www.tugraz.at/institute/icg/research` - `/team-bischof/lrs/downloads/aflw/` - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. 
- """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/aflw.py') - - def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: - """Parse raw Face AFLW annotation of an instance. - - Args: - raw_data_info (dict): Raw data information loaded from - ``ann_file``. It should have following contents: - - - ``'raw_ann_info'``: Raw annotation of an instance - - ``'raw_img_info'``: Raw information of the image that - contains the instance - - Returns: - dict: Parsed instance annotation - """ - - ann = raw_data_info['raw_ann_info'] - img = raw_data_info['raw_img_info'] - - img_path = osp.join(self.data_prefix['img'], img['file_name']) - - # aflw bbox scales are normalized with factor 200. - pixel_std = 200. - - # center, scale in shape [1, 2] and bbox in [1, 4] - center = np.array([ann['center']], dtype=np.float32) - scale = np.array([[ann['scale'], ann['scale']]], - dtype=np.float32) * pixel_std - bbox = bbox_cs2xyxy(center, scale) - - # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] - _keypoints = np.array( - ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) - keypoints = _keypoints[..., :2] - keypoints_visible = np.minimum(1, _keypoints[..., 2]) - - num_keypoints = ann['num_keypoints'] - - data_info = { - 'img_id': ann['image_id'], - 'img_path': img_path, - 'bbox': bbox, - 'bbox_center': center, - 'bbox_scale': scale, - 'bbox_score': np.ones(1, dtype=np.float32), - 'num_keypoints': num_keypoints, - 'keypoints': keypoints, - 'keypoints_visible': keypoints_visible, - 'iscrowd': ann['iscrowd'], - 'id': ann['id'], - } - - if self.test_mode: - # 'box_size' is used as normalization factor - assert 'box_size' in ann, '"box_size" is missing in annotation, '\ - 'which is required for evaluation.' - data_info['box_size'] = ann['box_size'] - - return data_info +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import Optional + +import numpy as np + +from mmpose.registry import DATASETS +from mmpose.structures.bbox import bbox_cs2xyxy +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class AFLWDataset(BaseCocoStyleDataset): + """AFLW dataset for face keypoint localization. + + "Annotated Facial Landmarks in the Wild: A Large-scale, + Real-world Database for Facial Landmark Localization". + In Proc. First IEEE International Workshop on Benchmarking + Facial Image Analysis Technologies, 2011. + + The landmark annotations follow the 19 points mark-up. The definition + can be found in `https://www.tugraz.at/institute/icg/research` + `/team-bischof/lrs/downloads/aflw/` + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. 
+ filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/aflw.py') + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw Face AFLW annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + img_path = osp.join(self.data_prefix['img'], img['file_name']) + + # aflw bbox scales are normalized with factor 200. + pixel_std = 200. + + # center, scale in shape [1, 2] and bbox in [1, 4] + center = np.array([ann['center']], dtype=np.float32) + scale = np.array([[ann['scale'], ann['scale']]], + dtype=np.float32) * pixel_std + bbox = bbox_cs2xyxy(center, scale) + + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + _keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + num_keypoints = ann['num_keypoints'] + + data_info = { + 'img_id': ann['image_id'], + 'img_path': img_path, + 'bbox': bbox, + 'bbox_center': center, + 'bbox_scale': scale, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': num_keypoints, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'iscrowd': ann['iscrowd'], + 'id': ann['id'], + } + + if self.test_mode: + # 'box_size' is used as normalization factor + assert 'box_size' in ann, '"box_size" is missing in annotation, '\ + 'which is required for evaluation.' + data_info['box_size'] = ann['box_size'] + + return data_info diff --git a/mmpose/datasets/datasets/face/coco_wholebody_face_dataset.py b/mmpose/datasets/datasets/face/coco_wholebody_face_dataset.py index bc2c5be386..728da21ae1 100644 --- a/mmpose/datasets/datasets/face/coco_wholebody_face_dataset.py +++ b/mmpose/datasets/datasets/face/coco_wholebody_face_dataset.py @@ -1,115 +1,115 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
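For readers unfamiliar with the (center, scale) convention parsed by AFLWDataset above (and by the 300W and WFLW parsers later in this diff), the sketch below shows the arithmetic a conversion such as ``bbox_cs2xyxy`` is expected to perform once the annotation scale has been multiplied by ``pixel_std = 200``. The helper name and toy values are illustrative assumptions, not the library implementation.

# Illustrative sketch (not the MMPose implementation): how a (center, scale)
# annotation becomes an xyxy bbox after scale has been multiplied by
# pixel_std = 200, as done in AFLWDataset.parse_data_info above.
import numpy as np


def cs_to_xyxy_sketch(center: np.ndarray, scale: np.ndarray) -> np.ndarray:
    """Convert bboxes from (center, scale) to (x1, y1, x2, y2).

    Args:
        center: float array of shape [N, 2], bbox centers (x, y).
        scale: float array of shape [N, 2], bbox width/height in pixels
            (i.e. the annotation scale already multiplied by pixel_std).
    """
    half = scale * 0.5
    return np.concatenate([center - half, center + half], axis=1)


# Toy AFLW-style annotation: center (150, 120), unitless scale 1.2.
pixel_std = 200.0
center = np.array([[150.0, 120.0]], dtype=np.float32)
scale = np.array([[1.2, 1.2]], dtype=np.float32) * pixel_std  # 240 px box
print(cs_to_xyxy_sketch(center, scale))  # [[ 30.   0. 270. 240.]]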
-import os.path as osp -from typing import Optional - -import numpy as np - -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class CocoWholeBodyFaceDataset(BaseCocoStyleDataset): - """CocoWholeBodyDataset for face keypoint localization. - - `Whole-Body Human Pose Estimation in the Wild', ECCV'2020. - More details can be found in the `paper - `__ . - - The face landmark annotations follow the 68 points mark-up. - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict( - from_file='configs/_base_/datasets/coco_wholebody_face.py') - - def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: - """Parse raw CocoWholeBody Face annotation of an instance. - - Args: - raw_data_info (dict): Raw data information loaded from - ``ann_file``. 
It should have following contents: - - - ``'raw_ann_info'``: Raw annotation of an instance - - ``'raw_img_info'``: Raw information of the image that - contains the instance - - Returns: - dict: Parsed instance annotation - """ - - ann = raw_data_info['raw_ann_info'] - img = raw_data_info['raw_img_info'] - - # filter invalid instance - if not ann['face_valid'] or max(ann['face_kpts']) <= 0: - return None - - img_path = osp.join(self.data_prefix['img'], img['file_name']) - img_w, img_h = img['width'], img['height'] - - # get bbox in shape [1, 4], formatted as xywh - x, y, w, h = ann['face_box'] - x1 = np.clip(x, 0, img_w - 1) - y1 = np.clip(y, 0, img_h - 1) - x2 = np.clip(x + w, 0, img_w - 1) - y2 = np.clip(y + h, 0, img_h - 1) - - bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4) - - # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] - _keypoints = np.array( - ann['face_kpts'], dtype=np.float32).reshape(1, -1, 3) - keypoints = _keypoints[..., :2] - keypoints_visible = np.minimum(1, _keypoints[..., 2]) - - num_keypoints = np.count_nonzero(keypoints.max(axis=2)) - - data_info = { - 'img_id': ann['image_id'], - 'img_path': img_path, - 'bbox': bbox, - 'bbox_score': np.ones(1, dtype=np.float32), - 'num_keypoints': num_keypoints, - 'keypoints': keypoints, - 'keypoints_visible': keypoints_visible, - 'iscrowd': ann['iscrowd'], - 'id': ann['id'], - } - return data_info +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import Optional + +import numpy as np + +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class CocoWholeBodyFaceDataset(BaseCocoStyleDataset): + """CocoWholeBodyDataset for face keypoint localization. + + `Whole-Body Human Pose Estimation in the Wild', ECCV'2020. + More details can be found in the `paper + `__ . + + The face landmark annotations follow the 68 points mark-up. + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. 
+ lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict( + from_file='configs/_base_/datasets/coco_wholebody_face.py') + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw CocoWholeBody Face annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + # filter invalid instance + if not ann['face_valid'] or max(ann['face_kpts']) <= 0: + return None + + img_path = osp.join(self.data_prefix['img'], img['file_name']) + img_w, img_h = img['width'], img['height'] + + # get bbox in shape [1, 4], formatted as xywh + x, y, w, h = ann['face_box'] + x1 = np.clip(x, 0, img_w - 1) + y1 = np.clip(y, 0, img_h - 1) + x2 = np.clip(x + w, 0, img_w - 1) + y2 = np.clip(y + h, 0, img_h - 1) + + bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4) + + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + _keypoints = np.array( + ann['face_kpts'], dtype=np.float32).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + num_keypoints = np.count_nonzero(keypoints.max(axis=2)) + + data_info = { + 'img_id': ann['image_id'], + 'img_path': img_path, + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': num_keypoints, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'iscrowd': ann['iscrowd'], + 'id': ann['id'], + } + return data_info diff --git a/mmpose/datasets/datasets/face/cofw_dataset.py b/mmpose/datasets/datasets/face/cofw_dataset.py index 5ec2a37efd..47fa634de9 100644 --- a/mmpose/datasets/datasets/face/cofw_dataset.py +++ b/mmpose/datasets/datasets/face/cofw_dataset.py @@ -1,53 +1,53 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class COFWDataset(BaseCocoStyleDataset): - """COFW dataset for face keypoint localization. - - "Robust face landmark estimation under occlusion", ICCV'2013. - - The landmark annotations follow the 29 points mark-up. The definition - can be found in `http://www.vision.caltech.edu/xpburgos/ICCV13/`__ . - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. 
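The face-box handling in ``CocoWholeBodyFaceDataset.parse_data_info`` above clips an xywh box to the image bounds and counts keypoints whose coordinates are not all zero. A minimal, self-contained sketch of that logic follows; the function name and toy values are illustrative, not part of MMPose.

# Sketch of the bbox clipping and keypoint counting done above.
import numpy as np


def parse_face_box_sketch(face_box, face_kpts, img_w, img_h):
    x, y, w, h = face_box
    x1 = np.clip(x, 0, img_w - 1)
    y1 = np.clip(y, 0, img_h - 1)
    x2 = np.clip(x + w, 0, img_w - 1)
    y2 = np.clip(y + h, 0, img_h - 1)
    bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4)

    kpts = np.array(face_kpts, dtype=np.float32).reshape(1, -1, 3)
    keypoints = kpts[..., :2]                       # [1, K, 2]
    visible = np.minimum(1, kpts[..., 2])           # [1, K]
    # A keypoint counts as labelled if its (x, y) is not all-zero.
    num_keypoints = int(np.count_nonzero(keypoints.max(axis=2)))
    return bbox, keypoints, visible, num_keypoints


# Two toy keypoints, the second unlabelled (all zeros); box partly outside.
bbox, kpts, vis, n = parse_face_box_sketch(
    face_box=[620.0, 20.0, 50.0, 60.0],
    face_kpts=[630.0, 40.0, 2.0, 0.0, 0.0, 0.0],
    img_w=640, img_h=480)
print(bbox)  # [[620.  20. 639.  80.]]
print(n)     # 1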
Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/cofw.py') +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class COFWDataset(BaseCocoStyleDataset): + """COFW dataset for face keypoint localization. + + "Robust face landmark estimation under occlusion", ICCV'2013. + + The landmark annotations follow the 29 points mark-up. The definition + can be found in `http://www.vision.caltech.edu/xpburgos/ICCV13/`__ . + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. 
+ lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/cofw.py') diff --git a/mmpose/datasets/datasets/face/face_300w_dataset.py b/mmpose/datasets/datasets/face/face_300w_dataset.py index c70e892b4f..0071c8799a 100644 --- a/mmpose/datasets/datasets/face/face_300w_dataset.py +++ b/mmpose/datasets/datasets/face/face_300w_dataset.py @@ -1,112 +1,112 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -from typing import Optional - -import numpy as np - -from mmpose.registry import DATASETS -from mmpose.structures.bbox import bbox_cs2xyxy -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class Face300WDataset(BaseCocoStyleDataset): - """300W dataset for face keypoint localization. - - "300 faces In-the-wild challenge: Database and results", - Image and Vision Computing (IMAVIS) 2019. - - The landmark annotations follow the 68 points mark-up. The definition - can be found in `https://ibug.doc.ic.ac.uk/resources/300-W/`. - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. 
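``COFWDataset`` above overrides nothing but ``METAINFO``, so it is normally built from a config dict through the ``DATASETS`` registry rather than instantiated by hand. A hedged usage sketch, assuming the standard mmengine ``Registry.build`` interface; the paths are placeholders, not taken from an official MMPose config.

# Usage sketch with placeholder paths; real configs add loading/affine/target
# transforms to the pipeline instead of leaving it empty.
from mmpose.registry import DATASETS

cfg = dict(
    type='COFWDataset',
    data_root='data/cofw/',                  # placeholder
    ann_file='annotations/cofw_test.json',   # placeholder
    data_mode='topdown',
    data_prefix=dict(img='images/'),
    pipeline=[],
    test_mode=True,
)
dataset = DATASETS.build(cfg)
print(len(dataset))  # number of face instances in the annotation file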
- """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/300w.py') - - def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: - """Parse raw Face300W annotation of an instance. - - Args: - raw_data_info (dict): Raw data information loaded from - ``ann_file``. It should have following contents: - - - ``'raw_ann_info'``: Raw annotation of an instance - - ``'raw_img_info'``: Raw information of the image that - contains the instance - - Returns: - dict: Parsed instance annotation - """ - - ann = raw_data_info['raw_ann_info'] - img = raw_data_info['raw_img_info'] - - img_path = osp.join(self.data_prefix['img'], img['file_name']) - - # 300w bbox scales are normalized with factor 200. - pixel_std = 200. - - # center, scale in shape [1, 2] and bbox in [1, 4] - center = np.array([ann['center']], dtype=np.float32) - scale = np.array([[ann['scale'], ann['scale']]], - dtype=np.float32) * pixel_std - bbox = bbox_cs2xyxy(center, scale) - - # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] - _keypoints = np.array( - ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) - keypoints = _keypoints[..., :2] - keypoints_visible = np.minimum(1, _keypoints[..., 2]) - - num_keypoints = ann['num_keypoints'] - - data_info = { - 'img_id': ann['image_id'], - 'img_path': img_path, - 'bbox': bbox, - 'bbox_center': center, - 'bbox_scale': scale, - 'bbox_score': np.ones(1, dtype=np.float32), - 'num_keypoints': num_keypoints, - 'keypoints': keypoints, - 'keypoints_visible': keypoints_visible, - 'iscrowd': ann['iscrowd'], - 'id': ann['id'], - } - return data_info +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import Optional + +import numpy as np + +from mmpose.registry import DATASETS +from mmpose.structures.bbox import bbox_cs2xyxy +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class Face300WDataset(BaseCocoStyleDataset): + """300W dataset for face keypoint localization. + + "300 faces In-the-wild challenge: Database and results", + Image and Vision Computing (IMAVIS) 2019. + + The landmark annotations follow the 68 points mark-up. The definition + can be found in `https://ibug.doc.ic.ac.uk/resources/300-W/`. + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. 
+ serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/300w.py') + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw Face300W annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + img_path = osp.join(self.data_prefix['img'], img['file_name']) + + # 300w bbox scales are normalized with factor 200. + pixel_std = 200. + + # center, scale in shape [1, 2] and bbox in [1, 4] + center = np.array([ann['center']], dtype=np.float32) + scale = np.array([[ann['scale'], ann['scale']]], + dtype=np.float32) * pixel_std + bbox = bbox_cs2xyxy(center, scale) + + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + _keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + num_keypoints = ann['num_keypoints'] + + data_info = { + 'img_id': ann['image_id'], + 'img_path': img_path, + 'bbox': bbox, + 'bbox_center': center, + 'bbox_scale': scale, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': num_keypoints, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'iscrowd': ann['iscrowd'], + 'id': ann['id'], + } + return data_info diff --git a/mmpose/datasets/datasets/face/lapa_dataset.py b/mmpose/datasets/datasets/face/lapa_dataset.py index 1a5bdc4ec0..e7a7e0a767 100644 --- a/mmpose/datasets/datasets/face/lapa_dataset.py +++ b/mmpose/datasets/datasets/face/lapa_dataset.py @@ -1,54 +1,54 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class LapaDataset(BaseCocoStyleDataset): - """LaPa dataset for face keypoint localization. - - "A New Dataset and Boundary-Attention Semantic Segmentation - for Face Parsing", AAAI'2020. - - The landmark annotations follow the 106 points mark-up. The definition - can be found in `https://github.com/JDAI-CV/lapa-dataset/`__ . - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. 
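All of the face parsers above handle COCO-style keypoints the same way: a flat ``[x1, y1, v1, x2, y2, v2, ...]`` list is reshaped to ``[1, K, 3]``, then split into coordinates and a clamped visibility flag. A tiny sketch with toy values:

# Sketch of the keypoint parsing shared by the face datasets above.
import numpy as np

# Flat COCO-style triplets for K = 3 toy keypoints: (x, y, v).
raw = [10.0, 20.0, 2.0, 0.0, 0.0, 0.0, 33.0, 44.0, 1.0]

_keypoints = np.array(raw, dtype=np.float32).reshape(1, -1, 3)  # [1, K, 3]
keypoints = _keypoints[..., :2]                        # [1, K, 2] coordinates
keypoints_visible = np.minimum(1, _keypoints[..., 2])  # [1, K], clamped to 1

print(keypoints.shape, keypoints_visible)  # (1, 3, 2) [[1. 0. 1.]]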
- data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/lapa.py') +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class LapaDataset(BaseCocoStyleDataset): + """LaPa dataset for face keypoint localization. + + "A New Dataset and Boundary-Attention Semantic Segmentation + for Face Parsing", AAAI'2020. + + The landmark annotations follow the 106 points mark-up. The definition + can be found in `https://github.com/JDAI-CV/lapa-dataset/`__ . + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. 
+ serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/lapa.py') diff --git a/mmpose/datasets/datasets/face/wflw_dataset.py b/mmpose/datasets/datasets/face/wflw_dataset.py index 9c1c23053c..7a4b21b27b 100644 --- a/mmpose/datasets/datasets/face/wflw_dataset.py +++ b/mmpose/datasets/datasets/face/wflw_dataset.py @@ -1,112 +1,112 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -from typing import Optional - -import numpy as np - -from mmpose.registry import DATASETS -from mmpose.structures.bbox import bbox_cs2xyxy -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class WFLWDataset(BaseCocoStyleDataset): - """WFLW dataset for face keypoint localization. - - "Look at Boundary: A Boundary-Aware Face Alignment Algorithm", - CVPR'2018. - - The landmark annotations follow the 98 points mark-up. The definition - can be found in `https://wywu.github.io/projects/LAB/WFLW.html`__ . - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. 
In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/wflw.py') - - def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: - """Parse raw Face WFLW annotation of an instance. - - Args: - raw_data_info (dict): Raw data information loaded from - ``ann_file``. It should have following contents: - - - ``'raw_ann_info'``: Raw annotation of an instance - - ``'raw_img_info'``: Raw information of the image that - contains the instance - - Returns: - dict: Parsed instance annotation - """ - - ann = raw_data_info['raw_ann_info'] - img = raw_data_info['raw_img_info'] - - img_path = osp.join(self.data_prefix['img'], img['file_name']) - - # wflw bbox scales are normalized with factor 200. - pixel_std = 200. - - # center, scale in shape [1, 2] and bbox in [1, 4] - center = np.array([ann['center']], dtype=np.float32) - scale = np.array([[ann['scale'], ann['scale']]], - dtype=np.float32) * pixel_std - bbox = bbox_cs2xyxy(center, scale) - - # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] - _keypoints = np.array( - ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) - keypoints = _keypoints[..., :2] - keypoints_visible = np.minimum(1, _keypoints[..., 2]) - - num_keypoints = ann['num_keypoints'] - - data_info = { - 'img_id': ann['image_id'], - 'img_path': img_path, - 'bbox': bbox, - 'bbox_center': center, - 'bbox_scale': scale, - 'bbox_score': np.ones(1, dtype=np.float32), - 'num_keypoints': num_keypoints, - 'keypoints': keypoints, - 'keypoints_visible': keypoints_visible, - 'iscrowd': ann['iscrowd'], - 'id': ann['id'], - } - return data_info +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import Optional + +import numpy as np + +from mmpose.registry import DATASETS +from mmpose.structures.bbox import bbox_cs2xyxy +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class WFLWDataset(BaseCocoStyleDataset): + """WFLW dataset for face keypoint localization. + + "Look at Boundary: A Boundary-Aware Face Alignment Algorithm", + CVPR'2018. + + The landmark annotations follow the 98 points mark-up. The definition + can be found in `https://wywu.github.io/projects/LAB/WFLW.html`__ . + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. 
+ filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/wflw.py') + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw Face WFLW annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + img_path = osp.join(self.data_prefix['img'], img['file_name']) + + # wflw bbox scales are normalized with factor 200. + pixel_std = 200. + + # center, scale in shape [1, 2] and bbox in [1, 4] + center = np.array([ann['center']], dtype=np.float32) + scale = np.array([[ann['scale'], ann['scale']]], + dtype=np.float32) * pixel_std + bbox = bbox_cs2xyxy(center, scale) + + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + _keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + num_keypoints = ann['num_keypoints'] + + data_info = { + 'img_id': ann['image_id'], + 'img_path': img_path, + 'bbox': bbox, + 'bbox_center': center, + 'bbox_scale': scale, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': num_keypoints, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'iscrowd': ann['iscrowd'], + 'id': ann['id'], + } + return data_info diff --git a/mmpose/datasets/datasets/fashion/__init__.py b/mmpose/datasets/datasets/fashion/__init__.py index 8be25dede3..51a563a9d5 100644 --- a/mmpose/datasets/datasets/fashion/__init__.py +++ b/mmpose/datasets/datasets/fashion/__init__.py @@ -1,5 +1,5 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .deepfashion2_dataset import DeepFashion2Dataset -from .deepfashion_dataset import DeepFashionDataset - -__all__ = ['DeepFashionDataset', 'DeepFashion2Dataset'] +# Copyright (c) OpenMMLab. All rights reserved. 
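In training configs these classes are referenced by ``type`` inside a dataloader entry rather than built directly. An abbreviated, hedged sketch using ``WFLWDataset``; the paths are placeholders, the pipeline is left empty, and a real MMPose config would also specify a sampler and a non-empty transform pipeline.

# Abbreviated config-style sketch (placeholder paths, empty pipeline).
train_dataloader = dict(
    batch_size=32,
    num_workers=2,
    dataset=dict(
        type='WFLWDataset',
        data_root='data/wflw/',                    # placeholder
        ann_file='annotations/wflw_train.json',    # placeholder
        data_mode='topdown',
        data_prefix=dict(img='images/'),
        pipeline=[],  # e.g. loading, affine and target-generation transforms
    ),
)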
+from .deepfashion2_dataset import DeepFashion2Dataset +from .deepfashion_dataset import DeepFashionDataset + +__all__ = ['DeepFashionDataset', 'DeepFashion2Dataset'] diff --git a/mmpose/datasets/datasets/fashion/deepfashion2_dataset.py b/mmpose/datasets/datasets/fashion/deepfashion2_dataset.py index c3cde9bf97..cbf7a98d99 100644 --- a/mmpose/datasets/datasets/fashion/deepfashion2_dataset.py +++ b/mmpose/datasets/datasets/fashion/deepfashion2_dataset.py @@ -1,10 +1,10 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module(name='DeepFashion2Dataset') -class DeepFashion2Dataset(BaseCocoStyleDataset): - """DeepFashion2 dataset for fashion landmark detection.""" - - METAINFO: dict = dict(from_file='configs/_base_/datasets/deepfashion2.py') +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module(name='DeepFashion2Dataset') +class DeepFashion2Dataset(BaseCocoStyleDataset): + """DeepFashion2 dataset for fashion landmark detection.""" + + METAINFO: dict = dict(from_file='configs/_base_/datasets/deepfashion2.py') diff --git a/mmpose/datasets/datasets/fashion/deepfashion_dataset.py b/mmpose/datasets/datasets/fashion/deepfashion_dataset.py index a0aa493732..edf24265ff 100644 --- a/mmpose/datasets/datasets/fashion/deepfashion_dataset.py +++ b/mmpose/datasets/datasets/fashion/deepfashion_dataset.py @@ -1,137 +1,137 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Callable, List, Optional, Sequence, Union - -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class DeepFashionDataset(BaseCocoStyleDataset): - """DeepFashion dataset (full-body clothes) for fashion landmark detection. - - "DeepFashion: Powering Robust Clothes Recognition - and Retrieval with Rich Annotations", CVPR'2016. - "Fashion Landmark Detection in the Wild", ECCV'2016. - - The dataset contains 3 categories for full-body, upper-body and lower-body. - - Fashion landmark indexes for upper-body clothes:: - - 0: 'left collar', - 1: 'right collar', - 2: 'left sleeve', - 3: 'right sleeve', - 4: 'left hem', - 5: 'right hem' - - Fashion landmark indexes for lower-body clothes:: - - 0: 'left waistline', - 1: 'right waistline', - 2: 'left hem', - 3: 'right hem' - - Fashion landmark indexes for full-body clothes:: - - 0: 'left collar', - 1: 'right collar', - 2: 'left sleeve', - 3: 'right sleeve', - 4: 'left waistline', - 5: 'right waistline', - 6: 'left hem', - 7: 'right hem' - - Args: - ann_file (str): Annotation file path. Default: ''. - subset (str): Specifies the subset of body: ``'full'``, ``'upper'`` or - ``'lower'``. Default: '', which means ``'full'``. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. 
- data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img='')``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - def __init__(self, - ann_file: str = '', - subset: str = '', - bbox_file: Optional[str] = None, - data_mode: str = 'topdown', - metainfo: Optional[dict] = None, - data_root: Optional[str] = None, - data_prefix: dict = dict(img=''), - filter_cfg: Optional[dict] = None, - indices: Optional[Union[int, Sequence[int]]] = None, - serialize_data: bool = True, - pipeline: List[Union[dict, Callable]] = [], - test_mode: bool = False, - lazy_init: bool = False, - max_refetch: int = 1000): - self._check_subset_and_metainfo(subset) - - super().__init__( - ann_file=ann_file, - bbox_file=bbox_file, - data_mode=data_mode, - metainfo=metainfo, - data_root=data_root, - data_prefix=data_prefix, - filter_cfg=filter_cfg, - indices=indices, - serialize_data=serialize_data, - pipeline=pipeline, - test_mode=test_mode, - lazy_init=lazy_init, - max_refetch=max_refetch) - - @classmethod - def _check_subset_and_metainfo(cls, subset: str = '') -> None: - """Check the subset of body and set the corresponding metainfo. - - Args: - subset(str): the subset of body: could be ``'full'``, ``'upper'`` - or ``'lower'``. Default: '', which means ``'full'``. - """ - if subset == '' or subset == 'full': - cls.METAINFO = dict( - from_file='configs/_base_/datasets/deepfashion_full.py') - elif subset == 'upper': - cls.METAINFO = dict( - from_file='configs/_base_/datasets/deepfashion_upper.py') - elif subset == 'lower': - cls.METAINFO = dict( - from_file='configs/_base_/datasets/deepfashion_lower.py') - else: - raise ValueError( - f'{cls.__class__.__name__} got invalid subset: ' - f'{subset}. Should be "full", "lower" or "upper".') +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Callable, List, Optional, Sequence, Union + +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class DeepFashionDataset(BaseCocoStyleDataset): + """DeepFashion dataset (full-body clothes) for fashion landmark detection. + + "DeepFashion: Powering Robust Clothes Recognition + and Retrieval with Rich Annotations", CVPR'2016. + "Fashion Landmark Detection in the Wild", ECCV'2016. + + The dataset contains 3 categories for full-body, upper-body and lower-body. 
+ + Fashion landmark indexes for upper-body clothes:: + + 0: 'left collar', + 1: 'right collar', + 2: 'left sleeve', + 3: 'right sleeve', + 4: 'left hem', + 5: 'right hem' + + Fashion landmark indexes for lower-body clothes:: + + 0: 'left waistline', + 1: 'right waistline', + 2: 'left hem', + 3: 'right hem' + + Fashion landmark indexes for full-body clothes:: + + 0: 'left collar', + 1: 'right collar', + 2: 'left sleeve', + 3: 'right sleeve', + 4: 'left waistline', + 5: 'right waistline', + 6: 'left hem', + 7: 'right hem' + + Args: + ann_file (str): Annotation file path. Default: ''. + subset (str): Specifies the subset of body: ``'full'``, ``'upper'`` or + ``'lower'``. Default: '', which means ``'full'``. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img='')``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. 
+ """ + + def __init__(self, + ann_file: str = '', + subset: str = '', + bbox_file: Optional[str] = None, + data_mode: str = 'topdown', + metainfo: Optional[dict] = None, + data_root: Optional[str] = None, + data_prefix: dict = dict(img=''), + filter_cfg: Optional[dict] = None, + indices: Optional[Union[int, Sequence[int]]] = None, + serialize_data: bool = True, + pipeline: List[Union[dict, Callable]] = [], + test_mode: bool = False, + lazy_init: bool = False, + max_refetch: int = 1000): + self._check_subset_and_metainfo(subset) + + super().__init__( + ann_file=ann_file, + bbox_file=bbox_file, + data_mode=data_mode, + metainfo=metainfo, + data_root=data_root, + data_prefix=data_prefix, + filter_cfg=filter_cfg, + indices=indices, + serialize_data=serialize_data, + pipeline=pipeline, + test_mode=test_mode, + lazy_init=lazy_init, + max_refetch=max_refetch) + + @classmethod + def _check_subset_and_metainfo(cls, subset: str = '') -> None: + """Check the subset of body and set the corresponding metainfo. + + Args: + subset(str): the subset of body: could be ``'full'``, ``'upper'`` + or ``'lower'``. Default: '', which means ``'full'``. + """ + if subset == '' or subset == 'full': + cls.METAINFO = dict( + from_file='configs/_base_/datasets/deepfashion_full.py') + elif subset == 'upper': + cls.METAINFO = dict( + from_file='configs/_base_/datasets/deepfashion_upper.py') + elif subset == 'lower': + cls.METAINFO = dict( + from_file='configs/_base_/datasets/deepfashion_lower.py') + else: + raise ValueError( + f'{cls.__class__.__name__} got invalid subset: ' + f'{subset}. Should be "full", "lower" or "upper".') diff --git a/mmpose/datasets/datasets/hand/__init__.py b/mmpose/datasets/datasets/hand/__init__.py index d5e2222be9..0a87fed008 100644 --- a/mmpose/datasets/datasets/hand/__init__.py +++ b/mmpose/datasets/datasets/hand/__init__.py @@ -1,11 +1,11 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .coco_wholebody_hand_dataset import CocoWholeBodyHandDataset -from .freihand_dataset import FreiHandDataset -from .onehand10k_dataset import OneHand10KDataset -from .panoptic_hand2d_dataset import PanopticHand2DDataset -from .rhd2d_dataset import Rhd2DDataset - -__all__ = [ - 'OneHand10KDataset', 'FreiHandDataset', 'PanopticHand2DDataset', - 'Rhd2DDataset', 'CocoWholeBodyHandDataset' -] +# Copyright (c) OpenMMLab. All rights reserved. +from .coco_wholebody_hand_dataset import CocoWholeBodyHandDataset +from .freihand_dataset import FreiHandDataset +from .onehand10k_dataset import OneHand10KDataset +from .panoptic_hand2d_dataset import PanopticHand2DDataset +from .rhd2d_dataset import Rhd2DDataset + +__all__ = [ + 'OneHand10KDataset', 'FreiHandDataset', 'PanopticHand2DDataset', + 'Rhd2DDataset', 'CocoWholeBodyHandDataset' +] diff --git a/mmpose/datasets/datasets/hand/coco_wholebody_hand_dataset.py b/mmpose/datasets/datasets/hand/coco_wholebody_hand_dataset.py index dba0132f58..7f508073dd 100644 --- a/mmpose/datasets/datasets/hand/coco_wholebody_hand_dataset.py +++ b/mmpose/datasets/datasets/hand/coco_wholebody_hand_dataset.py @@ -1,148 +1,148 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import os.path as osp -from typing import List, Tuple - -import numpy as np -from mmengine.fileio import exists, get_local_path -from xtcocotools.coco import COCO - -from mmpose.registry import DATASETS -from mmpose.structures.bbox import bbox_xywh2xyxy -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class CocoWholeBodyHandDataset(BaseCocoStyleDataset): - """CocoWholeBodyDataset for hand pose estimation. - - "Whole-Body Human Pose Estimation in the Wild", ECCV'2020. - More details can be found in the `paper - `__ . - - COCO-WholeBody Hand keypoints:: - - 0: 'wrist', - 1: 'thumb1', - 2: 'thumb2', - 3: 'thumb3', - 4: 'thumb4', - 5: 'forefinger1', - 6: 'forefinger2', - 7: 'forefinger3', - 8: 'forefinger4', - 9: 'middle_finger1', - 10: 'middle_finger2', - 11: 'middle_finger3', - 12: 'middle_finger4', - 13: 'ring_finger1', - 14: 'ring_finger2', - 15: 'ring_finger3', - 16: 'ring_finger4', - 17: 'pinky_finger1', - 18: 'pinky_finger2', - 19: 'pinky_finger3', - 20: 'pinky_finger4' - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. 
- """ - - METAINFO: dict = dict( - from_file='configs/_base_/datasets/coco_wholebody_hand.py') - - def _load_annotations(self) -> Tuple[List[dict], List[dict]]: - """Load data from annotations in COCO format.""" - - assert exists(self.ann_file), 'Annotation file does not exist' - - with get_local_path(self.ann_file) as local_path: - self.coco = COCO(local_path) - instance_list = [] - image_list = [] - id = 0 - - for img_id in self.coco.getImgIds(): - img = self.coco.loadImgs(img_id)[0] - - img.update({ - 'img_id': - img_id, - 'img_path': - osp.join(self.data_prefix['img'], img['file_name']), - }) - image_list.append(img) - - ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=False) - anns = self.coco.loadAnns(ann_ids) - for ann in anns: - for type in ['left', 'right']: - # filter invalid hand annotations, there might be two - # valid instances (left and right hand) in one image - if ann[f'{type}hand_valid'] and max( - ann[f'{type}hand_kpts']) > 0: - - bbox_xywh = np.array( - ann[f'{type}hand_box'], - dtype=np.float32).reshape(1, 4) - - bbox = bbox_xywh2xyxy(bbox_xywh) - - _keypoints = np.array( - ann[f'{type}hand_kpts'], - dtype=np.float32).reshape(1, -1, 3) - keypoints = _keypoints[..., :2] - keypoints_visible = np.minimum(1, _keypoints[..., 2]) - - num_keypoints = np.count_nonzero(keypoints.max(axis=2)) - - instance_info = { - 'img_id': ann['image_id'], - 'img_path': img['img_path'], - 'bbox': bbox, - 'bbox_score': np.ones(1, dtype=np.float32), - 'num_keypoints': num_keypoints, - 'keypoints': keypoints, - 'keypoints_visible': keypoints_visible, - 'iscrowd': ann['iscrowd'], - 'segmentation': ann['segmentation'], - 'id': id, - } - instance_list.append(instance_info) - id = id + 1 - - instance_list = sorted(instance_list, key=lambda x: x['id']) - return instance_list, image_list +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import List, Tuple + +import numpy as np +from mmengine.fileio import exists, get_local_path +from xtcocotools.coco import COCO + +from mmpose.registry import DATASETS +from mmpose.structures.bbox import bbox_xywh2xyxy +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class CocoWholeBodyHandDataset(BaseCocoStyleDataset): + """CocoWholeBodyDataset for hand pose estimation. + + "Whole-Body Human Pose Estimation in the Wild", ECCV'2020. + More details can be found in the `paper + `__ . + + COCO-WholeBody Hand keypoints:: + + 0: 'wrist', + 1: 'thumb1', + 2: 'thumb2', + 3: 'thumb3', + 4: 'thumb4', + 5: 'forefinger1', + 6: 'forefinger2', + 7: 'forefinger3', + 8: 'forefinger4', + 9: 'middle_finger1', + 10: 'middle_finger2', + 11: 'middle_finger3', + 12: 'middle_finger4', + 13: 'ring_finger1', + 14: 'ring_finger2', + 15: 'ring_finger3', + 16: 'ring_finger4', + 17: 'pinky_finger1', + 18: 'pinky_finger2', + 19: 'pinky_finger3', + 20: 'pinky_finger4' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. 
Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict( + from_file='configs/_base_/datasets/coco_wholebody_hand.py') + + def _load_annotations(self) -> Tuple[List[dict], List[dict]]: + """Load data from annotations in COCO format.""" + + assert exists(self.ann_file), 'Annotation file does not exist' + + with get_local_path(self.ann_file) as local_path: + self.coco = COCO(local_path) + instance_list = [] + image_list = [] + id = 0 + + for img_id in self.coco.getImgIds(): + img = self.coco.loadImgs(img_id)[0] + + img.update({ + 'img_id': + img_id, + 'img_path': + osp.join(self.data_prefix['img'], img['file_name']), + }) + image_list.append(img) + + ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=False) + anns = self.coco.loadAnns(ann_ids) + for ann in anns: + for type in ['left', 'right']: + # filter invalid hand annotations, there might be two + # valid instances (left and right hand) in one image + if ann[f'{type}hand_valid'] and max( + ann[f'{type}hand_kpts']) > 0: + + bbox_xywh = np.array( + ann[f'{type}hand_box'], + dtype=np.float32).reshape(1, 4) + + bbox = bbox_xywh2xyxy(bbox_xywh) + + _keypoints = np.array( + ann[f'{type}hand_kpts'], + dtype=np.float32).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + num_keypoints = np.count_nonzero(keypoints.max(axis=2)) + + instance_info = { + 'img_id': ann['image_id'], + 'img_path': img['img_path'], + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': num_keypoints, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'iscrowd': ann['iscrowd'], + 'segmentation': ann['segmentation'], + 'id': id, + } + instance_list.append(instance_info) + id = id + 1 + + instance_list = sorted(instance_list, key=lambda x: x['id']) + return instance_list, image_list diff --git a/mmpose/datasets/datasets/hand/freihand_dataset.py b/mmpose/datasets/datasets/hand/freihand_dataset.py index 8f0e23cdd5..b530779606 100644 --- a/mmpose/datasets/datasets/hand/freihand_dataset.py +++ b/mmpose/datasets/datasets/hand/freihand_dataset.py @@ -1,128 +1,128 @@ 
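A minimal usage sketch for the CocoWholeBodyHandDataset defined above. The paths, the empty pipeline, and the top-level import are assumptions for illustration (run from the MMPose repo root, or an installed copy, so the METAINFO config can be resolved); with an empty pipeline the raw data_info dicts produced by _load_annotations are returned as-is.

# Hypothetical paths; adjust to the local COCO-WholeBody layout.
from mmpose.datasets import CocoWholeBodyHandDataset

dataset = CocoWholeBodyHandDataset(
    ann_file='annotations/coco_wholebody_val_v1.0.json',
    data_root='data/coco/',
    data_prefix=dict(img='val2017/'),
    data_mode='topdown',
    pipeline=[],      # no transforms, so samples are the parsed data_info dicts
    test_mode=True,
)
print(len(dataset))                # one entry per valid left/right hand instance
sample = dataset[0]
print(sample['keypoints'].shape)   # (1, 21, 2)
print(sample['bbox'].shape)        # (1, 4), xyxy after bbox_xywh2xyxy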
-# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -from typing import Optional - -import numpy as np - -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class FreiHandDataset(BaseCocoStyleDataset): - """FreiHand dataset for hand pose estimation. - - "FreiHAND: A Dataset for Markerless Capture of Hand Pose - and Shape from Single RGB Images", ICCV'2019. - More details can be found in the `paper - `__ . - - FreiHand keypoints:: - - 0: 'wrist', - 1: 'thumb1', - 2: 'thumb2', - 3: 'thumb3', - 4: 'thumb4', - 5: 'forefinger1', - 6: 'forefinger2', - 7: 'forefinger3', - 8: 'forefinger4', - 9: 'middle_finger1', - 10: 'middle_finger2', - 11: 'middle_finger3', - 12: 'middle_finger4', - 13: 'ring_finger1', - 14: 'ring_finger2', - 15: 'ring_finger3', - 16: 'ring_finger4', - 17: 'pinky_finger1', - 18: 'pinky_finger2', - 19: 'pinky_finger3', - 20: 'pinky_finger4' - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/freihand2d.py') - - def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: - """Parse raw COCO annotation of an instance. - - Args: - raw_data_info (dict): Raw data information loaded from - ``ann_file``. 
It should have following contents: - - - ``'raw_ann_info'``: Raw annotation of an instance - - ``'raw_img_info'``: Raw information of the image that - contains the instance - - Returns: - dict: Parsed instance annotation - """ - - ann = raw_data_info['raw_ann_info'] - img = raw_data_info['raw_img_info'] - - img_path = osp.join(self.data_prefix['img'], img['file_name']) - - # use the entire image which is 224x224 - bbox = np.array([0, 0, 224, 224], dtype=np.float32).reshape(1, 4) - - # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] - _keypoints = np.array( - ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) - keypoints = _keypoints[..., :2] - keypoints_visible = np.minimum(1, _keypoints[..., 2]) - - num_keypoints = np.count_nonzero(keypoints.max(axis=2)) - - data_info = { - 'img_id': ann['image_id'], - 'img_path': img_path, - 'bbox': bbox, - 'bbox_score': np.ones(1, dtype=np.float32), - 'num_keypoints': num_keypoints, - 'keypoints': keypoints, - 'keypoints_visible': keypoints_visible, - 'iscrowd': ann['iscrowd'], - 'segmentation': ann['segmentation'], - 'id': ann['id'], - } - - return data_info +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import Optional + +import numpy as np + +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class FreiHandDataset(BaseCocoStyleDataset): + """FreiHand dataset for hand pose estimation. + + "FreiHAND: A Dataset for Markerless Capture of Hand Pose + and Shape from Single RGB Images", ICCV'2019. + More details can be found in the `paper + `__ . + + FreiHand keypoints:: + + 0: 'wrist', + 1: 'thumb1', + 2: 'thumb2', + 3: 'thumb3', + 4: 'thumb4', + 5: 'forefinger1', + 6: 'forefinger2', + 7: 'forefinger3', + 8: 'forefinger4', + 9: 'middle_finger1', + 10: 'middle_finger2', + 11: 'middle_finger3', + 12: 'middle_finger4', + 13: 'ring_finger1', + 14: 'ring_finger2', + 15: 'ring_finger3', + 16: 'ring_finger4', + 17: 'pinky_finger1', + 18: 'pinky_finger2', + 19: 'pinky_finger3', + 20: 'pinky_finger4' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. 
+ test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/freihand2d.py') + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw COCO annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + img_path = osp.join(self.data_prefix['img'], img['file_name']) + + # use the entire image which is 224x224 + bbox = np.array([0, 0, 224, 224], dtype=np.float32).reshape(1, 4) + + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + _keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + num_keypoints = np.count_nonzero(keypoints.max(axis=2)) + + data_info = { + 'img_id': ann['image_id'], + 'img_path': img_path, + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': num_keypoints, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'iscrowd': ann['iscrowd'], + 'segmentation': ann['segmentation'], + 'id': ann['id'], + } + + return data_info diff --git a/mmpose/datasets/datasets/hand/onehand10k_dataset.py b/mmpose/datasets/datasets/hand/onehand10k_dataset.py index 3519ace560..55cff8f5a5 100644 --- a/mmpose/datasets/datasets/hand/onehand10k_dataset.py +++ b/mmpose/datasets/datasets/hand/onehand10k_dataset.py @@ -1,77 +1,77 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class OneHand10KDataset(BaseCocoStyleDataset): - """OneHand10K dataset for hand pose estimation. - - "Mask-pose Cascaded CNN for 2D Hand Pose Estimation from - Single Color Images", TCSVT'2019. - More details can be found in the `paper - `__ . - - OneHand10K keypoints:: - - 0: 'wrist', - 1: 'thumb1', - 2: 'thumb2', - 3: 'thumb3', - 4: 'thumb4', - 5: 'forefinger1', - 6: 'forefinger2', - 7: 'forefinger3', - 8: 'forefinger4', - 9: 'middle_finger1', - 10: 'middle_finger2', - 11: 'middle_finger3', - 12: 'middle_finger4', - 13: 'ring_finger1', - 14: 'ring_finger2', - 15: 'ring_finger3', - 16: 'ring_finger4', - 17: 'pinky_finger1', - 18: 'pinky_finger2', - 19: 'pinky_finger3', - 20: 'pinky_finger4' - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. 
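The hand loaders above convert the annotated hand boxes with mmpose.structures.bbox.bbox_xywh2xyxy before storing them in data_info. A small sketch of that conversion on made-up numbers, assuming the usual x2 = x + w, y2 = y + h semantics:

import numpy as np
from mmpose.structures.bbox import bbox_xywh2xyxy

# one box per row, [x, y, w, h]
bbox_xywh = np.array([[50., 40., 120., 160.]], dtype=np.float32)
bbox_xyxy = bbox_xywh2xyxy(bbox_xywh)
print(bbox_xyxy)   # [[ 50.  40. 170. 200.]]  -> [x1, y1, x1 + w, y1 + h]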
- data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/onehand10k.py') +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class OneHand10KDataset(BaseCocoStyleDataset): + """OneHand10K dataset for hand pose estimation. + + "Mask-pose Cascaded CNN for 2D Hand Pose Estimation from + Single Color Images", TCSVT'2019. + More details can be found in the `paper + `__ . + + OneHand10K keypoints:: + + 0: 'wrist', + 1: 'thumb1', + 2: 'thumb2', + 3: 'thumb3', + 4: 'thumb4', + 5: 'forefinger1', + 6: 'forefinger2', + 7: 'forefinger3', + 8: 'forefinger4', + 9: 'middle_finger1', + 10: 'middle_finger2', + 11: 'middle_finger3', + 12: 'middle_finger4', + 13: 'ring_finger1', + 14: 'ring_finger2', + 15: 'ring_finger3', + 16: 'ring_finger4', + 17: 'pinky_finger1', + 18: 'pinky_finger2', + 19: 'pinky_finger3', + 20: 'pinky_finger4' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. 
Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/onehand10k.py') diff --git a/mmpose/datasets/datasets/hand/panoptic_hand2d_dataset.py b/mmpose/datasets/datasets/hand/panoptic_hand2d_dataset.py index 26d364840e..a3b03db9fe 100644 --- a/mmpose/datasets/datasets/hand/panoptic_hand2d_dataset.py +++ b/mmpose/datasets/datasets/hand/panoptic_hand2d_dataset.py @@ -1,137 +1,137 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -from typing import Optional - -import numpy as np - -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class PanopticHand2DDataset(BaseCocoStyleDataset): - """Panoptic 2D dataset for hand pose estimation. - - "Hand Keypoint Detection in Single Images using Multiview - Bootstrapping", CVPR'2017. - More details can be found in the `paper - `__ . - - Panoptic keypoints:: - - 0: 'wrist', - 1: 'thumb1', - 2: 'thumb2', - 3: 'thumb3', - 4: 'thumb4', - 5: 'forefinger1', - 6: 'forefinger2', - 7: 'forefinger3', - 8: 'forefinger4', - 9: 'middle_finger1', - 10: 'middle_finger2', - 11: 'middle_finger3', - 12: 'middle_finger4', - 13: 'ring_finger1', - 14: 'ring_finger2', - 15: 'ring_finger3', - 16: 'ring_finger4', - 17: 'pinky_finger1', - 18: 'pinky_finger2', - 19: 'pinky_finger3', - 20: 'pinky_finger4' - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. 
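The Args documented above map directly onto an MMEngine-style config entry. A hedged config sketch for a top-down OneHand10K train dataloader; the file names, batch settings, and the empty pipeline are placeholders, not values taken from this patch:

# Hypothetical config fragment; adapt paths and pipeline to the actual setup.
train_dataloader = dict(
    batch_size=32,
    num_workers=2,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='OneHand10KDataset',
        data_root='data/onehand10k/',
        ann_file='annotations/onehand10k_train.json',
        data_mode='topdown',
        data_prefix=dict(img=''),
        pipeline=[],  # insert the usual LoadImage / TopdownAffine / PackPoseInputs chain
    ),
)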
- indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict( - from_file='configs/_base_/datasets/panoptic_hand2d.py') - - def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: - """Parse raw COCO annotation of an instance. - - Args: - raw_data_info (dict): Raw data information loaded from - ``ann_file``. It should have following contents: - - - ``'raw_ann_info'``: Raw annotation of an instance - - ``'raw_img_info'``: Raw information of the image that - contains the instance - - Returns: - dict: Parsed instance annotation - """ - - ann = raw_data_info['raw_ann_info'] - img = raw_data_info['raw_img_info'] - - img_path = osp.join(self.data_prefix['img'], img['file_name']) - img_w, img_h = img['width'], img['height'] - - # get bbox in shape [1, 4], formatted as xywh - x, y, w, h = ann['bbox'] - x1 = np.clip(x, 0, img_w - 1) - y1 = np.clip(y, 0, img_h - 1) - x2 = np.clip(x + w, 0, img_w - 1) - y2 = np.clip(y + h, 0, img_h - 1) - - bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4) - - # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] - _keypoints = np.array( - ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) - keypoints = _keypoints[..., :2] - keypoints_visible = np.minimum(1, _keypoints[..., 2]) - - num_keypoints = np.count_nonzero(keypoints.max(axis=2)) - - data_info = { - 'img_id': ann['image_id'], - 'img_path': img_path, - 'bbox': bbox, - 'bbox_score': np.ones(1, dtype=np.float32), - 'num_keypoints': num_keypoints, - 'keypoints': keypoints, - 'keypoints_visible': keypoints_visible, - 'iscrowd': ann['iscrowd'], - 'segmentation': ann['segmentation'], - 'head_size': ann['head_size'], - 'id': ann['id'], - } - - return data_info +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import Optional + +import numpy as np + +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class PanopticHand2DDataset(BaseCocoStyleDataset): + """Panoptic 2D dataset for hand pose estimation. + + "Hand Keypoint Detection in Single Images using Multiview + Bootstrapping", CVPR'2017. + More details can be found in the `paper + `__ . 
+ + Panoptic keypoints:: + + 0: 'wrist', + 1: 'thumb1', + 2: 'thumb2', + 3: 'thumb3', + 4: 'thumb4', + 5: 'forefinger1', + 6: 'forefinger2', + 7: 'forefinger3', + 8: 'forefinger4', + 9: 'middle_finger1', + 10: 'middle_finger2', + 11: 'middle_finger3', + 12: 'middle_finger4', + 13: 'ring_finger1', + 14: 'ring_finger2', + 15: 'ring_finger3', + 16: 'ring_finger4', + 17: 'pinky_finger1', + 18: 'pinky_finger2', + 19: 'pinky_finger3', + 20: 'pinky_finger4' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict( + from_file='configs/_base_/datasets/panoptic_hand2d.py') + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw COCO annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. 
It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + img_path = osp.join(self.data_prefix['img'], img['file_name']) + img_w, img_h = img['width'], img['height'] + + # get bbox in shape [1, 4], formatted as xywh + x, y, w, h = ann['bbox'] + x1 = np.clip(x, 0, img_w - 1) + y1 = np.clip(y, 0, img_h - 1) + x2 = np.clip(x + w, 0, img_w - 1) + y2 = np.clip(y + h, 0, img_h - 1) + + bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4) + + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + _keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2]) + + num_keypoints = np.count_nonzero(keypoints.max(axis=2)) + + data_info = { + 'img_id': ann['image_id'], + 'img_path': img_path, + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': num_keypoints, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'iscrowd': ann['iscrowd'], + 'segmentation': ann['segmentation'], + 'head_size': ann['head_size'], + 'id': ann['id'], + } + + return data_info diff --git a/mmpose/datasets/datasets/hand/rhd2d_dataset.py b/mmpose/datasets/datasets/hand/rhd2d_dataset.py index ebc4301590..fff6c87c93 100644 --- a/mmpose/datasets/datasets/hand/rhd2d_dataset.py +++ b/mmpose/datasets/datasets/hand/rhd2d_dataset.py @@ -1,77 +1,77 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class Rhd2DDataset(BaseCocoStyleDataset): - """Rendered Handpose Dataset for hand pose estimation. - - "Learning to Estimate 3D Hand Pose from Single RGB Images", - ICCV'2017. - More details can be found in the `paper - `__ . - - Rhd keypoints:: - - 0: 'wrist', - 1: 'thumb4', - 2: 'thumb3', - 3: 'thumb2', - 4: 'thumb1', - 5: 'forefinger4', - 6: 'forefinger3', - 7: 'forefinger2', - 8: 'forefinger1', - 9: 'middle_finger4', - 10: 'middle_finger3', - 11: 'middle_finger2', - 12: 'middle_finger1', - 13: 'ring_finger4', - 14: 'ring_finger3', - 15: 'ring_finger2', - 16: 'ring_finger1', - 17: 'pinky_finger4', - 18: 'pinky_finger3', - 19: 'pinky_finger2', - 20: 'pinky_finger1' - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. 
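PanopticHand2DDataset.parse_data_info above clips the annotated box to the image bounds and derives keypoint visibility from the third column of the keypoint triplets. The same arithmetic in isolation, on made-up numbers:

import numpy as np

img_w, img_h = 640, 480
x, y, w, h = 600., 450., 100., 80.                 # xywh box partly outside the image
x1, y1 = np.clip(x, 0, img_w - 1), np.clip(y, 0, img_h - 1)
x2, y2 = np.clip(x + w, 0, img_w - 1), np.clip(y + h, 0, img_h - 1)
bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4)   # xyxy result

# 21 keypoints stored as [x, y, score] triplets; pretend 5 joints are annotated
_keypoints = np.zeros((1, 21, 3), dtype=np.float32)
_keypoints[0, :5, :2] = 10.0
_keypoints[0, :5, 2] = 1.0
keypoints = _keypoints[..., :2]
keypoints_visible = np.minimum(1, _keypoints[..., 2])
num_keypoints = np.count_nonzero(keypoints.max(axis=2))
print(bbox)                                        # [[600. 450. 639. 479.]]
print(int(keypoints_visible.sum()), num_keypoints) # 5 5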
- indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/rhd2d.py') +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class Rhd2DDataset(BaseCocoStyleDataset): + """Rendered Handpose Dataset for hand pose estimation. + + "Learning to Estimate 3D Hand Pose from Single RGB Images", + ICCV'2017. + More details can be found in the `paper + `__ . + + Rhd keypoints:: + + 0: 'wrist', + 1: 'thumb4', + 2: 'thumb3', + 3: 'thumb2', + 4: 'thumb1', + 5: 'forefinger4', + 6: 'forefinger3', + 7: 'forefinger2', + 8: 'forefinger1', + 9: 'middle_finger4', + 10: 'middle_finger3', + 11: 'middle_finger2', + 12: 'middle_finger1', + 13: 'ring_finger4', + 14: 'ring_finger3', + 15: 'ring_finger2', + 16: 'ring_finger1', + 17: 'pinky_finger4', + 18: 'pinky_finger3', + 19: 'pinky_finger2', + 20: 'pinky_finger1' + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. 
+ lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/rhd2d.py') diff --git a/mmpose/datasets/datasets/oct/__init__.py b/mmpose/datasets/datasets/oct/__init__.py index 304c80e849..310c89b232 100644 --- a/mmpose/datasets/datasets/oct/__init__.py +++ b/mmpose/datasets/datasets/oct/__init__.py @@ -1,7 +1,7 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .octseg import OCTSegDataset - - -__all__ = [ - 'OCTSegDataset' -] +# Copyright (c) OpenMMLab. All rights reserved. +from .octseg import OCTSegDataset + + +__all__ = [ + 'OCTSegDataset' +] diff --git a/mmpose/datasets/datasets/oct/octseg.py b/mmpose/datasets/datasets/oct/octseg.py index b63da450fe..5da19608bf 100644 --- a/mmpose/datasets/datasets/oct/octseg.py +++ b/mmpose/datasets/datasets/oct/octseg.py @@ -1,9 +1,9 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module(name='OCTSegDataset') -class OCTSegDataset(BaseCocoStyleDataset): - +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module(name='OCTSegDataset') +class OCTSegDataset(BaseCocoStyleDataset): + METAINFO: dict = dict(from_file='configs/_base_/datasets/octseg.py') \ No newline at end of file diff --git a/mmpose/datasets/datasets/utils.py b/mmpose/datasets/datasets/utils.py index 7433a168b9..da25fe6ae9 100644 --- a/mmpose/datasets/datasets/utils.py +++ b/mmpose/datasets/datasets/utils.py @@ -1,202 +1,202 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -import warnings - -import numpy as np -from mmengine import Config - - -def parse_pose_metainfo(metainfo: dict): - """Load meta information of pose dataset and check its integrity. - - Args: - metainfo (dict): Raw data of pose meta information, which should - contain following contents: - - - "dataset_name" (str): The name of the dataset - - "keypoint_info" (dict): The keypoint-related meta information, - e.g., name, upper/lower body, and symmetry - - "skeleton_info" (dict): The skeleton-related meta information, - e.g., start/end keypoint of limbs - - "joint_weights" (list[float]): The loss weights of keypoints - - "sigmas" (list[float]): The keypoint distribution parameters - to calculate OKS score. See `COCO keypoint evaluation - `__. - - An example of metainfo is shown as follows. - - .. code-block:: none - { - "dataset_name": "coco", - "keypoint_info": - { - 0: - { - "name": "nose", - "type": "upper", - "swap": "", - "color": [51, 153, 255], - }, - 1: - { - "name": "right_eye", - "type": "upper", - "swap": "left_eye", - "color": [51, 153, 255], - }, - ... - }, - "skeleton_info": - { - 0: - { - "link": ("left_ankle", "left_knee"), - "color": [0, 255, 0], - }, - ... - }, - "joint_weights": [1., 1., ...], - "sigmas": [0.026, 0.025, ...], - } - - - A special case is that `metainfo` can have the key "from_file", - which should be the path of a config file. 
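Because every class above is decorated with @DATASETS.register_module(), a dataset can also be built from a plain config dict through the registry. A sketch with placeholder paths; lazy_init=True (documented in the Args above) defers reading the annotation file, so only the METAINFO config has to be resolvable at build time:

from mmpose.registry import DATASETS

cfg = dict(
    type='Rhd2DDataset',              # any registered name works, e.g. 'FreiHandDataset'
    data_root='data/rhd/',            # hypothetical location
    ann_file='annotations/rhd_train.json',
    data_prefix=dict(img=''),
    data_mode='topdown',
    pipeline=[],
    lazy_init=True,
)
dataset = DATASETS.build(cfg)
print(type(dataset).__name__)                 # Rhd2DDataset
print(dataset.metainfo['num_keypoints'])      # 21, parsed from the rhd2d metainfo config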
In this case, the - actual metainfo will be loaded by: - - .. code-block:: python - metainfo = mmengine.Config.fromfile(metainfo['from_file']) - - Returns: - Dict: pose meta information that contains following contents: - - - "dataset_name" (str): Same as ``"dataset_name"`` in the input - - "num_keypoints" (int): Number of keypoints - - "keypoint_id2name" (dict): Mapping from keypoint id to name - - "keypoint_name2id" (dict): Mapping from keypoint name to id - - "upper_body_ids" (list): Ids of upper-body keypoint - - "lower_body_ids" (list): Ids of lower-body keypoint - - "flip_indices" (list): The Id of each keypoint's symmetric keypoint - - "flip_pairs" (list): The Ids of symmetric keypoint pairs - - "keypoint_colors" (numpy.ndarray): The keypoint color matrix of - shape [K, 3], where each row is the color of one keypint in bgr - - "num_skeleton_links" (int): The number of links - - "skeleton_links" (list): The links represented by Id pairs of start - and end points - - "skeleton_link_colors" (numpy.ndarray): The link color matrix - - "dataset_keypoint_weights" (numpy.ndarray): Same as the - ``"joint_weights"`` in the input - - "sigmas" (numpy.ndarray): Same as the ``"sigmas"`` in the input - """ - - if 'from_file' in metainfo: - cfg_file = metainfo['from_file'] - if not osp.isfile(cfg_file): - # Search configs in 'mmpose/.mim/configs/' in case that mmpose - # is installed in non-editable mode. - import mmpose - mmpose_path = osp.dirname(mmpose.__file__) - _cfg_file = osp.join(mmpose_path, '.mim', 'configs', '_base_', - 'datasets', osp.basename(cfg_file)) - if osp.isfile(_cfg_file): - warnings.warn( - f'The metainfo config file "{cfg_file}" does not exist. ' - f'A matched config file "{_cfg_file}" will be used ' - 'instead.') - cfg_file = _cfg_file - else: - raise FileNotFoundError( - f'The metainfo config file "{cfg_file}" does not exist.') - - # TODO: remove the nested structure of dataset_info - # metainfo = Config.fromfile(metainfo['from_file']) - metainfo = Config.fromfile(cfg_file).dataset_info - - # check data integrity - assert 'dataset_name' in metainfo - assert 'keypoint_info' in metainfo - assert 'skeleton_info' in metainfo - assert 'joint_weights' in metainfo - assert 'sigmas' in metainfo - - # parse metainfo - parsed = dict( - dataset_name=None, - num_keypoints=None, - keypoint_id2name={}, - keypoint_name2id={}, - upper_body_ids=[], - lower_body_ids=[], - flip_indices=[], - flip_pairs=[], - keypoint_colors=[], - num_skeleton_links=None, - skeleton_links=[], - skeleton_link_colors=[], - dataset_keypoint_weights=None, - sigmas=None, - ) - - parsed['dataset_name'] = metainfo['dataset_name'] - - # parse keypoint information - parsed['num_keypoints'] = len(metainfo['keypoint_info']) - - for kpt_id, kpt in metainfo['keypoint_info'].items(): - kpt_name = kpt['name'] - parsed['keypoint_id2name'][kpt_id] = kpt_name - parsed['keypoint_name2id'][kpt_name] = kpt_id - parsed['keypoint_colors'].append(kpt.get('color', [255, 128, 0])) - - kpt_type = kpt.get('type', '') - if kpt_type == 'upper': - parsed['upper_body_ids'].append(kpt_id) - elif kpt_type == 'lower': - parsed['lower_body_ids'].append(kpt_id) - - swap_kpt = kpt.get('swap', '') - if swap_kpt == kpt_name or swap_kpt == '': - parsed['flip_indices'].append(kpt_name) - else: - parsed['flip_indices'].append(swap_kpt) - pair = (swap_kpt, kpt_name) - if pair not in parsed['flip_pairs']: - parsed['flip_pairs'].append(pair) - - # parse skeleton information - parsed['num_skeleton_links'] = len(metainfo['skeleton_info']) - for _, sk in 
metainfo['skeleton_info'].items(): - parsed['skeleton_links'].append(sk['link']) - parsed['skeleton_link_colors'].append(sk.get('color', [96, 96, 255])) - - # parse extra information - parsed['dataset_keypoint_weights'] = np.array( - metainfo['joint_weights'], dtype=np.float32) - parsed['sigmas'] = np.array(metainfo['sigmas'], dtype=np.float32) - - if 'stats_info' in metainfo: - parsed['stats_info'] = {} - for name, val in metainfo['stats_info'].items(): - parsed['stats_info'][name] = np.array(val, dtype=np.float32) - - # formatting - def _map(src, mapping: dict): - if isinstance(src, (list, tuple)): - cls = type(src) - return cls(_map(s, mapping) for s in src) - else: - return mapping[src] - - parsed['flip_pairs'] = _map( - parsed['flip_pairs'], mapping=parsed['keypoint_name2id']) - parsed['flip_indices'] = _map( - parsed['flip_indices'], mapping=parsed['keypoint_name2id']) - parsed['skeleton_links'] = _map( - parsed['skeleton_links'], mapping=parsed['keypoint_name2id']) - - parsed['keypoint_colors'] = np.array( - parsed['keypoint_colors'], dtype=np.uint8) - parsed['skeleton_link_colors'] = np.array( - parsed['skeleton_link_colors'], dtype=np.uint8) - - return parsed +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import warnings + +import numpy as np +from mmengine import Config + + +def parse_pose_metainfo(metainfo: dict): + """Load meta information of pose dataset and check its integrity. + + Args: + metainfo (dict): Raw data of pose meta information, which should + contain following contents: + + - "dataset_name" (str): The name of the dataset + - "keypoint_info" (dict): The keypoint-related meta information, + e.g., name, upper/lower body, and symmetry + - "skeleton_info" (dict): The skeleton-related meta information, + e.g., start/end keypoint of limbs + - "joint_weights" (list[float]): The loss weights of keypoints + - "sigmas" (list[float]): The keypoint distribution parameters + to calculate OKS score. See `COCO keypoint evaluation + `__. + + An example of metainfo is shown as follows. + + .. code-block:: none + { + "dataset_name": "coco", + "keypoint_info": + { + 0: + { + "name": "nose", + "type": "upper", + "swap": "", + "color": [51, 153, 255], + }, + 1: + { + "name": "right_eye", + "type": "upper", + "swap": "left_eye", + "color": [51, 153, 255], + }, + ... + }, + "skeleton_info": + { + 0: + { + "link": ("left_ankle", "left_knee"), + "color": [0, 255, 0], + }, + ... + }, + "joint_weights": [1., 1., ...], + "sigmas": [0.026, 0.025, ...], + } + + + A special case is that `metainfo` can have the key "from_file", + which should be the path of a config file. In this case, the + actual metainfo will be loaded by: + + .. 
code-block:: python + metainfo = mmengine.Config.fromfile(metainfo['from_file']) + + Returns: + Dict: pose meta information that contains following contents: + + - "dataset_name" (str): Same as ``"dataset_name"`` in the input + - "num_keypoints" (int): Number of keypoints + - "keypoint_id2name" (dict): Mapping from keypoint id to name + - "keypoint_name2id" (dict): Mapping from keypoint name to id + - "upper_body_ids" (list): Ids of upper-body keypoint + - "lower_body_ids" (list): Ids of lower-body keypoint + - "flip_indices" (list): The Id of each keypoint's symmetric keypoint + - "flip_pairs" (list): The Ids of symmetric keypoint pairs + - "keypoint_colors" (numpy.ndarray): The keypoint color matrix of + shape [K, 3], where each row is the color of one keypint in bgr + - "num_skeleton_links" (int): The number of links + - "skeleton_links" (list): The links represented by Id pairs of start + and end points + - "skeleton_link_colors" (numpy.ndarray): The link color matrix + - "dataset_keypoint_weights" (numpy.ndarray): Same as the + ``"joint_weights"`` in the input + - "sigmas" (numpy.ndarray): Same as the ``"sigmas"`` in the input + """ + + if 'from_file' in metainfo: + cfg_file = metainfo['from_file'] + if not osp.isfile(cfg_file): + # Search configs in 'mmpose/.mim/configs/' in case that mmpose + # is installed in non-editable mode. + import mmpose + mmpose_path = osp.dirname(mmpose.__file__) + _cfg_file = osp.join(mmpose_path, '.mim', 'configs', '_base_', + 'datasets', osp.basename(cfg_file)) + if osp.isfile(_cfg_file): + warnings.warn( + f'The metainfo config file "{cfg_file}" does not exist. ' + f'A matched config file "{_cfg_file}" will be used ' + 'instead.') + cfg_file = _cfg_file + else: + raise FileNotFoundError( + f'The metainfo config file "{cfg_file}" does not exist.') + + # TODO: remove the nested structure of dataset_info + # metainfo = Config.fromfile(metainfo['from_file']) + metainfo = Config.fromfile(cfg_file).dataset_info + + # check data integrity + assert 'dataset_name' in metainfo + assert 'keypoint_info' in metainfo + assert 'skeleton_info' in metainfo + assert 'joint_weights' in metainfo + assert 'sigmas' in metainfo + + # parse metainfo + parsed = dict( + dataset_name=None, + num_keypoints=None, + keypoint_id2name={}, + keypoint_name2id={}, + upper_body_ids=[], + lower_body_ids=[], + flip_indices=[], + flip_pairs=[], + keypoint_colors=[], + num_skeleton_links=None, + skeleton_links=[], + skeleton_link_colors=[], + dataset_keypoint_weights=None, + sigmas=None, + ) + + parsed['dataset_name'] = metainfo['dataset_name'] + + # parse keypoint information + parsed['num_keypoints'] = len(metainfo['keypoint_info']) + + for kpt_id, kpt in metainfo['keypoint_info'].items(): + kpt_name = kpt['name'] + parsed['keypoint_id2name'][kpt_id] = kpt_name + parsed['keypoint_name2id'][kpt_name] = kpt_id + parsed['keypoint_colors'].append(kpt.get('color', [255, 128, 0])) + + kpt_type = kpt.get('type', '') + if kpt_type == 'upper': + parsed['upper_body_ids'].append(kpt_id) + elif kpt_type == 'lower': + parsed['lower_body_ids'].append(kpt_id) + + swap_kpt = kpt.get('swap', '') + if swap_kpt == kpt_name or swap_kpt == '': + parsed['flip_indices'].append(kpt_name) + else: + parsed['flip_indices'].append(swap_kpt) + pair = (swap_kpt, kpt_name) + if pair not in parsed['flip_pairs']: + parsed['flip_pairs'].append(pair) + + # parse skeleton information + parsed['num_skeleton_links'] = len(metainfo['skeleton_info']) + for _, sk in metainfo['skeleton_info'].items(): + 
parsed['skeleton_links'].append(sk['link']) + parsed['skeleton_link_colors'].append(sk.get('color', [96, 96, 255])) + + # parse extra information + parsed['dataset_keypoint_weights'] = np.array( + metainfo['joint_weights'], dtype=np.float32) + parsed['sigmas'] = np.array(metainfo['sigmas'], dtype=np.float32) + + if 'stats_info' in metainfo: + parsed['stats_info'] = {} + for name, val in metainfo['stats_info'].items(): + parsed['stats_info'][name] = np.array(val, dtype=np.float32) + + # formatting + def _map(src, mapping: dict): + if isinstance(src, (list, tuple)): + cls = type(src) + return cls(_map(s, mapping) for s in src) + else: + return mapping[src] + + parsed['flip_pairs'] = _map( + parsed['flip_pairs'], mapping=parsed['keypoint_name2id']) + parsed['flip_indices'] = _map( + parsed['flip_indices'], mapping=parsed['keypoint_name2id']) + parsed['skeleton_links'] = _map( + parsed['skeleton_links'], mapping=parsed['keypoint_name2id']) + + parsed['keypoint_colors'] = np.array( + parsed['keypoint_colors'], dtype=np.uint8) + parsed['skeleton_link_colors'] = np.array( + parsed['skeleton_link_colors'], dtype=np.uint8) + + return parsed diff --git a/mmpose/datasets/datasets/wholebody/__init__.py b/mmpose/datasets/datasets/wholebody/__init__.py index 156094c2b0..dd28293d06 100644 --- a/mmpose/datasets/datasets/wholebody/__init__.py +++ b/mmpose/datasets/datasets/wholebody/__init__.py @@ -1,5 +1,5 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .coco_wholebody_dataset import CocoWholeBodyDataset -from .halpe_dataset import HalpeDataset - -__all__ = ['CocoWholeBodyDataset', 'HalpeDataset'] +# Copyright (c) OpenMMLab. All rights reserved. +from .coco_wholebody_dataset import CocoWholeBodyDataset +from .halpe_dataset import HalpeDataset + +__all__ = ['CocoWholeBodyDataset', 'HalpeDataset'] diff --git a/mmpose/datasets/datasets/wholebody/coco_wholebody_dataset.py b/mmpose/datasets/datasets/wholebody/coco_wholebody_dataset.py index 00a2ea418f..720e49a4ae 100644 --- a/mmpose/datasets/datasets/wholebody/coco_wholebody_dataset.py +++ b/mmpose/datasets/datasets/wholebody/coco_wholebody_dataset.py @@ -1,127 +1,127 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -import os.path as osp -from typing import Optional - -import numpy as np - -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class CocoWholeBodyDataset(BaseCocoStyleDataset): - """CocoWholeBody dataset for pose estimation. - - "Whole-Body Human Pose Estimation in the Wild", ECCV'2020. - More details can be found in the `paper - `__ . - - COCO-WholeBody keypoints:: - - 0-16: 17 body keypoints, - 17-22: 6 foot keypoints, - 23-90: 68 face keypoints, - 91-132: 42 hand keypoints - - In total, we have 133 keypoints for wholebody pose estimation. - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. 
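A self-contained sketch of parse_pose_metainfo on an inline metainfo dict (two keypoints, one link), showing the derived fields described in the docstring above; the toy dataset name, keypoint names, and values are illustrative only:

from mmpose.datasets.datasets.utils import parse_pose_metainfo

metainfo = dict(
    dataset_name='toy_hand',
    keypoint_info={
        0: dict(name='left_tip', type='upper', swap='right_tip', color=[255, 0, 0]),
        1: dict(name='right_tip', type='upper', swap='left_tip', color=[0, 255, 0]),
    },
    skeleton_info={
        0: dict(link=('left_tip', 'right_tip'), color=[0, 255, 0]),
    },
    joint_weights=[1.0, 1.0],
    sigmas=[0.025, 0.025],
)
parsed = parse_pose_metainfo(metainfo)
print(parsed['num_keypoints'])      # 2
print(parsed['flip_indices'])       # [1, 0] -- each keypoint's symmetric counterpart
print(parsed['skeleton_links'])     # [(0, 1)] -- link names mapped to keypoint ids
print(parsed['flip_pairs'])         # symmetric name pairs mapped to id pairs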
- data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. - indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict( - from_file='configs/_base_/datasets/coco_wholebody.py') - - def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: - """Parse raw COCO annotation of an instance. - - Args: - raw_data_info (dict): Raw data information loaded from - ``ann_file``. It should have following contents: - - - ``'raw_ann_info'``: Raw annotation of an instance - - ``'raw_img_info'``: Raw information of the image that - contains the instance - - Returns: - dict: Parsed instance annotation - """ - - ann = raw_data_info['raw_ann_info'] - img = raw_data_info['raw_img_info'] - - img_path = osp.join(self.data_prefix['img'], img['file_name']) - img_w, img_h = img['width'], img['height'] - - # get bbox in shape [1, 4], formatted as xywh - x, y, w, h = ann['bbox'] - x1 = np.clip(x, 0, img_w - 1) - y1 = np.clip(y, 0, img_h - 1) - x2 = np.clip(x + w, 0, img_w - 1) - y2 = np.clip(y + h, 0, img_h - 1) - - bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4) - - # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] - # COCO-Wholebody: consisting of body, foot, face and hand keypoints - _keypoints = np.array(ann['keypoints'] + ann['foot_kpts'] + - ann['face_kpts'] + ann['lefthand_kpts'] + - ann['righthand_kpts']).reshape(1, -1, 3) - keypoints = _keypoints[..., :2] - keypoints_visible = np.minimum(1, _keypoints[..., 2] > 0) - - num_keypoints = ann['num_keypoints'] - - data_info = { - 'img_id': ann['image_id'], - 'img_path': img_path, - 'bbox': bbox, - 'bbox_score': np.ones(1, dtype=np.float32), - 'num_keypoints': num_keypoints, - 'keypoints': keypoints, - 'keypoints_visible': keypoints_visible, - 'iscrowd': ann['iscrowd'], - 'segmentation': ann['segmentation'], - 'id': ann['id'], - 'category_id': ann['category_id'], - # store the raw annotation of the instance - # it is useful for evaluation without providing ann_file - 'raw_ann_info': copy.deepcopy(ann), - } - - return data_info +# Copyright (c) OpenMMLab. All rights reserved. 
+import copy +import os.path as osp +from typing import Optional + +import numpy as np + +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class CocoWholeBodyDataset(BaseCocoStyleDataset): + """CocoWholeBody dataset for pose estimation. + + "Whole-Body Human Pose Estimation in the Wild", ECCV'2020. + More details can be found in the `paper + `__ . + + COCO-WholeBody keypoints:: + + 0-16: 17 body keypoints, + 17-22: 6 foot keypoints, + 23-90: 68 face keypoints, + 91-132: 42 hand keypoints + + In total, we have 133 keypoints for wholebody pose estimation. + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict( + from_file='configs/_base_/datasets/coco_wholebody.py') + + def parse_data_info(self, raw_data_info: dict) -> Optional[dict]: + """Parse raw COCO annotation of an instance. + + Args: + raw_data_info (dict): Raw data information loaded from + ``ann_file``. 
It should have following contents: + + - ``'raw_ann_info'``: Raw annotation of an instance + - ``'raw_img_info'``: Raw information of the image that + contains the instance + + Returns: + dict: Parsed instance annotation + """ + + ann = raw_data_info['raw_ann_info'] + img = raw_data_info['raw_img_info'] + + img_path = osp.join(self.data_prefix['img'], img['file_name']) + img_w, img_h = img['width'], img['height'] + + # get bbox in shape [1, 4], formatted as xywh + x, y, w, h = ann['bbox'] + x1 = np.clip(x, 0, img_w - 1) + y1 = np.clip(y, 0, img_h - 1) + x2 = np.clip(x + w, 0, img_w - 1) + y2 = np.clip(y + h, 0, img_h - 1) + + bbox = np.array([x1, y1, x2, y2], dtype=np.float32).reshape(1, 4) + + # keypoints in shape [1, K, 2] and keypoints_visible in [1, K] + # COCO-Wholebody: consisting of body, foot, face and hand keypoints + _keypoints = np.array(ann['keypoints'] + ann['foot_kpts'] + + ann['face_kpts'] + ann['lefthand_kpts'] + + ann['righthand_kpts']).reshape(1, -1, 3) + keypoints = _keypoints[..., :2] + keypoints_visible = np.minimum(1, _keypoints[..., 2] > 0) + + num_keypoints = ann['num_keypoints'] + + data_info = { + 'img_id': ann['image_id'], + 'img_path': img_path, + 'bbox': bbox, + 'bbox_score': np.ones(1, dtype=np.float32), + 'num_keypoints': num_keypoints, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'iscrowd': ann['iscrowd'], + 'segmentation': ann['segmentation'], + 'id': ann['id'], + 'category_id': ann['category_id'], + # store the raw annotation of the instance + # it is useful for evaluation without providing ann_file + 'raw_ann_info': copy.deepcopy(ann), + } + + return data_info diff --git a/mmpose/datasets/datasets/wholebody/halpe_dataset.py b/mmpose/datasets/datasets/wholebody/halpe_dataset.py index 0699f3b702..75819a2c56 100644 --- a/mmpose/datasets/datasets/wholebody/halpe_dataset.py +++ b/mmpose/datasets/datasets/wholebody/halpe_dataset.py @@ -1,59 +1,59 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmpose.registry import DATASETS -from ..base import BaseCocoStyleDataset - - -@DATASETS.register_module() -class HalpeDataset(BaseCocoStyleDataset): - """Halpe dataset for pose estimation. - - 'https://github.com/Fang-Haoshu/Halpe-FullBody' - - Halpe keypoints:: - - 0-19: 20 body keypoints, - 20-25: 6 foot keypoints, - 26-93: 68 face keypoints, - 94-135: 42 hand keypoints - - In total, we have 136 keypoints for wholebody pose estimation. - - Args: - ann_file (str): Annotation file path. Default: ''. - bbox_file (str, optional): Detection result file path. If - ``bbox_file`` is set, detected bboxes loaded from this file will - be used instead of ground-truth bboxes. This setting is only for - evaluation, i.e., ignored when ``test_mode`` is ``False``. - Default: ``None``. - data_mode (str): Specifies the mode of data samples: ``'topdown'`` or - ``'bottomup'``. In ``'topdown'`` mode, each data sample contains - one instance; while in ``'bottomup'`` mode, each data sample - contains all instances in a image. Default: ``'topdown'`` - metainfo (dict, optional): Meta information for dataset, such as class - information. Default: ``None``. - data_root (str, optional): The root directory for ``data_prefix`` and - ``ann_file``. Default: ``None``. - data_prefix (dict, optional): Prefix for training data. Default: - ``dict(img=None, ann=None)``. - filter_cfg (dict, optional): Config for filter data. Default: `None`. 
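For readers unfamiliar with the COCO-WholeBody layout described in the docstring above, here is a minimal standalone sketch (not part of the diff) of how the 133 keypoints built by ``parse_data_info`` split back into body, foot, face and hand groups. The array values are dummy data; only the index ranges come from the docstring.

import numpy as np

# parse_data_info() above concatenates body (17), foot (6), face (68) and
# hand (2 x 21) keypoint triplets, then keeps the (x, y) part, giving a
# (1, 133, 2) array. Dummy values stand in for real annotations here.
keypoints = np.random.rand(1, 133, 2).astype(np.float32)

body = keypoints[:, 0:17]     # 0-16:   17 body keypoints
foot = keypoints[:, 17:23]    # 17-22:   6 foot keypoints
face = keypoints[:, 23:91]    # 23-90:  68 face keypoints
hands = keypoints[:, 91:133]  # 91-132: 42 hand keypoints (21 per hand)

assert body.shape[1] + foot.shape[1] + face.shape[1] + hands.shape[1] == 133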
- indices (int or Sequence[int], optional): Support using first few - data in annotation file to facilitate training/testing on a smaller - dataset. Default: ``None`` which means using all ``data_infos``. - serialize_data (bool, optional): Whether to hold memory using - serialized objects, when enabled, data loader workers can use - shared RAM from master process instead of making a copy. - Default: ``True``. - pipeline (list, optional): Processing pipeline. Default: []. - test_mode (bool, optional): ``test_mode=True`` means in test phase. - Default: ``False``. - lazy_init (bool, optional): Whether to load annotation during - instantiation. In some cases, such as visualization, only the meta - information of the dataset is needed, which is not necessary to - load annotation file. ``Basedataset`` can skip load annotations to - save time by set ``lazy_init=False``. Default: ``False``. - max_refetch (int, optional): If ``Basedataset.prepare_data`` get a - None img. The maximum extra number of cycles to get a valid - image. Default: 1000. - """ - - METAINFO: dict = dict(from_file='configs/_base_/datasets/halpe.py') +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.registry import DATASETS +from ..base import BaseCocoStyleDataset + + +@DATASETS.register_module() +class HalpeDataset(BaseCocoStyleDataset): + """Halpe dataset for pose estimation. + + 'https://github.com/Fang-Haoshu/Halpe-FullBody' + + Halpe keypoints:: + + 0-19: 20 body keypoints, + 20-25: 6 foot keypoints, + 26-93: 68 face keypoints, + 94-135: 42 hand keypoints + + In total, we have 136 keypoints for wholebody pose estimation. + + Args: + ann_file (str): Annotation file path. Default: ''. + bbox_file (str, optional): Detection result file path. If + ``bbox_file`` is set, detected bboxes loaded from this file will + be used instead of ground-truth bboxes. This setting is only for + evaluation, i.e., ignored when ``test_mode`` is ``False``. + Default: ``None``. + data_mode (str): Specifies the mode of data samples: ``'topdown'`` or + ``'bottomup'``. In ``'topdown'`` mode, each data sample contains + one instance; while in ``'bottomup'`` mode, each data sample + contains all instances in a image. Default: ``'topdown'`` + metainfo (dict, optional): Meta information for dataset, such as class + information. Default: ``None``. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Default: ``None``. + data_prefix (dict, optional): Prefix for training data. Default: + ``dict(img=None, ann=None)``. + filter_cfg (dict, optional): Config for filter data. Default: `None`. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Default: ``None`` which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. + Default: ``True``. + pipeline (list, optional): Processing pipeline. Default: []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Default: ``False``. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Default: ``False``. 
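Analogously, a small sketch (illustrative only, not part of the diff) of the 136-keypoint Halpe layout listed in the docstring above:

import numpy as np

keypoints = np.zeros((1, 136, 2), dtype=np.float32)  # dummy Halpe keypoints

body = keypoints[:, 0:20]     # 0-19:   20 body keypoints
foot = keypoints[:, 20:26]    # 20-25:   6 foot keypoints
face = keypoints[:, 26:94]    # 26-93:  68 face keypoints
hands = keypoints[:, 94:136]  # 94-135: 42 hand keypoints

assert sum(a.shape[1] for a in (body, foot, face, hands)) == 136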
+ max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Default: 1000. + """ + + METAINFO: dict = dict(from_file='configs/_base_/datasets/halpe.py') diff --git a/mmpose/datasets/samplers.py b/mmpose/datasets/samplers.py index d6bb34287a..f9def1ebda 100644 --- a/mmpose/datasets/samplers.py +++ b/mmpose/datasets/samplers.py @@ -1,114 +1,114 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import itertools -import math -from typing import Iterator, List, Optional, Sized, Union - -import torch -from mmengine.dist import get_dist_info, sync_random_seed -from torch.utils.data import Sampler - -from mmpose.datasets import CombinedDataset -from mmpose.registry import DATA_SAMPLERS - - -@DATA_SAMPLERS.register_module() -class MultiSourceSampler(Sampler): - """Multi-Source Sampler. According to the sampling ratio, sample data from - different datasets to form batches. - - Args: - dataset (Sized): The dataset - batch_size (int): Size of mini-batch - source_ratio (list[int | float]): The sampling ratio of different - source datasets in a mini-batch - shuffle (bool): Whether shuffle the dataset or not. Defaults to - ``True`` - round_up (bool): Whether to add extra samples to make the number of - samples evenly divisible by the world size. Defaults to True. - seed (int, optional): Random seed. If ``None``, set a random seed. - Defaults to ``None`` - """ - - def __init__(self, - dataset: Sized, - batch_size: int, - source_ratio: List[Union[int, float]], - shuffle: bool = True, - round_up: bool = True, - seed: Optional[int] = None) -> None: - - assert isinstance(dataset, CombinedDataset),\ - f'The dataset must be CombinedDataset, but get {dataset}' - assert isinstance(batch_size, int) and batch_size > 0, \ - 'batch_size must be a positive integer value, ' \ - f'but got batch_size={batch_size}' - assert isinstance(source_ratio, list), \ - f'source_ratio must be a list, but got source_ratio={source_ratio}' - assert len(source_ratio) == len(dataset._lens), \ - 'The length of source_ratio must be equal to ' \ - f'the number of datasets, but got source_ratio={source_ratio}' - - rank, world_size = get_dist_info() - self.rank = rank - self.world_size = world_size - - self.dataset = dataset - self.cumulative_sizes = [0] + list(itertools.accumulate(dataset._lens)) - self.batch_size = batch_size - self.source_ratio = source_ratio - self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / world_size)) - self.num_per_source = [ - int(batch_size * sr / sum(source_ratio)) for sr in source_ratio - ] - self.num_per_source[0] = batch_size - sum(self.num_per_source[1:]) - - assert sum(self.num_per_source) == batch_size, \ - 'The sum of num_per_source must be equal to ' \ - f'batch_size, but get {self.num_per_source}' - - self.seed = sync_random_seed() if seed is None else seed - self.shuffle = shuffle - self.round_up = round_up - self.source2inds = { - source: self._indices_of_rank(len(ds)) - for source, ds in enumerate(dataset.datasets) - } - - def _infinite_indices(self, sample_size: int) -> Iterator[int]: - """Infinitely yield a sequence of indices.""" - g = torch.Generator() - g.manual_seed(self.seed) - while True: - if self.shuffle: - yield from torch.randperm(sample_size, generator=g).tolist() - else: - yield from torch.arange(sample_size).tolist() - - def _indices_of_rank(self, sample_size: int) -> Iterator[int]: - """Slice the infinite indices by rank.""" - yield from itertools.islice( - self._infinite_indices(sample_size), 
self.rank, None, - self.world_size) - - def __iter__(self) -> Iterator[int]: - batch_buffer = [] - num_iters = self.num_samples // self.batch_size - if self.round_up and self.num_samples > num_iters * self.batch_size: - num_iters += 1 - for i in range(num_iters): - for source, num in enumerate(self.num_per_source): - batch_buffer_per_source = [] - for idx in self.source2inds[source]: - idx += self.cumulative_sizes[source] - batch_buffer_per_source.append(idx) - if len(batch_buffer_per_source) == num: - batch_buffer += batch_buffer_per_source - break - return iter(batch_buffer) - - def __len__(self) -> int: - return self.num_samples - - def set_epoch(self, epoch: int) -> None: - """Compatible in `epoch-based runner.""" - pass +# Copyright (c) OpenMMLab. All rights reserved. +import itertools +import math +from typing import Iterator, List, Optional, Sized, Union + +import torch +from mmengine.dist import get_dist_info, sync_random_seed +from torch.utils.data import Sampler + +from mmpose.datasets import CombinedDataset +from mmpose.registry import DATA_SAMPLERS + + +@DATA_SAMPLERS.register_module() +class MultiSourceSampler(Sampler): + """Multi-Source Sampler. According to the sampling ratio, sample data from + different datasets to form batches. + + Args: + dataset (Sized): The dataset + batch_size (int): Size of mini-batch + source_ratio (list[int | float]): The sampling ratio of different + source datasets in a mini-batch + shuffle (bool): Whether shuffle the dataset or not. Defaults to + ``True`` + round_up (bool): Whether to add extra samples to make the number of + samples evenly divisible by the world size. Defaults to True. + seed (int, optional): Random seed. If ``None``, set a random seed. + Defaults to ``None`` + """ + + def __init__(self, + dataset: Sized, + batch_size: int, + source_ratio: List[Union[int, float]], + shuffle: bool = True, + round_up: bool = True, + seed: Optional[int] = None) -> None: + + assert isinstance(dataset, CombinedDataset),\ + f'The dataset must be CombinedDataset, but get {dataset}' + assert isinstance(batch_size, int) and batch_size > 0, \ + 'batch_size must be a positive integer value, ' \ + f'but got batch_size={batch_size}' + assert isinstance(source_ratio, list), \ + f'source_ratio must be a list, but got source_ratio={source_ratio}' + assert len(source_ratio) == len(dataset._lens), \ + 'The length of source_ratio must be equal to ' \ + f'the number of datasets, but got source_ratio={source_ratio}' + + rank, world_size = get_dist_info() + self.rank = rank + self.world_size = world_size + + self.dataset = dataset + self.cumulative_sizes = [0] + list(itertools.accumulate(dataset._lens)) + self.batch_size = batch_size + self.source_ratio = source_ratio + self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / world_size)) + self.num_per_source = [ + int(batch_size * sr / sum(source_ratio)) for sr in source_ratio + ] + self.num_per_source[0] = batch_size - sum(self.num_per_source[1:]) + + assert sum(self.num_per_source) == batch_size, \ + 'The sum of num_per_source must be equal to ' \ + f'batch_size, but get {self.num_per_source}' + + self.seed = sync_random_seed() if seed is None else seed + self.shuffle = shuffle + self.round_up = round_up + self.source2inds = { + source: self._indices_of_rank(len(ds)) + for source, ds in enumerate(dataset.datasets) + } + + def _infinite_indices(self, sample_size: int) -> Iterator[int]: + """Infinitely yield a sequence of indices.""" + g = torch.Generator() + g.manual_seed(self.seed) + while True: + if 
self.shuffle: + yield from torch.randperm(sample_size, generator=g).tolist() + else: + yield from torch.arange(sample_size).tolist() + + def _indices_of_rank(self, sample_size: int) -> Iterator[int]: + """Slice the infinite indices by rank.""" + yield from itertools.islice( + self._infinite_indices(sample_size), self.rank, None, + self.world_size) + + def __iter__(self) -> Iterator[int]: + batch_buffer = [] + num_iters = self.num_samples // self.batch_size + if self.round_up and self.num_samples > num_iters * self.batch_size: + num_iters += 1 + for i in range(num_iters): + for source, num in enumerate(self.num_per_source): + batch_buffer_per_source = [] + for idx in self.source2inds[source]: + idx += self.cumulative_sizes[source] + batch_buffer_per_source.append(idx) + if len(batch_buffer_per_source) == num: + batch_buffer += batch_buffer_per_source + break + return iter(batch_buffer) + + def __len__(self) -> int: + return self.num_samples + + def set_epoch(self, epoch: int) -> None: + """Compatible in `epoch-based runner.""" + pass diff --git a/mmpose/datasets/transforms/__init__.py b/mmpose/datasets/transforms/__init__.py index 7ccbf7dac2..6d9405837c 100644 --- a/mmpose/datasets/transforms/__init__.py +++ b/mmpose/datasets/transforms/__init__.py @@ -1,20 +1,22 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .bottomup_transforms import (BottomupGetHeatmapMask, BottomupRandomAffine, - BottomupResize) -from .common_transforms import (Albumentation, GenerateTarget, - GetBBoxCenterScale, PhotometricDistortion, - RandomBBoxTransform, RandomFlip, - RandomHalfBody) -from .converting import KeypointConverter -from .formatting import PackPoseInputs -from .loading import LoadImage -from .pose3d_transforms import RandomFlipAroundRoot -from .topdown_transforms import TopdownAffine - -__all__ = [ - 'GetBBoxCenterScale', 'RandomBBoxTransform', 'RandomFlip', - 'RandomHalfBody', 'TopdownAffine', 'Albumentation', - 'PhotometricDistortion', 'PackPoseInputs', 'LoadImage', - 'BottomupGetHeatmapMask', 'BottomupRandomAffine', 'BottomupResize', - 'GenerateTarget', 'KeypointConverter', 'RandomFlipAroundRoot' -] +# Copyright (c) OpenMMLab. All rights reserved. +from .bottomup_transforms import (BottomupGetHeatmapMask, BottomupRandomAffine, + BottomupResize) +from .common_transforms import (Albumentation, GenerateTarget, + GetBBoxCenterScale, PhotometricDistortion, + RandomBBoxTransform, RandomFlip, + RandomHalfBody) +from .converting import KeypointConverter +from .formatting import PackPoseInputs +from .loading import LoadImage +from .pose3d_transforms import RandomFlipAroundRoot +from .topdown_transforms import TopdownAffine + +from .warping import Warping + +__all__ = [ + 'GetBBoxCenterScale', 'RandomBBoxTransform', 'RandomFlip', + 'RandomHalfBody', 'TopdownAffine', 'Albumentation', + 'PhotometricDistortion', 'PackPoseInputs', 'LoadImage', + 'BottomupGetHeatmapMask', 'BottomupRandomAffine', 'BottomupResize', + 'GenerateTarget', 'KeypointConverter', 'RandomFlipAroundRoot' +] diff --git a/mmpose/datasets/transforms/bottomup_transforms.py b/mmpose/datasets/transforms/bottomup_transforms.py index c31e0ae17d..a3e23a47f2 100644 --- a/mmpose/datasets/transforms/bottomup_transforms.py +++ b/mmpose/datasets/transforms/bottomup_transforms.py @@ -1,517 +1,517 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
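Returning to the MultiSourceSampler diff above: the per-source quota of a mini-batch follows the integer arithmetic in its constructor, with the first source absorbing the rounding remainder. A standalone sketch of that computation with made-up numbers:

batch_size = 6
source_ratio = [2, 1]  # e.g. draw from dataset 0 twice as often as dataset 1

# same arithmetic as MultiSourceSampler.__init__ above
num_per_source = [
    int(batch_size * sr / sum(source_ratio)) for sr in source_ratio
]
num_per_source[0] = batch_size - sum(num_per_source[1:])

print(num_per_source)                 # [4, 2]
assert sum(num_per_source) == batch_size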
-from typing import Dict, List, Optional, Tuple - -import cv2 -import numpy as np -import xtcocotools.mask as cocomask -from mmcv.image import imflip_, imresize -from mmcv.transforms import BaseTransform -from mmcv.transforms.utils import cache_randomness -from scipy.stats import truncnorm - -from mmpose.registry import TRANSFORMS -from mmpose.structures.bbox import get_udp_warp_matrix, get_warp_matrix - - -@TRANSFORMS.register_module() -class BottomupGetHeatmapMask(BaseTransform): - """Generate the mask of valid regions from the segmentation annotation. - - Required Keys: - - - img_shape - - invalid_segs (optional) - - warp_mat (optional) - - flip (optional) - - flip_direction (optional) - - heatmaps (optional) - - Added Keys: - - - heatmap_mask - """ - - def _segs_to_mask(self, segs: list, img_shape: Tuple[int, - int]) -> np.ndarray: - """Calculate mask from object segmentations. - - Args: - segs (List): The object segmentation annotations in COCO format - img_shape (Tuple): The image shape in (h, w) - - Returns: - np.ndarray: The binary object mask in size (h, w), where the - object pixels are 1 and background pixels are 0 - """ - - # RLE is a simple yet efficient format for storing binary masks. - # details can be found at `COCO tools `__ - rles = [] - for seg in segs: - rle = cocomask.frPyObjects(seg, img_shape[0], img_shape[1]) - if isinstance(rle, list): - # For non-crowded objects (e.g. human with no visible - # keypoints), the results is a list of rles - rles.extend(rle) - else: - # For crowded objects, the result is a single rle - rles.append(rle) - - if rles: - mask = cocomask.decode(cocomask.merge(rles)) - else: - mask = np.zeros(img_shape, dtype=np.uint8) - - return mask - - def transform(self, results: Dict) -> Optional[dict]: - """The transform function of :class:`BottomupGetHeatmapMask` to perform - photometric distortion on images. - - See ``transform()`` method of :class:`BaseTransform` for details. - - - Args: - results (dict): Result dict from the data pipeline. - - Returns: - dict: Result dict with images distorted. 
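The ``_segs_to_mask`` helper above relies on the COCO RLE utilities from ``xtcocotools``. Below is a minimal sketch of the same decode path on a toy polygon; the polygon coordinates and image size are made up for illustration.

import numpy as np
import xtcocotools.mask as cocomask

img_h, img_w = 64, 64
# one toy segmentation in COCO polygon format: [[x1, y1, x2, y2, ...]]
segs = [[[10.0, 10.0, 50.0, 10.0, 50.0, 50.0, 10.0, 50.0]]]

rles = []
for seg in segs:
    rle = cocomask.frPyObjects(seg, img_h, img_w)
    if isinstance(rle, list):   # polygon input yields a list of RLEs
        rles.extend(rle)
    else:                       # crowd (RLE) input yields a single RLE
        rles.append(rle)

if rles:
    mask = cocomask.decode(cocomask.merge(rles))
else:
    mask = np.zeros((img_h, img_w), dtype=np.uint8)

print(mask.shape, mask.sum())   # (64, 64) and the number of object pixels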
- """ - - invalid_segs = results.get('invalid_segs', []) - img_shape = results['img_shape'] # (img_h, img_w) - input_size = results['input_size'] - - # Calculate the mask of the valid region by negating the segmentation - # mask of invalid objects - mask = 1 - self._segs_to_mask(invalid_segs, img_shape) - - # Apply an affine transform to the mask if the image has been - # transformed - if 'warp_mat' in results: - warp_mat = results['warp_mat'] - - mask = mask.astype(np.float32) - mask = cv2.warpAffine( - mask, warp_mat, input_size, flags=cv2.INTER_LINEAR) - - # Flip the mask if the image has been flipped - if results.get('flip', False): - flip_dir = results['flip_direction'] - if flip_dir is not None: - mask = imflip_(mask, flip_dir) - - # Resize the mask to the same size of heatmaps - if 'heatmaps' in results: - heatmaps = results['heatmaps'] - if isinstance(heatmaps, list): - # Multi-level heatmaps - heatmap_mask = [] - for hm in results['heatmaps']: - h, w = hm.shape[1:3] - _mask = imresize( - mask, size=(w, h), interpolation='bilinear') - heatmap_mask.append(_mask) - else: - h, w = heatmaps.shape[1:3] - heatmap_mask = imresize( - mask, size=(w, h), interpolation='bilinear') - else: - heatmap_mask = mask - - # Binarize the mask(s) - if isinstance(heatmap_mask, list): - results['heatmap_mask'] = [hm > 0.5 for hm in heatmap_mask] - else: - results['heatmap_mask'] = heatmap_mask > 0.5 - - return results - - -@TRANSFORMS.register_module() -class BottomupRandomAffine(BaseTransform): - r"""Randomly shift, resize and rotate the image. - - Required Keys: - - - img - - img_shape - - keypoints (optional) - - Modified Keys: - - - img - - keypoints (optional) - - Added Keys: - - - input_size - - warp_mat - - Args: - input_size (Tuple[int, int]): The input image size of the model in - [w, h] - shift_factor (float): Randomly shift the image in range - :math:`[-dx, dx]` and :math:`[-dy, dy]` in X and Y directions, - where :math:`dx(y) = img_w(h) \cdot shift_factor` in pixels. - Defaults to 0.2 - shift_prob (float): Probability of applying random shift. Defaults to - 1.0 - scale_factor (Tuple[float, float]): Randomly resize the image in range - :math:`[scale_factor[0], scale_factor[1]]`. Defaults to - (0.75, 1.5) - scale_prob (float): Probability of applying random resizing. Defaults - to 1.0 - scale_type (str): wrt ``long`` or ``short`` length of the image. - Defaults to ``short`` - rotate_factor (float): Randomly rotate the bbox in - :math:`[-rotate_factor, rotate_factor]` in degrees. Defaults - to 40.0 - use_udp (bool): Whether use unbiased data processing. See - `UDP (CVPR 2020)`_ for details. Defaults to ``False`` - - .. 
_`UDP (CVPR 2020)`: https://arxiv.org/abs/1911.07524 - """ - - def __init__(self, - input_size: Tuple[int, int], - shift_factor: float = 0.2, - shift_prob: float = 1., - scale_factor: Tuple[float, float] = (0.75, 1.5), - scale_prob: float = 1., - scale_type: str = 'short', - rotate_factor: float = 30., - rotate_prob: float = 1, - use_udp: bool = False) -> None: - super().__init__() - - self.input_size = input_size - self.shift_factor = shift_factor - self.shift_prob = shift_prob - self.scale_factor = scale_factor - self.scale_prob = scale_prob - self.scale_type = scale_type - self.rotate_factor = rotate_factor - self.rotate_prob = rotate_prob - self.use_udp = use_udp - - @staticmethod - def _truncnorm(low: float = -1., - high: float = 1., - size: tuple = ()) -> np.ndarray: - """Sample from a truncated normal distribution.""" - return truncnorm.rvs(low, high, size=size).astype(np.float32) - - def _fix_aspect_ratio(self, scale: np.ndarray, aspect_ratio: float): - """Extend the scale to match the given aspect ratio. - - Args: - scale (np.ndarray): The image scale (w, h) in shape (2, ) - aspect_ratio (float): The ratio of ``w/h`` - - Returns: - np.ndarray: The reshaped image scale in (2, ) - """ - w, h = scale - if w > h * aspect_ratio: - if self.scale_type == 'long': - _w, _h = w, w / aspect_ratio - elif self.scale_type == 'short': - _w, _h = h * aspect_ratio, h - else: - raise ValueError(f'Unknown scale type: {self.scale_type}') - else: - if self.scale_type == 'short': - _w, _h = w, w / aspect_ratio - elif self.scale_type == 'long': - _w, _h = h * aspect_ratio, h - else: - raise ValueError(f'Unknown scale type: {self.scale_type}') - return np.array([_w, _h], dtype=scale.dtype) - - @cache_randomness - def _get_transform_params(self) -> Tuple: - """Get random transform parameters. - - Returns: - tuple: - - offset (np.ndarray): Image offset rate in shape (2, ) - - scale (np.ndarray): Image scaling rate factor in shape (1, ) - - rotate (np.ndarray): Image rotation degree in shape (1, ) - """ - # get offset - if np.random.rand() < self.shift_prob: - offset = self._truncnorm(size=(2, )) * self.shift_factor - else: - offset = np.zeros((2, ), dtype=np.float32) - - # get scale - if np.random.rand() < self.scale_prob: - scale_min, scale_max = self.scale_factor - scale = scale_min + (scale_max - scale_min) * ( - self._truncnorm(size=(1, )) + 1) / 2 - else: - scale = np.ones(1, dtype=np.float32) - - # get rotation - if np.random.rand() < self.rotate_prob: - rotate = self._truncnorm() * self.rotate_factor - else: - rotate = 0 - - return offset, scale, rotate - - def transform(self, results: Dict) -> Optional[dict]: - """The transform function of :class:`BottomupRandomAffine` to perform - photometric distortion on images. - - See ``transform()`` method of :class:`BaseTransform` for details. - - - Args: - results (dict): Result dict from the data pipeline. - - Returns: - dict: Result dict with images distorted. 
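The aspect-ratio fix used by BottomupRandomAffine above is plain arithmetic on the (w, h) scale. A standalone sketch of the ``'short'`` variant follows; the helper name and the numbers are made up for illustration.

import numpy as np

def fix_aspect_ratio_short(scale, aspect_ratio):
    # mirrors _fix_aspect_ratio above with scale_type='short': keep the
    # shorter side and stretch the other so that w / h == aspect_ratio
    w, h = scale
    if w > h * aspect_ratio:
        _w, _h = h * aspect_ratio, h
    else:
        _w, _h = w, w / aspect_ratio
    return np.array([_w, _h], dtype=scale.dtype)

scale = np.array([640.0, 480.0], dtype=np.float32)
print(fix_aspect_ratio_short(scale, aspect_ratio=192 / 256))  # [360. 480.]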
- """ - - img_h, img_w = results['img_shape'] - w, h = self.input_size - - offset_rate, scale_rate, rotate = self._get_transform_params() - offset = offset_rate * [img_w, img_h] - scale = scale_rate * [img_w, img_h] - # adjust the scale to match the target aspect ratio - scale = self._fix_aspect_ratio(scale, aspect_ratio=w / h) - - if self.use_udp: - center = np.array([(img_w - 1.0) / 2, (img_h - 1.0) / 2], - dtype=np.float32) - warp_mat = get_udp_warp_matrix( - center=center + offset, - scale=scale, - rot=rotate, - output_size=(w, h)) - else: - center = np.array([img_w / 2, img_h / 2], dtype=np.float32) - warp_mat = get_warp_matrix( - center=center + offset, - scale=scale, - rot=rotate, - output_size=(w, h)) - - # warp image and keypoints - results['img'] = cv2.warpAffine( - results['img'], warp_mat, (int(w), int(h)), flags=cv2.INTER_LINEAR) - - if 'keypoints' in results: - # Only transform (x, y) coordinates - results['keypoints'][..., :2] = cv2.transform( - results['keypoints'][..., :2], warp_mat) - - if 'bbox' in results: - bbox = np.tile(results['bbox'], 2).reshape(-1, 4, 2) - # corner order: left_top, left_bottom, right_top, right_bottom - bbox[:, 1:3, 0] = bbox[:, 0:2, 0] - results['bbox'] = cv2.transform(bbox, warp_mat).reshape(-1, 8) - - results['input_size'] = self.input_size - results['warp_mat'] = warp_mat - - return results - - -@TRANSFORMS.register_module() -class BottomupResize(BaseTransform): - """Resize the image to the input size of the model. Optionally, the image - can be resized to multiple sizes to build a image pyramid for multi-scale - inference. - - Required Keys: - - - img - - ori_shape - - Modified Keys: - - - img - - img_shape - - Added Keys: - - - input_size - - warp_mat - - aug_scale - - Args: - input_size (Tuple[int, int]): The input size of the model in [w, h]. - Note that the actually size of the resized image will be affected - by ``resize_mode`` and ``size_factor``, thus may not exactly equals - to the ``input_size`` - aug_scales (List[float], optional): The extra input scales for - multi-scale testing. If given, the input image will be resized - to different scales to build a image pyramid. And heatmaps from - all scales will be aggregated to make final prediction. Defaults - to ``None`` - size_factor (int): The actual input size will be ceiled to - a multiple of the `size_factor` value at both sides. - Defaults to 16 - resize_mode (str): The method to resize the image to the input size. - Options are: - - - ``'fit'``: The image will be resized according to the - relatively longer side with the aspect ratio kept. The - resized image will entirely fits into the range of the - input size - - ``'expand'``: The image will be resized according to the - relatively shorter side with the aspect ratio kept. The - resized image will exceed the given input size at the - longer side - use_udp (bool): Whether use unbiased data processing. See - `UDP (CVPR 2020)`_ for details. Defaults to ``False`` - - .. 
_`UDP (CVPR 2020)`: https://arxiv.org/abs/1911.07524 - """ - - def __init__(self, - input_size: Tuple[int, int], - aug_scales: Optional[List[float]] = None, - size_factor: int = 32, - resize_mode: str = 'fit', - use_udp: bool = False): - super().__init__() - - self.input_size = input_size - self.aug_scales = aug_scales - self.resize_mode = resize_mode - self.size_factor = size_factor - self.use_udp = use_udp - - @staticmethod - def _ceil_to_multiple(size: Tuple[int, int], base: int): - """Ceil the given size (tuple of [w, h]) to a multiple of the base.""" - return tuple(int(np.ceil(s / base) * base) for s in size) - - def _get_input_size(self, img_size: Tuple[int, int], - input_size: Tuple[int, int]) -> Tuple: - """Calculate the actual input size (which the original image will be - resized to) and the padded input size (which the resized image will be - padded to, or which is the size of the model input). - - Args: - img_size (Tuple[int, int]): The original image size in [w, h] - input_size (Tuple[int, int]): The expected input size in [w, h] - - Returns: - tuple: - - actual_input_size (Tuple[int, int]): The target size to resize - the image - - padded_input_size (Tuple[int, int]): The target size to generate - the model input which will contain the resized image - """ - img_w, img_h = img_size - ratio = img_w / img_h - - if self.resize_mode == 'fit': - padded_input_size = self._ceil_to_multiple(input_size, - self.size_factor) - if padded_input_size != input_size: - raise ValueError( - 'When ``resize_mode==\'fit\', the input size (height and' - ' width) should be mulitples of the size_factor(' - f'{self.size_factor}) at all scales. Got invalid input ' - f'size {input_size}.') - - pad_w, pad_h = padded_input_size - rsz_w = min(pad_w, pad_h * ratio) - rsz_h = min(pad_h, pad_w / ratio) - actual_input_size = (rsz_w, rsz_h) - - elif self.resize_mode == 'expand': - _padded_input_size = self._ceil_to_multiple( - input_size, self.size_factor) - pad_w, pad_h = _padded_input_size - rsz_w = max(pad_w, pad_h * ratio) - rsz_h = max(pad_h, pad_w / ratio) - - actual_input_size = (rsz_w, rsz_h) - padded_input_size = self._ceil_to_multiple(actual_input_size, - self.size_factor) - - else: - raise ValueError(f'Invalid resize mode {self.resize_mode}') - - return actual_input_size, padded_input_size - - def transform(self, results: Dict) -> Optional[dict]: - """The transform function of :class:`BottomupResize` to perform - photometric distortion on images. - - See ``transform()`` method of :class:`BaseTransform` for details. - - - Args: - results (dict): Result dict from the data pipeline. - - Returns: - dict: Result dict with images distorted. 
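The ``'fit'`` vs ``'expand'`` behavior of ``_get_input_size`` above boils down to min/max on the resized width/height plus ceiling to a multiple of ``size_factor``. A self-contained sketch with made-up numbers:

import numpy as np

def ceil_to_multiple(size, base):
    # same rounding as BottomupResize._ceil_to_multiple above
    return tuple(int(np.ceil(s / base) * base) for s in size)

img_w, img_h = 1000, 500          # original image, aspect ratio = 2.0
input_w, input_h = 640, 640       # requested model input
ratio = img_w / img_h
size_factor = 32

# 'fit': the longer side fits inside the input; the image never exceeds it
pad_w, pad_h = ceil_to_multiple((input_w, input_h), size_factor)
fit_size = (min(pad_w, pad_h * ratio), min(pad_h, pad_w / ratio))
print(fit_size)                   # (640, 320.0)

# 'expand': the shorter side fills the input; the longer side may exceed it
rsz_w, rsz_h = max(pad_w, pad_h * ratio), max(pad_h, pad_w / ratio)
print((rsz_w, rsz_h), ceil_to_multiple((rsz_w, rsz_h), size_factor))
# (1280.0, 640) resized, padded model input (1280, 640)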
- """ - - img = results['img'] - img_h, img_w = results['ori_shape'] - w, h = self.input_size - - input_sizes = [(w, h)] - if self.aug_scales: - input_sizes += [(int(w * s), int(h * s)) for s in self.aug_scales] - - imgs = [] - for i, (_w, _h) in enumerate(input_sizes): - - actual_input_size, padded_input_size = self._get_input_size( - img_size=(img_w, img_h), input_size=(_w, _h)) - - if self.use_udp: - center = np.array([(img_w - 1.0) / 2, (img_h - 1.0) / 2], - dtype=np.float32) - scale = np.array([img_w, img_h], dtype=np.float32) - warp_mat = get_udp_warp_matrix( - center=center, - scale=scale, - rot=0, - output_size=actual_input_size) - else: - center = np.array([img_w / 2, img_h / 2], dtype=np.float32) - scale = np.array([ - img_w * padded_input_size[0] / actual_input_size[0], - img_h * padded_input_size[1] / actual_input_size[1] - ], - dtype=np.float32) - warp_mat = get_warp_matrix( - center=center, - scale=scale, - rot=0, - output_size=padded_input_size) - - _img = cv2.warpAffine( - img, warp_mat, padded_input_size, flags=cv2.INTER_LINEAR) - - imgs.append(_img) - - # Store the transform information w.r.t. the main input size - if i == 0: - results['img_shape'] = padded_input_size[::-1] - results['input_center'] = center - results['input_scale'] = scale - results['input_size'] = padded_input_size - - if self.aug_scales: - results['img'] = imgs - results['aug_scales'] = self.aug_scales - else: - results['img'] = imgs[0] - results['aug_scale'] = None - - return results +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional, Tuple + +import cv2 +import numpy as np +import xtcocotools.mask as cocomask +from mmcv.image import imflip_, imresize +from mmcv.transforms import BaseTransform +from mmcv.transforms.utils import cache_randomness +from scipy.stats import truncnorm + +from mmpose.registry import TRANSFORMS +from mmpose.structures.bbox import get_udp_warp_matrix, get_warp_matrix + + +@TRANSFORMS.register_module() +class BottomupGetHeatmapMask(BaseTransform): + """Generate the mask of valid regions from the segmentation annotation. + + Required Keys: + + - img_shape + - invalid_segs (optional) + - warp_mat (optional) + - flip (optional) + - flip_direction (optional) + - heatmaps (optional) + + Added Keys: + + - heatmap_mask + """ + + def _segs_to_mask(self, segs: list, img_shape: Tuple[int, + int]) -> np.ndarray: + """Calculate mask from object segmentations. + + Args: + segs (List): The object segmentation annotations in COCO format + img_shape (Tuple): The image shape in (h, w) + + Returns: + np.ndarray: The binary object mask in size (h, w), where the + object pixels are 1 and background pixels are 0 + """ + + # RLE is a simple yet efficient format for storing binary masks. + # details can be found at `COCO tools `__ + rles = [] + for seg in segs: + rle = cocomask.frPyObjects(seg, img_shape[0], img_shape[1]) + if isinstance(rle, list): + # For non-crowded objects (e.g. human with no visible + # keypoints), the results is a list of rles + rles.extend(rle) + else: + # For crowded objects, the result is a single rle + rles.append(rle) + + if rles: + mask = cocomask.decode(cocomask.merge(rles)) + else: + mask = np.zeros(img_shape, dtype=np.uint8) + + return mask + + def transform(self, results: Dict) -> Optional[dict]: + """The transform function of :class:`BottomupGetHeatmapMask` to perform + photometric distortion on images. + + See ``transform()`` method of :class:`BaseTransform` for details. 
+ + + Args: + results (dict): Result dict from the data pipeline. + + Returns: + dict: Result dict with images distorted. + """ + + invalid_segs = results.get('invalid_segs', []) + img_shape = results['img_shape'] # (img_h, img_w) + input_size = results['input_size'] + + # Calculate the mask of the valid region by negating the segmentation + # mask of invalid objects + mask = 1 - self._segs_to_mask(invalid_segs, img_shape) + + # Apply an affine transform to the mask if the image has been + # transformed + if 'warp_mat' in results: + warp_mat = results['warp_mat'] + + mask = mask.astype(np.float32) + mask = cv2.warpAffine( + mask, warp_mat, input_size, flags=cv2.INTER_LINEAR) + + # Flip the mask if the image has been flipped + if results.get('flip', False): + flip_dir = results['flip_direction'] + if flip_dir is not None: + mask = imflip_(mask, flip_dir) + + # Resize the mask to the same size of heatmaps + if 'heatmaps' in results: + heatmaps = results['heatmaps'] + if isinstance(heatmaps, list): + # Multi-level heatmaps + heatmap_mask = [] + for hm in results['heatmaps']: + h, w = hm.shape[1:3] + _mask = imresize( + mask, size=(w, h), interpolation='bilinear') + heatmap_mask.append(_mask) + else: + h, w = heatmaps.shape[1:3] + heatmap_mask = imresize( + mask, size=(w, h), interpolation='bilinear') + else: + heatmap_mask = mask + + # Binarize the mask(s) + if isinstance(heatmap_mask, list): + results['heatmap_mask'] = [hm > 0.5 for hm in heatmap_mask] + else: + results['heatmap_mask'] = heatmap_mask > 0.5 + + return results + + +@TRANSFORMS.register_module() +class BottomupRandomAffine(BaseTransform): + r"""Randomly shift, resize and rotate the image. + + Required Keys: + + - img + - img_shape + - keypoints (optional) + + Modified Keys: + + - img + - keypoints (optional) + + Added Keys: + + - input_size + - warp_mat + + Args: + input_size (Tuple[int, int]): The input image size of the model in + [w, h] + shift_factor (float): Randomly shift the image in range + :math:`[-dx, dx]` and :math:`[-dy, dy]` in X and Y directions, + where :math:`dx(y) = img_w(h) \cdot shift_factor` in pixels. + Defaults to 0.2 + shift_prob (float): Probability of applying random shift. Defaults to + 1.0 + scale_factor (Tuple[float, float]): Randomly resize the image in range + :math:`[scale_factor[0], scale_factor[1]]`. Defaults to + (0.75, 1.5) + scale_prob (float): Probability of applying random resizing. Defaults + to 1.0 + scale_type (str): wrt ``long`` or ``short`` length of the image. + Defaults to ``short`` + rotate_factor (float): Randomly rotate the bbox in + :math:`[-rotate_factor, rotate_factor]` in degrees. Defaults + to 40.0 + use_udp (bool): Whether use unbiased data processing. See + `UDP (CVPR 2020)`_ for details. Defaults to ``False`` + + .. 
_`UDP (CVPR 2020)`: https://arxiv.org/abs/1911.07524 + """ + + def __init__(self, + input_size: Tuple[int, int], + shift_factor: float = 0.2, + shift_prob: float = 1., + scale_factor: Tuple[float, float] = (0.75, 1.5), + scale_prob: float = 1., + scale_type: str = 'short', + rotate_factor: float = 30., + rotate_prob: float = 1, + use_udp: bool = False) -> None: + super().__init__() + + self.input_size = input_size + self.shift_factor = shift_factor + self.shift_prob = shift_prob + self.scale_factor = scale_factor + self.scale_prob = scale_prob + self.scale_type = scale_type + self.rotate_factor = rotate_factor + self.rotate_prob = rotate_prob + self.use_udp = use_udp + + @staticmethod + def _truncnorm(low: float = -1., + high: float = 1., + size: tuple = ()) -> np.ndarray: + """Sample from a truncated normal distribution.""" + return truncnorm.rvs(low, high, size=size).astype(np.float32) + + def _fix_aspect_ratio(self, scale: np.ndarray, aspect_ratio: float): + """Extend the scale to match the given aspect ratio. + + Args: + scale (np.ndarray): The image scale (w, h) in shape (2, ) + aspect_ratio (float): The ratio of ``w/h`` + + Returns: + np.ndarray: The reshaped image scale in (2, ) + """ + w, h = scale + if w > h * aspect_ratio: + if self.scale_type == 'long': + _w, _h = w, w / aspect_ratio + elif self.scale_type == 'short': + _w, _h = h * aspect_ratio, h + else: + raise ValueError(f'Unknown scale type: {self.scale_type}') + else: + if self.scale_type == 'short': + _w, _h = w, w / aspect_ratio + elif self.scale_type == 'long': + _w, _h = h * aspect_ratio, h + else: + raise ValueError(f'Unknown scale type: {self.scale_type}') + return np.array([_w, _h], dtype=scale.dtype) + + @cache_randomness + def _get_transform_params(self) -> Tuple: + """Get random transform parameters. + + Returns: + tuple: + - offset (np.ndarray): Image offset rate in shape (2, ) + - scale (np.ndarray): Image scaling rate factor in shape (1, ) + - rotate (np.ndarray): Image rotation degree in shape (1, ) + """ + # get offset + if np.random.rand() < self.shift_prob: + offset = self._truncnorm(size=(2, )) * self.shift_factor + else: + offset = np.zeros((2, ), dtype=np.float32) + + # get scale + if np.random.rand() < self.scale_prob: + scale_min, scale_max = self.scale_factor + scale = scale_min + (scale_max - scale_min) * ( + self._truncnorm(size=(1, )) + 1) / 2 + else: + scale = np.ones(1, dtype=np.float32) + + # get rotation + if np.random.rand() < self.rotate_prob: + rotate = self._truncnorm() * self.rotate_factor + else: + rotate = 0 + + return offset, scale, rotate + + def transform(self, results: Dict) -> Optional[dict]: + """The transform function of :class:`BottomupRandomAffine` to perform + photometric distortion on images. + + See ``transform()`` method of :class:`BaseTransform` for details. + + + Args: + results (dict): Result dict from the data pipeline. + + Returns: + dict: Result dict with images distorted. 
+ """ + + img_h, img_w = results['img_shape'] + w, h = self.input_size + + offset_rate, scale_rate, rotate = self._get_transform_params() + offset = offset_rate * [img_w, img_h] + scale = scale_rate * [img_w, img_h] + # adjust the scale to match the target aspect ratio + scale = self._fix_aspect_ratio(scale, aspect_ratio=w / h) + + if self.use_udp: + center = np.array([(img_w - 1.0) / 2, (img_h - 1.0) / 2], + dtype=np.float32) + warp_mat = get_udp_warp_matrix( + center=center + offset, + scale=scale, + rot=rotate, + output_size=(w, h)) + else: + center = np.array([img_w / 2, img_h / 2], dtype=np.float32) + warp_mat = get_warp_matrix( + center=center + offset, + scale=scale, + rot=rotate, + output_size=(w, h)) + + # warp image and keypoints + results['img'] = cv2.warpAffine( + results['img'], warp_mat, (int(w), int(h)), flags=cv2.INTER_LINEAR) + + if 'keypoints' in results: + # Only transform (x, y) coordinates + results['keypoints'][..., :2] = cv2.transform( + results['keypoints'][..., :2], warp_mat) + + if 'bbox' in results: + bbox = np.tile(results['bbox'], 2).reshape(-1, 4, 2) + # corner order: left_top, left_bottom, right_top, right_bottom + bbox[:, 1:3, 0] = bbox[:, 0:2, 0] + results['bbox'] = cv2.transform(bbox, warp_mat).reshape(-1, 8) + + results['input_size'] = self.input_size + results['warp_mat'] = warp_mat + + return results + + +@TRANSFORMS.register_module() +class BottomupResize(BaseTransform): + """Resize the image to the input size of the model. Optionally, the image + can be resized to multiple sizes to build a image pyramid for multi-scale + inference. + + Required Keys: + + - img + - ori_shape + + Modified Keys: + + - img + - img_shape + + Added Keys: + + - input_size + - warp_mat + - aug_scale + + Args: + input_size (Tuple[int, int]): The input size of the model in [w, h]. + Note that the actually size of the resized image will be affected + by ``resize_mode`` and ``size_factor``, thus may not exactly equals + to the ``input_size`` + aug_scales (List[float], optional): The extra input scales for + multi-scale testing. If given, the input image will be resized + to different scales to build a image pyramid. And heatmaps from + all scales will be aggregated to make final prediction. Defaults + to ``None`` + size_factor (int): The actual input size will be ceiled to + a multiple of the `size_factor` value at both sides. + Defaults to 16 + resize_mode (str): The method to resize the image to the input size. + Options are: + + - ``'fit'``: The image will be resized according to the + relatively longer side with the aspect ratio kept. The + resized image will entirely fits into the range of the + input size + - ``'expand'``: The image will be resized according to the + relatively shorter side with the aspect ratio kept. The + resized image will exceed the given input size at the + longer side + use_udp (bool): Whether use unbiased data processing. See + `UDP (CVPR 2020)`_ for details. Defaults to ``False`` + + .. 
_`UDP (CVPR 2020)`: https://arxiv.org/abs/1911.07524 + """ + + def __init__(self, + input_size: Tuple[int, int], + aug_scales: Optional[List[float]] = None, + size_factor: int = 32, + resize_mode: str = 'fit', + use_udp: bool = False): + super().__init__() + + self.input_size = input_size + self.aug_scales = aug_scales + self.resize_mode = resize_mode + self.size_factor = size_factor + self.use_udp = use_udp + + @staticmethod + def _ceil_to_multiple(size: Tuple[int, int], base: int): + """Ceil the given size (tuple of [w, h]) to a multiple of the base.""" + return tuple(int(np.ceil(s / base) * base) for s in size) + + def _get_input_size(self, img_size: Tuple[int, int], + input_size: Tuple[int, int]) -> Tuple: + """Calculate the actual input size (which the original image will be + resized to) and the padded input size (which the resized image will be + padded to, or which is the size of the model input). + + Args: + img_size (Tuple[int, int]): The original image size in [w, h] + input_size (Tuple[int, int]): The expected input size in [w, h] + + Returns: + tuple: + - actual_input_size (Tuple[int, int]): The target size to resize + the image + - padded_input_size (Tuple[int, int]): The target size to generate + the model input which will contain the resized image + """ + img_w, img_h = img_size + ratio = img_w / img_h + + if self.resize_mode == 'fit': + padded_input_size = self._ceil_to_multiple(input_size, + self.size_factor) + if padded_input_size != input_size: + raise ValueError( + 'When ``resize_mode==\'fit\', the input size (height and' + ' width) should be mulitples of the size_factor(' + f'{self.size_factor}) at all scales. Got invalid input ' + f'size {input_size}.') + + pad_w, pad_h = padded_input_size + rsz_w = min(pad_w, pad_h * ratio) + rsz_h = min(pad_h, pad_w / ratio) + actual_input_size = (rsz_w, rsz_h) + + elif self.resize_mode == 'expand': + _padded_input_size = self._ceil_to_multiple( + input_size, self.size_factor) + pad_w, pad_h = _padded_input_size + rsz_w = max(pad_w, pad_h * ratio) + rsz_h = max(pad_h, pad_w / ratio) + + actual_input_size = (rsz_w, rsz_h) + padded_input_size = self._ceil_to_multiple(actual_input_size, + self.size_factor) + + else: + raise ValueError(f'Invalid resize mode {self.resize_mode}') + + return actual_input_size, padded_input_size + + def transform(self, results: Dict) -> Optional[dict]: + """The transform function of :class:`BottomupResize` to perform + photometric distortion on images. + + See ``transform()`` method of :class:`BaseTransform` for details. + + + Args: + results (dict): Result dict from the data pipeline. + + Returns: + dict: Result dict with images distorted. 
+ """ + + img = results['img'] + img_h, img_w = results['ori_shape'] + w, h = self.input_size + + input_sizes = [(w, h)] + if self.aug_scales: + input_sizes += [(int(w * s), int(h * s)) for s in self.aug_scales] + + imgs = [] + for i, (_w, _h) in enumerate(input_sizes): + + actual_input_size, padded_input_size = self._get_input_size( + img_size=(img_w, img_h), input_size=(_w, _h)) + + if self.use_udp: + center = np.array([(img_w - 1.0) / 2, (img_h - 1.0) / 2], + dtype=np.float32) + scale = np.array([img_w, img_h], dtype=np.float32) + warp_mat = get_udp_warp_matrix( + center=center, + scale=scale, + rot=0, + output_size=actual_input_size) + else: + center = np.array([img_w / 2, img_h / 2], dtype=np.float32) + scale = np.array([ + img_w * padded_input_size[0] / actual_input_size[0], + img_h * padded_input_size[1] / actual_input_size[1] + ], + dtype=np.float32) + warp_mat = get_warp_matrix( + center=center, + scale=scale, + rot=0, + output_size=padded_input_size) + + _img = cv2.warpAffine( + img, warp_mat, padded_input_size, flags=cv2.INTER_LINEAR) + + imgs.append(_img) + + # Store the transform information w.r.t. the main input size + if i == 0: + results['img_shape'] = padded_input_size[::-1] + results['input_center'] = center + results['input_scale'] = scale + results['input_size'] = padded_input_size + + if self.aug_scales: + results['img'] = imgs + results['aug_scales'] = self.aug_scales + else: + results['img'] = imgs[0] + results['aug_scale'] = None + + return results diff --git a/mmpose/datasets/transforms/common_transforms.py b/mmpose/datasets/transforms/common_transforms.py index 87068246f8..92f9f5a115 100644 --- a/mmpose/datasets/transforms/common_transforms.py +++ b/mmpose/datasets/transforms/common_transforms.py @@ -1,1056 +1,1056 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings -from copy import deepcopy -from typing import Dict, List, Optional, Sequence, Tuple, Union - -import mmcv -import mmengine -import numpy as np -from mmcv.image import imflip -from mmcv.transforms import BaseTransform -from mmcv.transforms.utils import avoid_cache_randomness, cache_randomness -from mmengine import is_list_of -from mmengine.dist import get_dist_info -from scipy.stats import truncnorm - -from mmpose.codecs import * # noqa: F401, F403 -from mmpose.registry import KEYPOINT_CODECS, TRANSFORMS -from mmpose.structures.bbox import bbox_xyxy2cs, flip_bbox -from mmpose.structures.keypoint import flip_keypoints -from mmpose.utils.typing import MultiConfig - -try: - import albumentations -except ImportError: - albumentations = None - -Number = Union[int, float] - - -@TRANSFORMS.register_module() -class GetBBoxCenterScale(BaseTransform): - """Convert bboxes from [x, y, w, h] to center and scale. - - The center is the coordinates of the bbox center, and the scale is the - bbox width and height normalized by a scale factor. - - Required Keys: - - - bbox - - Added Keys: - - - bbox_center - - bbox_scale - - Args: - padding (float): The bbox padding scale that will be multilied to - `bbox_scale`. Defaults to 1.25 - """ - - def __init__(self, padding: float = 1.25) -> None: - super().__init__() - - self.padding = padding - - def transform(self, results: Dict) -> Optional[dict]: - """The transform function of :class:`GetBBoxCenterScale`. - - See ``transform()`` method of :class:`BaseTransform` for details. - - Args: - results (dict): The result dict - - Returns: - dict: The result dict. 
- """ - if 'bbox_center' in results and 'bbox_scale' in results: - rank, _ = get_dist_info() - if rank == 0: - warnings.warn('Use the existing "bbox_center" and "bbox_scale"' - '. The padding will still be applied.') - results['bbox_scale'] *= self.padding - - else: - bbox = results['bbox'] - center, scale = bbox_xyxy2cs(bbox, padding=self.padding) - - results['bbox_center'] = center - results['bbox_scale'] = scale - - return results - - def __repr__(self) -> str: - """print the basic information of the transform. - - Returns: - str: Formatted string. - """ - repr_str = self.__class__.__name__ + f'(padding={self.padding})' - return repr_str - - -@TRANSFORMS.register_module() -class RandomFlip(BaseTransform): - """Randomly flip the image, bbox and keypoints. - - Required Keys: - - - img - - img_shape - - flip_indices - - input_size (optional) - - bbox (optional) - - bbox_center (optional) - - keypoints (optional) - - keypoints_visible (optional) - - img_mask (optional) - - Modified Keys: - - - img - - bbox (optional) - - bbox_center (optional) - - keypoints (optional) - - keypoints_visible (optional) - - img_mask (optional) - - Added Keys: - - - flip - - flip_direction - - Args: - prob (float | list[float]): The flipping probability. If a list is - given, the argument `direction` should be a list with the same - length. And each element in `prob` indicates the flipping - probability of the corresponding one in ``direction``. Defaults - to 0.5 - direction (str | list[str]): The flipping direction. Options are - ``'horizontal'``, ``'vertical'`` and ``'diagonal'``. If a list is - is given, each data sample's flipping direction will be sampled - from a distribution determined by the argument ``prob``. Defaults - to ``'horizontal'``. - """ - - def __init__(self, - prob: Union[float, List[float]] = 0.5, - direction: Union[str, List[str]] = 'horizontal') -> None: - if isinstance(prob, list): - assert is_list_of(prob, float) - assert 0 <= sum(prob) <= 1 - elif isinstance(prob, float): - assert 0 <= prob <= 1 - else: - raise ValueError(f'probs must be float or list of float, but \ - got `{type(prob)}`.') - self.prob = prob - - valid_directions = ['horizontal', 'vertical', 'diagonal'] - if isinstance(direction, str): - assert direction in valid_directions - elif isinstance(direction, list): - assert is_list_of(direction, str) - assert set(direction).issubset(set(valid_directions)) - else: - raise ValueError(f'direction must be either str or list of str, \ - but got `{type(direction)}`.') - self.direction = direction - - if isinstance(prob, list): - assert len(prob) == len(self.direction) - - @cache_randomness - def _choose_direction(self) -> str: - """Choose the flip direction according to `prob` and `direction`""" - if isinstance(self.direction, - List) and not isinstance(self.direction, str): - # None means non-flip - direction_list: list = list(self.direction) + [None] - elif isinstance(self.direction, str): - # None means non-flip - direction_list = [self.direction, None] - - if isinstance(self.prob, list): - non_prob: float = 1 - sum(self.prob) - prob_list = self.prob + [non_prob] - elif isinstance(self.prob, float): - non_prob = 1. - self.prob - # exclude non-flip - single_ratio = self.prob / (len(direction_list) - 1) - prob_list = [single_ratio] * (len(direction_list) - 1) + [non_prob] - - cur_dir = np.random.choice(direction_list, p=prob_list) - - return cur_dir - - def transform(self, results: dict) -> dict: - """The transform function of :class:`RandomFlip`. 
- - See ``transform()`` method of :class:`BaseTransform` for details. - - Args: - results (dict): The result dict - - Returns: - dict: The result dict. - """ - - flip_dir = self._choose_direction() - - if flip_dir is None: - results['flip'] = False - results['flip_direction'] = None - else: - results['flip'] = True - results['flip_direction'] = flip_dir - - h, w = results.get('input_size', results['img_shape']) - # flip image and mask - if isinstance(results['img'], list): - results['img'] = [ - imflip(img, direction=flip_dir) for img in results['img'] - ] - else: - results['img'] = imflip(results['img'], direction=flip_dir) - - if 'img_mask' in results: - results['img_mask'] = imflip( - results['img_mask'], direction=flip_dir) - - # flip bboxes - if results.get('bbox', None) is not None: - results['bbox'] = flip_bbox( - results['bbox'], - image_size=(w, h), - bbox_format='xyxy', - direction=flip_dir) - - if results.get('bbox_center', None) is not None: - results['bbox_center'] = flip_bbox( - results['bbox_center'], - image_size=(w, h), - bbox_format='center', - direction=flip_dir) - - # flip keypoints - if results.get('keypoints', None) is not None: - keypoints, keypoints_visible = flip_keypoints( - results['keypoints'], - results.get('keypoints_visible', None), - image_size=(w, h), - flip_indices=results['flip_indices'], - direction=flip_dir) - - results['keypoints'] = keypoints - results['keypoints_visible'] = keypoints_visible - - return results - - def __repr__(self) -> str: - """print the basic information of the transform. - - Returns: - str: Formatted string. - """ - repr_str = self.__class__.__name__ - repr_str += f'(prob={self.prob}, ' - repr_str += f'direction={self.direction})' - return repr_str - - -@TRANSFORMS.register_module() -class RandomHalfBody(BaseTransform): - """Data augmentation with half-body transform that keeps only the upper or - lower body at random. - - Required Keys: - - - keypoints - - keypoints_visible - - upper_body_ids - - lower_body_ids - - Modified Keys: - - - bbox - - bbox_center - - bbox_scale - - Args: - min_total_keypoints (int): The minimum required number of total valid - keypoints of a person to apply half-body transform. Defaults to 8 - min_half_keypoints (int): The minimum required number of valid - half-body keypoints of a person to apply half-body transform. - Defaults to 2 - padding (float): The bbox padding scale that will be multilied to - `bbox_scale`. Defaults to 1.5 - prob (float): The probability to apply half-body transform when the - keypoint number meets the requirement. Defaults to 0.3 - """ - - def __init__(self, - min_total_keypoints: int = 9, - min_upper_keypoints: int = 2, - min_lower_keypoints: int = 3, - padding: float = 1.5, - prob: float = 0.3, - upper_prioritized_prob: float = 0.7) -> None: - super().__init__() - self.min_total_keypoints = min_total_keypoints - self.min_upper_keypoints = min_upper_keypoints - self.min_lower_keypoints = min_lower_keypoints - self.padding = padding - self.prob = prob - self.upper_prioritized_prob = upper_prioritized_prob - - def _get_half_body_bbox(self, keypoints: np.ndarray, - half_body_ids: List[int] - ) -> Tuple[np.ndarray, np.ndarray]: - """Get half-body bbox center and scale of a single instance. 
- - Args: - keypoints (np.ndarray): Keypoints in shape (K, D) - upper_body_ids (list): The list of half-body keypont indices - - Returns: - tuple: A tuple containing half-body bbox center and scale - - center: Center (x, y) of the bbox - - scale: Scale (w, h) of the bbox - """ - - selected_keypoints = keypoints[half_body_ids] - center = selected_keypoints.mean(axis=0)[:2] - - x1, y1 = selected_keypoints.min(axis=0) - x2, y2 = selected_keypoints.max(axis=0) - w = x2 - x1 - h = y2 - y1 - scale = np.array([w, h], dtype=center.dtype) * self.padding - - return center, scale - - @cache_randomness - def _random_select_half_body(self, keypoints_visible: np.ndarray, - upper_body_ids: List[int], - lower_body_ids: List[int] - ) -> List[Optional[List[int]]]: - """Randomly determine whether applying half-body transform and get the - half-body keyponit indices of each instances. - - Args: - keypoints_visible (np.ndarray, optional): The visibility of - keypoints in shape (N, K, 1). - upper_body_ids (list): The list of upper body keypoint indices - lower_body_ids (list): The list of lower body keypoint indices - - Returns: - list[list[int] | None]: The selected half-body keypoint indices - of each instance. ``None`` means not applying half-body transform. - """ - - half_body_ids = [] - - for visible in keypoints_visible: - if visible.sum() < self.min_total_keypoints: - indices = None - elif np.random.rand() > self.prob: - indices = None - else: - upper_valid_ids = [i for i in upper_body_ids if visible[i] > 0] - lower_valid_ids = [i for i in lower_body_ids if visible[i] > 0] - - num_upper = len(upper_valid_ids) - num_lower = len(lower_valid_ids) - - prefer_upper = np.random.rand() < self.upper_prioritized_prob - if (num_upper < self.min_upper_keypoints - and num_lower < self.min_lower_keypoints): - indices = None - elif num_lower < self.min_lower_keypoints: - indices = upper_valid_ids - elif num_upper < self.min_upper_keypoints: - indices = lower_valid_ids - else: - indices = ( - upper_valid_ids if prefer_upper else lower_valid_ids) - - half_body_ids.append(indices) - - return half_body_ids - - def transform(self, results: Dict) -> Optional[dict]: - """The transform function of :class:`HalfBodyTransform`. - - See ``transform()`` method of :class:`BaseTransform` for details. - - Args: - results (dict): The result dict - - Returns: - dict: The result dict. - """ - - half_body_ids = self._random_select_half_body( - keypoints_visible=results['keypoints_visible'], - upper_body_ids=results['upper_body_ids'], - lower_body_ids=results['lower_body_ids']) - - bbox_center = [] - bbox_scale = [] - - for i, indices in enumerate(half_body_ids): - if indices is None: - bbox_center.append(results['bbox_center'][i]) - bbox_scale.append(results['bbox_scale'][i]) - else: - _center, _scale = self._get_half_body_bbox( - results['keypoints'][i], indices) - bbox_center.append(_center) - bbox_scale.append(_scale) - - results['bbox_center'] = np.stack(bbox_center) - results['bbox_scale'] = np.stack(bbox_scale) - return results - - def __repr__(self) -> str: - """print the basic information of the transform. - - Returns: - str: Formatted string. 
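The half-body bbox computed by ``_get_half_body_bbox`` above is just a min/max over the selected keypoints plus padding. A standalone numpy sketch with made-up keypoints:

import numpy as np

padding = 1.5
# four visible upper-body keypoints of one instance, shape (K, 2)
selected = np.array([[100., 80.], [140., 82.], [120., 150.], [118., 200.]])

center = selected.mean(axis=0)[:2]
x1, y1 = selected.min(axis=0)
x2, y2 = selected.max(axis=0)
scale = np.array([x2 - x1, y2 - y1], dtype=center.dtype) * padding

print(center)   # [119.5 128. ]
print(scale)    # [ 60. 180.]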
- """ - repr_str = self.__class__.__name__ - repr_str += f'(min_total_keypoints={self.min_total_keypoints}, ' - repr_str += f'min_upper_keypoints={self.min_upper_keypoints}, ' - repr_str += f'min_lower_keypoints={self.min_lower_keypoints}, ' - repr_str += f'padding={self.padding}, ' - repr_str += f'prob={self.prob}, ' - repr_str += f'upper_prioritized_prob={self.upper_prioritized_prob})' - return repr_str - - -@TRANSFORMS.register_module() -class RandomBBoxTransform(BaseTransform): - r"""Rnadomly shift, resize and rotate the bounding boxes. - - Required Keys: - - - bbox_center - - bbox_scale - - Modified Keys: - - - bbox_center - - bbox_scale - - Added Keys: - - bbox_rotation - - Args: - shift_factor (float): Randomly shift the bbox in range - :math:`[-dx, dx]` and :math:`[-dy, dy]` in X and Y directions, - where :math:`dx(y) = x(y)_scale \cdot shift_factor` in pixels. - Defaults to 0.16 - shift_prob (float): Probability of applying random shift. Defaults to - 0.3 - scale_factor (Tuple[float, float]): Randomly resize the bbox in range - :math:`[scale_factor[0], scale_factor[1]]`. Defaults to (0.5, 1.5) - scale_prob (float): Probability of applying random resizing. Defaults - to 1.0 - rotate_factor (float): Randomly rotate the bbox in - :math:`[-rotate_factor, rotate_factor]` in degrees. Defaults - to 80.0 - rotate_prob (float): Probability of applying random rotation. Defaults - to 0.6 - """ - - def __init__(self, - shift_factor: float = 0.16, - shift_prob: float = 0.3, - scale_factor: Tuple[float, float] = (0.5, 1.5), - scale_prob: float = 1.0, - rotate_factor: float = 80.0, - rotate_prob: float = 0.6) -> None: - super().__init__() - - self.shift_factor = shift_factor - self.shift_prob = shift_prob - self.scale_factor = scale_factor - self.scale_prob = scale_prob - self.rotate_factor = rotate_factor - self.rotate_prob = rotate_prob - - @staticmethod - def _truncnorm(low: float = -1., - high: float = 1., - size: tuple = ()) -> np.ndarray: - """Sample from a truncated normal distribution.""" - return truncnorm.rvs(low, high, size=size).astype(np.float32) - - @cache_randomness - def _get_transform_params(self, num_bboxes: int) -> Tuple: - """Get random transform parameters. - - Args: - num_bboxes (int): The number of bboxes - - Returns: - tuple: - - offset (np.ndarray): Offset factor of each bbox in shape (n, 2) - - scale (np.ndarray): Scaling factor of each bbox in shape (n, 1) - - rotate (np.ndarray): Rotation degree of each bbox in shape (n,) - """ - # Get shift parameters - offset = self._truncnorm(size=(num_bboxes, 2)) * self.shift_factor - offset = np.where( - np.random.rand(num_bboxes, 1) < self.shift_prob, offset, 0.) - - # Get scaling parameters - scale_min, scale_max = self.scale_factor - mu = (scale_max + scale_min) * 0.5 - sigma = (scale_max - scale_min) * 0.5 - scale = self._truncnorm(size=(num_bboxes, 1)) * sigma + mu - scale = np.where( - np.random.rand(num_bboxes, 1) < self.scale_prob, scale, 1.) - - # Get rotation parameters - rotate = self._truncnorm(size=(num_bboxes, )) * self.rotate_factor - rotate = np.where( - np.random.rand(num_bboxes) < self.rotate_prob, rotate, 0.) - - return offset, scale, rotate - - def transform(self, results: Dict) -> Optional[dict]: - """The transform function of :class:`RandomBboxTransform`. - - See ``transform()`` method of :class:`BaseTransform` for details. - - Args: - results (dict): The result dict - - Returns: - dict: The result dict. 
- """ - bbox_scale = results['bbox_scale'] - num_bboxes = bbox_scale.shape[0] - - offset, scale, rotate = self._get_transform_params(num_bboxes) - - results['bbox_center'] += offset * bbox_scale - results['bbox_scale'] *= scale - results['bbox_rotation'] = rotate - - return results - - def __repr__(self) -> str: - """print the basic information of the transform. - - Returns: - str: Formatted string. - """ - repr_str = self.__class__.__name__ - repr_str += f'(shift_prob={self.shift_prob}, ' - repr_str += f'shift_factor={self.shift_factor}, ' - repr_str += f'scale_prob={self.scale_prob}, ' - repr_str += f'scale_factor={self.scale_factor}, ' - repr_str += f'rotate_prob={self.rotate_prob}, ' - repr_str += f'rotate_factor={self.rotate_factor})' - return repr_str - - -@TRANSFORMS.register_module() -@avoid_cache_randomness -class Albumentation(BaseTransform): - """Albumentation augmentation (pixel-level transforms only). - - Adds custom pixel-level transformations from Albumentations library. - Please visit `https://albumentations.ai/docs/` - to get more information. - - Note: we only support pixel-level transforms. - Please visit `https://github.com/albumentations-team/` - `albumentations#pixel-level-transforms` - to get more information about pixel-level transforms. - - Required Keys: - - - img - - Modified Keys: - - - img - - Args: - transforms (List[dict]): A list of Albumentation transforms. - An example of ``transforms`` is as followed: - .. code-block:: python - - [ - dict( - type='RandomBrightnessContrast', - brightness_limit=[0.1, 0.3], - contrast_limit=[0.1, 0.3], - p=0.2), - dict(type='ChannelShuffle', p=0.1), - dict( - type='OneOf', - transforms=[ - dict(type='Blur', blur_limit=3, p=1.0), - dict(type='MedianBlur', blur_limit=3, p=1.0) - ], - p=0.1), - ] - keymap (dict | None): key mapping from ``input key`` to - ``albumentation-style key``. - Defaults to None, which will use {'img': 'image'}. - """ - - def __init__(self, - transforms: List[dict], - keymap: Optional[dict] = None) -> None: - if albumentations is None: - raise RuntimeError('albumentations is not installed') - - self.transforms = transforms - - self.aug = albumentations.Compose( - [self.albu_builder(t) for t in self.transforms]) - - if not keymap: - self.keymap_to_albu = { - 'img': 'image', - } - else: - self.keymap_to_albu = keymap - - def albu_builder(self, cfg: dict) -> albumentations: - """Import a module from albumentations. - - It resembles some of :func:`build_from_cfg` logic. - - Args: - cfg (dict): Config dict. It should at least contain the key "type". - - Returns: - albumentations.BasicTransform: The constructed transform object - """ - - assert isinstance(cfg, dict) and 'type' in cfg - args = cfg.copy() - - obj_type = args.pop('type') - if mmengine.is_str(obj_type): - if albumentations is None: - raise RuntimeError('albumentations is not installed') - rank, _ = get_dist_info() - if rank == 0 and not hasattr( - albumentations.augmentations.transforms, obj_type): - warnings.warn( - f'{obj_type} is not pixel-level transformations. 
' - 'Please use with caution.') - obj_cls = getattr(albumentations, obj_type) - elif isinstance(obj_type, type): - obj_cls = obj_type - else: - raise TypeError(f'type must be a str, but got {type(obj_type)}') - - if 'transforms' in args: - args['transforms'] = [ - self.albu_builder(transform) - for transform in args['transforms'] - ] - - return obj_cls(**args) - - def transform(self, results: dict) -> dict: - """The transform function of :class:`Albumentation` to apply - albumentations transforms. - - See ``transform()`` method of :class:`BaseTransform` for details. - - Args: - results (dict): Result dict from the data pipeline. - - Return: - dict: updated result dict. - """ - # map result dict to albumentations format - results_albu = {} - for k, v in self.keymap_to_albu.items(): - assert k in results, \ - f'The `{k}` is required to perform albumentations transforms' - results_albu[v] = results[k] - - # Apply albumentations transforms - results_albu = self.aug(**results_albu) - - # map the albu results back to the original format - for k, v in self.keymap_to_albu.items(): - results[k] = results_albu[v] - - return results - - def __repr__(self) -> str: - """print the basic information of the transform. - - Returns: - str: Formatted string. - """ - repr_str = self.__class__.__name__ + f'(transforms={self.transforms})' - return repr_str - - -@TRANSFORMS.register_module() -class PhotometricDistortion(BaseTransform): - """Apply photometric distortion to image sequentially, every transformation - is applied with a probability of 0.5. The position of random contrast is in - second or second to last. - - 1. random brightness - 2. random contrast (mode 0) - 3. convert color from BGR to HSV - 4. random saturation - 5. random hue - 6. convert color from HSV to BGR - 7. random contrast (mode 1) - 8. randomly swap channels - - Required Keys: - - - img - - Modified Keys: - - - img - - Args: - brightness_delta (int): delta of brightness. - contrast_range (tuple): range of contrast. - saturation_range (tuple): range of saturation. - hue_delta (int): delta of hue. - """ - - def __init__(self, - brightness_delta: int = 32, - contrast_range: Sequence[Number] = (0.5, 1.5), - saturation_range: Sequence[Number] = (0.5, 1.5), - hue_delta: int = 18) -> None: - self.brightness_delta = brightness_delta - self.contrast_lower, self.contrast_upper = contrast_range - self.saturation_lower, self.saturation_upper = saturation_range - self.hue_delta = hue_delta - - @cache_randomness - def _random_flags(self) -> Sequence[Number]: - """Generate the random flags for subsequent transforms. - - Returns: - Sequence[Number]: a sequence of numbers that indicate whether to - do the corresponding transforms. 
- """ - # contrast_mode == 0 --> do random contrast first - # contrast_mode == 1 --> do random contrast last - contrast_mode = np.random.randint(2) - # whether to apply brightness distortion - brightness_flag = np.random.randint(2) - # whether to apply contrast distortion - contrast_flag = np.random.randint(2) - # the mode to convert color from BGR to HSV - hsv_mode = np.random.randint(4) - # whether to apply channel swap - swap_flag = np.random.randint(2) - - # the beta in `self._convert` to be added to image array - # in brightness distortion - brightness_beta = np.random.uniform(-self.brightness_delta, - self.brightness_delta) - # the alpha in `self._convert` to be multiplied to image array - # in contrast distortion - contrast_alpha = np.random.uniform(self.contrast_lower, - self.contrast_upper) - # the alpha in `self._convert` to be multiplied to image array - # in saturation distortion to hsv-formatted img - saturation_alpha = np.random.uniform(self.saturation_lower, - self.saturation_upper) - # delta of hue to add to image array in hue distortion - hue_delta = np.random.randint(-self.hue_delta, self.hue_delta) - # the random permutation of channel order - swap_channel_order = np.random.permutation(3) - - return (contrast_mode, brightness_flag, contrast_flag, hsv_mode, - swap_flag, brightness_beta, contrast_alpha, saturation_alpha, - hue_delta, swap_channel_order) - - def _convert(self, - img: np.ndarray, - alpha: float = 1, - beta: float = 0) -> np.ndarray: - """Multiple with alpha and add beta with clip. - - Args: - img (np.ndarray): The image array. - alpha (float): The random multiplier. - beta (float): The random offset. - - Returns: - np.ndarray: The updated image array. - """ - img = img.astype(np.float32) * alpha + beta - img = np.clip(img, 0, 255) - return img.astype(np.uint8) - - def transform(self, results: dict) -> dict: - """The transform function of :class:`PhotometricDistortion` to perform - photometric distortion on images. - - See ``transform()`` method of :class:`BaseTransform` for details. - - - Args: - results (dict): Result dict from the data pipeline. - - Returns: - dict: Result dict with images distorted. - """ - - assert 'img' in results, '`img` is not found in results' - img = results['img'] - - (contrast_mode, brightness_flag, contrast_flag, hsv_mode, swap_flag, - brightness_beta, contrast_alpha, saturation_alpha, hue_delta, - swap_channel_order) = self._random_flags() - - # random brightness distortion - if brightness_flag: - img = self._convert(img, beta=brightness_beta) - - # contrast_mode == 0 --> do random contrast first - # contrast_mode == 1 --> do random contrast last - if contrast_mode == 1: - if contrast_flag: - img = self._convert(img, alpha=contrast_alpha) - - if hsv_mode: - # random saturation/hue distortion - img = mmcv.bgr2hsv(img) - if hsv_mode == 1 or hsv_mode == 3: - # apply saturation distortion to hsv-formatted img - img[:, :, 1] = self._convert( - img[:, :, 1], alpha=saturation_alpha) - if hsv_mode == 2 or hsv_mode == 3: - # apply hue distortion to hsv-formatted img - img[:, :, 0] = img[:, :, 0].astype(int) + hue_delta - img = mmcv.hsv2bgr(img) - - if contrast_mode == 1: - if contrast_flag: - img = self._convert(img, alpha=contrast_alpha) - - # randomly swap channels - if swap_flag: - img = img[..., swap_channel_order] - - results['img'] = img - return results - - def __repr__(self) -> str: - """print the basic information of the transform. - - Returns: - str: Formatted string. 
- """ - repr_str = self.__class__.__name__ - repr_str += (f'(brightness_delta={self.brightness_delta}, ' - f'contrast_range=({self.contrast_lower}, ' - f'{self.contrast_upper}), ' - f'saturation_range=({self.saturation_lower}, ' - f'{self.saturation_upper}), ' - f'hue_delta={self.hue_delta})') - return repr_str - - -@TRANSFORMS.register_module() -class GenerateTarget(BaseTransform): - """Encode keypoints into Target. - - The generated target is usually the supervision signal of the model - learning, e.g. heatmaps or regression labels. - - Required Keys: - - - keypoints - - keypoints_visible - - dataset_keypoint_weights - - Added Keys: - - - The keys of the encoded items from the codec will be updated into - the results, e.g. ``'heatmaps'`` or ``'keypoint_weights'``. See - the specific codec for more details. - - Args: - encoder (dict | list[dict]): The codec config for keypoint encoding. - Both single encoder and multiple encoders (given as a list) are - supported - multilevel (bool): Determine the method to handle multiple encoders. - If ``multilevel==True``, generate multilevel targets from a group - of encoders of the same type (e.g. multiple :class:`MSRAHeatmap` - encoders with different sigma values); If ``multilevel==False``, - generate combined targets from a group of different encoders. This - argument will have no effect in case of single encoder. Defaults - to ``False`` - use_dataset_keypoint_weights (bool): Whether use the keypoint weights - from the dataset meta information. Defaults to ``False`` - target_type (str, deprecated): This argument is deprecated and has no - effect. Defaults to ``None`` - """ - - def __init__(self, - encoder: MultiConfig, - target_type: Optional[str] = None, - multilevel: bool = False, - use_dataset_keypoint_weights: bool = False) -> None: - super().__init__() - - if target_type is not None: - rank, _ = get_dist_info() - if rank == 0: - warnings.warn( - 'The argument `target_type` is deprecated in' - ' GenerateTarget. The target type and encoded ' - 'keys will be determined by encoder(s).', - DeprecationWarning) - - self.encoder_cfg = deepcopy(encoder) - self.multilevel = multilevel - self.use_dataset_keypoint_weights = use_dataset_keypoint_weights - - if isinstance(self.encoder_cfg, list): - self.encoder = [ - KEYPOINT_CODECS.build(cfg) for cfg in self.encoder_cfg - ] - else: - assert not self.multilevel, ( - 'Need multiple encoder configs if ``multilevel==True``') - self.encoder = KEYPOINT_CODECS.build(self.encoder_cfg) - - def transform(self, results: Dict) -> Optional[dict]: - """The transform function of :class:`GenerateTarget`. - - See ``transform()`` method of :class:`BaseTransform` for details. - """ - - if results.get('transformed_keypoints', None) is not None: - # use keypoints transformed by TopdownAffine - keypoints = results['transformed_keypoints'] - elif results.get('keypoints', None) is not None: - # use original keypoints - keypoints = results['keypoints'] - else: - raise ValueError( - 'GenerateTarget requires \'transformed_keypoints\' or' - ' \'keypoints\' in the results.') - - keypoints_visible = results['keypoints_visible'] - - # Encoded items from the encoder(s) will be updated into the results. - # Please refer to the document of the specific codec for details about - # encoded items. - if not isinstance(self.encoder, list): - # For single encoding, the encoded items will be directly added - # into results. 
- auxiliary_encode_kwargs = { - key: results[key] - for key in self.encoder.auxiliary_encode_keys - } - encoded = self.encoder.encode( - keypoints=keypoints, - keypoints_visible=keypoints_visible, - **auxiliary_encode_kwargs) - - else: - encoded_list = [] - for _encoder in self.encoder: - auxiliary_encode_kwargs = { - key: results[key] - for key in _encoder.auxiliary_encode_keys - } - encoded_list.append( - _encoder.encode( - keypoints=keypoints, - keypoints_visible=keypoints_visible, - **auxiliary_encode_kwargs)) - - if self.multilevel: - # For multilevel encoding, the encoded items from each encoder - # should have the same keys. - - keys = encoded_list[0].keys() - if not all(_encoded.keys() == keys - for _encoded in encoded_list): - raise ValueError( - 'Encoded items from all encoders must have the same ' - 'keys if ``multilevel==True``.') - - encoded = { - k: [_encoded[k] for _encoded in encoded_list] - for k in keys - } - - else: - # For combined encoding, the encoded items from different - # encoders should have no overlapping items, except for - # `keypoint_weights`. If multiple `keypoint_weights` are given, - # they will be multiplied as the final `keypoint_weights`. - - encoded = dict() - keypoint_weights = [] - - for _encoded in encoded_list: - for key, value in _encoded.items(): - if key == 'keypoint_weights': - keypoint_weights.append(value) - elif key not in encoded: - encoded[key] = value - else: - raise ValueError( - f'Overlapping item "{key}" from multiple ' - 'encoders, which is not supported when ' - '``multilevel==False``') - - if keypoint_weights: - encoded['keypoint_weights'] = keypoint_weights - - if self.use_dataset_keypoint_weights and 'keypoint_weights' in encoded: - if isinstance(encoded['keypoint_weights'], list): - for w in encoded['keypoint_weights']: - w *= results['dataset_keypoint_weights'] - else: - encoded['keypoint_weights'] *= results[ - 'dataset_keypoint_weights'] - - results.update(encoded) - - if results.get('keypoint_weights', None) is not None: - results['transformed_keypoints_visible'] = results[ - 'keypoint_weights'] - elif results.get('keypoints', None) is not None: - results['transformed_keypoints_visible'] = results[ - 'keypoints_visible'] - else: - raise ValueError('GenerateTarget requires \'keypoint_weights\' or' - ' \'keypoints_visible\' in the results.') - - return results - - def __repr__(self) -> str: - """print the basic information of the transform. - - Returns: - str: Formatted string. - """ - repr_str = self.__class__.__name__ - repr_str += (f'(encoder={str(self.encoder_cfg)}, ') - repr_str += ('use_dataset_keypoint_weights=' - f'{self.use_dataset_keypoint_weights})') - return repr_str +# Copyright (c) OpenMMLab. All rights reserved. 
+import warnings +from copy import deepcopy +from typing import Dict, List, Optional, Sequence, Tuple, Union + +import mmcv +import mmengine +import numpy as np +from mmcv.image import imflip +from mmcv.transforms import BaseTransform +from mmcv.transforms.utils import avoid_cache_randomness, cache_randomness +from mmengine import is_list_of +from mmengine.dist import get_dist_info +from scipy.stats import truncnorm + +from mmpose.codecs import * # noqa: F401, F403 +from mmpose.registry import KEYPOINT_CODECS, TRANSFORMS +from mmpose.structures.bbox import bbox_xyxy2cs, flip_bbox +from mmpose.structures.keypoint import flip_keypoints +from mmpose.utils.typing import MultiConfig + +try: + import albumentations +except ImportError: + albumentations = None + +Number = Union[int, float] + + +@TRANSFORMS.register_module() +class GetBBoxCenterScale(BaseTransform): + """Convert bboxes from [x, y, w, h] to center and scale. + + The center is the coordinates of the bbox center, and the scale is the + bbox width and height normalized by a scale factor. + + Required Keys: + + - bbox + + Added Keys: + + - bbox_center + - bbox_scale + + Args: + padding (float): The bbox padding scale that will be multilied to + `bbox_scale`. Defaults to 1.25 + """ + + def __init__(self, padding: float = 1.25) -> None: + super().__init__() + + self.padding = padding + + def transform(self, results: Dict) -> Optional[dict]: + """The transform function of :class:`GetBBoxCenterScale`. + + See ``transform()`` method of :class:`BaseTransform` for details. + + Args: + results (dict): The result dict + + Returns: + dict: The result dict. + """ + if 'bbox_center' in results and 'bbox_scale' in results: + rank, _ = get_dist_info() + if rank == 0: + warnings.warn('Use the existing "bbox_center" and "bbox_scale"' + '. The padding will still be applied.') + results['bbox_scale'] *= self.padding + + else: + bbox = results['bbox'] + center, scale = bbox_xyxy2cs(bbox, padding=self.padding) + + results['bbox_center'] = center + results['bbox_scale'] = scale + + return results + + def __repr__(self) -> str: + """print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + f'(padding={self.padding})' + return repr_str + + +@TRANSFORMS.register_module() +class RandomFlip(BaseTransform): + """Randomly flip the image, bbox and keypoints. + + Required Keys: + + - img + - img_shape + - flip_indices + - input_size (optional) + - bbox (optional) + - bbox_center (optional) + - keypoints (optional) + - keypoints_visible (optional) + - img_mask (optional) + + Modified Keys: + + - img + - bbox (optional) + - bbox_center (optional) + - keypoints (optional) + - keypoints_visible (optional) + - img_mask (optional) + + Added Keys: + + - flip + - flip_direction + + Args: + prob (float | list[float]): The flipping probability. If a list is + given, the argument `direction` should be a list with the same + length. And each element in `prob` indicates the flipping + probability of the corresponding one in ``direction``. Defaults + to 0.5 + direction (str | list[str]): The flipping direction. Options are + ``'horizontal'``, ``'vertical'`` and ``'diagonal'``. If a list is + is given, each data sample's flipping direction will be sampled + from a distribution determined by the argument ``prob``. Defaults + to ``'horizontal'``. 
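
For reference, the center/scale conversion performed by ``GetBBoxCenterScale`` above (via ``bbox_xyxy2cs``) can be sketched as follows. This is an illustrative re-implementation that assumes the input bbox is given in ``(x1, y1, x2, y2)`` format; it is not the helper actually shipped in ``mmpose.structures.bbox``:

    import numpy as np

    def xyxy_to_center_scale(bbox: np.ndarray, padding: float = 1.25):
        """Sketch of the bbox -> (center, scale) conversion with padding."""
        x1, y1, x2, y2 = np.split(bbox, 4, axis=-1)
        center = np.concatenate([(x1 + x2) * 0.5, (y1 + y2) * 0.5], axis=-1)
        scale = np.concatenate([x2 - x1, y2 - y1], axis=-1) * padding
        return center, scale

    center, scale = xyxy_to_center_scale(np.array([[50., 40., 250., 440.]]))
    # center -> [[150., 240.]], scale -> [[250., 500.]] with the default padding 1.25
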
+ """ + + def __init__(self, + prob: Union[float, List[float]] = 0.5, + direction: Union[str, List[str]] = 'horizontal') -> None: + if isinstance(prob, list): + assert is_list_of(prob, float) + assert 0 <= sum(prob) <= 1 + elif isinstance(prob, float): + assert 0 <= prob <= 1 + else: + raise ValueError(f'probs must be float or list of float, but \ + got `{type(prob)}`.') + self.prob = prob + + valid_directions = ['horizontal', 'vertical', 'diagonal'] + if isinstance(direction, str): + assert direction in valid_directions + elif isinstance(direction, list): + assert is_list_of(direction, str) + assert set(direction).issubset(set(valid_directions)) + else: + raise ValueError(f'direction must be either str or list of str, \ + but got `{type(direction)}`.') + self.direction = direction + + if isinstance(prob, list): + assert len(prob) == len(self.direction) + + @cache_randomness + def _choose_direction(self) -> str: + """Choose the flip direction according to `prob` and `direction`""" + if isinstance(self.direction, + List) and not isinstance(self.direction, str): + # None means non-flip + direction_list: list = list(self.direction) + [None] + elif isinstance(self.direction, str): + # None means non-flip + direction_list = [self.direction, None] + + if isinstance(self.prob, list): + non_prob: float = 1 - sum(self.prob) + prob_list = self.prob + [non_prob] + elif isinstance(self.prob, float): + non_prob = 1. - self.prob + # exclude non-flip + single_ratio = self.prob / (len(direction_list) - 1) + prob_list = [single_ratio] * (len(direction_list) - 1) + [non_prob] + + cur_dir = np.random.choice(direction_list, p=prob_list) + + return cur_dir + + def transform(self, results: dict) -> dict: + """The transform function of :class:`RandomFlip`. + + See ``transform()`` method of :class:`BaseTransform` for details. + + Args: + results (dict): The result dict + + Returns: + dict: The result dict. + """ + + flip_dir = self._choose_direction() + + if flip_dir is None: + results['flip'] = False + results['flip_direction'] = None + else: + results['flip'] = True + results['flip_direction'] = flip_dir + + h, w = results.get('input_size', results['img_shape']) + # flip image and mask + if isinstance(results['img'], list): + results['img'] = [ + imflip(img, direction=flip_dir) for img in results['img'] + ] + else: + results['img'] = imflip(results['img'], direction=flip_dir) + + if 'img_mask' in results: + results['img_mask'] = imflip( + results['img_mask'], direction=flip_dir) + + # flip bboxes + if results.get('bbox', None) is not None: + results['bbox'] = flip_bbox( + results['bbox'], + image_size=(w, h), + bbox_format='xyxy', + direction=flip_dir) + + if results.get('bbox_center', None) is not None: + results['bbox_center'] = flip_bbox( + results['bbox_center'], + image_size=(w, h), + bbox_format='center', + direction=flip_dir) + + # flip keypoints + if results.get('keypoints', None) is not None: + keypoints, keypoints_visible = flip_keypoints( + results['keypoints'], + results.get('keypoints_visible', None), + image_size=(w, h), + flip_indices=results['flip_indices'], + direction=flip_dir) + + results['keypoints'] = keypoints + results['keypoints_visible'] = keypoints_visible + + return results + + def __repr__(self) -> str: + """print the basic information of the transform. + + Returns: + str: Formatted string. 
+ """ + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob}, ' + repr_str += f'direction={self.direction})' + return repr_str + + +@TRANSFORMS.register_module() +class RandomHalfBody(BaseTransform): + """Data augmentation with half-body transform that keeps only the upper or + lower body at random. + + Required Keys: + + - keypoints + - keypoints_visible + - upper_body_ids + - lower_body_ids + + Modified Keys: + + - bbox + - bbox_center + - bbox_scale + + Args: + min_total_keypoints (int): The minimum required number of total valid + keypoints of a person to apply half-body transform. Defaults to 8 + min_half_keypoints (int): The minimum required number of valid + half-body keypoints of a person to apply half-body transform. + Defaults to 2 + padding (float): The bbox padding scale that will be multilied to + `bbox_scale`. Defaults to 1.5 + prob (float): The probability to apply half-body transform when the + keypoint number meets the requirement. Defaults to 0.3 + """ + + def __init__(self, + min_total_keypoints: int = 9, + min_upper_keypoints: int = 2, + min_lower_keypoints: int = 3, + padding: float = 1.5, + prob: float = 0.3, + upper_prioritized_prob: float = 0.7) -> None: + super().__init__() + self.min_total_keypoints = min_total_keypoints + self.min_upper_keypoints = min_upper_keypoints + self.min_lower_keypoints = min_lower_keypoints + self.padding = padding + self.prob = prob + self.upper_prioritized_prob = upper_prioritized_prob + + def _get_half_body_bbox(self, keypoints: np.ndarray, + half_body_ids: List[int] + ) -> Tuple[np.ndarray, np.ndarray]: + """Get half-body bbox center and scale of a single instance. + + Args: + keypoints (np.ndarray): Keypoints in shape (K, D) + upper_body_ids (list): The list of half-body keypont indices + + Returns: + tuple: A tuple containing half-body bbox center and scale + - center: Center (x, y) of the bbox + - scale: Scale (w, h) of the bbox + """ + + selected_keypoints = keypoints[half_body_ids] + center = selected_keypoints.mean(axis=0)[:2] + + x1, y1 = selected_keypoints.min(axis=0) + x2, y2 = selected_keypoints.max(axis=0) + w = x2 - x1 + h = y2 - y1 + scale = np.array([w, h], dtype=center.dtype) * self.padding + + return center, scale + + @cache_randomness + def _random_select_half_body(self, keypoints_visible: np.ndarray, + upper_body_ids: List[int], + lower_body_ids: List[int] + ) -> List[Optional[List[int]]]: + """Randomly determine whether applying half-body transform and get the + half-body keyponit indices of each instances. + + Args: + keypoints_visible (np.ndarray, optional): The visibility of + keypoints in shape (N, K, 1). + upper_body_ids (list): The list of upper body keypoint indices + lower_body_ids (list): The list of lower body keypoint indices + + Returns: + list[list[int] | None]: The selected half-body keypoint indices + of each instance. ``None`` means not applying half-body transform. 
+ """ + + half_body_ids = [] + + for visible in keypoints_visible: + if visible.sum() < self.min_total_keypoints: + indices = None + elif np.random.rand() > self.prob: + indices = None + else: + upper_valid_ids = [i for i in upper_body_ids if visible[i] > 0] + lower_valid_ids = [i for i in lower_body_ids if visible[i] > 0] + + num_upper = len(upper_valid_ids) + num_lower = len(lower_valid_ids) + + prefer_upper = np.random.rand() < self.upper_prioritized_prob + if (num_upper < self.min_upper_keypoints + and num_lower < self.min_lower_keypoints): + indices = None + elif num_lower < self.min_lower_keypoints: + indices = upper_valid_ids + elif num_upper < self.min_upper_keypoints: + indices = lower_valid_ids + else: + indices = ( + upper_valid_ids if prefer_upper else lower_valid_ids) + + half_body_ids.append(indices) + + return half_body_ids + + def transform(self, results: Dict) -> Optional[dict]: + """The transform function of :class:`HalfBodyTransform`. + + See ``transform()`` method of :class:`BaseTransform` for details. + + Args: + results (dict): The result dict + + Returns: + dict: The result dict. + """ + + half_body_ids = self._random_select_half_body( + keypoints_visible=results['keypoints_visible'], + upper_body_ids=results['upper_body_ids'], + lower_body_ids=results['lower_body_ids']) + + bbox_center = [] + bbox_scale = [] + + for i, indices in enumerate(half_body_ids): + if indices is None: + bbox_center.append(results['bbox_center'][i]) + bbox_scale.append(results['bbox_scale'][i]) + else: + _center, _scale = self._get_half_body_bbox( + results['keypoints'][i], indices) + bbox_center.append(_center) + bbox_scale.append(_scale) + + results['bbox_center'] = np.stack(bbox_center) + results['bbox_scale'] = np.stack(bbox_scale) + return results + + def __repr__(self) -> str: + """print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + repr_str += f'(min_total_keypoints={self.min_total_keypoints}, ' + repr_str += f'min_upper_keypoints={self.min_upper_keypoints}, ' + repr_str += f'min_lower_keypoints={self.min_lower_keypoints}, ' + repr_str += f'padding={self.padding}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'upper_prioritized_prob={self.upper_prioritized_prob})' + return repr_str + + +@TRANSFORMS.register_module() +class RandomBBoxTransform(BaseTransform): + r"""Rnadomly shift, resize and rotate the bounding boxes. + + Required Keys: + + - bbox_center + - bbox_scale + + Modified Keys: + + - bbox_center + - bbox_scale + + Added Keys: + - bbox_rotation + + Args: + shift_factor (float): Randomly shift the bbox in range + :math:`[-dx, dx]` and :math:`[-dy, dy]` in X and Y directions, + where :math:`dx(y) = x(y)_scale \cdot shift_factor` in pixels. + Defaults to 0.16 + shift_prob (float): Probability of applying random shift. Defaults to + 0.3 + scale_factor (Tuple[float, float]): Randomly resize the bbox in range + :math:`[scale_factor[0], scale_factor[1]]`. Defaults to (0.5, 1.5) + scale_prob (float): Probability of applying random resizing. Defaults + to 1.0 + rotate_factor (float): Randomly rotate the bbox in + :math:`[-rotate_factor, rotate_factor]` in degrees. Defaults + to 80.0 + rotate_prob (float): Probability of applying random rotation. 
Defaults + to 0.6 + """ + + def __init__(self, + shift_factor: float = 0.16, + shift_prob: float = 0.3, + scale_factor: Tuple[float, float] = (0.5, 1.5), + scale_prob: float = 1.0, + rotate_factor: float = 80.0, + rotate_prob: float = 0.6) -> None: + super().__init__() + + self.shift_factor = shift_factor + self.shift_prob = shift_prob + self.scale_factor = scale_factor + self.scale_prob = scale_prob + self.rotate_factor = rotate_factor + self.rotate_prob = rotate_prob + + @staticmethod + def _truncnorm(low: float = -1., + high: float = 1., + size: tuple = ()) -> np.ndarray: + """Sample from a truncated normal distribution.""" + return truncnorm.rvs(low, high, size=size).astype(np.float32) + + @cache_randomness + def _get_transform_params(self, num_bboxes: int) -> Tuple: + """Get random transform parameters. + + Args: + num_bboxes (int): The number of bboxes + + Returns: + tuple: + - offset (np.ndarray): Offset factor of each bbox in shape (n, 2) + - scale (np.ndarray): Scaling factor of each bbox in shape (n, 1) + - rotate (np.ndarray): Rotation degree of each bbox in shape (n,) + """ + # Get shift parameters + offset = self._truncnorm(size=(num_bboxes, 2)) * self.shift_factor + offset = np.where( + np.random.rand(num_bboxes, 1) < self.shift_prob, offset, 0.) + + # Get scaling parameters + scale_min, scale_max = self.scale_factor + mu = (scale_max + scale_min) * 0.5 + sigma = (scale_max - scale_min) * 0.5 + scale = self._truncnorm(size=(num_bboxes, 1)) * sigma + mu + scale = np.where( + np.random.rand(num_bboxes, 1) < self.scale_prob, scale, 1.) + + # Get rotation parameters + rotate = self._truncnorm(size=(num_bboxes, )) * self.rotate_factor + rotate = np.where( + np.random.rand(num_bboxes) < self.rotate_prob, rotate, 0.) + + return offset, scale, rotate + + def transform(self, results: Dict) -> Optional[dict]: + """The transform function of :class:`RandomBboxTransform`. + + See ``transform()`` method of :class:`BaseTransform` for details. + + Args: + results (dict): The result dict + + Returns: + dict: The result dict. + """ + bbox_scale = results['bbox_scale'] + num_bboxes = bbox_scale.shape[0] + + offset, scale, rotate = self._get_transform_params(num_bboxes) + + results['bbox_center'] += offset * bbox_scale + results['bbox_scale'] *= scale + results['bbox_rotation'] = rotate + + return results + + def __repr__(self) -> str: + """print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + repr_str += f'(shift_prob={self.shift_prob}, ' + repr_str += f'shift_factor={self.shift_factor}, ' + repr_str += f'scale_prob={self.scale_prob}, ' + repr_str += f'scale_factor={self.scale_factor}, ' + repr_str += f'rotate_prob={self.rotate_prob}, ' + repr_str += f'rotate_factor={self.rotate_factor})' + return repr_str + + +@TRANSFORMS.register_module() +@avoid_cache_randomness +class Albumentation(BaseTransform): + """Albumentation augmentation (pixel-level transforms only). + + Adds custom pixel-level transformations from Albumentations library. + Please visit `https://albumentations.ai/docs/` + to get more information. + + Note: we only support pixel-level transforms. + Please visit `https://github.com/albumentations-team/` + `albumentations#pixel-level-transforms` + to get more information about pixel-level transforms. + + Required Keys: + + - img + + Modified Keys: + + - img + + Args: + transforms (List[dict]): A list of Albumentation transforms. + An example of ``transforms`` is as followed: + .. 
code-block:: python + + [ + dict( + type='RandomBrightnessContrast', + brightness_limit=[0.1, 0.3], + contrast_limit=[0.1, 0.3], + p=0.2), + dict(type='ChannelShuffle', p=0.1), + dict( + type='OneOf', + transforms=[ + dict(type='Blur', blur_limit=3, p=1.0), + dict(type='MedianBlur', blur_limit=3, p=1.0) + ], + p=0.1), + ] + keymap (dict | None): key mapping from ``input key`` to + ``albumentation-style key``. + Defaults to None, which will use {'img': 'image'}. + """ + + def __init__(self, + transforms: List[dict], + keymap: Optional[dict] = None) -> None: + if albumentations is None: + raise RuntimeError('albumentations is not installed') + + self.transforms = transforms + + self.aug = albumentations.Compose( + [self.albu_builder(t) for t in self.transforms]) + + if not keymap: + self.keymap_to_albu = { + 'img': 'image', + } + else: + self.keymap_to_albu = keymap + + def albu_builder(self, cfg: dict) -> albumentations: + """Import a module from albumentations. + + It resembles some of :func:`build_from_cfg` logic. + + Args: + cfg (dict): Config dict. It should at least contain the key "type". + + Returns: + albumentations.BasicTransform: The constructed transform object + """ + + assert isinstance(cfg, dict) and 'type' in cfg + args = cfg.copy() + + obj_type = args.pop('type') + if mmengine.is_str(obj_type): + if albumentations is None: + raise RuntimeError('albumentations is not installed') + rank, _ = get_dist_info() + if rank == 0 and not hasattr( + albumentations.augmentations.transforms, obj_type): + warnings.warn( + f'{obj_type} is not pixel-level transformations. ' + 'Please use with caution.') + obj_cls = getattr(albumentations, obj_type) + elif isinstance(obj_type, type): + obj_cls = obj_type + else: + raise TypeError(f'type must be a str, but got {type(obj_type)}') + + if 'transforms' in args: + args['transforms'] = [ + self.albu_builder(transform) + for transform in args['transforms'] + ] + + return obj_cls(**args) + + def transform(self, results: dict) -> dict: + """The transform function of :class:`Albumentation` to apply + albumentations transforms. + + See ``transform()`` method of :class:`BaseTransform` for details. + + Args: + results (dict): Result dict from the data pipeline. + + Return: + dict: updated result dict. + """ + # map result dict to albumentations format + results_albu = {} + for k, v in self.keymap_to_albu.items(): + assert k in results, \ + f'The `{k}` is required to perform albumentations transforms' + results_albu[v] = results[k] + + # Apply albumentations transforms + results_albu = self.aug(**results_albu) + + # map the albu results back to the original format + for k, v in self.keymap_to_albu.items(): + results[k] = results_albu[v] + + return results + + def __repr__(self) -> str: + """print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + f'(transforms={self.transforms})' + return repr_str + + +@TRANSFORMS.register_module() +class PhotometricDistortion(BaseTransform): + """Apply photometric distortion to image sequentially, every transformation + is applied with a probability of 0.5. The position of random contrast is in + second or second to last. + + 1. random brightness + 2. random contrast (mode 0) + 3. convert color from BGR to HSV + 4. random saturation + 5. random hue + 6. convert color from HSV to BGR + 7. random contrast (mode 1) + 8. 
randomly swap channels + + Required Keys: + + - img + + Modified Keys: + + - img + + Args: + brightness_delta (int): delta of brightness. + contrast_range (tuple): range of contrast. + saturation_range (tuple): range of saturation. + hue_delta (int): delta of hue. + """ + + def __init__(self, + brightness_delta: int = 32, + contrast_range: Sequence[Number] = (0.5, 1.5), + saturation_range: Sequence[Number] = (0.5, 1.5), + hue_delta: int = 18) -> None: + self.brightness_delta = brightness_delta + self.contrast_lower, self.contrast_upper = contrast_range + self.saturation_lower, self.saturation_upper = saturation_range + self.hue_delta = hue_delta + + @cache_randomness + def _random_flags(self) -> Sequence[Number]: + """Generate the random flags for subsequent transforms. + + Returns: + Sequence[Number]: a sequence of numbers that indicate whether to + do the corresponding transforms. + """ + # contrast_mode == 0 --> do random contrast first + # contrast_mode == 1 --> do random contrast last + contrast_mode = np.random.randint(2) + # whether to apply brightness distortion + brightness_flag = np.random.randint(2) + # whether to apply contrast distortion + contrast_flag = np.random.randint(2) + # the mode to convert color from BGR to HSV + hsv_mode = np.random.randint(4) + # whether to apply channel swap + swap_flag = np.random.randint(2) + + # the beta in `self._convert` to be added to image array + # in brightness distortion + brightness_beta = np.random.uniform(-self.brightness_delta, + self.brightness_delta) + # the alpha in `self._convert` to be multiplied to image array + # in contrast distortion + contrast_alpha = np.random.uniform(self.contrast_lower, + self.contrast_upper) + # the alpha in `self._convert` to be multiplied to image array + # in saturation distortion to hsv-formatted img + saturation_alpha = np.random.uniform(self.saturation_lower, + self.saturation_upper) + # delta of hue to add to image array in hue distortion + hue_delta = np.random.randint(-self.hue_delta, self.hue_delta) + # the random permutation of channel order + swap_channel_order = np.random.permutation(3) + + return (contrast_mode, brightness_flag, contrast_flag, hsv_mode, + swap_flag, brightness_beta, contrast_alpha, saturation_alpha, + hue_delta, swap_channel_order) + + def _convert(self, + img: np.ndarray, + alpha: float = 1, + beta: float = 0) -> np.ndarray: + """Multiple with alpha and add beta with clip. + + Args: + img (np.ndarray): The image array. + alpha (float): The random multiplier. + beta (float): The random offset. + + Returns: + np.ndarray: The updated image array. + """ + img = img.astype(np.float32) * alpha + beta + img = np.clip(img, 0, 255) + return img.astype(np.uint8) + + def transform(self, results: dict) -> dict: + """The transform function of :class:`PhotometricDistortion` to perform + photometric distortion on images. + + See ``transform()`` method of :class:`BaseTransform` for details. + + + Args: + results (dict): Result dict from the data pipeline. + + Returns: + dict: Result dict with images distorted. 
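
Every branch of ``PhotometricDistortion`` ultimately reduces to the clipped linear map ``clip(img * alpha + beta, 0, 255)`` implemented in ``_convert``. An illustrative pipeline entry, spelling out the defaults documented above:

    dict(
        type='PhotometricDistortion',
        brightness_delta=32,
        contrast_range=(0.5, 1.5),
        saturation_range=(0.5, 1.5),
        hue_delta=18)
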
+ """ + + assert 'img' in results, '`img` is not found in results' + img = results['img'] + + (contrast_mode, brightness_flag, contrast_flag, hsv_mode, swap_flag, + brightness_beta, contrast_alpha, saturation_alpha, hue_delta, + swap_channel_order) = self._random_flags() + + # random brightness distortion + if brightness_flag: + img = self._convert(img, beta=brightness_beta) + + # contrast_mode == 0 --> do random contrast first + # contrast_mode == 1 --> do random contrast last + if contrast_mode == 1: + if contrast_flag: + img = self._convert(img, alpha=contrast_alpha) + + if hsv_mode: + # random saturation/hue distortion + img = mmcv.bgr2hsv(img) + if hsv_mode == 1 or hsv_mode == 3: + # apply saturation distortion to hsv-formatted img + img[:, :, 1] = self._convert( + img[:, :, 1], alpha=saturation_alpha) + if hsv_mode == 2 or hsv_mode == 3: + # apply hue distortion to hsv-formatted img + img[:, :, 0] = img[:, :, 0].astype(int) + hue_delta + img = mmcv.hsv2bgr(img) + + if contrast_mode == 1: + if contrast_flag: + img = self._convert(img, alpha=contrast_alpha) + + # randomly swap channels + if swap_flag: + img = img[..., swap_channel_order] + + results['img'] = img + return results + + def __repr__(self) -> str: + """print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + repr_str += (f'(brightness_delta={self.brightness_delta}, ' + f'contrast_range=({self.contrast_lower}, ' + f'{self.contrast_upper}), ' + f'saturation_range=({self.saturation_lower}, ' + f'{self.saturation_upper}), ' + f'hue_delta={self.hue_delta})') + return repr_str + + +@TRANSFORMS.register_module() +class GenerateTarget(BaseTransform): + """Encode keypoints into Target. + + The generated target is usually the supervision signal of the model + learning, e.g. heatmaps or regression labels. + + Required Keys: + + - keypoints + - keypoints_visible + - dataset_keypoint_weights + + Added Keys: + + - The keys of the encoded items from the codec will be updated into + the results, e.g. ``'heatmaps'`` or ``'keypoint_weights'``. See + the specific codec for more details. + + Args: + encoder (dict | list[dict]): The codec config for keypoint encoding. + Both single encoder and multiple encoders (given as a list) are + supported + multilevel (bool): Determine the method to handle multiple encoders. + If ``multilevel==True``, generate multilevel targets from a group + of encoders of the same type (e.g. multiple :class:`MSRAHeatmap` + encoders with different sigma values); If ``multilevel==False``, + generate combined targets from a group of different encoders. This + argument will have no effect in case of single encoder. Defaults + to ``False`` + use_dataset_keypoint_weights (bool): Whether use the keypoint weights + from the dataset meta information. Defaults to ``False`` + target_type (str, deprecated): This argument is deprecated and has no + effect. Defaults to ``None`` + """ + + def __init__(self, + encoder: MultiConfig, + target_type: Optional[str] = None, + multilevel: bool = False, + use_dataset_keypoint_weights: bool = False) -> None: + super().__init__() + + if target_type is not None: + rank, _ = get_dist_info() + if rank == 0: + warnings.warn( + 'The argument `target_type` is deprecated in' + ' GenerateTarget. 
The target type and encoded ' + 'keys will be determined by encoder(s).', + DeprecationWarning) + + self.encoder_cfg = deepcopy(encoder) + self.multilevel = multilevel + self.use_dataset_keypoint_weights = use_dataset_keypoint_weights + + if isinstance(self.encoder_cfg, list): + self.encoder = [ + KEYPOINT_CODECS.build(cfg) for cfg in self.encoder_cfg + ] + else: + assert not self.multilevel, ( + 'Need multiple encoder configs if ``multilevel==True``') + self.encoder = KEYPOINT_CODECS.build(self.encoder_cfg) + + def transform(self, results: Dict) -> Optional[dict]: + """The transform function of :class:`GenerateTarget`. + + See ``transform()`` method of :class:`BaseTransform` for details. + """ + + if results.get('transformed_keypoints', None) is not None: + # use keypoints transformed by TopdownAffine + keypoints = results['transformed_keypoints'] + elif results.get('keypoints', None) is not None: + # use original keypoints + keypoints = results['keypoints'] + else: + raise ValueError( + 'GenerateTarget requires \'transformed_keypoints\' or' + ' \'keypoints\' in the results.') + + keypoints_visible = results['keypoints_visible'] + + # Encoded items from the encoder(s) will be updated into the results. + # Please refer to the document of the specific codec for details about + # encoded items. + if not isinstance(self.encoder, list): + # For single encoding, the encoded items will be directly added + # into results. + auxiliary_encode_kwargs = { + key: results[key] + for key in self.encoder.auxiliary_encode_keys + } + encoded = self.encoder.encode( + keypoints=keypoints, + keypoints_visible=keypoints_visible, + **auxiliary_encode_kwargs) + + else: + encoded_list = [] + for _encoder in self.encoder: + auxiliary_encode_kwargs = { + key: results[key] + for key in _encoder.auxiliary_encode_keys + } + encoded_list.append( + _encoder.encode( + keypoints=keypoints, + keypoints_visible=keypoints_visible, + **auxiliary_encode_kwargs)) + + if self.multilevel: + # For multilevel encoding, the encoded items from each encoder + # should have the same keys. + + keys = encoded_list[0].keys() + if not all(_encoded.keys() == keys + for _encoded in encoded_list): + raise ValueError( + 'Encoded items from all encoders must have the same ' + 'keys if ``multilevel==True``.') + + encoded = { + k: [_encoded[k] for _encoded in encoded_list] + for k in keys + } + + else: + # For combined encoding, the encoded items from different + # encoders should have no overlapping items, except for + # `keypoint_weights`. If multiple `keypoint_weights` are given, + # they will be multiplied as the final `keypoint_weights`. 
+ + encoded = dict() + keypoint_weights = [] + + for _encoded in encoded_list: + for key, value in _encoded.items(): + if key == 'keypoint_weights': + keypoint_weights.append(value) + elif key not in encoded: + encoded[key] = value + else: + raise ValueError( + f'Overlapping item "{key}" from multiple ' + 'encoders, which is not supported when ' + '``multilevel==False``') + + if keypoint_weights: + encoded['keypoint_weights'] = keypoint_weights + + if self.use_dataset_keypoint_weights and 'keypoint_weights' in encoded: + if isinstance(encoded['keypoint_weights'], list): + for w in encoded['keypoint_weights']: + w *= results['dataset_keypoint_weights'] + else: + encoded['keypoint_weights'] *= results[ + 'dataset_keypoint_weights'] + + results.update(encoded) + + if results.get('keypoint_weights', None) is not None: + results['transformed_keypoints_visible'] = results[ + 'keypoint_weights'] + elif results.get('keypoints', None) is not None: + results['transformed_keypoints_visible'] = results[ + 'keypoints_visible'] + else: + raise ValueError('GenerateTarget requires \'keypoint_weights\' or' + ' \'keypoints_visible\' in the results.') + + return results + + def __repr__(self) -> str: + """print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + repr_str += (f'(encoder={str(self.encoder_cfg)}, ') + repr_str += ('use_dataset_keypoint_weights=' + f'{self.use_dataset_keypoint_weights})') + return repr_str diff --git a/mmpose/datasets/transforms/converting.py b/mmpose/datasets/transforms/converting.py index 38dcea0994..932cc424b3 100644 --- a/mmpose/datasets/transforms/converting.py +++ b/mmpose/datasets/transforms/converting.py @@ -1,125 +1,125 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import List, Tuple, Union - -import numpy as np -from mmcv.transforms import BaseTransform - -from mmpose.registry import TRANSFORMS - - -@TRANSFORMS.register_module() -class KeypointConverter(BaseTransform): - """Change the order of keypoints according to the given mapping. - - Required Keys: - - - keypoints - - keypoints_visible - - Modified Keys: - - - keypoints - - keypoints_visible - - Args: - num_keypoints (int): The number of keypoints in target dataset. - mapping (list): A list containing mapping indexes. 
Each element has - format (source_index, target_index) - - Example: - >>> import numpy as np - >>> # case 1: 1-to-1 mapping - >>> # (0, 0) means target[0] = source[0] - >>> self = KeypointConverter( - >>> num_keypoints=3, - >>> mapping=[ - >>> (0, 0), (1, 1), (2, 2), (3, 3) - >>> ]) - >>> results = dict( - >>> keypoints=np.arange(34).reshape(2, 3, 2), - >>> keypoints_visible=np.arange(34).reshape(2, 3, 2) % 2) - >>> results = self(results) - >>> assert np.equal(results['keypoints'], - >>> np.arange(34).reshape(2, 3, 2)).all() - >>> assert np.equal(results['keypoints_visible'], - >>> np.arange(34).reshape(2, 3, 2) % 2).all() - >>> - >>> # case 2: 2-to-1 mapping - >>> # ((1, 2), 0) means target[0] = (source[1] + source[2]) / 2 - >>> self = KeypointConverter( - >>> num_keypoints=3, - >>> mapping=[ - >>> ((1, 2), 0), (1, 1), (2, 2) - >>> ]) - >>> results = dict( - >>> keypoints=np.arange(34).reshape(2, 3, 2), - >>> keypoints_visible=np.arange(34).reshape(2, 3, 2) % 2) - >>> results = self(results) - """ - - def __init__(self, num_keypoints: int, - mapping: Union[List[Tuple[int, int]], List[Tuple[Tuple, - int]]]): - self.num_keypoints = num_keypoints - self.mapping = mapping - source_index, target_index = zip(*mapping) - - src1, src2 = [], [] - interpolation = False - for x in source_index: - if isinstance(x, (list, tuple)): - assert len(x) == 2, 'source_index should be a list/tuple of ' \ - 'length 2' - src1.append(x[0]) - src2.append(x[1]) - interpolation = True - else: - src1.append(x) - src2.append(x) - - # When paired source_indexes are input, - # keep a self.source_index2 for interpolation - if interpolation: - self.source_index2 = src2 - - self.source_index = src1 - self.target_index = target_index - self.interpolation = interpolation - - def transform(self, results: dict) -> dict: - num_instances = results['keypoints'].shape[0] - - keypoints = np.zeros((num_instances, self.num_keypoints, 2)) - keypoints_visible = np.zeros((num_instances, self.num_keypoints)) - - # When paired source_indexes are input, - # perform interpolation with self.source_index and self.source_index2 - if self.interpolation: - keypoints[:, self.target_index] = 0.5 * ( - results['keypoints'][:, self.source_index] + - results['keypoints'][:, self.source_index2]) - - keypoints_visible[:, self.target_index] = results[ - 'keypoints_visible'][:, self.source_index] * \ - results['keypoints_visible'][:, self.source_index2] - else: - keypoints[:, - self.target_index] = results['keypoints'][:, self. - source_index] - keypoints_visible[:, self.target_index] = results[ - 'keypoints_visible'][:, self.source_index] - - results['keypoints'] = keypoints - results['keypoints_visible'] = keypoints_visible - return results - - def __repr__(self) -> str: - """print the basic information of the transform. - - Returns: - str: Formatted string. - """ - repr_str = self.__class__.__name__ - repr_str += f'(num_keypoints={self.num_keypoints}, '\ - f'mapping={self.mapping})' - return repr_str +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Tuple, Union + +import numpy as np +from mmcv.transforms import BaseTransform + +from mmpose.registry import TRANSFORMS + + +@TRANSFORMS.register_module() +class KeypointConverter(BaseTransform): + """Change the order of keypoints according to the given mapping. + + Required Keys: + + - keypoints + - keypoints_visible + + Modified Keys: + + - keypoints + - keypoints_visible + + Args: + num_keypoints (int): The number of keypoints in target dataset. 
+ mapping (list): A list containing mapping indexes. Each element has + format (source_index, target_index) + + Example: + >>> import numpy as np + >>> # case 1: 1-to-1 mapping + >>> # (0, 0) means target[0] = source[0] + >>> self = KeypointConverter( + >>> num_keypoints=3, + >>> mapping=[ + >>> (0, 0), (1, 1), (2, 2), (3, 3) + >>> ]) + >>> results = dict( + >>> keypoints=np.arange(34).reshape(2, 3, 2), + >>> keypoints_visible=np.arange(34).reshape(2, 3, 2) % 2) + >>> results = self(results) + >>> assert np.equal(results['keypoints'], + >>> np.arange(34).reshape(2, 3, 2)).all() + >>> assert np.equal(results['keypoints_visible'], + >>> np.arange(34).reshape(2, 3, 2) % 2).all() + >>> + >>> # case 2: 2-to-1 mapping + >>> # ((1, 2), 0) means target[0] = (source[1] + source[2]) / 2 + >>> self = KeypointConverter( + >>> num_keypoints=3, + >>> mapping=[ + >>> ((1, 2), 0), (1, 1), (2, 2) + >>> ]) + >>> results = dict( + >>> keypoints=np.arange(34).reshape(2, 3, 2), + >>> keypoints_visible=np.arange(34).reshape(2, 3, 2) % 2) + >>> results = self(results) + """ + + def __init__(self, num_keypoints: int, + mapping: Union[List[Tuple[int, int]], List[Tuple[Tuple, + int]]]): + self.num_keypoints = num_keypoints + self.mapping = mapping + source_index, target_index = zip(*mapping) + + src1, src2 = [], [] + interpolation = False + for x in source_index: + if isinstance(x, (list, tuple)): + assert len(x) == 2, 'source_index should be a list/tuple of ' \ + 'length 2' + src1.append(x[0]) + src2.append(x[1]) + interpolation = True + else: + src1.append(x) + src2.append(x) + + # When paired source_indexes are input, + # keep a self.source_index2 for interpolation + if interpolation: + self.source_index2 = src2 + + self.source_index = src1 + self.target_index = target_index + self.interpolation = interpolation + + def transform(self, results: dict) -> dict: + num_instances = results['keypoints'].shape[0] + + keypoints = np.zeros((num_instances, self.num_keypoints, 2)) + keypoints_visible = np.zeros((num_instances, self.num_keypoints)) + + # When paired source_indexes are input, + # perform interpolation with self.source_index and self.source_index2 + if self.interpolation: + keypoints[:, self.target_index] = 0.5 * ( + results['keypoints'][:, self.source_index] + + results['keypoints'][:, self.source_index2]) + + keypoints_visible[:, self.target_index] = results[ + 'keypoints_visible'][:, self.source_index] * \ + results['keypoints_visible'][:, self.source_index2] + else: + keypoints[:, + self.target_index] = results['keypoints'][:, self. + source_index] + keypoints_visible[:, self.target_index] = results[ + 'keypoints_visible'][:, self.source_index] + + results['keypoints'] = keypoints + results['keypoints_visible'] = keypoints_visible + return results + + def __repr__(self) -> str: + """print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + repr_str += f'(num_keypoints={self.num_keypoints}, '\ + f'mapping={self.mapping})' + return repr_str diff --git a/mmpose/datasets/transforms/formatting.py b/mmpose/datasets/transforms/formatting.py index 05aeef179f..749e4f8ca0 100644 --- a/mmpose/datasets/transforms/formatting.py +++ b/mmpose/datasets/transforms/formatting.py @@ -1,270 +1,270 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from typing import Sequence, Union - -import numpy as np -import torch -from mmcv.transforms import BaseTransform -from mmengine.structures import InstanceData, PixelData -from mmengine.utils import is_seq_of - -from mmpose.registry import TRANSFORMS -from mmpose.structures import MultilevelPixelData, PoseDataSample - - -def image_to_tensor(img: Union[np.ndarray, - Sequence[np.ndarray]]) -> torch.torch.Tensor: - """Translate image or sequence of images to tensor. Multiple image tensors - will be stacked. - - Args: - value (np.ndarray | Sequence[np.ndarray]): The original image or - image sequence - - Returns: - torch.Tensor: The output tensor. - """ - - if isinstance(img, np.ndarray): - if len(img.shape) < 3: - img = np.expand_dims(img, -1) - - img = np.ascontiguousarray(img) - tensor = torch.from_numpy(img).permute(2, 0, 1).contiguous() - else: - assert is_seq_of(img, np.ndarray) - tensor = torch.stack([image_to_tensor(_img) for _img in img]) - - return tensor - - -def keypoints_to_tensor(keypoints: Union[np.ndarray, Sequence[np.ndarray]] - ) -> torch.torch.Tensor: - """Translate keypoints or sequence of keypoints to tensor. Multiple - keypoints tensors will be stacked. - - Args: - keypoints (np.ndarray | Sequence[np.ndarray]): The keypoints or - keypoints sequence. - - Returns: - torch.Tensor: The output tensor. - """ - if isinstance(keypoints, np.ndarray): - keypoints = np.ascontiguousarray(keypoints) - N = keypoints.shape[0] - keypoints = keypoints.transpose(1, 2, 0).reshape(-1, N) - tensor = torch.from_numpy(keypoints).contiguous() - else: - assert is_seq_of(keypoints, np.ndarray) - tensor = torch.stack( - [keypoints_to_tensor(_keypoints) for _keypoints in keypoints]) - - return tensor - - -@TRANSFORMS.register_module() -class PackPoseInputs(BaseTransform): - """Pack the inputs data for pose estimation. - - The ``img_meta`` item is always populated. The contents of the - ``img_meta`` dictionary depends on ``meta_keys``. By default it includes: - - - ``id``: id of the data sample - - - ``img_id``: id of the image - - - ``'category_id'``: the id of the instance category - - - ``img_path``: path to the image file - - - ``crowd_index`` (optional): measure the crowding level of an image, - defined in CrowdPose dataset - - - ``ori_shape``: original shape of the image as a tuple (h, w, c) - - - ``img_shape``: shape of the image input to the network as a tuple \ - (h, w). Note that images may be zero padded on the \ - bottom/right if the batch tensor is larger than this shape. - - - ``input_size``: the input size to the network - - - ``flip``: a boolean indicating if image flip transform was used - - - ``flip_direction``: the flipping direction - - - ``flip_indices``: the indices of each keypoint's symmetric keypoint - - - ``raw_ann_info`` (optional): raw annotation of the instance(s) - - Args: - meta_keys (Sequence[str], optional): Meta keys which will be stored in - :obj: `PoseDataSample` as meta info. 
Defaults to ``('id', - 'img_id', 'img_path', 'category_id', 'crowd_index, 'ori_shape', - 'img_shape',, 'input_size', 'input_center', 'input_scale', 'flip', - 'flip_direction', 'flip_indices', 'raw_ann_info')`` - """ - - # items in `instance_mapping_table` will be directly packed into - # PoseDataSample.gt_instances without converting to Tensor - instance_mapping_table = { - 'bbox': 'bboxes', - 'head_size': 'head_size', - 'bbox_center': 'bbox_centers', - 'bbox_scale': 'bbox_scales', - 'bbox_score': 'bbox_scores', - 'keypoints': 'keypoints', - 'keypoints_visible': 'keypoints_visible', - 'lifting_target': 'lifting_target', - 'lifting_target_visible': 'lifting_target_visible', - } - - # items in `label_mapping_table` will be packed into - # PoseDataSample.gt_instance_labels and converted to Tensor. These items - # will be used for computing losses - label_mapping_table = { - 'keypoint_labels': 'keypoint_labels', - 'lifting_target_label': 'lifting_target_label', - 'lifting_target_weights': 'lifting_target_weights', - 'trajectory_weights': 'trajectory_weights', - 'keypoint_x_labels': 'keypoint_x_labels', - 'keypoint_y_labels': 'keypoint_y_labels', - 'keypoint_weights': 'keypoint_weights', - 'instance_coords': 'instance_coords', - 'transformed_keypoints_visible': 'keypoints_visible', - } - - # items in `field_mapping_table` will be packed into - # PoseDataSample.gt_fields and converted to Tensor. These items will be - # used for computing losses - field_mapping_table = { - 'heatmaps': 'heatmaps', - 'instance_heatmaps': 'instance_heatmaps', - 'heatmap_mask': 'heatmap_mask', - 'heatmap_weights': 'heatmap_weights', - 'displacements': 'displacements', - 'displacement_weights': 'displacement_weights', - } - - def __init__(self, - meta_keys=('id', 'img_id', 'img_path', 'category_id', - 'crowd_index', 'ori_shape', 'img_shape', - 'input_size', 'input_center', 'input_scale', - 'flip', 'flip_direction', 'flip_indices', - 'raw_ann_info'), - pack_transformed=False): - self.meta_keys = meta_keys - self.pack_transformed = pack_transformed - - def transform(self, results: dict) -> dict: - """Method to pack the input data. - - Args: - results (dict): Result dict from the data pipeline. - - Returns: - dict: - - - 'inputs' (obj:`torch.Tensor`): The forward data of models. - - 'data_samples' (obj:`PoseDataSample`): The annotation info of the - sample. 
- """ - # Pack image(s) for 2d pose estimation - if 'img' in results: - img = results['img'] - inputs_tensor = image_to_tensor(img) - # Pack keypoints for 3d pose-lifting - elif 'lifting_target' in results and 'keypoints' in results: - if 'keypoint_labels' in results: - keypoints = results['keypoint_labels'] - else: - keypoints = results['keypoints'] - inputs_tensor = keypoints_to_tensor(keypoints) - - data_sample = PoseDataSample() - - # pack instance data - gt_instances = InstanceData() - for key, packed_key in self.instance_mapping_table.items(): - if key in results: - if 'lifting_target' in results and key in { - 'keypoints', 'keypoints_visible' - }: - continue - gt_instances.set_field(results[key], packed_key) - - # pack `transformed_keypoints` for visualizing data transform - # and augmentation results - if self.pack_transformed and 'transformed_keypoints' in results: - gt_instances.set_field(results['transformed_keypoints'], - 'transformed_keypoints') - if self.pack_transformed and \ - 'transformed_keypoints_visible' in results: - gt_instances.set_field(results['transformed_keypoints_visible'], - 'transformed_keypoints_visible') - - data_sample.gt_instances = gt_instances - - # pack instance labels - gt_instance_labels = InstanceData() - for key, packed_key in self.label_mapping_table.items(): - if key in results: - # For pose-lifting, store only target-related fields - if 'lifting_target_label' in results and key in { - 'keypoint_labels', 'keypoint_weights', - 'transformed_keypoints_visible' - }: - continue - if isinstance(results[key], list): - # A list of labels is usually generated by combined - # multiple encoders (See ``GenerateTarget`` in - # mmpose/datasets/transforms/common_transforms.py) - # In this case, labels in list should have the same - # shape and will be stacked. - _labels = np.stack(results[key]) - gt_instance_labels.set_field(_labels, packed_key) - else: - gt_instance_labels.set_field(results[key], packed_key) - data_sample.gt_instance_labels = gt_instance_labels.to_tensor() - - # pack fields - gt_fields = None - for key, packed_key in self.field_mapping_table.items(): - if key in results: - if isinstance(results[key], list): - if gt_fields is None: - gt_fields = MultilevelPixelData() - else: - assert isinstance( - gt_fields, MultilevelPixelData - ), 'Got mixed single-level and multi-level pixel data.' - else: - if gt_fields is None: - gt_fields = PixelData() - else: - assert isinstance( - gt_fields, PixelData - ), 'Got mixed single-level and multi-level pixel data.' - - gt_fields.set_field(results[key], packed_key) - - if gt_fields: - data_sample.gt_fields = gt_fields.to_tensor() - - img_meta = {k: results[k] for k in self.meta_keys if k in results} - data_sample.set_metainfo(img_meta) - - packed_results = dict() - packed_results['inputs'] = inputs_tensor - packed_results['data_samples'] = data_sample - - return packed_results - - def __repr__(self) -> str: - """print the basic information of the transform. - - Returns: - str: Formatted string. - """ - repr_str = self.__class__.__name__ - repr_str += f'(meta_keys={self.meta_keys})' - return repr_str +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import Sequence, Union + +import numpy as np +import torch +from mmcv.transforms import BaseTransform +from mmengine.structures import InstanceData, PixelData +from mmengine.utils import is_seq_of + +from mmpose.registry import TRANSFORMS +from mmpose.structures import MultilevelPixelData, PoseDataSample + + +def image_to_tensor(img: Union[np.ndarray, + Sequence[np.ndarray]]) -> torch.torch.Tensor: + """Translate image or sequence of images to tensor. Multiple image tensors + will be stacked. + + Args: + value (np.ndarray | Sequence[np.ndarray]): The original image or + image sequence + + Returns: + torch.Tensor: The output tensor. + """ + + if isinstance(img, np.ndarray): + if len(img.shape) < 3: + img = np.expand_dims(img, -1) + + img = np.ascontiguousarray(img) + tensor = torch.from_numpy(img).permute(2, 0, 1).contiguous() + else: + assert is_seq_of(img, np.ndarray) + tensor = torch.stack([image_to_tensor(_img) for _img in img]) + + return tensor + + +def keypoints_to_tensor(keypoints: Union[np.ndarray, Sequence[np.ndarray]] + ) -> torch.torch.Tensor: + """Translate keypoints or sequence of keypoints to tensor. Multiple + keypoints tensors will be stacked. + + Args: + keypoints (np.ndarray | Sequence[np.ndarray]): The keypoints or + keypoints sequence. + + Returns: + torch.Tensor: The output tensor. + """ + if isinstance(keypoints, np.ndarray): + keypoints = np.ascontiguousarray(keypoints) + N = keypoints.shape[0] + keypoints = keypoints.transpose(1, 2, 0).reshape(-1, N) + tensor = torch.from_numpy(keypoints).contiguous() + else: + assert is_seq_of(keypoints, np.ndarray) + tensor = torch.stack( + [keypoints_to_tensor(_keypoints) for _keypoints in keypoints]) + + return tensor + + +@TRANSFORMS.register_module() +class PackPoseInputs(BaseTransform): + """Pack the inputs data for pose estimation. + + The ``img_meta`` item is always populated. The contents of the + ``img_meta`` dictionary depends on ``meta_keys``. By default it includes: + + - ``id``: id of the data sample + + - ``img_id``: id of the image + + - ``'category_id'``: the id of the instance category + + - ``img_path``: path to the image file + + - ``crowd_index`` (optional): measure the crowding level of an image, + defined in CrowdPose dataset + + - ``ori_shape``: original shape of the image as a tuple (h, w, c) + + - ``img_shape``: shape of the image input to the network as a tuple \ + (h, w). Note that images may be zero padded on the \ + bottom/right if the batch tensor is larger than this shape. + + - ``input_size``: the input size to the network + + - ``flip``: a boolean indicating if image flip transform was used + + - ``flip_direction``: the flipping direction + + - ``flip_indices``: the indices of each keypoint's symmetric keypoint + + - ``raw_ann_info`` (optional): raw annotation of the instance(s) + + Args: + meta_keys (Sequence[str], optional): Meta keys which will be stored in + :obj: `PoseDataSample` as meta info. 
Defaults to ``('id', + 'img_id', 'img_path', 'category_id', 'crowd_index, 'ori_shape', + 'img_shape',, 'input_size', 'input_center', 'input_scale', 'flip', + 'flip_direction', 'flip_indices', 'raw_ann_info')`` + """ + + # items in `instance_mapping_table` will be directly packed into + # PoseDataSample.gt_instances without converting to Tensor + instance_mapping_table = { + 'bbox': 'bboxes', + 'head_size': 'head_size', + 'bbox_center': 'bbox_centers', + 'bbox_scale': 'bbox_scales', + 'bbox_score': 'bbox_scores', + 'keypoints': 'keypoints', + 'keypoints_visible': 'keypoints_visible', + 'lifting_target': 'lifting_target', + 'lifting_target_visible': 'lifting_target_visible', + } + + # items in `label_mapping_table` will be packed into + # PoseDataSample.gt_instance_labels and converted to Tensor. These items + # will be used for computing losses + label_mapping_table = { + 'keypoint_labels': 'keypoint_labels', + 'lifting_target_label': 'lifting_target_label', + 'lifting_target_weights': 'lifting_target_weights', + 'trajectory_weights': 'trajectory_weights', + 'keypoint_x_labels': 'keypoint_x_labels', + 'keypoint_y_labels': 'keypoint_y_labels', + 'keypoint_weights': 'keypoint_weights', + 'instance_coords': 'instance_coords', + 'transformed_keypoints_visible': 'keypoints_visible', + } + + # items in `field_mapping_table` will be packed into + # PoseDataSample.gt_fields and converted to Tensor. These items will be + # used for computing losses + field_mapping_table = { + 'heatmaps': 'heatmaps', + 'instance_heatmaps': 'instance_heatmaps', + 'heatmap_mask': 'heatmap_mask', + 'heatmap_weights': 'heatmap_weights', + 'displacements': 'displacements', + 'displacement_weights': 'displacement_weights', + } + + def __init__(self, + meta_keys=('id', 'img_id', 'img_path', 'category_id', + 'crowd_index', 'ori_shape', 'img_shape', + 'input_size', 'input_center', 'input_scale', + 'flip', 'flip_direction', 'flip_indices', + 'raw_ann_info'), + pack_transformed=False): + self.meta_keys = meta_keys + self.pack_transformed = pack_transformed + + def transform(self, results: dict) -> dict: + """Method to pack the input data. + + Args: + results (dict): Result dict from the data pipeline. + + Returns: + dict: + + - 'inputs' (obj:`torch.Tensor`): The forward data of models. + - 'data_samples' (obj:`PoseDataSample`): The annotation info of the + sample. 
+ """ + # Pack image(s) for 2d pose estimation + if 'img' in results: + img = results['img'] + inputs_tensor = image_to_tensor(img) + # Pack keypoints for 3d pose-lifting + elif 'lifting_target' in results and 'keypoints' in results: + if 'keypoint_labels' in results: + keypoints = results['keypoint_labels'] + else: + keypoints = results['keypoints'] + inputs_tensor = keypoints_to_tensor(keypoints) + + data_sample = PoseDataSample() + + # pack instance data + gt_instances = InstanceData() + for key, packed_key in self.instance_mapping_table.items(): + if key in results: + if 'lifting_target' in results and key in { + 'keypoints', 'keypoints_visible' + }: + continue + gt_instances.set_field(results[key], packed_key) + + # pack `transformed_keypoints` for visualizing data transform + # and augmentation results + if self.pack_transformed and 'transformed_keypoints' in results: + gt_instances.set_field(results['transformed_keypoints'], + 'transformed_keypoints') + if self.pack_transformed and \ + 'transformed_keypoints_visible' in results: + gt_instances.set_field(results['transformed_keypoints_visible'], + 'transformed_keypoints_visible') + + data_sample.gt_instances = gt_instances + + # pack instance labels + gt_instance_labels = InstanceData() + for key, packed_key in self.label_mapping_table.items(): + if key in results: + # For pose-lifting, store only target-related fields + if 'lifting_target_label' in results and key in { + 'keypoint_labels', 'keypoint_weights', + 'transformed_keypoints_visible' + }: + continue + if isinstance(results[key], list): + # A list of labels is usually generated by combined + # multiple encoders (See ``GenerateTarget`` in + # mmpose/datasets/transforms/common_transforms.py) + # In this case, labels in list should have the same + # shape and will be stacked. + _labels = np.stack(results[key]) + gt_instance_labels.set_field(_labels, packed_key) + else: + gt_instance_labels.set_field(results[key], packed_key) + data_sample.gt_instance_labels = gt_instance_labels.to_tensor() + + # pack fields + gt_fields = None + for key, packed_key in self.field_mapping_table.items(): + if key in results: + if isinstance(results[key], list): + if gt_fields is None: + gt_fields = MultilevelPixelData() + else: + assert isinstance( + gt_fields, MultilevelPixelData + ), 'Got mixed single-level and multi-level pixel data.' + else: + if gt_fields is None: + gt_fields = PixelData() + else: + assert isinstance( + gt_fields, PixelData + ), 'Got mixed single-level and multi-level pixel data.' + + gt_fields.set_field(results[key], packed_key) + + if gt_fields: + data_sample.gt_fields = gt_fields.to_tensor() + + img_meta = {k: results[k] for k in self.meta_keys if k in results} + data_sample.set_metainfo(img_meta) + + packed_results = dict() + packed_results['inputs'] = inputs_tensor + packed_results['data_samples'] = data_sample + + return packed_results + + def __repr__(self) -> str: + """print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + repr_str += f'(meta_keys={self.meta_keys})' + return repr_str diff --git a/mmpose/datasets/transforms/loading.py b/mmpose/datasets/transforms/loading.py index 28edcb4806..2febbbdf2d 100644 --- a/mmpose/datasets/transforms/loading.py +++ b/mmpose/datasets/transforms/loading.py @@ -1,66 +1,66 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from typing import Optional - -import numpy as np -from mmcv.transforms import LoadImageFromFile - -from mmpose.registry import TRANSFORMS - - -@TRANSFORMS.register_module() -class LoadImage(LoadImageFromFile): - """Load an image from file or from the np.ndarray in ``results['img']``. - - Required Keys: - - - img_path - - img (optional) - - Modified Keys: - - - img - - img_shape - - ori_shape - - img_path (optional) - - Args: - to_float32 (bool): Whether to convert the loaded image to a float32 - numpy array. If set to False, the loaded image is an uint8 array. - Defaults to False. - color_type (str): The flag argument for :func:``mmcv.imfrombytes``. - Defaults to 'color'. - imdecode_backend (str): The image decoding backend type. The backend - argument for :func:``mmcv.imfrombytes``. - See :func:``mmcv.imfrombytes`` for details. - Defaults to 'cv2'. - backend_args (dict, optional): Arguments to instantiate the preifx of - uri corresponding backend. Defaults to None. - ignore_empty (bool): Whether to allow loading empty image or file path - not existent. Defaults to False. - """ - - def transform(self, results: dict) -> Optional[dict]: - """The transform function of :class:`LoadImage`. - - Args: - results (dict): The result dict - - Returns: - dict: The result dict. - """ - - if 'img' not in results: - # Load image from file by :meth:`LoadImageFromFile.transform` - results = super().transform(results) - else: - img = results['img'] - assert isinstance(img, np.ndarray) - if self.to_float32: - img = img.astype(np.float32) - - if 'img_path' not in results: - results['img_path'] = None - results['img_shape'] = img.shape[:2] - results['ori_shape'] = img.shape[:2] - - return results +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional + +import numpy as np +from mmcv.transforms import LoadImageFromFile + +from mmpose.registry import TRANSFORMS + + +@TRANSFORMS.register_module() +class LoadImage(LoadImageFromFile): + """Load an image from file or from the np.ndarray in ``results['img']``. + + Required Keys: + + - img_path + - img (optional) + + Modified Keys: + + - img + - img_shape + - ori_shape + - img_path (optional) + + Args: + to_float32 (bool): Whether to convert the loaded image to a float32 + numpy array. If set to False, the loaded image is an uint8 array. + Defaults to False. + color_type (str): The flag argument for :func:``mmcv.imfrombytes``. + Defaults to 'color'. + imdecode_backend (str): The image decoding backend type. The backend + argument for :func:``mmcv.imfrombytes``. + See :func:``mmcv.imfrombytes`` for details. + Defaults to 'cv2'. + backend_args (dict, optional): Arguments to instantiate the preifx of + uri corresponding backend. Defaults to None. + ignore_empty (bool): Whether to allow loading empty image or file path + not existent. Defaults to False. + """ + + def transform(self, results: dict) -> Optional[dict]: + """The transform function of :class:`LoadImage`. + + Args: + results (dict): The result dict + + Returns: + dict: The result dict. 
+ """ + + if 'img' not in results: + # Load image from file by :meth:`LoadImageFromFile.transform` + results = super().transform(results) + else: + img = results['img'] + assert isinstance(img, np.ndarray) + if self.to_float32: + img = img.astype(np.float32) + + if 'img_path' not in results: + results['img_path'] = None + results['img_shape'] = img.shape[:2] + results['ori_shape'] = img.shape[:2] + + return results diff --git a/mmpose/datasets/transforms/pose3d_transforms.py b/mmpose/datasets/transforms/pose3d_transforms.py index e6559fa398..096f892b32 100644 --- a/mmpose/datasets/transforms/pose3d_transforms.py +++ b/mmpose/datasets/transforms/pose3d_transforms.py @@ -1,105 +1,105 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from copy import deepcopy -from typing import Dict - -import numpy as np -from mmcv.transforms import BaseTransform - -from mmpose.registry import TRANSFORMS -from mmpose.structures.keypoint import flip_keypoints_custom_center - - -@TRANSFORMS.register_module() -class RandomFlipAroundRoot(BaseTransform): - """Data augmentation with random horizontal joint flip around a root joint. - - Args: - keypoints_flip_cfg (dict): Configurations of the - ``flip_keypoints_custom_center`` function for ``keypoints``. Please - refer to the docstring of the ``flip_keypoints_custom_center`` - function for more details. - target_flip_cfg (dict): Configurations of the - ``flip_keypoints_custom_center`` function for ``lifting_target``. - Please refer to the docstring of the - ``flip_keypoints_custom_center`` function for more details. - flip_prob (float): Probability of flip. Default: 0.5. - flip_camera (bool): Whether to flip horizontal distortion coefficients. - Default: ``False``. - - Required keys: - keypoints - lifting_target - - Modified keys: - (keypoints, keypoints_visible, lifting_target, lifting_target_visible, - camera_param) - """ - - def __init__(self, - keypoints_flip_cfg, - target_flip_cfg, - flip_prob=0.5, - flip_camera=False): - self.keypoints_flip_cfg = keypoints_flip_cfg - self.target_flip_cfg = target_flip_cfg - self.flip_prob = flip_prob - self.flip_camera = flip_camera - - def transform(self, results: Dict) -> dict: - """The transform function of :class:`ZeroCenterPose`. - - See ``transform()`` method of :class:`BaseTransform` for details. - - Args: - results (dict): The result dict - - Returns: - dict: The result dict. 
- """ - - keypoints = results['keypoints'] - if 'keypoints_visible' in results: - keypoints_visible = results['keypoints_visible'] - else: - keypoints_visible = np.ones(keypoints.shape[:-1], dtype=np.float32) - lifting_target = results['lifting_target'] - if 'lifting_target_visible' in results: - lifting_target_visible = results['lifting_target_visible'] - else: - lifting_target_visible = np.ones( - lifting_target.shape[:-1], dtype=np.float32) - - if np.random.rand() <= self.flip_prob: - if 'flip_indices' not in results: - flip_indices = list(range(self.num_keypoints)) - else: - flip_indices = results['flip_indices'] - - # flip joint coordinates - keypoints, keypoints_visible = flip_keypoints_custom_center( - keypoints, keypoints_visible, flip_indices, - **self.keypoints_flip_cfg) - lifting_target, lifting_target_visible = flip_keypoints_custom_center( # noqa - lifting_target, lifting_target_visible, flip_indices, - **self.target_flip_cfg) - - results['keypoints'] = keypoints - results['keypoints_visible'] = keypoints_visible - results['lifting_target'] = lifting_target - results['lifting_target_visible'] = lifting_target_visible - - # flip horizontal distortion coefficients - if self.flip_camera: - assert 'camera_param' in results, \ - 'Camera parameters are missing.' - _camera_param = deepcopy(results['camera_param']) - - assert 'c' in _camera_param - _camera_param['c'][0] *= -1 - - if 'p' in _camera_param: - _camera_param['p'][0] *= -1 - - results['camera_param'].update(_camera_param) - - return results +# Copyright (c) OpenMMLab. All rights reserved. +from copy import deepcopy +from typing import Dict + +import numpy as np +from mmcv.transforms import BaseTransform + +from mmpose.registry import TRANSFORMS +from mmpose.structures.keypoint import flip_keypoints_custom_center + + +@TRANSFORMS.register_module() +class RandomFlipAroundRoot(BaseTransform): + """Data augmentation with random horizontal joint flip around a root joint. + + Args: + keypoints_flip_cfg (dict): Configurations of the + ``flip_keypoints_custom_center`` function for ``keypoints``. Please + refer to the docstring of the ``flip_keypoints_custom_center`` + function for more details. + target_flip_cfg (dict): Configurations of the + ``flip_keypoints_custom_center`` function for ``lifting_target``. + Please refer to the docstring of the + ``flip_keypoints_custom_center`` function for more details. + flip_prob (float): Probability of flip. Default: 0.5. + flip_camera (bool): Whether to flip horizontal distortion coefficients. + Default: ``False``. + + Required keys: + keypoints + lifting_target + + Modified keys: + (keypoints, keypoints_visible, lifting_target, lifting_target_visible, + camera_param) + """ + + def __init__(self, + keypoints_flip_cfg, + target_flip_cfg, + flip_prob=0.5, + flip_camera=False): + self.keypoints_flip_cfg = keypoints_flip_cfg + self.target_flip_cfg = target_flip_cfg + self.flip_prob = flip_prob + self.flip_camera = flip_camera + + def transform(self, results: Dict) -> dict: + """The transform function of :class:`ZeroCenterPose`. + + See ``transform()`` method of :class:`BaseTransform` for details. + + Args: + results (dict): The result dict + + Returns: + dict: The result dict. 
+ """ + + keypoints = results['keypoints'] + if 'keypoints_visible' in results: + keypoints_visible = results['keypoints_visible'] + else: + keypoints_visible = np.ones(keypoints.shape[:-1], dtype=np.float32) + lifting_target = results['lifting_target'] + if 'lifting_target_visible' in results: + lifting_target_visible = results['lifting_target_visible'] + else: + lifting_target_visible = np.ones( + lifting_target.shape[:-1], dtype=np.float32) + + if np.random.rand() <= self.flip_prob: + if 'flip_indices' not in results: + flip_indices = list(range(self.num_keypoints)) + else: + flip_indices = results['flip_indices'] + + # flip joint coordinates + keypoints, keypoints_visible = flip_keypoints_custom_center( + keypoints, keypoints_visible, flip_indices, + **self.keypoints_flip_cfg) + lifting_target, lifting_target_visible = flip_keypoints_custom_center( # noqa + lifting_target, lifting_target_visible, flip_indices, + **self.target_flip_cfg) + + results['keypoints'] = keypoints + results['keypoints_visible'] = keypoints_visible + results['lifting_target'] = lifting_target + results['lifting_target_visible'] = lifting_target_visible + + # flip horizontal distortion coefficients + if self.flip_camera: + assert 'camera_param' in results, \ + 'Camera parameters are missing.' + _camera_param = deepcopy(results['camera_param']) + + assert 'c' in _camera_param + _camera_param['c'][0] *= -1 + + if 'p' in _camera_param: + _camera_param['p'][0] *= -1 + + results['camera_param'].update(_camera_param) + + return results diff --git a/mmpose/datasets/transforms/topdown_transforms.py b/mmpose/datasets/transforms/topdown_transforms.py index 29aa48eb06..d9992c7d8a 100644 --- a/mmpose/datasets/transforms/topdown_transforms.py +++ b/mmpose/datasets/transforms/topdown_transforms.py @@ -1,140 +1,140 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Dict, Optional, Tuple - -import cv2 -import numpy as np -from mmcv.transforms import BaseTransform -from mmengine import is_seq_of - -from mmpose.registry import TRANSFORMS -from mmpose.structures.bbox import get_udp_warp_matrix, get_warp_matrix - - -@TRANSFORMS.register_module() -class TopdownAffine(BaseTransform): - """Get the bbox image as the model input by affine transform. - - Required Keys: - - - img - - bbox_center - - bbox_scale - - bbox_rotation (optional) - - keypoints (optional) - - Modified Keys: - - - img - - bbox_scale - - Added Keys: - - - input_size - - transformed_keypoints - - Args: - input_size (Tuple[int, int]): The input image size of the model in - [w, h]. The bbox region will be cropped and resize to `input_size` - use_udp (bool): Whether use unbiased data processing. See - `UDP (CVPR 2020)`_ for details. Defaults to ``False`` - - .. _`UDP (CVPR 2020)`: https://arxiv.org/abs/1911.07524 - """ - - def __init__(self, - input_size: Tuple[int, int], - use_udp: bool = False) -> None: - super().__init__() - - assert is_seq_of(input_size, int) and len(input_size) == 2, ( - f'Invalid input_size {input_size}') - - self.input_size = input_size - self.use_udp = use_udp - - @staticmethod - def _fix_aspect_ratio(bbox_scale: np.ndarray, aspect_ratio: float): - """Reshape the bbox to a fixed aspect ratio. 
- - Args: - bbox_scale (np.ndarray): The bbox scales (w, h) in shape (n, 2) - aspect_ratio (float): The ratio of ``w/h`` - - Returns: - np.darray: The reshaped bbox scales in (n, 2) - """ - - w, h = np.hsplit(bbox_scale, [1]) - bbox_scale = np.where(w > h * aspect_ratio, - np.hstack([w, w / aspect_ratio]), - np.hstack([h * aspect_ratio, h])) - return bbox_scale - - def transform(self, results: Dict) -> Optional[dict]: - """The transform function of :class:`TopdownAffine`. - - See ``transform()`` method of :class:`BaseTransform` for details. - - Args: - results (dict): The result dict - - Returns: - dict: The result dict. - """ - - w, h = self.input_size - warp_size = (int(w), int(h)) - - # reshape bbox to fixed aspect ratio - results['bbox_scale'] = self._fix_aspect_ratio( - results['bbox_scale'], aspect_ratio=w / h) - - # TODO: support multi-instance - assert results['bbox_center'].shape[0] == 1, ( - 'Top-down heatmap only supports single instance. Got invalid ' - f'shape of bbox_center {results["bbox_center"].shape}.') - - center = results['bbox_center'][0] - scale = results['bbox_scale'][0] - if 'bbox_rotation' in results: - rot = results['bbox_rotation'][0] - else: - rot = 0. - - if self.use_udp: - warp_mat = get_udp_warp_matrix( - center, scale, rot, output_size=(w, h)) - else: - warp_mat = get_warp_matrix(center, scale, rot, output_size=(w, h)) - - if isinstance(results['img'], list): - results['img'] = [ - cv2.warpAffine( - img, warp_mat, warp_size, flags=cv2.INTER_LINEAR) - for img in results['img'] - ] - else: - results['img'] = cv2.warpAffine( - results['img'], warp_mat, warp_size, flags=cv2.INTER_LINEAR) - - if results.get('keypoints', None) is not None: - transformed_keypoints = results['keypoints'].copy() - # Only transform (x, y) coordinates - transformed_keypoints[..., :2] = cv2.transform( - results['keypoints'][..., :2], warp_mat) - results['transformed_keypoints'] = transformed_keypoints - - results['input_size'] = (w, h) - - return results - - def __repr__(self) -> str: - """print the basic information of the transform. - - Returns: - str: Formatted string. - """ - repr_str = self.__class__.__name__ - repr_str += f'(input_size={self.input_size}, ' - repr_str += f'use_udp={self.use_udp})' - return repr_str +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, Optional, Tuple + +import cv2 +import numpy as np +from mmcv.transforms import BaseTransform +from mmengine import is_seq_of + +from mmpose.registry import TRANSFORMS +from mmpose.structures.bbox import get_udp_warp_matrix, get_warp_matrix + + +@TRANSFORMS.register_module() +class TopdownAffine(BaseTransform): + """Get the bbox image as the model input by affine transform. + + Required Keys: + + - img + - bbox_center + - bbox_scale + - bbox_rotation (optional) + - keypoints (optional) + + Modified Keys: + + - img + - bbox_scale + + Added Keys: + + - input_size + - transformed_keypoints + + Args: + input_size (Tuple[int, int]): The input image size of the model in + [w, h]. The bbox region will be cropped and resize to `input_size` + use_udp (bool): Whether use unbiased data processing. See + `UDP (CVPR 2020)`_ for details. Defaults to ``False`` + + .. 
_`UDP (CVPR 2020)`: https://arxiv.org/abs/1911.07524 + """ + + def __init__(self, + input_size: Tuple[int, int], + use_udp: bool = False) -> None: + super().__init__() + + assert is_seq_of(input_size, int) and len(input_size) == 2, ( + f'Invalid input_size {input_size}') + + self.input_size = input_size + self.use_udp = use_udp + + @staticmethod + def _fix_aspect_ratio(bbox_scale: np.ndarray, aspect_ratio: float): + """Reshape the bbox to a fixed aspect ratio. + + Args: + bbox_scale (np.ndarray): The bbox scales (w, h) in shape (n, 2) + aspect_ratio (float): The ratio of ``w/h`` + + Returns: + np.darray: The reshaped bbox scales in (n, 2) + """ + + w, h = np.hsplit(bbox_scale, [1]) + bbox_scale = np.where(w > h * aspect_ratio, + np.hstack([w, w / aspect_ratio]), + np.hstack([h * aspect_ratio, h])) + return bbox_scale + + def transform(self, results: Dict) -> Optional[dict]: + """The transform function of :class:`TopdownAffine`. + + See ``transform()`` method of :class:`BaseTransform` for details. + + Args: + results (dict): The result dict + + Returns: + dict: The result dict. + """ + + w, h = self.input_size + warp_size = (int(w), int(h)) + + # reshape bbox to fixed aspect ratio + results['bbox_scale'] = self._fix_aspect_ratio( + results['bbox_scale'], aspect_ratio=w / h) + + # TODO: support multi-instance + assert results['bbox_center'].shape[0] == 1, ( + 'Top-down heatmap only supports single instance. Got invalid ' + f'shape of bbox_center {results["bbox_center"].shape}.') + + center = results['bbox_center'][0] + scale = results['bbox_scale'][0] + if 'bbox_rotation' in results: + rot = results['bbox_rotation'][0] + else: + rot = 0. + + if self.use_udp: + warp_mat = get_udp_warp_matrix( + center, scale, rot, output_size=(w, h)) + else: + warp_mat = get_warp_matrix(center, scale, rot, output_size=(w, h)) + + if isinstance(results['img'], list): + results['img'] = [ + cv2.warpAffine( + img, warp_mat, warp_size, flags=cv2.INTER_LINEAR) + for img in results['img'] + ] + else: + results['img'] = cv2.warpAffine( + results['img'], warp_mat, warp_size, flags=cv2.INTER_LINEAR) + + if results.get('keypoints', None) is not None: + transformed_keypoints = results['keypoints'].copy() + # Only transform (x, y) coordinates + transformed_keypoints[..., :2] = cv2.transform( + results['keypoints'][..., :2], warp_mat) + results['transformed_keypoints'] = transformed_keypoints + + results['input_size'] = (w, h) + + return results + + def __repr__(self) -> str: + """print the basic information of the transform. + + Returns: + str: Formatted string. 
+ """ + repr_str = self.__class__.__name__ + repr_str += f'(input_size={self.input_size}, ' + repr_str += f'use_udp={self.use_udp})' + return repr_str diff --git a/mmpose/datasets/transforms/warping.py b/mmpose/datasets/transforms/warping.py new file mode 100644 index 0000000000..be0d827fcd --- /dev/null +++ b/mmpose/datasets/transforms/warping.py @@ -0,0 +1,60 @@ +import cv2 +import numpy as np +import mmcv +from mmcv.transforms import BaseTransform, TRANSFORMS + +@TRANSFORMS.register_module() +class Warping(BaseTransform): + def __init__(self, direction: str, n_beams: int, scale: float): + super().__init__() + self.direction = direction + self.n_beams = n_beams + self.scale = scale + + def transform(self, results: dict) -> dict: + img = results['img'] + + if self.direction == 'cart2polar': + cart = img + ws = cart.shape[0] + cart = cv2.flip(cart, 0) + + dsize = (ws, self.n_beams) + center = (ws // 2.0, ws // 2.0) + max_radius = ws // 2.0 + flags = cv2.WARP_POLAR_LINEAR + cv2.WARP_FILL_OUTLIERS + cv2.INTER_CUBIC + + polar = cv2.warpPolar(cart, + dsize=dsize, + center=center, + maxRadius=max_radius, + flags=flags + ) + + img = cv2.rotate(cart, cv2.ROTATE_90_CLOCKWISE) + else: + polar_dtype = polar.dtype + if polar_dtype == np.bool_: + polar = polar.astype(np.uint8) * 255 + + ws = int(2 * self.scale * polar.shape[1]) + dsize = (ws, ws) + center = (ws // 2.0, ws // 2.0) + max_radius = ws // 2 + flags = cv2.WARP_POLAR_LINEAR | cv2.WARP_INVERSE_MAP | cv2.WARP_FILL_OUTLIERS | cv2.INTER_CUBIC + polar = cv2.rotate(polar, cv2.ROTATE_90_COUNTERCLOCKWISE) + cart = cv2.warpPolar(polar, + dsize=dsize, + center=center, + maxRadius=max_radius, + flags=flags + ) + cart = cv2.flip(cart, 0) + + if polar_dtype == np.bool_: + cart = cart.astype(np.bool_) + + img = cart + + results['img'] = img + return results \ No newline at end of file diff --git a/mmpose/engine/__init__.py b/mmpose/engine/__init__.py index ac85928986..53090550a7 100644 --- a/mmpose/engine/__init__.py +++ b/mmpose/engine/__init__.py @@ -1,3 +1,3 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .hooks import * # noqa: F401, F403 -from .optim_wrappers import * # noqa: F401, F403 +# Copyright (c) OpenMMLab. All rights reserved. +from .hooks import * # noqa: F401, F403 +from .optim_wrappers import * # noqa: F401, F403 diff --git a/mmpose/engine/hooks/__init__.py b/mmpose/engine/hooks/__init__.py index dadb9c5f91..4c98802fbb 100644 --- a/mmpose/engine/hooks/__init__.py +++ b/mmpose/engine/hooks/__init__.py @@ -1,5 +1,5 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .ema_hook import ExpMomentumEMA -from .visualization_hook import PoseVisualizationHook - -__all__ = ['PoseVisualizationHook', 'ExpMomentumEMA'] +# Copyright (c) OpenMMLab. All rights reserved. +from .ema_hook import ExpMomentumEMA +from .visualization_hook import PoseVisualizationHook + +__all__ = ['PoseVisualizationHook', 'ExpMomentumEMA'] diff --git a/mmpose/engine/hooks/ema_hook.py b/mmpose/engine/hooks/ema_hook.py index fd1a689f96..7d7da46d5c 100644 --- a/mmpose/engine/hooks/ema_hook.py +++ b/mmpose/engine/hooks/ema_hook.py @@ -1,69 +1,69 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math -from typing import Optional - -import torch -import torch.nn as nn -from mmengine.model import ExponentialMovingAverage -from torch import Tensor - -from mmpose.registry import MODELS - - -@MODELS.register_module() -class ExpMomentumEMA(ExponentialMovingAverage): - """Exponential moving average (EMA) with exponential momentum strategy, - which is used in YOLOX. 
- - Ported from ` the implementation of MMDetection - `_. - - Args: - model (nn.Module): The model to be averaged. - momentum (float): The momentum used for updating ema parameter. - Ema's parameter are updated with the formula: - `averaged_param = (1-momentum) * averaged_param + momentum * - source_param`. Defaults to 0.0002. - gamma (int): Use a larger momentum early in training and gradually - annealing to a smaller value to update the ema model smoothly. The - momentum is calculated as - `(1 - momentum) * exp(-(1 + steps) / gamma) + momentum`. - Defaults to 2000. - interval (int): Interval between two updates. Defaults to 1. - device (torch.device, optional): If provided, the averaged model will - be stored on the :attr:`device`. Defaults to None. - update_buffers (bool): if True, it will compute running averages for - both the parameters and the buffers of the model. Defaults to - False. - """ - - def __init__(self, - model: nn.Module, - momentum: float = 0.0002, - gamma: int = 2000, - interval=1, - device: Optional[torch.device] = None, - update_buffers: bool = False) -> None: - super().__init__( - model=model, - momentum=momentum, - interval=interval, - device=device, - update_buffers=update_buffers) - assert gamma > 0, f'gamma must be greater than 0, but got {gamma}' - self.gamma = gamma - - def avg_func(self, averaged_param: Tensor, source_param: Tensor, - steps: int) -> None: - """Compute the moving average of the parameters using the exponential - momentum strategy. - - Args: - averaged_param (Tensor): The averaged parameters. - source_param (Tensor): The source parameters. - steps (int): The number of times the parameters have been - updated. - """ - momentum = (1 - self.momentum) * math.exp( - -float(1 + steps) / self.gamma) + self.momentum - averaged_param.mul_(1 - momentum).add_(source_param, alpha=momentum) +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import Optional + +import torch +import torch.nn as nn +from mmengine.model import ExponentialMovingAverage +from torch import Tensor + +from mmpose.registry import MODELS + + +@MODELS.register_module() +class ExpMomentumEMA(ExponentialMovingAverage): + """Exponential moving average (EMA) with exponential momentum strategy, + which is used in YOLOX. + + Ported from ` the implementation of MMDetection + `_. + + Args: + model (nn.Module): The model to be averaged. + momentum (float): The momentum used for updating ema parameter. + Ema's parameter are updated with the formula: + `averaged_param = (1-momentum) * averaged_param + momentum * + source_param`. Defaults to 0.0002. + gamma (int): Use a larger momentum early in training and gradually + annealing to a smaller value to update the ema model smoothly. The + momentum is calculated as + `(1 - momentum) * exp(-(1 + steps) / gamma) + momentum`. + Defaults to 2000. + interval (int): Interval between two updates. Defaults to 1. + device (torch.device, optional): If provided, the averaged model will + be stored on the :attr:`device`. Defaults to None. + update_buffers (bool): if True, it will compute running averages for + both the parameters and the buffers of the model. Defaults to + False. 
+ """ + + def __init__(self, + model: nn.Module, + momentum: float = 0.0002, + gamma: int = 2000, + interval=1, + device: Optional[torch.device] = None, + update_buffers: bool = False) -> None: + super().__init__( + model=model, + momentum=momentum, + interval=interval, + device=device, + update_buffers=update_buffers) + assert gamma > 0, f'gamma must be greater than 0, but got {gamma}' + self.gamma = gamma + + def avg_func(self, averaged_param: Tensor, source_param: Tensor, + steps: int) -> None: + """Compute the moving average of the parameters using the exponential + momentum strategy. + + Args: + averaged_param (Tensor): The averaged parameters. + source_param (Tensor): The source parameters. + steps (int): The number of times the parameters have been + updated. + """ + momentum = (1 - self.momentum) * math.exp( + -float(1 + steps) / self.gamma) + self.momentum + averaged_param.mul_(1 - momentum).add_(source_param, alpha=momentum) diff --git a/mmpose/engine/hooks/visualization_hook.py b/mmpose/engine/hooks/visualization_hook.py index 24b845f282..6b5320a0b1 100644 --- a/mmpose/engine/hooks/visualization_hook.py +++ b/mmpose/engine/hooks/visualization_hook.py @@ -1,168 +1,168 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os -import warnings -from typing import Optional, Sequence - -import mmcv -import mmengine -import mmengine.fileio as fileio -from mmengine.hooks import Hook -from mmengine.runner import Runner -from mmengine.visualization import Visualizer - -from mmpose.registry import HOOKS -from mmpose.structures import PoseDataSample, merge_data_samples - - -@HOOKS.register_module() -class PoseVisualizationHook(Hook): - """Pose Estimation Visualization Hook. Used to visualize validation and - testing process prediction results. - - In the testing phase: - - 1. If ``show`` is True, it means that only the prediction results are - visualized without storing data, so ``vis_backends`` needs to - be excluded. - 2. If ``out_dir`` is specified, it means that the prediction results - need to be saved to ``out_dir``. In order to avoid vis_backends - also storing data, so ``vis_backends`` needs to be excluded. - 3. ``vis_backends`` takes effect if the user does not specify ``show`` - and `out_dir``. You can set ``vis_backends`` to WandbVisBackend or - TensorboardVisBackend to store the prediction result in Wandb or - Tensorboard. - - Args: - enable (bool): whether to draw prediction results. If it is False, - it means that no drawing will be done. Defaults to False. - interval (int): The interval of visualization. Defaults to 50. - score_thr (float): The threshold to visualize the bboxes - and masks. Defaults to 0.3. - show (bool): Whether to display the drawn image. Default to False. - wait_time (float): The interval of show (s). Defaults to 0. - out_dir (str, optional): directory where painted images - will be saved in testing process. - backend_args (dict, optional): Arguments to instantiate the preifx of - uri corresponding backend. Defaults to None. - """ - - def __init__( - self, - enable: bool = False, - interval: int = 50, - kpt_thr: float = 0.3, - show: bool = False, - wait_time: float = 0., - out_dir: Optional[str] = None, - backend_args: Optional[dict] = None, - ): - self._visualizer: Visualizer = Visualizer.get_current_instance() - self.interval = interval - self.kpt_thr = kpt_thr - self.show = show - if self.show: - # No need to think about vis backends. 
- self._visualizer._vis_backends = {} - warnings.warn('The show is True, it means that only ' - 'the prediction results are visualized ' - 'without storing data, so vis_backends ' - 'needs to be excluded.') - - self.wait_time = wait_time - self.enable = enable - self.out_dir = out_dir - self._test_index = 0 - self.backend_args = backend_args - - def after_val_iter(self, runner: Runner, batch_idx: int, data_batch: dict, - outputs: Sequence[PoseDataSample]) -> None: - """Run after every ``self.interval`` validation iterations. - - Args: - runner (:obj:`Runner`): The runner of the validation process. - batch_idx (int): The index of the current batch in the val loop. - data_batch (dict): Data from dataloader. - outputs (Sequence[:obj:`PoseDataSample`]): Outputs from model. - """ - if self.enable is False: - return - - self._visualizer.set_dataset_meta(runner.val_evaluator.dataset_meta) - - # There is no guarantee that the same batch of images - # is visualized for each evaluation. - total_curr_iter = runner.iter + batch_idx - - # Visualize only the first data - img_path = data_batch['data_samples'][0].get('img_path') - img_bytes = fileio.get(img_path, backend_args=self.backend_args) - img = mmcv.imfrombytes(img_bytes, channel_order='rgb') - data_sample = outputs[0] - - # revert the heatmap on the original image - data_sample = merge_data_samples([data_sample]) - - if total_curr_iter % self.interval == 0: - self._visualizer.add_datasample( - os.path.basename(img_path) if self.show else 'val_img', - img, - data_sample=data_sample, - draw_gt=False, - draw_bbox=True, - draw_heatmap=True, - show=self.show, - wait_time=self.wait_time, - kpt_thr=self.kpt_thr, - step=total_curr_iter) - - def after_test_iter(self, runner: Runner, batch_idx: int, data_batch: dict, - outputs: Sequence[PoseDataSample]) -> None: - """Run after every testing iterations. - - Args: - runner (:obj:`Runner`): The runner of the testing process. - batch_idx (int): The index of the current batch in the test loop. - data_batch (dict): Data from dataloader. - outputs (Sequence[:obj:`PoseDataSample`]): Outputs from model. - """ - if self.enable is False: - return - - if self.out_dir is not None: - self.out_dir = os.path.join(runner.work_dir, runner.timestamp, - self.out_dir) - mmengine.mkdir_or_exist(self.out_dir) - - self._visualizer.set_dataset_meta(runner.test_evaluator.dataset_meta) - - for data_sample in outputs: - self._test_index += 1 - - img_path = data_sample.get('img_path') - img_bytes = fileio.get(img_path, backend_args=self.backend_args) - img = mmcv.imfrombytes(img_bytes, channel_order='rgb') - data_sample = merge_data_samples([data_sample]) - - out_file = None - if self.out_dir is not None: - out_file_name, postfix = os.path.basename(img_path).rsplit( - '.', 1) - index = len([ - fname for fname in os.listdir(self.out_dir) - if fname.startswith(out_file_name) - ]) - out_file = f'{out_file_name}_{index}.{postfix}' - out_file = os.path.join(self.out_dir, out_file) - - self._visualizer.add_datasample( - os.path.basename(img_path) if self.show else 'test_img', - img, - data_sample=data_sample, - show=self.show, - draw_gt=False, - draw_bbox=True, - draw_heatmap=True, - wait_time=self.wait_time, - kpt_thr=self.kpt_thr, - out_file=out_file, - step=self._test_index) +# Copyright (c) OpenMMLab. All rights reserved. 
+import os +import warnings +from typing import Optional, Sequence + +import mmcv +import mmengine +import mmengine.fileio as fileio +from mmengine.hooks import Hook +from mmengine.runner import Runner +from mmengine.visualization import Visualizer + +from mmpose.registry import HOOKS +from mmpose.structures import PoseDataSample, merge_data_samples + + +@HOOKS.register_module() +class PoseVisualizationHook(Hook): + """Pose Estimation Visualization Hook. Used to visualize validation and + testing process prediction results. + + In the testing phase: + + 1. If ``show`` is True, it means that only the prediction results are + visualized without storing data, so ``vis_backends`` needs to + be excluded. + 2. If ``out_dir`` is specified, it means that the prediction results + need to be saved to ``out_dir``. In order to avoid vis_backends + also storing data, so ``vis_backends`` needs to be excluded. + 3. ``vis_backends`` takes effect if the user does not specify ``show`` + and `out_dir``. You can set ``vis_backends`` to WandbVisBackend or + TensorboardVisBackend to store the prediction result in Wandb or + Tensorboard. + + Args: + enable (bool): whether to draw prediction results. If it is False, + it means that no drawing will be done. Defaults to False. + interval (int): The interval of visualization. Defaults to 50. + score_thr (float): The threshold to visualize the bboxes + and masks. Defaults to 0.3. + show (bool): Whether to display the drawn image. Default to False. + wait_time (float): The interval of show (s). Defaults to 0. + out_dir (str, optional): directory where painted images + will be saved in testing process. + backend_args (dict, optional): Arguments to instantiate the preifx of + uri corresponding backend. Defaults to None. + """ + + def __init__( + self, + enable: bool = False, + interval: int = 50, + kpt_thr: float = 0.3, + show: bool = False, + wait_time: float = 0., + out_dir: Optional[str] = None, + backend_args: Optional[dict] = None, + ): + self._visualizer: Visualizer = Visualizer.get_current_instance() + self.interval = interval + self.kpt_thr = kpt_thr + self.show = show + if self.show: + # No need to think about vis backends. + self._visualizer._vis_backends = {} + warnings.warn('The show is True, it means that only ' + 'the prediction results are visualized ' + 'without storing data, so vis_backends ' + 'needs to be excluded.') + + self.wait_time = wait_time + self.enable = enable + self.out_dir = out_dir + self._test_index = 0 + self.backend_args = backend_args + + def after_val_iter(self, runner: Runner, batch_idx: int, data_batch: dict, + outputs: Sequence[PoseDataSample]) -> None: + """Run after every ``self.interval`` validation iterations. + + Args: + runner (:obj:`Runner`): The runner of the validation process. + batch_idx (int): The index of the current batch in the val loop. + data_batch (dict): Data from dataloader. + outputs (Sequence[:obj:`PoseDataSample`]): Outputs from model. + """ + if self.enable is False: + return + + self._visualizer.set_dataset_meta(runner.val_evaluator.dataset_meta) + + # There is no guarantee that the same batch of images + # is visualized for each evaluation. 
+ total_curr_iter = runner.iter + batch_idx + + # Visualize only the first data + img_path = data_batch['data_samples'][0].get('img_path') + img_bytes = fileio.get(img_path, backend_args=self.backend_args) + img = mmcv.imfrombytes(img_bytes, channel_order='rgb') + data_sample = outputs[0] + + # revert the heatmap on the original image + data_sample = merge_data_samples([data_sample]) + + if total_curr_iter % self.interval == 0: + self._visualizer.add_datasample( + os.path.basename(img_path) if self.show else 'val_img', + img, + data_sample=data_sample, + draw_gt=False, + draw_bbox=True, + draw_heatmap=True, + show=self.show, + wait_time=self.wait_time, + kpt_thr=self.kpt_thr, + step=total_curr_iter) + + def after_test_iter(self, runner: Runner, batch_idx: int, data_batch: dict, + outputs: Sequence[PoseDataSample]) -> None: + """Run after every testing iterations. + + Args: + runner (:obj:`Runner`): The runner of the testing process. + batch_idx (int): The index of the current batch in the test loop. + data_batch (dict): Data from dataloader. + outputs (Sequence[:obj:`PoseDataSample`]): Outputs from model. + """ + if self.enable is False: + return + + if self.out_dir is not None: + self.out_dir = os.path.join(runner.work_dir, runner.timestamp, + self.out_dir) + mmengine.mkdir_or_exist(self.out_dir) + + self._visualizer.set_dataset_meta(runner.test_evaluator.dataset_meta) + + for data_sample in outputs: + self._test_index += 1 + + img_path = data_sample.get('img_path') + img_bytes = fileio.get(img_path, backend_args=self.backend_args) + img = mmcv.imfrombytes(img_bytes, channel_order='rgb') + data_sample = merge_data_samples([data_sample]) + + out_file = None + if self.out_dir is not None: + out_file_name, postfix = os.path.basename(img_path).rsplit( + '.', 1) + index = len([ + fname for fname in os.listdir(self.out_dir) + if fname.startswith(out_file_name) + ]) + out_file = f'{out_file_name}_{index}.{postfix}' + out_file = os.path.join(self.out_dir, out_file) + + self._visualizer.add_datasample( + os.path.basename(img_path) if self.show else 'test_img', + img, + data_sample=data_sample, + show=self.show, + draw_gt=False, + draw_bbox=True, + draw_heatmap=True, + wait_time=self.wait_time, + kpt_thr=self.kpt_thr, + out_file=out_file, + step=self._test_index) diff --git a/mmpose/engine/optim_wrappers/__init__.py b/mmpose/engine/optim_wrappers/__init__.py index 7c0b1f533a..ca0b9fcc7a 100644 --- a/mmpose/engine/optim_wrappers/__init__.py +++ b/mmpose/engine/optim_wrappers/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .layer_decay_optim_wrapper import LayerDecayOptimWrapperConstructor - -__all__ = ['LayerDecayOptimWrapperConstructor'] +# Copyright (c) OpenMMLab. All rights reserved. +from .layer_decay_optim_wrapper import LayerDecayOptimWrapperConstructor + +__all__ = ['LayerDecayOptimWrapperConstructor'] diff --git a/mmpose/engine/optim_wrappers/layer_decay_optim_wrapper.py b/mmpose/engine/optim_wrappers/layer_decay_optim_wrapper.py index 6513e5593d..631a3cef23 100644 --- a/mmpose/engine/optim_wrappers/layer_decay_optim_wrapper.py +++ b/mmpose/engine/optim_wrappers/layer_decay_optim_wrapper.py @@ -1,73 +1,73 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
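A hedged config sketch for enabling the re-added hook; the `visualization` slot in `default_hooks` follows the usual MMPose runtime config, and the output directory name is only a placeholder.

# Visualize every 50th validation sample, and save painted test images
# under <work_dir>/<timestamp>/vis_results/ instead of showing them.
default_hooks = dict(
    visualization=dict(
        type='PoseVisualizationHook',
        enable=True,
        interval=50,
        kpt_thr=0.3,
        show=False,
        out_dir='vis_results'))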
-from mmengine.dist.utils import get_dist_info -from mmengine.optim import DefaultOptimWrapperConstructor -from mmengine.registry import OPTIM_WRAPPER_CONSTRUCTORS - - -def get_num_layer_for_vit(var_name, num_max_layer): - if var_name in ('backbone.cls_token', 'backbone.mask_token', - 'backbone.pos_embed'): - return 0 - elif var_name.startswith('backbone.patch_embed'): - return 0 - elif var_name.startswith('backbone.layers'): - layer_id = int(var_name.split('.')[2]) - return layer_id + 1 - else: - return num_max_layer - 1 - - -@OPTIM_WRAPPER_CONSTRUCTORS.register_module(force=True) -class LayerDecayOptimWrapperConstructor(DefaultOptimWrapperConstructor): - - def __init__(self, optim_wrapper_cfg, paramwise_cfg=None): - super().__init__(optim_wrapper_cfg, paramwise_cfg=None) - self.layer_decay_rate = paramwise_cfg.get('layer_decay_rate', 0.5) - - super().__init__(optim_wrapper_cfg, paramwise_cfg) - - def add_params(self, params, module, prefix='', lr=None): - parameter_groups = {} - print(self.paramwise_cfg) - num_layers = self.paramwise_cfg.get('num_layers') + 2 - layer_decay_rate = self.paramwise_cfg.get('layer_decay_rate') - weight_decay = self.base_wd - - for name, param in module.named_parameters(): - if not param.requires_grad: - continue # frozen weights - if (len(param.shape) == 1 or name.endswith('.bias') - or 'pos_embed' in name): - group_name = 'no_decay' - this_weight_decay = 0. - else: - group_name = 'decay' - this_weight_decay = weight_decay - layer_id = get_num_layer_for_vit(name, num_layers) - group_name = 'layer_%d_%s' % (layer_id, group_name) - - if group_name not in parameter_groups: - scale = layer_decay_rate**(num_layers - layer_id - 1) - - parameter_groups[group_name] = { - 'weight_decay': this_weight_decay, - 'params': [], - 'param_names': [], - 'lr_scale': scale, - 'group_name': group_name, - 'lr': scale * self.base_lr, - } - - parameter_groups[group_name]['params'].append(param) - parameter_groups[group_name]['param_names'].append(name) - rank, _ = get_dist_info() - if rank == 0: - to_display = {} - for key in parameter_groups: - to_display[key] = { - 'param_names': parameter_groups[key]['param_names'], - 'lr_scale': parameter_groups[key]['lr_scale'], - 'lr': parameter_groups[key]['lr'], - 'weight_decay': parameter_groups[key]['weight_decay'], - } - params.extend(parameter_groups.values()) +# Copyright (c) OpenMMLab. All rights reserved. 
+from mmengine.dist.utils import get_dist_info +from mmengine.optim import DefaultOptimWrapperConstructor +from mmengine.registry import OPTIM_WRAPPER_CONSTRUCTORS + + +def get_num_layer_for_vit(var_name, num_max_layer): + if var_name in ('backbone.cls_token', 'backbone.mask_token', + 'backbone.pos_embed'): + return 0 + elif var_name.startswith('backbone.patch_embed'): + return 0 + elif var_name.startswith('backbone.layers'): + layer_id = int(var_name.split('.')[2]) + return layer_id + 1 + else: + return num_max_layer - 1 + + +@OPTIM_WRAPPER_CONSTRUCTORS.register_module(force=True) +class LayerDecayOptimWrapperConstructor(DefaultOptimWrapperConstructor): + + def __init__(self, optim_wrapper_cfg, paramwise_cfg=None): + super().__init__(optim_wrapper_cfg, paramwise_cfg=None) + self.layer_decay_rate = paramwise_cfg.get('layer_decay_rate', 0.5) + + super().__init__(optim_wrapper_cfg, paramwise_cfg) + + def add_params(self, params, module, prefix='', lr=None): + parameter_groups = {} + print(self.paramwise_cfg) + num_layers = self.paramwise_cfg.get('num_layers') + 2 + layer_decay_rate = self.paramwise_cfg.get('layer_decay_rate') + weight_decay = self.base_wd + + for name, param in module.named_parameters(): + if not param.requires_grad: + continue # frozen weights + if (len(param.shape) == 1 or name.endswith('.bias') + or 'pos_embed' in name): + group_name = 'no_decay' + this_weight_decay = 0. + else: + group_name = 'decay' + this_weight_decay = weight_decay + layer_id = get_num_layer_for_vit(name, num_layers) + group_name = 'layer_%d_%s' % (layer_id, group_name) + + if group_name not in parameter_groups: + scale = layer_decay_rate**(num_layers - layer_id - 1) + + parameter_groups[group_name] = { + 'weight_decay': this_weight_decay, + 'params': [], + 'param_names': [], + 'lr_scale': scale, + 'group_name': group_name, + 'lr': scale * self.base_lr, + } + + parameter_groups[group_name]['params'].append(param) + parameter_groups[group_name]['param_names'].append(name) + rank, _ = get_dist_info() + if rank == 0: + to_display = {} + for key in parameter_groups: + to_display[key] = { + 'param_names': parameter_groups[key]['param_names'], + 'lr_scale': parameter_groups[key]['lr_scale'], + 'lr': parameter_groups[key]['lr'], + 'weight_decay': parameter_groups[key]['weight_decay'], + } + params.extend(parameter_groups.values()) diff --git a/mmpose/evaluation/__init__.py b/mmpose/evaluation/__init__.py index f70dc226d3..bf038e034f 100644 --- a/mmpose/evaluation/__init__.py +++ b/mmpose/evaluation/__init__.py @@ -1,3 +1,3 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .functional import * # noqa: F401,F403 -from .metrics import * # noqa: F401,F403 +# Copyright (c) OpenMMLab. All rights reserved. +from .functional import * # noqa: F401,F403 +from .metrics import * # noqa: F401,F403 diff --git a/mmpose/evaluation/functional/__init__.py b/mmpose/evaluation/functional/__init__.py index 49f243163c..f5cb4c80af 100644 --- a/mmpose/evaluation/functional/__init__.py +++ b/mmpose/evaluation/functional/__init__.py @@ -1,12 +1,12 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
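# Sanity-check sketch (assumed values, not part of the patch) of the layer-wise
# learning-rate scaling applied by LayerDecayOptimWrapperConstructor above:
# scale = layer_decay_rate ** (num_layers - layer_id - 1), so earlier ViT blocks
# get geometrically smaller learning rates while the head keeps scale 1.0.
def lr_scale(var_name, num_layers, layer_decay_rate):
    # mirrors get_num_layer_for_vit plus the scale rule in add_params
    if var_name in ('backbone.cls_token', 'backbone.mask_token',
                    'backbone.pos_embed'):
        layer_id = 0
    elif var_name.startswith('backbone.patch_embed'):
        layer_id = 0
    elif var_name.startswith('backbone.layers'):
        layer_id = int(var_name.split('.')[2]) + 1
    else:
        layer_id = num_layers - 1
    return layer_decay_rate ** (num_layers - layer_id - 1)

num_layers = 12 + 2   # paramwise_cfg['num_layers'] + 2, e.g. a 12-block ViT-B
rate = 0.75           # paramwise_cfg['layer_decay_rate']
print(lr_scale('backbone.patch_embed.proj.weight', num_layers, rate))   # rate**13, smallest
print(lr_scale('backbone.layers.11.mlp.fc1.weight', num_layers, rate))  # rate**1
print(lr_scale('head.final_layer.weight', num_layers, rate))            # 1.0 (hypothetical head param)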
-from .keypoint_eval import (keypoint_auc, keypoint_epe, keypoint_mpjpe, - keypoint_nme, keypoint_pck_accuracy, - multilabel_classification_accuracy, - pose_pck_accuracy, simcc_pck_accuracy) -from .nms import nms, oks_nms, soft_oks_nms - -__all__ = [ - 'keypoint_pck_accuracy', 'keypoint_auc', 'keypoint_nme', 'keypoint_epe', - 'pose_pck_accuracy', 'multilabel_classification_accuracy', - 'simcc_pck_accuracy', 'nms', 'oks_nms', 'soft_oks_nms', 'keypoint_mpjpe' -] +# Copyright (c) OpenMMLab. All rights reserved. +from .keypoint_eval import (keypoint_auc, keypoint_epe, keypoint_mpjpe, + keypoint_nme, keypoint_pck_accuracy, + multilabel_classification_accuracy, + pose_pck_accuracy, simcc_pck_accuracy) +from .nms import nms, oks_nms, soft_oks_nms + +__all__ = [ + 'keypoint_pck_accuracy', 'keypoint_auc', 'keypoint_nme', 'keypoint_epe', + 'pose_pck_accuracy', 'multilabel_classification_accuracy', + 'simcc_pck_accuracy', 'nms', 'oks_nms', 'soft_oks_nms', 'keypoint_mpjpe' +] diff --git a/mmpose/evaluation/functional/keypoint_eval.py b/mmpose/evaluation/functional/keypoint_eval.py index 847faaf6d8..cab4fb8fda 100644 --- a/mmpose/evaluation/functional/keypoint_eval.py +++ b/mmpose/evaluation/functional/keypoint_eval.py @@ -1,375 +1,375 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Optional, Tuple - -import numpy as np - -from mmpose.codecs.utils import get_heatmap_maximum, get_simcc_maximum -from .mesh_eval import compute_similarity_transform - - -def _calc_distances(preds: np.ndarray, gts: np.ndarray, mask: np.ndarray, - norm_factor: np.ndarray) -> np.ndarray: - """Calculate the normalized distances between preds and target. - - Note: - - instance number: N - - keypoint number: K - - keypoint dimension: D (normally, D=2 or D=3) - - Args: - preds (np.ndarray[N, K, D]): Predicted keypoint location. - gts (np.ndarray[N, K, D]): Groundtruth keypoint location. - mask (np.ndarray[N, K]): Visibility of the target. False for invisible - joints, and True for visible. Invisible joints will be ignored for - accuracy calculation. - norm_factor (np.ndarray[N, D]): Normalization factor. - Typical value is heatmap_size. - - Returns: - np.ndarray[K, N]: The normalized distances. \ - If target keypoints are missing, the distance is -1. - """ - N, K, _ = preds.shape - # set mask=0 when norm_factor==0 - _mask = mask.copy() - _mask[np.where((norm_factor == 0).sum(1))[0], :] = False - - distances = np.full((N, K), -1, dtype=np.float32) - # handle invalid values - norm_factor[np.where(norm_factor <= 0)] = 1e6 - distances[_mask] = np.linalg.norm( - ((preds - gts) / norm_factor[:, None, :])[_mask], axis=-1) - return distances.T - - -def _distance_acc(distances: np.ndarray, thr: float = 0.5) -> float: - """Return the percentage below the distance threshold, while ignoring - distances values with -1. - - Note: - - instance number: N - - Args: - distances (np.ndarray[N, ]): The normalized distances. - thr (float): Threshold of the distances. - - Returns: - float: Percentage of distances below the threshold. \ - If all target keypoints are missing, return -1. 
- """ - distance_valid = distances != -1 - num_distance_valid = distance_valid.sum() - if num_distance_valid > 0: - return (distances[distance_valid] < thr).sum() / num_distance_valid - return -1 - - -def keypoint_pck_accuracy(pred: np.ndarray, gt: np.ndarray, mask: np.ndarray, - thr: np.ndarray, norm_factor: np.ndarray) -> tuple: - """Calculate the pose accuracy of PCK for each individual keypoint and the - averaged accuracy across all keypoints for coordinates. - - Note: - PCK metric measures accuracy of the localization of the body joints. - The distances between predicted positions and the ground-truth ones - are typically normalized by the bounding box size. - The threshold (thr) of the normalized distance is commonly set - as 0.05, 0.1 or 0.2 etc. - - - instance number: N - - keypoint number: K - - Args: - pred (np.ndarray[N, K, 2]): Predicted keypoint location. - gt (np.ndarray[N, K, 2]): Groundtruth keypoint location. - mask (np.ndarray[N, K]): Visibility of the target. False for invisible - joints, and True for visible. Invisible joints will be ignored for - accuracy calculation. - thr (float): Threshold of PCK calculation. - norm_factor (np.ndarray[N, 2]): Normalization factor for H&W. - - Returns: - tuple: A tuple containing keypoint accuracy. - - - acc (np.ndarray[K]): Accuracy of each keypoint. - - avg_acc (float): Averaged accuracy across all keypoints. - - cnt (int): Number of valid keypoints. - """ - distances = _calc_distances(pred, gt, mask, norm_factor) - acc = np.array([_distance_acc(d, thr) for d in distances]) - valid_acc = acc[acc >= 0] - cnt = len(valid_acc) - avg_acc = valid_acc.mean() if cnt > 0 else 0.0 - return acc, avg_acc, cnt - - -def keypoint_auc(pred: np.ndarray, - gt: np.ndarray, - mask: np.ndarray, - norm_factor: np.ndarray, - num_thrs: int = 20) -> float: - """Calculate the Area under curve (AUC) of keypoint PCK accuracy. - - Note: - - instance number: N - - keypoint number: K - - Args: - pred (np.ndarray[N, K, 2]): Predicted keypoint location. - gt (np.ndarray[N, K, 2]): Groundtruth keypoint location. - mask (np.ndarray[N, K]): Visibility of the target. False for invisible - joints, and True for visible. Invisible joints will be ignored for - accuracy calculation. - norm_factor (float): Normalization factor. - num_thrs (int): number of thresholds to calculate auc. - - Returns: - float: Area under curve (AUC) of keypoint PCK accuracy. - """ - nor = np.tile(np.array([[norm_factor, norm_factor]]), (pred.shape[0], 1)) - thrs = [1.0 * i / num_thrs for i in range(num_thrs)] - avg_accs = [] - for thr in thrs: - _, avg_acc, _ = keypoint_pck_accuracy(pred, gt, mask, thr, nor) - avg_accs.append(avg_acc) - - auc = 0 - for i in range(num_thrs): - auc += 1.0 / num_thrs * avg_accs[i] - return auc - - -def keypoint_nme(pred: np.ndarray, gt: np.ndarray, mask: np.ndarray, - normalize_factor: np.ndarray) -> float: - """Calculate the normalized mean error (NME). - - Note: - - instance number: N - - keypoint number: K - - Args: - pred (np.ndarray[N, K, 2]): Predicted keypoint location. - gt (np.ndarray[N, K, 2]): Groundtruth keypoint location. - mask (np.ndarray[N, K]): Visibility of the target. False for invisible - joints, and True for visible. Invisible joints will be ignored for - accuracy calculation. - normalize_factor (np.ndarray[N, 2]): Normalization factor. 
- - Returns: - float: normalized mean error - """ - distances = _calc_distances(pred, gt, mask, normalize_factor) - distance_valid = distances[distances != -1] - return distance_valid.sum() / max(1, len(distance_valid)) - - -def keypoint_epe(pred: np.ndarray, gt: np.ndarray, mask: np.ndarray) -> float: - """Calculate the end-point error. - - Note: - - instance number: N - - keypoint number: K - - Args: - pred (np.ndarray[N, K, 2]): Predicted keypoint location. - gt (np.ndarray[N, K, 2]): Groundtruth keypoint location. - mask (np.ndarray[N, K]): Visibility of the target. False for invisible - joints, and True for visible. Invisible joints will be ignored for - accuracy calculation. - - Returns: - float: Average end-point error. - """ - - distances = _calc_distances( - pred, gt, mask, - np.ones((pred.shape[0], pred.shape[2]), dtype=np.float32)) - distance_valid = distances[distances != -1] - return distance_valid.sum() / max(1, len(distance_valid)) - - -def pose_pck_accuracy(output: np.ndarray, - target: np.ndarray, - mask: np.ndarray, - thr: float = 0.05, - normalize: Optional[np.ndarray] = None) -> tuple: - """Calculate the pose accuracy of PCK for each individual keypoint and the - averaged accuracy across all keypoints from heatmaps. - - Note: - PCK metric measures accuracy of the localization of the body joints. - The distances between predicted positions and the ground-truth ones - are typically normalized by the bounding box size. - The threshold (thr) of the normalized distance is commonly set - as 0.05, 0.1 or 0.2 etc. - - - batch_size: N - - num_keypoints: K - - heatmap height: H - - heatmap width: W - - Args: - output (np.ndarray[N, K, H, W]): Model output heatmaps. - target (np.ndarray[N, K, H, W]): Groundtruth heatmaps. - mask (np.ndarray[N, K]): Visibility of the target. False for invisible - joints, and True for visible. Invisible joints will be ignored for - accuracy calculation. - thr (float): Threshold of PCK calculation. Default 0.05. - normalize (np.ndarray[N, 2]): Normalization factor for H&W. - - Returns: - tuple: A tuple containing keypoint accuracy. - - - np.ndarray[K]: Accuracy of each keypoint. - - float: Averaged accuracy across all keypoints. - - int: Number of valid keypoints. - """ - N, K, H, W = output.shape - if K == 0: - return None, 0, 0 - if normalize is None: - normalize = np.tile(np.array([[H, W]]), (N, 1)) - - pred, _ = get_heatmap_maximum(output) - gt, _ = get_heatmap_maximum(target) - return keypoint_pck_accuracy(pred, gt, mask, thr, normalize) - - -def simcc_pck_accuracy(output: Tuple[np.ndarray, np.ndarray], - target: Tuple[np.ndarray, np.ndarray], - simcc_split_ratio: float, - mask: np.ndarray, - thr: float = 0.05, - normalize: Optional[np.ndarray] = None) -> tuple: - """Calculate the pose accuracy of PCK for each individual keypoint and the - averaged accuracy across all keypoints from SimCC. - - Note: - PCK metric measures accuracy of the localization of the body joints. - The distances between predicted positions and the ground-truth ones - are typically normalized by the bounding box size. - The threshold (thr) of the normalized distance is commonly set - as 0.05, 0.1 or 0.2 etc. - - - instance number: N - - keypoint number: K - - Args: - output (Tuple[np.ndarray, np.ndarray]): Model predicted SimCC. - target (Tuple[np.ndarray, np.ndarray]): Groundtruth SimCC. - mask (np.ndarray[N, K]): Visibility of the target. False for invisible - joints, and True for visible. Invisible joints will be ignored for - accuracy calculation. 
- thr (float): Threshold of PCK calculation. Default 0.05. - normalize (np.ndarray[N, 2]): Normalization factor for H&W. - - Returns: - tuple: A tuple containing keypoint accuracy. - - - np.ndarray[K]: Accuracy of each keypoint. - - float: Averaged accuracy across all keypoints. - - int: Number of valid keypoints. - """ - pred_x, pred_y = output - gt_x, gt_y = target - - N, _, Wx = pred_x.shape - _, _, Wy = pred_y.shape - W, H = int(Wx / simcc_split_ratio), int(Wy / simcc_split_ratio) - - if normalize is None: - normalize = np.tile(np.array([[H, W]]), (N, 1)) - - pred_coords, _ = get_simcc_maximum(pred_x, pred_y) - pred_coords /= simcc_split_ratio - gt_coords, _ = get_simcc_maximum(gt_x, gt_y) - gt_coords /= simcc_split_ratio - - return keypoint_pck_accuracy(pred_coords, gt_coords, mask, thr, normalize) - - -def multilabel_classification_accuracy(pred: np.ndarray, - gt: np.ndarray, - mask: np.ndarray, - thr: float = 0.5) -> float: - """Get multi-label classification accuracy. - - Note: - - batch size: N - - label number: L - - Args: - pred (np.ndarray[N, L, 2]): model predicted labels. - gt (np.ndarray[N, L, 2]): ground-truth labels. - mask (np.ndarray[N, 1] or np.ndarray[N, L] ): reliability of - ground-truth labels. - thr (float): Threshold for calculating accuracy. - - Returns: - float: multi-label classification accuracy. - """ - # we only compute accuracy on the samples with ground-truth of all labels. - valid = (mask > 0).min(axis=1) if mask.ndim == 2 else (mask > 0) - pred, gt = pred[valid], gt[valid] - - if pred.shape[0] == 0: - acc = 0.0 # when no sample is with gt labels, set acc to 0. - else: - # The classification of a sample is regarded as correct - # only if it's correct for all labels. - acc = (((pred - thr) * (gt - thr)) > 0).all(axis=1).mean() - return acc - - -def keypoint_mpjpe(pred: np.ndarray, - gt: np.ndarray, - mask: np.ndarray, - alignment: str = 'none'): - """Calculate the mean per-joint position error (MPJPE) and the error after - rigid alignment with the ground truth (P-MPJPE). - - Note: - - batch_size: N - - num_keypoints: K - - keypoint_dims: C - - Args: - pred (np.ndarray): Predicted keypoint location with shape [N, K, C]. - gt (np.ndarray): Groundtruth keypoint location with shape [N, K, C]. - mask (np.ndarray): Visibility of the target with shape [N, K]. - False for invisible joints, and True for visible. - Invisible joints will be ignored for accuracy calculation. - alignment (str, optional): method to align the prediction with the - groundtruth. Supported options are: - - - ``'none'``: no alignment will be applied - - ``'scale'``: align in the least-square sense in scale - - ``'procrustes'``: align in the least-square sense in - scale, rotation and translation. - - Returns: - tuple: A tuple containing joint position errors - - - (float | np.ndarray): mean per-joint position error (mpjpe). - - (float | np.ndarray): mpjpe after rigid alignment with the - ground truth (p-mpjpe). 
- """ - assert mask.any() - - if alignment == 'none': - pass - elif alignment == 'procrustes': - pred = np.stack([ - compute_similarity_transform(pred_i, gt_i) - for pred_i, gt_i in zip(pred, gt) - ]) - elif alignment == 'scale': - pred_dot_pred = np.einsum('nkc,nkc->n', pred, pred) - pred_dot_gt = np.einsum('nkc,nkc->n', pred, gt) - scale_factor = pred_dot_gt / pred_dot_pred - pred = pred * scale_factor[:, None, None] - else: - raise ValueError(f'Invalid value for alignment: {alignment}') - error = np.linalg.norm(pred - gt, ord=2, axis=-1)[mask].mean() - - return error +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Tuple + +import numpy as np + +from mmpose.codecs.utils import get_heatmap_maximum, get_simcc_maximum +from .mesh_eval import compute_similarity_transform + + +def _calc_distances(preds: np.ndarray, gts: np.ndarray, mask: np.ndarray, + norm_factor: np.ndarray) -> np.ndarray: + """Calculate the normalized distances between preds and target. + + Note: + - instance number: N + - keypoint number: K + - keypoint dimension: D (normally, D=2 or D=3) + + Args: + preds (np.ndarray[N, K, D]): Predicted keypoint location. + gts (np.ndarray[N, K, D]): Groundtruth keypoint location. + mask (np.ndarray[N, K]): Visibility of the target. False for invisible + joints, and True for visible. Invisible joints will be ignored for + accuracy calculation. + norm_factor (np.ndarray[N, D]): Normalization factor. + Typical value is heatmap_size. + + Returns: + np.ndarray[K, N]: The normalized distances. \ + If target keypoints are missing, the distance is -1. + """ + N, K, _ = preds.shape + # set mask=0 when norm_factor==0 + _mask = mask.copy() + _mask[np.where((norm_factor == 0).sum(1))[0], :] = False + + distances = np.full((N, K), -1, dtype=np.float32) + # handle invalid values + norm_factor[np.where(norm_factor <= 0)] = 1e6 + distances[_mask] = np.linalg.norm( + ((preds - gts) / norm_factor[:, None, :])[_mask], axis=-1) + return distances.T + + +def _distance_acc(distances: np.ndarray, thr: float = 0.5) -> float: + """Return the percentage below the distance threshold, while ignoring + distances values with -1. + + Note: + - instance number: N + + Args: + distances (np.ndarray[N, ]): The normalized distances. + thr (float): Threshold of the distances. + + Returns: + float: Percentage of distances below the threshold. \ + If all target keypoints are missing, return -1. + """ + distance_valid = distances != -1 + num_distance_valid = distance_valid.sum() + if num_distance_valid > 0: + return (distances[distance_valid] < thr).sum() / num_distance_valid + return -1 + + +def keypoint_pck_accuracy(pred: np.ndarray, gt: np.ndarray, mask: np.ndarray, + thr: np.ndarray, norm_factor: np.ndarray) -> tuple: + """Calculate the pose accuracy of PCK for each individual keypoint and the + averaged accuracy across all keypoints for coordinates. + + Note: + PCK metric measures accuracy of the localization of the body joints. + The distances between predicted positions and the ground-truth ones + are typically normalized by the bounding box size. + The threshold (thr) of the normalized distance is commonly set + as 0.05, 0.1 or 0.2 etc. + + - instance number: N + - keypoint number: K + + Args: + pred (np.ndarray[N, K, 2]): Predicted keypoint location. + gt (np.ndarray[N, K, 2]): Groundtruth keypoint location. + mask (np.ndarray[N, K]): Visibility of the target. False for invisible + joints, and True for visible. Invisible joints will be ignored for + accuracy calculation. 
+ thr (float): Threshold of PCK calculation. + norm_factor (np.ndarray[N, 2]): Normalization factor for H&W. + + Returns: + tuple: A tuple containing keypoint accuracy. + + - acc (np.ndarray[K]): Accuracy of each keypoint. + - avg_acc (float): Averaged accuracy across all keypoints. + - cnt (int): Number of valid keypoints. + """ + distances = _calc_distances(pred, gt, mask, norm_factor) + acc = np.array([_distance_acc(d, thr) for d in distances]) + valid_acc = acc[acc >= 0] + cnt = len(valid_acc) + avg_acc = valid_acc.mean() if cnt > 0 else 0.0 + return acc, avg_acc, cnt + + +def keypoint_auc(pred: np.ndarray, + gt: np.ndarray, + mask: np.ndarray, + norm_factor: np.ndarray, + num_thrs: int = 20) -> float: + """Calculate the Area under curve (AUC) of keypoint PCK accuracy. + + Note: + - instance number: N + - keypoint number: K + + Args: + pred (np.ndarray[N, K, 2]): Predicted keypoint location. + gt (np.ndarray[N, K, 2]): Groundtruth keypoint location. + mask (np.ndarray[N, K]): Visibility of the target. False for invisible + joints, and True for visible. Invisible joints will be ignored for + accuracy calculation. + norm_factor (float): Normalization factor. + num_thrs (int): number of thresholds to calculate auc. + + Returns: + float: Area under curve (AUC) of keypoint PCK accuracy. + """ + nor = np.tile(np.array([[norm_factor, norm_factor]]), (pred.shape[0], 1)) + thrs = [1.0 * i / num_thrs for i in range(num_thrs)] + avg_accs = [] + for thr in thrs: + _, avg_acc, _ = keypoint_pck_accuracy(pred, gt, mask, thr, nor) + avg_accs.append(avg_acc) + + auc = 0 + for i in range(num_thrs): + auc += 1.0 / num_thrs * avg_accs[i] + return auc + + +def keypoint_nme(pred: np.ndarray, gt: np.ndarray, mask: np.ndarray, + normalize_factor: np.ndarray) -> float: + """Calculate the normalized mean error (NME). + + Note: + - instance number: N + - keypoint number: K + + Args: + pred (np.ndarray[N, K, 2]): Predicted keypoint location. + gt (np.ndarray[N, K, 2]): Groundtruth keypoint location. + mask (np.ndarray[N, K]): Visibility of the target. False for invisible + joints, and True for visible. Invisible joints will be ignored for + accuracy calculation. + normalize_factor (np.ndarray[N, 2]): Normalization factor. + + Returns: + float: normalized mean error + """ + distances = _calc_distances(pred, gt, mask, normalize_factor) + distance_valid = distances[distances != -1] + return distance_valid.sum() / max(1, len(distance_valid)) + + +def keypoint_epe(pred: np.ndarray, gt: np.ndarray, mask: np.ndarray) -> float: + """Calculate the end-point error. + + Note: + - instance number: N + - keypoint number: K + + Args: + pred (np.ndarray[N, K, 2]): Predicted keypoint location. + gt (np.ndarray[N, K, 2]): Groundtruth keypoint location. + mask (np.ndarray[N, K]): Visibility of the target. False for invisible + joints, and True for visible. Invisible joints will be ignored for + accuracy calculation. + + Returns: + float: Average end-point error. + """ + + distances = _calc_distances( + pred, gt, mask, + np.ones((pred.shape[0], pred.shape[2]), dtype=np.float32)) + distance_valid = distances[distances != -1] + return distance_valid.sum() / max(1, len(distance_valid)) + + +def pose_pck_accuracy(output: np.ndarray, + target: np.ndarray, + mask: np.ndarray, + thr: float = 0.05, + normalize: Optional[np.ndarray] = None) -> tuple: + """Calculate the pose accuracy of PCK for each individual keypoint and the + averaged accuracy across all keypoints from heatmaps. 
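# Minimal usage sketch (synthetic data, assumed shapes) of the coordinate-based
# metrics defined above; both functions are exported from
# mmpose.evaluation.functional.
import numpy as np
from mmpose.evaluation.functional import keypoint_auc, keypoint_pck_accuracy

pred = np.random.rand(4, 17, 2) * 256            # N=4 instances, K=17 keypoints
gt = pred + np.random.normal(0, 5, pred.shape)   # perturbed "ground truth"
mask = np.ones((4, 17), dtype=bool)              # every keypoint is visible
norm = np.full((4, 2), 256.0)                    # per-instance normalization, e.g. bbox size

acc, avg_acc, cnt = keypoint_pck_accuracy(pred, gt, mask, thr=0.05, norm_factor=norm)
print(avg_acc, cnt)                              # averaged PCK and number of valid keypoints
print(keypoint_auc(pred, gt, mask, norm_factor=30))  # AUC takes a scalar norm factor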
+ + Note: + PCK metric measures accuracy of the localization of the body joints. + The distances between predicted positions and the ground-truth ones + are typically normalized by the bounding box size. + The threshold (thr) of the normalized distance is commonly set + as 0.05, 0.1 or 0.2 etc. + + - batch_size: N + - num_keypoints: K + - heatmap height: H + - heatmap width: W + + Args: + output (np.ndarray[N, K, H, W]): Model output heatmaps. + target (np.ndarray[N, K, H, W]): Groundtruth heatmaps. + mask (np.ndarray[N, K]): Visibility of the target. False for invisible + joints, and True for visible. Invisible joints will be ignored for + accuracy calculation. + thr (float): Threshold of PCK calculation. Default 0.05. + normalize (np.ndarray[N, 2]): Normalization factor for H&W. + + Returns: + tuple: A tuple containing keypoint accuracy. + + - np.ndarray[K]: Accuracy of each keypoint. + - float: Averaged accuracy across all keypoints. + - int: Number of valid keypoints. + """ + N, K, H, W = output.shape + if K == 0: + return None, 0, 0 + if normalize is None: + normalize = np.tile(np.array([[H, W]]), (N, 1)) + + pred, _ = get_heatmap_maximum(output) + gt, _ = get_heatmap_maximum(target) + return keypoint_pck_accuracy(pred, gt, mask, thr, normalize) + + +def simcc_pck_accuracy(output: Tuple[np.ndarray, np.ndarray], + target: Tuple[np.ndarray, np.ndarray], + simcc_split_ratio: float, + mask: np.ndarray, + thr: float = 0.05, + normalize: Optional[np.ndarray] = None) -> tuple: + """Calculate the pose accuracy of PCK for each individual keypoint and the + averaged accuracy across all keypoints from SimCC. + + Note: + PCK metric measures accuracy of the localization of the body joints. + The distances between predicted positions and the ground-truth ones + are typically normalized by the bounding box size. + The threshold (thr) of the normalized distance is commonly set + as 0.05, 0.1 or 0.2 etc. + + - instance number: N + - keypoint number: K + + Args: + output (Tuple[np.ndarray, np.ndarray]): Model predicted SimCC. + target (Tuple[np.ndarray, np.ndarray]): Groundtruth SimCC. + mask (np.ndarray[N, K]): Visibility of the target. False for invisible + joints, and True for visible. Invisible joints will be ignored for + accuracy calculation. + thr (float): Threshold of PCK calculation. Default 0.05. + normalize (np.ndarray[N, 2]): Normalization factor for H&W. + + Returns: + tuple: A tuple containing keypoint accuracy. + + - np.ndarray[K]: Accuracy of each keypoint. + - float: Averaged accuracy across all keypoints. + - int: Number of valid keypoints. + """ + pred_x, pred_y = output + gt_x, gt_y = target + + N, _, Wx = pred_x.shape + _, _, Wy = pred_y.shape + W, H = int(Wx / simcc_split_ratio), int(Wy / simcc_split_ratio) + + if normalize is None: + normalize = np.tile(np.array([[H, W]]), (N, 1)) + + pred_coords, _ = get_simcc_maximum(pred_x, pred_y) + pred_coords /= simcc_split_ratio + gt_coords, _ = get_simcc_maximum(gt_x, gt_y) + gt_coords /= simcc_split_ratio + + return keypoint_pck_accuracy(pred_coords, gt_coords, mask, thr, normalize) + + +def multilabel_classification_accuracy(pred: np.ndarray, + gt: np.ndarray, + mask: np.ndarray, + thr: float = 0.5) -> float: + """Get multi-label classification accuracy. + + Note: + - batch size: N + - label number: L + + Args: + pred (np.ndarray[N, L, 2]): model predicted labels. + gt (np.ndarray[N, L, 2]): ground-truth labels. + mask (np.ndarray[N, 1] or np.ndarray[N, L] ): reliability of + ground-truth labels. 
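# Sketch (synthetic heatmaps) of pose_pck_accuracy above: PCK is computed on the
# argmax locations of predicted vs. ground-truth heatmaps, normalized by the
# heatmap size when `normalize` is not given.
import numpy as np
from mmpose.evaluation.functional import pose_pck_accuracy

N, K, H, W = 2, 17, 64, 48
output = np.random.rand(N, K, H, W).astype(np.float32)  # model heatmaps
target = output.copy()                                   # identical heatmaps
mask = np.ones((N, K), dtype=bool)

acc, avg_acc, cnt = pose_pck_accuracy(output, target, mask, thr=0.05)
print(avg_acc)  # 1.0 here, since predicted and ground-truth maxima coincide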
+ thr (float): Threshold for calculating accuracy. + + Returns: + float: multi-label classification accuracy. + """ + # we only compute accuracy on the samples with ground-truth of all labels. + valid = (mask > 0).min(axis=1) if mask.ndim == 2 else (mask > 0) + pred, gt = pred[valid], gt[valid] + + if pred.shape[0] == 0: + acc = 0.0 # when no sample is with gt labels, set acc to 0. + else: + # The classification of a sample is regarded as correct + # only if it's correct for all labels. + acc = (((pred - thr) * (gt - thr)) > 0).all(axis=1).mean() + return acc + + +def keypoint_mpjpe(pred: np.ndarray, + gt: np.ndarray, + mask: np.ndarray, + alignment: str = 'none'): + """Calculate the mean per-joint position error (MPJPE) and the error after + rigid alignment with the ground truth (P-MPJPE). + + Note: + - batch_size: N + - num_keypoints: K + - keypoint_dims: C + + Args: + pred (np.ndarray): Predicted keypoint location with shape [N, K, C]. + gt (np.ndarray): Groundtruth keypoint location with shape [N, K, C]. + mask (np.ndarray): Visibility of the target with shape [N, K]. + False for invisible joints, and True for visible. + Invisible joints will be ignored for accuracy calculation. + alignment (str, optional): method to align the prediction with the + groundtruth. Supported options are: + + - ``'none'``: no alignment will be applied + - ``'scale'``: align in the least-square sense in scale + - ``'procrustes'``: align in the least-square sense in + scale, rotation and translation. + + Returns: + tuple: A tuple containing joint position errors + + - (float | np.ndarray): mean per-joint position error (mpjpe). + - (float | np.ndarray): mpjpe after rigid alignment with the + ground truth (p-mpjpe). + """ + assert mask.any() + + if alignment == 'none': + pass + elif alignment == 'procrustes': + pred = np.stack([ + compute_similarity_transform(pred_i, gt_i) + for pred_i, gt_i in zip(pred, gt) + ]) + elif alignment == 'scale': + pred_dot_pred = np.einsum('nkc,nkc->n', pred, pred) + pred_dot_gt = np.einsum('nkc,nkc->n', pred, gt) + scale_factor = pred_dot_gt / pred_dot_pred + pred = pred * scale_factor[:, None, None] + else: + raise ValueError(f'Invalid value for alignment: {alignment}') + error = np.linalg.norm(pred - gt, ord=2, axis=-1)[mask].mean() + + return error diff --git a/mmpose/evaluation/functional/mesh_eval.py b/mmpose/evaluation/functional/mesh_eval.py index 683b4539b2..18b5ae2c68 100644 --- a/mmpose/evaluation/functional/mesh_eval.py +++ b/mmpose/evaluation/functional/mesh_eval.py @@ -1,66 +1,66 @@ -# ------------------------------------------------------------------------------ -# Adapted from https://github.com/akanazawa/hmr -# Original licence: Copyright (c) 2018 akanazawa, under the MIT License. -# ------------------------------------------------------------------------------ - -import numpy as np - - -def compute_similarity_transform(source_points, target_points): - """Computes a similarity transform (sR, t) that takes a set of 3D points - source_points (N x 3) closest to a set of 3D points target_points, where R - is an 3x3 rotation matrix, t 3x1 translation, s scale. And return the - transformed 3D points source_points_hat (N x 3). i.e. solves the orthogonal - Procrutes problem. - - Note: - Points number: N - - Args: - source_points (np.ndarray): Source point set with shape [N, 3]. - target_points (np.ndarray): Target point set with shape [N, 3]. - - Returns: - np.ndarray: Transformed source point set with shape [N, 3]. 
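# Small illustrative example (made-up 3D poses) of keypoint_mpjpe defined
# earlier in this diff, comparing the three alignment modes.
import numpy as np
from mmpose.evaluation.functional import keypoint_mpjpe

gt = np.random.rand(2, 17, 3)          # N=2 poses, K=17 joints, C=3
pred = 1.1 * gt + 0.02                 # scaled and shifted prediction
mask = np.ones((2, 17), dtype=bool)

print(keypoint_mpjpe(pred, gt, mask, alignment='none'))        # raw MPJPE
print(keypoint_mpjpe(pred, gt, mask, alignment='scale'))       # scale-aligned
print(keypoint_mpjpe(pred, gt, mask, alignment='procrustes'))  # P-MPJPE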
- """ - - assert target_points.shape[0] == source_points.shape[0] - assert target_points.shape[1] == 3 and source_points.shape[1] == 3 - - source_points = source_points.T - target_points = target_points.T - - # 1. Remove mean. - mu1 = source_points.mean(axis=1, keepdims=True) - mu2 = target_points.mean(axis=1, keepdims=True) - X1 = source_points - mu1 - X2 = target_points - mu2 - - # 2. Compute variance of X1 used for scale. - var1 = np.sum(X1**2) - - # 3. The outer product of X1 and X2. - K = X1.dot(X2.T) - - # 4. Solution that Maximizes trace(R'K) is R=U*V', where U, V are - # singular vectors of K. - U, _, Vh = np.linalg.svd(K) - V = Vh.T - # Construct Z that fixes the orientation of R to get det(R)=1. - Z = np.eye(U.shape[0]) - Z[-1, -1] *= np.sign(np.linalg.det(U.dot(V.T))) - # Construct R. - R = V.dot(Z.dot(U.T)) - - # 5. Recover scale. - scale = np.trace(R.dot(K)) / var1 - - # 6. Recover translation. - t = mu2 - scale * (R.dot(mu1)) - - # 7. Transform the source points: - source_points_hat = scale * R.dot(source_points) + t - - source_points_hat = source_points_hat.T - - return source_points_hat +# ------------------------------------------------------------------------------ +# Adapted from https://github.com/akanazawa/hmr +# Original licence: Copyright (c) 2018 akanazawa, under the MIT License. +# ------------------------------------------------------------------------------ + +import numpy as np + + +def compute_similarity_transform(source_points, target_points): + """Computes a similarity transform (sR, t) that takes a set of 3D points + source_points (N x 3) closest to a set of 3D points target_points, where R + is an 3x3 rotation matrix, t 3x1 translation, s scale. And return the + transformed 3D points source_points_hat (N x 3). i.e. solves the orthogonal + Procrutes problem. + + Note: + Points number: N + + Args: + source_points (np.ndarray): Source point set with shape [N, 3]. + target_points (np.ndarray): Target point set with shape [N, 3]. + + Returns: + np.ndarray: Transformed source point set with shape [N, 3]. + """ + + assert target_points.shape[0] == source_points.shape[0] + assert target_points.shape[1] == 3 and source_points.shape[1] == 3 + + source_points = source_points.T + target_points = target_points.T + + # 1. Remove mean. + mu1 = source_points.mean(axis=1, keepdims=True) + mu2 = target_points.mean(axis=1, keepdims=True) + X1 = source_points - mu1 + X2 = target_points - mu2 + + # 2. Compute variance of X1 used for scale. + var1 = np.sum(X1**2) + + # 3. The outer product of X1 and X2. + K = X1.dot(X2.T) + + # 4. Solution that Maximizes trace(R'K) is R=U*V', where U, V are + # singular vectors of K. + U, _, Vh = np.linalg.svd(K) + V = Vh.T + # Construct Z that fixes the orientation of R to get det(R)=1. + Z = np.eye(U.shape[0]) + Z[-1, -1] *= np.sign(np.linalg.det(U.dot(V.T))) + # Construct R. + R = V.dot(Z.dot(U.T)) + + # 5. Recover scale. + scale = np.trace(R.dot(K)) / var1 + + # 6. Recover translation. + t = mu2 - scale * (R.dot(mu1)) + + # 7. 
Transform the source points: + source_points_hat = scale * R.dot(source_points) + t + + source_points_hat = source_points_hat.T + + return source_points_hat diff --git a/mmpose/evaluation/functional/nms.py b/mmpose/evaluation/functional/nms.py index eed4e5cf73..c3ac408045 100644 --- a/mmpose/evaluation/functional/nms.py +++ b/mmpose/evaluation/functional/nms.py @@ -1,327 +1,327 @@ -# ------------------------------------------------------------------------------ -# Adapted from https://github.com/leoxiaobin/deep-high-resolution-net.pytorch -# and https://github.com/HRNet/DEKR -# Original licence: Copyright (c) Microsoft, under the MIT License. -# ------------------------------------------------------------------------------ - -from typing import List, Optional - -import numpy as np - - -def nms(dets: np.ndarray, thr: float) -> List[int]: - """Greedily select boxes with high confidence and overlap <= thr. - - Args: - dets (np.ndarray): [[x1, y1, x2, y2, score]]. - thr (float): Retain overlap < thr. - - Returns: - list: Indexes to keep. - """ - if len(dets) == 0: - return [] - - x1 = dets[:, 0] - y1 = dets[:, 1] - x2 = dets[:, 2] - y2 = dets[:, 3] - scores = dets[:, 4] - - areas = (x2 - x1 + 1) * (y2 - y1 + 1) - order = scores.argsort()[::-1] - - keep = [] - while len(order) > 0: - i = order[0] - keep.append(i) - xx1 = np.maximum(x1[i], x1[order[1:]]) - yy1 = np.maximum(y1[i], y1[order[1:]]) - xx2 = np.minimum(x2[i], x2[order[1:]]) - yy2 = np.minimum(y2[i], y2[order[1:]]) - - w = np.maximum(0.0, xx2 - xx1 + 1) - h = np.maximum(0.0, yy2 - yy1 + 1) - inter = w * h - ovr = inter / (areas[i] + areas[order[1:]] - inter) - - inds = np.where(ovr <= thr)[0] - order = order[inds + 1] - - return keep - - -def oks_iou(g: np.ndarray, - d: np.ndarray, - a_g: float, - a_d: np.ndarray, - sigmas: Optional[np.ndarray] = None, - vis_thr: Optional[float] = None) -> np.ndarray: - """Calculate oks ious. - - Note: - - - number of keypoints: K - - number of instances: N - - Args: - g (np.ndarray): The instance to calculate OKS IOU with other - instances. Containing the keypoints coordinates. Shape: (K*3, ) - d (np.ndarray): The rest instances. Containing the keypoints - coordinates. Shape: (N, K*3) - a_g (float): Area of the ground truth object. - a_d (np.ndarray): Area of the detected object. Shape: (N, ) - sigmas (np.ndarray, optional): Keypoint labelling uncertainty. - Please refer to `COCO keypoint evaluation - `__ for more details. - If not given, use the sigmas on COCO dataset. - If specified, shape: (K, ). Defaults to ``None`` - vis_thr(float, optional): Threshold of the keypoint visibility. - If specified, will calculate OKS based on those keypoints whose - visibility higher than vis_thr. If not given, calculate the OKS - based on all keypoints. Defaults to ``None`` - - Returns: - np.ndarray: The oks ious. 
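# Quick numerical check (synthetic points, assumed values) that
# compute_similarity_transform above recovers a known similarity transform:
# the aligned source points should match the targets up to float error.
import numpy as np
from mmpose.evaluation.functional.mesh_eval import compute_similarity_transform

rng = np.random.default_rng(0)
source = rng.normal(size=(17, 3))

R, _ = np.linalg.qr(rng.normal(size=(3, 3)))  # random orthogonal matrix
R *= np.sign(np.linalg.det(R))                # force det(R) = +1 (proper rotation)
target = 2.0 * source @ R.T + np.array([0.1, -0.3, 0.5])

aligned = compute_similarity_transform(source, target)
print(np.abs(aligned - target).max())         # ~0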
- """ - if sigmas is None: - sigmas = np.array([ - .26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, - .87, .87, .89, .89 - ]) / 10.0 - vars = (sigmas * 2)**2 - xg = g[0::3] - yg = g[1::3] - vg = g[2::3] - ious = np.zeros(len(d), dtype=np.float32) - for n_d in range(0, len(d)): - xd = d[n_d, 0::3] - yd = d[n_d, 1::3] - vd = d[n_d, 2::3] - dx = xd - xg - dy = yd - yg - e = (dx**2 + dy**2) / vars / ((a_g + a_d[n_d]) / 2 + np.spacing(1)) / 2 - if vis_thr is not None: - ind = list((vg > vis_thr) & (vd > vis_thr)) - e = e[ind] - ious[n_d] = np.sum(np.exp(-e)) / len(e) if len(e) != 0 else 0.0 - return ious - - -def oks_nms(kpts_db: List[dict], - thr: float, - sigmas: Optional[np.ndarray] = None, - vis_thr: Optional[float] = None, - score_per_joint: bool = False): - """OKS NMS implementations. - - Args: - kpts_db (List[dict]): The keypoints results of the same image. - thr (float): The threshold of NMS. Will retain oks overlap < thr. - sigmas (np.ndarray, optional): Keypoint labelling uncertainty. - Please refer to `COCO keypoint evaluation - `__ for more details. - If not given, use the sigmas on COCO dataset. Defaults to ``None`` - vis_thr(float, optional): Threshold of the keypoint visibility. - If specified, will calculate OKS based on those keypoints whose - visibility higher than vis_thr. If not given, calculate the OKS - based on all keypoints. Defaults to ``None`` - score_per_joint(bool): Whether the input scores (in kpts_db) are - per-joint scores. Defaults to ``False`` - - Returns: - np.ndarray: indexes to keep. - """ - if len(kpts_db) == 0: - return [] - - if score_per_joint: - scores = np.array([k['score'].mean() for k in kpts_db]) - else: - scores = np.array([k['score'] for k in kpts_db]) - - kpts = np.array([k['keypoints'].flatten() for k in kpts_db]) - areas = np.array([k['area'] for k in kpts_db]) - - order = scores.argsort()[::-1] - - keep = [] - while len(order) > 0: - i = order[0] - keep.append(i) - - oks_ovr = oks_iou(kpts[i], kpts[order[1:]], areas[i], areas[order[1:]], - sigmas, vis_thr) - - inds = np.where(oks_ovr <= thr)[0] - order = order[inds + 1] - - keep = np.array(keep) - - return keep - - -def _rescore(overlap: np.ndarray, - scores: np.ndarray, - thr: float, - type: str = 'gaussian'): - """Rescoring mechanism gaussian or linear. - - Args: - overlap (np.ndarray): The calculated oks ious. - scores (np.ndarray): target scores. - thr (float): retain oks overlap < thr. - type (str): The rescoring type. Could be 'gaussian' or 'linear'. - Defaults to ``'gaussian'`` - - Returns: - np.ndarray: indexes to keep - """ - assert len(overlap) == len(scores) - assert type in ['gaussian', 'linear'] - - if type == 'linear': - inds = np.where(overlap >= thr)[0] - scores[inds] = scores[inds] * (1 - overlap[inds]) - else: - scores = scores * np.exp(-overlap**2 / thr) - - return scores - - -def soft_oks_nms(kpts_db: List[dict], - thr: float, - max_dets: int = 20, - sigmas: Optional[np.ndarray] = None, - vis_thr: Optional[float] = None, - score_per_joint: bool = False): - """Soft OKS NMS implementations. - - Args: - kpts_db (List[dict]): The keypoints results of the same image. - thr (float): The threshold of NMS. Will retain oks overlap < thr. - max_dets (int): Maximum number of detections to keep. Defaults to 20 - sigmas (np.ndarray, optional): Keypoint labelling uncertainty. - Please refer to `COCO keypoint evaluation - `__ for more details. - If not given, use the sigmas on COCO dataset. 
Defaults to ``None`` - vis_thr(float, optional): Threshold of the keypoint visibility. - If specified, will calculate OKS based on those keypoints whose - visibility higher than vis_thr. If not given, calculate the OKS - based on all keypoints. Defaults to ``None`` - score_per_joint(bool): Whether the input scores (in kpts_db) are - per-joint scores. Defaults to ``False`` - - Returns: - np.ndarray: indexes to keep. - """ - if len(kpts_db) == 0: - return [] - - if score_per_joint: - scores = np.array([k['score'].mean() for k in kpts_db]) - else: - scores = np.array([k['score'] for k in kpts_db]) - - kpts = np.array([k['keypoints'].flatten() for k in kpts_db]) - areas = np.array([k['area'] for k in kpts_db]) - - order = scores.argsort()[::-1] - scores = scores[order] - - keep = np.zeros(max_dets, dtype=np.intp) - keep_cnt = 0 - while len(order) > 0 and keep_cnt < max_dets: - i = order[0] - - oks_ovr = oks_iou(kpts[i], kpts[order[1:]], areas[i], areas[order[1:]], - sigmas, vis_thr) - - order = order[1:] - scores = _rescore(oks_ovr, scores[1:], thr) - - tmp = scores.argsort()[::-1] - order = order[tmp] - scores = scores[tmp] - - keep[keep_cnt] = i - keep_cnt += 1 - - keep = keep[:keep_cnt] - - return keep - - -def nearby_joints_nms( - kpts_db: List[dict], - dist_thr: float, - num_nearby_joints_thr: Optional[int] = None, - score_per_joint: bool = False, - max_dets: int = 30, -): - """Nearby joints NMS implementations. Instances with non-maximum scores - will be suppressed if they have too much closed joints with other - instances. This function is modified from project - `DEKR`. - - Args: - kpts_db (list[dict]): keypoints and scores. - dist_thr (float): threshold for judging whether two joints are close. - num_nearby_joints_thr (int): threshold for judging whether two - instances are close. - max_dets (int): max number of detections to keep. - score_per_joint (bool): the input scores (in kpts_db) are per joint - scores. - - Returns: - np.ndarray: indexes to keep. - """ - - assert dist_thr > 0, '`dist_thr` must be greater than 0.' - if len(kpts_db) == 0: - return [] - - if score_per_joint: - scores = np.array([k['score'].mean() for k in kpts_db]) - else: - scores = np.array([k['score'] for k in kpts_db]) - - kpts = np.array([k['keypoints'] for k in kpts_db]) - - num_people, num_joints, _ = kpts.shape - if num_nearby_joints_thr is None: - num_nearby_joints_thr = num_joints // 2 - assert num_nearby_joints_thr < num_joints, '`num_nearby_joints_thr` must '\ - 'be less than the number of joints.' 
- - # compute distance threshold - pose_area = kpts.max(axis=1) - kpts.min(axis=1) - pose_area = np.sqrt(np.power(pose_area, 2).sum(axis=1)) - pose_area = pose_area.reshape(num_people, 1, 1) - pose_area = np.tile(pose_area, (num_people, num_joints)) - close_dist_thr = pose_area * dist_thr - - # count nearby joints between instances - instance_dist = kpts[:, None] - kpts - instance_dist = np.sqrt(np.power(instance_dist, 2).sum(axis=3)) - close_instance_num = (instance_dist < close_dist_thr).sum(2) - close_instance = close_instance_num > num_nearby_joints_thr - - # apply nms - ignored_pose_inds, keep_pose_inds = set(), list() - indexes = np.argsort(scores)[::-1] - for i in indexes: - if i in ignored_pose_inds: - continue - keep_inds = close_instance[i].nonzero()[0] - keep_ind = keep_inds[np.argmax(scores[keep_inds])] - if keep_ind not in ignored_pose_inds: - keep_pose_inds.append(keep_ind) - ignored_pose_inds = ignored_pose_inds.union(set(keep_inds)) - - # limit the number of output instances - if max_dets > 0 and len(keep_pose_inds) > max_dets: - sub_inds = np.argsort(scores[keep_pose_inds])[-1:-max_dets - 1:-1] - keep_pose_inds = [keep_pose_inds[i] for i in sub_inds] - - return keep_pose_inds +# ------------------------------------------------------------------------------ +# Adapted from https://github.com/leoxiaobin/deep-high-resolution-net.pytorch +# and https://github.com/HRNet/DEKR +# Original licence: Copyright (c) Microsoft, under the MIT License. +# ------------------------------------------------------------------------------ + +from typing import List, Optional + +import numpy as np + + +def nms(dets: np.ndarray, thr: float) -> List[int]: + """Greedily select boxes with high confidence and overlap <= thr. + + Args: + dets (np.ndarray): [[x1, y1, x2, y2, score]]. + thr (float): Retain overlap < thr. + + Returns: + list: Indexes to keep. + """ + if len(dets) == 0: + return [] + + x1 = dets[:, 0] + y1 = dets[:, 1] + x2 = dets[:, 2] + y2 = dets[:, 3] + scores = dets[:, 4] + + areas = (x2 - x1 + 1) * (y2 - y1 + 1) + order = scores.argsort()[::-1] + + keep = [] + while len(order) > 0: + i = order[0] + keep.append(i) + xx1 = np.maximum(x1[i], x1[order[1:]]) + yy1 = np.maximum(y1[i], y1[order[1:]]) + xx2 = np.minimum(x2[i], x2[order[1:]]) + yy2 = np.minimum(y2[i], y2[order[1:]]) + + w = np.maximum(0.0, xx2 - xx1 + 1) + h = np.maximum(0.0, yy2 - yy1 + 1) + inter = w * h + ovr = inter / (areas[i] + areas[order[1:]] - inter) + + inds = np.where(ovr <= thr)[0] + order = order[inds + 1] + + return keep + + +def oks_iou(g: np.ndarray, + d: np.ndarray, + a_g: float, + a_d: np.ndarray, + sigmas: Optional[np.ndarray] = None, + vis_thr: Optional[float] = None) -> np.ndarray: + """Calculate oks ious. + + Note: + + - number of keypoints: K + - number of instances: N + + Args: + g (np.ndarray): The instance to calculate OKS IOU with other + instances. Containing the keypoints coordinates. Shape: (K*3, ) + d (np.ndarray): The rest instances. Containing the keypoints + coordinates. Shape: (N, K*3) + a_g (float): Area of the ground truth object. + a_d (np.ndarray): Area of the detected object. Shape: (N, ) + sigmas (np.ndarray, optional): Keypoint labelling uncertainty. + Please refer to `COCO keypoint evaluation + `__ for more details. + If not given, use the sigmas on COCO dataset. + If specified, shape: (K, ). Defaults to ``None`` + vis_thr(float, optional): Threshold of the keypoint visibility. + If specified, will calculate OKS based on those keypoints whose + visibility higher than vis_thr. 
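# Tiny sketch (made-up boxes) of the greedy bbox nms() above: rows are
# [x1, y1, x2, y2, score]; overlapping lower-score boxes are suppressed.
import numpy as np
from mmpose.evaluation.functional import nms

dets = np.array([
    [10., 10., 60., 60., 0.9],
    [12., 12., 62., 62., 0.8],      # heavy overlap with the first box
    [100., 100., 150., 150., 0.7],
])
print(nms(dets, thr=0.5))           # [0, 2]; the second box is removed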
If not given, calculate the OKS + based on all keypoints. Defaults to ``None`` + + Returns: + np.ndarray: The oks ious. + """ + if sigmas is None: + sigmas = np.array([ + .26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, + .87, .87, .89, .89 + ]) / 10.0 + vars = (sigmas * 2)**2 + xg = g[0::3] + yg = g[1::3] + vg = g[2::3] + ious = np.zeros(len(d), dtype=np.float32) + for n_d in range(0, len(d)): + xd = d[n_d, 0::3] + yd = d[n_d, 1::3] + vd = d[n_d, 2::3] + dx = xd - xg + dy = yd - yg + e = (dx**2 + dy**2) / vars / ((a_g + a_d[n_d]) / 2 + np.spacing(1)) / 2 + if vis_thr is not None: + ind = list((vg > vis_thr) & (vd > vis_thr)) + e = e[ind] + ious[n_d] = np.sum(np.exp(-e)) / len(e) if len(e) != 0 else 0.0 + return ious + + +def oks_nms(kpts_db: List[dict], + thr: float, + sigmas: Optional[np.ndarray] = None, + vis_thr: Optional[float] = None, + score_per_joint: bool = False): + """OKS NMS implementations. + + Args: + kpts_db (List[dict]): The keypoints results of the same image. + thr (float): The threshold of NMS. Will retain oks overlap < thr. + sigmas (np.ndarray, optional): Keypoint labelling uncertainty. + Please refer to `COCO keypoint evaluation + `__ for more details. + If not given, use the sigmas on COCO dataset. Defaults to ``None`` + vis_thr(float, optional): Threshold of the keypoint visibility. + If specified, will calculate OKS based on those keypoints whose + visibility higher than vis_thr. If not given, calculate the OKS + based on all keypoints. Defaults to ``None`` + score_per_joint(bool): Whether the input scores (in kpts_db) are + per-joint scores. Defaults to ``False`` + + Returns: + np.ndarray: indexes to keep. + """ + if len(kpts_db) == 0: + return [] + + if score_per_joint: + scores = np.array([k['score'].mean() for k in kpts_db]) + else: + scores = np.array([k['score'] for k in kpts_db]) + + kpts = np.array([k['keypoints'].flatten() for k in kpts_db]) + areas = np.array([k['area'] for k in kpts_db]) + + order = scores.argsort()[::-1] + + keep = [] + while len(order) > 0: + i = order[0] + keep.append(i) + + oks_ovr = oks_iou(kpts[i], kpts[order[1:]], areas[i], areas[order[1:]], + sigmas, vis_thr) + + inds = np.where(oks_ovr <= thr)[0] + order = order[inds + 1] + + keep = np.array(keep) + + return keep + + +def _rescore(overlap: np.ndarray, + scores: np.ndarray, + thr: float, + type: str = 'gaussian'): + """Rescoring mechanism gaussian or linear. + + Args: + overlap (np.ndarray): The calculated oks ious. + scores (np.ndarray): target scores. + thr (float): retain oks overlap < thr. + type (str): The rescoring type. Could be 'gaussian' or 'linear'. + Defaults to ``'gaussian'`` + + Returns: + np.ndarray: indexes to keep + """ + assert len(overlap) == len(scores) + assert type in ['gaussian', 'linear'] + + if type == 'linear': + inds = np.where(overlap >= thr)[0] + scores[inds] = scores[inds] * (1 - overlap[inds]) + else: + scores = scores * np.exp(-overlap**2 / thr) + + return scores + + +def soft_oks_nms(kpts_db: List[dict], + thr: float, + max_dets: int = 20, + sigmas: Optional[np.ndarray] = None, + vis_thr: Optional[float] = None, + score_per_joint: bool = False): + """Soft OKS NMS implementations. + + Args: + kpts_db (List[dict]): The keypoints results of the same image. + thr (float): The threshold of NMS. Will retain oks overlap < thr. + max_dets (int): Maximum number of detections to keep. Defaults to 20 + sigmas (np.ndarray, optional): Keypoint labelling uncertainty. + Please refer to `COCO keypoint evaluation + `__ for more details. 
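# Minimal example (synthetic COCO-style instances, assumed values) of oks_nms
# above: each entry carries keypoints, a score and an area, and an instance is
# dropped when its OKS with a higher-scoring instance exceeds `thr`.
import numpy as np
from mmpose.evaluation.functional import oks_nms

kpts = np.concatenate([np.random.rand(17, 2) * 100, np.ones((17, 1))], axis=1)
dup = kpts.copy()
dup[:, :2] += 1.0                               # near-duplicate detection

kpts_db = [
    dict(keypoints=kpts, score=0.9, area=90.0 * 90.0),
    dict(keypoints=dup, score=0.8, area=90.0 * 90.0),
]
print(oks_nms(kpts_db, thr=0.9))                # [0]; the duplicate is suppressed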
+ If not given, use the sigmas on COCO dataset. Defaults to ``None`` + vis_thr(float, optional): Threshold of the keypoint visibility. + If specified, will calculate OKS based on those keypoints whose + visibility higher than vis_thr. If not given, calculate the OKS + based on all keypoints. Defaults to ``None`` + score_per_joint(bool): Whether the input scores (in kpts_db) are + per-joint scores. Defaults to ``False`` + + Returns: + np.ndarray: indexes to keep. + """ + if len(kpts_db) == 0: + return [] + + if score_per_joint: + scores = np.array([k['score'].mean() for k in kpts_db]) + else: + scores = np.array([k['score'] for k in kpts_db]) + + kpts = np.array([k['keypoints'].flatten() for k in kpts_db]) + areas = np.array([k['area'] for k in kpts_db]) + + order = scores.argsort()[::-1] + scores = scores[order] + + keep = np.zeros(max_dets, dtype=np.intp) + keep_cnt = 0 + while len(order) > 0 and keep_cnt < max_dets: + i = order[0] + + oks_ovr = oks_iou(kpts[i], kpts[order[1:]], areas[i], areas[order[1:]], + sigmas, vis_thr) + + order = order[1:] + scores = _rescore(oks_ovr, scores[1:], thr) + + tmp = scores.argsort()[::-1] + order = order[tmp] + scores = scores[tmp] + + keep[keep_cnt] = i + keep_cnt += 1 + + keep = keep[:keep_cnt] + + return keep + + +def nearby_joints_nms( + kpts_db: List[dict], + dist_thr: float, + num_nearby_joints_thr: Optional[int] = None, + score_per_joint: bool = False, + max_dets: int = 30, +): + """Nearby joints NMS implementations. Instances with non-maximum scores + will be suppressed if they have too much closed joints with other + instances. This function is modified from project + `DEKR`. + + Args: + kpts_db (list[dict]): keypoints and scores. + dist_thr (float): threshold for judging whether two joints are close. + num_nearby_joints_thr (int): threshold for judging whether two + instances are close. + max_dets (int): max number of detections to keep. + score_per_joint (bool): the input scores (in kpts_db) are per joint + scores. + + Returns: + np.ndarray: indexes to keep. + """ + + assert dist_thr > 0, '`dist_thr` must be greater than 0.' + if len(kpts_db) == 0: + return [] + + if score_per_joint: + scores = np.array([k['score'].mean() for k in kpts_db]) + else: + scores = np.array([k['score'] for k in kpts_db]) + + kpts = np.array([k['keypoints'] for k in kpts_db]) + + num_people, num_joints, _ = kpts.shape + if num_nearby_joints_thr is None: + num_nearby_joints_thr = num_joints // 2 + assert num_nearby_joints_thr < num_joints, '`num_nearby_joints_thr` must '\ + 'be less than the number of joints.' 
+ + # compute distance threshold + pose_area = kpts.max(axis=1) - kpts.min(axis=1) + pose_area = np.sqrt(np.power(pose_area, 2).sum(axis=1)) + pose_area = pose_area.reshape(num_people, 1, 1) + pose_area = np.tile(pose_area, (num_people, num_joints)) + close_dist_thr = pose_area * dist_thr + + # count nearby joints between instances + instance_dist = kpts[:, None] - kpts + instance_dist = np.sqrt(np.power(instance_dist, 2).sum(axis=3)) + close_instance_num = (instance_dist < close_dist_thr).sum(2) + close_instance = close_instance_num > num_nearby_joints_thr + + # apply nms + ignored_pose_inds, keep_pose_inds = set(), list() + indexes = np.argsort(scores)[::-1] + for i in indexes: + if i in ignored_pose_inds: + continue + keep_inds = close_instance[i].nonzero()[0] + keep_ind = keep_inds[np.argmax(scores[keep_inds])] + if keep_ind not in ignored_pose_inds: + keep_pose_inds.append(keep_ind) + ignored_pose_inds = ignored_pose_inds.union(set(keep_inds)) + + # limit the number of output instances + if max_dets > 0 and len(keep_pose_inds) > max_dets: + sub_inds = np.argsort(scores[keep_pose_inds])[-1:-max_dets - 1:-1] + keep_pose_inds = [keep_pose_inds[i] for i in sub_inds] + + return keep_pose_inds diff --git a/mmpose/evaluation/metrics/__init__.py b/mmpose/evaluation/metrics/__init__.py index ac7e21b5cc..3a81111bed 100644 --- a/mmpose/evaluation/metrics/__init__.py +++ b/mmpose/evaluation/metrics/__init__.py @@ -1,14 +1,14 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .coco_metric import CocoMetric -from .coco_wholebody_metric import CocoWholeBodyMetric -from .keypoint_2d_metrics import (AUC, EPE, NME, JhmdbPCKAccuracy, - MpiiPCKAccuracy, PCKAccuracy) -from .keypoint_3d_metrics import MPJPE -from .keypoint_partition_metric import KeypointPartitionMetric -from .posetrack18_metric import PoseTrack18Metric - -__all__ = [ - 'CocoMetric', 'PCKAccuracy', 'MpiiPCKAccuracy', 'JhmdbPCKAccuracy', 'AUC', - 'EPE', 'NME', 'PoseTrack18Metric', 'CocoWholeBodyMetric', - 'KeypointPartitionMetric', 'MPJPE' -] +# Copyright (c) OpenMMLab. All rights reserved. +from .coco_metric import CocoMetric +from .coco_wholebody_metric import CocoWholeBodyMetric +from .keypoint_2d_metrics import (AUC, EPE, NME, JhmdbPCKAccuracy, + MpiiPCKAccuracy, PCKAccuracy) +from .keypoint_3d_metrics import MPJPE +from .keypoint_partition_metric import KeypointPartitionMetric +from .posetrack18_metric import PoseTrack18Metric + +__all__ = [ + 'CocoMetric', 'PCKAccuracy', 'MpiiPCKAccuracy', 'JhmdbPCKAccuracy', 'AUC', + 'EPE', 'NME', 'PoseTrack18Metric', 'CocoWholeBodyMetric', + 'KeypointPartitionMetric', 'MPJPE' +] diff --git a/mmpose/evaluation/metrics/coco_metric.py b/mmpose/evaluation/metrics/coco_metric.py index 8327e2eca7..d09b329dfc 100644 --- a/mmpose/evaluation/metrics/coco_metric.py +++ b/mmpose/evaluation/metrics/coco_metric.py @@ -1,550 +1,550 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import datetime -import os.path as osp -import tempfile -from collections import OrderedDict, defaultdict -from typing import Dict, Optional, Sequence - -import numpy as np -from mmengine.evaluator import BaseMetric -from mmengine.fileio import dump, get_local_path, load -from mmengine.logging import MMLogger -from xtcocotools.coco import COCO -from xtcocotools.cocoeval import COCOeval - -from mmpose.registry import METRICS -from ..functional import oks_nms, soft_oks_nms - - -@METRICS.register_module() -class CocoMetric(BaseMetric): - """COCO pose estimation task evaluation metric. 
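# Sketch (synthetic poses, assumed thresholds) of the DEKR-style
# nearby_joints_nms above: a pose is suppressed when too many of its joints lie
# within dist_thr (relative to the pose size) of a higher-scoring pose.
import numpy as np
from mmpose.evaluation.functional.nms import nearby_joints_nms

rng = np.random.default_rng(0)
base = rng.random((17, 2)) * 200
kpts_db = [
    dict(keypoints=base, score=0.9),
    dict(keypoints=base + 2.0, score=0.6),    # near-duplicate of the first pose
    dict(keypoints=base + 150.0, score=0.8),  # clearly separate pose
]
print(nearby_joints_nms(kpts_db, dist_thr=0.05, num_nearby_joints_thr=8))
# expected to keep only the two distinct poses, e.g. [0, 2]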
- - Evaluate AR, AP, and mAP for keypoint detection tasks. Support COCO - dataset and other datasets in COCO format. Please refer to - `COCO keypoint evaluation `__ - for more details. - - Args: - ann_file (str, optional): Path to the coco format annotation file. - If not specified, ground truth annotations from the dataset will - be converted to coco format. Defaults to None - use_area (bool): Whether to use ``'area'`` message in the annotations. - If the ground truth annotations (e.g. CrowdPose, AIC) do not have - the field ``'area'``, please set ``use_area=False``. - Defaults to ``True`` - iou_type (str): The same parameter as `iouType` in - :class:`xtcocotools.COCOeval`, which can be ``'keypoints'``, or - ``'keypoints_crowd'`` (used in CrowdPose dataset). - Defaults to ``'keypoints'`` - score_mode (str): The mode to score the prediction results which - should be one of the following options: - - - ``'bbox'``: Take the score of bbox as the score of the - prediction results. - - ``'bbox_keypoint'``: Use keypoint score to rescore the - prediction results. - - ``'bbox_rle'``: Use rle_score to rescore the - prediction results. - - Defaults to ``'bbox_keypoint'` - keypoint_score_thr (float): The threshold of keypoint score. The - keypoints with score lower than it will not be included to - rescore the prediction results. Valid only when ``score_mode`` is - ``bbox_keypoint``. Defaults to ``0.2`` - nms_mode (str): The mode to perform Non-Maximum Suppression (NMS), - which should be one of the following options: - - - ``'oks_nms'``: Use Object Keypoint Similarity (OKS) to - perform NMS. - - ``'soft_oks_nms'``: Use Object Keypoint Similarity (OKS) - to perform soft NMS. - - ``'none'``: Do not perform NMS. Typically for bottomup mode - output. - - Defaults to ``'oks_nms'` - nms_thr (float): The Object Keypoint Similarity (OKS) threshold - used in NMS when ``nms_mode`` is ``'oks_nms'`` or - ``'soft_oks_nms'``. Will retain the prediction results with OKS - lower than ``nms_thr``. Defaults to ``0.9`` - format_only (bool): Whether only format the output results without - doing quantitative evaluation. This is designed for the need of - test submission when the ground truth annotations are absent. If - set to ``True``, ``outfile_prefix`` should specify the path to - store the output results. Defaults to ``False`` - outfile_prefix (str | None): The prefix of json files. It includes - the file path and the prefix of filename, e.g., ``'a/b/prefix'``. - If not specified, a temp file will be created. Defaults to ``None`` - collect_device (str): Device name used for collecting results from - different ranks during distributed training. Must be ``'cpu'`` or - ``'gpu'``. Defaults to ``'cpu'`` - prefix (str, optional): The prefix that will be added in the metric - names to disambiguate homonymous metrics of different evaluators. - If prefix is not provided in the argument, ``self.default_prefix`` - will be used instead. 
Defaults to ``None`` - """ - default_prefix: Optional[str] = 'coco' - - def __init__(self, - ann_file: Optional[str] = None, - use_area: bool = True, - iou_type: str = 'keypoints', - score_mode: str = 'bbox_keypoint', - keypoint_score_thr: float = 0.2, - nms_mode: str = 'oks_nms', - nms_thr: float = 0.9, - format_only: bool = False, - outfile_prefix: Optional[str] = None, - collect_device: str = 'cpu', - prefix: Optional[str] = None) -> None: - super().__init__(collect_device=collect_device, prefix=prefix) - self.ann_file = ann_file - # initialize coco helper with the annotation json file - # if ann_file is not specified, initialize with the converted dataset - if ann_file is not None: - with get_local_path(ann_file) as local_path: - self.coco = COCO(local_path) - else: - self.coco = None - - self.use_area = use_area - self.iou_type = iou_type - - allowed_score_modes = ['bbox', 'bbox_keypoint', 'bbox_rle', 'keypoint'] - if score_mode not in allowed_score_modes: - raise ValueError( - "`score_mode` should be one of 'bbox', 'bbox_keypoint', " - f"'bbox_rle', but got {score_mode}") - self.score_mode = score_mode - self.keypoint_score_thr = keypoint_score_thr - - allowed_nms_modes = ['oks_nms', 'soft_oks_nms', 'none'] - if nms_mode not in allowed_nms_modes: - raise ValueError( - "`nms_mode` should be one of 'oks_nms', 'soft_oks_nms', " - f"'none', but got {nms_mode}") - self.nms_mode = nms_mode - self.nms_thr = nms_thr - - if format_only: - assert outfile_prefix is not None, '`outfile_prefix` can not be '\ - 'None when `format_only` is True, otherwise the result file '\ - 'will be saved to a temp directory which will be cleaned up '\ - 'in the end.' - elif ann_file is not None: - # do evaluation only if the ground truth annotations exist - assert 'annotations' in load(ann_file), \ - 'Ground truth annotations are required for evaluation '\ - 'when `format_only` is False.' - - self.format_only = format_only - self.outfile_prefix = outfile_prefix - - def process(self, data_batch: Sequence[dict], - data_samples: Sequence[dict]) -> None: - """Process one batch of data samples and predictions. The processed - results should be stored in ``self.results``, which will be used to - compute the metrics when all batches have been processed. - - Args: - data_batch (Sequence[dict]): A batch of data - from the dataloader. - data_samples (Sequence[dict]): A batch of outputs from - the model, each of which has the following keys: - - - 'id': The id of the sample - - 'img_id': The image_id of the sample - - 'pred_instances': The prediction results of instance(s) - """ - for data_sample in data_samples: - if 'pred_instances' not in data_sample: - raise ValueError( - '`pred_instances` are required to process the ' - f'predictions results in {self.__class__.__name__}. 
') - - # keypoints.shape: [N, K, 2], - # N: number of instances, K: number of keypoints - # for topdown-style output, N is usually 1, while for - # bottomup-style output, N is the number of instances in the image - keypoints = data_sample['pred_instances']['keypoints'] - # [N, K], the scores for all keypoints of all instances - keypoint_scores = data_sample['pred_instances']['keypoint_scores'] - assert keypoint_scores.shape == keypoints.shape[:2] - - # parse prediction results - pred = dict() - pred['id'] = data_sample['id'] - pred['img_id'] = data_sample['img_id'] - pred['keypoints'] = keypoints - pred['keypoint_scores'] = keypoint_scores - pred['category_id'] = data_sample.get('category_id', 1) - - if 'bbox_scores' in data_sample['pred_instances']: - # some one-stage models will predict bboxes and scores - # together with keypoints - bbox_scores = data_sample['pred_instances']['bbox_scores'] - elif ('bbox_scores' not in data_sample['gt_instances'] - or len(data_sample['gt_instances']['bbox_scores']) != - len(keypoints)): - # bottom-up models might output different number of - # instances from annotation - bbox_scores = np.ones(len(keypoints)) - else: - # top-down models use detected bboxes, the scores of which - # are contained in the gt_instances - bbox_scores = data_sample['gt_instances']['bbox_scores'] - pred['bbox_scores'] = bbox_scores - - # get area information - if 'bbox_scales' in data_sample['gt_instances']: - pred['areas'] = np.prod( - data_sample['gt_instances']['bbox_scales'], axis=1) - - # parse gt - gt = dict() - if self.coco is None: - gt['width'] = data_sample['ori_shape'][1] - gt['height'] = data_sample['ori_shape'][0] - gt['img_id'] = data_sample['img_id'] - if self.iou_type == 'keypoints_crowd': - assert 'crowd_index' in data_sample, \ - '`crowd_index` is required when `self.iou_type` is ' \ - '`keypoints_crowd`' - gt['crowd_index'] = data_sample['crowd_index'] - assert 'raw_ann_info' in data_sample, \ - 'The row ground truth annotations are required for ' \ - 'evaluation when `ann_file` is not provided' - anns = data_sample['raw_ann_info'] - gt['raw_ann_info'] = anns if isinstance(anns, list) else [anns] - - # add converted result to the results list - self.results.append((pred, gt)) - - def gt_to_coco_json(self, gt_dicts: Sequence[dict], - outfile_prefix: str) -> str: - """Convert ground truth to coco format json file. - - Args: - gt_dicts (Sequence[dict]): Ground truth of the dataset. Each dict - contains the ground truth information about the data sample. - Required keys of the each `gt_dict` in `gt_dicts`: - - `img_id`: image id of the data sample - - `width`: original image width - - `height`: original image height - - `raw_ann_info`: the raw annotation information - Optional keys: - - `crowd_index`: measure the crowding level of an image, - defined in CrowdPose dataset - It is worth mentioning that, in order to compute `CocoMetric`, - there are some required keys in the `raw_ann_info`: - - `id`: the id to distinguish different annotations - - `image_id`: the image id of this annotation - - `category_id`: the category of the instance. - - `bbox`: the object bounding box - - `keypoints`: the keypoints cooridinates along with their - visibilities. Note that it need to be aligned - with the official COCO format, e.g., a list with length - N * 3, in which N is the number of keypoints. And each - triplet represent the [x, y, visible] of the keypoint. - - `iscrowd`: indicating whether the annotation is a crowd. 
- It is useful when matching the detection results to - the ground truth. - There are some optional keys as well: - - `area`: it is necessary when `self.use_area` is `True` - - `num_keypoints`: it is necessary when `self.iou_type` - is set as `keypoints_crowd`. - outfile_prefix (str): The filename prefix of the json files. If the - prefix is "somepath/xxx", the json file will be named - "somepath/xxx.gt.json". - Returns: - str: The filename of the json file. - """ - image_infos = [] - annotations = [] - img_ids = [] - ann_ids = [] - - for gt_dict in gt_dicts: - # filter duplicate image_info - if gt_dict['img_id'] not in img_ids: - image_info = dict( - id=gt_dict['img_id'], - width=gt_dict['width'], - height=gt_dict['height'], - ) - if self.iou_type == 'keypoints_crowd': - image_info['crowdIndex'] = gt_dict['crowd_index'] - - image_infos.append(image_info) - img_ids.append(gt_dict['img_id']) - - # filter duplicate annotations - for ann in gt_dict['raw_ann_info']: - if ann is None: - # during evaluation on bottom-up datasets, some images - # do not have instance annotation - continue - - annotation = dict( - id=ann['id'], - image_id=ann['image_id'], - category_id=ann['category_id'], - bbox=ann['bbox'], - keypoints=ann['keypoints'], - iscrowd=ann['iscrowd'], - ) - if self.use_area: - assert 'area' in ann, \ - '`area` is required when `self.use_area` is `True`' - annotation['area'] = ann['area'] - - if self.iou_type == 'keypoints_crowd': - assert 'num_keypoints' in ann, \ - '`num_keypoints` is required when `self.iou_type` ' \ - 'is `keypoints_crowd`' - annotation['num_keypoints'] = ann['num_keypoints'] - - annotations.append(annotation) - ann_ids.append(ann['id']) - - info = dict( - date_created=str(datetime.datetime.now()), - description='Coco json file converted by mmpose CocoMetric.') - coco_json = dict( - info=info, - images=image_infos, - categories=self.dataset_meta['CLASSES'], - licenses=None, - annotations=annotations, - ) - converted_json_path = f'{outfile_prefix}.gt.json' - dump(coco_json, converted_json_path, sort_keys=True, indent=4) - return converted_json_path - - def compute_metrics(self, results: list) -> Dict[str, float]: - """Compute the metrics from processed results. - - Args: - results (list): The processed results of each batch. - - Returns: - Dict[str, float]: The computed metrics. The keys are the names of - the metrics, and the values are corresponding results. 
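# A hedged sketch of the input `gt_to_coco_json` expects when no `ann_file` is
# given; the ids and coordinates below are made-up placeholders.
num_keypoints = 17
example_gt_dict = dict(
    img_id=1,
    width=640,
    height=480,
    raw_ann_info=[
        dict(
            id=100,                                  # unique annotation id
            image_id=1,
            category_id=1,
            bbox=[50.0, 40.0, 200.0, 300.0],         # xywh, COCO convention
            keypoints=[0.0] * (num_keypoints * 3),   # flattened [x, y, visible] triplets
            iscrowd=0,
            area=200.0 * 300.0,                      # required when use_area=True
        )
    ],
)
# gt_to_coco_json([example_gt_dict], 'somepath/xxx') would then write
# 'somepath/xxx.gt.json' and return its path.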
- """ - logger: MMLogger = MMLogger.get_current_instance() - - # split prediction and gt list - preds, gts = zip(*results) - - tmp_dir = None - if self.outfile_prefix is None: - tmp_dir = tempfile.TemporaryDirectory() - outfile_prefix = osp.join(tmp_dir.name, 'results') - else: - outfile_prefix = self.outfile_prefix - - if self.coco is None: - # use converted gt json file to initialize coco helper - logger.info('Converting ground truth to coco format...') - coco_json_path = self.gt_to_coco_json( - gt_dicts=gts, outfile_prefix=outfile_prefix) - self.coco = COCO(coco_json_path) - - kpts = defaultdict(list) - - # group the preds by img_id - for pred in preds: - img_id = pred['img_id'] - for idx in range(len(pred['keypoints'])): - instance = { - 'id': pred['id'], - 'img_id': pred['img_id'], - 'category_id': pred['category_id'], - 'keypoints': pred['keypoints'][idx], - 'keypoint_scores': pred['keypoint_scores'][idx], - 'bbox_score': pred['bbox_scores'][idx], - } - - if 'areas' in pred: - instance['area'] = pred['areas'][idx] - else: - # use keypoint to calculate bbox and get area - keypoints = pred['keypoints'][idx] - area = ( - np.max(keypoints[:, 0]) - np.min(keypoints[:, 0])) * ( - np.max(keypoints[:, 1]) - np.min(keypoints[:, 1])) - instance['area'] = area - - kpts[img_id].append(instance) - - # sort keypoint results according to id and remove duplicate ones - kpts = self._sort_and_unique_bboxes(kpts, key='id') - - # score the prediction results according to `score_mode` - # and perform NMS according to `nms_mode` - valid_kpts = defaultdict(list) - num_keypoints = self.dataset_meta['num_keypoints'] - for img_id, instances in kpts.items(): - for instance in instances: - # concatenate the keypoint coordinates and scores - instance['keypoints'] = np.concatenate([ - instance['keypoints'], instance['keypoint_scores'][:, None] - ], - axis=-1) - if self.score_mode == 'bbox': - instance['score'] = instance['bbox_score'] - elif self.score_mode == 'keypoint': - instance['score'] = np.mean(instance['keypoint_scores']) - else: - bbox_score = instance['bbox_score'] - if self.score_mode == 'bbox_rle': - keypoint_scores = instance['keypoint_scores'] - instance['score'] = float(bbox_score + - np.mean(keypoint_scores) + - np.max(keypoint_scores)) - - else: # self.score_mode == 'bbox_keypoint': - mean_kpt_score = 0 - valid_num = 0 - for kpt_idx in range(num_keypoints): - kpt_score = instance['keypoint_scores'][kpt_idx] - if kpt_score > self.keypoint_score_thr: - mean_kpt_score += kpt_score - valid_num += 1 - if valid_num != 0: - mean_kpt_score /= valid_num - instance['score'] = bbox_score * mean_kpt_score - # perform nms - if self.nms_mode == 'none': - valid_kpts[img_id] = instances - else: - nms = oks_nms if self.nms_mode == 'oks_nms' else soft_oks_nms - keep = nms( - instances, - self.nms_thr, - sigmas=self.dataset_meta['sigmas']) - valid_kpts[img_id] = [instances[_keep] for _keep in keep] - - # convert results to coco style and dump into a json file - self.results2json(valid_kpts, outfile_prefix=outfile_prefix) - - # only format the results without doing quantitative evaluation - if self.format_only: - logger.info('results are saved in ' - f'{osp.dirname(outfile_prefix)}') - return {} - - # evaluation results - eval_results = OrderedDict() - logger.info(f'Evaluating {self.__class__.__name__}...') - info_str = self._do_python_keypoint_eval(outfile_prefix) - name_value = OrderedDict(info_str) - eval_results.update(name_value) - - if tmp_dir is not None: - tmp_dir.cleanup() - return eval_results - - def 
results2json(self, keypoints: Dict[int, list], - outfile_prefix: str) -> str: - """Dump the keypoint detection results to a COCO style json file. - - Args: - keypoints (Dict[int, list]): Keypoint detection results - of the dataset. - outfile_prefix (str): The filename prefix of the json files. If the - prefix is "somepath/xxx", the json files will be named - "somepath/xxx.keypoints.json", - - Returns: - str: The json file name of keypoint results. - """ - # the results with category_id - cat_results = [] - - for _, img_kpts in keypoints.items(): - _keypoints = np.array( - [img_kpt['keypoints'] for img_kpt in img_kpts]) - num_keypoints = self.dataset_meta['num_keypoints'] - # collect all the person keypoints in current image - _keypoints = _keypoints.reshape(-1, num_keypoints * 3) - - result = [{ - 'image_id': img_kpt['img_id'], - 'category_id': img_kpt['category_id'], - 'keypoints': keypoint.tolist(), - 'score': float(img_kpt['score']), - } for img_kpt, keypoint in zip(img_kpts, _keypoints)] - - cat_results.extend(result) - - res_file = f'{outfile_prefix}.keypoints.json' - dump(cat_results, res_file, sort_keys=True, indent=4) - - def _do_python_keypoint_eval(self, outfile_prefix: str) -> list: - """Do keypoint evaluation using COCOAPI. - - Args: - outfile_prefix (str): The filename prefix of the json files. If the - prefix is "somepath/xxx", the json files will be named - "somepath/xxx.keypoints.json", - - Returns: - list: a list of tuples. Each tuple contains the evaluation stats - name and corresponding stats value. - """ - res_file = f'{outfile_prefix}.keypoints.json' - coco_det = self.coco.loadRes(res_file) - sigmas = self.dataset_meta['sigmas'] - coco_eval = COCOeval(self.coco, coco_det, self.iou_type, sigmas, - self.use_area) - coco_eval.params.useSegm = None - coco_eval.evaluate() - coco_eval.accumulate() - coco_eval.summarize() - - if self.iou_type == 'keypoints_crowd': - stats_names = [ - 'AP', 'AP .5', 'AP .75', 'AR', 'AR .5', 'AR .75', 'AP(E)', - 'AP(M)', 'AP(H)' - ] - else: - stats_names = [ - 'AP', 'AP .5', 'AP .75', 'AP (M)', 'AP (L)', 'AR', 'AR .5', - 'AR .75', 'AR (M)', 'AR (L)' - ] - - info_str = list(zip(stats_names, coco_eval.stats)) - - return info_str - - def _sort_and_unique_bboxes(self, - kpts: Dict[int, list], - key: str = 'id') -> Dict[int, list]: - """Sort keypoint detection results in each image and remove the - duplicate ones. Usually performed in multi-batch testing. - - Args: - kpts (Dict[int, list]): keypoint prediction results. The keys are - '`img_id`' and the values are list that may contain - keypoints of multiple persons. Each element in the list is a - dict containing the ``'key'`` field. - See the argument ``key`` for details. - key (str): The key name in each person prediction results. The - corresponding value will be used for sorting the results. - Default: ``'id'``. - - Returns: - Dict[int, list]: The sorted keypoint detection results. - """ - for img_id, persons in kpts.items(): - # deal with bottomup-style output - if isinstance(kpts[img_id][0][key], Sequence): - return kpts - num = len(persons) - kpts[img_id] = sorted(kpts[img_id], key=lambda x: x[key]) - for i in range(num - 1, 0, -1): - if kpts[img_id][i][key] == kpts[img_id][i - 1][key]: - del kpts[img_id][i] - - return kpts +# Copyright (c) OpenMMLab. All rights reserved. 
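# A hedged configuration sketch showing how CocoMetric is typically wired into
# an MMPose config as the validation evaluator; the annotation path below is a
# placeholder, not part of this change.
val_evaluator = dict(
    type='CocoMetric',
    ann_file='data/coco/annotations/person_keypoints_val2017.json',
    score_mode='bbox_keypoint',   # rescore bbox score with mean keypoint score
    keypoint_score_thr=0.2,
    nms_mode='oks_nms',
    nms_thr=0.9,
)
test_evaluator = val_evaluator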
+import datetime +import os.path as osp +import tempfile +from collections import OrderedDict, defaultdict +from typing import Dict, Optional, Sequence + +import numpy as np +from mmengine.evaluator import BaseMetric +from mmengine.fileio import dump, get_local_path, load +from mmengine.logging import MMLogger +from xtcocotools.coco import COCO +from xtcocotools.cocoeval import COCOeval + +from mmpose.registry import METRICS +from ..functional import oks_nms, soft_oks_nms + + +@METRICS.register_module() +class CocoMetric(BaseMetric): + """COCO pose estimation task evaluation metric. + + Evaluate AR, AP, and mAP for keypoint detection tasks. Support COCO + dataset and other datasets in COCO format. Please refer to + `COCO keypoint evaluation `__ + for more details. + + Args: + ann_file (str, optional): Path to the coco format annotation file. + If not specified, ground truth annotations from the dataset will + be converted to coco format. Defaults to None + use_area (bool): Whether to use ``'area'`` message in the annotations. + If the ground truth annotations (e.g. CrowdPose, AIC) do not have + the field ``'area'``, please set ``use_area=False``. + Defaults to ``True`` + iou_type (str): The same parameter as `iouType` in + :class:`xtcocotools.COCOeval`, which can be ``'keypoints'``, or + ``'keypoints_crowd'`` (used in CrowdPose dataset). + Defaults to ``'keypoints'`` + score_mode (str): The mode to score the prediction results which + should be one of the following options: + + - ``'bbox'``: Take the score of bbox as the score of the + prediction results. + - ``'bbox_keypoint'``: Use keypoint score to rescore the + prediction results. + - ``'bbox_rle'``: Use rle_score to rescore the + prediction results. + + Defaults to ``'bbox_keypoint'` + keypoint_score_thr (float): The threshold of keypoint score. The + keypoints with score lower than it will not be included to + rescore the prediction results. Valid only when ``score_mode`` is + ``bbox_keypoint``. Defaults to ``0.2`` + nms_mode (str): The mode to perform Non-Maximum Suppression (NMS), + which should be one of the following options: + + - ``'oks_nms'``: Use Object Keypoint Similarity (OKS) to + perform NMS. + - ``'soft_oks_nms'``: Use Object Keypoint Similarity (OKS) + to perform soft NMS. + - ``'none'``: Do not perform NMS. Typically for bottomup mode + output. + + Defaults to ``'oks_nms'` + nms_thr (float): The Object Keypoint Similarity (OKS) threshold + used in NMS when ``nms_mode`` is ``'oks_nms'`` or + ``'soft_oks_nms'``. Will retain the prediction results with OKS + lower than ``nms_thr``. Defaults to ``0.9`` + format_only (bool): Whether only format the output results without + doing quantitative evaluation. This is designed for the need of + test submission when the ground truth annotations are absent. If + set to ``True``, ``outfile_prefix`` should specify the path to + store the output results. Defaults to ``False`` + outfile_prefix (str | None): The prefix of json files. It includes + the file path and the prefix of filename, e.g., ``'a/b/prefix'``. + If not specified, a temp file will be created. Defaults to ``None`` + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be ``'cpu'`` or + ``'gpu'``. Defaults to ``'cpu'`` + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, ``self.default_prefix`` + will be used instead. 
Defaults to ``None`` + """ + default_prefix: Optional[str] = 'coco' + + def __init__(self, + ann_file: Optional[str] = None, + use_area: bool = True, + iou_type: str = 'keypoints', + score_mode: str = 'bbox_keypoint', + keypoint_score_thr: float = 0.2, + nms_mode: str = 'oks_nms', + nms_thr: float = 0.9, + format_only: bool = False, + outfile_prefix: Optional[str] = None, + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + super().__init__(collect_device=collect_device, prefix=prefix) + self.ann_file = ann_file + # initialize coco helper with the annotation json file + # if ann_file is not specified, initialize with the converted dataset + if ann_file is not None: + with get_local_path(ann_file) as local_path: + self.coco = COCO(local_path) + else: + self.coco = None + + self.use_area = use_area + self.iou_type = iou_type + + allowed_score_modes = ['bbox', 'bbox_keypoint', 'bbox_rle', 'keypoint'] + if score_mode not in allowed_score_modes: + raise ValueError( + "`score_mode` should be one of 'bbox', 'bbox_keypoint', " + f"'bbox_rle', but got {score_mode}") + self.score_mode = score_mode + self.keypoint_score_thr = keypoint_score_thr + + allowed_nms_modes = ['oks_nms', 'soft_oks_nms', 'none'] + if nms_mode not in allowed_nms_modes: + raise ValueError( + "`nms_mode` should be one of 'oks_nms', 'soft_oks_nms', " + f"'none', but got {nms_mode}") + self.nms_mode = nms_mode + self.nms_thr = nms_thr + + if format_only: + assert outfile_prefix is not None, '`outfile_prefix` can not be '\ + 'None when `format_only` is True, otherwise the result file '\ + 'will be saved to a temp directory which will be cleaned up '\ + 'in the end.' + elif ann_file is not None: + # do evaluation only if the ground truth annotations exist + assert 'annotations' in load(ann_file), \ + 'Ground truth annotations are required for evaluation '\ + 'when `format_only` is False.' + + self.format_only = format_only + self.outfile_prefix = outfile_prefix + + def process(self, data_batch: Sequence[dict], + data_samples: Sequence[dict]) -> None: + """Process one batch of data samples and predictions. The processed + results should be stored in ``self.results``, which will be used to + compute the metrics when all batches have been processed. + + Args: + data_batch (Sequence[dict]): A batch of data + from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from + the model, each of which has the following keys: + + - 'id': The id of the sample + - 'img_id': The image_id of the sample + - 'pred_instances': The prediction results of instance(s) + """ + for data_sample in data_samples: + if 'pred_instances' not in data_sample: + raise ValueError( + '`pred_instances` are required to process the ' + f'predictions results in {self.__class__.__name__}. 
') + + # keypoints.shape: [N, K, 2], + # N: number of instances, K: number of keypoints + # for topdown-style output, N is usually 1, while for + # bottomup-style output, N is the number of instances in the image + keypoints = data_sample['pred_instances']['keypoints'] + # [N, K], the scores for all keypoints of all instances + keypoint_scores = data_sample['pred_instances']['keypoint_scores'] + assert keypoint_scores.shape == keypoints.shape[:2] + + # parse prediction results + pred = dict() + pred['id'] = data_sample['id'] + pred['img_id'] = data_sample['img_id'] + pred['keypoints'] = keypoints + pred['keypoint_scores'] = keypoint_scores + pred['category_id'] = data_sample.get('category_id', 1) + + if 'bbox_scores' in data_sample['pred_instances']: + # some one-stage models will predict bboxes and scores + # together with keypoints + bbox_scores = data_sample['pred_instances']['bbox_scores'] + elif ('bbox_scores' not in data_sample['gt_instances'] + or len(data_sample['gt_instances']['bbox_scores']) != + len(keypoints)): + # bottom-up models might output different number of + # instances from annotation + bbox_scores = np.ones(len(keypoints)) + else: + # top-down models use detected bboxes, the scores of which + # are contained in the gt_instances + bbox_scores = data_sample['gt_instances']['bbox_scores'] + pred['bbox_scores'] = bbox_scores + + # get area information + if 'bbox_scales' in data_sample['gt_instances']: + pred['areas'] = np.prod( + data_sample['gt_instances']['bbox_scales'], axis=1) + + # parse gt + gt = dict() + if self.coco is None: + gt['width'] = data_sample['ori_shape'][1] + gt['height'] = data_sample['ori_shape'][0] + gt['img_id'] = data_sample['img_id'] + if self.iou_type == 'keypoints_crowd': + assert 'crowd_index' in data_sample, \ + '`crowd_index` is required when `self.iou_type` is ' \ + '`keypoints_crowd`' + gt['crowd_index'] = data_sample['crowd_index'] + assert 'raw_ann_info' in data_sample, \ + 'The row ground truth annotations are required for ' \ + 'evaluation when `ann_file` is not provided' + anns = data_sample['raw_ann_info'] + gt['raw_ann_info'] = anns if isinstance(anns, list) else [anns] + + # add converted result to the results list + self.results.append((pred, gt)) + + def gt_to_coco_json(self, gt_dicts: Sequence[dict], + outfile_prefix: str) -> str: + """Convert ground truth to coco format json file. + + Args: + gt_dicts (Sequence[dict]): Ground truth of the dataset. Each dict + contains the ground truth information about the data sample. + Required keys of the each `gt_dict` in `gt_dicts`: + - `img_id`: image id of the data sample + - `width`: original image width + - `height`: original image height + - `raw_ann_info`: the raw annotation information + Optional keys: + - `crowd_index`: measure the crowding level of an image, + defined in CrowdPose dataset + It is worth mentioning that, in order to compute `CocoMetric`, + there are some required keys in the `raw_ann_info`: + - `id`: the id to distinguish different annotations + - `image_id`: the image id of this annotation + - `category_id`: the category of the instance. + - `bbox`: the object bounding box + - `keypoints`: the keypoints cooridinates along with their + visibilities. Note that it need to be aligned + with the official COCO format, e.g., a list with length + N * 3, in which N is the number of keypoints. And each + triplet represent the [x, y, visible] of the keypoint. + - `iscrowd`: indicating whether the annotation is a crowd. 
+ It is useful when matching the detection results to + the ground truth. + There are some optional keys as well: + - `area`: it is necessary when `self.use_area` is `True` + - `num_keypoints`: it is necessary when `self.iou_type` + is set as `keypoints_crowd`. + outfile_prefix (str): The filename prefix of the json files. If the + prefix is "somepath/xxx", the json file will be named + "somepath/xxx.gt.json". + Returns: + str: The filename of the json file. + """ + image_infos = [] + annotations = [] + img_ids = [] + ann_ids = [] + + for gt_dict in gt_dicts: + # filter duplicate image_info + if gt_dict['img_id'] not in img_ids: + image_info = dict( + id=gt_dict['img_id'], + width=gt_dict['width'], + height=gt_dict['height'], + ) + if self.iou_type == 'keypoints_crowd': + image_info['crowdIndex'] = gt_dict['crowd_index'] + + image_infos.append(image_info) + img_ids.append(gt_dict['img_id']) + + # filter duplicate annotations + for ann in gt_dict['raw_ann_info']: + if ann is None: + # during evaluation on bottom-up datasets, some images + # do not have instance annotation + continue + + annotation = dict( + id=ann['id'], + image_id=ann['image_id'], + category_id=ann['category_id'], + bbox=ann['bbox'], + keypoints=ann['keypoints'], + iscrowd=ann['iscrowd'], + ) + if self.use_area: + assert 'area' in ann, \ + '`area` is required when `self.use_area` is `True`' + annotation['area'] = ann['area'] + + if self.iou_type == 'keypoints_crowd': + assert 'num_keypoints' in ann, \ + '`num_keypoints` is required when `self.iou_type` ' \ + 'is `keypoints_crowd`' + annotation['num_keypoints'] = ann['num_keypoints'] + + annotations.append(annotation) + ann_ids.append(ann['id']) + + info = dict( + date_created=str(datetime.datetime.now()), + description='Coco json file converted by mmpose CocoMetric.') + coco_json = dict( + info=info, + images=image_infos, + categories=self.dataset_meta['CLASSES'], + licenses=None, + annotations=annotations, + ) + converted_json_path = f'{outfile_prefix}.gt.json' + dump(coco_json, converted_json_path, sort_keys=True, indent=4) + return converted_json_path + + def compute_metrics(self, results: list) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. 
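# A small numeric sketch of the rescoring branches implemented below, using a
# made-up instance with bbox_score 0.8 and five keypoint scores.
import numpy as np

bbox_score = 0.8
keypoint_scores = np.array([0.9, 0.7, 0.1, 0.6, 0.05])
keypoint_score_thr = 0.2

# score_mode == 'keypoint': plain mean of keypoint scores
score_keypoint = keypoint_scores.mean()                      # 0.47

# score_mode == 'bbox_rle': bbox score + mean + max of keypoint scores
score_bbox_rle = float(bbox_score + keypoint_scores.mean() + keypoint_scores.max())

# score_mode == 'bbox_keypoint': bbox score times the mean of keypoint scores
# that pass `keypoint_score_thr` (here 0.9, 0.7 and 0.6)
valid = keypoint_scores[keypoint_scores > keypoint_score_thr]
score_bbox_keypoint = bbox_score * valid.mean()              # 0.8 * 0.7333...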
+ """ + logger: MMLogger = MMLogger.get_current_instance() + + # split prediction and gt list + preds, gts = zip(*results) + + tmp_dir = None + if self.outfile_prefix is None: + tmp_dir = tempfile.TemporaryDirectory() + outfile_prefix = osp.join(tmp_dir.name, 'results') + else: + outfile_prefix = self.outfile_prefix + + if self.coco is None: + # use converted gt json file to initialize coco helper + logger.info('Converting ground truth to coco format...') + coco_json_path = self.gt_to_coco_json( + gt_dicts=gts, outfile_prefix=outfile_prefix) + self.coco = COCO(coco_json_path) + + kpts = defaultdict(list) + + # group the preds by img_id + for pred in preds: + img_id = pred['img_id'] + for idx in range(len(pred['keypoints'])): + instance = { + 'id': pred['id'], + 'img_id': pred['img_id'], + 'category_id': pred['category_id'], + 'keypoints': pred['keypoints'][idx], + 'keypoint_scores': pred['keypoint_scores'][idx], + 'bbox_score': pred['bbox_scores'][idx], + } + + if 'areas' in pred: + instance['area'] = pred['areas'][idx] + else: + # use keypoint to calculate bbox and get area + keypoints = pred['keypoints'][idx] + area = ( + np.max(keypoints[:, 0]) - np.min(keypoints[:, 0])) * ( + np.max(keypoints[:, 1]) - np.min(keypoints[:, 1])) + instance['area'] = area + + kpts[img_id].append(instance) + + # sort keypoint results according to id and remove duplicate ones + kpts = self._sort_and_unique_bboxes(kpts, key='id') + + # score the prediction results according to `score_mode` + # and perform NMS according to `nms_mode` + valid_kpts = defaultdict(list) + num_keypoints = self.dataset_meta['num_keypoints'] + for img_id, instances in kpts.items(): + for instance in instances: + # concatenate the keypoint coordinates and scores + instance['keypoints'] = np.concatenate([ + instance['keypoints'], instance['keypoint_scores'][:, None] + ], + axis=-1) + if self.score_mode == 'bbox': + instance['score'] = instance['bbox_score'] + elif self.score_mode == 'keypoint': + instance['score'] = np.mean(instance['keypoint_scores']) + else: + bbox_score = instance['bbox_score'] + if self.score_mode == 'bbox_rle': + keypoint_scores = instance['keypoint_scores'] + instance['score'] = float(bbox_score + + np.mean(keypoint_scores) + + np.max(keypoint_scores)) + + else: # self.score_mode == 'bbox_keypoint': + mean_kpt_score = 0 + valid_num = 0 + for kpt_idx in range(num_keypoints): + kpt_score = instance['keypoint_scores'][kpt_idx] + if kpt_score > self.keypoint_score_thr: + mean_kpt_score += kpt_score + valid_num += 1 + if valid_num != 0: + mean_kpt_score /= valid_num + instance['score'] = bbox_score * mean_kpt_score + # perform nms + if self.nms_mode == 'none': + valid_kpts[img_id] = instances + else: + nms = oks_nms if self.nms_mode == 'oks_nms' else soft_oks_nms + keep = nms( + instances, + self.nms_thr, + sigmas=self.dataset_meta['sigmas']) + valid_kpts[img_id] = [instances[_keep] for _keep in keep] + + # convert results to coco style and dump into a json file + self.results2json(valid_kpts, outfile_prefix=outfile_prefix) + + # only format the results without doing quantitative evaluation + if self.format_only: + logger.info('results are saved in ' + f'{osp.dirname(outfile_prefix)}') + return {} + + # evaluation results + eval_results = OrderedDict() + logger.info(f'Evaluating {self.__class__.__name__}...') + info_str = self._do_python_keypoint_eval(outfile_prefix) + name_value = OrderedDict(info_str) + eval_results.update(name_value) + + if tmp_dir is not None: + tmp_dir.cleanup() + return eval_results + + def 
results2json(self, keypoints: Dict[int, list], + outfile_prefix: str) -> str: + """Dump the keypoint detection results to a COCO style json file. + + Args: + keypoints (Dict[int, list]): Keypoint detection results + of the dataset. + outfile_prefix (str): The filename prefix of the json files. If the + prefix is "somepath/xxx", the json files will be named + "somepath/xxx.keypoints.json", + + Returns: + str: The json file name of keypoint results. + """ + # the results with category_id + cat_results = [] + + for _, img_kpts in keypoints.items(): + _keypoints = np.array( + [img_kpt['keypoints'] for img_kpt in img_kpts]) + num_keypoints = self.dataset_meta['num_keypoints'] + # collect all the person keypoints in current image + _keypoints = _keypoints.reshape(-1, num_keypoints * 3) + + result = [{ + 'image_id': img_kpt['img_id'], + 'category_id': img_kpt['category_id'], + 'keypoints': keypoint.tolist(), + 'score': float(img_kpt['score']), + } for img_kpt, keypoint in zip(img_kpts, _keypoints)] + + cat_results.extend(result) + + res_file = f'{outfile_prefix}.keypoints.json' + dump(cat_results, res_file, sort_keys=True, indent=4) + + def _do_python_keypoint_eval(self, outfile_prefix: str) -> list: + """Do keypoint evaluation using COCOAPI. + + Args: + outfile_prefix (str): The filename prefix of the json files. If the + prefix is "somepath/xxx", the json files will be named + "somepath/xxx.keypoints.json", + + Returns: + list: a list of tuples. Each tuple contains the evaluation stats + name and corresponding stats value. + """ + res_file = f'{outfile_prefix}.keypoints.json' + coco_det = self.coco.loadRes(res_file) + sigmas = self.dataset_meta['sigmas'] + coco_eval = COCOeval(self.coco, coco_det, self.iou_type, sigmas, + self.use_area) + coco_eval.params.useSegm = None + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + + if self.iou_type == 'keypoints_crowd': + stats_names = [ + 'AP', 'AP .5', 'AP .75', 'AR', 'AR .5', 'AR .75', 'AP(E)', + 'AP(M)', 'AP(H)' + ] + else: + stats_names = [ + 'AP', 'AP .5', 'AP .75', 'AP (M)', 'AP (L)', 'AR', 'AR .5', + 'AR .75', 'AR (M)', 'AR (L)' + ] + + info_str = list(zip(stats_names, coco_eval.stats)) + + return info_str + + def _sort_and_unique_bboxes(self, + kpts: Dict[int, list], + key: str = 'id') -> Dict[int, list]: + """Sort keypoint detection results in each image and remove the + duplicate ones. Usually performed in multi-batch testing. + + Args: + kpts (Dict[int, list]): keypoint prediction results. The keys are + '`img_id`' and the values are list that may contain + keypoints of multiple persons. Each element in the list is a + dict containing the ``'key'`` field. + See the argument ``key`` for details. + key (str): The key name in each person prediction results. The + corresponding value will be used for sorting the results. + Default: ``'id'``. + + Returns: + Dict[int, list]: The sorted keypoint detection results. 
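# A toy illustration of the de-duplication performed below: two instances in the
# same image share id 7, so only one of them survives after sorting by 'id'
# (real entries carry full prediction dicts; only the sorted key matters here).
kpts_in = {42: [dict(id=9), dict(id=7), dict(id=7)]}
# after _sort_and_unique_bboxes(kpts_in, key='id') the expected content is
# {42: [dict(id=7), dict(id=9)]}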
+ """ + for img_id, persons in kpts.items(): + # deal with bottomup-style output + if isinstance(kpts[img_id][0][key], Sequence): + return kpts + num = len(persons) + kpts[img_id] = sorted(kpts[img_id], key=lambda x: x[key]) + for i in range(num - 1, 0, -1): + if kpts[img_id][i][key] == kpts[img_id][i - 1][key]: + del kpts[img_id][i] + + return kpts diff --git a/mmpose/evaluation/metrics/coco_wholebody_metric.py b/mmpose/evaluation/metrics/coco_wholebody_metric.py index c5675f54c8..e42f686d46 100644 --- a/mmpose/evaluation/metrics/coco_wholebody_metric.py +++ b/mmpose/evaluation/metrics/coco_wholebody_metric.py @@ -1,312 +1,312 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import datetime -from typing import Dict, Optional, Sequence - -import numpy as np -from mmengine.fileio import dump -from xtcocotools.cocoeval import COCOeval - -from mmpose.registry import METRICS -from .coco_metric import CocoMetric - - -@METRICS.register_module() -class CocoWholeBodyMetric(CocoMetric): - """COCO-WholeBody evaluation metric. - - Evaluate AR, AP, and mAP for COCO-WholeBody keypoint detection tasks. - Support COCO-WholeBody dataset. Please refer to - `COCO keypoint evaluation `__ - for more details. - - Args: - ann_file (str, optional): Path to the coco format annotation file. - If not specified, ground truth annotations from the dataset will - be converted to coco format. Defaults to None - use_area (bool): Whether to use ``'area'`` message in the annotations. - If the ground truth annotations (e.g. CrowdPose, AIC) do not have - the field ``'area'``, please set ``use_area=False``. - Defaults to ``True`` - iou_type (str): The same parameter as `iouType` in - :class:`xtcocotools.COCOeval`, which can be ``'keypoints'``, or - ``'keypoints_crowd'`` (used in CrowdPose dataset). - Defaults to ``'keypoints'`` - score_mode (str): The mode to score the prediction results which - should be one of the following options: - - - ``'bbox'``: Take the score of bbox as the score of the - prediction results. - - ``'bbox_keypoint'``: Use keypoint score to rescore the - prediction results. - - ``'bbox_rle'``: Use rle_score to rescore the - prediction results. - - Defaults to ``'bbox_keypoint'` - keypoint_score_thr (float): The threshold of keypoint score. The - keypoints with score lower than it will not be included to - rescore the prediction results. Valid only when ``score_mode`` is - ``bbox_keypoint``. Defaults to ``0.2`` - nms_mode (str): The mode to perform Non-Maximum Suppression (NMS), - which should be one of the following options: - - - ``'oks_nms'``: Use Object Keypoint Similarity (OKS) to - perform NMS. - - ``'soft_oks_nms'``: Use Object Keypoint Similarity (OKS) - to perform soft NMS. - - ``'none'``: Do not perform NMS. Typically for bottomup mode - output. - - Defaults to ``'oks_nms'` - nms_thr (float): The Object Keypoint Similarity (OKS) threshold - used in NMS when ``nms_mode`` is ``'oks_nms'`` or - ``'soft_oks_nms'``. Will retain the prediction results with OKS - lower than ``nms_thr``. Defaults to ``0.9`` - format_only (bool): Whether only format the output results without - doing quantitative evaluation. This is designed for the need of - test submission when the ground truth annotations are absent. If - set to ``True``, ``outfile_prefix`` should specify the path to - store the output results. Defaults to ``False`` - outfile_prefix (str | None): The prefix of json files. It includes - the file path and the prefix of filename, e.g., ``'a/b/prefix'``. 
- If not specified, a temp file will be created. Defaults to ``None`` - **kwargs: Keyword parameters passed to :class:`mmeval.BaseMetric` - """ - default_prefix: Optional[str] = 'coco-wholebody' - body_num = 17 - foot_num = 6 - face_num = 68 - left_hand_num = 21 - right_hand_num = 21 - - def gt_to_coco_json(self, gt_dicts: Sequence[dict], - outfile_prefix: str) -> str: - """Convert ground truth to coco format json file. - - Args: - gt_dicts (Sequence[dict]): Ground truth of the dataset. Each dict - contains the ground truth information about the data sample. - Required keys of the each `gt_dict` in `gt_dicts`: - - `img_id`: image id of the data sample - - `width`: original image width - - `height`: original image height - - `raw_ann_info`: the raw annotation information - Optional keys: - - `crowd_index`: measure the crowding level of an image, - defined in CrowdPose dataset - It is worth mentioning that, in order to compute `CocoMetric`, - there are some required keys in the `raw_ann_info`: - - `id`: the id to distinguish different annotations - - `image_id`: the image id of this annotation - - `category_id`: the category of the instance. - - `bbox`: the object bounding box - - `keypoints`: the keypoints cooridinates along with their - visibilities. Note that it need to be aligned - with the official COCO format, e.g., a list with length - N * 3, in which N is the number of keypoints. And each - triplet represent the [x, y, visible] of the keypoint. - - 'keypoints' - - `iscrowd`: indicating whether the annotation is a crowd. - It is useful when matching the detection results to - the ground truth. - There are some optional keys as well: - - `area`: it is necessary when `self.use_area` is `True` - - `num_keypoints`: it is necessary when `self.iou_type` - is set as `keypoints_crowd`. - outfile_prefix (str): The filename prefix of the json files. If the - prefix is "somepath/xxx", the json file will be named - "somepath/xxx.gt.json". - Returns: - str: The filename of the json file. 
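# A hedged sketch of one COCO-WholeBody annotation entry consumed below; in
# addition to the standard COCO fields it carries the part-specific keypoint
# lists, all flattened as [x, y, visible] triplets (values are placeholders).
example_wholebody_ann = dict(
    id=100, image_id=1, category_id=1, iscrowd=0, area=6.0e4,
    bbox=[50.0, 40.0, 200.0, 300.0],
    keypoints=[0.0] * (17 * 3),        # body
    foot_kpts=[0.0] * (6 * 3),
    face_kpts=[0.0] * (68 * 3),
    lefthand_kpts=[0.0] * (21 * 3),
    righthand_kpts=[0.0] * (21 * 3),
)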
- """ - image_infos = [] - annotations = [] - img_ids = [] - ann_ids = [] - - for gt_dict in gt_dicts: - # filter duplicate image_info - if gt_dict['img_id'] not in img_ids: - image_info = dict( - id=gt_dict['img_id'], - width=gt_dict['width'], - height=gt_dict['height'], - ) - if self.iou_type == 'keypoints_crowd': - image_info['crowdIndex'] = gt_dict['crowd_index'] - - image_infos.append(image_info) - img_ids.append(gt_dict['img_id']) - - # filter duplicate annotations - for ann in gt_dict['raw_ann_info']: - annotation = dict( - id=ann['id'], - image_id=ann['image_id'], - category_id=ann['category_id'], - bbox=ann['bbox'], - keypoints=ann['keypoints'], - foot_kpts=ann['foot_kpts'], - face_kpts=ann['face_kpts'], - lefthand_kpts=ann['lefthand_kpts'], - righthand_kpts=ann['righthand_kpts'], - iscrowd=ann['iscrowd'], - ) - if self.use_area: - assert 'area' in ann, \ - '`area` is required when `self.use_area` is `True`' - annotation['area'] = ann['area'] - - annotations.append(annotation) - ann_ids.append(ann['id']) - - info = dict( - date_created=str(datetime.datetime.now()), - description='Coco json file converted by mmpose CocoMetric.') - coco_json: dict = dict( - info=info, - images=image_infos, - categories=self.dataset_meta['CLASSES'], - licenses=None, - annotations=annotations, - ) - converted_json_path = f'{outfile_prefix}.gt.json' - dump(coco_json, converted_json_path, sort_keys=True, indent=4) - return converted_json_path - - def results2json(self, keypoints: Dict[int, list], - outfile_prefix: str) -> str: - """Dump the keypoint detection results to a COCO style json file. - - Args: - keypoints (Dict[int, list]): Keypoint detection results - of the dataset. - outfile_prefix (str): The filename prefix of the json files. If the - prefix is "somepath/xxx", the json files will be named - "somepath/xxx.keypoints.json", - - Returns: - str: The json file name of keypoint results. - """ - # the results with category_id - cat_id = 1 - cat_results = [] - - cuts = np.cumsum([ - 0, self.body_num, self.foot_num, self.face_num, self.left_hand_num, - self.right_hand_num - ]) * 3 - - for _, img_kpts in keypoints.items(): - _keypoints = np.array( - [img_kpt['keypoints'] for img_kpt in img_kpts]) - num_keypoints = self.dataset_meta['num_keypoints'] - # collect all the person keypoints in current image - _keypoints = _keypoints.reshape(-1, num_keypoints * 3) - - result = [{ - 'image_id': img_kpt['img_id'], - 'category_id': cat_id, - 'keypoints': _keypoint[cuts[0]:cuts[1]].tolist(), - 'foot_kpts': _keypoint[cuts[1]:cuts[2]].tolist(), - 'face_kpts': _keypoint[cuts[2]:cuts[3]].tolist(), - 'lefthand_kpts': _keypoint[cuts[3]:cuts[4]].tolist(), - 'righthand_kpts': _keypoint[cuts[4]:cuts[5]].tolist(), - 'score': float(img_kpt['score']), - } for img_kpt, _keypoint in zip(img_kpts, _keypoints)] - - cat_results.extend(result) - - res_file = f'{outfile_prefix}.keypoints.json' - dump(cat_results, res_file, sort_keys=True, indent=4) - - def _do_python_keypoint_eval(self, outfile_prefix: str) -> list: - """Do keypoint evaluation using COCOAPI. - - Args: - outfile_prefix (str): The filename prefix of the json files. If the - prefix is "somepath/xxx", the json files will be named - "somepath/xxx.keypoints.json", - - Returns: - list: a list of tuples. Each tuple contains the evaluation stats - name and corresponding stats value. 
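# The slicing used in `results2json` above, spelled out: a whole-body prediction
# has 17 + 6 + 68 + 21 + 21 = 133 keypoints, stored as a flat vector of
# [x, y, score] triplets, and `cuts` marks the part boundaries in that vector.
import numpy as np

cuts = np.cumsum([0, 17, 6, 68, 21, 21]) * 3    # [0, 51, 69, 273, 336, 399]
flat = np.arange(133 * 3, dtype=float)          # stand-in for one prediction
body       = flat[cuts[0]:cuts[1]]              # 17 * 3 = 51 values
foot       = flat[cuts[1]:cuts[2]]              # 6 * 3 = 18 values
face       = flat[cuts[2]:cuts[3]]              # 68 * 3 = 204 values
left_hand  = flat[cuts[3]:cuts[4]]              # 21 * 3 = 63 values
right_hand = flat[cuts[4]:cuts[5]]              # 21 * 3 = 63 values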
- """ - res_file = f'{outfile_prefix}.keypoints.json' - coco_det = self.coco.loadRes(res_file) - sigmas = self.dataset_meta['sigmas'] - - cuts = np.cumsum([ - 0, self.body_num, self.foot_num, self.face_num, self.left_hand_num, - self.right_hand_num - ]) - - coco_eval = COCOeval( - self.coco, - coco_det, - 'keypoints_body', - sigmas[cuts[0]:cuts[1]], - use_area=True) - coco_eval.params.useSegm = None - coco_eval.evaluate() - coco_eval.accumulate() - coco_eval.summarize() - - coco_eval = COCOeval( - self.coco, - coco_det, - 'keypoints_foot', - sigmas[cuts[1]:cuts[2]], - use_area=True) - coco_eval.params.useSegm = None - coco_eval.evaluate() - coco_eval.accumulate() - coco_eval.summarize() - - coco_eval = COCOeval( - self.coco, - coco_det, - 'keypoints_face', - sigmas[cuts[2]:cuts[3]], - use_area=True) - coco_eval.params.useSegm = None - coco_eval.evaluate() - coco_eval.accumulate() - coco_eval.summarize() - - coco_eval = COCOeval( - self.coco, - coco_det, - 'keypoints_lefthand', - sigmas[cuts[3]:cuts[4]], - use_area=True) - coco_eval.params.useSegm = None - coco_eval.evaluate() - coco_eval.accumulate() - coco_eval.summarize() - - coco_eval = COCOeval( - self.coco, - coco_det, - 'keypoints_righthand', - sigmas[cuts[4]:cuts[5]], - use_area=True) - coco_eval.params.useSegm = None - coco_eval.evaluate() - coco_eval.accumulate() - coco_eval.summarize() - - coco_eval = COCOeval( - self.coco, coco_det, 'keypoints_wholebody', sigmas, use_area=True) - coco_eval.params.useSegm = None - coco_eval.evaluate() - coco_eval.accumulate() - coco_eval.summarize() - - stats_names = [ - 'AP', 'AP .5', 'AP .75', 'AP (M)', 'AP (L)', 'AR', 'AR .5', - 'AR .75', 'AR (M)', 'AR (L)' - ] - - info_str = list(zip(stats_names, coco_eval.stats)) - - return info_str +# Copyright (c) OpenMMLab. All rights reserved. +import datetime +from typing import Dict, Optional, Sequence + +import numpy as np +from mmengine.fileio import dump +from xtcocotools.cocoeval import COCOeval + +from mmpose.registry import METRICS +from .coco_metric import CocoMetric + + +@METRICS.register_module() +class CocoWholeBodyMetric(CocoMetric): + """COCO-WholeBody evaluation metric. + + Evaluate AR, AP, and mAP for COCO-WholeBody keypoint detection tasks. + Support COCO-WholeBody dataset. Please refer to + `COCO keypoint evaluation `__ + for more details. + + Args: + ann_file (str, optional): Path to the coco format annotation file. + If not specified, ground truth annotations from the dataset will + be converted to coco format. Defaults to None + use_area (bool): Whether to use ``'area'`` message in the annotations. + If the ground truth annotations (e.g. CrowdPose, AIC) do not have + the field ``'area'``, please set ``use_area=False``. + Defaults to ``True`` + iou_type (str): The same parameter as `iouType` in + :class:`xtcocotools.COCOeval`, which can be ``'keypoints'``, or + ``'keypoints_crowd'`` (used in CrowdPose dataset). + Defaults to ``'keypoints'`` + score_mode (str): The mode to score the prediction results which + should be one of the following options: + + - ``'bbox'``: Take the score of bbox as the score of the + prediction results. + - ``'bbox_keypoint'``: Use keypoint score to rescore the + prediction results. + - ``'bbox_rle'``: Use rle_score to rescore the + prediction results. + + Defaults to ``'bbox_keypoint'` + keypoint_score_thr (float): The threshold of keypoint score. The + keypoints with score lower than it will not be included to + rescore the prediction results. Valid only when ``score_mode`` is + ``bbox_keypoint``. 
Defaults to ``0.2`` + nms_mode (str): The mode to perform Non-Maximum Suppression (NMS), + which should be one of the following options: + + - ``'oks_nms'``: Use Object Keypoint Similarity (OKS) to + perform NMS. + - ``'soft_oks_nms'``: Use Object Keypoint Similarity (OKS) + to perform soft NMS. + - ``'none'``: Do not perform NMS. Typically for bottomup mode + output. + + Defaults to ``'oks_nms'` + nms_thr (float): The Object Keypoint Similarity (OKS) threshold + used in NMS when ``nms_mode`` is ``'oks_nms'`` or + ``'soft_oks_nms'``. Will retain the prediction results with OKS + lower than ``nms_thr``. Defaults to ``0.9`` + format_only (bool): Whether only format the output results without + doing quantitative evaluation. This is designed for the need of + test submission when the ground truth annotations are absent. If + set to ``True``, ``outfile_prefix`` should specify the path to + store the output results. Defaults to ``False`` + outfile_prefix (str | None): The prefix of json files. It includes + the file path and the prefix of filename, e.g., ``'a/b/prefix'``. + If not specified, a temp file will be created. Defaults to ``None`` + **kwargs: Keyword parameters passed to :class:`mmeval.BaseMetric` + """ + default_prefix: Optional[str] = 'coco-wholebody' + body_num = 17 + foot_num = 6 + face_num = 68 + left_hand_num = 21 + right_hand_num = 21 + + def gt_to_coco_json(self, gt_dicts: Sequence[dict], + outfile_prefix: str) -> str: + """Convert ground truth to coco format json file. + + Args: + gt_dicts (Sequence[dict]): Ground truth of the dataset. Each dict + contains the ground truth information about the data sample. + Required keys of the each `gt_dict` in `gt_dicts`: + - `img_id`: image id of the data sample + - `width`: original image width + - `height`: original image height + - `raw_ann_info`: the raw annotation information + Optional keys: + - `crowd_index`: measure the crowding level of an image, + defined in CrowdPose dataset + It is worth mentioning that, in order to compute `CocoMetric`, + there are some required keys in the `raw_ann_info`: + - `id`: the id to distinguish different annotations + - `image_id`: the image id of this annotation + - `category_id`: the category of the instance. + - `bbox`: the object bounding box + - `keypoints`: the keypoints cooridinates along with their + visibilities. Note that it need to be aligned + with the official COCO format, e.g., a list with length + N * 3, in which N is the number of keypoints. And each + triplet represent the [x, y, visible] of the keypoint. + - 'keypoints' + - `iscrowd`: indicating whether the annotation is a crowd. + It is useful when matching the detection results to + the ground truth. + There are some optional keys as well: + - `area`: it is necessary when `self.use_area` is `True` + - `num_keypoints`: it is necessary when `self.iou_type` + is set as `keypoints_crowd`. + outfile_prefix (str): The filename prefix of the json files. If the + prefix is "somepath/xxx", the json file will be named + "somepath/xxx.gt.json". + Returns: + str: The filename of the json file. 
+ """ + image_infos = [] + annotations = [] + img_ids = [] + ann_ids = [] + + for gt_dict in gt_dicts: + # filter duplicate image_info + if gt_dict['img_id'] not in img_ids: + image_info = dict( + id=gt_dict['img_id'], + width=gt_dict['width'], + height=gt_dict['height'], + ) + if self.iou_type == 'keypoints_crowd': + image_info['crowdIndex'] = gt_dict['crowd_index'] + + image_infos.append(image_info) + img_ids.append(gt_dict['img_id']) + + # filter duplicate annotations + for ann in gt_dict['raw_ann_info']: + annotation = dict( + id=ann['id'], + image_id=ann['image_id'], + category_id=ann['category_id'], + bbox=ann['bbox'], + keypoints=ann['keypoints'], + foot_kpts=ann['foot_kpts'], + face_kpts=ann['face_kpts'], + lefthand_kpts=ann['lefthand_kpts'], + righthand_kpts=ann['righthand_kpts'], + iscrowd=ann['iscrowd'], + ) + if self.use_area: + assert 'area' in ann, \ + '`area` is required when `self.use_area` is `True`' + annotation['area'] = ann['area'] + + annotations.append(annotation) + ann_ids.append(ann['id']) + + info = dict( + date_created=str(datetime.datetime.now()), + description='Coco json file converted by mmpose CocoMetric.') + coco_json: dict = dict( + info=info, + images=image_infos, + categories=self.dataset_meta['CLASSES'], + licenses=None, + annotations=annotations, + ) + converted_json_path = f'{outfile_prefix}.gt.json' + dump(coco_json, converted_json_path, sort_keys=True, indent=4) + return converted_json_path + + def results2json(self, keypoints: Dict[int, list], + outfile_prefix: str) -> str: + """Dump the keypoint detection results to a COCO style json file. + + Args: + keypoints (Dict[int, list]): Keypoint detection results + of the dataset. + outfile_prefix (str): The filename prefix of the json files. If the + prefix is "somepath/xxx", the json files will be named + "somepath/xxx.keypoints.json", + + Returns: + str: The json file name of keypoint results. + """ + # the results with category_id + cat_id = 1 + cat_results = [] + + cuts = np.cumsum([ + 0, self.body_num, self.foot_num, self.face_num, self.left_hand_num, + self.right_hand_num + ]) * 3 + + for _, img_kpts in keypoints.items(): + _keypoints = np.array( + [img_kpt['keypoints'] for img_kpt in img_kpts]) + num_keypoints = self.dataset_meta['num_keypoints'] + # collect all the person keypoints in current image + _keypoints = _keypoints.reshape(-1, num_keypoints * 3) + + result = [{ + 'image_id': img_kpt['img_id'], + 'category_id': cat_id, + 'keypoints': _keypoint[cuts[0]:cuts[1]].tolist(), + 'foot_kpts': _keypoint[cuts[1]:cuts[2]].tolist(), + 'face_kpts': _keypoint[cuts[2]:cuts[3]].tolist(), + 'lefthand_kpts': _keypoint[cuts[3]:cuts[4]].tolist(), + 'righthand_kpts': _keypoint[cuts[4]:cuts[5]].tolist(), + 'score': float(img_kpt['score']), + } for img_kpt, _keypoint in zip(img_kpts, _keypoints)] + + cat_results.extend(result) + + res_file = f'{outfile_prefix}.keypoints.json' + dump(cat_results, res_file, sort_keys=True, indent=4) + + def _do_python_keypoint_eval(self, outfile_prefix: str) -> list: + """Do keypoint evaluation using COCOAPI. + + Args: + outfile_prefix (str): The filename prefix of the json files. If the + prefix is "somepath/xxx", the json files will be named + "somepath/xxx.keypoints.json", + + Returns: + list: a list of tuples. Each tuple contains the evaluation stats + name and corresponding stats value. 
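# The six evaluations below follow one pattern; a compact equivalent (a sketch
# only, not the code in this file) would loop over (iou_type, sigma slice) pairs:
#
#     parts = ['keypoints_body', 'keypoints_foot', 'keypoints_face',
#              'keypoints_lefthand', 'keypoints_righthand']
#     for iou_type, (lo, hi) in zip(parts, zip(cuts[:-1], cuts[1:])):
#         coco_eval = COCOeval(self.coco, coco_det, iou_type,
#                              sigmas[lo:hi], use_area=True)
#         coco_eval.params.useSegm = None
#         coco_eval.evaluate()
#         coco_eval.accumulate()
#         coco_eval.summarize()
#     # followed by one final run with iou_type 'keypoints_wholebody' and all sigmas,
#     # whose stats populate the reported AP/AR values.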
+ """ + res_file = f'{outfile_prefix}.keypoints.json' + coco_det = self.coco.loadRes(res_file) + sigmas = self.dataset_meta['sigmas'] + + cuts = np.cumsum([ + 0, self.body_num, self.foot_num, self.face_num, self.left_hand_num, + self.right_hand_num + ]) + + coco_eval = COCOeval( + self.coco, + coco_det, + 'keypoints_body', + sigmas[cuts[0]:cuts[1]], + use_area=True) + coco_eval.params.useSegm = None + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + + coco_eval = COCOeval( + self.coco, + coco_det, + 'keypoints_foot', + sigmas[cuts[1]:cuts[2]], + use_area=True) + coco_eval.params.useSegm = None + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + + coco_eval = COCOeval( + self.coco, + coco_det, + 'keypoints_face', + sigmas[cuts[2]:cuts[3]], + use_area=True) + coco_eval.params.useSegm = None + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + + coco_eval = COCOeval( + self.coco, + coco_det, + 'keypoints_lefthand', + sigmas[cuts[3]:cuts[4]], + use_area=True) + coco_eval.params.useSegm = None + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + + coco_eval = COCOeval( + self.coco, + coco_det, + 'keypoints_righthand', + sigmas[cuts[4]:cuts[5]], + use_area=True) + coco_eval.params.useSegm = None + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + + coco_eval = COCOeval( + self.coco, coco_det, 'keypoints_wholebody', sigmas, use_area=True) + coco_eval.params.useSegm = None + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + + stats_names = [ + 'AP', 'AP .5', 'AP .75', 'AP (M)', 'AP (L)', 'AR', 'AR .5', + 'AR .75', 'AR (M)', 'AR (L)' + ] + + info_str = list(zip(stats_names, coco_eval.stats)) + + return info_str diff --git a/mmpose/evaluation/metrics/keypoint_2d_metrics.py b/mmpose/evaluation/metrics/keypoint_2d_metrics.py index 5c8d23ac08..67206575d6 100644 --- a/mmpose/evaluation/metrics/keypoint_2d_metrics.py +++ b/mmpose/evaluation/metrics/keypoint_2d_metrics.py @@ -1,912 +1,912 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings -from typing import Dict, Optional, Sequence, Union - -import numpy as np -from mmengine.evaluator import BaseMetric -from mmengine.logging import MMLogger - -from mmpose.registry import METRICS -from ..functional import (keypoint_auc, keypoint_epe, keypoint_nme, - keypoint_pck_accuracy) - - -@METRICS.register_module() -class PCKAccuracy(BaseMetric): - """PCK accuracy evaluation metric. - Calculate the pose accuracy of Percentage of Correct Keypoints (PCK) for - each individual keypoint and the averaged accuracy across all keypoints. - PCK metric measures accuracy of the localization of the body joints. - The distances between predicted positions and the ground-truth ones - are typically normalized by the person bounding box size. - The threshold (thr) of the normalized distance is commonly set - as 0.05, 0.1 or 0.2 etc. - Note: - - length of dataset: N - - num_keypoints: K - - number of keypoint dimensions: D (typically D = 2) - Args: - thr(float): Threshold of PCK calculation. Default: 0.05. - norm_item (str | Sequence[str]): The item used for normalization. - Valid items include 'bbox', 'head', 'torso', which correspond - to 'PCK', 'PCKh' and 'tPCK' respectively. Default: ``'bbox'``. - collect_device (str): Device name used for collecting results from - different ranks during distributed training. Must be ``'cpu'`` or - ``'gpu'``. Default: ``'cpu'``. 
- prefix (str, optional): The prefix that will be added in the metric - names to disambiguate homonymous metrics of different evaluators. - If prefix is not provided in the argument, ``self.default_prefix`` - will be used instead. Default: ``None``. - - Examples: - - >>> from mmpose.evaluation.metrics import PCKAccuracy - >>> import numpy as np - >>> from mmengine.structures import InstanceData - >>> num_keypoints = 15 - >>> keypoints = np.random.random((1, num_keypoints, 2)) * 10 - >>> gt_instances = InstanceData() - >>> gt_instances.keypoints = keypoints - >>> gt_instances.keypoints_visible = np.ones( - ... (1, num_keypoints, 1)).astype(bool) - >>> gt_instances.bboxes = np.random.random((1, 4)) * 20 - >>> pred_instances = InstanceData() - >>> pred_instances.keypoints = keypoints - >>> data_sample = { - ... 'gt_instances': gt_instances.to_dict(), - ... 'pred_instances': pred_instances.to_dict(), - ... } - >>> data_samples = [data_sample] - >>> data_batch = [{'inputs': None}] - >>> pck_metric = PCKAccuracy(thr=0.5, norm_item='bbox') - ...: UserWarning: The prefix is not set in metric class PCKAccuracy. - >>> pck_metric.process(data_batch, data_samples) - >>> pck_metric.evaluate(1) - 10/26 15:37:57 - mmengine - INFO - Evaluating PCKAccuracy (normalized by ``"bbox_size"``)... # noqa - {'PCK': 1.0} - - """ - - def __init__(self, - thr: float = 0.05, - norm_item: Union[str, Sequence[str]] = 'bbox', - collect_device: str = 'cpu', - prefix: Optional[str] = None) -> None: - super().__init__(collect_device=collect_device, prefix=prefix) - self.thr = thr - self.norm_item = norm_item if isinstance(norm_item, - (tuple, - list)) else [norm_item] - allow_normalized_items = ['bbox', 'head', 'torso'] - for item in self.norm_item: - if item not in allow_normalized_items: - raise KeyError( - f'The normalized item {item} is not supported by ' - f"{self.__class__.__name__}. Should be one of 'bbox', " - f"'head', 'torso', but got {item}.") - - def process(self, data_batch: Sequence[dict], - data_samples: Sequence[dict]) -> None: - """Process one batch of data samples and predictions. - - The processed - results should be stored in ``self.results``, which will be used to - compute the metrics when all batches have been processed. - Args: - data_batch (Sequence[dict]): A batch of data - from the dataloader. - data_samples (Sequence[dict]): A batch of outputs from - the model. - """ - for data_sample in data_samples: - # predicted keypoints coordinates, [1, K, D] - pred_coords = data_sample['pred_instances']['keypoints'] - # ground truth data_info - gt = data_sample['gt_instances'] - # ground truth keypoints coordinates, [1, K, D] - gt_coords = gt['keypoints'] - # ground truth keypoints_visible, [1, K, 1] - mask = gt['keypoints_visible'].astype(bool).reshape(1, -1) - - result = { - 'pred_coords': pred_coords, - 'gt_coords': gt_coords, - 'mask': mask, - } - - if 'bbox' in self.norm_item: - assert 'bboxes' in gt, 'The ground truth data info do not ' \ - 'have the expected normalized_item ``"bbox"``.' - # ground truth bboxes, [1, 4] - bbox_size_ = np.max(gt['bboxes'][0][2:] - gt['bboxes'][0][:2]) - bbox_size = np.array([bbox_size_, bbox_size_]).reshape(-1, 2) - result['bbox_size'] = bbox_size - - if 'head' in self.norm_item: - assert 'head_size' in gt, 'The ground truth data info do ' \ - 'not have the expected normalized_item ``"head_size"``.' 
- # ground truth bboxes - head_size_ = gt['head_size'] - head_size = np.array([head_size_, head_size_]).reshape(-1, 2) - result['head_size'] = head_size - - if 'torso' in self.norm_item: - # used in JhmdbDataset - torso_size_ = np.linalg.norm(gt_coords[0][4] - gt_coords[0][5]) - if torso_size_ < 1: - torso_size_ = np.linalg.norm(pred_coords[0][4] - - pred_coords[0][5]) - warnings.warn('Ground truth torso size < 1. ' - 'Use torso size from predicted ' - 'keypoint results instead.') - torso_size = np.array([torso_size_, - torso_size_]).reshape(-1, 2) - result['torso_size'] = torso_size - - self.results.append(result) - - def compute_metrics(self, results: list) -> Dict[str, float]: - """Compute the metrics from processed results. - - Args: - results (list): The processed results of each batch. - Returns: - Dict[str, float]: The computed metrics. The keys are the names of - the metrics, and the values are corresponding results. - The returned result dict may have the following keys: - - 'PCK': The pck accuracy normalized by `bbox_size`. - - 'PCKh': The pck accuracy normalized by `head_size`. - - 'tPCK': The pck accuracy normalized by `torso_size`. - """ - logger: MMLogger = MMLogger.get_current_instance() - - # pred_coords: [N, K, D] - pred_coords = np.concatenate( - [result['pred_coords'] for result in results]) - # gt_coords: [N, K, D] - gt_coords = np.concatenate([result['gt_coords'] for result in results]) - # mask: [N, K] - mask = np.concatenate([result['mask'] for result in results]) - - metrics = dict() - if 'bbox' in self.norm_item: - norm_size_bbox = np.concatenate( - [result['bbox_size'] for result in results]) - - logger.info(f'Evaluating {self.__class__.__name__} ' - f'(normalized by ``"bbox_size"``)...') - - _, pck, _ = keypoint_pck_accuracy(pred_coords, gt_coords, mask, - self.thr, norm_size_bbox) - metrics['PCK'] = pck - - if 'head' in self.norm_item: - norm_size_head = np.concatenate( - [result['head_size'] for result in results]) - - logger.info(f'Evaluating {self.__class__.__name__} ' - f'(normalized by ``"head_size"``)...') - - _, pckh, _ = keypoint_pck_accuracy(pred_coords, gt_coords, mask, - self.thr, norm_size_head) - metrics['PCKh'] = pckh - - if 'torso' in self.norm_item: - norm_size_torso = np.concatenate( - [result['torso_size'] for result in results]) - - logger.info(f'Evaluating {self.__class__.__name__} ' - f'(normalized by ``"torso_size"``)...') - - _, tpck, _ = keypoint_pck_accuracy(pred_coords, gt_coords, mask, - self.thr, norm_size_torso) - metrics['tPCK'] = tpck - - return metrics - - -@METRICS.register_module() -class MpiiPCKAccuracy(PCKAccuracy): - """PCKh accuracy evaluation metric for MPII dataset. - - Calculate the pose accuracy of Percentage of Correct Keypoints (PCK) for - each individual keypoint and the averaged accuracy across all keypoints. - PCK metric measures accuracy of the localization of the body joints. - The distances between predicted positions and the ground-truth ones - are typically normalized by the person bounding box size. - The threshold (thr) of the normalized distance is commonly set - as 0.05, 0.1 or 0.2 etc. - - Note: - - length of dataset: N - - num_keypoints: K - - number of keypoint dimensions: D (typically D = 2) - - Args: - thr(float): Threshold of PCK calculation. Default: 0.05. - norm_item (str | Sequence[str]): The item used for normalization. - Valid items include 'bbox', 'head', 'torso', which correspond - to 'PCK', 'PCKh' and 'tPCK' respectively. Default: ``'head'``. 
- collect_device (str): Device name used for collecting results from - different ranks during distributed training. Must be ``'cpu'`` or - ``'gpu'``. Default: ``'cpu'``. - prefix (str, optional): The prefix that will be added in the metric - names to disambiguate homonymous metrics of different evaluators. - If prefix is not provided in the argument, ``self.default_prefix`` - will be used instead. Default: ``None``. - - Examples: - - >>> from mmpose.evaluation.metrics import MpiiPCKAccuracy - >>> import numpy as np - >>> from mmengine.structures import InstanceData - >>> num_keypoints = 16 - >>> keypoints = np.random.random((1, num_keypoints, 2)) * 10 - >>> gt_instances = InstanceData() - >>> gt_instances.keypoints = keypoints + 1.0 - >>> gt_instances.keypoints_visible = np.ones( - ... (1, num_keypoints, 1)).astype(bool) - >>> gt_instances.head_size = np.random.random((1, 1)) * 10 - >>> pred_instances = InstanceData() - >>> pred_instances.keypoints = keypoints - >>> data_sample = { - ... 'gt_instances': gt_instances.to_dict(), - ... 'pred_instances': pred_instances.to_dict(), - ... } - >>> data_samples = [data_sample] - >>> data_batch = [{'inputs': None}] - >>> mpii_pck_metric = MpiiPCKAccuracy(thr=0.3, norm_item='head') - ... UserWarning: The prefix is not set in metric class MpiiPCKAccuracy. - >>> mpii_pck_metric.process(data_batch, data_samples) - >>> mpii_pck_metric.evaluate(1) - 10/26 17:43:39 - mmengine - INFO - Evaluating MpiiPCKAccuracy (normalized by ``"head_size"``)... # noqa - {'Head PCK': 100.0, 'Shoulder PCK': 100.0, 'Elbow PCK': 100.0, - Wrist PCK': 100.0, 'Hip PCK': 100.0, 'Knee PCK': 100.0, - 'Ankle PCK': 100.0, 'PCK': 100.0, 'PCK@0.1': 100.0} - """ - - def __init__(self, - thr: float = 0.5, - norm_item: Union[str, Sequence[str]] = 'head', - collect_device: str = 'cpu', - prefix: Optional[str] = None) -> None: - super().__init__( - thr=thr, - norm_item=norm_item, - collect_device=collect_device, - prefix=prefix) - - def compute_metrics(self, results: list) -> Dict[str, float]: - """Compute the metrics from processed results. - - Args: - results (list): The processed results of each batch. - - Returns: - Dict[str, float]: The computed metrics. The keys are the names of - the metrics, and the values are corresponding results. 
- If `'head'` in `self.norm_item`, the returned results are the pck - accuracy normalized by `head_size`, which have the following keys: - - 'Head PCK': The PCK of head - - 'Shoulder PCK': The PCK of shoulder - - 'Elbow PCK': The PCK of elbow - - 'Wrist PCK': The PCK of wrist - - 'Hip PCK': The PCK of hip - - 'Knee PCK': The PCK of knee - - 'Ankle PCK': The PCK of ankle - - 'PCK': The mean PCK over all keypoints - - 'PCK@0.1': The mean PCK at threshold 0.1 - """ - logger: MMLogger = MMLogger.get_current_instance() - - # pred_coords: [N, K, D] - pred_coords = np.concatenate( - [result['pred_coords'] for result in results]) - # gt_coords: [N, K, D] - gt_coords = np.concatenate([result['gt_coords'] for result in results]) - # mask: [N, K] - mask = np.concatenate([result['mask'] for result in results]) - - # MPII uses matlab format, gt index is 1-based, - # convert 0-based index to 1-based index - pred_coords = pred_coords + 1.0 - - metrics = {} - if 'head' in self.norm_item: - norm_size_head = np.concatenate( - [result['head_size'] for result in results]) - - logger.info(f'Evaluating {self.__class__.__name__} ' - f'(normalized by ``"head_size"``)...') - - pck_p, _, _ = keypoint_pck_accuracy(pred_coords, gt_coords, mask, - self.thr, norm_size_head) - - jnt_count = np.sum(mask, axis=0) - PCKh = 100. * pck_p - - rng = np.arange(0, 0.5 + 0.01, 0.01) - pckAll = np.zeros((len(rng), 16), dtype=np.float32) - - for r, threshold in enumerate(rng): - _pck, _, _ = keypoint_pck_accuracy(pred_coords, gt_coords, - mask, threshold, - norm_size_head) - pckAll[r, :] = 100. * _pck - - PCKh = np.ma.array(PCKh, mask=False) - PCKh.mask[6:8] = True - - jnt_count = np.ma.array(jnt_count, mask=False) - jnt_count.mask[6:8] = True - jnt_ratio = jnt_count / np.sum(jnt_count).astype(np.float64) - - # dataset_joints_idx: - # head 9 - # lsho 13 rsho 12 - # lelb 14 relb 11 - # lwri 15 rwri 10 - # lhip 3 rhip 2 - # lkne 4 rkne 1 - # lank 5 rank 0 - stats = { - 'Head PCK': PCKh[9], - 'Shoulder PCK': 0.5 * (PCKh[13] + PCKh[12]), - 'Elbow PCK': 0.5 * (PCKh[14] + PCKh[11]), - 'Wrist PCK': 0.5 * (PCKh[15] + PCKh[10]), - 'Hip PCK': 0.5 * (PCKh[3] + PCKh[2]), - 'Knee PCK': 0.5 * (PCKh[4] + PCKh[1]), - 'Ankle PCK': 0.5 * (PCKh[5] + PCKh[0]), - 'PCK': np.sum(PCKh * jnt_ratio), - 'PCK@0.1': np.sum(pckAll[10, :] * jnt_ratio) - } - - for stats_name, stat in stats.items(): - metrics[stats_name] = stat - - return metrics - - -@METRICS.register_module() -class JhmdbPCKAccuracy(PCKAccuracy): - """PCK accuracy evaluation metric for Jhmdb dataset. - - Calculate the pose accuracy of Percentage of Correct Keypoints (PCK) for - each individual keypoint and the averaged accuracy across all keypoints. - PCK metric measures accuracy of the localization of the body joints. - The distances between predicted positions and the ground-truth ones - are typically normalized by the person bounding box size. - The threshold (thr) of the normalized distance is commonly set - as 0.05, 0.1 or 0.2 etc. - - Note: - - length of dataset: N - - num_keypoints: K - - number of keypoint dimensions: D (typically D = 2) - - Args: - thr(float): Threshold of PCK calculation. Default: 0.05. - norm_item (str | Sequence[str]): The item used for normalization. - Valid items include 'bbox', 'head', 'torso', which correspond - to 'PCK', 'PCKh' and 'tPCK' respectively. Default: ``'bbox'``. - collect_device (str): Device name used for collecting results from - different ranks during distributed training. Must be ``'cpu'`` or - ``'gpu'``. Default: ``'cpu'``. 
- prefix (str, optional): The prefix that will be added in the metric - names to disambiguate homonymous metrics of different evaluators. - If prefix is not provided in the argument, ``self.default_prefix`` - will be used instead. Default: ``None``. - - Examples: - - >>> from mmpose.evaluation.metrics import JhmdbPCKAccuracy - >>> import numpy as np - >>> from mmengine.structures import InstanceData - >>> num_keypoints = 15 - >>> keypoints = np.random.random((1, num_keypoints, 2)) * 10 - >>> gt_instances = InstanceData() - >>> gt_instances.keypoints = keypoints - >>> gt_instances.keypoints_visible = np.ones( - ... (1, num_keypoints, 1)).astype(bool) - >>> gt_instances.bboxes = np.random.random((1, 4)) * 20 - >>> gt_instances.head_size = np.random.random((1, 1)) * 10 - >>> pred_instances = InstanceData() - >>> pred_instances.keypoints = keypoints - >>> data_sample = { - ... 'gt_instances': gt_instances.to_dict(), - ... 'pred_instances': pred_instances.to_dict(), - ... } - >>> data_samples = [data_sample] - >>> data_batch = [{'inputs': None}] - >>> jhmdb_pck_metric = JhmdbPCKAccuracy(thr=0.2, norm_item=['bbox', 'torso']) - ... UserWarning: The prefix is not set in metric class JhmdbPCKAccuracy. - >>> jhmdb_pck_metric.process(data_batch, data_samples) - >>> jhmdb_pck_metric.evaluate(1) - 10/26 17:48:09 - mmengine - INFO - Evaluating JhmdbPCKAccuracy (normalized by ``"bbox_size"``)... # noqa - 10/26 17:48:09 - mmengine - INFO - Evaluating JhmdbPCKAccuracy (normalized by ``"torso_size"``)... # noqa - {'Head PCK': 1.0, 'Sho PCK': 1.0, 'Elb PCK': 1.0, 'Wri PCK': 1.0, - 'Hip PCK': 1.0, 'Knee PCK': 1.0, 'Ank PCK': 1.0, 'PCK': 1.0, - 'Head tPCK': 1.0, 'Sho tPCK': 1.0, 'Elb tPCK': 1.0, 'Wri tPCK': 1.0, - 'Hip tPCK': 1.0, 'Knee tPCK': 1.0, 'Ank tPCK': 1.0, 'tPCK': 1.0} - """ - - def __init__(self, - thr: float = 0.05, - norm_item: Union[str, Sequence[str]] = 'bbox', - collect_device: str = 'cpu', - prefix: Optional[str] = None) -> None: - super().__init__( - thr=thr, - norm_item=norm_item, - collect_device=collect_device, - prefix=prefix) - - def compute_metrics(self, results: list) -> Dict[str, float]: - """Compute the metrics from processed results. - - Args: - results (list): The processed results of each batch. - - Returns: - Dict[str, float]: The computed metrics. The keys are the names of - the metrics, and the values are corresponding results. 
- If `'bbox'` in `self.norm_item`, the returned results are the pck - accuracy normalized by `bbox_size`, which have the following keys: - - 'Head PCK': The PCK of head - - 'Sho PCK': The PCK of shoulder - - 'Elb PCK': The PCK of elbow - - 'Wri PCK': The PCK of wrist - - 'Hip PCK': The PCK of hip - - 'Knee PCK': The PCK of knee - - 'Ank PCK': The PCK of ankle - - 'PCK': The mean PCK over all keypoints - If `'torso'` in `self.norm_item`, the returned results are the pck - accuracy normalized by `torso_size`, which have the following keys: - - 'Head tPCK': The PCK of head - - 'Sho tPCK': The PCK of shoulder - - 'Elb tPCK': The PCK of elbow - - 'Wri tPCK': The PCK of wrist - - 'Hip tPCK': The PCK of hip - - 'Knee tPCK': The PCK of knee - - 'Ank tPCK': The PCK of ankle - - 'tPCK': The mean PCK over all keypoints - """ - logger: MMLogger = MMLogger.get_current_instance() - - # pred_coords: [N, K, D] - pred_coords = np.concatenate( - [result['pred_coords'] for result in results]) - # gt_coords: [N, K, D] - gt_coords = np.concatenate([result['gt_coords'] for result in results]) - # mask: [N, K] - mask = np.concatenate([result['mask'] for result in results]) - - metrics = dict() - if 'bbox' in self.norm_item: - norm_size_bbox = np.concatenate( - [result['bbox_size'] for result in results]) - - logger.info(f'Evaluating {self.__class__.__name__} ' - f'(normalized by ``"bbox_size"``)...') - - pck_p, pck, _ = keypoint_pck_accuracy(pred_coords, gt_coords, mask, - self.thr, norm_size_bbox) - stats = { - 'Head PCK': pck_p[2], - 'Sho PCK': 0.5 * pck_p[3] + 0.5 * pck_p[4], - 'Elb PCK': 0.5 * pck_p[7] + 0.5 * pck_p[8], - 'Wri PCK': 0.5 * pck_p[11] + 0.5 * pck_p[12], - 'Hip PCK': 0.5 * pck_p[5] + 0.5 * pck_p[6], - 'Knee PCK': 0.5 * pck_p[9] + 0.5 * pck_p[10], - 'Ank PCK': 0.5 * pck_p[13] + 0.5 * pck_p[14], - 'PCK': pck - } - - for stats_name, stat in stats.items(): - metrics[stats_name] = stat - - if 'torso' in self.norm_item: - norm_size_torso = np.concatenate( - [result['torso_size'] for result in results]) - - logger.info(f'Evaluating {self.__class__.__name__} ' - f'(normalized by ``"torso_size"``)...') - - pck_p, pck, _ = keypoint_pck_accuracy(pred_coords, gt_coords, mask, - self.thr, norm_size_torso) - - stats = { - 'Head tPCK': pck_p[2], - 'Sho tPCK': 0.5 * pck_p[3] + 0.5 * pck_p[4], - 'Elb tPCK': 0.5 * pck_p[7] + 0.5 * pck_p[8], - 'Wri tPCK': 0.5 * pck_p[11] + 0.5 * pck_p[12], - 'Hip tPCK': 0.5 * pck_p[5] + 0.5 * pck_p[6], - 'Knee tPCK': 0.5 * pck_p[9] + 0.5 * pck_p[10], - 'Ank tPCK': 0.5 * pck_p[13] + 0.5 * pck_p[14], - 'tPCK': pck - } - - for stats_name, stat in stats.items(): - metrics[stats_name] = stat - - return metrics - - -@METRICS.register_module() -class AUC(BaseMetric): - """AUC evaluation metric. - - Calculate the Area Under Curve (AUC) of keypoint PCK accuracy. - - By altering the threshold percentage in the calculation of PCK accuracy, - AUC can be generated to further evaluate the pose estimation algorithms. - - Note: - - length of dataset: N - - num_keypoints: K - - number of keypoint dimensions: D (typically D = 2) - - Args: - norm_factor (float): AUC normalization factor, Default: 30 (pixels). - num_thrs (int): number of thresholds to calculate auc. Default: 20. - collect_device (str): Device name used for collecting results from - different ranks during distributed training. Must be ``'cpu'`` or - ``'gpu'``. Default: ``'cpu'``. - prefix (str, optional): The prefix that will be added in the metric - names to disambiguate homonymous metrics of different evaluators. 
- If prefix is not provided in the argument, ``self.default_prefix`` - will be used instead. Default: ``None``. - """ - - def __init__(self, - norm_factor: float = 30, - num_thrs: int = 20, - collect_device: str = 'cpu', - prefix: Optional[str] = None) -> None: - super().__init__(collect_device=collect_device, prefix=prefix) - self.norm_factor = norm_factor - self.num_thrs = num_thrs - - def process(self, data_batch: Sequence[dict], - data_samples: Sequence[dict]) -> None: - """Process one batch of data samples and predictions. The processed - results should be stored in ``self.results``, which will be used to - compute the metrics when all batches have been processed. - - Args: - data_batch (Sequence[dict]): A batch of data - from the dataloader. - data_sample (Sequence[dict]): A batch of outputs from - the model. - """ - for data_sample in data_samples: - # predicted keypoints coordinates, [1, K, D] - pred_coords = data_sample['pred_instances']['keypoints'] - # ground truth data_info - gt = data_sample['gt_instances'] - # ground truth keypoints coordinates, [1, K, D] - gt_coords = gt['keypoints'] - # ground truth keypoints_visible, [1, K, 1] - mask = gt['keypoints_visible'].astype(bool).reshape(1, -1) - - result = { - 'pred_coords': pred_coords, - 'gt_coords': gt_coords, - 'mask': mask, - } - - self.results.append(result) - - def compute_metrics(self, results: list) -> Dict[str, float]: - """Compute the metrics from processed results. - - Args: - results (list): The processed results of each batch. - - Returns: - Dict[str, float]: The computed metrics. The keys are the names of - the metrics, and the values are corresponding results. - """ - logger: MMLogger = MMLogger.get_current_instance() - - # pred_coords: [N, K, D] - pred_coords = np.concatenate( - [result['pred_coords'] for result in results]) - # gt_coords: [N, K, D] - gt_coords = np.concatenate([result['gt_coords'] for result in results]) - # mask: [N, K] - mask = np.concatenate([result['mask'] for result in results]) - - logger.info(f'Evaluating {self.__class__.__name__}...') - - auc = keypoint_auc(pred_coords, gt_coords, mask, self.norm_factor, - self.num_thrs) - - metrics = dict() - metrics['AUC'] = auc - - return metrics - - -@METRICS.register_module() -class EPE(BaseMetric): - """EPE evaluation metric. - - Calculate the end-point error (EPE) of keypoints. - - Note: - - length of dataset: N - - num_keypoints: K - - number of keypoint dimensions: D (typically D = 2) - - Args: - collect_device (str): Device name used for collecting results from - different ranks during distributed training. Must be ``'cpu'`` or - ``'gpu'``. Default: ``'cpu'``. - prefix (str, optional): The prefix that will be added in the metric - names to disambiguate homonymous metrics of different evaluators. - If prefix is not provided in the argument, ``self.default_prefix`` - will be used instead. Default: ``None``. - """ - - def process(self, data_batch: Sequence[dict], - data_samples: Sequence[dict]) -> None: - """Process one batch of data samples and predictions. The processed - results should be stored in ``self.results``, which will be used to - compute the metrics when all batches have been processed. - - Args: - data_batch (Sequence[dict]): A batch of data - from the dataloader. - data_samples (Sequence[dict]): A batch of outputs from - the model. 
- """ - for data_sample in data_samples: - # predicted keypoints coordinates, [1, K, D] - pred_coords = data_sample['pred_instances']['keypoints'] - # ground truth data_info - gt = data_sample['gt_instances'] - # ground truth keypoints coordinates, [1, K, D] - gt_coords = gt['keypoints'] - # ground truth keypoints_visible, [1, K, 1] - mask = gt['keypoints_visible'].astype(bool).reshape(1, -1) - - result = { - 'pred_coords': pred_coords, - 'gt_coords': gt_coords, - 'mask': mask, - } - - self.results.append(result) - - def compute_metrics(self, results: list) -> Dict[str, float]: - """Compute the metrics from processed results. - - Args: - results (list): The processed results of each batch. - - Returns: - Dict[str, float]: The computed metrics. The keys are the names of - the metrics, and the values are corresponding results. - """ - logger: MMLogger = MMLogger.get_current_instance() - - # pred_coords: [N, K, D] - pred_coords = np.concatenate( - [result['pred_coords'] for result in results]) - # gt_coords: [N, K, D] - gt_coords = np.concatenate([result['gt_coords'] for result in results]) - # mask: [N, K] - mask = np.concatenate([result['mask'] for result in results]) - - logger.info(f'Evaluating {self.__class__.__name__}...') - - epe = keypoint_epe(pred_coords, gt_coords, mask) - - metrics = dict() - metrics['EPE'] = epe - - return metrics - - -@METRICS.register_module() -class NME(BaseMetric): - """NME evaluation metric. - - Calculate the normalized mean error (NME) of keypoints. - - Note: - - length of dataset: N - - num_keypoints: K - - number of keypoint dimensions: D (typically D = 2) - - Args: - norm_mode (str): The normalization mode. There are two valid modes: - `'use_norm_item'` and `'keypoint_distance'`. - When set as `'use_norm_item'`, should specify the argument - `norm_item`, which represents the item in the datainfo that - will be used as the normalization factor. - When set as `'keypoint_distance'`, should specify the argument - `keypoint_indices` that are used to calculate the keypoint - distance as the normalization factor. - norm_item (str, optional): The item used as the normalization factor. - For example, `'bbox_size'` in `'AFLWDataset'`. Only valid when - ``norm_mode`` is ``use_norm_item``. - Default: ``None``. - keypoint_indices (Sequence[int], optional): The keypoint indices used - to calculate the keypoint distance as the normalization factor. - Only valid when ``norm_mode`` is ``keypoint_distance``. - If set as None, will use the default ``keypoint_indices`` in - `DEFAULT_KEYPOINT_INDICES` for specific datasets, else use the - given ``keypoint_indices`` of the dataset. Default: ``None``. - collect_device (str): Device name used for collecting results from - different ranks during distributed training. Must be ``'cpu'`` or - ``'gpu'``. Default: ``'cpu'``. - prefix (str, optional): The prefix that will be added in the metric - names to disambiguate homonymous metrics of different evaluators. - If prefix is not provided in the argument, ``self.default_prefix`` - will be used instead. Default: ``None``. 
- """ - - DEFAULT_KEYPOINT_INDICES = { - # horse10: corresponding to `nose` and `eye` keypoints - 'horse10': [0, 1], - # 300w: corresponding to `right-most` and `left-most` eye keypoints - '300w': [36, 45], - # coco_wholebody_face corresponding to `right-most` and `left-most` - # eye keypoints - 'coco_wholebody_face': [36, 45], - # cofw: corresponding to `right-most` and `left-most` eye keypoints - 'cofw': [8, 9], - # wflw: corresponding to `right-most` and `left-most` eye keypoints - 'wflw': [60, 72], - # lapa: corresponding to `right-most` and `left-most` eye keypoints - 'lapa': [66, 79], - } - - def __init__(self, - norm_mode: str, - norm_item: Optional[str] = None, - keypoint_indices: Optional[Sequence[int]] = None, - collect_device: str = 'cpu', - prefix: Optional[str] = None) -> None: - super().__init__(collect_device=collect_device, prefix=prefix) - allowed_norm_modes = ['use_norm_item', 'keypoint_distance'] - if norm_mode not in allowed_norm_modes: - raise KeyError("`norm_mode` should be 'use_norm_item' or " - f"'keypoint_distance', but got {norm_mode}.") - - self.norm_mode = norm_mode - if self.norm_mode == 'use_norm_item': - if not norm_item: - raise KeyError('`norm_mode` is set to `"use_norm_item"`, ' - 'please specify the `norm_item` in the ' - 'datainfo used as the normalization factor.') - self.norm_item = norm_item - self.keypoint_indices = keypoint_indices - - def process(self, data_batch: Sequence[dict], - data_samples: Sequence[dict]) -> None: - """Process one batch of data samples and predictions. The processed - results should be stored in ``self.results``, which will be used to - compute the metrics when all batches have been processed. - - Args: - data_batch (Sequence[dict]): A batch of data - from the dataloader. - data_samples (Sequence[dict]): A batch of outputs from - the model. - """ - for data_sample in data_samples: - # predicted keypoints coordinates, [1, K, D] - pred_coords = data_sample['pred_instances']['keypoints'] - # ground truth data_info - gt = data_sample['gt_instances'] - # ground truth keypoints coordinates, [1, K, D] - gt_coords = gt['keypoints'] - # ground truth keypoints_visible, [1, K, 1] - mask = gt['keypoints_visible'].astype(bool).reshape(1, -1) - - result = { - 'pred_coords': pred_coords, - 'gt_coords': gt_coords, - 'mask': mask, - } - - if self.norm_item: - if self.norm_item == 'bbox_size': - assert 'bboxes' in gt, 'The ground truth data info do ' \ - 'not have the item ``bboxes`` for expected ' \ - 'normalized_item ``"bbox_size"``.' - # ground truth bboxes, [1, 4] - bbox_size = np.max(gt['bboxes'][0][2:] - - gt['bboxes'][0][:2]) - result['bbox_size'] = np.array([bbox_size]).reshape(-1, 1) - else: - assert self.norm_item in gt, f'The ground truth data ' \ - f'info do not have the expected normalized factor ' \ - f'"{self.norm_item}"' - # ground truth norm_item - result[self.norm_item] = np.array( - gt[self.norm_item]).reshape([-1, 1]) - - self.results.append(result) - - def compute_metrics(self, results: list) -> Dict[str, float]: - """Compute the metrics from processed results. - - Args: - results (list): The processed results of each batch. - - Returns: - Dict[str, float]: The computed metrics. The keys are the names of - the metrics, and the values are corresponding results. 
- """ - logger: MMLogger = MMLogger.get_current_instance() - - # pred_coords: [N, K, D] - pred_coords = np.concatenate( - [result['pred_coords'] for result in results]) - # gt_coords: [N, K, D] - gt_coords = np.concatenate([result['gt_coords'] for result in results]) - # mask: [N, K] - mask = np.concatenate([result['mask'] for result in results]) - - logger.info(f'Evaluating {self.__class__.__name__}...') - metrics = dict() - - if self.norm_mode == 'use_norm_item': - normalize_factor_ = np.concatenate( - [result[self.norm_item] for result in results]) - # normalize_factor: [N, 2] - normalize_factor = np.tile(normalize_factor_, [1, 2]) - nme = keypoint_nme(pred_coords, gt_coords, mask, normalize_factor) - metrics['NME'] = nme - - else: - if self.keypoint_indices is None: - # use default keypoint_indices in some datasets - dataset_name = self.dataset_meta['dataset_name'] - if dataset_name not in self.DEFAULT_KEYPOINT_INDICES: - raise KeyError( - '`norm_mode` is set to `keypoint_distance`, and the ' - 'keypoint_indices is set to None, can not find the ' - 'keypoint_indices in `DEFAULT_KEYPOINT_INDICES`, ' - 'please specify `keypoint_indices` appropriately.') - self.keypoint_indices = self.DEFAULT_KEYPOINT_INDICES[ - dataset_name] - else: - assert len(self.keypoint_indices) == 2, 'The keypoint '\ - 'indices used for normalization should be a pair.' - keypoint_id2name = self.dataset_meta['keypoint_id2name'] - dataset_name = self.dataset_meta['dataset_name'] - for idx in self.keypoint_indices: - assert idx in keypoint_id2name, f'The {dataset_name} '\ - f'dataset does not contain the required '\ - f'{idx}-th keypoint.' - # normalize_factor: [N, 2] - normalize_factor = self._get_normalize_factor(gt_coords=gt_coords) - nme = keypoint_nme(pred_coords, gt_coords, mask, normalize_factor) - metrics['NME'] = nme - - return metrics - - def _get_normalize_factor(self, gt_coords: np.ndarray) -> np.ndarray: - """Get the normalize factor. generally inter-ocular distance measured - as the Euclidean distance between the outer corners of the eyes is - used. - - Args: - gt_coords (np.ndarray[N, K, 2]): Groundtruth keypoint coordinates. - - Returns: - np.ndarray[N, 2]: normalized factor - """ - idx1, idx2 = self.keypoint_indices - - interocular = np.linalg.norm( - gt_coords[:, idx1, :] - gt_coords[:, idx2, :], - axis=1, - keepdims=True) - - return np.tile(interocular, [1, 2]) +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from typing import Dict, Optional, Sequence, Union + +import numpy as np +from mmengine.evaluator import BaseMetric +from mmengine.logging import MMLogger + +from mmpose.registry import METRICS +from ..functional import (keypoint_auc, keypoint_epe, keypoint_nme, + keypoint_pck_accuracy) + + +@METRICS.register_module() +class PCKAccuracy(BaseMetric): + """PCK accuracy evaluation metric. + Calculate the pose accuracy of Percentage of Correct Keypoints (PCK) for + each individual keypoint and the averaged accuracy across all keypoints. + PCK metric measures accuracy of the localization of the body joints. + The distances between predicted positions and the ground-truth ones + are typically normalized by the person bounding box size. + The threshold (thr) of the normalized distance is commonly set + as 0.05, 0.1 or 0.2 etc. + Note: + - length of dataset: N + - num_keypoints: K + - number of keypoint dimensions: D (typically D = 2) + Args: + thr(float): Threshold of PCK calculation. Default: 0.05. + norm_item (str | Sequence[str]): The item used for normalization. 
+ Valid items include 'bbox', 'head', 'torso', which correspond + to 'PCK', 'PCKh' and 'tPCK' respectively. Default: ``'bbox'``. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be ``'cpu'`` or + ``'gpu'``. Default: ``'cpu'``. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, ``self.default_prefix`` + will be used instead. Default: ``None``. + + Examples: + + >>> from mmpose.evaluation.metrics import PCKAccuracy + >>> import numpy as np + >>> from mmengine.structures import InstanceData + >>> num_keypoints = 15 + >>> keypoints = np.random.random((1, num_keypoints, 2)) * 10 + >>> gt_instances = InstanceData() + >>> gt_instances.keypoints = keypoints + >>> gt_instances.keypoints_visible = np.ones( + ... (1, num_keypoints, 1)).astype(bool) + >>> gt_instances.bboxes = np.random.random((1, 4)) * 20 + >>> pred_instances = InstanceData() + >>> pred_instances.keypoints = keypoints + >>> data_sample = { + ... 'gt_instances': gt_instances.to_dict(), + ... 'pred_instances': pred_instances.to_dict(), + ... } + >>> data_samples = [data_sample] + >>> data_batch = [{'inputs': None}] + >>> pck_metric = PCKAccuracy(thr=0.5, norm_item='bbox') + ...: UserWarning: The prefix is not set in metric class PCKAccuracy. + >>> pck_metric.process(data_batch, data_samples) + >>> pck_metric.evaluate(1) + 10/26 15:37:57 - mmengine - INFO - Evaluating PCKAccuracy (normalized by ``"bbox_size"``)... # noqa + {'PCK': 1.0} + + """ + + def __init__(self, + thr: float = 0.05, + norm_item: Union[str, Sequence[str]] = 'bbox', + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + super().__init__(collect_device=collect_device, prefix=prefix) + self.thr = thr + self.norm_item = norm_item if isinstance(norm_item, + (tuple, + list)) else [norm_item] + allow_normalized_items = ['bbox', 'head', 'torso'] + for item in self.norm_item: + if item not in allow_normalized_items: + raise KeyError( + f'The normalized item {item} is not supported by ' + f"{self.__class__.__name__}. Should be one of 'bbox', " + f"'head', 'torso', but got {item}.") + + def process(self, data_batch: Sequence[dict], + data_samples: Sequence[dict]) -> None: + """Process one batch of data samples and predictions. + + The processed + results should be stored in ``self.results``, which will be used to + compute the metrics when all batches have been processed. + Args: + data_batch (Sequence[dict]): A batch of data + from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from + the model. + """ + for data_sample in data_samples: + # predicted keypoints coordinates, [1, K, D] + pred_coords = data_sample['pred_instances']['keypoints'] + # ground truth data_info + gt = data_sample['gt_instances'] + # ground truth keypoints coordinates, [1, K, D] + gt_coords = gt['keypoints'] + # ground truth keypoints_visible, [1, K, 1] + mask = gt['keypoints_visible'].astype(bool).reshape(1, -1) + + result = { + 'pred_coords': pred_coords, + 'gt_coords': gt_coords, + 'mask': mask, + } + + if 'bbox' in self.norm_item: + assert 'bboxes' in gt, 'The ground truth data info do not ' \ + 'have the expected normalized_item ``"bbox"``.' 
+ # ground truth bboxes, [1, 4] + bbox_size_ = np.max(gt['bboxes'][0][2:] - gt['bboxes'][0][:2]) + bbox_size = np.array([bbox_size_, bbox_size_]).reshape(-1, 2) + result['bbox_size'] = bbox_size + + if 'head' in self.norm_item: + assert 'head_size' in gt, 'The ground truth data info do ' \ + 'not have the expected normalized_item ``"head_size"``.' + # ground truth bboxes + head_size_ = gt['head_size'] + head_size = np.array([head_size_, head_size_]).reshape(-1, 2) + result['head_size'] = head_size + + if 'torso' in self.norm_item: + # used in JhmdbDataset + torso_size_ = np.linalg.norm(gt_coords[0][4] - gt_coords[0][5]) + if torso_size_ < 1: + torso_size_ = np.linalg.norm(pred_coords[0][4] - + pred_coords[0][5]) + warnings.warn('Ground truth torso size < 1. ' + 'Use torso size from predicted ' + 'keypoint results instead.') + torso_size = np.array([torso_size_, + torso_size_]).reshape(-1, 2) + result['torso_size'] = torso_size + + self.results.append(result) + + def compute_metrics(self, results: list) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. + The returned result dict may have the following keys: + - 'PCK': The pck accuracy normalized by `bbox_size`. + - 'PCKh': The pck accuracy normalized by `head_size`. + - 'tPCK': The pck accuracy normalized by `torso_size`. + """ + logger: MMLogger = MMLogger.get_current_instance() + + # pred_coords: [N, K, D] + pred_coords = np.concatenate( + [result['pred_coords'] for result in results]) + # gt_coords: [N, K, D] + gt_coords = np.concatenate([result['gt_coords'] for result in results]) + # mask: [N, K] + mask = np.concatenate([result['mask'] for result in results]) + + metrics = dict() + if 'bbox' in self.norm_item: + norm_size_bbox = np.concatenate( + [result['bbox_size'] for result in results]) + + logger.info(f'Evaluating {self.__class__.__name__} ' + f'(normalized by ``"bbox_size"``)...') + + _, pck, _ = keypoint_pck_accuracy(pred_coords, gt_coords, mask, + self.thr, norm_size_bbox) + metrics['PCK'] = pck + + if 'head' in self.norm_item: + norm_size_head = np.concatenate( + [result['head_size'] for result in results]) + + logger.info(f'Evaluating {self.__class__.__name__} ' + f'(normalized by ``"head_size"``)...') + + _, pckh, _ = keypoint_pck_accuracy(pred_coords, gt_coords, mask, + self.thr, norm_size_head) + metrics['PCKh'] = pckh + + if 'torso' in self.norm_item: + norm_size_torso = np.concatenate( + [result['torso_size'] for result in results]) + + logger.info(f'Evaluating {self.__class__.__name__} ' + f'(normalized by ``"torso_size"``)...') + + _, tpck, _ = keypoint_pck_accuracy(pred_coords, gt_coords, mask, + self.thr, norm_size_torso) + metrics['tPCK'] = tpck + + return metrics + + +@METRICS.register_module() +class MpiiPCKAccuracy(PCKAccuracy): + """PCKh accuracy evaluation metric for MPII dataset. + + Calculate the pose accuracy of Percentage of Correct Keypoints (PCK) for + each individual keypoint and the averaged accuracy across all keypoints. + PCK metric measures accuracy of the localization of the body joints. + The distances between predicted positions and the ground-truth ones + are typically normalized by the person bounding box size. + The threshold (thr) of the normalized distance is commonly set + as 0.05, 0.1 or 0.2 etc. 
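In the 'bbox' branch above, the normalization size is the longest side of the instance box; the [2:] - [:2] subtraction assumes an x1, y1, x2, y2 box layout. With toy numbers:

import numpy as np

bbox = np.array([12.0, 30.0, 52.0, 110.0])        # hypothetical x1, y1, x2, y2 box
bbox_size_ = np.max(bbox[2:] - bbox[:2])          # longest side: max(40, 80) -> 80.0
bbox_size = np.array([bbox_size_, bbox_size_]).reshape(-1, 2)  # [1, 2], as stored in the result dict
print(bbox_size)                                   # [[80. 80.]]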
+ + Note: + - length of dataset: N + - num_keypoints: K + - number of keypoint dimensions: D (typically D = 2) + + Args: + thr(float): Threshold of PCK calculation. Default: 0.05. + norm_item (str | Sequence[str]): The item used for normalization. + Valid items include 'bbox', 'head', 'torso', which correspond + to 'PCK', 'PCKh' and 'tPCK' respectively. Default: ``'head'``. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be ``'cpu'`` or + ``'gpu'``. Default: ``'cpu'``. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, ``self.default_prefix`` + will be used instead. Default: ``None``. + + Examples: + + >>> from mmpose.evaluation.metrics import MpiiPCKAccuracy + >>> import numpy as np + >>> from mmengine.structures import InstanceData + >>> num_keypoints = 16 + >>> keypoints = np.random.random((1, num_keypoints, 2)) * 10 + >>> gt_instances = InstanceData() + >>> gt_instances.keypoints = keypoints + 1.0 + >>> gt_instances.keypoints_visible = np.ones( + ... (1, num_keypoints, 1)).astype(bool) + >>> gt_instances.head_size = np.random.random((1, 1)) * 10 + >>> pred_instances = InstanceData() + >>> pred_instances.keypoints = keypoints + >>> data_sample = { + ... 'gt_instances': gt_instances.to_dict(), + ... 'pred_instances': pred_instances.to_dict(), + ... } + >>> data_samples = [data_sample] + >>> data_batch = [{'inputs': None}] + >>> mpii_pck_metric = MpiiPCKAccuracy(thr=0.3, norm_item='head') + ... UserWarning: The prefix is not set in metric class MpiiPCKAccuracy. + >>> mpii_pck_metric.process(data_batch, data_samples) + >>> mpii_pck_metric.evaluate(1) + 10/26 17:43:39 - mmengine - INFO - Evaluating MpiiPCKAccuracy (normalized by ``"head_size"``)... # noqa + {'Head PCK': 100.0, 'Shoulder PCK': 100.0, 'Elbow PCK': 100.0, + Wrist PCK': 100.0, 'Hip PCK': 100.0, 'Knee PCK': 100.0, + 'Ankle PCK': 100.0, 'PCK': 100.0, 'PCK@0.1': 100.0} + """ + + def __init__(self, + thr: float = 0.5, + norm_item: Union[str, Sequence[str]] = 'head', + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + super().__init__( + thr=thr, + norm_item=norm_item, + collect_device=collect_device, + prefix=prefix) + + def compute_metrics(self, results: list) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. 
+ If `'head'` in `self.norm_item`, the returned results are the pck + accuracy normalized by `head_size`, which have the following keys: + - 'Head PCK': The PCK of head + - 'Shoulder PCK': The PCK of shoulder + - 'Elbow PCK': The PCK of elbow + - 'Wrist PCK': The PCK of wrist + - 'Hip PCK': The PCK of hip + - 'Knee PCK': The PCK of knee + - 'Ankle PCK': The PCK of ankle + - 'PCK': The mean PCK over all keypoints + - 'PCK@0.1': The mean PCK at threshold 0.1 + """ + logger: MMLogger = MMLogger.get_current_instance() + + # pred_coords: [N, K, D] + pred_coords = np.concatenate( + [result['pred_coords'] for result in results]) + # gt_coords: [N, K, D] + gt_coords = np.concatenate([result['gt_coords'] for result in results]) + # mask: [N, K] + mask = np.concatenate([result['mask'] for result in results]) + + # MPII uses matlab format, gt index is 1-based, + # convert 0-based index to 1-based index + pred_coords = pred_coords + 1.0 + + metrics = {} + if 'head' in self.norm_item: + norm_size_head = np.concatenate( + [result['head_size'] for result in results]) + + logger.info(f'Evaluating {self.__class__.__name__} ' + f'(normalized by ``"head_size"``)...') + + pck_p, _, _ = keypoint_pck_accuracy(pred_coords, gt_coords, mask, + self.thr, norm_size_head) + + jnt_count = np.sum(mask, axis=0) + PCKh = 100. * pck_p + + rng = np.arange(0, 0.5 + 0.01, 0.01) + pckAll = np.zeros((len(rng), 16), dtype=np.float32) + + for r, threshold in enumerate(rng): + _pck, _, _ = keypoint_pck_accuracy(pred_coords, gt_coords, + mask, threshold, + norm_size_head) + pckAll[r, :] = 100. * _pck + + PCKh = np.ma.array(PCKh, mask=False) + PCKh.mask[6:8] = True + + jnt_count = np.ma.array(jnt_count, mask=False) + jnt_count.mask[6:8] = True + jnt_ratio = jnt_count / np.sum(jnt_count).astype(np.float64) + + # dataset_joints_idx: + # head 9 + # lsho 13 rsho 12 + # lelb 14 relb 11 + # lwri 15 rwri 10 + # lhip 3 rhip 2 + # lkne 4 rkne 1 + # lank 5 rank 0 + stats = { + 'Head PCK': PCKh[9], + 'Shoulder PCK': 0.5 * (PCKh[13] + PCKh[12]), + 'Elbow PCK': 0.5 * (PCKh[14] + PCKh[11]), + 'Wrist PCK': 0.5 * (PCKh[15] + PCKh[10]), + 'Hip PCK': 0.5 * (PCKh[3] + PCKh[2]), + 'Knee PCK': 0.5 * (PCKh[4] + PCKh[1]), + 'Ankle PCK': 0.5 * (PCKh[5] + PCKh[0]), + 'PCK': np.sum(PCKh * jnt_ratio), + 'PCK@0.1': np.sum(pckAll[10, :] * jnt_ratio) + } + + for stats_name, stat in stats.items(): + metrics[stats_name] = stat + + return metrics + + +@METRICS.register_module() +class JhmdbPCKAccuracy(PCKAccuracy): + """PCK accuracy evaluation metric for Jhmdb dataset. + + Calculate the pose accuracy of Percentage of Correct Keypoints (PCK) for + each individual keypoint and the averaged accuracy across all keypoints. + PCK metric measures accuracy of the localization of the body joints. + The distances between predicted positions and the ground-truth ones + are typically normalized by the person bounding box size. + The threshold (thr) of the normalized distance is commonly set + as 0.05, 0.1 or 0.2 etc. + + Note: + - length of dataset: N + - num_keypoints: K + - number of keypoint dimensions: D (typically D = 2) + + Args: + thr(float): Threshold of PCK calculation. Default: 0.05. + norm_item (str | Sequence[str]): The item used for normalization. + Valid items include 'bbox', 'head', 'torso', which correspond + to 'PCK', 'PCKh' and 'tPCK' respectively. Default: ``'bbox'``. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be ``'cpu'`` or + ``'gpu'``. Default: ``'cpu'``. 
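The masked-array step in MpiiPCKAccuracy.compute_metrics above drops joints 6 and 7 (pelvis and thorax in the standard MPII joint order) before taking the visibility-weighted average. A small toy version of just that step, with invented per-joint values:

import numpy as np

per_joint_pckh = np.linspace(60.0, 90.0, 16)   # toy per-joint PCKh values for 16 MPII joints
jnt_count = np.full(16, 100)                   # toy visible-keypoint counts

pckh = np.ma.array(per_joint_pckh, mask=False)
count = np.ma.array(jnt_count, mask=False)
pckh.mask[6:8] = True                          # exclude pelvis (6) and thorax (7)
count.mask[6:8] = True

jnt_ratio = count / np.sum(count).astype(np.float64)
print(np.sum(pckh * jnt_ratio))                # visibility-weighted mean over the 14 kept joints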
+ prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, ``self.default_prefix`` + will be used instead. Default: ``None``. + + Examples: + + >>> from mmpose.evaluation.metrics import JhmdbPCKAccuracy + >>> import numpy as np + >>> from mmengine.structures import InstanceData + >>> num_keypoints = 15 + >>> keypoints = np.random.random((1, num_keypoints, 2)) * 10 + >>> gt_instances = InstanceData() + >>> gt_instances.keypoints = keypoints + >>> gt_instances.keypoints_visible = np.ones( + ... (1, num_keypoints, 1)).astype(bool) + >>> gt_instances.bboxes = np.random.random((1, 4)) * 20 + >>> gt_instances.head_size = np.random.random((1, 1)) * 10 + >>> pred_instances = InstanceData() + >>> pred_instances.keypoints = keypoints + >>> data_sample = { + ... 'gt_instances': gt_instances.to_dict(), + ... 'pred_instances': pred_instances.to_dict(), + ... } + >>> data_samples = [data_sample] + >>> data_batch = [{'inputs': None}] + >>> jhmdb_pck_metric = JhmdbPCKAccuracy(thr=0.2, norm_item=['bbox', 'torso']) + ... UserWarning: The prefix is not set in metric class JhmdbPCKAccuracy. + >>> jhmdb_pck_metric.process(data_batch, data_samples) + >>> jhmdb_pck_metric.evaluate(1) + 10/26 17:48:09 - mmengine - INFO - Evaluating JhmdbPCKAccuracy (normalized by ``"bbox_size"``)... # noqa + 10/26 17:48:09 - mmengine - INFO - Evaluating JhmdbPCKAccuracy (normalized by ``"torso_size"``)... # noqa + {'Head PCK': 1.0, 'Sho PCK': 1.0, 'Elb PCK': 1.0, 'Wri PCK': 1.0, + 'Hip PCK': 1.0, 'Knee PCK': 1.0, 'Ank PCK': 1.0, 'PCK': 1.0, + 'Head tPCK': 1.0, 'Sho tPCK': 1.0, 'Elb tPCK': 1.0, 'Wri tPCK': 1.0, + 'Hip tPCK': 1.0, 'Knee tPCK': 1.0, 'Ank tPCK': 1.0, 'tPCK': 1.0} + """ + + def __init__(self, + thr: float = 0.05, + norm_item: Union[str, Sequence[str]] = 'bbox', + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + super().__init__( + thr=thr, + norm_item=norm_item, + collect_device=collect_device, + prefix=prefix) + + def compute_metrics(self, results: list) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. 
+ If `'bbox'` in `self.norm_item`, the returned results are the pck + accuracy normalized by `bbox_size`, which have the following keys: + - 'Head PCK': The PCK of head + - 'Sho PCK': The PCK of shoulder + - 'Elb PCK': The PCK of elbow + - 'Wri PCK': The PCK of wrist + - 'Hip PCK': The PCK of hip + - 'Knee PCK': The PCK of knee + - 'Ank PCK': The PCK of ankle + - 'PCK': The mean PCK over all keypoints + If `'torso'` in `self.norm_item`, the returned results are the pck + accuracy normalized by `torso_size`, which have the following keys: + - 'Head tPCK': The PCK of head + - 'Sho tPCK': The PCK of shoulder + - 'Elb tPCK': The PCK of elbow + - 'Wri tPCK': The PCK of wrist + - 'Hip tPCK': The PCK of hip + - 'Knee tPCK': The PCK of knee + - 'Ank tPCK': The PCK of ankle + - 'tPCK': The mean PCK over all keypoints + """ + logger: MMLogger = MMLogger.get_current_instance() + + # pred_coords: [N, K, D] + pred_coords = np.concatenate( + [result['pred_coords'] for result in results]) + # gt_coords: [N, K, D] + gt_coords = np.concatenate([result['gt_coords'] for result in results]) + # mask: [N, K] + mask = np.concatenate([result['mask'] for result in results]) + + metrics = dict() + if 'bbox' in self.norm_item: + norm_size_bbox = np.concatenate( + [result['bbox_size'] for result in results]) + + logger.info(f'Evaluating {self.__class__.__name__} ' + f'(normalized by ``"bbox_size"``)...') + + pck_p, pck, _ = keypoint_pck_accuracy(pred_coords, gt_coords, mask, + self.thr, norm_size_bbox) + stats = { + 'Head PCK': pck_p[2], + 'Sho PCK': 0.5 * pck_p[3] + 0.5 * pck_p[4], + 'Elb PCK': 0.5 * pck_p[7] + 0.5 * pck_p[8], + 'Wri PCK': 0.5 * pck_p[11] + 0.5 * pck_p[12], + 'Hip PCK': 0.5 * pck_p[5] + 0.5 * pck_p[6], + 'Knee PCK': 0.5 * pck_p[9] + 0.5 * pck_p[10], + 'Ank PCK': 0.5 * pck_p[13] + 0.5 * pck_p[14], + 'PCK': pck + } + + for stats_name, stat in stats.items(): + metrics[stats_name] = stat + + if 'torso' in self.norm_item: + norm_size_torso = np.concatenate( + [result['torso_size'] for result in results]) + + logger.info(f'Evaluating {self.__class__.__name__} ' + f'(normalized by ``"torso_size"``)...') + + pck_p, pck, _ = keypoint_pck_accuracy(pred_coords, gt_coords, mask, + self.thr, norm_size_torso) + + stats = { + 'Head tPCK': pck_p[2], + 'Sho tPCK': 0.5 * pck_p[3] + 0.5 * pck_p[4], + 'Elb tPCK': 0.5 * pck_p[7] + 0.5 * pck_p[8], + 'Wri tPCK': 0.5 * pck_p[11] + 0.5 * pck_p[12], + 'Hip tPCK': 0.5 * pck_p[5] + 0.5 * pck_p[6], + 'Knee tPCK': 0.5 * pck_p[9] + 0.5 * pck_p[10], + 'Ank tPCK': 0.5 * pck_p[13] + 0.5 * pck_p[14], + 'tPCK': pck + } + + for stats_name, stat in stats.items(): + metrics[stats_name] = stat + + return metrics + + +@METRICS.register_module() +class AUC(BaseMetric): + """AUC evaluation metric. + + Calculate the Area Under Curve (AUC) of keypoint PCK accuracy. + + By altering the threshold percentage in the calculation of PCK accuracy, + AUC can be generated to further evaluate the pose estimation algorithms. + + Note: + - length of dataset: N + - num_keypoints: K + - number of keypoint dimensions: D (typically D = 2) + + Args: + norm_factor (float): AUC normalization factor, Default: 30 (pixels). + num_thrs (int): number of thresholds to calculate auc. Default: 20. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be ``'cpu'`` or + ``'gpu'``. Default: ``'cpu'``. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. 
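The AUC metric described above sweeps the PCK threshold and averages the resulting accuracies. The following is a rough, self-contained sketch of that idea; the exact threshold spacing used by mmpose's keypoint_auc may differ:

import numpy as np

def keypoint_auc_sketch(pred, gt, mask, norm_factor=30.0, num_thrs=20):
    # normalized distances, using a fixed pixel norm_factor as in the AUC metric
    dist = np.linalg.norm(pred - gt, axis=-1) / norm_factor   # [N, K]
    thrs = np.arange(0, 1, 1.0 / num_thrs)                    # evenly spaced thresholds
    accs = [((dist < t) & mask).sum() / max(mask.sum(), 1) for t in thrs]
    return float(np.mean(accs))                               # area under the PCK-vs-threshold curve

kpts = np.random.rand(8, 17, 2) * 64
print(keypoint_auc_sketch(kpts + 1.0, kpts, np.ones((8, 17), bool)))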
+ If prefix is not provided in the argument, ``self.default_prefix`` + will be used instead. Default: ``None``. + """ + + def __init__(self, + norm_factor: float = 30, + num_thrs: int = 20, + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + super().__init__(collect_device=collect_device, prefix=prefix) + self.norm_factor = norm_factor + self.num_thrs = num_thrs + + def process(self, data_batch: Sequence[dict], + data_samples: Sequence[dict]) -> None: + """Process one batch of data samples and predictions. The processed + results should be stored in ``self.results``, which will be used to + compute the metrics when all batches have been processed. + + Args: + data_batch (Sequence[dict]): A batch of data + from the dataloader. + data_sample (Sequence[dict]): A batch of outputs from + the model. + """ + for data_sample in data_samples: + # predicted keypoints coordinates, [1, K, D] + pred_coords = data_sample['pred_instances']['keypoints'] + # ground truth data_info + gt = data_sample['gt_instances'] + # ground truth keypoints coordinates, [1, K, D] + gt_coords = gt['keypoints'] + # ground truth keypoints_visible, [1, K, 1] + mask = gt['keypoints_visible'].astype(bool).reshape(1, -1) + + result = { + 'pred_coords': pred_coords, + 'gt_coords': gt_coords, + 'mask': mask, + } + + self.results.append(result) + + def compute_metrics(self, results: list) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. + """ + logger: MMLogger = MMLogger.get_current_instance() + + # pred_coords: [N, K, D] + pred_coords = np.concatenate( + [result['pred_coords'] for result in results]) + # gt_coords: [N, K, D] + gt_coords = np.concatenate([result['gt_coords'] for result in results]) + # mask: [N, K] + mask = np.concatenate([result['mask'] for result in results]) + + logger.info(f'Evaluating {self.__class__.__name__}...') + + auc = keypoint_auc(pred_coords, gt_coords, mask, self.norm_factor, + self.num_thrs) + + metrics = dict() + metrics['AUC'] = auc + + return metrics + + +@METRICS.register_module() +class EPE(BaseMetric): + """EPE evaluation metric. + + Calculate the end-point error (EPE) of keypoints. + + Note: + - length of dataset: N + - num_keypoints: K + - number of keypoint dimensions: D (typically D = 2) + + Args: + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be ``'cpu'`` or + ``'gpu'``. Default: ``'cpu'``. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, ``self.default_prefix`` + will be used instead. Default: ``None``. + """ + + def process(self, data_batch: Sequence[dict], + data_samples: Sequence[dict]) -> None: + """Process one batch of data samples and predictions. The processed + results should be stored in ``self.results``, which will be used to + compute the metrics when all batches have been processed. + + Args: + data_batch (Sequence[dict]): A batch of data + from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from + the model. 
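EPE, as described above, is simply the mean Euclidean distance between predicted and ground-truth keypoints over the visible ones. A minimal sketch with toy data:

import numpy as np

def epe_sketch(pred, gt, mask):
    # mean Euclidean distance over visible keypoints only
    dist = np.linalg.norm(pred - gt, axis=-1)   # [N, K]
    return float(dist[mask].mean())

kpts = np.random.rand(4, 15, 2) * 10
print(epe_sketch(kpts + 0.5, kpts, np.ones((4, 15), bool)))   # ~0.707 (sqrt(0.5^2 + 0.5^2))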
+ """ + for data_sample in data_samples: + # predicted keypoints coordinates, [1, K, D] + pred_coords = data_sample['pred_instances']['keypoints'] + # ground truth data_info + gt = data_sample['gt_instances'] + # ground truth keypoints coordinates, [1, K, D] + gt_coords = gt['keypoints'] + # ground truth keypoints_visible, [1, K, 1] + mask = gt['keypoints_visible'].astype(bool).reshape(1, -1) + + result = { + 'pred_coords': pred_coords, + 'gt_coords': gt_coords, + 'mask': mask, + } + + self.results.append(result) + + def compute_metrics(self, results: list) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. + """ + logger: MMLogger = MMLogger.get_current_instance() + + # pred_coords: [N, K, D] + pred_coords = np.concatenate( + [result['pred_coords'] for result in results]) + # gt_coords: [N, K, D] + gt_coords = np.concatenate([result['gt_coords'] for result in results]) + # mask: [N, K] + mask = np.concatenate([result['mask'] for result in results]) + + logger.info(f'Evaluating {self.__class__.__name__}...') + + epe = keypoint_epe(pred_coords, gt_coords, mask) + + metrics = dict() + metrics['EPE'] = epe + + return metrics + + +@METRICS.register_module() +class NME(BaseMetric): + """NME evaluation metric. + + Calculate the normalized mean error (NME) of keypoints. + + Note: + - length of dataset: N + - num_keypoints: K + - number of keypoint dimensions: D (typically D = 2) + + Args: + norm_mode (str): The normalization mode. There are two valid modes: + `'use_norm_item'` and `'keypoint_distance'`. + When set as `'use_norm_item'`, should specify the argument + `norm_item`, which represents the item in the datainfo that + will be used as the normalization factor. + When set as `'keypoint_distance'`, should specify the argument + `keypoint_indices` that are used to calculate the keypoint + distance as the normalization factor. + norm_item (str, optional): The item used as the normalization factor. + For example, `'bbox_size'` in `'AFLWDataset'`. Only valid when + ``norm_mode`` is ``use_norm_item``. + Default: ``None``. + keypoint_indices (Sequence[int], optional): The keypoint indices used + to calculate the keypoint distance as the normalization factor. + Only valid when ``norm_mode`` is ``keypoint_distance``. + If set as None, will use the default ``keypoint_indices`` in + `DEFAULT_KEYPOINT_INDICES` for specific datasets, else use the + given ``keypoint_indices`` of the dataset. Default: ``None``. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be ``'cpu'`` or + ``'gpu'``. Default: ``'cpu'``. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, ``self.default_prefix`` + will be used instead. Default: ``None``. 
+ """ + + DEFAULT_KEYPOINT_INDICES = { + # horse10: corresponding to `nose` and `eye` keypoints + 'horse10': [0, 1], + # 300w: corresponding to `right-most` and `left-most` eye keypoints + '300w': [36, 45], + # coco_wholebody_face corresponding to `right-most` and `left-most` + # eye keypoints + 'coco_wholebody_face': [36, 45], + # cofw: corresponding to `right-most` and `left-most` eye keypoints + 'cofw': [8, 9], + # wflw: corresponding to `right-most` and `left-most` eye keypoints + 'wflw': [60, 72], + # lapa: corresponding to `right-most` and `left-most` eye keypoints + 'lapa': [66, 79], + } + + def __init__(self, + norm_mode: str, + norm_item: Optional[str] = None, + keypoint_indices: Optional[Sequence[int]] = None, + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + super().__init__(collect_device=collect_device, prefix=prefix) + allowed_norm_modes = ['use_norm_item', 'keypoint_distance'] + if norm_mode not in allowed_norm_modes: + raise KeyError("`norm_mode` should be 'use_norm_item' or " + f"'keypoint_distance', but got {norm_mode}.") + + self.norm_mode = norm_mode + if self.norm_mode == 'use_norm_item': + if not norm_item: + raise KeyError('`norm_mode` is set to `"use_norm_item"`, ' + 'please specify the `norm_item` in the ' + 'datainfo used as the normalization factor.') + self.norm_item = norm_item + self.keypoint_indices = keypoint_indices + + def process(self, data_batch: Sequence[dict], + data_samples: Sequence[dict]) -> None: + """Process one batch of data samples and predictions. The processed + results should be stored in ``self.results``, which will be used to + compute the metrics when all batches have been processed. + + Args: + data_batch (Sequence[dict]): A batch of data + from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from + the model. + """ + for data_sample in data_samples: + # predicted keypoints coordinates, [1, K, D] + pred_coords = data_sample['pred_instances']['keypoints'] + # ground truth data_info + gt = data_sample['gt_instances'] + # ground truth keypoints coordinates, [1, K, D] + gt_coords = gt['keypoints'] + # ground truth keypoints_visible, [1, K, 1] + mask = gt['keypoints_visible'].astype(bool).reshape(1, -1) + + result = { + 'pred_coords': pred_coords, + 'gt_coords': gt_coords, + 'mask': mask, + } + + if self.norm_item: + if self.norm_item == 'bbox_size': + assert 'bboxes' in gt, 'The ground truth data info do ' \ + 'not have the item ``bboxes`` for expected ' \ + 'normalized_item ``"bbox_size"``.' + # ground truth bboxes, [1, 4] + bbox_size = np.max(gt['bboxes'][0][2:] - + gt['bboxes'][0][:2]) + result['bbox_size'] = np.array([bbox_size]).reshape(-1, 1) + else: + assert self.norm_item in gt, f'The ground truth data ' \ + f'info do not have the expected normalized factor ' \ + f'"{self.norm_item}"' + # ground truth norm_item + result[self.norm_item] = np.array( + gt[self.norm_item]).reshape([-1, 1]) + + self.results.append(result) + + def compute_metrics(self, results: list) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. 
+ """ + logger: MMLogger = MMLogger.get_current_instance() + + # pred_coords: [N, K, D] + pred_coords = np.concatenate( + [result['pred_coords'] for result in results]) + # gt_coords: [N, K, D] + gt_coords = np.concatenate([result['gt_coords'] for result in results]) + # mask: [N, K] + mask = np.concatenate([result['mask'] for result in results]) + + logger.info(f'Evaluating {self.__class__.__name__}...') + metrics = dict() + + if self.norm_mode == 'use_norm_item': + normalize_factor_ = np.concatenate( + [result[self.norm_item] for result in results]) + # normalize_factor: [N, 2] + normalize_factor = np.tile(normalize_factor_, [1, 2]) + nme = keypoint_nme(pred_coords, gt_coords, mask, normalize_factor) + metrics['NME'] = nme + + else: + if self.keypoint_indices is None: + # use default keypoint_indices in some datasets + dataset_name = self.dataset_meta['dataset_name'] + if dataset_name not in self.DEFAULT_KEYPOINT_INDICES: + raise KeyError( + '`norm_mode` is set to `keypoint_distance`, and the ' + 'keypoint_indices is set to None, can not find the ' + 'keypoint_indices in `DEFAULT_KEYPOINT_INDICES`, ' + 'please specify `keypoint_indices` appropriately.') + self.keypoint_indices = self.DEFAULT_KEYPOINT_INDICES[ + dataset_name] + else: + assert len(self.keypoint_indices) == 2, 'The keypoint '\ + 'indices used for normalization should be a pair.' + keypoint_id2name = self.dataset_meta['keypoint_id2name'] + dataset_name = self.dataset_meta['dataset_name'] + for idx in self.keypoint_indices: + assert idx in keypoint_id2name, f'The {dataset_name} '\ + f'dataset does not contain the required '\ + f'{idx}-th keypoint.' + # normalize_factor: [N, 2] + normalize_factor = self._get_normalize_factor(gt_coords=gt_coords) + nme = keypoint_nme(pred_coords, gt_coords, mask, normalize_factor) + metrics['NME'] = nme + + return metrics + + def _get_normalize_factor(self, gt_coords: np.ndarray) -> np.ndarray: + """Get the normalize factor. generally inter-ocular distance measured + as the Euclidean distance between the outer corners of the eyes is + used. + + Args: + gt_coords (np.ndarray[N, K, 2]): Groundtruth keypoint coordinates. + + Returns: + np.ndarray[N, 2]: normalized factor + """ + idx1, idx2 = self.keypoint_indices + + interocular = np.linalg.norm( + gt_coords[:, idx1, :] - gt_coords[:, idx2, :], + axis=1, + keepdims=True) + + return np.tile(interocular, [1, 2]) diff --git a/mmpose/evaluation/metrics/keypoint_3d_metrics.py b/mmpose/evaluation/metrics/keypoint_3d_metrics.py index e945650c30..0697020320 100644 --- a/mmpose/evaluation/metrics/keypoint_3d_metrics.py +++ b/mmpose/evaluation/metrics/keypoint_3d_metrics.py @@ -1,131 +1,131 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from collections import defaultdict -from os import path as osp -from typing import Dict, Optional, Sequence - -import numpy as np -from mmengine.evaluator import BaseMetric -from mmengine.logging import MMLogger - -from mmpose.registry import METRICS -from ..functional import keypoint_mpjpe - - -@METRICS.register_module() -class MPJPE(BaseMetric): - """MPJPE evaluation metric. - - Calculate the mean per-joint position error (MPJPE) of keypoints. - - Note: - - length of dataset: N - - num_keypoints: K - - number of keypoint dimensions: D (typically D = 2) - - Args: - mode (str): Method to align the prediction with the - ground truth. 
Supported options are: - - - ``'mpjpe'``: no alignment will be applied - - ``'p-mpjpe'``: align in the least-square sense in scale - - ``'n-mpjpe'``: align in the least-square sense in - scale, rotation, and translation. - - collect_device (str): Device name used for collecting results from - different ranks during distributed training. Must be ``'cpu'`` or - ``'gpu'``. Default: ``'cpu'``. - prefix (str, optional): The prefix that will be added in the metric - names to disambiguate homonymous metrics of different evaluators. - If prefix is not provided in the argument, ``self.default_prefix`` - will be used instead. Default: ``None``. - """ - - ALIGNMENT = {'mpjpe': 'none', 'p-mpjpe': 'procrustes', 'n-mpjpe': 'scale'} - - def __init__(self, - mode: str = 'mpjpe', - collect_device: str = 'cpu', - prefix: Optional[str] = None) -> None: - super().__init__(collect_device=collect_device, prefix=prefix) - allowed_modes = self.ALIGNMENT.keys() - if mode not in allowed_modes: - raise KeyError("`mode` should be 'mpjpe', 'p-mpjpe', or " - f"'n-mpjpe', but got '{mode}'.") - - self.mode = mode - - def process(self, data_batch: Sequence[dict], - data_samples: Sequence[dict]) -> None: - """Process one batch of data samples and predictions. The processed - results should be stored in ``self.results``, which will be used to - compute the metrics when all batches have been processed. - - Args: - data_batch (Sequence[dict]): A batch of data - from the dataloader. - data_samples (Sequence[dict]): A batch of outputs from - the model. - """ - for data_sample in data_samples: - # predicted keypoints coordinates, [1, K, D] - pred_coords = data_sample['pred_instances']['keypoints'] - # ground truth data_info - gt = data_sample['gt_instances'] - # ground truth keypoints coordinates, [1, K, D] - gt_coords = gt['lifting_target'] - # ground truth keypoints_visible, [1, K, 1] - mask = gt['lifting_target_visible'].astype(bool).reshape(1, -1) - # instance action - img_path = data_sample['target_img_path'] - _, rest = osp.basename(img_path).split('_', 1) - action, _ = rest.split('.', 1) - - result = { - 'pred_coords': pred_coords, - 'gt_coords': gt_coords, - 'mask': mask, - 'action': action - } - - self.results.append(result) - - def compute_metrics(self, results: list) -> Dict[str, float]: - """Compute the metrics from processed results. - - Args: - results (list): The processed results of each batch. - - Returns: - Dict[str, float]: The computed metrics. The keys are the names of - the metrics, and the values are the corresponding results. 
- """ - logger: MMLogger = MMLogger.get_current_instance() - - # pred_coords: [N, K, D] - pred_coords = np.concatenate( - [result['pred_coords'] for result in results]) - if pred_coords.ndim == 4 and pred_coords.shape[1] == 1: - pred_coords = np.squeeze(pred_coords, axis=1) - # gt_coords: [N, K, D] - gt_coords = np.stack([result['gt_coords'] for result in results]) - # mask: [N, K] - mask = np.concatenate([result['mask'] for result in results]) - # action_category_indices: Dict[List[int]] - action_category_indices = defaultdict(list) - for idx, result in enumerate(results): - action_category = result['action'].split('_')[0] - action_category_indices[action_category].append(idx) - - error_name = self.mode.upper() - - logger.info(f'Evaluating {self.mode.upper()}...') - metrics = dict() - - metrics[error_name] = keypoint_mpjpe(pred_coords, gt_coords, mask, - self.ALIGNMENT[self.mode]) - - for action_category, indices in action_category_indices.items(): - metrics[f'{error_name}_{action_category}'] = keypoint_mpjpe( - pred_coords[indices], gt_coords[indices], mask[indices]) - - return metrics +# Copyright (c) OpenMMLab. All rights reserved. +from collections import defaultdict +from os import path as osp +from typing import Dict, Optional, Sequence + +import numpy as np +from mmengine.evaluator import BaseMetric +from mmengine.logging import MMLogger + +from mmpose.registry import METRICS +from ..functional import keypoint_mpjpe + + +@METRICS.register_module() +class MPJPE(BaseMetric): + """MPJPE evaluation metric. + + Calculate the mean per-joint position error (MPJPE) of keypoints. + + Note: + - length of dataset: N + - num_keypoints: K + - number of keypoint dimensions: D (typically D = 2) + + Args: + mode (str): Method to align the prediction with the + ground truth. Supported options are: + + - ``'mpjpe'``: no alignment will be applied + - ``'p-mpjpe'``: align in the least-square sense in scale + - ``'n-mpjpe'``: align in the least-square sense in + scale, rotation, and translation. + + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be ``'cpu'`` or + ``'gpu'``. Default: ``'cpu'``. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, ``self.default_prefix`` + will be used instead. Default: ``None``. + """ + + ALIGNMENT = {'mpjpe': 'none', 'p-mpjpe': 'procrustes', 'n-mpjpe': 'scale'} + + def __init__(self, + mode: str = 'mpjpe', + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + super().__init__(collect_device=collect_device, prefix=prefix) + allowed_modes = self.ALIGNMENT.keys() + if mode not in allowed_modes: + raise KeyError("`mode` should be 'mpjpe', 'p-mpjpe', or " + f"'n-mpjpe', but got '{mode}'.") + + self.mode = mode + + def process(self, data_batch: Sequence[dict], + data_samples: Sequence[dict]) -> None: + """Process one batch of data samples and predictions. The processed + results should be stored in ``self.results``, which will be used to + compute the metrics when all batches have been processed. + + Args: + data_batch (Sequence[dict]): A batch of data + from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from + the model. 
+ """ + for data_sample in data_samples: + # predicted keypoints coordinates, [1, K, D] + pred_coords = data_sample['pred_instances']['keypoints'] + # ground truth data_info + gt = data_sample['gt_instances'] + # ground truth keypoints coordinates, [1, K, D] + gt_coords = gt['lifting_target'] + # ground truth keypoints_visible, [1, K, 1] + mask = gt['lifting_target_visible'].astype(bool).reshape(1, -1) + # instance action + img_path = data_sample['target_img_path'] + _, rest = osp.basename(img_path).split('_', 1) + action, _ = rest.split('.', 1) + + result = { + 'pred_coords': pred_coords, + 'gt_coords': gt_coords, + 'mask': mask, + 'action': action + } + + self.results.append(result) + + def compute_metrics(self, results: list) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are the corresponding results. + """ + logger: MMLogger = MMLogger.get_current_instance() + + # pred_coords: [N, K, D] + pred_coords = np.concatenate( + [result['pred_coords'] for result in results]) + if pred_coords.ndim == 4 and pred_coords.shape[1] == 1: + pred_coords = np.squeeze(pred_coords, axis=1) + # gt_coords: [N, K, D] + gt_coords = np.stack([result['gt_coords'] for result in results]) + # mask: [N, K] + mask = np.concatenate([result['mask'] for result in results]) + # action_category_indices: Dict[List[int]] + action_category_indices = defaultdict(list) + for idx, result in enumerate(results): + action_category = result['action'].split('_')[0] + action_category_indices[action_category].append(idx) + + error_name = self.mode.upper() + + logger.info(f'Evaluating {self.mode.upper()}...') + metrics = dict() + + metrics[error_name] = keypoint_mpjpe(pred_coords, gt_coords, mask, + self.ALIGNMENT[self.mode]) + + for action_category, indices in action_category_indices.items(): + metrics[f'{error_name}_{action_category}'] = keypoint_mpjpe( + pred_coords[indices], gt_coords[indices], mask[indices]) + + return metrics diff --git a/mmpose/evaluation/metrics/keypoint_partition_metric.py b/mmpose/evaluation/metrics/keypoint_partition_metric.py index fb30eca0d5..eb0c581ed8 100644 --- a/mmpose/evaluation/metrics/keypoint_partition_metric.py +++ b/mmpose/evaluation/metrics/keypoint_partition_metric.py @@ -1,203 +1,203 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings -from collections import OrderedDict -from copy import deepcopy -from typing import Sequence - -import numpy as np -from mmengine.evaluator import BaseMetric - -from mmpose.registry import METRICS - - -@METRICS.register_module() -class KeypointPartitionMetric(BaseMetric): - """Wrapper metric for evaluating pose metric on user-defined body parts. - - Sometimes one may be interested in the performance of a pose model on - certain body parts rather than on all the keypoints. For example, - ``CocoWholeBodyMetric`` evaluates coco metric on body, foot, face, - lefthand and righthand. However, ``CocoWholeBodyMetric`` cannot be - applied to arbitrary custom datasets. This wrapper metric solves this - problem. - - Supported metrics: - ``CocoMetric`` Note 1: all keypoint ground truth should be stored in - `keypoints` not other data fields. Note 2: `ann_file` is not - supported, it will be ignored. Note 3: `score_mode` other than - 'bbox' may produce results different from the - ``CocoWholebodyMetric``. 
Note 4: `nms_mode` other than 'none' may - produce results different from the ``CocoWholebodyMetric``. - ``PCKAccuracy`` Note 1: data fields required by ``PCKAccuracy`` should - be provided, such as bbox, head_size, etc. Note 2: In terms of - 'torso', since it is specifically designed for ``JhmdbDataset``, it is - not recommended to use it for other datasets. - ``AUC`` supported without limitations. - ``EPE`` supported without limitations. - ``NME`` only `norm_mode` = 'use_norm_item' is supported, - 'keypoint_distance' is incompatible with ``KeypointPartitionMetric``. - - Incompatible metrics: - The following metrics are dataset specific metrics: - ``CocoWholeBodyMetric`` - ``MpiiPCKAccuracy`` - ``JhmdbPCKAccuracy`` - ``PoseTrack18Metric`` - Keypoint partitioning is included in these metrics. - - Args: - metric (dict): arguments to instantiate a metric, please refer to the - arguments required by the metric of your choice. - partitions (dict): definition of body partitions. For example, if we - have 10 keypoints in total, the first 7 keypoints belong to body - and the last 3 keypoints belong to foot, this field can be like - this: - dict( - body=[0, 1, 2, 3, 4, 5, 6], - foot=[7, 8, 9], - all=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9] - ) - where the numbers are the indices of keypoints and they can be - discontinuous. - """ - - def __init__( - self, - metric: dict, - partitions: dict, - ) -> None: - super().__init__() - # check metric type - supported_metric_types = [ - 'CocoMetric', 'PCKAccuracy', 'AUC', 'EPE', 'NME' - ] - if metric['type'] not in supported_metric_types: - raise ValueError( - 'Metrics supported by KeypointPartitionMetric are CocoMetric, ' - 'PCKAccuracy, AUC, EPE and NME, ' - f"but got {metric['type']}") - - # check CocoMetric arguments - if metric['type'] == 'CocoMetric': - if 'ann_file' in metric: - warnings.warn( - 'KeypointPartitionMetric does not support the ann_file ' - 'argument of CocoMetric, this argument will be ignored.') - metric['ann_file'] = None - score_mode = metric.get('score_mode', 'bbox_keypoint') - if score_mode != 'bbox': - warnings.warn( - 'When using KeypointPartitionMetric with CocoMetric, ' - "if score_mode is not 'bbox', pose scores will be " - "calculated part by part rather than by 'wholebody'. " - 'Therefore, this may produce results different from the ' - 'CocoWholebodyMetric.') - nms_mode = metric.get('nms_mode', 'oks_nms') - if nms_mode != 'none': - warnings.warn( - 'When using KeypointPartitionMetric with CocoMetric, ' - 'oks_nms and soft_oks_nms will be calculated part by part ' - "rather than by 'wholebody'. Therefore, this may produce " - 'results different from the CocoWholebodyMetric.') - - # check PCKAccuracy arguments - if metric['type'] == 'PCKAccuracy': - norm_item = metric.get('norm_item', 'bbox') - if norm_item == 'torso' or 'torso' in norm_item: - warnings.warn( - 'norm_item torso is used in JhmdbDataset, it may not be ' - 'compatible with other datasets, use at your own risk.') - - # check NME arguments - if metric['type'] == 'NME': - assert 'norm_mode' in metric, \ - 'Missing norm_mode required by the NME metric.' - if metric['norm_mode'] != 'use_norm_item': - raise ValueError( - "NME norm_mode 'keypoint_distance' is incompatible with " - 'KeypointPartitionMetric.') - - # check partitions - assert len(partitions) > 0, 'There should be at least one partition.' - for partition_name, partition in partitions.items(): - assert isinstance(partition, Sequence), \ - 'Each partition should be a sequence.' 
- assert len(partition) > 0, \ - 'Each partition should have at least one element.' - self.partitions = partitions - - # instantiate metrics for each partition - self.metrics = {} - for partition_name in partitions.keys(): - _metric = deepcopy(metric) - if 'outfile_prefix' in _metric: - _metric['outfile_prefix'] = _metric[ - 'outfile_prefix'] + '.' + partition_name - self.metrics[partition_name] = METRICS.build(_metric) - - @BaseMetric.dataset_meta.setter - def dataset_meta(self, dataset_meta: dict) -> None: - """Set the dataset meta info to the metric.""" - self._dataset_meta = dataset_meta - # sigmas required by coco metric have to be split as well - for partition_name, keypoint_ids in self.partitions.items(): - _dataset_meta = deepcopy(dataset_meta) - _dataset_meta['num_keypoints'] = len(keypoint_ids) - _dataset_meta['sigmas'] = _dataset_meta['sigmas'][keypoint_ids] - self.metrics[partition_name].dataset_meta = _dataset_meta - - def process(self, data_batch: Sequence[dict], - data_samples: Sequence[dict]) -> None: - """Split data samples by partitions, then call metric.process part by - part.""" - parted_data_samples = { - partition_name: [] - for partition_name in self.partitions.keys() - } - for data_sample in data_samples: - for partition_name, keypoint_ids in self.partitions.items(): - _data_sample = deepcopy(data_sample) - if 'keypoint_scores' in _data_sample['pred_instances']: - _data_sample['pred_instances'][ - 'keypoint_scores'] = _data_sample['pred_instances'][ - 'keypoint_scores'][:, keypoint_ids] - _data_sample['pred_instances']['keypoints'] = _data_sample[ - 'pred_instances']['keypoints'][:, keypoint_ids] - _data_sample['gt_instances']['keypoints'] = _data_sample[ - 'gt_instances']['keypoints'][:, keypoint_ids] - _data_sample['gt_instances'][ - 'keypoints_visible'] = _data_sample['gt_instances'][ - 'keypoints_visible'][:, keypoint_ids] - - # for coco metric - if 'raw_ann_info' in _data_sample: - raw_ann_info = _data_sample['raw_ann_info'] - anns = raw_ann_info if isinstance( - raw_ann_info, list) else [raw_ann_info] - for ann in anns: - if 'keypoints' in ann: - keypoints = np.array(ann['keypoints']).reshape( - -1, 3) - keypoints = keypoints[keypoint_ids] - num_keypoints = np.sum(keypoints[:, 2] > 0) - ann['keypoints'] = keypoints.flatten().tolist() - ann['num_keypoints'] = num_keypoints - - parted_data_samples[partition_name].append(_data_sample) - - for partition_name, metric in self.metrics.items(): - metric.process(data_batch, parted_data_samples[partition_name]) - - def compute_metrics(self, results: list) -> dict: - pass - - def evaluate(self, size: int) -> dict: - """Run evaluation for each partition.""" - eval_results = OrderedDict() - for partition_name, metric in self.metrics.items(): - _eval_results = metric.evaluate(size) - for key in list(_eval_results.keys()): - new_key = partition_name + '/' + key - _eval_results[new_key] = _eval_results.pop(key) - eval_results.update(_eval_results) - return eval_results +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from collections import OrderedDict +from copy import deepcopy +from typing import Sequence + +import numpy as np +from mmengine.evaluator import BaseMetric + +from mmpose.registry import METRICS + + +@METRICS.register_module() +class KeypointPartitionMetric(BaseMetric): + """Wrapper metric for evaluating pose metric on user-defined body parts. + + Sometimes one may be interested in the performance of a pose model on + certain body parts rather than on all the keypoints. 
For example, + ``CocoWholeBodyMetric`` evaluates coco metric on body, foot, face, + lefthand and righthand. However, ``CocoWholeBodyMetric`` cannot be + applied to arbitrary custom datasets. This wrapper metric solves this + problem. + + Supported metrics: + ``CocoMetric`` Note 1: all keypoint ground truth should be stored in + `keypoints` not other data fields. Note 2: `ann_file` is not + supported, it will be ignored. Note 3: `score_mode` other than + 'bbox' may produce results different from the + ``CocoWholebodyMetric``. Note 4: `nms_mode` other than 'none' may + produce results different from the ``CocoWholebodyMetric``. + ``PCKAccuracy`` Note 1: data fields required by ``PCKAccuracy`` should + be provided, such as bbox, head_size, etc. Note 2: In terms of + 'torso', since it is specifically designed for ``JhmdbDataset``, it is + not recommended to use it for other datasets. + ``AUC`` supported without limitations. + ``EPE`` supported without limitations. + ``NME`` only `norm_mode` = 'use_norm_item' is supported, + 'keypoint_distance' is incompatible with ``KeypointPartitionMetric``. + + Incompatible metrics: + The following metrics are dataset specific metrics: + ``CocoWholeBodyMetric`` + ``MpiiPCKAccuracy`` + ``JhmdbPCKAccuracy`` + ``PoseTrack18Metric`` + Keypoint partitioning is included in these metrics. + + Args: + metric (dict): arguments to instantiate a metric, please refer to the + arguments required by the metric of your choice. + partitions (dict): definition of body partitions. For example, if we + have 10 keypoints in total, the first 7 keypoints belong to body + and the last 3 keypoints belong to foot, this field can be like + this: + dict( + body=[0, 1, 2, 3, 4, 5, 6], + foot=[7, 8, 9], + all=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + ) + where the numbers are the indices of keypoints and they can be + discontinuous. + """ + + def __init__( + self, + metric: dict, + partitions: dict, + ) -> None: + super().__init__() + # check metric type + supported_metric_types = [ + 'CocoMetric', 'PCKAccuracy', 'AUC', 'EPE', 'NME' + ] + if metric['type'] not in supported_metric_types: + raise ValueError( + 'Metrics supported by KeypointPartitionMetric are CocoMetric, ' + 'PCKAccuracy, AUC, EPE and NME, ' + f"but got {metric['type']}") + + # check CocoMetric arguments + if metric['type'] == 'CocoMetric': + if 'ann_file' in metric: + warnings.warn( + 'KeypointPartitionMetric does not support the ann_file ' + 'argument of CocoMetric, this argument will be ignored.') + metric['ann_file'] = None + score_mode = metric.get('score_mode', 'bbox_keypoint') + if score_mode != 'bbox': + warnings.warn( + 'When using KeypointPartitionMetric with CocoMetric, ' + "if score_mode is not 'bbox', pose scores will be " + "calculated part by part rather than by 'wholebody'. " + 'Therefore, this may produce results different from the ' + 'CocoWholebodyMetric.') + nms_mode = metric.get('nms_mode', 'oks_nms') + if nms_mode != 'none': + warnings.warn( + 'When using KeypointPartitionMetric with CocoMetric, ' + 'oks_nms and soft_oks_nms will be calculated part by part ' + "rather than by 'wholebody'. 
Therefore, this may produce " + 'results different from the CocoWholebodyMetric.') + + # check PCKAccuracy arguments + if metric['type'] == 'PCKAccuracy': + norm_item = metric.get('norm_item', 'bbox') + if norm_item == 'torso' or 'torso' in norm_item: + warnings.warn( + 'norm_item torso is used in JhmdbDataset, it may not be ' + 'compatible with other datasets, use at your own risk.') + + # check NME arguments + if metric['type'] == 'NME': + assert 'norm_mode' in metric, \ + 'Missing norm_mode required by the NME metric.' + if metric['norm_mode'] != 'use_norm_item': + raise ValueError( + "NME norm_mode 'keypoint_distance' is incompatible with " + 'KeypointPartitionMetric.') + + # check partitions + assert len(partitions) > 0, 'There should be at least one partition.' + for partition_name, partition in partitions.items(): + assert isinstance(partition, Sequence), \ + 'Each partition should be a sequence.' + assert len(partition) > 0, \ + 'Each partition should have at least one element.' + self.partitions = partitions + + # instantiate metrics for each partition + self.metrics = {} + for partition_name in partitions.keys(): + _metric = deepcopy(metric) + if 'outfile_prefix' in _metric: + _metric['outfile_prefix'] = _metric[ + 'outfile_prefix'] + '.' + partition_name + self.metrics[partition_name] = METRICS.build(_metric) + + @BaseMetric.dataset_meta.setter + def dataset_meta(self, dataset_meta: dict) -> None: + """Set the dataset meta info to the metric.""" + self._dataset_meta = dataset_meta + # sigmas required by coco metric have to be split as well + for partition_name, keypoint_ids in self.partitions.items(): + _dataset_meta = deepcopy(dataset_meta) + _dataset_meta['num_keypoints'] = len(keypoint_ids) + _dataset_meta['sigmas'] = _dataset_meta['sigmas'][keypoint_ids] + self.metrics[partition_name].dataset_meta = _dataset_meta + + def process(self, data_batch: Sequence[dict], + data_samples: Sequence[dict]) -> None: + """Split data samples by partitions, then call metric.process part by + part.""" + parted_data_samples = { + partition_name: [] + for partition_name in self.partitions.keys() + } + for data_sample in data_samples: + for partition_name, keypoint_ids in self.partitions.items(): + _data_sample = deepcopy(data_sample) + if 'keypoint_scores' in _data_sample['pred_instances']: + _data_sample['pred_instances'][ + 'keypoint_scores'] = _data_sample['pred_instances'][ + 'keypoint_scores'][:, keypoint_ids] + _data_sample['pred_instances']['keypoints'] = _data_sample[ + 'pred_instances']['keypoints'][:, keypoint_ids] + _data_sample['gt_instances']['keypoints'] = _data_sample[ + 'gt_instances']['keypoints'][:, keypoint_ids] + _data_sample['gt_instances'][ + 'keypoints_visible'] = _data_sample['gt_instances'][ + 'keypoints_visible'][:, keypoint_ids] + + # for coco metric + if 'raw_ann_info' in _data_sample: + raw_ann_info = _data_sample['raw_ann_info'] + anns = raw_ann_info if isinstance( + raw_ann_info, list) else [raw_ann_info] + for ann in anns: + if 'keypoints' in ann: + keypoints = np.array(ann['keypoints']).reshape( + -1, 3) + keypoints = keypoints[keypoint_ids] + num_keypoints = np.sum(keypoints[:, 2] > 0) + ann['keypoints'] = keypoints.flatten().tolist() + ann['num_keypoints'] = num_keypoints + + parted_data_samples[partition_name].append(_data_sample) + + for partition_name, metric in self.metrics.items(): + metric.process(data_batch, parted_data_samples[partition_name]) + + def compute_metrics(self, results: list) -> dict: + pass + + def evaluate(self, size: int) -> dict: + 
"""Run evaluation for each partition.""" + eval_results = OrderedDict() + for partition_name, metric in self.metrics.items(): + _eval_results = metric.evaluate(size) + for key in list(_eval_results.keys()): + new_key = partition_name + '/' + key + _eval_results[new_key] = _eval_results.pop(key) + eval_results.update(_eval_results) + return eval_results diff --git a/mmpose/evaluation/metrics/posetrack18_metric.py b/mmpose/evaluation/metrics/posetrack18_metric.py index 86f801455a..0abf5c4c12 100644 --- a/mmpose/evaluation/metrics/posetrack18_metric.py +++ b/mmpose/evaluation/metrics/posetrack18_metric.py @@ -1,220 +1,220 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os -import os.path as osp -from typing import Dict, List, Optional - -import numpy as np -from mmengine.fileio import dump, load -from mmengine.logging import MMLogger - -from mmpose.registry import METRICS -from .coco_metric import CocoMetric - -try: - from poseval import eval_helpers - from poseval.evaluateAP import evaluateAP - has_poseval = True -except (ImportError, ModuleNotFoundError): - has_poseval = False - - -@METRICS.register_module() -class PoseTrack18Metric(CocoMetric): - """PoseTrack18 evaluation metric. - - Evaluate AP, and mAP for keypoint detection tasks. - Support PoseTrack18 (video) dataset. Please refer to - ``__ - for more details. - - Args: - ann_file (str, optional): Path to the coco format annotation file. - If not specified, ground truth annotations from the dataset will - be converted to coco format. Defaults to None - score_mode (str): The mode to score the prediction results which - should be one of the following options: - - - ``'bbox'``: Take the score of bbox as the score of the - prediction results. - - ``'bbox_keypoint'``: Use keypoint score to rescore the - prediction results. - - Defaults to ``'bbox_keypoint'` - keypoint_score_thr (float): The threshold of keypoint score. The - keypoints with score lower than it will not be included to - rescore the prediction results. Valid only when ``score_mode`` is - ``bbox_keypoint``. Defaults to ``0.2`` - nms_mode (str): The mode to perform Non-Maximum Suppression (NMS), - which should be one of the following options: - - - ``'oks_nms'``: Use Object Keypoint Similarity (OKS) to - perform NMS. - - ``'soft_oks_nms'``: Use Object Keypoint Similarity (OKS) - to perform soft NMS. - - ``'none'``: Do not perform NMS. Typically for bottomup mode - output. - - Defaults to ``'oks_nms'` - nms_thr (float): The Object Keypoint Similarity (OKS) threshold - used in NMS when ``nms_mode`` is ``'oks_nms'`` or - ``'soft_oks_nms'``. Will retain the prediction results with OKS - lower than ``nms_thr``. Defaults to ``0.9`` - format_only (bool): Whether only format the output results without - doing quantitative evaluation. This is designed for the need of - test submission when the ground truth annotations are absent. If - set to ``True``, ``outfile_prefix`` should specify the path to - store the output results. Defaults to ``False`` - outfile_prefix (str | None): The prefix of json files. It includes - the file path and the prefix of filename, e.g., ``'a/b/prefix'``. - If not specified, a temp file will be created. 
Defaults to ``None`` - **kwargs: Keyword parameters passed to :class:`mmeval.BaseMetric` - """ - default_prefix: Optional[str] = 'posetrack18' - - def __init__(self, - ann_file: Optional[str] = None, - score_mode: str = 'bbox_keypoint', - keypoint_score_thr: float = 0.2, - nms_mode: str = 'oks_nms', - nms_thr: float = 0.9, - format_only: bool = False, - outfile_prefix: Optional[str] = None, - collect_device: str = 'cpu', - prefix: Optional[str] = None) -> None: - # raise an error to avoid long time running without getting results - if not has_poseval: - raise ImportError('Please install ``poseval`` package for ' - 'evaluation on PoseTrack dataset ' - '(see `requirements/optional.txt`)') - super().__init__( - ann_file=ann_file, - score_mode=score_mode, - keypoint_score_thr=keypoint_score_thr, - nms_mode=nms_mode, - nms_thr=nms_thr, - format_only=format_only, - outfile_prefix=outfile_prefix, - collect_device=collect_device, - prefix=prefix) - - def results2json(self, keypoints: Dict[int, list], - outfile_prefix: str) -> str: - """Dump the keypoint detection results into a json file. - - Args: - keypoints (Dict[int, list]): Keypoint detection results - of the dataset. - outfile_prefix (str): The filename prefix of the json files. - If the prefix is "somepath/xxx", the json files will be named - "somepath/xxx.keypoints.json". - - Returns: - str: The json file name of keypoint results. - """ - categories = [] - - cat = {} - cat['supercategory'] = 'person' - cat['id'] = 1 - cat['name'] = 'person' - cat['keypoints'] = [ - 'nose', 'head_bottom', 'head_top', 'left_ear', 'right_ear', - 'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow', - 'left_wrist', 'right_wrist', 'left_hip', 'right_hip', 'left_knee', - 'right_knee', 'left_ankle', 'right_ankle' - ] - cat['skeleton'] = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], - [6, 12], [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], - [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], - [4, 6], [5, 7]] - categories.append(cat) - - # path of directory for official gt files - gt_folder = osp.join( - osp.dirname(self.ann_file), - osp.splitext(self.ann_file.split('_')[-1])[0]) - # the json file for each video sequence - json_files = [ - pos for pos in os.listdir(gt_folder) if pos.endswith('.json') - ] - - for json_file in json_files: - gt = load(osp.join(gt_folder, json_file)) - annotations = [] - images = [] - - for image in gt['images']: - img = {} - img['id'] = image['id'] - img['file_name'] = image['file_name'] - images.append(img) - - img_kpts = keypoints[img['id']] - - for track_id, img_kpt in enumerate(img_kpts): - ann = {} - ann['image_id'] = img_kpt['img_id'] - ann['keypoints'] = np.array( - img_kpt['keypoints']).reshape(-1).tolist() - ann['scores'] = np.array(ann['keypoints']).reshape( - [-1, 3])[:, 2].tolist() - ann['score'] = float(img_kpt['score']) - ann['track_id'] = track_id - annotations.append(ann) - - pred_file = osp.join(osp.dirname(outfile_prefix), json_file) - info = {} - info['images'] = images - info['categories'] = categories - info['annotations'] = annotations - - dump(info, pred_file, sort_keys=True, indent=4) - - def _do_python_keypoint_eval(self, outfile_prefix: str) -> List[tuple]: - """Do keypoint evaluation using `poseval` package. - - Args: - outfile_prefix (str): The filename prefix of the json files. - If the prefix is "somepath/xxx", the json files will be named - "somepath/xxx.keypoints.json". - - Returns: - list: a list of tuples. Each tuple contains the evaluation stats - name and corresponding stats value. 
- """ - logger: MMLogger = MMLogger.get_current_instance() - - # path of directory for official gt files - # 'xxx/posetrack18_train.json' -> 'xxx/train/' - gt_folder = osp.join( - osp.dirname(self.ann_file), - osp.splitext(self.ann_file.split('_')[-1])[0]) - pred_folder = osp.dirname(outfile_prefix) - - argv = ['', gt_folder + '/', pred_folder + '/'] - - logger.info('Loading data') - gtFramesAll, prFramesAll = eval_helpers.load_data_dir(argv) - - logger.info(f'# gt frames : {len(gtFramesAll)}') - logger.info(f'# pred frames: {len(prFramesAll)}') - - # evaluate per-frame multi-person pose estimation (AP) - # compute AP - logger.info('Evaluation of per-frame multi-person pose estimation') - apAll, _, _ = evaluateAP(gtFramesAll, prFramesAll, None, False, False) - - # print AP - logger.info('Average Precision (AP) metric:') - eval_helpers.printTable(apAll) - - stats = eval_helpers.getCum(apAll) - - stats_names = [ - 'Head AP', 'Shou AP', 'Elb AP', 'Wri AP', 'Hip AP', 'Knee AP', - 'Ankl AP', 'AP' - ] - - info_str = list(zip(stats_names, stats)) - - return info_str +# Copyright (c) OpenMMLab. All rights reserved. +import os +import os.path as osp +from typing import Dict, List, Optional + +import numpy as np +from mmengine.fileio import dump, load +from mmengine.logging import MMLogger + +from mmpose.registry import METRICS +from .coco_metric import CocoMetric + +try: + from poseval import eval_helpers + from poseval.evaluateAP import evaluateAP + has_poseval = True +except (ImportError, ModuleNotFoundError): + has_poseval = False + + +@METRICS.register_module() +class PoseTrack18Metric(CocoMetric): + """PoseTrack18 evaluation metric. + + Evaluate AP, and mAP for keypoint detection tasks. + Support PoseTrack18 (video) dataset. Please refer to + ``__ + for more details. + + Args: + ann_file (str, optional): Path to the coco format annotation file. + If not specified, ground truth annotations from the dataset will + be converted to coco format. Defaults to None + score_mode (str): The mode to score the prediction results which + should be one of the following options: + + - ``'bbox'``: Take the score of bbox as the score of the + prediction results. + - ``'bbox_keypoint'``: Use keypoint score to rescore the + prediction results. + + Defaults to ``'bbox_keypoint'` + keypoint_score_thr (float): The threshold of keypoint score. The + keypoints with score lower than it will not be included to + rescore the prediction results. Valid only when ``score_mode`` is + ``bbox_keypoint``. Defaults to ``0.2`` + nms_mode (str): The mode to perform Non-Maximum Suppression (NMS), + which should be one of the following options: + + - ``'oks_nms'``: Use Object Keypoint Similarity (OKS) to + perform NMS. + - ``'soft_oks_nms'``: Use Object Keypoint Similarity (OKS) + to perform soft NMS. + - ``'none'``: Do not perform NMS. Typically for bottomup mode + output. + + Defaults to ``'oks_nms'` + nms_thr (float): The Object Keypoint Similarity (OKS) threshold + used in NMS when ``nms_mode`` is ``'oks_nms'`` or + ``'soft_oks_nms'``. Will retain the prediction results with OKS + lower than ``nms_thr``. Defaults to ``0.9`` + format_only (bool): Whether only format the output results without + doing quantitative evaluation. This is designed for the need of + test submission when the ground truth annotations are absent. If + set to ``True``, ``outfile_prefix`` should specify the path to + store the output results. Defaults to ``False`` + outfile_prefix (str | None): The prefix of json files. 
It includes + the file path and the prefix of filename, e.g., ``'a/b/prefix'``. + If not specified, a temp file will be created. Defaults to ``None`` + **kwargs: Keyword parameters passed to :class:`mmeval.BaseMetric` + """ + default_prefix: Optional[str] = 'posetrack18' + + def __init__(self, + ann_file: Optional[str] = None, + score_mode: str = 'bbox_keypoint', + keypoint_score_thr: float = 0.2, + nms_mode: str = 'oks_nms', + nms_thr: float = 0.9, + format_only: bool = False, + outfile_prefix: Optional[str] = None, + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + # raise an error to avoid long time running without getting results + if not has_poseval: + raise ImportError('Please install ``poseval`` package for ' + 'evaluation on PoseTrack dataset ' + '(see `requirements/optional.txt`)') + super().__init__( + ann_file=ann_file, + score_mode=score_mode, + keypoint_score_thr=keypoint_score_thr, + nms_mode=nms_mode, + nms_thr=nms_thr, + format_only=format_only, + outfile_prefix=outfile_prefix, + collect_device=collect_device, + prefix=prefix) + + def results2json(self, keypoints: Dict[int, list], + outfile_prefix: str) -> str: + """Dump the keypoint detection results into a json file. + + Args: + keypoints (Dict[int, list]): Keypoint detection results + of the dataset. + outfile_prefix (str): The filename prefix of the json files. + If the prefix is "somepath/xxx", the json files will be named + "somepath/xxx.keypoints.json". + + Returns: + str: The json file name of keypoint results. + """ + categories = [] + + cat = {} + cat['supercategory'] = 'person' + cat['id'] = 1 + cat['name'] = 'person' + cat['keypoints'] = [ + 'nose', 'head_bottom', 'head_top', 'left_ear', 'right_ear', + 'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow', + 'left_wrist', 'right_wrist', 'left_hip', 'right_hip', 'left_knee', + 'right_knee', 'left_ankle', 'right_ankle' + ] + cat['skeleton'] = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], + [6, 12], [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], + [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], + [4, 6], [5, 7]] + categories.append(cat) + + # path of directory for official gt files + gt_folder = osp.join( + osp.dirname(self.ann_file), + osp.splitext(self.ann_file.split('_')[-1])[0]) + # the json file for each video sequence + json_files = [ + pos for pos in os.listdir(gt_folder) if pos.endswith('.json') + ] + + for json_file in json_files: + gt = load(osp.join(gt_folder, json_file)) + annotations = [] + images = [] + + for image in gt['images']: + img = {} + img['id'] = image['id'] + img['file_name'] = image['file_name'] + images.append(img) + + img_kpts = keypoints[img['id']] + + for track_id, img_kpt in enumerate(img_kpts): + ann = {} + ann['image_id'] = img_kpt['img_id'] + ann['keypoints'] = np.array( + img_kpt['keypoints']).reshape(-1).tolist() + ann['scores'] = np.array(ann['keypoints']).reshape( + [-1, 3])[:, 2].tolist() + ann['score'] = float(img_kpt['score']) + ann['track_id'] = track_id + annotations.append(ann) + + pred_file = osp.join(osp.dirname(outfile_prefix), json_file) + info = {} + info['images'] = images + info['categories'] = categories + info['annotations'] = annotations + + dump(info, pred_file, sort_keys=True, indent=4) + + def _do_python_keypoint_eval(self, outfile_prefix: str) -> List[tuple]: + """Do keypoint evaluation using `poseval` package. + + Args: + outfile_prefix (str): The filename prefix of the json files. 
+ If the prefix is "somepath/xxx", the json files will be named + "somepath/xxx.keypoints.json". + + Returns: + list: a list of tuples. Each tuple contains the evaluation stats + name and corresponding stats value. + """ + logger: MMLogger = MMLogger.get_current_instance() + + # path of directory for official gt files + # 'xxx/posetrack18_train.json' -> 'xxx/train/' + gt_folder = osp.join( + osp.dirname(self.ann_file), + osp.splitext(self.ann_file.split('_')[-1])[0]) + pred_folder = osp.dirname(outfile_prefix) + + argv = ['', gt_folder + '/', pred_folder + '/'] + + logger.info('Loading data') + gtFramesAll, prFramesAll = eval_helpers.load_data_dir(argv) + + logger.info(f'# gt frames : {len(gtFramesAll)}') + logger.info(f'# pred frames: {len(prFramesAll)}') + + # evaluate per-frame multi-person pose estimation (AP) + # compute AP + logger.info('Evaluation of per-frame multi-person pose estimation') + apAll, _, _ = evaluateAP(gtFramesAll, prFramesAll, None, False, False) + + # print AP + logger.info('Average Precision (AP) metric:') + eval_helpers.printTable(apAll) + + stats = eval_helpers.getCum(apAll) + + stats_names = [ + 'Head AP', 'Shou AP', 'Elb AP', 'Wri AP', 'Hip AP', 'Knee AP', + 'Ankl AP', 'AP' + ] + + info_str = list(zip(stats_names, stats)) + + return info_str diff --git a/mmpose/models/__init__.py b/mmpose/models/__init__.py index 4e236f9928..1590e10fdb 100644 --- a/mmpose/models/__init__.py +++ b/mmpose/models/__init__.py @@ -1,15 +1,15 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .backbones import * # noqa -from .builder import (BACKBONES, HEADS, LOSSES, NECKS, build_backbone, - build_head, build_loss, build_neck, build_pose_estimator, - build_posenet) -from .data_preprocessors import * # noqa -from .heads import * # noqa -from .losses import * # noqa -from .necks import * # noqa -from .pose_estimators import * # noqa - -__all__ = [ - 'BACKBONES', 'HEADS', 'NECKS', 'LOSSES', 'build_backbone', 'build_head', - 'build_loss', 'build_posenet', 'build_neck', 'build_pose_estimator' -] +# Copyright (c) OpenMMLab. All rights reserved. +from .backbones import * # noqa +from .builder import (BACKBONES, HEADS, LOSSES, NECKS, build_backbone, + build_head, build_loss, build_neck, build_pose_estimator, + build_posenet) +from .data_preprocessors import * # noqa +from .heads import * # noqa +from .losses import * # noqa +from .necks import * # noqa +from .pose_estimators import * # noqa + +__all__ = [ + 'BACKBONES', 'HEADS', 'NECKS', 'LOSSES', 'build_backbone', 'build_head', + 'build_loss', 'build_posenet', 'build_neck', 'build_pose_estimator' +] diff --git a/mmpose/models/backbones/__init__.py b/mmpose/models/backbones/__init__.py index cb2498560a..e7937f6b75 100644 --- a/mmpose/models/backbones/__init__.py +++ b/mmpose/models/backbones/__init__.py @@ -1,37 +1,41 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from .alexnet import AlexNet -from .cpm import CPM -from .hourglass import HourglassNet -from .hourglass_ae import HourglassAENet -from .hrformer import HRFormer -from .hrnet import HRNet -from .litehrnet import LiteHRNet -from .mobilenet_v2 import MobileNetV2 -from .mobilenet_v3 import MobileNetV3 -from .mspn import MSPN -from .pvt import PyramidVisionTransformer, PyramidVisionTransformerV2 -from .regnet import RegNet -from .resnest import ResNeSt -from .resnet import ResNet, ResNetV1d -from .resnext import ResNeXt -from .rsn import RSN -from .scnet import SCNet -from .seresnet import SEResNet -from .seresnext import SEResNeXt -from .shufflenet_v1 import ShuffleNetV1 -from .shufflenet_v2 import ShuffleNetV2 -from .swin import SwinTransformer -from .tcn import TCN -from .v2v_net import V2VNet -from .vgg import VGG -from .vipnas_mbv3 import ViPNAS_MobileNetV3 -from .vipnas_resnet import ViPNAS_ResNet - -__all__ = [ - 'AlexNet', 'HourglassNet', 'HourglassAENet', 'HRNet', 'MobileNetV2', - 'MobileNetV3', 'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SCNet', - 'SEResNet', 'SEResNeXt', 'ShuffleNetV1', 'ShuffleNetV2', 'CPM', 'RSN', - 'MSPN', 'ResNeSt', 'VGG', 'TCN', 'ViPNAS_ResNet', 'ViPNAS_MobileNetV3', - 'LiteHRNet', 'V2VNet', 'HRFormer', 'PyramidVisionTransformer', - 'PyramidVisionTransformerV2', 'SwinTransformer' -] +# Copyright (c) OpenMMLab. All rights reserved. +from .alexnet import AlexNet +from .cpm import CPM +from .hourglass import HourglassNet +from .hourglass_ae import HourglassAENet +from .hrformer import HRFormer +from .hrnet import HRNet +from .litehrnet import LiteHRNet +from .mobilenet_v2 import MobileNetV2 +from .mobilenet_v3 import MobileNetV3 +from .mspn import MSPN +from .pvt import PyramidVisionTransformer, PyramidVisionTransformerV2 +from .regnet import RegNet +from .resnest import ResNeSt +from .resnet import ResNet, ResNetV1d +from .resnext import ResNeXt +from .rsn import RSN +from .scnet import SCNet +from .seresnet import SEResNet +from .seresnext import SEResNeXt +from .shufflenet_v1 import ShuffleNetV1 +from .shufflenet_v2 import ShuffleNetV2 +from .swin import SwinTransformer +from .tcn import TCN +from .v2v_net import V2VNet +from .vgg import VGG +from .vipnas_mbv3 import ViPNAS_MobileNetV3 +from .vipnas_resnet import ViPNAS_ResNet + +from .octsb1 import OCTSB1 +from .octsb2 import OCTSB2 + +__all__ = [ + 'AlexNet', 'HourglassNet', 'HourglassAENet', 'HRNet', 'MobileNetV2', + 'MobileNetV3', 'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SCNet', + 'SEResNet', 'SEResNeXt', 'ShuffleNetV1', 'ShuffleNetV2', 'CPM', 'RSN', + 'MSPN', 'ResNeSt', 'VGG', 'TCN', 'ViPNAS_ResNet', 'ViPNAS_MobileNetV3', + 'LiteHRNet', 'V2VNet', 'HRFormer', 'PyramidVisionTransformer', + 'PyramidVisionTransformerV2', 'SwinTransformer', + 'OCTSB1', 'OCTSB2' +] diff --git a/mmpose/models/backbones/alexnet.py b/mmpose/models/backbones/alexnet.py index 2262658f47..f570ee508a 100644 --- a/mmpose/models/backbones/alexnet.py +++ b/mmpose/models/backbones/alexnet.py @@ -1,58 +1,58 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn - -from mmpose.registry import MODELS -from .base_backbone import BaseBackbone - - -@MODELS.register_module() -class AlexNet(BaseBackbone): - """`AlexNet `__ backbone. - - The input for AlexNet is a 224x224 RGB image. - - Args: - num_classes (int): number of classes for classification. - The default value is -1, which uses the backbone as - a feature extractor without the top classifier. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, num_classes=-1, init_cfg=None): - super().__init__(init_cfg=init_cfg) - self.num_classes = num_classes - self.features = nn.Sequential( - nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2), - nn.ReLU(inplace=True), - nn.MaxPool2d(kernel_size=3, stride=2), - nn.Conv2d(64, 192, kernel_size=5, padding=2), - nn.ReLU(inplace=True), - nn.MaxPool2d(kernel_size=3, stride=2), - nn.Conv2d(192, 384, kernel_size=3, padding=1), - nn.ReLU(inplace=True), - nn.Conv2d(384, 256, kernel_size=3, padding=1), - nn.ReLU(inplace=True), - nn.Conv2d(256, 256, kernel_size=3, padding=1), - nn.ReLU(inplace=True), - nn.MaxPool2d(kernel_size=3, stride=2), - ) - if self.num_classes > 0: - self.classifier = nn.Sequential( - nn.Dropout(), - nn.Linear(256 * 6 * 6, 4096), - nn.ReLU(inplace=True), - nn.Dropout(), - nn.Linear(4096, 4096), - nn.ReLU(inplace=True), - nn.Linear(4096, num_classes), - ) - - def forward(self, x): - - x = self.features(x) - if self.num_classes > 0: - x = x.view(x.size(0), 256 * 6 * 6) - x = self.classifier(x) - - return (x, ) +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone + + +@MODELS.register_module() +class AlexNet(BaseBackbone): + """`AlexNet `__ backbone. + + The input for AlexNet is a 224x224 RGB image. + + Args: + num_classes (int): number of classes for classification. + The default value is -1, which uses the backbone as + a feature extractor without the top classifier. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, num_classes=-1, init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.num_classes = num_classes + self.features = nn.Sequential( + nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + nn.Conv2d(64, 192, kernel_size=5, padding=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + nn.Conv2d(192, 384, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(384, 256, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(256, 256, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + ) + if self.num_classes > 0: + self.classifier = nn.Sequential( + nn.Dropout(), + nn.Linear(256 * 6 * 6, 4096), + nn.ReLU(inplace=True), + nn.Dropout(), + nn.Linear(4096, 4096), + nn.ReLU(inplace=True), + nn.Linear(4096, num_classes), + ) + + def forward(self, x): + + x = self.features(x) + if self.num_classes > 0: + x = x.view(x.size(0), 256 * 6 * 6) + x = self.classifier(x) + + return (x, ) diff --git a/mmpose/models/backbones/base_backbone.py b/mmpose/models/backbones/base_backbone.py index 6094b4e831..2b0d90c1f8 100644 --- a/mmpose/models/backbones/base_backbone.py +++ b/mmpose/models/backbones/base_backbone.py @@ -1,29 +1,29 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from abc import ABCMeta, abstractmethod - -from mmengine.model import BaseModule - - -class BaseBackbone(BaseModule, metaclass=ABCMeta): - """Base backbone. - - This class defines the basic functions of a backbone. Any backbone that - inherits this class should at least define its own `forward` function. - """ - - @abstractmethod - def forward(self, x): - """Forward function. - - Args: - x (Tensor | tuple[Tensor]): x could be a torch.Tensor or a tuple of - torch.Tensor, containing input data for forward computation. 
- """ - - def train(self, mode=True): - """Set module status before forward computation. - - Args: - mode (bool): Whether it is train_mode or test_mode - """ - super(BaseBackbone, self).train(mode) +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod + +from mmengine.model import BaseModule + + +class BaseBackbone(BaseModule, metaclass=ABCMeta): + """Base backbone. + + This class defines the basic functions of a backbone. Any backbone that + inherits this class should at least define its own `forward` function. + """ + + @abstractmethod + def forward(self, x): + """Forward function. + + Args: + x (Tensor | tuple[Tensor]): x could be a torch.Tensor or a tuple of + torch.Tensor, containing input data for forward computation. + """ + + def train(self, mode=True): + """Set module status before forward computation. + + Args: + mode (bool): Whether it is train_mode or test_mode + """ + super(BaseBackbone, self).train(mode) diff --git a/mmpose/models/backbones/cpm.py b/mmpose/models/backbones/cpm.py index 256769c43a..1ad19a9006 100644 --- a/mmpose/models/backbones/cpm.py +++ b/mmpose/models/backbones/cpm.py @@ -1,183 +1,183 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy - -import torch -import torch.nn as nn -from mmcv.cnn import ConvModule -from mmengine.model import BaseModule - -from mmpose.registry import MODELS -from .base_backbone import BaseBackbone - - -class CpmBlock(BaseModule): - """CpmBlock for Convolutional Pose Machine. - - Args: - in_channels (int): Input channels of this block. - channels (list): Output channels of each conv module. - kernels (list): Kernel sizes of each conv module. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - in_channels, - channels=(128, 128, 128), - kernels=(11, 11, 11), - norm_cfg=None, - init_cfg=None): - super().__init__(init_cfg=init_cfg) - - assert len(channels) == len(kernels) - layers = [] - for i in range(len(channels)): - if i == 0: - input_channels = in_channels - else: - input_channels = channels[i - 1] - layers.append( - ConvModule( - input_channels, - channels[i], - kernels[i], - padding=(kernels[i] - 1) // 2, - norm_cfg=norm_cfg)) - self.model = nn.Sequential(*layers) - - def forward(self, x): - """Model forward function.""" - out = self.model(x) - return out - - -@MODELS.register_module() -class CPM(BaseBackbone): - """CPM backbone. - - Convolutional Pose Machines. - More details can be found in the `paper - `__ . - - Args: - in_channels (int): The input channels of the CPM. - out_channels (int): The output channels of the CPM. - feat_channels (int): Feature channel of each CPM stage. - middle_channels (int): Feature channel of conv after the middle stage. - num_stages (int): Number of stages. - norm_cfg (dict): Dictionary to construct and config norm layer. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: - ``[ - dict(type='Normal', std=0.001, layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ]`` - - Example: - >>> from mmpose.models import CPM - >>> import torch - >>> self = CPM(3, 17) - >>> self.eval() - >>> inputs = torch.rand(1, 3, 368, 368) - >>> level_outputs = self.forward(inputs) - >>> for level_output in level_outputs: - ... 
print(tuple(level_output.shape)) - (1, 17, 46, 46) - (1, 17, 46, 46) - (1, 17, 46, 46) - (1, 17, 46, 46) - (1, 17, 46, 46) - (1, 17, 46, 46) - """ - - def __init__( - self, - in_channels, - out_channels, - feat_channels=128, - middle_channels=32, - num_stages=6, - norm_cfg=dict(type='BN', requires_grad=True), - init_cfg=[ - dict(type='Normal', std=0.001, layer=['Conv2d']), - dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) - ], - ): - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - super().__init__(init_cfg=init_cfg) - - assert in_channels == 3 - - self.num_stages = num_stages - assert self.num_stages >= 1 - - self.stem = nn.Sequential( - ConvModule(in_channels, 128, 9, padding=4, norm_cfg=norm_cfg), - nn.MaxPool2d(kernel_size=3, stride=2, padding=1), - ConvModule(128, 128, 9, padding=4, norm_cfg=norm_cfg), - nn.MaxPool2d(kernel_size=3, stride=2, padding=1), - ConvModule(128, 128, 9, padding=4, norm_cfg=norm_cfg), - nn.MaxPool2d(kernel_size=3, stride=2, padding=1), - ConvModule(128, 32, 5, padding=2, norm_cfg=norm_cfg), - ConvModule(32, 512, 9, padding=4, norm_cfg=norm_cfg), - ConvModule(512, 512, 1, padding=0, norm_cfg=norm_cfg), - ConvModule(512, out_channels, 1, padding=0, act_cfg=None)) - - self.middle = nn.Sequential( - ConvModule(in_channels, 128, 9, padding=4, norm_cfg=norm_cfg), - nn.MaxPool2d(kernel_size=3, stride=2, padding=1), - ConvModule(128, 128, 9, padding=4, norm_cfg=norm_cfg), - nn.MaxPool2d(kernel_size=3, stride=2, padding=1), - ConvModule(128, 128, 9, padding=4, norm_cfg=norm_cfg), - nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) - - self.cpm_stages = nn.ModuleList([ - CpmBlock( - middle_channels + out_channels, - channels=[feat_channels, feat_channels, feat_channels], - kernels=[11, 11, 11], - norm_cfg=norm_cfg) for _ in range(num_stages - 1) - ]) - - self.middle_conv = nn.ModuleList([ - nn.Sequential( - ConvModule( - 128, middle_channels, 5, padding=2, norm_cfg=norm_cfg)) - for _ in range(num_stages - 1) - ]) - - self.out_convs = nn.ModuleList([ - nn.Sequential( - ConvModule( - feat_channels, - feat_channels, - 1, - padding=0, - norm_cfg=norm_cfg), - ConvModule(feat_channels, out_channels, 1, act_cfg=None)) - for _ in range(num_stages - 1) - ]) - - def forward(self, x): - """Model forward function.""" - stage1_out = self.stem(x) - middle_out = self.middle(x) - out_feats = [] - - out_feats.append(stage1_out) - - for ind in range(self.num_stages - 1): - single_stage = self.cpm_stages[ind] - out_conv = self.out_convs[ind] - - inp_feat = torch.cat( - [out_feats[-1], self.middle_conv[ind](middle_out)], 1) - cpm_feat = single_stage(inp_feat) - out_feat = out_conv(cpm_feat) - out_feats.append(out_feat) - - return out_feats +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone + + +class CpmBlock(BaseModule): + """CpmBlock for Convolutional Pose Machine. + + Args: + in_channels (int): Input channels of this block. + channels (list): Output channels of each conv module. + kernels (list): Kernel sizes of each conv module. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + in_channels, + channels=(128, 128, 128), + kernels=(11, 11, 11), + norm_cfg=None, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + assert len(channels) == len(kernels) + layers = [] + for i in range(len(channels)): + if i == 0: + input_channels = in_channels + else: + input_channels = channels[i - 1] + layers.append( + ConvModule( + input_channels, + channels[i], + kernels[i], + padding=(kernels[i] - 1) // 2, + norm_cfg=norm_cfg)) + self.model = nn.Sequential(*layers) + + def forward(self, x): + """Model forward function.""" + out = self.model(x) + return out + + +@MODELS.register_module() +class CPM(BaseBackbone): + """CPM backbone. + + Convolutional Pose Machines. + More details can be found in the `paper + `__ . + + Args: + in_channels (int): The input channels of the CPM. + out_channels (int): The output channels of the CPM. + feat_channels (int): Feature channel of each CPM stage. + middle_channels (int): Feature channel of conv after the middle stage. + num_stages (int): Number of stages. + norm_cfg (dict): Dictionary to construct and config norm layer. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + + Example: + >>> from mmpose.models import CPM + >>> import torch + >>> self = CPM(3, 17) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 368, 368) + >>> level_outputs = self.forward(inputs) + >>> for level_output in level_outputs: + ... print(tuple(level_output.shape)) + (1, 17, 46, 46) + (1, 17, 46, 46) + (1, 17, 46, 46) + (1, 17, 46, 46) + (1, 17, 46, 46) + (1, 17, 46, 46) + """ + + def __init__( + self, + in_channels, + out_channels, + feat_channels=128, + middle_channels=32, + num_stages=6, + norm_cfg=dict(type='BN', requires_grad=True), + init_cfg=[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) + ], + ): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + + assert in_channels == 3 + + self.num_stages = num_stages + assert self.num_stages >= 1 + + self.stem = nn.Sequential( + ConvModule(in_channels, 128, 9, padding=4, norm_cfg=norm_cfg), + nn.MaxPool2d(kernel_size=3, stride=2, padding=1), + ConvModule(128, 128, 9, padding=4, norm_cfg=norm_cfg), + nn.MaxPool2d(kernel_size=3, stride=2, padding=1), + ConvModule(128, 128, 9, padding=4, norm_cfg=norm_cfg), + nn.MaxPool2d(kernel_size=3, stride=2, padding=1), + ConvModule(128, 32, 5, padding=2, norm_cfg=norm_cfg), + ConvModule(32, 512, 9, padding=4, norm_cfg=norm_cfg), + ConvModule(512, 512, 1, padding=0, norm_cfg=norm_cfg), + ConvModule(512, out_channels, 1, padding=0, act_cfg=None)) + + self.middle = nn.Sequential( + ConvModule(in_channels, 128, 9, padding=4, norm_cfg=norm_cfg), + nn.MaxPool2d(kernel_size=3, stride=2, padding=1), + ConvModule(128, 128, 9, padding=4, norm_cfg=norm_cfg), + nn.MaxPool2d(kernel_size=3, stride=2, padding=1), + ConvModule(128, 128, 9, padding=4, norm_cfg=norm_cfg), + nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) + + self.cpm_stages = nn.ModuleList([ + CpmBlock( + middle_channels + out_channels, + channels=[feat_channels, feat_channels, feat_channels], + kernels=[11, 11, 11], + norm_cfg=norm_cfg) for _ in range(num_stages - 1) + ]) + + self.middle_conv = nn.ModuleList([ + nn.Sequential( + ConvModule( + 128, middle_channels, 5, padding=2, 
norm_cfg=norm_cfg)) + for _ in range(num_stages - 1) + ]) + + self.out_convs = nn.ModuleList([ + nn.Sequential( + ConvModule( + feat_channels, + feat_channels, + 1, + padding=0, + norm_cfg=norm_cfg), + ConvModule(feat_channels, out_channels, 1, act_cfg=None)) + for _ in range(num_stages - 1) + ]) + + def forward(self, x): + """Model forward function.""" + stage1_out = self.stem(x) + middle_out = self.middle(x) + out_feats = [] + + out_feats.append(stage1_out) + + for ind in range(self.num_stages - 1): + single_stage = self.cpm_stages[ind] + out_conv = self.out_convs[ind] + + inp_feat = torch.cat( + [out_feats[-1], self.middle_conv[ind](middle_out)], 1) + cpm_feat = single_stage(inp_feat) + out_feat = out_conv(cpm_feat) + out_feats.append(out_feat) + + return out_feats diff --git a/mmpose/models/backbones/hourglass.py b/mmpose/models/backbones/hourglass.py index cfc8d6d328..6429487b36 100644 --- a/mmpose/models/backbones/hourglass.py +++ b/mmpose/models/backbones/hourglass.py @@ -1,209 +1,209 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy - -import torch.nn as nn -from mmcv.cnn import ConvModule -from mmengine.model import BaseModule - -from mmpose.registry import MODELS -from .base_backbone import BaseBackbone -from .resnet import BasicBlock, ResLayer - - -class HourglassModule(BaseModule): - """Hourglass Module for HourglassNet backbone. - - Generate module recursively and use BasicBlock as the base unit. - - Args: - depth (int): Depth of current HourglassModule. - stage_channels (list[int]): Feature channels of sub-modules in current - and follow-up HourglassModule. - stage_blocks (list[int]): Number of sub-modules stacked in current and - follow-up HourglassModule. - norm_cfg (dict): Dictionary to construct and config norm layer. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - depth, - stage_channels, - stage_blocks, - norm_cfg=dict(type='BN', requires_grad=True), - init_cfg=None): - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - super().__init__(init_cfg=init_cfg) - - self.depth = depth - - cur_block = stage_blocks[0] - next_block = stage_blocks[1] - - cur_channel = stage_channels[0] - next_channel = stage_channels[1] - - self.up1 = ResLayer( - BasicBlock, cur_block, cur_channel, cur_channel, norm_cfg=norm_cfg) - - self.low1 = ResLayer( - BasicBlock, - cur_block, - cur_channel, - next_channel, - stride=2, - norm_cfg=norm_cfg) - - if self.depth > 1: - self.low2 = HourglassModule(depth - 1, stage_channels[1:], - stage_blocks[1:]) - else: - self.low2 = ResLayer( - BasicBlock, - next_block, - next_channel, - next_channel, - norm_cfg=norm_cfg) - - self.low3 = ResLayer( - BasicBlock, - cur_block, - next_channel, - cur_channel, - norm_cfg=norm_cfg, - downsample_first=False) - - self.up2 = nn.Upsample(scale_factor=2) - - def forward(self, x): - """Model forward function.""" - up1 = self.up1(x) - low1 = self.low1(x) - low2 = self.low2(low1) - low3 = self.low3(low2) - up2 = self.up2(low3) - return up1 + up2 - - -@MODELS.register_module() -class HourglassNet(BaseBackbone): - """HourglassNet backbone. - - Stacked Hourglass Networks for Human Pose Estimation. - More details can be found in the `paper - `__ . - - Args: - downsample_times (int): Downsample times in a HourglassModule. - num_stacks (int): Number of HourglassModule modules stacked, - 1 for Hourglass-52, 2 for Hourglass-104. - stage_channels (list[int]): Feature channel of each sub-module in a - HourglassModule. 
- stage_blocks (list[int]): Number of sub-modules stacked in a - HourglassModule. - feat_channel (int): Feature channel of conv after a HourglassModule. - norm_cfg (dict): Dictionary to construct and config norm layer. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: - ``[ - dict(type='Normal', std=0.001, layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ]`` - - Example: - >>> from mmpose.models import HourglassNet - >>> import torch - >>> self = HourglassNet() - >>> self.eval() - >>> inputs = torch.rand(1, 3, 511, 511) - >>> level_outputs = self.forward(inputs) - >>> for level_output in level_outputs: - ... print(tuple(level_output.shape)) - (1, 256, 128, 128) - (1, 256, 128, 128) - """ - - def __init__( - self, - downsample_times=5, - num_stacks=2, - stage_channels=(256, 256, 384, 384, 384, 512), - stage_blocks=(2, 2, 2, 2, 2, 4), - feat_channel=256, - norm_cfg=dict(type='BN', requires_grad=True), - init_cfg=[ - dict(type='Normal', std=0.001, layer=['Conv2d']), - dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) - ], - ): - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - super().__init__(init_cfg=init_cfg) - - self.num_stacks = num_stacks - assert self.num_stacks >= 1 - assert len(stage_channels) == len(stage_blocks) - assert len(stage_channels) > downsample_times - - cur_channel = stage_channels[0] - - self.stem = nn.Sequential( - ConvModule(3, 128, 7, padding=3, stride=2, norm_cfg=norm_cfg), - ResLayer(BasicBlock, 1, 128, 256, stride=2, norm_cfg=norm_cfg)) - - self.hourglass_modules = nn.ModuleList([ - HourglassModule(downsample_times, stage_channels, stage_blocks) - for _ in range(num_stacks) - ]) - - self.inters = ResLayer( - BasicBlock, - num_stacks - 1, - cur_channel, - cur_channel, - norm_cfg=norm_cfg) - - self.conv1x1s = nn.ModuleList([ - ConvModule( - cur_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None) - for _ in range(num_stacks - 1) - ]) - - self.out_convs = nn.ModuleList([ - ConvModule( - cur_channel, feat_channel, 3, padding=1, norm_cfg=norm_cfg) - for _ in range(num_stacks) - ]) - - self.remap_convs = nn.ModuleList([ - ConvModule( - feat_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None) - for _ in range(num_stacks - 1) - ]) - - self.relu = nn.ReLU(inplace=True) - - def forward(self, x): - """Model forward function.""" - inter_feat = self.stem(x) - out_feats = [] - - for ind in range(self.num_stacks): - single_hourglass = self.hourglass_modules[ind] - out_conv = self.out_convs[ind] - - hourglass_feat = single_hourglass(inter_feat) - out_feat = out_conv(hourglass_feat) - out_feats.append(out_feat) - - if ind < self.num_stacks - 1: - inter_feat = self.conv1x1s[ind]( - inter_feat) + self.remap_convs[ind]( - out_feat) - inter_feat = self.inters[ind](self.relu(inter_feat)) - - return out_feats +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone +from .resnet import BasicBlock, ResLayer + + +class HourglassModule(BaseModule): + """Hourglass Module for HourglassNet backbone. + + Generate module recursively and use BasicBlock as the base unit. + + Args: + depth (int): Depth of current HourglassModule. + stage_channels (list[int]): Feature channels of sub-modules in current + and follow-up HourglassModule. 
+ stage_blocks (list[int]): Number of sub-modules stacked in current and + follow-up HourglassModule. + norm_cfg (dict): Dictionary to construct and config norm layer. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + depth, + stage_channels, + stage_blocks, + norm_cfg=dict(type='BN', requires_grad=True), + init_cfg=None): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + + self.depth = depth + + cur_block = stage_blocks[0] + next_block = stage_blocks[1] + + cur_channel = stage_channels[0] + next_channel = stage_channels[1] + + self.up1 = ResLayer( + BasicBlock, cur_block, cur_channel, cur_channel, norm_cfg=norm_cfg) + + self.low1 = ResLayer( + BasicBlock, + cur_block, + cur_channel, + next_channel, + stride=2, + norm_cfg=norm_cfg) + + if self.depth > 1: + self.low2 = HourglassModule(depth - 1, stage_channels[1:], + stage_blocks[1:]) + else: + self.low2 = ResLayer( + BasicBlock, + next_block, + next_channel, + next_channel, + norm_cfg=norm_cfg) + + self.low3 = ResLayer( + BasicBlock, + cur_block, + next_channel, + cur_channel, + norm_cfg=norm_cfg, + downsample_first=False) + + self.up2 = nn.Upsample(scale_factor=2) + + def forward(self, x): + """Model forward function.""" + up1 = self.up1(x) + low1 = self.low1(x) + low2 = self.low2(low1) + low3 = self.low3(low2) + up2 = self.up2(low3) + return up1 + up2 + + +@MODELS.register_module() +class HourglassNet(BaseBackbone): + """HourglassNet backbone. + + Stacked Hourglass Networks for Human Pose Estimation. + More details can be found in the `paper + `__ . + + Args: + downsample_times (int): Downsample times in a HourglassModule. + num_stacks (int): Number of HourglassModule modules stacked, + 1 for Hourglass-52, 2 for Hourglass-104. + stage_channels (list[int]): Feature channel of each sub-module in a + HourglassModule. + stage_blocks (list[int]): Number of sub-modules stacked in a + HourglassModule. + feat_channel (int): Feature channel of conv after a HourglassModule. + norm_cfg (dict): Dictionary to construct and config norm layer. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + + Example: + >>> from mmpose.models import HourglassNet + >>> import torch + >>> self = HourglassNet() + >>> self.eval() + >>> inputs = torch.rand(1, 3, 511, 511) + >>> level_outputs = self.forward(inputs) + >>> for level_output in level_outputs: + ... 
print(tuple(level_output.shape)) + (1, 256, 128, 128) + (1, 256, 128, 128) + """ + + def __init__( + self, + downsample_times=5, + num_stacks=2, + stage_channels=(256, 256, 384, 384, 384, 512), + stage_blocks=(2, 2, 2, 2, 2, 4), + feat_channel=256, + norm_cfg=dict(type='BN', requires_grad=True), + init_cfg=[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) + ], + ): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + + self.num_stacks = num_stacks + assert self.num_stacks >= 1 + assert len(stage_channels) == len(stage_blocks) + assert len(stage_channels) > downsample_times + + cur_channel = stage_channels[0] + + self.stem = nn.Sequential( + ConvModule(3, 128, 7, padding=3, stride=2, norm_cfg=norm_cfg), + ResLayer(BasicBlock, 1, 128, 256, stride=2, norm_cfg=norm_cfg)) + + self.hourglass_modules = nn.ModuleList([ + HourglassModule(downsample_times, stage_channels, stage_blocks) + for _ in range(num_stacks) + ]) + + self.inters = ResLayer( + BasicBlock, + num_stacks - 1, + cur_channel, + cur_channel, + norm_cfg=norm_cfg) + + self.conv1x1s = nn.ModuleList([ + ConvModule( + cur_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None) + for _ in range(num_stacks - 1) + ]) + + self.out_convs = nn.ModuleList([ + ConvModule( + cur_channel, feat_channel, 3, padding=1, norm_cfg=norm_cfg) + for _ in range(num_stacks) + ]) + + self.remap_convs = nn.ModuleList([ + ConvModule( + feat_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None) + for _ in range(num_stacks - 1) + ]) + + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + """Model forward function.""" + inter_feat = self.stem(x) + out_feats = [] + + for ind in range(self.num_stacks): + single_hourglass = self.hourglass_modules[ind] + out_conv = self.out_convs[ind] + + hourglass_feat = single_hourglass(inter_feat) + out_feat = out_conv(hourglass_feat) + out_feats.append(out_feat) + + if ind < self.num_stacks - 1: + inter_feat = self.conv1x1s[ind]( + inter_feat) + self.remap_convs[ind]( + out_feat) + inter_feat = self.inters[ind](self.relu(inter_feat)) + + return out_feats diff --git a/mmpose/models/backbones/hourglass_ae.py b/mmpose/models/backbones/hourglass_ae.py index 93e62dd406..25388ada64 100644 --- a/mmpose/models/backbones/hourglass_ae.py +++ b/mmpose/models/backbones/hourglass_ae.py @@ -1,209 +1,209 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy - -import torch.nn as nn -from mmcv.cnn import ConvModule, MaxPool2d -from mmengine.model import BaseModule - -from mmpose.registry import MODELS -from .base_backbone import BaseBackbone - - -class HourglassAEModule(BaseModule): - """Modified Hourglass Module for HourglassNet_AE backbone. - - Generate module recursively and use BasicBlock as the base unit. - - Args: - depth (int): Depth of current HourglassModule. - stage_channels (list[int]): Feature channels of sub-modules in current - and follow-up HourglassModule. - norm_cfg (dict): Dictionary to construct and config norm layer. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, - depth, - stage_channels, - norm_cfg=dict(type='BN', requires_grad=True), - init_cfg=None): - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - super().__init__(init_cfg=init_cfg) - - self.depth = depth - - cur_channel = stage_channels[0] - next_channel = stage_channels[1] - - self.up1 = ConvModule( - cur_channel, cur_channel, 3, padding=1, norm_cfg=norm_cfg) - - self.pool1 = MaxPool2d(2, 2) - - self.low1 = ConvModule( - cur_channel, next_channel, 3, padding=1, norm_cfg=norm_cfg) - - if self.depth > 1: - self.low2 = HourglassAEModule(depth - 1, stage_channels[1:]) - else: - self.low2 = ConvModule( - next_channel, next_channel, 3, padding=1, norm_cfg=norm_cfg) - - self.low3 = ConvModule( - next_channel, cur_channel, 3, padding=1, norm_cfg=norm_cfg) - - self.up2 = nn.UpsamplingNearest2d(scale_factor=2) - - def forward(self, x): - """Model forward function.""" - up1 = self.up1(x) - pool1 = self.pool1(x) - low1 = self.low1(pool1) - low2 = self.low2(low1) - low3 = self.low3(low2) - up2 = self.up2(low3) - return up1 + up2 - - -@MODELS.register_module() -class HourglassAENet(BaseBackbone): - """Hourglass-AE Network proposed by Newell et al. - - Associative Embedding: End-to-End Learning for Joint - Detection and Grouping. - - More details can be found in the `paper - `__ . - - Args: - downsample_times (int): Downsample times in a HourglassModule. - num_stacks (int): Number of HourglassModule modules stacked, - 1 for Hourglass-52, 2 for Hourglass-104. - stage_channels (list[int]): Feature channel of each sub-module in a - HourglassModule. - stage_blocks (list[int]): Number of sub-modules stacked in a - HourglassModule. - feat_channels (int): Feature channel of conv after a HourglassModule. - norm_cfg (dict): Dictionary to construct and config norm layer. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: - ``[ - dict(type='Normal', std=0.001, layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ]`` - - Example: - >>> from mmpose.models import HourglassAENet - >>> import torch - >>> self = HourglassAENet() - >>> self.eval() - >>> inputs = torch.rand(1, 3, 512, 512) - >>> level_outputs = self.forward(inputs) - >>> for level_output in level_outputs: - ... 
print(tuple(level_output.shape)) - (1, 34, 128, 128) - """ - - def __init__( - self, - downsample_times=4, - num_stacks=1, - out_channels=34, - stage_channels=(256, 384, 512, 640, 768), - feat_channels=256, - norm_cfg=dict(type='BN', requires_grad=True), - init_cfg=[ - dict(type='Normal', std=0.001, layer=['Conv2d']), - dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) - ], - ): - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - super().__init__(init_cfg=init_cfg) - - self.num_stacks = num_stacks - assert self.num_stacks >= 1 - assert len(stage_channels) > downsample_times - - cur_channels = stage_channels[0] - - self.stem = nn.Sequential( - ConvModule(3, 64, 7, padding=3, stride=2, norm_cfg=norm_cfg), - ConvModule(64, 128, 3, padding=1, norm_cfg=norm_cfg), - MaxPool2d(2, 2), - ConvModule(128, 128, 3, padding=1, norm_cfg=norm_cfg), - ConvModule(128, feat_channels, 3, padding=1, norm_cfg=norm_cfg), - ) - - self.hourglass_modules = nn.ModuleList([ - nn.Sequential( - HourglassAEModule( - downsample_times, stage_channels, norm_cfg=norm_cfg), - ConvModule( - feat_channels, - feat_channels, - 3, - padding=1, - norm_cfg=norm_cfg), - ConvModule( - feat_channels, - feat_channels, - 3, - padding=1, - norm_cfg=norm_cfg)) for _ in range(num_stacks) - ]) - - self.out_convs = nn.ModuleList([ - ConvModule( - cur_channels, - out_channels, - 1, - padding=0, - norm_cfg=None, - act_cfg=None) for _ in range(num_stacks) - ]) - - self.remap_out_convs = nn.ModuleList([ - ConvModule( - out_channels, - feat_channels, - 1, - norm_cfg=norm_cfg, - act_cfg=None) for _ in range(num_stacks - 1) - ]) - - self.remap_feature_convs = nn.ModuleList([ - ConvModule( - feat_channels, - feat_channels, - 1, - norm_cfg=norm_cfg, - act_cfg=None) for _ in range(num_stacks - 1) - ]) - - self.relu = nn.ReLU(inplace=True) - - def forward(self, x): - """Model forward function.""" - inter_feat = self.stem(x) - out_feats = [] - - for ind in range(self.num_stacks): - single_hourglass = self.hourglass_modules[ind] - out_conv = self.out_convs[ind] - - hourglass_feat = single_hourglass(inter_feat) - out_feat = out_conv(hourglass_feat) - out_feats.append(out_feat) - - if ind < self.num_stacks - 1: - inter_feat = inter_feat + self.remap_out_convs[ind]( - out_feat) + self.remap_feature_convs[ind]( - hourglass_feat) - - return out_feats +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import torch.nn as nn +from mmcv.cnn import ConvModule, MaxPool2d +from mmengine.model import BaseModule + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone + + +class HourglassAEModule(BaseModule): + """Modified Hourglass Module for HourglassNet_AE backbone. + + Generate module recursively and use BasicBlock as the base unit. + + Args: + depth (int): Depth of current HourglassModule. + stage_channels (list[int]): Feature channels of sub-modules in current + and follow-up HourglassModule. + norm_cfg (dict): Dictionary to construct and config norm layer. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + depth, + stage_channels, + norm_cfg=dict(type='BN', requires_grad=True), + init_cfg=None): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + + self.depth = depth + + cur_channel = stage_channels[0] + next_channel = stage_channels[1] + + self.up1 = ConvModule( + cur_channel, cur_channel, 3, padding=1, norm_cfg=norm_cfg) + + self.pool1 = MaxPool2d(2, 2) + + self.low1 = ConvModule( + cur_channel, next_channel, 3, padding=1, norm_cfg=norm_cfg) + + if self.depth > 1: + self.low2 = HourglassAEModule(depth - 1, stage_channels[1:]) + else: + self.low2 = ConvModule( + next_channel, next_channel, 3, padding=1, norm_cfg=norm_cfg) + + self.low3 = ConvModule( + next_channel, cur_channel, 3, padding=1, norm_cfg=norm_cfg) + + self.up2 = nn.UpsamplingNearest2d(scale_factor=2) + + def forward(self, x): + """Model forward function.""" + up1 = self.up1(x) + pool1 = self.pool1(x) + low1 = self.low1(pool1) + low2 = self.low2(low1) + low3 = self.low3(low2) + up2 = self.up2(low3) + return up1 + up2 + + +@MODELS.register_module() +class HourglassAENet(BaseBackbone): + """Hourglass-AE Network proposed by Newell et al. + + Associative Embedding: End-to-End Learning for Joint + Detection and Grouping. + + More details can be found in the `paper + `__ . + + Args: + downsample_times (int): Downsample times in a HourglassModule. + num_stacks (int): Number of HourglassModule modules stacked, + 1 for Hourglass-52, 2 for Hourglass-104. + stage_channels (list[int]): Feature channel of each sub-module in a + HourglassModule. + stage_blocks (list[int]): Number of sub-modules stacked in a + HourglassModule. + feat_channels (int): Feature channel of conv after a HourglassModule. + norm_cfg (dict): Dictionary to construct and config norm layer. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + + Example: + >>> from mmpose.models import HourglassAENet + >>> import torch + >>> self = HourglassAENet() + >>> self.eval() + >>> inputs = torch.rand(1, 3, 512, 512) + >>> level_outputs = self.forward(inputs) + >>> for level_output in level_outputs: + ... 
print(tuple(level_output.shape)) + (1, 34, 128, 128) + """ + + def __init__( + self, + downsample_times=4, + num_stacks=1, + out_channels=34, + stage_channels=(256, 384, 512, 640, 768), + feat_channels=256, + norm_cfg=dict(type='BN', requires_grad=True), + init_cfg=[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) + ], + ): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + + self.num_stacks = num_stacks + assert self.num_stacks >= 1 + assert len(stage_channels) > downsample_times + + cur_channels = stage_channels[0] + + self.stem = nn.Sequential( + ConvModule(3, 64, 7, padding=3, stride=2, norm_cfg=norm_cfg), + ConvModule(64, 128, 3, padding=1, norm_cfg=norm_cfg), + MaxPool2d(2, 2), + ConvModule(128, 128, 3, padding=1, norm_cfg=norm_cfg), + ConvModule(128, feat_channels, 3, padding=1, norm_cfg=norm_cfg), + ) + + self.hourglass_modules = nn.ModuleList([ + nn.Sequential( + HourglassAEModule( + downsample_times, stage_channels, norm_cfg=norm_cfg), + ConvModule( + feat_channels, + feat_channels, + 3, + padding=1, + norm_cfg=norm_cfg), + ConvModule( + feat_channels, + feat_channels, + 3, + padding=1, + norm_cfg=norm_cfg)) for _ in range(num_stacks) + ]) + + self.out_convs = nn.ModuleList([ + ConvModule( + cur_channels, + out_channels, + 1, + padding=0, + norm_cfg=None, + act_cfg=None) for _ in range(num_stacks) + ]) + + self.remap_out_convs = nn.ModuleList([ + ConvModule( + out_channels, + feat_channels, + 1, + norm_cfg=norm_cfg, + act_cfg=None) for _ in range(num_stacks - 1) + ]) + + self.remap_feature_convs = nn.ModuleList([ + ConvModule( + feat_channels, + feat_channels, + 1, + norm_cfg=norm_cfg, + act_cfg=None) for _ in range(num_stacks - 1) + ]) + + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + """Model forward function.""" + inter_feat = self.stem(x) + out_feats = [] + + for ind in range(self.num_stacks): + single_hourglass = self.hourglass_modules[ind] + out_conv = self.out_convs[ind] + + hourglass_feat = single_hourglass(inter_feat) + out_feat = out_conv(hourglass_feat) + out_feats.append(out_feat) + + if ind < self.num_stacks - 1: + inter_feat = inter_feat + self.remap_out_convs[ind]( + out_feat) + self.remap_feature_convs[ind]( + hourglass_feat) + + return out_feats diff --git a/mmpose/models/backbones/hrformer.py b/mmpose/models/backbones/hrformer.py index 0b86617f14..e50641a45e 100644 --- a/mmpose/models/backbones/hrformer.py +++ b/mmpose/models/backbones/hrformer.py @@ -1,758 +1,758 @@ -# Copyright (c) OpenMMLab. All rights reserved. - -import math - -import torch -import torch.nn as nn -from mmcv.cnn import build_activation_layer, build_conv_layer, build_norm_layer -from mmcv.cnn.bricks.transformer import build_dropout -from mmengine.model import BaseModule, trunc_normal_init -from torch.nn.functional import pad - -from mmpose.registry import MODELS -from .hrnet import Bottleneck, HRModule, HRNet - - -def nlc_to_nchw(x, hw_shape): - """Convert [N, L, C] shape tensor to [N, C, H, W] shape tensor. - - Args: - x (Tensor): The input tensor of shape [N, L, C] before conversion. - hw_shape (Sequence[int]): The height and width of output feature map. - - Returns: - Tensor: The output tensor of shape [N, C, H, W] after conversion. 
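As a minimal shape round-trip sketch for the nlc_to_nchw / nchw_to_nlc helpers defined in this file (assuming an mmpose installation that contains this module; the tensor sizes are arbitrary examples):

import torch
from mmpose.models.backbones.hrformer import nchw_to_nlc, nlc_to_nchw

feat = torch.randn(2, 64, 8, 12)          # [N, C, H, W]
tokens = nchw_to_nlc(feat)                # [N, H*W, C] -> (2, 96, 64)
restored = nlc_to_nchw(tokens, (8, 12))   # back to [N, C, H, W]
assert torch.equal(feat, restored)        # pure reshape/transpose, lossless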
- """ - H, W = hw_shape - assert len(x.shape) == 3 - B, L, C = x.shape - assert L == H * W, 'The seq_len doesn\'t match H, W' - return x.transpose(1, 2).reshape(B, C, H, W) - - -def nchw_to_nlc(x): - """Flatten [N, C, H, W] shape tensor to [N, L, C] shape tensor. - - Args: - x (Tensor): The input tensor of shape [N, C, H, W] before conversion. - - Returns: - Tensor: The output tensor of shape [N, L, C] after conversion. - """ - assert len(x.shape) == 4 - return x.flatten(2).transpose(1, 2).contiguous() - - -def build_drop_path(drop_path_rate): - """Build drop path layer.""" - return build_dropout(dict(type='DropPath', drop_prob=drop_path_rate)) - - -class WindowMSA(BaseModule): - """Window based multi-head self-attention (W-MSA) module with relative - position bias. - - Args: - embed_dims (int): Number of input channels. - num_heads (int): Number of attention heads. - window_size (tuple[int]): The height and width of the window. - qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. - Default: True. - qk_scale (float | None, optional): Override default qk scale of - head_dim ** -0.5 if set. Default: None. - attn_drop_rate (float, optional): Dropout ratio of attention weight. - Default: 0.0 - proj_drop_rate (float, optional): Dropout ratio of output. Default: 0. - with_rpe (bool, optional): If True, use relative position bias. - Default: True. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None. - """ - - def __init__(self, - embed_dims, - num_heads, - window_size, - qkv_bias=True, - qk_scale=None, - attn_drop_rate=0., - proj_drop_rate=0., - with_rpe=True, - init_cfg=None): - - super().__init__(init_cfg=init_cfg) - self.embed_dims = embed_dims - self.window_size = window_size # Wh, Ww - self.num_heads = num_heads - head_embed_dims = embed_dims // num_heads - self.scale = qk_scale or head_embed_dims**-0.5 - - self.with_rpe = with_rpe - if self.with_rpe: - # define a parameter table of relative position bias - self.relative_position_bias_table = nn.Parameter( - torch.zeros( - (2 * window_size[0] - 1) * (2 * window_size[1] - 1), - num_heads)) # 2*Wh-1 * 2*Ww-1, nH - - Wh, Ww = self.window_size - rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww) - rel_position_index = rel_index_coords + rel_index_coords.T - rel_position_index = rel_position_index.flip(1).contiguous() - self.register_buffer('relative_position_index', rel_position_index) - - self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop_rate) - self.proj = nn.Linear(embed_dims, embed_dims) - self.proj_drop = nn.Dropout(proj_drop_rate) - - self.softmax = nn.Softmax(dim=-1) - - def init_weights(self): - trunc_normal_init(self.relative_position_bias_table, std=0.02) - - def forward(self, x, mask=None): - """ - Args: - - x (tensor): input features with shape of (B*num_windows, N, C) - mask (tensor | None, Optional): mask with shape of (num_windows, - Wh*Ww, Wh*Ww), value should be between (-inf, 0]. 
- """ - B, N, C = x.shape - qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, - C // self.num_heads).permute(2, 0, 3, 1, 4) - q, k, v = qkv[0], qkv[1], qkv[2] - - q = q * self.scale - attn = (q @ k.transpose(-2, -1)) - - if self.with_rpe: - relative_position_bias = self.relative_position_bias_table[ - self.relative_position_index.view(-1)].view( - self.window_size[0] * self.window_size[1], - self.window_size[0] * self.window_size[1], - -1) # Wh*Ww,Wh*Ww,nH - relative_position_bias = relative_position_bias.permute( - 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww - attn = attn + relative_position_bias.unsqueeze(0) - - if mask is not None: - nW = mask.shape[0] - attn = attn.view(B // nW, nW, self.num_heads, N, - N) + mask.unsqueeze(1).unsqueeze(0) - attn = attn.view(-1, self.num_heads, N, N) - attn = self.softmax(attn) - - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(B, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - - @staticmethod - def double_step_seq(step1, len1, step2, len2): - seq1 = torch.arange(0, step1 * len1, step1) - seq2 = torch.arange(0, step2 * len2, step2) - return (seq1[:, None] + seq2[None, :]).reshape(1, -1) - - -class LocalWindowSelfAttention(BaseModule): - r""" Local-window Self Attention (LSA) module with relative position bias. - - This module is the short-range self-attention module in the - Interlaced Sparse Self-Attention `_. - - Args: - embed_dims (int): Number of input channels. - num_heads (int): Number of attention heads. - window_size (tuple[int] | int): The height and width of the window. - qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. - Default: True. - qk_scale (float | None, optional): Override default qk scale of - head_dim ** -0.5 if set. Default: None. - attn_drop_rate (float, optional): Dropout ratio of attention weight. - Default: 0.0 - proj_drop_rate (float, optional): Dropout ratio of output. Default: 0. - with_rpe (bool, optional): If True, use relative position bias. - Default: True. - with_pad_mask (bool, optional): If True, mask out the padded tokens in - the attention process. Default: False. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None. 
- """ - - def __init__(self, - embed_dims, - num_heads, - window_size, - qkv_bias=True, - qk_scale=None, - attn_drop_rate=0., - proj_drop_rate=0., - with_rpe=True, - with_pad_mask=False, - init_cfg=None): - super().__init__(init_cfg=init_cfg) - if isinstance(window_size, int): - window_size = (window_size, window_size) - self.window_size = window_size - self.with_pad_mask = with_pad_mask - self.attn = WindowMSA( - embed_dims=embed_dims, - num_heads=num_heads, - window_size=window_size, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - attn_drop_rate=attn_drop_rate, - proj_drop_rate=proj_drop_rate, - with_rpe=with_rpe, - init_cfg=init_cfg) - - def forward(self, x, H, W, **kwargs): - """Forward function.""" - B, N, C = x.shape - x = x.view(B, H, W, C) - Wh, Ww = self.window_size - - # center-pad the feature on H and W axes - pad_h = math.ceil(H / Wh) * Wh - H - pad_w = math.ceil(W / Ww) * Ww - W - x = pad(x, (0, 0, pad_w // 2, pad_w - pad_w // 2, pad_h // 2, - pad_h - pad_h // 2)) - - # permute - x = x.view(B, math.ceil(H / Wh), Wh, math.ceil(W / Ww), Ww, C) - x = x.permute(0, 1, 3, 2, 4, 5) - x = x.reshape(-1, Wh * Ww, C) # (B*num_window, Wh*Ww, C) - - # attention - if self.with_pad_mask and pad_h > 0 and pad_w > 0: - pad_mask = x.new_zeros(1, H, W, 1) - pad_mask = pad( - pad_mask, [ - 0, 0, pad_w // 2, pad_w - pad_w // 2, pad_h // 2, - pad_h - pad_h // 2 - ], - value=-float('inf')) - pad_mask = pad_mask.view(1, math.ceil(H / Wh), Wh, - math.ceil(W / Ww), Ww, 1) - pad_mask = pad_mask.permute(1, 3, 0, 2, 4, 5) - pad_mask = pad_mask.reshape(-1, Wh * Ww) - pad_mask = pad_mask[:, None, :].expand([-1, Wh * Ww, -1]) - out = self.attn(x, pad_mask, **kwargs) - else: - out = self.attn(x, **kwargs) - - # reverse permutation - out = out.reshape(B, math.ceil(H / Wh), math.ceil(W / Ww), Wh, Ww, C) - out = out.permute(0, 1, 3, 2, 4, 5) - out = out.reshape(B, H + pad_h, W + pad_w, C) - - # de-pad - out = out[:, pad_h // 2:H + pad_h // 2, pad_w // 2:W + pad_w // 2] - return out.reshape(B, N, C) - - -class CrossFFN(BaseModule): - r"""FFN with Depthwise Conv of HRFormer. - - Args: - in_features (int): The feature dimension. - hidden_features (int, optional): The hidden dimension of FFNs. - Defaults: The same as in_features. - act_cfg (dict, optional): Config of activation layer. - Default: dict(type='GELU'). - dw_act_cfg (dict, optional): Config of activation layer appended - right after DW Conv. Default: dict(type='GELU'). - norm_cfg (dict, optional): Config of norm layer. - Default: dict(type='SyncBN'). - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None. 
- """ - - def __init__(self, - in_features, - hidden_features=None, - out_features=None, - act_cfg=dict(type='GELU'), - dw_act_cfg=dict(type='GELU'), - norm_cfg=dict(type='SyncBN'), - init_cfg=None): - super().__init__(init_cfg=init_cfg) - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1) - self.act1 = build_activation_layer(act_cfg) - self.norm1 = build_norm_layer(norm_cfg, hidden_features)[1] - self.dw3x3 = nn.Conv2d( - hidden_features, - hidden_features, - kernel_size=3, - stride=1, - groups=hidden_features, - padding=1) - self.act2 = build_activation_layer(dw_act_cfg) - self.norm2 = build_norm_layer(norm_cfg, hidden_features)[1] - self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1) - self.act3 = build_activation_layer(act_cfg) - self.norm3 = build_norm_layer(norm_cfg, out_features)[1] - - def forward(self, x, H, W): - """Forward function.""" - x = nlc_to_nchw(x, (H, W)) - x = self.act1(self.norm1(self.fc1(x))) - x = self.act2(self.norm2(self.dw3x3(x))) - x = self.act3(self.norm3(self.fc2(x))) - x = nchw_to_nlc(x) - return x - - -class HRFormerBlock(BaseModule): - """High-Resolution Block for HRFormer. - - Args: - in_features (int): The input dimension. - out_features (int): The output dimension. - num_heads (int): The number of head within each LSA. - window_size (int, optional): The window size for the LSA. - Default: 7 - mlp_ratio (int, optional): The expansion ration of FFN. - Default: 4 - act_cfg (dict, optional): Config of activation layer. - Default: dict(type='GELU'). - norm_cfg (dict, optional): Config of norm layer. - Default: dict(type='SyncBN'). - transformer_norm_cfg (dict, optional): Config of transformer norm - layer. Default: dict(type='LN', eps=1e-6). - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None. - """ - - expansion = 1 - - def __init__(self, - in_features, - out_features, - num_heads, - window_size=7, - mlp_ratio=4.0, - drop_path=0.0, - act_cfg=dict(type='GELU'), - norm_cfg=dict(type='SyncBN'), - transformer_norm_cfg=dict(type='LN', eps=1e-6), - init_cfg=None, - **kwargs): - super(HRFormerBlock, self).__init__(init_cfg=init_cfg) - self.num_heads = num_heads - self.window_size = window_size - self.mlp_ratio = mlp_ratio - - self.norm1 = build_norm_layer(transformer_norm_cfg, in_features)[1] - self.attn = LocalWindowSelfAttention( - in_features, - num_heads=num_heads, - window_size=window_size, - init_cfg=None, - **kwargs) - - self.norm2 = build_norm_layer(transformer_norm_cfg, out_features)[1] - self.ffn = CrossFFN( - in_features=in_features, - hidden_features=int(in_features * mlp_ratio), - out_features=out_features, - norm_cfg=norm_cfg, - act_cfg=act_cfg, - dw_act_cfg=act_cfg, - init_cfg=None) - - self.drop_path = build_drop_path( - drop_path) if drop_path > 0.0 else nn.Identity() - - def forward(self, x): - """Forward function.""" - B, C, H, W = x.size() - # Attention - x = x.view(B, C, -1).permute(0, 2, 1) - x = x + self.drop_path(self.attn(self.norm1(x), H, W)) - # FFN - x = x + self.drop_path(self.ffn(self.norm2(x), H, W)) - x = x.permute(0, 2, 1).view(B, C, H, W) - return x - - def extra_repr(self): - """(Optional) Set the extra information about this module.""" - return 'num_heads={}, window_size={}, mlp_ratio={}'.format( - self.num_heads, self.window_size, self.mlp_ratio) - - -class HRFomerModule(HRModule): - """High-Resolution Module for HRFormer. 
- - Args: - num_branches (int): The number of branches in the HRFormerModule. - block (nn.Module): The building block of HRFormer. - The block should be the HRFormerBlock. - num_blocks (tuple): The number of blocks in each branch. - The length must be equal to num_branches. - num_inchannels (tuple): The number of input channels in each branch. - The length must be equal to num_branches. - num_channels (tuple): The number of channels in each branch. - The length must be equal to num_branches. - num_heads (tuple): The number of heads within the LSAs. - num_window_sizes (tuple): The window size for the LSAs. - num_mlp_ratios (tuple): The expansion ratio for the FFNs. - drop_path (int, optional): The drop path rate of HRFomer. - Default: 0.0 - multiscale_output (bool, optional): Whether to output multi-level - features produced by multiple branches. If False, only the first - level feature will be output. Default: True. - conv_cfg (dict, optional): Config of the conv layers. - Default: None. - norm_cfg (dict, optional): Config of the norm layers appended - right after conv. Default: dict(type='SyncBN', requires_grad=True) - transformer_norm_cfg (dict, optional): Config of the norm layers. - Default: dict(type='LN', eps=1e-6) - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False - upsample_cfg(dict, optional): The config of upsample layers in fuse - layers. Default: dict(mode='bilinear', align_corners=False) - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None. - """ - - def __init__(self, - num_branches, - block, - num_blocks, - num_inchannels, - num_channels, - num_heads, - num_window_sizes, - num_mlp_ratios, - multiscale_output=True, - drop_paths=0.0, - with_rpe=True, - with_pad_mask=False, - conv_cfg=None, - norm_cfg=dict(type='SyncBN', requires_grad=True), - transformer_norm_cfg=dict(type='LN', eps=1e-6), - with_cp=False, - upsample_cfg=dict(mode='bilinear', align_corners=False), - **kwargs): - - self.transformer_norm_cfg = transformer_norm_cfg - self.drop_paths = drop_paths - self.num_heads = num_heads - self.num_window_sizes = num_window_sizes - self.num_mlp_ratios = num_mlp_ratios - self.with_rpe = with_rpe - self.with_pad_mask = with_pad_mask - - super().__init__(num_branches, block, num_blocks, num_inchannels, - num_channels, multiscale_output, with_cp, conv_cfg, - norm_cfg, upsample_cfg, **kwargs) - - def _make_one_branch(self, - branch_index, - block, - num_blocks, - num_channels, - stride=1): - """Build one branch.""" - # HRFormerBlock does not support down sample layer yet. 
- assert stride == 1 and self.in_channels[branch_index] == num_channels[ - branch_index] - layers = [] - layers.append( - block( - self.in_channels[branch_index], - num_channels[branch_index], - num_heads=self.num_heads[branch_index], - window_size=self.num_window_sizes[branch_index], - mlp_ratio=self.num_mlp_ratios[branch_index], - drop_path=self.drop_paths[0], - norm_cfg=self.norm_cfg, - transformer_norm_cfg=self.transformer_norm_cfg, - init_cfg=None, - with_rpe=self.with_rpe, - with_pad_mask=self.with_pad_mask)) - - self.in_channels[ - branch_index] = self.in_channels[branch_index] * block.expansion - for i in range(1, num_blocks[branch_index]): - layers.append( - block( - self.in_channels[branch_index], - num_channels[branch_index], - num_heads=self.num_heads[branch_index], - window_size=self.num_window_sizes[branch_index], - mlp_ratio=self.num_mlp_ratios[branch_index], - drop_path=self.drop_paths[i], - norm_cfg=self.norm_cfg, - transformer_norm_cfg=self.transformer_norm_cfg, - init_cfg=None, - with_rpe=self.with_rpe, - with_pad_mask=self.with_pad_mask)) - return nn.Sequential(*layers) - - def _make_fuse_layers(self): - """Build fuse layers.""" - if self.num_branches == 1: - return None - num_branches = self.num_branches - num_inchannels = self.in_channels - fuse_layers = [] - for i in range(num_branches if self.multiscale_output else 1): - fuse_layer = [] - for j in range(num_branches): - if j > i: - fuse_layer.append( - nn.Sequential( - build_conv_layer( - self.conv_cfg, - num_inchannels[j], - num_inchannels[i], - kernel_size=1, - stride=1, - bias=False), - build_norm_layer(self.norm_cfg, - num_inchannels[i])[1], - nn.Upsample( - scale_factor=2**(j - i), - mode=self.upsample_cfg['mode'], - align_corners=self. - upsample_cfg['align_corners']))) - elif j == i: - fuse_layer.append(None) - else: - conv3x3s = [] - for k in range(i - j): - if k == i - j - 1: - num_outchannels_conv3x3 = num_inchannels[i] - with_out_act = False - else: - num_outchannels_conv3x3 = num_inchannels[j] - with_out_act = True - sub_modules = [ - build_conv_layer( - self.conv_cfg, - num_inchannels[j], - num_inchannels[j], - kernel_size=3, - stride=2, - padding=1, - groups=num_inchannels[j], - bias=False, - ), - build_norm_layer(self.norm_cfg, - num_inchannels[j])[1], - build_conv_layer( - self.conv_cfg, - num_inchannels[j], - num_outchannels_conv3x3, - kernel_size=1, - stride=1, - bias=False, - ), - build_norm_layer(self.norm_cfg, - num_outchannels_conv3x3)[1] - ] - if with_out_act: - sub_modules.append(nn.ReLU(False)) - conv3x3s.append(nn.Sequential(*sub_modules)) - fuse_layer.append(nn.Sequential(*conv3x3s)) - fuse_layers.append(nn.ModuleList(fuse_layer)) - - return nn.ModuleList(fuse_layers) - - def get_num_inchannels(self): - """Return the number of input channels.""" - return self.in_channels - - -@MODELS.register_module() -class HRFormer(HRNet): - """HRFormer backbone. - - This backbone is the implementation of `HRFormer: High-Resolution - Transformer for Dense Prediction `_. - - Args: - extra (dict): Detailed configuration for each stage of HRNet. - There must be 4 stages, the configuration for each stage must have - 5 keys: - - - num_modules (int): The number of HRModule in this stage. - - num_branches (int): The number of branches in the HRModule. - - block (str): The type of block. - - num_blocks (tuple): The number of blocks in each branch. - The length must be equal to num_branches. - - num_channels (tuple): The number of channels in each branch. - The length must be equal to num_branches. 
- in_channels (int): Number of input image channels. Normally 3. - conv_cfg (dict): Dictionary to construct and config conv layer. - Default: None. - norm_cfg (dict): Config of norm layer. - Use `SyncBN` by default. - transformer_norm_cfg (dict): Config of transformer norm layer. - Use `LN` by default. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Default: False. - zero_init_residual (bool): Whether to use zero init for last norm layer - in resblocks to let them behave as identity. Default: False. - frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - -1 means not freezing any parameters. Default: -1. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: - ``[ - dict(type='Normal', std=0.001, layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ]`` - - Example: - >>> from mmpose.models import HRFormer - >>> import torch - >>> extra = dict( - >>> stage1=dict( - >>> num_modules=1, - >>> num_branches=1, - >>> block='BOTTLENECK', - >>> num_blocks=(2, ), - >>> num_channels=(64, )), - >>> stage2=dict( - >>> num_modules=1, - >>> num_branches=2, - >>> block='HRFORMER', - >>> window_sizes=(7, 7), - >>> num_heads=(1, 2), - >>> mlp_ratios=(4, 4), - >>> num_blocks=(2, 2), - >>> num_channels=(32, 64)), - >>> stage3=dict( - >>> num_modules=4, - >>> num_branches=3, - >>> block='HRFORMER', - >>> window_sizes=(7, 7, 7), - >>> num_heads=(1, 2, 4), - >>> mlp_ratios=(4, 4, 4), - >>> num_blocks=(2, 2, 2), - >>> num_channels=(32, 64, 128)), - >>> stage4=dict( - >>> num_modules=2, - >>> num_branches=4, - >>> block='HRFORMER', - >>> window_sizes=(7, 7, 7, 7), - >>> num_heads=(1, 2, 4, 8), - >>> mlp_ratios=(4, 4, 4, 4), - >>> num_blocks=(2, 2, 2, 2), - >>> num_channels=(32, 64, 128, 256))) - >>> self = HRFormer(extra, in_channels=1) - >>> self.eval() - >>> inputs = torch.rand(1, 1, 32, 32) - >>> level_outputs = self.forward(inputs) - >>> for level_out in level_outputs: - ... 
print(tuple(level_out.shape)) - (1, 32, 8, 8) - (1, 64, 4, 4) - (1, 128, 2, 2) - (1, 256, 1, 1) - """ - - blocks_dict = {'BOTTLENECK': Bottleneck, 'HRFORMERBLOCK': HRFormerBlock} - - def __init__( - self, - extra, - in_channels=3, - conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True), - transformer_norm_cfg=dict(type='LN', eps=1e-6), - norm_eval=False, - with_cp=False, - zero_init_residual=False, - frozen_stages=-1, - init_cfg=[ - dict(type='Normal', std=0.001, layer=['Conv2d']), - dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) - ], - ): - - # stochastic depth - depths = [ - extra[stage]['num_blocks'][0] * extra[stage]['num_modules'] - for stage in ['stage2', 'stage3', 'stage4'] - ] - depth_s2, depth_s3, _ = depths - drop_path_rate = extra['drop_path_rate'] - dpr = [ - x.item() for x in torch.linspace(0, drop_path_rate, sum(depths)) - ] - extra['stage2']['drop_path_rates'] = dpr[0:depth_s2] - extra['stage3']['drop_path_rates'] = dpr[depth_s2:depth_s2 + depth_s3] - extra['stage4']['drop_path_rates'] = dpr[depth_s2 + depth_s3:] - - # HRFormer use bilinear upsample as default - upsample_cfg = extra.get('upsample', { - 'mode': 'bilinear', - 'align_corners': False - }) - extra['upsample'] = upsample_cfg - self.transformer_norm_cfg = transformer_norm_cfg - self.with_rpe = extra.get('with_rpe', True) - self.with_pad_mask = extra.get('with_pad_mask', False) - - super().__init__(extra, in_channels, conv_cfg, norm_cfg, norm_eval, - with_cp, zero_init_residual, frozen_stages, init_cfg) - - def _make_stage(self, - layer_config, - num_inchannels, - multiscale_output=True): - """Make each stage.""" - num_modules = layer_config['num_modules'] - num_branches = layer_config['num_branches'] - num_blocks = layer_config['num_blocks'] - num_channels = layer_config['num_channels'] - block = self.blocks_dict[layer_config['block']] - num_heads = layer_config['num_heads'] - num_window_sizes = layer_config['window_sizes'] - num_mlp_ratios = layer_config['mlp_ratios'] - drop_path_rates = layer_config['drop_path_rates'] - - modules = [] - for i in range(num_modules): - # multiscale_output is only used at the last module - if not multiscale_output and i == num_modules - 1: - reset_multiscale_output = False - else: - reset_multiscale_output = True - - modules.append( - HRFomerModule( - num_branches, - block, - num_blocks, - num_inchannels, - num_channels, - num_heads, - num_window_sizes, - num_mlp_ratios, - reset_multiscale_output, - drop_paths=drop_path_rates[num_blocks[0] * - i:num_blocks[0] * (i + 1)], - with_rpe=self.with_rpe, - with_pad_mask=self.with_pad_mask, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - transformer_norm_cfg=self.transformer_norm_cfg, - with_cp=self.with_cp, - upsample_cfg=self.upsample_cfg)) - num_inchannels = modules[-1].get_num_inchannels() - - return nn.Sequential(*modules), num_inchannels +# Copyright (c) OpenMMLab. All rights reserved. + +import math + +import torch +import torch.nn as nn +from mmcv.cnn import build_activation_layer, build_conv_layer, build_norm_layer +from mmcv.cnn.bricks.transformer import build_dropout +from mmengine.model import BaseModule, trunc_normal_init +from torch.nn.functional import pad + +from mmpose.registry import MODELS +from .hrnet import Bottleneck, HRModule, HRNet + + +def nlc_to_nchw(x, hw_shape): + """Convert [N, L, C] shape tensor to [N, C, H, W] shape tensor. + + Args: + x (Tensor): The input tensor of shape [N, L, C] before conversion. + hw_shape (Sequence[int]): The height and width of output feature map. 
+ + Returns: + Tensor: The output tensor of shape [N, C, H, W] after conversion. + """ + H, W = hw_shape + assert len(x.shape) == 3 + B, L, C = x.shape + assert L == H * W, 'The seq_len doesn\'t match H, W' + return x.transpose(1, 2).reshape(B, C, H, W) + + +def nchw_to_nlc(x): + """Flatten [N, C, H, W] shape tensor to [N, L, C] shape tensor. + + Args: + x (Tensor): The input tensor of shape [N, C, H, W] before conversion. + + Returns: + Tensor: The output tensor of shape [N, L, C] after conversion. + """ + assert len(x.shape) == 4 + return x.flatten(2).transpose(1, 2).contiguous() + + +def build_drop_path(drop_path_rate): + """Build drop path layer.""" + return build_dropout(dict(type='DropPath', drop_prob=drop_path_rate)) + + +class WindowMSA(BaseModule): + """Window based multi-head self-attention (W-MSA) module with relative + position bias. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (tuple[int]): The height and width of the window. + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Default: True. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + attn_drop_rate (float, optional): Dropout ratio of attention weight. + Default: 0.0 + proj_drop_rate (float, optional): Dropout ratio of output. Default: 0. + with_rpe (bool, optional): If True, use relative position bias. + Default: True. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__(self, + embed_dims, + num_heads, + window_size, + qkv_bias=True, + qk_scale=None, + attn_drop_rate=0., + proj_drop_rate=0., + with_rpe=True, + init_cfg=None): + + super().__init__(init_cfg=init_cfg) + self.embed_dims = embed_dims + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_embed_dims = embed_dims // num_heads + self.scale = qk_scale or head_embed_dims**-0.5 + + self.with_rpe = with_rpe + if self.with_rpe: + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros( + (2 * window_size[0] - 1) * (2 * window_size[1] - 1), + num_heads)) # 2*Wh-1 * 2*Ww-1, nH + + Wh, Ww = self.window_size + rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww) + rel_position_index = rel_index_coords + rel_index_coords.T + rel_position_index = rel_position_index.flip(1).contiguous() + self.register_buffer('relative_position_index', rel_position_index) + + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop_rate) + self.proj = nn.Linear(embed_dims, embed_dims) + self.proj_drop = nn.Dropout(proj_drop_rate) + + self.softmax = nn.Softmax(dim=-1) + + def init_weights(self): + trunc_normal_init(self.relative_position_bias_table, std=0.02) + + def forward(self, x, mask=None): + """ + Args: + + x (tensor): input features with shape of (B*num_windows, N, C) + mask (tensor | None, Optional): mask with shape of (num_windows, + Wh*Ww, Wh*Ww), value should be between (-inf, 0]. 
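A rough usage sketch, not part of the patched file: WindowMSA expects tokens that have already been partitioned into windows, so the batch axis is B * num_windows and N equals Wh * Ww. Assuming the patched module is importable, a shape check looks like:

import torch
from mmpose.models.backbones.hrformer import WindowMSA

attn = WindowMSA(embed_dims=32, num_heads=4, window_size=(7, 7))
attn.eval()
windows = torch.randn(8, 49, 32)   # (B*num_windows, Wh*Ww, C) with Wh*Ww = 49
out = attn(windows)                # shape preserved: (8, 49, 32)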
+ """ + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, + C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + if self.with_rpe: + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], + self.window_size[0] * self.window_size[1], + -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute( + 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B // nW, nW, self.num_heads, N, + N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + @staticmethod + def double_step_seq(step1, len1, step2, len2): + seq1 = torch.arange(0, step1 * len1, step1) + seq2 = torch.arange(0, step2 * len2, step2) + return (seq1[:, None] + seq2[None, :]).reshape(1, -1) + + +class LocalWindowSelfAttention(BaseModule): + r""" Local-window Self Attention (LSA) module with relative position bias. + + This module is the short-range self-attention module in the + Interlaced Sparse Self-Attention `_. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (tuple[int] | int): The height and width of the window. + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Default: True. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + attn_drop_rate (float, optional): Dropout ratio of attention weight. + Default: 0.0 + proj_drop_rate (float, optional): Dropout ratio of output. Default: 0. + with_rpe (bool, optional): If True, use relative position bias. + Default: True. + with_pad_mask (bool, optional): If True, mask out the padded tokens in + the attention process. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. 
+ """ + + def __init__(self, + embed_dims, + num_heads, + window_size, + qkv_bias=True, + qk_scale=None, + attn_drop_rate=0., + proj_drop_rate=0., + with_rpe=True, + with_pad_mask=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + if isinstance(window_size, int): + window_size = (window_size, window_size) + self.window_size = window_size + self.with_pad_mask = with_pad_mask + self.attn = WindowMSA( + embed_dims=embed_dims, + num_heads=num_heads, + window_size=window_size, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop_rate=attn_drop_rate, + proj_drop_rate=proj_drop_rate, + with_rpe=with_rpe, + init_cfg=init_cfg) + + def forward(self, x, H, W, **kwargs): + """Forward function.""" + B, N, C = x.shape + x = x.view(B, H, W, C) + Wh, Ww = self.window_size + + # center-pad the feature on H and W axes + pad_h = math.ceil(H / Wh) * Wh - H + pad_w = math.ceil(W / Ww) * Ww - W + x = pad(x, (0, 0, pad_w // 2, pad_w - pad_w // 2, pad_h // 2, + pad_h - pad_h // 2)) + + # permute + x = x.view(B, math.ceil(H / Wh), Wh, math.ceil(W / Ww), Ww, C) + x = x.permute(0, 1, 3, 2, 4, 5) + x = x.reshape(-1, Wh * Ww, C) # (B*num_window, Wh*Ww, C) + + # attention + if self.with_pad_mask and pad_h > 0 and pad_w > 0: + pad_mask = x.new_zeros(1, H, W, 1) + pad_mask = pad( + pad_mask, [ + 0, 0, pad_w // 2, pad_w - pad_w // 2, pad_h // 2, + pad_h - pad_h // 2 + ], + value=-float('inf')) + pad_mask = pad_mask.view(1, math.ceil(H / Wh), Wh, + math.ceil(W / Ww), Ww, 1) + pad_mask = pad_mask.permute(1, 3, 0, 2, 4, 5) + pad_mask = pad_mask.reshape(-1, Wh * Ww) + pad_mask = pad_mask[:, None, :].expand([-1, Wh * Ww, -1]) + out = self.attn(x, pad_mask, **kwargs) + else: + out = self.attn(x, **kwargs) + + # reverse permutation + out = out.reshape(B, math.ceil(H / Wh), math.ceil(W / Ww), Wh, Ww, C) + out = out.permute(0, 1, 3, 2, 4, 5) + out = out.reshape(B, H + pad_h, W + pad_w, C) + + # de-pad + out = out[:, pad_h // 2:H + pad_h // 2, pad_w // 2:W + pad_w // 2] + return out.reshape(B, N, C) + + +class CrossFFN(BaseModule): + r"""FFN with Depthwise Conv of HRFormer. + + Args: + in_features (int): The feature dimension. + hidden_features (int, optional): The hidden dimension of FFNs. + Defaults: The same as in_features. + act_cfg (dict, optional): Config of activation layer. + Default: dict(type='GELU'). + dw_act_cfg (dict, optional): Config of activation layer appended + right after DW Conv. Default: dict(type='GELU'). + norm_cfg (dict, optional): Config of norm layer. + Default: dict(type='SyncBN'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. 
+ """ + + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_cfg=dict(type='GELU'), + dw_act_cfg=dict(type='GELU'), + norm_cfg=dict(type='SyncBN'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1) + self.act1 = build_activation_layer(act_cfg) + self.norm1 = build_norm_layer(norm_cfg, hidden_features)[1] + self.dw3x3 = nn.Conv2d( + hidden_features, + hidden_features, + kernel_size=3, + stride=1, + groups=hidden_features, + padding=1) + self.act2 = build_activation_layer(dw_act_cfg) + self.norm2 = build_norm_layer(norm_cfg, hidden_features)[1] + self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1) + self.act3 = build_activation_layer(act_cfg) + self.norm3 = build_norm_layer(norm_cfg, out_features)[1] + + def forward(self, x, H, W): + """Forward function.""" + x = nlc_to_nchw(x, (H, W)) + x = self.act1(self.norm1(self.fc1(x))) + x = self.act2(self.norm2(self.dw3x3(x))) + x = self.act3(self.norm3(self.fc2(x))) + x = nchw_to_nlc(x) + return x + + +class HRFormerBlock(BaseModule): + """High-Resolution Block for HRFormer. + + Args: + in_features (int): The input dimension. + out_features (int): The output dimension. + num_heads (int): The number of head within each LSA. + window_size (int, optional): The window size for the LSA. + Default: 7 + mlp_ratio (int, optional): The expansion ration of FFN. + Default: 4 + act_cfg (dict, optional): Config of activation layer. + Default: dict(type='GELU'). + norm_cfg (dict, optional): Config of norm layer. + Default: dict(type='SyncBN'). + transformer_norm_cfg (dict, optional): Config of transformer norm + layer. Default: dict(type='LN', eps=1e-6). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + expansion = 1 + + def __init__(self, + in_features, + out_features, + num_heads, + window_size=7, + mlp_ratio=4.0, + drop_path=0.0, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='SyncBN'), + transformer_norm_cfg=dict(type='LN', eps=1e-6), + init_cfg=None, + **kwargs): + super(HRFormerBlock, self).__init__(init_cfg=init_cfg) + self.num_heads = num_heads + self.window_size = window_size + self.mlp_ratio = mlp_ratio + + self.norm1 = build_norm_layer(transformer_norm_cfg, in_features)[1] + self.attn = LocalWindowSelfAttention( + in_features, + num_heads=num_heads, + window_size=window_size, + init_cfg=None, + **kwargs) + + self.norm2 = build_norm_layer(transformer_norm_cfg, out_features)[1] + self.ffn = CrossFFN( + in_features=in_features, + hidden_features=int(in_features * mlp_ratio), + out_features=out_features, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + dw_act_cfg=act_cfg, + init_cfg=None) + + self.drop_path = build_drop_path( + drop_path) if drop_path > 0.0 else nn.Identity() + + def forward(self, x): + """Forward function.""" + B, C, H, W = x.size() + # Attention + x = x.view(B, C, -1).permute(0, 2, 1) + x = x + self.drop_path(self.attn(self.norm1(x), H, W)) + # FFN + x = x + self.drop_path(self.ffn(self.norm2(x), H, W)) + x = x.permute(0, 2, 1).view(B, C, H, W) + return x + + def extra_repr(self): + """(Optional) Set the extra information about this module.""" + return 'num_heads={}, window_size={}, mlp_ratio={}'.format( + self.num_heads, self.window_size, self.mlp_ratio) + + +class HRFomerModule(HRModule): + """High-Resolution Module for HRFormer. 
+ + Args: + num_branches (int): The number of branches in the HRFormerModule. + block (nn.Module): The building block of HRFormer. + The block should be the HRFormerBlock. + num_blocks (tuple): The number of blocks in each branch. + The length must be equal to num_branches. + num_inchannels (tuple): The number of input channels in each branch. + The length must be equal to num_branches. + num_channels (tuple): The number of channels in each branch. + The length must be equal to num_branches. + num_heads (tuple): The number of heads within the LSAs. + num_window_sizes (tuple): The window size for the LSAs. + num_mlp_ratios (tuple): The expansion ratio for the FFNs. + drop_path (int, optional): The drop path rate of HRFomer. + Default: 0.0 + multiscale_output (bool, optional): Whether to output multi-level + features produced by multiple branches. If False, only the first + level feature will be output. Default: True. + conv_cfg (dict, optional): Config of the conv layers. + Default: None. + norm_cfg (dict, optional): Config of the norm layers appended + right after conv. Default: dict(type='SyncBN', requires_grad=True) + transformer_norm_cfg (dict, optional): Config of the norm layers. + Default: dict(type='LN', eps=1e-6) + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False + upsample_cfg(dict, optional): The config of upsample layers in fuse + layers. Default: dict(mode='bilinear', align_corners=False) + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__(self, + num_branches, + block, + num_blocks, + num_inchannels, + num_channels, + num_heads, + num_window_sizes, + num_mlp_ratios, + multiscale_output=True, + drop_paths=0.0, + with_rpe=True, + with_pad_mask=False, + conv_cfg=None, + norm_cfg=dict(type='SyncBN', requires_grad=True), + transformer_norm_cfg=dict(type='LN', eps=1e-6), + with_cp=False, + upsample_cfg=dict(mode='bilinear', align_corners=False), + **kwargs): + + self.transformer_norm_cfg = transformer_norm_cfg + self.drop_paths = drop_paths + self.num_heads = num_heads + self.num_window_sizes = num_window_sizes + self.num_mlp_ratios = num_mlp_ratios + self.with_rpe = with_rpe + self.with_pad_mask = with_pad_mask + + super().__init__(num_branches, block, num_blocks, num_inchannels, + num_channels, multiscale_output, with_cp, conv_cfg, + norm_cfg, upsample_cfg, **kwargs) + + def _make_one_branch(self, + branch_index, + block, + num_blocks, + num_channels, + stride=1): + """Build one branch.""" + # HRFormerBlock does not support down sample layer yet. 
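+        # Consequently each branch keeps both its resolution and its channel
+        # width: the blocks below are built with stride 1 and identical
+        # input/output channel numbers, which the assertion enforces.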
+ assert stride == 1 and self.in_channels[branch_index] == num_channels[ + branch_index] + layers = [] + layers.append( + block( + self.in_channels[branch_index], + num_channels[branch_index], + num_heads=self.num_heads[branch_index], + window_size=self.num_window_sizes[branch_index], + mlp_ratio=self.num_mlp_ratios[branch_index], + drop_path=self.drop_paths[0], + norm_cfg=self.norm_cfg, + transformer_norm_cfg=self.transformer_norm_cfg, + init_cfg=None, + with_rpe=self.with_rpe, + with_pad_mask=self.with_pad_mask)) + + self.in_channels[ + branch_index] = self.in_channels[branch_index] * block.expansion + for i in range(1, num_blocks[branch_index]): + layers.append( + block( + self.in_channels[branch_index], + num_channels[branch_index], + num_heads=self.num_heads[branch_index], + window_size=self.num_window_sizes[branch_index], + mlp_ratio=self.num_mlp_ratios[branch_index], + drop_path=self.drop_paths[i], + norm_cfg=self.norm_cfg, + transformer_norm_cfg=self.transformer_norm_cfg, + init_cfg=None, + with_rpe=self.with_rpe, + with_pad_mask=self.with_pad_mask)) + return nn.Sequential(*layers) + + def _make_fuse_layers(self): + """Build fuse layers.""" + if self.num_branches == 1: + return None + num_branches = self.num_branches + num_inchannels = self.in_channels + fuse_layers = [] + for i in range(num_branches if self.multiscale_output else 1): + fuse_layer = [] + for j in range(num_branches): + if j > i: + fuse_layer.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + num_inchannels[j], + num_inchannels[i], + kernel_size=1, + stride=1, + bias=False), + build_norm_layer(self.norm_cfg, + num_inchannels[i])[1], + nn.Upsample( + scale_factor=2**(j - i), + mode=self.upsample_cfg['mode'], + align_corners=self. + upsample_cfg['align_corners']))) + elif j == i: + fuse_layer.append(None) + else: + conv3x3s = [] + for k in range(i - j): + if k == i - j - 1: + num_outchannels_conv3x3 = num_inchannels[i] + with_out_act = False + else: + num_outchannels_conv3x3 = num_inchannels[j] + with_out_act = True + sub_modules = [ + build_conv_layer( + self.conv_cfg, + num_inchannels[j], + num_inchannels[j], + kernel_size=3, + stride=2, + padding=1, + groups=num_inchannels[j], + bias=False, + ), + build_norm_layer(self.norm_cfg, + num_inchannels[j])[1], + build_conv_layer( + self.conv_cfg, + num_inchannels[j], + num_outchannels_conv3x3, + kernel_size=1, + stride=1, + bias=False, + ), + build_norm_layer(self.norm_cfg, + num_outchannels_conv3x3)[1] + ] + if with_out_act: + sub_modules.append(nn.ReLU(False)) + conv3x3s.append(nn.Sequential(*sub_modules)) + fuse_layer.append(nn.Sequential(*conv3x3s)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def get_num_inchannels(self): + """Return the number of input channels.""" + return self.in_channels + + +@MODELS.register_module() +class HRFormer(HRNet): + """HRFormer backbone. + + This backbone is the implementation of `HRFormer: High-Resolution + Transformer for Dense Prediction `_. + + Args: + extra (dict): Detailed configuration for each stage of HRNet. + There must be 4 stages, the configuration for each stage must have + 5 keys: + + - num_modules (int): The number of HRModule in this stage. + - num_branches (int): The number of branches in the HRModule. + - block (str): The type of block. + - num_blocks (tuple): The number of blocks in each branch. + The length must be equal to num_branches. + - num_channels (tuple): The number of channels in each branch. + The length must be equal to num_branches. 
+ in_channels (int): Number of input image channels. Normally 3. + conv_cfg (dict): Dictionary to construct and config conv layer. + Default: None. + norm_cfg (dict): Config of norm layer. + Use `SyncBN` by default. + transformer_norm_cfg (dict): Config of transformer norm layer. + Use `LN` by default. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + + Example: + >>> from mmpose.models import HRFormer + >>> import torch + >>> extra = dict( + >>> stage1=dict( + >>> num_modules=1, + >>> num_branches=1, + >>> block='BOTTLENECK', + >>> num_blocks=(2, ), + >>> num_channels=(64, )), + >>> stage2=dict( + >>> num_modules=1, + >>> num_branches=2, + >>> block='HRFORMER', + >>> window_sizes=(7, 7), + >>> num_heads=(1, 2), + >>> mlp_ratios=(4, 4), + >>> num_blocks=(2, 2), + >>> num_channels=(32, 64)), + >>> stage3=dict( + >>> num_modules=4, + >>> num_branches=3, + >>> block='HRFORMER', + >>> window_sizes=(7, 7, 7), + >>> num_heads=(1, 2, 4), + >>> mlp_ratios=(4, 4, 4), + >>> num_blocks=(2, 2, 2), + >>> num_channels=(32, 64, 128)), + >>> stage4=dict( + >>> num_modules=2, + >>> num_branches=4, + >>> block='HRFORMER', + >>> window_sizes=(7, 7, 7, 7), + >>> num_heads=(1, 2, 4, 8), + >>> mlp_ratios=(4, 4, 4, 4), + >>> num_blocks=(2, 2, 2, 2), + >>> num_channels=(32, 64, 128, 256))) + >>> self = HRFormer(extra, in_channels=1) + >>> self.eval() + >>> inputs = torch.rand(1, 1, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... 
print(tuple(level_out.shape)) + (1, 32, 8, 8) + (1, 64, 4, 4) + (1, 128, 2, 2) + (1, 256, 1, 1) + """ + + blocks_dict = {'BOTTLENECK': Bottleneck, 'HRFORMERBLOCK': HRFormerBlock} + + def __init__( + self, + extra, + in_channels=3, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + transformer_norm_cfg=dict(type='LN', eps=1e-6), + norm_eval=False, + with_cp=False, + zero_init_residual=False, + frozen_stages=-1, + init_cfg=[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) + ], + ): + + # stochastic depth + depths = [ + extra[stage]['num_blocks'][0] * extra[stage]['num_modules'] + for stage in ['stage2', 'stage3', 'stage4'] + ] + depth_s2, depth_s3, _ = depths + drop_path_rate = extra['drop_path_rate'] + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, sum(depths)) + ] + extra['stage2']['drop_path_rates'] = dpr[0:depth_s2] + extra['stage3']['drop_path_rates'] = dpr[depth_s2:depth_s2 + depth_s3] + extra['stage4']['drop_path_rates'] = dpr[depth_s2 + depth_s3:] + + # HRFormer use bilinear upsample as default + upsample_cfg = extra.get('upsample', { + 'mode': 'bilinear', + 'align_corners': False + }) + extra['upsample'] = upsample_cfg + self.transformer_norm_cfg = transformer_norm_cfg + self.with_rpe = extra.get('with_rpe', True) + self.with_pad_mask = extra.get('with_pad_mask', False) + + super().__init__(extra, in_channels, conv_cfg, norm_cfg, norm_eval, + with_cp, zero_init_residual, frozen_stages, init_cfg) + + def _make_stage(self, + layer_config, + num_inchannels, + multiscale_output=True): + """Make each stage.""" + num_modules = layer_config['num_modules'] + num_branches = layer_config['num_branches'] + num_blocks = layer_config['num_blocks'] + num_channels = layer_config['num_channels'] + block = self.blocks_dict[layer_config['block']] + num_heads = layer_config['num_heads'] + num_window_sizes = layer_config['window_sizes'] + num_mlp_ratios = layer_config['mlp_ratios'] + drop_path_rates = layer_config['drop_path_rates'] + + modules = [] + for i in range(num_modules): + # multiscale_output is only used at the last module + if not multiscale_output and i == num_modules - 1: + reset_multiscale_output = False + else: + reset_multiscale_output = True + + modules.append( + HRFomerModule( + num_branches, + block, + num_blocks, + num_inchannels, + num_channels, + num_heads, + num_window_sizes, + num_mlp_ratios, + reset_multiscale_output, + drop_paths=drop_path_rates[num_blocks[0] * + i:num_blocks[0] * (i + 1)], + with_rpe=self.with_rpe, + with_pad_mask=self.with_pad_mask, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + transformer_norm_cfg=self.transformer_norm_cfg, + with_cp=self.with_cp, + upsample_cfg=self.upsample_cfg)) + num_inchannels = modules[-1].get_num_inchannels() + + return nn.Sequential(*modules), num_inchannels diff --git a/mmpose/models/backbones/hrnet.py b/mmpose/models/backbones/hrnet.py index 381b22d60e..4b291b48fe 100644 --- a/mmpose/models/backbones/hrnet.py +++ b/mmpose/models/backbones/hrnet.py @@ -1,610 +1,610 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy - -import torch.nn as nn -from mmcv.cnn import build_conv_layer, build_norm_layer -from mmengine.model import BaseModule, constant_init -from torch.nn.modules.batchnorm import _BatchNorm - -from mmpose.registry import MODELS -from .base_backbone import BaseBackbone -from .resnet import BasicBlock, Bottleneck, get_expansion - - -class HRModule(BaseModule): - """High-Resolution Module for HRNet. 
- - In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange - is in this module. - """ - - def __init__(self, - num_branches, - blocks, - num_blocks, - in_channels, - num_channels, - multiscale_output=False, - with_cp=False, - conv_cfg=None, - norm_cfg=dict(type='BN'), - upsample_cfg=dict(mode='nearest', align_corners=None), - init_cfg=None): - - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - super().__init__(init_cfg=init_cfg) - self._check_branches(num_branches, num_blocks, in_channels, - num_channels) - - self.in_channels = in_channels - self.num_branches = num_branches - - self.multiscale_output = multiscale_output - self.norm_cfg = norm_cfg - self.conv_cfg = conv_cfg - self.upsample_cfg = upsample_cfg - self.with_cp = with_cp - self.branches = self._make_branches(num_branches, blocks, num_blocks, - num_channels) - self.fuse_layers = self._make_fuse_layers() - self.relu = nn.ReLU(inplace=True) - - @staticmethod - def _check_branches(num_branches, num_blocks, in_channels, num_channels): - """Check input to avoid ValueError.""" - if num_branches != len(num_blocks): - error_msg = f'NUM_BRANCHES({num_branches}) ' \ - f'!= NUM_BLOCKS({len(num_blocks)})' - raise ValueError(error_msg) - - if num_branches != len(num_channels): - error_msg = f'NUM_BRANCHES({num_branches}) ' \ - f'!= NUM_CHANNELS({len(num_channels)})' - raise ValueError(error_msg) - - if num_branches != len(in_channels): - error_msg = f'NUM_BRANCHES({num_branches}) ' \ - f'!= NUM_INCHANNELS({len(in_channels)})' - raise ValueError(error_msg) - - def _make_one_branch(self, - branch_index, - block, - num_blocks, - num_channels, - stride=1): - """Make one branch.""" - downsample = None - if stride != 1 or \ - self.in_channels[branch_index] != \ - num_channels[branch_index] * get_expansion(block): - downsample = nn.Sequential( - build_conv_layer( - self.conv_cfg, - self.in_channels[branch_index], - num_channels[branch_index] * get_expansion(block), - kernel_size=1, - stride=stride, - bias=False), - build_norm_layer( - self.norm_cfg, - num_channels[branch_index] * get_expansion(block))[1]) - - layers = [] - layers.append( - block( - self.in_channels[branch_index], - num_channels[branch_index] * get_expansion(block), - stride=stride, - downsample=downsample, - with_cp=self.with_cp, - norm_cfg=self.norm_cfg, - conv_cfg=self.conv_cfg)) - self.in_channels[branch_index] = \ - num_channels[branch_index] * get_expansion(block) - for _ in range(1, num_blocks[branch_index]): - layers.append( - block( - self.in_channels[branch_index], - num_channels[branch_index] * get_expansion(block), - with_cp=self.with_cp, - norm_cfg=self.norm_cfg, - conv_cfg=self.conv_cfg)) - - return nn.Sequential(*layers) - - def _make_branches(self, num_branches, block, num_blocks, num_channels): - """Make branches.""" - branches = [] - - for i in range(num_branches): - branches.append( - self._make_one_branch(i, block, num_blocks, num_channels)) - - return nn.ModuleList(branches) - - def _make_fuse_layers(self): - """Make fuse layer.""" - if self.num_branches == 1: - return None - - num_branches = self.num_branches - in_channels = self.in_channels - fuse_layers = [] - num_out_branches = num_branches if self.multiscale_output else 1 - - for i in range(num_out_branches): - fuse_layer = [] - for j in range(num_branches): - if j > i: - fuse_layer.append( - nn.Sequential( - build_conv_layer( - self.conv_cfg, - in_channels[j], - in_channels[i], - kernel_size=1, - stride=1, - padding=0, - bias=False), - 
build_norm_layer(self.norm_cfg, in_channels[i])[1], - nn.Upsample( - scale_factor=2**(j - i), - mode=self.upsample_cfg['mode'], - align_corners=self. - upsample_cfg['align_corners']))) - elif j == i: - fuse_layer.append(None) - else: - conv_downsamples = [] - for k in range(i - j): - if k == i - j - 1: - conv_downsamples.append( - nn.Sequential( - build_conv_layer( - self.conv_cfg, - in_channels[j], - in_channels[i], - kernel_size=3, - stride=2, - padding=1, - bias=False), - build_norm_layer(self.norm_cfg, - in_channels[i])[1])) - else: - conv_downsamples.append( - nn.Sequential( - build_conv_layer( - self.conv_cfg, - in_channels[j], - in_channels[j], - kernel_size=3, - stride=2, - padding=1, - bias=False), - build_norm_layer(self.norm_cfg, - in_channels[j])[1], - nn.ReLU(inplace=True))) - fuse_layer.append(nn.Sequential(*conv_downsamples)) - fuse_layers.append(nn.ModuleList(fuse_layer)) - - return nn.ModuleList(fuse_layers) - - def forward(self, x): - """Forward function.""" - if self.num_branches == 1: - return [self.branches[0](x[0])] - - for i in range(self.num_branches): - x[i] = self.branches[i](x[i]) - - x_fuse = [] - for i in range(len(self.fuse_layers)): - y = 0 - for j in range(self.num_branches): - if i == j: - y += x[j] - else: - y += self.fuse_layers[i][j](x[j]) - x_fuse.append(self.relu(y)) - return x_fuse - - -@MODELS.register_module() -class HRNet(BaseBackbone): - """HRNet backbone. - - `High-Resolution Representations for Labeling Pixels and Regions - `__ - - Args: - extra (dict): detailed configuration for each stage of HRNet. - in_channels (int): Number of input image channels. Default: 3. - conv_cfg (dict): dictionary to construct and config conv layer. - norm_cfg (dict): dictionary to construct and config norm layer. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Default: False - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. - zero_init_residual (bool): whether to use zero init for last norm layer - in resblocks to let them behave as identity. - frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - -1 means not freezing any parameters. Default: -1. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: - ``[ - dict(type='Normal', std=0.001, layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ]`` - - Example: - >>> from mmpose.models import HRNet - >>> import torch - >>> extra = dict( - >>> stage1=dict( - >>> num_modules=1, - >>> num_branches=1, - >>> block='BOTTLENECK', - >>> num_blocks=(4, ), - >>> num_channels=(64, )), - >>> stage2=dict( - >>> num_modules=1, - >>> num_branches=2, - >>> block='BASIC', - >>> num_blocks=(4, 4), - >>> num_channels=(32, 64)), - >>> stage3=dict( - >>> num_modules=4, - >>> num_branches=3, - >>> block='BASIC', - >>> num_blocks=(4, 4, 4), - >>> num_channels=(32, 64, 128)), - >>> stage4=dict( - >>> num_modules=3, - >>> num_branches=4, - >>> block='BASIC', - >>> num_blocks=(4, 4, 4, 4), - >>> num_channels=(32, 64, 128, 256))) - >>> self = HRNet(extra, in_channels=1) - >>> self.eval() - >>> inputs = torch.rand(1, 1, 32, 32) - >>> level_outputs = self.forward(inputs) - >>> for level_out in level_outputs: - ... 
print(tuple(level_out.shape)) - (1, 32, 8, 8) - """ - - blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck} - - def __init__( - self, - extra, - in_channels=3, - conv_cfg=None, - norm_cfg=dict(type='BN'), - norm_eval=False, - with_cp=False, - zero_init_residual=False, - frozen_stages=-1, - init_cfg=[ - dict(type='Normal', std=0.001, layer=['Conv2d']), - dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) - ], - ): - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - super().__init__(init_cfg=init_cfg) - self.extra = extra - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.init_cfg = init_cfg - self.norm_eval = norm_eval - self.with_cp = with_cp - self.zero_init_residual = zero_init_residual - self.frozen_stages = frozen_stages - - # stem net - self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1) - self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2) - - self.conv1 = build_conv_layer( - self.conv_cfg, - in_channels, - 64, - kernel_size=3, - stride=2, - padding=1, - bias=False) - - self.add_module(self.norm1_name, norm1) - self.conv2 = build_conv_layer( - self.conv_cfg, - 64, - 64, - kernel_size=3, - stride=2, - padding=1, - bias=False) - - self.add_module(self.norm2_name, norm2) - self.relu = nn.ReLU(inplace=True) - - self.upsample_cfg = self.extra.get('upsample', { - 'mode': 'nearest', - 'align_corners': None - }) - - # stage 1 - self.stage1_cfg = self.extra['stage1'] - num_channels = self.stage1_cfg['num_channels'][0] - block_type = self.stage1_cfg['block'] - num_blocks = self.stage1_cfg['num_blocks'][0] - - block = self.blocks_dict[block_type] - stage1_out_channels = num_channels * get_expansion(block) - self.layer1 = self._make_layer(block, 64, stage1_out_channels, - num_blocks) - - # stage 2 - self.stage2_cfg = self.extra['stage2'] - num_channels = self.stage2_cfg['num_channels'] - block_type = self.stage2_cfg['block'] - - block = self.blocks_dict[block_type] - num_channels = [ - channel * get_expansion(block) for channel in num_channels - ] - self.transition1 = self._make_transition_layer([stage1_out_channels], - num_channels) - self.stage2, pre_stage_channels = self._make_stage( - self.stage2_cfg, num_channels) - - # stage 3 - self.stage3_cfg = self.extra['stage3'] - num_channels = self.stage3_cfg['num_channels'] - block_type = self.stage3_cfg['block'] - - block = self.blocks_dict[block_type] - num_channels = [ - channel * get_expansion(block) for channel in num_channels - ] - self.transition2 = self._make_transition_layer(pre_stage_channels, - num_channels) - self.stage3, pre_stage_channels = self._make_stage( - self.stage3_cfg, num_channels) - - # stage 4 - self.stage4_cfg = self.extra['stage4'] - num_channels = self.stage4_cfg['num_channels'] - block_type = self.stage4_cfg['block'] - - block = self.blocks_dict[block_type] - num_channels = [ - channel * get_expansion(block) for channel in num_channels - ] - self.transition3 = self._make_transition_layer(pre_stage_channels, - num_channels) - - self.stage4, pre_stage_channels = self._make_stage( - self.stage4_cfg, - num_channels, - multiscale_output=self.stage4_cfg.get('multiscale_output', False)) - - self._freeze_stages() - - @property - def norm1(self): - """nn.Module: the normalization layer named "norm1" """ - return getattr(self, self.norm1_name) - - @property - def norm2(self): - """nn.Module: the normalization layer named "norm2" """ - return getattr(self, self.norm2_name) - - def _make_transition_layer(self, num_channels_pre_layer, - 
num_channels_cur_layer): - """Make transition layer.""" - num_branches_cur = len(num_channels_cur_layer) - num_branches_pre = len(num_channels_pre_layer) - - transition_layers = [] - for i in range(num_branches_cur): - if i < num_branches_pre: - if num_channels_cur_layer[i] != num_channels_pre_layer[i]: - transition_layers.append( - nn.Sequential( - build_conv_layer( - self.conv_cfg, - num_channels_pre_layer[i], - num_channels_cur_layer[i], - kernel_size=3, - stride=1, - padding=1, - bias=False), - build_norm_layer(self.norm_cfg, - num_channels_cur_layer[i])[1], - nn.ReLU(inplace=True))) - else: - transition_layers.append(None) - else: - conv_downsamples = [] - for j in range(i + 1 - num_branches_pre): - in_channels = num_channels_pre_layer[-1] - out_channels = num_channels_cur_layer[i] \ - if j == i - num_branches_pre else in_channels - conv_downsamples.append( - nn.Sequential( - build_conv_layer( - self.conv_cfg, - in_channels, - out_channels, - kernel_size=3, - stride=2, - padding=1, - bias=False), - build_norm_layer(self.norm_cfg, out_channels)[1], - nn.ReLU(inplace=True))) - transition_layers.append(nn.Sequential(*conv_downsamples)) - - return nn.ModuleList(transition_layers) - - def _make_layer(self, block, in_channels, out_channels, blocks, stride=1): - """Make layer.""" - downsample = None - if stride != 1 or in_channels != out_channels: - downsample = nn.Sequential( - build_conv_layer( - self.conv_cfg, - in_channels, - out_channels, - kernel_size=1, - stride=stride, - bias=False), - build_norm_layer(self.norm_cfg, out_channels)[1]) - - layers = [] - layers.append( - block( - in_channels, - out_channels, - stride=stride, - downsample=downsample, - with_cp=self.with_cp, - norm_cfg=self.norm_cfg, - conv_cfg=self.conv_cfg)) - for _ in range(1, blocks): - layers.append( - block( - out_channels, - out_channels, - with_cp=self.with_cp, - norm_cfg=self.norm_cfg, - conv_cfg=self.conv_cfg)) - - return nn.Sequential(*layers) - - def _make_stage(self, layer_config, in_channels, multiscale_output=True): - """Make stage.""" - num_modules = layer_config['num_modules'] - num_branches = layer_config['num_branches'] - num_blocks = layer_config['num_blocks'] - num_channels = layer_config['num_channels'] - block = self.blocks_dict[layer_config['block']] - - hr_modules = [] - for i in range(num_modules): - # multi_scale_output is only used for the last module - if not multiscale_output and i == num_modules - 1: - reset_multiscale_output = False - else: - reset_multiscale_output = True - - hr_modules.append( - HRModule( - num_branches, - block, - num_blocks, - in_channels, - num_channels, - reset_multiscale_output, - with_cp=self.with_cp, - norm_cfg=self.norm_cfg, - conv_cfg=self.conv_cfg, - upsample_cfg=self.upsample_cfg)) - - in_channels = hr_modules[-1].in_channels - - return nn.Sequential(*hr_modules), in_channels - - def _freeze_stages(self): - """Freeze parameters.""" - if self.frozen_stages >= 0: - self.norm1.eval() - self.norm2.eval() - - for m in [self.conv1, self.norm1, self.conv2, self.norm2]: - for param in m.parameters(): - param.requires_grad = False - - for i in range(1, self.frozen_stages + 1): - if i == 1: - m = getattr(self, 'layer1') - else: - m = getattr(self, f'stage{i}') - - m.eval() - for param in m.parameters(): - param.requires_grad = False - - if i < 4: - m = getattr(self, f'transition{i}') - m.eval() - for param in m.parameters(): - param.requires_grad = False - - def init_weights(self): - """Initialize the weights in backbone.""" - super(HRNet, self).init_weights() - - if 
(isinstance(self.init_cfg, dict) - and self.init_cfg['type'] == 'Pretrained'): - # Suppress zero_init_residual if use pretrained model. - return - - if self.zero_init_residual: - for m in self.modules(): - if isinstance(m, Bottleneck): - constant_init(m.norm3, 0) - elif isinstance(m, BasicBlock): - constant_init(m.norm2, 0) - - def forward(self, x): - """Forward function.""" - x = self.conv1(x) - x = self.norm1(x) - x = self.relu(x) - x = self.conv2(x) - x = self.norm2(x) - x = self.relu(x) - x = self.layer1(x) - - x_list = [] - for i in range(self.stage2_cfg['num_branches']): - if self.transition1[i] is not None: - x_list.append(self.transition1[i](x)) - else: - x_list.append(x) - y_list = self.stage2(x_list) - - x_list = [] - for i in range(self.stage3_cfg['num_branches']): - if self.transition2[i] is not None: - x_list.append(self.transition2[i](y_list[-1])) - else: - x_list.append(y_list[i]) - y_list = self.stage3(x_list) - - x_list = [] - for i in range(self.stage4_cfg['num_branches']): - if self.transition3[i] is not None: - x_list.append(self.transition3[i](y_list[-1])) - else: - x_list.append(y_list[i]) - y_list = self.stage4(x_list) - - return tuple(y_list) - - def train(self, mode=True): - """Convert the model into training mode.""" - super().train(mode) - self._freeze_stages() - if mode and self.norm_eval: - for m in self.modules(): - if isinstance(m, _BatchNorm): - m.eval() +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import torch.nn as nn +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmengine.model import BaseModule, constant_init +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone +from .resnet import BasicBlock, Bottleneck, get_expansion + + +class HRModule(BaseModule): + """High-Resolution Module for HRNet. + + In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange + is in this module. 
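+
+    Example:
+        A minimal two-branch sketch (illustrative only): it assumes ``torch``
+        is imported and reuses the ``BasicBlock`` imported above from
+        ``.resnet``.
+
+        >>> module = HRModule(
+        >>>     num_branches=2,
+        >>>     blocks=BasicBlock,
+        >>>     num_blocks=(4, 4),
+        >>>     in_channels=[32, 64],
+        >>>     num_channels=(32, 64),
+        >>>     multiscale_output=True)
+        >>> inputs = [torch.rand(1, 32, 16, 16), torch.rand(1, 64, 8, 8)]
+        >>> outputs = module(inputs)
+        >>> [tuple(out.shape) for out in outputs]
+        [(1, 32, 16, 16), (1, 64, 8, 8)]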
+ """ + + def __init__(self, + num_branches, + blocks, + num_blocks, + in_channels, + num_channels, + multiscale_output=False, + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + upsample_cfg=dict(mode='nearest', align_corners=None), + init_cfg=None): + + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self._check_branches(num_branches, num_blocks, in_channels, + num_channels) + + self.in_channels = in_channels + self.num_branches = num_branches + + self.multiscale_output = multiscale_output + self.norm_cfg = norm_cfg + self.conv_cfg = conv_cfg + self.upsample_cfg = upsample_cfg + self.with_cp = with_cp + self.branches = self._make_branches(num_branches, blocks, num_blocks, + num_channels) + self.fuse_layers = self._make_fuse_layers() + self.relu = nn.ReLU(inplace=True) + + @staticmethod + def _check_branches(num_branches, num_blocks, in_channels, num_channels): + """Check input to avoid ValueError.""" + if num_branches != len(num_blocks): + error_msg = f'NUM_BRANCHES({num_branches}) ' \ + f'!= NUM_BLOCKS({len(num_blocks)})' + raise ValueError(error_msg) + + if num_branches != len(num_channels): + error_msg = f'NUM_BRANCHES({num_branches}) ' \ + f'!= NUM_CHANNELS({len(num_channels)})' + raise ValueError(error_msg) + + if num_branches != len(in_channels): + error_msg = f'NUM_BRANCHES({num_branches}) ' \ + f'!= NUM_INCHANNELS({len(in_channels)})' + raise ValueError(error_msg) + + def _make_one_branch(self, + branch_index, + block, + num_blocks, + num_channels, + stride=1): + """Make one branch.""" + downsample = None + if stride != 1 or \ + self.in_channels[branch_index] != \ + num_channels[branch_index] * get_expansion(block): + downsample = nn.Sequential( + build_conv_layer( + self.conv_cfg, + self.in_channels[branch_index], + num_channels[branch_index] * get_expansion(block), + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer( + self.norm_cfg, + num_channels[branch_index] * get_expansion(block))[1]) + + layers = [] + layers.append( + block( + self.in_channels[branch_index], + num_channels[branch_index] * get_expansion(block), + stride=stride, + downsample=downsample, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + self.in_channels[branch_index] = \ + num_channels[branch_index] * get_expansion(block) + for _ in range(1, num_blocks[branch_index]): + layers.append( + block( + self.in_channels[branch_index], + num_channels[branch_index] * get_expansion(block), + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + + return nn.Sequential(*layers) + + def _make_branches(self, num_branches, block, num_blocks, num_channels): + """Make branches.""" + branches = [] + + for i in range(num_branches): + branches.append( + self._make_one_branch(i, block, num_blocks, num_channels)) + + return nn.ModuleList(branches) + + def _make_fuse_layers(self): + """Make fuse layer.""" + if self.num_branches == 1: + return None + + num_branches = self.num_branches + in_channels = self.in_channels + fuse_layers = [] + num_out_branches = num_branches if self.multiscale_output else 1 + + for i in range(num_out_branches): + fuse_layer = [] + for j in range(num_branches): + if j > i: + fuse_layer.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[i], + kernel_size=1, + stride=1, + padding=0, + bias=False), + build_norm_layer(self.norm_cfg, in_channels[i])[1], + nn.Upsample( + scale_factor=2**(j - i), + 
mode=self.upsample_cfg['mode'], + align_corners=self. + upsample_cfg['align_corners']))) + elif j == i: + fuse_layer.append(None) + else: + conv_downsamples = [] + for k in range(i - j): + if k == i - j - 1: + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[i], + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[i])[1])) + else: + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[j], + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[j])[1], + nn.ReLU(inplace=True))) + fuse_layer.append(nn.Sequential(*conv_downsamples)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def forward(self, x): + """Forward function.""" + if self.num_branches == 1: + return [self.branches[0](x[0])] + + for i in range(self.num_branches): + x[i] = self.branches[i](x[i]) + + x_fuse = [] + for i in range(len(self.fuse_layers)): + y = 0 + for j in range(self.num_branches): + if i == j: + y += x[j] + else: + y += self.fuse_layers[i][j](x[j]) + x_fuse.append(self.relu(y)) + return x_fuse + + +@MODELS.register_module() +class HRNet(BaseBackbone): + """HRNet backbone. + + `High-Resolution Representations for Labeling Pixels and Regions + `__ + + Args: + extra (dict): detailed configuration for each stage of HRNet. + in_channels (int): Number of input image channels. Default: 3. + conv_cfg (dict): dictionary to construct and config conv layer. + norm_cfg (dict): dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + zero_init_residual (bool): whether to use zero init for last norm layer + in resblocks to let them behave as identity. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + + Example: + >>> from mmpose.models import HRNet + >>> import torch + >>> extra = dict( + >>> stage1=dict( + >>> num_modules=1, + >>> num_branches=1, + >>> block='BOTTLENECK', + >>> num_blocks=(4, ), + >>> num_channels=(64, )), + >>> stage2=dict( + >>> num_modules=1, + >>> num_branches=2, + >>> block='BASIC', + >>> num_blocks=(4, 4), + >>> num_channels=(32, 64)), + >>> stage3=dict( + >>> num_modules=4, + >>> num_branches=3, + >>> block='BASIC', + >>> num_blocks=(4, 4, 4), + >>> num_channels=(32, 64, 128)), + >>> stage4=dict( + >>> num_modules=3, + >>> num_branches=4, + >>> block='BASIC', + >>> num_blocks=(4, 4, 4, 4), + >>> num_channels=(32, 64, 128, 256))) + >>> self = HRNet(extra, in_channels=1) + >>> self.eval() + >>> inputs = torch.rand(1, 1, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... 
print(tuple(level_out.shape)) + (1, 32, 8, 8) + """ + + blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck} + + def __init__( + self, + extra, + in_channels=3, + conv_cfg=None, + norm_cfg=dict(type='BN'), + norm_eval=False, + with_cp=False, + zero_init_residual=False, + frozen_stages=-1, + init_cfg=[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) + ], + ): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self.extra = extra + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.init_cfg = init_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + self.zero_init_residual = zero_init_residual + self.frozen_stages = frozen_stages + + # stem net + self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1) + self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2) + + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + 64, + kernel_size=3, + stride=2, + padding=1, + bias=False) + + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + self.conv_cfg, + 64, + 64, + kernel_size=3, + stride=2, + padding=1, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.relu = nn.ReLU(inplace=True) + + self.upsample_cfg = self.extra.get('upsample', { + 'mode': 'nearest', + 'align_corners': None + }) + + # stage 1 + self.stage1_cfg = self.extra['stage1'] + num_channels = self.stage1_cfg['num_channels'][0] + block_type = self.stage1_cfg['block'] + num_blocks = self.stage1_cfg['num_blocks'][0] + + block = self.blocks_dict[block_type] + stage1_out_channels = num_channels * get_expansion(block) + self.layer1 = self._make_layer(block, 64, stage1_out_channels, + num_blocks) + + # stage 2 + self.stage2_cfg = self.extra['stage2'] + num_channels = self.stage2_cfg['num_channels'] + block_type = self.stage2_cfg['block'] + + block = self.blocks_dict[block_type] + num_channels = [ + channel * get_expansion(block) for channel in num_channels + ] + self.transition1 = self._make_transition_layer([stage1_out_channels], + num_channels) + self.stage2, pre_stage_channels = self._make_stage( + self.stage2_cfg, num_channels) + + # stage 3 + self.stage3_cfg = self.extra['stage3'] + num_channels = self.stage3_cfg['num_channels'] + block_type = self.stage3_cfg['block'] + + block = self.blocks_dict[block_type] + num_channels = [ + channel * get_expansion(block) for channel in num_channels + ] + self.transition2 = self._make_transition_layer(pre_stage_channels, + num_channels) + self.stage3, pre_stage_channels = self._make_stage( + self.stage3_cfg, num_channels) + + # stage 4 + self.stage4_cfg = self.extra['stage4'] + num_channels = self.stage4_cfg['num_channels'] + block_type = self.stage4_cfg['block'] + + block = self.blocks_dict[block_type] + num_channels = [ + channel * get_expansion(block) for channel in num_channels + ] + self.transition3 = self._make_transition_layer(pre_stage_channels, + num_channels) + + self.stage4, pre_stage_channels = self._make_stage( + self.stage4_cfg, + num_channels, + multiscale_output=self.stage4_cfg.get('multiscale_output', False)) + + self._freeze_stages() + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: the normalization layer named "norm2" """ + return getattr(self, self.norm2_name) + + def _make_transition_layer(self, num_channels_pre_layer, + 
num_channels_cur_layer): + """Make transition layer.""" + num_branches_cur = len(num_channels_cur_layer) + num_branches_pre = len(num_channels_pre_layer) + + transition_layers = [] + for i in range(num_branches_cur): + if i < num_branches_pre: + if num_channels_cur_layer[i] != num_channels_pre_layer[i]: + transition_layers.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + num_channels_pre_layer[i], + num_channels_cur_layer[i], + kernel_size=3, + stride=1, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + num_channels_cur_layer[i])[1], + nn.ReLU(inplace=True))) + else: + transition_layers.append(None) + else: + conv_downsamples = [] + for j in range(i + 1 - num_branches_pre): + in_channels = num_channels_pre_layer[-1] + out_channels = num_channels_cur_layer[i] \ + if j == i - num_branches_pre else in_channels + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels, + out_channels, + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, out_channels)[1], + nn.ReLU(inplace=True))) + transition_layers.append(nn.Sequential(*conv_downsamples)) + + return nn.ModuleList(transition_layers) + + def _make_layer(self, block, in_channels, out_channels, blocks, stride=1): + """Make layer.""" + downsample = None + if stride != 1 or in_channels != out_channels: + downsample = nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels, + out_channels, + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer(self.norm_cfg, out_channels)[1]) + + layers = [] + layers.append( + block( + in_channels, + out_channels, + stride=stride, + downsample=downsample, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + for _ in range(1, blocks): + layers.append( + block( + out_channels, + out_channels, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + + return nn.Sequential(*layers) + + def _make_stage(self, layer_config, in_channels, multiscale_output=True): + """Make stage.""" + num_modules = layer_config['num_modules'] + num_branches = layer_config['num_branches'] + num_blocks = layer_config['num_blocks'] + num_channels = layer_config['num_channels'] + block = self.blocks_dict[layer_config['block']] + + hr_modules = [] + for i in range(num_modules): + # multi_scale_output is only used for the last module + if not multiscale_output and i == num_modules - 1: + reset_multiscale_output = False + else: + reset_multiscale_output = True + + hr_modules.append( + HRModule( + num_branches, + block, + num_blocks, + in_channels, + num_channels, + reset_multiscale_output, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg, + upsample_cfg=self.upsample_cfg)) + + in_channels = hr_modules[-1].in_channels + + return nn.Sequential(*hr_modules), in_channels + + def _freeze_stages(self): + """Freeze parameters.""" + if self.frozen_stages >= 0: + self.norm1.eval() + self.norm2.eval() + + for m in [self.conv1, self.norm1, self.conv2, self.norm2]: + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + if i == 1: + m = getattr(self, 'layer1') + else: + m = getattr(self, f'stage{i}') + + m.eval() + for param in m.parameters(): + param.requires_grad = False + + if i < 4: + m = getattr(self, f'transition{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self): + """Initialize the weights in backbone.""" + super(HRNet, self).init_weights() + + if 
(isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress zero_init_residual if use pretrained model. + return + + if self.zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + constant_init(m.norm3, 0) + elif isinstance(m, BasicBlock): + constant_init(m.norm2, 0) + + def forward(self, x): + """Forward function.""" + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.norm2(x) + x = self.relu(x) + x = self.layer1(x) + + x_list = [] + for i in range(self.stage2_cfg['num_branches']): + if self.transition1[i] is not None: + x_list.append(self.transition1[i](x)) + else: + x_list.append(x) + y_list = self.stage2(x_list) + + x_list = [] + for i in range(self.stage3_cfg['num_branches']): + if self.transition2[i] is not None: + x_list.append(self.transition2[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage3(x_list) + + x_list = [] + for i in range(self.stage4_cfg['num_branches']): + if self.transition3[i] is not None: + x_list.append(self.transition3[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage4(x_list) + + return tuple(y_list) + + def train(self, mode=True): + """Convert the model into training mode.""" + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmpose/models/backbones/litehrnet.py b/mmpose/models/backbones/litehrnet.py index 1ad5f63014..753523e068 100644 --- a/mmpose/models/backbones/litehrnet.py +++ b/mmpose/models/backbones/litehrnet.py @@ -1,999 +1,999 @@ -# ------------------------------------------------------------------------------ -# Adapted from https://github.com/HRNet/Lite-HRNet -# Original licence: Apache License 2.0. -# ------------------------------------------------------------------------------ - -import mmengine -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.checkpoint as cp -from mmcv.cnn import (ConvModule, DepthwiseSeparableConvModule, - build_conv_layer, build_norm_layer) -from mmengine.model import BaseModule -from torch.nn.modules.batchnorm import _BatchNorm - -from mmpose.registry import MODELS -from .base_backbone import BaseBackbone -from .utils import channel_shuffle - - -class SpatialWeighting(BaseModule): - """Spatial weighting module. - - Args: - channels (int): The channels of the module. - ratio (int): channel reduction ratio. - conv_cfg (dict): Config dict for convolution layer. - Default: None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: None. - act_cfg (dict): Config dict for activation layer. - Default: (dict(type='ReLU'), dict(type='Sigmoid')). - The last ConvModule uses Sigmoid by default. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, - channels, - ratio=16, - conv_cfg=None, - norm_cfg=None, - act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')), - init_cfg=None): - super().__init__(init_cfg=init_cfg) - if isinstance(act_cfg, dict): - act_cfg = (act_cfg, act_cfg) - assert len(act_cfg) == 2 - assert mmengine.is_tuple_of(act_cfg, dict) - self.global_avgpool = nn.AdaptiveAvgPool2d(1) - self.conv1 = ConvModule( - in_channels=channels, - out_channels=int(channels / ratio), - kernel_size=1, - stride=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg[0]) - self.conv2 = ConvModule( - in_channels=int(channels / ratio), - out_channels=channels, - kernel_size=1, - stride=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg[1]) - - def forward(self, x): - out = self.global_avgpool(x) - out = self.conv1(out) - out = self.conv2(out) - return x * out - - -class CrossResolutionWeighting(BaseModule): - """Cross-resolution channel weighting module. - - Args: - channels (int): The channels of the module. - ratio (int): channel reduction ratio. - conv_cfg (dict): Config dict for convolution layer. - Default: None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: None. - act_cfg (dict): Config dict for activation layer. - Default: (dict(type='ReLU'), dict(type='Sigmoid')). - The last ConvModule uses Sigmoid by default. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - channels, - ratio=16, - conv_cfg=None, - norm_cfg=None, - act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')), - init_cfg=None): - super().__init__(init_cfg=init_cfg) - if isinstance(act_cfg, dict): - act_cfg = (act_cfg, act_cfg) - assert len(act_cfg) == 2 - assert mmengine.is_tuple_of(act_cfg, dict) - self.channels = channels - total_channel = sum(channels) - self.conv1 = ConvModule( - in_channels=total_channel, - out_channels=int(total_channel / ratio), - kernel_size=1, - stride=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg[0]) - self.conv2 = ConvModule( - in_channels=int(total_channel / ratio), - out_channels=total_channel, - kernel_size=1, - stride=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg[1]) - - def forward(self, x): - mini_size = x[-1].size()[-2:] - out = [F.adaptive_avg_pool2d(s, mini_size) for s in x[:-1]] + [x[-1]] - out = torch.cat(out, dim=1) - out = self.conv1(out) - out = self.conv2(out) - out = torch.split(out, self.channels, dim=1) - out = [ - s * F.interpolate(a, size=s.size()[-2:], mode='nearest') - for s, a in zip(x, out) - ] - return out - - -class ConditionalChannelWeighting(BaseModule): - """Conditional channel weighting block. - - Args: - in_channels (int): The input channels of the block. - stride (int): Stride of the 3x3 convolution layer. - reduce_ratio (int): channel reduction ratio. - conv_cfg (dict): Config dict for convolution layer. - Default: None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, - in_channels, - stride, - reduce_ratio, - conv_cfg=None, - norm_cfg=dict(type='BN'), - with_cp=False, - init_cfg=None): - super().__init__(init_cfg=init_cfg) - self.with_cp = with_cp - self.stride = stride - assert stride in [1, 2] - - branch_channels = [channel // 2 for channel in in_channels] - - self.cross_resolution_weighting = CrossResolutionWeighting( - branch_channels, - ratio=reduce_ratio, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg) - - self.depthwise_convs = nn.ModuleList([ - ConvModule( - channel, - channel, - kernel_size=3, - stride=self.stride, - padding=1, - groups=channel, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None) for channel in branch_channels - ]) - - self.spatial_weighting = nn.ModuleList([ - SpatialWeighting(channels=channel, ratio=4) - for channel in branch_channels - ]) - - def forward(self, x): - - def _inner_forward(x): - x = [s.chunk(2, dim=1) for s in x] - x1 = [s[0] for s in x] - x2 = [s[1] for s in x] - - x2 = self.cross_resolution_weighting(x2) - x2 = [dw(s) for s, dw in zip(x2, self.depthwise_convs)] - x2 = [sw(s) for s, sw in zip(x2, self.spatial_weighting)] - - out = [torch.cat([s1, s2], dim=1) for s1, s2 in zip(x1, x2)] - out = [channel_shuffle(s, 2) for s in out] - - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - return out - - -class Stem(BaseModule): - """Stem network block. - - Args: - in_channels (int): The input channels of the block. - stem_channels (int): Output channels of the stem layer. - out_channels (int): The output channels of the block. - expand_ratio (int): adjusts number of channels of the hidden layer - in InvertedResidual by this amount. - conv_cfg (dict): Config dict for convolution layer. - Default: None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, - in_channels, - stem_channels, - out_channels, - expand_ratio, - conv_cfg=None, - norm_cfg=dict(type='BN'), - with_cp=False, - init_cfg=None): - super().__init__(init_cfg=init_cfg) - self.in_channels = in_channels - self.out_channels = out_channels - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.with_cp = with_cp - - self.conv1 = ConvModule( - in_channels=in_channels, - out_channels=stem_channels, - kernel_size=3, - stride=2, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=dict(type='ReLU')) - - mid_channels = int(round(stem_channels * expand_ratio)) - branch_channels = stem_channels // 2 - if stem_channels == self.out_channels: - inc_channels = self.out_channels - branch_channels - else: - inc_channels = self.out_channels - stem_channels - - self.branch1 = nn.Sequential( - ConvModule( - branch_channels, - branch_channels, - kernel_size=3, - stride=2, - padding=1, - groups=branch_channels, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None), - ConvModule( - branch_channels, - inc_channels, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=dict(type='ReLU')), - ) - - self.expand_conv = ConvModule( - branch_channels, - mid_channels, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=dict(type='ReLU')) - self.depthwise_conv = ConvModule( - mid_channels, - mid_channels, - kernel_size=3, - stride=2, - padding=1, - groups=mid_channels, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None) - self.linear_conv = ConvModule( - mid_channels, - branch_channels - if stem_channels == self.out_channels else stem_channels, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=dict(type='ReLU')) - - def forward(self, x): - - def _inner_forward(x): - x = self.conv1(x) - x1, x2 = x.chunk(2, dim=1) - - x2 = self.expand_conv(x2) - x2 = self.depthwise_conv(x2) - x2 = self.linear_conv(x2) - - out = torch.cat((self.branch1(x1), x2), dim=1) - - out = channel_shuffle(out, 2) - - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - return out - - -class IterativeHead(BaseModule): - """Extra iterative head for feature learning. - - Args: - in_channels (int): The input channels of the block. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, in_channels, norm_cfg=dict(type='BN'), init_cfg=None): - super().__init__(init_cfg=init_cfg) - projects = [] - num_branchs = len(in_channels) - self.in_channels = in_channels[::-1] - - for i in range(num_branchs): - if i != num_branchs - 1: - projects.append( - DepthwiseSeparableConvModule( - in_channels=self.in_channels[i], - out_channels=self.in_channels[i + 1], - kernel_size=3, - stride=1, - padding=1, - norm_cfg=norm_cfg, - act_cfg=dict(type='ReLU'), - dw_act_cfg=None, - pw_act_cfg=dict(type='ReLU'))) - else: - projects.append( - DepthwiseSeparableConvModule( - in_channels=self.in_channels[i], - out_channels=self.in_channels[i], - kernel_size=3, - stride=1, - padding=1, - norm_cfg=norm_cfg, - act_cfg=dict(type='ReLU'), - dw_act_cfg=None, - pw_act_cfg=dict(type='ReLU'))) - self.projects = nn.ModuleList(projects) - - def forward(self, x): - x = x[::-1] - - y = [] - last_x = None - for i, s in enumerate(x): - if last_x is not None: - last_x = F.interpolate( - last_x, - size=s.size()[-2:], - mode='bilinear', - align_corners=True) - s = s + last_x - s = self.projects[i](s) - y.append(s) - last_x = s - - return y[::-1] - - -class ShuffleUnit(BaseModule): - """InvertedResidual block for ShuffleNetV2 backbone. - - Args: - in_channels (int): The input channels of the block. - out_channels (int): The output channels of the block. - stride (int): Stride of the 3x3 convolution layer. Default: 1 - conv_cfg (dict): Config dict for convolution layer. - Default: None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='ReLU'). - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, - in_channels, - out_channels, - stride=1, - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU'), - with_cp=False, - init_cfg=None): - super().__init__(init_cfg=init_cfg) - self.stride = stride - self.with_cp = with_cp - - branch_features = out_channels // 2 - if self.stride == 1: - assert in_channels == branch_features * 2, ( - f'in_channels ({in_channels}) should equal to ' - f'branch_features * 2 ({branch_features * 2}) ' - 'when stride is 1') - - if in_channels != branch_features * 2: - assert self.stride != 1, ( - f'stride ({self.stride}) should not equal 1 when ' - f'in_channels != branch_features * 2') - - if self.stride > 1: - self.branch1 = nn.Sequential( - ConvModule( - in_channels, - in_channels, - kernel_size=3, - stride=self.stride, - padding=1, - groups=in_channels, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None), - ConvModule( - in_channels, - branch_features, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg), - ) - - self.branch2 = nn.Sequential( - ConvModule( - in_channels if (self.stride > 1) else branch_features, - branch_features, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg), - ConvModule( - branch_features, - branch_features, - kernel_size=3, - stride=self.stride, - padding=1, - groups=branch_features, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None), - ConvModule( - branch_features, - branch_features, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg)) - - def forward(self, x): - - def _inner_forward(x): - if self.stride > 1: - out = torch.cat((self.branch1(x), self.branch2(x)), dim=1) - else: - x1, x2 = x.chunk(2, dim=1) - out = torch.cat((x1, self.branch2(x2)), dim=1) - - out = channel_shuffle(out, 2) - - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - return out - - -class LiteHRModule(BaseModule): - """High-Resolution Module for LiteHRNet. - - It contains conditional channel weighting blocks and - shuffle blocks. - - - Args: - num_branches (int): Number of branches in the module. - num_blocks (int): Number of blocks in the module. - in_channels (list(int)): Number of input image channels. - reduce_ratio (int): Channel reduction ratio. - module_type (str): 'LITE' or 'NAIVE' - multiscale_output (bool): Whether to output multi-scale features. - with_fuse (bool): Whether to use fuse layers. - conv_cfg (dict): dictionary to construct and config conv layer. - norm_cfg (dict): dictionary to construct and config norm layer. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, - num_branches, - num_blocks, - in_channels, - reduce_ratio, - module_type, - multiscale_output=False, - with_fuse=True, - conv_cfg=None, - norm_cfg=dict(type='BN'), - with_cp=False, - init_cfg=None): - super().__init__(init_cfg=init_cfg) - self._check_branches(num_branches, in_channels) - - self.in_channels = in_channels - self.num_branches = num_branches - - self.module_type = module_type - self.multiscale_output = multiscale_output - self.with_fuse = with_fuse - self.norm_cfg = norm_cfg - self.conv_cfg = conv_cfg - self.with_cp = with_cp - - if self.module_type.upper() == 'LITE': - self.layers = self._make_weighting_blocks(num_blocks, reduce_ratio) - elif self.module_type.upper() == 'NAIVE': - self.layers = self._make_naive_branches(num_branches, num_blocks) - else: - raise ValueError("module_type should be either 'LITE' or 'NAIVE'.") - if self.with_fuse: - self.fuse_layers = self._make_fuse_layers() - self.relu = nn.ReLU() - - def _check_branches(self, num_branches, in_channels): - """Check input to avoid ValueError.""" - if num_branches != len(in_channels): - error_msg = f'NUM_BRANCHES({num_branches}) ' \ - f'!= NUM_INCHANNELS({len(in_channels)})' - raise ValueError(error_msg) - - def _make_weighting_blocks(self, num_blocks, reduce_ratio, stride=1): - """Make channel weighting blocks.""" - layers = [] - for i in range(num_blocks): - layers.append( - ConditionalChannelWeighting( - self.in_channels, - stride=stride, - reduce_ratio=reduce_ratio, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - with_cp=self.with_cp)) - - return nn.Sequential(*layers) - - def _make_one_branch(self, branch_index, num_blocks, stride=1): - """Make one branch.""" - layers = [] - layers.append( - ShuffleUnit( - self.in_channels[branch_index], - self.in_channels[branch_index], - stride=stride, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=dict(type='ReLU'), - with_cp=self.with_cp)) - for i in range(1, num_blocks): - layers.append( - ShuffleUnit( - self.in_channels[branch_index], - self.in_channels[branch_index], - stride=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=dict(type='ReLU'), - with_cp=self.with_cp)) - - return nn.Sequential(*layers) - - def _make_naive_branches(self, num_branches, num_blocks): - """Make branches.""" - branches = [] - - for i in range(num_branches): - branches.append(self._make_one_branch(i, num_blocks)) - - return nn.ModuleList(branches) - - def _make_fuse_layers(self): - """Make fuse layer.""" - if self.num_branches == 1: - return None - - num_branches = self.num_branches - in_channels = self.in_channels - fuse_layers = [] - num_out_branches = num_branches if self.multiscale_output else 1 - for i in range(num_out_branches): - fuse_layer = [] - for j in range(num_branches): - if j > i: - fuse_layer.append( - nn.Sequential( - build_conv_layer( - self.conv_cfg, - in_channels[j], - in_channels[i], - kernel_size=1, - stride=1, - padding=0, - bias=False), - build_norm_layer(self.norm_cfg, in_channels[i])[1], - nn.Upsample( - scale_factor=2**(j - i), mode='nearest'))) - elif j == i: - fuse_layer.append(None) - else: - conv_downsamples = [] - for k in range(i - j): - if k == i - j - 1: - conv_downsamples.append( - nn.Sequential( - build_conv_layer( - self.conv_cfg, - in_channels[j], - in_channels[j], - kernel_size=3, - stride=2, - padding=1, - groups=in_channels[j], - bias=False), - build_norm_layer(self.norm_cfg, - in_channels[j])[1], - build_conv_layer( - self.conv_cfg, - in_channels[j], - in_channels[i], - 
kernel_size=1, - stride=1, - padding=0, - bias=False), - build_norm_layer(self.norm_cfg, - in_channels[i])[1])) - else: - conv_downsamples.append( - nn.Sequential( - build_conv_layer( - self.conv_cfg, - in_channels[j], - in_channels[j], - kernel_size=3, - stride=2, - padding=1, - groups=in_channels[j], - bias=False), - build_norm_layer(self.norm_cfg, - in_channels[j])[1], - build_conv_layer( - self.conv_cfg, - in_channels[j], - in_channels[j], - kernel_size=1, - stride=1, - padding=0, - bias=False), - build_norm_layer(self.norm_cfg, - in_channels[j])[1], - nn.ReLU(inplace=True))) - fuse_layer.append(nn.Sequential(*conv_downsamples)) - fuse_layers.append(nn.ModuleList(fuse_layer)) - - return nn.ModuleList(fuse_layers) - - def forward(self, x): - """Forward function.""" - if self.num_branches == 1: - return [self.layers[0](x[0])] - - if self.module_type.upper() == 'LITE': - out = self.layers(x) - elif self.module_type.upper() == 'NAIVE': - for i in range(self.num_branches): - x[i] = self.layers[i](x[i]) - out = x - - if self.with_fuse: - out_fuse = [] - for i in range(len(self.fuse_layers)): - # `y = 0` will lead to decreased accuracy (0.5~1 mAP) - y = out[0] if i == 0 else self.fuse_layers[i][0](out[0]) - for j in range(self.num_branches): - if i == j: - y += out[j] - else: - y += self.fuse_layers[i][j](out[j]) - out_fuse.append(self.relu(y)) - out = out_fuse - if not self.multiscale_output: - out = [out[0]] - return out - - -@MODELS.register_module() -class LiteHRNet(BaseBackbone): - """Lite-HRNet backbone. - - `Lite-HRNet: A Lightweight High-Resolution Network - `_. - - Code adapted from 'https://github.com/HRNet/Lite-HRNet'. - - Args: - extra (dict): detailed configuration for each stage of HRNet. - in_channels (int): Number of input image channels. Default: 3. - conv_cfg (dict): dictionary to construct and config conv layer. - norm_cfg (dict): dictionary to construct and config norm layer. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Default: False - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: - ``[ - dict(type='Normal', std=0.001, layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ]`` - - Example: - >>> from mmpose.models import LiteHRNet - >>> import torch - >>> extra=dict( - >>> stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), - >>> num_stages=3, - >>> stages_spec=dict( - >>> num_modules=(2, 4, 2), - >>> num_branches=(2, 3, 4), - >>> num_blocks=(2, 2, 2), - >>> module_type=('LITE', 'LITE', 'LITE'), - >>> with_fuse=(True, True, True), - >>> reduce_ratios=(8, 8, 8), - >>> num_channels=( - >>> (40, 80), - >>> (40, 80, 160), - >>> (40, 80, 160, 320), - >>> )), - >>> with_head=False) - >>> self = LiteHRNet(extra, in_channels=1) - >>> self.eval() - >>> inputs = torch.rand(1, 1, 32, 32) - >>> level_outputs = self.forward(inputs) - >>> for level_out in level_outputs: - ... 
print(tuple(level_out.shape)) - (1, 40, 8, 8) - """ - - def __init__(self, - extra, - in_channels=3, - conv_cfg=None, - norm_cfg=dict(type='BN'), - norm_eval=False, - with_cp=False, - init_cfg=[ - dict(type='Normal', std=0.001, layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ]): - super().__init__(init_cfg=init_cfg) - self.extra = extra - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.norm_eval = norm_eval - self.with_cp = with_cp - - self.stem = Stem( - in_channels, - stem_channels=self.extra['stem']['stem_channels'], - out_channels=self.extra['stem']['out_channels'], - expand_ratio=self.extra['stem']['expand_ratio'], - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg) - - self.num_stages = self.extra['num_stages'] - self.stages_spec = self.extra['stages_spec'] - - num_channels_last = [ - self.stem.out_channels, - ] - for i in range(self.num_stages): - num_channels = self.stages_spec['num_channels'][i] - num_channels = [num_channels[i] for i in range(len(num_channels))] - setattr( - self, f'transition{i}', - self._make_transition_layer(num_channels_last, num_channels)) - - stage, num_channels_last = self._make_stage( - self.stages_spec, i, num_channels, multiscale_output=True) - setattr(self, f'stage{i}', stage) - - self.with_head = self.extra['with_head'] - if self.with_head: - self.head_layer = IterativeHead( - in_channels=num_channels_last, - norm_cfg=self.norm_cfg, - ) - - def _make_transition_layer(self, num_channels_pre_layer, - num_channels_cur_layer): - """Make transition layer.""" - num_branches_cur = len(num_channels_cur_layer) - num_branches_pre = len(num_channels_pre_layer) - - transition_layers = [] - for i in range(num_branches_cur): - if i < num_branches_pre: - if num_channels_cur_layer[i] != num_channels_pre_layer[i]: - transition_layers.append( - nn.Sequential( - build_conv_layer( - self.conv_cfg, - num_channels_pre_layer[i], - num_channels_pre_layer[i], - kernel_size=3, - stride=1, - padding=1, - groups=num_channels_pre_layer[i], - bias=False), - build_norm_layer(self.norm_cfg, - num_channels_pre_layer[i])[1], - build_conv_layer( - self.conv_cfg, - num_channels_pre_layer[i], - num_channels_cur_layer[i], - kernel_size=1, - stride=1, - padding=0, - bias=False), - build_norm_layer(self.norm_cfg, - num_channels_cur_layer[i])[1], - nn.ReLU())) - else: - transition_layers.append(None) - else: - conv_downsamples = [] - for j in range(i + 1 - num_branches_pre): - in_channels = num_channels_pre_layer[-1] - out_channels = num_channels_cur_layer[i] \ - if j == i - num_branches_pre else in_channels - conv_downsamples.append( - nn.Sequential( - build_conv_layer( - self.conv_cfg, - in_channels, - in_channels, - kernel_size=3, - stride=2, - padding=1, - groups=in_channels, - bias=False), - build_norm_layer(self.norm_cfg, in_channels)[1], - build_conv_layer( - self.conv_cfg, - in_channels, - out_channels, - kernel_size=1, - stride=1, - padding=0, - bias=False), - build_norm_layer(self.norm_cfg, out_channels)[1], - nn.ReLU())) - transition_layers.append(nn.Sequential(*conv_downsamples)) - - return nn.ModuleList(transition_layers) - - def _make_stage(self, - stages_spec, - stage_index, - in_channels, - multiscale_output=True): - num_modules = stages_spec['num_modules'][stage_index] - num_branches = stages_spec['num_branches'][stage_index] - num_blocks = stages_spec['num_blocks'][stage_index] - reduce_ratio = stages_spec['reduce_ratios'][stage_index] - with_fuse = stages_spec['with_fuse'][stage_index] - module_type = 
stages_spec['module_type'][stage_index] - - modules = [] - for i in range(num_modules): - # multi_scale_output is only used last module - if not multiscale_output and i == num_modules - 1: - reset_multiscale_output = False - else: - reset_multiscale_output = True - - modules.append( - LiteHRModule( - num_branches, - num_blocks, - in_channels, - reduce_ratio, - module_type, - multiscale_output=reset_multiscale_output, - with_fuse=with_fuse, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - with_cp=self.with_cp)) - in_channels = modules[-1].in_channels - - return nn.Sequential(*modules), in_channels - - def forward(self, x): - """Forward function.""" - x = self.stem(x) - - y_list = [x] - for i in range(self.num_stages): - x_list = [] - transition = getattr(self, f'transition{i}') - for j in range(self.stages_spec['num_branches'][i]): - if transition[j]: - if j >= len(y_list): - x_list.append(transition[j](y_list[-1])) - else: - x_list.append(transition[j](y_list[j])) - else: - x_list.append(y_list[j]) - y_list = getattr(self, f'stage{i}')(x_list) - - x = y_list - if self.with_head: - x = self.head_layer(x) - - return (x[0], ) - - def train(self, mode=True): - """Convert the model into training mode.""" - super().train(mode) - if mode and self.norm_eval: - for m in self.modules(): - if isinstance(m, _BatchNorm): - m.eval() +# ------------------------------------------------------------------------------ +# Adapted from https://github.com/HRNet/Lite-HRNet +# Original licence: Apache License 2.0. +# ------------------------------------------------------------------------------ + +import mmengine +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmcv.cnn import (ConvModule, DepthwiseSeparableConvModule, + build_conv_layer, build_norm_layer) +from mmengine.model import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone +from .utils import channel_shuffle + + +class SpatialWeighting(BaseModule): + """Spatial weighting module. + + Args: + channels (int): The channels of the module. + ratio (int): channel reduction ratio. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: None. + act_cfg (dict): Config dict for activation layer. + Default: (dict(type='ReLU'), dict(type='Sigmoid')). + The last ConvModule uses Sigmoid by default. + init_cfg (dict or list[dict], optional): Initialization config dict. 
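Several blocks in the rewritten file rely on the imported ``channel_shuffle`` helper. For reference, the sketch below shows the standard ShuffleNet-style shuffle it is assumed to perform; the function name and body here are illustrative, not the helper's actual source.

import torch

def _channel_shuffle_sketch(x, groups):
    # Assumed-equivalent behaviour of the imported `channel_shuffle` helper:
    # interleave channel groups so information mixes across the halves that
    # the blocks below split and re-concatenate.
    batch, channels, height, width = x.size()
    assert channels % groups == 0
    x = x.view(batch, groups, channels // groups, height, width)
    x = x.transpose(1, 2).contiguous()
    return x.view(batch, channels, height, width)

print(_channel_shuffle_sketch(torch.rand(1, 8, 4, 4), groups=2).shape)  # torch.Size([1, 8, 4, 4])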
+ Default: None + """ + + def __init__(self, + channels, + ratio=16, + conv_cfg=None, + norm_cfg=None, + act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + if isinstance(act_cfg, dict): + act_cfg = (act_cfg, act_cfg) + assert len(act_cfg) == 2 + assert mmengine.is_tuple_of(act_cfg, dict) + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + self.conv1 = ConvModule( + in_channels=channels, + out_channels=int(channels / ratio), + kernel_size=1, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg[0]) + self.conv2 = ConvModule( + in_channels=int(channels / ratio), + out_channels=channels, + kernel_size=1, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg[1]) + + def forward(self, x): + out = self.global_avgpool(x) + out = self.conv1(out) + out = self.conv2(out) + return x * out + + +class CrossResolutionWeighting(BaseModule): + """Cross-resolution channel weighting module. + + Args: + channels (int): The channels of the module. + ratio (int): channel reduction ratio. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: None. + act_cfg (dict): Config dict for activation layer. + Default: (dict(type='ReLU'), dict(type='Sigmoid')). + The last ConvModule uses Sigmoid by default. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + channels, + ratio=16, + conv_cfg=None, + norm_cfg=None, + act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + if isinstance(act_cfg, dict): + act_cfg = (act_cfg, act_cfg) + assert len(act_cfg) == 2 + assert mmengine.is_tuple_of(act_cfg, dict) + self.channels = channels + total_channel = sum(channels) + self.conv1 = ConvModule( + in_channels=total_channel, + out_channels=int(total_channel / ratio), + kernel_size=1, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg[0]) + self.conv2 = ConvModule( + in_channels=int(total_channel / ratio), + out_channels=total_channel, + kernel_size=1, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg[1]) + + def forward(self, x): + mini_size = x[-1].size()[-2:] + out = [F.adaptive_avg_pool2d(s, mini_size) for s in x[:-1]] + [x[-1]] + out = torch.cat(out, dim=1) + out = self.conv1(out) + out = self.conv2(out) + out = torch.split(out, self.channels, dim=1) + out = [ + s * F.interpolate(a, size=s.size()[-2:], mode='nearest') + for s, a in zip(x, out) + ] + return out + + +class ConditionalChannelWeighting(BaseModule): + """Conditional channel weighting block. + + Args: + in_channels (int): The input channels of the block. + stride (int): Stride of the 3x3 convolution layer. + reduce_ratio (int): channel reduction ratio. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. 
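For intuition, here is a minimal sketch of how the two weighting blocks above behave; it is illustrative only, assumes the classes are in scope (e.g. imported from this backbone module), and uses arbitrary channel sizes.

import torch

# Channel-wise gating: the output has exactly the input's shape.
sw = SpatialWeighting(channels=64, ratio=16)
print(sw(torch.rand(1, 64, 32, 32)).shape)  # torch.Size([1, 64, 32, 32])

# Cross-resolution weighting acts on a list of branches and preserves each branch's shape.
crw = CrossResolutionWeighting(channels=[32, 64], ratio=8)
pyramid = [torch.rand(1, 32, 32, 32), torch.rand(1, 64, 16, 16)]
print([o.shape for o in crw(pyramid)])
# [torch.Size([1, 32, 32, 32]), torch.Size([1, 64, 16, 16])]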
+ Default: None + """ + + def __init__(self, + in_channels, + stride, + reduce_ratio, + conv_cfg=None, + norm_cfg=dict(type='BN'), + with_cp=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.with_cp = with_cp + self.stride = stride + assert stride in [1, 2] + + branch_channels = [channel // 2 for channel in in_channels] + + self.cross_resolution_weighting = CrossResolutionWeighting( + branch_channels, + ratio=reduce_ratio, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg) + + self.depthwise_convs = nn.ModuleList([ + ConvModule( + channel, + channel, + kernel_size=3, + stride=self.stride, + padding=1, + groups=channel, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) for channel in branch_channels + ]) + + self.spatial_weighting = nn.ModuleList([ + SpatialWeighting(channels=channel, ratio=4) + for channel in branch_channels + ]) + + def forward(self, x): + + def _inner_forward(x): + x = [s.chunk(2, dim=1) for s in x] + x1 = [s[0] for s in x] + x2 = [s[1] for s in x] + + x2 = self.cross_resolution_weighting(x2) + x2 = [dw(s) for s, dw in zip(x2, self.depthwise_convs)] + x2 = [sw(s) for s, sw in zip(x2, self.spatial_weighting)] + + out = [torch.cat([s1, s2], dim=1) for s1, s2 in zip(x1, x2)] + out = [channel_shuffle(s, 2) for s in out] + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +class Stem(BaseModule): + """Stem network block. + + Args: + in_channels (int): The input channels of the block. + stem_channels (int): Output channels of the stem layer. + out_channels (int): The output channels of the block. + expand_ratio (int): adjusts number of channels of the hidden layer + in InvertedResidual by this amount. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. 
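A short sketch of ``ConditionalChannelWeighting`` on a two-branch feature list; the branch widths are arbitrary but must be even, since each branch is chunked into two halves.

import torch

ccw = ConditionalChannelWeighting(in_channels=[40, 80], stride=1, reduce_ratio=8)
feats = [torch.rand(1, 40, 64, 64), torch.rand(1, 80, 32, 32)]
print([o.shape for o in ccw(feats)])
# [torch.Size([1, 40, 64, 64]), torch.Size([1, 80, 32, 32])] -- stride=1 keeps every branch's shape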
+ Default: None + """ + + def __init__(self, + in_channels, + stem_channels, + out_channels, + expand_ratio, + conv_cfg=None, + norm_cfg=dict(type='BN'), + with_cp=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + + self.conv1 = ConvModule( + in_channels=in_channels, + out_channels=stem_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type='ReLU')) + + mid_channels = int(round(stem_channels * expand_ratio)) + branch_channels = stem_channels // 2 + if stem_channels == self.out_channels: + inc_channels = self.out_channels - branch_channels + else: + inc_channels = self.out_channels - stem_channels + + self.branch1 = nn.Sequential( + ConvModule( + branch_channels, + branch_channels, + kernel_size=3, + stride=2, + padding=1, + groups=branch_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None), + ConvModule( + branch_channels, + inc_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU')), + ) + + self.expand_conv = ConvModule( + branch_channels, + mid_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU')) + self.depthwise_conv = ConvModule( + mid_channels, + mid_channels, + kernel_size=3, + stride=2, + padding=1, + groups=mid_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + self.linear_conv = ConvModule( + mid_channels, + branch_channels + if stem_channels == self.out_channels else stem_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU')) + + def forward(self, x): + + def _inner_forward(x): + x = self.conv1(x) + x1, x2 = x.chunk(2, dim=1) + + x2 = self.expand_conv(x2) + x2 = self.depthwise_conv(x2) + x2 = self.linear_conv(x2) + + out = torch.cat((self.branch1(x1), x2), dim=1) + + out = channel_shuffle(out, 2) + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +class IterativeHead(BaseModule): + """Extra iterative head for feature learning. + + Args: + in_channels (int): The input channels of the block. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + init_cfg (dict or list[dict], optional): Initialization config dict. 
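A quick sketch of the stem's effect on an input image; the 256x192 input and the channel settings are illustrative.

import torch

stem = Stem(in_channels=3, stem_channels=32, out_channels=32, expand_ratio=1)
img = torch.rand(1, 3, 256, 192)
print(stem(img).shape)  # torch.Size([1, 32, 64, 48]) -- two stride-2 stages give a 4x downsample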
+ Default: None + """ + + def __init__(self, in_channels, norm_cfg=dict(type='BN'), init_cfg=None): + super().__init__(init_cfg=init_cfg) + projects = [] + num_branchs = len(in_channels) + self.in_channels = in_channels[::-1] + + for i in range(num_branchs): + if i != num_branchs - 1: + projects.append( + DepthwiseSeparableConvModule( + in_channels=self.in_channels[i], + out_channels=self.in_channels[i + 1], + kernel_size=3, + stride=1, + padding=1, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + dw_act_cfg=None, + pw_act_cfg=dict(type='ReLU'))) + else: + projects.append( + DepthwiseSeparableConvModule( + in_channels=self.in_channels[i], + out_channels=self.in_channels[i], + kernel_size=3, + stride=1, + padding=1, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + dw_act_cfg=None, + pw_act_cfg=dict(type='ReLU'))) + self.projects = nn.ModuleList(projects) + + def forward(self, x): + x = x[::-1] + + y = [] + last_x = None + for i, s in enumerate(x): + if last_x is not None: + last_x = F.interpolate( + last_x, + size=s.size()[-2:], + mode='bilinear', + align_corners=True) + s = s + last_x + s = self.projects[i](s) + y.append(s) + last_x = s + + return y[::-1] + + +class ShuffleUnit(BaseModule): + """InvertedResidual block for ShuffleNetV2 backbone. + + Args: + in_channels (int): The input channels of the block. + out_channels (int): The output channels of the block. + stride (int): Stride of the 3x3 convolution layer. Default: 1 + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. 
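A sketch of ``IterativeHead`` on a three-branch pyramid (illustrative channel and scale choices). Note that only the highest-resolution output keeps its channel count; each lower-resolution branch is projected to the next branch's width on the way up.

import torch

head = IterativeHead(in_channels=[40, 80, 160])
feats = [torch.rand(1, 40, 64, 64),
         torch.rand(1, 80, 32, 32),
         torch.rand(1, 160, 16, 16)]
print([o.shape for o in head(feats)])
# [torch.Size([1, 40, 64, 64]), torch.Size([1, 40, 32, 32]), torch.Size([1, 80, 16, 16])]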
+ Default: None + """ + + def __init__(self, + in_channels, + out_channels, + stride=1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + with_cp=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.stride = stride + self.with_cp = with_cp + + branch_features = out_channels // 2 + if self.stride == 1: + assert in_channels == branch_features * 2, ( + f'in_channels ({in_channels}) should equal to ' + f'branch_features * 2 ({branch_features * 2}) ' + 'when stride is 1') + + if in_channels != branch_features * 2: + assert self.stride != 1, ( + f'stride ({self.stride}) should not equal 1 when ' + f'in_channels != branch_features * 2') + + if self.stride > 1: + self.branch1 = nn.Sequential( + ConvModule( + in_channels, + in_channels, + kernel_size=3, + stride=self.stride, + padding=1, + groups=in_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None), + ConvModule( + in_channels, + branch_features, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ) + + self.branch2 = nn.Sequential( + ConvModule( + in_channels if (self.stride > 1) else branch_features, + branch_features, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + branch_features, + branch_features, + kernel_size=3, + stride=self.stride, + padding=1, + groups=branch_features, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None), + ConvModule( + branch_features, + branch_features, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def forward(self, x): + + def _inner_forward(x): + if self.stride > 1: + out = torch.cat((self.branch1(x), self.branch2(x)), dim=1) + else: + x1, x2 = x.chunk(2, dim=1) + out = torch.cat((x1, self.branch2(x2)), dim=1) + + out = channel_shuffle(out, 2) + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +class LiteHRModule(BaseModule): + """High-Resolution Module for LiteHRNet. + + It contains conditional channel weighting blocks and + shuffle blocks. + + + Args: + num_branches (int): Number of branches in the module. + num_blocks (int): Number of blocks in the module. + in_channels (list(int)): Number of input image channels. + reduce_ratio (int): Channel reduction ratio. + module_type (str): 'LITE' or 'NAIVE' + multiscale_output (bool): Whether to output multi-scale features. + with_fuse (bool): Whether to use fuse layers. + conv_cfg (dict): dictionary to construct and config conv layer. + norm_cfg (dict): dictionary to construct and config norm layer. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + init_cfg (dict or list[dict], optional): Initialization config dict. 
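A sketch of the two ``ShuffleUnit`` modes with illustrative sizes.

import torch

# stride=1 keeps the resolution and requires in_channels == out_channels
# (half of the input is passed through untouched and concatenated back).
unit = ShuffleUnit(in_channels=64, out_channels=64, stride=1)
print(unit(torch.rand(1, 64, 32, 32)).shape)  # torch.Size([1, 64, 32, 32])

# stride=2 halves the resolution and may widen the channels.
down = ShuffleUnit(in_channels=64, out_channels=128, stride=2)
print(down(torch.rand(1, 64, 32, 32)).shape)  # torch.Size([1, 128, 16, 16])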
+ Default: None + """ + + def __init__(self, + num_branches, + num_blocks, + in_channels, + reduce_ratio, + module_type, + multiscale_output=False, + with_fuse=True, + conv_cfg=None, + norm_cfg=dict(type='BN'), + with_cp=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self._check_branches(num_branches, in_channels) + + self.in_channels = in_channels + self.num_branches = num_branches + + self.module_type = module_type + self.multiscale_output = multiscale_output + self.with_fuse = with_fuse + self.norm_cfg = norm_cfg + self.conv_cfg = conv_cfg + self.with_cp = with_cp + + if self.module_type.upper() == 'LITE': + self.layers = self._make_weighting_blocks(num_blocks, reduce_ratio) + elif self.module_type.upper() == 'NAIVE': + self.layers = self._make_naive_branches(num_branches, num_blocks) + else: + raise ValueError("module_type should be either 'LITE' or 'NAIVE'.") + if self.with_fuse: + self.fuse_layers = self._make_fuse_layers() + self.relu = nn.ReLU() + + def _check_branches(self, num_branches, in_channels): + """Check input to avoid ValueError.""" + if num_branches != len(in_channels): + error_msg = f'NUM_BRANCHES({num_branches}) ' \ + f'!= NUM_INCHANNELS({len(in_channels)})' + raise ValueError(error_msg) + + def _make_weighting_blocks(self, num_blocks, reduce_ratio, stride=1): + """Make channel weighting blocks.""" + layers = [] + for i in range(num_blocks): + layers.append( + ConditionalChannelWeighting( + self.in_channels, + stride=stride, + reduce_ratio=reduce_ratio, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + with_cp=self.with_cp)) + + return nn.Sequential(*layers) + + def _make_one_branch(self, branch_index, num_blocks, stride=1): + """Make one branch.""" + layers = [] + layers.append( + ShuffleUnit( + self.in_channels[branch_index], + self.in_channels[branch_index], + stride=stride, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type='ReLU'), + with_cp=self.with_cp)) + for i in range(1, num_blocks): + layers.append( + ShuffleUnit( + self.in_channels[branch_index], + self.in_channels[branch_index], + stride=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type='ReLU'), + with_cp=self.with_cp)) + + return nn.Sequential(*layers) + + def _make_naive_branches(self, num_branches, num_blocks): + """Make branches.""" + branches = [] + + for i in range(num_branches): + branches.append(self._make_one_branch(i, num_blocks)) + + return nn.ModuleList(branches) + + def _make_fuse_layers(self): + """Make fuse layer.""" + if self.num_branches == 1: + return None + + num_branches = self.num_branches + in_channels = self.in_channels + fuse_layers = [] + num_out_branches = num_branches if self.multiscale_output else 1 + for i in range(num_out_branches): + fuse_layer = [] + for j in range(num_branches): + if j > i: + fuse_layer.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[i], + kernel_size=1, + stride=1, + padding=0, + bias=False), + build_norm_layer(self.norm_cfg, in_channels[i])[1], + nn.Upsample( + scale_factor=2**(j - i), mode='nearest'))) + elif j == i: + fuse_layer.append(None) + else: + conv_downsamples = [] + for k in range(i - j): + if k == i - j - 1: + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[j], + kernel_size=3, + stride=2, + padding=1, + groups=in_channels[j], + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[j])[1], + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[i], + 
kernel_size=1, + stride=1, + padding=0, + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[i])[1])) + else: + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[j], + kernel_size=3, + stride=2, + padding=1, + groups=in_channels[j], + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[j])[1], + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[j], + kernel_size=1, + stride=1, + padding=0, + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[j])[1], + nn.ReLU(inplace=True))) + fuse_layer.append(nn.Sequential(*conv_downsamples)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def forward(self, x): + """Forward function.""" + if self.num_branches == 1: + return [self.layers[0](x[0])] + + if self.module_type.upper() == 'LITE': + out = self.layers(x) + elif self.module_type.upper() == 'NAIVE': + for i in range(self.num_branches): + x[i] = self.layers[i](x[i]) + out = x + + if self.with_fuse: + out_fuse = [] + for i in range(len(self.fuse_layers)): + # `y = 0` will lead to decreased accuracy (0.5~1 mAP) + y = out[0] if i == 0 else self.fuse_layers[i][0](out[0]) + for j in range(self.num_branches): + if i == j: + y += out[j] + else: + y += self.fuse_layers[i][j](out[j]) + out_fuse.append(self.relu(y)) + out = out_fuse + if not self.multiscale_output: + out = [out[0]] + return out + + +@MODELS.register_module() +class LiteHRNet(BaseBackbone): + """Lite-HRNet backbone. + + `Lite-HRNet: A Lightweight High-Resolution Network + `_. + + Code adapted from 'https://github.com/HRNet/Lite-HRNet'. + + Args: + extra (dict): detailed configuration for each stage of HRNet. + in_channels (int): Number of input image channels. Default: 3. + conv_cfg (dict): dictionary to construct and config conv layer. + norm_cfg (dict): dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + + Example: + >>> from mmpose.models import LiteHRNet + >>> import torch + >>> extra=dict( + >>> stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), + >>> num_stages=3, + >>> stages_spec=dict( + >>> num_modules=(2, 4, 2), + >>> num_branches=(2, 3, 4), + >>> num_blocks=(2, 2, 2), + >>> module_type=('LITE', 'LITE', 'LITE'), + >>> with_fuse=(True, True, True), + >>> reduce_ratios=(8, 8, 8), + >>> num_channels=( + >>> (40, 80), + >>> (40, 80, 160), + >>> (40, 80, 160, 320), + >>> )), + >>> with_head=False) + >>> self = LiteHRNet(extra, in_channels=1) + >>> self.eval() + >>> inputs = torch.rand(1, 1, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... 
print(tuple(level_out.shape)) + (1, 40, 8, 8) + """ + + def __init__(self, + extra, + in_channels=3, + conv_cfg=None, + norm_cfg=dict(type='BN'), + norm_eval=False, + with_cp=False, + init_cfg=[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]): + super().__init__(init_cfg=init_cfg) + self.extra = extra + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + + self.stem = Stem( + in_channels, + stem_channels=self.extra['stem']['stem_channels'], + out_channels=self.extra['stem']['out_channels'], + expand_ratio=self.extra['stem']['expand_ratio'], + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg) + + self.num_stages = self.extra['num_stages'] + self.stages_spec = self.extra['stages_spec'] + + num_channels_last = [ + self.stem.out_channels, + ] + for i in range(self.num_stages): + num_channels = self.stages_spec['num_channels'][i] + num_channels = [num_channels[i] for i in range(len(num_channels))] + setattr( + self, f'transition{i}', + self._make_transition_layer(num_channels_last, num_channels)) + + stage, num_channels_last = self._make_stage( + self.stages_spec, i, num_channels, multiscale_output=True) + setattr(self, f'stage{i}', stage) + + self.with_head = self.extra['with_head'] + if self.with_head: + self.head_layer = IterativeHead( + in_channels=num_channels_last, + norm_cfg=self.norm_cfg, + ) + + def _make_transition_layer(self, num_channels_pre_layer, + num_channels_cur_layer): + """Make transition layer.""" + num_branches_cur = len(num_channels_cur_layer) + num_branches_pre = len(num_channels_pre_layer) + + transition_layers = [] + for i in range(num_branches_cur): + if i < num_branches_pre: + if num_channels_cur_layer[i] != num_channels_pre_layer[i]: + transition_layers.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + num_channels_pre_layer[i], + num_channels_pre_layer[i], + kernel_size=3, + stride=1, + padding=1, + groups=num_channels_pre_layer[i], + bias=False), + build_norm_layer(self.norm_cfg, + num_channels_pre_layer[i])[1], + build_conv_layer( + self.conv_cfg, + num_channels_pre_layer[i], + num_channels_cur_layer[i], + kernel_size=1, + stride=1, + padding=0, + bias=False), + build_norm_layer(self.norm_cfg, + num_channels_cur_layer[i])[1], + nn.ReLU())) + else: + transition_layers.append(None) + else: + conv_downsamples = [] + for j in range(i + 1 - num_branches_pre): + in_channels = num_channels_pre_layer[-1] + out_channels = num_channels_cur_layer[i] \ + if j == i - num_branches_pre else in_channels + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels, + in_channels, + kernel_size=3, + stride=2, + padding=1, + groups=in_channels, + bias=False), + build_norm_layer(self.norm_cfg, in_channels)[1], + build_conv_layer( + self.conv_cfg, + in_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0, + bias=False), + build_norm_layer(self.norm_cfg, out_channels)[1], + nn.ReLU())) + transition_layers.append(nn.Sequential(*conv_downsamples)) + + return nn.ModuleList(transition_layers) + + def _make_stage(self, + stages_spec, + stage_index, + in_channels, + multiscale_output=True): + num_modules = stages_spec['num_modules'][stage_index] + num_branches = stages_spec['num_branches'][stage_index] + num_blocks = stages_spec['num_blocks'][stage_index] + reduce_ratio = stages_spec['reduce_ratios'][stage_index] + with_fuse = stages_spec['with_fuse'][stage_index] + module_type = 
stages_spec['module_type'][stage_index] + + modules = [] + for i in range(num_modules): + # multi_scale_output is only used last module + if not multiscale_output and i == num_modules - 1: + reset_multiscale_output = False + else: + reset_multiscale_output = True + + modules.append( + LiteHRModule( + num_branches, + num_blocks, + in_channels, + reduce_ratio, + module_type, + multiscale_output=reset_multiscale_output, + with_fuse=with_fuse, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + with_cp=self.with_cp)) + in_channels = modules[-1].in_channels + + return nn.Sequential(*modules), in_channels + + def forward(self, x): + """Forward function.""" + x = self.stem(x) + + y_list = [x] + for i in range(self.num_stages): + x_list = [] + transition = getattr(self, f'transition{i}') + for j in range(self.stages_spec['num_branches'][i]): + if transition[j]: + if j >= len(y_list): + x_list.append(transition[j](y_list[-1])) + else: + x_list.append(transition[j](y_list[j])) + else: + x_list.append(y_list[j]) + y_list = getattr(self, f'stage{i}')(x_list) + + x = y_list + if self.with_head: + x = self.head_layer(x) + + return (x[0], ) + + def train(self, mode=True): + """Convert the model into training mode.""" + super().train(mode) + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmpose/models/backbones/mobilenet_v2.py b/mmpose/models/backbones/mobilenet_v2.py index b64c0d73d4..3c62394076 100644 --- a/mmpose/models/backbones/mobilenet_v2.py +++ b/mmpose/models/backbones/mobilenet_v2.py @@ -1,279 +1,279 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy - -import torch.nn as nn -import torch.utils.checkpoint as cp -from mmcv.cnn import ConvModule -from mmengine.model import BaseModule -from torch.nn.modules.batchnorm import _BatchNorm - -from mmpose.registry import MODELS -from .base_backbone import BaseBackbone -from .utils import make_divisible - - -class InvertedResidual(BaseModule): - """InvertedResidual block for MobileNetV2. - - Args: - in_channels (int): The input channels of the InvertedResidual block. - out_channels (int): The output channels of the InvertedResidual block. - stride (int): Stride of the middle (first) 3x3 convolution. - expand_ratio (int): adjusts number of channels of the hidden layer - in InvertedResidual by this amount. - conv_cfg (dict): Config dict for convolution layer. - Default: None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='ReLU6'). - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - in_channels, - out_channels, - stride, - expand_ratio, - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU6'), - with_cp=False, - init_cfg=None): - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - act_cfg = copy.deepcopy(act_cfg) - super().__init__(init_cfg=init_cfg) - self.stride = stride - assert stride in [1, 2], f'stride must in [1, 2]. ' \ - f'But received {stride}.' 
- self.with_cp = with_cp - self.use_res_connect = self.stride == 1 and in_channels == out_channels - hidden_dim = int(round(in_channels * expand_ratio)) - - layers = [] - if expand_ratio != 1: - layers.append( - ConvModule( - in_channels=in_channels, - out_channels=hidden_dim, - kernel_size=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg)) - layers.extend([ - ConvModule( - in_channels=hidden_dim, - out_channels=hidden_dim, - kernel_size=3, - stride=stride, - padding=1, - groups=hidden_dim, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg), - ConvModule( - in_channels=hidden_dim, - out_channels=out_channels, - kernel_size=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None) - ]) - self.conv = nn.Sequential(*layers) - - def forward(self, x): - - def _inner_forward(x): - if self.use_res_connect: - return x + self.conv(x) - return self.conv(x) - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - return out - - -@MODELS.register_module() -class MobileNetV2(BaseBackbone): - """MobileNetV2 backbone. - - Args: - widen_factor (float): Width multiplier, multiply number of - channels in each layer by this amount. Default: 1.0. - out_indices (None or Sequence[int]): Output from which stages. - Default: (7, ). - frozen_stages (int): Stages to be frozen (all param fixed). - Default: -1, which means not freezing any parameters. - conv_cfg (dict): Config dict for convolution layer. - Default: None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='ReLU6'). - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Default: False. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: - ``[ - dict(type='Kaiming', layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ]`` - """ - - # Parameters to build layers. 4 parameters are needed to construct a - # layer, from left to right: expand_ratio, channel, num_blocks, stride. - arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], - [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], - [6, 320, 1, 1]] - - def __init__(self, - widen_factor=1., - out_indices=(7, ), - frozen_stages=-1, - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU6'), - norm_eval=False, - with_cp=False, - init_cfg=[ - dict(type='Kaiming', layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ]): - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - act_cfg = copy.deepcopy(act_cfg) - super().__init__(init_cfg=init_cfg) - self.widen_factor = widen_factor - self.out_indices = out_indices - for index in out_indices: - if index not in range(0, 8): - raise ValueError('the item in out_indices must in ' - f'range(0, 8). But received {index}') - - if frozen_stages not in range(-1, 8): - raise ValueError('frozen_stages must be in range(-1, 8). 
' - f'But received {frozen_stages}') - self.out_indices = out_indices - self.frozen_stages = frozen_stages - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - self.norm_eval = norm_eval - self.with_cp = with_cp - - self.in_channels = make_divisible(32 * widen_factor, 8) - - self.conv1 = ConvModule( - in_channels=3, - out_channels=self.in_channels, - kernel_size=3, - stride=2, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - self.layers = [] - - for i, layer_cfg in enumerate(self.arch_settings): - expand_ratio, channel, num_blocks, stride = layer_cfg - out_channels = make_divisible(channel * widen_factor, 8) - inverted_res_layer = self.make_layer( - out_channels=out_channels, - num_blocks=num_blocks, - stride=stride, - expand_ratio=expand_ratio) - layer_name = f'layer{i + 1}' - self.add_module(layer_name, inverted_res_layer) - self.layers.append(layer_name) - - if widen_factor > 1.0: - self.out_channel = int(1280 * widen_factor) - else: - self.out_channel = 1280 - - layer = ConvModule( - in_channels=self.in_channels, - out_channels=self.out_channel, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - self.add_module('conv2', layer) - self.layers.append('conv2') - - def make_layer(self, out_channels, num_blocks, stride, expand_ratio): - """Stack InvertedResidual blocks to build a layer for MobileNetV2. - - Args: - out_channels (int): out_channels of block. - num_blocks (int): number of blocks. - stride (int): stride of the first block. Default: 1 - expand_ratio (int): Expand the number of channels of the - hidden layer in InvertedResidual by this ratio. Default: 6. - """ - layers = [] - for i in range(num_blocks): - if i >= 1: - stride = 1 - layers.append( - InvertedResidual( - self.in_channels, - out_channels, - stride, - expand_ratio=expand_ratio, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg, - with_cp=self.with_cp)) - self.in_channels = out_channels - - return nn.Sequential(*layers) - - def forward(self, x): - x = self.conv1(x) - - outs = [] - for i, layer_name in enumerate(self.layers): - layer = getattr(self, layer_name) - x = layer(x) - if i in self.out_indices: - outs.append(x) - - return tuple(outs) - - def _freeze_stages(self): - if self.frozen_stages >= 0: - for param in self.conv1.parameters(): - param.requires_grad = False - for i in range(1, self.frozen_stages + 1): - layer = getattr(self, f'layer{i}') - layer.eval() - for param in layer.parameters(): - param.requires_grad = False - - def train(self, mode=True): - super().train(mode) - self._freeze_stages() - if mode and self.norm_eval: - for m in self.modules(): - if isinstance(m, _BatchNorm): - m.eval() +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone +from .utils import make_divisible + + +class InvertedResidual(BaseModule): + """InvertedResidual block for MobileNetV2. + + Args: + in_channels (int): The input channels of the InvertedResidual block. + out_channels (int): The output channels of the InvertedResidual block. + stride (int): Stride of the middle (first) 3x3 convolution. + expand_ratio (int): adjusts number of channels of the hidden layer + in InvertedResidual by this amount. 
+ conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU6'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels, + out_channels, + stride, + expand_ratio, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU6'), + with_cp=False, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + act_cfg = copy.deepcopy(act_cfg) + super().__init__(init_cfg=init_cfg) + self.stride = stride + assert stride in [1, 2], f'stride must in [1, 2]. ' \ + f'But received {stride}.' + self.with_cp = with_cp + self.use_res_connect = self.stride == 1 and in_channels == out_channels + hidden_dim = int(round(in_channels * expand_ratio)) + + layers = [] + if expand_ratio != 1: + layers.append( + ConvModule( + in_channels=in_channels, + out_channels=hidden_dim, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + layers.extend([ + ConvModule( + in_channels=hidden_dim, + out_channels=hidden_dim, + kernel_size=3, + stride=stride, + padding=1, + groups=hidden_dim, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + in_channels=hidden_dim, + out_channels=out_channels, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + ]) + self.conv = nn.Sequential(*layers) + + def forward(self, x): + + def _inner_forward(x): + if self.use_res_connect: + return x + self.conv(x) + return self.conv(x) + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +@MODELS.register_module() +class MobileNetV2(BaseBackbone): + """MobileNetV2 backbone. + + Args: + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Default: 1.0. + out_indices (None or Sequence[int]): Output from which stages. + Default: (7, ). + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU6'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + """ + + # Parameters to build layers. 4 parameters are needed to construct a + # layer, from left to right: expand_ratio, channel, num_blocks, stride. 
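A sketch of the ``InvertedResidual`` block defined above, showing when the residual connection applies; the sizes are illustrative.

import torch

# The identity shortcut is used only when stride == 1 and in_channels == out_channels.
block = InvertedResidual(in_channels=32, out_channels=32, stride=1, expand_ratio=6)
print(block(torch.rand(1, 32, 56, 56)).shape)  # torch.Size([1, 32, 56, 56])

# Otherwise the block is a plain expand -> depthwise -> project stack.
down = InvertedResidual(in_channels=32, out_channels=64, stride=2, expand_ratio=6)
print(down(torch.rand(1, 32, 56, 56)).shape)  # torch.Size([1, 64, 28, 28])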
+ arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], + [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], + [6, 320, 1, 1]] + + def __init__(self, + widen_factor=1., + out_indices=(7, ), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU6'), + norm_eval=False, + with_cp=False, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + act_cfg = copy.deepcopy(act_cfg) + super().__init__(init_cfg=init_cfg) + self.widen_factor = widen_factor + self.out_indices = out_indices + for index in out_indices: + if index not in range(0, 8): + raise ValueError('the item in out_indices must in ' + f'range(0, 8). But received {index}') + + if frozen_stages not in range(-1, 8): + raise ValueError('frozen_stages must be in range(-1, 8). ' + f'But received {frozen_stages}') + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + + self.in_channels = make_divisible(32 * widen_factor, 8) + + self.conv1 = ConvModule( + in_channels=3, + out_channels=self.in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.layers = [] + + for i, layer_cfg in enumerate(self.arch_settings): + expand_ratio, channel, num_blocks, stride = layer_cfg + out_channels = make_divisible(channel * widen_factor, 8) + inverted_res_layer = self.make_layer( + out_channels=out_channels, + num_blocks=num_blocks, + stride=stride, + expand_ratio=expand_ratio) + layer_name = f'layer{i + 1}' + self.add_module(layer_name, inverted_res_layer) + self.layers.append(layer_name) + + if widen_factor > 1.0: + self.out_channel = int(1280 * widen_factor) + else: + self.out_channel = 1280 + + layer = ConvModule( + in_channels=self.in_channels, + out_channels=self.out_channel, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.add_module('conv2', layer) + self.layers.append('conv2') + + def make_layer(self, out_channels, num_blocks, stride, expand_ratio): + """Stack InvertedResidual blocks to build a layer for MobileNetV2. + + Args: + out_channels (int): out_channels of block. + num_blocks (int): number of blocks. + stride (int): stride of the first block. Default: 1 + expand_ratio (int): Expand the number of channels of the + hidden layer in InvertedResidual by this ratio. Default: 6. 
+ """ + layers = [] + for i in range(num_blocks): + if i >= 1: + stride = 1 + layers.append( + InvertedResidual( + self.in_channels, + out_channels, + stride, + expand_ratio=expand_ratio, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + with_cp=self.with_cp)) + self.in_channels = out_channels + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + + outs = [] + for i, layer_name in enumerate(self.layers): + layer = getattr(self, layer_name) + x = layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + for i in range(1, self.frozen_stages + 1): + layer = getattr(self, f'layer{i}') + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmpose/models/backbones/mobilenet_v3.py b/mmpose/models/backbones/mobilenet_v3.py index 03ecf90dd2..89edbb68ee 100644 --- a/mmpose/models/backbones/mobilenet_v3.py +++ b/mmpose/models/backbones/mobilenet_v3.py @@ -1,185 +1,185 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy - -from mmcv.cnn import ConvModule -from torch.nn.modules.batchnorm import _BatchNorm - -from mmpose.registry import MODELS -from .base_backbone import BaseBackbone -from .utils import InvertedResidual - - -@MODELS.register_module() -class MobileNetV3(BaseBackbone): - """MobileNetV3 backbone. - - Args: - arch (str): Architecture of mobilnetv3, from {small, big}. - Default: small. - conv_cfg (dict): Config dict for convolution layer. - Default: None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - out_indices (None or Sequence[int]): Output from which stages. - Default: (-1, ), which means output tensors from final stage. - frozen_stages (int): Stages to be frozen (all param fixed). - Default: -1, which means not freezing any parameters. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Default: False. - with_cp (bool): Use checkpoint or not. Using checkpoint will save - some memory while slowing down the training speed. - Default: False. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: - ``[ - dict(type='Kaiming', layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm']) - ]`` - """ - # Parameters to build each block: - # [kernel size, mid channels, out channels, with_se, act type, stride] - arch_settings = { - 'small': [[3, 16, 16, True, 'ReLU', 2], - [3, 72, 24, False, 'ReLU', 2], - [3, 88, 24, False, 'ReLU', 1], - [5, 96, 40, True, 'HSwish', 2], - [5, 240, 40, True, 'HSwish', 1], - [5, 240, 40, True, 'HSwish', 1], - [5, 120, 48, True, 'HSwish', 1], - [5, 144, 48, True, 'HSwish', 1], - [5, 288, 96, True, 'HSwish', 2], - [5, 576, 96, True, 'HSwish', 1], - [5, 576, 96, True, 'HSwish', 1]], - 'big': [[3, 16, 16, False, 'ReLU', 1], - [3, 64, 24, False, 'ReLU', 2], - [3, 72, 24, False, 'ReLU', 1], - [5, 72, 40, True, 'ReLU', 2], - [5, 120, 40, True, 'ReLU', 1], - [5, 120, 40, True, 'ReLU', 1], - [3, 240, 80, False, 'HSwish', 2], - [3, 200, 80, False, 'HSwish', 1], - [3, 184, 80, False, 'HSwish', 1], - [3, 184, 80, False, 'HSwish', 1], - [3, 480, 112, True, 'HSwish', 1], - [3, 672, 112, True, 'HSwish', 1], - [5, 672, 160, True, 'HSwish', 1], - [5, 672, 160, True, 'HSwish', 2], - [5, 960, 160, True, 'HSwish', 1]] - } # yapf: disable - - def __init__(self, - arch='small', - conv_cfg=None, - norm_cfg=dict(type='BN'), - out_indices=(-1, ), - frozen_stages=-1, - norm_eval=False, - with_cp=False, - init_cfg=[ - dict(type='Kaiming', layer=['Conv2d']), - dict(type='Constant', val=1, layer=['_BatchNorm']) - ]): - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - super().__init__(init_cfg=init_cfg) - assert arch in self.arch_settings - for index in out_indices: - if index not in range(-len(self.arch_settings[arch]), - len(self.arch_settings[arch])): - raise ValueError('the item in out_indices must in ' - f'range(0, {len(self.arch_settings[arch])}). ' - f'But received {index}') - - if frozen_stages not in range(-1, len(self.arch_settings[arch])): - raise ValueError('frozen_stages must be in range(-1, ' - f'{len(self.arch_settings[arch])}). 
' - f'But received {frozen_stages}') - self.arch = arch - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.out_indices = out_indices - self.frozen_stages = frozen_stages - self.norm_eval = norm_eval - self.with_cp = with_cp - - self.in_channels = 16 - self.conv1 = ConvModule( - in_channels=3, - out_channels=self.in_channels, - kernel_size=3, - stride=2, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=dict(type='HSwish')) - - self.layers = self._make_layer() - self.feat_dim = self.arch_settings[arch][-1][2] - - def _make_layer(self): - layers = [] - layer_setting = self.arch_settings[self.arch] - for i, params in enumerate(layer_setting): - (kernel_size, mid_channels, out_channels, with_se, act, - stride) = params - if with_se: - se_cfg = dict( - channels=mid_channels, - ratio=4, - act_cfg=(dict(type='ReLU'), - dict(type='HSigmoid', bias=1.0, divisor=2.0))) - else: - se_cfg = None - - layer = InvertedResidual( - in_channels=self.in_channels, - out_channels=out_channels, - mid_channels=mid_channels, - kernel_size=kernel_size, - stride=stride, - se_cfg=se_cfg, - with_expand_conv=True, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=dict(type=act), - with_cp=self.with_cp) - self.in_channels = out_channels - layer_name = f'layer{i + 1}' - self.add_module(layer_name, layer) - layers.append(layer_name) - return layers - - def forward(self, x): - x = self.conv1(x) - - outs = [] - for i, layer_name in enumerate(self.layers): - layer = getattr(self, layer_name) - x = layer(x) - if i in self.out_indices or \ - i - len(self.layers) in self.out_indices: - outs.append(x) - - return tuple(outs) - - def _freeze_stages(self): - if self.frozen_stages >= 0: - for param in self.conv1.parameters(): - param.requires_grad = False - for i in range(1, self.frozen_stages + 1): - layer = getattr(self, f'layer{i}') - layer.eval() - for param in layer.parameters(): - param.requires_grad = False - - def train(self, mode=True): - super().train(mode) - self._freeze_stages() - if mode and self.norm_eval: - for m in self.modules(): - if isinstance(m, _BatchNorm): - m.eval() +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +from mmcv.cnn import ConvModule +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone +from .utils import InvertedResidual + + +@MODELS.register_module() +class MobileNetV3(BaseBackbone): + """MobileNetV3 backbone. + + Args: + arch (str): Architecture of mobilnetv3, from {small, big}. + Default: small. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + out_indices (None or Sequence[int]): Output from which stages. + Default: (-1, ), which means output tensors from final stage. + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save + some memory while slowing down the training speed. + Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. 
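A sketch of the ``MobileNetV3`` backbone documented above; the input size is illustrative, and the default ``out_indices=(-1, )`` returns only the last stage.

import torch

model = MobileNetV3(arch='small')
model.eval()
feats = model(torch.rand(1, 3, 224, 224))
print([f.shape for f in feats])  # [torch.Size([1, 96, 7, 7])] for the 'small' architecture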
+ Default: + ``[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm']) + ]`` + """ + # Parameters to build each block: + # [kernel size, mid channels, out channels, with_se, act type, stride] + arch_settings = { + 'small': [[3, 16, 16, True, 'ReLU', 2], + [3, 72, 24, False, 'ReLU', 2], + [3, 88, 24, False, 'ReLU', 1], + [5, 96, 40, True, 'HSwish', 2], + [5, 240, 40, True, 'HSwish', 1], + [5, 240, 40, True, 'HSwish', 1], + [5, 120, 48, True, 'HSwish', 1], + [5, 144, 48, True, 'HSwish', 1], + [5, 288, 96, True, 'HSwish', 2], + [5, 576, 96, True, 'HSwish', 1], + [5, 576, 96, True, 'HSwish', 1]], + 'big': [[3, 16, 16, False, 'ReLU', 1], + [3, 64, 24, False, 'ReLU', 2], + [3, 72, 24, False, 'ReLU', 1], + [5, 72, 40, True, 'ReLU', 2], + [5, 120, 40, True, 'ReLU', 1], + [5, 120, 40, True, 'ReLU', 1], + [3, 240, 80, False, 'HSwish', 2], + [3, 200, 80, False, 'HSwish', 1], + [3, 184, 80, False, 'HSwish', 1], + [3, 184, 80, False, 'HSwish', 1], + [3, 480, 112, True, 'HSwish', 1], + [3, 672, 112, True, 'HSwish', 1], + [5, 672, 160, True, 'HSwish', 1], + [5, 672, 160, True, 'HSwish', 2], + [5, 960, 160, True, 'HSwish', 1]] + } # yapf: disable + + def __init__(self, + arch='small', + conv_cfg=None, + norm_cfg=dict(type='BN'), + out_indices=(-1, ), + frozen_stages=-1, + norm_eval=False, + with_cp=False, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict(type='Constant', val=1, layer=['_BatchNorm']) + ]): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + assert arch in self.arch_settings + for index in out_indices: + if index not in range(-len(self.arch_settings[arch]), + len(self.arch_settings[arch])): + raise ValueError('the item in out_indices must in ' + f'range(0, {len(self.arch_settings[arch])}). ' + f'But received {index}') + + if frozen_stages not in range(-1, len(self.arch_settings[arch])): + raise ValueError('frozen_stages must be in range(-1, ' + f'{len(self.arch_settings[arch])}). 
' + f'But received {frozen_stages}') + self.arch = arch + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.norm_eval = norm_eval + self.with_cp = with_cp + + self.in_channels = 16 + self.conv1 = ConvModule( + in_channels=3, + out_channels=self.in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=dict(type='HSwish')) + + self.layers = self._make_layer() + self.feat_dim = self.arch_settings[arch][-1][2] + + def _make_layer(self): + layers = [] + layer_setting = self.arch_settings[self.arch] + for i, params in enumerate(layer_setting): + (kernel_size, mid_channels, out_channels, with_se, act, + stride) = params + if with_se: + se_cfg = dict( + channels=mid_channels, + ratio=4, + act_cfg=(dict(type='ReLU'), + dict(type='HSigmoid', bias=1.0, divisor=2.0))) + else: + se_cfg = None + + layer = InvertedResidual( + in_channels=self.in_channels, + out_channels=out_channels, + mid_channels=mid_channels, + kernel_size=kernel_size, + stride=stride, + se_cfg=se_cfg, + with_expand_conv=True, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type=act), + with_cp=self.with_cp) + self.in_channels = out_channels + layer_name = f'layer{i + 1}' + self.add_module(layer_name, layer) + layers.append(layer_name) + return layers + + def forward(self, x): + x = self.conv1(x) + + outs = [] + for i, layer_name in enumerate(self.layers): + layer = getattr(self, layer_name) + x = layer(x) + if i in self.out_indices or \ + i - len(self.layers) in self.out_indices: + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + for i in range(1, self.frozen_stages + 1): + layer = getattr(self, f'layer{i}') + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmpose/models/backbones/mspn.py b/mmpose/models/backbones/mspn.py index bcb636b1a3..4753f927d7 100644 --- a/mmpose/models/backbones/mspn.py +++ b/mmpose/models/backbones/mspn.py @@ -1,541 +1,541 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy as cp -from collections import OrderedDict - -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule, MaxPool2d -from mmengine.model import BaseModule -from mmengine.runner import load_state_dict - -from mmpose.registry import MODELS -from mmpose.utils import get_root_logger -from .base_backbone import BaseBackbone -from .resnet import Bottleneck as _Bottleneck -from .utils import get_state_dict - - -class Bottleneck(_Bottleneck): - expansion = 4 - """Bottleneck block for MSPN. - - Args: - in_channels (int): Input channels of this block. - out_channels (int): Output channels of this block. - stride (int): stride of the block. Default: 1 - downsample (nn.Module): downsample operation on identity branch. - Default: None - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, in_channels, out_channels, **kwargs): - super().__init__(in_channels, out_channels * 4, **kwargs) - - -class DownsampleModule(BaseModule): - """Downsample module for MSPN. 
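# --- A minimal usage sketch for the MobileNetV3 backbone defined above ---
# Assumes the class is importable from mmpose.models.backbones (not verified here).
# With arch='small', the stem conv and the four stride-2 blocks give an overall
# stride of 32, and the last block has 96 output channels, so out_indices=(-1, )
# should yield a single (1, 96, 7, 7) feature map for a 224x224 input.
import torch
from mmpose.models.backbones import MobileNetV3

model = MobileNetV3(arch='small', out_indices=(-1, ))
model.eval()
with torch.no_grad():
    feats = model(torch.rand(1, 3, 224, 224))
print(len(feats), feats[0].shape)  # 1 torch.Size([1, 96, 7, 7])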
- - Args: - block (nn.Module): Downsample block. - num_blocks (list): Number of blocks in each downsample unit. - num_units (int): Numbers of downsample units. Default: 4 - has_skip (bool): Have skip connections from prior upsample - module or not. Default:False - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - in_channels (int): Number of channels of the input feature to - downsample module. Default: 64 - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - block, - num_blocks, - num_units=4, - has_skip=False, - norm_cfg=dict(type='BN'), - in_channels=64, - init_cfg=None): - # Protect mutable default arguments - norm_cfg = cp.deepcopy(norm_cfg) - super().__init__(init_cfg=init_cfg) - self.has_skip = has_skip - self.in_channels = in_channels - assert len(num_blocks) == num_units - self.num_blocks = num_blocks - self.num_units = num_units - self.norm_cfg = norm_cfg - self.layer1 = self._make_layer(block, in_channels, num_blocks[0]) - for i in range(1, num_units): - module_name = f'layer{i + 1}' - self.add_module( - module_name, - self._make_layer( - block, in_channels * pow(2, i), num_blocks[i], stride=2)) - - def _make_layer(self, block, out_channels, blocks, stride=1): - downsample = None - if stride != 1 or self.in_channels != out_channels * block.expansion: - downsample = ConvModule( - self.in_channels, - out_channels * block.expansion, - kernel_size=1, - stride=stride, - padding=0, - norm_cfg=self.norm_cfg, - act_cfg=None, - inplace=True) - - units = list() - units.append( - block( - self.in_channels, - out_channels, - stride=stride, - downsample=downsample, - norm_cfg=self.norm_cfg)) - self.in_channels = out_channels * block.expansion - for _ in range(1, blocks): - units.append(block(self.in_channels, out_channels)) - - return nn.Sequential(*units) - - def forward(self, x, skip1, skip2): - out = list() - for i in range(self.num_units): - module_name = f'layer{i + 1}' - module_i = getattr(self, module_name) - x = module_i(x) - if self.has_skip: - x = x + skip1[i] + skip2[i] - out.append(x) - out.reverse() - - return tuple(out) - - -class UpsampleUnit(BaseModule): - """Upsample unit for upsample module. - - Args: - ind (int): Indicates whether to interpolate (>0) and whether to - generate feature map for the next hourglass-like module. - num_units (int): Number of units that form a upsample module. Along - with ind and gen_cross_conv, nm_units is used to decide whether - to generate feature map for the next hourglass-like module. - in_channels (int): Channel number of the skip-in feature maps from - the corresponding downsample unit. - unit_channels (int): Channel number in this unit. Default:256. - gen_skip: (bool): Whether or not to generate skips for the posterior - downsample module. Default:False - gen_cross_conv (bool): Whether to generate feature map for the next - hourglass-like module. Default:False - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - out_channels (int): Number of channels of feature output by upsample - module. Must equal to in_channels of downsample module. Default:64 - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, - ind, - num_units, - in_channels, - unit_channels=256, - gen_skip=False, - gen_cross_conv=False, - norm_cfg=dict(type='BN'), - out_channels=64, - init_cfg=None): - # Protect mutable default arguments - norm_cfg = cp.deepcopy(norm_cfg) - super().__init__(init_cfg=init_cfg) - self.num_units = num_units - self.norm_cfg = norm_cfg - self.in_skip = ConvModule( - in_channels, - unit_channels, - kernel_size=1, - stride=1, - padding=0, - norm_cfg=self.norm_cfg, - act_cfg=None, - inplace=True) - self.relu = nn.ReLU(inplace=True) - - self.ind = ind - if self.ind > 0: - self.up_conv = ConvModule( - unit_channels, - unit_channels, - kernel_size=1, - stride=1, - padding=0, - norm_cfg=self.norm_cfg, - act_cfg=None, - inplace=True) - - self.gen_skip = gen_skip - if self.gen_skip: - self.out_skip1 = ConvModule( - in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0, - norm_cfg=self.norm_cfg, - inplace=True) - - self.out_skip2 = ConvModule( - unit_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0, - norm_cfg=self.norm_cfg, - inplace=True) - - self.gen_cross_conv = gen_cross_conv - if self.ind == num_units - 1 and self.gen_cross_conv: - self.cross_conv = ConvModule( - unit_channels, - out_channels, - kernel_size=1, - stride=1, - padding=0, - norm_cfg=self.norm_cfg, - inplace=True) - - def forward(self, x, up_x): - out = self.in_skip(x) - - if self.ind > 0: - up_x = F.interpolate( - up_x, - size=(x.size(2), x.size(3)), - mode='bilinear', - align_corners=True) - up_x = self.up_conv(up_x) - out = out + up_x - out = self.relu(out) - - skip1 = None - skip2 = None - if self.gen_skip: - skip1 = self.out_skip1(x) - skip2 = self.out_skip2(out) - - cross_conv = None - if self.ind == self.num_units - 1 and self.gen_cross_conv: - cross_conv = self.cross_conv(out) - - return out, skip1, skip2, cross_conv - - -class UpsampleModule(BaseModule): - """Upsample module for MSPN. - - Args: - unit_channels (int): Channel number in the upsample units. - Default:256. - num_units (int): Numbers of upsample units. Default: 4 - gen_skip (bool): Whether to generate skip for posterior downsample - module or not. Default:False - gen_cross_conv (bool): Whether to generate feature map for the next - hourglass-like module. Default:False - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - out_channels (int): Number of channels of feature output by upsample - module. Must equal to in_channels of downsample module. Default:64 - init_cfg (dict or list[dict], optional): Initialization config dict. 
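# --- The core skip-fusion step of UpsampleUnit.forward above, in isolation ---
# Shapes are made up for illustration: the coarser unit's output is bilinearly
# resized to the current branch's spatial size (the 1x1 up_conv is omitted here),
# added to the in_skip projection, and passed through ReLU.
import torch
import torch.nn.functional as F

out = torch.rand(1, 256, 16, 16)   # in_skip(x): current branch, unit_channels wide
up_x = torch.rand(1, 256, 8, 8)    # output of the previous (coarser) upsample unit
up_x = F.interpolate(
    up_x, size=(out.size(2), out.size(3)), mode='bilinear', align_corners=True)
out = torch.relu(out + up_x)
print(out.shape)                   # torch.Size([1, 256, 16, 16])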
- Default: None - """ - - def __init__(self, - unit_channels=256, - num_units=4, - gen_skip=False, - gen_cross_conv=False, - norm_cfg=dict(type='BN'), - out_channels=64, - init_cfg=None): - # Protect mutable default arguments - norm_cfg = cp.deepcopy(norm_cfg) - super().__init__(init_cfg=init_cfg) - self.in_channels = list() - for i in range(num_units): - self.in_channels.append(Bottleneck.expansion * out_channels * - pow(2, i)) - self.in_channels.reverse() - self.num_units = num_units - self.gen_skip = gen_skip - self.gen_cross_conv = gen_cross_conv - self.norm_cfg = norm_cfg - for i in range(num_units): - module_name = f'up{i + 1}' - self.add_module( - module_name, - UpsampleUnit( - i, - self.num_units, - self.in_channels[i], - unit_channels, - self.gen_skip, - self.gen_cross_conv, - norm_cfg=self.norm_cfg, - out_channels=64)) - - def forward(self, x): - out = list() - skip1 = list() - skip2 = list() - cross_conv = None - for i in range(self.num_units): - module_i = getattr(self, f'up{i + 1}') - if i == 0: - outi, skip1_i, skip2_i, _ = module_i(x[i], None) - elif i == self.num_units - 1: - outi, skip1_i, skip2_i, cross_conv = module_i(x[i], out[i - 1]) - else: - outi, skip1_i, skip2_i, _ = module_i(x[i], out[i - 1]) - out.append(outi) - skip1.append(skip1_i) - skip2.append(skip2_i) - skip1.reverse() - skip2.reverse() - - return out, skip1, skip2, cross_conv - - -class SingleStageNetwork(BaseModule): - """Single_stage Network. - - Args: - unit_channels (int): Channel number in the upsample units. Default:256. - num_units (int): Numbers of downsample/upsample units. Default: 4 - gen_skip (bool): Whether to generate skip for posterior downsample - module or not. Default:False - gen_cross_conv (bool): Whether to generate feature map for the next - hourglass-like module. Default:False - has_skip (bool): Have skip connections from prior upsample - module or not. Default:False - num_blocks (list): Number of blocks in each downsample unit. - Default: [2, 2, 2, 2] Note: Make sure num_units==len(num_blocks) - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - in_channels (int): Number of channels of the feature from ResNetTop. - Default: 64. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - has_skip=False, - gen_skip=False, - gen_cross_conv=False, - unit_channels=256, - num_units=4, - num_blocks=[2, 2, 2, 2], - norm_cfg=dict(type='BN'), - in_channels=64, - init_cfg=None): - # Protect mutable default arguments - norm_cfg = cp.deepcopy(norm_cfg) - num_blocks = cp.deepcopy(num_blocks) - super().__init__(init_cfg=init_cfg) - assert len(num_blocks) == num_units - self.has_skip = has_skip - self.gen_skip = gen_skip - self.gen_cross_conv = gen_cross_conv - self.num_units = num_units - self.unit_channels = unit_channels - self.num_blocks = num_blocks - self.norm_cfg = norm_cfg - - self.downsample = DownsampleModule(Bottleneck, num_blocks, num_units, - has_skip, norm_cfg, in_channels) - self.upsample = UpsampleModule(unit_channels, num_units, gen_skip, - gen_cross_conv, norm_cfg, in_channels) - - def forward(self, x, skip1, skip2): - mid = self.downsample(x, skip1, skip2) - out, skip1, skip2, cross_conv = self.upsample(mid) - - return out, skip1, skip2, cross_conv - - -class ResNetTop(BaseModule): - """ResNet top for MSPN. - - Args: - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - channels (int): Number of channels of the feature output by ResNetTop. 
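# --- Sanity check of the channel bookkeeping in UpsampleModule.__init__ above ---
# Assuming the defaults out_channels=64 and num_units=4, the expected input widths
# are Bottleneck.expansion * out_channels * 2**i, reversed so the deepest (coarsest)
# feature comes first -- the same order in which DownsampleModule.forward returns
# its outputs.
expansion, out_channels, num_units = 4, 64, 4
in_channels = [expansion * out_channels * 2 ** i for i in range(num_units)]
in_channels.reverse()
print(in_channels)  # [2048, 1024, 512, 256]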
- init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, norm_cfg=dict(type='BN'), channels=64, init_cfg=None): - # Protect mutable default arguments - norm_cfg = cp.deepcopy(norm_cfg) - super().__init__(init_cfg=init_cfg) - self.top = nn.Sequential( - ConvModule( - 3, - channels, - kernel_size=7, - stride=2, - padding=3, - norm_cfg=norm_cfg, - inplace=True), MaxPool2d(kernel_size=3, stride=2, padding=1)) - - def forward(self, img): - return self.top(img) - - -@MODELS.register_module() -class MSPN(BaseBackbone): - """MSPN backbone. Paper ref: Li et al. "Rethinking on Multi-Stage Networks - for Human Pose Estimation" (CVPR 2020). - - Args: - unit_channels (int): Number of Channels in an upsample unit. - Default: 256 - num_stages (int): Number of stages in a multi-stage MSPN. Default: 4 - num_units (int): Number of downsample/upsample units in a single-stage - network. Default: 4 - Note: Make sure num_units == len(self.num_blocks) - num_blocks (list): Number of bottlenecks in each - downsample unit. Default: [2, 2, 2, 2] - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - res_top_channels (int): Number of channels of feature from ResNetTop. - Default: 64. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: - ``[ - dict(type='Kaiming', layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']), - dict( - type='Normal', - std=0.01, - layer=['Linear']), - ]`` - - Example: - >>> from mmpose.models import MSPN - >>> import torch - >>> self = MSPN(num_stages=2,num_units=2,num_blocks=[2,2]) - >>> self.eval() - >>> inputs = torch.rand(1, 3, 511, 511) - >>> level_outputs = self.forward(inputs) - >>> for level_output in level_outputs: - ... for feature in level_output: - ... print(tuple(feature.shape)) - ... 
- (1, 256, 64, 64) - (1, 256, 128, 128) - (1, 256, 64, 64) - (1, 256, 128, 128) - """ - - def __init__(self, - unit_channels=256, - num_stages=4, - num_units=4, - num_blocks=[2, 2, 2, 2], - norm_cfg=dict(type='BN'), - res_top_channels=64, - init_cfg=[ - dict(type='Kaiming', layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']), - dict(type='Normal', std=0.01, layer=['Linear']), - ]): - # Protect mutable default arguments - norm_cfg = cp.deepcopy(norm_cfg) - num_blocks = cp.deepcopy(num_blocks) - super().__init__(init_cfg=init_cfg) - self.unit_channels = unit_channels - self.num_stages = num_stages - self.num_units = num_units - self.num_blocks = num_blocks - self.norm_cfg = norm_cfg - - assert self.num_stages > 0 - assert self.num_units > 1 - assert self.num_units == len(self.num_blocks) - self.top = ResNetTop(norm_cfg=norm_cfg) - self.multi_stage_mspn = nn.ModuleList([]) - for i in range(self.num_stages): - if i == 0: - has_skip = False - else: - has_skip = True - if i != self.num_stages - 1: - gen_skip = True - gen_cross_conv = True - else: - gen_skip = False - gen_cross_conv = False - self.multi_stage_mspn.append( - SingleStageNetwork(has_skip, gen_skip, gen_cross_conv, - unit_channels, num_units, num_blocks, - norm_cfg, res_top_channels)) - - def forward(self, x): - """Model forward function.""" - out_feats = [] - skip1 = None - skip2 = None - x = self.top(x) - for i in range(self.num_stages): - out, skip1, skip2, x = self.multi_stage_mspn[i](x, skip1, skip2) - out_feats.append(out) - - return out_feats - - def init_weights(self): - """Initialize model weights.""" - if (isinstance(self.init_cfg, dict) - and self.init_cfg['type'] == 'Pretrained'): - logger = get_root_logger() - state_dict_tmp = get_state_dict(self.init_cfg['checkpoint']) - state_dict = OrderedDict() - state_dict['top'] = OrderedDict() - state_dict['bottlenecks'] = OrderedDict() - for k, v in state_dict_tmp.items(): - if k.startswith('layer'): - if 'downsample.0' in k: - state_dict['bottlenecks'][k.replace( - 'downsample.0', 'downsample.conv')] = v - elif 'downsample.1' in k: - state_dict['bottlenecks'][k.replace( - 'downsample.1', 'downsample.bn')] = v - else: - state_dict['bottlenecks'][k] = v - elif k.startswith('conv1'): - state_dict['top'][k.replace('conv1', 'top.0.conv')] = v - elif k.startswith('bn1'): - state_dict['top'][k.replace('bn1', 'top.0.bn')] = v - - load_state_dict( - self.top, state_dict['top'], strict=False, logger=logger) - for i in range(self.num_stages): - load_state_dict( - self.multi_stage_mspn[i].downsample, - state_dict['bottlenecks'], - strict=False, - logger=logger) - else: - super(MSPN, self).init_weights() +# Copyright (c) OpenMMLab. All rights reserved. +import copy as cp +from collections import OrderedDict + +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule, MaxPool2d +from mmengine.model import BaseModule +from mmengine.runner import load_state_dict + +from mmpose.registry import MODELS +from mmpose.utils import get_root_logger +from .base_backbone import BaseBackbone +from .resnet import Bottleneck as _Bottleneck +from .utils import get_state_dict + + +class Bottleneck(_Bottleneck): + expansion = 4 + """Bottleneck block for MSPN. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + stride (int): stride of the block. Default: 1 + downsample (nn.Module): downsample operation on identity branch. 
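# --- Illustration of the checkpoint key remapping done in MSPN.init_weights above ---
# Keys from a torchvision-style ResNet state dict are split into a 'top' part
# (stem conv/bn) and a 'bottlenecks' part, with downsample.0 / downsample.1 renamed
# to the ConvModule naming used here. The keys below are illustrative only.
resnet_keys = [
    'conv1.weight', 'bn1.weight',
    'layer1.0.conv1.weight', 'layer1.0.downsample.0.weight',
    'layer1.0.downsample.1.running_mean',
]
for k in resnet_keys:
    if k.startswith('layer'):
        new_k = k.replace('downsample.0', 'downsample.conv')
        new_k = new_k.replace('downsample.1', 'downsample.bn')
        print('bottlenecks:', k, '->', new_k)
    elif k.startswith('conv1'):
        print('top:', k, '->', k.replace('conv1', 'top.0.conv'))
    elif k.startswith('bn1'):
        print('top:', k, '->', k.replace('bn1', 'top.0.bn'))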
+ Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, in_channels, out_channels, **kwargs): + super().__init__(in_channels, out_channels * 4, **kwargs) + + +class DownsampleModule(BaseModule): + """Downsample module for MSPN. + + Args: + block (nn.Module): Downsample block. + num_blocks (list): Number of blocks in each downsample unit. + num_units (int): Numbers of downsample units. Default: 4 + has_skip (bool): Have skip connections from prior upsample + module or not. Default:False + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + in_channels (int): Number of channels of the input feature to + downsample module. Default: 64 + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + block, + num_blocks, + num_units=4, + has_skip=False, + norm_cfg=dict(type='BN'), + in_channels=64, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self.has_skip = has_skip + self.in_channels = in_channels + assert len(num_blocks) == num_units + self.num_blocks = num_blocks + self.num_units = num_units + self.norm_cfg = norm_cfg + self.layer1 = self._make_layer(block, in_channels, num_blocks[0]) + for i in range(1, num_units): + module_name = f'layer{i + 1}' + self.add_module( + module_name, + self._make_layer( + block, in_channels * pow(2, i), num_blocks[i], stride=2)) + + def _make_layer(self, block, out_channels, blocks, stride=1): + downsample = None + if stride != 1 or self.in_channels != out_channels * block.expansion: + downsample = ConvModule( + self.in_channels, + out_channels * block.expansion, + kernel_size=1, + stride=stride, + padding=0, + norm_cfg=self.norm_cfg, + act_cfg=None, + inplace=True) + + units = list() + units.append( + block( + self.in_channels, + out_channels, + stride=stride, + downsample=downsample, + norm_cfg=self.norm_cfg)) + self.in_channels = out_channels * block.expansion + for _ in range(1, blocks): + units.append(block(self.in_channels, out_channels)) + + return nn.Sequential(*units) + + def forward(self, x, skip1, skip2): + out = list() + for i in range(self.num_units): + module_name = f'layer{i + 1}' + module_i = getattr(self, module_name) + x = module_i(x) + if self.has_skip: + x = x + skip1[i] + skip2[i] + out.append(x) + out.reverse() + + return tuple(out) + + +class UpsampleUnit(BaseModule): + """Upsample unit for upsample module. + + Args: + ind (int): Indicates whether to interpolate (>0) and whether to + generate feature map for the next hourglass-like module. + num_units (int): Number of units that form a upsample module. Along + with ind and gen_cross_conv, nm_units is used to decide whether + to generate feature map for the next hourglass-like module. + in_channels (int): Channel number of the skip-in feature maps from + the corresponding downsample unit. + unit_channels (int): Channel number in this unit. Default:256. + gen_skip: (bool): Whether or not to generate skips for the posterior + downsample module. Default:False + gen_cross_conv (bool): Whether to generate feature map for the next + hourglass-like module. Default:False + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + out_channels (int): Number of channels of feature output by upsample + module. 
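# --- A hedged shape check for DownsampleModule above ---
# The import assumes the module-level classes in mmpose/models/backbones/mspn.py are
# importable directly; they are not part of the public API. With has_skip=False the
# skip arguments are unused, and the four units produce features at strides 1, 2, 4
# and 8 relative to the 64-channel input, returned deepest-first.
import torch
from mmpose.models.backbones.mspn import Bottleneck, DownsampleModule

down = DownsampleModule(Bottleneck, num_blocks=[2, 2, 2, 2], num_units=4)
down.eval()
with torch.no_grad():
    feats = down(torch.rand(1, 64, 64, 64), None, None)
print([tuple(f.shape) for f in feats])
# [(1, 2048, 8, 8), (1, 1024, 16, 16), (1, 512, 32, 32), (1, 256, 64, 64)]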
Must equal to in_channels of downsample module. Default:64 + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + ind, + num_units, + in_channels, + unit_channels=256, + gen_skip=False, + gen_cross_conv=False, + norm_cfg=dict(type='BN'), + out_channels=64, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self.num_units = num_units + self.norm_cfg = norm_cfg + self.in_skip = ConvModule( + in_channels, + unit_channels, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=self.norm_cfg, + act_cfg=None, + inplace=True) + self.relu = nn.ReLU(inplace=True) + + self.ind = ind + if self.ind > 0: + self.up_conv = ConvModule( + unit_channels, + unit_channels, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=self.norm_cfg, + act_cfg=None, + inplace=True) + + self.gen_skip = gen_skip + if self.gen_skip: + self.out_skip1 = ConvModule( + in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=self.norm_cfg, + inplace=True) + + self.out_skip2 = ConvModule( + unit_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=self.norm_cfg, + inplace=True) + + self.gen_cross_conv = gen_cross_conv + if self.ind == num_units - 1 and self.gen_cross_conv: + self.cross_conv = ConvModule( + unit_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=self.norm_cfg, + inplace=True) + + def forward(self, x, up_x): + out = self.in_skip(x) + + if self.ind > 0: + up_x = F.interpolate( + up_x, + size=(x.size(2), x.size(3)), + mode='bilinear', + align_corners=True) + up_x = self.up_conv(up_x) + out = out + up_x + out = self.relu(out) + + skip1 = None + skip2 = None + if self.gen_skip: + skip1 = self.out_skip1(x) + skip2 = self.out_skip2(out) + + cross_conv = None + if self.ind == self.num_units - 1 and self.gen_cross_conv: + cross_conv = self.cross_conv(out) + + return out, skip1, skip2, cross_conv + + +class UpsampleModule(BaseModule): + """Upsample module for MSPN. + + Args: + unit_channels (int): Channel number in the upsample units. + Default:256. + num_units (int): Numbers of upsample units. Default: 4 + gen_skip (bool): Whether to generate skip for posterior downsample + module or not. Default:False + gen_cross_conv (bool): Whether to generate feature map for the next + hourglass-like module. Default:False + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + out_channels (int): Number of channels of feature output by upsample + module. Must equal to in_channels of downsample module. Default:64 + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + unit_channels=256, + num_units=4, + gen_skip=False, + gen_cross_conv=False, + norm_cfg=dict(type='BN'), + out_channels=64, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self.in_channels = list() + for i in range(num_units): + self.in_channels.append(Bottleneck.expansion * out_channels * + pow(2, i)) + self.in_channels.reverse() + self.num_units = num_units + self.gen_skip = gen_skip + self.gen_cross_conv = gen_cross_conv + self.norm_cfg = norm_cfg + for i in range(num_units): + module_name = f'up{i + 1}' + self.add_module( + module_name, + UpsampleUnit( + i, + self.num_units, + self.in_channels[i], + unit_channels, + self.gen_skip, + self.gen_cross_conv, + norm_cfg=self.norm_cfg, + out_channels=64)) + + def forward(self, x): + out = list() + skip1 = list() + skip2 = list() + cross_conv = None + for i in range(self.num_units): + module_i = getattr(self, f'up{i + 1}') + if i == 0: + outi, skip1_i, skip2_i, _ = module_i(x[i], None) + elif i == self.num_units - 1: + outi, skip1_i, skip2_i, cross_conv = module_i(x[i], out[i - 1]) + else: + outi, skip1_i, skip2_i, _ = module_i(x[i], out[i - 1]) + out.append(outi) + skip1.append(skip1_i) + skip2.append(skip2_i) + skip1.reverse() + skip2.reverse() + + return out, skip1, skip2, cross_conv + + +class SingleStageNetwork(BaseModule): + """Single_stage Network. + + Args: + unit_channels (int): Channel number in the upsample units. Default:256. + num_units (int): Numbers of downsample/upsample units. Default: 4 + gen_skip (bool): Whether to generate skip for posterior downsample + module or not. Default:False + gen_cross_conv (bool): Whether to generate feature map for the next + hourglass-like module. Default:False + has_skip (bool): Have skip connections from prior upsample + module or not. Default:False + num_blocks (list): Number of blocks in each downsample unit. + Default: [2, 2, 2, 2] Note: Make sure num_units==len(num_blocks) + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + in_channels (int): Number of channels of the feature from ResNetTop. + Default: 64. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + has_skip=False, + gen_skip=False, + gen_cross_conv=False, + unit_channels=256, + num_units=4, + num_blocks=[2, 2, 2, 2], + norm_cfg=dict(type='BN'), + in_channels=64, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + num_blocks = cp.deepcopy(num_blocks) + super().__init__(init_cfg=init_cfg) + assert len(num_blocks) == num_units + self.has_skip = has_skip + self.gen_skip = gen_skip + self.gen_cross_conv = gen_cross_conv + self.num_units = num_units + self.unit_channels = unit_channels + self.num_blocks = num_blocks + self.norm_cfg = norm_cfg + + self.downsample = DownsampleModule(Bottleneck, num_blocks, num_units, + has_skip, norm_cfg, in_channels) + self.upsample = UpsampleModule(unit_channels, num_units, gen_skip, + gen_cross_conv, norm_cfg, in_channels) + + def forward(self, x, skip1, skip2): + mid = self.downsample(x, skip1, skip2) + out, skip1, skip2, cross_conv = self.upsample(mid) + + return out, skip1, skip2, cross_conv + + +class ResNetTop(BaseModule): + """ResNet top for MSPN. + + Args: + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + channels (int): Number of channels of the feature output by ResNetTop. 
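# --- ResNetTop shape check ---
# ResNetTop (its full definition appears earlier in this file and is re-added
# unchanged here) is a 7x7 stride-2 conv followed by a 3x3 stride-2 max-pool, i.e.
# an overall stride of 4. A hedged sketch, assuming the class is importable from
# mmpose/models/backbones/mspn.py:
import torch
from mmpose.models.backbones.mspn import ResNetTop

top = ResNetTop(channels=64)
top.eval()
with torch.no_grad():
    feat = top(torch.rand(1, 3, 256, 256))
print(feat.shape)  # torch.Size([1, 64, 64, 64]) -> matches SingleStageNetwork's in_channels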
+ init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, norm_cfg=dict(type='BN'), channels=64, init_cfg=None): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self.top = nn.Sequential( + ConvModule( + 3, + channels, + kernel_size=7, + stride=2, + padding=3, + norm_cfg=norm_cfg, + inplace=True), MaxPool2d(kernel_size=3, stride=2, padding=1)) + + def forward(self, img): + return self.top(img) + + +@MODELS.register_module() +class MSPN(BaseBackbone): + """MSPN backbone. Paper ref: Li et al. "Rethinking on Multi-Stage Networks + for Human Pose Estimation" (CVPR 2020). + + Args: + unit_channels (int): Number of Channels in an upsample unit. + Default: 256 + num_stages (int): Number of stages in a multi-stage MSPN. Default: 4 + num_units (int): Number of downsample/upsample units in a single-stage + network. Default: 4 + Note: Make sure num_units == len(self.num_blocks) + num_blocks (list): Number of bottlenecks in each + downsample unit. Default: [2, 2, 2, 2] + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + res_top_channels (int): Number of channels of feature from ResNetTop. + Default: 64. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']), + dict( + type='Normal', + std=0.01, + layer=['Linear']), + ]`` + + Example: + >>> from mmpose.models import MSPN + >>> import torch + >>> self = MSPN(num_stages=2,num_units=2,num_blocks=[2,2]) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 511, 511) + >>> level_outputs = self.forward(inputs) + >>> for level_output in level_outputs: + ... for feature in level_output: + ... print(tuple(feature.shape)) + ... 
+ (1, 256, 64, 64) + (1, 256, 128, 128) + (1, 256, 64, 64) + (1, 256, 128, 128) + """ + + def __init__(self, + unit_channels=256, + num_stages=4, + num_units=4, + num_blocks=[2, 2, 2, 2], + norm_cfg=dict(type='BN'), + res_top_channels=64, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']), + dict(type='Normal', std=0.01, layer=['Linear']), + ]): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + num_blocks = cp.deepcopy(num_blocks) + super().__init__(init_cfg=init_cfg) + self.unit_channels = unit_channels + self.num_stages = num_stages + self.num_units = num_units + self.num_blocks = num_blocks + self.norm_cfg = norm_cfg + + assert self.num_stages > 0 + assert self.num_units > 1 + assert self.num_units == len(self.num_blocks) + self.top = ResNetTop(norm_cfg=norm_cfg) + self.multi_stage_mspn = nn.ModuleList([]) + for i in range(self.num_stages): + if i == 0: + has_skip = False + else: + has_skip = True + if i != self.num_stages - 1: + gen_skip = True + gen_cross_conv = True + else: + gen_skip = False + gen_cross_conv = False + self.multi_stage_mspn.append( + SingleStageNetwork(has_skip, gen_skip, gen_cross_conv, + unit_channels, num_units, num_blocks, + norm_cfg, res_top_channels)) + + def forward(self, x): + """Model forward function.""" + out_feats = [] + skip1 = None + skip2 = None + x = self.top(x) + for i in range(self.num_stages): + out, skip1, skip2, x = self.multi_stage_mspn[i](x, skip1, skip2) + out_feats.append(out) + + return out_feats + + def init_weights(self): + """Initialize model weights.""" + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + logger = get_root_logger() + state_dict_tmp = get_state_dict(self.init_cfg['checkpoint']) + state_dict = OrderedDict() + state_dict['top'] = OrderedDict() + state_dict['bottlenecks'] = OrderedDict() + for k, v in state_dict_tmp.items(): + if k.startswith('layer'): + if 'downsample.0' in k: + state_dict['bottlenecks'][k.replace( + 'downsample.0', 'downsample.conv')] = v + elif 'downsample.1' in k: + state_dict['bottlenecks'][k.replace( + 'downsample.1', 'downsample.bn')] = v + else: + state_dict['bottlenecks'][k] = v + elif k.startswith('conv1'): + state_dict['top'][k.replace('conv1', 'top.0.conv')] = v + elif k.startswith('bn1'): + state_dict['top'][k.replace('bn1', 'top.0.bn')] = v + + load_state_dict( + self.top, state_dict['top'], strict=False, logger=logger) + for i in range(self.num_stages): + load_state_dict( + self.multi_stage_mspn[i].downsample, + state_dict['bottlenecks'], + strict=False, + logger=logger) + else: + super(MSPN, self).init_weights() diff --git a/mmpose/models/backbones/octsb1.py b/mmpose/models/backbones/octsb1.py new file mode 100644 index 0000000000..d9bf00d6b4 --- /dev/null +++ b/mmpose/models/backbones/octsb1.py @@ -0,0 +1,653 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import torch.nn as nn +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmengine.model import BaseModule, constant_init +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone +from .resnet import BasicBlock, Bottleneck, get_expansion + +import os.path as osp + +import torch +import torch.nn.functional as F + +# HRNet + Lumen segmentation + +class HRModule(BaseModule): + """High-Resolution Module for HRNet. + + In this module, every branch has 4 BasicBlocks/Bottlenecks. 
Fusion/Exchange + is in this module. + """ + + def __init__(self, + num_branches, + blocks, + num_blocks, + in_channels, + num_channels, + multiscale_output=False, + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + upsample_cfg=dict(mode='nearest', align_corners=None), + init_cfg=None): + + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self._check_branches(num_branches, num_blocks, in_channels, + num_channels) + + self.in_channels = in_channels + self.num_branches = num_branches + + self.multiscale_output = multiscale_output + self.norm_cfg = norm_cfg + self.conv_cfg = conv_cfg + self.upsample_cfg = upsample_cfg + self.with_cp = with_cp + self.branches = self._make_branches(num_branches, blocks, num_blocks, + num_channels) + self.fuse_layers = self._make_fuse_layers() + self.relu = nn.ReLU(inplace=True) + + @staticmethod + def _check_branches(num_branches, num_blocks, in_channels, num_channels): + """Check input to avoid ValueError.""" + if num_branches != len(num_blocks): + error_msg = f'NUM_BRANCHES({num_branches}) ' \ + f'!= NUM_BLOCKS({len(num_blocks)})' + raise ValueError(error_msg) + + if num_branches != len(num_channels): + error_msg = f'NUM_BRANCHES({num_branches}) ' \ + f'!= NUM_CHANNELS({len(num_channels)})' + raise ValueError(error_msg) + + if num_branches != len(in_channels): + error_msg = f'NUM_BRANCHES({num_branches}) ' \ + f'!= NUM_INCHANNELS({len(in_channels)})' + raise ValueError(error_msg) + + def _make_one_branch(self, + branch_index, + block, + num_blocks, + num_channels, + stride=1): + """Make one branch.""" + downsample = None + if stride != 1 or \ + self.in_channels[branch_index] != \ + num_channels[branch_index] * get_expansion(block): + downsample = nn.Sequential( + build_conv_layer( + self.conv_cfg, + self.in_channels[branch_index], + num_channels[branch_index] * get_expansion(block), + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer( + self.norm_cfg, + num_channels[branch_index] * get_expansion(block))[1]) + + layers = [] + layers.append( + block( + self.in_channels[branch_index], + num_channels[branch_index] * get_expansion(block), + stride=stride, + downsample=downsample, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + self.in_channels[branch_index] = \ + num_channels[branch_index] * get_expansion(block) + for _ in range(1, num_blocks[branch_index]): + layers.append( + block( + self.in_channels[branch_index], + num_channels[branch_index] * get_expansion(block), + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + + return nn.Sequential(*layers) + + def _make_branches(self, num_branches, block, num_blocks, num_channels): + """Make branches.""" + branches = [] + + for i in range(num_branches): + branches.append( + self._make_one_branch(i, block, num_blocks, num_channels)) + + return nn.ModuleList(branches) + + def _make_fuse_layers(self): + """Make fuse layer.""" + if self.num_branches == 1: + return None + + num_branches = self.num_branches + in_channels = self.in_channels + fuse_layers = [] + num_out_branches = num_branches if self.multiscale_output else 1 + + for i in range(num_out_branches): + fuse_layer = [] + for j in range(num_branches): + if j > i: + fuse_layer.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[i], + kernel_size=1, + stride=1, + padding=0, + bias=False), + build_norm_layer(self.norm_cfg, in_channels[i])[1], + nn.Upsample( + scale_factor=2**(j - 
i), + mode=self.upsample_cfg['mode'], + align_corners=self. + upsample_cfg['align_corners']))) + elif j == i: + fuse_layer.append(None) + else: + conv_downsamples = [] + for k in range(i - j): + if k == i - j - 1: + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[i], + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[i])[1])) + else: + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[j], + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[j])[1], + nn.ReLU(inplace=True))) + fuse_layer.append(nn.Sequential(*conv_downsamples)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def forward(self, x): + """Forward function.""" + if self.num_branches == 1: + return [self.branches[0](x[0])] + + for i in range(self.num_branches): + x[i] = self.branches[i](x[i]) + + x_fuse = [] + for i in range(len(self.fuse_layers)): + y = 0 + for j in range(self.num_branches): + if i == j: + y += x[j] + else: + y += self.fuse_layers[i][j](x[j]) + x_fuse.append(self.relu(y)) + return x_fuse + + +@MODELS.register_module() +class OCTSB1(BaseBackbone): + """HRNet backbone. + + `High-Resolution Representations for Labeling Pixels and Regions + `__ + + Args: + extra (dict): detailed configuration for each stage of HRNet. + in_channels (int): Number of input image channels. Default: 3. + conv_cfg (dict): dictionary to construct and config conv layer. + norm_cfg (dict): dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + zero_init_residual (bool): whether to use zero init for last norm layer + in resblocks to let them behave as identity. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + + Example: + >>> from mmpose.models import HRNet + >>> import torch + >>> extra = dict( + >>> stage1=dict( + >>> num_modules=1, + >>> num_branches=1, + >>> block='BOTTLENECK', + >>> num_blocks=(4, ), + >>> num_channels=(64, )), + >>> stage2=dict( + >>> num_modules=1, + >>> num_branches=2, + >>> block='BASIC', + >>> num_blocks=(4, 4), + >>> num_channels=(32, 64)), + >>> stage3=dict( + >>> num_modules=4, + >>> num_branches=3, + >>> block='BASIC', + >>> num_blocks=(4, 4, 4), + >>> num_channels=(32, 64, 128)), + >>> stage4=dict( + >>> num_modules=3, + >>> num_branches=4, + >>> block='BASIC', + >>> num_blocks=(4, 4, 4, 4), + >>> num_channels=(32, 64, 128, 256))) + >>> self = HRNet(extra, in_channels=1) + >>> self.eval() + >>> inputs = torch.rand(1, 1, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... 
print(tuple(level_out.shape)) + (1, 32, 8, 8) + """ + + blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck} + + def __init__( + self, + extra, + lumen_cfg, + in_channels=3, + conv_cfg=None, + norm_cfg=dict(type='BN'), + norm_eval=False, + with_cp=False, + zero_init_residual=False, + frozen_stages=-1, + init_cfg=[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) + ] + ): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + + + # Load pretrained lumen segmentation model + # lumen_config_path = lumen_cfg['config_path'] + # lumen_config_path = lumen_cfg['config_path'] + lumen_checkpoint_path = lumen_cfg['checkpoint_path'] + # print('lumen config_path:', osp.abspath(lumen_config_path)) + print('lumen checkpoint_path:', osp.abspath(lumen_checkpoint_path)) + print('Initializing lumen segmentation model') + + # self.lumen_net = init_seg_model(lumen_config_path, lumen_checkpoint_path) + self.lumen_net = torch.jit.load(lumen_checkpoint_path, map_location=torch.device('cuda')) + self.lumen_net.eval() + if torch.cuda.is_available(): self.lumen_net = self.lumen_net.to('cuda') + # print('lumen_net:.device:', self.lumen_net.device) + + + self.extra = extra + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.init_cfg = init_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + self.zero_init_residual = zero_init_residual + self.frozen_stages = frozen_stages + + # stem net + self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1) + self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2) + + in_channels = in_channels * 2 # image channel + lumen segmentation mask channel + + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + 64, + kernel_size=3, + stride=2, + padding=1, + bias=False) + + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + self.conv_cfg, + 64, + 64, + kernel_size=3, + stride=2, + padding=1, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.relu = nn.ReLU(inplace=True) + + self.upsample_cfg = self.extra.get('upsample', { + 'mode': 'nearest', + 'align_corners': None + }) + + # stage 1 + self.stage1_cfg = self.extra['stage1'] + num_channels = self.stage1_cfg['num_channels'][0] + block_type = self.stage1_cfg['block'] + num_blocks = self.stage1_cfg['num_blocks'][0] + + block = self.blocks_dict[block_type] + stage1_out_channels = num_channels * get_expansion(block) + self.layer1 = self._make_layer(block, 64, stage1_out_channels, + num_blocks) + + # stage 2 + self.stage2_cfg = self.extra['stage2'] + num_channels = self.stage2_cfg['num_channels'] + block_type = self.stage2_cfg['block'] + + block = self.blocks_dict[block_type] + num_channels = [ + channel * get_expansion(block) for channel in num_channels + ] + self.transition1 = self._make_transition_layer([stage1_out_channels], + num_channels) + self.stage2, pre_stage_channels = self._make_stage( + self.stage2_cfg, num_channels) + + # stage 3 + self.stage3_cfg = self.extra['stage3'] + num_channels = self.stage3_cfg['num_channels'] + block_type = self.stage3_cfg['block'] + + block = self.blocks_dict[block_type] + num_channels = [ + channel * get_expansion(block) for channel in num_channels + ] + self.transition2 = self._make_transition_layer(pre_stage_channels, + num_channels) + self.stage3, pre_stage_channels = self._make_stage( + self.stage3_cfg, num_channels) + + # stage 4 + self.stage4_cfg = 
self.extra['stage4'] + num_channels = self.stage4_cfg['num_channels'] + block_type = self.stage4_cfg['block'] + + block = self.blocks_dict[block_type] + num_channels = [ + channel * get_expansion(block) for channel in num_channels + ] + self.transition3 = self._make_transition_layer(pre_stage_channels, + num_channels) + + self.stage4, pre_stage_channels = self._make_stage( + self.stage4_cfg, + num_channels, + multiscale_output=self.stage4_cfg.get('multiscale_output', False)) + + self._freeze_stages() + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: the normalization layer named "norm2" """ + return getattr(self, self.norm2_name) + + def _make_transition_layer(self, num_channels_pre_layer, + num_channels_cur_layer): + """Make transition layer.""" + num_branches_cur = len(num_channels_cur_layer) + num_branches_pre = len(num_channels_pre_layer) + + transition_layers = [] + for i in range(num_branches_cur): + if i < num_branches_pre: + if num_channels_cur_layer[i] != num_channels_pre_layer[i]: + transition_layers.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + num_channels_pre_layer[i], + num_channels_cur_layer[i], + kernel_size=3, + stride=1, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + num_channels_cur_layer[i])[1], + nn.ReLU(inplace=True))) + else: + transition_layers.append(None) + else: + conv_downsamples = [] + for j in range(i + 1 - num_branches_pre): + in_channels = num_channels_pre_layer[-1] + out_channels = num_channels_cur_layer[i] \ + if j == i - num_branches_pre else in_channels + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels, + out_channels, + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, out_channels)[1], + nn.ReLU(inplace=True))) + transition_layers.append(nn.Sequential(*conv_downsamples)) + + return nn.ModuleList(transition_layers) + + def _make_layer(self, block, in_channels, out_channels, blocks, stride=1): + """Make layer.""" + downsample = None + if stride != 1 or in_channels != out_channels: + downsample = nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels, + out_channels, + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer(self.norm_cfg, out_channels)[1]) + + layers = [] + layers.append( + block( + in_channels, + out_channels, + stride=stride, + downsample=downsample, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + for _ in range(1, blocks): + layers.append( + block( + out_channels, + out_channels, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + + return nn.Sequential(*layers) + + def _make_stage(self, layer_config, in_channels, multiscale_output=True): + """Make stage.""" + num_modules = layer_config['num_modules'] + num_branches = layer_config['num_branches'] + num_blocks = layer_config['num_blocks'] + num_channels = layer_config['num_channels'] + block = self.blocks_dict[layer_config['block']] + + hr_modules = [] + for i in range(num_modules): + # multi_scale_output is only used for the last module + if not multiscale_output and i == num_modules - 1: + reset_multiscale_output = False + else: + reset_multiscale_output = True + + hr_modules.append( + HRModule( + num_branches, + block, + num_blocks, + in_channels, + num_channels, + reset_multiscale_output, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + 
conv_cfg=self.conv_cfg, + upsample_cfg=self.upsample_cfg)) + + in_channels = hr_modules[-1].in_channels + + return nn.Sequential(*hr_modules), in_channels + + def _freeze_stages(self): + """Freeze parameters.""" + if self.frozen_stages >= 0: + self.norm1.eval() + self.norm2.eval() + + for m in [self.conv1, self.norm1, self.conv2, self.norm2]: + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + if i == 1: + m = getattr(self, 'layer1') + else: + m = getattr(self, f'stage{i}') + + m.eval() + for param in m.parameters(): + param.requires_grad = False + + if i < 4: + m = getattr(self, f'transition{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self): + """Initialize the weights in backbone.""" + super(OCTSB1, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress zero_init_residual if use pretrained model. + return + + if self.zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + constant_init(m.norm3, 0) + elif isinstance(m, BasicBlock): + constant_init(m.norm2, 0) + + def forward(self, x): + """Forward function.""" + # print('x:', x) + # print('hrnet input x.shape:', x.shape) + # print('hrnet input x.device:', x.device) + with torch.no_grad(): + m = self.lumen_net(x) #.to(x.device) + # print('m:', m) + # print('Segmentation m.shape:', m.shape) + # print('Segmentation m.device:', m.device) + x = torch.cat([x, m], dim=1).detach() + # print('x+m:', x) + # print('x+m.shape:', x.shape) + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.norm2(x) + x = self.relu(x) + x = self.layer1(x) + + x_list = [] + for i in range(self.stage2_cfg['num_branches']): + if self.transition1[i] is not None: + x_list.append(self.transition1[i](x)) + else: + x_list.append(x) + y_list = self.stage2(x_list) + + x_list = [] + for i in range(self.stage3_cfg['num_branches']): + if self.transition2[i] is not None: + x_list.append(self.transition2[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage3(x_list) + + x_list = [] + for i in range(self.stage4_cfg['num_branches']): + if self.transition3[i] is not None: + x_list.append(self.transition3[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage4(x_list) + + return tuple(y_list) + + def train(self, mode=True): + """Convert the model into training mode.""" + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() + + # def forward_lumen(self, x): + # p = self.lumen_net(x) + # p = F.softmax(p, dim=1) + # m = p >= 0.5 + # return m \ No newline at end of file diff --git a/mmpose/models/backbones/octsb2.py b/mmpose/models/backbones/octsb2.py new file mode 100644 index 0000000000..bf86f66526 --- /dev/null +++ b/mmpose/models/backbones/octsb2.py @@ -0,0 +1,656 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
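# --- A minimal standalone sketch of the mask concatenation in OCTSB1.forward above ---
# The real model runs a frozen TorchScript lumen-segmentation network; a dummy 1x1
# conv stands in for it here, just to show why the stem's in_channels is doubled
# (image channels + mask channels).
import torch
import torch.nn as nn

dummy_lumen_net = nn.Conv2d(3, 3, kernel_size=1)   # stand-in for torch.jit.load(...)
img = torch.rand(1, 3, 256, 256)
with torch.no_grad():
    mask = dummy_lumen_net(img)
stem_input = torch.cat([img, mask], dim=1).detach()
print(stem_input.shape)  # torch.Size([1, 6, 256, 256]) -> conv1 expects in_channels * 2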
+import copy + +import torch.nn as nn +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmengine.model import BaseModule, constant_init +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone +from .resnet import BasicBlock, Bottleneck, get_expansion + +import os.path as osp + +import torch +import torch.nn.functional as F + +# HRNet + Guidewire segmentation with x-axis along masked + +class HRModule(BaseModule): + """High-Resolution Module for HRNet. + + In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange + is in this module. + """ + + def __init__(self, + num_branches, + blocks, + num_blocks, + in_channels, + num_channels, + multiscale_output=False, + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + upsample_cfg=dict(mode='nearest', align_corners=None), + init_cfg=None): + + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self._check_branches(num_branches, num_blocks, in_channels, + num_channels) + + self.in_channels = in_channels + self.num_branches = num_branches + + self.multiscale_output = multiscale_output + self.norm_cfg = norm_cfg + self.conv_cfg = conv_cfg + self.upsample_cfg = upsample_cfg + self.with_cp = with_cp + self.branches = self._make_branches(num_branches, blocks, num_blocks, + num_channels) + self.fuse_layers = self._make_fuse_layers() + self.relu = nn.ReLU(inplace=True) + + @staticmethod + def _check_branches(num_branches, num_blocks, in_channels, num_channels): + """Check input to avoid ValueError.""" + if num_branches != len(num_blocks): + error_msg = f'NUM_BRANCHES({num_branches}) ' \ + f'!= NUM_BLOCKS({len(num_blocks)})' + raise ValueError(error_msg) + + if num_branches != len(num_channels): + error_msg = f'NUM_BRANCHES({num_branches}) ' \ + f'!= NUM_CHANNELS({len(num_channels)})' + raise ValueError(error_msg) + + if num_branches != len(in_channels): + error_msg = f'NUM_BRANCHES({num_branches}) ' \ + f'!= NUM_INCHANNELS({len(in_channels)})' + raise ValueError(error_msg) + + def _make_one_branch(self, + branch_index, + block, + num_blocks, + num_channels, + stride=1): + """Make one branch.""" + downsample = None + if stride != 1 or \ + self.in_channels[branch_index] != \ + num_channels[branch_index] * get_expansion(block): + downsample = nn.Sequential( + build_conv_layer( + self.conv_cfg, + self.in_channels[branch_index], + num_channels[branch_index] * get_expansion(block), + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer( + self.norm_cfg, + num_channels[branch_index] * get_expansion(block))[1]) + + layers = [] + layers.append( + block( + self.in_channels[branch_index], + num_channels[branch_index] * get_expansion(block), + stride=stride, + downsample=downsample, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + self.in_channels[branch_index] = \ + num_channels[branch_index] * get_expansion(block) + for _ in range(1, num_blocks[branch_index]): + layers.append( + block( + self.in_channels[branch_index], + num_channels[branch_index] * get_expansion(block), + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + + return nn.Sequential(*layers) + + def _make_branches(self, num_branches, block, num_blocks, num_channels): + """Make branches.""" + branches = [] + + for i in range(num_branches): + branches.append( + self._make_one_branch(i, block, num_blocks, num_channels)) + + return nn.ModuleList(branches) + + def 
_make_fuse_layers(self): + """Make fuse layer.""" + if self.num_branches == 1: + return None + + num_branches = self.num_branches + in_channels = self.in_channels + fuse_layers = [] + num_out_branches = num_branches if self.multiscale_output else 1 + + for i in range(num_out_branches): + fuse_layer = [] + for j in range(num_branches): + if j > i: + fuse_layer.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[i], + kernel_size=1, + stride=1, + padding=0, + bias=False), + build_norm_layer(self.norm_cfg, in_channels[i])[1], + nn.Upsample( + scale_factor=2**(j - i), + mode=self.upsample_cfg['mode'], + align_corners=self. + upsample_cfg['align_corners']))) + elif j == i: + fuse_layer.append(None) + else: + conv_downsamples = [] + for k in range(i - j): + if k == i - j - 1: + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[i], + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[i])[1])) + else: + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[j], + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[j])[1], + nn.ReLU(inplace=True))) + fuse_layer.append(nn.Sequential(*conv_downsamples)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def forward(self, x): + """Forward function.""" + if self.num_branches == 1: + return [self.branches[0](x[0])] + + for i in range(self.num_branches): + x[i] = self.branches[i](x[i]) + + x_fuse = [] + for i in range(len(self.fuse_layers)): + y = 0 + for j in range(self.num_branches): + if i == j: + y += x[j] + else: + y += self.fuse_layers[i][j](x[j]) + x_fuse.append(self.relu(y)) + return x_fuse + + +@MODELS.register_module() +class OCTSB2(BaseBackbone): + """HRNet backbone. + + `High-Resolution Representations for Labeling Pixels and Regions + `__ + + Args: + extra (dict): detailed configuration for each stage of HRNet. + in_channels (int): Number of input image channels. Default: 3. + conv_cfg (dict): dictionary to construct and config conv layer. + norm_cfg (dict): dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + zero_init_residual (bool): whether to use zero init for last norm layer + in resblocks to let them behave as identity. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + init_cfg (dict or list[dict], optional): Initialization config dict. 
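# --- Enumeration of the fusion rule in HRModule._make_fuse_layers above ---
# Assuming 4 branches and multiscale_output=True: a coarser branch j > i is aligned
# to branch i with a 1x1 conv plus a 2**(j - i) upsample, a finer branch j < i with
# a chain of (i - j) stride-2 3x3 convs, and j == i is kept as identity.
num_branches = 4
for i in range(num_branches):        # output branch
    for j in range(num_branches):    # input branch
        if j > i:
            print(f'branch {j} -> {i}: 1x1 conv + upsample x{2 ** (j - i)}')
        elif j < i:
            print(f'branch {j} -> {i}: {i - j} stride-2 3x3 conv(s)')
        else:
            print(f'branch {j} -> {i}: identity')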
+ Default: + ``[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + + Example: + >>> from mmpose.models import HRNet + >>> import torch + >>> extra = dict( + >>> stage1=dict( + >>> num_modules=1, + >>> num_branches=1, + >>> block='BOTTLENECK', + >>> num_blocks=(4, ), + >>> num_channels=(64, )), + >>> stage2=dict( + >>> num_modules=1, + >>> num_branches=2, + >>> block='BASIC', + >>> num_blocks=(4, 4), + >>> num_channels=(32, 64)), + >>> stage3=dict( + >>> num_modules=4, + >>> num_branches=3, + >>> block='BASIC', + >>> num_blocks=(4, 4, 4), + >>> num_channels=(32, 64, 128)), + >>> stage4=dict( + >>> num_modules=3, + >>> num_branches=4, + >>> block='BASIC', + >>> num_blocks=(4, 4, 4, 4), + >>> num_channels=(32, 64, 128, 256))) + >>> self = HRNet(extra, in_channels=1) + >>> self.eval() + >>> inputs = torch.rand(1, 1, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 32, 8, 8) + """ + + blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck} + + def __init__( + self, + extra, + lumen_cfg, + in_channels=3, + conv_cfg=None, + norm_cfg=dict(type='BN'), + norm_eval=False, + with_cp=False, + zero_init_residual=False, + frozen_stages=-1, + init_cfg=[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) + ] + ): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + + + # Load pretrained lumen segmentation model + # lumen_config_path = lumen_cfg['config_path'] + # lumen_config_path = lumen_cfg['config_path'] + lumen_checkpoint_path = lumen_cfg['checkpoint_path'] + # print('lumen config_path:', osp.abspath(lumen_config_path)) + print('lumen checkpoint_path:', osp.abspath(lumen_checkpoint_path)) + print('Initializing lumen segmentation model') + + self.guidewire_net = torch.jit.load(lumen_checkpoint_path, map_location=torch.device('cuda')) + self.guidewire_net.eval() + if torch.cuda.is_available(): self.guidewire_net = self.guidewire_net.to('cuda') + # print('guidewire_net:.device:', self.guidewire_net.device) + + + self.extra = extra + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.init_cfg = init_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + self.zero_init_residual = zero_init_residual + self.frozen_stages = frozen_stages + + # stem net + self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1) + self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2) + + in_channels = in_channels * 2 # image channel + lumen segmentation mask channel + + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + 64, + kernel_size=3, + stride=2, + padding=1, + bias=False) + + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + self.conv_cfg, + 64, + 64, + kernel_size=3, + stride=2, + padding=1, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.relu = nn.ReLU(inplace=True) + + self.upsample_cfg = self.extra.get('upsample', { + 'mode': 'nearest', + 'align_corners': None + }) + + # stage 1 + self.stage1_cfg = self.extra['stage1'] + num_channels = self.stage1_cfg['num_channels'][0] + block_type = self.stage1_cfg['block'] + num_blocks = self.stage1_cfg['num_blocks'][0] + + block = self.blocks_dict[block_type] + stage1_out_channels = num_channels * get_expansion(block) + self.layer1 = self._make_layer(block, 64, stage1_out_channels, + 
num_blocks) + + # stage 2 + self.stage2_cfg = self.extra['stage2'] + num_channels = self.stage2_cfg['num_channels'] + block_type = self.stage2_cfg['block'] + + block = self.blocks_dict[block_type] + num_channels = [ + channel * get_expansion(block) for channel in num_channels + ] + self.transition1 = self._make_transition_layer([stage1_out_channels], + num_channels) + self.stage2, pre_stage_channels = self._make_stage( + self.stage2_cfg, num_channels) + + # stage 3 + self.stage3_cfg = self.extra['stage3'] + num_channels = self.stage3_cfg['num_channels'] + block_type = self.stage3_cfg['block'] + + block = self.blocks_dict[block_type] + num_channels = [ + channel * get_expansion(block) for channel in num_channels + ] + self.transition2 = self._make_transition_layer(pre_stage_channels, + num_channels) + self.stage3, pre_stage_channels = self._make_stage( + self.stage3_cfg, num_channels) + + # stage 4 + self.stage4_cfg = self.extra['stage4'] + num_channels = self.stage4_cfg['num_channels'] + block_type = self.stage4_cfg['block'] + + block = self.blocks_dict[block_type] + num_channels = [ + channel * get_expansion(block) for channel in num_channels + ] + self.transition3 = self._make_transition_layer(pre_stage_channels, + num_channels) + + self.stage4, pre_stage_channels = self._make_stage( + self.stage4_cfg, + num_channels, + multiscale_output=self.stage4_cfg.get('multiscale_output', False)) + + self._freeze_stages() + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: the normalization layer named "norm2" """ + return getattr(self, self.norm2_name) + + def _make_transition_layer(self, num_channels_pre_layer, + num_channels_cur_layer): + """Make transition layer.""" + num_branches_cur = len(num_channels_cur_layer) + num_branches_pre = len(num_channels_pre_layer) + + transition_layers = [] + for i in range(num_branches_cur): + if i < num_branches_pre: + if num_channels_cur_layer[i] != num_channels_pre_layer[i]: + transition_layers.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + num_channels_pre_layer[i], + num_channels_cur_layer[i], + kernel_size=3, + stride=1, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + num_channels_cur_layer[i])[1], + nn.ReLU(inplace=True))) + else: + transition_layers.append(None) + else: + conv_downsamples = [] + for j in range(i + 1 - num_branches_pre): + in_channels = num_channels_pre_layer[-1] + out_channels = num_channels_cur_layer[i] \ + if j == i - num_branches_pre else in_channels + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels, + out_channels, + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, out_channels)[1], + nn.ReLU(inplace=True))) + transition_layers.append(nn.Sequential(*conv_downsamples)) + + return nn.ModuleList(transition_layers) + + def _make_layer(self, block, in_channels, out_channels, blocks, stride=1): + """Make layer.""" + downsample = None + if stride != 1 or in_channels != out_channels: + downsample = nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels, + out_channels, + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer(self.norm_cfg, out_channels)[1]) + + layers = [] + layers.append( + block( + in_channels, + out_channels, + stride=stride, + downsample=downsample, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + for _ in range(1, blocks): + 
layers.append( + block( + out_channels, + out_channels, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + + return nn.Sequential(*layers) + + def _make_stage(self, layer_config, in_channels, multiscale_output=True): + """Make stage.""" + num_modules = layer_config['num_modules'] + num_branches = layer_config['num_branches'] + num_blocks = layer_config['num_blocks'] + num_channels = layer_config['num_channels'] + block = self.blocks_dict[layer_config['block']] + + hr_modules = [] + for i in range(num_modules): + # multi_scale_output is only used for the last module + if not multiscale_output and i == num_modules - 1: + reset_multiscale_output = False + else: + reset_multiscale_output = True + + hr_modules.append( + HRModule( + num_branches, + block, + num_blocks, + in_channels, + num_channels, + reset_multiscale_output, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg, + upsample_cfg=self.upsample_cfg)) + + in_channels = hr_modules[-1].in_channels + + return nn.Sequential(*hr_modules), in_channels + + def _freeze_stages(self): + """Freeze parameters.""" + if self.frozen_stages >= 0: + self.norm1.eval() + self.norm2.eval() + + for m in [self.conv1, self.norm1, self.conv2, self.norm2]: + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + if i == 1: + m = getattr(self, 'layer1') + else: + m = getattr(self, f'stage{i}') + + m.eval() + for param in m.parameters(): + param.requires_grad = False + + if i < 4: + m = getattr(self, f'transition{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self): + """Initialize the weights in backbone.""" + super(OCTSB2, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress zero_init_residual if use pretrained model. 
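
Aside (an illustrative sketch, not part of the patch): the OCTSB2 constructor above loads a frozen TorchScript lumen/guidewire model, and the forward pass further down concatenates its prediction with the input image along the channel axis, which is why the stem is built with in_channels * 2. DummySegNet below is a hypothetical stand-in for that model; only the concatenation pattern is the point.

import torch
import torch.nn as nn


class DummySegNet(nn.Module):
    """Stand-in for the frozen TorchScript lumen/guidewire model (illustration only)."""

    def forward(self, x):
        # one mask channel per input channel, same spatial size as the input
        return (x > 0.5).float()


seg_net = DummySegNet().eval()            # the real model is loaded with torch.jit.load(...)
img = torch.rand(2, 1, 64, 64)            # e.g. single-channel frames

with torch.no_grad():                     # the segmentation helper is never trained
    mask = seg_net(img)

stem_in = torch.cat([img, mask], dim=1).detach()
print(stem_in.shape)                      # torch.Size([2, 2, 64, 64]) -> in_channels * 2
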
+ return + + if self.zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + constant_init(m.norm3, 0) + elif isinstance(m, BasicBlock): + constant_init(m.norm2, 0) + + def forward(self, x): + """Forward function.""" + # print('x:', x) + # print('hrnet input x.shape:', x.shape) + # print('hrnet input x.device:', x.device) + with torch.no_grad(): + m = self.guidewire_net(x) #.to(x.device) + # print('m:', m) + # print('Segmentation m.shape:', m.shape) + # print('Segmentation m.device:', m.device) + x = torch.cat([x, m], dim=1).detach() + # print('x+m:', x) + # print('x+m.shape:', x.shape) + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.norm2(x) + x = self.relu(x) + x = self.layer1(x) + + x_list = [] + for i in range(self.stage2_cfg['num_branches']): + if self.transition1[i] is not None: + x_list.append(self.transition1[i](x)) + else: + x_list.append(x) + y_list = self.stage2(x_list) + + x_list = [] + for i in range(self.stage3_cfg['num_branches']): + if self.transition2[i] is not None: + x_list.append(self.transition2[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage3(x_list) + + x_list = [] + for i in range(self.stage4_cfg['num_branches']): + if self.transition3[i] is not None: + x_list.append(self.transition3[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage4(x_list) + + return tuple(y_list) + + def train(self, mode=True): + """Convert the model into training mode.""" + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() + + def forward_guidewire(self, x): + p = self.guidewire_net(x) + p = p.sigmoid() + m = p >= 0.3 + nz = m.nonzero() + x_ = nz[:, 3].unique() + m[:, :, :, x_] = 1 + m = m.float() + return m \ No newline at end of file diff --git a/mmpose/models/backbones/pvt.py b/mmpose/models/backbones/pvt.py index 3f2b649548..c953ee354f 100644 --- a/mmpose/models/backbones/pvt.py +++ b/mmpose/models/backbones/pvt.py @@ -1,569 +1,569 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import Conv2d, build_activation_layer, build_norm_layer -from mmcv.cnn.bricks.drop import build_dropout -from mmcv.cnn.bricks.transformer import MultiheadAttention -from mmengine.model import BaseModule, ModuleList, Sequential -from mmengine.model.weight_init import trunc_normal_ -from mmengine.runner import load_state_dict -from mmengine.utils import to_2tuple - -from mmpose.registry import MODELS -from ...utils import get_root_logger -from ..utils import PatchEmbed, nchw_to_nlc, nlc_to_nchw, pvt_convert -from .utils import get_state_dict - - -class MixFFN(BaseModule): - """An implementation of MixFFN of PVT. - - The differences between MixFFN & FFN: - 1. Use 1X1 Conv to replace Linear layer. - 2. Introduce 3X3 Depth-wise Conv to encode positional information. - - Args: - embed_dims (int): The feature dimension. Same as - `MultiheadAttention`. - feedforward_channels (int): The hidden dimension of FFNs. - act_cfg (dict, optional): The activation config for FFNs. - Default: dict(type='GELU'). - ffn_drop (float, optional): Probability of an element to be - zeroed in FFN. Default 0.0. - dropout_layer (obj:`ConfigDict`): The dropout_layer used - when adding the shortcut. - Default: None. - use_conv (bool): If True, add 3x3 DWConv between two Linear layers. - Defaults: False. 
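
A note on forward_guidewire above (illustrative sketch, not part of the patch, with made-up values): the sigmoid output is thresholded at 0.3, the x-coordinates of all positive pixels are collected, and those entire image columns are set to 1, which is what the "x-axis" comment at the top of the new backbone file refers to.

import torch

p = torch.zeros(1, 1, 4, 6)        # (B, C, H, W) guidewire scores, already in [0, 1]
p[0, 0, 2, 1] = 0.9                # one confident pixel in column x = 1
p[0, 0, 3, 4] = 0.5                # another one in column x = 4

m = p >= 0.3                       # hard mask
cols = m.nonzero()[:, 3].unique()  # x-coordinates that contain the guidewire
m[:, :, :, cols] = True            # mask the full image columns
print(m[0, 0].int())               # every row of columns 1 and 4 is now 1
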
- init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - embed_dims, - feedforward_channels, - act_cfg=dict(type='GELU'), - ffn_drop=0., - dropout_layer=None, - use_conv=False, - init_cfg=None): - super(MixFFN, self).__init__(init_cfg=init_cfg) - - self.embed_dims = embed_dims - self.feedforward_channels = feedforward_channels - self.act_cfg = act_cfg - activate = build_activation_layer(act_cfg) - - in_channels = embed_dims - fc1 = Conv2d( - in_channels=in_channels, - out_channels=feedforward_channels, - kernel_size=1, - stride=1, - bias=True) - if use_conv: - # 3x3 depth wise conv to provide positional encode information - dw_conv = Conv2d( - in_channels=feedforward_channels, - out_channels=feedforward_channels, - kernel_size=3, - stride=1, - padding=(3 - 1) // 2, - bias=True, - groups=feedforward_channels) - fc2 = Conv2d( - in_channels=feedforward_channels, - out_channels=in_channels, - kernel_size=1, - stride=1, - bias=True) - drop = nn.Dropout(ffn_drop) - layers = [fc1, activate, drop, fc2, drop] - if use_conv: - layers.insert(1, dw_conv) - self.layers = Sequential(*layers) - self.dropout_layer = build_dropout( - dropout_layer) if dropout_layer else torch.nn.Identity() - - def forward(self, x, hw_shape, identity=None): - out = nlc_to_nchw(x, hw_shape) - out = self.layers(out) - out = nchw_to_nlc(out) - if identity is None: - identity = x - return identity + self.dropout_layer(out) - - -class SpatialReductionAttention(MultiheadAttention): - """An implementation of Spatial Reduction Attention of PVT. - - This module is modified from MultiheadAttention which is a module from - mmcv.cnn.bricks.transformer. - - Args: - embed_dims (int): The embedding dimension. - num_heads (int): Parallel attention heads. - attn_drop (float): A Dropout layer on attn_output_weights. - Default: 0.0. - proj_drop (float): A Dropout layer after `nn.MultiheadAttention`. - Default: 0.0. - dropout_layer (obj:`ConfigDict`): The dropout_layer used - when adding the shortcut. Default: None. - batch_first (bool): Key, Query and Value are shape of - (batch, n, embed_dim) - or (n, batch, embed_dim). Default: False. - qkv_bias (bool): enable bias for qkv if True. Default: True. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='LN'). - sr_ratio (int): The ratio of spatial reduction of Spatial Reduction - Attention of PVT. Default: 1. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - embed_dims, - num_heads, - attn_drop=0., - proj_drop=0., - dropout_layer=None, - batch_first=True, - qkv_bias=True, - norm_cfg=dict(type='LN'), - sr_ratio=1, - init_cfg=None): - super().__init__( - embed_dims, - num_heads, - attn_drop, - proj_drop, - batch_first=batch_first, - dropout_layer=dropout_layer, - bias=qkv_bias, - init_cfg=init_cfg) - - self.sr_ratio = sr_ratio - if sr_ratio > 1: - self.sr = Conv2d( - in_channels=embed_dims, - out_channels=embed_dims, - kernel_size=sr_ratio, - stride=sr_ratio) - # The ret[0] of build_norm_layer is norm name. - self.norm = build_norm_layer(norm_cfg, embed_dims)[1] - - # handle the BC-breaking from https://github.com/open-mmlab/mmcv/pull/1418 # noqa - from mmpose import digit_version, mmcv_version - if mmcv_version < digit_version('1.3.17'): - warnings.warn('The legacy version of forward function in' - 'SpatialReductionAttention is deprecated in' - 'mmcv>=1.3.17 and will no longer support in the' - 'future. 
Please upgrade your mmcv.') - self.forward = self.legacy_forward - - def forward(self, x, hw_shape, identity=None): - - x_q = x - if self.sr_ratio > 1: - x_kv = nlc_to_nchw(x, hw_shape) - x_kv = self.sr(x_kv) - x_kv = nchw_to_nlc(x_kv) - x_kv = self.norm(x_kv) - else: - x_kv = x - - if identity is None: - identity = x_q - - # Because the dataflow('key', 'query', 'value') of - # ``torch.nn.MultiheadAttention`` is (num_query, batch, - # embed_dims), We should adjust the shape of dataflow from - # batch_first (batch, num_query, embed_dims) to num_query_first - # (num_query ,batch, embed_dims), and recover ``attn_output`` - # from num_query_first to batch_first. - if self.batch_first: - x_q = x_q.transpose(0, 1) - x_kv = x_kv.transpose(0, 1) - - out = self.attn(query=x_q, key=x_kv, value=x_kv)[0] - - if self.batch_first: - out = out.transpose(0, 1) - - return identity + self.dropout_layer(self.proj_drop(out)) - - def legacy_forward(self, x, hw_shape, identity=None): - """multi head attention forward in mmcv version < 1.3.17.""" - x_q = x - if self.sr_ratio > 1: - x_kv = nlc_to_nchw(x, hw_shape) - x_kv = self.sr(x_kv) - x_kv = nchw_to_nlc(x_kv) - x_kv = self.norm(x_kv) - else: - x_kv = x - - if identity is None: - identity = x_q - - out = self.attn(query=x_q, key=x_kv, value=x_kv)[0] - - return identity + self.dropout_layer(self.proj_drop(out)) - - -class PVTEncoderLayer(BaseModule): - """Implements one encoder layer in PVT. - - Args: - embed_dims (int): The feature dimension. - num_heads (int): Parallel attention heads. - feedforward_channels (int): The hidden dimension for FFNs. - drop_rate (float): Probability of an element to be zeroed. - after the feed forward layer. Default: 0.0. - attn_drop_rate (float): The drop out rate for attention layer. - Default: 0.0. - drop_path_rate (float): stochastic depth rate. Default: 0.0. - qkv_bias (bool): enable bias for qkv if True. - Default: True. - act_cfg (dict): The activation config for FFNs. - Default: dict(type='GELU'). - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='LN'). - sr_ratio (int): The ratio of spatial reduction of Spatial Reduction - Attention of PVT. Default: 1. - use_conv_ffn (bool): If True, use Convolutional FFN to replace FFN. - Default: False. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - embed_dims, - num_heads, - feedforward_channels, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0., - qkv_bias=True, - act_cfg=dict(type='GELU'), - norm_cfg=dict(type='LN'), - sr_ratio=1, - use_conv_ffn=False, - init_cfg=None): - super(PVTEncoderLayer, self).__init__(init_cfg=init_cfg) - - # The ret[0] of build_norm_layer is norm name. - self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] - - self.attn = SpatialReductionAttention( - embed_dims=embed_dims, - num_heads=num_heads, - attn_drop=attn_drop_rate, - proj_drop=drop_rate, - dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), - qkv_bias=qkv_bias, - norm_cfg=norm_cfg, - sr_ratio=sr_ratio) - - # The ret[0] of build_norm_layer is norm name. 
- self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] - - self.ffn = MixFFN( - embed_dims=embed_dims, - feedforward_channels=feedforward_channels, - ffn_drop=drop_rate, - dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), - use_conv=use_conv_ffn, - act_cfg=act_cfg) - - def forward(self, x, hw_shape): - x = self.attn(self.norm1(x), hw_shape, identity=x) - x = self.ffn(self.norm2(x), hw_shape, identity=x) - - return x - - -class AbsolutePositionEmbedding(BaseModule): - """An implementation of the absolute position embedding in PVT. - - Args: - pos_shape (int): The shape of the absolute position embedding. - pos_dim (int): The dimension of the absolute position embedding. - drop_rate (float): Probability of an element to be zeroed. - Default: 0.0. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None. - """ - - def __init__(self, pos_shape, pos_dim, drop_rate=0., init_cfg=None): - super().__init__(init_cfg=init_cfg) - - if isinstance(pos_shape, int): - pos_shape = to_2tuple(pos_shape) - elif isinstance(pos_shape, tuple): - if len(pos_shape) == 1: - pos_shape = to_2tuple(pos_shape[0]) - assert len(pos_shape) == 2, \ - f'The size of image should have length 1 or 2, ' \ - f'but got {len(pos_shape)}' - self.pos_shape = pos_shape - self.pos_dim = pos_dim - - self.pos_embed = nn.Parameter( - torch.zeros(1, pos_shape[0] * pos_shape[1], pos_dim)) - self.drop = nn.Dropout(p=drop_rate) - - def init_weights(self): - trunc_normal_(self.pos_embed, std=0.02) - - def resize_pos_embed(self, pos_embed, input_shape, mode='bilinear'): - """Resize pos_embed weights. - - Resize pos_embed using bilinear interpolate method. - - Args: - pos_embed (torch.Tensor): Position embedding weights. - input_shape (tuple): Tuple for (downsampled input image height, - downsampled input image width). - mode (str): Algorithm used for upsampling: - ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` | - ``'trilinear'``. Default: ``'bilinear'``. - - Return: - torch.Tensor: The resized pos_embed of shape [B, L_new, C]. - """ - assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]' - pos_h, pos_w = self.pos_shape - pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):] - pos_embed_weight = pos_embed_weight.reshape( - 1, pos_h, pos_w, self.pos_dim).permute(0, 3, 1, 2).contiguous() - pos_embed_weight = F.interpolate( - pos_embed_weight, size=input_shape, mode=mode) - pos_embed_weight = torch.flatten(pos_embed_weight, - 2).transpose(1, 2).contiguous() - pos_embed = pos_embed_weight - - return pos_embed - - def forward(self, x, hw_shape, mode='bilinear'): - pos_embed = self.resize_pos_embed(self.pos_embed, hw_shape, mode) - return self.drop(x + pos_embed) - - -@MODELS.register_module() -class PyramidVisionTransformer(BaseModule): - """Pyramid Vision Transformer (PVT) - - Implementation of `Pyramid Vision Transformer: A Versatile Backbone for - Dense Prediction without Convolutions - `_. - - Args: - pretrain_img_size (int | tuple[int]): The size of input image when - pretrain. Defaults: 224. - in_channels (int): Number of input channels. Default: 3. - embed_dims (int): Embedding dimension. Default: 64. - num_stags (int): The num of stages. Default: 4. - num_layers (Sequence[int]): The layer number of each transformer encode - layer. Default: [3, 4, 6, 3]. - num_heads (Sequence[int]): The attention heads of each transformer - encode layer. Default: [1, 2, 5, 8]. - patch_sizes (Sequence[int]): The patch_size of each patch embedding. - Default: [4, 2, 2, 2]. 
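
For reference, a small sketch (not from the patch) of what AbsolutePositionEmbedding.resize_pos_embed does: the stored (1, H*W, C) table is viewed as a C-channel grid, resized to the current feature-map size, and flattened back. The grid sizes and the explicit align_corners=False below are illustrative choices.

import torch
import torch.nn.functional as F

pos_h = pos_w = 7                       # grid the embedding was created for
pos_dim = 64
pos_embed = torch.zeros(1, pos_h * pos_w, pos_dim)

new_hw = (10, 12)                       # feature size seen at run time
weight = pos_embed.reshape(1, pos_h, pos_w, pos_dim).permute(0, 3, 1, 2)
weight = F.interpolate(weight, size=new_hw, mode='bilinear', align_corners=False)
resized = weight.flatten(2).transpose(1, 2)
print(resized.shape)                    # torch.Size([1, 120, 64]) == (1, new_h * new_w, C)
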
- strides (Sequence[int]): The stride of each patch embedding. - Default: [4, 2, 2, 2]. - paddings (Sequence[int]): The padding of each patch embedding. - Default: [0, 0, 0, 0]. - sr_ratios (Sequence[int]): The spatial reduction rate of each - transformer encode layer. Default: [8, 4, 2, 1]. - out_indices (Sequence[int] | int): Output from which stages. - Default: (0, 1, 2, 3). - mlp_ratios (Sequence[int]): The ratio of the mlp hidden dim to the - embedding dim of each transformer encode layer. - Default: [8, 8, 4, 4]. - qkv_bias (bool): Enable bias for qkv if True. Default: True. - drop_rate (float): Probability of an element to be zeroed. - Default 0.0. - attn_drop_rate (float): The drop out rate for attention layer. - Default 0.0. - drop_path_rate (float): stochastic depth rate. Default 0.1. - use_abs_pos_embed (bool): If True, add absolute position embedding to - the patch embedding. Defaults: True. - use_conv_ffn (bool): If True, use Convolutional FFN to replace FFN. - Default: False. - act_cfg (dict): The activation config for FFNs. - Default: dict(type='GELU'). - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='LN'). - pretrained (str, optional): model pretrained path. Default: None. - convert_weights (bool): The flag indicates whether the - pre-trained model is from the original repo. We may need - to convert some keys to make it compatible. - Default: True. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: - ``[ - dict(type='TruncNormal', std=.02, layer=['Linear']), - dict(type='Constant', val=1, layer=['LayerNorm']), - dict(type='Normal', std=0.01, layer=['Conv2d']) - ]`` - """ - - def __init__(self, - pretrain_img_size=224, - in_channels=3, - embed_dims=64, - num_stages=4, - num_layers=[3, 4, 6, 3], - num_heads=[1, 2, 5, 8], - patch_sizes=[4, 2, 2, 2], - strides=[4, 2, 2, 2], - paddings=[0, 0, 0, 0], - sr_ratios=[8, 4, 2, 1], - out_indices=(0, 1, 2, 3), - mlp_ratios=[8, 8, 4, 4], - qkv_bias=True, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.1, - use_abs_pos_embed=True, - norm_after_stage=False, - use_conv_ffn=False, - act_cfg=dict(type='GELU'), - norm_cfg=dict(type='LN', eps=1e-6), - convert_weights=True, - init_cfg=[ - dict(type='TruncNormal', std=.02, layer=['Linear']), - dict(type='Constant', val=1, layer=['LayerNorm']), - dict(type='Kaiming', layer=['Conv2d']) - ]): - super().__init__(init_cfg=init_cfg) - - self.convert_weights = convert_weights - if isinstance(pretrain_img_size, int): - pretrain_img_size = to_2tuple(pretrain_img_size) - elif isinstance(pretrain_img_size, tuple): - if len(pretrain_img_size) == 1: - pretrain_img_size = to_2tuple(pretrain_img_size[0]) - assert len(pretrain_img_size) == 2, \ - f'The size of image should have length 1 or 2, ' \ - f'but got {len(pretrain_img_size)}' - - self.embed_dims = embed_dims - - self.num_stages = num_stages - self.num_layers = num_layers - self.num_heads = num_heads - self.patch_sizes = patch_sizes - self.strides = strides - self.sr_ratios = sr_ratios - assert num_stages == len(num_layers) == len(num_heads) \ - == len(patch_sizes) == len(strides) == len(sr_ratios) - - self.out_indices = out_indices - assert max(out_indices) < self.num_stages - - # transformer encoder - dpr = [ - x.item() - for x in torch.linspace(0, drop_path_rate, sum(num_layers)) - ] # stochastic num_layer decay rule - - cur = 0 - self.layers = ModuleList() - for i, num_layer in enumerate(num_layers): - embed_dims_i = embed_dims * num_heads[i] - patch_embed = PatchEmbed( - 
in_channels=in_channels, - embed_dims=embed_dims_i, - kernel_size=patch_sizes[i], - stride=strides[i], - padding=paddings[i], - bias=True, - norm_cfg=norm_cfg) - - layers = ModuleList() - if use_abs_pos_embed: - pos_shape = pretrain_img_size // np.prod(patch_sizes[:i + 1]) - pos_embed = AbsolutePositionEmbedding( - pos_shape=pos_shape, - pos_dim=embed_dims_i, - drop_rate=drop_rate) - layers.append(pos_embed) - layers.extend([ - PVTEncoderLayer( - embed_dims=embed_dims_i, - num_heads=num_heads[i], - feedforward_channels=mlp_ratios[i] * embed_dims_i, - drop_rate=drop_rate, - attn_drop_rate=attn_drop_rate, - drop_path_rate=dpr[cur + idx], - qkv_bias=qkv_bias, - act_cfg=act_cfg, - norm_cfg=norm_cfg, - sr_ratio=sr_ratios[i], - use_conv_ffn=use_conv_ffn) for idx in range(num_layer) - ]) - in_channels = embed_dims_i - # The ret[0] of build_norm_layer is norm name. - if norm_after_stage: - norm = build_norm_layer(norm_cfg, embed_dims_i)[1] - else: - norm = nn.Identity() - self.layers.append(ModuleList([patch_embed, layers, norm])) - cur += num_layer - - def init_weights(self): - """Initialize the weights in backbone.""" - - if (isinstance(self.init_cfg, dict) - and self.init_cfg['type'] == 'Pretrained'): - logger = get_root_logger() - state_dict = get_state_dict( - self.init_cfg['checkpoint'], map_location='cpu') - logger.warn(f'Load pre-trained model for ' - f'{self.__class__.__name__} from original repo') - - if self.convert_weights: - # Because pvt backbones are not supported by mmcls, - # so we need to convert pre-trained weights to match this - # implementation. - state_dict = pvt_convert(state_dict) - load_state_dict(self, state_dict, strict=False, logger=logger) - - else: - super(PyramidVisionTransformer, self).init_weights() - - def forward(self, x): - outs = [] - - for i, layer in enumerate(self.layers): - x, hw_shape = layer[0](x) - - for block in layer[1]: - x = block(x, hw_shape) - x = layer[2](x) - x = nlc_to_nchw(x, hw_shape) - if i in self.out_indices: - outs.append(x) - - return outs - - -@MODELS.register_module() -class PyramidVisionTransformerV2(PyramidVisionTransformer): - """Implementation of `PVTv2: Improved Baselines with Pyramid Vision - Transformer `_.""" - - def __init__(self, **kwargs): - super(PyramidVisionTransformerV2, self).__init__( - patch_sizes=[7, 3, 3, 3], - paddings=[3, 1, 1, 1], - use_abs_pos_embed=False, - norm_after_stage=True, - use_conv_ffn=True, - **kwargs) +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import Conv2d, build_activation_layer, build_norm_layer +from mmcv.cnn.bricks.drop import build_dropout +from mmcv.cnn.bricks.transformer import MultiheadAttention +from mmengine.model import BaseModule, ModuleList, Sequential +from mmengine.model.weight_init import trunc_normal_ +from mmengine.runner import load_state_dict +from mmengine.utils import to_2tuple + +from mmpose.registry import MODELS +from ...utils import get_root_logger +from ..utils import PatchEmbed, nchw_to_nlc, nlc_to_nchw, pvt_convert +from .utils import get_state_dict + + +class MixFFN(BaseModule): + """An implementation of MixFFN of PVT. + + The differences between MixFFN & FFN: + 1. Use 1X1 Conv to replace Linear layer. + 2. Introduce 3X3 Depth-wise Conv to encode positional information. + + Args: + embed_dims (int): The feature dimension. Same as + `MultiheadAttention`. + feedforward_channels (int): The hidden dimension of FFNs. 
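
A compact sketch (not from the patch) of the MixFFN computation this docstring describes: the token sequence is reshaped to an image layout so that 1x1 convolutions replace the linear layers, and in the use_conv case a 3x3 depth-wise convolution injects positional information. The dimensions below are arbitrary.

import torch
import torch.nn as nn

B, H, W, C, hidden = 2, 8, 8, 32, 128
x = torch.rand(B, H * W, C)                       # (B, L, C) tokens

ffn = nn.Sequential(
    nn.Conv2d(C, hidden, 1),                      # "fc1" as a 1x1 conv
    nn.Conv2d(hidden, hidden, 3, padding=1,
              groups=hidden),                     # 3x3 depth-wise conv (use_conv=True)
    nn.GELU(),
    nn.Dropout(0.),
    nn.Conv2d(hidden, C, 1),                      # "fc2"
    nn.Dropout(0.),
)

out = ffn(x.transpose(1, 2).reshape(B, C, H, W))  # nlc -> nchw
out = out.flatten(2).transpose(1, 2)              # nchw -> nlc
print(out.shape)                                  # torch.Size([2, 64, 32])
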
+ act_cfg (dict, optional): The activation config for FFNs. + Default: dict(type='GELU'). + ffn_drop (float, optional): Probability of an element to be + zeroed in FFN. Default 0.0. + dropout_layer (obj:`ConfigDict`): The dropout_layer used + when adding the shortcut. + Default: None. + use_conv (bool): If True, add 3x3 DWConv between two Linear layers. + Defaults: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + embed_dims, + feedforward_channels, + act_cfg=dict(type='GELU'), + ffn_drop=0., + dropout_layer=None, + use_conv=False, + init_cfg=None): + super(MixFFN, self).__init__(init_cfg=init_cfg) + + self.embed_dims = embed_dims + self.feedforward_channels = feedforward_channels + self.act_cfg = act_cfg + activate = build_activation_layer(act_cfg) + + in_channels = embed_dims + fc1 = Conv2d( + in_channels=in_channels, + out_channels=feedforward_channels, + kernel_size=1, + stride=1, + bias=True) + if use_conv: + # 3x3 depth wise conv to provide positional encode information + dw_conv = Conv2d( + in_channels=feedforward_channels, + out_channels=feedforward_channels, + kernel_size=3, + stride=1, + padding=(3 - 1) // 2, + bias=True, + groups=feedforward_channels) + fc2 = Conv2d( + in_channels=feedforward_channels, + out_channels=in_channels, + kernel_size=1, + stride=1, + bias=True) + drop = nn.Dropout(ffn_drop) + layers = [fc1, activate, drop, fc2, drop] + if use_conv: + layers.insert(1, dw_conv) + self.layers = Sequential(*layers) + self.dropout_layer = build_dropout( + dropout_layer) if dropout_layer else torch.nn.Identity() + + def forward(self, x, hw_shape, identity=None): + out = nlc_to_nchw(x, hw_shape) + out = self.layers(out) + out = nchw_to_nlc(out) + if identity is None: + identity = x + return identity + self.dropout_layer(out) + + +class SpatialReductionAttention(MultiheadAttention): + """An implementation of Spatial Reduction Attention of PVT. + + This module is modified from MultiheadAttention which is a module from + mmcv.cnn.bricks.transformer. + + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. + attn_drop (float): A Dropout layer on attn_output_weights. + Default: 0.0. + proj_drop (float): A Dropout layer after `nn.MultiheadAttention`. + Default: 0.0. + dropout_layer (obj:`ConfigDict`): The dropout_layer used + when adding the shortcut. Default: None. + batch_first (bool): Key, Query and Value are shape of + (batch, n, embed_dim) + or (n, batch, embed_dim). Default: False. + qkv_bias (bool): enable bias for qkv if True. Default: True. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + sr_ratio (int): The ratio of spatial reduction of Spatial Reduction + Attention of PVT. Default: 1. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + embed_dims, + num_heads, + attn_drop=0., + proj_drop=0., + dropout_layer=None, + batch_first=True, + qkv_bias=True, + norm_cfg=dict(type='LN'), + sr_ratio=1, + init_cfg=None): + super().__init__( + embed_dims, + num_heads, + attn_drop, + proj_drop, + batch_first=batch_first, + dropout_layer=dropout_layer, + bias=qkv_bias, + init_cfg=init_cfg) + + self.sr_ratio = sr_ratio + if sr_ratio > 1: + self.sr = Conv2d( + in_channels=embed_dims, + out_channels=embed_dims, + kernel_size=sr_ratio, + stride=sr_ratio) + # The ret[0] of build_norm_layer is norm name. 
+ self.norm = build_norm_layer(norm_cfg, embed_dims)[1] + + # handle the BC-breaking from https://github.com/open-mmlab/mmcv/pull/1418 # noqa + from mmpose import digit_version, mmcv_version + if mmcv_version < digit_version('1.3.17'): + warnings.warn('The legacy version of forward function in' + 'SpatialReductionAttention is deprecated in' + 'mmcv>=1.3.17 and will no longer support in the' + 'future. Please upgrade your mmcv.') + self.forward = self.legacy_forward + + def forward(self, x, hw_shape, identity=None): + + x_q = x + if self.sr_ratio > 1: + x_kv = nlc_to_nchw(x, hw_shape) + x_kv = self.sr(x_kv) + x_kv = nchw_to_nlc(x_kv) + x_kv = self.norm(x_kv) + else: + x_kv = x + + if identity is None: + identity = x_q + + # Because the dataflow('key', 'query', 'value') of + # ``torch.nn.MultiheadAttention`` is (num_query, batch, + # embed_dims), We should adjust the shape of dataflow from + # batch_first (batch, num_query, embed_dims) to num_query_first + # (num_query ,batch, embed_dims), and recover ``attn_output`` + # from num_query_first to batch_first. + if self.batch_first: + x_q = x_q.transpose(0, 1) + x_kv = x_kv.transpose(0, 1) + + out = self.attn(query=x_q, key=x_kv, value=x_kv)[0] + + if self.batch_first: + out = out.transpose(0, 1) + + return identity + self.dropout_layer(self.proj_drop(out)) + + def legacy_forward(self, x, hw_shape, identity=None): + """multi head attention forward in mmcv version < 1.3.17.""" + x_q = x + if self.sr_ratio > 1: + x_kv = nlc_to_nchw(x, hw_shape) + x_kv = self.sr(x_kv) + x_kv = nchw_to_nlc(x_kv) + x_kv = self.norm(x_kv) + else: + x_kv = x + + if identity is None: + identity = x_q + + out = self.attn(query=x_q, key=x_kv, value=x_kv)[0] + + return identity + self.dropout_layer(self.proj_drop(out)) + + +class PVTEncoderLayer(BaseModule): + """Implements one encoder layer in PVT. + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + drop_rate (float): Probability of an element to be zeroed. + after the feed forward layer. Default: 0.0. + attn_drop_rate (float): The drop out rate for attention layer. + Default: 0.0. + drop_path_rate (float): stochastic depth rate. Default: 0.0. + qkv_bias (bool): enable bias for qkv if True. + Default: True. + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + sr_ratio (int): The ratio of spatial reduction of Spatial Reduction + Attention of PVT. Default: 1. + use_conv_ffn (bool): If True, use Convolutional FFN to replace FFN. + Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + qkv_bias=True, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + sr_ratio=1, + use_conv_ffn=False, + init_cfg=None): + super(PVTEncoderLayer, self).__init__(init_cfg=init_cfg) + + # The ret[0] of build_norm_layer is norm name. + self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] + + self.attn = SpatialReductionAttention( + embed_dims=embed_dims, + num_heads=num_heads, + attn_drop=attn_drop_rate, + proj_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + qkv_bias=qkv_bias, + norm_cfg=norm_cfg, + sr_ratio=sr_ratio) + + # The ret[0] of build_norm_layer is norm name. 
+ self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] + + self.ffn = MixFFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + use_conv=use_conv_ffn, + act_cfg=act_cfg) + + def forward(self, x, hw_shape): + x = self.attn(self.norm1(x), hw_shape, identity=x) + x = self.ffn(self.norm2(x), hw_shape, identity=x) + + return x + + +class AbsolutePositionEmbedding(BaseModule): + """An implementation of the absolute position embedding in PVT. + + Args: + pos_shape (int): The shape of the absolute position embedding. + pos_dim (int): The dimension of the absolute position embedding. + drop_rate (float): Probability of an element to be zeroed. + Default: 0.0. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__(self, pos_shape, pos_dim, drop_rate=0., init_cfg=None): + super().__init__(init_cfg=init_cfg) + + if isinstance(pos_shape, int): + pos_shape = to_2tuple(pos_shape) + elif isinstance(pos_shape, tuple): + if len(pos_shape) == 1: + pos_shape = to_2tuple(pos_shape[0]) + assert len(pos_shape) == 2, \ + f'The size of image should have length 1 or 2, ' \ + f'but got {len(pos_shape)}' + self.pos_shape = pos_shape + self.pos_dim = pos_dim + + self.pos_embed = nn.Parameter( + torch.zeros(1, pos_shape[0] * pos_shape[1], pos_dim)) + self.drop = nn.Dropout(p=drop_rate) + + def init_weights(self): + trunc_normal_(self.pos_embed, std=0.02) + + def resize_pos_embed(self, pos_embed, input_shape, mode='bilinear'): + """Resize pos_embed weights. + + Resize pos_embed using bilinear interpolate method. + + Args: + pos_embed (torch.Tensor): Position embedding weights. + input_shape (tuple): Tuple for (downsampled input image height, + downsampled input image width). + mode (str): Algorithm used for upsampling: + ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` | + ``'trilinear'``. Default: ``'bilinear'``. + + Return: + torch.Tensor: The resized pos_embed of shape [B, L_new, C]. + """ + assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]' + pos_h, pos_w = self.pos_shape + pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):] + pos_embed_weight = pos_embed_weight.reshape( + 1, pos_h, pos_w, self.pos_dim).permute(0, 3, 1, 2).contiguous() + pos_embed_weight = F.interpolate( + pos_embed_weight, size=input_shape, mode=mode) + pos_embed_weight = torch.flatten(pos_embed_weight, + 2).transpose(1, 2).contiguous() + pos_embed = pos_embed_weight + + return pos_embed + + def forward(self, x, hw_shape, mode='bilinear'): + pos_embed = self.resize_pos_embed(self.pos_embed, hw_shape, mode) + return self.drop(x + pos_embed) + + +@MODELS.register_module() +class PyramidVisionTransformer(BaseModule): + """Pyramid Vision Transformer (PVT) + + Implementation of `Pyramid Vision Transformer: A Versatile Backbone for + Dense Prediction without Convolutions + `_. + + Args: + pretrain_img_size (int | tuple[int]): The size of input image when + pretrain. Defaults: 224. + in_channels (int): Number of input channels. Default: 3. + embed_dims (int): Embedding dimension. Default: 64. + num_stags (int): The num of stages. Default: 4. + num_layers (Sequence[int]): The layer number of each transformer encode + layer. Default: [3, 4, 6, 3]. + num_heads (Sequence[int]): The attention heads of each transformer + encode layer. Default: [1, 2, 5, 8]. + patch_sizes (Sequence[int]): The patch_size of each patch embedding. + Default: [4, 2, 2, 2]. 
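
A side note (sketch, not from the patch) on the sr_ratios listed in this docstring: SpatialReductionAttention shrinks the key/value sequence with a convolution whose kernel and stride both equal sr_ratio, so the attention map at the highest-resolution stage stays affordable. Sizes below are arbitrary.

import torch
import torch.nn as nn

B, H, W, C, sr_ratio = 2, 16, 16, 64, 4
x = torch.rand(B, H * W, C)                 # queries: all H*W tokens

sr = nn.Conv2d(C, C, kernel_size=sr_ratio, stride=sr_ratio)
kv = x.transpose(1, 2).reshape(B, C, H, W)  # nlc -> nchw
kv = sr(kv).flatten(2).transpose(1, 2)      # nchw -> nlc

print(x.shape, kv.shape)
# torch.Size([2, 256, 64]) torch.Size([2, 16, 64])
# keys/values shrink by sr_ratio**2, so attention is 256 x 16 instead of 256 x 256
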
+ strides (Sequence[int]): The stride of each patch embedding. + Default: [4, 2, 2, 2]. + paddings (Sequence[int]): The padding of each patch embedding. + Default: [0, 0, 0, 0]. + sr_ratios (Sequence[int]): The spatial reduction rate of each + transformer encode layer. Default: [8, 4, 2, 1]. + out_indices (Sequence[int] | int): Output from which stages. + Default: (0, 1, 2, 3). + mlp_ratios (Sequence[int]): The ratio of the mlp hidden dim to the + embedding dim of each transformer encode layer. + Default: [8, 8, 4, 4]. + qkv_bias (bool): Enable bias for qkv if True. Default: True. + drop_rate (float): Probability of an element to be zeroed. + Default 0.0. + attn_drop_rate (float): The drop out rate for attention layer. + Default 0.0. + drop_path_rate (float): stochastic depth rate. Default 0.1. + use_abs_pos_embed (bool): If True, add absolute position embedding to + the patch embedding. Defaults: True. + use_conv_ffn (bool): If True, use Convolutional FFN to replace FFN. + Default: False. + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + pretrained (str, optional): model pretrained path. Default: None. + convert_weights (bool): The flag indicates whether the + pre-trained model is from the original repo. We may need + to convert some keys to make it compatible. + Default: True. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='TruncNormal', std=.02, layer=['Linear']), + dict(type='Constant', val=1, layer=['LayerNorm']), + dict(type='Normal', std=0.01, layer=['Conv2d']) + ]`` + """ + + def __init__(self, + pretrain_img_size=224, + in_channels=3, + embed_dims=64, + num_stages=4, + num_layers=[3, 4, 6, 3], + num_heads=[1, 2, 5, 8], + patch_sizes=[4, 2, 2, 2], + strides=[4, 2, 2, 2], + paddings=[0, 0, 0, 0], + sr_ratios=[8, 4, 2, 1], + out_indices=(0, 1, 2, 3), + mlp_ratios=[8, 8, 4, 4], + qkv_bias=True, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.1, + use_abs_pos_embed=True, + norm_after_stage=False, + use_conv_ffn=False, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN', eps=1e-6), + convert_weights=True, + init_cfg=[ + dict(type='TruncNormal', std=.02, layer=['Linear']), + dict(type='Constant', val=1, layer=['LayerNorm']), + dict(type='Kaiming', layer=['Conv2d']) + ]): + super().__init__(init_cfg=init_cfg) + + self.convert_weights = convert_weights + if isinstance(pretrain_img_size, int): + pretrain_img_size = to_2tuple(pretrain_img_size) + elif isinstance(pretrain_img_size, tuple): + if len(pretrain_img_size) == 1: + pretrain_img_size = to_2tuple(pretrain_img_size[0]) + assert len(pretrain_img_size) == 2, \ + f'The size of image should have length 1 or 2, ' \ + f'but got {len(pretrain_img_size)}' + + self.embed_dims = embed_dims + + self.num_stages = num_stages + self.num_layers = num_layers + self.num_heads = num_heads + self.patch_sizes = patch_sizes + self.strides = strides + self.sr_ratios = sr_ratios + assert num_stages == len(num_layers) == len(num_heads) \ + == len(patch_sizes) == len(strides) == len(sr_ratios) + + self.out_indices = out_indices + assert max(out_indices) < self.num_stages + + # transformer encoder + dpr = [ + x.item() + for x in torch.linspace(0, drop_path_rate, sum(num_layers)) + ] # stochastic num_layer decay rule + + cur = 0 + self.layers = ModuleList() + for i, num_layer in enumerate(num_layers): + embed_dims_i = embed_dims * num_heads[i] + patch_embed = PatchEmbed( + 
in_channels=in_channels, + embed_dims=embed_dims_i, + kernel_size=patch_sizes[i], + stride=strides[i], + padding=paddings[i], + bias=True, + norm_cfg=norm_cfg) + + layers = ModuleList() + if use_abs_pos_embed: + pos_shape = pretrain_img_size // np.prod(patch_sizes[:i + 1]) + pos_embed = AbsolutePositionEmbedding( + pos_shape=pos_shape, + pos_dim=embed_dims_i, + drop_rate=drop_rate) + layers.append(pos_embed) + layers.extend([ + PVTEncoderLayer( + embed_dims=embed_dims_i, + num_heads=num_heads[i], + feedforward_channels=mlp_ratios[i] * embed_dims_i, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=dpr[cur + idx], + qkv_bias=qkv_bias, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + sr_ratio=sr_ratios[i], + use_conv_ffn=use_conv_ffn) for idx in range(num_layer) + ]) + in_channels = embed_dims_i + # The ret[0] of build_norm_layer is norm name. + if norm_after_stage: + norm = build_norm_layer(norm_cfg, embed_dims_i)[1] + else: + norm = nn.Identity() + self.layers.append(ModuleList([patch_embed, layers, norm])) + cur += num_layer + + def init_weights(self): + """Initialize the weights in backbone.""" + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + logger = get_root_logger() + state_dict = get_state_dict( + self.init_cfg['checkpoint'], map_location='cpu') + logger.warn(f'Load pre-trained model for ' + f'{self.__class__.__name__} from original repo') + + if self.convert_weights: + # Because pvt backbones are not supported by mmcls, + # so we need to convert pre-trained weights to match this + # implementation. + state_dict = pvt_convert(state_dict) + load_state_dict(self, state_dict, strict=False, logger=logger) + + else: + super(PyramidVisionTransformer, self).init_weights() + + def forward(self, x): + outs = [] + + for i, layer in enumerate(self.layers): + x, hw_shape = layer[0](x) + + for block in layer[1]: + x = block(x, hw_shape) + x = layer[2](x) + x = nlc_to_nchw(x, hw_shape) + if i in self.out_indices: + outs.append(x) + + return outs + + +@MODELS.register_module() +class PyramidVisionTransformerV2(PyramidVisionTransformer): + """Implementation of `PVTv2: Improved Baselines with Pyramid Vision + Transformer `_.""" + + def __init__(self, **kwargs): + super(PyramidVisionTransformerV2, self).__init__( + patch_sizes=[7, 3, 3, 3], + paddings=[3, 1, 1, 1], + use_abs_pos_embed=False, + norm_after_stage=True, + use_conv_ffn=True, + **kwargs) diff --git a/mmpose/models/backbones/regnet.py b/mmpose/models/backbones/regnet.py index 120523e658..de3ee9957f 100644 --- a/mmpose/models/backbones/regnet.py +++ b/mmpose/models/backbones/regnet.py @@ -1,331 +1,331 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy - -import numpy as np -import torch.nn as nn -from mmcv.cnn import build_conv_layer, build_norm_layer - -from mmpose.registry import MODELS -from .resnet import ResNet -from .resnext import Bottleneck - - -@MODELS.register_module() -class RegNet(ResNet): - """RegNet backbone. - - More details can be found in `paper `__ . - - Args: - arch (dict): The parameter of RegNets. - - w0 (int): initial width - - wa (float): slope of width - - wm (float): quantization parameter to quantize the width - - depth (int): depth of the backbone - - group_w (int): width of group - - bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck. - strides (Sequence[int]): Strides of the first block of each stage. - base_channels (int): Base channels after stem layer. - in_channels (int): Number of input image channels. Default: 3. 
- dilations (Sequence[int]): Dilation of each stage. - out_indices (Sequence[int]): Output from which stages. - style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two - layer is the 3x3 conv layer, otherwise the stride-two layer is - the first 1x1 conv layer. Default: "pytorch". - frozen_stages (int): Stages to be frozen (all param fixed). -1 means - not freezing any parameters. Default: -1. - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN', requires_grad=True). - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Default: False. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - zero_init_residual (bool): whether to use zero init for last norm layer - in resblocks to let them behave as identity. Default: True. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: - ``[ - dict(type='Kaiming', layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ]`` - - Example: - >>> from mmpose.models import RegNet - >>> import torch - >>> self = RegNet( - arch=dict( - w0=88, - wa=26.31, - wm=2.25, - group_w=48, - depth=25, - bot_mul=1.0), - out_indices=(0, 1, 2, 3)) - >>> self.eval() - >>> inputs = torch.rand(1, 3, 32, 32) - >>> level_outputs = self.forward(inputs) - >>> for level_out in level_outputs: - ... print(tuple(level_out.shape)) - (1, 96, 8, 8) - (1, 192, 4, 4) - (1, 432, 2, 2) - (1, 1008, 1, 1) - """ - arch_settings = { - 'regnetx_400mf': - dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0), - 'regnetx_800mf': - dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0), - 'regnetx_1.6gf': - dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0), - 'regnetx_3.2gf': - dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0), - 'regnetx_4.0gf': - dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0), - 'regnetx_6.4gf': - dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0), - 'regnetx_8.0gf': - dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0), - 'regnetx_12gf': - dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0), - } - - def __init__(self, - arch, - in_channels=3, - stem_channels=32, - base_channels=32, - strides=(2, 2, 2, 2), - dilations=(1, 1, 1, 1), - out_indices=(3, ), - style='pytorch', - deep_stem=False, - avg_down=False, - frozen_stages=-1, - conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=False, - with_cp=False, - zero_init_residual=True, - init_cfg=[ - dict(type='Kaiming', layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ]): - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - super(ResNet, self).__init__(init_cfg=init_cfg) - - # Generate RegNet parameters first - if isinstance(arch, str): - assert arch in self.arch_settings, \ - f'"arch": "{arch}" is not one of the' \ - ' arch_settings' - arch = self.arch_settings[arch] - elif not isinstance(arch, dict): - raise TypeError('Expect "arch" to be either a string ' - f'or a dict, got {type(arch)}') - - widths, num_stages = self.generate_regnet( - arch['w0'], - arch['wa'], - arch['wm'], - arch['depth'], - ) - # Convert to per stage format - stage_widths, stage_blocks = self.get_stages_from_blocks(widths) - # Generate group widths 
and bot muls - group_widths = [arch['group_w'] for _ in range(num_stages)] - self.bottleneck_ratio = [arch['bot_mul'] for _ in range(num_stages)] - # Adjust the compatibility of stage_widths and group_widths - stage_widths, group_widths = self.adjust_width_group( - stage_widths, self.bottleneck_ratio, group_widths) - - # Group params by stage - self.stage_widths = stage_widths - self.group_widths = group_widths - self.depth = sum(stage_blocks) - self.stem_channels = stem_channels - self.base_channels = base_channels - self.num_stages = num_stages - assert 1 <= num_stages <= 4 - self.strides = strides - self.dilations = dilations - assert len(strides) == len(dilations) == num_stages - self.out_indices = out_indices - assert max(out_indices) < num_stages - self.style = style - self.deep_stem = deep_stem - if self.deep_stem: - raise NotImplementedError( - 'deep_stem has not been implemented for RegNet') - self.avg_down = avg_down - self.frozen_stages = frozen_stages - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.with_cp = with_cp - self.norm_eval = norm_eval - self.zero_init_residual = zero_init_residual - self.stage_blocks = stage_blocks[:num_stages] - - self._make_stem_layer(in_channels, stem_channels) - - _in_channels = stem_channels - self.res_layers = [] - for i, num_blocks in enumerate(self.stage_blocks): - stride = self.strides[i] - dilation = self.dilations[i] - group_width = self.group_widths[i] - width = int(round(self.stage_widths[i] * self.bottleneck_ratio[i])) - stage_groups = width // group_width - - res_layer = self.make_res_layer( - block=Bottleneck, - num_blocks=num_blocks, - in_channels=_in_channels, - out_channels=self.stage_widths[i], - expansion=1, - stride=stride, - dilation=dilation, - style=self.style, - avg_down=self.avg_down, - with_cp=self.with_cp, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - base_channels=self.stage_widths[i], - groups=stage_groups, - width_per_group=group_width) - _in_channels = self.stage_widths[i] - layer_name = f'layer{i + 1}' - self.add_module(layer_name, res_layer) - self.res_layers.append(layer_name) - - self._freeze_stages() - - self.feat_dim = stage_widths[-1] - - def _make_stem_layer(self, in_channels, base_channels): - self.conv1 = build_conv_layer( - self.conv_cfg, - in_channels, - base_channels, - kernel_size=3, - stride=2, - padding=1, - bias=False) - self.norm1_name, norm1 = build_norm_layer( - self.norm_cfg, base_channels, postfix=1) - self.add_module(self.norm1_name, norm1) - self.relu = nn.ReLU(inplace=True) - - @staticmethod - def generate_regnet(initial_width, - width_slope, - width_parameter, - depth, - divisor=8): - """Generates per block width from RegNet parameters. - - Args: - initial_width ([int]): Initial width of the backbone - width_slope ([float]): Slope of the quantized linear function - width_parameter ([int]): Parameter used to quantize the width. - depth ([int]): Depth of the backbone. - divisor (int, optional): The divisor of channels. Defaults to 8. 
- - Returns: - list, int: return a list of widths of each stage and the number of - stages - """ - assert width_slope >= 0 - assert initial_width > 0 - assert width_parameter > 1 - assert initial_width % divisor == 0 - widths_cont = np.arange(depth) * width_slope + initial_width - ks = np.round( - np.log(widths_cont / initial_width) / np.log(width_parameter)) - widths = initial_width * np.power(width_parameter, ks) - widths = np.round(np.divide(widths, divisor)) * divisor - num_stages = len(np.unique(widths)) - widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist() - return widths, num_stages - - @staticmethod - def quantize_float(number, divisor): - """Converts a float to closest non-zero int divisible by divior. - - Args: - number (int): Original number to be quantized. - divisor (int): Divisor used to quantize the number. - - Returns: - int: quantized number that is divisible by devisor. - """ - return int(round(number / divisor) * divisor) - - def adjust_width_group(self, widths, bottleneck_ratio, groups): - """Adjusts the compatibility of widths and groups. - - Args: - widths (list[int]): Width of each stage. - bottleneck_ratio (float): Bottleneck ratio. - groups (int): number of groups in each stage - - Returns: - tuple(list): The adjusted widths and groups of each stage. - """ - bottleneck_width = [ - int(w * b) for w, b in zip(widths, bottleneck_ratio) - ] - groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_width)] - bottleneck_width = [ - self.quantize_float(w_bot, g) - for w_bot, g in zip(bottleneck_width, groups) - ] - widths = [ - int(w_bot / b) - for w_bot, b in zip(bottleneck_width, bottleneck_ratio) - ] - return widths, groups - - def get_stages_from_blocks(self, widths): - """Gets widths/stage_blocks of network at each stage. - - Args: - widths (list[int]): Width in each stage. - - Returns: - tuple(list): width and depth of each stage - """ - width_diff = [ - width != width_prev - for width, width_prev in zip(widths + [0], [0] + widths) - ] - stage_widths = [ - width for width, diff in zip(widths, width_diff[:-1]) if diff - ] - stage_blocks = np.diff([ - depth for depth, diff in zip(range(len(width_diff)), width_diff) - if diff - ]).tolist() - return stage_widths, stage_blocks - - def forward(self, x): - x = self.conv1(x) - x = self.norm1(x) - x = self.relu(x) - - outs = [] - for i, layer_name in enumerate(self.res_layers): - res_layer = getattr(self, layer_name) - x = res_layer(x) - if i in self.out_indices: - outs.append(x) - - return tuple(outs) +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import numpy as np +import torch.nn as nn +from mmcv.cnn import build_conv_layer, build_norm_layer + +from mmpose.registry import MODELS +from .resnet import ResNet +from .resnext import Bottleneck + + +@MODELS.register_module() +class RegNet(ResNet): + """RegNet backbone. + + More details can be found in `paper `__ . + + Args: + arch (dict): The parameter of RegNets. + - w0 (int): initial width + - wa (float): slope of width + - wm (float): quantization parameter to quantize the width + - depth (int): depth of the backbone + - group_w (int): width of group + - bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck. + strides (Sequence[int]): Strides of the first block of each stage. + base_channels (int): Base channels after stem layer. + in_channels (int): Number of input image channels. Default: 3. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. 
+ style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. Default: "pytorch". + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. Default: -1. + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN', requires_grad=True). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + + Example: + >>> from mmpose.models import RegNet + >>> import torch + >>> self = RegNet( + arch=dict( + w0=88, + wa=26.31, + wm=2.25, + group_w=48, + depth=25, + bot_mul=1.0), + out_indices=(0, 1, 2, 3)) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 96, 8, 8) + (1, 192, 4, 4) + (1, 432, 2, 2) + (1, 1008, 1, 1) + """ + arch_settings = { + 'regnetx_400mf': + dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0), + 'regnetx_800mf': + dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0), + 'regnetx_1.6gf': + dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0), + 'regnetx_3.2gf': + dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0), + 'regnetx_4.0gf': + dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0), + 'regnetx_6.4gf': + dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0), + 'regnetx_8.0gf': + dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0), + 'regnetx_12gf': + dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0), + } + + def __init__(self, + arch, + in_channels=3, + stem_channels=32, + base_channels=32, + strides=(2, 2, 2, 2), + dilations=(1, 1, 1, 1), + out_indices=(3, ), + style='pytorch', + deep_stem=False, + avg_down=False, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=False, + with_cp=False, + zero_init_residual=True, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super(ResNet, self).__init__(init_cfg=init_cfg) + + # Generate RegNet parameters first + if isinstance(arch, str): + assert arch in self.arch_settings, \ + f'"arch": "{arch}" is not one of the' \ + ' arch_settings' + arch = self.arch_settings[arch] + elif not isinstance(arch, dict): + raise TypeError('Expect "arch" to be either a string ' + f'or a dict, got {type(arch)}') + + widths, num_stages = self.generate_regnet( + arch['w0'], + arch['wa'], + arch['wm'], + arch['depth'], + ) + # Convert to per stage format + stage_widths, stage_blocks = self.get_stages_from_blocks(widths) + # Generate group widths and bot muls + group_widths = [arch['group_w'] for _ in range(num_stages)] + self.bottleneck_ratio = 
[arch['bot_mul'] for _ in range(num_stages)] + # Adjust the compatibility of stage_widths and group_widths + stage_widths, group_widths = self.adjust_width_group( + stage_widths, self.bottleneck_ratio, group_widths) + + # Group params by stage + self.stage_widths = stage_widths + self.group_widths = group_widths + self.depth = sum(stage_blocks) + self.stem_channels = stem_channels + self.base_channels = base_channels + self.num_stages = num_stages + assert 1 <= num_stages <= 4 + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == num_stages + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.style = style + self.deep_stem = deep_stem + if self.deep_stem: + raise NotImplementedError( + 'deep_stem has not been implemented for RegNet') + self.avg_down = avg_down + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + self.zero_init_residual = zero_init_residual + self.stage_blocks = stage_blocks[:num_stages] + + self._make_stem_layer(in_channels, stem_channels) + + _in_channels = stem_channels + self.res_layers = [] + for i, num_blocks in enumerate(self.stage_blocks): + stride = self.strides[i] + dilation = self.dilations[i] + group_width = self.group_widths[i] + width = int(round(self.stage_widths[i] * self.bottleneck_ratio[i])) + stage_groups = width // group_width + + res_layer = self.make_res_layer( + block=Bottleneck, + num_blocks=num_blocks, + in_channels=_in_channels, + out_channels=self.stage_widths[i], + expansion=1, + stride=stride, + dilation=dilation, + style=self.style, + avg_down=self.avg_down, + with_cp=self.with_cp, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + base_channels=self.stage_widths[i], + groups=stage_groups, + width_per_group=group_width) + _in_channels = self.stage_widths[i] + layer_name = f'layer{i + 1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self._freeze_stages() + + self.feat_dim = stage_widths[-1] + + def _make_stem_layer(self, in_channels, base_channels): + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + base_channels, + kernel_size=3, + stride=2, + padding=1, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, base_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + + @staticmethod + def generate_regnet(initial_width, + width_slope, + width_parameter, + depth, + divisor=8): + """Generates per block width from RegNet parameters. + + Args: + initial_width ([int]): Initial width of the backbone + width_slope ([float]): Slope of the quantized linear function + width_parameter ([int]): Parameter used to quantize the width. + depth ([int]): Depth of the backbone. + divisor (int, optional): The divisor of channels. Defaults to 8. 
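# A small sketch (illustrative name, not MMPose API) of how the per-block
# width list is collapsed into per-stage widths and depths, as done by
# `RegNet.get_stages_from_blocks`: consecutive equal widths form one stage.
import itertools


def sketch_stages_from_blocks(widths):
    stage_widths, stage_blocks = [], []
    for width, blocks in itertools.groupby(widths):
        stage_widths.append(width)
        stage_blocks.append(len(list(blocks)))
    return stage_widths, stage_blocks


# sketch_stages_from_blocks([96, 96, 192, 192, 192, 432])
# -> ([96, 192, 432], [2, 3, 1])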
+ + Returns: + list, int: return a list of widths of each stage and the number of + stages + """ + assert width_slope >= 0 + assert initial_width > 0 + assert width_parameter > 1 + assert initial_width % divisor == 0 + widths_cont = np.arange(depth) * width_slope + initial_width + ks = np.round( + np.log(widths_cont / initial_width) / np.log(width_parameter)) + widths = initial_width * np.power(width_parameter, ks) + widths = np.round(np.divide(widths, divisor)) * divisor + num_stages = len(np.unique(widths)) + widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist() + return widths, num_stages + + @staticmethod + def quantize_float(number, divisor): + """Converts a float to closest non-zero int divisible by divior. + + Args: + number (int): Original number to be quantized. + divisor (int): Divisor used to quantize the number. + + Returns: + int: quantized number that is divisible by devisor. + """ + return int(round(number / divisor) * divisor) + + def adjust_width_group(self, widths, bottleneck_ratio, groups): + """Adjusts the compatibility of widths and groups. + + Args: + widths (list[int]): Width of each stage. + bottleneck_ratio (float): Bottleneck ratio. + groups (int): number of groups in each stage + + Returns: + tuple(list): The adjusted widths and groups of each stage. + """ + bottleneck_width = [ + int(w * b) for w, b in zip(widths, bottleneck_ratio) + ] + groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_width)] + bottleneck_width = [ + self.quantize_float(w_bot, g) + for w_bot, g in zip(bottleneck_width, groups) + ] + widths = [ + int(w_bot / b) + for w_bot, b in zip(bottleneck_width, bottleneck_ratio) + ] + return widths, groups + + def get_stages_from_blocks(self, widths): + """Gets widths/stage_blocks of network at each stage. + + Args: + widths (list[int]): Width in each stage. + + Returns: + tuple(list): width and depth of each stage + """ + width_diff = [ + width != width_prev + for width, width_prev in zip(widths + [0], [0] + widths) + ] + stage_widths = [ + width for width, diff in zip(widths, width_diff[:-1]) if diff + ] + stage_blocks = np.diff([ + depth for depth, diff in zip(range(len(width_diff)), width_diff) + if diff + ]).tolist() + return stage_widths, stage_blocks + + def forward(self, x): + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) diff --git a/mmpose/models/backbones/resnest.py b/mmpose/models/backbones/resnest.py index b5eea8ad7e..5bcc3d6c19 100644 --- a/mmpose/models/backbones/resnest.py +++ b/mmpose/models/backbones/resnest.py @@ -1,353 +1,353 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.checkpoint as cp -from mmcv.cnn import build_conv_layer, build_norm_layer -from mmengine.model import BaseModule - -from mmpose.registry import MODELS -from .resnet import Bottleneck as _Bottleneck -from .resnet import ResLayer, ResNetV1d - - -class RSoftmax(nn.Module): - """Radix Softmax module in ``SplitAttentionConv2d``. - - Args: - radix (int): Radix of input. - groups (int): Groups of input. 
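# A shape-level sketch in plain PyTorch (not the MMPose class itself) of the
# `RSoftmax` module defined here: with radix > 1 the attention logits are
# normalised across the radix splits of each group; with radix == 1 a plain
# sigmoid gate is used instead.
import torch
import torch.nn.functional as F


def sketch_rsoftmax(x, radix, groups):
    batch = x.size(0)
    if radix > 1:
        # Reshape so that the softmax runs over the radix axis.
        x = x.view(batch, groups, radix, -1).transpose(1, 2)
        x = F.softmax(x, dim=1)
        return x.reshape(batch, -1)
    return torch.sigmoid(x)


# Attention logits for radix=2, groups=4, 16 channels per split:
out = sketch_rsoftmax(torch.randn(2, 2 * 4 * 16, 1, 1), radix=2, groups=4)
print(out.shape)  # torch.Size([2, 128])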
- """ - - def __init__(self, radix, groups): - super().__init__() - self.radix = radix - self.groups = groups - - def forward(self, x): - batch = x.size(0) - if self.radix > 1: - x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2) - x = F.softmax(x, dim=1) - x = x.reshape(batch, -1) - else: - x = torch.sigmoid(x) - return x - - -class SplitAttentionConv2d(BaseModule): - """Split-Attention Conv2d. - - Args: - in_channels (int): Same as nn.Conv2d. - out_channels (int): Same as nn.Conv2d. - kernel_size (int | tuple[int]): Same as nn.Conv2d. - stride (int | tuple[int]): Same as nn.Conv2d. - padding (int | tuple[int]): Same as nn.Conv2d. - dilation (int | tuple[int]): Same as nn.Conv2d. - groups (int): Same as nn.Conv2d. - radix (int): Radix of SpltAtConv2d. Default: 2 - reduction_factor (int): Reduction factor of SplitAttentionConv2d. - Default: 4. - conv_cfg (dict): Config dict for convolution layer. Default: None, - which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. Default: None. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - in_channels, - channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - radix=2, - reduction_factor=4, - conv_cfg=None, - norm_cfg=dict(type='BN'), - init_cfg=None): - super().__init__(init_cfg=init_cfg) - inter_channels = max(in_channels * radix // reduction_factor, 32) - self.radix = radix - self.groups = groups - self.channels = channels - self.conv = build_conv_layer( - conv_cfg, - in_channels, - channels * radix, - kernel_size, - stride=stride, - padding=padding, - dilation=dilation, - groups=groups * radix, - bias=False) - self.norm0_name, norm0 = build_norm_layer( - norm_cfg, channels * radix, postfix=0) - self.add_module(self.norm0_name, norm0) - self.relu = nn.ReLU(inplace=True) - self.fc1 = build_conv_layer( - None, channels, inter_channels, 1, groups=self.groups) - self.norm1_name, norm1 = build_norm_layer( - norm_cfg, inter_channels, postfix=1) - self.add_module(self.norm1_name, norm1) - self.fc2 = build_conv_layer( - None, inter_channels, channels * radix, 1, groups=self.groups) - self.rsoftmax = RSoftmax(radix, groups) - - @property - def norm0(self): - return getattr(self, self.norm0_name) - - @property - def norm1(self): - return getattr(self, self.norm1_name) - - def forward(self, x): - x = self.conv(x) - x = self.norm0(x) - x = self.relu(x) - - batch, rchannel = x.shape[:2] - if self.radix > 1: - splits = x.view(batch, self.radix, -1, *x.shape[2:]) - gap = splits.sum(dim=1) - else: - gap = x - gap = F.adaptive_avg_pool2d(gap, 1) - gap = self.fc1(gap) - - gap = self.norm1(gap) - gap = self.relu(gap) - - atten = self.fc2(gap) - atten = self.rsoftmax(atten).view(batch, -1, 1, 1) - - if self.radix > 1: - attens = atten.view(batch, self.radix, -1, *atten.shape[2:]) - out = torch.sum(attens * splits, dim=1) - else: - out = atten * x - return out.contiguous() - - -class Bottleneck(_Bottleneck): - """Bottleneck block for ResNeSt. - - Args: - in_channels (int): Input channels of this block. - out_channels (int): Output channels of this block. - groups (int): Groups of conv2. - width_per_group (int): Width per group of conv2. 64x4d indicates - ``groups=64, width_per_group=4`` and 32x8d indicates - ``groups=32, width_per_group=8``. - radix (int): Radix of SpltAtConv2d. Default: 2 - reduction_factor (int): Reduction factor of SplitAttentionConv2d. - Default: 4. 
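# A quick shape check (assumes mmcv and mmpose are installed) for the
# split-attention convolution defined above: the output has `channels`
# channels and the spatial size follows the chosen stride, like a plain conv.
import torch
from mmpose.models.backbones.resnest import SplitAttentionConv2d

conv = SplitAttentionConv2d(
    in_channels=64, channels=64, kernel_size=3, padding=1, radix=2, groups=4)
out = conv(torch.rand(1, 64, 16, 16))
print(out.shape)  # torch.Size([1, 64, 16, 16])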
- avg_down_stride (bool): Whether to use average pool for stride in - Bottleneck. Default: True. - stride (int): stride of the block. Default: 1 - dilation (int): dilation of convolution. Default: 1 - downsample (nn.Module): downsample operation on identity branch. - Default: None - style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two - layer is the 3x3 conv layer, otherwise the stride-two layer is - the first 1x1 conv layer. - conv_cfg (dict): dictionary to construct and config conv layer. - Default: None - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - in_channels, - out_channels, - groups=1, - width_per_group=4, - base_channels=64, - radix=2, - reduction_factor=4, - avg_down_stride=True, - **kwargs): - super().__init__(in_channels, out_channels, **kwargs) - - self.groups = groups - self.width_per_group = width_per_group - - # For ResNet bottleneck, middle channels are determined by expansion - # and out_channels, but for ResNeXt bottleneck, it is determined by - # groups and width_per_group and the stage it is located in. - if groups != 1: - assert self.mid_channels % base_channels == 0 - self.mid_channels = ( - groups * width_per_group * self.mid_channels // base_channels) - - self.avg_down_stride = avg_down_stride and self.conv2_stride > 1 - - self.norm1_name, norm1 = build_norm_layer( - self.norm_cfg, self.mid_channels, postfix=1) - self.norm3_name, norm3 = build_norm_layer( - self.norm_cfg, self.out_channels, postfix=3) - - self.conv1 = build_conv_layer( - self.conv_cfg, - self.in_channels, - self.mid_channels, - kernel_size=1, - stride=self.conv1_stride, - bias=False) - self.add_module(self.norm1_name, norm1) - self.conv2 = SplitAttentionConv2d( - self.mid_channels, - self.mid_channels, - kernel_size=3, - stride=1 if self.avg_down_stride else self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - groups=groups, - radix=radix, - reduction_factor=reduction_factor, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg) - delattr(self, self.norm2_name) - - if self.avg_down_stride: - self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1) - - self.conv3 = build_conv_layer( - self.conv_cfg, - self.mid_channels, - self.out_channels, - kernel_size=1, - bias=False) - self.add_module(self.norm3_name, norm3) - - def forward(self, x): - - def _inner_forward(x): - identity = x - - out = self.conv1(x) - out = self.norm1(out) - out = self.relu(out) - - out = self.conv2(out) - - if self.avg_down_stride: - out = self.avd_layer(out) - - out = self.conv3(out) - out = self.norm3(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - out = self.relu(out) - - return out - - -@MODELS.register_module() -class ResNeSt(ResNetV1d): - """ResNeSt backbone. - - Please refer to the `paper `__ - for details. - - Args: - depth (int): Network depth, from {50, 101, 152, 200}. - groups (int): Groups of conv2 in Bottleneck. Default: 32. - width_per_group (int): Width per group of conv2 in Bottleneck. - Default: 4. - radix (int): Radix of SpltAtConv2d. Default: 2 - reduction_factor (int): Reduction factor of SplitAttentionConv2d. 
- Default: 4. - avg_down_stride (bool): Whether to use average pool for stride in - Bottleneck. Default: True. - in_channels (int): Number of input image channels. Default: 3. - stem_channels (int): Output channels of the stem layer. Default: 64. - num_stages (int): Stages of the network. Default: 4. - strides (Sequence[int]): Strides of the first block of each stage. - Default: ``(1, 2, 2, 2)``. - dilations (Sequence[int]): Dilation of each stage. - Default: ``(1, 1, 1, 1)``. - out_indices (Sequence[int]): Output from which stages. If only one - stage is specified, a single tensor (feature map) is returned, - otherwise multiple stages are specified, a tuple of tensors will - be returned. Default: ``(3, )``. - style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two - layer is the 3x3 conv layer, otherwise the stride-two layer is - the first 1x1 conv layer. - deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. - Default: False. - avg_down (bool): Use AvgPool instead of stride conv when - downsampling in the bottleneck. Default: False. - frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - -1 means not freezing any parameters. Default: -1. - conv_cfg (dict | None): The config dict for conv layers. Default: None. - norm_cfg (dict): The config dict for norm layers. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Default: False. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - zero_init_residual (bool): Whether to use zero init for last norm layer - in resblocks to let them behave as identity. Default: True. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: - ``[ - dict(type='Kaiming', layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ]`` - """ - - arch_settings = { - 50: (Bottleneck, (3, 4, 6, 3)), - 101: (Bottleneck, (3, 4, 23, 3)), - 152: (Bottleneck, (3, 8, 36, 3)), - 200: (Bottleneck, (3, 24, 36, 3)), - 269: (Bottleneck, (3, 30, 48, 8)) - } - - def __init__(self, - depth, - groups=1, - width_per_group=4, - radix=2, - reduction_factor=4, - avg_down_stride=True, - **kwargs): - self.groups = groups - self.width_per_group = width_per_group - self.radix = radix - self.reduction_factor = reduction_factor - self.avg_down_stride = avg_down_stride - super().__init__(depth=depth, **kwargs) - - def make_res_layer(self, **kwargs): - return ResLayer( - groups=self.groups, - width_per_group=self.width_per_group, - base_channels=self.base_channels, - radix=self.radix, - reduction_factor=self.reduction_factor, - avg_down_stride=self.avg_down_stride, - **kwargs) +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmengine.model import BaseModule + +from mmpose.registry import MODELS +from .resnet import Bottleneck as _Bottleneck +from .resnet import ResLayer, ResNetV1d + + +class RSoftmax(nn.Module): + """Radix Softmax module in ``SplitAttentionConv2d``. + + Args: + radix (int): Radix of input. + groups (int): Groups of input. 
+ """ + + def __init__(self, radix, groups): + super().__init__() + self.radix = radix + self.groups = groups + + def forward(self, x): + batch = x.size(0) + if self.radix > 1: + x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2) + x = F.softmax(x, dim=1) + x = x.reshape(batch, -1) + else: + x = torch.sigmoid(x) + return x + + +class SplitAttentionConv2d(BaseModule): + """Split-Attention Conv2d. + + Args: + in_channels (int): Same as nn.Conv2d. + out_channels (int): Same as nn.Conv2d. + kernel_size (int | tuple[int]): Same as nn.Conv2d. + stride (int | tuple[int]): Same as nn.Conv2d. + padding (int | tuple[int]): Same as nn.Conv2d. + dilation (int | tuple[int]): Same as nn.Conv2d. + groups (int): Same as nn.Conv2d. + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of SplitAttentionConv2d. + Default: 4. + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. Default: None. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels, + channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + radix=2, + reduction_factor=4, + conv_cfg=None, + norm_cfg=dict(type='BN'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + inter_channels = max(in_channels * radix // reduction_factor, 32) + self.radix = radix + self.groups = groups + self.channels = channels + self.conv = build_conv_layer( + conv_cfg, + in_channels, + channels * radix, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups * radix, + bias=False) + self.norm0_name, norm0 = build_norm_layer( + norm_cfg, channels * radix, postfix=0) + self.add_module(self.norm0_name, norm0) + self.relu = nn.ReLU(inplace=True) + self.fc1 = build_conv_layer( + None, channels, inter_channels, 1, groups=self.groups) + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, inter_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.fc2 = build_conv_layer( + None, inter_channels, channels * radix, 1, groups=self.groups) + self.rsoftmax = RSoftmax(radix, groups) + + @property + def norm0(self): + return getattr(self, self.norm0_name) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + def forward(self, x): + x = self.conv(x) + x = self.norm0(x) + x = self.relu(x) + + batch, rchannel = x.shape[:2] + if self.radix > 1: + splits = x.view(batch, self.radix, -1, *x.shape[2:]) + gap = splits.sum(dim=1) + else: + gap = x + gap = F.adaptive_avg_pool2d(gap, 1) + gap = self.fc1(gap) + + gap = self.norm1(gap) + gap = self.relu(gap) + + atten = self.fc2(gap) + atten = self.rsoftmax(atten).view(batch, -1, 1, 1) + + if self.radix > 1: + attens = atten.view(batch, self.radix, -1, *atten.shape[2:]) + out = torch.sum(attens * splits, dim=1) + else: + out = atten * x + return out.contiguous() + + +class Bottleneck(_Bottleneck): + """Bottleneck block for ResNeSt. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + groups (int): Groups of conv2. + width_per_group (int): Width per group of conv2. 64x4d indicates + ``groups=64, width_per_group=4`` and 32x8d indicates + ``groups=32, width_per_group=8``. + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of SplitAttentionConv2d. + Default: 4. 
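# Illustrative arithmetic (the helper name is not MMPose API) for the grouped
# bottleneck width rule used in this block: the middle width derived from
# out_channels / expansion is rescaled by groups * width_per_group / base_channels.
def sketch_mid_channels(out_channels, expansion, groups, width_per_group,
                        base_channels=64):
    mid = out_channels // expansion
    if groups != 1:
        mid = groups * width_per_group * mid // base_channels
    return mid


# sketch_mid_channels(256, 4, groups=1, width_per_group=4)   -> 64
# sketch_mid_channels(256, 4, groups=32, width_per_group=8)  -> 256 (32x8d)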
+ avg_down_stride (bool): Whether to use average pool for stride in + Bottleneck. Default: True. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module): downsample operation on identity branch. + Default: None + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels, + out_channels, + groups=1, + width_per_group=4, + base_channels=64, + radix=2, + reduction_factor=4, + avg_down_stride=True, + **kwargs): + super().__init__(in_channels, out_channels, **kwargs) + + self.groups = groups + self.width_per_group = width_per_group + + # For ResNet bottleneck, middle channels are determined by expansion + # and out_channels, but for ResNeXt bottleneck, it is determined by + # groups and width_per_group and the stage it is located in. + if groups != 1: + assert self.mid_channels % base_channels == 0 + self.mid_channels = ( + groups * width_per_group * self.mid_channels // base_channels) + + self.avg_down_stride = avg_down_stride and self.conv2_stride > 1 + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=1) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.out_channels, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.in_channels, + self.mid_channels, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = SplitAttentionConv2d( + self.mid_channels, + self.mid_channels, + kernel_size=3, + stride=1 if self.avg_down_stride else self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + radix=radix, + reduction_factor=reduction_factor, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg) + delattr(self, self.norm2_name) + + if self.avg_down_stride: + self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1) + + self.conv3 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + + if self.avg_down_stride: + out = self.avd_layer(out) + + out = self.conv3(out) + out = self.norm3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +@MODELS.register_module() +class ResNeSt(ResNetV1d): + """ResNeSt backbone. + + Please refer to the `paper `__ + for details. + + Args: + depth (int): Network depth, from {50, 101, 152, 200}. + groups (int): Groups of conv2 in Bottleneck. Default: 32. + width_per_group (int): Width per group of conv2 in Bottleneck. + Default: 4. + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of SplitAttentionConv2d. 
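# A small illustration in plain PyTorch (not MMPose code): with
# avg_down_stride=True the 3x3 split-attention conv keeps stride 1 and the
# spatial downsampling is done afterwards by the 3x3 average pool (`avd_layer`).
import torch
import torch.nn as nn

avd_layer = nn.AvgPool2d(3, stride=2, padding=1)
print(avd_layer(torch.rand(1, 256, 16, 16)).shape)  # torch.Size([1, 256, 8, 8])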
+ Default: 4. + avg_down_stride (bool): Whether to use average pool for stride in + Bottleneck. Default: True. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + """ + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)), + 200: (Bottleneck, (3, 24, 36, 3)), + 269: (Bottleneck, (3, 30, 48, 8)) + } + + def __init__(self, + depth, + groups=1, + width_per_group=4, + radix=2, + reduction_factor=4, + avg_down_stride=True, + **kwargs): + self.groups = groups + self.width_per_group = width_per_group + self.radix = radix + self.reduction_factor = reduction_factor + self.avg_down_stride = avg_down_stride + super().__init__(depth=depth, **kwargs) + + def make_res_layer(self, **kwargs): + return ResLayer( + groups=self.groups, + width_per_group=self.width_per_group, + base_channels=self.base_channels, + radix=self.radix, + reduction_factor=self.reduction_factor, + avg_down_stride=self.avg_down_stride, + **kwargs) diff --git a/mmpose/models/backbones/resnet.py b/mmpose/models/backbones/resnet.py index a04853f60d..1a2b3622b4 100644 --- a/mmpose/models/backbones/resnet.py +++ b/mmpose/models/backbones/resnet.py @@ -1,715 +1,715 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy - -import torch.nn as nn -import torch.utils.checkpoint as cp -from mmcv.cnn import ConvModule, build_conv_layer, build_norm_layer -from mmengine.model import BaseModule, constant_init -from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm - -from mmpose.registry import MODELS -from .base_backbone import BaseBackbone - - -class BasicBlock(BaseModule): - """BasicBlock for ResNet. - - Args: - in_channels (int): Input channels of this block. 
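# A usage sketch (assumes mmpose is installed; the input size is illustrative):
# ResNeSt-50 returning only the last-stage feature map, which comes out at
# 1/32 of the input resolution with 2048 channels.
import torch
from mmpose.models.backbones.resnest import ResNeSt

model = ResNeSt(depth=50, out_indices=(3, ))
model.eval()
with torch.no_grad():
    feats = model(torch.rand(1, 3, 64, 64))
print(feats[0].shape)  # torch.Size([1, 2048, 2, 2])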
- out_channels (int): Output channels of this block. - expansion (int): The ratio of ``out_channels/mid_channels`` where - ``mid_channels`` is the output channels of conv1. This is a - reserved argument in BasicBlock and should always be 1. Default: 1. - stride (int): stride of the block. Default: 1 - dilation (int): dilation of convolution. Default: 1 - downsample (nn.Module): downsample operation on identity branch. - Default: None. - style (str): `pytorch` or `caffe`. It is unused and reserved for - unified API with Bottleneck. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. - conv_cfg (dict): dictionary to construct and config conv layer. - Default: None - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - in_channels, - out_channels, - expansion=1, - stride=1, - dilation=1, - downsample=None, - style='pytorch', - with_cp=False, - conv_cfg=None, - norm_cfg=dict(type='BN'), - init_cfg=None): - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - super().__init__(init_cfg=init_cfg) - self.in_channels = in_channels - self.out_channels = out_channels - self.expansion = expansion - assert self.expansion == 1 - assert out_channels % expansion == 0 - self.mid_channels = out_channels // expansion - self.stride = stride - self.dilation = dilation - self.style = style - self.with_cp = with_cp - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - - self.norm1_name, norm1 = build_norm_layer( - norm_cfg, self.mid_channels, postfix=1) - self.norm2_name, norm2 = build_norm_layer( - norm_cfg, out_channels, postfix=2) - - self.conv1 = build_conv_layer( - conv_cfg, - in_channels, - self.mid_channels, - 3, - stride=stride, - padding=dilation, - dilation=dilation, - bias=False) - self.add_module(self.norm1_name, norm1) - self.conv2 = build_conv_layer( - conv_cfg, - self.mid_channels, - out_channels, - 3, - padding=1, - bias=False) - self.add_module(self.norm2_name, norm2) - - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - - @property - def norm1(self): - """nn.Module: the normalization layer named "norm1" """ - return getattr(self, self.norm1_name) - - @property - def norm2(self): - """nn.Module: the normalization layer named "norm2" """ - return getattr(self, self.norm2_name) - - def forward(self, x): - """Forward function.""" - - def _inner_forward(x): - identity = x - - out = self.conv1(x) - out = self.norm1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.norm2(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - out = self.relu(out) - - return out - - -class Bottleneck(BaseModule): - """Bottleneck block for ResNet. - - Args: - in_channels (int): Input channels of this block. - out_channels (int): Output channels of this block. - expansion (int): The ratio of ``out_channels/mid_channels`` where - ``mid_channels`` is the input/output channels of conv2. Default: 4. - stride (int): stride of the block. Default: 1 - dilation (int): dilation of convolution. Default: 1 - downsample (nn.Module): downsample operation on identity branch. - Default: None. - style (str): ``"pytorch"`` or ``"caffe"``. 
If set to "pytorch", the - stride-two layer is the 3x3 conv layer, otherwise the stride-two - layer is the first 1x1 conv layer. Default: "pytorch". - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. - conv_cfg (dict): dictionary to construct and config conv layer. - Default: None - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - in_channels, - out_channels, - expansion=4, - stride=1, - dilation=1, - downsample=None, - style='pytorch', - with_cp=False, - conv_cfg=None, - norm_cfg=dict(type='BN'), - init_cfg=None): - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - super().__init__(init_cfg=init_cfg) - assert style in ['pytorch', 'caffe'] - - self.in_channels = in_channels - self.out_channels = out_channels - self.expansion = expansion - assert out_channels % expansion == 0 - self.mid_channels = out_channels // expansion - self.stride = stride - self.dilation = dilation - self.style = style - self.with_cp = with_cp - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - - if self.style == 'pytorch': - self.conv1_stride = 1 - self.conv2_stride = stride - else: - self.conv1_stride = stride - self.conv2_stride = 1 - - self.norm1_name, norm1 = build_norm_layer( - norm_cfg, self.mid_channels, postfix=1) - self.norm2_name, norm2 = build_norm_layer( - norm_cfg, self.mid_channels, postfix=2) - self.norm3_name, norm3 = build_norm_layer( - norm_cfg, out_channels, postfix=3) - - self.conv1 = build_conv_layer( - conv_cfg, - in_channels, - self.mid_channels, - kernel_size=1, - stride=self.conv1_stride, - bias=False) - self.add_module(self.norm1_name, norm1) - self.conv2 = build_conv_layer( - conv_cfg, - self.mid_channels, - self.mid_channels, - kernel_size=3, - stride=self.conv2_stride, - padding=dilation, - dilation=dilation, - bias=False) - - self.add_module(self.norm2_name, norm2) - self.conv3 = build_conv_layer( - conv_cfg, - self.mid_channels, - out_channels, - kernel_size=1, - bias=False) - self.add_module(self.norm3_name, norm3) - - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - - @property - def norm1(self): - """nn.Module: the normalization layer named "norm1" """ - return getattr(self, self.norm1_name) - - @property - def norm2(self): - """nn.Module: the normalization layer named "norm2" """ - return getattr(self, self.norm2_name) - - @property - def norm3(self): - """nn.Module: the normalization layer named "norm3" """ - return getattr(self, self.norm3_name) - - def forward(self, x): - """Forward function.""" - - def _inner_forward(x): - identity = x - - out = self.conv1(x) - out = self.norm1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.norm2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.norm3(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - out = self.relu(out) - - return out - - -def get_expansion(block, expansion=None): - """Get the expansion of a residual block. - - The block expansion will be obtained by the following order: - - 1. If ``expansion`` is given, just return it. - 2. If ``block`` has the attribute ``expansion``, then return - ``block.expansion``. - 3. 
Return the default value according the the block type: - 1 for ``BasicBlock`` and 4 for ``Bottleneck``. - - Args: - block (class): The block class. - expansion (int | None): The given expansion ratio. - - Returns: - int: The expansion of the block. - """ - if isinstance(expansion, int): - assert expansion > 0 - elif expansion is None: - if hasattr(block, 'expansion'): - expansion = block.expansion - elif issubclass(block, BasicBlock): - expansion = 1 - elif issubclass(block, Bottleneck): - expansion = 4 - else: - raise TypeError(f'expansion is not specified for {block.__name__}') - else: - raise TypeError('expansion must be an integer or None') - - return expansion - - -class ResLayer(nn.Sequential): - """ResLayer to build ResNet style backbone. - - Args: - block (nn.Module): Residual block used to build ResLayer. - num_blocks (int): Number of blocks. - in_channels (int): Input channels of this block. - out_channels (int): Output channels of this block. - expansion (int, optional): The expansion for BasicBlock/Bottleneck. - If not specified, it will firstly be obtained via - ``block.expansion``. If the block has no attribute "expansion", - the following default values will be used: 1 for BasicBlock and - 4 for Bottleneck. Default: None. - stride (int): stride of the first block. Default: 1. - avg_down (bool): Use AvgPool instead of stride conv when - downsampling in the bottleneck. Default: False - conv_cfg (dict): dictionary to construct and config conv layer. - Default: None - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - downsample_first (bool): Downsample at the first block or last block. - False for Hourglass, True for ResNet. Default: True - """ - - def __init__(self, - block, - num_blocks, - in_channels, - out_channels, - expansion=None, - stride=1, - avg_down=False, - conv_cfg=None, - norm_cfg=dict(type='BN'), - downsample_first=True, - **kwargs): - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - self.block = block - self.expansion = get_expansion(block, expansion) - - downsample = None - if stride != 1 or in_channels != out_channels: - downsample = [] - conv_stride = stride - if avg_down and stride != 1: - conv_stride = 1 - downsample.append( - nn.AvgPool2d( - kernel_size=stride, - stride=stride, - ceil_mode=True, - count_include_pad=False)) - downsample.extend([ - build_conv_layer( - conv_cfg, - in_channels, - out_channels, - kernel_size=1, - stride=conv_stride, - bias=False), - build_norm_layer(norm_cfg, out_channels)[1] - ]) - downsample = nn.Sequential(*downsample) - - layers = [] - if downsample_first: - layers.append( - block( - in_channels=in_channels, - out_channels=out_channels, - expansion=self.expansion, - stride=stride, - downsample=downsample, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - **kwargs)) - in_channels = out_channels - for _ in range(1, num_blocks): - layers.append( - block( - in_channels=in_channels, - out_channels=out_channels, - expansion=self.expansion, - stride=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - **kwargs)) - else: # downsample_first=False is for HourglassModule - for i in range(0, num_blocks - 1): - layers.append( - block( - in_channels=in_channels, - out_channels=in_channels, - expansion=self.expansion, - stride=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - **kwargs)) - layers.append( - block( - in_channels=in_channels, - out_channels=out_channels, - expansion=self.expansion, - stride=stride, - downsample=downsample, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - 
**kwargs)) - - super().__init__(*layers) - - -@MODELS.register_module() -class ResNet(BaseBackbone): - """ResNet backbone. - - Please refer to the `paper `__ for - details. - - Args: - depth (int): Network depth, from {18, 34, 50, 101, 152}. - in_channels (int): Number of input image channels. Default: 3. - stem_channels (int): Output channels of the stem layer. Default: 64. - base_channels (int): Middle channels of the first stage. Default: 64. - num_stages (int): Stages of the network. Default: 4. - strides (Sequence[int]): Strides of the first block of each stage. - Default: ``(1, 2, 2, 2)``. - dilations (Sequence[int]): Dilation of each stage. - Default: ``(1, 1, 1, 1)``. - out_indices (Sequence[int]): Output from which stages. If only one - stage is specified, a single tensor (feature map) is returned, - otherwise multiple stages are specified, a tuple of tensors will - be returned. Default: ``(3, )``. - style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two - layer is the 3x3 conv layer, otherwise the stride-two layer is - the first 1x1 conv layer. - deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. - Default: False. - avg_down (bool): Use AvgPool instead of stride conv when - downsampling in the bottleneck. Default: False. - frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - -1 means not freezing any parameters. Default: -1. - conv_cfg (dict | None): The config dict for conv layers. Default: None. - norm_cfg (dict): The config dict for norm layers. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Default: False. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - zero_init_residual (bool): Whether to use zero init for last norm layer - in resblocks to let them behave as identity. Default: True. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: - ``[ - dict(type='Kaiming', layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ]`` - - Example: - >>> from mmpose.models import ResNet - >>> import torch - >>> self = ResNet(depth=18, out_indices=(0, 1, 2, 3)) - >>> self.eval() - >>> inputs = torch.rand(1, 3, 32, 32) - >>> level_outputs = self.forward(inputs) - >>> for level_out in level_outputs: - ... 
print(tuple(level_out.shape)) - (1, 64, 8, 8) - (1, 128, 4, 4) - (1, 256, 2, 2) - (1, 512, 1, 1) - """ - - arch_settings = { - 18: (BasicBlock, (2, 2, 2, 2)), - 34: (BasicBlock, (3, 4, 6, 3)), - 50: (Bottleneck, (3, 4, 6, 3)), - 101: (Bottleneck, (3, 4, 23, 3)), - 152: (Bottleneck, (3, 8, 36, 3)) - } - - def __init__(self, - depth, - in_channels=3, - stem_channels=64, - base_channels=64, - expansion=None, - num_stages=4, - strides=(1, 2, 2, 2), - dilations=(1, 1, 1, 1), - out_indices=(3, ), - style='pytorch', - deep_stem=False, - avg_down=False, - frozen_stages=-1, - conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=False, - with_cp=False, - zero_init_residual=True, - init_cfg=[ - dict(type='Kaiming', layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ]): - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - super(ResNet, self).__init__(init_cfg) - if depth not in self.arch_settings: - raise KeyError(f'invalid depth {depth} for resnet') - self.depth = depth - self.stem_channels = stem_channels - self.base_channels = base_channels - self.num_stages = num_stages - assert 1 <= num_stages <= 4 - self.strides = strides - self.dilations = dilations - assert len(strides) == len(dilations) == num_stages - self.out_indices = out_indices - assert max(out_indices) < num_stages - self.style = style - self.deep_stem = deep_stem - self.avg_down = avg_down - self.frozen_stages = frozen_stages - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.with_cp = with_cp - self.norm_eval = norm_eval - self.zero_init_residual = zero_init_residual - self.block, stage_blocks = self.arch_settings[depth] - self.stage_blocks = stage_blocks[:num_stages] - self.expansion = get_expansion(self.block, expansion) - - self._make_stem_layer(in_channels, stem_channels) - - self.res_layers = [] - _in_channels = stem_channels - _out_channels = base_channels * self.expansion - for i, num_blocks in enumerate(self.stage_blocks): - stride = strides[i] - dilation = dilations[i] - res_layer = self.make_res_layer( - block=self.block, - num_blocks=num_blocks, - in_channels=_in_channels, - out_channels=_out_channels, - expansion=self.expansion, - stride=stride, - dilation=dilation, - style=self.style, - avg_down=self.avg_down, - with_cp=with_cp, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg) - _in_channels = _out_channels - _out_channels *= 2 - layer_name = f'layer{i + 1}' - self.add_module(layer_name, res_layer) - self.res_layers.append(layer_name) - - self._freeze_stages() - - self.feat_dim = res_layer[-1].out_channels - - def make_res_layer(self, **kwargs): - """Make a ResLayer.""" - return ResLayer(**kwargs) - - @property - def norm1(self): - """nn.Module: the normalization layer named "norm1" """ - return getattr(self, self.norm1_name) - - def _make_stem_layer(self, in_channels, stem_channels): - """Make stem layer.""" - if self.deep_stem: - self.stem = nn.Sequential( - ConvModule( - in_channels, - stem_channels // 2, - kernel_size=3, - stride=2, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - inplace=True), - ConvModule( - stem_channels // 2, - stem_channels // 2, - kernel_size=3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - inplace=True), - ConvModule( - stem_channels // 2, - stem_channels, - kernel_size=3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - inplace=True)) - else: - self.conv1 = build_conv_layer( - self.conv_cfg, - in_channels, - stem_channels, 
- kernel_size=7, - stride=2, - padding=3, - bias=False) - self.norm1_name, norm1 = build_norm_layer( - self.norm_cfg, stem_channels, postfix=1) - self.add_module(self.norm1_name, norm1) - self.relu = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - - def _freeze_stages(self): - """Freeze parameters.""" - if self.frozen_stages >= 0: - if self.deep_stem: - self.stem.eval() - for param in self.stem.parameters(): - param.requires_grad = False - else: - self.norm1.eval() - for m in [self.conv1, self.norm1]: - for param in m.parameters(): - param.requires_grad = False - - for i in range(1, self.frozen_stages + 1): - m = getattr(self, f'layer{i}') - m.eval() - for param in m.parameters(): - param.requires_grad = False - - def init_weights(self): - """Initialize the weights in backbone.""" - super(ResNet, self).init_weights() - - if (isinstance(self.init_cfg, dict) - and self.init_cfg['type'] == 'Pretrained'): - # Suppress zero_init_residual if use pretrained model. - return - - if self.zero_init_residual: - for m in self.modules(): - if isinstance(m, Bottleneck): - constant_init(m.norm3, 0) - elif isinstance(m, BasicBlock): - constant_init(m.norm2, 0) - - def forward(self, x): - """Forward function.""" - if self.deep_stem: - x = self.stem(x) - else: - x = self.conv1(x) - x = self.norm1(x) - x = self.relu(x) - x = self.maxpool(x) - outs = [] - for i, layer_name in enumerate(self.res_layers): - res_layer = getattr(self, layer_name) - x = res_layer(x) - if i in self.out_indices: - outs.append(x) - return tuple(outs) - - def train(self, mode=True): - """Convert the model into training mode.""" - super().train(mode) - self._freeze_stages() - if mode and self.norm_eval: - for m in self.modules(): - # trick: eval have effect on BatchNorm only - if isinstance(m, _BatchNorm): - m.eval() - - -@MODELS.register_module() -class ResNetV1d(ResNet): - r"""ResNetV1d variant described in `Bag of Tricks - `__. - - Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in - the input stem with three 3x3 convs. And in the downsampling block, a 2x2 - avg_pool with stride 2 is added before conv, whose stride is changed to 1. - """ - - def __init__(self, **kwargs): - super().__init__(deep_stem=True, avg_down=True, **kwargs) +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule, build_conv_layer, build_norm_layer +from mmengine.model import BaseModule, constant_init +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone + + +class BasicBlock(BaseModule): + """BasicBlock for ResNet. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + expansion (int): The ratio of ``out_channels/mid_channels`` where + ``mid_channels`` is the output channels of conv1. This is a + reserved argument in BasicBlock and should always be 1. Default: 1. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module): downsample operation on identity branch. + Default: None. + style (str): `pytorch` or `caffe`. It is unused and reserved for + unified API with Bottleneck. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + conv_cfg (dict): dictionary to construct and config conv layer. 
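# A usage sketch (assumes mmpose is installed; the input size is illustrative)
# of the ResNetV1d variant above: a deep 3x3 stem plus avg-pool downsampling,
# with the same stage/channel layout as plain ResNet.
import torch
from mmpose.models.backbones.resnet import ResNetV1d

model = ResNetV1d(depth=50, out_indices=(0, 1, 2, 3))
model.eval()
with torch.no_grad():
    feats = model(torch.rand(1, 3, 64, 64))
print([tuple(f.shape) for f in feats])
# [(1, 256, 16, 16), (1, 512, 8, 8), (1, 1024, 4, 4), (1, 2048, 2, 2)]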
+ Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels, + out_channels, + expansion=1, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + init_cfg=None): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + self.expansion = expansion + assert self.expansion == 1 + assert out_channels % expansion == 0 + self.mid_channels = out_channels // expansion + self.stride = stride + self.dilation = dilation + self.style = style + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + norm_cfg, out_channels, postfix=2) + + self.conv1 = build_conv_layer( + conv_cfg, + in_channels, + self.mid_channels, + 3, + stride=stride, + padding=dilation, + dilation=dilation, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + conv_cfg, + self.mid_channels, + out_channels, + 3, + padding=1, + bias=False) + self.add_module(self.norm2_name, norm2) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: the normalization layer named "norm2" """ + return getattr(self, self.norm2_name) + + def forward(self, x): + """Forward function.""" + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +class Bottleneck(BaseModule): + """Bottleneck block for ResNet. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + expansion (int): The ratio of ``out_channels/mid_channels`` where + ``mid_channels`` is the input/output channels of conv2. Default: 4. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module): downsample operation on identity branch. + Default: None. + style (str): ``"pytorch"`` or ``"caffe"``. If set to "pytorch", the + stride-two layer is the 3x3 conv layer, otherwise the stride-two + layer is the first 1x1 conv layer. Default: "pytorch". + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + init_cfg (dict or list[dict], optional): Initialization config dict. 
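# A tiny sketch (illustrative helper, not MMPose API) of the `style` switch
# documented above: it only decides which bottleneck conv carries the
# stride-2 downsampling.
def sketch_conv_strides(style, stride):
    # 'pytorch': stride on the 3x3 conv (conv2); 'caffe': on the first 1x1 (conv1).
    return (1, stride) if style == 'pytorch' else (stride, 1)


# sketch_conv_strides('pytorch', 2) -> (conv1_stride=1, conv2_stride=2)
# sketch_conv_strides('caffe', 2)   -> (conv1_stride=2, conv2_stride=1)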
+ Default: None + """ + + def __init__(self, + in_channels, + out_channels, + expansion=4, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + init_cfg=None): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + assert style in ['pytorch', 'caffe'] + + self.in_channels = in_channels + self.out_channels = out_channels + self.expansion = expansion + assert out_channels % expansion == 0 + self.mid_channels = out_channels // expansion + self.stride = stride + self.dilation = dilation + self.style = style + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + if self.style == 'pytorch': + self.conv1_stride = 1 + self.conv2_stride = stride + else: + self.conv1_stride = stride + self.conv2_stride = 1 + + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + norm_cfg, self.mid_channels, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + norm_cfg, out_channels, postfix=3) + + self.conv1 = build_conv_layer( + conv_cfg, + in_channels, + self.mid_channels, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + conv_cfg, + self.mid_channels, + self.mid_channels, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + conv_cfg, + self.mid_channels, + out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: the normalization layer named "norm2" """ + return getattr(self, self.norm2_name) + + @property + def norm3(self): + """nn.Module: the normalization layer named "norm3" """ + return getattr(self, self.norm3_name) + + def forward(self, x): + """Forward function.""" + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.norm3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +def get_expansion(block, expansion=None): + """Get the expansion of a residual block. + + The block expansion will be obtained by the following order: + + 1. If ``expansion`` is given, just return it. + 2. If ``block`` has the attribute ``expansion``, then return + ``block.expansion``. + 3. Return the default value according the the block type: + 1 for ``BasicBlock`` and 4 for ``Bottleneck``. + + Args: + block (class): The block class. + expansion (int | None): The given expansion ratio. + + Returns: + int: The expansion of the block. 
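# A short usage check (assumes mmpose is installed) of the lookup order
# described above: an explicit value wins, otherwise a `block.expansion`
# attribute, otherwise the per-class default.
from mmpose.models.backbones.resnet import (BasicBlock, Bottleneck,
                                            get_expansion)

assert get_expansion(BasicBlock) == 1     # class default for BasicBlock
assert get_expansion(Bottleneck) == 4     # class default for Bottleneck
assert get_expansion(Bottleneck, 2) == 2  # an explicit value always wins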
+ """ + if isinstance(expansion, int): + assert expansion > 0 + elif expansion is None: + if hasattr(block, 'expansion'): + expansion = block.expansion + elif issubclass(block, BasicBlock): + expansion = 1 + elif issubclass(block, Bottleneck): + expansion = 4 + else: + raise TypeError(f'expansion is not specified for {block.__name__}') + else: + raise TypeError('expansion must be an integer or None') + + return expansion + + +class ResLayer(nn.Sequential): + """ResLayer to build ResNet style backbone. + + Args: + block (nn.Module): Residual block used to build ResLayer. + num_blocks (int): Number of blocks. + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + expansion (int, optional): The expansion for BasicBlock/Bottleneck. + If not specified, it will firstly be obtained via + ``block.expansion``. If the block has no attribute "expansion", + the following default values will be used: 1 for BasicBlock and + 4 for Bottleneck. Default: None. + stride (int): stride of the first block. Default: 1. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + downsample_first (bool): Downsample at the first block or last block. + False for Hourglass, True for ResNet. Default: True + """ + + def __init__(self, + block, + num_blocks, + in_channels, + out_channels, + expansion=None, + stride=1, + avg_down=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + downsample_first=True, + **kwargs): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + self.block = block + self.expansion = get_expansion(block, expansion) + + downsample = None + if stride != 1 or in_channels != out_channels: + downsample = [] + conv_stride = stride + if avg_down and stride != 1: + conv_stride = 1 + downsample.append( + nn.AvgPool2d( + kernel_size=stride, + stride=stride, + ceil_mode=True, + count_include_pad=False)) + downsample.extend([ + build_conv_layer( + conv_cfg, + in_channels, + out_channels, + kernel_size=1, + stride=conv_stride, + bias=False), + build_norm_layer(norm_cfg, out_channels)[1] + ]) + downsample = nn.Sequential(*downsample) + + layers = [] + if downsample_first: + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + expansion=self.expansion, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + in_channels = out_channels + for _ in range(1, num_blocks): + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + expansion=self.expansion, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + else: # downsample_first=False is for HourglassModule + for i in range(0, num_blocks - 1): + layers.append( + block( + in_channels=in_channels, + out_channels=in_channels, + expansion=self.expansion, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + expansion=self.expansion, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + + super().__init__(*layers) + + +@MODELS.register_module() +class ResNet(BaseBackbone): + """ResNet backbone. + + Please refer to the `paper `__ for + details. + + Args: + depth (int): Network depth, from {18, 34, 50, 101, 152}. 
+ in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + base_channels (int): Middle channels of the first stage. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + + Example: + >>> from mmpose.models import ResNet + >>> import torch + >>> self = ResNet(depth=18, out_indices=(0, 1, 2, 3)) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... 
print(tuple(level_out.shape)) + (1, 64, 8, 8) + (1, 128, 4, 4) + (1, 256, 2, 2) + (1, 512, 1, 1) + """ + + arch_settings = { + 18: (BasicBlock, (2, 2, 2, 2)), + 34: (BasicBlock, (3, 4, 6, 3)), + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, + depth, + in_channels=3, + stem_channels=64, + base_channels=64, + expansion=None, + num_stages=4, + strides=(1, 2, 2, 2), + dilations=(1, 1, 1, 1), + out_indices=(3, ), + style='pytorch', + deep_stem=False, + avg_down=False, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=False, + with_cp=False, + zero_init_residual=True, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super(ResNet, self).__init__(init_cfg) + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for resnet') + self.depth = depth + self.stem_channels = stem_channels + self.base_channels = base_channels + self.num_stages = num_stages + assert 1 <= num_stages <= 4 + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == num_stages + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.style = style + self.deep_stem = deep_stem + self.avg_down = avg_down + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + self.zero_init_residual = zero_init_residual + self.block, stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + self.expansion = get_expansion(self.block, expansion) + + self._make_stem_layer(in_channels, stem_channels) + + self.res_layers = [] + _in_channels = stem_channels + _out_channels = base_channels * self.expansion + for i, num_blocks in enumerate(self.stage_blocks): + stride = strides[i] + dilation = dilations[i] + res_layer = self.make_res_layer( + block=self.block, + num_blocks=num_blocks, + in_channels=_in_channels, + out_channels=_out_channels, + expansion=self.expansion, + stride=stride, + dilation=dilation, + style=self.style, + avg_down=self.avg_down, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg) + _in_channels = _out_channels + _out_channels *= 2 + layer_name = f'layer{i + 1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self._freeze_stages() + + self.feat_dim = res_layer[-1].out_channels + + def make_res_layer(self, **kwargs): + """Make a ResLayer.""" + return ResLayer(**kwargs) + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + def _make_stem_layer(self, in_channels, stem_channels): + """Make stem layer.""" + if self.deep_stem: + self.stem = nn.Sequential( + ConvModule( + in_channels, + stem_channels // 2, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=True), + ConvModule( + stem_channels // 2, + stem_channels // 2, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=True), + ConvModule( + stem_channels // 2, + stem_channels, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=True)) + else: + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + stem_channels, 
+ kernel_size=7, + stride=2, + padding=3, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, stem_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + def _freeze_stages(self): + """Freeze parameters.""" + if self.frozen_stages >= 0: + if self.deep_stem: + self.stem.eval() + for param in self.stem.parameters(): + param.requires_grad = False + else: + self.norm1.eval() + for m in [self.conv1, self.norm1]: + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, f'layer{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self): + """Initialize the weights in backbone.""" + super(ResNet, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress zero_init_residual if use pretrained model. + return + + if self.zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + constant_init(m.norm3, 0) + elif isinstance(m, BasicBlock): + constant_init(m.norm2, 0) + + def forward(self, x): + """Forward function.""" + if self.deep_stem: + x = self.stem(x) + else: + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.maxpool(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) + + def train(self, mode=True): + """Convert the model into training mode.""" + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + +@MODELS.register_module() +class ResNetV1d(ResNet): + r"""ResNetV1d variant described in `Bag of Tricks + `__. + + Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in + the input stem with three 3x3 convs. And in the downsampling block, a 2x2 + avg_pool with stride 2 is added before conv, whose stride is changed to 1. + """ + + def __init__(self, **kwargs): + super().__init__(deep_stem=True, avg_down=True, **kwargs) diff --git a/mmpose/models/backbones/resnext.py b/mmpose/models/backbones/resnext.py index 241f83a114..cc4d907e3a 100644 --- a/mmpose/models/backbones/resnext.py +++ b/mmpose/models/backbones/resnext.py @@ -1,171 +1,171 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmcv.cnn import build_conv_layer, build_norm_layer - -from mmpose.registry import MODELS -from .resnet import Bottleneck as _Bottleneck -from .resnet import ResLayer, ResNet - - -class Bottleneck(_Bottleneck): - """Bottleneck block for ResNeXt. - - Args: - in_channels (int): Input channels of this block. - out_channels (int): Output channels of this block. - groups (int): Groups of conv2. - width_per_group (int): Width per group of conv2. 64x4d indicates - ``groups=64, width_per_group=4`` and 32x8d indicates - ``groups=32, width_per_group=8``. - stride (int): stride of the block. Default: 1 - dilation (int): dilation of convolution. Default: 1 - downsample (nn.Module): downsample operation on identity branch. - Default: None - style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two - layer is the 3x3 conv layer, otherwise the stride-two layer is - the first 1x1 conv layer. - conv_cfg (dict): dictionary to construct and config conv layer. 
- Default: None - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. - """ - - def __init__(self, - in_channels, - out_channels, - base_channels=64, - groups=32, - width_per_group=4, - **kwargs): - super().__init__(in_channels, out_channels, **kwargs) - self.groups = groups - self.width_per_group = width_per_group - - # For ResNet bottleneck, middle channels are determined by expansion - # and out_channels, but for ResNeXt bottleneck, it is determined by - # groups and width_per_group and the stage it is located in. - if groups != 1: - assert self.mid_channels % base_channels == 0 - self.mid_channels = ( - groups * width_per_group * self.mid_channels // base_channels) - - self.norm1_name, norm1 = build_norm_layer( - self.norm_cfg, self.mid_channels, postfix=1) - self.norm2_name, norm2 = build_norm_layer( - self.norm_cfg, self.mid_channels, postfix=2) - self.norm3_name, norm3 = build_norm_layer( - self.norm_cfg, self.out_channels, postfix=3) - - self.conv1 = build_conv_layer( - self.conv_cfg, - self.in_channels, - self.mid_channels, - kernel_size=1, - stride=self.conv1_stride, - bias=False) - self.add_module(self.norm1_name, norm1) - self.conv2 = build_conv_layer( - self.conv_cfg, - self.mid_channels, - self.mid_channels, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - groups=groups, - bias=False) - - self.add_module(self.norm2_name, norm2) - self.conv3 = build_conv_layer( - self.conv_cfg, - self.mid_channels, - self.out_channels, - kernel_size=1, - bias=False) - self.add_module(self.norm3_name, norm3) - - -@MODELS.register_module() -class ResNeXt(ResNet): - """ResNeXt backbone. - - Please refer to the `paper `__ for - details. - - Args: - depth (int): Network depth, from {50, 101, 152}. - groups (int): Groups of conv2 in Bottleneck. Default: 32. - width_per_group (int): Width per group of conv2 in Bottleneck. - Default: 4. - in_channels (int): Number of input image channels. Default: 3. - stem_channels (int): Output channels of the stem layer. Default: 64. - num_stages (int): Stages of the network. Default: 4. - strides (Sequence[int]): Strides of the first block of each stage. - Default: ``(1, 2, 2, 2)``. - dilations (Sequence[int]): Dilation of each stage. - Default: ``(1, 1, 1, 1)``. - out_indices (Sequence[int]): Output from which stages. If only one - stage is specified, a single tensor (feature map) is returned, - otherwise multiple stages are specified, a tuple of tensors will - be returned. Default: ``(3, )``. - style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two - layer is the 3x3 conv layer, otherwise the stride-two layer is - the first 1x1 conv layer. - deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. - Default: False. - avg_down (bool): Use AvgPool instead of stride conv when - downsampling in the bottleneck. Default: False. - frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - -1 means not freezing any parameters. Default: -1. - conv_cfg (dict | None): The config dict for conv layers. Default: None. - norm_cfg (dict): The config dict for norm layers. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Default: False. - with_cp (bool): Use checkpoint or not. 
Using checkpoint will save some - memory while slowing down the training speed. Default: False. - zero_init_residual (bool): Whether to use zero init for last norm layer - in resblocks to let them behave as identity. Default: True. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: - ``[ - dict(type='Kaiming', layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ]`` - - Example: - >>> from mmpose.models import ResNeXt - >>> import torch - >>> self = ResNeXt(depth=50, out_indices=(0, 1, 2, 3)) - >>> self.eval() - >>> inputs = torch.rand(1, 3, 32, 32) - >>> level_outputs = self.forward(inputs) - >>> for level_out in level_outputs: - ... print(tuple(level_out.shape)) - (1, 256, 8, 8) - (1, 512, 4, 4) - (1, 1024, 2, 2) - (1, 2048, 1, 1) - """ - - arch_settings = { - 50: (Bottleneck, (3, 4, 6, 3)), - 101: (Bottleneck, (3, 4, 23, 3)), - 152: (Bottleneck, (3, 8, 36, 3)) - } - - def __init__(self, depth, groups=32, width_per_group=4, **kwargs): - self.groups = groups - self.width_per_group = width_per_group - super().__init__(depth, **kwargs) - - def make_res_layer(self, **kwargs): - return ResLayer( - groups=self.groups, - width_per_group=self.width_per_group, - base_channels=self.base_channels, - **kwargs) +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.cnn import build_conv_layer, build_norm_layer + +from mmpose.registry import MODELS +from .resnet import Bottleneck as _Bottleneck +from .resnet import ResLayer, ResNet + + +class Bottleneck(_Bottleneck): + """Bottleneck block for ResNeXt. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + groups (int): Groups of conv2. + width_per_group (int): Width per group of conv2. 64x4d indicates + ``groups=64, width_per_group=4`` and 32x8d indicates + ``groups=32, width_per_group=8``. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module): downsample operation on identity branch. + Default: None + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + """ + + def __init__(self, + in_channels, + out_channels, + base_channels=64, + groups=32, + width_per_group=4, + **kwargs): + super().__init__(in_channels, out_channels, **kwargs) + self.groups = groups + self.width_per_group = width_per_group + + # For ResNet bottleneck, middle channels are determined by expansion + # and out_channels, but for ResNeXt bottleneck, it is determined by + # groups and width_per_group and the stage it is located in. 
+ if groups != 1: + assert self.mid_channels % base_channels == 0 + self.mid_channels = ( + groups * width_per_group * self.mid_channels // base_channels) + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.out_channels, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.in_channels, + self.mid_channels, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.mid_channels, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + +@MODELS.register_module() +class ResNeXt(ResNet): + """ResNeXt backbone. + + Please refer to the `paper `__ for + details. + + Args: + depth (int): Network depth, from {50, 101, 152}. + groups (int): Groups of conv2 in Bottleneck. Default: 32. + width_per_group (int): Width per group of conv2 in Bottleneck. + Default: 4. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + + Example: + >>> from mmpose.models import ResNeXt + >>> import torch + >>> self = ResNeXt(depth=50, out_indices=(0, 1, 2, 3)) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... 
print(tuple(level_out.shape)) + (1, 256, 8, 8) + (1, 512, 4, 4) + (1, 1024, 2, 2) + (1, 2048, 1, 1) + """ + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, depth, groups=32, width_per_group=4, **kwargs): + self.groups = groups + self.width_per_group = width_per_group + super().__init__(depth, **kwargs) + + def make_res_layer(self, **kwargs): + return ResLayer( + groups=self.groups, + width_per_group=self.width_per_group, + base_channels=self.base_channels, + **kwargs) diff --git a/mmpose/models/backbones/rsn.py b/mmpose/models/backbones/rsn.py index 8267d23d95..74c689f7a7 100644 --- a/mmpose/models/backbones/rsn.py +++ b/mmpose/models/backbones/rsn.py @@ -1,640 +1,640 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy as cp - -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule, MaxPool2d -from mmengine.model import BaseModule - -from mmpose.registry import MODELS -from .base_backbone import BaseBackbone - - -class RSB(BaseModule): - """Residual Steps block for RSN. Paper ref: Cai et al. "Learning Delicate - Local Representations for Multi-Person Pose Estimation" (ECCV 2020). - - Args: - in_channels (int): Input channels of this block. - out_channels (int): Output channels of this block. - num_steps (int): Numbers of steps in RSB - stride (int): stride of the block. Default: 1 - downsample (nn.Module): downsample operation on identity branch. - Default: None. - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - expand_times (int): Times by which the in_channels are expanded. - Default:26. - res_top_channels (int): Number of channels of feature output by - ResNet_top. Default:64. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - expansion = 1 - - def __init__(self, - in_channels, - out_channels, - num_steps=4, - stride=1, - downsample=None, - with_cp=False, - norm_cfg=dict(type='BN'), - expand_times=26, - res_top_channels=64, - init_cfg=None): - # Protect mutable default arguments - norm_cfg = cp.deepcopy(norm_cfg) - super().__init__(init_cfg=init_cfg) - assert num_steps > 1 - self.in_channels = in_channels - self.branch_channels = self.in_channels * expand_times - self.branch_channels //= res_top_channels - self.out_channels = out_channels - self.stride = stride - self.downsample = downsample - self.with_cp = with_cp - self.norm_cfg = norm_cfg - self.num_steps = num_steps - self.conv_bn_relu1 = ConvModule( - self.in_channels, - self.num_steps * self.branch_channels, - kernel_size=1, - stride=self.stride, - padding=0, - norm_cfg=self.norm_cfg, - inplace=False) - for i in range(self.num_steps): - for j in range(i + 1): - module_name = f'conv_bn_relu2_{i + 1}_{j + 1}' - self.add_module( - module_name, - ConvModule( - self.branch_channels, - self.branch_channels, - kernel_size=3, - stride=1, - padding=1, - norm_cfg=self.norm_cfg, - inplace=False)) - self.conv_bn3 = ConvModule( - self.num_steps * self.branch_channels, - self.out_channels * self.expansion, - kernel_size=1, - stride=1, - padding=0, - act_cfg=None, - norm_cfg=self.norm_cfg, - inplace=False) - self.relu = nn.ReLU(inplace=False) - - def forward(self, x): - """Forward function.""" - - identity = x - x = self.conv_bn_relu1(x) - spx = torch.split(x, self.branch_channels, 1) - outputs = list() - outs = list() - for i in range(self.num_steps): - outputs_i = list() - outputs.append(outputs_i) - for j in range(i + 1): - if j == 0: - inputs = spx[i] - else: - inputs = outputs[i][j - 1] - if i > j: - inputs = inputs + outputs[i - 1][j] - module_name = f'conv_bn_relu2_{i + 1}_{j + 1}' - module_i_j = getattr(self, module_name) - outputs[i].append(module_i_j(inputs)) - - outs.append(outputs[i][i]) - out = torch.cat(tuple(outs), 1) - out = self.conv_bn3(out) - - if self.downsample is not None: - identity = self.downsample(identity) - out = out + identity - - out = self.relu(out) - - return out - - -class Downsample_module(BaseModule): - """Downsample module for RSN. - - Args: - block (nn.Module): Downsample block. - num_blocks (list): Number of blocks in each downsample unit. - num_units (int): Numbers of downsample units. Default: 4 - has_skip (bool): Have skip connections from prior upsample - module or not. Default:False - num_steps (int): Number of steps in a block. Default:4 - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - in_channels (int): Number of channels of the input feature to - downsample module. Default: 64 - expand_times (int): Times by which the in_channels are expanded. - Default:26. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, - block, - num_blocks, - num_steps=4, - num_units=4, - has_skip=False, - norm_cfg=dict(type='BN'), - in_channels=64, - expand_times=26, - init_cfg=None): - # Protect mutable default arguments - norm_cfg = cp.deepcopy(norm_cfg) - super().__init__(init_cfg=init_cfg) - self.has_skip = has_skip - self.in_channels = in_channels - assert len(num_blocks) == num_units - self.num_blocks = num_blocks - self.num_units = num_units - self.num_steps = num_steps - self.norm_cfg = norm_cfg - self.layer1 = self._make_layer( - block, - in_channels, - num_blocks[0], - expand_times=expand_times, - res_top_channels=in_channels) - for i in range(1, num_units): - module_name = f'layer{i + 1}' - self.add_module( - module_name, - self._make_layer( - block, - in_channels * pow(2, i), - num_blocks[i], - stride=2, - expand_times=expand_times, - res_top_channels=in_channels)) - - def _make_layer(self, - block, - out_channels, - blocks, - stride=1, - expand_times=26, - res_top_channels=64): - downsample = None - if stride != 1 or self.in_channels != out_channels * block.expansion: - downsample = ConvModule( - self.in_channels, - out_channels * block.expansion, - kernel_size=1, - stride=stride, - padding=0, - norm_cfg=self.norm_cfg, - act_cfg=None, - inplace=True) - - units = list() - units.append( - block( - self.in_channels, - out_channels, - num_steps=self.num_steps, - stride=stride, - downsample=downsample, - norm_cfg=self.norm_cfg, - expand_times=expand_times, - res_top_channels=res_top_channels)) - self.in_channels = out_channels * block.expansion - for _ in range(1, blocks): - units.append( - block( - self.in_channels, - out_channels, - num_steps=self.num_steps, - expand_times=expand_times, - res_top_channels=res_top_channels)) - - return nn.Sequential(*units) - - def forward(self, x, skip1, skip2): - out = list() - for i in range(self.num_units): - module_name = f'layer{i + 1}' - module_i = getattr(self, module_name) - x = module_i(x) - if self.has_skip: - x = x + skip1[i] + skip2[i] - out.append(x) - out.reverse() - - return tuple(out) - - -class Upsample_unit(BaseModule): - """Upsample unit for upsample module. - - Args: - ind (int): Indicates whether to interpolate (>0) and whether to - generate feature map for the next hourglass-like module. - num_units (int): Number of units that form a upsample module. Along - with ind and gen_cross_conv, nm_units is used to decide whether - to generate feature map for the next hourglass-like module. - in_channels (int): Channel number of the skip-in feature maps from - the corresponding downsample unit. - unit_channels (int): Channel number in this unit. Default:256. - gen_skip: (bool): Whether or not to generate skips for the posterior - downsample module. Default:False - gen_cross_conv (bool): Whether to generate feature map for the next - hourglass-like module. Default:False - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - out_channels (in): Number of channels of feature output by upsample - module. Must equal to in_channels of downsample module. Default:64 - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, - ind, - num_units, - in_channels, - unit_channels=256, - gen_skip=False, - gen_cross_conv=False, - norm_cfg=dict(type='BN'), - out_channels=64, - init_cfg=None): - # Protect mutable default arguments - norm_cfg = cp.deepcopy(norm_cfg) - super().__init__(init_cfg=init_cfg) - self.num_units = num_units - self.norm_cfg = norm_cfg - self.in_skip = ConvModule( - in_channels, - unit_channels, - kernel_size=1, - stride=1, - padding=0, - norm_cfg=self.norm_cfg, - act_cfg=None, - inplace=True) - self.relu = nn.ReLU(inplace=True) - - self.ind = ind - if self.ind > 0: - self.up_conv = ConvModule( - unit_channels, - unit_channels, - kernel_size=1, - stride=1, - padding=0, - norm_cfg=self.norm_cfg, - act_cfg=None, - inplace=True) - - self.gen_skip = gen_skip - if self.gen_skip: - self.out_skip1 = ConvModule( - in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0, - norm_cfg=self.norm_cfg, - inplace=True) - - self.out_skip2 = ConvModule( - unit_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0, - norm_cfg=self.norm_cfg, - inplace=True) - - self.gen_cross_conv = gen_cross_conv - if self.ind == num_units - 1 and self.gen_cross_conv: - self.cross_conv = ConvModule( - unit_channels, - out_channels, - kernel_size=1, - stride=1, - padding=0, - norm_cfg=self.norm_cfg, - inplace=True) - - def forward(self, x, up_x): - out = self.in_skip(x) - - if self.ind > 0: - up_x = F.interpolate( - up_x, - size=(x.size(2), x.size(3)), - mode='bilinear', - align_corners=True) - up_x = self.up_conv(up_x) - out = out + up_x - out = self.relu(out) - - skip1 = None - skip2 = None - if self.gen_skip: - skip1 = self.out_skip1(x) - skip2 = self.out_skip2(out) - - cross_conv = None - if self.ind == self.num_units - 1 and self.gen_cross_conv: - cross_conv = self.cross_conv(out) - - return out, skip1, skip2, cross_conv - - -class Upsample_module(BaseModule): - """Upsample module for RSN. - - Args: - unit_channels (int): Channel number in the upsample units. - Default:256. - num_units (int): Numbers of upsample units. Default: 4 - gen_skip (bool): Whether to generate skip for posterior downsample - module or not. Default:False - gen_cross_conv (bool): Whether to generate feature map for the next - hourglass-like module. Default:False - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - out_channels (int): Number of channels of feature output by upsample - module. Must equal to in_channels of downsample module. Default:64 - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, - unit_channels=256, - num_units=4, - gen_skip=False, - gen_cross_conv=False, - norm_cfg=dict(type='BN'), - out_channels=64, - init_cfg=None): - # Protect mutable default arguments - norm_cfg = cp.deepcopy(norm_cfg) - super().__init__(init_cfg=init_cfg) - self.in_channels = list() - for i in range(num_units): - self.in_channels.append(RSB.expansion * out_channels * pow(2, i)) - self.in_channels.reverse() - self.num_units = num_units - self.gen_skip = gen_skip - self.gen_cross_conv = gen_cross_conv - self.norm_cfg = norm_cfg - for i in range(num_units): - module_name = f'up{i + 1}' - self.add_module( - module_name, - Upsample_unit( - i, - self.num_units, - self.in_channels[i], - unit_channels, - self.gen_skip, - self.gen_cross_conv, - norm_cfg=self.norm_cfg, - out_channels=64)) - - def forward(self, x): - out = list() - skip1 = list() - skip2 = list() - cross_conv = None - for i in range(self.num_units): - module_i = getattr(self, f'up{i + 1}') - if i == 0: - outi, skip1_i, skip2_i, _ = module_i(x[i], None) - elif i == self.num_units - 1: - outi, skip1_i, skip2_i, cross_conv = module_i(x[i], out[i - 1]) - else: - outi, skip1_i, skip2_i, _ = module_i(x[i], out[i - 1]) - out.append(outi) - skip1.append(skip1_i) - skip2.append(skip2_i) - skip1.reverse() - skip2.reverse() - - return out, skip1, skip2, cross_conv - - -class Single_stage_RSN(BaseModule): - """Single_stage Residual Steps Network. - - Args: - unit_channels (int): Channel number in the upsample units. Default:256. - num_units (int): Numbers of downsample/upsample units. Default: 4 - gen_skip (bool): Whether to generate skip for posterior downsample - module or not. Default:False - gen_cross_conv (bool): Whether to generate feature map for the next - hourglass-like module. Default:False - has_skip (bool): Have skip connections from prior upsample - module or not. Default:False - num_steps (int): Number of steps in RSB. Default: 4 - num_blocks (list): Number of blocks in each downsample unit. - Default: [2, 2, 2, 2] Note: Make sure num_units==len(num_blocks) - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - in_channels (int): Number of channels of the feature from ResNet_Top. - Default: 64. - expand_times (int): Times by which the in_channels are expanded in RSB. - Default:26. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, - has_skip=False, - gen_skip=False, - gen_cross_conv=False, - unit_channels=256, - num_units=4, - num_steps=4, - num_blocks=[2, 2, 2, 2], - norm_cfg=dict(type='BN'), - in_channels=64, - expand_times=26, - init_cfg=None): - # Protect mutable default arguments - norm_cfg = cp.deepcopy(norm_cfg) - num_blocks = cp.deepcopy(num_blocks) - super().__init__(init_cfg=init_cfg) - assert len(num_blocks) == num_units - self.has_skip = has_skip - self.gen_skip = gen_skip - self.gen_cross_conv = gen_cross_conv - self.num_units = num_units - self.num_steps = num_steps - self.unit_channels = unit_channels - self.num_blocks = num_blocks - self.norm_cfg = norm_cfg - - self.downsample = Downsample_module(RSB, num_blocks, num_steps, - num_units, has_skip, norm_cfg, - in_channels, expand_times) - self.upsample = Upsample_module(unit_channels, num_units, gen_skip, - gen_cross_conv, norm_cfg, in_channels) - - def forward(self, x, skip1, skip2): - mid = self.downsample(x, skip1, skip2) - out, skip1, skip2, cross_conv = self.upsample(mid) - - return out, skip1, skip2, cross_conv - - -class ResNet_top(BaseModule): - """ResNet top for RSN. - - Args: - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - channels (int): Number of channels of the feature output by ResNet_top. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, norm_cfg=dict(type='BN'), channels=64, init_cfg=None): - # Protect mutable default arguments - norm_cfg = cp.deepcopy(norm_cfg) - super().__init__(init_cfg=init_cfg) - self.top = nn.Sequential( - ConvModule( - 3, - channels, - kernel_size=7, - stride=2, - padding=3, - norm_cfg=norm_cfg, - inplace=True), MaxPool2d(kernel_size=3, stride=2, padding=1)) - - def forward(self, img): - return self.top(img) - - -@MODELS.register_module() -class RSN(BaseBackbone): - """Residual Steps Network backbone. Paper ref: Cai et al. "Learning - Delicate Local Representations for Multi-Person Pose Estimation" (ECCV - 2020). - - Args: - unit_channels (int): Number of Channels in an upsample unit. - Default: 256 - num_stages (int): Number of stages in a multi-stage RSN. Default: 4 - num_units (int): NUmber of downsample/upsample units in a single-stage - RSN. Default: 4 Note: Make sure num_units == len(self.num_blocks) - num_blocks (list): Number of RSBs (Residual Steps Block) in each - downsample unit. Default: [2, 2, 2, 2] - num_steps (int): Number of steps in a RSB. Default:4 - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - res_top_channels (int): Number of channels of feature from ResNet_top. - Default: 64. - expand_times (int): Times by which the in_channels are expanded in RSB. - Default:26. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: - ``[ - dict(type='Kaiming', layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']), - dict( - type='Normal', - std=0.01, - layer=['Linear']), - ]`` - Example: - >>> from mmpose.models import RSN - >>> import torch - >>> self = RSN(num_stages=2,num_units=2,num_blocks=[2,2]) - >>> self.eval() - >>> inputs = torch.rand(1, 3, 511, 511) - >>> level_outputs = self.forward(inputs) - >>> for level_output in level_outputs: - ... for feature in level_output: - ... print(tuple(feature.shape)) - ... 
- (1, 256, 64, 64) - (1, 256, 128, 128) - (1, 256, 64, 64) - (1, 256, 128, 128) - """ - - def __init__(self, - unit_channels=256, - num_stages=4, - num_units=4, - num_blocks=[2, 2, 2, 2], - num_steps=4, - norm_cfg=dict(type='BN'), - res_top_channels=64, - expand_times=26, - init_cfg=[ - dict(type='Kaiming', layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']), - dict(type='Normal', std=0.01, layer=['Linear']), - ]): - # Protect mutable default arguments - norm_cfg = cp.deepcopy(norm_cfg) - num_blocks = cp.deepcopy(num_blocks) - super().__init__(init_cfg=init_cfg) - self.unit_channels = unit_channels - self.num_stages = num_stages - self.num_units = num_units - self.num_blocks = num_blocks - self.num_steps = num_steps - self.norm_cfg = norm_cfg - - assert self.num_stages > 0 - assert self.num_steps > 1 - assert self.num_units > 1 - assert self.num_units == len(self.num_blocks) - self.top = ResNet_top(norm_cfg=norm_cfg) - self.multi_stage_rsn = nn.ModuleList([]) - for i in range(self.num_stages): - if i == 0: - has_skip = False - else: - has_skip = True - if i != self.num_stages - 1: - gen_skip = True - gen_cross_conv = True - else: - gen_skip = False - gen_cross_conv = False - self.multi_stage_rsn.append( - Single_stage_RSN(has_skip, gen_skip, gen_cross_conv, - unit_channels, num_units, num_steps, - num_blocks, norm_cfg, res_top_channels, - expand_times)) - - def forward(self, x): - """Model forward function.""" - out_feats = [] - skip1 = None - skip2 = None - x = self.top(x) - for i in range(self.num_stages): - out, skip1, skip2, x = self.multi_stage_rsn[i](x, skip1, skip2) - out_feats.append(out) - - return out_feats +# Copyright (c) OpenMMLab. All rights reserved. +import copy as cp + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule, MaxPool2d +from mmengine.model import BaseModule + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone + + +class RSB(BaseModule): + """Residual Steps block for RSN. Paper ref: Cai et al. "Learning Delicate + Local Representations for Multi-Person Pose Estimation" (ECCV 2020). + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + num_steps (int): Numbers of steps in RSB + stride (int): stride of the block. Default: 1 + downsample (nn.Module): downsample operation on identity branch. + Default: None. + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + expand_times (int): Times by which the in_channels are expanded. + Default:26. + res_top_channels (int): Number of channels of feature output by + ResNet_top. Default:64. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + expansion = 1 + + def __init__(self, + in_channels, + out_channels, + num_steps=4, + stride=1, + downsample=None, + with_cp=False, + norm_cfg=dict(type='BN'), + expand_times=26, + res_top_channels=64, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + assert num_steps > 1 + self.in_channels = in_channels + self.branch_channels = self.in_channels * expand_times + self.branch_channels //= res_top_channels + self.out_channels = out_channels + self.stride = stride + self.downsample = downsample + self.with_cp = with_cp + self.norm_cfg = norm_cfg + self.num_steps = num_steps + self.conv_bn_relu1 = ConvModule( + self.in_channels, + self.num_steps * self.branch_channels, + kernel_size=1, + stride=self.stride, + padding=0, + norm_cfg=self.norm_cfg, + inplace=False) + for i in range(self.num_steps): + for j in range(i + 1): + module_name = f'conv_bn_relu2_{i + 1}_{j + 1}' + self.add_module( + module_name, + ConvModule( + self.branch_channels, + self.branch_channels, + kernel_size=3, + stride=1, + padding=1, + norm_cfg=self.norm_cfg, + inplace=False)) + self.conv_bn3 = ConvModule( + self.num_steps * self.branch_channels, + self.out_channels * self.expansion, + kernel_size=1, + stride=1, + padding=0, + act_cfg=None, + norm_cfg=self.norm_cfg, + inplace=False) + self.relu = nn.ReLU(inplace=False) + + def forward(self, x): + """Forward function.""" + + identity = x + x = self.conv_bn_relu1(x) + spx = torch.split(x, self.branch_channels, 1) + outputs = list() + outs = list() + for i in range(self.num_steps): + outputs_i = list() + outputs.append(outputs_i) + for j in range(i + 1): + if j == 0: + inputs = spx[i] + else: + inputs = outputs[i][j - 1] + if i > j: + inputs = inputs + outputs[i - 1][j] + module_name = f'conv_bn_relu2_{i + 1}_{j + 1}' + module_i_j = getattr(self, module_name) + outputs[i].append(module_i_j(inputs)) + + outs.append(outputs[i][i]) + out = torch.cat(tuple(outs), 1) + out = self.conv_bn3(out) + + if self.downsample is not None: + identity = self.downsample(identity) + out = out + identity + + out = self.relu(out) + + return out + + +class Downsample_module(BaseModule): + """Downsample module for RSN. + + Args: + block (nn.Module): Downsample block. + num_blocks (list): Number of blocks in each downsample unit. + num_units (int): Numbers of downsample units. Default: 4 + has_skip (bool): Have skip connections from prior upsample + module or not. Default:False + num_steps (int): Number of steps in a block. Default:4 + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + in_channels (int): Number of channels of the input feature to + downsample module. Default: 64 + expand_times (int): Times by which the in_channels are expanded. + Default:26. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + block, + num_blocks, + num_steps=4, + num_units=4, + has_skip=False, + norm_cfg=dict(type='BN'), + in_channels=64, + expand_times=26, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self.has_skip = has_skip + self.in_channels = in_channels + assert len(num_blocks) == num_units + self.num_blocks = num_blocks + self.num_units = num_units + self.num_steps = num_steps + self.norm_cfg = norm_cfg + self.layer1 = self._make_layer( + block, + in_channels, + num_blocks[0], + expand_times=expand_times, + res_top_channels=in_channels) + for i in range(1, num_units): + module_name = f'layer{i + 1}' + self.add_module( + module_name, + self._make_layer( + block, + in_channels * pow(2, i), + num_blocks[i], + stride=2, + expand_times=expand_times, + res_top_channels=in_channels)) + + def _make_layer(self, + block, + out_channels, + blocks, + stride=1, + expand_times=26, + res_top_channels=64): + downsample = None + if stride != 1 or self.in_channels != out_channels * block.expansion: + downsample = ConvModule( + self.in_channels, + out_channels * block.expansion, + kernel_size=1, + stride=stride, + padding=0, + norm_cfg=self.norm_cfg, + act_cfg=None, + inplace=True) + + units = list() + units.append( + block( + self.in_channels, + out_channels, + num_steps=self.num_steps, + stride=stride, + downsample=downsample, + norm_cfg=self.norm_cfg, + expand_times=expand_times, + res_top_channels=res_top_channels)) + self.in_channels = out_channels * block.expansion + for _ in range(1, blocks): + units.append( + block( + self.in_channels, + out_channels, + num_steps=self.num_steps, + expand_times=expand_times, + res_top_channels=res_top_channels)) + + return nn.Sequential(*units) + + def forward(self, x, skip1, skip2): + out = list() + for i in range(self.num_units): + module_name = f'layer{i + 1}' + module_i = getattr(self, module_name) + x = module_i(x) + if self.has_skip: + x = x + skip1[i] + skip2[i] + out.append(x) + out.reverse() + + return tuple(out) + + +class Upsample_unit(BaseModule): + """Upsample unit for upsample module. + + Args: + ind (int): Indicates whether to interpolate (>0) and whether to + generate feature map for the next hourglass-like module. + num_units (int): Number of units that form a upsample module. Along + with ind and gen_cross_conv, nm_units is used to decide whether + to generate feature map for the next hourglass-like module. + in_channels (int): Channel number of the skip-in feature maps from + the corresponding downsample unit. + unit_channels (int): Channel number in this unit. Default:256. + gen_skip: (bool): Whether or not to generate skips for the posterior + downsample module. Default:False + gen_cross_conv (bool): Whether to generate feature map for the next + hourglass-like module. Default:False + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + out_channels (in): Number of channels of feature output by upsample + module. Must equal to in_channels of downsample module. Default:64 + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + ind, + num_units, + in_channels, + unit_channels=256, + gen_skip=False, + gen_cross_conv=False, + norm_cfg=dict(type='BN'), + out_channels=64, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self.num_units = num_units + self.norm_cfg = norm_cfg + self.in_skip = ConvModule( + in_channels, + unit_channels, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=self.norm_cfg, + act_cfg=None, + inplace=True) + self.relu = nn.ReLU(inplace=True) + + self.ind = ind + if self.ind > 0: + self.up_conv = ConvModule( + unit_channels, + unit_channels, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=self.norm_cfg, + act_cfg=None, + inplace=True) + + self.gen_skip = gen_skip + if self.gen_skip: + self.out_skip1 = ConvModule( + in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=self.norm_cfg, + inplace=True) + + self.out_skip2 = ConvModule( + unit_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=self.norm_cfg, + inplace=True) + + self.gen_cross_conv = gen_cross_conv + if self.ind == num_units - 1 and self.gen_cross_conv: + self.cross_conv = ConvModule( + unit_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=self.norm_cfg, + inplace=True) + + def forward(self, x, up_x): + out = self.in_skip(x) + + if self.ind > 0: + up_x = F.interpolate( + up_x, + size=(x.size(2), x.size(3)), + mode='bilinear', + align_corners=True) + up_x = self.up_conv(up_x) + out = out + up_x + out = self.relu(out) + + skip1 = None + skip2 = None + if self.gen_skip: + skip1 = self.out_skip1(x) + skip2 = self.out_skip2(out) + + cross_conv = None + if self.ind == self.num_units - 1 and self.gen_cross_conv: + cross_conv = self.cross_conv(out) + + return out, skip1, skip2, cross_conv + + +class Upsample_module(BaseModule): + """Upsample module for RSN. + + Args: + unit_channels (int): Channel number in the upsample units. + Default:256. + num_units (int): Numbers of upsample units. Default: 4 + gen_skip (bool): Whether to generate skip for posterior downsample + module or not. Default:False + gen_cross_conv (bool): Whether to generate feature map for the next + hourglass-like module. Default:False + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + out_channels (int): Number of channels of feature output by upsample + module. Must equal to in_channels of downsample module. Default:64 + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + unit_channels=256, + num_units=4, + gen_skip=False, + gen_cross_conv=False, + norm_cfg=dict(type='BN'), + out_channels=64, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self.in_channels = list() + for i in range(num_units): + self.in_channels.append(RSB.expansion * out_channels * pow(2, i)) + self.in_channels.reverse() + self.num_units = num_units + self.gen_skip = gen_skip + self.gen_cross_conv = gen_cross_conv + self.norm_cfg = norm_cfg + for i in range(num_units): + module_name = f'up{i + 1}' + self.add_module( + module_name, + Upsample_unit( + i, + self.num_units, + self.in_channels[i], + unit_channels, + self.gen_skip, + self.gen_cross_conv, + norm_cfg=self.norm_cfg, + out_channels=64)) + + def forward(self, x): + out = list() + skip1 = list() + skip2 = list() + cross_conv = None + for i in range(self.num_units): + module_i = getattr(self, f'up{i + 1}') + if i == 0: + outi, skip1_i, skip2_i, _ = module_i(x[i], None) + elif i == self.num_units - 1: + outi, skip1_i, skip2_i, cross_conv = module_i(x[i], out[i - 1]) + else: + outi, skip1_i, skip2_i, _ = module_i(x[i], out[i - 1]) + out.append(outi) + skip1.append(skip1_i) + skip2.append(skip2_i) + skip1.reverse() + skip2.reverse() + + return out, skip1, skip2, cross_conv + + +class Single_stage_RSN(BaseModule): + """Single_stage Residual Steps Network. + + Args: + unit_channels (int): Channel number in the upsample units. Default:256. + num_units (int): Numbers of downsample/upsample units. Default: 4 + gen_skip (bool): Whether to generate skip for posterior downsample + module or not. Default:False + gen_cross_conv (bool): Whether to generate feature map for the next + hourglass-like module. Default:False + has_skip (bool): Have skip connections from prior upsample + module or not. Default:False + num_steps (int): Number of steps in RSB. Default: 4 + num_blocks (list): Number of blocks in each downsample unit. + Default: [2, 2, 2, 2] Note: Make sure num_units==len(num_blocks) + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + in_channels (int): Number of channels of the feature from ResNet_Top. + Default: 64. + expand_times (int): Times by which the in_channels are expanded in RSB. + Default:26. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + has_skip=False, + gen_skip=False, + gen_cross_conv=False, + unit_channels=256, + num_units=4, + num_steps=4, + num_blocks=[2, 2, 2, 2], + norm_cfg=dict(type='BN'), + in_channels=64, + expand_times=26, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + num_blocks = cp.deepcopy(num_blocks) + super().__init__(init_cfg=init_cfg) + assert len(num_blocks) == num_units + self.has_skip = has_skip + self.gen_skip = gen_skip + self.gen_cross_conv = gen_cross_conv + self.num_units = num_units + self.num_steps = num_steps + self.unit_channels = unit_channels + self.num_blocks = num_blocks + self.norm_cfg = norm_cfg + + self.downsample = Downsample_module(RSB, num_blocks, num_steps, + num_units, has_skip, norm_cfg, + in_channels, expand_times) + self.upsample = Upsample_module(unit_channels, num_units, gen_skip, + gen_cross_conv, norm_cfg, in_channels) + + def forward(self, x, skip1, skip2): + mid = self.downsample(x, skip1, skip2) + out, skip1, skip2, cross_conv = self.upsample(mid) + + return out, skip1, skip2, cross_conv + + +class ResNet_top(BaseModule): + """ResNet top for RSN. + + Args: + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + channels (int): Number of channels of the feature output by ResNet_top. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, norm_cfg=dict(type='BN'), channels=64, init_cfg=None): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self.top = nn.Sequential( + ConvModule( + 3, + channels, + kernel_size=7, + stride=2, + padding=3, + norm_cfg=norm_cfg, + inplace=True), MaxPool2d(kernel_size=3, stride=2, padding=1)) + + def forward(self, img): + return self.top(img) + + +@MODELS.register_module() +class RSN(BaseBackbone): + """Residual Steps Network backbone. Paper ref: Cai et al. "Learning + Delicate Local Representations for Multi-Person Pose Estimation" (ECCV + 2020). + + Args: + unit_channels (int): Number of Channels in an upsample unit. + Default: 256 + num_stages (int): Number of stages in a multi-stage RSN. Default: 4 + num_units (int): NUmber of downsample/upsample units in a single-stage + RSN. Default: 4 Note: Make sure num_units == len(self.num_blocks) + num_blocks (list): Number of RSBs (Residual Steps Block) in each + downsample unit. Default: [2, 2, 2, 2] + num_steps (int): Number of steps in a RSB. Default:4 + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + res_top_channels (int): Number of channels of feature from ResNet_top. + Default: 64. + expand_times (int): Times by which the in_channels are expanded in RSB. + Default:26. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']), + dict( + type='Normal', + std=0.01, + layer=['Linear']), + ]`` + Example: + >>> from mmpose.models import RSN + >>> import torch + >>> self = RSN(num_stages=2,num_units=2,num_blocks=[2,2]) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 511, 511) + >>> level_outputs = self.forward(inputs) + >>> for level_output in level_outputs: + ... for feature in level_output: + ... print(tuple(feature.shape)) + ... 
+ (1, 256, 64, 64) + (1, 256, 128, 128) + (1, 256, 64, 64) + (1, 256, 128, 128) + """ + + def __init__(self, + unit_channels=256, + num_stages=4, + num_units=4, + num_blocks=[2, 2, 2, 2], + num_steps=4, + norm_cfg=dict(type='BN'), + res_top_channels=64, + expand_times=26, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']), + dict(type='Normal', std=0.01, layer=['Linear']), + ]): + # Protect mutable default arguments + norm_cfg = cp.deepcopy(norm_cfg) + num_blocks = cp.deepcopy(num_blocks) + super().__init__(init_cfg=init_cfg) + self.unit_channels = unit_channels + self.num_stages = num_stages + self.num_units = num_units + self.num_blocks = num_blocks + self.num_steps = num_steps + self.norm_cfg = norm_cfg + + assert self.num_stages > 0 + assert self.num_steps > 1 + assert self.num_units > 1 + assert self.num_units == len(self.num_blocks) + self.top = ResNet_top(norm_cfg=norm_cfg) + self.multi_stage_rsn = nn.ModuleList([]) + for i in range(self.num_stages): + if i == 0: + has_skip = False + else: + has_skip = True + if i != self.num_stages - 1: + gen_skip = True + gen_cross_conv = True + else: + gen_skip = False + gen_cross_conv = False + self.multi_stage_rsn.append( + Single_stage_RSN(has_skip, gen_skip, gen_cross_conv, + unit_channels, num_units, num_steps, + num_blocks, norm_cfg, res_top_channels, + expand_times)) + + def forward(self, x): + """Model forward function.""" + out_feats = [] + skip1 = None + skip2 = None + x = self.top(x) + for i in range(self.num_stages): + out, skip1, skip2, x = self.multi_stage_rsn[i](x, skip1, skip2) + out_feats.append(out) + + return out_feats diff --git a/mmpose/models/backbones/scnet.py b/mmpose/models/backbones/scnet.py index 5c802d256e..a99afe2c38 100644 --- a/mmpose/models/backbones/scnet.py +++ b/mmpose/models/backbones/scnet.py @@ -1,252 +1,252 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy - -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.checkpoint as cp -from mmcv.cnn import build_conv_layer, build_norm_layer -from mmengine.model import BaseModule - -from mmpose.registry import MODELS -from .resnet import Bottleneck, ResNet - - -class SCConv(BaseModule): - """SCConv (Self-calibrated Convolution) - - Args: - in_channels (int): The input channels of the SCConv. - out_channels (int): The output channel of the SCConv. - stride (int): stride of SCConv. - pooling_r (int): size of pooling for scconv. - conv_cfg (dict): dictionary to construct and config conv layer. - Default: None - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, - in_channels, - out_channels, - stride, - pooling_r, - conv_cfg=None, - norm_cfg=dict(type='BN', momentum=0.1), - init_cfg=None): - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - super().__init__(init_cfg=init_cfg) - - assert in_channels == out_channels - - self.k2 = nn.Sequential( - nn.AvgPool2d(kernel_size=pooling_r, stride=pooling_r), - build_conv_layer( - conv_cfg, - in_channels, - in_channels, - kernel_size=3, - stride=1, - padding=1, - bias=False), - build_norm_layer(norm_cfg, in_channels)[1], - ) - self.k3 = nn.Sequential( - build_conv_layer( - conv_cfg, - in_channels, - in_channels, - kernel_size=3, - stride=1, - padding=1, - bias=False), - build_norm_layer(norm_cfg, in_channels)[1], - ) - self.k4 = nn.Sequential( - build_conv_layer( - conv_cfg, - in_channels, - in_channels, - kernel_size=3, - stride=stride, - padding=1, - bias=False), - build_norm_layer(norm_cfg, out_channels)[1], - nn.ReLU(inplace=True), - ) - - def forward(self, x): - """Forward function.""" - identity = x - - out = torch.sigmoid( - torch.add(identity, F.interpolate(self.k2(x), - identity.size()[2:]))) - out = torch.mul(self.k3(x), out) - out = self.k4(out) - - return out - - -class SCBottleneck(Bottleneck): - """SC(Self-calibrated) Bottleneck. - - Args: - in_channels (int): The input channels of the SCBottleneck block. - out_channels (int): The output channel of the SCBottleneck block. - """ - - pooling_r = 4 - - def __init__(self, in_channels, out_channels, **kwargs): - super().__init__(in_channels, out_channels, **kwargs) - self.mid_channels = out_channels // self.expansion // 2 - - self.norm1_name, norm1 = build_norm_layer( - self.norm_cfg, self.mid_channels, postfix=1) - self.norm2_name, norm2 = build_norm_layer( - self.norm_cfg, self.mid_channels, postfix=2) - self.norm3_name, norm3 = build_norm_layer( - self.norm_cfg, out_channels, postfix=3) - - self.conv1 = build_conv_layer( - self.conv_cfg, - in_channels, - self.mid_channels, - kernel_size=1, - stride=1, - bias=False) - self.add_module(self.norm1_name, norm1) - - self.k1 = nn.Sequential( - build_conv_layer( - self.conv_cfg, - self.mid_channels, - self.mid_channels, - kernel_size=3, - stride=self.stride, - padding=1, - bias=False), - build_norm_layer(self.norm_cfg, self.mid_channels)[1], - nn.ReLU(inplace=True)) - - self.conv2 = build_conv_layer( - self.conv_cfg, - in_channels, - self.mid_channels, - kernel_size=1, - stride=1, - bias=False) - self.add_module(self.norm2_name, norm2) - - self.scconv = SCConv(self.mid_channels, self.mid_channels, self.stride, - self.pooling_r, self.conv_cfg, self.norm_cfg) - - self.conv3 = build_conv_layer( - self.conv_cfg, - self.mid_channels * 2, - out_channels, - kernel_size=1, - stride=1, - bias=False) - self.add_module(self.norm3_name, norm3) - - def forward(self, x): - """Forward function.""" - - def _inner_forward(x): - identity = x - - out_a = self.conv1(x) - out_a = self.norm1(out_a) - out_a = self.relu(out_a) - - out_a = self.k1(out_a) - - out_b = self.conv2(x) - out_b = self.norm2(out_b) - out_b = self.relu(out_b) - - out_b = self.scconv(out_b) - - out = self.conv3(torch.cat([out_a, out_b], dim=1)) - out = self.norm3(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - out = self.relu(out) - - return out - - -@MODELS.register_module() -class SCNet(ResNet): - """SCNet 
backbone. - - Improving Convolutional Networks with Self-Calibrated Convolutions, - Jiang-Jiang Liu, Qibin Hou, Ming-Ming Cheng, Changhu Wang, Jiashi Feng, - IEEE CVPR, 2020. - http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf - - Args: - depth (int): Depth of scnet, from {50, 101}. - in_channels (int): Number of input image channels. Normally 3. - base_channels (int): Number of base channels of hidden layer. - num_stages (int): SCNet stages, normally 4. - strides (Sequence[int]): Strides of the first block of each stage. - dilations (Sequence[int]): Dilation of each stage. - out_indices (Sequence[int]): Output from which stages. - style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two - layer is the 3x3 conv layer, otherwise the stride-two layer is - the first 1x1 conv layer. - deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv - avg_down (bool): Use AvgPool instead of stride conv when - downsampling in the bottleneck. - frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - -1 means not freezing any parameters. - norm_cfg (dict): Dictionary to construct and config norm layer. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. - zero_init_residual (bool): Whether to use zero init for last norm layer - in resblocks to let them behave as identity. - - Example: - >>> from mmpose.models import SCNet - >>> import torch - >>> self = SCNet(depth=50, out_indices=(0, 1, 2, 3)) - >>> self.eval() - >>> inputs = torch.rand(1, 3, 224, 224) - >>> level_outputs = self.forward(inputs) - >>> for level_out in level_outputs: - ... print(tuple(level_out.shape)) - (1, 256, 56, 56) - (1, 512, 28, 28) - (1, 1024, 14, 14) - (1, 2048, 7, 7) - """ - - arch_settings = { - 50: (SCBottleneck, [3, 4, 6, 3]), - 101: (SCBottleneck, [3, 4, 23, 3]) - } - - def __init__(self, depth, **kwargs): - if depth not in self.arch_settings: - raise KeyError(f'invalid depth {depth} for SCNet') - super().__init__(depth, **kwargs) +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmengine.model import BaseModule + +from mmpose.registry import MODELS +from .resnet import Bottleneck, ResNet + + +class SCConv(BaseModule): + """SCConv (Self-calibrated Convolution) + + Args: + in_channels (int): The input channels of the SCConv. + out_channels (int): The output channel of the SCConv. + stride (int): stride of SCConv. + pooling_r (int): size of pooling for scconv. + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + in_channels, + out_channels, + stride, + pooling_r, + conv_cfg=None, + norm_cfg=dict(type='BN', momentum=0.1), + init_cfg=None): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + + assert in_channels == out_channels + + self.k2 = nn.Sequential( + nn.AvgPool2d(kernel_size=pooling_r, stride=pooling_r), + build_conv_layer( + conv_cfg, + in_channels, + in_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False), + build_norm_layer(norm_cfg, in_channels)[1], + ) + self.k3 = nn.Sequential( + build_conv_layer( + conv_cfg, + in_channels, + in_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False), + build_norm_layer(norm_cfg, in_channels)[1], + ) + self.k4 = nn.Sequential( + build_conv_layer( + conv_cfg, + in_channels, + in_channels, + kernel_size=3, + stride=stride, + padding=1, + bias=False), + build_norm_layer(norm_cfg, out_channels)[1], + nn.ReLU(inplace=True), + ) + + def forward(self, x): + """Forward function.""" + identity = x + + out = torch.sigmoid( + torch.add(identity, F.interpolate(self.k2(x), + identity.size()[2:]))) + out = torch.mul(self.k3(x), out) + out = self.k4(out) + + return out + + +class SCBottleneck(Bottleneck): + """SC(Self-calibrated) Bottleneck. + + Args: + in_channels (int): The input channels of the SCBottleneck block. + out_channels (int): The output channel of the SCBottleneck block. + """ + + pooling_r = 4 + + def __init__(self, in_channels, out_channels, **kwargs): + super().__init__(in_channels, out_channels, **kwargs) + self.mid_channels = out_channels // self.expansion // 2 + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, out_channels, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + self.mid_channels, + kernel_size=1, + stride=1, + bias=False) + self.add_module(self.norm1_name, norm1) + + self.k1 = nn.Sequential( + build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.mid_channels, + kernel_size=3, + stride=self.stride, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, self.mid_channels)[1], + nn.ReLU(inplace=True)) + + self.conv2 = build_conv_layer( + self.conv_cfg, + in_channels, + self.mid_channels, + kernel_size=1, + stride=1, + bias=False) + self.add_module(self.norm2_name, norm2) + + self.scconv = SCConv(self.mid_channels, self.mid_channels, self.stride, + self.pooling_r, self.conv_cfg, self.norm_cfg) + + self.conv3 = build_conv_layer( + self.conv_cfg, + self.mid_channels * 2, + out_channels, + kernel_size=1, + stride=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + def forward(self, x): + """Forward function.""" + + def _inner_forward(x): + identity = x + + out_a = self.conv1(x) + out_a = self.norm1(out_a) + out_a = self.relu(out_a) + + out_a = self.k1(out_a) + + out_b = self.conv2(x) + out_b = self.norm2(out_b) + out_b = self.relu(out_b) + + out_b = self.scconv(out_b) + + out = self.conv3(torch.cat([out_a, out_b], dim=1)) + out = self.norm3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +@MODELS.register_module() +class SCNet(ResNet): + """SCNet 
backbone. + + Improving Convolutional Networks with Self-Calibrated Convolutions, + Jiang-Jiang Liu, Qibin Hou, Ming-Ming Cheng, Changhu Wang, Jiashi Feng, + IEEE CVPR, 2020. + http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf + + Args: + depth (int): Depth of scnet, from {50, 101}. + in_channels (int): Number of input image channels. Normally 3. + base_channels (int): Number of base channels of hidden layer. + num_stages (int): SCNet stages, normally 4. + strides (Sequence[int]): Strides of the first block of each stage. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. + norm_cfg (dict): Dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. + + Example: + >>> from mmpose.models import SCNet + >>> import torch + >>> self = SCNet(depth=50, out_indices=(0, 1, 2, 3)) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 224, 224) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 256, 56, 56) + (1, 512, 28, 28) + (1, 1024, 14, 14) + (1, 2048, 7, 7) + """ + + arch_settings = { + 50: (SCBottleneck, [3, 4, 6, 3]), + 101: (SCBottleneck, [3, 4, 23, 3]) + } + + def __init__(self, depth, **kwargs): + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for SCNet') + super().__init__(depth, **kwargs) diff --git a/mmpose/models/backbones/seresnet.py b/mmpose/models/backbones/seresnet.py index 617a1b72be..042d3cc961 100644 --- a/mmpose/models/backbones/seresnet.py +++ b/mmpose/models/backbones/seresnet.py @@ -1,134 +1,134 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.utils.checkpoint as cp - -from mmpose.registry import MODELS -from .resnet import Bottleneck, ResLayer, ResNet -from .utils.se_layer import SELayer - - -class SEBottleneck(Bottleneck): - """SEBottleneck block for SEResNet. - - Args: - in_channels (int): The input channels of the SEBottleneck block. - out_channels (int): The output channel of the SEBottleneck block. - se_ratio (int): Squeeze ratio in SELayer. 
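In the scnet.py hunk above, SCConv gates a 3x3 branch with a sigmoid map computed from an average-pooled view of the input, roughly out = k4(k3(x) * sigmoid(x + upsample(k2(x)))) with the pooling folded into k2, and SCBottleneck concatenates that path with a plain k1 path. A minimal sketch of the resulting SCNet backbone, mirroring its docstring example:

import torch

from mmpose.models import SCNet

# SCNet-50 returning all four stages, as in the class docstring.
model = SCNet(depth=50, out_indices=(0, 1, 2, 3))
model.eval()

with torch.no_grad():
    feats = model(torch.rand(1, 3, 224, 224))

for feat in feats:
    print(tuple(feat.shape))
# Expected per the docstring:
# (1, 256, 56, 56), (1, 512, 28, 28), (1, 1024, 14, 14), (1, 2048, 7, 7)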
Default: 16 - """ - - def __init__(self, in_channels, out_channels, se_ratio=16, **kwargs): - super().__init__(in_channels, out_channels, **kwargs) - self.se_layer = SELayer(out_channels, ratio=se_ratio) - - def forward(self, x): - - def _inner_forward(x): - identity = x - - out = self.conv1(x) - out = self.norm1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.norm2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.norm3(out) - - out = self.se_layer(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - out = self.relu(out) - - return out - - -@MODELS.register_module() -class SEResNet(ResNet): - """SEResNet backbone. - - Please refer to the `paper `__ for - details. - - Args: - depth (int): Network depth, from {50, 101, 152}. - se_ratio (int): Squeeze ratio in SELayer. Default: 16. - in_channels (int): Number of input image channels. Default: 3. - stem_channels (int): Output channels of the stem layer. Default: 64. - num_stages (int): Stages of the network. Default: 4. - strides (Sequence[int]): Strides of the first block of each stage. - Default: ``(1, 2, 2, 2)``. - dilations (Sequence[int]): Dilation of each stage. - Default: ``(1, 1, 1, 1)``. - out_indices (Sequence[int]): Output from which stages. If only one - stage is specified, a single tensor (feature map) is returned, - otherwise multiple stages are specified, a tuple of tensors will - be returned. Default: ``(3, )``. - style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two - layer is the 3x3 conv layer, otherwise the stride-two layer is - the first 1x1 conv layer. - deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. - Default: False. - avg_down (bool): Use AvgPool instead of stride conv when - downsampling in the bottleneck. Default: False. - frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - -1 means not freezing any parameters. Default: -1. - conv_cfg (dict | None): The config dict for conv layers. Default: None. - norm_cfg (dict): The config dict for norm layers. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Default: False. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - zero_init_residual (bool): Whether to use zero init for last norm layer - in resblocks to let them behave as identity. Default: True. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: - ``[ - dict(type='Kaiming', layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ]`` - - Example: - >>> from mmpose.models import SEResNet - >>> import torch - >>> self = SEResNet(depth=50, out_indices=(0, 1, 2, 3)) - >>> self.eval() - >>> inputs = torch.rand(1, 3, 224, 224) - >>> level_outputs = self.forward(inputs) - >>> for level_out in level_outputs: - ... 
print(tuple(level_out.shape)) - (1, 256, 56, 56) - (1, 512, 28, 28) - (1, 1024, 14, 14) - (1, 2048, 7, 7) - """ - - arch_settings = { - 50: (SEBottleneck, (3, 4, 6, 3)), - 101: (SEBottleneck, (3, 4, 23, 3)), - 152: (SEBottleneck, (3, 8, 36, 3)) - } - - def __init__(self, depth, se_ratio=16, **kwargs): - if depth not in self.arch_settings: - raise KeyError(f'invalid depth {depth} for SEResNet') - self.se_ratio = se_ratio - super().__init__(depth, **kwargs) - - def make_res_layer(self, **kwargs): - return ResLayer(se_ratio=self.se_ratio, **kwargs) +# Copyright (c) OpenMMLab. All rights reserved. +import torch.utils.checkpoint as cp + +from mmpose.registry import MODELS +from .resnet import Bottleneck, ResLayer, ResNet +from .utils.se_layer import SELayer + + +class SEBottleneck(Bottleneck): + """SEBottleneck block for SEResNet. + + Args: + in_channels (int): The input channels of the SEBottleneck block. + out_channels (int): The output channel of the SEBottleneck block. + se_ratio (int): Squeeze ratio in SELayer. Default: 16 + """ + + def __init__(self, in_channels, out_channels, se_ratio=16, **kwargs): + super().__init__(in_channels, out_channels, **kwargs) + self.se_layer = SELayer(out_channels, ratio=se_ratio) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.norm3(out) + + out = self.se_layer(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +@MODELS.register_module() +class SEResNet(ResNet): + """SEResNet backbone. + + Please refer to the `paper `__ for + details. + + Args: + depth (int): Network depth, from {50, 101, 152}. + se_ratio (int): Squeeze ratio in SELayer. Default: 16. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. 
Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + + Example: + >>> from mmpose.models import SEResNet + >>> import torch + >>> self = SEResNet(depth=50, out_indices=(0, 1, 2, 3)) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 224, 224) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 256, 56, 56) + (1, 512, 28, 28) + (1, 1024, 14, 14) + (1, 2048, 7, 7) + """ + + arch_settings = { + 50: (SEBottleneck, (3, 4, 6, 3)), + 101: (SEBottleneck, (3, 4, 23, 3)), + 152: (SEBottleneck, (3, 8, 36, 3)) + } + + def __init__(self, depth, se_ratio=16, **kwargs): + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for SEResNet') + self.se_ratio = se_ratio + super().__init__(depth, **kwargs) + + def make_res_layer(self, **kwargs): + return ResLayer(se_ratio=self.se_ratio, **kwargs) diff --git a/mmpose/models/backbones/seresnext.py b/mmpose/models/backbones/seresnext.py index c1f5a6c8f3..7469dfddab 100644 --- a/mmpose/models/backbones/seresnext.py +++ b/mmpose/models/backbones/seresnext.py @@ -1,179 +1,179 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmcv.cnn import build_conv_layer, build_norm_layer - -from mmpose.registry import MODELS -from .resnet import ResLayer -from .seresnet import SEBottleneck as _SEBottleneck -from .seresnet import SEResNet - - -class SEBottleneck(_SEBottleneck): - """SEBottleneck block for SEResNeXt. - - Args: - in_channels (int): Input channels of this block. - out_channels (int): Output channels of this block. - base_channels (int): Middle channels of the first stage. Default: 64. - groups (int): Groups of conv2. - width_per_group (int): Width per group of conv2. 64x4d indicates - ``groups=64, width_per_group=4`` and 32x8d indicates - ``groups=32, width_per_group=8``. - stride (int): stride of the block. Default: 1 - dilation (int): dilation of convolution. Default: 1 - downsample (nn.Module): downsample operation on identity branch. - Default: None - se_ratio (int): Squeeze ratio in SELayer. Default: 16 - style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two - layer is the 3x3 conv layer, otherwise the stride-two layer is - the first 1x1 conv layer. - conv_cfg (dict): dictionary to construct and config conv layer. - Default: None - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - in_channels, - out_channels, - base_channels=64, - groups=32, - width_per_group=4, - se_ratio=16, - **kwargs): - super().__init__(in_channels, out_channels, se_ratio, **kwargs) - self.groups = groups - self.width_per_group = width_per_group - - # We follow the same rational of ResNext to compute mid_channels. - # For SEResNet bottleneck, middle channels are determined by expansion - # and out_channels, but for SEResNeXt bottleneck, it is determined by - # groups and width_per_group and the stage it is located in. 
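Because SEResNet is registered with MODELS above, it can also be built from a config dict rather than constructed directly. A hedged sketch, assuming the standard mmengine registry build interface:

import torch

from mmpose.registry import MODELS

# Build SEResNet-50 from a config dict via the registry it is
# registered with above; equivalent to SEResNet(depth=50, ...).
cfg = dict(type='SEResNet', depth=50, se_ratio=16, out_indices=(0, 1, 2, 3))
model = MODELS.build(cfg)
model.eval()

with torch.no_grad():
    for feat in model(torch.rand(1, 3, 224, 224)):
        print(tuple(feat.shape))
# Expected per the docstring: (1, 256, 56, 56) through (1, 2048, 7, 7)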
- if groups != 1: - assert self.mid_channels % base_channels == 0 - self.mid_channels = ( - groups * width_per_group * self.mid_channels // base_channels) - - self.norm1_name, norm1 = build_norm_layer( - self.norm_cfg, self.mid_channels, postfix=1) - self.norm2_name, norm2 = build_norm_layer( - self.norm_cfg, self.mid_channels, postfix=2) - self.norm3_name, norm3 = build_norm_layer( - self.norm_cfg, self.out_channels, postfix=3) - - self.conv1 = build_conv_layer( - self.conv_cfg, - self.in_channels, - self.mid_channels, - kernel_size=1, - stride=self.conv1_stride, - bias=False) - self.add_module(self.norm1_name, norm1) - self.conv2 = build_conv_layer( - self.conv_cfg, - self.mid_channels, - self.mid_channels, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - groups=groups, - bias=False) - - self.add_module(self.norm2_name, norm2) - self.conv3 = build_conv_layer( - self.conv_cfg, - self.mid_channels, - self.out_channels, - kernel_size=1, - bias=False) - self.add_module(self.norm3_name, norm3) - - -@MODELS.register_module() -class SEResNeXt(SEResNet): - """SEResNeXt backbone. - - Please refer to the `paper `__ for - details. - - Args: - depth (int): Network depth, from {50, 101, 152}. - groups (int): Groups of conv2 in Bottleneck. Default: 32. - width_per_group (int): Width per group of conv2 in Bottleneck. - Default: 4. - se_ratio (int): Squeeze ratio in SELayer. Default: 16. - in_channels (int): Number of input image channels. Default: 3. - stem_channels (int): Output channels of the stem layer. Default: 64. - num_stages (int): Stages of the network. Default: 4. - strides (Sequence[int]): Strides of the first block of each stage. - Default: ``(1, 2, 2, 2)``. - dilations (Sequence[int]): Dilation of each stage. - Default: ``(1, 1, 1, 1)``. - out_indices (Sequence[int]): Output from which stages. If only one - stage is specified, a single tensor (feature map) is returned, - otherwise multiple stages are specified, a tuple of tensors will - be returned. Default: ``(3, )``. - style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two - layer is the 3x3 conv layer, otherwise the stride-two layer is - the first 1x1 conv layer. - deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. - Default: False. - avg_down (bool): Use AvgPool instead of stride conv when - downsampling in the bottleneck. Default: False. - frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - -1 means not freezing any parameters. Default: -1. - conv_cfg (dict | None): The config dict for conv layers. Default: None. - norm_cfg (dict): The config dict for norm layers. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Default: False. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - zero_init_residual (bool): Whether to use zero init for last norm layer - in resblocks to let them behave as identity. Default: True. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: - ``[ - dict(type='Kaiming', layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ]`` - - Example: - >>> from mmpose.models import SEResNeXt - >>> import torch - >>> self = SEResNet(depth=50, out_indices=(0, 1, 2, 3)) - >>> self.eval() - >>> inputs = torch.rand(1, 3, 224, 224) - >>> level_outputs = self.forward(inputs) - >>> for level_out in level_outputs: - ... print(tuple(level_out.shape)) - (1, 256, 56, 56) - (1, 512, 28, 28) - (1, 1024, 14, 14) - (1, 2048, 7, 7) - """ - - arch_settings = { - 50: (SEBottleneck, (3, 4, 6, 3)), - 101: (SEBottleneck, (3, 4, 23, 3)), - 152: (SEBottleneck, (3, 8, 36, 3)) - } - - def __init__(self, depth, groups=32, width_per_group=4, **kwargs): - self.groups = groups - self.width_per_group = width_per_group - super().__init__(depth, **kwargs) - - def make_res_layer(self, **kwargs): - return ResLayer( - groups=self.groups, - width_per_group=self.width_per_group, - base_channels=self.base_channels, - **kwargs) +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.cnn import build_conv_layer, build_norm_layer + +from mmpose.registry import MODELS +from .resnet import ResLayer +from .seresnet import SEBottleneck as _SEBottleneck +from .seresnet import SEResNet + + +class SEBottleneck(_SEBottleneck): + """SEBottleneck block for SEResNeXt. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + base_channels (int): Middle channels of the first stage. Default: 64. + groups (int): Groups of conv2. + width_per_group (int): Width per group of conv2. 64x4d indicates + ``groups=64, width_per_group=4`` and 32x8d indicates + ``groups=32, width_per_group=8``. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module): downsample operation on identity branch. + Default: None + se_ratio (int): Squeeze ratio in SELayer. Default: 16 + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels, + out_channels, + base_channels=64, + groups=32, + width_per_group=4, + se_ratio=16, + **kwargs): + super().__init__(in_channels, out_channels, se_ratio, **kwargs) + self.groups = groups + self.width_per_group = width_per_group + + # We follow the same rational of ResNext to compute mid_channels. + # For SEResNet bottleneck, middle channels are determined by expansion + # and out_channels, but for SEResNeXt bottleneck, it is determined by + # groups and width_per_group and the stage it is located in. 
+ if groups != 1: + assert self.mid_channels % base_channels == 0 + self.mid_channels = ( + groups * width_per_group * self.mid_channels // base_channels) + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.out_channels, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.in_channels, + self.mid_channels, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.mid_channels, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + +@MODELS.register_module() +class SEResNeXt(SEResNet): + """SEResNeXt backbone. + + Please refer to the `paper `__ for + details. + + Args: + depth (int): Network depth, from {50, 101, 152}. + groups (int): Groups of conv2 in Bottleneck. Default: 32. + width_per_group (int): Width per group of conv2 in Bottleneck. + Default: 4. + se_ratio (int): Squeeze ratio in SELayer. Default: 16. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: + ``[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + + Example: + >>> from mmpose.models import SEResNeXt + >>> import torch + >>> self = SEResNet(depth=50, out_indices=(0, 1, 2, 3)) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 224, 224) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 256, 56, 56) + (1, 512, 28, 28) + (1, 1024, 14, 14) + (1, 2048, 7, 7) + """ + + arch_settings = { + 50: (SEBottleneck, (3, 4, 6, 3)), + 101: (SEBottleneck, (3, 4, 23, 3)), + 152: (SEBottleneck, (3, 8, 36, 3)) + } + + def __init__(self, depth, groups=32, width_per_group=4, **kwargs): + self.groups = groups + self.width_per_group = width_per_group + super().__init__(depth, **kwargs) + + def make_res_layer(self, **kwargs): + return ResLayer( + groups=self.groups, + width_per_group=self.width_per_group, + base_channels=self.base_channels, + **kwargs) diff --git a/mmpose/models/backbones/shufflenet_v1.py b/mmpose/models/backbones/shufflenet_v1.py index 17491910e9..462c204065 100644 --- a/mmpose/models/backbones/shufflenet_v1.py +++ b/mmpose/models/backbones/shufflenet_v1.py @@ -1,338 +1,338 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy - -import torch -import torch.nn as nn -import torch.utils.checkpoint as cp -from mmcv.cnn import ConvModule, build_activation_layer -from mmengine.model import BaseModule -from torch.nn.modules.batchnorm import _BatchNorm - -from mmpose.registry import MODELS -from .base_backbone import BaseBackbone -from .utils import channel_shuffle, make_divisible - - -class ShuffleUnit(BaseModule): - """ShuffleUnit block. - - ShuffleNet unit with pointwise group convolution (GConv) and channel - shuffle. - - Args: - in_channels (int): The input channels of the ShuffleUnit. - out_channels (int): The output channels of the ShuffleUnit. - groups (int, optional): The number of groups to be used in grouped 1x1 - convolutions in each ShuffleUnit. Default: 3 - first_block (bool, optional): Whether it is the first ShuffleUnit of a - sequential ShuffleUnits. Default: True, which means not using the - grouped 1x1 convolution. - combine (str, optional): The ways to combine the input and output - branches. Default: 'add'. - conv_cfg (dict): Config dict for convolution layer. Default: None, - which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='ReLU'). - with_cp (bool, optional): Use checkpoint or not. Using checkpoint - will save some memory while slowing down the training speed. - Default: False. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - - Returns: - Tensor: The output tensor. 
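One detail in the seresnext.py hunk above: the Example block in the SEResNeXt docstring instantiates SEResNet, apparently carried over from seresnet.py. The sketch below shows the corresponding usage for the class that is actually registered here, with the default 32x4d setting spelled out; it is illustrative only.

import torch

from mmpose.models import SEResNeXt

# 32x4d SEResNeXt-50 (groups=32 and width_per_group=4 are the defaults).
model = SEResNeXt(depth=50, groups=32, width_per_group=4,
                  out_indices=(0, 1, 2, 3))
model.eval()

with torch.no_grad():
    for feat in model(torch.rand(1, 3, 224, 224)):
        print(tuple(feat.shape))
# The stage output channels match SEResNet-50: 256, 512, 1024 and 2048.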
- """ - - def __init__(self, - in_channels, - out_channels, - groups=3, - first_block=True, - combine='add', - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU'), - with_cp=False, - init_cfg=None): - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - act_cfg = copy.deepcopy(act_cfg) - super().__init__(init_cfg=init_cfg) - self.in_channels = in_channels - self.out_channels = out_channels - self.first_block = first_block - self.combine = combine - self.groups = groups - self.bottleneck_channels = self.out_channels // 4 - self.with_cp = with_cp - - if self.combine == 'add': - self.depthwise_stride = 1 - self._combine_func = self._add - assert in_channels == out_channels, ( - 'in_channels must be equal to out_channels when combine ' - 'is add') - elif self.combine == 'concat': - self.depthwise_stride = 2 - self._combine_func = self._concat - self.out_channels -= self.in_channels - self.avgpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1) - else: - raise ValueError(f'Cannot combine tensors with {self.combine}. ' - 'Only "add" and "concat" are supported') - - self.first_1x1_groups = 1 if first_block else self.groups - self.g_conv_1x1_compress = ConvModule( - in_channels=self.in_channels, - out_channels=self.bottleneck_channels, - kernel_size=1, - groups=self.first_1x1_groups, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - - self.depthwise_conv3x3_bn = ConvModule( - in_channels=self.bottleneck_channels, - out_channels=self.bottleneck_channels, - kernel_size=3, - stride=self.depthwise_stride, - padding=1, - groups=self.bottleneck_channels, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None) - - self.g_conv_1x1_expand = ConvModule( - in_channels=self.bottleneck_channels, - out_channels=self.out_channels, - kernel_size=1, - groups=self.groups, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None) - - self.act = build_activation_layer(act_cfg) - - @staticmethod - def _add(x, out): - # residual connection - return x + out - - @staticmethod - def _concat(x, out): - # concatenate along channel axis - return torch.cat((x, out), 1) - - def forward(self, x): - - def _inner_forward(x): - residual = x - - out = self.g_conv_1x1_compress(x) - out = self.depthwise_conv3x3_bn(out) - - if self.groups > 1: - out = channel_shuffle(out, self.groups) - - out = self.g_conv_1x1_expand(out) - - if self.combine == 'concat': - residual = self.avgpool(residual) - out = self.act(out) - out = self._combine_func(residual, out) - else: - out = self._combine_func(residual, out) - out = self.act(out) - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - return out - - -@MODELS.register_module() -class ShuffleNetV1(BaseBackbone): - """ShuffleNetV1 backbone. - - Args: - groups (int, optional): The number of groups to be used in grouped 1x1 - convolutions in each ShuffleUnit. Default: 3. - widen_factor (float, optional): Width multiplier - adjusts the number - of channels in each layer by this amount. Default: 1.0. - out_indices (Sequence[int]): Output from which stages. - Default: (2, ) - frozen_stages (int): Stages to be frozen (all param fixed). - Default: -1, which means not freezing any parameters. - conv_cfg (dict): Config dict for convolution layer. Default: None, - which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='ReLU'). 
- norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Default: False. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: - ``[ - dict(type='Normal', std=0.01, layer=['Conv2d']), - dict( - type='Constant', - val=1, - bias=0.0001 - layer=['_BatchNorm', 'GroupNorm']) - ]`` - """ - - def __init__(self, - groups=3, - widen_factor=1.0, - out_indices=(2, ), - frozen_stages=-1, - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU'), - norm_eval=False, - with_cp=False, - init_cfg=[ - dict(type='Normal', std=0.01, layer=['Conv2d']), - dict( - type='Constant', - val=1, - bias=0.0001, - layer=['_BatchNorm', 'GroupNorm']) - ]): - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - act_cfg = copy.deepcopy(act_cfg) - super().__init__(init_cfg=init_cfg) - self.stage_blocks = [4, 8, 4] - self.groups = groups - - for index in out_indices: - if index not in range(0, 3): - raise ValueError('the item in out_indices must in ' - f'range(0, 3). But received {index}') - - if frozen_stages not in range(-1, 3): - raise ValueError('frozen_stages must be in range(-1, 3). ' - f'But received {frozen_stages}') - self.out_indices = out_indices - self.frozen_stages = frozen_stages - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - self.norm_eval = norm_eval - self.with_cp = with_cp - - if groups == 1: - channels = (144, 288, 576) - elif groups == 2: - channels = (200, 400, 800) - elif groups == 3: - channels = (240, 480, 960) - elif groups == 4: - channels = (272, 544, 1088) - elif groups == 8: - channels = (384, 768, 1536) - else: - raise ValueError(f'{groups} groups is not supported for 1x1 ' - 'Grouped Convolutions') - - channels = [make_divisible(ch * widen_factor, 8) for ch in channels] - - self.in_channels = int(24 * widen_factor) - - self.conv1 = ConvModule( - in_channels=3, - out_channels=self.in_channels, - kernel_size=3, - stride=2, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - - self.layers = nn.ModuleList() - for i, num_blocks in enumerate(self.stage_blocks): - first_block = (i == 0) - layer = self.make_layer(channels[i], num_blocks, first_block) - self.layers.append(layer) - - def _freeze_stages(self): - if self.frozen_stages >= 0: - for param in self.conv1.parameters(): - param.requires_grad = False - for i in range(self.frozen_stages): - layer = self.layers[i] - layer.eval() - for param in layer.parameters(): - param.requires_grad = False - - def init_weights(self, pretrained=None): - super(ShuffleNetV1, self).init_weights() - - if (isinstance(self.init_cfg, dict) - and self.init_cfg['type'] == 'Pretrained'): - return - - for name, m in self.named_modules(): - if isinstance(m, nn.Conv2d) and 'conv1' not in name: - nn.init.normal_(m.weight, mean=0, std=1.0 / m.weight.shape[1]) - - def make_layer(self, out_channels, num_blocks, first_block=False): - """Stack ShuffleUnit blocks to make a layer. - - Args: - out_channels (int): out_channels of the block. - num_blocks (int): Number of blocks. - first_block (bool, optional): Whether is the first ShuffleUnit of a - sequential ShuffleUnits. Default: False, which means using - the grouped 1x1 convolution. 
- """ - layers = [] - for i in range(num_blocks): - first_block = first_block if i == 0 else False - combine_mode = 'concat' if i == 0 else 'add' - layers.append( - ShuffleUnit( - self.in_channels, - out_channels, - groups=self.groups, - first_block=first_block, - combine=combine_mode, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg, - with_cp=self.with_cp)) - self.in_channels = out_channels - - return nn.Sequential(*layers) - - def forward(self, x): - x = self.conv1(x) - x = self.maxpool(x) - - outs = [] - for i, layer in enumerate(self.layers): - x = layer(x) - if i in self.out_indices: - outs.append(x) - - return tuple(outs) - - def train(self, mode=True): - super().train(mode) - self._freeze_stages() - if mode and self.norm_eval: - for m in self.modules(): - if isinstance(m, _BatchNorm): - m.eval() +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule, build_activation_layer +from mmengine.model import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone +from .utils import channel_shuffle, make_divisible + + +class ShuffleUnit(BaseModule): + """ShuffleUnit block. + + ShuffleNet unit with pointwise group convolution (GConv) and channel + shuffle. + + Args: + in_channels (int): The input channels of the ShuffleUnit. + out_channels (int): The output channels of the ShuffleUnit. + groups (int, optional): The number of groups to be used in grouped 1x1 + convolutions in each ShuffleUnit. Default: 3 + first_block (bool, optional): Whether it is the first ShuffleUnit of a + sequential ShuffleUnits. Default: True, which means not using the + grouped 1x1 convolution. + combine (str, optional): The ways to combine the input and output + branches. Default: 'add'. + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + with_cp (bool, optional): Use checkpoint or not. Using checkpoint + will save some memory while slowing down the training speed. + Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + + Returns: + Tensor: The output tensor. + """ + + def __init__(self, + in_channels, + out_channels, + groups=3, + first_block=True, + combine='add', + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + with_cp=False, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + act_cfg = copy.deepcopy(act_cfg) + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + self.first_block = first_block + self.combine = combine + self.groups = groups + self.bottleneck_channels = self.out_channels // 4 + self.with_cp = with_cp + + if self.combine == 'add': + self.depthwise_stride = 1 + self._combine_func = self._add + assert in_channels == out_channels, ( + 'in_channels must be equal to out_channels when combine ' + 'is add') + elif self.combine == 'concat': + self.depthwise_stride = 2 + self._combine_func = self._concat + self.out_channels -= self.in_channels + self.avgpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1) + else: + raise ValueError(f'Cannot combine tensors with {self.combine}. 
' + 'Only "add" and "concat" are supported') + + self.first_1x1_groups = 1 if first_block else self.groups + self.g_conv_1x1_compress = ConvModule( + in_channels=self.in_channels, + out_channels=self.bottleneck_channels, + kernel_size=1, + groups=self.first_1x1_groups, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.depthwise_conv3x3_bn = ConvModule( + in_channels=self.bottleneck_channels, + out_channels=self.bottleneck_channels, + kernel_size=3, + stride=self.depthwise_stride, + padding=1, + groups=self.bottleneck_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + self.g_conv_1x1_expand = ConvModule( + in_channels=self.bottleneck_channels, + out_channels=self.out_channels, + kernel_size=1, + groups=self.groups, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + self.act = build_activation_layer(act_cfg) + + @staticmethod + def _add(x, out): + # residual connection + return x + out + + @staticmethod + def _concat(x, out): + # concatenate along channel axis + return torch.cat((x, out), 1) + + def forward(self, x): + + def _inner_forward(x): + residual = x + + out = self.g_conv_1x1_compress(x) + out = self.depthwise_conv3x3_bn(out) + + if self.groups > 1: + out = channel_shuffle(out, self.groups) + + out = self.g_conv_1x1_expand(out) + + if self.combine == 'concat': + residual = self.avgpool(residual) + out = self.act(out) + out = self._combine_func(residual, out) + else: + out = self._combine_func(residual, out) + out = self.act(out) + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +@MODELS.register_module() +class ShuffleNetV1(BaseBackbone): + """ShuffleNetV1 backbone. + + Args: + groups (int, optional): The number of groups to be used in grouped 1x1 + convolutions in each ShuffleUnit. Default: 3. + widen_factor (float, optional): Width multiplier - adjusts the number + of channels in each layer by this amount. Default: 1.0. + out_indices (Sequence[int]): Output from which stages. + Default: (2, ) + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: + ``[ + dict(type='Normal', std=0.01, layer=['Conv2d']), + dict( + type='Constant', + val=1, + bias=0.0001 + layer=['_BatchNorm', 'GroupNorm']) + ]`` + """ + + def __init__(self, + groups=3, + widen_factor=1.0, + out_indices=(2, ), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + norm_eval=False, + with_cp=False, + init_cfg=[ + dict(type='Normal', std=0.01, layer=['Conv2d']), + dict( + type='Constant', + val=1, + bias=0.0001, + layer=['_BatchNorm', 'GroupNorm']) + ]): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + act_cfg = copy.deepcopy(act_cfg) + super().__init__(init_cfg=init_cfg) + self.stage_blocks = [4, 8, 4] + self.groups = groups + + for index in out_indices: + if index not in range(0, 3): + raise ValueError('the item in out_indices must in ' + f'range(0, 3). But received {index}') + + if frozen_stages not in range(-1, 3): + raise ValueError('frozen_stages must be in range(-1, 3). ' + f'But received {frozen_stages}') + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + + if groups == 1: + channels = (144, 288, 576) + elif groups == 2: + channels = (200, 400, 800) + elif groups == 3: + channels = (240, 480, 960) + elif groups == 4: + channels = (272, 544, 1088) + elif groups == 8: + channels = (384, 768, 1536) + else: + raise ValueError(f'{groups} groups is not supported for 1x1 ' + 'Grouped Convolutions') + + channels = [make_divisible(ch * widen_factor, 8) for ch in channels] + + self.in_channels = int(24 * widen_factor) + + self.conv1 = ConvModule( + in_channels=3, + out_channels=self.in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + self.layers = nn.ModuleList() + for i, num_blocks in enumerate(self.stage_blocks): + first_block = (i == 0) + layer = self.make_layer(channels[i], num_blocks, first_block) + self.layers.append(layer) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + for i in range(self.frozen_stages): + layer = self.layers[i] + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def init_weights(self, pretrained=None): + super(ShuffleNetV1, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + return + + for name, m in self.named_modules(): + if isinstance(m, nn.Conv2d) and 'conv1' not in name: + nn.init.normal_(m.weight, mean=0, std=1.0 / m.weight.shape[1]) + + def make_layer(self, out_channels, num_blocks, first_block=False): + """Stack ShuffleUnit blocks to make a layer. + + Args: + out_channels (int): out_channels of the block. + num_blocks (int): Number of blocks. + first_block (bool, optional): Whether is the first ShuffleUnit of a + sequential ShuffleUnits. Default: False, which means using + the grouped 1x1 convolution. 
+ """ + layers = [] + for i in range(num_blocks): + first_block = first_block if i == 0 else False + combine_mode = 'concat' if i == 0 else 'add' + layers.append( + ShuffleUnit( + self.in_channels, + out_channels, + groups=self.groups, + first_block=first_block, + combine=combine_mode, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + with_cp=self.with_cp)) + self.in_channels = out_channels + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.maxpool(x) + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def train(self, mode=True): + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmpose/models/backbones/shufflenet_v2.py b/mmpose/models/backbones/shufflenet_v2.py index 9757841e73..dabf383e58 100644 --- a/mmpose/models/backbones/shufflenet_v2.py +++ b/mmpose/models/backbones/shufflenet_v2.py @@ -1,311 +1,311 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy - -import torch -import torch.nn as nn -import torch.utils.checkpoint as cp -from mmcv.cnn import ConvModule -from mmengine.model import BaseModule - -from mmpose.registry import MODELS -from .base_backbone import BaseBackbone -from .utils import channel_shuffle - - -class InvertedResidual(BaseModule): - """InvertedResidual block for ShuffleNetV2 backbone. - - Args: - in_channels (int): The input channels of the block. - out_channels (int): The output channels of the block. - stride (int): Stride of the 3x3 convolution layer. Default: 1 - conv_cfg (dict): Config dict for convolution layer. - Default: None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='ReLU'). - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - init_cfg (dict or list[dict], optional): Initialization config dict. 
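ShuffleNetV1 above ships without a doctest, so the following is only an illustrative sketch based on the constructor defaults visible in the hunk (groups=3, widen_factor=1.0, out_indices=(2, )); it assumes the class is exported from mmpose.models like the other backbones in this diff.

import torch

from mmpose.models import ShuffleNetV1

# Defaults: groups=3 -> stage channels (240, 480, 960); out_indices=(2,)
# returns only the last stage.
model = ShuffleNetV1()
model.eval()

with torch.no_grad():
    outs = model(torch.rand(1, 3, 224, 224))

print(len(outs), tuple(outs[0].shape))
# With the stride-2 stem, the max-pool and three stride-2 stage entries,
# the single output should have 960 channels at 1/32 of the input size.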
- Default: None - """ - - def __init__(self, - in_channels, - out_channels, - stride=1, - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU'), - with_cp=False, - init_cfg=None): - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - act_cfg = copy.deepcopy(act_cfg) - super().__init__(init_cfg=init_cfg) - self.stride = stride - self.with_cp = with_cp - - branch_features = out_channels // 2 - if self.stride == 1: - assert in_channels == branch_features * 2, ( - f'in_channels ({in_channels}) should equal to ' - f'branch_features * 2 ({branch_features * 2}) ' - 'when stride is 1') - - if in_channels != branch_features * 2: - assert self.stride != 1, ( - f'stride ({self.stride}) should not equal 1 when ' - f'in_channels != branch_features * 2') - - if self.stride > 1: - self.branch1 = nn.Sequential( - ConvModule( - in_channels, - in_channels, - kernel_size=3, - stride=self.stride, - padding=1, - groups=in_channels, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None), - ConvModule( - in_channels, - branch_features, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg), - ) - - self.branch2 = nn.Sequential( - ConvModule( - in_channels if (self.stride > 1) else branch_features, - branch_features, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg), - ConvModule( - branch_features, - branch_features, - kernel_size=3, - stride=self.stride, - padding=1, - groups=branch_features, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None), - ConvModule( - branch_features, - branch_features, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg)) - - def forward(self, x): - - def _inner_forward(x): - if self.stride > 1: - out = torch.cat((self.branch1(x), self.branch2(x)), dim=1) - else: - x1, x2 = x.chunk(2, dim=1) - out = torch.cat((x1, self.branch2(x2)), dim=1) - - out = channel_shuffle(out, 2) - - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - return out - - -@MODELS.register_module() -class ShuffleNetV2(BaseBackbone): - """ShuffleNetV2 backbone. - - Args: - widen_factor (float): Width multiplier - adjusts the number of - channels in each layer by this amount. Default: 1.0. - out_indices (Sequence[int]): Output from which stages. - Default: (0, 1, 2, 3). - frozen_stages (int): Stages to be frozen (all param fixed). - Default: -1, which means not freezing any parameters. - conv_cfg (dict): Config dict for convolution layer. - Default: None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='ReLU'). - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Default: False. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: - ``[ - dict(type='Normal', std=0.01, layer=['Conv2d']), - dict( - type='Constant', - val=1, - bias=0.0001 - layer=['_BatchNorm', 'GroupNorm']) - ]`` - """ - - def __init__(self, - widen_factor=1.0, - out_indices=(3, ), - frozen_stages=-1, - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU'), - norm_eval=False, - with_cp=False, - init_cfg=[ - dict(type='Normal', std=0.01, layer=['Conv2d']), - dict( - type='Constant', - val=1, - bias=0.0001, - layer=['_BatchNorm', 'GroupNorm']) - ]): - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - act_cfg = copy.deepcopy(act_cfg) - super().__init__(init_cfg=init_cfg) - self.stage_blocks = [4, 8, 4] - for index in out_indices: - if index not in range(0, 4): - raise ValueError('the item in out_indices must in ' - f'range(0, 4). But received {index}') - - if frozen_stages not in range(-1, 4): - raise ValueError('frozen_stages must be in range(-1, 4). ' - f'But received {frozen_stages}') - self.out_indices = out_indices - self.frozen_stages = frozen_stages - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - self.norm_eval = norm_eval - self.with_cp = with_cp - - if widen_factor == 0.5: - channels = [48, 96, 192, 1024] - elif widen_factor == 1.0: - channels = [116, 232, 464, 1024] - elif widen_factor == 1.5: - channels = [176, 352, 704, 1024] - elif widen_factor == 2.0: - channels = [244, 488, 976, 2048] - else: - raise ValueError('widen_factor must be in [0.5, 1.0, 1.5, 2.0]. ' - f'But received {widen_factor}') - - self.in_channels = 24 - self.conv1 = ConvModule( - in_channels=3, - out_channels=self.in_channels, - kernel_size=3, - stride=2, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - - self.layers = nn.ModuleList() - for i, num_blocks in enumerate(self.stage_blocks): - layer = self._make_layer(channels[i], num_blocks) - self.layers.append(layer) - - output_channels = channels[-1] - self.layers.append( - ConvModule( - in_channels=self.in_channels, - out_channels=output_channels, - kernel_size=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg)) - - def _make_layer(self, out_channels, num_blocks): - """Stack blocks to make a layer. - - Args: - out_channels (int): out_channels of the block. - num_blocks (int): number of blocks. 
- """ - layers = [] - for i in range(num_blocks): - stride = 2 if i == 0 else 1 - layers.append( - InvertedResidual( - in_channels=self.in_channels, - out_channels=out_channels, - stride=stride, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg, - with_cp=self.with_cp)) - self.in_channels = out_channels - - return nn.Sequential(*layers) - - def _freeze_stages(self): - if self.frozen_stages >= 0: - for param in self.conv1.parameters(): - param.requires_grad = False - - for i in range(self.frozen_stages): - m = self.layers[i] - m.eval() - for param in m.parameters(): - param.requires_grad = False - - def init_weights(self): - super(ShuffleNetV2, self).init_weights() - - if (isinstance(self.init_cfg, dict) - and self.init_cfg['type'] == 'Pretrained'): - return - - for name, m in self.named_modules(): - if isinstance(m, nn.Conv2d) and 'conv1' not in name: - nn.init.normal_(m.weight, mean=0, std=1.0 / m.weight.shape[1]) - - def forward(self, x): - x = self.conv1(x) - x = self.maxpool(x) - - outs = [] - for i, layer in enumerate(self.layers): - x = layer(x) - if i in self.out_indices: - outs.append(x) - - return tuple(outs) - - def train(self, mode=True): - super().train(mode) - self._freeze_stages() - if mode and self.norm_eval: - for m in self.modules(): - if isinstance(m, nn.BatchNorm2d): - m.eval() +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone +from .utils import channel_shuffle + + +class InvertedResidual(BaseModule): + """InvertedResidual block for ShuffleNetV2 backbone. + + Args: + in_channels (int): The input channels of the block. + out_channels (int): The output channels of the block. + stride (int): Stride of the 3x3 convolution layer. Default: 1 + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + in_channels, + out_channels, + stride=1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + with_cp=False, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + act_cfg = copy.deepcopy(act_cfg) + super().__init__(init_cfg=init_cfg) + self.stride = stride + self.with_cp = with_cp + + branch_features = out_channels // 2 + if self.stride == 1: + assert in_channels == branch_features * 2, ( + f'in_channels ({in_channels}) should equal to ' + f'branch_features * 2 ({branch_features * 2}) ' + 'when stride is 1') + + if in_channels != branch_features * 2: + assert self.stride != 1, ( + f'stride ({self.stride}) should not equal 1 when ' + f'in_channels != branch_features * 2') + + if self.stride > 1: + self.branch1 = nn.Sequential( + ConvModule( + in_channels, + in_channels, + kernel_size=3, + stride=self.stride, + padding=1, + groups=in_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None), + ConvModule( + in_channels, + branch_features, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ) + + self.branch2 = nn.Sequential( + ConvModule( + in_channels if (self.stride > 1) else branch_features, + branch_features, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + branch_features, + branch_features, + kernel_size=3, + stride=self.stride, + padding=1, + groups=branch_features, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None), + ConvModule( + branch_features, + branch_features, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def forward(self, x): + + def _inner_forward(x): + if self.stride > 1: + out = torch.cat((self.branch1(x), self.branch2(x)), dim=1) + else: + x1, x2 = x.chunk(2, dim=1) + out = torch.cat((x1, self.branch2(x2)), dim=1) + + out = channel_shuffle(out, 2) + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +@MODELS.register_module() +class ShuffleNetV2(BaseBackbone): + """ShuffleNetV2 backbone. + + Args: + widen_factor (float): Width multiplier - adjusts the number of + channels in each layer by this amount. Default: 1.0. + out_indices (Sequence[int]): Output from which stages. + Default: (0, 1, 2, 3). + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: + ``[ + dict(type='Normal', std=0.01, layer=['Conv2d']), + dict( + type='Constant', + val=1, + bias=0.0001 + layer=['_BatchNorm', 'GroupNorm']) + ]`` + """ + + def __init__(self, + widen_factor=1.0, + out_indices=(3, ), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + norm_eval=False, + with_cp=False, + init_cfg=[ + dict(type='Normal', std=0.01, layer=['Conv2d']), + dict( + type='Constant', + val=1, + bias=0.0001, + layer=['_BatchNorm', 'GroupNorm']) + ]): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + act_cfg = copy.deepcopy(act_cfg) + super().__init__(init_cfg=init_cfg) + self.stage_blocks = [4, 8, 4] + for index in out_indices: + if index not in range(0, 4): + raise ValueError('the item in out_indices must in ' + f'range(0, 4). But received {index}') + + if frozen_stages not in range(-1, 4): + raise ValueError('frozen_stages must be in range(-1, 4). ' + f'But received {frozen_stages}') + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + + if widen_factor == 0.5: + channels = [48, 96, 192, 1024] + elif widen_factor == 1.0: + channels = [116, 232, 464, 1024] + elif widen_factor == 1.5: + channels = [176, 352, 704, 1024] + elif widen_factor == 2.0: + channels = [244, 488, 976, 2048] + else: + raise ValueError('widen_factor must be in [0.5, 1.0, 1.5, 2.0]. ' + f'But received {widen_factor}') + + self.in_channels = 24 + self.conv1 = ConvModule( + in_channels=3, + out_channels=self.in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + self.layers = nn.ModuleList() + for i, num_blocks in enumerate(self.stage_blocks): + layer = self._make_layer(channels[i], num_blocks) + self.layers.append(layer) + + output_channels = channels[-1] + self.layers.append( + ConvModule( + in_channels=self.in_channels, + out_channels=output_channels, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def _make_layer(self, out_channels, num_blocks): + """Stack blocks to make a layer. + + Args: + out_channels (int): out_channels of the block. + num_blocks (int): number of blocks. 
+ """ + layers = [] + for i in range(num_blocks): + stride = 2 if i == 0 else 1 + layers.append( + InvertedResidual( + in_channels=self.in_channels, + out_channels=out_channels, + stride=stride, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + with_cp=self.with_cp)) + self.in_channels = out_channels + + return nn.Sequential(*layers) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + + for i in range(self.frozen_stages): + m = self.layers[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self): + super(ShuffleNetV2, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + return + + for name, m in self.named_modules(): + if isinstance(m, nn.Conv2d) and 'conv1' not in name: + nn.init.normal_(m.weight, mean=0, std=1.0 / m.weight.shape[1]) + + def forward(self, x): + x = self.conv1(x) + x = self.maxpool(x) + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def train(self, mode=True): + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() diff --git a/mmpose/models/backbones/swin.py b/mmpose/models/backbones/swin.py index a8f7c97278..a2251fd74c 100644 --- a/mmpose/models/backbones/swin.py +++ b/mmpose/models/backbones/swin.py @@ -1,739 +1,739 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from copy import deepcopy - -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.checkpoint as cp -from mmcv.cnn import build_norm_layer -from mmcv.cnn.bricks.transformer import FFN, build_dropout -from mmengine.model import BaseModule -from mmengine.model.weight_init import trunc_normal_ -from mmengine.runner import load_state_dict -from mmengine.utils import to_2tuple - -from mmpose.registry import MODELS -from mmpose.utils import get_root_logger -from ..utils.transformer import PatchEmbed, PatchMerging -from .base_backbone import BaseBackbone -from .utils import get_state_dict -from .utils.ckpt_convert import swin_converter - - -class WindowMSA(BaseModule): - """Window based multi-head self-attention (W-MSA) module with relative - position bias. - - Args: - embed_dims (int): Number of input channels. - num_heads (int): Number of attention heads. - window_size (tuple[int]): The height and width of the window. - qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. - Default: True. - qk_scale (float | None, optional): Override default qk scale of - head_dim ** -0.5 if set. Default: None. - attn_drop_rate (float, optional): Dropout ratio of attention weight. - Default: 0.0 - proj_drop_rate (float, optional): Dropout ratio of output. Default: 0. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None. 
- """ - - def __init__(self, - embed_dims, - num_heads, - window_size, - qkv_bias=True, - qk_scale=None, - attn_drop_rate=0., - proj_drop_rate=0., - init_cfg=None): - - super().__init__(init_cfg=init_cfg) - self.embed_dims = embed_dims - self.window_size = window_size # Wh, Ww - self.num_heads = num_heads - head_embed_dims = embed_dims // num_heads - self.scale = qk_scale or head_embed_dims**-0.5 - - # define a parameter table of relative position bias - self.relative_position_bias_table = nn.Parameter( - torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), - num_heads)) # 2*Wh-1 * 2*Ww-1, nH - - # About 2x faster than original impl - Wh, Ww = self.window_size - rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww) - rel_position_index = rel_index_coords + rel_index_coords.T - rel_position_index = rel_position_index.flip(1).contiguous() - self.register_buffer('relative_position_index', rel_position_index) - - self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop_rate) - self.proj = nn.Linear(embed_dims, embed_dims) - self.proj_drop = nn.Dropout(proj_drop_rate) - - self.softmax = nn.Softmax(dim=-1) - - def init_weights(self): - trunc_normal_(self.relative_position_bias_table, std=0.02) - - def forward(self, x, mask=None): - """ - Args: - - x (tensor): input features with shape of (num_windows*B, N, C) - mask (tensor | None, Optional): mask with shape of (num_windows, - Wh*Ww, Wh*Ww), value should be between (-inf, 0]. - """ - B, N, C = x.shape - qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, - C // self.num_heads).permute(2, 0, 3, 1, 4) - # make torchscript happy (cannot use tensor as tuple) - q, k, v = qkv[0], qkv[1], qkv[2] - - q = q * self.scale - attn = (q @ k.transpose(-2, -1)) - - relative_position_bias = self.relative_position_bias_table[ - self.relative_position_index.view(-1)].view( - self.window_size[0] * self.window_size[1], - self.window_size[0] * self.window_size[1], - -1) # Wh*Ww,Wh*Ww,nH - relative_position_bias = relative_position_bias.permute( - 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww - attn = attn + relative_position_bias.unsqueeze(0) - - if mask is not None: - nW = mask.shape[0] - attn = attn.view(B // nW, nW, self.num_heads, N, - N) + mask.unsqueeze(1).unsqueeze(0) - attn = attn.view(-1, self.num_heads, N, N) - attn = self.softmax(attn) - - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(B, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - - @staticmethod - def double_step_seq(step1, len1, step2, len2): - seq1 = torch.arange(0, step1 * len1, step1) - seq2 = torch.arange(0, step2 * len2, step2) - return (seq1[:, None] + seq2[None, :]).reshape(1, -1) - - -class ShiftWindowMSA(BaseModule): - """Shifted Window Multihead Self-Attention Module. - - Args: - embed_dims (int): Number of input channels. - num_heads (int): Number of attention heads. - window_size (int): The height and width of the window. - shift_size (int, optional): The shift step of each window towards - right-bottom. If zero, act as regular window-msa. Defaults to 0. - qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. - Default: True - qk_scale (float | None, optional): Override default qk scale of - head_dim ** -0.5 if set. Defaults: None. - attn_drop_rate (float, optional): Dropout ratio of attention weight. - Defaults: 0. - proj_drop_rate (float, optional): Dropout ratio of output. - Defaults: 0. - dropout_layer (dict, optional): The dropout_layer used before output. 
- Defaults: dict(type='DropPath', drop_prob=0.). - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - embed_dims, - num_heads, - window_size, - shift_size=0, - qkv_bias=True, - qk_scale=None, - attn_drop_rate=0, - proj_drop_rate=0, - dropout_layer=dict(type='DropPath', drop_prob=0.), - init_cfg=None): - super().__init__(init_cfg=init_cfg) - - self.window_size = window_size - self.shift_size = shift_size - assert 0 <= self.shift_size < self.window_size - - self.w_msa = WindowMSA( - embed_dims=embed_dims, - num_heads=num_heads, - window_size=to_2tuple(window_size), - qkv_bias=qkv_bias, - qk_scale=qk_scale, - attn_drop_rate=attn_drop_rate, - proj_drop_rate=proj_drop_rate) - - self.drop = build_dropout(dropout_layer) - - def forward(self, query, hw_shape): - B, L, C = query.shape - H, W = hw_shape - assert L == H * W, 'input feature has wrong size' - query = query.view(B, H, W, C) - - # pad feature maps to multiples of window size - pad_r = (self.window_size - W % self.window_size) % self.window_size - pad_b = (self.window_size - H % self.window_size) % self.window_size - query = F.pad(query, (0, 0, 0, pad_r, 0, pad_b)) - H_pad, W_pad = query.shape[1], query.shape[2] - - # cyclic shift - if self.shift_size > 0: - shifted_query = torch.roll( - query, - shifts=(-self.shift_size, -self.shift_size), - dims=(1, 2)) - - # calculate attention mask for SW-MSA - img_mask = torch.zeros((1, H_pad, W_pad, 1), device=query.device) - h_slices = (slice(0, -self.window_size), - slice(-self.window_size, - -self.shift_size), slice(-self.shift_size, None)) - w_slices = (slice(0, -self.window_size), - slice(-self.window_size, - -self.shift_size), slice(-self.shift_size, None)) - cnt = 0 - for h in h_slices: - for w in w_slices: - img_mask[:, h, w, :] = cnt - cnt += 1 - - # nW, window_size, window_size, 1 - mask_windows = self.window_partition(img_mask) - mask_windows = mask_windows.view( - -1, self.window_size * self.window_size) - attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) - attn_mask = attn_mask.masked_fill(attn_mask != 0, - float(-100.0)).masked_fill( - attn_mask == 0, float(0.0)) - else: - shifted_query = query - attn_mask = None - - # nW*B, window_size, window_size, C - query_windows = self.window_partition(shifted_query) - # nW*B, window_size*window_size, C - query_windows = query_windows.view(-1, self.window_size**2, C) - - # W-MSA/SW-MSA (nW*B, window_size*window_size, C) - attn_windows = self.w_msa(query_windows, mask=attn_mask) - - # merge windows - attn_windows = attn_windows.view(-1, self.window_size, - self.window_size, C) - - # B H' W' C - shifted_x = self.window_reverse(attn_windows, H_pad, W_pad) - # reverse cyclic shift - if self.shift_size > 0: - x = torch.roll( - shifted_x, - shifts=(self.shift_size, self.shift_size), - dims=(1, 2)) - else: - x = shifted_x - - if pad_r > 0 or pad_b: - x = x[:, :H, :W, :].contiguous() - - x = x.view(B, H * W, C) - - x = self.drop(x) - return x - - def window_reverse(self, windows, H, W): - """ - Args: - windows: (num_windows*B, window_size, window_size, C) - H (int): Height of image - W (int): Width of image - Returns: - x: (B, H, W, C) - """ - window_size = self.window_size - B = int(windows.shape[0] / (H * W / window_size / window_size)) - x = windows.view(B, H // window_size, W // window_size, window_size, - window_size, -1) - x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) - return x - - def window_partition(self, x): - """ - Args: - x: (B, H, W, C) - 
Returns: - windows: (num_windows*B, window_size, window_size, C) - """ - B, H, W, C = x.shape - window_size = self.window_size - x = x.view(B, H // window_size, window_size, W // window_size, - window_size, C) - windows = x.permute(0, 1, 3, 2, 4, 5).contiguous() - windows = windows.view(-1, window_size, window_size, C) - return windows - - -class SwinBlock(BaseModule): - """" - Args: - embed_dims (int): The feature dimension. - num_heads (int): Parallel attention heads. - feedforward_channels (int): The hidden dimension for FFNs. - window_size (int, optional): The local window scale. Default: 7. - shift (bool, optional): whether to shift window or not. Default False. - qkv_bias (bool, optional): enable bias for qkv if True. Default: True. - qk_scale (float | None, optional): Override default qk scale of - head_dim ** -0.5 if set. Default: None. - drop_rate (float, optional): Dropout rate. Default: 0. - attn_drop_rate (float, optional): Attention dropout rate. Default: 0. - drop_path_rate (float, optional): Stochastic depth rate. Default: 0. - act_cfg (dict, optional): The config dict of activation function. - Default: dict(type='GELU'). - norm_cfg (dict, optional): The config dict of normalization. - Default: dict(type='LN'). - with_cp (bool, optional): Use checkpoint or not. Using checkpoint - will save some memory while slowing down the training speed. - Default: False. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - embed_dims, - num_heads, - feedforward_channels, - window_size=7, - shift=False, - qkv_bias=True, - qk_scale=None, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0., - act_cfg=dict(type='GELU'), - norm_cfg=dict(type='LN'), - with_cp=False, - init_cfg=None): - - super(SwinBlock, self).__init__(init_cfg=init_cfg) - - self.with_cp = with_cp - - self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] - self.attn = ShiftWindowMSA( - embed_dims=embed_dims, - num_heads=num_heads, - window_size=window_size, - shift_size=window_size // 2 if shift else 0, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - attn_drop_rate=attn_drop_rate, - proj_drop_rate=drop_rate, - dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate)) - - self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] - self.ffn = FFN( - embed_dims=embed_dims, - feedforward_channels=feedforward_channels, - num_fcs=2, - ffn_drop=drop_rate, - dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), - act_cfg=act_cfg, - add_identity=True, - init_cfg=None) - - def forward(self, x, hw_shape): - - def _inner_forward(x): - identity = x - x = self.norm1(x) - x = self.attn(x, hw_shape) - - x = x + identity - - identity = x - x = self.norm2(x) - x = self.ffn(x, identity=identity) - - return x - - if self.with_cp and x.requires_grad: - x = cp.checkpoint(_inner_forward, x) - else: - x = _inner_forward(x) - - return x - - -class SwinBlockSequence(BaseModule): - """Implements one stage in Swin Transformer. - - Args: - embed_dims (int): The feature dimension. - num_heads (int): Parallel attention heads. - feedforward_channels (int): The hidden dimension for FFNs. - depth (int): The number of blocks in this stage. - window_size (int, optional): The local window scale. Default: 7. - qkv_bias (bool, optional): enable bias for qkv if True. Default: True. - qk_scale (float | None, optional): Override default qk scale of - head_dim ** -0.5 if set. Default: None. - drop_rate (float, optional): Dropout rate. Default: 0. 
- attn_drop_rate (float, optional): Attention dropout rate. Default: 0. - drop_path_rate (float | list[float], optional): Stochastic depth - rate. Default: 0. - downsample (nn.Module | None, optional): The downsample operation - module. Default: None. - act_cfg (dict, optional): The config dict of activation function. - Default: dict(type='GELU'). - norm_cfg (dict, optional): The config dict of normalization. - Default: dict(type='LN'). - with_cp (bool, optional): Use checkpoint or not. Using checkpoint - will save some memory while slowing down the training speed. - Default: False. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - embed_dims, - num_heads, - feedforward_channels, - depth, - window_size=7, - qkv_bias=True, - qk_scale=None, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0., - downsample=None, - act_cfg=dict(type='GELU'), - norm_cfg=dict(type='LN'), - with_cp=False, - init_cfg=None): - super().__init__(init_cfg=init_cfg) - - if isinstance(drop_path_rate, list): - drop_path_rates = drop_path_rate - assert len(drop_path_rates) == depth - else: - drop_path_rates = [deepcopy(drop_path_rate) for _ in range(depth)] - - self.blocks = nn.ModuleList() - for i in range(depth): - block = SwinBlock( - embed_dims=embed_dims, - num_heads=num_heads, - feedforward_channels=feedforward_channels, - window_size=window_size, - shift=False if i % 2 == 0 else True, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop_rate=drop_rate, - attn_drop_rate=attn_drop_rate, - drop_path_rate=drop_path_rates[i], - act_cfg=act_cfg, - norm_cfg=norm_cfg, - with_cp=with_cp) - self.blocks.append(block) - - self.downsample = downsample - - def forward(self, x, hw_shape): - for block in self.blocks: - x = block(x, hw_shape) - - if self.downsample: - x_down, down_hw_shape = self.downsample(x, hw_shape) - return x_down, down_hw_shape, x, hw_shape - else: - return x, hw_shape, x, hw_shape - - -@MODELS.register_module() -class SwinTransformer(BaseBackbone): - """ Swin Transformer - A PyTorch implement of : `Swin Transformer: - Hierarchical Vision Transformer using Shifted Windows` - - https://arxiv.org/abs/2103.14030 - - Inspiration from - https://github.com/microsoft/Swin-Transformer - - Args: - pretrain_img_size (int | tuple[int]): The size of input image when - pretrain. Defaults: 224. - in_channels (int): The num of input channels. - Defaults: 3. - embed_dims (int): The feature dimension. Default: 96. - patch_size (int | tuple[int]): Patch size. Default: 4. - window_size (int): Window size. Default: 7. - mlp_ratio (int): Ratio of mlp hidden dim to embedding dim. - Default: 4. - depths (tuple[int]): Depths of each Swin Transformer stage. - Default: (2, 2, 6, 2). - num_heads (tuple[int]): Parallel attention heads of each Swin - Transformer stage. Default: (3, 6, 12, 24). - strides (tuple[int]): The patch merging or patch embedding stride of - each Swin Transformer stage. (In swin, we set kernel size equal to - stride.) Default: (4, 2, 2, 2). - out_indices (tuple[int]): Output from which stages. - Default: (0, 1, 2, 3). - qkv_bias (bool, optional): If True, add a learnable bias to query, key, - value. Default: True - qk_scale (float | None, optional): Override default qk scale of - head_dim ** -0.5 if set. Default: None. - patch_norm (bool): If add a norm layer for patch embed and patch - merging. Default: True. - drop_rate (float): Dropout rate. Defaults: 0. - attn_drop_rate (float): Attention dropout rate. Default: 0. 
- drop_path_rate (float): Stochastic depth rate. Defaults: 0.1. - use_abs_pos_embed (bool): If True, add absolute position embedding to - the patch embedding. Defaults: False. - act_cfg (dict): Config dict for activation layer. - Default: dict(type='LN'). - norm_cfg (dict): Config dict for normalization layer at - output of backone. Defaults: dict(type='LN'). - with_cp (bool, optional): Use checkpoint or not. Using checkpoint - will save some memory while slowing down the training speed. - Default: False. - pretrained (str, optional): model pretrained path. Default: None. - convert_weights (bool): The flag indicates whether the - pre-trained model is from the original repo. We may need - to convert some keys to make it compatible. - Default: False. - frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - Default: -1 (-1 means not freezing any parameters). - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: ``[ - dict(type='TruncNormal', std=.02, layer=['Linear']), - dict(type='Constant', val=1, layer=['LayerNorm']), - ]`` - """ - - def __init__(self, - pretrain_img_size=224, - in_channels=3, - embed_dims=96, - patch_size=4, - window_size=7, - mlp_ratio=4, - depths=(2, 2, 6, 2), - num_heads=(3, 6, 12, 24), - strides=(4, 2, 2, 2), - out_indices=(0, 1, 2, 3), - qkv_bias=True, - qk_scale=None, - patch_norm=True, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.1, - use_abs_pos_embed=False, - act_cfg=dict(type='GELU'), - norm_cfg=dict(type='LN'), - with_cp=False, - convert_weights=False, - frozen_stages=-1, - init_cfg=[ - dict(type='TruncNormal', std=.02, layer=['Linear']), - dict(type='Constant', val=1, layer=['LayerNorm']), - ]): - self.convert_weights = convert_weights - self.frozen_stages = frozen_stages - if isinstance(pretrain_img_size, int): - pretrain_img_size = to_2tuple(pretrain_img_size) - elif isinstance(pretrain_img_size, tuple): - if len(pretrain_img_size) == 1: - pretrain_img_size = to_2tuple(pretrain_img_size[0]) - assert len(pretrain_img_size) == 2, \ - f'The size of image should have length 1 or 2, ' \ - f'but got {len(pretrain_img_size)}' - - super(SwinTransformer, self).__init__(init_cfg=init_cfg) - - num_layers = len(depths) - self.out_indices = out_indices - self.use_abs_pos_embed = use_abs_pos_embed - - assert strides[0] == patch_size, 'Use non-overlapping patch embed.' 
- - self.patch_embed = PatchEmbed( - in_channels=in_channels, - embed_dims=embed_dims, - conv_type='Conv2d', - kernel_size=patch_size, - stride=strides[0], - norm_cfg=norm_cfg if patch_norm else None, - init_cfg=None) - - if self.use_abs_pos_embed: - patch_row = pretrain_img_size[0] // patch_size - patch_col = pretrain_img_size[1] // patch_size - num_patches = patch_row * patch_col - self.absolute_pos_embed = nn.Parameter( - torch.zeros((1, num_patches, embed_dims))) - - self.drop_after_pos = nn.Dropout(p=drop_rate) - - # set stochastic depth decay rule - total_depth = sum(depths) - dpr = [ - x.item() for x in torch.linspace(0, drop_path_rate, total_depth) - ] - - self.stages = nn.ModuleList() - in_channels = embed_dims - for i in range(num_layers): - if i < num_layers - 1: - downsample = PatchMerging( - in_channels=in_channels, - out_channels=2 * in_channels, - stride=strides[i + 1], - norm_cfg=norm_cfg if patch_norm else None, - init_cfg=None) - else: - downsample = None - - stage = SwinBlockSequence( - embed_dims=in_channels, - num_heads=num_heads[i], - feedforward_channels=mlp_ratio * in_channels, - depth=depths[i], - window_size=window_size, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop_rate=drop_rate, - attn_drop_rate=attn_drop_rate, - drop_path_rate=dpr[sum(depths[:i]):sum(depths[:i + 1])], - downsample=downsample, - act_cfg=act_cfg, - norm_cfg=norm_cfg, - with_cp=with_cp) - self.stages.append(stage) - if downsample: - in_channels = downsample.out_channels - - self.num_features = [int(embed_dims * 2**i) for i in range(num_layers)] - # Add a norm layer for each output - for i in out_indices: - layer = build_norm_layer(norm_cfg, self.num_features[i])[1] - layer_name = f'norm{i}' - self.add_module(layer_name, layer) - - def train(self, mode=True): - """Convert the model into training mode while keep layers freezed.""" - super(SwinTransformer, self).train(mode) - self._freeze_stages() - - def _freeze_stages(self): - if self.frozen_stages >= 0: - self.patch_embed.eval() - for param in self.patch_embed.parameters(): - param.requires_grad = False - if self.use_abs_pos_embed: - self.absolute_pos_embed.requires_grad = False - self.drop_after_pos.eval() - - for i in range(1, self.frozen_stages + 1): - - if (i - 1) in self.out_indices: - norm_layer = getattr(self, f'norm{i-1}') - norm_layer.eval() - for param in norm_layer.parameters(): - param.requires_grad = False - - m = self.stages[i - 1] - m.eval() - for param in m.parameters(): - param.requires_grad = False - - def init_weights(self, pretrained=None): - """Initialize the weights in backbone. - - Args: - pretrained (str, optional): Path to pre-trained weights. - Defaults to None. - """ - if (isinstance(self.init_cfg, dict) - and self.init_cfg['type'] == 'Pretrained'): - # Suppress zero_init_residual if use pretrained model. 
- logger = get_root_logger() - state_dict = get_state_dict( - self.init_cfg['checkpoint'], map_location='cpu') - if self.convert_weights: - # supported loading weight from original repo - state_dict = swin_converter(state_dict) - - # strip prefix of state_dict - if list(state_dict.keys())[0].startswith('module.'): - state_dict = {k[7:]: v for k, v in state_dict.items()} - - # reshape absolute position embedding - if state_dict.get('absolute_pos_embed') is not None: - absolute_pos_embed = state_dict['absolute_pos_embed'] - N1, L, C1 = absolute_pos_embed.size() - N2, C2, H, W = self.absolute_pos_embed.size() - if N1 != N2 or C1 != C2 or L != H * W: - logger.warning('Error in loading absolute_pos_embed, pass') - else: - state_dict['absolute_pos_embed'] = absolute_pos_embed.view( - N2, H, W, C2).permute(0, 3, 1, 2).contiguous() - - # interpolate position bias table if needed - relative_position_bias_table_keys = [ - k for k in state_dict.keys() - if 'relative_position_bias_table' in k - ] - for table_key in relative_position_bias_table_keys: - table_pretrained = state_dict[table_key] - table_current = self.state_dict()[table_key] - L1, nH1 = table_pretrained.size() - L2, nH2 = table_current.size() - if nH1 != nH2: - logger.warning(f'Error in loading {table_key}, pass') - elif L1 != L2: - S1 = int(L1**0.5) - S2 = int(L2**0.5) - table_pretrained_resized = F.interpolate( - table_pretrained.permute(1, 0).reshape(1, nH1, S1, S1), - size=(S2, S2), - mode='bicubic') - state_dict[table_key] = table_pretrained_resized.view( - nH2, L2).permute(1, 0).contiguous() - - # load state_dict - load_state_dict(self, state_dict, strict=False, logger=logger) - - else: - super(SwinTransformer, self).init_weights() - if self.use_abs_pos_embed: - trunc_normal_(self.absolute_pos_embed, std=0.02) - - def forward(self, x): - x, hw_shape = self.patch_embed(x) - - if self.use_abs_pos_embed: - x = x + self.absolute_pos_embed - x = self.drop_after_pos(x) - - outs = [] - for i, stage in enumerate(self.stages): - x, hw_shape, out, out_hw_shape = stage(x, hw_shape) - if i in self.out_indices: - norm_layer = getattr(self, f'norm{i}') - out = norm_layer(out) - out = out.view(-1, *out_hw_shape, - self.num_features[i]).permute(0, 3, 1, - 2).contiguous() - outs.append(out) - - return tuple(outs) +# Copyright (c) OpenMMLab. All rights reserved. +from copy import deepcopy + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN, build_dropout +from mmengine.model import BaseModule +from mmengine.model.weight_init import trunc_normal_ +from mmengine.runner import load_state_dict +from mmengine.utils import to_2tuple + +from mmpose.registry import MODELS +from mmpose.utils import get_root_logger +from ..utils.transformer import PatchEmbed, PatchMerging +from .base_backbone import BaseBackbone +from .utils import get_state_dict +from .utils.ckpt_convert import swin_converter + + +class WindowMSA(BaseModule): + """Window based multi-head self-attention (W-MSA) module with relative + position bias. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (tuple[int]): The height and width of the window. + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Default: True. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. 
+ attn_drop_rate (float, optional): Dropout ratio of attention weight. + Default: 0.0 + proj_drop_rate (float, optional): Dropout ratio of output. Default: 0. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__(self, + embed_dims, + num_heads, + window_size, + qkv_bias=True, + qk_scale=None, + attn_drop_rate=0., + proj_drop_rate=0., + init_cfg=None): + + super().__init__(init_cfg=init_cfg) + self.embed_dims = embed_dims + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_embed_dims = embed_dims // num_heads + self.scale = qk_scale or head_embed_dims**-0.5 + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), + num_heads)) # 2*Wh-1 * 2*Ww-1, nH + + # About 2x faster than original impl + Wh, Ww = self.window_size + rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww) + rel_position_index = rel_index_coords + rel_index_coords.T + rel_position_index = rel_position_index.flip(1).contiguous() + self.register_buffer('relative_position_index', rel_position_index) + + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop_rate) + self.proj = nn.Linear(embed_dims, embed_dims) + self.proj_drop = nn.Dropout(proj_drop_rate) + + self.softmax = nn.Softmax(dim=-1) + + def init_weights(self): + trunc_normal_(self.relative_position_bias_table, std=0.02) + + def forward(self, x, mask=None): + """ + Args: + + x (tensor): input features with shape of (num_windows*B, N, C) + mask (tensor | None, Optional): mask with shape of (num_windows, + Wh*Ww, Wh*Ww), value should be between (-inf, 0]. + """ + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, + C // self.num_heads).permute(2, 0, 3, 1, 4) + # make torchscript happy (cannot use tensor as tuple) + q, k, v = qkv[0], qkv[1], qkv[2] + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], + self.window_size[0] * self.window_size[1], + -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute( + 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B // nW, nW, self.num_heads, N, + N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + @staticmethod + def double_step_seq(step1, len1, step2, len2): + seq1 = torch.arange(0, step1 * len1, step1) + seq2 = torch.arange(0, step2 * len2, step2) + return (seq1[:, None] + seq2[None, :]).reshape(1, -1) + + +class ShiftWindowMSA(BaseModule): + """Shifted Window Multihead Self-Attention Module. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (int): The height and width of the window. + shift_size (int, optional): The shift step of each window towards + right-bottom. If zero, act as regular window-msa. Defaults to 0. + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Default: True + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Defaults: None. 
+ attn_drop_rate (float, optional): Dropout ratio of attention weight. + Defaults: 0. + proj_drop_rate (float, optional): Dropout ratio of output. + Defaults: 0. + dropout_layer (dict, optional): The dropout_layer used before output. + Defaults: dict(type='DropPath', drop_prob=0.). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + embed_dims, + num_heads, + window_size, + shift_size=0, + qkv_bias=True, + qk_scale=None, + attn_drop_rate=0, + proj_drop_rate=0, + dropout_layer=dict(type='DropPath', drop_prob=0.), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + self.window_size = window_size + self.shift_size = shift_size + assert 0 <= self.shift_size < self.window_size + + self.w_msa = WindowMSA( + embed_dims=embed_dims, + num_heads=num_heads, + window_size=to_2tuple(window_size), + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop_rate=attn_drop_rate, + proj_drop_rate=proj_drop_rate) + + self.drop = build_dropout(dropout_layer) + + def forward(self, query, hw_shape): + B, L, C = query.shape + H, W = hw_shape + assert L == H * W, 'input feature has wrong size' + query = query.view(B, H, W, C) + + # pad feature maps to multiples of window size + pad_r = (self.window_size - W % self.window_size) % self.window_size + pad_b = (self.window_size - H % self.window_size) % self.window_size + query = F.pad(query, (0, 0, 0, pad_r, 0, pad_b)) + H_pad, W_pad = query.shape[1], query.shape[2] + + # cyclic shift + if self.shift_size > 0: + shifted_query = torch.roll( + query, + shifts=(-self.shift_size, -self.shift_size), + dims=(1, 2)) + + # calculate attention mask for SW-MSA + img_mask = torch.zeros((1, H_pad, W_pad, 1), device=query.device) + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, + -self.shift_size), slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, + -self.shift_size), slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + # nW, window_size, window_size, 1 + mask_windows = self.window_partition(img_mask) + mask_windows = mask_windows.view( + -1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, + float(-100.0)).masked_fill( + attn_mask == 0, float(0.0)) + else: + shifted_query = query + attn_mask = None + + # nW*B, window_size, window_size, C + query_windows = self.window_partition(shifted_query) + # nW*B, window_size*window_size, C + query_windows = query_windows.view(-1, self.window_size**2, C) + + # W-MSA/SW-MSA (nW*B, window_size*window_size, C) + attn_windows = self.w_msa(query_windows, mask=attn_mask) + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, + self.window_size, C) + + # B H' W' C + shifted_x = self.window_reverse(attn_windows, H_pad, W_pad) + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll( + shifted_x, + shifts=(self.shift_size, self.shift_size), + dims=(1, 2)) + else: + x = shifted_x + + if pad_r > 0 or pad_b: + x = x[:, :H, :W, :].contiguous() + + x = x.view(B, H * W, C) + + x = self.drop(x) + return x + + def window_reverse(self, windows, H, W): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + H (int): Height of image + W (int): Width of image + Returns: + x: (B, H, W, C) + """ + window_size = self.window_size + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = 
windows.view(B, H // window_size, W // window_size, window_size, + window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + def window_partition(self, x): + """ + Args: + x: (B, H, W, C) + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + window_size = self.window_size + x = x.view(B, H // window_size, window_size, W // window_size, + window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous() + windows = windows.view(-1, window_size, window_size, C) + return windows + + +class SwinBlock(BaseModule): + """" + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + window_size (int, optional): The local window scale. Default: 7. + shift (bool, optional): whether to shift window or not. Default False. + qkv_bias (bool, optional): enable bias for qkv if True. Default: True. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + drop_rate (float, optional): Dropout rate. Default: 0. + attn_drop_rate (float, optional): Attention dropout rate. Default: 0. + drop_path_rate (float, optional): Stochastic depth rate. Default: 0. + act_cfg (dict, optional): The config dict of activation function. + Default: dict(type='GELU'). + norm_cfg (dict, optional): The config dict of normalization. + Default: dict(type='LN'). + with_cp (bool, optional): Use checkpoint or not. Using checkpoint + will save some memory while slowing down the training speed. + Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + window_size=7, + shift=False, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + with_cp=False, + init_cfg=None): + + super(SwinBlock, self).__init__(init_cfg=init_cfg) + + self.with_cp = with_cp + + self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] + self.attn = ShiftWindowMSA( + embed_dims=embed_dims, + num_heads=num_heads, + window_size=window_size, + shift_size=window_size // 2 if shift else 0, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop_rate=attn_drop_rate, + proj_drop_rate=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate)) + + self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=2, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg, + add_identity=True, + init_cfg=None) + + def forward(self, x, hw_shape): + + def _inner_forward(x): + identity = x + x = self.norm1(x) + x = self.attn(x, hw_shape) + + x = x + identity + + identity = x + x = self.norm2(x) + x = self.ffn(x, identity=identity) + + return x + + if self.with_cp and x.requires_grad: + x = cp.checkpoint(_inner_forward, x) + else: + x = _inner_forward(x) + + return x + + +class SwinBlockSequence(BaseModule): + """Implements one stage in Swin Transformer. + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + depth (int): The number of blocks in this stage. + window_size (int, optional): The local window scale. Default: 7. 
+ qkv_bias (bool, optional): enable bias for qkv if True. Default: True. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + drop_rate (float, optional): Dropout rate. Default: 0. + attn_drop_rate (float, optional): Attention dropout rate. Default: 0. + drop_path_rate (float | list[float], optional): Stochastic depth + rate. Default: 0. + downsample (nn.Module | None, optional): The downsample operation + module. Default: None. + act_cfg (dict, optional): The config dict of activation function. + Default: dict(type='GELU'). + norm_cfg (dict, optional): The config dict of normalization. + Default: dict(type='LN'). + with_cp (bool, optional): Use checkpoint or not. Using checkpoint + will save some memory while slowing down the training speed. + Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + depth, + window_size=7, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + downsample=None, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + with_cp=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + if isinstance(drop_path_rate, list): + drop_path_rates = drop_path_rate + assert len(drop_path_rates) == depth + else: + drop_path_rates = [deepcopy(drop_path_rate) for _ in range(depth)] + + self.blocks = nn.ModuleList() + for i in range(depth): + block = SwinBlock( + embed_dims=embed_dims, + num_heads=num_heads, + feedforward_channels=feedforward_channels, + window_size=window_size, + shift=False if i % 2 == 0 else True, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rates[i], + act_cfg=act_cfg, + norm_cfg=norm_cfg, + with_cp=with_cp) + self.blocks.append(block) + + self.downsample = downsample + + def forward(self, x, hw_shape): + for block in self.blocks: + x = block(x, hw_shape) + + if self.downsample: + x_down, down_hw_shape = self.downsample(x, hw_shape) + return x_down, down_hw_shape, x, hw_shape + else: + return x, hw_shape, x, hw_shape + + +@MODELS.register_module() +class SwinTransformer(BaseBackbone): + """ Swin Transformer + A PyTorch implement of : `Swin Transformer: + Hierarchical Vision Transformer using Shifted Windows` - + https://arxiv.org/abs/2103.14030 + + Inspiration from + https://github.com/microsoft/Swin-Transformer + + Args: + pretrain_img_size (int | tuple[int]): The size of input image when + pretrain. Defaults: 224. + in_channels (int): The num of input channels. + Defaults: 3. + embed_dims (int): The feature dimension. Default: 96. + patch_size (int | tuple[int]): Patch size. Default: 4. + window_size (int): Window size. Default: 7. + mlp_ratio (int): Ratio of mlp hidden dim to embedding dim. + Default: 4. + depths (tuple[int]): Depths of each Swin Transformer stage. + Default: (2, 2, 6, 2). + num_heads (tuple[int]): Parallel attention heads of each Swin + Transformer stage. Default: (3, 6, 12, 24). + strides (tuple[int]): The patch merging or patch embedding stride of + each Swin Transformer stage. (In swin, we set kernel size equal to + stride.) Default: (4, 2, 2, 2). + out_indices (tuple[int]): Output from which stages. + Default: (0, 1, 2, 3). + qkv_bias (bool, optional): If True, add a learnable bias to query, key, + value. Default: True + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. 
Default: None.
+        patch_norm (bool): If add a norm layer for patch embed and patch
+            merging. Default: True.
+        drop_rate (float): Dropout rate. Defaults: 0.
+        attn_drop_rate (float): Attention dropout rate. Default: 0.
+        drop_path_rate (float): Stochastic depth rate. Defaults: 0.1.
+        use_abs_pos_embed (bool): If True, add absolute position embedding to
+            the patch embedding. Defaults: False.
+        act_cfg (dict): Config dict for activation layer.
+            Default: dict(type='GELU').
+        norm_cfg (dict): Config dict for normalization layer at
+            output of backbone. Defaults: dict(type='LN').
+        with_cp (bool, optional): Use checkpoint or not. Using checkpoint
+            will save some memory while slowing down the training speed.
+            Default: False.
+        pretrained (str, optional): model pretrained path. Default: None.
+        convert_weights (bool): The flag indicates whether the
+            pre-trained model is from the original repo. We may need
+            to convert some keys to make it compatible.
+            Default: False.
+        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
+            Default: -1 (-1 means not freezing any parameters).
+        init_cfg (dict or list[dict], optional): Initialization config dict.
+            Default: ``[
+                dict(type='TruncNormal', std=.02, layer=['Linear']),
+                dict(type='Constant', val=1, layer=['LayerNorm']),
+            ]``
+    """
+
+    def __init__(self,
+                 pretrain_img_size=224,
+                 in_channels=3,
+                 embed_dims=96,
+                 patch_size=4,
+                 window_size=7,
+                 mlp_ratio=4,
+                 depths=(2, 2, 6, 2),
+                 num_heads=(3, 6, 12, 24),
+                 strides=(4, 2, 2, 2),
+                 out_indices=(0, 1, 2, 3),
+                 qkv_bias=True,
+                 qk_scale=None,
+                 patch_norm=True,
+                 drop_rate=0.,
+                 attn_drop_rate=0.,
+                 drop_path_rate=0.1,
+                 use_abs_pos_embed=False,
+                 act_cfg=dict(type='GELU'),
+                 norm_cfg=dict(type='LN'),
+                 with_cp=False,
+                 convert_weights=False,
+                 frozen_stages=-1,
+                 init_cfg=[
+                     dict(type='TruncNormal', std=.02, layer=['Linear']),
+                     dict(type='Constant', val=1, layer=['LayerNorm']),
+                 ]):
+        self.convert_weights = convert_weights
+        self.frozen_stages = frozen_stages
+        if isinstance(pretrain_img_size, int):
+            pretrain_img_size = to_2tuple(pretrain_img_size)
+        elif isinstance(pretrain_img_size, tuple):
+            if len(pretrain_img_size) == 1:
+                pretrain_img_size = to_2tuple(pretrain_img_size[0])
+            assert len(pretrain_img_size) == 2, \
+                f'The size of image should have length 1 or 2, ' \
+                f'but got {len(pretrain_img_size)}'
+
+        super(SwinTransformer, self).__init__(init_cfg=init_cfg)
+
+        num_layers = len(depths)
+        self.out_indices = out_indices
+        self.use_abs_pos_embed = use_abs_pos_embed
+
+        assert strides[0] == patch_size, 'Use non-overlapping patch embed.'
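+        # The conv-based patch embedding below uses
+        # kernel_size == stride == patch_size, so patches do not overlap and
+        # the stride of the first stage must match the patch size.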
+ + self.patch_embed = PatchEmbed( + in_channels=in_channels, + embed_dims=embed_dims, + conv_type='Conv2d', + kernel_size=patch_size, + stride=strides[0], + norm_cfg=norm_cfg if patch_norm else None, + init_cfg=None) + + if self.use_abs_pos_embed: + patch_row = pretrain_img_size[0] // patch_size + patch_col = pretrain_img_size[1] // patch_size + num_patches = patch_row * patch_col + self.absolute_pos_embed = nn.Parameter( + torch.zeros((1, num_patches, embed_dims))) + + self.drop_after_pos = nn.Dropout(p=drop_rate) + + # set stochastic depth decay rule + total_depth = sum(depths) + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, total_depth) + ] + + self.stages = nn.ModuleList() + in_channels = embed_dims + for i in range(num_layers): + if i < num_layers - 1: + downsample = PatchMerging( + in_channels=in_channels, + out_channels=2 * in_channels, + stride=strides[i + 1], + norm_cfg=norm_cfg if patch_norm else None, + init_cfg=None) + else: + downsample = None + + stage = SwinBlockSequence( + embed_dims=in_channels, + num_heads=num_heads[i], + feedforward_channels=mlp_ratio * in_channels, + depth=depths[i], + window_size=window_size, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=dpr[sum(depths[:i]):sum(depths[:i + 1])], + downsample=downsample, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + with_cp=with_cp) + self.stages.append(stage) + if downsample: + in_channels = downsample.out_channels + + self.num_features = [int(embed_dims * 2**i) for i in range(num_layers)] + # Add a norm layer for each output + for i in out_indices: + layer = build_norm_layer(norm_cfg, self.num_features[i])[1] + layer_name = f'norm{i}' + self.add_module(layer_name, layer) + + def train(self, mode=True): + """Convert the model into training mode while keep layers freezed.""" + super(SwinTransformer, self).train(mode) + self._freeze_stages() + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + if self.use_abs_pos_embed: + self.absolute_pos_embed.requires_grad = False + self.drop_after_pos.eval() + + for i in range(1, self.frozen_stages + 1): + + if (i - 1) in self.out_indices: + norm_layer = getattr(self, f'norm{i-1}') + norm_layer.eval() + for param in norm_layer.parameters(): + param.requires_grad = False + + m = self.stages[i - 1] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self, pretrained=None): + """Initialize the weights in backbone. + + Args: + pretrained (str, optional): Path to pre-trained weights. + Defaults to None. + """ + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress zero_init_residual if use pretrained model. 
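+            # The pretrained checkpoint is loaded into a plain state dict,
+            # optionally converted from the official Swin repo key format,
+            # and position-dependent weights (absolute position embedding,
+            # relative position bias tables) are reshaped or interpolated to
+            # match the current model before loading.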
+ logger = get_root_logger() + state_dict = get_state_dict( + self.init_cfg['checkpoint'], map_location='cpu') + if self.convert_weights: + # supported loading weight from original repo + state_dict = swin_converter(state_dict) + + # strip prefix of state_dict + if list(state_dict.keys())[0].startswith('module.'): + state_dict = {k[7:]: v for k, v in state_dict.items()} + + # reshape absolute position embedding + if state_dict.get('absolute_pos_embed') is not None: + absolute_pos_embed = state_dict['absolute_pos_embed'] + N1, L, C1 = absolute_pos_embed.size() + N2, C2, H, W = self.absolute_pos_embed.size() + if N1 != N2 or C1 != C2 or L != H * W: + logger.warning('Error in loading absolute_pos_embed, pass') + else: + state_dict['absolute_pos_embed'] = absolute_pos_embed.view( + N2, H, W, C2).permute(0, 3, 1, 2).contiguous() + + # interpolate position bias table if needed + relative_position_bias_table_keys = [ + k for k in state_dict.keys() + if 'relative_position_bias_table' in k + ] + for table_key in relative_position_bias_table_keys: + table_pretrained = state_dict[table_key] + table_current = self.state_dict()[table_key] + L1, nH1 = table_pretrained.size() + L2, nH2 = table_current.size() + if nH1 != nH2: + logger.warning(f'Error in loading {table_key}, pass') + elif L1 != L2: + S1 = int(L1**0.5) + S2 = int(L2**0.5) + table_pretrained_resized = F.interpolate( + table_pretrained.permute(1, 0).reshape(1, nH1, S1, S1), + size=(S2, S2), + mode='bicubic') + state_dict[table_key] = table_pretrained_resized.view( + nH2, L2).permute(1, 0).contiguous() + + # load state_dict + load_state_dict(self, state_dict, strict=False, logger=logger) + + else: + super(SwinTransformer, self).init_weights() + if self.use_abs_pos_embed: + trunc_normal_(self.absolute_pos_embed, std=0.02) + + def forward(self, x): + x, hw_shape = self.patch_embed(x) + + if self.use_abs_pos_embed: + x = x + self.absolute_pos_embed + x = self.drop_after_pos(x) + + outs = [] + for i, stage in enumerate(self.stages): + x, hw_shape, out, out_hw_shape = stage(x, hw_shape) + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + out = norm_layer(out) + out = out.view(-1, *out_hw_shape, + self.num_features[i]).permute(0, 3, 1, + 2).contiguous() + outs.append(out) + + return tuple(outs) diff --git a/mmpose/models/backbones/tcn.py b/mmpose/models/backbones/tcn.py index ef49a1ff07..476769c297 100644 --- a/mmpose/models/backbones/tcn.py +++ b/mmpose/models/backbones/tcn.py @@ -1,284 +1,284 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy - -import torch.nn as nn -from mmcv.cnn import ConvModule, build_conv_layer -from mmengine.model import BaseModule - -from mmpose.registry import MODELS -from ..utils.regularizations import WeightNormClipHook -from .base_backbone import BaseBackbone - - -class BasicTemporalBlock(BaseModule): - """Basic block for VideoPose3D. - - Args: - in_channels (int): Input channels of this block. - out_channels (int): Output channels of this block. - mid_channels (int): The output channels of conv1. Default: 1024. - kernel_size (int): Size of the convolving kernel. Default: 3. - dilation (int): Spacing between kernel elements. Default: 3. - dropout (float): Dropout rate. Default: 0.25. - causal (bool): Use causal convolutions instead of symmetric - convolutions (for real-time applications). Default: False. - residual (bool): Use residual connection. Default: True. - use_stride_conv (bool): Use optimized TCN that designed - specifically for single-frame batching, i.e. 
where batches have - input length = receptive field, and output length = 1. This - implementation replaces dilated convolutions with strided - convolutions to avoid generating unused intermediate results. - Default: False. - conv_cfg (dict): dictionary to construct and config conv layer. - Default: dict(type='Conv1d'). - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN1d'). - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - in_channels, - out_channels, - mid_channels=1024, - kernel_size=3, - dilation=3, - dropout=0.25, - causal=False, - residual=True, - use_stride_conv=False, - conv_cfg=dict(type='Conv1d'), - norm_cfg=dict(type='BN1d'), - init_cfg=None): - # Protect mutable default arguments - conv_cfg = copy.deepcopy(conv_cfg) - norm_cfg = copy.deepcopy(norm_cfg) - super().__init__(init_cfg=init_cfg) - self.in_channels = in_channels - self.out_channels = out_channels - self.mid_channels = mid_channels - self.kernel_size = kernel_size - self.dilation = dilation - self.dropout = dropout - self.causal = causal - self.residual = residual - self.use_stride_conv = use_stride_conv - - self.pad = (kernel_size - 1) * dilation // 2 - if use_stride_conv: - self.stride = kernel_size - self.causal_shift = kernel_size // 2 if causal else 0 - self.dilation = 1 - else: - self.stride = 1 - self.causal_shift = kernel_size // 2 * dilation if causal else 0 - - self.conv1 = nn.Sequential( - ConvModule( - in_channels, - mid_channels, - kernel_size=kernel_size, - stride=self.stride, - dilation=self.dilation, - bias='auto', - conv_cfg=conv_cfg, - norm_cfg=norm_cfg)) - self.conv2 = nn.Sequential( - ConvModule( - mid_channels, - out_channels, - kernel_size=1, - bias='auto', - conv_cfg=conv_cfg, - norm_cfg=norm_cfg)) - - if residual and in_channels != out_channels: - self.short_cut = build_conv_layer(conv_cfg, in_channels, - out_channels, 1) - else: - self.short_cut = None - - self.dropout = nn.Dropout(dropout) if dropout > 0 else None - - def forward(self, x): - """Forward function.""" - if self.use_stride_conv: - assert self.causal_shift + self.kernel_size // 2 < x.shape[2] - else: - assert 0 <= self.pad + self.causal_shift < x.shape[2] - \ - self.pad + self.causal_shift <= x.shape[2] - - out = self.conv1(x) - if self.dropout is not None: - out = self.dropout(out) - - out = self.conv2(out) - if self.dropout is not None: - out = self.dropout(out) - - if self.residual: - if self.use_stride_conv: - res = x[:, :, self.causal_shift + - self.kernel_size // 2::self.kernel_size] - else: - res = x[:, :, - (self.pad + self.causal_shift):(x.shape[2] - self.pad + - self.causal_shift)] - - if self.short_cut is not None: - res = self.short_cut(res) - out = out + res - - return out - - -@MODELS.register_module() -class TCN(BaseBackbone): - """TCN backbone. - - Temporal Convolutional Networks. - More details can be found in the - `paper `__ . - - Args: - in_channels (int): Number of input channels, which equals to - num_keypoints * num_features. - stem_channels (int): Number of feature channels. Default: 1024. - num_blocks (int): NUmber of basic temporal convolutional blocks. - Default: 2. - kernel_sizes (Sequence[int]): Sizes of the convolving kernel of - each basic block. Default: ``(3, 3, 3)``. - dropout (float): Dropout rate. Default: 0.25. - causal (bool): Use causal convolutions instead of symmetric - convolutions (for real-time applications). - Default: False. - residual (bool): Use residual connection. 
Default: True. - use_stride_conv (bool): Use TCN backbone optimized for - single-frame batching, i.e. where batches have input length = - receptive field, and output length = 1. This implementation - replaces dilated convolutions with strided convolutions to avoid - generating unused intermediate results. The weights are - interchangeable with the reference implementation. Default: False - conv_cfg (dict): dictionary to construct and config conv layer. - Default: dict(type='Conv1d'). - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN1d'). - max_norm (float|None): if not None, the weight of convolution layers - will be clipped to have a maximum norm of max_norm. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: - ``[ - dict( - type='Kaiming', - mode='fan_in', - nonlinearity='relu', - layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ]`` - - Example: - >>> from mmpose.models import TCN - >>> import torch - >>> self = TCN(in_channels=34) - >>> self.eval() - >>> inputs = torch.rand(1, 34, 243) - >>> level_outputs = self.forward(inputs) - >>> for level_out in level_outputs: - ... print(tuple(level_out.shape)) - (1, 1024, 235) - (1, 1024, 217) - """ - - def __init__(self, - in_channels, - stem_channels=1024, - num_blocks=2, - kernel_sizes=(3, 3, 3), - dropout=0.25, - causal=False, - residual=True, - use_stride_conv=False, - conv_cfg=dict(type='Conv1d'), - norm_cfg=dict(type='BN1d'), - max_norm=None, - init_cfg=[ - dict( - type='Kaiming', - mode='fan_in', - nonlinearity='relu', - layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ]): - # Protect mutable default arguments - conv_cfg = copy.deepcopy(conv_cfg) - norm_cfg = copy.deepcopy(norm_cfg) - super().__init__() - self.in_channels = in_channels - self.stem_channels = stem_channels - self.num_blocks = num_blocks - self.kernel_sizes = kernel_sizes - self.dropout = dropout - self.causal = causal - self.residual = residual - self.use_stride_conv = use_stride_conv - self.max_norm = max_norm - - assert num_blocks == len(kernel_sizes) - 1 - for ks in kernel_sizes: - assert ks % 2 == 1, 'Only odd filter widths are supported.' - - self.expand_conv = ConvModule( - in_channels, - stem_channels, - kernel_size=kernel_sizes[0], - stride=kernel_sizes[0] if use_stride_conv else 1, - bias='auto', - conv_cfg=conv_cfg, - norm_cfg=norm_cfg) - - dilation = kernel_sizes[0] - self.tcn_blocks = nn.ModuleList() - for i in range(1, num_blocks + 1): - self.tcn_blocks.append( - BasicTemporalBlock( - in_channels=stem_channels, - out_channels=stem_channels, - mid_channels=stem_channels, - kernel_size=kernel_sizes[i], - dilation=dilation, - dropout=dropout, - causal=causal, - residual=residual, - use_stride_conv=use_stride_conv, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg)) - dilation *= kernel_sizes[i] - - if self.max_norm is not None: - # Apply weight norm clip to conv layers - weight_clip = WeightNormClipHook(self.max_norm) - for module in self.modules(): - if isinstance(module, nn.modules.conv._ConvNd): - weight_clip.register(module) - - self.dropout = nn.Dropout(dropout) if dropout > 0 else None - - def forward(self, x): - """Forward function.""" - x = self.expand_conv(x) - - if self.dropout is not None: - x = self.dropout(x) - - outs = [] - for i in range(self.num_blocks): - x = self.tcn_blocks[i](x) - outs.append(x) - - return tuple(outs) +# Copyright (c) OpenMMLab. All rights reserved. 
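+# This module implements the temporal convolutional network (TCN) backbone
+# in the style of VideoPose3D, which lifts sequences of 2D keypoints to 3D
+# poses; see the TCN class docstring below for a usage example.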
+import copy + +import torch.nn as nn +from mmcv.cnn import ConvModule, build_conv_layer +from mmengine.model import BaseModule + +from mmpose.registry import MODELS +from ..utils.regularizations import WeightNormClipHook +from .base_backbone import BaseBackbone + + +class BasicTemporalBlock(BaseModule): + """Basic block for VideoPose3D. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + mid_channels (int): The output channels of conv1. Default: 1024. + kernel_size (int): Size of the convolving kernel. Default: 3. + dilation (int): Spacing between kernel elements. Default: 3. + dropout (float): Dropout rate. Default: 0.25. + causal (bool): Use causal convolutions instead of symmetric + convolutions (for real-time applications). Default: False. + residual (bool): Use residual connection. Default: True. + use_stride_conv (bool): Use optimized TCN that designed + specifically for single-frame batching, i.e. where batches have + input length = receptive field, and output length = 1. This + implementation replaces dilated convolutions with strided + convolutions to avoid generating unused intermediate results. + Default: False. + conv_cfg (dict): dictionary to construct and config conv layer. + Default: dict(type='Conv1d'). + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN1d'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels, + out_channels, + mid_channels=1024, + kernel_size=3, + dilation=3, + dropout=0.25, + causal=False, + residual=True, + use_stride_conv=False, + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + init_cfg=None): + # Protect mutable default arguments + conv_cfg = copy.deepcopy(conv_cfg) + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + self.mid_channels = mid_channels + self.kernel_size = kernel_size + self.dilation = dilation + self.dropout = dropout + self.causal = causal + self.residual = residual + self.use_stride_conv = use_stride_conv + + self.pad = (kernel_size - 1) * dilation // 2 + if use_stride_conv: + self.stride = kernel_size + self.causal_shift = kernel_size // 2 if causal else 0 + self.dilation = 1 + else: + self.stride = 1 + self.causal_shift = kernel_size // 2 * dilation if causal else 0 + + self.conv1 = nn.Sequential( + ConvModule( + in_channels, + mid_channels, + kernel_size=kernel_size, + stride=self.stride, + dilation=self.dilation, + bias='auto', + conv_cfg=conv_cfg, + norm_cfg=norm_cfg)) + self.conv2 = nn.Sequential( + ConvModule( + mid_channels, + out_channels, + kernel_size=1, + bias='auto', + conv_cfg=conv_cfg, + norm_cfg=norm_cfg)) + + if residual and in_channels != out_channels: + self.short_cut = build_conv_layer(conv_cfg, in_channels, + out_channels, 1) + else: + self.short_cut = None + + self.dropout = nn.Dropout(dropout) if dropout > 0 else None + + def forward(self, x): + """Forward function.""" + if self.use_stride_conv: + assert self.causal_shift + self.kernel_size // 2 < x.shape[2] + else: + assert 0 <= self.pad + self.causal_shift < x.shape[2] - \ + self.pad + self.causal_shift <= x.shape[2] + + out = self.conv1(x) + if self.dropout is not None: + out = self.dropout(out) + + out = self.conv2(out) + if self.dropout is not None: + out = self.dropout(out) + + if self.residual: + if self.use_stride_conv: + res = x[:, :, self.causal_shift + + 
self.kernel_size // 2::self.kernel_size] + else: + res = x[:, :, + (self.pad + self.causal_shift):(x.shape[2] - self.pad + + self.causal_shift)] + + if self.short_cut is not None: + res = self.short_cut(res) + out = out + res + + return out + + +@MODELS.register_module() +class TCN(BaseBackbone): + """TCN backbone. + + Temporal Convolutional Networks. + More details can be found in the + `paper `__ . + + Args: + in_channels (int): Number of input channels, which equals to + num_keypoints * num_features. + stem_channels (int): Number of feature channels. Default: 1024. + num_blocks (int): NUmber of basic temporal convolutional blocks. + Default: 2. + kernel_sizes (Sequence[int]): Sizes of the convolving kernel of + each basic block. Default: ``(3, 3, 3)``. + dropout (float): Dropout rate. Default: 0.25. + causal (bool): Use causal convolutions instead of symmetric + convolutions (for real-time applications). + Default: False. + residual (bool): Use residual connection. Default: True. + use_stride_conv (bool): Use TCN backbone optimized for + single-frame batching, i.e. where batches have input length = + receptive field, and output length = 1. This implementation + replaces dilated convolutions with strided convolutions to avoid + generating unused intermediate results. The weights are + interchangeable with the reference implementation. Default: False + conv_cfg (dict): dictionary to construct and config conv layer. + Default: dict(type='Conv1d'). + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN1d'). + max_norm (float|None): if not None, the weight of convolution layers + will be clipped to have a maximum norm of max_norm. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict( + type='Kaiming', + mode='fan_in', + nonlinearity='relu', + layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + + Example: + >>> from mmpose.models import TCN + >>> import torch + >>> self = TCN(in_channels=34) + >>> self.eval() + >>> inputs = torch.rand(1, 34, 243) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 1024, 235) + (1, 1024, 217) + """ + + def __init__(self, + in_channels, + stem_channels=1024, + num_blocks=2, + kernel_sizes=(3, 3, 3), + dropout=0.25, + causal=False, + residual=True, + use_stride_conv=False, + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + max_norm=None, + init_cfg=[ + dict( + type='Kaiming', + mode='fan_in', + nonlinearity='relu', + layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]): + # Protect mutable default arguments + conv_cfg = copy.deepcopy(conv_cfg) + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__() + self.in_channels = in_channels + self.stem_channels = stem_channels + self.num_blocks = num_blocks + self.kernel_sizes = kernel_sizes + self.dropout = dropout + self.causal = causal + self.residual = residual + self.use_stride_conv = use_stride_conv + self.max_norm = max_norm + + assert num_blocks == len(kernel_sizes) - 1 + for ks in kernel_sizes: + assert ks % 2 == 1, 'Only odd filter widths are supported.' 
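+        # When use_stride_conv=False, the dilation grows multiplicatively
+        # (kernel_sizes[0], then *kernel_sizes[i] per block), so each layer
+        # trims (kernel_size - 1) * dilation frames from the sequence; e.g.
+        # with the default kernel_sizes=(3, 3, 3), an input of length 243
+        # yields outputs of length 235 and 217, as in the docstring example.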
+ + self.expand_conv = ConvModule( + in_channels, + stem_channels, + kernel_size=kernel_sizes[0], + stride=kernel_sizes[0] if use_stride_conv else 1, + bias='auto', + conv_cfg=conv_cfg, + norm_cfg=norm_cfg) + + dilation = kernel_sizes[0] + self.tcn_blocks = nn.ModuleList() + for i in range(1, num_blocks + 1): + self.tcn_blocks.append( + BasicTemporalBlock( + in_channels=stem_channels, + out_channels=stem_channels, + mid_channels=stem_channels, + kernel_size=kernel_sizes[i], + dilation=dilation, + dropout=dropout, + causal=causal, + residual=residual, + use_stride_conv=use_stride_conv, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg)) + dilation *= kernel_sizes[i] + + if self.max_norm is not None: + # Apply weight norm clip to conv layers + weight_clip = WeightNormClipHook(self.max_norm) + for module in self.modules(): + if isinstance(module, nn.modules.conv._ConvNd): + weight_clip.register(module) + + self.dropout = nn.Dropout(dropout) if dropout > 0 else None + + def forward(self, x): + """Forward function.""" + x = self.expand_conv(x) + + if self.dropout is not None: + x = self.dropout(x) + + outs = [] + for i in range(self.num_blocks): + x = self.tcn_blocks[i](x) + outs.append(x) + + return tuple(outs) diff --git a/mmpose/models/backbones/utils/__init__.py b/mmpose/models/backbones/utils/__init__.py index 07e42f8912..a3febdd053 100644 --- a/mmpose/models/backbones/utils/__init__.py +++ b/mmpose/models/backbones/utils/__init__.py @@ -1,11 +1,11 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .channel_shuffle import channel_shuffle -from .inverted_residual import InvertedResidual -from .make_divisible import make_divisible -from .se_layer import SELayer -from .utils import get_state_dict, load_checkpoint - -__all__ = [ - 'channel_shuffle', 'make_divisible', 'InvertedResidual', 'SELayer', - 'load_checkpoint', 'get_state_dict' -] +# Copyright (c) OpenMMLab. All rights reserved. +from .channel_shuffle import channel_shuffle +from .inverted_residual import InvertedResidual +from .make_divisible import make_divisible +from .se_layer import SELayer +from .utils import get_state_dict, load_checkpoint + +__all__ = [ + 'channel_shuffle', 'make_divisible', 'InvertedResidual', 'SELayer', + 'load_checkpoint', 'get_state_dict' +] diff --git a/mmpose/models/backbones/utils/channel_shuffle.py b/mmpose/models/backbones/utils/channel_shuffle.py index aedd826bee..3805e5eb9e 100644 --- a/mmpose/models/backbones/utils/channel_shuffle.py +++ b/mmpose/models/backbones/utils/channel_shuffle.py @@ -1,29 +1,29 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - - -def channel_shuffle(x, groups): - """Channel Shuffle operation. - - This function enables cross-group information flow for multiple groups - convolution layers. - - Args: - x (Tensor): The input tensor. - groups (int): The number of groups to divide the input tensor - in the channel dimension. - - Returns: - Tensor: The output tensor after channel shuffle operation. - """ - - batch_size, num_channels, height, width = x.size() - assert (num_channels % groups == 0), ('num_channels should be ' - 'divisible by groups') - channels_per_group = num_channels // groups - - x = x.view(batch_size, groups, channels_per_group, height, width) - x = torch.transpose(x, 1, 2).contiguous() - x = x.view(batch_size, groups * channels_per_group, height, width) - - return x +# Copyright (c) OpenMMLab. All rights reserved. +import torch + + +def channel_shuffle(x, groups): + """Channel Shuffle operation. 
+ + This function enables cross-group information flow for multiple groups + convolution layers. + + Args: + x (Tensor): The input tensor. + groups (int): The number of groups to divide the input tensor + in the channel dimension. + + Returns: + Tensor: The output tensor after channel shuffle operation. + """ + + batch_size, num_channels, height, width = x.size() + assert (num_channels % groups == 0), ('num_channels should be ' + 'divisible by groups') + channels_per_group = num_channels // groups + + x = x.view(batch_size, groups, channels_per_group, height, width) + x = torch.transpose(x, 1, 2).contiguous() + x = x.view(batch_size, groups * channels_per_group, height, width) + + return x diff --git a/mmpose/models/backbones/utils/ckpt_convert.py b/mmpose/models/backbones/utils/ckpt_convert.py index 14a43892c6..903f2d0975 100644 --- a/mmpose/models/backbones/utils/ckpt_convert.py +++ b/mmpose/models/backbones/utils/ckpt_convert.py @@ -1,62 +1,62 @@ -# Copyright (c) OpenMMLab. All rights reserved. - -# This script consists of several convert functions which -# can modify the weights of model in original repo to be -# pre-trained weights. - -from collections import OrderedDict - - -def swin_converter(ckpt): - - new_ckpt = OrderedDict() - - def correct_unfold_reduction_order(x): - out_channel, in_channel = x.shape - x = x.reshape(out_channel, 4, in_channel // 4) - x = x[:, [0, 2, 1, 3], :].transpose(1, - 2).reshape(out_channel, in_channel) - return x - - def correct_unfold_norm_order(x): - in_channel = x.shape[0] - x = x.reshape(4, in_channel // 4) - x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel) - return x - - for k, v in ckpt.items(): - if k.startswith('head'): - continue - elif k.startswith('layers'): - new_v = v - if 'attn.' in k: - new_k = k.replace('attn.', 'attn.w_msa.') - elif 'mlp.' in k: - if 'mlp.fc1.' in k: - new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.') - elif 'mlp.fc2.' in k: - new_k = k.replace('mlp.fc2.', 'ffn.layers.1.') - else: - new_k = k.replace('mlp.', 'ffn.') - elif 'downsample' in k: - new_k = k - if 'reduction.' in k: - new_v = correct_unfold_reduction_order(v) - elif 'norm.' in k: - new_v = correct_unfold_norm_order(v) - else: - new_k = k - new_k = new_k.replace('layers', 'stages', 1) - elif k.startswith('patch_embed'): - new_v = v - if 'proj' in k: - new_k = k.replace('proj', 'projection') - else: - new_k = k - else: - new_v = v - new_k = k - - new_ckpt['backbone.' + new_k] = new_v - - return new_ckpt +# Copyright (c) OpenMMLab. All rights reserved. + +# This script consists of several convert functions which +# can modify the weights of model in original repo to be +# pre-trained weights. + +from collections import OrderedDict + + +def swin_converter(ckpt): + + new_ckpt = OrderedDict() + + def correct_unfold_reduction_order(x): + out_channel, in_channel = x.shape + x = x.reshape(out_channel, 4, in_channel // 4) + x = x[:, [0, 2, 1, 3], :].transpose(1, + 2).reshape(out_channel, in_channel) + return x + + def correct_unfold_norm_order(x): + in_channel = x.shape[0] + x = x.reshape(4, in_channel // 4) + x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel) + return x + + for k, v in ckpt.items(): + if k.startswith('head'): + continue + elif k.startswith('layers'): + new_v = v + if 'attn.' in k: + new_k = k.replace('attn.', 'attn.w_msa.') + elif 'mlp.' in k: + if 'mlp.fc1.' in k: + new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.') + elif 'mlp.fc2.' 
in k: + new_k = k.replace('mlp.fc2.', 'ffn.layers.1.') + else: + new_k = k.replace('mlp.', 'ffn.') + elif 'downsample' in k: + new_k = k + if 'reduction.' in k: + new_v = correct_unfold_reduction_order(v) + elif 'norm.' in k: + new_v = correct_unfold_norm_order(v) + else: + new_k = k + new_k = new_k.replace('layers', 'stages', 1) + elif k.startswith('patch_embed'): + new_v = v + if 'proj' in k: + new_k = k.replace('proj', 'projection') + else: + new_k = k + else: + new_v = v + new_k = k + + new_ckpt['backbone.' + new_k] = new_v + + return new_ckpt diff --git a/mmpose/models/backbones/utils/inverted_residual.py b/mmpose/models/backbones/utils/inverted_residual.py index dff762c570..528e1f8281 100644 --- a/mmpose/models/backbones/utils/inverted_residual.py +++ b/mmpose/models/backbones/utils/inverted_residual.py @@ -1,128 +1,128 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy - -import torch.nn as nn -import torch.utils.checkpoint as cp -from mmcv.cnn import ConvModule - -from .se_layer import SELayer - - -class InvertedResidual(nn.Module): - """Inverted Residual Block. - - Args: - in_channels (int): The input channels of this Module. - out_channels (int): The output channels of this Module. - mid_channels (int): The input channels of the depthwise convolution. - kernel_size (int): The kernel size of the depthwise convolution. - Default: 3. - groups (None or int): The group number of the depthwise convolution. - Default: None, which means group number = mid_channels. - stride (int): The stride of the depthwise convolution. Default: 1. - se_cfg (dict): Config dict for se layer. Default: None, which means no - se layer. - with_expand_conv (bool): Use expand conv or not. If set False, - mid_channels must be the same with in_channels. - Default: True. - conv_cfg (dict): Config dict for convolution layer. Default: None, - which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='ReLU'). - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - - Returns: - Tensor: The output tensor. 
- """ - - def __init__(self, - in_channels, - out_channels, - mid_channels, - kernel_size=3, - groups=None, - stride=1, - se_cfg=None, - with_expand_conv=True, - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU'), - with_cp=False): - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - act_cfg = copy.deepcopy(act_cfg) - super().__init__() - self.with_res_shortcut = (stride == 1 and in_channels == out_channels) - assert stride in [1, 2] - self.with_cp = with_cp - self.with_se = se_cfg is not None - self.with_expand_conv = with_expand_conv - - if groups is None: - groups = mid_channels - - if self.with_se: - assert isinstance(se_cfg, dict) - if not self.with_expand_conv: - assert mid_channels == in_channels - - if self.with_expand_conv: - self.expand_conv = ConvModule( - in_channels=in_channels, - out_channels=mid_channels, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.depthwise_conv = ConvModule( - in_channels=mid_channels, - out_channels=mid_channels, - kernel_size=kernel_size, - stride=stride, - padding=kernel_size // 2, - groups=groups, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - if self.with_se: - self.se = SELayer(**se_cfg) - self.linear_conv = ConvModule( - in_channels=mid_channels, - out_channels=out_channels, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None) - - def forward(self, x): - - def _inner_forward(x): - out = x - - if self.with_expand_conv: - out = self.expand_conv(out) - - out = self.depthwise_conv(out) - - if self.with_se: - out = self.se(out) - - out = self.linear_conv(out) - - if self.with_res_shortcut: - return x + out - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - return out +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule + +from .se_layer import SELayer + + +class InvertedResidual(nn.Module): + """Inverted Residual Block. + + Args: + in_channels (int): The input channels of this Module. + out_channels (int): The output channels of this Module. + mid_channels (int): The input channels of the depthwise convolution. + kernel_size (int): The kernel size of the depthwise convolution. + Default: 3. + groups (None or int): The group number of the depthwise convolution. + Default: None, which means group number = mid_channels. + stride (int): The stride of the depthwise convolution. Default: 1. + se_cfg (dict): Config dict for se layer. Default: None, which means no + se layer. + with_expand_conv (bool): Use expand conv or not. If set False, + mid_channels must be the same with in_channels. + Default: True. + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + + Returns: + Tensor: The output tensor. 
+ """ + + def __init__(self, + in_channels, + out_channels, + mid_channels, + kernel_size=3, + groups=None, + stride=1, + se_cfg=None, + with_expand_conv=True, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + with_cp=False): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + act_cfg = copy.deepcopy(act_cfg) + super().__init__() + self.with_res_shortcut = (stride == 1 and in_channels == out_channels) + assert stride in [1, 2] + self.with_cp = with_cp + self.with_se = se_cfg is not None + self.with_expand_conv = with_expand_conv + + if groups is None: + groups = mid_channels + + if self.with_se: + assert isinstance(se_cfg, dict) + if not self.with_expand_conv: + assert mid_channels == in_channels + + if self.with_expand_conv: + self.expand_conv = ConvModule( + in_channels=in_channels, + out_channels=mid_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.depthwise_conv = ConvModule( + in_channels=mid_channels, + out_channels=mid_channels, + kernel_size=kernel_size, + stride=stride, + padding=kernel_size // 2, + groups=groups, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + if self.with_se: + self.se = SELayer(**se_cfg) + self.linear_conv = ConvModule( + in_channels=mid_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + def forward(self, x): + + def _inner_forward(x): + out = x + + if self.with_expand_conv: + out = self.expand_conv(out) + + out = self.depthwise_conv(out) + + if self.with_se: + out = self.se(out) + + out = self.linear_conv(out) + + if self.with_res_shortcut: + return x + out + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out diff --git a/mmpose/models/backbones/utils/make_divisible.py b/mmpose/models/backbones/utils/make_divisible.py index b7666be659..5347ed112d 100644 --- a/mmpose/models/backbones/utils/make_divisible.py +++ b/mmpose/models/backbones/utils/make_divisible.py @@ -1,25 +1,25 @@ -# Copyright (c) OpenMMLab. All rights reserved. -def make_divisible(value, divisor, min_value=None, min_ratio=0.9): - """Make divisible function. - - This function rounds the channel number down to the nearest value that can - be divisible by the divisor. - - Args: - value (int): The original channel number. - divisor (int): The divisor to fully divide the channel number. - min_value (int, optional): The minimum value of the output channel. - Default: None, means that the minimum value equal to the divisor. - min_ratio (float, optional): The minimum ratio of the rounded channel - number to the original channel number. Default: 0.9. - Returns: - int: The modified output channel number - """ - - if min_value is None: - min_value = divisor - new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) - # Make sure that round down does not go down by more than (1-min_ratio). - if new_value < min_ratio * value: - new_value += divisor - return new_value +# Copyright (c) OpenMMLab. All rights reserved. +def make_divisible(value, divisor, min_value=None, min_ratio=0.9): + """Make divisible function. + + This function rounds the channel number down to the nearest value that can + be divisible by the divisor. + + Args: + value (int): The original channel number. + divisor (int): The divisor to fully divide the channel number. 
+ min_value (int, optional): The minimum value of the output channel. + Default: None, means that the minimum value equal to the divisor. + min_ratio (float, optional): The minimum ratio of the rounded channel + number to the original channel number. Default: 0.9. + Returns: + int: The modified output channel number + """ + + if min_value is None: + min_value = divisor + new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than (1-min_ratio). + if new_value < min_ratio * value: + new_value += divisor + return new_value diff --git a/mmpose/models/backbones/utils/se_layer.py b/mmpose/models/backbones/utils/se_layer.py index ec6d7aeaa9..8bcde2bff6 100644 --- a/mmpose/models/backbones/utils/se_layer.py +++ b/mmpose/models/backbones/utils/se_layer.py @@ -1,54 +1,54 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mmengine -import torch.nn as nn -from mmcv.cnn import ConvModule - - -class SELayer(nn.Module): - """Squeeze-and-Excitation Module. - - Args: - channels (int): The input (and output) channels of the SE layer. - ratio (int): Squeeze ratio in SELayer, the intermediate channel will be - ``int(channels/ratio)``. Default: 16. - conv_cfg (None or dict): Config dict for convolution layer. - Default: None, which means using conv2d. - act_cfg (dict or Sequence[dict]): Config dict for activation layer. - If act_cfg is a dict, two activation layers will be configurated - by this dict. If act_cfg is a sequence of dicts, the first - activation layer will be configurated by the first dict and the - second activation layer will be configurated by the second dict. - Default: (dict(type='ReLU'), dict(type='Sigmoid')) - """ - - def __init__(self, - channels, - ratio=16, - conv_cfg=None, - act_cfg=(dict(type='ReLU'), dict(type='Sigmoid'))): - super().__init__() - if isinstance(act_cfg, dict): - act_cfg = (act_cfg, act_cfg) - assert len(act_cfg) == 2 - assert mmengine.is_tuple_of(act_cfg, dict) - self.global_avgpool = nn.AdaptiveAvgPool2d(1) - self.conv1 = ConvModule( - in_channels=channels, - out_channels=int(channels / ratio), - kernel_size=1, - stride=1, - conv_cfg=conv_cfg, - act_cfg=act_cfg[0]) - self.conv2 = ConvModule( - in_channels=int(channels / ratio), - out_channels=channels, - kernel_size=1, - stride=1, - conv_cfg=conv_cfg, - act_cfg=act_cfg[1]) - - def forward(self, x): - out = self.global_avgpool(x) - out = self.conv1(out) - out = self.conv2(out) - return x * out +# Copyright (c) OpenMMLab. All rights reserved. +import mmengine +import torch.nn as nn +from mmcv.cnn import ConvModule + + +class SELayer(nn.Module): + """Squeeze-and-Excitation Module. + + Args: + channels (int): The input (and output) channels of the SE layer. + ratio (int): Squeeze ratio in SELayer, the intermediate channel will be + ``int(channels/ratio)``. Default: 16. + conv_cfg (None or dict): Config dict for convolution layer. + Default: None, which means using conv2d. + act_cfg (dict or Sequence[dict]): Config dict for activation layer. + If act_cfg is a dict, two activation layers will be configurated + by this dict. If act_cfg is a sequence of dicts, the first + activation layer will be configurated by the first dict and the + second activation layer will be configurated by the second dict. 
+ Default: (dict(type='ReLU'), dict(type='Sigmoid')) + """ + + def __init__(self, + channels, + ratio=16, + conv_cfg=None, + act_cfg=(dict(type='ReLU'), dict(type='Sigmoid'))): + super().__init__() + if isinstance(act_cfg, dict): + act_cfg = (act_cfg, act_cfg) + assert len(act_cfg) == 2 + assert mmengine.is_tuple_of(act_cfg, dict) + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + self.conv1 = ConvModule( + in_channels=channels, + out_channels=int(channels / ratio), + kernel_size=1, + stride=1, + conv_cfg=conv_cfg, + act_cfg=act_cfg[0]) + self.conv2 = ConvModule( + in_channels=int(channels / ratio), + out_channels=channels, + kernel_size=1, + stride=1, + conv_cfg=conv_cfg, + act_cfg=act_cfg[1]) + + def forward(self, x): + out = self.global_avgpool(x) + out = self.conv1(out) + out = self.conv2(out) + return x * out diff --git a/mmpose/models/backbones/utils/utils.py b/mmpose/models/backbones/utils/utils.py index ebc4fe40cd..bd8c7d89fd 100644 --- a/mmpose/models/backbones/utils/utils.py +++ b/mmpose/models/backbones/utils/utils.py @@ -1,89 +1,89 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from collections import OrderedDict - -from mmengine.runner import CheckpointLoader, load_state_dict - - -def load_checkpoint(model, - filename, - map_location='cpu', - strict=False, - logger=None): - """Load checkpoint from a file or URI. - - Args: - model (Module): Module to load checkpoint. - filename (str): Accept local filepath, URL, ``torchvision://xxx``, - ``open-mmlab://xxx``. - map_location (str): Same as :func:`torch.load`. - strict (bool): Whether to allow different params for the model and - checkpoint. - logger (:mod:`logging.Logger` or None): The logger for error message. - - Returns: - dict or OrderedDict: The loaded checkpoint. - """ - checkpoint = CheckpointLoader.load_checkpoint(filename, map_location) - # OrderedDict is a subclass of dict - if not isinstance(checkpoint, dict): - raise RuntimeError( - f'No state_dict found in checkpoint file {filename}') - # get state_dict from checkpoint - if 'state_dict' in checkpoint: - state_dict_tmp = checkpoint['state_dict'] - elif 'model' in checkpoint: - state_dict_tmp = checkpoint['model'] - else: - state_dict_tmp = checkpoint - - state_dict = OrderedDict() - # strip prefix of state_dict - for k, v in state_dict_tmp.items(): - if k.startswith('module.backbone.'): - state_dict[k[16:]] = v - elif k.startswith('module.'): - state_dict[k[7:]] = v - elif k.startswith('backbone.'): - state_dict[k[9:]] = v - else: - state_dict[k] = v - # load state_dict - load_state_dict(model, state_dict, strict, logger) - return checkpoint - - -def get_state_dict(filename, map_location='cpu'): - """Get state_dict from a file or URI. - - Args: - filename (str): Accept local filepath, URL, ``torchvision://xxx``, - ``open-mmlab://xxx``. - map_location (str): Same as :func:`torch.load`. - - Returns: - OrderedDict: The state_dict. 
- """ - checkpoint = CheckpointLoader.load_checkpoint(filename, map_location) - # OrderedDict is a subclass of dict - if not isinstance(checkpoint, dict): - raise RuntimeError( - f'No state_dict found in checkpoint file {filename}') - # get state_dict from checkpoint - if 'state_dict' in checkpoint: - state_dict_tmp = checkpoint['state_dict'] - else: - state_dict_tmp = checkpoint - - state_dict = OrderedDict() - # strip prefix of state_dict - for k, v in state_dict_tmp.items(): - if k.startswith('module.backbone.'): - state_dict[k[16:]] = v - elif k.startswith('module.'): - state_dict[k[7:]] = v - elif k.startswith('backbone.'): - state_dict[k[9:]] = v - else: - state_dict[k] = v - - return state_dict +# Copyright (c) OpenMMLab. All rights reserved. +from collections import OrderedDict + +from mmengine.runner import CheckpointLoader, load_state_dict + + +def load_checkpoint(model, + filename, + map_location='cpu', + strict=False, + logger=None): + """Load checkpoint from a file or URI. + + Args: + model (Module): Module to load checkpoint. + filename (str): Accept local filepath, URL, ``torchvision://xxx``, + ``open-mmlab://xxx``. + map_location (str): Same as :func:`torch.load`. + strict (bool): Whether to allow different params for the model and + checkpoint. + logger (:mod:`logging.Logger` or None): The logger for error message. + + Returns: + dict or OrderedDict: The loaded checkpoint. + """ + checkpoint = CheckpointLoader.load_checkpoint(filename, map_location) + # OrderedDict is a subclass of dict + if not isinstance(checkpoint, dict): + raise RuntimeError( + f'No state_dict found in checkpoint file {filename}') + # get state_dict from checkpoint + if 'state_dict' in checkpoint: + state_dict_tmp = checkpoint['state_dict'] + elif 'model' in checkpoint: + state_dict_tmp = checkpoint['model'] + else: + state_dict_tmp = checkpoint + + state_dict = OrderedDict() + # strip prefix of state_dict + for k, v in state_dict_tmp.items(): + if k.startswith('module.backbone.'): + state_dict[k[16:]] = v + elif k.startswith('module.'): + state_dict[k[7:]] = v + elif k.startswith('backbone.'): + state_dict[k[9:]] = v + else: + state_dict[k] = v + # load state_dict + load_state_dict(model, state_dict, strict, logger) + return checkpoint + + +def get_state_dict(filename, map_location='cpu'): + """Get state_dict from a file or URI. + + Args: + filename (str): Accept local filepath, URL, ``torchvision://xxx``, + ``open-mmlab://xxx``. + map_location (str): Same as :func:`torch.load`. + + Returns: + OrderedDict: The state_dict. 
+ """ + checkpoint = CheckpointLoader.load_checkpoint(filename, map_location) + # OrderedDict is a subclass of dict + if not isinstance(checkpoint, dict): + raise RuntimeError( + f'No state_dict found in checkpoint file {filename}') + # get state_dict from checkpoint + if 'state_dict' in checkpoint: + state_dict_tmp = checkpoint['state_dict'] + else: + state_dict_tmp = checkpoint + + state_dict = OrderedDict() + # strip prefix of state_dict + for k, v in state_dict_tmp.items(): + if k.startswith('module.backbone.'): + state_dict[k[16:]] = v + elif k.startswith('module.'): + state_dict[k[7:]] = v + elif k.startswith('backbone.'): + state_dict[k[9:]] = v + else: + state_dict[k] = v + + return state_dict diff --git a/mmpose/models/backbones/v2v_net.py b/mmpose/models/backbones/v2v_net.py index 2cd1ab93b1..85b567b2a7 100644 --- a/mmpose/models/backbones/v2v_net.py +++ b/mmpose/models/backbones/v2v_net.py @@ -1,275 +1,275 @@ -# ------------------------------------------------------------------------------ -# Copyright and License Information -# Adapted from -# https://github.com/microsoft/voxelpose-pytorch/blob/main/lib/models/v2v_net.py -# Original Licence: MIT License -# ------------------------------------------------------------------------------ - -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmengine.model import BaseModule - -from mmpose.registry import MODELS -from .base_backbone import BaseBackbone - - -class Basic3DBlock(BaseModule): - """A basic 3D convolutional block. - - Args: - in_channels (int): Input channels of this block. - out_channels (int): Output channels of this block. - kernel_size (int): Kernel size of the convolution operation - conv_cfg (dict): Dictionary to construct and config conv layer. - Default: dict(type='Conv3d') - norm_cfg (dict): Dictionary to construct and config norm layer. - Default: dict(type='BN3d') - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - in_channels, - out_channels, - kernel_size, - conv_cfg=dict(type='Conv3d'), - norm_cfg=dict(type='BN3d'), - init_cfg=None): - super(Basic3DBlock, self).__init__(init_cfg=init_cfg) - self.block = ConvModule( - in_channels, - out_channels, - kernel_size, - stride=1, - padding=((kernel_size - 1) // 2), - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - bias=True) - - def forward(self, x): - """Forward function.""" - return self.block(x) - - -class Res3DBlock(BaseModule): - """A residual 3D convolutional block. - - Args: - in_channels (int): Input channels of this block. - out_channels (int): Output channels of this block. - kernel_size (int): Kernel size of the convolution operation - Default: 3 - conv_cfg (dict): Dictionary to construct and config conv layer. - Default: dict(type='Conv3d') - norm_cfg (dict): Dictionary to construct and config norm layer. - Default: dict(type='BN3d') - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, - in_channels, - out_channels, - kernel_size=3, - conv_cfg=dict(type='Conv3d'), - norm_cfg=dict(type='BN3d'), - init_cfg=None): - super(Res3DBlock, self).__init__(init_cfg=init_cfg) - self.res_branch = nn.Sequential( - ConvModule( - in_channels, - out_channels, - kernel_size, - stride=1, - padding=((kernel_size - 1) // 2), - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - bias=True), - ConvModule( - out_channels, - out_channels, - kernel_size, - stride=1, - padding=((kernel_size - 1) // 2), - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None, - bias=True)) - - if in_channels == out_channels: - self.skip_con = nn.Sequential() - else: - self.skip_con = ConvModule( - in_channels, - out_channels, - 1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None, - bias=True) - - def forward(self, x): - """Forward function.""" - res = self.res_branch(x) - skip = self.skip_con(x) - return F.relu(res + skip, True) - - -class Pool3DBlock(BaseModule): - """A 3D max-pool block. - - Args: - pool_size (int): Pool size of the 3D max-pool layer - """ - - def __init__(self, pool_size): - super(Pool3DBlock, self).__init__() - self.pool_size = pool_size - - def forward(self, x): - """Forward function.""" - return F.max_pool3d( - x, kernel_size=self.pool_size, stride=self.pool_size) - - -class Upsample3DBlock(BaseModule): - """A 3D upsample block. - - Args: - in_channels (int): Input channels of this block. - out_channels (int): Output channels of this block. - kernel_size (int): Kernel size of the transposed convolution operation. - Default: 2 - stride (int): Kernel size of the transposed convolution operation. - Default: 2 - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - in_channels, - out_channels, - kernel_size=2, - stride=2, - init_cfg=None): - super(Upsample3DBlock, self).__init__(init_cfg=init_cfg) - assert kernel_size == 2 - assert stride == 2 - self.block = nn.Sequential( - nn.ConvTranspose3d( - in_channels, - out_channels, - kernel_size=kernel_size, - stride=stride, - padding=0, - output_padding=0), nn.BatchNorm3d(out_channels), nn.ReLU(True)) - - def forward(self, x): - """Forward function.""" - return self.block(x) - - -class EncoderDecorder(BaseModule): - """An encoder-decoder block. - - Args: - in_channels (int): Input channels of this block - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, in_channels=32, init_cfg=None): - super(EncoderDecorder, self).__init__(init_cfg=init_cfg) - - self.encoder_pool1 = Pool3DBlock(2) - self.encoder_res1 = Res3DBlock(in_channels, in_channels * 2) - self.encoder_pool2 = Pool3DBlock(2) - self.encoder_res2 = Res3DBlock(in_channels * 2, in_channels * 4) - - self.mid_res = Res3DBlock(in_channels * 4, in_channels * 4) - - self.decoder_res2 = Res3DBlock(in_channels * 4, in_channels * 4) - self.decoder_upsample2 = Upsample3DBlock(in_channels * 4, - in_channels * 2, 2, 2) - self.decoder_res1 = Res3DBlock(in_channels * 2, in_channels * 2) - self.decoder_upsample1 = Upsample3DBlock(in_channels * 2, in_channels, - 2, 2) - - self.skip_res1 = Res3DBlock(in_channels, in_channels) - self.skip_res2 = Res3DBlock(in_channels * 2, in_channels * 2) - - def forward(self, x): - """Forward function.""" - skip_x1 = self.skip_res1(x) - x = self.encoder_pool1(x) - x = self.encoder_res1(x) - - skip_x2 = self.skip_res2(x) - x = self.encoder_pool2(x) - x = self.encoder_res2(x) - - x = self.mid_res(x) - - x = self.decoder_res2(x) - x = self.decoder_upsample2(x) - x = x + skip_x2 - - x = self.decoder_res1(x) - x = self.decoder_upsample1(x) - x = x + skip_x1 - - return x - - -@MODELS.register_module() -class V2VNet(BaseBackbone): - """V2VNet. - - Please refer to the `paper ` - for details. - - Args: - input_channels (int): - Number of channels of the input feature volume. - output_channels (int): - Number of channels of the output volume. - mid_channels (int): - Input and output channels of the encoder-decoder block. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: ``dict( - type='Normal', - std=0.001, - layer=['Conv3d', 'ConvTranspose3d'] - )`` - """ - - def __init__(self, - input_channels, - output_channels, - mid_channels=32, - init_cfg=dict( - type='Normal', - std=0.001, - layer=['Conv3d', 'ConvTranspose3d'])): - super(V2VNet, self).__init__(init_cfg=init_cfg) - - self.front_layers = nn.Sequential( - Basic3DBlock(input_channels, mid_channels // 2, 7), - Res3DBlock(mid_channels // 2, mid_channels), - ) - - self.encoder_decoder = EncoderDecorder(in_channels=mid_channels) - - self.output_layer = nn.Conv3d( - mid_channels, output_channels, kernel_size=1, stride=1, padding=0) - - def forward(self, x): - """Forward function.""" - x = self.front_layers(x) - x = self.encoder_decoder(x) - x = self.output_layer(x) - - return (x, ) +# ------------------------------------------------------------------------------ +# Copyright and License Information +# Adapted from +# https://github.com/microsoft/voxelpose-pytorch/blob/main/lib/models/v2v_net.py +# Original Licence: MIT License +# ------------------------------------------------------------------------------ + +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone + + +class Basic3DBlock(BaseModule): + """A basic 3D convolutional block. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + kernel_size (int): Kernel size of the convolution operation + conv_cfg (dict): Dictionary to construct and config conv layer. + Default: dict(type='Conv3d') + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: dict(type='BN3d') + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d'), + init_cfg=None): + super(Basic3DBlock, self).__init__(init_cfg=init_cfg) + self.block = ConvModule( + in_channels, + out_channels, + kernel_size, + stride=1, + padding=((kernel_size - 1) // 2), + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + bias=True) + + def forward(self, x): + """Forward function.""" + return self.block(x) + + +class Res3DBlock(BaseModule): + """A residual 3D convolutional block. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + kernel_size (int): Kernel size of the convolution operation + Default: 3 + conv_cfg (dict): Dictionary to construct and config conv layer. + Default: dict(type='Conv3d') + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: dict(type='BN3d') + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size=3, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d'), + init_cfg=None): + super(Res3DBlock, self).__init__(init_cfg=init_cfg) + self.res_branch = nn.Sequential( + ConvModule( + in_channels, + out_channels, + kernel_size, + stride=1, + padding=((kernel_size - 1) // 2), + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + bias=True), + ConvModule( + out_channels, + out_channels, + kernel_size, + stride=1, + padding=((kernel_size - 1) // 2), + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None, + bias=True)) + + if in_channels == out_channels: + self.skip_con = nn.Sequential() + else: + self.skip_con = ConvModule( + in_channels, + out_channels, + 1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None, + bias=True) + + def forward(self, x): + """Forward function.""" + res = self.res_branch(x) + skip = self.skip_con(x) + return F.relu(res + skip, True) + + +class Pool3DBlock(BaseModule): + """A 3D max-pool block. + + Args: + pool_size (int): Pool size of the 3D max-pool layer + """ + + def __init__(self, pool_size): + super(Pool3DBlock, self).__init__() + self.pool_size = pool_size + + def forward(self, x): + """Forward function.""" + return F.max_pool3d( + x, kernel_size=self.pool_size, stride=self.pool_size) + + +class Upsample3DBlock(BaseModule): + """A 3D upsample block. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + kernel_size (int): Kernel size of the transposed convolution operation. + Default: 2 + stride (int): Kernel size of the transposed convolution operation. + Default: 2 + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size=2, + stride=2, + init_cfg=None): + super(Upsample3DBlock, self).__init__(init_cfg=init_cfg) + assert kernel_size == 2 + assert stride == 2 + self.block = nn.Sequential( + nn.ConvTranspose3d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=0, + output_padding=0), nn.BatchNorm3d(out_channels), nn.ReLU(True)) + + def forward(self, x): + """Forward function.""" + return self.block(x) + + +class EncoderDecorder(BaseModule): + """An encoder-decoder block. + + Args: + in_channels (int): Input channels of this block + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, in_channels=32, init_cfg=None): + super(EncoderDecorder, self).__init__(init_cfg=init_cfg) + + self.encoder_pool1 = Pool3DBlock(2) + self.encoder_res1 = Res3DBlock(in_channels, in_channels * 2) + self.encoder_pool2 = Pool3DBlock(2) + self.encoder_res2 = Res3DBlock(in_channels * 2, in_channels * 4) + + self.mid_res = Res3DBlock(in_channels * 4, in_channels * 4) + + self.decoder_res2 = Res3DBlock(in_channels * 4, in_channels * 4) + self.decoder_upsample2 = Upsample3DBlock(in_channels * 4, + in_channels * 2, 2, 2) + self.decoder_res1 = Res3DBlock(in_channels * 2, in_channels * 2) + self.decoder_upsample1 = Upsample3DBlock(in_channels * 2, in_channels, + 2, 2) + + self.skip_res1 = Res3DBlock(in_channels, in_channels) + self.skip_res2 = Res3DBlock(in_channels * 2, in_channels * 2) + + def forward(self, x): + """Forward function.""" + skip_x1 = self.skip_res1(x) + x = self.encoder_pool1(x) + x = self.encoder_res1(x) + + skip_x2 = self.skip_res2(x) + x = self.encoder_pool2(x) + x = self.encoder_res2(x) + + x = self.mid_res(x) + + x = self.decoder_res2(x) + x = self.decoder_upsample2(x) + x = x + skip_x2 + + x = self.decoder_res1(x) + x = self.decoder_upsample1(x) + x = x + skip_x1 + + return x + + +@MODELS.register_module() +class V2VNet(BaseBackbone): + """V2VNet. + + Please refer to the `paper ` + for details. + + Args: + input_channels (int): + Number of channels of the input feature volume. + output_channels (int): + Number of channels of the output volume. + mid_channels (int): + Input and output channels of the encoder-decoder block. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: ``dict( + type='Normal', + std=0.001, + layer=['Conv3d', 'ConvTranspose3d'] + )`` + """ + + def __init__(self, + input_channels, + output_channels, + mid_channels=32, + init_cfg=dict( + type='Normal', + std=0.001, + layer=['Conv3d', 'ConvTranspose3d'])): + super(V2VNet, self).__init__(init_cfg=init_cfg) + + self.front_layers = nn.Sequential( + Basic3DBlock(input_channels, mid_channels // 2, 7), + Res3DBlock(mid_channels // 2, mid_channels), + ) + + self.encoder_decoder = EncoderDecorder(in_channels=mid_channels) + + self.output_layer = nn.Conv3d( + mid_channels, output_channels, kernel_size=1, stride=1, padding=0) + + def forward(self, x): + """Forward function.""" + x = self.front_layers(x) + x = self.encoder_decoder(x) + x = self.output_layer(x) + + return (x, ) diff --git a/mmpose/models/backbones/vgg.py b/mmpose/models/backbones/vgg.py index 8fa09d8dc7..52fd2a1913 100644 --- a/mmpose/models/backbones/vgg.py +++ b/mmpose/models/backbones/vgg.py @@ -1,201 +1,201 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import torch.nn as nn -from mmcv.cnn import ConvModule -from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm - -from mmpose.registry import MODELS -from .base_backbone import BaseBackbone - - -def make_vgg_layer(in_channels, - out_channels, - num_blocks, - conv_cfg=None, - norm_cfg=None, - act_cfg=dict(type='ReLU'), - dilation=1, - with_norm=False, - ceil_mode=False): - layers = [] - for _ in range(num_blocks): - layer = ConvModule( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=3, - dilation=dilation, - padding=dilation, - bias=True, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - layers.append(layer) - in_channels = out_channels - layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=ceil_mode)) - - return layers - - -@MODELS.register_module() -class VGG(BaseBackbone): - """VGG backbone. - - Args: - depth (int): Depth of vgg, from {11, 13, 16, 19}. - with_norm (bool): Use BatchNorm or not. - num_classes (int): number of classes for classification. - num_stages (int): VGG stages, normally 5. - dilations (Sequence[int]): Dilation of each stage. - out_indices (Sequence[int]): Output from which stages. If only one - stage is specified, a single tensor (feature map) is returned, - otherwise multiple stages are specified, a tuple of tensors will - be returned. When it is None, the default behavior depends on - whether num_classes is specified. If num_classes <= 0, the default - value is (4, ), outputting the last feature map before classifier. - If num_classes > 0, the default value is (5, ), outputting the - classification score. Default: None. - frozen_stages (int): Stages to be frozen (all param fixed). -1 means - not freezing any parameters. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Default: False. - ceil_mode (bool): Whether to use ceil_mode of MaxPool. Default: False. - with_last_pool (bool): Whether to keep the last pooling before - classifier. Default: True. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: - ``[ - dict(type='Kaiming', layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']), - dict( - type='Normal', - std=0.01, - layer=['Linear']), - ]`` - """ - - # Parameters to build layers. Each element specifies the number of conv in - # each stage. For example, VGG11 contains 11 layers with learnable - # parameters. 11 is computed as 11 = (1 + 1 + 2 + 2 + 2) + 3, - # where 3 indicates the last three fully-connected layers. 
- arch_settings = { - 11: (1, 1, 2, 2, 2), - 13: (2, 2, 2, 2, 2), - 16: (2, 2, 3, 3, 3), - 19: (2, 2, 4, 4, 4) - } - - def __init__(self, - depth, - num_classes=-1, - num_stages=5, - dilations=(1, 1, 1, 1, 1), - out_indices=None, - frozen_stages=-1, - conv_cfg=None, - norm_cfg=None, - act_cfg=dict(type='ReLU'), - norm_eval=False, - ceil_mode=False, - with_last_pool=True, - init_cfg=[ - dict(type='Kaiming', layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']), - dict(type='Normal', std=0.01, layer=['Linear']), - ]): - super().__init__(init_cfg=init_cfg) - if depth not in self.arch_settings: - raise KeyError(f'invalid depth {depth} for vgg') - assert num_stages >= 1 and num_stages <= 5 - stage_blocks = self.arch_settings[depth] - self.stage_blocks = stage_blocks[:num_stages] - assert len(dilations) == num_stages - - self.num_classes = num_classes - self.frozen_stages = frozen_stages - self.norm_eval = norm_eval - with_norm = norm_cfg is not None - - if out_indices is None: - out_indices = (5, ) if num_classes > 0 else (4, ) - assert max(out_indices) <= num_stages - self.out_indices = out_indices - - self.in_channels = 3 - start_idx = 0 - vgg_layers = [] - self.range_sub_modules = [] - for i, num_blocks in enumerate(self.stage_blocks): - num_modules = num_blocks + 1 - end_idx = start_idx + num_modules - dilation = dilations[i] - out_channels = 64 * 2**i if i < 4 else 512 - vgg_layer = make_vgg_layer( - self.in_channels, - out_channels, - num_blocks, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg, - dilation=dilation, - with_norm=with_norm, - ceil_mode=ceil_mode) - vgg_layers.extend(vgg_layer) - self.in_channels = out_channels - self.range_sub_modules.append([start_idx, end_idx]) - start_idx = end_idx - if not with_last_pool: - vgg_layers.pop(-1) - self.range_sub_modules[-1][1] -= 1 - self.module_name = 'features' - self.add_module(self.module_name, nn.Sequential(*vgg_layers)) - - if self.num_classes > 0: - self.classifier = nn.Sequential( - nn.Linear(512 * 7 * 7, 4096), - nn.ReLU(True), - nn.Dropout(), - nn.Linear(4096, 4096), - nn.ReLU(True), - nn.Dropout(), - nn.Linear(4096, num_classes), - ) - - def forward(self, x): - outs = [] - vgg_layers = getattr(self, self.module_name) - for i in range(len(self.stage_blocks)): - for j in range(*self.range_sub_modules[i]): - vgg_layer = vgg_layers[j] - x = vgg_layer(x) - if i in self.out_indices: - outs.append(x) - if self.num_classes > 0: - x = x.view(x.size(0), -1) - x = self.classifier(x) - outs.append(x) - - return tuple(outs) - - def _freeze_stages(self): - vgg_layers = getattr(self, self.module_name) - for i in range(self.frozen_stages): - for j in range(*self.range_sub_modules[i]): - m = vgg_layers[j] - m.eval() - for param in m.parameters(): - param.requires_grad = False - - def train(self, mode=True): - super().train(mode) - self._freeze_stages() - if mode and self.norm_eval: - for m in self.modules(): - # trick: eval have effect on BatchNorm only - if isinstance(m, _BatchNorm): - m.eval() +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch.nn as nn +from mmcv.cnn import ConvModule +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone + + +def make_vgg_layer(in_channels, + out_channels, + num_blocks, + conv_cfg=None, + norm_cfg=None, + act_cfg=dict(type='ReLU'), + dilation=1, + with_norm=False, + ceil_mode=False): + layers = [] + for _ in range(num_blocks): + layer = ConvModule( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + dilation=dilation, + padding=dilation, + bias=True, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + layers.append(layer) + in_channels = out_channels + layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=ceil_mode)) + + return layers + + +@MODELS.register_module() +class VGG(BaseBackbone): + """VGG backbone. + + Args: + depth (int): Depth of vgg, from {11, 13, 16, 19}. + with_norm (bool): Use BatchNorm or not. + num_classes (int): number of classes for classification. + num_stages (int): VGG stages, normally 5. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. When it is None, the default behavior depends on + whether num_classes is specified. If num_classes <= 0, the default + value is (4, ), outputting the last feature map before classifier. + If num_classes > 0, the default value is (5, ), outputting the + classification score. Default: None. + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + ceil_mode (bool): Whether to use ceil_mode of MaxPool. Default: False. + with_last_pool (bool): Whether to keep the last pooling before + classifier. Default: True. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']), + dict( + type='Normal', + std=0.01, + layer=['Linear']), + ]`` + """ + + # Parameters to build layers. Each element specifies the number of conv in + # each stage. For example, VGG11 contains 11 layers with learnable + # parameters. 11 is computed as 11 = (1 + 1 + 2 + 2 + 2) + 3, + # where 3 indicates the last three fully-connected layers. 
+ arch_settings = { + 11: (1, 1, 2, 2, 2), + 13: (2, 2, 2, 2, 2), + 16: (2, 2, 3, 3, 3), + 19: (2, 2, 4, 4, 4) + } + + def __init__(self, + depth, + num_classes=-1, + num_stages=5, + dilations=(1, 1, 1, 1, 1), + out_indices=None, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=None, + act_cfg=dict(type='ReLU'), + norm_eval=False, + ceil_mode=False, + with_last_pool=True, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']), + dict(type='Normal', std=0.01, layer=['Linear']), + ]): + super().__init__(init_cfg=init_cfg) + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for vgg') + assert num_stages >= 1 and num_stages <= 5 + stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + assert len(dilations) == num_stages + + self.num_classes = num_classes + self.frozen_stages = frozen_stages + self.norm_eval = norm_eval + with_norm = norm_cfg is not None + + if out_indices is None: + out_indices = (5, ) if num_classes > 0 else (4, ) + assert max(out_indices) <= num_stages + self.out_indices = out_indices + + self.in_channels = 3 + start_idx = 0 + vgg_layers = [] + self.range_sub_modules = [] + for i, num_blocks in enumerate(self.stage_blocks): + num_modules = num_blocks + 1 + end_idx = start_idx + num_modules + dilation = dilations[i] + out_channels = 64 * 2**i if i < 4 else 512 + vgg_layer = make_vgg_layer( + self.in_channels, + out_channels, + num_blocks, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + dilation=dilation, + with_norm=with_norm, + ceil_mode=ceil_mode) + vgg_layers.extend(vgg_layer) + self.in_channels = out_channels + self.range_sub_modules.append([start_idx, end_idx]) + start_idx = end_idx + if not with_last_pool: + vgg_layers.pop(-1) + self.range_sub_modules[-1][1] -= 1 + self.module_name = 'features' + self.add_module(self.module_name, nn.Sequential(*vgg_layers)) + + if self.num_classes > 0: + self.classifier = nn.Sequential( + nn.Linear(512 * 7 * 7, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, num_classes), + ) + + def forward(self, x): + outs = [] + vgg_layers = getattr(self, self.module_name) + for i in range(len(self.stage_blocks)): + for j in range(*self.range_sub_modules[i]): + vgg_layer = vgg_layers[j] + x = vgg_layer(x) + if i in self.out_indices: + outs.append(x) + if self.num_classes > 0: + x = x.view(x.size(0), -1) + x = self.classifier(x) + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + vgg_layers = getattr(self, self.module_name) + for i in range(self.frozen_stages): + for j in range(*self.range_sub_modules[i]): + m = vgg_layers[j] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmpose/models/backbones/vipnas_mbv3.py b/mmpose/models/backbones/vipnas_mbv3.py index 9156cafa56..3e56439b01 100644 --- a/mmpose/models/backbones/vipnas_mbv3.py +++ b/mmpose/models/backbones/vipnas_mbv3.py @@ -1,173 +1,173 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
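For orientation, a minimal usage sketch of the VGG backbone defined in vgg.py above, assuming torch and mmpose are installed; the 224x224 input and the out_indices choice are only illustrative:

import torch
from mmpose.models.backbones import VGG

backbone = VGG(depth=16, out_indices=(2, 3, 4))
backbone.eval()
with torch.no_grad():
    outs = backbone(torch.randn(1, 3, 224, 224))
for out in outs:
    # one feature map per requested stage:
    # (1, 256, 28, 28), (1, 512, 14, 14), (1, 512, 7, 7)
    print(out.shape)

With the default num_classes=-1 and out_indices=None, only the last feature map before the classifier would be returned.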
-import copy - -from mmcv.cnn import ConvModule -from torch.nn.modules.batchnorm import _BatchNorm - -from mmpose.registry import MODELS -from .base_backbone import BaseBackbone -from .utils import InvertedResidual - - -@MODELS.register_module() -class ViPNAS_MobileNetV3(BaseBackbone): - """ViPNAS_MobileNetV3 backbone. - - "ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search" - More details can be found in the `paper - `__ . - - Args: - wid (list(int)): Searched width config for each stage. - expan (list(int)): Searched expansion ratio config for each stage. - dep (list(int)): Searched depth config for each stage. - ks (list(int)): Searched kernel size config for each stage. - group (list(int)): Searched group number config for each stage. - att (list(bool)): Searched attention config for each stage. - stride (list(int)): Stride config for each stage. - act (list(dict)): Activation config for each stage. - conv_cfg (dict): Config dict for convolution layer. - Default: None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - frozen_stages (int): Stages to be frozen (all param fixed). - Default: -1, which means not freezing any parameters. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Default: False. - with_cp (bool): Use checkpoint or not. Using checkpoint will save - some memory while slowing down the training speed. - Default: False. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: - ``[ - dict(type='Normal', std=0.001, layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ]`` - """ - - def __init__( - self, - wid=[16, 16, 24, 40, 80, 112, 160], - expan=[None, 1, 5, 4, 5, 5, 6], - dep=[None, 1, 4, 4, 4, 4, 4], - ks=[3, 3, 7, 7, 5, 7, 5], - group=[None, 8, 120, 20, 100, 280, 240], - att=[None, True, True, False, True, True, True], - stride=[2, 1, 2, 2, 2, 1, 2], - act=['HSwish', 'ReLU', 'ReLU', 'ReLU', 'HSwish', 'HSwish', 'HSwish'], - conv_cfg=None, - norm_cfg=dict(type='BN'), - frozen_stages=-1, - norm_eval=False, - with_cp=False, - init_cfg=[ - dict(type='Normal', std=0.001, layer=['Conv2d']), - dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) - ], - ): - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - super().__init__(init_cfg=init_cfg) - self.wid = wid - self.expan = expan - self.dep = dep - self.ks = ks - self.group = group - self.att = att - self.stride = stride - self.act = act - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.frozen_stages = frozen_stages - self.norm_eval = norm_eval - self.with_cp = with_cp - - self.conv1 = ConvModule( - in_channels=3, - out_channels=self.wid[0], - kernel_size=self.ks[0], - stride=self.stride[0], - padding=self.ks[0] // 2, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=dict(type=self.act[0])) - - self.layers = self._make_layer() - - def _make_layer(self): - layers = [] - layer_index = 0 - for i, dep in enumerate(self.dep[1:]): - mid_channels = self.wid[i + 1] * self.expan[i + 1] - - if self.att[i + 1]: - se_cfg = dict( - channels=mid_channels, - ratio=4, - act_cfg=(dict(type='ReLU'), - dict(type='HSigmoid', bias=1.0, divisor=2.0))) - else: - se_cfg = None - - if self.expan[i + 1] == 1: - with_expand_conv = False - else: - with_expand_conv = True - - for j in range(dep): - if j == 0: - stride = self.stride[i + 1] - in_channels = 
self.wid[i] - else: - stride = 1 - in_channels = self.wid[i + 1] - - layer = InvertedResidual( - in_channels=in_channels, - out_channels=self.wid[i + 1], - mid_channels=mid_channels, - kernel_size=self.ks[i + 1], - groups=self.group[i + 1], - stride=stride, - se_cfg=se_cfg, - with_expand_conv=with_expand_conv, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=dict(type=self.act[i + 1]), - with_cp=self.with_cp) - layer_index += 1 - layer_name = f'layer{layer_index}' - self.add_module(layer_name, layer) - layers.append(layer_name) - return layers - - def forward(self, x): - x = self.conv1(x) - - for i, layer_name in enumerate(self.layers): - layer = getattr(self, layer_name) - x = layer(x) - - return (x, ) - - def _freeze_stages(self): - if self.frozen_stages >= 0: - for param in self.conv1.parameters(): - param.requires_grad = False - for i in range(1, self.frozen_stages + 1): - layer = getattr(self, f'layer{i}') - layer.eval() - for param in layer.parameters(): - param.requires_grad = False - - def train(self, mode=True): - super().train(mode) - self._freeze_stages() - if mode and self.norm_eval: - for m in self.modules(): - if isinstance(m, _BatchNorm): - m.eval() +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +from mmcv.cnn import ConvModule +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone +from .utils import InvertedResidual + + +@MODELS.register_module() +class ViPNAS_MobileNetV3(BaseBackbone): + """ViPNAS_MobileNetV3 backbone. + + "ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search" + More details can be found in the `paper + `__ . + + Args: + wid (list(int)): Searched width config for each stage. + expan (list(int)): Searched expansion ratio config for each stage. + dep (list(int)): Searched depth config for each stage. + ks (list(int)): Searched kernel size config for each stage. + group (list(int)): Searched group number config for each stage. + att (list(bool)): Searched attention config for each stage. + stride (list(int)): Stride config for each stage. + act (list(dict)): Activation config for each stage. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save + some memory while slowing down the training speed. + Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: + ``[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + """ + + def __init__( + self, + wid=[16, 16, 24, 40, 80, 112, 160], + expan=[None, 1, 5, 4, 5, 5, 6], + dep=[None, 1, 4, 4, 4, 4, 4], + ks=[3, 3, 7, 7, 5, 7, 5], + group=[None, 8, 120, 20, 100, 280, 240], + att=[None, True, True, False, True, True, True], + stride=[2, 1, 2, 2, 2, 1, 2], + act=['HSwish', 'ReLU', 'ReLU', 'ReLU', 'HSwish', 'HSwish', 'HSwish'], + conv_cfg=None, + norm_cfg=dict(type='BN'), + frozen_stages=-1, + norm_eval=False, + with_cp=False, + init_cfg=[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) + ], + ): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + self.wid = wid + self.expan = expan + self.dep = dep + self.ks = ks + self.group = group + self.att = att + self.stride = stride + self.act = act + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.frozen_stages = frozen_stages + self.norm_eval = norm_eval + self.with_cp = with_cp + + self.conv1 = ConvModule( + in_channels=3, + out_channels=self.wid[0], + kernel_size=self.ks[0], + stride=self.stride[0], + padding=self.ks[0] // 2, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=dict(type=self.act[0])) + + self.layers = self._make_layer() + + def _make_layer(self): + layers = [] + layer_index = 0 + for i, dep in enumerate(self.dep[1:]): + mid_channels = self.wid[i + 1] * self.expan[i + 1] + + if self.att[i + 1]: + se_cfg = dict( + channels=mid_channels, + ratio=4, + act_cfg=(dict(type='ReLU'), + dict(type='HSigmoid', bias=1.0, divisor=2.0))) + else: + se_cfg = None + + if self.expan[i + 1] == 1: + with_expand_conv = False + else: + with_expand_conv = True + + for j in range(dep): + if j == 0: + stride = self.stride[i + 1] + in_channels = self.wid[i] + else: + stride = 1 + in_channels = self.wid[i + 1] + + layer = InvertedResidual( + in_channels=in_channels, + out_channels=self.wid[i + 1], + mid_channels=mid_channels, + kernel_size=self.ks[i + 1], + groups=self.group[i + 1], + stride=stride, + se_cfg=se_cfg, + with_expand_conv=with_expand_conv, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type=self.act[i + 1]), + with_cp=self.with_cp) + layer_index += 1 + layer_name = f'layer{layer_index}' + self.add_module(layer_name, layer) + layers.append(layer_name) + return layers + + def forward(self, x): + x = self.conv1(x) + + for i, layer_name in enumerate(self.layers): + layer = getattr(self, layer_name) + x = layer(x) + + return (x, ) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + for i in range(1, self.frozen_stages + 1): + layer = getattr(self, f'layer{i}') + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmpose/models/backbones/vipnas_resnet.py b/mmpose/models/backbones/vipnas_resnet.py index 7be810b449..6e55a70998 100644 --- a/mmpose/models/backbones/vipnas_resnet.py +++ b/mmpose/models/backbones/vipnas_resnet.py @@ -1,596 +1,596 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
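A similar quick check for the searched MobileNetV3 backbone in vipnas_mbv3.py above; the constructor defaults already encode the searched widths, depths and kernel sizes, and the 256x192 input size is only an example:

import torch
from mmpose.models.backbones import ViPNAS_MobileNetV3

backbone = ViPNAS_MobileNetV3()  # defaults reproduce the searched architecture
backbone.eval()
with torch.no_grad():
    feat, = backbone(torch.randn(1, 3, 256, 192))
# conv1 plus the strided stages give an overall stride of 32, so a 256x192
# input yields a (1, 160, 8, 6) feature map (wid[-1] = 160 channels)
print(feat.shape)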
-import copy - -import torch.nn as nn -import torch.utils.checkpoint as cp -from mmcv.cnn import ConvModule, build_conv_layer, build_norm_layer -from mmcv.cnn.bricks import ContextBlock -from mmengine.model import BaseModule, Sequential -from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm - -from mmpose.registry import MODELS -from .base_backbone import BaseBackbone - - -class ViPNAS_Bottleneck(BaseModule): - """Bottleneck block for ViPNAS_ResNet. - - Args: - in_channels (int): Input channels of this block. - out_channels (int): Output channels of this block. - expansion (int): The ratio of ``out_channels/mid_channels`` where - ``mid_channels`` is the input/output channels of conv2. Default: 4. - stride (int): stride of the block. Default: 1 - dilation (int): dilation of convolution. Default: 1 - downsample (nn.Module): downsample operation on identity branch. - Default: None. - style (str): ``"pytorch"`` or ``"caffe"``. If set to "pytorch", the - stride-two layer is the 3x3 conv layer, otherwise the stride-two - layer is the first 1x1 conv layer. Default: "pytorch". - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. - conv_cfg (dict): dictionary to construct and config conv layer. - Default: None - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - kernel_size (int): kernel size of conv2 searched in ViPANS. - groups (int): group number of conv2 searched in ViPNAS. - attention (bool): whether to use attention module in the end of - the block. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - in_channels, - out_channels, - expansion=4, - stride=1, - dilation=1, - downsample=None, - style='pytorch', - with_cp=False, - conv_cfg=None, - norm_cfg=dict(type='BN'), - kernel_size=3, - groups=1, - attention=False, - init_cfg=None): - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - super().__init__(init_cfg=init_cfg) - assert style in ['pytorch', 'caffe'] - - self.in_channels = in_channels - self.out_channels = out_channels - self.expansion = expansion - assert out_channels % expansion == 0 - self.mid_channels = out_channels // expansion - self.stride = stride - self.dilation = dilation - self.style = style - self.with_cp = with_cp - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - - if self.style == 'pytorch': - self.conv1_stride = 1 - self.conv2_stride = stride - else: - self.conv1_stride = stride - self.conv2_stride = 1 - - self.norm1_name, norm1 = build_norm_layer( - norm_cfg, self.mid_channels, postfix=1) - self.norm2_name, norm2 = build_norm_layer( - norm_cfg, self.mid_channels, postfix=2) - self.norm3_name, norm3 = build_norm_layer( - norm_cfg, out_channels, postfix=3) - - self.conv1 = build_conv_layer( - conv_cfg, - in_channels, - self.mid_channels, - kernel_size=1, - stride=self.conv1_stride, - bias=False) - self.add_module(self.norm1_name, norm1) - self.conv2 = build_conv_layer( - conv_cfg, - self.mid_channels, - self.mid_channels, - kernel_size=kernel_size, - stride=self.conv2_stride, - padding=kernel_size // 2, - groups=groups, - dilation=dilation, - bias=False) - - self.add_module(self.norm2_name, norm2) - self.conv3 = build_conv_layer( - conv_cfg, - self.mid_channels, - out_channels, - kernel_size=1, - bias=False) - self.add_module(self.norm3_name, norm3) - - if attention: - self.attention = ContextBlock(out_channels, - max(1.0 / 16, 16.0 / out_channels)) - else: - 
self.attention = None - - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - - @property - def norm1(self): - """nn.Module: the normalization layer named "norm1" """ - return getattr(self, self.norm1_name) - - @property - def norm2(self): - """nn.Module: the normalization layer named "norm2" """ - return getattr(self, self.norm2_name) - - @property - def norm3(self): - """nn.Module: the normalization layer named "norm3" """ - return getattr(self, self.norm3_name) - - def forward(self, x): - """Forward function.""" - - def _inner_forward(x): - identity = x - - out = self.conv1(x) - out = self.norm1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.norm2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.norm3(out) - - if self.attention is not None: - out = self.attention(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - out = self.relu(out) - - return out - - -def get_expansion(block, expansion=None): - """Get the expansion of a residual block. - - The block expansion will be obtained by the following order: - - 1. If ``expansion`` is given, just return it. - 2. If ``block`` has the attribute ``expansion``, then return - ``block.expansion``. - 3. Return the default value according the the block type: - 4 for ``ViPNAS_Bottleneck``. - - Args: - block (class): The block class. - expansion (int | None): The given expansion ratio. - - Returns: - int: The expansion of the block. - """ - if isinstance(expansion, int): - assert expansion > 0 - elif expansion is None: - if hasattr(block, 'expansion'): - expansion = block.expansion - elif issubclass(block, ViPNAS_Bottleneck): - expansion = 1 - else: - raise TypeError(f'expansion is not specified for {block.__name__}') - else: - raise TypeError('expansion must be an integer or None') - - return expansion - - -class ViPNAS_ResLayer(Sequential): - """ViPNAS_ResLayer to build ResNet style backbone. - - Args: - block (nn.Module): Residual block used to build ViPNAS ResLayer. - num_blocks (int): Number of blocks. - in_channels (int): Input channels of this block. - out_channels (int): Output channels of this block. - expansion (int, optional): The expansion for BasicBlock/Bottleneck. - If not specified, it will firstly be obtained via - ``block.expansion``. If the block has no attribute "expansion", - the following default values will be used: 1 for BasicBlock and - 4 for Bottleneck. Default: None. - stride (int): stride of the first block. Default: 1. - avg_down (bool): Use AvgPool instead of stride conv when - downsampling in the bottleneck. Default: False - conv_cfg (dict): dictionary to construct and config conv layer. - Default: None - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - downsample_first (bool): Downsample at the first block or last block. - False for Hourglass, True for ResNet. Default: True - kernel_size (int): Kernel Size of the corresponding convolution layer - searched in the block. - groups (int): Group number of the corresponding convolution layer - searched in the block. - attention (bool): Whether to use attention module in the end of the - block. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, - block, - num_blocks, - in_channels, - out_channels, - expansion=None, - stride=1, - avg_down=False, - conv_cfg=None, - norm_cfg=dict(type='BN'), - downsample_first=True, - kernel_size=3, - groups=1, - attention=False, - init_cfg=None, - **kwargs): - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - self.block = block - self.expansion = get_expansion(block, expansion) - - downsample = None - if stride != 1 or in_channels != out_channels: - downsample = [] - conv_stride = stride - if avg_down and stride != 1: - conv_stride = 1 - downsample.append( - nn.AvgPool2d( - kernel_size=stride, - stride=stride, - ceil_mode=True, - count_include_pad=False)) - downsample.extend([ - build_conv_layer( - conv_cfg, - in_channels, - out_channels, - kernel_size=1, - stride=conv_stride, - bias=False), - build_norm_layer(norm_cfg, out_channels)[1] - ]) - downsample = nn.Sequential(*downsample) - - layers = [] - if downsample_first: - layers.append( - block( - in_channels=in_channels, - out_channels=out_channels, - expansion=self.expansion, - stride=stride, - downsample=downsample, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - kernel_size=kernel_size, - groups=groups, - attention=attention, - **kwargs)) - in_channels = out_channels - for _ in range(1, num_blocks): - layers.append( - block( - in_channels=in_channels, - out_channels=out_channels, - expansion=self.expansion, - stride=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - kernel_size=kernel_size, - groups=groups, - attention=attention, - **kwargs)) - else: # downsample_first=False is for HourglassModule - for i in range(0, num_blocks - 1): - layers.append( - block( - in_channels=in_channels, - out_channels=in_channels, - expansion=self.expansion, - stride=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - kernel_size=kernel_size, - groups=groups, - attention=attention, - **kwargs)) - layers.append( - block( - in_channels=in_channels, - out_channels=out_channels, - expansion=self.expansion, - stride=stride, - downsample=downsample, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - kernel_size=kernel_size, - groups=groups, - attention=attention, - **kwargs)) - - super().__init__(*layers, init_cfg=init_cfg) - - -@MODELS.register_module() -class ViPNAS_ResNet(BaseBackbone): - """ViPNAS_ResNet backbone. - - "ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search" - More details can be found in the `paper - `__ . - - Args: - depth (int): Network depth, from {18, 34, 50, 101, 152}. - in_channels (int): Number of input image channels. Default: 3. - num_stages (int): Stages of the network. Default: 4. - strides (Sequence[int]): Strides of the first block of each stage. - Default: ``(1, 2, 2, 2)``. - dilations (Sequence[int]): Dilation of each stage. - Default: ``(1, 1, 1, 1)``. - out_indices (Sequence[int]): Output from which stages. If only one - stage is specified, a single tensor (feature map) is returned, - otherwise multiple stages are specified, a tuple of tensors will - be returned. Default: ``(3, )``. - style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two - layer is the 3x3 conv layer, otherwise the stride-two layer is - the first 1x1 conv layer. - deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. - Default: False. - avg_down (bool): Use AvgPool instead of stride conv when - downsampling in the bottleneck. Default: False. - frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - -1 means not freezing any parameters. Default: -1. 
- conv_cfg (dict | None): The config dict for conv layers. Default: None. - norm_cfg (dict): The config dict for norm layers. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Default: False. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - zero_init_residual (bool): Whether to use zero init for last norm layer - in resblocks to let them behave as identity. Default: True. - wid (list(int)): Searched width config for each stage. - expan (list(int)): Searched expansion ratio config for each stage. - dep (list(int)): Searched depth config for each stage. - ks (list(int)): Searched kernel size config for each stage. - group (list(int)): Searched group number config for each stage. - att (list(bool)): Searched attention config for each stage. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: - ``[ - dict(type='Normal', std=0.001, layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ]`` - """ - - arch_settings = { - 50: ViPNAS_Bottleneck, - } - - def __init__(self, - depth, - in_channels=3, - num_stages=4, - strides=(1, 2, 2, 2), - dilations=(1, 1, 1, 1), - out_indices=(3, ), - style='pytorch', - deep_stem=False, - avg_down=False, - frozen_stages=-1, - conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=False, - with_cp=False, - zero_init_residual=True, - wid=[48, 80, 160, 304, 608], - expan=[None, 1, 1, 1, 1], - dep=[None, 4, 6, 7, 3], - ks=[7, 3, 5, 5, 5], - group=[None, 16, 16, 16, 16], - att=[None, True, False, True, True], - init_cfg=[ - dict(type='Normal', std=0.001, layer=['Conv2d']), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ]): - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - super().__init__(init_cfg=init_cfg) - if depth not in self.arch_settings: - raise KeyError(f'invalid depth {depth} for resnet') - self.depth = depth - self.stem_channels = dep[0] - self.num_stages = num_stages - assert 1 <= num_stages <= 4 - self.strides = strides - self.dilations = dilations - assert len(strides) == len(dilations) == num_stages - self.out_indices = out_indices - assert max(out_indices) < num_stages - self.style = style - self.deep_stem = deep_stem - self.avg_down = avg_down - self.frozen_stages = frozen_stages - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.with_cp = with_cp - self.norm_eval = norm_eval - self.zero_init_residual = zero_init_residual - self.block = self.arch_settings[depth] - self.stage_blocks = dep[1:1 + num_stages] - - self._make_stem_layer(in_channels, wid[0], ks[0]) - - self.res_layers = [] - _in_channels = wid[0] - for i, num_blocks in enumerate(self.stage_blocks): - expansion = get_expansion(self.block, expan[i + 1]) - _out_channels = wid[i + 1] * expansion - stride = strides[i] - dilation = dilations[i] - res_layer = self.make_res_layer( - block=self.block, - num_blocks=num_blocks, - in_channels=_in_channels, - out_channels=_out_channels, - expansion=expansion, - stride=stride, - dilation=dilation, - style=self.style, - avg_down=self.avg_down, - with_cp=with_cp, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - kernel_size=ks[i + 1], - groups=group[i + 1], - attention=att[i + 1]) - _in_channels = _out_channels - layer_name = f'layer{i + 1}' - self.add_module(layer_name, res_layer) - self.res_layers.append(layer_name) 
- - self._freeze_stages() - - self.feat_dim = res_layer[-1].out_channels - - def make_res_layer(self, **kwargs): - """Make a ViPNAS ResLayer.""" - return ViPNAS_ResLayer(**kwargs) - - @property - def norm1(self): - """nn.Module: the normalization layer named "norm1" """ - return getattr(self, self.norm1_name) - - def _make_stem_layer(self, in_channels, stem_channels, kernel_size): - """Make stem layer.""" - if self.deep_stem: - self.stem = nn.Sequential( - ConvModule( - in_channels, - stem_channels // 2, - kernel_size=3, - stride=2, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - inplace=True), - ConvModule( - stem_channels // 2, - stem_channels // 2, - kernel_size=3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - inplace=True), - ConvModule( - stem_channels // 2, - stem_channels, - kernel_size=3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - inplace=True)) - else: - self.conv1 = build_conv_layer( - self.conv_cfg, - in_channels, - stem_channels, - kernel_size=kernel_size, - stride=2, - padding=kernel_size // 2, - bias=False) - self.norm1_name, norm1 = build_norm_layer( - self.norm_cfg, stem_channels, postfix=1) - self.add_module(self.norm1_name, norm1) - self.relu = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - - def _freeze_stages(self): - """Freeze parameters.""" - if self.frozen_stages >= 0: - if self.deep_stem: - self.stem.eval() - for param in self.stem.parameters(): - param.requires_grad = False - else: - self.norm1.eval() - for m in [self.conv1, self.norm1]: - for param in m.parameters(): - param.requires_grad = False - - for i in range(1, self.frozen_stages + 1): - m = getattr(self, f'layer{i}') - m.eval() - for param in m.parameters(): - param.requires_grad = False - - def forward(self, x): - """Forward function.""" - if self.deep_stem: - x = self.stem(x) - else: - x = self.conv1(x) - x = self.norm1(x) - x = self.relu(x) - x = self.maxpool(x) - outs = [] - for i, layer_name in enumerate(self.res_layers): - res_layer = getattr(self, layer_name) - x = res_layer(x) - if i in self.out_indices: - outs.append(x) - return tuple(outs) - - def train(self, mode=True): - """Convert the model into training mode.""" - super().train(mode) - self._freeze_stages() - if mode and self.norm_eval: - for m in self.modules(): - # trick: eval have effect on BatchNorm only - if isinstance(m, _BatchNorm): - m.eval() +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule, build_conv_layer, build_norm_layer +from mmcv.cnn.bricks import ContextBlock +from mmengine.model import BaseModule, Sequential +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmpose.registry import MODELS +from .base_backbone import BaseBackbone + + +class ViPNAS_Bottleneck(BaseModule): + """Bottleneck block for ViPNAS_ResNet. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + expansion (int): The ratio of ``out_channels/mid_channels`` where + ``mid_channels`` is the input/output channels of conv2. Default: 4. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module): downsample operation on identity branch. + Default: None. + style (str): ``"pytorch"`` or ``"caffe"``. 
If set to "pytorch", the + stride-two layer is the 3x3 conv layer, otherwise the stride-two + layer is the first 1x1 conv layer. Default: "pytorch". + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + kernel_size (int): kernel size of conv2 searched in ViPANS. + groups (int): group number of conv2 searched in ViPNAS. + attention (bool): whether to use attention module in the end of + the block. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels, + out_channels, + expansion=4, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + kernel_size=3, + groups=1, + attention=False, + init_cfg=None): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + assert style in ['pytorch', 'caffe'] + + self.in_channels = in_channels + self.out_channels = out_channels + self.expansion = expansion + assert out_channels % expansion == 0 + self.mid_channels = out_channels // expansion + self.stride = stride + self.dilation = dilation + self.style = style + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + if self.style == 'pytorch': + self.conv1_stride = 1 + self.conv2_stride = stride + else: + self.conv1_stride = stride + self.conv2_stride = 1 + + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + norm_cfg, self.mid_channels, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + norm_cfg, out_channels, postfix=3) + + self.conv1 = build_conv_layer( + conv_cfg, + in_channels, + self.mid_channels, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + conv_cfg, + self.mid_channels, + self.mid_channels, + kernel_size=kernel_size, + stride=self.conv2_stride, + padding=kernel_size // 2, + groups=groups, + dilation=dilation, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + conv_cfg, + self.mid_channels, + out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + if attention: + self.attention = ContextBlock(out_channels, + max(1.0 / 16, 16.0 / out_channels)) + else: + self.attention = None + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: the normalization layer named "norm2" """ + return getattr(self, self.norm2_name) + + @property + def norm3(self): + """nn.Module: the normalization layer named "norm3" """ + return getattr(self, self.norm3_name) + + def forward(self, x): + """Forward function.""" + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.norm3(out) + + if self.attention is not None: + out = self.attention(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if 
self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +def get_expansion(block, expansion=None): + """Get the expansion of a residual block. + + The block expansion will be obtained by the following order: + + 1. If ``expansion`` is given, just return it. + 2. If ``block`` has the attribute ``expansion``, then return + ``block.expansion``. + 3. Return the default value according the the block type: + 4 for ``ViPNAS_Bottleneck``. + + Args: + block (class): The block class. + expansion (int | None): The given expansion ratio. + + Returns: + int: The expansion of the block. + """ + if isinstance(expansion, int): + assert expansion > 0 + elif expansion is None: + if hasattr(block, 'expansion'): + expansion = block.expansion + elif issubclass(block, ViPNAS_Bottleneck): + expansion = 1 + else: + raise TypeError(f'expansion is not specified for {block.__name__}') + else: + raise TypeError('expansion must be an integer or None') + + return expansion + + +class ViPNAS_ResLayer(Sequential): + """ViPNAS_ResLayer to build ResNet style backbone. + + Args: + block (nn.Module): Residual block used to build ViPNAS ResLayer. + num_blocks (int): Number of blocks. + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + expansion (int, optional): The expansion for BasicBlock/Bottleneck. + If not specified, it will firstly be obtained via + ``block.expansion``. If the block has no attribute "expansion", + the following default values will be used: 1 for BasicBlock and + 4 for Bottleneck. Default: None. + stride (int): stride of the first block. Default: 1. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + downsample_first (bool): Downsample at the first block or last block. + False for Hourglass, True for ResNet. Default: True + kernel_size (int): Kernel Size of the corresponding convolution layer + searched in the block. + groups (int): Group number of the corresponding convolution layer + searched in the block. + attention (bool): Whether to use attention module in the end of the + block. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + block, + num_blocks, + in_channels, + out_channels, + expansion=None, + stride=1, + avg_down=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + downsample_first=True, + kernel_size=3, + groups=1, + attention=False, + init_cfg=None, + **kwargs): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + self.block = block + self.expansion = get_expansion(block, expansion) + + downsample = None + if stride != 1 or in_channels != out_channels: + downsample = [] + conv_stride = stride + if avg_down and stride != 1: + conv_stride = 1 + downsample.append( + nn.AvgPool2d( + kernel_size=stride, + stride=stride, + ceil_mode=True, + count_include_pad=False)) + downsample.extend([ + build_conv_layer( + conv_cfg, + in_channels, + out_channels, + kernel_size=1, + stride=conv_stride, + bias=False), + build_norm_layer(norm_cfg, out_channels)[1] + ]) + downsample = nn.Sequential(*downsample) + + layers = [] + if downsample_first: + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + expansion=self.expansion, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + kernel_size=kernel_size, + groups=groups, + attention=attention, + **kwargs)) + in_channels = out_channels + for _ in range(1, num_blocks): + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + expansion=self.expansion, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + kernel_size=kernel_size, + groups=groups, + attention=attention, + **kwargs)) + else: # downsample_first=False is for HourglassModule + for i in range(0, num_blocks - 1): + layers.append( + block( + in_channels=in_channels, + out_channels=in_channels, + expansion=self.expansion, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + kernel_size=kernel_size, + groups=groups, + attention=attention, + **kwargs)) + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + expansion=self.expansion, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + kernel_size=kernel_size, + groups=groups, + attention=attention, + **kwargs)) + + super().__init__(*layers, init_cfg=init_cfg) + + +@MODELS.register_module() +class ViPNAS_ResNet(BaseBackbone): + """ViPNAS_ResNet backbone. + + "ViPNAS: Efficient Video Pose Estimation via Neural Architecture Search" + More details can be found in the `paper + `__ . + + Args: + depth (int): Network depth, from {18, 34, 50, 101, 152}. + in_channels (int): Number of input image channels. Default: 3. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. 
+ conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + wid (list(int)): Searched width config for each stage. + expan (list(int)): Searched expansion ratio config for each stage. + dep (list(int)): Searched depth config for each stage. + ks (list(int)): Searched kernel size config for each stage. + group (list(int)): Searched group number config for each stage. + att (list(bool)): Searched attention config for each stage. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: + ``[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]`` + """ + + arch_settings = { + 50: ViPNAS_Bottleneck, + } + + def __init__(self, + depth, + in_channels=3, + num_stages=4, + strides=(1, 2, 2, 2), + dilations=(1, 1, 1, 1), + out_indices=(3, ), + style='pytorch', + deep_stem=False, + avg_down=False, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=False, + with_cp=False, + zero_init_residual=True, + wid=[48, 80, 160, 304, 608], + expan=[None, 1, 1, 1, 1], + dep=[None, 4, 6, 7, 3], + ks=[7, 3, 5, 5, 5], + group=[None, 16, 16, 16, 16], + att=[None, True, False, True, True], + init_cfg=[ + dict(type='Normal', std=0.001, layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]): + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + super().__init__(init_cfg=init_cfg) + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for resnet') + self.depth = depth + self.stem_channels = dep[0] + self.num_stages = num_stages + assert 1 <= num_stages <= 4 + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == num_stages + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.style = style + self.deep_stem = deep_stem + self.avg_down = avg_down + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + self.zero_init_residual = zero_init_residual + self.block = self.arch_settings[depth] + self.stage_blocks = dep[1:1 + num_stages] + + self._make_stem_layer(in_channels, wid[0], ks[0]) + + self.res_layers = [] + _in_channels = wid[0] + for i, num_blocks in enumerate(self.stage_blocks): + expansion = get_expansion(self.block, expan[i + 1]) + _out_channels = wid[i + 1] * expansion + stride = strides[i] + dilation = dilations[i] + res_layer = self.make_res_layer( + block=self.block, + num_blocks=num_blocks, + in_channels=_in_channels, + out_channels=_out_channels, + expansion=expansion, + stride=stride, + dilation=dilation, + style=self.style, + avg_down=self.avg_down, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + kernel_size=ks[i + 1], + groups=group[i + 1], + attention=att[i + 1]) + _in_channels = _out_channels + layer_name = f'layer{i + 1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) 
+ + self._freeze_stages() + + self.feat_dim = res_layer[-1].out_channels + + def make_res_layer(self, **kwargs): + """Make a ViPNAS ResLayer.""" + return ViPNAS_ResLayer(**kwargs) + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + def _make_stem_layer(self, in_channels, stem_channels, kernel_size): + """Make stem layer.""" + if self.deep_stem: + self.stem = nn.Sequential( + ConvModule( + in_channels, + stem_channels // 2, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=True), + ConvModule( + stem_channels // 2, + stem_channels // 2, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=True), + ConvModule( + stem_channels // 2, + stem_channels, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=True)) + else: + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + stem_channels, + kernel_size=kernel_size, + stride=2, + padding=kernel_size // 2, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, stem_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + def _freeze_stages(self): + """Freeze parameters.""" + if self.frozen_stages >= 0: + if self.deep_stem: + self.stem.eval() + for param in self.stem.parameters(): + param.requires_grad = False + else: + self.norm1.eval() + for m in [self.conv1, self.norm1]: + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, f'layer{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def forward(self, x): + """Forward function.""" + if self.deep_stem: + x = self.stem(x) + else: + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.maxpool(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) + + def train(self, mode=True): + """Convert the model into training mode.""" + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmpose/models/builder.py b/mmpose/models/builder.py index cefaedc291..1bcfc060ad 100644 --- a/mmpose/models/builder.py +++ b/mmpose/models/builder.py @@ -1,43 +1,43 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -from mmpose.registry import MODELS - -BACKBONES = MODELS -NECKS = MODELS -HEADS = MODELS -LOSSES = MODELS -POSE_ESTIMATORS = MODELS - - -def build_backbone(cfg): - """Build backbone.""" - return BACKBONES.build(cfg) - - -def build_neck(cfg): - """Build neck.""" - return NECKS.build(cfg) - - -def build_head(cfg): - """Build head.""" - return HEADS.build(cfg) - - -def build_loss(cfg): - """Build loss.""" - return LOSSES.build(cfg) - - -def build_pose_estimator(cfg): - """Build pose estimator.""" - return POSE_ESTIMATORS.build(cfg) - - -def build_posenet(cfg): - """Build posenet.""" - warnings.warn( - '``build_posenet`` will be deprecated soon, ' - 'please use ``build_pose_estimator`` instead.', DeprecationWarning) - return build_pose_estimator(cfg) +# Copyright (c) OpenMMLab. All rights reserved. 
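And the ResNet-style counterpart from vipnas_resnet.py above; only the searched depth-50 setting exists in arch_settings, and the input size is again illustrative:

import torch
from mmpose.models.backbones import ViPNAS_ResNet

backbone = ViPNAS_ResNet(depth=50)
backbone.eval()
with torch.no_grad():
    feat, = backbone(torch.randn(1, 3, 256, 192))
# stem (stride 2) + maxpool (stride 2) + stage strides (1, 2, 2, 2) -> stride 32,
# so the default out_indices=(3,) output is (1, 608, 8, 6) with wid[-1] = 608 channels
print(feat.shape)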
+import warnings + +from mmpose.registry import MODELS + +BACKBONES = MODELS +NECKS = MODELS +HEADS = MODELS +LOSSES = MODELS +POSE_ESTIMATORS = MODELS + + +def build_backbone(cfg): + """Build backbone.""" + return BACKBONES.build(cfg) + + +def build_neck(cfg): + """Build neck.""" + return NECKS.build(cfg) + + +def build_head(cfg): + """Build head.""" + return HEADS.build(cfg) + + +def build_loss(cfg): + """Build loss.""" + return LOSSES.build(cfg) + + +def build_pose_estimator(cfg): + """Build pose estimator.""" + return POSE_ESTIMATORS.build(cfg) + + +def build_posenet(cfg): + """Build posenet.""" + warnings.warn( + '``build_posenet`` will be deprecated soon, ' + 'please use ``build_pose_estimator`` instead.', DeprecationWarning) + return build_pose_estimator(cfg) diff --git a/mmpose/models/data_preprocessors/__init__.py b/mmpose/models/data_preprocessors/__init__.py index 7c9bd22e2b..77fc080fc9 100644 --- a/mmpose/models/data_preprocessors/__init__.py +++ b/mmpose/models/data_preprocessors/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .data_preprocessor import PoseDataPreprocessor - -__all__ = ['PoseDataPreprocessor'] +# Copyright (c) OpenMMLab. All rights reserved. +from .data_preprocessor import PoseDataPreprocessor + +__all__ = ['PoseDataPreprocessor'] diff --git a/mmpose/models/data_preprocessors/data_preprocessor.py b/mmpose/models/data_preprocessors/data_preprocessor.py index bcfe54ab59..572151a81c 100644 --- a/mmpose/models/data_preprocessors/data_preprocessor.py +++ b/mmpose/models/data_preprocessors/data_preprocessor.py @@ -1,9 +1,9 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmengine.model import ImgDataPreprocessor - -from mmpose.registry import MODELS - - -@MODELS.register_module() -class PoseDataPreprocessor(ImgDataPreprocessor): - """Image pre-processor for pose estimation tasks.""" +# Copyright (c) OpenMMLab. All rights reserved. +from mmengine.model import ImgDataPreprocessor + +from mmpose.registry import MODELS + + +@MODELS.register_module() +class PoseDataPreprocessor(ImgDataPreprocessor): + """Image pre-processor for pose estimation tasks.""" diff --git a/mmpose/models/heads/__init__.py b/mmpose/models/heads/__init__.py index e01f2269e3..8631b0def8 100644 --- a/mmpose/models/heads/__init__.py +++ b/mmpose/models/heads/__init__.py @@ -1,17 +1,17 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .base_head import BaseHead -from .coord_cls_heads import RTMCCHead, SimCCHead -from .heatmap_heads import (AssociativeEmbeddingHead, CIDHead, CPMHead, - HeatmapHead, MSPNHead, ViPNASHead) -from .hybrid_heads import DEKRHead, VisPredictHead -from .regression_heads import (DSNTHead, IntegralRegressionHead, - RegressionHead, RLEHead, TemporalRegressionHead, - TrajectoryRegressionHead) - -__all__ = [ - 'BaseHead', 'HeatmapHead', 'CPMHead', 'MSPNHead', 'ViPNASHead', - 'RegressionHead', 'IntegralRegressionHead', 'SimCCHead', 'RLEHead', - 'DSNTHead', 'AssociativeEmbeddingHead', 'DEKRHead', 'VisPredictHead', - 'CIDHead', 'RTMCCHead', 'TemporalRegressionHead', - 'TrajectoryRegressionHead' -] +# Copyright (c) OpenMMLab. All rights reserved. 
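The builder wrappers in builder.py above are thin aliases of the shared MODELS registry, so components are normally created from plain config dicts. A small sketch; the loss type name below is assumed to be registered in MMPose and is used only as an example:

from mmpose.models import build_backbone, build_loss

# the type strings resolve through the MODELS registry,
# e.g. the backbones registered earlier in this diff
backbone = build_backbone(dict(type='ViPNAS_ResNet', depth=50))
loss = build_loss(dict(type='KeypointMSELoss', use_target_weight=True))

# build_posenet(cfg) still works but emits a DeprecationWarning and simply
# forwards to build_pose_estimator(cfg)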
+from .base_head import BaseHead +from .coord_cls_heads import RTMCCHead, SimCCHead +from .heatmap_heads import (AssociativeEmbeddingHead, CIDHead, CPMHead, + HeatmapHead, MSPNHead, ViPNASHead) +from .hybrid_heads import DEKRHead, VisPredictHead +from .regression_heads import (DSNTHead, IntegralRegressionHead, + RegressionHead, RLEHead, TemporalRegressionHead, + TrajectoryRegressionHead) + +__all__ = [ + 'BaseHead', 'HeatmapHead', 'CPMHead', 'MSPNHead', 'ViPNASHead', + 'RegressionHead', 'IntegralRegressionHead', 'SimCCHead', 'RLEHead', + 'DSNTHead', 'AssociativeEmbeddingHead', 'DEKRHead', 'VisPredictHead', + 'CIDHead', 'RTMCCHead', 'TemporalRegressionHead', + 'TrajectoryRegressionHead' +] diff --git a/mmpose/models/heads/base_head.py b/mmpose/models/heads/base_head.py index 14882db243..da9c765740 100644 --- a/mmpose/models/heads/base_head.py +++ b/mmpose/models/heads/base_head.py @@ -1,83 +1,83 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from abc import ABCMeta, abstractmethod -from typing import Tuple, Union - -from mmengine.model import BaseModule -from mmengine.structures import InstanceData -from torch import Tensor - -from mmpose.utils.tensor_utils import to_numpy -from mmpose.utils.typing import (Features, InstanceList, OptConfigType, - OptSampleList, Predictions) - - -class BaseHead(BaseModule, metaclass=ABCMeta): - """Base head. A subclass should override :meth:`predict` and :meth:`loss`. - - Args: - init_cfg (dict, optional): The extra init config of layers. - Defaults to None. - """ - - @abstractmethod - def forward(self, feats: Tuple[Tensor]): - """Forward the network.""" - - @abstractmethod - def predict(self, - feats: Features, - batch_data_samples: OptSampleList, - test_cfg: OptConfigType = {}) -> Predictions: - """Predict results from features.""" - - @abstractmethod - def loss(self, - feats: Tuple[Tensor], - batch_data_samples: OptSampleList, - train_cfg: OptConfigType = {}) -> dict: - """Calculate losses from a batch of inputs and data samples.""" - - def decode(self, batch_outputs: Union[Tensor, - Tuple[Tensor]]) -> InstanceList: - """Decode keypoints from outputs. - - Args: - batch_outputs (Tensor | Tuple[Tensor]): The network outputs of - a data batch - - Returns: - List[InstanceData]: A list of InstanceData, each contains the - decoded pose information of the instances of one data sample. - """ - - def _pack_and_call(args, func): - if not isinstance(args, tuple): - args = (args, ) - return func(*args) - - if self.decoder is None: - raise RuntimeError( - f'The decoder has not been set in {self.__class__.__name__}. ' - 'Please set the decoder configs in the init parameters to ' - 'enable head methods `head.predict()` and `head.decode()`') - - if self.decoder.support_batch_decoding: - batch_keypoints, batch_scores = _pack_and_call( - batch_outputs, self.decoder.batch_decode) - - else: - batch_output_np = to_numpy(batch_outputs, unzip=True) - batch_keypoints = [] - batch_scores = [] - for outputs in batch_output_np: - keypoints, scores = _pack_and_call(outputs, - self.decoder.decode) - batch_keypoints.append(keypoints) - batch_scores.append(scores) - - preds = [ - InstanceData(keypoints=keypoints, keypoint_scores=scores) - for keypoints, scores in zip(batch_keypoints, batch_scores) - ] - - return preds +# Copyright (c) OpenMMLab. All rights reserved. 
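PoseDataPreprocessor, shown a little above, only subclasses mmengine's ImgDataPreprocessor, so it takes the usual normalization arguments; the mean/std values below are the common ImageNet statistics and the batch layout assumes the standard mmengine interface:

import torch
from mmpose.models.data_preprocessors import PoseDataPreprocessor

preprocessor = PoseDataPreprocessor(
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    bgr_to_rgb=True)

# a batch is a dict with 'inputs' (list of uint8 CHW images) and optional data samples
batch = dict(inputs=[torch.randint(0, 256, (3, 256, 192), dtype=torch.uint8)])
out = preprocessor(batch)
print(out['inputs'].shape)  # stacked, converted and normalized: (1, 3, 256, 192)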
+from abc import ABCMeta, abstractmethod +from typing import Tuple, Union + +from mmengine.model import BaseModule +from mmengine.structures import InstanceData +from torch import Tensor + +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (Features, InstanceList, OptConfigType, + OptSampleList, Predictions) + + +class BaseHead(BaseModule, metaclass=ABCMeta): + """Base head. A subclass should override :meth:`predict` and :meth:`loss`. + + Args: + init_cfg (dict, optional): The extra init config of layers. + Defaults to None. + """ + + @abstractmethod + def forward(self, feats: Tuple[Tensor]): + """Forward the network.""" + + @abstractmethod + def predict(self, + feats: Features, + batch_data_samples: OptSampleList, + test_cfg: OptConfigType = {}) -> Predictions: + """Predict results from features.""" + + @abstractmethod + def loss(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: OptConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + def decode(self, batch_outputs: Union[Tensor, + Tuple[Tensor]]) -> InstanceList: + """Decode keypoints from outputs. + + Args: + batch_outputs (Tensor | Tuple[Tensor]): The network outputs of + a data batch + + Returns: + List[InstanceData]: A list of InstanceData, each contains the + decoded pose information of the instances of one data sample. + """ + + def _pack_and_call(args, func): + if not isinstance(args, tuple): + args = (args, ) + return func(*args) + + if self.decoder is None: + raise RuntimeError( + f'The decoder has not been set in {self.__class__.__name__}. ' + 'Please set the decoder configs in the init parameters to ' + 'enable head methods `head.predict()` and `head.decode()`') + + if self.decoder.support_batch_decoding: + batch_keypoints, batch_scores = _pack_and_call( + batch_outputs, self.decoder.batch_decode) + + else: + batch_output_np = to_numpy(batch_outputs, unzip=True) + batch_keypoints = [] + batch_scores = [] + for outputs in batch_output_np: + keypoints, scores = _pack_and_call(outputs, + self.decoder.decode) + batch_keypoints.append(keypoints) + batch_scores.append(scores) + + preds = [ + InstanceData(keypoints=keypoints, keypoint_scores=scores) + for keypoints, scores in zip(batch_keypoints, batch_scores) + ] + + return preds diff --git a/mmpose/models/heads/coord_cls_heads/__init__.py b/mmpose/models/heads/coord_cls_heads/__init__.py index 104ff91308..108b4795f3 100644 --- a/mmpose/models/heads/coord_cls_heads/__init__.py +++ b/mmpose/models/heads/coord_cls_heads/__init__.py @@ -1,5 +1,5 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .rtmcc_head import RTMCCHead -from .simcc_head import SimCCHead - -__all__ = ['SimCCHead', 'RTMCCHead'] +# Copyright (c) OpenMMLab. All rights reserved. +from .rtmcc_head import RTMCCHead +from .simcc_head import SimCCHead + +__all__ = ['SimCCHead', 'RTMCCHead'] diff --git a/mmpose/models/heads/coord_cls_heads/rtmcc_head.py b/mmpose/models/heads/coord_cls_heads/rtmcc_head.py index 5df0733c48..f4ef8513aa 100644 --- a/mmpose/models/heads/coord_cls_heads/rtmcc_head.py +++ b/mmpose/models/heads/coord_cls_heads/rtmcc_head.py @@ -1,303 +1,303 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
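To make the BaseHead contract above concrete, here is a deliberately tiny, hypothetical subclass; the class name, its single linear layer and the placeholder loss are all made up for illustration, and a real head would additionally build self.decoder from a codec config so that decode() can be used:

from typing import Tuple

import torch
from torch import Tensor, nn

from mmpose.models.heads import BaseHead


class DummyRegressionHead(BaseHead):
    """Toy head that regresses (x, y) coordinates from pooled features."""

    def __init__(self, in_channels: int, num_keypoints: int, init_cfg=None):
        super().__init__(init_cfg=init_cfg)
        self.num_keypoints = num_keypoints
        self.fc = nn.Linear(in_channels, num_keypoints * 2)
        # BaseHead.decode() looks up self.decoder; a real head builds it
        # from a codec config (KEYPOINT_CODECS). Here it is left unset.
        self.decoder = None

    def forward(self, feats: Tuple[Tensor]) -> Tensor:
        x = feats[-1].mean(dim=(2, 3))  # global average pooling -> (B, C)
        return self.fc(x).reshape(-1, self.num_keypoints, 2)

    def predict(self, feats, batch_data_samples, test_cfg={}):
        # with a decoder configured, this would return self.decode(outputs)
        return self.forward(feats)

    def loss(self, feats, batch_data_samples, train_cfg={}) -> dict:
        coords = self.forward(feats)
        return dict(loss_kpt=coords.new_zeros(()))  # placeholder, not a real loss


head = DummyRegressionHead(in_channels=32, num_keypoints=17)
print(head((torch.randn(2, 32, 8, 6), )).shape)  # (2, 17, 2)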
-import warnings -from typing import Optional, Sequence, Tuple, Union - -import torch -from mmengine.dist import get_dist_info -from mmengine.structures import PixelData -from torch import Tensor, nn - -from mmpose.codecs.utils import get_simcc_normalized -from mmpose.evaluation.functional import simcc_pck_accuracy -from mmpose.models.utils.rtmcc_block import RTMCCBlock, ScaleNorm -from mmpose.models.utils.tta import flip_vectors -from mmpose.registry import KEYPOINT_CODECS, MODELS -from mmpose.utils.tensor_utils import to_numpy -from mmpose.utils.typing import (ConfigType, InstanceList, OptConfigType, - OptSampleList) -from ..base_head import BaseHead - -OptIntSeq = Optional[Sequence[int]] - - -@MODELS.register_module() -class RTMCCHead(BaseHead): - """Top-down head introduced in RTMPose (2023). The head is composed of a - large-kernel convolutional layer, a fully-connected layer and a Gated - Attention Unit to generate 1d representation from low-resolution feature - maps. - - Args: - in_channels (int | sequence[int]): Number of channels in the input - feature map. - out_channels (int): Number of channels in the output heatmap. - input_size (tuple): Size of input image in shape [w, h]. - in_featuremap_size (int | sequence[int]): Size of input feature map. - simcc_split_ratio (float): Split ratio of pixels. - Default: 2.0. - final_layer_kernel_size (int): Kernel size of the convolutional layer. - Default: 1. - gau_cfg (Config): Config dict for the Gated Attention Unit. - Default: dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='ReLU', - use_rel_bias=False, - pos_enc=False). - loss (Config): Config of the keypoint loss. Defaults to use - :class:`KLDiscretLoss` - decoder (Config, optional): The decoder config that controls decoding - keypoint coordinates from the network output. Defaults to ``None`` - init_cfg (Config, optional): Config to control the initialization. 
See - :attr:`default_init_cfg` for default settings - """ - - def __init__( - self, - in_channels: Union[int, Sequence[int]], - out_channels: int, - input_size: Tuple[int, int], - in_featuremap_size: Tuple[int, int], - simcc_split_ratio: float = 2.0, - final_layer_kernel_size: int = 1, - gau_cfg: ConfigType = dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='ReLU', - use_rel_bias=False, - pos_enc=False), - loss: ConfigType = dict(type='KLDiscretLoss', use_target_weight=True), - decoder: OptConfigType = None, - init_cfg: OptConfigType = None, - ): - - if init_cfg is None: - init_cfg = self.default_init_cfg - - super().__init__(init_cfg) - - self.in_channels = in_channels - self.out_channels = out_channels - self.input_size = input_size - self.in_featuremap_size = in_featuremap_size - self.simcc_split_ratio = simcc_split_ratio - - self.loss_module = MODELS.build(loss) - if decoder is not None: - self.decoder = KEYPOINT_CODECS.build(decoder) - else: - self.decoder = None - - if isinstance(in_channels, (tuple, list)): - raise ValueError( - f'{self.__class__.__name__} does not support selecting ' - 'multiple input features.') - - # Define SimCC layers - flatten_dims = self.in_featuremap_size[0] * self.in_featuremap_size[1] - - self.final_layer = nn.Conv2d( - in_channels, - out_channels, - kernel_size=final_layer_kernel_size, - stride=1, - padding=final_layer_kernel_size // 2) - self.mlp = nn.Sequential( - ScaleNorm(flatten_dims), - nn.Linear(flatten_dims, gau_cfg['hidden_dims'], bias=False)) - - W = int(self.input_size[0] * self.simcc_split_ratio) - H = int(self.input_size[1] * self.simcc_split_ratio) - - self.gau = RTMCCBlock( - self.out_channels, - gau_cfg['hidden_dims'], - gau_cfg['hidden_dims'], - s=gau_cfg['s'], - expansion_factor=gau_cfg['expansion_factor'], - dropout_rate=gau_cfg['dropout_rate'], - drop_path=gau_cfg['drop_path'], - attn_type='self-attn', - act_fn=gau_cfg['act_fn'], - use_rel_bias=gau_cfg['use_rel_bias'], - pos_enc=gau_cfg['pos_enc']) - - self.cls_x = nn.Linear(gau_cfg['hidden_dims'], W, bias=False) - self.cls_y = nn.Linear(gau_cfg['hidden_dims'], H, bias=False) - - def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor, Tensor]: - """Forward the network. - - The input is the featuremap extracted by backbone and the - output is the simcc representation. - - Args: - feats (Tuple[Tensor]): Multi scale feature maps. - - Returns: - pred_x (Tensor): 1d representation of x. - pred_y (Tensor): 1d representation of y. - """ - feats = feats[-1] - - feats = self.final_layer(feats) # -> B, K, H, W - - # flatten the output heatmap - feats = torch.flatten(feats, 2) - - feats = self.mlp(feats) # -> B, K, hidden - - feats = self.gau(feats) - - pred_x = self.cls_x(feats) - pred_y = self.cls_y(feats) - - return pred_x, pred_y - - def predict( - self, - feats: Tuple[Tensor], - batch_data_samples: OptSampleList, - test_cfg: OptConfigType = {}, - ) -> InstanceList: - """Predict results from features. - - Args: - feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage - features (or multiple multi-stage features in TTA) - batch_data_samples (List[:obj:`PoseDataSample`]): The batch - data samples - test_cfg (dict): The runtime config for testing process. 
Defaults - to {} - - Returns: - List[InstanceData]: The pose predictions, each contains - the following fields: - - keypoints (np.ndarray): predicted keypoint coordinates in - shape (num_instances, K, D) where K is the keypoint number - and D is the keypoint dimension - - keypoint_scores (np.ndarray): predicted keypoint scores in - shape (num_instances, K) - - keypoint_x_labels (np.ndarray, optional): The predicted 1-D - intensity distribution in the x direction - - keypoint_y_labels (np.ndarray, optional): The predicted 1-D - intensity distribution in the y direction - """ - - if test_cfg.get('flip_test', False): - # TTA: flip test -> feats = [orig, flipped] - assert isinstance(feats, list) and len(feats) == 2 - flip_indices = batch_data_samples[0].metainfo['flip_indices'] - _feats, _feats_flip = feats - - _batch_pred_x, _batch_pred_y = self.forward(_feats) - - _batch_pred_x_flip, _batch_pred_y_flip = self.forward(_feats_flip) - _batch_pred_x_flip, _batch_pred_y_flip = flip_vectors( - _batch_pred_x_flip, - _batch_pred_y_flip, - flip_indices=flip_indices) - - batch_pred_x = (_batch_pred_x + _batch_pred_x_flip) * 0.5 - batch_pred_y = (_batch_pred_y + _batch_pred_y_flip) * 0.5 - else: - batch_pred_x, batch_pred_y = self.forward(feats) - - preds = self.decode((batch_pred_x, batch_pred_y)) - - if test_cfg.get('output_heatmaps', False): - rank, _ = get_dist_info() - if rank == 0: - warnings.warn('The predicted simcc values are normalized for ' - 'visualization. This may cause discrepancy ' - 'between the keypoint scores and the 1D heatmaps' - '.') - - # normalize the predicted 1d distribution - batch_pred_x = get_simcc_normalized(batch_pred_x) - batch_pred_y = get_simcc_normalized(batch_pred_y) - - B, K, _ = batch_pred_x.shape - # B, K, Wx -> B, K, Wx, 1 - x = batch_pred_x.reshape(B, K, 1, -1) - # B, K, Wy -> B, K, 1, Wy - y = batch_pred_y.reshape(B, K, -1, 1) - # B, K, Wx, Wy - batch_heatmaps = torch.matmul(y, x) - pred_fields = [ - PixelData(heatmaps=hm) for hm in batch_heatmaps.detach() - ] - - for pred_instances, pred_x, pred_y in zip(preds, - to_numpy(batch_pred_x), - to_numpy(batch_pred_y)): - - pred_instances.keypoint_x_labels = pred_x[None] - pred_instances.keypoint_y_labels = pred_y[None] - - return preds, pred_fields - else: - return preds - - def loss( - self, - feats: Tuple[Tensor], - batch_data_samples: OptSampleList, - train_cfg: OptConfigType = {}, - ) -> dict: - """Calculate losses from a batch of inputs and data samples.""" - - pred_x, pred_y = self.forward(feats) - - gt_x = torch.cat([ - d.gt_instance_labels.keypoint_x_labels for d in batch_data_samples - ], - dim=0) - gt_y = torch.cat([ - d.gt_instance_labels.keypoint_y_labels for d in batch_data_samples - ], - dim=0) - keypoint_weights = torch.cat( - [ - d.gt_instance_labels.keypoint_weights - for d in batch_data_samples - ], - dim=0, - ) - - pred_simcc = (pred_x, pred_y) - gt_simcc = (gt_x, gt_y) - - # calculate losses - losses = dict() - loss = self.loss_module(pred_simcc, gt_simcc, keypoint_weights) - - losses.update(loss_kpt=loss) - - # calculate accuracy - _, avg_acc, _ = simcc_pck_accuracy( - output=to_numpy(pred_simcc), - target=to_numpy(gt_simcc), - simcc_split_ratio=self.simcc_split_ratio, - mask=to_numpy(keypoint_weights) > 0, - ) - - acc_pose = torch.tensor(avg_acc, device=gt_x.device) - losses.update(acc_pose=acc_pose) - - return losses - - @property - def default_init_cfg(self): - init_cfg = [ - dict(type='Normal', layer=['Conv2d'], std=0.001), - dict(type='Constant', layer='BatchNorm2d', val=1), - 
dict(type='Normal', layer=['Linear'], std=0.01, bias=0), - ] - return init_cfg +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from typing import Optional, Sequence, Tuple, Union + +import torch +from mmengine.dist import get_dist_info +from mmengine.structures import PixelData +from torch import Tensor, nn + +from mmpose.codecs.utils import get_simcc_normalized +from mmpose.evaluation.functional import simcc_pck_accuracy +from mmpose.models.utils.rtmcc_block import RTMCCBlock, ScaleNorm +from mmpose.models.utils.tta import flip_vectors +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, InstanceList, OptConfigType, + OptSampleList) +from ..base_head import BaseHead + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class RTMCCHead(BaseHead): + """Top-down head introduced in RTMPose (2023). The head is composed of a + large-kernel convolutional layer, a fully-connected layer and a Gated + Attention Unit to generate 1d representation from low-resolution feature + maps. + + Args: + in_channels (int | sequence[int]): Number of channels in the input + feature map. + out_channels (int): Number of channels in the output heatmap. + input_size (tuple): Size of input image in shape [w, h]. + in_featuremap_size (int | sequence[int]): Size of input feature map. + simcc_split_ratio (float): Split ratio of pixels. + Default: 2.0. + final_layer_kernel_size (int): Kernel size of the convolutional layer. + Default: 1. + gau_cfg (Config): Config dict for the Gated Attention Unit. + Default: dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='ReLU', + use_rel_bias=False, + pos_enc=False). + loss (Config): Config of the keypoint loss. Defaults to use + :class:`KLDiscretLoss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. 
See + :attr:`default_init_cfg` for default settings + """ + + def __init__( + self, + in_channels: Union[int, Sequence[int]], + out_channels: int, + input_size: Tuple[int, int], + in_featuremap_size: Tuple[int, int], + simcc_split_ratio: float = 2.0, + final_layer_kernel_size: int = 1, + gau_cfg: ConfigType = dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='ReLU', + use_rel_bias=False, + pos_enc=False), + loss: ConfigType = dict(type='KLDiscretLoss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None, + ): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.in_channels = in_channels + self.out_channels = out_channels + self.input_size = input_size + self.in_featuremap_size = in_featuremap_size + self.simcc_split_ratio = simcc_split_ratio + + self.loss_module = MODELS.build(loss) + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + if isinstance(in_channels, (tuple, list)): + raise ValueError( + f'{self.__class__.__name__} does not support selecting ' + 'multiple input features.') + + # Define SimCC layers + flatten_dims = self.in_featuremap_size[0] * self.in_featuremap_size[1] + + self.final_layer = nn.Conv2d( + in_channels, + out_channels, + kernel_size=final_layer_kernel_size, + stride=1, + padding=final_layer_kernel_size // 2) + self.mlp = nn.Sequential( + ScaleNorm(flatten_dims), + nn.Linear(flatten_dims, gau_cfg['hidden_dims'], bias=False)) + + W = int(self.input_size[0] * self.simcc_split_ratio) + H = int(self.input_size[1] * self.simcc_split_ratio) + + self.gau = RTMCCBlock( + self.out_channels, + gau_cfg['hidden_dims'], + gau_cfg['hidden_dims'], + s=gau_cfg['s'], + expansion_factor=gau_cfg['expansion_factor'], + dropout_rate=gau_cfg['dropout_rate'], + drop_path=gau_cfg['drop_path'], + attn_type='self-attn', + act_fn=gau_cfg['act_fn'], + use_rel_bias=gau_cfg['use_rel_bias'], + pos_enc=gau_cfg['pos_enc']) + + self.cls_x = nn.Linear(gau_cfg['hidden_dims'], W, bias=False) + self.cls_y = nn.Linear(gau_cfg['hidden_dims'], H, bias=False) + + def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor, Tensor]: + """Forward the network. + + The input is the featuremap extracted by backbone and the + output is the simcc representation. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + pred_x (Tensor): 1d representation of x. + pred_y (Tensor): 1d representation of y. + """ + feats = feats[-1] + + feats = self.final_layer(feats) # -> B, K, H, W + + # flatten the output heatmap + feats = torch.flatten(feats, 2) + + feats = self.mlp(feats) # -> B, K, hidden + + feats = self.gau(feats) + + pred_x = self.cls_x(feats) + pred_y = self.cls_y(feats) + + return pred_x, pred_y + + def predict( + self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + test_cfg: OptConfigType = {}, + ) -> InstanceList: + """Predict results from features. + + Args: + feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage + features (or multiple multi-stage features in TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + test_cfg (dict): The runtime config for testing process. 
Defaults + to {} + + Returns: + List[InstanceData]: The pose predictions, each contains + the following fields: + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + - keypoint_x_labels (np.ndarray, optional): The predicted 1-D + intensity distribution in the x direction + - keypoint_y_labels (np.ndarray, optional): The predicted 1-D + intensity distribution in the y direction + """ + + if test_cfg.get('flip_test', False): + # TTA: flip test -> feats = [orig, flipped] + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + _feats, _feats_flip = feats + + _batch_pred_x, _batch_pred_y = self.forward(_feats) + + _batch_pred_x_flip, _batch_pred_y_flip = self.forward(_feats_flip) + _batch_pred_x_flip, _batch_pred_y_flip = flip_vectors( + _batch_pred_x_flip, + _batch_pred_y_flip, + flip_indices=flip_indices) + + batch_pred_x = (_batch_pred_x + _batch_pred_x_flip) * 0.5 + batch_pred_y = (_batch_pred_y + _batch_pred_y_flip) * 0.5 + else: + batch_pred_x, batch_pred_y = self.forward(feats) + + preds = self.decode((batch_pred_x, batch_pred_y)) + + if test_cfg.get('output_heatmaps', False): + rank, _ = get_dist_info() + if rank == 0: + warnings.warn('The predicted simcc values are normalized for ' + 'visualization. This may cause discrepancy ' + 'between the keypoint scores and the 1D heatmaps' + '.') + + # normalize the predicted 1d distribution + batch_pred_x = get_simcc_normalized(batch_pred_x) + batch_pred_y = get_simcc_normalized(batch_pred_y) + + B, K, _ = batch_pred_x.shape + # B, K, Wx -> B, K, Wx, 1 + x = batch_pred_x.reshape(B, K, 1, -1) + # B, K, Wy -> B, K, 1, Wy + y = batch_pred_y.reshape(B, K, -1, 1) + # B, K, Wx, Wy + batch_heatmaps = torch.matmul(y, x) + pred_fields = [ + PixelData(heatmaps=hm) for hm in batch_heatmaps.detach() + ] + + for pred_instances, pred_x, pred_y in zip(preds, + to_numpy(batch_pred_x), + to_numpy(batch_pred_y)): + + pred_instances.keypoint_x_labels = pred_x[None] + pred_instances.keypoint_y_labels = pred_y[None] + + return preds, pred_fields + else: + return preds + + def loss( + self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: OptConfigType = {}, + ) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + pred_x, pred_y = self.forward(feats) + + gt_x = torch.cat([ + d.gt_instance_labels.keypoint_x_labels for d in batch_data_samples + ], + dim=0) + gt_y = torch.cat([ + d.gt_instance_labels.keypoint_y_labels for d in batch_data_samples + ], + dim=0) + keypoint_weights = torch.cat( + [ + d.gt_instance_labels.keypoint_weights + for d in batch_data_samples + ], + dim=0, + ) + + pred_simcc = (pred_x, pred_y) + gt_simcc = (gt_x, gt_y) + + # calculate losses + losses = dict() + loss = self.loss_module(pred_simcc, gt_simcc, keypoint_weights) + + losses.update(loss_kpt=loss) + + # calculate accuracy + _, avg_acc, _ = simcc_pck_accuracy( + output=to_numpy(pred_simcc), + target=to_numpy(gt_simcc), + simcc_split_ratio=self.simcc_split_ratio, + mask=to_numpy(keypoint_weights) > 0, + ) + + acc_pose = torch.tensor(avg_acc, device=gt_x.device) + losses.update(acc_pose=acc_pose) + + return losses + + @property + def default_init_cfg(self): + init_cfg = [ + dict(type='Normal', layer=['Conv2d'], std=0.001), + dict(type='Constant', layer='BatchNorm2d', val=1), + 
dict(type='Normal', layer=['Linear'], std=0.01, bias=0), + ] + return init_cfg diff --git a/mmpose/models/heads/coord_cls_heads/simcc_head.py b/mmpose/models/heads/coord_cls_heads/simcc_head.py index d9e7001cbc..7d9ca62ddc 100644 --- a/mmpose/models/heads/coord_cls_heads/simcc_head.py +++ b/mmpose/models/heads/coord_cls_heads/simcc_head.py @@ -1,371 +1,371 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings -from typing import Optional, Sequence, Tuple, Union - -import torch -from mmcv.cnn import build_conv_layer -from mmengine.dist import get_dist_info -from mmengine.structures import PixelData -from torch import Tensor, nn - -from mmpose.codecs.utils import get_simcc_normalized -from mmpose.evaluation.functional import simcc_pck_accuracy -from mmpose.models.utils.tta import flip_vectors -from mmpose.registry import KEYPOINT_CODECS, MODELS -from mmpose.utils.tensor_utils import to_numpy -from mmpose.utils.typing import (ConfigType, InstanceList, OptConfigType, - OptSampleList) -from ..base_head import BaseHead - -OptIntSeq = Optional[Sequence[int]] - - -@MODELS.register_module() -class SimCCHead(BaseHead): - """Top-down heatmap head introduced in `SimCC`_ by Li et al (2022). The - head is composed of a few deconvolutional layers followed by a fully- - connected layer to generate 1d representation from low-resolution feature - maps. - - Args: - in_channels (int | sequence[int]): Number of channels in the input - feature map - out_channels (int): Number of channels in the output heatmap - input_size (tuple): Input image size in shape [w, h] - in_featuremap_size (int | sequence[int]): Size of input feature map - simcc_split_ratio (float): Split ratio of pixels - deconv_type (str, optional): The type of deconv head which should - be one of the following options: - - - ``'heatmap'``: make deconv layers in `HeatmapHead` - - ``'vipnas'``: make deconv layers in `ViPNASHead` - - Defaults to ``'Heatmap'`` - deconv_out_channels (sequence[int]): The output channel number of each - deconv layer. Defaults to ``(256, 256, 256)`` - deconv_kernel_sizes (sequence[int | tuple], optional): The kernel size - of each deconv layer. Each element should be either an integer for - both height and width dimensions, or a tuple of two integers for - the height and the width dimension respectively.Defaults to - ``(4, 4, 4)`` - deconv_num_groups (Sequence[int], optional): The group number of each - deconv layer. Defaults to ``(16, 16, 16)`` - conv_out_channels (sequence[int], optional): The output channel number - of each intermediate conv layer. ``None`` means no intermediate - conv layer between deconv layers and the final conv layer. - Defaults to ``None`` - conv_kernel_sizes (sequence[int | tuple], optional): The kernel size - of each intermediate conv layer. Defaults to ``None`` - final_layer (dict): Arguments of the final Conv2d layer. - Defaults to ``dict(kernel_size=1)`` - loss (Config): Config of the keypoint loss. Defaults to use - :class:`KLDiscretLoss` - decoder (Config, optional): The decoder config that controls decoding - keypoint coordinates from the network output. Defaults to ``None`` - init_cfg (Config, optional): Config to control the initialization. See - :attr:`default_init_cfg` for default settings - - .. 
_`SimCC`: https://arxiv.org/abs/2107.03332 - """ - - _version = 2 - - def __init__( - self, - in_channels: Union[int, Sequence[int]], - out_channels: int, - input_size: Tuple[int, int], - in_featuremap_size: Tuple[int, int], - simcc_split_ratio: float = 2.0, - deconv_type: str = 'heatmap', - deconv_out_channels: OptIntSeq = (256, 256, 256), - deconv_kernel_sizes: OptIntSeq = (4, 4, 4), - deconv_num_groups: OptIntSeq = (16, 16, 16), - conv_out_channels: OptIntSeq = None, - conv_kernel_sizes: OptIntSeq = None, - final_layer: dict = dict(kernel_size=1), - loss: ConfigType = dict(type='KLDiscretLoss', use_target_weight=True), - decoder: OptConfigType = None, - init_cfg: OptConfigType = None, - ): - - if init_cfg is None: - init_cfg = self.default_init_cfg - - super().__init__(init_cfg) - - if deconv_type not in {'heatmap', 'vipnas'}: - raise ValueError( - f'{self.__class__.__name__} got invalid `deconv_type` value' - f'{deconv_type}. Should be one of ' - '{"heatmap", "vipnas"}') - - self.in_channels = in_channels - self.out_channels = out_channels - self.input_size = input_size - self.in_featuremap_size = in_featuremap_size - self.simcc_split_ratio = simcc_split_ratio - self.loss_module = MODELS.build(loss) - if decoder is not None: - self.decoder = KEYPOINT_CODECS.build(decoder) - else: - self.decoder = None - - num_deconv = len(deconv_out_channels) if deconv_out_channels else 0 - if num_deconv != 0: - self.heatmap_size = tuple( - [s * (2**num_deconv) for s in in_featuremap_size]) - - # deconv layers + 1x1 conv - self.deconv_head = self._make_deconv_head( - in_channels=in_channels, - out_channels=out_channels, - deconv_type=deconv_type, - deconv_out_channels=deconv_out_channels, - deconv_kernel_sizes=deconv_kernel_sizes, - deconv_num_groups=deconv_num_groups, - conv_out_channels=conv_out_channels, - conv_kernel_sizes=conv_kernel_sizes, - final_layer=final_layer) - - if final_layer is not None: - in_channels = out_channels - else: - in_channels = deconv_out_channels[-1] - - else: - self.deconv_head = None - - if final_layer is not None: - cfg = dict( - type='Conv2d', - in_channels=in_channels, - out_channels=out_channels, - kernel_size=1) - cfg.update(final_layer) - self.final_layer = build_conv_layer(cfg) - else: - self.final_layer = None - - self.heatmap_size = in_featuremap_size - - # Define SimCC layers - flatten_dims = self.heatmap_size[0] * self.heatmap_size[1] - - W = int(self.input_size[0] * self.simcc_split_ratio) - H = int(self.input_size[1] * self.simcc_split_ratio) - - self.mlp_head_x = nn.Linear(flatten_dims, W) - self.mlp_head_y = nn.Linear(flatten_dims, H) - - def _make_deconv_head( - self, - in_channels: Union[int, Sequence[int]], - out_channels: int, - deconv_type: str = 'heatmap', - deconv_out_channels: OptIntSeq = (256, 256, 256), - deconv_kernel_sizes: OptIntSeq = (4, 4, 4), - deconv_num_groups: OptIntSeq = (16, 16, 16), - conv_out_channels: OptIntSeq = None, - conv_kernel_sizes: OptIntSeq = None, - final_layer: dict = dict(kernel_size=1) - ) -> nn.Module: - """Create deconvolutional layers by given parameters.""" - - if deconv_type == 'heatmap': - deconv_head = MODELS.build( - dict( - type='HeatmapHead', - in_channels=self.in_channels, - out_channels=out_channels, - deconv_out_channels=deconv_out_channels, - deconv_kernel_sizes=deconv_kernel_sizes, - conv_out_channels=conv_out_channels, - conv_kernel_sizes=conv_kernel_sizes, - final_layer=final_layer)) - else: - deconv_head = MODELS.build( - dict( - type='ViPNASHead', - in_channels=in_channels, - out_channels=out_channels, 
- deconv_out_channels=deconv_out_channels, - deconv_num_groups=deconv_num_groups, - conv_out_channels=conv_out_channels, - conv_kernel_sizes=conv_kernel_sizes, - final_layer=final_layer)) - - return deconv_head - - def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor, Tensor]: - """Forward the network. - - The input is the featuremap extracted by backbone and the - output is the simcc representation. - - Args: - feats (Tuple[Tensor]): Multi scale feature maps. - - Returns: - pred_x (Tensor): 1d representation of x. - pred_y (Tensor): 1d representation of y. - """ - if self.deconv_head is None: - feats = feats[-1] - if self.final_layer is not None: - feats = self.final_layer(feats) - else: - feats = self.deconv_head(feats) - - # flatten the output heatmap - x = torch.flatten(feats, 2) - - pred_x = self.mlp_head_x(x) - pred_y = self.mlp_head_y(x) - - return pred_x, pred_y - - def predict( - self, - feats: Tuple[Tensor], - batch_data_samples: OptSampleList, - test_cfg: OptConfigType = {}, - ) -> InstanceList: - """Predict results from features. - - Args: - feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage - features (or multiple multi-stage features in TTA) - batch_data_samples (List[:obj:`PoseDataSample`]): The batch - data samples - test_cfg (dict): The runtime config for testing process. Defaults - to {} - - Returns: - List[InstanceData]: The pose predictions, each contains - the following fields: - - - keypoints (np.ndarray): predicted keypoint coordinates in - shape (num_instances, K, D) where K is the keypoint number - and D is the keypoint dimension - - keypoint_scores (np.ndarray): predicted keypoint scores in - shape (num_instances, K) - - keypoint_x_labels (np.ndarray, optional): The predicted 1-D - intensity distribution in the x direction - - keypoint_y_labels (np.ndarray, optional): The predicted 1-D - intensity distribution in the y direction - """ - - if test_cfg.get('flip_test', False): - # TTA: flip test -> feats = [orig, flipped] - assert isinstance(feats, list) and len(feats) == 2 - flip_indices = batch_data_samples[0].metainfo['flip_indices'] - _feats, _feats_flip = feats - - _batch_pred_x, _batch_pred_y = self.forward(_feats) - - _batch_pred_x_flip, _batch_pred_y_flip = self.forward(_feats_flip) - _batch_pred_x_flip, _batch_pred_y_flip = flip_vectors( - _batch_pred_x_flip, - _batch_pred_y_flip, - flip_indices=flip_indices) - - batch_pred_x = (_batch_pred_x + _batch_pred_x_flip) * 0.5 - batch_pred_y = (_batch_pred_y + _batch_pred_y_flip) * 0.5 - else: - batch_pred_x, batch_pred_y = self.forward(feats) - - preds = self.decode((batch_pred_x, batch_pred_y)) - - if test_cfg.get('output_heatmaps', False): - rank, _ = get_dist_info() - if rank == 0: - warnings.warn('The predicted simcc values are normalized for ' - 'visualization. 
This may cause discrepancy ' - 'between the keypoint scores and the 1D heatmaps' - '.') - - # normalize the predicted 1d distribution - sigma = self.decoder.sigma - batch_pred_x = get_simcc_normalized(batch_pred_x, sigma[0]) - batch_pred_y = get_simcc_normalized(batch_pred_y, sigma[1]) - - B, K, _ = batch_pred_x.shape - # B, K, Wx -> B, K, Wx, 1 - x = batch_pred_x.reshape(B, K, 1, -1) - # B, K, Wy -> B, K, 1, Wy - y = batch_pred_y.reshape(B, K, -1, 1) - # B, K, Wx, Wy - batch_heatmaps = torch.matmul(y, x) - pred_fields = [ - PixelData(heatmaps=hm) for hm in batch_heatmaps.detach() - ] - - for pred_instances, pred_x, pred_y in zip(preds, - to_numpy(batch_pred_x), - to_numpy(batch_pred_y)): - - pred_instances.keypoint_x_labels = pred_x[None] - pred_instances.keypoint_y_labels = pred_y[None] - - return preds, pred_fields - else: - return preds - - def loss( - self, - feats: Tuple[Tensor], - batch_data_samples: OptSampleList, - train_cfg: OptConfigType = {}, - ) -> dict: - """Calculate losses from a batch of inputs and data samples.""" - - pred_x, pred_y = self.forward(feats) - - gt_x = torch.cat([ - d.gt_instance_labels.keypoint_x_labels for d in batch_data_samples - ], - dim=0) - gt_y = torch.cat([ - d.gt_instance_labels.keypoint_y_labels for d in batch_data_samples - ], - dim=0) - keypoint_weights = torch.cat( - [ - d.gt_instance_labels.keypoint_weights - for d in batch_data_samples - ], - dim=0, - ) - - pred_simcc = (pred_x, pred_y) - gt_simcc = (gt_x, gt_y) - - # calculate losses - losses = dict() - loss = self.loss_module(pred_simcc, gt_simcc, keypoint_weights) - - losses.update(loss_kpt=loss) - - # calculate accuracy - _, avg_acc, _ = simcc_pck_accuracy( - output=to_numpy(pred_simcc), - target=to_numpy(gt_simcc), - simcc_split_ratio=self.simcc_split_ratio, - mask=to_numpy(keypoint_weights) > 0, - ) - - acc_pose = torch.tensor(avg_acc, device=gt_x.device) - losses.update(acc_pose=acc_pose) - - return losses - - @property - def default_init_cfg(self): - init_cfg = [ - dict( - type='Normal', layer=['Conv2d', 'ConvTranspose2d'], std=0.001), - dict(type='Constant', layer='BatchNorm2d', val=1), - dict(type='Normal', layer=['Linear'], std=0.01, bias=0), - ] - return init_cfg +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from typing import Optional, Sequence, Tuple, Union + +import torch +from mmcv.cnn import build_conv_layer +from mmengine.dist import get_dist_info +from mmengine.structures import PixelData +from torch import Tensor, nn + +from mmpose.codecs.utils import get_simcc_normalized +from mmpose.evaluation.functional import simcc_pck_accuracy +from mmpose.models.utils.tta import flip_vectors +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, InstanceList, OptConfigType, + OptSampleList) +from ..base_head import BaseHead + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class SimCCHead(BaseHead): + """Top-down heatmap head introduced in `SimCC`_ by Li et al (2022). The + head is composed of a few deconvolutional layers followed by a fully- + connected layer to generate 1d representation from low-resolution feature + maps. 
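The 1-D representation described here classifies each coordinate over `int(input_size * simcc_split_ratio)` bins, so localization error is bounded by `1 / simcc_split_ratio` pixels. The rough, self-contained sketch below shows only that coordinate-to-bin mapping; the real codecs in `mmpose.codecs` typically also apply target smoothing and input-space transforms that are omitted here, and the input size and keypoint position are made up for the example.

# Rough sketch of the coordinate <-> bin mapping implied by
# `simcc_split_ratio`. Values are illustrative only.
import torch

input_size = (192, 256)                # (w, h), a typical top-down input
split_ratio = 2.0
W = int(input_size[0] * split_ratio)   # 384 x-bins
H = int(input_size[1] * split_ratio)   # 512 y-bins

# A keypoint at (x, y) in input-image pixels maps to one bin per axis ...
x, y = 101.3, 57.8
x_bin = int(round(x * split_ratio))
y_bin = int(round(y * split_ratio))

# ... and the reverse mapping is applied to the predicted 1-D logits.
pred_x = torch.zeros(W)
pred_y = torch.zeros(H)
pred_x[x_bin] = 1.0
pred_y[y_bin] = 1.0
decoded = (pred_x.argmax().item() / split_ratio,
           pred_y.argmax().item() / split_ratio)
print(decoded)  # (101.5, 58.0): error bounded by 1 / split_ratio pixels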
+ + Args: + in_channels (int | sequence[int]): Number of channels in the input + feature map + out_channels (int): Number of channels in the output heatmap + input_size (tuple): Input image size in shape [w, h] + in_featuremap_size (int | sequence[int]): Size of input feature map + simcc_split_ratio (float): Split ratio of pixels + deconv_type (str, optional): The type of deconv head which should + be one of the following options: + + - ``'heatmap'``: make deconv layers in `HeatmapHead` + - ``'vipnas'``: make deconv layers in `ViPNASHead` + + Defaults to ``'Heatmap'`` + deconv_out_channels (sequence[int]): The output channel number of each + deconv layer. Defaults to ``(256, 256, 256)`` + deconv_kernel_sizes (sequence[int | tuple], optional): The kernel size + of each deconv layer. Each element should be either an integer for + both height and width dimensions, or a tuple of two integers for + the height and the width dimension respectively.Defaults to + ``(4, 4, 4)`` + deconv_num_groups (Sequence[int], optional): The group number of each + deconv layer. Defaults to ``(16, 16, 16)`` + conv_out_channels (sequence[int], optional): The output channel number + of each intermediate conv layer. ``None`` means no intermediate + conv layer between deconv layers and the final conv layer. + Defaults to ``None`` + conv_kernel_sizes (sequence[int | tuple], optional): The kernel size + of each intermediate conv layer. Defaults to ``None`` + final_layer (dict): Arguments of the final Conv2d layer. + Defaults to ``dict(kernel_size=1)`` + loss (Config): Config of the keypoint loss. Defaults to use + :class:`KLDiscretLoss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. _`SimCC`: https://arxiv.org/abs/2107.03332 + """ + + _version = 2 + + def __init__( + self, + in_channels: Union[int, Sequence[int]], + out_channels: int, + input_size: Tuple[int, int], + in_featuremap_size: Tuple[int, int], + simcc_split_ratio: float = 2.0, + deconv_type: str = 'heatmap', + deconv_out_channels: OptIntSeq = (256, 256, 256), + deconv_kernel_sizes: OptIntSeq = (4, 4, 4), + deconv_num_groups: OptIntSeq = (16, 16, 16), + conv_out_channels: OptIntSeq = None, + conv_kernel_sizes: OptIntSeq = None, + final_layer: dict = dict(kernel_size=1), + loss: ConfigType = dict(type='KLDiscretLoss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None, + ): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + if deconv_type not in {'heatmap', 'vipnas'}: + raise ValueError( + f'{self.__class__.__name__} got invalid `deconv_type` value' + f'{deconv_type}. 
Should be one of ' + '{"heatmap", "vipnas"}') + + self.in_channels = in_channels + self.out_channels = out_channels + self.input_size = input_size + self.in_featuremap_size = in_featuremap_size + self.simcc_split_ratio = simcc_split_ratio + self.loss_module = MODELS.build(loss) + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + num_deconv = len(deconv_out_channels) if deconv_out_channels else 0 + if num_deconv != 0: + self.heatmap_size = tuple( + [s * (2**num_deconv) for s in in_featuremap_size]) + + # deconv layers + 1x1 conv + self.deconv_head = self._make_deconv_head( + in_channels=in_channels, + out_channels=out_channels, + deconv_type=deconv_type, + deconv_out_channels=deconv_out_channels, + deconv_kernel_sizes=deconv_kernel_sizes, + deconv_num_groups=deconv_num_groups, + conv_out_channels=conv_out_channels, + conv_kernel_sizes=conv_kernel_sizes, + final_layer=final_layer) + + if final_layer is not None: + in_channels = out_channels + else: + in_channels = deconv_out_channels[-1] + + else: + self.deconv_head = None + + if final_layer is not None: + cfg = dict( + type='Conv2d', + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1) + cfg.update(final_layer) + self.final_layer = build_conv_layer(cfg) + else: + self.final_layer = None + + self.heatmap_size = in_featuremap_size + + # Define SimCC layers + flatten_dims = self.heatmap_size[0] * self.heatmap_size[1] + + W = int(self.input_size[0] * self.simcc_split_ratio) + H = int(self.input_size[1] * self.simcc_split_ratio) + + self.mlp_head_x = nn.Linear(flatten_dims, W) + self.mlp_head_y = nn.Linear(flatten_dims, H) + + def _make_deconv_head( + self, + in_channels: Union[int, Sequence[int]], + out_channels: int, + deconv_type: str = 'heatmap', + deconv_out_channels: OptIntSeq = (256, 256, 256), + deconv_kernel_sizes: OptIntSeq = (4, 4, 4), + deconv_num_groups: OptIntSeq = (16, 16, 16), + conv_out_channels: OptIntSeq = None, + conv_kernel_sizes: OptIntSeq = None, + final_layer: dict = dict(kernel_size=1) + ) -> nn.Module: + """Create deconvolutional layers by given parameters.""" + + if deconv_type == 'heatmap': + deconv_head = MODELS.build( + dict( + type='HeatmapHead', + in_channels=self.in_channels, + out_channels=out_channels, + deconv_out_channels=deconv_out_channels, + deconv_kernel_sizes=deconv_kernel_sizes, + conv_out_channels=conv_out_channels, + conv_kernel_sizes=conv_kernel_sizes, + final_layer=final_layer)) + else: + deconv_head = MODELS.build( + dict( + type='ViPNASHead', + in_channels=in_channels, + out_channels=out_channels, + deconv_out_channels=deconv_out_channels, + deconv_num_groups=deconv_num_groups, + conv_out_channels=conv_out_channels, + conv_kernel_sizes=conv_kernel_sizes, + final_layer=final_layer)) + + return deconv_head + + def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor, Tensor]: + """Forward the network. + + The input is the featuremap extracted by backbone and the + output is the simcc representation. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + pred_x (Tensor): 1d representation of x. + pred_y (Tensor): 1d representation of y. 
+ """ + if self.deconv_head is None: + feats = feats[-1] + if self.final_layer is not None: + feats = self.final_layer(feats) + else: + feats = self.deconv_head(feats) + + # flatten the output heatmap + x = torch.flatten(feats, 2) + + pred_x = self.mlp_head_x(x) + pred_y = self.mlp_head_y(x) + + return pred_x, pred_y + + def predict( + self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + test_cfg: OptConfigType = {}, + ) -> InstanceList: + """Predict results from features. + + Args: + feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage + features (or multiple multi-stage features in TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + test_cfg (dict): The runtime config for testing process. Defaults + to {} + + Returns: + List[InstanceData]: The pose predictions, each contains + the following fields: + + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + - keypoint_x_labels (np.ndarray, optional): The predicted 1-D + intensity distribution in the x direction + - keypoint_y_labels (np.ndarray, optional): The predicted 1-D + intensity distribution in the y direction + """ + + if test_cfg.get('flip_test', False): + # TTA: flip test -> feats = [orig, flipped] + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + _feats, _feats_flip = feats + + _batch_pred_x, _batch_pred_y = self.forward(_feats) + + _batch_pred_x_flip, _batch_pred_y_flip = self.forward(_feats_flip) + _batch_pred_x_flip, _batch_pred_y_flip = flip_vectors( + _batch_pred_x_flip, + _batch_pred_y_flip, + flip_indices=flip_indices) + + batch_pred_x = (_batch_pred_x + _batch_pred_x_flip) * 0.5 + batch_pred_y = (_batch_pred_y + _batch_pred_y_flip) * 0.5 + else: + batch_pred_x, batch_pred_y = self.forward(feats) + + preds = self.decode((batch_pred_x, batch_pred_y)) + + if test_cfg.get('output_heatmaps', False): + rank, _ = get_dist_info() + if rank == 0: + warnings.warn('The predicted simcc values are normalized for ' + 'visualization. 
This may cause discrepancy ' + 'between the keypoint scores and the 1D heatmaps' + '.') + + # normalize the predicted 1d distribution + sigma = self.decoder.sigma + batch_pred_x = get_simcc_normalized(batch_pred_x, sigma[0]) + batch_pred_y = get_simcc_normalized(batch_pred_y, sigma[1]) + + B, K, _ = batch_pred_x.shape + # B, K, Wx -> B, K, Wx, 1 + x = batch_pred_x.reshape(B, K, 1, -1) + # B, K, Wy -> B, K, 1, Wy + y = batch_pred_y.reshape(B, K, -1, 1) + # B, K, Wx, Wy + batch_heatmaps = torch.matmul(y, x) + pred_fields = [ + PixelData(heatmaps=hm) for hm in batch_heatmaps.detach() + ] + + for pred_instances, pred_x, pred_y in zip(preds, + to_numpy(batch_pred_x), + to_numpy(batch_pred_y)): + + pred_instances.keypoint_x_labels = pred_x[None] + pred_instances.keypoint_y_labels = pred_y[None] + + return preds, pred_fields + else: + return preds + + def loss( + self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: OptConfigType = {}, + ) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + pred_x, pred_y = self.forward(feats) + + gt_x = torch.cat([ + d.gt_instance_labels.keypoint_x_labels for d in batch_data_samples + ], + dim=0) + gt_y = torch.cat([ + d.gt_instance_labels.keypoint_y_labels for d in batch_data_samples + ], + dim=0) + keypoint_weights = torch.cat( + [ + d.gt_instance_labels.keypoint_weights + for d in batch_data_samples + ], + dim=0, + ) + + pred_simcc = (pred_x, pred_y) + gt_simcc = (gt_x, gt_y) + + # calculate losses + losses = dict() + loss = self.loss_module(pred_simcc, gt_simcc, keypoint_weights) + + losses.update(loss_kpt=loss) + + # calculate accuracy + _, avg_acc, _ = simcc_pck_accuracy( + output=to_numpy(pred_simcc), + target=to_numpy(gt_simcc), + simcc_split_ratio=self.simcc_split_ratio, + mask=to_numpy(keypoint_weights) > 0, + ) + + acc_pose = torch.tensor(avg_acc, device=gt_x.device) + losses.update(acc_pose=acc_pose) + + return losses + + @property + def default_init_cfg(self): + init_cfg = [ + dict( + type='Normal', layer=['Conv2d', 'ConvTranspose2d'], std=0.001), + dict(type='Constant', layer='BatchNorm2d', val=1), + dict(type='Normal', layer=['Linear'], std=0.01, bias=0), + ] + return init_cfg diff --git a/mmpose/models/heads/heatmap_heads/__init__.py b/mmpose/models/heads/heatmap_heads/__init__.py index b482216b36..3e0945c16e 100644 --- a/mmpose/models/heads/heatmap_heads/__init__.py +++ b/mmpose/models/heads/heatmap_heads/__init__.py @@ -1,12 +1,12 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .ae_head import AssociativeEmbeddingHead -from .cid_head import CIDHead -from .cpm_head import CPMHead -from .heatmap_head import HeatmapHead -from .mspn_head import MSPNHead -from .vipnas_head import ViPNASHead - -__all__ = [ - 'HeatmapHead', 'CPMHead', 'MSPNHead', 'ViPNASHead', - 'AssociativeEmbeddingHead', 'CIDHead' -] +# Copyright (c) OpenMMLab. All rights reserved. +from .ae_head import AssociativeEmbeddingHead +from .cid_head import CIDHead +from .cpm_head import CPMHead +from .heatmap_head import HeatmapHead +from .mspn_head import MSPNHead +from .vipnas_head import ViPNASHead + +__all__ = [ + 'HeatmapHead', 'CPMHead', 'MSPNHead', 'ViPNASHead', + 'AssociativeEmbeddingHead', 'CIDHead' +] diff --git a/mmpose/models/heads/heatmap_heads/ae_head.py b/mmpose/models/heads/heatmap_heads/ae_head.py index bd12d57a33..69b29fc7fe 100644 --- a/mmpose/models/heads/heatmap_heads/ae_head.py +++ b/mmpose/models/heads/heatmap_heads/ae_head.py @@ -1,291 +1,291 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
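Before the dense heads below, a note on the SimCC-style `predict()` shown above: when `output_heatmaps` is enabled, each keypoint's 2-D heatmap is rebuilt as the outer product of the two normalized 1-D vectors. The sketch below reproduces just that reshape-and-matmul step with random tensors; a plain softmax stands in for `get_simcc_normalized`, whose exact normalization is not reproduced here.

# Sketch of the visualization path in the SimCC-style predict() above:
# (B, K, Wx) and (B, K, Wy) vectors become (B, K, Wy, Wx) heatmaps via an
# outer product. Softmax is a stand-in for get_simcc_normalized().
import torch

B, K, Wx, Wy = 2, 17, 384, 512
batch_pred_x = torch.rand(B, K, Wx).softmax(dim=-1)
batch_pred_y = torch.rand(B, K, Wy).softmax(dim=-1)

x = batch_pred_x.reshape(B, K, 1, -1)   # B, K, 1, Wx
y = batch_pred_y.reshape(B, K, -1, 1)   # B, K, Wy, 1
batch_heatmaps = torch.matmul(y, x)     # B, K, Wy, Wx

assert batch_heatmaps.shape == (B, K, Wy, Wx)
# Each 2-D map peaks where both 1-D distributions peak, so the argmax of
# the product recovers the same (x, y) bins as the vectors themselves.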
-from typing import List, Optional, Sequence, Tuple, Union - -import torch -from mmengine.structures import PixelData -from mmengine.utils import is_list_of -from torch import Tensor - -from mmpose.models.utils.tta import aggregate_heatmaps, flip_heatmaps -from mmpose.registry import MODELS -from mmpose.utils.typing import (ConfigType, Features, OptConfigType, - OptSampleList, Predictions) -from .heatmap_head import HeatmapHead - -OptIntSeq = Optional[Sequence[int]] - - -@MODELS.register_module() -class AssociativeEmbeddingHead(HeatmapHead): - - def __init__(self, - in_channels: Union[int, Sequence[int]], - num_keypoints: int, - tag_dim: int = 1, - tag_per_keypoint: bool = True, - deconv_out_channels: OptIntSeq = (256, 256, 256), - deconv_kernel_sizes: OptIntSeq = (4, 4, 4), - conv_out_channels: OptIntSeq = None, - conv_kernel_sizes: OptIntSeq = None, - final_layer: dict = dict(kernel_size=1), - keypoint_loss: ConfigType = dict(type='KeypointMSELoss'), - tag_loss: ConfigType = dict(type='AssociativeEmbeddingLoss'), - decoder: OptConfigType = None, - init_cfg: OptConfigType = None): - - if tag_per_keypoint: - out_channels = num_keypoints * (1 + tag_dim) - else: - out_channels = num_keypoints + tag_dim - - loss = dict( - type='CombinedLoss', - losses=dict(keypoint_loss=keypoint_loss, tag_loss=tag_loss)) - - super().__init__( - in_channels=in_channels, - out_channels=out_channels, - deconv_out_channels=deconv_out_channels, - deconv_kernel_sizes=deconv_kernel_sizes, - conv_out_channels=conv_out_channels, - conv_kernel_sizes=conv_kernel_sizes, - final_layer=final_layer, - loss=loss, - decoder=decoder, - init_cfg=init_cfg) - - self.num_keypoints = num_keypoints - self.tag_dim = tag_dim - self.tag_per_keypoint = tag_per_keypoint - - def predict(self, - feats: Features, - batch_data_samples: OptSampleList, - test_cfg: ConfigType = {}) -> Predictions: - """Predict results from features. - - Args: - feats (Features): The features which could be in following forms: - - - Tuple[Tensor]: multi-stage features from the backbone - - List[Tuple[Tensor]]: multiple features for TTA where either - `flip_test` or `multiscale_test` is applied - - List[List[Tuple[Tensor]]]: multiple features for TTA where - both `flip_test` and `multiscale_test` are applied - - batch_data_samples (List[:obj:`PoseDataSample`]): The batch - data samples - test_cfg (dict): The runtime config for testing process. Defaults - to {} - - Returns: - Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If - ``test_cfg['output_heatmap']==True``, return both pose and heatmap - prediction; otherwise only return the pose prediction. 
- - The pose prediction is a list of ``InstanceData``, each contains - the following fields: - - - keypoints (np.ndarray): predicted keypoint coordinates in - shape (num_instances, K, D) where K is the keypoint number - and D is the keypoint dimension - - keypoint_scores (np.ndarray): predicted keypoint scores in - shape (num_instances, K) - - The heatmap prediction is a list of ``PixelData``, each contains - the following fields: - - - heatmaps (Tensor): The predicted heatmaps in shape (K, h, w) - """ - # test configs - multiscale_test = test_cfg.get('multiscale_test', False) - flip_test = test_cfg.get('flip_test', False) - shift_heatmap = test_cfg.get('shift_heatmap', False) - align_corners = test_cfg.get('align_corners', False) - restore_heatmap_size = test_cfg.get('restore_heatmap_size', False) - output_heatmaps = test_cfg.get('output_heatmaps', False) - - # enable multi-scale test - if multiscale_test: - # TTA: multi-scale test - assert is_list_of(feats, list if flip_test else tuple) - else: - assert is_list_of(feats, tuple if flip_test else Tensor) - feats = [feats] - - # resize heatmaps to align with with input size - if restore_heatmap_size: - img_shape = batch_data_samples[0].metainfo['img_shape'] - assert all(d.metainfo['img_shape'] == img_shape - for d in batch_data_samples) - img_h, img_w = img_shape - heatmap_size = (img_w, img_h) - else: - heatmap_size = None - - multiscale_heatmaps = [] - multiscale_tags = [] - - for scale_idx, _feats in enumerate(feats): - if not flip_test: - _heatmaps, _tags = self.forward(_feats) - - else: - # TTA: flip test - assert isinstance(_feats, list) and len(_feats) == 2 - flip_indices = batch_data_samples[0].metainfo['flip_indices'] - # original - _feats_orig, _feats_flip = _feats - _heatmaps_orig, _tags_orig = self.forward(_feats_orig) - - # flipped - _heatmaps_flip, _tags_flip = self.forward(_feats_flip) - _heatmaps_flip = flip_heatmaps( - _heatmaps_flip, - flip_mode='heatmap', - flip_indices=flip_indices, - shift_heatmap=shift_heatmap) - _tags_flip = self._flip_tags( - _tags_flip, - flip_indices=flip_indices, - shift_heatmap=shift_heatmap) - - # aggregated heatmaps - _heatmaps = aggregate_heatmaps( - [_heatmaps_orig, _heatmaps_flip], - size=heatmap_size, - align_corners=align_corners, - mode='average') - - # aggregated tags (only at original scale) - if scale_idx == 0: - _tags = aggregate_heatmaps([_tags_orig, _tags_flip], - size=heatmap_size, - align_corners=align_corners, - mode='concat') - else: - _tags = None - - multiscale_heatmaps.append(_heatmaps) - multiscale_tags.append(_tags) - - # aggregate multi-scale heatmaps - if len(feats) > 1: - batch_heatmaps = aggregate_heatmaps( - multiscale_heatmaps, - align_corners=align_corners, - mode='average') - else: - batch_heatmaps = multiscale_heatmaps[0] - # only keep tags at original scale - batch_tags = multiscale_tags[0] - - batch_outputs = tuple([batch_heatmaps, batch_tags]) - preds = self.decode(batch_outputs) - - if output_heatmaps: - pred_fields = [] - for _heatmaps, _tags in zip(batch_heatmaps.detach(), - batch_tags.detach()): - pred_fields.append(PixelData(heatmaps=_heatmaps, tags=_tags)) - - return preds, pred_fields - else: - return preds - - def _flip_tags(self, - tags: Tensor, - flip_indices: List[int], - shift_heatmap: bool = True): - """Flip the tagging heatmaps horizontally for test-time augmentation. 
- - Args: - tags (Tensor): batched tagging heatmaps to flip - flip_indices (List[int]): The indices of each keypoint's symmetric - keypoint - shift_heatmap (bool): Shift the flipped heatmaps to align with the - original heatmaps and improve accuracy. Defaults to ``True`` - - Returns: - Tensor: flipped tagging heatmaps - """ - B, C, H, W = tags.shape - K = self.num_keypoints - L = self.tag_dim - - tags = tags.flip(-1) - - if self.tag_per_keypoint: - assert C == K * L - tags = tags.view(B, L, K, H, W) - tags = tags[:, :, flip_indices] - tags = tags.view(B, C, H, W) - - if shift_heatmap: - tags[..., 1:] = tags[..., :-1].clone() - - return tags - - def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor, Tensor]: - """Forward the network. The input is multi scale feature maps and the - output is the heatmaps and tags. - - Args: - feats (Tuple[Tensor]): Multi scale feature maps. - - Returns: - tuple: - - heatmaps (Tensor): output heatmaps - - tags (Tensor): output tags - """ - - output = super().forward(feats) - heatmaps = output[:, :self.num_keypoints] - tags = output[:, self.num_keypoints:] - return heatmaps, tags - - def loss(self, - feats: Tuple[Tensor], - batch_data_samples: OptSampleList, - train_cfg: ConfigType = {}) -> dict: - """Calculate losses from a batch of inputs and data samples. - - Args: - feats (Tuple[Tensor]): The multi-stage features - batch_data_samples (List[:obj:`PoseDataSample`]): The batch - data samples - train_cfg (dict): The runtime config for training process. - Defaults to {} - - Returns: - dict: A dictionary of losses. - """ - pred_heatmaps, pred_tags = self.forward(feats) - - if not self.tag_per_keypoint: - pred_tags = pred_tags.repeat((1, self.num_keypoints, 1, 1)) - - gt_heatmaps = torch.stack( - [d.gt_fields.heatmaps for d in batch_data_samples]) - gt_masks = torch.stack( - [d.gt_fields.heatmap_mask for d in batch_data_samples]) - keypoint_weights = torch.cat([ - d.gt_instance_labels.keypoint_weights for d in batch_data_samples - ]) - keypoint_indices = [ - d.gt_instance_labels.keypoint_indices for d in batch_data_samples - ] - - loss_kpt = self.loss_module.keypoint_loss(pred_heatmaps, gt_heatmaps, - keypoint_weights, gt_masks) - - loss_pull, loss_push = self.loss_module.tag_loss( - pred_tags, keypoint_indices) - - losses = { - 'loss_kpt': loss_kpt, - 'loss_pull': loss_pull, - 'loss_push': loss_push - } - - return losses +# Copyright (c) OpenMMLab. All rights reserved. 
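For the associative-embedding head above (the '+' half of the same hunk follows), the single output tensor packs `num_keypoints` heatmap channels plus the tag channels, and flip-test aggregation has to mirror the tags and re-order symmetric keypoints. Below is a minimal sketch of the channel split from `forward()` and the view/reindex trick from `_flip_tags()`; the 5-keypoint `flip_indices` list is invented for the example.

# Minimal sketch of the channel layout and tag flipping used by the
# associative-embedding head. The 5-keypoint flip_indices list is made up
# (0 is its own mirror, 1<->2, 3<->4).
import torch

num_keypoints, tag_dim = 5, 1
out_channels = num_keypoints * (1 + tag_dim)      # tag_per_keypoint=True

B, H, W = 2, 64, 48
output = torch.rand(B, out_channels, H, W)

# forward(): first K channels are heatmaps, the rest are tags.
heatmaps = output[:, :num_keypoints]              # (B, K, H, W)
tags = output[:, num_keypoints:]                  # (B, K * tag_dim, H, W)

# _flip_tags(): mirror horizontally, then swap symmetric keypoints.
flip_indices = [0, 2, 1, 4, 3]
flipped = tags.flip(-1)
flipped = flipped.view(B, tag_dim, num_keypoints, H, W)[:, :, flip_indices]
flipped = flipped.reshape(B, num_keypoints * tag_dim, H, W)

# shift_heatmap=True shifts everything one pixel to compensate for the
# half-pixel offset introduced by flipping.
flipped[..., 1:] = flipped[..., :-1].clone()
print(heatmaps.shape, flipped.shape)  # torch.Size([2, 5, 64, 48]) twice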
+from typing import List, Optional, Sequence, Tuple, Union + +import torch +from mmengine.structures import PixelData +from mmengine.utils import is_list_of +from torch import Tensor + +from mmpose.models.utils.tta import aggregate_heatmaps, flip_heatmaps +from mmpose.registry import MODELS +from mmpose.utils.typing import (ConfigType, Features, OptConfigType, + OptSampleList, Predictions) +from .heatmap_head import HeatmapHead + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class AssociativeEmbeddingHead(HeatmapHead): + + def __init__(self, + in_channels: Union[int, Sequence[int]], + num_keypoints: int, + tag_dim: int = 1, + tag_per_keypoint: bool = True, + deconv_out_channels: OptIntSeq = (256, 256, 256), + deconv_kernel_sizes: OptIntSeq = (4, 4, 4), + conv_out_channels: OptIntSeq = None, + conv_kernel_sizes: OptIntSeq = None, + final_layer: dict = dict(kernel_size=1), + keypoint_loss: ConfigType = dict(type='KeypointMSELoss'), + tag_loss: ConfigType = dict(type='AssociativeEmbeddingLoss'), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + + if tag_per_keypoint: + out_channels = num_keypoints * (1 + tag_dim) + else: + out_channels = num_keypoints + tag_dim + + loss = dict( + type='CombinedLoss', + losses=dict(keypoint_loss=keypoint_loss, tag_loss=tag_loss)) + + super().__init__( + in_channels=in_channels, + out_channels=out_channels, + deconv_out_channels=deconv_out_channels, + deconv_kernel_sizes=deconv_kernel_sizes, + conv_out_channels=conv_out_channels, + conv_kernel_sizes=conv_kernel_sizes, + final_layer=final_layer, + loss=loss, + decoder=decoder, + init_cfg=init_cfg) + + self.num_keypoints = num_keypoints + self.tag_dim = tag_dim + self.tag_per_keypoint = tag_per_keypoint + + def predict(self, + feats: Features, + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from features. + + Args: + feats (Features): The features which could be in following forms: + + - Tuple[Tensor]: multi-stage features from the backbone + - List[Tuple[Tensor]]: multiple features for TTA where either + `flip_test` or `multiscale_test` is applied + - List[List[Tuple[Tensor]]]: multiple features for TTA where + both `flip_test` and `multiscale_test` are applied + + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + test_cfg (dict): The runtime config for testing process. Defaults + to {} + + Returns: + Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If + ``test_cfg['output_heatmap']==True``, return both pose and heatmap + prediction; otherwise only return the pose prediction. 
+ + The pose prediction is a list of ``InstanceData``, each contains + the following fields: + + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + + The heatmap prediction is a list of ``PixelData``, each contains + the following fields: + + - heatmaps (Tensor): The predicted heatmaps in shape (K, h, w) + """ + # test configs + multiscale_test = test_cfg.get('multiscale_test', False) + flip_test = test_cfg.get('flip_test', False) + shift_heatmap = test_cfg.get('shift_heatmap', False) + align_corners = test_cfg.get('align_corners', False) + restore_heatmap_size = test_cfg.get('restore_heatmap_size', False) + output_heatmaps = test_cfg.get('output_heatmaps', False) + + # enable multi-scale test + if multiscale_test: + # TTA: multi-scale test + assert is_list_of(feats, list if flip_test else tuple) + else: + assert is_list_of(feats, tuple if flip_test else Tensor) + feats = [feats] + + # resize heatmaps to align with with input size + if restore_heatmap_size: + img_shape = batch_data_samples[0].metainfo['img_shape'] + assert all(d.metainfo['img_shape'] == img_shape + for d in batch_data_samples) + img_h, img_w = img_shape + heatmap_size = (img_w, img_h) + else: + heatmap_size = None + + multiscale_heatmaps = [] + multiscale_tags = [] + + for scale_idx, _feats in enumerate(feats): + if not flip_test: + _heatmaps, _tags = self.forward(_feats) + + else: + # TTA: flip test + assert isinstance(_feats, list) and len(_feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + # original + _feats_orig, _feats_flip = _feats + _heatmaps_orig, _tags_orig = self.forward(_feats_orig) + + # flipped + _heatmaps_flip, _tags_flip = self.forward(_feats_flip) + _heatmaps_flip = flip_heatmaps( + _heatmaps_flip, + flip_mode='heatmap', + flip_indices=flip_indices, + shift_heatmap=shift_heatmap) + _tags_flip = self._flip_tags( + _tags_flip, + flip_indices=flip_indices, + shift_heatmap=shift_heatmap) + + # aggregated heatmaps + _heatmaps = aggregate_heatmaps( + [_heatmaps_orig, _heatmaps_flip], + size=heatmap_size, + align_corners=align_corners, + mode='average') + + # aggregated tags (only at original scale) + if scale_idx == 0: + _tags = aggregate_heatmaps([_tags_orig, _tags_flip], + size=heatmap_size, + align_corners=align_corners, + mode='concat') + else: + _tags = None + + multiscale_heatmaps.append(_heatmaps) + multiscale_tags.append(_tags) + + # aggregate multi-scale heatmaps + if len(feats) > 1: + batch_heatmaps = aggregate_heatmaps( + multiscale_heatmaps, + align_corners=align_corners, + mode='average') + else: + batch_heatmaps = multiscale_heatmaps[0] + # only keep tags at original scale + batch_tags = multiscale_tags[0] + + batch_outputs = tuple([batch_heatmaps, batch_tags]) + preds = self.decode(batch_outputs) + + if output_heatmaps: + pred_fields = [] + for _heatmaps, _tags in zip(batch_heatmaps.detach(), + batch_tags.detach()): + pred_fields.append(PixelData(heatmaps=_heatmaps, tags=_tags)) + + return preds, pred_fields + else: + return preds + + def _flip_tags(self, + tags: Tensor, + flip_indices: List[int], + shift_heatmap: bool = True): + """Flip the tagging heatmaps horizontally for test-time augmentation. 
+ + Args: + tags (Tensor): batched tagging heatmaps to flip + flip_indices (List[int]): The indices of each keypoint's symmetric + keypoint + shift_heatmap (bool): Shift the flipped heatmaps to align with the + original heatmaps and improve accuracy. Defaults to ``True`` + + Returns: + Tensor: flipped tagging heatmaps + """ + B, C, H, W = tags.shape + K = self.num_keypoints + L = self.tag_dim + + tags = tags.flip(-1) + + if self.tag_per_keypoint: + assert C == K * L + tags = tags.view(B, L, K, H, W) + tags = tags[:, :, flip_indices] + tags = tags.view(B, C, H, W) + + if shift_heatmap: + tags[..., 1:] = tags[..., :-1].clone() + + return tags + + def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor, Tensor]: + """Forward the network. The input is multi scale feature maps and the + output is the heatmaps and tags. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + tuple: + - heatmaps (Tensor): output heatmaps + - tags (Tensor): output tags + """ + + output = super().forward(feats) + heatmaps = output[:, :self.num_keypoints] + tags = output[:, self.num_keypoints:] + return heatmaps, tags + + def loss(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + feats (Tuple[Tensor]): The multi-stage features + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + train_cfg (dict): The runtime config for training process. + Defaults to {} + + Returns: + dict: A dictionary of losses. + """ + pred_heatmaps, pred_tags = self.forward(feats) + + if not self.tag_per_keypoint: + pred_tags = pred_tags.repeat((1, self.num_keypoints, 1, 1)) + + gt_heatmaps = torch.stack( + [d.gt_fields.heatmaps for d in batch_data_samples]) + gt_masks = torch.stack( + [d.gt_fields.heatmap_mask for d in batch_data_samples]) + keypoint_weights = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples + ]) + keypoint_indices = [ + d.gt_instance_labels.keypoint_indices for d in batch_data_samples + ] + + loss_kpt = self.loss_module.keypoint_loss(pred_heatmaps, gt_heatmaps, + keypoint_weights, gt_masks) + + loss_pull, loss_push = self.loss_module.tag_loss( + pred_tags, keypoint_indices) + + losses = { + 'loss_kpt': loss_kpt, + 'loss_pull': loss_pull, + 'loss_push': loss_push + } + + return losses diff --git a/mmpose/models/heads/heatmap_heads/cid_head.py b/mmpose/models/heads/heatmap_heads/cid_head.py index 39e0211a3e..42f6c50dcc 100644 --- a/mmpose/models/heads/heatmap_heads/cid_head.py +++ b/mmpose/models/heads/heatmap_heads/cid_head.py @@ -1,743 +1,743 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math -from typing import Dict, Optional, Sequence, Tuple, Union - -import numpy as np -import torch -import torch.nn as nn -from mmcv.cnn import build_conv_layer -from mmengine.model import BaseModule, ModuleDict -from mmengine.structures import InstanceData, PixelData -from torch import Tensor - -from mmpose.models.utils.tta import flip_heatmaps -from mmpose.registry import KEYPOINT_CODECS, MODELS -from mmpose.utils.typing import (ConfigType, Features, OptConfigType, - OptSampleList, Predictions) -from ..base_head import BaseHead - - -def smooth_heatmaps(heatmaps: Tensor, blur_kernel_size: int) -> Tensor: - """Smooth the heatmaps by blurring and averaging. - - Args: - heatmaps (Tensor): The heatmaps to smooth. - blur_kernel_size (int): The kernel size for blurring the heatmaps. - - Returns: - Tensor: The smoothed heatmaps. 
- """ - smoothed_heatmaps = torch.nn.functional.avg_pool2d( - heatmaps, blur_kernel_size, 1, (blur_kernel_size - 1) // 2) - smoothed_heatmaps = (heatmaps + smoothed_heatmaps) / 2.0 - return smoothed_heatmaps - - -class TruncSigmoid(nn.Sigmoid): - """A sigmoid activation function that truncates the output to the given - range. - - Args: - min (float, optional): The minimum value to clamp the output to. - Defaults to 0.0 - max (float, optional): The maximum value to clamp the output to. - Defaults to 1.0 - """ - - def __init__(self, min: float = 0.0, max: float = 1.0): - super(TruncSigmoid, self).__init__() - self.min = min - self.max = max - - def forward(self, input: Tensor) -> Tensor: - """Computes the truncated sigmoid activation of the input tensor.""" - output = torch.sigmoid(input) - output = output.clamp(min=self.min, max=self.max) - return output - - -class IIAModule(BaseModule): - """Instance Information Abstraction module introduced in `CID`. This module - extracts the feature representation vectors for each instance. - - Args: - in_channels (int): Number of channels in the input feature tensor - out_channels (int): Number of channels of the output heatmaps - clamp_delta (float, optional): A small value that prevents the sigmoid - activation from becoming saturated. Defaults to 1e-4. - init_cfg (Config, optional): Config to control the initialization. See - :attr:`default_init_cfg` for default settings - """ - - def __init__( - self, - in_channels: int, - out_channels: int, - clamp_delta: float = 1e-4, - init_cfg: OptConfigType = None, - ): - super().__init__(init_cfg=init_cfg) - - self.keypoint_root_conv = build_conv_layer( - dict( - type='Conv2d', - in_channels=in_channels, - out_channels=out_channels, - kernel_size=1)) - self.sigmoid = TruncSigmoid(min=clamp_delta, max=1 - clamp_delta) - - def forward(self, feats: Tensor): - heatmaps = self.keypoint_root_conv(feats) - heatmaps = self.sigmoid(heatmaps) - return heatmaps - - def _sample_feats(self, feats: Tensor, indices: Tensor) -> Tensor: - """Extract feature vectors at the specified indices from the input - feature map. - - Args: - feats (Tensor): Input feature map. - indices (Tensor): Indices of the feature vectors to extract. - - Returns: - Tensor: Extracted feature vectors. - """ - assert indices.dtype == torch.long - if indices.shape[1] == 3: - b, w, h = [ind.squeeze(-1) for ind in indices.split(1, -1)] - instance_feats = feats[b, :, h, w] - elif indices.shape[1] == 2: - w, h = [ind.squeeze(-1) for ind in indices.split(1, -1)] - instance_feats = feats[:, :, h, w] - instance_feats = instance_feats.permute(0, 2, 1) - instance_feats = instance_feats.reshape(-1, - instance_feats.shape[-1]) - - else: - raise ValueError(f'`indices` should have 2 or 3 channels, ' - f'but got f{indices.shape[1]}') - return instance_feats - - def _hierarchical_pool(self, heatmaps: Tensor) -> Tensor: - """Conduct max pooling on the input heatmaps with different kernel size - according to the input size. - - Args: - heatmaps (Tensor): Input heatmaps. - - Returns: - Tensor: Result of hierarchical pooling. 
- """ - map_size = (heatmaps.shape[-1] + heatmaps.shape[-2]) / 2.0 - if map_size > 300: - maxm = torch.nn.functional.max_pool2d(heatmaps, 7, 1, 3) - elif map_size > 200: - maxm = torch.nn.functional.max_pool2d(heatmaps, 5, 1, 2) - else: - maxm = torch.nn.functional.max_pool2d(heatmaps, 3, 1, 1) - return maxm - - def forward_train(self, feats: Tensor, instance_coords: Tensor, - instance_imgids: Tensor) -> Tuple[Tensor, Tensor]: - """Forward pass during training. - - Args: - feats (Tensor): Input feature tensor. - instance_coords (Tensor): Coordinates of the instance roots. - instance_imgids (Tensor): Sample indices of each instances - in the batch. - - Returns: - Tuple[Tensor, Tensor]: Extracted feature vectors and heatmaps - for the instances. - """ - heatmaps = self.forward(feats) - indices = torch.cat((instance_imgids[:, None], instance_coords), dim=1) - instance_feats = self._sample_feats(feats, indices) - - return instance_feats, heatmaps - - def forward_test( - self, feats: Tensor, test_cfg: Dict - ) -> Tuple[Optional[Tensor], Optional[Tensor], Optional[Tensor]]: - """Forward pass during testing. - - Args: - feats (Tensor): Input feature tensor. - test_cfg (Dict): Testing configuration, including: - - blur_kernel_size (int, optional): Kernel size for blurring - the heatmaps. Defaults to 3. - - max_instances (int, optional): Maximum number of instances - to extract. Defaults to 30. - - score_threshold (float, optional): Minimum score for - extracting an instance. Defaults to 0.01. - - flip_test (bool, optional): Whether to compute the average - of the heatmaps across the batch dimension. - Defaults to False. - - Returns: - A tuple of Tensor including extracted feature vectors, - coordinates, and scores of the instances. Any of these can be - empty Tensor if no instances are extracted. - """ - blur_kernel_size = test_cfg.get('blur_kernel_size', 3) - max_instances = test_cfg.get('max_instances', 30) - score_threshold = test_cfg.get('score_threshold', 0.01) - H, W = feats.shape[-2:] - - # compute heatmaps - heatmaps = self.forward(feats).narrow(1, -1, 1) - if test_cfg.get('flip_test', False): - heatmaps = heatmaps.mean(dim=0, keepdims=True) - smoothed_heatmaps = smooth_heatmaps(heatmaps, blur_kernel_size) - - # decode heatmaps - maximums = self._hierarchical_pool(smoothed_heatmaps) - maximums = torch.eq(maximums, smoothed_heatmaps).float() - maximums = (smoothed_heatmaps * maximums).reshape(-1) - scores, pos_ind = maximums.topk(max_instances, dim=0) - select_ind = (scores > (score_threshold)).nonzero().squeeze(1) - scores, pos_ind = scores[select_ind], pos_ind[select_ind] - - # sample feature vectors from feature map - instance_coords = torch.stack((pos_ind % W, pos_ind // W), dim=1) - instance_feats = self._sample_feats(feats, instance_coords) - - return instance_feats, instance_coords, scores - - -class ChannelAttention(nn.Module): - """Channel-wise attention module introduced in `CID`. - - Args: - in_channels (int): The number of channels of the input instance - vectors. - out_channels (int): The number of channels of the transformed instance - vectors. 
- """ - - def __init__(self, in_channels: int, out_channels: int): - super(ChannelAttention, self).__init__() - self.atn = nn.Linear(in_channels, out_channels) - - def forward(self, global_feats: Tensor, instance_feats: Tensor) -> Tensor: - """Applies attention to the channel dimension of the input tensor.""" - - instance_feats = self.atn(instance_feats).unsqueeze(2).unsqueeze(3) - return global_feats * instance_feats - - -class SpatialAttention(nn.Module): - """Spatial-wise attention module introduced in `CID`. - - Args: - in_channels (int): The number of channels of the input instance - vectors. - out_channels (int): The number of channels of the transformed instance - vectors. - """ - - def __init__(self, in_channels, out_channels): - super(SpatialAttention, self).__init__() - self.atn = nn.Linear(in_channels, out_channels) - self.feat_stride = 4 - self.conv = nn.Conv2d(3, 1, 5, 1, 2) - - def _get_pixel_coords(self, heatmap_size: Tuple, device: str = 'cpu'): - """Get pixel coordinates for each element in the heatmap. - - Args: - heatmap_size (tuple): Size of the heatmap in (W, H) format. - device (str): Device to put the resulting tensor on. - - Returns: - Tensor of shape (batch_size, num_pixels, 2) containing the pixel - coordinates for each element in the heatmap. - """ - w, h = heatmap_size - y, x = torch.meshgrid(torch.arange(h), torch.arange(w)) - pixel_coords = torch.stack((x, y), dim=-1).reshape(-1, 2) - pixel_coords = pixel_coords.float().to(device) + 0.5 - return pixel_coords - - def forward(self, global_feats: Tensor, instance_feats: Tensor, - instance_coords: Tensor) -> Tensor: - """Perform spatial attention. - - Args: - global_feats (Tensor): Tensor containing the global features. - instance_feats (Tensor): Tensor containing the instance feature - vectors. - instance_coords (Tensor): Tensor containing the root coordinates - of the instances. - - Returns: - Tensor containing the modulated global features. - """ - B, C, H, W = global_feats.size() - - instance_feats = self.atn(instance_feats).reshape(B, C, 1, 1) - feats = global_feats * instance_feats.expand_as(global_feats) - fsum = torch.sum(feats, dim=1, keepdim=True) - - pixel_coords = self._get_pixel_coords((W, H), feats.device) - relative_coords = instance_coords.reshape( - -1, 1, 2) - pixel_coords.reshape(1, -1, 2) - relative_coords = relative_coords.permute(0, 2, 1) / 32.0 - relative_coords = relative_coords.reshape(B, 2, H, W) - - input_feats = torch.cat((fsum, relative_coords), dim=1) - mask = self.conv(input_feats).sigmoid() - return global_feats * mask - - -class GFDModule(BaseModule): - """Global Feature Decoupling module introduced in `CID`. This module - extracts the decoupled heatmaps for each instance. - - Args: - in_channels (int): Number of channels in the input feature map - out_channels (int): Number of channels of the output heatmaps - for each instance - gfd_channels (int): Number of channels in the transformed feature map - clamp_delta (float, optional): A small value that prevents the sigmoid - activation from becoming saturated. Defaults to 1e-4. - init_cfg (Config, optional): Config to control the initialization. 
See - :attr:`default_init_cfg` for default settings - """ - - def __init__( - self, - in_channels: int, - out_channels: int, - gfd_channels: int, - clamp_delta: float = 1e-4, - init_cfg: OptConfigType = None, - ): - super().__init__(init_cfg=init_cfg) - - self.conv_down = build_conv_layer( - dict( - type='Conv2d', - in_channels=in_channels, - out_channels=gfd_channels, - kernel_size=1)) - - self.channel_attention = ChannelAttention(in_channels, gfd_channels) - self.spatial_attention = SpatialAttention(in_channels, gfd_channels) - self.fuse_attention = build_conv_layer( - dict( - type='Conv2d', - in_channels=gfd_channels * 2, - out_channels=gfd_channels, - kernel_size=1)) - self.heatmap_conv = build_conv_layer( - dict( - type='Conv2d', - in_channels=gfd_channels, - out_channels=out_channels, - kernel_size=1)) - self.sigmoid = TruncSigmoid(min=clamp_delta, max=1 - clamp_delta) - - def forward( - self, - feats: Tensor, - instance_feats: Tensor, - instance_coords: Tensor, - instance_imgids: Tensor, - ) -> Tensor: - """Extract decoupled heatmaps for each instance. - - Args: - feats (Tensor): Input feature maps. - instance_feats (Tensor): Tensor containing the instance feature - vectors. - instance_coords (Tensor): Tensor containing the root coordinates - of the instances. - instance_imgids (Tensor): Sample indices of each instances - in the batch. - - Returns: - A tensor containing decoupled heatmaps. - """ - - global_feats = self.conv_down(feats) - global_feats = global_feats[instance_imgids] - cond_instance_feats = torch.cat( - (self.channel_attention(global_feats, instance_feats), - self.spatial_attention(global_feats, instance_feats, - instance_coords)), - dim=1) - - cond_instance_feats = self.fuse_attention(cond_instance_feats) - cond_instance_feats = torch.nn.functional.relu(cond_instance_feats) - cond_instance_feats = self.heatmap_conv(cond_instance_feats) - heatmaps = self.sigmoid(cond_instance_feats) - - return heatmaps - - -@MODELS.register_module() -class CIDHead(BaseHead): - """Contextual Instance Decoupling head introduced in `Contextual Instance - Decoupling for Robust Multi-Person Pose Estimation (CID)`_ by Wang et al - (2022). The head is composed of an Instance Information Abstraction (IIA) - module and a Global Feature Decoupling (GFD) module. - - Args: - in_channels (int | Sequence[int]): Number of channels in the input - feature map - num_keypoints (int): Number of keypoints - gfd_channels (int): Number of filters in GFD module - max_train_instances (int): Maximum number of instances in a batch - during training. Defaults to 200 - heatmap_loss (Config): Config of the heatmap loss. Defaults to use - :class:`KeypointMSELoss` - coupled_heatmap_loss (Config): Config of the loss for coupled heatmaps. - Defaults to use :class:`SoftWeightSmoothL1Loss` - decoupled_heatmap_loss (Config): Config of the loss for decoupled - heatmaps. Defaults to use :class:`SoftWeightSmoothL1Loss` - contrastive_loss (Config): Config of the contrastive loss for - representation vectors of instances. Defaults to use - :class:`InfoNCELoss` - decoder (Config, optional): The decoder config that controls decoding - keypoint coordinates from the network output. Defaults to ``None`` - init_cfg (Config, optional): Config to control the initialization. See - :attr:`default_init_cfg` for default settings - - .. 
_`CID`: https://openaccess.thecvf.com/content/CVPR2022/html/Wang_ - Contextual_Instance_Decoupling_for_Robust_Multi-Person_Pose_Estimation_ - CVPR_2022_paper.html - """ - _version = 2 - - def __init__(self, - in_channels: Union[int, Sequence[int]], - gfd_channels: int, - num_keypoints: int, - prior_prob: float = 0.01, - coupled_heatmap_loss: OptConfigType = dict( - type='FocalHeatmapLoss'), - decoupled_heatmap_loss: OptConfigType = dict( - type='FocalHeatmapLoss'), - contrastive_loss: OptConfigType = dict(type='InfoNCELoss'), - decoder: OptConfigType = None, - init_cfg: OptConfigType = None): - - if init_cfg is None: - init_cfg = self.default_init_cfg - - super().__init__(init_cfg) - - self.in_channels = in_channels - self.num_keypoints = num_keypoints - if decoder is not None: - self.decoder = KEYPOINT_CODECS.build(decoder) - else: - self.decoder = None - - # build sub-modules - bias_value = -math.log((1 - prior_prob) / prior_prob) - self.iia_module = IIAModule( - in_channels, - num_keypoints + 1, - init_cfg=init_cfg + [ - dict( - type='Normal', - layer=['Conv2d', 'Linear'], - std=0.001, - override=dict( - name='keypoint_root_conv', - type='Normal', - std=0.001, - bias=bias_value)) - ]) - self.gfd_module = GFDModule( - in_channels, - num_keypoints, - gfd_channels, - init_cfg=init_cfg + [ - dict( - type='Normal', - layer=['Conv2d', 'Linear'], - std=0.001, - override=dict( - name='heatmap_conv', - type='Normal', - std=0.001, - bias=bias_value)) - ]) - - # build losses - self.loss_module = ModuleDict( - dict( - heatmap_coupled=MODELS.build(coupled_heatmap_loss), - heatmap_decoupled=MODELS.build(decoupled_heatmap_loss), - contrastive=MODELS.build(contrastive_loss), - )) - - # Register the hook to automatically convert old version state dicts - self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) - - @property - def default_init_cfg(self): - init_cfg = [ - dict(type='Normal', layer=['Conv2d', 'Linear'], std=0.001), - dict(type='Constant', layer='BatchNorm2d', val=1) - ] - return init_cfg - - def forward(self, feats: Tuple[Tensor]) -> Tensor: - """Forward the network. The input is multi scale feature maps and the - output is the heatmap. - - Args: - feats (Tuple[Tensor]): Multi scale feature maps. - - Returns: - Tensor: output heatmap. - """ - feats = feats[-1] - instance_info = self.iia_module.forward_test(feats, {}) - instance_feats, instance_coords, instance_scores = instance_info - instance_imgids = torch.zeros( - instance_coords.size(0), dtype=torch.long, device=feats.device) - instance_heatmaps = self.gfd_module(feats, instance_feats, - instance_coords, instance_imgids) - - return instance_heatmaps - - def predict(self, - feats: Features, - batch_data_samples: OptSampleList, - test_cfg: ConfigType = {}) -> Predictions: - """Predict results from features. - - Args: - feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage - features (or multiple multi-stage features in TTA) - batch_data_samples (List[:obj:`PoseDataSample`]): The batch - data samples - test_cfg (dict): The runtime config for testing process. Defaults - to {} - - Returns: - Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If - ``test_cfg['output_heatmap']==True``, return both pose and heatmap - prediction; otherwise only return the pose prediction. 
- - The pose prediction is a list of ``InstanceData``, each contains - the following fields: - - - keypoints (np.ndarray): predicted keypoint coordinates in - shape (num_instances, K, D) where K is the keypoint number - and D is the keypoint dimension - - keypoint_scores (np.ndarray): predicted keypoint scores in - shape (num_instances, K) - - The heatmap prediction is a list of ``PixelData``, each contains - the following fields: - - - heatmaps (Tensor): The predicted heatmaps in shape (K, h, w) - """ - metainfo = batch_data_samples[0].metainfo - - if test_cfg.get('flip_test', False): - assert isinstance(feats, list) and len(feats) == 2 - - feats_flipped = flip_heatmaps(feats[1][-1], shift_heatmap=False) - feats = torch.cat((feats[0][-1], feats_flipped)) - else: - feats = feats[-1] - - instance_info = self.iia_module.forward_test(feats, test_cfg) - instance_feats, instance_coords, instance_scores = instance_info - if len(instance_coords) > 0: - instance_imgids = torch.zeros( - instance_coords.size(0), dtype=torch.long, device=feats.device) - if test_cfg.get('flip_test', False): - instance_coords = torch.cat((instance_coords, instance_coords)) - instance_imgids = torch.cat( - (instance_imgids, instance_imgids + 1)) - instance_heatmaps = self.gfd_module(feats, instance_feats, - instance_coords, - instance_imgids) - if test_cfg.get('flip_test', False): - flip_indices = batch_data_samples[0].metainfo['flip_indices'] - instance_heatmaps, instance_heatmaps_flip = torch.chunk( - instance_heatmaps, 2, dim=0) - instance_heatmaps_flip = \ - instance_heatmaps_flip[:, flip_indices, :, :] - instance_heatmaps = (instance_heatmaps + - instance_heatmaps_flip) / 2.0 - instance_heatmaps = smooth_heatmaps( - instance_heatmaps, test_cfg.get('blur_kernel_size', 3)) - - preds = self.decode((instance_heatmaps, instance_scores[:, None])) - preds = InstanceData.cat(preds) - preds.keypoints[..., 0] += metainfo['input_size'][ - 0] / instance_heatmaps.shape[-1] / 2.0 - preds.keypoints[..., 1] += metainfo['input_size'][ - 1] / instance_heatmaps.shape[-2] / 2.0 - preds = [preds] - - else: - preds = [ - InstanceData( - keypoints=np.empty((0, self.num_keypoints, 2)), - keypoint_scores=np.empty((0, self.num_keypoints))) - ] - instance_heatmaps = torch.empty(0, self.num_keypoints, - *feats.shape[-2:]) - - if test_cfg.get('output_heatmaps', False): - pred_fields = [ - PixelData( - heatmaps=instance_heatmaps.reshape( - -1, *instance_heatmaps.shape[-2:])) - ] - return preds, pred_fields - else: - return preds - - def loss(self, - feats: Tuple[Tensor], - batch_data_samples: OptSampleList, - train_cfg: ConfigType = {}) -> dict: - """Calculate losses from a batch of inputs and data samples. - - Args: - feats (Tuple[Tensor]): The multi-stage features - batch_data_samples (List[:obj:`PoseDataSample`]): The batch - data samples - train_cfg (dict): The runtime config for training process. - Defaults to {} - - Returns: - dict: A dictionary of losses. 
- """ - - # load targets - gt_heatmaps, gt_instance_coords, keypoint_weights = [], [], [] - heatmap_mask = [] - instance_imgids, gt_instance_heatmaps = [], [] - for i, d in enumerate(batch_data_samples): - gt_heatmaps.append(d.gt_fields.heatmaps) - gt_instance_coords.append(d.gt_instance_labels.instance_coords) - keypoint_weights.append(d.gt_instance_labels.keypoint_weights) - instance_imgids.append( - torch.ones( - len(d.gt_instance_labels.instance_coords), - dtype=torch.long) * i) - - instance_heatmaps = d.gt_fields.instance_heatmaps.reshape( - -1, self.num_keypoints, - *d.gt_fields.instance_heatmaps.shape[1:]) - gt_instance_heatmaps.append(instance_heatmaps) - - if 'heatmap_mask' in d.gt_fields: - heatmap_mask.append(d.gt_fields.heatmap_mask) - - gt_heatmaps = torch.stack(gt_heatmaps) - heatmap_mask = torch.stack(heatmap_mask) if heatmap_mask else None - - gt_instance_coords = torch.cat(gt_instance_coords, dim=0) - gt_instance_heatmaps = torch.cat(gt_instance_heatmaps, dim=0) - keypoint_weights = torch.cat(keypoint_weights, dim=0) - instance_imgids = torch.cat(instance_imgids).to(gt_heatmaps.device) - - # feed-forward - feats = feats[-1] - pred_instance_feats, pred_heatmaps = self.iia_module.forward_train( - feats, gt_instance_coords, instance_imgids) - - # conpute contrastive loss - contrastive_loss = 0 - for i in range(len(batch_data_samples)): - pred_instance_feat = pred_instance_feats[instance_imgids == i] - contrastive_loss += self.loss_module['contrastive']( - pred_instance_feat) - contrastive_loss = contrastive_loss / max(1, len(instance_imgids)) - - # limit the number of instances - max_train_instances = train_cfg.get('max_train_instances', -1) - if (max_train_instances > 0 - and len(instance_imgids) > max_train_instances): - selected_indices = torch.randperm( - len(instance_imgids), - device=gt_heatmaps.device, - dtype=torch.long)[:max_train_instances] - gt_instance_coords = gt_instance_coords[selected_indices] - keypoint_weights = keypoint_weights[selected_indices] - gt_instance_heatmaps = gt_instance_heatmaps[selected_indices] - instance_imgids = instance_imgids[selected_indices] - pred_instance_feats = pred_instance_feats[selected_indices] - - # calculate the decoupled heatmaps for each instance - pred_instance_heatmaps = self.gfd_module(feats, pred_instance_feats, - gt_instance_coords, - instance_imgids) - - # calculate losses - losses = { - 'loss/heatmap_coupled': - self.loss_module['heatmap_coupled'](pred_heatmaps, gt_heatmaps, - None, heatmap_mask) - } - if len(instance_imgids) > 0: - losses.update({ - 'loss/heatmap_decoupled': - self.loss_module['heatmap_decoupled'](pred_instance_heatmaps, - gt_instance_heatmaps, - keypoint_weights), - 'loss/contrastive': - contrastive_loss - }) - - return losses - - def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, - **kwargs): - """A hook function to convert old-version state dict of - :class:`CIDHead` (before MMPose v1.0.0) to a compatible format - of :class:`CIDHead`. - - The hook will be automatically registered during initialization. 
- """ - version = local_meta.get('version', None) - if version and version >= self._version: - return - - # convert old-version state dict - keys = list(state_dict.keys()) - for k in keys: - if 'keypoint_center_conv' in k: - v = state_dict.pop(k) - k = k.replace('keypoint_center_conv', - 'iia_module.keypoint_root_conv') - state_dict[k] = v - - if 'conv_down' in k: - v = state_dict.pop(k) - k = k.replace('conv_down', 'gfd_module.conv_down') - state_dict[k] = v - - if 'c_attn' in k: - v = state_dict.pop(k) - k = k.replace('c_attn', 'gfd_module.channel_attention') - state_dict[k] = v - - if 's_attn' in k: - v = state_dict.pop(k) - k = k.replace('s_attn', 'gfd_module.spatial_attention') - state_dict[k] = v - - if 'fuse_attn' in k: - v = state_dict.pop(k) - k = k.replace('fuse_attn', 'gfd_module.fuse_attention') - state_dict[k] = v - - if 'heatmap_conv' in k: - v = state_dict.pop(k) - k = k.replace('heatmap_conv', 'gfd_module.heatmap_conv') - state_dict[k] = v +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import Dict, Optional, Sequence, Tuple, Union + +import numpy as np +import torch +import torch.nn as nn +from mmcv.cnn import build_conv_layer +from mmengine.model import BaseModule, ModuleDict +from mmengine.structures import InstanceData, PixelData +from torch import Tensor + +from mmpose.models.utils.tta import flip_heatmaps +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.typing import (ConfigType, Features, OptConfigType, + OptSampleList, Predictions) +from ..base_head import BaseHead + + +def smooth_heatmaps(heatmaps: Tensor, blur_kernel_size: int) -> Tensor: + """Smooth the heatmaps by blurring and averaging. + + Args: + heatmaps (Tensor): The heatmaps to smooth. + blur_kernel_size (int): The kernel size for blurring the heatmaps. + + Returns: + Tensor: The smoothed heatmaps. + """ + smoothed_heatmaps = torch.nn.functional.avg_pool2d( + heatmaps, blur_kernel_size, 1, (blur_kernel_size - 1) // 2) + smoothed_heatmaps = (heatmaps + smoothed_heatmaps) / 2.0 + return smoothed_heatmaps + + +class TruncSigmoid(nn.Sigmoid): + """A sigmoid activation function that truncates the output to the given + range. + + Args: + min (float, optional): The minimum value to clamp the output to. + Defaults to 0.0 + max (float, optional): The maximum value to clamp the output to. + Defaults to 1.0 + """ + + def __init__(self, min: float = 0.0, max: float = 1.0): + super(TruncSigmoid, self).__init__() + self.min = min + self.max = max + + def forward(self, input: Tensor) -> Tensor: + """Computes the truncated sigmoid activation of the input tensor.""" + output = torch.sigmoid(input) + output = output.clamp(min=self.min, max=self.max) + return output + + +class IIAModule(BaseModule): + """Instance Information Abstraction module introduced in `CID`. This module + extracts the feature representation vectors for each instance. + + Args: + in_channels (int): Number of channels in the input feature tensor + out_channels (int): Number of channels of the output heatmaps + clamp_delta (float, optional): A small value that prevents the sigmoid + activation from becoming saturated. Defaults to 1e-4. + init_cfg (Config, optional): Config to control the initialization. 
See + :attr:`default_init_cfg` for default settings + """ + + def __init__( + self, + in_channels: int, + out_channels: int, + clamp_delta: float = 1e-4, + init_cfg: OptConfigType = None, + ): + super().__init__(init_cfg=init_cfg) + + self.keypoint_root_conv = build_conv_layer( + dict( + type='Conv2d', + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1)) + self.sigmoid = TruncSigmoid(min=clamp_delta, max=1 - clamp_delta) + + def forward(self, feats: Tensor): + heatmaps = self.keypoint_root_conv(feats) + heatmaps = self.sigmoid(heatmaps) + return heatmaps + + def _sample_feats(self, feats: Tensor, indices: Tensor) -> Tensor: + """Extract feature vectors at the specified indices from the input + feature map. + + Args: + feats (Tensor): Input feature map. + indices (Tensor): Indices of the feature vectors to extract. + + Returns: + Tensor: Extracted feature vectors. + """ + assert indices.dtype == torch.long + if indices.shape[1] == 3: + b, w, h = [ind.squeeze(-1) for ind in indices.split(1, -1)] + instance_feats = feats[b, :, h, w] + elif indices.shape[1] == 2: + w, h = [ind.squeeze(-1) for ind in indices.split(1, -1)] + instance_feats = feats[:, :, h, w] + instance_feats = instance_feats.permute(0, 2, 1) + instance_feats = instance_feats.reshape(-1, + instance_feats.shape[-1]) + + else: + raise ValueError(f'`indices` should have 2 or 3 channels, ' + f'but got f{indices.shape[1]}') + return instance_feats + + def _hierarchical_pool(self, heatmaps: Tensor) -> Tensor: + """Conduct max pooling on the input heatmaps with different kernel size + according to the input size. + + Args: + heatmaps (Tensor): Input heatmaps. + + Returns: + Tensor: Result of hierarchical pooling. + """ + map_size = (heatmaps.shape[-1] + heatmaps.shape[-2]) / 2.0 + if map_size > 300: + maxm = torch.nn.functional.max_pool2d(heatmaps, 7, 1, 3) + elif map_size > 200: + maxm = torch.nn.functional.max_pool2d(heatmaps, 5, 1, 2) + else: + maxm = torch.nn.functional.max_pool2d(heatmaps, 3, 1, 1) + return maxm + + def forward_train(self, feats: Tensor, instance_coords: Tensor, + instance_imgids: Tensor) -> Tuple[Tensor, Tensor]: + """Forward pass during training. + + Args: + feats (Tensor): Input feature tensor. + instance_coords (Tensor): Coordinates of the instance roots. + instance_imgids (Tensor): Sample indices of each instances + in the batch. + + Returns: + Tuple[Tensor, Tensor]: Extracted feature vectors and heatmaps + for the instances. + """ + heatmaps = self.forward(feats) + indices = torch.cat((instance_imgids[:, None], instance_coords), dim=1) + instance_feats = self._sample_feats(feats, indices) + + return instance_feats, heatmaps + + def forward_test( + self, feats: Tensor, test_cfg: Dict + ) -> Tuple[Optional[Tensor], Optional[Tensor], Optional[Tensor]]: + """Forward pass during testing. + + Args: + feats (Tensor): Input feature tensor. + test_cfg (Dict): Testing configuration, including: + - blur_kernel_size (int, optional): Kernel size for blurring + the heatmaps. Defaults to 3. + - max_instances (int, optional): Maximum number of instances + to extract. Defaults to 30. + - score_threshold (float, optional): Minimum score for + extracting an instance. Defaults to 0.01. + - flip_test (bool, optional): Whether to compute the average + of the heatmaps across the batch dimension. + Defaults to False. + + Returns: + A tuple of Tensor including extracted feature vectors, + coordinates, and scores of the instances. Any of these can be + empty Tensor if no instances are extracted. 
+ """ + blur_kernel_size = test_cfg.get('blur_kernel_size', 3) + max_instances = test_cfg.get('max_instances', 30) + score_threshold = test_cfg.get('score_threshold', 0.01) + H, W = feats.shape[-2:] + + # compute heatmaps + heatmaps = self.forward(feats).narrow(1, -1, 1) + if test_cfg.get('flip_test', False): + heatmaps = heatmaps.mean(dim=0, keepdims=True) + smoothed_heatmaps = smooth_heatmaps(heatmaps, blur_kernel_size) + + # decode heatmaps + maximums = self._hierarchical_pool(smoothed_heatmaps) + maximums = torch.eq(maximums, smoothed_heatmaps).float() + maximums = (smoothed_heatmaps * maximums).reshape(-1) + scores, pos_ind = maximums.topk(max_instances, dim=0) + select_ind = (scores > (score_threshold)).nonzero().squeeze(1) + scores, pos_ind = scores[select_ind], pos_ind[select_ind] + + # sample feature vectors from feature map + instance_coords = torch.stack((pos_ind % W, pos_ind // W), dim=1) + instance_feats = self._sample_feats(feats, instance_coords) + + return instance_feats, instance_coords, scores + + +class ChannelAttention(nn.Module): + """Channel-wise attention module introduced in `CID`. + + Args: + in_channels (int): The number of channels of the input instance + vectors. + out_channels (int): The number of channels of the transformed instance + vectors. + """ + + def __init__(self, in_channels: int, out_channels: int): + super(ChannelAttention, self).__init__() + self.atn = nn.Linear(in_channels, out_channels) + + def forward(self, global_feats: Tensor, instance_feats: Tensor) -> Tensor: + """Applies attention to the channel dimension of the input tensor.""" + + instance_feats = self.atn(instance_feats).unsqueeze(2).unsqueeze(3) + return global_feats * instance_feats + + +class SpatialAttention(nn.Module): + """Spatial-wise attention module introduced in `CID`. + + Args: + in_channels (int): The number of channels of the input instance + vectors. + out_channels (int): The number of channels of the transformed instance + vectors. + """ + + def __init__(self, in_channels, out_channels): + super(SpatialAttention, self).__init__() + self.atn = nn.Linear(in_channels, out_channels) + self.feat_stride = 4 + self.conv = nn.Conv2d(3, 1, 5, 1, 2) + + def _get_pixel_coords(self, heatmap_size: Tuple, device: str = 'cpu'): + """Get pixel coordinates for each element in the heatmap. + + Args: + heatmap_size (tuple): Size of the heatmap in (W, H) format. + device (str): Device to put the resulting tensor on. + + Returns: + Tensor of shape (batch_size, num_pixels, 2) containing the pixel + coordinates for each element in the heatmap. + """ + w, h = heatmap_size + y, x = torch.meshgrid(torch.arange(h), torch.arange(w)) + pixel_coords = torch.stack((x, y), dim=-1).reshape(-1, 2) + pixel_coords = pixel_coords.float().to(device) + 0.5 + return pixel_coords + + def forward(self, global_feats: Tensor, instance_feats: Tensor, + instance_coords: Tensor) -> Tensor: + """Perform spatial attention. + + Args: + global_feats (Tensor): Tensor containing the global features. + instance_feats (Tensor): Tensor containing the instance feature + vectors. + instance_coords (Tensor): Tensor containing the root coordinates + of the instances. + + Returns: + Tensor containing the modulated global features. 
+ """ + B, C, H, W = global_feats.size() + + instance_feats = self.atn(instance_feats).reshape(B, C, 1, 1) + feats = global_feats * instance_feats.expand_as(global_feats) + fsum = torch.sum(feats, dim=1, keepdim=True) + + pixel_coords = self._get_pixel_coords((W, H), feats.device) + relative_coords = instance_coords.reshape( + -1, 1, 2) - pixel_coords.reshape(1, -1, 2) + relative_coords = relative_coords.permute(0, 2, 1) / 32.0 + relative_coords = relative_coords.reshape(B, 2, H, W) + + input_feats = torch.cat((fsum, relative_coords), dim=1) + mask = self.conv(input_feats).sigmoid() + return global_feats * mask + + +class GFDModule(BaseModule): + """Global Feature Decoupling module introduced in `CID`. This module + extracts the decoupled heatmaps for each instance. + + Args: + in_channels (int): Number of channels in the input feature map + out_channels (int): Number of channels of the output heatmaps + for each instance + gfd_channels (int): Number of channels in the transformed feature map + clamp_delta (float, optional): A small value that prevents the sigmoid + activation from becoming saturated. Defaults to 1e-4. + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + """ + + def __init__( + self, + in_channels: int, + out_channels: int, + gfd_channels: int, + clamp_delta: float = 1e-4, + init_cfg: OptConfigType = None, + ): + super().__init__(init_cfg=init_cfg) + + self.conv_down = build_conv_layer( + dict( + type='Conv2d', + in_channels=in_channels, + out_channels=gfd_channels, + kernel_size=1)) + + self.channel_attention = ChannelAttention(in_channels, gfd_channels) + self.spatial_attention = SpatialAttention(in_channels, gfd_channels) + self.fuse_attention = build_conv_layer( + dict( + type='Conv2d', + in_channels=gfd_channels * 2, + out_channels=gfd_channels, + kernel_size=1)) + self.heatmap_conv = build_conv_layer( + dict( + type='Conv2d', + in_channels=gfd_channels, + out_channels=out_channels, + kernel_size=1)) + self.sigmoid = TruncSigmoid(min=clamp_delta, max=1 - clamp_delta) + + def forward( + self, + feats: Tensor, + instance_feats: Tensor, + instance_coords: Tensor, + instance_imgids: Tensor, + ) -> Tensor: + """Extract decoupled heatmaps for each instance. + + Args: + feats (Tensor): Input feature maps. + instance_feats (Tensor): Tensor containing the instance feature + vectors. + instance_coords (Tensor): Tensor containing the root coordinates + of the instances. + instance_imgids (Tensor): Sample indices of each instances + in the batch. + + Returns: + A tensor containing decoupled heatmaps. + """ + + global_feats = self.conv_down(feats) + global_feats = global_feats[instance_imgids] + cond_instance_feats = torch.cat( + (self.channel_attention(global_feats, instance_feats), + self.spatial_attention(global_feats, instance_feats, + instance_coords)), + dim=1) + + cond_instance_feats = self.fuse_attention(cond_instance_feats) + cond_instance_feats = torch.nn.functional.relu(cond_instance_feats) + cond_instance_feats = self.heatmap_conv(cond_instance_feats) + heatmaps = self.sigmoid(cond_instance_feats) + + return heatmaps + + +@MODELS.register_module() +class CIDHead(BaseHead): + """Contextual Instance Decoupling head introduced in `Contextual Instance + Decoupling for Robust Multi-Person Pose Estimation (CID)`_ by Wang et al + (2022). The head is composed of an Instance Information Abstraction (IIA) + module and a Global Feature Decoupling (GFD) module. 
+ + Args: + in_channels (int | Sequence[int]): Number of channels in the input + feature map + num_keypoints (int): Number of keypoints + gfd_channels (int): Number of filters in GFD module + max_train_instances (int): Maximum number of instances in a batch + during training. Defaults to 200 + heatmap_loss (Config): Config of the heatmap loss. Defaults to use + :class:`KeypointMSELoss` + coupled_heatmap_loss (Config): Config of the loss for coupled heatmaps. + Defaults to use :class:`SoftWeightSmoothL1Loss` + decoupled_heatmap_loss (Config): Config of the loss for decoupled + heatmaps. Defaults to use :class:`SoftWeightSmoothL1Loss` + contrastive_loss (Config): Config of the contrastive loss for + representation vectors of instances. Defaults to use + :class:`InfoNCELoss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. _`CID`: https://openaccess.thecvf.com/content/CVPR2022/html/Wang_ + Contextual_Instance_Decoupling_for_Robust_Multi-Person_Pose_Estimation_ + CVPR_2022_paper.html + """ + _version = 2 + + def __init__(self, + in_channels: Union[int, Sequence[int]], + gfd_channels: int, + num_keypoints: int, + prior_prob: float = 0.01, + coupled_heatmap_loss: OptConfigType = dict( + type='FocalHeatmapLoss'), + decoupled_heatmap_loss: OptConfigType = dict( + type='FocalHeatmapLoss'), + contrastive_loss: OptConfigType = dict(type='InfoNCELoss'), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.in_channels = in_channels + self.num_keypoints = num_keypoints + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + # build sub-modules + bias_value = -math.log((1 - prior_prob) / prior_prob) + self.iia_module = IIAModule( + in_channels, + num_keypoints + 1, + init_cfg=init_cfg + [ + dict( + type='Normal', + layer=['Conv2d', 'Linear'], + std=0.001, + override=dict( + name='keypoint_root_conv', + type='Normal', + std=0.001, + bias=bias_value)) + ]) + self.gfd_module = GFDModule( + in_channels, + num_keypoints, + gfd_channels, + init_cfg=init_cfg + [ + dict( + type='Normal', + layer=['Conv2d', 'Linear'], + std=0.001, + override=dict( + name='heatmap_conv', + type='Normal', + std=0.001, + bias=bias_value)) + ]) + + # build losses + self.loss_module = ModuleDict( + dict( + heatmap_coupled=MODELS.build(coupled_heatmap_loss), + heatmap_decoupled=MODELS.build(decoupled_heatmap_loss), + contrastive=MODELS.build(contrastive_loss), + )) + + # Register the hook to automatically convert old version state dicts + self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) + + @property + def default_init_cfg(self): + init_cfg = [ + dict(type='Normal', layer=['Conv2d', 'Linear'], std=0.001), + dict(type='Constant', layer='BatchNorm2d', val=1) + ] + return init_cfg + + def forward(self, feats: Tuple[Tensor]) -> Tensor: + """Forward the network. The input is multi scale feature maps and the + output is the heatmap. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + Tensor: output heatmap. 
+ """ + feats = feats[-1] + instance_info = self.iia_module.forward_test(feats, {}) + instance_feats, instance_coords, instance_scores = instance_info + instance_imgids = torch.zeros( + instance_coords.size(0), dtype=torch.long, device=feats.device) + instance_heatmaps = self.gfd_module(feats, instance_feats, + instance_coords, instance_imgids) + + return instance_heatmaps + + def predict(self, + feats: Features, + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from features. + + Args: + feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage + features (or multiple multi-stage features in TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + test_cfg (dict): The runtime config for testing process. Defaults + to {} + + Returns: + Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If + ``test_cfg['output_heatmap']==True``, return both pose and heatmap + prediction; otherwise only return the pose prediction. + + The pose prediction is a list of ``InstanceData``, each contains + the following fields: + + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + + The heatmap prediction is a list of ``PixelData``, each contains + the following fields: + + - heatmaps (Tensor): The predicted heatmaps in shape (K, h, w) + """ + metainfo = batch_data_samples[0].metainfo + + if test_cfg.get('flip_test', False): + assert isinstance(feats, list) and len(feats) == 2 + + feats_flipped = flip_heatmaps(feats[1][-1], shift_heatmap=False) + feats = torch.cat((feats[0][-1], feats_flipped)) + else: + feats = feats[-1] + + instance_info = self.iia_module.forward_test(feats, test_cfg) + instance_feats, instance_coords, instance_scores = instance_info + if len(instance_coords) > 0: + instance_imgids = torch.zeros( + instance_coords.size(0), dtype=torch.long, device=feats.device) + if test_cfg.get('flip_test', False): + instance_coords = torch.cat((instance_coords, instance_coords)) + instance_imgids = torch.cat( + (instance_imgids, instance_imgids + 1)) + instance_heatmaps = self.gfd_module(feats, instance_feats, + instance_coords, + instance_imgids) + if test_cfg.get('flip_test', False): + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + instance_heatmaps, instance_heatmaps_flip = torch.chunk( + instance_heatmaps, 2, dim=0) + instance_heatmaps_flip = \ + instance_heatmaps_flip[:, flip_indices, :, :] + instance_heatmaps = (instance_heatmaps + + instance_heatmaps_flip) / 2.0 + instance_heatmaps = smooth_heatmaps( + instance_heatmaps, test_cfg.get('blur_kernel_size', 3)) + + preds = self.decode((instance_heatmaps, instance_scores[:, None])) + preds = InstanceData.cat(preds) + preds.keypoints[..., 0] += metainfo['input_size'][ + 0] / instance_heatmaps.shape[-1] / 2.0 + preds.keypoints[..., 1] += metainfo['input_size'][ + 1] / instance_heatmaps.shape[-2] / 2.0 + preds = [preds] + + else: + preds = [ + InstanceData( + keypoints=np.empty((0, self.num_keypoints, 2)), + keypoint_scores=np.empty((0, self.num_keypoints))) + ] + instance_heatmaps = torch.empty(0, self.num_keypoints, + *feats.shape[-2:]) + + if test_cfg.get('output_heatmaps', False): + pred_fields = [ + PixelData( + heatmaps=instance_heatmaps.reshape( + -1, *instance_heatmaps.shape[-2:])) + ] + return preds, pred_fields + else: + return preds + + def loss(self, 
+ feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + feats (Tuple[Tensor]): The multi-stage features + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + train_cfg (dict): The runtime config for training process. + Defaults to {} + + Returns: + dict: A dictionary of losses. + """ + + # load targets + gt_heatmaps, gt_instance_coords, keypoint_weights = [], [], [] + heatmap_mask = [] + instance_imgids, gt_instance_heatmaps = [], [] + for i, d in enumerate(batch_data_samples): + gt_heatmaps.append(d.gt_fields.heatmaps) + gt_instance_coords.append(d.gt_instance_labels.instance_coords) + keypoint_weights.append(d.gt_instance_labels.keypoint_weights) + instance_imgids.append( + torch.ones( + len(d.gt_instance_labels.instance_coords), + dtype=torch.long) * i) + + instance_heatmaps = d.gt_fields.instance_heatmaps.reshape( + -1, self.num_keypoints, + *d.gt_fields.instance_heatmaps.shape[1:]) + gt_instance_heatmaps.append(instance_heatmaps) + + if 'heatmap_mask' in d.gt_fields: + heatmap_mask.append(d.gt_fields.heatmap_mask) + + gt_heatmaps = torch.stack(gt_heatmaps) + heatmap_mask = torch.stack(heatmap_mask) if heatmap_mask else None + + gt_instance_coords = torch.cat(gt_instance_coords, dim=0) + gt_instance_heatmaps = torch.cat(gt_instance_heatmaps, dim=0) + keypoint_weights = torch.cat(keypoint_weights, dim=0) + instance_imgids = torch.cat(instance_imgids).to(gt_heatmaps.device) + + # feed-forward + feats = feats[-1] + pred_instance_feats, pred_heatmaps = self.iia_module.forward_train( + feats, gt_instance_coords, instance_imgids) + + # conpute contrastive loss + contrastive_loss = 0 + for i in range(len(batch_data_samples)): + pred_instance_feat = pred_instance_feats[instance_imgids == i] + contrastive_loss += self.loss_module['contrastive']( + pred_instance_feat) + contrastive_loss = contrastive_loss / max(1, len(instance_imgids)) + + # limit the number of instances + max_train_instances = train_cfg.get('max_train_instances', -1) + if (max_train_instances > 0 + and len(instance_imgids) > max_train_instances): + selected_indices = torch.randperm( + len(instance_imgids), + device=gt_heatmaps.device, + dtype=torch.long)[:max_train_instances] + gt_instance_coords = gt_instance_coords[selected_indices] + keypoint_weights = keypoint_weights[selected_indices] + gt_instance_heatmaps = gt_instance_heatmaps[selected_indices] + instance_imgids = instance_imgids[selected_indices] + pred_instance_feats = pred_instance_feats[selected_indices] + + # calculate the decoupled heatmaps for each instance + pred_instance_heatmaps = self.gfd_module(feats, pred_instance_feats, + gt_instance_coords, + instance_imgids) + + # calculate losses + losses = { + 'loss/heatmap_coupled': + self.loss_module['heatmap_coupled'](pred_heatmaps, gt_heatmaps, + None, heatmap_mask) + } + if len(instance_imgids) > 0: + losses.update({ + 'loss/heatmap_decoupled': + self.loss_module['heatmap_decoupled'](pred_instance_heatmaps, + gt_instance_heatmaps, + keypoint_weights), + 'loss/contrastive': + contrastive_loss + }) + + return losses + + def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, + **kwargs): + """A hook function to convert old-version state dict of + :class:`CIDHead` (before MMPose v1.0.0) to a compatible format + of :class:`CIDHead`. + + The hook will be automatically registered during initialization. 
+ """ + version = local_meta.get('version', None) + if version and version >= self._version: + return + + # convert old-version state dict + keys = list(state_dict.keys()) + for k in keys: + if 'keypoint_center_conv' in k: + v = state_dict.pop(k) + k = k.replace('keypoint_center_conv', + 'iia_module.keypoint_root_conv') + state_dict[k] = v + + if 'conv_down' in k: + v = state_dict.pop(k) + k = k.replace('conv_down', 'gfd_module.conv_down') + state_dict[k] = v + + if 'c_attn' in k: + v = state_dict.pop(k) + k = k.replace('c_attn', 'gfd_module.channel_attention') + state_dict[k] = v + + if 's_attn' in k: + v = state_dict.pop(k) + k = k.replace('s_attn', 'gfd_module.spatial_attention') + state_dict[k] = v + + if 'fuse_attn' in k: + v = state_dict.pop(k) + k = k.replace('fuse_attn', 'gfd_module.fuse_attention') + state_dict[k] = v + + if 'heatmap_conv' in k: + v = state_dict.pop(k) + k = k.replace('heatmap_conv', 'gfd_module.heatmap_conv') + state_dict[k] = v diff --git a/mmpose/models/heads/heatmap_heads/cpm_head.py b/mmpose/models/heads/heatmap_heads/cpm_head.py index 1ba46357ec..287d591106 100644 --- a/mmpose/models/heads/heatmap_heads/cpm_head.py +++ b/mmpose/models/heads/heatmap_heads/cpm_head.py @@ -1,307 +1,307 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import List, Optional, Sequence, Union - -import torch -from mmcv.cnn import build_conv_layer, build_upsample_layer -from mmengine.structures import PixelData -from torch import Tensor, nn - -from mmpose.evaluation.functional import pose_pck_accuracy -from mmpose.models.utils.tta import flip_heatmaps -from mmpose.registry import KEYPOINT_CODECS, MODELS -from mmpose.utils.tensor_utils import to_numpy -from mmpose.utils.typing import (Features, MultiConfig, OptConfigType, - OptSampleList, Predictions) -from ..base_head import BaseHead - -OptIntSeq = Optional[Sequence[int]] - - -@MODELS.register_module() -class CPMHead(BaseHead): - """Multi-stage heatmap head introduced in `Convolutional Pose Machines`_ by - Wei et al (2016) and used by `Stacked Hourglass Networks`_ by Newell et al - (2016). The head consists of multiple branches, each of which has some - deconv layers and a simple conv2d layer. - - Args: - in_channels (int | Sequence[int]): Number of channels in the input - feature maps. - out_channels (int): Number of channels in the output heatmaps. - num_stages (int): Number of stages. - deconv_out_channels (Sequence[int], optional): The output channel - number of each deconv layer. Defaults to ``(256, 256, 256)`` - deconv_kernel_sizes (Sequence[int | tuple], optional): The kernel size - of each deconv layer. Each element should be either an integer for - both height and width dimensions, or a tuple of two integers for - the height and the width dimension respectively. - Defaults to ``(4, 4, 4)`` - final_layer (dict): Arguments of the final Conv2d layer. - Defaults to ``dict(kernel_size=1)`` - loss (Config | List[Config]): Config of the keypoint loss of different - stages. Defaults to use :class:`KeypointMSELoss`. - decoder (Config, optional): The decoder config that controls decoding - keypoint coordinates from the network output. Defaults to ``None`` - init_cfg (Config, optional): Config to control the initialization. See - :attr:`default_init_cfg` for default settings - - .. _`Convolutional Pose Machines`: https://arxiv.org/abs/1602.00134 - .. 
_`Stacked Hourglass Networks`: https://arxiv.org/abs/1603.06937 - """ - - _version = 2 - - def __init__(self, - in_channels: Union[int, Sequence[int]], - out_channels: int, - num_stages: int, - deconv_out_channels: OptIntSeq = None, - deconv_kernel_sizes: OptIntSeq = None, - final_layer: dict = dict(kernel_size=1), - loss: MultiConfig = dict( - type='KeypointMSELoss', use_target_weight=True), - decoder: OptConfigType = None, - init_cfg: OptConfigType = None): - - if init_cfg is None: - init_cfg = self.default_init_cfg - super().__init__(init_cfg) - - self.num_stages = num_stages - self.in_channels = in_channels - self.out_channels = out_channels - - if isinstance(loss, list): - if len(loss) != num_stages: - raise ValueError( - f'The length of loss_module({len(loss)}) did not match ' - f'`num_stages`({num_stages})') - self.loss_module = nn.ModuleList( - MODELS.build(_loss) for _loss in loss) - else: - self.loss_module = MODELS.build(loss) - - if decoder is not None: - self.decoder = KEYPOINT_CODECS.build(decoder) - else: - self.decoder = None - - # build multi-stage deconv layers - self.multi_deconv_layers = nn.ModuleList([]) - if deconv_out_channels: - if deconv_kernel_sizes is None or len(deconv_out_channels) != len( - deconv_kernel_sizes): - raise ValueError( - '"deconv_out_channels" and "deconv_kernel_sizes" should ' - 'be integer sequences with the same length. Got ' - f'mismatched lengths {deconv_out_channels} and ' - f'{deconv_kernel_sizes}') - - for _ in range(self.num_stages): - deconv_layers = self._make_deconv_layers( - in_channels=in_channels, - layer_out_channels=deconv_out_channels, - layer_kernel_sizes=deconv_kernel_sizes, - ) - self.multi_deconv_layers.append(deconv_layers) - in_channels = deconv_out_channels[-1] - else: - for _ in range(self.num_stages): - self.multi_deconv_layers.append(nn.Identity()) - - # build multi-stage final layers - self.multi_final_layers = nn.ModuleList([]) - if final_layer is not None: - cfg = dict( - type='Conv2d', - in_channels=in_channels, - out_channels=out_channels, - kernel_size=1) - cfg.update(final_layer) - for _ in range(self.num_stages): - self.multi_final_layers.append(build_conv_layer(cfg)) - else: - for _ in range(self.num_stages): - self.multi_final_layers.append(nn.Identity()) - - @property - def default_init_cfg(self): - init_cfg = [ - dict( - type='Normal', layer=['Conv2d', 'ConvTranspose2d'], std=0.001), - dict(type='Constant', layer='BatchNorm2d', val=1) - ] - return init_cfg - - def _make_deconv_layers(self, in_channels: int, - layer_out_channels: Sequence[int], - layer_kernel_sizes: Sequence[int]) -> nn.Module: - """Create deconvolutional layers by given parameters.""" - - layers = [] - for out_channels, kernel_size in zip(layer_out_channels, - layer_kernel_sizes): - if kernel_size == 4: - padding = 1 - output_padding = 0 - elif kernel_size == 3: - padding = 1 - output_padding = 1 - elif kernel_size == 2: - padding = 0 - output_padding = 0 - else: - raise ValueError(f'Unsupported kernel size {kernel_size} for' - 'deconvlutional layers in ' - f'{self.__class__.__name__}') - cfg = dict( - type='deconv', - in_channels=in_channels, - out_channels=out_channels, - kernel_size=kernel_size, - stride=2, - padding=padding, - output_padding=output_padding, - bias=False) - layers.append(build_upsample_layer(cfg)) - layers.append(nn.BatchNorm2d(num_features=out_channels)) - layers.append(nn.ReLU(inplace=True)) - in_channels = out_channels - - return nn.Sequential(*layers) - - def forward(self, feats: Sequence[Tensor]) -> List[Tensor]: - 
"""Forward the network. The input is multi-stage feature maps and the - output is a list of heatmaps from multiple stages. - - Args: - feats (Sequence[Tensor]): Multi-stage feature maps. - - Returns: - List[Tensor]: A list of output heatmaps from multiple stages. - """ - out = [] - assert len(feats) == self.num_stages, ( - f'The length of feature maps did not match the ' - f'`num_stages` in {self.__class__.__name__}') - for i in range(self.num_stages): - y = self.multi_deconv_layers[i](feats[i]) - y = self.multi_final_layers[i](y) - out.append(y) - - return out - - def predict(self, - feats: Features, - batch_data_samples: OptSampleList, - test_cfg: OptConfigType = {}) -> Predictions: - """Predict results from multi-stage feature maps. - - Args: - feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage - features (or multiple multi-stage features in TTA) - batch_data_samples (List[:obj:`PoseDataSample`]): The batch - data samples - test_cfg (dict): The runtime config for testing process. Defaults - to {} - - Returns: - Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If - ``test_cfg['output_heatmap']==True``, return both pose and heatmap - prediction; otherwise only return the pose prediction. - - The pose prediction is a list of ``InstanceData``, each contains - the following fields: - - - keypoints (np.ndarray): predicted keypoint coordinates in - shape (num_instances, K, D) where K is the keypoint number - and D is the keypoint dimension - - keypoint_scores (np.ndarray): predicted keypoint scores in - shape (num_instances, K) - - The heatmap prediction is a list of ``PixelData``, each contains - the following fields: - - - heatmaps (Tensor): The predicted heatmaps in shape (K, h, w) - """ - - if test_cfg.get('flip_test', False): - # TTA: flip test - assert isinstance(feats, list) and len(feats) == 2 - flip_indices = batch_data_samples[0].metainfo['flip_indices'] - _feats, _feats_flip = feats - _batch_heatmaps = self.forward(_feats)[-1] - _batch_heatmaps_flip = flip_heatmaps( - self.forward(_feats_flip)[-1], - flip_mode=test_cfg.get('flip_mode', 'heatmap'), - flip_indices=flip_indices, - shift_heatmap=test_cfg.get('shift_heatmap', False)) - batch_heatmaps = (_batch_heatmaps + _batch_heatmaps_flip) * 0.5 - else: - multi_stage_heatmaps = self.forward(feats) - batch_heatmaps = multi_stage_heatmaps[-1] - - preds = self.decode(batch_heatmaps) - - if test_cfg.get('output_heatmaps', False): - pred_fields = [ - PixelData(heatmaps=hm) for hm in batch_heatmaps.detach() - ] - return preds, pred_fields - else: - return preds - - def loss(self, - feats: Sequence[Tensor], - batch_data_samples: OptSampleList, - train_cfg: OptConfigType = {}) -> dict: - """Calculate losses from a batch of inputs and data samples. - - Args: - feats (Sequence[Tensor]): Multi-stage feature maps. - batch_data_samples (List[:obj:`PoseDataSample`]): The Data - Samples. It usually includes information such as - `gt_instances`. - train_cfg (Config, optional): The training config. - - Returns: - dict: A dictionary of loss components. 
- """ - multi_stage_pred_heatmaps = self.forward(feats) - - gt_heatmaps = torch.stack( - [d.gt_fields.heatmaps for d in batch_data_samples]) - keypoint_weights = torch.cat([ - d.gt_instance_labels.keypoint_weights for d in batch_data_samples - ]) - - # calculate losses over multiple stages - losses = dict() - for i in range(self.num_stages): - if isinstance(self.loss_module, nn.ModuleList): - # use different loss_module over different stages - loss_func = self.loss_module[i] - else: - # use the same loss_module over different stages - loss_func = self.loss_module - - # the `gt_heatmaps` and `keypoint_weights` used to calculate loss - # for different stages are the same - loss_i = loss_func(multi_stage_pred_heatmaps[i], gt_heatmaps, - keypoint_weights) - - if 'loss_kpt' not in losses: - losses['loss_kpt'] = loss_i - else: - losses['loss_kpt'] += loss_i - - # calculate accuracy - _, avg_acc, _ = pose_pck_accuracy( - output=to_numpy(multi_stage_pred_heatmaps[-1]), - target=to_numpy(gt_heatmaps), - mask=to_numpy(keypoint_weights) > 0) - - acc_pose = torch.tensor(avg_acc, device=gt_heatmaps.device) - losses.update(acc_pose=acc_pose) - - return losses +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Sequence, Union + +import torch +from mmcv.cnn import build_conv_layer, build_upsample_layer +from mmengine.structures import PixelData +from torch import Tensor, nn + +from mmpose.evaluation.functional import pose_pck_accuracy +from mmpose.models.utils.tta import flip_heatmaps +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (Features, MultiConfig, OptConfigType, + OptSampleList, Predictions) +from ..base_head import BaseHead + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class CPMHead(BaseHead): + """Multi-stage heatmap head introduced in `Convolutional Pose Machines`_ by + Wei et al (2016) and used by `Stacked Hourglass Networks`_ by Newell et al + (2016). The head consists of multiple branches, each of which has some + deconv layers and a simple conv2d layer. + + Args: + in_channels (int | Sequence[int]): Number of channels in the input + feature maps. + out_channels (int): Number of channels in the output heatmaps. + num_stages (int): Number of stages. + deconv_out_channels (Sequence[int], optional): The output channel + number of each deconv layer. Defaults to ``(256, 256, 256)`` + deconv_kernel_sizes (Sequence[int | tuple], optional): The kernel size + of each deconv layer. Each element should be either an integer for + both height and width dimensions, or a tuple of two integers for + the height and the width dimension respectively. + Defaults to ``(4, 4, 4)`` + final_layer (dict): Arguments of the final Conv2d layer. + Defaults to ``dict(kernel_size=1)`` + loss (Config | List[Config]): Config of the keypoint loss of different + stages. Defaults to use :class:`KeypointMSELoss`. + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. _`Convolutional Pose Machines`: https://arxiv.org/abs/1602.00134 + .. 
_`Stacked Hourglass Networks`: https://arxiv.org/abs/1603.06937 + """ + + _version = 2 + + def __init__(self, + in_channels: Union[int, Sequence[int]], + out_channels: int, + num_stages: int, + deconv_out_channels: OptIntSeq = None, + deconv_kernel_sizes: OptIntSeq = None, + final_layer: dict = dict(kernel_size=1), + loss: MultiConfig = dict( + type='KeypointMSELoss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + + if init_cfg is None: + init_cfg = self.default_init_cfg + super().__init__(init_cfg) + + self.num_stages = num_stages + self.in_channels = in_channels + self.out_channels = out_channels + + if isinstance(loss, list): + if len(loss) != num_stages: + raise ValueError( + f'The length of loss_module({len(loss)}) did not match ' + f'`num_stages`({num_stages})') + self.loss_module = nn.ModuleList( + MODELS.build(_loss) for _loss in loss) + else: + self.loss_module = MODELS.build(loss) + + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + # build multi-stage deconv layers + self.multi_deconv_layers = nn.ModuleList([]) + if deconv_out_channels: + if deconv_kernel_sizes is None or len(deconv_out_channels) != len( + deconv_kernel_sizes): + raise ValueError( + '"deconv_out_channels" and "deconv_kernel_sizes" should ' + 'be integer sequences with the same length. Got ' + f'mismatched lengths {deconv_out_channels} and ' + f'{deconv_kernel_sizes}') + + for _ in range(self.num_stages): + deconv_layers = self._make_deconv_layers( + in_channels=in_channels, + layer_out_channels=deconv_out_channels, + layer_kernel_sizes=deconv_kernel_sizes, + ) + self.multi_deconv_layers.append(deconv_layers) + in_channels = deconv_out_channels[-1] + else: + for _ in range(self.num_stages): + self.multi_deconv_layers.append(nn.Identity()) + + # build multi-stage final layers + self.multi_final_layers = nn.ModuleList([]) + if final_layer is not None: + cfg = dict( + type='Conv2d', + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1) + cfg.update(final_layer) + for _ in range(self.num_stages): + self.multi_final_layers.append(build_conv_layer(cfg)) + else: + for _ in range(self.num_stages): + self.multi_final_layers.append(nn.Identity()) + + @property + def default_init_cfg(self): + init_cfg = [ + dict( + type='Normal', layer=['Conv2d', 'ConvTranspose2d'], std=0.001), + dict(type='Constant', layer='BatchNorm2d', val=1) + ] + return init_cfg + + def _make_deconv_layers(self, in_channels: int, + layer_out_channels: Sequence[int], + layer_kernel_sizes: Sequence[int]) -> nn.Module: + """Create deconvolutional layers by given parameters.""" + + layers = [] + for out_channels, kernel_size in zip(layer_out_channels, + layer_kernel_sizes): + if kernel_size == 4: + padding = 1 + output_padding = 0 + elif kernel_size == 3: + padding = 1 + output_padding = 1 + elif kernel_size == 2: + padding = 0 + output_padding = 0 + else: + raise ValueError(f'Unsupported kernel size {kernel_size} for' + 'deconvlutional layers in ' + f'{self.__class__.__name__}') + cfg = dict( + type='deconv', + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=2, + padding=padding, + output_padding=output_padding, + bias=False) + layers.append(build_upsample_layer(cfg)) + layers.append(nn.BatchNorm2d(num_features=out_channels)) + layers.append(nn.ReLU(inplace=True)) + in_channels = out_channels + + return nn.Sequential(*layers) + + def forward(self, feats: Sequence[Tensor]) -> List[Tensor]: + 
"""Forward the network. The input is multi-stage feature maps and the + output is a list of heatmaps from multiple stages. + + Args: + feats (Sequence[Tensor]): Multi-stage feature maps. + + Returns: + List[Tensor]: A list of output heatmaps from multiple stages. + """ + out = [] + assert len(feats) == self.num_stages, ( + f'The length of feature maps did not match the ' + f'`num_stages` in {self.__class__.__name__}') + for i in range(self.num_stages): + y = self.multi_deconv_layers[i](feats[i]) + y = self.multi_final_layers[i](y) + out.append(y) + + return out + + def predict(self, + feats: Features, + batch_data_samples: OptSampleList, + test_cfg: OptConfigType = {}) -> Predictions: + """Predict results from multi-stage feature maps. + + Args: + feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage + features (or multiple multi-stage features in TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + test_cfg (dict): The runtime config for testing process. Defaults + to {} + + Returns: + Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If + ``test_cfg['output_heatmap']==True``, return both pose and heatmap + prediction; otherwise only return the pose prediction. + + The pose prediction is a list of ``InstanceData``, each contains + the following fields: + + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + + The heatmap prediction is a list of ``PixelData``, each contains + the following fields: + + - heatmaps (Tensor): The predicted heatmaps in shape (K, h, w) + """ + + if test_cfg.get('flip_test', False): + # TTA: flip test + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + _feats, _feats_flip = feats + _batch_heatmaps = self.forward(_feats)[-1] + _batch_heatmaps_flip = flip_heatmaps( + self.forward(_feats_flip)[-1], + flip_mode=test_cfg.get('flip_mode', 'heatmap'), + flip_indices=flip_indices, + shift_heatmap=test_cfg.get('shift_heatmap', False)) + batch_heatmaps = (_batch_heatmaps + _batch_heatmaps_flip) * 0.5 + else: + multi_stage_heatmaps = self.forward(feats) + batch_heatmaps = multi_stage_heatmaps[-1] + + preds = self.decode(batch_heatmaps) + + if test_cfg.get('output_heatmaps', False): + pred_fields = [ + PixelData(heatmaps=hm) for hm in batch_heatmaps.detach() + ] + return preds, pred_fields + else: + return preds + + def loss(self, + feats: Sequence[Tensor], + batch_data_samples: OptSampleList, + train_cfg: OptConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + feats (Sequence[Tensor]): Multi-stage feature maps. + batch_data_samples (List[:obj:`PoseDataSample`]): The Data + Samples. It usually includes information such as + `gt_instances`. + train_cfg (Config, optional): The training config. + + Returns: + dict: A dictionary of loss components. 
+ """ + multi_stage_pred_heatmaps = self.forward(feats) + + gt_heatmaps = torch.stack( + [d.gt_fields.heatmaps for d in batch_data_samples]) + keypoint_weights = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples + ]) + + # calculate losses over multiple stages + losses = dict() + for i in range(self.num_stages): + if isinstance(self.loss_module, nn.ModuleList): + # use different loss_module over different stages + loss_func = self.loss_module[i] + else: + # use the same loss_module over different stages + loss_func = self.loss_module + + # the `gt_heatmaps` and `keypoint_weights` used to calculate loss + # for different stages are the same + loss_i = loss_func(multi_stage_pred_heatmaps[i], gt_heatmaps, + keypoint_weights) + + if 'loss_kpt' not in losses: + losses['loss_kpt'] = loss_i + else: + losses['loss_kpt'] += loss_i + + # calculate accuracy + _, avg_acc, _ = pose_pck_accuracy( + output=to_numpy(multi_stage_pred_heatmaps[-1]), + target=to_numpy(gt_heatmaps), + mask=to_numpy(keypoint_weights) > 0) + + acc_pose = torch.tensor(avg_acc, device=gt_heatmaps.device) + losses.update(acc_pose=acc_pose) + + return losses diff --git a/mmpose/models/heads/heatmap_heads/heatmap_head.py b/mmpose/models/heads/heatmap_heads/heatmap_head.py index 0b0fa3f475..784670514f 100644 --- a/mmpose/models/heads/heatmap_heads/heatmap_head.py +++ b/mmpose/models/heads/heatmap_heads/heatmap_head.py @@ -1,369 +1,369 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Optional, Sequence, Tuple, Union - -import torch -from mmcv.cnn import build_conv_layer, build_upsample_layer -from mmengine.structures import PixelData -from torch import Tensor, nn - -from mmpose.evaluation.functional import pose_pck_accuracy -from mmpose.models.utils.tta import flip_heatmaps -from mmpose.registry import KEYPOINT_CODECS, MODELS -from mmpose.utils.tensor_utils import to_numpy -from mmpose.utils.typing import (ConfigType, Features, OptConfigType, - OptSampleList, Predictions) -from ..base_head import BaseHead - -OptIntSeq = Optional[Sequence[int]] - - -@MODELS.register_module() -class HeatmapHead(BaseHead): - """Top-down heatmap head introduced in `Simple Baselines`_ by Xiao et al - (2018). The head is composed of a few deconvolutional layers followed by a - convolutional layer to generate heatmaps from low-resolution feature maps. - - Args: - in_channels (int | Sequence[int]): Number of channels in the input - feature map - out_channels (int): Number of channels in the output heatmap - deconv_out_channels (Sequence[int], optional): The output channel - number of each deconv layer. Defaults to ``(256, 256, 256)`` - deconv_kernel_sizes (Sequence[int | tuple], optional): The kernel size - of each deconv layer. Each element should be either an integer for - both height and width dimensions, or a tuple of two integers for - the height and the width dimension respectively.Defaults to - ``(4, 4, 4)`` - conv_out_channels (Sequence[int], optional): The output channel number - of each intermediate conv layer. ``None`` means no intermediate - conv layer between deconv layers and the final conv layer. - Defaults to ``None`` - conv_kernel_sizes (Sequence[int | tuple], optional): The kernel size - of each intermediate conv layer. Defaults to ``None`` - final_layer (dict): Arguments of the final Conv2d layer. - Defaults to ``dict(kernel_size=1)`` - loss (Config): Config of the keypoint loss. 
Defaults to use - :class:`KeypointMSELoss` - decoder (Config, optional): The decoder config that controls decoding - keypoint coordinates from the network output. Defaults to ``None`` - init_cfg (Config, optional): Config to control the initialization. See - :attr:`default_init_cfg` for default settings - extra (dict, optional): Extra configurations. - Defaults to ``None`` - - .. _`Simple Baselines`: https://arxiv.org/abs/1804.06208 - """ - - _version = 2 - - def __init__(self, - in_channels: Union[int, Sequence[int]], - out_channels: int, - deconv_out_channels: OptIntSeq = (256, 256, 256), - deconv_kernel_sizes: OptIntSeq = (4, 4, 4), - conv_out_channels: OptIntSeq = None, - conv_kernel_sizes: OptIntSeq = None, - final_layer: dict = dict(kernel_size=1), - loss: ConfigType = dict( - type='KeypointMSELoss', use_target_weight=True), - decoder: OptConfigType = None, - init_cfg: OptConfigType = None): - - if init_cfg is None: - init_cfg = self.default_init_cfg - - super().__init__(init_cfg) - - self.in_channels = in_channels - self.out_channels = out_channels - self.loss_module = MODELS.build(loss) - if decoder is not None: - self.decoder = KEYPOINT_CODECS.build(decoder) - else: - self.decoder = None - - if deconv_out_channels: - if deconv_kernel_sizes is None or len(deconv_out_channels) != len( - deconv_kernel_sizes): - raise ValueError( - '"deconv_out_channels" and "deconv_kernel_sizes" should ' - 'be integer sequences with the same length. Got ' - f'mismatched lengths {deconv_out_channels} and ' - f'{deconv_kernel_sizes}') - - self.deconv_layers = self._make_deconv_layers( - in_channels=in_channels, - layer_out_channels=deconv_out_channels, - layer_kernel_sizes=deconv_kernel_sizes, - ) - in_channels = deconv_out_channels[-1] - else: - self.deconv_layers = nn.Identity() - - if conv_out_channels: - if conv_kernel_sizes is None or len(conv_out_channels) != len( - conv_kernel_sizes): - raise ValueError( - '"conv_out_channels" and "conv_kernel_sizes" should ' - 'be integer sequences with the same length. 
Got ' - f'mismatched lengths {conv_out_channels} and ' - f'{conv_kernel_sizes}') - - self.conv_layers = self._make_conv_layers( - in_channels=in_channels, - layer_out_channels=conv_out_channels, - layer_kernel_sizes=conv_kernel_sizes) - in_channels = conv_out_channels[-1] - else: - self.conv_layers = nn.Identity() - - if final_layer is not None: - cfg = dict( - type='Conv2d', - in_channels=in_channels, - out_channels=out_channels, - kernel_size=1) - cfg.update(final_layer) - self.final_layer = build_conv_layer(cfg) - else: - self.final_layer = nn.Identity() - - # Register the hook to automatically convert old version state dicts - self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) - - def _make_conv_layers(self, in_channels: int, - layer_out_channels: Sequence[int], - layer_kernel_sizes: Sequence[int]) -> nn.Module: - """Create convolutional layers by given parameters.""" - - layers = [] - for out_channels, kernel_size in zip(layer_out_channels, - layer_kernel_sizes): - padding = (kernel_size - 1) // 2 - cfg = dict( - type='Conv2d', - in_channels=in_channels, - out_channels=out_channels, - kernel_size=kernel_size, - stride=1, - padding=padding) - layers.append(build_conv_layer(cfg)) - layers.append(nn.BatchNorm2d(num_features=out_channels)) - layers.append(nn.ReLU(inplace=True)) - in_channels = out_channels - - return nn.Sequential(*layers) - - def _make_deconv_layers(self, in_channels: int, - layer_out_channels: Sequence[int], - layer_kernel_sizes: Sequence[int]) -> nn.Module: - """Create deconvolutional layers by given parameters.""" - - layers = [] - for out_channels, kernel_size in zip(layer_out_channels, - layer_kernel_sizes): - if kernel_size == 4: - padding = 1 - output_padding = 0 - elif kernel_size == 3: - padding = 1 - output_padding = 1 - elif kernel_size == 2: - padding = 0 - output_padding = 0 - else: - raise ValueError(f'Unsupported kernel size {kernel_size} for' - 'deconvlutional layers in ' - f'{self.__class__.__name__}') - cfg = dict( - type='deconv', - in_channels=in_channels, - out_channels=out_channels, - kernel_size=kernel_size, - stride=2, - padding=padding, - output_padding=output_padding, - bias=False) - layers.append(build_upsample_layer(cfg)) - layers.append(nn.BatchNorm2d(num_features=out_channels)) - layers.append(nn.ReLU(inplace=True)) - in_channels = out_channels - - return nn.Sequential(*layers) - - @property - def default_init_cfg(self): - init_cfg = [ - dict( - type='Normal', layer=['Conv2d', 'ConvTranspose2d'], std=0.001), - dict(type='Constant', layer='BatchNorm2d', val=1) - ] - return init_cfg - - def forward(self, feats: Tuple[Tensor]) -> Tensor: - """Forward the network. The input is multi scale feature maps and the - output is the heatmap. - - Args: - feats (Tuple[Tensor]): Multi scale feature maps. - - Returns: - Tensor: output heatmap. - """ - x = feats[-1] - - x = self.deconv_layers(x) - x = self.conv_layers(x) - x = self.final_layer(x) - - return x - - def predict(self, - feats: Features, - batch_data_samples: OptSampleList, - test_cfg: ConfigType = {}) -> Predictions: - """Predict results from features. - - Args: - feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage - features (or multiple multi-stage features in TTA) - batch_data_samples (List[:obj:`PoseDataSample`]): The batch - data samples - test_cfg (dict): The runtime config for testing process. 
Defaults - to {} - - Returns: - Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If - ``test_cfg['output_heatmap']==True``, return both pose and heatmap - prediction; otherwise only return the pose prediction. - - The pose prediction is a list of ``InstanceData``, each contains - the following fields: - - - keypoints (np.ndarray): predicted keypoint coordinates in - shape (num_instances, K, D) where K is the keypoint number - and D is the keypoint dimension - - keypoint_scores (np.ndarray): predicted keypoint scores in - shape (num_instances, K) - - The heatmap prediction is a list of ``PixelData``, each contains - the following fields: - - - heatmaps (Tensor): The predicted heatmaps in shape (K, h, w) - """ - - if test_cfg.get('flip_test', False): - # TTA: flip test -> feats = [orig, flipped] - assert isinstance(feats, list) and len(feats) == 2 - flip_indices = batch_data_samples[0].metainfo['flip_indices'] - _feats, _feats_flip = feats - _batch_heatmaps = self.forward(_feats) - _batch_heatmaps_flip = flip_heatmaps( - self.forward(_feats_flip), - flip_mode=test_cfg.get('flip_mode', 'heatmap'), - flip_indices=flip_indices, - shift_heatmap=test_cfg.get('shift_heatmap', False)) - batch_heatmaps = (_batch_heatmaps + _batch_heatmaps_flip) * 0.5 - else: - batch_heatmaps = self.forward(feats) - - preds = self.decode(batch_heatmaps) - - if test_cfg.get('output_heatmaps', False): - pred_fields = [ - PixelData(heatmaps=hm) for hm in batch_heatmaps.detach() - ] - return preds, pred_fields - else: - return preds - - def loss(self, - feats: Tuple[Tensor], - batch_data_samples: OptSampleList, - train_cfg: ConfigType = {}) -> dict: - """Calculate losses from a batch of inputs and data samples. - - Args: - feats (Tuple[Tensor]): The multi-stage features - batch_data_samples (List[:obj:`PoseDataSample`]): The batch - data samples - train_cfg (dict): The runtime config for training process. - Defaults to {} - - Returns: - dict: A dictionary of losses. - """ - pred_fields = self.forward(feats) - gt_heatmaps = torch.stack( - [d.gt_fields.heatmaps for d in batch_data_samples]) - keypoint_weights = torch.cat([ - d.gt_instance_labels.keypoint_weights for d in batch_data_samples - ]) - - # calculate losses - losses = dict() - loss = self.loss_module(pred_fields, gt_heatmaps, keypoint_weights) - - losses.update(loss_kpt=loss) - - # calculate accuracy - if train_cfg.get('compute_acc', True): - _, avg_acc, _ = pose_pck_accuracy( - output=to_numpy(pred_fields), - target=to_numpy(gt_heatmaps), - mask=to_numpy(keypoint_weights) > 0) - - acc_pose = torch.tensor(avg_acc, device=gt_heatmaps.device) - losses.update(acc_pose=acc_pose) - - return losses - - def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, - **kwargs): - """A hook function to convert old-version state dict of - :class:`DeepposeRegressionHead` (before MMPose v1.0.0) to a - compatible format of :class:`RegressionHead`. - - The hook will be automatically registered during initialization. - """ - version = local_meta.get('version', None) - if version and version >= self._version: - return - - # convert old-version state dict - keys = list(state_dict.keys()) - for _k in keys: - if not _k.startswith(prefix): - continue - v = state_dict.pop(_k) - k = _k[len(prefix):] - # In old version, "final_layer" includes both intermediate - # conv layers (new "conv_layers") and final conv layers (new - # "final_layer"). 
- # - # If there is no intermediate conv layer, old "final_layer" will - # have keys like "final_layer.xxx", which should be still - # named "final_layer.xxx"; - # - # If there are intermediate conv layers, old "final_layer" will - # have keys like "final_layer.n.xxx", where the weights of the last - # one should be renamed "final_layer.xxx", and others should be - # renamed "conv_layers.n.xxx" - k_parts = k.split('.') - if k_parts[0] == 'final_layer': - if len(k_parts) == 3: - assert isinstance(self.conv_layers, nn.Sequential) - idx = int(k_parts[1]) - if idx < len(self.conv_layers): - # final_layer.n.xxx -> conv_layers.n.xxx - k_new = 'conv_layers.' + '.'.join(k_parts[1:]) - else: - # final_layer.n.xxx -> final_layer.xxx - k_new = 'final_layer.' + k_parts[2] - else: - # final_layer.xxx remains final_layer.xxx - k_new = k - else: - k_new = k - - state_dict[prefix + k_new] = v +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Sequence, Tuple, Union + +import torch +from mmcv.cnn import build_conv_layer, build_upsample_layer +from mmengine.structures import PixelData +from torch import Tensor, nn + +from mmpose.evaluation.functional import pose_pck_accuracy +from mmpose.models.utils.tta import flip_heatmaps +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, Features, OptConfigType, + OptSampleList, Predictions) +from ..base_head import BaseHead + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class HeatmapHead(BaseHead): + """Top-down heatmap head introduced in `Simple Baselines`_ by Xiao et al + (2018). The head is composed of a few deconvolutional layers followed by a + convolutional layer to generate heatmaps from low-resolution feature maps. + + Args: + in_channels (int | Sequence[int]): Number of channels in the input + feature map + out_channels (int): Number of channels in the output heatmap + deconv_out_channels (Sequence[int], optional): The output channel + number of each deconv layer. Defaults to ``(256, 256, 256)`` + deconv_kernel_sizes (Sequence[int | tuple], optional): The kernel size + of each deconv layer. Each element should be either an integer for + both height and width dimensions, or a tuple of two integers for + the height and the width dimension respectively.Defaults to + ``(4, 4, 4)`` + conv_out_channels (Sequence[int], optional): The output channel number + of each intermediate conv layer. ``None`` means no intermediate + conv layer between deconv layers and the final conv layer. + Defaults to ``None`` + conv_kernel_sizes (Sequence[int | tuple], optional): The kernel size + of each intermediate conv layer. Defaults to ``None`` + final_layer (dict): Arguments of the final Conv2d layer. + Defaults to ``dict(kernel_size=1)`` + loss (Config): Config of the keypoint loss. Defaults to use + :class:`KeypointMSELoss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + extra (dict, optional): Extra configurations. + Defaults to ``None`` + + .. 
_`Simple Baselines`: https://arxiv.org/abs/1804.06208 + """ + + _version = 2 + + def __init__(self, + in_channels: Union[int, Sequence[int]], + out_channels: int, + deconv_out_channels: OptIntSeq = (256, 256, 256), + deconv_kernel_sizes: OptIntSeq = (4, 4, 4), + conv_out_channels: OptIntSeq = None, + conv_kernel_sizes: OptIntSeq = None, + final_layer: dict = dict(kernel_size=1), + loss: ConfigType = dict( + type='KeypointMSELoss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.in_channels = in_channels + self.out_channels = out_channels + self.loss_module = MODELS.build(loss) + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + if deconv_out_channels: + if deconv_kernel_sizes is None or len(deconv_out_channels) != len( + deconv_kernel_sizes): + raise ValueError( + '"deconv_out_channels" and "deconv_kernel_sizes" should ' + 'be integer sequences with the same length. Got ' + f'mismatched lengths {deconv_out_channels} and ' + f'{deconv_kernel_sizes}') + + self.deconv_layers = self._make_deconv_layers( + in_channels=in_channels, + layer_out_channels=deconv_out_channels, + layer_kernel_sizes=deconv_kernel_sizes, + ) + in_channels = deconv_out_channels[-1] + else: + self.deconv_layers = nn.Identity() + + if conv_out_channels: + if conv_kernel_sizes is None or len(conv_out_channels) != len( + conv_kernel_sizes): + raise ValueError( + '"conv_out_channels" and "conv_kernel_sizes" should ' + 'be integer sequences with the same length. Got ' + f'mismatched lengths {conv_out_channels} and ' + f'{conv_kernel_sizes}') + + self.conv_layers = self._make_conv_layers( + in_channels=in_channels, + layer_out_channels=conv_out_channels, + layer_kernel_sizes=conv_kernel_sizes) + in_channels = conv_out_channels[-1] + else: + self.conv_layers = nn.Identity() + + if final_layer is not None: + cfg = dict( + type='Conv2d', + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1) + cfg.update(final_layer) + self.final_layer = build_conv_layer(cfg) + else: + self.final_layer = nn.Identity() + + # Register the hook to automatically convert old version state dicts + self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) + + def _make_conv_layers(self, in_channels: int, + layer_out_channels: Sequence[int], + layer_kernel_sizes: Sequence[int]) -> nn.Module: + """Create convolutional layers by given parameters.""" + + layers = [] + for out_channels, kernel_size in zip(layer_out_channels, + layer_kernel_sizes): + padding = (kernel_size - 1) // 2 + cfg = dict( + type='Conv2d', + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=1, + padding=padding) + layers.append(build_conv_layer(cfg)) + layers.append(nn.BatchNorm2d(num_features=out_channels)) + layers.append(nn.ReLU(inplace=True)) + in_channels = out_channels + + return nn.Sequential(*layers) + + def _make_deconv_layers(self, in_channels: int, + layer_out_channels: Sequence[int], + layer_kernel_sizes: Sequence[int]) -> nn.Module: + """Create deconvolutional layers by given parameters.""" + + layers = [] + for out_channels, kernel_size in zip(layer_out_channels, + layer_kernel_sizes): + if kernel_size == 4: + padding = 1 + output_padding = 0 + elif kernel_size == 3: + padding = 1 + output_padding = 1 + elif kernel_size == 2: + padding = 0 + output_padding = 0 + else: + raise ValueError(f'Unsupported 
kernel size {kernel_size} for' + 'deconvlutional layers in ' + f'{self.__class__.__name__}') + cfg = dict( + type='deconv', + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=2, + padding=padding, + output_padding=output_padding, + bias=False) + layers.append(build_upsample_layer(cfg)) + layers.append(nn.BatchNorm2d(num_features=out_channels)) + layers.append(nn.ReLU(inplace=True)) + in_channels = out_channels + + return nn.Sequential(*layers) + + @property + def default_init_cfg(self): + init_cfg = [ + dict( + type='Normal', layer=['Conv2d', 'ConvTranspose2d'], std=0.001), + dict(type='Constant', layer='BatchNorm2d', val=1) + ] + return init_cfg + + def forward(self, feats: Tuple[Tensor]) -> Tensor: + """Forward the network. The input is multi scale feature maps and the + output is the heatmap. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + Tensor: output heatmap. + """ + x = feats[-1] + + x = self.deconv_layers(x) + x = self.conv_layers(x) + x = self.final_layer(x) + + return x + + def predict(self, + feats: Features, + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from features. + + Args: + feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage + features (or multiple multi-stage features in TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + test_cfg (dict): The runtime config for testing process. Defaults + to {} + + Returns: + Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If + ``test_cfg['output_heatmap']==True``, return both pose and heatmap + prediction; otherwise only return the pose prediction. + + The pose prediction is a list of ``InstanceData``, each contains + the following fields: + + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + + The heatmap prediction is a list of ``PixelData``, each contains + the following fields: + + - heatmaps (Tensor): The predicted heatmaps in shape (K, h, w) + """ + + if test_cfg.get('flip_test', False): + # TTA: flip test -> feats = [orig, flipped] + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + _feats, _feats_flip = feats + _batch_heatmaps = self.forward(_feats) + _batch_heatmaps_flip = flip_heatmaps( + self.forward(_feats_flip), + flip_mode=test_cfg.get('flip_mode', 'heatmap'), + flip_indices=flip_indices, + shift_heatmap=test_cfg.get('shift_heatmap', False)) + batch_heatmaps = (_batch_heatmaps + _batch_heatmaps_flip) * 0.5 + else: + batch_heatmaps = self.forward(feats) + + preds = self.decode(batch_heatmaps) + + if test_cfg.get('output_heatmaps', False): + pred_fields = [ + PixelData(heatmaps=hm) for hm in batch_heatmaps.detach() + ] + return preds, pred_fields + else: + return preds + + def loss(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + feats (Tuple[Tensor]): The multi-stage features + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + train_cfg (dict): The runtime config for training process. + Defaults to {} + + Returns: + dict: A dictionary of losses. 
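A minimal sketch (assumed import path and channel numbers) of how the flip-test branch of `predict()` above combines the two views: run the head on the original and the horizontally flipped features, flip the second set of heatmaps back with `flip_heatmaps`, and average.

import torch
from mmpose.models.heads import HeatmapHead       # assumed re-export path
from mmpose.models.utils.tta import flip_heatmaps

head = HeatmapHead(in_channels=32, out_channels=17)

feats = (torch.randn(2, 32, 8, 6),)                # simulated backbone output
feats_flip = (torch.flip(feats[0], dims=[-1]),)    # horizontally flipped view

heatmaps = head.forward(feats)                     # (2, 17, 64, 48): 3 deconvs, 8x upsampling
heatmaps_flip = flip_heatmaps(
    head.forward(feats_flip),
    flip_mode='heatmap',
    # identity mapping for illustration only; real values come from the
    # dataset metainfo key 'flip_indices'
    flip_indices=list(range(17)),
    shift_heatmap=False)

combined = (heatmaps + heatmaps_flip) * 0.5
assert combined.shape == (2, 17, 64, 48)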
+ """ + pred_fields = self.forward(feats) + gt_heatmaps = torch.stack( + [d.gt_fields.heatmaps for d in batch_data_samples]) + keypoint_weights = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples + ]) + + # calculate losses + losses = dict() + loss = self.loss_module(pred_fields, gt_heatmaps, keypoint_weights) + + losses.update(loss_kpt=loss) + + # calculate accuracy + if train_cfg.get('compute_acc', True): + _, avg_acc, _ = pose_pck_accuracy( + output=to_numpy(pred_fields), + target=to_numpy(gt_heatmaps), + mask=to_numpy(keypoint_weights) > 0) + + acc_pose = torch.tensor(avg_acc, device=gt_heatmaps.device) + losses.update(acc_pose=acc_pose) + + return losses + + def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, + **kwargs): + """A hook function to convert old-version state dict of + :class:`DeepposeRegressionHead` (before MMPose v1.0.0) to a + compatible format of :class:`RegressionHead`. + + The hook will be automatically registered during initialization. + """ + version = local_meta.get('version', None) + if version and version >= self._version: + return + + # convert old-version state dict + keys = list(state_dict.keys()) + for _k in keys: + if not _k.startswith(prefix): + continue + v = state_dict.pop(_k) + k = _k[len(prefix):] + # In old version, "final_layer" includes both intermediate + # conv layers (new "conv_layers") and final conv layers (new + # "final_layer"). + # + # If there is no intermediate conv layer, old "final_layer" will + # have keys like "final_layer.xxx", which should be still + # named "final_layer.xxx"; + # + # If there are intermediate conv layers, old "final_layer" will + # have keys like "final_layer.n.xxx", where the weights of the last + # one should be renamed "final_layer.xxx", and others should be + # renamed "conv_layers.n.xxx" + k_parts = k.split('.') + if k_parts[0] == 'final_layer': + if len(k_parts) == 3: + assert isinstance(self.conv_layers, nn.Sequential) + idx = int(k_parts[1]) + if idx < len(self.conv_layers): + # final_layer.n.xxx -> conv_layers.n.xxx + k_new = 'conv_layers.' + '.'.join(k_parts[1:]) + else: + # final_layer.n.xxx -> final_layer.xxx + k_new = 'final_layer.' + k_parts[2] + else: + # final_layer.xxx remains final_layer.xxx + k_new = k + else: + k_new = k + + state_dict[prefix + k_new] = v diff --git a/mmpose/models/heads/heatmap_heads/mspn_head.py b/mmpose/models/heads/heatmap_heads/mspn_head.py index 8b7cddf798..ebd5b66fdd 100644 --- a/mmpose/models/heads/heatmap_heads/mspn_head.py +++ b/mmpose/models/heads/heatmap_heads/mspn_head.py @@ -1,432 +1,432 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -from typing import List, Optional, Sequence, Union - -import torch -from mmcv.cnn import (ConvModule, DepthwiseSeparableConvModule, Linear, - build_activation_layer, build_norm_layer) -from mmengine.structures import PixelData -from torch import Tensor, nn - -from mmpose.evaluation.functional import pose_pck_accuracy -from mmpose.models.utils.tta import flip_heatmaps -from mmpose.registry import KEYPOINT_CODECS, MODELS -from mmpose.utils.tensor_utils import to_numpy -from mmpose.utils.typing import (ConfigType, MultiConfig, OptConfigType, - OptSampleList, Predictions) -from ..base_head import BaseHead - -OptIntSeq = Optional[Sequence[int]] -MSMUFeatures = Sequence[Sequence[Tensor]] # Multi-stage multi-unit features - - -class PRM(nn.Module): - """Pose Refine Machine. 
- - Please refer to "Learning Delicate Local Representations - for Multi-Person Pose Estimation" (ECCV 2020). - - Args: - out_channels (int): Number of the output channels, equals to - the number of keypoints. - norm_cfg (Config): Config to construct the norm layer. - Defaults to ``dict(type='BN')`` - """ - - def __init__(self, - out_channels: int, - norm_cfg: ConfigType = dict(type='BN')): - super().__init__() - - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - self.out_channels = out_channels - self.global_pooling = nn.AdaptiveAvgPool2d((1, 1)) - self.middle_path = nn.Sequential( - Linear(self.out_channels, self.out_channels), - build_norm_layer(dict(type='BN1d'), out_channels)[1], - build_activation_layer(dict(type='ReLU')), - Linear(self.out_channels, self.out_channels), - build_norm_layer(dict(type='BN1d'), out_channels)[1], - build_activation_layer(dict(type='ReLU')), - build_activation_layer(dict(type='Sigmoid'))) - - self.bottom_path = nn.Sequential( - ConvModule( - self.out_channels, - self.out_channels, - kernel_size=1, - stride=1, - padding=0, - norm_cfg=norm_cfg, - inplace=False), - DepthwiseSeparableConvModule( - self.out_channels, - 1, - kernel_size=9, - stride=1, - padding=4, - norm_cfg=norm_cfg, - inplace=False), build_activation_layer(dict(type='Sigmoid'))) - self.conv_bn_relu_prm_1 = ConvModule( - self.out_channels, - self.out_channels, - kernel_size=3, - stride=1, - padding=1, - norm_cfg=norm_cfg, - inplace=False) - - def forward(self, x: Tensor) -> Tensor: - """Forward the network. The input heatmaps will be refined. - - Args: - x (Tensor): The input heatmaps. - - Returns: - Tensor: output heatmaps. - """ - out = self.conv_bn_relu_prm_1(x) - out_1 = out - - out_2 = self.global_pooling(out_1) - out_2 = out_2.view(out_2.size(0), -1) - out_2 = self.middle_path(out_2) - out_2 = out_2.unsqueeze(2) - out_2 = out_2.unsqueeze(3) - - out_3 = self.bottom_path(out_1) - out = out_1 * (1 + out_2 * out_3) - - return out - - -class PredictHeatmap(nn.Module): - """Predict the heatmap for an input feature. - - Args: - unit_channels (int): Number of input channels. - out_channels (int): Number of output channels. - out_shape (tuple): Shape of the output heatmaps. - use_prm (bool): Whether to use pose refine machine. Default: False. - norm_cfg (Config): Config to construct the norm layer. - Defaults to ``dict(type='BN')`` - """ - - def __init__(self, - unit_channels: int, - out_channels: int, - out_shape: tuple, - use_prm: bool = False, - norm_cfg: ConfigType = dict(type='BN')): - - super().__init__() - - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - self.unit_channels = unit_channels - self.out_channels = out_channels - self.out_shape = out_shape - self.use_prm = use_prm - if use_prm: - self.prm = PRM(out_channels, norm_cfg=norm_cfg) - self.conv_layers = nn.Sequential( - ConvModule( - unit_channels, - unit_channels, - kernel_size=1, - stride=1, - padding=0, - norm_cfg=norm_cfg, - inplace=False), - ConvModule( - unit_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1, - norm_cfg=norm_cfg, - act_cfg=None, - inplace=False)) - - def forward(self, feature: Tensor) -> Tensor: - """Forward the network. - - Args: - feature (Tensor): The input feature maps. - - Returns: - Tensor: output heatmaps. 
- """ - feature = self.conv_layers(feature) - output = nn.functional.interpolate( - feature, size=self.out_shape, mode='bilinear', align_corners=True) - if self.use_prm: - output = self.prm(output) - return output - - -@MODELS.register_module() -class MSPNHead(BaseHead): - """Multi-stage multi-unit heatmap head introduced in `Multi-Stage Pose - estimation Network (MSPN)`_ by Li et al (2019), and used by `Residual Steps - Networks (RSN)`_ by Cai et al (2020). The head consists of multiple stages - and each stage consists of multiple units. Each unit of each stage has some - conv layers. - - Args: - num_stages (int): Number of stages. - num_units (int): Number of units in each stage. - out_shape (tuple): The output shape of the output heatmaps. - unit_channels (int): Number of input channels. - out_channels (int): Number of output channels. - out_shape (tuple): Shape of the output heatmaps. - use_prm (bool): Whether to use pose refine machine (PRM). - Defaults to ``False``. - norm_cfg (Config): Config to construct the norm layer. - Defaults to ``dict(type='BN')`` - loss (Config | List[Config]): Config of the keypoint loss for - different stages and different units. - Defaults to use :class:`KeypointMSELoss`. - level_indices (Sequence[int]): The indices that specified the level - of target heatmaps. - decoder (Config, optional): The decoder config that controls decoding - keypoint coordinates from the network output. Defaults to ``None`` - init_cfg (Config, optional): Config to control the initialization. See - :attr:`default_init_cfg` for default settings - - .. _`MSPN`: https://arxiv.org/abs/1901.00148 - .. _`RSN`: https://arxiv.org/abs/2003.04030 - """ - _version = 2 - - def __init__(self, - num_stages: int = 4, - num_units: int = 4, - out_shape: tuple = (64, 48), - unit_channels: int = 256, - out_channels: int = 17, - use_prm: bool = False, - norm_cfg: ConfigType = dict(type='BN'), - level_indices: Sequence[int] = [], - loss: MultiConfig = dict( - type='KeypointMSELoss', use_target_weight=True), - decoder: OptConfigType = None, - init_cfg: OptConfigType = None): - if init_cfg is None: - init_cfg = self.default_init_cfg - super().__init__(init_cfg) - - self.num_stages = num_stages - self.num_units = num_units - self.out_shape = out_shape - self.unit_channels = unit_channels - self.out_channels = out_channels - if len(level_indices) != num_stages * num_units: - raise ValueError( - f'The length of level_indices({len(level_indices)}) did not ' - f'match `num_stages`({num_stages}) * `num_units`({num_units})') - - self.level_indices = level_indices - - if isinstance(loss, list) and len(loss) != num_stages * num_units: - raise ValueError( - f'The length of loss_module({len(loss)}) did not match ' - f'`num_stages`({num_stages}) * `num_units`({num_units})') - - if isinstance(loss, list): - if len(loss) != num_stages * num_units: - raise ValueError( - f'The length of loss_module({len(loss)}) did not match ' - f'`num_stages`({num_stages}) * `num_units`({num_units})') - self.loss_module = nn.ModuleList( - MODELS.build(_loss) for _loss in loss) - else: - self.loss_module = MODELS.build(loss) - - if decoder is not None: - self.decoder = KEYPOINT_CODECS.build(decoder) - else: - self.decoder = None - - # Protect mutable default arguments - norm_cfg = copy.deepcopy(norm_cfg) - - self.predict_layers = nn.ModuleList([]) - for i in range(self.num_stages): - for j in range(self.num_units): - self.predict_layers.append( - PredictHeatmap( - unit_channels, - out_channels, - out_shape, - use_prm, - 
norm_cfg=norm_cfg)) - - @property - def default_init_cfg(self): - """Default config for weight initialization.""" - init_cfg = [ - dict(type='Kaiming', layer='Conv2d'), - dict(type='Normal', layer='Linear', std=0.01), - dict(type='Constant', layer='BatchNorm2d', val=1), - ] - return init_cfg - - def forward(self, feats: Sequence[Sequence[Tensor]]) -> List[Tensor]: - """Forward the network. The input is multi-stage multi-unit feature - maps and the output is a list of heatmaps from multiple stages. - - Args: - feats (Sequence[Sequence[Tensor]]): Feature maps from multiple - stages and units. - - Returns: - List[Tensor]: A list of output heatmaps from multiple stages - and units. - """ - out = [] - assert len(feats) == self.num_stages, ( - f'The length of feature maps did not match the ' - f'`num_stages` in {self.__class__.__name__}') - for feat in feats: - assert len(feat) == self.num_units, ( - f'The length of feature maps did not match the ' - f'`num_units` in {self.__class__.__name__}') - for f in feat: - assert f.shape[1] == self.unit_channels, ( - f'The number of feature map channels did not match the ' - f'`unit_channels` in {self.__class__.__name__}') - - for i in range(self.num_stages): - for j in range(self.num_units): - y = self.predict_layers[i * self.num_units + j](feats[i][j]) - out.append(y) - return out - - def predict(self, - feats: Union[MSMUFeatures, List[MSMUFeatures]], - batch_data_samples: OptSampleList, - test_cfg: OptConfigType = {}) -> Predictions: - """Predict results from multi-stage feature maps. - - Args: - feats (Sequence[Sequence[Tensor]]): Multi-stage multi-unit - features (or multiple MSMU features for TTA) - batch_data_samples (List[:obj:`PoseDataSample`]): The Data - Samples. It usually includes information such as - `gt_instance_labels`. - test_cfg (Config, optional): The testing/inference config - - Returns: - Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If - ``test_cfg['output_heatmap']==True``, return both pose and heatmap - prediction; otherwise only return the pose prediction. 
- - The pose prediction is a list of ``InstanceData``, each contains - the following fields: - - - keypoints (np.ndarray): predicted keypoint coordinates in - shape (num_instances, K, D) where K is the keypoint number - and D is the keypoint dimension - - keypoint_scores (np.ndarray): predicted keypoint scores in - shape (num_instances, K) - - The heatmap prediction is a list of ``PixelData``, each contains - the following fields: - - - heatmaps (Tensor): The predicted heatmaps in shape (K, h, w) - """ - # multi-stage multi-unit batch heatmaps - if test_cfg.get('flip_test', False): - # TTA: flip test - assert isinstance(feats, list) and len(feats) == 2 - flip_indices = batch_data_samples[0].metainfo['flip_indices'] - _feats, _feats_flip = feats - _batch_heatmaps = self.forward(_feats)[-1] - _batch_heatmaps_flip = flip_heatmaps( - self.forward(_feats_flip)[-1], - flip_mode=test_cfg.get('flip_mode', 'heatmap'), - flip_indices=flip_indices, - shift_heatmap=test_cfg.get('shift_heatmap', False)) - batch_heatmaps = (_batch_heatmaps + _batch_heatmaps_flip) * 0.5 - else: - msmu_batch_heatmaps = self.forward(feats) - batch_heatmaps = msmu_batch_heatmaps[-1] - - preds = self.decode(batch_heatmaps) - - if test_cfg.get('output_heatmaps', False): - pred_fields = [ - PixelData(heatmaps=hm) for hm in batch_heatmaps.detach() - ] - return preds, pred_fields - else: - return preds - - def loss(self, - feats: MSMUFeatures, - batch_data_samples: OptSampleList, - train_cfg: OptConfigType = {}) -> dict: - """Calculate losses from a batch of inputs and data samples. - - Note: - - batch_size: B - - num_output_heatmap_levels: L - - num_keypoints: K - - heatmaps height: H - - heatmaps weight: W - - num_instances: N (usually 1 in topdown heatmap heads) - - Args: - feats (Sequence[Sequence[Tensor]]): Feature maps from multiple - stages and units - batch_data_samples (List[:obj:`PoseDataSample`]): The Data - Samples. It usually includes information such as - `gt_instance_labels` and `gt_fields`. - train_cfg (Config, optional): The training config - - Returns: - dict: A dictionary of loss components. - """ - # multi-stage multi-unit predict heatmaps - msmu_pred_heatmaps = self.forward(feats) - - keypoint_weights = torch.cat([ - d.gt_instance_labels.keypoint_weights for d in batch_data_samples - ]) # shape: [B*N, L, K] - - # calculate losses over multiple stages and multiple units - losses = dict() - for i in range(self.num_stages * self.num_units): - if isinstance(self.loss_module, nn.ModuleList): - # use different loss_module over different stages and units - loss_func = self.loss_module[i] - else: - # use the same loss_module over different stages and units - loss_func = self.loss_module - - # select `gt_heatmaps` and `keypoint_weights` for different level - # according to `self.level_indices` to calculate loss - gt_heatmaps = torch.stack([ - d.gt_fields[self.level_indices[i]].heatmaps - for d in batch_data_samples - ]) - loss_i = loss_func(msmu_pred_heatmaps[i], gt_heatmaps, - keypoint_weights[:, self.level_indices[i]]) - - if 'loss_kpt' not in losses: - losses['loss_kpt'] = loss_i - else: - losses['loss_kpt'] += loss_i - - # calculate accuracy - _, avg_acc, _ = pose_pck_accuracy( - output=to_numpy(msmu_pred_heatmaps[-1]), - target=to_numpy(gt_heatmaps), - mask=to_numpy(keypoint_weights[:, -1]) > 0) - - acc_pose = torch.tensor(avg_acc, device=gt_heatmaps.device) - losses.update(acc_pose=acc_pose) - - return losses +# Copyright (c) OpenMMLab. All rights reserved. 
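To make the stage/unit bookkeeping of `MSPNHead` concrete, a minimal forward-pass sketch (assumed import path; channel numbers and `level_indices` values are illustrative): the head expects one feature map per (stage, unit) pair and returns the same number of heatmaps, each resized to `out_shape`.

import torch
from mmpose.models.heads import MSPNHead  # assumed re-export path

head = MSPNHead(
    num_stages=2,
    num_units=2,
    out_shape=(64, 48),
    unit_channels=256,
    out_channels=17,
    # one entry per (stage, unit): len(level_indices) == num_stages * num_units
    level_indices=[0, 1, 0, 1])

# feats[i][j]: unit j of stage i, each with `unit_channels` channels
feats = [[torch.randn(2, 256, 16, 12) for _ in range(2)] for _ in range(2)]
heatmaps = head.forward(feats)

assert len(heatmaps) == 4                      # num_stages * num_units outputs
assert heatmaps[-1].shape == (2, 17, 64, 48)   # resized to `out_shape`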
+import copy +from typing import List, Optional, Sequence, Union + +import torch +from mmcv.cnn import (ConvModule, DepthwiseSeparableConvModule, Linear, + build_activation_layer, build_norm_layer) +from mmengine.structures import PixelData +from torch import Tensor, nn + +from mmpose.evaluation.functional import pose_pck_accuracy +from mmpose.models.utils.tta import flip_heatmaps +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, MultiConfig, OptConfigType, + OptSampleList, Predictions) +from ..base_head import BaseHead + +OptIntSeq = Optional[Sequence[int]] +MSMUFeatures = Sequence[Sequence[Tensor]] # Multi-stage multi-unit features + + +class PRM(nn.Module): + """Pose Refine Machine. + + Please refer to "Learning Delicate Local Representations + for Multi-Person Pose Estimation" (ECCV 2020). + + Args: + out_channels (int): Number of the output channels, equals to + the number of keypoints. + norm_cfg (Config): Config to construct the norm layer. + Defaults to ``dict(type='BN')`` + """ + + def __init__(self, + out_channels: int, + norm_cfg: ConfigType = dict(type='BN')): + super().__init__() + + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + self.out_channels = out_channels + self.global_pooling = nn.AdaptiveAvgPool2d((1, 1)) + self.middle_path = nn.Sequential( + Linear(self.out_channels, self.out_channels), + build_norm_layer(dict(type='BN1d'), out_channels)[1], + build_activation_layer(dict(type='ReLU')), + Linear(self.out_channels, self.out_channels), + build_norm_layer(dict(type='BN1d'), out_channels)[1], + build_activation_layer(dict(type='ReLU')), + build_activation_layer(dict(type='Sigmoid'))) + + self.bottom_path = nn.Sequential( + ConvModule( + self.out_channels, + self.out_channels, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=norm_cfg, + inplace=False), + DepthwiseSeparableConvModule( + self.out_channels, + 1, + kernel_size=9, + stride=1, + padding=4, + norm_cfg=norm_cfg, + inplace=False), build_activation_layer(dict(type='Sigmoid'))) + self.conv_bn_relu_prm_1 = ConvModule( + self.out_channels, + self.out_channels, + kernel_size=3, + stride=1, + padding=1, + norm_cfg=norm_cfg, + inplace=False) + + def forward(self, x: Tensor) -> Tensor: + """Forward the network. The input heatmaps will be refined. + + Args: + x (Tensor): The input heatmaps. + + Returns: + Tensor: output heatmaps. + """ + out = self.conv_bn_relu_prm_1(x) + out_1 = out + + out_2 = self.global_pooling(out_1) + out_2 = out_2.view(out_2.size(0), -1) + out_2 = self.middle_path(out_2) + out_2 = out_2.unsqueeze(2) + out_2 = out_2.unsqueeze(3) + + out_3 = self.bottom_path(out_1) + out = out_1 * (1 + out_2 * out_3) + + return out + + +class PredictHeatmap(nn.Module): + """Predict the heatmap for an input feature. + + Args: + unit_channels (int): Number of input channels. + out_channels (int): Number of output channels. + out_shape (tuple): Shape of the output heatmaps. + use_prm (bool): Whether to use pose refine machine. Default: False. + norm_cfg (Config): Config to construct the norm layer. 
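The `PRM` block above is shape-preserving: it modulates its 3x3-conv output with a channel-wise gate (global pooling branch) and a spatial gate (depthwise branch). A quick check, using the module path given in this diff:

import torch
from mmpose.models.heads.heatmap_heads.mspn_head import PRM

prm = PRM(out_channels=17)        # one channel per keypoint
x = torch.randn(2, 17, 64, 48)
out = prm(x)

# refined = conv(x) * (1 + channel_gate * spatial_gate), same shape as the input
assert out.shape == x.shape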
+ Defaults to ``dict(type='BN')`` + """ + + def __init__(self, + unit_channels: int, + out_channels: int, + out_shape: tuple, + use_prm: bool = False, + norm_cfg: ConfigType = dict(type='BN')): + + super().__init__() + + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + self.unit_channels = unit_channels + self.out_channels = out_channels + self.out_shape = out_shape + self.use_prm = use_prm + if use_prm: + self.prm = PRM(out_channels, norm_cfg=norm_cfg) + self.conv_layers = nn.Sequential( + ConvModule( + unit_channels, + unit_channels, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=norm_cfg, + inplace=False), + ConvModule( + unit_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1, + norm_cfg=norm_cfg, + act_cfg=None, + inplace=False)) + + def forward(self, feature: Tensor) -> Tensor: + """Forward the network. + + Args: + feature (Tensor): The input feature maps. + + Returns: + Tensor: output heatmaps. + """ + feature = self.conv_layers(feature) + output = nn.functional.interpolate( + feature, size=self.out_shape, mode='bilinear', align_corners=True) + if self.use_prm: + output = self.prm(output) + return output + + +@MODELS.register_module() +class MSPNHead(BaseHead): + """Multi-stage multi-unit heatmap head introduced in `Multi-Stage Pose + estimation Network (MSPN)`_ by Li et al (2019), and used by `Residual Steps + Networks (RSN)`_ by Cai et al (2020). The head consists of multiple stages + and each stage consists of multiple units. Each unit of each stage has some + conv layers. + + Args: + num_stages (int): Number of stages. + num_units (int): Number of units in each stage. + out_shape (tuple): The output shape of the output heatmaps. + unit_channels (int): Number of input channels. + out_channels (int): Number of output channels. + out_shape (tuple): Shape of the output heatmaps. + use_prm (bool): Whether to use pose refine machine (PRM). + Defaults to ``False``. + norm_cfg (Config): Config to construct the norm layer. + Defaults to ``dict(type='BN')`` + loss (Config | List[Config]): Config of the keypoint loss for + different stages and different units. + Defaults to use :class:`KeypointMSELoss`. + level_indices (Sequence[int]): The indices that specified the level + of target heatmaps. + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. _`MSPN`: https://arxiv.org/abs/1901.00148 + .. 
_`RSN`: https://arxiv.org/abs/2003.04030 + """ + _version = 2 + + def __init__(self, + num_stages: int = 4, + num_units: int = 4, + out_shape: tuple = (64, 48), + unit_channels: int = 256, + out_channels: int = 17, + use_prm: bool = False, + norm_cfg: ConfigType = dict(type='BN'), + level_indices: Sequence[int] = [], + loss: MultiConfig = dict( + type='KeypointMSELoss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + if init_cfg is None: + init_cfg = self.default_init_cfg + super().__init__(init_cfg) + + self.num_stages = num_stages + self.num_units = num_units + self.out_shape = out_shape + self.unit_channels = unit_channels + self.out_channels = out_channels + if len(level_indices) != num_stages * num_units: + raise ValueError( + f'The length of level_indices({len(level_indices)}) did not ' + f'match `num_stages`({num_stages}) * `num_units`({num_units})') + + self.level_indices = level_indices + + if isinstance(loss, list) and len(loss) != num_stages * num_units: + raise ValueError( + f'The length of loss_module({len(loss)}) did not match ' + f'`num_stages`({num_stages}) * `num_units`({num_units})') + + if isinstance(loss, list): + if len(loss) != num_stages * num_units: + raise ValueError( + f'The length of loss_module({len(loss)}) did not match ' + f'`num_stages`({num_stages}) * `num_units`({num_units})') + self.loss_module = nn.ModuleList( + MODELS.build(_loss) for _loss in loss) + else: + self.loss_module = MODELS.build(loss) + + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + # Protect mutable default arguments + norm_cfg = copy.deepcopy(norm_cfg) + + self.predict_layers = nn.ModuleList([]) + for i in range(self.num_stages): + for j in range(self.num_units): + self.predict_layers.append( + PredictHeatmap( + unit_channels, + out_channels, + out_shape, + use_prm, + norm_cfg=norm_cfg)) + + @property + def default_init_cfg(self): + """Default config for weight initialization.""" + init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict(type='Normal', layer='Linear', std=0.01), + dict(type='Constant', layer='BatchNorm2d', val=1), + ] + return init_cfg + + def forward(self, feats: Sequence[Sequence[Tensor]]) -> List[Tensor]: + """Forward the network. The input is multi-stage multi-unit feature + maps and the output is a list of heatmaps from multiple stages. + + Args: + feats (Sequence[Sequence[Tensor]]): Feature maps from multiple + stages and units. + + Returns: + List[Tensor]: A list of output heatmaps from multiple stages + and units. + """ + out = [] + assert len(feats) == self.num_stages, ( + f'The length of feature maps did not match the ' + f'`num_stages` in {self.__class__.__name__}') + for feat in feats: + assert len(feat) == self.num_units, ( + f'The length of feature maps did not match the ' + f'`num_units` in {self.__class__.__name__}') + for f in feat: + assert f.shape[1] == self.unit_channels, ( + f'The number of feature map channels did not match the ' + f'`unit_channels` in {self.__class__.__name__}') + + for i in range(self.num_stages): + for j in range(self.num_units): + y = self.predict_layers[i * self.num_units + j](feats[i][j]) + out.append(y) + return out + + def predict(self, + feats: Union[MSMUFeatures, List[MSMUFeatures]], + batch_data_samples: OptSampleList, + test_cfg: OptConfigType = {}) -> Predictions: + """Predict results from multi-stage feature maps. 
+ + Args: + feats (Sequence[Sequence[Tensor]]): Multi-stage multi-unit + features (or multiple MSMU features for TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance_labels`. + test_cfg (Config, optional): The testing/inference config + + Returns: + Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If + ``test_cfg['output_heatmap']==True``, return both pose and heatmap + prediction; otherwise only return the pose prediction. + + The pose prediction is a list of ``InstanceData``, each contains + the following fields: + + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + + The heatmap prediction is a list of ``PixelData``, each contains + the following fields: + + - heatmaps (Tensor): The predicted heatmaps in shape (K, h, w) + """ + # multi-stage multi-unit batch heatmaps + if test_cfg.get('flip_test', False): + # TTA: flip test + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + _feats, _feats_flip = feats + _batch_heatmaps = self.forward(_feats)[-1] + _batch_heatmaps_flip = flip_heatmaps( + self.forward(_feats_flip)[-1], + flip_mode=test_cfg.get('flip_mode', 'heatmap'), + flip_indices=flip_indices, + shift_heatmap=test_cfg.get('shift_heatmap', False)) + batch_heatmaps = (_batch_heatmaps + _batch_heatmaps_flip) * 0.5 + else: + msmu_batch_heatmaps = self.forward(feats) + batch_heatmaps = msmu_batch_heatmaps[-1] + + preds = self.decode(batch_heatmaps) + + if test_cfg.get('output_heatmaps', False): + pred_fields = [ + PixelData(heatmaps=hm) for hm in batch_heatmaps.detach() + ] + return preds, pred_fields + else: + return preds + + def loss(self, + feats: MSMUFeatures, + batch_data_samples: OptSampleList, + train_cfg: OptConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Note: + - batch_size: B + - num_output_heatmap_levels: L + - num_keypoints: K + - heatmaps height: H + - heatmaps weight: W + - num_instances: N (usually 1 in topdown heatmap heads) + + Args: + feats (Sequence[Sequence[Tensor]]): Feature maps from multiple + stages and units + batch_data_samples (List[:obj:`PoseDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance_labels` and `gt_fields`. + train_cfg (Config, optional): The training config + + Returns: + dict: A dictionary of loss components. 
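As enforced by the checks in `__init__` above, when `loss` is given as a list it must contain exactly `num_stages * num_units` configs, one per predicted heatmap; a hypothetical configuration sketch (assumed import path, values illustrative):

from torch import nn
from mmpose.models.heads import MSPNHead  # assumed re-export path

# 2 stages x 2 units -> 4 loss configs, one per output heatmap; the configs
# may differ per unit, identical ones are used here for simplicity
per_unit_losses = [
    dict(type='KeypointMSELoss', use_target_weight=True) for _ in range(4)
]

head = MSPNHead(
    num_stages=2,
    num_units=2,
    out_shape=(64, 48),
    unit_channels=256,
    out_channels=17,
    level_indices=[0, 1, 0, 1],    # illustrative values
    loss=per_unit_losses)          # built into an nn.ModuleList of 4 losses

assert isinstance(head.loss_module, nn.ModuleList)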
+ """ + # multi-stage multi-unit predict heatmaps + msmu_pred_heatmaps = self.forward(feats) + + keypoint_weights = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples + ]) # shape: [B*N, L, K] + + # calculate losses over multiple stages and multiple units + losses = dict() + for i in range(self.num_stages * self.num_units): + if isinstance(self.loss_module, nn.ModuleList): + # use different loss_module over different stages and units + loss_func = self.loss_module[i] + else: + # use the same loss_module over different stages and units + loss_func = self.loss_module + + # select `gt_heatmaps` and `keypoint_weights` for different level + # according to `self.level_indices` to calculate loss + gt_heatmaps = torch.stack([ + d.gt_fields[self.level_indices[i]].heatmaps + for d in batch_data_samples + ]) + loss_i = loss_func(msmu_pred_heatmaps[i], gt_heatmaps, + keypoint_weights[:, self.level_indices[i]]) + + if 'loss_kpt' not in losses: + losses['loss_kpt'] = loss_i + else: + losses['loss_kpt'] += loss_i + + # calculate accuracy + _, avg_acc, _ = pose_pck_accuracy( + output=to_numpy(msmu_pred_heatmaps[-1]), + target=to_numpy(gt_heatmaps), + mask=to_numpy(keypoint_weights[:, -1]) > 0) + + acc_pose = torch.tensor(avg_acc, device=gt_heatmaps.device) + losses.update(acc_pose=acc_pose) + + return losses diff --git a/mmpose/models/heads/heatmap_heads/vipnas_head.py b/mmpose/models/heads/heatmap_heads/vipnas_head.py index 949ee95b09..7a77dd2a08 100644 --- a/mmpose/models/heads/heatmap_heads/vipnas_head.py +++ b/mmpose/models/heads/heatmap_heads/vipnas_head.py @@ -1,179 +1,179 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Optional, Sequence, Union - -from mmcv.cnn import build_conv_layer, build_upsample_layer -from torch import nn - -from mmpose.registry import KEYPOINT_CODECS, MODELS -from mmpose.utils.typing import ConfigType, OptConfigType -from .heatmap_head import HeatmapHead - -OptIntSeq = Optional[Sequence[int]] - - -@MODELS.register_module() -class ViPNASHead(HeatmapHead): - """ViPNAS heatmap head introduced in `ViPNAS`_ by Xu et al (2021). The head - is composed of a few deconvolutional layers followed by a convolutional - layer to generate heatmaps from low-resolution feature maps. Specifically, - different from the :class: `HeatmapHead` introduced by `Simple Baselines`_, - the group numbers in the deconvolutional layers are elastic and thus can be - optimized by neural architecture search (NAS). - - Args: - in_channels (int | Sequence[int]): Number of channels in the input - feature map - out_channels (int): Number of channels in the output heatmap - deconv_out_channels (Sequence[int], optional): The output channel - number of each deconv layer. Defaults to ``(144, 144, 144)`` - deconv_kernel_sizes (Sequence[int | tuple], optional): The kernel size - of each deconv layer. Each element should be either an integer for - both height and width dimensions, or a tuple of two integers for - the height and the width dimension respectively.Defaults to - ``(4, 4, 4)`` - deconv_num_groups (Sequence[int], optional): The group number of each - deconv layer. Defaults to ``(16, 16, 16)`` - conv_out_channels (Sequence[int], optional): The output channel number - of each intermediate conv layer. ``None`` means no intermediate - conv layer between deconv layers and the final conv layer. - Defaults to ``None`` - conv_kernel_sizes (Sequence[int | tuple], optional): The kernel size - of each intermediate conv layer. 
Defaults to ``None`` - final_layer (dict): Arguments of the final Conv2d layer. - Defaults to ``dict(kernel_size=1)`` - loss (Config): Config of the keypoint loss. Defaults to use - :class:`KeypointMSELoss` - decoder (Config, optional): The decoder config that controls decoding - keypoint coordinates from the network output. Defaults to ``None`` - init_cfg (Config, optional): Config to control the initialization. See - :attr:`default_init_cfg` for default settings - - .. _`ViPNAS`: https://arxiv.org/abs/2105.10154 - .. _`Simple Baselines`: https://arxiv.org/abs/1804.06208 - """ - - _version = 2 - - def __init__(self, - in_channels: Union[int, Sequence[int]], - out_channels: int, - deconv_out_channels: OptIntSeq = (144, 144, 144), - deconv_kernel_sizes: OptIntSeq = (4, 4, 4), - deconv_num_groups: OptIntSeq = (16, 16, 16), - conv_out_channels: OptIntSeq = None, - conv_kernel_sizes: OptIntSeq = None, - final_layer: dict = dict(kernel_size=1), - loss: ConfigType = dict( - type='KeypointMSELoss', use_target_weight=True), - decoder: OptConfigType = None, - init_cfg: OptConfigType = None): - - if init_cfg is None: - init_cfg = self.default_init_cfg - - super(HeatmapHead, self).__init__(init_cfg) - - self.in_channels = in_channels - self.out_channels = out_channels - self.loss_module = MODELS.build(loss) - if decoder is not None: - self.decoder = KEYPOINT_CODECS.build(decoder) - else: - self.decoder = None - - if deconv_out_channels: - if deconv_kernel_sizes is None or len(deconv_out_channels) != len( - deconv_kernel_sizes): - raise ValueError( - '"deconv_out_channels" and "deconv_kernel_sizes" should ' - 'be integer sequences with the same length. Got ' - f'mismatched lengths {deconv_out_channels} and ' - f'{deconv_kernel_sizes}') - if deconv_num_groups is None or len(deconv_out_channels) != len( - deconv_num_groups): - raise ValueError( - '"deconv_out_channels" and "deconv_num_groups" should ' - 'be integer sequences with the same length. Got ' - f'mismatched lengths {deconv_out_channels} and ' - f'{deconv_num_groups}') - - self.deconv_layers = self._make_deconv_layers( - in_channels=in_channels, - layer_out_channels=deconv_out_channels, - layer_kernel_sizes=deconv_kernel_sizes, - layer_groups=deconv_num_groups, - ) - in_channels = deconv_out_channels[-1] - else: - self.deconv_layers = nn.Identity() - - if conv_out_channels: - if conv_kernel_sizes is None or len(conv_out_channels) != len( - conv_kernel_sizes): - raise ValueError( - '"conv_out_channels" and "conv_kernel_sizes" should ' - 'be integer sequences with the same length. 
Got ' - f'mismatched lengths {conv_out_channels} and ' - f'{conv_kernel_sizes}') - - self.conv_layers = self._make_conv_layers( - in_channels=in_channels, - layer_out_channels=conv_out_channels, - layer_kernel_sizes=conv_kernel_sizes) - in_channels = conv_out_channels[-1] - else: - self.conv_layers = nn.Identity() - - if final_layer is not None: - cfg = dict( - type='Conv2d', - in_channels=in_channels, - out_channels=out_channels, - kernel_size=1) - cfg.update(final_layer) - self.final_layer = build_conv_layer(cfg) - else: - self.final_layer = nn.Identity() - - # Register the hook to automatically convert old version state dicts - self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) - - def _make_deconv_layers(self, in_channels: int, - layer_out_channels: Sequence[int], - layer_kernel_sizes: Sequence[int], - layer_groups: Sequence[int]) -> nn.Module: - """Create deconvolutional layers by given parameters.""" - - layers = [] - for out_channels, kernel_size, groups in zip(layer_out_channels, - layer_kernel_sizes, - layer_groups): - if kernel_size == 4: - padding = 1 - output_padding = 0 - elif kernel_size == 3: - padding = 1 - output_padding = 1 - elif kernel_size == 2: - padding = 0 - output_padding = 0 - else: - raise ValueError(f'Unsupported kernel size {kernel_size} for' - 'deconvlutional layers in ' - f'{self.__class__.__name__}') - cfg = dict( - type='deconv', - in_channels=in_channels, - out_channels=out_channels, - kernel_size=kernel_size, - groups=groups, - stride=2, - padding=padding, - output_padding=output_padding, - bias=False) - layers.append(build_upsample_layer(cfg)) - layers.append(nn.BatchNorm2d(num_features=out_channels)) - layers.append(nn.ReLU(inplace=True)) - in_channels = out_channels - - return nn.Sequential(*layers) +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Sequence, Union + +from mmcv.cnn import build_conv_layer, build_upsample_layer +from torch import nn + +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.typing import ConfigType, OptConfigType +from .heatmap_head import HeatmapHead + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class ViPNASHead(HeatmapHead): + """ViPNAS heatmap head introduced in `ViPNAS`_ by Xu et al (2021). The head + is composed of a few deconvolutional layers followed by a convolutional + layer to generate heatmaps from low-resolution feature maps. Specifically, + different from the :class: `HeatmapHead` introduced by `Simple Baselines`_, + the group numbers in the deconvolutional layers are elastic and thus can be + optimized by neural architecture search (NAS). + + Args: + in_channels (int | Sequence[int]): Number of channels in the input + feature map + out_channels (int): Number of channels in the output heatmap + deconv_out_channels (Sequence[int], optional): The output channel + number of each deconv layer. Defaults to ``(144, 144, 144)`` + deconv_kernel_sizes (Sequence[int | tuple], optional): The kernel size + of each deconv layer. Each element should be either an integer for + both height and width dimensions, or a tuple of two integers for + the height and the width dimension respectively.Defaults to + ``(4, 4, 4)`` + deconv_num_groups (Sequence[int], optional): The group number of each + deconv layer. Defaults to ``(16, 16, 16)`` + conv_out_channels (Sequence[int], optional): The output channel number + of each intermediate conv layer. ``None`` means no intermediate + conv layer between deconv layers and the final conv layer. 
+ Defaults to ``None`` + conv_kernel_sizes (Sequence[int | tuple], optional): The kernel size + of each intermediate conv layer. Defaults to ``None`` + final_layer (dict): Arguments of the final Conv2d layer. + Defaults to ``dict(kernel_size=1)`` + loss (Config): Config of the keypoint loss. Defaults to use + :class:`KeypointMSELoss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. _`ViPNAS`: https://arxiv.org/abs/2105.10154 + .. _`Simple Baselines`: https://arxiv.org/abs/1804.06208 + """ + + _version = 2 + + def __init__(self, + in_channels: Union[int, Sequence[int]], + out_channels: int, + deconv_out_channels: OptIntSeq = (144, 144, 144), + deconv_kernel_sizes: OptIntSeq = (4, 4, 4), + deconv_num_groups: OptIntSeq = (16, 16, 16), + conv_out_channels: OptIntSeq = None, + conv_kernel_sizes: OptIntSeq = None, + final_layer: dict = dict(kernel_size=1), + loss: ConfigType = dict( + type='KeypointMSELoss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super(HeatmapHead, self).__init__(init_cfg) + + self.in_channels = in_channels + self.out_channels = out_channels + self.loss_module = MODELS.build(loss) + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + if deconv_out_channels: + if deconv_kernel_sizes is None or len(deconv_out_channels) != len( + deconv_kernel_sizes): + raise ValueError( + '"deconv_out_channels" and "deconv_kernel_sizes" should ' + 'be integer sequences with the same length. Got ' + f'mismatched lengths {deconv_out_channels} and ' + f'{deconv_kernel_sizes}') + if deconv_num_groups is None or len(deconv_out_channels) != len( + deconv_num_groups): + raise ValueError( + '"deconv_out_channels" and "deconv_num_groups" should ' + 'be integer sequences with the same length. Got ' + f'mismatched lengths {deconv_out_channels} and ' + f'{deconv_num_groups}') + + self.deconv_layers = self._make_deconv_layers( + in_channels=in_channels, + layer_out_channels=deconv_out_channels, + layer_kernel_sizes=deconv_kernel_sizes, + layer_groups=deconv_num_groups, + ) + in_channels = deconv_out_channels[-1] + else: + self.deconv_layers = nn.Identity() + + if conv_out_channels: + if conv_kernel_sizes is None or len(conv_out_channels) != len( + conv_kernel_sizes): + raise ValueError( + '"conv_out_channels" and "conv_kernel_sizes" should ' + 'be integer sequences with the same length. 
Got ' + f'mismatched lengths {conv_out_channels} and ' + f'{conv_kernel_sizes}') + + self.conv_layers = self._make_conv_layers( + in_channels=in_channels, + layer_out_channels=conv_out_channels, + layer_kernel_sizes=conv_kernel_sizes) + in_channels = conv_out_channels[-1] + else: + self.conv_layers = nn.Identity() + + if final_layer is not None: + cfg = dict( + type='Conv2d', + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1) + cfg.update(final_layer) + self.final_layer = build_conv_layer(cfg) + else: + self.final_layer = nn.Identity() + + # Register the hook to automatically convert old version state dicts + self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) + + def _make_deconv_layers(self, in_channels: int, + layer_out_channels: Sequence[int], + layer_kernel_sizes: Sequence[int], + layer_groups: Sequence[int]) -> nn.Module: + """Create deconvolutional layers by given parameters.""" + + layers = [] + for out_channels, kernel_size, groups in zip(layer_out_channels, + layer_kernel_sizes, + layer_groups): + if kernel_size == 4: + padding = 1 + output_padding = 0 + elif kernel_size == 3: + padding = 1 + output_padding = 1 + elif kernel_size == 2: + padding = 0 + output_padding = 0 + else: + raise ValueError(f'Unsupported kernel size {kernel_size} for' + 'deconvlutional layers in ' + f'{self.__class__.__name__}') + cfg = dict( + type='deconv', + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + groups=groups, + stride=2, + padding=padding, + output_padding=output_padding, + bias=False) + layers.append(build_upsample_layer(cfg)) + layers.append(nn.BatchNorm2d(num_features=out_channels)) + layers.append(nn.ReLU(inplace=True)) + in_channels = out_channels + + return nn.Sequential(*layers) diff --git a/mmpose/models/heads/hybrid_heads/__init__.py b/mmpose/models/heads/hybrid_heads/__init__.py index 6431b6a2c2..af3c5a4b05 100644 --- a/mmpose/models/heads/hybrid_heads/__init__.py +++ b/mmpose/models/heads/hybrid_heads/__init__.py @@ -1,5 +1,5 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .dekr_head import DEKRHead -from .vis_head import VisPredictHead - -__all__ = ['DEKRHead', 'VisPredictHead'] +# Copyright (c) OpenMMLab. All rights reserved. +from .dekr_head import DEKRHead +from .vis_head import VisPredictHead + +__all__ = ['DEKRHead', 'VisPredictHead'] diff --git a/mmpose/models/heads/hybrid_heads/dekr_head.py b/mmpose/models/heads/hybrid_heads/dekr_head.py index 41f7cfc4ce..3a6bf69414 100644 --- a/mmpose/models/heads/hybrid_heads/dekr_head.py +++ b/mmpose/models/heads/hybrid_heads/dekr_head.py @@ -1,581 +1,581 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
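# Illustrative usage sketch for the ViPNASHead above -- not part of the diff.
# A minimal forward pass, assuming the head is exported from
# ``mmpose.models.heads`` and that the backbone emits 160-channel features;
# both are assumptions for illustration, real values come from the config.
import torch
from mmpose.models.heads import ViPNASHead

head = ViPNASHead(in_channels=160, out_channels=17)
# The head consumes the last feature map in the tuple; with three stride-2
# deconv layers, an 8x6 input map is upsampled to a 64x48 heatmap.
feats = (torch.randn(2, 160, 8, 6),)
heatmaps = head.forward(feats)
print(heatmaps.shape)  # expected: torch.Size([2, 17, 64, 48])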
-from typing import Sequence, Tuple, Union - -import torch -from mmcv.cnn import (ConvModule, build_activation_layer, build_conv_layer, - build_norm_layer) -from mmengine.model import BaseModule, ModuleDict, Sequential -from mmengine.structures import InstanceData, PixelData -from torch import Tensor - -from mmpose.evaluation.functional.nms import nearby_joints_nms -from mmpose.models.utils.tta import flip_heatmaps -from mmpose.registry import KEYPOINT_CODECS, MODELS -from mmpose.utils.tensor_utils import to_numpy -from mmpose.utils.typing import (ConfigType, Features, InstanceList, - OptConfigType, OptSampleList, Predictions) -from ...backbones.resnet import BasicBlock -from ..base_head import BaseHead - -try: - from mmcv.ops import DeformConv2d - has_mmcv_full = True -except (ImportError, ModuleNotFoundError): - has_mmcv_full = False - - -class AdaptiveActivationBlock(BaseModule): - """Adaptive activation convolution block. "Bottom-up human pose estimation - via disentangled keypoint regression", CVPR'2021. - - Args: - in_channels (int): Number of input channels - out_channels (int): Number of output channels - groups (int): Number of groups. Generally equal to the - number of joints. - norm_cfg (dict): Config for normalization layers. - act_cfg (dict): Config for activation layers. - """ - - def __init__(self, - in_channels, - out_channels, - groups=1, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU'), - init_cfg=None): - super(AdaptiveActivationBlock, self).__init__(init_cfg=init_cfg) - - assert in_channels % groups == 0 and out_channels % groups == 0 - self.groups = groups - - regular_matrix = torch.tensor([[-1, -1, -1, 0, 0, 0, 1, 1, 1], - [-1, 0, 1, -1, 0, 1, -1, 0, 1], - [1, 1, 1, 1, 1, 1, 1, 1, 1]]) - self.register_buffer('regular_matrix', regular_matrix.float()) - - self.transform_matrix_conv = build_conv_layer( - dict(type='Conv2d'), - in_channels=in_channels, - out_channels=6 * groups, - kernel_size=3, - padding=1, - groups=groups, - bias=True) - - if has_mmcv_full: - self.adapt_conv = DeformConv2d( - in_channels, - out_channels, - kernel_size=3, - padding=1, - bias=False, - groups=groups, - deform_groups=groups) - else: - raise ImportError('Please install the full version of mmcv ' - 'to use `DeformConv2d`.') - - self.norm = build_norm_layer(norm_cfg, out_channels)[1] - self.act = build_activation_layer(act_cfg) - - def forward(self, x): - B, _, H, W = x.size() - residual = x - - affine_matrix = self.transform_matrix_conv(x) - affine_matrix = affine_matrix.permute(0, 2, 3, 1).contiguous() - affine_matrix = affine_matrix.view(B, H, W, self.groups, 2, 3) - offset = torch.matmul(affine_matrix, self.regular_matrix) - offset = offset.transpose(4, 5).reshape(B, H, W, self.groups * 18) - offset = offset.permute(0, 3, 1, 2).contiguous() - - x = self.adapt_conv(x, offset) - x = self.norm(x) - x = self.act(x + residual) - - return x - - -class RescoreNet(BaseModule): - """Rescore net used to predict the OKS score of predicted pose. We use the - off-the-shelf rescore net pretrained by authors of DEKR. 
- - Args: - in_channels (int): Input channels - norm_indexes (Tuple(int)): Indices of torso in skeleton - init_cfg (dict, optional): Initialization config dict - """ - - def __init__( - self, - in_channels, - norm_indexes, - init_cfg=None, - ): - super(RescoreNet, self).__init__(init_cfg=init_cfg) - - self.norm_indexes = norm_indexes - - hidden = 256 - - self.l1 = torch.nn.Linear(in_channels, hidden, bias=True) - self.l2 = torch.nn.Linear(hidden, hidden, bias=True) - self.l3 = torch.nn.Linear(hidden, 1, bias=True) - self.relu = torch.nn.ReLU() - - def make_feature(self, keypoints, keypoint_scores, skeleton): - """Combine original scores, joint distance and relative distance to - make feature. - - Args: - keypoints (torch.Tensor): predicetd keypoints - keypoint_scores (torch.Tensor): predicetd keypoint scores - skeleton (list(list(int))): joint links - - Returns: - torch.Tensor: feature for each instance - """ - joint_1, joint_2 = zip(*skeleton) - num_link = len(skeleton) - - joint_relate = (keypoints[:, joint_1] - - keypoints[:, joint_2])[:, :, :2] - joint_length = joint_relate.norm(dim=2) - - # To use the torso distance to normalize - normalize = (joint_length[:, self.norm_indexes[0]] + - joint_length[:, self.norm_indexes[1]]) / 2 - normalize = normalize.unsqueeze(1).expand(normalize.size(0), num_link) - normalize = normalize.clamp(min=1).contiguous() - - joint_length = joint_length / normalize[:, :] - joint_relate = joint_relate / normalize.unsqueeze(-1) - joint_relate = joint_relate.flatten(1) - - feature = torch.cat((joint_relate, joint_length, keypoint_scores), - dim=1).float() - return feature - - def forward(self, keypoints, keypoint_scores, skeleton): - feature = self.make_feature(keypoints, keypoint_scores, skeleton) - x = self.relu(self.l1(feature)) - x = self.relu(self.l2(x)) - x = self.l3(x) - return x.squeeze(1) - - -@MODELS.register_module() -class DEKRHead(BaseHead): - """DisEntangled Keypoint Regression head introduced in `Bottom-up human - pose estimation via disentangled keypoint regression`_ by Geng et al - (2021). The head is composed of a heatmap branch and a displacement branch. - - Args: - in_channels (int | Sequence[int]): Number of channels in the input - feature map - num_joints (int): Number of joints - num_heatmap_filters (int): Number of filters for heatmap branch. - Defaults to 32 - num_offset_filters_per_joint (int): Number of filters for each joint - in displacement branch. Defaults to 15 - heatmap_loss (Config): Config of the heatmap loss. Defaults to use - :class:`KeypointMSELoss` - displacement_loss (Config): Config of the displacement regression loss. - Defaults to use :class:`SoftWeightSmoothL1Loss` - decoder (Config, optional): The decoder config that controls decoding - keypoint coordinates from the network output. Defaults to ``None`` - rescore_cfg (Config, optional): The config for rescore net which - estimates OKS via predicted keypoints and keypoint scores. - Defaults to ``None`` - init_cfg (Config, optional): Config to control the initialization. See - :attr:`default_init_cfg` for default settings - - .. 
_`Bottom-up human pose estimation via disentangled keypoint regression`: - https://arxiv.org/abs/2104.02300 - """ - - _version = 2 - - def __init__(self, - in_channels: Union[int, Sequence[int]], - num_keypoints: int, - num_heatmap_filters: int = 32, - num_displacement_filters_per_keypoint: int = 15, - heatmap_loss: ConfigType = dict( - type='KeypointMSELoss', use_target_weight=True), - displacement_loss: ConfigType = dict( - type='SoftWeightSmoothL1Loss', - use_target_weight=True, - supervise_empty=False), - decoder: OptConfigType = None, - rescore_cfg: OptConfigType = None, - init_cfg: OptConfigType = None): - - if init_cfg is None: - init_cfg = self.default_init_cfg - - super().__init__(init_cfg) - - self.in_channels = in_channels - self.num_keypoints = num_keypoints - - # build heatmap branch - self.heatmap_conv_layers = self._make_heatmap_conv_layers( - in_channels=in_channels, - out_channels=1 + num_keypoints, - num_filters=num_heatmap_filters, - ) - - # build displacement branch - self.displacement_conv_layers = self._make_displacement_conv_layers( - in_channels=in_channels, - out_channels=2 * num_keypoints, - num_filters=num_keypoints * num_displacement_filters_per_keypoint, - groups=num_keypoints) - - # build losses - self.loss_module = ModuleDict( - dict( - heatmap=MODELS.build(heatmap_loss), - displacement=MODELS.build(displacement_loss), - )) - - # build decoder - if decoder is not None: - self.decoder = KEYPOINT_CODECS.build(decoder) - else: - self.decoder = None - - # build rescore net - if rescore_cfg is not None: - self.rescore_net = RescoreNet(**rescore_cfg) - else: - self.rescore_net = None - - # Register the hook to automatically convert old version state dicts - self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) - - @property - def default_init_cfg(self): - init_cfg = [ - dict( - type='Normal', layer=['Conv2d', 'ConvTranspose2d'], std=0.001), - dict(type='Constant', layer='BatchNorm2d', val=1) - ] - return init_cfg - - def _make_heatmap_conv_layers(self, in_channels: int, out_channels: int, - num_filters: int): - """Create convolutional layers of heatmap branch by given - parameters.""" - layers = [ - ConvModule( - in_channels=in_channels, - out_channels=num_filters, - kernel_size=1, - norm_cfg=dict(type='BN')), - BasicBlock(num_filters, num_filters), - build_conv_layer( - dict(type='Conv2d'), - in_channels=num_filters, - out_channels=out_channels, - kernel_size=1), - ] - - return Sequential(*layers) - - def _make_displacement_conv_layers(self, in_channels: int, - out_channels: int, num_filters: int, - groups: int): - """Create convolutional layers of displacement branch by given - parameters.""" - layers = [ - ConvModule( - in_channels=in_channels, - out_channels=num_filters, - kernel_size=1, - norm_cfg=dict(type='BN')), - AdaptiveActivationBlock(num_filters, num_filters, groups=groups), - AdaptiveActivationBlock(num_filters, num_filters, groups=groups), - build_conv_layer( - dict(type='Conv2d'), - in_channels=num_filters, - out_channels=out_channels, - kernel_size=1, - groups=groups) - ] - - return Sequential(*layers) - - def forward(self, feats: Tuple[Tensor]) -> Tensor: - """Forward the network. The input is multi scale feature maps and the - output is a tuple of heatmap and displacement. - - Args: - feats (Tuple[Tensor]): Multi scale feature maps. - - Returns: - Tuple[Tensor]: output heatmap and displacement. 
- """ - x = feats[-1] - - heatmaps = self.heatmap_conv_layers(x) - displacements = self.displacement_conv_layers(x) - - return heatmaps, displacements - - def loss(self, - feats: Tuple[Tensor], - batch_data_samples: OptSampleList, - train_cfg: ConfigType = {}) -> dict: - """Calculate losses from a batch of inputs and data samples. - - Args: - feats (Tuple[Tensor]): The multi-stage features - batch_data_samples (List[:obj:`PoseDataSample`]): The batch - data samples - train_cfg (dict): The runtime config for training process. - Defaults to {} - - Returns: - dict: A dictionary of losses. - """ - pred_heatmaps, pred_displacements = self.forward(feats) - gt_heatmaps = torch.stack( - [d.gt_fields.heatmaps for d in batch_data_samples]) - heatmap_weights = torch.stack( - [d.gt_fields.heatmap_weights for d in batch_data_samples]) - gt_displacements = torch.stack( - [d.gt_fields.displacements for d in batch_data_samples]) - displacement_weights = torch.stack( - [d.gt_fields.displacement_weights for d in batch_data_samples]) - - if 'heatmap_mask' in batch_data_samples[0].gt_fields.keys(): - heatmap_mask = torch.stack( - [d.gt_fields.heatmap_mask for d in batch_data_samples]) - else: - heatmap_mask = None - - # calculate losses - losses = dict() - heatmap_loss = self.loss_module['heatmap'](pred_heatmaps, gt_heatmaps, - heatmap_weights, - heatmap_mask) - displacement_loss = self.loss_module['displacement']( - pred_displacements, gt_displacements, displacement_weights) - - losses.update({ - 'loss/heatmap': heatmap_loss, - 'loss/displacement': displacement_loss, - }) - - return losses - - def predict(self, - feats: Features, - batch_data_samples: OptSampleList, - test_cfg: ConfigType = {}) -> Predictions: - """Predict results from features. - - Args: - feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage - features (or multiple multi-scale features in TTA) - batch_data_samples (List[:obj:`PoseDataSample`]): The batch - data samples - test_cfg (dict): The runtime config for testing process. Defaults - to {} - - Returns: - Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If - ``test_cfg['output_heatmap']==True``, return both pose and heatmap - prediction; otherwise only return the pose prediction. 
- - The pose prediction is a list of ``InstanceData``, each contains - the following fields: - - - keypoints (np.ndarray): predicted keypoint coordinates in - shape (num_instances, K, D) where K is the keypoint number - and D is the keypoint dimension - - keypoint_scores (np.ndarray): predicted keypoint scores in - shape (num_instances, K) - - The heatmap prediction is a list of ``PixelData``, each contains - the following fields: - - - heatmaps (Tensor): The predicted heatmaps in shape (1, h, w) - or (K+1, h, w) if keypoint heatmaps are predicted - - displacements (Tensor): The predicted displacement fields - in shape (K*2, h, w) - """ - - assert len(batch_data_samples) == 1, f'DEKRHead only supports ' \ - f'prediction with batch_size 1, but got {len(batch_data_samples)}' - - multiscale_test = test_cfg.get('multiscale_test', False) - flip_test = test_cfg.get('flip_test', False) - metainfo = batch_data_samples[0].metainfo - aug_scales = [1] - - if not multiscale_test: - feats = [feats] - else: - aug_scales = aug_scales + metainfo['aug_scales'] - - heatmaps, displacements = [], [] - for feat, s in zip(feats, aug_scales): - if flip_test: - assert isinstance(feat, list) and len(feat) == 2 - flip_indices = metainfo['flip_indices'] - _feat, _feat_flip = feat - _heatmaps, _displacements = self.forward(_feat) - _heatmaps_flip, _displacements_flip = self.forward(_feat_flip) - - _heatmaps_flip = flip_heatmaps( - _heatmaps_flip, - flip_mode='heatmap', - flip_indices=flip_indices + [len(flip_indices)], - shift_heatmap=test_cfg.get('shift_heatmap', False)) - _heatmaps = (_heatmaps + _heatmaps_flip) / 2.0 - - _displacements_flip = flip_heatmaps( - _displacements_flip, - flip_mode='offset', - flip_indices=flip_indices, - shift_heatmap=False) - - # this is a coordinate amendment. - x_scale_factor = s * ( - metainfo['input_size'][0] / _heatmaps.shape[-1]) - _displacements_flip[:, ::2] += (x_scale_factor - 1) / ( - x_scale_factor) - _displacements = (_displacements + _displacements_flip) / 2.0 - - else: - _heatmaps, _displacements = self.forward(feat) - - heatmaps.append(_heatmaps) - displacements.append(_displacements) - - preds = self.decode(heatmaps, displacements, test_cfg, metainfo) - - if test_cfg.get('output_heatmaps', False): - heatmaps = [hm.detach() for hm in heatmaps] - displacements = [dm.detach() for dm in displacements] - B = heatmaps[0].shape[0] - pred_fields = [] - for i in range(B): - pred_fields.append( - PixelData( - heatmaps=heatmaps[0][i], - displacements=displacements[0][i])) - return preds, pred_fields - else: - return preds - - def decode(self, - heatmaps: Tuple[Tensor], - displacements: Tuple[Tensor], - test_cfg: ConfigType = {}, - metainfo: dict = {}) -> InstanceList: - """Decode keypoints from outputs. - - Args: - heatmaps (Tuple[Tensor]): The output heatmaps inferred from one - image or multi-scale images. - displacements (Tuple[Tensor]): The output displacement fields - inferred from one image or multi-scale images. - test_cfg (dict): The runtime config for testing process. Defaults - to {} - metainfo (dict): The metainfo of test dataset. Defaults to {} - - Returns: - List[InstanceData]: A list of InstanceData, each contains the - decoded pose information of the instances of one data sample. - """ - - if self.decoder is None: - raise RuntimeError( - f'The decoder has not been set in {self.__class__.__name__}. 
' - 'Please set the decoder configs in the init parameters to ' - 'enable head methods `head.predict()` and `head.decode()`') - - multiscale_test = test_cfg.get('multiscale_test', False) - skeleton = metainfo.get('skeleton_links', None) - - preds = [] - batch_size = heatmaps[0].shape[0] - - for b in range(batch_size): - if multiscale_test: - raise NotImplementedError - else: - keypoints, (root_scores, - keypoint_scores) = self.decoder.decode( - heatmaps[0][b], displacements[0][b]) - - # rescore each instance - if self.rescore_net is not None and skeleton and len( - keypoints) > 0: - instance_scores = self.rescore_net(keypoints, keypoint_scores, - skeleton) - instance_scores[torch.isnan(instance_scores)] = 0 - root_scores = root_scores * instance_scores - - # nms - keypoints, keypoint_scores = to_numpy((keypoints, keypoint_scores)) - scores = to_numpy(root_scores)[..., None] * keypoint_scores - if len(keypoints) > 0 and test_cfg.get('nms_dist_thr', 0) > 0: - kpts_db = [] - for i in range(len(keypoints)): - kpts_db.append( - dict(keypoints=keypoints[i], score=keypoint_scores[i])) - keep_instance_inds = nearby_joints_nms( - kpts_db, - test_cfg['nms_dist_thr'], - test_cfg.get('nms_joints_thr', None), - score_per_joint=True, - max_dets=test_cfg.get('max_num_people', 30)) - keypoints = keypoints[keep_instance_inds] - scores = scores[keep_instance_inds] - - # pack outputs - preds.append( - InstanceData(keypoints=keypoints, keypoint_scores=scores)) - - return preds - - def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, - **kwargs): - """A hook function to convert old-version state dict of - :class:`DEKRHead` (before MMPose v1.0.0) to a compatible format - of :class:`DEKRHead`. - - The hook will be automatically registered during initialization. - """ - version = local_meta.get('version', None) - if version and version >= self._version: - return - - # convert old-version state dict - keys = list(state_dict.keys()) - for k in keys: - if 'offset_conv_layer' in k: - v = state_dict.pop(k) - k = k.replace('offset_conv_layers', 'displacement_conv_layers') - if 'displacement_conv_layers.3.' in k: - # the source and target of displacement vectors are - # opposite between two versions. - v = -v - state_dict[k] = v - - if 'heatmap_conv_layers.2' in k: - # root heatmap is at the first/last channel of the - # heatmap tensor in MMPose v0.x/1.x, respectively. - v = state_dict.pop(k) - state_dict[k] = torch.cat((v[1:], v[:1])) - - if 'rescore_net' in k: - v = state_dict.pop(k) - k = k.replace('rescore_net', 'head.rescore_net') - state_dict[k] = v +# Copyright (c) OpenMMLab. All rights reserved. 
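# Illustrative sketch (not part of the diff): the offset construction used in
# AdaptiveActivationBlock.forward above, reproduced in isolation. A per-pixel,
# per-group 2x3 affine matrix is applied to the fixed 3x9 matrix of 3x3-kernel
# sampling positions, yielding the 18 offsets (9 points x 2 coords) per group
# that DeformConv2d expects. The small shapes below are assumptions so the
# snippet runs on CPU without mmcv.
import torch

B, H, W, groups = 1, 4, 4, 2
regular_matrix = torch.tensor([[-1., -1., -1., 0., 0., 0., 1., 1., 1.],
                               [-1., 0., 1., -1., 0., 1., -1., 0., 1.],
                               [1., 1., 1., 1., 1., 1., 1., 1., 1.]])

# In the head, this tensor comes from a grouped 3x3 conv that predicts 6
# values (a 2x3 affine matrix) per group and per spatial location.
affine_matrix = torch.randn(B, H, W, groups, 2, 3)

offset = torch.matmul(affine_matrix, regular_matrix)      # (B, H, W, G, 2, 9)
offset = offset.transpose(4, 5).reshape(B, H, W, groups * 18)
offset = offset.permute(0, 3, 1, 2).contiguous()          # (B, G*18, H, W)
print(offset.shape)  # torch.Size([1, 36, 4, 4])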
+from typing import Sequence, Tuple, Union + +import torch +from mmcv.cnn import (ConvModule, build_activation_layer, build_conv_layer, + build_norm_layer) +from mmengine.model import BaseModule, ModuleDict, Sequential +from mmengine.structures import InstanceData, PixelData +from torch import Tensor + +from mmpose.evaluation.functional.nms import nearby_joints_nms +from mmpose.models.utils.tta import flip_heatmaps +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, Features, InstanceList, + OptConfigType, OptSampleList, Predictions) +from ...backbones.resnet import BasicBlock +from ..base_head import BaseHead + +try: + from mmcv.ops import DeformConv2d + has_mmcv_full = True +except (ImportError, ModuleNotFoundError): + has_mmcv_full = False + + +class AdaptiveActivationBlock(BaseModule): + """Adaptive activation convolution block. "Bottom-up human pose estimation + via disentangled keypoint regression", CVPR'2021. + + Args: + in_channels (int): Number of input channels + out_channels (int): Number of output channels + groups (int): Number of groups. Generally equal to the + number of joints. + norm_cfg (dict): Config for normalization layers. + act_cfg (dict): Config for activation layers. + """ + + def __init__(self, + in_channels, + out_channels, + groups=1, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super(AdaptiveActivationBlock, self).__init__(init_cfg=init_cfg) + + assert in_channels % groups == 0 and out_channels % groups == 0 + self.groups = groups + + regular_matrix = torch.tensor([[-1, -1, -1, 0, 0, 0, 1, 1, 1], + [-1, 0, 1, -1, 0, 1, -1, 0, 1], + [1, 1, 1, 1, 1, 1, 1, 1, 1]]) + self.register_buffer('regular_matrix', regular_matrix.float()) + + self.transform_matrix_conv = build_conv_layer( + dict(type='Conv2d'), + in_channels=in_channels, + out_channels=6 * groups, + kernel_size=3, + padding=1, + groups=groups, + bias=True) + + if has_mmcv_full: + self.adapt_conv = DeformConv2d( + in_channels, + out_channels, + kernel_size=3, + padding=1, + bias=False, + groups=groups, + deform_groups=groups) + else: + raise ImportError('Please install the full version of mmcv ' + 'to use `DeformConv2d`.') + + self.norm = build_norm_layer(norm_cfg, out_channels)[1] + self.act = build_activation_layer(act_cfg) + + def forward(self, x): + B, _, H, W = x.size() + residual = x + + affine_matrix = self.transform_matrix_conv(x) + affine_matrix = affine_matrix.permute(0, 2, 3, 1).contiguous() + affine_matrix = affine_matrix.view(B, H, W, self.groups, 2, 3) + offset = torch.matmul(affine_matrix, self.regular_matrix) + offset = offset.transpose(4, 5).reshape(B, H, W, self.groups * 18) + offset = offset.permute(0, 3, 1, 2).contiguous() + + x = self.adapt_conv(x, offset) + x = self.norm(x) + x = self.act(x + residual) + + return x + + +class RescoreNet(BaseModule): + """Rescore net used to predict the OKS score of predicted pose. We use the + off-the-shelf rescore net pretrained by authors of DEKR. 
+ + Args: + in_channels (int): Input channels + norm_indexes (Tuple(int)): Indices of torso in skeleton + init_cfg (dict, optional): Initialization config dict + """ + + def __init__( + self, + in_channels, + norm_indexes, + init_cfg=None, + ): + super(RescoreNet, self).__init__(init_cfg=init_cfg) + + self.norm_indexes = norm_indexes + + hidden = 256 + + self.l1 = torch.nn.Linear(in_channels, hidden, bias=True) + self.l2 = torch.nn.Linear(hidden, hidden, bias=True) + self.l3 = torch.nn.Linear(hidden, 1, bias=True) + self.relu = torch.nn.ReLU() + + def make_feature(self, keypoints, keypoint_scores, skeleton): + """Combine original scores, joint distance and relative distance to + make feature. + + Args: + keypoints (torch.Tensor): predicetd keypoints + keypoint_scores (torch.Tensor): predicetd keypoint scores + skeleton (list(list(int))): joint links + + Returns: + torch.Tensor: feature for each instance + """ + joint_1, joint_2 = zip(*skeleton) + num_link = len(skeleton) + + joint_relate = (keypoints[:, joint_1] - + keypoints[:, joint_2])[:, :, :2] + joint_length = joint_relate.norm(dim=2) + + # To use the torso distance to normalize + normalize = (joint_length[:, self.norm_indexes[0]] + + joint_length[:, self.norm_indexes[1]]) / 2 + normalize = normalize.unsqueeze(1).expand(normalize.size(0), num_link) + normalize = normalize.clamp(min=1).contiguous() + + joint_length = joint_length / normalize[:, :] + joint_relate = joint_relate / normalize.unsqueeze(-1) + joint_relate = joint_relate.flatten(1) + + feature = torch.cat((joint_relate, joint_length, keypoint_scores), + dim=1).float() + return feature + + def forward(self, keypoints, keypoint_scores, skeleton): + feature = self.make_feature(keypoints, keypoint_scores, skeleton) + x = self.relu(self.l1(feature)) + x = self.relu(self.l2(x)) + x = self.l3(x) + return x.squeeze(1) + + +@MODELS.register_module() +class DEKRHead(BaseHead): + """DisEntangled Keypoint Regression head introduced in `Bottom-up human + pose estimation via disentangled keypoint regression`_ by Geng et al + (2021). The head is composed of a heatmap branch and a displacement branch. + + Args: + in_channels (int | Sequence[int]): Number of channels in the input + feature map + num_joints (int): Number of joints + num_heatmap_filters (int): Number of filters for heatmap branch. + Defaults to 32 + num_offset_filters_per_joint (int): Number of filters for each joint + in displacement branch. Defaults to 15 + heatmap_loss (Config): Config of the heatmap loss. Defaults to use + :class:`KeypointMSELoss` + displacement_loss (Config): Config of the displacement regression loss. + Defaults to use :class:`SoftWeightSmoothL1Loss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + rescore_cfg (Config, optional): The config for rescore net which + estimates OKS via predicted keypoints and keypoint scores. + Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. 
_`Bottom-up human pose estimation via disentangled keypoint regression`: + https://arxiv.org/abs/2104.02300 + """ + + _version = 2 + + def __init__(self, + in_channels: Union[int, Sequence[int]], + num_keypoints: int, + num_heatmap_filters: int = 32, + num_displacement_filters_per_keypoint: int = 15, + heatmap_loss: ConfigType = dict( + type='KeypointMSELoss', use_target_weight=True), + displacement_loss: ConfigType = dict( + type='SoftWeightSmoothL1Loss', + use_target_weight=True, + supervise_empty=False), + decoder: OptConfigType = None, + rescore_cfg: OptConfigType = None, + init_cfg: OptConfigType = None): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.in_channels = in_channels + self.num_keypoints = num_keypoints + + # build heatmap branch + self.heatmap_conv_layers = self._make_heatmap_conv_layers( + in_channels=in_channels, + out_channels=1 + num_keypoints, + num_filters=num_heatmap_filters, + ) + + # build displacement branch + self.displacement_conv_layers = self._make_displacement_conv_layers( + in_channels=in_channels, + out_channels=2 * num_keypoints, + num_filters=num_keypoints * num_displacement_filters_per_keypoint, + groups=num_keypoints) + + # build losses + self.loss_module = ModuleDict( + dict( + heatmap=MODELS.build(heatmap_loss), + displacement=MODELS.build(displacement_loss), + )) + + # build decoder + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + # build rescore net + if rescore_cfg is not None: + self.rescore_net = RescoreNet(**rescore_cfg) + else: + self.rescore_net = None + + # Register the hook to automatically convert old version state dicts + self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) + + @property + def default_init_cfg(self): + init_cfg = [ + dict( + type='Normal', layer=['Conv2d', 'ConvTranspose2d'], std=0.001), + dict(type='Constant', layer='BatchNorm2d', val=1) + ] + return init_cfg + + def _make_heatmap_conv_layers(self, in_channels: int, out_channels: int, + num_filters: int): + """Create convolutional layers of heatmap branch by given + parameters.""" + layers = [ + ConvModule( + in_channels=in_channels, + out_channels=num_filters, + kernel_size=1, + norm_cfg=dict(type='BN')), + BasicBlock(num_filters, num_filters), + build_conv_layer( + dict(type='Conv2d'), + in_channels=num_filters, + out_channels=out_channels, + kernel_size=1), + ] + + return Sequential(*layers) + + def _make_displacement_conv_layers(self, in_channels: int, + out_channels: int, num_filters: int, + groups: int): + """Create convolutional layers of displacement branch by given + parameters.""" + layers = [ + ConvModule( + in_channels=in_channels, + out_channels=num_filters, + kernel_size=1, + norm_cfg=dict(type='BN')), + AdaptiveActivationBlock(num_filters, num_filters, groups=groups), + AdaptiveActivationBlock(num_filters, num_filters, groups=groups), + build_conv_layer( + dict(type='Conv2d'), + in_channels=num_filters, + out_channels=out_channels, + kernel_size=1, + groups=groups) + ] + + return Sequential(*layers) + + def forward(self, feats: Tuple[Tensor]) -> Tensor: + """Forward the network. The input is multi scale feature maps and the + output is a tuple of heatmap and displacement. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + Tuple[Tensor]: output heatmap and displacement. 
+ """ + x = feats[-1] + + heatmaps = self.heatmap_conv_layers(x) + displacements = self.displacement_conv_layers(x) + + return heatmaps, displacements + + def loss(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + feats (Tuple[Tensor]): The multi-stage features + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + train_cfg (dict): The runtime config for training process. + Defaults to {} + + Returns: + dict: A dictionary of losses. + """ + pred_heatmaps, pred_displacements = self.forward(feats) + gt_heatmaps = torch.stack( + [d.gt_fields.heatmaps for d in batch_data_samples]) + heatmap_weights = torch.stack( + [d.gt_fields.heatmap_weights for d in batch_data_samples]) + gt_displacements = torch.stack( + [d.gt_fields.displacements for d in batch_data_samples]) + displacement_weights = torch.stack( + [d.gt_fields.displacement_weights for d in batch_data_samples]) + + if 'heatmap_mask' in batch_data_samples[0].gt_fields.keys(): + heatmap_mask = torch.stack( + [d.gt_fields.heatmap_mask for d in batch_data_samples]) + else: + heatmap_mask = None + + # calculate losses + losses = dict() + heatmap_loss = self.loss_module['heatmap'](pred_heatmaps, gt_heatmaps, + heatmap_weights, + heatmap_mask) + displacement_loss = self.loss_module['displacement']( + pred_displacements, gt_displacements, displacement_weights) + + losses.update({ + 'loss/heatmap': heatmap_loss, + 'loss/displacement': displacement_loss, + }) + + return losses + + def predict(self, + feats: Features, + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from features. + + Args: + feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage + features (or multiple multi-scale features in TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + test_cfg (dict): The runtime config for testing process. Defaults + to {} + + Returns: + Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If + ``test_cfg['output_heatmap']==True``, return both pose and heatmap + prediction; otherwise only return the pose prediction. 
+ + The pose prediction is a list of ``InstanceData``, each contains + the following fields: + + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + + The heatmap prediction is a list of ``PixelData``, each contains + the following fields: + + - heatmaps (Tensor): The predicted heatmaps in shape (1, h, w) + or (K+1, h, w) if keypoint heatmaps are predicted + - displacements (Tensor): The predicted displacement fields + in shape (K*2, h, w) + """ + + assert len(batch_data_samples) == 1, f'DEKRHead only supports ' \ + f'prediction with batch_size 1, but got {len(batch_data_samples)}' + + multiscale_test = test_cfg.get('multiscale_test', False) + flip_test = test_cfg.get('flip_test', False) + metainfo = batch_data_samples[0].metainfo + aug_scales = [1] + + if not multiscale_test: + feats = [feats] + else: + aug_scales = aug_scales + metainfo['aug_scales'] + + heatmaps, displacements = [], [] + for feat, s in zip(feats, aug_scales): + if flip_test: + assert isinstance(feat, list) and len(feat) == 2 + flip_indices = metainfo['flip_indices'] + _feat, _feat_flip = feat + _heatmaps, _displacements = self.forward(_feat) + _heatmaps_flip, _displacements_flip = self.forward(_feat_flip) + + _heatmaps_flip = flip_heatmaps( + _heatmaps_flip, + flip_mode='heatmap', + flip_indices=flip_indices + [len(flip_indices)], + shift_heatmap=test_cfg.get('shift_heatmap', False)) + _heatmaps = (_heatmaps + _heatmaps_flip) / 2.0 + + _displacements_flip = flip_heatmaps( + _displacements_flip, + flip_mode='offset', + flip_indices=flip_indices, + shift_heatmap=False) + + # this is a coordinate amendment. + x_scale_factor = s * ( + metainfo['input_size'][0] / _heatmaps.shape[-1]) + _displacements_flip[:, ::2] += (x_scale_factor - 1) / ( + x_scale_factor) + _displacements = (_displacements + _displacements_flip) / 2.0 + + else: + _heatmaps, _displacements = self.forward(feat) + + heatmaps.append(_heatmaps) + displacements.append(_displacements) + + preds = self.decode(heatmaps, displacements, test_cfg, metainfo) + + if test_cfg.get('output_heatmaps', False): + heatmaps = [hm.detach() for hm in heatmaps] + displacements = [dm.detach() for dm in displacements] + B = heatmaps[0].shape[0] + pred_fields = [] + for i in range(B): + pred_fields.append( + PixelData( + heatmaps=heatmaps[0][i], + displacements=displacements[0][i])) + return preds, pred_fields + else: + return preds + + def decode(self, + heatmaps: Tuple[Tensor], + displacements: Tuple[Tensor], + test_cfg: ConfigType = {}, + metainfo: dict = {}) -> InstanceList: + """Decode keypoints from outputs. + + Args: + heatmaps (Tuple[Tensor]): The output heatmaps inferred from one + image or multi-scale images. + displacements (Tuple[Tensor]): The output displacement fields + inferred from one image or multi-scale images. + test_cfg (dict): The runtime config for testing process. Defaults + to {} + metainfo (dict): The metainfo of test dataset. Defaults to {} + + Returns: + List[InstanceData]: A list of InstanceData, each contains the + decoded pose information of the instances of one data sample. + """ + + if self.decoder is None: + raise RuntimeError( + f'The decoder has not been set in {self.__class__.__name__}. 
' + 'Please set the decoder configs in the init parameters to ' + 'enable head methods `head.predict()` and `head.decode()`') + + multiscale_test = test_cfg.get('multiscale_test', False) + skeleton = metainfo.get('skeleton_links', None) + + preds = [] + batch_size = heatmaps[0].shape[0] + + for b in range(batch_size): + if multiscale_test: + raise NotImplementedError + else: + keypoints, (root_scores, + keypoint_scores) = self.decoder.decode( + heatmaps[0][b], displacements[0][b]) + + # rescore each instance + if self.rescore_net is not None and skeleton and len( + keypoints) > 0: + instance_scores = self.rescore_net(keypoints, keypoint_scores, + skeleton) + instance_scores[torch.isnan(instance_scores)] = 0 + root_scores = root_scores * instance_scores + + # nms + keypoints, keypoint_scores = to_numpy((keypoints, keypoint_scores)) + scores = to_numpy(root_scores)[..., None] * keypoint_scores + if len(keypoints) > 0 and test_cfg.get('nms_dist_thr', 0) > 0: + kpts_db = [] + for i in range(len(keypoints)): + kpts_db.append( + dict(keypoints=keypoints[i], score=keypoint_scores[i])) + keep_instance_inds = nearby_joints_nms( + kpts_db, + test_cfg['nms_dist_thr'], + test_cfg.get('nms_joints_thr', None), + score_per_joint=True, + max_dets=test_cfg.get('max_num_people', 30)) + keypoints = keypoints[keep_instance_inds] + scores = scores[keep_instance_inds] + + # pack outputs + preds.append( + InstanceData(keypoints=keypoints, keypoint_scores=scores)) + + return preds + + def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, + **kwargs): + """A hook function to convert old-version state dict of + :class:`DEKRHead` (before MMPose v1.0.0) to a compatible format + of :class:`DEKRHead`. + + The hook will be automatically registered during initialization. + """ + version = local_meta.get('version', None) + if version and version >= self._version: + return + + # convert old-version state dict + keys = list(state_dict.keys()) + for k in keys: + if 'offset_conv_layer' in k: + v = state_dict.pop(k) + k = k.replace('offset_conv_layers', 'displacement_conv_layers') + if 'displacement_conv_layers.3.' in k: + # the source and target of displacement vectors are + # opposite between two versions. + v = -v + state_dict[k] = v + + if 'heatmap_conv_layers.2' in k: + # root heatmap is at the first/last channel of the + # heatmap tensor in MMPose v0.x/1.x, respectively. + v = state_dict.pop(k) + state_dict[k] = torch.cat((v[1:], v[:1])) + + if 'rescore_net' in k: + v = state_dict.pop(k) + k = k.replace('rescore_net', 'head.rescore_net') + state_dict[k] = v diff --git a/mmpose/models/heads/hybrid_heads/vis_head.py b/mmpose/models/heads/hybrid_heads/vis_head.py index e9ea271ac5..781fd32c8a 100644 --- a/mmpose/models/heads/hybrid_heads/vis_head.py +++ b/mmpose/models/heads/hybrid_heads/vis_head.py @@ -1,229 +1,229 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Tuple, Union - -import torch -from torch import Tensor, nn - -from mmpose.models.utils.tta import flip_visibility -from mmpose.registry import MODELS -from mmpose.utils.tensor_utils import to_numpy -from mmpose.utils.typing import (ConfigType, InstanceList, OptConfigType, - OptSampleList, Predictions) -from ..base_head import BaseHead - - -@MODELS.register_module() -class VisPredictHead(BaseHead): - """VisPredictHead must be used together with other heads. It can predict - keypoints coordinates of and their visibility simultaneously. In the - current version, it only supports top-down approaches. 
- - Args: - pose_cfg (Config): Config to construct keypoints prediction head - loss (Config): Config for visibility loss. Defaults to use - :class:`BCELoss` - use_sigmoid (bool): Whether to use sigmoid activation function - init_cfg (Config, optional): Config to control the initialization. See - :attr:`default_init_cfg` for default settings - """ - - def __init__(self, - pose_cfg: ConfigType, - loss: ConfigType = dict( - type='BCELoss', use_target_weight=False, - with_logits=True), - use_sigmoid: bool = False, - init_cfg: OptConfigType = None): - - if init_cfg is None: - init_cfg = self.default_init_cfg - - super().__init__(init_cfg) - - self.in_channels = pose_cfg['in_channels'] - if pose_cfg.get('num_joints', None) is not None: - self.out_channels = pose_cfg['num_joints'] - elif pose_cfg.get('out_channels', None) is not None: - self.out_channels = pose_cfg['out_channels'] - else: - raise ValueError('VisPredictHead requires \'num_joints\' or' - ' \'out_channels\' in the pose_cfg.') - - self.loss_module = MODELS.build(loss) - - self.pose_head = MODELS.build(pose_cfg) - self.pose_cfg = pose_cfg - - self.use_sigmoid = use_sigmoid - - modules = [ - nn.AdaptiveAvgPool2d(1), - nn.Flatten(), - nn.Linear(self.in_channels, self.out_channels) - ] - if use_sigmoid: - modules.append(nn.Sigmoid()) - - self.vis_head = nn.Sequential(*modules) - - def vis_forward(self, feats: Tuple[Tensor]): - """Forward the vis_head. The input is multi scale feature maps and the - output is coordinates visibility. - - Args: - feats (Tuple[Tensor]): Multi scale feature maps. - - Returns: - Tensor: output coordinates visibility. - """ - x = feats[-1] - while len(x.shape) < 4: - x.unsqueeze_(-1) - x = self.vis_head(x) - return x.reshape(-1, self.out_channels) - - def forward(self, feats: Tuple[Tensor]): - """Forward the network. The input is multi scale feature maps and the - output is coordinates and coordinates visibility. - - Args: - feats (Tuple[Tensor]): Multi scale feature maps. - - Returns: - Tuple[Tensor]: output coordinates and coordinates visibility. - """ - x_pose = self.pose_head.forward(feats) - x_vis = self.vis_forward(feats) - - return x_pose, x_vis - - def integrate(self, batch_vis: Tensor, - pose_preds: Union[Tuple, Predictions]) -> InstanceList: - """Add keypoints visibility prediction to pose prediction. - - Overwrite the original keypoint_scores. - """ - if isinstance(pose_preds, tuple): - pose_pred_instances, pose_pred_fields = pose_preds - else: - pose_pred_instances = pose_preds - pose_pred_fields = None - - batch_vis_np = to_numpy(batch_vis, unzip=True) - - assert len(pose_pred_instances) == len(batch_vis_np) - for index, _ in enumerate(pose_pred_instances): - pose_pred_instances[index].keypoint_scores = batch_vis_np[index] - - return pose_pred_instances, pose_pred_fields - - def predict(self, - feats: Tuple[Tensor], - batch_data_samples: OptSampleList, - test_cfg: ConfigType = {}) -> Predictions: - """Predict results from features. - - Args: - feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage - features (or multiple multi-stage features in TTA) - batch_data_samples (List[:obj:`PoseDataSample`]): The batch - data samples - test_cfg (dict): The runtime config for testing process. Defaults - to {} - - Returns: - Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If - posehead's ``test_cfg['output_heatmap']==True``, return both - pose and heatmap prediction; otherwise only return the pose - prediction. 
- - The pose prediction is a list of ``InstanceData``, each contains - the following fields: - - - keypoints (np.ndarray): predicted keypoint coordinates in - shape (num_instances, K, D) where K is the keypoint number - and D is the keypoint dimension - - keypoint_scores (np.ndarray): predicted keypoint scores in - shape (num_instances, K) - - keypoint_visibility (np.ndarray): predicted keypoints - visibility in shape (num_instances, K) - - The heatmap prediction is a list of ``PixelData``, each contains - the following fields: - - - heatmaps (Tensor): The predicted heatmaps in shape (K, h, w) - """ - if test_cfg.get('flip_test', False): - # TTA: flip test -> feats = [orig, flipped] - assert isinstance(feats, list) and len(feats) == 2 - flip_indices = batch_data_samples[0].metainfo['flip_indices'] - _feats, _feats_flip = feats - - _batch_vis = self.vis_forward(_feats) - _batch_vis_flip = flip_visibility( - self.vis_forward(_feats_flip), flip_indices=flip_indices) - batch_vis = (_batch_vis + _batch_vis_flip) * 0.5 - else: - batch_vis = self.vis_forward(feats) # (B, K, D) - - batch_vis.unsqueeze_(dim=1) # (B, N, K, D) - - if not self.use_sigmoid: - batch_vis = torch.sigmoid(batch_vis) - - batch_pose = self.pose_head.predict(feats, batch_data_samples, - test_cfg) - - return self.integrate(batch_vis, batch_pose) - - def vis_accuracy(self, vis_pred_outputs, vis_labels): - """Calculate visibility prediction accuracy.""" - probabilities = torch.sigmoid(torch.flatten(vis_pred_outputs)) - threshold = 0.5 - predictions = (probabilities >= threshold).int() - labels = torch.flatten(vis_labels) - correct = torch.sum(predictions == labels).item() - accuracy = correct / len(labels) - return torch.tensor(accuracy) - - def loss(self, - feats: Tuple[Tensor], - batch_data_samples: OptSampleList, - train_cfg: OptConfigType = {}) -> dict: - """Calculate losses from a batch of inputs and data samples. - - Args: - feats (Tuple[Tensor]): The multi-stage features - batch_data_samples (List[:obj:`PoseDataSample`]): The batch - data samples - train_cfg (dict): The runtime config for training process. - Defaults to {} - - Returns: - dict: A dictionary of losses. - """ - vis_pred_outputs = self.vis_forward(feats) - vis_labels = torch.cat([ - d.gt_instance_labels.keypoint_weights for d in batch_data_samples - ]) - - # calculate vis losses - losses = dict() - loss_vis = self.loss_module(vis_pred_outputs, vis_labels) - - losses.update(loss_vis=loss_vis) - - # calculate vis accuracy - acc_vis = self.vis_accuracy(vis_pred_outputs, vis_labels) - losses.update(acc_vis=acc_vis) - - # calculate keypoints losses - loss_kpt = self.pose_head.loss(feats, batch_data_samples) - losses.update(loss_kpt) - - return losses - - @property - def default_init_cfg(self): - init_cfg = [dict(type='Normal', layer=['Linear'], std=0.01, bias=0)] - return init_cfg +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Tuple, Union + +import torch +from torch import Tensor, nn + +from mmpose.models.utils.tta import flip_visibility +from mmpose.registry import MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, InstanceList, OptConfigType, + OptSampleList, Predictions) +from ..base_head import BaseHead + + +@MODELS.register_module() +class VisPredictHead(BaseHead): + """VisPredictHead must be used together with other heads. It can predict + keypoints coordinates of and their visibility simultaneously. In the + current version, it only supports top-down approaches. 
+ + Args: + pose_cfg (Config): Config to construct keypoints prediction head + loss (Config): Config for visibility loss. Defaults to use + :class:`BCELoss` + use_sigmoid (bool): Whether to use sigmoid activation function + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + """ + + def __init__(self, + pose_cfg: ConfigType, + loss: ConfigType = dict( + type='BCELoss', use_target_weight=False, + with_logits=True), + use_sigmoid: bool = False, + init_cfg: OptConfigType = None): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.in_channels = pose_cfg['in_channels'] + if pose_cfg.get('num_joints', None) is not None: + self.out_channels = pose_cfg['num_joints'] + elif pose_cfg.get('out_channels', None) is not None: + self.out_channels = pose_cfg['out_channels'] + else: + raise ValueError('VisPredictHead requires \'num_joints\' or' + ' \'out_channels\' in the pose_cfg.') + + self.loss_module = MODELS.build(loss) + + self.pose_head = MODELS.build(pose_cfg) + self.pose_cfg = pose_cfg + + self.use_sigmoid = use_sigmoid + + modules = [ + nn.AdaptiveAvgPool2d(1), + nn.Flatten(), + nn.Linear(self.in_channels, self.out_channels) + ] + if use_sigmoid: + modules.append(nn.Sigmoid()) + + self.vis_head = nn.Sequential(*modules) + + def vis_forward(self, feats: Tuple[Tensor]): + """Forward the vis_head. The input is multi scale feature maps and the + output is coordinates visibility. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + Tensor: output coordinates visibility. + """ + x = feats[-1] + while len(x.shape) < 4: + x.unsqueeze_(-1) + x = self.vis_head(x) + return x.reshape(-1, self.out_channels) + + def forward(self, feats: Tuple[Tensor]): + """Forward the network. The input is multi scale feature maps and the + output is coordinates and coordinates visibility. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + Tuple[Tensor]: output coordinates and coordinates visibility. + """ + x_pose = self.pose_head.forward(feats) + x_vis = self.vis_forward(feats) + + return x_pose, x_vis + + def integrate(self, batch_vis: Tensor, + pose_preds: Union[Tuple, Predictions]) -> InstanceList: + """Add keypoints visibility prediction to pose prediction. + + Overwrite the original keypoint_scores. + """ + if isinstance(pose_preds, tuple): + pose_pred_instances, pose_pred_fields = pose_preds + else: + pose_pred_instances = pose_preds + pose_pred_fields = None + + batch_vis_np = to_numpy(batch_vis, unzip=True) + + assert len(pose_pred_instances) == len(batch_vis_np) + for index, _ in enumerate(pose_pred_instances): + pose_pred_instances[index].keypoint_scores = batch_vis_np[index] + + return pose_pred_instances, pose_pred_fields + + def predict(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from features. + + Args: + feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage + features (or multiple multi-stage features in TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + test_cfg (dict): The runtime config for testing process. Defaults + to {} + + Returns: + Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If + posehead's ``test_cfg['output_heatmap']==True``, return both + pose and heatmap prediction; otherwise only return the pose + prediction. 
+ + The pose prediction is a list of ``InstanceData``, each contains + the following fields: + + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + - keypoint_visibility (np.ndarray): predicted keypoints + visibility in shape (num_instances, K) + + The heatmap prediction is a list of ``PixelData``, each contains + the following fields: + + - heatmaps (Tensor): The predicted heatmaps in shape (K, h, w) + """ + if test_cfg.get('flip_test', False): + # TTA: flip test -> feats = [orig, flipped] + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + _feats, _feats_flip = feats + + _batch_vis = self.vis_forward(_feats) + _batch_vis_flip = flip_visibility( + self.vis_forward(_feats_flip), flip_indices=flip_indices) + batch_vis = (_batch_vis + _batch_vis_flip) * 0.5 + else: + batch_vis = self.vis_forward(feats) # (B, K, D) + + batch_vis.unsqueeze_(dim=1) # (B, N, K, D) + + if not self.use_sigmoid: + batch_vis = torch.sigmoid(batch_vis) + + batch_pose = self.pose_head.predict(feats, batch_data_samples, + test_cfg) + + return self.integrate(batch_vis, batch_pose) + + def vis_accuracy(self, vis_pred_outputs, vis_labels): + """Calculate visibility prediction accuracy.""" + probabilities = torch.sigmoid(torch.flatten(vis_pred_outputs)) + threshold = 0.5 + predictions = (probabilities >= threshold).int() + labels = torch.flatten(vis_labels) + correct = torch.sum(predictions == labels).item() + accuracy = correct / len(labels) + return torch.tensor(accuracy) + + def loss(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: OptConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + feats (Tuple[Tensor]): The multi-stage features + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + train_cfg (dict): The runtime config for training process. + Defaults to {} + + Returns: + dict: A dictionary of losses. + """ + vis_pred_outputs = self.vis_forward(feats) + vis_labels = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples + ]) + + # calculate vis losses + losses = dict() + loss_vis = self.loss_module(vis_pred_outputs, vis_labels) + + losses.update(loss_vis=loss_vis) + + # calculate vis accuracy + acc_vis = self.vis_accuracy(vis_pred_outputs, vis_labels) + losses.update(acc_vis=acc_vis) + + # calculate keypoints losses + loss_kpt = self.pose_head.loss(feats, batch_data_samples) + losses.update(loss_kpt) + + return losses + + @property + def default_init_cfg(self): + init_cfg = [dict(type='Normal', layer=['Linear'], std=0.01, bias=0)] + return init_cfg diff --git a/mmpose/models/heads/regression_heads/__init__.py b/mmpose/models/heads/regression_heads/__init__.py index ce9cd5e1b0..1911c39a8c 100644 --- a/mmpose/models/heads/regression_heads/__init__.py +++ b/mmpose/models/heads/regression_heads/__init__.py @@ -1,16 +1,16 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
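# --- Editorial sketch (illustrative, not part of the patch) ---
# The VisPredictHead added in the vis_head.py hunk above wraps an existing
# pose head via ``pose_cfg`` and stacks a visibility branch on top of it.
# A minimal sketch of how it might be wired into a model config; the
# channel count (32) and keypoint number (17) are assumptions for
# illustration, not values taken from this diff.
head_cfg = dict(
    type='VisPredictHead',
    loss=dict(type='BCELoss', use_target_weight=False, with_logits=True),
    pose_cfg=dict(
        type='HeatmapHead',
        in_channels=32,
        out_channels=17,
        loss=dict(type='KeypointMSELoss', use_target_weight=True)))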
-from .dsnt_head import DSNTHead -from .integral_regression_head import IntegralRegressionHead -from .regression_head import RegressionHead -from .rle_head import RLEHead -from .temporal_regression_head import TemporalRegressionHead -from .trajectory_regression_head import TrajectoryRegressionHead - -__all__ = [ - 'RegressionHead', - 'IntegralRegressionHead', - 'DSNTHead', - 'RLEHead', - 'TemporalRegressionHead', - 'TrajectoryRegressionHead', -] +# Copyright (c) OpenMMLab. All rights reserved. +from .dsnt_head import DSNTHead +from .integral_regression_head import IntegralRegressionHead +from .regression_head import RegressionHead +from .rle_head import RLEHead +from .temporal_regression_head import TemporalRegressionHead +from .trajectory_regression_head import TrajectoryRegressionHead + +__all__ = [ + 'RegressionHead', + 'IntegralRegressionHead', + 'DSNTHead', + 'RLEHead', + 'TemporalRegressionHead', + 'TrajectoryRegressionHead', +] diff --git a/mmpose/models/heads/regression_heads/dsnt_head.py b/mmpose/models/heads/regression_heads/dsnt_head.py index 3bd49e385d..43f20a6257 100644 --- a/mmpose/models/heads/regression_heads/dsnt_head.py +++ b/mmpose/models/heads/regression_heads/dsnt_head.py @@ -1,146 +1,146 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Optional, Sequence, Tuple, Union - -import numpy as np -import torch -from mmengine.logging import MessageHub -from torch import Tensor - -from mmpose.evaluation.functional import keypoint_pck_accuracy -from mmpose.registry import MODELS -from mmpose.utils.tensor_utils import to_numpy -from mmpose.utils.typing import ConfigType, OptConfigType, OptSampleList -from .integral_regression_head import IntegralRegressionHead - -OptIntSeq = Optional[Sequence[int]] - - -@MODELS.register_module() -class DSNTHead(IntegralRegressionHead): - """Top-down integral regression head introduced in `DSNT`_ by Nibali et - al(2018). The head contains a differentiable spatial to numerical transform - (DSNT) layer that do soft-argmax operation on the predicted heatmaps to - regress the coordinates. - - This head is used for algorithms that require supervision of heatmaps - in `DSNT` approach. - - Args: - in_channels (int | sequence[int]): Number of input channels - in_featuremap_size (int | sequence[int]): Size of input feature map - num_joints (int): Number of joints - lambda_t (int): Discard heatmap-based loss when current - epoch > lambda_t. Defaults to -1. - debias (bool): Whether to remove the bias of Integral Pose Regression. - see `Removing the Bias of Integral Pose Regression`_ by Gu et al - (2021). Defaults to ``False``. - beta (float): A smoothing parameter in softmax. Defaults to ``1.0``. - deconv_out_channels (sequence[int]): The output channel number of each - deconv layer. Defaults to ``(256, 256, 256)`` - deconv_kernel_sizes (sequence[int | tuple], optional): The kernel size - of each deconv layer. Each element should be either an integer for - both height and width dimensions, or a tuple of two integers for - the height and the width dimension respectively.Defaults to - ``(4, 4, 4)`` - conv_out_channels (sequence[int], optional): The output channel number - of each intermediate conv layer. ``None`` means no intermediate - conv layer between deconv layers and the final conv layer. - Defaults to ``None`` - conv_kernel_sizes (sequence[int | tuple], optional): The kernel size - of each intermediate conv layer. Defaults to ``None`` - final_layer (dict): Arguments of the final Conv2d layer. 
- Defaults to ``dict(kernel_size=1)`` - loss (Config): Config for keypoint loss. Defaults to use - :class:`DSNTLoss` - decoder (Config, optional): The decoder config that controls decoding - keypoint coordinates from the network output. Defaults to ``None`` - init_cfg (Config, optional): Config to control the initialization. See - :attr:`default_init_cfg` for default settings - - .. _`DSNT`: https://arxiv.org/abs/1801.07372 - """ - - _version = 2 - - def __init__(self, - in_channels: Union[int, Sequence[int]], - in_featuremap_size: Tuple[int, int], - num_joints: int, - lambda_t: int = -1, - debias: bool = False, - beta: float = 1.0, - deconv_out_channels: OptIntSeq = (256, 256, 256), - deconv_kernel_sizes: OptIntSeq = (4, 4, 4), - conv_out_channels: OptIntSeq = None, - conv_kernel_sizes: OptIntSeq = None, - final_layer: dict = dict(kernel_size=1), - loss: ConfigType = dict( - type='MultipleLossWrapper', - losses=[ - dict(type='SmoothL1Loss', use_target_weight=True), - dict(type='JSDiscretLoss', use_target_weight=True) - ]), - decoder: OptConfigType = None, - init_cfg: OptConfigType = None): - - super().__init__( - in_channels=in_channels, - in_featuremap_size=in_featuremap_size, - num_joints=num_joints, - debias=debias, - beta=beta, - deconv_out_channels=deconv_out_channels, - deconv_kernel_sizes=deconv_kernel_sizes, - conv_out_channels=conv_out_channels, - conv_kernel_sizes=conv_kernel_sizes, - final_layer=final_layer, - loss=loss, - decoder=decoder, - init_cfg=init_cfg) - - self.lambda_t = lambda_t - - def loss(self, - inputs: Tuple[Tensor], - batch_data_samples: OptSampleList, - train_cfg: ConfigType = {}) -> dict: - """Calculate losses from a batch of inputs and data samples.""" - - pred_coords, pred_heatmaps = self.forward(inputs) - keypoint_labels = torch.cat( - [d.gt_instance_labels.keypoint_labels for d in batch_data_samples]) - keypoint_weights = torch.cat([ - d.gt_instance_labels.keypoint_weights for d in batch_data_samples - ]) - gt_heatmaps = torch.stack( - [d.gt_fields.heatmaps for d in batch_data_samples]) - - input_list = [pred_coords, pred_heatmaps] - target_list = [keypoint_labels, gt_heatmaps] - # calculate losses - losses = dict() - - loss_list = self.loss_module(input_list, target_list, keypoint_weights) - - loss = loss_list[0] + loss_list[1] - - if self.lambda_t > 0: - mh = MessageHub.get_current_instance() - cur_epoch = mh.get_info('epoch') - if cur_epoch >= self.lambda_t: - loss = loss_list[0] - - losses.update(loss_kpt=loss) - - # calculate accuracy - _, avg_acc, _ = keypoint_pck_accuracy( - pred=to_numpy(pred_coords), - gt=to_numpy(keypoint_labels), - mask=to_numpy(keypoint_weights) > 0, - thr=0.05, - norm_factor=np.ones((pred_coords.size(0), 2), dtype=np.float32)) - - acc_pose = torch.tensor(avg_acc, device=keypoint_labels.device) - losses.update(acc_pose=acc_pose) - - return losses +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Sequence, Tuple, Union + +import numpy as np +import torch +from mmengine.logging import MessageHub +from torch import Tensor + +from mmpose.evaluation.functional import keypoint_pck_accuracy +from mmpose.registry import MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import ConfigType, OptConfigType, OptSampleList +from .integral_regression_head import IntegralRegressionHead + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class DSNTHead(IntegralRegressionHead): + """Top-down integral regression head introduced in `DSNT`_ by Nibali et + al(2018). 
The head contains a differentiable spatial to numerical transform + (DSNT) layer that do soft-argmax operation on the predicted heatmaps to + regress the coordinates. + + This head is used for algorithms that require supervision of heatmaps + in `DSNT` approach. + + Args: + in_channels (int | sequence[int]): Number of input channels + in_featuremap_size (int | sequence[int]): Size of input feature map + num_joints (int): Number of joints + lambda_t (int): Discard heatmap-based loss when current + epoch > lambda_t. Defaults to -1. + debias (bool): Whether to remove the bias of Integral Pose Regression. + see `Removing the Bias of Integral Pose Regression`_ by Gu et al + (2021). Defaults to ``False``. + beta (float): A smoothing parameter in softmax. Defaults to ``1.0``. + deconv_out_channels (sequence[int]): The output channel number of each + deconv layer. Defaults to ``(256, 256, 256)`` + deconv_kernel_sizes (sequence[int | tuple], optional): The kernel size + of each deconv layer. Each element should be either an integer for + both height and width dimensions, or a tuple of two integers for + the height and the width dimension respectively.Defaults to + ``(4, 4, 4)`` + conv_out_channels (sequence[int], optional): The output channel number + of each intermediate conv layer. ``None`` means no intermediate + conv layer between deconv layers and the final conv layer. + Defaults to ``None`` + conv_kernel_sizes (sequence[int | tuple], optional): The kernel size + of each intermediate conv layer. Defaults to ``None`` + final_layer (dict): Arguments of the final Conv2d layer. + Defaults to ``dict(kernel_size=1)`` + loss (Config): Config for keypoint loss. Defaults to use + :class:`DSNTLoss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. 
_`DSNT`: https://arxiv.org/abs/1801.07372 + """ + + _version = 2 + + def __init__(self, + in_channels: Union[int, Sequence[int]], + in_featuremap_size: Tuple[int, int], + num_joints: int, + lambda_t: int = -1, + debias: bool = False, + beta: float = 1.0, + deconv_out_channels: OptIntSeq = (256, 256, 256), + deconv_kernel_sizes: OptIntSeq = (4, 4, 4), + conv_out_channels: OptIntSeq = None, + conv_kernel_sizes: OptIntSeq = None, + final_layer: dict = dict(kernel_size=1), + loss: ConfigType = dict( + type='MultipleLossWrapper', + losses=[ + dict(type='SmoothL1Loss', use_target_weight=True), + dict(type='JSDiscretLoss', use_target_weight=True) + ]), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + + super().__init__( + in_channels=in_channels, + in_featuremap_size=in_featuremap_size, + num_joints=num_joints, + debias=debias, + beta=beta, + deconv_out_channels=deconv_out_channels, + deconv_kernel_sizes=deconv_kernel_sizes, + conv_out_channels=conv_out_channels, + conv_kernel_sizes=conv_kernel_sizes, + final_layer=final_layer, + loss=loss, + decoder=decoder, + init_cfg=init_cfg) + + self.lambda_t = lambda_t + + def loss(self, + inputs: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + pred_coords, pred_heatmaps = self.forward(inputs) + keypoint_labels = torch.cat( + [d.gt_instance_labels.keypoint_labels for d in batch_data_samples]) + keypoint_weights = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples + ]) + gt_heatmaps = torch.stack( + [d.gt_fields.heatmaps for d in batch_data_samples]) + + input_list = [pred_coords, pred_heatmaps] + target_list = [keypoint_labels, gt_heatmaps] + # calculate losses + losses = dict() + + loss_list = self.loss_module(input_list, target_list, keypoint_weights) + + loss = loss_list[0] + loss_list[1] + + if self.lambda_t > 0: + mh = MessageHub.get_current_instance() + cur_epoch = mh.get_info('epoch') + if cur_epoch >= self.lambda_t: + loss = loss_list[0] + + losses.update(loss_kpt=loss) + + # calculate accuracy + _, avg_acc, _ = keypoint_pck_accuracy( + pred=to_numpy(pred_coords), + gt=to_numpy(keypoint_labels), + mask=to_numpy(keypoint_weights) > 0, + thr=0.05, + norm_factor=np.ones((pred_coords.size(0), 2), dtype=np.float32)) + + acc_pose = torch.tensor(avg_acc, device=keypoint_labels.device) + losses.update(acc_pose=acc_pose) + + return losses diff --git a/mmpose/models/heads/regression_heads/integral_regression_head.py b/mmpose/models/heads/regression_heads/integral_regression_head.py index 9046d94ad4..add813b429 100644 --- a/mmpose/models/heads/regression_heads/integral_regression_head.py +++ b/mmpose/models/heads/regression_heads/integral_regression_head.py @@ -1,339 +1,339 @@ -# Copyright (c) OpenMMLab. All rights reserved. - -from typing import Optional, Sequence, Tuple, Union - -import numpy as np -import torch -import torch.nn.functional as F -from mmcv.cnn import build_conv_layer -from mmengine.structures import PixelData -from torch import Tensor, nn - -from mmpose.evaluation.functional import keypoint_pck_accuracy -from mmpose.models.utils.tta import flip_coordinates, flip_heatmaps -from mmpose.registry import KEYPOINT_CODECS, MODELS -from mmpose.utils.tensor_utils import to_numpy -from mmpose.utils.typing import (ConfigType, OptConfigType, OptSampleList, - Predictions) -from .. 
import HeatmapHead -from ..base_head import BaseHead - -OptIntSeq = Optional[Sequence[int]] - - -@MODELS.register_module() -class IntegralRegressionHead(BaseHead): - """Top-down integral regression head introduced in `IPR`_ by Xiao et - al(2018). The head contains a differentiable spatial to numerical transform - (DSNT) layer that do soft-argmax operation on the predicted heatmaps to - regress the coordinates. - - This head is used for algorithms that only supervise the coordinates. - - Args: - in_channels (int | sequence[int]): Number of input channels - in_featuremap_size (int | sequence[int]): Size of input feature map - num_joints (int): Number of joints - debias (bool): Whether to remove the bias of Integral Pose Regression. - see `Removing the Bias of Integral Pose Regression`_ by Gu et al - (2021). Defaults to ``False``. - beta (float): A smoothing parameter in softmax. Defaults to ``1.0``. - deconv_out_channels (sequence[int]): The output channel number of each - deconv layer. Defaults to ``(256, 256, 256)`` - deconv_kernel_sizes (sequence[int | tuple], optional): The kernel size - of each deconv layer. Each element should be either an integer for - both height and width dimensions, or a tuple of two integers for - the height and the width dimension respectively.Defaults to - ``(4, 4, 4)`` - conv_out_channels (sequence[int], optional): The output channel number - of each intermediate conv layer. ``None`` means no intermediate - conv layer between deconv layers and the final conv layer. - Defaults to ``None`` - conv_kernel_sizes (sequence[int | tuple], optional): The kernel size - of each intermediate conv layer. Defaults to ``None`` - final_layer (dict): Arguments of the final Conv2d layer. - Defaults to ``dict(kernel_size=1)`` - loss (Config): Config for keypoint loss. Defaults to use - :class:`SmoothL1Loss` - decoder (Config, optional): The decoder config that controls decoding - keypoint coordinates from the network output. Defaults to ``None`` - init_cfg (Config, optional): Config to control the initialization. See - :attr:`default_init_cfg` for default settings - - .. _`IPR`: https://arxiv.org/abs/1711.08229 - .. 
_`Debias`: - """ - - _version = 2 - - def __init__(self, - in_channels: Union[int, Sequence[int]], - in_featuremap_size: Tuple[int, int], - num_joints: int, - debias: bool = False, - beta: float = 1.0, - deconv_out_channels: OptIntSeq = (256, 256, 256), - deconv_kernel_sizes: OptIntSeq = (4, 4, 4), - conv_out_channels: OptIntSeq = None, - conv_kernel_sizes: OptIntSeq = None, - final_layer: dict = dict(kernel_size=1), - loss: ConfigType = dict( - type='SmoothL1Loss', use_target_weight=True), - decoder: OptConfigType = None, - init_cfg: OptConfigType = None): - - if init_cfg is None: - init_cfg = self.default_init_cfg - - super().__init__(init_cfg) - - self.in_channels = in_channels - self.num_joints = num_joints - self.debias = debias - self.beta = beta - self.loss_module = MODELS.build(loss) - if decoder is not None: - self.decoder = KEYPOINT_CODECS.build(decoder) - else: - self.decoder = None - - num_deconv = len(deconv_out_channels) if deconv_out_channels else 0 - if num_deconv != 0: - - self.heatmap_size = tuple( - [s * (2**num_deconv) for s in in_featuremap_size]) - - # deconv layers + 1x1 conv - self.simplebaseline_head = HeatmapHead( - in_channels=in_channels, - out_channels=num_joints, - deconv_out_channels=deconv_out_channels, - deconv_kernel_sizes=deconv_kernel_sizes, - conv_out_channels=conv_out_channels, - conv_kernel_sizes=conv_kernel_sizes, - final_layer=final_layer) - - if final_layer is not None: - in_channels = num_joints - else: - in_channels = deconv_out_channels[-1] - - else: - self.simplebaseline_head = None - - if final_layer is not None: - cfg = dict( - type='Conv2d', - in_channels=in_channels, - out_channels=num_joints, - kernel_size=1) - cfg.update(final_layer) - self.final_layer = build_conv_layer(cfg) - else: - self.final_layer = None - - self.heatmap_size = in_featuremap_size - - if isinstance(in_channels, list): - raise ValueError( - f'{self.__class__.__name__} does not support selecting ' - 'multiple input features.') - - W, H = self.heatmap_size - self.linspace_x = torch.arange(0.0, 1.0 * W, 1).reshape(1, 1, 1, W) / W - self.linspace_y = torch.arange(0.0, 1.0 * H, 1).reshape(1, 1, H, 1) / H - - self.linspace_x = nn.Parameter(self.linspace_x, requires_grad=False) - self.linspace_y = nn.Parameter(self.linspace_y, requires_grad=False) - - self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) - - def _linear_expectation(self, heatmaps: Tensor, - linspace: Tensor) -> Tensor: - """Calculate linear expectation.""" - - B, N, _, _ = heatmaps.shape - heatmaps = heatmaps.mul(linspace).reshape(B, N, -1) - expectation = torch.sum(heatmaps, dim=2, keepdim=True) - - return expectation - - def _flat_softmax(self, featmaps: Tensor) -> Tensor: - """Use Softmax to normalize the featmaps in depthwise.""" - - _, N, H, W = featmaps.shape - - featmaps = featmaps.reshape(-1, N, H * W) - heatmaps = F.softmax(featmaps, dim=2) - - return heatmaps.reshape(-1, N, H, W) - - def forward(self, feats: Tuple[Tensor]) -> Union[Tensor, Tuple[Tensor]]: - """Forward the network. The input is multi scale feature maps and the - output is the coordinates. - - Args: - feats (Tuple[Tensor]): Multi scale feature maps. - - Returns: - Tensor: output coordinates(and sigmas[optional]). 
- """ - if self.simplebaseline_head is None: - feats = feats[-1] - if self.final_layer is not None: - feats = self.final_layer(feats) - else: - feats = self.simplebaseline_head(feats) - - heatmaps = self._flat_softmax(feats * self.beta) - - pred_x = self._linear_expectation(heatmaps, self.linspace_x) - pred_y = self._linear_expectation(heatmaps, self.linspace_y) - - if self.debias: - B, N, H, W = feats.shape - C = feats.reshape(B, N, H * W).exp().sum(dim=2).reshape(B, N, 1) - pred_x = C / (C - 1) * (pred_x - 1 / (2 * C)) - pred_y = C / (C - 1) * (pred_y - 1 / (2 * C)) - - coords = torch.cat([pred_x, pred_y], dim=-1) - return coords, heatmaps - - def predict(self, - feats: Tuple[Tensor], - batch_data_samples: OptSampleList, - test_cfg: ConfigType = {}) -> Predictions: - """Predict results from features. - - Args: - feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage - features (or multiple multi-stage features in TTA) - batch_data_samples (List[:obj:`PoseDataSample`]): The batch - data samples - test_cfg (dict): The runtime config for testing process. Defaults - to {} - - Returns: - Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If - ``test_cfg['output_heatmap']==True``, return both pose and heatmap - prediction; otherwise only return the pose prediction. - - The pose prediction is a list of ``InstanceData``, each contains - the following fields: - - - keypoints (np.ndarray): predicted keypoint coordinates in - shape (num_instances, K, D) where K is the keypoint number - and D is the keypoint dimension - - keypoint_scores (np.ndarray): predicted keypoint scores in - shape (num_instances, K) - - The heatmap prediction is a list of ``PixelData``, each contains - the following fields: - - - heatmaps (Tensor): The predicted heatmaps in shape (K, h, w) - """ - - if test_cfg.get('flip_test', False): - # TTA: flip test -> feats = [orig, flipped] - assert isinstance(feats, list) and len(feats) == 2 - flip_indices = batch_data_samples[0].metainfo['flip_indices'] - input_size = batch_data_samples[0].metainfo['input_size'] - _feats, _feats_flip = feats - - _batch_coords, _batch_heatmaps = self.forward(_feats) - - _batch_coords_flip, _batch_heatmaps_flip = self.forward( - _feats_flip) - _batch_coords_flip = flip_coordinates( - _batch_coords_flip, - flip_indices=flip_indices, - shift_coords=test_cfg.get('shift_coords', True), - input_size=input_size) - _batch_heatmaps_flip = flip_heatmaps( - _batch_heatmaps_flip, - flip_mode='heatmap', - flip_indices=flip_indices, - shift_heatmap=test_cfg.get('shift_heatmap', False)) - - batch_coords = (_batch_coords + _batch_coords_flip) * 0.5 - batch_heatmaps = (_batch_heatmaps + _batch_heatmaps_flip) * 0.5 - else: - batch_coords, batch_heatmaps = self.forward(feats) # (B, K, D) - - batch_coords.unsqueeze_(dim=1) # (B, N, K, D) - preds = self.decode(batch_coords) - - if test_cfg.get('output_heatmaps', False): - pred_fields = [ - PixelData(heatmaps=hm) for hm in batch_heatmaps.detach() - ] - return preds, pred_fields - else: - return preds - - def loss(self, - inputs: Tuple[Tensor], - batch_data_samples: OptSampleList, - train_cfg: ConfigType = {}) -> dict: - """Calculate losses from a batch of inputs and data samples.""" - - pred_coords, _ = self.forward(inputs) - keypoint_labels = torch.cat( - [d.gt_instance_labels.keypoint_labels for d in batch_data_samples]) - keypoint_weights = torch.cat([ - d.gt_instance_labels.keypoint_weights for d in batch_data_samples - ]) - - # calculate losses - losses = dict() - - # TODO: multi-loss calculation - loss 
= self.loss_module(pred_coords, keypoint_labels, keypoint_weights) - - losses.update(loss_kpt=loss) - - # calculate accuracy - _, avg_acc, _ = keypoint_pck_accuracy( - pred=to_numpy(pred_coords), - gt=to_numpy(keypoint_labels), - mask=to_numpy(keypoint_weights) > 0, - thr=0.05, - norm_factor=np.ones((pred_coords.size(0), 2), dtype=np.float32)) - - acc_pose = torch.tensor(avg_acc, device=keypoint_labels.device) - losses.update(acc_pose=acc_pose) - - return losses - - @property - def default_init_cfg(self): - init_cfg = [dict(type='Normal', layer=['Linear'], std=0.01, bias=0)] - return init_cfg - - def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, - **kwargs): - """A hook function to load weights of deconv layers from - :class:`HeatmapHead` into `simplebaseline_head`. - - The hook will be automatically registered during initialization. - """ - - # convert old-version state dict - keys = list(state_dict.keys()) - for _k in keys: - if not _k.startswith(prefix): - continue - v = state_dict.pop(_k) - k = _k.lstrip(prefix) - - k_new = _k - k_parts = k.split('.') - if self.simplebaseline_head is not None: - if k_parts[0] == 'conv_layers': - k_new = ( - prefix + 'simplebaseline_head.deconv_layers.' + - '.'.join(k_parts[1:])) - elif k_parts[0] == 'final_layer': - k_new = prefix + 'simplebaseline_head.' + k - - state_dict[k_new] = v +# Copyright (c) OpenMMLab. All rights reserved. + +from typing import Optional, Sequence, Tuple, Union + +import numpy as np +import torch +import torch.nn.functional as F +from mmcv.cnn import build_conv_layer +from mmengine.structures import PixelData +from torch import Tensor, nn + +from mmpose.evaluation.functional import keypoint_pck_accuracy +from mmpose.models.utils.tta import flip_coordinates, flip_heatmaps +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, OptConfigType, OptSampleList, + Predictions) +from .. import HeatmapHead +from ..base_head import BaseHead + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class IntegralRegressionHead(BaseHead): + """Top-down integral regression head introduced in `IPR`_ by Xiao et + al(2018). The head contains a differentiable spatial to numerical transform + (DSNT) layer that do soft-argmax operation on the predicted heatmaps to + regress the coordinates. + + This head is used for algorithms that only supervise the coordinates. + + Args: + in_channels (int | sequence[int]): Number of input channels + in_featuremap_size (int | sequence[int]): Size of input feature map + num_joints (int): Number of joints + debias (bool): Whether to remove the bias of Integral Pose Regression. + see `Removing the Bias of Integral Pose Regression`_ by Gu et al + (2021). Defaults to ``False``. + beta (float): A smoothing parameter in softmax. Defaults to ``1.0``. + deconv_out_channels (sequence[int]): The output channel number of each + deconv layer. Defaults to ``(256, 256, 256)`` + deconv_kernel_sizes (sequence[int | tuple], optional): The kernel size + of each deconv layer. Each element should be either an integer for + both height and width dimensions, or a tuple of two integers for + the height and the width dimension respectively.Defaults to + ``(4, 4, 4)`` + conv_out_channels (sequence[int], optional): The output channel number + of each intermediate conv layer. ``None`` means no intermediate + conv layer between deconv layers and the final conv layer. 
+ Defaults to ``None`` + conv_kernel_sizes (sequence[int | tuple], optional): The kernel size + of each intermediate conv layer. Defaults to ``None`` + final_layer (dict): Arguments of the final Conv2d layer. + Defaults to ``dict(kernel_size=1)`` + loss (Config): Config for keypoint loss. Defaults to use + :class:`SmoothL1Loss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. _`IPR`: https://arxiv.org/abs/1711.08229 + .. _`Debias`: + """ + + _version = 2 + + def __init__(self, + in_channels: Union[int, Sequence[int]], + in_featuremap_size: Tuple[int, int], + num_joints: int, + debias: bool = False, + beta: float = 1.0, + deconv_out_channels: OptIntSeq = (256, 256, 256), + deconv_kernel_sizes: OptIntSeq = (4, 4, 4), + conv_out_channels: OptIntSeq = None, + conv_kernel_sizes: OptIntSeq = None, + final_layer: dict = dict(kernel_size=1), + loss: ConfigType = dict( + type='SmoothL1Loss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.in_channels = in_channels + self.num_joints = num_joints + self.debias = debias + self.beta = beta + self.loss_module = MODELS.build(loss) + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + num_deconv = len(deconv_out_channels) if deconv_out_channels else 0 + if num_deconv != 0: + + self.heatmap_size = tuple( + [s * (2**num_deconv) for s in in_featuremap_size]) + + # deconv layers + 1x1 conv + self.simplebaseline_head = HeatmapHead( + in_channels=in_channels, + out_channels=num_joints, + deconv_out_channels=deconv_out_channels, + deconv_kernel_sizes=deconv_kernel_sizes, + conv_out_channels=conv_out_channels, + conv_kernel_sizes=conv_kernel_sizes, + final_layer=final_layer) + + if final_layer is not None: + in_channels = num_joints + else: + in_channels = deconv_out_channels[-1] + + else: + self.simplebaseline_head = None + + if final_layer is not None: + cfg = dict( + type='Conv2d', + in_channels=in_channels, + out_channels=num_joints, + kernel_size=1) + cfg.update(final_layer) + self.final_layer = build_conv_layer(cfg) + else: + self.final_layer = None + + self.heatmap_size = in_featuremap_size + + if isinstance(in_channels, list): + raise ValueError( + f'{self.__class__.__name__} does not support selecting ' + 'multiple input features.') + + W, H = self.heatmap_size + self.linspace_x = torch.arange(0.0, 1.0 * W, 1).reshape(1, 1, 1, W) / W + self.linspace_y = torch.arange(0.0, 1.0 * H, 1).reshape(1, 1, H, 1) / H + + self.linspace_x = nn.Parameter(self.linspace_x, requires_grad=False) + self.linspace_y = nn.Parameter(self.linspace_y, requires_grad=False) + + self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) + + def _linear_expectation(self, heatmaps: Tensor, + linspace: Tensor) -> Tensor: + """Calculate linear expectation.""" + + B, N, _, _ = heatmaps.shape + heatmaps = heatmaps.mul(linspace).reshape(B, N, -1) + expectation = torch.sum(heatmaps, dim=2, keepdim=True) + + return expectation + + def _flat_softmax(self, featmaps: Tensor) -> Tensor: + """Use Softmax to normalize the featmaps in depthwise.""" + + _, N, H, W = featmaps.shape + + featmaps = featmaps.reshape(-1, N, H * W) + heatmaps = F.softmax(featmaps, dim=2) + + 
return heatmaps.reshape(-1, N, H, W) + + def forward(self, feats: Tuple[Tensor]) -> Union[Tensor, Tuple[Tensor]]: + """Forward the network. The input is multi scale feature maps and the + output is the coordinates. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + Tensor: output coordinates(and sigmas[optional]). + """ + if self.simplebaseline_head is None: + feats = feats[-1] + if self.final_layer is not None: + feats = self.final_layer(feats) + else: + feats = self.simplebaseline_head(feats) + + heatmaps = self._flat_softmax(feats * self.beta) + + pred_x = self._linear_expectation(heatmaps, self.linspace_x) + pred_y = self._linear_expectation(heatmaps, self.linspace_y) + + if self.debias: + B, N, H, W = feats.shape + C = feats.reshape(B, N, H * W).exp().sum(dim=2).reshape(B, N, 1) + pred_x = C / (C - 1) * (pred_x - 1 / (2 * C)) + pred_y = C / (C - 1) * (pred_y - 1 / (2 * C)) + + coords = torch.cat([pred_x, pred_y], dim=-1) + return coords, heatmaps + + def predict(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from features. + + Args: + feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage + features (or multiple multi-stage features in TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + test_cfg (dict): The runtime config for testing process. Defaults + to {} + + Returns: + Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If + ``test_cfg['output_heatmap']==True``, return both pose and heatmap + prediction; otherwise only return the pose prediction. + + The pose prediction is a list of ``InstanceData``, each contains + the following fields: + + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + + The heatmap prediction is a list of ``PixelData``, each contains + the following fields: + + - heatmaps (Tensor): The predicted heatmaps in shape (K, h, w) + """ + + if test_cfg.get('flip_test', False): + # TTA: flip test -> feats = [orig, flipped] + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + input_size = batch_data_samples[0].metainfo['input_size'] + _feats, _feats_flip = feats + + _batch_coords, _batch_heatmaps = self.forward(_feats) + + _batch_coords_flip, _batch_heatmaps_flip = self.forward( + _feats_flip) + _batch_coords_flip = flip_coordinates( + _batch_coords_flip, + flip_indices=flip_indices, + shift_coords=test_cfg.get('shift_coords', True), + input_size=input_size) + _batch_heatmaps_flip = flip_heatmaps( + _batch_heatmaps_flip, + flip_mode='heatmap', + flip_indices=flip_indices, + shift_heatmap=test_cfg.get('shift_heatmap', False)) + + batch_coords = (_batch_coords + _batch_coords_flip) * 0.5 + batch_heatmaps = (_batch_heatmaps + _batch_heatmaps_flip) * 0.5 + else: + batch_coords, batch_heatmaps = self.forward(feats) # (B, K, D) + + batch_coords.unsqueeze_(dim=1) # (B, N, K, D) + preds = self.decode(batch_coords) + + if test_cfg.get('output_heatmaps', False): + pred_fields = [ + PixelData(heatmaps=hm) for hm in batch_heatmaps.detach() + ] + return preds, pred_fields + else: + return preds + + def loss(self, + inputs: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data 
samples.""" + + pred_coords, _ = self.forward(inputs) + keypoint_labels = torch.cat( + [d.gt_instance_labels.keypoint_labels for d in batch_data_samples]) + keypoint_weights = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples + ]) + + # calculate losses + losses = dict() + + # TODO: multi-loss calculation + loss = self.loss_module(pred_coords, keypoint_labels, keypoint_weights) + + losses.update(loss_kpt=loss) + + # calculate accuracy + _, avg_acc, _ = keypoint_pck_accuracy( + pred=to_numpy(pred_coords), + gt=to_numpy(keypoint_labels), + mask=to_numpy(keypoint_weights) > 0, + thr=0.05, + norm_factor=np.ones((pred_coords.size(0), 2), dtype=np.float32)) + + acc_pose = torch.tensor(avg_acc, device=keypoint_labels.device) + losses.update(acc_pose=acc_pose) + + return losses + + @property + def default_init_cfg(self): + init_cfg = [dict(type='Normal', layer=['Linear'], std=0.01, bias=0)] + return init_cfg + + def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, + **kwargs): + """A hook function to load weights of deconv layers from + :class:`HeatmapHead` into `simplebaseline_head`. + + The hook will be automatically registered during initialization. + """ + + # convert old-version state dict + keys = list(state_dict.keys()) + for _k in keys: + if not _k.startswith(prefix): + continue + v = state_dict.pop(_k) + k = _k.lstrip(prefix) + + k_new = _k + k_parts = k.split('.') + if self.simplebaseline_head is not None: + if k_parts[0] == 'conv_layers': + k_new = ( + prefix + 'simplebaseline_head.deconv_layers.' + + '.'.join(k_parts[1:])) + elif k_parts[0] == 'final_layer': + k_new = prefix + 'simplebaseline_head.' + k + + state_dict[k_new] = v diff --git a/mmpose/models/heads/regression_heads/regression_head.py b/mmpose/models/heads/regression_heads/regression_head.py index 8ff73aa6ef..514bbf56db 100644 --- a/mmpose/models/heads/regression_heads/regression_head.py +++ b/mmpose/models/heads/regression_heads/regression_head.py @@ -1,146 +1,146 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Optional, Sequence, Tuple, Union - -import numpy as np -import torch -from torch import Tensor, nn - -from mmpose.evaluation.functional import keypoint_pck_accuracy -from mmpose.models.utils.tta import flip_coordinates -from mmpose.registry import KEYPOINT_CODECS, MODELS -from mmpose.utils.tensor_utils import to_numpy -from mmpose.utils.typing import (ConfigType, OptConfigType, OptSampleList, - Predictions) -from ..base_head import BaseHead - -OptIntSeq = Optional[Sequence[int]] - - -@MODELS.register_module() -class RegressionHead(BaseHead): - """Top-down regression head introduced in `Deeppose`_ by Toshev et al - (2014). The head is composed of fully-connected layers to predict the - coordinates directly. - - Args: - in_channels (int | sequence[int]): Number of input channels - num_joints (int): Number of joints - loss (Config): Config for keypoint loss. Defaults to use - :class:`SmoothL1Loss` - decoder (Config, optional): The decoder config that controls decoding - keypoint coordinates from the network output. Defaults to ``None`` - init_cfg (Config, optional): Config to control the initialization. See - :attr:`default_init_cfg` for default settings - - .. 
_`Deeppose`: https://arxiv.org/abs/1312.4659 - """ - - _version = 2 - - def __init__(self, - in_channels: Union[int, Sequence[int]], - num_joints: int, - loss: ConfigType = dict( - type='SmoothL1Loss', use_target_weight=True), - decoder: OptConfigType = None, - init_cfg: OptConfigType = None): - - if init_cfg is None: - init_cfg = self.default_init_cfg - - super().__init__(init_cfg) - - self.in_channels = in_channels - self.num_joints = num_joints - self.loss_module = MODELS.build(loss) - if decoder is not None: - self.decoder = KEYPOINT_CODECS.build(decoder) - else: - self.decoder = None - - # Define fully-connected layers - self.fc = nn.Linear(in_channels, self.num_joints * 2) - - def forward(self, feats: Tuple[Tensor]) -> Tensor: - """Forward the network. The input is multi scale feature maps and the - output is the coordinates. - - Args: - feats (Tuple[Tensor]): Multi scale feature maps. - - Returns: - Tensor: output coordinates(and sigmas[optional]). - """ - x = feats[-1] - - x = torch.flatten(x, 1) - x = self.fc(x) - - return x.reshape(-1, self.num_joints, 2) - - def predict(self, - feats: Tuple[Tensor], - batch_data_samples: OptSampleList, - test_cfg: ConfigType = {}) -> Predictions: - """Predict results from outputs.""" - - if test_cfg.get('flip_test', False): - # TTA: flip test -> feats = [orig, flipped] - assert isinstance(feats, list) and len(feats) == 2 - flip_indices = batch_data_samples[0].metainfo['flip_indices'] - input_size = batch_data_samples[0].metainfo['input_size'] - _feats, _feats_flip = feats - - _batch_coords = self.forward(_feats) - _batch_coords_flip = flip_coordinates( - self.forward(_feats_flip), - flip_indices=flip_indices, - shift_coords=test_cfg.get('shift_coords', True), - input_size=input_size) - batch_coords = (_batch_coords + _batch_coords_flip) * 0.5 - else: - batch_coords = self.forward(feats) # (B, K, D) - - batch_coords.unsqueeze_(dim=1) # (B, N, K, D) - preds = self.decode(batch_coords) - - return preds - - def loss(self, - inputs: Tuple[Tensor], - batch_data_samples: OptSampleList, - train_cfg: ConfigType = {}) -> dict: - """Calculate losses from a batch of inputs and data samples.""" - - pred_outputs = self.forward(inputs) - - keypoint_labels = torch.cat( - [d.gt_instance_labels.keypoint_labels for d in batch_data_samples]) - keypoint_weights = torch.cat([ - d.gt_instance_labels.keypoint_weights for d in batch_data_samples - ]) - - # calculate losses - losses = dict() - loss = self.loss_module(pred_outputs, keypoint_labels, - keypoint_weights.unsqueeze(-1)) - - losses.update(loss_kpt=loss) - - # calculate accuracy - _, avg_acc, _ = keypoint_pck_accuracy( - pred=to_numpy(pred_outputs), - gt=to_numpy(keypoint_labels), - mask=to_numpy(keypoint_weights) > 0, - thr=0.05, - norm_factor=np.ones((pred_outputs.size(0), 2), dtype=np.float32)) - - acc_pose = torch.tensor(avg_acc, device=keypoint_labels.device) - losses.update(acc_pose=acc_pose) - - return losses - - @property - def default_init_cfg(self): - init_cfg = [dict(type='Normal', layer=['Linear'], std=0.01, bias=0)] - return init_cfg +# Copyright (c) OpenMMLab. All rights reserved. 
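# --- Editorial sketch (illustrative, not part of the patch) ---
# Quick shape check for the RegressionHead shown in this hunk: the last
# feature map is flattened and mapped by a single Linear layer to (K, 2)
# coordinates. The batch size, channel count and keypoint number below are
# assumptions for illustration, and the snippet assumes mmpose and its
# model registry import cleanly in the current environment.
import torch
from mmpose.models.heads import RegressionHead

head = RegressionHead(in_channels=2048, num_joints=17)
feats = (torch.randn(2, 2048, 1, 1),)  # simulated (B, C, 1, 1) backbone output
coords = head(feats)                   # forward(): flatten -> fc -> reshape
assert coords.shape == (2, 17, 2)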
+from typing import Optional, Sequence, Tuple, Union + +import numpy as np +import torch +from torch import Tensor, nn + +from mmpose.evaluation.functional import keypoint_pck_accuracy +from mmpose.models.utils.tta import flip_coordinates +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, OptConfigType, OptSampleList, + Predictions) +from ..base_head import BaseHead + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class RegressionHead(BaseHead): + """Top-down regression head introduced in `Deeppose`_ by Toshev et al + (2014). The head is composed of fully-connected layers to predict the + coordinates directly. + + Args: + in_channels (int | sequence[int]): Number of input channels + num_joints (int): Number of joints + loss (Config): Config for keypoint loss. Defaults to use + :class:`SmoothL1Loss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. _`Deeppose`: https://arxiv.org/abs/1312.4659 + """ + + _version = 2 + + def __init__(self, + in_channels: Union[int, Sequence[int]], + num_joints: int, + loss: ConfigType = dict( + type='SmoothL1Loss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.in_channels = in_channels + self.num_joints = num_joints + self.loss_module = MODELS.build(loss) + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + # Define fully-connected layers + self.fc = nn.Linear(in_channels, self.num_joints * 2) + + def forward(self, feats: Tuple[Tensor]) -> Tensor: + """Forward the network. The input is multi scale feature maps and the + output is the coordinates. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + Tensor: output coordinates(and sigmas[optional]). 
+ """ + x = feats[-1] + + x = torch.flatten(x, 1) + x = self.fc(x) + + return x.reshape(-1, self.num_joints, 2) + + def predict(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from outputs.""" + + if test_cfg.get('flip_test', False): + # TTA: flip test -> feats = [orig, flipped] + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + input_size = batch_data_samples[0].metainfo['input_size'] + _feats, _feats_flip = feats + + _batch_coords = self.forward(_feats) + _batch_coords_flip = flip_coordinates( + self.forward(_feats_flip), + flip_indices=flip_indices, + shift_coords=test_cfg.get('shift_coords', True), + input_size=input_size) + batch_coords = (_batch_coords + _batch_coords_flip) * 0.5 + else: + batch_coords = self.forward(feats) # (B, K, D) + + batch_coords.unsqueeze_(dim=1) # (B, N, K, D) + preds = self.decode(batch_coords) + + return preds + + def loss(self, + inputs: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + pred_outputs = self.forward(inputs) + + keypoint_labels = torch.cat( + [d.gt_instance_labels.keypoint_labels for d in batch_data_samples]) + keypoint_weights = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples + ]) + + # calculate losses + losses = dict() + loss = self.loss_module(pred_outputs, keypoint_labels, + keypoint_weights.unsqueeze(-1)) + + losses.update(loss_kpt=loss) + + # calculate accuracy + _, avg_acc, _ = keypoint_pck_accuracy( + pred=to_numpy(pred_outputs), + gt=to_numpy(keypoint_labels), + mask=to_numpy(keypoint_weights) > 0, + thr=0.05, + norm_factor=np.ones((pred_outputs.size(0), 2), dtype=np.float32)) + + acc_pose = torch.tensor(avg_acc, device=keypoint_labels.device) + losses.update(acc_pose=acc_pose) + + return losses + + @property + def default_init_cfg(self): + init_cfg = [dict(type='Normal', layer=['Linear'], std=0.01, bias=0)] + return init_cfg diff --git a/mmpose/models/heads/regression_heads/rle_head.py b/mmpose/models/heads/regression_heads/rle_head.py index ef62d7d9ac..ff7c4f022d 100644 --- a/mmpose/models/heads/regression_heads/rle_head.py +++ b/mmpose/models/heads/regression_heads/rle_head.py @@ -1,187 +1,187 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Optional, Sequence, Tuple, Union - -import numpy as np -import torch -from torch import Tensor, nn - -from mmpose.evaluation.functional import keypoint_pck_accuracy -from mmpose.models.utils.tta import flip_coordinates -from mmpose.registry import KEYPOINT_CODECS, MODELS -from mmpose.utils.tensor_utils import to_numpy -from mmpose.utils.typing import (ConfigType, OptConfigType, OptSampleList, - Predictions) -from ..base_head import BaseHead - -OptIntSeq = Optional[Sequence[int]] - - -@MODELS.register_module() -class RLEHead(BaseHead): - """Top-down regression head introduced in `RLE`_ by Li et al(2021). The - head is composed of fully-connected layers to predict the coordinates and - sigma(the variance of the coordinates) together. - - Args: - in_channels (int | sequence[int]): Number of input channels - num_joints (int): Number of joints - loss (Config): Config for keypoint loss. Defaults to use - :class:`RLELoss` - decoder (Config, optional): The decoder config that controls decoding - keypoint coordinates from the network output. 
Defaults to ``None`` - init_cfg (Config, optional): Config to control the initialization. See - :attr:`default_init_cfg` for default settings - - .. _`RLE`: https://arxiv.org/abs/2107.11291 - """ - - _version = 2 - - def __init__(self, - in_channels: Union[int, Sequence[int]], - num_joints: int, - loss: ConfigType = dict( - type='RLELoss', use_target_weight=True), - decoder: OptConfigType = None, - init_cfg: OptConfigType = None): - - if init_cfg is None: - init_cfg = self.default_init_cfg - - super().__init__(init_cfg) - - self.in_channels = in_channels - self.num_joints = num_joints - self.loss_module = MODELS.build(loss) - if decoder is not None: - self.decoder = KEYPOINT_CODECS.build(decoder) - else: - self.decoder = None - - # Define fully-connected layers - self.fc = nn.Linear(in_channels, self.num_joints * 4) - - # Register the hook to automatically convert old version state dicts - self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) - - def forward(self, feats: Tuple[Tensor]) -> Tensor: - """Forward the network. The input is multi scale feature maps and the - output is the coordinates. - - Args: - feats (Tuple[Tensor]): Multi scale feature maps. - - Returns: - Tensor: output coordinates(and sigmas[optional]). - """ - x = feats[-1] - - x = torch.flatten(x, 1) - x = self.fc(x) - - return x.reshape(-1, self.num_joints, 4) - - def predict(self, - feats: Tuple[Tensor], - batch_data_samples: OptSampleList, - test_cfg: ConfigType = {}) -> Predictions: - """Predict results from outputs.""" - - if test_cfg.get('flip_test', False): - # TTA: flip test -> feats = [orig, flipped] - assert isinstance(feats, list) and len(feats) == 2 - flip_indices = batch_data_samples[0].metainfo['flip_indices'] - input_size = batch_data_samples[0].metainfo['input_size'] - - _feats, _feats_flip = feats - - _batch_coords = self.forward(_feats) - _batch_coords[..., 2:] = _batch_coords[..., 2:].sigmoid() - - _batch_coords_flip = flip_coordinates( - self.forward(_feats_flip), - flip_indices=flip_indices, - shift_coords=test_cfg.get('shift_coords', True), - input_size=input_size) - _batch_coords_flip[..., 2:] = _batch_coords_flip[..., 2:].sigmoid() - - batch_coords = (_batch_coords + _batch_coords_flip) * 0.5 - else: - batch_coords = self.forward(feats) # (B, K, D) - batch_coords[..., 2:] = batch_coords[..., 2:].sigmoid() - - batch_coords.unsqueeze_(dim=1) # (B, N, K, D) - preds = self.decode(batch_coords) - - return preds - - def loss(self, - inputs: Tuple[Tensor], - batch_data_samples: OptSampleList, - train_cfg: ConfigType = {}) -> dict: - """Calculate losses from a batch of inputs and data samples.""" - - pred_outputs = self.forward(inputs) - - keypoint_labels = torch.cat( - [d.gt_instance_labels.keypoint_labels for d in batch_data_samples]) - keypoint_weights = torch.cat([ - d.gt_instance_labels.keypoint_weights for d in batch_data_samples - ]) - - pred_coords = pred_outputs[:, :, :2] - pred_sigma = pred_outputs[:, :, 2:4] - - # calculate losses - losses = dict() - loss = self.loss_module(pred_coords, pred_sigma, keypoint_labels, - keypoint_weights.unsqueeze(-1)) - - losses.update(loss_kpt=loss) - - # calculate accuracy - _, avg_acc, _ = keypoint_pck_accuracy( - pred=to_numpy(pred_coords), - gt=to_numpy(keypoint_labels), - mask=to_numpy(keypoint_weights) > 0, - thr=0.05, - norm_factor=np.ones((pred_coords.size(0), 2), dtype=np.float32)) - - acc_pose = torch.tensor(avg_acc, device=keypoint_labels.device) - losses.update(acc_pose=acc_pose) - - return losses - - def 
_load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, - **kwargs): - """A hook function to convert old-version state dict of - :class:`TopdownHeatmapSimpleHead` (before MMPose v1.0.0) to a - compatible format of :class:`HeatmapHead`. - - The hook will be automatically registered during initialization. - """ - - version = local_meta.get('version', None) - if version and version >= self._version: - return - - # convert old-version state dict - keys = list(state_dict.keys()) - for _k in keys: - v = state_dict.pop(_k) - k = _k.lstrip(prefix) - # In old version, "loss" includes the instances of loss, - # now it should be renamed "loss_module" - k_parts = k.split('.') - if k_parts[0] == 'loss': - # loss.xxx -> loss_module.xxx - k_new = prefix + 'loss_module.' + '.'.join(k_parts[1:]) - else: - k_new = _k - - state_dict[k_new] = v - - @property - def default_init_cfg(self): - init_cfg = [dict(type='Normal', layer=['Linear'], std=0.01, bias=0)] - return init_cfg +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Sequence, Tuple, Union + +import numpy as np +import torch +from torch import Tensor, nn + +from mmpose.evaluation.functional import keypoint_pck_accuracy +from mmpose.models.utils.tta import flip_coordinates +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, OptConfigType, OptSampleList, + Predictions) +from ..base_head import BaseHead + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class RLEHead(BaseHead): + """Top-down regression head introduced in `RLE`_ by Li et al(2021). The + head is composed of fully-connected layers to predict the coordinates and + sigma(the variance of the coordinates) together. + + Args: + in_channels (int | sequence[int]): Number of input channels + num_joints (int): Number of joints + loss (Config): Config for keypoint loss. Defaults to use + :class:`RLELoss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. _`RLE`: https://arxiv.org/abs/2107.11291 + """ + + _version = 2 + + def __init__(self, + in_channels: Union[int, Sequence[int]], + num_joints: int, + loss: ConfigType = dict( + type='RLELoss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.in_channels = in_channels + self.num_joints = num_joints + self.loss_module = MODELS.build(loss) + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + # Define fully-connected layers + self.fc = nn.Linear(in_channels, self.num_joints * 4) + + # Register the hook to automatically convert old version state dicts + self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) + + def forward(self, feats: Tuple[Tensor]) -> Tensor: + """Forward the network. The input is multi scale feature maps and the + output is the coordinates. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + Tensor: output coordinates(and sigmas[optional]). 
+ """ + x = feats[-1] + + x = torch.flatten(x, 1) + x = self.fc(x) + + return x.reshape(-1, self.num_joints, 4) + + def predict(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from outputs.""" + + if test_cfg.get('flip_test', False): + # TTA: flip test -> feats = [orig, flipped] + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = batch_data_samples[0].metainfo['flip_indices'] + input_size = batch_data_samples[0].metainfo['input_size'] + + _feats, _feats_flip = feats + + _batch_coords = self.forward(_feats) + _batch_coords[..., 2:] = _batch_coords[..., 2:].sigmoid() + + _batch_coords_flip = flip_coordinates( + self.forward(_feats_flip), + flip_indices=flip_indices, + shift_coords=test_cfg.get('shift_coords', True), + input_size=input_size) + _batch_coords_flip[..., 2:] = _batch_coords_flip[..., 2:].sigmoid() + + batch_coords = (_batch_coords + _batch_coords_flip) * 0.5 + else: + batch_coords = self.forward(feats) # (B, K, D) + batch_coords[..., 2:] = batch_coords[..., 2:].sigmoid() + + batch_coords.unsqueeze_(dim=1) # (B, N, K, D) + preds = self.decode(batch_coords) + + return preds + + def loss(self, + inputs: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + pred_outputs = self.forward(inputs) + + keypoint_labels = torch.cat( + [d.gt_instance_labels.keypoint_labels for d in batch_data_samples]) + keypoint_weights = torch.cat([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples + ]) + + pred_coords = pred_outputs[:, :, :2] + pred_sigma = pred_outputs[:, :, 2:4] + + # calculate losses + losses = dict() + loss = self.loss_module(pred_coords, pred_sigma, keypoint_labels, + keypoint_weights.unsqueeze(-1)) + + losses.update(loss_kpt=loss) + + # calculate accuracy + _, avg_acc, _ = keypoint_pck_accuracy( + pred=to_numpy(pred_coords), + gt=to_numpy(keypoint_labels), + mask=to_numpy(keypoint_weights) > 0, + thr=0.05, + norm_factor=np.ones((pred_coords.size(0), 2), dtype=np.float32)) + + acc_pose = torch.tensor(avg_acc, device=keypoint_labels.device) + losses.update(acc_pose=acc_pose) + + return losses + + def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, + **kwargs): + """A hook function to convert old-version state dict of + :class:`TopdownHeatmapSimpleHead` (before MMPose v1.0.0) to a + compatible format of :class:`HeatmapHead`. + + The hook will be automatically registered during initialization. + """ + + version = local_meta.get('version', None) + if version and version >= self._version: + return + + # convert old-version state dict + keys = list(state_dict.keys()) + for _k in keys: + v = state_dict.pop(_k) + k = _k.lstrip(prefix) + # In old version, "loss" includes the instances of loss, + # now it should be renamed "loss_module" + k_parts = k.split('.') + if k_parts[0] == 'loss': + # loss.xxx -> loss_module.xxx + k_new = prefix + 'loss_module.' 
+ '.'.join(k_parts[1:]) + else: + k_new = _k + + state_dict[k_new] = v + + @property + def default_init_cfg(self): + init_cfg = [dict(type='Normal', layer=['Linear'], std=0.01, bias=0)] + return init_cfg diff --git a/mmpose/models/heads/regression_heads/temporal_regression_head.py b/mmpose/models/heads/regression_heads/temporal_regression_head.py index ac76316842..2af58156f7 100644 --- a/mmpose/models/heads/regression_heads/temporal_regression_head.py +++ b/mmpose/models/heads/regression_heads/temporal_regression_head.py @@ -1,151 +1,151 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Optional, Sequence, Tuple, Union - -import numpy as np -import torch -from torch import Tensor, nn - -from mmpose.evaluation.functional import keypoint_pck_accuracy -from mmpose.registry import KEYPOINT_CODECS, MODELS -from mmpose.utils.tensor_utils import to_numpy -from mmpose.utils.typing import (ConfigType, OptConfigType, OptSampleList, - Predictions) -from ..base_head import BaseHead - -OptIntSeq = Optional[Sequence[int]] - - -@MODELS.register_module() -class TemporalRegressionHead(BaseHead): - """Temporal Regression head of `VideoPose3D`_ by Dario et al (CVPR'2019). - - Args: - in_channels (int | sequence[int]): Number of input channels - num_joints (int): Number of joints - loss (Config): Config for keypoint loss. Defaults to use - :class:`SmoothL1Loss` - decoder (Config, optional): The decoder config that controls decoding - keypoint coordinates from the network output. Defaults to ``None`` - init_cfg (Config, optional): Config to control the initialization. See - :attr:`default_init_cfg` for default settings - - .. _`VideoPose3D`: https://arxiv.org/abs/1811.11742 - """ - - _version = 2 - - def __init__(self, - in_channels: Union[int, Sequence[int]], - num_joints: int, - loss: ConfigType = dict( - type='MSELoss', use_target_weight=True), - decoder: OptConfigType = None, - init_cfg: OptConfigType = None): - - if init_cfg is None: - init_cfg = self.default_init_cfg - - super().__init__(init_cfg) - - self.in_channels = in_channels - self.num_joints = num_joints - self.loss_module = MODELS.build(loss) - if decoder is not None: - self.decoder = KEYPOINT_CODECS.build(decoder) - else: - self.decoder = None - - # Define fully-connected layers - self.conv = nn.Conv1d(in_channels, self.num_joints * 3, 1) - - def forward(self, feats: Tuple[Tensor]) -> Tensor: - """Forward the network. The input is multi scale feature maps and the - output is the coordinates. - - Args: - feats (Tuple[Tensor]): Multi scale feature maps. - - Returns: - Tensor: Output coordinates (and sigmas[optional]). - """ - x = feats[-1] - - x = self.conv(x) - - return x.reshape(-1, self.num_joints, 3) - - def predict(self, - feats: Tuple[Tensor], - batch_data_samples: OptSampleList, - test_cfg: ConfigType = {}) -> Predictions: - """Predict results from outputs. - - Returns: - preds (sequence[InstanceData]): Prediction results. - Each contains the following fields: - - - keypoints: Predicted keypoints of shape (B, N, K, D). - - keypoint_scores: Scores of predicted keypoints of shape - (B, N, K). 
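# A rough sketch (not part of the patch) of the flip-test averaging performed
# in RLEHead.predict above: the head runs on the original and the horizontally
# flipped features, the flipped output is mapped back, and the two results are
# blended with equal weight. The mirror step is a simplified stand-in for
# `flip_coordinates`; the flip_indices pairing and [0, 1]-normalized x
# coordinates are assumptions for illustration.
import torch

flip_indices = [0, 2, 1]                  # hypothetical left/right keypoint pairing
coords = torch.rand(1, 3, 2)              # (B, K, 2) prediction on the original image
coords_flip = torch.rand(1, 3, 2)         # (B, K, 2) prediction on the flipped image

mapped_back = coords_flip[:, flip_indices].clone()  # swap mirrored keypoints
mapped_back[..., 0] = 1.0 - mapped_back[..., 0]     # un-mirror the x axis
averaged = (coords + mapped_back) * 0.5             # same 0.5 blend as in predict()
print(averaged.shape)                               # torch.Size([1, 3, 2])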
- """ - - batch_coords = self.forward(feats) # (B, K, D) - - # Restore global position with target_root - target_root = batch_data_samples[0].metainfo.get('target_root', None) - if target_root is not None: - target_root = torch.stack([ - torch.from_numpy(b.metainfo['target_root']) - for b in batch_data_samples - ]) - else: - target_root = torch.stack([ - torch.empty((0), dtype=torch.float32) - for _ in batch_data_samples[0].metainfo - ]) - - preds = self.decode((batch_coords, target_root)) - - return preds - - def loss(self, - inputs: Tuple[Tensor], - batch_data_samples: OptSampleList, - train_cfg: ConfigType = {}) -> dict: - """Calculate losses from a batch of inputs and data samples.""" - - pred_outputs = self.forward(inputs) - - lifting_target_label = torch.cat([ - d.gt_instance_labels.lifting_target_label - for d in batch_data_samples - ]) - lifting_target_weights = torch.cat([ - d.gt_instance_labels.lifting_target_weights - for d in batch_data_samples - ]) - - # calculate losses - losses = dict() - loss = self.loss_module(pred_outputs, lifting_target_label, - lifting_target_weights.unsqueeze(-1)) - - losses.update(loss_pose3d=loss) - - # calculate accuracy - _, avg_acc, _ = keypoint_pck_accuracy( - pred=to_numpy(pred_outputs), - gt=to_numpy(lifting_target_label), - mask=to_numpy(lifting_target_weights) > 0, - thr=0.05, - norm_factor=np.ones((pred_outputs.size(0), 3), dtype=np.float32)) - - mpjpe_pose = torch.tensor(avg_acc, device=lifting_target_label.device) - losses.update(mpjpe=mpjpe_pose) - - return losses - - @property - def default_init_cfg(self): - init_cfg = [dict(type='Normal', layer=['Linear'], std=0.01, bias=0)] - return init_cfg +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Sequence, Tuple, Union + +import numpy as np +import torch +from torch import Tensor, nn + +from mmpose.evaluation.functional import keypoint_pck_accuracy +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, OptConfigType, OptSampleList, + Predictions) +from ..base_head import BaseHead + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class TemporalRegressionHead(BaseHead): + """Temporal Regression head of `VideoPose3D`_ by Dario et al (CVPR'2019). + + Args: + in_channels (int | sequence[int]): Number of input channels + num_joints (int): Number of joints + loss (Config): Config for keypoint loss. Defaults to use + :class:`SmoothL1Loss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. _`VideoPose3D`: https://arxiv.org/abs/1811.11742 + """ + + _version = 2 + + def __init__(self, + in_channels: Union[int, Sequence[int]], + num_joints: int, + loss: ConfigType = dict( + type='MSELoss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.in_channels = in_channels + self.num_joints = num_joints + self.loss_module = MODELS.build(loss) + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + # Define fully-connected layers + self.conv = nn.Conv1d(in_channels, self.num_joints * 3, 1) + + def forward(self, feats: Tuple[Tensor]) -> Tensor: + """Forward the network. 
The input is multi scale feature maps and the + output is the coordinates. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + Tensor: Output coordinates (and sigmas[optional]). + """ + x = feats[-1] + + x = self.conv(x) + + return x.reshape(-1, self.num_joints, 3) + + def predict(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from outputs. + + Returns: + preds (sequence[InstanceData]): Prediction results. + Each contains the following fields: + + - keypoints: Predicted keypoints of shape (B, N, K, D). + - keypoint_scores: Scores of predicted keypoints of shape + (B, N, K). + """ + + batch_coords = self.forward(feats) # (B, K, D) + + # Restore global position with target_root + target_root = batch_data_samples[0].metainfo.get('target_root', None) + if target_root is not None: + target_root = torch.stack([ + torch.from_numpy(b.metainfo['target_root']) + for b in batch_data_samples + ]) + else: + target_root = torch.stack([ + torch.empty((0), dtype=torch.float32) + for _ in batch_data_samples[0].metainfo + ]) + + preds = self.decode((batch_coords, target_root)) + + return preds + + def loss(self, + inputs: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + pred_outputs = self.forward(inputs) + + lifting_target_label = torch.cat([ + d.gt_instance_labels.lifting_target_label + for d in batch_data_samples + ]) + lifting_target_weights = torch.cat([ + d.gt_instance_labels.lifting_target_weights + for d in batch_data_samples + ]) + + # calculate losses + losses = dict() + loss = self.loss_module(pred_outputs, lifting_target_label, + lifting_target_weights.unsqueeze(-1)) + + losses.update(loss_pose3d=loss) + + # calculate accuracy + _, avg_acc, _ = keypoint_pck_accuracy( + pred=to_numpy(pred_outputs), + gt=to_numpy(lifting_target_label), + mask=to_numpy(lifting_target_weights) > 0, + thr=0.05, + norm_factor=np.ones((pred_outputs.size(0), 3), dtype=np.float32)) + + mpjpe_pose = torch.tensor(avg_acc, device=lifting_target_label.device) + losses.update(mpjpe=mpjpe_pose) + + return losses + + @property + def default_init_cfg(self): + init_cfg = [dict(type='Normal', layer=['Linear'], std=0.01, bias=0)] + return init_cfg diff --git a/mmpose/models/heads/regression_heads/trajectory_regression_head.py b/mmpose/models/heads/regression_heads/trajectory_regression_head.py index adfd7353d3..ca2958bc01 100644 --- a/mmpose/models/heads/regression_heads/trajectory_regression_head.py +++ b/mmpose/models/heads/regression_heads/trajectory_regression_head.py @@ -1,150 +1,150 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Optional, Sequence, Tuple, Union - -import numpy as np -import torch -from torch import Tensor, nn - -from mmpose.evaluation.functional import keypoint_pck_accuracy -from mmpose.registry import KEYPOINT_CODECS, MODELS -from mmpose.utils.tensor_utils import to_numpy -from mmpose.utils.typing import (ConfigType, OptConfigType, OptSampleList, - Predictions) -from ..base_head import BaseHead - -OptIntSeq = Optional[Sequence[int]] - - -@MODELS.register_module() -class TrajectoryRegressionHead(BaseHead): - """Trajectory Regression head of `VideoPose3D`_ by Dario et al (CVPR'2019). - - Args: - in_channels (int | sequence[int]): Number of input channels - num_joints (int): Number of joints - loss (Config): Config for trajectory loss. 
Defaults to use - :class:`MPJPELoss` - decoder (Config, optional): The decoder config that controls decoding - keypoint coordinates from the network output. Defaults to ``None`` - init_cfg (Config, optional): Config to control the initialization. See - :attr:`default_init_cfg` for default settings - - .. _`VideoPose3D`: https://arxiv.org/abs/1811.11742 - """ - - _version = 2 - - def __init__(self, - in_channels: Union[int, Sequence[int]], - num_joints: int, - loss: ConfigType = dict( - type='MPJPELoss', use_target_weight=True), - decoder: OptConfigType = None, - init_cfg: OptConfigType = None): - - if init_cfg is None: - init_cfg = self.default_init_cfg - - super().__init__(init_cfg) - - self.in_channels = in_channels - self.num_joints = num_joints - self.loss_module = MODELS.build(loss) - if decoder is not None: - self.decoder = KEYPOINT_CODECS.build(decoder) - else: - self.decoder = None - - # Define fully-connected layers - self.conv = nn.Conv1d(in_channels, self.num_joints * 3, 1) - - def forward(self, feats: Tuple[Tensor]) -> Tensor: - """Forward the network. The input is multi scale feature maps and the - output is the coordinates. - - Args: - feats (Tuple[Tensor]): Multi scale feature maps. - - Returns: - Tensor: output coordinates(and sigmas[optional]). - """ - x = feats[-1] - - x = self.conv(x) - - return x.reshape(-1, self.num_joints, 3) - - def predict(self, - feats: Tuple[Tensor], - batch_data_samples: OptSampleList, - test_cfg: ConfigType = {}) -> Predictions: - """Predict results from outputs. - - Returns: - preds (sequence[InstanceData]): Prediction results. - Each contains the following fields: - - - keypoints: Predicted keypoints of shape (B, N, K, D). - - keypoint_scores: Scores of predicted keypoints of shape - (B, N, K). - """ - - batch_coords = self.forward(feats) # (B, K, D) - - # Restore global position with target_root - target_root = batch_data_samples[0].metainfo.get('target_root', None) - if target_root is not None: - target_root = torch.stack([ - torch.from_numpy(b.metainfo['target_root']) - for b in batch_data_samples - ]) - else: - target_root = torch.stack([ - torch.empty((0), dtype=torch.float32) - for _ in batch_data_samples[0].metainfo - ]) - - preds = self.decode((batch_coords, target_root)) - - return preds - - def loss(self, - inputs: Union[Tensor, Tuple[Tensor]], - batch_data_samples: OptSampleList, - train_cfg: ConfigType = {}) -> dict: - """Calculate losses from a batch of inputs and data samples.""" - - pred_outputs = self.forward(inputs) - - lifting_target_label = torch.cat([ - d.gt_instance_labels.lifting_target_label - for d in batch_data_samples - ]) - trajectory_weights = torch.cat([ - d.gt_instance_labels.trajectory_weights for d in batch_data_samples - ]) - - # calculate losses - losses = dict() - loss = self.loss_module(pred_outputs, lifting_target_label, - trajectory_weights.unsqueeze(-1)) - - losses.update(loss_traj=loss) - - # calculate accuracy - _, avg_acc, _ = keypoint_pck_accuracy( - pred=to_numpy(pred_outputs), - gt=to_numpy(lifting_target_label), - mask=to_numpy(trajectory_weights) > 0, - thr=0.05, - norm_factor=np.ones((pred_outputs.size(0), 3), dtype=np.float32)) - - mpjpe_traj = torch.tensor(avg_acc, device=lifting_target_label.device) - losses.update(mpjpe_traj=mpjpe_traj) - - return losses - - @property - def default_init_cfg(self): - init_cfg = [dict(type='Normal', layer=['Linear'], std=0.01, bias=0)] - return init_cfg +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import Optional, Sequence, Tuple, Union + +import numpy as np +import torch +from torch import Tensor, nn + +from mmpose.evaluation.functional import keypoint_pck_accuracy +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, OptConfigType, OptSampleList, + Predictions) +from ..base_head import BaseHead + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class TrajectoryRegressionHead(BaseHead): + """Trajectory Regression head of `VideoPose3D`_ by Dario et al (CVPR'2019). + + Args: + in_channels (int | sequence[int]): Number of input channels + num_joints (int): Number of joints + loss (Config): Config for trajectory loss. Defaults to use + :class:`MPJPELoss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + .. _`VideoPose3D`: https://arxiv.org/abs/1811.11742 + """ + + _version = 2 + + def __init__(self, + in_channels: Union[int, Sequence[int]], + num_joints: int, + loss: ConfigType = dict( + type='MPJPELoss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.in_channels = in_channels + self.num_joints = num_joints + self.loss_module = MODELS.build(loss) + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + # Define fully-connected layers + self.conv = nn.Conv1d(in_channels, self.num_joints * 3, 1) + + def forward(self, feats: Tuple[Tensor]) -> Tensor: + """Forward the network. The input is multi scale feature maps and the + output is the coordinates. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + Tensor: output coordinates(and sigmas[optional]). + """ + x = feats[-1] + + x = self.conv(x) + + return x.reshape(-1, self.num_joints, 3) + + def predict(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from outputs. + + Returns: + preds (sequence[InstanceData]): Prediction results. + Each contains the following fields: + + - keypoints: Predicted keypoints of shape (B, N, K, D). + - keypoint_scores: Scores of predicted keypoints of shape + (B, N, K). 
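# A minimal shape sketch (not part of the patch) shared by the temporal and
# trajectory regression heads: the last feature map passes through a 1x1
# Conv1d and is reshaped to (B, K, 3) 3D joint coordinates. A feature of shape
# (B, C, T) with T=1 (receptive field already collapsed by the temporal
# backbone) is assumed; all sizes are illustrative.
import torch
from torch import nn

B, C, K = 4, 1024, 17
conv = nn.Conv1d(C, K * 3, 1)            # same shape as `self.conv` in these heads
feat = torch.randn(B, C, 1)              # stand-in for feats[-1]
out = conv(feat).reshape(-1, K, 3)       # -> (B, K, 3)
print(out.shape)                         # torch.Size([4, 17, 3])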
+ """ + + batch_coords = self.forward(feats) # (B, K, D) + + # Restore global position with target_root + target_root = batch_data_samples[0].metainfo.get('target_root', None) + if target_root is not None: + target_root = torch.stack([ + torch.from_numpy(b.metainfo['target_root']) + for b in batch_data_samples + ]) + else: + target_root = torch.stack([ + torch.empty((0), dtype=torch.float32) + for _ in batch_data_samples[0].metainfo + ]) + + preds = self.decode((batch_coords, target_root)) + + return preds + + def loss(self, + inputs: Union[Tensor, Tuple[Tensor]], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + pred_outputs = self.forward(inputs) + + lifting_target_label = torch.cat([ + d.gt_instance_labels.lifting_target_label + for d in batch_data_samples + ]) + trajectory_weights = torch.cat([ + d.gt_instance_labels.trajectory_weights for d in batch_data_samples + ]) + + # calculate losses + losses = dict() + loss = self.loss_module(pred_outputs, lifting_target_label, + trajectory_weights.unsqueeze(-1)) + + losses.update(loss_traj=loss) + + # calculate accuracy + _, avg_acc, _ = keypoint_pck_accuracy( + pred=to_numpy(pred_outputs), + gt=to_numpy(lifting_target_label), + mask=to_numpy(trajectory_weights) > 0, + thr=0.05, + norm_factor=np.ones((pred_outputs.size(0), 3), dtype=np.float32)) + + mpjpe_traj = torch.tensor(avg_acc, device=lifting_target_label.device) + losses.update(mpjpe_traj=mpjpe_traj) + + return losses + + @property + def default_init_cfg(self): + init_cfg = [dict(type='Normal', layer=['Linear'], std=0.01, bias=0)] + return init_cfg diff --git a/mmpose/models/losses/__init__.py b/mmpose/models/losses/__init__.py index f21071e156..db989e969e 100644 --- a/mmpose/models/losses/__init__.py +++ b/mmpose/models/losses/__init__.py @@ -1,17 +1,17 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .ae_loss import AssociativeEmbeddingLoss -from .classification_loss import BCELoss, JSDiscretLoss, KLDiscretLoss -from .heatmap_loss import (AdaptiveWingLoss, KeypointMSELoss, - KeypointOHKMMSELoss) -from .loss_wrappers import CombinedLoss, MultipleLossWrapper -from .regression_loss import (BoneLoss, L1Loss, MPJPELoss, MSELoss, RLELoss, - SemiSupervisionLoss, SmoothL1Loss, - SoftWeightSmoothL1Loss, SoftWingLoss, WingLoss) - -__all__ = [ - 'KeypointMSELoss', 'KeypointOHKMMSELoss', 'SmoothL1Loss', 'WingLoss', - 'MPJPELoss', 'MSELoss', 'L1Loss', 'BCELoss', 'BoneLoss', - 'SemiSupervisionLoss', 'SoftWingLoss', 'AdaptiveWingLoss', 'RLELoss', - 'KLDiscretLoss', 'MultipleLossWrapper', 'JSDiscretLoss', 'CombinedLoss', - 'AssociativeEmbeddingLoss', 'SoftWeightSmoothL1Loss' -] +# Copyright (c) OpenMMLab. All rights reserved. 
+from .ae_loss import AssociativeEmbeddingLoss +from .classification_loss import BCELoss, JSDiscretLoss, KLDiscretLoss +from .heatmap_loss import (AdaptiveWingLoss, KeypointMSELoss, + KeypointOHKMMSELoss) +from .loss_wrappers import CombinedLoss, MultipleLossWrapper +from .regression_loss import (BoneLoss, L1Loss, MPJPELoss, MSELoss, RLELoss, + SemiSupervisionLoss, SmoothL1Loss, + SoftWeightSmoothL1Loss, SoftWingLoss, WingLoss) + +__all__ = [ + 'KeypointMSELoss', 'KeypointOHKMMSELoss', 'SmoothL1Loss', 'WingLoss', + 'MPJPELoss', 'MSELoss', 'L1Loss', 'BCELoss', 'BoneLoss', + 'SemiSupervisionLoss', 'SoftWingLoss', 'AdaptiveWingLoss', 'RLELoss', + 'KLDiscretLoss', 'MultipleLossWrapper', 'JSDiscretLoss', 'CombinedLoss', + 'AssociativeEmbeddingLoss', 'SoftWeightSmoothL1Loss' +] diff --git a/mmpose/models/losses/ae_loss.py b/mmpose/models/losses/ae_loss.py index 1f1e08181b..49ff745f58 100644 --- a/mmpose/models/losses/ae_loss.py +++ b/mmpose/models/losses/ae_loss.py @@ -1,123 +1,123 @@ -# Copyright (c) OpenMMLab. All rights reserved. - -from typing import List, Union - -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch import Tensor - -from mmpose.registry import MODELS - - -@MODELS.register_module() -class AssociativeEmbeddingLoss(nn.Module): - """Associative Embedding loss. - - Details can be found in - `Associative Embedding `_ - - Note: - - - batch size: B - - instance number: N - - keypoint number: K - - keypoint dimension: D - - embedding tag dimension: L - - heatmap size: [W, H] - - Args: - loss_weight (float): Weight of the loss. Defaults to 1.0 - push_loss_factor (float): A factor that controls the weight between - the push loss and the pull loss. Defaults to 0.5 - """ - - def __init__(self, - loss_weight: float = 1.0, - push_loss_factor: float = 0.5) -> None: - super().__init__() - self.loss_weight = loss_weight - self.push_loss_factor = push_loss_factor - - def _ae_loss_per_image(self, tags: Tensor, keypoint_indices: Tensor): - """Compute associative embedding loss for one image. - - Args: - tags (Tensor): Tagging heatmaps in shape (K*L, H, W) - keypoint_indices (Tensor): Ground-truth keypint position indices - in shape (N, K, 2) - """ - K = keypoint_indices.shape[1] - C, H, W = tags.shape - L = C // K - - tags = tags.view(L, K, H * W) - instance_tags = [] - instance_kpt_tags = [] - - for keypoint_indices_n in keypoint_indices: - _kpt_tags = [] - for k in range(K): - if keypoint_indices_n[k, 1]: - _kpt_tags.append(tags[:, k, keypoint_indices_n[k, 0]]) - - if _kpt_tags: - kpt_tags = torch.stack(_kpt_tags) - instance_kpt_tags.append(kpt_tags) - instance_tags.append(kpt_tags.mean(dim=0)) - - N = len(instance_kpt_tags) # number of instances with valid keypoints - - if N == 0: - pull_loss = tags.new_zeros(size=(), requires_grad=True) - push_loss = tags.new_zeros(size=(), requires_grad=True) - else: - pull_loss = sum( - F.mse_loss(_kpt_tags, _tag.expand_as(_kpt_tags)) - for (_kpt_tags, _tag) in zip(instance_kpt_tags, instance_tags)) - - if N == 1: - push_loss = tags.new_zeros(size=(), requires_grad=True) - else: - tag_mat = torch.stack(instance_tags) # (N, L) - diff = tag_mat[None] - tag_mat[:, None] # (N, N, L) - push_loss = torch.sum(torch.exp(-diff.pow(2))) - - # normalization - eps = 1e-6 - pull_loss = pull_loss / (N + eps) - push_loss = push_loss / ((N - 1) * N + eps) - - return pull_loss, push_loss - - def forward(self, tags: Tensor, keypoint_indices: Union[List[Tensor], - Tensor]): - """Compute associative embedding loss on a batch of data. 
- - Args: - tags (Tensor): Tagging heatmaps in shape (B, L*K, H, W) - keypoint_indices (Tensor|List[Tensor]): Ground-truth keypint - position indices represented by a Tensor in shape - (B, N, K, 2), or a list of B Tensors in shape (N_i, K, 2) - Each keypoint's index is represented as [i, v], where i is the - position index in the heatmap (:math:`i=y*w+x`) and v is the - visibility - - Returns: - tuple: - - pull_loss (Tensor) - - push_loss (Tensor) - """ - - assert tags.shape[0] == len(keypoint_indices) - - pull_loss = 0. - push_loss = 0. - - for i in range(tags.shape[0]): - _pull, _push = self._ae_loss_per_image(tags[i], - keypoint_indices[i]) - pull_loss += _pull * self.loss_weight - push_loss += _push * self.loss_weight * self.push_loss_factor - - return pull_loss, push_loss +# Copyright (c) OpenMMLab. All rights reserved. + +from typing import List, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor + +from mmpose.registry import MODELS + + +@MODELS.register_module() +class AssociativeEmbeddingLoss(nn.Module): + """Associative Embedding loss. + + Details can be found in + `Associative Embedding `_ + + Note: + + - batch size: B + - instance number: N + - keypoint number: K + - keypoint dimension: D + - embedding tag dimension: L + - heatmap size: [W, H] + + Args: + loss_weight (float): Weight of the loss. Defaults to 1.0 + push_loss_factor (float): A factor that controls the weight between + the push loss and the pull loss. Defaults to 0.5 + """ + + def __init__(self, + loss_weight: float = 1.0, + push_loss_factor: float = 0.5) -> None: + super().__init__() + self.loss_weight = loss_weight + self.push_loss_factor = push_loss_factor + + def _ae_loss_per_image(self, tags: Tensor, keypoint_indices: Tensor): + """Compute associative embedding loss for one image. + + Args: + tags (Tensor): Tagging heatmaps in shape (K*L, H, W) + keypoint_indices (Tensor): Ground-truth keypint position indices + in shape (N, K, 2) + """ + K = keypoint_indices.shape[1] + C, H, W = tags.shape + L = C // K + + tags = tags.view(L, K, H * W) + instance_tags = [] + instance_kpt_tags = [] + + for keypoint_indices_n in keypoint_indices: + _kpt_tags = [] + for k in range(K): + if keypoint_indices_n[k, 1]: + _kpt_tags.append(tags[:, k, keypoint_indices_n[k, 0]]) + + if _kpt_tags: + kpt_tags = torch.stack(_kpt_tags) + instance_kpt_tags.append(kpt_tags) + instance_tags.append(kpt_tags.mean(dim=0)) + + N = len(instance_kpt_tags) # number of instances with valid keypoints + + if N == 0: + pull_loss = tags.new_zeros(size=(), requires_grad=True) + push_loss = tags.new_zeros(size=(), requires_grad=True) + else: + pull_loss = sum( + F.mse_loss(_kpt_tags, _tag.expand_as(_kpt_tags)) + for (_kpt_tags, _tag) in zip(instance_kpt_tags, instance_tags)) + + if N == 1: + push_loss = tags.new_zeros(size=(), requires_grad=True) + else: + tag_mat = torch.stack(instance_tags) # (N, L) + diff = tag_mat[None] - tag_mat[:, None] # (N, N, L) + push_loss = torch.sum(torch.exp(-diff.pow(2))) + + # normalization + eps = 1e-6 + pull_loss = pull_loss / (N + eps) + push_loss = push_loss / ((N - 1) * N + eps) + + return pull_loss, push_loss + + def forward(self, tags: Tensor, keypoint_indices: Union[List[Tensor], + Tensor]): + """Compute associative embedding loss on a batch of data. 
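# A simplified numeric sketch (not part of the patch) of the per-image pull
# and push terms computed in `_ae_loss_per_image` above, assuming a 1-D
# embedding (L=1) and two instances whose valid keypoint tags have already
# been gathered; the tag values are illustrative.
import torch
import torch.nn.functional as F

instance_kpt_tags = [torch.tensor([[0.9], [1.1], [1.0]]),   # instance 0: (K_valid, L)
                     torch.tensor([[3.0], [2.8]])]          # instance 1
instance_tags = [t.mean(dim=0) for t in instance_kpt_tags]  # reference tag per instance

N = len(instance_kpt_tags)
pull = sum(F.mse_loss(t, ref.expand_as(t))
           for t, ref in zip(instance_kpt_tags, instance_tags)) / N
tag_mat = torch.stack(instance_tags)                        # (N, L)
diff = tag_mat[None] - tag_mat[:, None]                     # (N, N, L)
push = torch.sum(torch.exp(-diff.pow(2))) / (N * (N - 1))
print(float(pull), float(push))   # ~0.008 (tags tight within instances), ~1.03
                                  # (the exp(0) diagonal adds N terms to push)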
+ + Args: + tags (Tensor): Tagging heatmaps in shape (B, L*K, H, W) + keypoint_indices (Tensor|List[Tensor]): Ground-truth keypint + position indices represented by a Tensor in shape + (B, N, K, 2), or a list of B Tensors in shape (N_i, K, 2) + Each keypoint's index is represented as [i, v], where i is the + position index in the heatmap (:math:`i=y*w+x`) and v is the + visibility + + Returns: + tuple: + - pull_loss (Tensor) + - push_loss (Tensor) + """ + + assert tags.shape[0] == len(keypoint_indices) + + pull_loss = 0. + push_loss = 0. + + for i in range(tags.shape[0]): + _pull, _push = self._ae_loss_per_image(tags[i], + keypoint_indices[i]) + pull_loss += _pull * self.loss_weight + push_loss += _push * self.loss_weight * self.push_loss_factor + + return pull_loss, push_loss diff --git a/mmpose/models/losses/classification_loss.py b/mmpose/models/losses/classification_loss.py index 4605acabd3..656ebf7379 100644 --- a/mmpose/models/losses/classification_loss.py +++ b/mmpose/models/losses/classification_loss.py @@ -1,218 +1,218 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F - -from mmpose.registry import MODELS - - -@MODELS.register_module() -class BCELoss(nn.Module): - """Binary Cross Entropy loss. - - Args: - use_target_weight (bool): Option to use weighted loss. - Different joint types may have different target weights. - loss_weight (float): Weight of the loss. Default: 1.0. - with_logits (bool): Whether to use BCEWithLogitsLoss. Default: False. - """ - - def __init__(self, - use_target_weight=False, - loss_weight=1., - with_logits=False): - super().__init__() - self.criterion = F.binary_cross_entropy if not with_logits\ - else F.binary_cross_entropy_with_logits - self.use_target_weight = use_target_weight - self.loss_weight = loss_weight - - def forward(self, output, target, target_weight=None): - """Forward function. - - Note: - - batch_size: N - - num_labels: K - - Args: - output (torch.Tensor[N, K]): Output classification. - target (torch.Tensor[N, K]): Target classification. - target_weight (torch.Tensor[N, K] or torch.Tensor[N]): - Weights across different labels. - """ - - if self.use_target_weight: - assert target_weight is not None - loss = self.criterion(output, target, reduction='none') - if target_weight.dim() == 1: - target_weight = target_weight[:, None] - loss = (loss * target_weight).mean() - else: - loss = self.criterion(output, target) - - return loss * self.loss_weight - - -@MODELS.register_module() -class JSDiscretLoss(nn.Module): - """Discrete JS Divergence loss for DSNT with Gaussian Heatmap. - - Modified from `the official implementation - `_. - - Args: - use_target_weight (bool): Option to use weighted loss. - Different joint types may have different target weights. - size_average (bool): Option to average the loss by the batch_size. - """ - - def __init__( - self, - use_target_weight=True, - size_average: bool = True, - ): - super(JSDiscretLoss, self).__init__() - self.use_target_weight = use_target_weight - self.size_average = size_average - self.kl_loss = nn.KLDivLoss(reduction='none') - - def kl(self, p, q): - """Kullback-Leibler Divergence.""" - - eps = 1e-24 - kl_values = self.kl_loss((q + eps).log(), p) - return kl_values - - def js(self, pred_hm, gt_hm): - """Jensen-Shannon Divergence.""" - - m = 0.5 * (pred_hm + gt_hm) - js_values = 0.5 * (self.kl(pred_hm, m) + self.kl(gt_hm, m)) - return js_values - - def forward(self, pred_hm, gt_hm, target_weight=None): - """Forward function. 
- - Args: - pred_hm (torch.Tensor[N, K, H, W]): Predicted heatmaps. - gt_hm (torch.Tensor[N, K, H, W]): Target heatmaps. - target_weight (torch.Tensor[N, K] or torch.Tensor[N]): - Weights across different labels. - - Returns: - torch.Tensor: Loss value. - """ - - if self.use_target_weight: - assert target_weight is not None - assert pred_hm.ndim >= target_weight.ndim - - for i in range(pred_hm.ndim - target_weight.ndim): - target_weight = target_weight.unsqueeze(-1) - - loss = self.js(pred_hm * target_weight, gt_hm * target_weight) - else: - loss = self.js(pred_hm, gt_hm) - - if self.size_average: - loss /= len(gt_hm) - - return loss.sum() - - -@MODELS.register_module() -class KLDiscretLoss(nn.Module): - """Discrete KL Divergence loss for SimCC with Gaussian Label Smoothing. - Modified from `the official implementation. - - `_. - Args: - beta (float): Temperature factor of Softmax. - label_softmax (bool): Whether to use Softmax on labels. - use_target_weight (bool): Option to use weighted loss. - Different joint types may have different target weights. - """ - - def __init__(self, beta=1.0, label_softmax=False, use_target_weight=True): - super(KLDiscretLoss, self).__init__() - self.beta = beta - self.label_softmax = label_softmax - self.use_target_weight = use_target_weight - - self.log_softmax = nn.LogSoftmax(dim=1) - self.kl_loss = nn.KLDivLoss(reduction='none') - - def criterion(self, dec_outs, labels): - """Criterion function.""" - log_pt = self.log_softmax(dec_outs * self.beta) - if self.label_softmax: - labels = F.softmax(labels * self.beta, dim=1) - loss = torch.mean(self.kl_loss(log_pt, labels), dim=1) - return loss - - def forward(self, pred_simcc, gt_simcc, target_weight): - """Forward function. - - Args: - pred_simcc (Tuple[Tensor, Tensor]): Predicted SimCC vectors of - x-axis and y-axis. - gt_simcc (Tuple[Tensor, Tensor]): Target representations. - target_weight (torch.Tensor[N, K] or torch.Tensor[N]): - Weights across different labels. - """ - num_joints = pred_simcc[0].size(1) - loss = 0 - - if self.use_target_weight: - weight = target_weight.reshape(-1) - else: - weight = 1. - - for pred, target in zip(pred_simcc, gt_simcc): - pred = pred.reshape(-1, pred.size(-1)) - target = target.reshape(-1, target.size(-1)) - - loss += self.criterion(pred, target).mul(weight).sum() - - return loss / num_joints - - -@MODELS.register_module() -class InfoNCELoss(nn.Module): - """InfoNCE loss for training a discriminative representation space with a - contrastive manner. - - `Representation Learning with Contrastive Predictive Coding - arXiv: `_. - - Args: - temperature (float, optional): The temperature to use in the softmax - function. Higher temperatures lead to softer probability - distributions. Defaults to 1.0. - loss_weight (float, optional): The weight to apply to the loss. - Defaults to 1.0. - """ - - def __init__(self, temperature: float = 1.0, loss_weight=1.0) -> None: - super(InfoNCELoss, self).__init__() - assert temperature > 0, f'the argument `temperature` must be ' \ - f'positive, but got {temperature}' - self.temp = temperature - self.loss_weight = loss_weight - - def forward(self, features: torch.Tensor) -> torch.Tensor: - """Computes the InfoNCE loss. - - Args: - features (Tensor): A tensor containing the feature - representations of different samples. - - Returns: - Tensor: A tensor of shape (1,) containing the InfoNCE loss. 
- """ - n = features.size(0) - features_norm = F.normalize(features, dim=1) - logits = features_norm.mm(features_norm.t()) / self.temp - targets = torch.arange(n, dtype=torch.long, device=features.device) - loss = F.cross_entropy(logits, targets, reduction='sum') - return loss * self.loss_weight +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F + +from mmpose.registry import MODELS + + +@MODELS.register_module() +class BCELoss(nn.Module): + """Binary Cross Entropy loss. + + Args: + use_target_weight (bool): Option to use weighted loss. + Different joint types may have different target weights. + loss_weight (float): Weight of the loss. Default: 1.0. + with_logits (bool): Whether to use BCEWithLogitsLoss. Default: False. + """ + + def __init__(self, + use_target_weight=False, + loss_weight=1., + with_logits=False): + super().__init__() + self.criterion = F.binary_cross_entropy if not with_logits\ + else F.binary_cross_entropy_with_logits + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + + def forward(self, output, target, target_weight=None): + """Forward function. + + Note: + - batch_size: N + - num_labels: K + + Args: + output (torch.Tensor[N, K]): Output classification. + target (torch.Tensor[N, K]): Target classification. + target_weight (torch.Tensor[N, K] or torch.Tensor[N]): + Weights across different labels. + """ + + if self.use_target_weight: + assert target_weight is not None + loss = self.criterion(output, target, reduction='none') + if target_weight.dim() == 1: + target_weight = target_weight[:, None] + loss = (loss * target_weight).mean() + else: + loss = self.criterion(output, target) + + return loss * self.loss_weight + + +@MODELS.register_module() +class JSDiscretLoss(nn.Module): + """Discrete JS Divergence loss for DSNT with Gaussian Heatmap. + + Modified from `the official implementation + `_. + + Args: + use_target_weight (bool): Option to use weighted loss. + Different joint types may have different target weights. + size_average (bool): Option to average the loss by the batch_size. + """ + + def __init__( + self, + use_target_weight=True, + size_average: bool = True, + ): + super(JSDiscretLoss, self).__init__() + self.use_target_weight = use_target_weight + self.size_average = size_average + self.kl_loss = nn.KLDivLoss(reduction='none') + + def kl(self, p, q): + """Kullback-Leibler Divergence.""" + + eps = 1e-24 + kl_values = self.kl_loss((q + eps).log(), p) + return kl_values + + def js(self, pred_hm, gt_hm): + """Jensen-Shannon Divergence.""" + + m = 0.5 * (pred_hm + gt_hm) + js_values = 0.5 * (self.kl(pred_hm, m) + self.kl(gt_hm, m)) + return js_values + + def forward(self, pred_hm, gt_hm, target_weight=None): + """Forward function. + + Args: + pred_hm (torch.Tensor[N, K, H, W]): Predicted heatmaps. + gt_hm (torch.Tensor[N, K, H, W]): Target heatmaps. + target_weight (torch.Tensor[N, K] or torch.Tensor[N]): + Weights across different labels. + + Returns: + torch.Tensor: Loss value. 
+ """ + + if self.use_target_weight: + assert target_weight is not None + assert pred_hm.ndim >= target_weight.ndim + + for i in range(pred_hm.ndim - target_weight.ndim): + target_weight = target_weight.unsqueeze(-1) + + loss = self.js(pred_hm * target_weight, gt_hm * target_weight) + else: + loss = self.js(pred_hm, gt_hm) + + if self.size_average: + loss /= len(gt_hm) + + return loss.sum() + + +@MODELS.register_module() +class KLDiscretLoss(nn.Module): + """Discrete KL Divergence loss for SimCC with Gaussian Label Smoothing. + Modified from `the official implementation. + + `_. + Args: + beta (float): Temperature factor of Softmax. + label_softmax (bool): Whether to use Softmax on labels. + use_target_weight (bool): Option to use weighted loss. + Different joint types may have different target weights. + """ + + def __init__(self, beta=1.0, label_softmax=False, use_target_weight=True): + super(KLDiscretLoss, self).__init__() + self.beta = beta + self.label_softmax = label_softmax + self.use_target_weight = use_target_weight + + self.log_softmax = nn.LogSoftmax(dim=1) + self.kl_loss = nn.KLDivLoss(reduction='none') + + def criterion(self, dec_outs, labels): + """Criterion function.""" + log_pt = self.log_softmax(dec_outs * self.beta) + if self.label_softmax: + labels = F.softmax(labels * self.beta, dim=1) + loss = torch.mean(self.kl_loss(log_pt, labels), dim=1) + return loss + + def forward(self, pred_simcc, gt_simcc, target_weight): + """Forward function. + + Args: + pred_simcc (Tuple[Tensor, Tensor]): Predicted SimCC vectors of + x-axis and y-axis. + gt_simcc (Tuple[Tensor, Tensor]): Target representations. + target_weight (torch.Tensor[N, K] or torch.Tensor[N]): + Weights across different labels. + """ + num_joints = pred_simcc[0].size(1) + loss = 0 + + if self.use_target_weight: + weight = target_weight.reshape(-1) + else: + weight = 1. + + for pred, target in zip(pred_simcc, gt_simcc): + pred = pred.reshape(-1, pred.size(-1)) + target = target.reshape(-1, target.size(-1)) + + loss += self.criterion(pred, target).mul(weight).sum() + + return loss / num_joints + + +@MODELS.register_module() +class InfoNCELoss(nn.Module): + """InfoNCE loss for training a discriminative representation space with a + contrastive manner. + + `Representation Learning with Contrastive Predictive Coding + arXiv: `_. + + Args: + temperature (float, optional): The temperature to use in the softmax + function. Higher temperatures lead to softer probability + distributions. Defaults to 1.0. + loss_weight (float, optional): The weight to apply to the loss. + Defaults to 1.0. + """ + + def __init__(self, temperature: float = 1.0, loss_weight=1.0) -> None: + super(InfoNCELoss, self).__init__() + assert temperature > 0, f'the argument `temperature` must be ' \ + f'positive, but got {temperature}' + self.temp = temperature + self.loss_weight = loss_weight + + def forward(self, features: torch.Tensor) -> torch.Tensor: + """Computes the InfoNCE loss. + + Args: + features (Tensor): A tensor containing the feature + representations of different samples. + + Returns: + Tensor: A tensor of shape (1,) containing the InfoNCE loss. 
+ """ + n = features.size(0) + features_norm = F.normalize(features, dim=1) + logits = features_norm.mm(features_norm.t()) / self.temp + targets = torch.arange(n, dtype=torch.long, device=features.device) + loss = F.cross_entropy(logits, targets, reduction='sum') + return loss * self.loss_weight diff --git a/mmpose/models/losses/heatmap_loss.py b/mmpose/models/losses/heatmap_loss.py index ffe5cd1e80..8a73579007 100644 --- a/mmpose/models/losses/heatmap_loss.py +++ b/mmpose/models/losses/heatmap_loss.py @@ -1,455 +1,455 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Optional - -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch import Tensor - -from mmpose.registry import MODELS - - -@MODELS.register_module() -class KeypointMSELoss(nn.Module): - """MSE loss for heatmaps. - - Args: - use_target_weight (bool): Option to use weighted MSE loss. - Different joint types may have different target weights. - Defaults to ``False`` - skip_empty_channel (bool): If ``True``, heatmap channels with no - non-zero value (which means no visible ground-truth keypoint - in the image) will not be used to calculate the loss. Defaults to - ``False`` - loss_weight (float): Weight of the loss. Defaults to 1.0 - """ - - def __init__(self, - use_target_weight: bool = False, - skip_empty_channel: bool = False, - loss_weight: float = 1.): - super().__init__() - self.use_target_weight = use_target_weight - self.skip_empty_channel = skip_empty_channel - self.loss_weight = loss_weight - - def forward(self, - output: Tensor, - target: Tensor, - target_weights: Optional[Tensor] = None, - mask: Optional[Tensor] = None) -> Tensor: - """Forward function of loss. - - Note: - - batch_size: B - - num_keypoints: K - - heatmaps height: H - - heatmaps weight: W - - Args: - output (Tensor): The output heatmaps with shape [B, K, H, W] - target (Tensor): The target heatmaps with shape [B, K, H, W] - target_weights (Tensor, optional): The target weights of differet - keypoints, with shape [B, K] (keypoint-wise) or - [B, K, H, W] (pixel-wise). - mask (Tensor, optional): The masks of valid heatmap pixels in - shape [B, K, H, W] or [B, 1, H, W]. If ``None``, no mask will - be applied. Defaults to ``None`` - - Returns: - Tensor: The calculated loss. - """ - - _mask = self._get_mask(target, target_weights, mask) - if _mask is None: - loss = F.mse_loss(output, target) - else: - _loss = F.mse_loss(output, target, reduction='none') - loss = (_loss * _mask).mean() - - return loss * self.loss_weight - - def _get_mask(self, target: Tensor, target_weights: Optional[Tensor], - mask: Optional[Tensor]) -> Optional[Tensor]: - """Generate the heatmap mask w.r.t. the given mask, target weight and - `skip_empty_channel` setting. - - Returns: - Tensor: The mask in shape (B, K, *) or ``None`` if no mask is - needed. - """ - # Given spatial mask - if mask is not None: - # check mask has matching type with target - assert (mask.ndim == target.ndim and all( - d_m == d_t or d_m == 1 - for d_m, d_t in zip(mask.shape, target.shape))), ( - f'mask and target have mismatched shapes {mask.shape} v.s.' - f'{target.shape}') - - # Mask by target weights (keypoint-wise mask) - if target_weights is not None: - # check target weight has matching shape with target - assert (target_weights.ndim in (2, 4) and target_weights.shape - == target.shape[:target_weights.ndim]), ( - 'target_weights and target have mismatched shapes ' - f'{target_weights.shape} v.s. 
{target.shape}') - - ndim_pad = target.ndim - target_weights.ndim - _mask = target_weights.view(target_weights.shape + - (1, ) * ndim_pad) - - if mask is None: - mask = _mask - else: - mask = mask * _mask - - # Mask by ``skip_empty_channel`` - if self.skip_empty_channel: - _mask = (target != 0).flatten(2).any(dim=2) - ndim_pad = target.ndim - _mask.ndim - _mask = _mask.view(_mask.shape + (1, ) * ndim_pad) - - if mask is None: - mask = _mask - else: - mask = mask * _mask - - return mask - - -@MODELS.register_module() -class CombinedTargetMSELoss(nn.Module): - """MSE loss for combined target. - - CombinedTarget: The combination of classification target - (response map) and regression target (offset map). - Paper ref: Huang et al. The Devil is in the Details: Delving into - Unbiased Data Processing for Human Pose Estimation (CVPR 2020). - - Args: - use_target_weight (bool): Option to use weighted MSE loss. - Different joint types may have different target weights. - Defaults to ``False`` - loss_weight (float): Weight of the loss. Defaults to 1.0 - """ - - def __init__(self, - use_target_weight: bool = False, - loss_weight: float = 1.): - super().__init__() - self.criterion = nn.MSELoss(reduction='mean') - self.use_target_weight = use_target_weight - self.loss_weight = loss_weight - - def forward(self, output: Tensor, target: Tensor, - target_weights: Tensor) -> Tensor: - """Forward function of loss. - - Note: - - batch_size: B - - num_channels: C - - heatmaps height: H - - heatmaps weight: W - - num_keypoints: K - Here, C = 3 * K - - Args: - output (Tensor): The output feature maps with shape [B, C, H, W]. - target (Tensor): The target feature maps with shape [B, C, H, W]. - target_weights (Tensor): The target weights of differet keypoints, - with shape [B, K]. - - Returns: - Tensor: The calculated loss. - """ - batch_size = output.size(0) - num_channels = output.size(1) - heatmaps_pred = output.reshape( - (batch_size, num_channels, -1)).split(1, 1) - heatmaps_gt = target.reshape( - (batch_size, num_channels, -1)).split(1, 1) - loss = 0. - num_joints = num_channels // 3 - for idx in range(num_joints): - heatmap_pred = heatmaps_pred[idx * 3].squeeze() - heatmap_gt = heatmaps_gt[idx * 3].squeeze() - offset_x_pred = heatmaps_pred[idx * 3 + 1].squeeze() - offset_x_gt = heatmaps_gt[idx * 3 + 1].squeeze() - offset_y_pred = heatmaps_pred[idx * 3 + 2].squeeze() - offset_y_gt = heatmaps_gt[idx * 3 + 2].squeeze() - if self.use_target_weight: - target_weight = target_weights[:, idx, None] - heatmap_pred = heatmap_pred * target_weight - heatmap_gt = heatmap_gt * target_weight - # classification loss - loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt) - # regression loss - loss += 0.5 * self.criterion(heatmap_gt * offset_x_pred, - heatmap_gt * offset_x_gt) - loss += 0.5 * self.criterion(heatmap_gt * offset_y_pred, - heatmap_gt * offset_y_gt) - return loss / num_joints * self.loss_weight - - -@MODELS.register_module() -class KeypointOHKMMSELoss(nn.Module): - """MSE loss with online hard keypoint mining. - - Args: - use_target_weight (bool): Option to use weighted MSE loss. - Different joint types may have different target weights. - Defaults to ``False`` - topk (int): Only top k joint losses are kept. Defaults to 8 - loss_weight (float): Weight of the loss. 
Defaults to 1.0 - """ - - def __init__(self, - use_target_weight: bool = False, - topk: int = 8, - loss_weight: float = 1.): - super().__init__() - assert topk > 0 - self.criterion = nn.MSELoss(reduction='none') - self.use_target_weight = use_target_weight - self.topk = topk - self.loss_weight = loss_weight - - def _ohkm(self, losses: Tensor) -> Tensor: - """Online hard keypoint mining. - - Note: - - batch_size: B - - num_keypoints: K - - Args: - loss (Tensor): The losses with shape [B, K] - - Returns: - Tensor: The calculated loss. - """ - ohkm_loss = 0. - B = losses.shape[0] - for i in range(B): - sub_loss = losses[i] - _, topk_idx = torch.topk( - sub_loss, k=self.topk, dim=0, sorted=False) - tmp_loss = torch.gather(sub_loss, 0, topk_idx) - ohkm_loss += torch.sum(tmp_loss) / self.topk - ohkm_loss /= B - return ohkm_loss - - def forward(self, output: Tensor, target: Tensor, - target_weights: Tensor) -> Tensor: - """Forward function of loss. - - Note: - - batch_size: B - - num_keypoints: K - - heatmaps height: H - - heatmaps weight: W - - Args: - output (Tensor): The output heatmaps with shape [B, K, H, W]. - target (Tensor): The target heatmaps with shape [B, K, H, W]. - target_weights (Tensor): The target weights of differet keypoints, - with shape [B, K]. - - Returns: - Tensor: The calculated loss. - """ - num_keypoints = output.size(1) - if num_keypoints < self.topk: - raise ValueError(f'topk ({self.topk}) should not be ' - f'larger than num_keypoints ({num_keypoints}).') - - losses = [] - for idx in range(num_keypoints): - if self.use_target_weight: - target_weight = target_weights[:, idx, None, None] - losses.append( - self.criterion(output[:, idx] * target_weight, - target[:, idx] * target_weight)) - else: - losses.append(self.criterion(output[:, idx], target[:, idx])) - - losses = [loss.mean(dim=(1, 2)).unsqueeze(dim=1) for loss in losses] - losses = torch.cat(losses, dim=1) - - return self._ohkm(losses) * self.loss_weight - - -@MODELS.register_module() -class AdaptiveWingLoss(nn.Module): - """Adaptive wing loss. paper ref: 'Adaptive Wing Loss for Robust Face - Alignment via Heatmap Regression' Wang et al. ICCV'2019. - - Args: - alpha (float), omega (float), epsilon (float), theta (float) - are hyper-parameters. - use_target_weight (bool): Option to use weighted MSE loss. - Different joint types may have different target weights. - loss_weight (float): Weight of the loss. Default: 1.0. - """ - - def __init__(self, - alpha=2.1, - omega=14, - epsilon=1, - theta=0.5, - use_target_weight=False, - loss_weight=1.): - super().__init__() - self.alpha = float(alpha) - self.omega = float(omega) - self.epsilon = float(epsilon) - self.theta = float(theta) - self.use_target_weight = use_target_weight - self.loss_weight = loss_weight - - def criterion(self, pred, target): - """Criterion of wingloss. - - Note: - batch_size: N - num_keypoints: K - - Args: - pred (torch.Tensor[NxKxHxW]): Predicted heatmaps. - target (torch.Tensor[NxKxHxW]): Target heatmaps. 
- """ - H, W = pred.shape[2:4] - delta = (target - pred).abs() - - A = self.omega * ( - 1 / (1 + torch.pow(self.theta / self.epsilon, self.alpha - target)) - ) * (self.alpha - target) * (torch.pow( - self.theta / self.epsilon, - self.alpha - target - 1)) * (1 / self.epsilon) - C = self.theta * A - self.omega * torch.log( - 1 + torch.pow(self.theta / self.epsilon, self.alpha - target)) - - losses = torch.where( - delta < self.theta, - self.omega * - torch.log(1 + - torch.pow(delta / self.epsilon, self.alpha - target)), - A * delta - C) - - return torch.mean(losses) - - def forward(self, - output: Tensor, - target: Tensor, - target_weights: Optional[Tensor] = None): - """Forward function. - - Note: - batch_size: N - num_keypoints: K - - Args: - output (torch.Tensor[N, K, H, W]): Output heatmaps. - target (torch.Tensor[N, K, H, W]): Target heatmaps. - target_weight (torch.Tensor[N, K]): - Weights across different joint types. - """ - if self.use_target_weight: - assert (target_weights.ndim in (2, 4) and target_weights.shape - == target.shape[:target_weights.ndim]), ( - 'target_weights and target have mismatched shapes ' - f'{target_weights.shape} v.s. {target.shape}') - - ndim_pad = target.ndim - target_weights.ndim - target_weights = target_weights.view(target_weights.shape + - (1, ) * ndim_pad) - loss = self.criterion(output * target_weights, - target * target_weights) - else: - loss = self.criterion(output, target) - - return loss * self.loss_weight - - -@MODELS.register_module() -class FocalHeatmapLoss(KeypointMSELoss): - """A class for calculating the modified focal loss for heatmap prediction. - - This loss function is exactly the same as the one used in CornerNet. It - runs faster and costs a little bit more memory. - - `CornerNet: Detecting Objects as Paired Keypoints - arXiv: `_. - - Arguments: - alpha (int): The alpha parameter in the focal loss equation. - beta (int): The beta parameter in the focal loss equation. - use_target_weight (bool): Option to use weighted MSE loss. - Different joint types may have different target weights. - Defaults to ``False`` - skip_empty_channel (bool): If ``True``, heatmap channels with no - non-zero value (which means no visible ground-truth keypoint - in the image) will not be used to calculate the loss. Defaults to - ``False`` - loss_weight (float): Weight of the loss. Defaults to 1.0 - """ - - def __init__(self, - alpha: int = 2, - beta: int = 4, - use_target_weight: bool = False, - skip_empty_channel: bool = False, - loss_weight: float = 1.0): - super(FocalHeatmapLoss, self).__init__(use_target_weight, - skip_empty_channel, loss_weight) - self.alpha = alpha - self.beta = beta - - def forward(self, - output: Tensor, - target: Tensor, - target_weights: Optional[Tensor] = None, - mask: Optional[Tensor] = None) -> Tensor: - """Calculate the modified focal loss for heatmap prediction. - - Note: - - batch_size: B - - num_keypoints: K - - heatmaps height: H - - heatmaps weight: W - - Args: - output (Tensor): The output heatmaps with shape [B, K, H, W] - target (Tensor): The target heatmaps with shape [B, K, H, W] - target_weights (Tensor, optional): The target weights of differet - keypoints, with shape [B, K] (keypoint-wise) or - [B, K, H, W] (pixel-wise). - mask (Tensor, optional): The masks of valid heatmap pixels in - shape [B, K, H, W] or [B, 1, H, W]. If ``None``, no mask will - be applied. Defaults to ``None`` - - Returns: - Tensor: The calculated loss. 
- """ - _mask = self._get_mask(target, target_weights, mask) - - pos_inds = target.eq(1).float() - neg_inds = target.lt(1).float() - - if _mask is not None: - pos_inds = pos_inds * _mask - neg_inds = neg_inds * _mask - - neg_weights = torch.pow(1 - target, self.beta) - - pos_loss = torch.log(output) * torch.pow(1 - output, - self.alpha) * pos_inds - neg_loss = torch.log(1 - output) * torch.pow( - output, self.alpha) * neg_weights * neg_inds - - num_pos = pos_inds.float().sum() - if num_pos == 0: - loss = -neg_loss.sum() - else: - loss = -(pos_loss.sum() + neg_loss.sum()) / num_pos - return loss * self.loss_weight +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor + +from mmpose.registry import MODELS + + +@MODELS.register_module() +class KeypointMSELoss(nn.Module): + """MSE loss for heatmaps. + + Args: + use_target_weight (bool): Option to use weighted MSE loss. + Different joint types may have different target weights. + Defaults to ``False`` + skip_empty_channel (bool): If ``True``, heatmap channels with no + non-zero value (which means no visible ground-truth keypoint + in the image) will not be used to calculate the loss. Defaults to + ``False`` + loss_weight (float): Weight of the loss. Defaults to 1.0 + """ + + def __init__(self, + use_target_weight: bool = False, + skip_empty_channel: bool = False, + loss_weight: float = 1.): + super().__init__() + self.use_target_weight = use_target_weight + self.skip_empty_channel = skip_empty_channel + self.loss_weight = loss_weight + + def forward(self, + output: Tensor, + target: Tensor, + target_weights: Optional[Tensor] = None, + mask: Optional[Tensor] = None) -> Tensor: + """Forward function of loss. + + Note: + - batch_size: B + - num_keypoints: K + - heatmaps height: H + - heatmaps weight: W + + Args: + output (Tensor): The output heatmaps with shape [B, K, H, W] + target (Tensor): The target heatmaps with shape [B, K, H, W] + target_weights (Tensor, optional): The target weights of differet + keypoints, with shape [B, K] (keypoint-wise) or + [B, K, H, W] (pixel-wise). + mask (Tensor, optional): The masks of valid heatmap pixels in + shape [B, K, H, W] or [B, 1, H, W]. If ``None``, no mask will + be applied. Defaults to ``None`` + + Returns: + Tensor: The calculated loss. + """ + + _mask = self._get_mask(target, target_weights, mask) + if _mask is None: + loss = F.mse_loss(output, target) + else: + _loss = F.mse_loss(output, target, reduction='none') + loss = (_loss * _mask).mean() + + return loss * self.loss_weight + + def _get_mask(self, target: Tensor, target_weights: Optional[Tensor], + mask: Optional[Tensor]) -> Optional[Tensor]: + """Generate the heatmap mask w.r.t. the given mask, target weight and + `skip_empty_channel` setting. + + Returns: + Tensor: The mask in shape (B, K, *) or ``None`` if no mask is + needed. + """ + # Given spatial mask + if mask is not None: + # check mask has matching type with target + assert (mask.ndim == target.ndim and all( + d_m == d_t or d_m == 1 + for d_m, d_t in zip(mask.shape, target.shape))), ( + f'mask and target have mismatched shapes {mask.shape} v.s.' 
+ f'{target.shape}') + + # Mask by target weights (keypoint-wise mask) + if target_weights is not None: + # check target weight has matching shape with target + assert (target_weights.ndim in (2, 4) and target_weights.shape + == target.shape[:target_weights.ndim]), ( + 'target_weights and target have mismatched shapes ' + f'{target_weights.shape} v.s. {target.shape}') + + ndim_pad = target.ndim - target_weights.ndim + _mask = target_weights.view(target_weights.shape + + (1, ) * ndim_pad) + + if mask is None: + mask = _mask + else: + mask = mask * _mask + + # Mask by ``skip_empty_channel`` + if self.skip_empty_channel: + _mask = (target != 0).flatten(2).any(dim=2) + ndim_pad = target.ndim - _mask.ndim + _mask = _mask.view(_mask.shape + (1, ) * ndim_pad) + + if mask is None: + mask = _mask + else: + mask = mask * _mask + + return mask + + +@MODELS.register_module() +class CombinedTargetMSELoss(nn.Module): + """MSE loss for combined target. + + CombinedTarget: The combination of classification target + (response map) and regression target (offset map). + Paper ref: Huang et al. The Devil is in the Details: Delving into + Unbiased Data Processing for Human Pose Estimation (CVPR 2020). + + Args: + use_target_weight (bool): Option to use weighted MSE loss. + Different joint types may have different target weights. + Defaults to ``False`` + loss_weight (float): Weight of the loss. Defaults to 1.0 + """ + + def __init__(self, + use_target_weight: bool = False, + loss_weight: float = 1.): + super().__init__() + self.criterion = nn.MSELoss(reduction='mean') + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + + def forward(self, output: Tensor, target: Tensor, + target_weights: Tensor) -> Tensor: + """Forward function of loss. + + Note: + - batch_size: B + - num_channels: C + - heatmaps height: H + - heatmaps weight: W + - num_keypoints: K + Here, C = 3 * K + + Args: + output (Tensor): The output feature maps with shape [B, C, H, W]. + target (Tensor): The target feature maps with shape [B, C, H, W]. + target_weights (Tensor): The target weights of differet keypoints, + with shape [B, K]. + + Returns: + Tensor: The calculated loss. + """ + batch_size = output.size(0) + num_channels = output.size(1) + heatmaps_pred = output.reshape( + (batch_size, num_channels, -1)).split(1, 1) + heatmaps_gt = target.reshape( + (batch_size, num_channels, -1)).split(1, 1) + loss = 0. + num_joints = num_channels // 3 + for idx in range(num_joints): + heatmap_pred = heatmaps_pred[idx * 3].squeeze() + heatmap_gt = heatmaps_gt[idx * 3].squeeze() + offset_x_pred = heatmaps_pred[idx * 3 + 1].squeeze() + offset_x_gt = heatmaps_gt[idx * 3 + 1].squeeze() + offset_y_pred = heatmaps_pred[idx * 3 + 2].squeeze() + offset_y_gt = heatmaps_gt[idx * 3 + 2].squeeze() + if self.use_target_weight: + target_weight = target_weights[:, idx, None] + heatmap_pred = heatmap_pred * target_weight + heatmap_gt = heatmap_gt * target_weight + # classification loss + loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt) + # regression loss + loss += 0.5 * self.criterion(heatmap_gt * offset_x_pred, + heatmap_gt * offset_x_gt) + loss += 0.5 * self.criterion(heatmap_gt * offset_y_pred, + heatmap_gt * offset_y_gt) + return loss / num_joints * self.loss_weight + + +@MODELS.register_module() +class KeypointOHKMMSELoss(nn.Module): + """MSE loss with online hard keypoint mining. + + Args: + use_target_weight (bool): Option to use weighted MSE loss. + Different joint types may have different target weights. 
+ Defaults to ``False`` + topk (int): Only top k joint losses are kept. Defaults to 8 + loss_weight (float): Weight of the loss. Defaults to 1.0 + """ + + def __init__(self, + use_target_weight: bool = False, + topk: int = 8, + loss_weight: float = 1.): + super().__init__() + assert topk > 0 + self.criterion = nn.MSELoss(reduction='none') + self.use_target_weight = use_target_weight + self.topk = topk + self.loss_weight = loss_weight + + def _ohkm(self, losses: Tensor) -> Tensor: + """Online hard keypoint mining. + + Note: + - batch_size: B + - num_keypoints: K + + Args: + loss (Tensor): The losses with shape [B, K] + + Returns: + Tensor: The calculated loss. + """ + ohkm_loss = 0. + B = losses.shape[0] + for i in range(B): + sub_loss = losses[i] + _, topk_idx = torch.topk( + sub_loss, k=self.topk, dim=0, sorted=False) + tmp_loss = torch.gather(sub_loss, 0, topk_idx) + ohkm_loss += torch.sum(tmp_loss) / self.topk + ohkm_loss /= B + return ohkm_loss + + def forward(self, output: Tensor, target: Tensor, + target_weights: Tensor) -> Tensor: + """Forward function of loss. + + Note: + - batch_size: B + - num_keypoints: K + - heatmaps height: H + - heatmaps weight: W + + Args: + output (Tensor): The output heatmaps with shape [B, K, H, W]. + target (Tensor): The target heatmaps with shape [B, K, H, W]. + target_weights (Tensor): The target weights of differet keypoints, + with shape [B, K]. + + Returns: + Tensor: The calculated loss. + """ + num_keypoints = output.size(1) + if num_keypoints < self.topk: + raise ValueError(f'topk ({self.topk}) should not be ' + f'larger than num_keypoints ({num_keypoints}).') + + losses = [] + for idx in range(num_keypoints): + if self.use_target_weight: + target_weight = target_weights[:, idx, None, None] + losses.append( + self.criterion(output[:, idx] * target_weight, + target[:, idx] * target_weight)) + else: + losses.append(self.criterion(output[:, idx], target[:, idx])) + + losses = [loss.mean(dim=(1, 2)).unsqueeze(dim=1) for loss in losses] + losses = torch.cat(losses, dim=1) + + return self._ohkm(losses) * self.loss_weight + + +@MODELS.register_module() +class AdaptiveWingLoss(nn.Module): + """Adaptive wing loss. paper ref: 'Adaptive Wing Loss for Robust Face + Alignment via Heatmap Regression' Wang et al. ICCV'2019. + + Args: + alpha (float), omega (float), epsilon (float), theta (float) + are hyper-parameters. + use_target_weight (bool): Option to use weighted MSE loss. + Different joint types may have different target weights. + loss_weight (float): Weight of the loss. Default: 1.0. + """ + + def __init__(self, + alpha=2.1, + omega=14, + epsilon=1, + theta=0.5, + use_target_weight=False, + loss_weight=1.): + super().__init__() + self.alpha = float(alpha) + self.omega = float(omega) + self.epsilon = float(epsilon) + self.theta = float(theta) + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + + def criterion(self, pred, target): + """Criterion of wingloss. + + Note: + batch_size: N + num_keypoints: K + + Args: + pred (torch.Tensor[NxKxHxW]): Predicted heatmaps. + target (torch.Tensor[NxKxHxW]): Target heatmaps. 
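# Illustrative usage sketch (editorial addition, not part of the patched file;
# assumes an installed mmpose environment and arbitrary tensor shapes).
# AdaptiveWingLoss switches from the log branch to a linear branch at
# |target - pred| == theta, with A and C chosen so the two pieces meet there.
import torch
from mmpose.registry import MODELS

aw_loss = MODELS.build(
    dict(type='AdaptiveWingLoss', alpha=2.1, omega=14, epsilon=1, theta=0.5,
         use_target_weight=True))
B, K, H, W = 2, 17, 64, 48
pred = torch.rand(B, K, H, W)
gt = torch.rand(B, K, H, W)
weights = torch.ones(B, K)         # reshaped to [B, K, 1, 1] inside forward()
print(aw_loss(pred, gt, weights))  # scalar Tensor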
+ """ + H, W = pred.shape[2:4] + delta = (target - pred).abs() + + A = self.omega * ( + 1 / (1 + torch.pow(self.theta / self.epsilon, self.alpha - target)) + ) * (self.alpha - target) * (torch.pow( + self.theta / self.epsilon, + self.alpha - target - 1)) * (1 / self.epsilon) + C = self.theta * A - self.omega * torch.log( + 1 + torch.pow(self.theta / self.epsilon, self.alpha - target)) + + losses = torch.where( + delta < self.theta, + self.omega * + torch.log(1 + + torch.pow(delta / self.epsilon, self.alpha - target)), + A * delta - C) + + return torch.mean(losses) + + def forward(self, + output: Tensor, + target: Tensor, + target_weights: Optional[Tensor] = None): + """Forward function. + + Note: + batch_size: N + num_keypoints: K + + Args: + output (torch.Tensor[N, K, H, W]): Output heatmaps. + target (torch.Tensor[N, K, H, W]): Target heatmaps. + target_weight (torch.Tensor[N, K]): + Weights across different joint types. + """ + if self.use_target_weight: + assert (target_weights.ndim in (2, 4) and target_weights.shape + == target.shape[:target_weights.ndim]), ( + 'target_weights and target have mismatched shapes ' + f'{target_weights.shape} v.s. {target.shape}') + + ndim_pad = target.ndim - target_weights.ndim + target_weights = target_weights.view(target_weights.shape + + (1, ) * ndim_pad) + loss = self.criterion(output * target_weights, + target * target_weights) + else: + loss = self.criterion(output, target) + + return loss * self.loss_weight + + +@MODELS.register_module() +class FocalHeatmapLoss(KeypointMSELoss): + """A class for calculating the modified focal loss for heatmap prediction. + + This loss function is exactly the same as the one used in CornerNet. It + runs faster and costs a little bit more memory. + + `CornerNet: Detecting Objects as Paired Keypoints + arXiv: `_. + + Arguments: + alpha (int): The alpha parameter in the focal loss equation. + beta (int): The beta parameter in the focal loss equation. + use_target_weight (bool): Option to use weighted MSE loss. + Different joint types may have different target weights. + Defaults to ``False`` + skip_empty_channel (bool): If ``True``, heatmap channels with no + non-zero value (which means no visible ground-truth keypoint + in the image) will not be used to calculate the loss. Defaults to + ``False`` + loss_weight (float): Weight of the loss. Defaults to 1.0 + """ + + def __init__(self, + alpha: int = 2, + beta: int = 4, + use_target_weight: bool = False, + skip_empty_channel: bool = False, + loss_weight: float = 1.0): + super(FocalHeatmapLoss, self).__init__(use_target_weight, + skip_empty_channel, loss_weight) + self.alpha = alpha + self.beta = beta + + def forward(self, + output: Tensor, + target: Tensor, + target_weights: Optional[Tensor] = None, + mask: Optional[Tensor] = None) -> Tensor: + """Calculate the modified focal loss for heatmap prediction. + + Note: + - batch_size: B + - num_keypoints: K + - heatmaps height: H + - heatmaps weight: W + + Args: + output (Tensor): The output heatmaps with shape [B, K, H, W] + target (Tensor): The target heatmaps with shape [B, K, H, W] + target_weights (Tensor, optional): The target weights of differet + keypoints, with shape [B, K] (keypoint-wise) or + [B, K, H, W] (pixel-wise). + mask (Tensor, optional): The masks of valid heatmap pixels in + shape [B, K, H, W] or [B, 1, H, W]. If ``None``, no mask will + be applied. Defaults to ``None`` + + Returns: + Tensor: The calculated loss. 
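# Illustrative usage sketch (editorial addition, not part of the patched file;
# assumes an installed mmpose environment, arbitrary shapes). The CornerNet-
# style focal loss expects predictions strictly inside (0, 1) and Gaussian
# targets whose peak values equal exactly 1, which mark the positive pixels.
import torch
from mmpose.registry import MODELS

focal_loss = MODELS.build(dict(type='FocalHeatmapLoss', alpha=2, beta=4))
B, K, H, W = 2, 17, 64, 48
pred = torch.rand(B, K, H, W).clamp(1e-4, 1 - 1e-4)   # keep log() finite
gt = torch.zeros(B, K, H, W)
gt[..., H // 2, W // 2] = 1.0                         # one peak per channel
print(focal_loss(pred, gt))                           # scalar Tensor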
+ """ + _mask = self._get_mask(target, target_weights, mask) + + pos_inds = target.eq(1).float() + neg_inds = target.lt(1).float() + + if _mask is not None: + pos_inds = pos_inds * _mask + neg_inds = neg_inds * _mask + + neg_weights = torch.pow(1 - target, self.beta) + + pos_loss = torch.log(output) * torch.pow(1 - output, + self.alpha) * pos_inds + neg_loss = torch.log(1 - output) * torch.pow( + output, self.alpha) * neg_weights * neg_inds + + num_pos = pos_inds.float().sum() + if num_pos == 0: + loss = -neg_loss.sum() + else: + loss = -(pos_loss.sum() + neg_loss.sum()) / num_pos + return loss * self.loss_weight diff --git a/mmpose/models/losses/loss_wrappers.py b/mmpose/models/losses/loss_wrappers.py index d821661b48..431e15df9a 100644 --- a/mmpose/models/losses/loss_wrappers.py +++ b/mmpose/models/losses/loss_wrappers.py @@ -1,82 +1,82 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Dict - -import torch.nn as nn - -from mmpose.registry import MODELS -from mmpose.utils.typing import ConfigType - - -@MODELS.register_module() -class MultipleLossWrapper(nn.Module): - """A wrapper to collect multiple loss functions together and return a list - of losses in the same order. - - Args: - losses (list): List of Loss Config - """ - - def __init__(self, losses: list): - super().__init__() - self.num_losses = len(losses) - - loss_modules = [] - for loss_cfg in losses: - t_loss = MODELS.build(loss_cfg) - loss_modules.append(t_loss) - self.loss_modules = nn.ModuleList(loss_modules) - - def forward(self, input_list, target_list, keypoint_weights=None): - """Forward function. - - Note: - - batch_size: N - - num_keypoints: K - - dimension of keypoints: D (D=2 or D=3) - - Args: - input_list (List[Tensor]): List of inputs. - target_list (List[Tensor]): List of targets. - keypoint_weights (Tensor[N, K, D]): - Weights across different joint types. - """ - assert isinstance(input_list, list), '' - assert isinstance(target_list, list), '' - assert len(input_list) == len(target_list), '' - - losses = [] - for i in range(self.num_losses): - input_i = input_list[i] - target_i = target_list[i] - - loss_i = self.loss_modules[i](input_i, target_i, keypoint_weights) - losses.append(loss_i) - - return losses - - -@MODELS.register_module() -class CombinedLoss(nn.ModuleDict): - """A wrapper to combine multiple loss functions. These loss functions can - have different input type (e.g. heatmaps or regression values), and can - only be involed individually and explixitly. - - Args: - losses (Dict[str, ConfigType]): The names and configs of loss - functions to be wrapped - - Example:: - >>> heatmap_loss_cfg = dict(type='KeypointMSELoss') - >>> ae_loss_cfg = dict(type='AssociativeEmbeddingLoss') - >>> loss_module = CombinedLoss( - ... losses=dict( - ... heatmap_loss=heatmap_loss_cfg, - ... ae_loss=ae_loss_cfg)) - >>> loss_hm = loss_module.heatmap_loss(pred_heatmap, gt_heatmap) - >>> loss_ae = loss_module.ae_loss(pred_tags, keypoint_indices) - """ - - def __init__(self, losses: Dict[str, ConfigType]): - super().__init__() - for loss_name, loss_cfg in losses.items(): - self.add_module(loss_name, MODELS.build(loss_cfg)) +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict + +import torch.nn as nn + +from mmpose.registry import MODELS +from mmpose.utils.typing import ConfigType + + +@MODELS.register_module() +class MultipleLossWrapper(nn.Module): + """A wrapper to collect multiple loss functions together and return a list + of losses in the same order. 
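# Illustrative usage sketch (editorial addition, not part of the patched file;
# assumes an installed mmpose environment, arbitrary shapes). The wrapper
# builds one loss per config entry and applies the i-th loss to the i-th
# (input, target) pair, so it also doubles as a registry-style example for
# KeypointMSELoss.
import torch
from mmpose.registry import MODELS

wrapper = MODELS.build(
    dict(type='MultipleLossWrapper',
         losses=[dict(type='KeypointMSELoss'),
                 dict(type='KeypointMSELoss', use_target_weight=False)]))
B, K, H, W = 2, 17, 64, 48
inputs = [torch.rand(B, K, H, W), torch.rand(B, K, H, W)]
targets = [torch.rand(B, K, H, W), torch.rand(B, K, H, W)]
print(wrapper(inputs, targets))   # list of two scalar Tensors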
+ + Args: + losses (list): List of Loss Config + """ + + def __init__(self, losses: list): + super().__init__() + self.num_losses = len(losses) + + loss_modules = [] + for loss_cfg in losses: + t_loss = MODELS.build(loss_cfg) + loss_modules.append(t_loss) + self.loss_modules = nn.ModuleList(loss_modules) + + def forward(self, input_list, target_list, keypoint_weights=None): + """Forward function. + + Note: + - batch_size: N + - num_keypoints: K + - dimension of keypoints: D (D=2 or D=3) + + Args: + input_list (List[Tensor]): List of inputs. + target_list (List[Tensor]): List of targets. + keypoint_weights (Tensor[N, K, D]): + Weights across different joint types. + """ + assert isinstance(input_list, list), '' + assert isinstance(target_list, list), '' + assert len(input_list) == len(target_list), '' + + losses = [] + for i in range(self.num_losses): + input_i = input_list[i] + target_i = target_list[i] + + loss_i = self.loss_modules[i](input_i, target_i, keypoint_weights) + losses.append(loss_i) + + return losses + + +@MODELS.register_module() +class CombinedLoss(nn.ModuleDict): + """A wrapper to combine multiple loss functions. These loss functions can + have different input type (e.g. heatmaps or regression values), and can + only be involed individually and explixitly. + + Args: + losses (Dict[str, ConfigType]): The names and configs of loss + functions to be wrapped + + Example:: + >>> heatmap_loss_cfg = dict(type='KeypointMSELoss') + >>> ae_loss_cfg = dict(type='AssociativeEmbeddingLoss') + >>> loss_module = CombinedLoss( + ... losses=dict( + ... heatmap_loss=heatmap_loss_cfg, + ... ae_loss=ae_loss_cfg)) + >>> loss_hm = loss_module.heatmap_loss(pred_heatmap, gt_heatmap) + >>> loss_ae = loss_module.ae_loss(pred_tags, keypoint_indices) + """ + + def __init__(self, losses: Dict[str, ConfigType]): + super().__init__() + for loss_name, loss_cfg in losses.items(): + self.add_module(loss_name, MODELS.build(loss_cfg)) diff --git a/mmpose/models/losses/regression_loss.py b/mmpose/models/losses/regression_loss.py index 9a64a4adfe..ba0a070893 100644 --- a/mmpose/models/losses/regression_loss.py +++ b/mmpose/models/losses/regression_loss.py @@ -1,618 +1,618 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math -from functools import partial - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from mmpose.registry import MODELS -from ..utils.realnvp import RealNVP - - -@MODELS.register_module() -class RLELoss(nn.Module): - """RLE Loss. - - `Human Pose Regression With Residual Log-Likelihood Estimation - arXiv: `_. - - Code is modified from `the official implementation - `_. - - Args: - use_target_weight (bool): Option to use weighted loss. - Different joint types may have different target weights. - size_average (bool): Option to average the loss by the batch_size. - residual (bool): Option to add L1 loss and let the flow - learn the residual error distribution. - q_dis (string): Option for the identity Q(error) distribution, - Options: "laplace" or "gaussian" - """ - - def __init__(self, - use_target_weight=False, - size_average=True, - residual=True, - q_distribution='laplace'): - super(RLELoss, self).__init__() - self.size_average = size_average - self.use_target_weight = use_target_weight - self.residual = residual - self.q_distribution = q_distribution - - self.flow_model = RealNVP() - - def forward(self, pred, sigma, target, target_weight=None): - """Forward function. 
- - Note: - - batch_size: N - - num_keypoints: K - - dimension of keypoints: D (D=2 or D=3) - - Args: - pred (Tensor[N, K, D]): Output regression. - sigma (Tensor[N, K, D]): Output sigma. - target (Tensor[N, K, D]): Target regression. - target_weight (Tensor[N, K, D]): - Weights across different joint types. - """ - sigma = sigma.sigmoid() - - error = (pred - target) / (sigma + 1e-9) - # (B, K, 2) - log_phi = self.flow_model.log_prob(error.reshape(-1, 2)) - log_phi = log_phi.reshape(target.shape[0], target.shape[1], 1) - log_sigma = torch.log(sigma).reshape(target.shape[0], target.shape[1], - 2) - nf_loss = log_sigma - log_phi - - if self.residual: - assert self.q_distribution in ['laplace', 'gaussian'] - if self.q_distribution == 'laplace': - loss_q = torch.log(sigma * 2) + torch.abs(error) - else: - loss_q = torch.log( - sigma * math.sqrt(2 * math.pi)) + 0.5 * error**2 - - loss = nf_loss + loss_q - else: - loss = nf_loss - - if self.use_target_weight: - assert target_weight is not None - loss *= target_weight - - if self.size_average: - loss /= len(loss) - - return loss.sum() - - -@MODELS.register_module() -class SmoothL1Loss(nn.Module): - """SmoothL1Loss loss. - - Args: - use_target_weight (bool): Option to use weighted MSE loss. - Different joint types may have different target weights. - loss_weight (float): Weight of the loss. Default: 1.0. - """ - - def __init__(self, use_target_weight=False, loss_weight=1.): - super().__init__() - self.criterion = F.smooth_l1_loss - self.use_target_weight = use_target_weight - self.loss_weight = loss_weight - - def forward(self, output, target, target_weight=None): - """Forward function. - - Note: - - batch_size: N - - num_keypoints: K - - dimension of keypoints: D (D=2 or D=3) - - Args: - output (torch.Tensor[N, K, D]): Output regression. - target (torch.Tensor[N, K, D]): Target regression. - target_weight (torch.Tensor[N, K, D]): - Weights across different joint types. - """ - - if self.use_target_weight: - assert target_weight is not None - assert output.ndim >= target_weight.ndim - - for i in range(output.ndim - target_weight.ndim): - target_weight = target_weight.unsqueeze(-1) - - loss = self.criterion(output * target_weight, - target * target_weight) - else: - loss = self.criterion(output, target) - - return loss * self.loss_weight - - -@MODELS.register_module() -class SoftWeightSmoothL1Loss(nn.Module): - """Smooth L1 loss with soft weight for regression. - - Args: - use_target_weight (bool): Option to use weighted MSE loss. - Different joint types may have different target weights. - supervise_empty (bool): Whether to supervise the output with zero - weight. - beta (float): Specifies the threshold at which to change between - L1 and L2 loss. - loss_weight (float): Weight of the loss. Default: 1.0. 
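# Illustrative usage sketch (editorial addition, not part of the patched file;
# assumes an installed mmpose environment, arbitrary shapes). With
# use_target_weight=True each smooth-L1 term is scaled by its keypoint weight,
# and supervise_empty=False normalises by the count of positive weights rather
# than by all elements.
import torch
from mmpose.registry import MODELS

swl1 = MODELS.build(
    dict(type='SoftWeightSmoothL1Loss', use_target_weight=True,
         supervise_empty=False, beta=1.0))
N, K, D = 2, 17, 2
pred = torch.rand(N, K, D)
gt = torch.rand(N, K, D)
weights = torch.ones(N, K)        # unsqueezed to [N, K, 1] inside forward()
print(swl1(pred, gt, weights))    # scalar Tensor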
- """ - - def __init__(self, - use_target_weight=False, - supervise_empty=True, - beta=1.0, - loss_weight=1.): - super().__init__() - - reduction = 'none' if use_target_weight else 'mean' - self.criterion = partial( - self.smooth_l1_loss, reduction=reduction, beta=beta) - - self.supervise_empty = supervise_empty - self.use_target_weight = use_target_weight - self.loss_weight = loss_weight - - @staticmethod - def smooth_l1_loss(input, target, reduction='none', beta=1.0): - """Re-implement torch.nn.functional.smooth_l1_loss with beta to support - pytorch <= 1.6.""" - delta = input - target - mask = delta.abs() < beta - delta[mask] = (delta[mask]).pow(2) / (2 * beta) - delta[~mask] = delta[~mask].abs() - beta / 2 - - if reduction == 'mean': - return delta.mean() - elif reduction == 'sum': - return delta.sum() - elif reduction == 'none': - return delta - else: - raise ValueError(f'reduction must be \'mean\', \'sum\' or ' - f'\'none\', but got \'{reduction}\'') - - def forward(self, output, target, target_weight=None): - """Forward function. - - Note: - - batch_size: N - - num_keypoints: K - - dimension of keypoints: D (D=2 or D=3) - - Args: - output (torch.Tensor[N, K, D]): Output regression. - target (torch.Tensor[N, K, D]): Target regression. - target_weight (torch.Tensor[N, K, D]): - Weights across different joint types. - """ - if self.use_target_weight: - assert target_weight is not None - assert output.ndim >= target_weight.ndim - - for i in range(output.ndim - target_weight.ndim): - target_weight = target_weight.unsqueeze(-1) - - loss = self.criterion(output, target) * target_weight - if self.supervise_empty: - loss = loss.mean() - else: - num_elements = torch.nonzero(target_weight > 0).size()[0] - loss = loss.sum() / max(num_elements, 1.0) - else: - loss = self.criterion(output, target) - - return loss * self.loss_weight - - -@MODELS.register_module() -class WingLoss(nn.Module): - """Wing Loss. paper ref: 'Wing Loss for Robust Facial Landmark Localisation - with Convolutional Neural Networks' Feng et al. CVPR'2018. - - Args: - omega (float): Also referred to as width. - epsilon (float): Also referred to as curvature. - use_target_weight (bool): Option to use weighted MSE loss. - Different joint types may have different target weights. - loss_weight (float): Weight of the loss. Default: 1.0. - """ - - def __init__(self, - omega=10.0, - epsilon=2.0, - use_target_weight=False, - loss_weight=1.): - super().__init__() - self.omega = omega - self.epsilon = epsilon - self.use_target_weight = use_target_weight - self.loss_weight = loss_weight - - # constant that smoothly links the piecewise-defined linear - # and nonlinear parts - self.C = self.omega * (1.0 - math.log(1.0 + self.omega / self.epsilon)) - - def criterion(self, pred, target): - """Criterion of wingloss. - - Note: - - batch_size: N - - num_keypoints: K - - dimension of keypoints: D (D=2 or D=3) - - Args: - pred (torch.Tensor[N, K, D]): Output regression. - target (torch.Tensor[N, K, D]): Target regression. - """ - delta = (target - pred).abs() - losses = torch.where( - delta < self.omega, - self.omega * torch.log(1.0 + delta / self.epsilon), delta - self.C) - return torch.mean(torch.sum(losses, dim=[1, 2]), dim=0) - - def forward(self, output, target, target_weight=None): - """Forward function. - - Note: - - batch_size: N - - num_keypoints: K - - dimension of keypoints: D (D=2 or D=3) - - Args: - output (torch.Tensor[N, K, D]): Output regression. - target (torch.Tensor[N, K, D]): Target regression. 
- target_weight (torch.Tensor[N,K,D]): - Weights across different joint types. - """ - if self.use_target_weight: - assert target_weight is not None - loss = self.criterion(output * target_weight, - target * target_weight) - else: - loss = self.criterion(output, target) - - return loss * self.loss_weight - - -@MODELS.register_module() -class SoftWingLoss(nn.Module): - """Soft Wing Loss 'Structure-Coherent Deep Feature Learning for Robust Face - Alignment' Lin et al. TIP'2021. - - loss = - 1. |x| , if |x| < omega1 - 2. omega2*ln(1+|x|/epsilon) + B, if |x| >= omega1 - - Args: - omega1 (float): The first threshold. - omega2 (float): The second threshold. - epsilon (float): Also referred to as curvature. - use_target_weight (bool): Option to use weighted MSE loss. - Different joint types may have different target weights. - loss_weight (float): Weight of the loss. Default: 1.0. - """ - - def __init__(self, - omega1=2.0, - omega2=20.0, - epsilon=0.5, - use_target_weight=False, - loss_weight=1.): - super().__init__() - self.omega1 = omega1 - self.omega2 = omega2 - self.epsilon = epsilon - self.use_target_weight = use_target_weight - self.loss_weight = loss_weight - - # constant that smoothly links the piecewise-defined linear - # and nonlinear parts - self.B = self.omega1 - self.omega2 * math.log(1.0 + self.omega1 / - self.epsilon) - - def criterion(self, pred, target): - """Criterion of wingloss. - - Note: - batch_size: N - num_keypoints: K - dimension of keypoints: D (D=2 or D=3) - - Args: - pred (torch.Tensor[N, K, D]): Output regression. - target (torch.Tensor[N, K, D]): Target regression. - """ - delta = (target - pred).abs() - losses = torch.where( - delta < self.omega1, delta, - self.omega2 * torch.log(1.0 + delta / self.epsilon) + self.B) - return torch.mean(torch.sum(losses, dim=[1, 2]), dim=0) - - def forward(self, output, target, target_weight=None): - """Forward function. - - Note: - batch_size: N - num_keypoints: K - dimension of keypoints: D (D=2 or D=3) - - Args: - output (torch.Tensor[N, K, D]): Output regression. - target (torch.Tensor[N, K, D]): Target regression. - target_weight (torch.Tensor[N, K, D]): - Weights across different joint types. - """ - if self.use_target_weight: - assert target_weight is not None - loss = self.criterion(output * target_weight, - target * target_weight) - else: - loss = self.criterion(output, target) - - return loss * self.loss_weight - - -@MODELS.register_module() -class MPJPELoss(nn.Module): - """MPJPE (Mean Per Joint Position Error) loss. - - Args: - use_target_weight (bool): Option to use weighted MSE loss. - Different joint types may have different target weights. - loss_weight (float): Weight of the loss. Default: 1.0. - """ - - def __init__(self, use_target_weight=False, loss_weight=1.): - super().__init__() - self.use_target_weight = use_target_weight - self.loss_weight = loss_weight - - def forward(self, output, target, target_weight=None): - """Forward function. - - Note: - - batch_size: N - - num_keypoints: K - - dimension of keypoints: D (D=2 or D=3) - - Args: - output (torch.Tensor[N, K, D]): Output regression. - target (torch.Tensor[N, K, D]): Target regression. - target_weight (torch.Tensor[N,K,D]): - Weights across different joint types. 
- """ - - if self.use_target_weight: - assert target_weight is not None - loss = torch.mean( - torch.norm((output - target) * target_weight, dim=-1)) - else: - loss = torch.mean(torch.norm(output - target, dim=-1)) - - return loss * self.loss_weight - - -@MODELS.register_module() -class L1Loss(nn.Module): - """L1Loss loss .""" - - def __init__(self, use_target_weight=False, loss_weight=1.): - super().__init__() - self.criterion = F.l1_loss - self.use_target_weight = use_target_weight - self.loss_weight = loss_weight - - def forward(self, output, target, target_weight=None): - """Forward function. - - Note: - - batch_size: N - - num_keypoints: K - - Args: - output (torch.Tensor[N, K, 2]): Output regression. - target (torch.Tensor[N, K, 2]): Target regression. - target_weight (torch.Tensor[N, K, 2]): - Weights across different joint types. - """ - if self.use_target_weight: - assert target_weight is not None - loss = self.criterion(output * target_weight, - target * target_weight) - else: - loss = self.criterion(output, target) - - return loss * self.loss_weight - - -@MODELS.register_module() -class MSELoss(nn.Module): - """MSE loss for coordinate regression.""" - - def __init__(self, use_target_weight=False, loss_weight=1.): - super().__init__() - self.criterion = F.mse_loss - self.use_target_weight = use_target_weight - self.loss_weight = loss_weight - - def forward(self, output, target, target_weight=None): - """Forward function. - - Note: - - batch_size: N - - num_keypoints: K - - Args: - output (torch.Tensor[N, K, 2]): Output regression. - target (torch.Tensor[N, K, 2]): Target regression. - target_weight (torch.Tensor[N, K, 2]): - Weights across different joint types. - """ - - if self.use_target_weight: - assert target_weight is not None - loss = self.criterion(output * target_weight, - target * target_weight) - else: - loss = self.criterion(output, target) - - return loss * self.loss_weight - - -@MODELS.register_module() -class BoneLoss(nn.Module): - """Bone length loss. - - Args: - joint_parents (list): Indices of each joint's parent joint. - use_target_weight (bool): Option to use weighted bone loss. - Different bone types may have different target weights. - loss_weight (float): Weight of the loss. Default: 1.0. - """ - - def __init__(self, joint_parents, use_target_weight=False, loss_weight=1.): - super().__init__() - self.joint_parents = joint_parents - self.use_target_weight = use_target_weight - self.loss_weight = loss_weight - - self.non_root_indices = [] - for i in range(len(self.joint_parents)): - if i != self.joint_parents[i]: - self.non_root_indices.append(i) - - def forward(self, output, target, target_weight=None): - """Forward function. - - Note: - - batch_size: N - - num_keypoints: K - - dimension of keypoints: D (D=2 or D=3) - - Args: - output (torch.Tensor[N, K, D]): Output regression. - target (torch.Tensor[N, K, D]): Target regression. - target_weight (torch.Tensor[N, K-1]): - Weights across different bone types. 
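# Illustrative usage sketch (editorial addition, not part of the patched file;
# assumes an installed mmpose environment; the 4-joint skeleton below is made
# up). joint_parents lists each joint's parent index, a root joint is its own
# parent and contributes no bone, and the loss compares mean bone lengths.
import torch
from mmpose.registry import MODELS

bone_loss = MODELS.build(
    dict(type='BoneLoss', joint_parents=[0, 0, 1, 2], use_target_weight=True))
N, K, D = 2, 4, 3
pred = torch.rand(N, K, D)
gt = torch.rand(N, K, D)
bone_weights = torch.ones(N, K - 1)        # one weight per non-root bone
print(bone_loss(pred, gt, bone_weights))   # scalar Tensor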
- """ - output_bone = torch.norm( - output - output[:, self.joint_parents, :], - dim=-1)[:, self.non_root_indices] - target_bone = torch.norm( - target - target[:, self.joint_parents, :], - dim=-1)[:, self.non_root_indices] - if self.use_target_weight: - assert target_weight is not None - loss = torch.mean( - torch.abs((output_bone * target_weight).mean(dim=0) - - (target_bone * target_weight).mean(dim=0))) - else: - loss = torch.mean( - torch.abs(output_bone.mean(dim=0) - target_bone.mean(dim=0))) - - return loss * self.loss_weight - - -@MODELS.register_module() -class SemiSupervisionLoss(nn.Module): - """Semi-supervision loss for unlabeled data. It is composed of projection - loss and bone loss. - - Paper ref: `3D human pose estimation in video with temporal convolutions - and semi-supervised training` Dario Pavllo et al. CVPR'2019. - - Args: - joint_parents (list): Indices of each joint's parent joint. - projection_loss_weight (float): Weight for projection loss. - bone_loss_weight (float): Weight for bone loss. - warmup_iterations (int): Number of warmup iterations. In the first - `warmup_iterations` iterations, the model is trained only on - labeled data, and semi-supervision loss will be 0. - This is a workaround since currently we cannot access - epoch number in loss functions. Note that the iteration number in - an epoch can be changed due to different GPU numbers in multi-GPU - settings. So please set this parameter carefully. - warmup_iterations = dataset_size // samples_per_gpu // gpu_num - * warmup_epochs - """ - - def __init__(self, - joint_parents, - projection_loss_weight=1., - bone_loss_weight=1., - warmup_iterations=0): - super().__init__() - self.criterion_projection = MPJPELoss( - loss_weight=projection_loss_weight) - self.criterion_bone = BoneLoss( - joint_parents, loss_weight=bone_loss_weight) - self.warmup_iterations = warmup_iterations - self.num_iterations = 0 - - @staticmethod - def project_joints(x, intrinsics): - """Project 3D joint coordinates to 2D image plane using camera - intrinsic parameters. - - Args: - x (torch.Tensor[N, K, 3]): 3D joint coordinates. - intrinsics (torch.Tensor[N, 4] | torch.Tensor[N, 9]): Camera - intrinsics: f (2), c (2), k (3), p (2). 
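# Illustrative sketch (editorial addition, not part of the patched file;
# assumes an installed mmpose environment; camera values are arbitrary).
# project_joints applies a pinhole projection with 4-element intrinsics
# (f, c) and adds radial/tangential distortion when 9 elements (f, c, k, p)
# are given.
import torch
from mmpose.registry import MODELS

ssl_loss = MODELS.build(
    dict(type='SemiSupervisionLoss', joint_parents=[0, 0, 1]))
x = torch.rand(2, 17, 3) + torch.tensor([0., 0., 3.])   # keep depth positive
intrinsics = torch.tensor([[1000., 1000., 500., 500.]]).repeat(2, 1)
uv = ssl_loss.project_joints(x, intrinsics)
print(uv.shape)   # torch.Size([2, 17, 2])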
- """ - while intrinsics.dim() < x.dim(): - intrinsics.unsqueeze_(1) - f = intrinsics[..., :2] - c = intrinsics[..., 2:4] - _x = torch.clamp(x[:, :, :2] / x[:, :, 2:], -1, 1) - if intrinsics.shape[-1] == 9: - k = intrinsics[..., 4:7] - p = intrinsics[..., 7:9] - - r2 = torch.sum(_x[:, :, :2]**2, dim=-1, keepdim=True) - radial = 1 + torch.sum( - k * torch.cat((r2, r2**2, r2**3), dim=-1), - dim=-1, - keepdim=True) - tan = torch.sum(p * _x, dim=-1, keepdim=True) - _x = _x * (radial + tan) + p * r2 - _x = f * _x + c - return _x - - def forward(self, output, target): - losses = dict() - - self.num_iterations += 1 - if self.num_iterations <= self.warmup_iterations: - return losses - - labeled_pose = output['labeled_pose'] - unlabeled_pose = output['unlabeled_pose'] - unlabeled_traj = output['unlabeled_traj'] - unlabeled_target_2d = target['unlabeled_target_2d'] - intrinsics = target['intrinsics'] - - # projection loss - unlabeled_output = unlabeled_pose + unlabeled_traj - unlabeled_output_2d = self.project_joints(unlabeled_output, intrinsics) - loss_proj = self.criterion_projection(unlabeled_output_2d, - unlabeled_target_2d, None) - losses['proj_loss'] = loss_proj - - # bone loss - loss_bone = self.criterion_bone(unlabeled_pose, labeled_pose, None) - losses['bone_loss'] = loss_bone - - return losses +# Copyright (c) OpenMMLab. All rights reserved. +import math +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from mmpose.registry import MODELS +from ..utils.realnvp import RealNVP + + +@MODELS.register_module() +class RLELoss(nn.Module): + """RLE Loss. + + `Human Pose Regression With Residual Log-Likelihood Estimation + arXiv: `_. + + Code is modified from `the official implementation + `_. + + Args: + use_target_weight (bool): Option to use weighted loss. + Different joint types may have different target weights. + size_average (bool): Option to average the loss by the batch_size. + residual (bool): Option to add L1 loss and let the flow + learn the residual error distribution. + q_dis (string): Option for the identity Q(error) distribution, + Options: "laplace" or "gaussian" + """ + + def __init__(self, + use_target_weight=False, + size_average=True, + residual=True, + q_distribution='laplace'): + super(RLELoss, self).__init__() + self.size_average = size_average + self.use_target_weight = use_target_weight + self.residual = residual + self.q_distribution = q_distribution + + self.flow_model = RealNVP() + + def forward(self, pred, sigma, target, target_weight=None): + """Forward function. + + Note: + - batch_size: N + - num_keypoints: K + - dimension of keypoints: D (D=2 or D=3) + + Args: + pred (Tensor[N, K, D]): Output regression. + sigma (Tensor[N, K, D]): Output sigma. + target (Tensor[N, K, D]): Target regression. + target_weight (Tensor[N, K, D]): + Weights across different joint types. 
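# Illustrative usage sketch (editorial addition, not part of the patched file;
# assumes an installed mmpose environment, arbitrary shapes). The RealNVP flow
# built in __init__ scores 2-D errors (error.reshape(-1, 2)), so D = 2 here;
# sigma is passed through a sigmoid inside forward().
import torch
from mmpose.registry import MODELS

rle_loss = MODELS.build(dict(type='RLELoss', use_target_weight=True))
N, K, D = 2, 17, 2
pred = torch.rand(N, K, D)
sigma = torch.rand(N, K, D)
gt = torch.rand(N, K, D)
weights = torch.ones(N, K, D)
print(rle_loss(pred, sigma, gt, weights))   # scalar Tensor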
+ """ + sigma = sigma.sigmoid() + + error = (pred - target) / (sigma + 1e-9) + # (B, K, 2) + log_phi = self.flow_model.log_prob(error.reshape(-1, 2)) + log_phi = log_phi.reshape(target.shape[0], target.shape[1], 1) + log_sigma = torch.log(sigma).reshape(target.shape[0], target.shape[1], + 2) + nf_loss = log_sigma - log_phi + + if self.residual: + assert self.q_distribution in ['laplace', 'gaussian'] + if self.q_distribution == 'laplace': + loss_q = torch.log(sigma * 2) + torch.abs(error) + else: + loss_q = torch.log( + sigma * math.sqrt(2 * math.pi)) + 0.5 * error**2 + + loss = nf_loss + loss_q + else: + loss = nf_loss + + if self.use_target_weight: + assert target_weight is not None + loss *= target_weight + + if self.size_average: + loss /= len(loss) + + return loss.sum() + + +@MODELS.register_module() +class SmoothL1Loss(nn.Module): + """SmoothL1Loss loss. + + Args: + use_target_weight (bool): Option to use weighted MSE loss. + Different joint types may have different target weights. + loss_weight (float): Weight of the loss. Default: 1.0. + """ + + def __init__(self, use_target_weight=False, loss_weight=1.): + super().__init__() + self.criterion = F.smooth_l1_loss + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + + def forward(self, output, target, target_weight=None): + """Forward function. + + Note: + - batch_size: N + - num_keypoints: K + - dimension of keypoints: D (D=2 or D=3) + + Args: + output (torch.Tensor[N, K, D]): Output regression. + target (torch.Tensor[N, K, D]): Target regression. + target_weight (torch.Tensor[N, K, D]): + Weights across different joint types. + """ + + if self.use_target_weight: + assert target_weight is not None + assert output.ndim >= target_weight.ndim + + for i in range(output.ndim - target_weight.ndim): + target_weight = target_weight.unsqueeze(-1) + + loss = self.criterion(output * target_weight, + target * target_weight) + else: + loss = self.criterion(output, target) + + return loss * self.loss_weight + + +@MODELS.register_module() +class SoftWeightSmoothL1Loss(nn.Module): + """Smooth L1 loss with soft weight for regression. + + Args: + use_target_weight (bool): Option to use weighted MSE loss. + Different joint types may have different target weights. + supervise_empty (bool): Whether to supervise the output with zero + weight. + beta (float): Specifies the threshold at which to change between + L1 and L2 loss. + loss_weight (float): Weight of the loss. Default: 1.0. + """ + + def __init__(self, + use_target_weight=False, + supervise_empty=True, + beta=1.0, + loss_weight=1.): + super().__init__() + + reduction = 'none' if use_target_weight else 'mean' + self.criterion = partial( + self.smooth_l1_loss, reduction=reduction, beta=beta) + + self.supervise_empty = supervise_empty + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + + @staticmethod + def smooth_l1_loss(input, target, reduction='none', beta=1.0): + """Re-implement torch.nn.functional.smooth_l1_loss with beta to support + pytorch <= 1.6.""" + delta = input - target + mask = delta.abs() < beta + delta[mask] = (delta[mask]).pow(2) / (2 * beta) + delta[~mask] = delta[~mask].abs() - beta / 2 + + if reduction == 'mean': + return delta.mean() + elif reduction == 'sum': + return delta.sum() + elif reduction == 'none': + return delta + else: + raise ValueError(f'reduction must be \'mean\', \'sum\' or ' + f'\'none\', but got \'{reduction}\'') + + def forward(self, output, target, target_weight=None): + """Forward function. 
+ + Note: + - batch_size: N + - num_keypoints: K + - dimension of keypoints: D (D=2 or D=3) + + Args: + output (torch.Tensor[N, K, D]): Output regression. + target (torch.Tensor[N, K, D]): Target regression. + target_weight (torch.Tensor[N, K, D]): + Weights across different joint types. + """ + if self.use_target_weight: + assert target_weight is not None + assert output.ndim >= target_weight.ndim + + for i in range(output.ndim - target_weight.ndim): + target_weight = target_weight.unsqueeze(-1) + + loss = self.criterion(output, target) * target_weight + if self.supervise_empty: + loss = loss.mean() + else: + num_elements = torch.nonzero(target_weight > 0).size()[0] + loss = loss.sum() / max(num_elements, 1.0) + else: + loss = self.criterion(output, target) + + return loss * self.loss_weight + + +@MODELS.register_module() +class WingLoss(nn.Module): + """Wing Loss. paper ref: 'Wing Loss for Robust Facial Landmark Localisation + with Convolutional Neural Networks' Feng et al. CVPR'2018. + + Args: + omega (float): Also referred to as width. + epsilon (float): Also referred to as curvature. + use_target_weight (bool): Option to use weighted MSE loss. + Different joint types may have different target weights. + loss_weight (float): Weight of the loss. Default: 1.0. + """ + + def __init__(self, + omega=10.0, + epsilon=2.0, + use_target_weight=False, + loss_weight=1.): + super().__init__() + self.omega = omega + self.epsilon = epsilon + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + + # constant that smoothly links the piecewise-defined linear + # and nonlinear parts + self.C = self.omega * (1.0 - math.log(1.0 + self.omega / self.epsilon)) + + def criterion(self, pred, target): + """Criterion of wingloss. + + Note: + - batch_size: N + - num_keypoints: K + - dimension of keypoints: D (D=2 or D=3) + + Args: + pred (torch.Tensor[N, K, D]): Output regression. + target (torch.Tensor[N, K, D]): Target regression. + """ + delta = (target - pred).abs() + losses = torch.where( + delta < self.omega, + self.omega * torch.log(1.0 + delta / self.epsilon), delta - self.C) + return torch.mean(torch.sum(losses, dim=[1, 2]), dim=0) + + def forward(self, output, target, target_weight=None): + """Forward function. + + Note: + - batch_size: N + - num_keypoints: K + - dimension of keypoints: D (D=2 or D=3) + + Args: + output (torch.Tensor[N, K, D]): Output regression. + target (torch.Tensor[N, K, D]): Target regression. + target_weight (torch.Tensor[N,K,D]): + Weights across different joint types. + """ + if self.use_target_weight: + assert target_weight is not None + loss = self.criterion(output * target_weight, + target * target_weight) + else: + loss = self.criterion(output, target) + + return loss * self.loss_weight + + +@MODELS.register_module() +class SoftWingLoss(nn.Module): + """Soft Wing Loss 'Structure-Coherent Deep Feature Learning for Robust Face + Alignment' Lin et al. TIP'2021. + + loss = + 1. |x| , if |x| < omega1 + 2. omega2*ln(1+|x|/epsilon) + B, if |x| >= omega1 + + Args: + omega1 (float): The first threshold. + omega2 (float): The second threshold. + epsilon (float): Also referred to as curvature. + use_target_weight (bool): Option to use weighted MSE loss. + Different joint types may have different target weights. + loss_weight (float): Weight of the loss. Default: 1.0. 
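# Illustrative usage sketch (editorial addition, not part of the patched file;
# assumes an installed mmpose environment, arbitrary shapes). Below omega1 the
# loss is plain |x|; above it the log branch plus the constant B keeps the two
# pieces continuous at |x| == omega1.
import torch
from mmpose.registry import MODELS

soft_wing = MODELS.build(
    dict(type='SoftWingLoss', omega1=2.0, omega2=20.0, epsilon=0.5,
         use_target_weight=True))
N, K, D = 2, 17, 2
pred = torch.rand(N, K, D)
gt = torch.rand(N, K, D)
weights = torch.ones(N, K, D)
print(soft_wing(pred, gt, weights))   # scalar Tensor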
+ """ + + def __init__(self, + omega1=2.0, + omega2=20.0, + epsilon=0.5, + use_target_weight=False, + loss_weight=1.): + super().__init__() + self.omega1 = omega1 + self.omega2 = omega2 + self.epsilon = epsilon + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + + # constant that smoothly links the piecewise-defined linear + # and nonlinear parts + self.B = self.omega1 - self.omega2 * math.log(1.0 + self.omega1 / + self.epsilon) + + def criterion(self, pred, target): + """Criterion of wingloss. + + Note: + batch_size: N + num_keypoints: K + dimension of keypoints: D (D=2 or D=3) + + Args: + pred (torch.Tensor[N, K, D]): Output regression. + target (torch.Tensor[N, K, D]): Target regression. + """ + delta = (target - pred).abs() + losses = torch.where( + delta < self.omega1, delta, + self.omega2 * torch.log(1.0 + delta / self.epsilon) + self.B) + return torch.mean(torch.sum(losses, dim=[1, 2]), dim=0) + + def forward(self, output, target, target_weight=None): + """Forward function. + + Note: + batch_size: N + num_keypoints: K + dimension of keypoints: D (D=2 or D=3) + + Args: + output (torch.Tensor[N, K, D]): Output regression. + target (torch.Tensor[N, K, D]): Target regression. + target_weight (torch.Tensor[N, K, D]): + Weights across different joint types. + """ + if self.use_target_weight: + assert target_weight is not None + loss = self.criterion(output * target_weight, + target * target_weight) + else: + loss = self.criterion(output, target) + + return loss * self.loss_weight + + +@MODELS.register_module() +class MPJPELoss(nn.Module): + """MPJPE (Mean Per Joint Position Error) loss. + + Args: + use_target_weight (bool): Option to use weighted MSE loss. + Different joint types may have different target weights. + loss_weight (float): Weight of the loss. Default: 1.0. + """ + + def __init__(self, use_target_weight=False, loss_weight=1.): + super().__init__() + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + + def forward(self, output, target, target_weight=None): + """Forward function. + + Note: + - batch_size: N + - num_keypoints: K + - dimension of keypoints: D (D=2 or D=3) + + Args: + output (torch.Tensor[N, K, D]): Output regression. + target (torch.Tensor[N, K, D]): Target regression. + target_weight (torch.Tensor[N,K,D]): + Weights across different joint types. + """ + + if self.use_target_weight: + assert target_weight is not None + loss = torch.mean( + torch.norm((output - target) * target_weight, dim=-1)) + else: + loss = torch.mean(torch.norm(output - target, dim=-1)) + + return loss * self.loss_weight + + +@MODELS.register_module() +class L1Loss(nn.Module): + """L1Loss loss .""" + + def __init__(self, use_target_weight=False, loss_weight=1.): + super().__init__() + self.criterion = F.l1_loss + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + + def forward(self, output, target, target_weight=None): + """Forward function. + + Note: + - batch_size: N + - num_keypoints: K + + Args: + output (torch.Tensor[N, K, 2]): Output regression. + target (torch.Tensor[N, K, 2]): Target regression. + target_weight (torch.Tensor[N, K, 2]): + Weights across different joint types. 
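# Illustrative usage sketch (editorial addition, not part of the patched file;
# assumes an installed mmpose environment, arbitrary shapes). MPJPELoss
# averages the Euclidean distance between predicted and target joints; the
# same call pattern applies to the L1Loss and MSELoss coordinate losses here.
import torch
from mmpose.registry import MODELS

mpjpe = MODELS.build(dict(type='MPJPELoss', use_target_weight=True))
N, K, D = 2, 17, 3
pred = torch.rand(N, K, D)
gt = torch.rand(N, K, D)
weights = torch.ones(N, K, D)
print(mpjpe(pred, gt, weights))   # scalar Tensor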
+ """ + if self.use_target_weight: + assert target_weight is not None + loss = self.criterion(output * target_weight, + target * target_weight) + else: + loss = self.criterion(output, target) + + return loss * self.loss_weight + + +@MODELS.register_module() +class MSELoss(nn.Module): + """MSE loss for coordinate regression.""" + + def __init__(self, use_target_weight=False, loss_weight=1.): + super().__init__() + self.criterion = F.mse_loss + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + + def forward(self, output, target, target_weight=None): + """Forward function. + + Note: + - batch_size: N + - num_keypoints: K + + Args: + output (torch.Tensor[N, K, 2]): Output regression. + target (torch.Tensor[N, K, 2]): Target regression. + target_weight (torch.Tensor[N, K, 2]): + Weights across different joint types. + """ + + if self.use_target_weight: + assert target_weight is not None + loss = self.criterion(output * target_weight, + target * target_weight) + else: + loss = self.criterion(output, target) + + return loss * self.loss_weight + + +@MODELS.register_module() +class BoneLoss(nn.Module): + """Bone length loss. + + Args: + joint_parents (list): Indices of each joint's parent joint. + use_target_weight (bool): Option to use weighted bone loss. + Different bone types may have different target weights. + loss_weight (float): Weight of the loss. Default: 1.0. + """ + + def __init__(self, joint_parents, use_target_weight=False, loss_weight=1.): + super().__init__() + self.joint_parents = joint_parents + self.use_target_weight = use_target_weight + self.loss_weight = loss_weight + + self.non_root_indices = [] + for i in range(len(self.joint_parents)): + if i != self.joint_parents[i]: + self.non_root_indices.append(i) + + def forward(self, output, target, target_weight=None): + """Forward function. + + Note: + - batch_size: N + - num_keypoints: K + - dimension of keypoints: D (D=2 or D=3) + + Args: + output (torch.Tensor[N, K, D]): Output regression. + target (torch.Tensor[N, K, D]): Target regression. + target_weight (torch.Tensor[N, K-1]): + Weights across different bone types. + """ + output_bone = torch.norm( + output - output[:, self.joint_parents, :], + dim=-1)[:, self.non_root_indices] + target_bone = torch.norm( + target - target[:, self.joint_parents, :], + dim=-1)[:, self.non_root_indices] + if self.use_target_weight: + assert target_weight is not None + loss = torch.mean( + torch.abs((output_bone * target_weight).mean(dim=0) - + (target_bone * target_weight).mean(dim=0))) + else: + loss = torch.mean( + torch.abs(output_bone.mean(dim=0) - target_bone.mean(dim=0))) + + return loss * self.loss_weight + + +@MODELS.register_module() +class SemiSupervisionLoss(nn.Module): + """Semi-supervision loss for unlabeled data. It is composed of projection + loss and bone loss. + + Paper ref: `3D human pose estimation in video with temporal convolutions + and semi-supervised training` Dario Pavllo et al. CVPR'2019. + + Args: + joint_parents (list): Indices of each joint's parent joint. + projection_loss_weight (float): Weight for projection loss. + bone_loss_weight (float): Weight for bone loss. + warmup_iterations (int): Number of warmup iterations. In the first + `warmup_iterations` iterations, the model is trained only on + labeled data, and semi-supervision loss will be 0. + This is a workaround since currently we cannot access + epoch number in loss functions. 
Note that the iteration number in + an epoch can be changed due to different GPU numbers in multi-GPU + settings. So please set this parameter carefully. + warmup_iterations = dataset_size // samples_per_gpu // gpu_num + * warmup_epochs + """ + + def __init__(self, + joint_parents, + projection_loss_weight=1., + bone_loss_weight=1., + warmup_iterations=0): + super().__init__() + self.criterion_projection = MPJPELoss( + loss_weight=projection_loss_weight) + self.criterion_bone = BoneLoss( + joint_parents, loss_weight=bone_loss_weight) + self.warmup_iterations = warmup_iterations + self.num_iterations = 0 + + @staticmethod + def project_joints(x, intrinsics): + """Project 3D joint coordinates to 2D image plane using camera + intrinsic parameters. + + Args: + x (torch.Tensor[N, K, 3]): 3D joint coordinates. + intrinsics (torch.Tensor[N, 4] | torch.Tensor[N, 9]): Camera + intrinsics: f (2), c (2), k (3), p (2). + """ + while intrinsics.dim() < x.dim(): + intrinsics.unsqueeze_(1) + f = intrinsics[..., :2] + c = intrinsics[..., 2:4] + _x = torch.clamp(x[:, :, :2] / x[:, :, 2:], -1, 1) + if intrinsics.shape[-1] == 9: + k = intrinsics[..., 4:7] + p = intrinsics[..., 7:9] + + r2 = torch.sum(_x[:, :, :2]**2, dim=-1, keepdim=True) + radial = 1 + torch.sum( + k * torch.cat((r2, r2**2, r2**3), dim=-1), + dim=-1, + keepdim=True) + tan = torch.sum(p * _x, dim=-1, keepdim=True) + _x = _x * (radial + tan) + p * r2 + _x = f * _x + c + return _x + + def forward(self, output, target): + losses = dict() + + self.num_iterations += 1 + if self.num_iterations <= self.warmup_iterations: + return losses + + labeled_pose = output['labeled_pose'] + unlabeled_pose = output['unlabeled_pose'] + unlabeled_traj = output['unlabeled_traj'] + unlabeled_target_2d = target['unlabeled_target_2d'] + intrinsics = target['intrinsics'] + + # projection loss + unlabeled_output = unlabeled_pose + unlabeled_traj + unlabeled_output_2d = self.project_joints(unlabeled_output, intrinsics) + loss_proj = self.criterion_projection(unlabeled_output_2d, + unlabeled_target_2d, None) + losses['proj_loss'] = loss_proj + + # bone loss + loss_bone = self.criterion_bone(unlabeled_pose, labeled_pose, None) + losses['bone_loss'] = loss_bone + + return losses diff --git a/mmpose/models/necks/__init__.py b/mmpose/models/necks/__init__.py index b4f9105cb3..a983d6ecb7 100644 --- a/mmpose/models/necks/__init__.py +++ b/mmpose/models/necks/__init__.py @@ -1,9 +1,9 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .fmap_proc_neck import FeatureMapProcessor -from .fpn import FPN -from .gap_neck import GlobalAveragePooling -from .posewarper_neck import PoseWarperNeck - -__all__ = [ - 'GlobalAveragePooling', 'PoseWarperNeck', 'FPN', 'FeatureMapProcessor' -] +# Copyright (c) OpenMMLab. All rights reserved. +from .fmap_proc_neck import FeatureMapProcessor +from .fpn import FPN +from .gap_neck import GlobalAveragePooling +from .posewarper_neck import PoseWarperNeck + +__all__ = [ + 'GlobalAveragePooling', 'PoseWarperNeck', 'FPN', 'FeatureMapProcessor' +] diff --git a/mmpose/models/necks/fmap_proc_neck.py b/mmpose/models/necks/fmap_proc_neck.py index 2c3a4d7bf4..76e9d398de 100644 --- a/mmpose/models/necks/fmap_proc_neck.py +++ b/mmpose/models/necks/fmap_proc_neck.py @@ -1,101 +1,101 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from typing import List, Optional, Sequence, Tuple, Union - -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch import Tensor - -from mmpose.models.utils.ops import resize -from mmpose.registry import MODELS - - -@MODELS.register_module() -class FeatureMapProcessor(nn.Module): - """A PyTorch module for selecting, concatenating, and rescaling feature - maps. - - Args: - select_index (Optional[Union[int, Tuple[int]]], optional): Index or - indices of feature maps to select. Defaults to None, which means - all feature maps are used. - concat (bool, optional): Whether to concatenate the selected feature - maps. Defaults to False. - scale_factor (float, optional): The scaling factor to apply to the - feature maps. Defaults to 1.0. - apply_relu (bool, optional): Whether to apply ReLU on input feature - maps. Defaults to False. - align_corners (bool, optional): Whether to align corners when resizing - the feature maps. Defaults to False. - """ - - def __init__( - self, - select_index: Optional[Union[int, Tuple[int]]] = None, - concat: bool = False, - scale_factor: float = 1.0, - apply_relu: bool = False, - align_corners: bool = False, - ): - super().__init__() - - if isinstance(select_index, int): - select_index = (select_index, ) - self.select_index = select_index - self.concat = concat - - assert ( - scale_factor > 0 - ), f'the argument `scale_factor` must be positive, ' \ - f'but got {scale_factor}' - self.scale_factor = scale_factor - self.apply_relu = apply_relu - self.align_corners = align_corners - - def forward(self, inputs: Union[Tensor, Sequence[Tensor]] - ) -> Union[Tensor, List[Tensor]]: - - if not isinstance(inputs, (tuple, list)): - sequential_input = False - inputs = [inputs] - else: - sequential_input = True - - if self.select_index is not None: - inputs = [inputs[i] for i in self.select_index] - - if self.concat: - inputs = self._concat(inputs) - - if self.apply_relu: - inputs = [F.relu(x) for x in inputs] - - if self.scale_factor != 1.0: - inputs = self._rescale(inputs) - - if not sequential_input: - inputs = inputs[0] - - return inputs - - def _concat(self, inputs: Sequence[Tensor]) -> List[Tensor]: - size = inputs[0].shape[-2:] - resized_inputs = [ - resize( - x, - size=size, - mode='bilinear', - align_corners=self.align_corners) for x in inputs - ] - return [torch.cat(resized_inputs, dim=1)] - - def _rescale(self, inputs: Sequence[Tensor]) -> List[Tensor]: - rescaled_inputs = [ - resize( - x, - scale_factor=self.scale_factor, - mode='bilinear', - align_corners=self.align_corners, - ) for x in inputs - ] - return rescaled_inputs +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Sequence, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor + +from mmpose.models.utils.ops import resize +from mmpose.registry import MODELS + + +@MODELS.register_module() +class FeatureMapProcessor(nn.Module): + """A PyTorch module for selecting, concatenating, and rescaling feature + maps. + + Args: + select_index (Optional[Union[int, Tuple[int]]], optional): Index or + indices of feature maps to select. Defaults to None, which means + all feature maps are used. + concat (bool, optional): Whether to concatenate the selected feature + maps. Defaults to False. + scale_factor (float, optional): The scaling factor to apply to the + feature maps. Defaults to 1.0. + apply_relu (bool, optional): Whether to apply ReLU on input feature + maps. Defaults to False. 
+ align_corners (bool, optional): Whether to align corners when resizing + the feature maps. Defaults to False. + """ + + def __init__( + self, + select_index: Optional[Union[int, Tuple[int]]] = None, + concat: bool = False, + scale_factor: float = 1.0, + apply_relu: bool = False, + align_corners: bool = False, + ): + super().__init__() + + if isinstance(select_index, int): + select_index = (select_index, ) + self.select_index = select_index + self.concat = concat + + assert ( + scale_factor > 0 + ), f'the argument `scale_factor` must be positive, ' \ + f'but got {scale_factor}' + self.scale_factor = scale_factor + self.apply_relu = apply_relu + self.align_corners = align_corners + + def forward(self, inputs: Union[Tensor, Sequence[Tensor]] + ) -> Union[Tensor, List[Tensor]]: + + if not isinstance(inputs, (tuple, list)): + sequential_input = False + inputs = [inputs] + else: + sequential_input = True + + if self.select_index is not None: + inputs = [inputs[i] for i in self.select_index] + + if self.concat: + inputs = self._concat(inputs) + + if self.apply_relu: + inputs = [F.relu(x) for x in inputs] + + if self.scale_factor != 1.0: + inputs = self._rescale(inputs) + + if not sequential_input: + inputs = inputs[0] + + return inputs + + def _concat(self, inputs: Sequence[Tensor]) -> List[Tensor]: + size = inputs[0].shape[-2:] + resized_inputs = [ + resize( + x, + size=size, + mode='bilinear', + align_corners=self.align_corners) for x in inputs + ] + return [torch.cat(resized_inputs, dim=1)] + + def _rescale(self, inputs: Sequence[Tensor]) -> List[Tensor]: + rescaled_inputs = [ + resize( + x, + scale_factor=self.scale_factor, + mode='bilinear', + align_corners=self.align_corners, + ) for x in inputs + ] + return rescaled_inputs diff --git a/mmpose/models/necks/fpn.py b/mmpose/models/necks/fpn.py index d4d3311bda..7696f0cc71 100644 --- a/mmpose/models/necks/fpn.py +++ b/mmpose/models/necks/fpn.py @@ -1,206 +1,206 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmengine.model import xavier_init - -from mmpose.registry import MODELS - - -@MODELS.register_module() -class FPN(nn.Module): - r"""Feature Pyramid Network. - - This is an implementation of paper `Feature Pyramid Networks for Object - Detection `_. - - Args: - in_channels (list[int]): Number of input channels per scale. - out_channels (int): Number of output channels (used at each scale). - num_outs (int): Number of output scales. - start_level (int): Index of the start input backbone level used to - build the feature pyramid. Default: 0. - end_level (int): Index of the end input backbone level (exclusive) to - build the feature pyramid. Default: -1, which means the last level. - add_extra_convs (bool | str): If bool, it decides whether to add conv - layers on top of the original feature maps. Default to False. - If True, it is equivalent to `add_extra_convs='on_input'`. - If str, it specifies the source feature map of the extra convs. - Only the following options are allowed - - - 'on_input': Last feat map of neck inputs (i.e. backbone feature). - - 'on_lateral': Last feature map after lateral convs. - - 'on_output': The last output feature map after fpn convs. - relu_before_extra_convs (bool): Whether to apply relu before the extra - conv. Default: False. - no_norm_on_lateral (bool): Whether to apply norm on lateral. - Default: False. - conv_cfg (dict): Config dict for convolution layer. Default: None. 
- norm_cfg (dict): Config dict for normalization layer. Default: None. - act_cfg (dict): Config dict for activation layer in ConvModule. - Default: None. - upsample_cfg (dict): Config dict for interpolate layer. - Default: dict(mode='nearest'). - - Example: - >>> import torch - >>> in_channels = [2, 3, 5, 7] - >>> scales = [340, 170, 84, 43] - >>> inputs = [torch.rand(1, c, s, s) - ... for c, s in zip(in_channels, scales)] - >>> self = FPN(in_channels, 11, len(in_channels)).eval() - >>> outputs = self.forward(inputs) - >>> for i in range(len(outputs)): - ... print(f'outputs[{i}].shape = {outputs[i].shape}') - outputs[0].shape = torch.Size([1, 11, 340, 340]) - outputs[1].shape = torch.Size([1, 11, 170, 170]) - outputs[2].shape = torch.Size([1, 11, 84, 84]) - outputs[3].shape = torch.Size([1, 11, 43, 43]) - """ - - def __init__(self, - in_channels, - out_channels, - num_outs, - start_level=0, - end_level=-1, - add_extra_convs=False, - relu_before_extra_convs=False, - no_norm_on_lateral=False, - conv_cfg=None, - norm_cfg=None, - act_cfg=None, - upsample_cfg=dict(mode='nearest')): - super().__init__() - assert isinstance(in_channels, list) - self.in_channels = in_channels - self.out_channels = out_channels - self.num_ins = len(in_channels) - self.num_outs = num_outs - self.relu_before_extra_convs = relu_before_extra_convs - self.no_norm_on_lateral = no_norm_on_lateral - self.fp16_enabled = False - self.upsample_cfg = upsample_cfg.copy() - - if end_level == -1 or end_level == self.num_ins - 1: - self.backbone_end_level = self.num_ins - assert num_outs >= self.num_ins - start_level - else: - # if end_level is not the last level, no extra level is allowed - self.backbone_end_level = end_level + 1 - assert end_level < self.num_ins - assert num_outs == end_level - start_level + 1 - self.start_level = start_level - self.end_level = end_level - self.add_extra_convs = add_extra_convs - assert isinstance(add_extra_convs, (str, bool)) - if isinstance(add_extra_convs, str): - # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output' - assert add_extra_convs in ('on_input', 'on_lateral', 'on_output') - elif add_extra_convs: # True - self.add_extra_convs = 'on_input' - - self.lateral_convs = nn.ModuleList() - self.fpn_convs = nn.ModuleList() - - for i in range(self.start_level, self.backbone_end_level): - l_conv = ConvModule( - in_channels[i], - out_channels, - 1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg if not self.no_norm_on_lateral else None, - act_cfg=act_cfg, - inplace=False) - fpn_conv = ConvModule( - out_channels, - out_channels, - 3, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg, - inplace=False) - - self.lateral_convs.append(l_conv) - self.fpn_convs.append(fpn_conv) - - # add extra conv layers (e.g., RetinaNet) - extra_levels = num_outs - self.backbone_end_level + self.start_level - if self.add_extra_convs and extra_levels >= 1: - for i in range(extra_levels): - if i == 0 and self.add_extra_convs == 'on_input': - in_channels = self.in_channels[self.backbone_end_level - 1] - else: - in_channels = out_channels - extra_fpn_conv = ConvModule( - in_channels, - out_channels, - 3, - stride=2, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg, - inplace=False) - self.fpn_convs.append(extra_fpn_conv) - - def init_weights(self): - """Initialize model weights.""" - for m in self.modules(): - if isinstance(m, nn.Conv2d): - xavier_init(m, distribution='uniform') - - def forward(self, inputs): - """Forward function.""" - assert len(inputs) == 
len(self.in_channels) - - # build laterals - laterals = [ - lateral_conv(inputs[i + self.start_level]) - for i, lateral_conv in enumerate(self.lateral_convs) - ] - - # build top-down path - used_backbone_levels = len(laterals) - for i in range(used_backbone_levels - 1, 0, -1): - # In some cases, fixing `scale factor` (e.g. 2) is preferred, but - # it cannot co-exist with `size` in `F.interpolate`. - if 'scale_factor' in self.upsample_cfg: - # fix runtime error of "+=" inplace operation in PyTorch 1.10 - laterals[i - 1] = laterals[i - 1] + F.interpolate( - laterals[i], **self.upsample_cfg) - else: - prev_shape = laterals[i - 1].shape[2:] - laterals[i - 1] = laterals[i - 1] + F.interpolate( - laterals[i], size=prev_shape, **self.upsample_cfg) - - # build outputs - # part 1: from original levels - outs = [ - self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels) - ] - # part 2: add extra levels - if self.num_outs > len(outs): - # use max pool to get more levels on top of outputs - # (e.g., Faster R-CNN, Mask R-CNN) - if not self.add_extra_convs: - for i in range(self.num_outs - used_backbone_levels): - outs.append(F.max_pool2d(outs[-1], 1, stride=2)) - # add conv layers on top of original feature maps (RetinaNet) - else: - if self.add_extra_convs == 'on_input': - extra_source = inputs[self.backbone_end_level - 1] - elif self.add_extra_convs == 'on_lateral': - extra_source = laterals[-1] - elif self.add_extra_convs == 'on_output': - extra_source = outs[-1] - else: - raise NotImplementedError - outs.append(self.fpn_convs[used_backbone_levels](extra_source)) - for i in range(used_backbone_levels + 1, self.num_outs): - if self.relu_before_extra_convs: - outs.append(self.fpn_convs[i](F.relu(outs[-1]))) - else: - outs.append(self.fpn_convs[i](outs[-1])) - return outs +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmengine.model import xavier_init + +from mmpose.registry import MODELS + + +@MODELS.register_module() +class FPN(nn.Module): + r"""Feature Pyramid Network. + + This is an implementation of paper `Feature Pyramid Networks for Object + Detection `_. + + Args: + in_channels (list[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale). + num_outs (int): Number of output scales. + start_level (int): Index of the start input backbone level used to + build the feature pyramid. Default: 0. + end_level (int): Index of the end input backbone level (exclusive) to + build the feature pyramid. Default: -1, which means the last level. + add_extra_convs (bool | str): If bool, it decides whether to add conv + layers on top of the original feature maps. Default to False. + If True, it is equivalent to `add_extra_convs='on_input'`. + If str, it specifies the source feature map of the extra convs. + Only the following options are allowed + + - 'on_input': Last feat map of neck inputs (i.e. backbone feature). + - 'on_lateral': Last feature map after lateral convs. + - 'on_output': The last output feature map after fpn convs. + relu_before_extra_convs (bool): Whether to apply relu before the extra + conv. Default: False. + no_norm_on_lateral (bool): Whether to apply norm on lateral. + Default: False. + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Config dict for normalization layer. Default: None. + act_cfg (dict): Config dict for activation layer in ConvModule. + Default: None. 
+ upsample_cfg (dict): Config dict for interpolate layer. + Default: dict(mode='nearest'). + + Example: + >>> import torch + >>> in_channels = [2, 3, 5, 7] + >>> scales = [340, 170, 84, 43] + >>> inputs = [torch.rand(1, c, s, s) + ... for c, s in zip(in_channels, scales)] + >>> self = FPN(in_channels, 11, len(in_channels)).eval() + >>> outputs = self.forward(inputs) + >>> for i in range(len(outputs)): + ... print(f'outputs[{i}].shape = {outputs[i].shape}') + outputs[0].shape = torch.Size([1, 11, 340, 340]) + outputs[1].shape = torch.Size([1, 11, 170, 170]) + outputs[2].shape = torch.Size([1, 11, 84, 84]) + outputs[3].shape = torch.Size([1, 11, 43, 43]) + """ + + def __init__(self, + in_channels, + out_channels, + num_outs, + start_level=0, + end_level=-1, + add_extra_convs=False, + relu_before_extra_convs=False, + no_norm_on_lateral=False, + conv_cfg=None, + norm_cfg=None, + act_cfg=None, + upsample_cfg=dict(mode='nearest')): + super().__init__() + assert isinstance(in_channels, list) + self.in_channels = in_channels + self.out_channels = out_channels + self.num_ins = len(in_channels) + self.num_outs = num_outs + self.relu_before_extra_convs = relu_before_extra_convs + self.no_norm_on_lateral = no_norm_on_lateral + self.fp16_enabled = False + self.upsample_cfg = upsample_cfg.copy() + + if end_level == -1 or end_level == self.num_ins - 1: + self.backbone_end_level = self.num_ins + assert num_outs >= self.num_ins - start_level + else: + # if end_level is not the last level, no extra level is allowed + self.backbone_end_level = end_level + 1 + assert end_level < self.num_ins + assert num_outs == end_level - start_level + 1 + self.start_level = start_level + self.end_level = end_level + self.add_extra_convs = add_extra_convs + assert isinstance(add_extra_convs, (str, bool)) + if isinstance(add_extra_convs, str): + # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output' + assert add_extra_convs in ('on_input', 'on_lateral', 'on_output') + elif add_extra_convs: # True + self.add_extra_convs = 'on_input' + + self.lateral_convs = nn.ModuleList() + self.fpn_convs = nn.ModuleList() + + for i in range(self.start_level, self.backbone_end_level): + l_conv = ConvModule( + in_channels[i], + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg if not self.no_norm_on_lateral else None, + act_cfg=act_cfg, + inplace=False) + fpn_conv = ConvModule( + out_channels, + out_channels, + 3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + inplace=False) + + self.lateral_convs.append(l_conv) + self.fpn_convs.append(fpn_conv) + + # add extra conv layers (e.g., RetinaNet) + extra_levels = num_outs - self.backbone_end_level + self.start_level + if self.add_extra_convs and extra_levels >= 1: + for i in range(extra_levels): + if i == 0 and self.add_extra_convs == 'on_input': + in_channels = self.in_channels[self.backbone_end_level - 1] + else: + in_channels = out_channels + extra_fpn_conv = ConvModule( + in_channels, + out_channels, + 3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + inplace=False) + self.fpn_convs.append(extra_fpn_conv) + + def init_weights(self): + """Initialize model weights.""" + for m in self.modules(): + if isinstance(m, nn.Conv2d): + xavier_init(m, distribution='uniform') + + def forward(self, inputs): + """Forward function.""" + assert len(inputs) == len(self.in_channels) + + # build laterals + laterals = [ + lateral_conv(inputs[i + self.start_level]) + for i, lateral_conv in enumerate(self.lateral_convs) + ] + 
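# Illustrative sketch of the top-down fusion performed in the loop below,
# assuming two lateral maps with shapes taken from the doctest above,
# (1, 11, 43, 43) and (1, 11, 84, 84): the coarser map is upsampled to the
# finer resolution and added element-wise, e.g.
#   laterals[0] = laterals[0] + F.interpolate(laterals[1], size=(84, 84), mode='nearest')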
+ # build top-down path + used_backbone_levels = len(laterals) + for i in range(used_backbone_levels - 1, 0, -1): + # In some cases, fixing `scale factor` (e.g. 2) is preferred, but + # it cannot co-exist with `size` in `F.interpolate`. + if 'scale_factor' in self.upsample_cfg: + # fix runtime error of "+=" inplace operation in PyTorch 1.10 + laterals[i - 1] = laterals[i - 1] + F.interpolate( + laterals[i], **self.upsample_cfg) + else: + prev_shape = laterals[i - 1].shape[2:] + laterals[i - 1] = laterals[i - 1] + F.interpolate( + laterals[i], size=prev_shape, **self.upsample_cfg) + + # build outputs + # part 1: from original levels + outs = [ + self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels) + ] + # part 2: add extra levels + if self.num_outs > len(outs): + # use max pool to get more levels on top of outputs + # (e.g., Faster R-CNN, Mask R-CNN) + if not self.add_extra_convs: + for i in range(self.num_outs - used_backbone_levels): + outs.append(F.max_pool2d(outs[-1], 1, stride=2)) + # add conv layers on top of original feature maps (RetinaNet) + else: + if self.add_extra_convs == 'on_input': + extra_source = inputs[self.backbone_end_level - 1] + elif self.add_extra_convs == 'on_lateral': + extra_source = laterals[-1] + elif self.add_extra_convs == 'on_output': + extra_source = outs[-1] + else: + raise NotImplementedError + outs.append(self.fpn_convs[used_backbone_levels](extra_source)) + for i in range(used_backbone_levels + 1, self.num_outs): + if self.relu_before_extra_convs: + outs.append(self.fpn_convs[i](F.relu(outs[-1]))) + else: + outs.append(self.fpn_convs[i](outs[-1])) + return outs diff --git a/mmpose/models/necks/gap_neck.py b/mmpose/models/necks/gap_neck.py index 58ce5d939f..c096d790ae 100644 --- a/mmpose/models/necks/gap_neck.py +++ b/mmpose/models/necks/gap_neck.py @@ -1,39 +1,39 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn - -from mmpose.registry import MODELS - - -@MODELS.register_module() -class GlobalAveragePooling(nn.Module): - """Global Average Pooling neck. - - Note that we use `view` to remove extra channel after pooling. We do not - use `squeeze` as it will also remove the batch dimension when the tensor - has a batch dimension of size 1, which can lead to unexpected errors. - """ - - def __init__(self): - super().__init__() - self.gap = nn.AdaptiveAvgPool2d((1, 1)) - - def init_weights(self): - pass - - def forward(self, inputs): - """Forward function.""" - - if isinstance(inputs, tuple): - outs = tuple([self.gap(x) for x in inputs]) - outs = tuple( - [out.view(x.size(0), -1) for out, x in zip(outs, inputs)]) - elif isinstance(inputs, list): - outs = [self.gap(x) for x in inputs] - outs = [out.view(x.size(0), -1) for out, x in zip(outs, inputs)] - elif isinstance(inputs, torch.Tensor): - outs = self.gap(inputs) - outs = outs.view(inputs.size(0), -1) - else: - raise TypeError('neck inputs should be tuple or torch.tensor') - return outs +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn + +from mmpose.registry import MODELS + + +@MODELS.register_module() +class GlobalAveragePooling(nn.Module): + """Global Average Pooling neck. + + Note that we use `view` to remove extra channel after pooling. We do not + use `squeeze` as it will also remove the batch dimension when the tensor + has a batch dimension of size 1, which can lead to unexpected errors. 
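# A minimal illustration of the `view` vs. `squeeze` note above, with an
# assumed dummy input x = torch.rand(1, 256, 7, 7):
#   nn.AdaptiveAvgPool2d((1, 1))(x).squeeze()            -> shape (256,), batch dim lost
#   nn.AdaptiveAvgPool2d((1, 1))(x).view(x.size(0), -1)  -> shape (1, 256), batch dim kept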
+ """ + + def __init__(self): + super().__init__() + self.gap = nn.AdaptiveAvgPool2d((1, 1)) + + def init_weights(self): + pass + + def forward(self, inputs): + """Forward function.""" + + if isinstance(inputs, tuple): + outs = tuple([self.gap(x) for x in inputs]) + outs = tuple( + [out.view(x.size(0), -1) for out, x in zip(outs, inputs)]) + elif isinstance(inputs, list): + outs = [self.gap(x) for x in inputs] + outs = [out.view(x.size(0), -1) for out, x in zip(outs, inputs)] + elif isinstance(inputs, torch.Tensor): + outs = self.gap(inputs) + outs = outs.view(inputs.size(0), -1) + else: + raise TypeError('neck inputs should be tuple or torch.tensor') + return outs diff --git a/mmpose/models/necks/posewarper_neck.py b/mmpose/models/necks/posewarper_neck.py index 517fabd2e8..5bf675ab8f 100644 --- a/mmpose/models/necks/posewarper_neck.py +++ b/mmpose/models/necks/posewarper_neck.py @@ -1,329 +1,329 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import torch -import torch.nn as nn -from mmcv.cnn import build_conv_layer, build_norm_layer -from mmengine.model import constant_init, normal_init -from mmengine.utils import digit_version -from torch.nn.modules.batchnorm import _BatchNorm - -from mmpose.models.utils.ops import resize -from mmpose.registry import MODELS -from ..backbones.resnet import BasicBlock, Bottleneck - -try: - from mmcv.ops import DeformConv2d - has_mmcv_full = True -except (ImportError, ModuleNotFoundError): - has_mmcv_full = False - - -@MODELS.register_module() -class PoseWarperNeck(nn.Module): - """PoseWarper neck. - - `"Learning temporal pose estimation from sparsely-labeled videos" - `_. - - Args: - in_channels (int): Number of input channels from backbone - out_channels (int): Number of output channels - inner_channels (int): Number of intermediate channels of the res block - deform_groups (int): Number of groups in the deformable conv - dilations (list|tuple): different dilations of the offset conv layers - trans_conv_kernel (int): the kernel of the trans conv layer, which is - used to get heatmap from the output of backbone. Default: 1 - res_blocks_cfg (dict|None): config of residual blocks. If None, - use the default values. If not None, it should contain the - following keys: - - - block (str): the type of residual block, Default: 'BASIC'. - - num_blocks (int): the number of blocks, Default: 20. - - offsets_kernel (int): the kernel of offset conv layer. - deform_conv_kernel (int): the kernel of defomrable conv layer. - in_index (int|Sequence[int]): Input feature index. Default: 0 - input_transform (str|None): Transformation type of input features. - Options: 'resize_concat', 'multiple_select', None. - Default: None. - - - 'resize_concat': Multiple feature maps will be resize to \ - the same size as first one and than concat together. \ - Usually used in FCN head of HRNet. - - 'multiple_select': Multiple feature maps will be bundle into \ - a list and passed into decode head. - - None: Only one select feature map is allowed. - - freeze_trans_layer (bool): Whether to freeze the transition layer - (stop grad and set eval mode). Default: True. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Default: False. - im2col_step (int): the argument `im2col_step` in deformable conv, - Default: 80. 
- """ - blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck} - minimum_mmcv_version = '1.3.17' - - def __init__(self, - in_channels, - out_channels, - inner_channels, - deform_groups=17, - dilations=(3, 6, 12, 18, 24), - trans_conv_kernel=1, - res_blocks_cfg=None, - offsets_kernel=3, - deform_conv_kernel=3, - in_index=0, - input_transform=None, - freeze_trans_layer=True, - norm_eval=False, - im2col_step=80): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.inner_channels = inner_channels - self.deform_groups = deform_groups - self.dilations = dilations - self.trans_conv_kernel = trans_conv_kernel - self.res_blocks_cfg = res_blocks_cfg - self.offsets_kernel = offsets_kernel - self.deform_conv_kernel = deform_conv_kernel - self.in_index = in_index - self.input_transform = input_transform - self.freeze_trans_layer = freeze_trans_layer - self.norm_eval = norm_eval - self.im2col_step = im2col_step - - identity_trans_layer = False - - assert trans_conv_kernel in [0, 1, 3] - kernel_size = trans_conv_kernel - if kernel_size == 3: - padding = 1 - elif kernel_size == 1: - padding = 0 - else: - # 0 for Identity mapping. - identity_trans_layer = True - - if identity_trans_layer: - self.trans_layer = nn.Identity() - else: - self.trans_layer = build_conv_layer( - cfg=dict(type='Conv2d'), - in_channels=in_channels, - out_channels=out_channels, - kernel_size=kernel_size, - stride=1, - padding=padding) - - # build chain of residual blocks - if res_blocks_cfg is not None and not isinstance(res_blocks_cfg, dict): - raise TypeError('res_blocks_cfg should be dict or None.') - - if res_blocks_cfg is None: - block_type = 'BASIC' - num_blocks = 20 - else: - block_type = res_blocks_cfg.get('block', 'BASIC') - num_blocks = res_blocks_cfg.get('num_blocks', 20) - - block = self.blocks_dict[block_type] - - res_layers = [] - downsample = nn.Sequential( - build_conv_layer( - cfg=dict(type='Conv2d'), - in_channels=out_channels, - out_channels=inner_channels, - kernel_size=1, - stride=1, - bias=False), - build_norm_layer(dict(type='BN'), inner_channels)[1]) - res_layers.append( - block( - in_channels=out_channels, - out_channels=inner_channels, - downsample=downsample)) - - for _ in range(1, num_blocks): - res_layers.append(block(inner_channels, inner_channels)) - self.offset_feats = nn.Sequential(*res_layers) - - # build offset layers - self.num_offset_layers = len(dilations) - assert self.num_offset_layers > 0, 'Number of offset layers ' \ - 'should be larger than 0.' - - target_offset_channels = 2 * offsets_kernel**2 * deform_groups - - offset_layers = [ - build_conv_layer( - cfg=dict(type='Conv2d'), - in_channels=inner_channels, - out_channels=target_offset_channels, - kernel_size=offsets_kernel, - stride=1, - dilation=dilations[i], - padding=dilations[i], - bias=False, - ) for i in range(self.num_offset_layers) - ] - self.offset_layers = nn.ModuleList(offset_layers) - - # build deformable conv layers - assert digit_version(mmcv.__version__) >= \ - digit_version(self.minimum_mmcv_version), \ - f'Current MMCV version: {mmcv.__version__}, ' \ - f'but MMCV >= {self.minimum_mmcv_version} is required, see ' \ - f'https://github.com/open-mmlab/mmcv/issues/1440, ' \ - f'Please install the latest MMCV.' 
- - if has_mmcv_full: - deform_conv_layers = [ - DeformConv2d( - in_channels=out_channels, - out_channels=out_channels, - kernel_size=deform_conv_kernel, - stride=1, - padding=int(deform_conv_kernel / 2) * dilations[i], - dilation=dilations[i], - deform_groups=deform_groups, - im2col_step=self.im2col_step, - ) for i in range(self.num_offset_layers) - ] - else: - raise ImportError('Please install the full version of mmcv ' - 'to use `DeformConv2d`.') - - self.deform_conv_layers = nn.ModuleList(deform_conv_layers) - - self.freeze_layers() - - def freeze_layers(self): - if self.freeze_trans_layer: - self.trans_layer.eval() - - for param in self.trans_layer.parameters(): - param.requires_grad = False - - def init_weights(self): - for m in self.modules(): - if isinstance(m, nn.Conv2d): - normal_init(m, std=0.001) - elif isinstance(m, (_BatchNorm, nn.GroupNorm)): - constant_init(m, 1) - elif isinstance(m, DeformConv2d): - filler = torch.zeros([ - m.weight.size(0), - m.weight.size(1), - m.weight.size(2), - m.weight.size(3) - ], - dtype=torch.float32, - device=m.weight.device) - for k in range(m.weight.size(0)): - filler[k, k, - int(m.weight.size(2) / 2), - int(m.weight.size(3) / 2)] = 1.0 - m.weight = torch.nn.Parameter(filler) - m.weight.requires_grad = True - - # posewarper offset layer weight initialization - for m in self.offset_layers.modules(): - constant_init(m, 0) - - def _transform_inputs(self, inputs): - """Transform inputs for decoder. - - Args: - inputs (list[Tensor] | Tensor): multi-level img features. - - Returns: - Tensor: The transformed inputs - """ - if not isinstance(inputs, list): - return inputs - - if self.input_transform == 'resize_concat': - inputs = [inputs[i] for i in self.in_index] - upsampled_inputs = [ - resize( - input=x, - size=inputs[0].shape[2:], - mode='bilinear', - align_corners=self.align_corners) for x in inputs - ] - inputs = torch.cat(upsampled_inputs, dim=1) - elif self.input_transform == 'multiple_select': - inputs = [inputs[i] for i in self.in_index] - else: - inputs = inputs[self.in_index] - - return inputs - - def forward(self, inputs, frame_weight): - assert isinstance(inputs, (list, tuple)), 'PoseWarperNeck inputs ' \ - 'should be list or tuple, even though the length is 1, ' \ - 'for unified processing.' 
- - output_heatmap = 0 - if len(inputs) > 1: - inputs = [self._transform_inputs(input) for input in inputs] - inputs = [self.trans_layer(input) for input in inputs] - - # calculate difference features - diff_features = [ - self.offset_feats(inputs[0] - input) for input in inputs - ] - - for i in range(len(inputs)): - if frame_weight[i] == 0: - continue - warped_heatmap = 0 - for j in range(self.num_offset_layers): - offset = (self.offset_layers[j](diff_features[i])) - warped_heatmap_tmp = self.deform_conv_layers[j](inputs[i], - offset) - warped_heatmap += warped_heatmap_tmp / \ - self.num_offset_layers - - output_heatmap += warped_heatmap * frame_weight[i] - - else: - inputs = inputs[0] - inputs = self._transform_inputs(inputs) - inputs = self.trans_layer(inputs) - - num_frames = len(frame_weight) - batch_size = inputs.size(0) // num_frames - ref_x = inputs[:batch_size] - ref_x_tiled = ref_x.repeat(num_frames, 1, 1, 1) - - offset_features = self.offset_feats(ref_x_tiled - inputs) - - warped_heatmap = 0 - for j in range(self.num_offset_layers): - offset = self.offset_layers[j](offset_features) - - warped_heatmap_tmp = self.deform_conv_layers[j](inputs, offset) - warped_heatmap += warped_heatmap_tmp / self.num_offset_layers - - for i in range(num_frames): - if frame_weight[i] == 0: - continue - output_heatmap += warped_heatmap[i * batch_size:(i + 1) * - batch_size] * frame_weight[i] - - return output_heatmap - - def train(self, mode=True): - """Convert the model into training mode.""" - super().train(mode) - self.freeze_layers() - if mode and self.norm_eval: - for m in self.modules(): - if isinstance(m, _BatchNorm): - m.eval() +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import torch +import torch.nn as nn +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmengine.model import constant_init, normal_init +from mmengine.utils import digit_version +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.models.utils.ops import resize +from mmpose.registry import MODELS +from ..backbones.resnet import BasicBlock, Bottleneck + +try: + from mmcv.ops import DeformConv2d + has_mmcv_full = True +except (ImportError, ModuleNotFoundError): + has_mmcv_full = False + + +@MODELS.register_module() +class PoseWarperNeck(nn.Module): + """PoseWarper neck. + + `"Learning temporal pose estimation from sparsely-labeled videos" + `_. + + Args: + in_channels (int): Number of input channels from backbone + out_channels (int): Number of output channels + inner_channels (int): Number of intermediate channels of the res block + deform_groups (int): Number of groups in the deformable conv + dilations (list|tuple): different dilations of the offset conv layers + trans_conv_kernel (int): the kernel of the trans conv layer, which is + used to get heatmap from the output of backbone. Default: 1 + res_blocks_cfg (dict|None): config of residual blocks. If None, + use the default values. If not None, it should contain the + following keys: + + - block (str): the type of residual block, Default: 'BASIC'. + - num_blocks (int): the number of blocks, Default: 20. + + offsets_kernel (int): the kernel of offset conv layer. + deform_conv_kernel (int): the kernel of defomrable conv layer. + in_index (int|Sequence[int]): Input feature index. Default: 0 + input_transform (str|None): Transformation type of input features. + Options: 'resize_concat', 'multiple_select', None. + Default: None. 
+ + - 'resize_concat': Multiple feature maps will be resize to \ + the same size as first one and than concat together. \ + Usually used in FCN head of HRNet. + - 'multiple_select': Multiple feature maps will be bundle into \ + a list and passed into decode head. + - None: Only one select feature map is allowed. + + freeze_trans_layer (bool): Whether to freeze the transition layer + (stop grad and set eval mode). Default: True. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + im2col_step (int): the argument `im2col_step` in deformable conv, + Default: 80. + """ + blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck} + minimum_mmcv_version = '1.3.17' + + def __init__(self, + in_channels, + out_channels, + inner_channels, + deform_groups=17, + dilations=(3, 6, 12, 18, 24), + trans_conv_kernel=1, + res_blocks_cfg=None, + offsets_kernel=3, + deform_conv_kernel=3, + in_index=0, + input_transform=None, + freeze_trans_layer=True, + norm_eval=False, + im2col_step=80): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.inner_channels = inner_channels + self.deform_groups = deform_groups + self.dilations = dilations + self.trans_conv_kernel = trans_conv_kernel + self.res_blocks_cfg = res_blocks_cfg + self.offsets_kernel = offsets_kernel + self.deform_conv_kernel = deform_conv_kernel + self.in_index = in_index + self.input_transform = input_transform + self.freeze_trans_layer = freeze_trans_layer + self.norm_eval = norm_eval + self.im2col_step = im2col_step + + identity_trans_layer = False + + assert trans_conv_kernel in [0, 1, 3] + kernel_size = trans_conv_kernel + if kernel_size == 3: + padding = 1 + elif kernel_size == 1: + padding = 0 + else: + # 0 for Identity mapping. + identity_trans_layer = True + + if identity_trans_layer: + self.trans_layer = nn.Identity() + else: + self.trans_layer = build_conv_layer( + cfg=dict(type='Conv2d'), + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=1, + padding=padding) + + # build chain of residual blocks + if res_blocks_cfg is not None and not isinstance(res_blocks_cfg, dict): + raise TypeError('res_blocks_cfg should be dict or None.') + + if res_blocks_cfg is None: + block_type = 'BASIC' + num_blocks = 20 + else: + block_type = res_blocks_cfg.get('block', 'BASIC') + num_blocks = res_blocks_cfg.get('num_blocks', 20) + + block = self.blocks_dict[block_type] + + res_layers = [] + downsample = nn.Sequential( + build_conv_layer( + cfg=dict(type='Conv2d'), + in_channels=out_channels, + out_channels=inner_channels, + kernel_size=1, + stride=1, + bias=False), + build_norm_layer(dict(type='BN'), inner_channels)[1]) + res_layers.append( + block( + in_channels=out_channels, + out_channels=inner_channels, + downsample=downsample)) + + for _ in range(1, num_blocks): + res_layers.append(block(inner_channels, inner_channels)) + self.offset_feats = nn.Sequential(*res_layers) + + # build offset layers + self.num_offset_layers = len(dilations) + assert self.num_offset_layers > 0, 'Number of offset layers ' \ + 'should be larger than 0.' 
+ + target_offset_channels = 2 * offsets_kernel**2 * deform_groups + + offset_layers = [ + build_conv_layer( + cfg=dict(type='Conv2d'), + in_channels=inner_channels, + out_channels=target_offset_channels, + kernel_size=offsets_kernel, + stride=1, + dilation=dilations[i], + padding=dilations[i], + bias=False, + ) for i in range(self.num_offset_layers) + ] + self.offset_layers = nn.ModuleList(offset_layers) + + # build deformable conv layers + assert digit_version(mmcv.__version__) >= \ + digit_version(self.minimum_mmcv_version), \ + f'Current MMCV version: {mmcv.__version__}, ' \ + f'but MMCV >= {self.minimum_mmcv_version} is required, see ' \ + f'https://github.com/open-mmlab/mmcv/issues/1440, ' \ + f'Please install the latest MMCV.' + + if has_mmcv_full: + deform_conv_layers = [ + DeformConv2d( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=deform_conv_kernel, + stride=1, + padding=int(deform_conv_kernel / 2) * dilations[i], + dilation=dilations[i], + deform_groups=deform_groups, + im2col_step=self.im2col_step, + ) for i in range(self.num_offset_layers) + ] + else: + raise ImportError('Please install the full version of mmcv ' + 'to use `DeformConv2d`.') + + self.deform_conv_layers = nn.ModuleList(deform_conv_layers) + + self.freeze_layers() + + def freeze_layers(self): + if self.freeze_trans_layer: + self.trans_layer.eval() + + for param in self.trans_layer.parameters(): + param.requires_grad = False + + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + normal_init(m, std=0.001) + elif isinstance(m, (_BatchNorm, nn.GroupNorm)): + constant_init(m, 1) + elif isinstance(m, DeformConv2d): + filler = torch.zeros([ + m.weight.size(0), + m.weight.size(1), + m.weight.size(2), + m.weight.size(3) + ], + dtype=torch.float32, + device=m.weight.device) + for k in range(m.weight.size(0)): + filler[k, k, + int(m.weight.size(2) / 2), + int(m.weight.size(3) / 2)] = 1.0 + m.weight = torch.nn.Parameter(filler) + m.weight.requires_grad = True + + # posewarper offset layer weight initialization + for m in self.offset_layers.modules(): + constant_init(m, 0) + + def _transform_inputs(self, inputs): + """Transform inputs for decoder. + + Args: + inputs (list[Tensor] | Tensor): multi-level img features. + + Returns: + Tensor: The transformed inputs + """ + if not isinstance(inputs, list): + return inputs + + if self.input_transform == 'resize_concat': + inputs = [inputs[i] for i in self.in_index] + upsampled_inputs = [ + resize( + input=x, + size=inputs[0].shape[2:], + mode='bilinear', + align_corners=self.align_corners) for x in inputs + ] + inputs = torch.cat(upsampled_inputs, dim=1) + elif self.input_transform == 'multiple_select': + inputs = [inputs[i] for i in self.in_index] + else: + inputs = inputs[self.in_index] + + return inputs + + def forward(self, inputs, frame_weight): + assert isinstance(inputs, (list, tuple)), 'PoseWarperNeck inputs ' \ + 'should be list or tuple, even though the length is 1, ' \ + 'for unified processing.' 
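# Sketch of the multi-frame aggregation implemented in forward() below: for
# every supporting frame i with a nonzero frame_weight[i], offsets are
# predicted from the difference features offset_feats(inputs[0] - inputs[i]),
# each of the len(dilations) deformable convs warps that frame, the warped
# heatmaps are averaged over the dilation branches, and the result is
# accumulated as
#   output_heatmap += frame_weight[i] * mean_j(deform_conv_j(inputs[i], offset_j))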
+ + output_heatmap = 0 + if len(inputs) > 1: + inputs = [self._transform_inputs(input) for input in inputs] + inputs = [self.trans_layer(input) for input in inputs] + + # calculate difference features + diff_features = [ + self.offset_feats(inputs[0] - input) for input in inputs + ] + + for i in range(len(inputs)): + if frame_weight[i] == 0: + continue + warped_heatmap = 0 + for j in range(self.num_offset_layers): + offset = (self.offset_layers[j](diff_features[i])) + warped_heatmap_tmp = self.deform_conv_layers[j](inputs[i], + offset) + warped_heatmap += warped_heatmap_tmp / \ + self.num_offset_layers + + output_heatmap += warped_heatmap * frame_weight[i] + + else: + inputs = inputs[0] + inputs = self._transform_inputs(inputs) + inputs = self.trans_layer(inputs) + + num_frames = len(frame_weight) + batch_size = inputs.size(0) // num_frames + ref_x = inputs[:batch_size] + ref_x_tiled = ref_x.repeat(num_frames, 1, 1, 1) + + offset_features = self.offset_feats(ref_x_tiled - inputs) + + warped_heatmap = 0 + for j in range(self.num_offset_layers): + offset = self.offset_layers[j](offset_features) + + warped_heatmap_tmp = self.deform_conv_layers[j](inputs, offset) + warped_heatmap += warped_heatmap_tmp / self.num_offset_layers + + for i in range(num_frames): + if frame_weight[i] == 0: + continue + output_heatmap += warped_heatmap[i * batch_size:(i + 1) * + batch_size] * frame_weight[i] + + return output_heatmap + + def train(self, mode=True): + """Convert the model into training mode.""" + super().train(mode) + self.freeze_layers() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmpose/models/pose_estimators/__init__.py b/mmpose/models/pose_estimators/__init__.py index c5287e0c2c..a5af255256 100644 --- a/mmpose/models/pose_estimators/__init__.py +++ b/mmpose/models/pose_estimators/__init__.py @@ -1,6 +1,6 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .bottomup import BottomupPoseEstimator -from .pose_lifter import PoseLifter -from .topdown import TopdownPoseEstimator - -__all__ = ['TopdownPoseEstimator', 'BottomupPoseEstimator', 'PoseLifter'] +# Copyright (c) OpenMMLab. All rights reserved. +from .bottomup import BottomupPoseEstimator +from .pose_lifter import PoseLifter +from .topdown import TopdownPoseEstimator + +__all__ = ['TopdownPoseEstimator', 'BottomupPoseEstimator', 'PoseLifter'] diff --git a/mmpose/models/pose_estimators/base.py b/mmpose/models/pose_estimators/base.py index 0ae921d0ec..7bb0711d80 100644 --- a/mmpose/models/pose_estimators/base.py +++ b/mmpose/models/pose_estimators/base.py @@ -1,212 +1,212 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from abc import ABCMeta, abstractmethod -from typing import Tuple, Union - -import torch -from mmengine.model import BaseModel -from torch import Tensor - -from mmpose.datasets.datasets.utils import parse_pose_metainfo -from mmpose.models.utils import check_and_update_config -from mmpose.registry import MODELS -from mmpose.utils.typing import (ConfigType, ForwardResults, OptConfigType, - Optional, OptMultiConfig, OptSampleList, - SampleList) - - -class BasePoseEstimator(BaseModel, metaclass=ABCMeta): - """Base class for pose estimators. - - Args: - data_preprocessor (dict | ConfigDict, optional): The pre-processing - config of :class:`BaseDataPreprocessor`. Defaults to ``None`` - init_cfg (dict | ConfigDict): The model initialization config. 
- Defaults to ``None`` - metainfo (dict): Meta information for dataset, such as keypoints - definition and properties. If set, the metainfo of the input data - batch will be overridden. For more details, please refer to - https://mmpose.readthedocs.io/en/latest/user_guides/ - prepare_datasets.html#create-a-custom-dataset-info- - config-file-for-the-dataset. Defaults to ``None`` - """ - _version = 2 - - def __init__(self, - backbone: ConfigType, - neck: OptConfigType = None, - head: OptConfigType = None, - train_cfg: OptConfigType = None, - test_cfg: OptConfigType = None, - data_preprocessor: OptConfigType = None, - init_cfg: OptMultiConfig = None, - metainfo: Optional[dict] = None): - super().__init__( - data_preprocessor=data_preprocessor, init_cfg=init_cfg) - self.metainfo = self._load_metainfo(metainfo) - - self.backbone = MODELS.build(backbone) - - # the PR #2108 and #2126 modified the interface of neck and head. - # The following function automatically detects outdated - # configurations and updates them accordingly, while also providing - # clear and concise information on the changes made. - neck, head = check_and_update_config(neck, head) - - if neck is not None: - self.neck = MODELS.build(neck) - - if head is not None: - self.head = MODELS.build(head) - - self.train_cfg = train_cfg if train_cfg else {} - self.test_cfg = test_cfg if test_cfg else {} - - # Register the hook to automatically convert old version state dicts - self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) - - @property - def with_neck(self) -> bool: - """bool: whether the pose estimator has a neck.""" - return hasattr(self, 'neck') and self.neck is not None - - @property - def with_head(self) -> bool: - """bool: whether the pose estimator has a head.""" - return hasattr(self, 'head') and self.head is not None - - @staticmethod - def _load_metainfo(metainfo: dict = None) -> dict: - """Collect meta information from the dictionary of meta. - - Args: - metainfo (dict): Raw data of pose meta information. - - Returns: - dict: Parsed meta information. - """ - - if metainfo is None: - return None - - if not isinstance(metainfo, dict): - raise TypeError( - f'metainfo should be a dict, but got {type(metainfo)}') - - metainfo = parse_pose_metainfo(metainfo) - return metainfo - - def forward(self, - inputs: torch.Tensor, - data_samples: OptSampleList, - mode: str = 'tensor') -> ForwardResults: - """The unified entry for a forward process in both training and test. - - The method should accept three modes: 'tensor', 'predict' and 'loss': - - - 'tensor': Forward the whole network and return tensor or tuple of - tensor without any post-processing, same as a common nn.Module. - - 'predict': Forward and return the predictions, which are fully - processed to a list of :obj:`PoseDataSample`. - - 'loss': Forward and return a dict of losses according to the given - inputs and data samples. - - Note that this method doesn't handle neither back propagation nor - optimizer updating, which are done in the :meth:`train_step`. - - Args: - inputs (torch.Tensor): The input tensor with shape - (N, C, ...) in general - data_samples (list[:obj:`PoseDataSample`], optional): The - annotation of every sample. Defaults to ``None`` - mode (str): Set the forward mode and return value type. Defaults - to ``'tensor'`` - - Returns: - The return type depends on ``mode``. 
- - - If ``mode='tensor'``, return a tensor or a tuple of tensors - - If ``mode='predict'``, return a list of :obj:``PoseDataSample`` - that contains the pose predictions - - If ``mode='loss'``, return a dict of tensor(s) which is the loss - function value - """ - if isinstance(inputs, list): - inputs = torch.stack(inputs) - if mode == 'loss': - return self.loss(inputs, data_samples) - elif mode == 'predict': - # use customed metainfo to override the default metainfo - if self.metainfo is not None: - for data_sample in data_samples: - data_sample.set_metainfo(self.metainfo) - return self.predict(inputs, data_samples) - elif mode == 'tensor': - return self._forward(inputs) - else: - raise RuntimeError(f'Invalid mode "{mode}". ' - 'Only supports loss, predict and tensor mode.') - - @abstractmethod - def loss(self, inputs: Tensor, data_samples: SampleList) -> dict: - """Calculate losses from a batch of inputs and data samples.""" - - @abstractmethod - def predict(self, inputs: Tensor, data_samples: SampleList) -> SampleList: - """Predict results from a batch of inputs and data samples with post- - processing.""" - - def _forward(self, - inputs: Tensor, - data_samples: OptSampleList = None - ) -> Union[Tensor, Tuple[Tensor]]: - """Network forward process. Usually includes backbone, neck and head - forward without any post-processing. - - Args: - inputs (Tensor): Inputs with shape (N, C, H, W). - - Returns: - Union[Tensor | Tuple[Tensor]]: forward output of the network. - """ - - x = self.extract_feat(inputs) - if self.with_head: - x = self.head.forward(x) - - return x - - def extract_feat(self, inputs: Tensor) -> Tuple[Tensor]: - """Extract features. - - Args: - inputs (Tensor): Image tensor with shape (N, C, H ,W). - - Returns: - tuple[Tensor]: Multi-level features that may have various - resolutions. - """ - x = self.backbone(inputs) - if self.with_neck: - x = self.neck(x) - - return x - - def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, - **kwargs): - """A hook function to convert old-version state dict of - :class:`TopdownHeatmapSimpleHead` (before MMPose v1.0.0) to a - compatible format of :class:`HeatmapHead`. - - The hook will be automatically registered during initialization. - """ - version = local_meta.get('version', None) - if version and version >= self._version: - return - - # convert old-version state dict - keys = list(state_dict.keys()) - for k in keys: - if 'keypoint_head' in k: - v = state_dict.pop(k) - k = k.replace('keypoint_head', 'head') - state_dict[k] = v +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod +from typing import Tuple, Union + +import torch +from mmengine.model import BaseModel +from torch import Tensor + +from mmpose.datasets.datasets.utils import parse_pose_metainfo +from mmpose.models.utils import check_and_update_config +from mmpose.registry import MODELS +from mmpose.utils.typing import (ConfigType, ForwardResults, OptConfigType, + Optional, OptMultiConfig, OptSampleList, + SampleList) + + +class BasePoseEstimator(BaseModel, metaclass=ABCMeta): + """Base class for pose estimators. + + Args: + data_preprocessor (dict | ConfigDict, optional): The pre-processing + config of :class:`BaseDataPreprocessor`. Defaults to ``None`` + init_cfg (dict | ConfigDict): The model initialization config. + Defaults to ``None`` + metainfo (dict): Meta information for dataset, such as keypoints + definition and properties. If set, the metainfo of the input data + batch will be overridden. 
For more details, please refer to + https://mmpose.readthedocs.io/en/latest/user_guides/ + prepare_datasets.html#create-a-custom-dataset-info- + config-file-for-the-dataset. Defaults to ``None`` + """ + _version = 2 + + def __init__(self, + backbone: ConfigType, + neck: OptConfigType = None, + head: OptConfigType = None, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + data_preprocessor: OptConfigType = None, + init_cfg: OptMultiConfig = None, + metainfo: Optional[dict] = None): + super().__init__( + data_preprocessor=data_preprocessor, init_cfg=init_cfg) + self.metainfo = self._load_metainfo(metainfo) + + self.backbone = MODELS.build(backbone) + + # the PR #2108 and #2126 modified the interface of neck and head. + # The following function automatically detects outdated + # configurations and updates them accordingly, while also providing + # clear and concise information on the changes made. + neck, head = check_and_update_config(neck, head) + + if neck is not None: + self.neck = MODELS.build(neck) + + if head is not None: + self.head = MODELS.build(head) + + self.train_cfg = train_cfg if train_cfg else {} + self.test_cfg = test_cfg if test_cfg else {} + + # Register the hook to automatically convert old version state dicts + self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) + + @property + def with_neck(self) -> bool: + """bool: whether the pose estimator has a neck.""" + return hasattr(self, 'neck') and self.neck is not None + + @property + def with_head(self) -> bool: + """bool: whether the pose estimator has a head.""" + return hasattr(self, 'head') and self.head is not None + + @staticmethod + def _load_metainfo(metainfo: dict = None) -> dict: + """Collect meta information from the dictionary of meta. + + Args: + metainfo (dict): Raw data of pose meta information. + + Returns: + dict: Parsed meta information. + """ + + if metainfo is None: + return None + + if not isinstance(metainfo, dict): + raise TypeError( + f'metainfo should be a dict, but got {type(metainfo)}') + + metainfo = parse_pose_metainfo(metainfo) + return metainfo + + def forward(self, + inputs: torch.Tensor, + data_samples: OptSampleList, + mode: str = 'tensor') -> ForwardResults: + """The unified entry for a forward process in both training and test. + + The method should accept three modes: 'tensor', 'predict' and 'loss': + + - 'tensor': Forward the whole network and return tensor or tuple of + tensor without any post-processing, same as a common nn.Module. + - 'predict': Forward and return the predictions, which are fully + processed to a list of :obj:`PoseDataSample`. + - 'loss': Forward and return a dict of losses according to the given + inputs and data samples. + + Note that this method doesn't handle neither back propagation nor + optimizer updating, which are done in the :meth:`train_step`. + + Args: + inputs (torch.Tensor): The input tensor with shape + (N, C, ...) in general + data_samples (list[:obj:`PoseDataSample`], optional): The + annotation of every sample. Defaults to ``None`` + mode (str): Set the forward mode and return value type. Defaults + to ``'tensor'`` + + Returns: + The return type depends on ``mode``. 
+ + - If ``mode='tensor'``, return a tensor or a tuple of tensors + - If ``mode='predict'``, return a list of :obj:``PoseDataSample`` + that contains the pose predictions + - If ``mode='loss'``, return a dict of tensor(s) which is the loss + function value + """ + if isinstance(inputs, list): + inputs = torch.stack(inputs) + if mode == 'loss': + return self.loss(inputs, data_samples) + elif mode == 'predict': + # use customed metainfo to override the default metainfo + if self.metainfo is not None: + for data_sample in data_samples: + data_sample.set_metainfo(self.metainfo) + return self.predict(inputs, data_samples) + elif mode == 'tensor': + return self._forward(inputs) + else: + raise RuntimeError(f'Invalid mode "{mode}". ' + 'Only supports loss, predict and tensor mode.') + + @abstractmethod + def loss(self, inputs: Tensor, data_samples: SampleList) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + @abstractmethod + def predict(self, inputs: Tensor, data_samples: SampleList) -> SampleList: + """Predict results from a batch of inputs and data samples with post- + processing.""" + + def _forward(self, + inputs: Tensor, + data_samples: OptSampleList = None + ) -> Union[Tensor, Tuple[Tensor]]: + """Network forward process. Usually includes backbone, neck and head + forward without any post-processing. + + Args: + inputs (Tensor): Inputs with shape (N, C, H, W). + + Returns: + Union[Tensor | Tuple[Tensor]]: forward output of the network. + """ + + x = self.extract_feat(inputs) + if self.with_head: + x = self.head.forward(x) + + return x + + def extract_feat(self, inputs: Tensor) -> Tuple[Tensor]: + """Extract features. + + Args: + inputs (Tensor): Image tensor with shape (N, C, H ,W). + + Returns: + tuple[Tensor]: Multi-level features that may have various + resolutions. + """ + x = self.backbone(inputs) + if self.with_neck: + x = self.neck(x) + + return x + + def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, + **kwargs): + """A hook function to convert old-version state dict of + :class:`TopdownHeatmapSimpleHead` (before MMPose v1.0.0) to a + compatible format of :class:`HeatmapHead`. + + The hook will be automatically registered during initialization. + """ + version = local_meta.get('version', None) + if version and version >= self._version: + return + + # convert old-version state dict + keys = list(state_dict.keys()) + for k in keys: + if 'keypoint_head' in k: + v = state_dict.pop(k) + k = k.replace('keypoint_head', 'head') + state_dict[k] = v diff --git a/mmpose/models/pose_estimators/bottomup.py b/mmpose/models/pose_estimators/bottomup.py index 5400f2478e..8b7a6d8ec2 100644 --- a/mmpose/models/pose_estimators/bottomup.py +++ b/mmpose/models/pose_estimators/bottomup.py @@ -1,178 +1,178 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from itertools import zip_longest -from typing import List, Optional, Union - -from mmengine.utils import is_list_of -from torch import Tensor - -from mmpose.registry import MODELS -from mmpose.utils.typing import (ConfigType, InstanceList, OptConfigType, - OptMultiConfig, PixelDataList, SampleList) -from .base import BasePoseEstimator - - -@MODELS.register_module() -class BottomupPoseEstimator(BasePoseEstimator): - """Base class for bottom-up pose estimators. - - Args: - backbone (dict): The backbone config - neck (dict, optional): The neck config. Defaults to ``None`` - head (dict, optional): The head config. 
Defaults to ``None`` - train_cfg (dict, optional): The runtime config for training process. - Defaults to ``None`` - test_cfg (dict, optional): The runtime config for testing process. - Defaults to ``None`` - data_preprocessor (dict, optional): The data preprocessing config to - build the instance of :class:`BaseDataPreprocessor`. Defaults to - ``None``. - init_cfg (dict, optional): The config to control the initialization. - Defaults to ``None`` - """ - - def __init__(self, - backbone: ConfigType, - neck: OptConfigType = None, - head: OptConfigType = None, - train_cfg: OptConfigType = None, - test_cfg: OptConfigType = None, - data_preprocessor: OptConfigType = None, - init_cfg: OptMultiConfig = None): - super().__init__( - backbone=backbone, - neck=neck, - head=head, - train_cfg=train_cfg, - test_cfg=test_cfg, - data_preprocessor=data_preprocessor, - init_cfg=init_cfg) - - def loss(self, inputs: Tensor, data_samples: SampleList) -> dict: - """Calculate losses from a batch of inputs and data samples. - - Args: - inputs (Tensor): Inputs with shape (N, C, H, W). - data_samples (List[:obj:`PoseDataSample`]): The batch - data samples. - - Returns: - dict: A dictionary of losses. - """ - feats = self.extract_feat(inputs) - - losses = dict() - - if self.with_head: - losses.update( - self.head.loss(feats, data_samples, train_cfg=self.train_cfg)) - - return losses - - def predict(self, inputs: Union[Tensor, List[Tensor]], - data_samples: SampleList) -> SampleList: - """Predict results from a batch of inputs and data samples with post- - processing. - - Args: - inputs (Tensor | List[Tensor]): Input image in tensor or image - pyramid as a list of tensors. Each tensor is in shape - [B, C, H, W] - data_samples (List[:obj:`PoseDataSample`]): The batch - data samples - - Returns: - list[:obj:`PoseDataSample`]: The pose estimation results of the - input images. 
The return value is `PoseDataSample` instances with - ``pred_instances`` and ``pred_fields``(optional) field , and - ``pred_instances`` usually contains the following keys: - - - keypoints (Tensor): predicted keypoint coordinates in shape - (num_instances, K, D) where K is the keypoint number and D - is the keypoint dimension - - keypoint_scores (Tensor): predicted keypoint scores in shape - (num_instances, K) - """ - assert self.with_head, ( - 'The model must have head to perform prediction.') - - multiscale_test = self.test_cfg.get('multiscale_test', False) - flip_test = self.test_cfg.get('flip_test', False) - - # enable multi-scale test - aug_scales = data_samples[0].metainfo.get('aug_scales', None) - if multiscale_test: - assert isinstance(aug_scales, list) - assert is_list_of(inputs, Tensor) - # `inputs` includes images in original and augmented scales - assert len(inputs) == len(aug_scales) + 1 - else: - assert isinstance(inputs, Tensor) - # single-scale test - inputs = [inputs] - - feats = [] - for _inputs in inputs: - if flip_test: - _feats_orig = self.extract_feat(_inputs) - _feats_flip = self.extract_feat(_inputs.flip(-1)) - _feats = [_feats_orig, _feats_flip] - else: - _feats = self.extract_feat(_inputs) - - feats.append(_feats) - - if not multiscale_test: - feats = feats[0] - - preds = self.head.predict(feats, data_samples, test_cfg=self.test_cfg) - - if isinstance(preds, tuple): - batch_pred_instances, batch_pred_fields = preds - else: - batch_pred_instances = preds - batch_pred_fields = None - - results = self.add_pred_to_datasample(batch_pred_instances, - batch_pred_fields, data_samples) - - return results - - def add_pred_to_datasample(self, batch_pred_instances: InstanceList, - batch_pred_fields: Optional[PixelDataList], - batch_data_samples: SampleList) -> SampleList: - """Add predictions into data samples. - - Args: - batch_pred_instances (List[InstanceData]): The predicted instances - of the input data batch - batch_pred_fields (List[PixelData], optional): The predicted - fields (e.g. heatmaps) of the input batch - batch_data_samples (List[PoseDataSample]): The input data batch - - Returns: - List[PoseDataSample]: A list of data samples where the predictions - are stored in the ``pred_instances`` field of each data sample. - The length of the list is the batch size when ``merge==False``, or - 1 when ``merge==True``. - """ - assert len(batch_pred_instances) == len(batch_data_samples) - if batch_pred_fields is None: - batch_pred_fields = [] - - for pred_instances, pred_fields, data_sample in zip_longest( - batch_pred_instances, batch_pred_fields, batch_data_samples): - - # convert keypoint coordinates from input space to image space - input_size = data_sample.metainfo['input_size'] - input_center = data_sample.metainfo['input_center'] - input_scale = data_sample.metainfo['input_scale'] - - pred_instances.keypoints = pred_instances.keypoints / input_size \ - * input_scale + input_center - 0.5 * input_scale - - data_sample.pred_instances = pred_instances - - if pred_fields is not None: - data_sample.pred_fields = pred_fields - - return batch_data_samples +# Copyright (c) OpenMMLab. All rights reserved. 
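# Minimal sketch of the test-time options read by predict() above via
# self.test_cfg.get(...), with hypothetical values:
#   test_cfg = dict(
#       multiscale_test=True,  # `inputs` must then be a list of tensors with
#                              # len(inputs) == len(metainfo['aug_scales']) + 1
#       flip_test=True,        # features are also extracted from inputs.flip(-1)
#   )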
+from itertools import zip_longest +from typing import List, Optional, Union + +from mmengine.utils import is_list_of +from torch import Tensor + +from mmpose.registry import MODELS +from mmpose.utils.typing import (ConfigType, InstanceList, OptConfigType, + OptMultiConfig, PixelDataList, SampleList) +from .base import BasePoseEstimator + + +@MODELS.register_module() +class BottomupPoseEstimator(BasePoseEstimator): + """Base class for bottom-up pose estimators. + + Args: + backbone (dict): The backbone config + neck (dict, optional): The neck config. Defaults to ``None`` + head (dict, optional): The head config. Defaults to ``None`` + train_cfg (dict, optional): The runtime config for training process. + Defaults to ``None`` + test_cfg (dict, optional): The runtime config for testing process. + Defaults to ``None`` + data_preprocessor (dict, optional): The data preprocessing config to + build the instance of :class:`BaseDataPreprocessor`. Defaults to + ``None``. + init_cfg (dict, optional): The config to control the initialization. + Defaults to ``None`` + """ + + def __init__(self, + backbone: ConfigType, + neck: OptConfigType = None, + head: OptConfigType = None, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + data_preprocessor: OptConfigType = None, + init_cfg: OptMultiConfig = None): + super().__init__( + backbone=backbone, + neck=neck, + head=head, + train_cfg=train_cfg, + test_cfg=test_cfg, + data_preprocessor=data_preprocessor, + init_cfg=init_cfg) + + def loss(self, inputs: Tensor, data_samples: SampleList) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + inputs (Tensor): Inputs with shape (N, C, H, W). + data_samples (List[:obj:`PoseDataSample`]): The batch + data samples. + + Returns: + dict: A dictionary of losses. + """ + feats = self.extract_feat(inputs) + + losses = dict() + + if self.with_head: + losses.update( + self.head.loss(feats, data_samples, train_cfg=self.train_cfg)) + + return losses + + def predict(self, inputs: Union[Tensor, List[Tensor]], + data_samples: SampleList) -> SampleList: + """Predict results from a batch of inputs and data samples with post- + processing. + + Args: + inputs (Tensor | List[Tensor]): Input image in tensor or image + pyramid as a list of tensors. Each tensor is in shape + [B, C, H, W] + data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + + Returns: + list[:obj:`PoseDataSample`]: The pose estimation results of the + input images. 
The return value is `PoseDataSample` instances with + ``pred_instances`` and ``pred_fields``(optional) field , and + ``pred_instances`` usually contains the following keys: + + - keypoints (Tensor): predicted keypoint coordinates in shape + (num_instances, K, D) where K is the keypoint number and D + is the keypoint dimension + - keypoint_scores (Tensor): predicted keypoint scores in shape + (num_instances, K) + """ + assert self.with_head, ( + 'The model must have head to perform prediction.') + + multiscale_test = self.test_cfg.get('multiscale_test', False) + flip_test = self.test_cfg.get('flip_test', False) + + # enable multi-scale test + aug_scales = data_samples[0].metainfo.get('aug_scales', None) + if multiscale_test: + assert isinstance(aug_scales, list) + assert is_list_of(inputs, Tensor) + # `inputs` includes images in original and augmented scales + assert len(inputs) == len(aug_scales) + 1 + else: + assert isinstance(inputs, Tensor) + # single-scale test + inputs = [inputs] + + feats = [] + for _inputs in inputs: + if flip_test: + _feats_orig = self.extract_feat(_inputs) + _feats_flip = self.extract_feat(_inputs.flip(-1)) + _feats = [_feats_orig, _feats_flip] + else: + _feats = self.extract_feat(_inputs) + + feats.append(_feats) + + if not multiscale_test: + feats = feats[0] + + preds = self.head.predict(feats, data_samples, test_cfg=self.test_cfg) + + if isinstance(preds, tuple): + batch_pred_instances, batch_pred_fields = preds + else: + batch_pred_instances = preds + batch_pred_fields = None + + results = self.add_pred_to_datasample(batch_pred_instances, + batch_pred_fields, data_samples) + + return results + + def add_pred_to_datasample(self, batch_pred_instances: InstanceList, + batch_pred_fields: Optional[PixelDataList], + batch_data_samples: SampleList) -> SampleList: + """Add predictions into data samples. + + Args: + batch_pred_instances (List[InstanceData]): The predicted instances + of the input data batch + batch_pred_fields (List[PixelData], optional): The predicted + fields (e.g. heatmaps) of the input batch + batch_data_samples (List[PoseDataSample]): The input data batch + + Returns: + List[PoseDataSample]: A list of data samples where the predictions + are stored in the ``pred_instances`` field of each data sample. + The length of the list is the batch size when ``merge==False``, or + 1 when ``merge==True``. + """ + assert len(batch_pred_instances) == len(batch_data_samples) + if batch_pred_fields is None: + batch_pred_fields = [] + + for pred_instances, pred_fields, data_sample in zip_longest( + batch_pred_instances, batch_pred_fields, batch_data_samples): + + # convert keypoint coordinates from input space to image space + input_size = data_sample.metainfo['input_size'] + input_center = data_sample.metainfo['input_center'] + input_scale = data_sample.metainfo['input_scale'] + + pred_instances.keypoints = pred_instances.keypoints / input_size \ + * input_scale + input_center - 0.5 * input_scale + + data_sample.pred_instances = pred_instances + + if pred_fields is not None: + data_sample.pred_fields = pred_fields + + return batch_data_samples diff --git a/mmpose/models/pose_estimators/pose_lifter.py b/mmpose/models/pose_estimators/pose_lifter.py index 5bad3dde3c..5069b8736a 100644 --- a/mmpose/models/pose_estimators/pose_lifter.py +++ b/mmpose/models/pose_estimators/pose_lifter.py @@ -1,340 +1,340 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
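# --- Illustrative sketch, not part of the diff above ---
# A test_cfg that exercises the flip/multi-scale branches handled in
# `BottomupPoseEstimator.predict`. Only `flip_test` and `multiscale_test` are
# read here; any other keys would be consumed by the head and are omitted.
test_cfg = dict(
    multiscale_test=True,   # `inputs` must then be a list: original + one tensor per aug scale
    flip_test=True,         # features are also extracted from the horizontally flipped input
)
# With multiscale_test=True the data pipeline is expected to put the augmented
# scales into data_samples[0].metainfo['aug_scales'] (e.g. [1.5, 2.0]) and to
# provide `inputs` as [orig, scaled_1, scaled_2], each of shape (B, C, H, W).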
-from itertools import zip_longest -from typing import Tuple, Union - -from torch import Tensor - -from mmpose.models.utils import check_and_update_config -from mmpose.registry import MODELS -from mmpose.utils.typing import (ConfigType, InstanceList, OptConfigType, - Optional, OptMultiConfig, OptSampleList, - PixelDataList, SampleList) -from .base import BasePoseEstimator - - -@MODELS.register_module() -class PoseLifter(BasePoseEstimator): - """Base class for pose lifter. - - Args: - backbone (dict): The backbone config - neck (dict, optional): The neck config. Defaults to ``None`` - head (dict, optional): The head config. Defaults to ``None`` - traj_backbone (dict, optional): The backbone config for trajectory - model. Defaults to ``None`` - traj_neck (dict, optional): The neck config for trajectory model. - Defaults to ``None`` - traj_head (dict, optional): The head config for trajectory model. - Defaults to ``None`` - semi_loss (dict, optional): The semi-supervised loss config. - Defaults to ``None`` - train_cfg (dict, optional): The runtime config for training process. - Defaults to ``None`` - test_cfg (dict, optional): The runtime config for testing process. - Defaults to ``None`` - data_preprocessor (dict, optional): The data preprocessing config to - build the instance of :class:`BaseDataPreprocessor`. Defaults to - ``None`` - init_cfg (dict, optional): The config to control the initialization. - Defaults to ``None`` - metainfo (dict): Meta information for dataset, such as keypoints - definition and properties. If set, the metainfo of the input data - batch will be overridden. For more details, please refer to - https://mmpose.readthedocs.io/en/latest/user_guides/ - prepare_datasets.html#create-a-custom-dataset-info- - config-file-for-the-dataset. Defaults to ``None`` - """ - - def __init__(self, - backbone: ConfigType, - neck: OptConfigType = None, - head: OptConfigType = None, - traj_backbone: OptConfigType = None, - traj_neck: OptConfigType = None, - traj_head: OptConfigType = None, - semi_loss: OptConfigType = None, - train_cfg: OptConfigType = None, - test_cfg: OptConfigType = None, - data_preprocessor: OptConfigType = None, - init_cfg: OptMultiConfig = None, - metainfo: Optional[dict] = None): - super().__init__( - backbone=backbone, - neck=neck, - head=head, - train_cfg=train_cfg, - test_cfg=test_cfg, - data_preprocessor=data_preprocessor, - init_cfg=init_cfg, - metainfo=metainfo) - - # trajectory model - self.share_backbone = False - if traj_head is not None: - if traj_backbone is not None: - self.traj_backbone = MODELS.build(traj_backbone) - else: - self.share_backbone = True - - # the PR #2108 and #2126 modified the interface of neck and head. - # The following function automatically detects outdated - # configurations and updates them accordingly, while also providing - # clear and concise information on the changes made. 
- traj_neck, traj_head = check_and_update_config( - traj_neck, traj_head) - - if traj_neck is not None: - self.traj_neck = MODELS.build(traj_neck) - - self.traj_head = MODELS.build(traj_head) - - # semi-supervised loss - self.semi_supervised = semi_loss is not None - if self.semi_supervised: - assert any([head, traj_head]) - self.semi_loss = MODELS.build(semi_loss) - - @property - def with_traj_backbone(self): - """bool: Whether the pose lifter has trajectory backbone.""" - return hasattr(self, 'traj_backbone') and \ - self.traj_backbone is not None - - @property - def with_traj_neck(self): - """bool: Whether the pose lifter has trajectory neck.""" - return hasattr(self, 'traj_neck') and self.traj_neck is not None - - @property - def with_traj(self): - """bool: Whether the pose lifter has trajectory head.""" - return hasattr(self, 'traj_head') - - @property - def causal(self): - """bool: Whether the pose lifter is causal.""" - if hasattr(self.backbone, 'causal'): - return self.backbone.causal - else: - raise AttributeError('A PoseLifter\'s backbone should have ' - 'the bool attribute "causal" to indicate if' - 'it performs causal inference.') - - def extract_feat(self, inputs: Tensor) -> Tuple[Tensor]: - """Extract features. - - Args: - inputs (Tensor): Image tensor with shape (N, K, C, T). - - Returns: - tuple[Tensor]: Multi-level features that may have various - resolutions. - """ - # supervised learning - # pose model - feats = self.backbone(inputs) - if self.with_neck: - feats = self.neck(feats) - - # trajectory model - if self.with_traj: - if self.share_backbone: - traj_x = feats - else: - traj_x = self.traj_backbone(inputs) - - if self.with_traj_neck: - traj_x = self.traj_neck(traj_x) - return feats, traj_x - else: - return feats - - def _forward(self, - inputs: Tensor, - data_samples: OptSampleList = None - ) -> Union[Tensor, Tuple[Tensor]]: - """Network forward process. Usually includes backbone, neck and head - forward without any post-processing. - - Args: - inputs (Tensor): Inputs with shape (N, K, C, T). - - Returns: - Union[Tensor | Tuple[Tensor]]: forward output of the network. - """ - feats = self.extract_feat(inputs) - - if self.with_traj: - # forward with trajectory model - x, traj_x = feats - if self.with_head: - x = self.head.forward(x) - - traj_x = self.traj_head.forward(traj_x) - return x, traj_x - else: - # forward without trajectory model - x = feats - if self.with_head: - x = self.head.forward(x) - return x - - def loss(self, inputs: Tensor, data_samples: SampleList) -> dict: - """Calculate losses from a batch of inputs and data samples. - - Args: - inputs (Tensor): Inputs with shape (N, K, C, T). - data_samples (List[:obj:`PoseDataSample`]): The batch - data samples. - - Returns: - dict: A dictionary of losses. - """ - feats = self.extract_feat(inputs) - - losses = {} - - if self.with_traj: - x, traj_x = feats - # loss of trajectory model - losses.update( - self.traj_head.loss( - traj_x, data_samples, train_cfg=self.train_cfg)) - else: - x = feats - - if self.with_head: - # loss of pose model - losses.update( - self.head.loss(x, data_samples, train_cfg=self.train_cfg)) - - # TODO: support semi-supervised learning - if self.semi_supervised: - losses.update(semi_loss=self.semi_loss(inputs, data_samples)) - - return losses - - def predict(self, inputs: Tensor, data_samples: SampleList) -> SampleList: - """Predict results from a batch of inputs and data samples with post- - processing. 
- - Note: - - batch_size: B - - num_input_keypoints: K - - input_keypoint_dim: C - - input_sequence_len: T - - Args: - inputs (Tensor): Inputs with shape like (B, K, C, T). - data_samples (List[:obj:`PoseDataSample`]): The batch - data samples - - Returns: - list[:obj:`PoseDataSample`]: The pose estimation results of the - input images. The return value is `PoseDataSample` instances with - ``pred_instances`` and ``pred_fields``(optional) field , and - ``pred_instances`` usually contains the following keys: - - - keypoints (Tensor): predicted keypoint coordinates in shape - (num_instances, K, D) where K is the keypoint number and D - is the keypoint dimension - - keypoint_scores (Tensor): predicted keypoint scores in shape - (num_instances, K) - """ - assert self.with_head, ( - 'The model must have head to perform prediction.') - - feats = self.extract_feat(inputs) - - pose_preds, batch_pred_instances, batch_pred_fields = None, None, None - traj_preds, batch_traj_instances, batch_traj_fields = None, None, None - if self.with_traj: - x, traj_x = feats - traj_preds = self.traj_head.predict( - traj_x, data_samples, test_cfg=self.test_cfg) - else: - x = feats - - if self.with_head: - pose_preds = self.head.predict( - x, data_samples, test_cfg=self.test_cfg) - - if isinstance(pose_preds, tuple): - batch_pred_instances, batch_pred_fields = pose_preds - else: - batch_pred_instances = pose_preds - - if isinstance(traj_preds, tuple): - batch_traj_instances, batch_traj_fields = traj_preds - else: - batch_traj_instances = traj_preds - - results = self.add_pred_to_datasample(batch_pred_instances, - batch_pred_fields, - batch_traj_instances, - batch_traj_fields, data_samples) - - return results - - def add_pred_to_datasample( - self, - batch_pred_instances: InstanceList, - batch_pred_fields: Optional[PixelDataList], - batch_traj_instances: InstanceList, - batch_traj_fields: Optional[PixelDataList], - batch_data_samples: SampleList, - ) -> SampleList: - """Add predictions into data samples. - - Args: - batch_pred_instances (List[InstanceData]): The predicted instances - of the input data batch - batch_pred_fields (List[PixelData], optional): The predicted - fields (e.g. heatmaps) of the input batch - batch_traj_instances (List[InstanceData]): The predicted instances - of the input data batch - batch_traj_fields (List[PixelData], optional): The predicted - fields (e.g. heatmaps) of the input batch - batch_data_samples (List[PoseDataSample]): The input data batch - - Returns: - List[PoseDataSample]: A list of data samples where the predictions - are stored in the ``pred_instances`` field of each data sample. 
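# --- Illustrative sketch, not part of the diff above ---
# Configuring the trajectory branch of `PoseLifter`. The component types and
# arguments are sketched as placeholders for registered MMPose modules, not
# copied from a real config; giving `traj_head` while omitting `traj_backbone`
# sets share_backbone=True, so the trajectory head reuses the pose backbone
# features.
pose_lifter_cfg = dict(
    type='PoseLifter',
    backbone=dict(type='TCN', in_channels=2 * 17),                                     # placeholder
    head=dict(type='TemporalRegressionHead', in_channels=1024, num_joints=17),         # placeholder
    traj_head=dict(type='TrajectoryRegressionHead', in_channels=1024, num_joints=1),   # placeholder
    # traj_backbone / traj_neck omitted -> the trajectory head shares backbone features
)
# from mmpose.registry import MODELS; model = MODELS.build(pose_lifter_cfg)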
- """ - assert len(batch_pred_instances) == len(batch_data_samples) - if batch_pred_fields is None: - batch_pred_fields, batch_traj_fields = [], [] - if batch_traj_instances is None: - batch_traj_instances = [] - output_keypoint_indices = self.test_cfg.get('output_keypoint_indices', - None) - - for (pred_instances, pred_fields, traj_instances, traj_fields, - data_sample) in zip_longest(batch_pred_instances, - batch_pred_fields, - batch_traj_instances, - batch_traj_fields, - batch_data_samples): - - if output_keypoint_indices is not None: - # select output keypoints with given indices - num_keypoints = pred_instances.keypoints.shape[1] - for key, value in pred_instances.all_items(): - if key.startswith('keypoint'): - pred_instances.set_field( - value[:, output_keypoint_indices], key) - - data_sample.pred_instances = pred_instances - - if pred_fields is not None: - if output_keypoint_indices is not None: - # select output heatmap channels with keypoint indices - # when the number of heatmap channel matches num_keypoints - for key, value in pred_fields.all_items(): - if value.shape[0] != num_keypoints: - continue - pred_fields.set_field(value[output_keypoint_indices], - key) - data_sample.pred_fields = pred_fields - - return batch_data_samples +# Copyright (c) OpenMMLab. All rights reserved. +from itertools import zip_longest +from typing import Tuple, Union + +from torch import Tensor + +from mmpose.models.utils import check_and_update_config +from mmpose.registry import MODELS +from mmpose.utils.typing import (ConfigType, InstanceList, OptConfigType, + Optional, OptMultiConfig, OptSampleList, + PixelDataList, SampleList) +from .base import BasePoseEstimator + + +@MODELS.register_module() +class PoseLifter(BasePoseEstimator): + """Base class for pose lifter. + + Args: + backbone (dict): The backbone config + neck (dict, optional): The neck config. Defaults to ``None`` + head (dict, optional): The head config. Defaults to ``None`` + traj_backbone (dict, optional): The backbone config for trajectory + model. Defaults to ``None`` + traj_neck (dict, optional): The neck config for trajectory model. + Defaults to ``None`` + traj_head (dict, optional): The head config for trajectory model. + Defaults to ``None`` + semi_loss (dict, optional): The semi-supervised loss config. + Defaults to ``None`` + train_cfg (dict, optional): The runtime config for training process. + Defaults to ``None`` + test_cfg (dict, optional): The runtime config for testing process. + Defaults to ``None`` + data_preprocessor (dict, optional): The data preprocessing config to + build the instance of :class:`BaseDataPreprocessor`. Defaults to + ``None`` + init_cfg (dict, optional): The config to control the initialization. + Defaults to ``None`` + metainfo (dict): Meta information for dataset, such as keypoints + definition and properties. If set, the metainfo of the input data + batch will be overridden. For more details, please refer to + https://mmpose.readthedocs.io/en/latest/user_guides/ + prepare_datasets.html#create-a-custom-dataset-info- + config-file-for-the-dataset. 
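# --- Illustrative sketch, not part of the diff above ---
# `output_keypoint_indices` in test_cfg, read by `add_pred_to_datasample`,
# keeps only a subset of the predicted keypoints (and of heatmap channels
# whose channel count equals the original keypoint number). The indices below
# are just an example subset.
test_cfg = dict(
    flip_test=True,
    output_keypoint_indices=[0, 5, 6, 11, 12],
)
# pred_instances.keypoints is then sliced along the keypoint axis to shape
# (num_instances, 5, D), and matching heatmap fields to 5 channels.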
Defaults to ``None`` + """ + + def __init__(self, + backbone: ConfigType, + neck: OptConfigType = None, + head: OptConfigType = None, + traj_backbone: OptConfigType = None, + traj_neck: OptConfigType = None, + traj_head: OptConfigType = None, + semi_loss: OptConfigType = None, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + data_preprocessor: OptConfigType = None, + init_cfg: OptMultiConfig = None, + metainfo: Optional[dict] = None): + super().__init__( + backbone=backbone, + neck=neck, + head=head, + train_cfg=train_cfg, + test_cfg=test_cfg, + data_preprocessor=data_preprocessor, + init_cfg=init_cfg, + metainfo=metainfo) + + # trajectory model + self.share_backbone = False + if traj_head is not None: + if traj_backbone is not None: + self.traj_backbone = MODELS.build(traj_backbone) + else: + self.share_backbone = True + + # the PR #2108 and #2126 modified the interface of neck and head. + # The following function automatically detects outdated + # configurations and updates them accordingly, while also providing + # clear and concise information on the changes made. + traj_neck, traj_head = check_and_update_config( + traj_neck, traj_head) + + if traj_neck is not None: + self.traj_neck = MODELS.build(traj_neck) + + self.traj_head = MODELS.build(traj_head) + + # semi-supervised loss + self.semi_supervised = semi_loss is not None + if self.semi_supervised: + assert any([head, traj_head]) + self.semi_loss = MODELS.build(semi_loss) + + @property + def with_traj_backbone(self): + """bool: Whether the pose lifter has trajectory backbone.""" + return hasattr(self, 'traj_backbone') and \ + self.traj_backbone is not None + + @property + def with_traj_neck(self): + """bool: Whether the pose lifter has trajectory neck.""" + return hasattr(self, 'traj_neck') and self.traj_neck is not None + + @property + def with_traj(self): + """bool: Whether the pose lifter has trajectory head.""" + return hasattr(self, 'traj_head') + + @property + def causal(self): + """bool: Whether the pose lifter is causal.""" + if hasattr(self.backbone, 'causal'): + return self.backbone.causal + else: + raise AttributeError('A PoseLifter\'s backbone should have ' + 'the bool attribute "causal" to indicate if' + 'it performs causal inference.') + + def extract_feat(self, inputs: Tensor) -> Tuple[Tensor]: + """Extract features. + + Args: + inputs (Tensor): Image tensor with shape (N, K, C, T). + + Returns: + tuple[Tensor]: Multi-level features that may have various + resolutions. + """ + # supervised learning + # pose model + feats = self.backbone(inputs) + if self.with_neck: + feats = self.neck(feats) + + # trajectory model + if self.with_traj: + if self.share_backbone: + traj_x = feats + else: + traj_x = self.traj_backbone(inputs) + + if self.with_traj_neck: + traj_x = self.traj_neck(traj_x) + return feats, traj_x + else: + return feats + + def _forward(self, + inputs: Tensor, + data_samples: OptSampleList = None + ) -> Union[Tensor, Tuple[Tensor]]: + """Network forward process. Usually includes backbone, neck and head + forward without any post-processing. + + Args: + inputs (Tensor): Inputs with shape (N, K, C, T). + + Returns: + Union[Tensor | Tuple[Tensor]]: forward output of the network. 
+ """ + feats = self.extract_feat(inputs) + + if self.with_traj: + # forward with trajectory model + x, traj_x = feats + if self.with_head: + x = self.head.forward(x) + + traj_x = self.traj_head.forward(traj_x) + return x, traj_x + else: + # forward without trajectory model + x = feats + if self.with_head: + x = self.head.forward(x) + return x + + def loss(self, inputs: Tensor, data_samples: SampleList) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + inputs (Tensor): Inputs with shape (N, K, C, T). + data_samples (List[:obj:`PoseDataSample`]): The batch + data samples. + + Returns: + dict: A dictionary of losses. + """ + feats = self.extract_feat(inputs) + + losses = {} + + if self.with_traj: + x, traj_x = feats + # loss of trajectory model + losses.update( + self.traj_head.loss( + traj_x, data_samples, train_cfg=self.train_cfg)) + else: + x = feats + + if self.with_head: + # loss of pose model + losses.update( + self.head.loss(x, data_samples, train_cfg=self.train_cfg)) + + # TODO: support semi-supervised learning + if self.semi_supervised: + losses.update(semi_loss=self.semi_loss(inputs, data_samples)) + + return losses + + def predict(self, inputs: Tensor, data_samples: SampleList) -> SampleList: + """Predict results from a batch of inputs and data samples with post- + processing. + + Note: + - batch_size: B + - num_input_keypoints: K + - input_keypoint_dim: C + - input_sequence_len: T + + Args: + inputs (Tensor): Inputs with shape like (B, K, C, T). + data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + + Returns: + list[:obj:`PoseDataSample`]: The pose estimation results of the + input images. The return value is `PoseDataSample` instances with + ``pred_instances`` and ``pred_fields``(optional) field , and + ``pred_instances`` usually contains the following keys: + + - keypoints (Tensor): predicted keypoint coordinates in shape + (num_instances, K, D) where K is the keypoint number and D + is the keypoint dimension + - keypoint_scores (Tensor): predicted keypoint scores in shape + (num_instances, K) + """ + assert self.with_head, ( + 'The model must have head to perform prediction.') + + feats = self.extract_feat(inputs) + + pose_preds, batch_pred_instances, batch_pred_fields = None, None, None + traj_preds, batch_traj_instances, batch_traj_fields = None, None, None + if self.with_traj: + x, traj_x = feats + traj_preds = self.traj_head.predict( + traj_x, data_samples, test_cfg=self.test_cfg) + else: + x = feats + + if self.with_head: + pose_preds = self.head.predict( + x, data_samples, test_cfg=self.test_cfg) + + if isinstance(pose_preds, tuple): + batch_pred_instances, batch_pred_fields = pose_preds + else: + batch_pred_instances = pose_preds + + if isinstance(traj_preds, tuple): + batch_traj_instances, batch_traj_fields = traj_preds + else: + batch_traj_instances = traj_preds + + results = self.add_pred_to_datasample(batch_pred_instances, + batch_pred_fields, + batch_traj_instances, + batch_traj_fields, data_samples) + + return results + + def add_pred_to_datasample( + self, + batch_pred_instances: InstanceList, + batch_pred_fields: Optional[PixelDataList], + batch_traj_instances: InstanceList, + batch_traj_fields: Optional[PixelDataList], + batch_data_samples: SampleList, + ) -> SampleList: + """Add predictions into data samples. + + Args: + batch_pred_instances (List[InstanceData]): The predicted instances + of the input data batch + batch_pred_fields (List[PixelData], optional): The predicted + fields (e.g. 
heatmaps) of the input batch + batch_traj_instances (List[InstanceData]): The predicted instances + of the input data batch + batch_traj_fields (List[PixelData], optional): The predicted + fields (e.g. heatmaps) of the input batch + batch_data_samples (List[PoseDataSample]): The input data batch + + Returns: + List[PoseDataSample]: A list of data samples where the predictions + are stored in the ``pred_instances`` field of each data sample. + """ + assert len(batch_pred_instances) == len(batch_data_samples) + if batch_pred_fields is None: + batch_pred_fields, batch_traj_fields = [], [] + if batch_traj_instances is None: + batch_traj_instances = [] + output_keypoint_indices = self.test_cfg.get('output_keypoint_indices', + None) + + for (pred_instances, pred_fields, traj_instances, traj_fields, + data_sample) in zip_longest(batch_pred_instances, + batch_pred_fields, + batch_traj_instances, + batch_traj_fields, + batch_data_samples): + + if output_keypoint_indices is not None: + # select output keypoints with given indices + num_keypoints = pred_instances.keypoints.shape[1] + for key, value in pred_instances.all_items(): + if key.startswith('keypoint'): + pred_instances.set_field( + value[:, output_keypoint_indices], key) + + data_sample.pred_instances = pred_instances + + if pred_fields is not None: + if output_keypoint_indices is not None: + # select output heatmap channels with keypoint indices + # when the number of heatmap channel matches num_keypoints + for key, value in pred_fields.all_items(): + if value.shape[0] != num_keypoints: + continue + pred_fields.set_field(value[output_keypoint_indices], + key) + data_sample.pred_fields = pred_fields + + return batch_data_samples diff --git a/mmpose/models/pose_estimators/topdown.py b/mmpose/models/pose_estimators/topdown.py index 89b332893f..2ceb79fcb8 100644 --- a/mmpose/models/pose_estimators/topdown.py +++ b/mmpose/models/pose_estimators/topdown.py @@ -1,182 +1,182 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from itertools import zip_longest -from typing import Optional - -from torch import Tensor - -from mmpose.registry import MODELS -from mmpose.utils.typing import (ConfigType, InstanceList, OptConfigType, - OptMultiConfig, PixelDataList, SampleList) -from .base import BasePoseEstimator - - -@MODELS.register_module() -class TopdownPoseEstimator(BasePoseEstimator): - """Base class for top-down pose estimators. - - Args: - backbone (dict): The backbone config - neck (dict, optional): The neck config. Defaults to ``None`` - head (dict, optional): The head config. Defaults to ``None`` - train_cfg (dict, optional): The runtime config for training process. - Defaults to ``None`` - test_cfg (dict, optional): The runtime config for testing process. - Defaults to ``None`` - data_preprocessor (dict, optional): The data preprocessing config to - build the instance of :class:`BaseDataPreprocessor`. Defaults to - ``None`` - init_cfg (dict, optional): The config to control the initialization. - Defaults to ``None`` - metainfo (dict): Meta information for dataset, such as keypoints - definition and properties. If set, the metainfo of the input data - batch will be overridden. For more details, please refer to - https://mmpose.readthedocs.io/en/latest/user_guides/ - prepare_datasets.html#create-a-custom-dataset-info- - config-file-for-the-dataset. 
Defaults to ``None`` - """ - - def __init__(self, - backbone: ConfigType, - neck: OptConfigType = None, - head: OptConfigType = None, - train_cfg: OptConfigType = None, - test_cfg: OptConfigType = None, - data_preprocessor: OptConfigType = None, - init_cfg: OptMultiConfig = None, - metainfo: Optional[dict] = None): - super().__init__( - backbone=backbone, - neck=neck, - head=head, - train_cfg=train_cfg, - test_cfg=test_cfg, - data_preprocessor=data_preprocessor, - init_cfg=init_cfg, - metainfo=metainfo) - - def loss(self, inputs: Tensor, data_samples: SampleList) -> dict: - """Calculate losses from a batch of inputs and data samples. - - Args: - inputs (Tensor): Inputs with shape (N, C, H, W). - data_samples (List[:obj:`PoseDataSample`]): The batch - data samples. - - Returns: - dict: A dictionary of losses. - """ - feats = self.extract_feat(inputs) - - losses = dict() - - if self.with_head: - losses.update( - self.head.loss(feats, data_samples, train_cfg=self.train_cfg)) - - return losses - - def predict(self, inputs: Tensor, data_samples: SampleList) -> SampleList: - """Predict results from a batch of inputs and data samples with post- - processing. - - Args: - inputs (Tensor): Inputs with shape (N, C, H, W) - data_samples (List[:obj:`PoseDataSample`]): The batch - data samples - - Returns: - list[:obj:`PoseDataSample`]: The pose estimation results of the - input images. The return value is `PoseDataSample` instances with - ``pred_instances`` and ``pred_fields``(optional) field , and - ``pred_instances`` usually contains the following keys: - - - keypoints (Tensor): predicted keypoint coordinates in shape - (num_instances, K, D) where K is the keypoint number and D - is the keypoint dimension - - keypoint_scores (Tensor): predicted keypoint scores in shape - (num_instances, K) - """ - assert self.with_head, ( - 'The model must have head to perform prediction.') - - if self.test_cfg.get('flip_test', False): - _feats = self.extract_feat(inputs) - _feats_flip = self.extract_feat(inputs.flip(-1)) - feats = [_feats, _feats_flip] - else: - feats = self.extract_feat(inputs) - - preds = self.head.predict(feats, data_samples, test_cfg=self.test_cfg) - - if isinstance(preds, tuple): - batch_pred_instances, batch_pred_fields = preds - else: - batch_pred_instances = preds - batch_pred_fields = None - - results = self.add_pred_to_datasample(batch_pred_instances, - batch_pred_fields, data_samples) - - return results - - def add_pred_to_datasample(self, batch_pred_instances: InstanceList, - batch_pred_fields: Optional[PixelDataList], - batch_data_samples: SampleList) -> SampleList: - """Add predictions into data samples. - - Args: - batch_pred_instances (List[InstanceData]): The predicted instances - of the input data batch - batch_pred_fields (List[PixelData], optional): The predicted - fields (e.g. heatmaps) of the input batch - batch_data_samples (List[PoseDataSample]): The input data batch - - Returns: - List[PoseDataSample]: A list of data samples where the predictions - are stored in the ``pred_instances`` field of each data sample. 
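# --- Illustrative sketch, not part of the diff above ---
# A toy helper (not from the diff) showing which fields
# `TopdownPoseEstimator.predict` fills into each returned PoseDataSample.
def summarize_topdown_results(results):
    for data_sample in results:
        pred = data_sample.pred_instances
        print(pred.keypoints.shape,        # (num_instances, K, D), image-space coordinates
              pred.keypoint_scores.shape,  # (num_instances, K)
              pred.bboxes.shape)           # bboxes copied over from gt_instances
        if 'pred_fields' in data_sample:
            print(data_sample.pred_fields)  # optional heatmaps etc.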
- """ - assert len(batch_pred_instances) == len(batch_data_samples) - if batch_pred_fields is None: - batch_pred_fields = [] - output_keypoint_indices = self.test_cfg.get('output_keypoint_indices', - None) - - for pred_instances, pred_fields, data_sample in zip_longest( - batch_pred_instances, batch_pred_fields, batch_data_samples): - - gt_instances = data_sample.gt_instances - - # convert keypoint coordinates from input space to image space - bbox_centers = gt_instances.bbox_centers - bbox_scales = gt_instances.bbox_scales - input_size = data_sample.metainfo['input_size'] - - pred_instances.keypoints = pred_instances.keypoints / input_size \ - * bbox_scales + bbox_centers - 0.5 * bbox_scales - - if output_keypoint_indices is not None: - # select output keypoints with given indices - num_keypoints = pred_instances.keypoints.shape[1] - for key, value in pred_instances.all_items(): - if key.startswith('keypoint'): - pred_instances.set_field( - value[:, output_keypoint_indices], key) - - # add bbox information into pred_instances - pred_instances.bboxes = gt_instances.bboxes - pred_instances.bbox_scores = gt_instances.bbox_scores - - data_sample.pred_instances = pred_instances - - if pred_fields is not None: - if output_keypoint_indices is not None: - # select output heatmap channels with keypoint indices - # when the number of heatmap channel matches num_keypoints - for key, value in pred_fields.all_items(): - if value.shape[0] != num_keypoints: - continue - pred_fields.set_field(value[output_keypoint_indices], - key) - data_sample.pred_fields = pred_fields - - return batch_data_samples +# Copyright (c) OpenMMLab. All rights reserved. +from itertools import zip_longest +from typing import Optional + +from torch import Tensor + +from mmpose.registry import MODELS +from mmpose.utils.typing import (ConfigType, InstanceList, OptConfigType, + OptMultiConfig, PixelDataList, SampleList) +from .base import BasePoseEstimator + + +@MODELS.register_module() +class TopdownPoseEstimator(BasePoseEstimator): + """Base class for top-down pose estimators. + + Args: + backbone (dict): The backbone config + neck (dict, optional): The neck config. Defaults to ``None`` + head (dict, optional): The head config. Defaults to ``None`` + train_cfg (dict, optional): The runtime config for training process. + Defaults to ``None`` + test_cfg (dict, optional): The runtime config for testing process. + Defaults to ``None`` + data_preprocessor (dict, optional): The data preprocessing config to + build the instance of :class:`BaseDataPreprocessor`. Defaults to + ``None`` + init_cfg (dict, optional): The config to control the initialization. + Defaults to ``None`` + metainfo (dict): Meta information for dataset, such as keypoints + definition and properties. If set, the metainfo of the input data + batch will be overridden. For more details, please refer to + https://mmpose.readthedocs.io/en/latest/user_guides/ + prepare_datasets.html#create-a-custom-dataset-info- + config-file-for-the-dataset. 
Defaults to ``None`` + """ + + def __init__(self, + backbone: ConfigType, + neck: OptConfigType = None, + head: OptConfigType = None, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + data_preprocessor: OptConfigType = None, + init_cfg: OptMultiConfig = None, + metainfo: Optional[dict] = None): + super().__init__( + backbone=backbone, + neck=neck, + head=head, + train_cfg=train_cfg, + test_cfg=test_cfg, + data_preprocessor=data_preprocessor, + init_cfg=init_cfg, + metainfo=metainfo) + + def loss(self, inputs: Tensor, data_samples: SampleList) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + inputs (Tensor): Inputs with shape (N, C, H, W). + data_samples (List[:obj:`PoseDataSample`]): The batch + data samples. + + Returns: + dict: A dictionary of losses. + """ + feats = self.extract_feat(inputs) + + losses = dict() + + if self.with_head: + losses.update( + self.head.loss(feats, data_samples, train_cfg=self.train_cfg)) + + return losses + + def predict(self, inputs: Tensor, data_samples: SampleList) -> SampleList: + """Predict results from a batch of inputs and data samples with post- + processing. + + Args: + inputs (Tensor): Inputs with shape (N, C, H, W) + data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + + Returns: + list[:obj:`PoseDataSample`]: The pose estimation results of the + input images. The return value is `PoseDataSample` instances with + ``pred_instances`` and ``pred_fields``(optional) field , and + ``pred_instances`` usually contains the following keys: + + - keypoints (Tensor): predicted keypoint coordinates in shape + (num_instances, K, D) where K is the keypoint number and D + is the keypoint dimension + - keypoint_scores (Tensor): predicted keypoint scores in shape + (num_instances, K) + """ + assert self.with_head, ( + 'The model must have head to perform prediction.') + + if self.test_cfg.get('flip_test', False): + _feats = self.extract_feat(inputs) + _feats_flip = self.extract_feat(inputs.flip(-1)) + feats = [_feats, _feats_flip] + else: + feats = self.extract_feat(inputs) + + preds = self.head.predict(feats, data_samples, test_cfg=self.test_cfg) + + if isinstance(preds, tuple): + batch_pred_instances, batch_pred_fields = preds + else: + batch_pred_instances = preds + batch_pred_fields = None + + results = self.add_pred_to_datasample(batch_pred_instances, + batch_pred_fields, data_samples) + + return results + + def add_pred_to_datasample(self, batch_pred_instances: InstanceList, + batch_pred_fields: Optional[PixelDataList], + batch_data_samples: SampleList) -> SampleList: + """Add predictions into data samples. + + Args: + batch_pred_instances (List[InstanceData]): The predicted instances + of the input data batch + batch_pred_fields (List[PixelData], optional): The predicted + fields (e.g. heatmaps) of the input batch + batch_data_samples (List[PoseDataSample]): The input data batch + + Returns: + List[PoseDataSample]: A list of data samples where the predictions + are stored in the ``pred_instances`` field of each data sample. 
+ """ + assert len(batch_pred_instances) == len(batch_data_samples) + if batch_pred_fields is None: + batch_pred_fields = [] + output_keypoint_indices = self.test_cfg.get('output_keypoint_indices', + None) + + for pred_instances, pred_fields, data_sample in zip_longest( + batch_pred_instances, batch_pred_fields, batch_data_samples): + + gt_instances = data_sample.gt_instances + + # convert keypoint coordinates from input space to image space + bbox_centers = gt_instances.bbox_centers + bbox_scales = gt_instances.bbox_scales + input_size = data_sample.metainfo['input_size'] + + pred_instances.keypoints = pred_instances.keypoints / input_size \ + * bbox_scales + bbox_centers - 0.5 * bbox_scales + + if output_keypoint_indices is not None: + # select output keypoints with given indices + num_keypoints = pred_instances.keypoints.shape[1] + for key, value in pred_instances.all_items(): + if key.startswith('keypoint'): + pred_instances.set_field( + value[:, output_keypoint_indices], key) + + # add bbox information into pred_instances + pred_instances.bboxes = gt_instances.bboxes + pred_instances.bbox_scores = gt_instances.bbox_scores + + data_sample.pred_instances = pred_instances + + if pred_fields is not None: + if output_keypoint_indices is not None: + # select output heatmap channels with keypoint indices + # when the number of heatmap channel matches num_keypoints + for key, value in pred_fields.all_items(): + if value.shape[0] != num_keypoints: + continue + pred_fields.set_field(value[output_keypoint_indices], + key) + data_sample.pred_fields = pred_fields + + return batch_data_samples diff --git a/mmpose/models/utils/__init__.py b/mmpose/models/utils/__init__.py index 22d8a89b41..5d03bafa4c 100644 --- a/mmpose/models/utils/__init__.py +++ b/mmpose/models/utils/__init__.py @@ -1,10 +1,10 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .check_and_update_config import check_and_update_config -from .ckpt_convert import pvt_convert -from .rtmcc_block import RTMCCBlock, rope -from .transformer import PatchEmbed, nchw_to_nlc, nlc_to_nchw - -__all__ = [ - 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'pvt_convert', 'RTMCCBlock', - 'rope', 'check_and_update_config' -] +# Copyright (c) OpenMMLab. All rights reserved. +from .check_and_update_config import check_and_update_config +from .ckpt_convert import pvt_convert +from .rtmcc_block import RTMCCBlock, rope +from .transformer import PatchEmbed, nchw_to_nlc, nlc_to_nchw + +__all__ = [ + 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'pvt_convert', 'RTMCCBlock', + 'rope', 'check_and_update_config' +] diff --git a/mmpose/models/utils/check_and_update_config.py b/mmpose/models/utils/check_and_update_config.py index 4cd1efa39b..3156151948 100644 --- a/mmpose/models/utils/check_and_update_config.py +++ b/mmpose/models/utils/check_and_update_config.py @@ -1,230 +1,230 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
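# --- Illustrative sketch, not part of the diff above ---
# The top-down counterpart of the coordinate mapping: keypoints predicted in
# the cropped-input space are mapped back to the full image with
#   kpts_img = kpts_in / input_size * bbox_scales + bbox_centers - 0.5 * bbox_scales
# Numbers are made up; only the formula mirrors
# `TopdownPoseEstimator.add_pred_to_datasample` above.
import torch

input_size = torch.tensor([192., 256.])     # (w, h) of the person crop fed to the model
bbox_centers = torch.tensor([[500., 300.]])
bbox_scales = torch.tensor([[240., 320.]])  # padded bbox extent in the image

kpts_in = torch.tensor([[[96., 128.]]])     # (num_instances, K, 2), center of the crop
kpts_img = kpts_in / input_size * bbox_scales + bbox_centers - 0.5 * bbox_scales
print(kpts_img)   # [[[500., 300.]]] -- the crop center lands on the bbox center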
-from typing import Dict, Optional, Tuple, Union - -from mmengine.config import Config, ConfigDict -from mmengine.dist import master_only -from mmengine.logging import MMLogger - -ConfigType = Union[Config, ConfigDict] - - -def process_input_transform(input_transform: str, head: Dict, head_new: Dict, - head_deleted_dict: Dict, head_append_dict: Dict, - neck_new: Dict, input_index: Tuple[int], - align_corners: bool) -> None: - """Process the input_transform field and update head and neck - dictionaries.""" - if input_transform == 'resize_concat': - in_channels = head_new.pop('in_channels') - head_deleted_dict['in_channels'] = str(in_channels) - in_channels = sum([in_channels[i] for i in input_index]) - head_new['in_channels'] = in_channels - head_append_dict['in_channels'] = str(in_channels) - - neck_new.update( - dict( - type='FeatureMapProcessor', - concat=True, - select_index=input_index, - )) - if align_corners: - neck_new['align_corners'] = align_corners - - elif input_transform == 'select': - if input_index != (-1, ): - neck_new.update( - dict(type='FeatureMapProcessor', select_index=input_index)) - if isinstance(head['in_channels'], tuple): - in_channels = head_new.pop('in_channels') - head_deleted_dict['in_channels'] = str(in_channels) - if isinstance(input_index, int): - in_channels = in_channels[input_index] - else: - in_channels = tuple([in_channels[i] for i in input_index]) - head_new['in_channels'] = in_channels - head_append_dict['in_channels'] = str(in_channels) - if align_corners: - neck_new['align_corners'] = align_corners - - else: - raise ValueError(f'model.head get invalid value for argument ' - f'input_transform: {input_transform}') - - -def process_extra_field(extra: Dict, head_new: Dict, head_deleted_dict: Dict, - head_append_dict: Dict, neck_new: Dict) -> None: - """Process the extra field and update head and neck dictionaries.""" - head_deleted_dict['extra'] = 'dict(' - for key, value in extra.items(): - head_deleted_dict['extra'] += f'{key}={value},' - head_deleted_dict['extra'] = head_deleted_dict['extra'][:-1] + ')' - if 'final_conv_kernel' in extra: - kernel_size = extra['final_conv_kernel'] - if kernel_size > 1: - padding = kernel_size // 2 - head_new['final_layer'] = dict( - kernel_size=kernel_size, padding=padding) - head_append_dict[ - 'final_layer'] = f'dict(kernel_size={kernel_size}, ' \ - f'padding={padding})' - else: - head_new['final_layer'] = dict(kernel_size=kernel_size) - head_append_dict[ - 'final_layer'] = f'dict(kernel_size={kernel_size})' - if 'upsample' in extra: - neck_new.update( - dict( - type='FeatureMapProcessor', - scale_factor=float(extra['upsample']), - apply_relu=True, - )) - - -def process_has_final_layer(has_final_layer: bool, head_new: Dict, - head_deleted_dict: Dict, - head_append_dict: Dict) -> None: - """Process the has_final_layer field and update the head dictionary.""" - head_deleted_dict['has_final_layer'] = str(has_final_layer) - if not has_final_layer: - if 'final_layer' not in head_new: - head_new['final_layer'] = None - head_append_dict['final_layer'] = 'None' - - -def check_and_update_config(neck: Optional[ConfigType], - head: ConfigType) -> Tuple[Optional[Dict], Dict]: - """Check and update the configuration of the head and neck components. - Args: - neck (Optional[ConfigType]): Configuration for the neck component. - head (ConfigType): Configuration for the head component. - - Returns: - Tuple[Optional[Dict], Dict]: Updated configurations for the neck - and head components. 
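# --- Illustrative sketch, not part of the diff above ---
# What the `extra` / `has_final_layer` migration in process_extra_field and
# process_has_final_layer amounts to, on a made-up 0.x-style fragment:
old_fields = dict(extra=dict(final_conv_kernel=3, upsample=2), has_final_layer=True)
# After check_and_update_config the head gains
#   final_layer=dict(kernel_size=3, padding=1)      # padding = kernel_size // 2
# and the neck gains
#   dict(type='FeatureMapProcessor', scale_factor=2.0, apply_relu=True)
# With has_final_layer=False (and no final_conv_kernel) the head would instead
# end up with final_layer=None.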
- """ - head_new, neck_new = head.copy(), neck.copy() if isinstance(neck, - dict) else {} - head_deleted_dict, head_append_dict = {}, {} - - if 'input_transform' in head: - input_transform = head_new.pop('input_transform') - head_deleted_dict['input_transform'] = f'\'{input_transform}\'' - else: - input_transform = 'select' - - if 'input_index' in head: - input_index = head_new.pop('input_index') - head_deleted_dict['input_index'] = str(input_index) - else: - input_index = (-1, ) - - if 'align_corners' in head: - align_corners = head_new.pop('align_corners') - head_deleted_dict['align_corners'] = str(align_corners) - else: - align_corners = False - - process_input_transform(input_transform, head, head_new, head_deleted_dict, - head_append_dict, neck_new, input_index, - align_corners) - - if 'extra' in head: - extra = head_new.pop('extra') - process_extra_field(extra, head_new, head_deleted_dict, - head_append_dict, neck_new) - - if 'has_final_layer' in head: - has_final_layer = head_new.pop('has_final_layer') - process_has_final_layer(has_final_layer, head_new, head_deleted_dict, - head_append_dict) - - display_modifications(head_deleted_dict, head_append_dict, neck_new) - - neck_new = neck_new if len(neck_new) else None - return neck_new, head_new - - -@master_only -def display_modifications(head_deleted_dict: Dict, head_append_dict: Dict, - neck: Dict) -> None: - """Display the modifications made to the head and neck configurations. - - Args: - head_deleted_dict (Dict): Dictionary of deleted fields in the head. - head_append_dict (Dict): Dictionary of appended fields in the head. - neck (Dict): Updated neck configuration. - """ - if len(head_deleted_dict) + len(head_append_dict) == 0: - return - - old_model_info, new_model_info = build_model_info(head_deleted_dict, - head_append_dict, neck) - - total_info = '\nThe config you are using is outdated. '\ - 'The following section of the config:\n```\n' - total_info += old_model_info - total_info += '```\nshould be updated to\n```\n' - total_info += new_model_info - total_info += '```\nFor more information, please refer to '\ - 'https://mmpose.readthedocs.io/en/latest/' \ - 'guide_to_framework.html#step3-model' - - logger: MMLogger = MMLogger.get_current_instance() - logger.warning(total_info) - - -def build_model_info(head_deleted_dict: Dict, head_append_dict: Dict, - neck: Dict) -> Tuple[str, str]: - """Build the old and new model information strings. - Args: - head_deleted_dict (Dict): Dictionary of deleted fields in the head. - head_append_dict (Dict): Dictionary of appended fields in the head. - neck (Dict): Updated neck configuration. - - Returns: - Tuple[str, str]: Old and new model information strings. - """ - old_head_info = build_head_info(head_deleted_dict) - new_head_info = build_head_info(head_append_dict) - neck_info = build_neck_info(neck) - - old_model_info = 'model=dict(\n' + ' ' * 4 + '...,\n' + old_head_info - new_model_info = 'model=dict(\n' + ' ' * 4 + '...,\n' \ - + neck_info + new_head_info - - return old_model_info, new_model_info - - -def build_head_info(head_dict: Dict) -> str: - """Build the head information string. - - Args: - head_dict (Dict): Dictionary of fields in the head configuration. - Returns: - str: Head information string. - """ - head_info = ' ' * 4 + 'head=dict(\n' - for key, value in head_dict.items(): - head_info += ' ' * 8 + f'{key}={value},\n' - head_info += ' ' * 8 + '...),\n' - return head_info - - -def build_neck_info(neck: Dict) -> str: - """Build the neck information string. 
- Args: - neck (Dict): Updated neck configuration. - - Returns: - str: Neck information string. - """ - if len(neck) > 0: - neck = neck.copy() - neck_info = ' ' * 4 + 'neck=dict(\n' + ' ' * 8 + \ - f'type=\'{neck.pop("type")}\',\n' - for key, value in neck.items(): - neck_info += ' ' * 8 + f'{key}={str(value)},\n' - neck_info += ' ' * 4 + '),\n' - else: - neck_info = '' - return neck_info +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, Optional, Tuple, Union + +from mmengine.config import Config, ConfigDict +from mmengine.dist import master_only +from mmengine.logging import MMLogger + +ConfigType = Union[Config, ConfigDict] + + +def process_input_transform(input_transform: str, head: Dict, head_new: Dict, + head_deleted_dict: Dict, head_append_dict: Dict, + neck_new: Dict, input_index: Tuple[int], + align_corners: bool) -> None: + """Process the input_transform field and update head and neck + dictionaries.""" + if input_transform == 'resize_concat': + in_channels = head_new.pop('in_channels') + head_deleted_dict['in_channels'] = str(in_channels) + in_channels = sum([in_channels[i] for i in input_index]) + head_new['in_channels'] = in_channels + head_append_dict['in_channels'] = str(in_channels) + + neck_new.update( + dict( + type='FeatureMapProcessor', + concat=True, + select_index=input_index, + )) + if align_corners: + neck_new['align_corners'] = align_corners + + elif input_transform == 'select': + if input_index != (-1, ): + neck_new.update( + dict(type='FeatureMapProcessor', select_index=input_index)) + if isinstance(head['in_channels'], tuple): + in_channels = head_new.pop('in_channels') + head_deleted_dict['in_channels'] = str(in_channels) + if isinstance(input_index, int): + in_channels = in_channels[input_index] + else: + in_channels = tuple([in_channels[i] for i in input_index]) + head_new['in_channels'] = in_channels + head_append_dict['in_channels'] = str(in_channels) + if align_corners: + neck_new['align_corners'] = align_corners + + else: + raise ValueError(f'model.head get invalid value for argument ' + f'input_transform: {input_transform}') + + +def process_extra_field(extra: Dict, head_new: Dict, head_deleted_dict: Dict, + head_append_dict: Dict, neck_new: Dict) -> None: + """Process the extra field and update head and neck dictionaries.""" + head_deleted_dict['extra'] = 'dict(' + for key, value in extra.items(): + head_deleted_dict['extra'] += f'{key}={value},' + head_deleted_dict['extra'] = head_deleted_dict['extra'][:-1] + ')' + if 'final_conv_kernel' in extra: + kernel_size = extra['final_conv_kernel'] + if kernel_size > 1: + padding = kernel_size // 2 + head_new['final_layer'] = dict( + kernel_size=kernel_size, padding=padding) + head_append_dict[ + 'final_layer'] = f'dict(kernel_size={kernel_size}, ' \ + f'padding={padding})' + else: + head_new['final_layer'] = dict(kernel_size=kernel_size) + head_append_dict[ + 'final_layer'] = f'dict(kernel_size={kernel_size})' + if 'upsample' in extra: + neck_new.update( + dict( + type='FeatureMapProcessor', + scale_factor=float(extra['upsample']), + apply_relu=True, + )) + + +def process_has_final_layer(has_final_layer: bool, head_new: Dict, + head_deleted_dict: Dict, + head_append_dict: Dict) -> None: + """Process the has_final_layer field and update the head dictionary.""" + head_deleted_dict['has_final_layer'] = str(has_final_layer) + if not has_final_layer: + if 'final_layer' not in head_new: + head_new['final_layer'] = None + head_append_dict['final_layer'] = 'None' + + +def 
check_and_update_config(neck: Optional[ConfigType], + head: ConfigType) -> Tuple[Optional[Dict], Dict]: + """Check and update the configuration of the head and neck components. + Args: + neck (Optional[ConfigType]): Configuration for the neck component. + head (ConfigType): Configuration for the head component. + + Returns: + Tuple[Optional[Dict], Dict]: Updated configurations for the neck + and head components. + """ + head_new, neck_new = head.copy(), neck.copy() if isinstance(neck, + dict) else {} + head_deleted_dict, head_append_dict = {}, {} + + if 'input_transform' in head: + input_transform = head_new.pop('input_transform') + head_deleted_dict['input_transform'] = f'\'{input_transform}\'' + else: + input_transform = 'select' + + if 'input_index' in head: + input_index = head_new.pop('input_index') + head_deleted_dict['input_index'] = str(input_index) + else: + input_index = (-1, ) + + if 'align_corners' in head: + align_corners = head_new.pop('align_corners') + head_deleted_dict['align_corners'] = str(align_corners) + else: + align_corners = False + + process_input_transform(input_transform, head, head_new, head_deleted_dict, + head_append_dict, neck_new, input_index, + align_corners) + + if 'extra' in head: + extra = head_new.pop('extra') + process_extra_field(extra, head_new, head_deleted_dict, + head_append_dict, neck_new) + + if 'has_final_layer' in head: + has_final_layer = head_new.pop('has_final_layer') + process_has_final_layer(has_final_layer, head_new, head_deleted_dict, + head_append_dict) + + display_modifications(head_deleted_dict, head_append_dict, neck_new) + + neck_new = neck_new if len(neck_new) else None + return neck_new, head_new + + +@master_only +def display_modifications(head_deleted_dict: Dict, head_append_dict: Dict, + neck: Dict) -> None: + """Display the modifications made to the head and neck configurations. + + Args: + head_deleted_dict (Dict): Dictionary of deleted fields in the head. + head_append_dict (Dict): Dictionary of appended fields in the head. + neck (Dict): Updated neck configuration. + """ + if len(head_deleted_dict) + len(head_append_dict) == 0: + return + + old_model_info, new_model_info = build_model_info(head_deleted_dict, + head_append_dict, neck) + + total_info = '\nThe config you are using is outdated. '\ + 'The following section of the config:\n```\n' + total_info += old_model_info + total_info += '```\nshould be updated to\n```\n' + total_info += new_model_info + total_info += '```\nFor more information, please refer to '\ + 'https://mmpose.readthedocs.io/en/latest/' \ + 'guide_to_framework.html#step3-model' + + logger: MMLogger = MMLogger.get_current_instance() + logger.warning(total_info) + + +def build_model_info(head_deleted_dict: Dict, head_append_dict: Dict, + neck: Dict) -> Tuple[str, str]: + """Build the old and new model information strings. + Args: + head_deleted_dict (Dict): Dictionary of deleted fields in the head. + head_append_dict (Dict): Dictionary of appended fields in the head. + neck (Dict): Updated neck configuration. + + Returns: + Tuple[str, str]: Old and new model information strings. 
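# --- Illustrative sketch, not part of the diff above ---
# End-to-end effect of check_and_update_config on an outdated head that used
# `input_transform='resize_concat'`. The head type is a placeholder; the point
# is the migration of input_transform/input_index into a FeatureMapProcessor
# neck and the summed in_channels.
old_head = dict(
    type='HeatmapHead',
    in_channels=(18, 36, 72, 144),
    out_channels=17,
    input_transform='resize_concat',
    input_index=(0, 1, 2, 3),
    align_corners=False,
)
# neck, head = check_and_update_config(None, old_head)
# Expected (per process_input_transform above):
#   neck == dict(type='FeatureMapProcessor', concat=True, select_index=(0, 1, 2, 3))
#   head == dict(type='HeatmapHead', out_channels=17, in_channels=270)   # 18+36+72+144
# and a warning with the old/new config snippets is logged.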
+ """ + old_head_info = build_head_info(head_deleted_dict) + new_head_info = build_head_info(head_append_dict) + neck_info = build_neck_info(neck) + + old_model_info = 'model=dict(\n' + ' ' * 4 + '...,\n' + old_head_info + new_model_info = 'model=dict(\n' + ' ' * 4 + '...,\n' \ + + neck_info + new_head_info + + return old_model_info, new_model_info + + +def build_head_info(head_dict: Dict) -> str: + """Build the head information string. + + Args: + head_dict (Dict): Dictionary of fields in the head configuration. + Returns: + str: Head information string. + """ + head_info = ' ' * 4 + 'head=dict(\n' + for key, value in head_dict.items(): + head_info += ' ' * 8 + f'{key}={value},\n' + head_info += ' ' * 8 + '...),\n' + return head_info + + +def build_neck_info(neck: Dict) -> str: + """Build the neck information string. + Args: + neck (Dict): Updated neck configuration. + + Returns: + str: Neck information string. + """ + if len(neck) > 0: + neck = neck.copy() + neck_info = ' ' * 4 + 'neck=dict(\n' + ' ' * 8 + \ + f'type=\'{neck.pop("type")}\',\n' + for key, value in neck.items(): + neck_info += ' ' * 8 + f'{key}={str(value)},\n' + neck_info += ' ' * 4 + '),\n' + else: + neck_info = '' + return neck_info diff --git a/mmpose/models/utils/ckpt_convert.py b/mmpose/models/utils/ckpt_convert.py index 05f5cdb4a3..b883547085 100644 --- a/mmpose/models/utils/ckpt_convert.py +++ b/mmpose/models/utils/ckpt_convert.py @@ -1,82 +1,82 @@ -# Copyright (c) OpenMMLab. All rights reserved. - -# This script consists of several convert functions which -# can modify the weights of model in original repo to be -# pre-trained weights. - -from collections import OrderedDict - -import torch - - -def pvt_convert(ckpt): - new_ckpt = OrderedDict() - # Process the concat between q linear weights and kv linear weights - use_abs_pos_embed = False - use_conv_ffn = False - for k in ckpt.keys(): - if k.startswith('pos_embed'): - use_abs_pos_embed = True - if k.find('dwconv') >= 0: - use_conv_ffn = True - for k, v in ckpt.items(): - if k.startswith('head'): - continue - if k.startswith('norm.'): - continue - if k.startswith('cls_token'): - continue - if k.startswith('pos_embed'): - stage_i = int(k.replace('pos_embed', '')) - new_k = k.replace(f'pos_embed{stage_i}', - f'layers.{stage_i - 1}.1.0.pos_embed') - if stage_i == 4 and v.size(1) == 50: # 1 (cls token) + 7 * 7 - new_v = v[:, 1:, :] # remove cls token - else: - new_v = v - elif k.startswith('patch_embed'): - stage_i = int(k.split('.')[0].replace('patch_embed', '')) - new_k = k.replace(f'patch_embed{stage_i}', - f'layers.{stage_i - 1}.0') - new_v = v - if 'proj.' in new_k: - new_k = new_k.replace('proj.', 'projection.') - elif k.startswith('block'): - stage_i = int(k.split('.')[0].replace('block', '')) - layer_i = int(k.split('.')[1]) - new_layer_i = layer_i + use_abs_pos_embed - new_k = k.replace(f'block{stage_i}.{layer_i}', - f'layers.{stage_i - 1}.1.{new_layer_i}') - new_v = v - if 'attn.q.' in new_k: - sub_item_k = k.replace('q.', 'kv.') - new_k = new_k.replace('q.', 'attn.in_proj_') - new_v = torch.cat([v, ckpt[sub_item_k]], dim=0) - elif 'attn.kv.' in new_k: - continue - elif 'attn.proj.' in new_k: - new_k = new_k.replace('proj.', 'attn.out_proj.') - elif 'attn.sr.' in new_k: - new_k = new_k.replace('sr.', 'sr.') - elif 'mlp.' 
in new_k: - string = f'{new_k}-' - new_k = new_k.replace('mlp.', 'ffn.layers.') - if 'fc1.weight' in new_k or 'fc2.weight' in new_k: - new_v = v.reshape((*v.shape, 1, 1)) - new_k = new_k.replace('fc1.', '0.') - new_k = new_k.replace('dwconv.dwconv.', '1.') - if use_conv_ffn: - new_k = new_k.replace('fc2.', '4.') - else: - new_k = new_k.replace('fc2.', '3.') - string += f'{new_k} {v.shape}-{new_v.shape}' - elif k.startswith('norm'): - stage_i = int(k[4]) - new_k = k.replace(f'norm{stage_i}', f'layers.{stage_i - 1}.2') - new_v = v - else: - new_k = k - new_v = v - new_ckpt[new_k] = new_v - - return new_ckpt +# Copyright (c) OpenMMLab. All rights reserved. + +# This script consists of several convert functions which +# can modify the weights of model in original repo to be +# pre-trained weights. + +from collections import OrderedDict + +import torch + + +def pvt_convert(ckpt): + new_ckpt = OrderedDict() + # Process the concat between q linear weights and kv linear weights + use_abs_pos_embed = False + use_conv_ffn = False + for k in ckpt.keys(): + if k.startswith('pos_embed'): + use_abs_pos_embed = True + if k.find('dwconv') >= 0: + use_conv_ffn = True + for k, v in ckpt.items(): + if k.startswith('head'): + continue + if k.startswith('norm.'): + continue + if k.startswith('cls_token'): + continue + if k.startswith('pos_embed'): + stage_i = int(k.replace('pos_embed', '')) + new_k = k.replace(f'pos_embed{stage_i}', + f'layers.{stage_i - 1}.1.0.pos_embed') + if stage_i == 4 and v.size(1) == 50: # 1 (cls token) + 7 * 7 + new_v = v[:, 1:, :] # remove cls token + else: + new_v = v + elif k.startswith('patch_embed'): + stage_i = int(k.split('.')[0].replace('patch_embed', '')) + new_k = k.replace(f'patch_embed{stage_i}', + f'layers.{stage_i - 1}.0') + new_v = v + if 'proj.' in new_k: + new_k = new_k.replace('proj.', 'projection.') + elif k.startswith('block'): + stage_i = int(k.split('.')[0].replace('block', '')) + layer_i = int(k.split('.')[1]) + new_layer_i = layer_i + use_abs_pos_embed + new_k = k.replace(f'block{stage_i}.{layer_i}', + f'layers.{stage_i - 1}.1.{new_layer_i}') + new_v = v + if 'attn.q.' in new_k: + sub_item_k = k.replace('q.', 'kv.') + new_k = new_k.replace('q.', 'attn.in_proj_') + new_v = torch.cat([v, ckpt[sub_item_k]], dim=0) + elif 'attn.kv.' in new_k: + continue + elif 'attn.proj.' in new_k: + new_k = new_k.replace('proj.', 'attn.out_proj.') + elif 'attn.sr.' in new_k: + new_k = new_k.replace('sr.', 'sr.') + elif 'mlp.' in new_k: + string = f'{new_k}-' + new_k = new_k.replace('mlp.', 'ffn.layers.') + if 'fc1.weight' in new_k or 'fc2.weight' in new_k: + new_v = v.reshape((*v.shape, 1, 1)) + new_k = new_k.replace('fc1.', '0.') + new_k = new_k.replace('dwconv.dwconv.', '1.') + if use_conv_ffn: + new_k = new_k.replace('fc2.', '4.') + else: + new_k = new_k.replace('fc2.', '3.') + string += f'{new_k} {v.shape}-{new_v.shape}' + elif k.startswith('norm'): + stage_i = int(k[4]) + new_k = k.replace(f'norm{stage_i}', f'layers.{stage_i - 1}.2') + new_v = v + else: + new_k = k + new_v = v + new_ckpt[new_k] = new_v + + return new_ckpt diff --git a/mmpose/models/utils/geometry.py b/mmpose/models/utils/geometry.py index 0ceadaec30..4821364496 100644 --- a/mmpose/models/utils/geometry.py +++ b/mmpose/models/utils/geometry.py @@ -1,68 +1,68 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from torch.nn import functional as F - - -def rot6d_to_rotmat(x): - """Convert 6D rotation representation to 3x3 rotation matrix. 
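# --- Illustrative sketch, not part of the diff above (assumes mmpose is installed) ---
# pvt_convert() remaps checkpoint keys from the official PVT repo layout
# (patch_embed1, block1.0, norm1, head, ...) to the MMPose/MMCV backbone layout
# (layers.0.0, layers.0.1.*, layers.0.2, ...), dropping the classifier head.
# The tensors below are tiny dummies; only the key names matter.
from collections import OrderedDict

import torch

from mmpose.models.utils import pvt_convert

fake_ckpt = OrderedDict({
    'patch_embed1.proj.weight': torch.zeros(64, 3, 4, 4),
    'norm1.weight': torch.zeros(64),
    'head.weight': torch.zeros(1000, 512),   # classifier head -> dropped
})
print(list(pvt_convert(fake_ckpt).keys()))
# ['layers.0.0.projection.weight', 'layers.0.2.weight']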
- - Based on Zhou et al., "On the Continuity of Rotation - Representations in Neural Networks", CVPR 2019 - Input: - (B,6) Batch of 6-D rotation representations - Output: - (B,3,3) Batch of corresponding rotation matrices - """ - x = x.view(-1, 3, 2) - a1 = x[:, :, 0] - a2 = x[:, :, 1] - b1 = F.normalize(a1) - b2 = F.normalize(a2 - torch.einsum('bi,bi->b', b1, a2).unsqueeze(-1) * b1) - b3 = torch.cross(b1, b2) - return torch.stack((b1, b2, b3), dim=-1) - - -def batch_rodrigues(theta): - """Convert axis-angle representation to rotation matrix. - Args: - theta: size = [B, 3] - Returns: - Rotation matrix corresponding to the quaternion - -- size = [B, 3, 3] - """ - l2norm = torch.norm(theta + 1e-8, p=2, dim=1) - angle = torch.unsqueeze(l2norm, -1) - normalized = torch.div(theta, angle) - angle = angle * 0.5 - v_cos = torch.cos(angle) - v_sin = torch.sin(angle) - quat = torch.cat([v_cos, v_sin * normalized], dim=1) - return quat_to_rotmat(quat) - - -def quat_to_rotmat(quat): - """Convert quaternion coefficients to rotation matrix. - Args: - quat: size = [B, 4] 4 <===>(w, x, y, z) - Returns: - Rotation matrix corresponding to the quaternion - -- size = [B, 3, 3] - """ - norm_quat = quat - norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True) - w, x, y, z = norm_quat[:, 0], norm_quat[:, 1],\ - norm_quat[:, 2], norm_quat[:, 3] - - B = quat.size(0) - - w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2) - wx, wy, wz = w * x, w * y, w * z - xy, xz, yz = x * y, x * z, y * z - - rotMat = torch.stack([ - w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz, 2 * wz + 2 * xy, - w2 - x2 + y2 - z2, 2 * yz - 2 * wx, 2 * xz - 2 * wy, 2 * wx + 2 * yz, - w2 - x2 - y2 + z2 - ], - dim=1).view(B, 3, 3) - return rotMat +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from torch.nn import functional as F + + +def rot6d_to_rotmat(x): + """Convert 6D rotation representation to 3x3 rotation matrix. + + Based on Zhou et al., "On the Continuity of Rotation + Representations in Neural Networks", CVPR 2019 + Input: + (B,6) Batch of 6-D rotation representations + Output: + (B,3,3) Batch of corresponding rotation matrices + """ + x = x.view(-1, 3, 2) + a1 = x[:, :, 0] + a2 = x[:, :, 1] + b1 = F.normalize(a1) + b2 = F.normalize(a2 - torch.einsum('bi,bi->b', b1, a2).unsqueeze(-1) * b1) + b3 = torch.cross(b1, b2) + return torch.stack((b1, b2, b3), dim=-1) + + +def batch_rodrigues(theta): + """Convert axis-angle representation to rotation matrix. + Args: + theta: size = [B, 3] + Returns: + Rotation matrix corresponding to the quaternion + -- size = [B, 3, 3] + """ + l2norm = torch.norm(theta + 1e-8, p=2, dim=1) + angle = torch.unsqueeze(l2norm, -1) + normalized = torch.div(theta, angle) + angle = angle * 0.5 + v_cos = torch.cos(angle) + v_sin = torch.sin(angle) + quat = torch.cat([v_cos, v_sin * normalized], dim=1) + return quat_to_rotmat(quat) + + +def quat_to_rotmat(quat): + """Convert quaternion coefficients to rotation matrix. 
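# A quick sanity-check sketch for rot6d_to_rotmat: random 6-D inputs should map
# to (approximately) orthogonal 3x3 matrices, since the columns are built by
# Gram-Schmidt plus a cross product.
import torch

from mmpose.models.utils.geometry import rot6d_to_rotmat

x = torch.randn(4, 6)
R = rot6d_to_rotmat(x)                               # (4, 3, 3)
identity = torch.eye(3).expand(4, 3, 3)
assert torch.allclose(torch.matmul(R, R.transpose(1, 2)), identity, atol=1e-4)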
+ Args: + quat: size = [B, 4] 4 <===>(w, x, y, z) + Returns: + Rotation matrix corresponding to the quaternion + -- size = [B, 3, 3] + """ + norm_quat = quat + norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True) + w, x, y, z = norm_quat[:, 0], norm_quat[:, 1],\ + norm_quat[:, 2], norm_quat[:, 3] + + B = quat.size(0) + + w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2) + wx, wy, wz = w * x, w * y, w * z + xy, xz, yz = x * y, x * z, y * z + + rotMat = torch.stack([ + w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz, 2 * wz + 2 * xy, + w2 - x2 + y2 - z2, 2 * yz - 2 * wx, 2 * xz - 2 * wy, 2 * wx + 2 * yz, + w2 - x2 - y2 + z2 + ], + dim=1).view(B, 3, 3) + return rotMat diff --git a/mmpose/models/utils/ops.py b/mmpose/models/utils/ops.py index 0c94352647..0acbfe41e1 100644 --- a/mmpose/models/utils/ops.py +++ b/mmpose/models/utils/ops.py @@ -1,52 +1,52 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings -from typing import Optional, Tuple, Union - -import torch -from torch.nn import functional as F - - -def resize(input: torch.Tensor, - size: Optional[Union[Tuple[int, int], torch.Size]] = None, - scale_factor: Optional[float] = None, - mode: str = 'nearest', - align_corners: Optional[bool] = None, - warning: bool = True) -> torch.Tensor: - """Resize a given input tensor using specified size or scale_factor. - - Args: - input (torch.Tensor): The input tensor to be resized. - size (Optional[Union[Tuple[int, int], torch.Size]]): The desired - output size. Defaults to None. - scale_factor (Optional[float]): The scaling factor for resizing. - Defaults to None. - mode (str): The interpolation mode. Defaults to 'nearest'. - align_corners (Optional[bool]): Determines whether to align the - corners when using certain interpolation modes. Defaults to None. - warning (bool): Whether to display a warning when the input and - output sizes are not ideal for alignment. Defaults to True. - - Returns: - torch.Tensor: The resized tensor. - """ - # Check if a warning should be displayed regarding input and output sizes - if warning: - if size is not None and align_corners: - input_h, input_w = tuple(int(x) for x in input.shape[2:]) - output_h, output_w = tuple(int(x) for x in size) - if output_h > input_h or output_w > output_h: - if ((output_h > 1 and output_w > 1 and input_h > 1 - and input_w > 1) and (output_h - 1) % (input_h - 1) - and (output_w - 1) % (input_w - 1)): - warnings.warn( - f'When align_corners={align_corners}, ' - 'the output would be more aligned if ' - f'input size {(input_h, input_w)} is `x+1` and ' - f'out size {(output_h, output_w)} is `nx+1`') - - # Convert torch.Size to tuple if necessary - if isinstance(size, torch.Size): - size = tuple(int(x) for x in size) - - # Perform the resizing operation - return F.interpolate(input, size, scale_factor, mode, align_corners) +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from typing import Optional, Tuple, Union + +import torch +from torch.nn import functional as F + + +def resize(input: torch.Tensor, + size: Optional[Union[Tuple[int, int], torch.Size]] = None, + scale_factor: Optional[float] = None, + mode: str = 'nearest', + align_corners: Optional[bool] = None, + warning: bool = True) -> torch.Tensor: + """Resize a given input tensor using specified size or scale_factor. + + Args: + input (torch.Tensor): The input tensor to be resized. + size (Optional[Union[Tuple[int, int], torch.Size]]): The desired + output size. Defaults to None. 
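# A short sketch of batch_rodrigues with a hand-checkable input: an axis-angle
# rotation of pi/2 about the z-axis should map (1, 0, 0) to roughly (0, 1, 0).
import math

import torch

from mmpose.models.utils.geometry import batch_rodrigues

theta = torch.tensor([[0.0, 0.0, math.pi / 2]])      # axis-angle, shape (1, 3)
R = batch_rodrigues(theta)                           # (1, 3, 3)
print(torch.matmul(R[0], torch.tensor([1.0, 0.0, 0.0])))  # ~ (0, 1, 0)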
+ scale_factor (Optional[float]): The scaling factor for resizing. + Defaults to None. + mode (str): The interpolation mode. Defaults to 'nearest'. + align_corners (Optional[bool]): Determines whether to align the + corners when using certain interpolation modes. Defaults to None. + warning (bool): Whether to display a warning when the input and + output sizes are not ideal for alignment. Defaults to True. + + Returns: + torch.Tensor: The resized tensor. + """ + # Check if a warning should be displayed regarding input and output sizes + if warning: + if size is not None and align_corners: + input_h, input_w = tuple(int(x) for x in input.shape[2:]) + output_h, output_w = tuple(int(x) for x in size) + if output_h > input_h or output_w > output_h: + if ((output_h > 1 and output_w > 1 and input_h > 1 + and input_w > 1) and (output_h - 1) % (input_h - 1) + and (output_w - 1) % (input_w - 1)): + warnings.warn( + f'When align_corners={align_corners}, ' + 'the output would be more aligned if ' + f'input size {(input_h, input_w)} is `x+1` and ' + f'out size {(output_h, output_w)} is `nx+1`') + + # Convert torch.Size to tuple if necessary + if isinstance(size, torch.Size): + size = tuple(int(x) for x in size) + + # Perform the resizing operation + return F.interpolate(input, size, scale_factor, mode, align_corners) diff --git a/mmpose/models/utils/realnvp.py b/mmpose/models/utils/realnvp.py index 911953e8f9..befd569e03 100644 --- a/mmpose/models/utils/realnvp.py +++ b/mmpose/models/utils/realnvp.py @@ -1,76 +1,76 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -from torch import distributions - - -class RealNVP(nn.Module): - """RealNVP: a flow-based generative model - - `Density estimation using Real NVP - arXiv: `_. - - Code is modified from `the official implementation of RLE - `_. - - See also `real-nvp-pytorch - `_. 
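# A minimal sketch of the resize wrapper, assuming predicted heatmaps that
# should be upsampled to a (hypothetical) input resolution of 256x192.
import torch

from mmpose.models.utils.ops import resize

heatmaps = torch.randn(1, 17, 64, 48)                # (B, K, H, W)
upsampled = resize(
    heatmaps, size=(256, 192), mode='bilinear', align_corners=False)
print(upsampled.shape)                               # torch.Size([1, 17, 256, 192])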
- """ - - @staticmethod - def get_scale_net(): - """Get the scale model in a single invertable mapping.""" - return nn.Sequential( - nn.Linear(2, 64), nn.LeakyReLU(), nn.Linear(64, 64), - nn.LeakyReLU(), nn.Linear(64, 2), nn.Tanh()) - - @staticmethod - def get_trans_net(): - """Get the translation model in a single invertable mapping.""" - return nn.Sequential( - nn.Linear(2, 64), nn.LeakyReLU(), nn.Linear(64, 64), - nn.LeakyReLU(), nn.Linear(64, 2)) - - @property - def prior(self): - """The prior distribution.""" - return distributions.MultivariateNormal(self.loc, self.cov) - - def __init__(self): - super(RealNVP, self).__init__() - - self.register_buffer('loc', torch.zeros(2)) - self.register_buffer('cov', torch.eye(2)) - self.register_buffer( - 'mask', torch.tensor([[0, 1], [1, 0]] * 3, dtype=torch.float32)) - - self.s = torch.nn.ModuleList( - [self.get_scale_net() for _ in range(len(self.mask))]) - self.t = torch.nn.ModuleList( - [self.get_trans_net() for _ in range(len(self.mask))]) - self.init_weights() - - def init_weights(self): - """Initialization model weights.""" - for m in self.modules(): - if isinstance(m, nn.Linear): - nn.init.xavier_uniform_(m.weight, gain=0.01) - - def backward_p(self, x): - """Apply mapping form the data space to the latent space and calculate - the log determinant of the Jacobian matrix.""" - - log_det_jacob, z = x.new_zeros(x.shape[0]), x - for i in reversed(range(len(self.t))): - z_ = self.mask[i] * z - s = self.s[i](z_) * (1 - self.mask[i]) # torch.exp(s): betas - t = self.t[i](z_) * (1 - self.mask[i]) # gammas - z = (1 - self.mask[i]) * (z - t) * torch.exp(-s) + z_ - log_det_jacob -= s.sum(dim=1) - return z, log_det_jacob - - def log_prob(self, x): - """Calculate the log probability of given sample in data space.""" - - z, log_det = self.backward_p(x) - return self.prior.log_prob(z) + log_det +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from torch import distributions + + +class RealNVP(nn.Module): + """RealNVP: a flow-based generative model + + `Density estimation using Real NVP + arXiv: `_. + + Code is modified from `the official implementation of RLE + `_. + + See also `real-nvp-pytorch + `_. 
+ """ + + @staticmethod + def get_scale_net(): + """Get the scale model in a single invertable mapping.""" + return nn.Sequential( + nn.Linear(2, 64), nn.LeakyReLU(), nn.Linear(64, 64), + nn.LeakyReLU(), nn.Linear(64, 2), nn.Tanh()) + + @staticmethod + def get_trans_net(): + """Get the translation model in a single invertable mapping.""" + return nn.Sequential( + nn.Linear(2, 64), nn.LeakyReLU(), nn.Linear(64, 64), + nn.LeakyReLU(), nn.Linear(64, 2)) + + @property + def prior(self): + """The prior distribution.""" + return distributions.MultivariateNormal(self.loc, self.cov) + + def __init__(self): + super(RealNVP, self).__init__() + + self.register_buffer('loc', torch.zeros(2)) + self.register_buffer('cov', torch.eye(2)) + self.register_buffer( + 'mask', torch.tensor([[0, 1], [1, 0]] * 3, dtype=torch.float32)) + + self.s = torch.nn.ModuleList( + [self.get_scale_net() for _ in range(len(self.mask))]) + self.t = torch.nn.ModuleList( + [self.get_trans_net() for _ in range(len(self.mask))]) + self.init_weights() + + def init_weights(self): + """Initialization model weights.""" + for m in self.modules(): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight, gain=0.01) + + def backward_p(self, x): + """Apply mapping form the data space to the latent space and calculate + the log determinant of the Jacobian matrix.""" + + log_det_jacob, z = x.new_zeros(x.shape[0]), x + for i in reversed(range(len(self.t))): + z_ = self.mask[i] * z + s = self.s[i](z_) * (1 - self.mask[i]) # torch.exp(s): betas + t = self.t[i](z_) * (1 - self.mask[i]) # gammas + z = (1 - self.mask[i]) * (z - t) * torch.exp(-s) + z_ + log_det_jacob -= s.sum(dim=1) + return z, log_det_jacob + + def log_prob(self, x): + """Calculate the log probability of given sample in data space.""" + + z, log_det = self.backward_p(x) + return self.prior.log_prob(z) + log_det diff --git a/mmpose/models/utils/regularizations.py b/mmpose/models/utils/regularizations.py index d8c7449038..1911ad6090 100644 --- a/mmpose/models/utils/regularizations.py +++ b/mmpose/models/utils/regularizations.py @@ -1,86 +1,86 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from abc import ABCMeta, abstractmethod, abstractproperty - -import torch - - -class PytorchModuleHook(metaclass=ABCMeta): - """Base class for PyTorch module hook registers. - - An instance of a subclass of PytorchModuleHook can be used to - register hook to a pytorch module using the `register` method like: - hook_register.register(module) - - Subclasses should add/overwrite the following methods: - - __init__ - - hook - - hook_type - """ - - @abstractmethod - def hook(self, *args, **kwargs): - """Hook function.""" - - @abstractproperty - def hook_type(self) -> str: - """Hook type Subclasses should overwrite this function to return a - string value in. - - {`forward`, `forward_pre`, `backward`} - """ - - def register(self, module): - """Register the hook function to the module. - - Args: - module (pytorch module): the module to register the hook. 
- - Returns: - handle (torch.utils.hooks.RemovableHandle): a handle to remove - the hook by calling handle.remove() - """ - assert isinstance(module, torch.nn.Module) - - if self.hook_type == 'forward': - h = module.register_forward_hook(self.hook) - elif self.hook_type == 'forward_pre': - h = module.register_forward_pre_hook(self.hook) - elif self.hook_type == 'backward': - h = module.register_backward_hook(self.hook) - else: - raise ValueError(f'Invalid hook type {self.hook}') - - return h - - -class WeightNormClipHook(PytorchModuleHook): - """Apply weight norm clip regularization. - - The module's parameter will be clip to a given maximum norm before each - forward pass. - - Args: - max_norm (float): The maximum norm of the parameter. - module_param_names (str|list): The parameter name (or name list) to - apply weight norm clip. - """ - - def __init__(self, max_norm=1.0, module_param_names='weight'): - self.module_param_names = module_param_names if isinstance( - module_param_names, list) else [module_param_names] - self.max_norm = max_norm - - @property - def hook_type(self): - return 'forward_pre' - - def hook(self, module, _input): - for name in self.module_param_names: - assert name in module._parameters, f'{name} is not a parameter' \ - f' of the module {type(module)}' - param = module._parameters[name] - - with torch.no_grad(): - m = param.norm().item() - if m > self.max_norm: - param.mul_(self.max_norm / (m + 1e-6)) +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod, abstractproperty + +import torch + + +class PytorchModuleHook(metaclass=ABCMeta): + """Base class for PyTorch module hook registers. + + An instance of a subclass of PytorchModuleHook can be used to + register hook to a pytorch module using the `register` method like: + hook_register.register(module) + + Subclasses should add/overwrite the following methods: + - __init__ + - hook + - hook_type + """ + + @abstractmethod + def hook(self, *args, **kwargs): + """Hook function.""" + + @abstractproperty + def hook_type(self) -> str: + """Hook type Subclasses should overwrite this function to return a + string value in. + + {`forward`, `forward_pre`, `backward`} + """ + + def register(self, module): + """Register the hook function to the module. + + Args: + module (pytorch module): the module to register the hook. + + Returns: + handle (torch.utils.hooks.RemovableHandle): a handle to remove + the hook by calling handle.remove() + """ + assert isinstance(module, torch.nn.Module) + + if self.hook_type == 'forward': + h = module.register_forward_hook(self.hook) + elif self.hook_type == 'forward_pre': + h = module.register_forward_pre_hook(self.hook) + elif self.hook_type == 'backward': + h = module.register_backward_hook(self.hook) + else: + raise ValueError(f'Invalid hook type {self.hook}') + + return h + + +class WeightNormClipHook(PytorchModuleHook): + """Apply weight norm clip regularization. + + The module's parameter will be clip to a given maximum norm before each + forward pass. + + Args: + max_norm (float): The maximum norm of the parameter. + module_param_names (str|list): The parameter name (or name list) to + apply weight norm clip. 
+ """ + + def __init__(self, max_norm=1.0, module_param_names='weight'): + self.module_param_names = module_param_names if isinstance( + module_param_names, list) else [module_param_names] + self.max_norm = max_norm + + @property + def hook_type(self): + return 'forward_pre' + + def hook(self, module, _input): + for name in self.module_param_names: + assert name in module._parameters, f'{name} is not a parameter' \ + f' of the module {type(module)}' + param = module._parameters[name] + + with torch.no_grad(): + m = param.norm().item() + if m > self.max_norm: + param.mul_(self.max_norm / (m + 1e-6)) diff --git a/mmpose/models/utils/rtmcc_block.py b/mmpose/models/utils/rtmcc_block.py index bd4929454c..82fbaf7106 100644 --- a/mmpose/models/utils/rtmcc_block.py +++ b/mmpose/models/utils/rtmcc_block.py @@ -1,305 +1,305 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn.bricks import DropPath -from mmengine.utils import digit_version -from mmengine.utils.dl_utils import TORCH_VERSION - - -def rope(x, dim): - """Applies Rotary Position Embedding to input tensor. - - Args: - x (torch.Tensor): Input tensor. - dim (int | list[int]): The spatial dimension(s) to apply - rotary position embedding. - - Returns: - torch.Tensor: The tensor after applying rotary position - embedding. - - Reference: - `RoFormer: Enhanced Transformer with Rotary - Position Embedding `_ - """ - shape = x.shape - if isinstance(dim, int): - dim = [dim] - - spatial_shape = [shape[i] for i in dim] - total_len = 1 - for i in spatial_shape: - total_len *= i - - position = torch.reshape( - torch.arange(total_len, dtype=torch.int, device=x.device), - spatial_shape) - - for i in range(dim[-1] + 1, len(shape) - 1, 1): - position = torch.unsqueeze(position, dim=-1) - - half_size = shape[-1] // 2 - freq_seq = -torch.arange( - half_size, dtype=torch.int, device=x.device) / float(half_size) - inv_freq = 10000**-freq_seq - - sinusoid = position[..., None] * inv_freq[None, None, :] - - sin = torch.sin(sinusoid) - cos = torch.cos(sinusoid) - x1, x2 = torch.chunk(x, 2, dim=-1) - - return torch.cat([x1 * cos - x2 * sin, x2 * cos + x1 * sin], dim=-1) - - -class Scale(nn.Module): - """Scale vector by element multiplications. - - Args: - dim (int): The dimension of the scale vector. - init_value (float, optional): The initial value of the scale vector. - Defaults to 1.0. - trainable (bool, optional): Whether the scale vector is trainable. - Defaults to True. - """ - - def __init__(self, dim, init_value=1., trainable=True): - super().__init__() - self.scale = nn.Parameter( - init_value * torch.ones(dim), requires_grad=trainable) - - def forward(self, x): - """Forward function.""" - - return x * self.scale - - -class ScaleNorm(nn.Module): - """Scale Norm. - - Args: - dim (int): The dimension of the scale vector. - eps (float, optional): The minimum value in clamp. Defaults to 1e-5. - - Reference: - `Transformers without Tears: Improving the Normalization - of Self-Attention `_ - """ - - def __init__(self, dim, eps=1e-5): - super().__init__() - self.scale = dim**-0.5 - self.eps = eps - self.g = nn.Parameter(torch.ones(1)) - - def forward(self, x): - """Forward function. - - Args: - x (torch.Tensor): Input tensor. - - Returns: - torch.Tensor: The tensor after applying scale norm. 
- """ - - norm = torch.norm(x, dim=2, keepdim=True) * self.scale - return x / norm.clamp(min=self.eps) * self.g - - -class RTMCCBlock(nn.Module): - """Gated Attention Unit (GAU) in RTMBlock. - - Args: - num_token (int): The number of tokens. - in_token_dims (int): The input token dimension. - out_token_dims (int): The output token dimension. - expansion_factor (int, optional): The expansion factor of the - intermediate token dimension. Defaults to 2. - s (int, optional): The self-attention feature dimension. - Defaults to 128. - eps (float, optional): The minimum value in clamp. Defaults to 1e-5. - dropout_rate (float, optional): The dropout rate. Defaults to 0.0. - drop_path (float, optional): The drop path rate. Defaults to 0.0. - attn_type (str, optional): Type of attention which should be one of - the following options: - - - 'self-attn': Self-attention. - - 'cross-attn': Cross-attention. - - Defaults to 'self-attn'. - act_fn (str, optional): The activation function which should be one - of the following options: - - - 'ReLU': ReLU activation. - - 'SiLU': SiLU activation. - - Defaults to 'SiLU'. - bias (bool, optional): Whether to use bias in linear layers. - Defaults to False. - use_rel_bias (bool, optional): Whether to use relative bias. - Defaults to True. - pos_enc (bool, optional): Whether to use rotary position - embedding. Defaults to False. - - Reference: - `Transformer Quality in Linear Time - `_ - """ - - def __init__(self, - num_token, - in_token_dims, - out_token_dims, - expansion_factor=2, - s=128, - eps=1e-5, - dropout_rate=0., - drop_path=0., - attn_type='self-attn', - act_fn='SiLU', - bias=False, - use_rel_bias=True, - pos_enc=False): - - super(RTMCCBlock, self).__init__() - self.s = s - self.num_token = num_token - self.use_rel_bias = use_rel_bias - self.attn_type = attn_type - self.pos_enc = pos_enc - self.drop_path = DropPath(drop_path) \ - if drop_path > 0. 
else nn.Identity() - - self.e = int(in_token_dims * expansion_factor) - if use_rel_bias: - if attn_type == 'self-attn': - self.w = nn.Parameter( - torch.rand([2 * num_token - 1], dtype=torch.float)) - else: - self.a = nn.Parameter(torch.rand([1, s], dtype=torch.float)) - self.b = nn.Parameter(torch.rand([1, s], dtype=torch.float)) - self.o = nn.Linear(self.e, out_token_dims, bias=bias) - - if attn_type == 'self-attn': - self.uv = nn.Linear(in_token_dims, 2 * self.e + self.s, bias=bias) - self.gamma = nn.Parameter(torch.rand((2, self.s))) - self.beta = nn.Parameter(torch.rand((2, self.s))) - else: - self.uv = nn.Linear(in_token_dims, self.e + self.s, bias=bias) - self.k_fc = nn.Linear(in_token_dims, self.s, bias=bias) - self.v_fc = nn.Linear(in_token_dims, self.e, bias=bias) - nn.init.xavier_uniform_(self.k_fc.weight) - nn.init.xavier_uniform_(self.v_fc.weight) - - self.ln = ScaleNorm(in_token_dims, eps=eps) - - nn.init.xavier_uniform_(self.uv.weight) - - if act_fn == 'SiLU': - assert digit_version(TORCH_VERSION) >= digit_version('1.7.0'), \ - 'SiLU activation requires PyTorch version >= 1.7' - - self.act_fn = nn.SiLU(True) - else: - self.act_fn = nn.ReLU(True) - - if in_token_dims == out_token_dims: - self.shortcut = True - self.res_scale = Scale(in_token_dims) - else: - self.shortcut = False - - self.sqrt_s = math.sqrt(s) - - self.dropout_rate = dropout_rate - - if dropout_rate > 0.: - self.dropout = nn.Dropout(dropout_rate) - - def rel_pos_bias(self, seq_len, k_len=None): - """Add relative position bias.""" - - if self.attn_type == 'self-attn': - t = F.pad(self.w[:2 * seq_len - 1], [0, seq_len]).repeat(seq_len) - t = t[..., :-seq_len].reshape(-1, seq_len, 3 * seq_len - 2) - r = (2 * seq_len - 1) // 2 - t = t[..., r:-r] - else: - a = rope(self.a.repeat(seq_len, 1), dim=0) - b = rope(self.b.repeat(k_len, 1), dim=0) - t = torch.bmm(a, b.permute(0, 2, 1)) - return t - - def _forward(self, inputs): - """GAU Forward function.""" - - if self.attn_type == 'self-attn': - x = inputs - else: - x, k, v = inputs - - x = self.ln(x) - - # [B, K, in_token_dims] -> [B, K, e + e + s] - uv = self.uv(x) - uv = self.act_fn(uv) - - if self.attn_type == 'self-attn': - # [B, K, e + e + s] -> [B, K, e], [B, K, e], [B, K, s] - u, v, base = torch.split(uv, [self.e, self.e, self.s], dim=2) - # [B, K, 1, s] * [1, 1, 2, s] + [2, s] -> [B, K, 2, s] - base = base.unsqueeze(2) * self.gamma[None, None, :] + self.beta - - if self.pos_enc: - base = rope(base, dim=1) - # [B, K, 2, s] -> [B, K, s], [B, K, s] - q, k = torch.unbind(base, dim=2) - - else: - # [B, K, e + s] -> [B, K, e], [B, K, s] - u, q = torch.split(uv, [self.e, self.s], dim=2) - - k = self.k_fc(k) # -> [B, K, s] - v = self.v_fc(v) # -> [B, K, e] - - if self.pos_enc: - q = rope(q, 1) - k = rope(k, 1) - - # [B, K, s].permute() -> [B, s, K] - # [B, K, s] x [B, s, K] -> [B, K, K] - qk = torch.bmm(q, k.permute(0, 2, 1)) - - if self.use_rel_bias: - if self.attn_type == 'self-attn': - bias = self.rel_pos_bias(q.size(1)) - else: - bias = self.rel_pos_bias(q.size(1), k.size(1)) - qk += bias[:, :q.size(1), :k.size(1)] - # [B, K, K] - kernel = torch.square(F.relu(qk / self.sqrt_s)) - - if self.dropout_rate > 0.: - kernel = self.dropout(kernel) - # [B, K, K] x [B, K, e] -> [B, K, e] - x = u * torch.bmm(kernel, v) - # [B, K, e] -> [B, K, out_token_dims] - x = self.o(x) - - return x - - def forward(self, x): - """Forward function.""" - - if self.shortcut: - if self.attn_type == 'cross-attn': - res_shortcut = x[0] - else: - res_shortcut = x - main_branch = 
self.drop_path(self._forward(x)) - return self.res_scale(res_shortcut) + main_branch - else: - return self.drop_path(self._forward(x)) +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn.bricks import DropPath +from mmengine.utils import digit_version +from mmengine.utils.dl_utils import TORCH_VERSION + + +def rope(x, dim): + """Applies Rotary Position Embedding to input tensor. + + Args: + x (torch.Tensor): Input tensor. + dim (int | list[int]): The spatial dimension(s) to apply + rotary position embedding. + + Returns: + torch.Tensor: The tensor after applying rotary position + embedding. + + Reference: + `RoFormer: Enhanced Transformer with Rotary + Position Embedding `_ + """ + shape = x.shape + if isinstance(dim, int): + dim = [dim] + + spatial_shape = [shape[i] for i in dim] + total_len = 1 + for i in spatial_shape: + total_len *= i + + position = torch.reshape( + torch.arange(total_len, dtype=torch.int, device=x.device), + spatial_shape) + + for i in range(dim[-1] + 1, len(shape) - 1, 1): + position = torch.unsqueeze(position, dim=-1) + + half_size = shape[-1] // 2 + freq_seq = -torch.arange( + half_size, dtype=torch.int, device=x.device) / float(half_size) + inv_freq = 10000**-freq_seq + + sinusoid = position[..., None] * inv_freq[None, None, :] + + sin = torch.sin(sinusoid) + cos = torch.cos(sinusoid) + x1, x2 = torch.chunk(x, 2, dim=-1) + + return torch.cat([x1 * cos - x2 * sin, x2 * cos + x1 * sin], dim=-1) + + +class Scale(nn.Module): + """Scale vector by element multiplications. + + Args: + dim (int): The dimension of the scale vector. + init_value (float, optional): The initial value of the scale vector. + Defaults to 1.0. + trainable (bool, optional): Whether the scale vector is trainable. + Defaults to True. + """ + + def __init__(self, dim, init_value=1., trainable=True): + super().__init__() + self.scale = nn.Parameter( + init_value * torch.ones(dim), requires_grad=trainable) + + def forward(self, x): + """Forward function.""" + + return x * self.scale + + +class ScaleNorm(nn.Module): + """Scale Norm. + + Args: + dim (int): The dimension of the scale vector. + eps (float, optional): The minimum value in clamp. Defaults to 1e-5. + + Reference: + `Transformers without Tears: Improving the Normalization + of Self-Attention `_ + """ + + def __init__(self, dim, eps=1e-5): + super().__init__() + self.scale = dim**-0.5 + self.eps = eps + self.g = nn.Parameter(torch.ones(1)) + + def forward(self, x): + """Forward function. + + Args: + x (torch.Tensor): Input tensor. + + Returns: + torch.Tensor: The tensor after applying scale norm. + """ + + norm = torch.norm(x, dim=2, keepdim=True) * self.scale + return x / norm.clamp(min=self.eps) * self.g + + +class RTMCCBlock(nn.Module): + """Gated Attention Unit (GAU) in RTMBlock. + + Args: + num_token (int): The number of tokens. + in_token_dims (int): The input token dimension. + out_token_dims (int): The output token dimension. + expansion_factor (int, optional): The expansion factor of the + intermediate token dimension. Defaults to 2. + s (int, optional): The self-attention feature dimension. + Defaults to 128. + eps (float, optional): The minimum value in clamp. Defaults to 1e-5. + dropout_rate (float, optional): The dropout rate. Defaults to 0.0. + drop_path (float, optional): The drop path rate. Defaults to 0.0. 
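# A short sketch of rope(): apply rotary position embedding along the token
# dimension of a (B, K, C) feature, where C must be even.
import torch

from mmpose.models.utils.rtmcc_block import rope

feats = torch.randn(2, 17, 128)      # batch, keypoint tokens, channels
encoded = rope(feats, dim=1)         # same shape, position-dependent rotation
print(encoded.shape)                 # torch.Size([2, 17, 128])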
+ attn_type (str, optional): Type of attention which should be one of + the following options: + + - 'self-attn': Self-attention. + - 'cross-attn': Cross-attention. + + Defaults to 'self-attn'. + act_fn (str, optional): The activation function which should be one + of the following options: + + - 'ReLU': ReLU activation. + - 'SiLU': SiLU activation. + + Defaults to 'SiLU'. + bias (bool, optional): Whether to use bias in linear layers. + Defaults to False. + use_rel_bias (bool, optional): Whether to use relative bias. + Defaults to True. + pos_enc (bool, optional): Whether to use rotary position + embedding. Defaults to False. + + Reference: + `Transformer Quality in Linear Time + `_ + """ + + def __init__(self, + num_token, + in_token_dims, + out_token_dims, + expansion_factor=2, + s=128, + eps=1e-5, + dropout_rate=0., + drop_path=0., + attn_type='self-attn', + act_fn='SiLU', + bias=False, + use_rel_bias=True, + pos_enc=False): + + super(RTMCCBlock, self).__init__() + self.s = s + self.num_token = num_token + self.use_rel_bias = use_rel_bias + self.attn_type = attn_type + self.pos_enc = pos_enc + self.drop_path = DropPath(drop_path) \ + if drop_path > 0. else nn.Identity() + + self.e = int(in_token_dims * expansion_factor) + if use_rel_bias: + if attn_type == 'self-attn': + self.w = nn.Parameter( + torch.rand([2 * num_token - 1], dtype=torch.float)) + else: + self.a = nn.Parameter(torch.rand([1, s], dtype=torch.float)) + self.b = nn.Parameter(torch.rand([1, s], dtype=torch.float)) + self.o = nn.Linear(self.e, out_token_dims, bias=bias) + + if attn_type == 'self-attn': + self.uv = nn.Linear(in_token_dims, 2 * self.e + self.s, bias=bias) + self.gamma = nn.Parameter(torch.rand((2, self.s))) + self.beta = nn.Parameter(torch.rand((2, self.s))) + else: + self.uv = nn.Linear(in_token_dims, self.e + self.s, bias=bias) + self.k_fc = nn.Linear(in_token_dims, self.s, bias=bias) + self.v_fc = nn.Linear(in_token_dims, self.e, bias=bias) + nn.init.xavier_uniform_(self.k_fc.weight) + nn.init.xavier_uniform_(self.v_fc.weight) + + self.ln = ScaleNorm(in_token_dims, eps=eps) + + nn.init.xavier_uniform_(self.uv.weight) + + if act_fn == 'SiLU': + assert digit_version(TORCH_VERSION) >= digit_version('1.7.0'), \ + 'SiLU activation requires PyTorch version >= 1.7' + + self.act_fn = nn.SiLU(True) + else: + self.act_fn = nn.ReLU(True) + + if in_token_dims == out_token_dims: + self.shortcut = True + self.res_scale = Scale(in_token_dims) + else: + self.shortcut = False + + self.sqrt_s = math.sqrt(s) + + self.dropout_rate = dropout_rate + + if dropout_rate > 0.: + self.dropout = nn.Dropout(dropout_rate) + + def rel_pos_bias(self, seq_len, k_len=None): + """Add relative position bias.""" + + if self.attn_type == 'self-attn': + t = F.pad(self.w[:2 * seq_len - 1], [0, seq_len]).repeat(seq_len) + t = t[..., :-seq_len].reshape(-1, seq_len, 3 * seq_len - 2) + r = (2 * seq_len - 1) // 2 + t = t[..., r:-r] + else: + a = rope(self.a.repeat(seq_len, 1), dim=0) + b = rope(self.b.repeat(k_len, 1), dim=0) + t = torch.bmm(a, b.permute(0, 2, 1)) + return t + + def _forward(self, inputs): + """GAU Forward function.""" + + if self.attn_type == 'self-attn': + x = inputs + else: + x, k, v = inputs + + x = self.ln(x) + + # [B, K, in_token_dims] -> [B, K, e + e + s] + uv = self.uv(x) + uv = self.act_fn(uv) + + if self.attn_type == 'self-attn': + # [B, K, e + e + s] -> [B, K, e], [B, K, e], [B, K, s] + u, v, base = torch.split(uv, [self.e, self.e, self.s], dim=2) + # [B, K, 1, s] * [1, 1, 2, s] + [2, s] -> [B, K, 2, s] + base = 
base.unsqueeze(2) * self.gamma[None, None, :] + self.beta + + if self.pos_enc: + base = rope(base, dim=1) + # [B, K, 2, s] -> [B, K, s], [B, K, s] + q, k = torch.unbind(base, dim=2) + + else: + # [B, K, e + s] -> [B, K, e], [B, K, s] + u, q = torch.split(uv, [self.e, self.s], dim=2) + + k = self.k_fc(k) # -> [B, K, s] + v = self.v_fc(v) # -> [B, K, e] + + if self.pos_enc: + q = rope(q, 1) + k = rope(k, 1) + + # [B, K, s].permute() -> [B, s, K] + # [B, K, s] x [B, s, K] -> [B, K, K] + qk = torch.bmm(q, k.permute(0, 2, 1)) + + if self.use_rel_bias: + if self.attn_type == 'self-attn': + bias = self.rel_pos_bias(q.size(1)) + else: + bias = self.rel_pos_bias(q.size(1), k.size(1)) + qk += bias[:, :q.size(1), :k.size(1)] + # [B, K, K] + kernel = torch.square(F.relu(qk / self.sqrt_s)) + + if self.dropout_rate > 0.: + kernel = self.dropout(kernel) + # [B, K, K] x [B, K, e] -> [B, K, e] + x = u * torch.bmm(kernel, v) + # [B, K, e] -> [B, K, out_token_dims] + x = self.o(x) + + return x + + def forward(self, x): + """Forward function.""" + + if self.shortcut: + if self.attn_type == 'cross-attn': + res_shortcut = x[0] + else: + res_shortcut = x + main_branch = self.drop_path(self._forward(x)) + return self.res_scale(res_shortcut) + main_branch + else: + return self.drop_path(self._forward(x)) diff --git a/mmpose/models/utils/transformer.py b/mmpose/models/utils/transformer.py index 103b9e9970..a2d5ec2022 100644 --- a/mmpose/models/utils/transformer.py +++ b/mmpose/models/utils/transformer.py @@ -1,369 +1,369 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math -from typing import Sequence - -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import build_conv_layer, build_norm_layer -from mmengine.model import BaseModule -from mmengine.utils import to_2tuple - - -def nlc_to_nchw(x, hw_shape): - """Convert [N, L, C] shape tensor to [N, C, H, W] shape tensor. - - Args: - x (Tensor): The input tensor of shape [N, L, C] before conversion. - hw_shape (Sequence[int]): The height and width of output feature map. - - Returns: - Tensor: The output tensor of shape [N, C, H, W] after conversion. - """ - H, W = hw_shape - assert len(x.shape) == 3 - B, L, C = x.shape - assert L == H * W, 'The seq_len does not match H, W' - return x.transpose(1, 2).reshape(B, C, H, W).contiguous() - - -def nchw_to_nlc(x): - """Flatten [N, C, H, W] shape tensor to [N, L, C] shape tensor. - - Args: - x (Tensor): The input tensor of shape [N, C, H, W] before conversion. - - Returns: - Tensor: The output tensor of shape [N, L, C] after conversion. - """ - assert len(x.shape) == 4 - return x.flatten(2).transpose(1, 2).contiguous() - - -class AdaptivePadding(nn.Module): - """Applies padding to input (if needed) so that input can get fully covered - by filter you specified. It support two modes "same" and "corner". The - "same" mode is same with "SAME" padding mode in TensorFlow, pad zero around - input. The "corner" mode would pad zero to bottom right. - - Args: - kernel_size (int | tuple): Size of the kernel: - stride (int | tuple): Stride of the filter. Default: 1: - dilation (int | tuple): Spacing between kernel elements. - Default: 1 - padding (str): Support "same" and "corner", "corner" mode - would pad zero to bottom right, and "same" mode would - pad zero around input. Default: "corner". 
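# A minimal self-attention sketch of RTMCCBlock; the token count and feature
# dims below are illustrative, not taken from any particular config.
import torch

from mmpose.models.utils.rtmcc_block import RTMCCBlock

block = RTMCCBlock(num_token=17, in_token_dims=256, out_token_dims=256)
tokens = torch.randn(2, 17, 256)     # (B, K, in_token_dims)
out = block(tokens)                  # residual branch + GAU output
print(out.shape)                     # torch.Size([2, 17, 256])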
- Example: - >>> kernel_size = 16 - >>> stride = 16 - >>> dilation = 1 - >>> input = torch.rand(1, 1, 15, 17) - >>> adap_pad = AdaptivePadding( - >>> kernel_size=kernel_size, - >>> stride=stride, - >>> dilation=dilation, - >>> padding="corner") - >>> out = adap_pad(input) - >>> assert (out.shape[2], out.shape[3]) == (16, 32) - >>> input = torch.rand(1, 1, 16, 17) - >>> out = adap_pad(input) - >>> assert (out.shape[2], out.shape[3]) == (16, 32) - """ - - def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'): - - super(AdaptivePadding, self).__init__() - - assert padding in ('same', 'corner') - - kernel_size = to_2tuple(kernel_size) - stride = to_2tuple(stride) - padding = to_2tuple(padding) - dilation = to_2tuple(dilation) - - self.padding = padding - self.kernel_size = kernel_size - self.stride = stride - self.dilation = dilation - - def get_pad_shape(self, input_shape): - """Get horizontal and vertical padding shapes.""" - - input_h, input_w = input_shape - kernel_h, kernel_w = self.kernel_size - stride_h, stride_w = self.stride - output_h = math.ceil(input_h / stride_h) - output_w = math.ceil(input_w / stride_w) - pad_h = max((output_h - 1) * stride_h + - (kernel_h - 1) * self.dilation[0] + 1 - input_h, 0) - pad_w = max((output_w - 1) * stride_w + - (kernel_w - 1) * self.dilation[1] + 1 - input_w, 0) - return pad_h, pad_w - - def forward(self, x): - """Forward function.""" - - pad_h, pad_w = self.get_pad_shape(x.size()[-2:]) - if pad_h > 0 or pad_w > 0: - if self.padding == 'corner': - x = F.pad(x, [0, pad_w, 0, pad_h]) - elif self.padding == 'same': - x = F.pad(x, [ - pad_w // 2, pad_w - pad_w // 2, pad_h // 2, - pad_h - pad_h // 2 - ]) - return x - - -class PatchEmbed(BaseModule): - """Image to Patch Embedding. - - We use a conv layer to implement PatchEmbed. - - Args: - in_channels (int): The num of input channels. Default: 3 - embed_dims (int): The dimensions of embedding. Default: 768 - conv_type (str): The config dict for embedding - conv layer type selection. Default: "Conv2d. - kernel_size (int): The kernel_size of embedding conv. Default: 16. - stride (int): The slide stride of embedding conv. - Default: None (Would be set as `kernel_size`). - padding (int | tuple | string ): The padding length of - embedding conv. When it is a string, it means the mode - of adaptive padding, support "same" and "corner" now. - Default: "corner". - dilation (int): The dilation rate of embedding conv. Default: 1. - bias (bool): Bias of embed conv. Default: True. - norm_cfg (dict, optional): Config dict for normalization layer. - Default: None. - input_size (int | tuple | None): The size of input, which will be - used to calculate the out size. Only work when `dynamic_size` - is False. Default: None. - init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization. - Default: None. 
- """ - - def __init__( - self, - in_channels=3, - embed_dims=768, - conv_type='Conv2d', - kernel_size=16, - stride=16, - padding='corner', - dilation=1, - bias=True, - norm_cfg=None, - input_size=None, - init_cfg=None, - ): - super(PatchEmbed, self).__init__(init_cfg=init_cfg) - - self.embed_dims = embed_dims - if stride is None: - stride = kernel_size - - kernel_size = to_2tuple(kernel_size) - stride = to_2tuple(stride) - dilation = to_2tuple(dilation) - - if isinstance(padding, str): - self.adap_padding = AdaptivePadding( - kernel_size=kernel_size, - stride=stride, - dilation=dilation, - padding=padding) - # disable the padding of conv - padding = 0 - else: - self.adap_padding = None - padding = to_2tuple(padding) - - self.projection = build_conv_layer( - dict(type=conv_type), - in_channels=in_channels, - out_channels=embed_dims, - kernel_size=kernel_size, - stride=stride, - padding=padding, - dilation=dilation, - bias=bias) - - if norm_cfg is not None: - self.norm = build_norm_layer(norm_cfg, embed_dims)[1] - else: - self.norm = None - - if input_size: - input_size = to_2tuple(input_size) - # `init_out_size` would be used outside to - # calculate the num_patches - # when `use_abs_pos_embed` outside - self.init_input_size = input_size - if self.adap_padding: - pad_h, pad_w = self.adap_padding.get_pad_shape(input_size) - input_h, input_w = input_size - input_h = input_h + pad_h - input_w = input_w + pad_w - input_size = (input_h, input_w) - - # https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html - h_out = (input_size[0] + 2 * padding[0] - dilation[0] * - (kernel_size[0] - 1) - 1) // stride[0] + 1 - w_out = (input_size[1] + 2 * padding[1] - dilation[1] * - (kernel_size[1] - 1) - 1) // stride[1] + 1 - self.init_out_size = (h_out, w_out) - else: - self.init_input_size = None - self.init_out_size = None - - def forward(self, x): - """ - Args: - x (Tensor): Has shape (B, C, H, W). In most case, C is 3. - - Returns: - tuple: Contains merged results and its spatial shape. - - - x (Tensor): Has shape (B, out_h * out_w, embed_dims) - - out_size (tuple[int]): Spatial shape of x, arrange as - (out_h, out_w). - """ - - if self.adap_padding: - x = self.adap_padding(x) - - x = self.projection(x) - out_size = (x.shape[2], x.shape[3]) - x = x.flatten(2).transpose(1, 2) - if self.norm is not None: - x = self.norm(x) - return x, out_size - - -class PatchMerging(BaseModule): - """Merge patch feature map. - - This layer groups feature map by kernel_size, and applies norm and linear - layers to the grouped feature map. Our implementation uses `nn.Unfold` to - merge patch, which is about 25% faster than original implementation. - Instead, we need to modify pretrained models for compatibility. - - Args: - in_channels (int): The num of input channels. - to gets fully covered by filter and stride you specified.. - Default: True. - out_channels (int): The num of output channels. - kernel_size (int | tuple, optional): the kernel size in the unfold - layer. Defaults to 2. - stride (int | tuple, optional): the stride of the sliding blocks in the - unfold layer. Default: None. (Would be set as `kernel_size`) - padding (int | tuple | string ): The padding length of - embedding conv. When it is a string, it means the mode - of adaptive padding, support "same" and "corner" now. - Default: "corner". - dilation (int | tuple, optional): dilation parameter in the unfold - layer. Default: 1. - bias (bool, optional): Whether to add bias in linear layer or not. - Defaults: False. 
- norm_cfg (dict, optional): Config dict for normalization layer. - Default: dict(type='LN'). - init_cfg (dict, optional): The extra config for initialization. - Default: None. - """ - - def __init__(self, - in_channels, - out_channels, - kernel_size=2, - stride=None, - padding='corner', - dilation=1, - bias=False, - norm_cfg=dict(type='LN'), - init_cfg=None): - super().__init__(init_cfg=init_cfg) - self.in_channels = in_channels - self.out_channels = out_channels - if stride: - stride = stride - else: - stride = kernel_size - - kernel_size = to_2tuple(kernel_size) - stride = to_2tuple(stride) - dilation = to_2tuple(dilation) - - if isinstance(padding, str): - self.adap_padding = AdaptivePadding( - kernel_size=kernel_size, - stride=stride, - dilation=dilation, - padding=padding) - # disable the padding of unfold - padding = 0 - else: - self.adap_padding = None - - padding = to_2tuple(padding) - self.sampler = nn.Unfold( - kernel_size=kernel_size, - dilation=dilation, - padding=padding, - stride=stride) - - sample_dim = kernel_size[0] * kernel_size[1] * in_channels - - if norm_cfg is not None: - self.norm = build_norm_layer(norm_cfg, sample_dim)[1] - else: - self.norm = None - - self.reduction = nn.Linear(sample_dim, out_channels, bias=bias) - - def forward(self, x, input_size): - """ - Args: - x (Tensor): Has shape (B, H*W, C_in). - input_size (tuple[int]): The spatial shape of x, arrange as (H, W). - Default: None. - - Returns: - tuple: Contains merged results and its spatial shape. - - - x (Tensor): Has shape (B, Merged_H * Merged_W, C_out) - - out_size (tuple[int]): Spatial shape of x, arrange as - (Merged_H, Merged_W). - """ - B, L, C = x.shape - assert isinstance(input_size, Sequence), f'Expect ' \ - f'input_size is ' \ - f'`Sequence` ' \ - f'but get {input_size}' - - H, W = input_size - assert L == H * W, 'input feature has wrong size' - - x = x.view(B, H, W, C).permute([0, 3, 1, 2]) # B, C, H, W - # Use nn.Unfold to merge patch. About 25% faster than original method, - # but need to modify pretrained model for compatibility - - if self.adap_padding: - x = self.adap_padding(x) - H, W = x.shape[-2:] - - x = self.sampler(x) - # if kernel_size=2 and stride=2, x should has shape (B, 4*C, H/2*W/2) - - out_h = (H + 2 * self.sampler.padding[0] - self.sampler.dilation[0] * - (self.sampler.kernel_size[0] - 1) - - 1) // self.sampler.stride[0] + 1 - out_w = (W + 2 * self.sampler.padding[1] - self.sampler.dilation[1] * - (self.sampler.kernel_size[1] - 1) - - 1) // self.sampler.stride[1] + 1 - - output_size = (out_h, out_w) - x = x.transpose(1, 2) # B, H/2*W/2, 4*C - x = self.norm(x) if self.norm else x - x = self.reduction(x) - return x, output_size +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import Sequence + +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmengine.model import BaseModule +from mmengine.utils import to_2tuple + + +def nlc_to_nchw(x, hw_shape): + """Convert [N, L, C] shape tensor to [N, C, H, W] shape tensor. + + Args: + x (Tensor): The input tensor of shape [N, L, C] before conversion. + hw_shape (Sequence[int]): The height and width of output feature map. + + Returns: + Tensor: The output tensor of shape [N, C, H, W] after conversion. 
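# A round-trip sketch for the layout helpers: [N, C, H, W] -> [N, L, C] and
# back, which is how conv features and transformer tokens are exchanged.
import torch

from mmpose.models.utils.transformer import nchw_to_nlc, nlc_to_nchw

x = torch.randn(2, 96, 56, 56)            # (N, C, H, W)
tokens = nchw_to_nlc(x)                   # (2, 3136, 96)
restored = nlc_to_nchw(tokens, (56, 56))  # back to (2, 96, 56, 56)
assert torch.equal(x, restored)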
+ """ + H, W = hw_shape + assert len(x.shape) == 3 + B, L, C = x.shape + assert L == H * W, 'The seq_len does not match H, W' + return x.transpose(1, 2).reshape(B, C, H, W).contiguous() + + +def nchw_to_nlc(x): + """Flatten [N, C, H, W] shape tensor to [N, L, C] shape tensor. + + Args: + x (Tensor): The input tensor of shape [N, C, H, W] before conversion. + + Returns: + Tensor: The output tensor of shape [N, L, C] after conversion. + """ + assert len(x.shape) == 4 + return x.flatten(2).transpose(1, 2).contiguous() + + +class AdaptivePadding(nn.Module): + """Applies padding to input (if needed) so that input can get fully covered + by filter you specified. It support two modes "same" and "corner". The + "same" mode is same with "SAME" padding mode in TensorFlow, pad zero around + input. The "corner" mode would pad zero to bottom right. + + Args: + kernel_size (int | tuple): Size of the kernel: + stride (int | tuple): Stride of the filter. Default: 1: + dilation (int | tuple): Spacing between kernel elements. + Default: 1 + padding (str): Support "same" and "corner", "corner" mode + would pad zero to bottom right, and "same" mode would + pad zero around input. Default: "corner". + Example: + >>> kernel_size = 16 + >>> stride = 16 + >>> dilation = 1 + >>> input = torch.rand(1, 1, 15, 17) + >>> adap_pad = AdaptivePadding( + >>> kernel_size=kernel_size, + >>> stride=stride, + >>> dilation=dilation, + >>> padding="corner") + >>> out = adap_pad(input) + >>> assert (out.shape[2], out.shape[3]) == (16, 32) + >>> input = torch.rand(1, 1, 16, 17) + >>> out = adap_pad(input) + >>> assert (out.shape[2], out.shape[3]) == (16, 32) + """ + + def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'): + + super(AdaptivePadding, self).__init__() + + assert padding in ('same', 'corner') + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + padding = to_2tuple(padding) + dilation = to_2tuple(dilation) + + self.padding = padding + self.kernel_size = kernel_size + self.stride = stride + self.dilation = dilation + + def get_pad_shape(self, input_shape): + """Get horizontal and vertical padding shapes.""" + + input_h, input_w = input_shape + kernel_h, kernel_w = self.kernel_size + stride_h, stride_w = self.stride + output_h = math.ceil(input_h / stride_h) + output_w = math.ceil(input_w / stride_w) + pad_h = max((output_h - 1) * stride_h + + (kernel_h - 1) * self.dilation[0] + 1 - input_h, 0) + pad_w = max((output_w - 1) * stride_w + + (kernel_w - 1) * self.dilation[1] + 1 - input_w, 0) + return pad_h, pad_w + + def forward(self, x): + """Forward function.""" + + pad_h, pad_w = self.get_pad_shape(x.size()[-2:]) + if pad_h > 0 or pad_w > 0: + if self.padding == 'corner': + x = F.pad(x, [0, pad_w, 0, pad_h]) + elif self.padding == 'same': + x = F.pad(x, [ + pad_w // 2, pad_w - pad_w // 2, pad_h // 2, + pad_h - pad_h // 2 + ]) + return x + + +class PatchEmbed(BaseModule): + """Image to Patch Embedding. + + We use a conv layer to implement PatchEmbed. + + Args: + in_channels (int): The num of input channels. Default: 3 + embed_dims (int): The dimensions of embedding. Default: 768 + conv_type (str): The config dict for embedding + conv layer type selection. Default: "Conv2d. + kernel_size (int): The kernel_size of embedding conv. Default: 16. + stride (int): The slide stride of embedding conv. + Default: None (Would be set as `kernel_size`). + padding (int | tuple | string ): The padding length of + embedding conv. 
When it is a string, it means the mode + of adaptive padding, support "same" and "corner" now. + Default: "corner". + dilation (int): The dilation rate of embedding conv. Default: 1. + bias (bool): Bias of embed conv. Default: True. + norm_cfg (dict, optional): Config dict for normalization layer. + Default: None. + input_size (int | tuple | None): The size of input, which will be + used to calculate the out size. Only work when `dynamic_size` + is False. Default: None. + init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization. + Default: None. + """ + + def __init__( + self, + in_channels=3, + embed_dims=768, + conv_type='Conv2d', + kernel_size=16, + stride=16, + padding='corner', + dilation=1, + bias=True, + norm_cfg=None, + input_size=None, + init_cfg=None, + ): + super(PatchEmbed, self).__init__(init_cfg=init_cfg) + + self.embed_dims = embed_dims + if stride is None: + stride = kernel_size + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + + if isinstance(padding, str): + self.adap_padding = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + # disable the padding of conv + padding = 0 + else: + self.adap_padding = None + padding = to_2tuple(padding) + + self.projection = build_conv_layer( + dict(type=conv_type), + in_channels=in_channels, + out_channels=embed_dims, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias) + + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, embed_dims)[1] + else: + self.norm = None + + if input_size: + input_size = to_2tuple(input_size) + # `init_out_size` would be used outside to + # calculate the num_patches + # when `use_abs_pos_embed` outside + self.init_input_size = input_size + if self.adap_padding: + pad_h, pad_w = self.adap_padding.get_pad_shape(input_size) + input_h, input_w = input_size + input_h = input_h + pad_h + input_w = input_w + pad_w + input_size = (input_h, input_w) + + # https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html + h_out = (input_size[0] + 2 * padding[0] - dilation[0] * + (kernel_size[0] - 1) - 1) // stride[0] + 1 + w_out = (input_size[1] + 2 * padding[1] - dilation[1] * + (kernel_size[1] - 1) - 1) // stride[1] + 1 + self.init_out_size = (h_out, w_out) + else: + self.init_input_size = None + self.init_out_size = None + + def forward(self, x): + """ + Args: + x (Tensor): Has shape (B, C, H, W). In most case, C is 3. + + Returns: + tuple: Contains merged results and its spatial shape. + + - x (Tensor): Has shape (B, out_h * out_w, embed_dims) + - out_size (tuple[int]): Spatial shape of x, arrange as + (out_h, out_w). + """ + + if self.adap_padding: + x = self.adap_padding(x) + + x = self.projection(x) + out_size = (x.shape[2], x.shape[3]) + x = x.flatten(2).transpose(1, 2) + if self.norm is not None: + x = self.norm(x) + return x, out_size + + +class PatchMerging(BaseModule): + """Merge patch feature map. + + This layer groups feature map by kernel_size, and applies norm and linear + layers to the grouped feature map. Our implementation uses `nn.Unfold` to + merge patch, which is about 25% faster than original implementation. + Instead, we need to modify pretrained models for compatibility. + + Args: + in_channels (int): The num of input channels. + to gets fully covered by filter and stride you specified.. + Default: True. + out_channels (int): The num of output channels. 
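# A small sketch of PatchEmbed, assuming a 4x4 patch projection; the image size
# and embedding width below are illustrative.
import torch

from mmpose.models.utils.transformer import PatchEmbed

patch_embed = PatchEmbed(in_channels=3, embed_dims=96, kernel_size=4, stride=4)
img = torch.randn(1, 3, 224, 224)
tokens, out_size = patch_embed(img)
print(tokens.shape, out_size)        # torch.Size([1, 3136, 96]) (56, 56)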
+ kernel_size (int | tuple, optional): the kernel size in the unfold + layer. Defaults to 2. + stride (int | tuple, optional): the stride of the sliding blocks in the + unfold layer. Default: None. (Would be set as `kernel_size`) + padding (int | tuple | string ): The padding length of + embedding conv. When it is a string, it means the mode + of adaptive padding, support "same" and "corner" now. + Default: "corner". + dilation (int | tuple, optional): dilation parameter in the unfold + layer. Default: 1. + bias (bool, optional): Whether to add bias in linear layer or not. + Defaults: False. + norm_cfg (dict, optional): Config dict for normalization layer. + Default: dict(type='LN'). + init_cfg (dict, optional): The extra config for initialization. + Default: None. + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size=2, + stride=None, + padding='corner', + dilation=1, + bias=False, + norm_cfg=dict(type='LN'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + if stride: + stride = stride + else: + stride = kernel_size + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + + if isinstance(padding, str): + self.adap_padding = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + # disable the padding of unfold + padding = 0 + else: + self.adap_padding = None + + padding = to_2tuple(padding) + self.sampler = nn.Unfold( + kernel_size=kernel_size, + dilation=dilation, + padding=padding, + stride=stride) + + sample_dim = kernel_size[0] * kernel_size[1] * in_channels + + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, sample_dim)[1] + else: + self.norm = None + + self.reduction = nn.Linear(sample_dim, out_channels, bias=bias) + + def forward(self, x, input_size): + """ + Args: + x (Tensor): Has shape (B, H*W, C_in). + input_size (tuple[int]): The spatial shape of x, arrange as (H, W). + Default: None. + + Returns: + tuple: Contains merged results and its spatial shape. + + - x (Tensor): Has shape (B, Merged_H * Merged_W, C_out) + - out_size (tuple[int]): Spatial shape of x, arrange as + (Merged_H, Merged_W). + """ + B, L, C = x.shape + assert isinstance(input_size, Sequence), f'Expect ' \ + f'input_size is ' \ + f'`Sequence` ' \ + f'but get {input_size}' + + H, W = input_size + assert L == H * W, 'input feature has wrong size' + + x = x.view(B, H, W, C).permute([0, 3, 1, 2]) # B, C, H, W + # Use nn.Unfold to merge patch. About 25% faster than original method, + # but need to modify pretrained model for compatibility + + if self.adap_padding: + x = self.adap_padding(x) + H, W = x.shape[-2:] + + x = self.sampler(x) + # if kernel_size=2 and stride=2, x should has shape (B, 4*C, H/2*W/2) + + out_h = (H + 2 * self.sampler.padding[0] - self.sampler.dilation[0] * + (self.sampler.kernel_size[0] - 1) - + 1) // self.sampler.stride[0] + 1 + out_w = (W + 2 * self.sampler.padding[1] - self.sampler.dilation[1] * + (self.sampler.kernel_size[1] - 1) - + 1) // self.sampler.stride[1] + 1 + + output_size = (out_h, out_w) + x = x.transpose(1, 2) # B, H/2*W/2, 4*C + x = self.norm(x) if self.norm else x + x = self.reduction(x) + return x, output_size diff --git a/mmpose/models/utils/tta.py b/mmpose/models/utils/tta.py index 41d2f2fd47..77dbdd2dae 100644 --- a/mmpose/models/utils/tta.py +++ b/mmpose/models/utils/tta.py @@ -1,183 +1,183 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
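# A brief sketch of PatchMerging: 2x2 neighborhoods are unfolded, normalized
# and linearly projected, halving the spatial size while widening the channels.
import torch

from mmpose.models.utils.transformer import PatchMerging

merge = PatchMerging(in_channels=96, out_channels=192)
tokens = torch.randn(1, 56 * 56, 96)            # (B, H*W, C_in)
merged, out_size = merge(tokens, (56, 56))
print(merged.shape, out_size)                   # torch.Size([1, 784, 192]) (28, 28)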
-from typing import List, Optional, Tuple - -import torch -import torch.nn.functional as F -from torch import Tensor - - -def flip_heatmaps(heatmaps: Tensor, - flip_indices: Optional[List[int]] = None, - flip_mode: str = 'heatmap', - shift_heatmap: bool = True): - """Flip heatmaps for test-time augmentation. - - Args: - heatmaps (Tensor): The heatmaps to flip. Should be a tensor in shape - [B, C, H, W] - flip_indices (List[int]): The indices of each keypoint's symmetric - keypoint. Defaults to ``None`` - flip_mode (str): Specify the flipping mode. Options are: - - - ``'heatmap'``: horizontally flip the heatmaps and swap heatmaps - of symmetric keypoints according to ``flip_indices`` - - ``'udp_combined'``: similar to ``'heatmap'`` mode but further - flip the x_offset values - - ``'offset'``: horizontally flip the offset fields and swap - heatmaps of symmetric keypoints according to - ``flip_indices``. x_offset values are also reversed - shift_heatmap (bool): Shift the flipped heatmaps to align with the - original heatmaps and improve accuracy. Defaults to ``True`` - - Returns: - Tensor: flipped heatmaps in shape [B, C, H, W] - """ - - if flip_mode == 'heatmap': - heatmaps = heatmaps.flip(-1) - if flip_indices is not None: - assert len(flip_indices) == heatmaps.shape[1] - heatmaps = heatmaps[:, flip_indices] - elif flip_mode == 'udp_combined': - B, C, H, W = heatmaps.shape - heatmaps = heatmaps.view(B, C // 3, 3, H, W) - heatmaps = heatmaps.flip(-1) - if flip_indices is not None: - assert len(flip_indices) == C // 3 - heatmaps = heatmaps[:, flip_indices] - heatmaps[:, :, 1] = -heatmaps[:, :, 1] - heatmaps = heatmaps.view(B, C, H, W) - - elif flip_mode == 'offset': - B, C, H, W = heatmaps.shape - heatmaps = heatmaps.view(B, C // 2, -1, H, W) - heatmaps = heatmaps.flip(-1) - if flip_indices is not None: - assert len(flip_indices) == C // 2 - heatmaps = heatmaps[:, flip_indices] - heatmaps[:, :, 0] = -heatmaps[:, :, 0] - heatmaps = heatmaps.view(B, C, H, W) - - else: - raise ValueError(f'Invalid flip_mode value "{flip_mode}"') - - if shift_heatmap: - # clone data to avoid unexpected in-place operation when using CPU - heatmaps[..., 1:] = heatmaps[..., :-1].clone() - - return heatmaps - - -def flip_vectors(x_labels: Tensor, y_labels: Tensor, flip_indices: List[int]): - """Flip instance-level labels in specific axis for test-time augmentation. - - Args: - x_labels (Tensor): The vector labels in x-axis to flip. Should be - a tensor in shape [B, C, Wx] - y_labels (Tensor): The vector labels in y-axis to flip. Should be - a tensor in shape [B, C, Wy] - flip_indices (List[int]): The indices of each keypoint's symmetric - keypoint - """ - assert x_labels.ndim == 3 and y_labels.ndim == 3 - assert len(flip_indices) == x_labels.shape[1] and len( - flip_indices) == y_labels.shape[1] - x_labels = x_labels[:, flip_indices].flip(-1) - y_labels = y_labels[:, flip_indices] - - return x_labels, y_labels - - -def flip_coordinates(coords: Tensor, flip_indices: List[int], - shift_coords: bool, input_size: Tuple[int, int]): - """Flip normalized coordinates for test-time augmentation. - - Args: - coords (Tensor): The coordinates to flip. Should be a tensor in shape - [B, K, D] - flip_indices (List[int]): The indices of each keypoint's symmetric - keypoint - shift_coords (bool): Shift the flipped coordinates to align with the - original coordinates and improve accuracy. 
Defaults to ``True`` - input_size (Tuple[int, int]): The size of input image in [w, h] - """ - assert coords.ndim == 3 - assert len(flip_indices) == coords.shape[1] - - coords[:, :, 0] = 1.0 - coords[:, :, 0] - - if shift_coords: - img_width = input_size[0] - coords[:, :, 0] -= 1.0 / img_width - - coords = coords[:, flip_indices] - return coords - - -def flip_visibility(vis: Tensor, flip_indices: List[int]): - """Flip keypoints visibility for test-time augmentation. - - Args: - vis (Tensor): The keypoints visibility to flip. Should be a tensor - in shape [B, K] - flip_indices (List[int]): The indices of each keypoint's symmetric - keypoint - """ - assert vis.ndim == 2 - - vis = vis[:, flip_indices] - return vis - - -def aggregate_heatmaps(heatmaps: List[Tensor], - size: Optional[Tuple[int, int]], - align_corners: bool = False, - mode: str = 'average'): - """Aggregate multiple heatmaps. - - Args: - heatmaps (List[Tensor]): Multiple heatmaps to aggregate. Each should - be in shape (B, C, H, W) - size (Tuple[int, int], optional): The target size in (w, h). All - heatmaps will be resized to the target size. If not given, the - first heatmap tensor's width and height will be used as the target - size. Defaults to ``None`` - align_corners (bool): Whether align corners when resizing heatmaps. - Defaults to ``False`` - mode (str): Aggregation mode in one of the following: - - - ``'average'``: Get average of heatmaps. All heatmaps mush have - the same channel number - - ``'concat'``: Concate the heatmaps at the channel dim - """ - - if mode not in {'average', 'concat'}: - raise ValueError(f'Invalid aggregation mode `{mode}`') - - if size is None: - h, w = heatmaps[0].shape[2:4] - else: - w, h = size - - for i, _heatmaps in enumerate(heatmaps): - assert _heatmaps.ndim == 4 - if mode == 'average': - assert _heatmaps.shape[:2] == heatmaps[0].shape[:2] - else: - assert _heatmaps.shape[0] == heatmaps[0].shape[0] - - if _heatmaps.shape[2:4] != (h, w): - heatmaps[i] = F.interpolate( - _heatmaps, - size=(h, w), - mode='bilinear', - align_corners=align_corners) - - if mode == 'average': - output = sum(heatmaps).div(len(heatmaps)) - elif mode == 'concat': - output = torch.cat(heatmaps, dim=1) - else: - raise ValueError() - - return output +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Tuple + +import torch +import torch.nn.functional as F +from torch import Tensor + + +def flip_heatmaps(heatmaps: Tensor, + flip_indices: Optional[List[int]] = None, + flip_mode: str = 'heatmap', + shift_heatmap: bool = True): + """Flip heatmaps for test-time augmentation. + + Args: + heatmaps (Tensor): The heatmaps to flip. Should be a tensor in shape + [B, C, H, W] + flip_indices (List[int]): The indices of each keypoint's symmetric + keypoint. Defaults to ``None`` + flip_mode (str): Specify the flipping mode. Options are: + + - ``'heatmap'``: horizontally flip the heatmaps and swap heatmaps + of symmetric keypoints according to ``flip_indices`` + - ``'udp_combined'``: similar to ``'heatmap'`` mode but further + flip the x_offset values + - ``'offset'``: horizontally flip the offset fields and swap + heatmaps of symmetric keypoints according to + ``flip_indices``. x_offset values are also reversed + shift_heatmap (bool): Shift the flipped heatmaps to align with the + original heatmaps and improve accuracy. 
Defaults to ``True`` + + Returns: + Tensor: flipped heatmaps in shape [B, C, H, W] + """ + + if flip_mode == 'heatmap': + heatmaps = heatmaps.flip(-1) + if flip_indices is not None: + assert len(flip_indices) == heatmaps.shape[1] + heatmaps = heatmaps[:, flip_indices] + elif flip_mode == 'udp_combined': + B, C, H, W = heatmaps.shape + heatmaps = heatmaps.view(B, C // 3, 3, H, W) + heatmaps = heatmaps.flip(-1) + if flip_indices is not None: + assert len(flip_indices) == C // 3 + heatmaps = heatmaps[:, flip_indices] + heatmaps[:, :, 1] = -heatmaps[:, :, 1] + heatmaps = heatmaps.view(B, C, H, W) + + elif flip_mode == 'offset': + B, C, H, W = heatmaps.shape + heatmaps = heatmaps.view(B, C // 2, -1, H, W) + heatmaps = heatmaps.flip(-1) + if flip_indices is not None: + assert len(flip_indices) == C // 2 + heatmaps = heatmaps[:, flip_indices] + heatmaps[:, :, 0] = -heatmaps[:, :, 0] + heatmaps = heatmaps.view(B, C, H, W) + + else: + raise ValueError(f'Invalid flip_mode value "{flip_mode}"') + + if shift_heatmap: + # clone data to avoid unexpected in-place operation when using CPU + heatmaps[..., 1:] = heatmaps[..., :-1].clone() + + return heatmaps + + +def flip_vectors(x_labels: Tensor, y_labels: Tensor, flip_indices: List[int]): + """Flip instance-level labels in specific axis for test-time augmentation. + + Args: + x_labels (Tensor): The vector labels in x-axis to flip. Should be + a tensor in shape [B, C, Wx] + y_labels (Tensor): The vector labels in y-axis to flip. Should be + a tensor in shape [B, C, Wy] + flip_indices (List[int]): The indices of each keypoint's symmetric + keypoint + """ + assert x_labels.ndim == 3 and y_labels.ndim == 3 + assert len(flip_indices) == x_labels.shape[1] and len( + flip_indices) == y_labels.shape[1] + x_labels = x_labels[:, flip_indices].flip(-1) + y_labels = y_labels[:, flip_indices] + + return x_labels, y_labels + + +def flip_coordinates(coords: Tensor, flip_indices: List[int], + shift_coords: bool, input_size: Tuple[int, int]): + """Flip normalized coordinates for test-time augmentation. + + Args: + coords (Tensor): The coordinates to flip. Should be a tensor in shape + [B, K, D] + flip_indices (List[int]): The indices of each keypoint's symmetric + keypoint + shift_coords (bool): Shift the flipped coordinates to align with the + original coordinates and improve accuracy. Defaults to ``True`` + input_size (Tuple[int, int]): The size of input image in [w, h] + """ + assert coords.ndim == 3 + assert len(flip_indices) == coords.shape[1] + + coords[:, :, 0] = 1.0 - coords[:, :, 0] + + if shift_coords: + img_width = input_size[0] + coords[:, :, 0] -= 1.0 / img_width + + coords = coords[:, flip_indices] + return coords + + +def flip_visibility(vis: Tensor, flip_indices: List[int]): + """Flip keypoints visibility for test-time augmentation. + + Args: + vis (Tensor): The keypoints visibility to flip. Should be a tensor + in shape [B, K] + flip_indices (List[int]): The indices of each keypoint's symmetric + keypoint + """ + assert vis.ndim == 2 + + vis = vis[:, flip_indices] + return vis + + +def aggregate_heatmaps(heatmaps: List[Tensor], + size: Optional[Tuple[int, int]], + align_corners: bool = False, + mode: str = 'average'): + """Aggregate multiple heatmaps. + + Args: + heatmaps (List[Tensor]): Multiple heatmaps to aggregate. Each should + be in shape (B, C, H, W) + size (Tuple[int, int], optional): The target size in (w, h). All + heatmaps will be resized to the target size. 
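These TTA helpers are typically paired: heatmaps predicted on a horizontally flipped image are flipped back and then averaged with the prediction on the original image. A short sketch under assumed shapes (17 COCO-style keypoints; the `flip_indices` below is the usual COCO left/right mapping, given only for illustration):

```python
import torch

from mmpose.models.utils.tta import aggregate_heatmaps, flip_heatmaps

# COCO-style left/right symmetric keypoint mapping (illustrative)
flip_indices = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]

heatmaps = torch.rand(2, 17, 64, 48)        # prediction on the original image
heatmaps_flip = torch.rand(2, 17, 64, 48)   # prediction on the flipped image

# flip back, align, then average the two predictions
heatmaps_flip = flip_heatmaps(
    heatmaps_flip,
    flip_indices=flip_indices,
    flip_mode='heatmap',
    shift_heatmap=True)
merged = aggregate_heatmaps(
    [heatmaps, heatmaps_flip], size=None, mode='average')
assert merged.shape == (2, 17, 64, 48)
```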
If not given, the + first heatmap tensor's width and height will be used as the target + size. Defaults to ``None`` + align_corners (bool): Whether align corners when resizing heatmaps. + Defaults to ``False`` + mode (str): Aggregation mode in one of the following: + + - ``'average'``: Get average of heatmaps. All heatmaps mush have + the same channel number + - ``'concat'``: Concate the heatmaps at the channel dim + """ + + if mode not in {'average', 'concat'}: + raise ValueError(f'Invalid aggregation mode `{mode}`') + + if size is None: + h, w = heatmaps[0].shape[2:4] + else: + w, h = size + + for i, _heatmaps in enumerate(heatmaps): + assert _heatmaps.ndim == 4 + if mode == 'average': + assert _heatmaps.shape[:2] == heatmaps[0].shape[:2] + else: + assert _heatmaps.shape[0] == heatmaps[0].shape[0] + + if _heatmaps.shape[2:4] != (h, w): + heatmaps[i] = F.interpolate( + _heatmaps, + size=(h, w), + mode='bilinear', + align_corners=align_corners) + + if mode == 'average': + output = sum(heatmaps).div(len(heatmaps)) + elif mode == 'concat': + output = torch.cat(heatmaps, dim=1) + else: + raise ValueError() + + return output diff --git a/mmpose/registry.py b/mmpose/registry.py index e3b8d17c4c..cd53e25346 100644 --- a/mmpose/registry.py +++ b/mmpose/registry.py @@ -1,132 +1,132 @@ -# Copyright (c) OpenMMLab. All rights reserved. -"""MMPose provides following registry nodes to support using modules across -projects. - -Each node is a child of the root registry in MMEngine. -More details can be found at -https://mmengine.readthedocs.io/en/latest/tutorials/registry.html. -""" - -from mmengine.registry import DATA_SAMPLERS as MMENGINE_DATA_SAMPLERS -from mmengine.registry import DATASETS as MMENGINE_DATASETS -from mmengine.registry import EVALUATOR as MMENGINE_EVALUATOR -from mmengine.registry import HOOKS as MMENGINE_HOOKS -from mmengine.registry import INFERENCERS as MMENGINE_INFERENCERS -from mmengine.registry import LOG_PROCESSORS as MMENGINE_LOG_PROCESSORS -from mmengine.registry import LOOPS as MMENGINE_LOOPS -from mmengine.registry import METRICS as MMENGINE_METRICS -from mmengine.registry import MODEL_WRAPPERS as MMENGINE_MODEL_WRAPPERS -from mmengine.registry import MODELS as MMENGINE_MODELS -from mmengine.registry import \ - OPTIM_WRAPPER_CONSTRUCTORS as MMENGINE_OPTIM_WRAPPER_CONSTRUCTORS -from mmengine.registry import OPTIM_WRAPPERS as MMENGINE_OPTIM_WRAPPERS -from mmengine.registry import OPTIMIZERS as MMENGINE_OPTIMIZERS -from mmengine.registry import PARAM_SCHEDULERS as MMENGINE_PARAM_SCHEDULERS -from mmengine.registry import \ - RUNNER_CONSTRUCTORS as MMENGINE_RUNNER_CONSTRUCTORS -from mmengine.registry import RUNNERS as MMENGINE_RUNNERS -from mmengine.registry import TASK_UTILS as MMENGINE_TASK_UTILS -from mmengine.registry import TRANSFORMS as MMENGINE_TRANSFORMS -from mmengine.registry import VISBACKENDS as MMENGINE_VISBACKENDS -from mmengine.registry import VISUALIZERS as MMENGINE_VISUALIZERS -from mmengine.registry import \ - WEIGHT_INITIALIZERS as MMENGINE_WEIGHT_INITIALIZERS -from mmengine.registry import Registry - -# Registries For Runner and the related -# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner` -RUNNERS = Registry('runner', parent=MMENGINE_RUNNERS) -# manage runner constructors that define how to initialize runners -RUNNER_CONSTRUCTORS = Registry( - 'runner constructor', parent=MMENGINE_RUNNER_CONSTRUCTORS) -# manage all kinds of loops like `EpochBasedTrainLoop` -LOOPS = Registry('loop', parent=MMENGINE_LOOPS) -# manage all kinds of 
hooks like `CheckpointHook` -HOOKS = Registry( - 'hook', parent=MMENGINE_HOOKS, locations=['mmpose.engine.hooks']) - -# Registries For Data and the related -# manage data-related modules -DATASETS = Registry( - 'dataset', parent=MMENGINE_DATASETS, locations=['mmpose.datasets']) -DATA_SAMPLERS = Registry( - 'data sampler', - parent=MMENGINE_DATA_SAMPLERS, - locations=['mmpose.datasets.samplers']) -TRANSFORMS = Registry( - 'transform', - parent=MMENGINE_TRANSFORMS, - locations=['mmpose.datasets.transforms']) - -# manage all kinds of modules inheriting `nn.Module` -MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmpose.models']) -# manage all kinds of model wrappers like 'MMDistributedDataParallel' -MODEL_WRAPPERS = Registry( - 'model_wrapper', - parent=MMENGINE_MODEL_WRAPPERS, - locations=['mmpose.models']) -# manage all kinds of weight initialization modules like `Uniform` -WEIGHT_INITIALIZERS = Registry( - 'weight initializer', - parent=MMENGINE_WEIGHT_INITIALIZERS, - locations=['mmpose.models']) -# manage all kinds of batch augmentations like Mixup and CutMix. -BATCH_AUGMENTS = Registry('batch augment', locations=['mmpose.models']) - -# Registries For Optimizer and the related -# manage all kinds of optimizers like `SGD` and `Adam` -OPTIMIZERS = Registry( - 'optimizer', parent=MMENGINE_OPTIMIZERS, locations=['mmpose.engine']) -# manage optimizer wrapper -OPTIM_WRAPPERS = Registry( - 'optimizer_wrapper', - parent=MMENGINE_OPTIM_WRAPPERS, - locations=['mmpose.engine']) -# manage constructors that customize the optimization hyperparameters. -OPTIM_WRAPPER_CONSTRUCTORS = Registry( - 'optimizer wrapper constructor', - parent=MMENGINE_OPTIM_WRAPPER_CONSTRUCTORS, - locations=['mmpose.engine.optim_wrappers']) -# manage all kinds of parameter schedulers like `MultiStepLR` -PARAM_SCHEDULERS = Registry( - 'parameter scheduler', - parent=MMENGINE_PARAM_SCHEDULERS, - locations=['mmpose.engine']) - -# manage all kinds of metrics -METRICS = Registry( - 'metric', parent=MMENGINE_METRICS, locations=['mmpose.evaluation.metrics']) -# manage all kinds of evaluators -EVALUATORS = Registry( - 'evaluator', parent=MMENGINE_EVALUATOR, locations=['mmpose.evaluation']) - -# manage task-specific modules like anchor generators and box coders -TASK_UTILS = Registry( - 'task util', parent=MMENGINE_TASK_UTILS, locations=['mmpose.models']) - -# Registries For Visualizer and the related -# manage visualizer -VISUALIZERS = Registry( - 'visualizer', - parent=MMENGINE_VISUALIZERS, - locations=['mmpose.visualization']) -# manage visualizer backend -VISBACKENDS = Registry( - 'vis_backend', - parent=MMENGINE_VISBACKENDS, - locations=['mmpose.visualization']) - -# manage all kinds log processors -LOG_PROCESSORS = Registry( - 'log processor', - parent=MMENGINE_LOG_PROCESSORS, - locations=['mmpose.visualization']) - -# manager keypoint encoder/decoder -KEYPOINT_CODECS = Registry('KEYPOINT_CODECS', locations=['mmpose.codecs']) - -# manage inferencer -INFERENCERS = Registry( - 'inferencer', - parent=MMENGINE_INFERENCERS, - locations=['mmpose.apis.inferencers']) +# Copyright (c) OpenMMLab. All rights reserved. +"""MMPose provides following registry nodes to support using modules across +projects. + +Each node is a child of the root registry in MMEngine. +More details can be found at +https://mmengine.readthedocs.io/en/latest/tutorials/registry.html. 
+""" + +from mmengine.registry import DATA_SAMPLERS as MMENGINE_DATA_SAMPLERS +from mmengine.registry import DATASETS as MMENGINE_DATASETS +from mmengine.registry import EVALUATOR as MMENGINE_EVALUATOR +from mmengine.registry import HOOKS as MMENGINE_HOOKS +from mmengine.registry import INFERENCERS as MMENGINE_INFERENCERS +from mmengine.registry import LOG_PROCESSORS as MMENGINE_LOG_PROCESSORS +from mmengine.registry import LOOPS as MMENGINE_LOOPS +from mmengine.registry import METRICS as MMENGINE_METRICS +from mmengine.registry import MODEL_WRAPPERS as MMENGINE_MODEL_WRAPPERS +from mmengine.registry import MODELS as MMENGINE_MODELS +from mmengine.registry import \ + OPTIM_WRAPPER_CONSTRUCTORS as MMENGINE_OPTIM_WRAPPER_CONSTRUCTORS +from mmengine.registry import OPTIM_WRAPPERS as MMENGINE_OPTIM_WRAPPERS +from mmengine.registry import OPTIMIZERS as MMENGINE_OPTIMIZERS +from mmengine.registry import PARAM_SCHEDULERS as MMENGINE_PARAM_SCHEDULERS +from mmengine.registry import \ + RUNNER_CONSTRUCTORS as MMENGINE_RUNNER_CONSTRUCTORS +from mmengine.registry import RUNNERS as MMENGINE_RUNNERS +from mmengine.registry import TASK_UTILS as MMENGINE_TASK_UTILS +from mmengine.registry import TRANSFORMS as MMENGINE_TRANSFORMS +from mmengine.registry import VISBACKENDS as MMENGINE_VISBACKENDS +from mmengine.registry import VISUALIZERS as MMENGINE_VISUALIZERS +from mmengine.registry import \ + WEIGHT_INITIALIZERS as MMENGINE_WEIGHT_INITIALIZERS +from mmengine.registry import Registry + +# Registries For Runner and the related +# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner` +RUNNERS = Registry('runner', parent=MMENGINE_RUNNERS) +# manage runner constructors that define how to initialize runners +RUNNER_CONSTRUCTORS = Registry( + 'runner constructor', parent=MMENGINE_RUNNER_CONSTRUCTORS) +# manage all kinds of loops like `EpochBasedTrainLoop` +LOOPS = Registry('loop', parent=MMENGINE_LOOPS) +# manage all kinds of hooks like `CheckpointHook` +HOOKS = Registry( + 'hook', parent=MMENGINE_HOOKS, locations=['mmpose.engine.hooks']) + +# Registries For Data and the related +# manage data-related modules +DATASETS = Registry( + 'dataset', parent=MMENGINE_DATASETS, locations=['mmpose.datasets']) +DATA_SAMPLERS = Registry( + 'data sampler', + parent=MMENGINE_DATA_SAMPLERS, + locations=['mmpose.datasets.samplers']) +TRANSFORMS = Registry( + 'transform', + parent=MMENGINE_TRANSFORMS, + locations=['mmpose.datasets.transforms']) + +# manage all kinds of modules inheriting `nn.Module` +MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmpose.models']) +# manage all kinds of model wrappers like 'MMDistributedDataParallel' +MODEL_WRAPPERS = Registry( + 'model_wrapper', + parent=MMENGINE_MODEL_WRAPPERS, + locations=['mmpose.models']) +# manage all kinds of weight initialization modules like `Uniform` +WEIGHT_INITIALIZERS = Registry( + 'weight initializer', + parent=MMENGINE_WEIGHT_INITIALIZERS, + locations=['mmpose.models']) +# manage all kinds of batch augmentations like Mixup and CutMix. +BATCH_AUGMENTS = Registry('batch augment', locations=['mmpose.models']) + +# Registries For Optimizer and the related +# manage all kinds of optimizers like `SGD` and `Adam` +OPTIMIZERS = Registry( + 'optimizer', parent=MMENGINE_OPTIMIZERS, locations=['mmpose.engine']) +# manage optimizer wrapper +OPTIM_WRAPPERS = Registry( + 'optimizer_wrapper', + parent=MMENGINE_OPTIM_WRAPPERS, + locations=['mmpose.engine']) +# manage constructors that customize the optimization hyperparameters. 
+OPTIM_WRAPPER_CONSTRUCTORS = Registry( + 'optimizer wrapper constructor', + parent=MMENGINE_OPTIM_WRAPPER_CONSTRUCTORS, + locations=['mmpose.engine.optim_wrappers']) +# manage all kinds of parameter schedulers like `MultiStepLR` +PARAM_SCHEDULERS = Registry( + 'parameter scheduler', + parent=MMENGINE_PARAM_SCHEDULERS, + locations=['mmpose.engine']) + +# manage all kinds of metrics +METRICS = Registry( + 'metric', parent=MMENGINE_METRICS, locations=['mmpose.evaluation.metrics']) +# manage all kinds of evaluators +EVALUATORS = Registry( + 'evaluator', parent=MMENGINE_EVALUATOR, locations=['mmpose.evaluation']) + +# manage task-specific modules like anchor generators and box coders +TASK_UTILS = Registry( + 'task util', parent=MMENGINE_TASK_UTILS, locations=['mmpose.models']) + +# Registries For Visualizer and the related +# manage visualizer +VISUALIZERS = Registry( + 'visualizer', + parent=MMENGINE_VISUALIZERS, + locations=['mmpose.visualization']) +# manage visualizer backend +VISBACKENDS = Registry( + 'vis_backend', + parent=MMENGINE_VISBACKENDS, + locations=['mmpose.visualization']) + +# manage all kinds log processors +LOG_PROCESSORS = Registry( + 'log processor', + parent=MMENGINE_LOG_PROCESSORS, + locations=['mmpose.visualization']) + +# manager keypoint encoder/decoder +KEYPOINT_CODECS = Registry('KEYPOINT_CODECS', locations=['mmpose.codecs']) + +# manage inferencer +INFERENCERS = Registry( + 'inferencer', + parent=MMENGINE_INFERENCERS, + locations=['mmpose.apis.inferencers']) diff --git a/mmpose/structures/__init__.py b/mmpose/structures/__init__.py index e4384af1cd..8b326f985f 100644 --- a/mmpose/structures/__init__.py +++ b/mmpose/structures/__init__.py @@ -1,15 +1,15 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .bbox import (bbox_cs2xywh, bbox_cs2xyxy, bbox_xywh2cs, bbox_xywh2xyxy, - bbox_xyxy2cs, bbox_xyxy2xywh, flip_bbox, - get_udp_warp_matrix, get_warp_matrix) -from .keypoint import flip_keypoints -from .multilevel_pixel_data import MultilevelPixelData -from .pose_data_sample import PoseDataSample -from .utils import merge_data_samples, revert_heatmap, split_instances - -__all__ = [ - 'PoseDataSample', 'MultilevelPixelData', 'bbox_cs2xywh', 'bbox_cs2xyxy', - 'bbox_xywh2cs', 'bbox_xywh2xyxy', 'bbox_xyxy2cs', 'bbox_xyxy2xywh', - 'flip_bbox', 'get_udp_warp_matrix', 'get_warp_matrix', 'flip_keypoints', - 'merge_data_samples', 'revert_heatmap', 'split_instances' -] +# Copyright (c) OpenMMLab. All rights reserved. +from .bbox import (bbox_cs2xywh, bbox_cs2xyxy, bbox_xywh2cs, bbox_xywh2xyxy, + bbox_xyxy2cs, bbox_xyxy2xywh, flip_bbox, + get_udp_warp_matrix, get_warp_matrix) +from .keypoint import flip_keypoints +from .multilevel_pixel_data import MultilevelPixelData +from .pose_data_sample import PoseDataSample +from .utils import merge_data_samples, revert_heatmap, split_instances + +__all__ = [ + 'PoseDataSample', 'MultilevelPixelData', 'bbox_cs2xywh', 'bbox_cs2xyxy', + 'bbox_xywh2cs', 'bbox_xywh2xyxy', 'bbox_xyxy2cs', 'bbox_xyxy2xywh', + 'flip_bbox', 'get_udp_warp_matrix', 'get_warp_matrix', 'flip_keypoints', + 'merge_data_samples', 'revert_heatmap', 'split_instances' +] diff --git a/mmpose/structures/bbox/__init__.py b/mmpose/structures/bbox/__init__.py index a3e723918c..a91af7e9be 100644 --- a/mmpose/structures/bbox/__init__.py +++ b/mmpose/structures/bbox/__init__.py @@ -1,10 +1,10 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
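The bbox helpers re-exported above are the building blocks of the top-down pipeline: a detected box is converted to center/scale and then warped into a fixed-size model input. A small sketch with dummy values (the 1.25 padding factor and the 192x256 input size are conventional choices, not requirements):

```python
import cv2
import numpy as np

from mmpose.structures import bbox_xyxy2cs, get_warp_matrix

img = np.zeros((480, 640, 3), dtype=np.uint8)               # dummy image (H, W, 3)
bbox = np.array([50., 60., 150., 260.], dtype=np.float32)   # (x1, y1, x2, y2)

# box -> (center, scale), enlarged by a padding factor
center, scale = bbox_xyxy2cs(bbox, padding=1.25)

# warp the padded box area into a 192x256 crop for the pose model
warp_mat = get_warp_matrix(center, scale, rot=0., output_size=(192, 256))
crop = cv2.warpAffine(img, warp_mat, (192, 256), flags=cv2.INTER_LINEAR)
assert crop.shape == (256, 192, 3)
```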
-from .transforms import (bbox_cs2xywh, bbox_cs2xyxy, bbox_xywh2cs, - bbox_xywh2xyxy, bbox_xyxy2cs, bbox_xyxy2xywh, - flip_bbox, get_udp_warp_matrix, get_warp_matrix) - -__all__ = [ - 'bbox_cs2xywh', 'bbox_cs2xyxy', 'bbox_xywh2cs', 'bbox_xywh2xyxy', - 'bbox_xyxy2cs', 'bbox_xyxy2xywh', 'flip_bbox', 'get_udp_warp_matrix', - 'get_warp_matrix' -] +# Copyright (c) OpenMMLab. All rights reserved. +from .transforms import (bbox_cs2xywh, bbox_cs2xyxy, bbox_xywh2cs, + bbox_xywh2xyxy, bbox_xyxy2cs, bbox_xyxy2xywh, + flip_bbox, get_udp_warp_matrix, get_warp_matrix) + +__all__ = [ + 'bbox_cs2xywh', 'bbox_cs2xyxy', 'bbox_xywh2cs', 'bbox_xywh2xyxy', + 'bbox_xyxy2cs', 'bbox_xyxy2xywh', 'flip_bbox', 'get_udp_warp_matrix', + 'get_warp_matrix' +] diff --git a/mmpose/structures/bbox/transforms.py b/mmpose/structures/bbox/transforms.py index c0c8e73395..3b89bb9664 100644 --- a/mmpose/structures/bbox/transforms.py +++ b/mmpose/structures/bbox/transforms.py @@ -1,361 +1,361 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math -from typing import Tuple - -import cv2 -import numpy as np - - -def bbox_xyxy2xywh(bbox_xyxy: np.ndarray) -> np.ndarray: - """Transform the bbox format from x1y1x2y2 to xywh. - - Args: - bbox_xyxy (np.ndarray): Bounding boxes (with scores), shaped (n, 4) or - (n, 5). (left, top, right, bottom, [score]) - - Returns: - np.ndarray: Bounding boxes (with scores), - shaped (n, 4) or (n, 5). (left, top, width, height, [score]) - """ - bbox_xywh = bbox_xyxy.copy() - bbox_xywh[:, 2] = bbox_xywh[:, 2] - bbox_xywh[:, 0] - bbox_xywh[:, 3] = bbox_xywh[:, 3] - bbox_xywh[:, 1] - - return bbox_xywh - - -def bbox_xywh2xyxy(bbox_xywh: np.ndarray) -> np.ndarray: - """Transform the bbox format from xywh to x1y1x2y2. - - Args: - bbox_xywh (ndarray): Bounding boxes (with scores), - shaped (n, 4) or (n, 5). (left, top, width, height, [score]) - Returns: - np.ndarray: Bounding boxes (with scores), shaped (n, 4) or - (n, 5). (left, top, right, bottom, [score]) - """ - bbox_xyxy = bbox_xywh.copy() - bbox_xyxy[:, 2] = bbox_xyxy[:, 2] + bbox_xyxy[:, 0] - bbox_xyxy[:, 3] = bbox_xyxy[:, 3] + bbox_xyxy[:, 1] - - return bbox_xyxy - - -def bbox_xyxy2cs(bbox: np.ndarray, - padding: float = 1.) -> Tuple[np.ndarray, np.ndarray]: - """Transform the bbox format from (x,y,w,h) into (center, scale) - - Args: - bbox (ndarray): Bounding box(es) in shape (4,) or (n, 4), formatted - as (left, top, right, bottom) - padding (float): BBox padding factor that will be multilied to scale. - Default: 1.0 - - Returns: - tuple: A tuple containing center and scale. - - np.ndarray[float32]: Center (x, y) of the bbox in shape (2,) or - (n, 2) - - np.ndarray[float32]: Scale (w, h) of the bbox in shape (2,) or - (n, 2) - """ - # convert single bbox from (4, ) to (1, 4) - dim = bbox.ndim - if dim == 1: - bbox = bbox[None, :] - - x1, y1, x2, y2 = np.hsplit(bbox, [1, 2, 3]) - center = np.hstack([x1 + x2, y1 + y2]) * 0.5 - scale = np.hstack([x2 - x1, y2 - y1]) * padding - - if dim == 1: - center = center[0] - scale = scale[0] - - return center, scale - - -def bbox_xywh2cs(bbox: np.ndarray, - padding: float = 1.) -> Tuple[np.ndarray, np.ndarray]: - """Transform the bbox format from (x,y,w,h) into (center, scale) - - Args: - bbox (ndarray): Bounding box(es) in shape (4,) or (n, 4), formatted - as (x, y, h, w) - padding (float): BBox padding factor that will be multilied to scale. - Default: 1.0 - - Returns: - tuple: A tuple containing center and scale. 
- - np.ndarray[float32]: Center (x, y) of the bbox in shape (2,) or - (n, 2) - - np.ndarray[float32]: Scale (w, h) of the bbox in shape (2,) or - (n, 2) - """ - - # convert single bbox from (4, ) to (1, 4) - dim = bbox.ndim - if dim == 1: - bbox = bbox[None, :] - - x, y, w, h = np.hsplit(bbox, [1, 2, 3]) - center = np.hstack([x + w * 0.5, y + h * 0.5]) - scale = np.hstack([w, h]) * padding - - if dim == 1: - center = center[0] - scale = scale[0] - - return center, scale - - -def bbox_cs2xyxy(center: np.ndarray, - scale: np.ndarray, - padding: float = 1.) -> np.ndarray: - """Transform the bbox format from (center, scale) to (x1,y1,x2,y2). - - Args: - center (ndarray): BBox center (x, y) in shape (2,) or (n, 2) - scale (ndarray): BBox scale (w, h) in shape (2,) or (n, 2) - padding (float): BBox padding factor that will be multilied to scale. - Default: 1.0 - - Returns: - ndarray[float32]: BBox (x1, y1, x2, y2) in shape (4, ) or (n, 4) - """ - - dim = center.ndim - assert scale.ndim == dim - - if dim == 1: - center = center[None, :] - scale = scale[None, :] - - wh = scale / padding - xy = center - 0.5 * wh - bbox = np.hstack((xy, xy + wh)) - - if dim == 1: - bbox = bbox[0] - - return bbox - - -def bbox_cs2xywh(center: np.ndarray, - scale: np.ndarray, - padding: float = 1.) -> np.ndarray: - """Transform the bbox format from (center, scale) to (x,y,w,h). - - Args: - center (ndarray): BBox center (x, y) in shape (2,) or (n, 2) - scale (ndarray): BBox scale (w, h) in shape (2,) or (n, 2) - padding (float): BBox padding factor that will be multilied to scale. - Default: 1.0 - - Returns: - ndarray[float32]: BBox (x, y, w, h) in shape (4, ) or (n, 4) - """ - - dim = center.ndim - assert scale.ndim == dim - - if dim == 1: - center = center[None, :] - scale = scale[None, :] - - wh = scale / padding - xy = center - 0.5 * wh - bbox = np.hstack((xy, wh)) - - if dim == 1: - bbox = bbox[0] - - return bbox - - -def flip_bbox(bbox: np.ndarray, - image_size: Tuple[int, int], - bbox_format: str = 'xywh', - direction: str = 'horizontal') -> np.ndarray: - """Flip the bbox in the given direction. - - Args: - bbox (np.ndarray): The bounding boxes. The shape should be (..., 4) - if ``bbox_format`` is ``'xyxy'`` or ``'xywh'``, and (..., 2) if - ``bbox_format`` is ``'center'`` - image_size (tuple): The image shape in [w, h] - bbox_format (str): The bbox format. Options are ``'xywh'``, ``'xyxy'`` - and ``'center'``. - direction (str): The flip direction. Options are ``'horizontal'``, - ``'vertical'`` and ``'diagonal'``. Defaults to ``'horizontal'`` - - Returns: - np.ndarray: The flipped bounding boxes. - """ - direction_options = {'horizontal', 'vertical', 'diagonal'} - assert direction in direction_options, ( - f'Invalid flipping direction "{direction}". ' - f'Options are {direction_options}') - - format_options = {'xywh', 'xyxy', 'center'} - assert bbox_format in format_options, ( - f'Invalid bbox format "{bbox_format}". 
' - f'Options are {format_options}') - - bbox_flipped = bbox.copy() - w, h = image_size - - # TODO: consider using "integer corner" coordinate system - if direction == 'horizontal': - if bbox_format == 'xywh' or bbox_format == 'center': - bbox_flipped[..., 0] = w - bbox[..., 0] - 1 - elif bbox_format == 'xyxy': - bbox_flipped[..., ::2] = w - bbox[..., ::2] - 1 - elif direction == 'vertical': - if bbox_format == 'xywh' or bbox_format == 'center': - bbox_flipped[..., 1] = h - bbox[..., 1] - 1 - elif bbox_format == 'xyxy': - bbox_flipped[..., 1::2] = h - bbox[..., 1::2] - 1 - elif direction == 'diagonal': - if bbox_format == 'xywh' or bbox_format == 'center': - bbox_flipped[..., :2] = [w, h] - bbox[..., :2] - 1 - elif bbox_format == 'xyxy': - bbox_flipped[...] = [w, h, w, h] - bbox - 1 - - return bbox_flipped - - -def get_udp_warp_matrix( - center: np.ndarray, - scale: np.ndarray, - rot: float, - output_size: Tuple[int, int], -) -> np.ndarray: - """Calculate the affine transformation matrix under the unbiased - constraint. See `UDP (CVPR 2020)`_ for details. - - Note: - - - The bbox number: N - - Args: - center (np.ndarray[2, ]): Center of the bounding box (x, y). - scale (np.ndarray[2, ]): Scale of the bounding box - wrt [width, height]. - rot (float): Rotation angle (degree). - output_size (tuple): Size ([w, h]) of the output image - - Returns: - np.ndarray: A 2x3 transformation matrix - - .. _`UDP (CVPR 2020)`: https://arxiv.org/abs/1911.07524 - """ - assert len(center) == 2 - assert len(scale) == 2 - assert len(output_size) == 2 - - input_size = center * 2 - rot_rad = np.deg2rad(rot) - warp_mat = np.zeros((2, 3), dtype=np.float32) - scale_x = (output_size[0] - 1) / scale[0] - scale_y = (output_size[1] - 1) / scale[1] - warp_mat[0, 0] = math.cos(rot_rad) * scale_x - warp_mat[0, 1] = -math.sin(rot_rad) * scale_x - warp_mat[0, 2] = scale_x * (-0.5 * input_size[0] * math.cos(rot_rad) + - 0.5 * input_size[1] * math.sin(rot_rad) + - 0.5 * scale[0]) - warp_mat[1, 0] = math.sin(rot_rad) * scale_y - warp_mat[1, 1] = math.cos(rot_rad) * scale_y - warp_mat[1, 2] = scale_y * (-0.5 * input_size[0] * math.sin(rot_rad) - - 0.5 * input_size[1] * math.cos(rot_rad) + - 0.5 * scale[1]) - return warp_mat - - -def get_warp_matrix(center: np.ndarray, - scale: np.ndarray, - rot: float, - output_size: Tuple[int, int], - shift: Tuple[float, float] = (0., 0.), - inv: bool = False) -> np.ndarray: - """Calculate the affine transformation matrix that can warp the bbox area - in the input image to the output size. - - Args: - center (np.ndarray[2, ]): Center of the bounding box (x, y). - scale (np.ndarray[2, ]): Scale of the bounding box - wrt [width, height]. - rot (float): Rotation angle (degree). - output_size (np.ndarray[2, ] | list(2,)): Size of the - destination heatmaps. - shift (0-100%): Shift translation ratio wrt the width/height. - Default (0., 0.). - inv (bool): Option to inverse the affine transform direction. 
- (inv=False: src->dst or inv=True: dst->src) - - Returns: - np.ndarray: A 2x3 transformation matrix - """ - assert len(center) == 2 - assert len(scale) == 2 - assert len(output_size) == 2 - assert len(shift) == 2 - - shift = np.array(shift) - src_w = scale[0] - dst_w = output_size[0] - dst_h = output_size[1] - - rot_rad = np.deg2rad(rot) - src_dir = _rotate_point(np.array([0., src_w * -0.5]), rot_rad) - dst_dir = np.array([0., dst_w * -0.5]) - - src = np.zeros((3, 2), dtype=np.float32) - src[0, :] = center + scale * shift - src[1, :] = center + src_dir + scale * shift - src[2, :] = _get_3rd_point(src[0, :], src[1, :]) - - dst = np.zeros((3, 2), dtype=np.float32) - dst[0, :] = [dst_w * 0.5, dst_h * 0.5] - dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir - dst[2, :] = _get_3rd_point(dst[0, :], dst[1, :]) - - if inv: - warp_mat = cv2.getAffineTransform(np.float32(dst), np.float32(src)) - else: - warp_mat = cv2.getAffineTransform(np.float32(src), np.float32(dst)) - return warp_mat - - -def _rotate_point(pt: np.ndarray, angle_rad: float) -> np.ndarray: - """Rotate a point by an angle. - - Args: - pt (np.ndarray): 2D point coordinates (x, y) in shape (2, ) - angle_rad (float): rotation angle in radian - - Returns: - np.ndarray: Rotated point in shape (2, ) - """ - - sn, cs = np.sin(angle_rad), np.cos(angle_rad) - rot_mat = np.array([[cs, -sn], [sn, cs]]) - return rot_mat @ pt - - -def _get_3rd_point(a: np.ndarray, b: np.ndarray): - """To calculate the affine matrix, three pairs of points are required. This - function is used to get the 3rd point, given 2D points a & b. - - The 3rd point is defined by rotating vector `a - b` by 90 degrees - anticlockwise, using b as the rotation center. - - Args: - a (np.ndarray): The 1st point (x,y) in shape (2, ) - b (np.ndarray): The 2nd point (x,y) in shape (2, ) - - Returns: - np.ndarray: The 3rd point. - """ - direction = a - b - c = b + np.r_[-direction[1], direction[0]] - return c +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import Tuple + +import cv2 +import numpy as np + + +def bbox_xyxy2xywh(bbox_xyxy: np.ndarray) -> np.ndarray: + """Transform the bbox format from x1y1x2y2 to xywh. + + Args: + bbox_xyxy (np.ndarray): Bounding boxes (with scores), shaped (n, 4) or + (n, 5). (left, top, right, bottom, [score]) + + Returns: + np.ndarray: Bounding boxes (with scores), + shaped (n, 4) or (n, 5). (left, top, width, height, [score]) + """ + bbox_xywh = bbox_xyxy.copy() + bbox_xywh[:, 2] = bbox_xywh[:, 2] - bbox_xywh[:, 0] + bbox_xywh[:, 3] = bbox_xywh[:, 3] - bbox_xywh[:, 1] + + return bbox_xywh + + +def bbox_xywh2xyxy(bbox_xywh: np.ndarray) -> np.ndarray: + """Transform the bbox format from xywh to x1y1x2y2. + + Args: + bbox_xywh (ndarray): Bounding boxes (with scores), + shaped (n, 4) or (n, 5). (left, top, width, height, [score]) + Returns: + np.ndarray: Bounding boxes (with scores), shaped (n, 4) or + (n, 5). (left, top, right, bottom, [score]) + """ + bbox_xyxy = bbox_xywh.copy() + bbox_xyxy[:, 2] = bbox_xyxy[:, 2] + bbox_xyxy[:, 0] + bbox_xyxy[:, 3] = bbox_xyxy[:, 3] + bbox_xyxy[:, 1] + + return bbox_xyxy + + +def bbox_xyxy2cs(bbox: np.ndarray, + padding: float = 1.) -> Tuple[np.ndarray, np.ndarray]: + """Transform the bbox format from (x,y,w,h) into (center, scale) + + Args: + bbox (ndarray): Bounding box(es) in shape (4,) or (n, 4), formatted + as (left, top, right, bottom) + padding (float): BBox padding factor that will be multilied to scale. 
+ Default: 1.0 + + Returns: + tuple: A tuple containing center and scale. + - np.ndarray[float32]: Center (x, y) of the bbox in shape (2,) or + (n, 2) + - np.ndarray[float32]: Scale (w, h) of the bbox in shape (2,) or + (n, 2) + """ + # convert single bbox from (4, ) to (1, 4) + dim = bbox.ndim + if dim == 1: + bbox = bbox[None, :] + + x1, y1, x2, y2 = np.hsplit(bbox, [1, 2, 3]) + center = np.hstack([x1 + x2, y1 + y2]) * 0.5 + scale = np.hstack([x2 - x1, y2 - y1]) * padding + + if dim == 1: + center = center[0] + scale = scale[0] + + return center, scale + + +def bbox_xywh2cs(bbox: np.ndarray, + padding: float = 1.) -> Tuple[np.ndarray, np.ndarray]: + """Transform the bbox format from (x,y,w,h) into (center, scale) + + Args: + bbox (ndarray): Bounding box(es) in shape (4,) or (n, 4), formatted + as (x, y, h, w) + padding (float): BBox padding factor that will be multilied to scale. + Default: 1.0 + + Returns: + tuple: A tuple containing center and scale. + - np.ndarray[float32]: Center (x, y) of the bbox in shape (2,) or + (n, 2) + - np.ndarray[float32]: Scale (w, h) of the bbox in shape (2,) or + (n, 2) + """ + + # convert single bbox from (4, ) to (1, 4) + dim = bbox.ndim + if dim == 1: + bbox = bbox[None, :] + + x, y, w, h = np.hsplit(bbox, [1, 2, 3]) + center = np.hstack([x + w * 0.5, y + h * 0.5]) + scale = np.hstack([w, h]) * padding + + if dim == 1: + center = center[0] + scale = scale[0] + + return center, scale + + +def bbox_cs2xyxy(center: np.ndarray, + scale: np.ndarray, + padding: float = 1.) -> np.ndarray: + """Transform the bbox format from (center, scale) to (x1,y1,x2,y2). + + Args: + center (ndarray): BBox center (x, y) in shape (2,) or (n, 2) + scale (ndarray): BBox scale (w, h) in shape (2,) or (n, 2) + padding (float): BBox padding factor that will be multilied to scale. + Default: 1.0 + + Returns: + ndarray[float32]: BBox (x1, y1, x2, y2) in shape (4, ) or (n, 4) + """ + + dim = center.ndim + assert scale.ndim == dim + + if dim == 1: + center = center[None, :] + scale = scale[None, :] + + wh = scale / padding + xy = center - 0.5 * wh + bbox = np.hstack((xy, xy + wh)) + + if dim == 1: + bbox = bbox[0] + + return bbox + + +def bbox_cs2xywh(center: np.ndarray, + scale: np.ndarray, + padding: float = 1.) -> np.ndarray: + """Transform the bbox format from (center, scale) to (x,y,w,h). + + Args: + center (ndarray): BBox center (x, y) in shape (2,) or (n, 2) + scale (ndarray): BBox scale (w, h) in shape (2,) or (n, 2) + padding (float): BBox padding factor that will be multilied to scale. + Default: 1.0 + + Returns: + ndarray[float32]: BBox (x, y, w, h) in shape (4, ) or (n, 4) + """ + + dim = center.ndim + assert scale.ndim == dim + + if dim == 1: + center = center[None, :] + scale = scale[None, :] + + wh = scale / padding + xy = center - 0.5 * wh + bbox = np.hstack((xy, wh)) + + if dim == 1: + bbox = bbox[0] + + return bbox + + +def flip_bbox(bbox: np.ndarray, + image_size: Tuple[int, int], + bbox_format: str = 'xywh', + direction: str = 'horizontal') -> np.ndarray: + """Flip the bbox in the given direction. + + Args: + bbox (np.ndarray): The bounding boxes. The shape should be (..., 4) + if ``bbox_format`` is ``'xyxy'`` or ``'xywh'``, and (..., 2) if + ``bbox_format`` is ``'center'`` + image_size (tuple): The image shape in [w, h] + bbox_format (str): The bbox format. Options are ``'xywh'``, ``'xyxy'`` + and ``'center'``. + direction (str): The flip direction. Options are ``'horizontal'``, + ``'vertical'`` and ``'diagonal'``. 
Defaults to ``'horizontal'`` + + Returns: + np.ndarray: The flipped bounding boxes. + """ + direction_options = {'horizontal', 'vertical', 'diagonal'} + assert direction in direction_options, ( + f'Invalid flipping direction "{direction}". ' + f'Options are {direction_options}') + + format_options = {'xywh', 'xyxy', 'center'} + assert bbox_format in format_options, ( + f'Invalid bbox format "{bbox_format}". ' + f'Options are {format_options}') + + bbox_flipped = bbox.copy() + w, h = image_size + + # TODO: consider using "integer corner" coordinate system + if direction == 'horizontal': + if bbox_format == 'xywh' or bbox_format == 'center': + bbox_flipped[..., 0] = w - bbox[..., 0] - 1 + elif bbox_format == 'xyxy': + bbox_flipped[..., ::2] = w - bbox[..., ::2] - 1 + elif direction == 'vertical': + if bbox_format == 'xywh' or bbox_format == 'center': + bbox_flipped[..., 1] = h - bbox[..., 1] - 1 + elif bbox_format == 'xyxy': + bbox_flipped[..., 1::2] = h - bbox[..., 1::2] - 1 + elif direction == 'diagonal': + if bbox_format == 'xywh' or bbox_format == 'center': + bbox_flipped[..., :2] = [w, h] - bbox[..., :2] - 1 + elif bbox_format == 'xyxy': + bbox_flipped[...] = [w, h, w, h] - bbox - 1 + + return bbox_flipped + + +def get_udp_warp_matrix( + center: np.ndarray, + scale: np.ndarray, + rot: float, + output_size: Tuple[int, int], +) -> np.ndarray: + """Calculate the affine transformation matrix under the unbiased + constraint. See `UDP (CVPR 2020)`_ for details. + + Note: + + - The bbox number: N + + Args: + center (np.ndarray[2, ]): Center of the bounding box (x, y). + scale (np.ndarray[2, ]): Scale of the bounding box + wrt [width, height]. + rot (float): Rotation angle (degree). + output_size (tuple): Size ([w, h]) of the output image + + Returns: + np.ndarray: A 2x3 transformation matrix + + .. _`UDP (CVPR 2020)`: https://arxiv.org/abs/1911.07524 + """ + assert len(center) == 2 + assert len(scale) == 2 + assert len(output_size) == 2 + + input_size = center * 2 + rot_rad = np.deg2rad(rot) + warp_mat = np.zeros((2, 3), dtype=np.float32) + scale_x = (output_size[0] - 1) / scale[0] + scale_y = (output_size[1] - 1) / scale[1] + warp_mat[0, 0] = math.cos(rot_rad) * scale_x + warp_mat[0, 1] = -math.sin(rot_rad) * scale_x + warp_mat[0, 2] = scale_x * (-0.5 * input_size[0] * math.cos(rot_rad) + + 0.5 * input_size[1] * math.sin(rot_rad) + + 0.5 * scale[0]) + warp_mat[1, 0] = math.sin(rot_rad) * scale_y + warp_mat[1, 1] = math.cos(rot_rad) * scale_y + warp_mat[1, 2] = scale_y * (-0.5 * input_size[0] * math.sin(rot_rad) - + 0.5 * input_size[1] * math.cos(rot_rad) + + 0.5 * scale[1]) + return warp_mat + + +def get_warp_matrix(center: np.ndarray, + scale: np.ndarray, + rot: float, + output_size: Tuple[int, int], + shift: Tuple[float, float] = (0., 0.), + inv: bool = False) -> np.ndarray: + """Calculate the affine transformation matrix that can warp the bbox area + in the input image to the output size. + + Args: + center (np.ndarray[2, ]): Center of the bounding box (x, y). + scale (np.ndarray[2, ]): Scale of the bounding box + wrt [width, height]. + rot (float): Rotation angle (degree). + output_size (np.ndarray[2, ] | list(2,)): Size of the + destination heatmaps. + shift (0-100%): Shift translation ratio wrt the width/height. + Default (0., 0.). + inv (bool): Option to inverse the affine transform direction. 
+ (inv=False: src->dst or inv=True: dst->src) + + Returns: + np.ndarray: A 2x3 transformation matrix + """ + assert len(center) == 2 + assert len(scale) == 2 + assert len(output_size) == 2 + assert len(shift) == 2 + + shift = np.array(shift) + src_w = scale[0] + dst_w = output_size[0] + dst_h = output_size[1] + + rot_rad = np.deg2rad(rot) + src_dir = _rotate_point(np.array([0., src_w * -0.5]), rot_rad) + dst_dir = np.array([0., dst_w * -0.5]) + + src = np.zeros((3, 2), dtype=np.float32) + src[0, :] = center + scale * shift + src[1, :] = center + src_dir + scale * shift + src[2, :] = _get_3rd_point(src[0, :], src[1, :]) + + dst = np.zeros((3, 2), dtype=np.float32) + dst[0, :] = [dst_w * 0.5, dst_h * 0.5] + dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir + dst[2, :] = _get_3rd_point(dst[0, :], dst[1, :]) + + if inv: + warp_mat = cv2.getAffineTransform(np.float32(dst), np.float32(src)) + else: + warp_mat = cv2.getAffineTransform(np.float32(src), np.float32(dst)) + return warp_mat + + +def _rotate_point(pt: np.ndarray, angle_rad: float) -> np.ndarray: + """Rotate a point by an angle. + + Args: + pt (np.ndarray): 2D point coordinates (x, y) in shape (2, ) + angle_rad (float): rotation angle in radian + + Returns: + np.ndarray: Rotated point in shape (2, ) + """ + + sn, cs = np.sin(angle_rad), np.cos(angle_rad) + rot_mat = np.array([[cs, -sn], [sn, cs]]) + return rot_mat @ pt + + +def _get_3rd_point(a: np.ndarray, b: np.ndarray): + """To calculate the affine matrix, three pairs of points are required. This + function is used to get the 3rd point, given 2D points a & b. + + The 3rd point is defined by rotating vector `a - b` by 90 degrees + anticlockwise, using b as the rotation center. + + Args: + a (np.ndarray): The 1st point (x,y) in shape (2, ) + b (np.ndarray): The 2nd point (x,y) in shape (2, ) + + Returns: + np.ndarray: The 3rd point. + """ + direction = a - b + c = b + np.r_[-direction[1], direction[0]] + return c diff --git a/mmpose/structures/keypoint/__init__.py b/mmpose/structures/keypoint/__init__.py index 12ee96cf7c..468d77ddfb 100644 --- a/mmpose/structures/keypoint/__init__.py +++ b/mmpose/structures/keypoint/__init__.py @@ -1,5 +1,5 @@ -# Copyright (c) OpenMMLab. All rights reserved. - -from .transforms import flip_keypoints, flip_keypoints_custom_center - -__all__ = ['flip_keypoints', 'flip_keypoints_custom_center'] +# Copyright (c) OpenMMLab. All rights reserved. + +from .transforms import flip_keypoints, flip_keypoints_custom_center + +__all__ = ['flip_keypoints', 'flip_keypoints_custom_center'] diff --git a/mmpose/structures/keypoint/transforms.py b/mmpose/structures/keypoint/transforms.py index b50da4f8fe..fa94a8055c 100644 --- a/mmpose/structures/keypoint/transforms.py +++ b/mmpose/structures/keypoint/transforms.py @@ -1,121 +1,121 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import List, Optional, Tuple - -import numpy as np - - -def flip_keypoints(keypoints: np.ndarray, - keypoints_visible: Optional[np.ndarray], - image_size: Tuple[int, int], - flip_indices: List[int], - direction: str = 'horizontal' - ) -> Tuple[np.ndarray, Optional[np.ndarray]]: - """Flip keypoints in the given direction. - - Note: - - - keypoint number: K - - keypoint dimension: D - - Args: - keypoints (np.ndarray): Keypoints in shape (..., K, D) - keypoints_visible (np.ndarray, optional): The visibility of keypoints - in shape (..., K, 1). 
Set ``None`` if the keypoint visibility is - unavailable - image_size (tuple): The image shape in [w, h] - flip_indices (List[int]): The indices of each keypoint's symmetric - keypoint - direction (str): The flip direction. Options are ``'horizontal'``, - ``'vertical'`` and ``'diagonal'``. Defaults to ``'horizontal'`` - - Returns: - tuple: - - keypoints_flipped (np.ndarray): Flipped keypoints in shape - (..., K, D) - - keypoints_visible_flipped (np.ndarray, optional): Flipped keypoints' - visibility in shape (..., K, 1). Return ``None`` if the input - ``keypoints_visible`` is ``None`` - """ - - assert keypoints.shape[:-1] == keypoints_visible.shape, ( - f'Mismatched shapes of keypoints {keypoints.shape} and ' - f'keypoints_visible {keypoints_visible.shape}') - - direction_options = {'horizontal', 'vertical', 'diagonal'} - assert direction in direction_options, ( - f'Invalid flipping direction "{direction}". ' - f'Options are {direction_options}') - - # swap the symmetric keypoint pairs - if direction == 'horizontal' or direction == 'vertical': - keypoints = keypoints[..., flip_indices, :] - if keypoints_visible is not None: - keypoints_visible = keypoints_visible[..., flip_indices] - - # flip the keypoints - w, h = image_size - if direction == 'horizontal': - keypoints[..., 0] = w - 1 - keypoints[..., 0] - elif direction == 'vertical': - keypoints[..., 1] = h - 1 - keypoints[..., 1] - else: - keypoints = [w, h] - keypoints - 1 - - return keypoints, keypoints_visible - - -def flip_keypoints_custom_center(keypoints: np.ndarray, - keypoints_visible: np.ndarray, - flip_indices: List[int], - center_mode: str = 'static', - center_x: float = 0.5, - center_index: int = 0): - """Flip human joints horizontally. - - Note: - - num_keypoint: K - - dimension: D - - Args: - keypoints (np.ndarray([..., K, D])): Coordinates of keypoints. - keypoints_visible (np.ndarray([..., K])): Visibility item of keypoints. - flip_indices (list[int]): The indices to flip the keypoints. - center_mode (str): The mode to set the center location on the x-axis - to flip around. Options are: - - - static: use a static x value (see center_x also) - - root: use a root joint (see center_index also) - - Defaults: ``'static'``. - center_x (float): Set the x-axis location of the flip center. Only used - when ``center_mode`` is ``'static'``. Defaults: 0.5. - center_index (int): Set the index of the root joint, whose x location - will be used as the flip center. Only used when ``center_mode`` is - ``'root'``. Defaults: 0. - - Returns: - np.ndarray([..., K, C]): Flipped joints. - """ - - assert keypoints.ndim >= 2, f'Invalid pose shape {keypoints.shape}' - - allowed_center_mode = {'static', 'root'} - assert center_mode in allowed_center_mode, 'Get invalid center_mode ' \ - f'{center_mode}, allowed choices are {allowed_center_mode}' - - if center_mode == 'static': - x_c = center_x - elif center_mode == 'root': - assert keypoints.shape[-2] > center_index - x_c = keypoints[..., center_index, 0] - - keypoints_flipped = keypoints.copy() - keypoints_visible_flipped = keypoints_visible.copy() - # Swap left-right parts - for left, right in enumerate(flip_indices): - keypoints_flipped[..., left, :] = keypoints[..., right, :] - keypoints_visible_flipped[..., left] = keypoints_visible[..., right] - - # Flip horizontally - keypoints_flipped[..., 0] = x_c * 2 - keypoints_flipped[..., 0] - return keypoints_flipped, keypoints_visible_flipped +# Copyright (c) OpenMMLab. All rights reserved. 
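`flip_keypoints` mirrors the coordinates and swaps each keypoint with its symmetric counterpart in one call. A toy sketch (three hypothetical keypoints: nose, left eye, right eye):

```python
import numpy as np

from mmpose.structures import flip_keypoints

keypoints = np.array([[[100., 50.], [110., 40.], [90., 40.]]])  # (1, K=3, 2)
keypoints_visible = np.ones((1, 3), dtype=np.float32)           # (1, K)
flip_indices = [0, 2, 1]  # nose maps to itself, the eyes swap

kpts_flipped, vis_flipped = flip_keypoints(
    keypoints, keypoints_visible,
    image_size=(192, 256),     # (w, h)
    flip_indices=flip_indices,
    direction='horizontal')
# x becomes w - 1 - x, and the left/right eye channels are exchanged
assert np.allclose(kpts_flipped[0, 0], [91., 50.])
```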
+from typing import List, Optional, Tuple + +import numpy as np + + +def flip_keypoints(keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray], + image_size: Tuple[int, int], + flip_indices: List[int], + direction: str = 'horizontal' + ) -> Tuple[np.ndarray, Optional[np.ndarray]]: + """Flip keypoints in the given direction. + + Note: + + - keypoint number: K + - keypoint dimension: D + + Args: + keypoints (np.ndarray): Keypoints in shape (..., K, D) + keypoints_visible (np.ndarray, optional): The visibility of keypoints + in shape (..., K, 1). Set ``None`` if the keypoint visibility is + unavailable + image_size (tuple): The image shape in [w, h] + flip_indices (List[int]): The indices of each keypoint's symmetric + keypoint + direction (str): The flip direction. Options are ``'horizontal'``, + ``'vertical'`` and ``'diagonal'``. Defaults to ``'horizontal'`` + + Returns: + tuple: + - keypoints_flipped (np.ndarray): Flipped keypoints in shape + (..., K, D) + - keypoints_visible_flipped (np.ndarray, optional): Flipped keypoints' + visibility in shape (..., K, 1). Return ``None`` if the input + ``keypoints_visible`` is ``None`` + """ + + assert keypoints.shape[:-1] == keypoints_visible.shape, ( + f'Mismatched shapes of keypoints {keypoints.shape} and ' + f'keypoints_visible {keypoints_visible.shape}') + + direction_options = {'horizontal', 'vertical', 'diagonal'} + assert direction in direction_options, ( + f'Invalid flipping direction "{direction}". ' + f'Options are {direction_options}') + + # swap the symmetric keypoint pairs + if direction == 'horizontal' or direction == 'vertical': + keypoints = keypoints[..., flip_indices, :] + if keypoints_visible is not None: + keypoints_visible = keypoints_visible[..., flip_indices] + + # flip the keypoints + w, h = image_size + if direction == 'horizontal': + keypoints[..., 0] = w - 1 - keypoints[..., 0] + elif direction == 'vertical': + keypoints[..., 1] = h - 1 - keypoints[..., 1] + else: + keypoints = [w, h] - keypoints - 1 + + return keypoints, keypoints_visible + + +def flip_keypoints_custom_center(keypoints: np.ndarray, + keypoints_visible: np.ndarray, + flip_indices: List[int], + center_mode: str = 'static', + center_x: float = 0.5, + center_index: int = 0): + """Flip human joints horizontally. + + Note: + - num_keypoint: K + - dimension: D + + Args: + keypoints (np.ndarray([..., K, D])): Coordinates of keypoints. + keypoints_visible (np.ndarray([..., K])): Visibility item of keypoints. + flip_indices (list[int]): The indices to flip the keypoints. + center_mode (str): The mode to set the center location on the x-axis + to flip around. Options are: + + - static: use a static x value (see center_x also) + - root: use a root joint (see center_index also) + + Defaults: ``'static'``. + center_x (float): Set the x-axis location of the flip center. Only used + when ``center_mode`` is ``'static'``. Defaults: 0.5. + center_index (int): Set the index of the root joint, whose x location + will be used as the flip center. Only used when ``center_mode`` is + ``'root'``. Defaults: 0. + + Returns: + np.ndarray([..., K, C]): Flipped joints. 
+ """ + + assert keypoints.ndim >= 2, f'Invalid pose shape {keypoints.shape}' + + allowed_center_mode = {'static', 'root'} + assert center_mode in allowed_center_mode, 'Get invalid center_mode ' \ + f'{center_mode}, allowed choices are {allowed_center_mode}' + + if center_mode == 'static': + x_c = center_x + elif center_mode == 'root': + assert keypoints.shape[-2] > center_index + x_c = keypoints[..., center_index, 0] + + keypoints_flipped = keypoints.copy() + keypoints_visible_flipped = keypoints_visible.copy() + # Swap left-right parts + for left, right in enumerate(flip_indices): + keypoints_flipped[..., left, :] = keypoints[..., right, :] + keypoints_visible_flipped[..., left] = keypoints_visible[..., right] + + # Flip horizontally + keypoints_flipped[..., 0] = x_c * 2 - keypoints_flipped[..., 0] + return keypoints_flipped, keypoints_visible_flipped diff --git a/mmpose/structures/multilevel_pixel_data.py b/mmpose/structures/multilevel_pixel_data.py index bea191e729..2a961e8947 100644 --- a/mmpose/structures/multilevel_pixel_data.py +++ b/mmpose/structures/multilevel_pixel_data.py @@ -1,273 +1,273 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from collections import abc -from typing import Any, Callable, List, Optional, Sequence, Tuple, Type, Union - -import numpy as np -import torch -from mmengine.structures import BaseDataElement, PixelData -from mmengine.utils import is_list_of - -IndexType = Union[str, slice, int, list, torch.LongTensor, - torch.cuda.LongTensor, torch.BoolTensor, - torch.cuda.BoolTensor, np.ndarray] - - -class MultilevelPixelData(BaseDataElement): - """Data structure for multi-level pixel-wise annotations or predictions. - - All data items in ``data_fields`` of ``MultilevelPixelData`` are lists - of np.ndarray or torch.Tensor, and should meet the following requirements: - - - Have the same length, which is the number of levels - - At each level, the data should have 3 dimensions in order of channel, - height and weight - - At each level, the data should have the same height and weight - - Examples: - >>> metainfo = dict(num_keypoints=17) - >>> sizes = [(64, 48), (128, 96), (256, 192)] - >>> heatmaps = [np.random.rand(17, h, w) for h, w in sizes] - >>> masks = [torch.rand(1, h, w) for h, w in sizes] - >>> data = MultilevelPixelData(metainfo=metainfo, - ... heatmaps=heatmaps, - ... masks=masks) - - >>> # get data item - >>> heatmaps = data.heatmaps # A list of 3 numpy.ndarrays - >>> masks = data.masks # A list of 3 torch.Tensors - - >>> # get level - >>> data_l0 = data[0] # PixelData with fields 'heatmaps' and 'masks' - >>> data.nlevel - 3 - - >>> # get shape - >>> data.shape - ((64, 48), (128, 96), (256, 192)) - - >>> # set - >>> offset_maps = [torch.rand(2, h, w) for h, w in sizes] - >>> data.offset_maps = offset_maps - """ - - def __init__(self, *, metainfo: Optional[dict] = None, **kwargs) -> None: - object.__setattr__(self, '_nlevel', None) - super().__init__(metainfo=metainfo, **kwargs) - - @property - def nlevel(self): - """Return the level number. - - Returns: - Optional[int]: The level number, or ``None`` if the data has not - been assigned. 
- """ - return self._nlevel - - def __getitem__(self, item: Union[int, str, list, - slice]) -> Union[PixelData, Sequence]: - if isinstance(item, int): - if self.nlevel is None or item >= self.nlevel: - raise IndexError( - f'Lcale index {item} out of range ({self.nlevel})') - return self.get(f'_level_{item}') - - if isinstance(item, str): - if item not in self: - raise KeyError(item) - return getattr(self, item) - - # TODO: support indexing by list and slice over levels - raise NotImplementedError( - f'{self.__class__.__name__} does not support index type ' - f'{type(item)}') - - def levels(self) -> List[PixelData]: - if self.nlevel: - return list(self[i] for i in range(self.nlevel)) - return [] - - @property - def shape(self) -> Optional[Tuple[Tuple]]: - """Get the shape of multi-level pixel data. - - Returns: - Optional[tuple]: A tuple of data shape at each level, or ``None`` - if the data has not been assigned. - """ - if self.nlevel is None: - return None - - return tuple(level.shape for level in self.levels()) - - def set_data(self, data: dict) -> None: - """Set or change key-value pairs in ``data_field`` by parameter - ``data``. - - Args: - data (dict): A dict contains annotations of image or - model predictions. - """ - assert isinstance(data, - dict), f'meta should be a `dict` but got {data}' - for k, v in data.items(): - self.set_field(v, k, field_type='data') - - def set_field(self, - value: Any, - name: str, - dtype: Optional[Union[Type, Tuple[Type, ...]]] = None, - field_type: str = 'data') -> None: - """Special method for set union field, used as property.setter - functions.""" - assert field_type in ['metainfo', 'data'] - if dtype is not None: - assert isinstance( - value, - dtype), f'{value} should be a {dtype} but got {type(value)}' - - if name.startswith('_level_'): - raise AttributeError( - f'Cannot set {name} to be a field because the pattern ' - '<_level_{n}> is reserved for inner data field') - - if field_type == 'metainfo': - if name in self._data_fields: - raise AttributeError( - f'Cannot set {name} to be a field of metainfo ' - f'because {name} is already a data field') - self._metainfo_fields.add(name) - - else: - if name in self._metainfo_fields: - raise AttributeError( - f'Cannot set {name} to be a field of data ' - f'because {name} is already a metainfo field') - - if not isinstance(value, abc.Sequence): - raise TypeError( - 'The value should be a sequence (of numpy.ndarray or' - f'torch.Tesnor), but got a {type(value)}') - - if len(value) == 0: - raise ValueError('Setting empty value is not allowed') - - if not isinstance(value[0], (torch.Tensor, np.ndarray)): - raise TypeError( - 'The value should be a sequence of numpy.ndarray or' - f'torch.Tesnor, but got a sequence of {type(value[0])}') - - if self.nlevel is not None: - assert len(value) == self.nlevel, ( - f'The length of the value ({len(value)}) should match the' - f'number of the levels ({self.nlevel})') - else: - object.__setattr__(self, '_nlevel', len(value)) - for i in range(self.nlevel): - object.__setattr__(self, f'_level_{i}', PixelData()) - - for i, v in enumerate(value): - self[i].set_field(v, name, field_type='data') - - self._data_fields.add(name) - - object.__setattr__(self, name, value) - - def __delattr__(self, item: str): - """delete the item in dataelement. - - Args: - item (str): The key to delete. - """ - if item in ('_metainfo_fields', '_data_fields'): - raise AttributeError(f'{item} has been used as a ' - 'private attribute, which is immutable. 
') - - if item in self._metainfo_fields: - super().__delattr__(item) - else: - for level in self.levels(): - level.__delattr__(item) - self._data_fields.remove(item) - - def __getattr__(self, name): - if name in {'_data_fields', '_metainfo_fields' - } or name not in self._data_fields: - raise AttributeError( - f'\'{self.__class__.__name__}\' object has no attribute ' - f'\'{name}\'') - - return [getattr(level, name) for level in self.levels()] - - def pop(self, *args) -> Any: - """pop property in data and metainfo as the same as python.""" - assert len(args) < 3, '``pop`` get more than 2 arguments' - name = args[0] - if name in self._metainfo_fields: - self._metainfo_fields.remove(name) - return self.__dict__.pop(*args) - - elif name in self._data_fields: - self._data_fields.remove(name) - return [level.pop(*args) for level in self.levels()] - - # with default value - elif len(args) == 2: - return args[1] - else: - # don't just use 'self.__dict__.pop(*args)' for only popping key in - # metainfo or data - raise KeyError(f'{args[0]} is not contained in metainfo or data') - - def _convert(self, apply_to: Type, - func: Callable[[Any], Any]) -> 'MultilevelPixelData': - """Convert data items with the given function. - - Args: - apply_to (Type): The type of data items to apply the conversion - func (Callable): The conversion function that takes a data item - as the input and return the converted result - - Returns: - MultilevelPixelData: the converted data element. - """ - new_data = self.new() - for k, v in self.items(): - if is_list_of(v, apply_to): - v = [func(_v) for _v in v] - data = {k: v} - new_data.set_data(data) - return new_data - - def cpu(self) -> 'MultilevelPixelData': - """Convert all tensors to CPU in data.""" - return self._convert(apply_to=torch.Tensor, func=lambda x: x.cpu()) - - def cuda(self) -> 'MultilevelPixelData': - """Convert all tensors to GPU in data.""" - return self._convert(apply_to=torch.Tensor, func=lambda x: x.cuda()) - - def detach(self) -> 'MultilevelPixelData': - """Detach all tensors in data.""" - return self._convert(apply_to=torch.Tensor, func=lambda x: x.detach()) - - def numpy(self) -> 'MultilevelPixelData': - """Convert all tensor to np.narray in data.""" - return self._convert( - apply_to=torch.Tensor, func=lambda x: x.detach().cpu().numpy()) - - def to_tensor(self) -> 'MultilevelPixelData': - """Convert all tensor to np.narray in data.""" - return self._convert( - apply_to=np.ndarray, func=lambda x: torch.from_numpy(x)) - - # Tensor-like methods - def to(self, *args, **kwargs) -> 'MultilevelPixelData': - """Apply same name function to all tensors in data_fields.""" - new_data = self.new() - for k, v in self.items(): - if hasattr(v[0], 'to'): - v = [v_.to(*args, **kwargs) for v_ in v] - data = {k: v} - new_data.set_data(data) - return new_data +# Copyright (c) OpenMMLab. All rights reserved. +from collections import abc +from typing import Any, Callable, List, Optional, Sequence, Tuple, Type, Union + +import numpy as np +import torch +from mmengine.structures import BaseDataElement, PixelData +from mmengine.utils import is_list_of + +IndexType = Union[str, slice, int, list, torch.LongTensor, + torch.cuda.LongTensor, torch.BoolTensor, + torch.cuda.BoolTensor, np.ndarray] + + +class MultilevelPixelData(BaseDataElement): + """Data structure for multi-level pixel-wise annotations or predictions. 
+ + All data items in ``data_fields`` of ``MultilevelPixelData`` are lists + of np.ndarray or torch.Tensor, and should meet the following requirements: + + - Have the same length, which is the number of levels + - At each level, the data should have 3 dimensions in order of channel, + height and weight + - At each level, the data should have the same height and weight + + Examples: + >>> metainfo = dict(num_keypoints=17) + >>> sizes = [(64, 48), (128, 96), (256, 192)] + >>> heatmaps = [np.random.rand(17, h, w) for h, w in sizes] + >>> masks = [torch.rand(1, h, w) for h, w in sizes] + >>> data = MultilevelPixelData(metainfo=metainfo, + ... heatmaps=heatmaps, + ... masks=masks) + + >>> # get data item + >>> heatmaps = data.heatmaps # A list of 3 numpy.ndarrays + >>> masks = data.masks # A list of 3 torch.Tensors + + >>> # get level + >>> data_l0 = data[0] # PixelData with fields 'heatmaps' and 'masks' + >>> data.nlevel + 3 + + >>> # get shape + >>> data.shape + ((64, 48), (128, 96), (256, 192)) + + >>> # set + >>> offset_maps = [torch.rand(2, h, w) for h, w in sizes] + >>> data.offset_maps = offset_maps + """ + + def __init__(self, *, metainfo: Optional[dict] = None, **kwargs) -> None: + object.__setattr__(self, '_nlevel', None) + super().__init__(metainfo=metainfo, **kwargs) + + @property + def nlevel(self): + """Return the level number. + + Returns: + Optional[int]: The level number, or ``None`` if the data has not + been assigned. + """ + return self._nlevel + + def __getitem__(self, item: Union[int, str, list, + slice]) -> Union[PixelData, Sequence]: + if isinstance(item, int): + if self.nlevel is None or item >= self.nlevel: + raise IndexError( + f'Lcale index {item} out of range ({self.nlevel})') + return self.get(f'_level_{item}') + + if isinstance(item, str): + if item not in self: + raise KeyError(item) + return getattr(self, item) + + # TODO: support indexing by list and slice over levels + raise NotImplementedError( + f'{self.__class__.__name__} does not support index type ' + f'{type(item)}') + + def levels(self) -> List[PixelData]: + if self.nlevel: + return list(self[i] for i in range(self.nlevel)) + return [] + + @property + def shape(self) -> Optional[Tuple[Tuple]]: + """Get the shape of multi-level pixel data. + + Returns: + Optional[tuple]: A tuple of data shape at each level, or ``None`` + if the data has not been assigned. + """ + if self.nlevel is None: + return None + + return tuple(level.shape for level in self.levels()) + + def set_data(self, data: dict) -> None: + """Set or change key-value pairs in ``data_field`` by parameter + ``data``. + + Args: + data (dict): A dict contains annotations of image or + model predictions. 
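# Illustrative sketch, not from the MMPose source: how the MultilevelPixelData
# container documented above behaves for per-level access and type/device
# conversion. The sizes and field names are arbitrary example values; it
# assumes mmpose is importable.
import numpy as np
from mmpose.structures import MultilevelPixelData

sizes = [(64, 48), (128, 96)]
data = MultilevelPixelData(
    heatmaps=[np.random.rand(17, h, w) for h, w in sizes])

level0 = data[0]                # a PixelData holding the 64x48 heatmaps
as_tensor = data.to_tensor()    # every np.ndarray becomes a torch.Tensor
on_cpu = as_tensor.cpu()        # conversions are applied level by level
print(data.nlevel, data.shape)  # 2 ((64, 48), (128, 96))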
+ """ + assert isinstance(data, + dict), f'meta should be a `dict` but got {data}' + for k, v in data.items(): + self.set_field(v, k, field_type='data') + + def set_field(self, + value: Any, + name: str, + dtype: Optional[Union[Type, Tuple[Type, ...]]] = None, + field_type: str = 'data') -> None: + """Special method for set union field, used as property.setter + functions.""" + assert field_type in ['metainfo', 'data'] + if dtype is not None: + assert isinstance( + value, + dtype), f'{value} should be a {dtype} but got {type(value)}' + + if name.startswith('_level_'): + raise AttributeError( + f'Cannot set {name} to be a field because the pattern ' + '<_level_{n}> is reserved for inner data field') + + if field_type == 'metainfo': + if name in self._data_fields: + raise AttributeError( + f'Cannot set {name} to be a field of metainfo ' + f'because {name} is already a data field') + self._metainfo_fields.add(name) + + else: + if name in self._metainfo_fields: + raise AttributeError( + f'Cannot set {name} to be a field of data ' + f'because {name} is already a metainfo field') + + if not isinstance(value, abc.Sequence): + raise TypeError( + 'The value should be a sequence (of numpy.ndarray or' + f'torch.Tesnor), but got a {type(value)}') + + if len(value) == 0: + raise ValueError('Setting empty value is not allowed') + + if not isinstance(value[0], (torch.Tensor, np.ndarray)): + raise TypeError( + 'The value should be a sequence of numpy.ndarray or' + f'torch.Tesnor, but got a sequence of {type(value[0])}') + + if self.nlevel is not None: + assert len(value) == self.nlevel, ( + f'The length of the value ({len(value)}) should match the' + f'number of the levels ({self.nlevel})') + else: + object.__setattr__(self, '_nlevel', len(value)) + for i in range(self.nlevel): + object.__setattr__(self, f'_level_{i}', PixelData()) + + for i, v in enumerate(value): + self[i].set_field(v, name, field_type='data') + + self._data_fields.add(name) + + object.__setattr__(self, name, value) + + def __delattr__(self, item: str): + """delete the item in dataelement. + + Args: + item (str): The key to delete. + """ + if item in ('_metainfo_fields', '_data_fields'): + raise AttributeError(f'{item} has been used as a ' + 'private attribute, which is immutable. ') + + if item in self._metainfo_fields: + super().__delattr__(item) + else: + for level in self.levels(): + level.__delattr__(item) + self._data_fields.remove(item) + + def __getattr__(self, name): + if name in {'_data_fields', '_metainfo_fields' + } or name not in self._data_fields: + raise AttributeError( + f'\'{self.__class__.__name__}\' object has no attribute ' + f'\'{name}\'') + + return [getattr(level, name) for level in self.levels()] + + def pop(self, *args) -> Any: + """pop property in data and metainfo as the same as python.""" + assert len(args) < 3, '``pop`` get more than 2 arguments' + name = args[0] + if name in self._metainfo_fields: + self._metainfo_fields.remove(name) + return self.__dict__.pop(*args) + + elif name in self._data_fields: + self._data_fields.remove(name) + return [level.pop(*args) for level in self.levels()] + + # with default value + elif len(args) == 2: + return args[1] + else: + # don't just use 'self.__dict__.pop(*args)' for only popping key in + # metainfo or data + raise KeyError(f'{args[0]} is not contained in metainfo or data') + + def _convert(self, apply_to: Type, + func: Callable[[Any], Any]) -> 'MultilevelPixelData': + """Convert data items with the given function. 
+ + Args: + apply_to (Type): The type of data items to apply the conversion + func (Callable): The conversion function that takes a data item + as the input and return the converted result + + Returns: + MultilevelPixelData: the converted data element. + """ + new_data = self.new() + for k, v in self.items(): + if is_list_of(v, apply_to): + v = [func(_v) for _v in v] + data = {k: v} + new_data.set_data(data) + return new_data + + def cpu(self) -> 'MultilevelPixelData': + """Convert all tensors to CPU in data.""" + return self._convert(apply_to=torch.Tensor, func=lambda x: x.cpu()) + + def cuda(self) -> 'MultilevelPixelData': + """Convert all tensors to GPU in data.""" + return self._convert(apply_to=torch.Tensor, func=lambda x: x.cuda()) + + def detach(self) -> 'MultilevelPixelData': + """Detach all tensors in data.""" + return self._convert(apply_to=torch.Tensor, func=lambda x: x.detach()) + + def numpy(self) -> 'MultilevelPixelData': + """Convert all tensor to np.narray in data.""" + return self._convert( + apply_to=torch.Tensor, func=lambda x: x.detach().cpu().numpy()) + + def to_tensor(self) -> 'MultilevelPixelData': + """Convert all tensor to np.narray in data.""" + return self._convert( + apply_to=np.ndarray, func=lambda x: torch.from_numpy(x)) + + # Tensor-like methods + def to(self, *args, **kwargs) -> 'MultilevelPixelData': + """Apply same name function to all tensors in data_fields.""" + new_data = self.new() + for k, v in self.items(): + if hasattr(v[0], 'to'): + v = [v_.to(*args, **kwargs) for v_ in v] + data = {k: v} + new_data.set_data(data) + return new_data diff --git a/mmpose/structures/pose_data_sample.py b/mmpose/structures/pose_data_sample.py index 2c1d69034e..56d1b4cf1e 100644 --- a/mmpose/structures/pose_data_sample.py +++ b/mmpose/structures/pose_data_sample.py @@ -1,104 +1,104 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Union - -from mmengine.structures import BaseDataElement, InstanceData, PixelData - -from mmpose.structures import MultilevelPixelData - - -class PoseDataSample(BaseDataElement): - """The base data structure of MMPose that is used as the interface between - modules. - - The attributes of ``PoseDataSample`` includes: - - - ``gt_instances``(InstanceData): Ground truth of instances with - keypoint annotations - - ``pred_instances``(InstanceData): Instances with keypoint - predictions - - ``gt_fields``(PixelData): Ground truth of spatial distribution - annotations like keypoint heatmaps and part affine fields (PAF) - - ``pred_fields``(PixelData): Predictions of spatial distributions - - Examples: - >>> import torch - >>> from mmengine.structures import InstanceData, PixelData - >>> from mmpose.structures import PoseDataSample - - >>> pose_meta = dict(img_shape=(800, 1216), - ... crop_size=(256, 192), - ... heatmap_size=(64, 48)) - >>> gt_instances = InstanceData() - >>> gt_instances.bboxes = torch.rand((1, 4)) - >>> gt_instances.keypoints = torch.rand((1, 17, 2)) - >>> gt_instances.keypoints_visible = torch.rand((1, 17, 1)) - >>> gt_fields = PixelData() - >>> gt_fields.heatmaps = torch.rand((17, 64, 48)) - - >>> data_sample = PoseDataSample(gt_instances=gt_instances, - ... gt_fields=gt_fields, - ... 
metainfo=pose_meta) - >>> assert 'img_shape' in data_sample - >>> len(data_sample.gt_intances) - 1 - """ - - @property - def gt_instances(self) -> InstanceData: - return self._gt_instances - - @gt_instances.setter - def gt_instances(self, value: InstanceData): - self.set_field(value, '_gt_instances', dtype=InstanceData) - - @gt_instances.deleter - def gt_instances(self): - del self._gt_instances - - @property - def gt_instance_labels(self) -> InstanceData: - return self._gt_instance_labels - - @gt_instance_labels.setter - def gt_instance_labels(self, value: InstanceData): - self.set_field(value, '_gt_instance_labels', dtype=InstanceData) - - @gt_instance_labels.deleter - def gt_instance_labels(self): - del self._gt_instance_labels - - @property - def pred_instances(self) -> InstanceData: - return self._pred_instances - - @pred_instances.setter - def pred_instances(self, value: InstanceData): - self.set_field(value, '_pred_instances', dtype=InstanceData) - - @pred_instances.deleter - def pred_instances(self): - del self._pred_instances - - @property - def gt_fields(self) -> Union[PixelData, MultilevelPixelData]: - return self._gt_fields - - @gt_fields.setter - def gt_fields(self, value: Union[PixelData, MultilevelPixelData]): - self.set_field(value, '_gt_fields', dtype=type(value)) - - @gt_fields.deleter - def gt_fields(self): - del self._gt_fields - - @property - def pred_fields(self) -> PixelData: - return self._pred_heatmaps - - @pred_fields.setter - def pred_fields(self, value: PixelData): - self.set_field(value, '_pred_heatmaps', dtype=PixelData) - - @pred_fields.deleter - def pred_fields(self): - del self._pred_heatmaps +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Union + +from mmengine.structures import BaseDataElement, InstanceData, PixelData + +from mmpose.structures import MultilevelPixelData + + +class PoseDataSample(BaseDataElement): + """The base data structure of MMPose that is used as the interface between + modules. + + The attributes of ``PoseDataSample`` includes: + + - ``gt_instances``(InstanceData): Ground truth of instances with + keypoint annotations + - ``pred_instances``(InstanceData): Instances with keypoint + predictions + - ``gt_fields``(PixelData): Ground truth of spatial distribution + annotations like keypoint heatmaps and part affine fields (PAF) + - ``pred_fields``(PixelData): Predictions of spatial distributions + + Examples: + >>> import torch + >>> from mmengine.structures import InstanceData, PixelData + >>> from mmpose.structures import PoseDataSample + + >>> pose_meta = dict(img_shape=(800, 1216), + ... crop_size=(256, 192), + ... heatmap_size=(64, 48)) + >>> gt_instances = InstanceData() + >>> gt_instances.bboxes = torch.rand((1, 4)) + >>> gt_instances.keypoints = torch.rand((1, 17, 2)) + >>> gt_instances.keypoints_visible = torch.rand((1, 17, 1)) + >>> gt_fields = PixelData() + >>> gt_fields.heatmaps = torch.rand((17, 64, 48)) + + >>> data_sample = PoseDataSample(gt_instances=gt_instances, + ... gt_fields=gt_fields, + ... 
metainfo=pose_meta) + >>> assert 'img_shape' in data_sample + >>> len(data_sample.gt_intances) + 1 + """ + + @property + def gt_instances(self) -> InstanceData: + return self._gt_instances + + @gt_instances.setter + def gt_instances(self, value: InstanceData): + self.set_field(value, '_gt_instances', dtype=InstanceData) + + @gt_instances.deleter + def gt_instances(self): + del self._gt_instances + + @property + def gt_instance_labels(self) -> InstanceData: + return self._gt_instance_labels + + @gt_instance_labels.setter + def gt_instance_labels(self, value: InstanceData): + self.set_field(value, '_gt_instance_labels', dtype=InstanceData) + + @gt_instance_labels.deleter + def gt_instance_labels(self): + del self._gt_instance_labels + + @property + def pred_instances(self) -> InstanceData: + return self._pred_instances + + @pred_instances.setter + def pred_instances(self, value: InstanceData): + self.set_field(value, '_pred_instances', dtype=InstanceData) + + @pred_instances.deleter + def pred_instances(self): + del self._pred_instances + + @property + def gt_fields(self) -> Union[PixelData, MultilevelPixelData]: + return self._gt_fields + + @gt_fields.setter + def gt_fields(self, value: Union[PixelData, MultilevelPixelData]): + self.set_field(value, '_gt_fields', dtype=type(value)) + + @gt_fields.deleter + def gt_fields(self): + del self._gt_fields + + @property + def pred_fields(self) -> PixelData: + return self._pred_heatmaps + + @pred_fields.setter + def pred_fields(self, value: PixelData): + self.set_field(value, '_pred_heatmaps', dtype=PixelData) + + @pred_fields.deleter + def pred_fields(self): + del self._pred_heatmaps diff --git a/mmpose/structures/utils.py b/mmpose/structures/utils.py index 882cda8603..132b8f6397 100644 --- a/mmpose/structures/utils.py +++ b/mmpose/structures/utils.py @@ -1,138 +1,138 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings -from typing import List - -import cv2 -import numpy as np -import torch -from mmengine.structures import InstanceData, PixelData -from mmengine.utils import is_list_of - -from .bbox.transforms import get_warp_matrix -from .pose_data_sample import PoseDataSample - - -def merge_data_samples(data_samples: List[PoseDataSample]) -> PoseDataSample: - """Merge the given data samples into a single data sample. - - This function can be used to merge the top-down predictions with - bboxes from the same image. The merged data sample will contain all - instances from the input data samples, and the identical metainfo with - the first input data sample. - - Args: - data_samples (List[:obj:`PoseDataSample`]): The data samples to - merge - - Returns: - PoseDataSample: The merged data sample. 
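# Illustrative sketch, not from the MMPose source: merging per-bbox top-down
# predictions from one image with merge_data_samples, as described above. The
# dummy arrays only illustrate the expected shapes.
import numpy as np
from mmengine.structures import InstanceData
from mmpose.structures import PoseDataSample
from mmpose.structures.utils import merge_data_samples

samples = []
for _ in range(2):  # e.g. two detected persons in the same image
    pred = InstanceData()
    pred.keypoints = np.random.rand(1, 17, 2)
    pred.keypoint_scores = np.random.rand(1, 17)
    samples.append(
        PoseDataSample(
            pred_instances=pred, metainfo=dict(img_shape=(480, 640))))

merged = merge_data_samples(samples)
print(len(merged.pred_instances))  # 2; metainfo is taken from samples[0]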
- """ - - if not is_list_of(data_samples, PoseDataSample): - raise ValueError('Invalid input type, should be a list of ' - ':obj:`PoseDataSample`') - - if len(data_samples) == 0: - warnings.warn('Try to merge an empty list of data samples.') - return PoseDataSample() - - merged = PoseDataSample(metainfo=data_samples[0].metainfo) - - if 'gt_instances' in data_samples[0]: - merged.gt_instances = InstanceData.cat( - [d.gt_instances for d in data_samples]) - - if 'pred_instances' in data_samples[0]: - merged.pred_instances = InstanceData.cat( - [d.pred_instances for d in data_samples]) - - if 'pred_fields' in data_samples[0] and 'heatmaps' in data_samples[ - 0].pred_fields: - reverted_heatmaps = [ - revert_heatmap(data_sample.pred_fields.heatmaps, - data_sample.gt_instances.bbox_centers, - data_sample.gt_instances.bbox_scales, - data_sample.ori_shape) - for data_sample in data_samples - ] - - merged_heatmaps = np.max(reverted_heatmaps, axis=0) - pred_fields = PixelData() - pred_fields.set_data(dict(heatmaps=merged_heatmaps)) - merged.pred_fields = pred_fields - - if 'gt_fields' in data_samples[0] and 'heatmaps' in data_samples[ - 0].gt_fields: - reverted_heatmaps = [ - revert_heatmap(data_sample.gt_fields.heatmaps, - data_sample.gt_instances.bbox_centers, - data_sample.gt_instances.bbox_scales, - data_sample.ori_shape) - for data_sample in data_samples - ] - - merged_heatmaps = np.max(reverted_heatmaps, axis=0) - gt_fields = PixelData() - gt_fields.set_data(dict(heatmaps=merged_heatmaps)) - merged.gt_fields = gt_fields - - return merged - - -def revert_heatmap(heatmap, bbox_center, bbox_scale, img_shape): - """Revert predicted heatmap on the original image. - - Args: - heatmap (np.ndarray or torch.tensor): predicted heatmap. - bbox_center (np.ndarray): bounding box center coordinate. - bbox_scale (np.ndarray): bounding box scale. - img_shape (tuple or list): size of original image. - """ - if torch.is_tensor(heatmap): - heatmap = heatmap.cpu().detach().numpy() - - ndim = heatmap.ndim - # [K, H, W] -> [H, W, K] - if ndim == 3: - heatmap = heatmap.transpose(1, 2, 0) - - hm_h, hm_w = heatmap.shape[:2] - img_h, img_w = img_shape - warp_mat = get_warp_matrix( - bbox_center.reshape((2, )), - bbox_scale.reshape((2, )), - rot=0, - output_size=(hm_w, hm_h), - inv=True) - - heatmap = cv2.warpAffine( - heatmap, warp_mat, (img_w, img_h), flags=cv2.INTER_LINEAR) - - # [H, W, K] -> [K, H, W] - if ndim == 3: - heatmap = heatmap.transpose(2, 0, 1) - - return heatmap - - -def split_instances(instances: InstanceData) -> List[InstanceData]: - """Convert instances into a list where each element is a dict that contains - information about one instance.""" - results = [] - - # return an empty list if there is no instance detected by the model - if instances is None: - return results - - for i in range(len(instances.keypoints)): - result = dict( - keypoints=instances.keypoints[i].tolist(), - keypoint_scores=instances.keypoint_scores[i].tolist(), - ) - if 'bboxes' in instances: - result['bbox'] = instances.bboxes[i].tolist(), - if 'bbox_scores' in instances: - result['bbox_score'] = instances.bbox_scores[i] - results.append(result) - - return results +# Copyright (c) OpenMMLab. All rights reserved. 
+import warnings +from typing import List + +import cv2 +import numpy as np +import torch +from mmengine.structures import InstanceData, PixelData +from mmengine.utils import is_list_of + +from .bbox.transforms import get_warp_matrix +from .pose_data_sample import PoseDataSample + + +def merge_data_samples(data_samples: List[PoseDataSample]) -> PoseDataSample: + """Merge the given data samples into a single data sample. + + This function can be used to merge the top-down predictions with + bboxes from the same image. The merged data sample will contain all + instances from the input data samples, and the identical metainfo with + the first input data sample. + + Args: + data_samples (List[:obj:`PoseDataSample`]): The data samples to + merge + + Returns: + PoseDataSample: The merged data sample. + """ + + if not is_list_of(data_samples, PoseDataSample): + raise ValueError('Invalid input type, should be a list of ' + ':obj:`PoseDataSample`') + + if len(data_samples) == 0: + warnings.warn('Try to merge an empty list of data samples.') + return PoseDataSample() + + merged = PoseDataSample(metainfo=data_samples[0].metainfo) + + if 'gt_instances' in data_samples[0]: + merged.gt_instances = InstanceData.cat( + [d.gt_instances for d in data_samples]) + + if 'pred_instances' in data_samples[0]: + merged.pred_instances = InstanceData.cat( + [d.pred_instances for d in data_samples]) + + if 'pred_fields' in data_samples[0] and 'heatmaps' in data_samples[ + 0].pred_fields: + reverted_heatmaps = [ + revert_heatmap(data_sample.pred_fields.heatmaps, + data_sample.gt_instances.bbox_centers, + data_sample.gt_instances.bbox_scales, + data_sample.ori_shape) + for data_sample in data_samples + ] + + merged_heatmaps = np.max(reverted_heatmaps, axis=0) + pred_fields = PixelData() + pred_fields.set_data(dict(heatmaps=merged_heatmaps)) + merged.pred_fields = pred_fields + + if 'gt_fields' in data_samples[0] and 'heatmaps' in data_samples[ + 0].gt_fields: + reverted_heatmaps = [ + revert_heatmap(data_sample.gt_fields.heatmaps, + data_sample.gt_instances.bbox_centers, + data_sample.gt_instances.bbox_scales, + data_sample.ori_shape) + for data_sample in data_samples + ] + + merged_heatmaps = np.max(reverted_heatmaps, axis=0) + gt_fields = PixelData() + gt_fields.set_data(dict(heatmaps=merged_heatmaps)) + merged.gt_fields = gt_fields + + return merged + + +def revert_heatmap(heatmap, bbox_center, bbox_scale, img_shape): + """Revert predicted heatmap on the original image. + + Args: + heatmap (np.ndarray or torch.tensor): predicted heatmap. + bbox_center (np.ndarray): bounding box center coordinate. + bbox_scale (np.ndarray): bounding box scale. + img_shape (tuple or list): size of original image. 
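# Illustrative sketch, not from the MMPose source: projecting a cropped-person
# heatmap back onto the original image canvas with revert_heatmap. The bbox
# center/scale values are dummies chosen only to make the call self-contained.
import numpy as np
from mmpose.structures.utils import revert_heatmap

heatmap = np.random.rand(17, 64, 48)     # [K, H, W] in heatmap space
bbox_center = np.array([320., 240.])     # crop center in the original image
bbox_scale = np.array([192., 256.])      # crop size in original-image pixels
full = revert_heatmap(heatmap, bbox_center, bbox_scale, img_shape=(480, 640))
print(full.shape)                        # (17, 480, 640)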
+ """ + if torch.is_tensor(heatmap): + heatmap = heatmap.cpu().detach().numpy() + + ndim = heatmap.ndim + # [K, H, W] -> [H, W, K] + if ndim == 3: + heatmap = heatmap.transpose(1, 2, 0) + + hm_h, hm_w = heatmap.shape[:2] + img_h, img_w = img_shape + warp_mat = get_warp_matrix( + bbox_center.reshape((2, )), + bbox_scale.reshape((2, )), + rot=0, + output_size=(hm_w, hm_h), + inv=True) + + heatmap = cv2.warpAffine( + heatmap, warp_mat, (img_w, img_h), flags=cv2.INTER_LINEAR) + + # [H, W, K] -> [K, H, W] + if ndim == 3: + heatmap = heatmap.transpose(2, 0, 1) + + return heatmap + + +def split_instances(instances: InstanceData) -> List[InstanceData]: + """Convert instances into a list where each element is a dict that contains + information about one instance.""" + results = [] + + # return an empty list if there is no instance detected by the model + if instances is None: + return results + + for i in range(len(instances.keypoints)): + result = dict( + keypoints=instances.keypoints[i].tolist(), + keypoint_scores=instances.keypoint_scores[i].tolist(), + ) + if 'bboxes' in instances: + result['bbox'] = instances.bboxes[i].tolist(), + if 'bbox_scores' in instances: + result['bbox_score'] = instances.bbox_scores[i] + results.append(result) + + return results diff --git a/mmpose/testing/__init__.py b/mmpose/testing/__init__.py index 5612dac6c6..de4f28e6fc 100644 --- a/mmpose/testing/__init__.py +++ b/mmpose/testing/__init__.py @@ -1,8 +1,8 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ._utils import (get_coco_sample, get_config_file, get_packed_inputs, - get_pose_estimator_cfg, get_repo_dir) - -__all__ = [ - 'get_packed_inputs', 'get_coco_sample', 'get_config_file', - 'get_pose_estimator_cfg', 'get_repo_dir' -] +# Copyright (c) OpenMMLab. All rights reserved. +from ._utils import (get_coco_sample, get_config_file, get_packed_inputs, + get_pose_estimator_cfg, get_repo_dir) + +__all__ = [ + 'get_packed_inputs', 'get_coco_sample', 'get_config_file', + 'get_pose_estimator_cfg', 'get_repo_dir' +] diff --git a/mmpose/testing/_utils.py b/mmpose/testing/_utils.py index 1908129be8..5b0a5c5a31 100644 --- a/mmpose/testing/_utils.py +++ b/mmpose/testing/_utils.py @@ -1,248 +1,248 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
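# Illustrative sketch, not from the MMPose source: split_instances turns the
# packed InstanceData of one image into a list of per-instance dicts, which is
# convenient for JSON dumping. The arrays are dummy predictions.
import numpy as np
from mmengine.structures import InstanceData
from mmpose.structures.utils import split_instances

instances = InstanceData()
instances.keypoints = np.random.rand(2, 17, 2)
instances.keypoint_scores = np.random.rand(2, 17)
instances.bboxes = np.random.rand(2, 4)
instances.bbox_scores = np.random.rand(2)

per_person = split_instances(instances)
print(sorted(per_person[0].keys()))
# ['bbox', 'bbox_score', 'keypoint_scores', 'keypoints']; note that 'bbox'
# arrives wrapped in a 1-tuple because of the trailing comma in the
# assignment inside split_instances above.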
-import os.path as osp -from copy import deepcopy -from typing import Optional - -import numpy as np -import torch -from mmengine.config import Config -from mmengine.dataset import pseudo_collate -from mmengine.structures import InstanceData, PixelData - -from mmpose.structures import MultilevelPixelData, PoseDataSample -from mmpose.structures.bbox import bbox_xyxy2cs - - -def get_coco_sample( - img_shape=(240, 320), - img_fill: Optional[int] = None, - num_instances=1, - with_bbox_cs=True, - with_img_mask=False, - random_keypoints_visible=False, - non_occlusion=False): - """Create a dummy data sample in COCO style.""" - rng = np.random.RandomState(0) - h, w = img_shape - if img_fill is None: - img = np.random.randint(0, 256, (h, w, 3), dtype=np.uint8) - else: - img = np.full((h, w, 3), img_fill, dtype=np.uint8) - - if non_occlusion: - bbox = _rand_bboxes(rng, num_instances, w / num_instances, h) - for i in range(num_instances): - bbox[i, 0::2] += w / num_instances * i - else: - bbox = _rand_bboxes(rng, num_instances, w, h) - - keypoints = _rand_keypoints(rng, bbox, 17) - if random_keypoints_visible: - keypoints_visible = np.random.randint(0, 2, (num_instances, - 17)).astype(np.float32) - else: - keypoints_visible = np.full((num_instances, 17), 1, dtype=np.float32) - - upper_body_ids = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - lower_body_ids = [11, 12, 13, 14, 15, 16] - flip_pairs = [[2, 1], [1, 2], [4, 3], [3, 4], [6, 5], [5, 6], [8, 7], - [7, 8], [10, 9], [9, 10], [12, 11], [11, 12], [14, 13], - [13, 14], [16, 15], [15, 16]] - flip_indices = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15] - dataset_keypoint_weights = np.array([ - 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, - 1.5 - ]).astype(np.float32) - - data = { - 'img': img, - 'img_shape': img_shape, - 'ori_shape': img_shape, - 'bbox': bbox, - 'keypoints': keypoints, - 'keypoints_visible': keypoints_visible, - 'upper_body_ids': upper_body_ids, - 'lower_body_ids': lower_body_ids, - 'flip_pairs': flip_pairs, - 'flip_indices': flip_indices, - 'dataset_keypoint_weights': dataset_keypoint_weights, - 'invalid_segs': [], - } - - if with_bbox_cs: - data['bbox_center'], data['bbox_scale'] = bbox_xyxy2cs(data['bbox']) - - if with_img_mask: - data['img_mask'] = np.random.randint(0, 2, (h, w), dtype=np.uint8) - - return data - - -def get_packed_inputs(batch_size=2, - num_instances=1, - num_keypoints=17, - num_levels=1, - img_shape=(256, 192), - input_size=(192, 256), - heatmap_size=(48, 64), - simcc_split_ratio=2.0, - with_heatmap=True, - with_reg_label=True, - with_simcc_label=True): - """Create a dummy batch of model inputs and data samples.""" - rng = np.random.RandomState(0) - - inputs_list = [] - for idx in range(batch_size): - inputs = dict() - - # input - h, w = img_shape - image = rng.randint(0, 255, size=(3, h, w), dtype=np.uint8) - inputs['inputs'] = torch.from_numpy(image) - - # meta - img_meta = { - 'id': idx, - 'img_id': idx, - 'img_path': '.png', - 'img_shape': img_shape, - 'input_size': input_size, - 'flip': False, - 'flip_direction': None, - 'flip_indices': list(range(num_keypoints)) - } - - np.random.shuffle(img_meta['flip_indices']) - data_sample = PoseDataSample(metainfo=img_meta) - - # gt_instance - gt_instances = InstanceData() - gt_instance_labels = InstanceData() - bboxes = _rand_bboxes(rng, num_instances, w, h) - bbox_centers, bbox_scales = bbox_xyxy2cs(bboxes) - - keypoints = _rand_keypoints(rng, bboxes, num_keypoints) - keypoints_visible = np.ones((num_instances, num_keypoints), - 
dtype=np.float32) - - # [N, K] -> [N, num_levels, K] - # keep the first dimension as the num_instances - if num_levels > 1: - keypoint_weights = np.tile(keypoints_visible[:, None], - (1, num_levels, 1)) - else: - keypoint_weights = keypoints_visible.copy() - - gt_instances.bboxes = bboxes - gt_instances.bbox_centers = bbox_centers - gt_instances.bbox_scales = bbox_scales - gt_instances.bbox_scores = np.ones((num_instances, ), dtype=np.float32) - gt_instances.keypoints = keypoints - gt_instances.keypoints_visible = keypoints_visible - - gt_instance_labels.keypoint_weights = torch.FloatTensor( - keypoint_weights) - - if with_reg_label: - gt_instance_labels.keypoint_labels = torch.FloatTensor(keypoints / - input_size) - - if with_simcc_label: - len_x = np.around(input_size[0] * simcc_split_ratio) - len_y = np.around(input_size[1] * simcc_split_ratio) - gt_instance_labels.keypoint_x_labels = torch.FloatTensor( - _rand_simcc_label(rng, num_instances, num_keypoints, len_x)) - gt_instance_labels.keypoint_y_labels = torch.FloatTensor( - _rand_simcc_label(rng, num_instances, num_keypoints, len_y)) - - # gt_fields - if with_heatmap: - if num_levels == 1: - gt_fields = PixelData() - # generate single-level heatmaps - W, H = heatmap_size - heatmaps = rng.rand(num_keypoints, H, W) - gt_fields.heatmaps = torch.FloatTensor(heatmaps) - else: - # generate multilevel heatmaps - heatmaps = [] - for _ in range(num_levels): - W, H = heatmap_size - heatmaps_ = rng.rand(num_keypoints, H, W) - heatmaps.append(torch.FloatTensor(heatmaps_)) - # [num_levels*K, H, W] - gt_fields = MultilevelPixelData() - gt_fields.heatmaps = heatmaps - data_sample.gt_fields = gt_fields - - data_sample.gt_instances = gt_instances - data_sample.gt_instance_labels = gt_instance_labels - - inputs['data_samples'] = data_sample - inputs_list.append(inputs) - - packed_inputs = pseudo_collate(inputs_list) - return packed_inputs - - -def _rand_keypoints(rng, bboxes, num_keypoints): - n = bboxes.shape[0] - relative_pos = rng.rand(n, num_keypoints, 2) - keypoints = relative_pos * bboxes[:, None, :2] + ( - 1 - relative_pos) * bboxes[:, None, 2:4] - - return keypoints - - -def _rand_simcc_label(rng, num_instances, num_keypoints, len_feats): - simcc_label = rng.rand(num_instances, num_keypoints, int(len_feats)) - return simcc_label - - -def _rand_bboxes(rng, num_instances, img_w, img_h): - cx, cy = rng.rand(num_instances, 2).T - bw, bh = 0.2 + 0.8 * rng.rand(num_instances, 2).T - - tl_x = ((cx * img_w) - (img_w * bw / 2)).clip(0, img_w) - tl_y = ((cy * img_h) - (img_h * bh / 2)).clip(0, img_h) - br_x = ((cx * img_w) + (img_w * bw / 2)).clip(0, img_w) - br_y = ((cy * img_h) + (img_h * bh / 2)).clip(0, img_h) - - bboxes = np.vstack([tl_x, tl_y, br_x, br_y]).T - return bboxes - - -def get_repo_dir(): - """Return the path of the MMPose repo directory.""" - try: - # Assume the function in invoked is the source mmpose repo - repo_dir = osp.dirname(osp.dirname(osp.dirname(__file__))) - except NameError: - # For IPython development when __file__ is not defined - import mmpose - repo_dir = osp.dirname(osp.dirname(mmpose.__file__)) - - return repo_dir - - -def get_config_file(fn: str): - """Return full path of a config file from the given relative path.""" - repo_dir = get_repo_dir() - if fn.startswith('configs'): - fn_config = osp.join(repo_dir, fn) - else: - fn_config = osp.join(repo_dir, 'configs', fn) - - if not osp.isfile(fn_config): - raise FileNotFoundError(f'Cannot find config file {fn_config}') - - return fn_config - - -def 
get_pose_estimator_cfg(fn: str): - """Load model config from a config file.""" - - fn_config = get_config_file(fn) - config = Config.fromfile(fn_config) - return deepcopy(config.model) +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from copy import deepcopy +from typing import Optional + +import numpy as np +import torch +from mmengine.config import Config +from mmengine.dataset import pseudo_collate +from mmengine.structures import InstanceData, PixelData + +from mmpose.structures import MultilevelPixelData, PoseDataSample +from mmpose.structures.bbox import bbox_xyxy2cs + + +def get_coco_sample( + img_shape=(240, 320), + img_fill: Optional[int] = None, + num_instances=1, + with_bbox_cs=True, + with_img_mask=False, + random_keypoints_visible=False, + non_occlusion=False): + """Create a dummy data sample in COCO style.""" + rng = np.random.RandomState(0) + h, w = img_shape + if img_fill is None: + img = np.random.randint(0, 256, (h, w, 3), dtype=np.uint8) + else: + img = np.full((h, w, 3), img_fill, dtype=np.uint8) + + if non_occlusion: + bbox = _rand_bboxes(rng, num_instances, w / num_instances, h) + for i in range(num_instances): + bbox[i, 0::2] += w / num_instances * i + else: + bbox = _rand_bboxes(rng, num_instances, w, h) + + keypoints = _rand_keypoints(rng, bbox, 17) + if random_keypoints_visible: + keypoints_visible = np.random.randint(0, 2, (num_instances, + 17)).astype(np.float32) + else: + keypoints_visible = np.full((num_instances, 17), 1, dtype=np.float32) + + upper_body_ids = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + lower_body_ids = [11, 12, 13, 14, 15, 16] + flip_pairs = [[2, 1], [1, 2], [4, 3], [3, 4], [6, 5], [5, 6], [8, 7], + [7, 8], [10, 9], [9, 10], [12, 11], [11, 12], [14, 13], + [13, 14], [16, 15], [15, 16]] + flip_indices = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15] + dataset_keypoint_weights = np.array([ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5 + ]).astype(np.float32) + + data = { + 'img': img, + 'img_shape': img_shape, + 'ori_shape': img_shape, + 'bbox': bbox, + 'keypoints': keypoints, + 'keypoints_visible': keypoints_visible, + 'upper_body_ids': upper_body_ids, + 'lower_body_ids': lower_body_ids, + 'flip_pairs': flip_pairs, + 'flip_indices': flip_indices, + 'dataset_keypoint_weights': dataset_keypoint_weights, + 'invalid_segs': [], + } + + if with_bbox_cs: + data['bbox_center'], data['bbox_scale'] = bbox_xyxy2cs(data['bbox']) + + if with_img_mask: + data['img_mask'] = np.random.randint(0, 2, (h, w), dtype=np.uint8) + + return data + + +def get_packed_inputs(batch_size=2, + num_instances=1, + num_keypoints=17, + num_levels=1, + img_shape=(256, 192), + input_size=(192, 256), + heatmap_size=(48, 64), + simcc_split_ratio=2.0, + with_heatmap=True, + with_reg_label=True, + with_simcc_label=True): + """Create a dummy batch of model inputs and data samples.""" + rng = np.random.RandomState(0) + + inputs_list = [] + for idx in range(batch_size): + inputs = dict() + + # input + h, w = img_shape + image = rng.randint(0, 255, size=(3, h, w), dtype=np.uint8) + inputs['inputs'] = torch.from_numpy(image) + + # meta + img_meta = { + 'id': idx, + 'img_id': idx, + 'img_path': '.png', + 'img_shape': img_shape, + 'input_size': input_size, + 'flip': False, + 'flip_direction': None, + 'flip_indices': list(range(num_keypoints)) + } + + np.random.shuffle(img_meta['flip_indices']) + data_sample = PoseDataSample(metainfo=img_meta) + + # gt_instance + gt_instances = InstanceData() + gt_instance_labels = 
InstanceData() + bboxes = _rand_bboxes(rng, num_instances, w, h) + bbox_centers, bbox_scales = bbox_xyxy2cs(bboxes) + + keypoints = _rand_keypoints(rng, bboxes, num_keypoints) + keypoints_visible = np.ones((num_instances, num_keypoints), + dtype=np.float32) + + # [N, K] -> [N, num_levels, K] + # keep the first dimension as the num_instances + if num_levels > 1: + keypoint_weights = np.tile(keypoints_visible[:, None], + (1, num_levels, 1)) + else: + keypoint_weights = keypoints_visible.copy() + + gt_instances.bboxes = bboxes + gt_instances.bbox_centers = bbox_centers + gt_instances.bbox_scales = bbox_scales + gt_instances.bbox_scores = np.ones((num_instances, ), dtype=np.float32) + gt_instances.keypoints = keypoints + gt_instances.keypoints_visible = keypoints_visible + + gt_instance_labels.keypoint_weights = torch.FloatTensor( + keypoint_weights) + + if with_reg_label: + gt_instance_labels.keypoint_labels = torch.FloatTensor(keypoints / + input_size) + + if with_simcc_label: + len_x = np.around(input_size[0] * simcc_split_ratio) + len_y = np.around(input_size[1] * simcc_split_ratio) + gt_instance_labels.keypoint_x_labels = torch.FloatTensor( + _rand_simcc_label(rng, num_instances, num_keypoints, len_x)) + gt_instance_labels.keypoint_y_labels = torch.FloatTensor( + _rand_simcc_label(rng, num_instances, num_keypoints, len_y)) + + # gt_fields + if with_heatmap: + if num_levels == 1: + gt_fields = PixelData() + # generate single-level heatmaps + W, H = heatmap_size + heatmaps = rng.rand(num_keypoints, H, W) + gt_fields.heatmaps = torch.FloatTensor(heatmaps) + else: + # generate multilevel heatmaps + heatmaps = [] + for _ in range(num_levels): + W, H = heatmap_size + heatmaps_ = rng.rand(num_keypoints, H, W) + heatmaps.append(torch.FloatTensor(heatmaps_)) + # [num_levels*K, H, W] + gt_fields = MultilevelPixelData() + gt_fields.heatmaps = heatmaps + data_sample.gt_fields = gt_fields + + data_sample.gt_instances = gt_instances + data_sample.gt_instance_labels = gt_instance_labels + + inputs['data_samples'] = data_sample + inputs_list.append(inputs) + + packed_inputs = pseudo_collate(inputs_list) + return packed_inputs + + +def _rand_keypoints(rng, bboxes, num_keypoints): + n = bboxes.shape[0] + relative_pos = rng.rand(n, num_keypoints, 2) + keypoints = relative_pos * bboxes[:, None, :2] + ( + 1 - relative_pos) * bboxes[:, None, 2:4] + + return keypoints + + +def _rand_simcc_label(rng, num_instances, num_keypoints, len_feats): + simcc_label = rng.rand(num_instances, num_keypoints, int(len_feats)) + return simcc_label + + +def _rand_bboxes(rng, num_instances, img_w, img_h): + cx, cy = rng.rand(num_instances, 2).T + bw, bh = 0.2 + 0.8 * rng.rand(num_instances, 2).T + + tl_x = ((cx * img_w) - (img_w * bw / 2)).clip(0, img_w) + tl_y = ((cy * img_h) - (img_h * bh / 2)).clip(0, img_h) + br_x = ((cx * img_w) + (img_w * bw / 2)).clip(0, img_w) + br_y = ((cy * img_h) + (img_h * bh / 2)).clip(0, img_h) + + bboxes = np.vstack([tl_x, tl_y, br_x, br_y]).T + return bboxes + + +def get_repo_dir(): + """Return the path of the MMPose repo directory.""" + try: + # Assume the function in invoked is the source mmpose repo + repo_dir = osp.dirname(osp.dirname(osp.dirname(__file__))) + except NameError: + # For IPython development when __file__ is not defined + import mmpose + repo_dir = osp.dirname(osp.dirname(mmpose.__file__)) + + return repo_dir + + +def get_config_file(fn: str): + """Return full path of a config file from the given relative path.""" + repo_dir = get_repo_dir() + if fn.startswith('configs'): + 
fn_config = osp.join(repo_dir, fn) + else: + fn_config = osp.join(repo_dir, 'configs', fn) + + if not osp.isfile(fn_config): + raise FileNotFoundError(f'Cannot find config file {fn_config}') + + return fn_config + + +def get_pose_estimator_cfg(fn: str): + """Load model config from a config file.""" + + fn_config = get_config_file(fn) + config = Config.fromfile(fn_config) + return deepcopy(config.model) diff --git a/mmpose/utils/__init__.py b/mmpose/utils/__init__.py index c48ca01cea..09966bd606 100644 --- a/mmpose/utils/__init__.py +++ b/mmpose/utils/__init__.py @@ -1,13 +1,13 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .camera import SimpleCamera, SimpleCameraTorch -from .collect_env import collect_env -from .config_utils import adapt_mmdet_pipeline -from .logger import get_root_logger -from .setup_env import register_all_modules, setup_multi_processes -from .timer import StopWatch - -__all__ = [ - 'get_root_logger', 'collect_env', 'StopWatch', 'setup_multi_processes', - 'register_all_modules', 'SimpleCamera', 'SimpleCameraTorch', - 'adapt_mmdet_pipeline' -] +# Copyright (c) OpenMMLab. All rights reserved. +from .camera import SimpleCamera, SimpleCameraTorch +from .collect_env import collect_env +from .config_utils import adapt_mmdet_pipeline +from .logger import get_root_logger +from .setup_env import register_all_modules, setup_multi_processes +from .timer import StopWatch + +__all__ = [ + 'get_root_logger', 'collect_env', 'StopWatch', 'setup_multi_processes', + 'register_all_modules', 'SimpleCamera', 'SimpleCameraTorch', + 'adapt_mmdet_pipeline' +] diff --git a/mmpose/utils/camera.py b/mmpose/utils/camera.py index a7759d308f..795789261b 100644 --- a/mmpose/utils/camera.py +++ b/mmpose/utils/camera.py @@ -1,280 +1,280 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from abc import ABCMeta, abstractmethod - -import numpy as np -import torch -from mmengine.registry import Registry - -CAMERAS = Registry('camera') - - -class SingleCameraBase(metaclass=ABCMeta): - """Base class for single camera model. - - Args: - param (dict): Camera parameters - - Methods: - world_to_camera: Project points from world coordinates to camera - coordinates - camera_to_world: Project points from camera coordinates to world - coordinates - camera_to_pixel: Project points from camera coordinates to pixel - coordinates - world_to_pixel: Project points from world coordinates to pixel - coordinates - """ - - @abstractmethod - def __init__(self, param): - """Load camera parameters and check validity.""" - - def world_to_camera(self, X): - """Project points from world coordinates to camera coordinates.""" - raise NotImplementedError - - def camera_to_world(self, X): - """Project points from camera coordinates to world coordinates.""" - raise NotImplementedError - - def camera_to_pixel(self, X): - """Project points from camera coordinates to pixel coordinates.""" - raise NotImplementedError - - def world_to_pixel(self, X): - """Project points from world coordinates to pixel coordinates.""" - _X = self.world_to_camera(X) - return self.camera_to_pixel(_X) - - -@CAMERAS.register_module() -class SimpleCamera(SingleCameraBase): - """Camera model to calculate coordinate transformation with given - intrinsic/extrinsic camera parameters. - - Note: - The keypoint coordinate should be an np.ndarray with a shape of - [...,J, C] where J is the keypoint number of an instance, and C is - the coordinate dimension. For example: - - [J, C]: shape of joint coordinates of a person with J joints. 
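# Illustrative sketch, not an actual MMPose unit test: the testing helpers
# above are typically used to fabricate dummy samples and batches when
# smoke-testing models and codecs. All argument values here are arbitrary.
from mmpose.testing import get_coco_sample, get_packed_inputs

sample = get_coco_sample(img_shape=(240, 320), num_instances=2)
print(sample['keypoints'].shape)     # (2, 17, 2)

packed = get_packed_inputs(batch_size=2, heatmap_size=(48, 64))
print(len(packed['data_samples']))   # 2 dummy PoseDataSamples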
- [N, J, C]: shape of a batch of person joint coordinates. - [N, T, J, C]: shape of a batch of pose sequences. - - Args: - param (dict): camera parameters including: - - R: 3x3, camera rotation matrix (camera-to-world) - - T: 3x1, camera translation (camera-to-world) - - K: (optional) 2x3, camera intrinsic matrix - - k: (optional) nx1, camera radial distortion coefficients - - p: (optional) mx1, camera tangential distortion coefficients - - f: (optional) 2x1, camera focal length - - c: (optional) 2x1, camera center - if K is not provided, it will be calculated from f and c. - - Methods: - world_to_camera: Project points from world coordinates to camera - coordinates - camera_to_pixel: Project points from camera coordinates to pixel - coordinates - world_to_pixel: Project points from world coordinates to pixel - coordinates - """ - - def __init__(self, param): - - self.param = {} - # extrinsic param - R = np.array(param['R'], dtype=np.float32) - T = np.array(param['T'], dtype=np.float32) - assert R.shape == (3, 3) - assert T.shape == (3, 1) - # The camera matrices are transposed in advance because the joint - # coordinates are stored as row vectors. - self.param['R_c2w'] = R.T - self.param['T_c2w'] = T.T - self.param['R_w2c'] = R - self.param['T_w2c'] = -self.param['T_c2w'] @ self.param['R_w2c'] - - # intrinsic param - if 'K' in param: - K = np.array(param['K'], dtype=np.float32) - assert K.shape == (2, 3) - self.param['K'] = K.T - self.param['f'] = np.array([K[0, 0], K[1, 1]])[:, np.newaxis] - self.param['c'] = np.array([K[0, 2], K[1, 2]])[:, np.newaxis] - elif 'f' in param and 'c' in param: - f = np.array(param['f'], dtype=np.float32) - c = np.array(param['c'], dtype=np.float32) - assert f.shape == (2, 1) - assert c.shape == (2, 1) - self.param['K'] = np.concatenate((np.diagflat(f), c), axis=-1).T - self.param['f'] = f - self.param['c'] = c - else: - raise ValueError('Camera intrinsic parameters are missing. 
' - 'Either "K" or "f"&"c" should be provided.') - - # distortion param - if 'k' in param and 'p' in param: - self.undistortion = True - self.param['k'] = np.array(param['k'], dtype=np.float32).flatten() - self.param['p'] = np.array(param['p'], dtype=np.float32).flatten() - assert self.param['k'].size in {3, 6} - assert self.param['p'].size == 2 - else: - self.undistortion = False - - def world_to_camera(self, X): - assert isinstance(X, np.ndarray) - assert X.ndim >= 2 and X.shape[-1] == 3 - return X @ self.param['R_w2c'] + self.param['T_w2c'] - - def camera_to_world(self, X): - assert isinstance(X, np.ndarray) - assert X.ndim >= 2 and X.shape[-1] == 3 - return X @ self.param['R_c2w'] + self.param['T_c2w'] - - def camera_to_pixel(self, X): - assert isinstance(X, np.ndarray) - assert X.ndim >= 2 and X.shape[-1] == 3 - - _X = X / X[..., 2:] - - if self.undistortion: - k = self.param['k'] - p = self.param['p'] - _X_2d = _X[..., :2] - r2 = (_X_2d**2).sum(-1) - radial = 1 + sum(ki * r2**(i + 1) for i, ki in enumerate(k[:3])) - if k.size == 6: - radial /= 1 + sum( - (ki * r2**(i + 1) for i, ki in enumerate(k[3:]))) - - tangential = 2 * (p[1] * _X[..., 0] + p[0] * _X[..., 1]) - - _X[..., :2] = _X_2d * (radial + tangential)[..., None] + np.outer( - r2, p[::-1]).reshape(_X_2d.shape) - return _X @ self.param['K'] - - def pixel_to_camera(self, X): - assert isinstance(X, np.ndarray) - assert X.ndim >= 2 and X.shape[-1] == 3 - _X = X.copy() - _X[:, :2] = (X[:, :2] - self.param['c'].T) / self.param['f'].T * X[:, - [2]] - return _X - - -@CAMERAS.register_module() -class SimpleCameraTorch(SingleCameraBase): - """Camera model to calculate coordinate transformation with given - intrinsic/extrinsic camera parameters. - - Notes: - The keypoint coordinate should be an np.ndarray with a shape of - [...,J, C] where J is the keypoint number of an instance, and C is - the coordinate dimension. For example: - - [J, C]: shape of joint coordinates of a person with J joints. - [N, J, C]: shape of a batch of person joint coordinates. - [N, T, J, C]: shape of a batch of pose sequences. - - Args: - param (dict): camera parameters including: - - R: 3x3, camera rotation matrix (camera-to-world) - - T: 3x1, camera translation (camera-to-world) - - K: (optional) 2x3, camera intrinsic matrix - - k: (optional) nx1, camera radial distortion coefficients - - p: (optional) mx1, camera tangential distortion coefficients - - f: (optional) 2x1, camera focal length - - c: (optional) 2x1, camera center - if K is not provided, it will be calculated from f and c. - - Methods: - world_to_camera: Project points from world coordinates to camera - coordinates - camera_to_pixel: Project points from camera coordinates to pixel - coordinates - world_to_pixel: Project points from world coordinates to pixel - coordinates - """ - - def __init__(self, param, device): - - self.param = {} - # extrinsic param - R = torch.tensor(param['R'], device=device) - T = torch.tensor(param['T'], device=device) - - assert R.shape == (3, 3) - assert T.shape == (3, 1) - # The camera matrices are transposed in advance because the joint - # coordinates are stored as row vectors. 
- self.param['R_c2w'] = R.T - self.param['T_c2w'] = T.T - self.param['R_w2c'] = R - self.param['T_w2c'] = -self.param['T_c2w'] @ self.param['R_w2c'] - - # intrinsic param - if 'K' in param: - K = torch.tensor(param['K'], device=device) - assert K.shape == (2, 3) - self.param['K'] = K.T - self.param['f'] = torch.tensor([[K[0, 0]], [K[1, 1]]], - device=device) - self.param['c'] = torch.tensor([[K[0, 2]], [K[1, 2]]], - device=device) - elif 'f' in param and 'c' in param: - f = torch.tensor(param['f'], device=device) - c = torch.tensor(param['c'], device=device) - assert f.shape == (2, 1) - assert c.shape == (2, 1) - self.param['K'] = torch.cat([torch.diagflat(f), c], dim=-1).T - self.param['f'] = f - self.param['c'] = c - else: - raise ValueError('Camera intrinsic parameters are missing. ' - 'Either "K" or "f"&"c" should be provided.') - - # distortion param - if 'k' in param and 'p' in param: - self.undistortion = True - self.param['k'] = torch.tensor(param['k'], device=device).view(-1) - self.param['p'] = torch.tensor(param['p'], device=device).view(-1) - assert len(self.param['k']) in {3, 6} - assert len(self.param['p']) == 2 - else: - self.undistortion = False - - def world_to_camera(self, X): - assert isinstance(X, torch.Tensor) - assert X.ndim >= 2 and X.shape[-1] == 3 - return X @ self.param['R_w2c'] + self.param['T_w2c'] - - def camera_to_world(self, X): - assert isinstance(X, torch.Tensor) - assert X.ndim >= 2 and X.shape[-1] == 3 - return X @ self.param['R_c2w'] + self.param['T_c2w'] - - def camera_to_pixel(self, X): - assert isinstance(X, torch.Tensor) - assert X.ndim >= 2 and X.shape[-1] == 3 - - _X = X / X[..., 2:] - - if self.undistortion: - k = self.param['k'] - p = self.param['p'] - _X_2d = _X[..., :2] - r2 = (_X_2d**2).sum(-1) - radial = 1 + sum(ki * r2**(i + 1) for i, ki in enumerate(k[:3])) - if k.size == 6: - radial /= 1 + sum( - (ki * r2**(i + 1) for i, ki in enumerate(k[3:]))) - - tangential = 2 * (p[1] * _X[..., 0] + p[0] * _X[..., 1]) - - _X[..., :2] = _X_2d * (radial + tangential)[..., None] + torch.ger( - r2, p.flip([0])).reshape(_X_2d.shape) - return _X @ self.param['K'] +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod + +import numpy as np +import torch +from mmengine.registry import Registry + +CAMERAS = Registry('camera') + + +class SingleCameraBase(metaclass=ABCMeta): + """Base class for single camera model. 
+ + Args: + param (dict): Camera parameters + + Methods: + world_to_camera: Project points from world coordinates to camera + coordinates + camera_to_world: Project points from camera coordinates to world + coordinates + camera_to_pixel: Project points from camera coordinates to pixel + coordinates + world_to_pixel: Project points from world coordinates to pixel + coordinates + """ + + @abstractmethod + def __init__(self, param): + """Load camera parameters and check validity.""" + + def world_to_camera(self, X): + """Project points from world coordinates to camera coordinates.""" + raise NotImplementedError + + def camera_to_world(self, X): + """Project points from camera coordinates to world coordinates.""" + raise NotImplementedError + + def camera_to_pixel(self, X): + """Project points from camera coordinates to pixel coordinates.""" + raise NotImplementedError + + def world_to_pixel(self, X): + """Project points from world coordinates to pixel coordinates.""" + _X = self.world_to_camera(X) + return self.camera_to_pixel(_X) + + +@CAMERAS.register_module() +class SimpleCamera(SingleCameraBase): + """Camera model to calculate coordinate transformation with given + intrinsic/extrinsic camera parameters. + + Note: + The keypoint coordinate should be an np.ndarray with a shape of + [...,J, C] where J is the keypoint number of an instance, and C is + the coordinate dimension. For example: + + [J, C]: shape of joint coordinates of a person with J joints. + [N, J, C]: shape of a batch of person joint coordinates. + [N, T, J, C]: shape of a batch of pose sequences. + + Args: + param (dict): camera parameters including: + - R: 3x3, camera rotation matrix (camera-to-world) + - T: 3x1, camera translation (camera-to-world) + - K: (optional) 2x3, camera intrinsic matrix + - k: (optional) nx1, camera radial distortion coefficients + - p: (optional) mx1, camera tangential distortion coefficients + - f: (optional) 2x1, camera focal length + - c: (optional) 2x1, camera center + if K is not provided, it will be calculated from f and c. + + Methods: + world_to_camera: Project points from world coordinates to camera + coordinates + camera_to_pixel: Project points from camera coordinates to pixel + coordinates + world_to_pixel: Project points from world coordinates to pixel + coordinates + """ + + def __init__(self, param): + + self.param = {} + # extrinsic param + R = np.array(param['R'], dtype=np.float32) + T = np.array(param['T'], dtype=np.float32) + assert R.shape == (3, 3) + assert T.shape == (3, 1) + # The camera matrices are transposed in advance because the joint + # coordinates are stored as row vectors. + self.param['R_c2w'] = R.T + self.param['T_c2w'] = T.T + self.param['R_w2c'] = R + self.param['T_w2c'] = -self.param['T_c2w'] @ self.param['R_w2c'] + + # intrinsic param + if 'K' in param: + K = np.array(param['K'], dtype=np.float32) + assert K.shape == (2, 3) + self.param['K'] = K.T + self.param['f'] = np.array([K[0, 0], K[1, 1]])[:, np.newaxis] + self.param['c'] = np.array([K[0, 2], K[1, 2]])[:, np.newaxis] + elif 'f' in param and 'c' in param: + f = np.array(param['f'], dtype=np.float32) + c = np.array(param['c'], dtype=np.float32) + assert f.shape == (2, 1) + assert c.shape == (2, 1) + self.param['K'] = np.concatenate((np.diagflat(f), c), axis=-1).T + self.param['f'] = f + self.param['c'] = c + else: + raise ValueError('Camera intrinsic parameters are missing. 
' + 'Either "K" or "f"&"c" should be provided.') + + # distortion param + if 'k' in param and 'p' in param: + self.undistortion = True + self.param['k'] = np.array(param['k'], dtype=np.float32).flatten() + self.param['p'] = np.array(param['p'], dtype=np.float32).flatten() + assert self.param['k'].size in {3, 6} + assert self.param['p'].size == 2 + else: + self.undistortion = False + + def world_to_camera(self, X): + assert isinstance(X, np.ndarray) + assert X.ndim >= 2 and X.shape[-1] == 3 + return X @ self.param['R_w2c'] + self.param['T_w2c'] + + def camera_to_world(self, X): + assert isinstance(X, np.ndarray) + assert X.ndim >= 2 and X.shape[-1] == 3 + return X @ self.param['R_c2w'] + self.param['T_c2w'] + + def camera_to_pixel(self, X): + assert isinstance(X, np.ndarray) + assert X.ndim >= 2 and X.shape[-1] == 3 + + _X = X / X[..., 2:] + + if self.undistortion: + k = self.param['k'] + p = self.param['p'] + _X_2d = _X[..., :2] + r2 = (_X_2d**2).sum(-1) + radial = 1 + sum(ki * r2**(i + 1) for i, ki in enumerate(k[:3])) + if k.size == 6: + radial /= 1 + sum( + (ki * r2**(i + 1) for i, ki in enumerate(k[3:]))) + + tangential = 2 * (p[1] * _X[..., 0] + p[0] * _X[..., 1]) + + _X[..., :2] = _X_2d * (radial + tangential)[..., None] + np.outer( + r2, p[::-1]).reshape(_X_2d.shape) + return _X @ self.param['K'] + + def pixel_to_camera(self, X): + assert isinstance(X, np.ndarray) + assert X.ndim >= 2 and X.shape[-1] == 3 + _X = X.copy() + _X[:, :2] = (X[:, :2] - self.param['c'].T) / self.param['f'].T * X[:, + [2]] + return _X + + +@CAMERAS.register_module() +class SimpleCameraTorch(SingleCameraBase): + """Camera model to calculate coordinate transformation with given + intrinsic/extrinsic camera parameters. + + Notes: + The keypoint coordinate should be an np.ndarray with a shape of + [...,J, C] where J is the keypoint number of an instance, and C is + the coordinate dimension. For example: + + [J, C]: shape of joint coordinates of a person with J joints. + [N, J, C]: shape of a batch of person joint coordinates. + [N, T, J, C]: shape of a batch of pose sequences. + + Args: + param (dict): camera parameters including: + - R: 3x3, camera rotation matrix (camera-to-world) + - T: 3x1, camera translation (camera-to-world) + - K: (optional) 2x3, camera intrinsic matrix + - k: (optional) nx1, camera radial distortion coefficients + - p: (optional) mx1, camera tangential distortion coefficients + - f: (optional) 2x1, camera focal length + - c: (optional) 2x1, camera center + if K is not provided, it will be calculated from f and c. + + Methods: + world_to_camera: Project points from world coordinates to camera + coordinates + camera_to_pixel: Project points from camera coordinates to pixel + coordinates + world_to_pixel: Project points from world coordinates to pixel + coordinates + """ + + def __init__(self, param, device): + + self.param = {} + # extrinsic param + R = torch.tensor(param['R'], device=device) + T = torch.tensor(param['T'], device=device) + + assert R.shape == (3, 3) + assert T.shape == (3, 1) + # The camera matrices are transposed in advance because the joint + # coordinates are stored as row vectors. 
+ self.param['R_c2w'] = R.T + self.param['T_c2w'] = T.T + self.param['R_w2c'] = R + self.param['T_w2c'] = -self.param['T_c2w'] @ self.param['R_w2c'] + + # intrinsic param + if 'K' in param: + K = torch.tensor(param['K'], device=device) + assert K.shape == (2, 3) + self.param['K'] = K.T + self.param['f'] = torch.tensor([[K[0, 0]], [K[1, 1]]], + device=device) + self.param['c'] = torch.tensor([[K[0, 2]], [K[1, 2]]], + device=device) + elif 'f' in param and 'c' in param: + f = torch.tensor(param['f'], device=device) + c = torch.tensor(param['c'], device=device) + assert f.shape == (2, 1) + assert c.shape == (2, 1) + self.param['K'] = torch.cat([torch.diagflat(f), c], dim=-1).T + self.param['f'] = f + self.param['c'] = c + else: + raise ValueError('Camera intrinsic parameters are missing. ' + 'Either "K" or "f"&"c" should be provided.') + + # distortion param + if 'k' in param and 'p' in param: + self.undistortion = True + self.param['k'] = torch.tensor(param['k'], device=device).view(-1) + self.param['p'] = torch.tensor(param['p'], device=device).view(-1) + assert len(self.param['k']) in {3, 6} + assert len(self.param['p']) == 2 + else: + self.undistortion = False + + def world_to_camera(self, X): + assert isinstance(X, torch.Tensor) + assert X.ndim >= 2 and X.shape[-1] == 3 + return X @ self.param['R_w2c'] + self.param['T_w2c'] + + def camera_to_world(self, X): + assert isinstance(X, torch.Tensor) + assert X.ndim >= 2 and X.shape[-1] == 3 + return X @ self.param['R_c2w'] + self.param['T_c2w'] + + def camera_to_pixel(self, X): + assert isinstance(X, torch.Tensor) + assert X.ndim >= 2 and X.shape[-1] == 3 + + _X = X / X[..., 2:] + + if self.undistortion: + k = self.param['k'] + p = self.param['p'] + _X_2d = _X[..., :2] + r2 = (_X_2d**2).sum(-1) + radial = 1 + sum(ki * r2**(i + 1) for i, ki in enumerate(k[:3])) + if k.size == 6: + radial /= 1 + sum( + (ki * r2**(i + 1) for i, ki in enumerate(k[3:]))) + + tangential = 2 * (p[1] * _X[..., 0] + p[0] * _X[..., 1]) + + _X[..., :2] = _X_2d * (radial + tangential)[..., None] + torch.ger( + r2, p.flip([0])).reshape(_X_2d.shape) + return _X @ self.param['K'] diff --git a/mmpose/utils/collect_env.py b/mmpose/utils/collect_env.py index e8fb5f35e1..e60c686172 100644 --- a/mmpose/utils/collect_env.py +++ b/mmpose/utils/collect_env.py @@ -1,16 +1,16 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmengine.utils import get_git_hash -from mmengine.utils.dl_utils import collect_env as collect_base_env - -import mmpose - - -def collect_env(): - env_info = collect_base_env() - env_info['MMPose'] = (mmpose.__version__ + '+' + get_git_hash(digits=7)) - return env_info - - -if __name__ == '__main__': - for name, val in collect_env().items(): - print(f'{name}: {val}') +# Copyright (c) OpenMMLab. All rights reserved. +from mmengine.utils import get_git_hash +from mmengine.utils.dl_utils import collect_env as collect_base_env + +import mmpose + + +def collect_env(): + env_info = collect_base_env() + env_info['MMPose'] = (mmpose.__version__ + '+' + get_git_hash(digits=7)) + return env_info + + +if __name__ == '__main__': + for name, val in collect_env().items(): + print(f'{name}: {val}') diff --git a/mmpose/utils/config_utils.py b/mmpose/utils/config_utils.py index 2f54d2ef24..62f618e4ff 100644 --- a/mmpose/utils/config_utils.py +++ b/mmpose/utils/config_utils.py @@ -1,26 +1,26 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from mmpose.utils.typing import ConfigDict - - -def adapt_mmdet_pipeline(cfg: ConfigDict) -> ConfigDict: - """Converts pipeline types in MMDetection's test dataloader to use the - 'mmdet' namespace. - - Args: - cfg (ConfigDict): Configuration dictionary for MMDetection. - - Returns: - ConfigDict: Configuration dictionary with updated pipeline types. - """ - # use lazy import to avoid hard dependence on mmdet - from mmdet.datasets import transforms - - if 'test_dataloader' not in cfg: - return cfg - - pipeline = cfg.test_dataloader.dataset.pipeline - for trans in pipeline: - if trans['type'] in dir(transforms): - trans['type'] = 'mmdet.' + trans['type'] - - return cfg +# Copyright (c) OpenMMLab. All rights reserved. +from mmpose.utils.typing import ConfigDict + + +def adapt_mmdet_pipeline(cfg: ConfigDict) -> ConfigDict: + """Converts pipeline types in MMDetection's test dataloader to use the + 'mmdet' namespace. + + Args: + cfg (ConfigDict): Configuration dictionary for MMDetection. + + Returns: + ConfigDict: Configuration dictionary with updated pipeline types. + """ + # use lazy import to avoid hard dependence on mmdet + from mmdet.datasets import transforms + + if 'test_dataloader' not in cfg: + return cfg + + pipeline = cfg.test_dataloader.dataset.pipeline + for trans in pipeline: + if trans['type'] in dir(transforms): + trans['type'] = 'mmdet.' + trans['type'] + + return cfg diff --git a/mmpose/utils/hooks.py b/mmpose/utils/hooks.py index b68940f2b7..a5cfb4f0f8 100644 --- a/mmpose/utils/hooks.py +++ b/mmpose/utils/hooks.py @@ -1,60 +1,60 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import functools - - -class OutputHook: - - def __init__(self, module, outputs=None, as_tensor=False): - self.outputs = outputs - self.as_tensor = as_tensor - self.layer_outputs = {} - self.register(module) - - def register(self, module): - - def hook_wrapper(name): - - def hook(model, input, output): - if self.as_tensor: - self.layer_outputs[name] = output - else: - if isinstance(output, list): - self.layer_outputs[name] = [ - out.detach().cpu().numpy() for out in output - ] - else: - self.layer_outputs[name] = output.detach().cpu().numpy( - ) - - return hook - - self.handles = [] - if isinstance(self.outputs, (list, tuple)): - for name in self.outputs: - try: - layer = rgetattr(module, name) - h = layer.register_forward_hook(hook_wrapper(name)) - except ModuleNotFoundError as module_not_found: - raise ModuleNotFoundError( - f'Module {name} not found') from module_not_found - self.handles.append(h) - - def remove(self): - for h in self.handles: - h.remove() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.remove() - - -# using wonder's beautiful simplification: -# https://stackoverflow.com/questions/31174295/getattr-and-setattr-on-nested-objects -def rgetattr(obj, attr, *args): - - def _getattr(obj, attr): - return getattr(obj, attr, *args) - - return functools.reduce(_getattr, [obj] + attr.split('.')) +# Copyright (c) OpenMMLab. All rights reserved. 
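The `OutputHook` helper shown in this hunk registers forward hooks on named sub-modules and caches their outputs (as numpy arrays unless `as_tensor=True`). A minimal usage sketch, using a hypothetical toy model rather than a real MMPose network:

import torch
from torch import nn

from mmpose.utils.hooks import OutputHook


class ToyModel(nn.Module):
    """A toy two-layer model used only for illustration."""

    def __init__(self):
        super().__init__()
        self.backbone = nn.Linear(8, 16)
        self.head = nn.Linear(16, 2)

    def forward(self, x):
        return self.head(self.backbone(x))


model = ToyModel()
# Capture the output of the sub-module named 'backbone' during a forward pass.
with OutputHook(model, outputs=['backbone'], as_tensor=False) as hook:
    model(torch.randn(1, 8))
    feats = hook.layer_outputs['backbone']   # numpy array of shape (1, 16)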
+import functools + + +class OutputHook: + + def __init__(self, module, outputs=None, as_tensor=False): + self.outputs = outputs + self.as_tensor = as_tensor + self.layer_outputs = {} + self.register(module) + + def register(self, module): + + def hook_wrapper(name): + + def hook(model, input, output): + if self.as_tensor: + self.layer_outputs[name] = output + else: + if isinstance(output, list): + self.layer_outputs[name] = [ + out.detach().cpu().numpy() for out in output + ] + else: + self.layer_outputs[name] = output.detach().cpu().numpy( + ) + + return hook + + self.handles = [] + if isinstance(self.outputs, (list, tuple)): + for name in self.outputs: + try: + layer = rgetattr(module, name) + h = layer.register_forward_hook(hook_wrapper(name)) + except ModuleNotFoundError as module_not_found: + raise ModuleNotFoundError( + f'Module {name} not found') from module_not_found + self.handles.append(h) + + def remove(self): + for h in self.handles: + h.remove() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.remove() + + +# using wonder's beautiful simplification: +# https://stackoverflow.com/questions/31174295/getattr-and-setattr-on-nested-objects +def rgetattr(obj, attr, *args): + + def _getattr(obj, attr): + return getattr(obj, attr, *args) + + return functools.reduce(_getattr, [obj] + attr.split('.')) diff --git a/mmpose/utils/logger.py b/mmpose/utils/logger.py index f67e56efeb..6edd46cbc0 100644 --- a/mmpose/utils/logger.py +++ b/mmpose/utils/logger.py @@ -1,25 +1,25 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import logging - -from mmengine.logging import MMLogger - - -def get_root_logger(log_file=None, log_level=logging.INFO): - """Use `MMLogger` class in mmengine to get the root logger. - - The logger will be initialized if it has not been initialized. By default a - StreamHandler will be added. If `log_file` is specified, a FileHandler will - also be added. The name of the root logger is the top-level package name, - e.g., "mmpose". - - Args: - log_file (str | None): The log filename. If specified, a FileHandler - will be added to the root logger. - log_level (int): The root logger level. Note that only the process of - rank 0 is affected, while other processes will set the level to - "Error" and be silent most of the time. - - Returns: - logging.Logger: The root logger. - """ - return MMLogger('MMLogger', __name__.split('.')[0], log_file, log_level) +# Copyright (c) OpenMMLab. All rights reserved. +import logging + +from mmengine.logging import MMLogger + + +def get_root_logger(log_file=None, log_level=logging.INFO): + """Use `MMLogger` class in mmengine to get the root logger. + + The logger will be initialized if it has not been initialized. By default a + StreamHandler will be added. If `log_file` is specified, a FileHandler will + also be added. The name of the root logger is the top-level package name, + e.g., "mmpose". + + Args: + log_file (str | None): The log filename. If specified, a FileHandler + will be added to the root logger. + log_level (int): The root logger level. Note that only the process of + rank 0 is affected, while other processes will set the level to + "Error" and be silent most of the time. + + Returns: + logging.Logger: The root logger. 
+ """ + return MMLogger('MMLogger', __name__.split('.')[0], log_file, log_level) diff --git a/mmpose/utils/setup_env.py b/mmpose/utils/setup_env.py index ff299539ef..e0aad4fd04 100644 --- a/mmpose/utils/setup_env.py +++ b/mmpose/utils/setup_env.py @@ -1,86 +1,86 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import datetime -import os -import platform -import warnings - -import cv2 -import torch.multiprocessing as mp -from mmengine import DefaultScope - - -def setup_multi_processes(cfg): - """Setup multi-processing environment variables.""" - # set multi-process start method as `fork` to speed up the training - if platform.system() != 'Windows': - mp_start_method = cfg.get('mp_start_method', 'fork') - current_method = mp.get_start_method(allow_none=True) - if current_method is not None and current_method != mp_start_method: - warnings.warn( - f'Multi-processing start method `{mp_start_method}` is ' - f'different from the previous setting `{current_method}`.' - f'It will be force set to `{mp_start_method}`. You can change ' - f'this behavior by changing `mp_start_method` in your config.') - mp.set_start_method(mp_start_method, force=True) - - # disable opencv multithreading to avoid system being overloaded - opencv_num_threads = cfg.get('opencv_num_threads', 0) - cv2.setNumThreads(opencv_num_threads) - - # setup OMP threads - # This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py # noqa - if 'OMP_NUM_THREADS' not in os.environ and cfg.data.workers_per_gpu > 1: - omp_num_threads = 1 - warnings.warn( - f'Setting OMP_NUM_THREADS environment variable for each process ' - f'to be {omp_num_threads} in default, to avoid your system being ' - f'overloaded, please further tune the variable for optimal ' - f'performance in your application as needed.') - os.environ['OMP_NUM_THREADS'] = str(omp_num_threads) - - # setup MKL threads - if 'MKL_NUM_THREADS' not in os.environ and cfg.data.workers_per_gpu > 1: - mkl_num_threads = 1 - warnings.warn( - f'Setting MKL_NUM_THREADS environment variable for each process ' - f'to be {mkl_num_threads} in default, to avoid your system being ' - f'overloaded, please further tune the variable for optimal ' - f'performance in your application as needed.') - os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads) - - -def register_all_modules(init_default_scope: bool = True) -> None: - """Register all modules in mmpose into the registries. - - Args: - init_default_scope (bool): Whether initialize the mmpose default scope. - When `init_default_scope=True`, the global default scope will be - set to `mmpose`, and all registries will build modules from mmpose's - registry node. To understand more about the registry, please refer - to https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md - Defaults to True. 
- """ # noqa - - import mmpose.codecs # noqa: F401, F403 - import mmpose.datasets # noqa: F401,F403 - import mmpose.engine # noqa: F401,F403 - import mmpose.evaluation # noqa: F401,F403 - import mmpose.models # noqa: F401,F403 - import mmpose.visualization # noqa: F401,F403 - - if init_default_scope: - never_created = DefaultScope.get_current_instance() is None \ - or not DefaultScope.check_instance_created('mmpose') - if never_created: - DefaultScope.get_instance('mmpose', scope_name='mmpose') - return - current_scope = DefaultScope.get_current_instance() - if current_scope.scope_name != 'mmpose': - warnings.warn('The current default scope ' - f'"{current_scope.scope_name}" is not "mmpose", ' - '`register_all_modules` will force the current' - 'default scope to be "mmpose". If this is not ' - 'expected, please set `init_default_scope=False`.') - # avoid name conflict - new_instance_name = f'mmpose-{datetime.datetime.now()}' - DefaultScope.get_instance(new_instance_name, scope_name='mmpose') +# Copyright (c) OpenMMLab. All rights reserved. +import datetime +import os +import platform +import warnings + +import cv2 +import torch.multiprocessing as mp +from mmengine import DefaultScope + + +def setup_multi_processes(cfg): + """Setup multi-processing environment variables.""" + # set multi-process start method as `fork` to speed up the training + if platform.system() != 'Windows': + mp_start_method = cfg.get('mp_start_method', 'fork') + current_method = mp.get_start_method(allow_none=True) + if current_method is not None and current_method != mp_start_method: + warnings.warn( + f'Multi-processing start method `{mp_start_method}` is ' + f'different from the previous setting `{current_method}`.' + f'It will be force set to `{mp_start_method}`. You can change ' + f'this behavior by changing `mp_start_method` in your config.') + mp.set_start_method(mp_start_method, force=True) + + # disable opencv multithreading to avoid system being overloaded + opencv_num_threads = cfg.get('opencv_num_threads', 0) + cv2.setNumThreads(opencv_num_threads) + + # setup OMP threads + # This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py # noqa + if 'OMP_NUM_THREADS' not in os.environ and cfg.data.workers_per_gpu > 1: + omp_num_threads = 1 + warnings.warn( + f'Setting OMP_NUM_THREADS environment variable for each process ' + f'to be {omp_num_threads} in default, to avoid your system being ' + f'overloaded, please further tune the variable for optimal ' + f'performance in your application as needed.') + os.environ['OMP_NUM_THREADS'] = str(omp_num_threads) + + # setup MKL threads + if 'MKL_NUM_THREADS' not in os.environ and cfg.data.workers_per_gpu > 1: + mkl_num_threads = 1 + warnings.warn( + f'Setting MKL_NUM_THREADS environment variable for each process ' + f'to be {mkl_num_threads} in default, to avoid your system being ' + f'overloaded, please further tune the variable for optimal ' + f'performance in your application as needed.') + os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads) + + +def register_all_modules(init_default_scope: bool = True) -> None: + """Register all modules in mmpose into the registries. + + Args: + init_default_scope (bool): Whether initialize the mmpose default scope. + When `init_default_scope=True`, the global default scope will be + set to `mmpose`, and all registries will build modules from mmpose's + registry node. 
To understand more about the registry, please refer + to https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md + Defaults to True. + """ # noqa + + import mmpose.codecs # noqa: F401, F403 + import mmpose.datasets # noqa: F401,F403 + import mmpose.engine # noqa: F401,F403 + import mmpose.evaluation # noqa: F401,F403 + import mmpose.models # noqa: F401,F403 + import mmpose.visualization # noqa: F401,F403 + + if init_default_scope: + never_created = DefaultScope.get_current_instance() is None \ + or not DefaultScope.check_instance_created('mmpose') + if never_created: + DefaultScope.get_instance('mmpose', scope_name='mmpose') + return + current_scope = DefaultScope.get_current_instance() + if current_scope.scope_name != 'mmpose': + warnings.warn('The current default scope ' + f'"{current_scope.scope_name}" is not "mmpose", ' + '`register_all_modules` will force the current' + 'default scope to be "mmpose". If this is not ' + 'expected, please set `init_default_scope=False`.') + # avoid name conflict + new_instance_name = f'mmpose-{datetime.datetime.now()}' + DefaultScope.get_instance(new_instance_name, scope_name='mmpose') diff --git a/mmpose/utils/tensor_utils.py b/mmpose/utils/tensor_utils.py index 1be73f8991..95793b0d63 100644 --- a/mmpose/utils/tensor_utils.py +++ b/mmpose/utils/tensor_utils.py @@ -1,71 +1,71 @@ -# Copyright (c) OpenMMLab. All rights reserved. - -from typing import Any, Optional, Sequence, Union - -import numpy as np -import torch -from mmengine.utils import is_seq_of -from torch import Tensor - - -def to_numpy(x: Union[Tensor, Sequence[Tensor]], - return_device: bool = False, - unzip: bool = False) -> Union[np.ndarray, tuple]: - """Convert torch tensor to numpy.ndarray. - - Args: - x (Tensor | Sequence[Tensor]): A single tensor or a sequence of - tensors - return_device (bool): Whether return the tensor device. Defaults to - ``False`` - unzip (bool): Whether unzip the input sequence. Defaults to ``False`` - - Returns: - np.ndarray | tuple: If ``return_device`` is ``True``, return a tuple - of converted numpy array(s) and the device indicator; otherwise only - return the numpy array(s) - """ - - if isinstance(x, Tensor): - arrays = x.detach().cpu().numpy() - device = x.device - elif is_seq_of(x, Tensor): - if unzip: - # convert (A, B) -> [(A[0], B[0]), (A[1], B[1]), ...] - arrays = [ - tuple(to_numpy(_x[None, :]) for _x in _each) - for _each in zip(*x) - ] - else: - arrays = [to_numpy(_x) for _x in x] - - device = x[0].device - - else: - raise ValueError(f'Invalid input type {type(x)}') - - if return_device: - return arrays, device - else: - return arrays - - -def to_tensor(x: Union[np.ndarray, Sequence[np.ndarray]], - device: Optional[Any] = None) -> Union[Tensor, Sequence[Tensor]]: - """Convert numpy.ndarray to torch tensor. - - Args: - x (np.ndarray | Sequence[np.ndarray]): A single np.ndarray or a - sequence of tensors - tensor (Any, optional): The device indicator. Defaults to ``None`` - - Returns: - tuple: - - Tensor | Sequence[Tensor]: The converted Tensor or Tensor sequence - """ - if isinstance(x, np.ndarray): - return torch.tensor(x, device=device) - elif is_seq_of(x, np.ndarray): - return [to_tensor(_x, device=device) for _x in x] - else: - raise ValueError(f'Invalid input type {type(x)}') +# Copyright (c) OpenMMLab. All rights reserved. 
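The `to_numpy`/`to_tensor` pair above converts between torch tensors and numpy arrays while optionally keeping track of the original device. A short round-trip sketch (shapes are arbitrary):

import numpy as np
import torch

from mmpose.utils.tensor_utils import to_numpy, to_tensor

kpts = torch.rand(2, 17, 3)                      # e.g. a batch of 3D keypoints
arr, device = to_numpy(kpts, return_device=True)
assert isinstance(arr, np.ndarray) and arr.shape == (2, 17, 3)

# A sequence of arrays is converted element-wise back to tensors.
tensors = to_tensor([arr, arr], device=device)
assert all(isinstance(t, torch.Tensor) for t in tensors)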
+ +from typing import Any, Optional, Sequence, Union + +import numpy as np +import torch +from mmengine.utils import is_seq_of +from torch import Tensor + + +def to_numpy(x: Union[Tensor, Sequence[Tensor]], + return_device: bool = False, + unzip: bool = False) -> Union[np.ndarray, tuple]: + """Convert torch tensor to numpy.ndarray. + + Args: + x (Tensor | Sequence[Tensor]): A single tensor or a sequence of + tensors + return_device (bool): Whether return the tensor device. Defaults to + ``False`` + unzip (bool): Whether unzip the input sequence. Defaults to ``False`` + + Returns: + np.ndarray | tuple: If ``return_device`` is ``True``, return a tuple + of converted numpy array(s) and the device indicator; otherwise only + return the numpy array(s) + """ + + if isinstance(x, Tensor): + arrays = x.detach().cpu().numpy() + device = x.device + elif is_seq_of(x, Tensor): + if unzip: + # convert (A, B) -> [(A[0], B[0]), (A[1], B[1]), ...] + arrays = [ + tuple(to_numpy(_x[None, :]) for _x in _each) + for _each in zip(*x) + ] + else: + arrays = [to_numpy(_x) for _x in x] + + device = x[0].device + + else: + raise ValueError(f'Invalid input type {type(x)}') + + if return_device: + return arrays, device + else: + return arrays + + +def to_tensor(x: Union[np.ndarray, Sequence[np.ndarray]], + device: Optional[Any] = None) -> Union[Tensor, Sequence[Tensor]]: + """Convert numpy.ndarray to torch tensor. + + Args: + x (np.ndarray | Sequence[np.ndarray]): A single np.ndarray or a + sequence of tensors + tensor (Any, optional): The device indicator. Defaults to ``None`` + + Returns: + tuple: + - Tensor | Sequence[Tensor]: The converted Tensor or Tensor sequence + """ + if isinstance(x, np.ndarray): + return torch.tensor(x, device=device) + elif is_seq_of(x, np.ndarray): + return [to_tensor(_x, device=device) for _x in x] + else: + raise ValueError(f'Invalid input type {type(x)}') diff --git a/mmpose/utils/timer.py b/mmpose/utils/timer.py index c219c04069..66dab46a4d 100644 --- a/mmpose/utils/timer.py +++ b/mmpose/utils/timer.py @@ -1,117 +1,117 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from collections import defaultdict -from contextlib import contextmanager -from functools import partial - -import numpy as np -from mmengine import Timer - - -class RunningAverage(): - r"""A helper class to calculate running average in a sliding window. - - Args: - window (int): The size of the sliding window. - """ - - def __init__(self, window: int = 1): - self.window = window - self._data = [] - - def update(self, value): - """Update a new data sample.""" - self._data.append(value) - self._data = self._data[-self.window:] - - def average(self): - """Get the average value of current window.""" - return np.mean(self._data) - - -class StopWatch: - r"""A helper class to measure FPS and detailed time consuming of each phase - in a video processing loop or similar scenarios. - - Args: - window (int): The sliding window size to calculate the running average - of the time consuming. 
- - Example: - >>> from mmpose.utils import StopWatch - >>> import time - >>> stop_watch = StopWatch(window=10) - >>> with stop_watch.timeit('total'): - >>> time.sleep(0.1) - >>> # 'timeit' support nested use - >>> with stop_watch.timeit('phase1'): - >>> time.sleep(0.1) - >>> with stop_watch.timeit('phase2'): - >>> time.sleep(0.2) - >>> time.sleep(0.2) - >>> report = stop_watch.report() - """ - - def __init__(self, window=1): - self.window = window - self._record = defaultdict(partial(RunningAverage, window=self.window)) - self._timer_stack = [] - - @contextmanager - def timeit(self, timer_name='_FPS_'): - """Timing a code snippet with an assigned name. - - Args: - timer_name (str): The unique name of the interested code snippet to - handle multiple timers and generate reports. Note that '_FPS_' - is a special key that the measurement will be in `fps` instead - of `millisecond`. Also see `report` and `report_strings`. - Default: '_FPS_'. - Note: - This function should always be used in a `with` statement, as shown - in the example. - """ - self._timer_stack.append((timer_name, Timer())) - try: - yield - finally: - timer_name, timer = self._timer_stack.pop() - self._record[timer_name].update(timer.since_start()) - - def report(self, key=None): - """Report timing information. - - Returns: - dict: The key is the timer name and the value is the \ - corresponding average time consuming. - """ - result = { - name: r.average() * 1000. - for name, r in self._record.items() - } - - if '_FPS_' in result: - result['_FPS_'] = 1000. / result.pop('_FPS_') - - if key is None: - return result - return result[key] - - def report_strings(self): - """Report timing information in texture strings. - - Returns: - list(str): Each element is the information string of a timed \ - event, in format of '{timer_name}: {time_in_ms}'. \ - Specially, if timer_name is '_FPS_', the result will \ - be converted to fps. - """ - result = self.report() - strings = [] - if '_FPS_' in result: - strings.append(f'FPS: {result["_FPS_"]:>5.1f}') - strings += [f'{name}: {val:>3.0f}' for name, val in result.items()] - return strings - - def reset(self): - self._record = defaultdict(list) - self._active_timer_stack = [] +# Copyright (c) OpenMMLab. All rights reserved. +from collections import defaultdict +from contextlib import contextmanager +from functools import partial + +import numpy as np +from mmengine import Timer + + +class RunningAverage(): + r"""A helper class to calculate running average in a sliding window. + + Args: + window (int): The size of the sliding window. + """ + + def __init__(self, window: int = 1): + self.window = window + self._data = [] + + def update(self, value): + """Update a new data sample.""" + self._data.append(value) + self._data = self._data[-self.window:] + + def average(self): + """Get the average value of current window.""" + return np.mean(self._data) + + +class StopWatch: + r"""A helper class to measure FPS and detailed time consuming of each phase + in a video processing loop or similar scenarios. + + Args: + window (int): The sliding window size to calculate the running average + of the time consuming. 
+ + Example: + >>> from mmpose.utils import StopWatch + >>> import time + >>> stop_watch = StopWatch(window=10) + >>> with stop_watch.timeit('total'): + >>> time.sleep(0.1) + >>> # 'timeit' support nested use + >>> with stop_watch.timeit('phase1'): + >>> time.sleep(0.1) + >>> with stop_watch.timeit('phase2'): + >>> time.sleep(0.2) + >>> time.sleep(0.2) + >>> report = stop_watch.report() + """ + + def __init__(self, window=1): + self.window = window + self._record = defaultdict(partial(RunningAverage, window=self.window)) + self._timer_stack = [] + + @contextmanager + def timeit(self, timer_name='_FPS_'): + """Timing a code snippet with an assigned name. + + Args: + timer_name (str): The unique name of the interested code snippet to + handle multiple timers and generate reports. Note that '_FPS_' + is a special key that the measurement will be in `fps` instead + of `millisecond`. Also see `report` and `report_strings`. + Default: '_FPS_'. + Note: + This function should always be used in a `with` statement, as shown + in the example. + """ + self._timer_stack.append((timer_name, Timer())) + try: + yield + finally: + timer_name, timer = self._timer_stack.pop() + self._record[timer_name].update(timer.since_start()) + + def report(self, key=None): + """Report timing information. + + Returns: + dict: The key is the timer name and the value is the \ + corresponding average time consuming. + """ + result = { + name: r.average() * 1000. + for name, r in self._record.items() + } + + if '_FPS_' in result: + result['_FPS_'] = 1000. / result.pop('_FPS_') + + if key is None: + return result + return result[key] + + def report_strings(self): + """Report timing information in texture strings. + + Returns: + list(str): Each element is the information string of a timed \ + event, in format of '{timer_name}: {time_in_ms}'. \ + Specially, if timer_name is '_FPS_', the result will \ + be converted to fps. + """ + result = self.report() + strings = [] + if '_FPS_' in result: + strings.append(f'FPS: {result["_FPS_"]:>5.1f}') + strings += [f'{name}: {val:>3.0f}' for name, val in result.items()] + return strings + + def reset(self): + self._record = defaultdict(list) + self._active_timer_stack = [] diff --git a/mmpose/utils/typing.py b/mmpose/utils/typing.py index 557891b3b9..3549b13a87 100644 --- a/mmpose/utils/typing.py +++ b/mmpose/utils/typing.py @@ -1,29 +1,29 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
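`StopWatch` (defined in the timer.py hunk above) keeps a sliding-window running average per named timer and reports values in milliseconds, with the special `'_FPS_'` key converted to frames per second. A small sketch of a typical loop (sleep times and the resulting numbers are only illustrative):

import time

from mmpose.utils import StopWatch

stop_watch = StopWatch(window=10)
for _ in range(3):
    with stop_watch.timeit():            # default name '_FPS_': whole iteration
        with stop_watch.timeit('infer'):
            time.sleep(0.05)
        with stop_watch.timeit('draw'):
            time.sleep(0.01)

# Roughly {'infer': ~50.0, 'draw': ~10.0, '_FPS_': ~16.0} on an idle machine.
print(stop_watch.report())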
-from typing import Dict, List, Optional, Tuple, Union - -from mmengine.config import ConfigDict -from mmengine.structures import InstanceData, PixelData -from torch import Tensor - -from mmpose.structures import PoseDataSample - -# Type hint of config data -ConfigType = Union[ConfigDict, dict] -OptConfigType = Optional[ConfigType] -# Type hint of one or more config data -MultiConfig = Union[ConfigType, List[ConfigType]] -OptMultiConfig = Optional[MultiConfig] -# Type hint of data samples -SampleList = List[PoseDataSample] -OptSampleList = Optional[SampleList] -InstanceList = List[InstanceData] -PixelDataList = List[PixelData] -Predictions = Union[InstanceList, Tuple[InstanceList, PixelDataList]] -# Type hint of model outputs -ForwardResults = Union[Dict[str, Tensor], List[PoseDataSample], Tuple[Tensor], - Tensor] -# Type hint of features -# - Tuple[Tensor]: multi-level features extracted by the network -# - List[Tuple[Tensor]]: multiple feature pyramids for TTA -# - List[List[Tuple[Tensor]]]: multi-scale feature pyramids -Features = Union[Tuple[Tensor], List[Tuple[Tensor]], List[List[Tuple[Tensor]]]] +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional, Tuple, Union + +from mmengine.config import ConfigDict +from mmengine.structures import InstanceData, PixelData +from torch import Tensor + +from mmpose.structures import PoseDataSample + +# Type hint of config data +ConfigType = Union[ConfigDict, dict] +OptConfigType = Optional[ConfigType] +# Type hint of one or more config data +MultiConfig = Union[ConfigType, List[ConfigType]] +OptMultiConfig = Optional[MultiConfig] +# Type hint of data samples +SampleList = List[PoseDataSample] +OptSampleList = Optional[SampleList] +InstanceList = List[InstanceData] +PixelDataList = List[PixelData] +Predictions = Union[InstanceList, Tuple[InstanceList, PixelDataList]] +# Type hint of model outputs +ForwardResults = Union[Dict[str, Tensor], List[PoseDataSample], Tuple[Tensor], + Tensor] +# Type hint of features +# - Tuple[Tensor]: multi-level features extracted by the network +# - List[Tuple[Tensor]]: multiple feature pyramids for TTA +# - List[List[Tuple[Tensor]]]: multi-scale feature pyramids +Features = Union[Tuple[Tensor], List[Tuple[Tensor]], List[List[Tuple[Tensor]]]] diff --git a/mmpose/version.py b/mmpose/version.py index bf58664b39..924449a908 100644 --- a/mmpose/version.py +++ b/mmpose/version.py @@ -1,31 +1,31 @@ -# Copyright (c) Open-MMLab. All rights reserved. - -__version__ = '1.1.0' -short_version = __version__ - - -def parse_version_info(version_str): - """Parse a version string into a tuple. - - Args: - version_str (str): The version string. - Returns: - tuple[int | str]: The version info, e.g., "1.3.0" is parsed into - (1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1'). - """ - version_info = [] - for x in version_str.split('.'): - if x.isdigit(): - version_info.append(int(x)) - elif x.find('rc') != -1: - patch_version = x.split('rc') - version_info.append(int(patch_version[0])) - version_info.append(f'rc{patch_version[1]}') - elif x.find('b') != -1: - patch_version = x.split('b') - version_info.append(int(patch_version[0])) - version_info.append(f'b{patch_version[1]}') - return tuple(version_info) - - -version_info = parse_version_info(__version__) +# Copyright (c) Open-MMLab. All rights reserved. + +__version__ = '1.1.0' +short_version = __version__ + + +def parse_version_info(version_str): + """Parse a version string into a tuple. + + Args: + version_str (str): The version string. 
+ Returns: + tuple[int | str]: The version info, e.g., "1.3.0" is parsed into + (1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1'). + """ + version_info = [] + for x in version_str.split('.'): + if x.isdigit(): + version_info.append(int(x)) + elif x.find('rc') != -1: + patch_version = x.split('rc') + version_info.append(int(patch_version[0])) + version_info.append(f'rc{patch_version[1]}') + elif x.find('b') != -1: + patch_version = x.split('b') + version_info.append(int(patch_version[0])) + version_info.append(f'b{patch_version[1]}') + return tuple(version_info) + + +version_info = parse_version_info(__version__) diff --git a/mmpose/visualization/__init__.py b/mmpose/visualization/__init__.py index 4a18e8bc5b..a144d8762f 100644 --- a/mmpose/visualization/__init__.py +++ b/mmpose/visualization/__init__.py @@ -1,6 +1,6 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .fast_visualizer import FastVisualizer -from .local_visualizer import PoseLocalVisualizer -from .local_visualizer_3d import Pose3dLocalVisualizer - -__all__ = ['PoseLocalVisualizer', 'FastVisualizer', 'Pose3dLocalVisualizer'] +# Copyright (c) OpenMMLab. All rights reserved. +from .fast_visualizer import FastVisualizer +from .local_visualizer import PoseLocalVisualizer +from .local_visualizer_3d import Pose3dLocalVisualizer + +__all__ = ['PoseLocalVisualizer', 'FastVisualizer', 'Pose3dLocalVisualizer'] diff --git a/mmpose/visualization/fast_visualizer.py b/mmpose/visualization/fast_visualizer.py index fa0cb38527..f6ddb3ffda 100644 --- a/mmpose/visualization/fast_visualizer.py +++ b/mmpose/visualization/fast_visualizer.py @@ -1,78 +1,78 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import cv2 - - -class FastVisualizer: - """MMPose Fast Visualizer. - - A simple yet fast visualizer for video/webcam inference. - - Args: - metainfo (dict): pose meta information - radius (int, optional)): Keypoint radius for visualization. - Defaults to 6. - line_width (int, optional): Link width for visualization. - Defaults to 3. - kpt_thr (float, optional): Threshold for keypoints' confidence score, - keypoints with score below this value will not be drawn. - Defaults to 0.3. - """ - - def __init__(self, metainfo, radius=6, line_width=3, kpt_thr=0.3): - self.radius = radius - self.line_width = line_width - self.kpt_thr = kpt_thr - - self.keypoint_id2name = metainfo['keypoint_id2name'] - self.keypoint_name2id = metainfo['keypoint_name2id'] - self.keypoint_colors = metainfo['keypoint_colors'] - self.skeleton_links = metainfo['skeleton_links'] - self.skeleton_link_colors = metainfo['skeleton_link_colors'] - - def draw_pose(self, img, instances): - """Draw pose estimations on the given image. - - This method draws keypoints and skeleton links on the input image - using the provided instances. - - Args: - img (numpy.ndarray): The input image on which to - draw the pose estimations. - instances (object): An object containing detected instances' - information, including keypoints and keypoint_scores. - - Returns: - None: The input image will be modified in place. 
- """ - - if instances is None: - print('no instance detected') - return - - keypoints = instances.keypoints - scores = instances.keypoint_scores - - for kpts, score in zip(keypoints, scores): - for sk_id, sk in enumerate(self.skeleton_links): - if score[sk[0]] < self.kpt_thr or score[sk[1]] < self.kpt_thr: - # skip the link that should not be drawn - continue - - pos1 = (int(kpts[sk[0], 0]), int(kpts[sk[0], 1])) - pos2 = (int(kpts[sk[1], 0]), int(kpts[sk[1], 1])) - - color = self.skeleton_link_colors[sk_id].tolist() - cv2.line(img, pos1, pos2, color, thickness=self.line_width) - - for kid, kpt in enumerate(kpts): - if score[kid] < self.kpt_thr: - # skip the point that should not be drawn - continue - - x_coord, y_coord = int(kpt[0]), int(kpt[1]) - - color = self.keypoint_colors[kid].tolist() - cv2.circle(img, (int(x_coord), int(y_coord)), self.radius, - color, -1) - cv2.circle(img, (int(x_coord), int(y_coord)), self.radius, - (255, 255, 255)) +# Copyright (c) OpenMMLab. All rights reserved. +import cv2 + + +class FastVisualizer: + """MMPose Fast Visualizer. + + A simple yet fast visualizer for video/webcam inference. + + Args: + metainfo (dict): pose meta information + radius (int, optional)): Keypoint radius for visualization. + Defaults to 6. + line_width (int, optional): Link width for visualization. + Defaults to 3. + kpt_thr (float, optional): Threshold for keypoints' confidence score, + keypoints with score below this value will not be drawn. + Defaults to 0.3. + """ + + def __init__(self, metainfo, radius=6, line_width=3, kpt_thr=0.3): + self.radius = radius + self.line_width = line_width + self.kpt_thr = kpt_thr + + self.keypoint_id2name = metainfo['keypoint_id2name'] + self.keypoint_name2id = metainfo['keypoint_name2id'] + self.keypoint_colors = metainfo['keypoint_colors'] + self.skeleton_links = metainfo['skeleton_links'] + self.skeleton_link_colors = metainfo['skeleton_link_colors'] + + def draw_pose(self, img, instances): + """Draw pose estimations on the given image. + + This method draws keypoints and skeleton links on the input image + using the provided instances. + + Args: + img (numpy.ndarray): The input image on which to + draw the pose estimations. + instances (object): An object containing detected instances' + information, including keypoints and keypoint_scores. + + Returns: + None: The input image will be modified in place. 
+ """ + + if instances is None: + print('no instance detected') + return + + keypoints = instances.keypoints + scores = instances.keypoint_scores + + for kpts, score in zip(keypoints, scores): + for sk_id, sk in enumerate(self.skeleton_links): + if score[sk[0]] < self.kpt_thr or score[sk[1]] < self.kpt_thr: + # skip the link that should not be drawn + continue + + pos1 = (int(kpts[sk[0], 0]), int(kpts[sk[0], 1])) + pos2 = (int(kpts[sk[1], 0]), int(kpts[sk[1], 1])) + + color = self.skeleton_link_colors[sk_id].tolist() + cv2.line(img, pos1, pos2, color, thickness=self.line_width) + + for kid, kpt in enumerate(kpts): + if score[kid] < self.kpt_thr: + # skip the point that should not be drawn + continue + + x_coord, y_coord = int(kpt[0]), int(kpt[1]) + + color = self.keypoint_colors[kid].tolist() + cv2.circle(img, (int(x_coord), int(y_coord)), self.radius, + color, -1) + cv2.circle(img, (int(x_coord), int(y_coord)), self.radius, + (255, 255, 255)) diff --git a/mmpose/visualization/local_visualizer.py b/mmpose/visualization/local_visualizer.py index 080e628e33..4696852600 100644 --- a/mmpose/visualization/local_visualizer.py +++ b/mmpose/visualization/local_visualizer.py @@ -1,583 +1,583 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math -from typing import Dict, List, Optional, Tuple, Union - -import cv2 -import mmcv -import numpy as np -import torch -from mmengine.dist import master_only -from mmengine.structures import InstanceData, PixelData - -from mmpose.datasets.datasets.utils import parse_pose_metainfo -from mmpose.registry import VISUALIZERS -from mmpose.structures import PoseDataSample -from .opencv_backend_visualizer import OpencvBackendVisualizer -from .simcc_vis import SimCCVisualizer - - -def _get_adaptive_scales(areas: np.ndarray, - min_area: int = 800, - max_area: int = 30000) -> np.ndarray: - """Get adaptive scales according to areas. - - The scale range is [0.5, 1.0]. When the area is less than - ``min_area``, the scale is 0.5 while the area is larger than - ``max_area``, the scale is 1.0. - - Args: - areas (ndarray): The areas of bboxes or masks with the - shape of (n, ). - min_area (int): Lower bound areas for adaptive scales. - Defaults to 800. - max_area (int): Upper bound areas for adaptive scales. - Defaults to 30000. - - Returns: - ndarray: The adaotive scales with the shape of (n, ). - """ - scales = 0.5 + (areas - min_area) / (max_area - min_area) - scales = np.clip(scales, 0.5, 1.0) - return scales - - -@VISUALIZERS.register_module() -class PoseLocalVisualizer(OpencvBackendVisualizer): - """MMPose Local Visualizer. - - Args: - name (str): Name of the instance. Defaults to 'visualizer'. - image (np.ndarray, optional): the origin image to draw. The format - should be RGB. Defaults to ``None`` - vis_backends (list, optional): Visual backend config list. Defaults to - ``None`` - save_dir (str, optional): Save file dir for all storage backends. - If it is ``None``, the backend storage will not save any data. - Defaults to ``None`` - bbox_color (str, tuple(int), optional): Color of bbox lines. - The tuple of color should be in BGR order. Defaults to ``'green'`` - kpt_color (str, tuple(tuple(int)), optional): Color of keypoints. - The tuple of color should be in BGR order. Defaults to ``'red'`` - link_color (str, tuple(tuple(int)), optional): Color of skeleton. - The tuple of color should be in BGR order. Defaults to ``None`` - line_width (int, float): The width of lines. Defaults to 1 - radius (int, float): The radius of keypoints. 
Defaults to 4 - show_keypoint_weight (bool): Whether to adjust the transparency - of keypoints according to their score. Defaults to ``False`` - alpha (int, float): The transparency of bboxes. Defaults to ``1.0`` - - Examples: - >>> import numpy as np - >>> from mmengine.structures import InstanceData - >>> from mmpose.structures import PoseDataSample - >>> from mmpose.visualization import PoseLocalVisualizer - - >>> pose_local_visualizer = PoseLocalVisualizer(radius=1) - >>> image = np.random.randint(0, 256, - ... size=(10, 12, 3)).astype('uint8') - >>> gt_instances = InstanceData() - >>> gt_instances.keypoints = np.array([[[1, 1], [2, 2], [4, 4], - ... [8, 8]]]) - >>> gt_pose_data_sample = PoseDataSample() - >>> gt_pose_data_sample.gt_instances = gt_instances - >>> dataset_meta = {'skeleton_links': [[0, 1], [1, 2], [2, 3]]} - >>> pose_local_visualizer.set_dataset_meta(dataset_meta) - >>> pose_local_visualizer.add_datasample('image', image, - ... gt_pose_data_sample) - >>> pose_local_visualizer.add_datasample( - ... 'image', image, gt_pose_data_sample, - ... out_file='out_file.jpg') - >>> pose_local_visualizer.add_datasample( - ... 'image', image, gt_pose_data_sample, - ... show=True) - >>> pred_instances = InstanceData() - >>> pred_instances.keypoints = np.array([[[1, 1], [2, 2], [4, 4], - ... [8, 8]]]) - >>> pred_instances.score = np.array([0.8, 1, 0.9, 1]) - >>> pred_pose_data_sample = PoseDataSample() - >>> pred_pose_data_sample.pred_instances = pred_instances - >>> pose_local_visualizer.add_datasample('image', image, - ... gt_pose_data_sample, - ... pred_pose_data_sample) - """ - - def __init__(self, - name: str = 'visualizer', - image: Optional[np.ndarray] = None, - vis_backends: Optional[Dict] = None, - save_dir: Optional[str] = None, - bbox_color: Optional[Union[str, Tuple[int]]] = 'green', - kpt_color: Optional[Union[str, Tuple[Tuple[int]]]] = 'red', - link_color: Optional[Union[str, Tuple[Tuple[int]]]] = None, - text_color: Optional[Union[str, - Tuple[int]]] = (255, 255, 255), - skeleton: Optional[Union[List, Tuple]] = None, - line_width: Union[int, float] = 1, - radius: Union[int, float] = 3, - show_keypoint_weight: bool = False, - backend: str = 'opencv', - alpha: float = 1.0): - super().__init__( - name=name, - image=image, - vis_backends=vis_backends, - save_dir=save_dir, - backend=backend) - - self.bbox_color = bbox_color - self.kpt_color = kpt_color - self.link_color = link_color - self.line_width = line_width - self.text_color = text_color - self.skeleton = skeleton - self.radius = radius - self.alpha = alpha - self.show_keypoint_weight = show_keypoint_weight - # Set default value. When calling - # `PoseLocalVisualizer().set_dataset_meta(xxx)`, - # it will override the default value. - self.dataset_meta = {} - - def set_dataset_meta(self, - dataset_meta: Dict, - skeleton_style: str = 'mmpose'): - """Assign dataset_meta to the visualizer. The default visualization - settings will be overridden. - - Args: - dataset_meta (dict): meta information of dataset. 
- """ - if dataset_meta.get( - 'dataset_name') == 'coco' and skeleton_style == 'openpose': - dataset_meta = parse_pose_metainfo( - dict(from_file='configs/_base_/datasets/coco_openpose.py')) - - if isinstance(dataset_meta, dict): - self.dataset_meta = dataset_meta.copy() - self.bbox_color = dataset_meta.get('bbox_color', self.bbox_color) - self.kpt_color = dataset_meta.get('keypoint_colors', - self.kpt_color) - self.link_color = dataset_meta.get('skeleton_link_colors', - self.link_color) - self.skeleton = dataset_meta.get('skeleton_links', self.skeleton) - # sometimes self.dataset_meta is manually set, which might be None. - # it should be converted to a dict at these times - if self.dataset_meta is None: - self.dataset_meta = {} - - def _draw_instances_bbox(self, image: np.ndarray, - instances: InstanceData) -> np.ndarray: - """Draw bounding boxes and corresponding labels of GT or prediction. - - Args: - image (np.ndarray): The image to draw. - instances (:obj:`InstanceData`): Data structure for - instance-level annotations or predictions. - - Returns: - np.ndarray: the drawn image which channel is RGB. - """ - self.set_image(image) - - if 'bboxes' in instances: - bboxes = instances.bboxes - self.draw_bboxes( - bboxes, - edge_colors=self.bbox_color, - alpha=self.alpha, - line_widths=self.line_width) - else: - return self.get_image() - - if 'labels' in instances and self.text_color is not None: - classes = self.dataset_meta.get('classes', None) - labels = instances.labels - - positions = bboxes[:, :2] - areas = (bboxes[:, 3] - bboxes[:, 1]) * ( - bboxes[:, 2] - bboxes[:, 0]) - scales = _get_adaptive_scales(areas) - - for i, (pos, label) in enumerate(zip(positions, labels)): - label_text = classes[ - label] if classes is not None else f'class {label}' - - if isinstance(self.bbox_color, - tuple) and max(self.bbox_color) > 1: - facecolor = [c / 255.0 for c in self.bbox_color] - else: - facecolor = self.bbox_color - - self.draw_texts( - label_text, - pos, - colors=self.text_color, - font_sizes=int(13 * scales[i]), - vertical_alignments='bottom', - bboxes=[{ - 'facecolor': facecolor, - 'alpha': 0.8, - 'pad': 0.7, - 'edgecolor': 'none' - }]) - - return self.get_image() - - def _draw_instances_kpts(self, - image: np.ndarray, - instances: InstanceData, - kpt_thr: float = 0.3, - show_kpt_idx: bool = False, - skeleton_style: str = 'mmpose'): - """Draw keypoints and skeletons (optional) of GT or prediction. - - Args: - image (np.ndarray): The image to draw. - instances (:obj:`InstanceData`): Data structure for - instance-level annotations or predictions. - kpt_thr (float, optional): Minimum threshold of keypoints - to be shown. Default: 0.3. - show_kpt_idx (bool): Whether to show the index of keypoints. - Defaults to ``False`` - skeleton_style (str): Skeleton style selection. Defaults to - ``'mmpose'`` - - Returns: - np.ndarray: the drawn image which channel is RGB. 
- """ - - self.set_image(image) - img_h, img_w, _ = image.shape - - if 'keypoints' in instances: - keypoints = instances.get('transformed_keypoints', - instances.keypoints) - - if 'keypoint_scores' in instances: - scores = instances.keypoint_scores - else: - scores = np.ones(keypoints.shape[:-1]) - - if 'keypoints_visible' in instances: - keypoints_visible = instances.keypoints_visible - else: - keypoints_visible = np.ones(keypoints.shape[:-1]) - - if skeleton_style == 'openpose': - keypoints_info = np.concatenate( - (keypoints, scores[..., None], keypoints_visible[..., - None]), - axis=-1) - # compute neck joint - neck = np.mean(keypoints_info[:, [5, 6]], axis=1) - # neck score when visualizing pred - neck[:, 2:4] = np.logical_and( - keypoints_info[:, 5, 2:4] > kpt_thr, - keypoints_info[:, 6, 2:4] > kpt_thr).astype(int) - new_keypoints_info = np.insert( - keypoints_info, 17, neck, axis=1) - - mmpose_idx = [ - 17, 6, 8, 10, 7, 9, 12, 14, 16, 13, 15, 2, 1, 4, 3 - ] - openpose_idx = [ - 1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17 - ] - new_keypoints_info[:, openpose_idx] = \ - new_keypoints_info[:, mmpose_idx] - keypoints_info = new_keypoints_info - - keypoints, scores, keypoints_visible = keypoints_info[ - ..., :2], keypoints_info[..., 2], keypoints_info[..., 3] - - for kpts, score, visible in zip(keypoints, scores, - keypoints_visible): - kpts = np.array(kpts, copy=False) - - if self.kpt_color is None or isinstance(self.kpt_color, str): - kpt_color = [self.kpt_color] * len(kpts) - elif len(self.kpt_color) == len(kpts): - kpt_color = self.kpt_color - else: - raise ValueError( - f'the length of kpt_color ' - f'({len(self.kpt_color)}) does not matches ' - f'that of keypoints ({len(kpts)})') - - # draw links - if self.skeleton is not None and self.link_color is not None: - if self.link_color is None or isinstance( - self.link_color, str): - link_color = [self.link_color] * len(self.skeleton) - elif len(self.link_color) == len(self.skeleton): - link_color = self.link_color - else: - raise ValueError( - f'the length of link_color ' - f'({len(self.link_color)}) does not matches ' - f'that of skeleton ({len(self.skeleton)})') - - for sk_id, sk in enumerate(self.skeleton): - pos1 = (int(kpts[sk[0], 0]), int(kpts[sk[0], 1])) - pos2 = (int(kpts[sk[1], 0]), int(kpts[sk[1], 1])) - if not (visible[sk[0]] and visible[sk[1]]): - continue - - if (pos1[0] <= 0 or pos1[0] >= img_w or pos1[1] <= 0 - or pos1[1] >= img_h or pos2[0] <= 0 - or pos2[0] >= img_w or pos2[1] <= 0 - or pos2[1] >= img_h or score[sk[0]] < kpt_thr - or score[sk[1]] < kpt_thr - or link_color[sk_id] is None): - # skip the link that should not be drawn - continue - X = np.array((pos1[0], pos2[0])) - Y = np.array((pos1[1], pos2[1])) - color = link_color[sk_id] - if not isinstance(color, str): - color = tuple(int(c) for c in color) - transparency = self.alpha - if self.show_keypoint_weight: - transparency *= max( - 0, min(1, 0.5 * (score[sk[0]] + score[sk[1]]))) - - if skeleton_style == 'openpose': - mX = np.mean(X) - mY = np.mean(Y) - length = ((Y[0] - Y[1])**2 + (X[0] - X[1])**2)**0.5 - transparency = 0.6 - angle = math.degrees( - math.atan2(Y[0] - Y[1], X[0] - X[1])) - polygons = cv2.ellipse2Poly( - (int(mX), int(mY)), - (int(length / 2), int(self.line_width)), - int(angle), 0, 360, 1) - - self.draw_polygons( - polygons, - edge_colors=color, - face_colors=color, - alpha=transparency) - - else: - self.draw_lines( - X, Y, color, line_widths=self.line_width) - - # draw each point on image - for kid, kpt in enumerate(kpts): - if 
score[kid] < kpt_thr or not visible[ - kid] or kpt_color[kid] is None: - # skip the point that should not be drawn - continue - - color = kpt_color[kid] - if not isinstance(color, str): - color = tuple(int(c) for c in color) - transparency = self.alpha - if self.show_keypoint_weight: - transparency *= max(0, min(1, score[kid])) - self.draw_circles( - kpt, - radius=np.array([self.radius]), - face_colors=color, - edge_colors=color, - alpha=transparency, - line_widths=self.radius) - if show_kpt_idx: - kpt[0] += self.radius - kpt[1] -= self.radius - self.draw_texts( - str(kid), - kpt, - colors=color, - font_sizes=self.radius * 3, - vertical_alignments='bottom', - horizontal_alignments='center') - - return self.get_image() - - def _draw_instance_heatmap( - self, - fields: PixelData, - overlaid_image: Optional[np.ndarray] = None, - ): - """Draw heatmaps of GT or prediction. - - Args: - fields (:obj:`PixelData`): Data structure for - pixel-level annotations or predictions. - overlaid_image (np.ndarray): The image to draw. - - Returns: - np.ndarray: the drawn image which channel is RGB. - """ - if 'heatmaps' not in fields: - return None - heatmaps = fields.heatmaps - if isinstance(heatmaps, np.ndarray): - heatmaps = torch.from_numpy(heatmaps) - if heatmaps.dim() == 3: - heatmaps, _ = heatmaps.max(dim=0) - heatmaps = heatmaps.unsqueeze(0) - out_image = self.draw_featmap(heatmaps, overlaid_image) - return out_image - - def _draw_instance_xy_heatmap( - self, - fields: PixelData, - overlaid_image: Optional[np.ndarray] = None, - n: int = 20, - ): - """Draw heatmaps of GT or prediction. - - Args: - fields (:obj:`PixelData`): Data structure for - pixel-level annotations or predictions. - overlaid_image (np.ndarray): The image to draw. - n (int): Number of keypoint, up to 20. - - Returns: - np.ndarray: the drawn image which channel is RGB. - """ - if 'heatmaps' not in fields: - return None - heatmaps = fields.heatmaps - _, h, w = heatmaps.shape - if isinstance(heatmaps, np.ndarray): - heatmaps = torch.from_numpy(heatmaps) - out_image = SimCCVisualizer().draw_instance_xy_heatmap( - heatmaps, overlaid_image, n) - out_image = cv2.resize(out_image[:, :, ::-1], (w, h)) - return out_image - - @master_only - def add_datasample(self, - name: str, - image: np.ndarray, - data_sample: PoseDataSample, - draw_gt: bool = True, - draw_pred: bool = True, - draw_heatmap: bool = False, - draw_bbox: bool = False, - show_kpt_idx: bool = False, - skeleton_style: str = 'mmpose', - show: bool = False, - wait_time: float = 0, - out_file: Optional[str] = None, - kpt_thr: float = 0.3, - step: int = 0) -> None: - """Draw datasample and save to all backends. - - - If GT and prediction are plotted at the same time, they are - displayed in a stitched image where the left image is the - ground truth and the right image is the prediction. - - If ``show`` is True, all storage backends are ignored, and - the images will be displayed in a local window. - - If ``out_file`` is specified, the drawn image will be - saved to ``out_file``. t is usually used when the display - is not available. - - Args: - name (str): The image identifier - image (np.ndarray): The image to draw - data_sample (:obj:`PoseDataSample`, optional): The data sample - to visualize - draw_gt (bool): Whether to draw GT PoseDataSample. Default to - ``True`` - draw_pred (bool): Whether to draw Prediction PoseDataSample. - Defaults to ``True`` - draw_bbox (bool): Whether to draw bounding boxes. Default to - ``False`` - draw_heatmap (bool): Whether to draw heatmaps. 
Defaults to - ``False`` - show_kpt_idx (bool): Whether to show the index of keypoints. - Defaults to ``False`` - skeleton_style (str): Skeleton style selection. Defaults to - ``'mmpose'`` - show (bool): Whether to display the drawn image. Default to - ``False`` - wait_time (float): The interval of show (s). Defaults to 0 - out_file (str): Path to output file. Defaults to ``None`` - kpt_thr (float, optional): Minimum threshold of keypoints - to be shown. Default: 0.3. - step (int): Global step value to record. Defaults to 0 - """ - - gt_img_data = None - pred_img_data = None - - if draw_gt: - gt_img_data = image.copy() - gt_img_heatmap = None - - # draw bboxes & keypoints - if 'gt_instances' in data_sample: - gt_img_data = self._draw_instances_kpts( - gt_img_data, data_sample.gt_instances, kpt_thr, - show_kpt_idx, skeleton_style) - if draw_bbox: - gt_img_data = self._draw_instances_bbox( - gt_img_data, data_sample.gt_instances) - - # draw heatmaps - if 'gt_fields' in data_sample and draw_heatmap: - gt_img_heatmap = self._draw_instance_heatmap( - data_sample.gt_fields, image) - if gt_img_heatmap is not None: - gt_img_data = np.concatenate((gt_img_data, gt_img_heatmap), - axis=0) - - if draw_pred: - pred_img_data = image.copy() - pred_img_heatmap = None - - # draw bboxes & keypoints - if 'pred_instances' in data_sample: - pred_img_data = self._draw_instances_kpts( - pred_img_data, data_sample.pred_instances, kpt_thr, - show_kpt_idx, skeleton_style) - if draw_bbox: - pred_img_data = self._draw_instances_bbox( - pred_img_data, data_sample.pred_instances) - - # draw heatmaps - if 'pred_fields' in data_sample and draw_heatmap: - if 'keypoint_x_labels' in data_sample.pred_instances: - pred_img_heatmap = self._draw_instance_xy_heatmap( - data_sample.pred_fields, image) - else: - pred_img_heatmap = self._draw_instance_heatmap( - data_sample.pred_fields, image) - if pred_img_heatmap is not None: - pred_img_data = np.concatenate( - (pred_img_data, pred_img_heatmap), axis=0) - - # merge visualization results - if gt_img_data is not None and pred_img_data is not None: - if gt_img_heatmap is None and pred_img_heatmap is not None: - gt_img_data = np.concatenate((gt_img_data, image), axis=0) - elif gt_img_heatmap is not None and pred_img_heatmap is None: - pred_img_data = np.concatenate((pred_img_data, image), axis=0) - - drawn_img = np.concatenate((gt_img_data, pred_img_data), axis=1) - - elif gt_img_data is not None: - drawn_img = gt_img_data - else: - drawn_img = pred_img_data - - # It is convenient for users to obtain the drawn image. - # For example, the user wants to obtain the drawn image and - # save it as a video during video inference. - self.set_image(drawn_img) - - if show: - self.show(drawn_img, win_name=name, wait_time=wait_time) - - if out_file is not None: - mmcv.imwrite(drawn_img[..., ::-1], out_file) - else: - # save drawn_img to backends - self.add_image(name, drawn_img, step) - - return self.get_image() +# Copyright (c) OpenMMLab. All rights reserved. 
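`_get_adaptive_scales` above is a plain linear ramp in the bbox area, clipped to the range [0.5, 1.0]; with the default bounds it already saturates at an area of 15400. A tiny numeric sketch (importing the private helper only for illustration):

import numpy as np

from mmpose.visualization.local_visualizer import _get_adaptive_scales

areas = np.array([500., 800., 15400., 30000.])
print(_get_adaptive_scales(areas))   # values: 0.5, 0.5, 1.0, 1.0
# 0.5 + (15400 - 800) / (30000 - 800) == 1.0, so larger boxes all get scale 1.0.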
+import math +from typing import Dict, List, Optional, Tuple, Union + +import cv2 +import mmcv +import numpy as np +import torch +from mmengine.dist import master_only +from mmengine.structures import InstanceData, PixelData + +from mmpose.datasets.datasets.utils import parse_pose_metainfo +from mmpose.registry import VISUALIZERS +from mmpose.structures import PoseDataSample +from .opencv_backend_visualizer import OpencvBackendVisualizer +from .simcc_vis import SimCCVisualizer + + +def _get_adaptive_scales(areas: np.ndarray, + min_area: int = 800, + max_area: int = 30000) -> np.ndarray: + """Get adaptive scales according to areas. + + The scale range is [0.5, 1.0]. When the area is less than + ``min_area``, the scale is 0.5 while the area is larger than + ``max_area``, the scale is 1.0. + + Args: + areas (ndarray): The areas of bboxes or masks with the + shape of (n, ). + min_area (int): Lower bound areas for adaptive scales. + Defaults to 800. + max_area (int): Upper bound areas for adaptive scales. + Defaults to 30000. + + Returns: + ndarray: The adaotive scales with the shape of (n, ). + """ + scales = 0.5 + (areas - min_area) / (max_area - min_area) + scales = np.clip(scales, 0.5, 1.0) + return scales + + +@VISUALIZERS.register_module() +class PoseLocalVisualizer(OpencvBackendVisualizer): + """MMPose Local Visualizer. + + Args: + name (str): Name of the instance. Defaults to 'visualizer'. + image (np.ndarray, optional): the origin image to draw. The format + should be RGB. Defaults to ``None`` + vis_backends (list, optional): Visual backend config list. Defaults to + ``None`` + save_dir (str, optional): Save file dir for all storage backends. + If it is ``None``, the backend storage will not save any data. + Defaults to ``None`` + bbox_color (str, tuple(int), optional): Color of bbox lines. + The tuple of color should be in BGR order. Defaults to ``'green'`` + kpt_color (str, tuple(tuple(int)), optional): Color of keypoints. + The tuple of color should be in BGR order. Defaults to ``'red'`` + link_color (str, tuple(tuple(int)), optional): Color of skeleton. + The tuple of color should be in BGR order. Defaults to ``None`` + line_width (int, float): The width of lines. Defaults to 1 + radius (int, float): The radius of keypoints. Defaults to 4 + show_keypoint_weight (bool): Whether to adjust the transparency + of keypoints according to their score. Defaults to ``False`` + alpha (int, float): The transparency of bboxes. Defaults to ``1.0`` + + Examples: + >>> import numpy as np + >>> from mmengine.structures import InstanceData + >>> from mmpose.structures import PoseDataSample + >>> from mmpose.visualization import PoseLocalVisualizer + + >>> pose_local_visualizer = PoseLocalVisualizer(radius=1) + >>> image = np.random.randint(0, 256, + ... size=(10, 12, 3)).astype('uint8') + >>> gt_instances = InstanceData() + >>> gt_instances.keypoints = np.array([[[1, 1], [2, 2], [4, 4], + ... [8, 8]]]) + >>> gt_pose_data_sample = PoseDataSample() + >>> gt_pose_data_sample.gt_instances = gt_instances + >>> dataset_meta = {'skeleton_links': [[0, 1], [1, 2], [2, 3]]} + >>> pose_local_visualizer.set_dataset_meta(dataset_meta) + >>> pose_local_visualizer.add_datasample('image', image, + ... gt_pose_data_sample) + >>> pose_local_visualizer.add_datasample( + ... 'image', image, gt_pose_data_sample, + ... out_file='out_file.jpg') + >>> pose_local_visualizer.add_datasample( + ... 'image', image, gt_pose_data_sample, + ... 
show=True) + >>> pred_instances = InstanceData() + >>> pred_instances.keypoints = np.array([[[1, 1], [2, 2], [4, 4], + ... [8, 8]]]) + >>> pred_instances.score = np.array([0.8, 1, 0.9, 1]) + >>> pred_pose_data_sample = PoseDataSample() + >>> pred_pose_data_sample.pred_instances = pred_instances + >>> pose_local_visualizer.add_datasample('image', image, + ... gt_pose_data_sample, + ... pred_pose_data_sample) + """ + + def __init__(self, + name: str = 'visualizer', + image: Optional[np.ndarray] = None, + vis_backends: Optional[Dict] = None, + save_dir: Optional[str] = None, + bbox_color: Optional[Union[str, Tuple[int]]] = 'green', + kpt_color: Optional[Union[str, Tuple[Tuple[int]]]] = 'red', + link_color: Optional[Union[str, Tuple[Tuple[int]]]] = None, + text_color: Optional[Union[str, + Tuple[int]]] = (255, 255, 255), + skeleton: Optional[Union[List, Tuple]] = None, + line_width: Union[int, float] = 1, + radius: Union[int, float] = 3, + show_keypoint_weight: bool = False, + backend: str = 'opencv', + alpha: float = 1.0): + super().__init__( + name=name, + image=image, + vis_backends=vis_backends, + save_dir=save_dir, + backend=backend) + + self.bbox_color = bbox_color + self.kpt_color = kpt_color + self.link_color = link_color + self.line_width = line_width + self.text_color = text_color + self.skeleton = skeleton + self.radius = radius + self.alpha = alpha + self.show_keypoint_weight = show_keypoint_weight + # Set default value. When calling + # `PoseLocalVisualizer().set_dataset_meta(xxx)`, + # it will override the default value. + self.dataset_meta = {} + + def set_dataset_meta(self, + dataset_meta: Dict, + skeleton_style: str = 'mmpose'): + """Assign dataset_meta to the visualizer. The default visualization + settings will be overridden. + + Args: + dataset_meta (dict): meta information of dataset. + """ + if dataset_meta.get( + 'dataset_name') == 'coco' and skeleton_style == 'openpose': + dataset_meta = parse_pose_metainfo( + dict(from_file='configs/_base_/datasets/coco_openpose.py')) + + if isinstance(dataset_meta, dict): + self.dataset_meta = dataset_meta.copy() + self.bbox_color = dataset_meta.get('bbox_color', self.bbox_color) + self.kpt_color = dataset_meta.get('keypoint_colors', + self.kpt_color) + self.link_color = dataset_meta.get('skeleton_link_colors', + self.link_color) + self.skeleton = dataset_meta.get('skeleton_links', self.skeleton) + # sometimes self.dataset_meta is manually set, which might be None. + # it should be converted to a dict at these times + if self.dataset_meta is None: + self.dataset_meta = {} + + def _draw_instances_bbox(self, image: np.ndarray, + instances: InstanceData) -> np.ndarray: + """Draw bounding boxes and corresponding labels of GT or prediction. + + Args: + image (np.ndarray): The image to draw. + instances (:obj:`InstanceData`): Data structure for + instance-level annotations or predictions. + + Returns: + np.ndarray: the drawn image which channel is RGB. 
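+
+        A minimal, illustrative example (the image and bbox values below are
+        made-up inputs):
+
+            >>> import numpy as np
+            >>> from mmengine.structures import InstanceData
+            >>> from mmpose.visualization import PoseLocalVisualizer
+            >>> visualizer = PoseLocalVisualizer()
+            >>> instances = InstanceData()
+            >>> instances.bboxes = np.array([[10., 20., 50., 80.]])
+            >>> image = np.zeros((100, 100, 3), dtype=np.uint8)
+            >>> drawn = visualizer._draw_instances_bbox(image, instances)
+
+        If ``instances.labels`` and ``dataset_meta['classes']`` are also set,
+        a class label is drawn next to each box.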
+ """ + self.set_image(image) + + if 'bboxes' in instances: + bboxes = instances.bboxes + self.draw_bboxes( + bboxes, + edge_colors=self.bbox_color, + alpha=self.alpha, + line_widths=self.line_width) + else: + return self.get_image() + + if 'labels' in instances and self.text_color is not None: + classes = self.dataset_meta.get('classes', None) + labels = instances.labels + + positions = bboxes[:, :2] + areas = (bboxes[:, 3] - bboxes[:, 1]) * ( + bboxes[:, 2] - bboxes[:, 0]) + scales = _get_adaptive_scales(areas) + + for i, (pos, label) in enumerate(zip(positions, labels)): + label_text = classes[ + label] if classes is not None else f'class {label}' + + if isinstance(self.bbox_color, + tuple) and max(self.bbox_color) > 1: + facecolor = [c / 255.0 for c in self.bbox_color] + else: + facecolor = self.bbox_color + + self.draw_texts( + label_text, + pos, + colors=self.text_color, + font_sizes=int(13 * scales[i]), + vertical_alignments='bottom', + bboxes=[{ + 'facecolor': facecolor, + 'alpha': 0.8, + 'pad': 0.7, + 'edgecolor': 'none' + }]) + + return self.get_image() + + def _draw_instances_kpts(self, + image: np.ndarray, + instances: InstanceData, + kpt_thr: float = 0.3, + show_kpt_idx: bool = False, + skeleton_style: str = 'mmpose'): + """Draw keypoints and skeletons (optional) of GT or prediction. + + Args: + image (np.ndarray): The image to draw. + instances (:obj:`InstanceData`): Data structure for + instance-level annotations or predictions. + kpt_thr (float, optional): Minimum threshold of keypoints + to be shown. Default: 0.3. + show_kpt_idx (bool): Whether to show the index of keypoints. + Defaults to ``False`` + skeleton_style (str): Skeleton style selection. Defaults to + ``'mmpose'`` + + Returns: + np.ndarray: the drawn image which channel is RGB. 
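+
+        A minimal, illustrative example (the keypoint coordinates and scores
+        below are made-up values):
+
+            >>> import numpy as np
+            >>> from mmengine.structures import InstanceData
+            >>> from mmpose.visualization import PoseLocalVisualizer
+            >>> visualizer = PoseLocalVisualizer(radius=2)
+            >>> visualizer.set_dataset_meta(
+            ...     dict(skeleton_links=[[0, 1], [1, 2]]))
+            >>> instances = InstanceData()
+            >>> instances.keypoints = np.array(
+            ...     [[[10., 10.], [20., 20.], [30., 10.]]])
+            >>> instances.keypoint_scores = np.array([[0.9, 0.8, 0.2]])
+            >>> image = np.zeros((64, 64, 3), dtype=np.uint8)
+            >>> drawn = visualizer._draw_instances_kpts(
+            ...     image, instances, kpt_thr=0.3)
+
+        With ``kpt_thr=0.3`` the third keypoint (score 0.2) is skipped, and
+        links are drawn only when a link color is configured (e.g. via
+        ``skeleton_link_colors`` in the dataset meta), which is left unset
+        here.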
+ """ + + self.set_image(image) + img_h, img_w, _ = image.shape + + if 'keypoints' in instances: + keypoints = instances.get('transformed_keypoints', + instances.keypoints) + + if 'keypoint_scores' in instances: + scores = instances.keypoint_scores + else: + scores = np.ones(keypoints.shape[:-1]) + + if 'keypoints_visible' in instances: + keypoints_visible = instances.keypoints_visible + else: + keypoints_visible = np.ones(keypoints.shape[:-1]) + + if skeleton_style == 'openpose': + keypoints_info = np.concatenate( + (keypoints, scores[..., None], keypoints_visible[..., + None]), + axis=-1) + # compute neck joint + neck = np.mean(keypoints_info[:, [5, 6]], axis=1) + # neck score when visualizing pred + neck[:, 2:4] = np.logical_and( + keypoints_info[:, 5, 2:4] > kpt_thr, + keypoints_info[:, 6, 2:4] > kpt_thr).astype(int) + new_keypoints_info = np.insert( + keypoints_info, 17, neck, axis=1) + + mmpose_idx = [ + 17, 6, 8, 10, 7, 9, 12, 14, 16, 13, 15, 2, 1, 4, 3 + ] + openpose_idx = [ + 1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17 + ] + new_keypoints_info[:, openpose_idx] = \ + new_keypoints_info[:, mmpose_idx] + keypoints_info = new_keypoints_info + + keypoints, scores, keypoints_visible = keypoints_info[ + ..., :2], keypoints_info[..., 2], keypoints_info[..., 3] + + for kpts, score, visible in zip(keypoints, scores, + keypoints_visible): + kpts = np.array(kpts, copy=False) + + if self.kpt_color is None or isinstance(self.kpt_color, str): + kpt_color = [self.kpt_color] * len(kpts) + elif len(self.kpt_color) == len(kpts): + kpt_color = self.kpt_color + else: + raise ValueError( + f'the length of kpt_color ' + f'({len(self.kpt_color)}) does not matches ' + f'that of keypoints ({len(kpts)})') + + # draw links + if self.skeleton is not None and self.link_color is not None: + if self.link_color is None or isinstance( + self.link_color, str): + link_color = [self.link_color] * len(self.skeleton) + elif len(self.link_color) == len(self.skeleton): + link_color = self.link_color + else: + raise ValueError( + f'the length of link_color ' + f'({len(self.link_color)}) does not matches ' + f'that of skeleton ({len(self.skeleton)})') + + for sk_id, sk in enumerate(self.skeleton): + pos1 = (int(kpts[sk[0], 0]), int(kpts[sk[0], 1])) + pos2 = (int(kpts[sk[1], 0]), int(kpts[sk[1], 1])) + if not (visible[sk[0]] and visible[sk[1]]): + continue + + if (pos1[0] <= 0 or pos1[0] >= img_w or pos1[1] <= 0 + or pos1[1] >= img_h or pos2[0] <= 0 + or pos2[0] >= img_w or pos2[1] <= 0 + or pos2[1] >= img_h or score[sk[0]] < kpt_thr + or score[sk[1]] < kpt_thr + or link_color[sk_id] is None): + # skip the link that should not be drawn + continue + X = np.array((pos1[0], pos2[0])) + Y = np.array((pos1[1], pos2[1])) + color = link_color[sk_id] + if not isinstance(color, str): + color = tuple(int(c) for c in color) + transparency = self.alpha + if self.show_keypoint_weight: + transparency *= max( + 0, min(1, 0.5 * (score[sk[0]] + score[sk[1]]))) + + if skeleton_style == 'openpose': + mX = np.mean(X) + mY = np.mean(Y) + length = ((Y[0] - Y[1])**2 + (X[0] - X[1])**2)**0.5 + transparency = 0.6 + angle = math.degrees( + math.atan2(Y[0] - Y[1], X[0] - X[1])) + polygons = cv2.ellipse2Poly( + (int(mX), int(mY)), + (int(length / 2), int(self.line_width)), + int(angle), 0, 360, 1) + + self.draw_polygons( + polygons, + edge_colors=color, + face_colors=color, + alpha=transparency) + + else: + self.draw_lines( + X, Y, color, line_widths=self.line_width) + + # draw each point on image + for kid, kpt in enumerate(kpts): + if 
score[kid] < kpt_thr or not visible[ + kid] or kpt_color[kid] is None: + # skip the point that should not be drawn + continue + + color = kpt_color[kid] + if not isinstance(color, str): + color = tuple(int(c) for c in color) + transparency = self.alpha + if self.show_keypoint_weight: + transparency *= max(0, min(1, score[kid])) + self.draw_circles( + kpt, + radius=np.array([self.radius]), + face_colors=color, + edge_colors=color, + alpha=transparency, + line_widths=self.radius) + if show_kpt_idx: + kpt[0] += self.radius + kpt[1] -= self.radius + self.draw_texts( + str(kid), + kpt, + colors=color, + font_sizes=self.radius * 3, + vertical_alignments='bottom', + horizontal_alignments='center') + + return self.get_image() + + def _draw_instance_heatmap( + self, + fields: PixelData, + overlaid_image: Optional[np.ndarray] = None, + ): + """Draw heatmaps of GT or prediction. + + Args: + fields (:obj:`PixelData`): Data structure for + pixel-level annotations or predictions. + overlaid_image (np.ndarray): The image to draw. + + Returns: + np.ndarray: the drawn image which channel is RGB. + """ + if 'heatmaps' not in fields: + return None + heatmaps = fields.heatmaps + if isinstance(heatmaps, np.ndarray): + heatmaps = torch.from_numpy(heatmaps) + if heatmaps.dim() == 3: + heatmaps, _ = heatmaps.max(dim=0) + heatmaps = heatmaps.unsqueeze(0) + out_image = self.draw_featmap(heatmaps, overlaid_image) + return out_image + + def _draw_instance_xy_heatmap( + self, + fields: PixelData, + overlaid_image: Optional[np.ndarray] = None, + n: int = 20, + ): + """Draw heatmaps of GT or prediction. + + Args: + fields (:obj:`PixelData`): Data structure for + pixel-level annotations or predictions. + overlaid_image (np.ndarray): The image to draw. + n (int): Number of keypoint, up to 20. + + Returns: + np.ndarray: the drawn image which channel is RGB. + """ + if 'heatmaps' not in fields: + return None + heatmaps = fields.heatmaps + _, h, w = heatmaps.shape + if isinstance(heatmaps, np.ndarray): + heatmaps = torch.from_numpy(heatmaps) + out_image = SimCCVisualizer().draw_instance_xy_heatmap( + heatmaps, overlaid_image, n) + out_image = cv2.resize(out_image[:, :, ::-1], (w, h)) + return out_image + + @master_only + def add_datasample(self, + name: str, + image: np.ndarray, + data_sample: PoseDataSample, + draw_gt: bool = True, + draw_pred: bool = True, + draw_heatmap: bool = False, + draw_bbox: bool = False, + show_kpt_idx: bool = False, + skeleton_style: str = 'mmpose', + show: bool = False, + wait_time: float = 0, + out_file: Optional[str] = None, + kpt_thr: float = 0.3, + step: int = 0) -> None: + """Draw datasample and save to all backends. + + - If GT and prediction are plotted at the same time, they are + displayed in a stitched image where the left image is the + ground truth and the right image is the prediction. + - If ``show`` is True, all storage backends are ignored, and + the images will be displayed in a local window. + - If ``out_file`` is specified, the drawn image will be + saved to ``out_file``. t is usually used when the display + is not available. + + Args: + name (str): The image identifier + image (np.ndarray): The image to draw + data_sample (:obj:`PoseDataSample`, optional): The data sample + to visualize + draw_gt (bool): Whether to draw GT PoseDataSample. Default to + ``True`` + draw_pred (bool): Whether to draw Prediction PoseDataSample. + Defaults to ``True`` + draw_bbox (bool): Whether to draw bounding boxes. Default to + ``False`` + draw_heatmap (bool): Whether to draw heatmaps. 
Defaults to + ``False`` + show_kpt_idx (bool): Whether to show the index of keypoints. + Defaults to ``False`` + skeleton_style (str): Skeleton style selection. Defaults to + ``'mmpose'`` + show (bool): Whether to display the drawn image. Default to + ``False`` + wait_time (float): The interval of show (s). Defaults to 0 + out_file (str): Path to output file. Defaults to ``None`` + kpt_thr (float, optional): Minimum threshold of keypoints + to be shown. Default: 0.3. + step (int): Global step value to record. Defaults to 0 + """ + + gt_img_data = None + pred_img_data = None + + if draw_gt: + gt_img_data = image.copy() + gt_img_heatmap = None + + # draw bboxes & keypoints + if 'gt_instances' in data_sample: + gt_img_data = self._draw_instances_kpts( + gt_img_data, data_sample.gt_instances, kpt_thr, + show_kpt_idx, skeleton_style) + if draw_bbox: + gt_img_data = self._draw_instances_bbox( + gt_img_data, data_sample.gt_instances) + + # draw heatmaps + if 'gt_fields' in data_sample and draw_heatmap: + gt_img_heatmap = self._draw_instance_heatmap( + data_sample.gt_fields, image) + if gt_img_heatmap is not None: + gt_img_data = np.concatenate((gt_img_data, gt_img_heatmap), + axis=0) + + if draw_pred: + pred_img_data = image.copy() + pred_img_heatmap = None + + # draw bboxes & keypoints + if 'pred_instances' in data_sample: + pred_img_data = self._draw_instances_kpts( + pred_img_data, data_sample.pred_instances, kpt_thr, + show_kpt_idx, skeleton_style) + if draw_bbox: + pred_img_data = self._draw_instances_bbox( + pred_img_data, data_sample.pred_instances) + + # draw heatmaps + if 'pred_fields' in data_sample and draw_heatmap: + if 'keypoint_x_labels' in data_sample.pred_instances: + pred_img_heatmap = self._draw_instance_xy_heatmap( + data_sample.pred_fields, image) + else: + pred_img_heatmap = self._draw_instance_heatmap( + data_sample.pred_fields, image) + if pred_img_heatmap is not None: + pred_img_data = np.concatenate( + (pred_img_data, pred_img_heatmap), axis=0) + + # merge visualization results + if gt_img_data is not None and pred_img_data is not None: + if gt_img_heatmap is None and pred_img_heatmap is not None: + gt_img_data = np.concatenate((gt_img_data, image), axis=0) + elif gt_img_heatmap is not None and pred_img_heatmap is None: + pred_img_data = np.concatenate((pred_img_data, image), axis=0) + + drawn_img = np.concatenate((gt_img_data, pred_img_data), axis=1) + + elif gt_img_data is not None: + drawn_img = gt_img_data + else: + drawn_img = pred_img_data + + # It is convenient for users to obtain the drawn image. + # For example, the user wants to obtain the drawn image and + # save it as a video during video inference. + self.set_image(drawn_img) + + if show: + self.show(drawn_img, win_name=name, wait_time=wait_time) + + if out_file is not None: + mmcv.imwrite(drawn_img[..., ::-1], out_file) + else: + # save drawn_img to backends + self.add_image(name, drawn_img, step) + + return self.get_image() diff --git a/mmpose/visualization/local_visualizer_3d.py b/mmpose/visualization/local_visualizer_3d.py index 7e3462ce79..6aee7ba2c1 100644 --- a/mmpose/visualization/local_visualizer_3d.py +++ b/mmpose/visualization/local_visualizer_3d.py @@ -1,564 +1,564 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import math -from typing import Dict, List, Optional, Tuple, Union - -import cv2 -import mmcv -import numpy as np -from matplotlib import pyplot as plt -from mmengine.dist import master_only -from mmengine.structures import InstanceData - -from mmpose.registry import VISUALIZERS -from mmpose.structures import PoseDataSample -from . import PoseLocalVisualizer - - -@VISUALIZERS.register_module() -class Pose3dLocalVisualizer(PoseLocalVisualizer): - """MMPose 3d Local Visualizer. - - Args: - name (str): Name of the instance. Defaults to 'visualizer'. - image (np.ndarray, optional): the origin image to draw. The format - should be RGB. Defaults to ``None`` - vis_backends (list, optional): Visual backend config list. Defaults to - ``None`` - save_dir (str, optional): Save file dir for all storage backends. - If it is ``None``, the backend storage will not save any data. - Defaults to ``None`` - bbox_color (str, tuple(int), optional): Color of bbox lines. - The tuple of color should be in BGR order. Defaults to ``'green'`` - kpt_color (str, tuple(tuple(int)), optional): Color of keypoints. - The tuple of color should be in BGR order. Defaults to ``'red'`` - link_color (str, tuple(tuple(int)), optional): Color of skeleton. - The tuple of color should be in BGR order. Defaults to ``None`` - line_width (int, float): The width of lines. Defaults to 1 - radius (int, float): The radius of keypoints. Defaults to 4 - show_keypoint_weight (bool): Whether to adjust the transparency - of keypoints according to their score. Defaults to ``False`` - alpha (int, float): The transparency of bboxes. Defaults to ``0.8`` - det_kpt_color (str, tuple(tuple(int)), optional): Keypoints color - info for detection. Defaults to ``None`` - det_dataset_skeleton (list): Skeleton info for detection. Defaults to - ``None`` - det_dataset_link_color (list): Link color for detection. Defaults to - ``None`` - """ - - def __init__( - self, - name: str = 'visualizer', - image: Optional[np.ndarray] = None, - vis_backends: Optional[Dict] = None, - save_dir: Optional[str] = None, - bbox_color: Optional[Union[str, Tuple[int]]] = 'green', - kpt_color: Optional[Union[str, Tuple[Tuple[int]]]] = 'red', - link_color: Optional[Union[str, Tuple[Tuple[int]]]] = None, - text_color: Optional[Union[str, Tuple[int]]] = (255, 255, 255), - skeleton: Optional[Union[List, Tuple]] = None, - line_width: Union[int, float] = 1, - radius: Union[int, float] = 3, - show_keypoint_weight: bool = False, - backend: str = 'opencv', - alpha: float = 0.8, - det_kpt_color: Optional[Union[str, Tuple[Tuple[int]]]] = None, - det_dataset_skeleton: Optional[Union[str, - Tuple[Tuple[int]]]] = None, - det_dataset_link_color: Optional[np.ndarray] = None): - super().__init__(name, image, vis_backends, save_dir, bbox_color, - kpt_color, link_color, text_color, skeleton, - line_width, radius, show_keypoint_weight, backend, - alpha) - self.det_kpt_color = det_kpt_color - self.det_dataset_skeleton = det_dataset_skeleton - self.det_dataset_link_color = det_dataset_link_color - - def _draw_3d_data_samples( - self, - image: np.ndarray, - pose_samples: PoseDataSample, - draw_gt: bool = True, - kpt_thr: float = 0.3, - num_instances=-1, - axis_azimuth: float = 70, - axis_limit: float = 1.7, - axis_dist: float = 10.0, - axis_elev: float = 15.0, - ): - """Draw keypoints and skeletons (optional) of GT or prediction. - - Args: - image (np.ndarray): The image to draw. - instances (:obj:`InstanceData`): Data structure for - instance-level annotations or predictions. 
- draw_gt (bool): Whether to draw GT PoseDataSample. Default to - ``True`` - kpt_thr (float, optional): Minimum threshold of keypoints - to be shown. Default: 0.3. - num_instances (int): Number of instances to be shown in 3D. If - smaller than 0, all the instances in the pose_result will be - shown. Otherwise, pad or truncate the pose_result to a length - of num_instances. - axis_azimuth (float): axis azimuth angle for 3D visualizations. - axis_dist (float): axis distance for 3D visualizations. - axis_elev (float): axis elevation view angle for 3D visualizations. - axis_limit (float): The axis limit to visualize 3d pose. The xyz - range will be set as: - - x: [x_c - axis_limit/2, x_c + axis_limit/2] - - y: [y_c - axis_limit/2, y_c + axis_limit/2] - - z: [0, axis_limit] - Where x_c, y_c is the mean value of x and y coordinates - - Returns: - Tuple(np.ndarray): the drawn image which channel is RGB. - """ - vis_height, vis_width, _ = image.shape - - if 'pred_instances' in pose_samples: - pred_instances = pose_samples.pred_instances - else: - pred_instances = InstanceData() - if num_instances < 0: - if 'keypoints' in pred_instances: - num_instances = len(pred_instances) - else: - num_instances = 0 - else: - if len(pred_instances) > num_instances: - pred_instances_ = InstanceData() - for k in pred_instances.keys(): - new_val = pred_instances[k][:num_instances] - pred_instances_.set_field(new_val, k) - pred_instances = pred_instances_ - elif num_instances < len(pred_instances): - num_instances = len(pred_instances) - - num_fig = num_instances - if draw_gt: - vis_width *= 2 - num_fig *= 2 - - plt.ioff() - fig = plt.figure( - figsize=(vis_width * num_instances * 0.01, vis_height * 0.01)) - - def _draw_3d_instances_kpts(keypoints, - scores, - keypoints_visible, - fig_idx, - title=None): - - for idx, (kpts, score, visible) in enumerate( - zip(keypoints, scores, keypoints_visible)): - - valid = np.logical_and(score >= kpt_thr, - np.any(~np.isnan(kpts), axis=-1)) - - ax = fig.add_subplot( - 1, num_fig, fig_idx * (idx + 1), projection='3d') - ax.view_init(elev=axis_elev, azim=axis_azimuth) - ax.set_zlim3d([0, axis_limit]) - ax.set_aspect('auto') - ax.set_xticks([]) - ax.set_yticks([]) - ax.set_zticks([]) - ax.set_xticklabels([]) - ax.set_yticklabels([]) - ax.set_zticklabels([]) - ax.scatter([0], [0], [0], marker='o', color='red') - if title: - ax.set_title(f'{title} ({idx})') - ax.dist = axis_dist - - x_c = np.mean(kpts[valid, 0]) if valid.any() else 0 - y_c = np.mean(kpts[valid, 1]) if valid.any() else 0 - - ax.set_xlim3d([x_c - axis_limit / 2, x_c + axis_limit / 2]) - ax.set_ylim3d([y_c - axis_limit / 2, y_c + axis_limit / 2]) - - kpts = np.array(kpts, copy=False) - - if self.kpt_color is None or isinstance(self.kpt_color, str): - kpt_color = [self.kpt_color] * len(kpts) - elif len(self.kpt_color) == len(kpts): - kpt_color = self.kpt_color - else: - raise ValueError( - f'the length of kpt_color ' - f'({len(self.kpt_color)}) does not matches ' - f'that of keypoints ({len(kpts)})') - - kpts = kpts[valid] - x_3d, y_3d, z_3d = np.split(kpts[:, :3], [1, 2], axis=1) - - kpt_color = kpt_color[valid][..., ::-1] / 255. 
- - ax.scatter(x_3d, y_3d, z_3d, marker='o', color=kpt_color) - - for kpt_idx in range(len(x_3d)): - ax.text(x_3d[kpt_idx][0], y_3d[kpt_idx][0], - z_3d[kpt_idx][0], str(kpt_idx)) - - if self.skeleton is not None and self.link_color is not None: - if self.link_color is None or isinstance( - self.link_color, str): - link_color = [self.link_color] * len(self.skeleton) - elif len(self.link_color) == len(self.skeleton): - link_color = self.link_color - else: - raise ValueError( - f'the length of link_color ' - f'({len(self.link_color)}) does not matches ' - f'that of skeleton ({len(self.skeleton)})') - - for sk_id, sk in enumerate(self.skeleton): - sk_indices = [_i for _i in sk] - xs_3d = kpts[sk_indices, 0] - ys_3d = kpts[sk_indices, 1] - zs_3d = kpts[sk_indices, 2] - kpt_score = score[sk_indices] - if kpt_score.min() > kpt_thr: - # matplotlib uses RGB color in [0, 1] value range - _color = link_color[sk_id][::-1] / 255. - ax.plot( - xs_3d, ys_3d, zs_3d, color=_color, zdir='z') - - if 'keypoints' in pred_instances: - keypoints = pred_instances.get('keypoints', - pred_instances.keypoints) - - if 'keypoint_scores' in pred_instances: - scores = pred_instances.keypoint_scores - else: - scores = np.ones(keypoints.shape[:-1]) - - if 'keypoints_visible' in pred_instances: - keypoints_visible = pred_instances.keypoints_visible - else: - keypoints_visible = np.ones(keypoints.shape[:-1]) - - _draw_3d_instances_kpts(keypoints, scores, keypoints_visible, 1, - 'Prediction') - - if draw_gt and 'gt_instances' in pose_samples: - gt_instances = pose_samples.gt_instances - if 'lifting_target' in gt_instances: - keypoints = gt_instances.get('lifting_target', - gt_instances.lifting_target) - scores = np.ones(keypoints.shape[:-1]) - - if 'lifting_target_visible' in gt_instances: - keypoints_visible = gt_instances.lifting_target_visible - else: - keypoints_visible = np.ones(keypoints.shape[:-1]) - - _draw_3d_instances_kpts(keypoints, scores, keypoints_visible, - 2, 'Ground Truth') - - # convert figure to numpy array - fig.tight_layout() - fig.canvas.draw() - - pred_img_data = fig.canvas.tostring_rgb() - pred_img_data = np.frombuffer( - fig.canvas.tostring_rgb(), dtype=np.uint8) - - if not pred_img_data.any(): - pred_img_data = np.full((vis_height, vis_width, 3), 255) - else: - pred_img_data = pred_img_data.reshape(vis_height, - vis_width * num_instances, - -1) - - plt.close(fig) - - return pred_img_data - - def _draw_instances_kpts(self, - image: np.ndarray, - instances: InstanceData, - kpt_thr: float = 0.3, - show_kpt_idx: bool = False, - skeleton_style: str = 'mmpose'): - """Draw keypoints and skeletons (optional) of GT or prediction. - - Args: - image (np.ndarray): The image to draw. - instances (:obj:`InstanceData`): Data structure for - instance-level annotations or predictions. - kpt_thr (float, optional): Minimum threshold of keypoints - to be shown. Default: 0.3. - show_kpt_idx (bool): Whether to show the index of keypoints. - Defaults to ``False`` - skeleton_style (str): Skeleton style selection. Defaults to - ``'mmpose'`` - - Returns: - np.ndarray: the drawn image which channel is RGB. 
- """ - - self.set_image(image) - img_h, img_w, _ = image.shape - - if 'keypoints' in instances: - keypoints = instances.get('transformed_keypoints', - instances.keypoints) - - if 'keypoint_scores' in instances: - scores = instances.keypoint_scores - else: - scores = np.ones(keypoints.shape[:-1]) - - if 'keypoints_visible' in instances: - keypoints_visible = instances.keypoints_visible - else: - keypoints_visible = np.ones(keypoints.shape[:-1]) - - if skeleton_style == 'openpose': - keypoints_info = np.concatenate( - (keypoints, scores[..., None], keypoints_visible[..., - None]), - axis=-1) - # compute neck joint - neck = np.mean(keypoints_info[:, [5, 6]], axis=1) - # neck score when visualizing pred - neck[:, 2:4] = np.logical_and( - keypoints_info[:, 5, 2:4] > kpt_thr, - keypoints_info[:, 6, 2:4] > kpt_thr).astype(int) - new_keypoints_info = np.insert( - keypoints_info, 17, neck, axis=1) - - mmpose_idx = [ - 17, 6, 8, 10, 7, 9, 12, 14, 16, 13, 15, 2, 1, 4, 3 - ] - openpose_idx = [ - 1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17 - ] - new_keypoints_info[:, openpose_idx] = \ - new_keypoints_info[:, mmpose_idx] - keypoints_info = new_keypoints_info - - keypoints, scores, keypoints_visible = keypoints_info[ - ..., :2], keypoints_info[..., 2], keypoints_info[..., 3] - - kpt_color = self.kpt_color - if self.det_kpt_color is not None: - kpt_color = self.det_kpt_color - - for kpts, score, visible in zip(keypoints, scores, - keypoints_visible): - kpts = np.array(kpts, copy=False) - - if kpt_color is None or isinstance(kpt_color, str): - kpt_color = [kpt_color] * len(kpts) - elif len(kpt_color) == len(kpts): - kpt_color = kpt_color - else: - raise ValueError(f'the length of kpt_color ' - f'({len(kpt_color)}) does not matches ' - f'that of keypoints ({len(kpts)})') - - # draw each point on image - for kid, kpt in enumerate(kpts): - if score[kid] < kpt_thr or not visible[ - kid] or kpt_color[kid] is None: - # skip the point that should not be drawn - continue - - color = kpt_color[kid] - if not isinstance(color, str): - color = tuple(int(c) for c in color) - transparency = self.alpha - if self.show_keypoint_weight: - transparency *= max(0, min(1, score[kid])) - self.draw_circles( - kpt, - radius=np.array([self.radius]), - face_colors=color, - edge_colors=color, - alpha=transparency, - line_widths=self.radius) - if show_kpt_idx: - self.draw_texts( - str(kid), - kpt, - colors=color, - font_sizes=self.radius * 3, - vertical_alignments='bottom', - horizontal_alignments='center') - - # draw links - skeleton = self.skeleton - if self.det_dataset_skeleton is not None: - skeleton = self.det_dataset_skeleton - link_color = self.link_color - if self.det_dataset_link_color is not None: - link_color = self.det_dataset_link_color - if skeleton is not None and link_color is not None: - if link_color is None or isinstance(link_color, str): - link_color = [link_color] * len(skeleton) - elif len(link_color) == len(skeleton): - link_color = link_color - else: - raise ValueError( - f'the length of link_color ' - f'({len(link_color)}) does not matches ' - f'that of skeleton ({len(skeleton)})') - - for sk_id, sk in enumerate(skeleton): - pos1 = (int(kpts[sk[0], 0]), int(kpts[sk[0], 1])) - pos2 = (int(kpts[sk[1], 0]), int(kpts[sk[1], 1])) - if not (visible[sk[0]] and visible[sk[1]]): - continue - - if (pos1[0] <= 0 or pos1[0] >= img_w or pos1[1] <= 0 - or pos1[1] >= img_h or pos2[0] <= 0 - or pos2[0] >= img_w or pos2[1] <= 0 - or pos2[1] >= img_h or score[sk[0]] < kpt_thr - or score[sk[1]] < kpt_thr - or 
link_color[sk_id] is None): - # skip the link that should not be drawn - continue - X = np.array((pos1[0], pos2[0])) - Y = np.array((pos1[1], pos2[1])) - color = link_color[sk_id] - if not isinstance(color, str): - color = tuple(int(c) for c in color) - transparency = self.alpha - if self.show_keypoint_weight: - transparency *= max( - 0, min(1, 0.5 * (score[sk[0]] + score[sk[1]]))) - - if skeleton_style == 'openpose': - mX = np.mean(X) - mY = np.mean(Y) - length = ((Y[0] - Y[1])**2 + (X[0] - X[1])**2)**0.5 - angle = math.degrees( - math.atan2(Y[0] - Y[1], X[0] - X[1])) - stickwidth = 2 - polygons = cv2.ellipse2Poly( - (int(mX), int(mY)), - (int(length / 2), int(stickwidth)), int(angle), - 0, 360, 1) - - self.draw_polygons( - polygons, - edge_colors=color, - face_colors=color, - alpha=transparency) - - else: - self.draw_lines( - X, Y, color, line_widths=self.line_width) - - return self.get_image() - - @master_only - def add_datasample(self, - name: str, - image: np.ndarray, - data_sample: PoseDataSample, - det_data_sample: Optional[PoseDataSample] = None, - draw_gt: bool = True, - draw_pred: bool = True, - draw_2d: bool = True, - draw_bbox: bool = False, - show_kpt_idx: bool = False, - skeleton_style: str = 'mmpose', - num_instances: int = -1, - show: bool = False, - wait_time: float = 0, - out_file: Optional[str] = None, - kpt_thr: float = 0.3, - step: int = 0) -> None: - """Draw datasample and save to all backends. - - - If GT and prediction are plotted at the same time, they are - displayed in a stitched image where the left image is the - ground truth and the right image is the prediction. - - If ``show`` is True, all storage backends are ignored, and - the images will be displayed in a local window. - - If ``out_file`` is specified, the drawn image will be - saved to ``out_file``. t is usually used when the display - is not available. - - Args: - name (str): The image identifier - image (np.ndarray): The image to draw - data_sample (:obj:`PoseDataSample`): The 3d data sample - to visualize - det_data_sample (:obj:`PoseDataSample`, optional): The 2d detection - data sample to visualize - draw_gt (bool): Whether to draw GT PoseDataSample. Default to - ``True`` - draw_pred (bool): Whether to draw Prediction PoseDataSample. - Defaults to ``True`` - draw_2d (bool): Whether to draw 2d detection results. Defaults to - ``True`` - draw_bbox (bool): Whether to draw bounding boxes. Default to - ``False`` - show_kpt_idx (bool): Whether to show the index of keypoints. - Defaults to ``False`` - skeleton_style (str): Skeleton style selection. Defaults to - ``'mmpose'`` - num_instances (int): Number of instances to be shown in 3D. If - smaller than 0, all the instances in the pose_result will be - shown. Otherwise, pad or truncate the pose_result to a length - of num_instances. Defaults to -1 - show (bool): Whether to display the drawn image. Default to - ``False`` - wait_time (float): The interval of show (s). Defaults to 0 - out_file (str): Path to output file. Defaults to ``None`` - kpt_thr (float, optional): Minimum threshold of keypoints - to be shown. Default: 0.3. - step (int): Global step value to record. 
Defaults to 0 - """ - - det_img_data = None - gt_img_data = None - - if draw_2d: - det_img_data = image.copy() - - # draw bboxes & keypoints - if 'pred_instances' in det_data_sample: - det_img_data = self._draw_instances_kpts( - det_img_data, det_data_sample.pred_instances, kpt_thr, - show_kpt_idx, skeleton_style) - if draw_bbox: - det_img_data = self._draw_instances_bbox( - det_img_data, det_data_sample.pred_instances) - - pred_img_data = self._draw_3d_data_samples( - image.copy(), - data_sample, - draw_gt=draw_gt, - num_instances=num_instances) - - # merge visualization results - if det_img_data is not None and gt_img_data is not None: - drawn_img = np.concatenate( - (det_img_data, pred_img_data, gt_img_data), axis=1) - elif det_img_data is not None: - drawn_img = np.concatenate((det_img_data, pred_img_data), axis=1) - elif gt_img_data is not None: - drawn_img = np.concatenate((det_img_data, gt_img_data), axis=1) - else: - drawn_img = pred_img_data - - # It is convenient for users to obtain the drawn image. - # For example, the user wants to obtain the drawn image and - # save it as a video during video inference. - self.set_image(drawn_img) - - if show: - self.show(drawn_img, win_name=name, wait_time=wait_time) - - if out_file is not None: - mmcv.imwrite(drawn_img[..., ::-1], out_file) - else: - # save drawn_img to backends - self.add_image(name, drawn_img, step) - - return self.get_image() +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import Dict, List, Optional, Tuple, Union + +import cv2 +import mmcv +import numpy as np +from matplotlib import pyplot as plt +from mmengine.dist import master_only +from mmengine.structures import InstanceData + +from mmpose.registry import VISUALIZERS +from mmpose.structures import PoseDataSample +from . import PoseLocalVisualizer + + +@VISUALIZERS.register_module() +class Pose3dLocalVisualizer(PoseLocalVisualizer): + """MMPose 3d Local Visualizer. + + Args: + name (str): Name of the instance. Defaults to 'visualizer'. + image (np.ndarray, optional): the origin image to draw. The format + should be RGB. Defaults to ``None`` + vis_backends (list, optional): Visual backend config list. Defaults to + ``None`` + save_dir (str, optional): Save file dir for all storage backends. + If it is ``None``, the backend storage will not save any data. + Defaults to ``None`` + bbox_color (str, tuple(int), optional): Color of bbox lines. + The tuple of color should be in BGR order. Defaults to ``'green'`` + kpt_color (str, tuple(tuple(int)), optional): Color of keypoints. + The tuple of color should be in BGR order. Defaults to ``'red'`` + link_color (str, tuple(tuple(int)), optional): Color of skeleton. + The tuple of color should be in BGR order. Defaults to ``None`` + line_width (int, float): The width of lines. Defaults to 1 + radius (int, float): The radius of keypoints. Defaults to 4 + show_keypoint_weight (bool): Whether to adjust the transparency + of keypoints according to their score. Defaults to ``False`` + alpha (int, float): The transparency of bboxes. Defaults to ``0.8`` + det_kpt_color (str, tuple(tuple(int)), optional): Keypoints color + info for detection. Defaults to ``None`` + det_dataset_skeleton (list): Skeleton info for detection. Defaults to + ``None`` + det_dataset_link_color (list): Link color for detection. 
Defaults to + ``None`` + """ + + def __init__( + self, + name: str = 'visualizer', + image: Optional[np.ndarray] = None, + vis_backends: Optional[Dict] = None, + save_dir: Optional[str] = None, + bbox_color: Optional[Union[str, Tuple[int]]] = 'green', + kpt_color: Optional[Union[str, Tuple[Tuple[int]]]] = 'red', + link_color: Optional[Union[str, Tuple[Tuple[int]]]] = None, + text_color: Optional[Union[str, Tuple[int]]] = (255, 255, 255), + skeleton: Optional[Union[List, Tuple]] = None, + line_width: Union[int, float] = 1, + radius: Union[int, float] = 3, + show_keypoint_weight: bool = False, + backend: str = 'opencv', + alpha: float = 0.8, + det_kpt_color: Optional[Union[str, Tuple[Tuple[int]]]] = None, + det_dataset_skeleton: Optional[Union[str, + Tuple[Tuple[int]]]] = None, + det_dataset_link_color: Optional[np.ndarray] = None): + super().__init__(name, image, vis_backends, save_dir, bbox_color, + kpt_color, link_color, text_color, skeleton, + line_width, radius, show_keypoint_weight, backend, + alpha) + self.det_kpt_color = det_kpt_color + self.det_dataset_skeleton = det_dataset_skeleton + self.det_dataset_link_color = det_dataset_link_color + + def _draw_3d_data_samples( + self, + image: np.ndarray, + pose_samples: PoseDataSample, + draw_gt: bool = True, + kpt_thr: float = 0.3, + num_instances=-1, + axis_azimuth: float = 70, + axis_limit: float = 1.7, + axis_dist: float = 10.0, + axis_elev: float = 15.0, + ): + """Draw keypoints and skeletons (optional) of GT or prediction. + + Args: + image (np.ndarray): The image to draw. + instances (:obj:`InstanceData`): Data structure for + instance-level annotations or predictions. + draw_gt (bool): Whether to draw GT PoseDataSample. Default to + ``True`` + kpt_thr (float, optional): Minimum threshold of keypoints + to be shown. Default: 0.3. + num_instances (int): Number of instances to be shown in 3D. If + smaller than 0, all the instances in the pose_result will be + shown. Otherwise, pad or truncate the pose_result to a length + of num_instances. + axis_azimuth (float): axis azimuth angle for 3D visualizations. + axis_dist (float): axis distance for 3D visualizations. + axis_elev (float): axis elevation view angle for 3D visualizations. + axis_limit (float): The axis limit to visualize 3d pose. The xyz + range will be set as: + - x: [x_c - axis_limit/2, x_c + axis_limit/2] + - y: [y_c - axis_limit/2, y_c + axis_limit/2] + - z: [0, axis_limit] + Where x_c, y_c is the mean value of x and y coordinates + + Returns: + Tuple(np.ndarray): the drawn image which channel is RGB. 
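+
+        For example (illustrative numbers), with ``axis_limit=1.7`` and a
+        mean x-coordinate of the valid keypoints ``x_c = 0.2``, the x-axis
+        spans ``[-0.65, 1.05]``, while the z-axis always spans
+        ``[0, axis_limit] = [0, 1.7]``.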
+ """ + vis_height, vis_width, _ = image.shape + + if 'pred_instances' in pose_samples: + pred_instances = pose_samples.pred_instances + else: + pred_instances = InstanceData() + if num_instances < 0: + if 'keypoints' in pred_instances: + num_instances = len(pred_instances) + else: + num_instances = 0 + else: + if len(pred_instances) > num_instances: + pred_instances_ = InstanceData() + for k in pred_instances.keys(): + new_val = pred_instances[k][:num_instances] + pred_instances_.set_field(new_val, k) + pred_instances = pred_instances_ + elif num_instances < len(pred_instances): + num_instances = len(pred_instances) + + num_fig = num_instances + if draw_gt: + vis_width *= 2 + num_fig *= 2 + + plt.ioff() + fig = plt.figure( + figsize=(vis_width * num_instances * 0.01, vis_height * 0.01)) + + def _draw_3d_instances_kpts(keypoints, + scores, + keypoints_visible, + fig_idx, + title=None): + + for idx, (kpts, score, visible) in enumerate( + zip(keypoints, scores, keypoints_visible)): + + valid = np.logical_and(score >= kpt_thr, + np.any(~np.isnan(kpts), axis=-1)) + + ax = fig.add_subplot( + 1, num_fig, fig_idx * (idx + 1), projection='3d') + ax.view_init(elev=axis_elev, azim=axis_azimuth) + ax.set_zlim3d([0, axis_limit]) + ax.set_aspect('auto') + ax.set_xticks([]) + ax.set_yticks([]) + ax.set_zticks([]) + ax.set_xticklabels([]) + ax.set_yticklabels([]) + ax.set_zticklabels([]) + ax.scatter([0], [0], [0], marker='o', color='red') + if title: + ax.set_title(f'{title} ({idx})') + ax.dist = axis_dist + + x_c = np.mean(kpts[valid, 0]) if valid.any() else 0 + y_c = np.mean(kpts[valid, 1]) if valid.any() else 0 + + ax.set_xlim3d([x_c - axis_limit / 2, x_c + axis_limit / 2]) + ax.set_ylim3d([y_c - axis_limit / 2, y_c + axis_limit / 2]) + + kpts = np.array(kpts, copy=False) + + if self.kpt_color is None or isinstance(self.kpt_color, str): + kpt_color = [self.kpt_color] * len(kpts) + elif len(self.kpt_color) == len(kpts): + kpt_color = self.kpt_color + else: + raise ValueError( + f'the length of kpt_color ' + f'({len(self.kpt_color)}) does not matches ' + f'that of keypoints ({len(kpts)})') + + kpts = kpts[valid] + x_3d, y_3d, z_3d = np.split(kpts[:, :3], [1, 2], axis=1) + + kpt_color = kpt_color[valid][..., ::-1] / 255. + + ax.scatter(x_3d, y_3d, z_3d, marker='o', color=kpt_color) + + for kpt_idx in range(len(x_3d)): + ax.text(x_3d[kpt_idx][0], y_3d[kpt_idx][0], + z_3d[kpt_idx][0], str(kpt_idx)) + + if self.skeleton is not None and self.link_color is not None: + if self.link_color is None or isinstance( + self.link_color, str): + link_color = [self.link_color] * len(self.skeleton) + elif len(self.link_color) == len(self.skeleton): + link_color = self.link_color + else: + raise ValueError( + f'the length of link_color ' + f'({len(self.link_color)}) does not matches ' + f'that of skeleton ({len(self.skeleton)})') + + for sk_id, sk in enumerate(self.skeleton): + sk_indices = [_i for _i in sk] + xs_3d = kpts[sk_indices, 0] + ys_3d = kpts[sk_indices, 1] + zs_3d = kpts[sk_indices, 2] + kpt_score = score[sk_indices] + if kpt_score.min() > kpt_thr: + # matplotlib uses RGB color in [0, 1] value range + _color = link_color[sk_id][::-1] / 255. 
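+                            # plot the skeleton link as a 3D line segment
+                            # between the two connected joints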
+ ax.plot( + xs_3d, ys_3d, zs_3d, color=_color, zdir='z') + + if 'keypoints' in pred_instances: + keypoints = pred_instances.get('keypoints', + pred_instances.keypoints) + + if 'keypoint_scores' in pred_instances: + scores = pred_instances.keypoint_scores + else: + scores = np.ones(keypoints.shape[:-1]) + + if 'keypoints_visible' in pred_instances: + keypoints_visible = pred_instances.keypoints_visible + else: + keypoints_visible = np.ones(keypoints.shape[:-1]) + + _draw_3d_instances_kpts(keypoints, scores, keypoints_visible, 1, + 'Prediction') + + if draw_gt and 'gt_instances' in pose_samples: + gt_instances = pose_samples.gt_instances + if 'lifting_target' in gt_instances: + keypoints = gt_instances.get('lifting_target', + gt_instances.lifting_target) + scores = np.ones(keypoints.shape[:-1]) + + if 'lifting_target_visible' in gt_instances: + keypoints_visible = gt_instances.lifting_target_visible + else: + keypoints_visible = np.ones(keypoints.shape[:-1]) + + _draw_3d_instances_kpts(keypoints, scores, keypoints_visible, + 2, 'Ground Truth') + + # convert figure to numpy array + fig.tight_layout() + fig.canvas.draw() + + pred_img_data = fig.canvas.tostring_rgb() + pred_img_data = np.frombuffer( + fig.canvas.tostring_rgb(), dtype=np.uint8) + + if not pred_img_data.any(): + pred_img_data = np.full((vis_height, vis_width, 3), 255) + else: + pred_img_data = pred_img_data.reshape(vis_height, + vis_width * num_instances, + -1) + + plt.close(fig) + + return pred_img_data + + def _draw_instances_kpts(self, + image: np.ndarray, + instances: InstanceData, + kpt_thr: float = 0.3, + show_kpt_idx: bool = False, + skeleton_style: str = 'mmpose'): + """Draw keypoints and skeletons (optional) of GT or prediction. + + Args: + image (np.ndarray): The image to draw. + instances (:obj:`InstanceData`): Data structure for + instance-level annotations or predictions. + kpt_thr (float, optional): Minimum threshold of keypoints + to be shown. Default: 0.3. + show_kpt_idx (bool): Whether to show the index of keypoints. + Defaults to ``False`` + skeleton_style (str): Skeleton style selection. Defaults to + ``'mmpose'`` + + Returns: + np.ndarray: the drawn image which channel is RGB. 
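+
+        Note that if ``det_kpt_color``, ``det_dataset_skeleton`` or
+        ``det_dataset_link_color`` were passed to the visualizer, they
+        override ``kpt_color``, ``skeleton`` and ``link_color`` respectively
+        when drawing these 2D detection results.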
+ """ + + self.set_image(image) + img_h, img_w, _ = image.shape + + if 'keypoints' in instances: + keypoints = instances.get('transformed_keypoints', + instances.keypoints) + + if 'keypoint_scores' in instances: + scores = instances.keypoint_scores + else: + scores = np.ones(keypoints.shape[:-1]) + + if 'keypoints_visible' in instances: + keypoints_visible = instances.keypoints_visible + else: + keypoints_visible = np.ones(keypoints.shape[:-1]) + + if skeleton_style == 'openpose': + keypoints_info = np.concatenate( + (keypoints, scores[..., None], keypoints_visible[..., + None]), + axis=-1) + # compute neck joint + neck = np.mean(keypoints_info[:, [5, 6]], axis=1) + # neck score when visualizing pred + neck[:, 2:4] = np.logical_and( + keypoints_info[:, 5, 2:4] > kpt_thr, + keypoints_info[:, 6, 2:4] > kpt_thr).astype(int) + new_keypoints_info = np.insert( + keypoints_info, 17, neck, axis=1) + + mmpose_idx = [ + 17, 6, 8, 10, 7, 9, 12, 14, 16, 13, 15, 2, 1, 4, 3 + ] + openpose_idx = [ + 1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17 + ] + new_keypoints_info[:, openpose_idx] = \ + new_keypoints_info[:, mmpose_idx] + keypoints_info = new_keypoints_info + + keypoints, scores, keypoints_visible = keypoints_info[ + ..., :2], keypoints_info[..., 2], keypoints_info[..., 3] + + kpt_color = self.kpt_color + if self.det_kpt_color is not None: + kpt_color = self.det_kpt_color + + for kpts, score, visible in zip(keypoints, scores, + keypoints_visible): + kpts = np.array(kpts, copy=False) + + if kpt_color is None or isinstance(kpt_color, str): + kpt_color = [kpt_color] * len(kpts) + elif len(kpt_color) == len(kpts): + kpt_color = kpt_color + else: + raise ValueError(f'the length of kpt_color ' + f'({len(kpt_color)}) does not matches ' + f'that of keypoints ({len(kpts)})') + + # draw each point on image + for kid, kpt in enumerate(kpts): + if score[kid] < kpt_thr or not visible[ + kid] or kpt_color[kid] is None: + # skip the point that should not be drawn + continue + + color = kpt_color[kid] + if not isinstance(color, str): + color = tuple(int(c) for c in color) + transparency = self.alpha + if self.show_keypoint_weight: + transparency *= max(0, min(1, score[kid])) + self.draw_circles( + kpt, + radius=np.array([self.radius]), + face_colors=color, + edge_colors=color, + alpha=transparency, + line_widths=self.radius) + if show_kpt_idx: + self.draw_texts( + str(kid), + kpt, + colors=color, + font_sizes=self.radius * 3, + vertical_alignments='bottom', + horizontal_alignments='center') + + # draw links + skeleton = self.skeleton + if self.det_dataset_skeleton is not None: + skeleton = self.det_dataset_skeleton + link_color = self.link_color + if self.det_dataset_link_color is not None: + link_color = self.det_dataset_link_color + if skeleton is not None and link_color is not None: + if link_color is None or isinstance(link_color, str): + link_color = [link_color] * len(skeleton) + elif len(link_color) == len(skeleton): + link_color = link_color + else: + raise ValueError( + f'the length of link_color ' + f'({len(link_color)}) does not matches ' + f'that of skeleton ({len(skeleton)})') + + for sk_id, sk in enumerate(skeleton): + pos1 = (int(kpts[sk[0], 0]), int(kpts[sk[0], 1])) + pos2 = (int(kpts[sk[1], 0]), int(kpts[sk[1], 1])) + if not (visible[sk[0]] and visible[sk[1]]): + continue + + if (pos1[0] <= 0 or pos1[0] >= img_w or pos1[1] <= 0 + or pos1[1] >= img_h or pos2[0] <= 0 + or pos2[0] >= img_w or pos2[1] <= 0 + or pos2[1] >= img_h or score[sk[0]] < kpt_thr + or score[sk[1]] < kpt_thr + or 
link_color[sk_id] is None): + # skip the link that should not be drawn + continue + X = np.array((pos1[0], pos2[0])) + Y = np.array((pos1[1], pos2[1])) + color = link_color[sk_id] + if not isinstance(color, str): + color = tuple(int(c) for c in color) + transparency = self.alpha + if self.show_keypoint_weight: + transparency *= max( + 0, min(1, 0.5 * (score[sk[0]] + score[sk[1]]))) + + if skeleton_style == 'openpose': + mX = np.mean(X) + mY = np.mean(Y) + length = ((Y[0] - Y[1])**2 + (X[0] - X[1])**2)**0.5 + angle = math.degrees( + math.atan2(Y[0] - Y[1], X[0] - X[1])) + stickwidth = 2 + polygons = cv2.ellipse2Poly( + (int(mX), int(mY)), + (int(length / 2), int(stickwidth)), int(angle), + 0, 360, 1) + + self.draw_polygons( + polygons, + edge_colors=color, + face_colors=color, + alpha=transparency) + + else: + self.draw_lines( + X, Y, color, line_widths=self.line_width) + + return self.get_image() + + @master_only + def add_datasample(self, + name: str, + image: np.ndarray, + data_sample: PoseDataSample, + det_data_sample: Optional[PoseDataSample] = None, + draw_gt: bool = True, + draw_pred: bool = True, + draw_2d: bool = True, + draw_bbox: bool = False, + show_kpt_idx: bool = False, + skeleton_style: str = 'mmpose', + num_instances: int = -1, + show: bool = False, + wait_time: float = 0, + out_file: Optional[str] = None, + kpt_thr: float = 0.3, + step: int = 0) -> None: + """Draw datasample and save to all backends. + + - If GT and prediction are plotted at the same time, they are + displayed in a stitched image where the left image is the + ground truth and the right image is the prediction. + - If ``show`` is True, all storage backends are ignored, and + the images will be displayed in a local window. + - If ``out_file`` is specified, the drawn image will be + saved to ``out_file``. t is usually used when the display + is not available. + + Args: + name (str): The image identifier + image (np.ndarray): The image to draw + data_sample (:obj:`PoseDataSample`): The 3d data sample + to visualize + det_data_sample (:obj:`PoseDataSample`, optional): The 2d detection + data sample to visualize + draw_gt (bool): Whether to draw GT PoseDataSample. Default to + ``True`` + draw_pred (bool): Whether to draw Prediction PoseDataSample. + Defaults to ``True`` + draw_2d (bool): Whether to draw 2d detection results. Defaults to + ``True`` + draw_bbox (bool): Whether to draw bounding boxes. Default to + ``False`` + show_kpt_idx (bool): Whether to show the index of keypoints. + Defaults to ``False`` + skeleton_style (str): Skeleton style selection. Defaults to + ``'mmpose'`` + num_instances (int): Number of instances to be shown in 3D. If + smaller than 0, all the instances in the pose_result will be + shown. Otherwise, pad or truncate the pose_result to a length + of num_instances. Defaults to -1 + show (bool): Whether to display the drawn image. Default to + ``False`` + wait_time (float): The interval of show (s). Defaults to 0 + out_file (str): Path to output file. Defaults to ``None`` + kpt_thr (float, optional): Minimum threshold of keypoints + to be shown. Default: 0.3. + step (int): Global step value to record. 
Defaults to 0 + """ + + det_img_data = None + gt_img_data = None + + if draw_2d: + det_img_data = image.copy() + + # draw bboxes & keypoints + if 'pred_instances' in det_data_sample: + det_img_data = self._draw_instances_kpts( + det_img_data, det_data_sample.pred_instances, kpt_thr, + show_kpt_idx, skeleton_style) + if draw_bbox: + det_img_data = self._draw_instances_bbox( + det_img_data, det_data_sample.pred_instances) + + pred_img_data = self._draw_3d_data_samples( + image.copy(), + data_sample, + draw_gt=draw_gt, + num_instances=num_instances) + + # merge visualization results + if det_img_data is not None and gt_img_data is not None: + drawn_img = np.concatenate( + (det_img_data, pred_img_data, gt_img_data), axis=1) + elif det_img_data is not None: + drawn_img = np.concatenate((det_img_data, pred_img_data), axis=1) + elif gt_img_data is not None: + drawn_img = np.concatenate((det_img_data, gt_img_data), axis=1) + else: + drawn_img = pred_img_data + + # It is convenient for users to obtain the drawn image. + # For example, the user wants to obtain the drawn image and + # save it as a video during video inference. + self.set_image(drawn_img) + + if show: + self.show(drawn_img, win_name=name, wait_time=wait_time) + + if out_file is not None: + mmcv.imwrite(drawn_img[..., ::-1], out_file) + else: + # save drawn_img to backends + self.add_image(name, drawn_img, step) + + return self.get_image() diff --git a/mmpose/visualization/opencv_backend_visualizer.py b/mmpose/visualization/opencv_backend_visualizer.py index 1c17506640..3d4753f733 100644 --- a/mmpose/visualization/opencv_backend_visualizer.py +++ b/mmpose/visualization/opencv_backend_visualizer.py @@ -1,464 +1,464 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import List, Optional, Union - -import cv2 -import mmcv -import numpy as np -import torch -from mmengine.dist import master_only -from mmengine.visualization import Visualizer - - -class OpencvBackendVisualizer(Visualizer): - """Base visualizer with opencv backend support. - - Args: - name (str): Name of the instance. Defaults to 'visualizer'. - image (np.ndarray, optional): the origin image to draw. The format - should be RGB. Defaults to None. - vis_backends (list, optional): Visual backend config list. - Defaults to None. - save_dir (str, optional): Save file dir for all storage backends. - If it is None, the backend storage will not save any data. - fig_save_cfg (dict): Keyword parameters of figure for saving. - Defaults to empty dict. - fig_show_cfg (dict): Keyword parameters of figure for showing. - Defaults to empty dict. - backend (str): Backend used to draw elements on the image and display - the image. Defaults to 'matplotlib'. - alpha (int, float): The transparency of bboxes. Defaults to ``1.0`` - """ - - def __init__(self, - name='visualizer', - backend: str = 'matplotlib', - *args, - **kwargs): - super().__init__(name, *args, **kwargs) - assert backend in ('opencv', 'matplotlib'), f'the argument ' \ - f'\'backend\' must be either \'opencv\' or \'matplotlib\', ' \ - f'but got \'{backend}\'.' - self.backend = backend - - @master_only - def set_image(self, image: np.ndarray) -> None: - """Set the image to draw. - - Args: - image (np.ndarray): The image to draw. - backend (str): The backend to save the image. 
- """ - assert image is not None - image = image.astype('uint8') - self._image = image - self.width, self.height = image.shape[1], image.shape[0] - self._default_font_size = max( - np.sqrt(self.height * self.width) // 90, 10) - - if self.backend == 'matplotlib': - # add a small 1e-2 to avoid precision lost due to matplotlib's - # truncation (https://github.com/matplotlib/matplotlib/issues/15363) # noqa - self.fig_save.set_size_inches( # type: ignore - (self.width + 1e-2) / self.dpi, - (self.height + 1e-2) / self.dpi) - # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig) - self.ax_save.cla() - self.ax_save.axis(False) - self.ax_save.imshow( - image, - extent=(0, self.width, self.height, 0), - interpolation='none') - - @master_only - def get_image(self) -> np.ndarray: - """Get the drawn image. The format is RGB. - - Returns: - np.ndarray: the drawn image which channel is RGB. - """ - assert self._image is not None, 'Please set image using `set_image`' - if self.backend == 'matplotlib': - return super().get_image() - else: - return self._image - - @master_only - def draw_circles(self, - center: Union[np.ndarray, torch.Tensor], - radius: Union[np.ndarray, torch.Tensor], - face_colors: Union[str, tuple, List[str], - List[tuple]] = 'none', - alpha: float = 1.0, - **kwargs) -> 'Visualizer': - """Draw single or multiple circles. - - Args: - center (Union[np.ndarray, torch.Tensor]): The x coordinate of - each line' start and end points. - radius (Union[np.ndarray, torch.Tensor]): The y coordinate of - each line' start and end points. - edge_colors (Union[str, tuple, List[str], List[tuple]]): The - colors of circles. ``colors`` can have the same length with - lines or just single value. If ``colors`` is single value, - all the lines will have the same colors. Reference to - https://matplotlib.org/stable/gallery/color/named_colors.html - for more details. Defaults to 'g. - line_styles (Union[str, List[str]]): The linestyle - of lines. ``line_styles`` can have the same length with - texts or just single value. If ``line_styles`` is single - value, all the lines will have the same linestyle. - Reference to - https://matplotlib.org/stable/api/collections_api.html?highlight=collection#matplotlib.collections.AsteriskPolygonCollection.set_linestyle - for more details. Defaults to '-'. - line_widths (Union[Union[int, float], List[Union[int, float]]]): - The linewidth of lines. ``line_widths`` can have - the same length with lines or just single value. - If ``line_widths`` is single value, all the lines will - have the same linewidth. Defaults to 2. - face_colors (Union[str, tuple, List[str], List[tuple]]): - The face colors. Defaults to None. - alpha (Union[int, float]): The transparency of circles. - Defaults to 0.8. 
- """ - if self.backend == 'matplotlib': - super().draw_circles( - center=center, - radius=radius, - face_colors=face_colors, - alpha=alpha, - **kwargs) - elif self.backend == 'opencv': - if isinstance(face_colors, str): - face_colors = mmcv.color_val(face_colors) - - if alpha == 1.0: - self._image = cv2.circle(self._image, - (int(center[0]), int(center[1])), - int(radius), face_colors, -1) - else: - img = cv2.circle(self._image.copy(), - (int(center[0]), int(center[1])), int(radius), - face_colors, -1) - self._image = cv2.addWeighted(self._image, 1 - alpha, img, - alpha, 0) - else: - raise ValueError(f'got unsupported backend {self.backend}') - - @master_only - def draw_texts( - self, - texts: Union[str, List[str]], - positions: Union[np.ndarray, torch.Tensor], - font_sizes: Optional[Union[int, List[int]]] = None, - colors: Union[str, tuple, List[str], List[tuple]] = 'g', - vertical_alignments: Union[str, List[str]] = 'top', - horizontal_alignments: Union[str, List[str]] = 'left', - bboxes: Optional[Union[dict, List[dict]]] = None, - **kwargs, - ) -> 'Visualizer': - """Draw single or multiple text boxes. - - Args: - texts (Union[str, List[str]]): Texts to draw. - positions (Union[np.ndarray, torch.Tensor]): The position to draw - the texts, which should have the same length with texts and - each dim contain x and y. - font_sizes (Union[int, List[int]], optional): The font size of - texts. ``font_sizes`` can have the same length with texts or - just single value. If ``font_sizes`` is single value, all the - texts will have the same font size. Defaults to None. - colors (Union[str, tuple, List[str], List[tuple]]): The colors - of texts. ``colors`` can have the same length with texts or - just single value. If ``colors`` is single value, all the - texts will have the same colors. Reference to - https://matplotlib.org/stable/gallery/color/named_colors.html - for more details. Defaults to 'g. - vertical_alignments (Union[str, List[str]]): The verticalalignment - of texts. verticalalignment controls whether the y positional - argument for the text indicates the bottom, center or top side - of the text bounding box. - ``vertical_alignments`` can have the same length with - texts or just single value. If ``vertical_alignments`` is - single value, all the texts will have the same - verticalalignment. verticalalignment can be 'center' or - 'top', 'bottom' or 'baseline'. Defaults to 'top'. - horizontal_alignments (Union[str, List[str]]): The - horizontalalignment of texts. Horizontalalignment controls - whether the x positional argument for the text indicates the - left, center or right side of the text bounding box. - ``horizontal_alignments`` can have - the same length with texts or just single value. - If ``horizontal_alignments`` is single value, all the texts - will have the same horizontalalignment. Horizontalalignment - can be 'center','right' or 'left'. Defaults to 'left'. - font_families (Union[str, List[str]]): The font family of - texts. ``font_families`` can have the same length with texts or - just single value. If ``font_families`` is single value, all - the texts will have the same font family. - font_familiy can be 'serif', 'sans-serif', 'cursive', 'fantasy' - or 'monospace'. Defaults to 'sans-serif'. - bboxes (Union[dict, List[dict]], optional): The bounding box of the - texts. If bboxes is None, there are no bounding box around - texts. ``bboxes`` can have the same length with texts or - just single value. If ``bboxes`` is single value, all - the texts will have the same bbox. 
Reference to - https://matplotlib.org/stable/api/_as_gen/matplotlib.patches.FancyBboxPatch.html#matplotlib.patches.FancyBboxPatch - for more details. Defaults to None. - font_properties (Union[FontProperties, List[FontProperties]], optional): - The font properties of texts. FontProperties is - a ``font_manager.FontProperties()`` object. - If you want to draw Chinese texts, you need to prepare - a font file that can show Chinese characters properly. - For example: `simhei.ttf`, `simsun.ttc`, `simkai.ttf` and so on. - Then set ``font_properties=matplotlib.font_manager.FontProperties(fname='path/to/font_file')`` - ``font_properties`` can have the same length with texts or - just single value. If ``font_properties`` is single value, - all the texts will have the same font properties. - Defaults to None. - `New in version 0.6.0.` - """ # noqa: E501 - - if self.backend == 'matplotlib': - super().draw_texts( - texts=texts, - positions=positions, - font_sizes=font_sizes, - colors=colors, - vertical_alignments=vertical_alignments, - horizontal_alignments=horizontal_alignments, - bboxes=bboxes, - **kwargs) - - elif self.backend == 'opencv': - font_scale = max(0.1, font_sizes / 30) - thickness = max(1, font_sizes // 15) - - text_size, text_baseline = cv2.getTextSize(texts, - cv2.FONT_HERSHEY_DUPLEX, - font_scale, thickness) - - x = int(positions[0]) - if horizontal_alignments == 'right': - x = max(0, x - text_size[0]) - y = int(positions[1]) - if vertical_alignments == 'top': - y = min(self.height, y + text_size[1]) - - if bboxes is not None: - bbox_color = bboxes[0]['facecolor'] - if isinstance(bbox_color, str): - bbox_color = mmcv.color_val(bbox_color) - - y = y - text_baseline // 2 - self._image = cv2.rectangle( - self._image, (x, y - text_size[1] - text_baseline // 2), - (x + text_size[0], y + text_baseline // 2), bbox_color, - cv2.FILLED) - - self._image = cv2.putText(self._image, texts, (x, y), - cv2.FONT_HERSHEY_SIMPLEX, font_scale, - colors, thickness - 1) - else: - raise ValueError(f'got unsupported backend {self.backend}') - - @master_only - def draw_bboxes(self, - bboxes: Union[np.ndarray, torch.Tensor], - edge_colors: Union[str, tuple, List[str], - List[tuple]] = 'g', - line_widths: Union[Union[int, float], - List[Union[int, float]]] = 2, - **kwargs) -> 'Visualizer': - """Draw single or multiple bboxes. - - Args: - bboxes (Union[np.ndarray, torch.Tensor]): The bboxes to draw with - the format of(x1,y1,x2,y2). - edge_colors (Union[str, tuple, List[str], List[tuple]]): The - colors of bboxes. ``colors`` can have the same length with - lines or just single value. If ``colors`` is single value, all - the lines will have the same colors. Refer to `matplotlib. - colors` for full list of formats that are accepted. - Defaults to 'g'. - line_styles (Union[str, List[str]]): The linestyle - of lines. ``line_styles`` can have the same length with - texts or just single value. If ``line_styles`` is single - value, all the lines will have the same linestyle. - Reference to - https://matplotlib.org/stable/api/collections_api.html?highlight=collection#matplotlib.collections.AsteriskPolygonCollection.set_linestyle - for more details. Defaults to '-'. - line_widths (Union[Union[int, float], List[Union[int, float]]]): - The linewidth of lines. ``line_widths`` can have - the same length with lines or just single value. - If ``line_widths`` is single value, all the lines will - have the same linewidth. Defaults to 2. - face_colors (Union[str, tuple, List[str], List[tuple]]): - The face colors. 
Defaults to None. - alpha (Union[int, float]): The transparency of bboxes. - Defaults to 0.8. - """ - if self.backend == 'matplotlib': - super().draw_bboxes( - bboxes=bboxes, - edge_colors=edge_colors, - line_widths=line_widths, - **kwargs) - - elif self.backend == 'opencv': - self._image = mmcv.imshow_bboxes( - self._image, - bboxes, - edge_colors, - top_k=-1, - thickness=line_widths, - show=False) - else: - raise ValueError(f'got unsupported backend {self.backend}') - - @master_only - def draw_lines(self, - x_datas: Union[np.ndarray, torch.Tensor], - y_datas: Union[np.ndarray, torch.Tensor], - colors: Union[str, tuple, List[str], List[tuple]] = 'g', - line_widths: Union[Union[int, float], - List[Union[int, float]]] = 2, - **kwargs) -> 'Visualizer': - """Draw single or multiple line segments. - - Args: - x_datas (Union[np.ndarray, torch.Tensor]): The x coordinate of - each line' start and end points. - y_datas (Union[np.ndarray, torch.Tensor]): The y coordinate of - each line' start and end points. - colors (Union[str, tuple, List[str], List[tuple]]): The colors of - lines. ``colors`` can have the same length with lines or just - single value. If ``colors`` is single value, all the lines - will have the same colors. Reference to - https://matplotlib.org/stable/gallery/color/named_colors.html - for more details. Defaults to 'g'. - line_styles (Union[str, List[str]]): The linestyle - of lines. ``line_styles`` can have the same length with - texts or just single value. If ``line_styles`` is single - value, all the lines will have the same linestyle. - Reference to - https://matplotlib.org/stable/api/collections_api.html?highlight=collection#matplotlib.collections.AsteriskPolygonCollection.set_linestyle - for more details. Defaults to '-'. - line_widths (Union[Union[int, float], List[Union[int, float]]]): - The linewidth of lines. ``line_widths`` can have - the same length with lines or just single value. - If ``line_widths`` is single value, all the lines will - have the same linewidth. Defaults to 2. - """ - if self.backend == 'matplotlib': - super().draw_lines( - x_datas=x_datas, - y_datas=y_datas, - colors=colors, - line_widths=line_widths, - **kwargs) - - elif self.backend == 'opencv': - - self._image = cv2.line( - self._image, (x_datas[0], y_datas[0]), - (x_datas[1], y_datas[1]), - colors, - thickness=line_widths) - else: - raise ValueError(f'got unsupported backend {self.backend}') - - @master_only - def draw_polygons(self, - polygons: Union[Union[np.ndarray, torch.Tensor], - List[Union[np.ndarray, torch.Tensor]]], - edge_colors: Union[str, tuple, List[str], - List[tuple]] = 'g', - alpha: float = 1.0, - **kwargs) -> 'Visualizer': - """Draw single or multiple bboxes. - - Args: - polygons (Union[Union[np.ndarray, torch.Tensor],\ - List[Union[np.ndarray, torch.Tensor]]]): The polygons to draw - with the format of (x1,y1,x2,y2,...,xn,yn). - edge_colors (Union[str, tuple, List[str], List[tuple]]): The - colors of polygons. ``colors`` can have the same length with - lines or just single value. If ``colors`` is single value, - all the lines will have the same colors. Refer to - `matplotlib.colors` for full list of formats that are accepted. - Defaults to 'g. - line_styles (Union[str, List[str]]): The linestyle - of lines. ``line_styles`` can have the same length with - texts or just single value. If ``line_styles`` is single - value, all the lines will have the same linestyle. 
- Reference to - https://matplotlib.org/stable/api/collections_api.html?highlight=collection#matplotlib.collections.AsteriskPolygonCollection.set_linestyle - for more details. Defaults to '-'. - line_widths (Union[Union[int, float], List[Union[int, float]]]): - The linewidth of lines. ``line_widths`` can have - the same length with lines or just single value. - If ``line_widths`` is single value, all the lines will - have the same linewidth. Defaults to 2. - face_colors (Union[str, tuple, List[str], List[tuple]]): - The face colors. Defaults to None. - alpha (Union[int, float]): The transparency of polygons. - Defaults to 0.8. - """ - if self.backend == 'matplotlib': - super().draw_polygons( - polygons=polygons, - edge_colors=edge_colors, - alpha=alpha, - **kwargs) - - elif self.backend == 'opencv': - if alpha == 1.0: - self._image = cv2.fillConvexPoly(self._image, polygons, - edge_colors) - else: - img = cv2.fillConvexPoly(self._image.copy(), polygons, - edge_colors) - self._image = cv2.addWeighted(self._image, 1 - alpha, img, - alpha, 0) - else: - raise ValueError(f'got unsupported backend {self.backend}') - - @master_only - def show(self, - drawn_img: Optional[np.ndarray] = None, - win_name: str = 'image', - wait_time: float = 0., - continue_key=' ') -> None: - """Show the drawn image. - - Args: - drawn_img (np.ndarray, optional): The image to show. If drawn_img - is None, it will show the image got by Visualizer. Defaults - to None. - win_name (str): The image title. Defaults to 'image'. - wait_time (float): Delay in seconds. 0 is the special - value that means "forever". Defaults to 0. - continue_key (str): The key for users to continue. Defaults to - the space key. - """ - if self.backend == 'matplotlib': - super().show( - drawn_img=drawn_img, - win_name=win_name, - wait_time=wait_time, - continue_key=continue_key) - - elif self.backend == 'opencv': - # Keep images are shown in the same window, and the title of window - # will be updated with `win_name`. - if not hasattr(self, win_name): - self._cv_win_name = win_name - cv2.namedWindow(winname=f'{id(self)}') - cv2.setWindowTitle(f'{id(self)}', win_name) - else: - cv2.setWindowTitle(f'{id(self)}', win_name) - shown_img = self.get_image() if drawn_img is None else drawn_img - cv2.imshow(str(id(self)), mmcv.bgr2rgb(shown_img)) - cv2.waitKey(int(np.ceil(wait_time * 1000))) - else: - raise ValueError(f'got unsupported backend {self.backend}') +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Union + +import cv2 +import mmcv +import numpy as np +import torch +from mmengine.dist import master_only +from mmengine.visualization import Visualizer + + +class OpencvBackendVisualizer(Visualizer): + """Base visualizer with opencv backend support. + + Args: + name (str): Name of the instance. Defaults to 'visualizer'. + image (np.ndarray, optional): the origin image to draw. The format + should be RGB. Defaults to None. + vis_backends (list, optional): Visual backend config list. + Defaults to None. + save_dir (str, optional): Save file dir for all storage backends. + If it is None, the backend storage will not save any data. + fig_save_cfg (dict): Keyword parameters of figure for saving. + Defaults to empty dict. + fig_show_cfg (dict): Keyword parameters of figure for showing. + Defaults to empty dict. + backend (str): Backend used to draw elements on the image and display + the image. Defaults to 'matplotlib'. + alpha (int, float): The transparency of bboxes. 
Defaults to ``1.0`` + """ + + def __init__(self, + name='visualizer', + backend: str = 'matplotlib', + *args, + **kwargs): + super().__init__(name, *args, **kwargs) + assert backend in ('opencv', 'matplotlib'), f'the argument ' \ + f'\'backend\' must be either \'opencv\' or \'matplotlib\', ' \ + f'but got \'{backend}\'.' + self.backend = backend + + @master_only + def set_image(self, image: np.ndarray) -> None: + """Set the image to draw. + + Args: + image (np.ndarray): The image to draw. + backend (str): The backend to save the image. + """ + assert image is not None + image = image.astype('uint8') + self._image = image + self.width, self.height = image.shape[1], image.shape[0] + self._default_font_size = max( + np.sqrt(self.height * self.width) // 90, 10) + + if self.backend == 'matplotlib': + # add a small 1e-2 to avoid precision lost due to matplotlib's + # truncation (https://github.com/matplotlib/matplotlib/issues/15363) # noqa + self.fig_save.set_size_inches( # type: ignore + (self.width + 1e-2) / self.dpi, + (self.height + 1e-2) / self.dpi) + # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig) + self.ax_save.cla() + self.ax_save.axis(False) + self.ax_save.imshow( + image, + extent=(0, self.width, self.height, 0), + interpolation='none') + + @master_only + def get_image(self) -> np.ndarray: + """Get the drawn image. The format is RGB. + + Returns: + np.ndarray: the drawn image which channel is RGB. + """ + assert self._image is not None, 'Please set image using `set_image`' + if self.backend == 'matplotlib': + return super().get_image() + else: + return self._image + + @master_only + def draw_circles(self, + center: Union[np.ndarray, torch.Tensor], + radius: Union[np.ndarray, torch.Tensor], + face_colors: Union[str, tuple, List[str], + List[tuple]] = 'none', + alpha: float = 1.0, + **kwargs) -> 'Visualizer': + """Draw single or multiple circles. + + Args: + center (Union[np.ndarray, torch.Tensor]): The x coordinate of + each line' start and end points. + radius (Union[np.ndarray, torch.Tensor]): The y coordinate of + each line' start and end points. + edge_colors (Union[str, tuple, List[str], List[tuple]]): The + colors of circles. ``colors`` can have the same length with + lines or just single value. If ``colors`` is single value, + all the lines will have the same colors. Reference to + https://matplotlib.org/stable/gallery/color/named_colors.html + for more details. Defaults to 'g. + line_styles (Union[str, List[str]]): The linestyle + of lines. ``line_styles`` can have the same length with + texts or just single value. If ``line_styles`` is single + value, all the lines will have the same linestyle. + Reference to + https://matplotlib.org/stable/api/collections_api.html?highlight=collection#matplotlib.collections.AsteriskPolygonCollection.set_linestyle + for more details. Defaults to '-'. + line_widths (Union[Union[int, float], List[Union[int, float]]]): + The linewidth of lines. ``line_widths`` can have + the same length with lines or just single value. + If ``line_widths`` is single value, all the lines will + have the same linewidth. Defaults to 2. + face_colors (Union[str, tuple, List[str], List[tuple]]): + The face colors. Defaults to None. + alpha (Union[int, float]): The transparency of circles. + Defaults to 0.8. 
+ """ + if self.backend == 'matplotlib': + super().draw_circles( + center=center, + radius=radius, + face_colors=face_colors, + alpha=alpha, + **kwargs) + elif self.backend == 'opencv': + if isinstance(face_colors, str): + face_colors = mmcv.color_val(face_colors) + + if alpha == 1.0: + self._image = cv2.circle(self._image, + (int(center[0]), int(center[1])), + int(radius), face_colors, -1) + else: + img = cv2.circle(self._image.copy(), + (int(center[0]), int(center[1])), int(radius), + face_colors, -1) + self._image = cv2.addWeighted(self._image, 1 - alpha, img, + alpha, 0) + else: + raise ValueError(f'got unsupported backend {self.backend}') + + @master_only + def draw_texts( + self, + texts: Union[str, List[str]], + positions: Union[np.ndarray, torch.Tensor], + font_sizes: Optional[Union[int, List[int]]] = None, + colors: Union[str, tuple, List[str], List[tuple]] = 'g', + vertical_alignments: Union[str, List[str]] = 'top', + horizontal_alignments: Union[str, List[str]] = 'left', + bboxes: Optional[Union[dict, List[dict]]] = None, + **kwargs, + ) -> 'Visualizer': + """Draw single or multiple text boxes. + + Args: + texts (Union[str, List[str]]): Texts to draw. + positions (Union[np.ndarray, torch.Tensor]): The position to draw + the texts, which should have the same length with texts and + each dim contain x and y. + font_sizes (Union[int, List[int]], optional): The font size of + texts. ``font_sizes`` can have the same length with texts or + just single value. If ``font_sizes`` is single value, all the + texts will have the same font size. Defaults to None. + colors (Union[str, tuple, List[str], List[tuple]]): The colors + of texts. ``colors`` can have the same length with texts or + just single value. If ``colors`` is single value, all the + texts will have the same colors. Reference to + https://matplotlib.org/stable/gallery/color/named_colors.html + for more details. Defaults to 'g. + vertical_alignments (Union[str, List[str]]): The verticalalignment + of texts. verticalalignment controls whether the y positional + argument for the text indicates the bottom, center or top side + of the text bounding box. + ``vertical_alignments`` can have the same length with + texts or just single value. If ``vertical_alignments`` is + single value, all the texts will have the same + verticalalignment. verticalalignment can be 'center' or + 'top', 'bottom' or 'baseline'. Defaults to 'top'. + horizontal_alignments (Union[str, List[str]]): The + horizontalalignment of texts. Horizontalalignment controls + whether the x positional argument for the text indicates the + left, center or right side of the text bounding box. + ``horizontal_alignments`` can have + the same length with texts or just single value. + If ``horizontal_alignments`` is single value, all the texts + will have the same horizontalalignment. Horizontalalignment + can be 'center','right' or 'left'. Defaults to 'left'. + font_families (Union[str, List[str]]): The font family of + texts. ``font_families`` can have the same length with texts or + just single value. If ``font_families`` is single value, all + the texts will have the same font family. + font_familiy can be 'serif', 'sans-serif', 'cursive', 'fantasy' + or 'monospace'. Defaults to 'sans-serif'. + bboxes (Union[dict, List[dict]], optional): The bounding box of the + texts. If bboxes is None, there are no bounding box around + texts. ``bboxes`` can have the same length with texts or + just single value. If ``bboxes`` is single value, all + the texts will have the same bbox. 
Reference to + https://matplotlib.org/stable/api/_as_gen/matplotlib.patches.FancyBboxPatch.html#matplotlib.patches.FancyBboxPatch + for more details. Defaults to None. + font_properties (Union[FontProperties, List[FontProperties]], optional): + The font properties of texts. FontProperties is + a ``font_manager.FontProperties()`` object. + If you want to draw Chinese texts, you need to prepare + a font file that can show Chinese characters properly. + For example: `simhei.ttf`, `simsun.ttc`, `simkai.ttf` and so on. + Then set ``font_properties=matplotlib.font_manager.FontProperties(fname='path/to/font_file')`` + ``font_properties`` can have the same length with texts or + just single value. If ``font_properties`` is single value, + all the texts will have the same font properties. + Defaults to None. + `New in version 0.6.0.` + """ # noqa: E501 + + if self.backend == 'matplotlib': + super().draw_texts( + texts=texts, + positions=positions, + font_sizes=font_sizes, + colors=colors, + vertical_alignments=vertical_alignments, + horizontal_alignments=horizontal_alignments, + bboxes=bboxes, + **kwargs) + + elif self.backend == 'opencv': + font_scale = max(0.1, font_sizes / 30) + thickness = max(1, font_sizes // 15) + + text_size, text_baseline = cv2.getTextSize(texts, + cv2.FONT_HERSHEY_DUPLEX, + font_scale, thickness) + + x = int(positions[0]) + if horizontal_alignments == 'right': + x = max(0, x - text_size[0]) + y = int(positions[1]) + if vertical_alignments == 'top': + y = min(self.height, y + text_size[1]) + + if bboxes is not None: + bbox_color = bboxes[0]['facecolor'] + if isinstance(bbox_color, str): + bbox_color = mmcv.color_val(bbox_color) + + y = y - text_baseline // 2 + self._image = cv2.rectangle( + self._image, (x, y - text_size[1] - text_baseline // 2), + (x + text_size[0], y + text_baseline // 2), bbox_color, + cv2.FILLED) + + self._image = cv2.putText(self._image, texts, (x, y), + cv2.FONT_HERSHEY_SIMPLEX, font_scale, + colors, thickness - 1) + else: + raise ValueError(f'got unsupported backend {self.backend}') + + @master_only + def draw_bboxes(self, + bboxes: Union[np.ndarray, torch.Tensor], + edge_colors: Union[str, tuple, List[str], + List[tuple]] = 'g', + line_widths: Union[Union[int, float], + List[Union[int, float]]] = 2, + **kwargs) -> 'Visualizer': + """Draw single or multiple bboxes. + + Args: + bboxes (Union[np.ndarray, torch.Tensor]): The bboxes to draw with + the format of(x1,y1,x2,y2). + edge_colors (Union[str, tuple, List[str], List[tuple]]): The + colors of bboxes. ``colors`` can have the same length with + lines or just single value. If ``colors`` is single value, all + the lines will have the same colors. Refer to `matplotlib. + colors` for full list of formats that are accepted. + Defaults to 'g'. + line_styles (Union[str, List[str]]): The linestyle + of lines. ``line_styles`` can have the same length with + texts or just single value. If ``line_styles`` is single + value, all the lines will have the same linestyle. + Reference to + https://matplotlib.org/stable/api/collections_api.html?highlight=collection#matplotlib.collections.AsteriskPolygonCollection.set_linestyle + for more details. Defaults to '-'. + line_widths (Union[Union[int, float], List[Union[int, float]]]): + The linewidth of lines. ``line_widths`` can have + the same length with lines or just single value. + If ``line_widths`` is single value, all the lines will + have the same linewidth. Defaults to 2. + face_colors (Union[str, tuple, List[str], List[tuple]]): + The face colors. 
Defaults to None. + alpha (Union[int, float]): The transparency of bboxes. + Defaults to 0.8. + """ + if self.backend == 'matplotlib': + super().draw_bboxes( + bboxes=bboxes, + edge_colors=edge_colors, + line_widths=line_widths, + **kwargs) + + elif self.backend == 'opencv': + self._image = mmcv.imshow_bboxes( + self._image, + bboxes, + edge_colors, + top_k=-1, + thickness=line_widths, + show=False) + else: + raise ValueError(f'got unsupported backend {self.backend}') + + @master_only + def draw_lines(self, + x_datas: Union[np.ndarray, torch.Tensor], + y_datas: Union[np.ndarray, torch.Tensor], + colors: Union[str, tuple, List[str], List[tuple]] = 'g', + line_widths: Union[Union[int, float], + List[Union[int, float]]] = 2, + **kwargs) -> 'Visualizer': + """Draw single or multiple line segments. + + Args: + x_datas (Union[np.ndarray, torch.Tensor]): The x coordinate of + each line' start and end points. + y_datas (Union[np.ndarray, torch.Tensor]): The y coordinate of + each line' start and end points. + colors (Union[str, tuple, List[str], List[tuple]]): The colors of + lines. ``colors`` can have the same length with lines or just + single value. If ``colors`` is single value, all the lines + will have the same colors. Reference to + https://matplotlib.org/stable/gallery/color/named_colors.html + for more details. Defaults to 'g'. + line_styles (Union[str, List[str]]): The linestyle + of lines. ``line_styles`` can have the same length with + texts or just single value. If ``line_styles`` is single + value, all the lines will have the same linestyle. + Reference to + https://matplotlib.org/stable/api/collections_api.html?highlight=collection#matplotlib.collections.AsteriskPolygonCollection.set_linestyle + for more details. Defaults to '-'. + line_widths (Union[Union[int, float], List[Union[int, float]]]): + The linewidth of lines. ``line_widths`` can have + the same length with lines or just single value. + If ``line_widths`` is single value, all the lines will + have the same linewidth. Defaults to 2. + """ + if self.backend == 'matplotlib': + super().draw_lines( + x_datas=x_datas, + y_datas=y_datas, + colors=colors, + line_widths=line_widths, + **kwargs) + + elif self.backend == 'opencv': + + self._image = cv2.line( + self._image, (x_datas[0], y_datas[0]), + (x_datas[1], y_datas[1]), + colors, + thickness=line_widths) + else: + raise ValueError(f'got unsupported backend {self.backend}') + + @master_only + def draw_polygons(self, + polygons: Union[Union[np.ndarray, torch.Tensor], + List[Union[np.ndarray, torch.Tensor]]], + edge_colors: Union[str, tuple, List[str], + List[tuple]] = 'g', + alpha: float = 1.0, + **kwargs) -> 'Visualizer': + """Draw single or multiple bboxes. + + Args: + polygons (Union[Union[np.ndarray, torch.Tensor],\ + List[Union[np.ndarray, torch.Tensor]]]): The polygons to draw + with the format of (x1,y1,x2,y2,...,xn,yn). + edge_colors (Union[str, tuple, List[str], List[tuple]]): The + colors of polygons. ``colors`` can have the same length with + lines or just single value. If ``colors`` is single value, + all the lines will have the same colors. Refer to + `matplotlib.colors` for full list of formats that are accepted. + Defaults to 'g. + line_styles (Union[str, List[str]]): The linestyle + of lines. ``line_styles`` can have the same length with + texts or just single value. If ``line_styles`` is single + value, all the lines will have the same linestyle. 
+ Reference to + https://matplotlib.org/stable/api/collections_api.html?highlight=collection#matplotlib.collections.AsteriskPolygonCollection.set_linestyle + for more details. Defaults to '-'. + line_widths (Union[Union[int, float], List[Union[int, float]]]): + The linewidth of lines. ``line_widths`` can have + the same length with lines or just single value. + If ``line_widths`` is single value, all the lines will + have the same linewidth. Defaults to 2. + face_colors (Union[str, tuple, List[str], List[tuple]]): + The face colors. Defaults to None. + alpha (Union[int, float]): The transparency of polygons. + Defaults to 0.8. + """ + if self.backend == 'matplotlib': + super().draw_polygons( + polygons=polygons, + edge_colors=edge_colors, + alpha=alpha, + **kwargs) + + elif self.backend == 'opencv': + if alpha == 1.0: + self._image = cv2.fillConvexPoly(self._image, polygons, + edge_colors) + else: + img = cv2.fillConvexPoly(self._image.copy(), polygons, + edge_colors) + self._image = cv2.addWeighted(self._image, 1 - alpha, img, + alpha, 0) + else: + raise ValueError(f'got unsupported backend {self.backend}') + + @master_only + def show(self, + drawn_img: Optional[np.ndarray] = None, + win_name: str = 'image', + wait_time: float = 0., + continue_key=' ') -> None: + """Show the drawn image. + + Args: + drawn_img (np.ndarray, optional): The image to show. If drawn_img + is None, it will show the image got by Visualizer. Defaults + to None. + win_name (str): The image title. Defaults to 'image'. + wait_time (float): Delay in seconds. 0 is the special + value that means "forever". Defaults to 0. + continue_key (str): The key for users to continue. Defaults to + the space key. + """ + if self.backend == 'matplotlib': + super().show( + drawn_img=drawn_img, + win_name=win_name, + wait_time=wait_time, + continue_key=continue_key) + + elif self.backend == 'opencv': + # Keep images are shown in the same window, and the title of window + # will be updated with `win_name`. + if not hasattr(self, win_name): + self._cv_win_name = win_name + cv2.namedWindow(winname=f'{id(self)}') + cv2.setWindowTitle(f'{id(self)}', win_name) + else: + cv2.setWindowTitle(f'{id(self)}', win_name) + shown_img = self.get_image() if drawn_img is None else drawn_img + cv2.imshow(str(id(self)), mmcv.bgr2rgb(shown_img)) + cv2.waitKey(int(np.ceil(wait_time * 1000))) + else: + raise ValueError(f'got unsupported backend {self.backend}') diff --git a/mmpose/visualization/simcc_vis.py b/mmpose/visualization/simcc_vis.py index 3a5b602fb5..fe1a6d965a 100644 --- a/mmpose/visualization/simcc_vis.py +++ b/mmpose/visualization/simcc_vis.py @@ -1,136 +1,136 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Optional, Union - -import cv2 as cv -import numpy as np -import torch -from torchvision.transforms import ToPILImage - - -class SimCCVisualizer: - - def draw_instance_xy_heatmap(self, - heatmap: torch.Tensor, - overlaid_image: Optional[np.ndarray], - n: int = 20, - mix: bool = True, - weight: float = 0.5): - """Draw heatmaps of GT or prediction. - - Args: - heatmap (torch.Tensor): Tensor of heatmap. - overlaid_image (np.ndarray): The image to draw. - n (int): Number of keypoint, up to 20. - mix (bool):Whether to merge heatmap and original image. - weight (float): Weight of original image during fusion. - - Returns: - np.ndarray: the drawn image which channel is RGB. 
- """ - heatmap2d = heatmap.data.max(0, keepdim=True)[0] - xy_heatmap, K = self.split_simcc_xy(heatmap) - K = K if K <= n else n - blank_size = tuple(heatmap.size()[1:]) - maps = {'x': [], 'y': []} - for i in xy_heatmap: - x, y = self.draw_1d_heatmaps(i['x']), self.draw_1d_heatmaps(i['y']) - maps['x'].append(x) - maps['y'].append(y) - white = self.creat_blank(blank_size, K) - map2d = self.draw_2d_heatmaps(heatmap2d) - if mix: - map2d = cv.addWeighted(overlaid_image, 1 - weight, map2d, weight, - 0) - self.image_cover(white, map2d, int(blank_size[1] * 0.1), - int(blank_size[0] * 0.1)) - white = self.add_1d_heatmaps(maps, white, blank_size, K) - return white - - def split_simcc_xy(self, heatmap: Union[np.ndarray, torch.Tensor]): - """Extract one-dimensional heatmap from two-dimensional heatmap and - calculate the number of keypoint.""" - size = heatmap.size() - k = size[0] if size[0] <= 20 else 20 - maps = [] - for _ in range(k): - xy_dict = {} - single_heatmap = heatmap[_] - xy_dict['x'], xy_dict['y'] = self.merge_maps(single_heatmap) - maps.append(xy_dict) - return maps, k - - def merge_maps(self, map_2d): - """Synthesis of one-dimensional heatmap.""" - x = map_2d.data.max(0, keepdim=True)[0] - y = map_2d.data.max(1, keepdim=True)[0] - return x, y - - def draw_1d_heatmaps(self, heatmap_1d): - """Draw one-dimensional heatmap.""" - size = heatmap_1d.size() - length = max(size) - np_heatmap = ToPILImage()(heatmap_1d).convert('RGB') - cv_img = cv.cvtColor(np.asarray(np_heatmap), cv.COLOR_RGB2BGR) - if size[0] < size[1]: - cv_img = cv.resize(cv_img, (length, 15)) - else: - cv_img = cv.resize(cv_img, (15, length)) - single_map = cv.applyColorMap(cv_img, cv.COLORMAP_JET) - return single_map - - def creat_blank(self, - size: Union[list, tuple], - K: int = 20, - interval: int = 10): - """Create the background.""" - blank_height = int( - max(size[0] * 2, size[0] * 1.1 + (K + 1) * (15 + interval))) - blank_width = int( - max(size[1] * 2, size[1] * 1.1 + (K + 1) * (15 + interval))) - blank = np.zeros((blank_height, blank_width, 3), np.uint8) - blank.fill(255) - return blank - - def draw_2d_heatmaps(self, heatmap_2d): - """Draw a two-dimensional heatmap fused with the original image.""" - np_heatmap = ToPILImage()(heatmap_2d).convert('RGB') - cv_img = cv.cvtColor(np.asarray(np_heatmap), cv.COLOR_RGB2BGR) - map_2d = cv.applyColorMap(cv_img, cv.COLORMAP_JET) - return map_2d - - def image_cover(self, background: np.ndarray, foreground: np.ndarray, - x: int, y: int): - """Paste the foreground on the background.""" - fore_size = foreground.shape - background[y:y + fore_size[0], x:x + fore_size[1]] = foreground - return background - - def add_1d_heatmaps(self, - maps: dict, - background: np.ndarray, - map2d_size: Union[tuple, list], - K: int, - interval: int = 10): - """Paste one-dimensional heatmaps onto the background in turn.""" - y_startpoint, x_startpoint = [int(1.1*map2d_size[1]), - int(0.1*map2d_size[0])],\ - [int(0.1*map2d_size[1]), - int(1.1*map2d_size[0])] - x_startpoint[1] += interval * 2 - y_startpoint[0] += interval * 2 - add = interval + 10 - for i in range(K): - self.image_cover(background, maps['x'][i], x_startpoint[0], - x_startpoint[1]) - cv.putText(background, str(i), - (x_startpoint[0] - 30, x_startpoint[1] + 10), - cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2) - self.image_cover(background, maps['y'][i], y_startpoint[0], - y_startpoint[1]) - cv.putText(background, str(i), - (y_startpoint[0], y_startpoint[1] - 5), - cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2) - x_startpoint[1] += add - 
y_startpoint[0] += add - return background[:x_startpoint[1] + y_startpoint[1] + - 1, :y_startpoint[0] + x_startpoint[0] + 1] +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Union + +import cv2 as cv +import numpy as np +import torch +from torchvision.transforms import ToPILImage + + +class SimCCVisualizer: + + def draw_instance_xy_heatmap(self, + heatmap: torch.Tensor, + overlaid_image: Optional[np.ndarray], + n: int = 20, + mix: bool = True, + weight: float = 0.5): + """Draw heatmaps of GT or prediction. + + Args: + heatmap (torch.Tensor): Tensor of heatmap. + overlaid_image (np.ndarray): The image to draw. + n (int): Number of keypoint, up to 20. + mix (bool):Whether to merge heatmap and original image. + weight (float): Weight of original image during fusion. + + Returns: + np.ndarray: the drawn image which channel is RGB. + """ + heatmap2d = heatmap.data.max(0, keepdim=True)[0] + xy_heatmap, K = self.split_simcc_xy(heatmap) + K = K if K <= n else n + blank_size = tuple(heatmap.size()[1:]) + maps = {'x': [], 'y': []} + for i in xy_heatmap: + x, y = self.draw_1d_heatmaps(i['x']), self.draw_1d_heatmaps(i['y']) + maps['x'].append(x) + maps['y'].append(y) + white = self.creat_blank(blank_size, K) + map2d = self.draw_2d_heatmaps(heatmap2d) + if mix: + map2d = cv.addWeighted(overlaid_image, 1 - weight, map2d, weight, + 0) + self.image_cover(white, map2d, int(blank_size[1] * 0.1), + int(blank_size[0] * 0.1)) + white = self.add_1d_heatmaps(maps, white, blank_size, K) + return white + + def split_simcc_xy(self, heatmap: Union[np.ndarray, torch.Tensor]): + """Extract one-dimensional heatmap from two-dimensional heatmap and + calculate the number of keypoint.""" + size = heatmap.size() + k = size[0] if size[0] <= 20 else 20 + maps = [] + for _ in range(k): + xy_dict = {} + single_heatmap = heatmap[_] + xy_dict['x'], xy_dict['y'] = self.merge_maps(single_heatmap) + maps.append(xy_dict) + return maps, k + + def merge_maps(self, map_2d): + """Synthesis of one-dimensional heatmap.""" + x = map_2d.data.max(0, keepdim=True)[0] + y = map_2d.data.max(1, keepdim=True)[0] + return x, y + + def draw_1d_heatmaps(self, heatmap_1d): + """Draw one-dimensional heatmap.""" + size = heatmap_1d.size() + length = max(size) + np_heatmap = ToPILImage()(heatmap_1d).convert('RGB') + cv_img = cv.cvtColor(np.asarray(np_heatmap), cv.COLOR_RGB2BGR) + if size[0] < size[1]: + cv_img = cv.resize(cv_img, (length, 15)) + else: + cv_img = cv.resize(cv_img, (15, length)) + single_map = cv.applyColorMap(cv_img, cv.COLORMAP_JET) + return single_map + + def creat_blank(self, + size: Union[list, tuple], + K: int = 20, + interval: int = 10): + """Create the background.""" + blank_height = int( + max(size[0] * 2, size[0] * 1.1 + (K + 1) * (15 + interval))) + blank_width = int( + max(size[1] * 2, size[1] * 1.1 + (K + 1) * (15 + interval))) + blank = np.zeros((blank_height, blank_width, 3), np.uint8) + blank.fill(255) + return blank + + def draw_2d_heatmaps(self, heatmap_2d): + """Draw a two-dimensional heatmap fused with the original image.""" + np_heatmap = ToPILImage()(heatmap_2d).convert('RGB') + cv_img = cv.cvtColor(np.asarray(np_heatmap), cv.COLOR_RGB2BGR) + map_2d = cv.applyColorMap(cv_img, cv.COLORMAP_JET) + return map_2d + + def image_cover(self, background: np.ndarray, foreground: np.ndarray, + x: int, y: int): + """Paste the foreground on the background.""" + fore_size = foreground.shape + background[y:y + fore_size[0], x:x + fore_size[1]] = foreground + return background + + def 
add_1d_heatmaps(self, + maps: dict, + background: np.ndarray, + map2d_size: Union[tuple, list], + K: int, + interval: int = 10): + """Paste one-dimensional heatmaps onto the background in turn.""" + y_startpoint, x_startpoint = [int(1.1*map2d_size[1]), + int(0.1*map2d_size[0])],\ + [int(0.1*map2d_size[1]), + int(1.1*map2d_size[0])] + x_startpoint[1] += interval * 2 + y_startpoint[0] += interval * 2 + add = interval + 10 + for i in range(K): + self.image_cover(background, maps['x'][i], x_startpoint[0], + x_startpoint[1]) + cv.putText(background, str(i), + (x_startpoint[0] - 30, x_startpoint[1] + 10), + cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2) + self.image_cover(background, maps['y'][i], y_startpoint[0], + y_startpoint[1]) + cv.putText(background, str(i), + (y_startpoint[0], y_startpoint[1] - 5), + cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2) + x_startpoint[1] += add + y_startpoint[0] += add + return background[:x_startpoint[1] + y_startpoint[1] + + 1, :y_startpoint[0] + x_startpoint[0] + 1] diff --git a/model-index.yml b/model-index.yml index 498e5bc743..446e15cad4 100644 --- a/model-index.yml +++ b/model-index.yml @@ -1,121 +1,121 @@ -Import: -- configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose_ap10k.yml -- configs/animal_2d_keypoint/topdown_heatmap/animalpose/hrnet_animalpose.yml -- configs/animal_2d_keypoint/topdown_heatmap/animalpose/resnet_animalpose.yml -- configs/animal_2d_keypoint/topdown_heatmap/ap10k/resnet_ap10k.yml -- configs/animal_2d_keypoint/topdown_heatmap/ap10k/hrnet_ap10k.yml -- configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext_udp_ap10k.yml -- configs/animal_2d_keypoint/topdown_heatmap/locust/resnet_locust.yml -- configs/animal_2d_keypoint/topdown_heatmap/zebra/resnet_zebra.yml -- configs/body_2d_keypoint/cid/coco/hrnet_coco.yml -- configs/body_2d_keypoint/dekr/coco/hrnet_coco.yml -- configs/body_2d_keypoint/dekr/crowdpose/hrnet_crowdpose.yml -- configs/body_2d_keypoint/integral_regression/coco/resnet_ipr_coco.yml -- configs/body_2d_keypoint/integral_regression/coco/resnet_dsnt_coco.yml -- configs/body_2d_keypoint/integral_regression/coco/resnet_debias_coco.yml -- configs/body_2d_keypoint/rtmpose/coco/rtmpose_coco.yml -- configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose_crowdpose.yml -- configs/body_2d_keypoint/rtmpose/mpii/rtmpose_mpii.yml -- configs/body_2d_keypoint/simcc/coco/mobilenetv2_coco.yml -- configs/body_2d_keypoint/simcc/coco/resnet_coco.yml -- configs/body_2d_keypoint/simcc/coco/vipnas_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/aic/hrnet_aic.yml -- configs/body_2d_keypoint/topdown_heatmap/aic/resnet_aic.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/hourglass_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/litehrnet_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/mspn_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/vitpose_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/alexnet_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/resnet_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/cpm_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/hrformer_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_augmentation_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_dark_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_udp_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/mobilenetv2_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/pvt_coco.yml -- 
configs/body_2d_keypoint/topdown_heatmap/coco/resnest_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/resnet_dark_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/cspnext_udp_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/resnetv1d_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/resnext_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/rsn_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/scnet_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/seresnet_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv1_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv2_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/swin_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/vgg_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/coco/vipnas_coco.yml -- configs/body_2d_keypoint/topdown_heatmap/crowdpose/hrnet_crowdpose.yml -- configs/body_2d_keypoint/topdown_heatmap/crowdpose/resnet_crowdpose.yml -- configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext_udp_crowdpose.yml -- configs/body_2d_keypoint/topdown_heatmap/jhmdb/cpm_jhmdb.yml -- configs/body_2d_keypoint/topdown_heatmap/jhmdb/resnet_jhmdb.yml -- configs/body_2d_keypoint/topdown_heatmap/mpii/cpm_mpii.yml -- configs/body_2d_keypoint/topdown_heatmap/mpii/hourglass_mpii.yml -- configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext_udp_mpii.yml -- configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_dark_mpii.yml -- configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_mpii.yml -- configs/body_2d_keypoint/topdown_heatmap/mpii/litehrnet_mpii.yml -- configs/body_2d_keypoint/topdown_heatmap/mpii/mobilenetv2_mpii.yml -- configs/body_2d_keypoint/topdown_heatmap/mpii/resnet_mpii.yml -- configs/body_2d_keypoint/topdown_heatmap/mpii/resnetv1d_mpii.yml -- configs/body_2d_keypoint/topdown_heatmap/mpii/resnext_mpii.yml -- configs/body_2d_keypoint/topdown_heatmap/mpii/scnet_mpii.yml -- configs/body_2d_keypoint/topdown_heatmap/mpii/seresnet_mpii.yml -- configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv1_mpii.yml -- configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv2_mpii.yml -- configs/body_2d_keypoint/topdown_heatmap/posetrack18/hrnet_posetrack18.yml -- configs/body_2d_keypoint/topdown_heatmap/posetrack18/resnet_posetrack18.yml -- configs/body_2d_keypoint/topdown_regression/coco/resnet_coco.yml -- configs/body_2d_keypoint/topdown_regression/coco/resnet_rle_coco.yml -- configs/body_2d_keypoint/topdown_regression/coco/mobilenetv2_rle_coco.yml -- configs/body_2d_keypoint/topdown_regression/mpii/resnet_mpii.yml -- configs/body_2d_keypoint/topdown_regression/mpii/resnet_rle_mpii.yml -- configs/body_3d_keypoint/pose_lift/h36m/videopose3d_h36m.yml -- configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose_coco_wholebody_face.yml -- configs/face_2d_keypoint/rtmpose/wflw/rtmpose_wflw.yml -- configs/face_2d_keypoint/topdown_heatmap/300w/hrnetv2_300w.yml -- configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_aflw.yml -- configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_dark_aflw.yml -- configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hourglass_coco_wholebody_face.yml -- configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_coco_wholebody_face.yml -- configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_dark_coco_wholebody_face.yml -- configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/mobilenetv2_coco_wholebody_face.yml -- 
configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/resnet_coco_wholebody_face.yml -- configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/scnet_coco_wholebody_face.yml -- configs/face_2d_keypoint/topdown_heatmap/cofw/hrnetv2_cofw.yml -- configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_wflw.yml -- configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_dark_wflw.yml -- configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_awing_wflw.yml -- configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose_coco_wholebody_hand.yml -- configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hourglass_coco_wholebody_hand.yml -- configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_coco_wholebody_hand.yml -- configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_dark_coco_wholebody_hand.yml -- configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/litehrnet_coco_wholebody_hand.yml -- configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/mobilenetv2_coco_wholebody_hand.yml -- configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/resnet_coco_wholebody_hand.yml -- configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/scnet_coco_wholebody_hand.yml -- configs/hand_2d_keypoint/topdown_heatmap/freihand2d/resnet_freihand2d.yml -- configs/hand_2d_keypoint/topdown_heatmap/onehand10k/resnet_onehand10k.yml -- configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_dark_onehand10k.yml -- configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_onehand10k.yml -- configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_udp_onehand10k.yml -- configs/hand_2d_keypoint/topdown_heatmap/onehand10k/mobilenetv2_onehand10k.yml -- configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_dark_rhd2d.yml -- configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_rhd2d.yml -- configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_udp_rhd2d.yml -- configs/hand_2d_keypoint/topdown_heatmap/rhd2d/mobilenetv2_rhd2d.yml -- configs/hand_2d_keypoint/topdown_heatmap/rhd2d/resnet_rhd2d.yml -- configs/hand_2d_keypoint/topdown_regression/onehand10k/resnet_onehand10k.yml -- configs/hand_2d_keypoint/topdown_regression/rhd2d/resnet_rhd2d.yml -- configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose_coco-wholebody.yml -- configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_coco-wholebody.yml -- configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_dark_coco-wholebody.yml -- configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/resnet_coco-wholebody.yml -- configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_coco-wholebody.yml -- configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_dark_coco-wholebody.yml -- configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext_udp_coco-wholebody.yml -- configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/res50_deepfasion2.yml +Import: +- configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose_ap10k.yml +- configs/animal_2d_keypoint/topdown_heatmap/animalpose/hrnet_animalpose.yml +- configs/animal_2d_keypoint/topdown_heatmap/animalpose/resnet_animalpose.yml +- configs/animal_2d_keypoint/topdown_heatmap/ap10k/resnet_ap10k.yml +- configs/animal_2d_keypoint/topdown_heatmap/ap10k/hrnet_ap10k.yml +- configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext_udp_ap10k.yml +- configs/animal_2d_keypoint/topdown_heatmap/locust/resnet_locust.yml +- configs/animal_2d_keypoint/topdown_heatmap/zebra/resnet_zebra.yml +- 
configs/body_2d_keypoint/cid/coco/hrnet_coco.yml +- configs/body_2d_keypoint/dekr/coco/hrnet_coco.yml +- configs/body_2d_keypoint/dekr/crowdpose/hrnet_crowdpose.yml +- configs/body_2d_keypoint/integral_regression/coco/resnet_ipr_coco.yml +- configs/body_2d_keypoint/integral_regression/coco/resnet_dsnt_coco.yml +- configs/body_2d_keypoint/integral_regression/coco/resnet_debias_coco.yml +- configs/body_2d_keypoint/rtmpose/coco/rtmpose_coco.yml +- configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose_crowdpose.yml +- configs/body_2d_keypoint/rtmpose/mpii/rtmpose_mpii.yml +- configs/body_2d_keypoint/simcc/coco/mobilenetv2_coco.yml +- configs/body_2d_keypoint/simcc/coco/resnet_coco.yml +- configs/body_2d_keypoint/simcc/coco/vipnas_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/aic/hrnet_aic.yml +- configs/body_2d_keypoint/topdown_heatmap/aic/resnet_aic.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/hourglass_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/litehrnet_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/mspn_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/vitpose_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/alexnet_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/resnet_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/cpm_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/hrformer_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_augmentation_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_dark_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_udp_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/mobilenetv2_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/pvt_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/resnest_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/resnet_dark_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/cspnext_udp_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/resnetv1d_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/resnext_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/rsn_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/scnet_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/seresnet_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv1_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/shufflenetv2_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/swin_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/vgg_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/coco/vipnas_coco.yml +- configs/body_2d_keypoint/topdown_heatmap/crowdpose/hrnet_crowdpose.yml +- configs/body_2d_keypoint/topdown_heatmap/crowdpose/resnet_crowdpose.yml +- configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext_udp_crowdpose.yml +- configs/body_2d_keypoint/topdown_heatmap/jhmdb/cpm_jhmdb.yml +- configs/body_2d_keypoint/topdown_heatmap/jhmdb/resnet_jhmdb.yml +- configs/body_2d_keypoint/topdown_heatmap/mpii/cpm_mpii.yml +- configs/body_2d_keypoint/topdown_heatmap/mpii/hourglass_mpii.yml +- configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext_udp_mpii.yml +- configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_dark_mpii.yml +- configs/body_2d_keypoint/topdown_heatmap/mpii/hrnet_mpii.yml +- configs/body_2d_keypoint/topdown_heatmap/mpii/litehrnet_mpii.yml +- configs/body_2d_keypoint/topdown_heatmap/mpii/mobilenetv2_mpii.yml +- 
configs/body_2d_keypoint/topdown_heatmap/mpii/resnet_mpii.yml +- configs/body_2d_keypoint/topdown_heatmap/mpii/resnetv1d_mpii.yml +- configs/body_2d_keypoint/topdown_heatmap/mpii/resnext_mpii.yml +- configs/body_2d_keypoint/topdown_heatmap/mpii/scnet_mpii.yml +- configs/body_2d_keypoint/topdown_heatmap/mpii/seresnet_mpii.yml +- configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv1_mpii.yml +- configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv2_mpii.yml +- configs/body_2d_keypoint/topdown_heatmap/posetrack18/hrnet_posetrack18.yml +- configs/body_2d_keypoint/topdown_heatmap/posetrack18/resnet_posetrack18.yml +- configs/body_2d_keypoint/topdown_regression/coco/resnet_coco.yml +- configs/body_2d_keypoint/topdown_regression/coco/resnet_rle_coco.yml +- configs/body_2d_keypoint/topdown_regression/coco/mobilenetv2_rle_coco.yml +- configs/body_2d_keypoint/topdown_regression/mpii/resnet_mpii.yml +- configs/body_2d_keypoint/topdown_regression/mpii/resnet_rle_mpii.yml +- configs/body_3d_keypoint/pose_lift/h36m/videopose3d_h36m.yml +- configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose_coco_wholebody_face.yml +- configs/face_2d_keypoint/rtmpose/wflw/rtmpose_wflw.yml +- configs/face_2d_keypoint/topdown_heatmap/300w/hrnetv2_300w.yml +- configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_aflw.yml +- configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_dark_aflw.yml +- configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hourglass_coco_wholebody_face.yml +- configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_coco_wholebody_face.yml +- configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_dark_coco_wholebody_face.yml +- configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/mobilenetv2_coco_wholebody_face.yml +- configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/resnet_coco_wholebody_face.yml +- configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/scnet_coco_wholebody_face.yml +- configs/face_2d_keypoint/topdown_heatmap/cofw/hrnetv2_cofw.yml +- configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_wflw.yml +- configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_dark_wflw.yml +- configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_awing_wflw.yml +- configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose_coco_wholebody_hand.yml +- configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hourglass_coco_wholebody_hand.yml +- configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_coco_wholebody_hand.yml +- configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_dark_coco_wholebody_hand.yml +- configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/litehrnet_coco_wholebody_hand.yml +- configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/mobilenetv2_coco_wholebody_hand.yml +- configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/resnet_coco_wholebody_hand.yml +- configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/scnet_coco_wholebody_hand.yml +- configs/hand_2d_keypoint/topdown_heatmap/freihand2d/resnet_freihand2d.yml +- configs/hand_2d_keypoint/topdown_heatmap/onehand10k/resnet_onehand10k.yml +- configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_dark_onehand10k.yml +- configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_onehand10k.yml +- configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_udp_onehand10k.yml +- configs/hand_2d_keypoint/topdown_heatmap/onehand10k/mobilenetv2_onehand10k.yml +- 
configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_dark_rhd2d.yml +- configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_rhd2d.yml +- configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_udp_rhd2d.yml +- configs/hand_2d_keypoint/topdown_heatmap/rhd2d/mobilenetv2_rhd2d.yml +- configs/hand_2d_keypoint/topdown_heatmap/rhd2d/resnet_rhd2d.yml +- configs/hand_2d_keypoint/topdown_regression/onehand10k/resnet_onehand10k.yml +- configs/hand_2d_keypoint/topdown_regression/rhd2d/resnet_rhd2d.yml +- configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/rtmpose_coco-wholebody.yml +- configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_coco-wholebody.yml +- configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/hrnet_dark_coco-wholebody.yml +- configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/resnet_coco-wholebody.yml +- configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_coco-wholebody.yml +- configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/vipnas_dark_coco-wholebody.yml +- configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext_udp_coco-wholebody.yml +- configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/res50_deepfasion2.yml diff --git a/myconfigs/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py b/myconfigs/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py index 7a1bee42b4..32286ade4d 100644 --- a/myconfigs/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py +++ b/myconfigs/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py @@ -1,150 +1,150 @@ -_base_ = ['../default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = '../../data/datasets/coco/' 
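The dataset section above resolves every annotation and image path relative to `data_root`, so retargeting this config at a different COCO copy only needs a handful of overrides rather than a copy of the whole file. A minimal sketch of a hypothetical child config (the file name and paths are illustrative, not part of this patch), relying on MMEngine's `_base_` inheritance and recursive dict merging:

# my_td-hm_hrnet-w32_coco_local.py -- hypothetical override config, not part
# of this diff. Keys listed here are merged into the base config; everything
# else (model, schedule, pipelines) is inherited unchanged.
_base_ = ['./td-hm_hrnet-w32_8xb64-210e_coco-256x192.py']

data_root = '/data/coco/'  # assumed local dataset location

train_dataloader = dict(
    batch_size=32,  # smaller per-GPU batch; auto_scale_lr can compensate
    dataset=dict(data_root=data_root))
val_dataloader = dict(dataset=dict(data_root=data_root))
test_dataloader = val_dataloader
# Note: the bbox_file in the base val_dataloader would also need updating
# if detection results live under the new root.

# CocoMetric reads the annotation file directly, so it needs the new root too.
val_evaluator = dict(
    ann_file=data_root + 'annotations/person_keypoints_val2017.json')
test_evaluator = val_evaluator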
- -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='images/train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file='../../data/datasets/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='images/val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['../default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings 
+dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = '../../data/datasets/coco/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='images/train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file='../../data/datasets/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='images/val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/myconfigs/default_runtime.py b/myconfigs/default_runtime.py index 561d574fa7..1235dfafb6 100644 --- a/myconfigs/default_runtime.py +++ b/myconfigs/default_runtime.py @@ -1,49 +1,49 @@ -default_scope = 'mmpose' - -# hooks -default_hooks = dict( - timer=dict(type='IterTimerHook'), - logger=dict(type='LoggerHook', interval=50), - param_scheduler=dict(type='ParamSchedulerHook'), - checkpoint=dict(type='CheckpointHook', interval=10), - sampler_seed=dict(type='DistSamplerSeedHook'), - visualization=dict(type='PoseVisualizationHook', enable=False), -) - -# custom hooks -custom_hooks = [ - # Synchronize model buffers such as running_mean and running_var in BN - # at the end of each epoch - dict(type='SyncBuffersHook') -] - -# multi-processing backend -env_cfg = dict( - cudnn_benchmark=False, - mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), - dist_cfg=dict(backend='nccl'), -) - -# visualizer -vis_backends = [ - dict(type='LocalVisBackend'), - # dict(type='TensorboardVisBackend'), - # dict(type='WandbVisBackend'), -] -visualizer = dict( - type='PoseLocalVisualizer', vis_backends=vis_backends, name='visualizer') - -# logger -log_processor = dict( - type='LogProcessor', window_size=50, by_epoch=True, num_digits=6) -log_level = 'INFO' -load_from = None -resume = False - -# file I/O backend -backend_args = dict(backend='local') - -# training/validation/testing progress -train_cfg = dict(by_epoch=True) -val_cfg = dict() -test_cfg = dict() +default_scope = 'mmpose' + +# hooks +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', interval=10), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='PoseVisualizationHook', enable=False), +) + 
+# custom hooks +custom_hooks = [ + # Synchronize model buffers such as running_mean and running_var in BN + # at the end of each epoch + dict(type='SyncBuffersHook') +] + +# multi-processing backend +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl'), +) + +# visualizer +vis_backends = [ + dict(type='LocalVisBackend'), + # dict(type='TensorboardVisBackend'), + # dict(type='WandbVisBackend'), +] +visualizer = dict( + type='PoseLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# logger +log_processor = dict( + type='LogProcessor', window_size=50, by_epoch=True, num_digits=6) +log_level = 'INFO' +load_from = None +resume = False + +# file I/O backend +backend_args = dict(backend='local') + +# training/validation/testing progress +train_cfg = dict(by_epoch=True) +val_cfg = dict() +test_cfg = dict() diff --git a/myconfigs/octseg/dekr_hrnet-w32_8xb10-140e_octseg-512x512.py b/myconfigs/octseg/dekr_hrnet-w32_8xb10-140e_octseg-512x512.py index 0f91e07209..4cb7dc1536 100644 --- a/myconfigs/octseg/dekr_hrnet-w32_8xb10-140e_octseg-512x512.py +++ b/myconfigs/octseg/dekr_hrnet-w32_8xb10-140e_octseg-512x512.py @@ -1,206 +1,206 @@ -_base_ = ['../default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=1260, val_interval=60) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=1e-3, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=140, - milestones=[90, 120], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=80) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='SPR', - input_size=(512, 512), - heatmap_size=(128, 128), - sigma=(4, 2), - minimal_diagonal_length=32**0.5, - generate_keypoint_heatmaps=True, - decode_max_instances=30) - -# model settings -model = dict( - type='BottomupPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - # mean=[123.675, 116.28, 103.53], - # std=[58.395, 57.12, 57.375], - mean=[21.002], - std=[25.754] - # bgr_to_rgb=True - ), - backbone=dict( - type='HRNet', - # in_channels=3, - in_channels=1, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256), - multiscale_output=True)), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='DEKRHead', - in_channels=480, - # num_keypoints=17, - num_keypoints=2, - heatmap_loss=dict(type='KeypointMSELoss', use_target_weight=True), - displacement_loss=dict( - type='SoftWeightSmoothL1Loss', - use_target_weight=True, - supervise_empty=False, - beta=1 / 9, - loss_weight=0.002, - ), - decoder=codec - # rescore_cfg=dict( - # in_channels=74, - # norm_indexes=(5, 6), - # init_cfg=dict( - # 
type='Pretrained', - # checkpoint='https://download.openmmlab.com/mmpose/' - # 'pretrain_models/kpt_rescore_coco-33d58c5c.pth')), - ), - test_cfg=dict( - multiscale_test=False, - flip_test=True, - nms_dist_thr=0.05, - shift_heatmap=True, - align_corners=False)) - -# enable DDP training when rescore net is used -find_unused_parameters = True - -# base dataset settings -dataset_type = 'OCTSegDataset' -data_mode = 'bottomup' -data_root = '../../data/datasets/octseg/' - -# pipelines -train_pipeline = [ - # dict(type='LoadImage'), - dict(type='LoadImage', color_type='unchanged'), - dict(type='BottomupRandomAffine', input_size=codec['input_size']), - dict(type='RandomFlip', direction='horizontal'), # check flip!! - dict(type='GenerateTarget', encoder=codec), - # dict(type='BottomupGetHeatmapMask'), - dict(type='PackPoseInputs'), -] -val_pipeline = [ - dict(type='LoadImage', color_type='unchanged'), - dict( - type='BottomupResize', - input_size=codec['input_size'], - size_factor=32, - resize_mode='expand'), - dict( - type='PackPoseInputs', - meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', - 'img_shape', 'input_size', 'input_center', 'input_scale', - 'flip', 'flip_direction', 'flip_indices', 'raw_ann_info', - 'skeleton_links')) -] - -# data loaders -train_dataloader = dict( - batch_size=20, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='sidebranch_round_train.json', - data_prefix=dict(img='train/round/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=10, - num_workers=1, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='sidebranch_round_test.json', - data_prefix=dict(img='test/round/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = dict( - batch_size=1, - num_workers=1, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='sidebranch_round_test.json', - data_prefix=dict(img='test/round/'), - test_mode=True, - pipeline=val_pipeline, - )) - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'sidebranch_round_test.json', - nms_mode='none', - score_mode='keypoint', -) -test_evaluator = val_evaluator +_base_ = ['../default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=1260, val_interval=60) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=140, + milestones=[90, 120], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=80) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=2)) + +# codec settings +codec = dict( + type='SPR', + input_size=(512, 512), + heatmap_size=(128, 128), + sigma=(4, 2), + minimal_diagonal_length=32**0.5, + generate_keypoint_heatmaps=True, + decode_max_instances=30) + +# model settings +model = dict( + type='BottomupPoseEstimator', + data_preprocessor=dict( + 
type='PoseDataPreprocessor', + # mean=[123.675, 116.28, 103.53], + # std=[58.395, 57.12, 57.375], + mean=[21.002], + std=[25.754] + # bgr_to_rgb=True + ), + backbone=dict( + type='HRNet', + # in_channels=3, + in_channels=1, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256), + multiscale_output=True)), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='DEKRHead', + in_channels=480, + # num_keypoints=17, + num_keypoints=2, + heatmap_loss=dict(type='KeypointMSELoss', use_target_weight=True), + displacement_loss=dict( + type='SoftWeightSmoothL1Loss', + use_target_weight=True, + supervise_empty=False, + beta=1 / 9, + loss_weight=0.002, + ), + decoder=codec + # rescore_cfg=dict( + # in_channels=74, + # norm_indexes=(5, 6), + # init_cfg=dict( + # type='Pretrained', + # checkpoint='https://download.openmmlab.com/mmpose/' + # 'pretrain_models/kpt_rescore_coco-33d58c5c.pth')), + ), + test_cfg=dict( + multiscale_test=False, + flip_test=True, + nms_dist_thr=0.05, + shift_heatmap=True, + align_corners=False)) + +# enable DDP training when rescore net is used +find_unused_parameters = True + +# base dataset settings +dataset_type = 'OCTSegDataset' +data_mode = 'bottomup' +data_root = '../../data/datasets/octseg/' + +# pipelines +train_pipeline = [ + # dict(type='LoadImage'), + dict(type='LoadImage', color_type='unchanged'), + dict(type='BottomupRandomAffine', input_size=codec['input_size']), + dict(type='RandomFlip', direction='horizontal'), # check flip!! 
+ dict(type='GenerateTarget', encoder=codec), + # dict(type='BottomupGetHeatmapMask'), + dict(type='PackPoseInputs'), +] +val_pipeline = [ + dict(type='LoadImage', color_type='unchanged'), + dict( + type='BottomupResize', + input_size=codec['input_size'], + size_factor=32, + resize_mode='expand'), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', + 'img_shape', 'input_size', 'input_center', 'input_scale', + 'flip', 'flip_direction', 'flip_indices', 'raw_ann_info', + 'skeleton_links')) +] + +# data loaders +train_dataloader = dict( + batch_size=20, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='sidebranch_round_train.json', + data_prefix=dict(img='train/round/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='sidebranch_round_test.json', + data_prefix=dict(img='test/round/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='sidebranch_round_test.json', + data_prefix=dict(img='test/round/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'sidebranch_round_test.json', + nms_mode='none', + score_mode='keypoint', +) +test_evaluator = val_evaluator diff --git a/myconfigs/octseg/dekr_octsb1-w32_8xb10-140e_octseg-round-512x512.py b/myconfigs/octseg/dekr_octsb1-w32_8xb10-140e_octseg-round-512x512.py new file mode 100644 index 0000000000..b41832d823 --- /dev/null +++ b/myconfigs/octseg/dekr_octsb1-w32_8xb10-140e_octseg-round-512x512.py @@ -0,0 +1,209 @@ +_base_ = ['../default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=1260, val_interval=60) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=140, + milestones=[90, 120], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=80) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=2)) + +# codec settings +codec = dict( + type='SPR', + input_size=(512, 512), + heatmap_size=(128, 128), + sigma=(4, 2), + minimal_diagonal_length=32**0.5, + generate_keypoint_heatmaps=True, + decode_max_instances=30) + +# model settings +model = dict( + type='BottomupPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + # mean=[123.675, 116.28, 103.53], + # std=[58.395, 57.12, 57.375], + mean=[0], + std=[255] + # bgr_to_rgb=True + ), + backbone=dict( + type='OCTSB1', + # in_channels=3, + in_channels=1, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 
4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256), + multiscale_output=True)), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + lumen_cfg=dict( + checkpoint_path='../../data/mmsegmentation/z-torchscript_models/unet-s5-d16_fcn-bce_4xb4-1280k_octlumen-round-random_resize512x512-crop256x256-no_wrapped-torchinput512x512.jit.pth' + ) + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='DEKRHead', + in_channels=480, + # num_keypoints=17, + num_keypoints=2, + heatmap_loss=dict(type='KeypointMSELoss', use_target_weight=True), + displacement_loss=dict( + type='SoftWeightSmoothL1Loss', + use_target_weight=True, + supervise_empty=False, + beta=1 / 9, + loss_weight=0.002, + ), + decoder=codec + # rescore_cfg=dict( + # in_channels=74, + # norm_indexes=(5, 6), + # init_cfg=dict( + # type='Pretrained', + # checkpoint='https://download.openmmlab.com/mmpose/' + # 'pretrain_models/kpt_rescore_coco-33d58c5c.pth')), + ), + test_cfg=dict( + multiscale_test=False, + flip_test=True, + nms_dist_thr=0.05, + shift_heatmap=True, + align_corners=False)) + +# enable DDP training when rescore net is used +find_unused_parameters = True + +# base dataset settings +dataset_type = 'OCTSegDataset' +data_mode = 'bottomup' +data_root = '../../data/datasets/octseg/' + +# pipelines +train_pipeline = [ + # dict(type='LoadImage'), + dict(type='LoadImage', color_type='unchanged'), + dict(type='BottomupRandomAffine', input_size=codec['input_size']), + dict(type='RandomFlip', direction='horizontal'), # check flip!! 
+ dict(type='GenerateTarget', encoder=codec), + # dict(type='BottomupGetHeatmapMask'), + dict(type='PackPoseInputs'), +] +val_pipeline = [ + dict(type='LoadImage', color_type='unchanged'), + dict( + type='BottomupResize', + input_size=codec['input_size'], + size_factor=32, + resize_mode='expand'), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', + 'img_shape', 'input_size', 'input_center', 'input_scale', + 'flip', 'flip_direction', 'flip_indices', 'raw_ann_info', + 'skeleton_links')) +] + +# data loaders +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='sidebranch_round_train.json', + data_prefix=dict(img='train/round/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='sidebranch_round_test.json', + data_prefix=dict(img='test/round/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='sidebranch_round_test.json', + data_prefix=dict(img='test/round/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'sidebranch_round_test.json', + nms_mode='none', + score_mode='keypoint', +) +test_evaluator = val_evaluator diff --git a/myconfigs/octseg/dekr_octsb1-w32_8xb10-140e_octsegflat-512x512.py b/myconfigs/octseg/dekr_octsb1-w32_8xb10-140e_octsegflat-512x512.py new file mode 100644 index 0000000000..b359f3ee35 --- /dev/null +++ b/myconfigs/octseg/dekr_octsb1-w32_8xb10-140e_octsegflat-512x512.py @@ -0,0 +1,211 @@ +_base_ = ['../default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=1260, val_interval=60) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=140, + milestones=[90, 120], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=80) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=2)) + +# codec settings +codec = dict( + type='SPR', + input_size=(512, 512), + heatmap_size=(128, 128), + sigma=(4, 2), + minimal_diagonal_length=32**0.5, + generate_keypoint_heatmaps=True, + decode_max_instances=30) + +# model settings +model = dict( + type='BottomupPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + # mean=[123.675, 116.28, 103.53], + # std=[58.395, 57.12, 57.375], + mean=[0], + std=[255] + # bgr_to_rgb=True + ), + backbone=dict( + type='OCTSB1', + # in_channels=3, + in_channels=1, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + 
num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256), + multiscale_output=True)), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + lumen_cfg=dict( + # config_path='../../data/mmsegmentation/work_dirs/unet-s5-d16_fcn-bce_4xb4-1280k_octlumen-random_resize512x512-crop256x256/unet-s5-d16_fcn-bce_4xb4-1280k_octlumen-random_resize512x512-crop256x256.py', + # checkpoint_path='../../data/mmsegmentation/work_dirs/unet-s5-d16_fcn-bce_4xb4-1280k_octlumen-random_resize512x512-crop256x256/best_mIoU_iter_768000.pth', + checkpoint_path='../../data/mmsegmentation/z-torchscript_models/unet-s5-d16_fcn-bce_4xb4-1280k_octlumen-random_resize512x512-crop256x256-no_wrapped-torchinput512x512.jit.pth' + ) + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='DEKRHead', + in_channels=480, + # num_keypoints=17, + num_keypoints=2, + heatmap_loss=dict(type='KeypointMSELoss', use_target_weight=True), + displacement_loss=dict( + type='SoftWeightSmoothL1Loss', + use_target_weight=True, + supervise_empty=False, + beta=1 / 9, + loss_weight=0.002, + ), + decoder=codec + # rescore_cfg=dict( + # in_channels=74, + # norm_indexes=(5, 6), + # init_cfg=dict( + # type='Pretrained', + # checkpoint='https://download.openmmlab.com/mmpose/' + # 'pretrain_models/kpt_rescore_coco-33d58c5c.pth')), + ), + test_cfg=dict( + multiscale_test=False, + flip_test=True, + nms_dist_thr=0.05, + shift_heatmap=True, + align_corners=False)) + +# enable DDP training when rescore net is used +find_unused_parameters = True + +# base dataset settings +dataset_type = 'OCTSegDataset' +data_mode = 'bottomup' +data_root = '../../data/datasets/octseg/' + +# pipelines +train_pipeline = [ + # dict(type='LoadImage'), + dict(type='LoadImage', color_type='unchanged'), + dict(type='BottomupRandomAffine', input_size=codec['input_size']), + dict(type='RandomFlip', direction='horizontal'), # check flip!! 
+ dict(type='GenerateTarget', encoder=codec), + # dict(type='BottomupGetHeatmapMask'), + dict(type='PackPoseInputs'), +] +val_pipeline = [ + dict(type='LoadImage', color_type='unchanged'), + dict( + type='BottomupResize', + input_size=codec['input_size'], + size_factor=32, + resize_mode='expand'), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', + 'img_shape', 'input_size', 'input_center', 'input_scale', + 'flip', 'flip_direction', 'flip_indices', 'raw_ann_info', + 'skeleton_links')) +] + +# data loaders +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='sidebranch_flat_train.json', + data_prefix=dict(img='train/flat/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='sidebranch_flat_test.json', + data_prefix=dict(img='test/flat/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='sidebranch_flat_test.json', + data_prefix=dict(img='test/flat/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'sidebranch_flat_test.json', + nms_mode='none', + score_mode='keypoint', +) +test_evaluator = val_evaluator diff --git a/myconfigs/octseg/dekr_octsb2-w32_8xb10-140e_octsegflat-512x512.py b/myconfigs/octseg/dekr_octsb2-w32_8xb10-140e_octsegflat-512x512.py new file mode 100644 index 0000000000..4f5175d8f4 --- /dev/null +++ b/myconfigs/octseg/dekr_octsb2-w32_8xb10-140e_octsegflat-512x512.py @@ -0,0 +1,209 @@ +_base_ = ['../default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=1260, val_interval=60) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=140, + milestones=[90, 120], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=80) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=2)) + +# codec settings +codec = dict( + type='SPR', + input_size=(512, 512), + heatmap_size=(128, 128), + sigma=(4, 2), + minimal_diagonal_length=32**0.5, + generate_keypoint_heatmaps=True, + decode_max_instances=30) + +# model settings +model = dict( + type='BottomupPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + # mean=[123.675, 116.28, 103.53], + # std=[58.395, 57.12, 57.375], + mean=[0], + std=[255] + # bgr_to_rgb=True + ), + backbone=dict( + type='OCTSB2', + # in_channels=3, + in_channels=1, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + 
num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256), + multiscale_output=True)), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + lumen_cfg=dict( + checkpoint_path='../../data/mmsegmentation/z-torchscript_models/unet-s5-d16_fcn-bce_4xb4-1280k_octflatguidewires-random_resize512x512-crop256x256-no_wrapped-torchinput512x512.jit.pth' + ) + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='DEKRHead', + in_channels=480, + # num_keypoints=17, + num_keypoints=2, + heatmap_loss=dict(type='KeypointMSELoss', use_target_weight=True), + displacement_loss=dict( + type='SoftWeightSmoothL1Loss', + use_target_weight=True, + supervise_empty=False, + beta=1 / 9, + loss_weight=0.002, + ), + decoder=codec + # rescore_cfg=dict( + # in_channels=74, + # norm_indexes=(5, 6), + # init_cfg=dict( + # type='Pretrained', + # checkpoint='https://download.openmmlab.com/mmpose/' + # 'pretrain_models/kpt_rescore_coco-33d58c5c.pth')), + ), + test_cfg=dict( + multiscale_test=False, + flip_test=True, + nms_dist_thr=0.05, + shift_heatmap=True, + align_corners=False)) + +# enable DDP training when rescore net is used +find_unused_parameters = True + +# base dataset settings +dataset_type = 'OCTSegDataset' +data_mode = 'bottomup' +data_root = '../../data/datasets/octseg/' + +# pipelines +train_pipeline = [ + # dict(type='LoadImage'), + dict(type='LoadImage', color_type='unchanged'), + dict(type='BottomupRandomAffine', input_size=codec['input_size']), + dict(type='RandomFlip', direction='horizontal'), # check flip!! 
+ dict(type='GenerateTarget', encoder=codec), + # dict(type='BottomupGetHeatmapMask'), + dict(type='PackPoseInputs'), +] +val_pipeline = [ + dict(type='LoadImage', color_type='unchanged'), + dict( + type='BottomupResize', + input_size=codec['input_size'], + size_factor=32, + resize_mode='expand'), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', + 'img_shape', 'input_size', 'input_center', 'input_scale', + 'flip', 'flip_direction', 'flip_indices', 'raw_ann_info', + 'skeleton_links')) +] + +# data loaders +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='sidebranch_flat_train.json', + data_prefix=dict(img='train/flat/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='sidebranch_flat_test.json', + data_prefix=dict(img='test/flat/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='sidebranch_flat_test.json', + data_prefix=dict(img='test/flat/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'sidebranch_flat_test.json', + nms_mode='none', + score_mode='keypoint', +) +test_evaluator = val_evaluator diff --git a/myconfigs/octseg/dekr_octsb3-warping-w32_8xb10-140e_octsegflat-512x512.py b/myconfigs/octseg/dekr_octsb3-warping-w32_8xb10-140e_octsegflat-512x512.py new file mode 100644 index 0000000000..526c299e63 --- /dev/null +++ b/myconfigs/octseg/dekr_octsb3-warping-w32_8xb10-140e_octsegflat-512x512.py @@ -0,0 +1,211 @@ +_base_ = ['../default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=1260, val_interval=60) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=1e-3, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=140, + milestones=[90, 120], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=80) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=2)) + +# codec settings +codec = dict( + type='SPR', + input_size=(512, 512), + heatmap_size=(128, 128), + sigma=(4, 2), + minimal_diagonal_length=32**0.5, + generate_keypoint_heatmaps=True, + decode_max_instances=30) + +# model settings +model = dict( + type='BottomupPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + # mean=[123.675, 116.28, 103.53], + # std=[58.395, 57.12, 57.375], + mean=[0], + std=[255] + # bgr_to_rgb=True + ), + backbone=dict( + type='OCTSB2', + # in_channels=3, + in_channels=1, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + 
num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256), + multiscale_output=True)), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + lumen_cfg=dict( + checkpoint_path='../../data/mmsegmentation/z-torchscript_models/unet-s5-d16_fcn-bce_4xb4-1280k_octroundguidewires-random_resize512x512-crop256x256-no_wrapped-torchinput512x512.jit.pth' + ) + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='DEKRHead', + in_channels=480, + # num_keypoints=17, + num_keypoints=2, + heatmap_loss=dict(type='KeypointMSELoss', use_target_weight=True), + displacement_loss=dict( + type='SoftWeightSmoothL1Loss', + use_target_weight=True, + supervise_empty=False, + beta=1 / 9, + loss_weight=0.002, + ), + decoder=codec + # rescore_cfg=dict( + # in_channels=74, + # norm_indexes=(5, 6), + # init_cfg=dict( + # type='Pretrained', + # checkpoint='https://download.openmmlab.com/mmpose/' + # 'pretrain_models/kpt_rescore_coco-33d58c5c.pth')), + ), + test_cfg=dict( + multiscale_test=False, + flip_test=True, + nms_dist_thr=0.05, + shift_heatmap=True, + align_corners=False)) + +# enable DDP training when rescore net is used +find_unused_parameters = True + +# base dataset settings +dataset_type = 'OCTSegDataset' +data_mode = 'bottomup' +data_root = '../../data/datasets/octseg/' + +# pipelines +train_pipeline = [ + # dict(type='LoadImage'), + dict(type='LoadImage', color_type='unchanged'), + # dict(type='Warping', direction='cart2polar', n_beams=512, scale=1), + dict(type='BottomupRandomAffine', input_size=codec['input_size']), + dict(type='RandomFlip', direction=['horizontal', 'vertical']), # check flip!! 
+ dict(type='GenerateTarget', encoder=codec), + # dict(type='BottomupGetHeatmapMask'), + dict(type='PackPoseInputs'), +] +val_pipeline = [ + dict(type='LoadImage', color_type='unchanged'), + # dict(type='Warping', direction='cart2polar', n_beams=512, scale=1), + dict( + type='BottomupResize', + input_size=codec['input_size'], + size_factor=32, + resize_mode='expand'), + dict( + type='PackPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape', + 'img_shape', 'input_size', 'input_center', 'input_scale', + 'flip', 'flip_direction', 'flip_indices', 'raw_ann_info', + 'skeleton_links')) +] + +# data loaders +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='sidebranch_round_train.json', + data_prefix=dict(img='train/round/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='sidebranch_round_test.json', + data_prefix=dict(img='test/round/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='sidebranch_round_test.json', + data_prefix=dict(img='test/round/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'sidebranch_round_test.json', + nms_mode='none', + score_mode='keypoint', +) +test_evaluator = val_evaluator diff --git a/myconfigs/octseg/td-hm_hrnet-w32_8xb64-210e_octseg-256x192.py b/myconfigs/octseg/td-hm_hrnet-w32_8xb64-210e_octseg-256x192.py index 0472d22479..a575238b00 100644 --- a/myconfigs/octseg/td-hm_hrnet-w32_8xb64-210e_octseg-256x192.py +++ b/myconfigs/octseg/td-hm_hrnet-w32_8xb64-210e_octseg-256x192.py @@ -1,88 +1,88 @@ -_base_ = ['../default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=210, val_interval=10) - -# optimizer -optim_wrapper = dict(optimizer=dict( - type='Adam', - lr=5e-4, -)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# codec settings -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, 
- block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmpose/' - 'pretrain_models/hrnet_w32-36af842e.pth'), - ), - head=dict( - type='HeatmapHead', - in_channels=32, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) +_base_ = ['../default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=210, val_interval=10) + +# optimizer +optim_wrapper = dict(optimizer=dict( + type='Adam', + lr=5e-4, +)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# codec settings +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmpose/' + 'pretrain_models/hrnet_w32-36af842e.pth'), + ), + head=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) diff --git a/myconfigs/octseg/td-hm_hrnet-w48_8xb32-210e_octseg-256x192.py b/myconfigs/octseg/td-hm_hrnet-w48_8xb32-210e_octseg-256x192.py index 650630368d..514353eee3 100644 --- a/myconfigs/octseg/td-hm_hrnet-w48_8xb32-210e_octseg-256x192.py +++ b/myconfigs/octseg/td-hm_hrnet-w48_8xb32-210e_octseg-256x192.py @@ -1,198 +1,198 @@ -default_scope = 'mmpose' -default_hooks = dict( - timer=dict(type='IterTimerHook'), - logger=dict(type='LoggerHook', interval=50), - param_scheduler=dict(type='ParamSchedulerHook'), - checkpoint=dict( - type='CheckpointHook', - interval=10, - save_best='coco/AP', - rule='greater'), - sampler_seed=dict(type='DistSamplerSeedHook'), - visualization=dict(type='PoseVisualizationHook', enable=False)) -custom_hooks = [dict(type='SyncBuffersHook')] -env_cfg = dict( - cudnn_benchmark=False, - mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), - dist_cfg=dict(backend='nccl')) -vis_backends = [dict(type='LocalVisBackend')] -visualizer = dict( - type='PoseLocalVisualizer', - 
vis_backends=[dict(type='LocalVisBackend')], - name='visualizer') -log_processor = dict( - type='LogProcessor', window_size=50, by_epoch=True, num_digits=6) -log_level = 'INFO' -load_from = None -resume = False -backend_args = dict(backend='local') -train_cfg = dict(by_epoch=True, max_epochs=210, val_interval=10) -val_cfg = dict() -test_cfg = dict() -optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005)) -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, by_epoch=False), - dict( - type='MultiStepLR', - begin=0, - end=210, - milestones=[170, 200], - gamma=0.1, - by_epoch=True) -] -auto_scale_lr = dict(base_batch_size=512) -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(48, 96)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(48, 96, 192)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(48, 96, 192, 384))), - init_cfg=dict( - type='Pretrained', - checkpoint= - 'https://download.openmmlab.com/mmpose/pretrain_models/hrnet_w48-8ef0771d.pth' - )), - head=dict( - type='HeatmapHead', - in_channels=48, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2)), - test_cfg=dict(flip_test=True, flip_mode='heatmap', shift_heatmap=True)) -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=(192, 256)), - dict( - type='GenerateTarget', - encoder=dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2)), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=(192, 256)), - dict(type='PackPoseInputs') -] -train_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CocoDataset', - data_root='data/coco/', - data_mode='topdown', - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=[ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict(type='RandomBBoxTransform'), - dict(type='TopdownAffine', input_size=(192, 256)), - dict( - type='GenerateTarget', - encoder=dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2)), - dict(type='PackPoseInputs') - ])) -val_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - 
dataset=dict( - type='CocoDataset', - data_root='data/coco/', - data_mode='topdown', - ann_file='annotations/person_keypoints_val2017.json', - bbox_file= - 'data/coco/person_detection_results/COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=[ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=(192, 256)), - dict(type='PackPoseInputs') - ])) -test_dataloader = dict( - batch_size=32, - num_workers=2, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type='CocoDataset', - data_root='data/coco/', - data_mode='topdown', - ann_file='annotations/person_keypoints_val2017.json', - bbox_file= - 'data/coco/person_detection_results/COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=[ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=(192, 256)), - dict(type='PackPoseInputs') - ])) -val_evaluator = dict( - type='CocoMetric', - ann_file='data/coco/annotations/person_keypoints_val2017.json') -test_evaluator = dict( - type='CocoMetric', - ann_file='data/coco/annotations/person_keypoints_val2017.json') +default_scope = 'mmpose' +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict( + type='CheckpointHook', + interval=10, + save_best='coco/AP', + rule='greater'), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='PoseVisualizationHook', enable=False)) +custom_hooks = [dict(type='SyncBuffersHook')] +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl')) +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='PoseLocalVisualizer', + vis_backends=[dict(type='LocalVisBackend')], + name='visualizer') +log_processor = dict( + type='LogProcessor', window_size=50, by_epoch=True, num_digits=6) +log_level = 'INFO' +load_from = None +resume = False +backend_args = dict(backend='local') +train_cfg = dict(by_epoch=True, max_epochs=210, val_interval=10) +val_cfg = dict() +test_cfg = dict() +optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005)) +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, by_epoch=False), + dict( + type='MultiStepLR', + begin=0, + end=210, + milestones=[170, 200], + gamma=0.1, + by_epoch=True) +] +auto_scale_lr = dict(base_batch_size=512) +codec = dict( + type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384))), + init_cfg=dict( + type='Pretrained', + checkpoint= + 
'https://download.openmmlab.com/mmpose/pretrain_models/hrnet_w48-8ef0771d.pth' + )), + head=dict( + type='HeatmapHead', + in_channels=48, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2)), + test_cfg=dict(flip_test=True, flip_mode='heatmap', shift_heatmap=True)) +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=(192, 256)), + dict( + type='GenerateTarget', + encoder=dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2)), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=(192, 256)), + dict(type='PackPoseInputs') +] +train_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CocoDataset', + data_root='data/coco/', + data_mode='topdown', + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=[ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(type='TopdownAffine', input_size=(192, 256)), + dict( + type='GenerateTarget', + encoder=dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2)), + dict(type='PackPoseInputs') + ])) +val_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CocoDataset', + data_root='data/coco/', + data_mode='topdown', + ann_file='annotations/person_keypoints_val2017.json', + bbox_file= + 'data/coco/person_detection_results/COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=[ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=(192, 256)), + dict(type='PackPoseInputs') + ])) +test_dataloader = dict( + batch_size=32, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CocoDataset', + data_root='data/coco/', + data_mode='topdown', + ann_file='annotations/person_keypoints_val2017.json', + bbox_file= + 'data/coco/person_detection_results/COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=[ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=(192, 256)), + dict(type='PackPoseInputs') + ])) +val_evaluator = dict( + type='CocoMetric', + ann_file='data/coco/annotations/person_keypoints_val2017.json') +test_evaluator = dict( + type='CocoMetric', + ann_file='data/coco/annotations/person_keypoints_val2017.json') diff --git a/mytests/common.py b/mytests/common.py new file mode 100644 index 0000000000..ab0ea970e5 --- /dev/null +++ b/mytests/common.py @@ -0,0 +1,30 @@ +import os +import os.path as osp +import cv2 +import numpy as np + +def 
draw_img_with_mask(img, mask, color=(255,255,255), alpha=0.8):
+    # Overlay a boolean mask on an image with the given color and opacity.
+    if img.ndim == 2:
+        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
+
+    img = img.astype(np.float32)
+    img_draw = img.copy()
+    img_draw[mask] = color
+    out = img * (1 - alpha) + img_draw * alpha
+
+    return out.astype(np.uint8)
+
+def select_checkpoint(work_dir):
+    print("work_dir:", osp.abspath(work_dir))
+    dirs = sorted(os.listdir(work_dir))
+
+    for i, d in enumerate(dirs, 0):
+        print("({}) {}".format(i, d))
+    d_idx = input("Select checkpoint that you want to load: ")
+
+    path_opt = dirs[int(d_idx)]
+    chosen_checkpoint = osp.abspath(os.path.join(work_dir, path_opt))
+
+    print(f'loaded {chosen_checkpoint}')
+
+    return chosen_checkpoint
\ No newline at end of file
diff --git a/mytests/test_warping.py b/mytests/test_warping.py
new file mode 100644
index 0000000000..9ee3bef905
--- /dev/null
+++ b/mytests/test_warping.py
@@ -0,0 +1,173 @@
+import os
+import os.path as osp
+import argparse
+import glob
+import cv2
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmengine.config import Config
+from mmseg.apis import init_model
+
+import numpy as np
+from math import pi as PI
+
+from common import draw_img_with_mask
+
+def select_work_dir(work_dir, checkpoint):
+    print("work_dir:", osp.abspath(work_dir))
+    dirs = sorted(os.listdir(work_dir))
+
+    for i, d in enumerate(dirs, 0):
+        print("({}) {}".format(i, d))
+    d_idx = input("Select directory that you want to load: ")
+
+    path_opt = dirs[int(d_idx)]
+    config_dir = osp.abspath(os.path.join(work_dir, path_opt))
+    config_path = glob.glob(osp.join(config_dir, '*.py'))[0]
+
+    if checkpoint == 'last':
+        with open(osp.join(config_dir, 'last_checkpoint')) as cf:
+            pth_path = cf.readline()
+    else:
+        best_pths = glob.glob(osp.join(config_dir, 'best*.pth'))
+        pth_path = best_pths[len(best_pths) - 1]
+
+    pth = osp.basename(pth_path)
+    pth_path = osp.join(config_dir, pth)
+
+    # print('config_path:', config_path)
+    # print('pth_path:', pth_path)
+
+    return config_path, pth_path
+
+class Warping(nn.Module):
+    def __init__(self, direction: str='cart2polar', n_beams: int=512, scale: float=0.5):
+        super().__init__()
+
+        self.direction = direction
+        self.n_beams = n_beams
+        self.scale = scale
+
+    def forward(self, x):
+        # Only the cartesian-to-polar direction is implemented; the last
+        # tensor dimension is treated as (x, y) coordinates.
+        if self.direction == 'cart2polar':
+            cart = x
+            rho = torch.norm(cart, p=2, dim=-1).view(-1, 1)
+            theta = torch.atan2(cart[..., 1], cart[..., 0]).view(-1, 1)
+            theta = theta + (theta < 0).type_as(theta) * (2 * PI)
+            polar = torch.cat([rho, theta], dim=-1)
+            out = polar
+        else:
+            out = x
+        return out
+
+    def backward(self):
+        # NOTE: unused; self.loss is never set in this module.
+        self.loss.backward()
+
+
+if __name__ == '__main__':
+    work_dir = '../../../data/mmsegmentation/work_dirs'
+    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser.add_argument('--work_dir', type=str, default=work_dir,
+                        help='specify working directory of the trained model')
+    parser.add_argument('--checkpoint', type=str, default='best', choices=['last', 'best'],
+                        help='select which checkpoint will be chosen [last|best]')
+    parser.add_argument('--input_size', type=int, default=512,
+                        help='the size of input image')
+    parser.add_argument('--verbose', action='store_true',
+                        help='show summary of the model')
+
+    args = parser.parse_args()
+
+    config_path, pth_path = select_work_dir(args.work_dir, args.checkpoint)
+    configname, _ = osp.splitext(osp.basename(config_path))
+    cfg = Config.fromfile(config_path)
+
+    # init model and load checkpoint
+    device = 'cuda:0'
+    print(f'Initializing model with {config_path} and {pth_path}')
+    net = init_model(config_path, pth_path)
+
+    # print('net:', net)
+    net = net.to(device)
+
+    warping = Warping()
+    warping = warping.to(device)
+
+    test_dir = osp.join('..', cfg.test_dataloader.dataset.data_root, cfg.test_dataloader.dataset.data_prefix.img_path)
+    annot_dir = osp.join('..', cfg.test_dataloader.dataset.data_root, cfg.test_dataloader.dataset.data_prefix.seg_map_path)
+
+    case_list = os.listdir(test_dir)
+
+    if '.png' in case_list[0]:
+        case_list = ['.']
+
+    for case in case_list:
+        case_dir = osp.join(test_dir, case)
+        file_list = os.listdir(case_dir)
+        annot_case_dir = osp.join(annot_dir, case)
+
+        # save_case_dir = osp.join(save_dir, case)
+        # os.makedirs(save_case_dir, exist_ok=True)
+        for fp in file_list:
+            # fp = file_list[4]
+            img_path = osp.join(case_dir, fp)
+            annot_path = osp.join(annot_case_dir, fp)
+            # result_path= osp.join(save_case_dir, fp)
+            # result_path= osp.join(save_dir, fp)
+            print(f'Inference on {img_path}')
+
+            flat_np = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
+            annot_np = cv2.imread(annot_path, cv2.IMREAD_GRAYSCALE)
+            annot_np = annot_np == np.max(annot_np)  # convert integer mask to boolean
+
+            flat_t = torch.from_numpy(flat_np).float().to(device)
+            flat_t = flat_t.reshape((1, 1, flat_t.shape[0], flat_t.shape[1]))
+            flat_t = flat_t / 255.0
+
+            round_t = warping(flat_t)
+            round_np = round_t.squeeze().to('cpu').detach().numpy()
+
+            cv2.imshow('results', round_np)
+            cv2.waitKey()
+
+            # # out_t = net(flat_t, mode='predict')
+            # # mask_t = out_t[0].pred_sem_seg.data
+
+            # out_t = net(flat_t, mode='tensor')
+            # if '-ce' in pth_path:
+            #     print('multi class')
+            #     mask_t = torch.argmax(out_t, dim=1)
+            # else:
+            #     print('binary class')
+            #     # print('out_t:', out_t)
+            #     # out_t = F.sigmoid(out_t)
+
+            #     out_t = out_t.sigmoid()
+            #     # print('sigmoid out_t', out_t)
+            #     mask_t = out_t >= 0.3
+            # print('mask_t.shape:', mask_t.shape)
+
+            # nz = mask_t.nonzero()
+            # print('nz_.shape:', nz.shape)
+            # print('nz:', nz)
+            # x_ = nz[:, 3]
+            # print('x_.shape:', x_.shape)
+            # print('x_:', x_)
+            # x_ = x_.unique()
+            # print('unique x_', x_)
+            # mask_t[:, :, :, x_] = 1
+
+            # pred_np = mask_t.squeeze().to('cpu').detach().numpy().astype(np.bool_)
+            # pred_masked = draw_img_with_mask(flat_np, pred_np, color=(0, 255, 0), alpha=0.2)
+            # gt_masked = draw_img_with_mask(flat_np, annot_np, color=(0, 0, 255), alpha=0.2)
+
+            # results = np.concatenate([pred_masked, gt_masked], axis=1)
+
+            # The side-by-side display belongs to the commented-out comparison
+            # above; 'results' is undefined while that block is disabled.
+            # cv2.imshow('results', results)
+            # cv2.waitKey()
diff --git a/projects/README.md b/projects/README.md
index a10ccad65a..a81a94f947 100644
--- a/projects/README.md
+++ b/projects/README.md
@@ -1,57 +1,57 @@
-# Welcome to Projects of MMPose
-
-Hey there! This is the place for you to contribute your awesome keypoint detection techniques to MMPose!
-
-We know the unit tests in core package can be a bit intimidating, so we've made it easier and more efficient for you to implement your algorithms here.
-
-And the **best part**?
-
-- Projects in this folder are designed to be **easier to merge**!
-
-- Projects in this folder are **NOT** strictly required for **writing unit tests**!
-
-- We want to make it **as painless as possible** for you to contribute and make MMPose even greater.
-
-If you're not sure where to start, check out our [example project](./example_project) to see how to add your algorithms easily. And if you have any questions, take a look at our [FAQ](./faq.md).
- -We also provide some documentation listed below to help you get started: - -- [New Model Guide](https://mmpose.readthedocs.io/en/latest/guide_to_framework.html#step3-model) - - A guide to help you add new models to MMPose. - -- [Contribution Guide](https://mmpose.readthedocs.io/en/latest/contribution_guide.html) - - A guide for new contributors on how to add their projects to MMPose. - -- [Discussions](https://github.com/open-mmlab/mmpose/discussions) - - We encourage you to start a discussion and share your ideas! - -## Project List - -- **[:zap:RTMPose](./rtmpose)**: Real-Time Multi-Person Pose Estimation toolkit based on MMPose - -
    - -- **[:art:MMPose4AIGC](./mmpose4aigc)**: Guide AI image generation with MMPose - -
    - -- **[:bulb:YOLOX-Pose](./yolox-pose)**: Enhancing YOLO for Multi Person Pose Estimation Using Object Keypoint Similarity Loss - -
    - -- **[📖Awesome MMPose](./awesome-mmpose/)**: A list of Tutorials, Papers, Datasets related to MMPose - -
    - -- **What's next? Join the rank of *MMPose contributors* by creating a new project**! +# Welcome to Projects of MMPose + +Hey there! This is the place for you to contribute your awesome keypoint detection techniques to MMPose! + +We know the unit tests in core package can be a bit intimidating, so we've made it easier and more efficient for you to implement your algorithms here. + +And the **best part**? + +- Projects in this folder are designed to be **easier to merge**! + +- Projects in this folder are **NOT** strictly required for **writing unit tests**! + +- We want to make it **as painless as possible** for you to contribute and make MMPose even greater. + +If you're not sure where to start, check out our [example project](./example_project) to see how to add your algorithms easily. And if you have any questions, take a look at our [FAQ](./faq.md). + +We also provide some documentation listed below to help you get started: + +- [New Model Guide](https://mmpose.readthedocs.io/en/latest/guide_to_framework.html#step3-model) + + A guide to help you add new models to MMPose. + +- [Contribution Guide](https://mmpose.readthedocs.io/en/latest/contribution_guide.html) + + A guide for new contributors on how to add their projects to MMPose. + +- [Discussions](https://github.com/open-mmlab/mmpose/discussions) + + We encourage you to start a discussion and share your ideas! + +## Project List + +- **[:zap:RTMPose](./rtmpose)**: Real-Time Multi-Person Pose Estimation toolkit based on MMPose + +
    + +- **[:art:MMPose4AIGC](./mmpose4aigc)**: Guide AI image generation with MMPose + +
    + +- **[:bulb:YOLOX-Pose](./yolox-pose)**: Enhancing YOLO for Multi Person Pose Estimation Using Object Keypoint Similarity Loss + +
    + +- **[📖Awesome MMPose](./awesome-mmpose/)**: A list of Tutorials, Papers, Datasets related to MMPose + +
    + +- **What's next? Join the rank of *MMPose contributors* by creating a new project**! diff --git a/projects/awesome-mmpose/README.md b/projects/awesome-mmpose/README.md index 99a6472269..cdd2a3cf4e 100644 --- a/projects/awesome-mmpose/README.md +++ b/projects/awesome-mmpose/README.md @@ -1,80 +1,80 @@ -# Awesome MMPose - -A list of resources related to MMPose. Feel free to contribute! - -
    - -## Contents - -- [Tutorials](#tutorials) -- [Papers](#papers) -- [Datasets](#datasets) -- [Projects](#projects) - -## Tutorials - -- [MMPose Tutorial (Chinese)](https://github.com/TommyZihao/MMPose_Tutorials) - - MMPose 中文视频代码教程,from 同济子豪兄 - -
    - -- [OpenMMLab Course](https://github.com/open-mmlab/OpenMMLabCourse) - - This repository hosts articles, lectures and tutorials on computer vision and OpenMMLab, helping learners to understand algorithms and master our toolboxes in a systematical way. - -## Papers - -- [\[paper\]](https://arxiv.org/abs/2207.10387) [\[code\]](https://github.com/luminxu/Pose-for-Everything) - - ECCV 2022, Pose for Everything: Towards Category-Agnostic Pose Estimation - -- [\[paper\]](https://arxiv.org/abs/2201.04676) [\[code\]](https://github.com/Sense-X/UniFormer) - - ICLR 2022, UniFormer: Unified Transformer for Efficient Spatiotemporal Representation Learning - -- [\[paper\]](https://arxiv.org/abs/2201.07412) [\[code\]](https://github.com/aim-uofa/Poseur) - - ECCV 2022, Poseur:Direct Human Pose Regression with Transformers - -- [\[paper\]](https://arxiv.org/abs/2106.03348) [\[code\]](https://github.com/ViTAE-Transformer/ViTAE-Transformer) - - NeurIPS 2022, ViTAEv2: Vision Transformer Advanced by Exploring Inductive Bias for Image Recognition and Beyond - -- [\[paper\]](https://arxiv.org/abs/2204.10762) [\[code\]](https://github.com/ZiyiZhang27/Dite-HRNet) - - IJCAI-ECAI 2021, Dite-HRNet:Dynamic Lightweight High-Resolution Network for Human Pose Estimation - -- [\[paper\]](https://arxiv.org/abs/2302.08453) [\[code\]](https://github.com/TencentARC/T2I-Adapter) - - T2I-Adapter: Learning Adapters to Dig out More Controllable Ability for Text-to-Image Diffusion Models - -- [\[paper\]](https://arxiv.org/pdf/2303.11638.pdf) [\[code\]](https://github.com/Gengzigang/PCT) - - CVPR 2023, Human Pose as Compositional Tokens - -## Datasets - -- [\[github\]](https://github.com/luminxu/Pose-for-Everything) **MP-100** - - Multi-category Pose (MP-100) dataset, which is a 2D pose dataset of 100 object categories containing over 20K instances and is well-designed for developing CAPE algorithms. - -
    - -- [\[github\]](https://github.com/facebookresearch/Ego4d/) **Ego4D** - - EGO4D is the world's largest egocentric (first person) video ML dataset and benchmark suite, with 3,600 hrs (and counting) of densely narrated video and a wide range of annotations across five new benchmark tasks. It covers hundreds of scenarios (household, outdoor, workplace, leisure, etc.) of daily life activity captured in-the-wild by 926 unique camera wearers from 74 worldwide locations and 9 different countries. - -
    - -## Projects - -Waiting for your contribution! +# Awesome MMPose + +A list of resources related to MMPose. Feel free to contribute! + +
    + +## Contents + +- [Tutorials](#tutorials) +- [Papers](#papers) +- [Datasets](#datasets) +- [Projects](#projects) + +## Tutorials + +- [MMPose Tutorial (Chinese)](https://github.com/TommyZihao/MMPose_Tutorials) + + MMPose 中文视频代码教程,from 同济子豪兄 + +
    + +- [OpenMMLab Course](https://github.com/open-mmlab/OpenMMLabCourse) + + This repository hosts articles, lectures and tutorials on computer vision and OpenMMLab, helping learners to understand algorithms and master our toolboxes in a systematical way. + +## Papers + +- [\[paper\]](https://arxiv.org/abs/2207.10387) [\[code\]](https://github.com/luminxu/Pose-for-Everything) + + ECCV 2022, Pose for Everything: Towards Category-Agnostic Pose Estimation + +- [\[paper\]](https://arxiv.org/abs/2201.04676) [\[code\]](https://github.com/Sense-X/UniFormer) + + ICLR 2022, UniFormer: Unified Transformer for Efficient Spatiotemporal Representation Learning + +- [\[paper\]](https://arxiv.org/abs/2201.07412) [\[code\]](https://github.com/aim-uofa/Poseur) + + ECCV 2022, Poseur:Direct Human Pose Regression with Transformers + +- [\[paper\]](https://arxiv.org/abs/2106.03348) [\[code\]](https://github.com/ViTAE-Transformer/ViTAE-Transformer) + + NeurIPS 2022, ViTAEv2: Vision Transformer Advanced by Exploring Inductive Bias for Image Recognition and Beyond + +- [\[paper\]](https://arxiv.org/abs/2204.10762) [\[code\]](https://github.com/ZiyiZhang27/Dite-HRNet) + + IJCAI-ECAI 2021, Dite-HRNet:Dynamic Lightweight High-Resolution Network for Human Pose Estimation + +- [\[paper\]](https://arxiv.org/abs/2302.08453) [\[code\]](https://github.com/TencentARC/T2I-Adapter) + + T2I-Adapter: Learning Adapters to Dig out More Controllable Ability for Text-to-Image Diffusion Models + +- [\[paper\]](https://arxiv.org/pdf/2303.11638.pdf) [\[code\]](https://github.com/Gengzigang/PCT) + + CVPR 2023, Human Pose as Compositional Tokens + +## Datasets + +- [\[github\]](https://github.com/luminxu/Pose-for-Everything) **MP-100** + + Multi-category Pose (MP-100) dataset, which is a 2D pose dataset of 100 object categories containing over 20K instances and is well-designed for developing CAPE algorithms. + +
    + +- [\[github\]](https://github.com/facebookresearch/Ego4d/) **Ego4D** + + EGO4D is the world's largest egocentric (first person) video ML dataset and benchmark suite, with 3,600 hrs (and counting) of densely narrated video and a wide range of annotations across five new benchmark tasks. It covers hundreds of scenarios (household, outdoor, workplace, leisure, etc.) of daily life activity captured in-the-wild by 926 unique camera wearers from 74 worldwide locations and 9 different countries. + +
    + +## Projects + +Waiting for your contribution! diff --git a/projects/example_project/README.md b/projects/example_project/README.md index d355741aa4..dfafc6c980 100644 --- a/projects/example_project/README.md +++ b/projects/example_project/README.md @@ -1,166 +1,166 @@ -# Example Project - -> A README.md template for releasing a project. -> -> All the fields in this README are **mandatory** for others to understand what you have achieved in this implementation. -> Please read our [Projects FAQ](../faq.md) if you still feel unclear about the requirements, or raise an [issue](https://github.com/open-mmlab/mmpose/issues) to us! - -## Description - -> Share any information you would like others to know. For example: -> -> Author: @xxx. -> -> This is an implementation of \[XXX\]. - -Author: @xxx. - -This project implements a top-down pose estimator with custom head and loss functions that have been seamlessly inherited from existing modules within MMPose. - -## Usage - -> For a typical model, this section should contain the commands for training and testing. -> You are also suggested to dump your environment specification to env.yml by `conda env export > env.yml`. - -### Prerequisites - -- Python 3.7 -- PyTorch 1.6 or higher -- [MIM](https://github.com/open-mmlab/mim) v0.33 or higher -- [MMPose](https://github.com/open-mmlab/mmpose) v1.0.0rc0 or higher - -All the commands below rely on the correct configuration of `PYTHONPATH`, which should point to the project's directory so that Python can locate the module files. In `example_project/` root directory, run the following line to add the current directory to `PYTHONPATH`: - -```shell -export PYTHONPATH=`pwd`:$PYTHONPATH -``` - -### Data Preparation - -Prepare the COCO dataset according to the [instruction](https://mmpose.readthedocs.io/en/dev-1.x/dataset_zoo/2d_body_keypoint.html#coco). - -### Training commands - -**To train with single GPU:** - -```shell -mim train mmpose configs/example-head-loss_hrnet-w32_8xb64-210e_coco-256x192.py -``` - -**To train with multiple GPUs:** - -```shell -mim train mmpose configs/example-head-loss_hrnet-w32_8xb64-210e_coco-256x192.py --launcher pytorch --gpus 8 -``` - -**To train with multiple GPUs by slurm:** - -```shell -mim train mmpose configs/example-head-loss_hrnet-w32_8xb64-210e_coco-256x192.py --launcher slurm \ - --gpus 16 --gpus-per-node 8 --partition $PARTITION -``` - -### Testing commands - -**To test with single GPU:** - -```shell -mim test mmpose configs/example-head-loss_hrnet-w32_8xb64-210e_coco-256x192.py $CHECKPOINT -``` - -**To test with multiple GPUs:** - -```shell -mim test mmpose configs/example-head-loss_hrnet-w32_8xb64-210e_coco-256x192.py $CHECKPOINT --launcher pytorch --gpus 8 -``` - -**To test with multiple GPUs by slurm:** - -```shell -mim test mmpose configs/example-head-loss_hrnet-w32_8xb64-210e_coco-256x192.py $CHECKPOINT --launcher slurm \ - --gpus 16 --gpus-per-node 8 --partition $PARTITION -``` - -## Results - -> List the results as usually done in other model's README. Here is an [Example](https://github.com/open-mmlab/mmpose/blob/dev-1.x/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco.md). 
- -> You should claim whether this is based on the pre-trained weights, which are converted from the official release; or it's a reproduced result obtained from retraining the model in this project - -| Model | Backbone | Input Size | AP | AP50 | AP75 | AR | AR50 | Download | -| :-----------------------------------------------------------: | :-------: | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :---------------------------------------------------------------: | -| [ExampleHead + ExampleLoss](./configs/example-head-loss_hrnet-w32_8xb64-210e_coco-256x192.py) | HRNet-w32 | 256x912 | 0.749 | 0.906 | 0.821 | 0.804 | 0.945 | [model](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth) \| [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220909.log) | - -## Citation - -> You may remove this section if not applicable. - -```bibtex -@misc{mmpose2020, - title={OpenMMLab Pose Estimation Toolbox and Benchmark}, - author={MMPose Contributors}, - howpublished = {\url{https://github.com/open-mmlab/mmpose}}, - year={2020} -} -``` - -## Checklist - -Here is a checklist of this project's progress. And you can ignore this part if you don't plan to contribute -to MMPose projects. - -> The PIC (person in charge) or contributors of this project should check all the items that they believe have been finished, which will further be verified by codebase maintainers via a PR. - -> OpenMMLab's maintainer will review the code to ensure the project's quality. Reaching the first milestone means that this project suffices the minimum requirement of being merged into 'projects/'. But this project is only eligible to become a part of the core package upon attaining the last milestone. - -> Note that keeping this section up-to-date is crucial not only for this project's developers but the entire community, since there might be some other contributors joining this project and deciding their starting point from this list. It also helps maintainers accurately estimate time and effort on further code polishing, if needed. - -> A project does not necessarily have to be finished in a single PR, but it's essential for the project to at least reach the first milestone in its very first PR. - -- [ ] Milestone 1: PR-ready, and acceptable to be one of the `projects/`. - - - [ ] Finish the code - - > The code's design shall follow existing interfaces and convention. For example, each model component should be registered into `mmpose.registry.MODELS` and configurable via a config file. - - - [ ] Basic docstrings & proper citation - - > Each major class should contains a docstring, describing its functionality and arguments. If your code is copied or modified from other open-source projects, don't forget to cite the source project in docstring and make sure your behavior is not against its license. Typically, we do not accept any code snippet under GPL license. [A Short Guide to Open Source Licenses](https://medium.com/nationwide-technology/a-short-guide-to-open-source-licenses-cf5b1c329edd) - - - [ ] Test-time correctness - - > If you are reproducing the result from a paper, make sure your model's inference-time performance matches that in the original paper. The weights usually could be obtained by simply renaming the keys in the official pre-trained weights. 
This test could be skipped though, if you are able to prove the training-time correctness and check the second milestone. - - - [ ] A full README - - > As this template does. - -- [ ] Milestone 2: Indicates a successful model implementation. - - - [ ] Training-time correctness - - > If you are reproducing the result from a paper, checking this item means that you should have trained your model from scratch based on the original paper's specification and verified that the final result matches the report within a minor error range. - -- [ ] Milestone 3: Good to be a part of our core package! - - - [ ] Type hints and docstrings - - > Ideally *all* the methods should have [type hints](https://www.pythontutorial.net/python-basics/python-type-hints/) and [docstrings](https://google.github.io/styleguide/pyguide.html#381-docstrings). [Example](https://github.com/open-mmlab/mmpose/blob/0fb7f22000197181dc0629f767dd99d881d23d76/mmpose/utils/tensor_utils.py#L53) - - - [ ] Unit tests - - > Unit tests for the major module are required. [Example](https://github.com/open-mmlab/mmpose/blob/dev-1.x/tests/test_models/test_heads/test_heatmap_heads/test_heatmap_head.py) - - - [ ] Code polishing - - > Refactor your code according to reviewer's comment. - - - [ ] Metafile.yml - - > It will be parsed by MIM and Inferencer. [Example](https://github.com/open-mmlab/mmpose/blob/dev-1.x/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco.yml) - - - [ ] Move your modules into the core package following the codebase's file hierarchy structure. - - > In particular, you may have to refactor this README into a standard one. [Example](https://github.com/open-mmlab/mmpose/blob/dev-1.x/configs/body_2d_keypoint/topdown_heatmap/README.md) - - - [ ] Refactor your modules into the core package following the codebase's file hierarchy structure. +# Example Project + +> A README.md template for releasing a project. +> +> All the fields in this README are **mandatory** for others to understand what you have achieved in this implementation. +> Please read our [Projects FAQ](../faq.md) if you still feel unclear about the requirements, or raise an [issue](https://github.com/open-mmlab/mmpose/issues) to us! + +## Description + +> Share any information you would like others to know. For example: +> +> Author: @xxx. +> +> This is an implementation of \[XXX\]. + +Author: @xxx. + +This project implements a top-down pose estimator with custom head and loss functions that have been seamlessly inherited from existing modules within MMPose. + +## Usage + +> For a typical model, this section should contain the commands for training and testing. +> You are also suggested to dump your environment specification to env.yml by `conda env export > env.yml`. + +### Prerequisites + +- Python 3.7 +- PyTorch 1.6 or higher +- [MIM](https://github.com/open-mmlab/mim) v0.33 or higher +- [MMPose](https://github.com/open-mmlab/mmpose) v1.0.0rc0 or higher + +All the commands below rely on the correct configuration of `PYTHONPATH`, which should point to the project's directory so that Python can locate the module files. In `example_project/` root directory, run the following line to add the current directory to `PYTHONPATH`: + +```shell +export PYTHONPATH=`pwd`:$PYTHONPATH +``` + +### Data Preparation + +Prepare the COCO dataset according to the [instruction](https://mmpose.readthedocs.io/en/dev-1.x/dataset_zoo/2d_body_keypoint.html#coco). 
+ +### Training commands + +**To train with single GPU:** + +```shell +mim train mmpose configs/example-head-loss_hrnet-w32_8xb64-210e_coco-256x192.py +``` + +**To train with multiple GPUs:** + +```shell +mim train mmpose configs/example-head-loss_hrnet-w32_8xb64-210e_coco-256x192.py --launcher pytorch --gpus 8 +``` + +**To train with multiple GPUs by slurm:** + +```shell +mim train mmpose configs/example-head-loss_hrnet-w32_8xb64-210e_coco-256x192.py --launcher slurm \ + --gpus 16 --gpus-per-node 8 --partition $PARTITION +``` + +### Testing commands + +**To test with single GPU:** + +```shell +mim test mmpose configs/example-head-loss_hrnet-w32_8xb64-210e_coco-256x192.py $CHECKPOINT +``` + +**To test with multiple GPUs:** + +```shell +mim test mmpose configs/example-head-loss_hrnet-w32_8xb64-210e_coco-256x192.py $CHECKPOINT --launcher pytorch --gpus 8 +``` + +**To test with multiple GPUs by slurm:** + +```shell +mim test mmpose configs/example-head-loss_hrnet-w32_8xb64-210e_coco-256x192.py $CHECKPOINT --launcher slurm \ + --gpus 16 --gpus-per-node 8 --partition $PARTITION +``` + +## Results + +> List the results as usually done in other model's README. Here is an [Example](https://github.com/open-mmlab/mmpose/blob/dev-1.x/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco.md). + +> You should claim whether this is based on the pre-trained weights, which are converted from the official release; or it's a reproduced result obtained from retraining the model in this project + +| Model | Backbone | Input Size | AP | AP50 | AP75 | AR | AR50 | Download | +| :-----------------------------------------------------------: | :-------: | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :---------------------------------------------------------------: | +| [ExampleHead + ExampleLoss](./configs/example-head-loss_hrnet-w32_8xb64-210e_coco-256x192.py) | HRNet-w32 | 256x912 | 0.749 | 0.906 | 0.821 | 0.804 | 0.945 | [model](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth) \| [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192_20220909.log) | + +## Citation + +> You may remove this section if not applicable. + +```bibtex +@misc{mmpose2020, + title={OpenMMLab Pose Estimation Toolbox and Benchmark}, + author={MMPose Contributors}, + howpublished = {\url{https://github.com/open-mmlab/mmpose}}, + year={2020} +} +``` + +## Checklist + +Here is a checklist of this project's progress. And you can ignore this part if you don't plan to contribute +to MMPose projects. + +> The PIC (person in charge) or contributors of this project should check all the items that they believe have been finished, which will further be verified by codebase maintainers via a PR. + +> OpenMMLab's maintainer will review the code to ensure the project's quality. Reaching the first milestone means that this project suffices the minimum requirement of being merged into 'projects/'. But this project is only eligible to become a part of the core package upon attaining the last milestone. + +> Note that keeping this section up-to-date is crucial not only for this project's developers but the entire community, since there might be some other contributors joining this project and deciding their starting point from this list. It also helps maintainers accurately estimate time and effort on further code polishing, if needed. 
+ +> A project does not necessarily have to be finished in a single PR, but it's essential for the project to at least reach the first milestone in its very first PR. + +- [ ] Milestone 1: PR-ready, and acceptable to be one of the `projects/`. + + - [ ] Finish the code + + > The code's design shall follow existing interfaces and convention. For example, each model component should be registered into `mmpose.registry.MODELS` and configurable via a config file. + + - [ ] Basic docstrings & proper citation + + > Each major class should contains a docstring, describing its functionality and arguments. If your code is copied or modified from other open-source projects, don't forget to cite the source project in docstring and make sure your behavior is not against its license. Typically, we do not accept any code snippet under GPL license. [A Short Guide to Open Source Licenses](https://medium.com/nationwide-technology/a-short-guide-to-open-source-licenses-cf5b1c329edd) + + - [ ] Test-time correctness + + > If you are reproducing the result from a paper, make sure your model's inference-time performance matches that in the original paper. The weights usually could be obtained by simply renaming the keys in the official pre-trained weights. This test could be skipped though, if you are able to prove the training-time correctness and check the second milestone. + + - [ ] A full README + + > As this template does. + +- [ ] Milestone 2: Indicates a successful model implementation. + + - [ ] Training-time correctness + + > If you are reproducing the result from a paper, checking this item means that you should have trained your model from scratch based on the original paper's specification and verified that the final result matches the report within a minor error range. + +- [ ] Milestone 3: Good to be a part of our core package! + + - [ ] Type hints and docstrings + + > Ideally *all* the methods should have [type hints](https://www.pythontutorial.net/python-basics/python-type-hints/) and [docstrings](https://google.github.io/styleguide/pyguide.html#381-docstrings). [Example](https://github.com/open-mmlab/mmpose/blob/0fb7f22000197181dc0629f767dd99d881d23d76/mmpose/utils/tensor_utils.py#L53) + + - [ ] Unit tests + + > Unit tests for the major module are required. [Example](https://github.com/open-mmlab/mmpose/blob/dev-1.x/tests/test_models/test_heads/test_heatmap_heads/test_heatmap_head.py) + + - [ ] Code polishing + + > Refactor your code according to reviewer's comment. + + - [ ] Metafile.yml + + > It will be parsed by MIM and Inferencer. [Example](https://github.com/open-mmlab/mmpose/blob/dev-1.x/configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco.yml) + + - [ ] Move your modules into the core package following the codebase's file hierarchy structure. + + > In particular, you may have to refactor this README into a standard one. [Example](https://github.com/open-mmlab/mmpose/blob/dev-1.x/configs/body_2d_keypoint/topdown_heatmap/README.md) + + - [ ] Refactor your modules into the core package following the codebase's file hierarchy structure. 
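The checklist above requires each model component to be registered into `mmpose.registry.MODELS` and configured via a config file, which is exactly what the config in the next diff does declaratively. As a rough, standalone illustration of what that registration buys (assuming `example_project/` is on `PYTHONPATH` as set up in the Usage section, so that `import models` succeeds), the registered head and loss can also be built directly from their config dicts:

```python
import models  # noqa: F401  # runs the @MODELS.register_module() decorators
from mmpose.registry import MODELS

# Build the custom loss and head from plain config dicts; the `type` strings
# are resolved through the registry, just as they are when MMPose parses the
# config file. (For debugging from the repository root, the FAQ suggests an
# import path of the form 'projects.X.modules' instead.)
loss = MODELS.build(dict(type='ExampleLoss', use_target_weight=True))
head = MODELS.build(
    dict(
        type='ExampleHead',
        in_channels=32,
        out_channels=17,
        deconv_out_channels=None,
        loss=dict(type='ExampleLoss', use_target_weight=True)))
print(type(loss).__name__, type(head).__name__)  # ExampleLoss ExampleHead
```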
diff --git a/projects/example_project/configs/example-head-loss_hrnet-w32_8xb64-210e_coco-256x192.py b/projects/example_project/configs/example-head-loss_hrnet-w32_8xb64-210e_coco-256x192.py index 99b19d478c..8cd169254e 100644 --- a/projects/example_project/configs/example-head-loss_hrnet-w32_8xb64-210e_coco-256x192.py +++ b/projects/example_project/configs/example-head-loss_hrnet-w32_8xb64-210e_coco-256x192.py @@ -1,15 +1,15 @@ -# Directly inherit the entire recipe you want to use. -_base_ = 'mmpose::body_2d_keypoint/topdown_heatmap/coco/' \ - 'td-hm_hrnet-w32_8xb64-210e_coco-256x192.py' - -# This line is to import your own modules. -custom_imports = dict(imports='models') - -# Modify the model to use your own head and loss. -_base_['model']['head'] = dict( - type='ExampleHead', - in_channels=32, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='ExampleLoss', use_target_weight=True), - decoder=_base_['codec']) +# Directly inherit the entire recipe you want to use. +_base_ = 'mmpose::body_2d_keypoint/topdown_heatmap/coco/' \ + 'td-hm_hrnet-w32_8xb64-210e_coco-256x192.py' + +# This line is to import your own modules. +custom_imports = dict(imports='models') + +# Modify the model to use your own head and loss. +_base_['model']['head'] = dict( + type='ExampleHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='ExampleLoss', use_target_weight=True), + decoder=_base_['codec']) diff --git a/projects/example_project/models/__init__.py b/projects/example_project/models/__init__.py index 61dc5dac0e..dd4a1337c6 100644 --- a/projects/example_project/models/__init__.py +++ b/projects/example_project/models/__init__.py @@ -1,4 +1,4 @@ -from .example_head import ExampleHead -from .example_loss import ExampleLoss - -__all__ = ['ExampleHead', 'ExampleLoss'] +from .example_head import ExampleHead +from .example_loss import ExampleLoss + +__all__ = ['ExampleHead', 'ExampleLoss'] diff --git a/projects/example_project/models/example_head.py b/projects/example_project/models/example_head.py index c5da95d481..d59a68dab6 100644 --- a/projects/example_project/models/example_head.py +++ b/projects/example_project/models/example_head.py @@ -1,77 +1,77 @@ -from mmpose.models import HeatmapHead -from mmpose.registry import MODELS - - -# Register your head to the `MODELS`. -@MODELS.register_module() -class ExampleHead(HeatmapHead): - """Implements an example head. - - Implement the model head just like a normal pytorch module. - """ - - def __init__(self, **kwargs) -> None: - print('Initializing ExampleHead...') - super().__init__(**kwargs) - - def forward(self, feats): - """Forward the network. The input is multi scale feature maps and the - output is the coordinates. - - Args: - feats (Tuple[Tensor]): Multi scale feature maps. - - Returns: - Tensor: output coordinates or heatmaps. - """ - return super().forward(feats) - - def predict(self, feats, batch_data_samples, test_cfg={}): - """Predict results from outputs. The behaviour of head during testing - should be defined in this function. - - Args: - feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage - features (or multiple multi-stage features in TTA) - batch_data_samples (List[:obj:`PoseDataSample`]): A list of - data samples for instances in a batch - test_cfg (dict): The runtime config for testing process. 
Defaults - to {} - - Returns: - Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If - ``test_cfg['output_heatmap']==True``, return both pose and heatmap - prediction; otherwise only return the pose prediction. - - The pose prediction is a list of ``InstanceData``, each contains - the following fields: - - - keypoints (np.ndarray): predicted keypoint coordinates in - shape (num_instances, K, D) where K is the keypoint number - and D is the keypoint dimension - - keypoint_scores (np.ndarray): predicted keypoint scores in - shape (num_instances, K) - - The heatmap prediction is a list of ``PixelData``, each contains - the following fields: - - - heatmaps (Tensor): The predicted heatmaps in shape (K, h, w) - """ - return super().predict(feats, batch_data_samples, test_cfg) - - def loss(self, feats, batch_data_samples, train_cfg={}) -> dict: - """Calculate losses from a batch of inputs and data samples. The - behaviour of head during training should be defined in this function. - - Args: - feats (Tuple[Tensor]): The multi-stage features - batch_data_samples (List[:obj:`PoseDataSample`]): A list of - data samples for instances in a batch - train_cfg (dict): The runtime config for training process. - Defaults to {} - - Returns: - dict: A dictionary of losses. - """ - - return super().loss(feats, batch_data_samples, train_cfg) +from mmpose.models import HeatmapHead +from mmpose.registry import MODELS + + +# Register your head to the `MODELS`. +@MODELS.register_module() +class ExampleHead(HeatmapHead): + """Implements an example head. + + Implement the model head just like a normal pytorch module. + """ + + def __init__(self, **kwargs) -> None: + print('Initializing ExampleHead...') + super().__init__(**kwargs) + + def forward(self, feats): + """Forward the network. The input is multi scale feature maps and the + output is the coordinates. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + Tensor: output coordinates or heatmaps. + """ + return super().forward(feats) + + def predict(self, feats, batch_data_samples, test_cfg={}): + """Predict results from outputs. The behaviour of head during testing + should be defined in this function. + + Args: + feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage + features (or multiple multi-stage features in TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): A list of + data samples for instances in a batch + test_cfg (dict): The runtime config for testing process. Defaults + to {} + + Returns: + Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If + ``test_cfg['output_heatmap']==True``, return both pose and heatmap + prediction; otherwise only return the pose prediction. + + The pose prediction is a list of ``InstanceData``, each contains + the following fields: + + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + + The heatmap prediction is a list of ``PixelData``, each contains + the following fields: + + - heatmaps (Tensor): The predicted heatmaps in shape (K, h, w) + """ + return super().predict(feats, batch_data_samples, test_cfg) + + def loss(self, feats, batch_data_samples, train_cfg={}) -> dict: + """Calculate losses from a batch of inputs and data samples. The + behaviour of head during training should be defined in this function. 
+ + Args: + feats (Tuple[Tensor]): The multi-stage features + batch_data_samples (List[:obj:`PoseDataSample`]): A list of + data samples for instances in a batch + train_cfg (dict): The runtime config for training process. + Defaults to {} + + Returns: + dict: A dictionary of losses. + """ + + return super().loss(feats, batch_data_samples, train_cfg) diff --git a/projects/example_project/models/example_loss.py b/projects/example_project/models/example_loss.py index e55d03537e..c9186b3ce2 100644 --- a/projects/example_project/models/example_loss.py +++ b/projects/example_project/models/example_loss.py @@ -1,40 +1,40 @@ -from mmpose.models import KeypointMSELoss -from mmpose.registry import MODELS - - -# Register your loss to the `MODELS`. -@MODELS.register_module() -class ExampleLoss(KeypointMSELoss): - """Implements an example loss. - - Implement the loss just like a normal pytorch module. - """ - - def __init__(self, **kwargs) -> None: - print('Initializing ExampleLoss...') - super().__init__(**kwargs) - - def forward(self, output, target, target_weights=None, mask=None): - """Forward function of loss. The input arguments should match those - given in `head.loss` function. - - Note: - - batch_size: B - - num_keypoints: K - - heatmaps height: H - - heatmaps weight: W - - Args: - output (Tensor): The output heatmaps with shape [B, K, H, W] - target (Tensor): The target heatmaps with shape [B, K, H, W] - target_weights (Tensor, optional): The target weights of differet - keypoints, with shape [B, K] (keypoint-wise) or - [B, K, H, W] (pixel-wise). - mask (Tensor, optional): The masks of valid heatmap pixels in - shape [B, K, H, W] or [B, 1, H, W]. If ``None``, no mask will - be applied. Defaults to ``None`` - - Returns: - Tensor: The calculated loss. - """ - return super().forward(output, target, target_weights, mask) +from mmpose.models import KeypointMSELoss +from mmpose.registry import MODELS + + +# Register your loss to the `MODELS`. +@MODELS.register_module() +class ExampleLoss(KeypointMSELoss): + """Implements an example loss. + + Implement the loss just like a normal pytorch module. + """ + + def __init__(self, **kwargs) -> None: + print('Initializing ExampleLoss...') + super().__init__(**kwargs) + + def forward(self, output, target, target_weights=None, mask=None): + """Forward function of loss. The input arguments should match those + given in `head.loss` function. + + Note: + - batch_size: B + - num_keypoints: K + - heatmaps height: H + - heatmaps weight: W + + Args: + output (Tensor): The output heatmaps with shape [B, K, H, W] + target (Tensor): The target heatmaps with shape [B, K, H, W] + target_weights (Tensor, optional): The target weights of differet + keypoints, with shape [B, K] (keypoint-wise) or + [B, K, H, W] (pixel-wise). + mask (Tensor, optional): The masks of valid heatmap pixels in + shape [B, K, H, W] or [B, 1, H, W]. If ``None``, no mask will + be applied. Defaults to ``None`` + + Returns: + Tensor: The calculated loss. + """ + return super().forward(output, target, target_weights, mask) diff --git a/projects/faq.md b/projects/faq.md index 3f62e14ec5..8f88599fa7 100644 --- a/projects/faq.md +++ b/projects/faq.md @@ -1,23 +1,23 @@ -# FAQ - -To help users better understand the `projects/` folder and how to use it effectively, we've created this FAQ page. Here, users can find answers to common questions and learn more about various aspects of the `projects/` folder, such as its usage and contribution guidance. - -## Q1: Why set up `projects/` folder? 
- -Implementing new models and features into OpenMMLab's algorithm libraries could be troublesome due to the rigorous requirements on code quality, which could hinder the fast iteration of SOTA models and might discourage our members from sharing their latest outcomes here. And that's why we have this `projects/` folder now, where some experimental features, frameworks and models are placed, only needed to satisfy the minimum requirement on the code quality, and can be used as standalone libraries. Users are welcome to use them if they [use MMPose from source](https://mmpose.readthedocs.io/en/dev-1.x/installation.html#best-practices). - -## Q2: Why should there be a checklist for a project? - -This checkelist is crucial not only for this project's developers but the entire community, since there might be some other contributors joining this project and deciding their starting point from this list. It also helps maintainers accurately estimate time and effort on further code polishing, if needed. - -## Q3: What kind of PR will be merged? - -Reaching the first milestone means that this project suffices the minimum requirement of being merged into 'projects/'. That is, the very first PR of a project must have all the terms in the first milestone checked. We do not have any extra requirements on the project's following PRs, so they can be a minor bug fix or update, and do not have to achieve one milestone at once. But keep in mind that this project is only eligible to become a part of the core package upon attaining the last milestone. - -## Q4: Compared to other models in the core packages, why do the model implementations in projects have different training/testing commands? - -Projects are organized independently from the core package, and therefore their modules cannot be directly imported by `train.py` and `test.py`. Each model implementation in projects should either use `mim` for training/testing as suggested in the example project or provide a custom `train.py`/`test.py`. - -## Q5: How to debug a project with a debugger? - -Debugger makes our lives easier, but using it becomes a bit tricky if we have to train/test a model via `mim`. The way to circumvent that is that we can take advantage of relative path to import these modules. Assuming that we are developing a project X and the core modules are placed under `projects/X/modules`, then simply adding `custom_imports = dict(imports='projects.X.modules')` to the config allows us to debug from usual entrypoints (e.g. `tools/train.py`) from the root directory of the algorithm library. Just don't forget to remove 'projects.X' before project publishment. +# FAQ + +To help users better understand the `projects/` folder and how to use it effectively, we've created this FAQ page. Here, users can find answers to common questions and learn more about various aspects of the `projects/` folder, such as its usage and contribution guidance. + +## Q1: Why set up `projects/` folder? + +Implementing new models and features into OpenMMLab's algorithm libraries could be troublesome due to the rigorous requirements on code quality, which could hinder the fast iteration of SOTA models and might discourage our members from sharing their latest outcomes here. And that's why we have this `projects/` folder now, where some experimental features, frameworks and models are placed, only needed to satisfy the minimum requirement on the code quality, and can be used as standalone libraries. 
Users are welcome to use them if they [use MMPose from source](https://mmpose.readthedocs.io/en/dev-1.x/installation.html#best-practices). + +## Q2: Why should there be a checklist for a project? + +This checkelist is crucial not only for this project's developers but the entire community, since there might be some other contributors joining this project and deciding their starting point from this list. It also helps maintainers accurately estimate time and effort on further code polishing, if needed. + +## Q3: What kind of PR will be merged? + +Reaching the first milestone means that this project suffices the minimum requirement of being merged into 'projects/'. That is, the very first PR of a project must have all the terms in the first milestone checked. We do not have any extra requirements on the project's following PRs, so they can be a minor bug fix or update, and do not have to achieve one milestone at once. But keep in mind that this project is only eligible to become a part of the core package upon attaining the last milestone. + +## Q4: Compared to other models in the core packages, why do the model implementations in projects have different training/testing commands? + +Projects are organized independently from the core package, and therefore their modules cannot be directly imported by `train.py` and `test.py`. Each model implementation in projects should either use `mim` for training/testing as suggested in the example project or provide a custom `train.py`/`test.py`. + +## Q5: How to debug a project with a debugger? + +Debugger makes our lives easier, but using it becomes a bit tricky if we have to train/test a model via `mim`. The way to circumvent that is that we can take advantage of relative path to import these modules. Assuming that we are developing a project X and the core modules are placed under `projects/X/modules`, then simply adding `custom_imports = dict(imports='projects.X.modules')` to the config allows us to debug from usual entrypoints (e.g. `tools/train.py`) from the root directory of the algorithm library. Just don't forget to remove 'projects.X' before project publishment. diff --git a/projects/mmpose4aigc/README.md b/projects/mmpose4aigc/README.md index c3759d846c..b46f2267cb 100644 --- a/projects/mmpose4aigc/README.md +++ b/projects/mmpose4aigc/README.md @@ -1,111 +1,111 @@ -# MMPose for AIGC (AI Generated Content) - -
    - -English | [简体中文](./README_CN.md) - -This project will demonstrate how to use MMPose to generate skeleton images for pose guided AI image generation. - -Currently, we support: - -- [T2I Adapter](https://huggingface.co/spaces/Adapter/T2I-Adapter) - -Please feel free to share interesting pose-guided AIGC projects to us! - -## Get Started - -### Generate OpenPose-style Skeleton - -#### Step 1: Preparation - -Run the following commands to prepare the project: - -```shell -# install mmpose mmdet -pip install openmim -git clone https://github.com/open-mmlab/mmpose.git -cd mmpose -mim install -e . -mim install "mmdet>=3.0.0rc6" - -# download models -bash download_models.sh -``` - -#### Step 2: Generate a Skeleton Image - -Run the following command to generate a skeleton image: - -```shell -# generate a skeleton image -bash mmpose_openpose.sh ../../tests/data/coco/000000000785.jpg -``` - -The input image and its skeleton are as follows: - -
    - -### Generate MMPose-style Skeleton - -#### Step 1: Preparation - -**Env Requirements:** - -- GCC >= 7.5 -- cmake >= 3.14 - -Run the following commands to install the project: - -```shell -bash install_posetracker_linux.sh -``` - -After installation, files are organized as follows: - -```shell -|----mmdeploy-1.0.0-linux-x86_64-cxx11abi -| |----README.md -| |----rtmpose-ort -| | |----rtmdet-nano -| | |----rtmpose-m -| | |----000000147979.jpg -| | |----t2i-adapter_skeleton.txt -``` - -#### Step 2: Generate a Skeleton Image - -Run the following command to generate a skeleton image: - -```shell -# generate a skeleton image -bash mmpose_style_skeleton.sh \ - mmdeploy-1.0.0-linux-x86_64-cxx11abi/rtmpose-ort/000000147979.jpg -``` - -For more details, you can refer to [RTMPose](../rtmpose/README.md). - -The input image and its skeleton are as follows: - -
    - -### Upload to T2I-Adapter - -The demo page of T2I- Adapter is [Here](https://huggingface.co/spaces/Adapter/T2I-Adapter). - -[![Huggingface Gradio](https://img.shields.io/static/v1?label=Demo&message=Huggingface%20Gradio&color=orange)](https://huggingface.co/spaces/ChongMou/T2I-Adapter) - -
    - -## Gallery - -
    +# MMPose for AIGC (AI Generated Content) + +
    + +English | [简体中文](./README_CN.md) + +This project will demonstrate how to use MMPose to generate skeleton images for pose guided AI image generation. + +Currently, we support: + +- [T2I Adapter](https://huggingface.co/spaces/Adapter/T2I-Adapter) + +Please feel free to share interesting pose-guided AIGC projects to us! + +## Get Started + +### Generate OpenPose-style Skeleton + +#### Step 1: Preparation + +Run the following commands to prepare the project: + +```shell +# install mmpose mmdet +pip install openmim +git clone https://github.com/open-mmlab/mmpose.git +cd mmpose +mim install -e . +mim install "mmdet>=3.0.0rc6" + +# download models +bash download_models.sh +``` + +#### Step 2: Generate a Skeleton Image + +Run the following command to generate a skeleton image: + +```shell +# generate a skeleton image +bash mmpose_openpose.sh ../../tests/data/coco/000000000785.jpg +``` + +The input image and its skeleton are as follows: + +
    + +### Generate MMPose-style Skeleton + +#### Step 1: Preparation + +**Env Requirements:** + +- GCC >= 7.5 +- cmake >= 3.14 + +Run the following commands to install the project: + +```shell +bash install_posetracker_linux.sh +``` + +After installation, files are organized as follows: + +```shell +|----mmdeploy-1.0.0-linux-x86_64-cxx11abi +| |----README.md +| |----rtmpose-ort +| | |----rtmdet-nano +| | |----rtmpose-m +| | |----000000147979.jpg +| | |----t2i-adapter_skeleton.txt +``` + +#### Step 2: Generate a Skeleton Image + +Run the following command to generate a skeleton image: + +```shell +# generate a skeleton image +bash mmpose_style_skeleton.sh \ + mmdeploy-1.0.0-linux-x86_64-cxx11abi/rtmpose-ort/000000147979.jpg +``` + +For more details, you can refer to [RTMPose](../rtmpose/README.md). + +The input image and its skeleton are as follows: + +
    + +### Upload to T2I-Adapter + +The demo page of T2I- Adapter is [Here](https://huggingface.co/spaces/Adapter/T2I-Adapter). + +[![Huggingface Gradio](https://img.shields.io/static/v1?label=Demo&message=Huggingface%20Gradio&color=orange)](https://huggingface.co/spaces/ChongMou/T2I-Adapter) + +
    + +## Gallery + +
    diff --git a/projects/mmpose4aigc/README_CN.md b/projects/mmpose4aigc/README_CN.md index 44bbe2d459..d6fe6ce493 100644 --- a/projects/mmpose4aigc/README_CN.md +++ b/projects/mmpose4aigc/README_CN.md @@ -1,110 +1,110 @@ -# MMPose for AIGC (AI Generated Content) - -
    - -简体中文 | [English](./README.md) - -本项目将支持使用 MMPose 来生成骨架图片,用于姿态引导的 AI 图像生成。 - -当前已支持: - -- [T2I Adapter](https://huggingface.co/spaces/Adapter/T2I-Adapter) - -欢迎分享更多姿态引导的 AIGC 项目给我们! - -## 快速上手 - -### 生成 Openpose 风格的骨架图片 - -#### Step 1: 准备 - -运行以下命令准备项目: - -```shell -# install mmpose mmdet -pip install openmim -git clone https://github.com/open-mmlab/mmpose.git -cd mmpose -mim install -e . -mim install "mmdet>=3.0.0rc6" - -# download models -bash download_models.sh -``` - -#### Step 2: 生成骨架图片 - -运行以下命令生成骨架图片: - -```shell -bash mmpose_openpose.sh ../../tests/data/coco/000000000785.jpg -``` - -输入图片与生成骨架图片如下: - -
    - -### 生成 MMPose 风格的骨架图片 - -#### Step 1: 准备 - -**环境要求:** - -- GCC >= 7.5 -- cmake >= 3.14 - -运行以下命令安装项目: - -```shell -bash install_posetracker_linux.sh -``` - -最终的文件结构如下: - -```shell -|----mmdeploy-1.0.0-linux-x86_64-cxx11abi -| |----README.md -| |----rtmpose-ort -| | |----rtmdet-nano -| | |----rtmpose-m -| | |----000000147979.jpg -| | |----t2i-adapter_skeleton.txt -``` - -#### Step 2: 生成姿态骨架图片 - -运行以下命令生成姿态骨架图片: - -```shell -# 生成骨架图片 -bash mmpose_style_skeleton.sh \ - mmdeploy-1.0.0-linux-x86_64-cxx11abi/rtmpose-ort/000000147979.jpg -``` - -更多详细信息可以查看 [RTMPose](../rtmpose/README_CN.md)。 - -输入图片与生成骨架图片如下: - -
    - -### 使用 T2I-Adapter - -T2I- Adapter 在线试玩请点击 [这里](https://huggingface.co/spaces/Adapter/T2I-Adapter) - -[![Huggingface Gradio](https://img.shields.io/static/v1?label=Demo&message=Huggingface%20Gradio&color=orange)](https://huggingface.co/spaces/ChongMou/T2I-Adapter) - -
    - -## 结果展示 - -
    +# MMPose for AIGC (AI Generated Content) + +
    + +简体中文 | [English](./README.md) + +本项目将支持使用 MMPose 来生成骨架图片,用于姿态引导的 AI 图像生成。 + +当前已支持: + +- [T2I Adapter](https://huggingface.co/spaces/Adapter/T2I-Adapter) + +欢迎分享更多姿态引导的 AIGC 项目给我们! + +## 快速上手 + +### 生成 Openpose 风格的骨架图片 + +#### Step 1: 准备 + +运行以下命令准备项目: + +```shell +# install mmpose mmdet +pip install openmim +git clone https://github.com/open-mmlab/mmpose.git +cd mmpose +mim install -e . +mim install "mmdet>=3.0.0rc6" + +# download models +bash download_models.sh +``` + +#### Step 2: 生成骨架图片 + +运行以下命令生成骨架图片: + +```shell +bash mmpose_openpose.sh ../../tests/data/coco/000000000785.jpg +``` + +输入图片与生成骨架图片如下: + +
    + +### 生成 MMPose 风格的骨架图片 + +#### Step 1: 准备 + +**环境要求:** + +- GCC >= 7.5 +- cmake >= 3.14 + +运行以下命令安装项目: + +```shell +bash install_posetracker_linux.sh +``` + +最终的文件结构如下: + +```shell +|----mmdeploy-1.0.0-linux-x86_64-cxx11abi +| |----README.md +| |----rtmpose-ort +| | |----rtmdet-nano +| | |----rtmpose-m +| | |----000000147979.jpg +| | |----t2i-adapter_skeleton.txt +``` + +#### Step 2: 生成姿态骨架图片 + +运行以下命令生成姿态骨架图片: + +```shell +# 生成骨架图片 +bash mmpose_style_skeleton.sh \ + mmdeploy-1.0.0-linux-x86_64-cxx11abi/rtmpose-ort/000000147979.jpg +``` + +更多详细信息可以查看 [RTMPose](../rtmpose/README_CN.md)。 + +输入图片与生成骨架图片如下: + +
    + +### 使用 T2I-Adapter + +T2I- Adapter 在线试玩请点击 [这里](https://huggingface.co/spaces/Adapter/T2I-Adapter) + +[![Huggingface Gradio](https://img.shields.io/static/v1?label=Demo&message=Huggingface%20Gradio&color=orange)](https://huggingface.co/spaces/ChongMou/T2I-Adapter) + +
    + +## 结果展示 + +
    diff --git a/projects/mmpose4aigc/download_models.sh b/projects/mmpose4aigc/download_models.sh index c26a3c833f..febe43df16 100644 --- a/projects/mmpose4aigc/download_models.sh +++ b/projects/mmpose4aigc/download_models.sh @@ -1,20 +1,20 @@ -#!/bin/bash -# Copyright (c) OpenMMLab. All rights reserved. - -# Create models folder -mkdir models - -# Go to models folder -cd models - -# Download det model -wget https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth - -# Download pose model -wget https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth - -# Go back mmpose4aigc -cd .. - -# Success -echo "Download completed." +#!/bin/bash +# Copyright (c) OpenMMLab. All rights reserved. + +# Create models folder +mkdir models + +# Go to models folder +cd models + +# Download det model +wget https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth + +# Download pose model +wget https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth + +# Go back mmpose4aigc +cd .. + +# Success +echo "Download completed." diff --git a/projects/mmpose4aigc/install_posetracker_linux.sh b/projects/mmpose4aigc/install_posetracker_linux.sh index 09c91ce9d1..ff6171880a 100644 --- a/projects/mmpose4aigc/install_posetracker_linux.sh +++ b/projects/mmpose4aigc/install_posetracker_linux.sh @@ -1,30 +1,30 @@ -#!/bin/bash -# Copyright (c) OpenMMLab. All rights reserved. - -# Download pre-compiled files -wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cxx11abi.tar.gz - -# Unzip files -tar -xzvf mmdeploy-1.0.0-linux-x86_64-cxx11abi.tar.gz - -# Go to the sdk folder -cd mmdeploy-1.0.0-linux-x86_64-cxx11abi - -# Init environment -source set_env.sh - -# If opencv 3+ is not installed on your system, execute the following command. -# If it is installed, skip this command -bash install_opencv.sh - -# Compile executable programs -bash build_sdk.sh - -# Download models -wget https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-cpu.zip - -# Unzip files -unzip rtmpose-cpu.zip - -# Success -echo "Installation completed." +#!/bin/bash +# Copyright (c) OpenMMLab. All rights reserved. + +# Download pre-compiled files +wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cxx11abi.tar.gz + +# Unzip files +tar -xzvf mmdeploy-1.0.0-linux-x86_64-cxx11abi.tar.gz + +# Go to the sdk folder +cd mmdeploy-1.0.0-linux-x86_64-cxx11abi + +# Init environment +source set_env.sh + +# If opencv 3+ is not installed on your system, execute the following command. +# If it is installed, skip this command +bash install_opencv.sh + +# Compile executable programs +bash build_sdk.sh + +# Download models +wget https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-cpu.zip + +# Unzip files +unzip rtmpose-cpu.zip + +# Success +echo "Installation completed." diff --git a/projects/mmpose4aigc/mmpose_openpose.sh b/projects/mmpose4aigc/mmpose_openpose.sh index 1b011a6192..2e06197fc1 100644 --- a/projects/mmpose4aigc/mmpose_openpose.sh +++ b/projects/mmpose4aigc/mmpose_openpose.sh @@ -1,12 +1,12 @@ -#!/bin/bash -# Copyright (c) OpenMMLab. All rights reserved. 
- -INPUT_IMAGE=$1 - -python openpose_visualization.py \ - ../rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ - models/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ - ../rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ - models/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ - --input $INPUT_IMAGE \ - --device cuda:0 \ +#!/bin/bash +# Copyright (c) OpenMMLab. All rights reserved. + +INPUT_IMAGE=$1 + +python openpose_visualization.py \ + ../rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ + models/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ + ../rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ + models/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ + --input $INPUT_IMAGE \ + --device cuda:0 \ diff --git a/projects/mmpose4aigc/mmpose_style_skeleton.sh b/projects/mmpose4aigc/mmpose_style_skeleton.sh index afb03ecfc7..eaef23b867 100644 --- a/projects/mmpose4aigc/mmpose_style_skeleton.sh +++ b/projects/mmpose4aigc/mmpose_style_skeleton.sh @@ -1,17 +1,17 @@ -#!/bin/bash -# Copyright (c) OpenMMLab. All rights reserved. - -WORKSPACE=mmdeploy-1.0.0-linux-x86_64-cxx11abi -export LD_LIBRARY_PATH=${WORKSPACE}/lib:${WORKSPACE}/thirdparty/onnxruntime/lib:$LD_LIBRARY_PATH - -INPUT_IMAGE=$1 - -${WORKSPACE}/bin/pose_tracker \ - ${WORKSPACE}/rtmpose-ort/rtmdet-nano \ - ${WORKSPACE}/rtmpose-ort/rtmpose-m \ - $INPUT_IMAGE \ - --background black \ - --skeleton ${WORKSPACE}/rtmpose-ort/t2i-adapter_skeleton.txt \ - --output ./skeleton_res.jpg \ - --pose_kpt_thr 0.4 \ - --show -1 +#!/bin/bash +# Copyright (c) OpenMMLab. All rights reserved. + +WORKSPACE=mmdeploy-1.0.0-linux-x86_64-cxx11abi +export LD_LIBRARY_PATH=${WORKSPACE}/lib:${WORKSPACE}/thirdparty/onnxruntime/lib:$LD_LIBRARY_PATH + +INPUT_IMAGE=$1 + +${WORKSPACE}/bin/pose_tracker \ + ${WORKSPACE}/rtmpose-ort/rtmdet-nano \ + ${WORKSPACE}/rtmpose-ort/rtmpose-m \ + $INPUT_IMAGE \ + --background black \ + --skeleton ${WORKSPACE}/rtmpose-ort/t2i-adapter_skeleton.txt \ + --output ./skeleton_res.jpg \ + --pose_kpt_thr 0.4 \ + --show -1 diff --git a/projects/mmpose4aigc/openpose_visualization.py b/projects/mmpose4aigc/openpose_visualization.py index b634d07757..b128d3a648 100644 --- a/projects/mmpose4aigc/openpose_visualization.py +++ b/projects/mmpose4aigc/openpose_visualization.py @@ -1,174 +1,174 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
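# The visualization script below converts MMPose's 17 COCO keypoints into the
# 18-keypoint OpenPose layout: a neck joint is synthesized as the midpoint of
# the two shoulders and the indices are then remapped. A rough standalone
# sketch of that conversion (hypothetical helper, vectorized over instances;
# the script's own shoulder-confidence check only evaluates cleanly for a
# single detection):
import numpy as np


def coco17_to_openpose18(keypoints: np.ndarray, kpt_thr: float = 0.4) -> np.ndarray:
    """`keypoints` is assumed to have shape (N, 17, 3) holding (x, y, score)."""
    neck = (keypoints[:, 5] + keypoints[:, 6]) / 2                 # midpoint of the shoulders
    weak = (keypoints[:, 5, 2] < kpt_thr) | (keypoints[:, 6, 2] < kpt_thr)
    neck[weak, 2] = 0                                              # drop neck if either shoulder is weak
    new_kpts = np.insert(keypoints, 17, neck, axis=1)              # (N, 18, 3)

    openpose_idx = [15, 14, 17, 16, 2, 6, 3, 7, 4, 8, 12, 9, 13, 10, 1]
    mmpose_idx = [1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17]
    new_kpts[:, openpose_idx, :] = new_kpts[:, mmpose_idx, :]      # reorder to the OpenPose layout
    return new_kpts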
-import math -import mimetypes -import os -from argparse import ArgumentParser -from itertools import product - -import cv2 -import mmcv -import numpy as np -from mmengine.registry import init_default_scope - -from mmpose.apis import inference_topdown -from mmpose.apis import init_model as init_pose_estimator -from mmpose.evaluation.functional import nms -from mmpose.structures import merge_data_samples - -try: - from mmdet.apis import inference_detector, init_detector - has_mmdet = True -except (ImportError, ModuleNotFoundError): - has_mmdet = False - -# openpose format -limb_seq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], - [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], - [1, 16], [16, 18]] - -colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, - 0], [170, 255, 0], - [85, 255, 0], [0, 255, 0], - [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], - [0, 85, 255], [0, 0, 255], [85, 0, 255], [170, 0, - 255], [255, 0, 255], - [255, 0, 170], [255, 0, 85]] - -stickwidth = 4 -num_openpose_kpt = 18 -num_link = len(limb_seq) - - -def mmpose_to_openpose_visualization(args, img_path, detector, pose_estimator): - """Visualize predicted keypoints of one image in openpose format.""" - - # predict bbox - scope = detector.cfg.get('default_scope', 'mmdet') - if scope is not None: - init_default_scope(scope) - det_result = inference_detector(detector, img_path) - pred_instance = det_result.pred_instances.cpu().numpy() - bboxes = np.concatenate( - (pred_instance.bboxes, pred_instance.scores[:, None]), axis=1) - bboxes = bboxes[np.logical_and(pred_instance.labels == args.det_cat_id, - pred_instance.scores > args.bbox_thr)] - bboxes = bboxes[nms(bboxes, args.nms_thr), :4] - - # predict keypoints - pose_results = inference_topdown(pose_estimator, img_path, bboxes) - data_samples = merge_data_samples(pose_results) - - # concatenate scores and keypoints - keypoints = np.concatenate( - (data_samples.pred_instances.keypoints, - data_samples.pred_instances.keypoint_scores.reshape(-1, 17, 1)), - axis=-1) - - # compute neck joint - neck = (keypoints[:, 5] + keypoints[:, 6]) / 2 - if keypoints[:, 5, 2] < args.kpt_thr or keypoints[:, 6, 2] < args.kpt_thr: - neck[:, 2] = 0 - - # 17 keypoints to 18 keypoints - new_keypoints = np.insert(keypoints[:, ], 17, neck, axis=1) - - # mmpose format to openpose format - openpose_idx = [15, 14, 17, 16, 2, 6, 3, 7, 4, 8, 12, 9, 13, 10, 1] - mmpose_idx = [1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17] - new_keypoints[:, openpose_idx, :] = new_keypoints[:, mmpose_idx, :] - - # show the results - img = mmcv.imread(img_path, channel_order='rgb') - - # black background - black_img = np.zeros_like(img) - - num_instance = new_keypoints.shape[0] - - # draw keypoints - for i, j in product(range(num_instance), range(num_openpose_kpt)): - x, y, conf = new_keypoints[i][j] - if conf > args.kpt_thr: - cv2.circle(black_img, (int(x), int(y)), 4, colors[j], thickness=-1) - - # draw links - cur_black_img = black_img.copy() - for i, link_idx in product(range(num_instance), range(num_link)): - conf = new_keypoints[i][np.array(limb_seq[link_idx]) - 1, 2] - if np.sum(conf > args.kpt_thr) == 2: - Y = new_keypoints[i][np.array(limb_seq[link_idx]) - 1, 0] - X = new_keypoints[i][np.array(limb_seq[link_idx]) - 1, 1] - mX = np.mean(X) - mY = np.mean(Y) - length = ((X[0] - X[1])**2 + (Y[0] - Y[1])**2)**0.5 - angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1])) - polygon = cv2.ellipse2Poly( - (int(mY), int(mX)), (int(length / 2), 
stickwidth), int(angle), - 0, 360, 1) - cv2.fillConvexPoly(cur_black_img, polygon, colors[link_idx]) - black_img = cv2.addWeighted(black_img, 0.4, cur_black_img, 0.6, 0) - - # save image - out_file = 'openpose_' + os.path.splitext( - os.path.basename(img_path))[0] + '.png' - cv2.imwrite(out_file, black_img[:, :, [2, 1, 0]]) - - -def main(): - """Visualize the demo images. - - Using mmdet to detect the human. - """ - parser = ArgumentParser() - parser.add_argument('det_config', help='Config file for detection') - parser.add_argument('det_checkpoint', help='Checkpoint file for detection') - parser.add_argument('pose_config', help='Config file for pose') - parser.add_argument('pose_checkpoint', help='Checkpoint file for pose') - parser.add_argument('--input', type=str, help='input Image file') - parser.add_argument( - '--device', default='cuda:0', help='Device used for inference') - parser.add_argument( - '--det-cat-id', - type=int, - default=0, - help='Category id for bounding box detection model') - parser.add_argument( - '--bbox-thr', - type=float, - default=0.4, - help='Bounding box score threshold') - parser.add_argument( - '--nms-thr', - type=float, - default=0.3, - help='IoU threshold for bounding box NMS') - parser.add_argument( - '--kpt-thr', type=float, default=0.4, help='Keypoint score threshold') - - assert has_mmdet, 'Please install mmdet to run the demo.' - - args = parser.parse_args() - - assert args.input != '' - assert args.det_config is not None - assert args.det_checkpoint is not None - - # build detector - detector = init_detector( - args.det_config, args.det_checkpoint, device=args.device) - - # build pose estimator - pose_estimator = init_pose_estimator( - args.pose_config, - args.pose_checkpoint, - device=args.device, - cfg_options=dict(model=dict(test_cfg=dict(output_heatmaps=False)))) - - input_type = mimetypes.guess_type(args.input)[0].split('/')[0] - if input_type == 'image': - mmpose_to_openpose_visualization(args, args.input, detector, - pose_estimator) - - -if __name__ == '__main__': - main() +# Copyright (c) OpenMMLab. All rights reserved. 
+import math +import mimetypes +import os +from argparse import ArgumentParser +from itertools import product + +import cv2 +import mmcv +import numpy as np +from mmengine.registry import init_default_scope + +from mmpose.apis import inference_topdown +from mmpose.apis import init_model as init_pose_estimator +from mmpose.evaluation.functional import nms +from mmpose.structures import merge_data_samples + +try: + from mmdet.apis import inference_detector, init_detector + has_mmdet = True +except (ImportError, ModuleNotFoundError): + has_mmdet = False + +# openpose format +limb_seq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], + [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], + [1, 16], [16, 18]] + +colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, + 0], [170, 255, 0], + [85, 255, 0], [0, 255, 0], + [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], + [0, 85, 255], [0, 0, 255], [85, 0, 255], [170, 0, + 255], [255, 0, 255], + [255, 0, 170], [255, 0, 85]] + +stickwidth = 4 +num_openpose_kpt = 18 +num_link = len(limb_seq) + + +def mmpose_to_openpose_visualization(args, img_path, detector, pose_estimator): + """Visualize predicted keypoints of one image in openpose format.""" + + # predict bbox + scope = detector.cfg.get('default_scope', 'mmdet') + if scope is not None: + init_default_scope(scope) + det_result = inference_detector(detector, img_path) + pred_instance = det_result.pred_instances.cpu().numpy() + bboxes = np.concatenate( + (pred_instance.bboxes, pred_instance.scores[:, None]), axis=1) + bboxes = bboxes[np.logical_and(pred_instance.labels == args.det_cat_id, + pred_instance.scores > args.bbox_thr)] + bboxes = bboxes[nms(bboxes, args.nms_thr), :4] + + # predict keypoints + pose_results = inference_topdown(pose_estimator, img_path, bboxes) + data_samples = merge_data_samples(pose_results) + + # concatenate scores and keypoints + keypoints = np.concatenate( + (data_samples.pred_instances.keypoints, + data_samples.pred_instances.keypoint_scores.reshape(-1, 17, 1)), + axis=-1) + + # compute neck joint + neck = (keypoints[:, 5] + keypoints[:, 6]) / 2 + if keypoints[:, 5, 2] < args.kpt_thr or keypoints[:, 6, 2] < args.kpt_thr: + neck[:, 2] = 0 + + # 17 keypoints to 18 keypoints + new_keypoints = np.insert(keypoints[:, ], 17, neck, axis=1) + + # mmpose format to openpose format + openpose_idx = [15, 14, 17, 16, 2, 6, 3, 7, 4, 8, 12, 9, 13, 10, 1] + mmpose_idx = [1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17] + new_keypoints[:, openpose_idx, :] = new_keypoints[:, mmpose_idx, :] + + # show the results + img = mmcv.imread(img_path, channel_order='rgb') + + # black background + black_img = np.zeros_like(img) + + num_instance = new_keypoints.shape[0] + + # draw keypoints + for i, j in product(range(num_instance), range(num_openpose_kpt)): + x, y, conf = new_keypoints[i][j] + if conf > args.kpt_thr: + cv2.circle(black_img, (int(x), int(y)), 4, colors[j], thickness=-1) + + # draw links + cur_black_img = black_img.copy() + for i, link_idx in product(range(num_instance), range(num_link)): + conf = new_keypoints[i][np.array(limb_seq[link_idx]) - 1, 2] + if np.sum(conf > args.kpt_thr) == 2: + Y = new_keypoints[i][np.array(limb_seq[link_idx]) - 1, 0] + X = new_keypoints[i][np.array(limb_seq[link_idx]) - 1, 1] + mX = np.mean(X) + mY = np.mean(Y) + length = ((X[0] - X[1])**2 + (Y[0] - Y[1])**2)**0.5 + angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1])) + polygon = cv2.ellipse2Poly( + (int(mY), int(mX)), (int(length / 2), 
stickwidth), int(angle), + 0, 360, 1) + cv2.fillConvexPoly(cur_black_img, polygon, colors[link_idx]) + black_img = cv2.addWeighted(black_img, 0.4, cur_black_img, 0.6, 0) + + # save image + out_file = 'openpose_' + os.path.splitext( + os.path.basename(img_path))[0] + '.png' + cv2.imwrite(out_file, black_img[:, :, [2, 1, 0]]) + + +def main(): + """Visualize the demo images. + + Using mmdet to detect the human. + """ + parser = ArgumentParser() + parser.add_argument('det_config', help='Config file for detection') + parser.add_argument('det_checkpoint', help='Checkpoint file for detection') + parser.add_argument('pose_config', help='Config file for pose') + parser.add_argument('pose_checkpoint', help='Checkpoint file for pose') + parser.add_argument('--input', type=str, help='input Image file') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--det-cat-id', + type=int, + default=0, + help='Category id for bounding box detection model') + parser.add_argument( + '--bbox-thr', + type=float, + default=0.4, + help='Bounding box score threshold') + parser.add_argument( + '--nms-thr', + type=float, + default=0.3, + help='IoU threshold for bounding box NMS') + parser.add_argument( + '--kpt-thr', type=float, default=0.4, help='Keypoint score threshold') + + assert has_mmdet, 'Please install mmdet to run the demo.' + + args = parser.parse_args() + + assert args.input != '' + assert args.det_config is not None + assert args.det_checkpoint is not None + + # build detector + detector = init_detector( + args.det_config, args.det_checkpoint, device=args.device) + + # build pose estimator + pose_estimator = init_pose_estimator( + args.pose_config, + args.pose_checkpoint, + device=args.device, + cfg_options=dict(model=dict(test_cfg=dict(output_heatmaps=False)))) + + input_type = mimetypes.guess_type(args.input)[0].split('/')[0] + if input_type == 'image': + mmpose_to_openpose_visualization(args, args.input, detector, + pose_estimator) + + +if __name__ == '__main__': + main() diff --git a/projects/rtmpose/README.md b/projects/rtmpose/README.md index dc5b0dbe23..c963e0e0df 100644 --- a/projects/rtmpose/README.md +++ b/projects/rtmpose/README.md @@ -1,1158 +1,1158 @@ -
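For readers skimming the diff, the heart of `openpose_visualization.py` above is the COCO-17 to OpenPose-18 keypoint conversion. The snippet below is a minimal, self-contained sketch of that logic (the function name `coco17_to_openpose18` is ours, and the neck thresholding is written element-wise rather than copied verbatim): a neck joint is synthesized as the midpoint of the two shoulders, appended as the 18th keypoint, and the remaining joints are permuted into OpenPose order.

```python
import numpy as np


def coco17_to_openpose18(keypoints: np.ndarray, kpt_thr: float = 0.4) -> np.ndarray:
    """Convert (N, 17, 3) COCO keypoints [x, y, score] into OpenPose 18-keypoint order.

    Sketch of the conversion performed in openpose_visualization.py; the neck
    scoring is element-wise so it also behaves for N > 1 instances.
    """
    kpts = np.asarray(keypoints, dtype=np.float32).copy()
    # Synthesize the neck as the midpoint of the shoulders (COCO indices 5 and 6).
    neck = (kpts[:, 5] + kpts[:, 6]) / 2
    # Zero the neck score whenever either shoulder falls below the threshold.
    low_conf = (kpts[:, 5, 2] < kpt_thr) | (kpts[:, 6, 2] < kpt_thr)
    neck[low_conf, 2] = 0
    # Append the neck as the 18th keypoint (index 17).
    out = np.concatenate([kpts, neck[:, None, :]], axis=1)
    # Reorder joints from MMPose/COCO indexing to OpenPose indexing,
    # using the same index tables as the script above.
    openpose_idx = [15, 14, 17, 16, 2, 6, 3, 7, 4, 8, 12, 9, 13, 10, 1]
    mmpose_idx = [1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17]
    out[:, openpose_idx, :] = out[:, mmpose_idx, :]
    return out
```

The resulting (N, 18, 3) array is what the drawing loop above consumes together with `limb_seq` and `colors`.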
    - -# RTMPose: Real-Time Multi-Person Pose Estimation toolkit based on MMPose - -> [RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose](https://arxiv.org/abs/2303.07399) - -
    - -English | [简体中文](README_CN.md) - -
    - -______________________________________________________________________ - -## Abstract - -Recent studies on 2D pose estimation have achieved excellent performance on public benchmarks, yet its application in the industrial community still suffers from heavy model parameters and high latency. -In order to bridge this gap, we empirically study five aspects that affect the performance of multi-person pose estimation algorithms: paradigm, backbone network, localization algorithm, training strategy, and deployment inference, and present a high-performance real-time multi-person pose estimation framework, **RTMPose**, based on MMPose. -Our RTMPose-m achieves **75.8% AP** on COCO with **90+ FPS** on an Intel i7-11700 CPU and **430+ FPS** on an NVIDIA GTX 1660 Ti GPU. -To further evaluate RTMPose's capability in critical real-time applications, we also report the performance after deploying on the mobile device. Our RTMPose-s achieves **72.2% AP** on COCO with **70+ FPS** on a Snapdragon 865 chip, outperforming existing open-source libraries. -With the help of MMDeploy, our project supports various platforms like CPU, GPU, NVIDIA Jetson, and mobile devices and multiple inference backends such as ONNXRuntime, TensorRT, ncnn, etc. - -![rtmpose_intro](https://user-images.githubusercontent.com/13503330/219269619-935499e5-bdd9-49ea-8104-3c7796dbd862.png) - -______________________________________________________________________ - -## 📄 Table of Contents - -- [🥳 🚀 What's New](#--whats-new-) -- [📖 Introduction](#-introduction-) -- [🙌 Community](#-community-) -- [⚡ Pipeline Performance](#-pipeline-performance-) -- [📊 Model Zoo](#-model-zoo-) -- [👀 Visualization](#-visualization-) -- [😎 Get Started](#-get-started-) -- [👨‍🏫 How to Train](#-how-to-train-) -- [🏗️ How to Deploy](#️-how-to-deploy-) -- [📚 Common Usage](#️-common-usage-) - - [🚀 Inference Speed Test](#-inference-speed-test-) - - [📊 Model Test](#-model-test-) -- [📜 Citation](#-citation-) - -## 🥳 🚀 What's New [🔝](#-table-of-contents) - -- Jun. 2023: - - Release 26-keypoint Body models trained on combined datasets. -- May. 2023: - - Add [code examples](./examples/) of RTMPose. - - Release Hand, Face, Body models trained on combined datasets. -- Mar. 2023: RTMPose is released. RTMPose-m runs at 430+ FPS and achieves 75.8 mAP on COCO val set. - -## 📖 Introduction [🔝](#-table-of-contents) - -
    - -### ✨ Major Features - -- 🚀 **High efficiency and high accuracy** - - | Model | AP(COCO) | CPU-FPS | GPU-FPS | - | :---: | :------: | :-----: | :-----: | - | t | 68.5 | 300+ | 940+ | - | s | 72.2 | 200+ | 710+ | - | m | 75.8 | 90+ | 430+ | - | l | 76.5 | 50+ | 280+ | - -- 🛠️ **Easy to deploy** - - - Step-by-step deployment tutorials. - - Support various backends including - - ONNX - - TensorRT - - ncnn - - OpenVINO - - etc. - - Support various platforms including - - Linux - - Windows - - NVIDIA Jetson - - ARM - - etc. - -- 🏗️ **Design for practical applications** - - - Pipeline inference API and SDK for - - Python - - C++ - - C# - - JAVA - - etc. - -## 🙌 Community [🔝](#-table-of-contents) - -RTMPose is a long-term project dedicated to the training, optimization and deployment of high-performance real-time pose estimation algorithms in practical scenarios, so we are looking forward to the power from the community. Welcome to share the training configurations and tricks based on RTMPose in different business applications to help more community users! - -✨ ✨ ✨ - -- **If you are a new user of RTMPose, we eagerly hope you can fill out this [Google Questionnaire](https://docs.google.com/forms/d/e/1FAIpQLSfzwWr3eNlDzhU98qzk2Eph44Zio6hi5r0iSwfO9wSARkHdWg/viewform?usp=sf_link)/[Chinese version](https://uua478.fanqier.cn/f/xxmynrki), it's very important for our work!** - -✨ ✨ ✨ - -Feel free to join our community group for more help: - -- WeChat Group: - -
    - -- Discord Group: - - 🙌 https://discord.gg/raweFPmdzG 🙌 - -## ⚡ Pipeline Performance [🔝](#-table-of-contents) - -**Notes** - -- Pipeline latency is tested under skip-frame settings; the detection interval is 5 frames by default. -- Flip test is NOT used. -- Env Setup: - - torch >= 1.7.1 - - onnxruntime 1.12.1 - - TensorRT 8.4.3.1 - - ncnn 20221128 - - cuDNN 8.3.2 - - CUDA 11.3 - -| Detection Config | Pose Config | Input Size
    (Det/Pose) | Model AP
    (COCO) | Pipeline AP
    (COCO) | Params (M)
    (Det/Pose) | Flops (G)
    (Det/Pose) | ORT-Latency(ms)
    (i7-11700) | TRT-FP16-Latency(ms)
    (GTX 1660Ti) | Download | -| :------------------------------------------------------------------ | :---------------------------------------------------------------------------- | :---------------------------: | :---------------------: | :------------------------: | :---------------------------: | :--------------------------: | :--------------------------------: | :---------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| [RTMDet-nano](./rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py) | [RTMPose-t](./rtmpose/body_2d_keypoint/rtmpose-t_8xb256-420e_coco-256x192.py) | 320x320
    256x192 | 40.3
    67.1 | 64.4 | 0.99
    3.34 | 0.31
    0.36 | 12.403 | 2.467 | [det](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth)
    [pose](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-aic-coco_pt-aic-coco_420e-256x192-cfc8f33d_20230126.pth) | -| [RTMDet-nano](./rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py) | [RTMPose-s](./rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py) | 320x320
    256x192 | 40.3
    71.1 | 68.5 | 0.99
    5.47 | 0.31
    0.68 | 16.658 | 2.730 | [det](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth)
    [pose](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-aic-coco_pt-aic-coco_420e-256x192-fcb2599b_20230126.pth) | -| [RTMDet-nano](./rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py) | [RTMPose-m](./rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py) | 320x320
    256x192 | 40.3
    75.3 | 73.2 | 0.99
    13.59 | 0.31
    1.93 | 26.613 | 4.312 | [det](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth)
    [pose](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth) | -| [RTMDet-nano](./rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py) | [RTMPose-l](./rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py) | 320x320
    256x192 | 40.3
    76.3 | 74.2 | 0.99
    27.66 | 0.31
    4.16 | 36.311 | 4.644 | [det](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth)
    [pose](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-256x192-f016ffe0_20230126.pth) | -| [RTMDet-m](./rtmdet/person/rtmdet_m_640-8xb32_coco-person.py) | [RTMPose-m](./rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py) | 640x640
    256x192 | 62.5
    75.3 | 75.7 | 24.66
    13.59 | 38.95
    1.93 | - | 6.923 | [det](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_m_8xb32-100e_coco-obj365-person-235e8209.pth)
    [pose](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth) | -| [RTMDet-m](./rtmdet/person/rtmdet_m_640-8xb32_coco-person.py) | [RTMPose-l](./rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py) | 640x640
    256x192 | 62.5
    76.3 | 76.6 | 24.66
    27.66 | 38.95
    4.16 | - | 7.204 | [det](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_m_8xb32-100e_coco-obj365-person-235e8209.pth)
    [pose](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-256x192-f016ffe0_20230126.pth) | - -## 📊 Model Zoo [🔝](#-table-of-contents) - -**Notes** - -- Since all models are trained on multi-domain combined datasets for practical applications, results are **not** suitable for academic comparison. -- More results of RTMPose on public benchmarks can refer to [Model Zoo](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html) -- Flip test is used. -- Inference speed measured on more hardware platforms can refer to [Benchmark](./benchmark/README.md) -- If you have datasets you would like us to support, feel free to [contact us](https://docs.google.com/forms/d/e/1FAIpQLSfzwWr3eNlDzhU98qzk2Eph44Zio6hi5r0iSwfO9wSARkHdWg/viewform?usp=sf_link)/[联系我们](https://uua478.fanqier.cn/f/xxmynrki). - -### Body 2d - -#### 17 Keypoints - -- Keypoints are defined as [COCO](http://cocodataset.org/). For details please refer to the [meta info](/configs/_base_/datasets/coco.py). -- - -
    -AIC+COCO - -| Config | Input Size | AP
    (COCO) | PCK@0.1
    (Body8) | AUC
    (Body8) | Params
    (M) | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | ncnn-FP16-Latency
    (ms)
    (Snapdragon 865) | Download | -| :---------------------------------------------------------------------------: | :--------: | :---------------: | :---------------------: | :-----------------: | :----------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :-----------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------: | -| [RTMPose-t](./rtmpose/body_2d_keypoint/rtmpose-t_8xb256-420e_coco-256x192.py) | 256x192 | 68.5 | 91.28 | 63.38 | 3.34 | 0.36 | 3.20 | 1.06 | 9.02 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-aic-coco_pt-aic-coco_420e-256x192-cfc8f33d_20230126.pth) | -| [RTMPose-s](./rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 72.2 | 92.95 | 66.19 | 5.47 | 0.68 | 4.48 | 1.39 | 13.89 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-aic-coco_pt-aic-coco_420e-256x192-fcb2599b_20230126.pth) | -| [RTMPose-m](./rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 75.8 | 94.13 | 68.53 | 13.59 | 1.93 | 11.06 | 2.29 | 26.44 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth) | -| [RTMPose-l](./rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 76.5 | 94.35 | 68.98 | 27.66 | 4.16 | 18.85 | 3.46 | 45.37 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-256x192-f016ffe0_20230126.pth) | -| [RTMPose-m](./rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-384x288.py) | 384x288 | 77.0 | 94.32 | 69.85 | 13.72 | 4.33 | 24.78 | 3.66 | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-384x288-a62a0b32_20230228.pth) | -| [RTMPose-l](./rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-384x288.py) | 384x288 | 77.3 | 94.54 | 70.14 | 27.79 | 9.35 | - | 6.05 | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-384x288-97d6cb0f_20230228.pth) | - -
    -Body8 - -- `*` denotes model trained on 7 public datasets: - - [AI Challenger](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#aic) - - [MS COCO](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#coco) - - [CrowdPose](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#crowdpose) - - [MPII](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#mpii) - - [sub-JHMDB](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#sub-jhmdb-dataset) - - [Halpe](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_wholebody_keypoint.html#halpe) - - [PoseTrack18](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#posetrack18) -- `Body8` denotes the addition of the [OCHuman](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#ochuman) dataset, in addition to the 7 datasets mentioned above, for evaluation. - -| Config | Input Size | AP
    (COCO) | PCK@0.1
    (Body8) | AUC
    (Body8) | Params
    (M) | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | ncnn-FP16-Latency
    (ms)
    (Snapdragon 865) | Download | -| :-----------------------------------------------------------------------------: | :--------: | :---------------: | :---------------------: | :-----------------: | :----------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :-----------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------: | -| [RTMPose-t\*](./rtmpose/body_2d_keypoint/rtmpose-t_8xb256-420e_coco-256x192.py) | 256x192 | 65.9 | 91.44 | 63.18 | 3.34 | 0.36 | 3.20 | 1.06 | 9.02 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-body7_pt-body7_420e-256x192-026a1439_20230504.pth) | -| [RTMPose-s\*](./rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 69.7 | 92.45 | 65.15 | 5.47 | 0.68 | 4.48 | 1.39 | 13.89 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-body7_pt-body7_420e-256x192-acd4a1ef_20230504.pth) | -| [RTMPose-m\*](./rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 74.9 | 94.25 | 68.59 | 13.59 | 1.93 | 11.06 | 2.29 | 26.44 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-256x192-e48f03d0_20230504.pth) | -| [RTMPose-l\*](./rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 76.7 | 95.08 | 70.14 | 27.66 | 4.16 | 18.85 | 3.46 | 45.37 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-256x192-4dba18fc_20230504.pth) | -| [RTMPose-m\*](./rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-384x288.py) | 384x288 | 76.6 | 94.64 | 70.38 | 13.72 | 4.33 | 24.78 | 3.66 | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-384x288-65e718c4_20230504.pth) | -| [RTMPose-l\*](./rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-384x288.py) | 384x288 | 78.3 | 95.36 | 71.58 | 27.79 | 9.35 | - | 6.05 | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-384x288-3f5a1437_20230504.pth) | -| [RTMPose-x\*](./rtmpose/body_2d_keypoint/rtmpose-x_8xb256-700e_coco-384x288.py) | 384x288 | 78.8 | - | - | 49.43 | 17.22 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-x_simcc-body7_pt-body7_700e-384x288-71d7b7e9_20230629.pth) | - -
    - -#### 26 Keypoints - -- Keypoints are defined as [Halpe26](https://github.com/Fang-Haoshu/Halpe-FullBody/). For details please refer to the [meta info](/configs/_base_/datasets/halpe26.py). -- -- Models are trained and evaluated on `Body8`. - -| Config | Input Size | PCK@0.1
    (Body8) | AUC
    (Body8) | Params(M) | FLOPS(G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | ncnn-FP16-Latency
    (ms)
    (Snapdragon 865) | Download | -| :---------------------------------------------------------------------------------------: | :--------: | :---------------------: | :-----------------: | :-------: | :------: | :-----------------------------------------: | :------------------------------------------------: | :-----------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------: | -| [RTMPose-t\*](./rtmpose/body_2d_keypoint/rtmpose-t_8xb1024-700e_body8-halpe26-256x192.py) | 256x192 | 91.89 | 66.35 | 3.51 | 0.37 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-body7_pt-body7-halpe26_700e-256x192-6020f8a6_20230605.pth) | -| [RTMPose-s\*](./rtmpose/body_2d_keypoint/rtmpose-s_8xb1024-700e_body8-halpe26-256x192.py) | 256x192 | 93.01 | 68.62 | 5.70 | 0.70 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-body7_pt-body7-halpe26_700e-256x192-7f134165_20230605.pth) | -| [RTMPose-m\*](./rtmpose/body_2d_keypoint/rtmpose-m_8xb512-700e_body8-halpe26-256x192.py) | 256x192 | 94.75 | 71.91 | 13.93 | 1.95 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.pth) | -| [RTMPose-l\*](./rtmpose/body_2d_keypoint/rtmpose-l_8xb512-700e_body8-halpe26-256x192.py) | 256x192 | 95.37 | 73.19 | 28.11 | 4.19 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7-halpe26_700e-256x192-2abb7558_20230605.pth) | -| [RTMPose-m\*](./rtmpose/body_2d_keypoint/rtmpose-m_8xb512-700e_body8-halpe26-384x288.py) | 384x288 | 95.15 | 73.56 | 14.06 | 4.37 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-384x288-89e6428b_20230605.pth) | -| [RTMPose-l\*](./rtmpose/body_2d_keypoint/rtmpose-l_8xb512-700e_body8-halpe26-384x288.py) | 384x288 | 95.56 | 74.38 | 28.24 | 9.40 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7-halpe26_700e-384x288-734182ce_20230605.pth) | -| [RTMPose-x\*](./rtmpose/body_2d_keypoint/rtmpose-x_8xb256-700e_body8-halpe26-384x288.py) | 384x288 | 95.74 | 74.82 | 50.00 | 17.29 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-x_simcc-body7_pt-body7-halpe26_700e-384x288-7fb6e239_20230606.pth) | - -#### Model Pruning - -**Notes** - -- Model pruning is supported by [MMRazor](https://github.com/open-mmlab/mmrazor) - -| Config | Input Size | AP
    (COCO) | Params
    (M) | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | ncnn-FP16-Latency
    (ms)
    (Snapdragon 865) | Download | -| :-----------------------: | :--------: | :---------------: | :----------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :-----------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | -| RTMPose-s-aic-coco-pruned | 256x192 | 69.4 | 3.43 | 0.35 | - | - | - | [Model](https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_finetune_rtmpose-s_8xb256-420e_aic-coco-256x192.pth) | - -For more details, please refer to [GroupFisher Pruning for RTMPose](./rtmpose/pruning/README.md). - -### WholeBody 2d (133 Keypoints) - -- Keypoints are defined as [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody/). For details please refer to the [meta info](/configs/_base_/datasets/coco_wholebody.py). -- - -| Config | Input Size | Whole AP | Whole AR | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | Download | -| :------------------------------ | :--------: | :------: | :------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :-------------------------------: | -| [RTMPose-m](./rtmpose/wholebody_2d_keypoint/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 58.2 | 67.4 | 2.22 | 13.50 | 4.00 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody_pt-aic-coco_270e-256x192-cd5e845c_20230123.pth) | -| [RTMPose-l](./rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 61.1 | 70.0 | 4.52 | 23.41 | 5.67 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-256x192-6f206314_20230124.pth) | -| [RTMPose-l](./rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py) | 384x288 | 64.8 | 73.0 | 10.07 | 44.58 | 7.68 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-384x288-eaeb96c8_20230125.pth) | -| [RTMPose-x](./rtmpose/wholebody_2d_keypoint/rtmpose-x_8xb32-270e_coco-wholebody-384x288.py) | 384x288 | 65.3 | 73.3 | 18.1 | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-x_simcc-coco-wholebody_pt-body7_270e-384x288-401dfc90_20230629.pth) | - -### Animal 2d (17 Keypoints) - -- Keypoints are defined as [AP-10K](https://github.com/AlexTheBad/AP-10K/). For details please refer to the [meta info](/configs/_base_/datasets/ap10k.py). -- - -| Config | Input Size | AP
    (AP10K) | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | Download | -| :----------------------------: | :--------: | :----------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :------------------------------: | -| [RTMPose-m](./rtmpose/animal_2d_keypoint/rtmpose-m_8xb64-210e_ap10k-256x256.py) | 256x256 | 72.2 | 2.57 | 14.157 | 2.404 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-ap10k_pt-aic-coco_210e-256x256-7a041aa1_20230206.pth) | - -### Face 2d (106 Keypoints) - -- Keypoints are defined as [LaPa](https://github.com/JDAI-CV/lapa-dataset). For details please refer to the [meta info](/configs/_base_/datasets/lapa.py). -- - -
    -Face6 - -- `Face6` and `*` denote model trained on 6 public datasets: - - [COCO-Wholebody-Face](https://github.com/jin-s13/COCO-WholeBody/) - - [WFLW](https://wywu.github.io/projects/LAB/WFLW.html) - - [300W](https://ibug.doc.ic.ac.uk/resources/300-W/) - - [COFW](http://www.vision.caltech.edu/xpburgos/ICCV13/) - - [Halpe](https://github.com/Fang-Haoshu/Halpe-FullBody/) - - [LaPa](https://github.com/JDAI-CV/lapa-dataset) - -| Config | Input Size | NME
    (LaPa) | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | Download | -| :----------------------------: | :--------: | :----------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :------------------------------: | -| [RTMPose-t\*](./rtmpose/face_2d_keypoint/rtmpose-t_8xb256-120e_lapa-256x256.py) | 256x256 | 1.67 | 0.652 | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-face6_pt-in1k_120e-256x256-df79d9a5_20230529.pth) | -| [RTMPose-s\*](./rtmpose/face_2d_keypoint/rtmpose-s_8xb256-120e_lapa-256x256.py) | 256x256 | 1.59 | 1.119 | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-face6_pt-in1k_120e-256x256-d779fdef_20230529.pth) | -| [RTMPose-m\*](./rtmpose/face_2d_keypoint/rtmpose-m_8xb256-120e_lapa-256x256.py) | 256x256 | 1.44 | 2.852 | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-face6_pt-in1k_120e-256x256-72a37400_20230529.pth) | - -
    - -### Hand 2d (21 Keypoints) - -- Keypoints are defined as [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody/). For details please refer to the [meta info](/configs/_base_/datasets/coco_wholebody_hand.py). -- - -| Detection Config | Input Size | Model AP
    (OneHand10K) | Flops
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | Download | -| :---------------------------: | :--------: | :---------------------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :--------------------: | -| [RTMDet-nano
    (alpha version)](./rtmdet/hand/rtmdet_nano_320-8xb32_hand.py) | 320x320 | 76.0 | 0.31 | - | - | [Det Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmdet_nano_8xb32-300e_hand-267f9c8f.pth) | - -
    -Hand5 - -- `Hand5` and `*` denote model trained on 5 public datasets: - - [COCO-Wholebody-Hand](https://github.com/jin-s13/COCO-WholeBody/) - - [OneHand10K](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html) - - [FreiHand2d](https://lmb.informatik.uni-freiburg.de/projects/freihand/) - - [RHD2d](https://lmb.informatik.uni-freiburg.de/resources/datasets/RenderedHandposeDataset.en.html) - - [Halpe](https://github.com/Fang-Haoshu/Halpe-FullBody/) - -| Config | Input Size | PCK@0.2
    (COCO-Wholebody-Hand) | PCK@0.2
    (Hand5) | AUC
    (Hand5) | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | Download | -| :-------------------------------------------------------------------------------------------------------------------: | :--------: | :-----------------------------------: | :---------------------: | :-----------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------: | -| [RTMPose-m\*
    (alpha version)](./rtmpose/hand_2d_keypoint/rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 81.5 | 96.4 | 83.9 | 2.581 | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-hand5_pt-aic-coco_210e-256x256-74fb594_20230320.pth) | - -
    - -### Pretrained Models - -We provide the UDP pretraining configs of the CSPNeXt backbone. Find more details in the [pretrain_cspnext_udp folder](./rtmpose/pretrain_cspnext_udp/). - -
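How a backbone checkpoint from the tables below is plugged into a downstream pose config is not spelled out here, so the following is only a sketch of the usual MMEngine `Pretrained` init_cfg pattern; the checkpoint URL is the CSPNeXt-m entry from the AIC+COCO table, and all other backbone and head fields are elided (see the RTMPose configs for the full definitions).

```python
# Sketch only: loading UDP-pretrained CSPNeXt-m backbone weights into a pose
# model via MMEngine's `Pretrained` init_cfg. The remaining backbone arguments,
# neck and head are omitted here and come from the corresponding RTMPose config.
model = dict(
    type='TopdownPoseEstimator',
    backbone=dict(
        type='CSPNeXt',
        init_cfg=dict(
            type='Pretrained',
            prefix='backbone.',
            checkpoint='https://download.openmmlab.com/mmpose/v1/projects/rtmpose/'
            'cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth')),
)
```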
    -AIC+COCO - -| Model | Input Size | Params
    (M) | Flops
    (G) | AP
    (GT) | AR
    (GT) | Download | -| :----------: | :--------: | :----------------: | :---------------: | :-------------: | :-------------: | :---------------------------------------------------------------------------------------------------------------: | -| CSPNeXt-tiny | 256x192 | 6.03 | 1.43 | 65.5 | 68.9 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/cspnext-tiny_udp-aic-coco_210e-256x192-cbed682d_20230130.pth) | -| CSPNeXt-s | 256x192 | 8.58 | 1.78 | 70.0 | 73.3 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth) | -| CSPNeXt-m | 256x192 | 17.53 | 3.05 | 74.8 | 77.7 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth) | -| CSPNeXt-l | 256x192 | 32.44 | 5.32 | 77.2 | 79.9 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth) | - -
    -Body8 - -- `*` denotes model trained on 7 public datasets: - - [AI Challenger](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#aic) - - [MS COCO](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#coco) - - [CrowdPose](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#crowdpose) - - [MPII](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#mpii) - - [sub-JHMDB](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#sub-jhmdb-dataset) - - [Halpe](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_wholebody_keypoint.html#halpe) - - [PoseTrack18](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#posetrack18) -- `Body8` denotes the addition of the [OCHuman](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#ochuman) dataset, in addition to the 7 datasets mentioned above, for evaluation. - -| Model | Input Size | Params
    (M) | Flops
    (G) | AP
    (COCO) | PCK@0.2
    (Body8) | AUC
    (Body8) | Download | -| :------------: | :--------: | :----------------: | :---------------: | :---------------: | :---------------------: | :-----------------: | :--------------------------------------------------------------------------------: | -| CSPNeXt-tiny\* | 256x192 | 6.03 | 1.43 | 65.9 | 96.34 | 63.80 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-tiny_udp-body7_210e-256x192-a3775292_20230504.pth) | -| CSPNeXt-s\* | 256x192 | 8.58 | 1.78 | 68.7 | 96.59 | 64.92 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-s_udp-body7_210e-256x192-8c9ccbdb_20230504.pth) | -| CSPNeXt-m\* | 256x192 | 17.53 | 3.05 | 73.7 | 97.42 | 68.19 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-body7_210e-256x192-e0c9327b_20230504.pth) | -| CSPNeXt-l\* | 256x192 | 32.44 | 5.32 | 75.7 | 97.76 | 69.57 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-body7_210e-256x192-5e9558ef_20230504.pth) | -| CSPNeXt-m\* | 384x288 | 17.53 | 6.86 | 75.8 | 97.60 | 70.18 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-body7_210e-384x288-b9bc2b57_20230504.pth) | -| CSPNeXt-l\* | 384x288 | 32.44 | 11.96 | 77.2 | 97.89 | 71.23 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-body7_210e-384x288-b15bc30d_20230504.pth) | -| CSPNeXt-x\* | 384x288 | 54.92 | 19.96 | 78.1 | 98.00 | 71.79 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-x_udp-body7_210e-384x288-d28b58e6_20230529.pth) | - -
    - -#### ImageNet - -We also provide the ImageNet classification pre-trained weights of the CSPNeXt backbone. Find more details in [RTMDet](https://github.com/open-mmlab/mmdetection/blob/latest/configs/rtmdet/README.md#classification). - -| Model | Input Size | Params
    (M) | Flops
    (G) | Top-1 (%) | Top-5 (%) | Download | -| :----------: | :--------: | :----------------: | :---------------: | :-------: | :-------: | :---------------------------------------------------------------------------------------------------------------------------: | -| CSPNeXt-tiny | 224x224 | 2.73 | 0.34 | 69.44 | 89.45 | [Model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e-3a2dd350.pth) | -| CSPNeXt-s | 224x224 | 4.89 | 0.66 | 74.41 | 92.23 | [Model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e-ea671761.pth) | -| CSPNeXt-m | 224x224 | 13.05 | 1.93 | 79.27 | 94.79 | [Model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth) | -| CSPNeXt-l | 224x224 | 27.16 | 4.19 | 81.30 | 95.62 | [Model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-l_8xb256-rsb-a1-600e_in1k-6a760974.pth) | -| CSPNeXt-x | 224x224 | 48.85 | 7.76 | 82.10 | 95.69 | [Model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-x_8xb256-rsb-a1-600e_in1k-b3f78edd.pth) | - -## 👀 Visualization [🔝](#-table-of-contents) - -
    - -## 😎 Get Started [🔝](#-table-of-contents) - -We provide two appoaches to try RTMPose: - -- MMPose demo scripts -- Pre-compiled MMDeploy SDK (Recommend, 6-10 times faster) - -### MMPose demo scripts - -MMPose provides demo scripts to conduct [inference with existing models](https://mmpose.readthedocs.io/en/latest/user_guides/inference.html). - -**Note:** - -- Inferencing with Pytorch can not reach the maximum speed of RTMPose, just for verification. -- Model file can be either a local path or a download link - -```shell -# go to the mmpose folder -cd ${PATH_TO_MMPOSE} - -# inference with rtmdet -python demo/topdown_demo_with_mmdet.py \ - projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ - https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ - projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ - https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ - --input {YOUR_TEST_IMG_or_VIDEO} \ - --show - -# inference with webcam -python demo/topdown_demo_with_mmdet.py \ - projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ - https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ - projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ - https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ - --input webcam \ - --show -``` - -Result is as follows: - -![topdown_inference_with_rtmdet](https://user-images.githubusercontent.com/13503330/220005020-06bdf37f-6817-4681-a2c8-9dd55e4fbf1e.png) - -### Pre-compiled MMDeploy SDK (Recommended) - -MMDeploy provides a precompiled SDK for Pipeline reasoning on RTMPose projects, where the model used for reasoning is the SDK version. - -- All models must by exported by `tools/deploy.py` before PoseTracker can be used for inference. -- For the tutorial of exporting the SDK version model, see [SDK Reasoning](#%EF%B8%8F-step3-inference-with-sdk), and for detailed parameter settings of inference, see [Pipeline Reasoning](#-step4-pipeline-inference). -- Exported SDK models (ONNX, TRT, ncnn, etc.) can be downloaded from [OpenMMLab Deploee](https://platform.openmmlab.com/deploee). -- You can also convert `.pth` models into SDK [online](https://platform.openmmlab.com/deploee/task-convert-list). - -#### Linux - -Env Requirements: - -- GCC >= 7.5 -- cmake >= 3.20 - -##### Python Inference - -1. Install mmdeploy_runtime or mmdeploy_runtime_gpu - -```shell -# for onnxruntime -pip install mmdeploy-runtime - -# for onnxruntime-gpu / tensorrt -pip install mmdeploy-runtime-gpu -``` - -2. Download Pre-compiled files. 
- -```shell -# onnxruntime -# for ubuntu -wget -c https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cxx11abi.tar.gz -# unzip then add third party runtime libraries to the PATH - -# for centos7 and lower -wget -c https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64.tar.gz -# unzip then add third party runtime libraries to the PATH - -# onnxruntime-gpu / tensorrt -# for ubuntu -wget -c https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cxx11abi-cuda11.3.tar.gz -# unzip then add third party runtime libraries to the PATH - -# for centos7 and lower -wget -c https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cuda11.3.tar.gz -# unzip then add third party runtime libraries to the PATH -``` - -3. Download the sdk models and unzip to `./example/python`. (If you need other models, please export sdk models refer to [SDK Reasoning](#%EF%B8%8F-step3-inference-with-sdk)) - -```shell -# rtmdet-nano + rtmpose-m for cpu sdk -wget https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-cpu.zip - -unzip rtmpose-cpu.zip -``` - -4. Inference with `pose_tracker.py`: - -```shell -# go to ./example/python - -# Please pass the folder of the model, not the model file -# Format: -# python pose_tracker.py cpu {det work-dir} {pose work-dir} {your_video.mp4} - -# Example: -python pose_tracker.py cpu rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_video.mp4 - -# webcam -python pose_tracker.py cpu rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ 0 -``` - -##### ONNX - -```shell -# Download pre-compiled files -wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cxx11abi.tar.gz - -# Unzip files -tar -xzvf mmdeploy-1.0.0-linux-x86_64-cxx11abi.tar.gz - -# Go to the sdk folder -cd mmdeploy-1.0.0-linux-x86_64-cxx11abi - -# Init environment -source set_env.sh - -# If opencv 3+ is not installed on your system, execute the following command. -# If it is installed, skip this command -bash install_opencv.sh - -# Compile executable programs -bash build_sdk.sh - -# Inference for an image -# Please pass the folder of the model, not the model file -./bin/det_pose rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_img.jpg --device cpu - -# Inference for a video -# Please pass the folder of the model, not the model file -./bin/pose_tracker rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_video.mp4 --device cpu - -# Inference using webcam -# Please pass the folder of the model, not the model file -./bin/pose_tracker rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ 0 --device cpu -``` - -##### TensorRT - -```shell -# Download pre-compiled files -wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cxx11abi-cuda11.3.tar.gz - -# Unzip files -tar -xzvf mmdeploy-1.0.0-linux-x86_64-cxx11abi-cuda11.3.tar.gz - -# Go to the sdk folder -cd mmdeploy-1.0.0-linux-x86_64-cxx11abi-cuda11.3 - -# Init environment -source set_env.sh - -# If opencv 3+ is not installed on your system, execute the following command. 
-# If it is installed, skip this command -bash install_opencv.sh - -# Compile executable programs -bash build_sdk.sh - -# Inference for an image -# Please pass the folder of the model, not the model file -./bin/det_pose rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_img.jpg --device cuda - -# Inference for a video -# Please pass the folder of the model, not the model file -./bin/pose_tracker rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_video.mp4 --device cuda - -# Inference using webcam -# Please pass the folder of the model, not the model file -./bin/pose_tracker rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ 0 --device cuda -``` - -For details, see [Pipeline Inference](#-step4-pipeline-inference). - -#### Windows - -##### Python Inference - -1. Install mmdeploy_runtime or mmdeploy_runtime_gpu - -```shell -# for onnxruntime -pip install mmdeploy-runtime -# download [sdk](https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-windows-amd64.zip) add third party runtime libraries to the PATH - -# for onnxruntime-gpu / tensorrt -pip install mmdeploy-runtime-gpu -# download [sdk](https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-windows-amd64-cuda11.3.zip) add third party runtime libraries to the PATH -``` - -2. Download the sdk models and unzip to `./example/python`. (If you need other models, please export sdk models refer to [SDK Reasoning](#%EF%B8%8F-step3-inference-with-sdk)) - -```shell -# rtmdet-nano + rtmpose-m for cpu sdk -wget https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-cpu.zip - -unzip rtmpose-cpu.zip -``` - -3. Inference with `pose_tracker.py`: - -```shell -# go to ./example/python -# Please pass the folder of the model, not the model file -python pose_tracker.py cpu rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_video.mp4 - -# Inference using webcam -# Please pass the folder of the model, not the model file -python pose_tracker.py cpu rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ 0 -``` - -##### Executable Inference - -1. Install [CMake](https://cmake.org/download/). -2. Download the [pre-compiled SDK](https://github.com/open-mmlab/mmdeploy/releases). -3. Unzip the SDK and go to the `sdk` folder. -4. open windows powerShell with administrator privileges - -```shell -set-ExecutionPolicy RemoteSigned -``` - -5. Install OpenCV: - -```shell -# in sdk folder: -.\install_opencv.ps1 -``` - -6. Set environment variables: - -```shell -# in sdk folder: -. .\set_env.ps1 -``` - -7. Compile the SDK: - -```shell -# in sdk folder: -# (if you installed opencv by .\install_opencv.ps1) -.\build_sdk.ps1 -# (if you installed opencv yourself) -.\build_sdk.ps1 "path/to/folder/of/OpenCVConfig.cmake" -``` - -8. the executable will be generated in: - -```shell -example\cpp\build\Release -``` - -### MMPose demo scripts - -MMPose provides demo scripts to conduct [inference with existing models](https://mmpose.readthedocs.io/en/latest/user_guides/inference.html). - -**Note:** - -- Inferencing with Pytorch can not reach the maximum speed of RTMPose, just for verification. 
- -```shell -# go to the mmpose folder -cd ${PATH_TO_MMPOSE} - -# inference with rtmdet -python demo/topdown_demo_with_mmdet.py \ - projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ - {PATH_TO_CHECKPOINT}/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ - projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ - {PATH_TO_CHECKPOINT}/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ - --input {YOUR_TEST_IMG_or_VIDEO} \ - --show - -# inference with webcam -python demo/topdown_demo_with_mmdet.py \ - projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ - {PATH_TO_CHECKPOINT}/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ - projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ - {PATH_TO_CHECKPOINT}/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ - --input webcam \ - --show -``` - -Result is as follows: - -![topdown_inference_with_rtmdet](https://user-images.githubusercontent.com/13503330/220005020-06bdf37f-6817-4681-a2c8-9dd55e4fbf1e.png) - -## 👨‍🏫 How to Train [🔝](#-table-of-contents) - -Please refer to [Train and Test](https://mmpose.readthedocs.io/en/latest/user_guides/train_and_test.html). - -**Tips**: - -- Please accordinally reduce `batch_size` and `base_lr` when your dataset is small. -- Guidelines to choose a model - - m: Recommended and Preferred Use - - t/s: For mobile devices with extremely low computing power, or scenarios with stringent inference speed requirements - - l: Suitable for scenarios with strong computing power and not sensitive to speed - -## 🏗️ How to Deploy [🔝](#-table-of-contents) - -Here is a basic example of deploy RTMPose with [MMDeploy](https://github.com/open-mmlab/mmdeploy/tree/main). - -- Exported SDK models (ONNX, TRT, ncnn, etc.) can be downloaded from [OpenMMLab Deploee](https://platform.openmmlab.com/deploee). -- You can also convert `.pth` models into SDK [online](https://platform.openmmlab.com/deploee/task-convert-list). - -### 🧩 Step1. Install MMDeploy - -Before starting the deployment, please make sure you install MMPose and MMDeploy correctly. - -- Install MMPose, please refer to the [MMPose installation guide](https://mmpose.readthedocs.io/en/latest/installation.html). -- Install MMDeploy, please refer to the [MMDeploy installation guide](https://mmdeploy.readthedocs.io/en/latest/get_started.html#installation). - -Depending on the deployment backend, some backends require compilation of custom operators, so please refer to the corresponding document to ensure the environment is built correctly according to your needs: - -- [ONNX RUNTIME SUPPORT](https://mmdeploy.readthedocs.io/en/latest/05-supported-backends/onnxruntime.html) -- [TENSORRT SUPPORT](https://mmdeploy.readthedocs.io/en/latest/05-supported-backends/tensorrt.html) -- [OPENVINO SUPPORT](https://mmdeploy.readthedocs.io/en/latest/05-supported-backends/openvino.html) -- [More](https://github.com/open-mmlab/mmdeploy/tree/main/docs/en/05-supported-backends) - -### 🛠️ Step2. Convert Model - -After the installation, you can enjoy the model deployment journey starting from converting PyTorch model to backend model by running MMDeploy's `tools/deploy.py`. - -The detailed model conversion tutorial please refer to the [MMDeploy document](https://mmdeploy.readthedocs.io/en/latest/02-how-to-run/convert_model.html). Here we only give the example of converting RTMPose. 
- -Here we take converting RTMDet-nano and RTMPose-m to ONNX/TensorRT as an example. - -- If you only want to use ONNX, please use: - - [`detection_onnxruntime_static.py`](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmdet/detection/detection_onnxruntime_static.py) for RTMDet. - - [`pose-detection_simcc_onnxruntime_dynamic.py`](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py) for RTMPose. -- If you want to use TensorRT, please use: - - [`detection_tensorrt_static-320x320.py`](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmdet/detection/detection_tensorrt_static-320x320.py) for RTMDet. - - [`pose-detection_simcc_tensorrt_dynamic-256x192.py`](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmpose/pose-detection_simcc_tensorrt_dynamic-256x192.py) for RTMPose. - -If you want to customize the settings in the deployment config for your requirements, please refer to [MMDeploy config tutorial](https://mmdeploy.readthedocs.io/en/latest/02-how-to-run/write_config.html). - -In this tutorial, we organize files as follows: - -```shell -|----mmdeploy -|----mmdetection -|----mmpose -``` - -#### ONNX - -```shell -# go to the mmdeploy folder -cd ${PATH_TO_MMDEPLOY} - -# run the command to convert RTMDet -# Model file can be either a local path or a download link -python tools/deploy.py \ - configs/mmdet/detection/detection_onnxruntime_static.py \ - ../mmpose/projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ - https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ - demo/resources/human-pose.jpg \ - --work-dir rtmpose-ort/rtmdet-nano \ - --device cpu \ - --show \ - --dump-info # dump sdk info - -# run the command to convert RTMPose -# Model file can be either a local path or a download link -python tools/deploy.py \ - configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \ - ../mmpose/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ - https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ - demo/resources/human-pose.jpg \ - --work-dir rtmpose-ort/rtmpose-m \ - --device cpu \ - --show \ - --dump-info # dump sdk info -``` - -The converted model file is `{work-dir}/end2end.onnx` by defaults. 
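Before moving on to TensorRT, a quick sanity check of the exported ONNX file can save a debugging round trip. The snippet below is only a sketch: it assumes `onnxruntime` is installed, the work directory used in the command above (`rtmpose-ort/rtmpose-m`), and the default 256x192 RTMPose-m export; input and output names are read from the session rather than hard-coded.

```python
import numpy as np
import onnxruntime as ort

# Load the exported model and inspect its input signature.
sess = ort.InferenceSession('rtmpose-ort/rtmpose-m/end2end.onnx',
                            providers=['CPUExecutionProvider'])
inp = sess.get_inputs()[0]
print(inp.name, inp.shape)

# Run one dummy forward pass to confirm the graph executes end to end.
dummy = np.random.rand(1, 3, 256, 192).astype(np.float32)
outputs = sess.run(None, {inp.name: dummy})
for meta, out in zip(sess.get_outputs(), outputs):
    print(meta.name, out.shape)
```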
- -#### TensorRT - -```shell -# go to the mmdeploy folder -cd ${PATH_TO_MMDEPLOY} - -# run the command to convert RTMDet -# Model file can be either a local path or a download link -python tools/deploy.py \ - configs/mmdet/detection/detection_tensorrt_static-320x320.py \ - ../mmpose/projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ - https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ - demo/resources/human-pose.jpg \ - --work-dir rtmpose-trt/rtmdet-nano \ - --device cuda:0 \ - --show \ - --dump-info # dump sdk info - -# run the command to convert RTMPose -# Model file can be either a local path or a download link -python tools/deploy.py \ - configs/mmpose/pose-detection_simcc_tensorrt_dynamic-256x192.py \ - ../mmpose/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ - https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ - demo/resources/human-pose.jpg \ - --work-dir rtmpose-trt/rtmpose-m \ - --device cuda:0 \ - --show \ - --dump-info # dump sdk info -``` - -The converted model file is `{work-dir}/end2end.engine` by defaults. - -🎊 If the script runs successfully, you will see the following files: - -![convert_models](https://user-images.githubusercontent.com/13503330/217726963-7815dd01-561a-4605-b0c6-07b6fe1956c3.png) - -#### Advanced Setting - -To convert the model with TRT-FP16, you can enable the fp16 mode in your deploy config: - -```Python -# in MMDeploy config -backend_config = dict( - type='tensorrt', - common_config=dict( - fp16_mode=True # enable fp16 - )) -``` - -### 🕹️ Step3. Inference with SDK - -We provide both Python and C++ inference API with MMDeploy SDK. - -To use SDK, you need to dump the required info during converting the model. Just add --dump-info to the model conversion command. - -```shell -# RTMDet -# Model file can be either a local path or a download link -python tools/deploy.py \ - configs/mmdet/detection/detection_onnxruntime_dynamic.py \ - ../mmpose/projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ - https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ - demo/resources/human-pose.jpg \ - --work-dir rtmpose-ort/rtmdet-nano \ - --device cpu \ - --show \ - --dump-info # dump sdk info - -# RTMPose -# Model file can be either a local path or a download link -python tools/deploy.py \ - configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \ - ../mmpose/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ - https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ - demo/resources/human-pose.jpg \ - --work-dir rtmpose-ort/rtmpose-m \ - --device cpu \ - --show \ - --dump-info # dump sdk info -``` - -After running the command, it will dump 3 json files additionally for the SDK: - -```shell -|----{work-dir} - |----end2end.onnx # ONNX model - |----end2end.engine # TensorRT engine file - - |----pipeline.json # - |----deploy.json # json files for the SDK - |----detail.json # -``` - -#### Python API - -Here is a basic example of SDK Python API: - -```Python -# Copyright (c) OpenMMLab. All rights reserved. 
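# Demo outline: parse the CLI arguments, build a PoseDetector from an SDK
# model directory dumped by the converter, run it on a single image (optionally
# restricted to a --bbox given as x, y, w, h and converted to
# left/top/right/bottom below), then draw the predicted keypoints and write the
# result to 'output_pose.png'.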
-import argparse - -import cv2 -import numpy as np -from mmdeploy_runtime import PoseDetector - - -def parse_args(): - parser = argparse.ArgumentParser( - description='show how to use sdk python api') - parser.add_argument('device_name', help='name of device, cuda or cpu') - parser.add_argument( - 'model_path', - help='path of mmdeploy SDK model dumped by model converter') - parser.add_argument('image_path', help='path of an image') - parser.add_argument( - '--bbox', - default=None, - nargs='+', - type=int, - help='bounding box of an object in format (x, y, w, h)') - args = parser.parse_args() - return args - - -def main(): - args = parse_args() - - img = cv2.imread(args.image_path) - - detector = PoseDetector( - model_path=args.model_path, device_name=args.device_name, device_id=0) - - if args.bbox is None: - result = detector(img) - else: - # converter (x, y, w, h) -> (left, top, right, bottom) - print(args.bbox) - bbox = np.array(args.bbox, dtype=int) - bbox[2:] += bbox[:2] - result = detector(img, bbox) - print(result) - - _, point_num, _ = result.shape - points = result[:, :, :2].reshape(point_num, 2) - for [x, y] in points.astype(int): - cv2.circle(img, (x, y), 1, (0, 255, 0), 2) - - cv2.imwrite('output_pose.png', img) - - -if __name__ == '__main__': - main() -``` - -#### C++ API - -Here is a basic example of SDK C++ API: - -```C++ -#include "mmdeploy/detector.hpp" - -#include "opencv2/imgcodecs/imgcodecs.hpp" -#include "utils/argparse.h" -#include "utils/visualize.h" - -DEFINE_ARG_string(model, "Model path"); -DEFINE_ARG_string(image, "Input image path"); -DEFINE_string(device, "cpu", R"(Device name, e.g. "cpu", "cuda")"); -DEFINE_string(output, "detector_output.jpg", "Output image path"); - -DEFINE_double(det_thr, .5, "Detection score threshold"); - -int main(int argc, char* argv[]) { - if (!utils::ParseArguments(argc, argv)) { - return -1; - } - - cv::Mat img = cv::imread(ARGS_image); - if (img.empty()) { - fprintf(stderr, "failed to load image: %s\n", ARGS_image.c_str()); - return -1; - } - - // construct a detector instance - mmdeploy::Detector detector(mmdeploy::Model{ARGS_model}, mmdeploy::Device{FLAGS_device}); - - // apply the detector, the result is an array-like class holding references to - // `mmdeploy_detection_t`, will be released automatically on destruction - mmdeploy::Detector::Result dets = detector.Apply(img); - - // visualize - utils::Visualize v; - auto sess = v.get_session(img); - int count = 0; - for (const mmdeploy_detection_t& det : dets) { - if (det.score > FLAGS_det_thr) { // filter bboxes - sess.add_det(det.bbox, det.label_id, det.score, det.mask, count++); - } - } - - if (!FLAGS_output.empty()) { - cv::imwrite(FLAGS_output, sess.get()); - } - - return 0; -} -``` - -To build C++ example, please add MMDeploy package in your CMake project as following: - -```CMake -find_package(MMDeploy REQUIRED) -target_link_libraries(${name} PRIVATE mmdeploy ${OpenCV_LIBS}) -``` - -#### Other languages - -- [C# API Examples](https://github.com/open-mmlab/mmdeploy/tree/main/demo/csharp) -- [JAVA API Examples](https://github.com/open-mmlab/mmdeploy/tree/main/demo/java) - -## 🚀 Step4. Pipeline Inference - -### Inference for images - -If the user has MMDeploy compiled correctly, you will see the `det_pose` executable under the `mmdeploy/build/bin/`. 
- -```shell -# go to the mmdeploy folder -cd ${PATH_TO_MMDEPLOY}/build/bin/ - -# inference for an image -./det_pose rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_img.jpg --device cpu - -required arguments: - det_model Object detection model path [string] - pose_model Pose estimation model path [string] - image Input image path [string] - -optional arguments: - --device Device name, e.g. "cpu", "cuda" [string = "cpu"] - --output Output image path [string = "det_pose_output.jpg"] - --skeleton Path to skeleton data or name of predefined skeletons: - "coco" [string = "coco", "coco-wholoebody"] - --det_label Detection label use for pose estimation [int32 = 0] - (0 refers to 'person' in coco) - --det_thr Detection score threshold [double = 0.5] - --det_min_bbox_size Detection minimum bbox size [double = -1] - --pose_thr Pose key-point threshold [double = 0] -``` - -#### API Example - -- [`det_pose.py`](https://github.com/open-mmlab/mmdeploy/blob/main/demo/python/det_pose.py) -- [`det_pose.cxx`](https://github.com/open-mmlab/mmdeploy/blob/main/demo/csrc/cpp/det_pose.cxx) - -### Inference for a video - -If the user has MMDeploy compiled correctly, you will see the `pose_tracker` executable under the `mmdeploy/build/bin/`. - -- pass `0` to `input` can inference from a webcam - -```shell -# go to the mmdeploy folder -cd ${PATH_TO_MMDEPLOY}/build/bin/ - -# inference for a video -./pose_tracker rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_video.mp4 --device cpu - -required arguments: - det_model Object detection model path [string] - pose_model Pose estimation model path [string] - input Input video path or camera index [string] - -optional arguments: - --device Device name, e.g. "cpu", "cuda" [string = "cpu"] - --output Output video path or format string [string = ""] - --output_size Long-edge of output frames [int32 = 0] - --flip Set to 1 for flipping the input horizontally [int32 = 0] - --show Delay passed to `cv::waitKey` when using `cv::imshow`; - -1: disable [int32 = 1] - --skeleton Path to skeleton data or name of predefined skeletons: - "coco", "coco-wholebody" [string = "coco"] - --background Output background, "default": original image, "black": - black background [string = "default"] - --det_interval Detection interval [int32 = 1] - --det_label Detection label use for pose estimation [int32 = 0] - (0 refers to 'person' in coco) - --det_thr Detection score threshold [double = 0.5] - --det_min_bbox_size Detection minimum bbox size [double = -1] - --det_nms_thr NMS IOU threshold for merging detected bboxes and - bboxes from tracked targets [double = 0.7] - --pose_max_num_bboxes Max number of bboxes used for pose estimation per frame - [int32 = -1] - --pose_kpt_thr Threshold for visible key-points [double = 0.5] - --pose_min_keypoints Min number of key-points for valid poses, -1 indicates - ceil(n_kpts/2) [int32 = -1] - --pose_bbox_scale Scale for expanding key-points to bbox [double = 1.25] - --pose_min_bbox_size Min pose bbox size, tracks with bbox size smaller than - the threshold will be dropped [double = -1] - --pose_nms_thr NMS OKS/IOU threshold for suppressing overlapped poses, - useful when multiple pose estimations collapse to the - same target [double = 0.5] - --track_iou_thr IOU threshold for associating missing tracks - [double = 0.4] - --track_max_missing Max number of missing frames before a missing tracks is - removed [int32 = 10] -``` - -#### API Example - -- [`pose_tracker.py`](https://github.com/open-mmlab/mmdeploy/blob/main/demo/python/pose_tracker.py) -- 
[`pose_tracker.cxx`](https://github.com/open-mmlab/mmdeploy/blob/main/demo/csrc/cpp/pose_tracker.cxx) - -## 📚 Common Usage [🔝](#-table-of-contents) - -### 🚀 Inference Speed Test [🔝](#-table-of-contents) - -If you need to test the inference speed of the model under the deployment framework, MMDeploy provides a convenient `tools/profiler.py` script. - -The user needs to prepare a folder for the test images `./test_images`, the profiler will randomly read images from this directory for the model speed test. - -```shell -python tools/profiler.py \ - configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \ - {RTMPOSE_PROJECT}/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ - ../test_images \ - --model {WORK_DIR}/end2end.onnx \ - --shape 256x192 \ - --device cpu \ - --warmup 50 \ - --num-iter 200 -``` - -The result is as follows: - -```shell -01/30 15:06:35 - mmengine - INFO - [onnxruntime]-70 times per count: 8.73 ms, 114.50 FPS -01/30 15:06:36 - mmengine - INFO - [onnxruntime]-90 times per count: 9.05 ms, 110.48 FPS -01/30 15:06:37 - mmengine - INFO - [onnxruntime]-110 times per count: 9.87 ms, 101.32 FPS -01/30 15:06:37 - mmengine - INFO - [onnxruntime]-130 times per count: 9.99 ms, 100.10 FPS -01/30 15:06:38 - mmengine - INFO - [onnxruntime]-150 times per count: 10.39 ms, 96.29 FPS -01/30 15:06:39 - mmengine - INFO - [onnxruntime]-170 times per count: 10.77 ms, 92.86 FPS -01/30 15:06:40 - mmengine - INFO - [onnxruntime]-190 times per count: 10.98 ms, 91.05 FPS -01/30 15:06:40 - mmengine - INFO - [onnxruntime]-210 times per count: 11.19 ms, 89.33 FPS -01/30 15:06:41 - mmengine - INFO - [onnxruntime]-230 times per count: 11.16 ms, 89.58 FPS -01/30 15:06:42 - mmengine - INFO - [onnxruntime]-250 times per count: 11.06 ms, 90.41 FPS ------ Settings: -+------------+---------+ -| batch size | 1 | -| shape | 256x192 | -| iterations | 200 | -| warmup | 50 | -+------------+---------+ ------ Results: -+--------+------------+---------+ -| Stats | Latency/ms | FPS | -+--------+------------+---------+ -| Mean | 11.060 | 90.412 | -| Median | 11.852 | 84.375 | -| Min | 7.812 | 128.007 | -| Max | 13.690 | 73.044 | -+--------+------------+---------+ -``` - -If you want to learn more details of profiler, you can refer to the [Profiler Docs](https://mmdeploy.readthedocs.io/en/latest/02-how-to-run/useful_tools.html#profiler). - -### 📊 Model Test [🔝](#-table-of-contents) - -If you need to test the inference accuracy of the model on the deployment backend, MMDeploy provides a convenient `tools/test.py` script. - -```shell -python tools/test.py \ - configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \ - {RTMPOSE_PROJECT}/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ - --model {PATH_TO_MODEL}/rtmpose_m.pth \ - --device cpu -``` - -You can also refer to [MMDeploy Docs](https://github.com/open-mmlab/mmdeploy/blob/main/docs/en/02-how-to-run/profile_model.md) for more details. 
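The same script can also evaluate the converted backend model directly: pass the backend file produced by `tools/deploy.py` to `--model` and keep the deploy config consistent with that backend. A hedged sketch using the placeholder paths from the steps above:

```shell
# Sketch: evaluate the converted ONNX model with ONNX Runtime on CPU
python tools/test.py \
    configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \
    {RTMPOSE_PROJECT}/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \
    --model {WORK_DIR}/end2end.onnx \
    --device cpu

# Sketch: evaluate the TensorRT engine on GPU (the deploy config must match the backend)
python tools/test.py \
    configs/mmpose/pose-detection_simcc_tensorrt_dynamic-256x192.py \
    {RTMPOSE_PROJECT}/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \
    --model {WORK_DIR}/end2end.engine \
    --device cuda:0
```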
- -## 📜 Citation [🔝](#-table-of-contents) - -If you find RTMPose useful in your research, please consider cite: - -```bibtex -@misc{https://doi.org/10.48550/arxiv.2303.07399, - doi = {10.48550/ARXIV.2303.07399}, - url = {https://arxiv.org/abs/2303.07399}, - author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, - keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, - title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, - publisher = {arXiv}, - year = {2023}, - copyright = {Creative Commons Attribution 4.0 International} -} - -@misc{mmpose2020, - title={OpenMMLab Pose Estimation Toolbox and Benchmark}, - author={MMPose Contributors}, - howpublished = {\url{https://github.com/open-mmlab/mmpose}}, - year={2020} -} -``` +
    + +
    + +# RTMPose: Real-Time Multi-Person Pose Estimation toolkit based on MMPose + +> [RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose](https://arxiv.org/abs/2303.07399) + +
    + +English | [简体中文](README_CN.md) + +
    + +______________________________________________________________________ + +## Abstract + +Recent studies on 2D pose estimation have achieved excellent performance on public benchmarks, yet its application in the industrial community still suffers from heavy model parameters and high latency. +In order to bridge this gap, we empirically study five aspects that affect the performance of multi-person pose estimation algorithms: paradigm, backbone network, localization algorithm, training strategy, and deployment inference, and present a high-performance real-time multi-person pose estimation framework, **RTMPose**, based on MMPose. +Our RTMPose-m achieves **75.8% AP** on COCO with **90+ FPS** on an Intel i7-11700 CPU and **430+ FPS** on an NVIDIA GTX 1660 Ti GPU. +To further evaluate RTMPose's capability in critical real-time applications, we also report the performance after deploying on the mobile device. Our RTMPose-s achieves **72.2% AP** on COCO with **70+ FPS** on a Snapdragon 865 chip, outperforming existing open-source libraries. +With the help of MMDeploy, our project supports various platforms like CPU, GPU, NVIDIA Jetson, and mobile devices and multiple inference backends such as ONNXRuntime, TensorRT, ncnn, etc. + +![rtmpose_intro](https://user-images.githubusercontent.com/13503330/219269619-935499e5-bdd9-49ea-8104-3c7796dbd862.png) + +______________________________________________________________________ + +## 📄 Table of Contents + +- [🥳 🚀 What's New](#--whats-new-) +- [📖 Introduction](#-introduction-) +- [🙌 Community](#-community-) +- [⚡ Pipeline Performance](#-pipeline-performance-) +- [📊 Model Zoo](#-model-zoo-) +- [👀 Visualization](#-visualization-) +- [😎 Get Started](#-get-started-) +- [👨‍🏫 How to Train](#-how-to-train-) +- [🏗️ How to Deploy](#️-how-to-deploy-) +- [📚 Common Usage](#️-common-usage-) + - [🚀 Inference Speed Test](#-inference-speed-test-) + - [📊 Model Test](#-model-test-) +- [📜 Citation](#-citation-) + +## 🥳 🚀 What's New [🔝](#-table-of-contents) + +- Jun. 2023: + - Release 26-keypoint Body models trained on combined datasets. +- May. 2023: + - Add [code examples](./examples/) of RTMPose. + - Release Hand, Face, Body models trained on combined datasets. +- Mar. 2023: RTMPose is released. RTMPose-m runs at 430+ FPS and achieves 75.8 mAP on COCO val set. + +## 📖 Introduction [🔝](#-table-of-contents) + +
    + +
    + +
    + +
    +
    + +
    + +### ✨ Major Features + +- 🚀 **High efficiency and high accuracy** + + | Model | AP(COCO) | CPU-FPS | GPU-FPS | + | :---: | :------: | :-----: | :-----: | + | t | 68.5 | 300+ | 940+ | + | s | 72.2 | 200+ | 710+ | + | m | 75.8 | 90+ | 430+ | + | l | 76.5 | 50+ | 280+ | + +- 🛠️ **Easy to deploy** + + - Step-by-step deployment tutorials. + - Support various backends including + - ONNX + - TensorRT + - ncnn + - OpenVINO + - etc. + - Support various platforms including + - Linux + - Windows + - NVIDIA Jetson + - ARM + - etc. + +- 🏗️ **Design for practical applications** + + - Pipeline inference API and SDK for + - Python + - C++ + - C# + - JAVA + - etc. + +## 🙌 Community [🔝](#-table-of-contents) + +RTMPose is a long-term project dedicated to the training, optimization and deployment of high-performance real-time pose estimation algorithms in practical scenarios, so we are looking forward to the power from the community. Welcome to share the training configurations and tricks based on RTMPose in different business applications to help more community users! + +✨ ✨ ✨ + +- **If you are a new user of RTMPose, we eagerly hope you can fill out this [Google Questionnaire](https://docs.google.com/forms/d/e/1FAIpQLSfzwWr3eNlDzhU98qzk2Eph44Zio6hi5r0iSwfO9wSARkHdWg/viewform?usp=sf_link)/[Chinese version](https://uua478.fanqier.cn/f/xxmynrki), it's very important for our work!** + +✨ ✨ ✨ + +Feel free to join our community group for more help: + +- WeChat Group: + +
    + +
    + +- Discord Group: + - 🙌 https://discord.gg/raweFPmdzG 🙌 + +## ⚡ Pipeline Performance [🔝](#-table-of-contents) + +**Notes** + +- Pipeline latency is tested under skip-frame settings, the detection interval is 5 frames by defaults. +- Flip test is NOT used. +- Env Setup: + - torch >= 1.7.1 + - onnxruntime 1.12.1 + - TensorRT 8.4.3.1 + - ncnn 20221128 + - cuDNN 8.3.2 + - CUDA 11.3 + +| Detection Config | Pose Config | Input Size
    (Det/Pose) | Model AP
    (COCO) | Pipeline AP
    (COCO) | Params (M)
    (Det/Pose) | Flops (G)
    (Det/Pose) | ORT-Latency(ms)
    (i7-11700) | TRT-FP16-Latency(ms)
    (GTX 1660Ti) | Download | +| :------------------------------------------------------------------ | :---------------------------------------------------------------------------- | :---------------------------: | :---------------------: | :------------------------: | :---------------------------: | :--------------------------: | :--------------------------------: | :---------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [RTMDet-nano](./rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py) | [RTMPose-t](./rtmpose/body_2d_keypoint/rtmpose-t_8xb256-420e_coco-256x192.py) | 320x320
    256x192 | 40.3
    67.1 | 64.4 | 0.99
    3.34 | 0.31
    0.36 | 12.403 | 2.467 | [det](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth)
    [pose](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-aic-coco_pt-aic-coco_420e-256x192-cfc8f33d_20230126.pth) | +| [RTMDet-nano](./rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py) | [RTMPose-s](./rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py) | 320x320
    256x192 | 40.3
    71.1 | 68.5 | 0.99
    5.47 | 0.31
    0.68 | 16.658 | 2.730 | [det](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth)
    [pose](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-aic-coco_pt-aic-coco_420e-256x192-fcb2599b_20230126.pth) | +| [RTMDet-nano](./rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py) | [RTMPose-m](./rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py) | 320x320
    256x192 | 40.3
    75.3 | 73.2 | 0.99
    13.59 | 0.31
    1.93 | 26.613 | 4.312 | [det](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth)
    [pose](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth) | +| [RTMDet-nano](./rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py) | [RTMPose-l](./rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py) | 320x320
    256x192 | 40.3
    76.3 | 74.2 | 0.99
    27.66 | 0.31
    4.16 | 36.311 | 4.644 | [det](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth)
    [pose](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-256x192-f016ffe0_20230126.pth) | +| [RTMDet-m](./rtmdet/person/rtmdet_m_640-8xb32_coco-person.py) | [RTMPose-m](./rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py) | 640x640
    256x192 | 62.5
    75.3 | 75.7 | 24.66
    13.59 | 38.95
    1.93 | - | 6.923 | [det](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_m_8xb32-100e_coco-obj365-person-235e8209.pth)
    [pose](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth) | +| [RTMDet-m](./rtmdet/person/rtmdet_m_640-8xb32_coco-person.py) | [RTMPose-l](./rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py) | 640x640
    256x192 | 62.5
    76.3 | 76.6 | 24.66
    27.66 | 38.95
    4.16 | - | 7.204 | [det](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_m_8xb32-100e_coco-obj365-person-235e8209.pth)
    [pose](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-256x192-f016ffe0_20230126.pth) | + +## 📊 Model Zoo [🔝](#-table-of-contents) + +**Notes** + +- Since all models are trained on multi-domain combined datasets for practical applications, results are **not** suitable for academic comparison. +- More results of RTMPose on public benchmarks can refer to [Model Zoo](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html) +- Flip test is used. +- Inference speed measured on more hardware platforms can refer to [Benchmark](./benchmark/README.md) +- If you have datasets you would like us to support, feel free to [contact us](https://docs.google.com/forms/d/e/1FAIpQLSfzwWr3eNlDzhU98qzk2Eph44Zio6hi5r0iSwfO9wSARkHdWg/viewform?usp=sf_link)/[联系我们](https://uua478.fanqier.cn/f/xxmynrki). + +### Body 2d + +#### 17 Keypoints + +- Keypoints are defined as [COCO](http://cocodataset.org/). For details please refer to the [meta info](/configs/_base_/datasets/coco.py). +- + +
    +AIC+COCO + +| Config | Input Size | AP
    (COCO) | PCK@0.1
    (Body8) | AUC
    (Body8) | Params
    (M) | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | ncnn-FP16-Latency
    (ms)
    (Snapdragon 865) | Download | +| :---------------------------------------------------------------------------: | :--------: | :---------------: | :---------------------: | :-----------------: | :----------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :-----------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------: | +| [RTMPose-t](./rtmpose/body_2d_keypoint/rtmpose-t_8xb256-420e_coco-256x192.py) | 256x192 | 68.5 | 91.28 | 63.38 | 3.34 | 0.36 | 3.20 | 1.06 | 9.02 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-aic-coco_pt-aic-coco_420e-256x192-cfc8f33d_20230126.pth) | +| [RTMPose-s](./rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 72.2 | 92.95 | 66.19 | 5.47 | 0.68 | 4.48 | 1.39 | 13.89 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-aic-coco_pt-aic-coco_420e-256x192-fcb2599b_20230126.pth) | +| [RTMPose-m](./rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 75.8 | 94.13 | 68.53 | 13.59 | 1.93 | 11.06 | 2.29 | 26.44 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth) | +| [RTMPose-l](./rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 76.5 | 94.35 | 68.98 | 27.66 | 4.16 | 18.85 | 3.46 | 45.37 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-256x192-f016ffe0_20230126.pth) | +| [RTMPose-m](./rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-384x288.py) | 384x288 | 77.0 | 94.32 | 69.85 | 13.72 | 4.33 | 24.78 | 3.66 | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-384x288-a62a0b32_20230228.pth) | +| [RTMPose-l](./rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-384x288.py) | 384x288 | 77.3 | 94.54 | 70.14 | 27.79 | 9.35 | - | 6.05 | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-384x288-97d6cb0f_20230228.pth) | + +
    + +
    +Body8 + +- `*` denotes model trained on 7 public datasets: + - [AI Challenger](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#aic) + - [MS COCO](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#coco) + - [CrowdPose](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#crowdpose) + - [MPII](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#mpii) + - [sub-JHMDB](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#sub-jhmdb-dataset) + - [Halpe](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_wholebody_keypoint.html#halpe) + - [PoseTrack18](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#posetrack18) +- `Body8` denotes the addition of the [OCHuman](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#ochuman) dataset, in addition to the 7 datasets mentioned above, for evaluation. + +| Config | Input Size | AP
    (COCO) | PCK@0.1
    (Body8) | AUC
    (Body8) | Params
    (M) | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | ncnn-FP16-Latency
    (ms)
    (Snapdragon 865) | Download | +| :-----------------------------------------------------------------------------: | :--------: | :---------------: | :---------------------: | :-----------------: | :----------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :-----------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------: | +| [RTMPose-t\*](./rtmpose/body_2d_keypoint/rtmpose-t_8xb256-420e_coco-256x192.py) | 256x192 | 65.9 | 91.44 | 63.18 | 3.34 | 0.36 | 3.20 | 1.06 | 9.02 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-body7_pt-body7_420e-256x192-026a1439_20230504.pth) | +| [RTMPose-s\*](./rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 69.7 | 92.45 | 65.15 | 5.47 | 0.68 | 4.48 | 1.39 | 13.89 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-body7_pt-body7_420e-256x192-acd4a1ef_20230504.pth) | +| [RTMPose-m\*](./rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 74.9 | 94.25 | 68.59 | 13.59 | 1.93 | 11.06 | 2.29 | 26.44 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-256x192-e48f03d0_20230504.pth) | +| [RTMPose-l\*](./rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 76.7 | 95.08 | 70.14 | 27.66 | 4.16 | 18.85 | 3.46 | 45.37 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-256x192-4dba18fc_20230504.pth) | +| [RTMPose-m\*](./rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-384x288.py) | 384x288 | 76.6 | 94.64 | 70.38 | 13.72 | 4.33 | 24.78 | 3.66 | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-384x288-65e718c4_20230504.pth) | +| [RTMPose-l\*](./rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-384x288.py) | 384x288 | 78.3 | 95.36 | 71.58 | 27.79 | 9.35 | - | 6.05 | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-384x288-3f5a1437_20230504.pth) | +| [RTMPose-x\*](./rtmpose/body_2d_keypoint/rtmpose-x_8xb256-700e_coco-384x288.py) | 384x288 | 78.8 | - | - | 49.43 | 17.22 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-x_simcc-body7_pt-body7_700e-384x288-71d7b7e9_20230629.pth) | + +
    + +#### 26 Keypoints + +- Keypoints are defined as [Halpe26](https://github.com/Fang-Haoshu/Halpe-FullBody/). For details please refer to the [meta info](/configs/_base_/datasets/halpe26.py). +- +- Models are trained and evaluated on `Body8`. + +| Config | Input Size | PCK@0.1
    (Body8) | AUC
    (Body8) | Params(M) | FLOPS(G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | ncnn-FP16-Latency
    (ms)
    (Snapdragon 865) | Download | +| :---------------------------------------------------------------------------------------: | :--------: | :---------------------: | :-----------------: | :-------: | :------: | :-----------------------------------------: | :------------------------------------------------: | :-----------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------: | +| [RTMPose-t\*](./rtmpose/body_2d_keypoint/rtmpose-t_8xb1024-700e_body8-halpe26-256x192.py) | 256x192 | 91.89 | 66.35 | 3.51 | 0.37 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-body7_pt-body7-halpe26_700e-256x192-6020f8a6_20230605.pth) | +| [RTMPose-s\*](./rtmpose/body_2d_keypoint/rtmpose-s_8xb1024-700e_body8-halpe26-256x192.py) | 256x192 | 93.01 | 68.62 | 5.70 | 0.70 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-body7_pt-body7-halpe26_700e-256x192-7f134165_20230605.pth) | +| [RTMPose-m\*](./rtmpose/body_2d_keypoint/rtmpose-m_8xb512-700e_body8-halpe26-256x192.py) | 256x192 | 94.75 | 71.91 | 13.93 | 1.95 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.pth) | +| [RTMPose-l\*](./rtmpose/body_2d_keypoint/rtmpose-l_8xb512-700e_body8-halpe26-256x192.py) | 256x192 | 95.37 | 73.19 | 28.11 | 4.19 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7-halpe26_700e-256x192-2abb7558_20230605.pth) | +| [RTMPose-m\*](./rtmpose/body_2d_keypoint/rtmpose-m_8xb512-700e_body8-halpe26-384x288.py) | 384x288 | 95.15 | 73.56 | 14.06 | 4.37 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-384x288-89e6428b_20230605.pth) | +| [RTMPose-l\*](./rtmpose/body_2d_keypoint/rtmpose-l_8xb512-700e_body8-halpe26-384x288.py) | 384x288 | 95.56 | 74.38 | 28.24 | 9.40 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7-halpe26_700e-384x288-734182ce_20230605.pth) | +| [RTMPose-x\*](./rtmpose/body_2d_keypoint/rtmpose-x_8xb256-700e_body8-halpe26-384x288.py) | 384x288 | 95.74 | 74.82 | 50.00 | 17.29 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-x_simcc-body7_pt-body7-halpe26_700e-384x288-7fb6e239_20230606.pth) | + +#### Model Pruning + +**Notes** + +- Model pruning is supported by [MMRazor](https://github.com/open-mmlab/mmrazor) + +| Config | Input Size | AP
    (COCO) | Params
    (M) | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | ncnn-FP16-Latency
    (ms)
    (Snapdragon 865) | Download | +| :-----------------------: | :--------: | :---------------: | :----------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :-----------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | +| RTMPose-s-aic-coco-pruned | 256x192 | 69.4 | 3.43 | 0.35 | - | - | - | [Model](https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_finetune_rtmpose-s_8xb256-420e_aic-coco-256x192.pth) | + +For more details, please refer to [GroupFisher Pruning for RTMPose](./rtmpose/pruning/README.md). + +### WholeBody 2d (133 Keypoints) + +- Keypoints are defined as [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody/). For details please refer to the [meta info](/configs/_base_/datasets/coco_wholebody.py). +- + +| Config | Input Size | Whole AP | Whole AR | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | Download | +| :------------------------------ | :--------: | :------: | :------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :-------------------------------: | +| [RTMPose-m](./rtmpose/wholebody_2d_keypoint/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 58.2 | 67.4 | 2.22 | 13.50 | 4.00 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody_pt-aic-coco_270e-256x192-cd5e845c_20230123.pth) | +| [RTMPose-l](./rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 61.1 | 70.0 | 4.52 | 23.41 | 5.67 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-256x192-6f206314_20230124.pth) | +| [RTMPose-l](./rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py) | 384x288 | 64.8 | 73.0 | 10.07 | 44.58 | 7.68 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-384x288-eaeb96c8_20230125.pth) | +| [RTMPose-x](./rtmpose/wholebody_2d_keypoint/rtmpose-x_8xb32-270e_coco-wholebody-384x288.py) | 384x288 | 65.3 | 73.3 | 18.1 | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-x_simcc-coco-wholebody_pt-body7_270e-384x288-401dfc90_20230629.pth) | + +### Animal 2d (17 Keypoints) + +- Keypoints are defined as [AP-10K](https://github.com/AlexTheBad/AP-10K/). For details please refer to the [meta info](/configs/_base_/datasets/ap10k.py). +- + +| Config | Input Size | AP
    (AP10K) | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | Download | +| :----------------------------: | :--------: | :----------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :------------------------------: | +| [RTMPose-m](./rtmpose/animal_2d_keypoint/rtmpose-m_8xb64-210e_ap10k-256x256.py) | 256x256 | 72.2 | 2.57 | 14.157 | 2.404 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-ap10k_pt-aic-coco_210e-256x256-7a041aa1_20230206.pth) | + +### Face 2d (106 Keypoints) + +- Keypoints are defined as [LaPa](https://github.com/JDAI-CV/lapa-dataset). For details please refer to the [meta info](/configs/_base_/datasets/lapa.py). +- + +
    +Face6 + +- `Face6` and `*` denote model trained on 6 public datasets: + - [COCO-Wholebody-Face](https://github.com/jin-s13/COCO-WholeBody/) + - [WFLW](https://wywu.github.io/projects/LAB/WFLW.html) + - [300W](https://ibug.doc.ic.ac.uk/resources/300-W/) + - [COFW](http://www.vision.caltech.edu/xpburgos/ICCV13/) + - [Halpe](https://github.com/Fang-Haoshu/Halpe-FullBody/) + - [LaPa](https://github.com/JDAI-CV/lapa-dataset) + +| Config | Input Size | NME
    (LaPa) | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | Download | +| :----------------------------: | :--------: | :----------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :------------------------------: | +| [RTMPose-t\*](./rtmpose/face_2d_keypoint/rtmpose-t_8xb256-120e_lapa-256x256.py) | 256x256 | 1.67 | 0.652 | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-face6_pt-in1k_120e-256x256-df79d9a5_20230529.pth) | +| [RTMPose-s\*](./rtmpose/face_2d_keypoint/rtmpose-s_8xb256-120e_lapa-256x256.py) | 256x256 | 1.59 | 1.119 | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-face6_pt-in1k_120e-256x256-d779fdef_20230529.pth) | +| [RTMPose-m\*](./rtmpose/face_2d_keypoint/rtmpose-m_8xb256-120e_lapa-256x256.py) | 256x256 | 1.44 | 2.852 | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-face6_pt-in1k_120e-256x256-72a37400_20230529.pth) | + +
    + +### Hand 2d (21 Keypoints) + +- Keypoints are defined as [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody/). For details please refer to the [meta info](/configs/_base_/datasets/coco_wholebody_hand.py). +- + +| Detection Config | Input Size | Model AP
    (OneHand10K) | Flops
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | Download | +| :---------------------------: | :--------: | :---------------------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :--------------------: | +| [RTMDet-nano
    (alpha version)](./rtmdet/hand/rtmdet_nano_320-8xb32_hand.py) | 320x320 | 76.0 | 0.31 | - | - | [Det Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmdet_nano_8xb32-300e_hand-267f9c8f.pth) | + +
    +Hand5 + +- `Hand5` and `*` denote model trained on 5 public datasets: + - [COCO-Wholebody-Hand](https://github.com/jin-s13/COCO-WholeBody/) + - [OneHand10K](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html) + - [FreiHand2d](https://lmb.informatik.uni-freiburg.de/projects/freihand/) + - [RHD2d](https://lmb.informatik.uni-freiburg.de/resources/datasets/RenderedHandposeDataset.en.html) + - [Halpe](https://github.com/Fang-Haoshu/Halpe-FullBody/) + +| Config | Input Size | PCK@0.2
    (COCO-Wholebody-Hand) | PCK@0.2
    (Hand5) | AUC
    (Hand5) | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | Download | +| :-------------------------------------------------------------------------------------------------------------------: | :--------: | :-----------------------------------: | :---------------------: | :-----------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------: | +| [RTMPose-m\*
    (alpha version)](./rtmpose/hand_2d_keypoint/rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 81.5 | 96.4 | 83.9 | 2.581 | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-hand5_pt-aic-coco_210e-256x256-74fb594_20230320.pth) | + +
    + +### Pretrained Models + +We provide the UDP pretraining configs of the CSPNeXt backbone. Find more details in the [pretrain_cspnext_udp folder](./rtmpose/pretrain_cspnext_udp/). + +
    +AIC+COCO + +| Model | Input Size | Params
    (M) | Flops
    (G) | AP
    (GT) | AR
    (GT) | Download | +| :----------: | :--------: | :----------------: | :---------------: | :-------------: | :-------------: | :---------------------------------------------------------------------------------------------------------------: | +| CSPNeXt-tiny | 256x192 | 6.03 | 1.43 | 65.5 | 68.9 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/cspnext-tiny_udp-aic-coco_210e-256x192-cbed682d_20230130.pth) | +| CSPNeXt-s | 256x192 | 8.58 | 1.78 | 70.0 | 73.3 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth) | +| CSPNeXt-m | 256x192 | 17.53 | 3.05 | 74.8 | 77.7 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth) | +| CSPNeXt-l | 256x192 | 32.44 | 5.32 | 77.2 | 79.9 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth) | + +
    + +
    +Body8 + +- `*` denotes model trained on 7 public datasets: + - [AI Challenger](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#aic) + - [MS COCO](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#coco) + - [CrowdPose](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#crowdpose) + - [MPII](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#mpii) + - [sub-JHMDB](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#sub-jhmdb-dataset) + - [Halpe](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_wholebody_keypoint.html#halpe) + - [PoseTrack18](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#posetrack18) +- `Body8` denotes the addition of the [OCHuman](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#ochuman) dataset, in addition to the 7 datasets mentioned above, for evaluation. + +| Model | Input Size | Params
    (M) | Flops
    (G) | AP
    (COCO) | PCK@0.2
    (Body8) | AUC
    (Body8) | Download | +| :------------: | :--------: | :----------------: | :---------------: | :---------------: | :---------------------: | :-----------------: | :--------------------------------------------------------------------------------: | +| CSPNeXt-tiny\* | 256x192 | 6.03 | 1.43 | 65.9 | 96.34 | 63.80 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-tiny_udp-body7_210e-256x192-a3775292_20230504.pth) | +| CSPNeXt-s\* | 256x192 | 8.58 | 1.78 | 68.7 | 96.59 | 64.92 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-s_udp-body7_210e-256x192-8c9ccbdb_20230504.pth) | +| CSPNeXt-m\* | 256x192 | 17.53 | 3.05 | 73.7 | 97.42 | 68.19 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-body7_210e-256x192-e0c9327b_20230504.pth) | +| CSPNeXt-l\* | 256x192 | 32.44 | 5.32 | 75.7 | 97.76 | 69.57 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-body7_210e-256x192-5e9558ef_20230504.pth) | +| CSPNeXt-m\* | 384x288 | 17.53 | 6.86 | 75.8 | 97.60 | 70.18 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-body7_210e-384x288-b9bc2b57_20230504.pth) | +| CSPNeXt-l\* | 384x288 | 32.44 | 11.96 | 77.2 | 97.89 | 71.23 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-body7_210e-384x288-b15bc30d_20230504.pth) | +| CSPNeXt-x\* | 384x288 | 54.92 | 19.96 | 78.1 | 98.00 | 71.79 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-x_udp-body7_210e-384x288-d28b58e6_20230529.pth) | + +
    + +#### ImageNet + +We also provide the ImageNet classification pre-trained weights of the CSPNeXt backbone. Find more details in [RTMDet](https://github.com/open-mmlab/mmdetection/blob/latest/configs/rtmdet/README.md#classification). + +| Model | Input Size | Params
    (M) | Flops
    (G) | Top-1 (%) | Top-5 (%) | Download | +| :----------: | :--------: | :----------------: | :---------------: | :-------: | :-------: | :---------------------------------------------------------------------------------------------------------------------------: | +| CSPNeXt-tiny | 224x224 | 2.73 | 0.34 | 69.44 | 89.45 | [Model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e-3a2dd350.pth) | +| CSPNeXt-s | 224x224 | 4.89 | 0.66 | 74.41 | 92.23 | [Model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e-ea671761.pth) | +| CSPNeXt-m | 224x224 | 13.05 | 1.93 | 79.27 | 94.79 | [Model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth) | +| CSPNeXt-l | 224x224 | 27.16 | 4.19 | 81.30 | 95.62 | [Model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-l_8xb256-rsb-a1-600e_in1k-6a760974.pth) | +| CSPNeXt-x | 224x224 | 48.85 | 7.76 | 82.10 | 95.69 | [Model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-x_8xb256-rsb-a1-600e_in1k-b3f78edd.pth) | + +## 👀 Visualization [🔝](#-table-of-contents) + +
    + + +

## 😎 Get Started [🔝](#-table-of-contents)

We provide two approaches to try RTMPose:

- MMPose demo scripts
- Pre-compiled MMDeploy SDK (Recommended, 6-10 times faster)

### MMPose demo scripts

MMPose provides demo scripts to conduct [inference with existing models](https://mmpose.readthedocs.io/en/latest/user_guides/inference.html).

**Note:**

- Inference with PyTorch cannot reach the maximum speed of RTMPose; use it for verification only.
- Model file can be either a local path or a download link.

```shell
# go to the mmpose folder
cd ${PATH_TO_MMPOSE}

# inference with rtmdet
python demo/topdown_demo_with_mmdet.py \
    projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \
    https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \
    projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \
    https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \
    --input {YOUR_TEST_IMG_or_VIDEO} \
    --show

# inference with webcam
python demo/topdown_demo_with_mmdet.py \
    projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \
    https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \
    projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \
    https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \
    --input webcam \
    --show
```

The result is as follows:

![topdown_inference_with_rtmdet](https://user-images.githubusercontent.com/13503330/220005020-06bdf37f-6817-4681-a2c8-9dd55e4fbf1e.png)

### Pre-compiled MMDeploy SDK (Recommended)

MMDeploy provides a pre-compiled SDK for pipeline inference on RTMPose projects, where the model used for inference is the SDK version.

- All models must be exported by `tools/deploy.py` before PoseTracker can be used for inference.
- For the tutorial on exporting the SDK version of a model, see [SDK Inference](#%EF%B8%8F-step3-inference-with-sdk), and for detailed inference parameter settings, see [Pipeline Inference](#-step4-pipeline-inference).
- Exported SDK models (ONNX, TRT, ncnn, etc.) can be downloaded from [OpenMMLab Deploee](https://platform.openmmlab.com/deploee).
- You can also convert `.pth` models into SDK models [online](https://platform.openmmlab.com/deploee/task-convert-list).

#### Linux

Env Requirements:

- GCC >= 7.5
- cmake >= 3.20

##### Python Inference

1. Install mmdeploy_runtime or mmdeploy_runtime_gpu

```shell
# for onnxruntime
pip install mmdeploy-runtime

# for onnxruntime-gpu / tensorrt
pip install mmdeploy-runtime-gpu
```

2. Download the pre-compiled files.
+ +```shell +# onnxruntime +# for ubuntu +wget -c https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cxx11abi.tar.gz +# unzip then add third party runtime libraries to the PATH + +# for centos7 and lower +wget -c https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64.tar.gz +# unzip then add third party runtime libraries to the PATH + +# onnxruntime-gpu / tensorrt +# for ubuntu +wget -c https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cxx11abi-cuda11.3.tar.gz +# unzip then add third party runtime libraries to the PATH + +# for centos7 and lower +wget -c https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cuda11.3.tar.gz +# unzip then add third party runtime libraries to the PATH +``` + +3. Download the sdk models and unzip to `./example/python`. (If you need other models, please export sdk models refer to [SDK Reasoning](#%EF%B8%8F-step3-inference-with-sdk)) + +```shell +# rtmdet-nano + rtmpose-m for cpu sdk +wget https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-cpu.zip + +unzip rtmpose-cpu.zip +``` + +4. Inference with `pose_tracker.py`: + +```shell +# go to ./example/python + +# Please pass the folder of the model, not the model file +# Format: +# python pose_tracker.py cpu {det work-dir} {pose work-dir} {your_video.mp4} + +# Example: +python pose_tracker.py cpu rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_video.mp4 + +# webcam +python pose_tracker.py cpu rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ 0 +``` + +##### ONNX + +```shell +# Download pre-compiled files +wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cxx11abi.tar.gz + +# Unzip files +tar -xzvf mmdeploy-1.0.0-linux-x86_64-cxx11abi.tar.gz + +# Go to the sdk folder +cd mmdeploy-1.0.0-linux-x86_64-cxx11abi + +# Init environment +source set_env.sh + +# If opencv 3+ is not installed on your system, execute the following command. +# If it is installed, skip this command +bash install_opencv.sh + +# Compile executable programs +bash build_sdk.sh + +# Inference for an image +# Please pass the folder of the model, not the model file +./bin/det_pose rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_img.jpg --device cpu + +# Inference for a video +# Please pass the folder of the model, not the model file +./bin/pose_tracker rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_video.mp4 --device cpu + +# Inference using webcam +# Please pass the folder of the model, not the model file +./bin/pose_tracker rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ 0 --device cpu +``` + +##### TensorRT + +```shell +# Download pre-compiled files +wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cxx11abi-cuda11.3.tar.gz + +# Unzip files +tar -xzvf mmdeploy-1.0.0-linux-x86_64-cxx11abi-cuda11.3.tar.gz + +# Go to the sdk folder +cd mmdeploy-1.0.0-linux-x86_64-cxx11abi-cuda11.3 + +# Init environment +source set_env.sh + +# If opencv 3+ is not installed on your system, execute the following command. 
+# If it is installed, skip this command +bash install_opencv.sh + +# Compile executable programs +bash build_sdk.sh + +# Inference for an image +# Please pass the folder of the model, not the model file +./bin/det_pose rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_img.jpg --device cuda + +# Inference for a video +# Please pass the folder of the model, not the model file +./bin/pose_tracker rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_video.mp4 --device cuda + +# Inference using webcam +# Please pass the folder of the model, not the model file +./bin/pose_tracker rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ 0 --device cuda +``` + +For details, see [Pipeline Inference](#-step4-pipeline-inference). + +#### Windows + +##### Python Inference + +1. Install mmdeploy_runtime or mmdeploy_runtime_gpu + +```shell +# for onnxruntime +pip install mmdeploy-runtime +# download [sdk](https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-windows-amd64.zip) add third party runtime libraries to the PATH + +# for onnxruntime-gpu / tensorrt +pip install mmdeploy-runtime-gpu +# download [sdk](https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-windows-amd64-cuda11.3.zip) add third party runtime libraries to the PATH +``` + +2. Download the sdk models and unzip to `./example/python`. (If you need other models, please export sdk models refer to [SDK Reasoning](#%EF%B8%8F-step3-inference-with-sdk)) + +```shell +# rtmdet-nano + rtmpose-m for cpu sdk +wget https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-cpu.zip + +unzip rtmpose-cpu.zip +``` + +3. Inference with `pose_tracker.py`: + +```shell +# go to ./example/python +# Please pass the folder of the model, not the model file +python pose_tracker.py cpu rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_video.mp4 + +# Inference using webcam +# Please pass the folder of the model, not the model file +python pose_tracker.py cpu rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ 0 +``` + +##### Executable Inference + +1. Install [CMake](https://cmake.org/download/). +2. Download the [pre-compiled SDK](https://github.com/open-mmlab/mmdeploy/releases). +3. Unzip the SDK and go to the `sdk` folder. +4. open windows powerShell with administrator privileges + +```shell +set-ExecutionPolicy RemoteSigned +``` + +5. Install OpenCV: + +```shell +# in sdk folder: +.\install_opencv.ps1 +``` + +6. Set environment variables: + +```shell +# in sdk folder: +. .\set_env.ps1 +``` + +7. Compile the SDK: + +```shell +# in sdk folder: +# (if you installed opencv by .\install_opencv.ps1) +.\build_sdk.ps1 +# (if you installed opencv yourself) +.\build_sdk.ps1 "path/to/folder/of/OpenCVConfig.cmake" +``` + +8. the executable will be generated in: + +```shell +example\cpp\build\Release +``` + +### MMPose demo scripts + +MMPose provides demo scripts to conduct [inference with existing models](https://mmpose.readthedocs.io/en/latest/user_guides/inference.html). + +**Note:** + +- Inferencing with Pytorch can not reach the maximum speed of RTMPose, just for verification. 

```shell
# go to the mmpose folder
cd ${PATH_TO_MMPOSE}

# inference with rtmdet
python demo/topdown_demo_with_mmdet.py \
    projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \
    {PATH_TO_CHECKPOINT}/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \
    projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \
    {PATH_TO_CHECKPOINT}/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \
    --input {YOUR_TEST_IMG_or_VIDEO} \
    --show

# inference with webcam
python demo/topdown_demo_with_mmdet.py \
    projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \
    {PATH_TO_CHECKPOINT}/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \
    projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \
    {PATH_TO_CHECKPOINT}/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \
    --input webcam \
    --show
```

The result is as follows:

![topdown_inference_with_rtmdet](https://user-images.githubusercontent.com/13503330/220005020-06bdf37f-6817-4681-a2c8-9dd55e4fbf1e.png)

## 👨‍🏫 How to Train [🔝](#-table-of-contents)

Please refer to [Train and Test](https://mmpose.readthedocs.io/en/latest/user_guides/train_and_test.html).

**Tips**:

- Please reduce `batch_size` and `base_lr` accordingly when your dataset is small.
- Guidelines for choosing a model:
  - m: recommended and preferred choice for most use cases
  - t/s: for mobile devices with extremely low computing power, or scenarios with stringent inference speed requirements
  - l: suitable for scenarios with strong computing power that are not sensitive to speed

## 🏗️ How to Deploy [🔝](#-table-of-contents)

Here is a basic example of deploying RTMPose with [MMDeploy](https://github.com/open-mmlab/mmdeploy/tree/main).

- Exported SDK models (ONNX, TRT, ncnn, etc.) can be downloaded from [OpenMMLab Deploee](https://platform.openmmlab.com/deploee).
- You can also convert `.pth` models into SDK models [online](https://platform.openmmlab.com/deploee/task-convert-list).

### 🧩 Step1. Install MMDeploy

Before starting the deployment, please make sure that MMPose and MMDeploy are installed correctly.

- To install MMPose, please refer to the [MMPose installation guide](https://mmpose.readthedocs.io/en/latest/installation.html).
- To install MMDeploy, please refer to the [MMDeploy installation guide](https://mmdeploy.readthedocs.io/en/latest/get_started.html#installation).

Some deployment backends require compiling custom operators, so please refer to the corresponding documentation to make sure the environment is built correctly for your needs:

- [ONNX RUNTIME SUPPORT](https://mmdeploy.readthedocs.io/en/latest/05-supported-backends/onnxruntime.html)
- [TENSORRT SUPPORT](https://mmdeploy.readthedocs.io/en/latest/05-supported-backends/tensorrt.html)
- [OPENVINO SUPPORT](https://mmdeploy.readthedocs.io/en/latest/05-supported-backends/openvino.html)
- [More](https://github.com/open-mmlab/mmdeploy/tree/main/docs/en/05-supported-backends)

### 🛠️ Step2. Convert Model

After installation, you can start the model deployment journey by converting the PyTorch model to a backend model with MMDeploy's `tools/deploy.py`.

For a detailed model conversion tutorial, please refer to the [MMDeploy documentation](https://mmdeploy.readthedocs.io/en/latest/02-how-to-run/convert_model.html). Here we only give an example of converting RTMPose.
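In general, `tools/deploy.py` takes the deployment config, the model config, the checkpoint and a test image as positional arguments, followed by optional flags. A minimal sketch of the invocation pattern (all placeholders are yours to fill in; the concrete commands below show real values):

```shell
# Generic invocation pattern of MMDeploy's tools/deploy.py (placeholders only):
#   DEPLOY_CFG - deployment config, e.g. an ONNX Runtime or TensorRT config
#   MODEL_CFG  - the MMPose/MMDetection model config
#   CHECKPOINT - a local .pth file or a download link
#   TEST_IMG   - an image used to verify the converted model
python tools/deploy.py \
    ${DEPLOY_CFG} \
    ${MODEL_CFG} \
    ${CHECKPOINT} \
    ${TEST_IMG} \
    --work-dir ${WORK_DIR} \
    --device ${DEVICE} \
    --show \
    --dump-info  # dump sdk info, needed later for SDK inference
```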
+ +Here we take converting RTMDet-nano and RTMPose-m to ONNX/TensorRT as an example. + +- If you only want to use ONNX, please use: + - [`detection_onnxruntime_static.py`](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmdet/detection/detection_onnxruntime_static.py) for RTMDet. + - [`pose-detection_simcc_onnxruntime_dynamic.py`](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py) for RTMPose. +- If you want to use TensorRT, please use: + - [`detection_tensorrt_static-320x320.py`](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmdet/detection/detection_tensorrt_static-320x320.py) for RTMDet. + - [`pose-detection_simcc_tensorrt_dynamic-256x192.py`](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmpose/pose-detection_simcc_tensorrt_dynamic-256x192.py) for RTMPose. + +If you want to customize the settings in the deployment config for your requirements, please refer to [MMDeploy config tutorial](https://mmdeploy.readthedocs.io/en/latest/02-how-to-run/write_config.html). + +In this tutorial, we organize files as follows: + +```shell +|----mmdeploy +|----mmdetection +|----mmpose +``` + +#### ONNX + +```shell +# go to the mmdeploy folder +cd ${PATH_TO_MMDEPLOY} + +# run the command to convert RTMDet +# Model file can be either a local path or a download link +python tools/deploy.py \ + configs/mmdet/detection/detection_onnxruntime_static.py \ + ../mmpose/projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ + demo/resources/human-pose.jpg \ + --work-dir rtmpose-ort/rtmdet-nano \ + --device cpu \ + --show \ + --dump-info # dump sdk info + +# run the command to convert RTMPose +# Model file can be either a local path or a download link +python tools/deploy.py \ + configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \ + ../mmpose/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ + demo/resources/human-pose.jpg \ + --work-dir rtmpose-ort/rtmpose-m \ + --device cpu \ + --show \ + --dump-info # dump sdk info +``` + +The converted model file is `{work-dir}/end2end.onnx` by defaults. 
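To quickly confirm that the exported RTMPose ONNX file loads and runs, you can feed it a dummy input with ONNX Runtime. This is only a sketch and not part of the MMDeploy tooling; the path, input name and the `1x3x256x192` shape below are assumptions based on the commands above, so check `session.get_inputs()` if your export differs.

```Python
# Smoke test of the exported RTMPose ONNX model with ONNX Runtime (sketch).
# Assumes `pip install onnxruntime` and the work dir used above.
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession('rtmpose-ort/rtmpose-m/end2end.onnx',
                               providers=['CPUExecutionProvider'])

# Inspect the actual input instead of hard-coding its name or shape
inp = session.get_inputs()[0]
print('input:', inp.name, inp.shape)

# Dummy NCHW batch matching the 256x192 (height x width) pose config above
dummy = np.random.rand(1, 3, 256, 192).astype(np.float32)
outputs = session.run(None, {inp.name: dummy})
for out in outputs:
    print('output shape:', out.shape)
```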
+
+#### TensorRT
+
+```shell
+# go to the mmdeploy folder
+cd ${PATH_TO_MMDEPLOY}
+
+# run the command to convert RTMDet
+# Model file can be either a local path or a download link
+python tools/deploy.py \
+    configs/mmdet/detection/detection_tensorrt_static-320x320.py \
+    ../mmpose/projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \
+    https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \
+    demo/resources/human-pose.jpg \
+    --work-dir rtmpose-trt/rtmdet-nano \
+    --device cuda:0 \
+    --show \
+    --dump-info  # dump sdk info
+
+# run the command to convert RTMPose
+# Model file can be either a local path or a download link
+python tools/deploy.py \
+    configs/mmpose/pose-detection_simcc_tensorrt_dynamic-256x192.py \
+    ../mmpose/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \
+    https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \
+    demo/resources/human-pose.jpg \
+    --work-dir rtmpose-trt/rtmpose-m \
+    --device cuda:0 \
+    --show \
+    --dump-info  # dump sdk info
+```
+
+The converted model file is `{work-dir}/end2end.engine` by default.
+
+🎊 If the script runs successfully, you will see the following files:
+
+![convert_models](https://user-images.githubusercontent.com/13503330/217726963-7815dd01-561a-4605-b0c6-07b6fe1956c3.png)
+
+#### Advanced Setting
+
+To convert the model with TRT-FP16, you can enable the fp16 mode in your deploy config:
+
+```Python
+# in MMDeploy config
+backend_config = dict(
+    type='tensorrt',
+    common_config=dict(
+        fp16_mode=True  # enable fp16
+    ))
+```
+
+### 🕹️ Step3. Inference with SDK
+
+We provide both Python and C++ inference APIs with the MMDeploy SDK.
+
+To use the SDK, you need to dump the required info when converting the model. Just add `--dump-info` to the model conversion command:
+
+```shell
+# RTMDet
+# Model file can be either a local path or a download link
+python tools/deploy.py \
+    configs/mmdet/detection/detection_onnxruntime_dynamic.py \
+    ../mmpose/projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \
+    https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \
+    demo/resources/human-pose.jpg \
+    --work-dir rtmpose-ort/rtmdet-nano \
+    --device cpu \
+    --show \
+    --dump-info  # dump sdk info
+
+# RTMPose
+# Model file can be either a local path or a download link
+python tools/deploy.py \
+    configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \
+    ../mmpose/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \
+    https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \
+    demo/resources/human-pose.jpg \
+    --work-dir rtmpose-ort/rtmpose-m \
+    --device cpu \
+    --show \
+    --dump-info  # dump sdk info
+```
+
+After running the command, 3 additional JSON files will be dumped for the SDK:
+
+```shell
+|----{work-dir}
+    |----end2end.onnx    # ONNX model
+    |----end2end.engine  # TensorRT engine file
+
+    |----pipeline.json   #
+    |----deploy.json     # json files for the SDK
+    |----detail.json     #
+```
+
+#### Python API
+
+Here is a basic example of the SDK Python API:
+
+```Python
+# Copyright (c) OpenMMLab. All rights reserved.
+import argparse
+
+import cv2
+import numpy as np
+from mmdeploy_runtime import PoseDetector
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description='show how to use sdk python api')
+    parser.add_argument('device_name', help='name of device, cuda or cpu')
+    parser.add_argument(
+        'model_path',
+        help='path of mmdeploy SDK model dumped by model converter')
+    parser.add_argument('image_path', help='path of an image')
+    parser.add_argument(
+        '--bbox',
+        default=None,
+        nargs='+',
+        type=int,
+        help='bounding box of an object in format (x, y, w, h)')
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    args = parse_args()
+
+    img = cv2.imread(args.image_path)
+
+    detector = PoseDetector(
+        model_path=args.model_path, device_name=args.device_name, device_id=0)
+
+    if args.bbox is None:
+        result = detector(img)
+    else:
+        # convert (x, y, w, h) -> (left, top, right, bottom)
+        print(args.bbox)
+        bbox = np.array(args.bbox, dtype=int)
+        bbox[2:] += bbox[:2]
+        result = detector(img, bbox)
+    print(result)
+
+    _, point_num, _ = result.shape
+    points = result[:, :, :2].reshape(point_num, 2)
+    for [x, y] in points.astype(int):
+        cv2.circle(img, (x, y), 1, (0, 255, 0), 2)
+
+    cv2.imwrite('output_pose.png', img)
+
+
+if __name__ == '__main__':
+    main()
+```
+
+#### C++ API
+
+Here is a basic example of the SDK C++ API:
+
+```C++
+#include "mmdeploy/detector.hpp"
+
+#include "opencv2/imgcodecs/imgcodecs.hpp"
+#include "utils/argparse.h"
+#include "utils/visualize.h"
+
+DEFINE_ARG_string(model, "Model path");
+DEFINE_ARG_string(image, "Input image path");
+DEFINE_string(device, "cpu", R"(Device name, e.g. "cpu", "cuda")");
+DEFINE_string(output, "detector_output.jpg", "Output image path");
+
+DEFINE_double(det_thr, .5, "Detection score threshold");
+
+int main(int argc, char* argv[]) {
+  if (!utils::ParseArguments(argc, argv)) {
+    return -1;
+  }
+
+  cv::Mat img = cv::imread(ARGS_image);
+  if (img.empty()) {
+    fprintf(stderr, "failed to load image: %s\n", ARGS_image.c_str());
+    return -1;
+  }
+
+  // construct a detector instance
+  mmdeploy::Detector detector(mmdeploy::Model{ARGS_model}, mmdeploy::Device{FLAGS_device});
+
+  // apply the detector, the result is an array-like class holding references to
+  // `mmdeploy_detection_t`, will be released automatically on destruction
+  mmdeploy::Detector::Result dets = detector.Apply(img);
+
+  // visualize
+  utils::Visualize v;
+  auto sess = v.get_session(img);
+  int count = 0;
+  for (const mmdeploy_detection_t& det : dets) {
+    if (det.score > FLAGS_det_thr) {  // filter bboxes
+      sess.add_det(det.bbox, det.label_id, det.score, det.mask, count++);
+    }
+  }
+
+  if (!FLAGS_output.empty()) {
+    cv::imwrite(FLAGS_output, sess.get());
+  }
+
+  return 0;
+}
+```
+
+To build the C++ example, please add the MMDeploy package to your CMake project as follows:
+
+```CMake
+find_package(MMDeploy REQUIRED)
+target_link_libraries(${name} PRIVATE mmdeploy ${OpenCV_LIBS})
+```
+
+#### Other languages
+
+- [C# API Examples](https://github.com/open-mmlab/mmdeploy/tree/main/demo/csharp)
+- [JAVA API Examples](https://github.com/open-mmlab/mmdeploy/tree/main/demo/java)
+
+## 🚀 Step4. Pipeline Inference
+
+### Inference for images
+
+If you have compiled MMDeploy correctly, you will see the `det_pose` executable under `mmdeploy/build/bin/`.
+
+```shell
+# go to the mmdeploy folder
+cd ${PATH_TO_MMDEPLOY}/build/bin/
+
+# inference for an image
+./det_pose rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_img.jpg --device cpu
+
+required arguments:
+  det_model           Object detection model path [string]
+  pose_model          Pose estimation model path [string]
+  image               Input image path [string]
+
+optional arguments:
+  --device            Device name, e.g. "cpu", "cuda" [string = "cpu"]
+  --output            Output image path [string = "det_pose_output.jpg"]
+  --skeleton          Path to skeleton data or name of predefined skeletons:
+                      "coco" [string = "coco", "coco-wholebody"]
+  --det_label         Detection label used for pose estimation [int32 = 0]
+                      (0 refers to 'person' in coco)
+  --det_thr           Detection score threshold [double = 0.5]
+  --det_min_bbox_size Detection minimum bbox size [double = -1]
+  --pose_thr          Pose key-point threshold [double = 0]
+```
+
+#### API Example
+
+- [`det_pose.py`](https://github.com/open-mmlab/mmdeploy/blob/main/demo/python/det_pose.py)
+- [`det_pose.cxx`](https://github.com/open-mmlab/mmdeploy/blob/main/demo/csrc/cpp/det_pose.cxx)
+
+### Inference for a video
+
+If you have compiled MMDeploy correctly, you will see the `pose_tracker` executable under `mmdeploy/build/bin/`.
+
+- Pass `0` as `input` to run inference from a webcam.
+
+```shell
+# go to the mmdeploy folder
+cd ${PATH_TO_MMDEPLOY}/build/bin/
+
+# inference for a video
+./pose_tracker rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_video.mp4 --device cpu
+
+required arguments:
+  det_model             Object detection model path [string]
+  pose_model            Pose estimation model path [string]
+  input                 Input video path or camera index [string]
+
+optional arguments:
+  --device              Device name, e.g. "cpu", "cuda" [string = "cpu"]
+  --output              Output video path or format string [string = ""]
+  --output_size         Long-edge of output frames [int32 = 0]
+  --flip                Set to 1 for flipping the input horizontally [int32 = 0]
+  --show                Delay passed to `cv::waitKey` when using `cv::imshow`;
+                        -1: disable [int32 = 1]
+  --skeleton            Path to skeleton data or name of predefined skeletons:
+                        "coco", "coco-wholebody" [string = "coco"]
+  --background          Output background, "default": original image, "black":
+                        black background [string = "default"]
+  --det_interval        Detection interval [int32 = 1]
+  --det_label           Detection label used for pose estimation [int32 = 0]
+                        (0 refers to 'person' in coco)
+  --det_thr             Detection score threshold [double = 0.5]
+  --det_min_bbox_size   Detection minimum bbox size [double = -1]
+  --det_nms_thr         NMS IOU threshold for merging detected bboxes and
+                        bboxes from tracked targets [double = 0.7]
+  --pose_max_num_bboxes Max number of bboxes used for pose estimation per frame
+                        [int32 = -1]
+  --pose_kpt_thr        Threshold for visible key-points [double = 0.5]
+  --pose_min_keypoints  Min number of key-points for valid poses, -1 indicates
+                        ceil(n_kpts/2) [int32 = -1]
+  --pose_bbox_scale     Scale for expanding key-points to bbox [double = 1.25]
+  --pose_min_bbox_size  Min pose bbox size, tracks with bbox size smaller than
+                        the threshold will be dropped [double = -1]
+  --pose_nms_thr        NMS OKS/IOU threshold for suppressing overlapped poses,
+                        useful when multiple pose estimations collapse to the
+                        same target [double = 0.5]
+  --track_iou_thr       IOU threshold for associating missing tracks
+                        [double = 0.4]
+  --track_max_missing   Max number of missing frames before a missing track is
+                        removed [int32 = 10]
+```
+
+#### API Example
+
+- [`pose_tracker.py`](https://github.com/open-mmlab/mmdeploy/blob/main/demo/python/pose_tracker.py)
+- [`pose_tracker.cxx`](https://github.com/open-mmlab/mmdeploy/blob/main/demo/csrc/cpp/pose_tracker.cxx)
+
+## 📚 Common Usage [🔝](#-table-of-contents)
+
+### 🚀 Inference Speed Test [🔝](#-table-of-contents)
+
+If you need to test the inference speed of the model under the deployment framework, MMDeploy provides a convenient `tools/profiler.py` script.
+
+You need to prepare a folder of test images, e.g. `./test_images`; the profiler will randomly read images from this directory for the model speed test.
+
+```shell
+python tools/profiler.py \
+    configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \
+    {RTMPOSE_PROJECT}/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \
+    ../test_images \
+    --model {WORK_DIR}/end2end.onnx \
+    --shape 256x192 \
+    --device cpu \
+    --warmup 50 \
+    --num-iter 200
+```
+
+The result is as follows:
+
+```shell
+01/30 15:06:35 - mmengine - INFO - [onnxruntime]-70 times per count: 8.73 ms, 114.50 FPS
+01/30 15:06:36 - mmengine - INFO - [onnxruntime]-90 times per count: 9.05 ms, 110.48 FPS
+01/30 15:06:37 - mmengine - INFO - [onnxruntime]-110 times per count: 9.87 ms, 101.32 FPS
+01/30 15:06:37 - mmengine - INFO - [onnxruntime]-130 times per count: 9.99 ms, 100.10 FPS
+01/30 15:06:38 - mmengine - INFO - [onnxruntime]-150 times per count: 10.39 ms, 96.29 FPS
+01/30 15:06:39 - mmengine - INFO - [onnxruntime]-170 times per count: 10.77 ms, 92.86 FPS
+01/30 15:06:40 - mmengine - INFO - [onnxruntime]-190 times per count: 10.98 ms, 91.05 FPS
+01/30 15:06:40 - mmengine - INFO - [onnxruntime]-210 times per count: 11.19 ms, 89.33 FPS
+01/30 15:06:41 - mmengine - INFO - [onnxruntime]-230 times per count: 11.16 ms, 89.58 FPS
+01/30 15:06:42 - mmengine - INFO - [onnxruntime]-250 times per count: 11.06 ms, 90.41 FPS
+----- Settings:
++------------+---------+
+| batch size | 1       |
+| shape      | 256x192 |
+| iterations | 200     |
+| warmup     | 50      |
++------------+---------+
+----- Results:
++--------+------------+---------+
+| Stats  | Latency/ms | FPS     |
++--------+------------+---------+
+| Mean   | 11.060     | 90.412  |
+| Median | 11.852     | 84.375  |
+| Min    | 7.812      | 128.007 |
+| Max    | 13.690     | 73.044  |
++--------+------------+---------+
+```
+
+To learn more about the profiler, please refer to the [Profiler Docs](https://mmdeploy.readthedocs.io/en/latest/02-how-to-run/useful_tools.html#profiler).
+
+### 📊 Model Test [🔝](#-table-of-contents)
+
+If you need to test the inference accuracy of the model on the deployment backend, MMDeploy provides a convenient `tools/test.py` script.
+
+```shell
+python tools/test.py \
+    configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \
+    {RTMPOSE_PROJECT}/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \
+    --model {PATH_TO_MODEL}/rtmpose_m.pth \
+    --device cpu
+```
+
+You can also refer to the [MMDeploy Docs](https://github.com/open-mmlab/mmdeploy/blob/main/docs/en/02-how-to-run/profile_model.md) for more details.
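+
+Besides the backend-side metrics, a quick way to gain confidence in a converted model is to compare its predictions with the original PyTorch checkpoint on a single image. The sketch below is only illustrative: the file paths are placeholders taken from the examples above, and it assumes the MMPose Python API (`init_model` / `inference_topdown`) together with the SDK model exported with `--dump-info`. It is a rough consistency check, not a replacement for `tools/test.py`.
+
+```Python
+# Spot-check: compare SDK keypoints with the PyTorch model (illustrative sketch).
+import cv2
+import numpy as np
+from mmdeploy_runtime import PoseDetector
+from mmpose.apis import inference_topdown, init_model
+
+img_path = 'demo/resources/human-pose.jpg'
+img = cv2.imread(img_path)
+
+# Deployed SDK model directory (exported earlier with --dump-info).
+sdk_model = PoseDetector(
+    model_path='rtmpose-ort/rtmpose-m', device_name='cpu', device_id=0)
+sdk_kpts = sdk_model(img)[0, :, :2]  # (num_keypoints, 2) coordinates
+
+# Original PyTorch model loaded through the MMPose API.
+torch_model = init_model(
+    '{RTMPOSE_PROJECT}/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py',
+    '{PATH_TO_MODEL}/rtmpose_m.pth',
+    device='cpu')
+torch_kpts = inference_topdown(torch_model, img_path)[0].pred_instances.keypoints[0]
+
+# Both calls use the full image as the bounding box, so the keypoints should
+# roughly agree; large deviations usually indicate a conversion problem.
+print('max keypoint deviation (px):', np.abs(sdk_kpts - torch_kpts).max())
+```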
+
+## 📜 Citation [🔝](#-table-of-contents)
+
+If you find RTMPose useful in your research, please consider citing it:
+
+```bibtex
+@misc{https://doi.org/10.48550/arxiv.2303.07399,
+  doi = {10.48550/ARXIV.2303.07399},
+  url = {https://arxiv.org/abs/2303.07399},
+  author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai},
+  keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences},
+  title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose},
+  publisher = {arXiv},
+  year = {2023},
+  copyright = {Creative Commons Attribution 4.0 International}
+}
+
+@misc{mmpose2020,
+  title={OpenMMLab Pose Estimation Toolbox and Benchmark},
+  author={MMPose Contributors},
+  howpublished = {\url{https://github.com/open-mmlab/mmpose}},
+  year={2020}
+}
+```
diff --git a/projects/rtmpose/README_CN.md b/projects/rtmpose/README_CN.md
index 30bddf9ecd..1d2bb86805 100644
--- a/projects/rtmpose/README_CN.md
+++ b/projects/rtmpose/README_CN.md
@@ -1,1146 +1,1146 @@
-
    - -
    - -# RTMPose: Real-Time Multi-Person Pose Estimation toolkit based on MMPose - -> [RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose](https://arxiv.org/abs/2303.07399) - -
    - -[English](README.md) | 简体中文 - -
    - -______________________________________________________________________ - -## Abstract - -近年来,2D 姿态估计的研究在公开数据集上取得了出色的成绩,但是它在工业界的应用仍然受到笨重的模型参数和高推理延迟的影响。为了让前沿姿态估计算法在工业界落地,我们通过实验研究了多人姿态估计算法的五个方面:范式、骨干网络、定位算法、训练策略和部署推理,基于 MMPose 提出了一个高性能的实时多人姿态估计框架 **RTMPose**。我们的 RTMPose-m 模型在 COCO 上取得 **75.8%AP**,在 Intel i7-11700 CPU 上达到 **90+FPS**,在 NVIDIA GTX 1660 Ti GPU 上达到 **430+FPS**。我们同样验证了在算力有限的设备上做实时姿态估计,RTMPose-s 在移动端骁龙865芯片上可以达到 **COCO 72.2%AP**,**70+FPS**。在 MMDeploy 的帮助下,我们的项目支持 CPU、GPU、Jetson、移动端等多种部署环境。 - -![rtmpose_intro](https://user-images.githubusercontent.com/13503330/219269619-935499e5-bdd9-49ea-8104-3c7796dbd862.png) - -______________________________________________________________________ - -## 📄 Table of Contents - -- [🥳 🚀 最新进展](#--最新进展-) -- [📖 简介](#-简介-) -- [🙌 社区共建](#-社区共建-) -- [⚡ Pipeline 性能](#-pipeline-性能-) -- [📊 模型库](#-模型库-) -- [👀 可视化](#-可视化-) -- [😎 快速尝试](#-快速尝试-) -- [👨‍🏫 模型训练](#-模型训练-) -- [🏗️ 部署教程](#️-部署教程-) -- [📚 常用功能](#️-常用功能-) - - [🚀 模型测速](#-模型测速-) - - [📊 精度验证](#-精度验证-) -- [📜 引用](#-引用-) - -## 🥳 最新进展 [🔝](#-table-of-contents) - -- 2023 年 6 月: - - 发布混合数据集训练的 26 点 Body 模型。 -- 2023 年 5 月: - - 添加 [代码示例](./examples/) - - 发布混合数据集训练的 Hand, Face, Body 模型。 -- 2023 年 3 月:发布 RTMPose。RTMPose-m 取得 COCO 验证集 75.8 mAP,推理速度达到 430+ FPS 。 - -## 📖 简介 [🔝](#-table-of-contents) - -
    - -
    - -
    - -
    -
    - -
    - -### ✨ 主要特性 - -- 🚀 **高精度,低延迟** - - | Model | AP(COCO) | CPU-FPS | GPU-FPS | - | :---: | :------: | :-----: | :-----: | - | t | 68.5 | 300+ | 940+ | - | s | 72.2 | 200+ | 710+ | - | m | 75.8 | 90+ | 430+ | - | l | 76.5 | 50+ | 280+ | - -- 🛠️ **易部署** - - - 详细的部署代码教程,手把手教你模型部署 - - MMDeploy 助力 - - 支持多种部署后端 - - ONNX - - TensorRT - - ncnn - - OpenVINO 等 - - 支持多种平台 - - Linux - - Windows - - NVIDIA Jetson - - ARM 等 - -- 🏗️ **为实际业务设计** - - - 提供多种 Pipeline 推理接口和 SDK - - Python - - C++ - - C# - - JAVA 等 - -## 🙌 社区共建 [🔝](#-table-of-contents) - -RTMPose 是一个长期优化迭代的项目,致力于业务场景下的高性能实时姿态估计算法的训练、优化和部署,因此我们十分期待来自社区的力量,欢迎分享不同业务场景中 RTMPose 的训练配置与技巧,助力更多的社区用户! - -✨ ✨ ✨ - -- **如果你是 RTMPose 的新用户,我们热切希望你能参与[这份问卷](https://uua478.fanqier.cn/f/xxmynrki)/[Google Questionnaire](https://docs.google.com/forms/d/e/1FAIpQLSfzwWr3eNlDzhU98qzk2Eph44Zio6hi5r0iSwfO9wSARkHdWg/viewform?usp=sf_link),这对于我们的工作非常重要!** - -✨ ✨ ✨ - -欢迎加入我们的社区交流群获得更多帮助: - -- 微信用户群 - -
    - - -- Discord Group: - - 🙌 https://discord.gg/raweFPmdzG 🙌 - -## ⚡ Pipeline 性能 [🔝](#-table-of-contents) - -**说明** - -- Pipeline 速度测试时开启了隔帧检测策略,默认检测间隔为 5 帧。 -- 环境配置: - - torch >= 1.7.1 - - onnxruntime 1.12.1 - - TensorRT 8.4.3.1 - - cuDNN 8.3.2 - - CUDA 11.3 - -| Detection Config | Pose Config | Input Size
    (Det/Pose) | Model AP
    (COCO) | Pipeline AP
    (COCO) | Params (M)
    (Det/Pose) | Flops (G)
    (Det/Pose) | ORT-Latency(ms)
    (i7-11700) | TRT-FP16-Latency(ms)
    (GTX 1660Ti) | Download | -| :------------------------------------------------------------------ | :---------------------------------------------------------------------------- | :---------------------------: | :---------------------: | :------------------------: | :---------------------------: | :--------------------------: | :--------------------------------: | :---------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| [RTMDet-nano](./rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py) | [RTMPose-t](./rtmpose/body_2d_keypoint/rtmpose-t_8xb256-420e_coco-256x192.py) | 320x320
    256x192 | 40.3
    67.1 | 64.4 | 0.99
    3.34 | 0.31
    0.36 | 12.403 | 2.467 | [det](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth)
    [pose](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-aic-coco_pt-aic-coco_420e-256x192-cfc8f33d_20230126.pth) | -| [RTMDet-nano](./rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py) | [RTMPose-s](./rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py) | 320x320
    256x192 | 40.3
    71.1 | 68.5 | 0.99
    5.47 | 0.31
    0.68 | 16.658 | 2.730 | [det](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth)
    [pose](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-aic-coco_pt-aic-coco_420e-256x192-fcb2599b_20230126.pth) | -| [RTMDet-nano](./rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py) | [RTMPose-m](./rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py) | 320x320
    256x192 | 40.3
    75.3 | 73.2 | 0.99
    13.59 | 0.31
    1.93 | 26.613 | 4.312 | [det](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth)
    [pose](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth) | -| [RTMDet-nano](./rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py) | [RTMPose-l](./rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py) | 320x320
    256x192 | 40.3
    76.3 | 74.2 | 0.99
    27.66 | 0.31
    4.16 | 36.311 | 4.644 | [det](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth)
    [pose](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-256x192-f016ffe0_20230126.pth) | -| [RTMDet-m](./rtmdet/person/rtmdet_m_640-8xb32_coco-person.py) | [RTMPose-m](./rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py) | 640x640
    256x192 | 62.5
    75.3 | 75.7 | 24.66
    13.59 | 38.95
    1.93 | - | 6.923 | [det](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_m_8xb32-100e_coco-obj365-person-235e8209.pth)
    [pose](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth) | -| [RTMDet-m](./rtmdet/person/rtmdet_m_640-8xb32_coco-person.py) | [RTMPose-l](./rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py) | 640x640
    256x192 | 62.5
    76.3 | 76.6 | 24.66
    27.66 | 38.95
    4.16 | - | 7.204 | [det](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_m_8xb32-100e_coco-obj365-person-235e8209.pth)
    [pose](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-256x192-f016ffe0_20230126.pth) | - -## 📊 模型库 [🔝](#-table-of-contents) - -**说明** - -- 此处提供的模型采用了多数据集联合训练以提高性能,模型指标不适用于学术比较。 -- 表格中为开启了 Flip Test 的测试结果。 -- RTMPose 在更多公开数据集上的性能指标可以前往 [Model Zoo](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html) 查看。 -- RTMPose 在更多硬件平台上的推理速度可以前往 [Benchmark](./benchmark/README_CN.md) 查看。 -- 如果你有希望我们支持的数据集,欢迎[联系我们](https://uua478.fanqier.cn/f/xxmynrki)/[Google Questionnaire](https://docs.google.com/forms/d/e/1FAIpQLSfzwWr3eNlDzhU98qzk2Eph44Zio6hi5r0iSwfO9wSARkHdWg/viewform?usp=sf_link)! - -### 人体 2d 关键点 - -#### 17 Keypoints - -- 关键点骨架定义遵循 [COCO](http://cocodataset.org/). 详情见 [meta info](/configs/_base_/datasets/coco.py). -- - -
    -AIC+COCO - -| Config | Input Size | AP
    (COCO) | PCK@0.1
    (Body8) | AUC
    (Body8) | Params
    (M) | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | ncnn-FP16-Latency
    (ms)
    (Snapdragon 865) | Download | -| :---------------------------------------------------------------------------: | :--------: | :---------------: | :---------------------: | :-----------------: | :----------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :-----------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------: | -| [RTMPose-t](./rtmpose/body_2d_keypoint/rtmpose-t_8xb256-420e_coco-256x192.py) | 256x192 | 68.5 | 91.28 | 63.38 | 3.34 | 0.36 | 3.20 | 1.06 | 9.02 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-aic-coco_pt-aic-coco_420e-256x192-cfc8f33d_20230126.pth) | -| [RTMPose-s](./rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 72.2 | 92.95 | 66.19 | 5.47 | 0.68 | 4.48 | 1.39 | 13.89 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-aic-coco_pt-aic-coco_420e-256x192-fcb2599b_20230126.pth) | -| [RTMPose-m](./rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 75.8 | 94.13 | 68.53 | 13.59 | 1.93 | 11.06 | 2.29 | 26.44 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth) | -| [RTMPose-l](./rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 76.5 | 94.35 | 68.98 | 27.66 | 4.16 | 18.85 | 3.46 | 45.37 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-256x192-f016ffe0_20230126.pth) | -| [RTMPose-m](./rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-384x288.py) | 384x288 | 77.0 | 94.32 | 69.85 | 13.72 | 4.33 | 24.78 | 3.66 | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-384x288-a62a0b32_20230228.pth) | -| [RTMPose-l](./rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-384x288.py) | 384x288 | 77.3 | 94.54 | 70.14 | 27.79 | 9.35 | - | 6.05 | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-384x288-97d6cb0f_20230228.pth) | - -
    - -
    -Body8 - -- `*` 代表模型在 7 个开源数据集上训练得到: - - [AI Challenger](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#aic) - - [MS COCO](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#coco) - - [CrowdPose](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#crowdpose) - - [MPII](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#mpii) - - [sub-JHMDB](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#sub-jhmdb-dataset) - - [Halpe](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_wholebody_keypoint.html#halpe) - - [PoseTrack18](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#posetrack18) -- `Body8` 代表除了以上提到的 7 个数据集,再加上 [OCHuman](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#ochuman) 合并后一起进行评测得到的指标。 - -| Config | Input Size | AP
    (COCO) | PCK@0.1
    (Body8) | AUC
    (Body8) | Params
    (M) | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | ncnn-FP16-Latency
    (ms)
    (Snapdragon 865) | Download | -| :-----------------------------------------------------------------------------: | :--------: | :---------------: | :---------------------: | :-----------------: | :----------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :-----------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------: | -| [RTMPose-t\*](./rtmpose/body_2d_keypoint/rtmpose-t_8xb256-420e_coco-256x192.py) | 256x192 | 65.9 | 91.44 | 63.18 | 3.34 | 0.36 | 3.20 | 1.06 | 9.02 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-body7_pt-body7_420e-256x192-026a1439_20230504.pth) | -| [RTMPose-s\*](./rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 69.7 | 92.45 | 65.15 | 5.47 | 0.68 | 4.48 | 1.39 | 13.89 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-body7_pt-body7_420e-256x192-acd4a1ef_20230504.pth) | -| [RTMPose-m\*](./rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 74.9 | 94.25 | 68.59 | 13.59 | 1.93 | 11.06 | 2.29 | 26.44 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-256x192-e48f03d0_20230504.pth) | -| [RTMPose-l\*](./rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 76.7 | 95.08 | 70.14 | 27.66 | 4.16 | 18.85 | 3.46 | 45.37 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-256x192-4dba18fc_20230504.pth) | -| [RTMPose-m\*](./rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-384x288.py) | 384x288 | 76.6 | 94.64 | 70.38 | 13.72 | 4.33 | 24.78 | 3.66 | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-384x288-65e718c4_20230504.pth) | -| [RTMPose-l\*](./rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-384x288.py) | 384x288 | 78.3 | 95.36 | 71.58 | 27.79 | 9.35 | - | 6.05 | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-384x288-3f5a1437_20230504.pth) | -| [RTMPose-x\*](./rtmpose/body_2d_keypoint/rtmpose-x_8xb256-700e_coco-384x288.py) | 384x288 | 78.8 | - | - | 49.43 | 17.22 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-x_simcc-body7_pt-body7_700e-384x288-71d7b7e9_20230629.pth) | - -
    - -#### 26 Keypoints - -- 关键点骨架定义遵循 [Halpe26](https://github.com/Fang-Haoshu/Halpe-FullBody/),详情见 [meta info](/configs/_base_/datasets/halpe26.py)。 -- -- 模型在 `Body8` 上进行训练和评估。 - -| Config | Input Size | PCK@0.1
    (Body8) | AUC
    (Body8) | Params(M) | FLOPS(G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | ncnn-FP16-Latency
    (ms)
    (Snapdragon 865) | Download | -| :---------------------------------------------------------------------------------------: | :--------: | :---------------------: | :-----------------: | :-------: | :------: | :-----------------------------------------: | :------------------------------------------------: | :-----------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------: | -| [RTMPose-t\*](./rtmpose/body_2d_keypoint/rtmpose-t_8xb1024-700e_body8-halpe26-256x192.py) | 256x192 | 91.89 | 66.35 | 3.51 | 0.37 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-body7_pt-body7-halpe26_700e-256x192-6020f8a6_20230605.pth) | -| [RTMPose-s\*](./rtmpose/body_2d_keypoint/rtmpose-s_8xb1024-700e_body8-halpe26-256x192.py) | 256x192 | 93.01 | 68.62 | 5.70 | 0.70 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-body7_pt-body7-halpe26_700e-256x192-7f134165_20230605.pth) | -| [RTMPose-m\*](./rtmpose/body_2d_keypoint/rtmpose-m_8xb512-700e_body8-halpe26-256x192.py) | 256x192 | 94.75 | 71.91 | 13.93 | 1.95 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.pth) | -| [RTMPose-l\*](./rtmpose/body_2d_keypoint/rtmpose-l_8xb512-700e_body8-halpe26-256x192.py) | 256x192 | 95.37 | 73.19 | 28.11 | 4.19 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7-halpe26_700e-256x192-2abb7558_20230605.pth) | -| [RTMPose-m\*](./rtmpose/body_2d_keypoint/rtmpose-m_8xb512-700e_body8-halpe26-384x288.py) | 384x288 | 95.15 | 73.56 | 14.06 | 4.37 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-384x288-89e6428b_20230605.pth) | -| [RTMPose-l\*](./rtmpose/body_2d_keypoint/rtmpose-l_8xb512-700e_body8-halpe26-384x288.py) | 384x288 | 95.56 | 74.38 | 28.24 | 9.40 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7-halpe26_700e-384x288-734182ce_20230605.pth) | -| [RTMPose-x\*](./rtmpose/body_2d_keypoint/rtmpose-x_8xb256-700e_body8-halpe26-384x288.py) | 384x288 | 95.74 | 74.82 | 50.00 | 17.29 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-x_simcc-body7_pt-body7-halpe26_700e-384x288-7fb6e239_20230606.pth) | - -#### 模型剪枝 - -**说明** - -- 模型剪枝由 [MMRazor](https://github.com/open-mmlab/mmrazor) 提供 - -| Config | Input Size | AP
    (COCO) | Params
    (M) | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | ncnn-FP16-Latency
    (ms)
    (Snapdragon 865) | Download | -| :-----------------------: | :--------: | :---------------: | :----------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :-----------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | -| RTMPose-s-aic-coco-pruned | 256x192 | 69.4 | 3.43 | 0.35 | - | - | - | [Model](https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_finetune_rtmpose-s_8xb256-420e_aic-coco-256x192.pth) | - -更多信息,请参考 [GroupFisher Pruning for RTMPose](./rtmpose/pruning/README.md). - -### 人体全身 2d 关键点 (133 Keypoints) - -- 关键点骨架定义遵循 [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody/),详情见 [meta info](/configs/_base_/datasets/coco_wholebody.py)。 -- - -| Config | Input Size | Whole AP | Whole AR | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | Download | -| :------------------------------ | :--------: | :------: | :------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :-------------------------------: | -| [RTMPose-m](./rtmpose/wholebody_2d_keypoint/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 58.2 | 67.4 | 2.22 | 13.50 | 4.00 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody_pt-aic-coco_270e-256x192-cd5e845c_20230123.pth) | -| [RTMPose-l](./rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 61.1 | 70.0 | 4.52 | 23.41 | 5.67 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-256x192-6f206314_20230124.pth) | -| [RTMPose-l](./rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py) | 384x288 | 64.8 | 73.0 | 10.07 | 44.58 | 7.68 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-384x288-eaeb96c8_20230125.pth) | -| [RTMPose-x](./rtmpose/wholebody_2d_keypoint/rtmpose-x_8xb32-270e_coco-wholebody-384x288.py) | 384x288 | 65.3 | 73.3 | 18.1 | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-x_simcc-coco-wholebody_pt-body7_270e-384x288-401dfc90_20230629.pth) | - -### 动物 2d 关键点 (17 Keypoints) - -- 关键点骨架定义遵循 [AP-10K](https://github.com/AlexTheBad/AP-10K/),详情见 [meta info](/configs/_base_/datasets/ap10k.py)。 -- - -| Config | Input Size | AP
    (AP10K) | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | Download | -| :----------------------------: | :--------: | :----------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :------------------------------: | -| [RTMPose-m](./rtmpose/animal_2d_keypoint/rtmpose-m_8xb64-210e_ap10k-256x256.py) | 256x256 | 72.2 | 2.57 | 14.157 | 2.404 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-ap10k_pt-aic-coco_210e-256x256-7a041aa1_20230206.pth) | - -### 脸部 2d 关键点 (106 Keypoints) - -- 关键点骨架定义遵循 [LaPa](https://github.com/JDAI-CV/lapa-dataset),详情见 [meta info](/configs/_base_/datasets/lapa.py)。 -- - -
    -Face6 - -- `Face6` and `*` 代表模型在 6 个开源数据集上训练得到: - - [COCO-Wholebody-Face](https://github.com/jin-s13/COCO-WholeBody/) - - [WFLW](https://wywu.github.io/projects/LAB/WFLW.html) - - [300W](https://ibug.doc.ic.ac.uk/resources/300-W/) - - [COFW](http://www.vision.caltech.edu/xpburgos/ICCV13/) - - [Halpe](https://github.com/Fang-Haoshu/Halpe-FullBody/) - - [LaPa](https://github.com/JDAI-CV/lapa-dataset) - -| Config | Input Size | NME
    (LaPa) | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | Download | -| :----------------------------: | :--------: | :----------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :------------------------------: | -| [RTMPose-t\*](./rtmpose/face_2d_keypoint/rtmpose-t_8xb256-120e_lapa-256x256.py) | 256x256 | 1.67 | 0.652 | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-face6_pt-in1k_120e-256x256-df79d9a5_20230529.pth) | -| [RTMPose-s\*](./rtmpose/face_2d_keypoint/rtmpose-s_8xb256-120e_lapa-256x256.py) | 256x256 | 1.59 | 1.119 | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-face6_pt-in1k_120e-256x256-d779fdef_20230529.pth) | -| [RTMPose-m\*](./rtmpose/face_2d_keypoint/rtmpose-m_8xb256-120e_lapa-256x256.py) | 256x256 | 1.44 | 2.852 | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-face6_pt-in1k_120e-256x256-72a37400_20230529.pth) | - -
    - -### 手部 2d 关键点 (21 Keypoints) - -- 关键点骨架定义遵循 [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody/),详情见 [meta info](/configs/_base_/datasets/coco_wholebody_hand.py)。 -- - -| Detection Config | Input Size | Model AP
    (OneHand10K) | Flops
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | Download | -| :---------------------------: | :--------: | :---------------------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :--------------------: | -| [RTMDet-nano (试用)](./rtmdet/hand/rtmdet_nano_320-8xb32_hand.py) | 320x320 | 76.0 | 0.31 | - | - | [Det Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmdet_nano_8xb32-300e_hand-267f9c8f.pth) | - -
    -Hand5 - -- `Hand5` and `*` 代表模型在 5 个开源数据集上训练得到: - - [COCO-Wholebody-Hand](https://github.com/jin-s13/COCO-WholeBody/) - - [OneHand10K](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html) - - [FreiHand2d](https://lmb.informatik.uni-freiburg.de/projects/freihand/) - - [RHD2d](https://lmb.informatik.uni-freiburg.de/resources/datasets/RenderedHandposeDataset.en.html) - - [Halpe](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_wholebody_keypoint.html#halpe) - -| Config | Input Size | PCK@0.2
    (COCO-Wholebody-Hand) | PCK@0.2
    (Hand5) | AUC
    (Hand5) | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | Download | -| :----------------------------------------------------------------------------------------------------------: | :--------: | :-----------------------------------: | :---------------------: | :-----------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------: | -| [RTMPose-m\*
    (试用)](./rtmpose/hand_2d_keypoint/rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 81.5 | 96.4 | 83.9 | 2.581 | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-hand5_pt-aic-coco_210e-256x256-74fb594_20230320.pth) | - -
    - -### 预训练模型 - -我们提供了 UDP 预训练的 CSPNeXt 模型参数,训练配置请参考 [pretrain_cspnext_udp folder](./rtmpose/pretrain_cspnext_udp/)。 - -
    -AIC+COCO - -| Model | Input Size | Params
    (M) | Flops
    (G) | AP
    (GT) | AR
    (GT) | Download | -| :----------: | :--------: | :----------------: | :---------------: | :-------------: | :-------------: | :---------------------------------------------------------------------------------------------------------------: | -| CSPNeXt-tiny | 256x192 | 6.03 | 1.43 | 65.5 | 68.9 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/cspnext-tiny_udp-aic-coco_210e-256x192-cbed682d_20230130.pth) | -| CSPNeXt-s | 256x192 | 8.58 | 1.78 | 70.0 | 73.3 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth) | -| CSPNeXt-m | 256x192 | 17.53 | 3.05 | 74.8 | 77.7 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth) | -| CSPNeXt-l | 256x192 | 32.44 | 5.32 | 77.2 | 79.9 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth) | - -
    - -
    -Body8 - -- `*` 代表模型在 7 个开源数据集上训练得到: - - [AI Challenger](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#aic) - - [MS COCO](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#coco) - - [CrowdPose](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#crowdpose) - - [MPII](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#mpii) - - [sub-JHMDB](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#sub-jhmdb-dataset) - - [Halpe](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_wholebody_keypoint.html#halpe) - - [PoseTrack18](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#posetrack18) -- `Body8` 代表除了以上提到的 7 个数据集,再加上 [OCHuman](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#ochuman) 合并后一起进行评测得到的指标。 - -| Model | Input Size | Params
    (M) | Flops
    (G) | AP
    (COCO) | PCK@0.2
    (Body8) | AUC
    (Body8) | Download | -| :------------: | :--------: | :----------------: | :---------------: | :---------------: | :---------------------: | :-----------------: | :--------------------------------------------------------------------------------: | -| CSPNeXt-tiny\* | 256x192 | 6.03 | 1.43 | 65.9 | 96.34 | 63.80 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-tiny_udp-body7_210e-256x192-a3775292_20230504.pth) | -| CSPNeXt-s\* | 256x192 | 8.58 | 1.78 | 68.7 | 96.59 | 64.92 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-s_udp-body7_210e-256x192-8c9ccbdb_20230504.pth) | -| CSPNeXt-m\* | 256x192 | 17.53 | 3.05 | 73.7 | 97.42 | 68.19 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-body7_210e-256x192-e0c9327b_20230504.pth) | -| CSPNeXt-l\* | 256x192 | 32.44 | 5.32 | 75.7 | 97.76 | 69.57 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-body7_210e-256x192-5e9558ef_20230504.pth) | -| CSPNeXt-m\* | 384x288 | 17.53 | 6.86 | 75.8 | 97.60 | 70.18 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-body7_210e-384x288-b9bc2b57_20230504.pth) | -| CSPNeXt-l\* | 384x288 | 32.44 | 11.96 | 77.2 | 97.89 | 71.23 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-body7_210e-384x288-b15bc30d_20230504.pth) | -| CSPNeXt-x\* | 384x288 | 54.92 | 19.96 | 78.1 | 98.00 | 71.79 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-x_udp-body7_210e-384x288-d28b58e6_20230529.pth) | - -
    - -#### ImageNet - -我们提供了 ImageNet 分类训练的 CSPNeXt 模型参数,更多细节请参考 [RTMDet](https://github.com/open-mmlab/mmdetection/blob/latest/configs/rtmdet/README.md#classification)。 - -| Model | Input Size | Params
    (M) | Flops
    (G) | Top-1 (%) | Top-5 (%) | Download | -| :----------: | :--------: | :----------------: | :---------------: | :-------: | :-------: | :---------------------------------------------------------------------------------------------------------------------------: | -| CSPNeXt-tiny | 224x224 | 2.73 | 0.34 | 69.44 | 89.45 | [Model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e-3a2dd350.pth) | -| CSPNeXt-s | 224x224 | 4.89 | 0.66 | 74.41 | 92.23 | [Model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e-ea671761.pth) | -| CSPNeXt-m | 224x224 | 13.05 | 1.93 | 79.27 | 94.79 | [Model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth) | -| CSPNeXt-l | 224x224 | 27.16 | 4.19 | 81.30 | 95.62 | [Model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-l_8xb256-rsb-a1-600e_in1k-6a760974.pth) | -| CSPNeXt-x | 224x224 | 48.85 | 7.76 | 82.10 | 95.69 | [Model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-x_8xb256-rsb-a1-600e_in1k-b3f78edd.pth) | - -## 👀 可视化 [🔝](#-table-of-contents) - -
    - - -
    - -## 😎 快速尝试 [🔝](#-table-of-contents) - -我们提供了两种途径来让用户尝试 RTMPose 模型: - -- MMPose demo 脚本 -- MMDeploy SDK 预编译包 (推荐,速度提升6-10倍) - -### MMPose demo 脚本 - -通过 MMPose 提供的 demo 脚本可以基于 Pytorch 快速进行[模型推理](https://mmpose.readthedocs.io/en/latest/user_guides/inference.html)和效果验证。 - -**提示:** - -- 基于 Pytorch 推理并不能达到 RTMPose 模型的最大推理速度,只用于模型效果验证。 -- 输入模型路径可以是本地路径,也可以是下载链接。 - -```shell -# 前往 mmpose 目录 -cd ${PATH_TO_MMPOSE} - -# RTMDet 与 RTMPose 联合推理 -# 输入模型路径可以是本地路径,也可以是下载链接。 -python demo/topdown_demo_with_mmdet.py \ - projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ - https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ - projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ - https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ - --input {YOUR_TEST_IMG_or_VIDEO} \ - --show - -# 摄像头推理 -# 输入模型路径可以是本地路径,也可以是下载链接。 -python demo/topdown_demo_with_mmdet.py \ - projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ - https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ - projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ - https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ - --input webcam \ - --show -``` - -效果展示: - -![topdown_inference_with_rtmdet](https://user-images.githubusercontent.com/13503330/220005020-06bdf37f-6817-4681-a2c8-9dd55e4fbf1e.png) - -### MMDeploy SDK 预编译包 (推荐) - -MMDeploy 提供了预编译的 SDK,用于对 RTMPose 项目进行 Pipeline 推理,其中推理所用的模型为 SDK 版本。 - -- 所有的模型必须经过 `tools/deploy.py` 导出后才能使用 PoseTracker 进行推理。 -- 导出 SDK 版模型的教程见 [SDK 推理](#%EF%B8%8F-sdk-推理),推理的详细参数设置见 [Pipeline 推理](#-pipeline-推理)。 -- 你可以从 [硬件模型库](https://platform.openmmlab.com/deploee) 直接下载 SDK 版模型(ONNX、 TRT、ncnn 等)。 -- 同时我们也支持 [在线模型转换](https://platform.openmmlab.com/deploee/task-convert-list)。 - -#### Linux\\ - -说明: - -- GCC 版本需大于 7.5 -- cmake 版本需大于 3.20 - -##### Python 推理 - -1. 安装 mmdeploy_runtime 或者 mmdeploy_runtime_gpu - -```shell -# for onnxruntime -pip install mmdeploy-runtime - -# for onnxruntime-gpu / tensorrt -pip install mmdeploy-runtime-gpu -``` - -2. 下载预编译包: - -```shell -# onnxruntime -# for ubuntu -wget -c https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cxx11abi.tar.gz -# 解压并将 third_party 中第三方推理库的动态库添加到 PATH - -# for centos7 and lower -wget -c https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64.tar.gz -# 解压并将 third_party 中第三方推理库的动态库添加到 PATH - -# onnxruntime-gpu / tensorrt -# for ubuntu -wget -c https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cxx11abi-cuda11.3.tar.gz -# 解压并将 third_party 中第三方推理库的动态库添加到 PATH - -# for centos7 and lower -wget -c https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cuda11.3.tar.gz -# 解压并将 third_party 中第三方推理库的动态库添加到 PATH -``` - -3. 下载 sdk 模型并解压到 `./example/python` 下。(该模型只用于演示,如需其他模型,请参考 [SDK 推理](#%EF%B8%8F-sdk-推理)) - -```shell -# rtmdet-nano + rtmpose-m for cpu sdk -wget https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-cpu.zip - -unzip rtmpose-cpu.zip -``` - -4. 
使用 `pose_tracker.py` 进行推理: - -```shell -# 进入 ./example/python - -# 请传入模型目录,而不是模型文件 -# 格式: -# python pose_tracker.py cpu {det work-dir} {pose work-dir} {your_video.mp4} - -# 示例: -python pose_tracker.py cpu rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_video.mp4 - -# 摄像头 -python pose_tracker.py cpu rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ 0 -``` - -##### ONNX - -```shell -# 下载预编译包 -wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cxx11abi.tar.gz - -# 解压文件 -tar -xzvf mmdeploy-1.0.0-linux-x86_64-cxx11abi.tar.gz - -# 切换到 sdk 目录 -cd mmdeploy-1.0.0-linux-x86_64-cxx11abi - -# 设置环境变量 -source set_env.sh - -# 如果系统中没有安装 opencv 3+,请执行以下命令。如果已安装,可略过 -bash install_opencv.sh - -# 编译可执行程序 -bash build_sdk.sh - -# 图片推理 -# 请传入模型目录,而不是模型文件 -./bin/det_pose rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_img.jpg --device cpu - -# 视频推理 -# 请传入模型目录,而不是模型文件 -./bin/pose_tracker rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_video.mp4 --device cpu - -# 摄像头推理 -# 请传入模型目录,而不是模型文件 -./bin/pose_tracker rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ 0 --device cpu -``` - -##### TensorRT - -```shell -# 下载预编译包 -wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cxx11abi-cuda11.3.tar.gz - -# 解压文件 -tar -xzvf mmdeploy-1.0.0-linux-x86_64-cxx11abi-cuda11.3.tar.gz - -# 切换到 sdk 目录 -cd mmdeploy-1.0.0-linux-x86_64-cxx11abi-cuda11.3 - -# 设置环境变量 -source set_env.sh - -# 如果系统中没有安装 opencv 3+,请执行以下命令。如果已安装,可略过 -bash install_opencv.sh - -# 编译可执行程序 -bash build_sdk.sh - -# 图片推理 -# 请传入模型目录,而不是模型文件 -./bin/det_pose rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_img.jpg --device cuda - -# 视频推理 -# 请传入模型目录,而不是模型文件 -./bin/pose_tracker rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_video.mp4 --device cuda - -# 摄像头推理 -# 请传入模型目录,而不是模型文件 -./bin/pose_tracker rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ 0 --device cuda -``` - -详细参数设置见 [Pipeline 推理](#-pipeline-推理)。 - -#### Windows - -##### Python 推理 - -1. 安装 mmdeploy_runtime 或者 mmdeploy_runtime_gpu - -```shell -# for onnxruntime -pip install mmdeploy-runtime -# 下载 [sdk](https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-windows-amd64.zip) 并将 third_party 中第三方推理库的动态库添加到 PATH - -# for onnxruntime-gpu / tensorrt -pip install mmdeploy-runtime-gpu -# 下载 [sdk](https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-windows-amd64-cuda11.3.zip) 并将 third_party 中第三方推理库的动态库添加到 PATH -``` - -2. 下载 sdk 模型并解压到 `./example/python` 下。(该模型只用于演示,如需其他模型,请参考 [SDK 推理](#%EF%B8%8F-sdk-推理)) - -```shell -# rtmdet-nano + rtmpose-m for cpu sdk -wget https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-cpu.zip - -unzip rtmpose-cpu.zip -``` - -3. 使用 `pose_tracker.py` 进行推理: - -```shell -# 进入 ./example/python -# 请传入模型目录,而不是模型文件 -python pose_tracker.py cpu rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_video.mp4 - -# 摄像头 -# 请传入模型目录,而不是模型文件 -python pose_tracker.py cpu rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ 0 -``` - -##### 可执行文件推理 - -1. 安装 [cmake](https://cmake.org/download/)。 -2. 前往 [mmdeploy](https://github.com/open-mmlab/mmdeploy/releases) 下载 win 预编译包。 -3. 解压文件,进入 sdk 目录。 -4. 使用管理员权限打开 PowerShell,执行以下命令: - -```shell -set-ExecutionPolicy RemoteSigned -``` - -5. 安装 OpenCV: - -```shell -# in sdk folder: -.\install_opencv.ps1 -``` - -6. 配置环境变量: - -```shell -# in sdk folder: -. .\set_env.ps1 -``` - -7. 
编译 sdk: - -```shell -# in sdk folder: -# 如果你通过 .\install_opencv.ps1 安装 opencv,直接运行如下指令: -.\build_sdk.ps1 -# 如果你自行安装了 opencv,需要指定 OpenCVConfig.cmake 的路径: -.\build_sdk.ps1 "path/to/folder/of/OpenCVConfig.cmake" -``` - -8. 可执行文件会在如下路径生成: - -```shell -example\cpp\build\Release -``` - -### MMPose demo 脚本 - -通过 MMPose 提供的 demo 脚本可以基于 Pytorch 快速进行[模型推理](https://mmpose.readthedocs.io/en/latest/user_guides/inference.html)和效果验证。 - -**提示:** - -- 基于 Pytorch 推理并不能达到 RTMPose 模型的真实推理速度,只用于模型效果验证。 - -```shell -# 前往 mmpose 目录 -cd ${PATH_TO_MMPOSE} - -# RTMDet 与 RTMPose 联合推理 -python demo/topdown_demo_with_mmdet.py \ - projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ - {PATH_TO_CHECKPOINT}/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ - projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ - {PATH_TO_CHECKPOINT}/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ - --input {YOUR_TEST_IMG_or_VIDEO} \ - --show - -# 摄像头推理 -python demo/topdown_demo_with_mmdet.py \ - projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ - {PATH_TO_CHECKPOINT}/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ - projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ - {PATH_TO_CHECKPOINT}/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ - --input webcam \ - --show -``` - -效果展示: - -![topdown_inference_with_rtmdet](https://user-images.githubusercontent.com/13503330/220005020-06bdf37f-6817-4681-a2c8-9dd55e4fbf1e.png) - -## 👨‍🏫 模型训练 [🔝](#-table-of-contents) - -请参考 [训练与测试](https://mmpose.readthedocs.io/en/latest/user_guides/train_and_test.html) 进行 RTMPose 的训练。 - -**提示**: - -- 当用户的数据集较小时请根据情况缩小 `batch_size` 和 `base_lr`。 -- 模型选择 - - m:推荐首选使用 - - t/s:适用于极端低算力的移动设备,或对推理速度要求严格的场景 - - l:适用于算力较强、对速度不敏感的场景 - -## 🏗️ 部署教程 [🔝](#-table-of-contents) - -本教程将展示如何通过 [MMDeploy](https://github.com/open-mmlab/mmdeploy/tree/main) 部署 RTMPose 项目。 - -- 你可以从 [硬件模型库](https://platform.openmmlab.com/deploee) 直接下载 SDK 版模型(ONNX、 TRT、ncnn 等)。 -- 同时我们也支持 [在线模型转换](https://platform.openmmlab.com/deploee/task-convert-list)。 - -### 🧩 安装 - -在开始部署之前,首先你需要确保正确安装了 MMPose, MMDetection, MMDeploy,相关安装教程如下: - -- [安装 MMPose 与 MMDetection](https://mmpose.readthedocs.io/zh_CN/latest/installation.html) -- [安装 MMDeploy](https://mmdeploy.readthedocs.io/zh_CN/latest/04-supported-codebases/mmpose.html) - -根据部署后端的不同,有的后端需要对自定义算子进行编译,请根据需求前往对应的文档确保环境搭建正确: - -- [ONNX](https://mmdeploy.readthedocs.io/zh_CN/latest/05-supported-backends/onnxruntime.html) -- [TensorRT](https://mmdeploy.readthedocs.io/zh_CN/latest/05-supported-backends/tensorrt.html) -- [OpenVINO](https://mmdeploy.readthedocs.io/zh_CN/latest/05-supported-backends/openvino.html) -- [更多](https://github.com/open-mmlab/mmdeploy/tree/main/docs/en/05-supported-backends) - -### 🛠️ 模型转换 - -在完成安装之后,你就可以开始模型部署了。通过 MMDeploy 提供的 `tools/deploy.py` 可以方便地将 Pytorch 模型转换到不同的部署后端。 - -我们本节演示将 RTMDet 和 RTMPose 模型导出为 ONNX 和 TensorRT 格式,如果你希望了解更多内容请前往 [MMDeploy 文档](https://mmdeploy.readthedocs.io/zh_CN/latest/02-how-to-run/convert_model.html)。 - -- ONNX 配置 - - \- RTMDet:[`detection_onnxruntime_static.py`](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmdet/detection/detection_onnxruntime_static.py) - - \- RTMPose:[`pose-detection_simcc_onnxruntime_dynamic.py`](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py) - -- TensorRT 配置 - - \- 
RTMDet:[`detection_tensorrt_static-320x320.py`](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmdet/detection/detection_tensorrt_static-320x320.py) - - \- RTMPose:[`pose-detection_simcc_tensorrt_dynamic-256x192.py`](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmpose/pose-detection_simcc_tensorrt_dynamic-256x192.py) - -如果你需要对部署配置进行修改,请参考 [MMDeploy config tutorial](https://mmdeploy.readthedocs.io/zh_CN/latest/02-how-to-run/write_config.html). - -本教程中使用的文件结构如下: - -```shell -|----mmdeploy -|----mmdetection -|----mmpose -``` - -#### ONNX - -运行如下命令: - -```shell -# 前往 mmdeploy 目录 -cd ${PATH_TO_MMDEPLOY} - -# 转换 RTMDet -# 输入模型路径可以是本地路径,也可以是下载链接。 -python tools/deploy.py \ - configs/mmdet/detection/detection_onnxruntime_static.py \ - ../mmpose/projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ - https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ - demo/resources/human-pose.jpg \ - --work-dir rtmpose-ort/rtmdet-nano \ - --device cpu \ - --show \ - --dump-info # 导出 sdk info - -# 转换 RTMPose -# 输入模型路径可以是本地路径,也可以是下载链接。 -python tools/deploy.py \ - configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \ - ../mmpose/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ - https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ - demo/resources/human-pose.jpg \ - --work-dir rtmpose-ort/rtmpose-m \ - --device cpu \ - --show \ - --dump-info # 导出 sdk info -``` - -默认导出模型文件为 `{work-dir}/end2end.onnx` - -#### TensorRT - -运行如下命令: - -```shell -# 前往 mmdeploy 目录 -cd ${PATH_TO_MMDEPLOY} - -# 转换 RTMDet -# 输入模型路径可以是本地路径,也可以是下载链接。 -python tools/deploy.py \ - configs/mmdet/detection/detection_tensorrt_static-320x320.py \ - ../mmpose/projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ - https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ - demo/resources/human-pose.jpg \ - --work-dir rtmpose-trt/rtmdet-nano \ - --device cuda:0 \ - --show \ - --dump-info # 导出 sdk info - -# 转换 RTMPose -# 输入模型路径可以是本地路径,也可以是下载链接。 -python tools/deploy.py \ - configs/mmpose/pose-detection_simcc_tensorrt_dynamic-256x192.py \ - ../mmpose/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ - https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ - demo/resources/human-pose.jpg \ - --work-dir rtmpose-trt/rtmpose-m \ - --device cuda:0 \ - --show \ - --dump-info # 导出 sdk info -``` - -默认导出模型文件为 `{work-dir}/end2end.engine` - -🎊 如果模型顺利导出,你将会看到样例图片上的检测结果: - -![convert_models](https://user-images.githubusercontent.com/13503330/217726963-7815dd01-561a-4605-b0c6-07b6fe1956c3.png) - -#### 高级设置 - -如果需要使用 TensorRT-FP16,你可以通过修改以下配置开启: - -```Python -# in MMDeploy config -backend_config = dict( - type='tensorrt', - common_config=dict( - fp16_mode=True # 打开 fp16 - )) -``` - -### 🕹️ SDK 推理 - -要进行 Pipeline 推理,需要先用 MMDeploy 导出 SDK 版本的 det 和 pose 模型,只需要在参数中加上`--dump-info`。 - -此处以 onnxruntime 的 cpu 模型为例,运行如下命令: - -```shell -# RTMDet -# 输入模型路径可以是本地路径,也可以是下载链接。 -python tools/deploy.py \ - configs/mmdet/detection/detection_onnxruntime_dynamic.py \ - ../mmpose/projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ - https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ - demo/resources/human-pose.jpg 
\ - --work-dir rtmpose-ort/rtmdet-nano \ - --device cpu \ - --show \ - --dump-info # 导出 sdk info - -# RTMPose -# 输入模型路径可以是本地路径,也可以是下载链接。 -python tools/deploy.py \ - configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \ - ../mmpose/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ - https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ - demo/resources/human-pose.jpg \ - --work-dir rtmpose-ort/rtmpose-m \ - --device cpu \ - --show \ - --dump-info # 导出 sdk info -``` - -默认会导出三个 json 文件: - -```shell -|----sdk - |----end2end.onnx # ONNX model - |----end2end.engine # TensorRT engine file - - |----pipeline.json # - |----deploy.json # json files for the SDK - |----detail.json # -``` - -#### Python API - -```Python -# Copyright (c) OpenMMLab. All rights reserved. -import argparse - -import cv2 -import numpy as np -from mmdeploy_runtime import PoseDetector - -def parse_args(): - parser = argparse.ArgumentParser( - description='show how to use sdk python api') - parser.add_argument('device_name', help='name of device, cuda or cpu') - parser.add_argument( - 'model_path', - help='path of mmdeploy SDK model dumped by model converter') - parser.add_argument('image_path', help='path of an image') - parser.add_argument( - '--bbox', - default=None, - nargs='+', - type=int, - help='bounding box of an object in format (x, y, w, h)') - args = parser.parse_args() - return args - -def main(): - args = parse_args() - - img = cv2.imread(args.image_path) - - detector = PoseDetector( - model_path=args.model_path, device_name=args.device_name, device_id=0) - - if args.bbox is None: - result = detector(img) - else: - # converter (x, y, w, h) -> (left, top, right, bottom) - print(args.bbox) - bbox = np.array(args.bbox, dtype=int) - bbox[2:] += bbox[:2] - result = detector(img, bbox) - print(result) - - _, point_num, _ = result.shape - points = result[:, :, :2].reshape(point_num, 2) - for [x, y] in points.astype(int): - cv2.circle(img, (x, y), 1, (0, 255, 0), 2) - - cv2.imwrite('output_pose.png', img) - -if __name__ == '__main__': - main() -``` - -#### C++ API - -```C++ -#include "mmdeploy/detector.hpp" - -#include "opencv2/imgcodecs/imgcodecs.hpp" -#include "utils/argparse.h" -#include "utils/visualize.h" - -DEFINE_ARG_string(model, "Model path"); -DEFINE_ARG_string(image, "Input image path"); -DEFINE_string(device, "cpu", R"(Device name, e.g. 
"cpu", "cuda")"); -DEFINE_string(output, "detector_output.jpg", "Output image path"); - -DEFINE_double(det_thr, .5, "Detection score threshold"); - -int main(int argc, char* argv[]) { - if (!utils::ParseArguments(argc, argv)) { - return -1; - } - - cv::Mat img = cv::imread(ARGS_image); - if (img.empty()) { - fprintf(stderr, "failed to load image: %s\n", ARGS_image.c_str()); - return -1; - } - - // construct a detector instance - mmdeploy::Detector detector(mmdeploy::Model{ARGS_model}, mmdeploy::Device{FLAGS_device}); - - // apply the detector, the result is an array-like class holding references to - // `mmdeploy_detection_t`, will be released automatically on destruction - mmdeploy::Detector::Result dets = detector.Apply(img); - - // visualize - utils::Visualize v; - auto sess = v.get_session(img); - int count = 0; - for (const mmdeploy_detection_t& det : dets) { - if (det.score > FLAGS_det_thr) { // filter bboxes - sess.add_det(det.bbox, det.label_id, det.score, det.mask, count++); - } - } - - if (!FLAGS_output.empty()) { - cv::imwrite(FLAGS_output, sess.get()); - } - - return 0; -} -``` - -对于 C++ API 示例,请将 MMDeploy 加入到 CMake 项目中: - -```CMake -find_package(MMDeploy REQUIRED) -target_link_libraries(${name} PRIVATE mmdeploy ${OpenCV_LIBS}) -``` - -#### 其他语言 - -- [C# API 示例](https://github.com/open-mmlab/mmdeploy/tree/main/demo/csharp) -- [JAVA API 示例](https://github.com/open-mmlab/mmdeploy/tree/main/demo/java) - -### 🚀 Pipeline 推理 - -#### 图片推理 - -如果用户有跟随 MMDeploy 安装教程进行正确编译,在 `mmdeploy/build/bin/` 路径下会看到 `det_pose` 的可执行文件。 - -```shell -# 前往 mmdeploy 目录 -cd ${PATH_TO_MMDEPLOY}/build/bin/ - -# 单张图片推理 -./det_pose rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_img.jpg --device cpu - -required arguments: - det_model Detection 模型路径 [string] - pose_model Pose 模型路径 [string] - image 输入图片路径 [string] - -optional arguments: - --device 推理设备 "cpu", "cuda" [string = "cpu"] - --output 导出图片路径 [string = "det_pose_output.jpg"] - --skeleton 骨架定义文件路径,或使用预定义骨架: - "coco" [string = "coco", "coco-wholoebody"] - --det_label 用于姿势估计的检测标签 [int32 = 0] - (0 在 coco 中对应 person) - --det_thr 检测分数阈值 [double = 0.5] - --det_min_bbox_size 最小检测框大小 [double = -1] - --pose_thr 关键点置信度阈值 [double = 0] -``` - -**API** **示例** - -\- [`det_pose.py`](https://github.com/open-mmlab/mmdeploy/blob/main/demo/python/det_pose.py) - -\- [`det_pose.cxx`](https://github.com/open-mmlab/mmdeploy/blob/main/demo/csrc/cpp/det_pose.cxx) - -#### 视频推理 - -如果用户有跟随 MMDeploy 安装教程进行正确编译,在 `mmdeploy/build/bin/` 路径下会看到 `pose_tracker` 的可执行文件。 - -- 将 `input` 输入 `0` 可以使用摄像头推理 - -```shell -# 前往 mmdeploy 目录 -cd ${PATH_TO_MMDEPLOY}/build/bin/ - -# 视频推理 -./pose_tracker rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_video.mp4 --device cpu - -required arguments: - det_model Detection 模型路径 [string] - pose_model Pose 模型路径 [string] - input 输入图片路径或摄像头序号 [string] - -optional arguments: - --device 推理设备 "cpu", "cuda" [string = "cpu"] - --output 导出视频路径 [string = ""] - --output_size 输出视频帧的长边 [int32 = 0] - --flip 设置为1,用于水平翻转输入 [int32 = 0] - --show 使用`cv::imshow`时,传递给`cv::waitKey`的延迟; - -1: 关闭 [int32 = 1] - --skeleton 骨架数据的路径或预定义骨架的名称: - "coco", "coco-wholebody" [string = "coco"] - --background 导出视频背景颜色, "default": 原图, "black": - 纯黑背景 [string = "default"] - --det_interval 检测间隔 [int32 = 1] - --det_label 用于姿势估计的检测标签 [int32 = 0] - (0 在 coco 中对应 person) - --det_thr 检测分数阈值 [double = 0.5] - --det_min_bbox_size 最小检测框大小 [double = -1] - --det_nms_thr NMS IOU阈值,用于合并检测到的bboxes和 - 追踪到的目标的 bboxes [double = 0.7] - --pose_max_num_bboxes 每一帧用于姿势估计的 bboxes 的最大数量 - [int32 = -1] - 
--pose_kpt_thr 可见关键点的阈值 [double = 0.5] - --pose_min_keypoints 有效姿势的最小关键点数量,-1表示上限(n_kpts/2) [int32 = -1] - --pose_bbox_scale 将关键点扩展到 bbox 的比例 [double = 1.25] - --pose_min_bbox_size 最小追踪尺寸,尺寸小于阈值的 bbox 将被剔除 [double = -1] - --pose_nms_thr 用于抑制重叠姿势的 NMS OKS/IOU阈值。 - 当多个姿态估计重叠到同一目标时非常有用 [double = 0.5] - --track_iou_thr 追踪 IOU 阈值 [double = 0.4] - --track_max_missing 最大追踪容错 [int32 = 10] -``` - -**API** **示例** - -\- [`pose_tracker.py`](https://github.com/open-mmlab/mmdeploy/blob/main/demo/python/pose_tracker.py) - -\- [`pose_tracker.cxx`](https://github.com/open-mmlab/mmdeploy/blob/main/demo/csrc/cpp/pose_tracker.cxx) - -## 📚 常用功能 [🔝](#-table-of-contents) - -### 🚀 模型测速 [🔝](#-table-of-contents) - -如果需要测试模型在部署框架下的推理速度,MMDeploy 提供了方便的 `tools/profiler.py` 脚本。 - -用户需要准备一个存放测试图片的文件夹`./test_images`,profiler 将随机从该目录下抽取图片用于模型测速。 - -```shell -python tools/profiler.py \ - configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \ - {RTMPOSE_PROJECT}/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ - ../test_images \ - --model {WORK_DIR}/end2end.onnx \ - --shape 256x192 \ - --device cpu \ - --warmup 50 \ - --num-iter 200 -``` - -测试结果如下: - -```shell -01/30 15:06:35 - mmengine - INFO - [onnxruntime]-70 times per count: 8.73 ms, 114.50 FPS -01/30 15:06:36 - mmengine - INFO - [onnxruntime]-90 times per count: 9.05 ms, 110.48 FPS -01/30 15:06:37 - mmengine - INFO - [onnxruntime]-110 times per count: 9.87 ms, 101.32 FPS -01/30 15:06:37 - mmengine - INFO - [onnxruntime]-130 times per count: 9.99 ms, 100.10 FPS -01/30 15:06:38 - mmengine - INFO - [onnxruntime]-150 times per count: 10.39 ms, 96.29 FPS -01/30 15:06:39 - mmengine - INFO - [onnxruntime]-170 times per count: 10.77 ms, 92.86 FPS -01/30 15:06:40 - mmengine - INFO - [onnxruntime]-190 times per count: 10.98 ms, 91.05 FPS -01/30 15:06:40 - mmengine - INFO - [onnxruntime]-210 times per count: 11.19 ms, 89.33 FPS -01/30 15:06:41 - mmengine - INFO - [onnxruntime]-230 times per count: 11.16 ms, 89.58 FPS -01/30 15:06:42 - mmengine - INFO - [onnxruntime]-250 times per count: 11.06 ms, 90.41 FPS ------ Settings: -+------------+---------+ -| batch size | 1 | -| shape | 256x192 | -| iterations | 200 | -| warmup | 50 | -+------------+---------+ ------ Results: -+--------+------------+---------+ -| Stats | Latency/ms | FPS | -+--------+------------+---------+ -| Mean | 11.060 | 90.412 | -| Median | 11.852 | 84.375 | -| Min | 7.812 | 128.007 | -| Max | 13.690 | 73.044 | -+--------+------------+---------+ -``` - -如果你希望详细了解 profiler 的更多参数设置与功能,可以前往 [Profiler Docs](https://mmdeploy.readthedocs.io/en/main/02-how-to-run/useful_tools.html#profiler) - -### 📊 精度验证 [🔝](#-table-of-contents) - -如果需要测试模型在部署框架下的推理精度,MMDeploy 提供了方便的 `tools/test.py` 脚本。 - -```shell -python tools/test.py \ - configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \ - {RTMPOSE_PROJECT}/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ - --model {PATH_TO_MODEL}/rtmpose_m.pth \ - --device cpu -``` - -详细内容请参考 [MMDeploys Docs](https://github.com/open-mmlab/mmdeploy/blob/main/docs/zh_cn/02-how-to-run/profile_model.md) - -## 📜 引用 [🔝](#-table-of-contents) - -如果您觉得 RTMPose 对您的研究工作有所帮助,请考虑引用它: - -```bibtex -@misc{https://doi.org/10.48550/arxiv.2303.07399, - doi = {10.48550/ARXIV.2303.07399}, - url = {https://arxiv.org/abs/2303.07399}, - author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, - keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer 
and information sciences}, - title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, - publisher = {arXiv}, - year = {2023}, - copyright = {Creative Commons Attribution 4.0 International} -} - -@misc{mmpose2020, - title={OpenMMLab Pose Estimation Toolbox and Benchmark}, - author={MMPose Contributors}, - howpublished = {\url{https://github.com/open-mmlab/mmpose}}, - year={2020} -} -``` +
    + +
    + +# RTMPose: Real-Time Multi-Person Pose Estimation toolkit based on MMPose + +> [RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose](https://arxiv.org/abs/2303.07399) + +
    + +[English](README.md) | 简体中文 + +
    + +______________________________________________________________________ + +## Abstract + +近年来,2D 姿态估计的研究在公开数据集上取得了出色的成绩,但是它在工业界的应用仍然受到笨重的模型参数和高推理延迟的影响。为了让前沿姿态估计算法在工业界落地,我们通过实验研究了多人姿态估计算法的五个方面:范式、骨干网络、定位算法、训练策略和部署推理,基于 MMPose 提出了一个高性能的实时多人姿态估计框架 **RTMPose**。我们的 RTMPose-m 模型在 COCO 上取得 **75.8%AP**,在 Intel i7-11700 CPU 上达到 **90+FPS**,在 NVIDIA GTX 1660 Ti GPU 上达到 **430+FPS**。我们同样验证了在算力有限的设备上做实时姿态估计,RTMPose-s 在移动端骁龙865芯片上可以达到 **COCO 72.2%AP**,**70+FPS**。在 MMDeploy 的帮助下,我们的项目支持 CPU、GPU、Jetson、移动端等多种部署环境。 + +![rtmpose_intro](https://user-images.githubusercontent.com/13503330/219269619-935499e5-bdd9-49ea-8104-3c7796dbd862.png) + +______________________________________________________________________ + +## 📄 Table of Contents + +- [🥳 🚀 最新进展](#--最新进展-) +- [📖 简介](#-简介-) +- [🙌 社区共建](#-社区共建-) +- [⚡ Pipeline 性能](#-pipeline-性能-) +- [📊 模型库](#-模型库-) +- [👀 可视化](#-可视化-) +- [😎 快速尝试](#-快速尝试-) +- [👨‍🏫 模型训练](#-模型训练-) +- [🏗️ 部署教程](#️-部署教程-) +- [📚 常用功能](#️-常用功能-) + - [🚀 模型测速](#-模型测速-) + - [📊 精度验证](#-精度验证-) +- [📜 引用](#-引用-) + +## 🥳 最新进展 [🔝](#-table-of-contents) + +- 2023 年 6 月: + - 发布混合数据集训练的 26 点 Body 模型。 +- 2023 年 5 月: + - 添加 [代码示例](./examples/) + - 发布混合数据集训练的 Hand, Face, Body 模型。 +- 2023 年 3 月:发布 RTMPose。RTMPose-m 取得 COCO 验证集 75.8 mAP,推理速度达到 430+ FPS 。 + +## 📖 简介 [🔝](#-table-of-contents) + +
    + +### ✨ 主要特性 + +- 🚀 **高精度,低延迟** + + | Model | AP(COCO) | CPU-FPS | GPU-FPS | + | :---: | :------: | :-----: | :-----: | + | t | 68.5 | 300+ | 940+ | + | s | 72.2 | 200+ | 710+ | + | m | 75.8 | 90+ | 430+ | + | l | 76.5 | 50+ | 280+ | + +- 🛠️ **易部署** + + - 详细的部署代码教程,手把手教你模型部署 + - MMDeploy 助力 + - 支持多种部署后端 + - ONNX + - TensorRT + - ncnn + - OpenVINO 等 + - 支持多种平台 + - Linux + - Windows + - NVIDIA Jetson + - ARM 等 + +- 🏗️ **为实际业务设计** + + - 提供多种 Pipeline 推理接口和 SDK + - Python + - C++ + - C# + - JAVA 等 + +## 🙌 社区共建 [🔝](#-table-of-contents) + +RTMPose 是一个长期优化迭代的项目,致力于业务场景下的高性能实时姿态估计算法的训练、优化和部署,因此我们十分期待来自社区的力量,欢迎分享不同业务场景中 RTMPose 的训练配置与技巧,助力更多的社区用户! + +✨ ✨ ✨ + +- **如果你是 RTMPose 的新用户,我们热切希望你能参与[这份问卷](https://uua478.fanqier.cn/f/xxmynrki)/[Google Questionnaire](https://docs.google.com/forms/d/e/1FAIpQLSfzwWr3eNlDzhU98qzk2Eph44Zio6hi5r0iSwfO9wSARkHdWg/viewform?usp=sf_link),这对于我们的工作非常重要!** + +✨ ✨ ✨ + +欢迎加入我们的社区交流群获得更多帮助: + +- 微信用户群 + +
    + + +- Discord Group: + - 🙌 https://discord.gg/raweFPmdzG 🙌 + +## ⚡ Pipeline 性能 [🔝](#-table-of-contents) + +**说明** + +- Pipeline 速度测试时开启了隔帧检测策略,默认检测间隔为 5 帧。 +- 环境配置: + - torch >= 1.7.1 + - onnxruntime 1.12.1 + - TensorRT 8.4.3.1 + - cuDNN 8.3.2 + - CUDA 11.3 + +| Detection Config | Pose Config | Input Size
    (Det/Pose) | Model AP
    (COCO) | Pipeline AP
    (COCO) | Params (M)
    (Det/Pose) | Flops (G)
    (Det/Pose) | ORT-Latency(ms)
    (i7-11700) | TRT-FP16-Latency(ms)
    (GTX 1660Ti) | Download | +| :------------------------------------------------------------------ | :---------------------------------------------------------------------------- | :---------------------------: | :---------------------: | :------------------------: | :---------------------------: | :--------------------------: | :--------------------------------: | :---------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [RTMDet-nano](./rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py) | [RTMPose-t](./rtmpose/body_2d_keypoint/rtmpose-t_8xb256-420e_coco-256x192.py) | 320x320
    256x192 | 40.3
    67.1 | 64.4 | 0.99
    3.34 | 0.31
    0.36 | 12.403 | 2.467 | [det](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth)
    [pose](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-aic-coco_pt-aic-coco_420e-256x192-cfc8f33d_20230126.pth) | +| [RTMDet-nano](./rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py) | [RTMPose-s](./rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py) | 320x320
    256x192 | 40.3
    71.1 | 68.5 | 0.99
    5.47 | 0.31
    0.68 | 16.658 | 2.730 | [det](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth)
    [pose](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-aic-coco_pt-aic-coco_420e-256x192-fcb2599b_20230126.pth) | +| [RTMDet-nano](./rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py) | [RTMPose-m](./rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py) | 320x320
    256x192 | 40.3
    75.3 | 73.2 | 0.99
    13.59 | 0.31
    1.93 | 26.613 | 4.312 | [det](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth)
    [pose](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth) | +| [RTMDet-nano](./rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py) | [RTMPose-l](./rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py) | 320x320
    256x192 | 40.3
    76.3 | 74.2 | 0.99
    27.66 | 0.31
    4.16 | 36.311 | 4.644 | [det](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth)
    [pose](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-256x192-f016ffe0_20230126.pth) | +| [RTMDet-m](./rtmdet/person/rtmdet_m_640-8xb32_coco-person.py) | [RTMPose-m](./rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py) | 640x640
    256x192 | 62.5
    75.3 | 75.7 | 24.66
    13.59 | 38.95
    1.93 | - | 6.923 | [det](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_m_8xb32-100e_coco-obj365-person-235e8209.pth)
    [pose](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth) | +| [RTMDet-m](./rtmdet/person/rtmdet_m_640-8xb32_coco-person.py) | [RTMPose-l](./rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py) | 640x640
    256x192 | 62.5
    76.3 | 76.6 | 24.66
    27.66 | 38.95
    4.16 | - | 7.204 | [det](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_m_8xb32-100e_coco-obj365-person-235e8209.pth)
    [pose](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-256x192-f016ffe0_20230126.pth) | + +## 📊 模型库 [🔝](#-table-of-contents) + +**说明** + +- 此处提供的模型采用了多数据集联合训练以提高性能,模型指标不适用于学术比较。 +- 表格中为开启了 Flip Test 的测试结果。 +- RTMPose 在更多公开数据集上的性能指标可以前往 [Model Zoo](https://mmpose.readthedocs.io/en/latest/model_zoo_papers/algorithms.html) 查看。 +- RTMPose 在更多硬件平台上的推理速度可以前往 [Benchmark](./benchmark/README_CN.md) 查看。 +- 如果你有希望我们支持的数据集,欢迎[联系我们](https://uua478.fanqier.cn/f/xxmynrki)/[Google Questionnaire](https://docs.google.com/forms/d/e/1FAIpQLSfzwWr3eNlDzhU98qzk2Eph44Zio6hi5r0iSwfO9wSARkHdWg/viewform?usp=sf_link)! + +### 人体 2d 关键点 + +#### 17 Keypoints + +- 关键点骨架定义遵循 [COCO](http://cocodataset.org/). 详情见 [meta info](/configs/_base_/datasets/coco.py). +- + +
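作为参考,上述 COCO 17 点骨架的标准关键点顺序可以用下面这个简单的 Python 列表表示(仅为示意,具体名称与顺序请以 [meta info](/configs/_base_/datasets/coco.py) 中的定义为准):

```Python
# COCO 17 点人体关键点的标准顺序(仅供参考,实际定义以 configs/_base_/datasets/coco.py 中的 meta info 为准)
COCO_17_KEYPOINTS = [
    'nose',
    'left_eye', 'right_eye',
    'left_ear', 'right_ear',
    'left_shoulder', 'right_shoulder',
    'left_elbow', 'right_elbow',
    'left_wrist', 'right_wrist',
    'left_hip', 'right_hip',
    'left_knee', 'right_knee',
    'left_ankle', 'right_ankle',
]
```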
    +AIC+COCO + +| Config | Input Size | AP
    (COCO) | PCK@0.1
    (Body8) | AUC
    (Body8) | Params
    (M) | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | ncnn-FP16-Latency
    (ms)
    (Snapdragon 865) | Download | +| :---------------------------------------------------------------------------: | :--------: | :---------------: | :---------------------: | :-----------------: | :----------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :-----------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------: | +| [RTMPose-t](./rtmpose/body_2d_keypoint/rtmpose-t_8xb256-420e_coco-256x192.py) | 256x192 | 68.5 | 91.28 | 63.38 | 3.34 | 0.36 | 3.20 | 1.06 | 9.02 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-tiny_simcc-aic-coco_pt-aic-coco_420e-256x192-cfc8f33d_20230126.pth) | +| [RTMPose-s](./rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 72.2 | 92.95 | 66.19 | 5.47 | 0.68 | 4.48 | 1.39 | 13.89 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-aic-coco_pt-aic-coco_420e-256x192-fcb2599b_20230126.pth) | +| [RTMPose-m](./rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 75.8 | 94.13 | 68.53 | 13.59 | 1.93 | 11.06 | 2.29 | 26.44 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth) | +| [RTMPose-l](./rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 76.5 | 94.35 | 68.98 | 27.66 | 4.16 | 18.85 | 3.46 | 45.37 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-256x192-f016ffe0_20230126.pth) | +| [RTMPose-m](./rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-384x288.py) | 384x288 | 77.0 | 94.32 | 69.85 | 13.72 | 4.33 | 24.78 | 3.66 | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-384x288-a62a0b32_20230228.pth) | +| [RTMPose-l](./rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-384x288.py) | 384x288 | 77.3 | 94.54 | 70.14 | 27.79 | 9.35 | - | 6.05 | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-384x288-97d6cb0f_20230228.pth) | + +
    + +
    +Body8 + +- `*` 代表模型在 7 个开源数据集上训练得到: + - [AI Challenger](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#aic) + - [MS COCO](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#coco) + - [CrowdPose](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#crowdpose) + - [MPII](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#mpii) + - [sub-JHMDB](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#sub-jhmdb-dataset) + - [Halpe](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_wholebody_keypoint.html#halpe) + - [PoseTrack18](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#posetrack18) +- `Body8` 代表除了以上提到的 7 个数据集,再加上 [OCHuman](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#ochuman) 合并后一起进行评测得到的指标。 + +| Config | Input Size | AP
    (COCO) | PCK@0.1
    (Body8) | AUC
    (Body8) | Params
    (M) | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | ncnn-FP16-Latency
    (ms)
    (Snapdragon 865) | Download | +| :-----------------------------------------------------------------------------: | :--------: | :---------------: | :---------------------: | :-----------------: | :----------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :-----------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------: | +| [RTMPose-t\*](./rtmpose/body_2d_keypoint/rtmpose-t_8xb256-420e_coco-256x192.py) | 256x192 | 65.9 | 91.44 | 63.18 | 3.34 | 0.36 | 3.20 | 1.06 | 9.02 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-body7_pt-body7_420e-256x192-026a1439_20230504.pth) | +| [RTMPose-s\*](./rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 69.7 | 92.45 | 65.15 | 5.47 | 0.68 | 4.48 | 1.39 | 13.89 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-body7_pt-body7_420e-256x192-acd4a1ef_20230504.pth) | +| [RTMPose-m\*](./rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 74.9 | 94.25 | 68.59 | 13.59 | 1.93 | 11.06 | 2.29 | 26.44 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-256x192-e48f03d0_20230504.pth) | +| [RTMPose-l\*](./rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 76.7 | 95.08 | 70.14 | 27.66 | 4.16 | 18.85 | 3.46 | 45.37 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-256x192-4dba18fc_20230504.pth) | +| [RTMPose-m\*](./rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-384x288.py) | 384x288 | 76.6 | 94.64 | 70.38 | 13.72 | 4.33 | 24.78 | 3.66 | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-384x288-65e718c4_20230504.pth) | +| [RTMPose-l\*](./rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-384x288.py) | 384x288 | 78.3 | 95.36 | 71.58 | 27.79 | 9.35 | - | 6.05 | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-384x288-3f5a1437_20230504.pth) | +| [RTMPose-x\*](./rtmpose/body_2d_keypoint/rtmpose-x_8xb256-700e_coco-384x288.py) | 384x288 | 78.8 | - | - | 49.43 | 17.22 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-x_simcc-body7_pt-body7_700e-384x288-71d7b7e9_20230629.pth) | + +
    + +#### 26 Keypoints + +- 关键点骨架定义遵循 [Halpe26](https://github.com/Fang-Haoshu/Halpe-FullBody/),详情见 [meta info](/configs/_base_/datasets/halpe26.py)。 +- +- 模型在 `Body8` 上进行训练和评估。 + +| Config | Input Size | PCK@0.1
    (Body8) | AUC
    (Body8) | Params(M) | FLOPS(G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | ncnn-FP16-Latency
    (ms)
    (Snapdragon 865) | Download | +| :---------------------------------------------------------------------------------------: | :--------: | :---------------------: | :-----------------: | :-------: | :------: | :-----------------------------------------: | :------------------------------------------------: | :-----------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------: | +| [RTMPose-t\*](./rtmpose/body_2d_keypoint/rtmpose-t_8xb1024-700e_body8-halpe26-256x192.py) | 256x192 | 91.89 | 66.35 | 3.51 | 0.37 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-body7_pt-body7-halpe26_700e-256x192-6020f8a6_20230605.pth) | +| [RTMPose-s\*](./rtmpose/body_2d_keypoint/rtmpose-s_8xb1024-700e_body8-halpe26-256x192.py) | 256x192 | 93.01 | 68.62 | 5.70 | 0.70 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-body7_pt-body7-halpe26_700e-256x192-7f134165_20230605.pth) | +| [RTMPose-m\*](./rtmpose/body_2d_keypoint/rtmpose-m_8xb512-700e_body8-halpe26-256x192.py) | 256x192 | 94.75 | 71.91 | 13.93 | 1.95 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.pth) | +| [RTMPose-l\*](./rtmpose/body_2d_keypoint/rtmpose-l_8xb512-700e_body8-halpe26-256x192.py) | 256x192 | 95.37 | 73.19 | 28.11 | 4.19 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7-halpe26_700e-256x192-2abb7558_20230605.pth) | +| [RTMPose-m\*](./rtmpose/body_2d_keypoint/rtmpose-m_8xb512-700e_body8-halpe26-384x288.py) | 384x288 | 95.15 | 73.56 | 14.06 | 4.37 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-384x288-89e6428b_20230605.pth) | +| [RTMPose-l\*](./rtmpose/body_2d_keypoint/rtmpose-l_8xb512-700e_body8-halpe26-384x288.py) | 384x288 | 95.56 | 74.38 | 28.24 | 9.40 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-body7_pt-body7-halpe26_700e-384x288-734182ce_20230605.pth) | +| [RTMPose-x\*](./rtmpose/body_2d_keypoint/rtmpose-x_8xb256-700e_body8-halpe26-384x288.py) | 384x288 | 95.74 | 74.82 | 50.00 | 17.29 | - | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-x_simcc-body7_pt-body7-halpe26_700e-384x288-7fb6e239_20230606.pth) | + +#### 模型剪枝 + +**说明** + +- 模型剪枝由 [MMRazor](https://github.com/open-mmlab/mmrazor) 提供 + +| Config | Input Size | AP
    (COCO) | Params
    (M) | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | ncnn-FP16-Latency
    (ms)
    (Snapdragon 865) | Download | +| :-----------------------: | :--------: | :---------------: | :----------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :-----------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | +| RTMPose-s-aic-coco-pruned | 256x192 | 69.4 | 3.43 | 0.35 | - | - | - | [Model](https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_finetune_rtmpose-s_8xb256-420e_aic-coco-256x192.pth) | + +更多信息,请参考 [GroupFisher Pruning for RTMPose](./rtmpose/pruning/README.md). + +### 人体全身 2d 关键点 (133 Keypoints) + +- 关键点骨架定义遵循 [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody/),详情见 [meta info](/configs/_base_/datasets/coco_wholebody.py)。 +- + +| Config | Input Size | Whole AP | Whole AR | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | Download | +| :------------------------------ | :--------: | :------: | :------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :-------------------------------: | +| [RTMPose-m](./rtmpose/wholebody_2d_keypoint/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 58.2 | 67.4 | 2.22 | 13.50 | 4.00 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody_pt-aic-coco_270e-256x192-cd5e845c_20230123.pth) | +| [RTMPose-l](./rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 61.1 | 70.0 | 4.52 | 23.41 | 5.67 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-256x192-6f206314_20230124.pth) | +| [RTMPose-l](./rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py) | 384x288 | 64.8 | 73.0 | 10.07 | 44.58 | 7.68 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-384x288-eaeb96c8_20230125.pth) | +| [RTMPose-x](./rtmpose/wholebody_2d_keypoint/rtmpose-x_8xb32-270e_coco-wholebody-384x288.py) | 384x288 | 65.3 | 73.3 | 18.1 | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-x_simcc-coco-wholebody_pt-body7_270e-384x288-401dfc90_20230629.pth) | + +### 动物 2d 关键点 (17 Keypoints) + +- 关键点骨架定义遵循 [AP-10K](https://github.com/AlexTheBad/AP-10K/),详情见 [meta info](/configs/_base_/datasets/ap10k.py)。 +- + +| Config | Input Size | AP
    (AP10K) | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | Download | +| :----------------------------: | :--------: | :----------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :------------------------------: | +| [RTMPose-m](./rtmpose/animal_2d_keypoint/rtmpose-m_8xb64-210e_ap10k-256x256.py) | 256x256 | 72.2 | 2.57 | 14.157 | 2.404 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-ap10k_pt-aic-coco_210e-256x256-7a041aa1_20230206.pth) | + +### 脸部 2d 关键点 (106 Keypoints) + +- 关键点骨架定义遵循 [LaPa](https://github.com/JDAI-CV/lapa-dataset),详情见 [meta info](/configs/_base_/datasets/lapa.py)。 +- + +
    +Face6 + +- `Face6` and `*` 代表模型在 6 个开源数据集上训练得到: + - [COCO-Wholebody-Face](https://github.com/jin-s13/COCO-WholeBody/) + - [WFLW](https://wywu.github.io/projects/LAB/WFLW.html) + - [300W](https://ibug.doc.ic.ac.uk/resources/300-W/) + - [COFW](http://www.vision.caltech.edu/xpburgos/ICCV13/) + - [Halpe](https://github.com/Fang-Haoshu/Halpe-FullBody/) + - [LaPa](https://github.com/JDAI-CV/lapa-dataset) + +| Config | Input Size | NME
    (LaPa) | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | Download | +| :----------------------------: | :--------: | :----------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :------------------------------: | +| [RTMPose-t\*](./rtmpose/face_2d_keypoint/rtmpose-t_8xb256-120e_lapa-256x256.py) | 256x256 | 1.67 | 0.652 | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-t_simcc-face6_pt-in1k_120e-256x256-df79d9a5_20230529.pth) | +| [RTMPose-s\*](./rtmpose/face_2d_keypoint/rtmpose-s_8xb256-120e_lapa-256x256.py) | 256x256 | 1.59 | 1.119 | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-s_simcc-face6_pt-in1k_120e-256x256-d779fdef_20230529.pth) | +| [RTMPose-m\*](./rtmpose/face_2d_keypoint/rtmpose-m_8xb256-120e_lapa-256x256.py) | 256x256 | 1.44 | 2.852 | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-face6_pt-in1k_120e-256x256-72a37400_20230529.pth) | + +
    + +### 手部 2d 关键点 (21 Keypoints) + +- 关键点骨架定义遵循 [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody/),详情见 [meta info](/configs/_base_/datasets/coco_wholebody_hand.py)。 +- + +| Detection Config | Input Size | Model AP
    (OneHand10K) | Flops
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | Download | +| :---------------------------: | :--------: | :---------------------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :--------------------: | +| [RTMDet-nano (试用)](./rtmdet/hand/rtmdet_nano_320-8xb32_hand.py) | 320x320 | 76.0 | 0.31 | - | - | [Det Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmdet_nano_8xb32-300e_hand-267f9c8f.pth) | + +
    +Hand5 + +- `Hand5` and `*` 代表模型在 5 个开源数据集上训练得到: + - [COCO-Wholebody-Hand](https://github.com/jin-s13/COCO-WholeBody/) + - [OneHand10K](https://www.yangangwang.com/papers/WANG-MCC-2018-10.html) + - [FreiHand2d](https://lmb.informatik.uni-freiburg.de/projects/freihand/) + - [RHD2d](https://lmb.informatik.uni-freiburg.de/resources/datasets/RenderedHandposeDataset.en.html) + - [Halpe](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_wholebody_keypoint.html#halpe) + +| Config | Input Size | PCK@0.2
    (COCO-Wholebody-Hand) | PCK@0.2
    (Hand5) | AUC
    (Hand5) | FLOPS
    (G) | ORT-Latency
    (ms)
    (i7-11700) | TRT-FP16-Latency
    (ms)
    (GTX 1660Ti) | Download | +| :----------------------------------------------------------------------------------------------------------: | :--------: | :-----------------------------------: | :---------------------: | :-----------------: | :---------------: | :-----------------------------------------: | :------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------: | +| [RTMPose-m\*
    (试用)](./rtmpose/hand_2d_keypoint/rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py) | 256x256 | 81.5 | 96.4 | 83.9 | 2.581 | - | - | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-hand5_pt-aic-coco_210e-256x256-74fb594_20230320.pth) | + +
+ +### 预训练模型 + +我们提供了 UDP 预训练的 CSPNeXt 模型参数,训练配置请参考 [pretrain_cspnext_udp 目录](./rtmpose/pretrain_cspnext_udp/),加载方式可参考下方的配置示意。 + +
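下面是一个加载这类 UDP 预训练 CSPNeXt 骨干网络权重的最小配置示意(基于 OpenMMLab 的 `init_cfg` 机制;checkpoint 链接取自下表,`_scope_`、`prefix` 等字段取值为假设示例,实际写法请以上述 pretrain_cspnext_udp 目录中的配置为准):

```Python
# 最小示意:在训练配置中通过 init_cfg 为骨干网络加载 UDP 预训练权重
# 注意:_scope_ 与 prefix 的取值仅为假设示例,请以官方配置为准
model = dict(
    backbone=dict(
        _scope_='mmdet',         # 假设:CSPNeXt 骨干网络注册在 MMDetection 中
        type='CSPNeXt',
        init_cfg=dict(
            type='Pretrained',
            prefix='backbone.',  # 假设:只加载 checkpoint 中 backbone 部分的参数
            checkpoint='https://download.openmmlab.com/mmpose/v1/projects/rtmpose/'
            'cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth')))
```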
    +AIC+COCO + +| Model | Input Size | Params
    (M) | Flops
    (G) | AP
    (GT) | AR
    (GT) | Download | +| :----------: | :--------: | :----------------: | :---------------: | :-------------: | :-------------: | :---------------------------------------------------------------------------------------------------------------: | +| CSPNeXt-tiny | 256x192 | 6.03 | 1.43 | 65.5 | 68.9 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/cspnext-tiny_udp-aic-coco_210e-256x192-cbed682d_20230130.pth) | +| CSPNeXt-s | 256x192 | 8.58 | 1.78 | 70.0 | 73.3 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth) | +| CSPNeXt-m | 256x192 | 17.53 | 3.05 | 74.8 | 77.7 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth) | +| CSPNeXt-l | 256x192 | 32.44 | 5.32 | 77.2 | 79.9 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth) | + +
    + +
    +Body8 + +- `*` 代表模型在 7 个开源数据集上训练得到: + - [AI Challenger](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#aic) + - [MS COCO](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#coco) + - [CrowdPose](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#crowdpose) + - [MPII](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#mpii) + - [sub-JHMDB](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#sub-jhmdb-dataset) + - [Halpe](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_wholebody_keypoint.html#halpe) + - [PoseTrack18](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#posetrack18) +- `Body8` 代表除了以上提到的 7 个数据集,再加上 [OCHuman](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#ochuman) 合并后一起进行评测得到的指标。 + +| Model | Input Size | Params
    (M) | Flops
    (G) | AP
    (COCO) | PCK@0.2
    (Body8) | AUC
    (Body8) | Download | +| :------------: | :--------: | :----------------: | :---------------: | :---------------: | :---------------------: | :-----------------: | :--------------------------------------------------------------------------------: | +| CSPNeXt-tiny\* | 256x192 | 6.03 | 1.43 | 65.9 | 96.34 | 63.80 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-tiny_udp-body7_210e-256x192-a3775292_20230504.pth) | +| CSPNeXt-s\* | 256x192 | 8.58 | 1.78 | 68.7 | 96.59 | 64.92 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-s_udp-body7_210e-256x192-8c9ccbdb_20230504.pth) | +| CSPNeXt-m\* | 256x192 | 17.53 | 3.05 | 73.7 | 97.42 | 68.19 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-body7_210e-256x192-e0c9327b_20230504.pth) | +| CSPNeXt-l\* | 256x192 | 32.44 | 5.32 | 75.7 | 97.76 | 69.57 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-body7_210e-256x192-5e9558ef_20230504.pth) | +| CSPNeXt-m\* | 384x288 | 17.53 | 6.86 | 75.8 | 97.60 | 70.18 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-body7_210e-384x288-b9bc2b57_20230504.pth) | +| CSPNeXt-l\* | 384x288 | 32.44 | 11.96 | 77.2 | 97.89 | 71.23 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-l_udp-body7_210e-384x288-b15bc30d_20230504.pth) | +| CSPNeXt-x\* | 384x288 | 54.92 | 19.96 | 78.1 | 98.00 | 71.79 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-x_udp-body7_210e-384x288-d28b58e6_20230529.pth) | + +
    + +#### ImageNet + +我们提供了 ImageNet 分类训练的 CSPNeXt 模型参数,更多细节请参考 [RTMDet](https://github.com/open-mmlab/mmdetection/blob/latest/configs/rtmdet/README.md#classification)。 + +| Model | Input Size | Params
    (M) | Flops
    (G) | Top-1 (%) | Top-5 (%) | Download | +| :----------: | :--------: | :----------------: | :---------------: | :-------: | :-------: | :---------------------------------------------------------------------------------------------------------------------------: | +| CSPNeXt-tiny | 224x224 | 2.73 | 0.34 | 69.44 | 89.45 | [Model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e-3a2dd350.pth) | +| CSPNeXt-s | 224x224 | 4.89 | 0.66 | 74.41 | 92.23 | [Model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e-ea671761.pth) | +| CSPNeXt-m | 224x224 | 13.05 | 1.93 | 79.27 | 94.79 | [Model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth) | +| CSPNeXt-l | 224x224 | 27.16 | 4.19 | 81.30 | 95.62 | [Model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-l_8xb256-rsb-a1-600e_in1k-6a760974.pth) | +| CSPNeXt-x | 224x224 | 48.85 | 7.76 | 82.10 | 95.69 | [Model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-x_8xb256-rsb-a1-600e_in1k-b3f78edd.pth) | + +## 👀 可视化 [🔝](#-table-of-contents) + +
    + + +
    + +## 😎 快速尝试 [🔝](#-table-of-contents) + +我们提供了两种途径来让用户尝试 RTMPose 模型: + +- MMPose demo 脚本 +- MMDeploy SDK 预编译包 (推荐,速度提升6-10倍) + +### MMPose demo 脚本 + +通过 MMPose 提供的 demo 脚本可以基于 Pytorch 快速进行[模型推理](https://mmpose.readthedocs.io/en/latest/user_guides/inference.html)和效果验证。 + +**提示:** + +- 基于 Pytorch 推理并不能达到 RTMPose 模型的最大推理速度,只用于模型效果验证。 +- 输入模型路径可以是本地路径,也可以是下载链接。 + +```shell +# 前往 mmpose 目录 +cd ${PATH_TO_MMPOSE} + +# RTMDet 与 RTMPose 联合推理 +# 输入模型路径可以是本地路径,也可以是下载链接。 +python demo/topdown_demo_with_mmdet.py \ + projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ + projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ + --input {YOUR_TEST_IMG_or_VIDEO} \ + --show + +# 摄像头推理 +# 输入模型路径可以是本地路径,也可以是下载链接。 +python demo/topdown_demo_with_mmdet.py \ + projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ + projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ + --input webcam \ + --show +``` + +效果展示: + +![topdown_inference_with_rtmdet](https://user-images.githubusercontent.com/13503330/220005020-06bdf37f-6817-4681-a2c8-9dd55e4fbf1e.png) + +### MMDeploy SDK 预编译包 (推荐) + +MMDeploy 提供了预编译的 SDK,用于对 RTMPose 项目进行 Pipeline 推理,其中推理所用的模型为 SDK 版本。 + +- 所有的模型必须经过 `tools/deploy.py` 导出后才能使用 PoseTracker 进行推理。 +- 导出 SDK 版模型的教程见 [SDK 推理](#%EF%B8%8F-sdk-推理),推理的详细参数设置见 [Pipeline 推理](#-pipeline-推理)。 +- 你可以从 [硬件模型库](https://platform.openmmlab.com/deploee) 直接下载 SDK 版模型(ONNX、 TRT、ncnn 等)。 +- 同时我们也支持 [在线模型转换](https://platform.openmmlab.com/deploee/task-convert-list)。 + +#### Linux\\ + +说明: + +- GCC 版本需大于 7.5 +- cmake 版本需大于 3.20 + +##### Python 推理 + +1. 安装 mmdeploy_runtime 或者 mmdeploy_runtime_gpu + +```shell +# for onnxruntime +pip install mmdeploy-runtime + +# for onnxruntime-gpu / tensorrt +pip install mmdeploy-runtime-gpu +``` + +2. 下载预编译包: + +```shell +# onnxruntime +# for ubuntu +wget -c https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cxx11abi.tar.gz +# 解压并将 third_party 中第三方推理库的动态库添加到 PATH + +# for centos7 and lower +wget -c https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64.tar.gz +# 解压并将 third_party 中第三方推理库的动态库添加到 PATH + +# onnxruntime-gpu / tensorrt +# for ubuntu +wget -c https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cxx11abi-cuda11.3.tar.gz +# 解压并将 third_party 中第三方推理库的动态库添加到 PATH + +# for centos7 and lower +wget -c https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cuda11.3.tar.gz +# 解压并将 third_party 中第三方推理库的动态库添加到 PATH +``` + +3. 下载 sdk 模型并解压到 `./example/python` 下。(该模型只用于演示,如需其他模型,请参考 [SDK 推理](#%EF%B8%8F-sdk-推理)) + +```shell +# rtmdet-nano + rtmpose-m for cpu sdk +wget https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-cpu.zip + +unzip rtmpose-cpu.zip +``` + +4. 
使用 `pose_tracker.py` 进行推理: + +```shell +# 进入 ./example/python + +# 请传入模型目录,而不是模型文件 +# 格式: +# python pose_tracker.py cpu {det work-dir} {pose work-dir} {your_video.mp4} + +# 示例: +python pose_tracker.py cpu rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_video.mp4 + +# 摄像头 +python pose_tracker.py cpu rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ 0 +``` + +##### ONNX + +```shell +# 下载预编译包 +wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cxx11abi.tar.gz + +# 解压文件 +tar -xzvf mmdeploy-1.0.0-linux-x86_64-cxx11abi.tar.gz + +# 切换到 sdk 目录 +cd mmdeploy-1.0.0-linux-x86_64-cxx11abi + +# 设置环境变量 +source set_env.sh + +# 如果系统中没有安装 opencv 3+,请执行以下命令。如果已安装,可略过 +bash install_opencv.sh + +# 编译可执行程序 +bash build_sdk.sh + +# 图片推理 +# 请传入模型目录,而不是模型文件 +./bin/det_pose rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_img.jpg --device cpu + +# 视频推理 +# 请传入模型目录,而不是模型文件 +./bin/pose_tracker rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_video.mp4 --device cpu + +# 摄像头推理 +# 请传入模型目录,而不是模型文件 +./bin/pose_tracker rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ 0 --device cpu +``` + +##### TensorRT + +```shell +# 下载预编译包 +wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cxx11abi-cuda11.3.tar.gz + +# 解压文件 +tar -xzvf mmdeploy-1.0.0-linux-x86_64-cxx11abi-cuda11.3.tar.gz + +# 切换到 sdk 目录 +cd mmdeploy-1.0.0-linux-x86_64-cxx11abi-cuda11.3 + +# 设置环境变量 +source set_env.sh + +# 如果系统中没有安装 opencv 3+,请执行以下命令。如果已安装,可略过 +bash install_opencv.sh + +# 编译可执行程序 +bash build_sdk.sh + +# 图片推理 +# 请传入模型目录,而不是模型文件 +./bin/det_pose rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_img.jpg --device cuda + +# 视频推理 +# 请传入模型目录,而不是模型文件 +./bin/pose_tracker rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_video.mp4 --device cuda + +# 摄像头推理 +# 请传入模型目录,而不是模型文件 +./bin/pose_tracker rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ 0 --device cuda +``` + +详细参数设置见 [Pipeline 推理](#-pipeline-推理)。 + +#### Windows + +##### Python 推理 + +1. 安装 mmdeploy_runtime 或者 mmdeploy_runtime_gpu + +```shell +# for onnxruntime +pip install mmdeploy-runtime +# 下载 [sdk](https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-windows-amd64.zip) 并将 third_party 中第三方推理库的动态库添加到 PATH + +# for onnxruntime-gpu / tensorrt +pip install mmdeploy-runtime-gpu +# 下载 [sdk](https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-windows-amd64-cuda11.3.zip) 并将 third_party 中第三方推理库的动态库添加到 PATH +``` + +2. 下载 sdk 模型并解压到 `./example/python` 下。(该模型只用于演示,如需其他模型,请参考 [SDK 推理](#%EF%B8%8F-sdk-推理)) + +```shell +# rtmdet-nano + rtmpose-m for cpu sdk +wget https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-cpu.zip + +unzip rtmpose-cpu.zip +``` + +3. 使用 `pose_tracker.py` 进行推理: + +```shell +# 进入 ./example/python +# 请传入模型目录,而不是模型文件 +python pose_tracker.py cpu rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_video.mp4 + +# 摄像头 +# 请传入模型目录,而不是模型文件 +python pose_tracker.py cpu rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ 0 +``` + +##### 可执行文件推理 + +1. 安装 [cmake](https://cmake.org/download/)。 +2. 前往 [mmdeploy](https://github.com/open-mmlab/mmdeploy/releases) 下载 win 预编译包。 +3. 解压文件,进入 sdk 目录。 +4. 使用管理员权限打开 PowerShell,执行以下命令: + +```shell +set-ExecutionPolicy RemoteSigned +``` + +5. 安装 OpenCV: + +```shell +# in sdk folder: +.\install_opencv.ps1 +``` + +6. 配置环境变量: + +```shell +# in sdk folder: +. .\set_env.ps1 +``` + +7. 
编译 sdk: + +```shell +# in sdk folder: +# 如果你通过 .\install_opencv.ps1 安装 opencv,直接运行如下指令: +.\build_sdk.ps1 +# 如果你自行安装了 opencv,需要指定 OpenCVConfig.cmake 的路径: +.\build_sdk.ps1 "path/to/folder/of/OpenCVConfig.cmake" +``` + +8. 可执行文件会在如下路径生成: + +```shell +example\cpp\build\Release +``` + +### MMPose demo 脚本 + +通过 MMPose 提供的 demo 脚本可以基于 Pytorch 快速进行[模型推理](https://mmpose.readthedocs.io/en/latest/user_guides/inference.html)和效果验证。 + +**提示:** + +- 基于 Pytorch 推理并不能达到 RTMPose 模型的真实推理速度,只用于模型效果验证。 + +```shell +# 前往 mmpose 目录 +cd ${PATH_TO_MMPOSE} + +# RTMDet 与 RTMPose 联合推理 +python demo/topdown_demo_with_mmdet.py \ + projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ + {PATH_TO_CHECKPOINT}/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ + projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ + {PATH_TO_CHECKPOINT}/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ + --input {YOUR_TEST_IMG_or_VIDEO} \ + --show + +# 摄像头推理 +python demo/topdown_demo_with_mmdet.py \ + projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ + {PATH_TO_CHECKPOINT}/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ + projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ + {PATH_TO_CHECKPOINT}/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ + --input webcam \ + --show +``` + +效果展示: + +![topdown_inference_with_rtmdet](https://user-images.githubusercontent.com/13503330/220005020-06bdf37f-6817-4681-a2c8-9dd55e4fbf1e.png) + +## 👨‍🏫 模型训练 [🔝](#-table-of-contents) + +请参考 [训练与测试](https://mmpose.readthedocs.io/en/latest/user_guides/train_and_test.html) 进行 RTMPose 的训练。 + +**提示**: + +- 当用户的数据集较小时请根据情况缩小 `batch_size` 和 `base_lr`。 +- 模型选择 + - m:推荐首选使用 + - t/s:适用于极端低算力的移动设备,或对推理速度要求严格的场景 + - l:适用于算力较强、对速度不敏感的场景 + +## 🏗️ 部署教程 [🔝](#-table-of-contents) + +本教程将展示如何通过 [MMDeploy](https://github.com/open-mmlab/mmdeploy/tree/main) 部署 RTMPose 项目。 + +- 你可以从 [硬件模型库](https://platform.openmmlab.com/deploee) 直接下载 SDK 版模型(ONNX、 TRT、ncnn 等)。 +- 同时我们也支持 [在线模型转换](https://platform.openmmlab.com/deploee/task-convert-list)。 + +### 🧩 安装 + +在开始部署之前,首先你需要确保正确安装了 MMPose, MMDetection, MMDeploy,相关安装教程如下: + +- [安装 MMPose 与 MMDetection](https://mmpose.readthedocs.io/zh_CN/latest/installation.html) +- [安装 MMDeploy](https://mmdeploy.readthedocs.io/zh_CN/latest/04-supported-codebases/mmpose.html) + +根据部署后端的不同,有的后端需要对自定义算子进行编译,请根据需求前往对应的文档确保环境搭建正确: + +- [ONNX](https://mmdeploy.readthedocs.io/zh_CN/latest/05-supported-backends/onnxruntime.html) +- [TensorRT](https://mmdeploy.readthedocs.io/zh_CN/latest/05-supported-backends/tensorrt.html) +- [OpenVINO](https://mmdeploy.readthedocs.io/zh_CN/latest/05-supported-backends/openvino.html) +- [更多](https://github.com/open-mmlab/mmdeploy/tree/main/docs/en/05-supported-backends) + +### 🛠️ 模型转换 + +在完成安装之后,你就可以开始模型部署了。通过 MMDeploy 提供的 `tools/deploy.py` 可以方便地将 Pytorch 模型转换到不同的部署后端。 + +我们本节演示将 RTMDet 和 RTMPose 模型导出为 ONNX 和 TensorRT 格式,如果你希望了解更多内容请前往 [MMDeploy 文档](https://mmdeploy.readthedocs.io/zh_CN/latest/02-how-to-run/convert_model.html)。 + +- ONNX 配置 + + \- RTMDet:[`detection_onnxruntime_static.py`](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmdet/detection/detection_onnxruntime_static.py) + + \- RTMPose:[`pose-detection_simcc_onnxruntime_dynamic.py`](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py) + +- TensorRT 配置 + + \- 
RTMDet:[`detection_tensorrt_static-320x320.py`](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmdet/detection/detection_tensorrt_static-320x320.py) + + \- RTMPose:[`pose-detection_simcc_tensorrt_dynamic-256x192.py`](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmpose/pose-detection_simcc_tensorrt_dynamic-256x192.py) + +如果你需要对部署配置进行修改,请参考 [MMDeploy config tutorial](https://mmdeploy.readthedocs.io/zh_CN/latest/02-how-to-run/write_config.html). + +本教程中使用的文件结构如下: + +```shell +|----mmdeploy +|----mmdetection +|----mmpose +``` + +#### ONNX + +运行如下命令: + +```shell +# 前往 mmdeploy 目录 +cd ${PATH_TO_MMDEPLOY} + +# 转换 RTMDet +# 输入模型路径可以是本地路径,也可以是下载链接。 +python tools/deploy.py \ + configs/mmdet/detection/detection_onnxruntime_static.py \ + ../mmpose/projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ + demo/resources/human-pose.jpg \ + --work-dir rtmpose-ort/rtmdet-nano \ + --device cpu \ + --show \ + --dump-info # 导出 sdk info + +# 转换 RTMPose +# 输入模型路径可以是本地路径,也可以是下载链接。 +python tools/deploy.py \ + configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \ + ../mmpose/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ + demo/resources/human-pose.jpg \ + --work-dir rtmpose-ort/rtmpose-m \ + --device cpu \ + --show \ + --dump-info # 导出 sdk info +``` + +默认导出模型文件为 `{work-dir}/end2end.onnx` + +#### TensorRT + +运行如下命令: + +```shell +# 前往 mmdeploy 目录 +cd ${PATH_TO_MMDEPLOY} + +# 转换 RTMDet +# 输入模型路径可以是本地路径,也可以是下载链接。 +python tools/deploy.py \ + configs/mmdet/detection/detection_tensorrt_static-320x320.py \ + ../mmpose/projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ + demo/resources/human-pose.jpg \ + --work-dir rtmpose-trt/rtmdet-nano \ + --device cuda:0 \ + --show \ + --dump-info # 导出 sdk info + +# 转换 RTMPose +# 输入模型路径可以是本地路径,也可以是下载链接。 +python tools/deploy.py \ + configs/mmpose/pose-detection_simcc_tensorrt_dynamic-256x192.py \ + ../mmpose/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ + demo/resources/human-pose.jpg \ + --work-dir rtmpose-trt/rtmpose-m \ + --device cuda:0 \ + --show \ + --dump-info # 导出 sdk info +``` + +默认导出模型文件为 `{work-dir}/end2end.engine` + +🎊 如果模型顺利导出,你将会看到样例图片上的检测结果: + +![convert_models](https://user-images.githubusercontent.com/13503330/217726963-7815dd01-561a-4605-b0c6-07b6fe1956c3.png) + +#### 高级设置 + +如果需要使用 TensorRT-FP16,你可以通过修改以下配置开启: + +```Python +# in MMDeploy config +backend_config = dict( + type='tensorrt', + common_config=dict( + fp16_mode=True # 打开 fp16 + )) +``` + +### 🕹️ SDK 推理 + +要进行 Pipeline 推理,需要先用 MMDeploy 导出 SDK 版本的 det 和 pose 模型,只需要在参数中加上`--dump-info`。 + +此处以 onnxruntime 的 cpu 模型为例,运行如下命令: + +```shell +# RTMDet +# 输入模型路径可以是本地路径,也可以是下载链接。 +python tools/deploy.py \ + configs/mmdet/detection/detection_onnxruntime_dynamic.py \ + ../mmpose/projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \ + demo/resources/human-pose.jpg 
\ + --work-dir rtmpose-ort/rtmdet-nano \ + --device cpu \ + --show \ + --dump-info # 导出 sdk info + +# RTMPose +# 输入模型路径可以是本地路径,也可以是下载链接。 +python tools/deploy.py \ + configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \ + ../mmpose/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ + https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \ + demo/resources/human-pose.jpg \ + --work-dir rtmpose-ort/rtmpose-m \ + --device cpu \ + --show \ + --dump-info # 导出 sdk info +``` + +默认会导出三个 json 文件: + +```shell +|----sdk + |----end2end.onnx # ONNX model + |----end2end.engine # TensorRT engine file + + |----pipeline.json # + |----deploy.json # json files for the SDK + |----detail.json # +``` + +#### Python API + +```Python +# Copyright (c) OpenMMLab. All rights reserved. +import argparse + +import cv2 +import numpy as np +from mmdeploy_runtime import PoseDetector + +def parse_args(): + parser = argparse.ArgumentParser( + description='show how to use sdk python api') + parser.add_argument('device_name', help='name of device, cuda or cpu') + parser.add_argument( + 'model_path', + help='path of mmdeploy SDK model dumped by model converter') + parser.add_argument('image_path', help='path of an image') + parser.add_argument( + '--bbox', + default=None, + nargs='+', + type=int, + help='bounding box of an object in format (x, y, w, h)') + args = parser.parse_args() + return args + +def main(): + args = parse_args() + + img = cv2.imread(args.image_path) + + detector = PoseDetector( + model_path=args.model_path, device_name=args.device_name, device_id=0) + + if args.bbox is None: + result = detector(img) + else: + # converter (x, y, w, h) -> (left, top, right, bottom) + print(args.bbox) + bbox = np.array(args.bbox, dtype=int) + bbox[2:] += bbox[:2] + result = detector(img, bbox) + print(result) + + _, point_num, _ = result.shape + points = result[:, :, :2].reshape(point_num, 2) + for [x, y] in points.astype(int): + cv2.circle(img, (x, y), 1, (0, 255, 0), 2) + + cv2.imwrite('output_pose.png', img) + +if __name__ == '__main__': + main() +``` + +#### C++ API + +```C++ +#include "mmdeploy/detector.hpp" + +#include "opencv2/imgcodecs/imgcodecs.hpp" +#include "utils/argparse.h" +#include "utils/visualize.h" + +DEFINE_ARG_string(model, "Model path"); +DEFINE_ARG_string(image, "Input image path"); +DEFINE_string(device, "cpu", R"(Device name, e.g. 
"cpu", "cuda")"); +DEFINE_string(output, "detector_output.jpg", "Output image path"); + +DEFINE_double(det_thr, .5, "Detection score threshold"); + +int main(int argc, char* argv[]) { + if (!utils::ParseArguments(argc, argv)) { + return -1; + } + + cv::Mat img = cv::imread(ARGS_image); + if (img.empty()) { + fprintf(stderr, "failed to load image: %s\n", ARGS_image.c_str()); + return -1; + } + + // construct a detector instance + mmdeploy::Detector detector(mmdeploy::Model{ARGS_model}, mmdeploy::Device{FLAGS_device}); + + // apply the detector, the result is an array-like class holding references to + // `mmdeploy_detection_t`, will be released automatically on destruction + mmdeploy::Detector::Result dets = detector.Apply(img); + + // visualize + utils::Visualize v; + auto sess = v.get_session(img); + int count = 0; + for (const mmdeploy_detection_t& det : dets) { + if (det.score > FLAGS_det_thr) { // filter bboxes + sess.add_det(det.bbox, det.label_id, det.score, det.mask, count++); + } + } + + if (!FLAGS_output.empty()) { + cv::imwrite(FLAGS_output, sess.get()); + } + + return 0; +} +``` + +对于 C++ API 示例,请将 MMDeploy 加入到 CMake 项目中: + +```CMake +find_package(MMDeploy REQUIRED) +target_link_libraries(${name} PRIVATE mmdeploy ${OpenCV_LIBS}) +``` + +#### 其他语言 + +- [C# API 示例](https://github.com/open-mmlab/mmdeploy/tree/main/demo/csharp) +- [JAVA API 示例](https://github.com/open-mmlab/mmdeploy/tree/main/demo/java) + +### 🚀 Pipeline 推理 + +#### 图片推理 + +如果用户有跟随 MMDeploy 安装教程进行正确编译,在 `mmdeploy/build/bin/` 路径下会看到 `det_pose` 的可执行文件。 + +```shell +# 前往 mmdeploy 目录 +cd ${PATH_TO_MMDEPLOY}/build/bin/ + +# 单张图片推理 +./det_pose rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_img.jpg --device cpu + +required arguments: + det_model Detection 模型路径 [string] + pose_model Pose 模型路径 [string] + image 输入图片路径 [string] + +optional arguments: + --device 推理设备 "cpu", "cuda" [string = "cpu"] + --output 导出图片路径 [string = "det_pose_output.jpg"] + --skeleton 骨架定义文件路径,或使用预定义骨架: + "coco" [string = "coco", "coco-wholoebody"] + --det_label 用于姿势估计的检测标签 [int32 = 0] + (0 在 coco 中对应 person) + --det_thr 检测分数阈值 [double = 0.5] + --det_min_bbox_size 最小检测框大小 [double = -1] + --pose_thr 关键点置信度阈值 [double = 0] +``` + +**API** **示例** + +\- [`det_pose.py`](https://github.com/open-mmlab/mmdeploy/blob/main/demo/python/det_pose.py) + +\- [`det_pose.cxx`](https://github.com/open-mmlab/mmdeploy/blob/main/demo/csrc/cpp/det_pose.cxx) + +#### 视频推理 + +如果用户有跟随 MMDeploy 安装教程进行正确编译,在 `mmdeploy/build/bin/` 路径下会看到 `pose_tracker` 的可执行文件。 + +- 将 `input` 输入 `0` 可以使用摄像头推理 + +```shell +# 前往 mmdeploy 目录 +cd ${PATH_TO_MMDEPLOY}/build/bin/ + +# 视频推理 +./pose_tracker rtmpose-ort/rtmdet-nano/ rtmpose-ort/rtmpose-m/ your_video.mp4 --device cpu + +required arguments: + det_model Detection 模型路径 [string] + pose_model Pose 模型路径 [string] + input 输入图片路径或摄像头序号 [string] + +optional arguments: + --device 推理设备 "cpu", "cuda" [string = "cpu"] + --output 导出视频路径 [string = ""] + --output_size 输出视频帧的长边 [int32 = 0] + --flip 设置为1,用于水平翻转输入 [int32 = 0] + --show 使用`cv::imshow`时,传递给`cv::waitKey`的延迟; + -1: 关闭 [int32 = 1] + --skeleton 骨架数据的路径或预定义骨架的名称: + "coco", "coco-wholebody" [string = "coco"] + --background 导出视频背景颜色, "default": 原图, "black": + 纯黑背景 [string = "default"] + --det_interval 检测间隔 [int32 = 1] + --det_label 用于姿势估计的检测标签 [int32 = 0] + (0 在 coco 中对应 person) + --det_thr 检测分数阈值 [double = 0.5] + --det_min_bbox_size 最小检测框大小 [double = -1] + --det_nms_thr NMS IOU阈值,用于合并检测到的bboxes和 + 追踪到的目标的 bboxes [double = 0.7] + --pose_max_num_bboxes 每一帧用于姿势估计的 bboxes 的最大数量 + [int32 = -1] + 
--pose_kpt_thr 可见关键点的阈值 [double = 0.5] + --pose_min_keypoints 有效姿势的最小关键点数量,-1表示上限(n_kpts/2) [int32 = -1] + --pose_bbox_scale 将关键点扩展到 bbox 的比例 [double = 1.25] + --pose_min_bbox_size 最小追踪尺寸,尺寸小于阈值的 bbox 将被剔除 [double = -1] + --pose_nms_thr 用于抑制重叠姿势的 NMS OKS/IOU阈值。 + 当多个姿态估计重叠到同一目标时非常有用 [double = 0.5] + --track_iou_thr 追踪 IOU 阈值 [double = 0.4] + --track_max_missing 最大追踪容错 [int32 = 10] +``` + +**API** **示例** + +\- [`pose_tracker.py`](https://github.com/open-mmlab/mmdeploy/blob/main/demo/python/pose_tracker.py) + +\- [`pose_tracker.cxx`](https://github.com/open-mmlab/mmdeploy/blob/main/demo/csrc/cpp/pose_tracker.cxx) + +## 📚 常用功能 [🔝](#-table-of-contents) + +### 🚀 模型测速 [🔝](#-table-of-contents) + +如果需要测试模型在部署框架下的推理速度,MMDeploy 提供了方便的 `tools/profiler.py` 脚本。 + +用户需要准备一个存放测试图片的文件夹`./test_images`,profiler 将随机从该目录下抽取图片用于模型测速。 + +```shell +python tools/profiler.py \ + configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \ + {RTMPOSE_PROJECT}/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ + ../test_images \ + --model {WORK_DIR}/end2end.onnx \ + --shape 256x192 \ + --device cpu \ + --warmup 50 \ + --num-iter 200 +``` + +测试结果如下: + +```shell +01/30 15:06:35 - mmengine - INFO - [onnxruntime]-70 times per count: 8.73 ms, 114.50 FPS +01/30 15:06:36 - mmengine - INFO - [onnxruntime]-90 times per count: 9.05 ms, 110.48 FPS +01/30 15:06:37 - mmengine - INFO - [onnxruntime]-110 times per count: 9.87 ms, 101.32 FPS +01/30 15:06:37 - mmengine - INFO - [onnxruntime]-130 times per count: 9.99 ms, 100.10 FPS +01/30 15:06:38 - mmengine - INFO - [onnxruntime]-150 times per count: 10.39 ms, 96.29 FPS +01/30 15:06:39 - mmengine - INFO - [onnxruntime]-170 times per count: 10.77 ms, 92.86 FPS +01/30 15:06:40 - mmengine - INFO - [onnxruntime]-190 times per count: 10.98 ms, 91.05 FPS +01/30 15:06:40 - mmengine - INFO - [onnxruntime]-210 times per count: 11.19 ms, 89.33 FPS +01/30 15:06:41 - mmengine - INFO - [onnxruntime]-230 times per count: 11.16 ms, 89.58 FPS +01/30 15:06:42 - mmengine - INFO - [onnxruntime]-250 times per count: 11.06 ms, 90.41 FPS +----- Settings: ++------------+---------+ +| batch size | 1 | +| shape | 256x192 | +| iterations | 200 | +| warmup | 50 | ++------------+---------+ +----- Results: ++--------+------------+---------+ +| Stats | Latency/ms | FPS | ++--------+------------+---------+ +| Mean | 11.060 | 90.412 | +| Median | 11.852 | 84.375 | +| Min | 7.812 | 128.007 | +| Max | 13.690 | 73.044 | ++--------+------------+---------+ +``` + +如果你希望详细了解 profiler 的更多参数设置与功能,可以前往 [Profiler Docs](https://mmdeploy.readthedocs.io/en/main/02-how-to-run/useful_tools.html#profiler) + +### 📊 精度验证 [🔝](#-table-of-contents) + +如果需要测试模型在部署框架下的推理精度,MMDeploy 提供了方便的 `tools/test.py` 脚本。 + +```shell +python tools/test.py \ + configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \ + {RTMPOSE_PROJECT}/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ + --model {PATH_TO_MODEL}/rtmpose_m.pth \ + --device cpu +``` + +详细内容请参考 [MMDeploys Docs](https://github.com/open-mmlab/mmdeploy/blob/main/docs/zh_cn/02-how-to-run/profile_model.md) + +## 📜 引用 [🔝](#-table-of-contents) + +如果您觉得 RTMPose 对您的研究工作有所帮助,请考虑引用它: + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2303.07399, + doi = {10.48550/ARXIV.2303.07399}, + url = {https://arxiv.org/abs/2303.07399}, + author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer 
and information sciences}, + title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose}, + publisher = {arXiv}, + year = {2023}, + copyright = {Creative Commons Attribution 4.0 International} +} + +@misc{mmpose2020, + title={OpenMMLab Pose Estimation Toolbox and Benchmark}, + author={MMPose Contributors}, + howpublished = {\url{https://github.com/open-mmlab/mmpose}}, + year={2020} +} +``` diff --git a/projects/rtmpose/benchmark/README.md b/projects/rtmpose/benchmark/README.md index 46c036273c..03042b1865 100644 --- a/projects/rtmpose/benchmark/README.md +++ b/projects/rtmpose/benchmark/README.md @@ -1,116 +1,116 @@ -# RTMPose Benchmarks - -English | [简体中文](./README_CN.md) - -Community users are welcome to contribute to this project directory by performing inference speed tests on different hardware devices. - -Currently tested: - -- CPU - - Intel i7-11700 -- GPU - - NVIDIA GeForce 1660 Ti - - NVIDIA GeForce RTX 3090 -- Nvidia Jetson - - AGX Orin - - Orin NX -- ARM - - Snapdragon 865 - -## Body 2d (17 Keypoints) - -### Model Info - -| Config | Input Size | AP
    (COCO) | Params(M) | FLOPS(G) | -| :-------------------------------------------------------------------------------: | :--------: | :---------------: | :-------: | :------: | -| [RTMPose-t](../rtmpose/body_2d_keypoint/rtmpose-tiny_8xb256-420e_coco-256x192.py) | 256x192 | 68.5 | 3.34 | 0.36 | -| [RTMPose-s](../rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 72.2 | 5.47 | 0.68 | -| [RTMPose-m](../rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 75.8 | 13.59 | 1.93 | -| [RTMPose-l](../rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 76.5 | 27.66 | 4.16 | -| [RTMPose-m](../rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-384x288.py) | 384x288 | 77.0 | 13.72 | 4.33 | -| [RTMPose-l](../rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-384x288.py) | 384x288 | 77.3 | 27.79 | 9.35 | - -### Speed Benchmark - -- Numbers displayed in the table are inference latencies in millisecond(ms). - -| Config | Input Size | ORT
    (i7-11700) | TRT-FP16
    (GTX 1660Ti) | TRT-FP16
    (RTX 3090) | ncnn-FP16
    (Snapdragon 865) | TRT-FP16
    (Jetson AGX Orin) | TRT-FP16
    (Jetson Orin NX) | -| :---------: | :--------: | :--------------------: | :---------------------------: | :-------------------------: | :--------------------------------: | :--------------------------------: | :-------------------------------: | -| [RTMPose-t](../rtmpose/body_2d_keypoint/rtmpose-tiny_8xb256-420e_coco-256x192.py) | 256x192 | 3.20 | 1.06 | 0.98 | 9.02 | 1.63 | 1.97 | -| [RTMPose-s](../rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 4.48 | 1.39 | 1.12 | 13.89 | 1.85 | 2.18 | -| [RTMPose-m](../rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 11.06 | 2.29 | 1.18 | 26.44 | 2.72 | 3.35 | -| [RTMPose-l](../rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 18.85 | 3.46 | 1.37 | 45.37 | 3.67 | 4.78 | -| [RTMPose-m](../rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-384x288.py) | 384x288 | 24.78 | 3.66 | 1.20 | 26.44 | 3.45 | 5.08 | -| [RTMPose-l](../rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-384x288.py) | 384x288 | - | 6.05 | 1.74 | - | 4.93 | 7.23 | - -## WholeBody 2d (133 Keypoints) - -### Model Info - -| Config | Input Size | Whole AP | Whole AR | FLOPS(G) | -| :------------------------------------------------------------------------------------------- | :--------: | :------: | :------: | :------: | -| [RTMPose-m](../rtmpose/wholebody_2d_keypoint/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 60.4 | 66.7 | 2.22 | -| [RTMPose-l](../rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 63.2 | 69.4 | 4.52 | -| [RTMPose-l](../rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py) | 384x288 | 67.0 | 72.3 | 10.07 | - -### Speed Benchmark - -- Numbers displayed in the table are inference latencies in millisecond(ms). -- Data from different community users are separated by `|`. - -| Config | Input Size | ORT
    (i7-11700) | TRT-FP16
    (GTX 1660Ti) | TRT-FP16
    (RTX 3090) | TRT-FP16
    (Jetson AGX Orin) | TRT-FP16
    (Jetson Orin NX) | -| :-------------------------------------------- | :--------: | :--------------------: | :---------------------------: | :-------------------------: | :--------------------------------: | :-------------------------------: | -| [RTMPose-m](../rtmpose/wholebody_2d_keypoint/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 13.50 | 4.00 | 1.17 \| 1.84 | 2.79 | 3.51 | -| [RTMPose-l](../rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 23.41 | 5.67 | 1.44 \| 2.61 | 3.80 | 4.95 | -| [RTMPose-l](../rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py) | 384x288 | 44.58 | 7.68 | 1.75 \| 4.24 | 5.08 | 7.20 | - -## How To Test Speed - -If you need to test the inference speed of the model under the deployment framework, MMDeploy provides a convenient `tools/profiler.py` script. - -The user needs to prepare a folder for the test images `./test_images`, the profiler will randomly read images from this directory for the model speed test. - -```shell -python tools/profiler.py \ - configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \ - {RTMPOSE_PROJECT}/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ - ../test_images \ - --model {WORK_DIR}/end2end.onnx \ - --shape 256x192 \ - --device cpu \ - --warmup 50 \ - --num-iter 200 -``` - -The result is as follows: - -```shell -01/30 15:06:35 - mmengine - INFO - [onnxruntime]-70 times per count: 8.73 ms, 114.50 FPS -01/30 15:06:36 - mmengine - INFO - [onnxruntime]-90 times per count: 9.05 ms, 110.48 FPS -01/30 15:06:37 - mmengine - INFO - [onnxruntime]-110 times per count: 9.87 ms, 101.32 FPS -01/30 15:06:37 - mmengine - INFO - [onnxruntime]-130 times per count: 9.99 ms, 100.10 FPS -01/30 15:06:38 - mmengine - INFO - [onnxruntime]-150 times per count: 10.39 ms, 96.29 FPS -01/30 15:06:39 - mmengine - INFO - [onnxruntime]-170 times per count: 10.77 ms, 92.86 FPS -01/30 15:06:40 - mmengine - INFO - [onnxruntime]-190 times per count: 10.98 ms, 91.05 FPS -01/30 15:06:40 - mmengine - INFO - [onnxruntime]-210 times per count: 11.19 ms, 89.33 FPS -01/30 15:06:41 - mmengine - INFO - [onnxruntime]-230 times per count: 11.16 ms, 89.58 FPS -01/30 15:06:42 - mmengine - INFO - [onnxruntime]-250 times per count: 11.06 ms, 90.41 FPS ------ Settings: -+------------+---------+ -| batch size | 1 | -| shape | 256x192 | -| iterations | 200 | -| warmup | 50 | -+------------+---------+ ------ Results: -+--------+------------+---------+ -| Stats | Latency/ms | FPS | -+--------+------------+---------+ -| Mean | 11.060 | 90.412 | -| Median | 11.852 | 84.375 | -| Min | 7.812 | 128.007 | -| Max | 13.690 | 73.044 | -+--------+------------+---------+ -``` - -If you want to learn more details of profiler, you can refer to the [Profiler Docs](https://mmdeploy.readthedocs.io/en/latest/02-how-to-run/useful_tools.html#profiler). +# RTMPose Benchmarks + +English | [简体中文](./README_CN.md) + +Community users are welcome to contribute to this project directory by performing inference speed tests on different hardware devices. + +Currently tested: + +- CPU + - Intel i7-11700 +- GPU + - NVIDIA GeForce 1660 Ti + - NVIDIA GeForce RTX 3090 +- Nvidia Jetson + - AGX Orin + - Orin NX +- ARM + - Snapdragon 865 + +## Body 2d (17 Keypoints) + +### Model Info + +| Config | Input Size | AP
    (COCO) | Params(M) | FLOPS(G) | +| :-------------------------------------------------------------------------------: | :--------: | :---------------: | :-------: | :------: | +| [RTMPose-t](../rtmpose/body_2d_keypoint/rtmpose-tiny_8xb256-420e_coco-256x192.py) | 256x192 | 68.5 | 3.34 | 0.36 | +| [RTMPose-s](../rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 72.2 | 5.47 | 0.68 | +| [RTMPose-m](../rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 75.8 | 13.59 | 1.93 | +| [RTMPose-l](../rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 76.5 | 27.66 | 4.16 | +| [RTMPose-m](../rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-384x288.py) | 384x288 | 77.0 | 13.72 | 4.33 | +| [RTMPose-l](../rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-384x288.py) | 384x288 | 77.3 | 27.79 | 9.35 | + +### Speed Benchmark + +- Numbers displayed in the table are inference latencies in millisecond(ms). + +| Config | Input Size | ORT
    (i7-11700) | TRT-FP16
    (GTX 1660Ti) | TRT-FP16
    (RTX 3090) | ncnn-FP16
    (Snapdragon 865) | TRT-FP16
    (Jetson AGX Orin) | TRT-FP16
    (Jetson Orin NX) | +| :---------: | :--------: | :--------------------: | :---------------------------: | :-------------------------: | :--------------------------------: | :--------------------------------: | :-------------------------------: | +| [RTMPose-t](../rtmpose/body_2d_keypoint/rtmpose-tiny_8xb256-420e_coco-256x192.py) | 256x192 | 3.20 | 1.06 | 0.98 | 9.02 | 1.63 | 1.97 | +| [RTMPose-s](../rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 4.48 | 1.39 | 1.12 | 13.89 | 1.85 | 2.18 | +| [RTMPose-m](../rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 11.06 | 2.29 | 1.18 | 26.44 | 2.72 | 3.35 | +| [RTMPose-l](../rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 18.85 | 3.46 | 1.37 | 45.37 | 3.67 | 4.78 | +| [RTMPose-m](../rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-384x288.py) | 384x288 | 24.78 | 3.66 | 1.20 | 26.44 | 3.45 | 5.08 | +| [RTMPose-l](../rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-384x288.py) | 384x288 | - | 6.05 | 1.74 | - | 4.93 | 7.23 | + +## WholeBody 2d (133 Keypoints) + +### Model Info + +| Config | Input Size | Whole AP | Whole AR | FLOPS(G) | +| :------------------------------------------------------------------------------------------- | :--------: | :------: | :------: | :------: | +| [RTMPose-m](../rtmpose/wholebody_2d_keypoint/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 60.4 | 66.7 | 2.22 | +| [RTMPose-l](../rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 63.2 | 69.4 | 4.52 | +| [RTMPose-l](../rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py) | 384x288 | 67.0 | 72.3 | 10.07 | + +### Speed Benchmark + +- Numbers displayed in the table are inference latencies in millisecond(ms). +- Data from different community users are separated by `|`. + +| Config | Input Size | ORT
    (i7-11700) | TRT-FP16
    (GTX 1660Ti) | TRT-FP16
    (RTX 3090) | TRT-FP16
    (Jetson AGX Orin) | TRT-FP16
    (Jetson Orin NX) | +| :-------------------------------------------- | :--------: | :--------------------: | :---------------------------: | :-------------------------: | :--------------------------------: | :-------------------------------: | +| [RTMPose-m](../rtmpose/wholebody_2d_keypoint/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 13.50 | 4.00 | 1.17 \| 1.84 | 2.79 | 3.51 | +| [RTMPose-l](../rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 23.41 | 5.67 | 1.44 \| 2.61 | 3.80 | 4.95 | +| [RTMPose-l](../rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py) | 384x288 | 44.58 | 7.68 | 1.75 \| 4.24 | 5.08 | 7.20 | + +## How To Test Speed + +If you need to test the inference speed of the model under the deployment framework, MMDeploy provides a convenient `tools/profiler.py` script. + +The user needs to prepare a folder for the test images `./test_images`, the profiler will randomly read images from this directory for the model speed test. + +```shell +python tools/profiler.py \ + configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \ + {RTMPOSE_PROJECT}/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ + ../test_images \ + --model {WORK_DIR}/end2end.onnx \ + --shape 256x192 \ + --device cpu \ + --warmup 50 \ + --num-iter 200 +``` + +The result is as follows: + +```shell +01/30 15:06:35 - mmengine - INFO - [onnxruntime]-70 times per count: 8.73 ms, 114.50 FPS +01/30 15:06:36 - mmengine - INFO - [onnxruntime]-90 times per count: 9.05 ms, 110.48 FPS +01/30 15:06:37 - mmengine - INFO - [onnxruntime]-110 times per count: 9.87 ms, 101.32 FPS +01/30 15:06:37 - mmengine - INFO - [onnxruntime]-130 times per count: 9.99 ms, 100.10 FPS +01/30 15:06:38 - mmengine - INFO - [onnxruntime]-150 times per count: 10.39 ms, 96.29 FPS +01/30 15:06:39 - mmengine - INFO - [onnxruntime]-170 times per count: 10.77 ms, 92.86 FPS +01/30 15:06:40 - mmengine - INFO - [onnxruntime]-190 times per count: 10.98 ms, 91.05 FPS +01/30 15:06:40 - mmengine - INFO - [onnxruntime]-210 times per count: 11.19 ms, 89.33 FPS +01/30 15:06:41 - mmengine - INFO - [onnxruntime]-230 times per count: 11.16 ms, 89.58 FPS +01/30 15:06:42 - mmengine - INFO - [onnxruntime]-250 times per count: 11.06 ms, 90.41 FPS +----- Settings: ++------------+---------+ +| batch size | 1 | +| shape | 256x192 | +| iterations | 200 | +| warmup | 50 | ++------------+---------+ +----- Results: ++--------+------------+---------+ +| Stats | Latency/ms | FPS | ++--------+------------+---------+ +| Mean | 11.060 | 90.412 | +| Median | 11.852 | 84.375 | +| Min | 7.812 | 128.007 | +| Max | 13.690 | 73.044 | ++--------+------------+---------+ +``` + +If you want to learn more details of profiler, you can refer to the [Profiler Docs](https://mmdeploy.readthedocs.io/en/latest/02-how-to-run/useful_tools.html#profiler). diff --git a/projects/rtmpose/benchmark/README_CN.md b/projects/rtmpose/benchmark/README_CN.md index e1824d12b7..9a504aee45 100644 --- a/projects/rtmpose/benchmark/README_CN.md +++ b/projects/rtmpose/benchmark/README_CN.md @@ -1,116 +1,116 @@ -# RTMPose Benchmarks - -简体中文 | [English](./README.md) - -欢迎社区用户在不同硬件设备上进行推理速度测试,贡献到本项目目录下。 - -当前已测试: - -- CPU - - Intel i7-11700 -- GPU - - NVIDIA GeForce 1660 Ti - - NVIDIA GeForce RTX 3090 -- Nvidia Jetson - - AGX Orin - - Orin NX -- ARM - - Snapdragon 865 - -### 人体 2d 关键点 (17 Keypoints) - -### Model Info - -| Config | Input Size | AP
    (COCO) | Params(M) | FLOPS(G) | -| :-------------------------------------------------------------------------------: | :--------: | :---------------: | :-------: | :------: | -| [RTMPose-t](../rtmpose/body_2d_keypoint/rtmpose-tiny_8xb256-420e_coco-256x192.py) | 256x192 | 68.5 | 3.34 | 0.36 | -| [RTMPose-s](../rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 72.2 | 5.47 | 0.68 | -| [RTMPose-m](../rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 75.8 | 13.59 | 1.93 | -| [RTMPose-l](../rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 76.5 | 27.66 | 4.16 | -| [RTMPose-m](../rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-384x288.py) | 384x288 | 77.0 | 13.72 | 4.33 | -| [RTMPose-l](../rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-384x288.py) | 384x288 | 77.3 | 27.79 | 9.35 | - -### Speed Benchmark - -图中所示为模型推理时间,单位毫秒。 - -| Config | Input Size | ORT
    (i7-11700) | TRT-FP16
    (GTX 1660Ti) | TRT-FP16
    (RTX 3090) | ncnn-FP16
    (Snapdragon 865) | TRT-FP16
    (Jetson AGX Orin) | TRT-FP16
    (Jetson Orin NX) | -| :---------: | :--------: | :--------------------: | :---------------------------: | :-------------------------: | :--------------------------------: | :--------------------------------: | :-------------------------------: | -| [RTMPose-t](../rtmpose/body_2d_keypoint/rtmpose-tiny_8xb256-420e_coco-256x192.py) | 256x192 | 3.20 | 1.06 | 0.98 | 9.02 | 1.63 | 1.97 | -| [RTMPose-s](../rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 4.48 | 1.39 | 1.12 | 13.89 | 1.85 | 2.18 | -| [RTMPose-m](../rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 11.06 | 2.29 | 1.18 | 26.44 | 2.72 | 3.35 | -| [RTMPose-l](../rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 18.85 | 3.46 | 1.37 | 45.37 | 3.67 | 4.78 | -| [RTMPose-m](../rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-384x288.py) | 384x288 | 24.78 | 3.66 | 1.20 | 26.44 | 3.45 | 5.08 | -| [RTMPose-l](../rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-384x288.py) | 384x288 | - | 6.05 | 1.74 | - | 4.93 | 7.23 | - -### 人体全身 2d 关键点 (133 Keypoints) - -### Model Info - -| Config | Input Size | Whole AP | Whole AR | FLOPS(G) | -| :------------------------------------------------------------------------------------------- | :--------: | :------: | :------: | :------: | -| [RTMPose-m](../rtmpose/wholebody_2d_keypoint/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 60.4 | 66.7 | 2.22 | -| [RTMPose-l](../rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 63.2 | 69.4 | 4.52 | -| [RTMPose-l](../rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py) | 384x288 | 67.0 | 72.3 | 10.07 | - -### Speed Benchmark - -- 图中所示为模型推理时间,单位毫秒。 -- 来自不同社区用户的测试数据用 `|` 分隔开。 - -| Config | Input Size | ORT
    (i7-11700) | TRT-FP16
    (GTX 1660Ti) | TRT-FP16
    (RTX 3090) | TRT-FP16
    (Jetson AGX Orin) | TRT-FP16
    (Jetson Orin NX) | -| :-------------------------------------------- | :--------: | :--------------------: | :---------------------------: | :-------------------------: | :--------------------------------: | :-------------------------------: | -| [RTMPose-m](../rtmpose/wholebody_2d_keypoint/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 13.50 | 4.00 | 1.17 \| 1.84 | 2.79 | 3.51 | -| [RTMPose-l](../rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 23.41 | 5.67 | 1.44 \| 2.61 | 3.80 | 4.95 | -| [RTMPose-l](../rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py) | 384x288 | 44.58 | 7.68 | 1.75 \| 4.24 | 5.08 | 7.20 | - -## 如何测试推理速度 - -我们使用 MMDeploy 提供的 `tools/profiler.py` 脚本进行模型测速。 - -用户需要准备一个存放测试图片的文件夹`./test_images`,profiler 将随机从该目录下抽取图片用于模型测速。 - -```shell -python tools/profiler.py \ - configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \ - {RTMPOSE_PROJECT}/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ - ../test_images \ - --model {WORK_DIR}/end2end.onnx \ - --shape 256x192 \ - --device cpu \ - --warmup 50 \ - --num-iter 200 -``` - -The result is as follows: - -```shell -01/30 15:06:35 - mmengine - INFO - [onnxruntime]-70 times per count: 8.73 ms, 114.50 FPS -01/30 15:06:36 - mmengine - INFO - [onnxruntime]-90 times per count: 9.05 ms, 110.48 FPS -01/30 15:06:37 - mmengine - INFO - [onnxruntime]-110 times per count: 9.87 ms, 101.32 FPS -01/30 15:06:37 - mmengine - INFO - [onnxruntime]-130 times per count: 9.99 ms, 100.10 FPS -01/30 15:06:38 - mmengine - INFO - [onnxruntime]-150 times per count: 10.39 ms, 96.29 FPS -01/30 15:06:39 - mmengine - INFO - [onnxruntime]-170 times per count: 10.77 ms, 92.86 FPS -01/30 15:06:40 - mmengine - INFO - [onnxruntime]-190 times per count: 10.98 ms, 91.05 FPS -01/30 15:06:40 - mmengine - INFO - [onnxruntime]-210 times per count: 11.19 ms, 89.33 FPS -01/30 15:06:41 - mmengine - INFO - [onnxruntime]-230 times per count: 11.16 ms, 89.58 FPS -01/30 15:06:42 - mmengine - INFO - [onnxruntime]-250 times per count: 11.06 ms, 90.41 FPS ------ Settings: -+------------+---------+ -| batch size | 1 | -| shape | 256x192 | -| iterations | 200 | -| warmup | 50 | -+------------+---------+ ------ Results: -+--------+------------+---------+ -| Stats | Latency/ms | FPS | -+--------+------------+---------+ -| Mean | 11.060 | 90.412 | -| Median | 11.852 | 84.375 | -| Min | 7.812 | 128.007 | -| Max | 13.690 | 73.044 | -+--------+------------+---------+ -``` - -If you want to learn more details of profiler, you can refer to the [Profiler Docs](https://mmdeploy.readthedocs.io/en/latest/02-how-to-run/useful_tools.html#profiler). +# RTMPose Benchmarks + +简体中文 | [English](./README.md) + +欢迎社区用户在不同硬件设备上进行推理速度测试,贡献到本项目目录下。 + +当前已测试: + +- CPU + - Intel i7-11700 +- GPU + - NVIDIA GeForce 1660 Ti + - NVIDIA GeForce RTX 3090 +- Nvidia Jetson + - AGX Orin + - Orin NX +- ARM + - Snapdragon 865 + +### 人体 2d 关键点 (17 Keypoints) + +### Model Info + +| Config | Input Size | AP
    (COCO) | Params(M) | FLOPS(G) | +| :-------------------------------------------------------------------------------: | :--------: | :---------------: | :-------: | :------: | +| [RTMPose-t](../rtmpose/body_2d_keypoint/rtmpose-tiny_8xb256-420e_coco-256x192.py) | 256x192 | 68.5 | 3.34 | 0.36 | +| [RTMPose-s](../rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 72.2 | 5.47 | 0.68 | +| [RTMPose-m](../rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 75.8 | 13.59 | 1.93 | +| [RTMPose-l](../rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 76.5 | 27.66 | 4.16 | +| [RTMPose-m](../rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-384x288.py) | 384x288 | 77.0 | 13.72 | 4.33 | +| [RTMPose-l](../rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-384x288.py) | 384x288 | 77.3 | 27.79 | 9.35 | + +### Speed Benchmark + +图中所示为模型推理时间,单位毫秒。 + +| Config | Input Size | ORT
    (i7-11700) | TRT-FP16
    (GTX 1660Ti) | TRT-FP16
    (RTX 3090) | ncnn-FP16
    (Snapdragon 865) | TRT-FP16
    (Jetson AGX Orin) | TRT-FP16
    (Jetson Orin NX) | +| :---------: | :--------: | :--------------------: | :---------------------------: | :-------------------------: | :--------------------------------: | :--------------------------------: | :-------------------------------: | +| [RTMPose-t](../rtmpose/body_2d_keypoint/rtmpose-tiny_8xb256-420e_coco-256x192.py) | 256x192 | 3.20 | 1.06 | 0.98 | 9.02 | 1.63 | 1.97 | +| [RTMPose-s](../rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 4.48 | 1.39 | 1.12 | 13.89 | 1.85 | 2.18 | +| [RTMPose-m](../rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py) | 256x192 | 11.06 | 2.29 | 1.18 | 26.44 | 2.72 | 3.35 | +| [RTMPose-l](../rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py) | 256x192 | 18.85 | 3.46 | 1.37 | 45.37 | 3.67 | 4.78 | +| [RTMPose-m](../rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-384x288.py) | 384x288 | 24.78 | 3.66 | 1.20 | 26.44 | 3.45 | 5.08 | +| [RTMPose-l](../rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-384x288.py) | 384x288 | - | 6.05 | 1.74 | - | 4.93 | 7.23 | + +### 人体全身 2d 关键点 (133 Keypoints) + +### Model Info + +| Config | Input Size | Whole AP | Whole AR | FLOPS(G) | +| :------------------------------------------------------------------------------------------- | :--------: | :------: | :------: | :------: | +| [RTMPose-m](../rtmpose/wholebody_2d_keypoint/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 60.4 | 66.7 | 2.22 | +| [RTMPose-l](../rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 63.2 | 69.4 | 4.52 | +| [RTMPose-l](../rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py) | 384x288 | 67.0 | 72.3 | 10.07 | + +### Speed Benchmark + +- 图中所示为模型推理时间,单位毫秒。 +- 来自不同社区用户的测试数据用 `|` 分隔开。 + +| Config | Input Size | ORT
    (i7-11700) | TRT-FP16
    (GTX 1660Ti) | TRT-FP16
    (RTX 3090) | TRT-FP16
    (Jetson AGX Orin) | TRT-FP16
    (Jetson Orin NX) | +| :-------------------------------------------- | :--------: | :--------------------: | :---------------------------: | :-------------------------: | :--------------------------------: | :-------------------------------: | +| [RTMPose-m](../rtmpose/wholebody_2d_keypoint/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 13.50 | 4.00 | 1.17 \| 1.84 | 2.79 | 3.51 | +| [RTMPose-l](../rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py) | 256x192 | 23.41 | 5.67 | 1.44 \| 2.61 | 3.80 | 4.95 | +| [RTMPose-l](../rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py) | 384x288 | 44.58 | 7.68 | 1.75 \| 4.24 | 5.08 | 7.20 | + +## 如何测试推理速度 + +我们使用 MMDeploy 提供的 `tools/profiler.py` 脚本进行模型测速。 + +用户需要准备一个存放测试图片的文件夹`./test_images`,profiler 将随机从该目录下抽取图片用于模型测速。 + +```shell +python tools/profiler.py \ + configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \ + {RTMPOSE_PROJECT}/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \ + ../test_images \ + --model {WORK_DIR}/end2end.onnx \ + --shape 256x192 \ + --device cpu \ + --warmup 50 \ + --num-iter 200 +``` + +The result is as follows: + +```shell +01/30 15:06:35 - mmengine - INFO - [onnxruntime]-70 times per count: 8.73 ms, 114.50 FPS +01/30 15:06:36 - mmengine - INFO - [onnxruntime]-90 times per count: 9.05 ms, 110.48 FPS +01/30 15:06:37 - mmengine - INFO - [onnxruntime]-110 times per count: 9.87 ms, 101.32 FPS +01/30 15:06:37 - mmengine - INFO - [onnxruntime]-130 times per count: 9.99 ms, 100.10 FPS +01/30 15:06:38 - mmengine - INFO - [onnxruntime]-150 times per count: 10.39 ms, 96.29 FPS +01/30 15:06:39 - mmengine - INFO - [onnxruntime]-170 times per count: 10.77 ms, 92.86 FPS +01/30 15:06:40 - mmengine - INFO - [onnxruntime]-190 times per count: 10.98 ms, 91.05 FPS +01/30 15:06:40 - mmengine - INFO - [onnxruntime]-210 times per count: 11.19 ms, 89.33 FPS +01/30 15:06:41 - mmengine - INFO - [onnxruntime]-230 times per count: 11.16 ms, 89.58 FPS +01/30 15:06:42 - mmengine - INFO - [onnxruntime]-250 times per count: 11.06 ms, 90.41 FPS +----- Settings: ++------------+---------+ +| batch size | 1 | +| shape | 256x192 | +| iterations | 200 | +| warmup | 50 | ++------------+---------+ +----- Results: ++--------+------------+---------+ +| Stats | Latency/ms | FPS | ++--------+------------+---------+ +| Mean | 11.060 | 90.412 | +| Median | 11.852 | 84.375 | +| Min | 7.812 | 128.007 | +| Max | 13.690 | 73.044 | ++--------+------------+---------+ +``` + +If you want to learn more details of profiler, you can refer to the [Profiler Docs](https://mmdeploy.readthedocs.io/en/latest/02-how-to-run/useful_tools.html#profiler). diff --git a/projects/rtmpose/examples/PoseTracker-Android-Prototype/README.md b/projects/rtmpose/examples/PoseTracker-Android-Prototype/README.md index edce803106..0436fedbc6 100644 --- a/projects/rtmpose/examples/PoseTracker-Android-Prototype/README.md +++ b/projects/rtmpose/examples/PoseTracker-Android-Prototype/README.md @@ -1,5 +1,5 @@ -# PoseTracker-Android-Prototype - -PoseTracker Android Demo Prototype, which is based on [mmdeploy](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x) - -Please refer to [Original Repository](https://github.com/hanrui1sensetime/PoseTracker-Android-Prototype). 
+# PoseTracker-Android-Prototype + +PoseTracker Android Demo Prototype, which is based on [mmdeploy](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x) + +Please refer to [Original Repository](https://github.com/hanrui1sensetime/PoseTracker-Android-Prototype). diff --git a/projects/rtmpose/examples/README.md b/projects/rtmpose/examples/README.md index 5846f039e7..c32ee03532 100644 --- a/projects/rtmpose/examples/README.md +++ b/projects/rtmpose/examples/README.md @@ -1,18 +1,18 @@ -## List of examples - -### 1. RTMPose-Deploy - -RTMPose-Deploy is a C++ code example for RTMPose localized deployment. - -- [ONNXRuntime-CPU](https://github.com/HW140701/RTMPose-Deploy) -- [TensorRT](https://github.com/Dominic23331/rtmpose_tensorrt) - -### 2. RTMPose inference with ONNXRuntime (Python) - -This example shows how to run RTMPose inference with ONNXRuntime in Python. - -### 3. PoseTracker Android Demo - -PoseTracker Android Demo Prototype based on mmdeploy. - -- [Original Repository](https://github.com/hanrui1sensetime/PoseTracker-Android-Prototype) +## List of examples + +### 1. RTMPose-Deploy + +RTMPose-Deploy is a C++ code example for RTMPose localized deployment. + +- [ONNXRuntime-CPU](https://github.com/HW140701/RTMPose-Deploy) +- [TensorRT](https://github.com/Dominic23331/rtmpose_tensorrt) + +### 2. RTMPose inference with ONNXRuntime (Python) + +This example shows how to run RTMPose inference with ONNXRuntime in Python. + +### 3. PoseTracker Android Demo + +PoseTracker Android Demo Prototype based on mmdeploy. + +- [Original Repository](https://github.com/hanrui1sensetime/PoseTracker-Android-Prototype) diff --git a/projects/rtmpose/examples/RTMPose-Deploy/README.md b/projects/rtmpose/examples/RTMPose-Deploy/README.md index c4fce9a4df..24c27314f9 100644 --- a/projects/rtmpose/examples/RTMPose-Deploy/README.md +++ b/projects/rtmpose/examples/RTMPose-Deploy/README.md @@ -1,12 +1,12 @@ -# RTMPose-Deploy - -[中文说明](./README_CN.md) - -RTMPose-Deploy is a C ++ code example for RTMPose localized deployment. - -At present, RTMPose-Deploy has completed to use ONNXRuntime-CPU and TensorRT to deploy the RTMDet and RTMPose on the Windows system. - -| Deployment Framework | Repo | -| -------------------- | -------------------------------------------------------------------- | -| ONNXRuntime-CPU | [RTMPose-Deploy](https://github.com/HW140701/RTMPose-Deploy) | -| TensorRT | [rtmpose_tensorrt](https://github.com/Dominic23331/rtmpose_tensorrt) | +# RTMPose-Deploy + +[中文说明](./README_CN.md) + +RTMPose-Deploy is a C ++ code example for RTMPose localized deployment. + +At present, RTMPose-Deploy has completed to use ONNXRuntime-CPU and TensorRT to deploy the RTMDet and RTMPose on the Windows system. 
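Conceptually, both repositories follow the same recipe: load the exported `end2end.onnx` into an inference session, run the RTMDet detector to obtain person boxes, then crop each box and feed it to RTMPose. A minimal, hypothetical sketch of the ONNXRuntime-CPU session setup on Windows is shown below; the logger name, thread count and model path are placeholders and not values taken from either repository:

```C++
// Minimal ONNXRuntime-CPU session setup sketch (Windows, wide-character model path).
// The real examples linked in the table below add pre-processing, SimCC decoding and drawing.
#include <onnxruntime_cxx_api.h>

int main() {
  Ort::Env env(ORT_LOGGING_LEVEL_ERROR, "rtmpose_deploy_sketch");

  Ort::SessionOptions options;
  options.SetIntraOpNumThreads(4);  // tune to the host CPU
  options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL);

  // Placeholder path to a model exported with MMDeploy.
  Ort::Session session(env, L"rtmpose-ort/rtmpose-m/end2end.onnx", options);
  return 0;
}
```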
+ +| Deployment Framework | Repo | +| -------------------- | -------------------------------------------------------------------- | +| ONNXRuntime-CPU | [RTMPose-Deploy](https://github.com/HW140701/RTMPose-Deploy) | +| TensorRT | [rtmpose_tensorrt](https://github.com/Dominic23331/rtmpose_tensorrt) | diff --git a/projects/rtmpose/examples/RTMPose-Deploy/README_CN.md b/projects/rtmpose/examples/RTMPose-Deploy/README_CN.md index 82ee093658..627ff5b2c4 100644 --- a/projects/rtmpose/examples/RTMPose-Deploy/README_CN.md +++ b/projects/rtmpose/examples/RTMPose-Deploy/README_CN.md @@ -1,10 +1,10 @@ -# RTMPose-Deploy - -RTMPose-Deploy 是一个进行 RTMPose 本地化部署的 C++ 代码示例。 - -目前,RTMPose-Deploy 已完成在 Windows 系统上使用 OnnxRuntime CPU 和TensorRT 对 RTMDet 和 RTMPose 完成了部署。 - -| 部署框架 | 仓库 | -| --------------- | -------------------------------------------------------------------- | -| ONNXRuntime-CPU | [RTMPose-Deploy](https://github.com/HW140701/RTMPose-Deploy) | -| TensorRT | [rtmpose_tensorrt](https://github.com/Dominic23331/rtmpose_tensorrt) | +# RTMPose-Deploy + +RTMPose-Deploy 是一个进行 RTMPose 本地化部署的 C++ 代码示例。 + +目前,RTMPose-Deploy 已完成在 Windows 系统上使用 OnnxRuntime CPU 和TensorRT 对 RTMDet 和 RTMPose 完成了部署。 + +| 部署框架 | 仓库 | +| --------------- | -------------------------------------------------------------------- | +| ONNXRuntime-CPU | [RTMPose-Deploy](https://github.com/HW140701/RTMPose-Deploy) | +| TensorRT | [rtmpose_tensorrt](https://github.com/Dominic23331/rtmpose_tensorrt) | diff --git a/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/characterset_convert.h b/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/characterset_convert.h index fe586e2356..eb3ebf12b2 100644 --- a/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/characterset_convert.h +++ b/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/characterset_convert.h @@ -1,151 +1,151 @@ -#ifndef _CHARACTERSET_CONVERT_H_ -#define _CHARACTERSET_CONVERT_H_ - -#include - -#if defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__NT__) - -#include - -#elif defined(linux) || defined(__linux) - -#include -#include - -#endif - -namespace stubbornhuang -{ - class CharactersetConvert - { - public: -#if defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__NT__) - static std::wstring string_to_wstring(const std::string& str) - { - std::wstring result; - int len = MultiByteToWideChar(CP_ACP, 0, str.c_str(), -1, NULL, 0); - wchar_t* wstr = new wchar_t[len + 1]; - memset(wstr, 0, len + 1); - MultiByteToWideChar(CP_ACP, 0, str.c_str(), -1, wstr, len); - wstr[len] = '\0'; - result.append(wstr); - delete[] wstr; - return result; - } - - static std::string gbk_to_utf8(const std::string& gbk_str) - { - int len = MultiByteToWideChar(CP_ACP, 0, gbk_str.c_str(), -1, NULL, 0); - wchar_t* wstr = new wchar_t[len + 1]; - memset(wstr, 0, len + 1); - MultiByteToWideChar(CP_ACP, 0, gbk_str.c_str(), -1, wstr, len); - len = WideCharToMultiByte(CP_UTF8, 0, wstr, -1, NULL, 0, NULL, NULL); - char* str = new char[len + 1]; - memset(str, 0, len + 1); - WideCharToMultiByte(CP_UTF8, 0, wstr, -1, str, len, NULL, NULL); - std::string strTemp = str; - if (wstr) delete[] wstr; - if (str) delete[] str; - return strTemp; - } - - static std::string utf8_to_gbk(const std::string& utf8_str) - { - int len = MultiByteToWideChar(CP_UTF8, 0, utf8_str.c_str(), -1, NULL, 0); - wchar_t* wszGBK = new wchar_t[len + 1]; - memset(wszGBK, 
0, len * 2 + 2); - MultiByteToWideChar(CP_UTF8, 0, utf8_str.c_str(), -1, wszGBK, len); - len = WideCharToMultiByte(CP_ACP, 0, wszGBK, -1, NULL, 0, NULL, NULL); - char* szGBK = new char[len + 1]; - memset(szGBK, 0, len + 1); - WideCharToMultiByte(CP_ACP, 0, wszGBK, -1, szGBK, len, NULL, NULL); - std::string strTemp(szGBK); - if (wszGBK) delete[] wszGBK; - if (szGBK) delete[] szGBK; - return strTemp; - } - -#elif defined(linux) || defined(__linux) - static int code_convert( - const char* from_charset, - const char* to_charset, - char* inbuf, size_t inlen, - char* outbuf, size_t outlen - ) { - iconv_t cd; - char** pin = &inbuf; - char** pout = &outbuf; - - cd = iconv_open(to_charset, from_charset); - if (cd == 0) - return -1; - - memset(outbuf, 0, outlen); - - if ((int)iconv(cd, pin, &inlen, pout, &outlen) == -1) - { - iconv_close(cd); - return -1; - } - iconv_close(cd); - *pout = '\0'; - - return 0; - } - - static int u2g(char* inbuf, size_t inlen, char* outbuf, size_t outlen) { - return code_convert("utf-8", "gb2312", inbuf, inlen, outbuf, outlen); - } - - static int g2u(char* inbuf, size_t inlen, char* outbuf, size_t outlen) { - return code_convert("gb2312", "utf-8", inbuf, inlen, outbuf, outlen); - } - - - static std::string gbk_to_utf8(const std::string& gbk_str) - { - int length = gbk_str.size() * 2 + 1; - - char* temp = (char*)malloc(sizeof(char) * length); - - if (g2u((char*)gbk_str.c_str(), gbk_str.size(), temp, length) >= 0) - { - std::string str_result; - str_result.append(temp); - free(temp); - return str_result; - } - else - { - free(temp); - return ""; - } - } - - static std::string utf8_to_gbk(const std::string& utf8_str) - { - int length = strlen(utf8_str); - - char* temp = (char*)malloc(sizeof(char) * length); - - if (u2g((char*)utf8_str, length, temp, length) >= 0) - { - std::string str_result; - str_result.append(temp); - free(temp); - - return str_result; - } - else - { - free(temp); - return ""; - } - } - -#endif - - }; -} - -#endif // !_CHARACTERSET_CONVERT_H_ +#ifndef _CHARACTERSET_CONVERT_H_ +#define _CHARACTERSET_CONVERT_H_ + +#include + +#if defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__NT__) + +#include + +#elif defined(linux) || defined(__linux) + +#include +#include + +#endif + +namespace stubbornhuang +{ + class CharactersetConvert + { + public: +#if defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__NT__) + static std::wstring string_to_wstring(const std::string& str) + { + std::wstring result; + int len = MultiByteToWideChar(CP_ACP, 0, str.c_str(), -1, NULL, 0); + wchar_t* wstr = new wchar_t[len + 1]; + memset(wstr, 0, len + 1); + MultiByteToWideChar(CP_ACP, 0, str.c_str(), -1, wstr, len); + wstr[len] = '\0'; + result.append(wstr); + delete[] wstr; + return result; + } + + static std::string gbk_to_utf8(const std::string& gbk_str) + { + int len = MultiByteToWideChar(CP_ACP, 0, gbk_str.c_str(), -1, NULL, 0); + wchar_t* wstr = new wchar_t[len + 1]; + memset(wstr, 0, len + 1); + MultiByteToWideChar(CP_ACP, 0, gbk_str.c_str(), -1, wstr, len); + len = WideCharToMultiByte(CP_UTF8, 0, wstr, -1, NULL, 0, NULL, NULL); + char* str = new char[len + 1]; + memset(str, 0, len + 1); + WideCharToMultiByte(CP_UTF8, 0, wstr, -1, str, len, NULL, NULL); + std::string strTemp = str; + if (wstr) delete[] wstr; + if (str) delete[] str; + return strTemp; + } + + static std::string utf8_to_gbk(const std::string& utf8_str) + { + int len = MultiByteToWideChar(CP_UTF8, 0, utf8_str.c_str(), -1, NULL, 0); + wchar_t* wszGBK = new wchar_t[len + 1]; + 
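// zero the UTF-16 scratch buffer; the calls below convert UTF-8 -> UTF-16, then UTF-16 -> ANSI/GBK via CP_ACP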
memset(wszGBK, 0, len * 2 + 2); + MultiByteToWideChar(CP_UTF8, 0, utf8_str.c_str(), -1, wszGBK, len); + len = WideCharToMultiByte(CP_ACP, 0, wszGBK, -1, NULL, 0, NULL, NULL); + char* szGBK = new char[len + 1]; + memset(szGBK, 0, len + 1); + WideCharToMultiByte(CP_ACP, 0, wszGBK, -1, szGBK, len, NULL, NULL); + std::string strTemp(szGBK); + if (wszGBK) delete[] wszGBK; + if (szGBK) delete[] szGBK; + return strTemp; + } + +#elif defined(linux) || defined(__linux) + static int code_convert( + const char* from_charset, + const char* to_charset, + char* inbuf, size_t inlen, + char* outbuf, size_t outlen + ) { + iconv_t cd; + char** pin = &inbuf; + char** pout = &outbuf; + + cd = iconv_open(to_charset, from_charset); + if (cd == 0) + return -1; + + memset(outbuf, 0, outlen); + + if ((int)iconv(cd, pin, &inlen, pout, &outlen) == -1) + { + iconv_close(cd); + return -1; + } + iconv_close(cd); + *pout = '\0'; + + return 0; + } + + static int u2g(char* inbuf, size_t inlen, char* outbuf, size_t outlen) { + return code_convert("utf-8", "gb2312", inbuf, inlen, outbuf, outlen); + } + + static int g2u(char* inbuf, size_t inlen, char* outbuf, size_t outlen) { + return code_convert("gb2312", "utf-8", inbuf, inlen, outbuf, outlen); + } + + + static std::string gbk_to_utf8(const std::string& gbk_str) + { + int length = gbk_str.size() * 2 + 1; + + char* temp = (char*)malloc(sizeof(char) * length); + + if (g2u((char*)gbk_str.c_str(), gbk_str.size(), temp, length) >= 0) + { + std::string str_result; + str_result.append(temp); + free(temp); + return str_result; + } + else + { + free(temp); + return ""; + } + } + + static std::string utf8_to_gbk(const std::string& utf8_str) + { + int length = strlen(utf8_str); + + char* temp = (char*)malloc(sizeof(char) * length); + + if (u2g((char*)utf8_str, length, temp, length) >= 0) + { + std::string str_result; + str_result.append(temp); + free(temp); + + return str_result; + } + else + { + free(temp); + return ""; + } + } + +#endif + + }; +} + +#endif // !_CHARACTERSET_CONVERT_H_ diff --git a/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/main.cpp b/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/main.cpp index b2175047a2..034e040774 100644 --- a/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/main.cpp +++ b/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/main.cpp @@ -1,75 +1,75 @@ -#include - -#include "opencv2/opencv.hpp" - -#include "rtmpose_utils.h" -#include "rtmpose_onnxruntime.h" -#include "rtmdet_onnxruntime.h" -#include "rtmpose_tracker_onnxruntime.h" - -std::vector> coco_17_joint_links = { - {0,1},{0,2},{1,3},{2,4},{5,7},{7,9},{6,8},{8,10},{5,6},{5,11},{6,12},{11,12},{11,13},{13,15},{12,14},{14,16} -}; - -int main() -{ - std::string rtm_detnano_onnx_path = ""; - std::string rtm_pose_onnx_path = ""; -#ifdef _DEBUG - rtm_detnano_onnx_path = "../../resource/model/rtmpose-cpu/rtmpose-ort/rtmdet-nano/end2end.onnx"; - rtm_pose_onnx_path = "../../resource/model/rtmpose-cpu/rtmpose-ort/rtmpose-m/end2end.onnx"; -#else - rtm_detnano_onnx_path = "./resource/model/rtmpose-cpu/rtmpose-ort/rtmdet-nano/end2end.onnx"; - rtm_pose_onnx_path = "./resource/model/rtmpose-cpu/rtmpose-ort/rtmpose-m/end2end.onnx"; -#endif - - RTMPoseTrackerOnnxruntime rtmpose_tracker_onnxruntime(rtm_detnano_onnx_path, rtm_pose_onnx_path); - - cv::VideoCapture video_reader(0); - int frame_num = 0; - DetectBox detect_box; - while 
(video_reader.isOpened()) - { - cv::Mat frame; - video_reader >> frame; - - if (frame.empty()) - break; - - std::pair> inference_box= rtmpose_tracker_onnxruntime.Inference(frame); - DetectBox detect_box = inference_box.first; - std::vector pose_result = inference_box.second; - - cv::rectangle( - frame, - cv::Point(detect_box.left, detect_box.top), - cv::Point(detect_box.right, detect_box.bottom), - cv::Scalar{ 255, 0, 0 }, - 2); - - for (int i = 0; i < pose_result.size(); ++i) - { - cv::circle(frame, cv::Point(pose_result[i].x, pose_result[i].y), 1, cv::Scalar{ 0, 0, 255 }, 5, cv::LINE_AA); - } - - for (int i = 0; i < coco_17_joint_links.size(); ++i) - { - std::pair joint_links = coco_17_joint_links[i]; - cv::line( - frame, - cv::Point(pose_result[joint_links.first].x, pose_result[joint_links.first].y), - cv::Point(pose_result[joint_links.second].x, pose_result[joint_links.second].y), - cv::Scalar{ 0, 255, 0 }, - 2, - cv::LINE_AA); - } - - imshow("RTMPose", frame); - cv::waitKey(1); - } - - video_reader.release(); - cv::destroyAllWindows(); - - return 0; -} +#include + +#include "opencv2/opencv.hpp" + +#include "rtmpose_utils.h" +#include "rtmpose_onnxruntime.h" +#include "rtmdet_onnxruntime.h" +#include "rtmpose_tracker_onnxruntime.h" + +std::vector> coco_17_joint_links = { + {0,1},{0,2},{1,3},{2,4},{5,7},{7,9},{6,8},{8,10},{5,6},{5,11},{6,12},{11,12},{11,13},{13,15},{12,14},{14,16} +}; + +int main() +{ + std::string rtm_detnano_onnx_path = ""; + std::string rtm_pose_onnx_path = ""; +#ifdef _DEBUG + rtm_detnano_onnx_path = "../../resource/model/rtmpose-cpu/rtmpose-ort/rtmdet-nano/end2end.onnx"; + rtm_pose_onnx_path = "../../resource/model/rtmpose-cpu/rtmpose-ort/rtmpose-m/end2end.onnx"; +#else + rtm_detnano_onnx_path = "./resource/model/rtmpose-cpu/rtmpose-ort/rtmdet-nano/end2end.onnx"; + rtm_pose_onnx_path = "./resource/model/rtmpose-cpu/rtmpose-ort/rtmpose-m/end2end.onnx"; +#endif + + RTMPoseTrackerOnnxruntime rtmpose_tracker_onnxruntime(rtm_detnano_onnx_path, rtm_pose_onnx_path); + + cv::VideoCapture video_reader(0); + int frame_num = 0; + DetectBox detect_box; + while (video_reader.isOpened()) + { + cv::Mat frame; + video_reader >> frame; + + if (frame.empty()) + break; + + std::pair> inference_box= rtmpose_tracker_onnxruntime.Inference(frame); + DetectBox detect_box = inference_box.first; + std::vector pose_result = inference_box.second; + + cv::rectangle( + frame, + cv::Point(detect_box.left, detect_box.top), + cv::Point(detect_box.right, detect_box.bottom), + cv::Scalar{ 255, 0, 0 }, + 2); + + for (int i = 0; i < pose_result.size(); ++i) + { + cv::circle(frame, cv::Point(pose_result[i].x, pose_result[i].y), 1, cv::Scalar{ 0, 0, 255 }, 5, cv::LINE_AA); + } + + for (int i = 0; i < coco_17_joint_links.size(); ++i) + { + std::pair joint_links = coco_17_joint_links[i]; + cv::line( + frame, + cv::Point(pose_result[joint_links.first].x, pose_result[joint_links.first].y), + cv::Point(pose_result[joint_links.second].x, pose_result[joint_links.second].y), + cv::Scalar{ 0, 255, 0 }, + 2, + cv::LINE_AA); + } + + imshow("RTMPose", frame); + cv::waitKey(1); + } + + video_reader.release(); + cv::destroyAllWindows(); + + return 0; +} diff --git a/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmdet_onnxruntime.cpp b/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmdet_onnxruntime.cpp index 85e111b2f6..4a8b37b7c5 100644 --- 
a/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmdet_onnxruntime.cpp +++ b/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmdet_onnxruntime.cpp @@ -1,174 +1,174 @@ -#include "rtmdet_onnxruntime.h" - -#include -#include - -#include "characterset_convert.h" - - -RTMDetOnnxruntime::RTMDetOnnxruntime(const std::string& onnx_model_path) - :m_session(nullptr), - m_env(nullptr) -{ - std::wstring onnx_model_path_wstr = stubbornhuang::CharactersetConvert::string_to_wstring(onnx_model_path); - - m_env = Ort::Env(ORT_LOGGING_LEVEL_ERROR, "rtmdet_onnxruntime_cpu"); - - int cpu_processor_num = std::thread::hardware_concurrency(); - cpu_processor_num /= 2; - - Ort::SessionOptions session_options; - session_options.SetIntraOpNumThreads(cpu_processor_num); - session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL); - session_options.SetLogSeverityLevel(4); - - OrtSessionOptionsAppendExecutionProvider_CPU(session_options, 0); - m_session = Ort::Session(m_env, onnx_model_path_wstr.c_str(), session_options); - - PrintModelInfo(m_session); -} - -RTMDetOnnxruntime::~RTMDetOnnxruntime() -{ -} - -DetectBox RTMDetOnnxruntime::Inference(const cv::Mat& input_mat) -{ - // Deep copy - cv::Mat input_mat_copy; - input_mat.copyTo(input_mat_copy); - - // BGR to RGB - cv::Mat input_mat_copy_rgb; - cv::cvtColor(input_mat_copy, input_mat_copy_rgb, CV_BGR2RGB); - - // image data, HWC->CHW, image_data - mean / std normalize - int image_height = input_mat_copy_rgb.rows; - int image_width = input_mat_copy_rgb.cols; - int image_channels = input_mat_copy_rgb.channels(); - - std::vector input_image_array; - input_image_array.resize(1 * image_channels * image_height * image_width); - - float* input_image = input_image_array.data(); - for (int h = 0; h < image_height; ++h) - { - for (int w = 0; w < image_width; ++w) - { - for (int c = 0; c < image_channels; ++c) - { - int chw_index = c * image_height * image_width + h * image_width + w; - - float tmp = input_mat_copy_rgb.ptr(h)[w * 3 + c]; - - input_image[chw_index] = (tmp - IMAGE_MEAN[c]) / IMAGE_STD[c]; - } - } - } - - // inference - std::vector m_onnx_input_names{ "input" }; - std::vector m_onnx_output_names{ "dets","labels"}; - std::array input_shape{ 1, image_channels, image_height, image_width }; - - auto memory_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU); - Ort::Value input_tensor = Ort::Value::CreateTensor( - memory_info, - input_image_array.data(), - input_image_array.size(), - input_shape.data(), - input_shape.size() - ); - - assert(input_tensor.IsTensor()); - - auto output_tensors = m_session.Run( - Ort::RunOptions{ nullptr }, - m_onnx_input_names.data(), - &input_tensor, - 1, - m_onnx_output_names.data(), - m_onnx_output_names.size() - ); - - // pose process - std::vector det_result_dims = output_tensors[0].GetTensorTypeAndShapeInfo().GetShape(); - std::vector label_result_dims = output_tensors[1].GetTensorTypeAndShapeInfo().GetShape(); - - assert(det_result_dims.size() == 3 && label_result_dims.size() == 2); - - int batch_size = det_result_dims[0] == label_result_dims[0] ? det_result_dims[0] : 0; - int num_dets = det_result_dims[1] == label_result_dims[1] ? 
det_result_dims[1] : 0; - int reshap_dims = det_result_dims[2]; - - float* det_result = output_tensors[0].GetTensorMutableData(); - int* label_result = output_tensors[1].GetTensorMutableData(); - - std::vector all_box; - for (int i = 0; i < num_dets; ++i) - { - int classes = label_result[i]; - if (classes != 0) - continue; - - DetectBox temp_box; - temp_box.left = int(det_result[i * reshap_dims]); - temp_box.top = int(det_result[i * reshap_dims + 1]); - temp_box.right = int(det_result[i * reshap_dims + 2]); - temp_box.bottom = int(det_result[i * reshap_dims + 3]); - temp_box.score = det_result[i * reshap_dims + 4]; - temp_box.label = label_result[i]; - - all_box.emplace_back(temp_box); - } - - // descending sort - std::sort(all_box.begin(), all_box.end(), BoxCompare); - - //cv::rectangle(input_mat_copy, cv::Point{ all_box[0].left, all_box[0].top }, cv::Point{ all_box[0].right, all_box[0].bottom }, cv::Scalar{ 0, 255, 0 }); - - //cv::imwrite("detect.jpg", input_mat_copy); - - DetectBox result_box; - - if (!all_box.empty()) - { - result_box = all_box[0]; - } - - return result_box; -} - -void RTMDetOnnxruntime::PrintModelInfo(Ort::Session& session) -{ - // print the number of model input nodes - size_t num_input_nodes = session.GetInputCount(); - size_t num_output_nodes = session.GetOutputCount(); - std::cout << "Number of input node is:" << num_input_nodes << std::endl; - std::cout << "Number of output node is:" << num_output_nodes << std::endl; - - // print node name - Ort::AllocatorWithDefaultOptions allocator; - std::cout << std::endl; - for (auto i = 0; i < num_input_nodes; i++) - std::cout << "The input op-name " << i << " is:" << session.GetInputNameAllocated(i, allocator) << std::endl; - for (auto i = 0; i < num_output_nodes; i++) - std::cout << "The output op-name " << i << " is:" << session.GetOutputNameAllocated(i, allocator) << std::endl; - - - // print input and output dims - for (auto i = 0; i < num_input_nodes; i++) - { - std::vector input_dims = session.GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape(); - std::cout << std::endl << "input " << i << " dim is: "; - for (auto j = 0; j < input_dims.size(); j++) - std::cout << input_dims[j] << " "; - } - for (auto i = 0; i < num_output_nodes; i++) - { - std::vector output_dims = session.GetOutputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape(); - std::cout << std::endl << "output " << i << " dim is: "; - for (auto j = 0; j < output_dims.size(); j++) - std::cout << output_dims[j] << " "; - } -} +#include "rtmdet_onnxruntime.h" + +#include +#include + +#include "characterset_convert.h" + + +RTMDetOnnxruntime::RTMDetOnnxruntime(const std::string& onnx_model_path) + :m_session(nullptr), + m_env(nullptr) +{ + std::wstring onnx_model_path_wstr = stubbornhuang::CharactersetConvert::string_to_wstring(onnx_model_path); + + m_env = Ort::Env(ORT_LOGGING_LEVEL_ERROR, "rtmdet_onnxruntime_cpu"); + + int cpu_processor_num = std::thread::hardware_concurrency(); + cpu_processor_num /= 2; + + Ort::SessionOptions session_options; + session_options.SetIntraOpNumThreads(cpu_processor_num); + session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL); + session_options.SetLogSeverityLevel(4); + + OrtSessionOptionsAppendExecutionProvider_CPU(session_options, 0); + m_session = Ort::Session(m_env, onnx_model_path_wstr.c_str(), session_options); + + PrintModelInfo(m_session); +} + +RTMDetOnnxruntime::~RTMDetOnnxruntime() +{ +} + +DetectBox RTMDetOnnxruntime::Inference(const cv::Mat& input_mat) +{ + // Deep copy + cv::Mat 
input_mat_copy; + input_mat.copyTo(input_mat_copy); + + // BGR to RGB + cv::Mat input_mat_copy_rgb; + cv::cvtColor(input_mat_copy, input_mat_copy_rgb, CV_BGR2RGB); + + // image data, HWC->CHW, image_data - mean / std normalize + int image_height = input_mat_copy_rgb.rows; + int image_width = input_mat_copy_rgb.cols; + int image_channels = input_mat_copy_rgb.channels(); + + std::vector input_image_array; + input_image_array.resize(1 * image_channels * image_height * image_width); + + float* input_image = input_image_array.data(); + for (int h = 0; h < image_height; ++h) + { + for (int w = 0; w < image_width; ++w) + { + for (int c = 0; c < image_channels; ++c) + { + int chw_index = c * image_height * image_width + h * image_width + w; + + float tmp = input_mat_copy_rgb.ptr(h)[w * 3 + c]; + + input_image[chw_index] = (tmp - IMAGE_MEAN[c]) / IMAGE_STD[c]; + } + } + } + + // inference + std::vector m_onnx_input_names{ "input" }; + std::vector m_onnx_output_names{ "dets","labels"}; + std::array input_shape{ 1, image_channels, image_height, image_width }; + + auto memory_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU); + Ort::Value input_tensor = Ort::Value::CreateTensor( + memory_info, + input_image_array.data(), + input_image_array.size(), + input_shape.data(), + input_shape.size() + ); + + assert(input_tensor.IsTensor()); + + auto output_tensors = m_session.Run( + Ort::RunOptions{ nullptr }, + m_onnx_input_names.data(), + &input_tensor, + 1, + m_onnx_output_names.data(), + m_onnx_output_names.size() + ); + + // pose process + std::vector det_result_dims = output_tensors[0].GetTensorTypeAndShapeInfo().GetShape(); + std::vector label_result_dims = output_tensors[1].GetTensorTypeAndShapeInfo().GetShape(); + + assert(det_result_dims.size() == 3 && label_result_dims.size() == 2); + + int batch_size = det_result_dims[0] == label_result_dims[0] ? det_result_dims[0] : 0; + int num_dets = det_result_dims[1] == label_result_dims[1] ? 
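// NCHW detector input {1, C, H, W}; the "dets" output holds (x1, y1, x2, y2, score) rows
// and "labels" holds class ids, filtered below to class 0 (person) and sorted by score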
det_result_dims[1] : 0; + int reshap_dims = det_result_dims[2]; + + float* det_result = output_tensors[0].GetTensorMutableData(); + int* label_result = output_tensors[1].GetTensorMutableData(); + + std::vector all_box; + for (int i = 0; i < num_dets; ++i) + { + int classes = label_result[i]; + if (classes != 0) + continue; + + DetectBox temp_box; + temp_box.left = int(det_result[i * reshap_dims]); + temp_box.top = int(det_result[i * reshap_dims + 1]); + temp_box.right = int(det_result[i * reshap_dims + 2]); + temp_box.bottom = int(det_result[i * reshap_dims + 3]); + temp_box.score = det_result[i * reshap_dims + 4]; + temp_box.label = label_result[i]; + + all_box.emplace_back(temp_box); + } + + // descending sort + std::sort(all_box.begin(), all_box.end(), BoxCompare); + + //cv::rectangle(input_mat_copy, cv::Point{ all_box[0].left, all_box[0].top }, cv::Point{ all_box[0].right, all_box[0].bottom }, cv::Scalar{ 0, 255, 0 }); + + //cv::imwrite("detect.jpg", input_mat_copy); + + DetectBox result_box; + + if (!all_box.empty()) + { + result_box = all_box[0]; + } + + return result_box; +} + +void RTMDetOnnxruntime::PrintModelInfo(Ort::Session& session) +{ + // print the number of model input nodes + size_t num_input_nodes = session.GetInputCount(); + size_t num_output_nodes = session.GetOutputCount(); + std::cout << "Number of input node is:" << num_input_nodes << std::endl; + std::cout << "Number of output node is:" << num_output_nodes << std::endl; + + // print node name + Ort::AllocatorWithDefaultOptions allocator; + std::cout << std::endl; + for (auto i = 0; i < num_input_nodes; i++) + std::cout << "The input op-name " << i << " is:" << session.GetInputNameAllocated(i, allocator) << std::endl; + for (auto i = 0; i < num_output_nodes; i++) + std::cout << "The output op-name " << i << " is:" << session.GetOutputNameAllocated(i, allocator) << std::endl; + + + // print input and output dims + for (auto i = 0; i < num_input_nodes; i++) + { + std::vector input_dims = session.GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape(); + std::cout << std::endl << "input " << i << " dim is: "; + for (auto j = 0; j < input_dims.size(); j++) + std::cout << input_dims[j] << " "; + } + for (auto i = 0; i < num_output_nodes; i++) + { + std::vector output_dims = session.GetOutputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape(); + std::cout << std::endl << "output " << i << " dim is: "; + for (auto j = 0; j < output_dims.size(); j++) + std::cout << output_dims[j] << " "; + } +} diff --git a/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmdet_onnxruntime.h b/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmdet_onnxruntime.h index 72500bc9c1..d313b5d1cf 100644 --- a/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmdet_onnxruntime.h +++ b/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmdet_onnxruntime.h @@ -1,32 +1,32 @@ -#ifndef _RTM_DET_ONNX_RUNTIME_H_ -#define _RTM_DET_ONNX_RUNTIME_H_ - -#include - -#include "opencv2/opencv.hpp" - -#include "onnxruntime_cxx_api.h" -#include "cpu_provider_factory.h" -#include "rtmpose_utils.h" - - -class RTMDetOnnxruntime -{ -public: - RTMDetOnnxruntime() = delete; - RTMDetOnnxruntime(const std::string& onnx_model_path); - virtual~RTMDetOnnxruntime(); - -public: - DetectBox Inference(const cv::Mat& input_mat); - -private: - void PrintModelInfo(Ort::Session& session); - -private: - Ort::Env m_env; - 
Ort::Session m_session; - -}; - -#endif // !_RTM_DET_ONNX_RUNTIME_H_ +#ifndef _RTM_DET_ONNX_RUNTIME_H_ +#define _RTM_DET_ONNX_RUNTIME_H_ + +#include + +#include "opencv2/opencv.hpp" + +#include "onnxruntime_cxx_api.h" +#include "cpu_provider_factory.h" +#include "rtmpose_utils.h" + + +class RTMDetOnnxruntime +{ +public: + RTMDetOnnxruntime() = delete; + RTMDetOnnxruntime(const std::string& onnx_model_path); + virtual~RTMDetOnnxruntime(); + +public: + DetectBox Inference(const cv::Mat& input_mat); + +private: + void PrintModelInfo(Ort::Session& session); + +private: + Ort::Env m_env; + Ort::Session m_session; + +}; + +#endif // !_RTM_DET_ONNX_RUNTIME_H_ diff --git a/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmpose_onnxruntime.cpp b/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmpose_onnxruntime.cpp index debdda570b..dd4e32cdb4 100644 --- a/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmpose_onnxruntime.cpp +++ b/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmpose_onnxruntime.cpp @@ -1,261 +1,261 @@ -#include "rtmpose_onnxruntime.h" - -#include -#include - -#include "characterset_convert.h" -#include "rtmpose_utils.h" - -#undef max - - -RTMPoseOnnxruntime::RTMPoseOnnxruntime(const std::string& onnx_model_path) - :m_session(nullptr) -{ - std::wstring onnx_model_path_wstr = stubbornhuang::CharactersetConvert::string_to_wstring(onnx_model_path); - - m_env = Ort::Env(ORT_LOGGING_LEVEL_ERROR, "rtmpose_onnxruntime_cpu"); - - int cpu_processor_num = std::thread::hardware_concurrency(); - cpu_processor_num /= 2; - - Ort::SessionOptions session_options; - session_options.SetIntraOpNumThreads(cpu_processor_num); - session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL); - session_options.SetLogSeverityLevel(4); - - OrtSessionOptionsAppendExecutionProvider_CPU(session_options, 0); - m_session = Ort::Session(m_env, onnx_model_path_wstr.c_str(), session_options); - - PrintModelInfo(m_session); -} - -RTMPoseOnnxruntime::~RTMPoseOnnxruntime() -{ -} - -std::vector RTMPoseOnnxruntime::Inference(const cv::Mat& input_mat, const DetectBox& box) -{ - std::vector pose_result; - - if (!box.IsValid()) - return pose_result; - - std::pair crop_result_pair = CropImageByDetectBox(input_mat, box); - - cv::Mat crop_mat = crop_result_pair.first; - cv::Mat affine_transform_reverse = crop_result_pair.second; - - // deep copy - cv::Mat crop_mat_copy; - crop_mat.copyTo(crop_mat_copy); - - // BGR to RGB - cv::Mat input_mat_copy_rgb; - cv::cvtColor(crop_mat, input_mat_copy_rgb, CV_BGR2RGB); - - // image data, HWC->CHW, image_data - mean / std normalize - int image_height = input_mat_copy_rgb.rows; - int image_width = input_mat_copy_rgb.cols; - int image_channels = input_mat_copy_rgb.channels(); - - std::vector input_image_array; - input_image_array.resize(1 * image_channels * image_height * image_width); - - float* input_image = input_image_array.data(); - for (int h = 0; h < image_height; ++h) - { - for (int w = 0; w < image_width; ++w) - { - for (int c = 0; c < image_channels; ++c) - { - int chw_index = c * image_height * image_width + h * image_width + w; - - float tmp = input_mat_copy_rgb.ptr(h)[w * 3 + c]; - - input_image[chw_index] = (tmp - IMAGE_MEAN[c]) / IMAGE_STD[c]; - } - } - } - - // inference - std::vector m_onnx_input_names{ "input" }; - std::vector m_onnx_output_names{ "simcc_x","simcc_y" }; - std::array 
input_shape{ 1, image_channels, image_height, image_width }; - - auto memory_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU); - Ort::Value input_tensor = Ort::Value::CreateTensor( - memory_info, - input_image_array.data(), - input_image_array.size(), - input_shape.data(), - input_shape.size() - ); - - assert(input_tensor.IsTensor()); - - auto output_tensors = m_session.Run( - Ort::RunOptions{ nullptr }, - m_onnx_input_names.data(), - &input_tensor, - 1, - m_onnx_output_names.data(), - m_onnx_output_names.size() - ); - - // pose process - std::vector simcc_x_dims = output_tensors[0].GetTensorTypeAndShapeInfo().GetShape(); - std::vector simcc_y_dims = output_tensors[1].GetTensorTypeAndShapeInfo().GetShape(); - - assert(simcc_x_dims.size() == 3 && simcc_y_dims.size() == 3); - - int batch_size = simcc_x_dims[0] == simcc_y_dims[0] ? simcc_x_dims[0] : 0; - int joint_num = simcc_x_dims[1] == simcc_y_dims[1] ? simcc_x_dims[1] : 0; - int extend_width = simcc_x_dims[2]; - int extend_height = simcc_y_dims[2]; - - float* simcc_x_result = output_tensors[0].GetTensorMutableData(); - float* simcc_y_result = output_tensors[1].GetTensorMutableData(); - - - for (int i = 0; i < joint_num; ++i) - { - // find the maximum and maximum indexes in the value of each Extend_width length - auto x_biggest_iter = std::max_element(simcc_x_result + i * extend_width, simcc_x_result + i * extend_width + extend_width); - int max_x_pos = std::distance(simcc_x_result + i * extend_width, x_biggest_iter); - int pose_x = max_x_pos / 2; - float score_x = *x_biggest_iter; - - // find the maximum and maximum indexes in the value of each exten_height length - auto y_biggest_iter = std::max_element(simcc_y_result + i * extend_height, simcc_y_result + i * extend_height + extend_height); - int max_y_pos = std::distance(simcc_y_result + i * extend_height, y_biggest_iter); - int pose_y = max_y_pos / 2; - float score_y = *y_biggest_iter; - - //float score = (score_x + score_y) / 2; - float score = std::max(score_x, score_y); - - PosePoint temp_point; - temp_point.x = int(pose_x); - temp_point.y = int(pose_y); - temp_point.score = score; - pose_result.emplace_back(temp_point); - } - - // anti affine transformation to obtain the coordinates on the original picture - for (int i = 0; i < pose_result.size(); ++i) - { - cv::Mat origin_point_Mat = cv::Mat::ones(3, 1, CV_64FC1); - origin_point_Mat.at(0, 0) = pose_result[i].x; - origin_point_Mat.at(1, 0) = pose_result[i].y; - - cv::Mat temp_result_mat = affine_transform_reverse * origin_point_Mat; - - pose_result[i].x = temp_result_mat.at(0, 0); - pose_result[i].y = temp_result_mat.at(1, 0); - } - - return pose_result; -} - -std::pair RTMPoseOnnxruntime::CropImageByDetectBox(const cv::Mat& input_image, const DetectBox& box) -{ - std::pair result_pair; - - if (!input_image.data) - { - return result_pair; - } - - if (!box.IsValid()) - { - return result_pair; - } - - // deep copy - cv::Mat input_mat_copy; - input_image.copyTo(input_mat_copy); - - // calculate the width, height and center points of the human detection box - int box_width = box.right - box.left; - int box_height = box.bottom - box.top; - int box_center_x = box.left + box_width / 2; - int box_center_y = box.top + box_height / 2; - - float aspect_ratio = 192.0 / 256.0; - - // adjust the width and height ratio of the size of the picture in the RTMPOSE input - if (box_width > (aspect_ratio * box_height)) - { - box_height = box_width / aspect_ratio; - } - else if (box_width < (aspect_ratio * box_height)) - { - box_width 
= box_height * aspect_ratio; - } - - float scale_image_width = box_width * 1.2; - float scale_image_height = box_height * 1.2; - - // get the affine matrix - cv::Mat affine_transform = GetAffineTransform( - box_center_x, - box_center_y, - scale_image_width, - scale_image_height, - 192, - 256 - ); - - cv::Mat affine_transform_reverse = GetAffineTransform( - box_center_x, - box_center_y, - scale_image_width, - scale_image_height, - 192, - 256, - true - ); - - // affine transform - cv::Mat affine_image; - cv::warpAffine(input_mat_copy, affine_image, affine_transform, cv::Size(192, 256), cv::INTER_LINEAR); - //cv::imwrite("affine_img.jpg", affine_image); - - result_pair = std::make_pair(affine_image, affine_transform_reverse); - - return result_pair; -} - -void RTMPoseOnnxruntime::PrintModelInfo(Ort::Session& session) -{ - // print the number of model input nodes - size_t num_input_nodes = session.GetInputCount(); - size_t num_output_nodes = session.GetOutputCount(); - std::cout << "Number of input node is:" << num_input_nodes << std::endl; - std::cout << "Number of output node is:" << num_output_nodes << std::endl; - - // print node name - Ort::AllocatorWithDefaultOptions allocator; - std::cout << std::endl; - for (auto i = 0; i < num_input_nodes; i++) - std::cout << "The input op-name " << i << " is:" << session.GetInputNameAllocated(i, allocator) << std::endl; - for (auto i = 0; i < num_output_nodes; i++) - std::cout << "The output op-name " << i << " is:" << session.GetOutputNameAllocated(i, allocator) << std::endl; - - // print input and output dims - for (auto i = 0; i < num_input_nodes; i++) - { - std::vector input_dims = session.GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape(); - std::cout << std::endl << "input " << i << " dim is: "; - for (auto j = 0; j < input_dims.size(); j++) - std::cout << input_dims[j] << " "; - } - for (auto i = 0; i < num_output_nodes; i++) - { - std::vector output_dims = session.GetOutputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape(); - std::cout << std::endl << "output " << i << " dim is: "; - for (auto j = 0; j < output_dims.size(); j++) - std::cout << output_dims[j] << " "; - } -} +#include "rtmpose_onnxruntime.h" + +#include +#include + +#include "characterset_convert.h" +#include "rtmpose_utils.h" + +#undef max + + +RTMPoseOnnxruntime::RTMPoseOnnxruntime(const std::string& onnx_model_path) + :m_session(nullptr) +{ + std::wstring onnx_model_path_wstr = stubbornhuang::CharactersetConvert::string_to_wstring(onnx_model_path); + + m_env = Ort::Env(ORT_LOGGING_LEVEL_ERROR, "rtmpose_onnxruntime_cpu"); + + int cpu_processor_num = std::thread::hardware_concurrency(); + cpu_processor_num /= 2; + + Ort::SessionOptions session_options; + session_options.SetIntraOpNumThreads(cpu_processor_num); + session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL); + session_options.SetLogSeverityLevel(4); + + OrtSessionOptionsAppendExecutionProvider_CPU(session_options, 0); + m_session = Ort::Session(m_env, onnx_model_path_wstr.c_str(), session_options); + + PrintModelInfo(m_session); +} + +RTMPoseOnnxruntime::~RTMPoseOnnxruntime() +{ +} + +std::vector RTMPoseOnnxruntime::Inference(const cv::Mat& input_mat, const DetectBox& box) +{ + std::vector pose_result; + + if (!box.IsValid()) + return pose_result; + + std::pair crop_result_pair = CropImageByDetectBox(input_mat, box); + + cv::Mat crop_mat = crop_result_pair.first; + cv::Mat affine_transform_reverse = crop_result_pair.second; + + // deep copy + cv::Mat crop_mat_copy; + 
crop_mat.copyTo(crop_mat_copy); + + // BGR to RGB + cv::Mat input_mat_copy_rgb; + cv::cvtColor(crop_mat, input_mat_copy_rgb, CV_BGR2RGB); + + // image data, HWC->CHW, image_data - mean / std normalize + int image_height = input_mat_copy_rgb.rows; + int image_width = input_mat_copy_rgb.cols; + int image_channels = input_mat_copy_rgb.channels(); + + std::vector input_image_array; + input_image_array.resize(1 * image_channels * image_height * image_width); + + float* input_image = input_image_array.data(); + for (int h = 0; h < image_height; ++h) + { + for (int w = 0; w < image_width; ++w) + { + for (int c = 0; c < image_channels; ++c) + { + int chw_index = c * image_height * image_width + h * image_width + w; + + float tmp = input_mat_copy_rgb.ptr(h)[w * 3 + c]; + + input_image[chw_index] = (tmp - IMAGE_MEAN[c]) / IMAGE_STD[c]; + } + } + } + + // inference + std::vector m_onnx_input_names{ "input" }; + std::vector m_onnx_output_names{ "simcc_x","simcc_y" }; + std::array input_shape{ 1, image_channels, image_height, image_width }; + + auto memory_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU); + Ort::Value input_tensor = Ort::Value::CreateTensor( + memory_info, + input_image_array.data(), + input_image_array.size(), + input_shape.data(), + input_shape.size() + ); + + assert(input_tensor.IsTensor()); + + auto output_tensors = m_session.Run( + Ort::RunOptions{ nullptr }, + m_onnx_input_names.data(), + &input_tensor, + 1, + m_onnx_output_names.data(), + m_onnx_output_names.size() + ); + + // pose process + std::vector simcc_x_dims = output_tensors[0].GetTensorTypeAndShapeInfo().GetShape(); + std::vector simcc_y_dims = output_tensors[1].GetTensorTypeAndShapeInfo().GetShape(); + + assert(simcc_x_dims.size() == 3 && simcc_y_dims.size() == 3); + + int batch_size = simcc_x_dims[0] == simcc_y_dims[0] ? simcc_x_dims[0] : 0; + int joint_num = simcc_x_dims[1] == simcc_y_dims[1] ? 
simcc_x_dims[1] : 0; + int extend_width = simcc_x_dims[2]; + int extend_height = simcc_y_dims[2]; + + float* simcc_x_result = output_tensors[0].GetTensorMutableData(); + float* simcc_y_result = output_tensors[1].GetTensorMutableData(); + + + for (int i = 0; i < joint_num; ++i) + { + // find the maximum and maximum indexes in the value of each Extend_width length + auto x_biggest_iter = std::max_element(simcc_x_result + i * extend_width, simcc_x_result + i * extend_width + extend_width); + int max_x_pos = std::distance(simcc_x_result + i * extend_width, x_biggest_iter); + int pose_x = max_x_pos / 2; + float score_x = *x_biggest_iter; + + // find the maximum and maximum indexes in the value of each exten_height length + auto y_biggest_iter = std::max_element(simcc_y_result + i * extend_height, simcc_y_result + i * extend_height + extend_height); + int max_y_pos = std::distance(simcc_y_result + i * extend_height, y_biggest_iter); + int pose_y = max_y_pos / 2; + float score_y = *y_biggest_iter; + + //float score = (score_x + score_y) / 2; + float score = std::max(score_x, score_y); + + PosePoint temp_point; + temp_point.x = int(pose_x); + temp_point.y = int(pose_y); + temp_point.score = score; + pose_result.emplace_back(temp_point); + } + + // anti affine transformation to obtain the coordinates on the original picture + for (int i = 0; i < pose_result.size(); ++i) + { + cv::Mat origin_point_Mat = cv::Mat::ones(3, 1, CV_64FC1); + origin_point_Mat.at(0, 0) = pose_result[i].x; + origin_point_Mat.at(1, 0) = pose_result[i].y; + + cv::Mat temp_result_mat = affine_transform_reverse * origin_point_Mat; + + pose_result[i].x = temp_result_mat.at(0, 0); + pose_result[i].y = temp_result_mat.at(1, 0); + } + + return pose_result; +} + +std::pair RTMPoseOnnxruntime::CropImageByDetectBox(const cv::Mat& input_image, const DetectBox& box) +{ + std::pair result_pair; + + if (!input_image.data) + { + return result_pair; + } + + if (!box.IsValid()) + { + return result_pair; + } + + // deep copy + cv::Mat input_mat_copy; + input_image.copyTo(input_mat_copy); + + // calculate the width, height and center points of the human detection box + int box_width = box.right - box.left; + int box_height = box.bottom - box.top; + int box_center_x = box.left + box_width / 2; + int box_center_y = box.top + box_height / 2; + + float aspect_ratio = 192.0 / 256.0; + + // adjust the width and height ratio of the size of the picture in the RTMPOSE input + if (box_width > (aspect_ratio * box_height)) + { + box_height = box_width / aspect_ratio; + } + else if (box_width < (aspect_ratio * box_height)) + { + box_width = box_height * aspect_ratio; + } + + float scale_image_width = box_width * 1.2; + float scale_image_height = box_height * 1.2; + + // get the affine matrix + cv::Mat affine_transform = GetAffineTransform( + box_center_x, + box_center_y, + scale_image_width, + scale_image_height, + 192, + 256 + ); + + cv::Mat affine_transform_reverse = GetAffineTransform( + box_center_x, + box_center_y, + scale_image_width, + scale_image_height, + 192, + 256, + true + ); + + // affine transform + cv::Mat affine_image; + cv::warpAffine(input_mat_copy, affine_image, affine_transform, cv::Size(192, 256), cv::INTER_LINEAR); + //cv::imwrite("affine_img.jpg", affine_image); + + result_pair = std::make_pair(affine_image, affine_transform_reverse); + + return result_pair; +} + +void RTMPoseOnnxruntime::PrintModelInfo(Ort::Session& session) +{ + // print the number of model input nodes + size_t num_input_nodes = session.GetInputCount(); + size_t 
num_output_nodes = session.GetOutputCount(); + std::cout << "Number of input node is:" << num_input_nodes << std::endl; + std::cout << "Number of output node is:" << num_output_nodes << std::endl; + + // print node name + Ort::AllocatorWithDefaultOptions allocator; + std::cout << std::endl; + for (auto i = 0; i < num_input_nodes; i++) + std::cout << "The input op-name " << i << " is:" << session.GetInputNameAllocated(i, allocator) << std::endl; + for (auto i = 0; i < num_output_nodes; i++) + std::cout << "The output op-name " << i << " is:" << session.GetOutputNameAllocated(i, allocator) << std::endl; + + // print input and output dims + for (auto i = 0; i < num_input_nodes; i++) + { + std::vector input_dims = session.GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape(); + std::cout << std::endl << "input " << i << " dim is: "; + for (auto j = 0; j < input_dims.size(); j++) + std::cout << input_dims[j] << " "; + } + for (auto i = 0; i < num_output_nodes; i++) + { + std::vector output_dims = session.GetOutputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape(); + std::cout << std::endl << "output " << i << " dim is: "; + for (auto j = 0; j < output_dims.size(); j++) + std::cout << output_dims[j] << " "; + } +} diff --git a/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmpose_onnxruntime.h b/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmpose_onnxruntime.h index f23adacb34..ae7339052d 100644 --- a/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmpose_onnxruntime.h +++ b/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmpose_onnxruntime.h @@ -1,34 +1,34 @@ -#ifndef _RTM_POSE_ONNXRUNTIME_H_ -#define _RTM_POSE_ONNXRUNTIME_H_ - -#include - -#include "onnxruntime_cxx_api.h" -#include "cpu_provider_factory.h" -#include "opencv2/opencv.hpp" - -#include "rtmdet_onnxruntime.h" -#include "rtmpose_utils.h" - -class RTMPoseOnnxruntime -{ -public: - RTMPoseOnnxruntime() = delete; - RTMPoseOnnxruntime(const std::string& onnx_model_path); - virtual~RTMPoseOnnxruntime(); - -public: - std::vector Inference(const cv::Mat& input_mat, const DetectBox& box); - -private: - std::pair CropImageByDetectBox(const cv::Mat& input_image, const DetectBox& box); - -private: - void PrintModelInfo(Ort::Session& session); - -private: - Ort::Env m_env; - Ort::Session m_session; -}; - -#endif // !_RTM_POSE_ONNXRUNTIME_H_ +#ifndef _RTM_POSE_ONNXRUNTIME_H_ +#define _RTM_POSE_ONNXRUNTIME_H_ + +#include + +#include "onnxruntime_cxx_api.h" +#include "cpu_provider_factory.h" +#include "opencv2/opencv.hpp" + +#include "rtmdet_onnxruntime.h" +#include "rtmpose_utils.h" + +class RTMPoseOnnxruntime +{ +public: + RTMPoseOnnxruntime() = delete; + RTMPoseOnnxruntime(const std::string& onnx_model_path); + virtual~RTMPoseOnnxruntime(); + +public: + std::vector Inference(const cv::Mat& input_mat, const DetectBox& box); + +private: + std::pair CropImageByDetectBox(const cv::Mat& input_image, const DetectBox& box); + +private: + void PrintModelInfo(Ort::Session& session); + +private: + Ort::Env m_env; + Ort::Session m_session; +}; + +#endif // !_RTM_POSE_ONNXRUNTIME_H_ diff --git a/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmpose_tracker_onnxruntime.cpp b/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmpose_tracker_onnxruntime.cpp index 4ad83ada47..12f647b52d 100644 --- 
a/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmpose_tracker_onnxruntime.cpp +++ b/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmpose_tracker_onnxruntime.cpp @@ -1,34 +1,34 @@ -#include "rtmpose_tracker_onnxruntime.h" - -RTMPoseTrackerOnnxruntime::RTMPoseTrackerOnnxruntime(const std::string& det_model_path, const std::string& pose_model_path, int dectect_interval) - :m_rtm_det_ptr(nullptr), - m_rtm_pose_ptr(nullptr), - m_frame_num(0), - m_dectect_interval(dectect_interval) -{ - m_rtm_det_ptr = std::make_unique(det_model_path); - m_rtm_pose_ptr = std::make_unique(pose_model_path); -} - -RTMPoseTrackerOnnxruntime::~RTMPoseTrackerOnnxruntime() -{ -} - -std::pair> RTMPoseTrackerOnnxruntime::Inference(const cv::Mat& input_mat) -{ - std::pair> result; - - if (m_rtm_det_ptr == nullptr || m_rtm_pose_ptr == nullptr) - return result; - - if (m_frame_num % m_dectect_interval == 0) - { - m_detect_box = m_rtm_det_ptr->Inference(input_mat); - } - - std::vector pose_result = m_rtm_pose_ptr->Inference(input_mat, m_detect_box); - - m_frame_num += 1; - - return std::make_pair(m_detect_box, pose_result); -} +#include "rtmpose_tracker_onnxruntime.h" + +RTMPoseTrackerOnnxruntime::RTMPoseTrackerOnnxruntime(const std::string& det_model_path, const std::string& pose_model_path, int dectect_interval) + :m_rtm_det_ptr(nullptr), + m_rtm_pose_ptr(nullptr), + m_frame_num(0), + m_dectect_interval(dectect_interval) +{ + m_rtm_det_ptr = std::make_unique(det_model_path); + m_rtm_pose_ptr = std::make_unique(pose_model_path); +} + +RTMPoseTrackerOnnxruntime::~RTMPoseTrackerOnnxruntime() +{ +} + +std::pair> RTMPoseTrackerOnnxruntime::Inference(const cv::Mat& input_mat) +{ + std::pair> result; + + if (m_rtm_det_ptr == nullptr || m_rtm_pose_ptr == nullptr) + return result; + + if (m_frame_num % m_dectect_interval == 0) + { + m_detect_box = m_rtm_det_ptr->Inference(input_mat); + } + + std::vector pose_result = m_rtm_pose_ptr->Inference(input_mat, m_detect_box); + + m_frame_num += 1; + + return std::make_pair(m_detect_box, pose_result); +} diff --git a/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmpose_tracker_onnxruntime.h b/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmpose_tracker_onnxruntime.h index 76cb8a261b..063b8202c3 100644 --- a/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmpose_tracker_onnxruntime.h +++ b/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmpose_tracker_onnxruntime.h @@ -1,32 +1,32 @@ -#ifndef _RTM_POSE_TRACKER_ONNXRUNTIME_H_ -#define _RTM_POSE_TRACKER_ONNXRUNTIME_H_ - -#include "rtmdet_onnxruntime.h" -#include "rtmpose_onnxruntime.h" - -#include -#include - -class RTMPoseTrackerOnnxruntime -{ -public: - RTMPoseTrackerOnnxruntime() = delete; - RTMPoseTrackerOnnxruntime( - const std::string& det_model_path, - const std::string& pose_model_path, - int dectect_interval = 10 - ); - virtual~RTMPoseTrackerOnnxruntime(); - -public: - std::pair> Inference(const cv::Mat& input_mat); - -private: - std::unique_ptr m_rtm_det_ptr; - std::unique_ptr m_rtm_pose_ptr; - unsigned int m_frame_num; - DetectBox m_detect_box; - int m_dectect_interval; -}; - -#endif // !_RTM_POSE_TRACKER_ONNXRUNTIME_H_ +#ifndef _RTM_POSE_TRACKER_ONNXRUNTIME_H_ +#define _RTM_POSE_TRACKER_ONNXRUNTIME_H_ + +#include "rtmdet_onnxruntime.h" +#include "rtmpose_onnxruntime.h" + +#include 
+#include + +class RTMPoseTrackerOnnxruntime +{ +public: + RTMPoseTrackerOnnxruntime() = delete; + RTMPoseTrackerOnnxruntime( + const std::string& det_model_path, + const std::string& pose_model_path, + int dectect_interval = 10 + ); + virtual~RTMPoseTrackerOnnxruntime(); + +public: + std::pair> Inference(const cv::Mat& input_mat); + +private: + std::unique_ptr m_rtm_det_ptr; + std::unique_ptr m_rtm_pose_ptr; + unsigned int m_frame_num; + DetectBox m_detect_box; + int m_dectect_interval; +}; + +#endif // !_RTM_POSE_TRACKER_ONNXRUNTIME_H_ diff --git a/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmpose_utils.h b/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmpose_utils.h index 6ecb9ccd1b..8f0d09c316 100644 --- a/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmpose_utils.h +++ b/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmpose_utils.h @@ -1,115 +1,115 @@ -#ifndef _RTM_POSE_UTILS_H_ -#define _RTM_POSE_UTILS_H_ - -#include "opencv2/opencv.hpp" - -const std::vector IMAGE_MEAN{ 123.675, 116.28, 103.53 }; -const std::vector IMAGE_STD{ 58.395, 57.12, 57.375 }; - -struct DetectBox -{ - int left; - int top; - int right; - int bottom; - float score; - int label; - - DetectBox() - { - left = -1; - top = -1; - right = -1; - bottom = -1; - score = -1.0; - label = -1; - } - - bool IsValid() const - { - return left != -1 && top != -1 && right != -1 && bottom != -1 && score != -1.0 && label != -1; - } -}; - -static bool BoxCompare( - const DetectBox& a, - const DetectBox& b) { - return a.score > b.score; -} - -struct PosePoint -{ - int x; - int y; - float score; - - PosePoint() - { - x = 0; - y = 0; - score = 0.0; - } -}; - -typedef PosePoint Vector2D; - - -static cv::Mat GetAffineTransform(float center_x, float center_y, float scale_width, float scale_height, int output_image_width, int output_image_height, bool inverse = false) -{ - // solve the affine transformation matrix - - // get the three points corresponding to the source picture and the target picture - cv::Point2f src_point_1; - src_point_1.x = center_x; - src_point_1.y = center_y; - - cv::Point2f src_point_2; - src_point_2.x = center_x; - src_point_2.y = center_y - scale_width * 0.5; - - cv::Point2f src_point_3; - src_point_3.x = src_point_2.x - (src_point_1.y - src_point_2.y); - src_point_3.y = src_point_2.y + (src_point_1.x - src_point_2.x); - - - float alphapose_image_center_x = output_image_width / 2; - float alphapose_image_center_y = output_image_height / 2; - - cv::Point2f dst_point_1; - dst_point_1.x = alphapose_image_center_x; - dst_point_1.y = alphapose_image_center_y; - - cv::Point2f dst_point_2; - dst_point_2.x = alphapose_image_center_x; - dst_point_2.y = alphapose_image_center_y - output_image_width * 0.5; - - cv::Point2f dst_point_3; - dst_point_3.x = dst_point_2.x - (dst_point_1.y - dst_point_2.y); - dst_point_3.y = dst_point_2.y + (dst_point_1.x - dst_point_2.x); - - - cv::Point2f srcPoints[3]; - srcPoints[0] = src_point_1; - srcPoints[1] = src_point_2; - srcPoints[2] = src_point_3; - - cv::Point2f dstPoints[3]; - dstPoints[0] = dst_point_1; - dstPoints[1] = dst_point_2; - dstPoints[2] = dst_point_3; - - // get affine matrix - cv::Mat affineTransform; - if (inverse) - { - affineTransform = cv::getAffineTransform(dstPoints, srcPoints); - } - else - { - affineTransform = cv::getAffineTransform(srcPoints, dstPoints); - } - - return affineTransform; -} - 
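// Illustrative usage sketch (an annotation added for clarity, not part of the original header;
// the variable names below are placeholders): GetAffineTransform is used in pairs, mirroring
// CropImageByDetectBox and the anti-affine step above. The forward matrix warps the scaled
// person box into the fixed 192x256 RTMPose input, and the inverse matrix maps predicted
// keypoints back onto the original image:
//
//   cv::Mat forward = GetAffineTransform(cx, cy, scaled_w, scaled_h, 192, 256);
//   cv::Mat inverse = GetAffineTransform(cx, cy, scaled_w, scaled_h, 192, 256, true);
//   cv::warpAffine(src, crop, forward, cv::Size(192, 256), cv::INTER_LINEAR);
//   cv::Mat pt = (cv::Mat_<double>(3, 1) << keypoint_x, keypoint_y, 1.0);
//   cv::Mat restored = inverse * pt;   // 2x1 result: x and y on the original image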
-#endif // !_RTM_POSE_UTILS_H_ +#ifndef _RTM_POSE_UTILS_H_ +#define _RTM_POSE_UTILS_H_ + +#include "opencv2/opencv.hpp" + +const std::vector IMAGE_MEAN{ 123.675, 116.28, 103.53 }; +const std::vector IMAGE_STD{ 58.395, 57.12, 57.375 }; + +struct DetectBox +{ + int left; + int top; + int right; + int bottom; + float score; + int label; + + DetectBox() + { + left = -1; + top = -1; + right = -1; + bottom = -1; + score = -1.0; + label = -1; + } + + bool IsValid() const + { + return left != -1 && top != -1 && right != -1 && bottom != -1 && score != -1.0 && label != -1; + } +}; + +static bool BoxCompare( + const DetectBox& a, + const DetectBox& b) { + return a.score > b.score; +} + +struct PosePoint +{ + int x; + int y; + float score; + + PosePoint() + { + x = 0; + y = 0; + score = 0.0; + } +}; + +typedef PosePoint Vector2D; + + +static cv::Mat GetAffineTransform(float center_x, float center_y, float scale_width, float scale_height, int output_image_width, int output_image_height, bool inverse = false) +{ + // solve the affine transformation matrix + + // get the three points corresponding to the source picture and the target picture + cv::Point2f src_point_1; + src_point_1.x = center_x; + src_point_1.y = center_y; + + cv::Point2f src_point_2; + src_point_2.x = center_x; + src_point_2.y = center_y - scale_width * 0.5; + + cv::Point2f src_point_3; + src_point_3.x = src_point_2.x - (src_point_1.y - src_point_2.y); + src_point_3.y = src_point_2.y + (src_point_1.x - src_point_2.x); + + + float alphapose_image_center_x = output_image_width / 2; + float alphapose_image_center_y = output_image_height / 2; + + cv::Point2f dst_point_1; + dst_point_1.x = alphapose_image_center_x; + dst_point_1.y = alphapose_image_center_y; + + cv::Point2f dst_point_2; + dst_point_2.x = alphapose_image_center_x; + dst_point_2.y = alphapose_image_center_y - output_image_width * 0.5; + + cv::Point2f dst_point_3; + dst_point_3.x = dst_point_2.x - (dst_point_1.y - dst_point_2.y); + dst_point_3.y = dst_point_2.y + (dst_point_1.x - dst_point_2.x); + + + cv::Point2f srcPoints[3]; + srcPoints[0] = src_point_1; + srcPoints[1] = src_point_2; + srcPoints[2] = src_point_3; + + cv::Point2f dstPoints[3]; + dstPoints[0] = dst_point_1; + dstPoints[1] = dst_point_2; + dstPoints[2] = dst_point_3; + + // get affine matrix + cv::Mat affineTransform; + if (inverse) + { + affineTransform = cv::getAffineTransform(dstPoints, srcPoints); + } + else + { + affineTransform = cv::getAffineTransform(srcPoints, dstPoints); + } + + return affineTransform; +} + +#endif // !_RTM_POSE_UTILS_H_ diff --git a/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/README.md b/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/README.md index c9615d89d3..171f0d35cf 100644 --- a/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/README.md +++ b/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/README.md @@ -1,73 +1,73 @@ -# rtmpose_tensorrt - -## Description - -This repository is use the TensorRT to deploy RTMDet and RTMPose. Your computer should have these components: - -- NVIDIA GPU -- CUDA -- cudnn -- TensorRT 8.x -- OPENCV -- VS2019 - -The effect of the code is as follows: - -![mabaoguo](https://github.com/Dominic23331/rtmpose_tensorrt/assets/53283758/568563be-a31d-4d03-9629-842dad3745e2) - -## Get Started - -### I. Convert Model - -#### 1. RTMDet - -When you start to convert a RTMDet model, you can use **convert_rtmdet.py** to convert pth file to onnx. 
- -```shell -python convert_rtmdet.py --config --checkpoint --output -``` - -Note that RTMDet should be the mmdetection version, and the conversion of mmyolo is not supported. - -#### 2. RTMPose - -You can use mmdeploy to convert RTMPose. The mmdeploy config file should use **configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py**. The convert command as follow: - -```shell -python tools/deploy.py -``` - -#### 3. Convert to TensorRT engine file - -You can use trtexec to convert an ONNX file to engine file. The command as follow: - -``` -trtexec --onnx= --saveEngine= -``` - -**Note that the engine files included in the project are only for storing examples. As the engine files generated by TensorRT are related to hardware, it is necessary to regenerate the engine files on the computer where the code needs to be run.** - -### II. Run - -At first, you should fill in the model locations for RTMDet and RTMPose as follows: - -```c++ -// set engine file path -string detEngineFile = "./model/rtmdet.engine"; -string poseEngineFile = "./model/rtmpose_m.engine"; -``` - -Then, you can set the cap to video file or camera. - -``` -// open cap -cv::VideoCapture cap(0); -``` - -If you want to change iou threshold or confidence threshold, you can change them when you initialize RTMDet model. - -``` -RTMDet det_model(detEngineFile, logger, 0.5, 0.65); -``` - -Finally, you can run the **main.cpp** file to get result. +# rtmpose_tensorrt + +## Description + +This repository uses TensorRT to deploy RTMDet and RTMPose. Your computer should have these components: + +- NVIDIA GPU +- CUDA +- cudnn +- TensorRT 8.x +- OPENCV +- VS2019 + +The effect of the code is as follows: + +![mabaoguo](https://github.com/Dominic23331/rtmpose_tensorrt/assets/53283758/568563be-a31d-4d03-9629-842dad3745e2) + +## Get Started + +### I. Convert Model + +#### 1. RTMDet + +To convert an RTMDet model, you can use **convert_rtmdet.py** to convert the pth file to onnx. + +```shell +python convert_rtmdet.py --config --checkpoint --output +``` + +Note that RTMDet should be the mmdetection version; converting the mmyolo version is not supported. + +#### 2. RTMPose + +You can use mmdeploy to convert RTMPose. The mmdeploy config file should be **configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py**. The conversion command is as follows: + +```shell +python tools/deploy.py +``` + +#### 3. Convert to TensorRT engine file + +You can use trtexec to convert an ONNX file to an engine file. The command is as follows: + +``` -trtexec --onnx= --saveEngine= +``` + +**Note that the engine files included in the project are only examples. Since TensorRT engine files are hardware-specific, they must be regenerated on the computer where the code will run.** + +### II. Run + +First, fill in the model locations for RTMDet and RTMPose as follows: + +```c++ +// set engine file path +string detEngineFile = "./model/rtmdet.engine"; +string poseEngineFile = "./model/rtmpose_m.engine"; +``` + +Then, set the capture source to a video file or a camera. + +``` +// open cap +cv::VideoCapture cap(0); +``` + +If you want to change the IoU threshold or the confidence threshold, you can set them when initializing the RTMDet model. + +``` +RTMDet det_model(detEngineFile, logger, 0.5, 0.65); +``` + +Finally, you can run the **main.cpp** file to get the result.
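For reference, the conversion for the detection model can be chained as below. The config, checkpoint, and file names are placeholders chosen for illustration, not files shipped with this project; produce the RTMPose ONNX file with mmdeploy as described above, then build both engines with trtexec so the paths match the defaults hard-coded in **main.cpp**.

```shell
# Export RTMDet (mmdetection version) to ONNX; config/checkpoint paths are examples only
python convert_rtmdet.py --config rtmdet_person_config.py --checkpoint rtmdet_person.pth --output rtmdet.onnx

# Build TensorRT engines (the RTMPose ONNX comes from the mmdeploy step above)
trtexec --onnx=rtmdet.onnx --saveEngine=./model/rtmdet.engine
trtexec --onnx=rtmpose_m.onnx --saveEngine=./model/rtmpose_m.engine
```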
diff --git a/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/python/convert_rtmdet.py b/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/python/convert_rtmdet.py index 81196413dd..0f030419f9 100644 --- a/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/python/convert_rtmdet.py +++ b/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/python/convert_rtmdet.py @@ -1,115 +1,115 @@ -import argparse - -import torch -import torch.nn.functional as F -from mmdet.apis import init_detector -from torch import nn - - -def build_model_from_cfg(config_path: str, checkpoint_path: str, device): - model = init_detector(config_path, checkpoint_path, device=device) - model.eval() - return model - - -class RTMDet(nn.Module): - """Load RTMDet model and add postprocess. - - Args: - model (nn.Module): The RTMDet model. - """ - - def __init__(self, model: nn.Module) -> None: - super().__init__() - self.model = model - self.stage = [80, 40, 20] - self.input_shape = 640 - - def forward(self, inputs): - """model forward function.""" - boxes = [] - neck_outputs = self.model(inputs) - for i, (cls, box) in enumerate(zip(*neck_outputs)): - cls = cls.permute(0, 2, 3, 1) - box = box.permute(0, 2, 3, 1) - box = self.decode(box, cls, i) - boxes.append(box) - result_box = torch.cat(boxes, dim=1) - return result_box - - def decode(self, box: torch.Tensor, cls: torch.Tensor, stage: int): - """RTMDet postprocess function. - - Args: - box (torch.Tensor): output boxes. - cls (torch.Tensor): output cls. - stage (int): RTMDet output stage. - - Returns: - torch.Tensor: The decode boxes. - Format is [x1, y1, x2, y2, class, confidence] - """ - cls = F.sigmoid(cls) - conf = torch.max(cls, dim=3, keepdim=True)[0] - cls = torch.argmax(cls, dim=3, keepdim=True).to(torch.float32) - - box = torch.cat([box, cls, conf], dim=-1) - - step = self.input_shape // self.stage[stage] - - block_step = torch.linspace( - 0, self.stage[stage] - 1, steps=self.stage[stage], - device='cuda') * step - block_x = torch.broadcast_to(block_step, - [self.stage[stage], self.stage[stage]]) - block_y = torch.transpose(block_x, 1, 0) - block_x = torch.unsqueeze(block_x, 0) - block_y = torch.unsqueeze(block_y, 0) - block = torch.stack([block_x, block_y], -1) - - box[..., :2] = block - box[..., :2] - box[..., 2:4] = block + box[..., 2:4] - box = box.reshape(1, -1, 6) - return box - - -def parse_args(): - parser = argparse.ArgumentParser( - description='convert rtmdet model to ONNX.') - parser.add_argument( - '--config', type=str, help='rtmdet config file path from mmdetection.') - parser.add_argument( - '--checkpoint', - type=str, - help='rtmdet checkpoint path from mmdetection.') - parser.add_argument('--output', type=str, help='output filename.') - parser.add_argument( - '--device', - type=str, - default='cuda:0', - help='Device used for inference') - parser.add_argument( - '--input-name', type=str, default='image', help='ONNX input name.') - parser.add_argument( - '--output-name', type=str, default='output', help='ONNX output name.') - parser.add_argument( - '--opset', type=int, default=11, help='ONNX opset version.') - args = parser.parse_args() - return args - - -if __name__ == '__main__': - args = parse_args() - - model = build_model_from_cfg(args.config, args.checkpoint, args.device) - rtmdet = RTMDet(model) - rtmdet.eval() - x = torch.randn((1, 3, 640, 640), device=args.device) - - torch.onnx.export( - rtmdet, - x, - args.output, - input_names=[args.input_name], - output_names=[args.output_name], - opset_version=args.opset) 
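# Optional sanity check (added for illustration, not part of this project): the decode above
# reshapes the predictions to (1, -1, 6) in [x1, y1, x2, y2, class, confidence] order, so a
# 640x640 input yields 80*80 + 40*40 + 20*20 = 8400 rows, matching the postprocess loop in
# rtmdet.cpp below. The file name 'rtmdet.onnx' and the onnxruntime dependency are assumptions.
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession('rtmdet.onnx', providers=['CPUExecutionProvider'])
dummy = np.random.randn(1, 3, 640, 640).astype(np.float32)
(dets,) = sess.run(None, {sess.get_inputs()[0].name: dummy})
print(dets.shape)  # expected: (1, 8400, 6)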
+import argparse + +import torch +import torch.nn.functional as F +from mmdet.apis import init_detector +from torch import nn + + +def build_model_from_cfg(config_path: str, checkpoint_path: str, device): + model = init_detector(config_path, checkpoint_path, device=device) + model.eval() + return model + + +class RTMDet(nn.Module): + """Load RTMDet model and add postprocess. + + Args: + model (nn.Module): The RTMDet model. + """ + + def __init__(self, model: nn.Module) -> None: + super().__init__() + self.model = model + self.stage = [80, 40, 20] + self.input_shape = 640 + + def forward(self, inputs): + """model forward function.""" + boxes = [] + neck_outputs = self.model(inputs) + for i, (cls, box) in enumerate(zip(*neck_outputs)): + cls = cls.permute(0, 2, 3, 1) + box = box.permute(0, 2, 3, 1) + box = self.decode(box, cls, i) + boxes.append(box) + result_box = torch.cat(boxes, dim=1) + return result_box + + def decode(self, box: torch.Tensor, cls: torch.Tensor, stage: int): + """RTMDet postprocess function. + + Args: + box (torch.Tensor): output boxes. + cls (torch.Tensor): output cls. + stage (int): RTMDet output stage. + + Returns: + torch.Tensor: The decode boxes. + Format is [x1, y1, x2, y2, class, confidence] + """ + cls = F.sigmoid(cls) + conf = torch.max(cls, dim=3, keepdim=True)[0] + cls = torch.argmax(cls, dim=3, keepdim=True).to(torch.float32) + + box = torch.cat([box, cls, conf], dim=-1) + + step = self.input_shape // self.stage[stage] + + block_step = torch.linspace( + 0, self.stage[stage] - 1, steps=self.stage[stage], + device='cuda') * step + block_x = torch.broadcast_to(block_step, + [self.stage[stage], self.stage[stage]]) + block_y = torch.transpose(block_x, 1, 0) + block_x = torch.unsqueeze(block_x, 0) + block_y = torch.unsqueeze(block_y, 0) + block = torch.stack([block_x, block_y], -1) + + box[..., :2] = block - box[..., :2] + box[..., 2:4] = block + box[..., 2:4] + box = box.reshape(1, -1, 6) + return box + + +def parse_args(): + parser = argparse.ArgumentParser( + description='convert rtmdet model to ONNX.') + parser.add_argument( + '--config', type=str, help='rtmdet config file path from mmdetection.') + parser.add_argument( + '--checkpoint', + type=str, + help='rtmdet checkpoint path from mmdetection.') + parser.add_argument('--output', type=str, help='output filename.') + parser.add_argument( + '--device', + type=str, + default='cuda:0', + help='Device used for inference') + parser.add_argument( + '--input-name', type=str, default='image', help='ONNX input name.') + parser.add_argument( + '--output-name', type=str, default='output', help='ONNX output name.') + parser.add_argument( + '--opset', type=int, default=11, help='ONNX opset version.') + args = parser.parse_args() + return args + + +if __name__ == '__main__': + args = parse_args() + + model = build_model_from_cfg(args.config, args.checkpoint, args.device) + rtmdet = RTMDet(model) + rtmdet.eval() + x = torch.randn((1, 3, 640, 640), device=args.device) + + torch.onnx.export( + rtmdet, + x, + args.output, + input_names=[args.input_name], + output_names=[args.output_name], + opset_version=args.opset) diff --git a/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/inference.cpp b/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/inference.cpp index bc4e8449a7..f9433c1c86 100644 --- a/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/inference.cpp +++ 
b/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/inference.cpp @@ -1,38 +1,38 @@ -#include "inference.h" - - -/** - * @brief Inference network - * @param image Input image - * @param detect_model RTMDet model - * @param pose_model RTMPose model - * @return Inference result -*/ -std::vector> inference(cv::Mat& image, RTMDet& detect_model, RTMPose& pose_model) -{ - cv::Mat im0; - image.copyTo(im0); - - // inference detection model - std::vector det_result = detect_model.predict(image); - std::vector> result; - for (int i = 0; i < det_result.size(); i++) - { - // Select the detection box labeled as human - if (!isEqual(det_result[i].cls, 0.0)) - continue; - - // cut image to input the pose model - cv::Mat person_image = img_cut(im0, det_result[i].x1, det_result[i].y1, det_result[i].x2, det_result[i].y2); - std::vector pose_result = pose_model.predict(person_image); - - // Restore points to original image - for (int j = 0; j < pose_result.size(); j++) - { - pose_result[j].x += det_result[i].x1; - pose_result[j].y += det_result[i].y1; - } - result.push_back(pose_result); - } - return result; -} +#include "inference.h" + + +/** + * @brief Inference network + * @param image Input image + * @param detect_model RTMDet model + * @param pose_model RTMPose model + * @return Inference result +*/ +std::vector> inference(cv::Mat& image, RTMDet& detect_model, RTMPose& pose_model) +{ + cv::Mat im0; + image.copyTo(im0); + + // inference detection model + std::vector det_result = detect_model.predict(image); + std::vector> result; + for (int i = 0; i < det_result.size(); i++) + { + // Select the detection box labeled as human + if (!isEqual(det_result[i].cls, 0.0)) + continue; + + // cut image to input the pose model + cv::Mat person_image = img_cut(im0, det_result[i].x1, det_result[i].y1, det_result[i].x2, det_result[i].y2); + std::vector pose_result = pose_model.predict(person_image); + + // Restore points to original image + for (int j = 0; j < pose_result.size(); j++) + { + pose_result[j].x += det_result[i].x1; + pose_result[j].y += det_result[i].y1; + } + result.push_back(pose_result); + } + return result; +} diff --git a/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/inference.h b/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/inference.h index 8f603ffc1c..4da1b8d5b1 100644 --- a/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/inference.h +++ b/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/inference.h @@ -1,14 +1,14 @@ -#pragma once -#include -#include - -#include -#include - -#include "rtmdet.h" -#include "rtmpose.h" -#include "utils.h" - - - -std::vector> inference(cv::Mat& image, RTMDet& detect_model, RTMPose& pose_model); +#pragma once +#include +#include + +#include +#include + +#include "rtmdet.h" +#include "rtmpose.h" +#include "utils.h" + + + +std::vector> inference(cv::Mat& image, RTMDet& detect_model, RTMPose& pose_model); diff --git a/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/main.cpp b/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/main.cpp index 3799bca896..6dcf60f997 100644 --- a/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/main.cpp +++ b/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/main.cpp @@ -1,63 +1,63 @@ -#include -#include -#include -#include -#include - -#include "rtmdet.h" -#include "rtmpose.h" 
-#include "utils.h" -#include "inference.h" - - -using namespace std; - -/** - * @brief Setting up Tensorrt logger -*/ -class Logger : public nvinfer1::ILogger -{ - void log(Severity severity, const char* msg) noexcept override - { - // Only output logs with severity greater than warning - if (severity <= Severity::kWARNING) - std::cout << msg << std::endl; - } -}logger; - - -int main() -{ - // set engine file path - string detEngineFile = "./model/rtmdet.engine"; - string poseEngineFile = "./model/rtmpose_m.engine"; - - // init model - RTMDet det_model(detEngineFile, logger); - RTMPose pose_model(poseEngineFile, logger); - - // open cap - cv::VideoCapture cap(0); - - while (cap.isOpened()) - { - cv::Mat frame; - cv::Mat show_frame; - cap >> frame; - - if (frame.empty()) - break; - - frame.copyTo(show_frame); - auto result = inference(frame, det_model, pose_model); - draw_pose(show_frame, result); - - cv::imshow("result", show_frame); - if (cv::waitKey(1) == 'q') - break; - } - cv::destroyAllWindows(); - cap.release(); - - return 0; -} +#include +#include +#include +#include +#include + +#include "rtmdet.h" +#include "rtmpose.h" +#include "utils.h" +#include "inference.h" + + +using namespace std; + +/** + * @brief Setting up Tensorrt logger +*/ +class Logger : public nvinfer1::ILogger +{ + void log(Severity severity, const char* msg) noexcept override + { + // Only output logs with severity greater than warning + if (severity <= Severity::kWARNING) + std::cout << msg << std::endl; + } +}logger; + + +int main() +{ + // set engine file path + string detEngineFile = "./model/rtmdet.engine"; + string poseEngineFile = "./model/rtmpose_m.engine"; + + // init model + RTMDet det_model(detEngineFile, logger); + RTMPose pose_model(poseEngineFile, logger); + + // open cap + cv::VideoCapture cap(0); + + while (cap.isOpened()) + { + cv::Mat frame; + cv::Mat show_frame; + cap >> frame; + + if (frame.empty()) + break; + + frame.copyTo(show_frame); + auto result = inference(frame, det_model, pose_model); + draw_pose(show_frame, result); + + cv::imshow("result", show_frame); + if (cv::waitKey(1) == 'q') + break; + } + cv::destroyAllWindows(); + cap.release(); + + return 0; +} diff --git a/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/rtmdet.cpp b/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/rtmdet.cpp index abc8ebd32d..91c9c9db45 100644 --- a/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/rtmdet.cpp +++ b/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/rtmdet.cpp @@ -1,198 +1,198 @@ -#include "rtmdet.h" - - -// set network params -float RTMDet::input_h = 640; -float RTMDet::input_w = 640; -float RTMDet::mean[3] = { 123.675, 116.28, 103.53 }; -float RTMDet::std[3] = { 58.395, 57.12, 57.375 }; - -/** - * @brief RTMDet`s constructor - * @param model_path RTMDet engine file path - * @param logger Nvinfer ILogger - * @param conf_thre The confidence threshold - * @param iou_thre The iou threshold of nms -*/ -RTMDet::RTMDet(std::string model_path, nvinfer1::ILogger& logger, float conf_thre, float iou_thre) : conf_thre(conf_thre), iou_thre(iou_thre) -{ - // read the engine file - std::ifstream engineStream(model_path, std::ios::binary); - engineStream.seekg(0, std::ios::end); - const size_t modelSize = engineStream.tellg(); - engineStream.seekg(0, std::ios::beg); - std::unique_ptr engineData(new char[modelSize]); - engineStream.read(engineData.get(), modelSize); - engineStream.close(); - - 
// create tensorrt model - runtime = nvinfer1::createInferRuntime(logger); - engine = runtime->deserializeCudaEngine(engineData.get(), modelSize); - context = engine->createExecutionContext(); - - // Define input dimensions - context->setBindingDimensions(0, nvinfer1::Dims4(1, 3, input_h, input_w)); - - // create CUDA stream - cudaStreamCreate(&stream); - - // Initialize offset - offset.push_back(0); - offset.push_back(0); -} - - -/** - * @brief RTMDet`s destructor -*/ -RTMDet::~RTMDet() -{ - cudaFree(stream); - cudaFree(buffer[0]); - cudaFree(buffer[1]); -} - - -/** - * @brief Display network input and output parameters -*/ -void RTMDet::show() -{ - for (int i = 0; i < engine->getNbBindings(); i++) - { - std::cout << "node: " << engine->getBindingName(i) << ", "; - if (engine->bindingIsInput(i)) - { - std::cout << "type: input" << ", "; - } - else - { - std::cout << "type: output" << ", "; - } - nvinfer1::Dims dim = engine->getBindingDimensions(i); - std::cout << "dimensions: "; - for (int d = 0; d < dim.nbDims; d++) - { - std::cout << dim.d[d] << " "; - } - std::cout << "\n"; - } -} - - -/** - * @brief Network preprocessing function - * @param image Input image - * @return Processed Tensor -*/ -std::vector RTMDet::preprocess(cv::Mat& image) -{ - // resize image - std::tuple resized = resize(image, input_w, input_h); - cv::Mat resized_image = std::get<0>(resized); - offset[0] = std::get<1>(resized); - offset[1] = std::get<2>(resized); - - // BGR2RGB - cv::cvtColor(resized_image, resized_image, cv::COLOR_BGR2RGB); - - // subtract mean and divide variance - std::vector input_tensor; - for (int k = 0; k < 3; k++) - { - for (int i = 0; i < resized_image.rows; i++) - { - for (int j = 0; j < resized_image.cols; j++) - { - input_tensor.emplace_back(((float)resized_image.at(i, j)[k] - mean[k]) / std[k]); - } - } - } - - return input_tensor; -} - - -/** - * @brief Network post-processing function - * @param boxes_result The result of rtmdet - * @param img_w The width of input image - * @param img_h The height of input image - * @return Detect boxes -*/ -std::vector RTMDet::postprocess(std::vector boxes_result, int img_w, int img_h) -{ - std::vector result; - std::vector buff; - for (int i = 0; i < 8400; i++) - { - // x1, y1, x2, y2, class, confidence - buff.insert(buff.end(), boxes_result.begin() + i * 6, boxes_result.begin() + i * 6 + 6); - // drop the box which confidence less than threshold - if (buff[5] < conf_thre) - { - buff.clear(); - continue; - } - - Box box; - box.x1 = buff[0]; - box.y1 = buff[1]; - box.x2 = buff[2]; - box.y2 = buff[3]; - box.cls = buff[4]; - box.conf = buff[5]; - result.emplace_back(box); - buff.clear(); - } - - // nms - result = non_maximum_suppression(result, iou_thre); - - // return the box to real image - for (int i = 0; i < result.size(); i++) - { - result[i].x1 = MAX((result[i].x1 - offset[0]) * img_w / (input_w - 2 * offset[0]), 0); - result[i].y1 = MAX((result[i].y1 - offset[1]) * img_h / (input_h - 2 * offset[1]), 0); - result[i].x2 = MIN((result[i].x2 - offset[0]) * img_w / (input_w - 2 * offset[0]), img_w); - result[i].y2 = MIN((result[i].y2 - offset[1]) * img_h / (input_h - 2 * offset[1]), img_h); - } - - return result; -} - - -/** - * @brief Predict function - * @param image Input image - * @return Predict results -*/ -std::vector RTMDet::predict(cv::Mat& image) -{ - // get input image size - int img_w = image.cols; - int img_h = image.rows; - std::vector input = preprocess(image); - - // apply for GPU space - cudaMalloc(&buffer[0], 3 * input_h * input_w * 
sizeof(float)); - cudaMalloc(&buffer[1], 8400 * 6 * sizeof(float)); - - // copy data to GPU - cudaMemcpyAsync(buffer[0], input.data(), 3 * input_h * input_w * sizeof(float), cudaMemcpyHostToDevice, stream); - - // network inference - context->enqueueV2(buffer, stream, nullptr); - cudaStreamSynchronize(stream); - - // get result from GPU - std::vector boxes_result(8400 * 6); - cudaMemcpyAsync(boxes_result.data(), buffer[1], 8400 * 6 * sizeof(float), cudaMemcpyDeviceToHost); - - std::vector result = postprocess(boxes_result, img_w, img_h); - - cudaFree(buffer[0]); - cudaFree(buffer[1]); - - return result; -} +#include "rtmdet.h" + + +// set network params +float RTMDet::input_h = 640; +float RTMDet::input_w = 640; +float RTMDet::mean[3] = { 123.675, 116.28, 103.53 }; +float RTMDet::std[3] = { 58.395, 57.12, 57.375 }; + +/** + * @brief RTMDet`s constructor + * @param model_path RTMDet engine file path + * @param logger Nvinfer ILogger + * @param conf_thre The confidence threshold + * @param iou_thre The iou threshold of nms +*/ +RTMDet::RTMDet(std::string model_path, nvinfer1::ILogger& logger, float conf_thre, float iou_thre) : conf_thre(conf_thre), iou_thre(iou_thre) +{ + // read the engine file + std::ifstream engineStream(model_path, std::ios::binary); + engineStream.seekg(0, std::ios::end); + const size_t modelSize = engineStream.tellg(); + engineStream.seekg(0, std::ios::beg); + std::unique_ptr engineData(new char[modelSize]); + engineStream.read(engineData.get(), modelSize); + engineStream.close(); + + // create tensorrt model + runtime = nvinfer1::createInferRuntime(logger); + engine = runtime->deserializeCudaEngine(engineData.get(), modelSize); + context = engine->createExecutionContext(); + + // Define input dimensions + context->setBindingDimensions(0, nvinfer1::Dims4(1, 3, input_h, input_w)); + + // create CUDA stream + cudaStreamCreate(&stream); + + // Initialize offset + offset.push_back(0); + offset.push_back(0); +} + + +/** + * @brief RTMDet`s destructor +*/ +RTMDet::~RTMDet() +{ + cudaFree(stream); + cudaFree(buffer[0]); + cudaFree(buffer[1]); +} + + +/** + * @brief Display network input and output parameters +*/ +void RTMDet::show() +{ + for (int i = 0; i < engine->getNbBindings(); i++) + { + std::cout << "node: " << engine->getBindingName(i) << ", "; + if (engine->bindingIsInput(i)) + { + std::cout << "type: input" << ", "; + } + else + { + std::cout << "type: output" << ", "; + } + nvinfer1::Dims dim = engine->getBindingDimensions(i); + std::cout << "dimensions: "; + for (int d = 0; d < dim.nbDims; d++) + { + std::cout << dim.d[d] << " "; + } + std::cout << "\n"; + } +} + + +/** + * @brief Network preprocessing function + * @param image Input image + * @return Processed Tensor +*/ +std::vector RTMDet::preprocess(cv::Mat& image) +{ + // resize image + std::tuple resized = resize(image, input_w, input_h); + cv::Mat resized_image = std::get<0>(resized); + offset[0] = std::get<1>(resized); + offset[1] = std::get<2>(resized); + + // BGR2RGB + cv::cvtColor(resized_image, resized_image, cv::COLOR_BGR2RGB); + + // subtract mean and divide variance + std::vector input_tensor; + for (int k = 0; k < 3; k++) + { + for (int i = 0; i < resized_image.rows; i++) + { + for (int j = 0; j < resized_image.cols; j++) + { + input_tensor.emplace_back(((float)resized_image.at(i, j)[k] - mean[k]) / std[k]); + } + } + } + + return input_tensor; +} + + +/** + * @brief Network post-processing function + * @param boxes_result The result of rtmdet + * @param img_w The width of input image + * @param 
img_h The height of input image + * @return Detect boxes +*/ +std::vector RTMDet::postprocess(std::vector boxes_result, int img_w, int img_h) +{ + std::vector result; + std::vector buff; + for (int i = 0; i < 8400; i++) + { + // x1, y1, x2, y2, class, confidence + buff.insert(buff.end(), boxes_result.begin() + i * 6, boxes_result.begin() + i * 6 + 6); + // drop the box which confidence less than threshold + if (buff[5] < conf_thre) + { + buff.clear(); + continue; + } + + Box box; + box.x1 = buff[0]; + box.y1 = buff[1]; + box.x2 = buff[2]; + box.y2 = buff[3]; + box.cls = buff[4]; + box.conf = buff[5]; + result.emplace_back(box); + buff.clear(); + } + + // nms + result = non_maximum_suppression(result, iou_thre); + + // return the box to real image + for (int i = 0; i < result.size(); i++) + { + result[i].x1 = MAX((result[i].x1 - offset[0]) * img_w / (input_w - 2 * offset[0]), 0); + result[i].y1 = MAX((result[i].y1 - offset[1]) * img_h / (input_h - 2 * offset[1]), 0); + result[i].x2 = MIN((result[i].x2 - offset[0]) * img_w / (input_w - 2 * offset[0]), img_w); + result[i].y2 = MIN((result[i].y2 - offset[1]) * img_h / (input_h - 2 * offset[1]), img_h); + } + + return result; +} + + +/** + * @brief Predict function + * @param image Input image + * @return Predict results +*/ +std::vector RTMDet::predict(cv::Mat& image) +{ + // get input image size + int img_w = image.cols; + int img_h = image.rows; + std::vector input = preprocess(image); + + // apply for GPU space + cudaMalloc(&buffer[0], 3 * input_h * input_w * sizeof(float)); + cudaMalloc(&buffer[1], 8400 * 6 * sizeof(float)); + + // copy data to GPU + cudaMemcpyAsync(buffer[0], input.data(), 3 * input_h * input_w * sizeof(float), cudaMemcpyHostToDevice, stream); + + // network inference + context->enqueueV2(buffer, stream, nullptr); + cudaStreamSynchronize(stream); + + // get result from GPU + std::vector boxes_result(8400 * 6); + cudaMemcpyAsync(boxes_result.data(), buffer[1], 8400 * 6 * sizeof(float), cudaMemcpyDeviceToHost); + + std::vector result = postprocess(boxes_result, img_w, img_h); + + cudaFree(buffer[0]); + cudaFree(buffer[1]); + + return result; +} diff --git a/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/rtmdet.h b/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/rtmdet.h index 7a30a9d48e..c5112c8492 100644 --- a/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/rtmdet.h +++ b/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/rtmdet.h @@ -1,40 +1,40 @@ -#pragma once -#include -#include -#include -#include -#include -#include - -#include "utils.h" - - - -class RTMDet -{ -public: - RTMDet(std::string model_path, nvinfer1::ILogger& logger, float conf_thre=0.5, float iou_thre=0.65); - void show(); - std::vector predict(cv::Mat& image); - ~RTMDet(); - -private: - static float input_w; - static float input_h; - static float mean[3]; - static float std[3]; - - float conf_thre; - float iou_thre; - std::vector offset; - - nvinfer1::IRuntime* runtime; - nvinfer1::ICudaEngine* engine; - nvinfer1::IExecutionContext* context; - - void* buffer[2]; - cudaStream_t stream; - - std::vector preprocess(cv::Mat& image); - std::vector postprocess(std::vector boxes_result, int img_w, int img_h); -}; +#pragma once +#include +#include +#include +#include +#include +#include + +#include "utils.h" + + + +class RTMDet +{ +public: + RTMDet(std::string model_path, nvinfer1::ILogger& logger, float conf_thre=0.5, float iou_thre=0.65); + void 
show(); + std::vector predict(cv::Mat& image); + ~RTMDet(); + +private: + static float input_w; + static float input_h; + static float mean[3]; + static float std[3]; + + float conf_thre; + float iou_thre; + std::vector offset; + + nvinfer1::IRuntime* runtime; + nvinfer1::ICudaEngine* engine; + nvinfer1::IExecutionContext* context; + + void* buffer[2]; + cudaStream_t stream; + + std::vector preprocess(cv::Mat& image); + std::vector postprocess(std::vector boxes_result, int img_w, int img_h); +}; diff --git a/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/rtmpose.cpp b/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/rtmpose.cpp index 1a190ceda2..cf2ed25d88 100644 --- a/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/rtmpose.cpp +++ b/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/rtmpose.cpp @@ -1,193 +1,193 @@ -#include "rtmpose.h" - - -// set network params -float RTMPose::input_h = 256; -float RTMPose::input_w = 192; -int RTMPose::extend_width = 384; -int RTMPose::extend_height = 512; -int RTMPose::num_points = 17; -float RTMPose::mean[3] = { 123.675, 116.28, 103.53 }; -float RTMPose::std[3] = { 58.395, 57.12, 57.375 }; - -/** - * @brief RTMPose`s constructor - * @param model_path RTMPose engine file path - * @param logger Nvinfer ILogger -*/ -RTMPose::RTMPose(std::string model_path, nvinfer1::ILogger& logger) -{ - // read the engine file - std::ifstream engineStream(model_path, std::ios::binary); - engineStream.seekg(0, std::ios::end); - const size_t modelSize = engineStream.tellg(); - engineStream.seekg(0, std::ios::beg); - std::unique_ptr engineData(new char[modelSize]); - engineStream.read(engineData.get(), modelSize); - engineStream.close(); - - // create tensorrt model - runtime = nvinfer1::createInferRuntime(logger); - engine = runtime->deserializeCudaEngine(engineData.get(), modelSize); - context = engine->createExecutionContext(); - - // Define input dimensions - context->setBindingDimensions(0, nvinfer1::Dims4(1, 3, input_h, input_w)); - - // create CUDA stream - cudaStreamCreate(&stream); - - // Initialize offset - offset.push_back(0); - offset.push_back(0); -} - -/** - * @brief RTMPose`s destructor -*/ -RTMPose::~RTMPose() -{ - cudaFree(stream); - cudaFree(buffer[0]); - cudaFree(buffer[1]); - cudaFree(buffer[2]); -} - - -/** - * @brief Display network input and output parameters -*/ -void RTMPose::show() -{ - for (int i = 0; i < engine->getNbBindings(); i++) - { - std::cout << "node: " << engine->getBindingName(i) << ", "; - if (engine->bindingIsInput(i)) - { - std::cout << "type: input" << ", "; - } - else - { - std::cout << "type: output" << ", "; - } - nvinfer1::Dims dim = engine->getBindingDimensions(i); - std::cout << "dimensions: "; - for (int d = 0; d < dim.nbDims; d++) - { - std::cout << dim.d[d] << " "; - } - std::cout << "\n"; - } -} - - -/** - * @brief Network preprocessing function - * @param image Input image - * @return Processed Tensor -*/ -std::vector RTMPose::preprocess(cv::Mat& image) -{ - // resize image - std::tuple resized = resize(image, input_w, input_h); - cv::Mat resized_image = std::get<0>(resized); - offset[0] = std::get<1>(resized); - offset[1] = std::get<2>(resized); - - // BGR2RGB - cv::cvtColor(resized_image, resized_image, cv::COLOR_BGR2RGB); - - // subtract mean and divide variance - std::vector input_tensor; - for (int k = 0; k < 3; k++) - { - for (int i = 0; i < resized_image.rows; i++) - { - for (int j = 0; j < 
resized_image.cols; j++) - { - input_tensor.emplace_back(((float)resized_image.at(i, j)[k] - mean[k]) / std[k]); - } - } - } - - return input_tensor; -} - - -/** - * @brief Network post-processing function - * @param simcc_x_result SimCC x dimension output - * @param simcc_y_result SimCC y dimension output - * @param img_w The width of input image - * @param img_h The height of input image - * @return -*/ -std::vector RTMPose::postprocess(std::vector simcc_x_result, std::vector simcc_y_result, int img_w, int img_h) -{ - std::vector pose_result; - for (int i = 0; i < num_points; ++i) - { - // find the maximum and maximum indexes in the value of each Extend_width length - auto x_biggest_iter = std::max_element(simcc_x_result.begin() + i * extend_width, simcc_x_result.begin() + i * extend_width + extend_width); - int max_x_pos = std::distance(simcc_x_result.begin() + i * extend_width, x_biggest_iter); - int pose_x = max_x_pos / 2; - float score_x = *x_biggest_iter; - - // find the maximum and maximum indexes in the value of each exten_height length - auto y_biggest_iter = std::max_element(simcc_y_result.begin() + i * extend_height, simcc_y_result.begin() + i * extend_height + extend_height); - int max_y_pos = std::distance(simcc_y_result.begin() + i * extend_height, y_biggest_iter); - int pose_y = max_y_pos / 2; - float score_y = *y_biggest_iter; - - // get point confidence - float score = MAX(score_x, score_y); - - PosePoint temp_point; - temp_point.x = (pose_x - offset[0]) * img_w / (input_w - 2 * offset[0]); - temp_point.y = (pose_y - offset[1]) * img_h / (input_h - 2 * offset[1]); - temp_point.score = score; - pose_result.emplace_back(temp_point); - } - - return pose_result; -} - - -/** - * @brief Predict function - * @param image Input image - * @return Predict results -*/ -std::vector RTMPose::predict(cv::Mat& image) -{ - // get input image size - int img_w = image.cols; - int img_h = image.rows; - std::vector input = preprocess(image); - - // apply for GPU space - cudaMalloc(&buffer[0], 3 * input_h * input_w * sizeof(float)); - cudaMalloc(&buffer[1], num_points * extend_width * sizeof(float)); - cudaMalloc(&buffer[2], num_points * extend_height * sizeof(float)); - - // copy data to GPU - cudaMemcpyAsync(buffer[0], input.data(), 3 * input_h * input_w * sizeof(float), cudaMemcpyHostToDevice, stream); - - // network inference - context->enqueueV2(buffer, stream, nullptr); - cudaStreamSynchronize(stream); - - // get result from GPU - std::vector simcc_x_result(num_points * extend_width); - std::vector simcc_y_result(num_points * extend_height); - cudaMemcpyAsync(simcc_x_result.data(), buffer[1], num_points * extend_width * sizeof(float), cudaMemcpyDeviceToHost); - cudaMemcpyAsync(simcc_y_result.data(), buffer[2], num_points * extend_height * sizeof(float), cudaMemcpyDeviceToHost); - - std::vector pose_result = postprocess(simcc_x_result, simcc_y_result, img_w, img_h); - - cudaFree(buffer[0]); - cudaFree(buffer[1]); - cudaFree(buffer[2]); - - return pose_result; -} +#include "rtmpose.h" + + +// set network params +float RTMPose::input_h = 256; +float RTMPose::input_w = 192; +int RTMPose::extend_width = 384; +int RTMPose::extend_height = 512; +int RTMPose::num_points = 17; +float RTMPose::mean[3] = { 123.675, 116.28, 103.53 }; +float RTMPose::std[3] = { 58.395, 57.12, 57.375 }; + +/** + * @brief RTMPose`s constructor + * @param model_path RTMPose engine file path + * @param logger Nvinfer ILogger +*/ +RTMPose::RTMPose(std::string model_path, nvinfer1::ILogger& logger) +{ + // read the engine 
file + std::ifstream engineStream(model_path, std::ios::binary); + engineStream.seekg(0, std::ios::end); + const size_t modelSize = engineStream.tellg(); + engineStream.seekg(0, std::ios::beg); + std::unique_ptr engineData(new char[modelSize]); + engineStream.read(engineData.get(), modelSize); + engineStream.close(); + + // create tensorrt model + runtime = nvinfer1::createInferRuntime(logger); + engine = runtime->deserializeCudaEngine(engineData.get(), modelSize); + context = engine->createExecutionContext(); + + // Define input dimensions + context->setBindingDimensions(0, nvinfer1::Dims4(1, 3, input_h, input_w)); + + // create CUDA stream + cudaStreamCreate(&stream); + + // Initialize offset + offset.push_back(0); + offset.push_back(0); +} + +/** + * @brief RTMPose`s destructor +*/ +RTMPose::~RTMPose() +{ + cudaFree(stream); + cudaFree(buffer[0]); + cudaFree(buffer[1]); + cudaFree(buffer[2]); +} + + +/** + * @brief Display network input and output parameters +*/ +void RTMPose::show() +{ + for (int i = 0; i < engine->getNbBindings(); i++) + { + std::cout << "node: " << engine->getBindingName(i) << ", "; + if (engine->bindingIsInput(i)) + { + std::cout << "type: input" << ", "; + } + else + { + std::cout << "type: output" << ", "; + } + nvinfer1::Dims dim = engine->getBindingDimensions(i); + std::cout << "dimensions: "; + for (int d = 0; d < dim.nbDims; d++) + { + std::cout << dim.d[d] << " "; + } + std::cout << "\n"; + } +} + + +/** + * @brief Network preprocessing function + * @param image Input image + * @return Processed Tensor +*/ +std::vector RTMPose::preprocess(cv::Mat& image) +{ + // resize image + std::tuple resized = resize(image, input_w, input_h); + cv::Mat resized_image = std::get<0>(resized); + offset[0] = std::get<1>(resized); + offset[1] = std::get<2>(resized); + + // BGR2RGB + cv::cvtColor(resized_image, resized_image, cv::COLOR_BGR2RGB); + + // subtract mean and divide variance + std::vector input_tensor; + for (int k = 0; k < 3; k++) + { + for (int i = 0; i < resized_image.rows; i++) + { + for (int j = 0; j < resized_image.cols; j++) + { + input_tensor.emplace_back(((float)resized_image.at(i, j)[k] - mean[k]) / std[k]); + } + } + } + + return input_tensor; +} + + +/** + * @brief Network post-processing function + * @param simcc_x_result SimCC x dimension output + * @param simcc_y_result SimCC y dimension output + * @param img_w The width of input image + * @param img_h The height of input image + * @return +*/ +std::vector RTMPose::postprocess(std::vector simcc_x_result, std::vector simcc_y_result, int img_w, int img_h) +{ + std::vector pose_result; + for (int i = 0; i < num_points; ++i) + { + // find the maximum and maximum indexes in the value of each Extend_width length + auto x_biggest_iter = std::max_element(simcc_x_result.begin() + i * extend_width, simcc_x_result.begin() + i * extend_width + extend_width); + int max_x_pos = std::distance(simcc_x_result.begin() + i * extend_width, x_biggest_iter); + int pose_x = max_x_pos / 2; + float score_x = *x_biggest_iter; + + // find the maximum and maximum indexes in the value of each exten_height length + auto y_biggest_iter = std::max_element(simcc_y_result.begin() + i * extend_height, simcc_y_result.begin() + i * extend_height + extend_height); + int max_y_pos = std::distance(simcc_y_result.begin() + i * extend_height, y_biggest_iter); + int pose_y = max_y_pos / 2; + float score_y = *y_biggest_iter; + + // get point confidence + float score = MAX(score_x, score_y); + + PosePoint temp_point; + temp_point.x = (pose_x - 
offset[0]) * img_w / (input_w - 2 * offset[0]); + temp_point.y = (pose_y - offset[1]) * img_h / (input_h - 2 * offset[1]); + temp_point.score = score; + pose_result.emplace_back(temp_point); + } + + return pose_result; +} + + +/** + * @brief Predict function + * @param image Input image + * @return Predict results +*/ +std::vector RTMPose::predict(cv::Mat& image) +{ + // get input image size + int img_w = image.cols; + int img_h = image.rows; + std::vector input = preprocess(image); + + // apply for GPU space + cudaMalloc(&buffer[0], 3 * input_h * input_w * sizeof(float)); + cudaMalloc(&buffer[1], num_points * extend_width * sizeof(float)); + cudaMalloc(&buffer[2], num_points * extend_height * sizeof(float)); + + // copy data to GPU + cudaMemcpyAsync(buffer[0], input.data(), 3 * input_h * input_w * sizeof(float), cudaMemcpyHostToDevice, stream); + + // network inference + context->enqueueV2(buffer, stream, nullptr); + cudaStreamSynchronize(stream); + + // get result from GPU + std::vector simcc_x_result(num_points * extend_width); + std::vector simcc_y_result(num_points * extend_height); + cudaMemcpyAsync(simcc_x_result.data(), buffer[1], num_points * extend_width * sizeof(float), cudaMemcpyDeviceToHost); + cudaMemcpyAsync(simcc_y_result.data(), buffer[2], num_points * extend_height * sizeof(float), cudaMemcpyDeviceToHost); + + std::vector pose_result = postprocess(simcc_x_result, simcc_y_result, img_w, img_h); + + cudaFree(buffer[0]); + cudaFree(buffer[1]); + cudaFree(buffer[2]); + + return pose_result; +} diff --git a/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/rtmpose.h b/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/rtmpose.h index 0b1bca4924..2909baf3de 100644 --- a/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/rtmpose.h +++ b/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/rtmpose.h @@ -1,43 +1,43 @@ -#pragma once -#include -#include -#include -#include -#include -#include -#include -#include - -#include "utils.h" - - - -class RTMPose -{ -public: - RTMPose(std::string model_path, nvinfer1::ILogger &logger); - void show(); - std::vector predict(cv::Mat& image); - ~RTMPose(); - -private: - static float input_w; - static float input_h; - static int extend_width; - static int extend_height; - static float mean[3]; - static float std[3]; - static int num_points; - - std::vector offset; - - nvinfer1::IRuntime* runtime; - nvinfer1::ICudaEngine* engine; - nvinfer1::IExecutionContext* context; - - void* buffer[3]; - cudaStream_t stream; - - std::vector preprocess(cv::Mat& image); - std::vector postprocess(std::vector simcc_x_result, std::vector simcc_y_result, int img_w, int img_h); -}; +#pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include "utils.h" + + + +class RTMPose +{ +public: + RTMPose(std::string model_path, nvinfer1::ILogger &logger); + void show(); + std::vector predict(cv::Mat& image); + ~RTMPose(); + +private: + static float input_w; + static float input_h; + static int extend_width; + static int extend_height; + static float mean[3]; + static float std[3]; + static int num_points; + + std::vector offset; + + nvinfer1::IRuntime* runtime; + nvinfer1::ICudaEngine* engine; + nvinfer1::IExecutionContext* context; + + void* buffer[3]; + cudaStream_t stream; + + std::vector preprocess(cv::Mat& image); + std::vector postprocess(std::vector simcc_x_result, std::vector simcc_y_result, int img_w, int img_h); +}; 
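
The TensorRT example above splits the pipeline across two classes: `RTMDet` (`rtmdet.h`/`rtmdet.cpp`) produces detection boxes, and `RTMPose` (`rtmpose.h`/`rtmpose.cpp`) regresses SimCC keypoints on a crop of each box, with `utils.cpp` supplying the letterbox resize, NMS, cropping and drawing helpers. The hunks themselves appear to change only line endings, so purely for orientation here is a minimal, hypothetical sketch of how the two classes can be chained; it is not part of the patch. The engine file names, the `SimpleLogger` class and the shift of keypoints back to full-image coordinates are assumptions, and the `std::vector<Box>` / `std::vector<PosePoint>` element types are inferred from the surrounding implementations, since the template arguments were dropped from this copy of the diff (e.g. `std::vector predict(cv::Mat& image)` should read `std::vector<Box> predict(cv::Mat& image)` in `rtmdet.h`).

```cpp
// main_sketch.cpp -- NOT part of the patch above. A hypothetical end-to-end
// driver for the RTMDet + RTMPose TensorRT classes; engine paths, the logger
// class and the crop-to-frame coordinate shift are assumptions for illustration.
#include <iostream>
#include <vector>

#include <NvInfer.h>
#include <opencv2/opencv.hpp>

#include "rtmdet.h"
#include "rtmpose.h"
#include "utils.h"

// Minimal nvinfer1::ILogger implementation required by both constructors
// (assumes the TensorRT 8.x signature of ILogger::log).
class SimpleLogger : public nvinfer1::ILogger
{
public:
    void log(Severity severity, const char* msg) noexcept override
    {
        if (severity <= Severity::kWARNING)
            std::cout << "[TRT] " << msg << std::endl;
    }
};

int main()
{
    SimpleLogger logger;

    // Hypothetical serialized engines converted beforehand (e.g. with MMDeploy / trtexec).
    RTMDet det("rtmdet.engine", logger);     // detector, default conf/iou thresholds
    RTMPose pose("rtmpose.engine", logger);  // top-down SimCC pose estimator

    cv::Mat frame = cv::imread("demo.jpg");
    if (frame.empty())
    {
        std::cerr << "failed to read demo.jpg" << std::endl;
        return -1;
    }

    // resize() in utils.cpp modifies its input in place, so run the detector
    // on a clone and keep the original frame untouched for drawing.
    cv::Mat det_input = frame.clone();
    std::vector<Box> boxes = det.predict(det_input);

    std::vector<std::vector<PosePoint>> all_points;
    for (const Box& box : boxes)
    {
        // Crop the detected box and estimate keypoints on the crop.
        cv::Mat crop = img_cut(frame, (int)box.x1, (int)box.y1,
                               (int)box.x2, (int)box.y2).clone();
        std::vector<PosePoint> points = pose.predict(crop);

        // Keypoints come back in crop coordinates; shift them to frame coordinates.
        for (PosePoint& p : points)
        {
            p.x += (int)box.x1;
            p.y += (int)box.y1;
        }
        all_points.push_back(points);
    }

    draw_pose(frame, all_points);
    cv::imwrite("result.jpg", frame);
    return 0;
}
```

Building such a sketch would mean linking against OpenCV, the TensorRT runtime (`nvinfer`) and the CUDA runtime (`cudart`), i.e. whatever the example's Visual Studio project already configures; the sketch only illustrates the intended call order of the classes changed in this patch.
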
diff --git a/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/utils.cpp b/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/utils.cpp index 053b9e5a58..f084fc6db6 100644 --- a/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/utils.cpp +++ b/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/utils.cpp @@ -1,212 +1,212 @@ -#include "utils.h" - - -// set points links -std::vector> coco_17_joint_links = { - {0,1},{0,2},{1,3},{2,4},{5,7},{7,9},{6,8},{8,10},{5,6}, - {5,11},{6,12},{11,12},{11,13},{13,15},{12,14},{14,16} -}; - - -/** - * @brief Mix two images - * @param srcImage Original image - * @param mixImage Past image - * @param startPoint Start point - * @return Success or not -*/ -bool MixImage(cv::Mat& srcImage, cv::Mat mixImage, cv::Point startPoint) -{ - - if (!srcImage.data || !mixImage.data) - { - return false; - } - - int addCols = startPoint.x + mixImage.cols > srcImage.cols ? 0 : mixImage.cols; - int addRows = startPoint.y + mixImage.rows > srcImage.rows ? 0 : mixImage.rows; - if (addCols == 0 || addRows == 0) - { - return false; - } - - cv::Mat roiImage = srcImage(cv::Rect(startPoint.x, startPoint.y, addCols, addRows)); - - mixImage.copyTo(roiImage, mixImage); - return true; -} - - -/** - * @brief Resize image - * @param img Input image - * @param w Resized width - * @param h Resized height - * @return Resized image and offset -*/ -std::tuple resize(cv::Mat& img, int w, int h) -{ - cv::Mat result; - - int ih = img.rows; - int iw = img.cols; - - float scale = MIN(float(w) / float(iw), float(h) / float(ih)); - int nw = iw * scale; - int nh = ih * scale; - - cv::resize(img, img, cv::Size(nw, nh)); - result = cv::Mat::ones(cv::Size(w, h), CV_8UC1) * 128; - cv::cvtColor(result, result, cv::COLOR_GRAY2RGB); - cv::cvtColor(img, img, cv::COLOR_BGR2RGB); - - bool ifg = MixImage(result, img, cv::Point((w - nw) / 2, (h - nh) / 2)); - if (!ifg) - { - std::cerr << "MixImage failed" << std::endl; - abort(); - } - - std::tuple res_tuple = std::make_tuple(result, (w - nw) / 2, (h - nh) / 2); - - return res_tuple; -} - - -/** - * @brief Compare two boxes - * @param b1 Box1 - * @param b2 Box2 - * @return Compare result -*/ -bool compare_boxes(const Box& b1, const Box& b2) -{ - return b1.conf < b2.conf; -} - - -/** - * @brief Iou function - * @param b1 Box1 - * @param b2 Box2 - * @return Iou -*/ -float intersection_over_union(const Box& b1, const Box& b2) -{ - float x1 = std::max(b1.x1, b2.x1); - float y1 = std::max(b1.y1, b2.y1); - float x2 = std::min(b1.x2, b2.x2); - float y2 = std::min(b1.y2, b2.y2); - - // get intersection - float box_intersection = std::max((float)0, x2 - x1) * std::max((float)0, y2 - y1); - - // get union - float area1 = (b1.x2 - b1.x1) * (b1.y2 - b1.y1); - float area2 = (b2.x2 - b2.x1) * (b2.y2 - b2.y1); - float box_union = area1 + area2 - box_intersection; - - // To prevent the denominator from being zero, add a very small numerical value to the denominator - float iou = box_intersection / (box_union + 0.0001); - - return iou; -} - - -/** - * @brief Non-Maximum Suppression function - * @param boxes Input boxes - * @param iou_thre Iou threshold - * @return Boxes after nms -*/ -std::vector non_maximum_suppression(std::vector boxes, float iou_thre) -{ - // Sort boxes based on confidence - std::sort(boxes.begin(), boxes.end(), compare_boxes); - - std::vector result; - std::vector temp; - while (!boxes.empty()) - { - temp.clear(); - - Box chosen_box = boxes.back(); - 
boxes.pop_back(); - for (int i = 0; i < boxes.size(); i++) - { - if (boxes[i].cls != chosen_box.cls || intersection_over_union(boxes[i], chosen_box) < iou_thre) - temp.push_back(boxes[i]); - } - - boxes = temp; - result.push_back(chosen_box); - } - return result; -} - - -/** - * @brief Cut image - * @param image Input image - * @param x1 The left coordinate of cut box - * @param y1 The top coordinate of cut box - * @param x2 The right coordinate of cut box - * @param y2 The bottom coordinate of cut box - * @return Cut image -*/ -cv::Mat img_cut(cv::Mat& image, int x1, int y1, int x2, int y2) -{ - cv::Rect roi(x1, y1, x2 - x1, y2 - y1); - cv::Mat croppedImage = image(roi); - return croppedImage; -} - - -/** - * @brief Judge whether two floating point numbers are equal - * @param a Number a - * @param b Number b - * @return Result -*/ -bool isEqual(float a, float b) -{ - return std::fabs(a - b) < 1e-5; -} - - -/** - * @brief Draw detection result to image - * @param image Input image - * @param points Detection result -*/ -void draw_pose(cv::Mat& image, std::vector> points) -{ - for (int p = 0; p < points.size(); p++) - { - // draw points links - for (int i = 0; i < coco_17_joint_links.size(); i++) - { - std::pair joint_link = coco_17_joint_links[i]; - cv::line( - image, - cv::Point(points[p][joint_link.first].x, points[p][joint_link.first].y), - cv::Point(points[p][joint_link.second].x, points[p][joint_link.second].y), - cv::Scalar{ 0, 255, 0 }, - 2, - cv::LINE_AA - ); - } - //draw points - for (int i = 0; i < points[p].size(); i++) - { - cv::circle( - image, - cv::Point(points[p][i].x, points[p][i].y), - 1, - cv::Scalar{ 0, 0, 255 }, - 5, - cv::LINE_AA - ); - } - } -} +#include "utils.h" + + +// set points links +std::vector> coco_17_joint_links = { + {0,1},{0,2},{1,3},{2,4},{5,7},{7,9},{6,8},{8,10},{5,6}, + {5,11},{6,12},{11,12},{11,13},{13,15},{12,14},{14,16} +}; + + +/** + * @brief Mix two images + * @param srcImage Original image + * @param mixImage Past image + * @param startPoint Start point + * @return Success or not +*/ +bool MixImage(cv::Mat& srcImage, cv::Mat mixImage, cv::Point startPoint) +{ + + if (!srcImage.data || !mixImage.data) + { + return false; + } + + int addCols = startPoint.x + mixImage.cols > srcImage.cols ? 0 : mixImage.cols; + int addRows = startPoint.y + mixImage.rows > srcImage.rows ? 
0 : mixImage.rows; + if (addCols == 0 || addRows == 0) + { + return false; + } + + cv::Mat roiImage = srcImage(cv::Rect(startPoint.x, startPoint.y, addCols, addRows)); + + mixImage.copyTo(roiImage, mixImage); + return true; +} + + +/** + * @brief Resize image + * @param img Input image + * @param w Resized width + * @param h Resized height + * @return Resized image and offset +*/ +std::tuple resize(cv::Mat& img, int w, int h) +{ + cv::Mat result; + + int ih = img.rows; + int iw = img.cols; + + float scale = MIN(float(w) / float(iw), float(h) / float(ih)); + int nw = iw * scale; + int nh = ih * scale; + + cv::resize(img, img, cv::Size(nw, nh)); + result = cv::Mat::ones(cv::Size(w, h), CV_8UC1) * 128; + cv::cvtColor(result, result, cv::COLOR_GRAY2RGB); + cv::cvtColor(img, img, cv::COLOR_BGR2RGB); + + bool ifg = MixImage(result, img, cv::Point((w - nw) / 2, (h - nh) / 2)); + if (!ifg) + { + std::cerr << "MixImage failed" << std::endl; + abort(); + } + + std::tuple res_tuple = std::make_tuple(result, (w - nw) / 2, (h - nh) / 2); + + return res_tuple; +} + + +/** + * @brief Compare two boxes + * @param b1 Box1 + * @param b2 Box2 + * @return Compare result +*/ +bool compare_boxes(const Box& b1, const Box& b2) +{ + return b1.conf < b2.conf; +} + + +/** + * @brief Iou function + * @param b1 Box1 + * @param b2 Box2 + * @return Iou +*/ +float intersection_over_union(const Box& b1, const Box& b2) +{ + float x1 = std::max(b1.x1, b2.x1); + float y1 = std::max(b1.y1, b2.y1); + float x2 = std::min(b1.x2, b2.x2); + float y2 = std::min(b1.y2, b2.y2); + + // get intersection + float box_intersection = std::max((float)0, x2 - x1) * std::max((float)0, y2 - y1); + + // get union + float area1 = (b1.x2 - b1.x1) * (b1.y2 - b1.y1); + float area2 = (b2.x2 - b2.x1) * (b2.y2 - b2.y1); + float box_union = area1 + area2 - box_intersection; + + // To prevent the denominator from being zero, add a very small numerical value to the denominator + float iou = box_intersection / (box_union + 0.0001); + + return iou; +} + + +/** + * @brief Non-Maximum Suppression function + * @param boxes Input boxes + * @param iou_thre Iou threshold + * @return Boxes after nms +*/ +std::vector non_maximum_suppression(std::vector boxes, float iou_thre) +{ + // Sort boxes based on confidence + std::sort(boxes.begin(), boxes.end(), compare_boxes); + + std::vector result; + std::vector temp; + while (!boxes.empty()) + { + temp.clear(); + + Box chosen_box = boxes.back(); + boxes.pop_back(); + for (int i = 0; i < boxes.size(); i++) + { + if (boxes[i].cls != chosen_box.cls || intersection_over_union(boxes[i], chosen_box) < iou_thre) + temp.push_back(boxes[i]); + } + + boxes = temp; + result.push_back(chosen_box); + } + return result; +} + + +/** + * @brief Cut image + * @param image Input image + * @param x1 The left coordinate of cut box + * @param y1 The top coordinate of cut box + * @param x2 The right coordinate of cut box + * @param y2 The bottom coordinate of cut box + * @return Cut image +*/ +cv::Mat img_cut(cv::Mat& image, int x1, int y1, int x2, int y2) +{ + cv::Rect roi(x1, y1, x2 - x1, y2 - y1); + cv::Mat croppedImage = image(roi); + return croppedImage; +} + + +/** + * @brief Judge whether two floating point numbers are equal + * @param a Number a + * @param b Number b + * @return Result +*/ +bool isEqual(float a, float b) +{ + return std::fabs(a - b) < 1e-5; +} + + +/** + * @brief Draw detection result to image + * @param image Input image + * @param points Detection result +*/ +void draw_pose(cv::Mat& image, std::vector> points) +{ + 
for (int p = 0; p < points.size(); p++) + { + // draw points links + for (int i = 0; i < coco_17_joint_links.size(); i++) + { + std::pair joint_link = coco_17_joint_links[i]; + cv::line( + image, + cv::Point(points[p][joint_link.first].x, points[p][joint_link.first].y), + cv::Point(points[p][joint_link.second].x, points[p][joint_link.second].y), + cv::Scalar{ 0, 255, 0 }, + 2, + cv::LINE_AA + ); + } + //draw points + for (int i = 0; i < points[p].size(); i++) + { + cv::circle( + image, + cv::Point(points[p][i].x, points[p][i].y), + 1, + cv::Scalar{ 0, 0, 255 }, + 5, + cv::LINE_AA + ); + } + } +} diff --git a/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/utils.h b/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/utils.h index fa165c03ec..21a875e8a6 100644 --- a/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/utils.h +++ b/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/utils.h @@ -1,56 +1,56 @@ -#pragma once -#include -#include -#include -#include -#include - - -/** - * @brief Key point structure -*/ -struct PosePoint -{ - int x; - int y; - float score; - - PosePoint() - { - x = 0; - y = 0; - score = 0.0; - } -}; - -/** - * @brief Detection box structure -*/ -struct Box -{ - float x1; - float y1; - float x2; - float y2; - int cls; - float conf; - - Box() - { - x1 = 0; - y1 = 0; - x2 = 0; - y2 = 0; - cls = 0; - conf = 0; - } -}; - -bool MixImage(cv::Mat& srcImage, cv::Mat mixImage, cv::Point startPoint); -std::tuple resize(cv::Mat& img, int w, int h); -bool compare_boxes(const Box& b1, const Box& b2); -float intersection_over_union(const Box& b1, const Box& b2); -std::vector non_maximum_suppression(std::vector boxes, float iou_thre); -cv::Mat img_cut(cv::Mat& image, int x1, int y1, int x2, int y2); -bool isEqual(float a, float b); -void draw_pose(cv::Mat& image, std::vector> points); +#pragma once +#include +#include +#include +#include +#include + + +/** + * @brief Key point structure +*/ +struct PosePoint +{ + int x; + int y; + float score; + + PosePoint() + { + x = 0; + y = 0; + score = 0.0; + } +}; + +/** + * @brief Detection box structure +*/ +struct Box +{ + float x1; + float y1; + float x2; + float y2; + int cls; + float conf; + + Box() + { + x1 = 0; + y1 = 0; + x2 = 0; + y2 = 0; + cls = 0; + conf = 0; + } +}; + +bool MixImage(cv::Mat& srcImage, cv::Mat mixImage, cv::Point startPoint); +std::tuple resize(cv::Mat& img, int w, int h); +bool compare_boxes(const Box& b1, const Box& b2); +float intersection_over_union(const Box& b1, const Box& b2); +std::vector non_maximum_suppression(std::vector boxes, float iou_thre); +cv::Mat img_cut(cv::Mat& image, int x1, int y1, int x2, int y2); +bool isEqual(float a, float b); +void draw_pose(cv::Mat& image, std::vector> points); diff --git a/projects/rtmpose/examples/onnxruntime/README.md b/projects/rtmpose/examples/onnxruntime/README.md index 0e0f8b7f63..2b6ff29963 100644 --- a/projects/rtmpose/examples/onnxruntime/README.md +++ b/projects/rtmpose/examples/onnxruntime/README.md @@ -1,87 +1,87 @@ -# RTMPose inference with ONNXRuntime - -This example shows how to run RTMPose inference with ONNXRuntime in Python. - -## Prerequisites - -### 1. Install onnxruntime inference engine. - -Choose one of the following ways to install onnxruntime. 
- -- CPU version - -```bash -wget https://github.com/microsoft/onnxruntime/releases/download/v1.8.1/onnxruntime-linux-x64-1.8.1.tgz -tar -zxvf onnxruntime-linux-x64-1.8.1.tgz -export ONNXRUNTIME_DIR=$(pwd)/onnxruntime-linux-x64-1.8.1 -export LD_LIBRARY_PATH=$ONNXRUNTIME_DIR/lib:$LD_LIBRARY_PATH -``` - -- GPU version - -```bash -pip install onnxruntime-gpu==1.8.1 -wget https://github.com/microsoft/onnxruntime/releases/download/v1.8.1/onnxruntime-linux-x64-gpu-1.8.1.tgz -tar -zxvf onnxruntime-linux-x64-gpu-1.8.1.tgz -export ONNXRUNTIME_DIR=$(pwd)/onnxruntime-linux-x64-gpu-1.8.1 -export LD_LIBRARY_PATH=$ONNXRUNTIME_DIR/lib:$LD_LIBRARY_PATH -``` - -### 2. Convert model to onnx files - -- Install `mim` tool. - -```bash -pip install -U openmim -``` - -- Download `mmpose` model. - -```bash -# choose one rtmpose model -mim download mmpose --config rtmpose-m_8xb64-270e_coco-wholebody-256x192 --dest . -``` - -- Clone `mmdeploy` repo. - -```bash -git clone https://github.com/open-mmlab/mmdeploy.git -``` - -- Convert model to onnx files. - -```bash -python mmdeploy/tools/deploy.py \ - mmdeploy/configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \ - mmpose/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py \ - mmpose/rtmpose-m_simcc-coco-wholebody_pt-aic-coco_270e-256x192-cd5e845c_20230123.pth \ - mmdeploy/demo/resources/human-pose.jpg \ - --work-dir mmdeploy_model/mmpose/ort \ - --device cuda \ - --dump-info -``` - -## Run demo - -### Install dependencies - -```bash -pip install -r requirements.txt -``` - -### Usage: - -```bash -python main.py \ - {ONNX_FILE} \ - {IMAGE_FILE} \ - --device {DEVICE} \ - --save-path {SAVE_PATH} -``` - -### Description of all arguments - -- `ONNX_FILE`: The path of onnx file -- `IMAGE_FILE`: The path of image file -- `DEVICE`: The device to run the model, default is `cpu` -- `SAVE_PATH`: The path to save the output image, default is `output.jpg` +# RTMPose inference with ONNXRuntime + +This example shows how to run RTMPose inference with ONNXRuntime in Python. + +## Prerequisites + +### 1. Install onnxruntime inference engine. + +Choose one of the following ways to install onnxruntime. + +- CPU version + +```bash +wget https://github.com/microsoft/onnxruntime/releases/download/v1.8.1/onnxruntime-linux-x64-1.8.1.tgz +tar -zxvf onnxruntime-linux-x64-1.8.1.tgz +export ONNXRUNTIME_DIR=$(pwd)/onnxruntime-linux-x64-1.8.1 +export LD_LIBRARY_PATH=$ONNXRUNTIME_DIR/lib:$LD_LIBRARY_PATH +``` + +- GPU version + +```bash +pip install onnxruntime-gpu==1.8.1 +wget https://github.com/microsoft/onnxruntime/releases/download/v1.8.1/onnxruntime-linux-x64-gpu-1.8.1.tgz +tar -zxvf onnxruntime-linux-x64-gpu-1.8.1.tgz +export ONNXRUNTIME_DIR=$(pwd)/onnxruntime-linux-x64-gpu-1.8.1 +export LD_LIBRARY_PATH=$ONNXRUNTIME_DIR/lib:$LD_LIBRARY_PATH +``` + +### 2. Convert model to onnx files + +- Install `mim` tool. + +```bash +pip install -U openmim +``` + +- Download `mmpose` model. + +```bash +# choose one rtmpose model +mim download mmpose --config rtmpose-m_8xb64-270e_coco-wholebody-256x192 --dest . +``` + +- Clone `mmdeploy` repo. + +```bash +git clone https://github.com/open-mmlab/mmdeploy.git +``` + +- Convert model to onnx files. 
+ +```bash +python mmdeploy/tools/deploy.py \ + mmdeploy/configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \ + mmpose/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py \ + mmpose/rtmpose-m_simcc-coco-wholebody_pt-aic-coco_270e-256x192-cd5e845c_20230123.pth \ + mmdeploy/demo/resources/human-pose.jpg \ + --work-dir mmdeploy_model/mmpose/ort \ + --device cuda \ + --dump-info +``` + +## Run demo + +### Install dependencies + +```bash +pip install -r requirements.txt +``` + +### Usage: + +```bash +python main.py \ + {ONNX_FILE} \ + {IMAGE_FILE} \ + --device {DEVICE} \ + --save-path {SAVE_PATH} +``` + +### Description of all arguments + +- `ONNX_FILE`: The path of onnx file +- `IMAGE_FILE`: The path of image file +- `DEVICE`: The device to run the model, default is `cpu` +- `SAVE_PATH`: The path to save the output image, default is `output.jpg` diff --git a/projects/rtmpose/examples/onnxruntime/README_CN.md b/projects/rtmpose/examples/onnxruntime/README_CN.md index 684d42d5ff..42192f3783 100644 --- a/projects/rtmpose/examples/onnxruntime/README_CN.md +++ b/projects/rtmpose/examples/onnxruntime/README_CN.md @@ -1,87 +1,87 @@ -# 使用ONNXRuntime进行RTMPose推理 - -本示例展示了如何在Python中用ONNXRuntime推理RTMPose模型。 - -## 准备 - -### 1. 安装onnxruntime推理引擎. - -选择以下方式之一来安装onnxruntime。 - -- CPU版本 - -```bash -wget https://github.com/microsoft/onnxruntime/releases/download/v1.8.1/onnxruntime-linux-x64-1.8.1.tgz -tar -zxvf onnxruntime-linux-x64-1.8.1.tgz -export ONNXRUNTIME_DIR=$(pwd)/onnxruntime-linux-x64-1.8.1 -export LD_LIBRARY_PATH=$ONNXRUNTIME_DIR/lib:$LD_LIBRARY_PATH -``` - -- GPU版本 - -```bash -pip install onnxruntime-gpu==1.8.1 -wget https://github.com/microsoft/onnxruntime/releases/download/v1.8.1/onnxruntime-linux-x64-gpu-1.8.1.tgz -tar -zxvf onnxruntime-linux-x64-gpu-1.8.1.tgz -export ONNXRUNTIME_DIR=$(pwd)/onnxruntime-linux-x64-gpu-1.8.1 -export LD_LIBRARY_PATH=$ONNXRUNTIME_DIR/lib:$LD_LIBRARY_PATH -``` - -### 2. 将模型转换为onnx文件 - -- 安装`mim`工具 - -```bash -pip install -U openmim -``` - -- 下载`mmpose`模型 - -```bash -# choose one rtmpose model -mim download mmpose --config rtmpose-m_8xb64-270e_coco-wholebody-256x192 --dest . -``` - -- 克隆`mmdeploy`仓库 - -```bash -git clone https://github.com/open-mmlab/mmdeploy.git -``` - -- 将模型转换为onnx文件 - -```bash -python mmdeploy/tools/deploy.py \ - mmdeploy/configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \ - mmpose/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py \ - mmpose/rtmpose-m_simcc-coco-wholebody_pt-aic-coco_270e-256x192-cd5e845c_20230123.pth \ - mmdeploy/demo/resources/human-pose.jpg \ - --work-dir mmdeploy_model/mmpose/ort \ - --device cuda \ - --dump-info -``` - -## 运行 - -### 安装依赖 - -```bash -pip install -r requirements.txt -``` - -### 用法: - -```bash -python main.py \ - {ONNX_FILE} \ - {IMAGE_FILE} \ - --device {DEVICE} \ - --save-path {SAVE_PATH} -``` - -### 参数解释 - -- `ONNX_FILE`: onnx文件的路径 -- `IMAGE_FILE`: 图像文件的路径 -- `DEVICE`: 运行模型的设备,默认为\`cpu' -- `SAVE_PATH`: 保存输出图像的路径,默认为 "output.jpg" +# 使用ONNXRuntime进行RTMPose推理 + +本示例展示了如何在Python中用ONNXRuntime推理RTMPose模型。 + +## 准备 + +### 1. 安装onnxruntime推理引擎. 
+ +选择以下方式之一来安装onnxruntime。 + +- CPU版本 + +```bash +wget https://github.com/microsoft/onnxruntime/releases/download/v1.8.1/onnxruntime-linux-x64-1.8.1.tgz +tar -zxvf onnxruntime-linux-x64-1.8.1.tgz +export ONNXRUNTIME_DIR=$(pwd)/onnxruntime-linux-x64-1.8.1 +export LD_LIBRARY_PATH=$ONNXRUNTIME_DIR/lib:$LD_LIBRARY_PATH +``` + +- GPU版本 + +```bash +pip install onnxruntime-gpu==1.8.1 +wget https://github.com/microsoft/onnxruntime/releases/download/v1.8.1/onnxruntime-linux-x64-gpu-1.8.1.tgz +tar -zxvf onnxruntime-linux-x64-gpu-1.8.1.tgz +export ONNXRUNTIME_DIR=$(pwd)/onnxruntime-linux-x64-gpu-1.8.1 +export LD_LIBRARY_PATH=$ONNXRUNTIME_DIR/lib:$LD_LIBRARY_PATH +``` + +### 2. 将模型转换为onnx文件 + +- 安装`mim`工具 + +```bash +pip install -U openmim +``` + +- 下载`mmpose`模型 + +```bash +# choose one rtmpose model +mim download mmpose --config rtmpose-m_8xb64-270e_coco-wholebody-256x192 --dest . +``` + +- 克隆`mmdeploy`仓库 + +```bash +git clone https://github.com/open-mmlab/mmdeploy.git +``` + +- 将模型转换为onnx文件 + +```bash +python mmdeploy/tools/deploy.py \ + mmdeploy/configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \ + mmpose/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py \ + mmpose/rtmpose-m_simcc-coco-wholebody_pt-aic-coco_270e-256x192-cd5e845c_20230123.pth \ + mmdeploy/demo/resources/human-pose.jpg \ + --work-dir mmdeploy_model/mmpose/ort \ + --device cuda \ + --dump-info +``` + +## 运行 + +### 安装依赖 + +```bash +pip install -r requirements.txt +``` + +### 用法: + +```bash +python main.py \ + {ONNX_FILE} \ + {IMAGE_FILE} \ + --device {DEVICE} \ + --save-path {SAVE_PATH} +``` + +### 参数解释 + +- `ONNX_FILE`: onnx文件的路径 +- `IMAGE_FILE`: 图像文件的路径 +- `DEVICE`: 运行模型的设备,默认为\`cpu' +- `SAVE_PATH`: 保存输出图像的路径,默认为 "output.jpg" diff --git a/projects/rtmpose/examples/onnxruntime/main.py b/projects/rtmpose/examples/onnxruntime/main.py index df4858c8dd..30b458ce38 100644 --- a/projects/rtmpose/examples/onnxruntime/main.py +++ b/projects/rtmpose/examples/onnxruntime/main.py @@ -1,472 +1,472 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import argparse -import time -from typing import List, Tuple - -import cv2 -import loguru -import numpy as np -import onnxruntime as ort - -logger = loguru.logger - - -def parse_args(): - parser = argparse.ArgumentParser( - description='RTMPose ONNX inference demo.') - parser.add_argument('onnx_file', help='ONNX file path') - parser.add_argument('image_file', help='Input image file path') - parser.add_argument( - '--device', help='device type for inference', default='cpu') - parser.add_argument( - '--save-path', - help='path to save the output image', - default='output.jpg') - args = parser.parse_args() - return args - - -def preprocess( - img: np.ndarray, input_size: Tuple[int, int] = (192, 256) -) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: - """Do preprocessing for RTMPose model inference. - - Args: - img (np.ndarray): Input image in shape. - input_size (tuple): Input image size in shape (w, h). - - Returns: - tuple: - - resized_img (np.ndarray): Preprocessed image. - - center (np.ndarray): Center of image. - - scale (np.ndarray): Scale of image. 
- """ - # get shape of image - img_shape = img.shape[:2] - bbox = np.array([0, 0, img_shape[1], img_shape[0]]) - - # get center and scale - center, scale = bbox_xyxy2cs(bbox, padding=1.25) - - # do affine transformation - resized_img, scale = top_down_affine(input_size, scale, center, img) - - # normalize image - mean = np.array([123.675, 116.28, 103.53]) - std = np.array([58.395, 57.12, 57.375]) - resized_img = (resized_img - mean) / std - - return resized_img, center, scale - - -def build_session(onnx_file: str, device: str = 'cpu') -> ort.InferenceSession: - """Build onnxruntime session. - - Args: - onnx_file (str): ONNX file path. - device (str): Device type for inference. - - Returns: - sess (ort.InferenceSession): ONNXRuntime session. - """ - providers = ['CPUExecutionProvider' - ] if device == 'cpu' else ['CUDAExecutionProvider'] - sess = ort.InferenceSession(path_or_bytes=onnx_file, providers=providers) - - return sess - - -def inference(sess: ort.InferenceSession, img: np.ndarray) -> np.ndarray: - """Inference RTMPose model. - - Args: - sess (ort.InferenceSession): ONNXRuntime session. - img (np.ndarray): Input image in shape. - - Returns: - outputs (np.ndarray): Output of RTMPose model. - """ - # build input - input = [img.transpose(2, 0, 1)] - - # build output - sess_input = {sess.get_inputs()[0].name: input} - sess_output = [] - for out in sess.get_outputs(): - sess_output.append(out.name) - - # run model - outputs = sess.run(sess_output, sess_input) - - return outputs - - -def postprocess(outputs: List[np.ndarray], - model_input_size: Tuple[int, int], - center: Tuple[int, int], - scale: Tuple[int, int], - simcc_split_ratio: float = 2.0 - ) -> Tuple[np.ndarray, np.ndarray]: - """Postprocess for RTMPose model output. - - Args: - outputs (np.ndarray): Output of RTMPose model. - model_input_size (tuple): RTMPose model Input image size. - center (tuple): Center of bbox in shape (x, y). - scale (tuple): Scale of bbox in shape (w, h). - simcc_split_ratio (float): Split ratio of simcc. - - Returns: - tuple: - - keypoints (np.ndarray): Rescaled keypoints. - - scores (np.ndarray): Model predict scores. - """ - # use simcc to decode - simcc_x, simcc_y = outputs - keypoints, scores = decode(simcc_x, simcc_y, simcc_split_ratio) - - # rescale keypoints - keypoints = keypoints / model_input_size * scale + center - scale / 2 - - return keypoints, scores - - -def visualize(img: np.ndarray, - keypoints: np.ndarray, - scores: np.ndarray, - filename: str = 'output.jpg', - thr=0.3) -> np.ndarray: - """Visualize the keypoints and skeleton on image. - - Args: - img (np.ndarray): Input image in shape. - keypoints (np.ndarray): Keypoints in image. - scores (np.ndarray): Model predict scores. - thr (float): Threshold for visualize. - - Returns: - img (np.ndarray): Visualized image. 
- """ - # default color - skeleton = [(15, 13), (13, 11), (16, 14), (14, 12), (11, 12), (5, 11), - (6, 12), (5, 6), (5, 7), (6, 8), (7, 9), (8, 10), (1, 2), - (0, 1), (0, 2), (1, 3), (2, 4), (3, 5), (4, 6), (15, 17), - (15, 18), (15, 19), (16, 20), (16, 21), (16, 22), (91, 92), - (92, 93), (93, 94), (94, 95), (91, 96), (96, 97), (97, 98), - (98, 99), (91, 100), (100, 101), (101, 102), (102, 103), - (91, 104), (104, 105), (105, 106), (106, 107), (91, 108), - (108, 109), (109, 110), (110, 111), (112, 113), (113, 114), - (114, 115), (115, 116), (112, 117), (117, 118), (118, 119), - (119, 120), (112, 121), (121, 122), (122, 123), (123, 124), - (112, 125), (125, 126), (126, 127), (127, 128), (112, 129), - (129, 130), (130, 131), (131, 132)] - palette = [[51, 153, 255], [0, 255, 0], [255, 128, 0], [255, 255, 255], - [255, 153, 255], [102, 178, 255], [255, 51, 51]] - link_color = [ - 1, 1, 2, 2, 0, 0, 0, 0, 1, 2, 1, 2, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, - 2, 2, 2, 2, 2, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 1, 1, 1, 1, 2, 2, 2, - 2, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 1, 1, 1, 1 - ] - point_color = [ - 0, 0, 0, 0, 0, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 2, 2, 2, 2, 2, 2, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, - 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 1, 1, 1, 1, 3, 2, 2, 2, 2, 4, 4, 4, - 4, 5, 5, 5, 5, 6, 6, 6, 6, 1, 1, 1, 1 - ] - - # draw keypoints and skeleton - for kpts, score in zip(keypoints, scores): - for kpt, color in zip(kpts, point_color): - cv2.circle(img, tuple(kpt.astype(np.int32)), 1, palette[color], 1, - cv2.LINE_AA) - for (u, v), color in zip(skeleton, link_color): - if score[u] > thr and score[v] > thr: - cv2.line(img, tuple(kpts[u].astype(np.int32)), - tuple(kpts[v].astype(np.int32)), palette[color], 2, - cv2.LINE_AA) - - # save to local - cv2.imwrite(filename, img) - - return img - - -def bbox_xyxy2cs(bbox: np.ndarray, - padding: float = 1.) -> Tuple[np.ndarray, np.ndarray]: - """Transform the bbox format from (x,y,w,h) into (center, scale) - - Args: - bbox (ndarray): Bounding box(es) in shape (4,) or (n, 4), formatted - as (left, top, right, bottom) - padding (float): BBox padding factor that will be multilied to scale. - Default: 1.0 - - Returns: - tuple: A tuple containing center and scale. - - np.ndarray[float32]: Center (x, y) of the bbox in shape (2,) or - (n, 2) - - np.ndarray[float32]: Scale (w, h) of the bbox in shape (2,) or - (n, 2) - """ - # convert single bbox from (4, ) to (1, 4) - dim = bbox.ndim - if dim == 1: - bbox = bbox[None, :] - - # get bbox center and scale - x1, y1, x2, y2 = np.hsplit(bbox, [1, 2, 3]) - center = np.hstack([x1 + x2, y1 + y2]) * 0.5 - scale = np.hstack([x2 - x1, y2 - y1]) * padding - - if dim == 1: - center = center[0] - scale = scale[0] - - return center, scale - - -def _fix_aspect_ratio(bbox_scale: np.ndarray, - aspect_ratio: float) -> np.ndarray: - """Extend the scale to match the given aspect ratio. - - Args: - scale (np.ndarray): The image scale (w, h) in shape (2, ) - aspect_ratio (float): The ratio of ``w/h`` - - Returns: - np.ndarray: The reshaped image scale in (2, ) - """ - w, h = np.hsplit(bbox_scale, [1]) - bbox_scale = np.where(w > h * aspect_ratio, - np.hstack([w, w / aspect_ratio]), - np.hstack([h * aspect_ratio, h])) - return bbox_scale - - -def _rotate_point(pt: np.ndarray, angle_rad: float) -> np.ndarray: - """Rotate a point by an angle. 
- - Args: - pt (np.ndarray): 2D point coordinates (x, y) in shape (2, ) - angle_rad (float): rotation angle in radian - - Returns: - np.ndarray: Rotated point in shape (2, ) - """ - sn, cs = np.sin(angle_rad), np.cos(angle_rad) - rot_mat = np.array([[cs, -sn], [sn, cs]]) - return rot_mat @ pt - - -def _get_3rd_point(a: np.ndarray, b: np.ndarray) -> np.ndarray: - """To calculate the affine matrix, three pairs of points are required. This - function is used to get the 3rd point, given 2D points a & b. - - The 3rd point is defined by rotating vector `a - b` by 90 degrees - anticlockwise, using b as the rotation center. - - Args: - a (np.ndarray): The 1st point (x,y) in shape (2, ) - b (np.ndarray): The 2nd point (x,y) in shape (2, ) - - Returns: - np.ndarray: The 3rd point. - """ - direction = a - b - c = b + np.r_[-direction[1], direction[0]] - return c - - -def get_warp_matrix(center: np.ndarray, - scale: np.ndarray, - rot: float, - output_size: Tuple[int, int], - shift: Tuple[float, float] = (0., 0.), - inv: bool = False) -> np.ndarray: - """Calculate the affine transformation matrix that can warp the bbox area - in the input image to the output size. - - Args: - center (np.ndarray[2, ]): Center of the bounding box (x, y). - scale (np.ndarray[2, ]): Scale of the bounding box - wrt [width, height]. - rot (float): Rotation angle (degree). - output_size (np.ndarray[2, ] | list(2,)): Size of the - destination heatmaps. - shift (0-100%): Shift translation ratio wrt the width/height. - Default (0., 0.). - inv (bool): Option to inverse the affine transform direction. - (inv=False: src->dst or inv=True: dst->src) - - Returns: - np.ndarray: A 2x3 transformation matrix - """ - shift = np.array(shift) - src_w = scale[0] - dst_w = output_size[0] - dst_h = output_size[1] - - # compute transformation matrix - rot_rad = np.deg2rad(rot) - src_dir = _rotate_point(np.array([0., src_w * -0.5]), rot_rad) - dst_dir = np.array([0., dst_w * -0.5]) - - # get four corners of the src rectangle in the original image - src = np.zeros((3, 2), dtype=np.float32) - src[0, :] = center + scale * shift - src[1, :] = center + src_dir + scale * shift - src[2, :] = _get_3rd_point(src[0, :], src[1, :]) - - # get four corners of the dst rectangle in the input image - dst = np.zeros((3, 2), dtype=np.float32) - dst[0, :] = [dst_w * 0.5, dst_h * 0.5] - dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir - dst[2, :] = _get_3rd_point(dst[0, :], dst[1, :]) - - if inv: - warp_mat = cv2.getAffineTransform(np.float32(dst), np.float32(src)) - else: - warp_mat = cv2.getAffineTransform(np.float32(src), np.float32(dst)) - - return warp_mat - - -def top_down_affine(input_size: dict, bbox_scale: dict, bbox_center: dict, - img: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: - """Get the bbox image as the model input by affine transform. - - Args: - input_size (dict): The input size of the model. - bbox_scale (dict): The bbox scale of the img. - bbox_center (dict): The bbox center of the img. - img (np.ndarray): The original image. - - Returns: - tuple: A tuple containing center and scale. - - np.ndarray[float32]: img after affine transform. - - np.ndarray[float32]: bbox scale after affine transform. 
- """ - w, h = input_size - warp_size = (int(w), int(h)) - - # reshape bbox to fixed aspect ratio - bbox_scale = _fix_aspect_ratio(bbox_scale, aspect_ratio=w / h) - - # get the affine matrix - center = bbox_center - scale = bbox_scale - rot = 0 - warp_mat = get_warp_matrix(center, scale, rot, output_size=(w, h)) - - # do affine transform - img = cv2.warpAffine(img, warp_mat, warp_size, flags=cv2.INTER_LINEAR) - - return img, bbox_scale - - -def get_simcc_maximum(simcc_x: np.ndarray, - simcc_y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: - """Get maximum response location and value from simcc representations. - - Note: - instance number: N - num_keypoints: K - heatmap height: H - heatmap width: W - - Args: - simcc_x (np.ndarray): x-axis SimCC in shape (K, Wx) or (N, K, Wx) - simcc_y (np.ndarray): y-axis SimCC in shape (K, Wy) or (N, K, Wy) - - Returns: - tuple: - - locs (np.ndarray): locations of maximum heatmap responses in shape - (K, 2) or (N, K, 2) - - vals (np.ndarray): values of maximum heatmap responses in shape - (K,) or (N, K) - """ - N, K, Wx = simcc_x.shape - simcc_x = simcc_x.reshape(N * K, -1) - simcc_y = simcc_y.reshape(N * K, -1) - - # get maximum value locations - x_locs = np.argmax(simcc_x, axis=1) - y_locs = np.argmax(simcc_y, axis=1) - locs = np.stack((x_locs, y_locs), axis=-1).astype(np.float32) - max_val_x = np.amax(simcc_x, axis=1) - max_val_y = np.amax(simcc_y, axis=1) - - # get maximum value across x and y axis - mask = max_val_x > max_val_y - max_val_x[mask] = max_val_y[mask] - vals = max_val_x - locs[vals <= 0.] = -1 - - # reshape - locs = locs.reshape(N, K, 2) - vals = vals.reshape(N, K) - - return locs, vals - - -def decode(simcc_x: np.ndarray, simcc_y: np.ndarray, - simcc_split_ratio) -> Tuple[np.ndarray, np.ndarray]: - """Modulate simcc distribution with Gaussian. - - Args: - simcc_x (np.ndarray[K, Wx]): model predicted simcc in x. - simcc_y (np.ndarray[K, Wy]): model predicted simcc in y. - simcc_split_ratio (int): The split ratio of simcc. - - Returns: - tuple: A tuple containing center and scale. - - np.ndarray[float32]: keypoints in shape (K, 2) or (n, K, 2) - - np.ndarray[float32]: scores in shape (K,) or (n, K) - """ - keypoints, scores = get_simcc_maximum(simcc_x, simcc_y) - keypoints /= simcc_split_ratio - - return keypoints, scores - - -def main(): - args = parse_args() - logger.info('Start running model on RTMPose...') - - # read image from file - logger.info('1. Read image from {}...'.format(args.image_file)) - img = cv2.imread(args.image_file) - - # build onnx model - logger.info('2. Build onnx model from {}...'.format(args.onnx_file)) - sess = build_session(args.onnx_file, args.device) - h, w = sess.get_inputs()[0].shape[2:] - model_input_size = (w, h) - - # preprocessing - logger.info('3. Preprocess image...') - resized_img, center, scale = preprocess(img, model_input_size) - - # inference - logger.info('4. Inference...') - start_time = time.time() - outputs = inference(sess, resized_img) - end_time = time.time() - logger.info('4. Inference done, time cost: {:.4f}s'.format(end_time - - start_time)) - - # postprocessing - logger.info('5. Postprocess...') - keypoints, scores = postprocess(outputs, model_input_size, center, scale) - - # visualize inference result - logger.info('6. Visualize inference result...') - visualize(img, keypoints, scores, args.save_path) - - logger.info('Done...') - - -if __name__ == '__main__': - main() +# Copyright (c) OpenMMLab. All rights reserved. 
+import argparse +import time +from typing import List, Tuple + +import cv2 +import loguru +import numpy as np +import onnxruntime as ort + +logger = loguru.logger + + +def parse_args(): + parser = argparse.ArgumentParser( + description='RTMPose ONNX inference demo.') + parser.add_argument('onnx_file', help='ONNX file path') + parser.add_argument('image_file', help='Input image file path') + parser.add_argument( + '--device', help='device type for inference', default='cpu') + parser.add_argument( + '--save-path', + help='path to save the output image', + default='output.jpg') + args = parser.parse_args() + return args + + +def preprocess( + img: np.ndarray, input_size: Tuple[int, int] = (192, 256) +) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """Do preprocessing for RTMPose model inference. + + Args: + img (np.ndarray): Input image in shape. + input_size (tuple): Input image size in shape (w, h). + + Returns: + tuple: + - resized_img (np.ndarray): Preprocessed image. + - center (np.ndarray): Center of image. + - scale (np.ndarray): Scale of image. + """ + # get shape of image + img_shape = img.shape[:2] + bbox = np.array([0, 0, img_shape[1], img_shape[0]]) + + # get center and scale + center, scale = bbox_xyxy2cs(bbox, padding=1.25) + + # do affine transformation + resized_img, scale = top_down_affine(input_size, scale, center, img) + + # normalize image + mean = np.array([123.675, 116.28, 103.53]) + std = np.array([58.395, 57.12, 57.375]) + resized_img = (resized_img - mean) / std + + return resized_img, center, scale + + +def build_session(onnx_file: str, device: str = 'cpu') -> ort.InferenceSession: + """Build onnxruntime session. + + Args: + onnx_file (str): ONNX file path. + device (str): Device type for inference. + + Returns: + sess (ort.InferenceSession): ONNXRuntime session. + """ + providers = ['CPUExecutionProvider' + ] if device == 'cpu' else ['CUDAExecutionProvider'] + sess = ort.InferenceSession(path_or_bytes=onnx_file, providers=providers) + + return sess + + +def inference(sess: ort.InferenceSession, img: np.ndarray) -> np.ndarray: + """Inference RTMPose model. + + Args: + sess (ort.InferenceSession): ONNXRuntime session. + img (np.ndarray): Input image in shape. + + Returns: + outputs (np.ndarray): Output of RTMPose model. + """ + # build input + input = [img.transpose(2, 0, 1)] + + # build output + sess_input = {sess.get_inputs()[0].name: input} + sess_output = [] + for out in sess.get_outputs(): + sess_output.append(out.name) + + # run model + outputs = sess.run(sess_output, sess_input) + + return outputs + + +def postprocess(outputs: List[np.ndarray], + model_input_size: Tuple[int, int], + center: Tuple[int, int], + scale: Tuple[int, int], + simcc_split_ratio: float = 2.0 + ) -> Tuple[np.ndarray, np.ndarray]: + """Postprocess for RTMPose model output. + + Args: + outputs (np.ndarray): Output of RTMPose model. + model_input_size (tuple): RTMPose model Input image size. + center (tuple): Center of bbox in shape (x, y). + scale (tuple): Scale of bbox in shape (w, h). + simcc_split_ratio (float): Split ratio of simcc. + + Returns: + tuple: + - keypoints (np.ndarray): Rescaled keypoints. + - scores (np.ndarray): Model predict scores. 
+ """ + # use simcc to decode + simcc_x, simcc_y = outputs + keypoints, scores = decode(simcc_x, simcc_y, simcc_split_ratio) + + # rescale keypoints + keypoints = keypoints / model_input_size * scale + center - scale / 2 + + return keypoints, scores + + +def visualize(img: np.ndarray, + keypoints: np.ndarray, + scores: np.ndarray, + filename: str = 'output.jpg', + thr=0.3) -> np.ndarray: + """Visualize the keypoints and skeleton on image. + + Args: + img (np.ndarray): Input image in shape. + keypoints (np.ndarray): Keypoints in image. + scores (np.ndarray): Model predict scores. + thr (float): Threshold for visualize. + + Returns: + img (np.ndarray): Visualized image. + """ + # default color + skeleton = [(15, 13), (13, 11), (16, 14), (14, 12), (11, 12), (5, 11), + (6, 12), (5, 6), (5, 7), (6, 8), (7, 9), (8, 10), (1, 2), + (0, 1), (0, 2), (1, 3), (2, 4), (3, 5), (4, 6), (15, 17), + (15, 18), (15, 19), (16, 20), (16, 21), (16, 22), (91, 92), + (92, 93), (93, 94), (94, 95), (91, 96), (96, 97), (97, 98), + (98, 99), (91, 100), (100, 101), (101, 102), (102, 103), + (91, 104), (104, 105), (105, 106), (106, 107), (91, 108), + (108, 109), (109, 110), (110, 111), (112, 113), (113, 114), + (114, 115), (115, 116), (112, 117), (117, 118), (118, 119), + (119, 120), (112, 121), (121, 122), (122, 123), (123, 124), + (112, 125), (125, 126), (126, 127), (127, 128), (112, 129), + (129, 130), (130, 131), (131, 132)] + palette = [[51, 153, 255], [0, 255, 0], [255, 128, 0], [255, 255, 255], + [255, 153, 255], [102, 178, 255], [255, 51, 51]] + link_color = [ + 1, 1, 2, 2, 0, 0, 0, 0, 1, 2, 1, 2, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, + 2, 2, 2, 2, 2, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 1, 1, 1, 1, 2, 2, 2, + 2, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 1, 1, 1, 1 + ] + point_color = [ + 0, 0, 0, 0, 0, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 2, 2, 2, 2, 2, 2, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, + 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 1, 1, 1, 1, 3, 2, 2, 2, 2, 4, 4, 4, + 4, 5, 5, 5, 5, 6, 6, 6, 6, 1, 1, 1, 1 + ] + + # draw keypoints and skeleton + for kpts, score in zip(keypoints, scores): + for kpt, color in zip(kpts, point_color): + cv2.circle(img, tuple(kpt.astype(np.int32)), 1, palette[color], 1, + cv2.LINE_AA) + for (u, v), color in zip(skeleton, link_color): + if score[u] > thr and score[v] > thr: + cv2.line(img, tuple(kpts[u].astype(np.int32)), + tuple(kpts[v].astype(np.int32)), palette[color], 2, + cv2.LINE_AA) + + # save to local + cv2.imwrite(filename, img) + + return img + + +def bbox_xyxy2cs(bbox: np.ndarray, + padding: float = 1.) -> Tuple[np.ndarray, np.ndarray]: + """Transform the bbox format from (x,y,w,h) into (center, scale) + + Args: + bbox (ndarray): Bounding box(es) in shape (4,) or (n, 4), formatted + as (left, top, right, bottom) + padding (float): BBox padding factor that will be multilied to scale. + Default: 1.0 + + Returns: + tuple: A tuple containing center and scale. 
+ - np.ndarray[float32]: Center (x, y) of the bbox in shape (2,) or + (n, 2) + - np.ndarray[float32]: Scale (w, h) of the bbox in shape (2,) or + (n, 2) + """ + # convert single bbox from (4, ) to (1, 4) + dim = bbox.ndim + if dim == 1: + bbox = bbox[None, :] + + # get bbox center and scale + x1, y1, x2, y2 = np.hsplit(bbox, [1, 2, 3]) + center = np.hstack([x1 + x2, y1 + y2]) * 0.5 + scale = np.hstack([x2 - x1, y2 - y1]) * padding + + if dim == 1: + center = center[0] + scale = scale[0] + + return center, scale + + +def _fix_aspect_ratio(bbox_scale: np.ndarray, + aspect_ratio: float) -> np.ndarray: + """Extend the scale to match the given aspect ratio. + + Args: + scale (np.ndarray): The image scale (w, h) in shape (2, ) + aspect_ratio (float): The ratio of ``w/h`` + + Returns: + np.ndarray: The reshaped image scale in (2, ) + """ + w, h = np.hsplit(bbox_scale, [1]) + bbox_scale = np.where(w > h * aspect_ratio, + np.hstack([w, w / aspect_ratio]), + np.hstack([h * aspect_ratio, h])) + return bbox_scale + + +def _rotate_point(pt: np.ndarray, angle_rad: float) -> np.ndarray: + """Rotate a point by an angle. + + Args: + pt (np.ndarray): 2D point coordinates (x, y) in shape (2, ) + angle_rad (float): rotation angle in radian + + Returns: + np.ndarray: Rotated point in shape (2, ) + """ + sn, cs = np.sin(angle_rad), np.cos(angle_rad) + rot_mat = np.array([[cs, -sn], [sn, cs]]) + return rot_mat @ pt + + +def _get_3rd_point(a: np.ndarray, b: np.ndarray) -> np.ndarray: + """To calculate the affine matrix, three pairs of points are required. This + function is used to get the 3rd point, given 2D points a & b. + + The 3rd point is defined by rotating vector `a - b` by 90 degrees + anticlockwise, using b as the rotation center. + + Args: + a (np.ndarray): The 1st point (x,y) in shape (2, ) + b (np.ndarray): The 2nd point (x,y) in shape (2, ) + + Returns: + np.ndarray: The 3rd point. + """ + direction = a - b + c = b + np.r_[-direction[1], direction[0]] + return c + + +def get_warp_matrix(center: np.ndarray, + scale: np.ndarray, + rot: float, + output_size: Tuple[int, int], + shift: Tuple[float, float] = (0., 0.), + inv: bool = False) -> np.ndarray: + """Calculate the affine transformation matrix that can warp the bbox area + in the input image to the output size. + + Args: + center (np.ndarray[2, ]): Center of the bounding box (x, y). + scale (np.ndarray[2, ]): Scale of the bounding box + wrt [width, height]. + rot (float): Rotation angle (degree). + output_size (np.ndarray[2, ] | list(2,)): Size of the + destination heatmaps. + shift (0-100%): Shift translation ratio wrt the width/height. + Default (0., 0.). + inv (bool): Option to inverse the affine transform direction. 
+            (inv=False: src->dst or inv=True: dst->src)
+
+    Returns:
+        np.ndarray: A 2x3 transformation matrix
+    """
+    shift = np.array(shift)
+    src_w = scale[0]
+    dst_w = output_size[0]
+    dst_h = output_size[1]
+
+    # compute transformation matrix
+    rot_rad = np.deg2rad(rot)
+    src_dir = _rotate_point(np.array([0., src_w * -0.5]), rot_rad)
+    dst_dir = np.array([0., dst_w * -0.5])
+
+    # get three reference points of the src rectangle in the original image
+    src = np.zeros((3, 2), dtype=np.float32)
+    src[0, :] = center + scale * shift
+    src[1, :] = center + src_dir + scale * shift
+    src[2, :] = _get_3rd_point(src[0, :], src[1, :])
+
+    # get the corresponding three reference points in the model input image
+    dst = np.zeros((3, 2), dtype=np.float32)
+    dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
+    dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
+    dst[2, :] = _get_3rd_point(dst[0, :], dst[1, :])
+
+    if inv:
+        warp_mat = cv2.getAffineTransform(np.float32(dst), np.float32(src))
+    else:
+        warp_mat = cv2.getAffineTransform(np.float32(src), np.float32(dst))
+
+    return warp_mat
+
+
+def top_down_affine(input_size: tuple, bbox_scale: np.ndarray,
+                    bbox_center: np.ndarray,
+                    img: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
+    """Get the bbox image as the model input by affine transform.
+
+    Args:
+        input_size (tuple): The input size (w, h) of the model.
+        bbox_scale (np.ndarray): The bbox scale of the img.
+        bbox_center (np.ndarray): The bbox center of the img.
+        img (np.ndarray): The original image.
+
+    Returns:
+        tuple: A tuple containing the cropped image and the updated scale.
+        - np.ndarray[float32]: img after affine transform.
+        - np.ndarray[float32]: bbox scale after affine transform.
+    """
+    w, h = input_size
+    warp_size = (int(w), int(h))
+
+    # reshape bbox to fixed aspect ratio
+    bbox_scale = _fix_aspect_ratio(bbox_scale, aspect_ratio=w / h)
+
+    # get the affine matrix
+    center = bbox_center
+    scale = bbox_scale
+    rot = 0
+    warp_mat = get_warp_matrix(center, scale, rot, output_size=(w, h))
+
+    # do affine transform
+    img = cv2.warpAffine(img, warp_mat, warp_size, flags=cv2.INTER_LINEAR)
+
+    return img, bbox_scale
+
+
+def get_simcc_maximum(simcc_x: np.ndarray,
+                      simcc_y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
+    """Get maximum response location and value from simcc representations.
+
+    Note:
+        instance number: N
+        num_keypoints: K
+        SimCC x-axis length: Wx
+        SimCC y-axis length: Wy
+
+    Args:
+        simcc_x (np.ndarray): x-axis SimCC in shape (N, K, Wx)
+        simcc_y (np.ndarray): y-axis SimCC in shape (N, K, Wy)
+
+    Returns:
+        tuple:
+        - locs (np.ndarray): locations of maximum simcc responses in shape
+            (N, K, 2)
+        - vals (np.ndarray): values of maximum simcc responses in shape
+            (N, K)
+    """
+    N, K, Wx = simcc_x.shape
+    simcc_x = simcc_x.reshape(N * K, -1)
+    simcc_y = simcc_y.reshape(N * K, -1)
+
+    # get maximum value locations
+    x_locs = np.argmax(simcc_x, axis=1)
+    y_locs = np.argmax(simcc_y, axis=1)
+    locs = np.stack((x_locs, y_locs), axis=-1).astype(np.float32)
+    max_val_x = np.amax(simcc_x, axis=1)
+    max_val_y = np.amax(simcc_y, axis=1)
+
+    # keypoint confidence is the smaller of the x- and y-axis maxima
+    mask = max_val_x > max_val_y
+    max_val_x[mask] = max_val_y[mask]
+    vals = max_val_x
+    # invalidate locations whose confidence is not positive
+    locs[vals <= 0.] = -1
+
+    # reshape
+    locs = locs.reshape(N, K, 2)
+    vals = vals.reshape(N, K)
+
+    return locs, vals
+
+
+def decode(simcc_x: np.ndarray, simcc_y: np.ndarray,
+           simcc_split_ratio) -> Tuple[np.ndarray, np.ndarray]:
+    """Decode SimCC predictions into locations and confidence scores.
+
+    Args:
+        simcc_x (np.ndarray[K, Wx]): model predicted simcc in x.
+ simcc_y (np.ndarray[K, Wy]): model predicted simcc in y. + simcc_split_ratio (int): The split ratio of simcc. + + Returns: + tuple: A tuple containing center and scale. + - np.ndarray[float32]: keypoints in shape (K, 2) or (n, K, 2) + - np.ndarray[float32]: scores in shape (K,) or (n, K) + """ + keypoints, scores = get_simcc_maximum(simcc_x, simcc_y) + keypoints /= simcc_split_ratio + + return keypoints, scores + + +def main(): + args = parse_args() + logger.info('Start running model on RTMPose...') + + # read image from file + logger.info('1. Read image from {}...'.format(args.image_file)) + img = cv2.imread(args.image_file) + + # build onnx model + logger.info('2. Build onnx model from {}...'.format(args.onnx_file)) + sess = build_session(args.onnx_file, args.device) + h, w = sess.get_inputs()[0].shape[2:] + model_input_size = (w, h) + + # preprocessing + logger.info('3. Preprocess image...') + resized_img, center, scale = preprocess(img, model_input_size) + + # inference + logger.info('4. Inference...') + start_time = time.time() + outputs = inference(sess, resized_img) + end_time = time.time() + logger.info('4. Inference done, time cost: {:.4f}s'.format(end_time - + start_time)) + + # postprocessing + logger.info('5. Postprocess...') + keypoints, scores = postprocess(outputs, model_input_size, center, scale) + + # visualize inference result + logger.info('6. Visualize inference result...') + visualize(img, keypoints, scores, args.save_path) + + logger.info('Done...') + + +if __name__ == '__main__': + main() diff --git a/projects/rtmpose/examples/onnxruntime/requirements.txt b/projects/rtmpose/examples/onnxruntime/requirements.txt index 88548c0203..9910ebd659 100644 --- a/projects/rtmpose/examples/onnxruntime/requirements.txt +++ b/projects/rtmpose/examples/onnxruntime/requirements.txt @@ -1,4 +1,4 @@ -loguru==0.6.0 -numpy==1.21.6 -onnxruntime==1.14.1 -onnxruntime-gpu==1.8.1 +loguru==0.6.0 +numpy==1.21.6 +onnxruntime==1.14.1 +onnxruntime-gpu==1.8.1 diff --git a/projects/rtmpose/rtmdet/hand/rtmdet_nano_320-8xb32_hand.py b/projects/rtmpose/rtmdet/hand/rtmdet_nano_320-8xb32_hand.py index 278cc0bfe8..abec3c077a 100644 --- a/projects/rtmpose/rtmdet/hand/rtmdet_nano_320-8xb32_hand.py +++ b/projects/rtmpose/rtmdet/hand/rtmdet_nano_320-8xb32_hand.py @@ -1,171 +1,171 @@ -_base_ = 'mmdet::rtmdet/rtmdet_l_8xb32-300e_coco.py' - -input_shape = 320 - -model = dict( - backbone=dict( - deepen_factor=0.33, - widen_factor=0.25, - use_depthwise=True, - ), - neck=dict( - in_channels=[64, 128, 256], - out_channels=64, - num_csp_blocks=1, - use_depthwise=True, - ), - bbox_head=dict( - in_channels=64, - feat_channels=64, - share_conv=False, - exp_on_reg=False, - use_depthwise=True, - num_classes=1), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100)) - -# file_client_args = dict( -# backend='petrel', -# path_mapping=dict({'data/': 's3://openmmlab/datasets/'})) - -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='CachedMosaic', - img_scale=(input_shape, input_shape), - pad_val=114.0, - max_cached_images=20, - random_pop=False), - dict( - type='RandomResize', - scale=(input_shape * 2, input_shape * 2), - ratio_range=(0.5, 1.5), - keep_ratio=True), - dict(type='RandomCrop', crop_size=(input_shape, input_shape)), - dict(type='YOLOXHSVRandomAug'), - dict(type='RandomFlip', prob=0.5), - dict( - type='Pad', - size=(input_shape, input_shape), - pad_val=dict(img=(114, 
114, 114))), - dict(type='PackDetInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='RandomResize', - scale=(input_shape, input_shape), - ratio_range=(0.5, 1.5), - keep_ratio=True), - dict(type='RandomCrop', crop_size=(input_shape, input_shape)), - dict(type='YOLOXHSVRandomAug'), - dict(type='RandomFlip', prob=0.5), - dict( - type='Pad', - size=(input_shape, input_shape), - pad_val=dict(img=(114, 114, 114))), - dict(type='PackDetInputs') -] - -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='Resize', scale=(input_shape, input_shape), keep_ratio=True), - dict( - type='Pad', - size=(input_shape, input_shape), - pad_val=dict(img=(114, 114, 114))), - dict( - type='PackDetInputs', - meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', - 'scale_factor')) -] - -data_mode = 'topdown' -data_root = 'data/' - -train_dataset = dict( - _delete_=True, - type='ConcatDataset', - datasets=[ - dict( - type='mmpose.OneHand10KDataset', - data_root=data_root, - data_mode=data_mode, - pipeline=train_pipeline, - ann_file='onehand10k/annotations/onehand10k_train.json', - data_prefix=dict(img='pose/OneHand10K/')), - dict( - type='mmpose.FreiHandDataset', - data_root=data_root, - data_mode=data_mode, - pipeline=train_pipeline, - ann_file='freihand/annotations/freihand_train.json', - data_prefix=dict(img='pose/FreiHand/')), - dict( - type='mmpose.Rhd2DDataset', - data_root=data_root, - data_mode=data_mode, - pipeline=train_pipeline, - ann_file='rhd/annotations/rhd_train.json', - data_prefix=dict(img='pose/RHD/')), - dict( - type='mmpose.HalpeHandDataset', - data_root=data_root, - data_mode=data_mode, - pipeline=train_pipeline, - ann_file='halpe/annotations/halpe_train_v1.json', - data_prefix=dict( - img='pose/Halpe/hico_20160224_det/images/train2015/') # noqa - ) - ], - ignore_keys=[ - 'CLASSES', 'dataset_keypoint_weights', 'dataset_name', 'flip_indices', - 'flip_pairs', 'keypoint_colors', 'keypoint_id2name', - 'keypoint_name2id', 'lower_body_ids', 'num_keypoints', - 'num_skeleton_links', 'sigmas', 'skeleton_link_colors', - 'skeleton_links', 'upper_body_ids' - ], -) - -test_dataset = dict( - _delete_=True, - type='mmpose.OneHand10KDataset', - data_root=data_root, - data_mode=data_mode, - pipeline=test_pipeline, - ann_file='onehand10k/annotations/onehand10k_test.json', - data_prefix=dict(img='pose/OneHand10K/'), -) - -train_dataloader = dict(dataset=train_dataset) -val_dataloader = dict(dataset=test_dataset) -test_dataloader = val_dataloader - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='PipelineSwitchHook', - switch_epoch=280, - switch_pipeline=train_pipeline_stage2) -] - -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'onehand10k/annotations/onehand10k_test.json', - metric='bbox', - format_only=False) -test_evaluator = val_evaluator - -train_cfg = dict(val_interval=1) +_base_ = 'mmdet::rtmdet/rtmdet_l_8xb32-300e_coco.py' + +input_shape = 320 + +model = dict( + backbone=dict( + deepen_factor=0.33, + widen_factor=0.25, + use_depthwise=True, + ), + neck=dict( + in_channels=[64, 128, 256], + out_channels=64, + num_csp_blocks=1, + use_depthwise=True, + ), + bbox_head=dict( + in_channels=64, + feat_channels=64, + share_conv=False, + exp_on_reg=False, + use_depthwise=True, + num_classes=1), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', 
iou_threshold=0.6), + max_per_img=100)) + +# file_client_args = dict( +# backend='petrel', +# path_mapping=dict({'data/': 's3://openmmlab/datasets/'})) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='CachedMosaic', + img_scale=(input_shape, input_shape), + pad_val=114.0, + max_cached_images=20, + random_pop=False), + dict( + type='RandomResize', + scale=(input_shape * 2, input_shape * 2), + ratio_range=(0.5, 1.5), + keep_ratio=True), + dict(type='RandomCrop', crop_size=(input_shape, input_shape)), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict( + type='Pad', + size=(input_shape, input_shape), + pad_val=dict(img=(114, 114, 114))), + dict(type='PackDetInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomResize', + scale=(input_shape, input_shape), + ratio_range=(0.5, 1.5), + keep_ratio=True), + dict(type='RandomCrop', crop_size=(input_shape, input_shape)), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict( + type='Pad', + size=(input_shape, input_shape), + pad_val=dict(img=(114, 114, 114))), + dict(type='PackDetInputs') +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(input_shape, input_shape), keep_ratio=True), + dict( + type='Pad', + size=(input_shape, input_shape), + pad_val=dict(img=(114, 114, 114))), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +data_mode = 'topdown' +data_root = 'data/' + +train_dataset = dict( + _delete_=True, + type='ConcatDataset', + datasets=[ + dict( + type='mmpose.OneHand10KDataset', + data_root=data_root, + data_mode=data_mode, + pipeline=train_pipeline, + ann_file='onehand10k/annotations/onehand10k_train.json', + data_prefix=dict(img='pose/OneHand10K/')), + dict( + type='mmpose.FreiHandDataset', + data_root=data_root, + data_mode=data_mode, + pipeline=train_pipeline, + ann_file='freihand/annotations/freihand_train.json', + data_prefix=dict(img='pose/FreiHand/')), + dict( + type='mmpose.Rhd2DDataset', + data_root=data_root, + data_mode=data_mode, + pipeline=train_pipeline, + ann_file='rhd/annotations/rhd_train.json', + data_prefix=dict(img='pose/RHD/')), + dict( + type='mmpose.HalpeHandDataset', + data_root=data_root, + data_mode=data_mode, + pipeline=train_pipeline, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict( + img='pose/Halpe/hico_20160224_det/images/train2015/') # noqa + ) + ], + ignore_keys=[ + 'CLASSES', 'dataset_keypoint_weights', 'dataset_name', 'flip_indices', + 'flip_pairs', 'keypoint_colors', 'keypoint_id2name', + 'keypoint_name2id', 'lower_body_ids', 'num_keypoints', + 'num_skeleton_links', 'sigmas', 'skeleton_link_colors', + 'skeleton_links', 'upper_body_ids' + ], +) + +test_dataset = dict( + _delete_=True, + type='mmpose.OneHand10KDataset', + data_root=data_root, + data_mode=data_mode, + pipeline=test_pipeline, + ann_file='onehand10k/annotations/onehand10k_test.json', + data_prefix=dict(img='pose/OneHand10K/'), +) + +train_dataloader = dict(dataset=train_dataset) +val_dataloader = dict(dataset=test_dataset) +test_dataloader = val_dataloader + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='PipelineSwitchHook', + switch_epoch=280, + switch_pipeline=train_pipeline_stage2) +] + +val_evaluator = dict( 
+ type='CocoMetric', + ann_file=data_root + 'onehand10k/annotations/onehand10k_test.json', + metric='bbox', + format_only=False) +test_evaluator = val_evaluator + +train_cfg = dict(val_interval=1) diff --git a/projects/rtmpose/rtmdet/person/rtmdet_m_640-8xb32_coco-person.py b/projects/rtmpose/rtmdet/person/rtmdet_m_640-8xb32_coco-person.py index 620de8dc8f..87bc3c811d 100644 --- a/projects/rtmpose/rtmdet/person/rtmdet_m_640-8xb32_coco-person.py +++ b/projects/rtmpose/rtmdet/person/rtmdet_m_640-8xb32_coco-person.py @@ -1,20 +1,20 @@ -_base_ = 'mmdet::rtmdet/rtmdet_m_8xb32-300e_coco.py' - -checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth' # noqa - -model = dict( - backbone=dict( - init_cfg=dict( - type='Pretrained', prefix='backbone.', checkpoint=checkpoint)), - bbox_head=dict(num_classes=1), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100)) - -train_dataloader = dict(dataset=dict(metainfo=dict(classes=('person', )))) - -val_dataloader = dict(dataset=dict(metainfo=dict(classes=('person', )))) -test_dataloader = val_dataloader +_base_ = 'mmdet::rtmdet/rtmdet_m_8xb32-300e_coco.py' + +checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth' # noqa + +model = dict( + backbone=dict( + init_cfg=dict( + type='Pretrained', prefix='backbone.', checkpoint=checkpoint)), + bbox_head=dict(num_classes=1), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +train_dataloader = dict(dataset=dict(metainfo=dict(classes=('person', )))) + +val_dataloader = dict(dataset=dict(metainfo=dict(classes=('person', )))) +test_dataloader = val_dataloader diff --git a/projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py b/projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py index c2f1b64e4a..a681da0971 100644 --- a/projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py +++ b/projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py @@ -1,104 +1,104 @@ -_base_ = 'mmdet::rtmdet/rtmdet_l_8xb32-300e_coco.py' - -input_shape = 320 - -model = dict( - backbone=dict( - deepen_factor=0.33, - widen_factor=0.25, - use_depthwise=True, - ), - neck=dict( - in_channels=[64, 128, 256], - out_channels=64, - num_csp_blocks=1, - use_depthwise=True, - ), - bbox_head=dict( - in_channels=64, - feat_channels=64, - share_conv=False, - exp_on_reg=False, - use_depthwise=True, - num_classes=1), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100)) - -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='CachedMosaic', - img_scale=(input_shape, input_shape), - pad_val=114.0, - max_cached_images=20, - random_pop=False), - dict( - type='RandomResize', - scale=(input_shape * 2, input_shape * 2), - ratio_range=(0.5, 1.5), - keep_ratio=True), - dict(type='RandomCrop', crop_size=(input_shape, input_shape)), - dict(type='YOLOXHSVRandomAug'), - dict(type='RandomFlip', prob=0.5), - dict( - type='Pad', - size=(input_shape, input_shape), - pad_val=dict(img=(114, 114, 114))), - dict(type='PackDetInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - 
type='RandomResize', - scale=(input_shape, input_shape), - ratio_range=(0.5, 1.5), - keep_ratio=True), - dict(type='RandomCrop', crop_size=(input_shape, input_shape)), - dict(type='YOLOXHSVRandomAug'), - dict(type='RandomFlip', prob=0.5), - dict( - type='Pad', - size=(input_shape, input_shape), - pad_val=dict(img=(114, 114, 114))), - dict(type='PackDetInputs') -] - -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='Resize', scale=(input_shape, input_shape), keep_ratio=True), - dict( - type='Pad', - size=(input_shape, input_shape), - pad_val=dict(img=(114, 114, 114))), - dict( - type='PackDetInputs', - meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', - 'scale_factor')) -] - -train_dataloader = dict( - dataset=dict(pipeline=train_pipeline, metainfo=dict(classes=('person', )))) - -val_dataloader = dict( - dataset=dict(pipeline=test_pipeline, metainfo=dict(classes=('person', )))) -test_dataloader = val_dataloader - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='PipelineSwitchHook', - switch_epoch=280, - switch_pipeline=train_pipeline_stage2) -] +_base_ = 'mmdet::rtmdet/rtmdet_l_8xb32-300e_coco.py' + +input_shape = 320 + +model = dict( + backbone=dict( + deepen_factor=0.33, + widen_factor=0.25, + use_depthwise=True, + ), + neck=dict( + in_channels=[64, 128, 256], + out_channels=64, + num_csp_blocks=1, + use_depthwise=True, + ), + bbox_head=dict( + in_channels=64, + feat_channels=64, + share_conv=False, + exp_on_reg=False, + use_depthwise=True, + num_classes=1), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='CachedMosaic', + img_scale=(input_shape, input_shape), + pad_val=114.0, + max_cached_images=20, + random_pop=False), + dict( + type='RandomResize', + scale=(input_shape * 2, input_shape * 2), + ratio_range=(0.5, 1.5), + keep_ratio=True), + dict(type='RandomCrop', crop_size=(input_shape, input_shape)), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict( + type='Pad', + size=(input_shape, input_shape), + pad_val=dict(img=(114, 114, 114))), + dict(type='PackDetInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='RandomResize', + scale=(input_shape, input_shape), + ratio_range=(0.5, 1.5), + keep_ratio=True), + dict(type='RandomCrop', crop_size=(input_shape, input_shape)), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict( + type='Pad', + size=(input_shape, input_shape), + pad_val=dict(img=(114, 114, 114))), + dict(type='PackDetInputs') +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(input_shape, input_shape), keep_ratio=True), + dict( + type='Pad', + size=(input_shape, input_shape), + pad_val=dict(img=(114, 114, 114))), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + dataset=dict(pipeline=train_pipeline, metainfo=dict(classes=('person', )))) + +val_dataloader = dict( + dataset=dict(pipeline=test_pipeline, metainfo=dict(classes=('person', )))) +test_dataloader = val_dataloader + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + 
priority=49), + dict( + type='PipelineSwitchHook', + switch_epoch=280, + switch_pipeline=train_pipeline_stage2) +] diff --git a/projects/rtmpose/rtmpose/animal_2d_keypoint/rtmpose-m_8xb64-210e_ap10k-256x256.py b/projects/rtmpose/rtmpose/animal_2d_keypoint/rtmpose-m_8xb64-210e_ap10k-256x256.py index d25fd13e70..c2d1b32d1d 100644 --- a/projects/rtmpose/rtmpose/animal_2d_keypoint/rtmpose-m_8xb64-210e_ap10k-256x256.py +++ b/projects/rtmpose/rtmpose/animal_2d_keypoint/rtmpose-m_8xb64-210e_ap10k-256x256.py @@ -1,246 +1,246 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# common setting -num_keypoints = 17 -input_size = (256, 256) - -# runtime -max_epochs = 210 -stage2_num_epochs = 30 -base_lr = 4e-3 -train_batch_size = 64 -val_batch_size = 32 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(5.66, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=768, - out_channels=num_keypoints, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'AP10KDataset' -data_mode = 'topdown' -data_root = 'data/ap10k/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', 
p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ap10k-train-split1.json', - data_prefix=dict(img='data/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ap10k-val-split1.json', - data_prefix=dict(img='data/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/ap10k-test-split1.json', - data_prefix=dict(img='data/'), - test_mode=True, - pipeline=val_pipeline, - )) - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/ap10k-val-split1.json') -test_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/ap10k-test-split1.json') +_base_ = ['mmpose::_base_/default_runtime.py'] + +# common setting +num_keypoints = 17 +input_size = (256, 256) + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 64 +val_batch_size = 32 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate 
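The param_scheduler defined next pairs a brief linear warm-up with a cosine decay that only begins at the halfway point of training, a pattern shared by every RTMPose config in this patch. The snippet below is a rough, self-contained sketch of the learning-rate curve those two schedulers are meant to produce; the helper name approx_lr and the iters_per_epoch argument are illustrative assumptions for the example, and MMEngine's exact per-iteration values may differ slightly.

import math


def approx_lr(it, iters_per_epoch, base_lr=4e-3, max_epochs=210,
              warmup_iters=1000, start_factor=1.0e-5, min_factor=0.05):
    """Approximate learning rate at training iteration `it` (sketch only)."""
    if it < warmup_iters:
        # LinearLR: ramp from base_lr * start_factor up to base_lr
        start = base_lr * start_factor
        return start + (base_lr - start) * it / warmup_iters
    epoch = it / iters_per_epoch
    half = max_epochs // 2
    if epoch < half:
        # constant at base_lr until the cosine schedule begins at mid-training
        return base_lr
    # CosineAnnealingLR: decay from base_lr down to base_lr * min_factor
    eta_min = base_lr * min_factor
    t = min((epoch - half) / half, 1.0)
    return eta_min + 0.5 * (base_lr - eta_min) * (1 + math.cos(math.pi * t))

The body-keypoint configs later in this diff reuse the same curve shape, only with max_epochs set to 420 or 700.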
+param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(5.66, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=num_keypoints, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'AP10KDataset' +data_mode = 'topdown' +data_root = 'data/ap10k/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + 
dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-train-split1.json', + data_prefix=dict(img='data/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-val-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/ap10k-test-split1.json', + data_prefix=dict(img='data/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-val-split1.json') +test_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/ap10k-test-split1.json') diff --git a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py index c472cac1fb..e0fcd9791c 100644 --- a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py +++ b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-256x192.py @@ -1,238 +1,238 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# common setting -num_keypoints = 17 -input_size = (192, 256) - -# runtime -max_epochs = 420 -stage2_num_epochs = 30 -base_lr = 4e-3 -train_batch_size = 256 -val_batch_size = 64 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 
116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=1., - widen_factor=1., - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=1024, - out_channels=num_keypoints, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - 
persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - # bbox_file=f'{data_root}person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# common setting +num_keypoints = 17 +input_size = (192, 256) + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 256 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=num_keypoints, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# 
backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + # bbox_file=f'{data_root}person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-384x288.py b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-384x288.py index 47697078d5..aaa3268dcc 100644 --- 
a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-384x288.py +++ b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-384x288.py @@ -1,238 +1,238 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# common setting -num_keypoints = 17 -input_size = (288, 384) - -# runtime -max_epochs = 420 -stage2_num_epochs = 30 -base_lr = 4e-3 -train_batch_size = 256 -val_batch_size = 64 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(6., 6.93), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=1., - widen_factor=1., - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=1024, - out_channels=num_keypoints, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), 
- ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - # bbox_file=f'{data_root}person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# common setting +num_keypoints = 17 +input_size = (288, 384) + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 256 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + 
input_size=input_size, + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=num_keypoints, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + 
data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + # bbox_file=f'{data_root}person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-l_8xb512-700e_body8-halpe26-256x192.py b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-l_8xb512-700e_body8-halpe26-256x192.py index fe19d45af9..4729eb292d 100644 --- a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-l_8xb512-700e_body8-halpe26-256x192.py +++ b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-l_8xb512-700e_body8-halpe26-256x192.py @@ -1,535 +1,535 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# common setting -num_keypoints = 26 -input_size = (192, 256) - -# runtime -max_epochs = 700 -stage2_num_epochs = 30 -base_lr = 4e-3 -train_batch_size = 512 -val_batch_size = 64 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=1., - widen_factor=1., - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-256x192-4dba18fc_20230504.pth' # noqa - )), - head=dict( - type='RTMCCHead', - 
in_channels=1024, - out_channels=num_keypoints, - input_size=input_size, - in_featuremap_size=tuple([s // 32 for s in input_size]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PhotometricDistortion'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.5, 1.5], - rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] - -# mapping -coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), - (20, 21), (21, 23), (22, 25)] - -aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), - (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), - (12, 17), (13, 18)] - -crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), - (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), - (13, 18)] - -mpii_halpe26 = [ - (0, 16), - (1, 14), - (2, 12), - (3, 11), - (4, 13), - (5, 15), - (8, 18), - (9, 17), - (10, 10), - (11, 8), - (12, 6), - (13, 5), - (14, 7), - (15, 9), -] - -jhmdb_halpe26 = [ - (0, 18), - (2, 17), - (3, 6), - (4, 5), - (5, 12), - (6, 11), - (7, 8), - (8, 7), - (9, 14), - (10, 13), - (11, 10), - (12, 9), - (13, 16), - (14, 15), -] - -halpe_halpe26 = [(i, i) for i in range(26)] - -ochuman_halpe26 = [(i, i) for i in range(17)] - -posetrack_halpe26 = [ - (0, 0), - (2, 17), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -# train datasets -dataset_coco = dict( - type=dataset_type, - data_root=data_root, - 
data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -dataset_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -dataset_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_train.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -dataset_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_train.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -dataset_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_train_v1.json', - data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -dataset_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_train.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=5, - pin_memory=True, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - dataset_coco, - dataset_aic, - dataset_crowdpose, - dataset_mpii, - dataset_jhmdb, - dataset_halpe, - dataset_posetrack, - ], - pipeline=train_pipeline, - test_mode=False, - )) - -# val datasets -val_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -val_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_val.json', - data_prefix=dict( - img='pose/ai_challenge/ai_challenger_keypoint' - '_validation_20170911/keypoint_validation_images_20170911/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -val_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', 
- data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -val_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_val.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -val_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_test.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -val_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_val_v1.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -val_ochuman = dict( - type='OCHumanDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='ochuman/annotations/' - 'ochuman_coco_format_val_range_0.00_1.00.json', - data_prefix=dict(img='pose/OCHuman/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=ochuman_halpe26) - ], -) - -val_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_val.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=5, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - val_coco, - val_aic, - val_crowdpose, - val_mpii, - val_jhmdb, - val_halpe, - val_ochuman, - val_posetrack, - ], - pipeline=val_pipeline, - test_mode=True, - )) - -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] -val_evaluator = test_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# common setting +num_keypoints = 26 +input_size = (192, 256) + +# runtime +max_epochs = 700 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 512 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + 
by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-256x192-4dba18fc_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] + +# mapping +coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), + (20, 21), (21, 23), (22, 25)] + +aic_halpe26 = [(0, 6), (1, 8), 
(2, 10), (3, 5), (4, 7), + (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), + (12, 17), (13, 18)] + +crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), + (13, 18)] + +mpii_halpe26 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (8, 18), + (9, 17), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_halpe26 = [ + (0, 18), + (2, 17), + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_halpe26 = [(i, i) for i in range(26)] + +ochuman_halpe26 = [(i, i) for i in range(17)] + +posetrack_halpe26 = [ + (0, 0), + (2, 17), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=5, + pin_memory=True, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + dataset_coco, + dataset_aic, 
+ dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=ochuman_halpe26) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=5, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - 
stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] +val_evaluator = test_evaluator diff --git a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-l_8xb512-700e_body8-halpe26-384x288.py b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-l_8xb512-700e_body8-halpe26-384x288.py index bec4fcb924..c7a2e3c8b9 100644 --- a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-l_8xb512-700e_body8-halpe26-384x288.py +++ b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-l_8xb512-700e_body8-halpe26-384x288.py @@ -1,535 +1,535 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# common setting -num_keypoints = 26 -input_size = (288, 384) - -# runtime -max_epochs = 700 -stage2_num_epochs = 30 -base_lr = 4e-3 -train_batch_size = 512 -val_batch_size = 64 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(6., 6.93), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=1., - widen_factor=1., - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-384x288-3f5a1437_20230504.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=1024, - out_channels=num_keypoints, - input_size=input_size, - in_featuremap_size=tuple([s // 32 for s in input_size]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PhotometricDistortion'), - dict( - 
type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.5, 1.5], - rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] - -# mapping -coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), - (20, 21), (21, 23), (22, 25)] - -aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), - (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), - (12, 17), (13, 18)] - -crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), - (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), - (13, 18)] - -mpii_halpe26 = [ - (0, 16), - (1, 14), - (2, 12), - (3, 11), - (4, 13), - (5, 15), - (8, 18), - (9, 17), - (10, 10), - (11, 8), - (12, 6), - (13, 5), - (14, 7), - (15, 9), -] - -jhmdb_halpe26 = [ - (0, 18), - (2, 17), - (3, 6), - (4, 5), - (5, 12), - (6, 11), - (7, 8), - (8, 7), - (9, 14), - (10, 13), - (11, 10), - (12, 9), - (13, 16), - (14, 15), -] - -halpe_halpe26 = [(i, i) for i in range(26)] - -ochuman_halpe26 = [(i, i) for i in range(17)] - -posetrack_halpe26 = [ - (0, 0), - (2, 17), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -# train datasets -dataset_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -dataset_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -dataset_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - 
ann_file='mpii/annotations/mpii_train.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -dataset_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_train.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -dataset_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_train_v1.json', - data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -dataset_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_train.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - pin_memory=True, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - dataset_coco, - dataset_aic, - dataset_crowdpose, - dataset_mpii, - dataset_jhmdb, - dataset_halpe, - dataset_posetrack, - ], - pipeline=train_pipeline, - test_mode=False, - )) - -# val datasets -val_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -val_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_val.json', - data_prefix=dict( - img='pose/ai_challenge/ai_challenger_keypoint' - '_validation_20170911/keypoint_validation_images_20170911/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -val_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -val_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_val.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -val_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_test.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -val_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_val_v1.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - 
], -) - -val_ochuman = dict( - type='OCHumanDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='ochuman/annotations/' - 'ochuman_coco_format_val_range_0.00_1.00.json', - data_prefix=dict(img='pose/OCHuman/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=ochuman_halpe26) - ], -) - -val_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_val.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - val_coco, - val_aic, - val_crowdpose, - val_mpii, - val_jhmdb, - val_halpe, - val_ochuman, - val_posetrack, - ], - pipeline=val_pipeline, - test_mode=True, - )) - -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] -val_evaluator = test_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# common setting +num_keypoints = 26 +input_size = (288, 384) + +# runtime +max_epochs = 700 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 512 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/rtmpose-l_simcc-body7_pt-body7_420e-384x288-3f5a1437_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + 
out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] + +# mapping +coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), + (20, 21), (21, 23), (22, 25)] + +aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), + (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), + (12, 17), (13, 18)] + +crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), + (13, 18)] + +mpii_halpe26 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (8, 18), + (9, 17), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_halpe26 = [ + (0, 18), + (2, 17), + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_halpe26 = [(i, i) for i in range(26)] + +ochuman_halpe26 = [(i, i) for i in range(17)] + +posetrack_halpe26 = [ + (0, 0), + (2, 17), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + 
ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + pin_memory=True, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + 
data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=ochuman_halpe26) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] +val_evaluator = test_evaluator diff --git a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py index 97e70667e6..8dfb261133 100644 --- a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py +++ b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py @@ -1,232 +1,232 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# common setting -num_keypoints = 17 -input_size = (192, 256) - -# runtime -max_epochs = 420 -stage2_num_epochs = 30 -base_lr = 4e-3 -train_batch_size = 256 -val_batch_size = 64 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, 
weight_decay=0.05), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=768, - out_channels=num_keypoints, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), 
- dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - # bbox_file=f'{data_root}person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# common setting +num_keypoints = 17 +input_size = (192, 256) + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 256 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + 
in_channels=768, + out_channels=num_keypoints, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + # bbox_file=f'{data_root}person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + 
type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-384x288.py b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-384x288.py index 5216cf1b44..0bf9c405a3 100644 --- a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-384x288.py +++ b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-384x288.py @@ -1,232 +1,232 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# common setting -num_keypoints = 17 -input_size = (288, 384) - -# runtime -max_epochs = 420 -stage2_num_epochs = 30 -base_lr = 4e-3 -train_batch_size = 256 -val_batch_size = 64 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(6., 6.93), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=768, - out_channels=num_keypoints, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', 
input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - # bbox_file=f'{data_root}person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# common setting +num_keypoints = 17 +input_size = (288, 384) + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 256 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + 
type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=num_keypoints, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + 
batch_size=train_batch_size, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + # bbox_file=f'{data_root}person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb512-700e_body8-halpe26-256x192.py b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb512-700e_body8-halpe26-256x192.py index 6391044c87..73e04127ca 100644 --- a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb512-700e_body8-halpe26-256x192.py +++ b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb512-700e_body8-halpe26-256x192.py @@ -1,529 +1,529 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# common setting -num_keypoints = 26 -input_size = (192, 256) - -# runtime -max_epochs = 700 -stage2_num_epochs = 30 -base_lr = 4e-3 -train_batch_size = 512 -val_batch_size = 64 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - 
checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-256x192-e48f03d0_20230504.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=768, - out_channels=num_keypoints, - input_size=input_size, - in_featuremap_size=tuple([s // 32 for s in input_size]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PhotometricDistortion'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.5, 1.5], - rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# mapping -coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), - (20, 21), (21, 23), (22, 25)] - -aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), - (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), - (12, 17), (13, 18)] - -crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), - (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), - (13, 18)] - -mpii_halpe26 = [ - (0, 16), - (1, 14), - (2, 12), - (3, 11), - (4, 13), - (5, 15), - (8, 18), - (9, 17), - (10, 10), - (11, 8), - (12, 6), - (13, 5), - (14, 7), - (15, 9), -] - -jhmdb_halpe26 = [ - (0, 18), - (2, 17), - (3, 6), - (4, 5), - (5, 12), - (6, 11), - (7, 8), - (8, 7), - (9, 14), - (10, 13), - (11, 10), - (12, 9), - (13, 16), - (14, 15), -] - -halpe_halpe26 = [(i, i) for i in range(26)] - -ochuman_halpe26 = [(i, i) for i in range(17)] - -posetrack_halpe26 = [ - (0, 0), - (2, 17), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 
16), -] - -# train datasets -dataset_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -dataset_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -dataset_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_train.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -dataset_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_train.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -dataset_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_train_v1.json', - data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -dataset_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_train.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - pin_memory=True, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - dataset_coco, - dataset_aic, - dataset_crowdpose, - dataset_mpii, - dataset_jhmdb, - dataset_halpe, - dataset_posetrack, - ], - pipeline=train_pipeline, - test_mode=False, - )) - -# val datasets -val_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -val_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_val.json', - data_prefix=dict( - img='pose/ai_challenge/ai_challenger_keypoint' - '_validation_20170911/keypoint_validation_images_20170911/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -val_crowdpose = dict( - type='CrowdPoseDataset', - 
data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -val_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_val.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -val_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_test.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -val_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_val_v1.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -val_ochuman = dict( - type='OCHumanDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='ochuman/annotations/' - 'ochuman_coco_format_val_range_0.00_1.00.json', - data_prefix=dict(img='pose/OCHuman/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=ochuman_halpe26) - ], -) - -val_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_val.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - val_coco, - val_aic, - val_crowdpose, - val_mpii, - val_jhmdb, - val_halpe, - val_ochuman, - val_posetrack, - ], - pipeline=val_pipeline, - test_mode=True, - )) - -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] -val_evaluator = test_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# common setting +num_keypoints = 26 +input_size = (192, 256) + +# runtime +max_epochs = 700 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 512 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + 
type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-256x192-e48f03d0_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# mapping +coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), + (20, 21), (21, 23), 
(22, 25)] + +aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), + (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), + (12, 17), (13, 18)] + +crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), + (13, 18)] + +mpii_halpe26 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (8, 18), + (9, 17), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_halpe26 = [ + (0, 18), + (2, 17), + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_halpe26 = [(i, i) for i in range(26)] + +ochuman_halpe26 = [(i, i) for i in range(17)] + +posetrack_halpe26 = [ + (0, 0), + (2, 17), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + pin_memory=True, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + 
metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=ochuman_halpe26) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + 
update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] +val_evaluator = test_evaluator diff --git a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb512-700e_body8-halpe26-384x288.py b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb512-700e_body8-halpe26-384x288.py index 2944058bd1..4751b0002b 100644 --- a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb512-700e_body8-halpe26-384x288.py +++ b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb512-700e_body8-halpe26-384x288.py @@ -1,542 +1,542 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# common setting -num_keypoints = 26 -input_size = (288, 384) - -# runtime -max_epochs = 700 -stage2_num_epochs = 30 -base_lr = 4e-3 -train_batch_size = 512 -val_batch_size = 64 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(6., 6.93), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-384x288-65e718c4_20230504.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=768, - out_channels=num_keypoints, - input_size=input_size, - in_featuremap_size=tuple([s // 32 for s in input_size]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/' - -# backend_args = dict(backend='local') -backend_args = dict( - backend='petrel', - path_mapping=dict({ - f'{data_root}': 's3://openmmlab/datasets/', - f'{data_root}': 's3://openmmlab/datasets/' - })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - 
dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PhotometricDistortion'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.5, 1.5], - rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] - -# mapping -coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), - (20, 21), (21, 23), (22, 25)] - -aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), - (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), - (12, 17), (13, 18)] - -crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), - (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), - (13, 18)] - -mpii_halpe26 = [ - (0, 16), - (1, 14), - (2, 12), - (3, 11), - (4, 13), - (5, 15), - (8, 18), - (9, 17), - (10, 10), - (11, 8), - (12, 6), - (13, 5), - (14, 7), - (15, 9), -] - -jhmdb_halpe26 = [ - (0, 18), - (2, 17), - (3, 6), - (4, 5), - (5, 12), - (6, 11), - (7, 8), - (8, 7), - (9, 14), - (10, 13), - (11, 10), - (12, 9), - (13, 16), - (14, 15), -] - -halpe_halpe26 = [(i, i) for i in range(26)] - -ochuman_halpe26 = [(i, i) for i in range(17)] - -posetrack_halpe26 = [ - (0, 0), - (2, 17), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -# train datasets -dataset_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -dataset_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', - 
data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -dataset_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_train.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -dataset_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_train.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -dataset_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_train_v1.json', - data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -dataset_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_train.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - pin_memory=True, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - dataset_coco, - dataset_aic, - dataset_crowdpose, - dataset_mpii, - dataset_jhmdb, - dataset_halpe, - dataset_posetrack, - ], - pipeline=train_pipeline, - test_mode=False, - )) - -# val datasets -val_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -val_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_val.json', - data_prefix=dict( - img='pose/ai_challenge/ai_challenger_keypoint' - '_validation_20170911/keypoint_validation_images_20170911/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -val_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -val_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_val.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -val_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_test.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -val_halpe = dict( - 
type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_val_v1.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -val_ochuman = dict( - type='OCHumanDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='ochuman/annotations/' - 'ochuman_coco_format_val_range_0.00_1.00.json', - data_prefix=dict(img='pose/OCHuman/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=ochuman_halpe26) - ], -) - -val_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_val.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - val_coco, - val_aic, - val_crowdpose, - val_mpii, - val_jhmdb, - val_halpe, - val_ochuman, - val_posetrack, - ], - pipeline=val_pipeline, - test_mode=True, - )) - -test_dataloader = val_dataloader - -# hooks -# default_hooks = dict( -default_hooks = dict( - checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] -val_evaluator = test_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# common setting +num_keypoints = 26 +input_size = (288, 384) + +# runtime +max_epochs = 700 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 512 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + 
act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-384x288-65e718c4_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +# backend_args = dict(backend='local') +backend_args = dict( + backend='petrel', + path_mapping=dict({ + f'{data_root}': 's3://openmmlab/datasets/', + f'{data_root}': 's3://openmmlab/datasets/' + })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] + +# mapping +coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), + (20, 21), (21, 23), (22, 25)] + +aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), + (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), + (12, 17), (13, 18)] + +crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), + (13, 18)] + +mpii_halpe26 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (8, 18), + (9, 17), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_halpe26 = [ + (0, 18), + (2, 17), + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 
10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_halpe26 = [(i, i) for i in range(26)] + +ochuman_halpe26 = [(i, i) for i in range(17)] + +posetrack_halpe26 = [ + (0, 0), + (2, 17), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + pin_memory=True, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + 
ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=ochuman_halpe26) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +test_dataloader = val_dataloader + +# hooks +# default_hooks = dict( +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] +val_evaluator = test_evaluator diff --git a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-s_8xb1024-700e_body8-halpe26-256x192.py b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-s_8xb1024-700e_body8-halpe26-256x192.py index 3f7d985079..f313d2545a 100644 --- a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-s_8xb1024-700e_body8-halpe26-256x192.py +++ 
b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-s_8xb1024-700e_body8-halpe26-256x192.py @@ -1,535 +1,535 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# common setting -num_keypoints = 26 -input_size = (192, 256) - -# runtime -max_epochs = 700 -stage2_num_epochs = 30 -base_lr = 4e-3 -train_batch_size = 1024 -val_batch_size = 64 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.0), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.33, - widen_factor=0.5, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/rtmpose-s_simcc-body7_pt-body7_420e-256x192-acd4a1ef_20230504.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=512, - out_channels=num_keypoints, - input_size=input_size, - in_featuremap_size=tuple([s // 32 for s in input_size]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PhotometricDistortion'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', 
input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.6, 1.4], - rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] - -# mapping -coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), - (20, 21), (21, 23), (22, 25)] - -aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), - (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), - (12, 17), (13, 18)] - -crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), - (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), - (13, 18)] - -mpii_halpe26 = [ - (0, 16), - (1, 14), - (2, 12), - (3, 11), - (4, 13), - (5, 15), - (8, 18), - (9, 17), - (10, 10), - (11, 8), - (12, 6), - (13, 5), - (14, 7), - (15, 9), -] - -jhmdb_halpe26 = [ - (0, 18), - (2, 17), - (3, 6), - (4, 5), - (5, 12), - (6, 11), - (7, 8), - (8, 7), - (9, 14), - (10, 13), - (11, 10), - (12, 9), - (13, 16), - (14, 15), -] - -halpe_halpe26 = [(i, i) for i in range(26)] - -ochuman_halpe26 = [(i, i) for i in range(17)] - -posetrack_halpe26 = [ - (0, 0), - (2, 17), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -# train datasets -dataset_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -dataset_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -dataset_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_train.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -dataset_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_train.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -dataset_halpe = dict( - 
type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_train_v1.json', - data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -dataset_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_train.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - pin_memory=True, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - dataset_coco, - dataset_aic, - dataset_crowdpose, - dataset_mpii, - dataset_jhmdb, - dataset_halpe, - dataset_posetrack, - ], - pipeline=train_pipeline, - test_mode=False, - )) - -# val datasets -val_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -val_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_val.json', - data_prefix=dict( - img='pose/ai_challenge/ai_challenger_keypoint' - '_validation_20170911/keypoint_validation_images_20170911/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -val_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -val_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_val.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -val_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_test.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -val_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_val_v1.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -val_ochuman = dict( - type='OCHumanDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='ochuman/annotations/' - 'ochuman_coco_format_val_range_0.00_1.00.json', - data_prefix=dict(img='pose/OCHuman/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=ochuman_halpe26) - ], -) - -val_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_val.json', - 
data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - val_coco, - val_aic, - val_crowdpose, - val_mpii, - val_jhmdb, - val_halpe, - val_ochuman, - val_posetrack, - ], - pipeline=val_pipeline, - test_mode=True, - )) - -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] -val_evaluator = test_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# common setting +num_keypoints = 26 +input_size = (192, 256) + +# runtime +max_epochs = 700 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 1024 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.0), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/rtmpose-s_simcc-body7_pt-body7_420e-256x192-acd4a1ef_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=512, + out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base 
dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.6, 1.4], + rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] + +# mapping +coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), + (20, 21), (21, 23), (22, 25)] + +aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), + (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), + (12, 17), (13, 18)] + +crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), + (13, 18)] + +mpii_halpe26 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (8, 18), + (9, 17), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_halpe26 = [ + (0, 18), + (2, 17), + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_halpe26 = [(i, i) for i in range(26)] + +ochuman_halpe26 = [(i, i) for i in range(17)] + +posetrack_halpe26 = [ + (0, 0), + (2, 17), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + 
type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + pin_memory=True, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + 
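# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the config diff, and not the actual mmpose
# implementation): the (src, dst) pairs in the *_halpe26 mapping lists feed
# `KeypointConverter`, which copies keypoint `src` of the source dataset into
# slot `dst` of a 26-slot Halpe-26 array; unmapped slots are assumed to keep
# zero visibility so they do not contribute to the loss.
import numpy as np

def remap_keypoints(kpts, vis, mapping, num_keypoints=26):
    """kpts: (K_src, 2), vis: (K_src,) -> arrays with `num_keypoints` slots."""
    out_kpts = np.zeros((num_keypoints, 2), dtype=kpts.dtype)
    out_vis = np.zeros(num_keypoints, dtype=vis.dtype)
    for src, dst in mapping:
        out_kpts[dst] = kpts[src]
        out_vis[dst] = vis[src]
    return out_kpts, out_vis

# e.g. the (12, 17) pair in aic_halpe26 moves AIC keypoint 12 into slot 17.
# ---------------------------------------------------------------------------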
data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=ochuman_halpe26) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] +val_evaluator = test_evaluator diff --git a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py index dd854f10f0..a9ddcbb4e9 100644 --- a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py +++ b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py @@ -1,238 +1,238 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# common setting -num_keypoints = 17 -input_size = (192, 256) - -# runtime -max_epochs = 420 -stage2_num_epochs = 30 -base_lr = 4e-3 -train_batch_size = 256 -val_batch_size = 64 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size 
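# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the config diff): `auto_scale_lr` only takes
# effect when LR auto-scaling is enabled at launch, in which case MMEngine is
# expected to apply the linear scaling rule against `base_batch_size`. Rough
# sketch of that rule, assuming the effective batch is world_size x per-GPU
# batch size:
def scale_lr(base_lr, world_size, batch_per_gpu, base_batch_size=1024):
    """Linear LR scaling rule (sketch, not MMEngine's implementation)."""
    return base_lr * (world_size * batch_per_gpu) / base_batch_size

# e.g. 8 GPUs x 256 images/GPU -> effective batch 2048 -> 4e-3 scales to 8e-3
assert abs(scale_lr(4e-3, 8, 256) - 8e-3) < 1e-12
# ---------------------------------------------------------------------------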
-auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.33, - widen_factor=0.5, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=512, - out_channels=num_keypoints, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - 
persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - # bbox_file=f'{data_root}person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# common setting +num_keypoints = 17 +input_size = (192, 256) + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 256 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=512, + out_channels=num_keypoints, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + 
act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + # bbox_file=f'{data_root}person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators 
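# ---------------------------------------------------------------------------
# Illustrative note (not part of the config diff): the PipelineSwitchHook
# registered in custom_hooks swaps the training pipeline to
# `train_pipeline_stage2` (milder geometric augmentation, lighter
# CoarseDropout) for the final `stage2_num_epochs` epochs. Quick check of the
# switch point for this 420-epoch config:
max_epochs, stage2_num_epochs = 420, 30
switch_epoch = max_epochs - stage2_num_epochs
assert switch_epoch == 390   # stage-2 pipeline is used for epochs 390..419
# ---------------------------------------------------------------------------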
+val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-t_8xb1024-700e_body8-halpe26-256x192.py b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-t_8xb1024-700e_body8-halpe26-256x192.py index 69100b6cdc..44373500ed 100644 --- a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-t_8xb1024-700e_body8-halpe26-256x192.py +++ b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-t_8xb1024-700e_body8-halpe26-256x192.py @@ -1,536 +1,536 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# common setting -num_keypoints = 26 -input_size = (192, 256) - -# runtime -max_epochs = 700 -stage2_num_epochs = 30 -base_lr = 4e-3 -train_batch_size = 1024 -val_batch_size = 64 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.167, - widen_factor=0.375, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-tiny_udp-body7_210e-256x192-a3775292_20230504.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=384, - out_channels=num_keypoints, - input_size=input_size, - in_featuremap_size=tuple([s // 32 for s in input_size]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PhotometricDistortion'), - dict( - type='Albumentation', - transforms=[ - 
dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.6, 1.4], - rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] - -# mapping -coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), - (20, 21), (21, 23), (22, 25)] - -aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), - (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), - (12, 17), (13, 18)] - -crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), - (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), - (13, 18)] - -mpii_halpe26 = [ - (0, 16), - (1, 14), - (2, 12), - (3, 11), - (4, 13), - (5, 15), - (8, 18), - (9, 17), - (10, 10), - (11, 8), - (12, 6), - (13, 5), - (14, 7), - (15, 9), -] - -jhmdb_halpe26 = [ - (0, 18), - (2, 17), - (3, 6), - (4, 5), - (5, 12), - (6, 11), - (7, 8), - (8, 7), - (9, 14), - (10, 13), - (11, 10), - (12, 9), - (13, 16), - (14, 15), -] - -halpe_halpe26 = [(i, i) for i in range(26)] - -ochuman_halpe26 = [(i, i) for i in range(17)] - -posetrack_halpe26 = [ - (0, 0), - (2, 17), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -# train datasets -dataset_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -dataset_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -dataset_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_train.json', - 
data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -dataset_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_train.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -dataset_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_train_v1.json', - data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -dataset_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_train.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - pin_memory=True, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - dataset_coco, - dataset_aic, - dataset_crowdpose, - dataset_mpii, - dataset_jhmdb, - dataset_halpe, - dataset_posetrack, - ], - pipeline=train_pipeline, - test_mode=False, - )) - -# val datasets -val_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -val_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_val.json', - data_prefix=dict( - img='pose/ai_challenge/ai_challenger_keypoint' - '_validation_20170911/keypoint_validation_images_20170911/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -val_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -val_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_val.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -val_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_test.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -val_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_val_v1.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -val_ochuman = dict( - 
type='OCHumanDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='ochuman/annotations/' - 'ochuman_coco_format_val_range_0.00_1.00.json', - data_prefix=dict(img='pose/OCHuman/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=ochuman_halpe26) - ], -) - -val_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_val.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - pin_memory=True, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - val_coco, - val_aic, - val_crowdpose, - val_mpii, - val_jhmdb, - val_halpe, - val_ochuman, - val_posetrack, - ], - pipeline=val_pipeline, - test_mode=True, - )) - -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - # dict( - # type='EMAHook', - # ema_type='ExpMomentumEMA', - # momentum=0.0002, - # update_buffers=True, - # priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] -val_evaluator = test_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# common setting +num_keypoints = 26 +input_size = (192, 256) + +# runtime +max_epochs = 700 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 1024 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.167, + widen_factor=0.375, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-tiny_udp-body7_210e-256x192-a3775292_20230504.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=384, + 
out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.6, 1.4], + rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] + +# mapping +coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), + (20, 21), (21, 23), (22, 25)] + +aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), + (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), + (12, 17), (13, 18)] + +crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), + (13, 18)] + +mpii_halpe26 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (8, 18), + (9, 17), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_halpe26 = [ + (0, 18), + (2, 17), + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_halpe26 = [(i, i) for i in range(26)] + +ochuman_halpe26 = [(i, i) for i in range(17)] + +posetrack_halpe26 = [ + (0, 0), + (2, 17), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + 
ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + pin_memory=True, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + 
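# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the config diff): `CombinedDataset` is
# assumed to concatenate the wrapped datasets after each wrapper's
# KeypointConverter has remapped its annotations to the shared Halpe-26
# skeleton (metainfo from configs/_base_/datasets/halpe26.py). A rough model
# of the concatenation/indexing behaviour, not the mmpose implementation:
from itertools import accumulate

class ConcatLike:
    def __init__(self, datasets):
        self.datasets = list(datasets)
        self.cum = list(accumulate(len(d) for d in self.datasets))

    def __len__(self):
        return self.cum[-1] if self.cum else 0

    def __getitem__(self, idx):
        for ds_i, bound in enumerate(self.cum):
            if idx < bound:
                start = self.cum[ds_i - 1] if ds_i else 0
                return self.datasets[ds_i][idx - start]
        raise IndexError(idx)

# e.g. ConcatLike([[1, 2], [3]])[2] == 3
# ---------------------------------------------------------------------------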
data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=ochuman_halpe26) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + pin_memory=True, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + # dict( + # type='EMAHook', + # ema_type='ExpMomentumEMA', + # momentum=0.0002, + # update_buffers=True, + # priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] +val_evaluator = test_evaluator diff --git a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-t_8xb256-420e_coco-256x192.py b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-t_8xb256-420e_coco-256x192.py index 1f344c72d1..27fc75ccdf 100644 --- a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-t_8xb256-420e_coco-256x192.py +++ b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-t_8xb256-420e_coco-256x192.py @@ -1,239 +1,239 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# common setting -num_keypoints = 17 -input_size = (192, 256) - -# runtime -max_epochs = 420 -stage2_num_epochs = 30 -base_lr = 4e-3 -train_batch_size = 256 -val_batch_size = 64 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - 
optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.167, - widen_factor=0.375, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-tiny_udp-aic-coco_210e-256x192-cbed682d_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=384, - out_channels=num_keypoints, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - 
scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - # bbox_file=f'{data_root}person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - # Turn off EMA while training the tiny model - # dict( - # type='EMAHook', - # ema_type='ExpMomentumEMA', - # momentum=0.0002, - # update_buffers=True, - # priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# common setting +num_keypoints = 17 +input_size = (192, 256) + +# runtime +max_epochs = 420 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 256 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.167, + widen_factor=0.375, + out_indices=(4, ), + 
channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-tiny_udp-aic-coco_210e-256x192-cbed682d_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=384, + out_channels=num_keypoints, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + 
ann_file='annotations/person_keypoints_val2017.json', + # bbox_file=f'{data_root}person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + # Turn off EMA while training the tiny model + # dict( + # type='EMAHook', + # ema_type='ExpMomentumEMA', + # momentum=0.0002, + # update_buffers=True, + # priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-x_8xb256-700e_body8-halpe26-384x288.py b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-x_8xb256-700e_body8-halpe26-384x288.py index e0ad3aeb9d..4afa1ea24a 100644 --- a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-x_8xb256-700e_body8-halpe26-384x288.py +++ b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-x_8xb256-700e_body8-halpe26-384x288.py @@ -1,535 +1,535 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# common setting -num_keypoints = 26 -input_size = (288, 384) - -# runtime -max_epochs = 700 -stage2_num_epochs = 20 -base_lr = 4e-3 -train_batch_size = 256 -val_batch_size = 64 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(6., 6.93), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=1.33, - widen_factor=1.25, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-x_udp-body7_210e-384x288-d28b58e6_20230529.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=1280, - out_channels=num_keypoints, - input_size=input_size, - in_featuremap_size=tuple([s // 32 for s in input_size]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - 
type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PhotometricDistortion'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.5, 1.5], - rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict( - type='GenerateTarget', - encoder=codec, - use_dataset_keypoint_weights=True), - dict(type='PackPoseInputs') -] - -# mapping -coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), - (20, 21), (21, 23), (22, 25)] - -aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), - (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), - (12, 17), (13, 18)] - -crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), - (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), - (13, 18)] - -mpii_halpe26 = [ - (0, 16), - (1, 14), - (2, 12), - (3, 11), - (4, 13), - (5, 15), - (8, 18), - (9, 17), - (10, 10), - (11, 8), - (12, 6), - (13, 5), - (14, 7), - (15, 9), -] - -jhmdb_halpe26 = [ - (0, 18), - (2, 17), - (3, 6), - (4, 5), - (5, 12), - (6, 11), - (7, 8), - (8, 7), - (9, 14), - (10, 13), - (11, 10), - (12, 9), - (13, 16), - (14, 15), -] - -halpe_halpe26 = [(i, i) for i in range(26)] - -ochuman_halpe26 = [(i, i) for i in range(17)] - -posetrack_halpe26 = [ - (0, 0), - (2, 17), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - (10, 10), - (11, 11), - (12, 12), - (13, 13), - (14, 14), - (15, 15), - (16, 16), -] - -# train datasets -dataset_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='detection/coco/train2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -dataset_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_train.json', - 
data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' - '_train_20170902/keypoint_train_images_20170902/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -dataset_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -dataset_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_train.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -dataset_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_train.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -dataset_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_train_v1.json', - data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -dataset_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_train.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - pin_memory=True, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - dataset_coco, - dataset_aic, - dataset_crowdpose, - dataset_mpii, - dataset_jhmdb, - dataset_halpe, - dataset_posetrack, - ], - pipeline=train_pipeline, - test_mode=False, - )) - -# val datasets -val_coco = dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='coco/annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=coco_halpe26) - ], -) - -val_aic = dict( - type='AicDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='aic/annotations/aic_val.json', - data_prefix=dict( - img='pose/ai_challenge/ai_challenger_keypoint' - '_validation_20170911/keypoint_validation_images_20170911/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=aic_halpe26) - ], -) - -val_crowdpose = dict( - type='CrowdPoseDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', - data_prefix=dict(img='pose/CrowdPose/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=crowdpose_halpe26) - ], -) - -val_mpii = dict( - type='MpiiDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='mpii/annotations/mpii_val.json', - data_prefix=dict(img='pose/MPI/images/'), - pipeline=[ - dict( - 
type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=mpii_halpe26) - ], -) - -val_jhmdb = dict( - type='JhmdbDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='jhmdb/annotations/Sub1_test.json', - data_prefix=dict(img='pose/JHMDB/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=jhmdb_halpe26) - ], -) - -val_halpe = dict( - type='HalpeDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='halpe/annotations/halpe_val_v1.json', - data_prefix=dict(img='detection/coco/val2017/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=halpe_halpe26) - ], -) - -val_ochuman = dict( - type='OCHumanDataset', - data_root=data_root, - data_mode=data_mode, - ann_file='ochuman/annotations/' - 'ochuman_coco_format_val_range_0.00_1.00.json', - data_prefix=dict(img='pose/OCHuman/images/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=ochuman_halpe26) - ], -) - -val_posetrack = dict( - type='PoseTrack18Dataset', - data_root=data_root, - data_mode=data_mode, - ann_file='posetrack18/annotations/posetrack18_val.json', - data_prefix=dict(img='pose/PoseChallenge2018/'), - pipeline=[ - dict( - type='KeypointConverter', - num_keypoints=num_keypoints, - mapping=posetrack_halpe26) - ], -) - -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type='CombinedDataset', - metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), - datasets=[ - val_coco, - val_aic, - val_crowdpose, - val_mpii, - val_jhmdb, - val_halpe, - val_ochuman, - val_posetrack, - ], - pipeline=val_pipeline, - test_mode=True, - )) - -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] -val_evaluator = test_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# common setting +num_keypoints = 26 +input_size = (288, 384) + +# runtime +max_epochs = 700 +stage2_num_epochs = 20 +base_lr = 4e-3 +train_batch_size = 256 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + 
type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1.33, + widen_factor=1.25, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-x_udp-body7_210e-384x288-d28b58e6_20230529.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1280, + out_channels=num_keypoints, + input_size=input_size, + in_featuremap_size=tuple([s // 32 for s in input_size]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict( + type='GenerateTarget', + encoder=codec, + use_dataset_keypoint_weights=True), + dict(type='PackPoseInputs') +] + +# mapping +coco_halpe26 = [(i, i) for i in range(17)] + [(17, 20), (18, 22), (19, 24), + (20, 21), (21, 23), (22, 25)] + +aic_halpe26 = [(0, 6), (1, 8), (2, 10), (3, 5), (4, 7), + (5, 9), (6, 12), (7, 14), (8, 16), (9, 11), (10, 13), (11, 15), + (12, 17), (13, 18)] + +crowdpose_halpe26 = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9), (5, 10), (6, 11), + (7, 12), (8, 13), (9, 14), (10, 15), (11, 16), (12, 17), + (13, 18)] + +mpii_halpe26 = [ + (0, 16), + (1, 14), + (2, 12), + (3, 11), + (4, 13), + (5, 15), + (8, 18), + (9, 
17), + (10, 10), + (11, 8), + (12, 6), + (13, 5), + (14, 7), + (15, 9), +] + +jhmdb_halpe26 = [ + (0, 18), + (2, 17), + (3, 6), + (4, 5), + (5, 12), + (6, 11), + (7, 8), + (8, 7), + (9, 14), + (10, 13), + (11, 10), + (12, 9), + (13, 16), + (14, 15), +] + +halpe_halpe26 = [(i, i) for i in range(26)] + +ochuman_halpe26 = [(i, i) for i in range(17)] + +posetrack_halpe26 = [ + (0, 0), + (2, 17), + (3, 3), + (4, 4), + (5, 5), + (6, 6), + (7, 7), + (8, 8), + (9, 9), + (10, 10), + (11, 11), + (12, 12), + (13, 13), + (14, 14), + (15, 15), + (16, 16), +] + +# train datasets +dataset_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='detection/coco/train2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +dataset_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_train.json', + data_prefix=dict(img='pose/ai_challenge/ai_challenger_keypoint' + '_train_20170902/keypoint_train_images_20170902/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +dataset_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_trainval.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +dataset_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_train.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +dataset_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_train.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +dataset_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_train_v1.json', + data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +dataset_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_train.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + pin_memory=True, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + dataset_coco, + dataset_aic, + dataset_crowdpose, + dataset_mpii, + dataset_jhmdb, + dataset_halpe, + dataset_posetrack, + ], + pipeline=train_pipeline, + test_mode=False, + )) + +# val datasets +val_coco = dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='coco/annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='detection/coco/val2017/'), + 
pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=coco_halpe26) + ], +) + +val_aic = dict( + type='AicDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='aic/annotations/aic_val.json', + data_prefix=dict( + img='pose/ai_challenge/ai_challenger_keypoint' + '_validation_20170911/keypoint_validation_images_20170911/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=aic_halpe26) + ], +) + +val_crowdpose = dict( + type='CrowdPoseDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='crowdpose/annotations/mmpose_crowdpose_test.json', + data_prefix=dict(img='pose/CrowdPose/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=crowdpose_halpe26) + ], +) + +val_mpii = dict( + type='MpiiDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='mpii/annotations/mpii_val.json', + data_prefix=dict(img='pose/MPI/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=mpii_halpe26) + ], +) + +val_jhmdb = dict( + type='JhmdbDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='jhmdb/annotations/Sub1_test.json', + data_prefix=dict(img='pose/JHMDB/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=jhmdb_halpe26) + ], +) + +val_halpe = dict( + type='HalpeDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='halpe/annotations/halpe_val_v1.json', + data_prefix=dict(img='detection/coco/val2017/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=halpe_halpe26) + ], +) + +val_ochuman = dict( + type='OCHumanDataset', + data_root=data_root, + data_mode=data_mode, + ann_file='ochuman/annotations/' + 'ochuman_coco_format_val_range_0.00_1.00.json', + data_prefix=dict(img='pose/OCHuman/images/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=ochuman_halpe26) + ], +) + +val_posetrack = dict( + type='PoseTrack18Dataset', + data_root=data_root, + data_mode=data_mode, + ann_file='posetrack18/annotations/posetrack18_val.json', + data_prefix=dict(img='pose/PoseChallenge2018/'), + pipeline=[ + dict( + type='KeypointConverter', + num_keypoints=num_keypoints, + mapping=posetrack_halpe26) + ], +) + +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type='CombinedDataset', + metainfo=dict(from_file='configs/_base_/datasets/halpe26.py'), + datasets=[ + val_coco, + val_aic, + val_crowdpose, + val_mpii, + val_jhmdb, + val_halpe, + val_ochuman, + val_posetrack, + ], + pipeline=val_pipeline, + test_mode=True, + )) + +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +test_evaluator = [dict(type='PCKAccuracy', thr=0.1), dict(type='AUC')] +val_evaluator = test_evaluator diff --git a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-x_8xb256-700e_coco-384x288.py b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-x_8xb256-700e_coco-384x288.py index 
1441e07791..a8cc56e053 100644 --- a/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-x_8xb256-700e_coco-384x288.py +++ b/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-x_8xb256-700e_coco-384x288.py @@ -1,238 +1,238 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# common setting -num_keypoints = 17 -input_size = (288, 384) - -# runtime -max_epochs = 700 -stage2_num_epochs = 20 -base_lr = 4e-3 -train_batch_size = 256 -val_batch_size = 64 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(6., 6.93), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=1.33, - widen_factor=1.28, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-x_udp-body7_210e-384x288-d28b58e6_20230529.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=1280, - out_channels=num_keypoints, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True)) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PhotometricDistortion'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - 
min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.5, 1.5], - rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - bbox_file=f'{data_root}person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# common setting +num_keypoints = 17 +input_size = (288, 384) + +# runtime +max_epochs = 700 +stage2_num_epochs = 20 +base_lr = 4e-3 +train_batch_size = 256 +val_batch_size = 64 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings 
+codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1.33, + widen_factor=1.28, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-x_udp-body7_210e-384x288-d28b58e6_20230529.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1280, + out_channels=num_keypoints, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True)) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PhotometricDistortion'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + 
dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + bbox_file=f'{data_root}person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/projects/rtmpose/rtmpose/face_2d_keypoint/rtmpose-m_8xb256-120e_lapa-256x256.py b/projects/rtmpose/rtmpose/face_2d_keypoint/rtmpose-m_8xb256-120e_lapa-256x256.py index 5490074a4d..139f453e57 100644 --- a/projects/rtmpose/rtmpose/face_2d_keypoint/rtmpose-m_8xb256-120e_lapa-256x256.py +++ b/projects/rtmpose/rtmpose/face_2d_keypoint/rtmpose-m_8xb256-120e_lapa-256x256.py @@ -1,246 +1,246 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# common setting -num_keypoints = 106 -input_size = (256, 256) - -# runtime -max_epochs = 120 -stage2_num_epochs = 10 -base_lr = 4e-3 -train_batch_size = 256 -val_batch_size = 32 - -train_cfg = dict(max_epochs=max_epochs, val_interval=1) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.005, - begin=30, - end=max_epochs, - T_max=max_epochs - 30, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(5.66, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' - 'rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth' # noqa - )), - head=dict( - type='RTMCCHead', - 
in_channels=768, - out_channels=num_keypoints, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'LapaDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.2), - dict(type='MedianBlur', p=0.2), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.5, 1.5], - rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/lapa_trainval.json', - data_prefix=dict(img='LaPa/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/lapa_test.json', - data_prefix=dict(img='LaPa/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = dict( - batch_size=val_batch_size, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/lapa_test.json', - data_prefix=dict(img='LaPa/'), - test_mode=True, - pipeline=val_pipeline, - )) - -# hooks -default_hooks = dict( - checkpoint=dict( - 
save_best='NME', rule='less', max_keep_ckpts=3, interval=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='NME', - norm_mode='keypoint_distance', -) -test_evaluator = val_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# common setting +num_keypoints = 106 +input_size = (256, 256) + +# runtime +max_epochs = 120 +stage2_num_epochs = 10 +base_lr = 4e-3 +train_batch_size = 256 +val_batch_size = 32 + +train_cfg = dict(max_epochs=max_epochs, val_interval=1) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.005, + begin=30, + end=max_epochs, + T_max=max_epochs - 30, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(5.66, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=num_keypoints, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'LapaDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.2), + dict(type='MedianBlur', p=0.2), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + 
min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/lapa_trainval.json', + data_prefix=dict(img='LaPa/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/lapa_test.json', + data_prefix=dict(img='LaPa/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=val_batch_size, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/lapa_test.json', + data_prefix=dict(img='LaPa/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='NME', rule='less', max_keep_ckpts=3, interval=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/projects/rtmpose/rtmpose/face_2d_keypoint/rtmpose-s_8xb256-120e_lapa-256x256.py b/projects/rtmpose/rtmpose/face_2d_keypoint/rtmpose-s_8xb256-120e_lapa-256x256.py index 2763ecd927..cc2503d7be 100644 --- a/projects/rtmpose/rtmpose/face_2d_keypoint/rtmpose-s_8xb256-120e_lapa-256x256.py +++ b/projects/rtmpose/rtmpose/face_2d_keypoint/rtmpose-s_8xb256-120e_lapa-256x256.py @@ -1,246 +1,246 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# common setting -num_keypoints = 106 -input_size = (256, 256) - -# runtime -max_epochs = 120 -stage2_num_epochs = 10 -base_lr = 4e-3 -train_batch_size = 256 -val_batch_size = 32 - -train_cfg = dict(max_epochs=max_epochs, val_interval=1) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', 
lr=base_lr, weight_decay=0.), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.005, - begin=30, - end=max_epochs, - T_max=max_epochs - 30, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(5.66, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.33, - widen_factor=0.5, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' - 'rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e-ea671761.pth') - ), - head=dict( - type='RTMCCHead', - in_channels=512, - out_channels=num_keypoints, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'LapaDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.2), - dict(type='MedianBlur', p=0.2), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - 
type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/lapa_trainval.json', - data_prefix=dict(img='LaPa/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/lapa_test.json', - data_prefix=dict(img='LaPa/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = dict( - batch_size=val_batch_size, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/lapa_test.json', - data_prefix=dict(img='LaPa/'), - test_mode=True, - pipeline=val_pipeline, - )) - -# hooks -default_hooks = dict( - checkpoint=dict( - save_best='NME', rule='less', max_keep_ckpts=3, interval=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='NME', - norm_mode='keypoint_distance', -) -test_evaluator = val_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# common setting +num_keypoints = 106 +input_size = (256, 256) + +# runtime +max_epochs = 120 +stage2_num_epochs = 10 +base_lr = 4e-3 +train_batch_size = 256 +val_batch_size = 32 + +train_cfg = dict(max_epochs=max_epochs, val_interval=1) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.005, + begin=30, + end=max_epochs, + T_max=max_epochs - 30, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(5.66, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + 
checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e-ea671761.pth') + ), + head=dict( + type='RTMCCHead', + in_channels=512, + out_channels=num_keypoints, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'LapaDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.2), + dict(type='MedianBlur', p=0.2), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/lapa_trainval.json', + data_prefix=dict(img='LaPa/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/lapa_test.json', + data_prefix=dict(img='LaPa/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=val_batch_size, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + 
ann_file='annotations/lapa_test.json', + data_prefix=dict(img='LaPa/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='NME', rule='less', max_keep_ckpts=3, interval=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/projects/rtmpose/rtmpose/face_2d_keypoint/rtmpose-t_8xb256-120e_lapa-256x256.py b/projects/rtmpose/rtmpose/face_2d_keypoint/rtmpose-t_8xb256-120e_lapa-256x256.py index ad6e4b212f..ade51e9b6d 100644 --- a/projects/rtmpose/rtmpose/face_2d_keypoint/rtmpose-t_8xb256-120e_lapa-256x256.py +++ b/projects/rtmpose/rtmpose/face_2d_keypoint/rtmpose-t_8xb256-120e_lapa-256x256.py @@ -1,246 +1,246 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# common setting -num_keypoints = 106 -input_size = (256, 256) - -# runtime -max_epochs = 120 -stage2_num_epochs = 10 -base_lr = 4e-3 -train_batch_size = 256 -val_batch_size = 32 - -train_cfg = dict(max_epochs=max_epochs, val_interval=1) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.005, - begin=30, - end=max_epochs, - T_max=max_epochs - 30, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(5.66, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.167, - widen_factor=0.375, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' - 'rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e-3a2dd350.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=384, - out_channels=num_keypoints, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'LapaDataset' -data_mode = 'topdown' -data_root = 'data/' - -backend_args = dict(backend='local') - -# 
pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.2), - dict(type='MedianBlur', p=0.2), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/lapa_trainval.json', - data_prefix=dict(img='LaPa/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/lapa_test.json', - data_prefix=dict(img='LaPa/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = dict( - batch_size=val_batch_size, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/lapa_test.json', - data_prefix=dict(img='LaPa/'), - test_mode=True, - pipeline=val_pipeline, - )) - -# hooks -default_hooks = dict( - checkpoint=dict( - save_best='NME', rule='less', max_keep_ckpts=3, interval=1)) - -custom_hooks = [ - # dict( - # type='EMAHook', - # ema_type='ExpMomentumEMA', - # momentum=0.0002, - # update_buffers=True, - # priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='NME', - norm_mode='keypoint_distance', -) -test_evaluator = val_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# common setting +num_keypoints = 106 +input_size = (256, 256) + +# runtime +max_epochs = 120 +stage2_num_epochs = 10 +base_lr = 4e-3 +train_batch_size = 256 +val_batch_size 
= 32 + +train_cfg = dict(max_epochs=max_epochs, val_interval=1) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.005, + begin=30, + end=max_epochs, + T_max=max_epochs - 30, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(5.66, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.167, + widen_factor=0.375, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e-3a2dd350.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=384, + out_channels=num_keypoints, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'LapaDataset' +data_mode = 'topdown' +data_root = 'data/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.2), + dict(type='MedianBlur', p=0.2), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', 
input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/lapa_trainval.json', + data_prefix=dict(img='LaPa/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/lapa_test.json', + data_prefix=dict(img='LaPa/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = dict( + batch_size=val_batch_size, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/lapa_test.json', + data_prefix=dict(img='LaPa/'), + test_mode=True, + pipeline=val_pipeline, + )) + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='NME', rule='less', max_keep_ckpts=3, interval=1)) + +custom_hooks = [ + # dict( + # type='EMAHook', + # ema_type='ExpMomentumEMA', + # momentum=0.0002, + # update_buffers=True, + # priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/projects/rtmpose/rtmpose/hand_2d_keypoint/rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py b/projects/rtmpose/rtmpose/hand_2d_keypoint/rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py index fc96cf7e67..2bedd2cfdd 100644 --- a/projects/rtmpose/rtmpose/hand_2d_keypoint/rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py +++ b/projects/rtmpose/rtmpose/hand_2d_keypoint/rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py @@ -1,233 +1,233 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# common setting -num_keypoints = 21 -input_size = (256, 256) - -# runtime -max_epochs = 210 -stage2_num_epochs = 30 -base_lr = 4e-3 -train_batch_size = 32 -val_batch_size = 32 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=256) - -# codec settings 
-codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(5.66, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=768, - out_channels=num_keypoints, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'CocoWholeBodyHandDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - # dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], - rotate_factor=180), - dict(type='RandomFlip', direction='horizontal'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - # dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=180), - dict(type='RandomFlip', direction='horizontal'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - 
data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE') -] -test_evaluator = val_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# common setting +num_keypoints = 21 +input_size = (256, 256) + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 32 +val_batch_size = 32 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=256) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(5.66, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=num_keypoints, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoWholeBodyHandDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = 
dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + # dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], + rotate_factor=180), + dict(type='RandomFlip', direction='horizontal'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + # dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=180), + dict(type='RandomFlip', direction='horizontal'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='AUC', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = [ + dict(type='PCKAccuracy', thr=0.2), + dict(type='AUC'), + dict(type='EPE') +] +test_evaluator = val_evaluator diff --git a/projects/rtmpose/rtmpose/pretrain_cspnext_udp/cspnext-l_udp_8xb256-210e_coco-256x192.py b/projects/rtmpose/rtmpose/pretrain_cspnext_udp/cspnext-l_udp_8xb256-210e_coco-256x192.py index a76610961e..34aa2c7655 100644 --- a/projects/rtmpose/rtmpose/pretrain_cspnext_udp/cspnext-l_udp_8xb256-210e_coco-256x192.py +++ b/projects/rtmpose/rtmpose/pretrain_cspnext_udp/cspnext-l_udp_8xb256-210e_coco-256x192.py @@ -1,214 +1,214 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# runtime -max_epochs = 
210 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 105 to 210 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=1., - widen_factor=1., - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' - 'rtmdet/cspnext_rsb_pretrain/' - 'cspnext-l_8xb256-rsb-a1-600e_in1k-6a760974.pth')), - head=dict( - type='HeatmapHead', - in_channels=1024, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=False, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - 
type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - # bbox_file='data/coco/person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 105 to 210 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-l_8xb256-rsb-a1-600e_in1k-6a760974.pth')), + head=dict( + type='HeatmapHead', + in_channels=1024, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + 
decoder=codec), + test_cfg=dict( + flip_test=False, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 
'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/projects/rtmpose/rtmpose/pretrain_cspnext_udp/cspnext-m_udp_8xb256-210e_coco-256x192.py b/projects/rtmpose/rtmpose/pretrain_cspnext_udp/cspnext-m_udp_8xb256-210e_coco-256x192.py index f104f0c093..337dbacca3 100644 --- a/projects/rtmpose/rtmpose/pretrain_cspnext_udp/cspnext-m_udp_8xb256-210e_coco-256x192.py +++ b/projects/rtmpose/rtmpose/pretrain_cspnext_udp/cspnext-m_udp_8xb256-210e_coco-256x192.py @@ -1,214 +1,214 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# runtime -max_epochs = 210 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 105 to 210 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' - 'rtmdet/cspnext_rsb_pretrain/' - 'cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth')), - head=dict( - type='HeatmapHead', - in_channels=768, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=False, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', 
backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - # bbox_file='data/coco/person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 105 to 210 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], 
+ bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth')), + head=dict( + type='HeatmapHead', + in_channels=768, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=False, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + 
data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/projects/rtmpose/rtmpose/pretrain_cspnext_udp/cspnext-s_udp_8xb256-210e_coco-256x192.py b/projects/rtmpose/rtmpose/pretrain_cspnext_udp/cspnext-s_udp_8xb256-210e_coco-256x192.py index 94f51c2f21..7086d1e9a2 100644 --- a/projects/rtmpose/rtmpose/pretrain_cspnext_udp/cspnext-s_udp_8xb256-210e_coco-256x192.py +++ b/projects/rtmpose/rtmpose/pretrain_cspnext_udp/cspnext-s_udp_8xb256-210e_coco-256x192.py @@ -1,214 +1,214 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# runtime -max_epochs = 210 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 105 to 210 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.33, - widen_factor=0.5, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' - 'rtmdet/cspnext_rsb_pretrain/' - 'cspnext-s_imagenet_600e-ea671761.pth')), - head=dict( - type='HeatmapHead', - in_channels=512, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=False, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', 
scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - # bbox_file='data/coco/person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# runtime +max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 105 to 210 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 
0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-s_imagenet_600e-ea671761.pth')), + head=dict( + type='HeatmapHead', + in_channels=512, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=False, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + 
ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/projects/rtmpose/rtmpose/pretrain_cspnext_udp/cspnext-tiny_udp_8xb256-210e_coco-256x192.py b/projects/rtmpose/rtmpose/pretrain_cspnext_udp/cspnext-tiny_udp_8xb256-210e_coco-256x192.py index ca29888ae1..6e186412a7 100644 --- a/projects/rtmpose/rtmpose/pretrain_cspnext_udp/cspnext-tiny_udp_8xb256-210e_coco-256x192.py +++ b/projects/rtmpose/rtmpose/pretrain_cspnext_udp/cspnext-tiny_udp_8xb256-210e_coco-256x192.py @@ -1,214 +1,214 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# runtime -max_epochs = 210 -stage2_num_epochs = 30 -base_lr = 4e-3 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - # use cosine lr from 105 to 210 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=1024) - -# codec settings -codec = dict( - type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.167, - widen_factor=0.375, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' - 'rtmdet/cspnext_rsb_pretrain/' - 'cspnext-tiny_imagenet_600e-3a2dd350.pth')), - head=dict( - type='HeatmapHead', - in_channels=384, - out_channels=17, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=False, - flip_mode='heatmap', - shift_heatmap=False, - )) - -# base dataset 
settings -dataset_type = 'CocoDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') -# backend_args = dict( -# backend='petrel', -# path_mapping=dict({ -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', -# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' -# })) - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=256, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=64, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/person_keypoints_val2017.json', - # bbox_file='data/coco/person_detection_results/' - # 'COCO_val2017_detections_AP_H_56_person.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - # dict( - # type='EMAHook', - # ema_type='ExpMomentumEMA', - # momentum=0.0002, - # update_buffers=True, - # priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json') -test_evaluator = val_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# runtime 
+max_epochs = 210 +stage2_num_epochs = 30 +base_lr = 4e-3 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 105 to 210 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=1024) + +# codec settings +codec = dict( + type='UDPHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.167, + widen_factor=0.375, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmdetection/v3.0/' + 'rtmdet/cspnext_rsb_pretrain/' + 'cspnext-tiny_imagenet_600e-3a2dd350.pth')), + head=dict( + type='HeatmapHead', + in_channels=384, + out_channels=17, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=False, + flip_mode='heatmap', + shift_heatmap=False, + )) + +# base dataset settings +dataset_type = 'CocoDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/', +# f'{data_root}': 's3://openmmlab/datasets/detection/coco/' +# })) + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size'], use_udp=True), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + 
type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=256, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=64, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/person_keypoints_val2017.json', + # bbox_file='data/coco/person_detection_results/' + # 'COCO_val2017_detections_AP_H_56_person.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + # dict( + # type='EMAHook', + # ema_type='ExpMomentumEMA', + # momentum=0.0002, + # update_buffers=True, + # priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json') +test_evaluator = val_evaluator diff --git a/projects/rtmpose/rtmpose/pruning/README.md b/projects/rtmpose/rtmpose/pruning/README.md index 0d10a89509..6af0efddfa 100644 --- a/projects/rtmpose/rtmpose/pruning/README.md +++ b/projects/rtmpose/rtmpose/pruning/README.md @@ -1,117 +1,117 @@ -# GroupFisher Pruning for RTMPose - -# Description - -We try to apply a pruning algorithm to RTMPose models. In detail, we prune a RTMPose model to a smaller size as the same as a smaller RTMPose model, like pruning RTMPose-S to the size of RTMPose-T. -The expriments show that the pruned model have better performance(AP) than the RTMPose model with the similar size and inference speed. - -Concretly, we select the RTMPose-S as the base model and prune it to the size of RTMPose-T, and use GroupFisher pruning algorithm which is able to determine the pruning structure automatically. -Furthermore, we provide two version of the pruned models including only using coco and using both of coco and ai-challenge datasets. 
- -# Results and Models - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | Flops | Params | ckpt | log | -| :-------------------------------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :---: | :----: | :---------------------------------------: | :------------: | -| [rtmpose-s-pruned](./group_fisher_finetune_rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 0.691 | 0.885 | 0.765 | 0.745 | 0.925 | 0.34 | 3.42 | [pruned][rp_sc_p] \| [finetuned][rp_sc_f] | [log][rp_sc_l] | -| [rtmpose-s-aic-coco-pruned](./group_fisher_finetune_rtmpose-s_8xb256-420e_aic-coco-256x192.py) | 256x192 | 0.694 | 0.884 | 0.771 | 0.747 | 0.922 | 0.35 | 3.43 | [pruned][rp_sa_p] \| [finetuned][rp_sa_f] | [log][rp_sa_l] | - -## Get Started - -We have three steps to apply GroupFisher to your model, including Prune, Finetune, Deploy. - -Note: please use torch>=1.12, as we need fxtracer to parse the models automatically. - -### Prune - -```bash -CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 PORT=29500 ./tools/dist_train.sh \ - {config_folder}/group_fisher_{normalization_type}_prune_{model_name}.py 8 \ - --work-dir $WORK_DIR -``` - -In the pruning config file. You have to fill some args as below. - -```python -""" -_base_ (str): The path to your pretrained model checkpoint. -pretrained_path (str): The path to your pretrained model checkpoint. - -interval (int): Interval between pruning two channels. You should ensure you - can reach your target pruning ratio when the training ends. -normalization_type (str): GroupFisher uses two methods to normlized the channel - importance, including ['flops','act']. The former uses flops, while the - latter uses the memory occupation of activation feature maps. -lr_ratio (float): Ratio to decrease lr rate. As pruning progress is unstable, - you need to decrease the original lr rate until the pruning training work - steadly without getting nan. - -target_flop_ratio (float): The target flop ratio to prune your model. -input_shape (Tuple): input shape to measure the flops. -""" -``` - -After the pruning process, you will get a checkpoint of the pruned model named flops\_{target_flop_ratio}.pth in your workdir. - -### Finetune - -```bash -CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 PORT=29500 ./tools/dist_train.sh \ - {config_folder}/group_fisher_{normalization_type}_finetune_{model_name}.py 8 \ - --work-dir $WORK_DIR -``` - -There are also some args for you to fill in the config file as below. - -```python -""" -_base_(str): The path to your pruning config file. -pruned_path (str): The path to the checkpoint of the pruned model. -finetune_lr (float): The lr rate to finetune. Usually, we directly use the lr - rate of the pretrain. -""" -``` - -After finetuning, except a checkpoint of the best model, there is also a fix_subnet.json, which records the pruned model structure. It will be used when deploying. - -### Test - -```bash -CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 PORT=29500 ./tools/dist_test.sh \ - {config_folder}/group_fisher_{normalization_type}_finetune_{model_name}.py {checkpoint_path} 8 -``` - -### Deploy - -For a pruned model, you only need to use the pruning deploy config to instead the pretrain config to deploy the pruned version of your model. If you are not familiar with mmdeploy, it's recommended to refer to [MMDeploy document](https://mmdeploy.readthedocs.io/en/latest/02-how-to-run/convert_model.html). 
-
-```bash
-python {mmdeploy}/tools/deploy.py \
-    {mmdeploy}/{mmdeploy_config}.py \
-    {config_folder}/group_fisher_{normalization_type}_deploy_{model_name}.py \
-    {path_to_finetuned_checkpoint}.pth \
-    {mmdeploy}/tests/data/tiger.jpeg
-```
-
-The deploy config has some args as below:
-
-```python
-"""
-_base_ (str): The path to your pretrain config file.
-fix_subnet (Union[dict,str]): The dict store the pruning structure or the
-    json file including it.
-divisor (int): The divisor the make the channel number divisible.
-"""
-```
-
-The divisor is important for the actual inference speed, and we suggest you to test it in \[1,2,4,8,16,32\] to find the fastest divisor.
-
-## Reference
-
-[GroupFisher in MMRazor](https://github.com/open-mmlab/mmrazor/tree/main/configs/pruning/base/group_fisher)
-
-[rp_sa_f]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_finetune_rtmpose-s_8xb256-420e_aic-coco-256x192.pth
-[rp_sa_l]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_finetune_rtmpose-s_8xb256-420e_aic-coco-256x192.json
-[rp_sa_p]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_prune_rtmpose-s_8xb256-420e_aic-coco-256x192.pth
-[rp_sc_f]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_finetune_rtmpose-s_8xb256-420e_coco-256x192.pth
-[rp_sc_l]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_finetune_rtmpose-s_8xb256-420e_coco-256x192.json
-[rp_sc_p]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_prune_rtmpose-s_8xb256-420e_coco-256x192.pth
+# GroupFisher Pruning for RTMPose
+
+# Description
+
+We apply a pruning algorithm to RTMPose models. Specifically, we prune an RTMPose model down to the size of a smaller RTMPose model, e.g. pruning RTMPose-S to the size of RTMPose-T.
+The experiments show that the pruned model achieves better performance (AP) than the RTMPose model of similar size and inference speed.
+
+Concretely, we select RTMPose-S as the base model and prune it to the size of RTMPose-T with the GroupFisher pruning algorithm, which determines the pruning structure automatically.
+Furthermore, we provide two versions of the pruned model: one trained only on COCO, and one trained on both the COCO and AI Challenger datasets.
+
+# Results and Models
+
+| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | Flops (G) | Params (M) | ckpt | log |
+| :-------------------------------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :---: | :----: | :---------------------------------------: | :------------: |
+| [rtmpose-s-pruned](./group_fisher_finetune_rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 0.691 | 0.885 | 0.765 | 0.745 | 0.925 | 0.34 | 3.42 | [pruned][rp_sc_p] \| [finetuned][rp_sc_f] | [log][rp_sc_l] |
+| [rtmpose-s-aic-coco-pruned](./group_fisher_finetune_rtmpose-s_8xb256-420e_aic-coco-256x192.py) | 256x192 | 0.694 | 0.884 | 0.771 | 0.747 | 0.922 | 0.35 | 3.43 | [pruned][rp_sa_p] \| [finetuned][rp_sa_f] | [log][rp_sa_l] |
+
+## Get Started
+
+Applying GroupFisher to your model takes three steps: Prune, Finetune, and Deploy.
+
+Note: please use torch>=1.12, as we rely on fxtracer to parse the models automatically.
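+
+For a quick sanity check of the environment (an illustrative sketch, not a required step), you can verify that your PyTorch build is new enough and that `torch.fx` is importable before starting:
+
+```bash
+# Illustrative check: GroupFisher relies on fx tracing, which needs torch>=1.12.
+python -c "import torch, torch.fx; print('torch', torch.__version__)"
+```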
+
+### Prune
+
+```bash
+CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 PORT=29500 ./tools/dist_train.sh \
+    {config_folder}/group_fisher_{normalization_type}_prune_{model_name}.py 8 \
+    --work-dir $WORK_DIR
+```
+
+In the pruning config file, you have to fill in the following args.
+
+```python
+"""
+_base_ (str): The path to your pretrain config file.
+pretrained_path (str): The path to your pretrained model checkpoint.
+
+interval (int): Interval between pruning two channels. You should ensure you
+    can reach your target pruning ratio when the training ends.
+normalization_type (str): GroupFisher uses two methods to normalize the channel
+    importance: ['flops', 'act']. The former uses flops, while the latter uses
+    the memory occupation of activation feature maps.
+lr_ratio (float): Ratio to decrease the learning rate. As the pruning process
+    is unstable, you need to decrease the original learning rate until the
+    pruning training runs steadily without getting NaN.
+
+target_flop_ratio (float): The target flop ratio to prune your model to.
+input_shape (Tuple): The input shape used to measure the flops.
+"""
+```
+
+After the pruning process, you will get a checkpoint of the pruned model named flops\_{target_flop_ratio}.pth in your work dir.
+
+### Finetune
+
+```bash
+CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 PORT=29500 ./tools/dist_train.sh \
+    {config_folder}/group_fisher_{normalization_type}_finetune_{model_name}.py 8 \
+    --work-dir $WORK_DIR
+```
+
+There are also some args to fill in the finetune config file.
+
+```python
+"""
+_base_ (str): The path to your pruning config file.
+pruned_path (str): The path to the checkpoint of the pruned model.
+finetune_lr (float): The learning rate for finetuning. Usually, we directly
+    use the learning rate of the pretraining.
+"""
+```
+
+After finetuning, besides the checkpoint of the best model, a fix_subnet.json is also saved, which records the pruned model structure. It will be used when deploying.
+
+### Test
+
+```bash
+CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 PORT=29500 ./tools/dist_test.sh \
+    {config_folder}/group_fisher_{normalization_type}_finetune_{model_name}.py {checkpoint_path} 8
+```
+
+### Deploy
+
+For a pruned model, you only need to use the pruning deploy config instead of the pretrain config to deploy the pruned version of your model. If you are not familiar with MMDeploy, it's recommended to refer to the [MMDeploy documentation](https://mmdeploy.readthedocs.io/en/latest/02-how-to-run/convert_model.html).
+
+```bash
+python {mmdeploy}/tools/deploy.py \
+    {mmdeploy}/{mmdeploy_config}.py \
+    {config_folder}/group_fisher_{normalization_type}_deploy_{model_name}.py \
+    {path_to_finetuned_checkpoint}.pth \
+    {mmdeploy}/tests/data/tiger.jpeg
+```
+
+The deploy config has the following args:
+
+```python
+"""
+_base_ (str): The path to your pretrain config file.
+fix_subnet (Union[dict, str]): The dict that stores the pruning structure, or
+    the json file containing it.
+divisor (int): The divisor used to make the channel numbers divisible.
+"""
+```
+
+The divisor is important for the actual inference speed, and we suggest testing values in \[1,2,4,8,16,32\] to find the fastest one.
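+
+Putting the three steps together, an illustrative end-to-end run for the COCO-only RTMPose-S configs in this folder might look as follows. It assumes you run from the MMPose repository root with MMRazor installed; the work dirs, the MMDeploy config and the checkpoint name are placeholders to adapt to your setup:
+
+```bash
+CONFIG_DIR=projects/rtmpose/rtmpose/pruning
+
+# 1. Prune: produces flops_{target_flop_ratio}.pth (e.g. flops_0.51.pth) in the work dir.
+CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 PORT=29500 ./tools/dist_train.sh \
+    $CONFIG_DIR/group_fisher_prune_rtmpose-s_8xb256-420e_coco-256x192.py 8 \
+    --work-dir work_dirs/prune_rtmpose-s
+
+# 2. Finetune: point pruned_path in the finetune config to the checkpoint produced above.
+CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 PORT=29500 ./tools/dist_train.sh \
+    $CONFIG_DIR/group_fisher_finetune_rtmpose-s_8xb256-420e_coco-256x192.py 8 \
+    --work-dir work_dirs/finetune_rtmpose-s
+
+# 3. Deploy: use the deploy config (with fix_subnet from finetuning) in place of the pretrain config.
+python {mmdeploy}/tools/deploy.py \
+    {mmdeploy}/{mmdeploy_config}.py \
+    $CONFIG_DIR/group_fisher_deploy_rtmpose-s_8xb256-420e_coco-256x192.py \
+    work_dirs/finetune_rtmpose-s/{best_checkpoint}.pth \
+    {mmdeploy}/tests/data/tiger.jpeg
+```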
+ +## Reference + +[GroupFisher in MMRazor](https://github.com/open-mmlab/mmrazor/tree/main/configs/pruning/base/group_fisher) + +[rp_sa_f]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_finetune_rtmpose-s_8xb256-420e_aic-coco-256x192.pth +[rp_sa_l]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_finetune_rtmpose-s_8xb256-420e_aic-coco-256x192.json +[rp_sa_p]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_prune_rtmpose-s_8xb256-420e_aic-coco-256x192.pth +[rp_sc_f]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_finetune_rtmpose-s_8xb256-420e_coco-256x192.pth +[rp_sc_l]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_finetune_rtmpose-s_8xb256-420e_coco-256x192.json +[rp_sc_p]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_prune_rtmpose-s_8xb256-420e_coco-256x192.pth diff --git a/projects/rtmpose/rtmpose/pruning/README_CN.md b/projects/rtmpose/rtmpose/pruning/README_CN.md index f3da9ef5c7..d3f398b2cd 100644 --- a/projects/rtmpose/rtmpose/pruning/README_CN.md +++ b/projects/rtmpose/rtmpose/pruning/README_CN.md @@ -1,116 +1,116 @@ -# 使用GroupFisher剪枝RTMPose - -# 概述 - -我们尝试使用 GroupFisher 算法对 RTMPose 模型进行剪枝。具体来说,我们将一个 RTMPose 模型剪枝到与较小的 RTMPose 模型相同的大小,例如将 RTMPose-S 剪枝到 RTMPose-T 的大小。 -实验表明,剪枝后的模型比具有相似大小和推理速度的 RTMPose 模型具有更好的性能(AP)。 - -我们使用能自动确定剪枝结构的 GroupFisher 剪枝算法,将 RTMPose-S 剪枝到 RTMPose-T 的大小。 -此外,我们提供了两个版本的剪枝模型,其中一个只使用 coco 数据集,另一个同时使用 coco 和 ai-challenge 数据集。 - -# 实验结果 - -| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | Flops | Params | ckpt | log | -| :-------------------------------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :---: | :----: | :---------------------------------------: | :------------: | -| [rtmpose-s-pruned](./group_fisher_finetune_rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 0.691 | 0.885 | 0.765 | 0.745 | 0.925 | 0.34 | 3.42 | [pruned][rp_sc_p] \| [finetuned][rp_sc_f] | [log][rp_sc_l] | -| [rtmpose-s-aic-coco-pruned](./group_fisher_finetune_rtmpose-s_8xb256-420e_aic-coco-256x192.py) | 256x192 | 0.694 | 0.884 | 0.771 | 0.747 | 0.922 | 0.35 | 3.43 | [pruned][rp_sa_p] \| [finetuned][rp_sa_f] | [log][rp_sa_l] | - -## Get Started - -我们需要三个步骤来将 GroupFisher 应用于你的模型,包括剪枝(Prune),微调(Finetune),部署(Deploy)。 -注意:请使用torch>=1.12,因为我们需要fxtracer来自动解析模型。 - -### Prune - -```bash -CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 PORT=29500 ./tools/dist_train.sh \ - {config_folder}/group_fisher_{normalization_type}_prune_{model_name}.py 8 \ - --work-dir $WORK_DIR -``` - -在剪枝配置文件中,你需要填写以下参数。 - -```python -""" -_base_ (str): The path to your pretrained model checkpoint. -pretrained_path (str): The path to your pretrained model checkpoint. - -interval (int): Interval between pruning two channels. You should ensure you - can reach your target pruning ratio when the training ends. -normalization_type (str): GroupFisher uses two methods to normlized the channel - importance, including ['flops','act']. The former uses flops, while the - latter uses the memory occupation of activation feature maps. -lr_ratio (float): Ratio to decrease lr rate. As pruning progress is unstable, - you need to decrease the original lr rate until the pruning training work - steadly without getting nan. - -target_flop_ratio (float): The target flop ratio to prune your model. 
-input_shape (Tuple): input shape to measure the flops. -""" -``` - -在剪枝结束后,你将获得一个剪枝模型的 checkpoint,该 checkpoint 的名称为 flops\_{target_flop_ratio}.pth,位于你的 workdir 中。 - -### Finetune - -```bash -CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 PORT=29500 ./tools/dist_train.sh \ - {config_folder}/group_fisher_{normalization_type}_finetune_{model_name}.py 8 \ - --work-dir $WORK_DIR -``` - -微调时也有一些参数需要你填写。 - -```python -""" -_base_(str): The path to your pruning config file. -pruned_path (str): The path to the checkpoint of the pruned model. -finetune_lr (float): The lr rate to finetune. Usually, we directly use the lr - rate of the pretrain. -""" -``` - -在微调结束后,除了最佳模型的 checkpoint 外,还有一个 fix_subnet.json,它记录了剪枝模型的结构。它将在部署时使用。 - -### Test - -```bash -CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 PORT=29500 ./tools/dist_test.sh \ - {config_folder}/group_fisher_{normalization_type}_finetune_{model_name}.py {checkpoint_path} 8 -``` - -### Deploy - -对于剪枝模型,你只需要使用剪枝部署 config 来代替预训练 config 来部署模型的剪枝版本。如果你不熟悉 MMDeploy,请参看[MMDeploy document](https://mmdeploy.readthedocs.io/en/latest/02-how-to-run/convert_model.html)。 - -```bash -python {mmdeploy}/tools/deploy.py \ - {mmdeploy}/{mmdeploy_config}.py \ - {config_folder}/group_fisher_{normalization_type}_deploy_{model_name}.py \ - {path_to_finetuned_checkpoint}.pth \ - {mmdeploy}/tests/data/tiger.jpeg -``` - -部署配置文件有如下参数: - -```python -""" -_base_ (str): The path to your pretrain config file. -fix_subnet (Union[dict,str]): The dict store the pruning structure or the - json file including it. -divisor (int): The divisor the make the channel number divisible. -""" -``` - -divisor 设置十分重要,我们建议你在尝试 \[1,2,4,8,16,32\],以找到最佳设置。 - -## Reference - -[GroupFisher in MMRazor](https://github.com/open-mmlab/mmrazor/tree/main/configs/pruning/base/group_fisher) - -[rp_sa_f]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_finetune_rtmpose-s_8xb256-420e_aic-coco-256x192.pth -[rp_sa_l]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_finetune_rtmpose-s_8xb256-420e_aic-coco-256x192.json -[rp_sa_p]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_prune_rtmpose-s_8xb256-420e_aic-coco-256x192.pth -[rp_sc_f]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_finetune_rtmpose-s_8xb256-420e_coco-256x192.pth -[rp_sc_l]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_finetune_rtmpose-s_8xb256-420e_coco-256x192.json -[rp_sc_p]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_prune_rtmpose-s_8xb256-420e_coco-256x192.pth +# 使用GroupFisher剪枝RTMPose + +# 概述 + +我们尝试使用 GroupFisher 算法对 RTMPose 模型进行剪枝。具体来说,我们将一个 RTMPose 模型剪枝到与较小的 RTMPose 模型相同的大小,例如将 RTMPose-S 剪枝到 RTMPose-T 的大小。 +实验表明,剪枝后的模型比具有相似大小和推理速度的 RTMPose 模型具有更好的性能(AP)。 + +我们使用能自动确定剪枝结构的 GroupFisher 剪枝算法,将 RTMPose-S 剪枝到 RTMPose-T 的大小。 +此外,我们提供了两个版本的剪枝模型,其中一个只使用 coco 数据集,另一个同时使用 coco 和 ai-challenge 数据集。 + +# 实验结果 + +| Arch | Input Size | AP | AP50 | AP75 | AR | AR50 | Flops | Params | ckpt | log | +| :-------------------------------------------------------------------- | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :---: | :----: | :---------------------------------------: | :------------: | +| [rtmpose-s-pruned](./group_fisher_finetune_rtmpose-s_8xb256-420e_coco-256x192.py) | 256x192 | 0.691 | 0.885 | 0.765 | 0.745 | 0.925 | 0.34 | 3.42 | [pruned][rp_sc_p] \| [finetuned][rp_sc_f] | [log][rp_sc_l] | +| 
[rtmpose-s-aic-coco-pruned](./group_fisher_finetune_rtmpose-s_8xb256-420e_aic-coco-256x192.py) | 256x192 | 0.694 | 0.884 | 0.771 | 0.747 | 0.922 | 0.35 | 3.43 | [pruned][rp_sa_p] \| [finetuned][rp_sa_f] | [log][rp_sa_l] | + +## Get Started + +我们需要三个步骤来将 GroupFisher 应用于你的模型,包括剪枝(Prune),微调(Finetune),部署(Deploy)。 +注意:请使用torch>=1.12,因为我们需要fxtracer来自动解析模型。 + +### Prune + +```bash +CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 PORT=29500 ./tools/dist_train.sh \ + {config_folder}/group_fisher_{normalization_type}_prune_{model_name}.py 8 \ + --work-dir $WORK_DIR +``` + +在剪枝配置文件中,你需要填写以下参数。 + +```python +""" +_base_ (str): The path to your pretrained model checkpoint. +pretrained_path (str): The path to your pretrained model checkpoint. + +interval (int): Interval between pruning two channels. You should ensure you + can reach your target pruning ratio when the training ends. +normalization_type (str): GroupFisher uses two methods to normlized the channel + importance, including ['flops','act']. The former uses flops, while the + latter uses the memory occupation of activation feature maps. +lr_ratio (float): Ratio to decrease lr rate. As pruning progress is unstable, + you need to decrease the original lr rate until the pruning training work + steadly without getting nan. + +target_flop_ratio (float): The target flop ratio to prune your model. +input_shape (Tuple): input shape to measure the flops. +""" +``` + +在剪枝结束后,你将获得一个剪枝模型的 checkpoint,该 checkpoint 的名称为 flops\_{target_flop_ratio}.pth,位于你的 workdir 中。 + +### Finetune + +```bash +CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 PORT=29500 ./tools/dist_train.sh \ + {config_folder}/group_fisher_{normalization_type}_finetune_{model_name}.py 8 \ + --work-dir $WORK_DIR +``` + +微调时也有一些参数需要你填写。 + +```python +""" +_base_(str): The path to your pruning config file. +pruned_path (str): The path to the checkpoint of the pruned model. +finetune_lr (float): The lr rate to finetune. Usually, we directly use the lr + rate of the pretrain. +""" +``` + +在微调结束后,除了最佳模型的 checkpoint 外,还有一个 fix_subnet.json,它记录了剪枝模型的结构。它将在部署时使用。 + +### Test + +```bash +CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 PORT=29500 ./tools/dist_test.sh \ + {config_folder}/group_fisher_{normalization_type}_finetune_{model_name}.py {checkpoint_path} 8 +``` + +### Deploy + +对于剪枝模型,你只需要使用剪枝部署 config 来代替预训练 config 来部署模型的剪枝版本。如果你不熟悉 MMDeploy,请参看[MMDeploy document](https://mmdeploy.readthedocs.io/en/latest/02-how-to-run/convert_model.html)。 + +```bash +python {mmdeploy}/tools/deploy.py \ + {mmdeploy}/{mmdeploy_config}.py \ + {config_folder}/group_fisher_{normalization_type}_deploy_{model_name}.py \ + {path_to_finetuned_checkpoint}.pth \ + {mmdeploy}/tests/data/tiger.jpeg +``` + +部署配置文件有如下参数: + +```python +""" +_base_ (str): The path to your pretrain config file. +fix_subnet (Union[dict,str]): The dict store the pruning structure or the + json file including it. +divisor (int): The divisor the make the channel number divisible. 
+""" +``` + +divisor 设置十分重要,我们建议你在尝试 \[1,2,4,8,16,32\],以找到最佳设置。 + +## Reference + +[GroupFisher in MMRazor](https://github.com/open-mmlab/mmrazor/tree/main/configs/pruning/base/group_fisher) + +[rp_sa_f]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_finetune_rtmpose-s_8xb256-420e_aic-coco-256x192.pth +[rp_sa_l]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_finetune_rtmpose-s_8xb256-420e_aic-coco-256x192.json +[rp_sa_p]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_prune_rtmpose-s_8xb256-420e_aic-coco-256x192.pth +[rp_sc_f]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_finetune_rtmpose-s_8xb256-420e_coco-256x192.pth +[rp_sc_l]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_finetune_rtmpose-s_8xb256-420e_coco-256x192.json +[rp_sc_p]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_prune_rtmpose-s_8xb256-420e_coco-256x192.pth diff --git a/projects/rtmpose/rtmpose/pruning/group_fisher_deploy_rtmpose-s_8xb256-420e_aic-coco-256x192.py b/projects/rtmpose/rtmpose/pruning/group_fisher_deploy_rtmpose-s_8xb256-420e_aic-coco-256x192.py index 3c720566f0..6d6be8053c 100644 --- a/projects/rtmpose/rtmpose/pruning/group_fisher_deploy_rtmpose-s_8xb256-420e_aic-coco-256x192.py +++ b/projects/rtmpose/rtmpose/pruning/group_fisher_deploy_rtmpose-s_8xb256-420e_aic-coco-256x192.py @@ -1,53 +1,53 @@ -############################################################################# -"""You have to fill these args. - -_base_(str): The path to your pretrain config file. -fix_subnet (Union[dict,str]): The dict store the pruning structure or the - json file including it. -divisor (int): The divisor the make the channel number divisible. 
-""" - -_base_ = 'mmpose::body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_aic-coco-256x192.py' # noqa -fix_subnet = { - 'backbone.stem.0.conv_(0, 16)_16': 8, - 'backbone.stem.1.conv_(0, 16)_16': 9, - 'backbone.stem.2.conv_(0, 32)_32': 9, - 'backbone.stage1.0.conv_(0, 64)_64': 32, - 'backbone.stage1.1.short_conv.conv_(0, 32)_32': 30, - 'backbone.stage1.1.main_conv.conv_(0, 32)_32': 29, - 'backbone.stage1.1.blocks.0.conv1.conv_(0, 32)_32': 24, - 'backbone.stage1.1.final_conv.conv_(0, 64)_64': 27, - 'backbone.stage2.0.conv_(0, 128)_128': 62, - 'backbone.stage2.1.short_conv.conv_(0, 64)_64': 63, - 'backbone.stage2.1.main_conv.conv_(0, 64)_64': 64, - 'backbone.stage2.1.blocks.0.conv1.conv_(0, 64)_64': 56, - 'backbone.stage2.1.blocks.1.conv1.conv_(0, 64)_64': 62, - 'backbone.stage2.1.final_conv.conv_(0, 128)_128': 65, - 'backbone.stage3.0.conv_(0, 256)_256': 167, - 'backbone.stage3.1.short_conv.conv_(0, 128)_128': 127, - 'backbone.stage3.1.main_conv.conv_(0, 128)_128': 128, - 'backbone.stage3.1.blocks.0.conv1.conv_(0, 128)_128': 124, - 'backbone.stage3.1.blocks.1.conv1.conv_(0, 128)_128': 123, - 'backbone.stage3.1.final_conv.conv_(0, 256)_256': 172, - 'backbone.stage4.0.conv_(0, 512)_512': 337, - 'backbone.stage4.1.conv1.conv_(0, 256)_256': 256, - 'backbone.stage4.1.conv2.conv_(0, 512)_512': 379, - 'backbone.stage4.2.short_conv.conv_(0, 256)_256': 188, - 'backbone.stage4.2.main_conv.conv_(0, 256)_256': 227, - 'backbone.stage4.2.blocks.0.conv1.conv_(0, 256)_256': 238, - 'backbone.stage4.2.blocks.0.conv2.pointwise_conv.conv_(0, 256)_256': 195, - 'backbone.stage4.2.final_conv.conv_(0, 512)_512': 163 -} -divisor = 8 -############################################################################## - -architecture = _base_.model - -model = dict( - _delete_=True, - _scope_='mmrazor', - type='GroupFisherDeploySubModel', - architecture=architecture, - fix_subnet=fix_subnet, - divisor=divisor, -) +############################################################################# +"""You have to fill these args. + +_base_(str): The path to your pretrain config file. +fix_subnet (Union[dict,str]): The dict store the pruning structure or the + json file including it. +divisor (int): The divisor the make the channel number divisible. 
+""" + +_base_ = 'mmpose::body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_aic-coco-256x192.py' # noqa +fix_subnet = { + 'backbone.stem.0.conv_(0, 16)_16': 8, + 'backbone.stem.1.conv_(0, 16)_16': 9, + 'backbone.stem.2.conv_(0, 32)_32': 9, + 'backbone.stage1.0.conv_(0, 64)_64': 32, + 'backbone.stage1.1.short_conv.conv_(0, 32)_32': 30, + 'backbone.stage1.1.main_conv.conv_(0, 32)_32': 29, + 'backbone.stage1.1.blocks.0.conv1.conv_(0, 32)_32': 24, + 'backbone.stage1.1.final_conv.conv_(0, 64)_64': 27, + 'backbone.stage2.0.conv_(0, 128)_128': 62, + 'backbone.stage2.1.short_conv.conv_(0, 64)_64': 63, + 'backbone.stage2.1.main_conv.conv_(0, 64)_64': 64, + 'backbone.stage2.1.blocks.0.conv1.conv_(0, 64)_64': 56, + 'backbone.stage2.1.blocks.1.conv1.conv_(0, 64)_64': 62, + 'backbone.stage2.1.final_conv.conv_(0, 128)_128': 65, + 'backbone.stage3.0.conv_(0, 256)_256': 167, + 'backbone.stage3.1.short_conv.conv_(0, 128)_128': 127, + 'backbone.stage3.1.main_conv.conv_(0, 128)_128': 128, + 'backbone.stage3.1.blocks.0.conv1.conv_(0, 128)_128': 124, + 'backbone.stage3.1.blocks.1.conv1.conv_(0, 128)_128': 123, + 'backbone.stage3.1.final_conv.conv_(0, 256)_256': 172, + 'backbone.stage4.0.conv_(0, 512)_512': 337, + 'backbone.stage4.1.conv1.conv_(0, 256)_256': 256, + 'backbone.stage4.1.conv2.conv_(0, 512)_512': 379, + 'backbone.stage4.2.short_conv.conv_(0, 256)_256': 188, + 'backbone.stage4.2.main_conv.conv_(0, 256)_256': 227, + 'backbone.stage4.2.blocks.0.conv1.conv_(0, 256)_256': 238, + 'backbone.stage4.2.blocks.0.conv2.pointwise_conv.conv_(0, 256)_256': 195, + 'backbone.stage4.2.final_conv.conv_(0, 512)_512': 163 +} +divisor = 8 +############################################################################## + +architecture = _base_.model + +model = dict( + _delete_=True, + _scope_='mmrazor', + type='GroupFisherDeploySubModel', + architecture=architecture, + fix_subnet=fix_subnet, + divisor=divisor, +) diff --git a/projects/rtmpose/rtmpose/pruning/group_fisher_deploy_rtmpose-s_8xb256-420e_coco-256x192.py b/projects/rtmpose/rtmpose/pruning/group_fisher_deploy_rtmpose-s_8xb256-420e_coco-256x192.py index 64fa6c2b6b..31a3d154a9 100644 --- a/projects/rtmpose/rtmpose/pruning/group_fisher_deploy_rtmpose-s_8xb256-420e_coco-256x192.py +++ b/projects/rtmpose/rtmpose/pruning/group_fisher_deploy_rtmpose-s_8xb256-420e_coco-256x192.py @@ -1,53 +1,53 @@ -############################################################################# -"""You have to fill these args. - -_base_(str): The path to your pretrain config file. -fix_subnet (Union[dict,str]): The dict store the pruning structure or the - json file including it. -divisor (int): The divisor the make the channel number divisible. 
-""" - -_base_ = 'mmpose::body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py' # noqa -fix_subnet = { - 'backbone.stem.0.conv_(0, 16)_16': 8, - 'backbone.stem.1.conv_(0, 16)_16': 10, - 'backbone.stem.2.conv_(0, 32)_32': 11, - 'backbone.stage1.0.conv_(0, 64)_64': 32, - 'backbone.stage1.1.short_conv.conv_(0, 32)_32': 32, - 'backbone.stage1.1.main_conv.conv_(0, 32)_32': 23, - 'backbone.stage1.1.blocks.0.conv1.conv_(0, 32)_32': 25, - 'backbone.stage1.1.final_conv.conv_(0, 64)_64': 25, - 'backbone.stage2.0.conv_(0, 128)_128': 71, - 'backbone.stage2.1.short_conv.conv_(0, 64)_64': 61, - 'backbone.stage2.1.main_conv.conv_(0, 64)_64': 62, - 'backbone.stage2.1.blocks.0.conv1.conv_(0, 64)_64': 57, - 'backbone.stage2.1.blocks.1.conv1.conv_(0, 64)_64': 59, - 'backbone.stage2.1.final_conv.conv_(0, 128)_128': 69, - 'backbone.stage3.0.conv_(0, 256)_256': 177, - 'backbone.stage3.1.short_conv.conv_(0, 128)_128': 122, - 'backbone.stage3.1.main_conv.conv_(0, 128)_128': 123, - 'backbone.stage3.1.blocks.0.conv1.conv_(0, 128)_128': 125, - 'backbone.stage3.1.blocks.1.conv1.conv_(0, 128)_128': 123, - 'backbone.stage3.1.final_conv.conv_(0, 256)_256': 171, - 'backbone.stage4.0.conv_(0, 512)_512': 351, - 'backbone.stage4.1.conv1.conv_(0, 256)_256': 256, - 'backbone.stage4.1.conv2.conv_(0, 512)_512': 367, - 'backbone.stage4.2.short_conv.conv_(0, 256)_256': 183, - 'backbone.stage4.2.main_conv.conv_(0, 256)_256': 216, - 'backbone.stage4.2.blocks.0.conv1.conv_(0, 256)_256': 238, - 'backbone.stage4.2.blocks.0.conv2.pointwise_conv.conv_(0, 256)_256': 195, - 'backbone.stage4.2.final_conv.conv_(0, 512)_512': 187 -} -divisor = 16 -############################################################################## - -architecture = _base_.model - -model = dict( - _delete_=True, - _scope_='mmrazor', - type='GroupFisherDeploySubModel', - architecture=architecture, - fix_subnet=fix_subnet, - divisor=divisor, -) +############################################################################# +"""You have to fill these args. + +_base_(str): The path to your pretrain config file. +fix_subnet (Union[dict,str]): The dict store the pruning structure or the + json file including it. +divisor (int): The divisor the make the channel number divisible. 
+""" + +_base_ = 'mmpose::body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py' # noqa +fix_subnet = { + 'backbone.stem.0.conv_(0, 16)_16': 8, + 'backbone.stem.1.conv_(0, 16)_16': 10, + 'backbone.stem.2.conv_(0, 32)_32': 11, + 'backbone.stage1.0.conv_(0, 64)_64': 32, + 'backbone.stage1.1.short_conv.conv_(0, 32)_32': 32, + 'backbone.stage1.1.main_conv.conv_(0, 32)_32': 23, + 'backbone.stage1.1.blocks.0.conv1.conv_(0, 32)_32': 25, + 'backbone.stage1.1.final_conv.conv_(0, 64)_64': 25, + 'backbone.stage2.0.conv_(0, 128)_128': 71, + 'backbone.stage2.1.short_conv.conv_(0, 64)_64': 61, + 'backbone.stage2.1.main_conv.conv_(0, 64)_64': 62, + 'backbone.stage2.1.blocks.0.conv1.conv_(0, 64)_64': 57, + 'backbone.stage2.1.blocks.1.conv1.conv_(0, 64)_64': 59, + 'backbone.stage2.1.final_conv.conv_(0, 128)_128': 69, + 'backbone.stage3.0.conv_(0, 256)_256': 177, + 'backbone.stage3.1.short_conv.conv_(0, 128)_128': 122, + 'backbone.stage3.1.main_conv.conv_(0, 128)_128': 123, + 'backbone.stage3.1.blocks.0.conv1.conv_(0, 128)_128': 125, + 'backbone.stage3.1.blocks.1.conv1.conv_(0, 128)_128': 123, + 'backbone.stage3.1.final_conv.conv_(0, 256)_256': 171, + 'backbone.stage4.0.conv_(0, 512)_512': 351, + 'backbone.stage4.1.conv1.conv_(0, 256)_256': 256, + 'backbone.stage4.1.conv2.conv_(0, 512)_512': 367, + 'backbone.stage4.2.short_conv.conv_(0, 256)_256': 183, + 'backbone.stage4.2.main_conv.conv_(0, 256)_256': 216, + 'backbone.stage4.2.blocks.0.conv1.conv_(0, 256)_256': 238, + 'backbone.stage4.2.blocks.0.conv2.pointwise_conv.conv_(0, 256)_256': 195, + 'backbone.stage4.2.final_conv.conv_(0, 512)_512': 187 +} +divisor = 16 +############################################################################## + +architecture = _base_.model + +model = dict( + _delete_=True, + _scope_='mmrazor', + type='GroupFisherDeploySubModel', + architecture=architecture, + fix_subnet=fix_subnet, + divisor=divisor, +) diff --git a/projects/rtmpose/rtmpose/pruning/group_fisher_finetune_rtmpose-s_8xb256-420e_aic-coco-256x192.py b/projects/rtmpose/rtmpose/pruning/group_fisher_finetune_rtmpose-s_8xb256-420e_aic-coco-256x192.py index b4fb4f827c..810b11c839 100644 --- a/projects/rtmpose/rtmpose/pruning/group_fisher_finetune_rtmpose-s_8xb256-420e_aic-coco-256x192.py +++ b/projects/rtmpose/rtmpose/pruning/group_fisher_finetune_rtmpose-s_8xb256-420e_aic-coco-256x192.py @@ -1,32 +1,32 @@ -############################################################################# -"""# You have to fill these args. - -_base_(str): The path to your pruning config file. -pruned_path (str): The path to the checkpoint of the pruned model. -finetune_lr (float): The lr rate to finetune. Usually, we directly use the lr - rate of the pretrain. 
-""" - -_base_ = './group_fisher_prune_rtmpose-s_8xb256-420e_aic-coco-256x192.py' # noqa -pruned_path = 'https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_prune_rtmpose-s_8xb256-420e_aic-coco-256x192.pth' # noqa -finetune_lr = 4e-3 -############################################################################## - -algorithm = _base_.model -algorithm.init_cfg = dict(type='Pretrained', checkpoint=pruned_path) - -model = dict( - _delete_=True, - _scope_='mmrazor', - type='GroupFisherSubModel', - algorithm=algorithm, -) - -# restore lr -optim_wrapper = dict(optimizer=dict(lr=finetune_lr)) - -# remove pruning related hooks -custom_hooks = _base_.custom_hooks[:-2] - -# delete ddp -model_wrapper_cfg = None +############################################################################# +"""# You have to fill these args. + +_base_(str): The path to your pruning config file. +pruned_path (str): The path to the checkpoint of the pruned model. +finetune_lr (float): The lr rate to finetune. Usually, we directly use the lr + rate of the pretrain. +""" + +_base_ = './group_fisher_prune_rtmpose-s_8xb256-420e_aic-coco-256x192.py' # noqa +pruned_path = 'https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_prune_rtmpose-s_8xb256-420e_aic-coco-256x192.pth' # noqa +finetune_lr = 4e-3 +############################################################################## + +algorithm = _base_.model +algorithm.init_cfg = dict(type='Pretrained', checkpoint=pruned_path) + +model = dict( + _delete_=True, + _scope_='mmrazor', + type='GroupFisherSubModel', + algorithm=algorithm, +) + +# restore lr +optim_wrapper = dict(optimizer=dict(lr=finetune_lr)) + +# remove pruning related hooks +custom_hooks = _base_.custom_hooks[:-2] + +# delete ddp +model_wrapper_cfg = None diff --git a/projects/rtmpose/rtmpose/pruning/group_fisher_finetune_rtmpose-s_8xb256-420e_coco-256x192.py b/projects/rtmpose/rtmpose/pruning/group_fisher_finetune_rtmpose-s_8xb256-420e_coco-256x192.py index 5cc6db15e4..3e42326c3a 100644 --- a/projects/rtmpose/rtmpose/pruning/group_fisher_finetune_rtmpose-s_8xb256-420e_coco-256x192.py +++ b/projects/rtmpose/rtmpose/pruning/group_fisher_finetune_rtmpose-s_8xb256-420e_coco-256x192.py @@ -1,33 +1,33 @@ -############################################################################# -"""# You have to fill these args. - -_base_(str): The path to your pruning config file. -pruned_path (str): The path to the checkpoint of the pruned model. -finetune_lr (float): The lr rate to finetune. Usually, we directly use the lr - rate of the pretrain. 
-""" - -_base_ = './group_fisher_prune_rtmpose-s_8xb256-420e_coco-256x192.py' -pruned_path = 'https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_prune_rtmpose-s_8xb256-420e_coco-256x192.pth' # noqa -finetune_lr = 4e-3 -############################################################################## - -algorithm = _base_.model -algorithm.init_cfg = dict(type='Pretrained', checkpoint=pruned_path) -# algorithm.update(dict(architecture=dict(test_cfg=dict(flip_test=False), ))) # disable flip test # noqa - -model = dict( - _delete_=True, - _scope_='mmrazor', - type='GroupFisherSubModel', - algorithm=algorithm, -) - -# restore lr -optim_wrapper = dict(optimizer=dict(lr=finetune_lr)) - -# remove pruning related hooks -custom_hooks = _base_.custom_hooks[:-2] - -# delete ddp -model_wrapper_cfg = None +############################################################################# +"""# You have to fill these args. + +_base_(str): The path to your pruning config file. +pruned_path (str): The path to the checkpoint of the pruned model. +finetune_lr (float): The lr rate to finetune. Usually, we directly use the lr + rate of the pretrain. +""" + +_base_ = './group_fisher_prune_rtmpose-s_8xb256-420e_coco-256x192.py' +pruned_path = 'https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_prune_rtmpose-s_8xb256-420e_coco-256x192.pth' # noqa +finetune_lr = 4e-3 +############################################################################## + +algorithm = _base_.model +algorithm.init_cfg = dict(type='Pretrained', checkpoint=pruned_path) +# algorithm.update(dict(architecture=dict(test_cfg=dict(flip_test=False), ))) # disable flip test # noqa + +model = dict( + _delete_=True, + _scope_='mmrazor', + type='GroupFisherSubModel', + algorithm=algorithm, +) + +# restore lr +optim_wrapper = dict(optimizer=dict(lr=finetune_lr)) + +# remove pruning related hooks +custom_hooks = _base_.custom_hooks[:-2] + +# delete ddp +model_wrapper_cfg = None diff --git a/projects/rtmpose/rtmpose/pruning/group_fisher_prune_rtmpose-s_8xb256-420e_aic-coco-256x192.py b/projects/rtmpose/rtmpose/pruning/group_fisher_prune_rtmpose-s_8xb256-420e_aic-coco-256x192.py index 14bdc96f5e..26bed5e886 100644 --- a/projects/rtmpose/rtmpose/pruning/group_fisher_prune_rtmpose-s_8xb256-420e_aic-coco-256x192.py +++ b/projects/rtmpose/rtmpose/pruning/group_fisher_prune_rtmpose-s_8xb256-420e_aic-coco-256x192.py @@ -1,75 +1,75 @@ -############################################################################# -"""You have to fill these args. - -_base_ (str): The path to your pretrained model checkpoint. -pretrained_path (str): The path to your pretrained model checkpoint. - -interval (int): Interval between pruning two channels. You should ensure you - can reach your target pruning ratio when the training ends. -normalization_type (str): GroupFisher uses two methods to normlized the channel - importance, including ['flops','act']. The former uses flops, while the - latter uses the memory occupation of activation feature maps. -lr_ratio (float): Ratio to decrease lr rate. As pruning progress is unstable, - you need to decrease the original lr rate until the pruning training work - steadly without getting nan. - -target_flop_ratio (float): The target flop ratio to prune your model. -input_shape (Tuple): input shape to measure the flops. 
-""" - -_base_ = 'mmpose::body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_aic-coco-256x192.py' # noqa -pretrained_path = 'https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-s_simcc-aic-coco_pt-aic-coco_420e-256x192-fcb2599b_20230126.pth' # noqa - -interval = 10 -normalization_type = 'act' -lr_ratio = 0.1 - -target_flop_ratio = 0.51 -input_shape = (1, 3, 256, 192) -############################################################################## - -architecture = _base_.model - -if hasattr(_base_, 'data_preprocessor'): - architecture.update({'data_preprocessor': _base_.data_preprocessor}) - data_preprocessor = None - -architecture.init_cfg = dict(type='Pretrained', checkpoint=pretrained_path) -architecture['_scope_'] = _base_.default_scope - -model = dict( - _delete_=True, - _scope_='mmrazor', - type='GroupFisherAlgorithm', - architecture=architecture, - interval=interval, - mutator=dict( - type='GroupFisherChannelMutator', - parse_cfg=dict(type='ChannelAnalyzer', tracer_type='FxTracer'), - channel_unit_cfg=dict( - type='GroupFisherChannelUnit', - default_args=dict(normalization_type=normalization_type, ), - ), - ), -) - -model_wrapper_cfg = dict( - type='mmrazor.GroupFisherDDP', - broadcast_buffers=False, -) - -optim_wrapper = dict( - optimizer=dict(lr=_base_.optim_wrapper.optimizer.lr * lr_ratio)) - -custom_hooks = getattr(_base_, 'custom_hooks', []) + [ - dict(type='mmrazor.PruningStructureHook'), - dict( - type='mmrazor.ResourceInfoHook', - interval=interval, - demo_input=dict( - type='mmrazor.DefaultDemoInput', - input_shape=input_shape, - ), - save_ckpt_thr=[target_flop_ratio], - ), -] +############################################################################# +"""You have to fill these args. + +_base_ (str): The path to your pretrained model checkpoint. +pretrained_path (str): The path to your pretrained model checkpoint. + +interval (int): Interval between pruning two channels. You should ensure you + can reach your target pruning ratio when the training ends. +normalization_type (str): GroupFisher uses two methods to normlized the channel + importance, including ['flops','act']. The former uses flops, while the + latter uses the memory occupation of activation feature maps. +lr_ratio (float): Ratio to decrease lr rate. As pruning progress is unstable, + you need to decrease the original lr rate until the pruning training work + steadly without getting nan. + +target_flop_ratio (float): The target flop ratio to prune your model. +input_shape (Tuple): input shape to measure the flops. 
+""" + +_base_ = 'mmpose::body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_aic-coco-256x192.py' # noqa +pretrained_path = 'https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-s_simcc-aic-coco_pt-aic-coco_420e-256x192-fcb2599b_20230126.pth' # noqa + +interval = 10 +normalization_type = 'act' +lr_ratio = 0.1 + +target_flop_ratio = 0.51 +input_shape = (1, 3, 256, 192) +############################################################################## + +architecture = _base_.model + +if hasattr(_base_, 'data_preprocessor'): + architecture.update({'data_preprocessor': _base_.data_preprocessor}) + data_preprocessor = None + +architecture.init_cfg = dict(type='Pretrained', checkpoint=pretrained_path) +architecture['_scope_'] = _base_.default_scope + +model = dict( + _delete_=True, + _scope_='mmrazor', + type='GroupFisherAlgorithm', + architecture=architecture, + interval=interval, + mutator=dict( + type='GroupFisherChannelMutator', + parse_cfg=dict(type='ChannelAnalyzer', tracer_type='FxTracer'), + channel_unit_cfg=dict( + type='GroupFisherChannelUnit', + default_args=dict(normalization_type=normalization_type, ), + ), + ), +) + +model_wrapper_cfg = dict( + type='mmrazor.GroupFisherDDP', + broadcast_buffers=False, +) + +optim_wrapper = dict( + optimizer=dict(lr=_base_.optim_wrapper.optimizer.lr * lr_ratio)) + +custom_hooks = getattr(_base_, 'custom_hooks', []) + [ + dict(type='mmrazor.PruningStructureHook'), + dict( + type='mmrazor.ResourceInfoHook', + interval=interval, + demo_input=dict( + type='mmrazor.DefaultDemoInput', + input_shape=input_shape, + ), + save_ckpt_thr=[target_flop_ratio], + ), +] diff --git a/projects/rtmpose/rtmpose/pruning/group_fisher_prune_rtmpose-s_8xb256-420e_coco-256x192.py b/projects/rtmpose/rtmpose/pruning/group_fisher_prune_rtmpose-s_8xb256-420e_coco-256x192.py index 5a998e5934..2945d5a684 100644 --- a/projects/rtmpose/rtmpose/pruning/group_fisher_prune_rtmpose-s_8xb256-420e_coco-256x192.py +++ b/projects/rtmpose/rtmpose/pruning/group_fisher_prune_rtmpose-s_8xb256-420e_coco-256x192.py @@ -1,75 +1,75 @@ -############################################################################# -"""You have to fill these args. - -_base_ (str): The path to your pretrained model checkpoint. -pretrained_path (str): The path to your pretrained model checkpoint. - -interval (int): Interval between pruning two channels. You should ensure you - can reach your target pruning ratio when the training ends. -normalization_type (str): GroupFisher uses two methods to normlized the channel - importance, including ['flops','act']. The former uses flops, while the - latter uses the memory occupation of activation feature maps. -lr_ratio (float): Ratio to decrease lr rate. As pruning progress is unstable, - you need to decrease the original lr rate until the pruning training work - steadly without getting nan. - -target_flop_ratio (float): The target flop ratio to prune your model. -input_shape (Tuple): input shape to measure the flops. 
-""" - -_base_ = 'mmpose::body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py' # noqa -pretrained_path = 'https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.pth' # noqa - -interval = 10 -normalization_type = 'act' -lr_ratio = 0.1 - -target_flop_ratio = 0.51 -input_shape = (1, 3, 256, 192) -############################################################################## - -architecture = _base_.model - -if hasattr(_base_, 'data_preprocessor'): - architecture.update({'data_preprocessor': _base_.data_preprocessor}) - data_preprocessor = None - -architecture.init_cfg = dict(type='Pretrained', checkpoint=pretrained_path) -architecture['_scope_'] = _base_.default_scope - -model = dict( - _delete_=True, - _scope_='mmrazor', - type='GroupFisherAlgorithm', - architecture=architecture, - interval=interval, - mutator=dict( - type='GroupFisherChannelMutator', - parse_cfg=dict(type='ChannelAnalyzer', tracer_type='FxTracer'), - channel_unit_cfg=dict( - type='GroupFisherChannelUnit', - default_args=dict(normalization_type=normalization_type, ), - ), - ), -) - -model_wrapper_cfg = dict( - type='mmrazor.GroupFisherDDP', - broadcast_buffers=False, -) - -optim_wrapper = dict( - optimizer=dict(lr=_base_.optim_wrapper.optimizer.lr * lr_ratio)) - -custom_hooks = getattr(_base_, 'custom_hooks', []) + [ - dict(type='mmrazor.PruningStructureHook'), - dict( - type='mmrazor.ResourceInfoHook', - interval=interval, - demo_input=dict( - type='mmrazor.DefaultDemoInput', - input_shape=input_shape, - ), - save_ckpt_thr=[target_flop_ratio], - ), -] +############################################################################# +"""You have to fill these args. + +_base_ (str): The path to your pretrained model checkpoint. +pretrained_path (str): The path to your pretrained model checkpoint. + +interval (int): Interval between pruning two channels. You should ensure you + can reach your target pruning ratio when the training ends. +normalization_type (str): GroupFisher uses two methods to normlized the channel + importance, including ['flops','act']. The former uses flops, while the + latter uses the memory occupation of activation feature maps. +lr_ratio (float): Ratio to decrease lr rate. As pruning progress is unstable, + you need to decrease the original lr rate until the pruning training work + steadly without getting nan. + +target_flop_ratio (float): The target flop ratio to prune your model. +input_shape (Tuple): input shape to measure the flops. 
+""" + +_base_ = 'mmpose::body_2d_keypoint/rtmpose/coco/rtmpose-s_8xb256-420e_coco-256x192.py' # noqa +pretrained_path = 'https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-s_simcc-coco_pt-aic-coco_420e-256x192-8edcf0d7_20230127.pth' # noqa + +interval = 10 +normalization_type = 'act' +lr_ratio = 0.1 + +target_flop_ratio = 0.51 +input_shape = (1, 3, 256, 192) +############################################################################## + +architecture = _base_.model + +if hasattr(_base_, 'data_preprocessor'): + architecture.update({'data_preprocessor': _base_.data_preprocessor}) + data_preprocessor = None + +architecture.init_cfg = dict(type='Pretrained', checkpoint=pretrained_path) +architecture['_scope_'] = _base_.default_scope + +model = dict( + _delete_=True, + _scope_='mmrazor', + type='GroupFisherAlgorithm', + architecture=architecture, + interval=interval, + mutator=dict( + type='GroupFisherChannelMutator', + parse_cfg=dict(type='ChannelAnalyzer', tracer_type='FxTracer'), + channel_unit_cfg=dict( + type='GroupFisherChannelUnit', + default_args=dict(normalization_type=normalization_type, ), + ), + ), +) + +model_wrapper_cfg = dict( + type='mmrazor.GroupFisherDDP', + broadcast_buffers=False, +) + +optim_wrapper = dict( + optimizer=dict(lr=_base_.optim_wrapper.optimizer.lr * lr_ratio)) + +custom_hooks = getattr(_base_, 'custom_hooks', []) + [ + dict(type='mmrazor.PruningStructureHook'), + dict( + type='mmrazor.ResourceInfoHook', + interval=interval, + demo_input=dict( + type='mmrazor.DefaultDemoInput', + input_shape=input_shape, + ), + save_ckpt_thr=[target_flop_ratio], + ), +] diff --git a/projects/rtmpose/rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py b/projects/rtmpose/rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py index 5fd8ce8e1e..9a3c7c1169 100644 --- a/projects/rtmpose/rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py +++ b/projects/rtmpose/rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py @@ -1,233 +1,233 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# common setting -num_keypoints = 133 -input_size = (288, 384) - -# runtime -max_epochs = 270 -stage2_num_epochs = 30 -base_lr = 4e-3 -train_batch_size = 32 -val_batch_size = 32 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(6., 6.93), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - 
deepen_factor=1., - widen_factor=1., - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=1024, - out_channels=num_keypoints, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - 
bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict( - save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoWholeBodyMetric', - ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') -test_evaluator = val_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# common setting +num_keypoints = 133 +input_size = (288, 384) + +# runtime +max_epochs = 270 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 32 +val_batch_size = 32 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=num_keypoints, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + 
dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/projects/rtmpose/rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py b/projects/rtmpose/rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py index f4005028b6..ca194538bb 100644 --- a/projects/rtmpose/rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py +++ b/projects/rtmpose/rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb64-270e_coco-wholebody-256x192.py @@ -1,233 +1,233 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# common setting -num_keypoints = 133 -input_size = (192, 256) - -# runtime -max_epochs = 270 -stage2_num_epochs = 30 -base_lr = 4e-3 -train_batch_size = 64 -val_batch_size = 32 - 
-train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=1., - widen_factor=1., - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=1024, - out_channels=num_keypoints, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - 
dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict( - save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoWholeBodyMetric', - ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') -test_evaluator = val_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# common setting +num_keypoints = 133 +input_size = (192, 256) + +# runtime +max_epochs = 270 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 64 +val_batch_size = 32 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1., + widen_factor=1., + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + 
type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1024, + out_channels=num_keypoints, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + 
checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/projects/rtmpose/rtmpose/wholebody_2d_keypoint/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py b/projects/rtmpose/rtmpose/wholebody_2d_keypoint/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py index d0096056a4..2869082e93 100644 --- a/projects/rtmpose/rtmpose/wholebody_2d_keypoint/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py +++ b/projects/rtmpose/rtmpose/wholebody_2d_keypoint/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py @@ -1,233 +1,233 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# common setting -num_keypoints = 133 -input_size = (192, 256) - -# runtime -max_epochs = 270 -stage2_num_epochs = 30 -base_lr = 4e-3 -train_batch_size = 64 -val_batch_size = 32 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=0.67, - widen_factor=0.75, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=768, - out_channels=num_keypoints, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = 
[ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.75, 1.25], - rotate_factor=60), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict( - save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoWholeBodyMetric', - ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') -test_evaluator = val_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# common setting +num_keypoints = 133 +input_size = (192, 256) + +# runtime +max_epochs = 270 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 64 +val_batch_size = 32 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, 
weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=0.67, + widen_factor=0.75, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=768, + out_channels=num_keypoints, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.75, 1.25], + rotate_factor=60), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + 
dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/projects/rtmpose/rtmpose/wholebody_2d_keypoint/rtmpose-x_8xb32-270e_coco-wholebody-384x288.py b/projects/rtmpose/rtmpose/wholebody_2d_keypoint/rtmpose-x_8xb32-270e_coco-wholebody-384x288.py index 429016e825..a536d6c463 100644 --- a/projects/rtmpose/rtmpose/wholebody_2d_keypoint/rtmpose-x_8xb32-270e_coco-wholebody-384x288.py +++ b/projects/rtmpose/rtmpose/wholebody_2d_keypoint/rtmpose-x_8xb32-270e_coco-wholebody-384x288.py @@ -1,233 +1,233 @@ -_base_ = ['mmpose::_base_/default_runtime.py'] - -# common setting -num_keypoints = 133 -input_size = (288, 384) - -# runtime -max_epochs = 270 -stage2_num_epochs = 30 -base_lr = 4e-3 -train_batch_size = 32 -val_batch_size = 32 - -train_cfg = dict(max_epochs=max_epochs, val_interval=10) -randomness = dict(seed=21) - -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - clip_grad=dict(max_norm=35, norm_type=2), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -# learning rate -param_scheduler = [ - dict( - type='LinearLR', - start_factor=1.0e-5, - by_epoch=False, - begin=0, - end=1000), - dict( - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=max_epochs // 2, - end=max_epochs, - T_max=max_epochs // 2, - by_epoch=True, - convert_to_iter_based=True), -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# codec settings -codec = dict( - type='SimCCLabel', - input_size=input_size, - sigma=(6., 6.93), - simcc_split_ratio=2.0, - normalize=False, - use_dark=False) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - 
bgr_to_rgb=True), - backbone=dict( - _scope_='mmdet', - type='CSPNeXt', - arch='P5', - expand_ratio=0.5, - deepen_factor=1.33, - widen_factor=1.25, - out_indices=(4, ), - channel_attention=True, - norm_cfg=dict(type='SyncBN'), - act_cfg=dict(type='SiLU'), - init_cfg=dict( - type='Pretrained', - prefix='backbone.', - checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' - 'rtmposev1/cspnext-x_udp-body7_210e-384x288-d28b58e6_20230529.pth' # noqa - )), - head=dict( - type='RTMCCHead', - in_channels=1280, - out_channels=num_keypoints, - input_size=codec['input_size'], - in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), - simcc_split_ratio=codec['simcc_split_ratio'], - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True), - decoder=codec), - test_cfg=dict(flip_test=True, )) - -# base dataset settings -dataset_type = 'CocoWholeBodyDataset' -data_mode = 'topdown' -data_root = 'data/coco/' - -backend_args = dict(backend='local') - -# pipelines -train_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=1.0), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -train_pipeline_stage2 = [ - dict(type='LoadImage', backend_args=backend_args), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict(type='RandomHalfBody'), - dict( - type='RandomBBoxTransform', - shift_factor=0., - scale_factor=[0.5, 1.5], - rotate_factor=90), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict( - type='Albumentation', - transforms=[ - dict(type='Blur', p=0.1), - dict(type='MedianBlur', p=0.1), - dict( - type='CoarseDropout', - max_holes=1, - max_height=0.4, - max_width=0.4, - min_holes=1, - min_height=0.2, - min_width=0.2, - p=0.5), - ]), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=train_batch_size, - num_workers=10, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/coco_wholebody_train_v1.0.json', - data_prefix=dict(img='train2017/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=val_batch_size, - num_workers=10, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - 
ann_file='annotations/coco_wholebody_val_v1.0.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - bbox_file='data/coco/person_detection_results/' - 'COCO_val2017_detections_AP_H_56_person.json', - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# hooks -default_hooks = dict( - checkpoint=dict( - save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - priority=49), - dict( - type='mmdet.PipelineSwitchHook', - switch_epoch=max_epochs - stage2_num_epochs, - switch_pipeline=train_pipeline_stage2) -] - -# evaluators -val_evaluator = dict( - type='CocoWholeBodyMetric', - ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') -test_evaluator = val_evaluator +_base_ = ['mmpose::_base_/default_runtime.py'] + +# common setting +num_keypoints = 133 +input_size = (288, 384) + +# runtime +max_epochs = 270 +stage2_num_epochs = 30 +base_lr = 4e-3 +train_batch_size = 32 +val_batch_size = 32 + +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +randomness = dict(seed=21) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + clip_grad=dict(max_norm=35, norm_type=2), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0e-5, + by_epoch=False, + begin=0, + end=1000), + dict( + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# codec settings +codec = dict( + type='SimCCLabel', + input_size=input_size, + sigma=(6., 6.93), + simcc_split_ratio=2.0, + normalize=False, + use_dark=False) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + _scope_='mmdet', + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=1.33, + widen_factor=1.25, + out_indices=(4, ), + channel_attention=True, + norm_cfg=dict(type='SyncBN'), + act_cfg=dict(type='SiLU'), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint='https://download.openmmlab.com/mmpose/v1/projects/' + 'rtmposev1/cspnext-x_udp-body7_210e-384x288-d28b58e6_20230529.pth' # noqa + )), + head=dict( + type='RTMCCHead', + in_channels=1280, + out_channels=num_keypoints, + input_size=codec['input_size'], + in_featuremap_size=tuple([s // 32 for s in codec['input_size']]), + simcc_split_ratio=codec['simcc_split_ratio'], + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True), + decoder=codec), + test_cfg=dict(flip_test=True, )) + +# base dataset settings +dataset_type = 'CocoWholeBodyDataset' +data_mode = 'topdown' +data_root = 'data/coco/' + +backend_args = dict(backend='local') + +# pipelines +train_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + 
dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=1.0), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImage', backend_args=backend_args), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict(type='RandomHalfBody'), + dict( + type='RandomBBoxTransform', + shift_factor=0., + scale_factor=[0.5, 1.5], + rotate_factor=90), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='Albumentation', + transforms=[ + dict(type='Blur', p=0.1), + dict(type='MedianBlur', p=0.1), + dict( + type='CoarseDropout', + max_holes=1, + max_height=0.4, + max_width=0.4, + min_holes=1, + min_height=0.2, + min_width=0.2, + p=0.5), + ]), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=train_batch_size, + num_workers=10, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_train_v1.0.json', + data_prefix=dict(img='train2017/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=val_batch_size, + num_workers=10, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/coco_wholebody_val_v1.0.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + bbox_file='data/coco/person_detection_results/' + 'COCO_val2017_detections_AP_H_56_person.json', + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# hooks +default_hooks = dict( + checkpoint=dict( + save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - stage2_num_epochs, + switch_pipeline=train_pipeline_stage2) +] + +# evaluators +val_evaluator = dict( + type='CocoWholeBodyMetric', + ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json') +test_evaluator = val_evaluator diff --git a/projects/skps/README.md b/projects/skps/README.md index 13e8c4a7ab..e70c407f75 100644 --- a/projects/skps/README.md +++ b/projects/skps/README.md @@ -1,83 +1,83 @@ -# Simple Keypoints - -## Description - -Author: @2120140200@mail.nankai.edu.cn - -It is a simple keypoints detector model. The model predict a score heatmap and an encoded location map. -The result in wflw achieves 3.94 NME. 
- -## Usage - -### Prerequisites - -- Python 3.7 -- PyTorch 1.6 or higher -- [MIM](https://github.com/open-mmlab/mim) v0.33 or higher -- [MMPose](https://github.com/open-mmlab/mmpose) v1.0.0rc0 or higher - -All the commands below rely on the correct configuration of `PYTHONPATH`, which should point to the project's directory so that Python can locate the module files. In `example_project/` root directory, run the following line to add the current directory to `PYTHONPATH`: - -```shell -export PYTHONPATH=`pwd`:$PYTHONPATH -``` - -### Data Preparation - -Prepare the COCO dataset according to the [instruction](https://mmpose.readthedocs.io/en/dev-1.x/dataset_zoo/2d_body_keypoint.html#coco). - -### Training commands - -**To train with single GPU:** - -```shell -mim train mmpose configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py -``` - -**To train with multiple GPUs:** - -```shell -mim train mmpose configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py --launcher pytorch --gpus 8 -``` - -**To train with multiple GPUs by slurm:** - -```shell -mim train mmpose configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py --launcher slurm \ - --gpus 16 --gpus-per-node 8 --partition $PARTITION -``` - -### Testing commands - -**To test with single GPU:** - -```shell -mim test mmpose configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py -C $CHECKPOINT -``` - -**To test with multiple GPUs:** - -```shell -mim test mmpose configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py -C $CHECKPOINT --launcher pytorch --gpus 8 -``` - -**To test with multiple GPUs by slurm:** - -```shell -mim test mmpose configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py -C $CHECKPOINT --launcher slurm \ - --gpus 16 --gpus-per-node 8 --partition $PARTITION -``` - -## Results - -WFLW - -| Arch | Input Size | NME*test* | NME*pose* | NME*illumination* | NME*occlusion* | NME*blur* | NME*makeup* | NME*expression* | ckpt | log | -| :--------- | :--------: | :------------------: | :------------------: | :--------------------------: | :-----------------------: | :------------------: | :--------------------: | :------------------------: | :--------: | :-------: | -| [skps](./configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py) | 256x256 | 3.88 | 6.60 | 3.81 | 4.57 | 4.44 | 3.75 | 4.13 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/skps/best_NME_epoch_80.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/skps/20230522_142437.log) | - -COFW - -| Arch | Input Size | NME | ckpt | log | -| :------------------------------------------------------------- | :--------: | :--: | :------------------------------------------------------------: | :------------------------------------------------------------: | -| [skps](./configs/td-hm_hrnetv2-w18_skps-1xb16-160e_cofw-256x256.py) | 256x256 | 3.20 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/skps/best_NME_epoch_113.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/skps/20230524_074949.log) | +# Simple Keypoints + +## Description + +Author: @2120140200@mail.nankai.edu.cn + +It is a simple keypoints detector model. The model predict a score heatmap and an encoded location map. +The result in wflw achieves 3.94 NME. 
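+
+A minimal inference sketch for a trained checkpoint is shown below. It assumes
+the standard MMPose 1.x top-down APIs (`init_model`, `inference_topdown`) and
+placeholder local paths, and it should be run from the project root so that
+the `custom_codecs` and `models` modules are importable (see Usage below).
+
+```python
+# Hedged example: the config/checkpoint paths are placeholders, adjust as needed.
+import custom_codecs  # noqa: F401, registers the project's custom codecs
+import models  # noqa: F401, registers the project's custom modules
+
+from mmpose.apis import inference_topdown, init_model
+
+config_file = 'configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py'
+checkpoint_file = 'best_NME_epoch_80.pth'  # see the WFLW table below
+
+# Build the model and load the weights (use 'cuda:0' if a GPU is available).
+model = init_model(config_file, checkpoint_file, device='cpu')
+
+# Without explicit bboxes, the whole image is treated as the face bounding box.
+results = inference_topdown(model, 'path/to/face.jpg')
+print(results[0].pred_instances.keypoints.shape)  # expected (1, 98, 2) on WFLW
+```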
+ +## Usage + +### Prerequisites + +- Python 3.7 +- PyTorch 1.6 or higher +- [MIM](https://github.com/open-mmlab/mim) v0.33 or higher +- [MMPose](https://github.com/open-mmlab/mmpose) v1.0.0rc0 or higher + +All the commands below rely on the correct configuration of `PYTHONPATH`, which should point to the project's directory so that Python can locate the module files. In `example_project/` root directory, run the following line to add the current directory to `PYTHONPATH`: + +```shell +export PYTHONPATH=`pwd`:$PYTHONPATH +``` + +### Data Preparation + +Prepare the COCO dataset according to the [instruction](https://mmpose.readthedocs.io/en/dev-1.x/dataset_zoo/2d_body_keypoint.html#coco). + +### Training commands + +**To train with single GPU:** + +```shell +mim train mmpose configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py +``` + +**To train with multiple GPUs:** + +```shell +mim train mmpose configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py --launcher pytorch --gpus 8 +``` + +**To train with multiple GPUs by slurm:** + +```shell +mim train mmpose configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py --launcher slurm \ + --gpus 16 --gpus-per-node 8 --partition $PARTITION +``` + +### Testing commands + +**To test with single GPU:** + +```shell +mim test mmpose configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py -C $CHECKPOINT +``` + +**To test with multiple GPUs:** + +```shell +mim test mmpose configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py -C $CHECKPOINT --launcher pytorch --gpus 8 +``` + +**To test with multiple GPUs by slurm:** + +```shell +mim test mmpose configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py -C $CHECKPOINT --launcher slurm \ + --gpus 16 --gpus-per-node 8 --partition $PARTITION +``` + +## Results + +WFLW + +| Arch | Input Size | NME*test* | NME*pose* | NME*illumination* | NME*occlusion* | NME*blur* | NME*makeup* | NME*expression* | ckpt | log | +| :--------- | :--------: | :------------------: | :------------------: | :--------------------------: | :-----------------------: | :------------------: | :--------------------: | :------------------------: | :--------: | :-------: | +| [skps](./configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py) | 256x256 | 3.88 | 6.60 | 3.81 | 4.57 | 4.44 | 3.75 | 4.13 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/skps/best_NME_epoch_80.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/skps/20230522_142437.log) | + +COFW + +| Arch | Input Size | NME | ckpt | log | +| :------------------------------------------------------------- | :--------: | :--: | :------------------------------------------------------------: | :------------------------------------------------------------: | +| [skps](./configs/td-hm_hrnetv2-w18_skps-1xb16-160e_cofw-256x256.py) | 256x256 | 3.20 | [ckpt](https://download.openmmlab.com/mmpose/v1/projects/skps/best_NME_epoch_113.pth) | [log](https://download.openmmlab.com/mmpose/v1/projects/skps/20230524_074949.log) | diff --git a/projects/skps/configs/td-hm_hrnetv2-w18_skps-1xb16-160e_cofw-256x256.py b/projects/skps/configs/td-hm_hrnetv2-w18_skps-1xb16-160e_cofw-256x256.py index 494c4325df..df55709168 100644 --- a/projects/skps/configs/td-hm_hrnetv2-w18_skps-1xb16-160e_cofw-256x256.py +++ b/projects/skps/configs/td-hm_hrnetv2-w18_skps-1xb16-160e_cofw-256x256.py @@ -1,176 +1,176 @@ -custom_imports = dict(imports=['custom_codecs', 'models']) - -_base_ = ['mmpose::_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=160, val_interval=1) - -# 
optimizer -optim_wrapper = dict( - optimizer=dict(type='AdamW', lr=2e-3, weight_decay=0.0005)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=160, - milestones=[80, 120], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) - -# codec settings -codec = dict( - type='SKPSHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(18, 36)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(18, 36, 72)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(18, 36, 72, 144), - multiscale_output=True), - upsample=dict(mode='bilinear', align_corners=False)), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), - ), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='SKPSHead', - in_channels=270, - out_channels=29, - conv_out_channels=(270, ), - conv_kernel_sizes=(1, ), - heatmap_loss=dict(type='AdaptiveWingLoss', use_target_weight=True), - offside_loss=dict(type='AdaptiveWingLoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'COFWDataset' -data_mode = 'topdown' -data_root = 'data/cofw/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale', padding=1), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='Albumentation', - transforms=[ - dict(type='RandomBrightnessContrast', p=0.5), - dict(type='HueSaturationValue', p=0.5), - dict(type='GaussianBlur', p=0.5), - dict(type='GaussNoise', p=0.1), - dict( - type='CoarseDropout', - max_holes=8, - max_height=0.2, - max_width=0.2, - min_holes=1, - min_height=0.1, - min_width=0.1, - p=0.5), - ]), - dict( - type='RandomBBoxTransform', - shift_prob=0., - rotate_factor=45, - scale_factor=(0.75, 1.25), - scale_prob=0), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale', padding=1), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=16, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/cofw_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - 
sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/cofw_test.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='NME', - norm_mode='keypoint_distance', -) -test_evaluator = val_evaluator +custom_imports = dict(imports=['custom_codecs', 'models']) + +_base_ = ['mmpose::_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=160, val_interval=1) + +# optimizer +optim_wrapper = dict( + optimizer=dict(type='AdamW', lr=2e-3, weight_decay=0.0005)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=160, + milestones=[80, 120], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='SKPSHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='SKPSHead', + in_channels=270, + out_channels=29, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + heatmap_loss=dict(type='AdaptiveWingLoss', use_target_weight=True), + offside_loss=dict(type='AdaptiveWingLoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'COFWDataset' +data_mode = 'topdown' +data_root = 'data/cofw/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale', padding=1), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='Albumentation', + transforms=[ + dict(type='RandomBrightnessContrast', p=0.5), + dict(type='HueSaturationValue', p=0.5), + dict(type='GaussianBlur', p=0.5), + dict(type='GaussNoise', p=0.1), + dict( + type='CoarseDropout', + max_holes=8, + max_height=0.2, + max_width=0.2, + min_holes=1, + min_height=0.1, + min_width=0.1, + p=0.5), + ]), + dict( + type='RandomBBoxTransform', + shift_prob=0., + rotate_factor=45, + scale_factor=(0.75, 1.25), + scale_prob=0), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + 
dict(type='GetBBoxCenterScale', padding=1), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=16, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/cofw_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/cofw_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/projects/skps/configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py b/projects/skps/configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py index 0547ebcff2..6a9f17869f 100644 --- a/projects/skps/configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py +++ b/projects/skps/configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py @@ -1,176 +1,176 @@ -custom_imports = dict(imports=['custom_codecs', 'models']) - -_base_ = ['mmpose::_base_/default_runtime.py'] - -# runtime -train_cfg = dict(max_epochs=80, val_interval=1) - -# optimizer -optim_wrapper = dict( - optimizer=dict(type='AdamW', lr=2e-3, weight_decay=0.0005)) - -# learning policy -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, - by_epoch=False), # warm-up - dict( - type='MultiStepLR', - begin=0, - end=80, - milestones=[40, 60], - gamma=0.1, - by_epoch=True) -] - -# automatically scaling LR based on the actual training batch size -auto_scale_lr = dict(base_batch_size=512) - -# hooks -default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) - -# codec settings -codec = dict( - type='SKPSHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) - -# model settings -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='HRNet', - in_channels=3, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(18, 36)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(18, 36, 72)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(18, 36, 72, 144), - multiscale_output=True), - upsample=dict(mode='bilinear', align_corners=False)), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), - ), - neck=dict( - type='FeatureMapProcessor', - concat=True, - ), - head=dict( - type='SKPSHead', - in_channels=270, - out_channels=98, - conv_out_channels=(270, ), - conv_kernel_sizes=(1, ), - heatmap_loss=dict(type='AdaptiveWingLoss', use_target_weight=True), - offside_loss=dict(type='AdaptiveWingLoss', use_target_weight=True), - decoder=codec), - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - 
shift_heatmap=True, - )) - -# base dataset settings -dataset_type = 'WFLWDataset' -data_mode = 'topdown' -data_root = './data/wflw/' - -# pipelines -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='Albumentation', - transforms=[ - dict(type='RandomBrightnessContrast', p=0.5), - dict(type='HueSaturationValue', p=0.5), - dict(type='GaussianBlur', p=0.5), - dict(type='GaussNoise', p=0.1), - dict( - type='CoarseDropout', - max_holes=8, - max_height=0.2, - max_width=0.2, - min_holes=1, - min_height=0.1, - min_width=0.1, - p=0.5), - ]), - dict( - type='RandomBBoxTransform', - shift_prob=0.0, - rotate_factor=45, - scale_factor=(0.75, 1.25), - scale_prob=1.), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='GenerateTarget', encoder=codec), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=codec['input_size']), - dict(type='PackPoseInputs') -] - -# data loaders -train_dataloader = dict( - batch_size=64, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/face_landmarks_wflw_train.json', - data_prefix=dict(img='images/'), - pipeline=train_pipeline, - )) -val_dataloader = dict( - batch_size=32, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - data_mode=data_mode, - ann_file='annotations/face_landmarks_wflw_test.json', - data_prefix=dict(img='images/'), - test_mode=True, - pipeline=val_pipeline, - )) -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='NME', - norm_mode='keypoint_distance', -) -test_evaluator = val_evaluator +custom_imports = dict(imports=['custom_codecs', 'models']) + +_base_ = ['mmpose::_base_/default_runtime.py'] + +# runtime +train_cfg = dict(max_epochs=80, val_interval=1) + +# optimizer +optim_wrapper = dict( + optimizer=dict(type='AdamW', lr=2e-3, weight_decay=0.0005)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', begin=0, end=500, start_factor=0.001, + by_epoch=False), # warm-up + dict( + type='MultiStepLR', + begin=0, + end=80, + milestones=[40, 60], + gamma=0.1, + by_epoch=True) +] + +# automatically scaling LR based on the actual training batch size +auto_scale_lr = dict(base_batch_size=512) + +# hooks +default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1)) + +# codec settings +codec = dict( + type='SKPSHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2) + +# model settings +model = dict( + type='TopdownPoseEstimator', + data_preprocessor=dict( + type='PoseDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='HRNet', + in_channels=3, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + 
num_channels=(18, 36, 72, 144), + multiscale_output=True), + upsample=dict(mode='bilinear', align_corners=False)), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'), + ), + neck=dict( + type='FeatureMapProcessor', + concat=True, + ), + head=dict( + type='SKPSHead', + in_channels=270, + out_channels=98, + conv_out_channels=(270, ), + conv_kernel_sizes=(1, ), + heatmap_loss=dict(type='AdaptiveWingLoss', use_target_weight=True), + offside_loss=dict(type='AdaptiveWingLoss', use_target_weight=True), + decoder=codec), + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + +# base dataset settings +dataset_type = 'WFLWDataset' +data_mode = 'topdown' +data_root = './data/wflw/' + +# pipelines +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='RandomFlip', direction='horizontal'), + dict( + type='Albumentation', + transforms=[ + dict(type='RandomBrightnessContrast', p=0.5), + dict(type='HueSaturationValue', p=0.5), + dict(type='GaussianBlur', p=0.5), + dict(type='GaussNoise', p=0.1), + dict( + type='CoarseDropout', + max_holes=8, + max_height=0.2, + max_width=0.2, + min_holes=1, + min_height=0.1, + min_width=0.1, + p=0.5), + ]), + dict( + type='RandomBBoxTransform', + shift_prob=0.0, + rotate_factor=45, + scale_factor=(0.75, 1.25), + scale_prob=1.), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='GenerateTarget', encoder=codec), + dict(type='PackPoseInputs') +] +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(type='TopdownAffine', input_size=codec['input_size']), + dict(type='PackPoseInputs') +] + +# data loaders +train_dataloader = dict( + batch_size=64, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_train.json', + data_prefix=dict(img='images/'), + pipeline=train_pipeline, + )) +val_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_mode=data_mode, + ann_file='annotations/face_landmarks_wflw_test.json', + data_prefix=dict(img='images/'), + test_mode=True, + pipeline=val_pipeline, + )) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='NME', + norm_mode='keypoint_distance', +) +test_evaluator = val_evaluator diff --git a/projects/skps/custom_codecs/__init__.py b/projects/skps/custom_codecs/__init__.py index b346b55de6..ecc59c0144 100644 --- a/projects/skps/custom_codecs/__init__.py +++ b/projects/skps/custom_codecs/__init__.py @@ -1,3 +1,3 @@ -from .skps_heatmap import SKPSHeatmap - -__all__ = ['SKPSHeatmap'] +from .skps_heatmap import SKPSHeatmap + +__all__ = ['SKPSHeatmap'] diff --git a/projects/skps/custom_codecs/skps_heatmap.py b/projects/skps/custom_codecs/skps_heatmap.py index f542ff2970..5529379064 100644 --- a/projects/skps/custom_codecs/skps_heatmap.py +++ b/projects/skps/custom_codecs/skps_heatmap.py @@ -1,164 +1,164 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from typing import Optional, Tuple - -import numpy as np - -from mmpose.codecs.base import BaseKeypointCodec -from mmpose.codecs.utils.gaussian_heatmap import \ - generate_unbiased_gaussian_heatmaps -from mmpose.codecs.utils.post_processing import get_heatmap_maximum -from mmpose.registry import KEYPOINT_CODECS - - -@KEYPOINT_CODECS.register_module() -class SKPSHeatmap(BaseKeypointCodec): - """Generate heatmap the same with MSRAHeatmap, and produce offset map - within x and y directions. - - Note: - - - instance number: N - - keypoint number: K - - keypoint dimension: D - - image size: [w, h] - - heatmap size: [W, H] - - offset_map size: [W, H] - - Encoded: - - - heatmaps (np.ndarray): The generated heatmap in shape (K, H, W) - where [W, H] is the `heatmap_size` - - offset_maps (np.ndarray): The generated offset map in x and y - direction in shape (2K, H, W) where [W, H] is the - `offset_map_size` - - keypoint_weights (np.ndarray): The target weights in shape (N, K) - - Args: - input_size (tuple): Image size in [w, h] - heatmap_size (tuple): Heatmap size in [W, H] - sigma (float): The sigma value of the Gaussian heatmap - """ - - def __init__(self, input_size: Tuple[int, int], - heatmap_size: Tuple[int, int], sigma: float) -> None: - super().__init__() - self.input_size = input_size - self.heatmap_size = heatmap_size - self.sigma = sigma - self.scale_factor = (np.array(input_size) / - heatmap_size).astype(np.float32) - - self.y_range, self.x_range = np.meshgrid( - np.arange(0, self.heatmap_size[1]), - np.arange(0, self.heatmap_size[0]), - indexing='ij') - - def encode(self, - keypoints: np.ndarray, - keypoints_visible: Optional[np.ndarray] = None) -> dict: - """Encode keypoints into heatmaps. Note that the original keypoint - coordinates should be in the input image space. 
- - Args: - keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) - keypoints_visible (np.ndarray): Keypoint visibilities in shape - (N, K) - - Returns: - dict: - - heatmaps (np.ndarray): The generated heatmap in shape - (K, H, W) where [W, H] is the `heatmap_size` - - offset_maps (np.ndarray): The generated offset maps in x and y - directions in shape (2*K, H, W) where [W, H] is the - `offset_map_size` - - keypoint_weights (np.ndarray): The target weights in shape - (N, K) - """ - - assert keypoints.shape[0] == 1, ( - f'{self.__class__.__name__} only support single-instance ' - 'keypoint encoding') - - if keypoints_visible is None: - keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) - - heatmaps, keypoint_weights = generate_unbiased_gaussian_heatmaps( - heatmap_size=self.heatmap_size, - keypoints=keypoints / self.scale_factor, - keypoints_visible=keypoints_visible, - sigma=self.sigma) - - offset_maps = self.generate_offset_map( - heatmap_size=self.heatmap_size, - keypoints=keypoints / self.scale_factor, - ) - - encoded = dict( - heatmaps=heatmaps, - keypoint_weights=keypoint_weights[0], - displacements=offset_maps) - - return encoded - - def generate_offset_map(self, heatmap_size: Tuple[int, int], - keypoints: np.ndarray): - - N, K, _ = keypoints.shape - - # batchsize 1 - keypoints = keypoints[0] - - # caution: there will be a broadcast which produce - # offside_x and offside_y with shape 64x64x98 - - offset_x = keypoints[:, 0] - np.expand_dims(self.x_range, axis=-1) - offset_y = keypoints[:, 1] - np.expand_dims(self.y_range, axis=-1) - - offset_map = np.concatenate([offset_x, offset_y], axis=-1) - - offset_map = np.transpose(offset_map, axes=[2, 0, 1]) - - return offset_map - - def decode(self, encoded: np.ndarray, - offset_maps: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: - """Decode keypoint coordinates from heatmaps. The decoded keypoint - coordinates are in the input image space. - - Args: - encoded (np.ndarray): Heatmaps in shape (K, H, W) - - Returns: - tuple: - - keypoints (np.ndarray): Decoded keypoint coordinates in shape - (N, K, D) - - scores (np.ndarray): The keypoint scores in shape (N, K). It - usually represents the confidence of the keypoint prediction - """ - heatmaps = encoded.copy() - - offset_maps = offset_maps.copy() - - K, H, W = heatmaps.shape - - keypoints, scores = get_heatmap_maximum(heatmaps) - - offset_x = offset_maps[:K, ...] - offset_y = offset_maps[K:, ...] - - keypoints_interger = keypoints.astype(np.int32) - keypoints_decimal = np.zeros_like(keypoints) - - for i in range(K): - [x, y] = keypoints_interger[i] - if x < 0 or y < 0: - x = y = 0 - - # caution: torch tensor shape is nchw, so index should be i,y,x - keypoints_decimal[i][0] = x + offset_x[i, y, x] - keypoints_decimal[i][1] = y + offset_y[i, y, x] - - # Restore the keypoint scale - keypoints_decimal = keypoints_decimal * self.scale_factor - - return keypoints_decimal[None], scores[None] +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Tuple + +import numpy as np + +from mmpose.codecs.base import BaseKeypointCodec +from mmpose.codecs.utils.gaussian_heatmap import \ + generate_unbiased_gaussian_heatmaps +from mmpose.codecs.utils.post_processing import get_heatmap_maximum +from mmpose.registry import KEYPOINT_CODECS + + +@KEYPOINT_CODECS.register_module() +class SKPSHeatmap(BaseKeypointCodec): + """Generate heatmap the same with MSRAHeatmap, and produce offset map + within x and y directions. 
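The offset maps are built by broadcasting the keypoint coordinates against the heatmap coordinate grids. A standalone sketch of that shape bookkeeping (using the WFLW sizes H = W = 64, K = 98; the random keypoints are placeholders, not codec output) looks like this:

```python
import numpy as np

H, W, K = 64, 64, 98
# (H, W) grids of y and x coordinates, matching the codec's meshgrid call
y_range, x_range = np.meshgrid(np.arange(H), np.arange(W), indexing='ij')
keypoints = np.random.rand(K, 2) * [W, H]           # (K, 2) in heatmap coords

# (K,) minus (H, W, 1) broadcasts to an (H, W, K) offset map per axis
offset_x = keypoints[:, 0] - x_range[..., None]
offset_y = keypoints[:, 1] - y_range[..., None]
offset_map = np.concatenate([offset_x, offset_y], axis=-1)  # (H, W, 2K)
offset_map = offset_map.transpose(2, 0, 1)                  # (2K, H, W)
print(offset_map.shape)                                     # (196, 64, 64)
```

so every cell of the x (or y) block stores the signed distance from that cell to the keypoint along that axis.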
+ + Note: + + - instance number: N + - keypoint number: K + - keypoint dimension: D + - image size: [w, h] + - heatmap size: [W, H] + - offset_map size: [W, H] + + Encoded: + + - heatmaps (np.ndarray): The generated heatmap in shape (K, H, W) + where [W, H] is the `heatmap_size` + - offset_maps (np.ndarray): The generated offset map in x and y + direction in shape (2K, H, W) where [W, H] is the + `offset_map_size` + - keypoint_weights (np.ndarray): The target weights in shape (N, K) + + Args: + input_size (tuple): Image size in [w, h] + heatmap_size (tuple): Heatmap size in [W, H] + sigma (float): The sigma value of the Gaussian heatmap + """ + + def __init__(self, input_size: Tuple[int, int], + heatmap_size: Tuple[int, int], sigma: float) -> None: + super().__init__() + self.input_size = input_size + self.heatmap_size = heatmap_size + self.sigma = sigma + self.scale_factor = (np.array(input_size) / + heatmap_size).astype(np.float32) + + self.y_range, self.x_range = np.meshgrid( + np.arange(0, self.heatmap_size[1]), + np.arange(0, self.heatmap_size[0]), + indexing='ij') + + def encode(self, + keypoints: np.ndarray, + keypoints_visible: Optional[np.ndarray] = None) -> dict: + """Encode keypoints into heatmaps. Note that the original keypoint + coordinates should be in the input image space. + + Args: + keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) + keypoints_visible (np.ndarray): Keypoint visibilities in shape + (N, K) + + Returns: + dict: + - heatmaps (np.ndarray): The generated heatmap in shape + (K, H, W) where [W, H] is the `heatmap_size` + - offset_maps (np.ndarray): The generated offset maps in x and y + directions in shape (2*K, H, W) where [W, H] is the + `offset_map_size` + - keypoint_weights (np.ndarray): The target weights in shape + (N, K) + """ + + assert keypoints.shape[0] == 1, ( + f'{self.__class__.__name__} only support single-instance ' + 'keypoint encoding') + + if keypoints_visible is None: + keypoints_visible = np.ones(keypoints.shape[:2], dtype=np.float32) + + heatmaps, keypoint_weights = generate_unbiased_gaussian_heatmaps( + heatmap_size=self.heatmap_size, + keypoints=keypoints / self.scale_factor, + keypoints_visible=keypoints_visible, + sigma=self.sigma) + + offset_maps = self.generate_offset_map( + heatmap_size=self.heatmap_size, + keypoints=keypoints / self.scale_factor, + ) + + encoded = dict( + heatmaps=heatmaps, + keypoint_weights=keypoint_weights[0], + displacements=offset_maps) + + return encoded + + def generate_offset_map(self, heatmap_size: Tuple[int, int], + keypoints: np.ndarray): + + N, K, _ = keypoints.shape + + # batchsize 1 + keypoints = keypoints[0] + + # caution: there will be a broadcast which produce + # offside_x and offside_y with shape 64x64x98 + + offset_x = keypoints[:, 0] - np.expand_dims(self.x_range, axis=-1) + offset_y = keypoints[:, 1] - np.expand_dims(self.y_range, axis=-1) + + offset_map = np.concatenate([offset_x, offset_y], axis=-1) + + offset_map = np.transpose(offset_map, axes=[2, 0, 1]) + + return offset_map + + def decode(self, encoded: np.ndarray, + offset_maps: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Decode keypoint coordinates from heatmaps. The decoded keypoint + coordinates are in the input image space. + + Args: + encoded (np.ndarray): Heatmaps in shape (K, H, W) + + Returns: + tuple: + - keypoints (np.ndarray): Decoded keypoint coordinates in shape + (N, K, D) + - scores (np.ndarray): The keypoint scores in shape (N, K). 
It + usually represents the confidence of the keypoint prediction + """ + heatmaps = encoded.copy() + + offset_maps = offset_maps.copy() + + K, H, W = heatmaps.shape + + keypoints, scores = get_heatmap_maximum(heatmaps) + + offset_x = offset_maps[:K, ...] + offset_y = offset_maps[K:, ...] + + keypoints_interger = keypoints.astype(np.int32) + keypoints_decimal = np.zeros_like(keypoints) + + for i in range(K): + [x, y] = keypoints_interger[i] + if x < 0 or y < 0: + x = y = 0 + + # caution: torch tensor shape is nchw, so index should be i,y,x + keypoints_decimal[i][0] = x + offset_x[i, y, x] + keypoints_decimal[i][1] = y + offset_y[i, y, x] + + # Restore the keypoint scale + keypoints_decimal = keypoints_decimal * self.scale_factor + + return keypoints_decimal[None], scores[None] diff --git a/projects/skps/models/__init__.py b/projects/skps/models/__init__.py index 55377c089c..60ddd555ff 100644 --- a/projects/skps/models/__init__.py +++ b/projects/skps/models/__init__.py @@ -1,3 +1,3 @@ -from .skps_head import SKPSHead - -__all__ = ['SKPSHead'] +from .skps_head import SKPSHead + +__all__ = ['SKPSHead'] diff --git a/projects/skps/models/skps_head.py b/projects/skps/models/skps_head.py index 73f84dc443..ec30dc0a7e 100644 --- a/projects/skps/models/skps_head.py +++ b/projects/skps/models/skps_head.py @@ -1,399 +1,399 @@ -# Copyright (c) OpenMMLab. All rights reserved. - -from typing import Optional, Sequence, Tuple, Union - -import torch -import torch.nn as nn -from mmcv.cnn import build_conv_layer -from mmengine.model import ModuleDict -from mmengine.structures import InstanceData -from torch import Tensor - -from mmpose.evaluation.functional import pose_pck_accuracy -from mmpose.models.heads.base_head import BaseHead -from mmpose.models.utils.tta import flip_coordinates -from mmpose.registry import KEYPOINT_CODECS, MODELS -from mmpose.utils.tensor_utils import to_numpy -from mmpose.utils.typing import (ConfigType, Features, InstanceList, - OptConfigType, OptSampleList, Predictions) - -OptIntSeq = Optional[Sequence[int]] - - -@MODELS.register_module() -class SKPSHead(BaseHead): - """DisEntangled Keypoint Regression head introduced in `Bottom-up human - pose estimation via disentangled keypoint regression`_ by Geng et al - (2021). The head is composed of a heatmap branch and a displacement branch. - - Args: - in_channels (int | Sequence[int]): Number of channels in the input - feature map - out_channels (int): Number of channels in the output heatmap - conv_out_channels (Sequence[int], optional): The output channel number - of each intermediate conv layer. ``None`` means no intermediate - conv layer between deconv layers and the final conv layer. - Defaults to ``None`` - conv_kernel_sizes (Sequence[int | tuple], optional): The kernel size - of each intermediate conv layer. Defaults to ``None`` - final_layer (dict): Arguments of the final Conv2d layer. - Defaults to ``dict(kernel_size=1)`` - loss (Config): Config of the keypoint loss. Defaults to use - :class:`KeypointMSELoss` - decoder (Config, optional): The decoder config that controls decoding - keypoint coordinates from the network output. Defaults to ``None`` - init_cfg (Config, optional): Config to control the initialization. See - :attr:`default_init_cfg` for default settings - - - .. 
_`Bottom-up human pose estimation via disentangled keypoint regression`: - https://arxiv.org/abs/2104.02300 - """ - - _version = 2 - - def __init__(self, - in_channels: Union[int, Sequence[int]], - out_channels: int, - conv_out_channels: OptIntSeq = None, - conv_kernel_sizes: OptIntSeq = None, - final_layer: dict = dict(kernel_size=1), - heatmap_loss: ConfigType = dict( - type='AdaptiveWingLoss', use_target_weight=True), - offside_loss: ConfigType = dict( - type='AdaptiveWingLoss', use_target_weight=True), - decoder: OptConfigType = None, - init_cfg: OptConfigType = None): - - if init_cfg is None: - init_cfg = self.default_init_cfg - - super().__init__(init_cfg) - - self.in_channels = in_channels - self.out_channels = out_channels - - if conv_out_channels: - if conv_kernel_sizes is None or len(conv_out_channels) != len( - conv_kernel_sizes): - raise ValueError( - '"conv_out_channels" and "conv_kernel_sizes" should ' - 'be integer sequences with the same length. Got ' - f'mismatched lengths {conv_out_channels} and ' - f'{conv_kernel_sizes}') - - self.conv_layers = self._make_conv_layers( - in_channels=in_channels, - layer_out_channels=conv_out_channels, - layer_kernel_sizes=conv_kernel_sizes) - in_channels = conv_out_channels[-1] - else: - self.conv_layers = nn.Identity() - - if final_layer is not None: - cfg = dict( - type='Conv2d', - in_channels=in_channels, - out_channels=self.out_channels * 3, - kernel_size=1, - bias=True) - cfg.update(final_layer) - self.final_layer = build_conv_layer(cfg) - else: - self.final_layer = nn.Identity() - - # build losses - self.loss_module = ModuleDict( - dict( - heatmap=MODELS.build(heatmap_loss), - offside=MODELS.build(offside_loss), - )) - - # build decoder - if decoder is not None: - self.decoder = KEYPOINT_CODECS.build(decoder) - else: - self.decoder = None - - # Register the hook to automatically convert old version state dicts - self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) - - @property - def default_init_cfg(self): - init_cfg = [ - dict(type='Normal', layer=['Conv2d', 'ConvTranspose2d'], std=0.01), - dict(type='Constant', layer='BatchNorm2d', val=1) - ] - return init_cfg - - def _make_conv_layers(self, in_channels: int, - layer_out_channels: Sequence[int], - layer_kernel_sizes: Sequence[int]) -> nn.Module: - """Create convolutional layers by given parameters.""" - - layers = [] - for out_channels, kernel_size in zip(layer_out_channels, - layer_kernel_sizes): - padding = (kernel_size - 1) // 2 - cfg = dict( - type='Conv2d', - in_channels=in_channels, - out_channels=out_channels, - kernel_size=kernel_size, - stride=1, - padding=padding) - layers.append(build_conv_layer(cfg)) - layers.append(nn.BatchNorm2d(num_features=out_channels)) - layers.append(nn.ReLU(inplace=True)) - in_channels = out_channels - - return nn.Sequential(*layers) - - def forward(self, feats: Tuple[Tensor]) -> Tensor: - """Forward the network. The input is multi scale feature maps and the - output is a tuple of heatmap and displacement. - - Args: - feats (Tuple[Tensor]): Multi scale feature maps. - - Returns: - Tuple[Tensor]: output heatmap and displacement. - """ - x = feats[-1] - - x = self.conv_layers(x) - x = self.final_layer(x) - heatmaps = x[:, :self.out_channels, ...] - offside = x[:, self.out_channels:, ...] - return heatmaps, offside - - def loss(self, - feats: Tuple[Tensor], - batch_data_samples: OptSampleList, - train_cfg: ConfigType = {}) -> dict: - """Calculate losses from a batch of inputs and data samples. 
- - Args: - feats (Tuple[Tensor]): The multi-stage features - batch_data_samples (List[:obj:`PoseDataSample`]): The batch - data samples - train_cfg (dict): The runtime config for training process. - Defaults to {} - - Returns: - dict: A dictionary of losses. - """ - pred_heatmaps, pred_offside = self.forward(feats) - gt_heatmaps = torch.stack( - [d.gt_fields.heatmaps for d in batch_data_samples]) - keypoint_weights = torch.stack([ - d.gt_instance_labels.keypoint_weights for d in batch_data_samples - ]) - gt_offside = torch.stack( - [d.gt_fields.displacements for d in batch_data_samples]) - - # calculate losses - losses = dict() - heatmap_loss = self.loss_module['heatmap'](pred_heatmaps, gt_heatmaps, - keypoint_weights) - - n, c, h, w = pred_offside.size() - offside_loss_x = self.loss_module['offside'](pred_offside[:, :c // 2], - gt_offside[:, :c // 2], - gt_heatmaps) - - offside_loss_y = self.loss_module['offside'](pred_offside[:, c // 2:], - gt_offside[:, c // 2:], - gt_heatmaps) - - offside_loss = (offside_loss_x + offside_loss_y) / 2. - - losses.update({ - 'loss/heatmap': heatmap_loss, - 'loss/offside': offside_loss, - }) - # calculate accuracy - if train_cfg.get('compute_acc', True): - _, avg_acc, _ = pose_pck_accuracy( - output=to_numpy(pred_heatmaps), - target=to_numpy(gt_heatmaps), - mask=to_numpy(keypoint_weights) > 0) - - acc_pose = torch.tensor(avg_acc, device=gt_heatmaps.device) - losses.update(acc_pose=acc_pose) - - return losses - - def predict(self, - feats: Features, - batch_data_samples: OptSampleList, - test_cfg: ConfigType = {}) -> Predictions: - """Predict results from features. - - Args: - feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage - features (or multiple multi-scale features in TTA) - batch_data_samples (List[:obj:`PoseDataSample`]): The batch - data samples - test_cfg (dict): The runtime config for testing process. Defaults - to {} - - Returns: - Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If - ``test_cfg['output_heatmap']==True``, return both pose and heatmap - prediction; otherwise only return the pose prediction. 
- - The pose prediction is a list of ``InstanceData``, each contains - the following fields: - - - keypoints (np.ndarray): predicted keypoint coordinates in - shape (num_instances, K, D) where K is the keypoint number - and D is the keypoint dimension - - keypoint_scores (np.ndarray): predicted keypoint scores in - shape (num_instances, K) - """ - - flip_test = test_cfg.get('flip_test', False) - metainfo = batch_data_samples[0].metainfo - - if flip_test: - assert isinstance(feats, list) and len(feats) == 2 - flip_indices = metainfo['flip_indices'] - _feat, _feat_flip = feats - _heatmaps, _displacements = self.forward(_feat) - _heatmaps_flip, _displacements_flip = self.forward(_feat_flip) - - batch_size = _heatmaps.shape[0] - - _heatmaps = to_numpy(_heatmaps) - _displacements = to_numpy(_displacements) - - _heatmaps_flip = to_numpy(_heatmaps_flip) - _displacements_flip = to_numpy(_displacements_flip) - preds = [] - for b in range(batch_size): - _keypoints, _keypoint_scores = self.decoder.decode( - _heatmaps[b], _displacements[b]) - - _keypoints_flip, _keypoint_scores_flip = self.decoder.decode( - _heatmaps_flip[b], _displacements_flip[b]) - - # flip the kps coords - real_w = self.decoder.input_size[0] - real_h = self.decoder.input_size[1] - - # the coordinate range is 0-255 for 256x256 input size - _keypoints_flip /= (real_w - 1) - _keypoints_flip = flip_coordinates( - _keypoints_flip, - flip_indices=flip_indices, - shift_coords=False, - input_size=((real_w - 1), (real_h - 1))) - _keypoints_flip *= (real_w - 1) - - _keypoints = (_keypoints + _keypoints_flip) / 2. - # pack outputs - preds.append(InstanceData(keypoints=_keypoints)) - return preds - - else: - batch_heatmaps, batch_displacements = self.forward(feats) - - preds = self.decode(batch_heatmaps, batch_displacements, test_cfg, - metainfo) - - return preds - - def decode(self, - heatmaps: Tuple[Tensor], - offside: Tuple[Tensor], - test_cfg: ConfigType = {}, - metainfo: dict = {}) -> InstanceList: - """Decode keypoints from outputs. - - Args: - heatmaps (Tuple[Tensor]): The output heatmaps inferred from one - image or multi-scale images. - offside (Tuple[Tensor]): The output displacement fields - inferred from one image or multi-scale images. - test_cfg (dict): The runtime config for testing process. Defaults - to {} - metainfo (dict): The metainfo of test dataset. Defaults to {} - - Returns: - List[InstanceData]: A list of InstanceData, each contains the - decoded pose information of the instances of one data sample. - """ - - if self.decoder is None: - raise RuntimeError( - f'The decoder has not been set in {self.__class__.__name__}. ' - 'Please set the decoder configs in the init parameters to ' - 'enable head methods `head.predict()` and `head.decode()`') - - preds = [] - batch_size = heatmaps.shape[0] - - heatmaps = to_numpy(heatmaps) - offside = to_numpy(offside) - - for b in range(batch_size): - keypoints, keypoint_scores = self.decoder.decode( - heatmaps[b], offside[b]) - - # pack outputs - preds.append( - InstanceData( - keypoints=keypoints, keypoint_scores=keypoint_scores)) - - return preds - - def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, - **kwargs): - """A hook function to convert old-version state dict of - :class:`DeepposeRegressionHead` (before MMPose v1.0.0) to a - compatible format of :class:`RegressionHead`. - - The hook will be automatically registered during initialization. 
- """ - version = local_meta.get('version', None) - if version and version >= self._version: - return - - # convert old-version state dict - keys = list(state_dict.keys()) - for _k in keys: - if not _k.startswith(prefix): - continue - v = state_dict.pop(_k) - k = _k[len(prefix):] - # In old version, "final_layer" includes both intermediate - # conv layers (new "conv_layers") and final conv layers (new - # "final_layer"). - # - # If there is no intermediate conv layer, old "final_layer" will - # have keys like "final_layer.xxx", which should be still - # named "final_layer.xxx"; - # - # If there are intermediate conv layers, old "final_layer" will - # have keys like "final_layer.n.xxx", where the weights of the last - # one should be renamed "final_layer.xxx", and others should be - # renamed "conv_layers.n.xxx" - k_parts = k.split('.') - if k_parts[0] == 'final_layer': - if len(k_parts) == 3: - assert isinstance(self.conv_layers, nn.Sequential) - idx = int(k_parts[1]) - if idx < len(self.conv_layers): - # final_layer.n.xxx -> conv_layers.n.xxx - k_new = 'conv_layers.' + '.'.join(k_parts[1:]) - else: - # final_layer.n.xxx -> final_layer.xxx - k_new = 'final_layer.' + k_parts[2] - else: - # final_layer.xxx remains final_layer.xxx - k_new = k - else: - k_new = k - - state_dict[prefix + k_new] = v +# Copyright (c) OpenMMLab. All rights reserved. + +from typing import Optional, Sequence, Tuple, Union + +import torch +import torch.nn as nn +from mmcv.cnn import build_conv_layer +from mmengine.model import ModuleDict +from mmengine.structures import InstanceData +from torch import Tensor + +from mmpose.evaluation.functional import pose_pck_accuracy +from mmpose.models.heads.base_head import BaseHead +from mmpose.models.utils.tta import flip_coordinates +from mmpose.registry import KEYPOINT_CODECS, MODELS +from mmpose.utils.tensor_utils import to_numpy +from mmpose.utils.typing import (ConfigType, Features, InstanceList, + OptConfigType, OptSampleList, Predictions) + +OptIntSeq = Optional[Sequence[int]] + + +@MODELS.register_module() +class SKPSHead(BaseHead): + """DisEntangled Keypoint Regression head introduced in `Bottom-up human + pose estimation via disentangled keypoint regression`_ by Geng et al + (2021). The head is composed of a heatmap branch and a displacement branch. + + Args: + in_channels (int | Sequence[int]): Number of channels in the input + feature map + out_channels (int): Number of channels in the output heatmap + conv_out_channels (Sequence[int], optional): The output channel number + of each intermediate conv layer. ``None`` means no intermediate + conv layer between deconv layers and the final conv layer. + Defaults to ``None`` + conv_kernel_sizes (Sequence[int | tuple], optional): The kernel size + of each intermediate conv layer. Defaults to ``None`` + final_layer (dict): Arguments of the final Conv2d layer. + Defaults to ``dict(kernel_size=1)`` + loss (Config): Config of the keypoint loss. Defaults to use + :class:`KeypointMSELoss` + decoder (Config, optional): The decoder config that controls decoding + keypoint coordinates from the network output. Defaults to ``None`` + init_cfg (Config, optional): Config to control the initialization. See + :attr:`default_init_cfg` for default settings + + + .. 
_`Bottom-up human pose estimation via disentangled keypoint regression`: + https://arxiv.org/abs/2104.02300 + """ + + _version = 2 + + def __init__(self, + in_channels: Union[int, Sequence[int]], + out_channels: int, + conv_out_channels: OptIntSeq = None, + conv_kernel_sizes: OptIntSeq = None, + final_layer: dict = dict(kernel_size=1), + heatmap_loss: ConfigType = dict( + type='AdaptiveWingLoss', use_target_weight=True), + offside_loss: ConfigType = dict( + type='AdaptiveWingLoss', use_target_weight=True), + decoder: OptConfigType = None, + init_cfg: OptConfigType = None): + + if init_cfg is None: + init_cfg = self.default_init_cfg + + super().__init__(init_cfg) + + self.in_channels = in_channels + self.out_channels = out_channels + + if conv_out_channels: + if conv_kernel_sizes is None or len(conv_out_channels) != len( + conv_kernel_sizes): + raise ValueError( + '"conv_out_channels" and "conv_kernel_sizes" should ' + 'be integer sequences with the same length. Got ' + f'mismatched lengths {conv_out_channels} and ' + f'{conv_kernel_sizes}') + + self.conv_layers = self._make_conv_layers( + in_channels=in_channels, + layer_out_channels=conv_out_channels, + layer_kernel_sizes=conv_kernel_sizes) + in_channels = conv_out_channels[-1] + else: + self.conv_layers = nn.Identity() + + if final_layer is not None: + cfg = dict( + type='Conv2d', + in_channels=in_channels, + out_channels=self.out_channels * 3, + kernel_size=1, + bias=True) + cfg.update(final_layer) + self.final_layer = build_conv_layer(cfg) + else: + self.final_layer = nn.Identity() + + # build losses + self.loss_module = ModuleDict( + dict( + heatmap=MODELS.build(heatmap_loss), + offside=MODELS.build(offside_loss), + )) + + # build decoder + if decoder is not None: + self.decoder = KEYPOINT_CODECS.build(decoder) + else: + self.decoder = None + + # Register the hook to automatically convert old version state dicts + self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook) + + @property + def default_init_cfg(self): + init_cfg = [ + dict(type='Normal', layer=['Conv2d', 'ConvTranspose2d'], std=0.01), + dict(type='Constant', layer='BatchNorm2d', val=1) + ] + return init_cfg + + def _make_conv_layers(self, in_channels: int, + layer_out_channels: Sequence[int], + layer_kernel_sizes: Sequence[int]) -> nn.Module: + """Create convolutional layers by given parameters.""" + + layers = [] + for out_channels, kernel_size in zip(layer_out_channels, + layer_kernel_sizes): + padding = (kernel_size - 1) // 2 + cfg = dict( + type='Conv2d', + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=1, + padding=padding) + layers.append(build_conv_layer(cfg)) + layers.append(nn.BatchNorm2d(num_features=out_channels)) + layers.append(nn.ReLU(inplace=True)) + in_channels = out_channels + + return nn.Sequential(*layers) + + def forward(self, feats: Tuple[Tensor]) -> Tensor: + """Forward the network. The input is multi scale feature maps and the + output is a tuple of heatmap and displacement. + + Args: + feats (Tuple[Tensor]): Multi scale feature maps. + + Returns: + Tuple[Tensor]: output heatmap and displacement. + """ + x = feats[-1] + + x = self.conv_layers(x) + x = self.final_layer(x) + heatmaps = x[:, :self.out_channels, ...] + offside = x[:, self.out_channels:, ...] + return heatmaps, offside + + def loss(self, + feats: Tuple[Tensor], + batch_data_samples: OptSampleList, + train_cfg: ConfigType = {}) -> dict: + """Calculate losses from a batch of inputs and data samples. 
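For reference, the tensors handled here follow the channel layout set up in `forward()`: `final_layer` emits `out_channels * 3` maps, split into K score heatmaps followed by 2K offset maps, and the offset loss below is computed separately on the x half and the y half. A toy illustration of that split (placeholder sizes, random tensors):

```python
import torch

N, K, H, W = 2, 98, 64, 64               # batch of 2, WFLW-sized outputs
out = torch.randn(N, K * 3, H, W)        # stand-in for the final_layer output

pred_heatmaps = out[:, :K]               # K score heatmaps
pred_offsets = out[:, K:]                # 2K offset maps
offsets_x = pred_offsets[:, :K]          # first half: x offsets
offsets_y = pred_offsets[:, K:]          # second half: y offsets
print(pred_heatmaps.shape, offsets_x.shape, offsets_y.shape)
# -> three tensors of shape (2, 98, 64, 64)
```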
+ + Args: + feats (Tuple[Tensor]): The multi-stage features + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + train_cfg (dict): The runtime config for training process. + Defaults to {} + + Returns: + dict: A dictionary of losses. + """ + pred_heatmaps, pred_offside = self.forward(feats) + gt_heatmaps = torch.stack( + [d.gt_fields.heatmaps for d in batch_data_samples]) + keypoint_weights = torch.stack([ + d.gt_instance_labels.keypoint_weights for d in batch_data_samples + ]) + gt_offside = torch.stack( + [d.gt_fields.displacements for d in batch_data_samples]) + + # calculate losses + losses = dict() + heatmap_loss = self.loss_module['heatmap'](pred_heatmaps, gt_heatmaps, + keypoint_weights) + + n, c, h, w = pred_offside.size() + offside_loss_x = self.loss_module['offside'](pred_offside[:, :c // 2], + gt_offside[:, :c // 2], + gt_heatmaps) + + offside_loss_y = self.loss_module['offside'](pred_offside[:, c // 2:], + gt_offside[:, c // 2:], + gt_heatmaps) + + offside_loss = (offside_loss_x + offside_loss_y) / 2. + + losses.update({ + 'loss/heatmap': heatmap_loss, + 'loss/offside': offside_loss, + }) + # calculate accuracy + if train_cfg.get('compute_acc', True): + _, avg_acc, _ = pose_pck_accuracy( + output=to_numpy(pred_heatmaps), + target=to_numpy(gt_heatmaps), + mask=to_numpy(keypoint_weights) > 0) + + acc_pose = torch.tensor(avg_acc, device=gt_heatmaps.device) + losses.update(acc_pose=acc_pose) + + return losses + + def predict(self, + feats: Features, + batch_data_samples: OptSampleList, + test_cfg: ConfigType = {}) -> Predictions: + """Predict results from features. + + Args: + feats (Tuple[Tensor] | List[Tuple[Tensor]]): The multi-stage + features (or multiple multi-scale features in TTA) + batch_data_samples (List[:obj:`PoseDataSample`]): The batch + data samples + test_cfg (dict): The runtime config for testing process. Defaults + to {} + + Returns: + Union[InstanceList | Tuple[InstanceList | PixelDataList]]: If + ``test_cfg['output_heatmap']==True``, return both pose and heatmap + prediction; otherwise only return the pose prediction. 
+ + The pose prediction is a list of ``InstanceData``, each contains + the following fields: + + - keypoints (np.ndarray): predicted keypoint coordinates in + shape (num_instances, K, D) where K is the keypoint number + and D is the keypoint dimension + - keypoint_scores (np.ndarray): predicted keypoint scores in + shape (num_instances, K) + """ + + flip_test = test_cfg.get('flip_test', False) + metainfo = batch_data_samples[0].metainfo + + if flip_test: + assert isinstance(feats, list) and len(feats) == 2 + flip_indices = metainfo['flip_indices'] + _feat, _feat_flip = feats + _heatmaps, _displacements = self.forward(_feat) + _heatmaps_flip, _displacements_flip = self.forward(_feat_flip) + + batch_size = _heatmaps.shape[0] + + _heatmaps = to_numpy(_heatmaps) + _displacements = to_numpy(_displacements) + + _heatmaps_flip = to_numpy(_heatmaps_flip) + _displacements_flip = to_numpy(_displacements_flip) + preds = [] + for b in range(batch_size): + _keypoints, _keypoint_scores = self.decoder.decode( + _heatmaps[b], _displacements[b]) + + _keypoints_flip, _keypoint_scores_flip = self.decoder.decode( + _heatmaps_flip[b], _displacements_flip[b]) + + # flip the kps coords + real_w = self.decoder.input_size[0] + real_h = self.decoder.input_size[1] + + # the coordinate range is 0-255 for 256x256 input size + _keypoints_flip /= (real_w - 1) + _keypoints_flip = flip_coordinates( + _keypoints_flip, + flip_indices=flip_indices, + shift_coords=False, + input_size=((real_w - 1), (real_h - 1))) + _keypoints_flip *= (real_w - 1) + + _keypoints = (_keypoints + _keypoints_flip) / 2. + # pack outputs + preds.append(InstanceData(keypoints=_keypoints)) + return preds + + else: + batch_heatmaps, batch_displacements = self.forward(feats) + + preds = self.decode(batch_heatmaps, batch_displacements, test_cfg, + metainfo) + + return preds + + def decode(self, + heatmaps: Tuple[Tensor], + offside: Tuple[Tensor], + test_cfg: ConfigType = {}, + metainfo: dict = {}) -> InstanceList: + """Decode keypoints from outputs. + + Args: + heatmaps (Tuple[Tensor]): The output heatmaps inferred from one + image or multi-scale images. + offside (Tuple[Tensor]): The output displacement fields + inferred from one image or multi-scale images. + test_cfg (dict): The runtime config for testing process. Defaults + to {} + metainfo (dict): The metainfo of test dataset. Defaults to {} + + Returns: + List[InstanceData]: A list of InstanceData, each contains the + decoded pose information of the instances of one data sample. + """ + + if self.decoder is None: + raise RuntimeError( + f'The decoder has not been set in {self.__class__.__name__}. ' + 'Please set the decoder configs in the init parameters to ' + 'enable head methods `head.predict()` and `head.decode()`') + + preds = [] + batch_size = heatmaps.shape[0] + + heatmaps = to_numpy(heatmaps) + offside = to_numpy(offside) + + for b in range(batch_size): + keypoints, keypoint_scores = self.decoder.decode( + heatmaps[b], offside[b]) + + # pack outputs + preds.append( + InstanceData( + keypoints=keypoints, keypoint_scores=keypoint_scores)) + + return preds + + def _load_state_dict_pre_hook(self, state_dict, prefix, local_meta, *args, + **kwargs): + """A hook function to convert old-version state dict of + :class:`DeepposeRegressionHead` (before MMPose v1.0.0) to a + compatible format of :class:`RegressionHead`. + + The hook will be automatically registered during initialization. 
+ """ + version = local_meta.get('version', None) + if version and version >= self._version: + return + + # convert old-version state dict + keys = list(state_dict.keys()) + for _k in keys: + if not _k.startswith(prefix): + continue + v = state_dict.pop(_k) + k = _k[len(prefix):] + # In old version, "final_layer" includes both intermediate + # conv layers (new "conv_layers") and final conv layers (new + # "final_layer"). + # + # If there is no intermediate conv layer, old "final_layer" will + # have keys like "final_layer.xxx", which should be still + # named "final_layer.xxx"; + # + # If there are intermediate conv layers, old "final_layer" will + # have keys like "final_layer.n.xxx", where the weights of the last + # one should be renamed "final_layer.xxx", and others should be + # renamed "conv_layers.n.xxx" + k_parts = k.split('.') + if k_parts[0] == 'final_layer': + if len(k_parts) == 3: + assert isinstance(self.conv_layers, nn.Sequential) + idx = int(k_parts[1]) + if idx < len(self.conv_layers): + # final_layer.n.xxx -> conv_layers.n.xxx + k_new = 'conv_layers.' + '.'.join(k_parts[1:]) + else: + # final_layer.n.xxx -> final_layer.xxx + k_new = 'final_layer.' + k_parts[2] + else: + # final_layer.xxx remains final_layer.xxx + k_new = k + else: + k_new = k + + state_dict[prefix + k_new] = v diff --git a/projects/yolox_pose/README.md b/projects/yolox_pose/README.md index 264b65fe9f..e544252600 100644 --- a/projects/yolox_pose/README.md +++ b/projects/yolox_pose/README.md @@ -1,144 +1,144 @@ -# YOLOX-Pose - -This project implements a YOLOX-based human pose estimator, utilizing the approach outlined in **YOLO-Pose: Enhancing YOLO for Multi Person Pose Estimation Using Object Keypoint Similarity Loss** (CVPRW 2022). This pose estimator is lightweight and quick, making it well-suited for crowded scenes. - -
    - -## Usage - -### Prerequisites - -- Python 3.7 or higher -- PyTorch 1.6 or higher -- [MMEngine](https://github.com/open-mmlab/mmengine) v0.6.0 or higher -- [MMCV](https://github.com/open-mmlab/mmcv) v2.0.0rc4 or higher -- [MMDetection](https://github.com/open-mmlab/mmdetection) v3.0.0rc6 or higher -- [MMYOLO](https://github.com/open-mmlab/mmyolo) v0.5.0 or higher -- [MMPose](https://github.com/open-mmlab/mmpose) v1.0.0rc1 or higher - -All the commands below rely on the correct configuration of `PYTHONPATH`, which should point to the project's directory so that Python can locate the module files. **In `yolox-pose/` root directory**, run the following line to add the current directory to `PYTHONPATH`: - -```shell -export PYTHONPATH=`pwd`:$PYTHONPATH -``` - -### Inference - -Users can apply YOLOX-Pose models to estimate human poses using the inferencer found in the MMPose core package. Use the command below: - -```shell -python demo/inferencer_demo.py $INPUTS \ - --pose2d $CONFIG --pose2d-weights $CHECKPOINT --scope mmyolo \ - [--show] [--vis-out-dir $VIS_OUT_DIR] [--pred-out-dir $PRED_OUT_DIR] -``` - -For more information on using the inferencer, please see [this document](https://mmpose.readthedocs.io/en/latest/user_guides/inference.html#out-of-the-box-inferencer). - -Here's an example code: - -```shell -python demo/inferencer_demo.py ../../tests/data/coco/000000000785.jpg \ - --pose2d configs/yolox-pose_s_8xb32-300e_coco.py \ - --pose2d-weights https://download.openmmlab.com/mmpose/v1/projects/yolox-pose/yolox-pose_s_8xb32-300e_coco-9f5e3924_20230321.pth \ - --scope mmyolo --vis-out-dir vis_results -``` - -This will create an output image `vis_results/000000000785.jpg`, which appears like: - -
    - -### Training & Testing - -#### Data Preparation - -Prepare the COCO dataset according to the [instruction](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#coco). - -#### Commands - -**To train with multiple GPUs:** - -```shell -bash tools/dist_train.sh $CONFIG 8 --amp -``` - -**To train with slurm:** - -```shell -bash tools/slurm_train.sh $PARTITION $JOBNAME $CONFIG $WORKDIR --amp -``` - -**To test with single GPU:** - -```shell -python tools/test.py $CONFIG $CHECKPOINT -``` - -**To test with multiple GPUs:** - -```shell -bash tools/dist_test.sh $CONFIG $CHECKPOINT 8 -``` - -**To test with multiple GPUs by slurm:** - -```shell -bash tools/slurm_test.sh $PARTITION $JOBNAME $CONFIG $CHECKPOINT -``` - -### Results - -Results on COCO val2017 - -| Model | Input Size | AP | AP50 | AP75 | AR | AR50 | Download | -| :-------------------------------------------------------------: | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :----------------------------------------------------------------------: | -| [YOLOX-tiny-Pose](./configs/yolox-pose_tiny_4xb64-300e_coco.py) | 416 | 0.518 | 0.799 | 0.545 | 0.566 | 0.841 | [model](https://download.openmmlab.com/mmpose/v1/projects/yolox-pose/yolox-pose_tiny_4xb64-300e_coco-c47dd83b_20230321.pth) \| [log](https://download.openmmlab.com/mmpose/v1/projects/yolox-pose/yolox-pose_tiny_4xb64-300e_coco_20230321.json) | -| [YOLOX-s-Pose](./configs/yolox-pose_s_8xb32-300e_coco.py) | 640 | 0.632 | 0.875 | 0.692 | 0.676 | 0.907 | [model](https://download.openmmlab.com/mmpose/v1/projects/yolox-pose/yolox-pose_s_8xb32-300e_coco-9f5e3924_20230321.pth) \| [log](https://download.openmmlab.com/mmpose/v1/projects/yolox-pose/yolox-pose_s_8xb32-300e_coco_20230321.json) | -| [YOLOX-m-Pose](./configs/yolox-pose_m_4xb64-300e_coco.py) | 640 | 0.685 | 0.897 | 0.753 | 0.727 | 0.925 | [model](https://download.openmmlab.com/mmpose/v1/projects/yolox-pose/yolox-pose_m_4xb64-300e_coco-cbd11d30_20230321.pth) \| [log](https://download.openmmlab.com/mmpose/v1/projects/yolox-pose/yolox-pose_m_4xb64-300e_coco_20230321.json) | -| [YOLOX-l-Pose](./configs/yolox-pose_l_4xb64-300e_coco.py) | 640 | 0.706 | 0.907 | 0.775 | 0.747 | 0.934 | [model](https://download.openmmlab.com/mmpose/v1/projects/yolox-pose/yolox-pose_l_4xb64-300e_coco-122e4cf8_20230321.pth) \| [log](https://download.openmmlab.com/mmpose/v1/projects/yolox-pose/yolox-pose_l_4xb64-300e_coco_20230321.json) | - -We have only trained models with an input size of 640, as we couldn't replicate the performance enhancement mentioned in the paper when increasing the input size from 640 to 960. We warmly welcome any contributions if you can successfully reproduce the results from the paper! - -**NEW!** - -[MMYOLO](https://github.com/open-mmlab/mmyolo/blob/dev/configs/yolox/README.md#yolox-pose) also supports YOLOX-Pose and achieves better performance. Their models are fully compatible with this project. 
Here are their results on COCO val2017: - -| Backbone | Size | Batch Size | AMP | RTMDet-Hyp | Mem (GB) | AP | Config | Download | -| :--------: | :--: | :--------: | :-: | :--------: | :------: | :--: | :------------------------------------------------------------------------: | :---------------------------------------------------------------------------: | -| YOLOX-tiny | 416 | 8xb32 | Yes | Yes | 5.3 | 52.8 | [config](https://github.com/open-mmlab/mmyolo/blob/dev/configs/yolox/pose/yolox-pose_tiny_8xb32-300e-rtmdet-hyp_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_tiny_8xb32-300e-rtmdet-hyp_coco/yolox-pose_tiny_8xb32-300e-rtmdet-hyp_coco_20230427_080351-2117af67.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_tiny_8xb32-300e-rtmdet-hyp_coco/yolox-pose_tiny_8xb32-300e-rtmdet-hyp_coco_20230427_080351.log.json) | -| YOLOX-s | 640 | 8xb32 | Yes | Yes | 10.7 | 63.7 | [config](https://github.com/open-mmlab/mmyolo/blob/dev/configs/yolox/pose/yolox-pose_s_8xb32-300e-rtmdet-hyp_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_s_8xb32-300e-rtmdet-hyp_coco/yolox-pose_s_8xb32-300e-rtmdet-hyp_coco_20230427_005150-e87d843a.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_s_8xb32-300e-rtmdet-hyp_coco/yolox-pose_s_8xb32-300e-rtmdet-hyp_coco_20230427_005150.log.json) | -| YOLOX-m | 640 | 8xb32 | Yes | Yes | 19.2 | 69.3 | [config](https://github.com/open-mmlab/mmyolo/blob/dev/configs/yolox/pose/yolox-pose_m_8xb32-300e-rtmdet-hyp_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_m_8xb32-300e-rtmdet-hyp_coco/yolox-pose_m_8xb32-300e-rtmdet-hyp_coco_20230427_094024-bbeacc1c.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_m_8xb32-300e-rtmdet-hyp_coco/yolox-pose_m_8xb32-300e-rtmdet-hyp_coco_20230427_094024.log.json) | -| YOLOX-l | 640 | 8xb32 | Yes | Yes | 30.3 | 71.1 | [config](https://github.com/open-mmlab/mmyolo/blob/dev/configs/yolox/pose/yolox-pose_l_8xb32-300e-rtmdet-hyp_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_l_8xb32-300e-rtmdet-hyp_coco/yolox-pose_l_8xb32-300e-rtmdet-hyp_coco_20230427_041140-82d65ac8.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_l_8xb32-300e-rtmdet-hyp_coco/yolox-pose_l_8xb32-300e-rtmdet-hyp_coco_20230427_041140.log.json) | - -## Citation - -If this project benefits your work, please kindly consider citing the original papers: - -```bibtex -@inproceedings{maji2022yolo, - title={YOLO-Pose: Enhancing YOLO for Multi Person Pose Estimation Using Object Keypoint Similarity Loss}, - author={Maji, Debapriya and Nagori, Soyeb and Mathew, Manu and Poddar, Deepak}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={2637--2646}, - year={2022} -} -``` - -```bibtex -@article{yolox2021, - title={{YOLOX}: Exceeding YOLO Series in 2021}, - author={Ge, Zheng and Liu, Songtao and Wang, Feng and Li, Zeming and Sun, Jian}, - journal={arXiv preprint arXiv:2107.08430}, - year={2021} -} -``` - -Additionally, please cite our work as well: - -```bibtex -@misc{mmpose2020, - title={OpenMMLab Pose Estimation Toolbox and Benchmark}, - author={MMPose Contributors}, - howpublished = {\url{https://github.com/open-mmlab/mmpose}}, - year={2020} -} -``` +# YOLOX-Pose + +This project implements a YOLOX-based human pose estimator, utilizing the approach outlined in **YOLO-Pose: Enhancing YOLO for Multi Person 
Pose Estimation Using Object Keypoint Similarity Loss** (CVPRW 2022). This pose estimator is lightweight and quick, making it well-suited for crowded scenes. + +
    + +## Usage + +### Prerequisites + +- Python 3.7 or higher +- PyTorch 1.6 or higher +- [MMEngine](https://github.com/open-mmlab/mmengine) v0.6.0 or higher +- [MMCV](https://github.com/open-mmlab/mmcv) v2.0.0rc4 or higher +- [MMDetection](https://github.com/open-mmlab/mmdetection) v3.0.0rc6 or higher +- [MMYOLO](https://github.com/open-mmlab/mmyolo) v0.5.0 or higher +- [MMPose](https://github.com/open-mmlab/mmpose) v1.0.0rc1 or higher + +All the commands below rely on the correct configuration of `PYTHONPATH`, which should point to the project's directory so that Python can locate the module files. **In `yolox-pose/` root directory**, run the following line to add the current directory to `PYTHONPATH`: + +```shell +export PYTHONPATH=`pwd`:$PYTHONPATH +``` + +### Inference + +Users can apply YOLOX-Pose models to estimate human poses using the inferencer found in the MMPose core package. Use the command below: + +```shell +python demo/inferencer_demo.py $INPUTS \ + --pose2d $CONFIG --pose2d-weights $CHECKPOINT --scope mmyolo \ + [--show] [--vis-out-dir $VIS_OUT_DIR] [--pred-out-dir $PRED_OUT_DIR] +``` + +For more information on using the inferencer, please see [this document](https://mmpose.readthedocs.io/en/latest/user_guides/inference.html#out-of-the-box-inferencer). + +Here's an example code: + +```shell +python demo/inferencer_demo.py ../../tests/data/coco/000000000785.jpg \ + --pose2d configs/yolox-pose_s_8xb32-300e_coco.py \ + --pose2d-weights https://download.openmmlab.com/mmpose/v1/projects/yolox-pose/yolox-pose_s_8xb32-300e_coco-9f5e3924_20230321.pth \ + --scope mmyolo --vis-out-dir vis_results +``` + +This will create an output image `vis_results/000000000785.jpg`, which appears like: + +
    + +### Training & Testing + +#### Data Preparation + +Prepare the COCO dataset according to the [instruction](https://mmpose.readthedocs.io/en/latest/dataset_zoo/2d_body_keypoint.html#coco). + +#### Commands + +**To train with multiple GPUs:** + +```shell +bash tools/dist_train.sh $CONFIG 8 --amp +``` + +**To train with slurm:** + +```shell +bash tools/slurm_train.sh $PARTITION $JOBNAME $CONFIG $WORKDIR --amp +``` + +**To test with single GPU:** + +```shell +python tools/test.py $CONFIG $CHECKPOINT +``` + +**To test with multiple GPUs:** + +```shell +bash tools/dist_test.sh $CONFIG $CHECKPOINT 8 +``` + +**To test with multiple GPUs by slurm:** + +```shell +bash tools/slurm_test.sh $PARTITION $JOBNAME $CONFIG $CHECKPOINT +``` + +### Results + +Results on COCO val2017 + +| Model | Input Size | AP | AP50 | AP75 | AR | AR50 | Download | +| :-------------------------------------------------------------: | :--------: | :---: | :-------------: | :-------------: | :---: | :-------------: | :----------------------------------------------------------------------: | +| [YOLOX-tiny-Pose](./configs/yolox-pose_tiny_4xb64-300e_coco.py) | 416 | 0.518 | 0.799 | 0.545 | 0.566 | 0.841 | [model](https://download.openmmlab.com/mmpose/v1/projects/yolox-pose/yolox-pose_tiny_4xb64-300e_coco-c47dd83b_20230321.pth) \| [log](https://download.openmmlab.com/mmpose/v1/projects/yolox-pose/yolox-pose_tiny_4xb64-300e_coco_20230321.json) | +| [YOLOX-s-Pose](./configs/yolox-pose_s_8xb32-300e_coco.py) | 640 | 0.632 | 0.875 | 0.692 | 0.676 | 0.907 | [model](https://download.openmmlab.com/mmpose/v1/projects/yolox-pose/yolox-pose_s_8xb32-300e_coco-9f5e3924_20230321.pth) \| [log](https://download.openmmlab.com/mmpose/v1/projects/yolox-pose/yolox-pose_s_8xb32-300e_coco_20230321.json) | +| [YOLOX-m-Pose](./configs/yolox-pose_m_4xb64-300e_coco.py) | 640 | 0.685 | 0.897 | 0.753 | 0.727 | 0.925 | [model](https://download.openmmlab.com/mmpose/v1/projects/yolox-pose/yolox-pose_m_4xb64-300e_coco-cbd11d30_20230321.pth) \| [log](https://download.openmmlab.com/mmpose/v1/projects/yolox-pose/yolox-pose_m_4xb64-300e_coco_20230321.json) | +| [YOLOX-l-Pose](./configs/yolox-pose_l_4xb64-300e_coco.py) | 640 | 0.706 | 0.907 | 0.775 | 0.747 | 0.934 | [model](https://download.openmmlab.com/mmpose/v1/projects/yolox-pose/yolox-pose_l_4xb64-300e_coco-122e4cf8_20230321.pth) \| [log](https://download.openmmlab.com/mmpose/v1/projects/yolox-pose/yolox-pose_l_4xb64-300e_coco_20230321.json) | + +We have only trained models with an input size of 640, as we couldn't replicate the performance enhancement mentioned in the paper when increasing the input size from 640 to 960. We warmly welcome any contributions if you can successfully reproduce the results from the paper! + +**NEW!** + +[MMYOLO](https://github.com/open-mmlab/mmyolo/blob/dev/configs/yolox/README.md#yolox-pose) also supports YOLOX-Pose and achieves better performance. Their models are fully compatible with this project. 
Here are their results on COCO val2017: + +| Backbone | Size | Batch Size | AMP | RTMDet-Hyp | Mem (GB) | AP | Config | Download | +| :--------: | :--: | :--------: | :-: | :--------: | :------: | :--: | :------------------------------------------------------------------------: | :---------------------------------------------------------------------------: | +| YOLOX-tiny | 416 | 8xb32 | Yes | Yes | 5.3 | 52.8 | [config](https://github.com/open-mmlab/mmyolo/blob/dev/configs/yolox/pose/yolox-pose_tiny_8xb32-300e-rtmdet-hyp_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_tiny_8xb32-300e-rtmdet-hyp_coco/yolox-pose_tiny_8xb32-300e-rtmdet-hyp_coco_20230427_080351-2117af67.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_tiny_8xb32-300e-rtmdet-hyp_coco/yolox-pose_tiny_8xb32-300e-rtmdet-hyp_coco_20230427_080351.log.json) | +| YOLOX-s | 640 | 8xb32 | Yes | Yes | 10.7 | 63.7 | [config](https://github.com/open-mmlab/mmyolo/blob/dev/configs/yolox/pose/yolox-pose_s_8xb32-300e-rtmdet-hyp_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_s_8xb32-300e-rtmdet-hyp_coco/yolox-pose_s_8xb32-300e-rtmdet-hyp_coco_20230427_005150-e87d843a.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_s_8xb32-300e-rtmdet-hyp_coco/yolox-pose_s_8xb32-300e-rtmdet-hyp_coco_20230427_005150.log.json) | +| YOLOX-m | 640 | 8xb32 | Yes | Yes | 19.2 | 69.3 | [config](https://github.com/open-mmlab/mmyolo/blob/dev/configs/yolox/pose/yolox-pose_m_8xb32-300e-rtmdet-hyp_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_m_8xb32-300e-rtmdet-hyp_coco/yolox-pose_m_8xb32-300e-rtmdet-hyp_coco_20230427_094024-bbeacc1c.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_m_8xb32-300e-rtmdet-hyp_coco/yolox-pose_m_8xb32-300e-rtmdet-hyp_coco_20230427_094024.log.json) | +| YOLOX-l | 640 | 8xb32 | Yes | Yes | 30.3 | 71.1 | [config](https://github.com/open-mmlab/mmyolo/blob/dev/configs/yolox/pose/yolox-pose_l_8xb32-300e-rtmdet-hyp_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_l_8xb32-300e-rtmdet-hyp_coco/yolox-pose_l_8xb32-300e-rtmdet-hyp_coco_20230427_041140-82d65ac8.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_l_8xb32-300e-rtmdet-hyp_coco/yolox-pose_l_8xb32-300e-rtmdet-hyp_coco_20230427_041140.log.json) | + +## Citation + +If this project benefits your work, please kindly consider citing the original papers: + +```bibtex +@inproceedings{maji2022yolo, + title={YOLO-Pose: Enhancing YOLO for Multi Person Pose Estimation Using Object Keypoint Similarity Loss}, + author={Maji, Debapriya and Nagori, Soyeb and Mathew, Manu and Poddar, Deepak}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={2637--2646}, + year={2022} +} +``` + +```bibtex +@article{yolox2021, + title={{YOLOX}: Exceeding YOLO Series in 2021}, + author={Ge, Zheng and Liu, Songtao and Wang, Feng and Li, Zeming and Sun, Jian}, + journal={arXiv preprint arXiv:2107.08430}, + year={2021} +} +``` + +Additionally, please cite our work as well: + +```bibtex +@misc{mmpose2020, + title={OpenMMLab Pose Estimation Toolbox and Benchmark}, + author={MMPose Contributors}, + howpublished = {\url{https://github.com/open-mmlab/mmpose}}, + year={2020} +} +``` diff --git a/projects/yolox_pose/configs/_base_/default_runtime.py b/projects/yolox_pose/configs/_base_/default_runtime.py index 7057585015..cf2a7f5367 100644 
--- a/projects/yolox_pose/configs/_base_/default_runtime.py +++ b/projects/yolox_pose/configs/_base_/default_runtime.py @@ -1,41 +1,41 @@ -default_scope = 'mmyolo' -custom_imports = dict(imports=['models', 'datasets']) - -# hooks -default_hooks = dict( - timer=dict(type='IterTimerHook'), - logger=dict(type='LoggerHook', interval=50), - param_scheduler=dict(type='ParamSchedulerHook'), - checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3), - sampler_seed=dict(type='DistSamplerSeedHook'), - visualization=dict(type='mmpose.PoseVisualizationHook', enable=False), -) - -# multi-processing backend -env_cfg = dict( - cudnn_benchmark=False, - mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), - dist_cfg=dict(backend='nccl'), -) - -# visualizer -vis_backends = [dict(type='LocalVisBackend')] -visualizer = dict( - type='mmpose.PoseLocalVisualizer', - vis_backends=vis_backends, - name='visualizer') - -# logger -log_processor = dict( - type='LogProcessor', window_size=50, by_epoch=True, num_digits=6) -log_level = 'INFO' -load_from = None -resume = False - -# file I/O backend -backend_args = dict(backend='local') - -# training/validation/testing progress -train_cfg = dict() -val_cfg = dict(type='ValLoop') -test_cfg = dict(type='TestLoop') +default_scope = 'mmyolo' +custom_imports = dict(imports=['models', 'datasets']) + +# hooks +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='mmpose.PoseVisualizationHook', enable=False), +) + +# multi-processing backend +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl'), +) + +# visualizer +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='mmpose.PoseLocalVisualizer', + vis_backends=vis_backends, + name='visualizer') + +# logger +log_processor = dict( + type='LogProcessor', window_size=50, by_epoch=True, num_digits=6) +log_level = 'INFO' +load_from = None +resume = False + +# file I/O backend +backend_args = dict(backend='local') + +# training/validation/testing progress +train_cfg = dict() +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') diff --git a/projects/yolox_pose/configs/_base_/py_default_runtime.py b/projects/yolox_pose/configs/_base_/py_default_runtime.py index 354d96ad0d..f0c1f3fe68 100644 --- a/projects/yolox_pose/configs/_base_/py_default_runtime.py +++ b/projects/yolox_pose/configs/_base_/py_default_runtime.py @@ -1,45 +1,45 @@ -from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook, - LoggerHook, ParamSchedulerHook) -from mmengine.runner import LogProcessor, TestLoop, ValLoop -from mmengine.visualization import LocalVisBackend - -from mmpose.engine.hooks import PoseVisualizationHook -from mmpose.visualization import PoseLocalVisualizer - -default_scope = None -# hooks -default_hooks = dict( - timer=dict(type=IterTimerHook), - logger=dict(type=LoggerHook, interval=50), - param_scheduler=dict(type=ParamSchedulerHook), - checkpoint=dict(type=CheckpointHook, interval=10, max_keep_ckpts=3), - sampler_seed=dict(type=DistSamplerSeedHook), - visualization=dict(type=PoseVisualizationHook, enable=False), -) - -# multi-processing backend -env_cfg = dict( - cudnn_benchmark=False, - mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), - 
dist_cfg=dict(backend='nccl'), -) - -# visualizer -vis_backends = [dict(type=LocalVisBackend)] -visualizer = dict( - type=PoseLocalVisualizer, vis_backends=vis_backends, name='visualizer') - -# logger -log_processor = dict( - type=LogProcessor, window_size=50, by_epoch=True, num_digits=6) -log_level = 'INFO' -load_from = None -resume = False - -# file I/O backend -backend_args = dict(backend='local') - -# training/validation/testing progress -train_cfg = dict() -val_cfg = dict(type=ValLoop) -test_cfg = dict(type=TestLoop) +from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook, + LoggerHook, ParamSchedulerHook) +from mmengine.runner import LogProcessor, TestLoop, ValLoop +from mmengine.visualization import LocalVisBackend + +from mmpose.engine.hooks import PoseVisualizationHook +from mmpose.visualization import PoseLocalVisualizer + +default_scope = None +# hooks +default_hooks = dict( + timer=dict(type=IterTimerHook), + logger=dict(type=LoggerHook, interval=50), + param_scheduler=dict(type=ParamSchedulerHook), + checkpoint=dict(type=CheckpointHook, interval=10, max_keep_ckpts=3), + sampler_seed=dict(type=DistSamplerSeedHook), + visualization=dict(type=PoseVisualizationHook, enable=False), +) + +# multi-processing backend +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl'), +) + +# visualizer +vis_backends = [dict(type=LocalVisBackend)] +visualizer = dict( + type=PoseLocalVisualizer, vis_backends=vis_backends, name='visualizer') + +# logger +log_processor = dict( + type=LogProcessor, window_size=50, by_epoch=True, num_digits=6) +log_level = 'INFO' +load_from = None +resume = False + +# file I/O backend +backend_args = dict(backend='local') + +# training/validation/testing progress +train_cfg = dict() +val_cfg = dict(type=ValLoop) +test_cfg = dict(type=TestLoop) diff --git a/projects/yolox_pose/configs/py_yolox_pose_s_8xb32_300e_coco.py b/projects/yolox_pose/configs/py_yolox_pose_s_8xb32_300e_coco.py index 9a75e35e8d..29822237cb 100644 --- a/projects/yolox_pose/configs/py_yolox_pose_s_8xb32_300e_coco.py +++ b/projects/yolox_pose/configs/py_yolox_pose_s_8xb32_300e_coco.py @@ -1,283 +1,283 @@ -from mmengine.config import read_base - -with read_base(): - from ._base_.py_default_runtime import * - -from datasets import (CocoDataset, FilterDetPoseAnnotations, PackDetPoseInputs, - PoseToDetConverter) -from mmcv.ops import nms -from mmdet.datasets.transforms import (Pad, RandomAffine, RandomFlip, Resize, - YOLOXHSVRandomAug) -from mmdet.engine.hooks import SyncNormHook -from mmdet.engine.schedulers import QuadraticWarmupLR -from mmdet.models import CrossEntropyLoss, DetDataPreprocessor, IoULoss, L1Loss -from mmdet.models.task_modules import BboxOverlaps2D -from mmengine.dataset import DefaultSampler -from mmengine.hooks import EMAHook -from mmengine.model import PretrainedInit -from mmengine.optim import ConstantLR, CosineAnnealingLR, OptimWrapper -from mmengine.runner import EpochBasedTrainLoop -from mmyolo.datasets.transforms import Mosaic, YOLOXMixUp -from mmyolo.engine.hooks import YOLOXModeSwitchHook -from mmyolo.models import (YOLOXPAFPN, ExpMomentumEMA, YOLODetector, - YOLOXCSPDarknet) -from models import (OksLoss, PoseBatchSyncRandomResize, PoseSimOTAAssigner, - YOLOXPoseHead, YOLOXPoseHeadModule) -from torch.nn import BatchNorm2d, SiLU -from torch.optim import AdamW - -from mmpose.datasets.transforms import LoadImage -from mmpose.evaluation import CocoMetric - -# model settings 
-model = dict( - type=YOLODetector, - use_syncbn=False, - init_cfg=dict( - type=PretrainedInit, - checkpoint='https://download.openmmlab.com/mmyolo/v0/yolox/' - 'yolox_s_fast_8xb32-300e-rtmdet-hyp_coco/yolox_s_fast_' - '8xb32-300e-rtmdet-hyp_coco_20230210_134645-3a8dfbd7.pth'), - data_preprocessor=dict( - type=DetDataPreprocessor, - pad_size_divisor=32, - batch_augments=[ - dict( - type=PoseBatchSyncRandomResize, - random_size_range=(480, 800), - size_divisor=32, - interval=1) - ]), - backbone=dict( - type=YOLOXCSPDarknet, - deepen_factor=0.33, - widen_factor=0.5, - out_indices=(2, 3, 4), - spp_kernal_sizes=(5, 9, 13), - norm_cfg=dict(type=BatchNorm2d, momentum=0.03, eps=0.001), - act_cfg=dict(type=SiLU, inplace=True), - ), - neck=dict( - type=YOLOXPAFPN, - deepen_factor=0.33, - widen_factor=0.5, - in_channels=[256, 512, 1024], - out_channels=256, - norm_cfg=dict(type=BatchNorm2d, momentum=0.03, eps=0.001), - act_cfg=dict(type=SiLU, inplace=True)), - bbox_head=dict( - type=YOLOXPoseHead, - head_module=dict( - type=YOLOXPoseHeadModule, - num_classes=1, - in_channels=256, - feat_channels=256, - widen_factor=0.5, - stacked_convs=2, - num_keypoints=17, - featmap_strides=(8, 16, 32), - use_depthwise=False, - norm_cfg=dict(type=BatchNorm2d, momentum=0.03, eps=0.001), - act_cfg=dict(type=SiLU, inplace=True), - ), - loss_cls=dict( - type=CrossEntropyLoss, - use_sigmoid=True, - reduction='sum', - loss_weight=1.0), - loss_bbox=dict( - type=IoULoss, - mode='square', - eps=1e-16, - reduction='sum', - loss_weight=5.0), - loss_obj=dict( - type=CrossEntropyLoss, - use_sigmoid=True, - reduction='sum', - loss_weight=1.0), - loss_pose=dict( - type=OksLoss, - metainfo='configs/_base_/datasets/coco.py', - loss_weight=30.0), - loss_bbox_aux=dict(type=L1Loss, reduction='sum', loss_weight=1.0)), - train_cfg=dict( - assigner=dict( - type=PoseSimOTAAssigner, - center_radius=2.5, - iou_calculator=dict(type=BboxOverlaps2D), - oks_calculator=dict( - type=OksLoss, metainfo='configs/_base_/datasets/coco.py'))), - test_cfg=dict( - yolox_style=True, - multi_label=False, - score_thr=0.001, - max_per_img=300, - nms=dict(type=nms, iou_threshold=0.65))) - -# data related -img_scale = (640, 640) - -# pipelines -pre_transform = [ - dict(type=LoadImage, backend_args=backend_args), - dict(type=PoseToDetConverter) -] - -train_pipeline_stage1 = [ - *pre_transform, - dict( - type=Mosaic, - img_scale=img_scale, - pad_val=114.0, - pre_transform=pre_transform), - dict( - type=RandomAffine, - scaling_ratio_range=(0.75, 1.0), - border=(-img_scale[0] // 2, -img_scale[1] // 2)), - dict( - type=YOLOXMixUp, - img_scale=img_scale, - ratio_range=(0.8, 1.6), - pad_val=114.0, - pre_transform=pre_transform), - dict(type=YOLOXHSVRandomAug), - dict(type=RandomFlip, prob=0.5), - dict(type=FilterDetPoseAnnotations, keep_empty=False), - dict( - type=PackDetPoseInputs, - meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape')) -] - -train_pipeline_stage2 = [ - *pre_transform, - dict(type=Resize, scale=img_scale, keep_ratio=True), - dict( - type=Pad, pad_to_square=True, pad_val=dict(img=(114.0, 114.0, 114.0))), - dict(type=YOLOXHSVRandomAug), - dict(type=RandomFlip, prob=0.5), - dict(type=FilterDetPoseAnnotations, keep_empty=False), - dict(type=PackDetPoseInputs) -] - -test_pipeline = [ - *pre_transform, - dict(type=Resize, scale=img_scale, keep_ratio=True), - dict( - type=Pad, pad_to_square=True, pad_val=dict(img=(114.0, 114.0, 114.0))), - dict( - type=PackDetPoseInputs, - meta_keys=('id', 'img_id', 'img_path', 'ori_shape', 'img_shape', - 
'scale_factor', 'flip_indices')) -] - -# dataset settings -dataset_type = CocoDataset -data_mode = 'bottomup' -data_root = 'data/coco/' - -train_dataloader = dict( - batch_size=32, - num_workers=8, - persistent_workers=True, - pin_memory=True, - sampler=dict(type=DefaultSampler, shuffle=True), - dataset=dict( - type=dataset_type, - data_mode=data_mode, - data_root=data_root, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - filter_cfg=dict(filter_empty_gt=False, min_size=32), - pipeline=train_pipeline_stage1)) - -val_dataloader = dict( - batch_size=1, - num_workers=2, - persistent_workers=True, - pin_memory=True, - drop_last=False, - sampler=dict(type=DefaultSampler, shuffle=False), - dataset=dict( - type=dataset_type, - data_mode=data_mode, - data_root=data_root, - ann_file='annotations/person_keypoints_val2017.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=test_pipeline)) - -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type=CocoMetric, - ann_file=data_root + 'annotations/person_keypoints_val2017.json', - score_mode='bbox') -test_evaluator = val_evaluator - -default_hooks.update( - dict(checkpoint=dict(save_best='coco/AP', rule='greater'))) - -# optimizer -base_lr = 0.004 -max_epochs = 300 -num_last_epochs = 20 -optim_wrapper = dict( - type=OptimWrapper, - optimizer=dict(type=AdamW, lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -param_scheduler = [ - dict( - # use quadratic formula to warm up 5 epochs - # and lr is updated by iteration - type=QuadraticWarmupLR, - by_epoch=True, - begin=0, - end=5, - convert_to_iter_based=True), - dict( - # use cosine lr from 5 to 285 epoch - type=CosineAnnealingLR, - eta_min=base_lr * 0.05, - begin=5, - T_max=max_epochs - num_last_epochs, - end=max_epochs - num_last_epochs, - by_epoch=True, - convert_to_iter_based=True), - dict( - # use fixed lr during last num_last_epochs epochs - type=ConstantLR, - by_epoch=True, - factor=1, - begin=max_epochs - num_last_epochs, - end=max_epochs, - ) -] - -# runtime -custom_hooks = [ - dict( - type=YOLOXModeSwitchHook, - num_last_epochs=num_last_epochs, - new_train_pipeline=train_pipeline_stage2, - priority=48), - dict(type=SyncNormHook, priority=48), - dict( - type=EMAHook, - ema_type=ExpMomentumEMA, - momentum=0.0002, - update_buffers=True, - strict_load=False, - priority=49) -] - -train_cfg = dict( - type=EpochBasedTrainLoop, - max_epochs=max_epochs, - val_interval=10, - dynamic_intervals=[(max_epochs - num_last_epochs, 1)]) - -auto_scale_lr = dict(base_batch_size=256) +from mmengine.config import read_base + +with read_base(): + from ._base_.py_default_runtime import * + +from datasets import (CocoDataset, FilterDetPoseAnnotations, PackDetPoseInputs, + PoseToDetConverter) +from mmcv.ops import nms +from mmdet.datasets.transforms import (Pad, RandomAffine, RandomFlip, Resize, + YOLOXHSVRandomAug) +from mmdet.engine.hooks import SyncNormHook +from mmdet.engine.schedulers import QuadraticWarmupLR +from mmdet.models import CrossEntropyLoss, DetDataPreprocessor, IoULoss, L1Loss +from mmdet.models.task_modules import BboxOverlaps2D +from mmengine.dataset import DefaultSampler +from mmengine.hooks import EMAHook +from mmengine.model import PretrainedInit +from mmengine.optim import ConstantLR, CosineAnnealingLR, OptimWrapper +from mmengine.runner import EpochBasedTrainLoop +from mmyolo.datasets.transforms import Mosaic, YOLOXMixUp +from 
mmyolo.engine.hooks import YOLOXModeSwitchHook +from mmyolo.models import (YOLOXPAFPN, ExpMomentumEMA, YOLODetector, + YOLOXCSPDarknet) +from models import (OksLoss, PoseBatchSyncRandomResize, PoseSimOTAAssigner, + YOLOXPoseHead, YOLOXPoseHeadModule) +from torch.nn import BatchNorm2d, SiLU +from torch.optim import AdamW + +from mmpose.datasets.transforms import LoadImage +from mmpose.evaluation import CocoMetric + +# model settings +model = dict( + type=YOLODetector, + use_syncbn=False, + init_cfg=dict( + type=PretrainedInit, + checkpoint='https://download.openmmlab.com/mmyolo/v0/yolox/' + 'yolox_s_fast_8xb32-300e-rtmdet-hyp_coco/yolox_s_fast_' + '8xb32-300e-rtmdet-hyp_coco_20230210_134645-3a8dfbd7.pth'), + data_preprocessor=dict( + type=DetDataPreprocessor, + pad_size_divisor=32, + batch_augments=[ + dict( + type=PoseBatchSyncRandomResize, + random_size_range=(480, 800), + size_divisor=32, + interval=1) + ]), + backbone=dict( + type=YOLOXCSPDarknet, + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(2, 3, 4), + spp_kernal_sizes=(5, 9, 13), + norm_cfg=dict(type=BatchNorm2d, momentum=0.03, eps=0.001), + act_cfg=dict(type=SiLU, inplace=True), + ), + neck=dict( + type=YOLOXPAFPN, + deepen_factor=0.33, + widen_factor=0.5, + in_channels=[256, 512, 1024], + out_channels=256, + norm_cfg=dict(type=BatchNorm2d, momentum=0.03, eps=0.001), + act_cfg=dict(type=SiLU, inplace=True)), + bbox_head=dict( + type=YOLOXPoseHead, + head_module=dict( + type=YOLOXPoseHeadModule, + num_classes=1, + in_channels=256, + feat_channels=256, + widen_factor=0.5, + stacked_convs=2, + num_keypoints=17, + featmap_strides=(8, 16, 32), + use_depthwise=False, + norm_cfg=dict(type=BatchNorm2d, momentum=0.03, eps=0.001), + act_cfg=dict(type=SiLU, inplace=True), + ), + loss_cls=dict( + type=CrossEntropyLoss, + use_sigmoid=True, + reduction='sum', + loss_weight=1.0), + loss_bbox=dict( + type=IoULoss, + mode='square', + eps=1e-16, + reduction='sum', + loss_weight=5.0), + loss_obj=dict( + type=CrossEntropyLoss, + use_sigmoid=True, + reduction='sum', + loss_weight=1.0), + loss_pose=dict( + type=OksLoss, + metainfo='configs/_base_/datasets/coco.py', + loss_weight=30.0), + loss_bbox_aux=dict(type=L1Loss, reduction='sum', loss_weight=1.0)), + train_cfg=dict( + assigner=dict( + type=PoseSimOTAAssigner, + center_radius=2.5, + iou_calculator=dict(type=BboxOverlaps2D), + oks_calculator=dict( + type=OksLoss, metainfo='configs/_base_/datasets/coco.py'))), + test_cfg=dict( + yolox_style=True, + multi_label=False, + score_thr=0.001, + max_per_img=300, + nms=dict(type=nms, iou_threshold=0.65))) + +# data related +img_scale = (640, 640) + +# pipelines +pre_transform = [ + dict(type=LoadImage, backend_args=backend_args), + dict(type=PoseToDetConverter) +] + +train_pipeline_stage1 = [ + *pre_transform, + dict( + type=Mosaic, + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type=RandomAffine, + scaling_ratio_range=(0.75, 1.0), + border=(-img_scale[0] // 2, -img_scale[1] // 2)), + dict( + type=YOLOXMixUp, + img_scale=img_scale, + ratio_range=(0.8, 1.6), + pad_val=114.0, + pre_transform=pre_transform), + dict(type=YOLOXHSVRandomAug), + dict(type=RandomFlip, prob=0.5), + dict(type=FilterDetPoseAnnotations, keep_empty=False), + dict( + type=PackDetPoseInputs, + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape')) +] + +train_pipeline_stage2 = [ + *pre_transform, + dict(type=Resize, scale=img_scale, keep_ratio=True), + dict( + type=Pad, pad_to_square=True, pad_val=dict(img=(114.0, 114.0, 114.0))), + 
dict(type=YOLOXHSVRandomAug), + dict(type=RandomFlip, prob=0.5), + dict(type=FilterDetPoseAnnotations, keep_empty=False), + dict(type=PackDetPoseInputs) +] + +test_pipeline = [ + *pre_transform, + dict(type=Resize, scale=img_scale, keep_ratio=True), + dict( + type=Pad, pad_to_square=True, pad_val=dict(img=(114.0, 114.0, 114.0))), + dict( + type=PackDetPoseInputs, + meta_keys=('id', 'img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip_indices')) +] + +# dataset settings +dataset_type = CocoDataset +data_mode = 'bottomup' +data_root = 'data/coco/' + +train_dataloader = dict( + batch_size=32, + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(type=DefaultSampler, shuffle=True), + dataset=dict( + type=dataset_type, + data_mode=data_mode, + data_root=data_root, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline_stage1)) + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + pin_memory=True, + drop_last=False, + sampler=dict(type=DefaultSampler, shuffle=False), + dataset=dict( + type=dataset_type, + data_mode=data_mode, + data_root=data_root, + ann_file='annotations/person_keypoints_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline)) + +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type=CocoMetric, + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + score_mode='bbox') +test_evaluator = val_evaluator + +default_hooks.update( + dict(checkpoint=dict(save_best='coco/AP', rule='greater'))) + +# optimizer +base_lr = 0.004 +max_epochs = 300 +num_last_epochs = 20 +optim_wrapper = dict( + type=OptimWrapper, + optimizer=dict(type=AdamW, lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +param_scheduler = [ + dict( + # use quadratic formula to warm up 5 epochs + # and lr is updated by iteration + type=QuadraticWarmupLR, + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + # use cosine lr from 5 to 285 epoch + type=CosineAnnealingLR, + eta_min=base_lr * 0.05, + begin=5, + T_max=max_epochs - num_last_epochs, + end=max_epochs - num_last_epochs, + by_epoch=True, + convert_to_iter_based=True), + dict( + # use fixed lr during last num_last_epochs epochs + type=ConstantLR, + by_epoch=True, + factor=1, + begin=max_epochs - num_last_epochs, + end=max_epochs, + ) +] + +# runtime +custom_hooks = [ + dict( + type=YOLOXModeSwitchHook, + num_last_epochs=num_last_epochs, + new_train_pipeline=train_pipeline_stage2, + priority=48), + dict(type=SyncNormHook, priority=48), + dict( + type=EMAHook, + ema_type=ExpMomentumEMA, + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49) +] + +train_cfg = dict( + type=EpochBasedTrainLoop, + max_epochs=max_epochs, + val_interval=10, + dynamic_intervals=[(max_epochs - num_last_epochs, 1)]) + +auto_scale_lr = dict(base_batch_size=256) diff --git a/projects/yolox_pose/configs/yolox-pose_l_4xb64-300e_coco.py b/projects/yolox_pose/configs/yolox-pose_l_4xb64-300e_coco.py index ec31acd238..70c7fd938f 100644 --- a/projects/yolox_pose/configs/yolox-pose_l_4xb64-300e_coco.py +++ b/projects/yolox_pose/configs/yolox-pose_l_4xb64-300e_coco.py @@ -1,18 +1,18 @@ -_base_ = ['./yolox-pose_s_8xb32-300e_coco.py'] - -# model settings -model = dict( - 
init_cfg=dict(checkpoint='https://download.openmmlab.com/mmyolo/v0/yolox/' - 'yolox_l_fast_8xb8-300e_coco/yolox_l_fast_8xb8-300e_' - 'coco_20230213_160715-c731eb1c.pth'), - backbone=dict( - deepen_factor=1.0, - widen_factor=1.0, - ), - neck=dict( - deepen_factor=1.0, - widen_factor=1.0, - ), - bbox_head=dict(head_module=dict(widen_factor=1.0))) - -train_dataloader = dict(batch_size=64) +_base_ = ['./yolox-pose_s_8xb32-300e_coco.py'] + +# model settings +model = dict( + init_cfg=dict(checkpoint='https://download.openmmlab.com/mmyolo/v0/yolox/' + 'yolox_l_fast_8xb8-300e_coco/yolox_l_fast_8xb8-300e_' + 'coco_20230213_160715-c731eb1c.pth'), + backbone=dict( + deepen_factor=1.0, + widen_factor=1.0, + ), + neck=dict( + deepen_factor=1.0, + widen_factor=1.0, + ), + bbox_head=dict(head_module=dict(widen_factor=1.0))) + +train_dataloader = dict(batch_size=64) diff --git a/projects/yolox_pose/configs/yolox-pose_m_4xb64-300e_coco.py b/projects/yolox_pose/configs/yolox-pose_m_4xb64-300e_coco.py index 9088e6f145..e32e87b45f 100644 --- a/projects/yolox_pose/configs/yolox-pose_m_4xb64-300e_coco.py +++ b/projects/yolox_pose/configs/yolox-pose_m_4xb64-300e_coco.py @@ -1,18 +1,18 @@ -_base_ = ['./yolox-pose_s_8xb32-300e_coco.py'] - -# model settings -model = dict( - init_cfg=dict(checkpoint='https://download.openmmlab.com/mmyolo/v0/yolox/' - 'yolox_m_fast_8xb32-300e-rtmdet-hyp_coco/yolox_m_fast_8xb32' - '-300e-rtmdet-hyp_coco_20230210_144328-e657e182.pth'), - backbone=dict( - deepen_factor=0.67, - widen_factor=0.75, - ), - neck=dict( - deepen_factor=0.67, - widen_factor=0.75, - ), - bbox_head=dict(head_module=dict(widen_factor=0.75))) - -train_dataloader = dict(batch_size=64) +_base_ = ['./yolox-pose_s_8xb32-300e_coco.py'] + +# model settings +model = dict( + init_cfg=dict(checkpoint='https://download.openmmlab.com/mmyolo/v0/yolox/' + 'yolox_m_fast_8xb32-300e-rtmdet-hyp_coco/yolox_m_fast_8xb32' + '-300e-rtmdet-hyp_coco_20230210_144328-e657e182.pth'), + backbone=dict( + deepen_factor=0.67, + widen_factor=0.75, + ), + neck=dict( + deepen_factor=0.67, + widen_factor=0.75, + ), + bbox_head=dict(head_module=dict(widen_factor=0.75))) + +train_dataloader = dict(batch_size=64) diff --git a/projects/yolox_pose/configs/yolox-pose_s_8xb32-300e_coco.py b/projects/yolox_pose/configs/yolox-pose_s_8xb32-300e_coco.py index 1854e51e1d..0f5fa471e8 100644 --- a/projects/yolox_pose/configs/yolox-pose_s_8xb32-300e_coco.py +++ b/projects/yolox_pose/configs/yolox-pose_s_8xb32-300e_coco.py @@ -1,258 +1,258 @@ -_base_ = ['_base_/default_runtime.py'] - -# model settings -model = dict( - type='YOLODetector', - use_syncbn=False, - init_cfg=dict( - type='Pretrained', - checkpoint='https://download.openmmlab.com/mmyolo/v0/yolox/' - 'yolox_s_fast_8xb32-300e-rtmdet-hyp_coco/yolox_s_fast_' - '8xb32-300e-rtmdet-hyp_coco_20230210_134645-3a8dfbd7.pth'), - data_preprocessor=dict( - type='mmdet.DetDataPreprocessor', - pad_size_divisor=32, - batch_augments=[ - dict( - type='PoseBatchSyncRandomResize', - random_size_range=(480, 800), - size_divisor=32, - interval=1) - ]), - backbone=dict( - type='YOLOXCSPDarknet', - deepen_factor=0.33, - widen_factor=0.5, - out_indices=(2, 3, 4), - spp_kernal_sizes=(5, 9, 13), - norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), - act_cfg=dict(type='SiLU', inplace=True), - ), - neck=dict( - type='YOLOXPAFPN', - deepen_factor=0.33, - widen_factor=0.5, - in_channels=[256, 512, 1024], - out_channels=256, - norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), - act_cfg=dict(type='SiLU', inplace=True)), - 
bbox_head=dict( - type='YOLOXPoseHead', - head_module=dict( - type='YOLOXPoseHeadModule', - num_classes=1, - in_channels=256, - feat_channels=256, - widen_factor=0.5, - stacked_convs=2, - num_keypoints=17, - featmap_strides=(8, 16, 32), - use_depthwise=False, - norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), - act_cfg=dict(type='SiLU', inplace=True), - ), - loss_cls=dict( - type='mmdet.CrossEntropyLoss', - use_sigmoid=True, - reduction='sum', - loss_weight=1.0), - loss_bbox=dict( - type='mmdet.IoULoss', - mode='square', - eps=1e-16, - reduction='sum', - loss_weight=5.0), - loss_obj=dict( - type='mmdet.CrossEntropyLoss', - use_sigmoid=True, - reduction='sum', - loss_weight=1.0), - loss_pose=dict( - type='OksLoss', - metainfo='configs/_base_/datasets/coco.py', - loss_weight=30.0), - loss_bbox_aux=dict( - type='mmdet.L1Loss', reduction='sum', loss_weight=1.0)), - train_cfg=dict( - assigner=dict( - type='PoseSimOTAAssigner', - center_radius=2.5, - iou_calculator=dict(type='mmdet.BboxOverlaps2D'), - oks_calculator=dict( - type='OksLoss', metainfo='configs/_base_/datasets/coco.py'))), - test_cfg=dict( - yolox_style=True, - multi_label=False, - score_thr=0.001, - max_per_img=300, - nms=dict(type='nms', iou_threshold=0.65))) - -# data related -img_scale = (640, 640) - -# pipelines -pre_transform = [ - dict(type='mmpose.LoadImage', backend_args=_base_.backend_args), - dict(type='PoseToDetConverter') -] - -train_pipeline_stage1 = [ - *pre_transform, - dict( - type='Mosaic', - img_scale=img_scale, - pad_val=114.0, - pre_transform=pre_transform), - dict( - type='mmdet.RandomAffine', - scaling_ratio_range=(0.75, 1.0), - border=(-img_scale[0] // 2, -img_scale[1] // 2)), - dict( - type='YOLOXMixUp', - img_scale=img_scale, - ratio_range=(0.8, 1.6), - pad_val=114.0, - pre_transform=pre_transform), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict(type='mmdet.RandomFlip', prob=0.5), - dict(type='FilterDetPoseAnnotations', keep_empty=False), - dict( - type='PackDetPoseInputs', - meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape')) -] - -train_pipeline_stage2 = [ - *pre_transform, - dict(type='mmdet.Resize', scale=img_scale, keep_ratio=True), - dict( - type='mmdet.Pad', - pad_to_square=True, - pad_val=dict(img=(114.0, 114.0, 114.0))), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict(type='mmdet.RandomFlip', prob=0.5), - dict(type='FilterDetPoseAnnotations', keep_empty=False), - dict(type='PackDetPoseInputs') -] - -test_pipeline = [ - *pre_transform, - dict(type='mmdet.Resize', scale=img_scale, keep_ratio=True), - dict( - type='mmdet.Pad', - pad_to_square=True, - pad_val=dict(img=(114.0, 114.0, 114.0))), - dict( - type='PackDetPoseInputs', - meta_keys=('id', 'img_id', 'img_path', 'ori_shape', 'img_shape', - 'scale_factor', 'flip_indices')) -] - -# dataset settings -dataset_type = 'CocoDataset' -data_mode = 'bottomup' -data_root = 'data/coco/' - -train_dataloader = dict( - batch_size=32, - num_workers=8, - persistent_workers=True, - pin_memory=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - data_mode=data_mode, - data_root=data_root, - ann_file='annotations/person_keypoints_train2017.json', - data_prefix=dict(img='train2017/'), - filter_cfg=dict(filter_empty_gt=False, min_size=32), - pipeline=train_pipeline_stage1)) - -val_dataloader = dict( - batch_size=1, - num_workers=2, - persistent_workers=True, - pin_memory=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type=dataset_type, - data_mode=data_mode, - 
data_root=data_root, - ann_file='annotations/person_keypoints_val2017.json', - data_prefix=dict(img='val2017/'), - test_mode=True, - pipeline=test_pipeline)) - -test_dataloader = val_dataloader - -# evaluators -val_evaluator = dict( - type='mmpose.CocoMetric', - ann_file=data_root + 'annotations/person_keypoints_val2017.json', - score_mode='bbox') -test_evaluator = val_evaluator - -default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) - -# optimizer -base_lr = 0.004 -max_epochs = 300 -num_last_epochs = 20 -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), - paramwise_cfg=dict( - norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) - -param_scheduler = [ - dict( - # use quadratic formula to warm up 5 epochs - # and lr is updated by iteration - type='mmdet.QuadraticWarmupLR', - by_epoch=True, - begin=0, - end=5, - convert_to_iter_based=True), - dict( - # use cosine lr from 5 to 285 epoch - type='CosineAnnealingLR', - eta_min=base_lr * 0.05, - begin=5, - T_max=max_epochs - num_last_epochs, - end=max_epochs - num_last_epochs, - by_epoch=True, - convert_to_iter_based=True), - dict( - # use fixed lr during last num_last_epochs epochs - type='ConstantLR', - by_epoch=True, - factor=1, - begin=max_epochs - num_last_epochs, - end=max_epochs, - ) -] - -# runtime -custom_hooks = [ - dict( - type='YOLOXModeSwitchHook', - num_last_epochs=num_last_epochs, - new_train_pipeline=train_pipeline_stage2, - priority=48), - dict(type='mmdet.SyncNormHook', priority=48), - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0002, - update_buffers=True, - strict_load=False, - priority=49) -] - -train_cfg = dict( - type='EpochBasedTrainLoop', - max_epochs=max_epochs, - val_interval=10, - dynamic_intervals=[(max_epochs - num_last_epochs, 1)]) - -auto_scale_lr = dict(base_batch_size=256) +_base_ = ['_base_/default_runtime.py'] + +# model settings +model = dict( + type='YOLODetector', + use_syncbn=False, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmyolo/v0/yolox/' + 'yolox_s_fast_8xb32-300e-rtmdet-hyp_coco/yolox_s_fast_' + '8xb32-300e-rtmdet-hyp_coco_20230210_134645-3a8dfbd7.pth'), + data_preprocessor=dict( + type='mmdet.DetDataPreprocessor', + pad_size_divisor=32, + batch_augments=[ + dict( + type='PoseBatchSyncRandomResize', + random_size_range=(480, 800), + size_divisor=32, + interval=1) + ]), + backbone=dict( + type='YOLOXCSPDarknet', + deepen_factor=0.33, + widen_factor=0.5, + out_indices=(2, 3, 4), + spp_kernal_sizes=(5, 9, 13), + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='SiLU', inplace=True), + ), + neck=dict( + type='YOLOXPAFPN', + deepen_factor=0.33, + widen_factor=0.5, + in_channels=[256, 512, 1024], + out_channels=256, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='SiLU', inplace=True)), + bbox_head=dict( + type='YOLOXPoseHead', + head_module=dict( + type='YOLOXPoseHeadModule', + num_classes=1, + in_channels=256, + feat_channels=256, + widen_factor=0.5, + stacked_convs=2, + num_keypoints=17, + featmap_strides=(8, 16, 32), + use_depthwise=False, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='SiLU', inplace=True), + ), + loss_cls=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='sum', + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.IoULoss', + mode='square', + eps=1e-16, + reduction='sum', + loss_weight=5.0), + loss_obj=dict( + type='mmdet.CrossEntropyLoss', + 
use_sigmoid=True, + reduction='sum', + loss_weight=1.0), + loss_pose=dict( + type='OksLoss', + metainfo='configs/_base_/datasets/coco.py', + loss_weight=30.0), + loss_bbox_aux=dict( + type='mmdet.L1Loss', reduction='sum', loss_weight=1.0)), + train_cfg=dict( + assigner=dict( + type='PoseSimOTAAssigner', + center_radius=2.5, + iou_calculator=dict(type='mmdet.BboxOverlaps2D'), + oks_calculator=dict( + type='OksLoss', metainfo='configs/_base_/datasets/coco.py'))), + test_cfg=dict( + yolox_style=True, + multi_label=False, + score_thr=0.001, + max_per_img=300, + nms=dict(type='nms', iou_threshold=0.65))) + +# data related +img_scale = (640, 640) + +# pipelines +pre_transform = [ + dict(type='mmpose.LoadImage', backend_args=_base_.backend_args), + dict(type='PoseToDetConverter') +] + +train_pipeline_stage1 = [ + *pre_transform, + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='mmdet.RandomAffine', + scaling_ratio_range=(0.75, 1.0), + border=(-img_scale[0] // 2, -img_scale[1] // 2)), + dict( + type='YOLOXMixUp', + img_scale=img_scale, + ratio_range=(0.8, 1.6), + pad_val=114.0, + pre_transform=pre_transform), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict(type='FilterDetPoseAnnotations', keep_empty=False), + dict( + type='PackDetPoseInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape')) +] + +train_pipeline_stage2 = [ + *pre_transform, + dict(type='mmdet.Resize', scale=img_scale, keep_ratio=True), + dict( + type='mmdet.Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict(type='FilterDetPoseAnnotations', keep_empty=False), + dict(type='PackDetPoseInputs') +] + +test_pipeline = [ + *pre_transform, + dict(type='mmdet.Resize', scale=img_scale, keep_ratio=True), + dict( + type='mmdet.Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict( + type='PackDetPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip_indices')) +] + +# dataset settings +dataset_type = 'CocoDataset' +data_mode = 'bottomup' +data_root = 'data/coco/' + +train_dataloader = dict( + batch_size=32, + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_mode=data_mode, + data_root=data_root, + ann_file='annotations/person_keypoints_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline_stage1)) + +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + pin_memory=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_mode=data_mode, + data_root=data_root, + ann_file='annotations/person_keypoints_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=test_pipeline)) + +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + type='mmpose.CocoMetric', + ann_file=data_root + 'annotations/person_keypoints_val2017.json', + score_mode='bbox') +test_evaluator = val_evaluator + +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +# optimizer +base_lr = 0.004 +max_epochs = 300 +num_last_epochs = 20 +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, 
weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +param_scheduler = [ + dict( + # use quadratic formula to warm up 5 epochs + # and lr is updated by iteration + type='mmdet.QuadraticWarmupLR', + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + # use cosine lr from 5 to 285 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=5, + T_max=max_epochs - num_last_epochs, + end=max_epochs - num_last_epochs, + by_epoch=True, + convert_to_iter_based=True), + dict( + # use fixed lr during last num_last_epochs epochs + type='ConstantLR', + by_epoch=True, + factor=1, + begin=max_epochs - num_last_epochs, + end=max_epochs, + ) +] + +# runtime +custom_hooks = [ + dict( + type='YOLOXModeSwitchHook', + num_last_epochs=num_last_epochs, + new_train_pipeline=train_pipeline_stage2, + priority=48), + dict(type='mmdet.SyncNormHook', priority=48), + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49) +] + +train_cfg = dict( + type='EpochBasedTrainLoop', + max_epochs=max_epochs, + val_interval=10, + dynamic_intervals=[(max_epochs - num_last_epochs, 1)]) + +auto_scale_lr = dict(base_batch_size=256) diff --git a/projects/yolox_pose/configs/yolox-pose_tiny_4xb64-300e_coco.py b/projects/yolox_pose/configs/yolox-pose_tiny_4xb64-300e_coco.py index 3794368d5c..4f026c3e7b 100644 --- a/projects/yolox_pose/configs/yolox-pose_tiny_4xb64-300e_coco.py +++ b/projects/yolox_pose/configs/yolox-pose_tiny_4xb64-300e_coco.py @@ -1,67 +1,67 @@ -_base_ = ['./yolox-pose_s_8xb32-300e_coco.py'] - -# model settings -model = dict( - init_cfg=dict(checkpoint='https://download.openmmlab.com/mmyolo/v0/yolox/' - 'yolox_tiny_fast_8xb32-300e-rtmdet-hyp_coco/yolox_tiny_fast_' - '8xb32-300e-rtmdet-hyp_coco_20230210_143637-4c338102.pth'), - data_preprocessor=dict(batch_augments=[ - dict( - type='PoseBatchSyncRandomResize', - random_size_range=(320, 640), - size_divisor=32, - interval=1) - ]), - backbone=dict( - deepen_factor=0.33, - widen_factor=0.375, - ), - neck=dict( - deepen_factor=0.33, - widen_factor=0.375, - ), - bbox_head=dict(head_module=dict(widen_factor=0.375))) - -# data settings -img_scale = _base_.img_scale -pre_transform = _base_.pre_transform - -train_pipeline_stage1 = [ - *pre_transform, - dict( - type='Mosaic', - img_scale=(img_scale), - pad_val=114.0, - pre_transform=pre_transform), - dict( - type='mmdet.RandomAffine', - scaling_ratio_range=(0.75, 1.0), - border=(-img_scale[0] // 2, -img_scale[1] // 2)), - dict(type='mmdet.YOLOXHSVRandomAug'), - dict(type='mmdet.RandomFlip', prob=0.5), - dict( - type='FilterDetPoseAnnotations', - min_gt_bbox_wh=(1, 1), - keep_empty=False), - dict( - type='PackDetPoseInputs', - meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape')) -] - -test_pipeline = [ - *pre_transform, - dict(type='mmdet.Resize', scale=(416, 416), keep_ratio=True), - dict( - type='mmdet.Pad', - pad_to_square=True, - pad_val=dict(img=(114.0, 114.0, 114.0))), - dict( - type='PackDetPoseInputs', - meta_keys=('id', 'img_id', 'img_path', 'ori_shape', 'img_shape', - 'scale_factor', 'flip_indices')) -] - -train_dataloader = dict( - batch_size=64, dataset=dict(pipeline=train_pipeline_stage1)) -val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) -test_dataloader = val_dataloader +_base_ = ['./yolox-pose_s_8xb32-300e_coco.py'] + +# model settings +model = dict( + init_cfg=dict(checkpoint='https://download.openmmlab.com/mmyolo/v0/yolox/' + 
'yolox_tiny_fast_8xb32-300e-rtmdet-hyp_coco/yolox_tiny_fast_' + '8xb32-300e-rtmdet-hyp_coco_20230210_143637-4c338102.pth'), + data_preprocessor=dict(batch_augments=[ + dict( + type='PoseBatchSyncRandomResize', + random_size_range=(320, 640), + size_divisor=32, + interval=1) + ]), + backbone=dict( + deepen_factor=0.33, + widen_factor=0.375, + ), + neck=dict( + deepen_factor=0.33, + widen_factor=0.375, + ), + bbox_head=dict(head_module=dict(widen_factor=0.375))) + +# data settings +img_scale = _base_.img_scale +pre_transform = _base_.pre_transform + +train_pipeline_stage1 = [ + *pre_transform, + dict( + type='Mosaic', + img_scale=(img_scale), + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='mmdet.RandomAffine', + scaling_ratio_range=(0.75, 1.0), + border=(-img_scale[0] // 2, -img_scale[1] // 2)), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='FilterDetPoseAnnotations', + min_gt_bbox_wh=(1, 1), + keep_empty=False), + dict( + type='PackDetPoseInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape')) +] + +test_pipeline = [ + *pre_transform, + dict(type='mmdet.Resize', scale=(416, 416), keep_ratio=True), + dict( + type='mmdet.Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict( + type='PackDetPoseInputs', + meta_keys=('id', 'img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip_indices')) +] + +train_dataloader = dict( + batch_size=64, dataset=dict(pipeline=train_pipeline_stage1)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader diff --git a/projects/yolox_pose/datasets/__init__.py b/projects/yolox_pose/datasets/__init__.py index 69bae9de53..51bba2c990 100644 --- a/projects/yolox_pose/datasets/__init__.py +++ b/projects/yolox_pose/datasets/__init__.py @@ -1,3 +1,3 @@ -from .bbox_keypoint_structure import * # noqa -from .coco_dataset import * # noqa -from .transforms import * # noqa +from .bbox_keypoint_structure import * # noqa +from .coco_dataset import * # noqa +from .transforms import * # noqa diff --git a/projects/yolox_pose/datasets/bbox_keypoint_structure.py b/projects/yolox_pose/datasets/bbox_keypoint_structure.py index 6b385f2f09..cfa6fe1dd2 100644 --- a/projects/yolox_pose/datasets/bbox_keypoint_structure.py +++ b/projects/yolox_pose/datasets/bbox_keypoint_structure.py @@ -1,285 +1,285 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from copy import deepcopy -from typing import List, Optional, Sequence, Tuple, Type, TypeVar, Union - -import numpy as np -import torch -from mmdet.structures.bbox import HorizontalBoxes -from torch import Tensor - -DeviceType = Union[str, torch.device] -T = TypeVar('T') -IndexType = Union[slice, int, list, torch.LongTensor, torch.cuda.LongTensor, - torch.BoolTensor, torch.cuda.BoolTensor, np.ndarray] - - -class BBoxKeypoints(HorizontalBoxes): - """The BBoxKeypoints class is a combination of bounding boxes and keypoints - representation. The box format used in BBoxKeypoints is the same as - HorizontalBoxes. - - Args: - data (Tensor or np.ndarray): The box data with shape of - (N, 4). - keypoints (Tensor or np.ndarray): The keypoint data with shape of - (N, K, 2). - keypoints_visible (Tensor or np.ndarray): The visibility of keypoints - with shape of (N, K). - dtype (torch.dtype, Optional): data type of boxes. Defaults to None. - device (str or torch.device, Optional): device of boxes. - Default to None. - clone (bool): Whether clone ``boxes`` or not. Defaults to True. 
- mode (str, Optional): the mode of boxes. If it is 'cxcywh', the - `data` will be converted to 'xyxy' mode. Defaults to None. - flip_indices (list, Optional): The indices of keypoints when the - images is flipped. Defaults to None. - - Notes: - N: the number of instances. - K: the number of keypoints. - """ - - def __init__(self, - data: Union[Tensor, np.ndarray], - keypoints: Union[Tensor, np.ndarray], - keypoints_visible: Union[Tensor, np.ndarray], - dtype: Optional[torch.dtype] = None, - device: Optional[DeviceType] = None, - clone: bool = True, - in_mode: Optional[str] = None, - flip_indices: Optional[List] = None) -> None: - - super().__init__( - data=data, - dtype=dtype, - device=device, - clone=clone, - in_mode=in_mode) - - assert len(data) == len(keypoints) - assert len(data) == len(keypoints_visible) - - assert keypoints.ndim == 3 - assert keypoints_visible.ndim == 2 - - keypoints = torch.as_tensor(keypoints) - keypoints_visible = torch.as_tensor(keypoints_visible) - - if device is not None: - keypoints = keypoints.to(device=device) - keypoints_visible = keypoints_visible.to(device=device) - - if clone: - keypoints = keypoints.clone() - keypoints_visible = keypoints_visible.clone() - - self.keypoints = keypoints - self.keypoints_visible = keypoints_visible - self.flip_indices = flip_indices - - def flip_(self, - img_shape: Tuple[int, int], - direction: str = 'horizontal') -> None: - """Flip boxes & kpts horizontally in-place. - - Args: - img_shape (Tuple[int, int]): A tuple of image height and width. - direction (str): Flip direction, options are "horizontal", - "vertical" and "diagonal". Defaults to "horizontal" - """ - assert direction == 'horizontal' - super().flip_(img_shape, direction) - self.keypoints[..., 0] = img_shape[1] - self.keypoints[..., 0] - self.keypoints = self.keypoints[:, self.flip_indices] - self.keypoints_visible = self.keypoints_visible[:, self.flip_indices] - - def translate_(self, distances: Tuple[float, float]) -> None: - """Translate boxes and keypoints in-place. - - Args: - distances (Tuple[float, float]): translate distances. The first - is horizontal distance and the second is vertical distance. - """ - boxes = self.tensor - assert len(distances) == 2 - self.tensor = boxes + boxes.new_tensor(distances).repeat(2) - distances = self.keypoints.new_tensor(distances).reshape(1, 1, 2) - self.keypoints = self.keypoints + distances - - def rescale_(self, scale_factor: Tuple[float, float]) -> None: - """Rescale boxes & keypoints w.r.t. rescale_factor in-place. - - Note: - Both ``rescale_`` and ``resize_`` will enlarge or shrink boxes - w.r.t ``scale_facotr``. The difference is that ``resize_`` only - changes the width and the height of boxes, but ``rescale_`` also - rescales the box centers simultaneously. - - Args: - scale_factor (Tuple[float, float]): factors for scaling boxes. - The length should be 2. - """ - boxes = self.tensor - assert len(scale_factor) == 2 - - self.tensor = boxes * boxes.new_tensor(scale_factor).repeat(2) - scale_factor = self.keypoints.new_tensor(scale_factor).reshape(1, 1, 2) - self.keypoints = self.keypoints * scale_factor - - def clip_(self, img_shape: Tuple[int, int]) -> None: - """Clip bounding boxes and set invisible keypoints outside the image - boundary in-place. - - Args: - img_shape (Tuple[int, int]): A tuple of image height and width. 
- """ - boxes = self.tensor - boxes[..., 0::2] = boxes[..., 0::2].clamp(0, img_shape[1]) - boxes[..., 1::2] = boxes[..., 1::2].clamp(0, img_shape[0]) - - kpt_outside = torch.logical_or( - torch.logical_or(self.keypoints[..., 0] < 0, - self.keypoints[..., 1] < 0), - torch.logical_or(self.keypoints[..., 0] > img_shape[1], - self.keypoints[..., 1] > img_shape[0])) - self.keypoints_visible[kpt_outside] *= 0 - - def project_(self, homography_matrix: Union[Tensor, np.ndarray]) -> None: - """Geometrically transform bounding boxes and keypoints in-place using - a homography matrix. - - Args: - homography_matrix (Tensor or np.ndarray): A 3x3 tensor or ndarray - representing the homography matrix for the transformation. - """ - boxes = self.tensor - if isinstance(homography_matrix, np.ndarray): - homography_matrix = boxes.new_tensor(homography_matrix) - - # Convert boxes to corners in homogeneous coordinates - corners = self.hbox2corner(boxes) - corners = torch.cat( - [corners, corners.new_ones(*corners.shape[:-1], 1)], dim=-1) - - # Convert keypoints to homogeneous coordinates - keypoints = torch.cat([ - self.keypoints, - self.keypoints.new_ones(*self.keypoints.shape[:-1], 1) - ], - dim=-1) - - # Transpose corners and keypoints for matrix multiplication - corners_T = torch.transpose(corners, -1, -2) - keypoints_T = torch.transpose(keypoints, -1, 0).contiguous().flatten(1) - - # Apply homography matrix to corners and keypoints - corners_T = torch.matmul(homography_matrix, corners_T) - keypoints_T = torch.matmul(homography_matrix, keypoints_T) - - # Transpose back to original shape - corners = torch.transpose(corners_T, -1, -2) - keypoints_T = keypoints_T.reshape(3, self.keypoints.shape[1], -1) - keypoints = torch.transpose(keypoints_T, -1, 0).contiguous() - - # Convert corners and keypoints back to non-homogeneous coordinates - corners = corners[..., :2] / corners[..., 2:3] - keypoints = keypoints[..., :2] / keypoints[..., 2:3] - - # Convert corners back to bounding boxes and update object attributes - self.tensor = self.corner2hbox(corners) - self.keypoints = keypoints - - @classmethod - def cat(cls: Type[T], box_list: Sequence[T], dim: int = 0) -> T: - """Cancatenates an instance list into one single instance. Similar to - ``torch.cat``. - - Args: - box_list (Sequence[T]): A sequence of instances. - dim (int): The dimension over which the box and keypoint are - concatenated. Defaults to 0. - - Returns: - T: Concatenated instance. - """ - assert isinstance(box_list, Sequence) - if len(box_list) == 0: - raise ValueError('box_list should not be a empty list.') - - assert dim == 0 - assert all(isinstance(boxes, cls) for boxes in box_list) - - th_box_list = torch.cat([boxes.tensor for boxes in box_list], dim=dim) - th_kpt_list = torch.cat([boxes.keypoints for boxes in box_list], - dim=dim) - th_kpt_vis_list = torch.cat( - [boxes.keypoints_visible for boxes in box_list], dim=dim) - flip_indices = box_list[0].flip_indices - return cls( - th_box_list, - th_kpt_list, - th_kpt_vis_list, - clone=False, - flip_indices=flip_indices) - - def __getitem__(self: T, index: IndexType) -> T: - """Rewrite getitem to protect the last dimension shape.""" - boxes = self.tensor - if isinstance(index, np.ndarray): - index = torch.as_tensor(index, device=self.device) - if isinstance(index, Tensor) and index.dtype == torch.bool: - assert index.dim() < boxes.dim() - elif isinstance(index, tuple): - assert len(index) < boxes.dim() - # `Ellipsis`(...) is commonly used in index like [None, ...]. 
- # When `Ellipsis` is in index, it must be the last item. - if Ellipsis in index: - assert index[-1] is Ellipsis - - boxes = boxes[index] - keypoints = self.keypoints[index] - keypoints_visible = self.keypoints_visible[index] - if boxes.dim() == 1: - boxes = boxes.reshape(1, -1) - keypoints = keypoints.reshape(1, -1, 2) - keypoints_visible = keypoints_visible.reshape(1, -1) - return type(self)( - boxes, - keypoints, - keypoints_visible, - flip_indices=self.flip_indices, - clone=False) - - @property - def num_keypoints(self) -> Tensor: - """Compute the number of visible keypoints for each object.""" - return self.keypoints_visible.sum(dim=1).int() - - def __deepcopy__(self, memo): - """Only clone the tensors when applying deepcopy.""" - cls = self.__class__ - other = cls.__new__(cls) - memo[id(self)] = other - other.tensor = self.tensor.clone() - other.keypoints = self.keypoints.clone() - other.keypoints_visible = self.keypoints_visible.clone() - other.flip_indices = deepcopy(self.flip_indices) - return other - - def clone(self: T) -> T: - """Reload ``clone`` for tensors.""" - return type(self)( - self.tensor, - self.keypoints, - self.keypoints_visible, - flip_indices=self.flip_indices, - clone=True) - - def to(self: T, *args, **kwargs) -> T: - """Reload ``to`` for tensors.""" - return type(self)( - self.tensor.to(*args, **kwargs), - self.keypoints.to(*args, **kwargs), - self.keypoints_visible.to(*args, **kwargs), - flip_indices=self.flip_indices, - clone=False) +# Copyright (c) OpenMMLab. All rights reserved. +from copy import deepcopy +from typing import List, Optional, Sequence, Tuple, Type, TypeVar, Union + +import numpy as np +import torch +from mmdet.structures.bbox import HorizontalBoxes +from torch import Tensor + +DeviceType = Union[str, torch.device] +T = TypeVar('T') +IndexType = Union[slice, int, list, torch.LongTensor, torch.cuda.LongTensor, + torch.BoolTensor, torch.cuda.BoolTensor, np.ndarray] + + +class BBoxKeypoints(HorizontalBoxes): + """The BBoxKeypoints class is a combination of bounding boxes and keypoints + representation. The box format used in BBoxKeypoints is the same as + HorizontalBoxes. + + Args: + data (Tensor or np.ndarray): The box data with shape of + (N, 4). + keypoints (Tensor or np.ndarray): The keypoint data with shape of + (N, K, 2). + keypoints_visible (Tensor or np.ndarray): The visibility of keypoints + with shape of (N, K). + dtype (torch.dtype, Optional): data type of boxes. Defaults to None. + device (str or torch.device, Optional): device of boxes. + Default to None. + clone (bool): Whether clone ``boxes`` or not. Defaults to True. + mode (str, Optional): the mode of boxes. If it is 'cxcywh', the + `data` will be converted to 'xyxy' mode. Defaults to None. + flip_indices (list, Optional): The indices of keypoints when the + images is flipped. Defaults to None. + + Notes: + N: the number of instances. + K: the number of keypoints. 
+ """ + + def __init__(self, + data: Union[Tensor, np.ndarray], + keypoints: Union[Tensor, np.ndarray], + keypoints_visible: Union[Tensor, np.ndarray], + dtype: Optional[torch.dtype] = None, + device: Optional[DeviceType] = None, + clone: bool = True, + in_mode: Optional[str] = None, + flip_indices: Optional[List] = None) -> None: + + super().__init__( + data=data, + dtype=dtype, + device=device, + clone=clone, + in_mode=in_mode) + + assert len(data) == len(keypoints) + assert len(data) == len(keypoints_visible) + + assert keypoints.ndim == 3 + assert keypoints_visible.ndim == 2 + + keypoints = torch.as_tensor(keypoints) + keypoints_visible = torch.as_tensor(keypoints_visible) + + if device is not None: + keypoints = keypoints.to(device=device) + keypoints_visible = keypoints_visible.to(device=device) + + if clone: + keypoints = keypoints.clone() + keypoints_visible = keypoints_visible.clone() + + self.keypoints = keypoints + self.keypoints_visible = keypoints_visible + self.flip_indices = flip_indices + + def flip_(self, + img_shape: Tuple[int, int], + direction: str = 'horizontal') -> None: + """Flip boxes & kpts horizontally in-place. + + Args: + img_shape (Tuple[int, int]): A tuple of image height and width. + direction (str): Flip direction, options are "horizontal", + "vertical" and "diagonal". Defaults to "horizontal" + """ + assert direction == 'horizontal' + super().flip_(img_shape, direction) + self.keypoints[..., 0] = img_shape[1] - self.keypoints[..., 0] + self.keypoints = self.keypoints[:, self.flip_indices] + self.keypoints_visible = self.keypoints_visible[:, self.flip_indices] + + def translate_(self, distances: Tuple[float, float]) -> None: + """Translate boxes and keypoints in-place. + + Args: + distances (Tuple[float, float]): translate distances. The first + is horizontal distance and the second is vertical distance. + """ + boxes = self.tensor + assert len(distances) == 2 + self.tensor = boxes + boxes.new_tensor(distances).repeat(2) + distances = self.keypoints.new_tensor(distances).reshape(1, 1, 2) + self.keypoints = self.keypoints + distances + + def rescale_(self, scale_factor: Tuple[float, float]) -> None: + """Rescale boxes & keypoints w.r.t. rescale_factor in-place. + + Note: + Both ``rescale_`` and ``resize_`` will enlarge or shrink boxes + w.r.t ``scale_facotr``. The difference is that ``resize_`` only + changes the width and the height of boxes, but ``rescale_`` also + rescales the box centers simultaneously. + + Args: + scale_factor (Tuple[float, float]): factors for scaling boxes. + The length should be 2. + """ + boxes = self.tensor + assert len(scale_factor) == 2 + + self.tensor = boxes * boxes.new_tensor(scale_factor).repeat(2) + scale_factor = self.keypoints.new_tensor(scale_factor).reshape(1, 1, 2) + self.keypoints = self.keypoints * scale_factor + + def clip_(self, img_shape: Tuple[int, int]) -> None: + """Clip bounding boxes and set invisible keypoints outside the image + boundary in-place. + + Args: + img_shape (Tuple[int, int]): A tuple of image height and width. 
+ """ + boxes = self.tensor + boxes[..., 0::2] = boxes[..., 0::2].clamp(0, img_shape[1]) + boxes[..., 1::2] = boxes[..., 1::2].clamp(0, img_shape[0]) + + kpt_outside = torch.logical_or( + torch.logical_or(self.keypoints[..., 0] < 0, + self.keypoints[..., 1] < 0), + torch.logical_or(self.keypoints[..., 0] > img_shape[1], + self.keypoints[..., 1] > img_shape[0])) + self.keypoints_visible[kpt_outside] *= 0 + + def project_(self, homography_matrix: Union[Tensor, np.ndarray]) -> None: + """Geometrically transform bounding boxes and keypoints in-place using + a homography matrix. + + Args: + homography_matrix (Tensor or np.ndarray): A 3x3 tensor or ndarray + representing the homography matrix for the transformation. + """ + boxes = self.tensor + if isinstance(homography_matrix, np.ndarray): + homography_matrix = boxes.new_tensor(homography_matrix) + + # Convert boxes to corners in homogeneous coordinates + corners = self.hbox2corner(boxes) + corners = torch.cat( + [corners, corners.new_ones(*corners.shape[:-1], 1)], dim=-1) + + # Convert keypoints to homogeneous coordinates + keypoints = torch.cat([ + self.keypoints, + self.keypoints.new_ones(*self.keypoints.shape[:-1], 1) + ], + dim=-1) + + # Transpose corners and keypoints for matrix multiplication + corners_T = torch.transpose(corners, -1, -2) + keypoints_T = torch.transpose(keypoints, -1, 0).contiguous().flatten(1) + + # Apply homography matrix to corners and keypoints + corners_T = torch.matmul(homography_matrix, corners_T) + keypoints_T = torch.matmul(homography_matrix, keypoints_T) + + # Transpose back to original shape + corners = torch.transpose(corners_T, -1, -2) + keypoints_T = keypoints_T.reshape(3, self.keypoints.shape[1], -1) + keypoints = torch.transpose(keypoints_T, -1, 0).contiguous() + + # Convert corners and keypoints back to non-homogeneous coordinates + corners = corners[..., :2] / corners[..., 2:3] + keypoints = keypoints[..., :2] / keypoints[..., 2:3] + + # Convert corners back to bounding boxes and update object attributes + self.tensor = self.corner2hbox(corners) + self.keypoints = keypoints + + @classmethod + def cat(cls: Type[T], box_list: Sequence[T], dim: int = 0) -> T: + """Cancatenates an instance list into one single instance. Similar to + ``torch.cat``. + + Args: + box_list (Sequence[T]): A sequence of instances. + dim (int): The dimension over which the box and keypoint are + concatenated. Defaults to 0. + + Returns: + T: Concatenated instance. + """ + assert isinstance(box_list, Sequence) + if len(box_list) == 0: + raise ValueError('box_list should not be a empty list.') + + assert dim == 0 + assert all(isinstance(boxes, cls) for boxes in box_list) + + th_box_list = torch.cat([boxes.tensor for boxes in box_list], dim=dim) + th_kpt_list = torch.cat([boxes.keypoints for boxes in box_list], + dim=dim) + th_kpt_vis_list = torch.cat( + [boxes.keypoints_visible for boxes in box_list], dim=dim) + flip_indices = box_list[0].flip_indices + return cls( + th_box_list, + th_kpt_list, + th_kpt_vis_list, + clone=False, + flip_indices=flip_indices) + + def __getitem__(self: T, index: IndexType) -> T: + """Rewrite getitem to protect the last dimension shape.""" + boxes = self.tensor + if isinstance(index, np.ndarray): + index = torch.as_tensor(index, device=self.device) + if isinstance(index, Tensor) and index.dtype == torch.bool: + assert index.dim() < boxes.dim() + elif isinstance(index, tuple): + assert len(index) < boxes.dim() + # `Ellipsis`(...) is commonly used in index like [None, ...]. 
+ # When `Ellipsis` is in index, it must be the last item. + if Ellipsis in index: + assert index[-1] is Ellipsis + + boxes = boxes[index] + keypoints = self.keypoints[index] + keypoints_visible = self.keypoints_visible[index] + if boxes.dim() == 1: + boxes = boxes.reshape(1, -1) + keypoints = keypoints.reshape(1, -1, 2) + keypoints_visible = keypoints_visible.reshape(1, -1) + return type(self)( + boxes, + keypoints, + keypoints_visible, + flip_indices=self.flip_indices, + clone=False) + + @property + def num_keypoints(self) -> Tensor: + """Compute the number of visible keypoints for each object.""" + return self.keypoints_visible.sum(dim=1).int() + + def __deepcopy__(self, memo): + """Only clone the tensors when applying deepcopy.""" + cls = self.__class__ + other = cls.__new__(cls) + memo[id(self)] = other + other.tensor = self.tensor.clone() + other.keypoints = self.keypoints.clone() + other.keypoints_visible = self.keypoints_visible.clone() + other.flip_indices = deepcopy(self.flip_indices) + return other + + def clone(self: T) -> T: + """Reload ``clone`` for tensors.""" + return type(self)( + self.tensor, + self.keypoints, + self.keypoints_visible, + flip_indices=self.flip_indices, + clone=True) + + def to(self: T, *args, **kwargs) -> T: + """Reload ``to`` for tensors.""" + return type(self)( + self.tensor.to(*args, **kwargs), + self.keypoints.to(*args, **kwargs), + self.keypoints_visible.to(*args, **kwargs), + flip_indices=self.flip_indices, + clone=False) diff --git a/projects/yolox_pose/datasets/coco_dataset.py b/projects/yolox_pose/datasets/coco_dataset.py index 80e36fc194..2113cb9d83 100644 --- a/projects/yolox_pose/datasets/coco_dataset.py +++ b/projects/yolox_pose/datasets/coco_dataset.py @@ -1,17 +1,17 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Any - -from mmengine.dataset import force_full_init -from mmyolo.registry import DATASETS - -from mmpose.datasets import CocoDataset as MMPoseCocoDataset - - -@DATASETS.register_module() -class CocoDataset(MMPoseCocoDataset): - - @force_full_init - def prepare_data(self, idx) -> Any: - data_info = self.get_data_info(idx) - data_info['dataset'] = self - return self.pipeline(data_info) +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Any + +from mmengine.dataset import force_full_init +from mmyolo.registry import DATASETS + +from mmpose.datasets import CocoDataset as MMPoseCocoDataset + + +@DATASETS.register_module() +class CocoDataset(MMPoseCocoDataset): + + @force_full_init + def prepare_data(self, idx) -> Any: + data_info = self.get_data_info(idx) + data_info['dataset'] = self + return self.pipeline(data_info) diff --git a/projects/yolox_pose/datasets/transforms.py b/projects/yolox_pose/datasets/transforms.py index 0f36dd2b6c..ac50ba5c90 100644 --- a/projects/yolox_pose/datasets/transforms.py +++ b/projects/yolox_pose/datasets/transforms.py @@ -1,128 +1,128 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
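The BBoxKeypoints box type added above keeps bounding boxes, keypoint coordinates and keypoint visibility in lockstep, so every geometric operation (flip, translate, rescale, clip, project) updates all three together. A minimal usage sketch with toy values, assuming the project package is importable as projects.yolox_pose (the import path is not fixed by this diff):

    import numpy as np
    from projects.yolox_pose.datasets.bbox_keypoint_structure import BBoxKeypoints  # assumed path

    boxes = BBoxKeypoints(
        data=np.array([[10., 20., 110., 220.]], dtype=np.float32),    # (N, 4) xyxy boxes
        keypoints=np.array([[[30., 40.], [60., 80.], [90., 120.]]],
                           dtype=np.float32),                         # (N, K, 2) coordinates
        keypoints_visible=np.ones((1, 3), dtype=np.float32),          # (N, K) visibility flags
        flip_indices=[0, 2, 1])                                       # keypoints 1 and 2 swap on mirroring
    boxes.flip_(img_shape=(480, 640))   # mirrors boxes and x-coordinates, reorders keypoints
    boxes.rescale_((0.5, 0.5))          # scales boxes and keypoints simultaneously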
-from typing import Union - -import numpy as np -from mmcv.transforms import BaseTransform -from mmdet.datasets.transforms import FilterAnnotations as FilterDetAnnotations -from mmdet.datasets.transforms import PackDetInputs -from mmdet.structures.bbox.box_type import autocast_box_type -from mmyolo.registry import TRANSFORMS - -from .bbox_keypoint_structure import BBoxKeypoints - - -@TRANSFORMS.register_module() -class PoseToDetConverter(BaseTransform): - """This transform converts the pose data element into a format that is - suitable for the mmdet transforms.""" - - def transform(self, results: dict) -> dict: - - results['seg_map_path'] = None - results['height'] = results['img_shape'][0] - results['width'] = results['img_shape'][1] - - num_instances = len(results.get('bbox', [])) - - if num_instances == 0: - results['bbox'] = np.empty((0, 4), dtype=np.float32) - results['keypoints'] = np.empty( - (0, len(results['flip_indices']), 2), dtype=np.float32) - results['keypoints_visible'] = np.empty( - (0, len(results['flip_indices'])), dtype=np.int32) - results['category_id'] = [] - - results['gt_bboxes'] = BBoxKeypoints( - data=results['bbox'], - keypoints=results['keypoints'], - keypoints_visible=results['keypoints_visible'], - flip_indices=results['flip_indices'], - ) - - results['gt_ignore_flags'] = np.array([False] * num_instances) - results['gt_bboxes_labels'] = np.array(results['category_id']) - 1 - - return results - - -@TRANSFORMS.register_module() -class PackDetPoseInputs(PackDetInputs): - mapping_table = { - 'gt_bboxes': 'bboxes', - 'gt_bboxes_labels': 'labels', - 'gt_masks': 'masks', - 'gt_keypoints': 'keypoints', - 'gt_keypoints_visible': 'keypoints_visible' - } - - def __init__(self, - meta_keys=('id', 'img_id', 'img_path', 'ori_shape', - 'img_shape', 'scale_factor', 'flip', - 'flip_direction', 'flip_indices', 'raw_ann_info'), - pack_transformed=False): - self.meta_keys = meta_keys - - def transform(self, results: dict) -> dict: - # Add keypoints and their visibility to the results dictionary - results['gt_keypoints'] = results['gt_bboxes'].keypoints - results['gt_keypoints_visible'] = results[ - 'gt_bboxes'].keypoints_visible - - # Ensure all keys in `self.meta_keys` are in the `results` dictionary, - # which is necessary for `PackDetInputs` but not guaranteed during - # inference with an inferencer - for key in self.meta_keys: - if key not in results: - results[key] = None - return super().transform(results) - - -@TRANSFORMS.register_module() -class FilterDetPoseAnnotations(FilterDetAnnotations): - """Filter invalid annotations. - - In addition to the conditions checked by ``FilterDetAnnotations``, this - filter adds a new condition requiring instances to have at least one - visible keypoints. - """ - - @autocast_box_type() - def transform(self, results: dict) -> Union[dict, None]: - """Transform function to filter annotations. - - Args: - results (dict): Result dict. - - Returns: - dict: Updated result dict. 
- """ - assert 'gt_bboxes' in results - gt_bboxes = results['gt_bboxes'] - if gt_bboxes.shape[0] == 0: - return results - - tests = [] - if self.by_box: - tests.append(((gt_bboxes.widths > self.min_gt_bbox_wh[0]) & - (gt_bboxes.heights > self.min_gt_bbox_wh[1]) & - (gt_bboxes.num_keypoints > 0)).numpy()) - - if self.by_mask: - assert 'gt_masks' in results - gt_masks = results['gt_masks'] - tests.append(gt_masks.areas >= self.min_gt_mask_area) - - keep = tests[0] - for t in tests[1:]: - keep = keep & t - - if not keep.any(): - if self.keep_empty: - return None - - keys = ('gt_bboxes', 'gt_bboxes_labels', 'gt_masks', 'gt_ignore_flags') - for key in keys: - if key in results: - results[key] = results[key][keep] - - return results +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Union + +import numpy as np +from mmcv.transforms import BaseTransform +from mmdet.datasets.transforms import FilterAnnotations as FilterDetAnnotations +from mmdet.datasets.transforms import PackDetInputs +from mmdet.structures.bbox.box_type import autocast_box_type +from mmyolo.registry import TRANSFORMS + +from .bbox_keypoint_structure import BBoxKeypoints + + +@TRANSFORMS.register_module() +class PoseToDetConverter(BaseTransform): + """This transform converts the pose data element into a format that is + suitable for the mmdet transforms.""" + + def transform(self, results: dict) -> dict: + + results['seg_map_path'] = None + results['height'] = results['img_shape'][0] + results['width'] = results['img_shape'][1] + + num_instances = len(results.get('bbox', [])) + + if num_instances == 0: + results['bbox'] = np.empty((0, 4), dtype=np.float32) + results['keypoints'] = np.empty( + (0, len(results['flip_indices']), 2), dtype=np.float32) + results['keypoints_visible'] = np.empty( + (0, len(results['flip_indices'])), dtype=np.int32) + results['category_id'] = [] + + results['gt_bboxes'] = BBoxKeypoints( + data=results['bbox'], + keypoints=results['keypoints'], + keypoints_visible=results['keypoints_visible'], + flip_indices=results['flip_indices'], + ) + + results['gt_ignore_flags'] = np.array([False] * num_instances) + results['gt_bboxes_labels'] = np.array(results['category_id']) - 1 + + return results + + +@TRANSFORMS.register_module() +class PackDetPoseInputs(PackDetInputs): + mapping_table = { + 'gt_bboxes': 'bboxes', + 'gt_bboxes_labels': 'labels', + 'gt_masks': 'masks', + 'gt_keypoints': 'keypoints', + 'gt_keypoints_visible': 'keypoints_visible' + } + + def __init__(self, + meta_keys=('id', 'img_id', 'img_path', 'ori_shape', + 'img_shape', 'scale_factor', 'flip', + 'flip_direction', 'flip_indices', 'raw_ann_info'), + pack_transformed=False): + self.meta_keys = meta_keys + + def transform(self, results: dict) -> dict: + # Add keypoints and their visibility to the results dictionary + results['gt_keypoints'] = results['gt_bboxes'].keypoints + results['gt_keypoints_visible'] = results[ + 'gt_bboxes'].keypoints_visible + + # Ensure all keys in `self.meta_keys` are in the `results` dictionary, + # which is necessary for `PackDetInputs` but not guaranteed during + # inference with an inferencer + for key in self.meta_keys: + if key not in results: + results[key] = None + return super().transform(results) + + +@TRANSFORMS.register_module() +class FilterDetPoseAnnotations(FilterDetAnnotations): + """Filter invalid annotations. + + In addition to the conditions checked by ``FilterDetAnnotations``, this + filter adds a new condition requiring instances to have at least one + visible keypoints. 
+ """ + + @autocast_box_type() + def transform(self, results: dict) -> Union[dict, None]: + """Transform function to filter annotations. + + Args: + results (dict): Result dict. + + Returns: + dict: Updated result dict. + """ + assert 'gt_bboxes' in results + gt_bboxes = results['gt_bboxes'] + if gt_bboxes.shape[0] == 0: + return results + + tests = [] + if self.by_box: + tests.append(((gt_bboxes.widths > self.min_gt_bbox_wh[0]) & + (gt_bboxes.heights > self.min_gt_bbox_wh[1]) & + (gt_bboxes.num_keypoints > 0)).numpy()) + + if self.by_mask: + assert 'gt_masks' in results + gt_masks = results['gt_masks'] + tests.append(gt_masks.areas >= self.min_gt_mask_area) + + keep = tests[0] + for t in tests[1:]: + keep = keep & t + + if not keep.any(): + if self.keep_empty: + return None + + keys = ('gt_bboxes', 'gt_bboxes_labels', 'gt_masks', 'gt_ignore_flags') + for key in keys: + if key in results: + results[key] = results[key][keep] + + return results diff --git a/projects/yolox_pose/models/__init__.py b/projects/yolox_pose/models/__init__.py index 0d4804e70a..bed1faab9c 100644 --- a/projects/yolox_pose/models/__init__.py +++ b/projects/yolox_pose/models/__init__.py @@ -1,5 +1,5 @@ -from .assigner import * # noqa -from .data_preprocessor import * # noqa -from .oks_loss import * # noqa -from .utils import * # noqa -from .yolox_pose_head import * # noqa +from .assigner import * # noqa +from .data_preprocessor import * # noqa +from .oks_loss import * # noqa +from .utils import * # noqa +from .yolox_pose_head import * # noqa diff --git a/projects/yolox_pose/models/assigner.py b/projects/yolox_pose/models/assigner.py index 960dd3b385..223dcaea12 100644 --- a/projects/yolox_pose/models/assigner.py +++ b/projects/yolox_pose/models/assigner.py @@ -1,208 +1,208 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Optional, Tuple - -import torch -import torch.nn.functional as F -from mmdet.models.task_modules.assigners import AssignResult, SimOTAAssigner -from mmdet.utils import ConfigType -from mmengine.structures import InstanceData -from mmyolo.registry import MODELS, TASK_UTILS -from torch import Tensor - -INF = 100000.0 -EPS = 1.0e-7 - - -@TASK_UTILS.register_module() -class PoseSimOTAAssigner(SimOTAAssigner): - - def __init__(self, - center_radius: float = 2.5, - candidate_topk: int = 10, - iou_weight: float = 3.0, - cls_weight: float = 1.0, - oks_weight: float = 0.0, - vis_weight: float = 0.0, - iou_calculator: ConfigType = dict(type='BboxOverlaps2D'), - oks_calculator: ConfigType = dict(type='OksLoss')): - self.center_radius = center_radius - self.candidate_topk = candidate_topk - self.iou_weight = iou_weight - self.cls_weight = cls_weight - self.oks_weight = oks_weight - self.vis_weight = vis_weight - - self.iou_calculator = TASK_UTILS.build(iou_calculator) - self.oks_calculator = MODELS.build(oks_calculator) - - def assign(self, - pred_instances: InstanceData, - gt_instances: InstanceData, - gt_instances_ignore: Optional[InstanceData] = None, - **kwargs) -> AssignResult: - """Assign gt to priors using SimOTA. - - Args: - pred_instances (:obj:`InstanceData`): Instances of model - predictions. It includes ``priors``, and the priors can - be anchors or points, or the bboxes predicted by the - previous stage, has shape (n, 4). The bboxes predicted by - the current model or stage will be named ``bboxes``, - ``labels``, and ``scores``, the same as the ``InstanceData`` - in other places. - gt_instances (:obj:`InstanceData`): Ground truth of instance - annotations. 
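PoseToDetConverter above is the bridge between MMPose-style annotations and the mmdet transforms: it repackages bbox, keypoints and keypoints_visible into a single BBoxKeypoints instance stored under gt_bboxes, and shifts category ids to zero-based labels. A hedged sketch with toy values (the import path is an assumption, not part of the diff):

    import numpy as np
    from projects.yolox_pose.datasets.transforms import PoseToDetConverter  # assumed path

    results = dict(
        img_shape=(480, 640),
        flip_indices=[0, 2, 1],
        bbox=np.array([[10., 20., 110., 220.]], dtype=np.float32),
        keypoints=np.zeros((1, 3, 2), dtype=np.float32),
        keypoints_visible=np.ones((1, 3), dtype=np.int32),
        category_id=[1],
    )
    results = PoseToDetConverter().transform(results)
    assert results['gt_bboxes'].keypoints.shape == (1, 3, 2)   # packed into BBoxKeypoints
    assert results['gt_bboxes_labels'].tolist() == [0]         # category_id 1 -> label 0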
It usually includes ``bboxes``, with shape (k, 4), - and ``labels``, with shape (k, ). - gt_instances_ignore (:obj:`InstanceData`, optional): Instances - to be ignored during training. It includes ``bboxes`` - attribute data that is ignored during training and testing. - Defaults to None. - Returns: - obj:`AssignResult`: The assigned result. - """ - gt_bboxes = gt_instances.bboxes - gt_labels = gt_instances.labels - gt_keypoints = gt_instances.keypoints - gt_keypoints_visible = gt_instances.keypoints_visible - num_gt = gt_bboxes.size(0) - - decoded_bboxes = pred_instances.bboxes[..., :4] - pred_kpts = pred_instances.bboxes[..., 4:] - pred_kpts = pred_kpts.reshape(*pred_kpts.shape[:-1], -1, 3) - pred_kpts_vis = pred_kpts[..., -1] - pred_kpts = pred_kpts[..., :2] - pred_scores = pred_instances.scores - priors = pred_instances.priors - num_bboxes = decoded_bboxes.size(0) - - # assign 0 by default - assigned_gt_inds = decoded_bboxes.new_full((num_bboxes, ), - 0, - dtype=torch.long) - if num_gt == 0 or num_bboxes == 0: - # No ground truth or boxes, return empty assignment - max_overlaps = decoded_bboxes.new_zeros((num_bboxes, )) - assigned_labels = decoded_bboxes.new_full((num_bboxes, ), - -1, - dtype=torch.long) - return AssignResult( - num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) - - valid_mask, is_in_boxes_and_center = self.get_in_gt_and_in_center_info( - priors, gt_bboxes) - valid_decoded_bbox = decoded_bboxes[valid_mask] - valid_pred_scores = pred_scores[valid_mask] - valid_pred_kpts = pred_kpts[valid_mask] - valid_pred_kpts_vis = pred_kpts_vis[valid_mask] - num_valid = valid_decoded_bbox.size(0) - if num_valid == 0: - # No valid bboxes, return empty assignment - max_overlaps = decoded_bboxes.new_zeros((num_bboxes, )) - assigned_labels = decoded_bboxes.new_full((num_bboxes, ), - -1, - dtype=torch.long) - return AssignResult( - num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) - - cost_matrix = (~is_in_boxes_and_center) * INF - - # calculate iou - pairwise_ious = self.iou_calculator(valid_decoded_bbox, gt_bboxes) - if self.iou_weight > 0: - iou_cost = -torch.log(pairwise_ious + EPS) - cost_matrix = cost_matrix + iou_cost * self.iou_weight - - # calculate oks - pairwise_oks = self.oks_calculator.compute_oks( - valid_pred_kpts.unsqueeze(1), # [num_valid, -1, k, 2] - gt_keypoints.unsqueeze(0), # [1, num_gt, k, 2] - gt_keypoints_visible.unsqueeze(0), # [1, num_gt, k] - bboxes=gt_bboxes.unsqueeze(0), # [1, num_gt, 4] - ) # -> [num_valid, num_gt] - if self.oks_weight > 0: - oks_cost = -torch.log(pairwise_oks + EPS) - cost_matrix = cost_matrix + oks_cost * self.oks_weight - - # calculate cls - if self.cls_weight > 0: - gt_onehot_label = ( - F.one_hot(gt_labels.to(torch.int64), - pred_scores.shape[-1]).float().unsqueeze(0).repeat( - num_valid, 1, 1)) - - valid_pred_scores = valid_pred_scores.unsqueeze(1).repeat( - 1, num_gt, 1) - # disable AMP autocast to avoid overflow - with torch.cuda.amp.autocast(enabled=False): - cls_cost = ( - F.binary_cross_entropy( - valid_pred_scores.to(dtype=torch.float32), - gt_onehot_label, - reduction='none', - ).sum(-1).to(dtype=valid_pred_scores.dtype)) - cost_matrix = cost_matrix + cls_cost * self.cls_weight - - # calculate vis - if self.vis_weight > 0: - valid_pred_kpts_vis = valid_pred_kpts_vis.sigmoid().unsqueeze( - 1).repeat(1, num_gt, 1) # [num_valid, 1, k] - gt_kpt_vis = gt_keypoints_visible.unsqueeze( - 0).float() # [1, num_gt, k] - with torch.cuda.amp.autocast(enabled=False): - vis_cost = ( - F.binary_cross_entropy( - 
valid_pred_kpts_vis.to(dtype=torch.float32), - gt_kpt_vis.repeat(num_valid, 1, 1), - reduction='none', - ).sum(-1).to(dtype=valid_pred_kpts_vis.dtype)) - cost_matrix = cost_matrix + vis_cost * self.vis_weight - - # mixed metric - pairwise_oks = pairwise_oks.pow(0.5) - matched_pred_oks, matched_gt_inds = \ - self.dynamic_k_matching( - cost_matrix, pairwise_ious, pairwise_oks, num_gt, valid_mask) - - # convert to AssignResult format - assigned_gt_inds[valid_mask] = matched_gt_inds + 1 - assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) - assigned_labels[valid_mask] = gt_labels[matched_gt_inds].long() - max_overlaps = assigned_gt_inds.new_full((num_bboxes, ), - -INF, - dtype=torch.float32) - max_overlaps[valid_mask] = matched_pred_oks - return AssignResult( - num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) - - def dynamic_k_matching(self, cost: Tensor, pairwise_ious: Tensor, - pairwise_oks: Tensor, num_gt: int, - valid_mask: Tensor) -> Tuple[Tensor, Tensor]: - """Use IoU and matching cost to calculate the dynamic top-k positive - targets.""" - matching_matrix = torch.zeros_like(cost, dtype=torch.uint8) - # select candidate topk ious for dynamic-k calculation - candidate_topk = min(self.candidate_topk, pairwise_ious.size(0)) - topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0) - # calculate dynamic k for each gt - dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1) - for gt_idx in range(num_gt): - _, pos_idx = torch.topk( - cost[:, gt_idx], k=dynamic_ks[gt_idx], largest=False) - matching_matrix[:, gt_idx][pos_idx] = 1 - - del topk_ious, dynamic_ks, pos_idx - - prior_match_gt_mask = matching_matrix.sum(1) > 1 - if prior_match_gt_mask.sum() > 0: - cost_min, cost_argmin = torch.min( - cost[prior_match_gt_mask, :], dim=1) - matching_matrix[prior_match_gt_mask, :] *= 0 - matching_matrix[prior_match_gt_mask, cost_argmin] = 1 - # get foreground mask inside box and center prior - fg_mask_inboxes = matching_matrix.sum(1) > 0 - valid_mask[valid_mask.clone()] = fg_mask_inboxes - - matched_gt_inds = matching_matrix[fg_mask_inboxes, :].argmax(1) - matched_pred_oks = (matching_matrix * - pairwise_oks).sum(1)[fg_mask_inboxes] - return matched_pred_oks, matched_gt_inds +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Tuple + +import torch +import torch.nn.functional as F +from mmdet.models.task_modules.assigners import AssignResult, SimOTAAssigner +from mmdet.utils import ConfigType +from mmengine.structures import InstanceData +from mmyolo.registry import MODELS, TASK_UTILS +from torch import Tensor + +INF = 100000.0 +EPS = 1.0e-7 + + +@TASK_UTILS.register_module() +class PoseSimOTAAssigner(SimOTAAssigner): + + def __init__(self, + center_radius: float = 2.5, + candidate_topk: int = 10, + iou_weight: float = 3.0, + cls_weight: float = 1.0, + oks_weight: float = 0.0, + vis_weight: float = 0.0, + iou_calculator: ConfigType = dict(type='BboxOverlaps2D'), + oks_calculator: ConfigType = dict(type='OksLoss')): + self.center_radius = center_radius + self.candidate_topk = candidate_topk + self.iou_weight = iou_weight + self.cls_weight = cls_weight + self.oks_weight = oks_weight + self.vis_weight = vis_weight + + self.iou_calculator = TASK_UTILS.build(iou_calculator) + self.oks_calculator = MODELS.build(oks_calculator) + + def assign(self, + pred_instances: InstanceData, + gt_instances: InstanceData, + gt_instances_ignore: Optional[InstanceData] = None, + **kwargs) -> AssignResult: + """Assign gt to priors using SimOTA. 
+ + Args: + pred_instances (:obj:`InstanceData`): Instances of model + predictions. It includes ``priors``, and the priors can + be anchors or points, or the bboxes predicted by the + previous stage, has shape (n, 4). The bboxes predicted by + the current model or stage will be named ``bboxes``, + ``labels``, and ``scores``, the same as the ``InstanceData`` + in other places. + gt_instances (:obj:`InstanceData`): Ground truth of instance + annotations. It usually includes ``bboxes``, with shape (k, 4), + and ``labels``, with shape (k, ). + gt_instances_ignore (:obj:`InstanceData`, optional): Instances + to be ignored during training. It includes ``bboxes`` + attribute data that is ignored during training and testing. + Defaults to None. + Returns: + obj:`AssignResult`: The assigned result. + """ + gt_bboxes = gt_instances.bboxes + gt_labels = gt_instances.labels + gt_keypoints = gt_instances.keypoints + gt_keypoints_visible = gt_instances.keypoints_visible + num_gt = gt_bboxes.size(0) + + decoded_bboxes = pred_instances.bboxes[..., :4] + pred_kpts = pred_instances.bboxes[..., 4:] + pred_kpts = pred_kpts.reshape(*pred_kpts.shape[:-1], -1, 3) + pred_kpts_vis = pred_kpts[..., -1] + pred_kpts = pred_kpts[..., :2] + pred_scores = pred_instances.scores + priors = pred_instances.priors + num_bboxes = decoded_bboxes.size(0) + + # assign 0 by default + assigned_gt_inds = decoded_bboxes.new_full((num_bboxes, ), + 0, + dtype=torch.long) + if num_gt == 0 or num_bboxes == 0: + # No ground truth or boxes, return empty assignment + max_overlaps = decoded_bboxes.new_zeros((num_bboxes, )) + assigned_labels = decoded_bboxes.new_full((num_bboxes, ), + -1, + dtype=torch.long) + return AssignResult( + num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) + + valid_mask, is_in_boxes_and_center = self.get_in_gt_and_in_center_info( + priors, gt_bboxes) + valid_decoded_bbox = decoded_bboxes[valid_mask] + valid_pred_scores = pred_scores[valid_mask] + valid_pred_kpts = pred_kpts[valid_mask] + valid_pred_kpts_vis = pred_kpts_vis[valid_mask] + num_valid = valid_decoded_bbox.size(0) + if num_valid == 0: + # No valid bboxes, return empty assignment + max_overlaps = decoded_bboxes.new_zeros((num_bboxes, )) + assigned_labels = decoded_bboxes.new_full((num_bboxes, ), + -1, + dtype=torch.long) + return AssignResult( + num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) + + cost_matrix = (~is_in_boxes_and_center) * INF + + # calculate iou + pairwise_ious = self.iou_calculator(valid_decoded_bbox, gt_bboxes) + if self.iou_weight > 0: + iou_cost = -torch.log(pairwise_ious + EPS) + cost_matrix = cost_matrix + iou_cost * self.iou_weight + + # calculate oks + pairwise_oks = self.oks_calculator.compute_oks( + valid_pred_kpts.unsqueeze(1), # [num_valid, -1, k, 2] + gt_keypoints.unsqueeze(0), # [1, num_gt, k, 2] + gt_keypoints_visible.unsqueeze(0), # [1, num_gt, k] + bboxes=gt_bboxes.unsqueeze(0), # [1, num_gt, 4] + ) # -> [num_valid, num_gt] + if self.oks_weight > 0: + oks_cost = -torch.log(pairwise_oks + EPS) + cost_matrix = cost_matrix + oks_cost * self.oks_weight + + # calculate cls + if self.cls_weight > 0: + gt_onehot_label = ( + F.one_hot(gt_labels.to(torch.int64), + pred_scores.shape[-1]).float().unsqueeze(0).repeat( + num_valid, 1, 1)) + + valid_pred_scores = valid_pred_scores.unsqueeze(1).repeat( + 1, num_gt, 1) + # disable AMP autocast to avoid overflow + with torch.cuda.amp.autocast(enabled=False): + cls_cost = ( + F.binary_cross_entropy( + valid_pred_scores.to(dtype=torch.float32), + 
gt_onehot_label, + reduction='none', + ).sum(-1).to(dtype=valid_pred_scores.dtype)) + cost_matrix = cost_matrix + cls_cost * self.cls_weight + + # calculate vis + if self.vis_weight > 0: + valid_pred_kpts_vis = valid_pred_kpts_vis.sigmoid().unsqueeze( + 1).repeat(1, num_gt, 1) # [num_valid, 1, k] + gt_kpt_vis = gt_keypoints_visible.unsqueeze( + 0).float() # [1, num_gt, k] + with torch.cuda.amp.autocast(enabled=False): + vis_cost = ( + F.binary_cross_entropy( + valid_pred_kpts_vis.to(dtype=torch.float32), + gt_kpt_vis.repeat(num_valid, 1, 1), + reduction='none', + ).sum(-1).to(dtype=valid_pred_kpts_vis.dtype)) + cost_matrix = cost_matrix + vis_cost * self.vis_weight + + # mixed metric + pairwise_oks = pairwise_oks.pow(0.5) + matched_pred_oks, matched_gt_inds = \ + self.dynamic_k_matching( + cost_matrix, pairwise_ious, pairwise_oks, num_gt, valid_mask) + + # convert to AssignResult format + assigned_gt_inds[valid_mask] = matched_gt_inds + 1 + assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) + assigned_labels[valid_mask] = gt_labels[matched_gt_inds].long() + max_overlaps = assigned_gt_inds.new_full((num_bboxes, ), + -INF, + dtype=torch.float32) + max_overlaps[valid_mask] = matched_pred_oks + return AssignResult( + num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) + + def dynamic_k_matching(self, cost: Tensor, pairwise_ious: Tensor, + pairwise_oks: Tensor, num_gt: int, + valid_mask: Tensor) -> Tuple[Tensor, Tensor]: + """Use IoU and matching cost to calculate the dynamic top-k positive + targets.""" + matching_matrix = torch.zeros_like(cost, dtype=torch.uint8) + # select candidate topk ious for dynamic-k calculation + candidate_topk = min(self.candidate_topk, pairwise_ious.size(0)) + topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0) + # calculate dynamic k for each gt + dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1) + for gt_idx in range(num_gt): + _, pos_idx = torch.topk( + cost[:, gt_idx], k=dynamic_ks[gt_idx], largest=False) + matching_matrix[:, gt_idx][pos_idx] = 1 + + del topk_ious, dynamic_ks, pos_idx + + prior_match_gt_mask = matching_matrix.sum(1) > 1 + if prior_match_gt_mask.sum() > 0: + cost_min, cost_argmin = torch.min( + cost[prior_match_gt_mask, :], dim=1) + matching_matrix[prior_match_gt_mask, :] *= 0 + matching_matrix[prior_match_gt_mask, cost_argmin] = 1 + # get foreground mask inside box and center prior + fg_mask_inboxes = matching_matrix.sum(1) > 0 + valid_mask[valid_mask.clone()] = fg_mask_inboxes + + matched_gt_inds = matching_matrix[fg_mask_inboxes, :].argmax(1) + matched_pred_oks = (matching_matrix * + pairwise_oks).sum(1)[fg_mask_inboxes] + return matched_pred_oks, matched_gt_inds diff --git a/projects/yolox_pose/models/data_preprocessor.py b/projects/yolox_pose/models/data_preprocessor.py index 1a3a371aed..60945fb35a 100644 --- a/projects/yolox_pose/models/data_preprocessor.py +++ b/projects/yolox_pose/models/data_preprocessor.py @@ -1,33 +1,33 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import List, Tuple - -from mmdet.models import BatchSyncRandomResize -from mmyolo.registry import MODELS -from torch import Tensor - -from mmpose.structures import PoseDataSample - - -@MODELS.register_module() -class PoseBatchSyncRandomResize(BatchSyncRandomResize): - """Batch random resize which synchronizes the random size across ranks. - - This transform is similar to `mmdet.BatchSyncRandomResize`, but it also - rescales the keypoints coordinates simultaneously. 
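The PoseSimOTAAssigner above extends SimOTA matching with optional OKS and keypoint-visibility terms: the total cost sums an in-center prior, -log(IoU), -log(OKS), a classification BCE and a visibility BCE, each scaled by its weight. A configuration sketch showing how the extra terms could be enabled (the weight values and the metainfo path are illustrative assumptions, not taken from this diff):

    # Placed under the model's train_cfg; the values below are placeholders.
    assigner = dict(
        type='PoseSimOTAAssigner',
        center_radius=2.5,
        candidate_topk=10,
        iou_weight=3.0,
        cls_weight=1.0,
        oks_weight=3.0,   # assumed example value; 0 disables the OKS cost term
        vis_weight=1.0,   # assumed example value; 0 disables the visibility cost term
        oks_calculator=dict(
            type='OksLoss',
            # assumed dataset meta file providing per-keypoint sigmas
            metainfo='configs/_base_/datasets/coco.py'))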
- """ - - def forward(self, inputs: Tensor, data_samples: List[PoseDataSample] - ) -> Tuple[Tensor, List[PoseDataSample]]: - - inputs = inputs.float() - h, w = inputs.shape[-2:] - if self._input_size is None: - self._input_size = (h, w) - scale_y = self._input_size[0] / h - scale_x = self._input_size[1] / w - if scale_x != 1 or scale_y != 1: - for data_sample in data_samples: - data_sample.gt_instances.keypoints[..., 0] *= scale_x - data_sample.gt_instances.keypoints[..., 1] *= scale_y - - return super().forward(inputs, data_samples) +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Tuple + +from mmdet.models import BatchSyncRandomResize +from mmyolo.registry import MODELS +from torch import Tensor + +from mmpose.structures import PoseDataSample + + +@MODELS.register_module() +class PoseBatchSyncRandomResize(BatchSyncRandomResize): + """Batch random resize which synchronizes the random size across ranks. + + This transform is similar to `mmdet.BatchSyncRandomResize`, but it also + rescales the keypoints coordinates simultaneously. + """ + + def forward(self, inputs: Tensor, data_samples: List[PoseDataSample] + ) -> Tuple[Tensor, List[PoseDataSample]]: + + inputs = inputs.float() + h, w = inputs.shape[-2:] + if self._input_size is None: + self._input_size = (h, w) + scale_y = self._input_size[0] / h + scale_x = self._input_size[1] / w + if scale_x != 1 or scale_y != 1: + for data_sample in data_samples: + data_sample.gt_instances.keypoints[..., 0] *= scale_x + data_sample.gt_instances.keypoints[..., 1] *= scale_y + + return super().forward(inputs, data_samples) diff --git a/projects/yolox_pose/models/oks_loss.py b/projects/yolox_pose/models/oks_loss.py index c9c263ab18..7e1467b608 100644 --- a/projects/yolox_pose/models/oks_loss.py +++ b/projects/yolox_pose/models/oks_loss.py @@ -1,86 +1,86 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Optional - -import torch -import torch.nn as nn -from mmyolo.registry import MODELS -from torch import Tensor - -from mmpose.datasets.datasets.utils import parse_pose_metainfo - - -@MODELS.register_module() -class OksLoss(nn.Module): - """A PyTorch implementation of the Object Keypoint Similarity (OKS) loss as - described in the paper "YOLO-Pose: Enhancing YOLO for Multi Person Pose - Estimation Using Object Keypoint Similarity Loss" by Debapriya et al. - (2022). - - The OKS loss is used for keypoint-based object recognition and consists - of a measure of the similarity between predicted and ground truth - keypoint locations, adjusted by the size of the object in the image. - - The loss function takes as input the predicted keypoint locations, the - ground truth keypoint locations, a mask indicating which keypoints are - valid, and bounding boxes for the objects. - - Args: - metainfo (Optional[str]): Path to a JSON file containing information - about the dataset's annotations. - loss_weight (float): Weight for the loss. 
- """ - - def __init__(self, - metainfo: Optional[str] = None, - loss_weight: float = 1.0): - super().__init__() - - if metainfo is not None: - metainfo = parse_pose_metainfo(dict(from_file=metainfo)) - sigmas = metainfo.get('sigmas', None) - if sigmas is not None: - self.register_buffer('sigmas', torch.as_tensor(sigmas)) - self.loss_weight = loss_weight - - def forward(self, - output: Tensor, - target: Tensor, - target_weights: Tensor, - bboxes: Optional[Tensor] = None) -> Tensor: - oks = self.compute_oks(output, target, target_weights, bboxes) - loss = 1 - oks - return loss * self.loss_weight - - def compute_oks(self, - output: Tensor, - target: Tensor, - target_weights: Tensor, - bboxes: Optional[Tensor] = None) -> Tensor: - """Calculates the OKS loss. - - Args: - output (Tensor): Predicted keypoints in shape N x k x 2, where N - is batch size, k is the number of keypoints, and 2 are the - xy coordinates. - target (Tensor): Ground truth keypoints in the same shape as - output. - target_weights (Tensor): Mask of valid keypoints in shape N x k, - with 1 for valid and 0 for invalid. - bboxes (Optional[Tensor]): Bounding boxes in shape N x 4, - where 4 are the xyxy coordinates. - - Returns: - Tensor: The calculated OKS loss. - """ - - dist = torch.norm(output - target, dim=-1) - - if hasattr(self, 'sigmas'): - sigmas = self.sigmas.reshape(*((1, ) * (dist.ndim - 1)), -1) - dist = dist / sigmas - if bboxes is not None: - area = torch.norm(bboxes[..., 2:] - bboxes[..., :2], dim=-1) - dist = dist / area.clip(min=1e-8).unsqueeze(-1) - - return (torch.exp(-dist.pow(2) / 2) * target_weights).sum( - dim=-1) / target_weights.sum(dim=-1).clip(min=1e-8) +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional + +import torch +import torch.nn as nn +from mmyolo.registry import MODELS +from torch import Tensor + +from mmpose.datasets.datasets.utils import parse_pose_metainfo + + +@MODELS.register_module() +class OksLoss(nn.Module): + """A PyTorch implementation of the Object Keypoint Similarity (OKS) loss as + described in the paper "YOLO-Pose: Enhancing YOLO for Multi Person Pose + Estimation Using Object Keypoint Similarity Loss" by Debapriya et al. + (2022). + + The OKS loss is used for keypoint-based object recognition and consists + of a measure of the similarity between predicted and ground truth + keypoint locations, adjusted by the size of the object in the image. + + The loss function takes as input the predicted keypoint locations, the + ground truth keypoint locations, a mask indicating which keypoints are + valid, and bounding boxes for the objects. + + Args: + metainfo (Optional[str]): Path to a JSON file containing information + about the dataset's annotations. + loss_weight (float): Weight for the loss. + """ + + def __init__(self, + metainfo: Optional[str] = None, + loss_weight: float = 1.0): + super().__init__() + + if metainfo is not None: + metainfo = parse_pose_metainfo(dict(from_file=metainfo)) + sigmas = metainfo.get('sigmas', None) + if sigmas is not None: + self.register_buffer('sigmas', torch.as_tensor(sigmas)) + self.loss_weight = loss_weight + + def forward(self, + output: Tensor, + target: Tensor, + target_weights: Tensor, + bboxes: Optional[Tensor] = None) -> Tensor: + oks = self.compute_oks(output, target, target_weights, bboxes) + loss = 1 - oks + return loss * self.loss_weight + + def compute_oks(self, + output: Tensor, + target: Tensor, + target_weights: Tensor, + bboxes: Optional[Tensor] = None) -> Tensor: + """Calculates the OKS loss. 
+ + Args: + output (Tensor): Predicted keypoints in shape N x k x 2, where N + is batch size, k is the number of keypoints, and 2 are the + xy coordinates. + target (Tensor): Ground truth keypoints in the same shape as + output. + target_weights (Tensor): Mask of valid keypoints in shape N x k, + with 1 for valid and 0 for invalid. + bboxes (Optional[Tensor]): Bounding boxes in shape N x 4, + where 4 are the xyxy coordinates. + + Returns: + Tensor: The calculated OKS loss. + """ + + dist = torch.norm(output - target, dim=-1) + + if hasattr(self, 'sigmas'): + sigmas = self.sigmas.reshape(*((1, ) * (dist.ndim - 1)), -1) + dist = dist / sigmas + if bboxes is not None: + area = torch.norm(bboxes[..., 2:] - bboxes[..., :2], dim=-1) + dist = dist / area.clip(min=1e-8).unsqueeze(-1) + + return (torch.exp(-dist.pow(2) / 2) * target_weights).sum( + dim=-1) / target_weights.sum(dim=-1).clip(min=1e-8) diff --git a/projects/yolox_pose/models/utils.py b/projects/yolox_pose/models/utils.py index 63a15e9376..094a6391f9 100644 --- a/projects/yolox_pose/models/utils.py +++ b/projects/yolox_pose/models/utils.py @@ -1,92 +1,92 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from collections import defaultdict -from copy import deepcopy -from typing import Any, Callable, Dict, Optional, Tuple - - -class OutputSaveObjectWrapper: - """A wrapper class that saves the output of function calls on an object.""" - - def __init__(self, obj: Any) -> None: - self.obj = obj - self.log = defaultdict(list) - - def __getattr__(self, attr: str) -> Any: - """Overrides the default behavior when an attribute is accessed. - - - If the attribute is callable, hooks the attribute and saves the - returned value of the function call to the log. - - If the attribute is not callable, saves the attribute's value to the - log and returns the value. - """ - orig_attr = getattr(self.obj, attr) - - if not callable(orig_attr): - self.log[attr].append(orig_attr) - return orig_attr - - def hooked(*args: Tuple, **kwargs: Dict) -> Any: - """The hooked function that logs the return value of the original - function.""" - result = orig_attr(*args, **kwargs) - self.log[attr].append(result) - return result - - return hooked - - def clear(self): - """Clears the log of function call outputs.""" - self.log.clear() - - def __deepcopy__(self, memo): - """Only copy the object when applying deepcopy.""" - other = type(self)(deepcopy(self.obj)) - memo[id(self)] = other - return other - - -class OutputSaveFunctionWrapper: - """A class that wraps a function and saves its outputs. - - This class can be used to decorate a function to save its outputs. It wraps - the function with a `__call__` method that calls the original function and - saves the results in a log attribute. - - Args: - func (Callable): A function to wrap. - spec (Optional[Dict]): A dictionary of global variables to use as the - namespace for the wrapper. If `None`, the global namespace of the - original function is used. 
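For intuition, compute_oks above measures the per-keypoint distance between prediction and ground truth, divides it by the per-keypoint sigma (only when dataset metainfo is provided) and by the box diagonal, then averages exp(-d^2 / 2) over the visible keypoints. A toy numeric sketch, with no metainfo so the sigma scaling is skipped (import path assumed):

    import torch
    from projects.yolox_pose.models.oks_loss import OksLoss  # assumed path

    loss = OksLoss()                                   # no metainfo -> no per-keypoint sigmas
    pred = torch.tensor([[[10., 10.], [20., 20.]]])    # (N=1, K=2, 2)
    gt = torch.tensor([[[11., 10.], [20., 22.]]])
    vis = torch.tensor([[1., 1.]])
    bbox = torch.tensor([[0., 0., 100., 100.]])        # diagonal ~141.4 normalizes the distances
    oks = loss.compute_oks(pred, gt, vis, bbox)        # close to 1.0 for near-perfect keypoints
    print(float(loss(pred, gt, vis, bbox).mean()))     # loss = 1 - OKS, scaled by loss_weight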
- """ - - def __init__(self, func: Callable, spec: Optional[Dict]) -> None: - """Initializes the OutputSaveFunctionWrapper instance.""" - assert callable(func) - self.log = [] - self.func = func - self.func_name = func.__name__ - - if isinstance(spec, dict): - self.spec = spec - elif hasattr(func, '__globals__'): - self.spec = func.__globals__ - else: - raise ValueError - - def __call__(self, *args, **kwargs) -> Any: - """Calls the wrapped function with the given arguments and saves the - results in the `log` attribute.""" - results = self.func(*args, **kwargs) - self.log.append(results) - return results - - def __enter__(self) -> None: - """Enters the context and sets the wrapped function to be a global - variable in the specified namespace.""" - self.spec[self.func_name] = self - return self.log - - def __exit__(self, exc_type, exc_val, exc_tb) -> None: - """Exits the context and resets the wrapped function to its original - value in the specified namespace.""" - self.spec[self.func_name] = self.func +# Copyright (c) OpenMMLab. All rights reserved. +from collections import defaultdict +from copy import deepcopy +from typing import Any, Callable, Dict, Optional, Tuple + + +class OutputSaveObjectWrapper: + """A wrapper class that saves the output of function calls on an object.""" + + def __init__(self, obj: Any) -> None: + self.obj = obj + self.log = defaultdict(list) + + def __getattr__(self, attr: str) -> Any: + """Overrides the default behavior when an attribute is accessed. + + - If the attribute is callable, hooks the attribute and saves the + returned value of the function call to the log. + - If the attribute is not callable, saves the attribute's value to the + log and returns the value. + """ + orig_attr = getattr(self.obj, attr) + + if not callable(orig_attr): + self.log[attr].append(orig_attr) + return orig_attr + + def hooked(*args: Tuple, **kwargs: Dict) -> Any: + """The hooked function that logs the return value of the original + function.""" + result = orig_attr(*args, **kwargs) + self.log[attr].append(result) + return result + + return hooked + + def clear(self): + """Clears the log of function call outputs.""" + self.log.clear() + + def __deepcopy__(self, memo): + """Only copy the object when applying deepcopy.""" + other = type(self)(deepcopy(self.obj)) + memo[id(self)] = other + return other + + +class OutputSaveFunctionWrapper: + """A class that wraps a function and saves its outputs. + + This class can be used to decorate a function to save its outputs. It wraps + the function with a `__call__` method that calls the original function and + saves the results in a log attribute. + + Args: + func (Callable): A function to wrap. + spec (Optional[Dict]): A dictionary of global variables to use as the + namespace for the wrapper. If `None`, the global namespace of the + original function is used. 
+ """ + + def __init__(self, func: Callable, spec: Optional[Dict]) -> None: + """Initializes the OutputSaveFunctionWrapper instance.""" + assert callable(func) + self.log = [] + self.func = func + self.func_name = func.__name__ + + if isinstance(spec, dict): + self.spec = spec + elif hasattr(func, '__globals__'): + self.spec = func.__globals__ + else: + raise ValueError + + def __call__(self, *args, **kwargs) -> Any: + """Calls the wrapped function with the given arguments and saves the + results in the `log` attribute.""" + results = self.func(*args, **kwargs) + self.log.append(results) + return results + + def __enter__(self) -> None: + """Enters the context and sets the wrapped function to be a global + variable in the specified namespace.""" + self.spec[self.func_name] = self + return self.log + + def __exit__(self, exc_type, exc_val, exc_tb) -> None: + """Exits the context and resets the wrapped function to its original + value in the specified namespace.""" + self.spec[self.func_name] = self.func diff --git a/projects/yolox_pose/models/yolox_pose_head.py b/projects/yolox_pose/models/yolox_pose_head.py index 5fe3fc5900..3b8d94a692 100644 --- a/projects/yolox_pose/models/yolox_pose_head.py +++ b/projects/yolox_pose/models/yolox_pose_head.py @@ -1,359 +1,359 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from collections import defaultdict -from typing import List, Optional, Sequence, Tuple, Union - -import torch -import torch.nn as nn -from mmcv.ops import batched_nms -from mmdet.models.utils import filter_scores_and_topk -from mmdet.utils import ConfigType, OptInstanceList -from mmengine.config import ConfigDict -from mmengine.model import ModuleList, bias_init_with_prob -from mmengine.structures import InstanceData -from mmyolo.models.dense_heads import YOLOXHead, YOLOXHeadModule -from mmyolo.registry import MODELS -from torch import Tensor - -from .utils import OutputSaveFunctionWrapper, OutputSaveObjectWrapper - - -@MODELS.register_module() -class YOLOXPoseHeadModule(YOLOXHeadModule): - """YOLOXPoseHeadModule serves as a head module for `YOLOX-Pose`. - - In comparison to `YOLOXHeadModule`, this module introduces branches for - keypoint prediction. 
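The two wrapper classes above let YOLOXPoseHead reuse the base-class loss computation while recording intermediate values (such as sampling results) without copying code. A neutral usage sketch of OutputSaveFunctionWrapper as a context manager; math.sqrt is an arbitrary stand-in chosen only for illustration (import path assumed):

    import math
    from projects.yolox_pose.models.utils import OutputSaveFunctionWrapper  # assumed path

    # Temporarily replace math.sqrt in its own namespace with a logging wrapper.
    with OutputSaveFunctionWrapper(math.sqrt, spec=math.__dict__) as log:
        math.sqrt(4.0)
        math.sqrt(9.0)
    print(log)   # [2.0, 3.0]; math.sqrt is restored when the context exits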
- """ - - def __init__(self, num_keypoints: int, *args, **kwargs): - self.num_keypoints = num_keypoints - super().__init__(*args, **kwargs) - - def _init_layers(self): - """Initializes the layers in the head module.""" - super()._init_layers() - - # The pose branch requires additional layers for precise regression - self.stacked_convs *= 2 - - # Create separate layers for each level of feature maps - pose_convs, offsets_preds, vis_preds = [], [], [] - for _ in self.featmap_strides: - pose_convs.append(self._build_stacked_convs()) - offsets_preds.append( - nn.Conv2d(self.feat_channels, self.num_keypoints * 2, 1)) - vis_preds.append( - nn.Conv2d(self.feat_channels, self.num_keypoints, 1)) - - self.multi_level_pose_convs = ModuleList(pose_convs) - self.multi_level_conv_offsets = ModuleList(offsets_preds) - self.multi_level_conv_vis = ModuleList(vis_preds) - - def init_weights(self): - """Initialize weights of the head.""" - super().init_weights() - - # Use prior in model initialization to improve stability - bias_init = bias_init_with_prob(0.01) - for conv_vis in self.multi_level_conv_vis: - conv_vis.bias.data.fill_(bias_init) - - def forward(self, x: Tuple[Tensor]) -> Tuple[List]: - """Forward features from the upstream network.""" - offsets_pred, vis_pred = [], [] - for i in range(len(x)): - pose_feat = self.multi_level_pose_convs[i](x[i]) - offsets_pred.append(self.multi_level_conv_offsets[i](pose_feat)) - vis_pred.append(self.multi_level_conv_vis[i](pose_feat)) - - return (*super().forward(x), offsets_pred, vis_pred) - - -@MODELS.register_module() -class YOLOXPoseHead(YOLOXHead): - """YOLOXPoseHead head used in `YOLO-Pose. - - `_. - - Args: - loss_pose (ConfigDict, optional): Config of keypoint OKS loss. - """ - - def __init__( - self, - loss_pose: Optional[ConfigType] = None, - *args, - **kwargs, - ): - super().__init__(*args, **kwargs) - self.loss_pose = MODELS.build(loss_pose) - self.num_keypoints = self.head_module.num_keypoints - - # set up buffers to save variables generated in methods of - # the class's base class. - self._log = defaultdict(list) - self.sampler = OutputSaveObjectWrapper(self.sampler) - - # ensure that the `sigmas` in self.assigner.oks_calculator - # is on the same device as the model - if hasattr(self.assigner, 'oks_calculator'): - self.add_module('assigner_oks_calculator', - self.assigner.oks_calculator) - - def _clear(self): - """Clear variable buffers.""" - self.sampler.clear() - self._log.clear() - - def loss_by_feat(self, - cls_scores: Sequence[Tensor], - bbox_preds: Sequence[Tensor], - objectnesses: Sequence[Tensor], - kpt_preds: Sequence[Tensor], - vis_preds: Sequence[Tensor], - batch_gt_instances: Sequence[InstanceData], - batch_img_metas: Sequence[dict], - batch_gt_instances_ignore: OptInstanceList = None - ) -> dict: - """Calculate the loss based on the features extracted by the detection - head. - - In addition to the base class method, keypoint losses are also - calculated in this method. 
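A side note on the `bias_init_with_prob(0.01)` call in `init_weights` above: to the best of my understanding it is the standard focal-loss prior initialization provided by mmengine, roughly equivalent to the following sketch (not the actual mmengine source):

import math

def bias_init_with_prob(prior_prob):
    # Choose the bias so that sigmoid(bias) is roughly `prior_prob` at init.
    return float(-math.log((1 - prior_prob) / prior_prob))

print(round(bias_init_with_prob(0.01), 2))  # -4.6, i.e. sigmoid(-4.6) ~ 0.01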
- """ - - self._clear() - - # collect keypoints coordinates and visibility from model predictions - kpt_preds = torch.cat([ - kpt_pred.flatten(2).permute(0, 2, 1).contiguous() - for kpt_pred in kpt_preds - ], - dim=1) - - featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] - mlvl_priors = self.prior_generator.grid_priors( - featmap_sizes, - dtype=cls_scores[0].dtype, - device=cls_scores[0].device, - with_stride=True) - grid_priors = torch.cat(mlvl_priors) - - flatten_kpts = self.decode_pose(grid_priors[..., :2], kpt_preds, - grid_priors[..., 2]) - - vis_preds = torch.cat([ - vis_pred.flatten(2).permute(0, 2, 1).contiguous() - for vis_pred in vis_preds - ], - dim=1) - - # compute detection losses and collect targets for keypoints - # predictions simultaneously - self._log['pred_keypoints'] = list(flatten_kpts.detach().split( - 1, dim=0)) - self._log['pred_keypoints_vis'] = list(vis_preds.detach().split( - 1, dim=0)) - - losses = super().loss_by_feat(cls_scores, bbox_preds, objectnesses, - batch_gt_instances, batch_img_metas, - batch_gt_instances_ignore) - - kpt_targets, vis_targets = [], [] - sampling_results = self.sampler.log['sample'] - sampling_result_idx = 0 - for gt_instances in batch_gt_instances: - if len(gt_instances) > 0: - sampling_result = sampling_results[sampling_result_idx] - kpt_target = gt_instances['keypoints'][ - sampling_result.pos_assigned_gt_inds] - vis_target = gt_instances['keypoints_visible'][ - sampling_result.pos_assigned_gt_inds] - sampling_result_idx += 1 - kpt_targets.append(kpt_target) - vis_targets.append(vis_target) - - if len(kpt_targets) > 0: - kpt_targets = torch.cat(kpt_targets, 0) - vis_targets = torch.cat(vis_targets, 0) - - # compute keypoint losses - if len(kpt_targets) > 0: - vis_targets = (vis_targets > 0).float() - pos_masks = torch.cat(self._log['foreground_mask'], 0) - bbox_targets = torch.cat(self._log['bbox_target'], 0) - loss_kpt = self.loss_pose( - flatten_kpts.view(-1, self.num_keypoints, 2)[pos_masks], - kpt_targets, vis_targets, bbox_targets) - loss_vis = self.loss_cls( - vis_preds.view(-1, self.num_keypoints)[pos_masks], - vis_targets) / vis_targets.sum() - else: - loss_kpt = kpt_preds.sum() * 0 - loss_vis = vis_preds.sum() * 0 - - losses.update(dict(loss_kpt=loss_kpt, loss_vis=loss_vis)) - - self._clear() - return losses - - @torch.no_grad() - def _get_targets_single(self, - priors: Tensor, - cls_preds: Tensor, - decoded_bboxes: Tensor, - objectness: Tensor, - gt_instances: InstanceData, - img_meta: dict, - gt_instances_ignore: Optional[InstanceData] = None - ) -> tuple: - """Calculates targets for a single image, and saves them to the log. - - This method is similar to the _get_targets_single method in the base - class, but additionally saves the foreground mask and bbox targets to - the log. 
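The `decode_pose` call in `loss_by_feat` above (the method is defined near the end of this file) reshapes the flattened offsets to (B, N, K, 2), scales them by each prior's stride and adds the grid coordinates. A small numeric sketch with made-up values:

import torch

# Hypothetical sizes: one image (B=1), two priors (N=2), one keypoint (K=1).
grids = torch.tensor([[0., 0.], [8., 8.]])            # (N, 2) grid coordinates
offsets = torch.tensor([[[0.5, 0.5], [1.0, -1.0]]])   # (B, N, K*2) raw offsets
strides = torch.tensor([8., 8.])                      # stride of each prior

strides = strides.reshape(1, -1, 1, 1)                # (1, N, 1, 1)
offsets = offsets.reshape(*offsets.shape[:2], -1, 2)  # (B, N, K, 2)
keypoints = offsets[..., :2] * strides + grids.unsqueeze(1)
print(keypoints)  # tensor([[[[ 4.,  4.]], [[16.,  0.]]]]), absolute (x, y) per prior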
- """ - - # Construct a combined representation of bboxes and keypoints to - # ensure keypoints are also involved in the positive sample - # assignment process - kpt = self._log['pred_keypoints'].pop(0).squeeze(0) - kpt_vis = self._log['pred_keypoints_vis'].pop(0).squeeze(0) - kpt = torch.cat((kpt, kpt_vis.unsqueeze(-1)), dim=-1) - decoded_bboxes = torch.cat((decoded_bboxes, kpt.flatten(1)), dim=1) - - targets = super()._get_targets_single(priors, cls_preds, - decoded_bboxes, objectness, - gt_instances, img_meta, - gt_instances_ignore) - self._log['foreground_mask'].append(targets[0]) - self._log['bbox_target'].append(targets[3]) - return targets - - def predict_by_feat(self, - cls_scores: List[Tensor], - bbox_preds: List[Tensor], - objectnesses: Optional[List[Tensor]] = None, - kpt_preds: Optional[List[Tensor]] = None, - vis_preds: Optional[List[Tensor]] = None, - batch_img_metas: Optional[List[dict]] = None, - cfg: Optional[ConfigDict] = None, - rescale: bool = True, - with_nms: bool = True) -> List[InstanceData]: - """Transform a batch of output features extracted by the head into bbox - and keypoint results. - - In addition to the base class method, keypoint predictions are also - calculated in this method. - """ - - # calculate predicted bboxes and get the kept instances indices - with OutputSaveFunctionWrapper( - filter_scores_and_topk, - super().predict_by_feat.__globals__) as outputs_1: - with OutputSaveFunctionWrapper( - batched_nms, - super()._bbox_post_process.__globals__) as outputs_2: - results_list = super().predict_by_feat(cls_scores, bbox_preds, - objectnesses, - batch_img_metas, cfg, - rescale, with_nms) - keep_indices_topk = [out[2] for out in outputs_1] - keep_indices_nms = [out[1] for out in outputs_2] - - num_imgs = len(batch_img_metas) - - # recover keypoints coordinates from model predictions - featmap_sizes = [vis_pred.shape[2:] for vis_pred in vis_preds] - priors = torch.cat(self.mlvl_priors) - strides = [ - priors.new_full((featmap_size.numel() * self.num_base_priors, ), - stride) for featmap_size, stride in zip( - featmap_sizes, self.featmap_strides) - ] - strides = torch.cat(strides) - kpt_preds = torch.cat([ - kpt_pred.permute(0, 2, 3, 1).reshape( - num_imgs, -1, self.num_keypoints * 2) for kpt_pred in kpt_preds - ], - dim=1) - flatten_decoded_kpts = self.decode_pose(priors, kpt_preds, strides) - - vis_preds = torch.cat([ - vis_pred.permute(0, 2, 3, 1).reshape( - num_imgs, -1, self.num_keypoints) for vis_pred in vis_preds - ], - dim=1).sigmoid() - - # select keypoints predictions according to bbox scores and nms result - keep_indices_nms_idx = 0 - for pred_instances, kpts, kpts_vis, img_meta, keep_idxs \ - in zip( - results_list, flatten_decoded_kpts, vis_preds, - batch_img_metas, keep_indices_topk): - - pred_instances.bbox_scores = pred_instances.scores - - if len(pred_instances) == 0: - pred_instances.keypoints = kpts[:0] - pred_instances.keypoint_scores = kpts_vis[:0] - continue - - kpts = kpts[keep_idxs] - kpts_vis = kpts_vis[keep_idxs] - - if rescale: - pad_param = img_meta.get('img_meta', None) - scale_factor = img_meta['scale_factor'] - if pad_param is not None: - kpts -= kpts.new_tensor([pad_param[2], pad_param[0]]) - kpts /= kpts.new_tensor(scale_factor).repeat( - (1, self.num_keypoints, 1)) - - keep_idxs_nms = keep_indices_nms[keep_indices_nms_idx] - kpts = kpts[keep_idxs_nms] - kpts_vis = kpts_vis[keep_idxs_nms] - keep_indices_nms_idx += 1 - - pred_instances.keypoints = kpts - pred_instances.keypoint_scores = kpts_vis - - return results_list - - def 
predict(self, - x: Tuple[Tensor], - batch_data_samples, - rescale: bool = False): - predictions = [ - pred_instances.numpy() for pred_instances in super().predict( - x, batch_data_samples, rescale) - ] - return predictions - - def decode_pose(self, grids: torch.Tensor, offsets: torch.Tensor, - strides: Union[torch.Tensor, int]) -> torch.Tensor: - """Decode regression offsets to keypoints. - - Args: - grids (torch.Tensor): The coordinates of the feature map grids. - offsets (torch.Tensor): The predicted offset of each keypoint - relative to its corresponding grid. - strides (torch.Tensor | int): The stride of the feature map for - each instance. - - Returns: - torch.Tensor: The decoded keypoints coordinates. - """ - - if isinstance(strides, int): - strides = torch.tensor([strides]).to(offsets) - - strides = strides.reshape(1, -1, 1, 1) - offsets = offsets.reshape(*offsets.shape[:2], -1, 2) - xy_coordinates = (offsets[..., :2] * strides) + grids.unsqueeze(1) - return xy_coordinates - - @staticmethod - def gt_instances_preprocess(batch_gt_instances: List[InstanceData], *args, - **kwargs) -> List[InstanceData]: - return batch_gt_instances +# Copyright (c) OpenMMLab. All rights reserved. +from collections import defaultdict +from typing import List, Optional, Sequence, Tuple, Union + +import torch +import torch.nn as nn +from mmcv.ops import batched_nms +from mmdet.models.utils import filter_scores_and_topk +from mmdet.utils import ConfigType, OptInstanceList +from mmengine.config import ConfigDict +from mmengine.model import ModuleList, bias_init_with_prob +from mmengine.structures import InstanceData +from mmyolo.models.dense_heads import YOLOXHead, YOLOXHeadModule +from mmyolo.registry import MODELS +from torch import Tensor + +from .utils import OutputSaveFunctionWrapper, OutputSaveObjectWrapper + + +@MODELS.register_module() +class YOLOXPoseHeadModule(YOLOXHeadModule): + """YOLOXPoseHeadModule serves as a head module for `YOLOX-Pose`. + + In comparison to `YOLOXHeadModule`, this module introduces branches for + keypoint prediction. 
+ """ + + def __init__(self, num_keypoints: int, *args, **kwargs): + self.num_keypoints = num_keypoints + super().__init__(*args, **kwargs) + + def _init_layers(self): + """Initializes the layers in the head module.""" + super()._init_layers() + + # The pose branch requires additional layers for precise regression + self.stacked_convs *= 2 + + # Create separate layers for each level of feature maps + pose_convs, offsets_preds, vis_preds = [], [], [] + for _ in self.featmap_strides: + pose_convs.append(self._build_stacked_convs()) + offsets_preds.append( + nn.Conv2d(self.feat_channels, self.num_keypoints * 2, 1)) + vis_preds.append( + nn.Conv2d(self.feat_channels, self.num_keypoints, 1)) + + self.multi_level_pose_convs = ModuleList(pose_convs) + self.multi_level_conv_offsets = ModuleList(offsets_preds) + self.multi_level_conv_vis = ModuleList(vis_preds) + + def init_weights(self): + """Initialize weights of the head.""" + super().init_weights() + + # Use prior in model initialization to improve stability + bias_init = bias_init_with_prob(0.01) + for conv_vis in self.multi_level_conv_vis: + conv_vis.bias.data.fill_(bias_init) + + def forward(self, x: Tuple[Tensor]) -> Tuple[List]: + """Forward features from the upstream network.""" + offsets_pred, vis_pred = [], [] + for i in range(len(x)): + pose_feat = self.multi_level_pose_convs[i](x[i]) + offsets_pred.append(self.multi_level_conv_offsets[i](pose_feat)) + vis_pred.append(self.multi_level_conv_vis[i](pose_feat)) + + return (*super().forward(x), offsets_pred, vis_pred) + + +@MODELS.register_module() +class YOLOXPoseHead(YOLOXHead): + """YOLOXPoseHead head used in `YOLO-Pose. + + `_. + + Args: + loss_pose (ConfigDict, optional): Config of keypoint OKS loss. + """ + + def __init__( + self, + loss_pose: Optional[ConfigType] = None, + *args, + **kwargs, + ): + super().__init__(*args, **kwargs) + self.loss_pose = MODELS.build(loss_pose) + self.num_keypoints = self.head_module.num_keypoints + + # set up buffers to save variables generated in methods of + # the class's base class. + self._log = defaultdict(list) + self.sampler = OutputSaveObjectWrapper(self.sampler) + + # ensure that the `sigmas` in self.assigner.oks_calculator + # is on the same device as the model + if hasattr(self.assigner, 'oks_calculator'): + self.add_module('assigner_oks_calculator', + self.assigner.oks_calculator) + + def _clear(self): + """Clear variable buffers.""" + self.sampler.clear() + self._log.clear() + + def loss_by_feat(self, + cls_scores: Sequence[Tensor], + bbox_preds: Sequence[Tensor], + objectnesses: Sequence[Tensor], + kpt_preds: Sequence[Tensor], + vis_preds: Sequence[Tensor], + batch_gt_instances: Sequence[InstanceData], + batch_img_metas: Sequence[dict], + batch_gt_instances_ignore: OptInstanceList = None + ) -> dict: + """Calculate the loss based on the features extracted by the detection + head. + + In addition to the base class method, keypoint losses are also + calculated in this method. 
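For orientation: the keypoint branches added by the module above yield, per FPN level i, an offset map of shape (B, 2*K, H_i, W_i) and a visibility map of shape (B, K, H_i, W_i). `loss_by_feat` below flattens them into per-prior rows and concatenates the levels; a shape-only sketch with dummy tensors (sizes are made up):

import torch

B, K = 2, 17                       # hypothetical batch size and keypoint count
featmap_sizes = [(8, 8), (4, 4)]   # two FPN levels with made-up resolutions

offset_maps = [torch.randn(B, 2 * K, h, w) for h, w in featmap_sizes]
flat_offsets = torch.cat(
    [m.flatten(2).permute(0, 2, 1).contiguous() for m in offset_maps], dim=1)
print(flat_offsets.shape)  # torch.Size([2, 80, 34]), i.e. (B, num priors, 2*K)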
+ """ + + self._clear() + + # collect keypoints coordinates and visibility from model predictions + kpt_preds = torch.cat([ + kpt_pred.flatten(2).permute(0, 2, 1).contiguous() + for kpt_pred in kpt_preds + ], + dim=1) + + featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] + mlvl_priors = self.prior_generator.grid_priors( + featmap_sizes, + dtype=cls_scores[0].dtype, + device=cls_scores[0].device, + with_stride=True) + grid_priors = torch.cat(mlvl_priors) + + flatten_kpts = self.decode_pose(grid_priors[..., :2], kpt_preds, + grid_priors[..., 2]) + + vis_preds = torch.cat([ + vis_pred.flatten(2).permute(0, 2, 1).contiguous() + for vis_pred in vis_preds + ], + dim=1) + + # compute detection losses and collect targets for keypoints + # predictions simultaneously + self._log['pred_keypoints'] = list(flatten_kpts.detach().split( + 1, dim=0)) + self._log['pred_keypoints_vis'] = list(vis_preds.detach().split( + 1, dim=0)) + + losses = super().loss_by_feat(cls_scores, bbox_preds, objectnesses, + batch_gt_instances, batch_img_metas, + batch_gt_instances_ignore) + + kpt_targets, vis_targets = [], [] + sampling_results = self.sampler.log['sample'] + sampling_result_idx = 0 + for gt_instances in batch_gt_instances: + if len(gt_instances) > 0: + sampling_result = sampling_results[sampling_result_idx] + kpt_target = gt_instances['keypoints'][ + sampling_result.pos_assigned_gt_inds] + vis_target = gt_instances['keypoints_visible'][ + sampling_result.pos_assigned_gt_inds] + sampling_result_idx += 1 + kpt_targets.append(kpt_target) + vis_targets.append(vis_target) + + if len(kpt_targets) > 0: + kpt_targets = torch.cat(kpt_targets, 0) + vis_targets = torch.cat(vis_targets, 0) + + # compute keypoint losses + if len(kpt_targets) > 0: + vis_targets = (vis_targets > 0).float() + pos_masks = torch.cat(self._log['foreground_mask'], 0) + bbox_targets = torch.cat(self._log['bbox_target'], 0) + loss_kpt = self.loss_pose( + flatten_kpts.view(-1, self.num_keypoints, 2)[pos_masks], + kpt_targets, vis_targets, bbox_targets) + loss_vis = self.loss_cls( + vis_preds.view(-1, self.num_keypoints)[pos_masks], + vis_targets) / vis_targets.sum() + else: + loss_kpt = kpt_preds.sum() * 0 + loss_vis = vis_preds.sum() * 0 + + losses.update(dict(loss_kpt=loss_kpt, loss_vis=loss_vis)) + + self._clear() + return losses + + @torch.no_grad() + def _get_targets_single(self, + priors: Tensor, + cls_preds: Tensor, + decoded_bboxes: Tensor, + objectness: Tensor, + gt_instances: InstanceData, + img_meta: dict, + gt_instances_ignore: Optional[InstanceData] = None + ) -> tuple: + """Calculates targets for a single image, and saves them to the log. + + This method is similar to the _get_targets_single method in the base + class, but additionally saves the foreground mask and bbox targets to + the log. 
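Shape-wise, the target computation below simply widens each decoded box with the flattened keypoint predictions before assignment, presumably so that the assigner (and its OKS calculator) can take the pose into account; a sketch with made-up sizes:

import torch

N, K = 100, 17                        # hypothetical prior and keypoint counts
decoded_bboxes = torch.randn(N, 4)    # (x1, y1, x2, y2) per prior
kpt = torch.randn(N, K, 2)            # predicted keypoint coordinates
kpt_vis = torch.randn(N, K)           # predicted visibility logits

kpt = torch.cat((kpt, kpt_vis.unsqueeze(-1)), dim=-1)            # (N, K, 3)
decoded_bboxes = torch.cat((decoded_bboxes, kpt.flatten(1)), dim=1)
print(decoded_bboxes.shape)  # torch.Size([100, 55]), i.e. 4 + 3 * K columns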
+ """ + + # Construct a combined representation of bboxes and keypoints to + # ensure keypoints are also involved in the positive sample + # assignment process + kpt = self._log['pred_keypoints'].pop(0).squeeze(0) + kpt_vis = self._log['pred_keypoints_vis'].pop(0).squeeze(0) + kpt = torch.cat((kpt, kpt_vis.unsqueeze(-1)), dim=-1) + decoded_bboxes = torch.cat((decoded_bboxes, kpt.flatten(1)), dim=1) + + targets = super()._get_targets_single(priors, cls_preds, + decoded_bboxes, objectness, + gt_instances, img_meta, + gt_instances_ignore) + self._log['foreground_mask'].append(targets[0]) + self._log['bbox_target'].append(targets[3]) + return targets + + def predict_by_feat(self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + objectnesses: Optional[List[Tensor]] = None, + kpt_preds: Optional[List[Tensor]] = None, + vis_preds: Optional[List[Tensor]] = None, + batch_img_metas: Optional[List[dict]] = None, + cfg: Optional[ConfigDict] = None, + rescale: bool = True, + with_nms: bool = True) -> List[InstanceData]: + """Transform a batch of output features extracted by the head into bbox + and keypoint results. + + In addition to the base class method, keypoint predictions are also + calculated in this method. + """ + + # calculate predicted bboxes and get the kept instances indices + with OutputSaveFunctionWrapper( + filter_scores_and_topk, + super().predict_by_feat.__globals__) as outputs_1: + with OutputSaveFunctionWrapper( + batched_nms, + super()._bbox_post_process.__globals__) as outputs_2: + results_list = super().predict_by_feat(cls_scores, bbox_preds, + objectnesses, + batch_img_metas, cfg, + rescale, with_nms) + keep_indices_topk = [out[2] for out in outputs_1] + keep_indices_nms = [out[1] for out in outputs_2] + + num_imgs = len(batch_img_metas) + + # recover keypoints coordinates from model predictions + featmap_sizes = [vis_pred.shape[2:] for vis_pred in vis_preds] + priors = torch.cat(self.mlvl_priors) + strides = [ + priors.new_full((featmap_size.numel() * self.num_base_priors, ), + stride) for featmap_size, stride in zip( + featmap_sizes, self.featmap_strides) + ] + strides = torch.cat(strides) + kpt_preds = torch.cat([ + kpt_pred.permute(0, 2, 3, 1).reshape( + num_imgs, -1, self.num_keypoints * 2) for kpt_pred in kpt_preds + ], + dim=1) + flatten_decoded_kpts = self.decode_pose(priors, kpt_preds, strides) + + vis_preds = torch.cat([ + vis_pred.permute(0, 2, 3, 1).reshape( + num_imgs, -1, self.num_keypoints) for vis_pred in vis_preds + ], + dim=1).sigmoid() + + # select keypoints predictions according to bbox scores and nms result + keep_indices_nms_idx = 0 + for pred_instances, kpts, kpts_vis, img_meta, keep_idxs \ + in zip( + results_list, flatten_decoded_kpts, vis_preds, + batch_img_metas, keep_indices_topk): + + pred_instances.bbox_scores = pred_instances.scores + + if len(pred_instances) == 0: + pred_instances.keypoints = kpts[:0] + pred_instances.keypoint_scores = kpts_vis[:0] + continue + + kpts = kpts[keep_idxs] + kpts_vis = kpts_vis[keep_idxs] + + if rescale: + pad_param = img_meta.get('img_meta', None) + scale_factor = img_meta['scale_factor'] + if pad_param is not None: + kpts -= kpts.new_tensor([pad_param[2], pad_param[0]]) + kpts /= kpts.new_tensor(scale_factor).repeat( + (1, self.num_keypoints, 1)) + + keep_idxs_nms = keep_indices_nms[keep_indices_nms_idx] + kpts = kpts[keep_idxs_nms] + kpts_vis = kpts_vis[keep_idxs_nms] + keep_indices_nms_idx += 1 + + pred_instances.keypoints = kpts + pred_instances.keypoint_scores = kpts_vis + + return results_list + + def 
predict(self, + x: Tuple[Tensor], + batch_data_samples, + rescale: bool = False): + predictions = [ + pred_instances.numpy() for pred_instances in super().predict( + x, batch_data_samples, rescale) + ] + return predictions + + def decode_pose(self, grids: torch.Tensor, offsets: torch.Tensor, + strides: Union[torch.Tensor, int]) -> torch.Tensor: + """Decode regression offsets to keypoints. + + Args: + grids (torch.Tensor): The coordinates of the feature map grids. + offsets (torch.Tensor): The predicted offset of each keypoint + relative to its corresponding grid. + strides (torch.Tensor | int): The stride of the feature map for + each instance. + + Returns: + torch.Tensor: The decoded keypoints coordinates. + """ + + if isinstance(strides, int): + strides = torch.tensor([strides]).to(offsets) + + strides = strides.reshape(1, -1, 1, 1) + offsets = offsets.reshape(*offsets.shape[:2], -1, 2) + xy_coordinates = (offsets[..., :2] * strides) + grids.unsqueeze(1) + return xy_coordinates + + @staticmethod + def gt_instances_preprocess(batch_gt_instances: List[InstanceData], *args, + **kwargs) -> List[InstanceData]: + return batch_gt_instances diff --git a/pytest.ini b/pytest.ini index cfe19897f5..61651c4fd1 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,7 +1,7 @@ -[pytest] -addopts = --xdoctest --xdoctest-style=auto -norecursedirs = .git ignore build __pycache__ data docker docs .eggs .mim tests/legacy - -filterwarnings= default - ignore:.*No cfgstr given in Cacher constructor or call.*:Warning - ignore:.*Define the __nice__ method for.*:Warning +[pytest] +addopts = --xdoctest --xdoctest-style=auto +norecursedirs = .git ignore build __pycache__ data docker docs .eggs .mim tests/legacy + +filterwarnings= default + ignore:.*No cfgstr given in Cacher constructor or call.*:Warning + ignore:.*Define the __nice__ method for.*:Warning diff --git a/requirements.txt b/requirements.txt index b5b5d97a6e..4004ff4992 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ --r requirements/build.txt --r requirements/runtime.txt --r requirements/tests.txt --r requirements/optional.txt +-r requirements/build.txt +-r requirements/runtime.txt +-r requirements/tests.txt +-r requirements/optional.txt diff --git a/requirements/albu.txt b/requirements/albu.txt index f421fbbdc4..78e155dac2 100644 --- a/requirements/albu.txt +++ b/requirements/albu.txt @@ -1 +1 @@ -albumentations>=0.3.2 --no-binary qudida,albumentations +albumentations>=0.3.2 --no-binary qudida,albumentations diff --git a/requirements/build.txt b/requirements/build.txt index fb44aadd43..615fc208a0 100644 --- a/requirements/build.txt +++ b/requirements/build.txt @@ -1,3 +1,3 @@ -# These must be installed before building mmpose -numpy -torch>=1.8 +# These must be installed before building mmpose +numpy +torch>=1.8 diff --git a/requirements/docs.txt b/requirements/docs.txt index d278090dbb..179c3b5b28 100644 --- a/requirements/docs.txt +++ b/requirements/docs.txt @@ -1,8 +1,8 @@ -docutils==0.16.0 -markdown -myst-parser --e git+https://github.com/gaotongxiao/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme -sphinx==4.5.0 -sphinx_copybutton -sphinx_markdown_tables -urllib3<2.0.0 +docutils==0.16.0 +markdown +myst-parser +-e git+https://github.com/gaotongxiao/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme +sphinx==4.5.0 +sphinx_copybutton +sphinx_markdown_tables +urllib3<2.0.0 diff --git a/requirements/mminstall.txt b/requirements/mminstall.txt index 30d8402a42..be89b72d0a 100644 --- a/requirements/mminstall.txt +++ b/requirements/mminstall.txt 
@@ -1,3 +1,3 @@ -mmcv>=2.0.0,<2.1.0 -mmdet>=3.0.0,<3.2.0 -mmengine>=0.4.0,<1.0.0 +mmcv>=2.0.0,<2.1.0 +mmdet>=3.0.0,<3.2.0 +mmengine>=0.4.0,<1.0.0 diff --git a/requirements/optional.txt b/requirements/optional.txt index f2293605cf..bebaa03d14 100644 --- a/requirements/optional.txt +++ b/requirements/optional.txt @@ -1 +1 @@ -requests +requests diff --git a/requirements/poseval.txt b/requirements/poseval.txt index f4d95e1afa..ffa97365fe 100644 --- a/requirements/poseval.txt +++ b/requirements/poseval.txt @@ -1,2 +1,2 @@ -poseval@git+https://github.com/svenkreiss/poseval.git -shapely==1.8.4 +poseval@git+https://github.com/svenkreiss/poseval.git +shapely==1.8.4 diff --git a/requirements/readthedocs.txt b/requirements/readthedocs.txt index 13af2ec22d..432e0b47d7 100644 --- a/requirements/readthedocs.txt +++ b/requirements/readthedocs.txt @@ -1,9 +1,9 @@ -mmcv>=2.0.0rc4 -mmengine>=0.6.0,<1.0.0 -munkres -regex -scipy -titlecase -torch>1.6 -torchvision -xtcocotools>=1.13 +mmcv>=2.0.0rc4 +mmengine>=0.6.0,<1.0.0 +munkres +regex +scipy +titlecase +torch>1.6 +torchvision +xtcocotools>=1.13 diff --git a/requirements/runtime.txt b/requirements/runtime.txt index ab5c0172e4..71fe34fd48 100644 --- a/requirements/runtime.txt +++ b/requirements/runtime.txt @@ -1,10 +1,10 @@ -chumpy -json_tricks -matplotlib -munkres -numpy -opencv-python -pillow -scipy -torchvision -xtcocotools>=1.12 +chumpy +json_tricks +matplotlib +munkres +numpy +opencv-python +pillow +scipy +torchvision +xtcocotools>=1.12 diff --git a/requirements/tests.txt b/requirements/tests.txt index c63bc90822..32f79aef72 100644 --- a/requirements/tests.txt +++ b/requirements/tests.txt @@ -1,9 +1,9 @@ -coverage -flake8 -interrogate -isort==4.3.21 -parameterized -pytest -pytest-runner -xdoctest>=0.10.0 -yapf +coverage +flake8 +interrogate +isort==4.3.21 +parameterized +pytest +pytest-runner +xdoctest>=0.10.0 +yapf diff --git a/run_inference.py b/run_inference.py index 646c3b19ea..9c8efabdd4 100644 --- a/run_inference.py +++ b/run_inference.py @@ -1,63 +1,186 @@ -import os -import os.path as osp -import glob -import argparse -import imageio -import numpy as np - -from mmengine.config import Config - -from mmpose.apis import inference_topdown, init_model -from mmpose.registry import VISUALIZERS -from mmpose.structures import merge_data_samples - -def select_work_dir(work_dir, checkpoint): - print("work_dir:", osp.abspath(work_dir)) - dirs = sorted(os.listdir(work_dir)) - - for i, d in enumerate(dirs, 0): - print("({}) {}".format(i, d)) - d_idx = input("Select directory that you want to load: ") - - path_opt = dirs[int(d_idx)] - chosen_dir = osp.abspath(os.path.join(work_dir, path_opt)) - config_path = glob.glob(osp.join(chosen_dir, '*.py'))[0] - - if checkpoint == 'last': - with open(osp.join(chosen_dir, 'last_checkpoint')) as cf: - pth_path = cf.readline() - else: - with open(osp.join(chosen_dir, 'lest_checkpoint')) as cf: - pth_path = cf.readline() - pth = osp.basename(pth_path) - pth_path = osp.join(chosen_dir, pth) - - print('config_path:', config_path) - print('pth_path:', pth_path) - - return config_path, pth_path - - -if __name__ == '__main__': - work_dir = '../../data/mmpose/work_dirs' - parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument('--work_dir', type=str, default=work_dir, - help='specify working directory of trainined model') - parser.add_argument('--checkpoint', type=str, default='last', choices=['last', 'best'], - help='select which chekpoint will be chosen [last|best]') - - 
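The rewritten run_inference.py below drops the direct `init_model`/`inference_topdown` calls in favour of `MMPoseInferencer`. Judging only from how the script indexes its results, each item yielded by the inferencer looks roughly like the sketch below (illustrative values, not an exact MMPose specification):

# Approximate structure of one item yielded by `inferencer(img_path)`,
# as consumed by the loops in the script; the numbers are made up.
result = {
    'predictions': [                # one entry per input image
        [                           # one dict per detected instance
            {
                'keypoints': [[123.4, 56.7], [130.1, 60.2]],    # (x, y) pairs
                'keypoint_scores': [0.91, 0.87],
            },
        ],
    ],
}
for instances in result['predictions']:
    for instance in instances:
        print(instance['keypoints'], instance['keypoint_scores'])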
args = parser.parse_args() - - config_path, pth_path = select_work_dir(args.work_dir, args.checkpoint) - configname = osp.splitext(osp.basename(config_path))[0] - cfg = Config.fromfile(config_path) - # print(cfg.test_dataloader) - - # # init model and load checkpoint - print(f'Initializing model with {config_path} and {pth_path}') - model = init_model(config_path, pth_path, device='cuda') - - test_dir = osp.join(cfg.test_dataloader.dataset.data_root, cfg.test_dataloader.dataset.data_prefix.img) - # annot_dir = osp.join(cfg.test_dataloader.dataset.data_root, cfg.test_dataloader.dataset.data_prefix.seg_map_path) - save_dir = osp.join(rf'../../data/mmpose/results/{cfg.test_dataloader.dataset.type}', configname) - os.makedirs(save_dir, exist_ok=True) +import os +import os.path as osp +import json +import glob +import argparse +import cv2 +import numpy as np + +from mmengine.config import Config + +from mmpose.apis import inference_topdown, init_model +from mmpose.apis import MMPoseInferencer + + + +def select_work_dir(work_dir, checkpoint): + print("work_dir:", osp.abspath(work_dir)) + dirs = sorted(os.listdir(work_dir)) + + for i, d in enumerate(dirs, 0): + print("({}) {}".format(i, d)) + d_idx = input("Select directory that you want to load: ") + + path_opt = dirs[int(d_idx)] + chosen_dir = osp.abspath(os.path.join(work_dir, path_opt)) + config_path = glob.glob(osp.join(chosen_dir, '*.py'))[0] + + if checkpoint == 'last': + with open(osp.join(chosen_dir, 'last_checkpoint')) as cf: + pth_path = cf.readline() + else: + with open(osp.join(chosen_dir, 'lest_checkpoint')) as cf: + pth_path = cf.readline() + pth = osp.basename(pth_path) + pth_path = osp.join(chosen_dir, pth) + + print('config_path:', config_path) + print('pth_path:', pth_path) + + return config_path, pth_path + +def get_keypoints(keypoint_list): + pt1 = keypoint_list[0:2] + pt2 = keypoint_list[3:5] + return (pt1, pt2) + +if __name__ == '__main__': + work_dir = '../../data/mmpose/work_dirs' + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--work_dir', type=str, default=work_dir, + help='specify working directory of trainined model') + parser.add_argument('--checkpoint', type=str, default='last', choices=['last', 'best'], + help='select which chekpoint will be chosen [last|best]') + parser.add_argument('--thr', type=float, default=0.5, + help='the threshold of keypoiont score') + parser.add_argument('--correct_range', type=int, default=15, + help='the range which include the correct prediction') + + args = parser.parse_args() + + pred_color = (255, 0, 0) + gt_color = (0, 255, 0) + + config_path, pth_path = select_work_dir(args.work_dir, args.checkpoint) + configname = osp.splitext(osp.basename(config_path))[0] + cfg = Config.fromfile(config_path) + # print(cfg.test_dataloader) + + # # init model and load checkpoint + print(f'Initializing model with {config_path} and {pth_path}') + # model = init_model(config_path, pth_path, device='cuda') + inferencer = MMPoseInferencer(pose2d=config_path, pose2d_weights=pth_path) + + test_dir = osp.join(cfg.test_dataloader.dataset.data_root, cfg.test_dataloader.dataset.data_prefix.img) + annot_path = osp.join(cfg.test_dataloader.dataset.data_root, cfg.test_dataloader.dataset.ann_file) + save_dir = osp.join(rf'../../data/mmpose/results/{cfg.test_dataloader.dataset.type}', configname) + os.makedirs(save_dir, exist_ok=True) + + with open(annot_path, 'r') as json_file: + annot_data = json.load(json_file) + + image_list = 
annot_data['images'] + annot_list = annot_data['annotations'] + + annot_dict = {} + for ann in annot_list: + + img_id = ann['image_id'] + + if not img_id in annot_dict: + annot_dict[img_id] = [ann] + else: + annot_dict[img_id].append(ann) + + # print('image_list:', image_list) + for img_data in image_list: + file_name = img_data['file_name'] + img_id = img_data['id'] + + img_path = osp.join(test_dir, file_name) + img_fn = file_name.replace('/', '-') + + img = cv2.imread(img_path, cv2.IMREAD_COLOR) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + + result_generator = inferencer(img_path) + # inferencer.visualize() + result = next(result_generator) + # print(result_generator) + + predictions = result['predictions'] + + print('**************************') + # print('predictions:', predictions) + + print('>>>>>>>>>>>>>>>>>>>>>>>>>>') + keypoints_pred = [] + keypoints_gt = [] + for preds in predictions: + # print('preds:', preds) + for p in preds: + # print('p', p) + keypoints_pred = p['keypoints'] + keypoint_scores = p['keypoint_scores'] + # print('keypoints:', keypoints_pred) + # print('keypoint_scores:', keypoint_scores) + + keypoint_exist = False + for ks in keypoint_scores: + if ks > 0.5: keypoint_exist = True + + if keypoint_exist: + kp1 = [int(x + 0.5) for x in keypoints_pred[0]] + kp2 = [int(x + 0.5) for x in keypoints_pred[1]] + print('keypoint_scores:', keypoint_scores) + print('predictions:', kp1, kp2) + cv2.circle(img, kp1, 2, pred_color, -1) + cv2.circle(img, kp1, 10, pred_color, 1) + cv2.circle(img, kp2, 2, pred_color, -1) + cv2.circle(img, kp2, 10, pred_color, 1) + keypoints_pred.append((kp1, kp2)) + + + + # for i, ks in enumerate(keypoint_scores): + # if ks > args.thr: + # kp1 = [int(x + 0.5) for x in keypoints_pred[i][0]] + # kp2 = [int(x + 0.5) for x in keypoints_pred[i][1]] + # print('keypoint_scores:', keypoint_scores) + # print('predictions:', kp1, kp2) + # cv2.circle(img, kp1, 2, pred_color, -1) + # cv2.circle(img, kp2, 2, pred_color, -1) + # keypoints_pred.append((kp1, kp2)) + + + if img_id in annot_dict: + annots = annot_dict[img_id] + + for ann in annots: + keypoint_gt = ann['keypoints'] + print(keypoint_gt) + keypoints = get_keypoints(keypoint_gt) + print(keypoints) + keypoints_gt.append(keypoints) + cv2.circle(img, keypoints[0], 1, gt_color, 1) + cv2.circle(img, keypoints[0], 10, gt_color, 1) + cv2.circle(img, keypoints[1], 1, gt_color, 1) + cv2.circle(img, keypoints[1], 10, gt_color, 1) + + + + cv2.imshow('image', img) + cv2.waitKeyEx(1) + print(img_path, img.shape) + + + result_path = osp.join(save_dir, file_name.replace('/', '-')) + print(osp.abspath(result_path)) + cv2.imwrite(result_path, img) + print('----------------------------') + + + + + # if img_id in annot_dict: + # print(annot_dict[img_id] \ No newline at end of file diff --git a/run_metric.py b/run_metric.py new file mode 100644 index 0000000000..c321bc94f3 --- /dev/null +++ b/run_metric.py @@ -0,0 +1,204 @@ +import os +import os.path as osp +import json +import glob +import argparse +import cv2 +import numpy as np + +from mmengine.config import Config + +from mmpose.apis import inference_topdown, init_model +from mmpose.apis import MMPoseInferencer + + + +def select_work_dir(work_dir, checkpoint): + print("work_dir:", osp.abspath(work_dir)) + dirs = sorted(os.listdir(work_dir)) + + for i, d in enumerate(dirs, 0): + print("({}) {}".format(i, d)) + d_idx = input("Select directory that you want to load: ") + + path_opt = dirs[int(d_idx)] + chosen_dir = osp.abspath(os.path.join(work_dir, path_opt)) + 
config_path = glob.glob(osp.join(chosen_dir, '*.py'))[0] + + if checkpoint == 'last': + with open(osp.join(chosen_dir, 'last_checkpoint')) as cf: + pth_path = cf.readline() + else: + with open(osp.join(chosen_dir, 'lest_checkpoint')) as cf: + pth_path = cf.readline() + pth = osp.basename(pth_path) + pth_path = osp.join(chosen_dir, pth) + + print('config_path:', config_path) + print('pth_path:', pth_path) + + return config_path, pth_path + +def get_keypoints(keypoint_list): + pt1 = keypoint_list[0:2] + pt2 = keypoint_list[3:5] + return (pt1, pt2) + +if __name__ == '__main__': + work_dir = '../../data/mmpose/work_dirs' + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--work_dir', type=str, default=work_dir, + help='specify working directory of trainined model') + parser.add_argument('--checkpoint', type=str, default='last', choices=['last', 'best'], + help='select which chekpoint will be chosen [last|best]') + parser.add_argument('--thr', type=float, default=0.5, + help='the threshold of keypoiont score') + parser.add_argument('--correct_range', type=int, default=15, + help='the range which include the correct prediction') + parser.add_argument('--round2flat', action='store_true', + help='convert round to flat view') + + args = parser.parse_args() + + pred_color = (255, 0, 0) + gt_color = (0, 255, 0) + + config_path, pth_path = select_work_dir(args.work_dir, args.checkpoint) + configname = osp.splitext(osp.basename(config_path))[0] + cfg = Config.fromfile(config_path) + # print(cfg.test_dataloader) + + # # init model and load checkpoint + print(f'Initializing model with {config_path} and {pth_path}') + # model = init_model(config_path, pth_path, device='cuda') + inferencer = MMPoseInferencer(pose2d=config_path, pose2d_weights=pth_path) + + test_dir = osp.join(cfg.test_dataloader.dataset.data_root, cfg.test_dataloader.dataset.data_prefix.img) + annot_path = osp.join(cfg.test_dataloader.dataset.data_root, cfg.test_dataloader.dataset.ann_file) + if args.round2flat: + test_dir.replace('round', 'flat') + annot_path.replace('round', 'flat') + + save_dir = osp.join(rf'../../data/mmpose/results/{cfg.test_dataloader.dataset.type}', configname) + os.makedirs(save_dir, exist_ok=True) + + with open(annot_path, 'r') as json_file: + annot_data = json.load(json_file) + + image_list = annot_data['images'] + annot_list = annot_data['annotations'] + + annot_dict = {} + for ann in annot_list: + + img_id = ann['image_id'] + + if not img_id in annot_dict: + annot_dict[img_id] = [ann] + else: + annot_dict[img_id].append(ann) + + # print('image_list:', image_list) + for img_data in image_list: + file_name = img_data['file_name'] + img_id = img_data['id'] + + img_path = osp.join(test_dir, file_name) + img_fn = file_name.replace('/', '-') + + img = cv2.imread(img_path, cv2.IMREAD_COLOR) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + + result_generator = inferencer(img_path) + # inferencer.visualize() + result = next(result_generator) + # print(result_generator) + + predictions = result['predictions'] + + print('**************************') + # print('predictions:', predictions) + + print('>>>>>>>>>>>>>>>>>>>>>>>>>>') + keypoints_pred = [] + keypoints_gt = [] + for preds in predictions: + # print('preds:', preds) + for p in preds: + # print('p', p) + keypoints_pred = p['keypoints'] + keypoint_scores = p['keypoint_scores'] + # print('keypoints:', keypoints_pred) + # print('keypoint_scores:', keypoint_scores) + + keypoint_exist = False + for ks in 
keypoint_scores: + if ks > 0.5: keypoint_exist = True + + if keypoint_exist: + kp1 = [int(x + 0.5) for x in keypoints_pred[0]] + kp2 = [int(x + 0.5) for x in keypoints_pred[1]] + print('keypoint_scores:', keypoint_scores) + print('predictions:', kp1, kp2) + cv2.circle(img, kp1, 2, pred_color, -1) + cv2.circle(img, kp1, 10, pred_color, 1) + cv2.circle(img, kp2, 2, pred_color, -1) + cv2.circle(img, kp2, 10, pred_color, 1) + keypoints_pred.append((kp1, kp2)) + + + + # for i, ks in enumerate(keypoint_scores): + # if ks > args.thr: + # kp1 = [int(x + 0.5) for x in keypoints_pred[i][0]] + # kp2 = [int(x + 0.5) for x in keypoints_pred[i][1]] + # print('keypoint_scores:', keypoint_scores) + # print('predictions:', kp1, kp2) + # cv2.circle(img, kp1, 2, pred_color, -1) + # cv2.circle(img, kp2, 2, pred_color, -1) + # keypoints_pred.append((kp1, kp2)) + + + if img_id in annot_dict: + annots = annot_dict[img_id] + + for ann in annots: + keypoint_gt = ann['keypoints'] + print(keypoint_gt) + keypoints = get_keypoints(keypoint_gt) + print(keypoints) + keypoints_gt.append(keypoints) + cv2.circle(img, keypoints[0], 1, gt_color, 1) + cv2.circle(img, keypoints[0], 10, gt_color, 1) + cv2.circle(img, keypoints[1], 1, gt_color, 1) + cv2.circle(img, keypoints[1], 10, gt_color, 1) + + + if args.round2flat: + print('convert keypoint points in round view to flat view') + n_gt = len(keypoints_gt) + n_pred = len(keypoints_pred) + + for kp_pair in keypoints_gt: + gt_interval = sorted([x[0] for x in kp_pair]) + print('gt_interval:', gt_interval) + + for kp_pair in keypoints_pred: + pred_interval = sorted([x[0] for x in kp_pair]) + print('pred_interval:', pred_interval) + + cv2.imshow('image', img) + cv2.waitKeyEx(1) + print(img_path, img.shape) + + + result_path = osp.join(save_dir, file_name.replace('/', '-')) + print(osp.abspath(result_path)) + cv2.imwrite(result_path, img) + print('----------------------------') + + + + + # if img_id in annot_dict: + # print(annot_dict[img_id] \ No newline at end of file diff --git a/run_test.py b/run_test.py index 402b63f75d..61605e5418 100644 --- a/run_test.py +++ b/run_test.py @@ -1,189 +1,189 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import argparse -import os -import os.path as osp -import glob - -import mmengine -from mmengine.config import Config, DictAction -from mmengine.hooks import Hook -from mmengine.runner import Runner - - -def parse_args(work_dir='../../data/mmpose/work_dirs'): - parser = argparse.ArgumentParser( - description='MMPose test (and eval) model') - # parser.add_argument('config', help='test config file path') - parser.add_argument('--checkpoint', - default=None, - type=str, - help='checkpoint file') - - parser.add_argument('--work-dir', type=str, default=work_dir, - help='the directory to save evaluation results') - - parser.add_argument('--load', - type=str, - default='best', - choices=['last', 'best'], - help='select which chekpoint will be chosen [last|best]') - - parser.add_argument('--out', help='the file to save metric results.') - parser.add_argument( - '--dump', - type=str, - help='dump predictions to a pickle file for offline evaluation') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - default={}, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. 
For example, ' - "'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'") - parser.add_argument( - '--show-dir', - help='directory where the visualization images will be saved.') - parser.add_argument( - '--show', - action='store_true', - help='whether to display the prediction results in a window.') - parser.add_argument( - '--interval', - type=int, - default=1, - help='visualize per interval samples.') - parser.add_argument( - '--wait-time', - type=float, - default=1, - help='display time of every window. (second)') - parser.add_argument( - '--launcher', - choices=['none', 'pytorch', 'slurm', 'mpi'], - default='none', - help='job launcher') - parser.add_argument('--local_rank', type=int, default=0) - args = parser.parse_args() - if 'LOCAL_RANK' not in os.environ: - os.environ['LOCAL_RANK'] = str(args.local_rank) - return args - - -def merge_args(cfg, args): - """Merge CLI arguments to config.""" - - cfg.launcher = args.launcher - cfg.load_from = args.checkpoint - - # -------------------- work directory -------------------- - # work_dir is determined in this priority: CLI > segment in file > filename - # if args.work_dir is not None: - # # update configs according to CLI args if args.work_dir is not None - # cfg.work_dir = args.work_dir - # elif cfg.get('work_dir', None) is None: - # # use config filename as default work_dir if cfg.work_dir is None - # cfg.work_dir = osp.join('./work_dirs', - # osp.splitext(osp.basename(args.config))[0]) - - # -------------------- visualization -------------------- - if args.show or (args.show_dir is not None): - assert 'visualization' in cfg.default_hooks, \ - 'PoseVisualizationHook is not set in the ' \ - '`default_hooks` field of config. Please set ' \ - '`visualization=dict(type="PoseVisualizationHook")`' - - cfg.default_hooks.visualization.enable = True - cfg.default_hooks.visualization.show = args.show - if args.show: - cfg.default_hooks.visualization.wait_time = args.wait_time - cfg.default_hooks.visualization.out_dir = args.show_dir - cfg.default_hooks.visualization.interval = args.interval - - # -------------------- Dump predictions -------------------- - if args.dump is not None: - assert args.dump.endswith(('.pkl', '.pickle')), \ - 'The dump file must be a pkl file.' 
- dump_metric = dict(type='DumpResults', out_file_path=args.dump) - if isinstance(cfg.test_evaluator, (list, tuple)): - cfg.test_evaluator = [*cfg.test_evaluator, dump_metric] - else: - cfg.test_evaluator = [cfg.test_evaluator, dump_metric] - - # -------------------- Other arguments -------------------- - if args.cfg_options is not None: - cfg.merge_from_dict(args.cfg_options) - - return cfg - -def select_work_dir(args): - work_dir = args.work_dir - load = args.load - print("work_dir:", osp.abspath(work_dir)) - dirs = sorted(os.listdir(work_dir)) - - for i, d in enumerate(dirs, 0): - print("({}) {}".format(i, d)) - d_idx = input("Select directory that you want to load: ") - - path_opt = dirs[int(d_idx)] - chosen_dir = osp.abspath(os.path.join(work_dir, path_opt)) - config_path = glob.glob(osp.join(chosen_dir, '*.py'))[0] - - if args.checkpoint is None: - if load == 'last': - with open(osp.join(chosen_dir, 'last_checkpoint')) as cf: - pth_path = cf.readline() - else: - best_pths =glob.glob(osp.join(chosen_dir, 'best*.pth')) - # print(best_pths) - pth_path = best_pths[len(best_pths) - 1] - - pth = osp.basename(pth_path) - pth_path = osp.join(chosen_dir, pth) - - args.checkpoint = pth_path - - # print('config_path:', config_path) - # print('pth_path:', args.checkpoint) - - return config_path, pth_path - - -def main(): - work_base_dir = r'../../data/mmpose/work_dirs' - args = parse_args(work_dir=work_base_dir) - - config_path, checkpoint_path = select_work_dir(args) - configname = osp.splitext(osp.basename(config_path))[0] - - - # load config - # cfg = Config.fromfile(args.config) - cfg = Config.fromfile(config_path) - cfg = merge_args(cfg, args) - - cfg.checkpoint = checkpoint_path - cfg.work_dir = osp.join(work_base_dir, configname) - print('cfg.work_dir:', cfg.work_dir) - - - # build the runner from config - runner = Runner.from_cfg(cfg) - - if args.out: - - class SaveMetricHook(Hook): - - def after_test_epoch(self, _, metrics=None): - if metrics is not None: - mmengine.dump(metrics, args.out) - - runner.register_hook(SaveMetricHook(), 'LOWEST') - - # start testing - runner.test() - - -if __name__ == '__main__': - main() +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp +import glob + +import mmengine +from mmengine.config import Config, DictAction +from mmengine.hooks import Hook +from mmengine.runner import Runner + + +def parse_args(work_dir='../../data/mmpose/work_dirs'): + parser = argparse.ArgumentParser( + description='MMPose test (and eval) model') + # parser.add_argument('config', help='test config file path') + parser.add_argument('--checkpoint', + default=None, + type=str, + help='checkpoint file') + + parser.add_argument('--work-dir', type=str, default=work_dir, + help='the directory to save evaluation results') + + parser.add_argument('--load', + type=str, + default='best', + choices=['last', 'best'], + help='select which chekpoint will be chosen [last|best]') + + parser.add_argument('--out', help='the file to save metric results.') + parser.add_argument( + '--dump', + type=str, + help='dump predictions to a pickle file for offline evaluation') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + default={}, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. 
For example, ' + "'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'") + parser.add_argument( + '--show-dir', + help='directory where the visualization images will be saved.') + parser.add_argument( + '--show', + action='store_true', + help='whether to display the prediction results in a window.') + parser.add_argument( + '--interval', + type=int, + default=1, + help='visualize per interval samples.') + parser.add_argument( + '--wait-time', + type=float, + default=1, + help='display time of every window. (second)') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + return args + + +def merge_args(cfg, args): + """Merge CLI arguments to config.""" + + cfg.launcher = args.launcher + cfg.load_from = args.checkpoint + + # -------------------- work directory -------------------- + # work_dir is determined in this priority: CLI > segment in file > filename + # if args.work_dir is not None: + # # update configs according to CLI args if args.work_dir is not None + # cfg.work_dir = args.work_dir + # elif cfg.get('work_dir', None) is None: + # # use config filename as default work_dir if cfg.work_dir is None + # cfg.work_dir = osp.join('./work_dirs', + # osp.splitext(osp.basename(args.config))[0]) + + # -------------------- visualization -------------------- + if args.show or (args.show_dir is not None): + assert 'visualization' in cfg.default_hooks, \ + 'PoseVisualizationHook is not set in the ' \ + '`default_hooks` field of config. Please set ' \ + '`visualization=dict(type="PoseVisualizationHook")`' + + cfg.default_hooks.visualization.enable = True + cfg.default_hooks.visualization.show = args.show + if args.show: + cfg.default_hooks.visualization.wait_time = args.wait_time + cfg.default_hooks.visualization.out_dir = args.show_dir + cfg.default_hooks.visualization.interval = args.interval + + # -------------------- Dump predictions -------------------- + if args.dump is not None: + assert args.dump.endswith(('.pkl', '.pickle')), \ + 'The dump file must be a pkl file.' 
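With `--dump` set, the next lines append a `DumpResults` metric so that raw predictions are written to the given pickle file; as far as I know they can then be inspected offline along these lines (`results.pkl` is a hypothetical path):

# Offline inspection of predictions dumped via, e.g.:
#   python run_test.py --dump results.pkl
import mmengine

preds = mmengine.load('results.pkl')   # typically a list, one entry per sample
print(type(preds), len(preds))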
+ dump_metric = dict(type='DumpResults', out_file_path=args.dump) + if isinstance(cfg.test_evaluator, (list, tuple)): + cfg.test_evaluator = [*cfg.test_evaluator, dump_metric] + else: + cfg.test_evaluator = [cfg.test_evaluator, dump_metric] + + # -------------------- Other arguments -------------------- + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + return cfg + +def select_work_dir(args): + work_dir = args.work_dir + load = args.load + print("work_dir:", osp.abspath(work_dir)) + dirs = sorted(os.listdir(work_dir)) + + for i, d in enumerate(dirs, 0): + print("({}) {}".format(i, d)) + d_idx = input("Select directory that you want to load: ") + + path_opt = dirs[int(d_idx)] + chosen_dir = osp.abspath(os.path.join(work_dir, path_opt)) + config_path = glob.glob(osp.join(chosen_dir, '*.py'))[0] + + if args.checkpoint is None: + if load == 'last': + with open(osp.join(chosen_dir, 'last_checkpoint')) as cf: + pth_path = cf.readline() + else: + best_pths =glob.glob(osp.join(chosen_dir, 'best*.pth')) + # print(best_pths) + pth_path = best_pths[len(best_pths) - 1] + + pth = osp.basename(pth_path) + pth_path = osp.join(chosen_dir, pth) + + args.checkpoint = pth_path + + # print('config_path:', config_path) + # print('pth_path:', args.checkpoint) + + return config_path, pth_path + + +def main(): + work_base_dir = r'../../data/mmpose/work_dirs' + args = parse_args(work_dir=work_base_dir) + + config_path, checkpoint_path = select_work_dir(args) + configname = osp.splitext(osp.basename(config_path))[0] + + + # load config + # cfg = Config.fromfile(args.config) + cfg = Config.fromfile(config_path) + cfg = merge_args(cfg, args) + + cfg.checkpoint = checkpoint_path + cfg.work_dir = osp.join(work_base_dir, configname) + print('cfg.work_dir:', cfg.work_dir) + + + # build the runner from config + runner = Runner.from_cfg(cfg) + + if args.out: + + class SaveMetricHook(Hook): + + def after_test_epoch(self, _, metrics=None): + if metrics is not None: + mmengine.dump(metrics, args.out) + + runner.register_hook(SaveMetricHook(), 'LOWEST') + + # start testing + runner.test() + + +if __name__ == '__main__': + main() diff --git a/run_train.py b/run_train.py index 5cd1ecfa0f..0bdb784e7b 100644 --- a/run_train.py +++ b/run_train.py @@ -1,167 +1,167 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import argparse -import os -import os.path as osp - -from mmengine.config import Config, DictAction -from mmengine.runner import Runner - - -def parse_args(): - parser = argparse.ArgumentParser(description='Train a pose model') - parser.add_argument('config', help='train config file path') - # parser.add_argument('--work-dir', help='the dir to save logs and models') - parser.add_argument( - '--work-dir', - default='../../data/mmpose/work_dirs', - help='the dir to save logs and models') - parser.add_argument( - '--resume', - nargs='?', - type=str, - const='auto', - help='If specify checkpint path, resume from it, while if not ' - 'specify, try to auto resume from the latest checkpoint ' - 'in the work directory.') - parser.add_argument( - '--amp', - action='store_true', - default=False, - help='enable automatic-mixed-precision training') - parser.add_argument( - '--no-validate', - action='store_true', - help='whether not to evaluate the checkpoint during training') - parser.add_argument( - '--auto-scale-lr', - action='store_true', - help='whether to auto scale the learning rate according to the ' - 'actual batch size and the original batch size.') - parser.add_argument( - '--show-dir', - help='directory where the visualization images will be saved.') - parser.add_argument( - '--show', - action='store_true', - help='whether to display the prediction results in a window.') - parser.add_argument( - '--interval', - type=int, - default=1, - help='visualize per interval samples.') - parser.add_argument( - '--wait-time', - type=float, - default=1, - help='display time of every window. (second)') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. If the value to ' - 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' - 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') - parser.add_argument( - '--launcher', - choices=['none', 'pytorch', 'slurm', 'mpi'], - default='none', - help='job launcher') - # When using PyTorch version >= 2.0.0, the `torch.distributed.launch` - # will pass the `--local-rank` parameter to `tools/train.py` instead - # of `--local_rank`. 
- parser.add_argument('--local_rank', '--local-rank', type=int, default=0) - args = parser.parse_args() - if 'LOCAL_RANK' not in os.environ: - os.environ['LOCAL_RANK'] = str(args.local_rank) - - return args - - -def merge_args(cfg, args): - """Merge CLI arguments to config.""" - if args.no_validate: - cfg.val_cfg = None - cfg.val_dataloader = None - cfg.val_evaluator = None - - cfg.launcher = args.launcher - - # work_dir is determined in this priority: CLI > segment in file > filename - # if args.work_dir is not None: - # # update configs according to CLI args if args.work_dir is not None - # cfg.work_dir = args.work_dir - # elif cfg.get('work_dir', None) is None: - # # use config filename as default work_dir if cfg.work_dir is None - # cfg.work_dir = osp.join('./work_dirs', - # osp.splitext(osp.basename(args.config))[0]) - config_name = osp.splitext(osp.basename(args.config))[0] - cfg.work_dir = osp.join(args.work_dir, config_name) - - # enable automatic-mixed-precision training - if args.amp is True: - from mmengine.optim import AmpOptimWrapper, OptimWrapper - optim_wrapper = cfg.optim_wrapper.get('type', OptimWrapper) - assert optim_wrapper in (OptimWrapper, AmpOptimWrapper), \ - '`--amp` is not supported custom optimizer wrapper type ' \ - f'`{optim_wrapper}.' - cfg.optim_wrapper.type = 'AmpOptimWrapper' - cfg.optim_wrapper.setdefault('loss_scale', 'dynamic') - - # resume training - if args.resume == 'auto': - cfg.resume = True - cfg.load_from = None - elif args.resume is not None: - cfg.resume = True - cfg.load_from = args.resume - - # enable auto scale learning rate - if args.auto_scale_lr: - cfg.auto_scale_lr.enable = True - - # visualization - if args.show or (args.show_dir is not None): - assert 'visualization' in cfg.default_hooks, \ - 'PoseVisualizationHook is not set in the ' \ - '`default_hooks` field of config. Please set ' \ - '`visualization=dict(type="PoseVisualizationHook")`' - - cfg.default_hooks.visualization.enable = True - cfg.default_hooks.visualization.show = args.show - if args.show: - cfg.default_hooks.visualization.wait_time = args.wait_time - cfg.default_hooks.visualization.out_dir = args.show_dir - cfg.default_hooks.visualization.interval = args.interval - - if args.cfg_options is not None: - cfg.merge_from_dict(args.cfg_options) - - return cfg - - -def main(): - args = parse_args() - - # load config - cfg = Config.fromfile(args.config) - - # merge CLI arguments to config - cfg = merge_args(cfg, args) - - # set preprocess configs to model - if 'preprocess_cfg' in cfg: - cfg.model.setdefault('data_preprocessor', - cfg.get('preprocess_cfg', {})) - - # build the runner from config - runner = Runner.from_cfg(cfg) - - # start training - runner.train() - - -if __name__ == '__main__': - main() +# Copyright (c) OpenMMLab. All rights reserved. 
+import argparse +import os +import os.path as osp + +from mmengine.config import Config, DictAction +from mmengine.runner import Runner + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a pose model') + parser.add_argument('config', help='train config file path') + # parser.add_argument('--work-dir', help='the dir to save logs and models') + parser.add_argument( + '--work-dir', + default='../../data/mmpose/work_dirs', + help='the dir to save logs and models') + parser.add_argument( + '--resume', + nargs='?', + type=str, + const='auto', + help='If specify checkpint path, resume from it, while if not ' + 'specify, try to auto resume from the latest checkpoint ' + 'in the work directory.') + parser.add_argument( + '--amp', + action='store_true', + default=False, + help='enable automatic-mixed-precision training') + parser.add_argument( + '--no-validate', + action='store_true', + help='whether not to evaluate the checkpoint during training') + parser.add_argument( + '--auto-scale-lr', + action='store_true', + help='whether to auto scale the learning rate according to the ' + 'actual batch size and the original batch size.') + parser.add_argument( + '--show-dir', + help='directory where the visualization images will be saved.') + parser.add_argument( + '--show', + action='store_true', + help='whether to display the prediction results in a window.') + parser.add_argument( + '--interval', + type=int, + default=1, + help='visualize per interval samples.') + parser.add_argument( + '--wait-time', + type=float, + default=1, + help='display time of every window. (second)') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + # When using PyTorch version >= 2.0.0, the `torch.distributed.launch` + # will pass the `--local-rank` parameter to `tools/train.py` instead + # of `--local_rank`. 
+ parser.add_argument('--local_rank', '--local-rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + return args + + +def merge_args(cfg, args): + """Merge CLI arguments to config.""" + if args.no_validate: + cfg.val_cfg = None + cfg.val_dataloader = None + cfg.val_evaluator = None + + cfg.launcher = args.launcher + + # work_dir is determined in this priority: CLI > segment in file > filename + # if args.work_dir is not None: + # # update configs according to CLI args if args.work_dir is not None + # cfg.work_dir = args.work_dir + # elif cfg.get('work_dir', None) is None: + # # use config filename as default work_dir if cfg.work_dir is None + # cfg.work_dir = osp.join('./work_dirs', + # osp.splitext(osp.basename(args.config))[0]) + config_name = osp.splitext(osp.basename(args.config))[0] + cfg.work_dir = osp.join(args.work_dir, config_name) + + # enable automatic-mixed-precision training + if args.amp is True: + from mmengine.optim import AmpOptimWrapper, OptimWrapper + optim_wrapper = cfg.optim_wrapper.get('type', OptimWrapper) + assert optim_wrapper in (OptimWrapper, AmpOptimWrapper), \ + '`--amp` is not supported custom optimizer wrapper type ' \ + f'`{optim_wrapper}.' + cfg.optim_wrapper.type = 'AmpOptimWrapper' + cfg.optim_wrapper.setdefault('loss_scale', 'dynamic') + + # resume training + if args.resume == 'auto': + cfg.resume = True + cfg.load_from = None + elif args.resume is not None: + cfg.resume = True + cfg.load_from = args.resume + + # enable auto scale learning rate + if args.auto_scale_lr: + cfg.auto_scale_lr.enable = True + + # visualization + if args.show or (args.show_dir is not None): + assert 'visualization' in cfg.default_hooks, \ + 'PoseVisualizationHook is not set in the ' \ + '`default_hooks` field of config. 
Please set ' \ + '`visualization=dict(type="PoseVisualizationHook")`' + + cfg.default_hooks.visualization.enable = True + cfg.default_hooks.visualization.show = args.show + if args.show: + cfg.default_hooks.visualization.wait_time = args.wait_time + cfg.default_hooks.visualization.out_dir = args.show_dir + cfg.default_hooks.visualization.interval = args.interval + + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + return cfg + + +def main(): + args = parse_args() + + # load config + cfg = Config.fromfile(args.config) + + # merge CLI arguments to config + cfg = merge_args(cfg, args) + + # set preprocess configs to model + if 'preprocess_cfg' in cfg: + cfg.model.setdefault('data_preprocessor', + cfg.get('preprocess_cfg', {})) + + # build the runner from config + runner = Runner.from_cfg(cfg) + + # start training + runner.train() + + +if __name__ == '__main__': + main() diff --git a/setup.cfg b/setup.cfg index e3a37d1b6d..f2c8a7f9f0 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,26 +1,26 @@ -[bdist_wheel] -universal=1 - -[aliases] -test=pytest - -[yapf] -based_on_style = pep8 -blank_line_before_nested_class_or_def = true -split_before_expression_after_opening_paren = true -split_penalty_import_names=0 -SPLIT_PENALTY_AFTER_OPENING_BRACKET=800 - -[isort] -line_length = 79 -multi_line_output = 0 -extra_standard_library = pkg_resources,setuptools -known_first_party = mmpose -known_third_party = PIL,cv2,h5py,json_tricks,matplotlib,mmcv,munkres,numpy,pytest,pytorch_sphinx_theme,requests,scipy,seaborn,spacepy,titlecase,torch,torchvision,webcam_apis,xmltodict,xtcocotools -no_lines_before = STDLIB,LOCALFOLDER -default_section = THIRDPARTY - -[flake8] -per-file-ignores = - mmpose/configs/*: F401,F403,F405 - projects/*/configs/*: F401,F403,F405 +[bdist_wheel] +universal=1 + +[aliases] +test=pytest + +[yapf] +based_on_style = pep8 +blank_line_before_nested_class_or_def = true +split_before_expression_after_opening_paren = true +split_penalty_import_names=0 +SPLIT_PENALTY_AFTER_OPENING_BRACKET=800 + +[isort] +line_length = 79 +multi_line_output = 0 +extra_standard_library = pkg_resources,setuptools +known_first_party = mmpose +known_third_party = PIL,cv2,h5py,json_tricks,matplotlib,mmcv,munkres,numpy,pytest,pytorch_sphinx_theme,requests,scipy,seaborn,spacepy,titlecase,torch,torchvision,webcam_apis,xmltodict,xtcocotools +no_lines_before = STDLIB,LOCALFOLDER +default_section = THIRDPARTY + +[flake8] +per-file-ignores = + mmpose/configs/*: F401,F403,F405 + projects/*/configs/*: F401,F403,F405 diff --git a/setup.py b/setup.py index 8b3265fb70..fbb69d325c 100644 --- a/setup.py +++ b/setup.py @@ -1,211 +1,211 @@ -import os -import os.path as osp -import platform -import shutil -import sys -import warnings -from setuptools import find_packages, setup - -try: - import google.colab # noqa - ON_COLAB = True -except ImportError: - ON_COLAB = False - - -def readme(): - with open('README.md', encoding='utf-8') as f: - content = f.read() - return content - - -version_file = 'mmpose/version.py' - - -def get_version(): - with open(version_file, 'r') as f: - exec(compile(f.read(), version_file, 'exec')) - import sys - - # return short version for sdist - if 'sdist' in sys.argv or 'bdist_wheel' in sys.argv: - return locals()['short_version'] - else: - return locals()['__version__'] - - -def parse_requirements(fname='requirements.txt', with_version=True): - """Parse the package dependencies listed in a requirements file but strips - specific versioning information. 
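The patched `tools/train.py` above hard-codes a default `--work-dir` and always joins it with the config file's stem, so any `work_dir` set inside the config is overridden (the original CLI-priority logic is left commented out rather than removed). A minimal sketch of that path composition, using a made-up config path that is not part of this diff:

# Sketch of the work_dir composition done by the patched merge_args();
# the config path below is a hypothetical example, not a file from this diff.
import os.path as osp

work_dir_root = '../../data/mmpose/work_dirs'                 # default of the new --work-dir flag
config_path = 'configs/body_2d_keypoint/example_config.py'    # hypothetical config

config_name = osp.splitext(osp.basename(config_path))[0]
work_dir = osp.join(work_dir_root, config_name)
print(work_dir)  # ../../data/mmpose/work_dirs/example_config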
- - Args: - fname (str): path to requirements file - with_version (bool, default=False): if True include version specs - - Returns: - List[str]: list of requirements items - - CommandLine: - python -c "import setup; print(setup.parse_requirements())" - """ - import re - import sys - from os.path import exists - require_fpath = fname - - def parse_line(line): - """Parse information from a line in a requirements text file.""" - if line.startswith('-r '): - # Allow specifying requirements in other files - target = line.split(' ')[1] - for info in parse_require_file(target): - yield info - else: - info = {'line': line} - if line.startswith('-e '): - info['package'] = line.split('#egg=')[1] - elif '@git+' in line: - info['package'] = line - else: - # Remove versioning from the package - pat = '(' + '|'.join(['>=', '==', '>']) + ')' - parts = re.split(pat, line, maxsplit=1) - parts = [p.strip() for p in parts] - - info['package'] = parts[0] - if len(parts) > 1: - op, rest = parts[1:] - if ';' in rest: - # Handle platform specific dependencies - # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies - version, platform_deps = map(str.strip, - rest.split(';')) - info['platform_deps'] = platform_deps - else: - version = rest # NOQA - info['version'] = (op, version) - - if ON_COLAB and info['package'] == 'xtcocotools': - # Due to an incompatibility between the Colab platform and the - # pre-built xtcocotools PyPI package, it is necessary to - # compile xtcocotools from source on Colab. - info = dict( - line=info['line'], - package='xtcocotools@' - 'git+https://github.com/jin-s13/xtcocoapi') - - yield info - - def parse_require_file(fpath): - with open(fpath, 'r') as f: - for line in f.readlines(): - line = line.strip() - if line and not line.startswith('#'): - for info in parse_line(line): - yield info - - def gen_packages_items(): - if exists(require_fpath): - for info in parse_require_file(require_fpath): - parts = [info['package']] - if with_version and 'version' in info: - parts.extend(info['version']) - if not sys.version.startswith('3.4'): - # apparently package_deps are broken in 3.4 - platform_deps = info.get('platform_deps') - if platform_deps is not None: - parts.append(';' + platform_deps) - item = ''.join(parts) - yield item - - packages = list(gen_packages_items()) - return packages - - -def add_mim_extension(): - """Add extra files that are required to support MIM into the package. - - These files will be added by creating a symlink to the originals if the - package is installed in `editable` mode (e.g. pip install -e .), or by - copying from the originals otherwise. 
- """ - - # parse installment mode - if 'develop' in sys.argv: - # installed by `pip install -e .` - if platform.system() == 'Windows': - mode = 'copy' - else: - mode = 'symlink' - elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv: - # installed by `pip install .` - # or create source distribution by `python setup.py sdist` - mode = 'copy' - else: - return - - filenames = [ - 'tools', 'configs', 'demo', 'model-index.yml', 'dataset-index.yml' - ] - repo_path = osp.dirname(__file__) - mim_path = osp.join(repo_path, 'mmpose', '.mim') - os.makedirs(mim_path, exist_ok=True) - - for filename in filenames: - if osp.exists(filename): - src_path = osp.join(repo_path, filename) - tar_path = osp.join(mim_path, filename) - - if osp.isfile(tar_path) or osp.islink(tar_path): - os.remove(tar_path) - elif osp.isdir(tar_path): - shutil.rmtree(tar_path) - - if mode == 'symlink': - src_relpath = osp.relpath(src_path, osp.dirname(tar_path)) - os.symlink(src_relpath, tar_path) - elif mode == 'copy': - if osp.isfile(src_path): - shutil.copyfile(src_path, tar_path) - elif osp.isdir(src_path): - shutil.copytree(src_path, tar_path) - else: - warnings.warn(f'Cannot copy file {src_path}.') - else: - raise ValueError(f'Invalid mode {mode}') - - -if __name__ == '__main__': - add_mim_extension() - setup( - name='mmpose', - version=get_version(), - description='OpenMMLab Pose Estimation Toolbox and Benchmark.', - author='MMPose Contributors', - author_email='openmmlab@gmail.com', - keywords='computer vision, pose estimation', - long_description=readme(), - long_description_content_type='text/markdown', - packages=find_packages(exclude=('configs', 'tools', 'demo')), - include_package_data=True, - package_data={'mmpose.ops': ['*/*.so']}, - classifiers=[ - 'Development Status :: 4 - Beta', - 'License :: OSI Approved :: Apache Software License', - 'Operating System :: OS Independent', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: 3.9', - ], - url='https://github.com/open-mmlab/mmpose', - license='Apache License 2.0', - python_requires='>=3.7', - install_requires=parse_requirements('requirements/runtime.txt'), - extras_require={ - 'all': parse_requirements('requirements.txt'), - 'tests': parse_requirements('requirements/tests.txt'), - 'optional': parse_requirements('requirements/optional.txt'), - 'mim': parse_requirements('requirements/mminstall.txt'), - }, - zip_safe=False) +import os +import os.path as osp +import platform +import shutil +import sys +import warnings +from setuptools import find_packages, setup + +try: + import google.colab # noqa + ON_COLAB = True +except ImportError: + ON_COLAB = False + + +def readme(): + with open('README.md', encoding='utf-8') as f: + content = f.read() + return content + + +version_file = 'mmpose/version.py' + + +def get_version(): + with open(version_file, 'r') as f: + exec(compile(f.read(), version_file, 'exec')) + import sys + + # return short version for sdist + if 'sdist' in sys.argv or 'bdist_wheel' in sys.argv: + return locals()['short_version'] + else: + return locals()['__version__'] + + +def parse_requirements(fname='requirements.txt', with_version=True): + """Parse the package dependencies listed in a requirements file but strips + specific versioning information. 
+ + Args: + fname (str): path to requirements file + with_version (bool, default=False): if True include version specs + + Returns: + List[str]: list of requirements items + + CommandLine: + python -c "import setup; print(setup.parse_requirements())" + """ + import re + import sys + from os.path import exists + require_fpath = fname + + def parse_line(line): + """Parse information from a line in a requirements text file.""" + if line.startswith('-r '): + # Allow specifying requirements in other files + target = line.split(' ')[1] + for info in parse_require_file(target): + yield info + else: + info = {'line': line} + if line.startswith('-e '): + info['package'] = line.split('#egg=')[1] + elif '@git+' in line: + info['package'] = line + else: + # Remove versioning from the package + pat = '(' + '|'.join(['>=', '==', '>']) + ')' + parts = re.split(pat, line, maxsplit=1) + parts = [p.strip() for p in parts] + + info['package'] = parts[0] + if len(parts) > 1: + op, rest = parts[1:] + if ';' in rest: + # Handle platform specific dependencies + # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies + version, platform_deps = map(str.strip, + rest.split(';')) + info['platform_deps'] = platform_deps + else: + version = rest # NOQA + info['version'] = (op, version) + + if ON_COLAB and info['package'] == 'xtcocotools': + # Due to an incompatibility between the Colab platform and the + # pre-built xtcocotools PyPI package, it is necessary to + # compile xtcocotools from source on Colab. + info = dict( + line=info['line'], + package='xtcocotools@' + 'git+https://github.com/jin-s13/xtcocoapi') + + yield info + + def parse_require_file(fpath): + with open(fpath, 'r') as f: + for line in f.readlines(): + line = line.strip() + if line and not line.startswith('#'): + for info in parse_line(line): + yield info + + def gen_packages_items(): + if exists(require_fpath): + for info in parse_require_file(require_fpath): + parts = [info['package']] + if with_version and 'version' in info: + parts.extend(info['version']) + if not sys.version.startswith('3.4'): + # apparently package_deps are broken in 3.4 + platform_deps = info.get('platform_deps') + if platform_deps is not None: + parts.append(';' + platform_deps) + item = ''.join(parts) + yield item + + packages = list(gen_packages_items()) + return packages + + +def add_mim_extension(): + """Add extra files that are required to support MIM into the package. + + These files will be added by creating a symlink to the originals if the + package is installed in `editable` mode (e.g. pip install -e .), or by + copying from the originals otherwise. 
+ """ + + # parse installment mode + if 'develop' in sys.argv: + # installed by `pip install -e .` + if platform.system() == 'Windows': + mode = 'copy' + else: + mode = 'symlink' + elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv: + # installed by `pip install .` + # or create source distribution by `python setup.py sdist` + mode = 'copy' + else: + return + + filenames = [ + 'tools', 'configs', 'demo', 'model-index.yml', 'dataset-index.yml' + ] + repo_path = osp.dirname(__file__) + mim_path = osp.join(repo_path, 'mmpose', '.mim') + os.makedirs(mim_path, exist_ok=True) + + for filename in filenames: + if osp.exists(filename): + src_path = osp.join(repo_path, filename) + tar_path = osp.join(mim_path, filename) + + if osp.isfile(tar_path) or osp.islink(tar_path): + os.remove(tar_path) + elif osp.isdir(tar_path): + shutil.rmtree(tar_path) + + if mode == 'symlink': + src_relpath = osp.relpath(src_path, osp.dirname(tar_path)) + os.symlink(src_relpath, tar_path) + elif mode == 'copy': + if osp.isfile(src_path): + shutil.copyfile(src_path, tar_path) + elif osp.isdir(src_path): + shutil.copytree(src_path, tar_path) + else: + warnings.warn(f'Cannot copy file {src_path}.') + else: + raise ValueError(f'Invalid mode {mode}') + + +if __name__ == '__main__': + add_mim_extension() + setup( + name='mmpose', + version=get_version(), + description='OpenMMLab Pose Estimation Toolbox and Benchmark.', + author='MMPose Contributors', + author_email='openmmlab@gmail.com', + keywords='computer vision, pose estimation', + long_description=readme(), + long_description_content_type='text/markdown', + packages=find_packages(exclude=('configs', 'tools', 'demo')), + include_package_data=True, + package_data={'mmpose.ops': ['*/*.so']}, + classifiers=[ + 'Development Status :: 4 - Beta', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + ], + url='https://github.com/open-mmlab/mmpose', + license='Apache License 2.0', + python_requires='>=3.7', + install_requires=parse_requirements('requirements/runtime.txt'), + extras_require={ + 'all': parse_requirements('requirements.txt'), + 'tests': parse_requirements('requirements/tests.txt'), + 'optional': parse_requirements('requirements/optional.txt'), + 'mim': parse_requirements('requirements/mminstall.txt'), + }, + zip_safe=False) diff --git a/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py b/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py new file mode 100644 index 0000000000..cec1c810b2 --- /dev/null +++ b/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py @@ -0,0 +1,280 @@ +auto_scale_lr = dict(base_batch_size=512) +backend_args = dict(backend='local') +codec = dict( + heatmap_size=( + 48, + 64, + ), + input_size=( + 192, + 256, + ), + sigma=2, + type='MSRAHeatmap') +custom_hooks = [ + dict(type='SyncBuffersHook'), +] +data_mode = 'topdown' +data_root = 'data/coco/' +dataset_type = 'CocoDataset' +default_hooks = dict( + checkpoint=dict( + interval=10, + rule='greater', + save_best='coco/AP', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict(type='ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(enable=False, type='PoseVisualizationHook')) +default_scope = 'mmpose' +env_cfg = dict( + cudnn_benchmark=False, + dist_cfg=dict(backend='nccl'), + 
mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +load_from = None +log_level = 'INFO' +log_processor = dict( + by_epoch=True, num_digits=6, type='LogProcessor', window_size=50) +model = dict( + backbone=dict( + extra=dict( + stage1=dict( + block='BOTTLENECK', + num_blocks=(4, ), + num_branches=1, + num_channels=(64, ), + num_modules=1), + stage2=dict( + block='BASIC', + num_blocks=( + 4, + 4, + ), + num_branches=2, + num_channels=( + 48, + 96, + ), + num_modules=1), + stage3=dict( + block='BASIC', + num_blocks=( + 4, + 4, + 4, + ), + num_branches=3, + num_channels=( + 48, + 96, + 192, + ), + num_modules=4), + stage4=dict( + block='BASIC', + num_blocks=( + 4, + 4, + 4, + 4, + ), + num_branches=4, + num_channels=( + 48, + 96, + 192, + 384, + ), + num_modules=3)), + in_channels=3, + init_cfg=dict( + checkpoint= + 'https://download.openmmlab.com/mmpose/pretrain_models/hrnet_w48-8ef0771d.pth', + type='Pretrained'), + type='HRNet'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 123.675, + 116.28, + 103.53, + ], + std=[ + 58.395, + 57.12, + 57.375, + ], + type='PoseDataPreprocessor'), + head=dict( + decoder=dict( + heatmap_size=( + 48, + 64, + ), + input_size=( + 192, + 256, + ), + sigma=2, + type='MSRAHeatmap'), + deconv_out_channels=None, + in_channels=48, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + out_channels=17, + type='HeatmapHead'), + test_cfg=dict(flip_mode='heatmap', flip_test=True, shift_heatmap=True), + type='TopdownPoseEstimator') +optim_wrapper = dict(optimizer=dict(lr=0.0005, type='Adam')) +param_scheduler = [ + dict( + begin=0, by_epoch=False, end=500, start_factor=0.001, type='LinearLR'), + dict( + begin=0, + by_epoch=True, + end=210, + gamma=0.1, + milestones=[ + 170, + 200, + ], + type='MultiStepLR'), +] +resume = False +test_cfg = dict() +test_dataloader = dict( + batch_size=32, + dataset=dict( + ann_file='annotations/person_keypoints_val2017.json', + bbox_file= + 'data/coco/person_detection_results/COCO_val2017_detections_AP_H_56_person.json', + data_mode='topdown', + data_prefix=dict(img='val2017/'), + data_root='data/coco/', + pipeline=[ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(input_size=( + 192, + 256, + ), type='TopdownAffine'), + dict(type='PackPoseInputs'), + ], + test_mode=True, + type='CocoDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + sampler=dict(round_up=False, shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/annotations/person_keypoints_val2017.json', + type='CocoMetric') +train_cfg = dict(by_epoch=True, max_epochs=210, val_interval=10) +train_dataloader = dict( + batch_size=32, + dataset=dict( + ann_file='annotations/person_keypoints_train2017.json', + data_mode='topdown', + data_prefix=dict(img='train2017/'), + data_root='data/coco/', + pipeline=[ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(direction='horizontal', type='RandomFlip'), + dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(input_size=( + 192, + 256, + ), type='TopdownAffine'), + dict( + encoder=dict( + heatmap_size=( + 48, + 64, + ), + input_size=( + 192, + 256, + ), + sigma=2, + type='MSRAHeatmap'), + type='GenerateTarget'), + dict(type='PackPoseInputs'), + ], + type='CocoDataset'), + num_workers=2, + persistent_workers=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(direction='horizontal', type='RandomFlip'), + 
dict(type='RandomHalfBody'), + dict(type='RandomBBoxTransform'), + dict(input_size=( + 192, + 256, + ), type='TopdownAffine'), + dict( + encoder=dict( + heatmap_size=( + 48, + 64, + ), + input_size=( + 192, + 256, + ), + sigma=2, + type='MSRAHeatmap'), + type='GenerateTarget'), + dict(type='PackPoseInputs'), +] +val_cfg = dict() +val_dataloader = dict( + batch_size=32, + dataset=dict( + ann_file='annotations/person_keypoints_val2017.json', + bbox_file= + 'data/coco/person_detection_results/COCO_val2017_detections_AP_H_56_person.json', + data_mode='topdown', + data_prefix=dict(img='val2017/'), + data_root='data/coco/', + pipeline=[ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(input_size=( + 192, + 256, + ), type='TopdownAffine'), + dict(type='PackPoseInputs'), + ], + test_mode=True, + type='CocoDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + sampler=dict(round_up=False, shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/annotations/person_keypoints_val2017.json', + type='CocoMetric') +val_pipeline = [ + dict(type='LoadImage'), + dict(type='GetBBoxCenterScale'), + dict(input_size=( + 192, + 256, + ), type='TopdownAffine'), + dict(type='PackPoseInputs'), +] +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='PoseLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) diff --git a/tests/data/300w/test_300w.json b/tests/data/300w/test_300w.json index e825300a57..53d2d387b6 100644 --- a/tests/data/300w/test_300w.json +++ b/tests/data/300w/test_300w.json @@ -1,477 +1,477 @@ -{ - "categories": [ - { - "supercategory": "person", - "id": 1, - "name": "face", - "keypoints": [], - "skeleton": [] - } - ], - "images": [ - { - "id": 197, - "file_name": "indoor_029.png", - "height": 845, - "width": 960 - }, - { - "id": 565, - "file_name": "indoor_020.png", - "height": 768, - "width": 726 - } - ], - "annotations": [ - { - "image_id": 197, - "id": 197, - "keypoints": [ - 268.0, - 398.882, - 1, - 285.21, - 470.547, - 1, - 303.994, - 540.61, - 1, - 332.8, - 611.274, - 1, - 376.829, - 659.993, - 1, - 428.904, - 701.529, - 1, - 493.765, - 726.48, - 1, - 566.941, - 741.209, - 1, - 615.5, - 733.248, - 1, - 660.628, - 711.888, - 1, - 693.575, - 666.8610000000001, - 1, - 707.9989999999998, - 602.151, - 1, - 710.0010000000002, - 540.7090000000002, - 1, - 710.702, - 482.586, - 1, - 705.705, - 430.128, - 1, - 698.574, - 376.051, - 1, - 687.17, - 325.797, - 1, - 335.426, - 370.217, - 1, - 352.01, - 339.706, - 1, - 400.98800000000006, - 317.285, - 1, - 449.164, - 310.243, - 1, - 493.34, - 314.9120000000001, - 1, - 548.874, - 304.259, - 1, - 572.625, - 284.111, - 1, - 609.946, - 265.0, - 1, - 650.465, - 269.886, - 1, - 672.5269999999998, - 287.694, - 1, - 531.823, - 349.5019999999999, - 1, - 543.992, - 387.47, - 1, - 557.0459999999998, - 425.639, - 1, - 570.283, - 465.089, - 1, - 521.077, - 509.142, - 1, - 543.5830000000002, - 511.647, - 1, - 569.154, - 510.935, - 1, - 589.758, - 504.75, - 1, - 607.544, - 494.626, - 1, - 372.146, - 389.57, - 1, - 399.878, - 370.642, - 1, - 431.883, - 359.838, - 1, - 465.725, - 371.503, - 1, - 437.99, - 384.279, - 1, - 406.296, - 393.511, - 1, - 571.331, - 349.968, - 1, - 599.158, - 324.208, - 1, - 630.259, - 318.067, - 1, - 656.076, - 327.782, - 1, - 635.32, - 340.57199999999995, - 1, - 607.295, - 346.391, - 1, - 479.066, - 604.947, - 1, - 519.818, - 577.8, - 1, - 547.948, - 566.137, - 1, - 572.52, - 568.232, - 1, - 594.948, - 556.586, - 1, 
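The dumped `td-hm_hrnet-w48_8xb32-210e_coco-256x192.py` config added above can be loaded back with mmengine to double-check a few key fields. A minimal sketch, assuming the file sits in the current working directory:

# Sketch: round-trip the new config through mmengine and inspect it.
from mmengine.config import Config

cfg = Config.fromfile('td-hm_hrnet-w48_8xb32-210e_coco-256x192.py')
print(cfg.codec['input_size'])               # (192, 256)
print(cfg.codec['heatmap_size'])             # (48, 64)
print(cfg.train_cfg['max_epochs'])           # 210
print(cfg.optim_wrapper['optimizer']['lr'])  # 0.0005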
- 621.335, - 562.737, - 1, - 653.6, - 571.3580000000002, - 1, - 623.72, - 596.32, - 1, - 606.549, - 604.577, - 1, - 578.673, - 606.798, - 1, - 554.4830000000002, - 609.318, - 1, - 525.276, - 609.497, - 1, - 494.741, - 601.097, - 1, - 549.953, - 585.0319999999998, - 1, - 573.969, - 584.442, - 1, - 599.372, - 575.65, - 1, - 640.35, - 573.788, - 1, - 599.372, - 575.65, - 1, - 573.969, - 584.442, - 1, - 549.953, - 585.0319999999998, - 1 - ], - "num_keypoints": 68, - "bbox": [ - 223.7298, - 217.3791, - 531.2424000000001, - 571.4508 - ], - "iscrowd": 0, - "area": 303578.89447392005, - "category_id": 1, - "center": [ - 489.5, - 503.5 - ], - "scale": 2.385 - }, - { - "image_id": 565, - "id": 565, - "keypoints": [ - 70.0, - 292.332, - 1, - 85.978, - 359.108, - 1, - 106.67, - 442.2480000000001, - 1, - 132.174, - 524.227, - 1, - 170.87900000000005, - 587.591, - 1, - 220.419, - 640.665, - 1, - 275.329, - 686.7510000000002, - 1, - 345.149, - 712.11, - 1, - 415.072, - 700.013, - 1, - 455.739, - 681.039, - 1, - 491.441, - 646.908, - 1, - 522.22, - 601.67, - 1, - 545.278, - 556.815, - 1, - 570.101, - 495.899, - 1, - 588.304, - 413.976, - 1, - 595.136, - 343.6280000000001, - 1, - 590.716, - 280.211, - 1, - 118.878, - 305.308, - 1, - 158.248, - 281.872, - 1, - 202.699, - 284.469, - 1, - 246.669, - 294.941, - 1, - 294.485, - 316.657, - 1, - 387.621, - 306.5490000000001, - 1, - 437.315, - 274.369, - 1, - 483.305, - 246.679, - 1, - 531.807, - 219.0, - 1, - 574.753, - 226.314, - 1, - 350.492, - 372.72, - 1, - 354.8180000000001, - 422.627, - 1, - 358.916, - 467.076, - 1, - 364.204, - 508.283, - 1, - 303.536, - 510.181, - 1, - 332.565, - 524.2280000000002, - 1, - 361.282, - 537.337, - 1, - 385.853, - 530.722, - 1, - 410.586, - 512.7090000000002, - 1, - 171.577, - 361.551, - 1, - 203.614, - 344.588, - 1, - 246.448, - 345.9380000000001, - 1, - 288.441, - 368.74300000000005, - 1, - 246.677, - 376.513, - 1, - 202.377, - 382.091, - 1, - 411.996, - 361.712, - 1, - 445.408, - 332.093, - 1, - 485.232, - 319.01, - 1, - 518.47, - 328.7990000000001, - 1, - 492.908, - 360.212, - 1, - 447.886, - 364.719, - 1, - 256.704, - 564.955, - 1, - 306.255, - 569.807, - 1, - 333.68, - 566.9019999999998, - 1, - 360.689, - 571.737, - 1, - 391.088, - 565.381, - 1, - 426.92, - 559.18, - 1, - 476.772, - 545.14, - 1, - 429.904, - 606.391, - 1, - 398.257, - 628.13, - 1, - 361.86, - 635.736, - 1, - 332.214, - 631.4259999999998, - 1, - 300.871, - 615.508, - 1, - 271.028, - 570.685, - 1, - 333.764, - 594.513, - 1, - 360.897, - 597.525, - 1, - 391.282, - 594.612, - 1, - 467.095, - 554.192, - 1, - 391.282, - 594.612, - 1, - 360.897, - 597.525, - 1, - 333.764, - 594.513, - 1 - ], - "num_keypoints": 68, - "bbox": [ - 17.486400000000003, - 169.689, - 630.1632, - 591.7320000000001 - ], - "iscrowd": 0, - "area": 372887.7306624, - "category_id": 1, - "center": [ - 333.0, - 466.0 - ], - "scale": 2.63 - } - ] +{ + "categories": [ + { + "supercategory": "person", + "id": 1, + "name": "face", + "keypoints": [], + "skeleton": [] + } + ], + "images": [ + { + "id": 197, + "file_name": "indoor_029.png", + "height": 845, + "width": 960 + }, + { + "id": 565, + "file_name": "indoor_020.png", + "height": 768, + "width": 726 + } + ], + "annotations": [ + { + "image_id": 197, + "id": 197, + "keypoints": [ + 268.0, + 398.882, + 1, + 285.21, + 470.547, + 1, + 303.994, + 540.61, + 1, + 332.8, + 611.274, + 1, + 376.829, + 659.993, + 1, + 428.904, + 701.529, + 1, + 493.765, + 726.48, + 1, + 566.941, + 741.209, + 1, + 615.5, + 733.248, + 1, + 660.628, + 711.888, + 1, + 
693.575, + 666.8610000000001, + 1, + 707.9989999999998, + 602.151, + 1, + 710.0010000000002, + 540.7090000000002, + 1, + 710.702, + 482.586, + 1, + 705.705, + 430.128, + 1, + 698.574, + 376.051, + 1, + 687.17, + 325.797, + 1, + 335.426, + 370.217, + 1, + 352.01, + 339.706, + 1, + 400.98800000000006, + 317.285, + 1, + 449.164, + 310.243, + 1, + 493.34, + 314.9120000000001, + 1, + 548.874, + 304.259, + 1, + 572.625, + 284.111, + 1, + 609.946, + 265.0, + 1, + 650.465, + 269.886, + 1, + 672.5269999999998, + 287.694, + 1, + 531.823, + 349.5019999999999, + 1, + 543.992, + 387.47, + 1, + 557.0459999999998, + 425.639, + 1, + 570.283, + 465.089, + 1, + 521.077, + 509.142, + 1, + 543.5830000000002, + 511.647, + 1, + 569.154, + 510.935, + 1, + 589.758, + 504.75, + 1, + 607.544, + 494.626, + 1, + 372.146, + 389.57, + 1, + 399.878, + 370.642, + 1, + 431.883, + 359.838, + 1, + 465.725, + 371.503, + 1, + 437.99, + 384.279, + 1, + 406.296, + 393.511, + 1, + 571.331, + 349.968, + 1, + 599.158, + 324.208, + 1, + 630.259, + 318.067, + 1, + 656.076, + 327.782, + 1, + 635.32, + 340.57199999999995, + 1, + 607.295, + 346.391, + 1, + 479.066, + 604.947, + 1, + 519.818, + 577.8, + 1, + 547.948, + 566.137, + 1, + 572.52, + 568.232, + 1, + 594.948, + 556.586, + 1, + 621.335, + 562.737, + 1, + 653.6, + 571.3580000000002, + 1, + 623.72, + 596.32, + 1, + 606.549, + 604.577, + 1, + 578.673, + 606.798, + 1, + 554.4830000000002, + 609.318, + 1, + 525.276, + 609.497, + 1, + 494.741, + 601.097, + 1, + 549.953, + 585.0319999999998, + 1, + 573.969, + 584.442, + 1, + 599.372, + 575.65, + 1, + 640.35, + 573.788, + 1, + 599.372, + 575.65, + 1, + 573.969, + 584.442, + 1, + 549.953, + 585.0319999999998, + 1 + ], + "num_keypoints": 68, + "bbox": [ + 223.7298, + 217.3791, + 531.2424000000001, + 571.4508 + ], + "iscrowd": 0, + "area": 303578.89447392005, + "category_id": 1, + "center": [ + 489.5, + 503.5 + ], + "scale": 2.385 + }, + { + "image_id": 565, + "id": 565, + "keypoints": [ + 70.0, + 292.332, + 1, + 85.978, + 359.108, + 1, + 106.67, + 442.2480000000001, + 1, + 132.174, + 524.227, + 1, + 170.87900000000005, + 587.591, + 1, + 220.419, + 640.665, + 1, + 275.329, + 686.7510000000002, + 1, + 345.149, + 712.11, + 1, + 415.072, + 700.013, + 1, + 455.739, + 681.039, + 1, + 491.441, + 646.908, + 1, + 522.22, + 601.67, + 1, + 545.278, + 556.815, + 1, + 570.101, + 495.899, + 1, + 588.304, + 413.976, + 1, + 595.136, + 343.6280000000001, + 1, + 590.716, + 280.211, + 1, + 118.878, + 305.308, + 1, + 158.248, + 281.872, + 1, + 202.699, + 284.469, + 1, + 246.669, + 294.941, + 1, + 294.485, + 316.657, + 1, + 387.621, + 306.5490000000001, + 1, + 437.315, + 274.369, + 1, + 483.305, + 246.679, + 1, + 531.807, + 219.0, + 1, + 574.753, + 226.314, + 1, + 350.492, + 372.72, + 1, + 354.8180000000001, + 422.627, + 1, + 358.916, + 467.076, + 1, + 364.204, + 508.283, + 1, + 303.536, + 510.181, + 1, + 332.565, + 524.2280000000002, + 1, + 361.282, + 537.337, + 1, + 385.853, + 530.722, + 1, + 410.586, + 512.7090000000002, + 1, + 171.577, + 361.551, + 1, + 203.614, + 344.588, + 1, + 246.448, + 345.9380000000001, + 1, + 288.441, + 368.74300000000005, + 1, + 246.677, + 376.513, + 1, + 202.377, + 382.091, + 1, + 411.996, + 361.712, + 1, + 445.408, + 332.093, + 1, + 485.232, + 319.01, + 1, + 518.47, + 328.7990000000001, + 1, + 492.908, + 360.212, + 1, + 447.886, + 364.719, + 1, + 256.704, + 564.955, + 1, + 306.255, + 569.807, + 1, + 333.68, + 566.9019999999998, + 1, + 360.689, + 571.737, + 1, + 391.088, + 565.381, + 1, + 426.92, + 559.18, + 1, + 476.772, + 
545.14, + 1, + 429.904, + 606.391, + 1, + 398.257, + 628.13, + 1, + 361.86, + 635.736, + 1, + 332.214, + 631.4259999999998, + 1, + 300.871, + 615.508, + 1, + 271.028, + 570.685, + 1, + 333.764, + 594.513, + 1, + 360.897, + 597.525, + 1, + 391.282, + 594.612, + 1, + 467.095, + 554.192, + 1, + 391.282, + 594.612, + 1, + 360.897, + 597.525, + 1, + 333.764, + 594.513, + 1 + ], + "num_keypoints": 68, + "bbox": [ + 17.486400000000003, + 169.689, + 630.1632, + 591.7320000000001 + ], + "iscrowd": 0, + "area": 372887.7306624, + "category_id": 1, + "center": [ + 333.0, + 466.0 + ], + "scale": 2.63 + } + ] } \ No newline at end of file diff --git a/tests/data/aflw/test_aflw.json b/tests/data/aflw/test_aflw.json index cc2e903e26..bf4336c1a4 100644 --- a/tests/data/aflw/test_aflw.json +++ b/tests/data/aflw/test_aflw.json @@ -1,185 +1,185 @@ -{ - "categories": [ - { - "supercategory": "person", - "id": 1, - "name": "face", - "keypoints": [], - "skeleton": [] - } - ], - "images": [ - { - "id": 3, - "file_name": "image22568.jpg", - "height": 1280, - "width": 853 - }, - { - "id": 68, - "file_name": "image04476.jpg", - "height": 500, - "width": 439 - } - ], - "annotations": [ - { - "image_id": 3, - "id": 3, - "keypoints": [ - 337.28341384863, - 205.78904991948002, - 1, - 370.46215780998, - 203.18679549113997, - 1, - 400.38808373591, - 210.99355877617, - 1, - 481.70853462158, - 217.49919484702, - 1, - 518.7906602254401, - 210.99355877617, - 1, - 549.3671497584501, - 223.35426731079, - 1, - 359.40257648952996, - 253.28019323671, - 1, - 391.28019323671, - 254.58132045089002, - 1, - 419.25442834138, - 261.08695652174, - 1, - 479.10628019324, - 259.78582930757005, - 1, - 507.08051529790976, - 255.88244766506, - 1, - 533.10305958132, - 258.4847020934, - 1, - 404.29146537842, - 323.54106280192997, - 1, - 443.32528180354007, - 338.5040257649, - 1, - 483.66022544283, - 327.44444444444, - 1, - 381.52173913043, - 374.28502415459, - 1, - 443.32528180354007, - 376.23671497584996, - 1, - 507.73107890499, - 372.98389694042, - 1, - 451.0, - 478.0, - 1 - ], - "num_keypoints": 19, - "bbox": [ - 316.07504025764797, - 175.70547504025396, - 254.50048309178408, - 329.7758454106321 - ], - "iscrowd": 0, - "area": 83928.11196900737, - "category_id": 1, - "center": [ - 449.0, - 321.0 - ], - "scale": 1.81, - "box_size": 362.0 - }, - { - "image_id": 68, - "id": 68, - "keypoints": [ - 126.64745330811, - 157.27305603027, - 1, - 134.30273752013, - 153.39452495973998, - 1, - 145.34465026855, - 153.01428222656, - 1, - 165.48123168945, - 146.28958129883, - 1, - 181.7833404541, - 140.24139404297, - 1, - 198.6918182373, - 143.05288696289, - 1, - 133.90043640137, - 167.45462036133, - 1, - 141.77455716586002, - 165.24637681158995, - 1, - 148.98872785829, - 163.70048309178998, - 1, - 174.96592712402, - 157.80386352539, - 1, - 185.42395019531003, - 155.1201171875, - 1, - 194.88919067383, - 154.83345031738, - 1, - 145.87278582931, - 188.89049919485, - 1, - 152.59581320451, - 177.61352657005, - 1, - 174.75362318841, - 185.34299516908, - 1, - 145.63929146538, - 213.68438003221002, - 1, - 161.87117552335, - 211.3655394525, - 1, - 187.12077294686, - 207.24315619968002, - 1, - 166.0, - 244.0, - 1 - ], - "num_keypoints": 19, - "bbox": [ - 119.443016815191, - 129.865533447267, - 86.453237915028, - 124.51032714843598 - ], - "iscrowd": 0, - "area": 10764.320935841704, - "category_id": 1, - "center": [ - 166.0, - 185.0 - ], - "scale": 0.64, - "box_size": 128.0 - } - ] +{ + "categories": [ + { + "supercategory": "person", + "id": 1, + "name": "face", + 
"keypoints": [], + "skeleton": [] + } + ], + "images": [ + { + "id": 3, + "file_name": "image22568.jpg", + "height": 1280, + "width": 853 + }, + { + "id": 68, + "file_name": "image04476.jpg", + "height": 500, + "width": 439 + } + ], + "annotations": [ + { + "image_id": 3, + "id": 3, + "keypoints": [ + 337.28341384863, + 205.78904991948002, + 1, + 370.46215780998, + 203.18679549113997, + 1, + 400.38808373591, + 210.99355877617, + 1, + 481.70853462158, + 217.49919484702, + 1, + 518.7906602254401, + 210.99355877617, + 1, + 549.3671497584501, + 223.35426731079, + 1, + 359.40257648952996, + 253.28019323671, + 1, + 391.28019323671, + 254.58132045089002, + 1, + 419.25442834138, + 261.08695652174, + 1, + 479.10628019324, + 259.78582930757005, + 1, + 507.08051529790976, + 255.88244766506, + 1, + 533.10305958132, + 258.4847020934, + 1, + 404.29146537842, + 323.54106280192997, + 1, + 443.32528180354007, + 338.5040257649, + 1, + 483.66022544283, + 327.44444444444, + 1, + 381.52173913043, + 374.28502415459, + 1, + 443.32528180354007, + 376.23671497584996, + 1, + 507.73107890499, + 372.98389694042, + 1, + 451.0, + 478.0, + 1 + ], + "num_keypoints": 19, + "bbox": [ + 316.07504025764797, + 175.70547504025396, + 254.50048309178408, + 329.7758454106321 + ], + "iscrowd": 0, + "area": 83928.11196900737, + "category_id": 1, + "center": [ + 449.0, + 321.0 + ], + "scale": 1.81, + "box_size": 362.0 + }, + { + "image_id": 68, + "id": 68, + "keypoints": [ + 126.64745330811, + 157.27305603027, + 1, + 134.30273752013, + 153.39452495973998, + 1, + 145.34465026855, + 153.01428222656, + 1, + 165.48123168945, + 146.28958129883, + 1, + 181.7833404541, + 140.24139404297, + 1, + 198.6918182373, + 143.05288696289, + 1, + 133.90043640137, + 167.45462036133, + 1, + 141.77455716586002, + 165.24637681158995, + 1, + 148.98872785829, + 163.70048309178998, + 1, + 174.96592712402, + 157.80386352539, + 1, + 185.42395019531003, + 155.1201171875, + 1, + 194.88919067383, + 154.83345031738, + 1, + 145.87278582931, + 188.89049919485, + 1, + 152.59581320451, + 177.61352657005, + 1, + 174.75362318841, + 185.34299516908, + 1, + 145.63929146538, + 213.68438003221002, + 1, + 161.87117552335, + 211.3655394525, + 1, + 187.12077294686, + 207.24315619968002, + 1, + 166.0, + 244.0, + 1 + ], + "num_keypoints": 19, + "bbox": [ + 119.443016815191, + 129.865533447267, + 86.453237915028, + 124.51032714843598 + ], + "iscrowd": 0, + "area": 10764.320935841704, + "category_id": 1, + "center": [ + 166.0, + 185.0 + ], + "scale": 0.64, + "box_size": 128.0 + } + ] } \ No newline at end of file diff --git a/tests/data/aic/test_aic.json b/tests/data/aic/test_aic.json index 28b006a5ff..769cf065d6 100644 --- a/tests/data/aic/test_aic.json +++ b/tests/data/aic/test_aic.json @@ -1,625 +1,625 @@ -{ - "info": { - "description": "MMPose example aic dataset", - "version": "1.0", - "year": "2020", - "date_created": "2020/08/25" - }, - "licenses": [ - { - "url": "", - "id": 1, - "name": "" - } - ], - "categories": [ - { - "supercategory": "person", - "id": 1, - "name": "person", - "keypoints": [ - "Right Shoulder", - "Right Elbow", - "Right Wrist", - "Left Shoulder", - "Left Elbow", - "Left Wrist", - "Right Hip", - "Right Knee", - "Right Ankle", - "Left Hip", - "Left Knee", - "Left Ankle", - "Head top", - "Neck" - ], - "skeleton": [ - [ - 3, - 2 - ], - [ - 2, - 1 - ], - [ - 1, - 14 - ], - [ - 14, - 4 - ], - [ - 4, - 5 - ], - [ - 5, - 6 - ], - [ - 9, - 8 - ], - [ - 8, - 7 - ], - [ - 7, - 10 - ], - [ - 10, - 11 - ], - [ - 11, - 12 - ], - [ - 13, - 14 - ], - [ - 1, - 7 - ], 
- [ - 4, - 10 - ] - ] - } - ], - "images": [ - { - "url": "http://www.sinaimg.cn/dy/slidenews/4_img/2013_47/704_1154733_789201.jpg", - "file_name": "054d9ce9201beffc76e5ff2169d2af2f027002ca.jpg", - "height": 600, - "width": 900, - "id": 1 - }, - { - "url": "http://www.sinaimg.cn/dy/slidenews/2_img/2015_26/820_1533617_599302.jpg", - "file_name": "fa436c914fe4a8ec1ec5474af4d3820b84d17561.jpg", - "height": 596, - "width": 900, - "id": 2 - }, - { - "url": "http://www.sinaimg.cn/dy/slidenews/2_img/2016_39/730_1947359_260964.jpg", - "file_name": "ff945ae2e729f24eea992814639d59b3bdec8bd8.jpg", - "height": 641, - "width": 950, - "id": 3 - } - ], - "annotations": [ - { - "bbox": [ - 279, - 55, - 213, - 544 - ], - "keypoints": [ - 313, - 201, - 2, - 312, - 313, - 1, - 320, - 424, - 2, - 406, - 197, - 1, - 431, - 286, - 1, - 459, - 269, - 2, - 375, - 447, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 416, - 441, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 395, - 74, - 2, - 372, - 170, - 2 - ], - "num_keypoints": 10, - "image_id": 1, - "category_id": 1, - "id": 4 - }, - { - "bbox": [ - 541, - 131, - 329, - 468 - ], - "keypoints": [ - 637, - 374, - 1, - 626, - 509, - 2, - 0, - 0, - 0, - 755, - 347, - 2, - 728, - 538, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 604, - 169, - 2, - 674, - 290, - 2 - ], - "num_keypoints": 6, - "image_id": 1, - "category_id": 1, - "id": 5 - }, - { - "bbox": [ - 88, - 7, - 252, - 592 - ], - "keypoints": [ - 144, - 180, - 2, - 171, - 325, - 1, - 256, - 428, - 1, - 265, - 196, - 2, - 297, - 311, - 2, - 300, - 412, - 2, - 178, - 476, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 253, - 474, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 220, - 23, - 2, - 205, - 133, - 2 - ], - "num_keypoints": 10, - "image_id": 1, - "category_id": 1, - "id": 6 - }, - { - "bbox": [ - 497, - 179, - 401, - 416 - ], - "keypoints": [ - 692, - 332, - 1, - 587, - 430, - 2, - 612, - 552, - 1, - 657, - 422, - 2, - 533, - 571, - 2, - 621, - 450, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 564, - 212, - 2, - 656, - 362, - 2 - ], - "num_keypoints": 8, - "image_id": 2, - "category_id": 1, - "id": 7 - }, - { - "bbox": [ - 336, - 26, - 177, - 254 - ], - "keypoints": [ - 368, - 142, - 2, - 365, - 237, - 1, - 415, - 271, - 1, - 487, - 147, - 2, - 493, - 240, - 2, - 431, - 265, - 2, - 393, - 296, - 1, - 326, - 306, - 1, - 339, - 390, - 1, - 449, - 297, - 1, - 373, - 315, - 1, - 376, - 389, - 1, - 435, - 43, - 2, - 430, - 131, - 2 - ], - "num_keypoints": 14, - "image_id": 2, - "category_id": 1, - "id": 8 - }, - { - "bbox": [ - 0, - 109, - 473, - 486 - ], - "keypoints": [ - 68, - 333, - 2, - 215, - 408, - 2, - 376, - 427, - 2, - 169, - 280, - 1, - 166, - 386, - 1, - 146, - 462, - 2, - 39, - 545, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 136, - 515, - 1, - 292, - 531, - 1, - 0, - 0, - 0, - 181, - 147, - 2, - 127, - 269, - 2 - ], - "num_keypoints": 11, - "image_id": 2, - "category_id": 1, - "id": 9 - }, - { - "bbox": [ - 681, - 3, - 267, - 607 - ], - "keypoints": [ - 846, - 98, - 1, - 862, - 223, - 1, - 794, - 282, - 2, - 824, - 134, - 2, - 875, - 241, - 2, - 842, - 329, - 2, - 903, - 296, - 1, - 766, - 397, - 1, - 777, - 562, - 2, - 886, - 299, - 2, - 757, - 399, - 2, - 871, - 514, - 2, - 761, - 29, - 2, - 813, - 87, - 2 - ], - "num_keypoints": 14, - "image_id": 3, - "category_id": 1, - "id": 10 - }, - { - "bbox": [ - 484, - 7, - 162, - 481 - ], - "keypoints": [ - 544, - 96, - 2, - 506, - 161, - 2, - 542, - 208, - 2, - 606, - 93, - 2, 
- 615, - 151, - 1, - 622, - 187, - 2, - 571, - 251, - 2, - 553, - 361, - 2, - 556, - 458, - 2, - 591, - 251, - 1, - 581, - 363, - 2, - 587, - 456, - 2, - 587, - 21, - 2, - 578, - 80, - 2 - ], - "num_keypoints": 14, - "image_id": 3, - "category_id": 1, - "id": 11 - }, - { - "bbox": [ - 33, - 73, - 493, - 566 - ], - "keypoints": [ - 254, - 203, - 2, - 169, - 203, - 2, - 111, - 187, - 2, - 391, - 204, - 2, - 425, - 276, - 2, - 475, - 346, - 2, - 272, - 376, - 2, - 185, - 485, - 2, - 126, - 607, - 1, - 357, - 383, - 2, - 359, - 459, - 2, - 350, - 561, - 2, - 338, - 111, - 2, - 325, - 180, - 1 - ], - "num_keypoints": 14, - "image_id": 3, - "category_id": 1, - "id": 12 - } - ] -} +{ + "info": { + "description": "MMPose example aic dataset", + "version": "1.0", + "year": "2020", + "date_created": "2020/08/25" + }, + "licenses": [ + { + "url": "", + "id": 1, + "name": "" + } + ], + "categories": [ + { + "supercategory": "person", + "id": 1, + "name": "person", + "keypoints": [ + "Right Shoulder", + "Right Elbow", + "Right Wrist", + "Left Shoulder", + "Left Elbow", + "Left Wrist", + "Right Hip", + "Right Knee", + "Right Ankle", + "Left Hip", + "Left Knee", + "Left Ankle", + "Head top", + "Neck" + ], + "skeleton": [ + [ + 3, + 2 + ], + [ + 2, + 1 + ], + [ + 1, + 14 + ], + [ + 14, + 4 + ], + [ + 4, + 5 + ], + [ + 5, + 6 + ], + [ + 9, + 8 + ], + [ + 8, + 7 + ], + [ + 7, + 10 + ], + [ + 10, + 11 + ], + [ + 11, + 12 + ], + [ + 13, + 14 + ], + [ + 1, + 7 + ], + [ + 4, + 10 + ] + ] + } + ], + "images": [ + { + "url": "http://www.sinaimg.cn/dy/slidenews/4_img/2013_47/704_1154733_789201.jpg", + "file_name": "054d9ce9201beffc76e5ff2169d2af2f027002ca.jpg", + "height": 600, + "width": 900, + "id": 1 + }, + { + "url": "http://www.sinaimg.cn/dy/slidenews/2_img/2015_26/820_1533617_599302.jpg", + "file_name": "fa436c914fe4a8ec1ec5474af4d3820b84d17561.jpg", + "height": 596, + "width": 900, + "id": 2 + }, + { + "url": "http://www.sinaimg.cn/dy/slidenews/2_img/2016_39/730_1947359_260964.jpg", + "file_name": "ff945ae2e729f24eea992814639d59b3bdec8bd8.jpg", + "height": 641, + "width": 950, + "id": 3 + } + ], + "annotations": [ + { + "bbox": [ + 279, + 55, + 213, + 544 + ], + "keypoints": [ + 313, + 201, + 2, + 312, + 313, + 1, + 320, + 424, + 2, + 406, + 197, + 1, + 431, + 286, + 1, + 459, + 269, + 2, + 375, + 447, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 416, + 441, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 395, + 74, + 2, + 372, + 170, + 2 + ], + "num_keypoints": 10, + "image_id": 1, + "category_id": 1, + "id": 4 + }, + { + "bbox": [ + 541, + 131, + 329, + 468 + ], + "keypoints": [ + 637, + 374, + 1, + 626, + 509, + 2, + 0, + 0, + 0, + 755, + 347, + 2, + 728, + 538, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 604, + 169, + 2, + 674, + 290, + 2 + ], + "num_keypoints": 6, + "image_id": 1, + "category_id": 1, + "id": 5 + }, + { + "bbox": [ + 88, + 7, + 252, + 592 + ], + "keypoints": [ + 144, + 180, + 2, + 171, + 325, + 1, + 256, + 428, + 1, + 265, + 196, + 2, + 297, + 311, + 2, + 300, + 412, + 2, + 178, + 476, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 253, + 474, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 220, + 23, + 2, + 205, + 133, + 2 + ], + "num_keypoints": 10, + "image_id": 1, + "category_id": 1, + "id": 6 + }, + { + "bbox": [ + 497, + 179, + 401, + 416 + ], + "keypoints": [ + 692, + 332, + 1, + 587, + 430, + 2, + 612, + 552, + 1, + 657, + 422, + 2, + 533, + 571, + 2, + 621, + 450, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 
0, + 0, + 564, + 212, + 2, + 656, + 362, + 2 + ], + "num_keypoints": 8, + "image_id": 2, + "category_id": 1, + "id": 7 + }, + { + "bbox": [ + 336, + 26, + 177, + 254 + ], + "keypoints": [ + 368, + 142, + 2, + 365, + 237, + 1, + 415, + 271, + 1, + 487, + 147, + 2, + 493, + 240, + 2, + 431, + 265, + 2, + 393, + 296, + 1, + 326, + 306, + 1, + 339, + 390, + 1, + 449, + 297, + 1, + 373, + 315, + 1, + 376, + 389, + 1, + 435, + 43, + 2, + 430, + 131, + 2 + ], + "num_keypoints": 14, + "image_id": 2, + "category_id": 1, + "id": 8 + }, + { + "bbox": [ + 0, + 109, + 473, + 486 + ], + "keypoints": [ + 68, + 333, + 2, + 215, + 408, + 2, + 376, + 427, + 2, + 169, + 280, + 1, + 166, + 386, + 1, + 146, + 462, + 2, + 39, + 545, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 136, + 515, + 1, + 292, + 531, + 1, + 0, + 0, + 0, + 181, + 147, + 2, + 127, + 269, + 2 + ], + "num_keypoints": 11, + "image_id": 2, + "category_id": 1, + "id": 9 + }, + { + "bbox": [ + 681, + 3, + 267, + 607 + ], + "keypoints": [ + 846, + 98, + 1, + 862, + 223, + 1, + 794, + 282, + 2, + 824, + 134, + 2, + 875, + 241, + 2, + 842, + 329, + 2, + 903, + 296, + 1, + 766, + 397, + 1, + 777, + 562, + 2, + 886, + 299, + 2, + 757, + 399, + 2, + 871, + 514, + 2, + 761, + 29, + 2, + 813, + 87, + 2 + ], + "num_keypoints": 14, + "image_id": 3, + "category_id": 1, + "id": 10 + }, + { + "bbox": [ + 484, + 7, + 162, + 481 + ], + "keypoints": [ + 544, + 96, + 2, + 506, + 161, + 2, + 542, + 208, + 2, + 606, + 93, + 2, + 615, + 151, + 1, + 622, + 187, + 2, + 571, + 251, + 2, + 553, + 361, + 2, + 556, + 458, + 2, + 591, + 251, + 1, + 581, + 363, + 2, + 587, + 456, + 2, + 587, + 21, + 2, + 578, + 80, + 2 + ], + "num_keypoints": 14, + "image_id": 3, + "category_id": 1, + "id": 11 + }, + { + "bbox": [ + 33, + 73, + 493, + 566 + ], + "keypoints": [ + 254, + 203, + 2, + 169, + 203, + 2, + 111, + 187, + 2, + 391, + 204, + 2, + 425, + 276, + 2, + 475, + 346, + 2, + 272, + 376, + 2, + 185, + 485, + 2, + 126, + 607, + 1, + 357, + 383, + 2, + 359, + 459, + 2, + 350, + 561, + 2, + 338, + 111, + 2, + 325, + 180, + 1 + ], + "num_keypoints": 14, + "image_id": 3, + "category_id": 1, + "id": 12 + } + ] +} diff --git a/tests/data/ak/test_animalkingdom.json b/tests/data/ak/test_animalkingdom.json index 02aaf9f57a..c9f3e9d814 100644 --- a/tests/data/ak/test_animalkingdom.json +++ b/tests/data/ak/test_animalkingdom.json @@ -1,589 +1,589 @@ -{ - "info": { - "description": "[CVPR 2022] Animal Kingdom", - "url": "https://sutdcv.github.io/Animal-Kingdom", - "version": "1.0 (2022-06)", - "year": 2022, - "contributor": "Singapore University of Technology and Design, Singapore. 
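The keypoint test fixtures in this diff (300W, AFLW, AIC, and the files that follow) all share the COCO annotation layout: a flat `keypoints` list of `[x, y, visibility]` triplets plus `bbox`/`num_keypoints` metadata. A short sketch of reading one of them and reshaping the keypoints, assuming the repository-relative path:

# Sketch: load a COCO-style keypoint fixture and reshape the flat list.
import json
import numpy as np

with open('tests/data/aic/test_aic.json') as f:
    data = json.load(f)

ann = data['annotations'][0]                                  # AIC uses 14 keypoints per person
kpts = np.array(ann['keypoints'], dtype=np.float32).reshape(-1, 3)
visible = int((kpts[:, 2] > 0).sum())
print(kpts.shape, ann['num_keypoints'], visible)              # (14, 3) 10 10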
Xun Long Ng, Kian Eng Ong, Qichen Zheng, Yun Ni, Si Yong Yeo, Jun Liu.", - "date_created": "2022-06" - }, - "licenses": [ - { - "url": "", - "id": 1, - "name": "" - } - ], - "images": [ - { - "id": 1, - "file_name": "AAOYRUDX/AAOYRUDX_f000027.jpg", - "width": 640, - "height": 360 - }, - { - "id": 2, - "file_name": "AAOYRUDX/AAOYRUDX_f000028.jpg", - "width": 640, - "height": 360 - } - ], - "categories": [ - { - "supercategory": "AK_Animal", - "id": 1, - "name": "Amphibian", - "keypoints": [ - "Head_Mid_Top", - "Eye_Left", - "Eye_Right", - "Mouth_Front_Top", - "Mouth_Back_Left", - "Mouth_Back_Right", - "Mouth_Front_Bottom", - "Shoulder_Left", - "Shoulder_Right", - "Elbow_Left", - "Elbow_Right", - "Wrist_Left", - "Wrist_Right", - "Torso_Mid_Back", - "Hip_Left", - "Hip_Right", - "Knee_Left", - "Knee_Right", - "Ankle_Left ", - "Ankle_Right", - "Tail_Top_Back", - "Tail_Mid_Back", - "Tail_End_Back" - ], - "skeleton": [ - [ - 1, - 0 - ], - [ - 2, - 0 - ], - [ - 3, - 4 - ], - [ - 3, - 5 - ], - [ - 4, - 6 - ], - [ - 5, - 6 - ], - [ - 0, - 7 - ], - [ - 0, - 8 - ], - [ - 7, - 9 - ], - [ - 8, - 10 - ], - [ - 9, - 11 - ], - [ - 10, - 12 - ], - [ - 0, - 13 - ], - [ - 13, - 20 - ], - [ - 20, - 14 - ], - [ - 20, - 15 - ], - [ - 14, - 16 - ], - [ - 15, - 17 - ], - [ - 16, - 18 - ], - [ - 17, - 19 - ], - [ - 20, - 21 - ], - [ - 21, - 22 - ] - ], - "flip_pairs": [ - [ - 1, - 2 - ], - [ - 4, - 5 - ], - [ - 7, - 8 - ], - [ - 9, - 10 - ], - [ - 11, - 12 - ], - [ - 14, - 15 - ], - [ - 16, - 17 - ], - [ - 18, - 19 - ] - ], - "upper_body_ids": [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13 - ], - "lower_body_ids": [ - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22 - ] - }, - { - "supercategory": "AK_Animal", - "id": 2, - "name": "Bird", - "keypoints": [ - "Head_Mid_Top", - "Eye_Left", - "Eye_Right", - "Mouth_Front_Top", - "Mouth_Back_Left", - "Mouth_Back_Right", - "Mouth_Front_Bottom", - "Shoulder_Left", - "Shoulder_Right", - "Elbow_Left", - "Elbow_Right", - "Wrist_Left", - "Wrist_Right", - "Torso_Mid_Back", - "Hip_Left", - "Hip_Right", - "Knee_Left", - "Knee_Right", - "Ankle_Left ", - "Ankle_Right", - "Tail_Top_Back", - "Tail_Mid_Back", - "Tail_End_Back" - ], - "skeleton": [ - [ - 1, - 0 - ], - [ - 2, - 0 - ], - [ - 3, - 4 - ], - [ - 3, - 5 - ], - [ - 4, - 6 - ], - [ - 5, - 6 - ], - [ - 0, - 7 - ], - [ - 0, - 8 - ], - [ - 7, - 9 - ], - [ - 8, - 10 - ], - [ - 9, - 11 - ], - [ - 10, - 12 - ], - [ - 0, - 13 - ], - [ - 13, - 20 - ], - [ - 20, - 14 - ], - [ - 20, - 15 - ], - [ - 14, - 16 - ], - [ - 15, - 17 - ], - [ - 16, - 18 - ], - [ - 17, - 19 - ], - [ - 20, - 21 - ], - [ - 21, - 22 - ] - ], - "flip_pairs": [ - [ - 1, - 2 - ], - [ - 4, - 5 - ], - [ - 7, - 8 - ], - [ - 9, - 10 - ], - [ - 11, - 12 - ], - [ - 14, - 15 - ], - [ - 16, - 17 - ], - [ - 18, - 19 - ] - ], - "upper_body_ids": [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13 - ], - "lower_body_ids": [ - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22 - ] - } - ], - "annotations": [ - { - "id": 1, - "image_id": 1, - "category_id": 4, - "animal_parent_class": "Mammal", - "animal_class": "Mammal", - "animal_subclass": "Elephant", - "animal": "Elephant", - "protocol": "ak_P1", - "train_test": "test", - "area": 7984.8320733706605, - "scale": 0.6674772036000001, - "center": [ - 229.7435897436, - 202.4316109422 - ], - "bbox": [ - 199.6111111111, - 136.1838905775, - 60.26495726500002, - 132.4954407295 - ], - "iscrowd": 0, - "num_keypoints": 6, - "keypoints": [ - 220.9914529915, - 135.6838905775, - 1.0, - 
238.4957264957, - 151.0030395137, - 1.0, - 199.1111111111, - 153.1914893617, - 1.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0, - 260.3760683761, - 154.2857142857, - 1.0, - -1.0, - -1.0, - 0.0, - 250.5299145299, - 195.8662613982, - 1.0, - -1.0, - -1.0, - 0.0, - 251.6239316239, - 269.179331307, - 1.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0 - ] - }, - { - "id": 2, - "image_id": 2, - "category_id": 4, - "animal_parent_class": "Mammal", - "animal_class": "Mammal", - "animal_subclass": "Elephant", - "animal": "Elephant", - "protocol": "ak_P1", - "train_test": "test", - "area": 7984.8320733706605, - "scale": 0.6674772036000001, - "center": [ - 229.7435897436, - 202.4316109422 - ], - "bbox": [ - 199.6111111111, - 136.1838905775, - 60.26495726500002, - 132.4954407295 - ], - "iscrowd": 0, - "num_keypoints": 6, - "keypoints": [ - 220.9914529915, - 135.6838905775, - 1.0, - 238.4957264957, - 151.0030395137, - 1.0, - 199.1111111111, - 153.1914893617, - 1.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0, - 260.3760683761, - 154.2857142857, - 1.0, - -1.0, - -1.0, - 0.0, - 250.5299145299, - 195.8662613982, - 1.0, - -1.0, - -1.0, - 0.0, - 251.6239316239, - 269.179331307, - 1.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0, - -1.0, - -1.0, - 0.0 - ] - } - ] +{ + "info": { + "description": "[CVPR 2022] Animal Kingdom", + "url": "https://sutdcv.github.io/Animal-Kingdom", + "version": "1.0 (2022-06)", + "year": 2022, + "contributor": "Singapore University of Technology and Design, Singapore. 
Xun Long Ng, Kian Eng Ong, Qichen Zheng, Yun Ni, Si Yong Yeo, Jun Liu.", + "date_created": "2022-06" + }, + "licenses": [ + { + "url": "", + "id": 1, + "name": "" + } + ], + "images": [ + { + "id": 1, + "file_name": "AAOYRUDX/AAOYRUDX_f000027.jpg", + "width": 640, + "height": 360 + }, + { + "id": 2, + "file_name": "AAOYRUDX/AAOYRUDX_f000028.jpg", + "width": 640, + "height": 360 + } + ], + "categories": [ + { + "supercategory": "AK_Animal", + "id": 1, + "name": "Amphibian", + "keypoints": [ + "Head_Mid_Top", + "Eye_Left", + "Eye_Right", + "Mouth_Front_Top", + "Mouth_Back_Left", + "Mouth_Back_Right", + "Mouth_Front_Bottom", + "Shoulder_Left", + "Shoulder_Right", + "Elbow_Left", + "Elbow_Right", + "Wrist_Left", + "Wrist_Right", + "Torso_Mid_Back", + "Hip_Left", + "Hip_Right", + "Knee_Left", + "Knee_Right", + "Ankle_Left ", + "Ankle_Right", + "Tail_Top_Back", + "Tail_Mid_Back", + "Tail_End_Back" + ], + "skeleton": [ + [ + 1, + 0 + ], + [ + 2, + 0 + ], + [ + 3, + 4 + ], + [ + 3, + 5 + ], + [ + 4, + 6 + ], + [ + 5, + 6 + ], + [ + 0, + 7 + ], + [ + 0, + 8 + ], + [ + 7, + 9 + ], + [ + 8, + 10 + ], + [ + 9, + 11 + ], + [ + 10, + 12 + ], + [ + 0, + 13 + ], + [ + 13, + 20 + ], + [ + 20, + 14 + ], + [ + 20, + 15 + ], + [ + 14, + 16 + ], + [ + 15, + 17 + ], + [ + 16, + 18 + ], + [ + 17, + 19 + ], + [ + 20, + 21 + ], + [ + 21, + 22 + ] + ], + "flip_pairs": [ + [ + 1, + 2 + ], + [ + 4, + 5 + ], + [ + 7, + 8 + ], + [ + 9, + 10 + ], + [ + 11, + 12 + ], + [ + 14, + 15 + ], + [ + 16, + 17 + ], + [ + 18, + 19 + ] + ], + "upper_body_ids": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13 + ], + "lower_body_ids": [ + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22 + ] + }, + { + "supercategory": "AK_Animal", + "id": 2, + "name": "Bird", + "keypoints": [ + "Head_Mid_Top", + "Eye_Left", + "Eye_Right", + "Mouth_Front_Top", + "Mouth_Back_Left", + "Mouth_Back_Right", + "Mouth_Front_Bottom", + "Shoulder_Left", + "Shoulder_Right", + "Elbow_Left", + "Elbow_Right", + "Wrist_Left", + "Wrist_Right", + "Torso_Mid_Back", + "Hip_Left", + "Hip_Right", + "Knee_Left", + "Knee_Right", + "Ankle_Left ", + "Ankle_Right", + "Tail_Top_Back", + "Tail_Mid_Back", + "Tail_End_Back" + ], + "skeleton": [ + [ + 1, + 0 + ], + [ + 2, + 0 + ], + [ + 3, + 4 + ], + [ + 3, + 5 + ], + [ + 4, + 6 + ], + [ + 5, + 6 + ], + [ + 0, + 7 + ], + [ + 0, + 8 + ], + [ + 7, + 9 + ], + [ + 8, + 10 + ], + [ + 9, + 11 + ], + [ + 10, + 12 + ], + [ + 0, + 13 + ], + [ + 13, + 20 + ], + [ + 20, + 14 + ], + [ + 20, + 15 + ], + [ + 14, + 16 + ], + [ + 15, + 17 + ], + [ + 16, + 18 + ], + [ + 17, + 19 + ], + [ + 20, + 21 + ], + [ + 21, + 22 + ] + ], + "flip_pairs": [ + [ + 1, + 2 + ], + [ + 4, + 5 + ], + [ + 7, + 8 + ], + [ + 9, + 10 + ], + [ + 11, + 12 + ], + [ + 14, + 15 + ], + [ + 16, + 17 + ], + [ + 18, + 19 + ] + ], + "upper_body_ids": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13 + ], + "lower_body_ids": [ + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22 + ] + } + ], + "annotations": [ + { + "id": 1, + "image_id": 1, + "category_id": 4, + "animal_parent_class": "Mammal", + "animal_class": "Mammal", + "animal_subclass": "Elephant", + "animal": "Elephant", + "protocol": "ak_P1", + "train_test": "test", + "area": 7984.8320733706605, + "scale": 0.6674772036000001, + "center": [ + 229.7435897436, + 202.4316109422 + ], + "bbox": [ + 199.6111111111, + 136.1838905775, + 60.26495726500002, + 132.4954407295 + ], + "iscrowd": 0, + "num_keypoints": 6, + "keypoints": [ + 220.9914529915, + 135.6838905775, + 1.0, + 
238.4957264957, + 151.0030395137, + 1.0, + 199.1111111111, + 153.1914893617, + 1.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0, + 260.3760683761, + 154.2857142857, + 1.0, + -1.0, + -1.0, + 0.0, + 250.5299145299, + 195.8662613982, + 1.0, + -1.0, + -1.0, + 0.0, + 251.6239316239, + 269.179331307, + 1.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0 + ] + }, + { + "id": 2, + "image_id": 2, + "category_id": 4, + "animal_parent_class": "Mammal", + "animal_class": "Mammal", + "animal_subclass": "Elephant", + "animal": "Elephant", + "protocol": "ak_P1", + "train_test": "test", + "area": 7984.8320733706605, + "scale": 0.6674772036000001, + "center": [ + 229.7435897436, + 202.4316109422 + ], + "bbox": [ + 199.6111111111, + 136.1838905775, + 60.26495726500002, + 132.4954407295 + ], + "iscrowd": 0, + "num_keypoints": 6, + "keypoints": [ + 220.9914529915, + 135.6838905775, + 1.0, + 238.4957264957, + 151.0030395137, + 1.0, + 199.1111111111, + 153.1914893617, + 1.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0, + 260.3760683761, + 154.2857142857, + 1.0, + -1.0, + -1.0, + 0.0, + 250.5299145299, + 195.8662613982, + 1.0, + -1.0, + -1.0, + 0.0, + 251.6239316239, + 269.179331307, + 1.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0, + -1.0, + -1.0, + 0.0 + ] + } + ] } \ No newline at end of file diff --git a/tests/data/animalpose/test_animalpose.json b/tests/data/animalpose/test_animalpose.json index 7b11465327..a73a0618cb 100644 --- a/tests/data/animalpose/test_animalpose.json +++ b/tests/data/animalpose/test_animalpose.json @@ -1,281 +1,281 @@ -{ - "categories": [ - { - "supercategory": "animal", - "id": 1, - "name": "cat", - "keypoints": [ - "L_Eye", - "R_Eye", - "L_EarBase", - "R_EarBase", - "Nose", - "Throat", - "TailBase", - "Withers", - "L_F_Elbow", - "R_F_Elbow", - "L_B_Elbow", - "R_B_Elbow", - "L_F_Knee", - "R_F_Knee", - "L_B_Knee", - "R_B_Knee", - "L_F_Paw", - "R_F_Paw", - "L_B_Paw", - "R_B_Paw" - ], - "skeleton": [ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 4 - ], - [ - 1, - 5 - ], - [ - 2, - 5 - ], - [ - 5, - 6 - ], - [ - 6, - 8 - ], - [ - 7, - 8 - ], - [ - 6, - 9 - ], - [ - 9, - 13 - ], - [ - 13, - 17 - ], - [ - 6, - 10 - ], - [ - 10, - 14 - ], - [ - 14, - 18 - ], - [ - 7, - 11 - ], - [ - 11, - 15 - ], - [ - 15, - 19 - ], - [ - 7, - 12 - ], - [ - 12, - 16 - ], - [ - 16, - 20 - ] - ] - } - ], - "images": [ - { - "id": 110, - "file_name": "ca110.jpeg", - "height": 240, - "width": 300 - }, - { - "id": 3105, - "file_name": "ho105.jpeg", - "height": 255, - "width": 300 - } - ], - "annotations": [ - { - "keypoints": [ - 117.0, - 95.0, - 2.0, - 85.0, - 102.0, - 2.0, - 115.0, - 56.0, - 2.0, - 62.0, - 78.0, - 2.0, - 102.0, - 109.0, - 2.0, - 104.0, - 130.0, - 2.0, - 235.0, - 163.0, - 2.0, - 144.0, - 53.0, - 2.0, - 123.0, - 142.0, - 2.0, - 40.0, - 161.0, - 2.0, - 182.0, - 160.0, - 2.0, - 0.0, - 0.0, - 0.0, - 115.0, - 186.0, - 2.0, - 64.0, - 192.0, - 2.0, - 189.0, - 195.0, - 2.0, - 0.0, - 0.0, - 0.0, - 84.0, - 214.0, - 2.0, - 53.0, - 209.0, - 2.0, - 146.0, - 206.0, - 2.0, - 0.0, - 0.0, - 0.0 - ], - "image_id": 110, - "id": 129, - 
"num_keypoints": 17, - "bbox": [ - 13.0, - 36.0, - 284.0, - 192.0 - ], - "iscrowd": 0, - "area": 54528.0, - "category_id": 1 - }, - { - "keypoints": [ - 54.0, - 36.0, - 2.0, - 42.0, - 33.0, - 2.0, - 65.0, - 21.0, - 2.0, - 51.0, - 18.0, - 2.0, - 30.0, - 59.0, - 2.0, - 57.0, - 62.0, - 2.0, - 203.0, - 109.0, - 2.0, - 104.0, - 82.0, - 2.0, - 73.0, - 141.0, - 2.0, - 0.0, - 0.0, - 0.0, - 195.0, - 157.0, - 2.0, - 0.0, - 0.0, - 0.0, - 73.0, - 185.0, - 2.0, - 81.0, - 183.0, - 2.0, - 225.0, - 204.0, - 2.0, - 0.0, - 0.0, - 0.0, - 62.0, - 221.0, - 2.0, - 0.0, - 0.0, - 0.0, - 249.0, - 242.0, - 2.0, - 0.0, - 0.0, - 0.0 - ], - "image_id": 3105, - "id": 583, - "num_keypoints": 15, - "bbox": [ - 23.0, - 9.0, - 256.0, - 240.0 - ], - "iscrowd": 0, - "area": 61440.0, - "category_id": 1 - } - ] +{ + "categories": [ + { + "supercategory": "animal", + "id": 1, + "name": "cat", + "keypoints": [ + "L_Eye", + "R_Eye", + "L_EarBase", + "R_EarBase", + "Nose", + "Throat", + "TailBase", + "Withers", + "L_F_Elbow", + "R_F_Elbow", + "L_B_Elbow", + "R_B_Elbow", + "L_F_Knee", + "R_F_Knee", + "L_B_Knee", + "R_B_Knee", + "L_F_Paw", + "R_F_Paw", + "L_B_Paw", + "R_B_Paw" + ], + "skeleton": [ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 4 + ], + [ + 1, + 5 + ], + [ + 2, + 5 + ], + [ + 5, + 6 + ], + [ + 6, + 8 + ], + [ + 7, + 8 + ], + [ + 6, + 9 + ], + [ + 9, + 13 + ], + [ + 13, + 17 + ], + [ + 6, + 10 + ], + [ + 10, + 14 + ], + [ + 14, + 18 + ], + [ + 7, + 11 + ], + [ + 11, + 15 + ], + [ + 15, + 19 + ], + [ + 7, + 12 + ], + [ + 12, + 16 + ], + [ + 16, + 20 + ] + ] + } + ], + "images": [ + { + "id": 110, + "file_name": "ca110.jpeg", + "height": 240, + "width": 300 + }, + { + "id": 3105, + "file_name": "ho105.jpeg", + "height": 255, + "width": 300 + } + ], + "annotations": [ + { + "keypoints": [ + 117.0, + 95.0, + 2.0, + 85.0, + 102.0, + 2.0, + 115.0, + 56.0, + 2.0, + 62.0, + 78.0, + 2.0, + 102.0, + 109.0, + 2.0, + 104.0, + 130.0, + 2.0, + 235.0, + 163.0, + 2.0, + 144.0, + 53.0, + 2.0, + 123.0, + 142.0, + 2.0, + 40.0, + 161.0, + 2.0, + 182.0, + 160.0, + 2.0, + 0.0, + 0.0, + 0.0, + 115.0, + 186.0, + 2.0, + 64.0, + 192.0, + 2.0, + 189.0, + 195.0, + 2.0, + 0.0, + 0.0, + 0.0, + 84.0, + 214.0, + 2.0, + 53.0, + 209.0, + 2.0, + 146.0, + 206.0, + 2.0, + 0.0, + 0.0, + 0.0 + ], + "image_id": 110, + "id": 129, + "num_keypoints": 17, + "bbox": [ + 13.0, + 36.0, + 284.0, + 192.0 + ], + "iscrowd": 0, + "area": 54528.0, + "category_id": 1 + }, + { + "keypoints": [ + 54.0, + 36.0, + 2.0, + 42.0, + 33.0, + 2.0, + 65.0, + 21.0, + 2.0, + 51.0, + 18.0, + 2.0, + 30.0, + 59.0, + 2.0, + 57.0, + 62.0, + 2.0, + 203.0, + 109.0, + 2.0, + 104.0, + 82.0, + 2.0, + 73.0, + 141.0, + 2.0, + 0.0, + 0.0, + 0.0, + 195.0, + 157.0, + 2.0, + 0.0, + 0.0, + 0.0, + 73.0, + 185.0, + 2.0, + 81.0, + 183.0, + 2.0, + 225.0, + 204.0, + 2.0, + 0.0, + 0.0, + 0.0, + 62.0, + 221.0, + 2.0, + 0.0, + 0.0, + 0.0, + 249.0, + 242.0, + 2.0, + 0.0, + 0.0, + 0.0 + ], + "image_id": 3105, + "id": 583, + "num_keypoints": 15, + "bbox": [ + 23.0, + 9.0, + 256.0, + 240.0 + ], + "iscrowd": 0, + "area": 61440.0, + "category_id": 1 + } + ] } \ No newline at end of file diff --git a/tests/data/ap10k/test_ap10k.json b/tests/data/ap10k/test_ap10k.json index 851dc1ad75..07766137c7 100644 --- a/tests/data/ap10k/test_ap10k.json +++ b/tests/data/ap10k/test_ap10k.json @@ -1,5249 +1,5249 @@ -{ - "info":{ - "description":"AP-10k", - "url":"https://github.com/AlexTheBad/AP-10K", - "version":"1.0", - "year":2021, - "contributor":"AP-10k Team", - "date_created":"2021/07/01" - }, - "licenses":[ - { - "id":1, - 
"name":"The MIT License", - "url":"https://www.mit.edu/~amini/LICENSE.md" - } - ], - "images":[ - { - "license":1, - "id":37516, - "file_name":"000000037516.jpg", - "width":1200, - "height":867, - "background":5 - }, - { - "license":1, - "id":4, - "file_name":"000000000004.jpg", - "width":1024, - "height":683, - "background":1 - } - ], - "annotations":[ - { - "id":9284, - "image_id":37516, - "category_id":26, - "bbox":[ - 66, - 192, - 1092, - 512 - ], - "area":559104, - "iscrowd":0, - "num_keypoints":16, - "keypoints":[ - 134, - 415, - 2, - 0, - 0, - 0, - 94, - 475, - 2, - 302, - 330, - 2, - 890, - 287, - 2, - 414, - 470, - 2, - 414, - 554, - 2, - 396, - 624, - 2, - 302, - 466, - 2, - 230, - 515, - 2, - 214, - 623, - 2, - 838, - 422, - 2, - 946, - 511, - 2, - 936, - 628, - 2, - 708, - 442, - 2, - 698, - 555, - 2, - 636, - 602, - 2 - ] - }, - { - "id":6, - "image_id":4, - "category_id":1, - "bbox":[ - 408, - 197, - 429, - 341 - ], - "area":146289, - "iscrowd":0, - "num_keypoints":16, - "keypoints":[ - 488, - 443, - 2, - 0, - 0, - 0, - 466, - 499, - 2, - 600, - 307, - 2, - 787, - 255, - 2, - 643, - 369, - 2, - 660, - 438, - 2, - 684, - 514, - 2, - 592, - 380, - 2, - 594, - 443, - 2, - 591, - 520, - 2, - 757, - 350, - 2, - 778, - 408, - 2, - 772, - 513, - 2, - 729, - 352, - 2, - 778, - 400, - 2, - 765, - 497, - 2 - ] - } - ], - "categories":[ - { - "id":1, - "name":"antelope", - "supercategory":"Bovidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":2, - "name":"argali sheep", - "supercategory":"Bovidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":3, - "name":"bison", - "supercategory":"Bovidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":4, - "name":"buffalo", - "supercategory":"Bovidae", - 
"keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":5, - "name":"cow", - "supercategory":"Bovidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":6, - "name":"sheep", - "supercategory":"Bovidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":7, - "name":"arctic fox", - "supercategory":"Canidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":8, - "name":"dog", - "supercategory":"Canidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":9, - "name":"fox", - "supercategory":"Canidae", - "keypoints":[ - "left_eye", - "right_eye", - 
"nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":10, - "name":"wolf", - "supercategory":"Canidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":11, - "name":"beaver", - "supercategory":"Castoridae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":12, - "name":"alouatta", - "supercategory":"Cercopithecidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":13, - "name":"monkey", - "supercategory":"Cercopithecidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":14, - "name":"noisy night monkey", - "supercategory":"Cercopithecidae", - "keypoints":[ - "left_eye", - 
"right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":15, - "name":"spider monkey", - "supercategory":"Cercopithecidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":16, - "name":"uakari", - "supercategory":"Cercopithecidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":17, - "name":"deer", - "supercategory":"Cervidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":18, - "name":"moose", - "supercategory":"Cervidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":19, - "name":"hamster", - "supercategory":"Cricetidae", - "keypoints":[ - "left_eye", - 
"right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":20, - "name":"elephant", - "supercategory":"Elephantidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":21, - "name":"horse", - "supercategory":"Equidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":22, - "name":"zebra", - "supercategory":"Equidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":23, - "name":"bobcat", - "supercategory":"Felidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":24, - "name":"cat", - "supercategory":"Felidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", 
- "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":25, - "name":"cheetah", - "supercategory":"Felidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":26, - "name":"jaguar", - "supercategory":"Felidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":27, - "name":"king cheetah", - "supercategory":"Felidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":28, - "name":"leopard", - "supercategory":"Felidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":29, - "name":"lion", - "supercategory":"Felidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - 
"left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":30, - "name":"panther", - "supercategory":"Felidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":31, - "name":"snow leopard", - "supercategory":"Felidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":32, - "name":"tiger", - "supercategory":"Felidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":33, - "name":"giraffe", - "supercategory":"Giraffidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":34, - "name":"hippo", - "supercategory":"Hippopotamidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - 
"left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":35, - "name":"chimpanzee", - "supercategory":"Hominidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":36, - "name":"gorilla", - "supercategory":"Hominidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":37, - "name":"orangutan", - "supercategory":"Hominidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":38, - "name":"rabbit", - "supercategory":"Leporidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":39, - "name":"skunk", - "supercategory":"Mephitidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - 
"left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":40, - "name":"mouse", - "supercategory":"Muridae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":41, - "name":"rat", - "supercategory":"Muridae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":42, - "name":"otter", - "supercategory":"Mustelidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":43, - "name":"weasel", - "supercategory":"Mustelidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":44, - "name":"raccoon", - "supercategory":"Procyonidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - 
"right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":45, - "name":"rhino", - "supercategory":"Rhinocerotidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":46, - "name":"marmot", - "supercategory":"Sciuridae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":47, - "name":"squirrel", - "supercategory":"Sciuridae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":48, - "name":"pig", - "supercategory":"Suidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":49, - "name":"mole", - "supercategory":"Talpidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - 
"right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":50, - "name":"black bear", - "supercategory":"Ursidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":51, - "name":"brown bear", - "supercategory":"Ursidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":52, - "name":"panda", - "supercategory":"Ursidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":53, - "name":"polar bear", - "supercategory":"Ursidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - "right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - }, - { - "id":54, - "name":"bat", - "supercategory":"Vespertilionidae", - "keypoints":[ - "left_eye", - "right_eye", - "nose", - "neck", - "root_of_tail", - "left_shoulder", - "left_elbow", - "left_front_paw", - "right_shoulder", - "right_elbow", - 
"right_front_paw", - "left_hip", - "left_knee", - "left_back_paw", - "right_hip", - "right_knee", - "right_back_paw" - ], - "skeleton":[ - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 4, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 4, - 9 - ], - [ - 9, - 10 - ], - [ - 10, - 11 - ], - [ - 5, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 14 - ], - [ - 5, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ] - ] - } - ] -} +{ + "info":{ + "description":"AP-10k", + "url":"https://github.com/AlexTheBad/AP-10K", + "version":"1.0", + "year":2021, + "contributor":"AP-10k Team", + "date_created":"2021/07/01" + }, + "licenses":[ + { + "id":1, + "name":"The MIT License", + "url":"https://www.mit.edu/~amini/LICENSE.md" + } + ], + "images":[ + { + "license":1, + "id":37516, + "file_name":"000000037516.jpg", + "width":1200, + "height":867, + "background":5 + }, + { + "license":1, + "id":4, + "file_name":"000000000004.jpg", + "width":1024, + "height":683, + "background":1 + } + ], + "annotations":[ + { + "id":9284, + "image_id":37516, + "category_id":26, + "bbox":[ + 66, + 192, + 1092, + 512 + ], + "area":559104, + "iscrowd":0, + "num_keypoints":16, + "keypoints":[ + 134, + 415, + 2, + 0, + 0, + 0, + 94, + 475, + 2, + 302, + 330, + 2, + 890, + 287, + 2, + 414, + 470, + 2, + 414, + 554, + 2, + 396, + 624, + 2, + 302, + 466, + 2, + 230, + 515, + 2, + 214, + 623, + 2, + 838, + 422, + 2, + 946, + 511, + 2, + 936, + 628, + 2, + 708, + 442, + 2, + 698, + 555, + 2, + 636, + 602, + 2 + ] + }, + { + "id":6, + "image_id":4, + "category_id":1, + "bbox":[ + 408, + 197, + 429, + 341 + ], + "area":146289, + "iscrowd":0, + "num_keypoints":16, + "keypoints":[ + 488, + 443, + 2, + 0, + 0, + 0, + 466, + 499, + 2, + 600, + 307, + 2, + 787, + 255, + 2, + 643, + 369, + 2, + 660, + 438, + 2, + 684, + 514, + 2, + 592, + 380, + 2, + 594, + 443, + 2, + 591, + 520, + 2, + 757, + 350, + 2, + 778, + 408, + 2, + 772, + 513, + 2, + 729, + 352, + 2, + 778, + 400, + 2, + 765, + 497, + 2 + ] + } + ], + "categories":[ + { + "id":1, + "name":"antelope", + "supercategory":"Bovidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":2, + "name":"argali sheep", + "supercategory":"Bovidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":3, + "name":"bison", + "supercategory":"Bovidae", + "keypoints":[ + 
"left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":4, + "name":"buffalo", + "supercategory":"Bovidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":5, + "name":"cow", + "supercategory":"Bovidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":6, + "name":"sheep", + "supercategory":"Bovidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":7, + "name":"arctic fox", + "supercategory":"Canidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":8, + "name":"dog", + "supercategory":"Canidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + 
"neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":9, + "name":"fox", + "supercategory":"Canidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":10, + "name":"wolf", + "supercategory":"Canidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":11, + "name":"beaver", + "supercategory":"Castoridae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":12, + "name":"alouatta", + "supercategory":"Cercopithecidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":13, + "name":"monkey", + "supercategory":"Cercopithecidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + 
"root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":14, + "name":"noisy night monkey", + "supercategory":"Cercopithecidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":15, + "name":"spider monkey", + "supercategory":"Cercopithecidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":16, + "name":"uakari", + "supercategory":"Cercopithecidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":17, + "name":"deer", + "supercategory":"Cervidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":18, + "name":"moose", + "supercategory":"Cervidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + 
"neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":19, + "name":"hamster", + "supercategory":"Cricetidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":20, + "name":"elephant", + "supercategory":"Elephantidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":21, + "name":"horse", + "supercategory":"Equidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":22, + "name":"zebra", + "supercategory":"Equidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":23, + "name":"bobcat", + "supercategory":"Felidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", 
+ "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":24, + "name":"cat", + "supercategory":"Felidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":25, + "name":"cheetah", + "supercategory":"Felidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":26, + "name":"jaguar", + "supercategory":"Felidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":27, + "name":"king cheetah", + "supercategory":"Felidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":28, + "name":"leopard", + "supercategory":"Felidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + 
"left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":29, + "name":"lion", + "supercategory":"Felidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":30, + "name":"panther", + "supercategory":"Felidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":31, + "name":"snow leopard", + "supercategory":"Felidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":32, + "name":"tiger", + "supercategory":"Felidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":33, + "name":"giraffe", + "supercategory":"Giraffidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + 
"left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":34, + "name":"hippo", + "supercategory":"Hippopotamidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":35, + "name":"chimpanzee", + "supercategory":"Hominidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":36, + "name":"gorilla", + "supercategory":"Hominidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":37, + "name":"orangutan", + "supercategory":"Hominidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":38, + "name":"rabbit", + "supercategory":"Leporidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + 
"left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":39, + "name":"skunk", + "supercategory":"Mephitidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":40, + "name":"mouse", + "supercategory":"Muridae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":41, + "name":"rat", + "supercategory":"Muridae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":42, + "name":"otter", + "supercategory":"Mustelidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":43, + "name":"weasel", + "supercategory":"Mustelidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + 
"right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":44, + "name":"raccoon", + "supercategory":"Procyonidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":45, + "name":"rhino", + "supercategory":"Rhinocerotidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":46, + "name":"marmot", + "supercategory":"Sciuridae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":47, + "name":"squirrel", + "supercategory":"Sciuridae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":48, + "name":"pig", + "supercategory":"Suidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + 
"right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":49, + "name":"mole", + "supercategory":"Talpidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":50, + "name":"black bear", + "supercategory":"Ursidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":51, + "name":"brown bear", + "supercategory":"Ursidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":52, + "name":"panda", + "supercategory":"Ursidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":53, + "name":"polar bear", + "supercategory":"Ursidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + 
"right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + }, + { + "id":54, + "name":"bat", + "supercategory":"Vespertilionidae", + "keypoints":[ + "left_eye", + "right_eye", + "nose", + "neck", + "root_of_tail", + "left_shoulder", + "left_elbow", + "left_front_paw", + "right_shoulder", + "right_elbow", + "right_front_paw", + "left_hip", + "left_knee", + "left_back_paw", + "right_hip", + "right_knee", + "right_back_paw" + ], + "skeleton":[ + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 4, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 4, + 9 + ], + [ + 9, + 10 + ], + [ + 10, + 11 + ], + [ + 5, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 14 + ], + [ + 5, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ] + ] + } + ] +} diff --git a/tests/data/atrw/test_atrw.json b/tests/data/atrw/test_atrw.json index 513faab788..cc6ce696e5 100644 --- a/tests/data/atrw/test_atrw.json +++ b/tests/data/atrw/test_atrw.json @@ -1,221 +1,221 @@ -{ - "categories": [ - { - "keypoints": [ - "left_ear", - "right_ear", - "nose", - "right_shoulder", - "right_front_paw", - "left_shoulder", - "left_front_paw", - "right_hip", - "right_knee", - "right_back_paw", - "left_hip", - "left_knee", - "left_back_paw", - "tail", - "center" - ], - "name": "tiger", - "skeleton": [ - [ - 0, - 2 - ], - [ - 1, - 2 - ], - [ - 2, - 14 - ], - [ - 5, - 6 - ], - [ - 5, - 14 - ], - [ - 3, - 4 - ], - [ - 3, - 14 - ], - [ - 13, - 14 - ], - [ - 9, - 8 - ], - [ - 8, - 7 - ], - [ - 7, - 13 - ], - [ - 12, - 11 - ], - [ - 11, - 10 - ], - [ - 10, - 13 - ] - ], - "id": 1 - } - ], - "images": [ - { - "width": 1239, - "height": 731, - "file_name": "003464.jpg", - "id": 3464 - }, - { - "width": 925, - "height": 1080, - "file_name": "000061.jpg", - "id": 61 - } - ], - "annotations": [ - { - "bbox": [ - 0, - 0, - 1239, - 731 - ], - "category_id": 1, - "keypoints": [ - 225, - 215, - 2, - 285, - 194, - 2, - 191, - 368, - 2, - 417, - 428, - 2, - 308, - 594, - 2, - 536, - 401, - 2, - 642, - 638, - 2, - 893, - 419, - 2, - 974, - 494, - 2, - 885, - 584, - 2, - 925, - 328, - 2, - 1065, - 419, - 2, - 1050, - 583, - 2, - 994, - 186, - 2, - 592, - 277, - 2 - ], - "num_keypoints": 15, - "image_id": 3464, - "id": 3464, - "area": 905709, - "iscrowd": 0 - }, - { - "bbox": [ - 0, - 0, - 925, - 1080 - ], - "category_id": 1, - "keypoints": [ - 324, - 571, - 2, - 158, - 568, - 2, - 246, - 806, - 2, - 217, - 806, - 2, - 359, - 805, - 2, - 447, - 657, - 2, - 362, - 911, - 2, - 539, - 546, - 2, - 369, - 552, - 2, - 546, - 612, - 2, - 638, - 358, - 2, - 722, - 332, - 2, - 712, - 507, - 2, - 593, - 121, - 2, - 419, - 463, - 2 - ], - "num_keypoints": 15, - "image_id": 61, - "id": 61, - "area": 999000, - "iscrowd": 0 - } - ] +{ + "categories": [ + { + "keypoints": [ + "left_ear", + "right_ear", + "nose", + "right_shoulder", + "right_front_paw", + "left_shoulder", + "left_front_paw", + "right_hip", + "right_knee", + "right_back_paw", + "left_hip", + "left_knee", + "left_back_paw", + "tail", + "center" + ], + "name": "tiger", + "skeleton": [ + [ + 0, + 2 + ], + [ + 1, + 2 + ], + [ + 2, + 14 + ], + [ + 5, + 6 + ], + [ + 5, + 14 + ], + [ + 3, 
+ 4 + ], + [ + 3, + 14 + ], + [ + 13, + 14 + ], + [ + 9, + 8 + ], + [ + 8, + 7 + ], + [ + 7, + 13 + ], + [ + 12, + 11 + ], + [ + 11, + 10 + ], + [ + 10, + 13 + ] + ], + "id": 1 + } + ], + "images": [ + { + "width": 1239, + "height": 731, + "file_name": "003464.jpg", + "id": 3464 + }, + { + "width": 925, + "height": 1080, + "file_name": "000061.jpg", + "id": 61 + } + ], + "annotations": [ + { + "bbox": [ + 0, + 0, + 1239, + 731 + ], + "category_id": 1, + "keypoints": [ + 225, + 215, + 2, + 285, + 194, + 2, + 191, + 368, + 2, + 417, + 428, + 2, + 308, + 594, + 2, + 536, + 401, + 2, + 642, + 638, + 2, + 893, + 419, + 2, + 974, + 494, + 2, + 885, + 584, + 2, + 925, + 328, + 2, + 1065, + 419, + 2, + 1050, + 583, + 2, + 994, + 186, + 2, + 592, + 277, + 2 + ], + "num_keypoints": 15, + "image_id": 3464, + "id": 3464, + "area": 905709, + "iscrowd": 0 + }, + { + "bbox": [ + 0, + 0, + 925, + 1080 + ], + "category_id": 1, + "keypoints": [ + 324, + 571, + 2, + 158, + 568, + 2, + 246, + 806, + 2, + 217, + 806, + 2, + 359, + 805, + 2, + 447, + 657, + 2, + 362, + 911, + 2, + 539, + 546, + 2, + 369, + 552, + 2, + 546, + 612, + 2, + 638, + 358, + 2, + 722, + 332, + 2, + 712, + 507, + 2, + 593, + 121, + 2, + 419, + 463, + 2 + ], + "num_keypoints": 15, + "image_id": 61, + "id": 61, + "area": 999000, + "iscrowd": 0 + } + ] } \ No newline at end of file diff --git a/tests/data/campus/calibration_campus.json b/tests/data/campus/calibration_campus.json index 910d1eaf88..b797b5bad3 100644 --- a/tests/data/campus/calibration_campus.json +++ b/tests/data/campus/calibration_campus.json @@ -1,161 +1,161 @@ -{ - "0": { - "R": [ - [ - 0.9998819135498813, - -0.007627303394110196, - -0.013341034396255802 - ], - [ - -0.01412240122676837, - -0.11375390190151916, - -0.9934085803866252 - ], - [ - 0.00605943391894462, - 0.9934796797343738, - -0.11384818494586636 - ] - ], - "T": [ - [ - 1774.8953318252247 - ], - [ - -5051.695948238737 - ], - [ - 1923.3559877015355 - ] - ], - "fx": 437.9852173913044, - "fy": 437.9852173913044, - "cx": 185.3596, - "cy": 139.2537, - "k": [ - [ - 0.0 - ], - [ - 0.0 - ], - [ - 0.0 - ] - ], - "p": [ - [ - 0.0 - ], - [ - 0.0 - ] - ] - }, - "1": { - "R": [ - [ - -0.04633107785835382, - -0.9988140384937536, - 0.014964883303310195 - ], - [ - -0.13065076504992335, - -0.008793265243184023, - -0.9913894573164639 - ], - [ - 0.9903452977706073, - -0.04788731558734052, - -0.1300884168152014 - ] - ], - "T": [ - [ - -6240.579909342256 - ], - [ - 5247.348264374987 - ], - [ - 1947.3802148598609 - ] - ], - "fx": 430.03326086956525, - "fy": 430.03326086956525, - "cx": 184.0583, - "cy": 130.7467, - "k": [ - [ - 0.0 - ], - [ - 0.0 - ], - [ - 0.0 - ] - ], - "p": [ - [ - 0.0 - ], - [ - 0.0 - ] - ] - }, - "2": { - "R": [ - [ - 0.5386991962445586, - 0.8424723621738047, - -0.006595069276080057 - ], - [ - 0.10782367722838201, - -0.07670471706694504, - -0.9912065581949252 - ], - [ - -0.835570003407504, - 0.5332510715910186, - -0.13215923748499042 - ] - ], - "T": [ - [ - 11943.56106545541 - ], - [ - -1803.8527374133198 - ], - [ - 1973.3939116534714 - ] - ], - "fx": 700.9856521739131, - "fy": 700.9856521739131, - "cx": 167.59475, - "cy": 142.0545, - "k": [ - [ - 0.0 - ], - [ - 0.0 - ], - [ - 0.0 - ] - ], - "p": [ - [ - 0.0 - ], - [ - 0.0 - ] - ] - } +{ + "0": { + "R": [ + [ + 0.9998819135498813, + -0.007627303394110196, + -0.013341034396255802 + ], + [ + -0.01412240122676837, + -0.11375390190151916, + -0.9934085803866252 + ], + [ + 0.00605943391894462, + 0.9934796797343738, + -0.11384818494586636 + ] + ], + "T": [ + [ + 
1774.8953318252247 + ], + [ + -5051.695948238737 + ], + [ + 1923.3559877015355 + ] + ], + "fx": 437.9852173913044, + "fy": 437.9852173913044, + "cx": 185.3596, + "cy": 139.2537, + "k": [ + [ + 0.0 + ], + [ + 0.0 + ], + [ + 0.0 + ] + ], + "p": [ + [ + 0.0 + ], + [ + 0.0 + ] + ] + }, + "1": { + "R": [ + [ + -0.04633107785835382, + -0.9988140384937536, + 0.014964883303310195 + ], + [ + -0.13065076504992335, + -0.008793265243184023, + -0.9913894573164639 + ], + [ + 0.9903452977706073, + -0.04788731558734052, + -0.1300884168152014 + ] + ], + "T": [ + [ + -6240.579909342256 + ], + [ + 5247.348264374987 + ], + [ + 1947.3802148598609 + ] + ], + "fx": 430.03326086956525, + "fy": 430.03326086956525, + "cx": 184.0583, + "cy": 130.7467, + "k": [ + [ + 0.0 + ], + [ + 0.0 + ], + [ + 0.0 + ] + ], + "p": [ + [ + 0.0 + ], + [ + 0.0 + ] + ] + }, + "2": { + "R": [ + [ + 0.5386991962445586, + 0.8424723621738047, + -0.006595069276080057 + ], + [ + 0.10782367722838201, + -0.07670471706694504, + -0.9912065581949252 + ], + [ + -0.835570003407504, + 0.5332510715910186, + -0.13215923748499042 + ] + ], + "T": [ + [ + 11943.56106545541 + ], + [ + -1803.8527374133198 + ], + [ + 1973.3939116534714 + ] + ], + "fx": 700.9856521739131, + "fy": 700.9856521739131, + "cx": 167.59475, + "cy": 142.0545, + "k": [ + [ + 0.0 + ], + [ + 0.0 + ], + [ + 0.0 + ] + ], + "p": [ + [ + 0.0 + ], + [ + 0.0 + ] + ] + } } \ No newline at end of file diff --git a/tests/data/coco/test_coco.json b/tests/data/coco/test_coco.json index 75448df5cd..4430fda0fd 100644 --- a/tests/data/coco/test_coco.json +++ b/tests/data/coco/test_coco.json @@ -1,2465 +1,2465 @@ -{ - "info": { - "description": "For testing COCO dataset only.", - "year": 2020, - "date_created": "2020/06/20" - }, - "licenses": [ - { - "url": "http://creativecommons.org/licenses/by-nc-sa/2.0/", - "id": 1, - "name": "Attribution-NonCommercial-ShareAlike License" - }, - { - "url": "http://creativecommons.org/licenses/by-nc/2.0/", - "id": 2, - "name": "Attribution-NonCommercial License" - }, - { - "url": "http://creativecommons.org/licenses/by-nc-nd/2.0/", - "id": 3, - "name": "Attribution-NonCommercial-NoDerivs License" - }, - { - "url": "http://creativecommons.org/licenses/by/2.0/", - "id": 4, - "name": "Attribution License" - }, - { - "url": "http://creativecommons.org/licenses/by-sa/2.0/", - "id": 5, - "name": "Attribution-ShareAlike License" - }, - { - "url": "http://creativecommons.org/licenses/by-nd/2.0/", - "id": 6, - "name": "Attribution-NoDerivs License" - }, - { - "url": "http://flickr.com/commons/usage/", - "id": 7, - "name": "No known copyright restrictions" - }, - { - "url": "http://www.usa.gov/copyright.shtml", - "id": 8, - "name": "United States Government Work" - } - ], - "categories": [ - { - "supercategory": "person", - "id": 1, - "name": "person", - "keypoints": [ - "nose", - "left_eye", - "right_eye", - "left_ear", - "right_ear", - "left_shoulder", - "right_shoulder", - "left_elbow", - "right_elbow", - "left_wrist", - "right_wrist", - "left_hip", - "right_hip", - "left_knee", - "right_knee", - "left_ankle", - "right_ankle" - ], - "skeleton": [ - [ - 16, - 14 - ], - [ - 14, - 12 - ], - [ - 17, - 15 - ], - [ - 15, - 13 - ], - [ - 12, - 13 - ], - [ - 6, - 12 - ], - [ - 7, - 13 - ], - [ - 6, - 7 - ], - [ - 6, - 8 - ], - [ - 7, - 9 - ], - [ - 8, - 10 - ], - [ - 9, - 11 - ], - [ - 2, - 3 - ], - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 4 - ], - [ - 3, - 5 - ], - [ - 4, - 6 - ], - [ - 5, - 7 - ] - ] - } - ], - "images": [ - { - "license": 4, - "file_name": 
"000000000785.jpg", - "coco_url": "http://images.cocodataset.org/val2017/000000000785.jpg", - "height": 425, - "width": 640, - "date_captured": "2013-11-19 21:22:42", - "flickr_url": "http://farm8.staticflickr.com/7015/6795644157_f019453ae7_z.jpg", - "id": 785 - }, - { - "license": 3, - "file_name": "000000040083.jpg", - "coco_url": "http://images.cocodataset.org/val2017/000000040083.jpg", - "height": 333, - "width": 500, - "date_captured": "2013-11-18 03:30:24", - "flickr_url": "http://farm1.staticflickr.com/116/254881838_e21c6d17b8_z.jpg", - "id": 40083 - }, - { - "license": 1, - "file_name": "000000196141.jpg", - "coco_url": "http://images.cocodataset.org/val2017/000000196141.jpg", - "height": 429, - "width": 640, - "date_captured": "2013-11-22 22:37:15", - "flickr_url": "http://farm4.staticflickr.com/3310/3611902235_57d4ae496d_z.jpg", - "id": 196141 - }, - { - "license": 3, - "file_name": "000000197388.jpg", - "coco_url": "http://images.cocodataset.org/val2017/000000197388.jpg", - "height": 392, - "width": 640, - "date_captured": "2013-11-19 20:10:37", - "flickr_url": "http://farm9.staticflickr.com/8375/8507321836_5b8b13188f_z.jpg", - "id": 197388 - } - ], - "annotations": [ - { - "segmentation": [ - [ - 353.37, - 67.65, - 358.15, - 52.37, - 362.92, - 47.59, - 374.38, - 44.73, - 389.66, - 52.37, - 389.66, - 67.65, - 389.66, - 76.25, - 393.48, - 83.89, - 396.35, - 88.66, - 397.3, - 91.53, - 406.85, - 99.17, - 413.54, - 104.9, - 451.74, - 148.83, - 458.43, - 153.6, - 462.25, - 166.02, - 467.02, - 173.66, - 463.2, - 181.3, - 449.83, - 183.21, - 448.88, - 191.81, - 455.56, - 226.19, - 448.88, - 254.84, - 453.65, - 286.36, - 475.62, - 323.6, - 491.85, - 361.81, - 494.72, - 382.82, - 494.72, - 382.82, - 499.49, - 391.41, - 416.4, - 391.41, - 424.04, - 383.77, - 439.33, - 374.22, - 445.06, - 360.85, - 436.46, - 334.11, - 421.18, - 303.55, - 416.4, - 289.22, - 409.72, - 268.21, - 396.35, - 280.63, - 405.9, - 298.77, - 417.36, - 324.56, - 425, - 349.39, - 425, - 357.99, - 419.27, - 360.85, - 394.44, - 367.54, - 362.92, - 370.4, - 346.69, - 367.54, - 360.06, - 362.76, - 369.61, - 360.85, - 382.98, - 340.8, - 355.28, - 271.08, - 360.06, - 266.3, - 386.8, - 219.5, - 368.65, - 162.2, - 348.6, - 175.57, - 309.44, - 187.03, - 301.8, - 192.76, - 288.43, - 193.72, - 282.7, - 193.72, - 280.79, - 187.03, - 280.79, - 174.62, - 287.47, - 171.75, - 291.29, - 171.75, - 295.11, - 171.75, - 306.57, - 166.98, - 312.3, - 165.07, - 345.73, - 142.14, - 350.51, - 117.31, - 350.51, - 102.03, - 350.51, - 90.57, - 353.37, - 65.74 - ] - ], - "num_keypoints": 17, - "area": 27789.11055, - "iscrowd": 0, - "keypoints": [ - 367, - 81, - 2, - 374, - 73, - 2, - 360, - 75, - 2, - 386, - 78, - 2, - 356, - 81, - 2, - 399, - 108, - 2, - 358, - 129, - 2, - 433, - 142, - 2, - 341, - 159, - 2, - 449, - 165, - 2, - 309, - 178, - 2, - 424, - 203, - 2, - 393, - 214, - 2, - 429, - 294, - 2, - 367, - 273, - 2, - 466, - 362, - 2, - 396, - 341, - 2 - ], - "image_id": 785, - "bbox": [ - 280.79, - 44.73, - 218.7, - 346.68 - ], - "category_id": 1, - "id": 442619 - }, - { - "segmentation": [ - [ - 98.56, - 273.72, - 132.9, - 267, - 140.37, - 281.93, - 165.75, - 285.66, - 156.79, - 264.01, - 170.23, - 261.02, - 177.7, - 272.97, - 182.18, - 279.69, - 200.85, - 268.49, - 212.79, - 255.05, - 188.9, - 256.54, - 164.26, - 240.12, - 139.62, - 212.49, - 109.01, - 221.45, - 103.04, - 220.71, - 122.45, - 202.04, - 113.49, - 196.07, - 96.32, - 168.44, - 97.06, - 162.47, - 110.5, - 136.34, - 112, - 124.39, - 91.09, - 110.95, - 80.64, - 114.68, - 71.68, - 
131.86, - 62.72, - 147.54, - 57.49, - 156.5, - 48.53, - 168.44, - 41.07, - 180.39, - 38.08, - 193.08, - 40.32, - 205.03, - 47.04, - 213.24, - 54.5, - 216.23, - 82.13, - 252.06, - 91.09, - 271.48 - ] - ], - "num_keypoints": 14, - "area": 11025.219, - "iscrowd": 0, - "keypoints": [ - 99, - 144, - 2, - 104, - 141, - 2, - 96, - 137, - 2, - 0, - 0, - 0, - 78, - 133, - 2, - 56, - 161, - 2, - 81, - 162, - 2, - 0, - 0, - 0, - 103, - 208, - 2, - 116, - 204, - 2, - 0, - 0, - 0, - 57, - 246, - 1, - 82, - 259, - 1, - 137, - 219, - 2, - 138, - 247, - 2, - 177, - 256, - 2, - 158, - 296, - 1 - ], - "image_id": 40083, - "bbox": [ - 38.08, - 110.95, - 174.71, - 174.71 - ], - "category_id": 1, - "id": 198196 - }, - { - "segmentation": [ - [ - 257.76, - 288.05, - 273.4, - 258.26, - 325.55, - 253.79, - 335.23, - 232.93, - 326.3, - 186.74, - 333.74, - 177.05, - 327.79, - 153.21, - 333.74, - 142.04, - 344.17, - 139.06, - 353.11, - 139.06, - 359.07, - 145.02, - 360.56, - 148.74, - 362.05, - 168.86, - 388.87, - 197.17, - 397.81, - 276.88, - 372.48, - 293.27 - ] - ], - "num_keypoints": 15, - "area": 10171.9544, - "iscrowd": 0, - "keypoints": [ - 343, - 164, - 2, - 348, - 160, - 2, - 340, - 160, - 2, - 359, - 163, - 2, - 332, - 164, - 2, - 370, - 189, - 2, - 334, - 190, - 2, - 358, - 236, - 2, - 348, - 234, - 2, - 339, - 270, - 2, - 330, - 262, - 2, - 378, - 262, - 2, - 343, - 254, - 2, - 338, - 280, - 2, - 283, - 272, - 2, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "image_id": 40083, - "bbox": [ - 257.76, - 139.06, - 140.05, - 154.21 - ], - "category_id": 1, - "id": 230195 - }, - { - "segmentation": [ - [ - 285.37, - 126.5, - 281.97, - 127.72, - 280.76, - 132.33, - 280.76, - 136.46, - 275.17, - 143.26, - 275.9, - 158.08, - 277.6, - 164.4, - 278.33, - 173.87, - 278.33, - 183.83, - 279.79, - 191.11, - 281.97, - 194.76, - 284.89, - 192.09, - 284.89, - 186.99, - 284.89, - 181.16, - 284.64, - 177.51, - 285.86, - 173.87 - ] - ], - "num_keypoints": 0, - "area": 491.2669, - "iscrowd": 0, - "keypoints": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "image_id": 40083, - "bbox": [ - 275.17, - 126.5, - 10.69, - 68.26 - ], - "category_id": 1, - "id": 1202706 - }, - { - "segmentation": [ - [ - 339.34, - 107.97, - 338.38, - 102.19, - 339.34, - 91.58, - 335.49, - 84.84, - 326.81, - 74.23, - 312.35, - 74.23, - 301.75, - 74.23, - 295, - 86.76, - 295, - 93.51, - 292.11, - 99.3, - 287.29, - 102.19, - 291.14, - 107.01, - 295, - 107.01, - 295.96, - 112.79, - 301.75, - 115.69, - 305.6, - 119.54, - 307.53, - 123.4, - 317.17, - 123.4, - 311.39, - 129.18, - 286.32, - 139.79, - 274.75, - 139.79, - 264.15, - 138.82, - 262.22, - 144.61, - 261.26, - 147.5, - 253.54, - 147.5, - 247.76, - 150.39, - 249.69, - 159.07, - 256.44, - 161, - 262.22, - 161, - 268, - 161, - 276.68, - 161.96, - 284.39, - 168.71, - 293.07, - 174.49, - 301.75, - 174.49, - 308.49, - 169.67, - 308.49, - 188.95, - 311.39, - 194.74, - 312.35, - 208.23, - 307.53, - 221.73, - 297.89, - 229.44, - 281.5, - 250.65, - 269.93, - 262.22, - 278.61, - 320.06, - 281.5, - 331.63, - 276.68, - 338.38, - 270.9, - 349.95, - 262.22, - 356.7, - 253.54, - 359.59, - 253.54, - 365.37, - 274.75, - 365.37, - 291.14, - 365.37, - 306.57, - 359.59, - 303.67, - 352.84, - 297.89, - 340.31, - 293.07, - 318.13, - 295, - 294.03, - 293.07, - 278.61, - 294.03, - 270.9, - 305.6, - 259.33, - 313.31, - 
299.82, - 319.1, - 309.46, - 341.27, - 317.17, - 384.65, - 330.67, - 387.55, - 335.49, - 383.69, - 341.27, - 397.19, - 350.91, - 398.15, - 363.44, - 398.15, - 375.01, - 405.86, - 374.05, - 409.72, - 357.66, - 411.65, - 342.24, - 416.47, - 328.74, - 417.43, - 321.03, - 410.68, - 319.1, - 401.04, - 318.13, - 392.37, - 318.13, - 382.73, - 314.28, - 348.98, - 300.78, - 339.34, - 293.07, - 334.52, - 285.36, - 340.31, - 259.33, - 340.31, - 246.8, - 340.31, - 242.94, - 350.91, - 228.48, - 358.62, - 214.98, - 355.22, - 204.32, - 357.05, - 196.11, - 361.61, - 188.82, - 361.61, - 181.97, - 365.26, - 165.63, - 367.54, - 139.18, - 366.17, - 123.68, - 361.15, - 112.73, - 353.86, - 107.72, - 351.58, - 105.89, - 344.74, - 105.89, - 340.18, - 109.08 - ] - ], - "num_keypoints": 15, - "area": 17123.92955, - "iscrowd": 0, - "keypoints": [ - 297, - 111, - 2, - 299, - 106, - 2, - 0, - 0, - 0, - 314, - 108, - 2, - 0, - 0, - 0, - 329, - 141, - 2, - 346, - 125, - 2, - 295, - 164, - 2, - 323, - 130, - 2, - 266, - 155, - 2, - 279, - 143, - 2, - 329, - 225, - 2, - 331, - 221, - 2, - 327, - 298, - 2, - 283, - 269, - 2, - 398, - 327, - 2, - 288, - 349, - 2 - ], - "image_id": 196141, - "bbox": [ - 247.76, - 74.23, - 169.67, - 300.78 - ], - "category_id": 1, - "id": 460541 - }, - { - "segmentation": [ - [ - 578.76, - 112.4, - 589.39, - 100.81, - 589.39, - 99.84, - 596.16, - 116.27, - 603.89, - 122.07, - 603.89, - 138.49, - 598.09, - 159.75, - 597.12, - 181, - 594.22, - 191.63, - 589.39, - 212.89, - 583.59, - 208.06, - 583.59, - 206.13, - 582.63, - 200.33, - 582.63, - 193.57, - 582.63, - 182.94, - 575.86, - 181, - 567.17, - 197.43, - 571.03, - 203.23, - 567.17, - 207.09, - 555.57, - 208.06, - 562.34, - 200.33, - 565.24, - 190.67, - 565.24, - 173.27, - 566.2, - 163.61, - 568.14, - 156.85, - 570.07, - 148.15, - 566.2, - 143.32, - 565.24, - 133.66, - 575.86, - 118.2 - ] - ], - "num_keypoints": 15, - "area": 2789.0208, - "iscrowd": 0, - "keypoints": [ - 589, - 113, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 595, - 112, - 1, - 584, - 110, - 2, - 598, - 123, - 2, - 579, - 119, - 2, - 594, - 141, - 2, - 570, - 137, - 2, - 576, - 135, - 2, - 585, - 139, - 2, - 590, - 157, - 2, - 574, - 156, - 2, - 589, - 192, - 2, - 565, - 189, - 1, - 587, - 222, - 1, - 557, - 219, - 1 - ], - "image_id": 196141, - "bbox": [ - 555.57, - 99.84, - 48.32, - 113.05 - ], - "category_id": 1, - "id": 488308 - }, - { - "segmentation": [ - [ - 446.96, - 73.13, - 445.81, - 77.71, - 443.33, - 78.29, - 441.61, - 81.72, - 441.23, - 84.58, - 440.85, - 90.5, - 442.19, - 94.32, - 443.52, - 97.18, - 443.52, - 102.33, - 442.57, - 105.58, - 446.58, - 105.19, - 447.15, - 99.85, - 447.53, - 94.89, - 446, - 93.55, - 446.38, - 92.03, - 453.64, - 92.41, - 454.02, - 94.51, - 457.64, - 94.51, - 455.74, - 88.4, - 455.35, - 82.29, - 453.64, - 78.48, - 451.92, - 77.71, - 452.87, - 74.47, - 450.58, - 73.13 - ] - ], - "num_keypoints": 0, - "area": 285.7906, - "iscrowd": 0, - "keypoints": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "image_id": 196141, - "bbox": [ - 440.85, - 73.13, - 16.79, - 32.45 - ], - "category_id": 1, - "id": 508900 - }, - { - "segmentation": [ - [ - 497.15, - 413.95, - 531.55, - 417.68, - 548.74, - 411.7, - 551.74, - 403.48, - 546.5, - 394.5, - 543.51, - 386.28, - 571.93, - 390.76, - 574.92, - 391.51, - 579.4, - 409.46, - 605.58, - 409.46, - 
615.3, - 408.71, - 607.07, - 389.27, - 598.1, - 381.79, - 607.82, - 366.83, - 607.82, - 352.63, - 610.06, - 338.42, - 619.04, - 345.15, - 631, - 344.4, - 630.25, - 336.92, - 626.51, - 318.98, - 616.05, - 286.07, - 598.85, - 263.64, - 585.39, - 257.66, - 593.61, - 244.2, - 601.09, - 235.97, - 596.6, - 219.52, - 587.63, - 211.29, - 577.91, - 208.3, - 563.7, - 206.81, - 556.22, - 214.29, - 548, - 217.28, - 539.77, - 229.99, - 539.77, - 241.95, - 539.02, - 247.19, - 523.32, - 247.19, - 503.88, - 254.67, - 485.93, - 254.67, - 479.95, - 248.68, - 473.22, - 241.21, - 485.93, - 227, - 477.7, - 215.78, - 457.51, - 215.78, - 453.77, - 235.22, - 463.5, - 246.44, - 465.74, - 261.4, - 490.42, - 274.11, - 501.63, - 275.6, - 504.62, - 286.07, - 519.58, - 286.07, - 522.57, - 292.06, - 512.85, - 310, - 515.09, - 330.94, - 530.05, - 343.65, - 505.37, - 341.41, - 479.95, - 339.91, - 465.74, - 346.64, - 463.5, - 358.61, - 473.97, - 381.04, - 485.18, - 390.02, - 501.63, - 398.99, - 504.62, - 404.22, - 491.16, - 412.45, - 495.65, - 417.68 - ] - ], - "num_keypoints": 12, - "area": 21608.94075, - "iscrowd": 0, - "keypoints": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 552, - 234, - 2, - 0, - 0, - 0, - 531, - 262, - 2, - 600, - 283, - 2, - 480, - 260, - 2, - 622, - 336, - 2, - 466, - 242, - 2, - 0, - 0, - 0, - 546, - 365, - 2, - 592, - 371, - 2, - 470, - 351, - 2, - 551, - 330, - 2, - 519, - 394, - 2, - 589, - 391, - 2 - ], - "image_id": 196141, - "bbox": [ - 453.77, - 206.81, - 177.23, - 210.87 - ], - "category_id": 1, - "id": 1717641 - }, - { - "segmentation": [ - [ - 58.93, - 163.67, - 47.18, - 161.59, - 36.12, - 93.86, - 41.65, - 82.8, - 40.27, - 69.66, - 50.64, - 67.59, - 55.48, - 73.81, - 63.08, - 92.47, - 66.53, - 99.38, - 65.15, - 109.06, - 61, - 127.03, - 59.62, - 162.97 - ] - ], - "num_keypoints": 17, - "area": 1870.14015, - "iscrowd": 0, - "keypoints": [ - 48, - 79, - 2, - 50, - 77, - 2, - 46, - 77, - 2, - 54, - 78, - 2, - 45, - 78, - 2, - 57, - 90, - 2, - 42, - 90, - 2, - 63, - 103, - 2, - 42, - 105, - 2, - 56, - 113, - 2, - 49, - 112, - 2, - 55, - 117, - 2, - 44, - 117, - 2, - 55, - 140, - 2, - 47, - 140, - 2, - 56, - 160, - 2, - 49, - 159, - 2 - ], - "image_id": 196141, - "bbox": [ - 36.12, - 67.59, - 30.41, - 96.08 - ], - "category_id": 1, - "id": 1724673 - }, - { - "segmentation": [ - [ - 139.41, - 321.58, - 144.78, - 326.56, - 196.92, - 314.68, - 196.16, - 309.31, - 207.28, - 292.05, - 213.03, - 284, - 228.75, - 270.2, - 233.35, - 261.38, - 244.47, - 252.56, - 254.44, - 237.61, - 267.86, - 215.37, - 272.08, - 212.68, - 285.5, - 232.62, - 294.7, - 250.64, - 295.08, - 264.06, - 290.87, - 277.87, - 290.87, - 286.3, - 289.71, - 298.19, - 281.66, - 318.89, - 282.05, - 334.23, - 295.08, - 340.37, - 315.02, - 343.82, - 314.25, - 336.53, - 310.42, - 330.4, - 301.98, - 322.34, - 304.29, - 310.84, - 304.67, - 302.79, - 306.2, - 292.05, - 311.19, - 275.56, - 313.87, - 251.79, - 311.19, - 234.54, - 312.72, - 224.57, - 310.42, - 212.3, - 307.74, - 201.56, - 306.2, - 193.51, - 306.59, - 183.16, - 310.04, - 177.41, - 314.64, - 173.19, - 316.94, - 171.65, - 328.06, - 163.99, - 337.64, - 157.85, - 343.4, - 159.77, - 346.46, - 166.67, - 346.85, - 170.5, - 346.46, - 179.71, - 346.85, - 188.53, - 346.85, - 191.98, - 344.55, - 198.11, - 342.25, - 203.48, - 338.41, - 208.46, - 335.34, - 212.68, - 335.34, - 217.67, - 343.01, - 222.65, - 354.9, - 210.76, - 359.12, - 196.19, - 361.8, - 173.19, - 361.42, - 161.69, - 356.43, - 150.18, - 344.93, - 135.61, - 343.01, - 132.93, - 345.31, - 126.41, - 345.7, - 124.88, - 
343.4, - 115.29, - 340.33, - 104.17, - 337.26, - 102.25, - 330.36, - 103.4, - 326.14, - 106.09, - 320.01, - 111.07, - 314.64, - 119.89, - 310.42, - 121.04, - 292.02, - 121.81, - 279.75, - 127.94, - 244.09, - 138.68, - 240.25, - 142.51, - 238.72, - 154.4, - 239.1, - 163.6, - 239.87, - 173.96, - 241.79, - 181.24, - 248.3, - 192.36, - 240.25, - 206.55, - 236.42, - 219.2, - 229.9, - 236.45, - 225.3, - 247.57, - 218.4, - 254.48, - 208.81, - 265.6, - 202.29, - 278.25, - 195.39, - 285.92, - 188.49, - 292.05, - 183.5, - 295.89, - 176.6, - 302.41, - 172, - 308.54, - 167.78, - 313.14, - 146.31, - 318.89 - ] - ], - "num_keypoints": 16, - "area": 14250.29385, - "iscrowd": 0, - "keypoints": [ - 334, - 135, - 2, - 340, - 129, - 2, - 331, - 129, - 2, - 0, - 0, - 0, - 319, - 123, - 2, - 340, - 146, - 2, - 292, - 133, - 2, - 353, - 164, - 2, - 246, - 144, - 2, - 354, - 197, - 2, - 250, - 185, - 2, - 293, - 197, - 2, - 265, - 187, - 2, - 305, - 252, - 2, - 231, - 254, - 2, - 293, - 321, - 2, - 193, - 297, - 2 - ], - "image_id": 197388, - "bbox": [ - 139.41, - 102.25, - 222.39, - 241.57 - ], - "category_id": 1, - "id": 437295 - }, - { - "segmentation": [ - [ - 287.17, - 121.42, - 294.22, - 106.44, - 302.15, - 116.13, - 303.03, - 121.42 - ], - [ - 297.74, - 99.39, - 310.08, - 76.49, - 326.81, - 76.49, - 329.46, - 67.68, - 337.38, - 61.52, - 346.19, - 62.4, - 353.24, - 65.92, - 353.24, - 76.49, - 355.88, - 84.42, - 359.41, - 87.94, - 362.05, - 96.75, - 354.12, - 139.04, - 349.72, - 142.56, - 345.31, - 139.92, - 349.72, - 117.89, - 348.84, - 108.2, - 345.31, - 113.49, - 336.5, - 101.16, - 325.93, - 110.85, - 311.84, - 123.18 - ], - [ - 324.17, - 176.91, - 332.1, - 191.89, - 328.58, - 198.94, - 327.69, - 205.98, - 333.86, - 213.03, - 337.38, - 227.13, - 332.98, - 227.13, - 319.77, - 219.2, - 313.6, - 211.27 - ], - [ - 332.98, - 165.46, - 341.79, - 161.06, - 336.5, - 174.27, - 333.86, - 186.6, - 326.81, - 176.03 - ] - ], - "num_keypoints": 16, - "area": 3404.869, - "iscrowd": 0, - "keypoints": [ - 345, - 92, - 2, - 350, - 87, - 2, - 341, - 87, - 2, - 0, - 0, - 0, - 330, - 83, - 2, - 357, - 94, - 2, - 316, - 92, - 2, - 357, - 104, - 2, - 291, - 123, - 1, - 351, - 133, - 2, - 281, - 136, - 1, - 326, - 131, - 1, - 305, - 128, - 1, - 336, - 152, - 1, - 303, - 171, - 1, - 318, - 206, - 2, - 294, - 211, - 1 - ], - "image_id": 197388, - "bbox": [ - 287.17, - 61.52, - 74.88, - 165.61 - ], - "category_id": 1, - "id": 467657 - }, - { - "segmentation": [ - [ - 547.95, - 201.57, - 546.73, - 190.62, - 547.95, - 181.49, - 547.95, - 169.31, - 547.95, - 156.53, - 546.73, - 144.36, - 544.3, - 139.49, - 540.04, - 132.19, - 540.04, - 121.84, - 542.47, - 107.24, - 544.3, - 99.33, - 548.56, - 88.98, - 561.95, - 78.03, - 572.29, - 71.33, - 572.29, - 71.33, - 572.29, - 65.25, - 574.12, - 51.86, - 583.86, - 48.81, - 592.99, - 48.81, - 597.86, - 57.33, - 599.07, - 64.64, - 608.2, - 76.81, - 614.9, - 82.89, - 620.98, - 89.59, - 628.89, - 93.24, - 636.81, - 101.76, - 640, - 109.67, - 640, - 115.76, - 640, - 127.93, - 620.37, - 111.5, - 619.16, - 111.5, - 618.55, - 112.11, - 608.2, - 105.41, - 600.9, - 119.41, - 592.99, - 131.58, - 596.03, - 148.01, - 605.16, - 162.01, - 612.46, - 190.01, - 614.9, - 204.61, - 606.98, - 216.78, - 603.94, - 226.52, - 606.38, - 239.91, - 605.16, - 256.95, - 604.55, - 264.26, - 602.12, - 271.56, - 586.29, - 272.17, - 584.47, - 255.13, - 588.73, - 237.48, - 592.99, - 221.65, - 596.64, - 207.05, - 596.64, - 197.31, - 594.2, - 186.96, - 584.47, - 172.36, - 577.77, - 166.27, - 570.47, - 170.53, - 558.91, - 179.66, 
- 555.86, - 192.44, - 548.56, - 198.53, - 547.95, - 198.53 - ] - ], - "num_keypoints": 15, - "area": 8913.98475, - "iscrowd": 0, - "keypoints": [ - 591, - 78, - 2, - 594, - 74, - 2, - 586, - 74, - 2, - 0, - 0, - 0, - 573, - 70, - 2, - 598, - 86, - 2, - 566, - 93, - 2, - 626, - 105, - 2, - 546, - 126, - 2, - 0, - 0, - 0, - 561, - 150, - 2, - 582, - 150, - 2, - 557, - 154, - 2, - 606, - 194, - 2, - 558, - 209, - 1, - 591, - 252, - 2, - 539, - 262, - 1 - ], - "image_id": 197388, - "bbox": [ - 540.04, - 48.81, - 99.96, - 223.36 - ], - "category_id": 1, - "id": 531914 - }, - { - "segmentation": [ - [ - 561.51, - 385.38, - 572.11, - 352.71, - 570.34, - 317.4, - 559.75, - 282.08, - 552.68, - 267.07, - 565.93, - 236.17, - 583.59, - 236.17, - 602.13, - 260.01, - 614.49, - 286.5, - 628.61, - 302.39, - 639.21, - 281.2, - 614.49, - 251.18, - 588, - 218.51, - 595.95, - 202.62, - 594.18, - 185.85, - 580.05, - 170.84, - 562.4, - 179.67, - 557.98, - 198.21, - 554.45, - 202.62, - 532.38, - 199.97, - 525.32, - 202.62, - 511.19, - 229.11, - 493.53, - 256.48, - 484.7, - 276.78, - 451.15, - 323.58, - 423.78, - 338.59, - 388.47, - 373.9, - 372.58, - 387.14, - 396.41, - 388.03, - 418.49, - 367.72, - 450.27, - 345.65, - 501.48, - 306.8, - 520.02, - 301.5, - 552.68, - 340.35, - 543.86, - 369.49 - ] - ], - "num_keypoints": 16, - "area": 14267.20475, - "iscrowd": 0, - "keypoints": [ - 580, - 211, - 2, - 586, - 206, - 2, - 574, - 204, - 2, - 0, - 0, - 0, - 562, - 198, - 2, - 584, - 220, - 2, - 529, - 215, - 2, - 599, - 242, - 2, - 512, - 260, - 2, - 619, - 274, - 2, - 538, - 285, - 2, - 537, - 288, - 2, - 506, - 277, - 2, - 562, - 332, - 2, - 452, - 332, - 2, - 550, - 387, - 1, - 402, - 371, - 2 - ], - "image_id": 197388, - "bbox": [ - 372.58, - 170.84, - 266.63, - 217.19 - ], - "category_id": 1, - "id": 533949 - }, - { - "segmentation": [ - [ - 2.03, - 75.18, - 10.85, - 70.58, - 16.99, - 65.59, - 17.75, - 55.24, - 20.05, - 50.25, - 29.64, - 43.74, - 37.31, - 47.57, - 41.52, - 53.7, - 43.83, - 64.82, - 53.03, - 70.19, - 61.85, - 77.09, - 72.58, - 87.06, - 74.88, - 79.01, - 78.72, - 73.64, - 86.39, - 77.86, - 90.6, - 90.13, - 86, - 93.2, - 82.17, - 102.4, - 75.27, - 106.24, - 68.75, - 104.7, - 50.34, - 90.9, - 43.06, - 112.37, - 40.76, - 123.11, - 42.29, - 130.78, - 48.04, - 161.83, - 52.26, - 190.59, - 50.73, - 210.15, - 44.21, - 245.04, - 50.34, - 256.16, - 53.03, - 261.53, - 47.28, - 263.83, - 40.37, - 263.83, - 31.56, - 260.76, - 28.1, - 256.16, - 26.95, - 244.65, - 29.25, - 233.54, - 32.71, - 223.95, - 33.09, - 213.98, - 32.32, - 206.31, - 32.71, - 194.81, - 33.09, - 185.61, - 24.65, - 177.17, - 16.99, - 161.45, - 13.53, - 176.02, - 10.85, - 206.31, - 1.65, - 231.62, - 1.65, - 235.84, - 0.5, - 146.88, - 0.88, - 122.34, - 1.65, - 75.56 - ] - ], - "num_keypoints": 13, - "area": 8260.75085, - "iscrowd": 0, - "keypoints": [ - 36, - 79, - 2, - 40, - 74, - 2, - 31, - 75, - 2, - 0, - 0, - 0, - 19, - 69, - 2, - 45, - 77, - 2, - 2, - 89, - 2, - 74, - 99, - 2, - 0, - 0, - 0, - 78, - 92, - 2, - 0, - 0, - 0, - 33, - 149, - 2, - 7, - 153, - 2, - 44, - 196, - 2, - 2, - 205, - 2, - 35, - 245, - 2, - 0, - 0, - 0 - ], - "image_id": 197388, - "bbox": [ - 0.5, - 43.74, - 90.1, - 220.09 - ], - "category_id": 1, - "id": 543117 - } - ] -} +{ + "info": { + "description": "For testing COCO dataset only.", + "year": 2020, + "date_created": "2020/06/20" + }, + "licenses": [ + { + "url": "http://creativecommons.org/licenses/by-nc-sa/2.0/", + "id": 1, + "name": "Attribution-NonCommercial-ShareAlike License" + }, + { + "url": 
"http://creativecommons.org/licenses/by-nc/2.0/", + "id": 2, + "name": "Attribution-NonCommercial License" + }, + { + "url": "http://creativecommons.org/licenses/by-nc-nd/2.0/", + "id": 3, + "name": "Attribution-NonCommercial-NoDerivs License" + }, + { + "url": "http://creativecommons.org/licenses/by/2.0/", + "id": 4, + "name": "Attribution License" + }, + { + "url": "http://creativecommons.org/licenses/by-sa/2.0/", + "id": 5, + "name": "Attribution-ShareAlike License" + }, + { + "url": "http://creativecommons.org/licenses/by-nd/2.0/", + "id": 6, + "name": "Attribution-NoDerivs License" + }, + { + "url": "http://flickr.com/commons/usage/", + "id": 7, + "name": "No known copyright restrictions" + }, + { + "url": "http://www.usa.gov/copyright.shtml", + "id": 8, + "name": "United States Government Work" + } + ], + "categories": [ + { + "supercategory": "person", + "id": 1, + "name": "person", + "keypoints": [ + "nose", + "left_eye", + "right_eye", + "left_ear", + "right_ear", + "left_shoulder", + "right_shoulder", + "left_elbow", + "right_elbow", + "left_wrist", + "right_wrist", + "left_hip", + "right_hip", + "left_knee", + "right_knee", + "left_ankle", + "right_ankle" + ], + "skeleton": [ + [ + 16, + 14 + ], + [ + 14, + 12 + ], + [ + 17, + 15 + ], + [ + 15, + 13 + ], + [ + 12, + 13 + ], + [ + 6, + 12 + ], + [ + 7, + 13 + ], + [ + 6, + 7 + ], + [ + 6, + 8 + ], + [ + 7, + 9 + ], + [ + 8, + 10 + ], + [ + 9, + 11 + ], + [ + 2, + 3 + ], + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 4 + ], + [ + 3, + 5 + ], + [ + 4, + 6 + ], + [ + 5, + 7 + ] + ] + } + ], + "images": [ + { + "license": 4, + "file_name": "000000000785.jpg", + "coco_url": "http://images.cocodataset.org/val2017/000000000785.jpg", + "height": 425, + "width": 640, + "date_captured": "2013-11-19 21:22:42", + "flickr_url": "http://farm8.staticflickr.com/7015/6795644157_f019453ae7_z.jpg", + "id": 785 + }, + { + "license": 3, + "file_name": "000000040083.jpg", + "coco_url": "http://images.cocodataset.org/val2017/000000040083.jpg", + "height": 333, + "width": 500, + "date_captured": "2013-11-18 03:30:24", + "flickr_url": "http://farm1.staticflickr.com/116/254881838_e21c6d17b8_z.jpg", + "id": 40083 + }, + { + "license": 1, + "file_name": "000000196141.jpg", + "coco_url": "http://images.cocodataset.org/val2017/000000196141.jpg", + "height": 429, + "width": 640, + "date_captured": "2013-11-22 22:37:15", + "flickr_url": "http://farm4.staticflickr.com/3310/3611902235_57d4ae496d_z.jpg", + "id": 196141 + }, + { + "license": 3, + "file_name": "000000197388.jpg", + "coco_url": "http://images.cocodataset.org/val2017/000000197388.jpg", + "height": 392, + "width": 640, + "date_captured": "2013-11-19 20:10:37", + "flickr_url": "http://farm9.staticflickr.com/8375/8507321836_5b8b13188f_z.jpg", + "id": 197388 + } + ], + "annotations": [ + { + "segmentation": [ + [ + 353.37, + 67.65, + 358.15, + 52.37, + 362.92, + 47.59, + 374.38, + 44.73, + 389.66, + 52.37, + 389.66, + 67.65, + 389.66, + 76.25, + 393.48, + 83.89, + 396.35, + 88.66, + 397.3, + 91.53, + 406.85, + 99.17, + 413.54, + 104.9, + 451.74, + 148.83, + 458.43, + 153.6, + 462.25, + 166.02, + 467.02, + 173.66, + 463.2, + 181.3, + 449.83, + 183.21, + 448.88, + 191.81, + 455.56, + 226.19, + 448.88, + 254.84, + 453.65, + 286.36, + 475.62, + 323.6, + 491.85, + 361.81, + 494.72, + 382.82, + 494.72, + 382.82, + 499.49, + 391.41, + 416.4, + 391.41, + 424.04, + 383.77, + 439.33, + 374.22, + 445.06, + 360.85, + 436.46, + 334.11, + 421.18, + 303.55, + 416.4, + 289.22, + 409.72, + 268.21, + 396.35, + 280.63, + 
405.9, + 298.77, + 417.36, + 324.56, + 425, + 349.39, + 425, + 357.99, + 419.27, + 360.85, + 394.44, + 367.54, + 362.92, + 370.4, + 346.69, + 367.54, + 360.06, + 362.76, + 369.61, + 360.85, + 382.98, + 340.8, + 355.28, + 271.08, + 360.06, + 266.3, + 386.8, + 219.5, + 368.65, + 162.2, + 348.6, + 175.57, + 309.44, + 187.03, + 301.8, + 192.76, + 288.43, + 193.72, + 282.7, + 193.72, + 280.79, + 187.03, + 280.79, + 174.62, + 287.47, + 171.75, + 291.29, + 171.75, + 295.11, + 171.75, + 306.57, + 166.98, + 312.3, + 165.07, + 345.73, + 142.14, + 350.51, + 117.31, + 350.51, + 102.03, + 350.51, + 90.57, + 353.37, + 65.74 + ] + ], + "num_keypoints": 17, + "area": 27789.11055, + "iscrowd": 0, + "keypoints": [ + 367, + 81, + 2, + 374, + 73, + 2, + 360, + 75, + 2, + 386, + 78, + 2, + 356, + 81, + 2, + 399, + 108, + 2, + 358, + 129, + 2, + 433, + 142, + 2, + 341, + 159, + 2, + 449, + 165, + 2, + 309, + 178, + 2, + 424, + 203, + 2, + 393, + 214, + 2, + 429, + 294, + 2, + 367, + 273, + 2, + 466, + 362, + 2, + 396, + 341, + 2 + ], + "image_id": 785, + "bbox": [ + 280.79, + 44.73, + 218.7, + 346.68 + ], + "category_id": 1, + "id": 442619 + }, + { + "segmentation": [ + [ + 98.56, + 273.72, + 132.9, + 267, + 140.37, + 281.93, + 165.75, + 285.66, + 156.79, + 264.01, + 170.23, + 261.02, + 177.7, + 272.97, + 182.18, + 279.69, + 200.85, + 268.49, + 212.79, + 255.05, + 188.9, + 256.54, + 164.26, + 240.12, + 139.62, + 212.49, + 109.01, + 221.45, + 103.04, + 220.71, + 122.45, + 202.04, + 113.49, + 196.07, + 96.32, + 168.44, + 97.06, + 162.47, + 110.5, + 136.34, + 112, + 124.39, + 91.09, + 110.95, + 80.64, + 114.68, + 71.68, + 131.86, + 62.72, + 147.54, + 57.49, + 156.5, + 48.53, + 168.44, + 41.07, + 180.39, + 38.08, + 193.08, + 40.32, + 205.03, + 47.04, + 213.24, + 54.5, + 216.23, + 82.13, + 252.06, + 91.09, + 271.48 + ] + ], + "num_keypoints": 14, + "area": 11025.219, + "iscrowd": 0, + "keypoints": [ + 99, + 144, + 2, + 104, + 141, + 2, + 96, + 137, + 2, + 0, + 0, + 0, + 78, + 133, + 2, + 56, + 161, + 2, + 81, + 162, + 2, + 0, + 0, + 0, + 103, + 208, + 2, + 116, + 204, + 2, + 0, + 0, + 0, + 57, + 246, + 1, + 82, + 259, + 1, + 137, + 219, + 2, + 138, + 247, + 2, + 177, + 256, + 2, + 158, + 296, + 1 + ], + "image_id": 40083, + "bbox": [ + 38.08, + 110.95, + 174.71, + 174.71 + ], + "category_id": 1, + "id": 198196 + }, + { + "segmentation": [ + [ + 257.76, + 288.05, + 273.4, + 258.26, + 325.55, + 253.79, + 335.23, + 232.93, + 326.3, + 186.74, + 333.74, + 177.05, + 327.79, + 153.21, + 333.74, + 142.04, + 344.17, + 139.06, + 353.11, + 139.06, + 359.07, + 145.02, + 360.56, + 148.74, + 362.05, + 168.86, + 388.87, + 197.17, + 397.81, + 276.88, + 372.48, + 293.27 + ] + ], + "num_keypoints": 15, + "area": 10171.9544, + "iscrowd": 0, + "keypoints": [ + 343, + 164, + 2, + 348, + 160, + 2, + 340, + 160, + 2, + 359, + 163, + 2, + 332, + 164, + 2, + 370, + 189, + 2, + 334, + 190, + 2, + 358, + 236, + 2, + 348, + 234, + 2, + 339, + 270, + 2, + 330, + 262, + 2, + 378, + 262, + 2, + 343, + 254, + 2, + 338, + 280, + 2, + 283, + 272, + 2, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "image_id": 40083, + "bbox": [ + 257.76, + 139.06, + 140.05, + 154.21 + ], + "category_id": 1, + "id": 230195 + }, + { + "segmentation": [ + [ + 285.37, + 126.5, + 281.97, + 127.72, + 280.76, + 132.33, + 280.76, + 136.46, + 275.17, + 143.26, + 275.9, + 158.08, + 277.6, + 164.4, + 278.33, + 173.87, + 278.33, + 183.83, + 279.79, + 191.11, + 281.97, + 194.76, + 284.89, + 192.09, + 284.89, + 186.99, + 284.89, + 181.16, + 284.64, + 177.51, + 285.86, + 173.87 + ] + ], + 
"num_keypoints": 0, + "area": 491.2669, + "iscrowd": 0, + "keypoints": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "image_id": 40083, + "bbox": [ + 275.17, + 126.5, + 10.69, + 68.26 + ], + "category_id": 1, + "id": 1202706 + }, + { + "segmentation": [ + [ + 339.34, + 107.97, + 338.38, + 102.19, + 339.34, + 91.58, + 335.49, + 84.84, + 326.81, + 74.23, + 312.35, + 74.23, + 301.75, + 74.23, + 295, + 86.76, + 295, + 93.51, + 292.11, + 99.3, + 287.29, + 102.19, + 291.14, + 107.01, + 295, + 107.01, + 295.96, + 112.79, + 301.75, + 115.69, + 305.6, + 119.54, + 307.53, + 123.4, + 317.17, + 123.4, + 311.39, + 129.18, + 286.32, + 139.79, + 274.75, + 139.79, + 264.15, + 138.82, + 262.22, + 144.61, + 261.26, + 147.5, + 253.54, + 147.5, + 247.76, + 150.39, + 249.69, + 159.07, + 256.44, + 161, + 262.22, + 161, + 268, + 161, + 276.68, + 161.96, + 284.39, + 168.71, + 293.07, + 174.49, + 301.75, + 174.49, + 308.49, + 169.67, + 308.49, + 188.95, + 311.39, + 194.74, + 312.35, + 208.23, + 307.53, + 221.73, + 297.89, + 229.44, + 281.5, + 250.65, + 269.93, + 262.22, + 278.61, + 320.06, + 281.5, + 331.63, + 276.68, + 338.38, + 270.9, + 349.95, + 262.22, + 356.7, + 253.54, + 359.59, + 253.54, + 365.37, + 274.75, + 365.37, + 291.14, + 365.37, + 306.57, + 359.59, + 303.67, + 352.84, + 297.89, + 340.31, + 293.07, + 318.13, + 295, + 294.03, + 293.07, + 278.61, + 294.03, + 270.9, + 305.6, + 259.33, + 313.31, + 299.82, + 319.1, + 309.46, + 341.27, + 317.17, + 384.65, + 330.67, + 387.55, + 335.49, + 383.69, + 341.27, + 397.19, + 350.91, + 398.15, + 363.44, + 398.15, + 375.01, + 405.86, + 374.05, + 409.72, + 357.66, + 411.65, + 342.24, + 416.47, + 328.74, + 417.43, + 321.03, + 410.68, + 319.1, + 401.04, + 318.13, + 392.37, + 318.13, + 382.73, + 314.28, + 348.98, + 300.78, + 339.34, + 293.07, + 334.52, + 285.36, + 340.31, + 259.33, + 340.31, + 246.8, + 340.31, + 242.94, + 350.91, + 228.48, + 358.62, + 214.98, + 355.22, + 204.32, + 357.05, + 196.11, + 361.61, + 188.82, + 361.61, + 181.97, + 365.26, + 165.63, + 367.54, + 139.18, + 366.17, + 123.68, + 361.15, + 112.73, + 353.86, + 107.72, + 351.58, + 105.89, + 344.74, + 105.89, + 340.18, + 109.08 + ] + ], + "num_keypoints": 15, + "area": 17123.92955, + "iscrowd": 0, + "keypoints": [ + 297, + 111, + 2, + 299, + 106, + 2, + 0, + 0, + 0, + 314, + 108, + 2, + 0, + 0, + 0, + 329, + 141, + 2, + 346, + 125, + 2, + 295, + 164, + 2, + 323, + 130, + 2, + 266, + 155, + 2, + 279, + 143, + 2, + 329, + 225, + 2, + 331, + 221, + 2, + 327, + 298, + 2, + 283, + 269, + 2, + 398, + 327, + 2, + 288, + 349, + 2 + ], + "image_id": 196141, + "bbox": [ + 247.76, + 74.23, + 169.67, + 300.78 + ], + "category_id": 1, + "id": 460541 + }, + { + "segmentation": [ + [ + 578.76, + 112.4, + 589.39, + 100.81, + 589.39, + 99.84, + 596.16, + 116.27, + 603.89, + 122.07, + 603.89, + 138.49, + 598.09, + 159.75, + 597.12, + 181, + 594.22, + 191.63, + 589.39, + 212.89, + 583.59, + 208.06, + 583.59, + 206.13, + 582.63, + 200.33, + 582.63, + 193.57, + 582.63, + 182.94, + 575.86, + 181, + 567.17, + 197.43, + 571.03, + 203.23, + 567.17, + 207.09, + 555.57, + 208.06, + 562.34, + 200.33, + 565.24, + 190.67, + 565.24, + 173.27, + 566.2, + 163.61, + 568.14, + 156.85, + 570.07, + 148.15, + 566.2, + 143.32, + 565.24, + 133.66, + 575.86, + 118.2 + ] + ], + "num_keypoints": 15, + "area": 2789.0208, + "iscrowd": 
0, + "keypoints": [ + 589, + 113, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 595, + 112, + 1, + 584, + 110, + 2, + 598, + 123, + 2, + 579, + 119, + 2, + 594, + 141, + 2, + 570, + 137, + 2, + 576, + 135, + 2, + 585, + 139, + 2, + 590, + 157, + 2, + 574, + 156, + 2, + 589, + 192, + 2, + 565, + 189, + 1, + 587, + 222, + 1, + 557, + 219, + 1 + ], + "image_id": 196141, + "bbox": [ + 555.57, + 99.84, + 48.32, + 113.05 + ], + "category_id": 1, + "id": 488308 + }, + { + "segmentation": [ + [ + 446.96, + 73.13, + 445.81, + 77.71, + 443.33, + 78.29, + 441.61, + 81.72, + 441.23, + 84.58, + 440.85, + 90.5, + 442.19, + 94.32, + 443.52, + 97.18, + 443.52, + 102.33, + 442.57, + 105.58, + 446.58, + 105.19, + 447.15, + 99.85, + 447.53, + 94.89, + 446, + 93.55, + 446.38, + 92.03, + 453.64, + 92.41, + 454.02, + 94.51, + 457.64, + 94.51, + 455.74, + 88.4, + 455.35, + 82.29, + 453.64, + 78.48, + 451.92, + 77.71, + 452.87, + 74.47, + 450.58, + 73.13 + ] + ], + "num_keypoints": 0, + "area": 285.7906, + "iscrowd": 0, + "keypoints": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "image_id": 196141, + "bbox": [ + 440.85, + 73.13, + 16.79, + 32.45 + ], + "category_id": 1, + "id": 508900 + }, + { + "segmentation": [ + [ + 497.15, + 413.95, + 531.55, + 417.68, + 548.74, + 411.7, + 551.74, + 403.48, + 546.5, + 394.5, + 543.51, + 386.28, + 571.93, + 390.76, + 574.92, + 391.51, + 579.4, + 409.46, + 605.58, + 409.46, + 615.3, + 408.71, + 607.07, + 389.27, + 598.1, + 381.79, + 607.82, + 366.83, + 607.82, + 352.63, + 610.06, + 338.42, + 619.04, + 345.15, + 631, + 344.4, + 630.25, + 336.92, + 626.51, + 318.98, + 616.05, + 286.07, + 598.85, + 263.64, + 585.39, + 257.66, + 593.61, + 244.2, + 601.09, + 235.97, + 596.6, + 219.52, + 587.63, + 211.29, + 577.91, + 208.3, + 563.7, + 206.81, + 556.22, + 214.29, + 548, + 217.28, + 539.77, + 229.99, + 539.77, + 241.95, + 539.02, + 247.19, + 523.32, + 247.19, + 503.88, + 254.67, + 485.93, + 254.67, + 479.95, + 248.68, + 473.22, + 241.21, + 485.93, + 227, + 477.7, + 215.78, + 457.51, + 215.78, + 453.77, + 235.22, + 463.5, + 246.44, + 465.74, + 261.4, + 490.42, + 274.11, + 501.63, + 275.6, + 504.62, + 286.07, + 519.58, + 286.07, + 522.57, + 292.06, + 512.85, + 310, + 515.09, + 330.94, + 530.05, + 343.65, + 505.37, + 341.41, + 479.95, + 339.91, + 465.74, + 346.64, + 463.5, + 358.61, + 473.97, + 381.04, + 485.18, + 390.02, + 501.63, + 398.99, + 504.62, + 404.22, + 491.16, + 412.45, + 495.65, + 417.68 + ] + ], + "num_keypoints": 12, + "area": 21608.94075, + "iscrowd": 0, + "keypoints": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 552, + 234, + 2, + 0, + 0, + 0, + 531, + 262, + 2, + 600, + 283, + 2, + 480, + 260, + 2, + 622, + 336, + 2, + 466, + 242, + 2, + 0, + 0, + 0, + 546, + 365, + 2, + 592, + 371, + 2, + 470, + 351, + 2, + 551, + 330, + 2, + 519, + 394, + 2, + 589, + 391, + 2 + ], + "image_id": 196141, + "bbox": [ + 453.77, + 206.81, + 177.23, + 210.87 + ], + "category_id": 1, + "id": 1717641 + }, + { + "segmentation": [ + [ + 58.93, + 163.67, + 47.18, + 161.59, + 36.12, + 93.86, + 41.65, + 82.8, + 40.27, + 69.66, + 50.64, + 67.59, + 55.48, + 73.81, + 63.08, + 92.47, + 66.53, + 99.38, + 65.15, + 109.06, + 61, + 127.03, + 59.62, + 162.97 + ] + ], + "num_keypoints": 17, + "area": 1870.14015, + "iscrowd": 0, + "keypoints": [ + 48, + 79, + 2, + 50, + 77, + 2, + 46, + 77, + 2, + 54, 
+ 78, + 2, + 45, + 78, + 2, + 57, + 90, + 2, + 42, + 90, + 2, + 63, + 103, + 2, + 42, + 105, + 2, + 56, + 113, + 2, + 49, + 112, + 2, + 55, + 117, + 2, + 44, + 117, + 2, + 55, + 140, + 2, + 47, + 140, + 2, + 56, + 160, + 2, + 49, + 159, + 2 + ], + "image_id": 196141, + "bbox": [ + 36.12, + 67.59, + 30.41, + 96.08 + ], + "category_id": 1, + "id": 1724673 + }, + { + "segmentation": [ + [ + 139.41, + 321.58, + 144.78, + 326.56, + 196.92, + 314.68, + 196.16, + 309.31, + 207.28, + 292.05, + 213.03, + 284, + 228.75, + 270.2, + 233.35, + 261.38, + 244.47, + 252.56, + 254.44, + 237.61, + 267.86, + 215.37, + 272.08, + 212.68, + 285.5, + 232.62, + 294.7, + 250.64, + 295.08, + 264.06, + 290.87, + 277.87, + 290.87, + 286.3, + 289.71, + 298.19, + 281.66, + 318.89, + 282.05, + 334.23, + 295.08, + 340.37, + 315.02, + 343.82, + 314.25, + 336.53, + 310.42, + 330.4, + 301.98, + 322.34, + 304.29, + 310.84, + 304.67, + 302.79, + 306.2, + 292.05, + 311.19, + 275.56, + 313.87, + 251.79, + 311.19, + 234.54, + 312.72, + 224.57, + 310.42, + 212.3, + 307.74, + 201.56, + 306.2, + 193.51, + 306.59, + 183.16, + 310.04, + 177.41, + 314.64, + 173.19, + 316.94, + 171.65, + 328.06, + 163.99, + 337.64, + 157.85, + 343.4, + 159.77, + 346.46, + 166.67, + 346.85, + 170.5, + 346.46, + 179.71, + 346.85, + 188.53, + 346.85, + 191.98, + 344.55, + 198.11, + 342.25, + 203.48, + 338.41, + 208.46, + 335.34, + 212.68, + 335.34, + 217.67, + 343.01, + 222.65, + 354.9, + 210.76, + 359.12, + 196.19, + 361.8, + 173.19, + 361.42, + 161.69, + 356.43, + 150.18, + 344.93, + 135.61, + 343.01, + 132.93, + 345.31, + 126.41, + 345.7, + 124.88, + 343.4, + 115.29, + 340.33, + 104.17, + 337.26, + 102.25, + 330.36, + 103.4, + 326.14, + 106.09, + 320.01, + 111.07, + 314.64, + 119.89, + 310.42, + 121.04, + 292.02, + 121.81, + 279.75, + 127.94, + 244.09, + 138.68, + 240.25, + 142.51, + 238.72, + 154.4, + 239.1, + 163.6, + 239.87, + 173.96, + 241.79, + 181.24, + 248.3, + 192.36, + 240.25, + 206.55, + 236.42, + 219.2, + 229.9, + 236.45, + 225.3, + 247.57, + 218.4, + 254.48, + 208.81, + 265.6, + 202.29, + 278.25, + 195.39, + 285.92, + 188.49, + 292.05, + 183.5, + 295.89, + 176.6, + 302.41, + 172, + 308.54, + 167.78, + 313.14, + 146.31, + 318.89 + ] + ], + "num_keypoints": 16, + "area": 14250.29385, + "iscrowd": 0, + "keypoints": [ + 334, + 135, + 2, + 340, + 129, + 2, + 331, + 129, + 2, + 0, + 0, + 0, + 319, + 123, + 2, + 340, + 146, + 2, + 292, + 133, + 2, + 353, + 164, + 2, + 246, + 144, + 2, + 354, + 197, + 2, + 250, + 185, + 2, + 293, + 197, + 2, + 265, + 187, + 2, + 305, + 252, + 2, + 231, + 254, + 2, + 293, + 321, + 2, + 193, + 297, + 2 + ], + "image_id": 197388, + "bbox": [ + 139.41, + 102.25, + 222.39, + 241.57 + ], + "category_id": 1, + "id": 437295 + }, + { + "segmentation": [ + [ + 287.17, + 121.42, + 294.22, + 106.44, + 302.15, + 116.13, + 303.03, + 121.42 + ], + [ + 297.74, + 99.39, + 310.08, + 76.49, + 326.81, + 76.49, + 329.46, + 67.68, + 337.38, + 61.52, + 346.19, + 62.4, + 353.24, + 65.92, + 353.24, + 76.49, + 355.88, + 84.42, + 359.41, + 87.94, + 362.05, + 96.75, + 354.12, + 139.04, + 349.72, + 142.56, + 345.31, + 139.92, + 349.72, + 117.89, + 348.84, + 108.2, + 345.31, + 113.49, + 336.5, + 101.16, + 325.93, + 110.85, + 311.84, + 123.18 + ], + [ + 324.17, + 176.91, + 332.1, + 191.89, + 328.58, + 198.94, + 327.69, + 205.98, + 333.86, + 213.03, + 337.38, + 227.13, + 332.98, + 227.13, + 319.77, + 219.2, + 313.6, + 211.27 + ], + [ + 332.98, + 165.46, + 341.79, + 161.06, + 336.5, + 174.27, + 333.86, + 186.6, + 326.81, + 176.03 + ] + ], + 
"num_keypoints": 16, + "area": 3404.869, + "iscrowd": 0, + "keypoints": [ + 345, + 92, + 2, + 350, + 87, + 2, + 341, + 87, + 2, + 0, + 0, + 0, + 330, + 83, + 2, + 357, + 94, + 2, + 316, + 92, + 2, + 357, + 104, + 2, + 291, + 123, + 1, + 351, + 133, + 2, + 281, + 136, + 1, + 326, + 131, + 1, + 305, + 128, + 1, + 336, + 152, + 1, + 303, + 171, + 1, + 318, + 206, + 2, + 294, + 211, + 1 + ], + "image_id": 197388, + "bbox": [ + 287.17, + 61.52, + 74.88, + 165.61 + ], + "category_id": 1, + "id": 467657 + }, + { + "segmentation": [ + [ + 547.95, + 201.57, + 546.73, + 190.62, + 547.95, + 181.49, + 547.95, + 169.31, + 547.95, + 156.53, + 546.73, + 144.36, + 544.3, + 139.49, + 540.04, + 132.19, + 540.04, + 121.84, + 542.47, + 107.24, + 544.3, + 99.33, + 548.56, + 88.98, + 561.95, + 78.03, + 572.29, + 71.33, + 572.29, + 71.33, + 572.29, + 65.25, + 574.12, + 51.86, + 583.86, + 48.81, + 592.99, + 48.81, + 597.86, + 57.33, + 599.07, + 64.64, + 608.2, + 76.81, + 614.9, + 82.89, + 620.98, + 89.59, + 628.89, + 93.24, + 636.81, + 101.76, + 640, + 109.67, + 640, + 115.76, + 640, + 127.93, + 620.37, + 111.5, + 619.16, + 111.5, + 618.55, + 112.11, + 608.2, + 105.41, + 600.9, + 119.41, + 592.99, + 131.58, + 596.03, + 148.01, + 605.16, + 162.01, + 612.46, + 190.01, + 614.9, + 204.61, + 606.98, + 216.78, + 603.94, + 226.52, + 606.38, + 239.91, + 605.16, + 256.95, + 604.55, + 264.26, + 602.12, + 271.56, + 586.29, + 272.17, + 584.47, + 255.13, + 588.73, + 237.48, + 592.99, + 221.65, + 596.64, + 207.05, + 596.64, + 197.31, + 594.2, + 186.96, + 584.47, + 172.36, + 577.77, + 166.27, + 570.47, + 170.53, + 558.91, + 179.66, + 555.86, + 192.44, + 548.56, + 198.53, + 547.95, + 198.53 + ] + ], + "num_keypoints": 15, + "area": 8913.98475, + "iscrowd": 0, + "keypoints": [ + 591, + 78, + 2, + 594, + 74, + 2, + 586, + 74, + 2, + 0, + 0, + 0, + 573, + 70, + 2, + 598, + 86, + 2, + 566, + 93, + 2, + 626, + 105, + 2, + 546, + 126, + 2, + 0, + 0, + 0, + 561, + 150, + 2, + 582, + 150, + 2, + 557, + 154, + 2, + 606, + 194, + 2, + 558, + 209, + 1, + 591, + 252, + 2, + 539, + 262, + 1 + ], + "image_id": 197388, + "bbox": [ + 540.04, + 48.81, + 99.96, + 223.36 + ], + "category_id": 1, + "id": 531914 + }, + { + "segmentation": [ + [ + 561.51, + 385.38, + 572.11, + 352.71, + 570.34, + 317.4, + 559.75, + 282.08, + 552.68, + 267.07, + 565.93, + 236.17, + 583.59, + 236.17, + 602.13, + 260.01, + 614.49, + 286.5, + 628.61, + 302.39, + 639.21, + 281.2, + 614.49, + 251.18, + 588, + 218.51, + 595.95, + 202.62, + 594.18, + 185.85, + 580.05, + 170.84, + 562.4, + 179.67, + 557.98, + 198.21, + 554.45, + 202.62, + 532.38, + 199.97, + 525.32, + 202.62, + 511.19, + 229.11, + 493.53, + 256.48, + 484.7, + 276.78, + 451.15, + 323.58, + 423.78, + 338.59, + 388.47, + 373.9, + 372.58, + 387.14, + 396.41, + 388.03, + 418.49, + 367.72, + 450.27, + 345.65, + 501.48, + 306.8, + 520.02, + 301.5, + 552.68, + 340.35, + 543.86, + 369.49 + ] + ], + "num_keypoints": 16, + "area": 14267.20475, + "iscrowd": 0, + "keypoints": [ + 580, + 211, + 2, + 586, + 206, + 2, + 574, + 204, + 2, + 0, + 0, + 0, + 562, + 198, + 2, + 584, + 220, + 2, + 529, + 215, + 2, + 599, + 242, + 2, + 512, + 260, + 2, + 619, + 274, + 2, + 538, + 285, + 2, + 537, + 288, + 2, + 506, + 277, + 2, + 562, + 332, + 2, + 452, + 332, + 2, + 550, + 387, + 1, + 402, + 371, + 2 + ], + "image_id": 197388, + "bbox": [ + 372.58, + 170.84, + 266.63, + 217.19 + ], + "category_id": 1, + "id": 533949 + }, + { + "segmentation": [ + [ + 2.03, + 75.18, + 10.85, + 70.58, + 16.99, + 65.59, + 17.75, + 55.24, + 20.05, + 
50.25, + 29.64, + 43.74, + 37.31, + 47.57, + 41.52, + 53.7, + 43.83, + 64.82, + 53.03, + 70.19, + 61.85, + 77.09, + 72.58, + 87.06, + 74.88, + 79.01, + 78.72, + 73.64, + 86.39, + 77.86, + 90.6, + 90.13, + 86, + 93.2, + 82.17, + 102.4, + 75.27, + 106.24, + 68.75, + 104.7, + 50.34, + 90.9, + 43.06, + 112.37, + 40.76, + 123.11, + 42.29, + 130.78, + 48.04, + 161.83, + 52.26, + 190.59, + 50.73, + 210.15, + 44.21, + 245.04, + 50.34, + 256.16, + 53.03, + 261.53, + 47.28, + 263.83, + 40.37, + 263.83, + 31.56, + 260.76, + 28.1, + 256.16, + 26.95, + 244.65, + 29.25, + 233.54, + 32.71, + 223.95, + 33.09, + 213.98, + 32.32, + 206.31, + 32.71, + 194.81, + 33.09, + 185.61, + 24.65, + 177.17, + 16.99, + 161.45, + 13.53, + 176.02, + 10.85, + 206.31, + 1.65, + 231.62, + 1.65, + 235.84, + 0.5, + 146.88, + 0.88, + 122.34, + 1.65, + 75.56 + ] + ], + "num_keypoints": 13, + "area": 8260.75085, + "iscrowd": 0, + "keypoints": [ + 36, + 79, + 2, + 40, + 74, + 2, + 31, + 75, + 2, + 0, + 0, + 0, + 19, + 69, + 2, + 45, + 77, + 2, + 2, + 89, + 2, + 74, + 99, + 2, + 0, + 0, + 0, + 78, + 92, + 2, + 0, + 0, + 0, + 33, + 149, + 2, + 7, + 153, + 2, + 44, + 196, + 2, + 2, + 205, + 2, + 35, + 245, + 2, + 0, + 0, + 0 + ], + "image_id": 197388, + "bbox": [ + 0.5, + 43.74, + 90.1, + 220.09 + ], + "category_id": 1, + "id": 543117 + } + ] +} diff --git a/tests/data/coco/test_coco_det_AP_H_56.json b/tests/data/coco/test_coco_det_AP_H_56.json index e166de0c64..6313c10c27 100644 --- a/tests/data/coco/test_coco_det_AP_H_56.json +++ b/tests/data/coco/test_coco_det_AP_H_56.json @@ -1,1300 +1,1300 @@ -[ - { - "bbox": [ - 277.1183158543966, - 45.699667786163765, - 225.09126579259754, - 333.5602652943344 - ], - "category_id": 1, - "image_id": 785, - "score": 0.9999731779098511 - }, - { - "bbox": [ - 281.950178384611, - 44.56940615106412, - 212.94084624881856, - 344.98328732330305 - ], - "category_id": 1, - "image_id": 785, - "score": 0.30122078732076535 - }, - { - "bbox": [ - 268.01163251716935, - 43.98534000198524, - 238.46561540311666, - 341.79494090239166 - ], - "category_id": 1, - "image_id": 785, - "score": 0.09537058952055945 - }, - { - "bbox": [ - 286.24685022227766, - 41.757854101745124, - 223.83092714841916, - 338.2323329803221 - ], - "category_id": 1, - "image_id": 785, - "score": 0.02974060317622316 - }, - { - "bbox": [ - 262.7942371596824, - 63.5024099030928, - 3.164080328447767, - 4.2931809049024 - ], - "category_id": 1, - "image_id": 785, - "score": 0.01697496324777603 - }, - { - "bbox": [ - 460.79934160584526, - 54.24632570186816, - 3.1264258976875112, - 5.30507188737684 - ], - "category_id": 1, - "image_id": 785, - "score": 0.011266417550507977 - }, - { - "bbox": [ - 457.74867915702885, - 54.642754761043186, - 3.1463156275978577, - 5.30487109975607 - ], - "category_id": 1, - "image_id": 785, - "score": 0.009877337450527405 - }, - { - "bbox": [ - 283.6326909128262, - 48.41948428440242, - 208.11973684568892, - 329.94523003138954 - ], - "category_id": 1, - "image_id": 785, - "score": 0.009197559746208601 - }, - { - "bbox": [ - 207.3711401479468, - 63.36160650309581, - 2.93447433643874, - 3.468569626452343 - ], - "category_id": 1, - "image_id": 785, - "score": 0.008295997977256775 - }, - { - "bbox": [ - 458.51562228937183, - 59.46703918462182, - 3.272054625157523, - 4.619048555254508 - ], - "category_id": 1, - "image_id": 785, - "score": 0.008173274752520696 - }, - { - "bbox": [ - 461.08150984219986, - 58.545150021384245, - 3.249185872840485, - 5.844152786677249 - ], - "category_id": 1, - "image_id": 785, - "score": 
0.007174033771332924 - }, - { - "bbox": [ - 259.83498140597413, - 62.3517572900752, - 2.9195241669668235, - 4.559862560086913 - ], - "category_id": 1, - "image_id": 785, - "score": 0.006377489306032658 - }, - { - "bbox": [ - 206.80460173580252, - 62.5220090883142, - 3.1584765729102457, - 3.520867237953432 - ], - "category_id": 1, - "image_id": 785, - "score": 0.005891890564944476 - }, - { - "bbox": [ - 459.5511247244534, - 54.89593493663015, - 3.230180209185619, - 5.595806307593442 - ], - "category_id": 1, - "image_id": 785, - "score": 0.005863019167811413 - }, - { - "bbox": [ - 457.2902794671802, - 58.740074277713674, - 3.316325358758718, - 5.415639229745793 - ], - "category_id": 1, - "image_id": 785, - "score": 0.005827399869551478 - }, - { - "bbox": [ - 262.6182415084011, - 62.83701378140133, - 3.0697625867510396, - 4.148177980683975 - ], - "category_id": 1, - "image_id": 785, - "score": 0.005008531179775657 - }, - { - "bbox": [ - 209.95621769919438, - 63.58898404912936, - 3.097942773760309, - 3.9870186328652224 - ], - "category_id": 1, - "image_id": 785, - "score": 0.004536413883644729 - }, - { - "bbox": [ - 459.25342388420654, - 59.04022778823142, - 3.6918324658356596, - 6.2054702421954175 - ], - "category_id": 1, - "image_id": 785, - "score": 0.00384555541357817 - }, - { - "bbox": [ - 208.42983867925258, - 62.66620641784881, - 2.939843970544956, - 3.5128275773914908 - ], - "category_id": 1, - "image_id": 785, - "score": 0.003631657359987463 - }, - { - "bbox": [ - 213.41976294267863, - 62.71431286477077, - 2.528260915549936, - 3.4008991982205927 - ], - "category_id": 1, - "image_id": 785, - "score": 0.0033746918197721243 - }, - { - "bbox": [ - 161.97753405615518, - 61.730313756833205, - 2.8917805026908923, - 4.075206275914702 - ], - "category_id": 1, - "image_id": 785, - "score": 0.003240120830014348 - }, - { - "bbox": [ - 457.5244691894709, - 54.70691525725411, - 6.2095088496953394, - 8.39989354390223 - ], - "category_id": 1, - "image_id": 785, - "score": 0.0028898494491729535 - }, - { - "bbox": [ - 376.9178826443722, - 172.73052709081233, - 261.25961331942824, - 215.58502374291808 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.9999579191207886 - }, - { - "bbox": [ - 163.9687616410633, - 80.41943032016765, - 200.19976794356094, - 259.2492676442412 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.9999035596847534 - }, - { - "bbox": [ - 1.218278714743892, - 47.45300387559155, - 90.54113395922819, - 220.98988830655202 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.9998950958251953 - }, - { - "bbox": [ - 542.055600304138, - 50.78951110214531, - 97.65374183236963, - 187.04227881069528 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.9867184565824798 - }, - { - "bbox": [ - 281.8670596900398, - 58.53450299402189, - 82.11294655596839, - 86.20744367046282 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.9736837699533164 - }, - { - "bbox": [ - 279.94252362290945, - 59.89339467038772, - 81.61478084086349, - 147.45283612214442 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.5819535544584765 - }, - { - "bbox": [ - 535.4019505240893, - 48.1844256878009, - 105.27804947591062, - 239.31002317693435 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.4461107432274131 - }, - { - "bbox": [ - 168.57347257788564, - 103.56636286623898, - 188.67170536354314, - 230.37891238088162 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.3492993107937081 - }, - { - "bbox": [ - 372.0082417618134, - 163.99891619439003, - 
236.90653900133447, - 224.81380141719242 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.32743142104478484 - }, - { - "bbox": [ - 1.388905257619702, - 35.86500152126901, - 87.67960208998994, - 220.4727970838673 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.31936580857404523 - }, - { - "bbox": [ - 283.65021434011885, - 57.518455359834334, - 81.08575097216988, - 85.11418577738398 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.1897958763078807 - }, - { - "bbox": [ - 543.1779979060689, - 37.87532382036906, - 94.66280745251572, - 191.29243939893223 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.17261266781373394 - }, - { - "bbox": [ - 258.5633408567725, - 60.27068241963883, - 102.3686462123, - 151.42071713691902 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.13677866226510016 - }, - { - "bbox": [ - 380.00719017305823, - 181.1782438214781, - 257.505490623621, - 199.13011090655024 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.12246560363252844 - }, - { - "bbox": [ - 177.40899563109633, - 78.35446740631232, - 189.53651142957023, - 263.45315194093274 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.1013108540546625 - }, - { - "bbox": [ - 0.7289829477709847, - 43.73276160140667, - 85.41587076323728, - 221.3344387113314 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.09960434746646744 - }, - { - "bbox": [ - 461.84120081448543, - 144.75681027711394, - 7.162490813687327, - 8.531497919325176 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.08173750340938568 - }, - { - "bbox": [ - 296.17189402683806, - 85.73360082440907, - 62.47594584815931, - 130.1418854933646 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.0717465542448663 - }, - { - "bbox": [ - 539.1454728501081, - 43.14242476252679, - 100.3810332864756, - 247.18086755992118 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.06011599272181979 - }, - { - "bbox": [ - 277.97115514687323, - 62.833796387748365, - 85.73469418408934, - 109.64015622069529 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.0423359872651069 - }, - { - "bbox": [ - 462.1613388043361, - 146.12331612284657, - 4.619414527763752, - 5.653142729845399 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.03960325857728385 - }, - { - "bbox": [ - 365.7412020686737, - 174.63881714430087, - 251.65152786857914, - 216.71453560361638 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.03937998874316995 - }, - { - "bbox": [ - 3.4297732174796693, - 45.43705430480154, - 92.63472057783511, - 222.82923167372067 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.033127322744961746 - }, - { - "bbox": [ - 169.87771310995316, - 89.66612191248007, - 182.26201179942262, - 244.24356591209786 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.03232751908601077 - }, - { - "bbox": [ - 236.36941077406334, - 63.89780825602214, - 126.04036089393139, - 167.83640884370914 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.026460597694444848 - }, - { - "bbox": [ - 306.015998970117, - 102.95796459236254, - 50.95681252313989, - 115.84925059311661 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.02226386399182351 - }, - { - "bbox": [ - 537.318841521999, - 51.127194758764055, - 100.70779100270272, - 184.38821643554354 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.021828400794543387 - }, - { - "bbox": [ - 462.4003780259345, - 145.2270003005055, - 5.570865375100425, - 6.968161205149954 - ], - "category_id": 1, - 
"image_id": 197388, - "score": 0.017564592704083917 - }, - { - "bbox": [ - 284.4247396061427, - 58.40109305610073, - 77.51981649355616, - 85.87582588813615 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.015670991050973693 - }, - { - "bbox": [ - 381.11136505330313, - 182.22526492755827, - 252.6961926281694, - 195.18863447956443 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.012290037721773745 - }, - { - "bbox": [ - 159.00697010469204, - 66.94814529991709, - 208.17784842532066, - 275.3418926190766 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.010543055168754003 - }, - { - "bbox": [ - 0.0, - 41.78049849392192, - 88.22526407776418, - 228.8951048951705 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.009550385293192926 - }, - { - "bbox": [ - 577.9447869595953, - 225.0889245399691, - 34.613561069282355, - 45.224848999211105 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.009009368302155088 - }, - { - "bbox": [ - 461.84120081448543, - 144.75681027711394, - 7.162490813687327, - 8.531497919325176 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.008478489359995936 - }, - { - "bbox": [ - 536.7620147243282, - 50.12388034294447, - 103.91798527567175, - 227.99503472686746 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.0070238283037164315 - }, - { - "bbox": [ - 324.4889601722706, - 132.0053388533619, - 33.860410488241655, - 86.62326758044719 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.006766568381450841 - }, - { - "bbox": [ - 246.15395215941302, - 55.57516986353281, - 114.57893265029415, - 151.51097731653135 - ], - "category_id": 1, - "image_id": 197388, - "score": 0.00619416668365814 - }, - { - "bbox": [ - 38.32789823729127, - 112.41407584232527, - 174.68030024685248, - 169.5690071995081 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.9999903440475464 - }, - { - "bbox": [ - 273.75504650493133, - 127.03007800217645, - 13.119059034012025, - 66.89919582171933 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.9987139701843262 - }, - { - "bbox": [ - 281.037309318129, - 138.89800552022552, - 115.77299430404673, - 161.8925392525125 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.9967334429627354 - }, - { - "bbox": [ - 122.98736914581909, - 149.19548926043387, - 13.238023418245518, - 13.251921410601938 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.7115740632536128 - }, - { - "bbox": [ - 134.73643174966296, - 136.1444006258907, - 11.484101688887165, - 24.515063595289917 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.6175192526221182 - }, - { - "bbox": [ - 244.00963353440733, - 141.97232651644495, - 149.05240181123492, - 151.9715830001215 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.4946145965118973 - }, - { - "bbox": [ - 275.164993708296, - 126.95531864312014, - 13.321305363409294, - 66.11390534184258 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.4050845742423741 - }, - { - "bbox": [ - 42.96185669219733, - 122.34524983009223, - 160.1285645732864, - 161.9463250366397 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.353162111215626 - }, - { - "bbox": [ - 119.6385577246031, - 155.7402521228216, - 13.35265116435049, - 26.52128467487711 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.28122130800324224 - }, - { - "bbox": [ - 134.01278713702155, - 135.5395238881317, - 11.64567949798922, - 24.682523935864452 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.19370334661431887 - }, - { - "bbox": [ - 
124.09760300731958, - 148.1338264630807, - 11.235262772767982, - 13.52837293393398 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.176868630098971 - }, - { - "bbox": [ - 218.7332213212989, - 140.0443329358783, - 180.4683469351732, - 156.8554518569021 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.16822000522327524 - }, - { - "bbox": [ - 270.92053528959764, - 133.3265646431611, - 13.58464710826729, - 56.339971422777694 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.1562438273124175 - }, - { - "bbox": [ - 37.809250550065954, - 105.79757078726388, - 182.54979468741817, - 184.99414098124603 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.14206553007930756 - }, - { - "bbox": [ - 131.5670033941938, - 158.319905396887, - 9.554075877756475, - 21.518604078379468 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.1142622835492838 - }, - { - "bbox": [ - 127.07848171294685, - 138.86839277431187, - 17.235128293754656, - 44.84156945207431 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.09938282001938761 - }, - { - "bbox": [ - 275.15638186104223, - 133.5832174441871, - 10.20764095132887, - 60.2529082432996 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.08779323860838567 - }, - { - "bbox": [ - 118.09746041875155, - 153.9768088492941, - 17.64612772931838, - 33.0168198306535 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.08400380428176607 - }, - { - "bbox": [ - 129.65247011589898, - 146.21014275291188, - 9.816644995735373, - 16.98788352109895 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.07980794934855787 - }, - { - "bbox": [ - 271.7621155363754, - 144.86674821981342, - 124.64715453387907, - 156.9482558015152 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.07801336023989208 - }, - { - "bbox": [ - 122.31437055574987, - 149.80085696138593, - 14.266245774025762, - 12.463835012516398 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.06346535355569785 - }, - { - "bbox": [ - 34.56564215631444, - 135.92815585957712, - 177.51220438385354, - 164.41951766953704 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.0485074333765967 - }, - { - "bbox": [ - 136.7368415229119, - 137.89135149894196, - 9.122227037700043, - 22.213023488378155 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.04772781404400169 - }, - { - "bbox": [ - 123.3235499944418, - 150.25321417348, - 15.765761854272228, - 36.16957895970921 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.04220727754152085 - }, - { - "bbox": [ - 271.90779626938615, - 128.14539407135078, - 15.405080085072711, - 64.71005682344074 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.04092462762153748 - }, - { - "bbox": [ - 114.0193235709124, - 155.5618252886575, - 9.112663847332854, - 14.913955482463706 - ], - "category_id": 1, - "image_id": 40083, - "score": 0.040561411233867466 - }, - { - "bbox": [ - 246.79480278830977, - 74.45452361185933, - 168.83467296399175, - 294.5553838783887 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.9998471736907959 - }, - { - "bbox": [ - 449.91721482790945, - 204.96684769367067, - 185.0938399278399, - 209.68341364145596 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.9993680119514465 - }, - { - "bbox": [ - 551.8933527530817, - 98.62668626165973, - 53.015730818431166, - 114.70768739332982 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.9989681245939074 - }, - { - "bbox": [ - 36.629787184254866, - 68.37446568096026, - 33.14949933628988, - 95.8618173172063 - ], 
- "category_id": 1, - "image_id": 196141, - "score": 0.9987284541130066 - }, - { - "bbox": [ - 440.89995321368673, - 70.30641025016695, - 19.43814726089363, - 37.077964642141026 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.9947758913040161 - }, - { - "bbox": [ - 601.8062068801571, - 88.95295148681318, - 16.128385553229577, - 24.398472250098138 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.7787292817106939 - }, - { - "bbox": [ - 443.0809847626748, - 71.63759967713678, - 13.50749833723944, - 32.66811758890536 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.4904795373325092 - }, - { - "bbox": [ - 396.569778686132, - 70.2787260371438, - 13.479104730026052, - 31.759617864735645 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.4112498931182214 - }, - { - "bbox": [ - 38.70719296509935, - 70.61443452888409, - 28.17963315510066, - 92.31016180688292 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.3796398182128506 - }, - { - "bbox": [ - 609.3142175988798, - 93.72376246104807, - 19.058191027280486, - 20.77005778794522 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.370328633830097 - }, - { - "bbox": [ - 548.7095132625554, - 98.39472701114634, - 53.25156101474022, - 116.43788199987897 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.33102923130101364 - }, - { - "bbox": [ - 455.5297663676009, - 206.88078209027378, - 175.70291860814734, - 199.34403654904446 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.3069290034626759 - }, - { - "bbox": [ - 250.74661573104714, - 87.13280710904513, - 167.45142937734437, - 278.3106151544837 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.30579873324356427 - }, - { - "bbox": [ - 440.7002672189753, - 69.17369758813695, - 14.444703091985616, - 37.00946842030504 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.25331338842056605 - }, - { - "bbox": [ - 614.9353977385917, - 95.74403799582933, - 11.596245346674664, - 17.631981747095708 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.22204102380904406 - }, - { - "bbox": [ - 400.60963922399134, - 70.43862641691737, - 8.331775245023891, - 35.000620170929324 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.20590268390631786 - }, - { - "bbox": [ - 602.6848618804396, - 88.3983294514046, - 15.524266109773862, - 24.329680417924536 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.1935096033322262 - }, - { - "bbox": [ - 453.62495235047044, - 80.93588476309868, - 8.634490931609093, - 24.416622635007826 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.13682630359796108 - }, - { - "bbox": [ - 438.1383792082668, - 71.62832244418284, - 13.671594135308055, - 34.59094773941301 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.12521365808926627 - }, - { - "bbox": [ - 37.07150693742372, - 71.09337416480857, - 29.051661261168164, - 90.74910484197981 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.11572668958758377 - }, - { - "bbox": [ - 612.4694532238449, - 94.33977605307147, - 11.44235234183725, - 18.834863504196264 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.1118136151149066 - }, - { - "bbox": [ - 601.3005939432458, - 93.44761682206529, - 12.158258551431686, - 21.16533746684057 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.10474070969851616 - }, - { - "bbox": [ - 552.5681619230662, - 93.99774029686462, - 52.01820025716597, - 118.51885706193504 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.10326196808658804 - }, - { - 
"bbox": [ - 398.5848517781443, - 73.06106969434823, - 9.784228227546066, - 31.1350301063286 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.09513584625155845 - }, - { - "bbox": [ - 447.4145013754455, - 199.11669450357687, - 182.9378852593169, - 211.20266858232594 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.09457972184460144 - }, - { - "bbox": [ - 242.46158239970538, - 71.50036639162563, - 171.43617162489392, - 297.42260463621386 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.09176039055855717 - }, - { - "bbox": [ - 597.2197814264931, - 82.37761224901661, - 11.327105500584025, - 31.481263735129318 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.08028100931968704 - }, - { - "bbox": [ - 599.0760153957814, - 81.53235136929479, - 7.865899180085421, - 9.27911853791521 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.06306317158251058 - }, - { - "bbox": [ - 458.0528386594554, - 76.79036559159022, - 7.6005536116708186, - 25.915126727881812 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.06281862376239655 - }, - { - "bbox": [ - 446.7096696323964, - 70.72615937722122, - 12.841618701895356, - 34.64495922754935 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.061957712774678333 - }, - { - "bbox": [ - 435.5707540307205, - 72.6766990179972, - 9.948115403515544, - 29.835360002866068 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.05090554307604889 - }, - { - "bbox": [ - 395.9134672120448, - 68.37234648135498, - 13.313090353344592, - 35.21000811416911 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.048676813090792935 - }, - { - "bbox": [ - 441.55283109201787, - 70.93636919677598, - 12.61247065074889, - 34.04032271350583 - ], - "category_id": 1, - "image_id": 196141, - "score": 0.041175731433019114 - } -] +[ + { + "bbox": [ + 277.1183158543966, + 45.699667786163765, + 225.09126579259754, + 333.5602652943344 + ], + "category_id": 1, + "image_id": 785, + "score": 0.9999731779098511 + }, + { + "bbox": [ + 281.950178384611, + 44.56940615106412, + 212.94084624881856, + 344.98328732330305 + ], + "category_id": 1, + "image_id": 785, + "score": 0.30122078732076535 + }, + { + "bbox": [ + 268.01163251716935, + 43.98534000198524, + 238.46561540311666, + 341.79494090239166 + ], + "category_id": 1, + "image_id": 785, + "score": 0.09537058952055945 + }, + { + "bbox": [ + 286.24685022227766, + 41.757854101745124, + 223.83092714841916, + 338.2323329803221 + ], + "category_id": 1, + "image_id": 785, + "score": 0.02974060317622316 + }, + { + "bbox": [ + 262.7942371596824, + 63.5024099030928, + 3.164080328447767, + 4.2931809049024 + ], + "category_id": 1, + "image_id": 785, + "score": 0.01697496324777603 + }, + { + "bbox": [ + 460.79934160584526, + 54.24632570186816, + 3.1264258976875112, + 5.30507188737684 + ], + "category_id": 1, + "image_id": 785, + "score": 0.011266417550507977 + }, + { + "bbox": [ + 457.74867915702885, + 54.642754761043186, + 3.1463156275978577, + 5.30487109975607 + ], + "category_id": 1, + "image_id": 785, + "score": 0.009877337450527405 + }, + { + "bbox": [ + 283.6326909128262, + 48.41948428440242, + 208.11973684568892, + 329.94523003138954 + ], + "category_id": 1, + "image_id": 785, + "score": 0.009197559746208601 + }, + { + "bbox": [ + 207.3711401479468, + 63.36160650309581, + 2.93447433643874, + 3.468569626452343 + ], + "category_id": 1, + "image_id": 785, + "score": 0.008295997977256775 + }, + { + "bbox": [ + 458.51562228937183, + 59.46703918462182, + 3.272054625157523, + 
4.619048555254508 + ], + "category_id": 1, + "image_id": 785, + "score": 0.008173274752520696 + }, + { + "bbox": [ + 461.08150984219986, + 58.545150021384245, + 3.249185872840485, + 5.844152786677249 + ], + "category_id": 1, + "image_id": 785, + "score": 0.007174033771332924 + }, + { + "bbox": [ + 259.83498140597413, + 62.3517572900752, + 2.9195241669668235, + 4.559862560086913 + ], + "category_id": 1, + "image_id": 785, + "score": 0.006377489306032658 + }, + { + "bbox": [ + 206.80460173580252, + 62.5220090883142, + 3.1584765729102457, + 3.520867237953432 + ], + "category_id": 1, + "image_id": 785, + "score": 0.005891890564944476 + }, + { + "bbox": [ + 459.5511247244534, + 54.89593493663015, + 3.230180209185619, + 5.595806307593442 + ], + "category_id": 1, + "image_id": 785, + "score": 0.005863019167811413 + }, + { + "bbox": [ + 457.2902794671802, + 58.740074277713674, + 3.316325358758718, + 5.415639229745793 + ], + "category_id": 1, + "image_id": 785, + "score": 0.005827399869551478 + }, + { + "bbox": [ + 262.6182415084011, + 62.83701378140133, + 3.0697625867510396, + 4.148177980683975 + ], + "category_id": 1, + "image_id": 785, + "score": 0.005008531179775657 + }, + { + "bbox": [ + 209.95621769919438, + 63.58898404912936, + 3.097942773760309, + 3.9870186328652224 + ], + "category_id": 1, + "image_id": 785, + "score": 0.004536413883644729 + }, + { + "bbox": [ + 459.25342388420654, + 59.04022778823142, + 3.6918324658356596, + 6.2054702421954175 + ], + "category_id": 1, + "image_id": 785, + "score": 0.00384555541357817 + }, + { + "bbox": [ + 208.42983867925258, + 62.66620641784881, + 2.939843970544956, + 3.5128275773914908 + ], + "category_id": 1, + "image_id": 785, + "score": 0.003631657359987463 + }, + { + "bbox": [ + 213.41976294267863, + 62.71431286477077, + 2.528260915549936, + 3.4008991982205927 + ], + "category_id": 1, + "image_id": 785, + "score": 0.0033746918197721243 + }, + { + "bbox": [ + 161.97753405615518, + 61.730313756833205, + 2.8917805026908923, + 4.075206275914702 + ], + "category_id": 1, + "image_id": 785, + "score": 0.003240120830014348 + }, + { + "bbox": [ + 457.5244691894709, + 54.70691525725411, + 6.2095088496953394, + 8.39989354390223 + ], + "category_id": 1, + "image_id": 785, + "score": 0.0028898494491729535 + }, + { + "bbox": [ + 376.9178826443722, + 172.73052709081233, + 261.25961331942824, + 215.58502374291808 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.9999579191207886 + }, + { + "bbox": [ + 163.9687616410633, + 80.41943032016765, + 200.19976794356094, + 259.2492676442412 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.9999035596847534 + }, + { + "bbox": [ + 1.218278714743892, + 47.45300387559155, + 90.54113395922819, + 220.98988830655202 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.9998950958251953 + }, + { + "bbox": [ + 542.055600304138, + 50.78951110214531, + 97.65374183236963, + 187.04227881069528 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.9867184565824798 + }, + { + "bbox": [ + 281.8670596900398, + 58.53450299402189, + 82.11294655596839, + 86.20744367046282 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.9736837699533164 + }, + { + "bbox": [ + 279.94252362290945, + 59.89339467038772, + 81.61478084086349, + 147.45283612214442 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.5819535544584765 + }, + { + "bbox": [ + 535.4019505240893, + 48.1844256878009, + 105.27804947591062, + 239.31002317693435 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.4461107432274131 + }, + 
{ + "bbox": [ + 168.57347257788564, + 103.56636286623898, + 188.67170536354314, + 230.37891238088162 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.3492993107937081 + }, + { + "bbox": [ + 372.0082417618134, + 163.99891619439003, + 236.90653900133447, + 224.81380141719242 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.32743142104478484 + }, + { + "bbox": [ + 1.388905257619702, + 35.86500152126901, + 87.67960208998994, + 220.4727970838673 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.31936580857404523 + }, + { + "bbox": [ + 283.65021434011885, + 57.518455359834334, + 81.08575097216988, + 85.11418577738398 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.1897958763078807 + }, + { + "bbox": [ + 543.1779979060689, + 37.87532382036906, + 94.66280745251572, + 191.29243939893223 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.17261266781373394 + }, + { + "bbox": [ + 258.5633408567725, + 60.27068241963883, + 102.3686462123, + 151.42071713691902 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.13677866226510016 + }, + { + "bbox": [ + 380.00719017305823, + 181.1782438214781, + 257.505490623621, + 199.13011090655024 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.12246560363252844 + }, + { + "bbox": [ + 177.40899563109633, + 78.35446740631232, + 189.53651142957023, + 263.45315194093274 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.1013108540546625 + }, + { + "bbox": [ + 0.7289829477709847, + 43.73276160140667, + 85.41587076323728, + 221.3344387113314 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.09960434746646744 + }, + { + "bbox": [ + 461.84120081448543, + 144.75681027711394, + 7.162490813687327, + 8.531497919325176 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.08173750340938568 + }, + { + "bbox": [ + 296.17189402683806, + 85.73360082440907, + 62.47594584815931, + 130.1418854933646 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.0717465542448663 + }, + { + "bbox": [ + 539.1454728501081, + 43.14242476252679, + 100.3810332864756, + 247.18086755992118 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.06011599272181979 + }, + { + "bbox": [ + 277.97115514687323, + 62.833796387748365, + 85.73469418408934, + 109.64015622069529 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.0423359872651069 + }, + { + "bbox": [ + 462.1613388043361, + 146.12331612284657, + 4.619414527763752, + 5.653142729845399 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.03960325857728385 + }, + { + "bbox": [ + 365.7412020686737, + 174.63881714430087, + 251.65152786857914, + 216.71453560361638 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.03937998874316995 + }, + { + "bbox": [ + 3.4297732174796693, + 45.43705430480154, + 92.63472057783511, + 222.82923167372067 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.033127322744961746 + }, + { + "bbox": [ + 169.87771310995316, + 89.66612191248007, + 182.26201179942262, + 244.24356591209786 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.03232751908601077 + }, + { + "bbox": [ + 236.36941077406334, + 63.89780825602214, + 126.04036089393139, + 167.83640884370914 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.026460597694444848 + }, + { + "bbox": [ + 306.015998970117, + 102.95796459236254, + 50.95681252313989, + 115.84925059311661 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.02226386399182351 + }, + { + "bbox": [ + 537.318841521999, + 51.127194758764055, + 
100.70779100270272, + 184.38821643554354 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.021828400794543387 + }, + { + "bbox": [ + 462.4003780259345, + 145.2270003005055, + 5.570865375100425, + 6.968161205149954 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.017564592704083917 + }, + { + "bbox": [ + 284.4247396061427, + 58.40109305610073, + 77.51981649355616, + 85.87582588813615 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.015670991050973693 + }, + { + "bbox": [ + 381.11136505330313, + 182.22526492755827, + 252.6961926281694, + 195.18863447956443 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.012290037721773745 + }, + { + "bbox": [ + 159.00697010469204, + 66.94814529991709, + 208.17784842532066, + 275.3418926190766 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.010543055168754003 + }, + { + "bbox": [ + 0.0, + 41.78049849392192, + 88.22526407776418, + 228.8951048951705 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.009550385293192926 + }, + { + "bbox": [ + 577.9447869595953, + 225.0889245399691, + 34.613561069282355, + 45.224848999211105 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.009009368302155088 + }, + { + "bbox": [ + 461.84120081448543, + 144.75681027711394, + 7.162490813687327, + 8.531497919325176 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.008478489359995936 + }, + { + "bbox": [ + 536.7620147243282, + 50.12388034294447, + 103.91798527567175, + 227.99503472686746 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.0070238283037164315 + }, + { + "bbox": [ + 324.4889601722706, + 132.0053388533619, + 33.860410488241655, + 86.62326758044719 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.006766568381450841 + }, + { + "bbox": [ + 246.15395215941302, + 55.57516986353281, + 114.57893265029415, + 151.51097731653135 + ], + "category_id": 1, + "image_id": 197388, + "score": 0.00619416668365814 + }, + { + "bbox": [ + 38.32789823729127, + 112.41407584232527, + 174.68030024685248, + 169.5690071995081 + ], + "category_id": 1, + "image_id": 40083, + "score": 0.9999903440475464 + }, + { + "bbox": [ + 273.75504650493133, + 127.03007800217645, + 13.119059034012025, + 66.89919582171933 + ], + "category_id": 1, + "image_id": 40083, + "score": 0.9987139701843262 + }, + { + "bbox": [ + 281.037309318129, + 138.89800552022552, + 115.77299430404673, + 161.8925392525125 + ], + "category_id": 1, + "image_id": 40083, + "score": 0.9967334429627354 + }, + { + "bbox": [ + 122.98736914581909, + 149.19548926043387, + 13.238023418245518, + 13.251921410601938 + ], + "category_id": 1, + "image_id": 40083, + "score": 0.7115740632536128 + }, + { + "bbox": [ + 134.73643174966296, + 136.1444006258907, + 11.484101688887165, + 24.515063595289917 + ], + "category_id": 1, + "image_id": 40083, + "score": 0.6175192526221182 + }, + { + "bbox": [ + 244.00963353440733, + 141.97232651644495, + 149.05240181123492, + 151.9715830001215 + ], + "category_id": 1, + "image_id": 40083, + "score": 0.4946145965118973 + }, + { + "bbox": [ + 275.164993708296, + 126.95531864312014, + 13.321305363409294, + 66.11390534184258 + ], + "category_id": 1, + "image_id": 40083, + "score": 0.4050845742423741 + }, + { + "bbox": [ + 42.96185669219733, + 122.34524983009223, + 160.1285645732864, + 161.9463250366397 + ], + "category_id": 1, + "image_id": 40083, + "score": 0.353162111215626 + }, + { + "bbox": [ + 119.6385577246031, + 155.7402521228216, + 13.35265116435049, + 26.52128467487711 + ], + "category_id": 1, + "image_id": 
40083, + "score": 0.28122130800324224 + }, + { + "bbox": [ + 134.01278713702155, + 135.5395238881317, + 11.64567949798922, + 24.682523935864452 + ], + "category_id": 1, + "image_id": 40083, + "score": 0.19370334661431887 + }, + { + "bbox": [ + 124.09760300731958, + 148.1338264630807, + 11.235262772767982, + 13.52837293393398 + ], + "category_id": 1, + "image_id": 40083, + "score": 0.176868630098971 + }, + { + "bbox": [ + 218.7332213212989, + 140.0443329358783, + 180.4683469351732, + 156.8554518569021 + ], + "category_id": 1, + "image_id": 40083, + "score": 0.16822000522327524 + }, + { + "bbox": [ + 270.92053528959764, + 133.3265646431611, + 13.58464710826729, + 56.339971422777694 + ], + "category_id": 1, + "image_id": 40083, + "score": 0.1562438273124175 + }, + { + "bbox": [ + 37.809250550065954, + 105.79757078726388, + 182.54979468741817, + 184.99414098124603 + ], + "category_id": 1, + "image_id": 40083, + "score": 0.14206553007930756 + }, + { + "bbox": [ + 131.5670033941938, + 158.319905396887, + 9.554075877756475, + 21.518604078379468 + ], + "category_id": 1, + "image_id": 40083, + "score": 0.1142622835492838 + }, + { + "bbox": [ + 127.07848171294685, + 138.86839277431187, + 17.235128293754656, + 44.84156945207431 + ], + "category_id": 1, + "image_id": 40083, + "score": 0.09938282001938761 + }, + { + "bbox": [ + 275.15638186104223, + 133.5832174441871, + 10.20764095132887, + 60.2529082432996 + ], + "category_id": 1, + "image_id": 40083, + "score": 0.08779323860838567 + }, + { + "bbox": [ + 118.09746041875155, + 153.9768088492941, + 17.64612772931838, + 33.0168198306535 + ], + "category_id": 1, + "image_id": 40083, + "score": 0.08400380428176607 + }, + { + "bbox": [ + 129.65247011589898, + 146.21014275291188, + 9.816644995735373, + 16.98788352109895 + ], + "category_id": 1, + "image_id": 40083, + "score": 0.07980794934855787 + }, + { + "bbox": [ + 271.7621155363754, + 144.86674821981342, + 124.64715453387907, + 156.9482558015152 + ], + "category_id": 1, + "image_id": 40083, + "score": 0.07801336023989208 + }, + { + "bbox": [ + 122.31437055574987, + 149.80085696138593, + 14.266245774025762, + 12.463835012516398 + ], + "category_id": 1, + "image_id": 40083, + "score": 0.06346535355569785 + }, + { + "bbox": [ + 34.56564215631444, + 135.92815585957712, + 177.51220438385354, + 164.41951766953704 + ], + "category_id": 1, + "image_id": 40083, + "score": 0.0485074333765967 + }, + { + "bbox": [ + 136.7368415229119, + 137.89135149894196, + 9.122227037700043, + 22.213023488378155 + ], + "category_id": 1, + "image_id": 40083, + "score": 0.04772781404400169 + }, + { + "bbox": [ + 123.3235499944418, + 150.25321417348, + 15.765761854272228, + 36.16957895970921 + ], + "category_id": 1, + "image_id": 40083, + "score": 0.04220727754152085 + }, + { + "bbox": [ + 271.90779626938615, + 128.14539407135078, + 15.405080085072711, + 64.71005682344074 + ], + "category_id": 1, + "image_id": 40083, + "score": 0.04092462762153748 + }, + { + "bbox": [ + 114.0193235709124, + 155.5618252886575, + 9.112663847332854, + 14.913955482463706 + ], + "category_id": 1, + "image_id": 40083, + "score": 0.040561411233867466 + }, + { + "bbox": [ + 246.79480278830977, + 74.45452361185933, + 168.83467296399175, + 294.5553838783887 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.9998471736907959 + }, + { + "bbox": [ + 449.91721482790945, + 204.96684769367067, + 185.0938399278399, + 209.68341364145596 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.9993680119514465 + }, + { + "bbox": [ + 551.8933527530817, + 
98.62668626165973, + 53.015730818431166, + 114.70768739332982 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.9989681245939074 + }, + { + "bbox": [ + 36.629787184254866, + 68.37446568096026, + 33.14949933628988, + 95.8618173172063 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.9987284541130066 + }, + { + "bbox": [ + 440.89995321368673, + 70.30641025016695, + 19.43814726089363, + 37.077964642141026 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.9947758913040161 + }, + { + "bbox": [ + 601.8062068801571, + 88.95295148681318, + 16.128385553229577, + 24.398472250098138 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.7787292817106939 + }, + { + "bbox": [ + 443.0809847626748, + 71.63759967713678, + 13.50749833723944, + 32.66811758890536 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.4904795373325092 + }, + { + "bbox": [ + 396.569778686132, + 70.2787260371438, + 13.479104730026052, + 31.759617864735645 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.4112498931182214 + }, + { + "bbox": [ + 38.70719296509935, + 70.61443452888409, + 28.17963315510066, + 92.31016180688292 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.3796398182128506 + }, + { + "bbox": [ + 609.3142175988798, + 93.72376246104807, + 19.058191027280486, + 20.77005778794522 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.370328633830097 + }, + { + "bbox": [ + 548.7095132625554, + 98.39472701114634, + 53.25156101474022, + 116.43788199987897 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.33102923130101364 + }, + { + "bbox": [ + 455.5297663676009, + 206.88078209027378, + 175.70291860814734, + 199.34403654904446 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.3069290034626759 + }, + { + "bbox": [ + 250.74661573104714, + 87.13280710904513, + 167.45142937734437, + 278.3106151544837 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.30579873324356427 + }, + { + "bbox": [ + 440.7002672189753, + 69.17369758813695, + 14.444703091985616, + 37.00946842030504 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.25331338842056605 + }, + { + "bbox": [ + 614.9353977385917, + 95.74403799582933, + 11.596245346674664, + 17.631981747095708 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.22204102380904406 + }, + { + "bbox": [ + 400.60963922399134, + 70.43862641691737, + 8.331775245023891, + 35.000620170929324 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.20590268390631786 + }, + { + "bbox": [ + 602.6848618804396, + 88.3983294514046, + 15.524266109773862, + 24.329680417924536 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.1935096033322262 + }, + { + "bbox": [ + 453.62495235047044, + 80.93588476309868, + 8.634490931609093, + 24.416622635007826 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.13682630359796108 + }, + { + "bbox": [ + 438.1383792082668, + 71.62832244418284, + 13.671594135308055, + 34.59094773941301 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.12521365808926627 + }, + { + "bbox": [ + 37.07150693742372, + 71.09337416480857, + 29.051661261168164, + 90.74910484197981 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.11572668958758377 + }, + { + "bbox": [ + 612.4694532238449, + 94.33977605307147, + 11.44235234183725, + 18.834863504196264 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.1118136151149066 + }, + { + "bbox": [ + 601.3005939432458, + 93.44761682206529, + 12.158258551431686, + 21.16533746684057 + ], + "category_id": 
1, + "image_id": 196141, + "score": 0.10474070969851616 + }, + { + "bbox": [ + 552.5681619230662, + 93.99774029686462, + 52.01820025716597, + 118.51885706193504 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.10326196808658804 + }, + { + "bbox": [ + 398.5848517781443, + 73.06106969434823, + 9.784228227546066, + 31.1350301063286 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.09513584625155845 + }, + { + "bbox": [ + 447.4145013754455, + 199.11669450357687, + 182.9378852593169, + 211.20266858232594 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.09457972184460144 + }, + { + "bbox": [ + 242.46158239970538, + 71.50036639162563, + 171.43617162489392, + 297.42260463621386 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.09176039055855717 + }, + { + "bbox": [ + 597.2197814264931, + 82.37761224901661, + 11.327105500584025, + 31.481263735129318 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.08028100931968704 + }, + { + "bbox": [ + 599.0760153957814, + 81.53235136929479, + 7.865899180085421, + 9.27911853791521 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.06306317158251058 + }, + { + "bbox": [ + 458.0528386594554, + 76.79036559159022, + 7.6005536116708186, + 25.915126727881812 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.06281862376239655 + }, + { + "bbox": [ + 446.7096696323964, + 70.72615937722122, + 12.841618701895356, + 34.64495922754935 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.061957712774678333 + }, + { + "bbox": [ + 435.5707540307205, + 72.6766990179972, + 9.948115403515544, + 29.835360002866068 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.05090554307604889 + }, + { + "bbox": [ + 395.9134672120448, + 68.37234648135498, + 13.313090353344592, + 35.21000811416911 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.048676813090792935 + }, + { + "bbox": [ + 441.55283109201787, + 70.93636919677598, + 12.61247065074889, + 34.04032271350583 + ], + "category_id": 1, + "image_id": 196141, + "score": 0.041175731433019114 + } +] diff --git a/tests/data/coco/test_coco_wholebody.json b/tests/data/coco/test_coco_wholebody.json index 94d49664d4..155bafa045 100644 --- a/tests/data/coco/test_coco_wholebody.json +++ b/tests/data/coco/test_coco_wholebody.json @@ -1,7759 +1,7759 @@ -{ - "info": { - "description": "COCO-WholeBody sample", - "url": "https://github.com/jin-s13/COCO-WholeBody", - "version": "1.0", - "year": "2020", - "date_created": "2020/09/18" - }, - "licenses": [ - { - "url": "http://creativecommons.org/licenses/by-nc-sa/2.0/", - "id": 1, - "name": "Attribution-NonCommercial-ShareAlike License" - }, - { - "url": "http://creativecommons.org/licenses/by-nc/2.0/", - "id": 2, - "name": "Attribution-NonCommercial License" - }, - { - "url": "http://creativecommons.org/licenses/by-nc-nd/2.0/", - "id": 3, - "name": "Attribution-NonCommercial-NoDerivs License" - }, - { - "url": "http://creativecommons.org/licenses/by/2.0/", - "id": 4, - "name": "Attribution License" - }, - { - "url": "http://creativecommons.org/licenses/by-sa/2.0/", - "id": 5, - "name": "Attribution-ShareAlike License" - }, - { - "url": "http://creativecommons.org/licenses/by-nd/2.0/", - "id": 6, - "name": "Attribution-NoDerivs License" - }, - { - "url": "http://flickr.com/commons/usage/", - "id": 7, - "name": "No known copyright restrictions" - }, - { - "url": "http://www.usa.gov/copyright.shtml", - "id": 8, - "name": "United States Government Work" - } - ], - "categories": [ - { - "supercategory": "person", - 
"id": 1, - "name": "person", - "keypoints": [ - "nose", - "left_eye", - "right_eye", - "left_ear", - "right_ear", - "left_shoulder", - "right_shoulder", - "left_elbow", - "right_elbow", - "left_wrist", - "right_wrist", - "left_hip", - "right_hip", - "left_knee", - "right_knee", - "left_ankle", - "right_ankle" - ], - "skeleton": [ - [ - 16, - 14 - ], - [ - 14, - 12 - ], - [ - 17, - 15 - ], - [ - 15, - 13 - ], - [ - 12, - 13 - ], - [ - 6, - 12 - ], - [ - 7, - 13 - ], - [ - 6, - 7 - ], - [ - 6, - 8 - ], - [ - 7, - 9 - ], - [ - 8, - 10 - ], - [ - 9, - 11 - ], - [ - 2, - 3 - ], - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 4 - ], - [ - 3, - 5 - ], - [ - 4, - 6 - ], - [ - 5, - 7 - ] - ] - } - ], - "images": [ - { - "license": 4, - "file_name": "000000000785.jpg", - "coco_url": "http://images.cocodataset.org/val2017/000000000785.jpg", - "height": 425, - "width": 640, - "date_captured": "2013-11-19 21:22:42", - "flickr_url": "http://farm8.staticflickr.com/7015/6795644157_f019453ae7_z.jpg", - "id": 785 - }, - { - "license": 3, - "file_name": "000000040083.jpg", - "coco_url": "http://images.cocodataset.org/val2017/000000040083.jpg", - "height": 333, - "width": 500, - "date_captured": "2013-11-18 03:30:24", - "flickr_url": "http://farm1.staticflickr.com/116/254881838_e21c6d17b8_z.jpg", - "id": 40083 - }, - { - "license": 1, - "file_name": "000000196141.jpg", - "coco_url": "http://images.cocodataset.org/val2017/000000196141.jpg", - "height": 429, - "width": 640, - "date_captured": "2013-11-22 22:37:15", - "flickr_url": "http://farm4.staticflickr.com/3310/3611902235_57d4ae496d_z.jpg", - "id": 196141 - }, - { - "license": 3, - "file_name": "000000197388.jpg", - "coco_url": "http://images.cocodataset.org/val2017/000000197388.jpg", - "height": 392, - "width": 640, - "date_captured": "2013-11-19 20:10:37", - "flickr_url": "http://farm9.staticflickr.com/8375/8507321836_5b8b13188f_z.jpg", - "id": 197388 - } - ], - "annotations": [ - { - "segmentation": [ - [ - 353.37, - 67.65, - 358.15, - 52.37, - 362.92, - 47.59, - 374.38, - 44.73, - 389.66, - 52.37, - 389.66, - 67.65, - 389.66, - 76.25, - 393.48, - 83.89, - 396.35, - 88.66, - 397.3, - 91.53, - 406.85, - 99.17, - 413.54, - 104.9, - 451.74, - 148.83, - 458.43, - 153.6, - 462.25, - 166.02, - 467.02, - 173.66, - 463.2, - 181.3, - 449.83, - 183.21, - 448.88, - 191.81, - 455.56, - 226.19, - 448.88, - 254.84, - 453.65, - 286.36, - 475.62, - 323.6, - 491.85, - 361.81, - 494.72, - 382.82, - 494.72, - 382.82, - 499.49, - 391.41, - 416.4, - 391.41, - 424.04, - 383.77, - 439.33, - 374.22, - 445.06, - 360.85, - 436.46, - 334.11, - 421.18, - 303.55, - 416.4, - 289.22, - 409.72, - 268.21, - 396.35, - 280.63, - 405.9, - 298.77, - 417.36, - 324.56, - 425, - 349.39, - 425, - 357.99, - 419.27, - 360.85, - 394.44, - 367.54, - 362.92, - 370.4, - 346.69, - 367.54, - 360.06, - 362.76, - 369.61, - 360.85, - 382.98, - 340.8, - 355.28, - 271.08, - 360.06, - 266.3, - 386.8, - 219.5, - 368.65, - 162.2, - 348.6, - 175.57, - 309.44, - 187.03, - 301.8, - 192.76, - 288.43, - 193.72, - 282.7, - 193.72, - 280.79, - 187.03, - 280.79, - 174.62, - 287.47, - 171.75, - 291.29, - 171.75, - 295.11, - 171.75, - 306.57, - 166.98, - 312.3, - 165.07, - 345.73, - 142.14, - 350.51, - 117.31, - 350.51, - 102.03, - 350.51, - 90.57, - 353.37, - 65.74 - ] - ], - "num_keypoints": 17, - "area": 27789.11055, - "iscrowd": 0, - "keypoints": [ - 367, - 81, - 2, - 374, - 73, - 2, - 360, - 75, - 2, - 386, - 78, - 2, - 356, - 81, - 2, - 399, - 108, - 2, - 358, - 129, - 2, - 433, - 142, - 2, - 341, - 159, - 2, - 
449, - 165, - 2, - 309, - 178, - 2, - 424, - 203, - 2, - 393, - 214, - 2, - 429, - 294, - 2, - 367, - 273, - 2, - 466, - 362, - 2, - 396, - 341, - 2 - ], - "image_id": 785, - "bbox": [ - 280.79, - 44.73, - 218.7, - 346.68 - ], - "category_id": 1, - "id": 442619, - "face_box": [ - 358.2, - 69.86, - 26.360000000000014, - 25.849999999999994 - ], - "lefthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_box": [ - 280.43, - 173.12, - 27.860000000000014, - 24.849999999999994 - ], - "lefthand_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_kpts": [ - 304.10366, - 181.75134, - 1, - 300.70183, - 182.77567, - 1, - 297.3, - 183.8, - 1, - 294.7, - 186.5, - 1, - 290.1, - 187.8, - 1, - 290.9, - 176.6, - 1, - 287.5, - 176.0, - 1, - 285.5, - 178.4, - 1, - 286.4, - 182.4, - 1, - 288.8, - 179.4, - 1, - 285.0, - 181.0, - 1, - 287.3, - 186.1, - 1, - 291.8, - 189.5, - 1, - 287.7, - 182.7, - 1, - 283.8, - 184.1, - 1, - 286.5, - 189.1, - 1, - 290.0, - 192.0, - 1, - 286.7, - 185.3, - 1, - 282.8, - 187.4, - 1, - 284.8, - 191.6, - 1, - 288.4, - 194.5, - 1 - ], - "face_kpts": [ - 355.823, - 75.36, - 1.0, - 356.354, - 79.0837, - 1.0, - 357.244, - 82.7374, - 1.0, - 358.518, - 86.2722, - 1.0, - 360.146, - 89.6578, - 1.0, - 362.266, - 92.7538, - 1.0, - 365.004, - 95.3223, - 1.0, - 368.487, - 96.6454, - 1.0, - 372.191, - 96.1419, - 1.0, - 375.644, - 94.6832, - 1.0, - 378.601, - 92.3665, - 1.0, - 381.101, - 89.5662, - 1.0, - 382.903, - 86.2741, - 1.0, - 383.896, - 82.6509, - 1.0, - 384.075, - 78.9011, - 1.0, - 384.1, - 75.1408, - 1.0, - 383.903, - 71.3861, - 1.0, - 357.084, - 72.9743, - 1.0, - 358.602, - 71.7848, - 1.0, - 360.42, - 71.3443, - 1.0, - 362.377, - 71.1566, - 1.0, - 364.36, - 71.1889, - 1.0, - 368.971, - 70.4992, - 1.0, - 370.945, - 69.8179, - 1.0, - 373.001, - 69.3543, - 1.0, - 375.14, - 69.2666, - 1.0, - 377.358, - 69.8865, - 1.0, - 366.57, - 73.9588, - 1.0, - 366.734, - 76.1499, - 1.0, - 366.88, - 78.3018, - 1.0, - 366.99, - 80.4957, - 1.0, - 365.104, - 82.5589, - 1.0, - 366.308, - 82.8331, - 1.0, - 367.645, - 82.8037, - 1.0, - 369.172, - 82.2061, - 1.0, - 370.693, - 81.6521, - 1.0, - 358.705, - 75.4542, - 1.0, - 360.294, - 74.0903, - 1.0, - 362.376, - 73.8423, - 1.0, - 364.302, - 74.6834, - 1.0, - 362.543, - 75.568, - 1.0, - 360.612, - 75.8883, - 1.0, - 369.771, - 73.7734, - 1.0, - 371.409, - 72.2638, - 1.0, - 373.615, - 71.9502, - 1.0, - 375.722, - 72.7144, - 1.0, - 373.888, - 73.699, - 1.0, - 371.835, - 74.0238, - 1.0, - 363.184, - 86.9317, - 1.0, - 364.788, - 85.4484, - 1.0, - 367.021, - 84.7474, - 1.0, - 368.048, - 84.5364, - 1.0, - 369.083, - 84.3709, - 1.0, - 372.183, - 84.0529, - 1.0, - 375.083, - 84.8901, - 1.0, - 373.687, - 87.0735, - 1.0, - 371.644, - 88.8121, - 1.0, - 369.024, - 89.6982, - 1.0, - 366.67, - 89.6039, - 1.0, - 364.721, - 88.606, - 1.0, - 363.588, - 86.903, - 1.0, - 365.723, - 85.8496, - 1.0, - 368.184, - 85.2863, - 1.0, - 371.444, - 84.8294, - 1.0, - 374.647, - 85.0454, - 1.0, - 372.166, - 87.2914, - 1.0, - 368.81, - 88.3791, - 1.0, - 365.965, - 88.3238, - 1.0 - ], - "face_valid": true, - "lefthand_valid": false, - "righthand_valid": true, - "foot_valid": true, - 
"foot_kpts": [ - 439, - 378, - 2, - 446, - 380, - 2, - 479, - 370, - 2, - 377, - 359, - 2, - 376, - 358, - 2, - 413, - 353, - 2 - ] - }, - { - "segmentation": [ - [ - 98.56, - 273.72, - 132.9, - 267, - 140.37, - 281.93, - 165.75, - 285.66, - 156.79, - 264.01, - 170.23, - 261.02, - 177.7, - 272.97, - 182.18, - 279.69, - 200.85, - 268.49, - 212.79, - 255.05, - 188.9, - 256.54, - 164.26, - 240.12, - 139.62, - 212.49, - 109.01, - 221.45, - 103.04, - 220.71, - 122.45, - 202.04, - 113.49, - 196.07, - 96.32, - 168.44, - 97.06, - 162.47, - 110.5, - 136.34, - 112, - 124.39, - 91.09, - 110.95, - 80.64, - 114.68, - 71.68, - 131.86, - 62.72, - 147.54, - 57.49, - 156.5, - 48.53, - 168.44, - 41.07, - 180.39, - 38.08, - 193.08, - 40.32, - 205.03, - 47.04, - 213.24, - 54.5, - 216.23, - 82.13, - 252.06, - 91.09, - 271.48 - ] - ], - "num_keypoints": 14, - "area": 11025.219, - "iscrowd": 0, - "keypoints": [ - 99, - 144, - 2, - 104, - 141, - 2, - 96, - 137, - 2, - 0, - 0, - 0, - 78, - 133, - 2, - 56, - 161, - 2, - 81, - 162, - 2, - 0, - 0, - 0, - 103, - 208, - 2, - 116, - 204, - 2, - 0, - 0, - 0, - 57, - 246, - 1, - 82, - 259, - 1, - 137, - 219, - 2, - 138, - 247, - 2, - 177, - 256, - 2, - 158, - 296, - 1 - ], - "image_id": 40083, - "bbox": [ - 38.08, - 110.95, - 174.71, - 174.71 - ], - "category_id": 1, - "id": 198196, - "face_box": [ - 79.19, - 131.64, - 29.290000000000006, - 28.480000000000018 - ], - "lefthand_box": [ - 104.83, - 196.48, - 16.400000000000006, - 15.810000000000002 - ], - "righthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_kpts": [ - 109.88978, - 204.46047, - 1, - 113.101195, - 201.939065, - 1, - 116.31261, - 199.41766, - 1, - 113.19977, - 199.3139, - 1, - 109.8794, - 200.24775, - 1, - 117.86903, - 199.10638, - 2, - 113.9261, - 199.00262, - 2, - 109.56812, - 198.48381, - 2, - 106.6628, - 198.38004999999998, - 1, - 117.1427, - 202.32298, - 2, - 111.2283, - 201.80417, - 2, - 107.07784000000001, - 201.38913, - 2, - 103.65371999999999, - 201.18161, - 1, - 116.52013, - 205.95463, - 2, - 112.5772, - 205.53958, - 2, - 107.59665, - 204.39821, - 2, - 104.27629, - 203.77564, - 2, - 116.41637, - 209.69004, - 2, - 112.16215, - 209.48252, - 2, - 108.73803000000001, - 208.34114, - 2, - 105.72895, - 206.68096, - 2 - ], - "righthand_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_kpts": [ - 82.9654, - 131.144, - 1.0, - 81.8046, - 134.328, - 1.0, - 80.7007, - 137.531, - 1.0, - 79.8836, - 140.818, - 1.0, - 79.734, - 144.196, - 1.0, - 80.4763, - 147.486, - 1.0, - 82.0188, - 150.498, - 1.0, - 84.2352, - 153.057, - 1.0, - 86.8081, - 155.258, - 1.0, - 89.652, - 157.095, - 1.0, - 92.9128, - 157.812, - 1.0, - 95.962, - 156.474, - 1.0, - 98.5377, - 154.281, - 1.0, - 100.557, - 151.568, - 1.0, - 102.508, - 148.799, - 1.0, - 103.987, - 145.756, - 1.0, - 105.345, - 142.655, - 1.0, - 93.6074, - 132.13, - 1.0, - 95.8108, - 132.112, - 1.0, - 97.7956, - 132.618, - 1.0, - 99.6897, - 133.398, - 1.0, - 101.364, - 134.432, - 1.0, - 105.0, - 136.896, - 1.0, - 105.708, - 137.334, - 1.0, - 106.267, - 137.852, - 1.0, - 106.759, - 138.404, - 1.0, - 107.013, - 139.401, - 1.0, - 100.904, - 139.994, - 1.0, - 100.551, 
- 142.0, - 1.0, - 100.202, - 143.956, - 1.0, - 99.8116, - 145.919, - 1.0, - 94.7941, - 146.187, - 1.0, - 95.9823, - 147.027, - 1.0, - 97.3054, - 147.849, - 1.0, - 98.2362, - 148.403, - 1.0, - 99.2812, - 148.491, - 1.0, - 93.151, - 135.98, - 1.0, - 94.9184, - 136.187, - 1.0, - 96.5441, - 136.903, - 1.0, - 97.6034, - 138.308, - 1.0, - 95.8998, - 138.017, - 1.0, - 94.3941, - 137.178, - 1.0, - 102.085, - 141.003, - 1.0, - 103.379, - 141.05, - 1.0, - 104.485, - 141.71, - 1.0, - 104.899, - 142.915, - 1.0, - 103.704, - 142.739, - 1.0, - 102.729, - 142.026, - 1.0, - 89.8433, - 148.685, - 1.0, - 92.6494, - 149.006, - 1.0, - 95.2801, - 149.78, - 1.0, - 96.1096, - 150.259, - 1.0, - 96.7411, - 150.719, - 1.0, - 97.3853, - 151.82, - 1.0, - 97.337, - 153.217, - 1.0, - 96.5124, - 153.108, - 1.0, - 95.6091, - 152.796, - 1.0, - 94.7518, - 152.399, - 1.0, - 93.0313, - 151.317, - 1.0, - 91.3461, - 150.149, - 1.0, - 90.24, - 148.802, - 1.0, - 92.9121, - 149.883, - 1.0, - 95.4213, - 151.204, - 1.0, - 96.3082, - 152.03, - 1.0, - 97.1377, - 152.997, - 1.0, - 96.3098, - 152.035, - 1.0, - 95.406, - 151.234, - 1.0, - 92.8725, - 149.984, - 1.0 - ], - "face_valid": true, - "lefthand_valid": true, - "righthand_valid": false, - "foot_valid": true, - "foot_kpts": [ - 208.16049, - 257.42419, - 2.0, - 205.8824, - 259.13276, - 2.0, - 183.38626, - 275.93367, - 2.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ] - }, - { - "segmentation": [ - [ - 257.76, - 288.05, - 273.4, - 258.26, - 325.55, - 253.79, - 335.23, - 232.93, - 326.3, - 186.74, - 333.74, - 177.05, - 327.79, - 153.21, - 333.74, - 142.04, - 344.17, - 139.06, - 353.11, - 139.06, - 359.07, - 145.02, - 360.56, - 148.74, - 362.05, - 168.86, - 388.87, - 197.17, - 397.81, - 276.88, - 372.48, - 293.27 - ] - ], - "num_keypoints": 15, - "area": 10171.9544, - "iscrowd": 0, - "keypoints": [ - 343, - 164, - 2, - 348, - 160, - 2, - 340, - 160, - 2, - 359, - 163, - 2, - 332, - 164, - 2, - 370, - 189, - 2, - 334, - 190, - 2, - 358, - 236, - 2, - 348, - 234, - 2, - 339, - 270, - 2, - 330, - 262, - 2, - 378, - 262, - 2, - 343, - 254, - 2, - 338, - 280, - 2, - 283, - 272, - 2, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "image_id": 40083, - "bbox": [ - 257.76, - 139.06, - 140.05, - 154.21 - ], - "category_id": 1, - "id": 230195, - "face_box": [ - 333.96, - 154.32, - 23.28000000000003, - 26.79000000000002 - ], - "lefthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_kpts": [ - 333.383, - 160.62, - 1.0, - 333.607, - 163.811, - 1.0, - 334.137, - 166.965, - 1.0, - 334.934, - 170.062, - 1.0, - 336.036, - 
173.062, - 1.0, - 337.69, - 175.794, - 1.0, - 340.01, - 177.986, - 1.0, - 342.889, - 179.347, - 1.0, - 346.063, - 179.445, - 1.0, - 349.16, - 178.674, - 1.0, - 351.892, - 177.033, - 1.0, - 354.132, - 174.761, - 1.0, - 355.652, - 171.957, - 1.0, - 356.482, - 168.871, - 1.0, - 356.751, - 165.691, - 1.0, - 356.914, - 162.496, - 1.0, - 356.913, - 159.299, - 1.0, - 335.435, - 157.491, - 1.0, - 336.759, - 156.383, - 1.0, - 338.264, - 155.821, - 1.0, - 339.903, - 155.445, - 1.0, - 341.565, - 155.312, - 1.0, - 345.805, - 155.039, - 1.0, - 347.424, - 154.896, - 1.0, - 349.044, - 154.957, - 1.0, - 350.677, - 155.266, - 1.0, - 352.333, - 156.08, - 1.0, - 343.65, - 159.186, - 1.0, - 343.687, - 161.041, - 1.0, - 343.68, - 162.886, - 1.0, - 343.657, - 164.752, - 1.0, - 341.61, - 167.049, - 1.0, - 342.69, - 167.145, - 1.0, - 343.906, - 167.123, - 1.0, - 345.179, - 166.907, - 1.0, - 346.456, - 166.707, - 1.0, - 336.707, - 159.932, - 1.0, - 338.078, - 158.999, - 1.0, - 339.726, - 158.864, - 1.0, - 341.204, - 159.605, - 1.0, - 339.755, - 160.185, - 1.0, - 338.21, - 160.321, - 1.0, - 346.612, - 159.27, - 1.0, - 348.028, - 158.307, - 1.0, - 349.739, - 158.245, - 1.0, - 351.302, - 158.965, - 1.0, - 349.802, - 159.575, - 1.0, - 348.188, - 159.642, - 1.0, - 340.049, - 171.873, - 1.0, - 341.307, - 170.304, - 1.0, - 343.097, - 169.499, - 1.0, - 343.987, - 169.41, - 1.0, - 344.876, - 169.314, - 1.0, - 346.909, - 169.61, - 1.0, - 348.603, - 170.874, - 1.0, - 347.548, - 172.219, - 1.0, - 346.133, - 173.242, - 1.0, - 344.378, - 173.742, - 1.0, - 342.683, - 173.666, - 1.0, - 341.218, - 173.038, - 1.0, - 340.398, - 171.815, - 1.0, - 342.1, - 170.752, - 1.0, - 344.043, - 170.287, - 1.0, - 346.21, - 170.271, - 1.0, - 348.214, - 170.913, - 1.0, - 346.462, - 171.947, - 1.0, - 344.283, - 172.468, - 1.0, - 342.246, - 172.507, - 1.0 - ], - "face_valid": true, - "lefthand_valid": false, - "righthand_valid": false, - "foot_valid": false, - "foot_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ] - }, - { - "segmentation": [ - [ - 285.37, - 126.5, - 281.97, - 127.72, - 280.76, - 132.33, - 280.76, - 136.46, - 275.17, - 143.26, - 275.9, - 158.08, - 277.6, - 164.4, - 278.33, - 173.87, - 278.33, - 183.83, - 279.79, - 191.11, - 281.97, - 194.76, - 284.89, - 192.09, - 284.89, - 186.99, - 284.89, - 181.16, - 284.64, - 177.51, - 285.86, - 173.87 - ] - ], - "num_keypoints": 0, - "area": 491.2669, - "iscrowd": 0, - "keypoints": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "image_id": 40083, - "bbox": [ - 275.17, - 126.5, - 10.69, - 68.26 - ], - "category_id": 1, - "id": 1202706, - "face_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 
0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_valid": false, - "lefthand_valid": false, - "righthand_valid": false, - "foot_valid": false, - "foot_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ] - }, - { - "segmentation": [ - [ - 339.34, - 107.97, - 338.38, - 102.19, - 339.34, - 91.58, - 335.49, - 84.84, - 326.81, - 74.23, - 312.35, - 74.23, - 301.75, - 74.23, - 295, - 86.76, - 295, - 93.51, - 292.11, - 99.3, - 287.29, - 102.19, - 291.14, - 107.01, - 295, - 107.01, - 295.96, - 112.79, - 301.75, - 115.69, - 305.6, - 119.54, - 307.53, - 123.4, - 317.17, - 123.4, - 311.39, - 129.18, - 286.32, - 139.79, - 274.75, - 139.79, - 264.15, - 138.82, - 262.22, - 144.61, - 261.26, - 147.5, - 253.54, - 147.5, - 247.76, - 150.39, - 249.69, - 159.07, - 256.44, - 161, - 262.22, - 161, - 268, - 161, - 276.68, - 161.96, - 284.39, - 168.71, - 293.07, - 174.49, - 301.75, - 174.49, - 308.49, - 169.67, - 308.49, - 188.95, - 311.39, - 194.74, - 312.35, - 208.23, - 307.53, - 221.73, - 297.89, - 229.44, - 281.5, - 250.65, - 269.93, - 262.22, - 278.61, - 320.06, - 281.5, - 331.63, - 276.68, - 338.38, - 270.9, - 349.95, - 262.22, - 356.7, - 253.54, - 359.59, - 253.54, - 365.37, - 274.75, - 365.37, - 291.14, - 365.37, - 306.57, - 359.59, - 303.67, - 352.84, - 297.89, - 340.31, - 293.07, - 318.13, - 295, - 294.03, - 293.07, - 278.61, - 294.03, - 270.9, - 305.6, - 259.33, - 313.31, - 299.82, - 319.1, - 309.46, - 341.27, - 317.17, - 384.65, - 330.67, - 387.55, - 335.49, - 383.69, - 341.27, - 397.19, - 350.91, - 398.15, - 363.44, - 398.15, - 375.01, - 405.86, - 374.05, - 409.72, - 357.66, - 411.65, - 342.24, - 416.47, - 328.74, - 417.43, - 
321.03, - 410.68, - 319.1, - 401.04, - 318.13, - 392.37, - 318.13, - 382.73, - 314.28, - 348.98, - 300.78, - 339.34, - 293.07, - 334.52, - 285.36, - 340.31, - 259.33, - 340.31, - 246.8, - 340.31, - 242.94, - 350.91, - 228.48, - 358.62, - 214.98, - 355.22, - 204.32, - 357.05, - 196.11, - 361.61, - 188.82, - 361.61, - 181.97, - 365.26, - 165.63, - 367.54, - 139.18, - 366.17, - 123.68, - 361.15, - 112.73, - 353.86, - 107.72, - 351.58, - 105.89, - 344.74, - 105.89, - 340.18, - 109.08 - ] - ], - "num_keypoints": 15, - "area": 17123.92955, - "iscrowd": 0, - "keypoints": [ - 297, - 111, - 2, - 299, - 106, - 2, - 0, - 0, - 0, - 314, - 108, - 2, - 0, - 0, - 0, - 329, - 141, - 2, - 346, - 125, - 2, - 295, - 164, - 2, - 323, - 130, - 2, - 266, - 155, - 2, - 279, - 143, - 2, - 329, - 225, - 2, - 331, - 221, - 2, - 327, - 298, - 2, - 283, - 269, - 2, - 398, - 327, - 2, - 288, - 349, - 2 - ], - "image_id": 196141, - "bbox": [ - 247.76, - 74.23, - 169.67, - 300.78 - ], - "category_id": 1, - "id": 460541, - "face_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_box": [ - 249.12, - 146.31, - 19.920000000000016, - 15.819999999999993 - ], - "righthand_box": [ - 262.82, - 139.96, - 18.930000000000007, - 14.679999999999978 - ], - "lefthand_kpts": [ - 265.1, - 155.9, - 1, - 260.05, - 152.25, - 1, - 255.0, - 148.6, - 1, - 250.6, - 148.6, - 1, - 249.1, - 151.0, - 1, - 253.4, - 158.9, - 1, - 251.9, - 155.1, - 1, - 252.0, - 151.9, - 1, - 252.9, - 150.0, - 1, - 257.4, - 157.9, - 1, - 256.7, - 154.2, - 1, - 256.3, - 151.6, - 1, - 256.9, - 149.3, - 1, - 260.2, - 156.5, - 1, - 260.1, - 153.0, - 1, - 259.9, - 150.7, - 1, - 260.2, - 148.7, - 1, - 262.8, - 154.8, - 1, - 262.7, - 152.5, - 1, - 262.7, - 150.9, - 1, - 262.6, - 148.8, - 1 - ], - "righthand_kpts": [ - 280.8, - 146.5, - 1, - 275.4, - 149.15, - 1, - 270.0, - 151.8, - 1, - 266.2, - 152.2, - 1, - 263.5, - 151.9, - 1, - 266.6, - 142.5, - 1, - 263.6, - 147.0, - 1, - 264.9, - 151.0, - 1, - 268.5, - 152.9, - 1, - 270.6, - 142.0, - 1, - 267.9, - 146.0, - 1, - 269.4, - 149.6, - 1, - 272.5, - 151.5, - 1, - 273.8, - 142.1, - 1, - 272.2, - 146.0, - 1, - 274.2, - 149.1, - 1, - 276.5, - 149.6, - 1, - 277.4, - 142.3, - 1, - 276.6, - 145.2, - 1, - 277.6, - 148.3, - 1, - 279.4, - 148.6, - 1 - ], - "face_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, 
- 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_valid": false, - "lefthand_valid": true, - "righthand_valid": true, - "foot_valid": true, - "foot_kpts": [ - 401.79499, - 364.28207, - 2.0, - 407.21854, - 361.57029, - 2.0, - 407.21854, - 325.86523, - 2.0, - 257.16687, - 361.57029, - 2.0, - 258.52276, - 361.11833, - 2.0, - 297.84353, - 355.69477, - 2.0 - ] - }, - { - "segmentation": [ - [ - 578.76, - 112.4, - 589.39, - 100.81, - 589.39, - 99.84, - 596.16, - 116.27, - 603.89, - 122.07, - 603.89, - 138.49, - 598.09, - 159.75, - 597.12, - 181, - 594.22, - 191.63, - 589.39, - 212.89, - 583.59, - 208.06, - 583.59, - 206.13, - 582.63, - 200.33, - 582.63, - 193.57, - 582.63, - 182.94, - 575.86, - 181, - 567.17, - 197.43, - 571.03, - 203.23, - 567.17, - 207.09, - 555.57, - 208.06, - 562.34, - 200.33, - 565.24, - 190.67, - 565.24, - 173.27, - 566.2, - 163.61, - 568.14, - 156.85, - 570.07, - 148.15, - 566.2, - 143.32, - 565.24, - 133.66, - 575.86, - 118.2 - ] - ], - "num_keypoints": 15, - "area": 2789.0208, - "iscrowd": 0, - "keypoints": [ - 589, - 113, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 595, - 112, - 1, - 584, - 110, - 2, - 598, - 123, - 2, - 579, - 119, - 2, - 594, - 141, - 2, - 570, - 137, - 2, - 576, - 135, - 2, - 585, - 139, - 2, - 590, - 157, - 2, - 574, - 156, - 2, - 589, - 192, - 2, - 565, - 189, - 1, - 587, - 222, - 1, - 557, - 219, - 1 - ], - "image_id": 196141, - "bbox": [ - 555.57, - 99.84, - 48.32, - 113.05 - ], - "category_id": 1, - "id": 488308, - "face_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_box": [ - 568.2, - 130.89, - 10.75, - 11.130000000000024 - ], - "righthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_kpts": [ - 578.8, - 135.7, - 2, - 577.55, - 134.35, - 2, - 576.3, - 133.0, - 1, - 574.6, - 134.1, - 1, - 574.0, - 135.5, - 1, - 574.3, - 132.9, - 2, - 572.0, - 132.4, - 2, - 570.3, - 131.8, - 2, - 568.9, - 130.7, - 2, - 573.3, - 134.4, - 2, - 570.9, - 134.0, - 2, - 569.5, - 133.9, - 2, - 568.2, - 133.8, - 2, - 572.8, - 135.7, - 2, - 572.6, - 138.3, - 2, - 574.1, - 139.4, - 2, - 576.2, - 139.4, - 1, - 574.4, - 138.0, - 2, - 575.4, - 139.5, - 2, - 576.3, - 140.2, - 2, - 577.6, - 140.8, - 2 - ], - "righthand_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 
0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_valid": false, - "lefthand_valid": true, - "righthand_valid": false, - "foot_valid": false, - "foot_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ] - }, - { - "segmentation": [ - [ - 446.96, - 73.13, - 445.81, - 77.71, - 443.33, - 78.29, - 441.61, - 81.72, - 441.23, - 84.58, - 440.85, - 90.5, - 442.19, - 94.32, - 443.52, - 97.18, - 443.52, - 102.33, - 442.57, - 105.58, - 446.58, - 105.19, - 447.15, - 99.85, - 447.53, - 94.89, - 446, - 93.55, - 446.38, - 92.03, - 453.64, - 92.41, - 454.02, - 94.51, - 457.64, - 94.51, - 455.74, - 88.4, - 455.35, - 82.29, - 453.64, - 78.48, - 451.92, - 77.71, - 452.87, - 74.47, - 450.58, - 73.13 - ] - ], - "num_keypoints": 0, - "area": 285.7906, - "iscrowd": 0, - "keypoints": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "image_id": 196141, - "bbox": [ - 440.85, - 73.13, - 16.79, - 32.45 - ], - "category_id": 1, - "id": 508900, - "face_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 
0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_valid": false, - "lefthand_valid": false, - "righthand_valid": false, - "foot_valid": false, - "foot_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ] - }, - { - "segmentation": [ - [ - 497.15, - 413.95, - 531.55, - 417.68, - 548.74, - 411.7, - 551.74, - 403.48, - 546.5, - 394.5, - 543.51, - 386.28, - 571.93, - 390.76, - 574.92, - 391.51, - 579.4, - 409.46, - 605.58, - 409.46, - 615.3, - 408.71, - 607.07, - 389.27, - 598.1, - 381.79, - 607.82, - 366.83, - 607.82, - 352.63, - 610.06, - 338.42, - 619.04, - 345.15, - 631, - 344.4, - 630.25, - 336.92, - 626.51, - 318.98, - 616.05, - 286.07, - 598.85, - 263.64, - 585.39, - 257.66, - 593.61, - 244.2, - 601.09, - 235.97, - 596.6, - 219.52, - 587.63, - 211.29, - 577.91, - 208.3, - 563.7, - 206.81, - 556.22, - 214.29, - 548, - 217.28, - 539.77, - 229.99, - 539.77, - 241.95, - 539.02, - 247.19, - 523.32, - 247.19, - 503.88, - 254.67, - 485.93, - 254.67, - 479.95, - 248.68, - 473.22, - 241.21, - 485.93, - 227, - 477.7, - 215.78, - 457.51, - 215.78, - 453.77, - 235.22, - 463.5, - 246.44, - 465.74, - 261.4, - 490.42, - 274.11, - 501.63, - 275.6, - 504.62, - 286.07, - 519.58, - 286.07, - 522.57, - 292.06, - 512.85, - 310, - 515.09, - 330.94, - 530.05, - 343.65, - 505.37, - 341.41, - 479.95, - 339.91, - 465.74, - 346.64, - 463.5, - 358.61, - 473.97, - 381.04, - 485.18, - 390.02, - 501.63, - 398.99, - 504.62, - 404.22, - 491.16, - 412.45, - 495.65, - 417.68 - ] - ], - "num_keypoints": 12, - "area": 21608.94075, - "iscrowd": 0, - "keypoints": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 552, - 234, - 2, - 0, - 0, - 0, - 531, - 262, - 2, - 600, - 283, - 2, - 480, - 260, - 2, - 622, - 336, - 2, - 466, - 242, - 2, - 0, - 0, - 0, - 546, - 365, - 2, - 592, - 371, - 2, - 470, - 351, - 2, - 551, - 330, - 2, - 519, - 394, - 2, - 589, - 391, - 2 - ], - "image_id": 196141, - "bbox": [ - 453.77, - 206.81, - 177.23, - 210.87 - ], - "category_id": 1, - "id": 1717641, - "face_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 
0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_valid": false, - "lefthand_valid": false, - "righthand_valid": false, - "foot_valid": true, - "foot_kpts": [ - 0.0, - 0.0, - 0.0, - 498.08009, - 412.23863, - 2.0, - 541.66626, - 400.39384, - 2.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 602.22109, - 403.58794, - 2.0 - ] - }, - { - "segmentation": [ - [ - 58.93, - 163.67, - 47.18, - 161.59, - 36.12, - 93.86, - 41.65, - 82.8, - 40.27, - 69.66, - 50.64, - 67.59, - 55.48, - 73.81, - 63.08, - 92.47, - 66.53, - 99.38, - 65.15, - 109.06, - 61, - 127.03, - 59.62, - 162.97 - ] - ], - "num_keypoints": 17, - "area": 1870.14015, - "iscrowd": 0, - "keypoints": [ - 48, - 79, - 2, - 50, - 77, - 2, - 46, - 77, - 2, - 54, - 78, - 2, - 45, - 78, - 2, - 57, - 90, - 2, - 42, - 90, - 2, - 63, - 103, - 2, - 42, - 105, - 2, - 56, - 113, - 2, - 49, - 112, - 2, - 55, - 117, - 2, - 44, - 117, - 2, - 55, - 140, - 2, - 47, - 140, - 2, - 56, - 160, - 2, - 49, - 159, - 2 - ], - "image_id": 196141, - "bbox": [ - 36.12, - 67.59, - 30.41, - 96.08 - ], - "category_id": 1, - "id": 1724673, - "face_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 
0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_valid": false, - "lefthand_valid": false, - "righthand_valid": false, - "foot_valid": true, - "foot_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 44.4, - 162.6, - 2.0, - 43.4, - 161.5, - 2.0, - 51.7, - 160.7, - 2.0 - ] - }, - { - "segmentation": [ - [ - 139.41, - 321.58, - 144.78, - 326.56, - 196.92, - 314.68, - 196.16, - 309.31, - 207.28, - 292.05, - 213.03, - 284, - 228.75, - 270.2, - 233.35, - 261.38, - 244.47, - 252.56, - 254.44, - 237.61, - 267.86, - 215.37, - 272.08, - 212.68, - 285.5, - 232.62, - 294.7, - 250.64, - 295.08, - 264.06, - 290.87, - 277.87, - 290.87, - 286.3, - 289.71, - 298.19, - 281.66, - 318.89, - 282.05, - 334.23, - 295.08, - 340.37, - 315.02, - 343.82, - 314.25, - 336.53, - 310.42, - 330.4, - 301.98, - 322.34, - 304.29, - 310.84, - 304.67, - 302.79, - 306.2, - 292.05, - 311.19, - 275.56, - 313.87, - 251.79, - 311.19, - 234.54, - 312.72, - 224.57, - 310.42, - 212.3, - 307.74, - 201.56, - 306.2, - 193.51, - 306.59, - 183.16, - 310.04, - 177.41, - 314.64, - 173.19, - 316.94, - 171.65, - 328.06, - 163.99, - 337.64, - 157.85, - 343.4, - 159.77, - 346.46, - 166.67, - 346.85, - 170.5, - 346.46, - 179.71, - 346.85, - 188.53, - 346.85, - 191.98, - 344.55, - 198.11, - 342.25, - 203.48, - 338.41, - 208.46, - 335.34, - 212.68, - 335.34, - 217.67, - 343.01, - 222.65, - 354.9, - 210.76, - 359.12, - 196.19, - 361.8, - 173.19, - 361.42, - 161.69, - 356.43, - 150.18, - 344.93, - 135.61, - 343.01, - 132.93, - 345.31, - 126.41, - 345.7, - 124.88, - 343.4, - 115.29, - 340.33, - 104.17, - 337.26, - 102.25, - 330.36, - 103.4, - 326.14, - 106.09, - 320.01, - 111.07, - 314.64, - 119.89, - 310.42, - 121.04, - 292.02, - 121.81, - 279.75, - 127.94, - 244.09, - 138.68, - 240.25, - 
142.51, - 238.72, - 154.4, - 239.1, - 163.6, - 239.87, - 173.96, - 241.79, - 181.24, - 248.3, - 192.36, - 240.25, - 206.55, - 236.42, - 219.2, - 229.9, - 236.45, - 225.3, - 247.57, - 218.4, - 254.48, - 208.81, - 265.6, - 202.29, - 278.25, - 195.39, - 285.92, - 188.49, - 292.05, - 183.5, - 295.89, - 176.6, - 302.41, - 172, - 308.54, - 167.78, - 313.14, - 146.31, - 318.89 - ] - ], - "num_keypoints": 16, - "area": 14250.29385, - "iscrowd": 0, - "keypoints": [ - 334, - 135, - 2, - 340, - 129, - 2, - 331, - 129, - 2, - 0, - 0, - 0, - 319, - 123, - 2, - 340, - 146, - 2, - 292, - 133, - 2, - 353, - 164, - 2, - 246, - 144, - 2, - 354, - 197, - 2, - 250, - 185, - 2, - 293, - 197, - 2, - 265, - 187, - 2, - 305, - 252, - 2, - 231, - 254, - 2, - 293, - 321, - 2, - 193, - 297, - 2 - ], - "image_id": 197388, - "bbox": [ - 139.41, - 102.25, - 222.39, - 241.57 - ], - "category_id": 1, - "id": 437295, - "face_box": [ - 320.23, - 123.84, - 21.049999999999955, - 23.5 - ], - "lefthand_box": [ - 333.65, - 198.45, - 23.150000000000034, - 23.57000000000002 - ], - "righthand_box": [ - 247.5, - 184.92, - 23.30000000000001, - 22.360000000000014 - ], - "lefthand_kpts": [ - 353.87482, - 196.49984999999998, - 1, - 349.01957500000003, - 201.76511, - 1, - 344.16433, - 207.03037, - 1, - 340.81534, - 210.64729, - 1, - 337.46165, - 216.59183000000002, - 1, - 346.65868, - 216.02586, - 1, - 342.27241, - 219.28019999999998, - 1, - 337.88613, - 219.70467, - 1, - 334.4903, - 218.57273, - 1, - 345.5, - 215.0, - 1, - 342.27241, - 217.72377, - 1, - 338.73509, - 218.00675999999999, - 1, - 334.77329, - 216.30885, - 1, - 343.7, - 213.8, - 1, - 341.42345, - 215.74288, - 1, - 338.73509, - 215.60138, - 1, - 335.62225, - 213.76198, - 1, - 342.4139, - 212.63003, - 1, - 340.85748, - 213.76198, - 1, - 338.87658, - 214.04496, - 1, - 337.17867, - 213.76198, - 1 - ], - "righthand_kpts": [ - 249.4, - 180.4, - 1, - 254.3, - 184.9, - 1, - 259.2, - 189.4, - 1, - 259.3, - 192.1, - 1, - 258.2, - 194.9, - 1, - 254.9, - 193.2, - 1, - 255.9, - 192.3, - 1, - 255.9, - 190.5, - 1, - 255.4, - 188.5, - 1, - 252.2, - 194.0, - 1, - 253.2, - 193.6, - 1, - 253.2, - 191.1, - 1, - 252.9, - 188.8, - 1, - 249.4, - 193.6, - 1, - 250.4, - 193.6, - 1, - 250.4, - 191.3, - 1, - 249.9, - 188.7, - 1, - 247.1, - 192.2, - 1, - 248.0, - 192.2, - 1, - 247.9, - 190.3, - 1, - 247.5, - 188.3, - 1 - ], - "face_kpts": [ - 319.681, - 126.613, - 1.0, - 319.155, - 129.261, - 1.0, - 318.92, - 131.954, - 1.0, - 319.187, - 134.631, - 1.0, - 319.707, - 137.271, - 1.0, - 320.991, - 139.649, - 1.0, - 322.846, - 141.606, - 1.0, - 325.009, - 143.216, - 1.0, - 327.359, - 144.544, - 1.0, - 329.907, - 145.384, - 1.0, - 332.347, - 144.347, - 1.0, - 334.268, - 142.449, - 1.0, - 335.767, - 140.222, - 1.0, - 336.675, - 137.69, - 1.0, - 337.019, - 135.009, - 1.0, - 336.982, - 132.311, - 1.0, - 337.13, - 129.618, - 1.0, - 328.503, - 125.823, - 1.0, - 329.531, - 125.489, - 1.0, - 330.619, - 125.626, - 1.0, - 331.573, - 125.909, - 1.0, - 332.529, - 126.431, - 1.0, - 334.479, - 127.459, - 1.0, - 334.815, - 127.43, - 1.0, - 335.157, - 127.316, - 1.0, - 335.52, - 127.327, - 1.0, - 335.949, - 127.701, - 1.0, - 332.762, - 129.334, - 1.0, - 333.168, - 130.389, - 1.0, - 333.603, - 131.342, - 1.0, - 333.928, - 132.331, - 1.0, - 331.671, - 134.291, - 1.0, - 332.232, - 134.389, - 1.0, - 332.931, - 134.487, - 1.0, - 333.332, - 134.463, - 1.0, - 333.645, - 134.212, - 1.0, - 329.271, - 128.208, - 1.0, - 329.963, - 128.464, - 1.0, - 330.676, - 128.659, - 1.0, - 331.392, - 128.839, - 1.0, - 330.672, - 128.659, - 1.0, 
- 330.003, - 128.334, - 1.0, - 333.792, - 129.611, - 1.0, - 334.158, - 129.741, - 1.0, - 334.546, - 129.765, - 1.0, - 334.878, - 129.954, - 1.0, - 334.523, - 129.822, - 1.0, - 334.161, - 129.704, - 1.0, - 327.38, - 138.818, - 1.0, - 329.757, - 138.136, - 1.0, - 332.086, - 137.874, - 1.0, - 332.75, - 138.208, - 1.0, - 333.221, - 138.515, - 1.0, - 334.495, - 139.634, - 1.0, - 335.213, - 141.054, - 1.0, - 334.12, - 140.754, - 1.0, - 333.208, - 140.234, - 1.0, - 332.2, - 139.888, - 1.0, - 330.765, - 139.414, - 1.0, - 329.069, - 139.351, - 1.0, - 327.561, - 138.814, - 1.0, - 329.88, - 138.346, - 1.0, - 332.517, - 138.668, - 1.0, - 334.031, - 139.589, - 1.0, - 335.123, - 140.862, - 1.0, - 333.726, - 140.572, - 1.0, - 332.203, - 140.032, - 1.0, - 329.731, - 139.403, - 1.0 - ], - "face_valid": true, - "lefthand_valid": true, - "righthand_valid": true, - "foot_valid": true, - "foot_kpts": [ - 300.24175, - 336.83838, - 2.0, - 306.59015, - 335.34464, - 2.0, - 290.07408, - 326.47826, - 2.0, - 182.60972, - 314.05885, - 2.0, - 175.88789, - 305.84328, - 2.0, - 189.70499, - 302.48236, - 2.0 - ] - }, - { - "segmentation": [ - [ - 287.17, - 121.42, - 294.22, - 106.44, - 302.15, - 116.13, - 303.03, - 121.42 - ], - [ - 297.74, - 99.39, - 310.08, - 76.49, - 326.81, - 76.49, - 329.46, - 67.68, - 337.38, - 61.52, - 346.19, - 62.4, - 353.24, - 65.92, - 353.24, - 76.49, - 355.88, - 84.42, - 359.41, - 87.94, - 362.05, - 96.75, - 354.12, - 139.04, - 349.72, - 142.56, - 345.31, - 139.92, - 349.72, - 117.89, - 348.84, - 108.2, - 345.31, - 113.49, - 336.5, - 101.16, - 325.93, - 110.85, - 311.84, - 123.18 - ], - [ - 324.17, - 176.91, - 332.1, - 191.89, - 328.58, - 198.94, - 327.69, - 205.98, - 333.86, - 213.03, - 337.38, - 227.13, - 332.98, - 227.13, - 319.77, - 219.2, - 313.6, - 211.27 - ], - [ - 332.98, - 165.46, - 341.79, - 161.06, - 336.5, - 174.27, - 333.86, - 186.6, - 326.81, - 176.03 - ] - ], - "num_keypoints": 16, - "area": 3404.869, - "iscrowd": 0, - "keypoints": [ - 345, - 92, - 2, - 350, - 87, - 2, - 341, - 87, - 2, - 0, - 0, - 0, - 330, - 83, - 2, - 357, - 94, - 2, - 316, - 92, - 2, - 357, - 104, - 2, - 291, - 123, - 1, - 351, - 133, - 2, - 281, - 136, - 1, - 326, - 131, - 1, - 305, - 128, - 1, - 336, - 152, - 1, - 303, - 171, - 1, - 318, - 206, - 2, - 294, - 211, - 1 - ], - "image_id": 197388, - "bbox": [ - 287.17, - 61.52, - 74.88, - 165.61 - ], - "category_id": 1, - "id": 467657, - "face_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 
0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_valid": false, - "lefthand_valid": false, - "righthand_valid": false, - "foot_valid": true, - "foot_kpts": [ - 322.595, - 216.245, - 2.0, - 327.23077, - 215.42692, - 2.0, - 316.81553, - 207.67155, - 2.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ] - }, - { - "segmentation": [ - [ - 547.95, - 201.57, - 546.73, - 190.62, - 547.95, - 181.49, - 547.95, - 169.31, - 547.95, - 156.53, - 546.73, - 144.36, - 544.3, - 139.49, - 540.04, - 132.19, - 540.04, - 121.84, - 542.47, - 107.24, - 544.3, - 99.33, - 548.56, - 88.98, - 561.95, - 78.03, - 572.29, - 71.33, - 572.29, - 71.33, - 572.29, - 65.25, - 574.12, - 51.86, - 583.86, - 48.81, - 592.99, - 48.81, - 597.86, - 57.33, - 599.07, - 64.64, - 608.2, - 76.81, - 614.9, - 82.89, - 620.98, - 89.59, - 628.89, - 93.24, - 636.81, - 101.76, - 640, - 109.67, - 640, - 115.76, - 640, - 127.93, - 620.37, - 111.5, - 619.16, - 111.5, - 618.55, - 112.11, - 608.2, - 105.41, - 600.9, - 119.41, - 592.99, - 131.58, - 596.03, - 148.01, - 605.16, - 162.01, - 612.46, - 190.01, - 614.9, - 204.61, - 606.98, - 216.78, - 603.94, - 226.52, - 606.38, - 239.91, - 605.16, - 256.95, - 604.55, - 264.26, - 602.12, - 271.56, - 586.29, - 272.17, - 584.47, - 255.13, - 588.73, - 237.48, - 592.99, - 221.65, - 596.64, - 207.05, - 596.64, - 197.31, - 594.2, - 186.96, - 584.47, - 172.36, - 577.77, - 166.27, - 570.47, - 170.53, - 558.91, - 179.66, - 555.86, - 192.44, - 548.56, - 198.53, - 547.95, - 198.53 - ] - ], - "num_keypoints": 15, - "area": 8913.98475, - "iscrowd": 0, - "keypoints": [ - 591, - 78, - 2, - 594, - 74, - 2, - 586, - 74, - 2, - 0, - 0, - 0, - 573, - 70, - 2, - 598, - 86, - 2, - 566, - 93, - 2, - 626, - 105, - 2, - 546, - 126, - 2, - 0, - 0, - 0, - 561, - 150, - 2, - 582, - 150, - 2, - 557, - 154, - 2, - 606, - 194, - 2, - 558, - 209, - 1, - 591, - 252, - 2, - 539, - 262, - 1 - ], - "image_id": 197388, - "bbox": [ - 540.04, - 48.81, - 99.96, - 223.36 - ], - "category_id": 1, - "id": 531914, - "face_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_box": [ - 557.05, - 149.73, - 19.879999999999995, - 21.76000000000002 - ], - 
"lefthand_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_kpts": [ - 565.0, - 153.0, - 0.08773341029882431, - 568.0, - 156.0, - 0.04602484405040741, - 571.0, - 159.0, - 0.04602484405040741, - 573.0, - 161.0, - 0.06972061097621918, - 575.0, - 164.0, - 0.06297813355922699, - 569.0, - 158.0, - 0.294232040643692, - 570.0, - 162.0, - 0.26472434401512146, - 570.0, - 166.0, - 0.2826344072818756, - 571.0, - 171.0, - 0.374575674533844, - 565.0, - 159.0, - 0.2154899388551712, - 566.0, - 162.0, - 0.21613340079784393, - 566.0, - 164.0, - 0.2544613480567932, - 567.0, - 168.0, - 0.31771761178970337, - 562.0, - 160.0, - 0.23286579549312592, - 563.0, - 166.0, - 0.1579097956418991, - 564.0, - 166.0, - 0.17961391806602478, - 564.0, - 166.0, - 0.17504136264324188, - 559.0, - 160.0, - 0.3428754508495331, - 559.0, - 162.0, - 0.2897874116897583, - 561.0, - 165.0, - 0.24125981330871582, - 562.0, - 166.0, - 0.20118576288223267 - ], - "face_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_valid": false, - "lefthand_valid": false, - "righthand_valid": true, - "foot_valid": true, - "foot_kpts": [ - 599.72032, - 264.75714, - 2.0, - 603.91172, - 265.80499, - 2.0, - 585.74897, - 265.10642, - 2.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ] - }, - { - "segmentation": [ - [ - 561.51, - 385.38, - 572.11, - 352.71, - 570.34, - 317.4, - 559.75, - 282.08, - 552.68, - 267.07, - 565.93, - 236.17, - 583.59, - 236.17, - 602.13, - 260.01, - 614.49, - 286.5, - 628.61, - 302.39, - 639.21, - 281.2, - 614.49, - 251.18, - 588, - 218.51, - 595.95, - 202.62, - 594.18, - 185.85, - 580.05, - 170.84, - 562.4, - 179.67, - 557.98, - 198.21, - 554.45, - 202.62, - 532.38, - 199.97, - 525.32, - 202.62, - 511.19, - 229.11, - 493.53, - 
256.48, - 484.7, - 276.78, - 451.15, - 323.58, - 423.78, - 338.59, - 388.47, - 373.9, - 372.58, - 387.14, - 396.41, - 388.03, - 418.49, - 367.72, - 450.27, - 345.65, - 501.48, - 306.8, - 520.02, - 301.5, - 552.68, - 340.35, - 543.86, - 369.49 - ] - ], - "num_keypoints": 16, - "area": 14267.20475, - "iscrowd": 0, - "keypoints": [ - 580, - 211, - 2, - 586, - 206, - 2, - 574, - 204, - 2, - 0, - 0, - 0, - 562, - 198, - 2, - 584, - 220, - 2, - 529, - 215, - 2, - 599, - 242, - 2, - 512, - 260, - 2, - 619, - 274, - 2, - 538, - 285, - 2, - 537, - 288, - 2, - 506, - 277, - 2, - 562, - 332, - 2, - 452, - 332, - 2, - 550, - 387, - 1, - 402, - 371, - 2 - ], - "image_id": 197388, - "bbox": [ - 372.58, - 170.84, - 266.63, - 217.19 - ], - "category_id": 1, - "id": 533949, - "face_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_box": [ - 615.22, - 271.56, - 22.139999999999986, - 28.839999999999975 - ], - "righthand_box": [ - 538.83, - 283.74, - 25.639999999999986, - 30.659999999999968 - ], - "lefthand_kpts": [ - 620.284, - 274.54006, - 1, - 621.65135, - 282.30908999999997, - 1, - 623.0187, - 290.07812, - 1, - 625.38048, - 294.55308, - 1, - 628.86101, - 298.90373999999997, - 1, - 630.22836, - 289.20799, - 1, - 634.57901, - 292.43991, - 1, - 633.08736, - 295.54752, - 1, - 628.6124, - 295.42321, - 1, - 632.46584, - 286.5976, - 1, - 631.3, - 291.9, - 1, - 627.7, - 291.6, - 1, - 625.6, - 288.9, - 1, - 633.7, - 284.2, - 1, - 632.3, - 288.0, - 1, - 629.1, - 288.0, - 1, - 627.0, - 285.9, - 1, - 633.2, - 280.4, - 1, - 632.8, - 283.6, - 1, - 630.8, - 284.4, - 1, - 629.1, - 283.2, - 1 - ], - "righthand_kpts": [ - 544.0, - 291.0, - 0.09089653939008713, - 551.0, - 291.0, - 0.041192591190338135, - 558.0, - 291.0, - 0.041192591190338135, - 559.0, - 294.0, - 0.056781601160764694, - 563.0, - 298.0, - 0.2960541546344757, - 559.0, - 296.0, - 0.18105527758598328, - 562.0, - 301.0, - 0.12244582921266556, - 559.0, - 308.0, - 0.05529222637414932, - 564.0, - 306.0, - 0.05997529253363609, - 555.0, - 299.0, - 0.18805834650993347, - 556.0, - 302.0, - 0.1534559577703476, - 555.0, - 306.0, - 0.20564205944538116, - 556.0, - 309.0, - 0.06228385493159294, - 550.0, - 300.0, - 0.1409723311662674, - 550.0, - 301.0, - 0.2223101258277893, - 551.0, - 305.0, - 0.2001882642507553, - 553.0, - 308.0, - 0.1712668538093567, - 545.0, - 302.0, - 0.1908813714981079, - 546.0, - 304.0, - 0.13619276881217957, - 547.0, - 306.0, - 0.19773860275745392, - 549.0, - 308.0, - 0.1341865360736847 - ], - "face_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 
0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_valid": false, - "lefthand_valid": true, - "righthand_valid": true, - "foot_valid": true, - "foot_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 375.00826, - 386.35839, - 2.0, - 399.52454, - 375.91627, - 2.0 - ] - }, - { - "segmentation": [ - [ - 2.03, - 75.18, - 10.85, - 70.58, - 16.99, - 65.59, - 17.75, - 55.24, - 20.05, - 50.25, - 29.64, - 43.74, - 37.31, - 47.57, - 41.52, - 53.7, - 43.83, - 64.82, - 53.03, - 70.19, - 61.85, - 77.09, - 72.58, - 87.06, - 74.88, - 79.01, - 78.72, - 73.64, - 86.39, - 77.86, - 90.6, - 90.13, - 86, - 93.2, - 82.17, - 102.4, - 75.27, - 106.24, - 68.75, - 104.7, - 50.34, - 90.9, - 43.06, - 112.37, - 40.76, - 123.11, - 42.29, - 130.78, - 48.04, - 161.83, - 52.26, - 190.59, - 50.73, - 210.15, - 44.21, - 245.04, - 50.34, - 256.16, - 53.03, - 261.53, - 47.28, - 263.83, - 40.37, - 263.83, - 31.56, - 260.76, - 28.1, - 256.16, - 26.95, - 244.65, - 29.25, - 233.54, - 32.71, - 223.95, - 33.09, - 213.98, - 32.32, - 206.31, - 32.71, - 194.81, - 33.09, - 185.61, - 24.65, - 177.17, - 16.99, - 161.45, - 13.53, - 176.02, - 10.85, - 206.31, - 1.65, - 231.62, - 1.65, - 235.84, - 0.5, - 146.88, - 0.88, - 122.34, - 1.65, - 75.56 - ] - ], - "num_keypoints": 13, - "area": 8260.75085, - "iscrowd": 0, - "keypoints": [ - 36, - 79, - 2, - 40, - 74, - 2, - 31, - 75, - 2, - 0, - 0, - 0, - 19, - 69, - 2, - 45, - 77, - 2, - 2, - 89, - 2, - 74, - 99, - 2, - 0, - 0, - 0, - 78, - 92, - 2, - 0, - 0, - 0, - 33, - 149, - 2, - 7, - 153, - 2, - 44, - 196, - 2, - 2, - 205, - 2, - 35, - 245, - 2, - 0, - 0, - 0 - ], - "image_id": 197388, - "bbox": [ - 0.5, - 43.74, - 90.1, - 220.09 - ], - "category_id": 1, - "id": 543117, - "face_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_kpts": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 
0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_valid": false, - "lefthand_valid": false, - "righthand_valid": false, - "foot_valid": true, - "foot_kpts": [ - 43.80826, - 259.40011, - 2.0, - 48.63752, - 257.67537, - 2.0, - 32.08007, - 256.29558, - 2.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ] - } - ] -} +{ + "info": { + "description": "COCO-WholeBody sample", + "url": "https://github.com/jin-s13/COCO-WholeBody", + "version": "1.0", + "year": "2020", + "date_created": "2020/09/18" + }, + "licenses": [ + { + "url": "http://creativecommons.org/licenses/by-nc-sa/2.0/", + "id": 1, + "name": "Attribution-NonCommercial-ShareAlike License" + }, + { + "url": "http://creativecommons.org/licenses/by-nc/2.0/", + "id": 2, + "name": "Attribution-NonCommercial License" + }, + { + "url": "http://creativecommons.org/licenses/by-nc-nd/2.0/", + "id": 3, + "name": "Attribution-NonCommercial-NoDerivs License" + }, + { + "url": "http://creativecommons.org/licenses/by/2.0/", + "id": 4, + "name": "Attribution License" + }, + { + "url": "http://creativecommons.org/licenses/by-sa/2.0/", + "id": 5, + "name": "Attribution-ShareAlike License" + }, + { + "url": "http://creativecommons.org/licenses/by-nd/2.0/", + "id": 6, + "name": "Attribution-NoDerivs License" + }, + { + "url": "http://flickr.com/commons/usage/", + "id": 7, + "name": "No known copyright restrictions" + }, + { + "url": "http://www.usa.gov/copyright.shtml", + "id": 8, + "name": "United States Government Work" + } + ], + "categories": [ + { + "supercategory": "person", + "id": 1, + "name": "person", + "keypoints": [ + "nose", + "left_eye", + "right_eye", + "left_ear", + "right_ear", + "left_shoulder", + "right_shoulder", + "left_elbow", + "right_elbow", + "left_wrist", + "right_wrist", + "left_hip", + "right_hip", + "left_knee", + "right_knee", + "left_ankle", + "right_ankle" + ], + "skeleton": [ + [ + 16, + 14 + ], + [ + 14, + 12 + ], + [ + 17, + 15 + ], + [ + 15, + 13 + ], + [ + 12, + 13 + ], + [ + 6, + 12 + ], + [ + 7, + 13 + ], + [ + 6, + 7 + ], + [ + 6, + 8 + ], + [ + 7, + 9 + ], + [ + 8, + 10 + ], + [ + 9, + 11 + ], + [ + 2, + 3 + ], + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 4 + ], + [ + 3, + 5 + ], + [ + 4, + 6 + ], + [ + 5, + 7 + ] + ] + } + ], + "images": [ + { + "license": 4, + "file_name": "000000000785.jpg", + "coco_url": "http://images.cocodataset.org/val2017/000000000785.jpg", + "height": 425, + "width": 640, + "date_captured": "2013-11-19 21:22:42", + "flickr_url": 
"http://farm8.staticflickr.com/7015/6795644157_f019453ae7_z.jpg", + "id": 785 + }, + { + "license": 3, + "file_name": "000000040083.jpg", + "coco_url": "http://images.cocodataset.org/val2017/000000040083.jpg", + "height": 333, + "width": 500, + "date_captured": "2013-11-18 03:30:24", + "flickr_url": "http://farm1.staticflickr.com/116/254881838_e21c6d17b8_z.jpg", + "id": 40083 + }, + { + "license": 1, + "file_name": "000000196141.jpg", + "coco_url": "http://images.cocodataset.org/val2017/000000196141.jpg", + "height": 429, + "width": 640, + "date_captured": "2013-11-22 22:37:15", + "flickr_url": "http://farm4.staticflickr.com/3310/3611902235_57d4ae496d_z.jpg", + "id": 196141 + }, + { + "license": 3, + "file_name": "000000197388.jpg", + "coco_url": "http://images.cocodataset.org/val2017/000000197388.jpg", + "height": 392, + "width": 640, + "date_captured": "2013-11-19 20:10:37", + "flickr_url": "http://farm9.staticflickr.com/8375/8507321836_5b8b13188f_z.jpg", + "id": 197388 + } + ], + "annotations": [ + { + "segmentation": [ + [ + 353.37, + 67.65, + 358.15, + 52.37, + 362.92, + 47.59, + 374.38, + 44.73, + 389.66, + 52.37, + 389.66, + 67.65, + 389.66, + 76.25, + 393.48, + 83.89, + 396.35, + 88.66, + 397.3, + 91.53, + 406.85, + 99.17, + 413.54, + 104.9, + 451.74, + 148.83, + 458.43, + 153.6, + 462.25, + 166.02, + 467.02, + 173.66, + 463.2, + 181.3, + 449.83, + 183.21, + 448.88, + 191.81, + 455.56, + 226.19, + 448.88, + 254.84, + 453.65, + 286.36, + 475.62, + 323.6, + 491.85, + 361.81, + 494.72, + 382.82, + 494.72, + 382.82, + 499.49, + 391.41, + 416.4, + 391.41, + 424.04, + 383.77, + 439.33, + 374.22, + 445.06, + 360.85, + 436.46, + 334.11, + 421.18, + 303.55, + 416.4, + 289.22, + 409.72, + 268.21, + 396.35, + 280.63, + 405.9, + 298.77, + 417.36, + 324.56, + 425, + 349.39, + 425, + 357.99, + 419.27, + 360.85, + 394.44, + 367.54, + 362.92, + 370.4, + 346.69, + 367.54, + 360.06, + 362.76, + 369.61, + 360.85, + 382.98, + 340.8, + 355.28, + 271.08, + 360.06, + 266.3, + 386.8, + 219.5, + 368.65, + 162.2, + 348.6, + 175.57, + 309.44, + 187.03, + 301.8, + 192.76, + 288.43, + 193.72, + 282.7, + 193.72, + 280.79, + 187.03, + 280.79, + 174.62, + 287.47, + 171.75, + 291.29, + 171.75, + 295.11, + 171.75, + 306.57, + 166.98, + 312.3, + 165.07, + 345.73, + 142.14, + 350.51, + 117.31, + 350.51, + 102.03, + 350.51, + 90.57, + 353.37, + 65.74 + ] + ], + "num_keypoints": 17, + "area": 27789.11055, + "iscrowd": 0, + "keypoints": [ + 367, + 81, + 2, + 374, + 73, + 2, + 360, + 75, + 2, + 386, + 78, + 2, + 356, + 81, + 2, + 399, + 108, + 2, + 358, + 129, + 2, + 433, + 142, + 2, + 341, + 159, + 2, + 449, + 165, + 2, + 309, + 178, + 2, + 424, + 203, + 2, + 393, + 214, + 2, + 429, + 294, + 2, + 367, + 273, + 2, + 466, + 362, + 2, + 396, + 341, + 2 + ], + "image_id": 785, + "bbox": [ + 280.79, + 44.73, + 218.7, + 346.68 + ], + "category_id": 1, + "id": 442619, + "face_box": [ + 358.2, + 69.86, + 26.360000000000014, + 25.849999999999994 + ], + "lefthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_box": [ + 280.43, + 173.12, + 27.860000000000014, + 24.849999999999994 + ], + "lefthand_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, 
+ 0.0, + 0.0 + ], + "righthand_kpts": [ + 304.10366, + 181.75134, + 1, + 300.70183, + 182.77567, + 1, + 297.3, + 183.8, + 1, + 294.7, + 186.5, + 1, + 290.1, + 187.8, + 1, + 290.9, + 176.6, + 1, + 287.5, + 176.0, + 1, + 285.5, + 178.4, + 1, + 286.4, + 182.4, + 1, + 288.8, + 179.4, + 1, + 285.0, + 181.0, + 1, + 287.3, + 186.1, + 1, + 291.8, + 189.5, + 1, + 287.7, + 182.7, + 1, + 283.8, + 184.1, + 1, + 286.5, + 189.1, + 1, + 290.0, + 192.0, + 1, + 286.7, + 185.3, + 1, + 282.8, + 187.4, + 1, + 284.8, + 191.6, + 1, + 288.4, + 194.5, + 1 + ], + "face_kpts": [ + 355.823, + 75.36, + 1.0, + 356.354, + 79.0837, + 1.0, + 357.244, + 82.7374, + 1.0, + 358.518, + 86.2722, + 1.0, + 360.146, + 89.6578, + 1.0, + 362.266, + 92.7538, + 1.0, + 365.004, + 95.3223, + 1.0, + 368.487, + 96.6454, + 1.0, + 372.191, + 96.1419, + 1.0, + 375.644, + 94.6832, + 1.0, + 378.601, + 92.3665, + 1.0, + 381.101, + 89.5662, + 1.0, + 382.903, + 86.2741, + 1.0, + 383.896, + 82.6509, + 1.0, + 384.075, + 78.9011, + 1.0, + 384.1, + 75.1408, + 1.0, + 383.903, + 71.3861, + 1.0, + 357.084, + 72.9743, + 1.0, + 358.602, + 71.7848, + 1.0, + 360.42, + 71.3443, + 1.0, + 362.377, + 71.1566, + 1.0, + 364.36, + 71.1889, + 1.0, + 368.971, + 70.4992, + 1.0, + 370.945, + 69.8179, + 1.0, + 373.001, + 69.3543, + 1.0, + 375.14, + 69.2666, + 1.0, + 377.358, + 69.8865, + 1.0, + 366.57, + 73.9588, + 1.0, + 366.734, + 76.1499, + 1.0, + 366.88, + 78.3018, + 1.0, + 366.99, + 80.4957, + 1.0, + 365.104, + 82.5589, + 1.0, + 366.308, + 82.8331, + 1.0, + 367.645, + 82.8037, + 1.0, + 369.172, + 82.2061, + 1.0, + 370.693, + 81.6521, + 1.0, + 358.705, + 75.4542, + 1.0, + 360.294, + 74.0903, + 1.0, + 362.376, + 73.8423, + 1.0, + 364.302, + 74.6834, + 1.0, + 362.543, + 75.568, + 1.0, + 360.612, + 75.8883, + 1.0, + 369.771, + 73.7734, + 1.0, + 371.409, + 72.2638, + 1.0, + 373.615, + 71.9502, + 1.0, + 375.722, + 72.7144, + 1.0, + 373.888, + 73.699, + 1.0, + 371.835, + 74.0238, + 1.0, + 363.184, + 86.9317, + 1.0, + 364.788, + 85.4484, + 1.0, + 367.021, + 84.7474, + 1.0, + 368.048, + 84.5364, + 1.0, + 369.083, + 84.3709, + 1.0, + 372.183, + 84.0529, + 1.0, + 375.083, + 84.8901, + 1.0, + 373.687, + 87.0735, + 1.0, + 371.644, + 88.8121, + 1.0, + 369.024, + 89.6982, + 1.0, + 366.67, + 89.6039, + 1.0, + 364.721, + 88.606, + 1.0, + 363.588, + 86.903, + 1.0, + 365.723, + 85.8496, + 1.0, + 368.184, + 85.2863, + 1.0, + 371.444, + 84.8294, + 1.0, + 374.647, + 85.0454, + 1.0, + 372.166, + 87.2914, + 1.0, + 368.81, + 88.3791, + 1.0, + 365.965, + 88.3238, + 1.0 + ], + "face_valid": true, + "lefthand_valid": false, + "righthand_valid": true, + "foot_valid": true, + "foot_kpts": [ + 439, + 378, + 2, + 446, + 380, + 2, + 479, + 370, + 2, + 377, + 359, + 2, + 376, + 358, + 2, + 413, + 353, + 2 + ] + }, + { + "segmentation": [ + [ + 98.56, + 273.72, + 132.9, + 267, + 140.37, + 281.93, + 165.75, + 285.66, + 156.79, + 264.01, + 170.23, + 261.02, + 177.7, + 272.97, + 182.18, + 279.69, + 200.85, + 268.49, + 212.79, + 255.05, + 188.9, + 256.54, + 164.26, + 240.12, + 139.62, + 212.49, + 109.01, + 221.45, + 103.04, + 220.71, + 122.45, + 202.04, + 113.49, + 196.07, + 96.32, + 168.44, + 97.06, + 162.47, + 110.5, + 136.34, + 112, + 124.39, + 91.09, + 110.95, + 80.64, + 114.68, + 71.68, + 131.86, + 62.72, + 147.54, + 57.49, + 156.5, + 48.53, + 168.44, + 41.07, + 180.39, + 38.08, + 193.08, + 40.32, + 205.03, + 47.04, + 213.24, + 54.5, + 216.23, + 82.13, + 252.06, + 91.09, + 271.48 + ] + ], + "num_keypoints": 14, + "area": 11025.219, + "iscrowd": 0, + "keypoints": [ + 99, + 144, + 2, + 104, + 141, 
+ 2, + 96, + 137, + 2, + 0, + 0, + 0, + 78, + 133, + 2, + 56, + 161, + 2, + 81, + 162, + 2, + 0, + 0, + 0, + 103, + 208, + 2, + 116, + 204, + 2, + 0, + 0, + 0, + 57, + 246, + 1, + 82, + 259, + 1, + 137, + 219, + 2, + 138, + 247, + 2, + 177, + 256, + 2, + 158, + 296, + 1 + ], + "image_id": 40083, + "bbox": [ + 38.08, + 110.95, + 174.71, + 174.71 + ], + "category_id": 1, + "id": 198196, + "face_box": [ + 79.19, + 131.64, + 29.290000000000006, + 28.480000000000018 + ], + "lefthand_box": [ + 104.83, + 196.48, + 16.400000000000006, + 15.810000000000002 + ], + "righthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_kpts": [ + 109.88978, + 204.46047, + 1, + 113.101195, + 201.939065, + 1, + 116.31261, + 199.41766, + 1, + 113.19977, + 199.3139, + 1, + 109.8794, + 200.24775, + 1, + 117.86903, + 199.10638, + 2, + 113.9261, + 199.00262, + 2, + 109.56812, + 198.48381, + 2, + 106.6628, + 198.38004999999998, + 1, + 117.1427, + 202.32298, + 2, + 111.2283, + 201.80417, + 2, + 107.07784000000001, + 201.38913, + 2, + 103.65371999999999, + 201.18161, + 1, + 116.52013, + 205.95463, + 2, + 112.5772, + 205.53958, + 2, + 107.59665, + 204.39821, + 2, + 104.27629, + 203.77564, + 2, + 116.41637, + 209.69004, + 2, + 112.16215, + 209.48252, + 2, + 108.73803000000001, + 208.34114, + 2, + 105.72895, + 206.68096, + 2 + ], + "righthand_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_kpts": [ + 82.9654, + 131.144, + 1.0, + 81.8046, + 134.328, + 1.0, + 80.7007, + 137.531, + 1.0, + 79.8836, + 140.818, + 1.0, + 79.734, + 144.196, + 1.0, + 80.4763, + 147.486, + 1.0, + 82.0188, + 150.498, + 1.0, + 84.2352, + 153.057, + 1.0, + 86.8081, + 155.258, + 1.0, + 89.652, + 157.095, + 1.0, + 92.9128, + 157.812, + 1.0, + 95.962, + 156.474, + 1.0, + 98.5377, + 154.281, + 1.0, + 100.557, + 151.568, + 1.0, + 102.508, + 148.799, + 1.0, + 103.987, + 145.756, + 1.0, + 105.345, + 142.655, + 1.0, + 93.6074, + 132.13, + 1.0, + 95.8108, + 132.112, + 1.0, + 97.7956, + 132.618, + 1.0, + 99.6897, + 133.398, + 1.0, + 101.364, + 134.432, + 1.0, + 105.0, + 136.896, + 1.0, + 105.708, + 137.334, + 1.0, + 106.267, + 137.852, + 1.0, + 106.759, + 138.404, + 1.0, + 107.013, + 139.401, + 1.0, + 100.904, + 139.994, + 1.0, + 100.551, + 142.0, + 1.0, + 100.202, + 143.956, + 1.0, + 99.8116, + 145.919, + 1.0, + 94.7941, + 146.187, + 1.0, + 95.9823, + 147.027, + 1.0, + 97.3054, + 147.849, + 1.0, + 98.2362, + 148.403, + 1.0, + 99.2812, + 148.491, + 1.0, + 93.151, + 135.98, + 1.0, + 94.9184, + 136.187, + 1.0, + 96.5441, + 136.903, + 1.0, + 97.6034, + 138.308, + 1.0, + 95.8998, + 138.017, + 1.0, + 94.3941, + 137.178, + 1.0, + 102.085, + 141.003, + 1.0, + 103.379, + 141.05, + 1.0, + 104.485, + 141.71, + 1.0, + 104.899, + 142.915, + 1.0, + 103.704, + 142.739, + 1.0, + 102.729, + 142.026, + 1.0, + 89.8433, + 148.685, + 1.0, + 92.6494, + 149.006, + 1.0, + 95.2801, + 149.78, + 1.0, + 96.1096, + 150.259, + 1.0, + 96.7411, + 150.719, + 1.0, + 97.3853, + 151.82, + 1.0, + 97.337, + 153.217, + 1.0, + 96.5124, + 153.108, + 1.0, + 95.6091, + 152.796, + 1.0, + 94.7518, + 152.399, + 1.0, + 93.0313, + 151.317, + 1.0, + 91.3461, + 150.149, + 1.0, + 90.24, + 148.802, + 1.0, 
+ 92.9121, + 149.883, + 1.0, + 95.4213, + 151.204, + 1.0, + 96.3082, + 152.03, + 1.0, + 97.1377, + 152.997, + 1.0, + 96.3098, + 152.035, + 1.0, + 95.406, + 151.234, + 1.0, + 92.8725, + 149.984, + 1.0 + ], + "face_valid": true, + "lefthand_valid": true, + "righthand_valid": false, + "foot_valid": true, + "foot_kpts": [ + 208.16049, + 257.42419, + 2.0, + 205.8824, + 259.13276, + 2.0, + 183.38626, + 275.93367, + 2.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ] + }, + { + "segmentation": [ + [ + 257.76, + 288.05, + 273.4, + 258.26, + 325.55, + 253.79, + 335.23, + 232.93, + 326.3, + 186.74, + 333.74, + 177.05, + 327.79, + 153.21, + 333.74, + 142.04, + 344.17, + 139.06, + 353.11, + 139.06, + 359.07, + 145.02, + 360.56, + 148.74, + 362.05, + 168.86, + 388.87, + 197.17, + 397.81, + 276.88, + 372.48, + 293.27 + ] + ], + "num_keypoints": 15, + "area": 10171.9544, + "iscrowd": 0, + "keypoints": [ + 343, + 164, + 2, + 348, + 160, + 2, + 340, + 160, + 2, + 359, + 163, + 2, + 332, + 164, + 2, + 370, + 189, + 2, + 334, + 190, + 2, + 358, + 236, + 2, + 348, + 234, + 2, + 339, + 270, + 2, + 330, + 262, + 2, + 378, + 262, + 2, + 343, + 254, + 2, + 338, + 280, + 2, + 283, + 272, + 2, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "image_id": 40083, + "bbox": [ + 257.76, + 139.06, + 140.05, + 154.21 + ], + "category_id": 1, + "id": 230195, + "face_box": [ + 333.96, + 154.32, + 23.28000000000003, + 26.79000000000002 + ], + "lefthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_kpts": [ + 333.383, + 160.62, + 1.0, + 333.607, + 163.811, + 1.0, + 334.137, + 166.965, + 1.0, + 334.934, + 170.062, + 1.0, + 336.036, + 173.062, + 1.0, + 337.69, + 175.794, + 1.0, + 340.01, + 177.986, + 1.0, + 342.889, + 179.347, + 1.0, + 346.063, + 179.445, + 1.0, + 349.16, + 178.674, + 1.0, + 351.892, + 177.033, + 1.0, + 354.132, + 174.761, + 1.0, + 355.652, + 171.957, + 1.0, + 356.482, + 168.871, + 1.0, + 356.751, + 165.691, + 1.0, + 356.914, + 162.496, + 1.0, + 356.913, + 159.299, + 1.0, + 335.435, + 157.491, + 1.0, + 336.759, + 156.383, + 1.0, + 338.264, + 155.821, + 1.0, + 339.903, + 155.445, + 1.0, + 341.565, + 155.312, + 1.0, + 345.805, + 155.039, + 1.0, + 347.424, + 154.896, + 1.0, + 349.044, + 154.957, + 1.0, + 350.677, + 155.266, + 1.0, + 352.333, + 156.08, + 1.0, + 343.65, + 159.186, + 1.0, + 343.687, + 161.041, + 1.0, + 343.68, + 162.886, + 1.0, + 343.657, + 164.752, + 1.0, + 341.61, + 167.049, + 1.0, + 342.69, + 167.145, + 1.0, + 343.906, + 167.123, + 1.0, + 345.179, + 166.907, + 1.0, + 346.456, + 166.707, + 1.0, + 336.707, + 159.932, + 1.0, 
+ 338.078, + 158.999, + 1.0, + 339.726, + 158.864, + 1.0, + 341.204, + 159.605, + 1.0, + 339.755, + 160.185, + 1.0, + 338.21, + 160.321, + 1.0, + 346.612, + 159.27, + 1.0, + 348.028, + 158.307, + 1.0, + 349.739, + 158.245, + 1.0, + 351.302, + 158.965, + 1.0, + 349.802, + 159.575, + 1.0, + 348.188, + 159.642, + 1.0, + 340.049, + 171.873, + 1.0, + 341.307, + 170.304, + 1.0, + 343.097, + 169.499, + 1.0, + 343.987, + 169.41, + 1.0, + 344.876, + 169.314, + 1.0, + 346.909, + 169.61, + 1.0, + 348.603, + 170.874, + 1.0, + 347.548, + 172.219, + 1.0, + 346.133, + 173.242, + 1.0, + 344.378, + 173.742, + 1.0, + 342.683, + 173.666, + 1.0, + 341.218, + 173.038, + 1.0, + 340.398, + 171.815, + 1.0, + 342.1, + 170.752, + 1.0, + 344.043, + 170.287, + 1.0, + 346.21, + 170.271, + 1.0, + 348.214, + 170.913, + 1.0, + 346.462, + 171.947, + 1.0, + 344.283, + 172.468, + 1.0, + 342.246, + 172.507, + 1.0 + ], + "face_valid": true, + "lefthand_valid": false, + "righthand_valid": false, + "foot_valid": false, + "foot_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ] + }, + { + "segmentation": [ + [ + 285.37, + 126.5, + 281.97, + 127.72, + 280.76, + 132.33, + 280.76, + 136.46, + 275.17, + 143.26, + 275.9, + 158.08, + 277.6, + 164.4, + 278.33, + 173.87, + 278.33, + 183.83, + 279.79, + 191.11, + 281.97, + 194.76, + 284.89, + 192.09, + 284.89, + 186.99, + 284.89, + 181.16, + 284.64, + 177.51, + 285.86, + 173.87 + ] + ], + "num_keypoints": 0, + "area": 491.2669, + "iscrowd": 0, + "keypoints": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "image_id": 40083, + "bbox": [ + 275.17, + 126.5, + 10.69, + 68.26 + ], + "category_id": 1, + "id": 1202706, + "face_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 
0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_valid": false, + "lefthand_valid": false, + "righthand_valid": false, + "foot_valid": false, + "foot_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ] + }, + { + "segmentation": [ + [ + 339.34, + 107.97, + 338.38, + 102.19, + 339.34, + 91.58, + 335.49, + 84.84, + 326.81, + 74.23, + 312.35, + 74.23, + 301.75, + 74.23, + 295, + 86.76, + 295, + 93.51, + 292.11, + 99.3, + 287.29, + 102.19, + 291.14, + 107.01, + 295, + 107.01, + 295.96, + 112.79, + 301.75, + 115.69, + 305.6, + 119.54, + 307.53, + 123.4, + 317.17, + 123.4, + 311.39, + 129.18, + 286.32, + 139.79, + 274.75, + 139.79, + 264.15, + 138.82, + 262.22, + 144.61, + 261.26, + 147.5, + 253.54, + 147.5, + 247.76, + 150.39, + 249.69, + 159.07, + 256.44, + 161, + 262.22, + 161, + 268, + 161, + 276.68, + 161.96, + 284.39, + 168.71, + 293.07, + 174.49, + 301.75, + 174.49, + 308.49, + 169.67, + 308.49, + 188.95, + 311.39, + 194.74, + 312.35, + 208.23, + 307.53, + 221.73, + 297.89, + 229.44, + 281.5, + 250.65, + 269.93, + 262.22, + 278.61, + 320.06, + 281.5, + 331.63, + 276.68, + 338.38, + 270.9, + 349.95, + 262.22, + 356.7, + 253.54, + 359.59, + 253.54, + 365.37, + 274.75, + 365.37, + 291.14, + 365.37, + 306.57, + 359.59, + 303.67, + 352.84, + 297.89, + 340.31, + 293.07, + 318.13, + 295, + 294.03, + 293.07, + 278.61, + 294.03, + 270.9, + 305.6, + 259.33, + 313.31, + 299.82, + 319.1, + 309.46, + 341.27, + 317.17, + 384.65, + 330.67, + 387.55, + 335.49, + 383.69, + 341.27, + 397.19, + 350.91, + 398.15, + 363.44, + 398.15, + 375.01, + 405.86, + 374.05, + 409.72, + 357.66, + 411.65, + 342.24, + 416.47, + 328.74, + 417.43, + 321.03, + 410.68, + 319.1, + 401.04, + 318.13, + 392.37, + 318.13, + 382.73, + 314.28, + 348.98, + 300.78, + 339.34, + 293.07, + 334.52, + 285.36, + 340.31, + 259.33, + 340.31, + 246.8, + 340.31, + 242.94, + 350.91, + 228.48, + 358.62, + 214.98, + 355.22, + 204.32, + 357.05, + 196.11, + 361.61, + 188.82, + 361.61, + 181.97, + 365.26, + 165.63, + 367.54, + 139.18, + 366.17, + 123.68, + 361.15, + 112.73, + 353.86, + 107.72, + 351.58, + 105.89, + 344.74, + 105.89, + 340.18, + 109.08 + ] + ], + "num_keypoints": 15, + "area": 17123.92955, + "iscrowd": 0, + "keypoints": [ + 297, + 111, + 2, + 299, + 106, + 2, + 0, + 0, + 0, + 314, + 108, + 2, + 0, + 0, + 0, + 329, + 141, + 2, + 346, + 125, + 2, + 295, + 164, + 2, + 323, + 130, + 2, + 266, + 155, + 2, + 279, + 143, + 2, + 329, + 225, + 2, + 331, + 221, + 2, + 327, + 298, + 2, + 283, + 269, + 2, + 398, + 327, + 2, + 288, + 349, + 2 + ], + "image_id": 196141, + "bbox": [ + 247.76, + 
74.23, + 169.67, + 300.78 + ], + "category_id": 1, + "id": 460541, + "face_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_box": [ + 249.12, + 146.31, + 19.920000000000016, + 15.819999999999993 + ], + "righthand_box": [ + 262.82, + 139.96, + 18.930000000000007, + 14.679999999999978 + ], + "lefthand_kpts": [ + 265.1, + 155.9, + 1, + 260.05, + 152.25, + 1, + 255.0, + 148.6, + 1, + 250.6, + 148.6, + 1, + 249.1, + 151.0, + 1, + 253.4, + 158.9, + 1, + 251.9, + 155.1, + 1, + 252.0, + 151.9, + 1, + 252.9, + 150.0, + 1, + 257.4, + 157.9, + 1, + 256.7, + 154.2, + 1, + 256.3, + 151.6, + 1, + 256.9, + 149.3, + 1, + 260.2, + 156.5, + 1, + 260.1, + 153.0, + 1, + 259.9, + 150.7, + 1, + 260.2, + 148.7, + 1, + 262.8, + 154.8, + 1, + 262.7, + 152.5, + 1, + 262.7, + 150.9, + 1, + 262.6, + 148.8, + 1 + ], + "righthand_kpts": [ + 280.8, + 146.5, + 1, + 275.4, + 149.15, + 1, + 270.0, + 151.8, + 1, + 266.2, + 152.2, + 1, + 263.5, + 151.9, + 1, + 266.6, + 142.5, + 1, + 263.6, + 147.0, + 1, + 264.9, + 151.0, + 1, + 268.5, + 152.9, + 1, + 270.6, + 142.0, + 1, + 267.9, + 146.0, + 1, + 269.4, + 149.6, + 1, + 272.5, + 151.5, + 1, + 273.8, + 142.1, + 1, + 272.2, + 146.0, + 1, + 274.2, + 149.1, + 1, + 276.5, + 149.6, + 1, + 277.4, + 142.3, + 1, + 276.6, + 145.2, + 1, + 277.6, + 148.3, + 1, + 279.4, + 148.6, + 1 + ], + "face_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_valid": false, + "lefthand_valid": true, + "righthand_valid": true, + "foot_valid": true, + "foot_kpts": [ + 401.79499, + 364.28207, + 2.0, + 407.21854, + 361.57029, + 2.0, + 407.21854, + 325.86523, + 2.0, + 257.16687, + 361.57029, + 2.0, + 258.52276, + 361.11833, + 2.0, + 297.84353, + 355.69477, + 2.0 + ] + }, + { + "segmentation": [ + [ + 578.76, + 112.4, + 589.39, + 100.81, + 589.39, + 99.84, + 596.16, + 116.27, + 603.89, + 122.07, + 603.89, + 138.49, + 598.09, + 159.75, + 597.12, + 181, + 594.22, + 191.63, + 589.39, + 212.89, + 583.59, + 208.06, + 583.59, + 206.13, + 582.63, + 200.33, + 582.63, + 193.57, + 582.63, + 182.94, + 575.86, + 181, + 567.17, + 197.43, + 571.03, + 203.23, + 567.17, + 207.09, + 555.57, + 208.06, + 562.34, + 200.33, + 565.24, + 190.67, + 565.24, + 
173.27, + 566.2, + 163.61, + 568.14, + 156.85, + 570.07, + 148.15, + 566.2, + 143.32, + 565.24, + 133.66, + 575.86, + 118.2 + ] + ], + "num_keypoints": 15, + "area": 2789.0208, + "iscrowd": 0, + "keypoints": [ + 589, + 113, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 595, + 112, + 1, + 584, + 110, + 2, + 598, + 123, + 2, + 579, + 119, + 2, + 594, + 141, + 2, + 570, + 137, + 2, + 576, + 135, + 2, + 585, + 139, + 2, + 590, + 157, + 2, + 574, + 156, + 2, + 589, + 192, + 2, + 565, + 189, + 1, + 587, + 222, + 1, + 557, + 219, + 1 + ], + "image_id": 196141, + "bbox": [ + 555.57, + 99.84, + 48.32, + 113.05 + ], + "category_id": 1, + "id": 488308, + "face_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_box": [ + 568.2, + 130.89, + 10.75, + 11.130000000000024 + ], + "righthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_kpts": [ + 578.8, + 135.7, + 2, + 577.55, + 134.35, + 2, + 576.3, + 133.0, + 1, + 574.6, + 134.1, + 1, + 574.0, + 135.5, + 1, + 574.3, + 132.9, + 2, + 572.0, + 132.4, + 2, + 570.3, + 131.8, + 2, + 568.9, + 130.7, + 2, + 573.3, + 134.4, + 2, + 570.9, + 134.0, + 2, + 569.5, + 133.9, + 2, + 568.2, + 133.8, + 2, + 572.8, + 135.7, + 2, + 572.6, + 138.3, + 2, + 574.1, + 139.4, + 2, + 576.2, + 139.4, + 1, + 574.4, + 138.0, + 2, + 575.4, + 139.5, + 2, + 576.3, + 140.2, + 2, + 577.6, + 140.8, + 2 + ], + "righthand_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_valid": false, + "lefthand_valid": true, + "righthand_valid": false, + "foot_valid": false, + "foot_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ] + }, + { + "segmentation": [ + [ + 446.96, + 73.13, + 445.81, + 
77.71, + 443.33, + 78.29, + 441.61, + 81.72, + 441.23, + 84.58, + 440.85, + 90.5, + 442.19, + 94.32, + 443.52, + 97.18, + 443.52, + 102.33, + 442.57, + 105.58, + 446.58, + 105.19, + 447.15, + 99.85, + 447.53, + 94.89, + 446, + 93.55, + 446.38, + 92.03, + 453.64, + 92.41, + 454.02, + 94.51, + 457.64, + 94.51, + 455.74, + 88.4, + 455.35, + 82.29, + 453.64, + 78.48, + 451.92, + 77.71, + 452.87, + 74.47, + 450.58, + 73.13 + ] + ], + "num_keypoints": 0, + "area": 285.7906, + "iscrowd": 0, + "keypoints": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "image_id": 196141, + "bbox": [ + 440.85, + 73.13, + 16.79, + 32.45 + ], + "category_id": 1, + "id": 508900, + "face_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_valid": false, + "lefthand_valid": false, + "righthand_valid": false, + "foot_valid": false, + "foot_kpts": [ + 0.0, + 0.0, + 0.0, + 
0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ] + }, + { + "segmentation": [ + [ + 497.15, + 413.95, + 531.55, + 417.68, + 548.74, + 411.7, + 551.74, + 403.48, + 546.5, + 394.5, + 543.51, + 386.28, + 571.93, + 390.76, + 574.92, + 391.51, + 579.4, + 409.46, + 605.58, + 409.46, + 615.3, + 408.71, + 607.07, + 389.27, + 598.1, + 381.79, + 607.82, + 366.83, + 607.82, + 352.63, + 610.06, + 338.42, + 619.04, + 345.15, + 631, + 344.4, + 630.25, + 336.92, + 626.51, + 318.98, + 616.05, + 286.07, + 598.85, + 263.64, + 585.39, + 257.66, + 593.61, + 244.2, + 601.09, + 235.97, + 596.6, + 219.52, + 587.63, + 211.29, + 577.91, + 208.3, + 563.7, + 206.81, + 556.22, + 214.29, + 548, + 217.28, + 539.77, + 229.99, + 539.77, + 241.95, + 539.02, + 247.19, + 523.32, + 247.19, + 503.88, + 254.67, + 485.93, + 254.67, + 479.95, + 248.68, + 473.22, + 241.21, + 485.93, + 227, + 477.7, + 215.78, + 457.51, + 215.78, + 453.77, + 235.22, + 463.5, + 246.44, + 465.74, + 261.4, + 490.42, + 274.11, + 501.63, + 275.6, + 504.62, + 286.07, + 519.58, + 286.07, + 522.57, + 292.06, + 512.85, + 310, + 515.09, + 330.94, + 530.05, + 343.65, + 505.37, + 341.41, + 479.95, + 339.91, + 465.74, + 346.64, + 463.5, + 358.61, + 473.97, + 381.04, + 485.18, + 390.02, + 501.63, + 398.99, + 504.62, + 404.22, + 491.16, + 412.45, + 495.65, + 417.68 + ] + ], + "num_keypoints": 12, + "area": 21608.94075, + "iscrowd": 0, + "keypoints": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 552, + 234, + 2, + 0, + 0, + 0, + 531, + 262, + 2, + 600, + 283, + 2, + 480, + 260, + 2, + 622, + 336, + 2, + 466, + 242, + 2, + 0, + 0, + 0, + 546, + 365, + 2, + 592, + 371, + 2, + 470, + 351, + 2, + 551, + 330, + 2, + 519, + 394, + 2, + 589, + 391, + 2 + ], + "image_id": 196141, + "bbox": [ + 453.77, + 206.81, + 177.23, + 210.87 + ], + "category_id": 1, + "id": 1717641, + "face_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 
0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_valid": false, + "lefthand_valid": false, + "righthand_valid": false, + "foot_valid": true, + "foot_kpts": [ + 0.0, + 0.0, + 0.0, + 498.08009, + 412.23863, + 2.0, + 541.66626, + 400.39384, + 2.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 602.22109, + 403.58794, + 2.0 + ] + }, + { + "segmentation": [ + [ + 58.93, + 163.67, + 47.18, + 161.59, + 36.12, + 93.86, + 41.65, + 82.8, + 40.27, + 69.66, + 50.64, + 67.59, + 55.48, + 73.81, + 63.08, + 92.47, + 66.53, + 99.38, + 65.15, + 109.06, + 61, + 127.03, + 59.62, + 162.97 + ] + ], + "num_keypoints": 17, + "area": 1870.14015, + "iscrowd": 0, + "keypoints": [ + 48, + 79, + 2, + 50, + 77, + 2, + 46, + 77, + 2, + 54, + 78, + 2, + 45, + 78, + 2, + 57, + 90, + 2, + 42, + 90, + 2, + 63, + 103, + 2, + 42, + 105, + 2, + 56, + 113, + 2, + 49, + 112, + 2, + 55, + 117, + 2, + 44, + 117, + 2, + 55, + 140, + 2, + 47, + 140, + 2, + 56, + 160, + 2, + 49, + 159, + 2 + ], + "image_id": 196141, + "bbox": [ + 36.12, + 67.59, + 30.41, + 96.08 + ], + "category_id": 1, + "id": 1724673, + "face_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 
0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_valid": false, + "lefthand_valid": false, + "righthand_valid": false, + "foot_valid": true, + "foot_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 44.4, + 162.6, + 2.0, + 43.4, + 161.5, + 2.0, + 51.7, + 160.7, + 2.0 + ] + }, + { + "segmentation": [ + [ + 139.41, + 321.58, + 144.78, + 326.56, + 196.92, + 314.68, + 196.16, + 309.31, + 207.28, + 292.05, + 213.03, + 284, + 228.75, + 270.2, + 233.35, + 261.38, + 244.47, + 252.56, + 254.44, + 237.61, + 267.86, + 215.37, + 272.08, + 212.68, + 285.5, + 232.62, + 294.7, + 250.64, + 295.08, + 264.06, + 290.87, + 277.87, + 290.87, + 286.3, + 289.71, + 298.19, + 281.66, + 318.89, + 282.05, + 334.23, + 295.08, + 340.37, + 315.02, + 343.82, + 314.25, + 336.53, + 310.42, + 330.4, + 301.98, + 322.34, + 304.29, + 310.84, + 304.67, + 302.79, + 306.2, + 292.05, + 311.19, + 275.56, + 313.87, + 251.79, + 311.19, + 234.54, + 312.72, + 224.57, + 310.42, + 212.3, + 307.74, + 201.56, + 306.2, + 193.51, + 306.59, + 183.16, + 310.04, + 177.41, + 314.64, + 173.19, + 316.94, + 171.65, + 328.06, + 163.99, + 337.64, + 157.85, + 343.4, + 159.77, + 346.46, + 166.67, + 346.85, + 170.5, + 346.46, + 179.71, + 346.85, + 188.53, + 346.85, + 191.98, + 344.55, + 198.11, + 342.25, + 203.48, + 338.41, + 208.46, + 335.34, + 212.68, + 335.34, + 217.67, + 343.01, + 222.65, + 354.9, + 210.76, + 359.12, + 196.19, + 361.8, + 173.19, + 361.42, + 161.69, + 356.43, + 150.18, + 344.93, + 135.61, + 343.01, + 132.93, + 345.31, + 126.41, + 345.7, + 124.88, + 343.4, + 115.29, + 340.33, + 104.17, + 337.26, + 102.25, + 330.36, + 103.4, + 326.14, + 106.09, + 320.01, + 111.07, + 314.64, + 119.89, + 310.42, + 121.04, + 292.02, + 121.81, + 279.75, + 127.94, + 244.09, + 138.68, + 240.25, + 142.51, + 238.72, + 154.4, + 239.1, + 163.6, + 239.87, + 173.96, + 241.79, + 181.24, + 248.3, + 192.36, + 240.25, + 206.55, + 236.42, + 219.2, + 229.9, + 236.45, + 225.3, + 247.57, + 218.4, + 254.48, + 208.81, + 265.6, + 202.29, + 278.25, + 195.39, + 285.92, + 188.49, + 292.05, + 183.5, + 295.89, + 176.6, + 302.41, + 172, + 308.54, + 167.78, + 313.14, + 146.31, + 318.89 + ] + ], + "num_keypoints": 16, + "area": 14250.29385, + "iscrowd": 0, + "keypoints": [ + 334, + 135, + 2, + 340, + 129, + 2, + 331, + 129, + 2, + 0, + 0, + 0, + 319, + 123, + 2, + 340, + 146, + 2, + 292, + 133, + 2, + 353, + 164, + 2, + 246, + 144, + 2, + 354, + 197, + 2, + 250, + 185, + 2, + 293, + 197, + 2, + 265, + 187, + 2, + 305, + 252, + 2, + 231, + 254, + 2, + 293, + 321, + 2, + 193, + 297, + 2 + ], + "image_id": 197388, + "bbox": [ + 139.41, + 102.25, + 222.39, + 241.57 + ], + "category_id": 1, + "id": 437295, + "face_box": [ + 320.23, + 123.84, + 
21.049999999999955, + 23.5 + ], + "lefthand_box": [ + 333.65, + 198.45, + 23.150000000000034, + 23.57000000000002 + ], + "righthand_box": [ + 247.5, + 184.92, + 23.30000000000001, + 22.360000000000014 + ], + "lefthand_kpts": [ + 353.87482, + 196.49984999999998, + 1, + 349.01957500000003, + 201.76511, + 1, + 344.16433, + 207.03037, + 1, + 340.81534, + 210.64729, + 1, + 337.46165, + 216.59183000000002, + 1, + 346.65868, + 216.02586, + 1, + 342.27241, + 219.28019999999998, + 1, + 337.88613, + 219.70467, + 1, + 334.4903, + 218.57273, + 1, + 345.5, + 215.0, + 1, + 342.27241, + 217.72377, + 1, + 338.73509, + 218.00675999999999, + 1, + 334.77329, + 216.30885, + 1, + 343.7, + 213.8, + 1, + 341.42345, + 215.74288, + 1, + 338.73509, + 215.60138, + 1, + 335.62225, + 213.76198, + 1, + 342.4139, + 212.63003, + 1, + 340.85748, + 213.76198, + 1, + 338.87658, + 214.04496, + 1, + 337.17867, + 213.76198, + 1 + ], + "righthand_kpts": [ + 249.4, + 180.4, + 1, + 254.3, + 184.9, + 1, + 259.2, + 189.4, + 1, + 259.3, + 192.1, + 1, + 258.2, + 194.9, + 1, + 254.9, + 193.2, + 1, + 255.9, + 192.3, + 1, + 255.9, + 190.5, + 1, + 255.4, + 188.5, + 1, + 252.2, + 194.0, + 1, + 253.2, + 193.6, + 1, + 253.2, + 191.1, + 1, + 252.9, + 188.8, + 1, + 249.4, + 193.6, + 1, + 250.4, + 193.6, + 1, + 250.4, + 191.3, + 1, + 249.9, + 188.7, + 1, + 247.1, + 192.2, + 1, + 248.0, + 192.2, + 1, + 247.9, + 190.3, + 1, + 247.5, + 188.3, + 1 + ], + "face_kpts": [ + 319.681, + 126.613, + 1.0, + 319.155, + 129.261, + 1.0, + 318.92, + 131.954, + 1.0, + 319.187, + 134.631, + 1.0, + 319.707, + 137.271, + 1.0, + 320.991, + 139.649, + 1.0, + 322.846, + 141.606, + 1.0, + 325.009, + 143.216, + 1.0, + 327.359, + 144.544, + 1.0, + 329.907, + 145.384, + 1.0, + 332.347, + 144.347, + 1.0, + 334.268, + 142.449, + 1.0, + 335.767, + 140.222, + 1.0, + 336.675, + 137.69, + 1.0, + 337.019, + 135.009, + 1.0, + 336.982, + 132.311, + 1.0, + 337.13, + 129.618, + 1.0, + 328.503, + 125.823, + 1.0, + 329.531, + 125.489, + 1.0, + 330.619, + 125.626, + 1.0, + 331.573, + 125.909, + 1.0, + 332.529, + 126.431, + 1.0, + 334.479, + 127.459, + 1.0, + 334.815, + 127.43, + 1.0, + 335.157, + 127.316, + 1.0, + 335.52, + 127.327, + 1.0, + 335.949, + 127.701, + 1.0, + 332.762, + 129.334, + 1.0, + 333.168, + 130.389, + 1.0, + 333.603, + 131.342, + 1.0, + 333.928, + 132.331, + 1.0, + 331.671, + 134.291, + 1.0, + 332.232, + 134.389, + 1.0, + 332.931, + 134.487, + 1.0, + 333.332, + 134.463, + 1.0, + 333.645, + 134.212, + 1.0, + 329.271, + 128.208, + 1.0, + 329.963, + 128.464, + 1.0, + 330.676, + 128.659, + 1.0, + 331.392, + 128.839, + 1.0, + 330.672, + 128.659, + 1.0, + 330.003, + 128.334, + 1.0, + 333.792, + 129.611, + 1.0, + 334.158, + 129.741, + 1.0, + 334.546, + 129.765, + 1.0, + 334.878, + 129.954, + 1.0, + 334.523, + 129.822, + 1.0, + 334.161, + 129.704, + 1.0, + 327.38, + 138.818, + 1.0, + 329.757, + 138.136, + 1.0, + 332.086, + 137.874, + 1.0, + 332.75, + 138.208, + 1.0, + 333.221, + 138.515, + 1.0, + 334.495, + 139.634, + 1.0, + 335.213, + 141.054, + 1.0, + 334.12, + 140.754, + 1.0, + 333.208, + 140.234, + 1.0, + 332.2, + 139.888, + 1.0, + 330.765, + 139.414, + 1.0, + 329.069, + 139.351, + 1.0, + 327.561, + 138.814, + 1.0, + 329.88, + 138.346, + 1.0, + 332.517, + 138.668, + 1.0, + 334.031, + 139.589, + 1.0, + 335.123, + 140.862, + 1.0, + 333.726, + 140.572, + 1.0, + 332.203, + 140.032, + 1.0, + 329.731, + 139.403, + 1.0 + ], + "face_valid": true, + "lefthand_valid": true, + "righthand_valid": true, + "foot_valid": true, + "foot_kpts": [ + 300.24175, + 336.83838, + 2.0, + 
306.59015, + 335.34464, + 2.0, + 290.07408, + 326.47826, + 2.0, + 182.60972, + 314.05885, + 2.0, + 175.88789, + 305.84328, + 2.0, + 189.70499, + 302.48236, + 2.0 + ] + }, + { + "segmentation": [ + [ + 287.17, + 121.42, + 294.22, + 106.44, + 302.15, + 116.13, + 303.03, + 121.42 + ], + [ + 297.74, + 99.39, + 310.08, + 76.49, + 326.81, + 76.49, + 329.46, + 67.68, + 337.38, + 61.52, + 346.19, + 62.4, + 353.24, + 65.92, + 353.24, + 76.49, + 355.88, + 84.42, + 359.41, + 87.94, + 362.05, + 96.75, + 354.12, + 139.04, + 349.72, + 142.56, + 345.31, + 139.92, + 349.72, + 117.89, + 348.84, + 108.2, + 345.31, + 113.49, + 336.5, + 101.16, + 325.93, + 110.85, + 311.84, + 123.18 + ], + [ + 324.17, + 176.91, + 332.1, + 191.89, + 328.58, + 198.94, + 327.69, + 205.98, + 333.86, + 213.03, + 337.38, + 227.13, + 332.98, + 227.13, + 319.77, + 219.2, + 313.6, + 211.27 + ], + [ + 332.98, + 165.46, + 341.79, + 161.06, + 336.5, + 174.27, + 333.86, + 186.6, + 326.81, + 176.03 + ] + ], + "num_keypoints": 16, + "area": 3404.869, + "iscrowd": 0, + "keypoints": [ + 345, + 92, + 2, + 350, + 87, + 2, + 341, + 87, + 2, + 0, + 0, + 0, + 330, + 83, + 2, + 357, + 94, + 2, + 316, + 92, + 2, + 357, + 104, + 2, + 291, + 123, + 1, + 351, + 133, + 2, + 281, + 136, + 1, + 326, + 131, + 1, + 305, + 128, + 1, + 336, + 152, + 1, + 303, + 171, + 1, + 318, + 206, + 2, + 294, + 211, + 1 + ], + "image_id": 197388, + "bbox": [ + 287.17, + 61.52, + 74.88, + 165.61 + ], + "category_id": 1, + "id": 467657, + "face_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 
0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_valid": false, + "lefthand_valid": false, + "righthand_valid": false, + "foot_valid": true, + "foot_kpts": [ + 322.595, + 216.245, + 2.0, + 327.23077, + 215.42692, + 2.0, + 316.81553, + 207.67155, + 2.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ] + }, + { + "segmentation": [ + [ + 547.95, + 201.57, + 546.73, + 190.62, + 547.95, + 181.49, + 547.95, + 169.31, + 547.95, + 156.53, + 546.73, + 144.36, + 544.3, + 139.49, + 540.04, + 132.19, + 540.04, + 121.84, + 542.47, + 107.24, + 544.3, + 99.33, + 548.56, + 88.98, + 561.95, + 78.03, + 572.29, + 71.33, + 572.29, + 71.33, + 572.29, + 65.25, + 574.12, + 51.86, + 583.86, + 48.81, + 592.99, + 48.81, + 597.86, + 57.33, + 599.07, + 64.64, + 608.2, + 76.81, + 614.9, + 82.89, + 620.98, + 89.59, + 628.89, + 93.24, + 636.81, + 101.76, + 640, + 109.67, + 640, + 115.76, + 640, + 127.93, + 620.37, + 111.5, + 619.16, + 111.5, + 618.55, + 112.11, + 608.2, + 105.41, + 600.9, + 119.41, + 592.99, + 131.58, + 596.03, + 148.01, + 605.16, + 162.01, + 612.46, + 190.01, + 614.9, + 204.61, + 606.98, + 216.78, + 603.94, + 226.52, + 606.38, + 239.91, + 605.16, + 256.95, + 604.55, + 264.26, + 602.12, + 271.56, + 586.29, + 272.17, + 584.47, + 255.13, + 588.73, + 237.48, + 592.99, + 221.65, + 596.64, + 207.05, + 596.64, + 197.31, + 594.2, + 186.96, + 584.47, + 172.36, + 577.77, + 166.27, + 570.47, + 170.53, + 558.91, + 179.66, + 555.86, + 192.44, + 548.56, + 198.53, + 547.95, + 198.53 + ] + ], + "num_keypoints": 15, + "area": 8913.98475, + "iscrowd": 0, + "keypoints": [ + 591, + 78, + 2, + 594, + 74, + 2, + 586, + 74, + 2, + 0, + 0, + 0, + 573, + 70, + 2, + 598, + 86, + 2, + 566, + 93, + 2, + 626, + 105, + 2, + 546, + 126, + 2, + 0, + 0, + 0, + 561, + 150, + 2, + 582, + 150, + 2, + 557, + 154, + 2, + 606, + 194, + 2, + 558, + 209, + 1, + 591, + 252, + 2, + 539, + 262, + 1 + ], + "image_id": 197388, + "bbox": [ + 540.04, + 48.81, + 99.96, + 223.36 + ], + "category_id": 1, + "id": 531914, + "face_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_box": [ + 557.05, + 149.73, + 19.879999999999995, + 21.76000000000002 + ], + "lefthand_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_kpts": [ + 565.0, + 153.0, + 0.08773341029882431, + 568.0, + 156.0, + 0.04602484405040741, + 571.0, + 159.0, + 0.04602484405040741, + 573.0, + 161.0, + 0.06972061097621918, + 575.0, + 164.0, + 0.06297813355922699, + 569.0, + 158.0, + 0.294232040643692, + 570.0, + 162.0, + 0.26472434401512146, + 570.0, + 166.0, + 0.2826344072818756, + 571.0, + 171.0, + 0.374575674533844, + 565.0, + 159.0, + 0.2154899388551712, + 566.0, + 162.0, + 0.21613340079784393, + 566.0, 
+ 164.0, + 0.2544613480567932, + 567.0, + 168.0, + 0.31771761178970337, + 562.0, + 160.0, + 0.23286579549312592, + 563.0, + 166.0, + 0.1579097956418991, + 564.0, + 166.0, + 0.17961391806602478, + 564.0, + 166.0, + 0.17504136264324188, + 559.0, + 160.0, + 0.3428754508495331, + 559.0, + 162.0, + 0.2897874116897583, + 561.0, + 165.0, + 0.24125981330871582, + 562.0, + 166.0, + 0.20118576288223267 + ], + "face_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_valid": false, + "lefthand_valid": false, + "righthand_valid": true, + "foot_valid": true, + "foot_kpts": [ + 599.72032, + 264.75714, + 2.0, + 603.91172, + 265.80499, + 2.0, + 585.74897, + 265.10642, + 2.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ] + }, + { + "segmentation": [ + [ + 561.51, + 385.38, + 572.11, + 352.71, + 570.34, + 317.4, + 559.75, + 282.08, + 552.68, + 267.07, + 565.93, + 236.17, + 583.59, + 236.17, + 602.13, + 260.01, + 614.49, + 286.5, + 628.61, + 302.39, + 639.21, + 281.2, + 614.49, + 251.18, + 588, + 218.51, + 595.95, + 202.62, + 594.18, + 185.85, + 580.05, + 170.84, + 562.4, + 179.67, + 557.98, + 198.21, + 554.45, + 202.62, + 532.38, + 199.97, + 525.32, + 202.62, + 511.19, + 229.11, + 493.53, + 256.48, + 484.7, + 276.78, + 451.15, + 323.58, + 423.78, + 338.59, + 388.47, + 373.9, + 372.58, + 387.14, + 396.41, + 388.03, + 418.49, + 367.72, + 450.27, + 345.65, + 501.48, + 306.8, + 520.02, + 301.5, + 552.68, + 340.35, + 543.86, + 369.49 + ] + ], + "num_keypoints": 16, + "area": 14267.20475, + "iscrowd": 0, + "keypoints": [ + 580, + 211, + 2, + 586, + 206, + 2, + 574, + 204, + 2, + 0, + 0, + 0, + 562, + 198, + 2, + 584, + 220, + 2, + 529, + 215, + 2, + 599, + 242, + 2, + 512, + 260, + 2, + 619, + 274, + 2, + 538, + 285, + 2, + 537, + 288, + 2, + 506, + 277, + 2, + 562, + 332, + 2, + 452, + 332, + 2, + 550, + 387, + 1, + 402, + 371, + 2 + ], + "image_id": 197388, + "bbox": [ + 372.58, + 170.84, + 266.63, + 217.19 + ], + "category_id": 1, + "id": 533949, + "face_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_box": [ + 615.22, + 271.56, + 22.139999999999986, + 28.839999999999975 + ], + "righthand_box": [ + 538.83, + 283.74, 
+ 25.639999999999986, + 30.659999999999968 + ], + "lefthand_kpts": [ + 620.284, + 274.54006, + 1, + 621.65135, + 282.30908999999997, + 1, + 623.0187, + 290.07812, + 1, + 625.38048, + 294.55308, + 1, + 628.86101, + 298.90373999999997, + 1, + 630.22836, + 289.20799, + 1, + 634.57901, + 292.43991, + 1, + 633.08736, + 295.54752, + 1, + 628.6124, + 295.42321, + 1, + 632.46584, + 286.5976, + 1, + 631.3, + 291.9, + 1, + 627.7, + 291.6, + 1, + 625.6, + 288.9, + 1, + 633.7, + 284.2, + 1, + 632.3, + 288.0, + 1, + 629.1, + 288.0, + 1, + 627.0, + 285.9, + 1, + 633.2, + 280.4, + 1, + 632.8, + 283.6, + 1, + 630.8, + 284.4, + 1, + 629.1, + 283.2, + 1 + ], + "righthand_kpts": [ + 544.0, + 291.0, + 0.09089653939008713, + 551.0, + 291.0, + 0.041192591190338135, + 558.0, + 291.0, + 0.041192591190338135, + 559.0, + 294.0, + 0.056781601160764694, + 563.0, + 298.0, + 0.2960541546344757, + 559.0, + 296.0, + 0.18105527758598328, + 562.0, + 301.0, + 0.12244582921266556, + 559.0, + 308.0, + 0.05529222637414932, + 564.0, + 306.0, + 0.05997529253363609, + 555.0, + 299.0, + 0.18805834650993347, + 556.0, + 302.0, + 0.1534559577703476, + 555.0, + 306.0, + 0.20564205944538116, + 556.0, + 309.0, + 0.06228385493159294, + 550.0, + 300.0, + 0.1409723311662674, + 550.0, + 301.0, + 0.2223101258277893, + 551.0, + 305.0, + 0.2001882642507553, + 553.0, + 308.0, + 0.1712668538093567, + 545.0, + 302.0, + 0.1908813714981079, + 546.0, + 304.0, + 0.13619276881217957, + 547.0, + 306.0, + 0.19773860275745392, + 549.0, + 308.0, + 0.1341865360736847 + ], + "face_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_valid": false, + "lefthand_valid": true, + "righthand_valid": true, + "foot_valid": true, + "foot_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 375.00826, + 386.35839, + 2.0, + 399.52454, + 375.91627, + 2.0 + ] + }, + { + "segmentation": [ + [ + 2.03, + 75.18, + 10.85, + 70.58, + 16.99, + 65.59, + 17.75, + 55.24, + 20.05, + 50.25, + 29.64, + 43.74, + 37.31, + 47.57, + 41.52, + 53.7, + 43.83, + 64.82, + 53.03, + 70.19, + 61.85, + 77.09, + 72.58, + 87.06, + 74.88, + 79.01, + 78.72, + 73.64, + 86.39, + 77.86, + 
90.6, + 90.13, + 86, + 93.2, + 82.17, + 102.4, + 75.27, + 106.24, + 68.75, + 104.7, + 50.34, + 90.9, + 43.06, + 112.37, + 40.76, + 123.11, + 42.29, + 130.78, + 48.04, + 161.83, + 52.26, + 190.59, + 50.73, + 210.15, + 44.21, + 245.04, + 50.34, + 256.16, + 53.03, + 261.53, + 47.28, + 263.83, + 40.37, + 263.83, + 31.56, + 260.76, + 28.1, + 256.16, + 26.95, + 244.65, + 29.25, + 233.54, + 32.71, + 223.95, + 33.09, + 213.98, + 32.32, + 206.31, + 32.71, + 194.81, + 33.09, + 185.61, + 24.65, + 177.17, + 16.99, + 161.45, + 13.53, + 176.02, + 10.85, + 206.31, + 1.65, + 231.62, + 1.65, + 235.84, + 0.5, + 146.88, + 0.88, + 122.34, + 1.65, + 75.56 + ] + ], + "num_keypoints": 13, + "area": 8260.75085, + "iscrowd": 0, + "keypoints": [ + 36, + 79, + 2, + 40, + 74, + 2, + 31, + 75, + 2, + 0, + 0, + 0, + 19, + 69, + 2, + 45, + 77, + 2, + 2, + 89, + 2, + 74, + 99, + 2, + 0, + 0, + 0, + 78, + 92, + 2, + 0, + 0, + 0, + 33, + 149, + 2, + 7, + 153, + 2, + 44, + 196, + 2, + 2, + 205, + 2, + 35, + 245, + 2, + 0, + 0, + 0 + ], + "image_id": 197388, + "bbox": [ + 0.5, + 43.74, + 90.1, + 220.09 + ], + "category_id": 1, + "id": 543117, + "face_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_kpts": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 
0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_valid": false, + "lefthand_valid": false, + "righthand_valid": false, + "foot_valid": true, + "foot_kpts": [ + 43.80826, + 259.40011, + 2.0, + 48.63752, + 257.67537, + 2.0, + 32.08007, + 256.29558, + 2.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ] + } + ] +} diff --git a/tests/data/coco/test_keypoint_partition_metric.json b/tests/data/coco/test_keypoint_partition_metric.json index 9d04f5e87b..741df742b7 100644 --- a/tests/data/coco/test_keypoint_partition_metric.json +++ b/tests/data/coco/test_keypoint_partition_metric.json @@ -1,7647 +1,7647 @@ -{ - "info": { - "description": "COCO-WholeBody sample", - "url": "https://github.com/jin-s13/COCO-WholeBody", - "version": "1.0", - "year": "2020", - "date_created": "2020/09/18" - }, - "licenses": [ - { - "url": "http://creativecommons.org/licenses/by-nc-sa/2.0/", - "id": 1, - "name": "Attribution-NonCommercial-ShareAlike License" - }, - { - "url": "http://creativecommons.org/licenses/by-nc/2.0/", - "id": 2, - "name": "Attribution-NonCommercial License" - }, - { - "url": "http://creativecommons.org/licenses/by-nc-nd/2.0/", - "id": 3, - "name": "Attribution-NonCommercial-NoDerivs License" - }, - { - "url": "http://creativecommons.org/licenses/by/2.0/", - "id": 4, - "name": "Attribution License" - }, - { - "url": "http://creativecommons.org/licenses/by-sa/2.0/", - "id": 5, - "name": "Attribution-ShareAlike License" - }, - { - "url": "http://creativecommons.org/licenses/by-nd/2.0/", - "id": 6, - "name": "Attribution-NoDerivs License" - }, - { - "url": "http://flickr.com/commons/usage/", - "id": 7, - "name": "No known copyright restrictions" - }, - { - "url": "http://www.usa.gov/copyright.shtml", - "id": 8, - "name": "United States Government Work" - } - ], - "categories": [ - { - "supercategory": "person", - "id": 1, - "name": "person", - "keypoints": [ - "nose", - "left_eye", - "right_eye", - "left_ear", - "right_ear", - "left_shoulder", - "right_shoulder", - "left_elbow", - "right_elbow", - "left_wrist", - "right_wrist", - "left_hip", - "right_hip", - "left_knee", - "right_knee", - "left_ankle", - "right_ankle" - ], - "skeleton": [ - [ - 16, - 14 - ], - [ - 14, - 12 - ], - [ - 17, - 15 - ], - [ - 15, - 13 - ], - [ - 12, - 13 - ], - [ - 6, - 12 - ], - [ - 7, - 13 - ], - [ - 6, - 7 - ], - [ - 6, - 8 - ], - [ - 7, - 9 - ], - [ - 8, - 10 - ], - [ - 9, - 11 - ], - [ - 2, - 3 - ], - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 4 - ], - [ - 3, - 5 - ], - [ - 4, - 6 - ], - [ - 5, - 7 - ] - ] - } - ], - "images": [ - { - "license": 4, - "file_name": "000000000785.jpg", - "coco_url": "http://images.cocodataset.org/val2017/000000000785.jpg", - "height": 425, - "width": 640, - "date_captured": "2013-11-19 21:22:42", - "flickr_url": "http://farm8.staticflickr.com/7015/6795644157_f019453ae7_z.jpg", - "id": 785 - }, - { - "license": 3, - "file_name": "000000040083.jpg", - "coco_url": "http://images.cocodataset.org/val2017/000000040083.jpg", - "height": 333, - "width": 500, - "date_captured": "2013-11-18 03:30:24", - "flickr_url": "http://farm1.staticflickr.com/116/254881838_e21c6d17b8_z.jpg", - "id": 40083 - }, - { - "license": 1, - "file_name": "000000196141.jpg", - "coco_url": "http://images.cocodataset.org/val2017/000000196141.jpg", - "height": 429, - "width": 640, - "date_captured": "2013-11-22 22:37:15", - "flickr_url": "http://farm4.staticflickr.com/3310/3611902235_57d4ae496d_z.jpg", - "id": 196141 - }, - { - "license": 
3, - "file_name": "000000197388.jpg", - "coco_url": "http://images.cocodataset.org/val2017/000000197388.jpg", - "height": 392, - "width": 640, - "date_captured": "2013-11-19 20:10:37", - "flickr_url": "http://farm9.staticflickr.com/8375/8507321836_5b8b13188f_z.jpg", - "id": 197388 - } - ], - "annotations": [ - { - "segmentation": [ - [ - 353.37, - 67.65, - 358.15, - 52.37, - 362.92, - 47.59, - 374.38, - 44.73, - 389.66, - 52.37, - 389.66, - 67.65, - 389.66, - 76.25, - 393.48, - 83.89, - 396.35, - 88.66, - 397.3, - 91.53, - 406.85, - 99.17, - 413.54, - 104.9, - 451.74, - 148.83, - 458.43, - 153.6, - 462.25, - 166.02, - 467.02, - 173.66, - 463.2, - 181.3, - 449.83, - 183.21, - 448.88, - 191.81, - 455.56, - 226.19, - 448.88, - 254.84, - 453.65, - 286.36, - 475.62, - 323.6, - 491.85, - 361.81, - 494.72, - 382.82, - 494.72, - 382.82, - 499.49, - 391.41, - 416.4, - 391.41, - 424.04, - 383.77, - 439.33, - 374.22, - 445.06, - 360.85, - 436.46, - 334.11, - 421.18, - 303.55, - 416.4, - 289.22, - 409.72, - 268.21, - 396.35, - 280.63, - 405.9, - 298.77, - 417.36, - 324.56, - 425, - 349.39, - 425, - 357.99, - 419.27, - 360.85, - 394.44, - 367.54, - 362.92, - 370.4, - 346.69, - 367.54, - 360.06, - 362.76, - 369.61, - 360.85, - 382.98, - 340.8, - 355.28, - 271.08, - 360.06, - 266.3, - 386.8, - 219.5, - 368.65, - 162.2, - 348.6, - 175.57, - 309.44, - 187.03, - 301.8, - 192.76, - 288.43, - 193.72, - 282.7, - 193.72, - 280.79, - 187.03, - 280.79, - 174.62, - 287.47, - 171.75, - 291.29, - 171.75, - 295.11, - 171.75, - 306.57, - 166.98, - 312.3, - 165.07, - 345.73, - 142.14, - 350.51, - 117.31, - 350.51, - 102.03, - 350.51, - 90.57, - 353.37, - 65.74 - ] - ], - "num_keypoints": 112, - "area": 27789.11055, - "iscrowd": 0, - "keypoints": [ - 367, - 81, - 2, - 374, - 73, - 2, - 360, - 75, - 2, - 386, - 78, - 2, - 356, - 81, - 2, - 399, - 108, - 2, - 358, - 129, - 2, - 433, - 142, - 2, - 341, - 159, - 2, - 449, - 165, - 2, - 309, - 178, - 2, - 424, - 203, - 2, - 393, - 214, - 2, - 429, - 294, - 2, - 367, - 273, - 2, - 466, - 362, - 2, - 396, - 341, - 2, - 439, - 378, - 2, - 446, - 380, - 2, - 479, - 370, - 2, - 377, - 359, - 2, - 376, - 358, - 2, - 413, - 353, - 2, - 355.823, - 75.36, - 1.0, - 356.354, - 79.0837, - 1.0, - 357.244, - 82.7374, - 1.0, - 358.518, - 86.2722, - 1.0, - 360.146, - 89.6578, - 1.0, - 362.266, - 92.7538, - 1.0, - 365.004, - 95.3223, - 1.0, - 368.487, - 96.6454, - 1.0, - 372.191, - 96.1419, - 1.0, - 375.644, - 94.6832, - 1.0, - 378.601, - 92.3665, - 1.0, - 381.101, - 89.5662, - 1.0, - 382.903, - 86.2741, - 1.0, - 383.896, - 82.6509, - 1.0, - 384.075, - 78.9011, - 1.0, - 384.1, - 75.1408, - 1.0, - 383.903, - 71.3861, - 1.0, - 357.084, - 72.9743, - 1.0, - 358.602, - 71.7848, - 1.0, - 360.42, - 71.3443, - 1.0, - 362.377, - 71.1566, - 1.0, - 364.36, - 71.1889, - 1.0, - 368.971, - 70.4992, - 1.0, - 370.945, - 69.8179, - 1.0, - 373.001, - 69.3543, - 1.0, - 375.14, - 69.2666, - 1.0, - 377.358, - 69.8865, - 1.0, - 366.57, - 73.9588, - 1.0, - 366.734, - 76.1499, - 1.0, - 366.88, - 78.3018, - 1.0, - 366.99, - 80.4957, - 1.0, - 365.104, - 82.5589, - 1.0, - 366.308, - 82.8331, - 1.0, - 367.645, - 82.8037, - 1.0, - 369.172, - 82.2061, - 1.0, - 370.693, - 81.6521, - 1.0, - 358.705, - 75.4542, - 1.0, - 360.294, - 74.0903, - 1.0, - 362.376, - 73.8423, - 1.0, - 364.302, - 74.6834, - 1.0, - 362.543, - 75.568, - 1.0, - 360.612, - 75.8883, - 1.0, - 369.771, - 73.7734, - 1.0, - 371.409, - 72.2638, - 1.0, - 373.615, - 71.9502, - 1.0, - 375.722, - 72.7144, - 1.0, - 373.888, - 73.699, - 1.0, - 371.835, - 74.0238, - 
1.0, - 363.184, - 86.9317, - 1.0, - 364.788, - 85.4484, - 1.0, - 367.021, - 84.7474, - 1.0, - 368.048, - 84.5364, - 1.0, - 369.083, - 84.3709, - 1.0, - 372.183, - 84.0529, - 1.0, - 375.083, - 84.8901, - 1.0, - 373.687, - 87.0735, - 1.0, - 371.644, - 88.8121, - 1.0, - 369.024, - 89.6982, - 1.0, - 366.67, - 89.6039, - 1.0, - 364.721, - 88.606, - 1.0, - 363.588, - 86.903, - 1.0, - 365.723, - 85.8496, - 1.0, - 368.184, - 85.2863, - 1.0, - 371.444, - 84.8294, - 1.0, - 374.647, - 85.0454, - 1.0, - 372.166, - 87.2914, - 1.0, - 368.81, - 88.3791, - 1.0, - 365.965, - 88.3238, - 1.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 304.10366, - 181.75134, - 1, - 300.70183, - 182.77567, - 1, - 297.3, - 183.8, - 1, - 294.7, - 186.5, - 1, - 290.1, - 187.8, - 1, - 290.9, - 176.6, - 1, - 287.5, - 176.0, - 1, - 285.5, - 178.4, - 1, - 286.4, - 182.4, - 1, - 288.8, - 179.4, - 1, - 285.0, - 181.0, - 1, - 287.3, - 186.1, - 1, - 291.8, - 189.5, - 1, - 287.7, - 182.7, - 1, - 283.8, - 184.1, - 1, - 286.5, - 189.1, - 1, - 290.0, - 192.0, - 1, - 286.7, - 185.3, - 1, - 282.8, - 187.4, - 1, - 284.8, - 191.6, - 1, - 288.4, - 194.5, - 1 - ], - "image_id": 785, - "bbox": [ - 280.79, - 44.73, - 218.7, - 346.68 - ], - "category_id": 1, - "id": 442619, - "face_box": [ - 358.2, - 69.86, - 26.360000000000014, - 25.849999999999994 - ], - "lefthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_box": [ - 280.43, - 173.12, - 27.860000000000014, - 24.849999999999994 - ], - "face_valid": true, - "lefthand_valid": false, - "righthand_valid": true, - "foot_valid": true - }, - { - "segmentation": [ - [ - 98.56, - 273.72, - 132.9, - 267, - 140.37, - 281.93, - 165.75, - 285.66, - 156.79, - 264.01, - 170.23, - 261.02, - 177.7, - 272.97, - 182.18, - 279.69, - 200.85, - 268.49, - 212.79, - 255.05, - 188.9, - 256.54, - 164.26, - 240.12, - 139.62, - 212.49, - 109.01, - 221.45, - 103.04, - 220.71, - 122.45, - 202.04, - 113.49, - 196.07, - 96.32, - 168.44, - 97.06, - 162.47, - 110.5, - 136.34, - 112, - 124.39, - 91.09, - 110.95, - 80.64, - 114.68, - 71.68, - 131.86, - 62.72, - 147.54, - 57.49, - 156.5, - 48.53, - 168.44, - 41.07, - 180.39, - 38.08, - 193.08, - 40.32, - 205.03, - 47.04, - 213.24, - 54.5, - 216.23, - 82.13, - 252.06, - 91.09, - 271.48 - ] - ], - "num_keypoints": 106, - "area": 11025.219, - "iscrowd": 0, - "keypoints": [ - 99, - 144, - 2, - 104, - 141, - 2, - 96, - 137, - 2, - 0, - 0, - 0, - 78, - 133, - 2, - 56, - 161, - 2, - 81, - 162, - 2, - 0, - 0, - 0, - 103, - 208, - 2, - 116, - 204, - 2, - 0, - 0, - 0, - 57, - 246, - 1, - 82, - 259, - 1, - 137, - 219, - 2, - 138, - 247, - 2, - 177, - 256, - 2, - 158, - 296, - 1, - 208.16049, - 257.42419, - 2.0, - 205.8824, - 259.13276, - 2.0, - 183.38626, - 275.93367, - 2.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 82.9654, - 131.144, - 1.0, - 81.8046, - 134.328, - 1.0, - 80.7007, - 137.531, - 1.0, - 79.8836, - 140.818, - 1.0, - 79.734, - 144.196, - 1.0, - 80.4763, - 147.486, - 1.0, - 82.0188, - 150.498, - 1.0, - 84.2352, - 153.057, - 1.0, - 86.8081, - 155.258, - 1.0, - 89.652, - 157.095, - 1.0, - 92.9128, - 157.812, - 1.0, - 95.962, - 156.474, - 1.0, - 98.5377, - 
154.281, - 1.0, - 100.557, - 151.568, - 1.0, - 102.508, - 148.799, - 1.0, - 103.987, - 145.756, - 1.0, - 105.345, - 142.655, - 1.0, - 93.6074, - 132.13, - 1.0, - 95.8108, - 132.112, - 1.0, - 97.7956, - 132.618, - 1.0, - 99.6897, - 133.398, - 1.0, - 101.364, - 134.432, - 1.0, - 105.0, - 136.896, - 1.0, - 105.708, - 137.334, - 1.0, - 106.267, - 137.852, - 1.0, - 106.759, - 138.404, - 1.0, - 107.013, - 139.401, - 1.0, - 100.904, - 139.994, - 1.0, - 100.551, - 142.0, - 1.0, - 100.202, - 143.956, - 1.0, - 99.8116, - 145.919, - 1.0, - 94.7941, - 146.187, - 1.0, - 95.9823, - 147.027, - 1.0, - 97.3054, - 147.849, - 1.0, - 98.2362, - 148.403, - 1.0, - 99.2812, - 148.491, - 1.0, - 93.151, - 135.98, - 1.0, - 94.9184, - 136.187, - 1.0, - 96.5441, - 136.903, - 1.0, - 97.6034, - 138.308, - 1.0, - 95.8998, - 138.017, - 1.0, - 94.3941, - 137.178, - 1.0, - 102.085, - 141.003, - 1.0, - 103.379, - 141.05, - 1.0, - 104.485, - 141.71, - 1.0, - 104.899, - 142.915, - 1.0, - 103.704, - 142.739, - 1.0, - 102.729, - 142.026, - 1.0, - 89.8433, - 148.685, - 1.0, - 92.6494, - 149.006, - 1.0, - 95.2801, - 149.78, - 1.0, - 96.1096, - 150.259, - 1.0, - 96.7411, - 150.719, - 1.0, - 97.3853, - 151.82, - 1.0, - 97.337, - 153.217, - 1.0, - 96.5124, - 153.108, - 1.0, - 95.6091, - 152.796, - 1.0, - 94.7518, - 152.399, - 1.0, - 93.0313, - 151.317, - 1.0, - 91.3461, - 150.149, - 1.0, - 90.24, - 148.802, - 1.0, - 92.9121, - 149.883, - 1.0, - 95.4213, - 151.204, - 1.0, - 96.3082, - 152.03, - 1.0, - 97.1377, - 152.997, - 1.0, - 96.3098, - 152.035, - 1.0, - 95.406, - 151.234, - 1.0, - 92.8725, - 149.984, - 1.0, - 109.88978, - 204.46047, - 1, - 113.101195, - 201.939065, - 1, - 116.31261, - 199.41766, - 1, - 113.19977, - 199.3139, - 1, - 109.8794, - 200.24775, - 1, - 117.86903, - 199.10638, - 2, - 113.9261, - 199.00262, - 2, - 109.56812, - 198.48381, - 2, - 106.6628, - 198.38004999999998, - 1, - 117.1427, - 202.32298, - 2, - 111.2283, - 201.80417, - 2, - 107.07784000000001, - 201.38913, - 2, - 103.65371999999999, - 201.18161, - 1, - 116.52013, - 205.95463, - 2, - 112.5772, - 205.53958, - 2, - 107.59665, - 204.39821, - 2, - 104.27629, - 203.77564, - 2, - 116.41637, - 209.69004, - 2, - 112.16215, - 209.48252, - 2, - 108.73803000000001, - 208.34114, - 2, - 105.72895, - 206.68096, - 2, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "image_id": 40083, - "bbox": [ - 38.08, - 110.95, - 174.71, - 174.71 - ], - "category_id": 1, - "id": 198196, - "face_box": [ - 79.19, - 131.64, - 29.290000000000006, - 28.480000000000018 - ], - "lefthand_box": [ - 104.83, - 196.48, - 16.400000000000006, - 15.810000000000002 - ], - "righthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_valid": true, - "lefthand_valid": true, - "righthand_valid": false, - "foot_valid": true - }, - { - "segmentation": [ - [ - 257.76, - 288.05, - 273.4, - 258.26, - 325.55, - 253.79, - 335.23, - 232.93, - 326.3, - 186.74, - 333.74, - 177.05, - 327.79, - 153.21, - 333.74, - 142.04, - 344.17, - 139.06, - 353.11, - 139.06, - 359.07, - 145.02, - 360.56, - 148.74, - 362.05, - 168.86, - 388.87, - 197.17, - 397.81, - 276.88, - 372.48, - 293.27 - ] - ], - "num_keypoints": 83, - "area": 
10171.9544, - "iscrowd": 0, - "keypoints": [ - 343, - 164, - 2, - 348, - 160, - 2, - 340, - 160, - 2, - 359, - 163, - 2, - 332, - 164, - 2, - 370, - 189, - 2, - 334, - 190, - 2, - 358, - 236, - 2, - 348, - 234, - 2, - 339, - 270, - 2, - 330, - 262, - 2, - 378, - 262, - 2, - 343, - 254, - 2, - 338, - 280, - 2, - 283, - 272, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 333.383, - 160.62, - 1.0, - 333.607, - 163.811, - 1.0, - 334.137, - 166.965, - 1.0, - 334.934, - 170.062, - 1.0, - 336.036, - 173.062, - 1.0, - 337.69, - 175.794, - 1.0, - 340.01, - 177.986, - 1.0, - 342.889, - 179.347, - 1.0, - 346.063, - 179.445, - 1.0, - 349.16, - 178.674, - 1.0, - 351.892, - 177.033, - 1.0, - 354.132, - 174.761, - 1.0, - 355.652, - 171.957, - 1.0, - 356.482, - 168.871, - 1.0, - 356.751, - 165.691, - 1.0, - 356.914, - 162.496, - 1.0, - 356.913, - 159.299, - 1.0, - 335.435, - 157.491, - 1.0, - 336.759, - 156.383, - 1.0, - 338.264, - 155.821, - 1.0, - 339.903, - 155.445, - 1.0, - 341.565, - 155.312, - 1.0, - 345.805, - 155.039, - 1.0, - 347.424, - 154.896, - 1.0, - 349.044, - 154.957, - 1.0, - 350.677, - 155.266, - 1.0, - 352.333, - 156.08, - 1.0, - 343.65, - 159.186, - 1.0, - 343.687, - 161.041, - 1.0, - 343.68, - 162.886, - 1.0, - 343.657, - 164.752, - 1.0, - 341.61, - 167.049, - 1.0, - 342.69, - 167.145, - 1.0, - 343.906, - 167.123, - 1.0, - 345.179, - 166.907, - 1.0, - 346.456, - 166.707, - 1.0, - 336.707, - 159.932, - 1.0, - 338.078, - 158.999, - 1.0, - 339.726, - 158.864, - 1.0, - 341.204, - 159.605, - 1.0, - 339.755, - 160.185, - 1.0, - 338.21, - 160.321, - 1.0, - 346.612, - 159.27, - 1.0, - 348.028, - 158.307, - 1.0, - 349.739, - 158.245, - 1.0, - 351.302, - 158.965, - 1.0, - 349.802, - 159.575, - 1.0, - 348.188, - 159.642, - 1.0, - 340.049, - 171.873, - 1.0, - 341.307, - 170.304, - 1.0, - 343.097, - 169.499, - 1.0, - 343.987, - 169.41, - 1.0, - 344.876, - 169.314, - 1.0, - 346.909, - 169.61, - 1.0, - 348.603, - 170.874, - 1.0, - 347.548, - 172.219, - 1.0, - 346.133, - 173.242, - 1.0, - 344.378, - 173.742, - 1.0, - 342.683, - 173.666, - 1.0, - 341.218, - 173.038, - 1.0, - 340.398, - 171.815, - 1.0, - 342.1, - 170.752, - 1.0, - 344.043, - 170.287, - 1.0, - 346.21, - 170.271, - 1.0, - 348.214, - 170.913, - 1.0, - 346.462, - 171.947, - 1.0, - 344.283, - 172.468, - 1.0, - 342.246, - 172.507, - 1.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "image_id": 40083, - "bbox": [ - 257.76, - 139.06, - 140.05, - 154.21 - ], - "category_id": 1, - "id": 230195, - "face_box": [ - 333.96, - 154.32, - 23.28000000000003, - 26.79000000000002 - ], - "lefthand_box": [ - 0.0, - 
0.0, - 0.0, - 0.0 - ], - "righthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_valid": true, - "lefthand_valid": false, - "righthand_valid": false, - "foot_valid": false - }, - { - "segmentation": [ - [ - 285.37, - 126.5, - 281.97, - 127.72, - 280.76, - 132.33, - 280.76, - 136.46, - 275.17, - 143.26, - 275.9, - 158.08, - 277.6, - 164.4, - 278.33, - 173.87, - 278.33, - 183.83, - 279.79, - 191.11, - 281.97, - 194.76, - 284.89, - 192.09, - 284.89, - 186.99, - 284.89, - 181.16, - 284.64, - 177.51, - 285.86, - 173.87 - ] - ], - "num_keypoints": 0, - "area": 491.2669, - "iscrowd": 0, - "keypoints": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "image_id": 40083, - "bbox": [ - 275.17, - 126.5, - 10.69, - 68.26 - ], - "category_id": 1, - "id": 1202706, - "face_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_box": [ - 0.0, - 0.0, - 0.0, 
- 0.0 - ], - "face_valid": false, - "lefthand_valid": false, - "righthand_valid": false, - "foot_valid": false - }, - { - "segmentation": [ - [ - 339.34, - 107.97, - 338.38, - 102.19, - 339.34, - 91.58, - 335.49, - 84.84, - 326.81, - 74.23, - 312.35, - 74.23, - 301.75, - 74.23, - 295, - 86.76, - 295, - 93.51, - 292.11, - 99.3, - 287.29, - 102.19, - 291.14, - 107.01, - 295, - 107.01, - 295.96, - 112.79, - 301.75, - 115.69, - 305.6, - 119.54, - 307.53, - 123.4, - 317.17, - 123.4, - 311.39, - 129.18, - 286.32, - 139.79, - 274.75, - 139.79, - 264.15, - 138.82, - 262.22, - 144.61, - 261.26, - 147.5, - 253.54, - 147.5, - 247.76, - 150.39, - 249.69, - 159.07, - 256.44, - 161, - 262.22, - 161, - 268, - 161, - 276.68, - 161.96, - 284.39, - 168.71, - 293.07, - 174.49, - 301.75, - 174.49, - 308.49, - 169.67, - 308.49, - 188.95, - 311.39, - 194.74, - 312.35, - 208.23, - 307.53, - 221.73, - 297.89, - 229.44, - 281.5, - 250.65, - 269.93, - 262.22, - 278.61, - 320.06, - 281.5, - 331.63, - 276.68, - 338.38, - 270.9, - 349.95, - 262.22, - 356.7, - 253.54, - 359.59, - 253.54, - 365.37, - 274.75, - 365.37, - 291.14, - 365.37, - 306.57, - 359.59, - 303.67, - 352.84, - 297.89, - 340.31, - 293.07, - 318.13, - 295, - 294.03, - 293.07, - 278.61, - 294.03, - 270.9, - 305.6, - 259.33, - 313.31, - 299.82, - 319.1, - 309.46, - 341.27, - 317.17, - 384.65, - 330.67, - 387.55, - 335.49, - 383.69, - 341.27, - 397.19, - 350.91, - 398.15, - 363.44, - 398.15, - 375.01, - 405.86, - 374.05, - 409.72, - 357.66, - 411.65, - 342.24, - 416.47, - 328.74, - 417.43, - 321.03, - 410.68, - 319.1, - 401.04, - 318.13, - 392.37, - 318.13, - 382.73, - 314.28, - 348.98, - 300.78, - 339.34, - 293.07, - 334.52, - 285.36, - 340.31, - 259.33, - 340.31, - 246.8, - 340.31, - 242.94, - 350.91, - 228.48, - 358.62, - 214.98, - 355.22, - 204.32, - 357.05, - 196.11, - 361.61, - 188.82, - 361.61, - 181.97, - 365.26, - 165.63, - 367.54, - 139.18, - 366.17, - 123.68, - 361.15, - 112.73, - 353.86, - 107.72, - 351.58, - 105.89, - 344.74, - 105.89, - 340.18, - 109.08 - ] - ], - "num_keypoints": 63, - "area": 17123.92955, - "iscrowd": 0, - "keypoints": [ - 297, - 111, - 2, - 299, - 106, - 2, - 0, - 0, - 0, - 314, - 108, - 2, - 0, - 0, - 0, - 329, - 141, - 2, - 346, - 125, - 2, - 295, - 164, - 2, - 323, - 130, - 2, - 266, - 155, - 2, - 279, - 143, - 2, - 329, - 225, - 2, - 331, - 221, - 2, - 327, - 298, - 2, - 283, - 269, - 2, - 398, - 327, - 2, - 288, - 349, - 2, - 401.79499, - 364.28207, - 2.0, - 407.21854, - 361.57029, - 2.0, - 407.21854, - 325.86523, - 2.0, - 257.16687, - 361.57029, - 2.0, - 258.52276, - 361.11833, - 2.0, - 297.84353, - 355.69477, - 2.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, 
- 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 265.1, - 155.9, - 1, - 260.05, - 152.25, - 1, - 255.0, - 148.6, - 1, - 250.6, - 148.6, - 1, - 249.1, - 151.0, - 1, - 253.4, - 158.9, - 1, - 251.9, - 155.1, - 1, - 252.0, - 151.9, - 1, - 252.9, - 150.0, - 1, - 257.4, - 157.9, - 1, - 256.7, - 154.2, - 1, - 256.3, - 151.6, - 1, - 256.9, - 149.3, - 1, - 260.2, - 156.5, - 1, - 260.1, - 153.0, - 1, - 259.9, - 150.7, - 1, - 260.2, - 148.7, - 1, - 262.8, - 154.8, - 1, - 262.7, - 152.5, - 1, - 262.7, - 150.9, - 1, - 262.6, - 148.8, - 1, - 280.8, - 146.5, - 1, - 275.4, - 149.15, - 1, - 270.0, - 151.8, - 1, - 266.2, - 152.2, - 1, - 263.5, - 151.9, - 1, - 266.6, - 142.5, - 1, - 263.6, - 147.0, - 1, - 264.9, - 151.0, - 1, - 268.5, - 152.9, - 1, - 270.6, - 142.0, - 1, - 267.9, - 146.0, - 1, - 269.4, - 149.6, - 1, - 272.5, - 151.5, - 1, - 273.8, - 142.1, - 1, - 272.2, - 146.0, - 1, - 274.2, - 149.1, - 1, - 276.5, - 149.6, - 1, - 277.4, - 142.3, - 1, - 276.6, - 145.2, - 1, - 277.6, - 148.3, - 1, - 279.4, - 148.6, - 1 - ], - "image_id": 196141, - "bbox": [ - 247.76, - 74.23, - 169.67, - 300.78 - ], - "category_id": 1, - "id": 460541, - "face_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_box": [ - 249.12, - 146.31, - 19.920000000000016, - 15.819999999999993 - ], - "righthand_box": [ - 262.82, - 139.96, - 18.930000000000007, - 14.679999999999978 - ], - "face_valid": false, - "lefthand_valid": true, - "righthand_valid": true, - "foot_valid": true - }, - { - "segmentation": [ - [ - 578.76, - 112.4, - 589.39, - 100.81, - 589.39, - 99.84, - 596.16, - 116.27, - 603.89, - 122.07, - 603.89, - 138.49, - 598.09, - 159.75, - 597.12, - 181, - 594.22, - 191.63, - 589.39, - 212.89, - 583.59, - 208.06, - 583.59, - 206.13, - 582.63, - 200.33, - 582.63, - 193.57, - 582.63, - 182.94, - 575.86, - 181, - 567.17, - 197.43, - 571.03, - 203.23, - 567.17, - 207.09, - 555.57, - 208.06, - 562.34, - 200.33, - 565.24, - 190.67, - 565.24, - 173.27, - 566.2, - 163.61, - 568.14, - 156.85, - 570.07, - 148.15, - 566.2, - 143.32, - 565.24, - 133.66, - 575.86, - 118.2 - ] - ], - "num_keypoints": 36, - "area": 2789.0208, - "iscrowd": 0, - "keypoints": [ - 589, - 113, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 595, - 112, - 1, - 584, - 110, - 2, - 598, - 123, - 2, - 579, - 119, - 2, - 594, - 141, - 2, - 570, - 137, - 2, - 576, - 135, - 2, - 585, - 139, - 2, - 590, - 157, - 2, - 574, - 156, - 2, - 589, - 192, - 2, - 565, - 189, - 1, - 587, - 222, - 1, - 557, - 219, - 1, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 
0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 578.8, - 135.7, - 2, - 577.55, - 134.35, - 2, - 576.3, - 133.0, - 1, - 574.6, - 134.1, - 1, - 574.0, - 135.5, - 1, - 574.3, - 132.9, - 2, - 572.0, - 132.4, - 2, - 570.3, - 131.8, - 2, - 568.9, - 130.7, - 2, - 573.3, - 134.4, - 2, - 570.9, - 134.0, - 2, - 569.5, - 133.9, - 2, - 568.2, - 133.8, - 2, - 572.8, - 135.7, - 2, - 572.6, - 138.3, - 2, - 574.1, - 139.4, - 2, - 576.2, - 139.4, - 1, - 574.4, - 138.0, - 2, - 575.4, - 139.5, - 2, - 576.3, - 140.2, - 2, - 577.6, - 140.8, - 2, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "image_id": 196141, - "bbox": [ - 555.57, - 99.84, - 48.32, - 113.05 - ], - "category_id": 1, - "id": 488308, - "face_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_box": [ - 568.2, - 130.89, - 10.75, - 11.130000000000024 - ], - "righthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_valid": false, - "lefthand_valid": true, - "righthand_valid": false, - "foot_valid": false - }, - { - "segmentation": [ - [ - 446.96, - 73.13, - 445.81, - 77.71, - 443.33, - 78.29, - 441.61, - 81.72, - 441.23, - 84.58, - 440.85, - 90.5, - 442.19, - 94.32, - 443.52, - 97.18, - 443.52, - 102.33, - 442.57, - 105.58, - 446.58, - 105.19, - 447.15, - 99.85, - 447.53, - 94.89, - 446, - 93.55, - 446.38, - 92.03, - 453.64, - 92.41, - 454.02, - 94.51, - 457.64, - 94.51, - 455.74, - 88.4, - 455.35, - 82.29, - 453.64, - 78.48, - 451.92, - 77.71, - 452.87, - 74.47, - 450.58, - 73.13 - ] - ], - "num_keypoints": 0, - "area": 285.7906, - "iscrowd": 0, - "keypoints": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 
0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "image_id": 196141, - "bbox": [ - 440.85, - 73.13, - 16.79, - 32.45 - ], - "category_id": 1, - "id": 508900, - "face_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_valid": false, - "lefthand_valid": false, - "righthand_valid": false, - "foot_valid": false - }, - { - "segmentation": [ - [ - 497.15, - 413.95, - 531.55, - 417.68, - 548.74, - 411.7, - 551.74, - 403.48, - 546.5, - 394.5, - 543.51, - 386.28, - 571.93, - 390.76, - 574.92, - 391.51, - 579.4, - 409.46, - 605.58, - 409.46, - 615.3, - 408.71, - 607.07, - 389.27, - 598.1, - 381.79, - 607.82, - 366.83, - 607.82, - 352.63, - 610.06, - 338.42, - 619.04, - 345.15, - 631, - 344.4, - 630.25, - 336.92, - 626.51, - 318.98, - 616.05, - 286.07, - 598.85, - 263.64, - 585.39, - 257.66, - 593.61, - 244.2, - 601.09, - 235.97, - 596.6, - 219.52, - 587.63, - 211.29, - 577.91, - 208.3, - 563.7, - 206.81, - 556.22, - 214.29, - 548, - 217.28, - 539.77, - 229.99, - 539.77, - 241.95, - 539.02, - 247.19, - 523.32, - 247.19, - 503.88, - 254.67, - 485.93, - 254.67, - 479.95, - 248.68, - 473.22, - 241.21, - 485.93, - 227, - 477.7, - 215.78, - 457.51, - 215.78, - 453.77, - 235.22, - 463.5, - 246.44, - 465.74, - 261.4, - 490.42, - 274.11, - 501.63, - 275.6, - 504.62, - 286.07, - 519.58, - 286.07, - 522.57, - 292.06, - 512.85, - 310, - 515.09, - 330.94, - 530.05, - 343.65, - 505.37, - 341.41, - 479.95, - 339.91, - 465.74, - 346.64, - 463.5, - 358.61, - 473.97, - 381.04, - 485.18, - 390.02, - 501.63, - 398.99, - 504.62, - 404.22, - 491.16, - 
412.45, - 495.65, - 417.68 - ] - ], - "num_keypoints": 15, - "area": 21608.94075, - "iscrowd": 0, - "keypoints": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 552, - 234, - 2, - 0, - 0, - 0, - 531, - 262, - 2, - 600, - 283, - 2, - 480, - 260, - 2, - 622, - 336, - 2, - 466, - 242, - 2, - 0, - 0, - 0, - 546, - 365, - 2, - 592, - 371, - 2, - 470, - 351, - 2, - 551, - 330, - 2, - 519, - 394, - 2, - 589, - 391, - 2, - 0.0, - 0.0, - 0.0, - 498.08009, - 412.23863, - 2.0, - 541.66626, - 400.39384, - 2.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 602.22109, - 403.58794, - 2.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "image_id": 196141, - "bbox": [ - 453.77, - 206.81, - 177.23, - 210.87 - ], - "category_id": 1, - "id": 1717641, - "face_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_valid": false, - "lefthand_valid": false, - "righthand_valid": false, - "foot_valid": true - }, - { - "segmentation": [ - [ - 58.93, - 163.67, - 47.18, - 161.59, - 36.12, - 93.86, - 41.65, - 82.8, - 40.27, - 69.66, - 50.64, - 67.59, - 55.48, - 73.81, - 63.08, - 92.47, - 66.53, - 99.38, - 65.15, - 109.06, - 61, - 127.03, - 59.62, - 162.97 - ] - ], - "num_keypoints": 20, - "area": 
1870.14015, - "iscrowd": 0, - "keypoints": [ - 48, - 79, - 2, - 50, - 77, - 2, - 46, - 77, - 2, - 54, - 78, - 2, - 45, - 78, - 2, - 57, - 90, - 2, - 42, - 90, - 2, - 63, - 103, - 2, - 42, - 105, - 2, - 56, - 113, - 2, - 49, - 112, - 2, - 55, - 117, - 2, - 44, - 117, - 2, - 55, - 140, - 2, - 47, - 140, - 2, - 56, - 160, - 2, - 49, - 159, - 2, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 44.4, - 162.6, - 2.0, - 43.4, - 161.5, - 2.0, - 51.7, - 160.7, - 2.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "image_id": 196141, - "bbox": [ - 36.12, - 67.59, - 30.41, - 96.08 - ], - "category_id": 1, - "id": 1724673, - "face_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_valid": false, - "lefthand_valid": false, - "righthand_valid": false, - "foot_valid": true - }, - { - "segmentation": [ - [ - 139.41, - 321.58, - 144.78, - 326.56, - 196.92, - 314.68, - 196.16, - 309.31, - 207.28, - 292.05, - 213.03, - 284, - 228.75, - 270.2, - 233.35, - 261.38, - 244.47, - 252.56, - 254.44, - 237.61, - 267.86, - 215.37, - 272.08, - 212.68, - 285.5, - 232.62, - 294.7, - 250.64, - 295.08, - 264.06, - 290.87, - 277.87, - 290.87, - 286.3, - 289.71, - 298.19, - 281.66, - 
318.89, - 282.05, - 334.23, - 295.08, - 340.37, - 315.02, - 343.82, - 314.25, - 336.53, - 310.42, - 330.4, - 301.98, - 322.34, - 304.29, - 310.84, - 304.67, - 302.79, - 306.2, - 292.05, - 311.19, - 275.56, - 313.87, - 251.79, - 311.19, - 234.54, - 312.72, - 224.57, - 310.42, - 212.3, - 307.74, - 201.56, - 306.2, - 193.51, - 306.59, - 183.16, - 310.04, - 177.41, - 314.64, - 173.19, - 316.94, - 171.65, - 328.06, - 163.99, - 337.64, - 157.85, - 343.4, - 159.77, - 346.46, - 166.67, - 346.85, - 170.5, - 346.46, - 179.71, - 346.85, - 188.53, - 346.85, - 191.98, - 344.55, - 198.11, - 342.25, - 203.48, - 338.41, - 208.46, - 335.34, - 212.68, - 335.34, - 217.67, - 343.01, - 222.65, - 354.9, - 210.76, - 359.12, - 196.19, - 361.8, - 173.19, - 361.42, - 161.69, - 356.43, - 150.18, - 344.93, - 135.61, - 343.01, - 132.93, - 345.31, - 126.41, - 345.7, - 124.88, - 343.4, - 115.29, - 340.33, - 104.17, - 337.26, - 102.25, - 330.36, - 103.4, - 326.14, - 106.09, - 320.01, - 111.07, - 314.64, - 119.89, - 310.42, - 121.04, - 292.02, - 121.81, - 279.75, - 127.94, - 244.09, - 138.68, - 240.25, - 142.51, - 238.72, - 154.4, - 239.1, - 163.6, - 239.87, - 173.96, - 241.79, - 181.24, - 248.3, - 192.36, - 240.25, - 206.55, - 236.42, - 219.2, - 229.9, - 236.45, - 225.3, - 247.57, - 218.4, - 254.48, - 208.81, - 265.6, - 202.29, - 278.25, - 195.39, - 285.92, - 188.49, - 292.05, - 183.5, - 295.89, - 176.6, - 302.41, - 172, - 308.54, - 167.78, - 313.14, - 146.31, - 318.89 - ] - ], - "num_keypoints": 132, - "area": 14250.29385, - "iscrowd": 0, - "keypoints": [ - 334, - 135, - 2, - 340, - 129, - 2, - 331, - 129, - 2, - 0, - 0, - 0, - 319, - 123, - 2, - 340, - 146, - 2, - 292, - 133, - 2, - 353, - 164, - 2, - 246, - 144, - 2, - 354, - 197, - 2, - 250, - 185, - 2, - 293, - 197, - 2, - 265, - 187, - 2, - 305, - 252, - 2, - 231, - 254, - 2, - 293, - 321, - 2, - 193, - 297, - 2, - 300.24175, - 336.83838, - 2.0, - 306.59015, - 335.34464, - 2.0, - 290.07408, - 326.47826, - 2.0, - 182.60972, - 314.05885, - 2.0, - 175.88789, - 305.84328, - 2.0, - 189.70499, - 302.48236, - 2.0, - 319.681, - 126.613, - 1.0, - 319.155, - 129.261, - 1.0, - 318.92, - 131.954, - 1.0, - 319.187, - 134.631, - 1.0, - 319.707, - 137.271, - 1.0, - 320.991, - 139.649, - 1.0, - 322.846, - 141.606, - 1.0, - 325.009, - 143.216, - 1.0, - 327.359, - 144.544, - 1.0, - 329.907, - 145.384, - 1.0, - 332.347, - 144.347, - 1.0, - 334.268, - 142.449, - 1.0, - 335.767, - 140.222, - 1.0, - 336.675, - 137.69, - 1.0, - 337.019, - 135.009, - 1.0, - 336.982, - 132.311, - 1.0, - 337.13, - 129.618, - 1.0, - 328.503, - 125.823, - 1.0, - 329.531, - 125.489, - 1.0, - 330.619, - 125.626, - 1.0, - 331.573, - 125.909, - 1.0, - 332.529, - 126.431, - 1.0, - 334.479, - 127.459, - 1.0, - 334.815, - 127.43, - 1.0, - 335.157, - 127.316, - 1.0, - 335.52, - 127.327, - 1.0, - 335.949, - 127.701, - 1.0, - 332.762, - 129.334, - 1.0, - 333.168, - 130.389, - 1.0, - 333.603, - 131.342, - 1.0, - 333.928, - 132.331, - 1.0, - 331.671, - 134.291, - 1.0, - 332.232, - 134.389, - 1.0, - 332.931, - 134.487, - 1.0, - 333.332, - 134.463, - 1.0, - 333.645, - 134.212, - 1.0, - 329.271, - 128.208, - 1.0, - 329.963, - 128.464, - 1.0, - 330.676, - 128.659, - 1.0, - 331.392, - 128.839, - 1.0, - 330.672, - 128.659, - 1.0, - 330.003, - 128.334, - 1.0, - 333.792, - 129.611, - 1.0, - 334.158, - 129.741, - 1.0, - 334.546, - 129.765, - 1.0, - 334.878, - 129.954, - 1.0, - 334.523, - 129.822, - 1.0, - 334.161, - 129.704, - 1.0, - 327.38, - 138.818, - 1.0, - 329.757, - 138.136, - 1.0, - 332.086, - 137.874, - 1.0, - 332.75, - 
138.208, - 1.0, - 333.221, - 138.515, - 1.0, - 334.495, - 139.634, - 1.0, - 335.213, - 141.054, - 1.0, - 334.12, - 140.754, - 1.0, - 333.208, - 140.234, - 1.0, - 332.2, - 139.888, - 1.0, - 330.765, - 139.414, - 1.0, - 329.069, - 139.351, - 1.0, - 327.561, - 138.814, - 1.0, - 329.88, - 138.346, - 1.0, - 332.517, - 138.668, - 1.0, - 334.031, - 139.589, - 1.0, - 335.123, - 140.862, - 1.0, - 333.726, - 140.572, - 1.0, - 332.203, - 140.032, - 1.0, - 329.731, - 139.403, - 1.0, - 353.87482, - 196.49984999999998, - 1, - 349.01957500000003, - 201.76511, - 1, - 344.16433, - 207.03037, - 1, - 340.81534, - 210.64729, - 1, - 337.46165, - 216.59183000000002, - 1, - 346.65868, - 216.02586, - 1, - 342.27241, - 219.28019999999998, - 1, - 337.88613, - 219.70467, - 1, - 334.4903, - 218.57273, - 1, - 345.5, - 215.0, - 1, - 342.27241, - 217.72377, - 1, - 338.73509, - 218.00675999999999, - 1, - 334.77329, - 216.30885, - 1, - 343.7, - 213.8, - 1, - 341.42345, - 215.74288, - 1, - 338.73509, - 215.60138, - 1, - 335.62225, - 213.76198, - 1, - 342.4139, - 212.63003, - 1, - 340.85748, - 213.76198, - 1, - 338.87658, - 214.04496, - 1, - 337.17867, - 213.76198, - 1, - 249.4, - 180.4, - 1, - 254.3, - 184.9, - 1, - 259.2, - 189.4, - 1, - 259.3, - 192.1, - 1, - 258.2, - 194.9, - 1, - 254.9, - 193.2, - 1, - 255.9, - 192.3, - 1, - 255.9, - 190.5, - 1, - 255.4, - 188.5, - 1, - 252.2, - 194.0, - 1, - 253.2, - 193.6, - 1, - 253.2, - 191.1, - 1, - 252.9, - 188.8, - 1, - 249.4, - 193.6, - 1, - 250.4, - 193.6, - 1, - 250.4, - 191.3, - 1, - 249.9, - 188.7, - 1, - 247.1, - 192.2, - 1, - 248.0, - 192.2, - 1, - 247.9, - 190.3, - 1, - 247.5, - 188.3, - 1 - ], - "image_id": 197388, - "bbox": [ - 139.41, - 102.25, - 222.39, - 241.57 - ], - "category_id": 1, - "id": 437295, - "face_box": [ - 320.23, - 123.84, - 21.049999999999955, - 23.5 - ], - "lefthand_box": [ - 333.65, - 198.45, - 23.150000000000034, - 23.57000000000002 - ], - "righthand_box": [ - 247.5, - 184.92, - 23.30000000000001, - 22.360000000000014 - ], - "face_valid": true, - "lefthand_valid": true, - "righthand_valid": true, - "foot_valid": true - }, - { - "segmentation": [ - [ - 287.17, - 121.42, - 294.22, - 106.44, - 302.15, - 116.13, - 303.03, - 121.42 - ], - [ - 297.74, - 99.39, - 310.08, - 76.49, - 326.81, - 76.49, - 329.46, - 67.68, - 337.38, - 61.52, - 346.19, - 62.4, - 353.24, - 65.92, - 353.24, - 76.49, - 355.88, - 84.42, - 359.41, - 87.94, - 362.05, - 96.75, - 354.12, - 139.04, - 349.72, - 142.56, - 345.31, - 139.92, - 349.72, - 117.89, - 348.84, - 108.2, - 345.31, - 113.49, - 336.5, - 101.16, - 325.93, - 110.85, - 311.84, - 123.18 - ], - [ - 324.17, - 176.91, - 332.1, - 191.89, - 328.58, - 198.94, - 327.69, - 205.98, - 333.86, - 213.03, - 337.38, - 227.13, - 332.98, - 227.13, - 319.77, - 219.2, - 313.6, - 211.27 - ], - [ - 332.98, - 165.46, - 341.79, - 161.06, - 336.5, - 174.27, - 333.86, - 186.6, - 326.81, - 176.03 - ] - ], - "num_keypoints": 19, - "area": 3404.869, - "iscrowd": 0, - "keypoints": [ - 345, - 92, - 2, - 350, - 87, - 2, - 341, - 87, - 2, - 0, - 0, - 0, - 330, - 83, - 2, - 357, - 94, - 2, - 316, - 92, - 2, - 357, - 104, - 2, - 291, - 123, - 1, - 351, - 133, - 2, - 281, - 136, - 1, - 326, - 131, - 1, - 305, - 128, - 1, - 336, - 152, - 1, - 303, - 171, - 1, - 318, - 206, - 2, - 294, - 211, - 1, - 322.595, - 216.245, - 2.0, - 327.23077, - 215.42692, - 2.0, - 316.81553, - 207.67155, - 2.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 
0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "image_id": 197388, - "bbox": [ - 287.17, - 61.52, - 74.88, - 165.61 - ], - "category_id": 1, - "id": 467657, - "face_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_valid": false, - "lefthand_valid": false, - "righthand_valid": false, - "foot_valid": true - }, - { - "segmentation": [ - [ - 547.95, - 201.57, - 546.73, - 190.62, - 547.95, - 181.49, - 547.95, - 169.31, - 547.95, - 156.53, - 546.73, - 144.36, - 544.3, - 139.49, - 540.04, - 132.19, - 540.04, - 121.84, - 542.47, - 107.24, - 544.3, - 99.33, - 548.56, - 88.98, - 561.95, - 78.03, - 572.29, - 71.33, - 572.29, - 71.33, - 572.29, - 65.25, - 574.12, - 51.86, - 583.86, - 48.81, - 592.99, - 48.81, - 597.86, - 57.33, - 599.07, - 64.64, - 608.2, - 76.81, - 614.9, - 82.89, - 620.98, - 89.59, - 628.89, - 93.24, - 636.81, - 101.76, - 640, - 109.67, - 640, - 115.76, - 640, - 127.93, - 620.37, - 111.5, - 619.16, - 111.5, - 618.55, - 112.11, - 608.2, - 105.41, - 600.9, - 119.41, - 592.99, - 131.58, - 596.03, - 148.01, - 605.16, - 162.01, - 612.46, - 190.01, - 614.9, - 204.61, - 606.98, - 216.78, - 603.94, - 226.52, - 606.38, - 239.91, - 605.16, - 256.95, - 604.55, - 264.26, - 602.12, - 271.56, - 586.29, - 272.17, - 584.47, - 255.13, - 588.73, - 237.48, - 592.99, - 221.65, - 
596.64, - 207.05, - 596.64, - 197.31, - 594.2, - 186.96, - 584.47, - 172.36, - 577.77, - 166.27, - 570.47, - 170.53, - 558.91, - 179.66, - 555.86, - 192.44, - 548.56, - 198.53, - 547.95, - 198.53 - ] - ], - "num_keypoints": 39, - "area": 8913.98475, - "iscrowd": 0, - "keypoints": [ - 591, - 78, - 2, - 594, - 74, - 2, - 586, - 74, - 2, - 0, - 0, - 0, - 573, - 70, - 2, - 598, - 86, - 2, - 566, - 93, - 2, - 626, - 105, - 2, - 546, - 126, - 2, - 0, - 0, - 0, - 561, - 150, - 2, - 582, - 150, - 2, - 557, - 154, - 2, - 606, - 194, - 2, - 558, - 209, - 1, - 591, - 252, - 2, - 539, - 262, - 1, - 599.72032, - 264.75714, - 2.0, - 603.91172, - 265.80499, - 2.0, - 585.74897, - 265.10642, - 2.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 565.0, - 153.0, - 0.08773341029882431, - 568.0, - 156.0, - 0.04602484405040741, - 571.0, - 159.0, - 0.04602484405040741, - 573.0, - 161.0, - 0.06972061097621918, - 575.0, - 164.0, - 0.06297813355922699, - 569.0, - 158.0, - 0.294232040643692, - 570.0, - 162.0, - 0.26472434401512146, - 570.0, - 166.0, - 0.2826344072818756, - 571.0, - 171.0, - 0.374575674533844, - 565.0, - 159.0, - 0.2154899388551712, - 566.0, - 162.0, - 0.21613340079784393, - 566.0, - 164.0, - 0.2544613480567932, - 567.0, - 168.0, - 0.31771761178970337, - 562.0, - 160.0, - 0.23286579549312592, - 563.0, - 166.0, - 0.1579097956418991, - 564.0, - 166.0, - 0.17961391806602478, - 564.0, - 166.0, - 0.17504136264324188, - 559.0, - 160.0, - 0.3428754508495331, - 559.0, - 162.0, - 0.2897874116897583, - 561.0, - 165.0, - 0.24125981330871582, - 562.0, - 166.0, - 0.20118576288223267 - ], - "image_id": 197388, - "bbox": [ - 540.04, - 48.81, - 99.96, - 223.36 - ], 
- "category_id": 1, - "id": 531914, - "face_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_box": [ - 557.05, - 149.73, - 19.879999999999995, - 21.76000000000002 - ], - "face_valid": false, - "lefthand_valid": false, - "righthand_valid": true, - "foot_valid": true - }, - { - "segmentation": [ - [ - 561.51, - 385.38, - 572.11, - 352.71, - 570.34, - 317.4, - 559.75, - 282.08, - 552.68, - 267.07, - 565.93, - 236.17, - 583.59, - 236.17, - 602.13, - 260.01, - 614.49, - 286.5, - 628.61, - 302.39, - 639.21, - 281.2, - 614.49, - 251.18, - 588, - 218.51, - 595.95, - 202.62, - 594.18, - 185.85, - 580.05, - 170.84, - 562.4, - 179.67, - 557.98, - 198.21, - 554.45, - 202.62, - 532.38, - 199.97, - 525.32, - 202.62, - 511.19, - 229.11, - 493.53, - 256.48, - 484.7, - 276.78, - 451.15, - 323.58, - 423.78, - 338.59, - 388.47, - 373.9, - 372.58, - 387.14, - 396.41, - 388.03, - 418.49, - 367.72, - 450.27, - 345.65, - 501.48, - 306.8, - 520.02, - 301.5, - 552.68, - 340.35, - 543.86, - 369.49 - ] - ], - "num_keypoints": 60, - "area": 14267.20475, - "iscrowd": 0, - "keypoints": [ - 580, - 211, - 2, - 586, - 206, - 2, - 574, - 204, - 2, - 0, - 0, - 0, - 562, - 198, - 2, - 584, - 220, - 2, - 529, - 215, - 2, - 599, - 242, - 2, - 512, - 260, - 2, - 619, - 274, - 2, - 538, - 285, - 2, - 537, - 288, - 2, - 506, - 277, - 2, - 562, - 332, - 2, - 452, - 332, - 2, - 550, - 387, - 1, - 402, - 371, - 2, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 375.00826, - 386.35839, - 2.0, - 399.52454, - 375.91627, - 2.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 620.284, - 274.54006, - 1, - 621.65135, - 282.30908999999997, - 1, - 623.0187, - 290.07812, - 1, - 625.38048, - 294.55308, - 1, - 628.86101, - 298.90373999999997, - 1, - 630.22836, - 289.20799, - 1, - 634.57901, - 292.43991, - 1, - 633.08736, - 295.54752, - 1, - 628.6124, - 295.42321, - 1, - 632.46584, - 286.5976, - 1, - 631.3, - 291.9, - 1, - 627.7, - 291.6, - 1, - 625.6, - 288.9, - 1, - 633.7, - 284.2, - 1, - 632.3, - 288.0, - 1, - 629.1, - 288.0, - 1, - 627.0, - 285.9, - 1, - 633.2, - 280.4, - 1, - 632.8, - 283.6, - 
1, - 630.8, - 284.4, - 1, - 629.1, - 283.2, - 1, - 544.0, - 291.0, - 0.09089653939008713, - 551.0, - 291.0, - 0.041192591190338135, - 558.0, - 291.0, - 0.041192591190338135, - 559.0, - 294.0, - 0.056781601160764694, - 563.0, - 298.0, - 0.2960541546344757, - 559.0, - 296.0, - 0.18105527758598328, - 562.0, - 301.0, - 0.12244582921266556, - 559.0, - 308.0, - 0.05529222637414932, - 564.0, - 306.0, - 0.05997529253363609, - 555.0, - 299.0, - 0.18805834650993347, - 556.0, - 302.0, - 0.1534559577703476, - 555.0, - 306.0, - 0.20564205944538116, - 556.0, - 309.0, - 0.06228385493159294, - 550.0, - 300.0, - 0.1409723311662674, - 550.0, - 301.0, - 0.2223101258277893, - 551.0, - 305.0, - 0.2001882642507553, - 553.0, - 308.0, - 0.1712668538093567, - 545.0, - 302.0, - 0.1908813714981079, - 546.0, - 304.0, - 0.13619276881217957, - 547.0, - 306.0, - 0.19773860275745392, - 549.0, - 308.0, - 0.1341865360736847 - ], - "image_id": 197388, - "bbox": [ - 372.58, - 170.84, - 266.63, - 217.19 - ], - "category_id": 1, - "id": 533949, - "face_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_box": [ - 615.22, - 271.56, - 22.139999999999986, - 28.839999999999975 - ], - "righthand_box": [ - 538.83, - 283.74, - 25.639999999999986, - 30.659999999999968 - ], - "face_valid": false, - "lefthand_valid": true, - "righthand_valid": true, - "foot_valid": true - }, - { - "segmentation": [ - [ - 2.03, - 75.18, - 10.85, - 70.58, - 16.99, - 65.59, - 17.75, - 55.24, - 20.05, - 50.25, - 29.64, - 43.74, - 37.31, - 47.57, - 41.52, - 53.7, - 43.83, - 64.82, - 53.03, - 70.19, - 61.85, - 77.09, - 72.58, - 87.06, - 74.88, - 79.01, - 78.72, - 73.64, - 86.39, - 77.86, - 90.6, - 90.13, - 86, - 93.2, - 82.17, - 102.4, - 75.27, - 106.24, - 68.75, - 104.7, - 50.34, - 90.9, - 43.06, - 112.37, - 40.76, - 123.11, - 42.29, - 130.78, - 48.04, - 161.83, - 52.26, - 190.59, - 50.73, - 210.15, - 44.21, - 245.04, - 50.34, - 256.16, - 53.03, - 261.53, - 47.28, - 263.83, - 40.37, - 263.83, - 31.56, - 260.76, - 28.1, - 256.16, - 26.95, - 244.65, - 29.25, - 233.54, - 32.71, - 223.95, - 33.09, - 213.98, - 32.32, - 206.31, - 32.71, - 194.81, - 33.09, - 185.61, - 24.65, - 177.17, - 16.99, - 161.45, - 13.53, - 176.02, - 10.85, - 206.31, - 1.65, - 231.62, - 1.65, - 235.84, - 0.5, - 146.88, - 0.88, - 122.34, - 1.65, - 75.56 - ] - ], - "num_keypoints": 16, - "area": 8260.75085, - "iscrowd": 0, - "keypoints": [ - 36, - 79, - 2, - 40, - 74, - 2, - 31, - 75, - 2, - 0, - 0, - 0, - 19, - 69, - 2, - 45, - 77, - 2, - 2, - 89, - 2, - 74, - 99, - 2, - 0, - 0, - 0, - 78, - 92, - 2, - 0, - 0, - 0, - 33, - 149, - 2, - 7, - 153, - 2, - 44, - 196, - 2, - 2, - 205, - 2, - 35, - 245, - 2, - 0, - 0, - 0, - 43.80826, - 259.40011, - 2.0, - 48.63752, - 257.67537, - 2.0, - 32.08007, - 256.29558, - 2.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, 
- 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "image_id": 197388, - "bbox": [ - 0.5, - 43.74, - 90.1, - 220.09 - ], - "category_id": 1, - "id": 543117, - "face_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "lefthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "righthand_box": [ - 0.0, - 0.0, - 0.0, - 0.0 - ], - "face_valid": false, - "lefthand_valid": false, - "righthand_valid": false, - "foot_valid": true - } - ] +{ + "info": { + "description": "COCO-WholeBody sample", + "url": "https://github.com/jin-s13/COCO-WholeBody", + "version": "1.0", + "year": "2020", + "date_created": "2020/09/18" + }, + "licenses": [ + { + "url": "http://creativecommons.org/licenses/by-nc-sa/2.0/", + "id": 1, + "name": "Attribution-NonCommercial-ShareAlike License" + }, + { + "url": "http://creativecommons.org/licenses/by-nc/2.0/", + "id": 2, + "name": "Attribution-NonCommercial License" + }, + { + "url": "http://creativecommons.org/licenses/by-nc-nd/2.0/", + "id": 3, + "name": "Attribution-NonCommercial-NoDerivs License" + }, + { + "url": "http://creativecommons.org/licenses/by/2.0/", + "id": 4, + "name": "Attribution License" + }, + { + "url": "http://creativecommons.org/licenses/by-sa/2.0/", + "id": 5, + "name": "Attribution-ShareAlike License" + }, + { + "url": "http://creativecommons.org/licenses/by-nd/2.0/", + "id": 6, + "name": "Attribution-NoDerivs License" + }, + { + "url": "http://flickr.com/commons/usage/", + "id": 7, + "name": "No known copyright restrictions" + }, + { + "url": "http://www.usa.gov/copyright.shtml", + "id": 8, + "name": "United States Government Work" + } + ], + "categories": [ + { + "supercategory": "person", + "id": 1, + "name": "person", + "keypoints": [ + "nose", + "left_eye", + "right_eye", + "left_ear", + "right_ear", + "left_shoulder", + "right_shoulder", + "left_elbow", + "right_elbow", + "left_wrist", + "right_wrist", + "left_hip", + "right_hip", + "left_knee", + "right_knee", + "left_ankle", + "right_ankle" + ], + "skeleton": [ + [ + 16, + 14 + ], + [ + 14, + 12 + ], + [ + 
17, + 15 + ], + [ + 15, + 13 + ], + [ + 12, + 13 + ], + [ + 6, + 12 + ], + [ + 7, + 13 + ], + [ + 6, + 7 + ], + [ + 6, + 8 + ], + [ + 7, + 9 + ], + [ + 8, + 10 + ], + [ + 9, + 11 + ], + [ + 2, + 3 + ], + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 4 + ], + [ + 3, + 5 + ], + [ + 4, + 6 + ], + [ + 5, + 7 + ] + ] + } + ], + "images": [ + { + "license": 4, + "file_name": "000000000785.jpg", + "coco_url": "http://images.cocodataset.org/val2017/000000000785.jpg", + "height": 425, + "width": 640, + "date_captured": "2013-11-19 21:22:42", + "flickr_url": "http://farm8.staticflickr.com/7015/6795644157_f019453ae7_z.jpg", + "id": 785 + }, + { + "license": 3, + "file_name": "000000040083.jpg", + "coco_url": "http://images.cocodataset.org/val2017/000000040083.jpg", + "height": 333, + "width": 500, + "date_captured": "2013-11-18 03:30:24", + "flickr_url": "http://farm1.staticflickr.com/116/254881838_e21c6d17b8_z.jpg", + "id": 40083 + }, + { + "license": 1, + "file_name": "000000196141.jpg", + "coco_url": "http://images.cocodataset.org/val2017/000000196141.jpg", + "height": 429, + "width": 640, + "date_captured": "2013-11-22 22:37:15", + "flickr_url": "http://farm4.staticflickr.com/3310/3611902235_57d4ae496d_z.jpg", + "id": 196141 + }, + { + "license": 3, + "file_name": "000000197388.jpg", + "coco_url": "http://images.cocodataset.org/val2017/000000197388.jpg", + "height": 392, + "width": 640, + "date_captured": "2013-11-19 20:10:37", + "flickr_url": "http://farm9.staticflickr.com/8375/8507321836_5b8b13188f_z.jpg", + "id": 197388 + } + ], + "annotations": [ + { + "segmentation": [ + [ + 353.37, + 67.65, + 358.15, + 52.37, + 362.92, + 47.59, + 374.38, + 44.73, + 389.66, + 52.37, + 389.66, + 67.65, + 389.66, + 76.25, + 393.48, + 83.89, + 396.35, + 88.66, + 397.3, + 91.53, + 406.85, + 99.17, + 413.54, + 104.9, + 451.74, + 148.83, + 458.43, + 153.6, + 462.25, + 166.02, + 467.02, + 173.66, + 463.2, + 181.3, + 449.83, + 183.21, + 448.88, + 191.81, + 455.56, + 226.19, + 448.88, + 254.84, + 453.65, + 286.36, + 475.62, + 323.6, + 491.85, + 361.81, + 494.72, + 382.82, + 494.72, + 382.82, + 499.49, + 391.41, + 416.4, + 391.41, + 424.04, + 383.77, + 439.33, + 374.22, + 445.06, + 360.85, + 436.46, + 334.11, + 421.18, + 303.55, + 416.4, + 289.22, + 409.72, + 268.21, + 396.35, + 280.63, + 405.9, + 298.77, + 417.36, + 324.56, + 425, + 349.39, + 425, + 357.99, + 419.27, + 360.85, + 394.44, + 367.54, + 362.92, + 370.4, + 346.69, + 367.54, + 360.06, + 362.76, + 369.61, + 360.85, + 382.98, + 340.8, + 355.28, + 271.08, + 360.06, + 266.3, + 386.8, + 219.5, + 368.65, + 162.2, + 348.6, + 175.57, + 309.44, + 187.03, + 301.8, + 192.76, + 288.43, + 193.72, + 282.7, + 193.72, + 280.79, + 187.03, + 280.79, + 174.62, + 287.47, + 171.75, + 291.29, + 171.75, + 295.11, + 171.75, + 306.57, + 166.98, + 312.3, + 165.07, + 345.73, + 142.14, + 350.51, + 117.31, + 350.51, + 102.03, + 350.51, + 90.57, + 353.37, + 65.74 + ] + ], + "num_keypoints": 112, + "area": 27789.11055, + "iscrowd": 0, + "keypoints": [ + 367, + 81, + 2, + 374, + 73, + 2, + 360, + 75, + 2, + 386, + 78, + 2, + 356, + 81, + 2, + 399, + 108, + 2, + 358, + 129, + 2, + 433, + 142, + 2, + 341, + 159, + 2, + 449, + 165, + 2, + 309, + 178, + 2, + 424, + 203, + 2, + 393, + 214, + 2, + 429, + 294, + 2, + 367, + 273, + 2, + 466, + 362, + 2, + 396, + 341, + 2, + 439, + 378, + 2, + 446, + 380, + 2, + 479, + 370, + 2, + 377, + 359, + 2, + 376, + 358, + 2, + 413, + 353, + 2, + 355.823, + 75.36, + 1.0, + 356.354, + 79.0837, + 1.0, + 357.244, + 82.7374, + 1.0, + 358.518, + 86.2722, + 1.0, + 
360.146, + 89.6578, + 1.0, + 362.266, + 92.7538, + 1.0, + 365.004, + 95.3223, + 1.0, + 368.487, + 96.6454, + 1.0, + 372.191, + 96.1419, + 1.0, + 375.644, + 94.6832, + 1.0, + 378.601, + 92.3665, + 1.0, + 381.101, + 89.5662, + 1.0, + 382.903, + 86.2741, + 1.0, + 383.896, + 82.6509, + 1.0, + 384.075, + 78.9011, + 1.0, + 384.1, + 75.1408, + 1.0, + 383.903, + 71.3861, + 1.0, + 357.084, + 72.9743, + 1.0, + 358.602, + 71.7848, + 1.0, + 360.42, + 71.3443, + 1.0, + 362.377, + 71.1566, + 1.0, + 364.36, + 71.1889, + 1.0, + 368.971, + 70.4992, + 1.0, + 370.945, + 69.8179, + 1.0, + 373.001, + 69.3543, + 1.0, + 375.14, + 69.2666, + 1.0, + 377.358, + 69.8865, + 1.0, + 366.57, + 73.9588, + 1.0, + 366.734, + 76.1499, + 1.0, + 366.88, + 78.3018, + 1.0, + 366.99, + 80.4957, + 1.0, + 365.104, + 82.5589, + 1.0, + 366.308, + 82.8331, + 1.0, + 367.645, + 82.8037, + 1.0, + 369.172, + 82.2061, + 1.0, + 370.693, + 81.6521, + 1.0, + 358.705, + 75.4542, + 1.0, + 360.294, + 74.0903, + 1.0, + 362.376, + 73.8423, + 1.0, + 364.302, + 74.6834, + 1.0, + 362.543, + 75.568, + 1.0, + 360.612, + 75.8883, + 1.0, + 369.771, + 73.7734, + 1.0, + 371.409, + 72.2638, + 1.0, + 373.615, + 71.9502, + 1.0, + 375.722, + 72.7144, + 1.0, + 373.888, + 73.699, + 1.0, + 371.835, + 74.0238, + 1.0, + 363.184, + 86.9317, + 1.0, + 364.788, + 85.4484, + 1.0, + 367.021, + 84.7474, + 1.0, + 368.048, + 84.5364, + 1.0, + 369.083, + 84.3709, + 1.0, + 372.183, + 84.0529, + 1.0, + 375.083, + 84.8901, + 1.0, + 373.687, + 87.0735, + 1.0, + 371.644, + 88.8121, + 1.0, + 369.024, + 89.6982, + 1.0, + 366.67, + 89.6039, + 1.0, + 364.721, + 88.606, + 1.0, + 363.588, + 86.903, + 1.0, + 365.723, + 85.8496, + 1.0, + 368.184, + 85.2863, + 1.0, + 371.444, + 84.8294, + 1.0, + 374.647, + 85.0454, + 1.0, + 372.166, + 87.2914, + 1.0, + 368.81, + 88.3791, + 1.0, + 365.965, + 88.3238, + 1.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 304.10366, + 181.75134, + 1, + 300.70183, + 182.77567, + 1, + 297.3, + 183.8, + 1, + 294.7, + 186.5, + 1, + 290.1, + 187.8, + 1, + 290.9, + 176.6, + 1, + 287.5, + 176.0, + 1, + 285.5, + 178.4, + 1, + 286.4, + 182.4, + 1, + 288.8, + 179.4, + 1, + 285.0, + 181.0, + 1, + 287.3, + 186.1, + 1, + 291.8, + 189.5, + 1, + 287.7, + 182.7, + 1, + 283.8, + 184.1, + 1, + 286.5, + 189.1, + 1, + 290.0, + 192.0, + 1, + 286.7, + 185.3, + 1, + 282.8, + 187.4, + 1, + 284.8, + 191.6, + 1, + 288.4, + 194.5, + 1 + ], + "image_id": 785, + "bbox": [ + 280.79, + 44.73, + 218.7, + 346.68 + ], + "category_id": 1, + "id": 442619, + "face_box": [ + 358.2, + 69.86, + 26.360000000000014, + 25.849999999999994 + ], + "lefthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_box": [ + 280.43, + 173.12, + 27.860000000000014, + 24.849999999999994 + ], + "face_valid": true, + "lefthand_valid": false, + "righthand_valid": true, + "foot_valid": true + }, + { + "segmentation": [ + [ + 98.56, + 273.72, + 132.9, + 267, + 140.37, + 281.93, + 165.75, + 285.66, + 156.79, + 264.01, + 170.23, + 261.02, + 177.7, + 272.97, + 182.18, + 279.69, + 200.85, + 268.49, + 212.79, + 255.05, + 188.9, + 256.54, + 164.26, + 240.12, + 139.62, + 212.49, + 109.01, + 221.45, + 103.04, + 220.71, + 122.45, + 
202.04, + 113.49, + 196.07, + 96.32, + 168.44, + 97.06, + 162.47, + 110.5, + 136.34, + 112, + 124.39, + 91.09, + 110.95, + 80.64, + 114.68, + 71.68, + 131.86, + 62.72, + 147.54, + 57.49, + 156.5, + 48.53, + 168.44, + 41.07, + 180.39, + 38.08, + 193.08, + 40.32, + 205.03, + 47.04, + 213.24, + 54.5, + 216.23, + 82.13, + 252.06, + 91.09, + 271.48 + ] + ], + "num_keypoints": 106, + "area": 11025.219, + "iscrowd": 0, + "keypoints": [ + 99, + 144, + 2, + 104, + 141, + 2, + 96, + 137, + 2, + 0, + 0, + 0, + 78, + 133, + 2, + 56, + 161, + 2, + 81, + 162, + 2, + 0, + 0, + 0, + 103, + 208, + 2, + 116, + 204, + 2, + 0, + 0, + 0, + 57, + 246, + 1, + 82, + 259, + 1, + 137, + 219, + 2, + 138, + 247, + 2, + 177, + 256, + 2, + 158, + 296, + 1, + 208.16049, + 257.42419, + 2.0, + 205.8824, + 259.13276, + 2.0, + 183.38626, + 275.93367, + 2.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 82.9654, + 131.144, + 1.0, + 81.8046, + 134.328, + 1.0, + 80.7007, + 137.531, + 1.0, + 79.8836, + 140.818, + 1.0, + 79.734, + 144.196, + 1.0, + 80.4763, + 147.486, + 1.0, + 82.0188, + 150.498, + 1.0, + 84.2352, + 153.057, + 1.0, + 86.8081, + 155.258, + 1.0, + 89.652, + 157.095, + 1.0, + 92.9128, + 157.812, + 1.0, + 95.962, + 156.474, + 1.0, + 98.5377, + 154.281, + 1.0, + 100.557, + 151.568, + 1.0, + 102.508, + 148.799, + 1.0, + 103.987, + 145.756, + 1.0, + 105.345, + 142.655, + 1.0, + 93.6074, + 132.13, + 1.0, + 95.8108, + 132.112, + 1.0, + 97.7956, + 132.618, + 1.0, + 99.6897, + 133.398, + 1.0, + 101.364, + 134.432, + 1.0, + 105.0, + 136.896, + 1.0, + 105.708, + 137.334, + 1.0, + 106.267, + 137.852, + 1.0, + 106.759, + 138.404, + 1.0, + 107.013, + 139.401, + 1.0, + 100.904, + 139.994, + 1.0, + 100.551, + 142.0, + 1.0, + 100.202, + 143.956, + 1.0, + 99.8116, + 145.919, + 1.0, + 94.7941, + 146.187, + 1.0, + 95.9823, + 147.027, + 1.0, + 97.3054, + 147.849, + 1.0, + 98.2362, + 148.403, + 1.0, + 99.2812, + 148.491, + 1.0, + 93.151, + 135.98, + 1.0, + 94.9184, + 136.187, + 1.0, + 96.5441, + 136.903, + 1.0, + 97.6034, + 138.308, + 1.0, + 95.8998, + 138.017, + 1.0, + 94.3941, + 137.178, + 1.0, + 102.085, + 141.003, + 1.0, + 103.379, + 141.05, + 1.0, + 104.485, + 141.71, + 1.0, + 104.899, + 142.915, + 1.0, + 103.704, + 142.739, + 1.0, + 102.729, + 142.026, + 1.0, + 89.8433, + 148.685, + 1.0, + 92.6494, + 149.006, + 1.0, + 95.2801, + 149.78, + 1.0, + 96.1096, + 150.259, + 1.0, + 96.7411, + 150.719, + 1.0, + 97.3853, + 151.82, + 1.0, + 97.337, + 153.217, + 1.0, + 96.5124, + 153.108, + 1.0, + 95.6091, + 152.796, + 1.0, + 94.7518, + 152.399, + 1.0, + 93.0313, + 151.317, + 1.0, + 91.3461, + 150.149, + 1.0, + 90.24, + 148.802, + 1.0, + 92.9121, + 149.883, + 1.0, + 95.4213, + 151.204, + 1.0, + 96.3082, + 152.03, + 1.0, + 97.1377, + 152.997, + 1.0, + 96.3098, + 152.035, + 1.0, + 95.406, + 151.234, + 1.0, + 92.8725, + 149.984, + 1.0, + 109.88978, + 204.46047, + 1, + 113.101195, + 201.939065, + 1, + 116.31261, + 199.41766, + 1, + 113.19977, + 199.3139, + 1, + 109.8794, + 200.24775, + 1, + 117.86903, + 199.10638, + 2, + 113.9261, + 199.00262, + 2, + 109.56812, + 198.48381, + 2, + 106.6628, + 198.38004999999998, + 1, + 117.1427, + 202.32298, + 2, + 111.2283, + 201.80417, + 2, + 107.07784000000001, + 201.38913, + 2, + 103.65371999999999, + 201.18161, + 1, + 116.52013, + 205.95463, + 2, + 112.5772, + 205.53958, + 2, + 107.59665, + 204.39821, + 2, + 104.27629, + 203.77564, + 2, + 116.41637, + 209.69004, + 2, + 112.16215, + 209.48252, + 2, + 108.73803000000001, + 208.34114, + 2, + 105.72895, + 206.68096, + 2, + 0.0, + 0.0, + 0.0, 
+ 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "image_id": 40083, + "bbox": [ + 38.08, + 110.95, + 174.71, + 174.71 + ], + "category_id": 1, + "id": 198196, + "face_box": [ + 79.19, + 131.64, + 29.290000000000006, + 28.480000000000018 + ], + "lefthand_box": [ + 104.83, + 196.48, + 16.400000000000006, + 15.810000000000002 + ], + "righthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_valid": true, + "lefthand_valid": true, + "righthand_valid": false, + "foot_valid": true + }, + { + "segmentation": [ + [ + 257.76, + 288.05, + 273.4, + 258.26, + 325.55, + 253.79, + 335.23, + 232.93, + 326.3, + 186.74, + 333.74, + 177.05, + 327.79, + 153.21, + 333.74, + 142.04, + 344.17, + 139.06, + 353.11, + 139.06, + 359.07, + 145.02, + 360.56, + 148.74, + 362.05, + 168.86, + 388.87, + 197.17, + 397.81, + 276.88, + 372.48, + 293.27 + ] + ], + "num_keypoints": 83, + "area": 10171.9544, + "iscrowd": 0, + "keypoints": [ + 343, + 164, + 2, + 348, + 160, + 2, + 340, + 160, + 2, + 359, + 163, + 2, + 332, + 164, + 2, + 370, + 189, + 2, + 334, + 190, + 2, + 358, + 236, + 2, + 348, + 234, + 2, + 339, + 270, + 2, + 330, + 262, + 2, + 378, + 262, + 2, + 343, + 254, + 2, + 338, + 280, + 2, + 283, + 272, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 333.383, + 160.62, + 1.0, + 333.607, + 163.811, + 1.0, + 334.137, + 166.965, + 1.0, + 334.934, + 170.062, + 1.0, + 336.036, + 173.062, + 1.0, + 337.69, + 175.794, + 1.0, + 340.01, + 177.986, + 1.0, + 342.889, + 179.347, + 1.0, + 346.063, + 179.445, + 1.0, + 349.16, + 178.674, + 1.0, + 351.892, + 177.033, + 1.0, + 354.132, + 174.761, + 1.0, + 355.652, + 171.957, + 1.0, + 356.482, + 168.871, + 1.0, + 356.751, + 165.691, + 1.0, + 356.914, + 162.496, + 1.0, + 356.913, + 159.299, + 1.0, + 335.435, + 157.491, + 1.0, + 336.759, + 156.383, + 1.0, + 338.264, + 155.821, + 1.0, + 339.903, + 155.445, + 1.0, + 341.565, + 155.312, + 1.0, + 345.805, + 155.039, + 1.0, + 347.424, + 154.896, + 1.0, + 349.044, + 154.957, + 1.0, + 350.677, + 155.266, + 1.0, + 352.333, + 156.08, + 1.0, + 343.65, + 159.186, + 1.0, + 343.687, + 161.041, + 1.0, + 343.68, + 162.886, + 1.0, + 343.657, + 164.752, + 1.0, + 341.61, + 167.049, + 1.0, + 342.69, + 167.145, + 1.0, + 343.906, + 167.123, + 1.0, + 345.179, + 166.907, + 1.0, + 346.456, + 166.707, + 1.0, + 336.707, + 159.932, + 1.0, + 338.078, + 158.999, + 1.0, + 339.726, + 158.864, + 1.0, + 341.204, + 159.605, + 1.0, + 339.755, + 160.185, + 1.0, + 338.21, + 160.321, + 1.0, + 346.612, + 159.27, + 1.0, + 348.028, + 158.307, + 1.0, + 349.739, + 158.245, + 1.0, + 351.302, + 158.965, + 1.0, + 349.802, + 159.575, + 1.0, + 348.188, + 159.642, + 1.0, + 340.049, + 171.873, + 1.0, + 341.307, + 170.304, + 1.0, + 343.097, + 169.499, + 1.0, + 343.987, + 169.41, + 1.0, + 344.876, + 169.314, + 1.0, + 346.909, + 169.61, + 1.0, + 348.603, + 170.874, + 1.0, + 347.548, + 172.219, + 1.0, + 346.133, + 173.242, + 1.0, + 344.378, + 173.742, + 1.0, + 342.683, + 173.666, + 1.0, + 341.218, + 173.038, + 1.0, + 340.398, + 171.815, + 1.0, + 342.1, + 170.752, + 1.0, + 344.043, + 170.287, + 1.0, + 
346.21, + 170.271, + 1.0, + 348.214, + 170.913, + 1.0, + 346.462, + 171.947, + 1.0, + 344.283, + 172.468, + 1.0, + 342.246, + 172.507, + 1.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "image_id": 40083, + "bbox": [ + 257.76, + 139.06, + 140.05, + 154.21 + ], + "category_id": 1, + "id": 230195, + "face_box": [ + 333.96, + 154.32, + 23.28000000000003, + 26.79000000000002 + ], + "lefthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_valid": true, + "lefthand_valid": false, + "righthand_valid": false, + "foot_valid": false + }, + { + "segmentation": [ + [ + 285.37, + 126.5, + 281.97, + 127.72, + 280.76, + 132.33, + 280.76, + 136.46, + 275.17, + 143.26, + 275.9, + 158.08, + 277.6, + 164.4, + 278.33, + 173.87, + 278.33, + 183.83, + 279.79, + 191.11, + 281.97, + 194.76, + 284.89, + 192.09, + 284.89, + 186.99, + 284.89, + 181.16, + 284.64, + 177.51, + 285.86, + 173.87 + ] + ], + "num_keypoints": 0, + "area": 491.2669, + "iscrowd": 0, + "keypoints": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 
0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "image_id": 40083, + "bbox": [ + 275.17, + 126.5, + 10.69, + 68.26 + ], + "category_id": 1, + "id": 1202706, + "face_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_valid": false, + "lefthand_valid": false, + "righthand_valid": false, + "foot_valid": false + }, + { + "segmentation": [ + [ + 339.34, + 107.97, + 338.38, + 102.19, + 339.34, + 91.58, + 335.49, + 84.84, + 326.81, + 74.23, + 312.35, + 74.23, + 301.75, + 74.23, + 295, + 86.76, + 295, + 93.51, + 292.11, + 99.3, + 287.29, + 102.19, + 291.14, + 107.01, + 295, + 107.01, + 295.96, + 112.79, + 301.75, + 115.69, + 305.6, + 119.54, + 307.53, + 123.4, + 317.17, + 123.4, + 311.39, + 129.18, + 286.32, + 139.79, + 274.75, + 139.79, + 264.15, + 138.82, + 262.22, + 144.61, + 261.26, + 147.5, + 253.54, + 147.5, + 247.76, + 150.39, + 249.69, + 159.07, + 256.44, + 161, + 262.22, + 161, + 268, + 161, + 276.68, + 161.96, + 284.39, + 168.71, + 293.07, + 174.49, + 301.75, + 174.49, + 308.49, + 169.67, + 308.49, + 188.95, + 311.39, + 194.74, + 312.35, + 208.23, + 307.53, + 221.73, + 297.89, + 229.44, + 281.5, + 250.65, + 269.93, + 262.22, + 278.61, + 320.06, + 281.5, + 331.63, + 276.68, + 338.38, + 270.9, + 349.95, + 262.22, + 356.7, + 253.54, + 359.59, + 253.54, + 365.37, + 274.75, + 365.37, + 291.14, + 365.37, + 306.57, + 359.59, + 303.67, + 352.84, + 297.89, + 340.31, + 293.07, + 318.13, + 295, + 294.03, + 293.07, + 278.61, + 294.03, + 270.9, + 305.6, + 259.33, + 313.31, + 299.82, + 319.1, + 309.46, + 341.27, + 317.17, + 384.65, + 330.67, + 387.55, + 335.49, + 383.69, + 341.27, + 397.19, + 350.91, + 398.15, + 363.44, + 398.15, + 375.01, + 405.86, + 374.05, + 409.72, + 357.66, + 411.65, + 342.24, + 416.47, + 328.74, + 417.43, + 321.03, + 410.68, + 319.1, + 401.04, + 318.13, + 392.37, + 318.13, + 382.73, + 314.28, + 348.98, + 300.78, + 339.34, + 293.07, + 334.52, + 285.36, + 340.31, + 259.33, + 340.31, + 246.8, + 340.31, + 242.94, + 350.91, + 228.48, + 358.62, + 214.98, + 355.22, + 204.32, + 357.05, + 196.11, + 361.61, + 188.82, + 361.61, + 181.97, + 365.26, + 165.63, + 367.54, + 139.18, + 366.17, + 123.68, + 361.15, + 112.73, + 353.86, + 107.72, + 351.58, + 105.89, + 344.74, + 105.89, + 340.18, + 109.08 + ] + ], + "num_keypoints": 63, + "area": 17123.92955, + "iscrowd": 0, + "keypoints": [ + 297, + 111, + 2, + 299, + 106, + 2, + 0, + 0, + 0, + 314, + 108, + 2, + 0, + 0, + 0, + 329, + 141, + 2, + 346, + 125, + 2, + 295, + 164, + 2, + 323, + 130, + 2, + 266, + 
155, + 2, + 279, + 143, + 2, + 329, + 225, + 2, + 331, + 221, + 2, + 327, + 298, + 2, + 283, + 269, + 2, + 398, + 327, + 2, + 288, + 349, + 2, + 401.79499, + 364.28207, + 2.0, + 407.21854, + 361.57029, + 2.0, + 407.21854, + 325.86523, + 2.0, + 257.16687, + 361.57029, + 2.0, + 258.52276, + 361.11833, + 2.0, + 297.84353, + 355.69477, + 2.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 265.1, + 155.9, + 1, + 260.05, + 152.25, + 1, + 255.0, + 148.6, + 1, + 250.6, + 148.6, + 1, + 249.1, + 151.0, + 1, + 253.4, + 158.9, + 1, + 251.9, + 155.1, + 1, + 252.0, + 151.9, + 1, + 252.9, + 150.0, + 1, + 257.4, + 157.9, + 1, + 256.7, + 154.2, + 1, + 256.3, + 151.6, + 1, + 256.9, + 149.3, + 1, + 260.2, + 156.5, + 1, + 260.1, + 153.0, + 1, + 259.9, + 150.7, + 1, + 260.2, + 148.7, + 1, + 262.8, + 154.8, + 1, + 262.7, + 152.5, + 1, + 262.7, + 150.9, + 1, + 262.6, + 148.8, + 1, + 280.8, + 146.5, + 1, + 275.4, + 149.15, + 1, + 270.0, + 151.8, + 1, + 266.2, + 152.2, + 1, + 263.5, + 151.9, + 1, + 266.6, + 142.5, + 1, + 263.6, + 147.0, + 1, + 264.9, + 151.0, + 1, + 268.5, + 152.9, + 1, + 270.6, + 142.0, + 1, + 267.9, + 146.0, + 1, + 269.4, + 149.6, + 1, + 272.5, + 151.5, + 1, + 273.8, + 142.1, + 1, + 272.2, + 146.0, + 1, + 274.2, + 149.1, + 1, + 276.5, + 149.6, + 1, + 277.4, + 142.3, + 1, + 276.6, + 145.2, + 1, + 277.6, + 148.3, + 1, + 279.4, + 148.6, + 1 + ], + "image_id": 196141, + "bbox": [ + 247.76, + 74.23, + 169.67, + 300.78 + ], + "category_id": 1, + "id": 460541, + "face_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_box": [ + 249.12, + 146.31, + 19.920000000000016, + 15.819999999999993 + ], + "righthand_box": [ + 262.82, + 139.96, + 18.930000000000007, + 14.679999999999978 + ], + "face_valid": false, + "lefthand_valid": true, + "righthand_valid": true, + "foot_valid": true + }, + { + "segmentation": [ + [ + 578.76, + 112.4, + 589.39, + 100.81, + 589.39, + 99.84, + 596.16, + 116.27, + 603.89, + 122.07, + 603.89, + 138.49, + 598.09, + 159.75, + 597.12, + 181, + 594.22, + 191.63, + 589.39, + 212.89, + 583.59, + 208.06, + 583.59, + 206.13, + 582.63, + 200.33, + 582.63, + 193.57, + 582.63, + 182.94, + 575.86, + 181, + 567.17, + 197.43, + 571.03, + 
203.23, + 567.17, + 207.09, + 555.57, + 208.06, + 562.34, + 200.33, + 565.24, + 190.67, + 565.24, + 173.27, + 566.2, + 163.61, + 568.14, + 156.85, + 570.07, + 148.15, + 566.2, + 143.32, + 565.24, + 133.66, + 575.86, + 118.2 + ] + ], + "num_keypoints": 36, + "area": 2789.0208, + "iscrowd": 0, + "keypoints": [ + 589, + 113, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 595, + 112, + 1, + 584, + 110, + 2, + 598, + 123, + 2, + 579, + 119, + 2, + 594, + 141, + 2, + 570, + 137, + 2, + 576, + 135, + 2, + 585, + 139, + 2, + 590, + 157, + 2, + 574, + 156, + 2, + 589, + 192, + 2, + 565, + 189, + 1, + 587, + 222, + 1, + 557, + 219, + 1, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 578.8, + 135.7, + 2, + 577.55, + 134.35, + 2, + 576.3, + 133.0, + 1, + 574.6, + 134.1, + 1, + 574.0, + 135.5, + 1, + 574.3, + 132.9, + 2, + 572.0, + 132.4, + 2, + 570.3, + 131.8, + 2, + 568.9, + 130.7, + 2, + 573.3, + 134.4, + 2, + 570.9, + 134.0, + 2, + 569.5, + 133.9, + 2, + 568.2, + 133.8, + 2, + 572.8, + 135.7, + 2, + 572.6, + 138.3, + 2, + 574.1, + 139.4, + 2, + 576.2, + 139.4, + 1, + 574.4, + 138.0, + 2, + 575.4, + 139.5, + 2, + 576.3, + 140.2, + 2, + 577.6, + 140.8, + 2, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "image_id": 196141, + "bbox": [ + 555.57, + 99.84, + 48.32, + 113.05 + ], + "category_id": 1, + "id": 488308, + "face_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_box": [ + 568.2, + 130.89, + 10.75, + 11.130000000000024 + ], + "righthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_valid": false, + "lefthand_valid": true, + "righthand_valid": false, + "foot_valid": false + }, + { + "segmentation": [ + [ + 446.96, + 73.13, + 
445.81, + 77.71, + 443.33, + 78.29, + 441.61, + 81.72, + 441.23, + 84.58, + 440.85, + 90.5, + 442.19, + 94.32, + 443.52, + 97.18, + 443.52, + 102.33, + 442.57, + 105.58, + 446.58, + 105.19, + 447.15, + 99.85, + 447.53, + 94.89, + 446, + 93.55, + 446.38, + 92.03, + 453.64, + 92.41, + 454.02, + 94.51, + 457.64, + 94.51, + 455.74, + 88.4, + 455.35, + 82.29, + 453.64, + 78.48, + 451.92, + 77.71, + 452.87, + 74.47, + 450.58, + 73.13 + ] + ], + "num_keypoints": 0, + "area": 285.7906, + "iscrowd": 0, + "keypoints": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "image_id": 196141, + "bbox": [ + 440.85, + 73.13, + 16.79, + 32.45 + ], + "category_id": 1, + "id": 508900, + "face_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_valid": false, + "lefthand_valid": false, + "righthand_valid": false, + 
"foot_valid": false + }, + { + "segmentation": [ + [ + 497.15, + 413.95, + 531.55, + 417.68, + 548.74, + 411.7, + 551.74, + 403.48, + 546.5, + 394.5, + 543.51, + 386.28, + 571.93, + 390.76, + 574.92, + 391.51, + 579.4, + 409.46, + 605.58, + 409.46, + 615.3, + 408.71, + 607.07, + 389.27, + 598.1, + 381.79, + 607.82, + 366.83, + 607.82, + 352.63, + 610.06, + 338.42, + 619.04, + 345.15, + 631, + 344.4, + 630.25, + 336.92, + 626.51, + 318.98, + 616.05, + 286.07, + 598.85, + 263.64, + 585.39, + 257.66, + 593.61, + 244.2, + 601.09, + 235.97, + 596.6, + 219.52, + 587.63, + 211.29, + 577.91, + 208.3, + 563.7, + 206.81, + 556.22, + 214.29, + 548, + 217.28, + 539.77, + 229.99, + 539.77, + 241.95, + 539.02, + 247.19, + 523.32, + 247.19, + 503.88, + 254.67, + 485.93, + 254.67, + 479.95, + 248.68, + 473.22, + 241.21, + 485.93, + 227, + 477.7, + 215.78, + 457.51, + 215.78, + 453.77, + 235.22, + 463.5, + 246.44, + 465.74, + 261.4, + 490.42, + 274.11, + 501.63, + 275.6, + 504.62, + 286.07, + 519.58, + 286.07, + 522.57, + 292.06, + 512.85, + 310, + 515.09, + 330.94, + 530.05, + 343.65, + 505.37, + 341.41, + 479.95, + 339.91, + 465.74, + 346.64, + 463.5, + 358.61, + 473.97, + 381.04, + 485.18, + 390.02, + 501.63, + 398.99, + 504.62, + 404.22, + 491.16, + 412.45, + 495.65, + 417.68 + ] + ], + "num_keypoints": 15, + "area": 21608.94075, + "iscrowd": 0, + "keypoints": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 552, + 234, + 2, + 0, + 0, + 0, + 531, + 262, + 2, + 600, + 283, + 2, + 480, + 260, + 2, + 622, + 336, + 2, + 466, + 242, + 2, + 0, + 0, + 0, + 546, + 365, + 2, + 592, + 371, + 2, + 470, + 351, + 2, + 551, + 330, + 2, + 519, + 394, + 2, + 589, + 391, + 2, + 0.0, + 0.0, + 0.0, + 498.08009, + 412.23863, + 2.0, + 541.66626, + 400.39384, + 2.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 602.22109, + 403.58794, + 2.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 
0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "image_id": 196141, + "bbox": [ + 453.77, + 206.81, + 177.23, + 210.87 + ], + "category_id": 1, + "id": 1717641, + "face_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_valid": false, + "lefthand_valid": false, + "righthand_valid": false, + "foot_valid": true + }, + { + "segmentation": [ + [ + 58.93, + 163.67, + 47.18, + 161.59, + 36.12, + 93.86, + 41.65, + 82.8, + 40.27, + 69.66, + 50.64, + 67.59, + 55.48, + 73.81, + 63.08, + 92.47, + 66.53, + 99.38, + 65.15, + 109.06, + 61, + 127.03, + 59.62, + 162.97 + ] + ], + "num_keypoints": 20, + "area": 1870.14015, + "iscrowd": 0, + "keypoints": [ + 48, + 79, + 2, + 50, + 77, + 2, + 46, + 77, + 2, + 54, + 78, + 2, + 45, + 78, + 2, + 57, + 90, + 2, + 42, + 90, + 2, + 63, + 103, + 2, + 42, + 105, + 2, + 56, + 113, + 2, + 49, + 112, + 2, + 55, + 117, + 2, + 44, + 117, + 2, + 55, + 140, + 2, + 47, + 140, + 2, + 56, + 160, + 2, + 49, + 159, + 2, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 44.4, + 162.6, + 2.0, + 43.4, + 161.5, + 2.0, + 51.7, + 160.7, + 2.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, 
+ 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "image_id": 196141, + "bbox": [ + 36.12, + 67.59, + 30.41, + 96.08 + ], + "category_id": 1, + "id": 1724673, + "face_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_valid": false, + "lefthand_valid": false, + "righthand_valid": false, + "foot_valid": true + }, + { + "segmentation": [ + [ + 139.41, + 321.58, + 144.78, + 326.56, + 196.92, + 314.68, + 196.16, + 309.31, + 207.28, + 292.05, + 213.03, + 284, + 228.75, + 270.2, + 233.35, + 261.38, + 244.47, + 252.56, + 254.44, + 237.61, + 267.86, + 215.37, + 272.08, + 212.68, + 285.5, + 232.62, + 294.7, + 250.64, + 295.08, + 264.06, + 290.87, + 277.87, + 290.87, + 286.3, + 289.71, + 298.19, + 281.66, + 318.89, + 282.05, + 334.23, + 295.08, + 340.37, + 315.02, + 343.82, + 314.25, + 336.53, + 310.42, + 330.4, + 301.98, + 322.34, + 304.29, + 310.84, + 304.67, + 302.79, + 306.2, + 292.05, + 311.19, + 275.56, + 313.87, + 251.79, + 311.19, + 234.54, + 312.72, + 224.57, + 310.42, + 212.3, + 307.74, + 201.56, + 306.2, + 193.51, + 306.59, + 183.16, + 310.04, + 177.41, + 314.64, + 173.19, + 316.94, + 171.65, + 328.06, + 163.99, + 337.64, + 157.85, + 343.4, + 159.77, + 346.46, + 166.67, + 346.85, + 170.5, + 346.46, + 179.71, + 346.85, + 188.53, + 346.85, + 191.98, + 344.55, + 198.11, + 342.25, + 203.48, + 338.41, + 208.46, + 335.34, + 212.68, + 335.34, + 217.67, + 343.01, + 222.65, + 354.9, + 210.76, + 359.12, + 196.19, + 361.8, + 173.19, + 361.42, + 161.69, + 356.43, + 150.18, + 344.93, + 135.61, + 343.01, + 132.93, + 345.31, + 126.41, + 345.7, + 124.88, + 343.4, + 115.29, + 340.33, + 104.17, + 337.26, + 102.25, + 330.36, + 103.4, + 326.14, + 106.09, + 320.01, + 111.07, + 314.64, + 119.89, + 310.42, + 121.04, + 292.02, + 121.81, + 279.75, + 127.94, + 244.09, + 138.68, + 240.25, + 142.51, + 238.72, + 154.4, + 239.1, + 163.6, + 239.87, + 173.96, + 241.79, + 181.24, + 248.3, + 192.36, + 240.25, + 206.55, + 236.42, + 219.2, + 229.9, + 236.45, + 225.3, + 247.57, + 218.4, + 254.48, + 208.81, + 265.6, + 202.29, + 278.25, + 195.39, + 285.92, + 188.49, + 292.05, + 183.5, + 295.89, + 176.6, + 302.41, + 172, + 308.54, + 167.78, + 313.14, + 146.31, + 318.89 + ] + ], + "num_keypoints": 132, + "area": 14250.29385, + "iscrowd": 0, + "keypoints": [ + 334, + 135, + 2, + 340, + 129, + 2, + 331, + 129, + 2, + 0, + 0, + 0, + 319, + 123, + 2, + 340, + 146, + 2, + 292, + 133, + 2, + 353, + 164, + 2, + 246, + 144, + 2, + 354, + 197, + 2, + 250, + 185, + 2, + 293, + 197, + 2, + 265, + 187, + 2, + 305, + 252, + 2, + 231, + 254, + 2, + 293, + 321, + 2, + 193, + 297, + 2, + 300.24175, + 336.83838, + 2.0, + 306.59015, + 335.34464, + 2.0, + 290.07408, + 326.47826, + 2.0, + 182.60972, + 314.05885, + 2.0, + 175.88789, + 305.84328, + 2.0, + 189.70499, + 302.48236, + 2.0, + 319.681, + 126.613, + 1.0, + 319.155, + 129.261, + 1.0, + 318.92, + 131.954, + 1.0, + 319.187, + 134.631, + 1.0, + 319.707, + 137.271, + 1.0, + 320.991, + 139.649, + 1.0, + 322.846, + 141.606, + 1.0, + 325.009, + 143.216, + 1.0, 
+ 327.359, + 144.544, + 1.0, + 329.907, + 145.384, + 1.0, + 332.347, + 144.347, + 1.0, + 334.268, + 142.449, + 1.0, + 335.767, + 140.222, + 1.0, + 336.675, + 137.69, + 1.0, + 337.019, + 135.009, + 1.0, + 336.982, + 132.311, + 1.0, + 337.13, + 129.618, + 1.0, + 328.503, + 125.823, + 1.0, + 329.531, + 125.489, + 1.0, + 330.619, + 125.626, + 1.0, + 331.573, + 125.909, + 1.0, + 332.529, + 126.431, + 1.0, + 334.479, + 127.459, + 1.0, + 334.815, + 127.43, + 1.0, + 335.157, + 127.316, + 1.0, + 335.52, + 127.327, + 1.0, + 335.949, + 127.701, + 1.0, + 332.762, + 129.334, + 1.0, + 333.168, + 130.389, + 1.0, + 333.603, + 131.342, + 1.0, + 333.928, + 132.331, + 1.0, + 331.671, + 134.291, + 1.0, + 332.232, + 134.389, + 1.0, + 332.931, + 134.487, + 1.0, + 333.332, + 134.463, + 1.0, + 333.645, + 134.212, + 1.0, + 329.271, + 128.208, + 1.0, + 329.963, + 128.464, + 1.0, + 330.676, + 128.659, + 1.0, + 331.392, + 128.839, + 1.0, + 330.672, + 128.659, + 1.0, + 330.003, + 128.334, + 1.0, + 333.792, + 129.611, + 1.0, + 334.158, + 129.741, + 1.0, + 334.546, + 129.765, + 1.0, + 334.878, + 129.954, + 1.0, + 334.523, + 129.822, + 1.0, + 334.161, + 129.704, + 1.0, + 327.38, + 138.818, + 1.0, + 329.757, + 138.136, + 1.0, + 332.086, + 137.874, + 1.0, + 332.75, + 138.208, + 1.0, + 333.221, + 138.515, + 1.0, + 334.495, + 139.634, + 1.0, + 335.213, + 141.054, + 1.0, + 334.12, + 140.754, + 1.0, + 333.208, + 140.234, + 1.0, + 332.2, + 139.888, + 1.0, + 330.765, + 139.414, + 1.0, + 329.069, + 139.351, + 1.0, + 327.561, + 138.814, + 1.0, + 329.88, + 138.346, + 1.0, + 332.517, + 138.668, + 1.0, + 334.031, + 139.589, + 1.0, + 335.123, + 140.862, + 1.0, + 333.726, + 140.572, + 1.0, + 332.203, + 140.032, + 1.0, + 329.731, + 139.403, + 1.0, + 353.87482, + 196.49984999999998, + 1, + 349.01957500000003, + 201.76511, + 1, + 344.16433, + 207.03037, + 1, + 340.81534, + 210.64729, + 1, + 337.46165, + 216.59183000000002, + 1, + 346.65868, + 216.02586, + 1, + 342.27241, + 219.28019999999998, + 1, + 337.88613, + 219.70467, + 1, + 334.4903, + 218.57273, + 1, + 345.5, + 215.0, + 1, + 342.27241, + 217.72377, + 1, + 338.73509, + 218.00675999999999, + 1, + 334.77329, + 216.30885, + 1, + 343.7, + 213.8, + 1, + 341.42345, + 215.74288, + 1, + 338.73509, + 215.60138, + 1, + 335.62225, + 213.76198, + 1, + 342.4139, + 212.63003, + 1, + 340.85748, + 213.76198, + 1, + 338.87658, + 214.04496, + 1, + 337.17867, + 213.76198, + 1, + 249.4, + 180.4, + 1, + 254.3, + 184.9, + 1, + 259.2, + 189.4, + 1, + 259.3, + 192.1, + 1, + 258.2, + 194.9, + 1, + 254.9, + 193.2, + 1, + 255.9, + 192.3, + 1, + 255.9, + 190.5, + 1, + 255.4, + 188.5, + 1, + 252.2, + 194.0, + 1, + 253.2, + 193.6, + 1, + 253.2, + 191.1, + 1, + 252.9, + 188.8, + 1, + 249.4, + 193.6, + 1, + 250.4, + 193.6, + 1, + 250.4, + 191.3, + 1, + 249.9, + 188.7, + 1, + 247.1, + 192.2, + 1, + 248.0, + 192.2, + 1, + 247.9, + 190.3, + 1, + 247.5, + 188.3, + 1 + ], + "image_id": 197388, + "bbox": [ + 139.41, + 102.25, + 222.39, + 241.57 + ], + "category_id": 1, + "id": 437295, + "face_box": [ + 320.23, + 123.84, + 21.049999999999955, + 23.5 + ], + "lefthand_box": [ + 333.65, + 198.45, + 23.150000000000034, + 23.57000000000002 + ], + "righthand_box": [ + 247.5, + 184.92, + 23.30000000000001, + 22.360000000000014 + ], + "face_valid": true, + "lefthand_valid": true, + "righthand_valid": true, + "foot_valid": true + }, + { + "segmentation": [ + [ + 287.17, + 121.42, + 294.22, + 106.44, + 302.15, + 116.13, + 303.03, + 121.42 + ], + [ + 297.74, + 99.39, + 310.08, + 76.49, + 326.81, + 76.49, + 329.46, + 67.68, + 
337.38, + 61.52, + 346.19, + 62.4, + 353.24, + 65.92, + 353.24, + 76.49, + 355.88, + 84.42, + 359.41, + 87.94, + 362.05, + 96.75, + 354.12, + 139.04, + 349.72, + 142.56, + 345.31, + 139.92, + 349.72, + 117.89, + 348.84, + 108.2, + 345.31, + 113.49, + 336.5, + 101.16, + 325.93, + 110.85, + 311.84, + 123.18 + ], + [ + 324.17, + 176.91, + 332.1, + 191.89, + 328.58, + 198.94, + 327.69, + 205.98, + 333.86, + 213.03, + 337.38, + 227.13, + 332.98, + 227.13, + 319.77, + 219.2, + 313.6, + 211.27 + ], + [ + 332.98, + 165.46, + 341.79, + 161.06, + 336.5, + 174.27, + 333.86, + 186.6, + 326.81, + 176.03 + ] + ], + "num_keypoints": 19, + "area": 3404.869, + "iscrowd": 0, + "keypoints": [ + 345, + 92, + 2, + 350, + 87, + 2, + 341, + 87, + 2, + 0, + 0, + 0, + 330, + 83, + 2, + 357, + 94, + 2, + 316, + 92, + 2, + 357, + 104, + 2, + 291, + 123, + 1, + 351, + 133, + 2, + 281, + 136, + 1, + 326, + 131, + 1, + 305, + 128, + 1, + 336, + 152, + 1, + 303, + 171, + 1, + 318, + 206, + 2, + 294, + 211, + 1, + 322.595, + 216.245, + 2.0, + 327.23077, + 215.42692, + 2.0, + 316.81553, + 207.67155, + 2.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "image_id": 197388, + "bbox": [ + 287.17, + 61.52, + 74.88, + 165.61 + ], + 
"category_id": 1, + "id": 467657, + "face_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_valid": false, + "lefthand_valid": false, + "righthand_valid": false, + "foot_valid": true + }, + { + "segmentation": [ + [ + 547.95, + 201.57, + 546.73, + 190.62, + 547.95, + 181.49, + 547.95, + 169.31, + 547.95, + 156.53, + 546.73, + 144.36, + 544.3, + 139.49, + 540.04, + 132.19, + 540.04, + 121.84, + 542.47, + 107.24, + 544.3, + 99.33, + 548.56, + 88.98, + 561.95, + 78.03, + 572.29, + 71.33, + 572.29, + 71.33, + 572.29, + 65.25, + 574.12, + 51.86, + 583.86, + 48.81, + 592.99, + 48.81, + 597.86, + 57.33, + 599.07, + 64.64, + 608.2, + 76.81, + 614.9, + 82.89, + 620.98, + 89.59, + 628.89, + 93.24, + 636.81, + 101.76, + 640, + 109.67, + 640, + 115.76, + 640, + 127.93, + 620.37, + 111.5, + 619.16, + 111.5, + 618.55, + 112.11, + 608.2, + 105.41, + 600.9, + 119.41, + 592.99, + 131.58, + 596.03, + 148.01, + 605.16, + 162.01, + 612.46, + 190.01, + 614.9, + 204.61, + 606.98, + 216.78, + 603.94, + 226.52, + 606.38, + 239.91, + 605.16, + 256.95, + 604.55, + 264.26, + 602.12, + 271.56, + 586.29, + 272.17, + 584.47, + 255.13, + 588.73, + 237.48, + 592.99, + 221.65, + 596.64, + 207.05, + 596.64, + 197.31, + 594.2, + 186.96, + 584.47, + 172.36, + 577.77, + 166.27, + 570.47, + 170.53, + 558.91, + 179.66, + 555.86, + 192.44, + 548.56, + 198.53, + 547.95, + 198.53 + ] + ], + "num_keypoints": 39, + "area": 8913.98475, + "iscrowd": 0, + "keypoints": [ + 591, + 78, + 2, + 594, + 74, + 2, + 586, + 74, + 2, + 0, + 0, + 0, + 573, + 70, + 2, + 598, + 86, + 2, + 566, + 93, + 2, + 626, + 105, + 2, + 546, + 126, + 2, + 0, + 0, + 0, + 561, + 150, + 2, + 582, + 150, + 2, + 557, + 154, + 2, + 606, + 194, + 2, + 558, + 209, + 1, + 591, + 252, + 2, + 539, + 262, + 1, + 599.72032, + 264.75714, + 2.0, + 603.91172, + 265.80499, + 2.0, + 585.74897, + 265.10642, + 2.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, 
+ 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 565.0, + 153.0, + 0.08773341029882431, + 568.0, + 156.0, + 0.04602484405040741, + 571.0, + 159.0, + 0.04602484405040741, + 573.0, + 161.0, + 0.06972061097621918, + 575.0, + 164.0, + 0.06297813355922699, + 569.0, + 158.0, + 0.294232040643692, + 570.0, + 162.0, + 0.26472434401512146, + 570.0, + 166.0, + 0.2826344072818756, + 571.0, + 171.0, + 0.374575674533844, + 565.0, + 159.0, + 0.2154899388551712, + 566.0, + 162.0, + 0.21613340079784393, + 566.0, + 164.0, + 0.2544613480567932, + 567.0, + 168.0, + 0.31771761178970337, + 562.0, + 160.0, + 0.23286579549312592, + 563.0, + 166.0, + 0.1579097956418991, + 564.0, + 166.0, + 0.17961391806602478, + 564.0, + 166.0, + 0.17504136264324188, + 559.0, + 160.0, + 0.3428754508495331, + 559.0, + 162.0, + 0.2897874116897583, + 561.0, + 165.0, + 0.24125981330871582, + 562.0, + 166.0, + 0.20118576288223267 + ], + "image_id": 197388, + "bbox": [ + 540.04, + 48.81, + 99.96, + 223.36 + ], + "category_id": 1, + "id": 531914, + "face_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_box": [ + 557.05, + 149.73, + 19.879999999999995, + 21.76000000000002 + ], + "face_valid": false, + "lefthand_valid": false, + "righthand_valid": true, + "foot_valid": true + }, + { + "segmentation": [ + [ + 561.51, + 385.38, + 572.11, + 352.71, + 570.34, + 317.4, + 559.75, + 282.08, + 552.68, + 267.07, + 565.93, + 236.17, + 583.59, + 236.17, + 602.13, + 260.01, + 614.49, + 286.5, + 628.61, + 302.39, + 639.21, + 281.2, + 614.49, + 251.18, + 588, + 218.51, + 595.95, + 202.62, + 594.18, + 185.85, + 580.05, + 170.84, + 562.4, + 179.67, + 557.98, + 198.21, + 554.45, + 202.62, + 532.38, + 199.97, + 525.32, + 202.62, + 511.19, + 229.11, + 493.53, + 256.48, + 484.7, + 276.78, + 451.15, + 323.58, + 423.78, + 338.59, + 388.47, + 373.9, + 372.58, + 387.14, + 396.41, + 388.03, + 418.49, + 367.72, + 450.27, + 345.65, + 501.48, + 306.8, + 520.02, + 301.5, + 552.68, + 340.35, + 543.86, + 369.49 + ] + ], + "num_keypoints": 60, + "area": 14267.20475, + "iscrowd": 0, + "keypoints": [ + 580, + 211, + 2, + 586, + 206, + 2, + 574, + 204, + 2, + 0, + 0, + 0, + 562, + 198, + 2, + 584, + 220, + 2, + 529, + 215, + 2, + 599, + 242, + 2, + 512, + 260, + 2, + 619, + 274, + 2, + 538, + 285, + 2, + 537, + 288, + 2, + 506, + 277, + 2, + 562, + 332, + 2, + 452, + 332, + 2, + 550, + 387, + 1, + 402, + 371, + 2, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 375.00826, + 386.35839, + 2.0, + 399.52454, + 375.91627, + 2.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 
0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 620.284, + 274.54006, + 1, + 621.65135, + 282.30908999999997, + 1, + 623.0187, + 290.07812, + 1, + 625.38048, + 294.55308, + 1, + 628.86101, + 298.90373999999997, + 1, + 630.22836, + 289.20799, + 1, + 634.57901, + 292.43991, + 1, + 633.08736, + 295.54752, + 1, + 628.6124, + 295.42321, + 1, + 632.46584, + 286.5976, + 1, + 631.3, + 291.9, + 1, + 627.7, + 291.6, + 1, + 625.6, + 288.9, + 1, + 633.7, + 284.2, + 1, + 632.3, + 288.0, + 1, + 629.1, + 288.0, + 1, + 627.0, + 285.9, + 1, + 633.2, + 280.4, + 1, + 632.8, + 283.6, + 1, + 630.8, + 284.4, + 1, + 629.1, + 283.2, + 1, + 544.0, + 291.0, + 0.09089653939008713, + 551.0, + 291.0, + 0.041192591190338135, + 558.0, + 291.0, + 0.041192591190338135, + 559.0, + 294.0, + 0.056781601160764694, + 563.0, + 298.0, + 0.2960541546344757, + 559.0, + 296.0, + 0.18105527758598328, + 562.0, + 301.0, + 0.12244582921266556, + 559.0, + 308.0, + 0.05529222637414932, + 564.0, + 306.0, + 0.05997529253363609, + 555.0, + 299.0, + 0.18805834650993347, + 556.0, + 302.0, + 0.1534559577703476, + 555.0, + 306.0, + 0.20564205944538116, + 556.0, + 309.0, + 0.06228385493159294, + 550.0, + 300.0, + 0.1409723311662674, + 550.0, + 301.0, + 0.2223101258277893, + 551.0, + 305.0, + 0.2001882642507553, + 553.0, + 308.0, + 0.1712668538093567, + 545.0, + 302.0, + 0.1908813714981079, + 546.0, + 304.0, + 0.13619276881217957, + 547.0, + 306.0, + 0.19773860275745392, + 549.0, + 308.0, + 0.1341865360736847 + ], + "image_id": 197388, + "bbox": [ + 372.58, + 170.84, + 266.63, + 217.19 + ], + "category_id": 1, + "id": 533949, + "face_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_box": [ + 615.22, + 271.56, + 22.139999999999986, + 28.839999999999975 + ], + "righthand_box": [ + 538.83, + 283.74, + 25.639999999999986, + 30.659999999999968 + ], + "face_valid": false, + "lefthand_valid": true, + "righthand_valid": true, + "foot_valid": true + }, + { + "segmentation": [ + [ + 2.03, + 75.18, + 10.85, + 70.58, + 16.99, + 65.59, + 17.75, + 55.24, + 20.05, + 50.25, + 29.64, + 43.74, + 37.31, + 47.57, + 41.52, + 53.7, + 43.83, + 64.82, + 53.03, + 70.19, + 61.85, + 77.09, + 72.58, + 87.06, + 74.88, + 79.01, + 78.72, + 73.64, + 86.39, + 77.86, + 90.6, + 90.13, + 86, + 93.2, + 82.17, + 102.4, + 75.27, + 106.24, + 68.75, + 104.7, + 50.34, + 90.9, + 43.06, + 112.37, + 40.76, + 123.11, + 42.29, + 130.78, + 48.04, + 161.83, + 52.26, + 190.59, + 50.73, + 210.15, + 44.21, + 245.04, + 50.34, + 256.16, + 53.03, + 261.53, + 47.28, + 263.83, + 40.37, + 263.83, + 31.56, + 260.76, + 28.1, + 256.16, + 26.95, + 244.65, + 29.25, + 233.54, + 32.71, + 223.95, + 33.09, + 213.98, + 32.32, + 206.31, + 32.71, + 194.81, + 33.09, + 185.61, + 24.65, + 177.17, + 16.99, + 161.45, + 13.53, + 176.02, + 10.85, + 206.31, + 1.65, + 231.62, + 1.65, + 235.84, + 0.5, + 146.88, + 0.88, + 122.34, + 1.65, + 
75.56 + ] + ], + "num_keypoints": 16, + "area": 8260.75085, + "iscrowd": 0, + "keypoints": [ + 36, + 79, + 2, + 40, + 74, + 2, + 31, + 75, + 2, + 0, + 0, + 0, + 19, + 69, + 2, + 45, + 77, + 2, + 2, + 89, + 2, + 74, + 99, + 2, + 0, + 0, + 0, + 78, + 92, + 2, + 0, + 0, + 0, + 33, + 149, + 2, + 7, + 153, + 2, + 44, + 196, + 2, + 2, + 205, + 2, + 35, + 245, + 2, + 0, + 0, + 0, + 43.80826, + 259.40011, + 2.0, + 48.63752, + 257.67537, + 2.0, + 32.08007, + 256.29558, + 2.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "image_id": 197388, + "bbox": [ + 0.5, + 43.74, + 90.1, + 220.09 + ], + "category_id": 1, + "id": 543117, + "face_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "lefthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "righthand_box": [ + 0.0, + 0.0, + 0.0, + 0.0 + ], + "face_valid": false, + "lefthand_valid": false, + "righthand_valid": false, + "foot_valid": true + } + ] } \ No newline at end of file diff --git a/tests/data/cofw/test_cofw.json b/tests/data/cofw/test_cofw.json index a8e6ac5d0b..a6186b3940 100644 --- a/tests/data/cofw/test_cofw.json +++ b/tests/data/cofw/test_cofw.json @@ -1,239 +1,239 @@ -{ - "info": { - "description": "MMPose example COFW dataset", - "version": "1.0", - "year": "2020", 
- "date_created": "2020/12/31" - }, - "categories": [ - { - "supercategory": "person", - "id": 1, - "name": "face", - "keypoints": [], - "skeleton": [] - } - ], - "images": [ - { - "id": 1766, - "file_name": "001766.jpg", - "height": 322, - "width": 235 - }, - { - "id": 1805, - "file_name": "001805.jpg", - "height": 253, - "width": 352 - } - ], - "annotations": [ - { - "keypoints": [ - 46.562534549474826, - 120.07575548185008, - 2.0, - 131.52522111663905, - 115.97127786990977, - 2.0, - 65.8535793255943, - 118.02351667587989, - 2.0, - 105.66701216141516, - 114.73993458632765, - 2.0, - 56.41328081813161, - 110.63545697438737, - 2.0, - 56.41328081813161, - 118.43396443707394, - 2.0, - 119.62223604201218, - 111.45635249677541, - 2.0, - 119.62223604201218, - 117.61306891468585, - 2.0, - 51.89835544499728, - 144.29217339229783, - 2.0, - 129.06253454947486, - 143.88172563110376, - 2.0, - 72.83119126589278, - 145.9339644370739, - 2.0, - 108.12969872857936, - 143.47127786990976, - 2.0, - 62.980444997236056, - 136.0832181684172, - 2.0, - 60.928206191265936, - 148.3966510042381, - 2.0, - 120.03268380320623, - 136.90411369080527, - 2.0, - 120.44313156440026, - 147.16530772065602, - 2.0, - 65.44313156440026, - 143.06083010871572, - 2.0, - 120.44313156440026, - 143.06083010871572, - 2.0, - 65.44313156440026, - 184.10560622811872, - 2.0, - 103.61477335544504, - 183.69515846692468, - 2.0, - 78.5774599226092, - 180.0011286161784, - 2.0, - 79.80880320619127, - 190.26232264602916, - 2.0, - 60.928206191265936, - 212.8369495117008, - 1.0, - 116.74910171365397, - 214.4787405564769, - 1.0, - 83.50283305693756, - 207.5011286161784, - 1.0, - 83.91328081813161, - 211.19515846692468, - 1.0, - 88.83865395245994, - 232.94888981020827, - 1.0, - 88.83865395245994, - 239.92650175050676, - 1.0, - 94.99537037037038, - 271.120531601253, - 2.0 - ], - "image_id": 1766, - "id": 1766, - "num_keypoints": 29, - "bbox": [ - 38.0, - 102.0, - 105.0, - 180.0 - ], - "iscrowd": 0, - "area": 18900.0, - "category_id": 1 - }, - { - "keypoints": [ - 111.02581748226716, - 99.60260061678404, - 1.0, - 203.48326006615514, - 99.60260061678402, - 2.0, - 138.9556699294833, - 104.89964159815261, - 1.0, - 158.69918631458435, - 101.04724815715728, - 2.0, - 124.02764534562637, - 98.6395022565352, - 1.0, - 124.50919452575083, - 102.49189569753052, - 1.0, - 178.44270269968544, - 92.37936291491776, - 2.0, - 178.92425187980987, - 97.19485471616193, - 2.0, - 116.80440764376011, - 115.9752727410142, - 1.0, - 192.40762892329354, - 115.9752727410142, - 2.0, - 137.02947320898565, - 120.30921536213394, - 2.0, - 165.44087483632623, - 119.82766618200952, - 2.0, - 125.47229288599961, - 114.04907602051652, - 1.0, - 126.43539124624846, - 120.30921536213394, - 1.0, - 180.3688994201831, - 107.78893667889912, - 2.0, - 180.3688994201831, - 118.8645678217607, - 2.0, - 125.95384206612407, - 118.38301864163628, - 1.0, - 180.85044860030752, - 115.01217438076534, - 2.0, - 132.69553058786587, - 149.68371534972337, - 2.0, - 162.0700305754553, - 152.09146125034545, - 2.0, - 143.77116173072744, - 147.7575186292257, - 2.0, - 144.2527109108519, - 157.388502231714, - 2.0, - 132.21398140774147, - 168.4641333745756, - 1.0, - 184.22129286117845, - 167.9825841944512, - 2.0, - 149.06820271209602, - 165.09328911370469, - 2.0, - 150.51285025246932, - 170.3903300950733, - 2.0, - 151.95749779284256, - 182.42905959818367, - 2.0, - 153.4021453332158, - 188.6891989398011, - 2.0, - 155.32834205371347, - 218.54524810751494, - 2.0 - ], - "image_id": 1805, - "id": 1805, - "num_keypoints": 29, - 
"bbox": [ - 102.0, - 83.0, - 112.0, - 146.0 - ], - "iscrowd": 0, - "area": 16352.0, - "category_id": 1 - } - ] +{ + "info": { + "description": "MMPose example COFW dataset", + "version": "1.0", + "year": "2020", + "date_created": "2020/12/31" + }, + "categories": [ + { + "supercategory": "person", + "id": 1, + "name": "face", + "keypoints": [], + "skeleton": [] + } + ], + "images": [ + { + "id": 1766, + "file_name": "001766.jpg", + "height": 322, + "width": 235 + }, + { + "id": 1805, + "file_name": "001805.jpg", + "height": 253, + "width": 352 + } + ], + "annotations": [ + { + "keypoints": [ + 46.562534549474826, + 120.07575548185008, + 2.0, + 131.52522111663905, + 115.97127786990977, + 2.0, + 65.8535793255943, + 118.02351667587989, + 2.0, + 105.66701216141516, + 114.73993458632765, + 2.0, + 56.41328081813161, + 110.63545697438737, + 2.0, + 56.41328081813161, + 118.43396443707394, + 2.0, + 119.62223604201218, + 111.45635249677541, + 2.0, + 119.62223604201218, + 117.61306891468585, + 2.0, + 51.89835544499728, + 144.29217339229783, + 2.0, + 129.06253454947486, + 143.88172563110376, + 2.0, + 72.83119126589278, + 145.9339644370739, + 2.0, + 108.12969872857936, + 143.47127786990976, + 2.0, + 62.980444997236056, + 136.0832181684172, + 2.0, + 60.928206191265936, + 148.3966510042381, + 2.0, + 120.03268380320623, + 136.90411369080527, + 2.0, + 120.44313156440026, + 147.16530772065602, + 2.0, + 65.44313156440026, + 143.06083010871572, + 2.0, + 120.44313156440026, + 143.06083010871572, + 2.0, + 65.44313156440026, + 184.10560622811872, + 2.0, + 103.61477335544504, + 183.69515846692468, + 2.0, + 78.5774599226092, + 180.0011286161784, + 2.0, + 79.80880320619127, + 190.26232264602916, + 2.0, + 60.928206191265936, + 212.8369495117008, + 1.0, + 116.74910171365397, + 214.4787405564769, + 1.0, + 83.50283305693756, + 207.5011286161784, + 1.0, + 83.91328081813161, + 211.19515846692468, + 1.0, + 88.83865395245994, + 232.94888981020827, + 1.0, + 88.83865395245994, + 239.92650175050676, + 1.0, + 94.99537037037038, + 271.120531601253, + 2.0 + ], + "image_id": 1766, + "id": 1766, + "num_keypoints": 29, + "bbox": [ + 38.0, + 102.0, + 105.0, + 180.0 + ], + "iscrowd": 0, + "area": 18900.0, + "category_id": 1 + }, + { + "keypoints": [ + 111.02581748226716, + 99.60260061678404, + 1.0, + 203.48326006615514, + 99.60260061678402, + 2.0, + 138.9556699294833, + 104.89964159815261, + 1.0, + 158.69918631458435, + 101.04724815715728, + 2.0, + 124.02764534562637, + 98.6395022565352, + 1.0, + 124.50919452575083, + 102.49189569753052, + 1.0, + 178.44270269968544, + 92.37936291491776, + 2.0, + 178.92425187980987, + 97.19485471616193, + 2.0, + 116.80440764376011, + 115.9752727410142, + 1.0, + 192.40762892329354, + 115.9752727410142, + 2.0, + 137.02947320898565, + 120.30921536213394, + 2.0, + 165.44087483632623, + 119.82766618200952, + 2.0, + 125.47229288599961, + 114.04907602051652, + 1.0, + 126.43539124624846, + 120.30921536213394, + 1.0, + 180.3688994201831, + 107.78893667889912, + 2.0, + 180.3688994201831, + 118.8645678217607, + 2.0, + 125.95384206612407, + 118.38301864163628, + 1.0, + 180.85044860030752, + 115.01217438076534, + 2.0, + 132.69553058786587, + 149.68371534972337, + 2.0, + 162.0700305754553, + 152.09146125034545, + 2.0, + 143.77116173072744, + 147.7575186292257, + 2.0, + 144.2527109108519, + 157.388502231714, + 2.0, + 132.21398140774147, + 168.4641333745756, + 1.0, + 184.22129286117845, + 167.9825841944512, + 2.0, + 149.06820271209602, + 165.09328911370469, + 2.0, + 150.51285025246932, + 170.3903300950733, + 2.0, + 
151.95749779284256, + 182.42905959818367, + 2.0, + 153.4021453332158, + 188.6891989398011, + 2.0, + 155.32834205371347, + 218.54524810751494, + 2.0 + ], + "image_id": 1805, + "id": 1805, + "num_keypoints": 29, + "bbox": [ + 102.0, + 83.0, + 112.0, + 146.0 + ], + "iscrowd": 0, + "area": 16352.0, + "category_id": 1 + } + ] } \ No newline at end of file diff --git a/tests/data/crowdpose/test_crowdpose.json b/tests/data/crowdpose/test_crowdpose.json index 9e9d9b7a8a..12168aadc4 100644 --- a/tests/data/crowdpose/test_crowdpose.json +++ b/tests/data/crowdpose/test_crowdpose.json @@ -1,378 +1,378 @@ -{ - "categories": [ - { - "supercategory": "person", - "id": 1, - "name": "person", - "keypoints": [ - "left_shoulder", - "right_shoulder", - "left_elbow", - "right_elbow", - "left_wrist", - "right_wrist", - "left_hip", - "right_hip", - "left_knee", - "right_knee", - "left_ankle", - "right_ankle", - "head", - "neck" - ], - "skeleton": [ - [ - 16, - 14 - ], - [ - 14, - 12 - ], - [ - 17, - 15 - ], - [ - 15, - 13 - ], - [ - 12, - 13 - ], - [ - 6, - 12 - ], - [ - 7, - 13 - ], - [ - 6, - 7 - ], - [ - 6, - 8 - ], - [ - 7, - 9 - ], - [ - 8, - 10 - ], - [ - 9, - 11 - ] - ] - } - ], - "images": [ - { - "file_name": "106848.jpg", - "id": 106848, - "height": 425, - "width": 640, - "crowdIndex": 0.33 - }, - { - "file_name": "103319.jpg", - "id": 103319, - "height": 480, - "width": 640, - "crowdIndex": 0.39 - } - ], - "annotations": [ - { - "num_keypoints": 5, - "iscrowd": 0, - "keypoints": [ - 0, - 0, - 0, - 208, - 108, - 2, - 0, - 0, - 0, - 278, - 158, - 2, - 262, - 206, - 2, - 348, - 98, - 2, - 0, - 0, - 0, - 173, - 299, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 256, - 27, - 1, - 220, - 89, - 1 - ], - "image_id": 106848, - "bbox": [ - 106.01, - 13.43, - 273.15, - 352.42 - ], - "category_id": 1, - "id": 123803 - }, - { - "num_keypoints": 0, - "iscrowd": 0, - "keypoints": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "image_id": 106848, - "bbox": [ - 108.5, - 96.78, - 35.46, - 30.23 - ], - "category_id": 1, - "id": 131039 - }, - { - "num_keypoints": 10, - "iscrowd": 0, - "keypoints": [ - 482, - 129, - 2, - 364, - 126, - 2, - 513, - 213, - 2, - 339, - 163, - 2, - 431, - 210, - 2, - 276, - 163, - 1, - 440, - 308, - 2, - 371, - 304, - 1, - 432, - 419, - 1, - 366, - 419, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 407, - 29, - 1, - 420, - 110, - 1 - ], - "image_id": 106848, - "bbox": [ - 281.51, - 21.92, - 244.5, - 349.72 - ], - "category_id": 1, - "id": 147481 - }, - { - "num_keypoints": 12, - "iscrowd": 0, - "keypoints": [ - 388, - 205, - 2, - 344, - 211, - 2, - 407, - 249, - 2, - 337, - 256, - 2, - 393, - 278, - 2, - 336, - 290, - 2, - 390, - 293, - 2, - 354, - 294, - 2, - 387, - 354, - 2, - 351, - 357, - 2, - 380, - 390, - 2, - 359, - 408, - 1, - 351, - 163, - 1, - 364, - 198, - 1 - ], - "image_id": 103319, - "bbox": [ - 316.76, - 157.3, - 100.54, - 247.56 - ], - "category_id": 1, - "id": 127068 - }, - { - "num_keypoints": 12, - "iscrowd": 0, - "keypoints": [ - 350, - 145, - 2, - 300, - 145, - 2, - 352, - 190, - 1, - 299, - 180, - 2, - 322, - 163, - 2, - 291, - 217, - 2, - 346, - 232, - 1, - 314, - 232, - 2, - 346, - 283, - 1, - 310, - 284, - 2, - 345, - 346, - 1, - 305, - 344, - 2, - 312, - 106, - 1, - 323, - 137, - 1 - ], - "image_id": 103319, - "bbox": [ - 279.68, - 102.17, - 81.13, - 255.49 - ], - "category_id": 1, - 
"id": 129014 - } - ] -} +{ + "categories": [ + { + "supercategory": "person", + "id": 1, + "name": "person", + "keypoints": [ + "left_shoulder", + "right_shoulder", + "left_elbow", + "right_elbow", + "left_wrist", + "right_wrist", + "left_hip", + "right_hip", + "left_knee", + "right_knee", + "left_ankle", + "right_ankle", + "head", + "neck" + ], + "skeleton": [ + [ + 16, + 14 + ], + [ + 14, + 12 + ], + [ + 17, + 15 + ], + [ + 15, + 13 + ], + [ + 12, + 13 + ], + [ + 6, + 12 + ], + [ + 7, + 13 + ], + [ + 6, + 7 + ], + [ + 6, + 8 + ], + [ + 7, + 9 + ], + [ + 8, + 10 + ], + [ + 9, + 11 + ] + ] + } + ], + "images": [ + { + "file_name": "106848.jpg", + "id": 106848, + "height": 425, + "width": 640, + "crowdIndex": 0.33 + }, + { + "file_name": "103319.jpg", + "id": 103319, + "height": 480, + "width": 640, + "crowdIndex": 0.39 + } + ], + "annotations": [ + { + "num_keypoints": 5, + "iscrowd": 0, + "keypoints": [ + 0, + 0, + 0, + 208, + 108, + 2, + 0, + 0, + 0, + 278, + 158, + 2, + 262, + 206, + 2, + 348, + 98, + 2, + 0, + 0, + 0, + 173, + 299, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 256, + 27, + 1, + 220, + 89, + 1 + ], + "image_id": 106848, + "bbox": [ + 106.01, + 13.43, + 273.15, + 352.42 + ], + "category_id": 1, + "id": 123803 + }, + { + "num_keypoints": 0, + "iscrowd": 0, + "keypoints": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "image_id": 106848, + "bbox": [ + 108.5, + 96.78, + 35.46, + 30.23 + ], + "category_id": 1, + "id": 131039 + }, + { + "num_keypoints": 10, + "iscrowd": 0, + "keypoints": [ + 482, + 129, + 2, + 364, + 126, + 2, + 513, + 213, + 2, + 339, + 163, + 2, + 431, + 210, + 2, + 276, + 163, + 1, + 440, + 308, + 2, + 371, + 304, + 1, + 432, + 419, + 1, + 366, + 419, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 407, + 29, + 1, + 420, + 110, + 1 + ], + "image_id": 106848, + "bbox": [ + 281.51, + 21.92, + 244.5, + 349.72 + ], + "category_id": 1, + "id": 147481 + }, + { + "num_keypoints": 12, + "iscrowd": 0, + "keypoints": [ + 388, + 205, + 2, + 344, + 211, + 2, + 407, + 249, + 2, + 337, + 256, + 2, + 393, + 278, + 2, + 336, + 290, + 2, + 390, + 293, + 2, + 354, + 294, + 2, + 387, + 354, + 2, + 351, + 357, + 2, + 380, + 390, + 2, + 359, + 408, + 1, + 351, + 163, + 1, + 364, + 198, + 1 + ], + "image_id": 103319, + "bbox": [ + 316.76, + 157.3, + 100.54, + 247.56 + ], + "category_id": 1, + "id": 127068 + }, + { + "num_keypoints": 12, + "iscrowd": 0, + "keypoints": [ + 350, + 145, + 2, + 300, + 145, + 2, + 352, + 190, + 1, + 299, + 180, + 2, + 322, + 163, + 2, + 291, + 217, + 2, + 346, + 232, + 1, + 314, + 232, + 2, + 346, + 283, + 1, + 310, + 284, + 2, + 345, + 346, + 1, + 305, + 344, + 2, + 312, + 106, + 1, + 323, + 137, + 1 + ], + "image_id": 103319, + "bbox": [ + 279.68, + 102.17, + 81.13, + 255.49 + ], + "category_id": 1, + "id": 129014 + } + ] +} diff --git a/tests/data/crowdpose/test_crowdpose_det_AP_40.json b/tests/data/crowdpose/test_crowdpose_det_AP_40.json index 36d0572bb4..8ba700b85f 100644 --- a/tests/data/crowdpose/test_crowdpose_det_AP_40.json +++ b/tests/data/crowdpose/test_crowdpose_det_AP_40.json @@ -1,68 +1,68 @@ -[ - { - "bbox": [ - 120.36583709716797, - 30.521512985229492, - 244.14288330078125, - 328.944580078125 - ], - "category_id": 1, - "image_id": 106848, - "score": 0.9999284744262695 - }, - { - "bbox": [ - 326.6805725097656, - 30.76219940185547, - 209.03128051757812, - 
327.80035400390625 - ], - "category_id": 1, - "image_id": 106848, - "score": 0.9993789196014404 - }, - { - "bbox": [ - 109.94915008544922, - 95.794677734375, - 32.249656677246094, - 26.97345733642578 - ], - "category_id": 1, - "image_id": 106848, - "score": 0.9997813105583191 - }, - { - "bbox": [ - 315.21368408203125, - 149.79432678222656, - 100.7252197265625, - 259.96405029296875 - ], - "category_id": 1, - "image_id": 103319, - "score": 0.9998345375061035 - }, - { - "bbox": [ - 282.7766418457031, - 100.75929260253906, - 76.44869995117188, - 261.6209716796875 - ], - "category_id": 1, - "image_id": 103319, - "score": 0.9998021721839905 - }, - { - "bbox": [ - -0.10778862237930298, - 238.81455993652344, - 38.23238754272461, - 99.21165466308594 - ], - "category_id": 1, - "image_id": 103319, - "score": 0.9673888683319092 - } -] +[ + { + "bbox": [ + 120.36583709716797, + 30.521512985229492, + 244.14288330078125, + 328.944580078125 + ], + "category_id": 1, + "image_id": 106848, + "score": 0.9999284744262695 + }, + { + "bbox": [ + 326.6805725097656, + 30.76219940185547, + 209.03128051757812, + 327.80035400390625 + ], + "category_id": 1, + "image_id": 106848, + "score": 0.9993789196014404 + }, + { + "bbox": [ + 109.94915008544922, + 95.794677734375, + 32.249656677246094, + 26.97345733642578 + ], + "category_id": 1, + "image_id": 106848, + "score": 0.9997813105583191 + }, + { + "bbox": [ + 315.21368408203125, + 149.79432678222656, + 100.7252197265625, + 259.96405029296875 + ], + "category_id": 1, + "image_id": 103319, + "score": 0.9998345375061035 + }, + { + "bbox": [ + 282.7766418457031, + 100.75929260253906, + 76.44869995117188, + 261.6209716796875 + ], + "category_id": 1, + "image_id": 103319, + "score": 0.9998021721839905 + }, + { + "bbox": [ + -0.10778862237930298, + 238.81455993652344, + 38.23238754272461, + 99.21165466308594 + ], + "category_id": 1, + "image_id": 103319, + "score": 0.9673888683319092 + } +] diff --git a/tests/data/deepfasion2/deepfasion2.json b/tests/data/deepfasion2/deepfasion2.json index b3a25a6dca..eda16042f8 100644 --- a/tests/data/deepfasion2/deepfasion2.json +++ b/tests/data/deepfasion2/deepfasion2.json @@ -1,2404 +1,2404 @@ -{ - "info": "", - "licenses": "", - "images": [ - { - "coco_url": "", - "date_captured": "", - "file_name": "000264.jpg", - "flickr_url": "", - "id": 264, - "license": 0, - "width": 750, - "height": 750 - }, - { - "coco_url": "", - "date_captured": "", - "file_name": "000265.jpg", - "flickr_url": "", - "id": 265, - "license": 0, - "width": 750, - "height": 750 - } - ], - "annotations": [ - { - "area": 402069, - "bbox": [ - 103, - 80, - 601, - 669 - ], - "category_id": 11, - "id": 429, - "pair_id": 22, - "image_id": 264, - "iscrowd": 0, - "style": 1, - "num_keypoints": 28, - "keypoints": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 
0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, 
- 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 417.0, - 107.0, - 1.0, - 307.0, - 87.0, - 2.0, - 349.0, - 118.0, - 2.0, - 405.0, - 131.0, - 2.0, - 461.0, - 133.0, - 2.0, - 504.0, - 116.0, - 2.0, - 242.0, - 112.0, - 2.0, - 209.0, - 208.0, - 2.0, - 182.0, - 284.0, - 2.0, - 142.0, - 331.0, - 2.0, - 121.0, - 376.0, - 2.0, - 274.0, - 440.0, - 2.0, - 277.0, - 388.0, - 2.0, - 259.0, - 345.0, - 2.0, - 263.0, - 282.0, - 1.0, - 266.0, - 222.0, - 1.0, - 258.0, - 246.0, - 2.0, - 259.0, - 318.0, - 2.0, - 249.0, - 412.0, - 1.0, - 202.0, - 579.0, - 1.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 566.0, - 593.0, - 2.0, - 532.0, - 435.0, - 2.0, - 527.0, - 355.0, - 2.0, - 540.0, - 289.0, - 2.0, - 542.0, - 271.0, - 2.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 573.0, - 479.0, - 2.0, - 698.0, - 403.0, - 2.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 594.0, - 151.0, - 2.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "segmentation": [ - [ - 345.05, - 120.04, - 321.68, - 101.99, - 308.4, - 87.04, - 295.88, - 91.41, - 267.98, - 97.61, - 250.24, - 109.15, - 234.6, - 130.26, - 224.23, - 161.09, - 212.72, - 198.51, - 193.23, - 252.37, - 183.68, - 279.52, - 164.61, - 297.25, - 133.78, - 341.78, - 118.78, - 373.07, - 120.42, - 381.12, - 142.25, - 390.36, - 147.3, - 396.65, - 166.82, - 412.43, - 186.56, - 425.17, - 206.63, - 432.42, - 206.48, - 422.24, - 272.85, - 442.95, - 230.31, - 484.86, - 202.28, - 579.98, - 198.77, - 649.55, - 195.22, - 697.46, - 196.55, - 740.97, - 208.78, - 750.0, - 310.72, - 749.0, - 439.41, - 749.2, - 562.85, - 749.18, - 578.32, - 739.97, - 574.34, - 685.58, - 562.73, - 564.57, - 545.52, - 474.66, - 536.85, - 436.58, - 546.89, - 450.73, - 559.79, - 470.3, - 564.49, - 478.28, - 571.67, - 479.65, - 579.75, - 484.9, - 611.0, - 467.77, - 640.13, - 458.35, - 635.77, - 445.53, - 671.39, - 430.58, - 697.71, - 414.0, - 697.93, - 397.2, - 691.25, - 349.8, - 680.6, - 307.09, - 664.02, - 293.31, - 645.57, - 286.35, - 619.11, - 282.39, - 620.03, - 237.9, - 611.34, - 191.23, - 602.52, - 164.16, - 590.09, - 149.08, - 553.5, - 136.68, - 523.0, - 129.71, - 512.15, - 122.14, - 502.99, - 116.17, - 473.33, - 129.07, - 412.19, - 129.14, - 403.58, - 124.73, - 386.42, - 126.75 - ] - ] - }, - { - "area": 288806, - "bbox": [ - 178, - 62, - 421, - 686 - ], - "category_id": 11, - "id": 430, - "pair_id": 22, - "image_id": 265, - "iscrowd": 0, - "style": 2, - "num_keypoints": 31, - "keypoints": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 
0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, 
- 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 361.0, - 71.0, - 1.0, - 282.0, - 74.0, - 2.0, - 316.0, - 90.0, - 2.0, - 361.0, - 96.0, - 2.0, - 406.0, - 98.0, - 2.0, - 438.0, - 77.0, - 2.0, - 231.0, - 100.0, - 2.0, - 210.0, - 184.0, - 2.0, - 200.0, - 252.0, - 2.0, - 186.0, - 297.0, - 2.0, - 178.0, - 331.0, - 2.0, - 264.0, - 361.0, - 1.0, - 275.0, - 311.0, - 1.0, - 272.0, - 269.0, - 1.0, - 268.0, - 228.0, - 1.0, - 263.0, - 188.0, - 1.0, - 247.0, - 209.0, - 2.0, - 258.0, - 265.0, - 2.0, - 273.0, - 320.0, - 2.0, - 256.0, - 517.0, - 1.0, - 273.0, - 710.0, - 2.0, - 384.0, - 731.0, - 2.0, - 506.0, - 713.0, - 2.0, - 496.0, - 510.0, - 2.0, - 453.0, - 318.0, - 2.0, - 471.0, - 268.0, - 2.0, - 477.0, - 213.0, - 2.0, - 456.0, - 181.0, - 1.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 508.0, - 342.0, - 2.0, - 596.0, - 267.0, - 2.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 489.0, - 75.0, - 1.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0 - ], - "segmentation": [ - [ - 438, - 77, - 406, - 98, - 361, - 96, - 316, - 90, - 282, - 74, - 231, - 100, - 210, - 184, - 200, - 252, - 186, - 297, - 178, - 331, - 264, - 361, - 275, - 311, - 272, - 269, - 268, - 228, - 263, - 188, - 247, - 209, - 258, - 265, - 273, - 320, - 256, - 517, - 273, - 710, - 384, - 731, - 506, - 713, - 496, - 510, - 453, - 318, - 471, - 268, - 477, - 213, - 456, - 181, - 508, - 342, - 596, - 267, - 489, - 75, - 438, - 77 - ], - [ - 231, - 100, - 210, - 184, - 200, - 252, - 186, - 297, - 178, - 331, - 264, - 361, - 275, - 311, - 272, - 269, - 268, - 228, - 263, - 188, - 231, - 100 - ], - [ - 456, - 181, - 508, - 342, - 596, - 267, - 489, - 75, - 456, - 181 - ], - [ - 200, - 252, - 186, - 297, - 178, - 331, - 264, - 361, - 275, - 311, - 272, - 269, - 200, - 252 - ] - ], - "images": [ - { - "coco_url": "", - "date_captured": "", - "file_name": "000264.jpg", - 
"flickr_url": "", - "id": 264, - "license": 0, - "width": 750, - "height": 750 - }, - { - "coco_url": "", - "date_captured": "", - "file_name": "000265.jpg", - "flickr_url": "", - "id": 265, - "license": 0, - "width": 750, - "height": 750 - } - ] - } - ], - "categories": [ - { - "id": 11, - "name": "long_sleeved_dress", - "supercategory": "clothes", - "keypoints": [ - "1", - "2", - "3", - "4", - "5", - "6", - "7", - "8", - "9", - "10", - "11", - "12", - "13", - "14", - "15", - "16", - "17", - "18", - "19", - "20", - "21", - "22", - "23", - "24", - "25", - "26", - "27", - "28", - "29", - "30", - "31", - "32", - "33", - "34", - "35", - "36", - "37", - "38", - "39", - "40", - "41", - "42", - "43", - "44", - "45", - "46", - "47", - "48", - "49", - "50", - "51", - "52", - "53", - "54", - "55", - "56", - "57", - "58", - "59", - "60", - "61", - "62", - "63", - "64", - "65", - "66", - "67", - "68", - "69", - "70", - "71", - "72", - "73", - "74", - "75", - "76", - "77", - "78", - "79", - "80", - "81", - "82", - "83", - "84", - "85", - "86", - "87", - "88", - "89", - "90", - "91", - "92", - "93", - "94", - "95", - "96", - "97", - "98", - "99", - "100", - "101", - "102", - "103", - "104", - "105", - "106", - "107", - "108", - "109", - "110", - "111", - "112", - "113", - "114", - "115", - "116", - "117", - "118", - "119", - "120", - "121", - "122", - "123", - "124", - "125", - "126", - "127", - "128", - "129", - "130", - "131", - "132", - "133", - "134", - "135", - "136", - "137", - "138", - "139", - "140", - "141", - "142", - "143", - "144", - "145", - "146", - "147", - "148", - "149", - "150", - "151", - "152", - "153", - "154", - "155", - "156", - "157", - "158", - "159", - "160", - "161", - "162", - "163", - "164", - "165", - "166", - "167", - "168", - "169", - "170", - "171", - "172", - "173", - "174", - "175", - "176", - "177", - "178", - "179", - "180", - "181", - "182", - "183", - "184", - "185", - "186", - "187", - "188", - "189", - "190", - "191", - "192", - "193", - "194", - "195", - "196", - "197", - "198", - "199", - "200", - "201", - "202", - "203", - "204", - "205", - "206", - "207", - "208", - "209", - "210", - "211", - "212", - "213", - "214", - "215", - "216", - "217", - "218", - "219", - "220", - "221", - "222", - "223", - "224", - "225", - "226", - "227", - "228", - "229", - "230", - "231", - "232", - "233", - "234", - "235", - "236", - "237", - "238", - "239", - "240", - "241", - "242", - "243", - "244", - "245", - "246", - "247", - "248", - "249", - "250", - "251", - "252", - "253", - "254", - "255", - "256", - "257", - "258", - "259", - "260", - "261", - "262", - "263", - "264", - "265", - "266", - "267", - "268", - "269", - "270", - "271", - "272", - "273", - "274", - "275", - "276", - "277", - "278", - "279", - "280", - "281", - "282", - "283", - "284", - "285", - "286", - "287", - "288", - "289", - "290", - "291", - "292", - "293", - "294" - ], - "skeleton": [] - } - ] +{ + "info": "", + "licenses": "", + "images": [ + { + "coco_url": "", + "date_captured": "", + "file_name": "000264.jpg", + "flickr_url": "", + "id": 264, + "license": 0, + "width": 750, + "height": 750 + }, + { + "coco_url": "", + "date_captured": "", + "file_name": "000265.jpg", + "flickr_url": "", + "id": 265, + "license": 0, + "width": 750, + "height": 750 + } + ], + "annotations": [ + { + "area": 402069, + "bbox": [ + 103, + 80, + 601, + 669 + ], + "category_id": 11, + "id": 429, + "pair_id": 22, + "image_id": 264, + "iscrowd": 0, + "style": 1, + "num_keypoints": 28, + "keypoints": [ + 0.0, + 0.0, + 0.0, 
+ 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 
0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 417.0, + 107.0, + 1.0, + 307.0, + 87.0, + 2.0, + 349.0, + 118.0, + 2.0, + 405.0, + 131.0, + 2.0, + 461.0, + 133.0, + 2.0, + 504.0, + 116.0, + 2.0, + 242.0, + 112.0, + 2.0, + 209.0, + 208.0, + 2.0, + 182.0, + 284.0, + 2.0, + 142.0, + 331.0, + 2.0, + 121.0, + 376.0, + 2.0, + 274.0, + 440.0, + 2.0, + 277.0, + 388.0, + 2.0, + 259.0, + 345.0, + 2.0, + 263.0, + 282.0, + 1.0, + 266.0, + 222.0, + 1.0, + 258.0, + 246.0, + 2.0, + 259.0, + 318.0, + 2.0, + 249.0, + 412.0, + 1.0, + 202.0, + 579.0, + 1.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 566.0, + 593.0, + 2.0, + 532.0, + 435.0, + 2.0, + 527.0, + 355.0, + 2.0, + 540.0, + 289.0, + 2.0, + 542.0, + 271.0, + 2.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 573.0, + 479.0, + 2.0, + 698.0, + 403.0, + 2.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 594.0, + 151.0, + 2.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "segmentation": [ + [ + 345.05, + 120.04, + 321.68, + 101.99, + 308.4, + 87.04, + 295.88, + 91.41, + 267.98, + 97.61, + 250.24, + 109.15, + 234.6, + 130.26, + 224.23, + 161.09, + 212.72, + 198.51, + 193.23, + 252.37, + 183.68, + 279.52, + 164.61, + 297.25, + 133.78, + 341.78, + 118.78, + 373.07, + 120.42, + 381.12, + 142.25, + 390.36, + 147.3, + 396.65, + 166.82, + 412.43, + 186.56, + 425.17, + 206.63, + 432.42, + 206.48, + 422.24, + 272.85, + 442.95, + 230.31, + 484.86, + 202.28, + 579.98, + 198.77, + 649.55, + 195.22, + 697.46, + 196.55, + 740.97, + 208.78, + 750.0, + 310.72, + 749.0, + 439.41, + 749.2, + 562.85, + 749.18, + 578.32, + 739.97, + 574.34, + 685.58, + 562.73, + 564.57, + 545.52, + 474.66, + 536.85, + 436.58, + 546.89, + 450.73, + 559.79, + 470.3, + 564.49, + 478.28, + 571.67, + 479.65, + 579.75, + 484.9, + 
611.0, + 467.77, + 640.13, + 458.35, + 635.77, + 445.53, + 671.39, + 430.58, + 697.71, + 414.0, + 697.93, + 397.2, + 691.25, + 349.8, + 680.6, + 307.09, + 664.02, + 293.31, + 645.57, + 286.35, + 619.11, + 282.39, + 620.03, + 237.9, + 611.34, + 191.23, + 602.52, + 164.16, + 590.09, + 149.08, + 553.5, + 136.68, + 523.0, + 129.71, + 512.15, + 122.14, + 502.99, + 116.17, + 473.33, + 129.07, + 412.19, + 129.14, + 403.58, + 124.73, + 386.42, + 126.75 + ] + ] + }, + { + "area": 288806, + "bbox": [ + 178, + 62, + 421, + 686 + ], + "category_id": 11, + "id": 430, + "pair_id": 22, + "image_id": 265, + "iscrowd": 0, + "style": 2, + "num_keypoints": 31, + "keypoints": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 
0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 361.0, + 71.0, + 1.0, + 282.0, + 74.0, + 2.0, + 316.0, + 90.0, + 2.0, + 361.0, + 96.0, + 2.0, + 406.0, + 98.0, + 2.0, + 438.0, + 77.0, + 2.0, + 231.0, + 100.0, + 2.0, + 210.0, + 184.0, + 2.0, + 200.0, + 252.0, + 2.0, + 186.0, + 297.0, + 2.0, + 178.0, + 331.0, + 2.0, + 264.0, + 361.0, + 1.0, + 275.0, + 311.0, + 1.0, + 272.0, + 269.0, + 1.0, + 268.0, + 228.0, + 1.0, + 263.0, + 188.0, + 1.0, + 247.0, + 209.0, + 2.0, + 258.0, + 265.0, + 2.0, + 273.0, + 320.0, + 2.0, + 256.0, + 517.0, + 1.0, + 273.0, + 710.0, + 2.0, + 384.0, + 731.0, + 2.0, + 506.0, + 713.0, + 2.0, + 496.0, + 510.0, + 2.0, + 453.0, + 318.0, + 2.0, + 471.0, + 268.0, + 2.0, + 477.0, + 213.0, + 2.0, + 456.0, + 181.0, + 1.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 508.0, + 342.0, + 2.0, + 596.0, + 267.0, + 2.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 489.0, + 75.0, + 1.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "segmentation": [ + [ + 438, + 77, + 406, + 98, + 361, + 96, + 316, + 90, + 282, + 74, + 231, + 100, + 210, + 184, + 200, + 252, + 186, + 297, 
+ 178, + 331, + 264, + 361, + 275, + 311, + 272, + 269, + 268, + 228, + 263, + 188, + 247, + 209, + 258, + 265, + 273, + 320, + 256, + 517, + 273, + 710, + 384, + 731, + 506, + 713, + 496, + 510, + 453, + 318, + 471, + 268, + 477, + 213, + 456, + 181, + 508, + 342, + 596, + 267, + 489, + 75, + 438, + 77 + ], + [ + 231, + 100, + 210, + 184, + 200, + 252, + 186, + 297, + 178, + 331, + 264, + 361, + 275, + 311, + 272, + 269, + 268, + 228, + 263, + 188, + 231, + 100 + ], + [ + 456, + 181, + 508, + 342, + 596, + 267, + 489, + 75, + 456, + 181 + ], + [ + 200, + 252, + 186, + 297, + 178, + 331, + 264, + 361, + 275, + 311, + 272, + 269, + 200, + 252 + ] + ], + "images": [ + { + "coco_url": "", + "date_captured": "", + "file_name": "000264.jpg", + "flickr_url": "", + "id": 264, + "license": 0, + "width": 750, + "height": 750 + }, + { + "coco_url": "", + "date_captured": "", + "file_name": "000265.jpg", + "flickr_url": "", + "id": 265, + "license": 0, + "width": 750, + "height": 750 + } + ] + } + ], + "categories": [ + { + "id": 11, + "name": "long_sleeved_dress", + "supercategory": "clothes", + "keypoints": [ + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10", + "11", + "12", + "13", + "14", + "15", + "16", + "17", + "18", + "19", + "20", + "21", + "22", + "23", + "24", + "25", + "26", + "27", + "28", + "29", + "30", + "31", + "32", + "33", + "34", + "35", + "36", + "37", + "38", + "39", + "40", + "41", + "42", + "43", + "44", + "45", + "46", + "47", + "48", + "49", + "50", + "51", + "52", + "53", + "54", + "55", + "56", + "57", + "58", + "59", + "60", + "61", + "62", + "63", + "64", + "65", + "66", + "67", + "68", + "69", + "70", + "71", + "72", + "73", + "74", + "75", + "76", + "77", + "78", + "79", + "80", + "81", + "82", + "83", + "84", + "85", + "86", + "87", + "88", + "89", + "90", + "91", + "92", + "93", + "94", + "95", + "96", + "97", + "98", + "99", + "100", + "101", + "102", + "103", + "104", + "105", + "106", + "107", + "108", + "109", + "110", + "111", + "112", + "113", + "114", + "115", + "116", + "117", + "118", + "119", + "120", + "121", + "122", + "123", + "124", + "125", + "126", + "127", + "128", + "129", + "130", + "131", + "132", + "133", + "134", + "135", + "136", + "137", + "138", + "139", + "140", + "141", + "142", + "143", + "144", + "145", + "146", + "147", + "148", + "149", + "150", + "151", + "152", + "153", + "154", + "155", + "156", + "157", + "158", + "159", + "160", + "161", + "162", + "163", + "164", + "165", + "166", + "167", + "168", + "169", + "170", + "171", + "172", + "173", + "174", + "175", + "176", + "177", + "178", + "179", + "180", + "181", + "182", + "183", + "184", + "185", + "186", + "187", + "188", + "189", + "190", + "191", + "192", + "193", + "194", + "195", + "196", + "197", + "198", + "199", + "200", + "201", + "202", + "203", + "204", + "205", + "206", + "207", + "208", + "209", + "210", + "211", + "212", + "213", + "214", + "215", + "216", + "217", + "218", + "219", + "220", + "221", + "222", + "223", + "224", + "225", + "226", + "227", + "228", + "229", + "230", + "231", + "232", + "233", + "234", + "235", + "236", + "237", + "238", + "239", + "240", + "241", + "242", + "243", + "244", + "245", + "246", + "247", + "248", + "249", + "250", + "251", + "252", + "253", + "254", + "255", + "256", + "257", + "258", + "259", + "260", + "261", + "262", + "263", + "264", + "265", + "266", + "267", + "268", + "269", + "270", + "271", + "272", + "273", + "274", + "275", + "276", + "277", + "278", + "279", + "280", + "281", + "282", + 
"283", + "284", + "285", + "286", + "287", + "288", + "289", + "290", + "291", + "292", + "293", + "294" + ], + "skeleton": [] + } + ] } \ No newline at end of file diff --git a/tests/data/fld/test_fld.json b/tests/data/fld/test_fld.json index 9996ac3dc2..8ce0d22c25 100644 --- a/tests/data/fld/test_fld.json +++ b/tests/data/fld/test_fld.json @@ -1,123 +1,123 @@ -{ - "info": { - "description": "Fashion Landmark Detection (FLD) test set for full-body clothes generated by MMPose Team.", - "url": "http://mmlab.ie.cuhk.edu.hk/projects/DeepFashion/LandmarkDetection.html", - "version": "1.0", - "year": "2021", - "date_created": "2021/01/02" - }, - "categories": [ - { - "supercategory": "person", - "id": 1, - "name": "fashion", - "keypoints": [ - "left collar", - "right collar", - "left sleeve", - "right sleeve", - "left waistline", - "right waistline", - "left hem", - "right hem" - ], - "skeleton": [] - } - ], - "images": [ - { - "id": 128, - "file_name": "img_00000128.jpg", - "height": 250, - "width": 200 - }, - { - "id": 132, - "file_name": "img_00000132.jpg", - "height": 250, - "width": 200 - } - ], - "annotations": [ - { - "keypoints": [ - 108.0, - 33.0, - 1.0, - 121.0, - 36.0, - 1.0, - 95.0, - 65.0, - 2.0, - 128.0, - 68.0, - 1.0, - 118.0, - 79.0, - 2.0, - 114.0, - 75.0, - 1.0, - 115.0, - 150.0, - 2.0, - 112.0, - 143.0, - 1.0 - ], - "num_keypoints": 8, - "bbox": [ - 88, - 21, - 48, - 142 - ], - "iscrowd": 0, - "area": 6816, - "category_id": 1, - "id": 128, - "image_id": 128 - }, - { - "keypoints": [ - 71.0, - 19.0, - 2.0, - 127.0, - 15.0, - 2.0, - 37.0, - 80.0, - 2.0, - 162.0, - 71.0, - 2.0, - 60.0, - 112.0, - 2.0, - 145.0, - 109.0, - 2.0, - 0.0, - 0.0, - 0.0, - 172.0, - 235.0, - 2.0 - ], - "num_keypoints": 7, - "bbox": [ - 1, - 1, - 199, - 249 - ], - "iscrowd": 0, - "area": 49551, - "category_id": 1, - "id": 132, - "image_id": 132 - } - ] +{ + "info": { + "description": "Fashion Landmark Detection (FLD) test set for full-body clothes generated by MMPose Team.", + "url": "http://mmlab.ie.cuhk.edu.hk/projects/DeepFashion/LandmarkDetection.html", + "version": "1.0", + "year": "2021", + "date_created": "2021/01/02" + }, + "categories": [ + { + "supercategory": "person", + "id": 1, + "name": "fashion", + "keypoints": [ + "left collar", + "right collar", + "left sleeve", + "right sleeve", + "left waistline", + "right waistline", + "left hem", + "right hem" + ], + "skeleton": [] + } + ], + "images": [ + { + "id": 128, + "file_name": "img_00000128.jpg", + "height": 250, + "width": 200 + }, + { + "id": 132, + "file_name": "img_00000132.jpg", + "height": 250, + "width": 200 + } + ], + "annotations": [ + { + "keypoints": [ + 108.0, + 33.0, + 1.0, + 121.0, + 36.0, + 1.0, + 95.0, + 65.0, + 2.0, + 128.0, + 68.0, + 1.0, + 118.0, + 79.0, + 2.0, + 114.0, + 75.0, + 1.0, + 115.0, + 150.0, + 2.0, + 112.0, + 143.0, + 1.0 + ], + "num_keypoints": 8, + "bbox": [ + 88, + 21, + 48, + 142 + ], + "iscrowd": 0, + "area": 6816, + "category_id": 1, + "id": 128, + "image_id": 128 + }, + { + "keypoints": [ + 71.0, + 19.0, + 2.0, + 127.0, + 15.0, + 2.0, + 37.0, + 80.0, + 2.0, + 162.0, + 71.0, + 2.0, + 60.0, + 112.0, + 2.0, + 145.0, + 109.0, + 2.0, + 0.0, + 0.0, + 0.0, + 172.0, + 235.0, + 2.0 + ], + "num_keypoints": 7, + "bbox": [ + 1, + 1, + 199, + 249 + ], + "iscrowd": 0, + "area": 49551, + "category_id": 1, + "id": 132, + "image_id": 132 + } + ] } \ No newline at end of file diff --git a/tests/data/fly/test_fly.json b/tests/data/fly/test_fly.json index 7cb11a0d17..7cd7b87281 100644 --- a/tests/data/fly/test_fly.json +++ 
b/tests/data/fly/test_fly.json @@ -1,385 +1,385 @@ -{ - "categories": [ - { - "supercategory": "animal", - "id": 1, - "name": "fly", - "keypoints": [ - "head", - "eyeL", - "eyeR", - "neck", - "thorax", - "abdomen", - "forelegR1", - "forelegR2", - "forelegR3", - "forelegR4", - "midlegR1", - "midlegR2", - "midlegR3", - "midlegR4", - "hindlegR1", - "hindlegR2", - "hindlegR3", - "hindlegR4", - "forelegL1", - "forelegL2", - "forelegL3", - "forelegL4", - "midlegL1", - "midlegL2", - "midlegL3", - "midlegL4", - "hindlegL1", - "hindlegL2", - "hindlegL3", - "hindlegL4", - "wingL", - "wingR" - ], - "skeleton": [ - [ - 2, - 1 - ], - [ - 3, - 1 - ], - [ - 4, - 1 - ], - [ - 5, - 4 - ], - [ - 6, - 5 - ], - [ - 8, - 7 - ], - [ - 9, - 8 - ], - [ - 10, - 9 - ], - [ - 12, - 11 - ], - [ - 13, - 12 - ], - [ - 14, - 13 - ], - [ - 16, - 15 - ], - [ - 17, - 16 - ], - [ - 18, - 17 - ], - [ - 20, - 19 - ], - [ - 21, - 20 - ], - [ - 22, - 21 - ], - [ - 24, - 23 - ], - [ - 25, - 24 - ], - [ - 26, - 25 - ], - [ - 28, - 27 - ], - [ - 29, - 28 - ], - [ - 30, - 29 - ], - [ - 31, - 4 - ], - [ - 32, - 4 - ] - ] - } - ], - "images": [ - { - "id": 1400, - "file_name": "1400.jpg", - "height": 192, - "width": 192 - }, - { - "id": 1450, - "file_name": "1450.jpg", - "height": 192, - "width": 192 - } - ], - "annotations": [ - { - "keypoints": [ - 146.0, - 95.0, - 2.0, - 134.0, - 80.0, - 2.0, - 136.0, - 112.0, - 2.0, - 129.0, - 97.0, - 2.0, - 99.0, - 97.0, - 2.0, - 52.0, - 95.0, - 2.0, - 123.0, - 107.0, - 2.0, - 140.0, - 114.0, - 2.0, - 158.0, - 109.0, - 2.0, - 173.0, - 109.0, - 2.0, - 110.0, - 106.0, - 2.0, - 115.0, - 127.0, - 2.0, - 133.0, - 127.0, - 2.0, - 146.0, - 138.0, - 2.0, - 96.0, - 106.0, - 2.0, - 64.18991088867188, - 120.96142578125, - 2.0, - 46.0, - 126.0, - 2.0, - 34.0, - 137.0, - 2.0, - 121.0, - 86.0, - 2.0, - 147.0, - 78.0, - 2.0, - 169.0, - 79.0, - 2.0, - 184.0, - 75.0, - 2.0, - 108.0, - 86.0, - 2.0, - 103.0, - 70.0, - 2.0, - 109.0, - 40.0, - 2.0, - 114.0, - 18.0, - 2.0, - 93.0, - 87.0, - 2.0, - 82.0, - 64.0, - 2.0, - 74.0, - 46.0, - 2.0, - 67.0, - 22.0, - 2.0, - 19.0, - 86.0, - 2.0, - 23.0, - 137.0, - 2.0 - ], - "image_id": 1400, - "id": 1400, - "num_keypoints": 32, - "bbox": [ - 19.0, - 18.0, - 166.0, - 121.0 - ], - "iscrowd": 0, - "area": 20086.0, - "category_id": 1 - }, - { - "keypoints": [ - 147.43026733398438, - 96.94955444335938, - 2.0, - 137.32937622070312, - 79.7210693359375, - 2.0, - 138.43026733398438, - 110.86053466796875, - 2.0, - 128.0, - 96.0, - 2.0, - 98.0, - 96.0, - 2.0, - 49.329376220703125, - 94.13946533203125, - 2.0, - 122.37982177734375, - 108.81008911132812, - 2.0, - 129.43026733398438, - 114.18991088867188, - 2.0, - 138.65875244140625, - 114.62017822265625, - 2.0, - 144.480712890625, - 118.7596435546875, - 2.0, - 112.18991088867188, - 109.18991088867188, - 2.0, - 105.557861328125, - 118.43026733398438, - 2.0, - 95.67062377929688, - 121.91098022460938, - 2.0, - 91.13946533203125, - 136.10089111328125, - 2.0, - 91.46884155273438, - 104.2017822265625, - 2.0, - 73.2403564453125, - 117.43026733398438, - 2.0, - 57.37833786010742, - 107.22997283935547, - 2.0, - 44.87240219116211, - 112.96142578125, - 2.0, - 119.65875244140625, - 84.81008911132812, - 2.0, - 123.78634643554688, - 77.94955444335938, - 2.0, - 132.36795043945312, - 76.58160400390625, - 2.0, - 140.0, - 64.0, - 2.0, - 113.2789306640625, - 83.29080200195312, - 2.0, - 100.08901977539062, - 74.98812866210938, - 2.0, - 98.519287109375, - 67.13946533203125, - 2.0, - 93.62017822265625, - 55.810089111328125, - 2.0, - 94.22848510742188, - 
85.08901977539062, - 2.0, - 78.36795043945312, - 69.2403564453125, - 2.0, - 71.60830688476562, - 74.58160400390625, - 2.0, - 60.848663330078125, - 68.67062377929688, - 2.0, - 16.0, - 66.0, - 2.0, - 16.0, - 126.0, - 2.0 - ], - "image_id": 1450, - "id": 1450, - "num_keypoints": 32, - "bbox": [ - 16.0, - 55.810089111328125, - 132.43026733398438, - 81.29080200195312 - ], - "iscrowd": 0, - "area": 10765.362640912645, - "category_id": 1 - } - ] +{ + "categories": [ + { + "supercategory": "animal", + "id": 1, + "name": "fly", + "keypoints": [ + "head", + "eyeL", + "eyeR", + "neck", + "thorax", + "abdomen", + "forelegR1", + "forelegR2", + "forelegR3", + "forelegR4", + "midlegR1", + "midlegR2", + "midlegR3", + "midlegR4", + "hindlegR1", + "hindlegR2", + "hindlegR3", + "hindlegR4", + "forelegL1", + "forelegL2", + "forelegL3", + "forelegL4", + "midlegL1", + "midlegL2", + "midlegL3", + "midlegL4", + "hindlegL1", + "hindlegL2", + "hindlegL3", + "hindlegL4", + "wingL", + "wingR" + ], + "skeleton": [ + [ + 2, + 1 + ], + [ + 3, + 1 + ], + [ + 4, + 1 + ], + [ + 5, + 4 + ], + [ + 6, + 5 + ], + [ + 8, + 7 + ], + [ + 9, + 8 + ], + [ + 10, + 9 + ], + [ + 12, + 11 + ], + [ + 13, + 12 + ], + [ + 14, + 13 + ], + [ + 16, + 15 + ], + [ + 17, + 16 + ], + [ + 18, + 17 + ], + [ + 20, + 19 + ], + [ + 21, + 20 + ], + [ + 22, + 21 + ], + [ + 24, + 23 + ], + [ + 25, + 24 + ], + [ + 26, + 25 + ], + [ + 28, + 27 + ], + [ + 29, + 28 + ], + [ + 30, + 29 + ], + [ + 31, + 4 + ], + [ + 32, + 4 + ] + ] + } + ], + "images": [ + { + "id": 1400, + "file_name": "1400.jpg", + "height": 192, + "width": 192 + }, + { + "id": 1450, + "file_name": "1450.jpg", + "height": 192, + "width": 192 + } + ], + "annotations": [ + { + "keypoints": [ + 146.0, + 95.0, + 2.0, + 134.0, + 80.0, + 2.0, + 136.0, + 112.0, + 2.0, + 129.0, + 97.0, + 2.0, + 99.0, + 97.0, + 2.0, + 52.0, + 95.0, + 2.0, + 123.0, + 107.0, + 2.0, + 140.0, + 114.0, + 2.0, + 158.0, + 109.0, + 2.0, + 173.0, + 109.0, + 2.0, + 110.0, + 106.0, + 2.0, + 115.0, + 127.0, + 2.0, + 133.0, + 127.0, + 2.0, + 146.0, + 138.0, + 2.0, + 96.0, + 106.0, + 2.0, + 64.18991088867188, + 120.96142578125, + 2.0, + 46.0, + 126.0, + 2.0, + 34.0, + 137.0, + 2.0, + 121.0, + 86.0, + 2.0, + 147.0, + 78.0, + 2.0, + 169.0, + 79.0, + 2.0, + 184.0, + 75.0, + 2.0, + 108.0, + 86.0, + 2.0, + 103.0, + 70.0, + 2.0, + 109.0, + 40.0, + 2.0, + 114.0, + 18.0, + 2.0, + 93.0, + 87.0, + 2.0, + 82.0, + 64.0, + 2.0, + 74.0, + 46.0, + 2.0, + 67.0, + 22.0, + 2.0, + 19.0, + 86.0, + 2.0, + 23.0, + 137.0, + 2.0 + ], + "image_id": 1400, + "id": 1400, + "num_keypoints": 32, + "bbox": [ + 19.0, + 18.0, + 166.0, + 121.0 + ], + "iscrowd": 0, + "area": 20086.0, + "category_id": 1 + }, + { + "keypoints": [ + 147.43026733398438, + 96.94955444335938, + 2.0, + 137.32937622070312, + 79.7210693359375, + 2.0, + 138.43026733398438, + 110.86053466796875, + 2.0, + 128.0, + 96.0, + 2.0, + 98.0, + 96.0, + 2.0, + 49.329376220703125, + 94.13946533203125, + 2.0, + 122.37982177734375, + 108.81008911132812, + 2.0, + 129.43026733398438, + 114.18991088867188, + 2.0, + 138.65875244140625, + 114.62017822265625, + 2.0, + 144.480712890625, + 118.7596435546875, + 2.0, + 112.18991088867188, + 109.18991088867188, + 2.0, + 105.557861328125, + 118.43026733398438, + 2.0, + 95.67062377929688, + 121.91098022460938, + 2.0, + 91.13946533203125, + 136.10089111328125, + 2.0, + 91.46884155273438, + 104.2017822265625, + 2.0, + 73.2403564453125, + 117.43026733398438, + 2.0, + 57.37833786010742, + 107.22997283935547, + 2.0, + 44.87240219116211, + 112.96142578125, + 2.0, + 
119.65875244140625, + 84.81008911132812, + 2.0, + 123.78634643554688, + 77.94955444335938, + 2.0, + 132.36795043945312, + 76.58160400390625, + 2.0, + 140.0, + 64.0, + 2.0, + 113.2789306640625, + 83.29080200195312, + 2.0, + 100.08901977539062, + 74.98812866210938, + 2.0, + 98.519287109375, + 67.13946533203125, + 2.0, + 93.62017822265625, + 55.810089111328125, + 2.0, + 94.22848510742188, + 85.08901977539062, + 2.0, + 78.36795043945312, + 69.2403564453125, + 2.0, + 71.60830688476562, + 74.58160400390625, + 2.0, + 60.848663330078125, + 68.67062377929688, + 2.0, + 16.0, + 66.0, + 2.0, + 16.0, + 126.0, + 2.0 + ], + "image_id": 1450, + "id": 1450, + "num_keypoints": 32, + "bbox": [ + 16.0, + 55.810089111328125, + 132.43026733398438, + 81.29080200195312 + ], + "iscrowd": 0, + "area": 10765.362640912645, + "category_id": 1 + } + ] } \ No newline at end of file diff --git a/tests/data/freihand/test_freihand.json b/tests/data/freihand/test_freihand.json index 2c6ebd0dc5..74d8a9599d 100644 --- a/tests/data/freihand/test_freihand.json +++ b/tests/data/freihand/test_freihand.json @@ -1,957 +1,957 @@ -{ - "info": { - "description": "FreiHand", - "version": "1.0", - "year": "2020", - "date_created": "2020/09/08" - }, - "licenses": "", - "images": [ - { - "file_name": "00017620.jpg", - "height": 224, - "width": 224, - "id": 17620 - }, - { - "file_name": "00050180.jpg", - "height": 224, - "width": 224, - "id": 50180 - }, - { - "file_name": "00082740.jpg", - "height": 224, - "width": 224, - "id": 82740 - }, - { - "file_name": "00115300.jpg", - "height": 224, - "width": 224, - "id": 115300 - }, - { - "file_name": "00000355.jpg", - "height": 224, - "width": 224, - "id": 355 - }, - { - "file_name": "00032915.jpg", - "height": 224, - "width": 224, - "id": 32915 - }, - { - "file_name": "00065475.jpg", - "height": 224, - "width": 224, - "id": 65475 - }, - { - "file_name": "00098035.jpg", - "height": 224, - "width": 224, - "id": 98035 - } - ], - "annotations": [ - { - "bbox": [ - 62, - 82, - 104, - 63 - ], - "keypoints": [ - 75.09007144965095, - 114.79035385093314, - 1, - 88.01978404720953, - 109.72359615889864, - 1, - 98.79950536639522, - 109.05442666062974, - 1, - 110.16327936938085, - 114.72375114390456, - 1, - 121.75826373686846, - 122.01572654269421, - 1, - 126.92528942089982, - 93.65489136216958, - 1, - 144.49316505581498, - 94.71206260545628, - 1, - 152.3510241000562, - 102.03474955900822, - 1, - 159.94413202793353, - 111.6105502403288, - 1, - 136.5822887073417, - 102.58162787991249, - 1, - 153.71181890922904, - 105.7627322321249, - 1, - 158.23785994857087, - 113.05793071695886, - 1, - 159.1827624858022, - 122.12860754004963, - 1, - 131.78312266215684, - 118.12603871987666, - 1, - 144.37435502719956, - 122.97613121869307, - 1, - 144.12850082414747, - 130.24233623490562, - 1, - 138.0058328373116, - 135.03475933083362, - 1, - 123.6128526185571, - 130.55957078894423, - 1, - 126.52617237783046, - 135.2764317635352, - 1, - 123.26857656908544, - 138.5518599403549, - 1, - 118.92147700299864, - 140.34319120176468, - 1 - ], - "category_id": 1, - "id": 17620, - "image_id": 17620, - "segmentation": [ - [ - 62, - 82, - 62, - 113.0, - 62, - 144, - 113.5, - 144, - 165, - 144, - 165, - 113.0, - 165, - 82, - 113.5, - 82 - ] - ], - "iscrowd": 0, - "area": 6552 - }, - { - "bbox": [ - 62, - 82, - 104, - 63 - ], - "keypoints": [ - 75.09007144965095, - 114.79035385093314, - 1, - 88.01978404720953, - 109.72359615889864, - 1, - 98.79950536639522, - 109.05442666062974, - 1, - 110.16327936938085, - 114.72375114390456, - 1, - 
121.75826373686846, - 122.01572654269421, - 1, - 126.92528942089982, - 93.65489136216958, - 1, - 144.49316505581498, - 94.71206260545628, - 1, - 152.3510241000562, - 102.03474955900822, - 1, - 159.94413202793353, - 111.6105502403288, - 1, - 136.5822887073417, - 102.58162787991249, - 1, - 153.71181890922904, - 105.7627322321249, - 1, - 158.23785994857087, - 113.05793071695886, - 1, - 159.1827624858022, - 122.12860754004963, - 1, - 131.78312266215684, - 118.12603871987666, - 1, - 144.37435502719956, - 122.97613121869307, - 1, - 144.12850082414747, - 130.24233623490562, - 1, - 138.0058328373116, - 135.03475933083362, - 1, - 123.6128526185571, - 130.55957078894423, - 1, - 126.52617237783046, - 135.2764317635352, - 1, - 123.26857656908544, - 138.5518599403549, - 1, - 118.92147700299864, - 140.34319120176468, - 1 - ], - "category_id": 1, - "id": 50180, - "image_id": 50180, - "segmentation": [ - [ - 62, - 82, - 62, - 113.0, - 62, - 144, - 113.5, - 144, - 165, - 144, - 165, - 113.0, - 165, - 82, - 113.5, - 82 - ] - ], - "iscrowd": 0, - "area": 6552 - }, - { - "bbox": [ - 62, - 82, - 104, - 63 - ], - "keypoints": [ - 75.09007144965095, - 114.79035385093314, - 1, - 88.01978404720953, - 109.72359615889864, - 1, - 98.79950536639522, - 109.05442666062974, - 1, - 110.16327936938085, - 114.72375114390456, - 1, - 121.75826373686846, - 122.01572654269421, - 1, - 126.92528942089982, - 93.65489136216958, - 1, - 144.49316505581498, - 94.71206260545628, - 1, - 152.3510241000562, - 102.03474955900822, - 1, - 159.94413202793353, - 111.6105502403288, - 1, - 136.5822887073417, - 102.58162787991249, - 1, - 153.71181890922904, - 105.7627322321249, - 1, - 158.23785994857087, - 113.05793071695886, - 1, - 159.1827624858022, - 122.12860754004963, - 1, - 131.78312266215684, - 118.12603871987666, - 1, - 144.37435502719956, - 122.97613121869307, - 1, - 144.12850082414747, - 130.24233623490562, - 1, - 138.0058328373116, - 135.03475933083362, - 1, - 123.6128526185571, - 130.55957078894423, - 1, - 126.52617237783046, - 135.2764317635352, - 1, - 123.26857656908544, - 138.5518599403549, - 1, - 118.92147700299864, - 140.34319120176468, - 1 - ], - "category_id": 1, - "id": 82740, - "image_id": 82740, - "segmentation": [ - [ - 62, - 82, - 62, - 113.0, - 62, - 144, - 113.5, - 144, - 165, - 144, - 165, - 113.0, - 165, - 82, - 113.5, - 82 - ] - ], - "iscrowd": 0, - "area": 6552 - }, - { - "bbox": [ - 62, - 82, - 104, - 63 - ], - "keypoints": [ - 75.09007144965095, - 114.79035385093314, - 1, - 88.01978404720953, - 109.72359615889864, - 1, - 98.79950536639522, - 109.05442666062974, - 1, - 110.16327936938085, - 114.72375114390456, - 1, - 121.75826373686846, - 122.01572654269421, - 1, - 126.92528942089982, - 93.65489136216958, - 1, - 144.49316505581498, - 94.71206260545628, - 1, - 152.3510241000562, - 102.03474955900822, - 1, - 159.94413202793353, - 111.6105502403288, - 1, - 136.5822887073417, - 102.58162787991249, - 1, - 153.71181890922904, - 105.7627322321249, - 1, - 158.23785994857087, - 113.05793071695886, - 1, - 159.1827624858022, - 122.12860754004963, - 1, - 131.78312266215684, - 118.12603871987666, - 1, - 144.37435502719956, - 122.97613121869307, - 1, - 144.12850082414747, - 130.24233623490562, - 1, - 138.0058328373116, - 135.03475933083362, - 1, - 123.6128526185571, - 130.55957078894423, - 1, - 126.52617237783046, - 135.2764317635352, - 1, - 123.26857656908544, - 138.5518599403549, - 1, - 118.92147700299864, - 140.34319120176468, - 1 - ], - "category_id": 1, - "id": 115300, - "image_id": 115300, - "segmentation": [ - [ - 62, - 
82, - 62, - 113.0, - 62, - 144, - 113.5, - 144, - 165, - 144, - 165, - 113.0, - 165, - 82, - 113.5, - 82 - ] - ], - "iscrowd": 0, - "area": 6552 - }, - { - "bbox": [ - 48, - 81, - 111, - 73 - ], - "keypoints": [ - 72.3863777322552, - 118.66396006693559, - 1, - 94.24833834345874, - 103.27814170253427, - 1, - 110.88311700561579, - 95.90395591649063, - 1, - 126.4579609506009, - 94.84947407598384, - 1, - 150.22575721471514, - 90.20807463489129, - 1, - 101.58391664034835, - 95.2364549099302, - 1, - 123.22957111339275, - 99.32947575213643, - 1, - 139.48821317513102, - 106.07413659069489, - 1, - 157.4869130814403, - 114.05678966958038, - 1, - 102.72641676686953, - 113.8112215401411, - 1, - 124.77010074005784, - 117.9386487787441, - 1, - 138.88096072705787, - 120.6828207743196, - 1, - 153.55692830019055, - 122.08891417018086, - 1, - 101.79667808841384, - 132.8686913780324, - 1, - 122.47431735923229, - 131.3244981984239, - 1, - 136.86479076428296, - 129.51781183394235, - 1, - 147.14149503293044, - 124.23211514642553, - 1, - 103.99186381010902, - 143.91615273519855, - 1, - 119.95852588057097, - 140.94459694337758, - 1, - 130.47757563177504, - 137.0559475661833, - 1, - 140.32638831475907, - 128.94416862968552, - 1 - ], - "category_id": 1, - "id": 355, - "image_id": 355, - "segmentation": [ - [ - 48, - 81, - 48, - 117.0, - 48, - 153, - 103.0, - 153, - 158, - 153, - 158, - 117.0, - 158, - 81, - 103.0, - 81 - ] - ], - "iscrowd": 0, - "area": 8103 - }, - { - "bbox": [ - 48, - 81, - 111, - 73 - ], - "keypoints": [ - 72.3863777322552, - 118.66396006693559, - 1, - 94.24833834345874, - 103.27814170253427, - 1, - 110.88311700561579, - 95.90395591649063, - 1, - 126.4579609506009, - 94.84947407598384, - 1, - 150.22575721471514, - 90.20807463489129, - 1, - 101.58391664034835, - 95.2364549099302, - 1, - 123.22957111339275, - 99.32947575213643, - 1, - 139.48821317513102, - 106.07413659069489, - 1, - 157.4869130814403, - 114.05678966958038, - 1, - 102.72641676686953, - 113.8112215401411, - 1, - 124.77010074005784, - 117.9386487787441, - 1, - 138.88096072705787, - 120.6828207743196, - 1, - 153.55692830019055, - 122.08891417018086, - 1, - 101.79667808841384, - 132.8686913780324, - 1, - 122.47431735923229, - 131.3244981984239, - 1, - 136.86479076428296, - 129.51781183394235, - 1, - 147.14149503293044, - 124.23211514642553, - 1, - 103.99186381010902, - 143.91615273519855, - 1, - 119.95852588057097, - 140.94459694337758, - 1, - 130.47757563177504, - 137.0559475661833, - 1, - 140.32638831475907, - 128.94416862968552, - 1 - ], - "category_id": 1, - "id": 32915, - "image_id": 32915, - "segmentation": [ - [ - 48, - 81, - 48, - 117.0, - 48, - 153, - 103.0, - 153, - 158, - 153, - 158, - 117.0, - 158, - 81, - 103.0, - 81 - ] - ], - "iscrowd": 0, - "area": 8103 - }, - { - "bbox": [ - 48, - 81, - 111, - 73 - ], - "keypoints": [ - 72.3863777322552, - 118.66396006693559, - 1, - 94.24833834345874, - 103.27814170253427, - 1, - 110.88311700561579, - 95.90395591649063, - 1, - 126.4579609506009, - 94.84947407598384, - 1, - 150.22575721471514, - 90.20807463489129, - 1, - 101.58391664034835, - 95.2364549099302, - 1, - 123.22957111339275, - 99.32947575213643, - 1, - 139.48821317513102, - 106.07413659069489, - 1, - 157.4869130814403, - 114.05678966958038, - 1, - 102.72641676686953, - 113.8112215401411, - 1, - 124.77010074005784, - 117.9386487787441, - 1, - 138.88096072705787, - 120.6828207743196, - 1, - 153.55692830019055, - 122.08891417018086, - 1, - 101.79667808841384, - 132.8686913780324, - 1, - 122.47431735923229, - 131.3244981984239, 
- 1, - 136.86479076428296, - 129.51781183394235, - 1, - 147.14149503293044, - 124.23211514642553, - 1, - 103.99186381010902, - 143.91615273519855, - 1, - 119.95852588057097, - 140.94459694337758, - 1, - 130.47757563177504, - 137.0559475661833, - 1, - 140.32638831475907, - 128.94416862968552, - 1 - ], - "category_id": 1, - "id": 65475, - "image_id": 65475, - "segmentation": [ - [ - 48, - 81, - 48, - 117.0, - 48, - 153, - 103.0, - 153, - 158, - 153, - 158, - 117.0, - 158, - 81, - 103.0, - 81 - ] - ], - "iscrowd": 0, - "area": 8103 - }, - { - "bbox": [ - 48, - 81, - 111, - 73 - ], - "keypoints": [ - 72.3863777322552, - 118.66396006693559, - 1, - 94.24833834345874, - 103.27814170253427, - 1, - 110.88311700561579, - 95.90395591649063, - 1, - 126.4579609506009, - 94.84947407598384, - 1, - 150.22575721471514, - 90.20807463489129, - 1, - 101.58391664034835, - 95.2364549099302, - 1, - 123.22957111339275, - 99.32947575213643, - 1, - 139.48821317513102, - 106.07413659069489, - 1, - 157.4869130814403, - 114.05678966958038, - 1, - 102.72641676686953, - 113.8112215401411, - 1, - 124.77010074005784, - 117.9386487787441, - 1, - 138.88096072705787, - 120.6828207743196, - 1, - 153.55692830019055, - 122.08891417018086, - 1, - 101.79667808841384, - 132.8686913780324, - 1, - 122.47431735923229, - 131.3244981984239, - 1, - 136.86479076428296, - 129.51781183394235, - 1, - 147.14149503293044, - 124.23211514642553, - 1, - 103.99186381010902, - 143.91615273519855, - 1, - 119.95852588057097, - 140.94459694337758, - 1, - 130.47757563177504, - 137.0559475661833, - 1, - 140.32638831475907, - 128.94416862968552, - 1 - ], - "category_id": 1, - "id": 98035, - "image_id": 98035, - "segmentation": [ - [ - 48, - 81, - 48, - 117.0, - 48, - 153, - 103.0, - 153, - 158, - 153, - 158, - 117.0, - 158, - 81, - 103.0, - 81 - ] - ], - "iscrowd": 0, - "area": 8103 - } - ], - "categories": [ - { - "supercategory": "hand", - "id": 1, - "name": "hand", - "keypoints": [ - "wrist", - "thumb1", - "thumb2", - "thumb3", - "thumb4", - "forefinger1", - "forefinger2", - "forefinger3", - "forefinger4", - "middle_finger1", - "middle_finger2", - "middle_finger3", - "middle_finger4", - "ring_finger1", - "ring_finger2", - "ring_finger3", - "ring_finger4", - "pinky_finger1", - "pinky_finger2", - "pinky_finger3", - "pinky_finger4" - ], - "skeleton": [ - [ - 1, - 2 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 1, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 8, - 9 - ], - [ - 1, - 10 - ], - [ - 10, - 11 - ], - [ - 11, - 12 - ], - [ - 12, - 13 - ], - [ - 1, - 14 - ], - [ - 14, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ], - [ - 1, - 18 - ], - [ - 18, - 19 - ], - [ - 19, - 20 - ], - [ - 20, - 21 - ] - ] - } - ] -} +{ + "info": { + "description": "FreiHand", + "version": "1.0", + "year": "2020", + "date_created": "2020/09/08" + }, + "licenses": "", + "images": [ + { + "file_name": "00017620.jpg", + "height": 224, + "width": 224, + "id": 17620 + }, + { + "file_name": "00050180.jpg", + "height": 224, + "width": 224, + "id": 50180 + }, + { + "file_name": "00082740.jpg", + "height": 224, + "width": 224, + "id": 82740 + }, + { + "file_name": "00115300.jpg", + "height": 224, + "width": 224, + "id": 115300 + }, + { + "file_name": "00000355.jpg", + "height": 224, + "width": 224, + "id": 355 + }, + { + "file_name": "00032915.jpg", + "height": 224, + "width": 224, + "id": 32915 + }, + { + "file_name": "00065475.jpg", + "height": 224, + "width": 224, + "id": 65475 + }, + { + "file_name": "00098035.jpg", + "height": 224, + "width": 224, + 
"id": 98035 + } + ], + "annotations": [ + { + "bbox": [ + 62, + 82, + 104, + 63 + ], + "keypoints": [ + 75.09007144965095, + 114.79035385093314, + 1, + 88.01978404720953, + 109.72359615889864, + 1, + 98.79950536639522, + 109.05442666062974, + 1, + 110.16327936938085, + 114.72375114390456, + 1, + 121.75826373686846, + 122.01572654269421, + 1, + 126.92528942089982, + 93.65489136216958, + 1, + 144.49316505581498, + 94.71206260545628, + 1, + 152.3510241000562, + 102.03474955900822, + 1, + 159.94413202793353, + 111.6105502403288, + 1, + 136.5822887073417, + 102.58162787991249, + 1, + 153.71181890922904, + 105.7627322321249, + 1, + 158.23785994857087, + 113.05793071695886, + 1, + 159.1827624858022, + 122.12860754004963, + 1, + 131.78312266215684, + 118.12603871987666, + 1, + 144.37435502719956, + 122.97613121869307, + 1, + 144.12850082414747, + 130.24233623490562, + 1, + 138.0058328373116, + 135.03475933083362, + 1, + 123.6128526185571, + 130.55957078894423, + 1, + 126.52617237783046, + 135.2764317635352, + 1, + 123.26857656908544, + 138.5518599403549, + 1, + 118.92147700299864, + 140.34319120176468, + 1 + ], + "category_id": 1, + "id": 17620, + "image_id": 17620, + "segmentation": [ + [ + 62, + 82, + 62, + 113.0, + 62, + 144, + 113.5, + 144, + 165, + 144, + 165, + 113.0, + 165, + 82, + 113.5, + 82 + ] + ], + "iscrowd": 0, + "area": 6552 + }, + { + "bbox": [ + 62, + 82, + 104, + 63 + ], + "keypoints": [ + 75.09007144965095, + 114.79035385093314, + 1, + 88.01978404720953, + 109.72359615889864, + 1, + 98.79950536639522, + 109.05442666062974, + 1, + 110.16327936938085, + 114.72375114390456, + 1, + 121.75826373686846, + 122.01572654269421, + 1, + 126.92528942089982, + 93.65489136216958, + 1, + 144.49316505581498, + 94.71206260545628, + 1, + 152.3510241000562, + 102.03474955900822, + 1, + 159.94413202793353, + 111.6105502403288, + 1, + 136.5822887073417, + 102.58162787991249, + 1, + 153.71181890922904, + 105.7627322321249, + 1, + 158.23785994857087, + 113.05793071695886, + 1, + 159.1827624858022, + 122.12860754004963, + 1, + 131.78312266215684, + 118.12603871987666, + 1, + 144.37435502719956, + 122.97613121869307, + 1, + 144.12850082414747, + 130.24233623490562, + 1, + 138.0058328373116, + 135.03475933083362, + 1, + 123.6128526185571, + 130.55957078894423, + 1, + 126.52617237783046, + 135.2764317635352, + 1, + 123.26857656908544, + 138.5518599403549, + 1, + 118.92147700299864, + 140.34319120176468, + 1 + ], + "category_id": 1, + "id": 50180, + "image_id": 50180, + "segmentation": [ + [ + 62, + 82, + 62, + 113.0, + 62, + 144, + 113.5, + 144, + 165, + 144, + 165, + 113.0, + 165, + 82, + 113.5, + 82 + ] + ], + "iscrowd": 0, + "area": 6552 + }, + { + "bbox": [ + 62, + 82, + 104, + 63 + ], + "keypoints": [ + 75.09007144965095, + 114.79035385093314, + 1, + 88.01978404720953, + 109.72359615889864, + 1, + 98.79950536639522, + 109.05442666062974, + 1, + 110.16327936938085, + 114.72375114390456, + 1, + 121.75826373686846, + 122.01572654269421, + 1, + 126.92528942089982, + 93.65489136216958, + 1, + 144.49316505581498, + 94.71206260545628, + 1, + 152.3510241000562, + 102.03474955900822, + 1, + 159.94413202793353, + 111.6105502403288, + 1, + 136.5822887073417, + 102.58162787991249, + 1, + 153.71181890922904, + 105.7627322321249, + 1, + 158.23785994857087, + 113.05793071695886, + 1, + 159.1827624858022, + 122.12860754004963, + 1, + 131.78312266215684, + 118.12603871987666, + 1, + 144.37435502719956, + 122.97613121869307, + 1, + 144.12850082414747, + 130.24233623490562, + 1, + 138.0058328373116, + 
135.03475933083362, + 1, + 123.6128526185571, + 130.55957078894423, + 1, + 126.52617237783046, + 135.2764317635352, + 1, + 123.26857656908544, + 138.5518599403549, + 1, + 118.92147700299864, + 140.34319120176468, + 1 + ], + "category_id": 1, + "id": 82740, + "image_id": 82740, + "segmentation": [ + [ + 62, + 82, + 62, + 113.0, + 62, + 144, + 113.5, + 144, + 165, + 144, + 165, + 113.0, + 165, + 82, + 113.5, + 82 + ] + ], + "iscrowd": 0, + "area": 6552 + }, + { + "bbox": [ + 62, + 82, + 104, + 63 + ], + "keypoints": [ + 75.09007144965095, + 114.79035385093314, + 1, + 88.01978404720953, + 109.72359615889864, + 1, + 98.79950536639522, + 109.05442666062974, + 1, + 110.16327936938085, + 114.72375114390456, + 1, + 121.75826373686846, + 122.01572654269421, + 1, + 126.92528942089982, + 93.65489136216958, + 1, + 144.49316505581498, + 94.71206260545628, + 1, + 152.3510241000562, + 102.03474955900822, + 1, + 159.94413202793353, + 111.6105502403288, + 1, + 136.5822887073417, + 102.58162787991249, + 1, + 153.71181890922904, + 105.7627322321249, + 1, + 158.23785994857087, + 113.05793071695886, + 1, + 159.1827624858022, + 122.12860754004963, + 1, + 131.78312266215684, + 118.12603871987666, + 1, + 144.37435502719956, + 122.97613121869307, + 1, + 144.12850082414747, + 130.24233623490562, + 1, + 138.0058328373116, + 135.03475933083362, + 1, + 123.6128526185571, + 130.55957078894423, + 1, + 126.52617237783046, + 135.2764317635352, + 1, + 123.26857656908544, + 138.5518599403549, + 1, + 118.92147700299864, + 140.34319120176468, + 1 + ], + "category_id": 1, + "id": 115300, + "image_id": 115300, + "segmentation": [ + [ + 62, + 82, + 62, + 113.0, + 62, + 144, + 113.5, + 144, + 165, + 144, + 165, + 113.0, + 165, + 82, + 113.5, + 82 + ] + ], + "iscrowd": 0, + "area": 6552 + }, + { + "bbox": [ + 48, + 81, + 111, + 73 + ], + "keypoints": [ + 72.3863777322552, + 118.66396006693559, + 1, + 94.24833834345874, + 103.27814170253427, + 1, + 110.88311700561579, + 95.90395591649063, + 1, + 126.4579609506009, + 94.84947407598384, + 1, + 150.22575721471514, + 90.20807463489129, + 1, + 101.58391664034835, + 95.2364549099302, + 1, + 123.22957111339275, + 99.32947575213643, + 1, + 139.48821317513102, + 106.07413659069489, + 1, + 157.4869130814403, + 114.05678966958038, + 1, + 102.72641676686953, + 113.8112215401411, + 1, + 124.77010074005784, + 117.9386487787441, + 1, + 138.88096072705787, + 120.6828207743196, + 1, + 153.55692830019055, + 122.08891417018086, + 1, + 101.79667808841384, + 132.8686913780324, + 1, + 122.47431735923229, + 131.3244981984239, + 1, + 136.86479076428296, + 129.51781183394235, + 1, + 147.14149503293044, + 124.23211514642553, + 1, + 103.99186381010902, + 143.91615273519855, + 1, + 119.95852588057097, + 140.94459694337758, + 1, + 130.47757563177504, + 137.0559475661833, + 1, + 140.32638831475907, + 128.94416862968552, + 1 + ], + "category_id": 1, + "id": 355, + "image_id": 355, + "segmentation": [ + [ + 48, + 81, + 48, + 117.0, + 48, + 153, + 103.0, + 153, + 158, + 153, + 158, + 117.0, + 158, + 81, + 103.0, + 81 + ] + ], + "iscrowd": 0, + "area": 8103 + }, + { + "bbox": [ + 48, + 81, + 111, + 73 + ], + "keypoints": [ + 72.3863777322552, + 118.66396006693559, + 1, + 94.24833834345874, + 103.27814170253427, + 1, + 110.88311700561579, + 95.90395591649063, + 1, + 126.4579609506009, + 94.84947407598384, + 1, + 150.22575721471514, + 90.20807463489129, + 1, + 101.58391664034835, + 95.2364549099302, + 1, + 123.22957111339275, + 99.32947575213643, + 1, + 139.48821317513102, + 106.07413659069489, + 1, + 
157.4869130814403, + 114.05678966958038, + 1, + 102.72641676686953, + 113.8112215401411, + 1, + 124.77010074005784, + 117.9386487787441, + 1, + 138.88096072705787, + 120.6828207743196, + 1, + 153.55692830019055, + 122.08891417018086, + 1, + 101.79667808841384, + 132.8686913780324, + 1, + 122.47431735923229, + 131.3244981984239, + 1, + 136.86479076428296, + 129.51781183394235, + 1, + 147.14149503293044, + 124.23211514642553, + 1, + 103.99186381010902, + 143.91615273519855, + 1, + 119.95852588057097, + 140.94459694337758, + 1, + 130.47757563177504, + 137.0559475661833, + 1, + 140.32638831475907, + 128.94416862968552, + 1 + ], + "category_id": 1, + "id": 32915, + "image_id": 32915, + "segmentation": [ + [ + 48, + 81, + 48, + 117.0, + 48, + 153, + 103.0, + 153, + 158, + 153, + 158, + 117.0, + 158, + 81, + 103.0, + 81 + ] + ], + "iscrowd": 0, + "area": 8103 + }, + { + "bbox": [ + 48, + 81, + 111, + 73 + ], + "keypoints": [ + 72.3863777322552, + 118.66396006693559, + 1, + 94.24833834345874, + 103.27814170253427, + 1, + 110.88311700561579, + 95.90395591649063, + 1, + 126.4579609506009, + 94.84947407598384, + 1, + 150.22575721471514, + 90.20807463489129, + 1, + 101.58391664034835, + 95.2364549099302, + 1, + 123.22957111339275, + 99.32947575213643, + 1, + 139.48821317513102, + 106.07413659069489, + 1, + 157.4869130814403, + 114.05678966958038, + 1, + 102.72641676686953, + 113.8112215401411, + 1, + 124.77010074005784, + 117.9386487787441, + 1, + 138.88096072705787, + 120.6828207743196, + 1, + 153.55692830019055, + 122.08891417018086, + 1, + 101.79667808841384, + 132.8686913780324, + 1, + 122.47431735923229, + 131.3244981984239, + 1, + 136.86479076428296, + 129.51781183394235, + 1, + 147.14149503293044, + 124.23211514642553, + 1, + 103.99186381010902, + 143.91615273519855, + 1, + 119.95852588057097, + 140.94459694337758, + 1, + 130.47757563177504, + 137.0559475661833, + 1, + 140.32638831475907, + 128.94416862968552, + 1 + ], + "category_id": 1, + "id": 65475, + "image_id": 65475, + "segmentation": [ + [ + 48, + 81, + 48, + 117.0, + 48, + 153, + 103.0, + 153, + 158, + 153, + 158, + 117.0, + 158, + 81, + 103.0, + 81 + ] + ], + "iscrowd": 0, + "area": 8103 + }, + { + "bbox": [ + 48, + 81, + 111, + 73 + ], + "keypoints": [ + 72.3863777322552, + 118.66396006693559, + 1, + 94.24833834345874, + 103.27814170253427, + 1, + 110.88311700561579, + 95.90395591649063, + 1, + 126.4579609506009, + 94.84947407598384, + 1, + 150.22575721471514, + 90.20807463489129, + 1, + 101.58391664034835, + 95.2364549099302, + 1, + 123.22957111339275, + 99.32947575213643, + 1, + 139.48821317513102, + 106.07413659069489, + 1, + 157.4869130814403, + 114.05678966958038, + 1, + 102.72641676686953, + 113.8112215401411, + 1, + 124.77010074005784, + 117.9386487787441, + 1, + 138.88096072705787, + 120.6828207743196, + 1, + 153.55692830019055, + 122.08891417018086, + 1, + 101.79667808841384, + 132.8686913780324, + 1, + 122.47431735923229, + 131.3244981984239, + 1, + 136.86479076428296, + 129.51781183394235, + 1, + 147.14149503293044, + 124.23211514642553, + 1, + 103.99186381010902, + 143.91615273519855, + 1, + 119.95852588057097, + 140.94459694337758, + 1, + 130.47757563177504, + 137.0559475661833, + 1, + 140.32638831475907, + 128.94416862968552, + 1 + ], + "category_id": 1, + "id": 98035, + "image_id": 98035, + "segmentation": [ + [ + 48, + 81, + 48, + 117.0, + 48, + 153, + 103.0, + 153, + 158, + 153, + 158, + 117.0, + 158, + 81, + 103.0, + 81 + ] + ], + "iscrowd": 0, + "area": 8103 + } + ], + "categories": [ + { + "supercategory": "hand", 
+ "id": 1, + "name": "hand", + "keypoints": [ + "wrist", + "thumb1", + "thumb2", + "thumb3", + "thumb4", + "forefinger1", + "forefinger2", + "forefinger3", + "forefinger4", + "middle_finger1", + "middle_finger2", + "middle_finger3", + "middle_finger4", + "ring_finger1", + "ring_finger2", + "ring_finger3", + "ring_finger4", + "pinky_finger1", + "pinky_finger2", + "pinky_finger3", + "pinky_finger4" + ], + "skeleton": [ + [ + 1, + 2 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 1, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 8, + 9 + ], + [ + 1, + 10 + ], + [ + 10, + 11 + ], + [ + 11, + 12 + ], + [ + 12, + 13 + ], + [ + 1, + 14 + ], + [ + 14, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ], + [ + 1, + 18 + ], + [ + 18, + 19 + ], + [ + 19, + 20 + ], + [ + 20, + 21 + ] + ] + } + ] +} diff --git a/tests/data/halpe/test_halpe.json b/tests/data/halpe/test_halpe.json index 85b9e9d607..89499529ae 100644 --- a/tests/data/halpe/test_halpe.json +++ b/tests/data/halpe/test_halpe.json @@ -1,5991 +1,5991 @@ -{ - "categories": [ - { - "supercategory": "person", - "id": 1, - "name": "person", - "keypoints": [], - "skeleton": [] - } - ], - "images": [ - { - "license": 4, - "file_name": "000000000785.jpg", - "coco_url": "http://images.cocodataset.org/val2017/000000000785.jpg", - "height": 425, - "width": 640, - "date_captured": "2013-11-19 21:22:42", - "flickr_url": "http://farm8.staticflickr.com/7015/6795644157_f019453ae7_z.jpg", - "id": 785 - }, - { - "license": 3, - "file_name": "000000040083.jpg", - "coco_url": "http://images.cocodataset.org/val2017/000000040083.jpg", - "height": 333, - "width": 500, - "date_captured": "2013-11-18 03:30:24", - "flickr_url": "http://farm1.staticflickr.com/116/254881838_e21c6d17b8_z.jpg", - "id": 40083 - }, - { - "license": 1, - "file_name": "000000196141.jpg", - "coco_url": "http://images.cocodataset.org/val2017/000000196141.jpg", - "height": 429, - "width": 640, - "date_captured": "2013-11-22 22:37:15", - "flickr_url": "http://farm4.staticflickr.com/3310/3611902235_57d4ae496d_z.jpg", - "id": 196141 - }, - { - "license": 3, - "file_name": "000000197388.jpg", - "coco_url": "http://images.cocodataset.org/val2017/000000197388.jpg", - "height": 392, - "width": 640, - "date_captured": "2013-11-19 20:10:37", - "flickr_url": "http://farm9.staticflickr.com/8375/8507321836_5b8b13188f_z.jpg", - "id": 197388 - } - ], - "annotations": [ - { - "num_keypoints": 17, - "area": 27789.11055, - "iscrowd": 0, - "keypoints": [ - 367, - 81, - 2, - 374, - 73, - 2, - 360, - 75, - 2, - 386, - 78, - 2, - 356, - 81, - 2, - 399, - 108, - 2, - 358, - 129, - 2, - 433, - 142, - 2, - 341, - 159, - 2, - 449, - 165, - 2, - 309, - 178, - 2, - 424, - 203, - 2, - 393, - 214, - 2, - 429, - 294, - 2, - 367, - 273, - 2, - 466, - 362, - 2, - 396, - 341, - 2, - 370, - 52, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 431, - 378, - 2, - 364, - 366, - 2, - 437, - 383, - 2, - 358, - 361, - 2, - 488, - 372, - 2, - 414, - 353, - 2, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 382.0, - 88.0, - 0.0, - 384.0, - 85.0, - 0.0, - 386.0, - 80.0, - 0.0, - 386.0, - 76.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 
0.0, - 0.0, - 0.0, - 367.0, - 76.0, - 0.0, - 367.0, - 79.0, - 0.0, - 367.0, - 81.0, - 0.0, - 364.0, - 83.0, - 0.0, - 366.0, - 83.0, - 0.0, - 367.0, - 84.0, - 0.0, - 369.0, - 83.0, - 0.0, - 371.0, - 83.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 363.0, - 88.0, - 0.0, - 364.0, - 86.0, - 0.0, - 366.0, - 86.0, - 0.0, - 368.0, - 86.0, - 0.0, - 369.0, - 86.0, - 0.0, - 372.0, - 86.0, - 0.0, - 376.0, - 86.0, - 0.0, - 373.0, - 89.0, - 0.0, - 371.0, - 90.0, - 0.0, - 368.0, - 90.0, - 0.0, - 366.0, - 90.0, - 0.0, - 364.0, - 89.0, - 0.0, - 364.0, - 88.0, - 0.0, - 366.0, - 87.0, - 0.0, - 368.0, - 87.0, - 0.0, - 370.0, - 87.0, - 0.0, - 375.0, - 87.0, - 0.0, - 370.0, - 89.0, - 0.0, - 368.0, - 89.0, - 0.0, - 367.0, - 89.0, - 0.0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 435, - 172, - 2, - 441, - 178, - 2, - 442, - 160, - 2, - 444, - 170, - 2, - 444, - 173, - 2, - 444, - 178, - 2, - 447, - 161, - 2, - 448, - 170, - 2, - 448, - 174, - 2, - 448, - 180, - 2, - 453, - 161, - 2, - 453, - 170, - 2, - 452, - 176, - 2, - 453, - 181, - 2, - 459, - 163, - 2, - 459, - 171, - 2, - 458, - 176, - 2, - 456, - 182, - 2, - 306, - 188, - 2, - 302, - 181, - 2, - 298, - 179, - 2, - 293, - 183, - 2, - 290, - 188, - 2, - 291, - 174, - 2, - 285, - 180, - 2, - 285, - 185, - 2, - 290, - 189, - 2, - 283, - 177, - 2, - 282, - 185, - 2, - 285, - 190, - 2, - 290, - 194, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "image_id": 785, - "bbox": [ - 280.79, - 44.73, - 218.7, - 346.68 - ], - "category_id": 1, - "id": 442619 - }, - { - "num_keypoints": 14, - "area": 11025.219, - "iscrowd": 0, - "keypoints": [ - 99, - 144, - 2, - 104, - 141, - 2, - 96, - 137, - 2, - 0, - 0, - 0, - 78, - 133, - 2, - 56, - 161, - 2, - 81, - 162, - 2, - 0, - 0, - 0, - 103, - 208, - 2, - 116, - 204, - 2, - 0, - 0, - 0, - 57, - 246, - 1, - 82, - 259, - 1, - 137, - 219, - 2, - 138, - 247, - 2, - 177, - 256, - 2, - 158, - 296, - 1, - 106, - 120, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 207, - 256, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 184, - 272, - 2, - 0, - 0, - 0, - 82, - 130, - 2, - 80, - 134, - 2, - 80, - 139, - 2, - 80, - 143, - 2, - 81, - 147, - 2, - 82, - 151, - 2, - 85, - 154, - 2, - 88, - 156, - 2, - 92, - 158, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 94, - 131, - 2, - 97, - 131, - 2, - 98, - 131, - 2, - 100, - 133, - 2, - 101, - 134, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 101, - 139, - 2, - 100, - 142, - 2, - 99, - 144, - 2, - 98, - 146, - 2, - 96, - 147, - 2, - 97, - 147, - 2, - 98, - 148, - 2, - 99, - 148, - 2, - 99, - 148, - 2, - 93, - 137, - 2, - 95, - 136, - 2, - 97, - 136, - 2, - 97, - 138, - 2, - 96, - 138, - 2, - 95, - 138, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 89, - 149, - 2, - 92, - 149, - 2, - 95, - 150, - 2, - 96, - 150, - 2, - 97, - 151, - 2, - 97, - 152, - 2, - 97, - 153, - 2, - 96, - 153, - 2, - 96, - 153, - 2, - 94, - 153, - 2, - 93, - 153, - 2, - 91, - 151, - 2, - 90, - 149, - 2, - 95, - 150, - 2, - 96, - 151, - 2, - 97, - 151, - 2, - 97, - 153, - 2, - 96, - 153, - 2, - 94, - 153, - 2, - 94, - 152, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 
0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 118, - 198, - 2, - 113, - 197, - 2, - 109, - 197, - 2, - 0, - 0, - 0, - 118, - 202, - 2, - 111, - 201, - 2, - 106, - 201, - 2, - 0, - 0, - 0, - 117, - 206, - 2, - 111, - 205, - 2, - 108, - 205, - 2, - 104, - 203, - 2, - 116, - 209, - 2, - 110, - 209, - 2, - 107, - 208, - 2, - 104, - 206, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "image_id": 40083, - "bbox": [ - 38.08, - 110.95, - 174.71, - 174.71 - ], - "category_id": 1, - "id": 198196 - }, - { - "num_keypoints": 15, - "area": 10171.9544, - "iscrowd": 0, - "keypoints": [ - 343, - 164, - 2, - 348, - 160, - 2, - 340, - 160, - 2, - 359, - 163, - 2, - 332, - 164, - 2, - 370, - 189, - 2, - 334, - 190, - 2, - 358, - 236, - 2, - 348, - 234, - 2, - 339, - 270, - 2, - 330, - 262, - 2, - 378, - 262, - 2, - 343, - 254, - 2, - 338, - 280, - 2, - 283, - 272, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 343, - 143, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 332.3577977797564, - 162.3496914134306, - 2.0, - 332.8988608117197, - 165.6187214570887, - 2.0, - 333.63467933804384, - 168.74800139782477, - 2.0, - 334.44826124602673, - 171.62306650199142, - 2.0, - 335.45694729674096, - 174.28548183067173, - 2.0, - 336.96602223714194, - 176.06009946336934, - 2.0, - 339.1693873087565, - 177.1661381740196, - 2.0, - 342.3967300714231, - 178.20855305989585, - 2.0, - 346.46408769196154, - 178.59725353764554, - 2.0, - 349.8700014600567, - 177.54131727031634, - 2.0, - 352.5932256960401, - 176.49227677887563, - 2.0, - 354.83135782877605, - 175.44453310499006, - 2.0, - 356.3679296755323, - 173.84137070599724, - 2.0, - 357.0065454221239, - 171.40940037147672, - 2.0, - 357.534409347235, - 168.54578019684436, - 2.0, - 357.7505070106656, - 165.60219732546338, - 2.0, - 357.9972831576478, - 162.53520322313494, - 2.0, - 334.98978292427813, - 157.50515154670268, - 2.0, - 336.268189015108, - 155.9984682569317, - 2.0, - 338.20047804888554, - 155.20954518037684, - 2.0, - 339.8509974460976, - 155.23421301748238, - 2.0, - 341.352836967917, - 155.51378012264476, - 2.0, - 347.8451109044692, - 155.22197044222963, - 2.0, - 349.3337133669386, - 154.8293061798694, - 2.0, - 351.12965129777496, - 154.6547285491345, - 2.0, - 353.1635732613358, - 155.35309825224036, - 2.0, - 354.5697377522786, - 156.92000379375384, - 2.0, - 344.713427734375, - 159.3260030409869, - 2.0, - 344.74998306573605, - 161.3128111596201, - 2.0, - 344.9170358096852, - 163.04858473235487, - 2.0, - 344.9786475088082, - 164.92118542241116, - 2.0, - 342.8344047097599, - 167.29107576258042, - 2.0, - 343.73243414186965, - 167.34131457758886, - 2.0, - 345.013671875, - 167.60332833084405, - 2.0, - 345.8795548981311, - 167.26825794893153, - 2.0, - 346.9039867326325, - 167.04604671702666, - 2.0, - 337.4534390917011, - 160.08626361921722, - 2.0, - 338.55446807262945, - 159.17182970233992, - 2.0, - 340.002108854406, - 159.25801017611636, - 2.0, - 341.49895665785846, - 160.03499301087624, - 2.0, - 340.23350459080115, - 160.5913200228822, - 2.0, - 338.5602124083276, - 160.56629581825405, - 2.0, - 347.86048488242955, - 159.88770386938955, - 2.0, - 349.3879867254519, - 159.04122164857154, - 2.0, - 350.88049507889093, - 158.927533976237, - 2.0, - 
352.11961969113815, - 159.93540822945388, - 2.0, - 350.849705954159, - 160.3235902374866, - 2.0, - 349.1870314654182, - 160.32544540704464, - 2.0, - 340.80742998310166, - 172.02484322342218, - 2.0, - 342.28591649672563, - 170.90962129480698, - 2.0, - 344.0934833302217, - 170.10430531221277, - 2.0, - 345.1530334472656, - 170.32844890519684, - 2.0, - 345.8770950616575, - 170.0848247453278, - 2.0, - 347.8689553653493, - 170.66106716978783, - 2.0, - 349.58350770239736, - 171.62832581763175, - 2.0, - 348.09330994849114, - 172.68533762015548, - 2.0, - 346.88256608551626, - 173.17178057502298, - 2.0, - 345.6372661515778, - 173.27078642003676, - 2.0, - 343.9210619159773, - 173.28780972349878, - 2.0, - 342.63790340049593, - 172.8480547736673, - 2.0, - 341.26428671444165, - 171.89147685929842, - 2.0, - 343.8292683320887, - 171.36270207423792, - 2.0, - 345.2252255308862, - 171.2339672013825, - 2.0, - 346.42121037501914, - 171.26879086961932, - 2.0, - 349.3406477385876, - 171.65391995299098, - 2.0, - 346.50171341241577, - 171.7467015883502, - 2.0, - 345.33072832892924, - 171.8389222986558, - 2.0, - 343.8844602697036, - 171.8535089231005, - 2.0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "image_id": 40083, - "bbox": [ - 257.76, - 139.06, - 140.05, - 154.21 - ], - "category_id": 1, - "id": 230195 - }, - { - "num_keypoints": 0, - "area": 491.2669, - "iscrowd": 0, - "keypoints": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 
0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "image_id": 40083, - "bbox": [ - 275.17, - 126.5, - 10.69, - 68.26 - ], - "category_id": 1, - "id": 1202706 - }, - { - "num_keypoints": 15, - "area": 17123.92955, - "iscrowd": 0, - "keypoints": [ - 297, - 111, - 2, - 299, - 106, - 2, - 0, - 0, - 0, - 314, - 108, - 2, - 0, - 0, - 0, - 329, - 141, - 2, - 346, - 125, - 2, - 295, - 164, - 2, - 323, - 130, - 2, - 266, - 155, - 2, - 279, - 143, - 2, - 329, - 225, - 2, - 331, - 221, - 2, - 327, - 298, - 2, - 283, - 269, - 2, - 398, - 327, - 2, - 288, - 349, - 2, - 309, - 78, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 399, - 363, - 2, - 261, - 361, - 2, - 402, - 360, - 2, - 254, - 359, - 2, - 408, - 327, - 2, - 296, - 358, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 308, - 121, - 2, - 310, - 119, - 2, - 311, - 117, - 2, - 312, - 115, - 2, - 313, - 112, - 2, - 313, - 110, - 2, - 314, - 108, - 2, - 313, - 105, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 298, - 101, - 2, - 300, - 101, - 2, - 302, - 101, - 2, - 303, - 101, - 2, - 305, - 103, - 2, - 297, - 104, - 2, - 297, - 106, - 2, - 296, - 109, - 2, - 296, - 111, - 2, - 299, - 111, - 2, - 300, - 111, - 2, - 298, - 112, - 2, - 299, - 112, - 2, - 300, - 112, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 298, - 104, - 2, - 300, - 103, - 2, - 302, - 104, - 2, - 304, - 106, - 2, - 302, - 106, - 2, - 300, - 106, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 264, - 156, - 2, - 263, - 147, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 253, - 149, - 2, - 248, - 147, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 253, - 154, - 2, - 245, - 155, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 251, - 157, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 252, - 160, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 276, - 146, - 2, - 270, - 150, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 268, - 139, - 2, - 262, - 145, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "image_id": 196141, - "bbox": [ - 247.76, - 74.23, - 169.67, - 300.78 - ], - "category_id": 1, - "id": 460541 - }, - { - "num_keypoints": 15, - "area": 2789.0208, - "iscrowd": 0, - "keypoints": [ - 589, - 113, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 595, - 112, - 1, - 584, - 110, - 2, - 598, - 123, - 2, - 579, - 119, - 2, - 594, - 141, - 2, - 570, - 137, - 2, - 576, - 135, - 2, - 585, - 139, - 2, - 590, - 157, - 2, - 574, - 156, - 2, - 589, - 192, - 2, - 565, - 189, - 1, - 587, - 222, - 1, - 557, - 219, - 1, - 589, - 102, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 
0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 584, - 111, - 2, - 584, - 112, - 2, - 584, - 113, - 2, - 584, - 115, - 2, - 585, - 116, - 2, - 586, - 117, - 2, - 587, - 118, - 2, - 588, - 119, - 2, - 589, - 119, - 2, - 591, - 119, - 2, - 592, - 118, - 2, - 593, - 117, - 2, - 593, - 116, - 2, - 594, - 115, - 2, - 594, - 113, - 2, - 594, - 112, - 2, - 594, - 110, - 2, - 584, - 108, - 2, - 585, - 108, - 2, - 587, - 108, - 2, - 588, - 108, - 2, - 589, - 108, - 2, - 591, - 109, - 2, - 592, - 108, - 2, - 593, - 108, - 2, - 593, - 109, - 2, - 594, - 109, - 2, - 589, - 110, - 2, - 589, - 111, - 2, - 589, - 112, - 2, - 589, - 112, - 2, - 588, - 113, - 2, - 589, - 113, - 2, - 589, - 113, - 2, - 590, - 113, - 2, - 590, - 113, - 2, - 585, - 110, - 2, - 586, - 109, - 2, - 587, - 109, - 2, - 588, - 110, - 2, - 587, - 110, - 2, - 586, - 110, - 2, - 590, - 110, - 2, - 591, - 109, - 2, - 592, - 109, - 2, - 594, - 110, - 2, - 592, - 110, - 2, - 591, - 110, - 2, - 587, - 115, - 2, - 588, - 115, - 2, - 589, - 114, - 2, - 589, - 114, - 2, - 590, - 114, - 2, - 591, - 115, - 2, - 591, - 115, - 2, - 591, - 115, - 2, - 590, - 116, - 2, - 589, - 116, - 2, - 589, - 116, - 2, - 588, - 116, - 2, - 587, - 115, - 2, - 589, - 115, - 2, - 589, - 115, - 2, - 590, - 115, - 2, - 591, - 115, - 2, - 590, - 116, - 2, - 589, - 116, - 2, - 589, - 116, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "image_id": 196141, - "bbox": [ - 555.57, - 99.84, - 48.32, - 113.05 - ], - "category_id": 1, - "id": 488308 - }, - { - "num_keypoints": 0, - "area": 285.7906, - "iscrowd": 0, - "keypoints": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 
- 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "image_id": 196141, - "bbox": [ - 440.85, - 73.13, - 16.79, - 32.45 - ], - "category_id": 1, - "id": 508900 - }, - { - "num_keypoints": 12, - "area": 21608.94075, - "iscrowd": 0, - "keypoints": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 552, - 234, - 2, - 0, - 0, - 0, - 531, - 262, - 2, - 600, - 283, - 2, - 480, - 260, - 2, - 622, - 336, - 2, - 466, - 242, - 2, - 0, - 0, - 0, - 546, - 365, - 2, - 592, - 371, - 2, - 470, - 351, - 2, - 551, - 330, - 2, - 519, - 394, - 2, - 589, - 391, - 2, - 575, - 211, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 498, - 408, - 2, - 0, - 0, - 0, - 534, - 395, - 2, - 587, - 401, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "image_id": 196141, - "bbox": [ - 453.77, - 206.81, - 177.23, - 210.87 - ], - "category_id": 1, - "id": 1717641 - }, - { - "num_keypoints": 17, - "area": 1870.14015, - "iscrowd": 0, - "keypoints": [ - 48, - 79, - 2, - 50, - 77, - 2, - 46, - 77, - 2, - 54, - 78, - 2, - 45, - 78, - 2, - 57, - 90, - 2, - 42, - 90, - 2, - 63, - 103, - 2, - 42, - 105, - 2, - 56, - 113, - 2, - 49, - 112, - 2, - 55, - 117, - 2, - 44, - 117, - 2, - 55, - 140, - 2, - 47, - 140, - 2, - 56, - 160, - 2, - 49, - 159, - 2, - 47, - 71, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 
0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "image_id": 196141, - "bbox": [ - 36.12, - 67.59, - 30.41, - 96.08 - ], - "category_id": 1, - "id": 1724673 - }, - { - "num_keypoints": 16, - "area": 14250.29385, - "iscrowd": 0, - "keypoints": [ - 334, - 135, - 2, - 340, - 129, - 2, - 331, - 129, - 2, - 0, - 0, - 0, - 319, - 123, - 2, - 340, - 146, - 2, - 292, - 133, - 2, - 353, - 164, - 2, - 246, - 144, - 2, - 354, - 197, - 2, - 250, - 185, - 2, - 293, - 197, - 2, - 265, - 187, - 2, - 305, - 252, - 2, - 231, - 254, - 2, - 293, - 321, - 2, - 193, - 297, - 2, - 333, - 109, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 299, - 332, - 2, - 185, - 309, - 2, - 310, - 333, - 2, - 176, - 303, - 2, - 287, - 328, - 2, - 198, - 303, - 2, - 321, - 127, - 2, - 321, - 130, - 2, - 321, - 133, - 2, - 321, - 136, - 2, - 322, - 138, - 2, - 324, - 140, - 2, - 326, - 142, - 2, - 329, - 143, - 2, - 332, - 143, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 328, - 125, - 2, - 330, - 125, - 2, - 331, - 126, - 2, - 333, - 126, - 2, - 335, - 127, - 2, - 339, - 128, - 2, - 340, - 127, - 2, - 342, - 126, - 2, - 343, - 126, - 2, - 345, - 125, - 2, - 336, - 130, - 2, - 336, - 132, - 2, - 337, - 134, - 2, - 338, - 136, - 2, - 334, - 138, - 2, - 335, - 138, - 2, - 337, - 138, - 2, - 338, - 137, - 2, - 339, - 138, - 2, - 329, - 127, - 2, - 331, - 127, - 2, - 333, - 128, - 2, - 334, - 129, - 2, - 332, - 130, - 2, - 331, - 129, - 2, - 339, - 129, - 2, - 341, - 127, - 2, - 342, - 127, - 2, - 344, - 127, - 2, - 342, - 129, - 2, - 341, - 129, - 2, - 329, - 139, - 2, - 331, - 139, - 2, - 333, - 139, - 2, - 334, - 139, - 2, - 334, - 139, - 2, - 335, - 139, - 2, - 336, - 139, - 2, - 335, - 140, - 2, - 334, - 141, - 2, - 333, - 141, - 2, - 333, - 141, - 2, - 331, - 141, - 2, - 330, - 139, - 2, - 333, - 139, - 2, - 334, - 140, - 
2, - 334, - 139, - 2, - 336, - 140, - 2, - 334, - 140, - 2, - 334, - 141, - 2, - 331, - 141, - 2, - 0, - 0, - 0, - 349, - 202, - 2, - 345, - 203, - 2, - 342, - 207, - 2, - 338, - 212, - 2, - 349, - 214, - 2, - 341, - 219, - 2, - 336, - 219, - 2, - 333, - 218, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 250, - 187, - 2, - 255, - 188, - 2, - 260, - 189, - 2, - 264, - 194, - 2, - 268, - 201, - 2, - 254, - 193, - 2, - 256, - 201, - 2, - 260, - 205, - 2, - 0, - 0, - 0, - 252, - 193, - 2, - 252, - 201, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 247, - 193, - 2, - 248, - 200, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "image_id": 197388, - "bbox": [ - 139.41, - 102.25, - 222.39, - 241.57 - ], - "category_id": 1, - "id": 437295 - }, - { - "num_keypoints": 16, - "area": 3404.869, - "iscrowd": 0, - "keypoints": [ - 345, - 92, - 2, - 350, - 87, - 2, - 341, - 87, - 2, - 0, - 0, - 0, - 330, - 83, - 2, - 357, - 94, - 2, - 316, - 92, - 2, - 357, - 104, - 2, - 291, - 123, - 1, - 351, - 133, - 2, - 281, - 136, - 1, - 326, - 131, - 1, - 305, - 128, - 1, - 336, - 152, - 1, - 303, - 171, - 1, - 318, - 206, - 2, - 294, - 211, - 1, - 344, - 70, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 320, - 214, - 2, - 0, - 0, - 0, - 328, - 213, - 2, - 0, - 0, - 0, - 313, - 210, - 2, - 0, - 0, - 0, - 333, - 85, - 2, - 333, - 87, - 2, - 333, - 89, - 2, - 334, - 92, - 2, - 335, - 95, - 2, - 337, - 97, - 2, - 338, - 98, - 2, - 341, - 99, - 2, - 343, - 100, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 337, - 86, - 2, - 339, - 85, - 2, - 341, - 85, - 2, - 342, - 86, - 2, - 344, - 87, - 2, - 348, - 87, - 2, - 349, - 86, - 2, - 350, - 85, - 2, - 351, - 85, - 2, - 353, - 84, - 2, - 345, - 88, - 2, - 345, - 90, - 2, - 345, - 92, - 2, - 345, - 94, - 2, - 342, - 94, - 2, - 343, - 94, - 2, - 345, - 95, - 2, - 346, - 94, - 2, - 347, - 94, - 2, - 337, - 87, - 2, - 339, - 86, - 2, - 341, - 86, - 2, - 343, - 88, - 2, - 341, - 88, - 2, - 340, - 88, - 2, - 348, - 88, - 2, - 349, - 86, - 2, - 351, - 86, - 2, - 353, - 86, - 2, - 351, - 87, - 2, - 350, - 88, - 2, - 340, - 97, - 2, - 341, - 96, - 2, - 343, - 96, - 2, - 344, - 96, - 2, - 345, - 96, - 2, - 346, - 96, - 2, - 346, - 97, - 2, - 346, - 98, - 2, - 345, - 98, - 2, - 344, - 98, - 2, - 343, - 98, - 2, - 341, - 98, - 2, - 341, - 97, - 2, - 343, - 96, - 2, - 344, - 96, - 2, - 345, - 96, - 2, - 346, - 97, - 2, - 345, - 98, - 2, - 344, - 98, - 2, - 343, - 98, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "image_id": 197388, - "bbox": [ - 287.17, - 61.52, - 74.88, - 165.61 - ], - "category_id": 1, - "id": 467657 - }, - { - "num_keypoints": 15, - "area": 8913.98475, - "iscrowd": 0, - "keypoints": [ - 591, - 78, - 2, - 594, - 74, - 2, - 586, - 74, 
- 2, - 0, - 0, - 0, - 573, - 70, - 2, - 598, - 86, - 2, - 566, - 93, - 2, - 626, - 105, - 2, - 546, - 126, - 2, - 0, - 0, - 0, - 561, - 150, - 2, - 582, - 150, - 2, - 557, - 154, - 2, - 606, - 194, - 2, - 558, - 209, - 1, - 591, - 252, - 2, - 539, - 262, - 1, - 587, - 57, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 600, - 262, - 2, - 0, - 0, - 0, - 604, - 261, - 2, - 0, - 0, - 0, - 586, - 262, - 2, - 0, - 0, - 0, - 576.0, - 73.0, - 2.0, - 577.0, - 76.0, - 2.0, - 577.0, - 78.0, - 2.0, - 577.0, - 81.0, - 2.0, - 579.0, - 83.0, - 2.0, - 580.0, - 85.0, - 2.0, - 583.0, - 86.0, - 2.0, - 585.0, - 87.0, - 2.0, - 588.0, - 88.0, - 2.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 590.0, - 76.0, - 0.0, - 590.0, - 77.0, - 0.0, - 591.0, - 79.0, - 0.0, - 591.0, - 80.0, - 0.0, - 587.0, - 81.0, - 0.0, - 589.0, - 81.0, - 0.0, - 591.0, - 81.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 586.8761575736252, - 83.61172634947533, - 2.0, - 588.9412473790786, - 83.41106519512101, - 2.0, - 590.7724136651731, - 82.86258592792586, - 2.0, - 591.6996507831648, - 82.73443932626762, - 2.0, - 592.2456105550131, - 82.31442081227021, - 2.0, - 593.6493129356235, - 81.90362788181679, - 2.0, - 594.2114473230698, - 81.26071885052849, - 2.0, - 594.1276526357614, - 83.53407437193627, - 2.0, - 593.6044897939645, - 84.44948682598039, - 2.0, - 592.6541667265051, - 84.92630393832337, - 2.0, - 590.9756801829618, - 85.08662594065947, - 2.0, - 589.348352170458, - 84.76877788468903, - 2.0, - 587.2321394378064, - 83.56702886843215, - 2.0, - 590.3445832495596, - 83.57368678672641, - 2.0, - 591.8126301484949, - 83.20736933689491, - 2.0, - 592.7565172980813, - 82.68511125153186, - 2.0, - 594.1612270579618, - 81.3825154024012, - 2.0, - 593.0988272872626, - 83.2510259291705, - 2.0, - 592.1117610557407, - 83.63720194498697, - 2.0, - 590.626023236443, - 84.00301465801164, - 2.0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 559, - 151, - 2, - 565, - 151, - 2, - 569, - 153, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 568, - 156, - 2, - 570, - 162, - 2, - 571, - 166, - 2, - 571, - 169, - 2, - 565, - 157, - 2, - 565, - 162, - 2, - 566, - 164, - 2, - 566, - 166, - 2, - 561, - 158, - 2, - 562, - 161, - 2, - 563, - 163, - 2, - 563, - 165, - 2, - 558, - 159, - 2, - 559, - 162, - 2, - 560, - 163, - 2, - 560, - 164, - 2 - ], - "image_id": 197388, - "bbox": [ - 540.04, - 48.81, - 99.96, - 223.36 - ], - "category_id": 1, - "id": 531914 - }, - { - "num_keypoints": 16, - "area": 14267.20475, - "iscrowd": 0, - "keypoints": [ - 580, - 211, - 2, - 586, - 206, - 2, - 574, - 204, - 2, - 0, - 0, - 0, - 562, - 198, - 2, - 584, - 220, - 2, - 529, - 215, - 2, - 599, - 242, - 2, - 512, - 260, - 2, - 619, - 274, - 2, - 538, - 285, - 
2, - 537, - 288, - 2, - 506, - 277, - 2, - 562, - 332, - 2, - 452, - 332, - 2, - 550, - 387, - 1, - 402, - 371, - 2, - 582, - 184, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 387, - 389, - 2, - 0, - 0, - 0, - 374, - 383, - 2, - 0, - 0, - 0, - 390, - 365, - 2, - 559, - 197, - 2, - 559, - 202, - 2, - 559, - 205, - 2, - 560, - 209, - 2, - 561, - 213, - 2, - 564, - 217, - 2, - 567, - 220, - 2, - 570, - 223, - 2, - 573, - 225, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 573, - 201, - 2, - 575, - 202, - 2, - 577, - 203, - 2, - 579, - 204, - 2, - 580, - 206, - 2, - 584, - 207, - 2, - 585, - 206, - 2, - 587, - 205, - 2, - 589, - 205, - 2, - 590, - 205, - 2, - 582, - 207, - 2, - 582, - 209, - 2, - 581, - 212, - 2, - 581, - 215, - 2, - 577, - 214, - 2, - 578, - 214, - 2, - 580, - 216, - 2, - 581, - 216, - 2, - 582, - 216, - 2, - 573, - 204, - 2, - 576, - 204, - 2, - 578, - 205, - 2, - 580, - 207, - 2, - 578, - 207, - 2, - 575, - 206, - 2, - 584, - 208, - 2, - 586, - 207, - 2, - 588, - 206, - 2, - 590, - 207, - 2, - 588, - 208, - 2, - 586, - 209, - 2, - 571, - 217, - 2, - 574, - 217, - 2, - 576, - 217, - 2, - 577, - 217, - 2, - 577, - 217, - 2, - 578, - 217, - 2, - 579, - 218, - 2, - 578, - 219, - 2, - 577, - 219, - 2, - 576, - 220, - 2, - 575, - 219, - 2, - 573, - 218, - 2, - 572, - 217, - 2, - 576, - 217, - 2, - 576, - 218, - 2, - 577, - 218, - 2, - 579, - 218, - 2, - 577, - 219, - 2, - 576, - 219, - 2, - 575, - 219, - 2, - 622, - 274, - 2, - 620, - 281, - 2, - 620, - 287, - 2, - 623, - 292, - 2, - 627, - 297, - 2, - 628, - 284, - 2, - 635, - 290, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 628, - 281, - 2, - 631, - 285, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 628, - 278, - 2, - 632, - 283, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 628, - 277, - 2, - 631, - 279, - 2, - 633, - 282, - 2, - 0, - 0, - 0, - 542, - 286, - 2, - 551, - 285, - 2, - 557, - 289, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 557, - 293, - 2, - 559, - 301, - 2, - 559, - 306, - 2, - 558, - 312, - 2, - 551, - 293, - 2, - 552, - 302, - 2, - 552, - 307, - 2, - 0, - 0, - 0, - 546, - 296, - 2, - 548, - 302, - 2, - 549, - 307, - 2, - 0, - 0, - 0, - 543, - 298, - 2, - 544, - 303, - 2, - 545, - 307, - 2, - 0, - 0, - 0 - ], - "image_id": 197388, - "bbox": [ - 372.58, - 170.84, - 266.63, - 217.19 - ], - "category_id": 1, - "id": 533949 - }, - { - "num_keypoints": 13, - "area": 8260.75085, - "iscrowd": 0, - "keypoints": [ - 36, - 79, - 2, - 40, - 74, - 2, - 31, - 75, - 2, - 0, - 0, - 0, - 19, - 69, - 2, - 45, - 77, - 2, - 2, - 89, - 2, - 74, - 99, - 2, - 0, - 0, - 0, - 78, - 92, - 2, - 0, - 0, - 0, - 33, - 149, - 2, - 7, - 153, - 2, - 44, - 196, - 2, - 2, - 205, - 2, - 35, - 245, - 2, - 0, - 0, - 0, - 33, - 54, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 41, - 255, - 2, - 0, - 0, - 0, - 48, - 255, - 2, - 0, - 0, - 0, - 29, - 253, - 2, - 0, - 0, - 0, - 22, - 70, - 2, - 22, - 73, - 2, - 23, - 76, - 2, - 24, - 78, - 2, - 25, - 80, - 2, - 27, - 82, - 2, - 29, - 84, - 2, - 31, - 85, - 2, - 34, - 85, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 27, - 72, - 2, - 29, - 72, - 2, - 31, - 72, - 2, - 33, - 72, - 2, - 34, - 73, - 2, - 38, - 73, - 2, - 40, - 72, - 2, - 41, - 71, - 2, - 42, - 71, - 2, - 43, - 70, - 2, - 37, - 75, - 2, - 37, - 77, - 2, - 37, - 78, - 2, - 37, - 80, - 2, - 35, - 80, - 2, - 37, - 80, - 2, - 37, - 80, - 2, - 38, - 80, - 2, - 39, - 80, - 2, - 28, - 74, - 2, - 31, - 73, - 2, - 33, - 74, - 
2, - 34, - 75, - 2, - 32, - 76, - 2, - 31, - 75, - 2, - 39, - 75, - 2, - 40, - 73, - 2, - 41, - 73, - 2, - 43, - 72, - 2, - 42, - 74, - 2, - 40, - 75, - 2, - 30, - 83, - 2, - 33, - 82, - 2, - 35, - 82, - 2, - 36, - 82, - 2, - 37, - 82, - 2, - 37, - 82, - 2, - 38, - 82, - 2, - 37, - 83, - 2, - 37, - 84, - 2, - 36, - 84, - 2, - 35, - 84, - 2, - 33, - 84, - 2, - 31, - 83, - 2, - 35, - 83, - 2, - 36, - 83, - 2, - 37, - 83, - 2, - 38, - 82, - 2, - 37, - 83, - 2, - 36, - 84, - 2, - 35, - 84, - 2, - 0, - 0, - 0, - 76, - 89, - 2, - 74, - 86, - 2, - 75, - 81, - 2, - 77, - 76, - 2, - 86, - 80, - 2, - 82, - 84, - 2, - 78, - 85, - 2, - 76, - 86, - 2, - 86, - 83, - 2, - 82, - 86, - 2, - 79, - 87, - 2, - 76, - 87, - 2, - 87, - 84, - 2, - 84, - 88, - 2, - 80, - 88, - 2, - 78, - 89, - 2, - 88, - 87, - 2, - 85, - 89, - 2, - 82, - 90, - 2, - 79, - 91, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "image_id": 197388, - "bbox": [ - 0.5, - 43.74, - 90.1, - 220.09 - ], - "category_id": 1, - "id": 543117 - } - ] +{ + "categories": [ + { + "supercategory": "person", + "id": 1, + "name": "person", + "keypoints": [], + "skeleton": [] + } + ], + "images": [ + { + "license": 4, + "file_name": "000000000785.jpg", + "coco_url": "http://images.cocodataset.org/val2017/000000000785.jpg", + "height": 425, + "width": 640, + "date_captured": "2013-11-19 21:22:42", + "flickr_url": "http://farm8.staticflickr.com/7015/6795644157_f019453ae7_z.jpg", + "id": 785 + }, + { + "license": 3, + "file_name": "000000040083.jpg", + "coco_url": "http://images.cocodataset.org/val2017/000000040083.jpg", + "height": 333, + "width": 500, + "date_captured": "2013-11-18 03:30:24", + "flickr_url": "http://farm1.staticflickr.com/116/254881838_e21c6d17b8_z.jpg", + "id": 40083 + }, + { + "license": 1, + "file_name": "000000196141.jpg", + "coco_url": "http://images.cocodataset.org/val2017/000000196141.jpg", + "height": 429, + "width": 640, + "date_captured": "2013-11-22 22:37:15", + "flickr_url": "http://farm4.staticflickr.com/3310/3611902235_57d4ae496d_z.jpg", + "id": 196141 + }, + { + "license": 3, + "file_name": "000000197388.jpg", + "coco_url": "http://images.cocodataset.org/val2017/000000197388.jpg", + "height": 392, + "width": 640, + "date_captured": "2013-11-19 20:10:37", + "flickr_url": "http://farm9.staticflickr.com/8375/8507321836_5b8b13188f_z.jpg", + "id": 197388 + } + ], + "annotations": [ + { + "num_keypoints": 17, + "area": 27789.11055, + "iscrowd": 0, + "keypoints": [ + 367, + 81, + 2, + 374, + 73, + 2, + 360, + 75, + 2, + 386, + 78, + 2, + 356, + 81, + 2, + 399, + 108, + 2, + 358, + 129, + 2, + 433, + 142, + 2, + 341, + 159, + 2, + 449, + 165, + 2, + 309, + 178, + 2, + 424, + 203, + 2, + 393, + 214, + 2, + 429, + 294, + 2, + 367, + 273, + 2, + 466, + 362, + 2, + 396, + 341, + 2, + 370, + 52, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 431, + 378, + 2, + 364, + 366, + 2, + 437, + 383, + 2, + 358, + 361, + 2, + 488, + 372, + 2, + 414, + 353, + 2, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 382.0, + 88.0, + 0.0, + 384.0, + 85.0, + 0.0, + 386.0, 
+ 80.0, + 0.0, + 386.0, + 76.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 367.0, + 76.0, + 0.0, + 367.0, + 79.0, + 0.0, + 367.0, + 81.0, + 0.0, + 364.0, + 83.0, + 0.0, + 366.0, + 83.0, + 0.0, + 367.0, + 84.0, + 0.0, + 369.0, + 83.0, + 0.0, + 371.0, + 83.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 363.0, + 88.0, + 0.0, + 364.0, + 86.0, + 0.0, + 366.0, + 86.0, + 0.0, + 368.0, + 86.0, + 0.0, + 369.0, + 86.0, + 0.0, + 372.0, + 86.0, + 0.0, + 376.0, + 86.0, + 0.0, + 373.0, + 89.0, + 0.0, + 371.0, + 90.0, + 0.0, + 368.0, + 90.0, + 0.0, + 366.0, + 90.0, + 0.0, + 364.0, + 89.0, + 0.0, + 364.0, + 88.0, + 0.0, + 366.0, + 87.0, + 0.0, + 368.0, + 87.0, + 0.0, + 370.0, + 87.0, + 0.0, + 375.0, + 87.0, + 0.0, + 370.0, + 89.0, + 0.0, + 368.0, + 89.0, + 0.0, + 367.0, + 89.0, + 0.0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 435, + 172, + 2, + 441, + 178, + 2, + 442, + 160, + 2, + 444, + 170, + 2, + 444, + 173, + 2, + 444, + 178, + 2, + 447, + 161, + 2, + 448, + 170, + 2, + 448, + 174, + 2, + 448, + 180, + 2, + 453, + 161, + 2, + 453, + 170, + 2, + 452, + 176, + 2, + 453, + 181, + 2, + 459, + 163, + 2, + 459, + 171, + 2, + 458, + 176, + 2, + 456, + 182, + 2, + 306, + 188, + 2, + 302, + 181, + 2, + 298, + 179, + 2, + 293, + 183, + 2, + 290, + 188, + 2, + 291, + 174, + 2, + 285, + 180, + 2, + 285, + 185, + 2, + 290, + 189, + 2, + 283, + 177, + 2, + 282, + 185, + 2, + 285, + 190, + 2, + 290, + 194, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "image_id": 785, + "bbox": [ + 280.79, + 44.73, + 218.7, + 346.68 + ], + "category_id": 1, + "id": 442619 + }, + { + "num_keypoints": 14, + "area": 11025.219, + "iscrowd": 0, + "keypoints": [ + 99, + 144, + 2, + 104, + 141, + 2, + 96, + 137, + 2, + 0, + 0, + 0, + 78, + 133, + 2, + 56, + 161, + 2, + 81, + 162, + 2, + 0, + 0, + 0, + 103, + 208, + 2, + 116, + 204, + 2, + 0, + 0, + 0, + 57, + 246, + 1, + 82, + 259, + 1, + 137, + 219, + 2, + 138, + 247, + 2, + 177, + 256, + 2, + 158, + 296, + 1, + 106, + 120, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 207, + 256, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 184, + 272, + 2, + 0, + 0, + 0, + 82, + 130, + 2, + 80, + 134, + 2, + 80, + 139, + 2, + 80, + 143, + 2, + 81, + 147, + 2, + 82, + 151, + 2, + 85, + 154, + 2, + 88, + 156, + 2, + 92, + 158, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 94, + 131, + 2, + 97, + 131, + 2, + 98, + 131, + 2, + 100, + 133, + 2, + 101, + 134, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 101, + 139, + 2, + 100, + 142, + 2, + 99, + 144, + 2, + 98, + 146, + 2, + 96, + 147, + 2, + 97, + 147, + 2, + 98, + 148, + 2, + 99, + 148, + 2, + 99, + 148, + 2, + 93, + 137, + 2, + 95, + 136, + 2, + 97, + 136, + 2, + 97, + 138, + 2, + 96, + 138, + 2, + 95, + 138, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 89, + 149, + 2, + 92, + 149, + 2, + 95, + 150, + 2, + 96, + 150, + 2, + 97, + 151, + 2, + 97, + 152, + 2, + 97, + 153, + 2, + 96, + 153, + 2, + 
96, + 153, + 2, + 94, + 153, + 2, + 93, + 153, + 2, + 91, + 151, + 2, + 90, + 149, + 2, + 95, + 150, + 2, + 96, + 151, + 2, + 97, + 151, + 2, + 97, + 153, + 2, + 96, + 153, + 2, + 94, + 153, + 2, + 94, + 152, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 118, + 198, + 2, + 113, + 197, + 2, + 109, + 197, + 2, + 0, + 0, + 0, + 118, + 202, + 2, + 111, + 201, + 2, + 106, + 201, + 2, + 0, + 0, + 0, + 117, + 206, + 2, + 111, + 205, + 2, + 108, + 205, + 2, + 104, + 203, + 2, + 116, + 209, + 2, + 110, + 209, + 2, + 107, + 208, + 2, + 104, + 206, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "image_id": 40083, + "bbox": [ + 38.08, + 110.95, + 174.71, + 174.71 + ], + "category_id": 1, + "id": 198196 + }, + { + "num_keypoints": 15, + "area": 10171.9544, + "iscrowd": 0, + "keypoints": [ + 343, + 164, + 2, + 348, + 160, + 2, + 340, + 160, + 2, + 359, + 163, + 2, + 332, + 164, + 2, + 370, + 189, + 2, + 334, + 190, + 2, + 358, + 236, + 2, + 348, + 234, + 2, + 339, + 270, + 2, + 330, + 262, + 2, + 378, + 262, + 2, + 343, + 254, + 2, + 338, + 280, + 2, + 283, + 272, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 343, + 143, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 332.3577977797564, + 162.3496914134306, + 2.0, + 332.8988608117197, + 165.6187214570887, + 2.0, + 333.63467933804384, + 168.74800139782477, + 2.0, + 334.44826124602673, + 171.62306650199142, + 2.0, + 335.45694729674096, + 174.28548183067173, + 2.0, + 336.96602223714194, + 176.06009946336934, + 2.0, + 339.1693873087565, + 177.1661381740196, + 2.0, + 342.3967300714231, + 178.20855305989585, + 2.0, + 346.46408769196154, + 178.59725353764554, + 2.0, + 349.8700014600567, + 177.54131727031634, + 2.0, + 352.5932256960401, + 176.49227677887563, + 2.0, + 354.83135782877605, + 175.44453310499006, + 2.0, + 356.3679296755323, + 173.84137070599724, + 2.0, + 357.0065454221239, + 171.40940037147672, + 2.0, + 357.534409347235, + 168.54578019684436, + 2.0, + 357.7505070106656, + 165.60219732546338, + 2.0, + 357.9972831576478, + 162.53520322313494, + 2.0, + 334.98978292427813, + 157.50515154670268, + 2.0, + 336.268189015108, + 155.9984682569317, + 2.0, + 338.20047804888554, + 155.20954518037684, + 2.0, + 339.8509974460976, + 155.23421301748238, + 2.0, + 341.352836967917, + 155.51378012264476, + 2.0, + 347.8451109044692, + 155.22197044222963, + 2.0, + 349.3337133669386, + 154.8293061798694, + 2.0, + 351.12965129777496, + 154.6547285491345, + 2.0, + 353.1635732613358, + 155.35309825224036, + 2.0, + 354.5697377522786, + 156.92000379375384, + 2.0, + 344.713427734375, + 159.3260030409869, + 2.0, + 344.74998306573605, + 161.3128111596201, + 2.0, + 344.9170358096852, + 163.04858473235487, + 2.0, + 344.9786475088082, + 164.92118542241116, + 2.0, + 342.8344047097599, + 167.29107576258042, + 2.0, + 343.73243414186965, + 167.34131457758886, + 2.0, + 345.013671875, + 167.60332833084405, + 2.0, + 345.8795548981311, + 167.26825794893153, + 2.0, + 346.9039867326325, + 167.04604671702666, + 2.0, + 337.4534390917011, + 160.08626361921722, + 2.0, + 338.55446807262945, + 159.17182970233992, + 2.0, + 340.002108854406, + 159.25801017611636, + 2.0, + 341.49895665785846, + 160.03499301087624, + 2.0, + 
340.23350459080115, + 160.5913200228822, + 2.0, + 338.5602124083276, + 160.56629581825405, + 2.0, + 347.86048488242955, + 159.88770386938955, + 2.0, + 349.3879867254519, + 159.04122164857154, + 2.0, + 350.88049507889093, + 158.927533976237, + 2.0, + 352.11961969113815, + 159.93540822945388, + 2.0, + 350.849705954159, + 160.3235902374866, + 2.0, + 349.1870314654182, + 160.32544540704464, + 2.0, + 340.80742998310166, + 172.02484322342218, + 2.0, + 342.28591649672563, + 170.90962129480698, + 2.0, + 344.0934833302217, + 170.10430531221277, + 2.0, + 345.1530334472656, + 170.32844890519684, + 2.0, + 345.8770950616575, + 170.0848247453278, + 2.0, + 347.8689553653493, + 170.66106716978783, + 2.0, + 349.58350770239736, + 171.62832581763175, + 2.0, + 348.09330994849114, + 172.68533762015548, + 2.0, + 346.88256608551626, + 173.17178057502298, + 2.0, + 345.6372661515778, + 173.27078642003676, + 2.0, + 343.9210619159773, + 173.28780972349878, + 2.0, + 342.63790340049593, + 172.8480547736673, + 2.0, + 341.26428671444165, + 171.89147685929842, + 2.0, + 343.8292683320887, + 171.36270207423792, + 2.0, + 345.2252255308862, + 171.2339672013825, + 2.0, + 346.42121037501914, + 171.26879086961932, + 2.0, + 349.3406477385876, + 171.65391995299098, + 2.0, + 346.50171341241577, + 171.7467015883502, + 2.0, + 345.33072832892924, + 171.8389222986558, + 2.0, + 343.8844602697036, + 171.8535089231005, + 2.0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "image_id": 40083, + "bbox": [ + 257.76, + 139.06, + 140.05, + 154.21 + ], + "category_id": 1, + "id": 230195 + }, + { + "num_keypoints": 0, + "area": 491.2669, + "iscrowd": 0, + "keypoints": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 
0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "image_id": 40083, + "bbox": [ + 275.17, + 126.5, + 10.69, + 68.26 + ], + "category_id": 1, + "id": 1202706 + }, + { + "num_keypoints": 15, + "area": 17123.92955, + "iscrowd": 0, + "keypoints": [ + 297, + 111, + 2, + 299, + 106, + 2, + 0, + 0, + 0, + 314, + 108, + 2, + 0, + 0, + 0, + 329, + 141, + 2, + 346, + 125, + 2, + 295, + 164, + 2, + 323, + 130, + 2, + 266, + 155, + 2, + 279, + 143, + 2, + 329, + 225, + 2, + 331, + 221, + 2, + 327, + 298, + 2, + 283, + 269, + 2, + 398, + 327, + 2, + 288, + 349, + 2, + 309, + 78, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 399, + 363, + 2, + 261, + 361, + 2, + 402, + 360, + 2, + 254, + 359, + 2, + 408, + 327, + 2, + 296, + 358, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 308, + 121, + 2, + 310, + 119, + 2, + 311, + 117, + 2, + 312, + 115, + 2, + 313, + 112, + 2, + 313, + 110, + 2, + 314, + 108, + 2, + 313, + 105, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 298, + 101, + 2, + 300, + 101, + 2, + 302, + 101, + 2, + 303, + 101, + 2, + 305, + 103, + 2, + 297, + 104, + 2, + 297, + 106, + 2, + 296, + 109, + 2, + 296, + 111, + 2, + 299, + 111, + 2, + 300, + 111, + 2, + 298, + 112, + 2, + 299, + 112, + 2, + 300, + 112, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 298, + 104, + 2, + 300, + 103, + 2, + 302, + 104, + 2, + 304, + 106, + 2, + 302, + 106, + 2, + 300, + 106, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 264, + 156, + 2, + 263, + 147, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 253, + 149, + 2, + 248, + 147, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 253, + 154, + 2, + 245, + 155, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 251, + 157, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 252, + 160, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 276, + 146, + 2, + 270, + 150, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 268, + 139, + 2, + 262, + 145, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "image_id": 196141, + "bbox": [ + 247.76, + 74.23, + 169.67, + 300.78 + ], + "category_id": 1, + "id": 460541 + }, + { + "num_keypoints": 15, + "area": 2789.0208, + "iscrowd": 0, + "keypoints": [ + 589, + 113, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 595, + 112, + 1, + 584, + 110, + 2, + 598, + 123, + 2, + 579, + 119, + 2, + 594, + 141, + 2, + 
570, + 137, + 2, + 576, + 135, + 2, + 585, + 139, + 2, + 590, + 157, + 2, + 574, + 156, + 2, + 589, + 192, + 2, + 565, + 189, + 1, + 587, + 222, + 1, + 557, + 219, + 1, + 589, + 102, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 584, + 111, + 2, + 584, + 112, + 2, + 584, + 113, + 2, + 584, + 115, + 2, + 585, + 116, + 2, + 586, + 117, + 2, + 587, + 118, + 2, + 588, + 119, + 2, + 589, + 119, + 2, + 591, + 119, + 2, + 592, + 118, + 2, + 593, + 117, + 2, + 593, + 116, + 2, + 594, + 115, + 2, + 594, + 113, + 2, + 594, + 112, + 2, + 594, + 110, + 2, + 584, + 108, + 2, + 585, + 108, + 2, + 587, + 108, + 2, + 588, + 108, + 2, + 589, + 108, + 2, + 591, + 109, + 2, + 592, + 108, + 2, + 593, + 108, + 2, + 593, + 109, + 2, + 594, + 109, + 2, + 589, + 110, + 2, + 589, + 111, + 2, + 589, + 112, + 2, + 589, + 112, + 2, + 588, + 113, + 2, + 589, + 113, + 2, + 589, + 113, + 2, + 590, + 113, + 2, + 590, + 113, + 2, + 585, + 110, + 2, + 586, + 109, + 2, + 587, + 109, + 2, + 588, + 110, + 2, + 587, + 110, + 2, + 586, + 110, + 2, + 590, + 110, + 2, + 591, + 109, + 2, + 592, + 109, + 2, + 594, + 110, + 2, + 592, + 110, + 2, + 591, + 110, + 2, + 587, + 115, + 2, + 588, + 115, + 2, + 589, + 114, + 2, + 589, + 114, + 2, + 590, + 114, + 2, + 591, + 115, + 2, + 591, + 115, + 2, + 591, + 115, + 2, + 590, + 116, + 2, + 589, + 116, + 2, + 589, + 116, + 2, + 588, + 116, + 2, + 587, + 115, + 2, + 589, + 115, + 2, + 589, + 115, + 2, + 590, + 115, + 2, + 591, + 115, + 2, + 590, + 116, + 2, + 589, + 116, + 2, + 589, + 116, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "image_id": 196141, + "bbox": [ + 555.57, + 99.84, + 48.32, + 113.05 + ], + "category_id": 1, + "id": 488308 + }, + { + "num_keypoints": 0, + "area": 285.7906, + "iscrowd": 0, + "keypoints": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 
+ 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "image_id": 196141, + "bbox": [ + 440.85, + 73.13, + 16.79, + 32.45 + ], + "category_id": 1, + "id": 508900 + }, + { + "num_keypoints": 12, + "area": 21608.94075, + "iscrowd": 0, + "keypoints": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 552, + 234, + 2, + 0, + 0, + 0, + 531, + 262, + 2, + 600, + 283, + 2, + 480, + 260, + 2, + 622, + 336, + 2, + 466, + 242, + 2, + 0, + 0, + 0, + 546, + 365, + 2, + 592, + 371, + 2, + 470, + 351, + 2, + 551, + 330, + 2, + 519, + 394, + 2, + 589, + 391, + 2, + 575, + 211, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 498, + 408, + 2, + 0, + 0, + 0, + 534, + 395, + 2, + 587, + 401, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "image_id": 196141, + "bbox": [ + 453.77, + 206.81, + 177.23, + 210.87 + ], + "category_id": 1, + "id": 1717641 + }, + { + "num_keypoints": 17, + "area": 1870.14015, + "iscrowd": 0, + "keypoints": [ + 48, + 79, + 2, + 50, + 77, + 2, + 46, + 77, + 2, + 54, + 78, + 2, + 45, + 78, + 2, + 57, + 90, + 2, + 42, + 90, + 2, + 63, + 
103, + 2, + 42, + 105, + 2, + 56, + 113, + 2, + 49, + 112, + 2, + 55, + 117, + 2, + 44, + 117, + 2, + 55, + 140, + 2, + 47, + 140, + 2, + 56, + 160, + 2, + 49, + 159, + 2, + 47, + 71, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "image_id": 196141, + "bbox": [ + 36.12, + 67.59, + 30.41, + 96.08 + ], + "category_id": 1, + "id": 1724673 + }, + { + "num_keypoints": 16, + "area": 14250.29385, + "iscrowd": 0, + "keypoints": [ + 334, + 135, + 2, + 340, + 129, + 2, + 331, + 129, + 2, + 0, + 0, + 0, + 319, + 123, + 2, + 340, + 146, + 2, + 292, + 133, + 2, + 353, + 164, + 2, + 246, + 144, + 2, + 354, + 197, + 2, + 250, + 185, + 2, + 293, + 197, + 2, + 265, + 187, + 2, + 305, + 252, + 2, + 231, + 254, + 2, + 293, + 321, + 2, + 193, + 297, + 2, + 333, + 109, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 299, + 332, + 2, + 185, + 309, + 2, + 310, + 333, + 2, + 176, + 303, + 2, + 287, + 328, + 2, + 198, + 303, + 2, + 321, + 127, + 2, + 321, + 130, + 2, + 321, + 133, + 2, + 321, + 136, + 2, + 322, + 138, + 2, + 324, + 140, + 2, + 326, + 142, + 2, + 329, + 143, + 2, + 332, + 143, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 328, + 125, + 2, + 330, + 125, + 2, + 331, + 126, + 2, + 333, + 126, + 2, + 335, + 127, + 2, + 339, + 128, + 2, + 340, + 127, + 2, + 342, + 126, + 2, + 343, + 126, + 2, + 345, + 125, + 2, + 336, + 130, + 2, + 336, + 132, + 2, + 337, + 134, + 2, + 338, + 136, + 2, + 334, + 138, + 2, + 335, + 138, + 2, + 337, + 138, + 2, + 338, + 137, + 2, + 339, + 138, + 2, + 329, + 127, + 2, + 331, + 127, + 2, + 333, + 128, + 2, + 334, + 129, + 2, + 332, + 130, + 2, + 331, + 129, + 2, + 339, + 129, + 2, + 341, + 127, + 2, + 342, + 127, + 2, + 344, + 127, + 2, + 342, + 129, + 2, + 341, + 129, + 2, + 329, + 139, + 2, + 331, + 
139, + 2, + 333, + 139, + 2, + 334, + 139, + 2, + 334, + 139, + 2, + 335, + 139, + 2, + 336, + 139, + 2, + 335, + 140, + 2, + 334, + 141, + 2, + 333, + 141, + 2, + 333, + 141, + 2, + 331, + 141, + 2, + 330, + 139, + 2, + 333, + 139, + 2, + 334, + 140, + 2, + 334, + 139, + 2, + 336, + 140, + 2, + 334, + 140, + 2, + 334, + 141, + 2, + 331, + 141, + 2, + 0, + 0, + 0, + 349, + 202, + 2, + 345, + 203, + 2, + 342, + 207, + 2, + 338, + 212, + 2, + 349, + 214, + 2, + 341, + 219, + 2, + 336, + 219, + 2, + 333, + 218, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 250, + 187, + 2, + 255, + 188, + 2, + 260, + 189, + 2, + 264, + 194, + 2, + 268, + 201, + 2, + 254, + 193, + 2, + 256, + 201, + 2, + 260, + 205, + 2, + 0, + 0, + 0, + 252, + 193, + 2, + 252, + 201, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 247, + 193, + 2, + 248, + 200, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "image_id": 197388, + "bbox": [ + 139.41, + 102.25, + 222.39, + 241.57 + ], + "category_id": 1, + "id": 437295 + }, + { + "num_keypoints": 16, + "area": 3404.869, + "iscrowd": 0, + "keypoints": [ + 345, + 92, + 2, + 350, + 87, + 2, + 341, + 87, + 2, + 0, + 0, + 0, + 330, + 83, + 2, + 357, + 94, + 2, + 316, + 92, + 2, + 357, + 104, + 2, + 291, + 123, + 1, + 351, + 133, + 2, + 281, + 136, + 1, + 326, + 131, + 1, + 305, + 128, + 1, + 336, + 152, + 1, + 303, + 171, + 1, + 318, + 206, + 2, + 294, + 211, + 1, + 344, + 70, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 320, + 214, + 2, + 0, + 0, + 0, + 328, + 213, + 2, + 0, + 0, + 0, + 313, + 210, + 2, + 0, + 0, + 0, + 333, + 85, + 2, + 333, + 87, + 2, + 333, + 89, + 2, + 334, + 92, + 2, + 335, + 95, + 2, + 337, + 97, + 2, + 338, + 98, + 2, + 341, + 99, + 2, + 343, + 100, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 337, + 86, + 2, + 339, + 85, + 2, + 341, + 85, + 2, + 342, + 86, + 2, + 344, + 87, + 2, + 348, + 87, + 2, + 349, + 86, + 2, + 350, + 85, + 2, + 351, + 85, + 2, + 353, + 84, + 2, + 345, + 88, + 2, + 345, + 90, + 2, + 345, + 92, + 2, + 345, + 94, + 2, + 342, + 94, + 2, + 343, + 94, + 2, + 345, + 95, + 2, + 346, + 94, + 2, + 347, + 94, + 2, + 337, + 87, + 2, + 339, + 86, + 2, + 341, + 86, + 2, + 343, + 88, + 2, + 341, + 88, + 2, + 340, + 88, + 2, + 348, + 88, + 2, + 349, + 86, + 2, + 351, + 86, + 2, + 353, + 86, + 2, + 351, + 87, + 2, + 350, + 88, + 2, + 340, + 97, + 2, + 341, + 96, + 2, + 343, + 96, + 2, + 344, + 96, + 2, + 345, + 96, + 2, + 346, + 96, + 2, + 346, + 97, + 2, + 346, + 98, + 2, + 345, + 98, + 2, + 344, + 98, + 2, + 343, + 98, + 2, + 341, + 98, + 2, + 341, + 97, + 2, + 343, + 96, + 2, + 344, + 96, + 2, + 345, + 96, + 2, + 346, + 97, + 2, + 345, + 98, + 2, + 344, + 98, + 2, + 343, + 98, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 
0 + ], + "image_id": 197388, + "bbox": [ + 287.17, + 61.52, + 74.88, + 165.61 + ], + "category_id": 1, + "id": 467657 + }, + { + "num_keypoints": 15, + "area": 8913.98475, + "iscrowd": 0, + "keypoints": [ + 591, + 78, + 2, + 594, + 74, + 2, + 586, + 74, + 2, + 0, + 0, + 0, + 573, + 70, + 2, + 598, + 86, + 2, + 566, + 93, + 2, + 626, + 105, + 2, + 546, + 126, + 2, + 0, + 0, + 0, + 561, + 150, + 2, + 582, + 150, + 2, + 557, + 154, + 2, + 606, + 194, + 2, + 558, + 209, + 1, + 591, + 252, + 2, + 539, + 262, + 1, + 587, + 57, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 600, + 262, + 2, + 0, + 0, + 0, + 604, + 261, + 2, + 0, + 0, + 0, + 586, + 262, + 2, + 0, + 0, + 0, + 576.0, + 73.0, + 2.0, + 577.0, + 76.0, + 2.0, + 577.0, + 78.0, + 2.0, + 577.0, + 81.0, + 2.0, + 579.0, + 83.0, + 2.0, + 580.0, + 85.0, + 2.0, + 583.0, + 86.0, + 2.0, + 585.0, + 87.0, + 2.0, + 588.0, + 88.0, + 2.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 590.0, + 76.0, + 0.0, + 590.0, + 77.0, + 0.0, + 591.0, + 79.0, + 0.0, + 591.0, + 80.0, + 0.0, + 587.0, + 81.0, + 0.0, + 589.0, + 81.0, + 0.0, + 591.0, + 81.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 586.8761575736252, + 83.61172634947533, + 2.0, + 588.9412473790786, + 83.41106519512101, + 2.0, + 590.7724136651731, + 82.86258592792586, + 2.0, + 591.6996507831648, + 82.73443932626762, + 2.0, + 592.2456105550131, + 82.31442081227021, + 2.0, + 593.6493129356235, + 81.90362788181679, + 2.0, + 594.2114473230698, + 81.26071885052849, + 2.0, + 594.1276526357614, + 83.53407437193627, + 2.0, + 593.6044897939645, + 84.44948682598039, + 2.0, + 592.6541667265051, + 84.92630393832337, + 2.0, + 590.9756801829618, + 85.08662594065947, + 2.0, + 589.348352170458, + 84.76877788468903, + 2.0, + 587.2321394378064, + 83.56702886843215, + 2.0, + 590.3445832495596, + 83.57368678672641, + 2.0, + 591.8126301484949, + 83.20736933689491, + 2.0, + 592.7565172980813, + 82.68511125153186, + 2.0, + 594.1612270579618, + 81.3825154024012, + 2.0, + 593.0988272872626, + 83.2510259291705, + 2.0, + 592.1117610557407, + 83.63720194498697, + 2.0, + 590.626023236443, + 84.00301465801164, + 2.0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 559, + 151, + 2, + 565, + 151, + 2, + 569, + 153, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 568, + 156, + 2, + 570, + 162, + 2, + 571, + 166, + 2, + 571, + 169, + 2, + 565, + 157, + 2, + 565, + 162, + 2, + 566, + 164, + 2, + 566, + 166, + 2, + 561, + 158, + 2, + 562, + 161, + 2, + 563, + 163, + 2, + 563, + 165, + 2, + 558, + 159, + 2, + 559, + 162, + 2, + 560, + 163, + 2, + 560, + 164, + 2 + ], + "image_id": 197388, + "bbox": [ + 540.04, + 48.81, + 99.96, + 223.36 + ], + "category_id": 1, + "id": 531914 + }, + { + "num_keypoints": 16, + 
"area": 14267.20475, + "iscrowd": 0, + "keypoints": [ + 580, + 211, + 2, + 586, + 206, + 2, + 574, + 204, + 2, + 0, + 0, + 0, + 562, + 198, + 2, + 584, + 220, + 2, + 529, + 215, + 2, + 599, + 242, + 2, + 512, + 260, + 2, + 619, + 274, + 2, + 538, + 285, + 2, + 537, + 288, + 2, + 506, + 277, + 2, + 562, + 332, + 2, + 452, + 332, + 2, + 550, + 387, + 1, + 402, + 371, + 2, + 582, + 184, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 387, + 389, + 2, + 0, + 0, + 0, + 374, + 383, + 2, + 0, + 0, + 0, + 390, + 365, + 2, + 559, + 197, + 2, + 559, + 202, + 2, + 559, + 205, + 2, + 560, + 209, + 2, + 561, + 213, + 2, + 564, + 217, + 2, + 567, + 220, + 2, + 570, + 223, + 2, + 573, + 225, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 573, + 201, + 2, + 575, + 202, + 2, + 577, + 203, + 2, + 579, + 204, + 2, + 580, + 206, + 2, + 584, + 207, + 2, + 585, + 206, + 2, + 587, + 205, + 2, + 589, + 205, + 2, + 590, + 205, + 2, + 582, + 207, + 2, + 582, + 209, + 2, + 581, + 212, + 2, + 581, + 215, + 2, + 577, + 214, + 2, + 578, + 214, + 2, + 580, + 216, + 2, + 581, + 216, + 2, + 582, + 216, + 2, + 573, + 204, + 2, + 576, + 204, + 2, + 578, + 205, + 2, + 580, + 207, + 2, + 578, + 207, + 2, + 575, + 206, + 2, + 584, + 208, + 2, + 586, + 207, + 2, + 588, + 206, + 2, + 590, + 207, + 2, + 588, + 208, + 2, + 586, + 209, + 2, + 571, + 217, + 2, + 574, + 217, + 2, + 576, + 217, + 2, + 577, + 217, + 2, + 577, + 217, + 2, + 578, + 217, + 2, + 579, + 218, + 2, + 578, + 219, + 2, + 577, + 219, + 2, + 576, + 220, + 2, + 575, + 219, + 2, + 573, + 218, + 2, + 572, + 217, + 2, + 576, + 217, + 2, + 576, + 218, + 2, + 577, + 218, + 2, + 579, + 218, + 2, + 577, + 219, + 2, + 576, + 219, + 2, + 575, + 219, + 2, + 622, + 274, + 2, + 620, + 281, + 2, + 620, + 287, + 2, + 623, + 292, + 2, + 627, + 297, + 2, + 628, + 284, + 2, + 635, + 290, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 628, + 281, + 2, + 631, + 285, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 628, + 278, + 2, + 632, + 283, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 628, + 277, + 2, + 631, + 279, + 2, + 633, + 282, + 2, + 0, + 0, + 0, + 542, + 286, + 2, + 551, + 285, + 2, + 557, + 289, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 557, + 293, + 2, + 559, + 301, + 2, + 559, + 306, + 2, + 558, + 312, + 2, + 551, + 293, + 2, + 552, + 302, + 2, + 552, + 307, + 2, + 0, + 0, + 0, + 546, + 296, + 2, + 548, + 302, + 2, + 549, + 307, + 2, + 0, + 0, + 0, + 543, + 298, + 2, + 544, + 303, + 2, + 545, + 307, + 2, + 0, + 0, + 0 + ], + "image_id": 197388, + "bbox": [ + 372.58, + 170.84, + 266.63, + 217.19 + ], + "category_id": 1, + "id": 533949 + }, + { + "num_keypoints": 13, + "area": 8260.75085, + "iscrowd": 0, + "keypoints": [ + 36, + 79, + 2, + 40, + 74, + 2, + 31, + 75, + 2, + 0, + 0, + 0, + 19, + 69, + 2, + 45, + 77, + 2, + 2, + 89, + 2, + 74, + 99, + 2, + 0, + 0, + 0, + 78, + 92, + 2, + 0, + 0, + 0, + 33, + 149, + 2, + 7, + 153, + 2, + 44, + 196, + 2, + 2, + 205, + 2, + 35, + 245, + 2, + 0, + 0, + 0, + 33, + 54, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 41, + 255, + 2, + 0, + 0, + 0, + 48, + 255, + 2, + 0, + 0, + 0, + 29, + 253, + 2, + 0, + 0, + 0, + 22, + 70, + 2, + 22, + 73, + 2, + 23, + 76, + 2, + 24, + 78, + 2, + 25, + 80, + 2, + 27, + 82, + 2, + 29, + 84, + 2, + 31, + 85, + 2, + 34, + 85, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27, + 72, + 2, + 29, + 72, + 2, + 31, + 72, + 2, + 33, + 72, + 2, + 34, + 73, + 2, + 38, + 73, + 2, + 40, + 72, 
+ 2, + 41, + 71, + 2, + 42, + 71, + 2, + 43, + 70, + 2, + 37, + 75, + 2, + 37, + 77, + 2, + 37, + 78, + 2, + 37, + 80, + 2, + 35, + 80, + 2, + 37, + 80, + 2, + 37, + 80, + 2, + 38, + 80, + 2, + 39, + 80, + 2, + 28, + 74, + 2, + 31, + 73, + 2, + 33, + 74, + 2, + 34, + 75, + 2, + 32, + 76, + 2, + 31, + 75, + 2, + 39, + 75, + 2, + 40, + 73, + 2, + 41, + 73, + 2, + 43, + 72, + 2, + 42, + 74, + 2, + 40, + 75, + 2, + 30, + 83, + 2, + 33, + 82, + 2, + 35, + 82, + 2, + 36, + 82, + 2, + 37, + 82, + 2, + 37, + 82, + 2, + 38, + 82, + 2, + 37, + 83, + 2, + 37, + 84, + 2, + 36, + 84, + 2, + 35, + 84, + 2, + 33, + 84, + 2, + 31, + 83, + 2, + 35, + 83, + 2, + 36, + 83, + 2, + 37, + 83, + 2, + 38, + 82, + 2, + 37, + 83, + 2, + 36, + 84, + 2, + 35, + 84, + 2, + 0, + 0, + 0, + 76, + 89, + 2, + 74, + 86, + 2, + 75, + 81, + 2, + 77, + 76, + 2, + 86, + 80, + 2, + 82, + 84, + 2, + 78, + 85, + 2, + 76, + 86, + 2, + 86, + 83, + 2, + 82, + 86, + 2, + 79, + 87, + 2, + 76, + 87, + 2, + 87, + 84, + 2, + 84, + 88, + 2, + 80, + 88, + 2, + 78, + 89, + 2, + 88, + 87, + 2, + 85, + 89, + 2, + 82, + 90, + 2, + 79, + 91, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "image_id": 197388, + "bbox": [ + 0.5, + 43.74, + 90.1, + 220.09 + ], + "category_id": 1, + "id": 543117 + } + ] } \ No newline at end of file diff --git a/tests/data/horse10/test_horse10.json b/tests/data/horse10/test_horse10.json index c85f53f9b8..ca4b04e973 100644 --- a/tests/data/horse10/test_horse10.json +++ b/tests/data/horse10/test_horse10.json @@ -1,302 +1,302 @@ -{ - "categories": [ - { - "supercategory": "animal", - "id": 1, - "name": "horse", - "keypoints": [ - "Nose", - "Eye", - "Nearknee", - "Nearfrontfetlock", - "Nearfrontfoot", - "Offknee", - "Offfrontfetlock", - "Offfrontfoot", - "Shoulder", - "Midshoulder", - "Elbow", - "Girth", - "Wither", - "Nearhindhock", - "Nearhindfetlock", - "Nearhindfoot", - "Hip", - "Stifle", - "Offhindhock", - "Offhindfetlock", - "Offhindfoot", - "Ischium" - ], - "skeleton": [] - } - ], - "images": [ - { - "id": 100, - "file_name": "0244.png", - "height": 162, - "width": 288 - }, - { - "id": 500, - "file_name": "0292.png", - "height": 162, - "width": 288 - }, - { - "id": 900, - "file_name": "0465.png", - "height": 162, - "width": 288 - } - ], - "annotations": [ - { - "keypoints": [ - 126.0, - 71.1, - 2.0, - 117.3, - 56.4, - 2.0, - 90.0, - 98.7, - 2.0, - 92.1, - 112.8, - 2.0, - 98.7, - 117.3, - 2.0, - 71.39999999999999, - 102.89999999999999, - 2.0, - 63.599999999999994, - 114.0, - 2.0, - 56.699999999999996, - 120.0, - 2.0, - 80.1, - 73.5, - 2.0, - 78.3, - 63.0, - 2.0, - 67.5, - 82.2, - 2.0, - 65.39999999999999, - 82.8, - 2.0, - 72.0, - 52.199999999999996, - 2.0, - 29.4, - 97.5, - 2.0, - 27.0, - 113.39999999999999, - 2.0, - 31.5, - 120.6, - 2.0, - 36.3, - 56.1, - 2.0, - 37.5, - 75.6, - 2.0, - 38.4, - 97.8, - 2.0, - 46.8, - 112.8, - 2.0, - 51.0, - 120.3, - 2.0, - 23.099999999999998, - 63.599999999999994, - 2.0 - ], - "image_id": 100, - "id": 100, - "num_keypoints": 22, - "bbox": [ - 2, - 38, - 145, - 97 - ], - "iscrowd": 0, - "area": 14065, - "category_id": 1 - }, - { - "keypoints": [ - 267.9, - 67.8, - 2.0, - 265.5, - 51.6, - 2.0, - 200.7, - 94.8, - 2.0, - 190.79999999999998, - 106.2, - 2.0, - 190.2, - 114.6, - 2.0, - 229.5, - 97.8, - 2.0, - 234.0, 
- 111.6, - 2.0, - 240.6, - 118.19999999999999, - 2.0, - 233.7, - 69.0, - 2.0, - 226.5, - 57.599999999999994, - 2.0, - 219.6, - 79.5, - 2.0, - 213.0, - 81.6, - 2.0, - 216.29999999999998, - 48.3, - 2.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 182.1, - 47.699999999999996, - 2.0, - 176.1, - 72.0, - 2.0, - 162.0, - 92.1, - 2.0, - 162.9, - 111.89999999999999, - 2.0, - 167.4, - 117.6, - 2.0, - 161.4, - 54.9, - 2.0 - ], - "image_id": 500, - "id": 500, - "num_keypoints": 19, - "bbox": [ - 140, - 33, - 148, - 100 - ], - "iscrowd": 0, - "area": 14800, - "category_id": 1 - }, - { - "keypoints": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 286.2, - 118.8, - 2.0, - 282.0, - 123.89999999999999, - 2.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 252.0, - 102.89999999999999, - 2.0, - 261.9, - 119.39999999999999, - 2.0, - 268.8, - 128.1, - 2.0, - 263.09999999999997, - 60.3, - 2.0, - 253.5, - 84.0, - 2.0, - 236.7, - 103.2, - 2.0, - 230.7, - 121.19999999999999, - 2.0, - 234.6, - 129.0, - 2.0, - 240.29999999999998, - 67.5, - 2.0 - ], - "image_id": 900, - "id": 900, - "num_keypoints": 11, - "bbox": [ - 219, - 46, - 69, - 97 - ], - "iscrowd": 0, - "area": 6693, - "category_id": 1 - } - ] +{ + "categories": [ + { + "supercategory": "animal", + "id": 1, + "name": "horse", + "keypoints": [ + "Nose", + "Eye", + "Nearknee", + "Nearfrontfetlock", + "Nearfrontfoot", + "Offknee", + "Offfrontfetlock", + "Offfrontfoot", + "Shoulder", + "Midshoulder", + "Elbow", + "Girth", + "Wither", + "Nearhindhock", + "Nearhindfetlock", + "Nearhindfoot", + "Hip", + "Stifle", + "Offhindhock", + "Offhindfetlock", + "Offhindfoot", + "Ischium" + ], + "skeleton": [] + } + ], + "images": [ + { + "id": 100, + "file_name": "0244.png", + "height": 162, + "width": 288 + }, + { + "id": 500, + "file_name": "0292.png", + "height": 162, + "width": 288 + }, + { + "id": 900, + "file_name": "0465.png", + "height": 162, + "width": 288 + } + ], + "annotations": [ + { + "keypoints": [ + 126.0, + 71.1, + 2.0, + 117.3, + 56.4, + 2.0, + 90.0, + 98.7, + 2.0, + 92.1, + 112.8, + 2.0, + 98.7, + 117.3, + 2.0, + 71.39999999999999, + 102.89999999999999, + 2.0, + 63.599999999999994, + 114.0, + 2.0, + 56.699999999999996, + 120.0, + 2.0, + 80.1, + 73.5, + 2.0, + 78.3, + 63.0, + 2.0, + 67.5, + 82.2, + 2.0, + 65.39999999999999, + 82.8, + 2.0, + 72.0, + 52.199999999999996, + 2.0, + 29.4, + 97.5, + 2.0, + 27.0, + 113.39999999999999, + 2.0, + 31.5, + 120.6, + 2.0, + 36.3, + 56.1, + 2.0, + 37.5, + 75.6, + 2.0, + 38.4, + 97.8, + 2.0, + 46.8, + 112.8, + 2.0, + 51.0, + 120.3, + 2.0, + 23.099999999999998, + 63.599999999999994, + 2.0 + ], + "image_id": 100, + "id": 100, + "num_keypoints": 22, + "bbox": [ + 2, + 38, + 145, + 97 + ], + "iscrowd": 0, + "area": 14065, + "category_id": 1 + }, + { + "keypoints": [ + 267.9, + 67.8, + 2.0, + 265.5, + 51.6, + 2.0, + 200.7, + 94.8, + 2.0, + 190.79999999999998, + 106.2, + 2.0, + 190.2, + 114.6, + 2.0, + 229.5, + 97.8, + 2.0, + 234.0, + 111.6, + 2.0, + 240.6, + 118.19999999999999, + 2.0, + 233.7, + 69.0, + 2.0, + 226.5, + 57.599999999999994, + 2.0, + 219.6, + 79.5, + 2.0, + 213.0, + 81.6, + 2.0, + 216.29999999999998, + 48.3, + 2.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 182.1, + 47.699999999999996, + 2.0, + 176.1, + 72.0, + 2.0, + 162.0, + 92.1, + 2.0, + 162.9, + 111.89999999999999, + 2.0, + 167.4, + 117.6, + 2.0, + 161.4, + 54.9, + 2.0 + ], + 
"image_id": 500, + "id": 500, + "num_keypoints": 19, + "bbox": [ + 140, + 33, + 148, + 100 + ], + "iscrowd": 0, + "area": 14800, + "category_id": 1 + }, + { + "keypoints": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 286.2, + 118.8, + 2.0, + 282.0, + 123.89999999999999, + 2.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 252.0, + 102.89999999999999, + 2.0, + 261.9, + 119.39999999999999, + 2.0, + 268.8, + 128.1, + 2.0, + 263.09999999999997, + 60.3, + 2.0, + 253.5, + 84.0, + 2.0, + 236.7, + 103.2, + 2.0, + 230.7, + 121.19999999999999, + 2.0, + 234.6, + 129.0, + 2.0, + 240.29999999999998, + 67.5, + 2.0 + ], + "image_id": 900, + "id": 900, + "num_keypoints": 11, + "bbox": [ + 219, + 46, + 69, + 97 + ], + "iscrowd": 0, + "area": 6693, + "category_id": 1 + } + ] } \ No newline at end of file diff --git a/tests/data/humanart/test_humanart.json b/tests/data/humanart/test_humanart.json index 8cf13e3530..c5ad2788bc 100644 --- a/tests/data/humanart/test_humanart.json +++ b/tests/data/humanart/test_humanart.json @@ -1,716 +1,716 @@ -{ - "info": { - "description": "For testing Human-Art dataset only.", - "year": 2023, - "date_created": "2023/06/12" - }, - "images": [ - { - "file_name": "HumanArt/images/2D_virtual_human/digital_art/000000001648.jpg", - "height": 1750, - "width": 1280, - "id": 2000000001648, - "page_url": "https://www.deviantart.com/endemilk/art/Autumn-Mood-857953165", - "image_url": "https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/cef0f0b2-832e-4f53-95c6-32f822f796ac/de6swwd-8ae0bba7-f879-43db-9f34-33d067ea3683.png/v1/fill/w_1280,h_1750,q_80,strp/autumn_mood_by_endemilk_de6swwd-fullview.jpg?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7ImhlaWdodCI6Ijw9MTc1MCIsInBhdGgiOiJcL2ZcL2NlZjBmMGIyLTgzMmUtNGY1My05NWM2LTMyZjgyMmY3OTZhY1wvZGU2c3d3ZC04YWUwYmJhNy1mODc5LTQzZGItOWYzNC0zM2QwNjdlYTM2ODMucG5nIiwid2lkdGgiOiI8PTEyODAifV1dLCJhdWQiOlsidXJuOnNlcnZpY2U6aW1hZ2Uub3BlcmF0aW9ucyJdfQ.u2McWPeJ1MDJokGkVa3qnJlYJFoldamHt9B6rGtSf9Y", - "picture_name": "Autumn Mood", - "author": "Endemilk", - "description": "digital_art, a girl in a white dress standing under a tree with autumn leaves", - "category": "digital art" - }, - { - "file_name": "HumanArt/images/3D_virtual_human/garage_kits/000000005603.jpg", - "height": 600, - "width": 700, - "id": 12000000005603, - "page_url": "https://www.goodsmile.info/ja/product/6010/%E3%81%AD%E3%82%93%E3%81%A9%E3%82%8D%E3%81%84%E3%81%A9+%E3%82%A8%E3%83%83%E3%82%AF%E3%82%B9+%E3%83%95%E3%83%AB%E3%82%A2%E3%83%BC%E3%83%9E%E3%83%BC.html", - "image_url": "https://images.goodsmile.info/cgm/images/product/20161014/6010/41809/large/7b2d02a6a8a8d89af3a34f70942fdcc7.jpg", - "picture_name": "Irregular hunter who wants peace", - "author": "None", - "description": "garage_kits, a figurine of a character holding a gun", - "category": "garage kits" - }, - { - "file_name": "HumanArt/images/real_human/acrobatics/000000000590.jpg", - "height": 612, - "width": 589, - "id": 15000000000590, - "page_url": "https://www.istockphoto.com/hk/search/2/image?phrase=acrobatics&page=", - "image_url": "https://media.istockphoto.com/photos/women-couple-of-dancers-acrobats-picture-id494524123?k=20&m=494524123&s=612x612&w=0&h=Mt-1N5a2aCS3n6spX_Fw8JRmf3zAO2VnvB4T0mGCN4s=", - "picture_name": "None", - "author": "None", 
- "description": "acrobatics, two women in green and white performing acrobatics", - "category": "acrobatics" - } - ], - "annotations": [ - { - "keypoints": [ - 715.4305, - 970.0197, - 2, - 698.8416, - 942.6802, - 2, - 679.6984, - 941.7231, - 2, - 644.7338, - 948.259, - 2, - 611.9386, - 946.367, - 2, - 518.0118, - 1122.8295, - 2, - 656.3654, - 1106.6009, - 2, - 529.2618, - 1364.4753, - 2, - 589.2787, - 1375.8299, - 2, - 687.9009, - 1377.9864, - 2, - 744.6238, - 1409.0027, - 2, - 557.0198, - 1505.5454, - 2, - 680.6947, - 1499.8197, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "keypoints_21": [ - 715.4305, - 970.0197, - 2, - 698.8416, - 942.6802, - 2, - 679.6984, - 941.7231, - 2, - 644.7338, - 948.259, - 2, - 611.9386, - 946.367, - 2, - 518.0118, - 1122.8295, - 2, - 656.3654, - 1106.6009, - 2, - 529.2618, - 1364.4753, - 2, - 589.2787, - 1375.8299, - 2, - 687.9009, - 1377.9864, - 2, - 744.6238, - 1409.0027, - 2, - 557.0198, - 1505.5454, - 2, - 680.6947, - 1499.8197, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 711.6695, - 1391.3213, - 2, - 764.9766, - 1420.8272, - 2, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "self_contact": [], - "num_keypoints": 13, - "num_keypoints_21": 15, - "iscrowd": 0, - "image_id": 2000000001648, - "area": 288736.90076053096, - "bbox": [ - 468.61884, - 828.9586400000001, - 355.629312, - 811.9041119999999 - ], - "category_id": 1, - "id": 2000000006746, - "annotator": 67037 - }, - { - "keypoints": [ - 313.972, - 252.666, - 2, - 333.8015, - 228.7117, - 2, - 272.5658, - 207.7711, - 2, - 342.3681, - 227.4426, - 2, - 200.6833, - 204.2117, - 2, - 0, - 0, - 0, - 251.3643, - 302.7895, - 2, - 0, - 0, - 0, - 275.9871, - 312.5822, - 2, - 0, - 0, - 0, - 292.1347, - 313.8643, - 2, - 304.7952, - 403.3614, - 2, - 286.269, - 402.473, - 2, - 330.7358, - 441.4618, - 2, - 260.2096, - 441.0565, - 2, - 321.9826, - 495.339, - 2, - 222.4324, - 493.9369, - 2 - ], - "keypoints_21": [ - 313.972, - 252.666, - 2, - 333.8015, - 228.7117, - 2, - 272.5658, - 207.7711, - 2, - 342.3681, - 227.4426, - 2, - 200.6833, - 204.2117, - 2, - 0, - 0, - 0, - 251.3643, - 302.7895, - 2, - 0, - 0, - 0, - 275.9871, - 312.5822, - 2, - 0, - 0, - 0, - 292.1347, - 313.8643, - 2, - 304.7952, - 403.3614, - 2, - 286.269, - 402.473, - 2, - 330.7358, - 441.4618, - 2, - 260.2096, - 441.0565, - 2, - 321.9826, - 495.339, - 2, - 222.4324, - 493.9369, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 398.5162, - 556.9002, - 2, - 212.5182, - 563.4001, - 2 - ], - "self_contact": [], - "num_keypoints": 14, - "num_keypoints_21": 16, - "iscrowd": 0, - "image_id": 12000000005603, - "area": 132932.1180077885, - "bbox": [ - 161.11672, - 132.37402000000003, - 284.87937600000004, - 466.62597999999997 - ], - "category_id": 1, - "id": 12000000076660, - "annotator": 66991 - }, - { - "keypoints": [ - 319.2161, - 546.3765, - 2, - 317.6563, - 536.4973, - 2, - 315.5024, - 536.8374, - 2, - 295.7777, - 539.4827, - 2, - 290.2372, - 538.9287, - 2, - 260.5583, - 539.1473, - 2, - 252.989, - 559.7042, - 2, - 222.0985, - 494.5581, - 2, - 204.3461, - 496.7641, - 2, - 229.7767, - 555.3691, - 2, - 203.9402, - 564.1676, - 2, - 254.6329, - 440.3163, - 2, - 252.7878, - 421.1483, - 2, - 351.9561, - 400.9315, - 2, - 368.0247, - 412.8534, - 2, - 347.6211, - 500.3006, - 2, - 367.0544, - 542.1705, - 2 - ], - "keypoints_21": [ - 319.2161, - 546.3765, - 2, - 317.6563, - 536.4973, - 2, - 315.5024, - 536.8374, - 2, - 295.7777, - 539.4827, - 2, - 290.2372, - 538.9287, - 2, - 260.5583, - 539.1473, - 2, - 252.989, - 559.7042, - 2, - 222.0985, 
- 494.5581, - 2, - 204.3461, - 496.7641, - 2, - 229.7767, - 555.3691, - 2, - 203.9402, - 564.1676, - 2, - 254.6329, - 440.3163, - 2, - 252.7878, - 421.1483, - 2, - 351.9561, - 400.9315, - 2, - 368.0247, - 412.8534, - 2, - 347.6211, - 500.3006, - 2, - 367.0544, - 542.1705, - 2, - 248.5114, - 559.976, - 2, - 253.5939, - 575.1541, - 2, - 357.1097, - 548.0375, - 2, - 379.7624, - 573.8666, - 2 - ], - "self_contact": [ - [ - 245.1376, - 570.4875 - ] - ], - "num_keypoints": 17, - "num_keypoints_21": 21, - "iscrowd": 0, - "image_id": 15000000000590, - "area": 62008.05021846336, - "bbox": [ - 168.77576, - 366.08698000000004, - 253.18396800000005, - 244.91301999999996 - ], - "category_id": 1, - "id": 15000000092347, - "annotator": 66705 - }, - { - "keypoints": [ - 233.1389, - 406.6037, - 2, - 243.5176, - 397.9166, - 2, - 243.0948, - 396.1787, - 2, - 235.8086, - 380.0257, - 2, - 233.4394, - 371.1951, - 2, - 200.7799, - 367.2566, - 2, - 222.3385, - 339.9251, - 2, - 218.5684, - 431.6162, - 2, - 216.3631, - 433.129, - 2, - 238.3363, - 495.4999, - 2, - 240.2118, - 500.6888, - 2, - 253.2291, - 222.9011, - 2, - 270.424, - 250.1, - 2, - 192.7242, - 138.9058, - 2, - 372.9364, - 324.4092, - 2, - 148.4319, - 79.9982, - 2, - 444.6949, - 407.9868, - 2 - ], - "keypoints_21": [ - 233.1389, - 406.6037, - 2, - 243.5176, - 397.9166, - 2, - 243.0948, - 396.1787, - 2, - 235.8086, - 380.0257, - 2, - 233.4394, - 371.1951, - 2, - 200.7799, - 367.2566, - 2, - 222.3385, - 339.9251, - 2, - 218.5684, - 431.6162, - 2, - 216.3631, - 433.129, - 2, - 238.3363, - 495.4999, - 2, - 240.2118, - 500.6888, - 2, - 253.2291, - 222.9011, - 2, - 270.424, - 250.1, - 2, - 192.7242, - 138.9058, - 2, - 372.9364, - 324.4092, - 2, - 148.4319, - 79.9982, - 2, - 444.6949, - 407.9868, - 2, - 245.196, - 517.5082, - 2, - 238.3205, - 541.3807, - 2, - 113.9739, - 40.4267, - 2, - 501.7295, - 448.3217, - 2 - ], - "self_contact": [], - "num_keypoints": 17, - "num_keypoints_21": 21, - "iscrowd": 0, - "image_id": 15000000000590, - "area": 337013.68142, - "bbox": [ - 36.42278, - 0, - 551.57722, - 611 - ], - "category_id": 1, - "id": 15000000092348, - "annotator": 66705 - } - ], - "categories": [ - { - "supercategory": "person", - "id": 1, - "name": "person", - "keypoints": [ - "nose", - "left_eye", - "right_eye", - "left_ear", - "right_ear", - "left_shoulder", - "right_shoulder", - "left_elbow", - "right_elbow", - "left_wrist", - "right_wrist", - "left_hip", - "right_hip", - "left_knee", - "right_knee", - "left_ankle", - "right_ankle", - "left_finger", - "right_finger", - "left_toe", - "right_toe" - ], - "skeleton": [ - [ - 20, - 16 - ], - [ - 16, - 14 - ], - [ - 14, - 12 - ], - [ - 21, - 17 - ], - [ - 17, - 15 - ], - [ - 15, - 13 - ], - [ - 12, - 13 - ], - [ - 6, - 12 - ], - [ - 7, - 13 - ], - [ - 6, - 7 - ], - [ - 6, - 8 - ], - [ - 7, - 9 - ], - [ - 10, - 18 - ], - [ - 8, - 10 - ], - [ - 11, - 19 - ], - [ - 9, - 11 - ], - [ - 2, - 3 - ], - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 4 - ], - [ - 3, - 5 - ], - [ - 4, - 6 - ], - [ - 5, - 7 - ] - ] - } - ] +{ + "info": { + "description": "For testing Human-Art dataset only.", + "year": 2023, + "date_created": "2023/06/12" + }, + "images": [ + { + "file_name": "HumanArt/images/2D_virtual_human/digital_art/000000001648.jpg", + "height": 1750, + "width": 1280, + "id": 2000000001648, + "page_url": "https://www.deviantart.com/endemilk/art/Autumn-Mood-857953165", + "image_url": 
"https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/cef0f0b2-832e-4f53-95c6-32f822f796ac/de6swwd-8ae0bba7-f879-43db-9f34-33d067ea3683.png/v1/fill/w_1280,h_1750,q_80,strp/autumn_mood_by_endemilk_de6swwd-fullview.jpg?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7ImhlaWdodCI6Ijw9MTc1MCIsInBhdGgiOiJcL2ZcL2NlZjBmMGIyLTgzMmUtNGY1My05NWM2LTMyZjgyMmY3OTZhY1wvZGU2c3d3ZC04YWUwYmJhNy1mODc5LTQzZGItOWYzNC0zM2QwNjdlYTM2ODMucG5nIiwid2lkdGgiOiI8PTEyODAifV1dLCJhdWQiOlsidXJuOnNlcnZpY2U6aW1hZ2Uub3BlcmF0aW9ucyJdfQ.u2McWPeJ1MDJokGkVa3qnJlYJFoldamHt9B6rGtSf9Y", + "picture_name": "Autumn Mood", + "author": "Endemilk", + "description": "digital_art, a girl in a white dress standing under a tree with autumn leaves", + "category": "digital art" + }, + { + "file_name": "HumanArt/images/3D_virtual_human/garage_kits/000000005603.jpg", + "height": 600, + "width": 700, + "id": 12000000005603, + "page_url": "https://www.goodsmile.info/ja/product/6010/%E3%81%AD%E3%82%93%E3%81%A9%E3%82%8D%E3%81%84%E3%81%A9+%E3%82%A8%E3%83%83%E3%82%AF%E3%82%B9+%E3%83%95%E3%83%AB%E3%82%A2%E3%83%BC%E3%83%9E%E3%83%BC.html", + "image_url": "https://images.goodsmile.info/cgm/images/product/20161014/6010/41809/large/7b2d02a6a8a8d89af3a34f70942fdcc7.jpg", + "picture_name": "Irregular hunter who wants peace", + "author": "None", + "description": "garage_kits, a figurine of a character holding a gun", + "category": "garage kits" + }, + { + "file_name": "HumanArt/images/real_human/acrobatics/000000000590.jpg", + "height": 612, + "width": 589, + "id": 15000000000590, + "page_url": "https://www.istockphoto.com/hk/search/2/image?phrase=acrobatics&page=", + "image_url": "https://media.istockphoto.com/photos/women-couple-of-dancers-acrobats-picture-id494524123?k=20&m=494524123&s=612x612&w=0&h=Mt-1N5a2aCS3n6spX_Fw8JRmf3zAO2VnvB4T0mGCN4s=", + "picture_name": "None", + "author": "None", + "description": "acrobatics, two women in green and white performing acrobatics", + "category": "acrobatics" + } + ], + "annotations": [ + { + "keypoints": [ + 715.4305, + 970.0197, + 2, + 698.8416, + 942.6802, + 2, + 679.6984, + 941.7231, + 2, + 644.7338, + 948.259, + 2, + 611.9386, + 946.367, + 2, + 518.0118, + 1122.8295, + 2, + 656.3654, + 1106.6009, + 2, + 529.2618, + 1364.4753, + 2, + 589.2787, + 1375.8299, + 2, + 687.9009, + 1377.9864, + 2, + 744.6238, + 1409.0027, + 2, + 557.0198, + 1505.5454, + 2, + 680.6947, + 1499.8197, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "keypoints_21": [ + 715.4305, + 970.0197, + 2, + 698.8416, + 942.6802, + 2, + 679.6984, + 941.7231, + 2, + 644.7338, + 948.259, + 2, + 611.9386, + 946.367, + 2, + 518.0118, + 1122.8295, + 2, + 656.3654, + 1106.6009, + 2, + 529.2618, + 1364.4753, + 2, + 589.2787, + 1375.8299, + 2, + 687.9009, + 1377.9864, + 2, + 744.6238, + 1409.0027, + 2, + 557.0198, + 1505.5454, + 2, + 680.6947, + 1499.8197, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 711.6695, + 1391.3213, + 2, + 764.9766, + 1420.8272, + 2, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "self_contact": [], + "num_keypoints": 13, + "num_keypoints_21": 15, + "iscrowd": 0, + "image_id": 2000000001648, + "area": 288736.90076053096, + "bbox": [ + 468.61884, + 828.9586400000001, + 355.629312, + 811.9041119999999 + ], + "category_id": 1, + "id": 2000000006746, + "annotator": 67037 + }, + { + "keypoints": [ + 313.972, + 252.666, + 2, + 333.8015, + 228.7117, + 2, + 272.5658, + 
207.7711, + 2, + 342.3681, + 227.4426, + 2, + 200.6833, + 204.2117, + 2, + 0, + 0, + 0, + 251.3643, + 302.7895, + 2, + 0, + 0, + 0, + 275.9871, + 312.5822, + 2, + 0, + 0, + 0, + 292.1347, + 313.8643, + 2, + 304.7952, + 403.3614, + 2, + 286.269, + 402.473, + 2, + 330.7358, + 441.4618, + 2, + 260.2096, + 441.0565, + 2, + 321.9826, + 495.339, + 2, + 222.4324, + 493.9369, + 2 + ], + "keypoints_21": [ + 313.972, + 252.666, + 2, + 333.8015, + 228.7117, + 2, + 272.5658, + 207.7711, + 2, + 342.3681, + 227.4426, + 2, + 200.6833, + 204.2117, + 2, + 0, + 0, + 0, + 251.3643, + 302.7895, + 2, + 0, + 0, + 0, + 275.9871, + 312.5822, + 2, + 0, + 0, + 0, + 292.1347, + 313.8643, + 2, + 304.7952, + 403.3614, + 2, + 286.269, + 402.473, + 2, + 330.7358, + 441.4618, + 2, + 260.2096, + 441.0565, + 2, + 321.9826, + 495.339, + 2, + 222.4324, + 493.9369, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 398.5162, + 556.9002, + 2, + 212.5182, + 563.4001, + 2 + ], + "self_contact": [], + "num_keypoints": 14, + "num_keypoints_21": 16, + "iscrowd": 0, + "image_id": 12000000005603, + "area": 132932.1180077885, + "bbox": [ + 161.11672, + 132.37402000000003, + 284.87937600000004, + 466.62597999999997 + ], + "category_id": 1, + "id": 12000000076660, + "annotator": 66991 + }, + { + "keypoints": [ + 319.2161, + 546.3765, + 2, + 317.6563, + 536.4973, + 2, + 315.5024, + 536.8374, + 2, + 295.7777, + 539.4827, + 2, + 290.2372, + 538.9287, + 2, + 260.5583, + 539.1473, + 2, + 252.989, + 559.7042, + 2, + 222.0985, + 494.5581, + 2, + 204.3461, + 496.7641, + 2, + 229.7767, + 555.3691, + 2, + 203.9402, + 564.1676, + 2, + 254.6329, + 440.3163, + 2, + 252.7878, + 421.1483, + 2, + 351.9561, + 400.9315, + 2, + 368.0247, + 412.8534, + 2, + 347.6211, + 500.3006, + 2, + 367.0544, + 542.1705, + 2 + ], + "keypoints_21": [ + 319.2161, + 546.3765, + 2, + 317.6563, + 536.4973, + 2, + 315.5024, + 536.8374, + 2, + 295.7777, + 539.4827, + 2, + 290.2372, + 538.9287, + 2, + 260.5583, + 539.1473, + 2, + 252.989, + 559.7042, + 2, + 222.0985, + 494.5581, + 2, + 204.3461, + 496.7641, + 2, + 229.7767, + 555.3691, + 2, + 203.9402, + 564.1676, + 2, + 254.6329, + 440.3163, + 2, + 252.7878, + 421.1483, + 2, + 351.9561, + 400.9315, + 2, + 368.0247, + 412.8534, + 2, + 347.6211, + 500.3006, + 2, + 367.0544, + 542.1705, + 2, + 248.5114, + 559.976, + 2, + 253.5939, + 575.1541, + 2, + 357.1097, + 548.0375, + 2, + 379.7624, + 573.8666, + 2 + ], + "self_contact": [ + [ + 245.1376, + 570.4875 + ] + ], + "num_keypoints": 17, + "num_keypoints_21": 21, + "iscrowd": 0, + "image_id": 15000000000590, + "area": 62008.05021846336, + "bbox": [ + 168.77576, + 366.08698000000004, + 253.18396800000005, + 244.91301999999996 + ], + "category_id": 1, + "id": 15000000092347, + "annotator": 66705 + }, + { + "keypoints": [ + 233.1389, + 406.6037, + 2, + 243.5176, + 397.9166, + 2, + 243.0948, + 396.1787, + 2, + 235.8086, + 380.0257, + 2, + 233.4394, + 371.1951, + 2, + 200.7799, + 367.2566, + 2, + 222.3385, + 339.9251, + 2, + 218.5684, + 431.6162, + 2, + 216.3631, + 433.129, + 2, + 238.3363, + 495.4999, + 2, + 240.2118, + 500.6888, + 2, + 253.2291, + 222.9011, + 2, + 270.424, + 250.1, + 2, + 192.7242, + 138.9058, + 2, + 372.9364, + 324.4092, + 2, + 148.4319, + 79.9982, + 2, + 444.6949, + 407.9868, + 2 + ], + "keypoints_21": [ + 233.1389, + 406.6037, + 2, + 243.5176, + 397.9166, + 2, + 243.0948, + 396.1787, + 2, + 235.8086, + 380.0257, + 2, + 233.4394, + 371.1951, + 2, + 200.7799, + 367.2566, + 2, + 222.3385, + 339.9251, + 2, + 218.5684, + 431.6162, + 2, + 216.3631, + 433.129, + 2, + 238.3363, + 
495.4999, + 2, + 240.2118, + 500.6888, + 2, + 253.2291, + 222.9011, + 2, + 270.424, + 250.1, + 2, + 192.7242, + 138.9058, + 2, + 372.9364, + 324.4092, + 2, + 148.4319, + 79.9982, + 2, + 444.6949, + 407.9868, + 2, + 245.196, + 517.5082, + 2, + 238.3205, + 541.3807, + 2, + 113.9739, + 40.4267, + 2, + 501.7295, + 448.3217, + 2 + ], + "self_contact": [], + "num_keypoints": 17, + "num_keypoints_21": 21, + "iscrowd": 0, + "image_id": 15000000000590, + "area": 337013.68142, + "bbox": [ + 36.42278, + 0, + 551.57722, + 611 + ], + "category_id": 1, + "id": 15000000092348, + "annotator": 66705 + } + ], + "categories": [ + { + "supercategory": "person", + "id": 1, + "name": "person", + "keypoints": [ + "nose", + "left_eye", + "right_eye", + "left_ear", + "right_ear", + "left_shoulder", + "right_shoulder", + "left_elbow", + "right_elbow", + "left_wrist", + "right_wrist", + "left_hip", + "right_hip", + "left_knee", + "right_knee", + "left_ankle", + "right_ankle", + "left_finger", + "right_finger", + "left_toe", + "right_toe" + ], + "skeleton": [ + [ + 20, + 16 + ], + [ + 16, + 14 + ], + [ + 14, + 12 + ], + [ + 21, + 17 + ], + [ + 17, + 15 + ], + [ + 15, + 13 + ], + [ + 12, + 13 + ], + [ + 6, + 12 + ], + [ + 7, + 13 + ], + [ + 6, + 7 + ], + [ + 6, + 8 + ], + [ + 7, + 9 + ], + [ + 10, + 18 + ], + [ + 8, + 10 + ], + [ + 11, + 19 + ], + [ + 9, + 11 + ], + [ + 2, + 3 + ], + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 4 + ], + [ + 3, + 5 + ], + [ + 4, + 6 + ], + [ + 5, + 7 + ] + ] + } + ] } \ No newline at end of file diff --git a/tests/data/humanart/test_humanart_det_AP_H_56.json b/tests/data/humanart/test_humanart_det_AP_H_56.json index 753caa0c07..8e1a2c24c7 100644 --- a/tests/data/humanart/test_humanart_det_AP_H_56.json +++ b/tests/data/humanart/test_humanart_det_AP_H_56.json @@ -1,145 +1,145 @@ -[ - { - "bbox": [ - 411.55450439453125, - 773.5175170898438, - 925.8963623046875, - 1736.38623046875 - ], - "category_id": 1, - "image_id": 2000000001648, - "score": 0.9018925428390503 - }, - { - "bbox": [ - 23.97265625, - 19.622175216674805, - 1121.828369140625, - 1269.2109375 - ], - "category_id": 1, - "image_id": 2000000001648, - "score": 0.4558742344379425 - }, - { - "bbox": [ - 82.678466796875, - 475.8934020996094, - 1093.4742431640625, - 1717.331298828125 - ], - "category_id": 1, - "image_id": 2000000001648, - "score": 0.37606894969940186 - }, - { - "bbox": [ - 393.59222412109375, - 125.75264739990234, - 895.0135498046875, - 1201.154296875 - ], - "category_id": 1, - "image_id": 2000000001648, - "score": 0.08204865455627441 - }, - { - "bbox": [ - 75.03559875488281, - 52.54023742675781, - 759.2489624023438, - 974.7556762695312 - ], - "category_id": 1, - "image_id": 2000000001648, - "score": 0.07333727180957794 - }, - { - "bbox": [ - 197.08047485351562, - 139.95877075195312, - 402.2601318359375, - 591.4268188476562 - ], - "category_id": 1, - "image_id": 12000000005603, - "score": 0.9604519009590149 - }, - { - "bbox": [ - 67.07928466796875, - 132.88070678710938, - 535.9130249023438, - 600.0 - ], - "category_id": 1, - "image_id": 12000000005603, - "score": 0.10827567428350449 - }, - { - "bbox": [ - 21.64974594116211, - 0.0, - 564.9321899414062, - 592.8584594726562 - ], - "category_id": 1, - "image_id": 15000000000590, - "score": 0.9986042380332947 - }, - { - "bbox": [ - 158.69786071777344, - 249.30482482910156, - 410.9751281738281, - 608.938720703125 - ], - "category_id": 1, - "image_id": 15000000000590, - "score": 0.7594972252845764 - }, - { - "bbox": [ - 184.25045776367188, - 370.5571594238281, - 
361.1768493652344, - 601.1585083007812 - ], - "category_id": 1, - "image_id": 15000000000590, - "score": 0.26641231775283813 - }, - { - "bbox": [ - 129.24253845214844, - 251.26560974121094, - 552.2449951171875, - 517.3319702148438 - ], - "category_id": 1, - "image_id": 15000000000590, - "score": 0.05408962443470955 - }, - { - "bbox": [ - 168.77576, - 366.08698000000004, - 421.95972800000004, - 611.0 - ], - "category_id": 1, - "image_id": 15000000000590, - "score": 0.6465661513194214 - }, - { - "bbox": [ - 36.42278, - 0.0, - 588.0, - 611.0 - ], - "category_id": 1, - "image_id": 15000000000590, - "score": 0.844070429325392 - } +[ + { + "bbox": [ + 411.55450439453125, + 773.5175170898438, + 925.8963623046875, + 1736.38623046875 + ], + "category_id": 1, + "image_id": 2000000001648, + "score": 0.9018925428390503 + }, + { + "bbox": [ + 23.97265625, + 19.622175216674805, + 1121.828369140625, + 1269.2109375 + ], + "category_id": 1, + "image_id": 2000000001648, + "score": 0.4558742344379425 + }, + { + "bbox": [ + 82.678466796875, + 475.8934020996094, + 1093.4742431640625, + 1717.331298828125 + ], + "category_id": 1, + "image_id": 2000000001648, + "score": 0.37606894969940186 + }, + { + "bbox": [ + 393.59222412109375, + 125.75264739990234, + 895.0135498046875, + 1201.154296875 + ], + "category_id": 1, + "image_id": 2000000001648, + "score": 0.08204865455627441 + }, + { + "bbox": [ + 75.03559875488281, + 52.54023742675781, + 759.2489624023438, + 974.7556762695312 + ], + "category_id": 1, + "image_id": 2000000001648, + "score": 0.07333727180957794 + }, + { + "bbox": [ + 197.08047485351562, + 139.95877075195312, + 402.2601318359375, + 591.4268188476562 + ], + "category_id": 1, + "image_id": 12000000005603, + "score": 0.9604519009590149 + }, + { + "bbox": [ + 67.07928466796875, + 132.88070678710938, + 535.9130249023438, + 600.0 + ], + "category_id": 1, + "image_id": 12000000005603, + "score": 0.10827567428350449 + }, + { + "bbox": [ + 21.64974594116211, + 0.0, + 564.9321899414062, + 592.8584594726562 + ], + "category_id": 1, + "image_id": 15000000000590, + "score": 0.9986042380332947 + }, + { + "bbox": [ + 158.69786071777344, + 249.30482482910156, + 410.9751281738281, + 608.938720703125 + ], + "category_id": 1, + "image_id": 15000000000590, + "score": 0.7594972252845764 + }, + { + "bbox": [ + 184.25045776367188, + 370.5571594238281, + 361.1768493652344, + 601.1585083007812 + ], + "category_id": 1, + "image_id": 15000000000590, + "score": 0.26641231775283813 + }, + { + "bbox": [ + 129.24253845214844, + 251.26560974121094, + 552.2449951171875, + 517.3319702148438 + ], + "category_id": 1, + "image_id": 15000000000590, + "score": 0.05408962443470955 + }, + { + "bbox": [ + 168.77576, + 366.08698000000004, + 421.95972800000004, + 611.0 + ], + "category_id": 1, + "image_id": 15000000000590, + "score": 0.6465661513194214 + }, + { + "bbox": [ + 36.42278, + 0.0, + 588.0, + 611.0 + ], + "category_id": 1, + "image_id": 15000000000590, + "score": 0.844070429325392 + } ] \ No newline at end of file diff --git a/tests/data/interhand2.6m/test_interhand2.6m_camera.json b/tests/data/interhand2.6m/test_interhand2.6m_camera.json index fabfe886a1..cadff9da30 100644 --- a/tests/data/interhand2.6m/test_interhand2.6m_camera.json +++ b/tests/data/interhand2.6m/test_interhand2.6m_camera.json @@ -1,162 +1,162 @@ -{ - "3": { - "campos": { - "400026": [ - -415.1940002441406, - 132.24954223632812, - 59.5650749206543 - ] - }, - "camrot": { - "400026": [ - [ - 0.9201921224594116, - -0.012140202336013317, - -0.39127883315086365 - ], - 
[ - 0.06150508299469948, - 0.9915890097618103, - 0.11387889832258224 - ], - [ - 0.38660526275634766, - -0.1288560926914215, - 0.9131990671157837 - ] - ] - }, - "focal": { - "400026": [ - 1261.5291748046875, - 1261.6845703125 - ] - }, - "princpt": { - "400026": [ - 155.8163604736328, - 258.8305969238281 - ] - } - }, - "2": { - "campos": { - "400012": [ - 606.0524291992188, - -174.7548828125, - 163.86656188964844 - ] - }, - "camrot": { - "400012": [ - [ - 0.82091224193573, - 0.05194839835166931, - 0.5686866044998169 - ], - [ - 0.05824033170938492, - 0.9830448031425476, - -0.17387045919895172 - ], - [ - -0.5680767297744751, - 0.17585287988185883, - 0.8039680123329163 - ] - ] - }, - "focal": { - "400012": [ - 1270.7069091796875, - 1270.5194091796875 - ] - }, - "princpt": { - "400012": [ - 196.347412109375, - 240.42515563964844 - ] - } - }, - "7": { - "campos": { - "410053": [ - 973.9876098632812, - -151.85047912597656, - 576.7235107421875 - ] - }, - "camrot": { - "410053": [ - [ - 0.42785099148750305, - 0.07326933741569519, - 0.900874674320221 - ], - [ - 0.10334496945142746, - 0.9862067103385925, - -0.12929096817970276 - ], - [ - -0.8979216814041138, - 0.148418128490448, - 0.41437748074531555 - ] - ] - }, - "focal": { - "410053": [ - 1272.947021484375, - 1272.957275390625 - ] - }, - "princpt": { - "410053": [ - 187.24343872070312, - 243.6494903564453 - ] - } - }, - "4": { - "campos": { - "410028": [ - 224.87350463867188, - 144.3102569580078, - -8.186153411865234 - ] - }, - "camrot": { - "410028": [ - [ - 0.9784372448921204, - 0.024140462279319763, - 0.2051287442445755 - ], - [ - -0.048440802842378616, - 0.9922666549682617, - 0.11428194493055344 - ], - [ - -0.2007835954427719, - -0.12175431102514267, - 0.972040057182312 - ] - ] - }, - "focal": { - "410028": [ - 1274.1224365234375, - 1274.2861328125 - ] - }, - "princpt": { - "410028": [ - 270.805419921875, - 175.498046875 - ] - } - } -} +{ + "3": { + "campos": { + "400026": [ + -415.1940002441406, + 132.24954223632812, + 59.5650749206543 + ] + }, + "camrot": { + "400026": [ + [ + 0.9201921224594116, + -0.012140202336013317, + -0.39127883315086365 + ], + [ + 0.06150508299469948, + 0.9915890097618103, + 0.11387889832258224 + ], + [ + 0.38660526275634766, + -0.1288560926914215, + 0.9131990671157837 + ] + ] + }, + "focal": { + "400026": [ + 1261.5291748046875, + 1261.6845703125 + ] + }, + "princpt": { + "400026": [ + 155.8163604736328, + 258.8305969238281 + ] + } + }, + "2": { + "campos": { + "400012": [ + 606.0524291992188, + -174.7548828125, + 163.86656188964844 + ] + }, + "camrot": { + "400012": [ + [ + 0.82091224193573, + 0.05194839835166931, + 0.5686866044998169 + ], + [ + 0.05824033170938492, + 0.9830448031425476, + -0.17387045919895172 + ], + [ + -0.5680767297744751, + 0.17585287988185883, + 0.8039680123329163 + ] + ] + }, + "focal": { + "400012": [ + 1270.7069091796875, + 1270.5194091796875 + ] + }, + "princpt": { + "400012": [ + 196.347412109375, + 240.42515563964844 + ] + } + }, + "7": { + "campos": { + "410053": [ + 973.9876098632812, + -151.85047912597656, + 576.7235107421875 + ] + }, + "camrot": { + "410053": [ + [ + 0.42785099148750305, + 0.07326933741569519, + 0.900874674320221 + ], + [ + 0.10334496945142746, + 0.9862067103385925, + -0.12929096817970276 + ], + [ + -0.8979216814041138, + 0.148418128490448, + 0.41437748074531555 + ] + ] + }, + "focal": { + "410053": [ + 1272.947021484375, + 1272.957275390625 + ] + }, + "princpt": { + "410053": [ + 187.24343872070312, + 243.6494903564453 + ] + } + }, + "4": { + "campos": { + "410028": [ 
+ 224.87350463867188, + 144.3102569580078, + -8.186153411865234 + ] + }, + "camrot": { + "410028": [ + [ + 0.9784372448921204, + 0.024140462279319763, + 0.2051287442445755 + ], + [ + -0.048440802842378616, + 0.9922666549682617, + 0.11428194493055344 + ], + [ + -0.2007835954427719, + -0.12175431102514267, + 0.972040057182312 + ] + ] + }, + "focal": { + "410028": [ + 1274.1224365234375, + 1274.2861328125 + ] + }, + "princpt": { + "410028": [ + 270.805419921875, + 175.498046875 + ] + } + } +} diff --git a/tests/data/interhand2.6m/test_interhand2.6m_data.json b/tests/data/interhand2.6m/test_interhand2.6m_data.json index 723af749ec..a5e1bd61bf 100644 --- a/tests/data/interhand2.6m/test_interhand2.6m_data.json +++ b/tests/data/interhand2.6m/test_interhand2.6m_data.json @@ -1,610 +1,610 @@ -{ - "images": [ - { - "id": 326750, - "file_name": "image69148.jpg", - "width": 334, - "height": 512, - "capture": 3, - "subject": 3, - "seq_name": "0390_dh_touchROM", - "camera": "400026", - "frame_idx": 69148 - }, - { - "id": 286291, - "file_name": "image44669.jpg", - "width": 334, - "height": 512, - "capture": 2, - "subject": 2, - "seq_name": "0266_dh_pray", - "camera": "400012", - "frame_idx": 44669 - }, - { - "id": 680801, - "file_name": "image29590.jpg", - "width": 334, - "height": 512, - "capture": 7, - "subject": 6, - "seq_name": "0115_rocker_backside", - "camera": "410053", - "frame_idx": 29590 - }, - { - "id": 471953, - "file_name": "image2017.jpg", - "width": 512, - "height": 334, - "capture": 4, - "subject": 0, - "seq_name": "0007_thumbup_normal", - "camera": "410028", - "frame_idx": 2017 - } - ], - "annotations": [ - { - "id": 326750, - "image_id": 326750, - "bbox": [ - 33.56839370727539, - 164.92373657226562, - 185.057861328125, - 142.7256622314453 - ], - "joint_valid": [ - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ] - ], - "hand_type": "interacting", - "hand_type_valid": 1 - }, - { - "id": 286291, - "image_id": 286291, - "bbox": [ - 116.43374633789062, - 79.66770935058594, - 163.1707763671875, - 175.00582885742188 - ], - "joint_valid": [ - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ] - ], - "hand_type": "interacting", - "hand_type_valid": 1 - }, - { - "id": 680801, - "image_id": 680801, - "bbox": [ - 32.624629974365234, - 116.9090805053711, - 182.95919799804688, - 117.79376983642578 - ], - "joint_valid": [ - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 
- ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ] - ], - "hand_type": "left", - "hand_type_valid": 1 - }, - { - "id": 471953, - "image_id": 471953, - "bbox": [ - 154.45904541015625, - 27.944841384887695, - 90.6390380859375, - 184.53550720214844 - ], - "joint_valid": [ - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 1 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ], - [ - 0 - ] - ], - "hand_type": "right", - "hand_type_valid": 1 - } - ] -} +{ + "images": [ + { + "id": 326750, + "file_name": "image69148.jpg", + "width": 334, + "height": 512, + "capture": 3, + "subject": 3, + "seq_name": "0390_dh_touchROM", + "camera": "400026", + "frame_idx": 69148 + }, + { + "id": 286291, + "file_name": "image44669.jpg", + "width": 334, + "height": 512, + "capture": 2, + "subject": 2, + "seq_name": "0266_dh_pray", + "camera": "400012", + "frame_idx": 44669 + }, + { + "id": 680801, + "file_name": "image29590.jpg", + "width": 334, + "height": 512, + "capture": 7, + "subject": 6, + "seq_name": "0115_rocker_backside", + "camera": "410053", + "frame_idx": 29590 + }, + { + "id": 471953, + "file_name": "image2017.jpg", + "width": 512, + "height": 334, + "capture": 4, + "subject": 0, + "seq_name": "0007_thumbup_normal", + "camera": "410028", + "frame_idx": 2017 + } + ], + "annotations": [ + { + "id": 326750, + "image_id": 326750, + "bbox": [ + 33.56839370727539, + 164.92373657226562, + 185.057861328125, + 142.7256622314453 + ], + "joint_valid": [ + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ] + ], + "hand_type": "interacting", + "hand_type_valid": 1 + }, + { + "id": 286291, + "image_id": 286291, + "bbox": [ + 116.43374633789062, + 79.66770935058594, + 163.1707763671875, + 175.00582885742188 + ], + "joint_valid": [ + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ] + ], + "hand_type": "interacting", + "hand_type_valid": 1 + }, + { + "id": 
680801, + "image_id": 680801, + "bbox": [ + 32.624629974365234, + 116.9090805053711, + 182.95919799804688, + 117.79376983642578 + ], + "joint_valid": [ + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ] + ], + "hand_type": "left", + "hand_type_valid": 1 + }, + { + "id": 471953, + "image_id": 471953, + "bbox": [ + 154.45904541015625, + 27.944841384887695, + 90.6390380859375, + 184.53550720214844 + ], + "joint_valid": [ + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 1 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ], + [ + 0 + ] + ], + "hand_type": "right", + "hand_type_valid": 1 + } + ] +} diff --git a/tests/data/interhand2.6m/test_interhand2.6m_joint_3d.json b/tests/data/interhand2.6m/test_interhand2.6m_joint_3d.json index c7df48ec6d..171a5dc511 100644 --- a/tests/data/interhand2.6m/test_interhand2.6m_joint_3d.json +++ b/tests/data/interhand2.6m/test_interhand2.6m_joint_3d.json @@ -1,1386 +1,1386 @@ -{ - "3": { - "69148": { - "world_coord": [ - [ - 43.03519821166992, - -82.2948989868164, - 1090.739990234375 - ], - [ - 15.719200134277344, - -80.0010986328125, - 1093.1600341796875 - ], - [ - -14.644499778747559, - -69.28589630126953, - 1099.43994140625 - ], - [ - -49.500701904296875, - -48.752601623535156, - 1105.739990234375 - ], - [ - 54.10329818725586, - -52.207000732421875, - 1071.3699951171875 - ], - [ - 44.71070098876953, - -60.90570068359375, - 1053.8299560546875 - ], - [ - 23.355300903320312, - -66.95800018310547, - 1042.2099609375 - ], - [ - -16.109899520874023, - -59.50210189819336, - 1054.7900390625 - ], - [ - 46.1421012878418, - -27.448999404907227, - 1044.06005859375 - ], - [ - 37.125099182128906, - -28.523000717163086, - 1025.0699462890625 - ], - [ - 11.805299758911133, - -33.19449996948242, - 1015.8599853515625 - ], - [ - -16.28969955444336, - -35.808101654052734, - 1042.489990234375 - ], - [ - 38.874000549316406, - -5.6127800941467285, - 1047.969970703125 - ], - [ - 29.25860023498535, - 1.5931299924850464, - 1033.010009765625 - ], - [ - 5.099699974060059, - 0.5625370144844055, - 1025.989990234375 - ], - [ - -19.3031005859375, - -10.963600158691406, - 1047.75 - ], - [ - 32.95539855957031, - 15.77239990234375, - 1067.75 - ], - [ - 20.76289939880371, - 22.153799057006836, - 1056.8699951171875 - ], - [ - 1.3557000160217285, - 20.561199188232422, - 1053.72998046875 - ], - [ - -22.658199310302734, - 9.142279624938965, - 1060.719970703125 - ], - [ - -73.1697006225586, - -24.75469970703125, - 1109.239990234375 - ], - [ - -25.849300384521484, - -76.86360168457031, - 1051.6700439453125 - ], - [ - -3.0323801040649414, - -76.0531997680664, - 1065.52001953125 - ], - [ - 
23.313899993896484, - -64.78929901123047, - 1083.3800048828125 - ], - [ - 52.25170135498047, - -44.1338005065918, - 1104.050048828125 - ], - [ - -33.858699798583984, - -56.21229934692383, - 1052.5 - ], - [ - -22.00670051574707, - -54.78179931640625, - 1034.52001953125 - ], - [ - -1.2521899938583374, - -52.484100341796875, - 1021.3099975585938 - ], - [ - 34.637001037597656, - -44.75859832763672, - 1045.5 - ], - [ - -38.37810134887695, - -28.615999221801758, - 1048.77001953125 - ], - [ - -27.590499877929688, - -21.247900009155273, - 1031.81005859375 - ], - [ - -2.5142500400543213, - -15.51039981842041, - 1018.5399780273438 - ], - [ - 27.976900100708008, - -17.880300521850586, - 1042.260009765625 - ], - [ - -42.81999969482422, - -7.296229839324951, - 1054.06005859375 - ], - [ - -29.135400772094727, - 4.149099826812744, - 1043.780029296875 - ], - [ - -3.725130081176758, - 12.038700103759766, - 1038.300048828125 - ], - [ - 22.907699584960938, - 3.807229995727539, - 1054.469970703125 - ], - [ - -37.29899978637695, - 14.395500183105469, - 1082.6500244140625 - ], - [ - -24.36440086364746, - 21.874000549316406, - 1073.199951171875 - ], - [ - -4.3188300132751465, - 23.260000228881836, - 1067.699951171875 - ], - [ - 19.15329933166504, - 15.103500366210938, - 1070.9300537109375 - ], - [ - 68.70819854736328, - -17.395599365234375, - 1113.5 - ] - ], - "joint_valid": [ - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ] - ], - "hand_type": "interacting", - "hand_type_valid": true - } - }, - "2": { - "44669": { - "world_coord": [ - [ - 5.577770233154297, - -108.26300048828125, - 1036.1800537109375 - ], - [ - -1.7330399751663208, - -83.05719757080078, - 1046.1300048828125 - ], - [ - -9.004420280456543, - -57.55229949951172, - 1056.969970703125 - ], - [ - -15.412199974060059, - -23.791000366210938, - 1063.6199951171875 - ], - [ - 14.21619987487793, - -101.65699768066406, - 939.3079833984375 - ], - [ - 8.966190338134766, - -89.8812026977539, - 958.7620239257812 - ], - [ - 2.066649913787842, - -76.95020294189453, - 978.1170043945312 - ], - [ - -10.0802001953125, - -54.769500732421875, - 1008.3200073242188 - ], - [ - 15.2121000289917, - -91.22869873046875, - 921.1090087890625 - ], - [ - 9.925020217895508, - -78.84239959716797, - 940.4929809570312 - ], - [ - 0.7208520174026489, - -62.57080078125, - 963.5479736328125 - ], - [ - -12.486300468444824, - -35.79169845581055, - 996.291015625 - ], - [ - 15.342300415039062, - -71.01309967041016, - 920.666015625 - ], - [ - 10.613200187683105, - -56.69279861450195, - 938.3099975585938 - ], - [ - 3.1483700275421143, - -40.60240173339844, - 958.9559936523438 - ], - [ - -7.7616801261901855, - -16.659000396728516, - 990.3289794921875 - ], - [ - 9.923910140991211, - -27.469100952148438, - 926.0250244140625 - ], - [ - 7.101960182189941, - -17.535900115966797, - 944.4459838867188 - ], - [ - 3.706239938735962, - -9.478739738464355, - 961.2869873046875 - ], - [ - 
-3.7822699546813965, - 4.785309791564941, - 988.9990234375 - ], - [ - -38.23350143432617, - 10.85420036315918, - 1060.1099853515625 - ], - [ - 16.591100692749023, - -104.06300354003906, - 1032.6300048828125 - ], - [ - 17.85449981689453, - -79.44409942626953, - 1044.3399658203125 - ], - [ - 20.3125, - -54.15850067138672, - 1059.300048828125 - ], - [ - 23.35300064086914, - -20.347400665283203, - 1065.97998046875 - ], - [ - 28.29199981689453, - -103.08699798583984, - 941.9169921875 - ], - [ - 25.710399627685547, - -89.40409851074219, - 960.614013671875 - ], - [ - 24.782400131225586, - -75.04440307617188, - 980.3049926757812 - ], - [ - 28.035999298095703, - -51.11090087890625, - 1011.47998046875 - ], - [ - 28.736099243164062, - -94.62069702148438, - 921.4580078125 - ], - [ - 26.57539939880371, - -79.62640380859375, - 940.0040283203125 - ], - [ - 27.174400329589844, - -61.3489990234375, - 963.3049926757812 - ], - [ - 30.206899642944336, - -34.29090118408203, - 998.177001953125 - ], - [ - 27.319000244140625, - -72.35669708251953, - 919.8040161132812 - ], - [ - 24.843399047851562, - -56.612098693847656, - 937.927001953125 - ], - [ - 24.483699798583984, - -40.11029815673828, - 958.5869750976562 - ], - [ - 26.43560028076172, - -15.020400047302246, - 993.2479858398438 - ], - [ - 21.380199432373047, - -28.9552001953125, - 928.9580078125 - ], - [ - 19.721099853515625, - -17.84980010986328, - 945.948974609375 - ], - [ - 19.108400344848633, - -9.263039588928223, - 961.9609985351562 - ], - [ - 21.694400787353516, - 6.166550159454346, - 992.6019897460938 - ], - [ - 41.08219909667969, - 14.29419994354248, - 1066.219970703125 - ] - ], - "joint_valid": [ - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ] - ], - "hand_type": "interacting", - "hand_type_valid": true - } - }, - "7": { - "29590": { - "world_coord": [ - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - -40.71049880981445, - -95.89289855957031, - 957.885986328125 - ], - [ - -26.640199661254883, - -90.76909637451172, - 980.64697265625 - ], - [ - -13.102499961853027, - -84.02850341796875, - 1006.030029296875 - ], - [ - -6.498330116271973, - -56.183799743652344, - 1019.77001953125 - ], - [ - -41.832698822021484, - -92.818603515625, - 924.9089965820312 - ], - [ - -22.394100189208984, - -89.95939636230469, - 928.322998046875 - ], - [ - -1.334820032119751, - -85.85769653320312, - 
937.4730224609375 - ], - [ - 16.722900390625, - -76.81639862060547, - 969.6079711914062 - ], - [ - -50.71979904174805, - -79.05770111083984, - 930.426025390625 - ], - [ - -30.792600631713867, - -72.3136978149414, - 923.927978515625 - ], - [ - -3.884079933166504, - -64.59410095214844, - 926.760986328125 - ], - [ - 17.58009910583496, - -55.07429885864258, - 961.2780151367188 - ], - [ - -49.63779830932617, - -60.04690170288086, - 929.5150146484375 - ], - [ - -30.802799224853516, - -51.40850067138672, - 926.1209716796875 - ], - [ - -7.165579795837402, - -42.4640998840332, - 930.6539916992188 - ], - [ - 11.347599983215332, - -35.320701599121094, - 960.9920043945312 - ], - [ - -39.11000061035156, - -22.250900268554688, - 922.677978515625 - ], - [ - -25.091400146484375, - -18.327800750732422, - 932.7310180664062 - ], - [ - -11.675299644470215, - -16.21780014038086, - 943.9849853515625 - ], - [ - -1.8433400392532349, - -18.33609962463379, - 966.3060302734375 - ], - [ - 12.858099937438965, - -27.72319984436035, - 1035.3299560546875 - ] - ], - "joint_valid": [ - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ] - ], - "hand_type": "left", - "hand_type_valid": true - } - }, - "4": { - "2017": { - "world_coord": [ - [ - -43.12799835205078, - -103.62300109863281, - 1034.6500244140625 - ], - [ - -46.652801513671875, - -77.29830169677734, - 1047.8599853515625 - ], - [ - -61.026798248291016, - -60.12670135498047, - 1071.8699951171875 - ], - [ - -65.07230377197266, - -23.870800018310547, - 1084.949951171875 - ], - [ - -21.526500701904297, - -36.08110046386719, - 1048.530029296875 - ], - [ - -23.342899322509766, - -42.05630111694336, - 1027.739990234375 - ], - [ - -40.31650161743164, - -48.307098388671875, - 1008.1400146484375 - ], - [ - -80.72380065917969, - -43.20439910888672, - 1025.510009765625 - ], - [ - -28.87649917602539, - -21.68560028076172, - 1051.6800537109375 - ], - [ - -20.812700271606445, - -22.777000427246094, - 1031.489990234375 - ], - [ - -34.154598236083984, - -25.006399154663086, - 1004.1900024414062 - ], - [ - -80.04650115966797, - -17.935100555419922, - 1016.8699951171875 - ], - [ - -29.98819923400879, - -4.726659774780273, - 1053.030029296875 - ], - [ - -20.322599411010742, - -2.968640089035034, - 1034.4000244140625 - ], - [ - -30.557600021362305, - -0.6155570149421692, - 1006.489990234375 - ], - [ - -75.96330261230469, - 6.1682000160217285, - 1017.4600219726562 - ], - [ - -36.91109848022461, - 10.100500106811523, - 1045.4200439453125 - ], - [ - -28.660600662231445, - 15.840399742126465, - 1029.7099609375 - ], - [ - -37.89339828491211, - 24.52589988708496, - 1012.3099975585938 - ], - [ - -72.37090301513672, - 29.537099838256836, - 1021.8099975585938 - ], - [ - -93.10230255126953, - 5.9222798347473145, - 1101.989990234375 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, 
- 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - -60.13209915161133, - -4.4926300048828125, - 1036.8599853515625 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ], - [ - 1.0, - 1.0, - 1.0 - ] - ], - "joint_valid": [ - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - true - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ], - [ - false - ] - ], - "hand_type": "right", - "hand_type_valid": true - } - } -} +{ + "3": { + "69148": { + "world_coord": [ + [ + 43.03519821166992, + -82.2948989868164, + 1090.739990234375 + ], + [ + 15.719200134277344, + -80.0010986328125, + 1093.1600341796875 + ], + [ + -14.644499778747559, + -69.28589630126953, + 1099.43994140625 + ], + [ + -49.500701904296875, + -48.752601623535156, + 1105.739990234375 + ], + [ + 54.10329818725586, + -52.207000732421875, + 1071.3699951171875 + ], + [ + 44.71070098876953, + -60.90570068359375, + 1053.8299560546875 + ], + [ + 23.355300903320312, + -66.95800018310547, + 1042.2099609375 + ], + [ + -16.109899520874023, + -59.50210189819336, + 1054.7900390625 + ], + [ + 46.1421012878418, + -27.448999404907227, + 1044.06005859375 + ], + [ + 37.125099182128906, + -28.523000717163086, + 1025.0699462890625 + ], + [ + 11.805299758911133, + -33.19449996948242, + 1015.8599853515625 + ], + [ + -16.28969955444336, + -35.808101654052734, + 1042.489990234375 + ], + [ + 38.874000549316406, + -5.6127800941467285, + 1047.969970703125 + ], + [ + 29.25860023498535, + 1.5931299924850464, + 1033.010009765625 + ], + [ + 5.099699974060059, + 0.5625370144844055, + 1025.989990234375 + ], + [ + -19.3031005859375, + -10.963600158691406, + 1047.75 + ], + [ + 32.95539855957031, + 15.77239990234375, + 1067.75 + ], + [ + 20.76289939880371, + 22.153799057006836, + 1056.8699951171875 + ], + [ + 1.3557000160217285, + 20.561199188232422, + 1053.72998046875 + ], + [ + -22.658199310302734, + 9.142279624938965, + 1060.719970703125 + ], + [ + -73.1697006225586, + -24.75469970703125, + 1109.239990234375 + ], + [ + -25.849300384521484, + -76.86360168457031, + 1051.6700439453125 + ], + [ + -3.0323801040649414, + -76.0531997680664, + 1065.52001953125 + ], + [ + 23.313899993896484, + -64.78929901123047, + 1083.3800048828125 + ], + [ + 52.25170135498047, + -44.1338005065918, + 1104.050048828125 + ], + [ + -33.858699798583984, + -56.21229934692383, + 1052.5 + ], + [ + -22.00670051574707, + -54.78179931640625, + 1034.52001953125 + ], + [ + -1.2521899938583374, + -52.484100341796875, + 1021.3099975585938 + ], + [ + 34.637001037597656, + -44.75859832763672, + 1045.5 + ], + [ + -38.37810134887695, + -28.615999221801758, + 1048.77001953125 + ], + [ + -27.590499877929688, + -21.247900009155273, + 
1031.81005859375 + ], + [ + -2.5142500400543213, + -15.51039981842041, + 1018.5399780273438 + ], + [ + 27.976900100708008, + -17.880300521850586, + 1042.260009765625 + ], + [ + -42.81999969482422, + -7.296229839324951, + 1054.06005859375 + ], + [ + -29.135400772094727, + 4.149099826812744, + 1043.780029296875 + ], + [ + -3.725130081176758, + 12.038700103759766, + 1038.300048828125 + ], + [ + 22.907699584960938, + 3.807229995727539, + 1054.469970703125 + ], + [ + -37.29899978637695, + 14.395500183105469, + 1082.6500244140625 + ], + [ + -24.36440086364746, + 21.874000549316406, + 1073.199951171875 + ], + [ + -4.3188300132751465, + 23.260000228881836, + 1067.699951171875 + ], + [ + 19.15329933166504, + 15.103500366210938, + 1070.9300537109375 + ], + [ + 68.70819854736328, + -17.395599365234375, + 1113.5 + ] + ], + "joint_valid": [ + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ] + ], + "hand_type": "interacting", + "hand_type_valid": true + } + }, + "2": { + "44669": { + "world_coord": [ + [ + 5.577770233154297, + -108.26300048828125, + 1036.1800537109375 + ], + [ + -1.7330399751663208, + -83.05719757080078, + 1046.1300048828125 + ], + [ + -9.004420280456543, + -57.55229949951172, + 1056.969970703125 + ], + [ + -15.412199974060059, + -23.791000366210938, + 1063.6199951171875 + ], + [ + 14.21619987487793, + -101.65699768066406, + 939.3079833984375 + ], + [ + 8.966190338134766, + -89.8812026977539, + 958.7620239257812 + ], + [ + 2.066649913787842, + -76.95020294189453, + 978.1170043945312 + ], + [ + -10.0802001953125, + -54.769500732421875, + 1008.3200073242188 + ], + [ + 15.2121000289917, + -91.22869873046875, + 921.1090087890625 + ], + [ + 9.925020217895508, + -78.84239959716797, + 940.4929809570312 + ], + [ + 0.7208520174026489, + -62.57080078125, + 963.5479736328125 + ], + [ + -12.486300468444824, + -35.79169845581055, + 996.291015625 + ], + [ + 15.342300415039062, + -71.01309967041016, + 920.666015625 + ], + [ + 10.613200187683105, + -56.69279861450195, + 938.3099975585938 + ], + [ + 3.1483700275421143, + -40.60240173339844, + 958.9559936523438 + ], + [ + -7.7616801261901855, + -16.659000396728516, + 990.3289794921875 + ], + [ + 9.923910140991211, + -27.469100952148438, + 926.0250244140625 + ], + [ + 7.101960182189941, + -17.535900115966797, + 944.4459838867188 + ], + [ + 3.706239938735962, + -9.478739738464355, + 961.2869873046875 + ], + [ + -3.7822699546813965, + 4.785309791564941, + 988.9990234375 + ], + [ + -38.23350143432617, + 10.85420036315918, + 1060.1099853515625 + ], + [ + 16.591100692749023, + -104.06300354003906, + 1032.6300048828125 + ], + [ + 17.85449981689453, + -79.44409942626953, + 1044.3399658203125 + ], + [ + 20.3125, + -54.15850067138672, + 1059.300048828125 + ], + [ + 23.35300064086914, + -20.347400665283203, + 1065.97998046875 + ], + [ + 28.29199981689453, + -103.08699798583984, + 941.9169921875 + ], + [ + 25.710399627685547, + -89.40409851074219, + 
960.614013671875 + ], + [ + 24.782400131225586, + -75.04440307617188, + 980.3049926757812 + ], + [ + 28.035999298095703, + -51.11090087890625, + 1011.47998046875 + ], + [ + 28.736099243164062, + -94.62069702148438, + 921.4580078125 + ], + [ + 26.57539939880371, + -79.62640380859375, + 940.0040283203125 + ], + [ + 27.174400329589844, + -61.3489990234375, + 963.3049926757812 + ], + [ + 30.206899642944336, + -34.29090118408203, + 998.177001953125 + ], + [ + 27.319000244140625, + -72.35669708251953, + 919.8040161132812 + ], + [ + 24.843399047851562, + -56.612098693847656, + 937.927001953125 + ], + [ + 24.483699798583984, + -40.11029815673828, + 958.5869750976562 + ], + [ + 26.43560028076172, + -15.020400047302246, + 993.2479858398438 + ], + [ + 21.380199432373047, + -28.9552001953125, + 928.9580078125 + ], + [ + 19.721099853515625, + -17.84980010986328, + 945.948974609375 + ], + [ + 19.108400344848633, + -9.263039588928223, + 961.9609985351562 + ], + [ + 21.694400787353516, + 6.166550159454346, + 992.6019897460938 + ], + [ + 41.08219909667969, + 14.29419994354248, + 1066.219970703125 + ] + ], + "joint_valid": [ + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ] + ], + "hand_type": "interacting", + "hand_type_valid": true + } + }, + "7": { + "29590": { + "world_coord": [ + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + -40.71049880981445, + -95.89289855957031, + 957.885986328125 + ], + [ + -26.640199661254883, + -90.76909637451172, + 980.64697265625 + ], + [ + -13.102499961853027, + -84.02850341796875, + 1006.030029296875 + ], + [ + -6.498330116271973, + -56.183799743652344, + 1019.77001953125 + ], + [ + -41.832698822021484, + -92.818603515625, + 924.9089965820312 + ], + [ + -22.394100189208984, + -89.95939636230469, + 928.322998046875 + ], + [ + -1.334820032119751, + -85.85769653320312, + 937.4730224609375 + ], + [ + 16.722900390625, + -76.81639862060547, + 969.6079711914062 + ], + [ + -50.71979904174805, + -79.05770111083984, + 930.426025390625 + ], + [ + -30.792600631713867, + -72.3136978149414, + 923.927978515625 + ], + [ + -3.884079933166504, + -64.59410095214844, + 926.760986328125 + ], + [ + 17.58009910583496, + -55.07429885864258, + 961.2780151367188 + ], + [ + -49.63779830932617, + -60.04690170288086, + 929.5150146484375 + ], + [ + -30.802799224853516, + -51.40850067138672, + 926.1209716796875 + ], + [ + 
-7.165579795837402, + -42.4640998840332, + 930.6539916992188 + ], + [ + 11.347599983215332, + -35.320701599121094, + 960.9920043945312 + ], + [ + -39.11000061035156, + -22.250900268554688, + 922.677978515625 + ], + [ + -25.091400146484375, + -18.327800750732422, + 932.7310180664062 + ], + [ + -11.675299644470215, + -16.21780014038086, + 943.9849853515625 + ], + [ + -1.8433400392532349, + -18.33609962463379, + 966.3060302734375 + ], + [ + 12.858099937438965, + -27.72319984436035, + 1035.3299560546875 + ] + ], + "joint_valid": [ + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ] + ], + "hand_type": "left", + "hand_type_valid": true + } + }, + "4": { + "2017": { + "world_coord": [ + [ + -43.12799835205078, + -103.62300109863281, + 1034.6500244140625 + ], + [ + -46.652801513671875, + -77.29830169677734, + 1047.8599853515625 + ], + [ + -61.026798248291016, + -60.12670135498047, + 1071.8699951171875 + ], + [ + -65.07230377197266, + -23.870800018310547, + 1084.949951171875 + ], + [ + -21.526500701904297, + -36.08110046386719, + 1048.530029296875 + ], + [ + -23.342899322509766, + -42.05630111694336, + 1027.739990234375 + ], + [ + -40.31650161743164, + -48.307098388671875, + 1008.1400146484375 + ], + [ + -80.72380065917969, + -43.20439910888672, + 1025.510009765625 + ], + [ + -28.87649917602539, + -21.68560028076172, + 1051.6800537109375 + ], + [ + -20.812700271606445, + -22.777000427246094, + 1031.489990234375 + ], + [ + -34.154598236083984, + -25.006399154663086, + 1004.1900024414062 + ], + [ + -80.04650115966797, + -17.935100555419922, + 1016.8699951171875 + ], + [ + -29.98819923400879, + -4.726659774780273, + 1053.030029296875 + ], + [ + -20.322599411010742, + -2.968640089035034, + 1034.4000244140625 + ], + [ + -30.557600021362305, + -0.6155570149421692, + 1006.489990234375 + ], + [ + -75.96330261230469, + 6.1682000160217285, + 1017.4600219726562 + ], + [ + -36.91109848022461, + 10.100500106811523, + 1045.4200439453125 + ], + [ + -28.660600662231445, + 15.840399742126465, + 1029.7099609375 + ], + [ + -37.89339828491211, + 24.52589988708496, + 1012.3099975585938 + ], + [ + -72.37090301513672, + 29.537099838256836, + 1021.8099975585938 + ], + [ + -93.10230255126953, + 5.9222798347473145, + 1101.989990234375 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + -60.13209915161133, + -4.4926300048828125, + 1036.8599853515625 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ], + [ + 1.0, + 1.0, + 1.0 + ] + ], + 
"joint_valid": [ + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + true + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ], + [ + false + ] + ], + "hand_type": "right", + "hand_type_valid": true + } + } +} diff --git a/tests/data/jhmdb/test_jhmdb_sub1.json b/tests/data/jhmdb/test_jhmdb_sub1.json index 3c9d8daa61..10bb283dba 100644 --- a/tests/data/jhmdb/test_jhmdb_sub1.json +++ b/tests/data/jhmdb/test_jhmdb_sub1.json @@ -1,298 +1,298 @@ -{ - "categories": [ - { - "supercategory": "person", - "id": 1, - "name": "person", - "keypoints": [ - "neck", - "belly", - "head", - "right_shoulder", - "left_shoulder", - "right_hip", - "left_hip", - "right_elbow", - "left_elbow", - "right_knee", - "left_knee", - "right_wrist", - "left_wrist", - "right_ankle", - "left_ankle" - ], - "skeleton": [ - [ - 1, - 3 - ], - [ - 1, - 4 - ], - [ - 1, - 5 - ], - [ - 1, - 2 - ], - [ - 4, - 8 - ], - [ - 8, - 12 - ], - [ - 5, - 9 - ], - [ - 9, - 13 - ], - [ - 2, - 6 - ], - [ - 2, - 7 - ], - [ - 6, - 10 - ], - [ - 10, - 14 - ], - [ - 7, - 11 - ], - [ - 11, - 15 - ] - ] - } - ], - "images": [ - { - "is_labeled": true, - "file_name": "Frisbee_catch_f_cm_np1_ri_med_0/00001.png", - "nframes": 37, - "frame_id": 2280001, - "vid_id": "00228", - "id": 2280001, - "width": 320, - "height": 240 - }, - { - "is_labeled": true, - "file_name": "Frisbee_catch_f_cm_np1_ri_med_1/00001.png", - "nframes": 40, - "frame_id": 2290001, - "vid_id": "00229", - "id": 2290001, - "width": 320, - "height": 240 - }, - { - "is_labeled": true, - "file_name": "Goalkeeper_Training_Day_@_7_catch_f_cm_np1_ri_med_0/00001.png", - "nframes": 30, - "frame_id": 2300001, - "vid_id": "00230", - "id": 2300001, - "width": 320, - "height": 240 - } - ], - "annotations": [ - { - "keypoints": [ - 98.851746, - 92.59851, - 2.0, - 101.382222, - 133.488694, - 2.0, - 100.914365, - 79.770933, - 2.0, - 86.888258, - 101.976452, - 2.0, - 107.314272, - 103.37138, - 2.0, - 96.914279, - 145.028519, - 2.0, - 106.514281, - 141.828552, - 2.0, - 91.779302, - 90.131713, - 2.0, - 111.71446, - 119.029127, - 2.0, - 101.371546, - 177.429379, - 2.0, - 113.428535, - 169.257124, - 2.0, - 90.261035, - 71.796419, - 2.0, - 125.372119, - 117.142762, - 2.0, - 96.68488, - 206.18226, - 2.0, - 87.838304, - 191.933582, - 2.0 - ], - "track_id": 0, - "image_id": 2280001, - "bbox": [ - 79.0, - 62.0, - 54.0, - 159.0 - ], - "scores": [], - "category_id": 1, - "id": 1000002280001 - }, - { - "keypoints": [ - 126.293586, - 86.516958, - 2.0, - 125.127052, - 119.880592, - 2.0, - 128.800121, - 77.713852, - 2.0, - 123.142858, - 93.771388, - 2.0, - 127.599998, - 93.314242, - 2.0, - 125.314285, - 126.685723, - 2.0, - 125.257142, - 128.685637, - 2.0, - 122.857184, - 111.302686, - 2.0, - 128.40003, - 107.885918, - 2.0, - 122.228575, - 148.91426, - 2.0, - 125.600109, - 150.403006, - 2.0, - 141.391708, - 106.511998, - 2.0, - 141.254766, - 105.486158, - 2.0, - 119.657303, - 169.255877, - 2.0, - 127.656398, - 173.757251, - 2.0 - ], - "track_id": 0, - "image_id": 
2290001, - "bbox": [ - 114.0, - 68.0, - 38.0, - 115.0 - ], - "scores": [], - "category_id": 1, - "id": 1000002290001 - }, - { - "keypoints": [ - 104.590181, - 138.44876, - 2.0, - 105.733877, - 165.843418, - 2.0, - 104.400092, - 130.809558, - 2.0, - 113.714288, - 142.914297, - 2.0, - 97.999996, - 146.228585, - 2.0, - 110.914299, - 173.028594, - 2.0, - 102.628562, - 171.028599, - 2.0, - 116.687889, - 156.478492, - 2.0, - 94.455572, - 157.210571, - 2.0, - 121.257055, - 190.342707, - 2.0, - 95.200265, - 191.484992, - 2.0, - 120.571144, - 170.45612, - 2.0, - 93.885656, - 169.029784, - 2.0, - 128.177332, - 206.720448, - 2.0, - 90.000104, - 209.256786, - 2.0 - ], - "track_id": 0, - "image_id": 2300001, - "bbox": [ - 84.0, - 123.0, - 51.0, - 94.0 - ], - "scores": [], - "category_id": 1, - "id": 1000002300001 - } - ] -} +{ + "categories": [ + { + "supercategory": "person", + "id": 1, + "name": "person", + "keypoints": [ + "neck", + "belly", + "head", + "right_shoulder", + "left_shoulder", + "right_hip", + "left_hip", + "right_elbow", + "left_elbow", + "right_knee", + "left_knee", + "right_wrist", + "left_wrist", + "right_ankle", + "left_ankle" + ], + "skeleton": [ + [ + 1, + 3 + ], + [ + 1, + 4 + ], + [ + 1, + 5 + ], + [ + 1, + 2 + ], + [ + 4, + 8 + ], + [ + 8, + 12 + ], + [ + 5, + 9 + ], + [ + 9, + 13 + ], + [ + 2, + 6 + ], + [ + 2, + 7 + ], + [ + 6, + 10 + ], + [ + 10, + 14 + ], + [ + 7, + 11 + ], + [ + 11, + 15 + ] + ] + } + ], + "images": [ + { + "is_labeled": true, + "file_name": "Frisbee_catch_f_cm_np1_ri_med_0/00001.png", + "nframes": 37, + "frame_id": 2280001, + "vid_id": "00228", + "id": 2280001, + "width": 320, + "height": 240 + }, + { + "is_labeled": true, + "file_name": "Frisbee_catch_f_cm_np1_ri_med_1/00001.png", + "nframes": 40, + "frame_id": 2290001, + "vid_id": "00229", + "id": 2290001, + "width": 320, + "height": 240 + }, + { + "is_labeled": true, + "file_name": "Goalkeeper_Training_Day_@_7_catch_f_cm_np1_ri_med_0/00001.png", + "nframes": 30, + "frame_id": 2300001, + "vid_id": "00230", + "id": 2300001, + "width": 320, + "height": 240 + } + ], + "annotations": [ + { + "keypoints": [ + 98.851746, + 92.59851, + 2.0, + 101.382222, + 133.488694, + 2.0, + 100.914365, + 79.770933, + 2.0, + 86.888258, + 101.976452, + 2.0, + 107.314272, + 103.37138, + 2.0, + 96.914279, + 145.028519, + 2.0, + 106.514281, + 141.828552, + 2.0, + 91.779302, + 90.131713, + 2.0, + 111.71446, + 119.029127, + 2.0, + 101.371546, + 177.429379, + 2.0, + 113.428535, + 169.257124, + 2.0, + 90.261035, + 71.796419, + 2.0, + 125.372119, + 117.142762, + 2.0, + 96.68488, + 206.18226, + 2.0, + 87.838304, + 191.933582, + 2.0 + ], + "track_id": 0, + "image_id": 2280001, + "bbox": [ + 79.0, + 62.0, + 54.0, + 159.0 + ], + "scores": [], + "category_id": 1, + "id": 1000002280001 + }, + { + "keypoints": [ + 126.293586, + 86.516958, + 2.0, + 125.127052, + 119.880592, + 2.0, + 128.800121, + 77.713852, + 2.0, + 123.142858, + 93.771388, + 2.0, + 127.599998, + 93.314242, + 2.0, + 125.314285, + 126.685723, + 2.0, + 125.257142, + 128.685637, + 2.0, + 122.857184, + 111.302686, + 2.0, + 128.40003, + 107.885918, + 2.0, + 122.228575, + 148.91426, + 2.0, + 125.600109, + 150.403006, + 2.0, + 141.391708, + 106.511998, + 2.0, + 141.254766, + 105.486158, + 2.0, + 119.657303, + 169.255877, + 2.0, + 127.656398, + 173.757251, + 2.0 + ], + "track_id": 0, + "image_id": 2290001, + "bbox": [ + 114.0, + 68.0, + 38.0, + 115.0 + ], + "scores": [], + "category_id": 1, + "id": 1000002290001 + }, + { + "keypoints": [ + 104.590181, + 138.44876, + 2.0, + 
105.733877, + 165.843418, + 2.0, + 104.400092, + 130.809558, + 2.0, + 113.714288, + 142.914297, + 2.0, + 97.999996, + 146.228585, + 2.0, + 110.914299, + 173.028594, + 2.0, + 102.628562, + 171.028599, + 2.0, + 116.687889, + 156.478492, + 2.0, + 94.455572, + 157.210571, + 2.0, + 121.257055, + 190.342707, + 2.0, + 95.200265, + 191.484992, + 2.0, + 120.571144, + 170.45612, + 2.0, + 93.885656, + 169.029784, + 2.0, + 128.177332, + 206.720448, + 2.0, + 90.000104, + 209.256786, + 2.0 + ], + "track_id": 0, + "image_id": 2300001, + "bbox": [ + 84.0, + 123.0, + 51.0, + 94.0 + ], + "scores": [], + "category_id": 1, + "id": 1000002300001 + } + ] +} diff --git a/tests/data/lapa/test_lapa.json b/tests/data/lapa/test_lapa.json index 0484f08c06..cfdcd9b398 100644 --- a/tests/data/lapa/test_lapa.json +++ b/tests/data/lapa/test_lapa.json @@ -1,39 +1,39 @@ -{ - "categories": [ - { - "supercategory": "person", - "id": 1, - "name": "face", - "keypoints": [], - "skeleton": [] - } - ], - "images": [ - {"id": 40, "file_name": "10773046825_0.jpg", "height": 1494, "width": 1424}, - {"id": 41, "file_name": "13609937564_5.jpg", "height": 496, "width": 486} - ], - "annotations": [ - { - "keypoints": [ - 406.0, 644.0, 2.0, 402.0, 682.0, 2.0, 397.0, 719.0, 2.0, 391.0, 757.0, 2.0, 388.0, 795.0, 2.0, 389.0, 834.0, 2.0, 394.0, 874.0, 2.0, 402.0, 913.0, 2.0, 413.0, 952.0, 2.0, 426.0, 989.0, 2.0, 443.0, 1025.0, 2.0, 461.0, 1059.0, 2.0, 481.0, 1092.0, 2.0, 502.0, 1126.0, 2.0, 527.0, 1156.0, 2.0, 559.0, 1180.0, 2.0, 603.0, 1193.0, 2.0, 658.0, 1195.0, 2.0, 713.0, 1187.0, 2.0, 766.0, 1172.0, 2.0, 816.0, 1151.0, 2.0, 863.0, 1128.0, 2.0, 907.0, 1101.0, 2.0, 945.0, 1067.0, 2.0, 978.0, 1029.0, 2.0, 1003.0, 986.0, 2.0, 1019.0, 938.0, 2.0, 1030.0, 888.0, 2.0, 1037.0, 838.0, 2.0, 1040.0, 788.0, 2.0, 1040.0, 739.0, 2.0, 1037.0, 689.0, 2.0, 1033.0, 640.0, 2.0, 417.0, 595.0, 2.0, 445.0, 559.0, 2.0, 488.0, 548.0, 2.0, 535.0, 558.0, 2.0, 569.0, 579.0, 2.0, 562.0, 604.0, 2.0, 526.0, 588.0, 2.0, 487.0, 579.0, 2.0, 451.0, 581.0, 2.0, 662.0, 566.0, 2.0, 713.0, 545.0, 2.0, 777.0, 541.0, 2.0, 839.0, 558.0, 2.0, 887.0, 600.0, 2.0, 832.0, 581.0, 2.0, 777.0, 572.0, 2.0, 721.0, 578.0, 2.0, 669.0, 593.0, 2.0, 614.0, 654.0, 2.0, 602.0, 704.0, 2.0, 590.0, 755.0, 2.0, 577.0, 807.0, 2.0, 573.0, 678.0, 2.0, 540.0, 778.0, 2.0, 518.0, 826.0, 2.0, 538.0, 846.0, 2.0, 562.0, 855.0, 2.0, 592.0, 866.0, 2.0, 632.0, 856.0, 2.0, 668.0, 848.0, 2.0, 703.0, 827.0, 2.0, 681.0, 778.0, 2.0, 667.0, 676.0, 2.0, 447.0, 672.0, 2.0, 472.0, 662.0, 2.0, 499.0, 658.0, 2.0, 526.0, 662.0, 2.0, 550.0, 675.0, 2.0, 524.0, 674.0, 2.0, 498.0, 673.0, 2.0, 472.0, 673.0, 2.0, 501.0, 666.0, 2.0, 701.0, 673.0, 2.0, 729.0, 658.0, 2.0, 760.0, 654.0, 2.0, 792.0, 659.0, 2.0, 822.0, 671.0, 2.0, 791.0, 672.0, 2.0, 761.0, 672.0, 2.0, 731.0, 672.0, 2.0, 762.0, 663.0, 2.0, 503.0, 940.0, 2.0, 532.0, 923.0, 2.0, 575.0, 921.0, 2.0, 602.0, 927.0, 2.0, 631.0, 922.0, 2.0, 704.0, 930.0, 2.0, 775.0, 951.0, 2.0, 735.0, 1001.0, 2.0, 680.0, 1032.0, 2.0, 608.0, 1040.0, 2.0, 553.0, 1023.0, 2.0, 522.0, 987.0, 2.0, 519.0, 945.0, 2.0, 549.0, 937.0, 2.0, 604.0, 944.0, 2.0, 687.0, 942.0, 2.0, 751.0, 955.0, 2.0, 700.0, 996.0, 2.0, 609.0, 1007.0, 2.0, 546.0, 987.0, 2.0, 501.0, 666.0, 2.0, 762.0, 663.0, 2.0], - "image_id": 40, - "id": 40, - "num_keypoints": 106, - "bbox": [388.0, 541.0, 652.0, 654.0], - "iscrowd": 0, - "area": 426408, - "category_id": 1 - }, - { - "keypoints": [ - 179.0, 213.0, 2.0, 176.0, 225.0, 2.0, 173.0, 237.0, 2.0, 170.0, 249.0, 2.0, 167.0, 261.0, 2.0, 166.0, 273.0, 2.0, 165.0, 286.0, 2.0, 166.0, 
299.0, 2.0, 170.0, 311.0, 2.0, 176.0, 322.0, 2.0, 184.0, 331.0, 2.0, 194.0, 340.0, 2.0, 206.0, 347.0, 2.0, 218.0, 353.0, 2.0, 231.0, 358.0, 2.0, 244.0, 362.0, 2.0, 258.0, 365.0, 2.0, 269.0, 364.0, 2.0, 278.0, 361.0, 2.0, 286.0, 355.0, 2.0, 293.0, 349.0, 2.0, 300.0, 342.0, 2.0, 306.0, 334.0, 2.0, 311.0, 326.0, 2.0, 315.0, 317.0, 2.0, 318.0, 307.0, 2.0, 321.0, 298.0, 2.0, 323.0, 288.0, 2.0, 323.0, 279.0, 2.0, 323.0, 269.0, 2.0, 322.0, 260.0, 2.0, 321.0, 251.0, 2.0, 322.0, 242.0, 2.0, 207.0, 214.0, 2.0, 220.0, 206.0, 2.0, 236.0, 204.0, 2.0, 253.0, 208.0, 2.0, 266.0, 214.0, 2.0, 263.0, 221.0, 2.0, 250.0, 216.0, 2.0, 235.0, 212.0, 2.0, 221.0, 212.0, 2.0, 293.0, 223.0, 2.0, 302.0, 221.0, 2.0, 313.0, 221.0, 2.0, 321.0, 225.0, 2.0, 325.0, 233.0, 2.0, 318.0, 230.0, 2.0, 311.0, 228.0, 2.0, 302.0, 227.0, 2.0, 293.0, 228.0, 2.0, 277.0, 234.0, 2.0, 280.0, 244.0, 2.0, 283.0, 254.0, 2.0, 285.0, 265.0, 2.0, 261.0, 238.0, 2.0, 256.0, 257.0, 2.0, 248.0, 269.0, 2.0, 256.0, 275.0, 2.0, 266.0, 278.0, 2.0, 275.0, 282.0, 2.0, 282.0, 281.0, 2.0, 288.0, 281.0, 2.0, 293.0, 277.0, 2.0, 291.0, 263.0, 2.0, 285.0, 243.0, 2.0, 220.0, 228.0, 2.0, 228.0, 224.0, 2.0, 237.0, 224.0, 2.0, 245.0, 228.0, 2.0, 251.0, 235.0, 2.0, 243.0, 234.0, 2.0, 234.0, 234.0, 2.0, 226.0, 231.0, 2.0, 232.0, 228.0, 2.0, 287.0, 242.0, 2.0, 293.0, 238.0, 2.0, 301.0, 237.0, 2.0, 307.0, 241.0, 2.0, 311.0, 246.0, 2.0, 306.0, 247.0, 2.0, 299.0, 246.0, 2.0, 293.0, 245.0, 2.0, 297.0, 241.0, 2.0, 222.0, 299.0, 2.0, 242.0, 293.0, 2.0, 263.0, 292.0, 2.0, 271.0, 295.0, 2.0, 279.0, 295.0, 2.0, 288.0, 302.0, 2.0, 292.0, 310.0, 2.0, 286.0, 318.0, 2.0, 277.0, 324.0, 2.0, 263.0, 325.0, 2.0, 246.0, 320.0, 2.0, 233.0, 310.0, 2.0, 229.0, 300.0, 2.0, 246.0, 298.0, 2.0, 269.0, 302.0, 2.0, 282.0, 305.0, 2.0, 289.0, 310.0, 2.0, 280.0, 313.0, 2.0, 265.0, 313.0, 2.0, 243.0, 307.0, 2.0, 232.0, 228.0, 2.0, 297.0, 241.0, 2.0], - "image_id": 41, - "id": 41, - "num_keypoints": 106, - "bbox": [165.0, 204.0, 160.0, 161.0], - "iscrowd": 0, - "area": 25760, - "category_id": 1 - } - ] -} +{ + "categories": [ + { + "supercategory": "person", + "id": 1, + "name": "face", + "keypoints": [], + "skeleton": [] + } + ], + "images": [ + {"id": 40, "file_name": "10773046825_0.jpg", "height": 1494, "width": 1424}, + {"id": 41, "file_name": "13609937564_5.jpg", "height": 496, "width": 486} + ], + "annotations": [ + { + "keypoints": [ + 406.0, 644.0, 2.0, 402.0, 682.0, 2.0, 397.0, 719.0, 2.0, 391.0, 757.0, 2.0, 388.0, 795.0, 2.0, 389.0, 834.0, 2.0, 394.0, 874.0, 2.0, 402.0, 913.0, 2.0, 413.0, 952.0, 2.0, 426.0, 989.0, 2.0, 443.0, 1025.0, 2.0, 461.0, 1059.0, 2.0, 481.0, 1092.0, 2.0, 502.0, 1126.0, 2.0, 527.0, 1156.0, 2.0, 559.0, 1180.0, 2.0, 603.0, 1193.0, 2.0, 658.0, 1195.0, 2.0, 713.0, 1187.0, 2.0, 766.0, 1172.0, 2.0, 816.0, 1151.0, 2.0, 863.0, 1128.0, 2.0, 907.0, 1101.0, 2.0, 945.0, 1067.0, 2.0, 978.0, 1029.0, 2.0, 1003.0, 986.0, 2.0, 1019.0, 938.0, 2.0, 1030.0, 888.0, 2.0, 1037.0, 838.0, 2.0, 1040.0, 788.0, 2.0, 1040.0, 739.0, 2.0, 1037.0, 689.0, 2.0, 1033.0, 640.0, 2.0, 417.0, 595.0, 2.0, 445.0, 559.0, 2.0, 488.0, 548.0, 2.0, 535.0, 558.0, 2.0, 569.0, 579.0, 2.0, 562.0, 604.0, 2.0, 526.0, 588.0, 2.0, 487.0, 579.0, 2.0, 451.0, 581.0, 2.0, 662.0, 566.0, 2.0, 713.0, 545.0, 2.0, 777.0, 541.0, 2.0, 839.0, 558.0, 2.0, 887.0, 600.0, 2.0, 832.0, 581.0, 2.0, 777.0, 572.0, 2.0, 721.0, 578.0, 2.0, 669.0, 593.0, 2.0, 614.0, 654.0, 2.0, 602.0, 704.0, 2.0, 590.0, 755.0, 2.0, 577.0, 807.0, 2.0, 573.0, 678.0, 2.0, 540.0, 778.0, 2.0, 518.0, 826.0, 2.0, 538.0, 846.0, 2.0, 562.0, 855.0, 2.0, 592.0, 866.0, 
2.0, 632.0, 856.0, 2.0, 668.0, 848.0, 2.0, 703.0, 827.0, 2.0, 681.0, 778.0, 2.0, 667.0, 676.0, 2.0, 447.0, 672.0, 2.0, 472.0, 662.0, 2.0, 499.0, 658.0, 2.0, 526.0, 662.0, 2.0, 550.0, 675.0, 2.0, 524.0, 674.0, 2.0, 498.0, 673.0, 2.0, 472.0, 673.0, 2.0, 501.0, 666.0, 2.0, 701.0, 673.0, 2.0, 729.0, 658.0, 2.0, 760.0, 654.0, 2.0, 792.0, 659.0, 2.0, 822.0, 671.0, 2.0, 791.0, 672.0, 2.0, 761.0, 672.0, 2.0, 731.0, 672.0, 2.0, 762.0, 663.0, 2.0, 503.0, 940.0, 2.0, 532.0, 923.0, 2.0, 575.0, 921.0, 2.0, 602.0, 927.0, 2.0, 631.0, 922.0, 2.0, 704.0, 930.0, 2.0, 775.0, 951.0, 2.0, 735.0, 1001.0, 2.0, 680.0, 1032.0, 2.0, 608.0, 1040.0, 2.0, 553.0, 1023.0, 2.0, 522.0, 987.0, 2.0, 519.0, 945.0, 2.0, 549.0, 937.0, 2.0, 604.0, 944.0, 2.0, 687.0, 942.0, 2.0, 751.0, 955.0, 2.0, 700.0, 996.0, 2.0, 609.0, 1007.0, 2.0, 546.0, 987.0, 2.0, 501.0, 666.0, 2.0, 762.0, 663.0, 2.0], + "image_id": 40, + "id": 40, + "num_keypoints": 106, + "bbox": [388.0, 541.0, 652.0, 654.0], + "iscrowd": 0, + "area": 426408, + "category_id": 1 + }, + { + "keypoints": [ + 179.0, 213.0, 2.0, 176.0, 225.0, 2.0, 173.0, 237.0, 2.0, 170.0, 249.0, 2.0, 167.0, 261.0, 2.0, 166.0, 273.0, 2.0, 165.0, 286.0, 2.0, 166.0, 299.0, 2.0, 170.0, 311.0, 2.0, 176.0, 322.0, 2.0, 184.0, 331.0, 2.0, 194.0, 340.0, 2.0, 206.0, 347.0, 2.0, 218.0, 353.0, 2.0, 231.0, 358.0, 2.0, 244.0, 362.0, 2.0, 258.0, 365.0, 2.0, 269.0, 364.0, 2.0, 278.0, 361.0, 2.0, 286.0, 355.0, 2.0, 293.0, 349.0, 2.0, 300.0, 342.0, 2.0, 306.0, 334.0, 2.0, 311.0, 326.0, 2.0, 315.0, 317.0, 2.0, 318.0, 307.0, 2.0, 321.0, 298.0, 2.0, 323.0, 288.0, 2.0, 323.0, 279.0, 2.0, 323.0, 269.0, 2.0, 322.0, 260.0, 2.0, 321.0, 251.0, 2.0, 322.0, 242.0, 2.0, 207.0, 214.0, 2.0, 220.0, 206.0, 2.0, 236.0, 204.0, 2.0, 253.0, 208.0, 2.0, 266.0, 214.0, 2.0, 263.0, 221.0, 2.0, 250.0, 216.0, 2.0, 235.0, 212.0, 2.0, 221.0, 212.0, 2.0, 293.0, 223.0, 2.0, 302.0, 221.0, 2.0, 313.0, 221.0, 2.0, 321.0, 225.0, 2.0, 325.0, 233.0, 2.0, 318.0, 230.0, 2.0, 311.0, 228.0, 2.0, 302.0, 227.0, 2.0, 293.0, 228.0, 2.0, 277.0, 234.0, 2.0, 280.0, 244.0, 2.0, 283.0, 254.0, 2.0, 285.0, 265.0, 2.0, 261.0, 238.0, 2.0, 256.0, 257.0, 2.0, 248.0, 269.0, 2.0, 256.0, 275.0, 2.0, 266.0, 278.0, 2.0, 275.0, 282.0, 2.0, 282.0, 281.0, 2.0, 288.0, 281.0, 2.0, 293.0, 277.0, 2.0, 291.0, 263.0, 2.0, 285.0, 243.0, 2.0, 220.0, 228.0, 2.0, 228.0, 224.0, 2.0, 237.0, 224.0, 2.0, 245.0, 228.0, 2.0, 251.0, 235.0, 2.0, 243.0, 234.0, 2.0, 234.0, 234.0, 2.0, 226.0, 231.0, 2.0, 232.0, 228.0, 2.0, 287.0, 242.0, 2.0, 293.0, 238.0, 2.0, 301.0, 237.0, 2.0, 307.0, 241.0, 2.0, 311.0, 246.0, 2.0, 306.0, 247.0, 2.0, 299.0, 246.0, 2.0, 293.0, 245.0, 2.0, 297.0, 241.0, 2.0, 222.0, 299.0, 2.0, 242.0, 293.0, 2.0, 263.0, 292.0, 2.0, 271.0, 295.0, 2.0, 279.0, 295.0, 2.0, 288.0, 302.0, 2.0, 292.0, 310.0, 2.0, 286.0, 318.0, 2.0, 277.0, 324.0, 2.0, 263.0, 325.0, 2.0, 246.0, 320.0, 2.0, 233.0, 310.0, 2.0, 229.0, 300.0, 2.0, 246.0, 298.0, 2.0, 269.0, 302.0, 2.0, 282.0, 305.0, 2.0, 289.0, 310.0, 2.0, 280.0, 313.0, 2.0, 265.0, 313.0, 2.0, 243.0, 307.0, 2.0, 232.0, 228.0, 2.0, 297.0, 241.0, 2.0], + "image_id": 41, + "id": 41, + "num_keypoints": 106, + "bbox": [165.0, 204.0, 160.0, 161.0], + "iscrowd": 0, + "area": 25760, + "category_id": 1 + } + ] +} diff --git a/tests/data/locust/test_locust.json b/tests/data/locust/test_locust.json index fc8bbcd5db..c3a50305cf 100644 --- a/tests/data/locust/test_locust.json +++ b/tests/data/locust/test_locust.json @@ -1,410 +1,410 @@ -{ - "categories": [ - { - "supercategory": "animal", - "id": 1, - "name": "locust", - "keypoints": [ - "head", - 
"neck", - "thorax", - "abdomen1", - "abdomen2", - "anttipL", - "antbaseL", - "eyeL", - "forelegL1", - "forelegL2", - "forelegL3", - "forelegL4", - "midlegL1", - "midlegL2", - "midlegL3", - "midlegL4", - "hindlegL1", - "hindlegL2", - "hindlegL3", - "hindlegL4", - "anttipR", - "antbaseR", - "eyeR", - "forelegR1", - "forelegR2", - "forelegR3", - "forelegR4", - "midlegR1", - "midlegR2", - "midlegR3", - "midlegR4", - "hindlegR1", - "hindlegR2", - "hindlegR3", - "hindlegR4" - ], - "skeleton": [ - [ - 2, - 1 - ], - [ - 3, - 2 - ], - [ - 4, - 3 - ], - [ - 5, - 4 - ], - [ - 7, - 6 - ], - [ - 8, - 7 - ], - [ - 10, - 9 - ], - [ - 11, - 10 - ], - [ - 12, - 11 - ], - [ - 14, - 13 - ], - [ - 15, - 14 - ], - [ - 16, - 15 - ], - [ - 18, - 17 - ], - [ - 19, - 18 - ], - [ - 20, - 19 - ], - [ - 22, - 21 - ], - [ - 23, - 22 - ], - [ - 25, - 24 - ], - [ - 26, - 25 - ], - [ - 27, - 26 - ], - [ - 29, - 28 - ], - [ - 30, - 29 - ], - [ - 31, - 30 - ], - [ - 33, - 32 - ], - [ - 34, - 33 - ], - [ - 35, - 34 - ] - ] - } - ], - "images": [ - { - "id": 630, - "file_name": "630.jpg", - "height": 160, - "width": 160 - }, - { - "id": 650, - "file_name": "650.jpg", - "height": 160, - "width": 160 - } - ], - "annotations": [ - { - "keypoints": [ - 96.50167788139936, - 79.08306303388312, - 2.0, - 88.16894217433088, - 80.0, - 2.0, - 71.83105782566912, - 80.0, - 2.0, - 43.076199694670166, - 81.43588116352915, - 2.0, - 25.32887764003749, - 82.27820200265606, - 2.0, - 110.83265850033396, - 64.38260807811851, - 2.0, - 96.89436603268481, - 77.79724180953298, - 2.0, - 92.64247009206748, - 75.90977635533528, - 2.0, - 83.39926607647823, - 72.82433076402732, - 2.0, - 82.67339213429909, - 64.27184461240981, - 2.0, - 77.6884112016259, - 61.04563086937941, - 2.0, - 77.45675634815713, - 53.70793132675738, - 2.0, - 76.53903805777047, - 72.5751936338004, - 2.0, - 71.96661261225319, - 65.52855444465679, - 2.0, - 71.75442535243388, - 57.456652943107045, - 2.0, - 71.32325166700342, - 50.50892818053555, - 2.0, - 68.30277076791707, - 73.75801488839979, - 2.0, - 53.231016278533986, - 76.08684171200879, - 2.0, - 43.82802063202446, - 71.2340227958044, - 2.0, - 35.106594786098235, - 71.66012724670512, - 2.0, - 106.38084243468204, - 93.57855909773465, - 2.0, - 96.92326999269929, - 80.82566265131587, - 2.0, - 94.00509910253301, - 82.81711130561807, - 2.0, - 86.23508453811776, - 87.44135484984199, - 2.0, - 89.53039251130028, - 95.03156856963247, - 2.0, - 93.56705070950602, - 96.78650579864731, - 2.0, - 95.92358648030009, - 102.7013970756846, - 2.0, - 76.38469744035021, - 88.48766220561612, - 2.0, - 68.9346295215593, - 95.07191551878313, - 2.0, - 61.51609313834261, - 101.49429058760627, - 2.0, - 58.801694058956855, - 107.68266252152361, - 2.0, - 68.60028938490109, - 86.4375531155976, - 2.0, - 49.508565619095066, - 85.14994772406058, - 2.0, - 46.69889605871468, - 93.99222310236672, - 2.0, - 38.16941690562348, - 96.27433127807184, - 2.0 - ], - "image_id": 630, - "id": 630, - "num_keypoints": 35, - "bbox": [ - 25.32887764003749, - 50.50892818053555, - 86.50378086029647, - 58.17373434098806 - ], - "iscrowd": 0, - "area": 5032.247967257935, - "category_id": 1 - }, - { - "keypoints": [ - 97.23191700267623, - 80.39325063190708, - 2.0, - 88.51415643927471, - 80.0, - 2.0, - 71.48584356072527, - 80.0, - 2.0, - 36.905138572570486, - 78.04476695194448, - 2.0, - 16.961673753971056, - 75.93092988166644, - 2.0, - 113.49247835569392, - 67.25231199016146, - 2.0, - 97.64673560186061, - 78.62374942355183, - 2.0, - 94.59207701254518, - 76.42905623590133, - 2.0, - 
86.61299882845682, - 72.98025939672249, - 2.0, - 92.79065379033919, - 63.557810609540184, - 2.0, - 98.53306658179334, - 60.560826412407806, - 2.0, - 103.15691560103025, - 54.704957013528016, - 2.0, - 78.15050140841085, - 72.0525607684763, - 2.0, - 67.19679320947252, - 63.129491930981956, - 2.0, - 66.81613570544552, - 56.68704758248447, - 2.0, - 65.81511750771388, - 50.30081842401707, - 2.0, - 68.60029149309025, - 71.73022380161136, - 2.0, - 46.45069339825895, - 75.19901789908113, - 2.0, - 52.58790600614371, - 64.54029671009006, - 2.0, - 43.39186120464909, - 61.90008440661086, - 2.0, - 114.31225140311544, - 94.14582220648037, - 2.0, - 97.0916788683189, - 82.39643083701381, - 2.0, - 93.88962787007102, - 84.03290507899544, - 2.0, - 85.2589207759562, - 87.7242665022609, - 2.0, - 86.20699274387225, - 96.23021381618412, - 2.0, - 85.92496886773941, - 99.18054227199636, - 2.0, - 87.80771669496954, - 103.97613146233982, - 2.0, - 77.42016997828726, - 87.49638798189035, - 2.0, - 70.98251459751503, - 98.88127929151817, - 2.0, - 77.88427189277336, - 101.23547565641657, - 2.0, - 78.23906551163462, - 108.63777750516068, - 2.0, - 68.33776490317005, - 85.89688698861642, - 2.0, - 42.71215070869465, - 90.66846983209739, - 2.0, - 33.419979116798764, - 90.66772059057342, - 2.0, - 23.04868990312741, - 92.48441448580822, - 2.0 - ], - "image_id": 650, - "id": 650, - "num_keypoints": 35, - "bbox": [ - 16.961673753971056, - 50.30081842401707, - 98.35057764914438, - 59.33695908114361 - ], - "iscrowd": 0, - "area": 5835.824201574118, - "category_id": 1 - } - ] +{ + "categories": [ + { + "supercategory": "animal", + "id": 1, + "name": "locust", + "keypoints": [ + "head", + "neck", + "thorax", + "abdomen1", + "abdomen2", + "anttipL", + "antbaseL", + "eyeL", + "forelegL1", + "forelegL2", + "forelegL3", + "forelegL4", + "midlegL1", + "midlegL2", + "midlegL3", + "midlegL4", + "hindlegL1", + "hindlegL2", + "hindlegL3", + "hindlegL4", + "anttipR", + "antbaseR", + "eyeR", + "forelegR1", + "forelegR2", + "forelegR3", + "forelegR4", + "midlegR1", + "midlegR2", + "midlegR3", + "midlegR4", + "hindlegR1", + "hindlegR2", + "hindlegR3", + "hindlegR4" + ], + "skeleton": [ + [ + 2, + 1 + ], + [ + 3, + 2 + ], + [ + 4, + 3 + ], + [ + 5, + 4 + ], + [ + 7, + 6 + ], + [ + 8, + 7 + ], + [ + 10, + 9 + ], + [ + 11, + 10 + ], + [ + 12, + 11 + ], + [ + 14, + 13 + ], + [ + 15, + 14 + ], + [ + 16, + 15 + ], + [ + 18, + 17 + ], + [ + 19, + 18 + ], + [ + 20, + 19 + ], + [ + 22, + 21 + ], + [ + 23, + 22 + ], + [ + 25, + 24 + ], + [ + 26, + 25 + ], + [ + 27, + 26 + ], + [ + 29, + 28 + ], + [ + 30, + 29 + ], + [ + 31, + 30 + ], + [ + 33, + 32 + ], + [ + 34, + 33 + ], + [ + 35, + 34 + ] + ] + } + ], + "images": [ + { + "id": 630, + "file_name": "630.jpg", + "height": 160, + "width": 160 + }, + { + "id": 650, + "file_name": "650.jpg", + "height": 160, + "width": 160 + } + ], + "annotations": [ + { + "keypoints": [ + 96.50167788139936, + 79.08306303388312, + 2.0, + 88.16894217433088, + 80.0, + 2.0, + 71.83105782566912, + 80.0, + 2.0, + 43.076199694670166, + 81.43588116352915, + 2.0, + 25.32887764003749, + 82.27820200265606, + 2.0, + 110.83265850033396, + 64.38260807811851, + 2.0, + 96.89436603268481, + 77.79724180953298, + 2.0, + 92.64247009206748, + 75.90977635533528, + 2.0, + 83.39926607647823, + 72.82433076402732, + 2.0, + 82.67339213429909, + 64.27184461240981, + 2.0, + 77.6884112016259, + 61.04563086937941, + 2.0, + 77.45675634815713, + 53.70793132675738, + 2.0, + 76.53903805777047, + 72.5751936338004, + 2.0, + 71.96661261225319, + 
65.52855444465679, + 2.0, + 71.75442535243388, + 57.456652943107045, + 2.0, + 71.32325166700342, + 50.50892818053555, + 2.0, + 68.30277076791707, + 73.75801488839979, + 2.0, + 53.231016278533986, + 76.08684171200879, + 2.0, + 43.82802063202446, + 71.2340227958044, + 2.0, + 35.106594786098235, + 71.66012724670512, + 2.0, + 106.38084243468204, + 93.57855909773465, + 2.0, + 96.92326999269929, + 80.82566265131587, + 2.0, + 94.00509910253301, + 82.81711130561807, + 2.0, + 86.23508453811776, + 87.44135484984199, + 2.0, + 89.53039251130028, + 95.03156856963247, + 2.0, + 93.56705070950602, + 96.78650579864731, + 2.0, + 95.92358648030009, + 102.7013970756846, + 2.0, + 76.38469744035021, + 88.48766220561612, + 2.0, + 68.9346295215593, + 95.07191551878313, + 2.0, + 61.51609313834261, + 101.49429058760627, + 2.0, + 58.801694058956855, + 107.68266252152361, + 2.0, + 68.60028938490109, + 86.4375531155976, + 2.0, + 49.508565619095066, + 85.14994772406058, + 2.0, + 46.69889605871468, + 93.99222310236672, + 2.0, + 38.16941690562348, + 96.27433127807184, + 2.0 + ], + "image_id": 630, + "id": 630, + "num_keypoints": 35, + "bbox": [ + 25.32887764003749, + 50.50892818053555, + 86.50378086029647, + 58.17373434098806 + ], + "iscrowd": 0, + "area": 5032.247967257935, + "category_id": 1 + }, + { + "keypoints": [ + 97.23191700267623, + 80.39325063190708, + 2.0, + 88.51415643927471, + 80.0, + 2.0, + 71.48584356072527, + 80.0, + 2.0, + 36.905138572570486, + 78.04476695194448, + 2.0, + 16.961673753971056, + 75.93092988166644, + 2.0, + 113.49247835569392, + 67.25231199016146, + 2.0, + 97.64673560186061, + 78.62374942355183, + 2.0, + 94.59207701254518, + 76.42905623590133, + 2.0, + 86.61299882845682, + 72.98025939672249, + 2.0, + 92.79065379033919, + 63.557810609540184, + 2.0, + 98.53306658179334, + 60.560826412407806, + 2.0, + 103.15691560103025, + 54.704957013528016, + 2.0, + 78.15050140841085, + 72.0525607684763, + 2.0, + 67.19679320947252, + 63.129491930981956, + 2.0, + 66.81613570544552, + 56.68704758248447, + 2.0, + 65.81511750771388, + 50.30081842401707, + 2.0, + 68.60029149309025, + 71.73022380161136, + 2.0, + 46.45069339825895, + 75.19901789908113, + 2.0, + 52.58790600614371, + 64.54029671009006, + 2.0, + 43.39186120464909, + 61.90008440661086, + 2.0, + 114.31225140311544, + 94.14582220648037, + 2.0, + 97.0916788683189, + 82.39643083701381, + 2.0, + 93.88962787007102, + 84.03290507899544, + 2.0, + 85.2589207759562, + 87.7242665022609, + 2.0, + 86.20699274387225, + 96.23021381618412, + 2.0, + 85.92496886773941, + 99.18054227199636, + 2.0, + 87.80771669496954, + 103.97613146233982, + 2.0, + 77.42016997828726, + 87.49638798189035, + 2.0, + 70.98251459751503, + 98.88127929151817, + 2.0, + 77.88427189277336, + 101.23547565641657, + 2.0, + 78.23906551163462, + 108.63777750516068, + 2.0, + 68.33776490317005, + 85.89688698861642, + 2.0, + 42.71215070869465, + 90.66846983209739, + 2.0, + 33.419979116798764, + 90.66772059057342, + 2.0, + 23.04868990312741, + 92.48441448580822, + 2.0 + ], + "image_id": 650, + "id": 650, + "num_keypoints": 35, + "bbox": [ + 16.961673753971056, + 50.30081842401707, + 98.35057764914438, + 59.33695908114361 + ], + "iscrowd": 0, + "area": 5835.824201574118, + "category_id": 1 + } + ] } \ No newline at end of file diff --git a/tests/data/macaque/test_macaque.json b/tests/data/macaque/test_macaque.json index f0cbb5dad0..a0f18f1f0c 100644 --- a/tests/data/macaque/test_macaque.json +++ b/tests/data/macaque/test_macaque.json @@ -1,426 +1,426 @@ -{ - "categories": [ - { - "supercategory": "animal", 
- "id": 1, - "name": "macaque", - "keypoints": [ - "nose", - "left_eye", - "right_eye", - "left_ear", - "right_ear", - "left_shoulder", - "right_shoulder", - "left_elbow", - "right_elbow", - "left_wrist", - "right_wrist", - "left_hip", - "right_hip", - "left_knee", - "right_knee", - "left_ankle", - "right_ankle" - ], - "skeleton": [ - [ - 16, - 14 - ], - [ - 14, - 12 - ], - [ - 17, - 15 - ], - [ - 15, - 13 - ], - [ - 12, - 13 - ], - [ - 6, - 12 - ], - [ - 7, - 13 - ], - [ - 6, - 7 - ], - [ - 6, - 8 - ], - [ - 7, - 9 - ], - [ - 8, - 10 - ], - [ - 9, - 11 - ], - [ - 2, - 3 - ], - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 4 - ], - [ - 3, - 5 - ], - [ - 4, - 6 - ], - [ - 5, - 7 - ] - ] - } - ], - "images": [ - { - "id": 12900, - "file_name": "d47f1b1ee9d3217e.jpg", - "height": 710, - "width": 1024 - }, - { - "id": 12950, - "file_name": "PRI_1473.jpg", - "height": 1424, - "width": 1728 - } - ], - "annotations": [ - { - "keypoints": [ - 641.06, - 308.89, - 2.0, - 644.23, - 269.75, - 2.0, - 612.5, - 275.04, - 2.0, - 0.0, - 0.0, - 0.0, - 545.85, - 216.86, - 2.0, - 580.0, - 225.0, - 2.0, - 464.0, - 258.0, - 2.0, - 582.0, - 366.0, - 2.0, - 377.65, - 360.73, - 2.0, - 697.12, - 358.61, - 2.0, - 590.28, - 394.58, - 2.0, - 508.0, - 424.0, - 2.0, - 385.0, - 475.0, - 2.0, - 618.0, - 369.0, - 2.0, - 445.0, - 386.0, - 2.0, - 695.0, - 468.0, - 2.0, - 522.58, - 550.08, - 2.0 - ], - "image_id": 12900, - "id": 16169, - "num_keypoints": 16, - "bbox": [ - 143.87, - 147.04, - 623.01, - 535.22 - ], - "iscrowd": 0, - "area": 138067.96479999926, - "category_id": 1, - "segmentation": [ - [ - 408.33, - 534.21, - 465.45, - 547.97, - 502.48, - 587.11, - 573.36, - 593.45, - 652.69, - 608.26, - 687.6, - 578.64, - 656.93, - 544.79, - 616.73, - 537.39, - 588.17, - 501.42, - 532.1, - 483.44, - 518.35, - 455.93, - 536.33, - 426.31, - 567.01, - 417.85, - 629.42, - 430.55, - 696.07, - 437.95, - 670.68, - 403.04, - 705.59, - 395.64, - 737.32, - 413.62, - 738.38, - 431.6, - 765.88, - 431.6, - 759.54, - 379.77, - 722.51, - 341.69, - 654.81, - 322.64, - 652.69, - 271.87, - 675.97, - 230.61, - 651.64, - 168.2, - 567.01, - 147.04, - 492.96, - 157.62, - 423.14, - 186.18, - 372.36, - 243.31, - 317.36, - 325.82, - 307.83, - 404.1, - 334.28, - 469.69, - 353.32, - 488.73, - 290.91, - 521.52, - 231.67, - 563.83, - 174.55, - 628.36, - 143.87, - 675.97, - 200.99, - 681.26, - 245.42, - 622.02, - 307.83, - 574.41, - 407.27, - 536.33 - ] - ] - }, - { - "keypoints": [ - 783.0, - 890.0, - 2.0, - 775.14, - 848.5, - 2.0, - 0.0, - 0.0, - 0.0, - 834.0, - 796.0, - 2.0, - 0.0, - 0.0, - 0.0, - 987.0, - 815.0, - 2.0, - 833.0, - 819.0, - 2.0, - 1132.15, - 789.82, - 2.0, - 887.0, - 919.0, - 2.0, - 1191.0, - 852.0, - 2.0, - 869.0, - 1040.0, - 2.0, - 1177.0, - 527.0, - 2.0, - 1082.0, - 513.0, - 2.0, - 1173.72, - 721.35, - 2.0, - 1086.0, - 737.0, - 2.0, - 1307.0, - 678.0, - 2.0, - 1218.0, - 783.0, - 2.0 - ], - "image_id": 12950, - "id": 16227, - "num_keypoints": 15, - "bbox": [ - 722.61, - 393.97, - 642.3100000000001, - 754.1700000000001 - ], - "iscrowd": 0, - "area": 242621.70749999955, - "category_id": 1, - "segmentation": [ - [ - 1248.37, - 529.25, - 1212.77, - 474.67, - 1179.55, - 420.08, - 1134.45, - 398.72, - 1084.61, - 393.97, - 1053.76, - 429.57, - 1043.26, - 467.44, - 996.8, - 531.63, - 930.35, - 607.57, - 842.53, - 659.79, - 806.93, - 704.88, - 768.96, - 728.61, - 722.61, - 767.77, - 727.12, - 831.0, - 740.67, - 878.42, - 783.58, - 937.13, - 812.93, - 957.45, - 812.93, - 1027.46, - 799.38, - 1072.62, - 765.51, - 1113.27, - 758.74, - 1142.62, - 
803.9, - 1147.14, - 862.61, - 1138.11, - 923.58, - 1068.1, - 950.68, - 1000.36, - 948.42, - 905.52, - 1007.13, - 891.97, - 1083.91, - 873.9, - 1151.65, - 919.07, - 1187.78, - 957.45, - 1223.91, - 982.29, - 1262.3, - 950.68, - 1272.11, - 901.87, - 1264.56, - 844.55, - 1302.95, - 821.96, - 1341.34, - 779.06, - 1363.92, - 704.54, - 1352.8, - 633.68, - 1274.48, - 598.08 - ] - ] - } - ] +{ + "categories": [ + { + "supercategory": "animal", + "id": 1, + "name": "macaque", + "keypoints": [ + "nose", + "left_eye", + "right_eye", + "left_ear", + "right_ear", + "left_shoulder", + "right_shoulder", + "left_elbow", + "right_elbow", + "left_wrist", + "right_wrist", + "left_hip", + "right_hip", + "left_knee", + "right_knee", + "left_ankle", + "right_ankle" + ], + "skeleton": [ + [ + 16, + 14 + ], + [ + 14, + 12 + ], + [ + 17, + 15 + ], + [ + 15, + 13 + ], + [ + 12, + 13 + ], + [ + 6, + 12 + ], + [ + 7, + 13 + ], + [ + 6, + 7 + ], + [ + 6, + 8 + ], + [ + 7, + 9 + ], + [ + 8, + 10 + ], + [ + 9, + 11 + ], + [ + 2, + 3 + ], + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 4 + ], + [ + 3, + 5 + ], + [ + 4, + 6 + ], + [ + 5, + 7 + ] + ] + } + ], + "images": [ + { + "id": 12900, + "file_name": "d47f1b1ee9d3217e.jpg", + "height": 710, + "width": 1024 + }, + { + "id": 12950, + "file_name": "PRI_1473.jpg", + "height": 1424, + "width": 1728 + } + ], + "annotations": [ + { + "keypoints": [ + 641.06, + 308.89, + 2.0, + 644.23, + 269.75, + 2.0, + 612.5, + 275.04, + 2.0, + 0.0, + 0.0, + 0.0, + 545.85, + 216.86, + 2.0, + 580.0, + 225.0, + 2.0, + 464.0, + 258.0, + 2.0, + 582.0, + 366.0, + 2.0, + 377.65, + 360.73, + 2.0, + 697.12, + 358.61, + 2.0, + 590.28, + 394.58, + 2.0, + 508.0, + 424.0, + 2.0, + 385.0, + 475.0, + 2.0, + 618.0, + 369.0, + 2.0, + 445.0, + 386.0, + 2.0, + 695.0, + 468.0, + 2.0, + 522.58, + 550.08, + 2.0 + ], + "image_id": 12900, + "id": 16169, + "num_keypoints": 16, + "bbox": [ + 143.87, + 147.04, + 623.01, + 535.22 + ], + "iscrowd": 0, + "area": 138067.96479999926, + "category_id": 1, + "segmentation": [ + [ + 408.33, + 534.21, + 465.45, + 547.97, + 502.48, + 587.11, + 573.36, + 593.45, + 652.69, + 608.26, + 687.6, + 578.64, + 656.93, + 544.79, + 616.73, + 537.39, + 588.17, + 501.42, + 532.1, + 483.44, + 518.35, + 455.93, + 536.33, + 426.31, + 567.01, + 417.85, + 629.42, + 430.55, + 696.07, + 437.95, + 670.68, + 403.04, + 705.59, + 395.64, + 737.32, + 413.62, + 738.38, + 431.6, + 765.88, + 431.6, + 759.54, + 379.77, + 722.51, + 341.69, + 654.81, + 322.64, + 652.69, + 271.87, + 675.97, + 230.61, + 651.64, + 168.2, + 567.01, + 147.04, + 492.96, + 157.62, + 423.14, + 186.18, + 372.36, + 243.31, + 317.36, + 325.82, + 307.83, + 404.1, + 334.28, + 469.69, + 353.32, + 488.73, + 290.91, + 521.52, + 231.67, + 563.83, + 174.55, + 628.36, + 143.87, + 675.97, + 200.99, + 681.26, + 245.42, + 622.02, + 307.83, + 574.41, + 407.27, + 536.33 + ] + ] + }, + { + "keypoints": [ + 783.0, + 890.0, + 2.0, + 775.14, + 848.5, + 2.0, + 0.0, + 0.0, + 0.0, + 834.0, + 796.0, + 2.0, + 0.0, + 0.0, + 0.0, + 987.0, + 815.0, + 2.0, + 833.0, + 819.0, + 2.0, + 1132.15, + 789.82, + 2.0, + 887.0, + 919.0, + 2.0, + 1191.0, + 852.0, + 2.0, + 869.0, + 1040.0, + 2.0, + 1177.0, + 527.0, + 2.0, + 1082.0, + 513.0, + 2.0, + 1173.72, + 721.35, + 2.0, + 1086.0, + 737.0, + 2.0, + 1307.0, + 678.0, + 2.0, + 1218.0, + 783.0, + 2.0 + ], + "image_id": 12950, + "id": 16227, + "num_keypoints": 15, + "bbox": [ + 722.61, + 393.97, + 642.3100000000001, + 754.1700000000001 + ], + "iscrowd": 0, + "area": 242621.70749999955, + "category_id": 1, + "segmentation": [ 
+ [ + 1248.37, + 529.25, + 1212.77, + 474.67, + 1179.55, + 420.08, + 1134.45, + 398.72, + 1084.61, + 393.97, + 1053.76, + 429.57, + 1043.26, + 467.44, + 996.8, + 531.63, + 930.35, + 607.57, + 842.53, + 659.79, + 806.93, + 704.88, + 768.96, + 728.61, + 722.61, + 767.77, + 727.12, + 831.0, + 740.67, + 878.42, + 783.58, + 937.13, + 812.93, + 957.45, + 812.93, + 1027.46, + 799.38, + 1072.62, + 765.51, + 1113.27, + 758.74, + 1142.62, + 803.9, + 1147.14, + 862.61, + 1138.11, + 923.58, + 1068.1, + 950.68, + 1000.36, + 948.42, + 905.52, + 1007.13, + 891.97, + 1083.91, + 873.9, + 1151.65, + 919.07, + 1187.78, + 957.45, + 1223.91, + 982.29, + 1262.3, + 950.68, + 1272.11, + 901.87, + 1264.56, + 844.55, + 1302.95, + 821.96, + 1341.34, + 779.06, + 1363.92, + 704.54, + 1352.8, + 633.68, + 1274.48, + 598.08 + ] + ] + } + ] } \ No newline at end of file diff --git a/tests/data/mhp/test_mhp.json b/tests/data/mhp/test_mhp.json index 3740f720e4..5dce5b37d8 100644 --- a/tests/data/mhp/test_mhp.json +++ b/tests/data/mhp/test_mhp.json @@ -1,391 +1,391 @@ -{ - "categories": [ - { - "supercategory": "person", - "id": 1, - "name": "person", - "keypoints": [ - "Right-ankle", - "Right-knee", - "Right-hip", - "Left-hip", - "Left-knee", - "Left-ankle", - "Pelvis", - "Thorax", - "Upper-neck", - "Head-top", - "Right-wrist", - "Right-elbow", - "Right-shoulder", - "Left-shoulder", - "Left-elbow", - "Left-wrist" - ], - "skeleton": [ - [ - 1, - 2 - ], - [ - 2, - 3 - ], - [ - 3, - 7 - ], - [ - 7, - 4 - ], - [ - 4, - 5 - ], - [ - 5, - 6 - ], - [ - 7, - 8 - ], - [ - 8, - 9 - ], - [ - 9, - 10 - ], - [ - 11, - 12 - ], - [ - 12, - 13 - ], - [ - 13, - 9 - ], - [ - 9, - 14 - ], - [ - 14, - 15 - ], - [ - 15, - 16 - ] - ] - } - ], - "images": [ - { - "license": 0, - "file_name": "10084.jpg", - "height": 299, - "width": 298, - "id": 2889 - }, - { - "license": 0, - "file_name": "10112.jpg", - "height": 180, - "width": 215, - "id": 3928 - } - ], - "annotations": [ - { - "segmentation": [], - "num_keypoints": 13, - "iscrowd": 0, - "keypoints": [ - 151.74249267578125, - 251.90750122070312, - 2.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 156.2274932861328, - 136.0449981689453, - 2.0, - 94.18499755859375, - 203.32000732421875, - 2.0, - 128.57000732421875, - 246.6750030517578, - 2.0, - 119.5999984741211, - 143.52000427246094, - 2.0, - 116.61000061035156, - 85.9625015258789, - 2.0, - 109.13500213623047, - 41.86000061035156, - 2.0, - 108.38749694824219, - 7.474999904632568, - 2.0, - 44.849998474121094, - 105.39749908447266, - 2.0, - 80.7300033569336, - 112.125, - 2.0, - 0.0, - 0.0, - 0.0, - 122.58999633789062, - 47.84000015258789, - 2.0, - 87.4574966430664, - 83.72000122070312, - 2.0, - 35.880001068115234, - 97.17500305175781, - 2.0 - ], - "image_id": 2889, - "bbox": [ - 3.737499952316284, - 5.232500076293945, - 169.68249821662903, - 282.5550060272217 - ], - "category_id": 1, - "id": 7646, - "face_box": [ - 96.42749786376953, - 12.707500457763672, - 35.13249969482422, - 29.15250015258789 - ], - "area": 47944.63930631365 - }, - { - "segmentation": [], - "num_keypoints": 14, - "iscrowd": 0, - "keypoints": [ - 0.0, - 0.0, - 0.0, - 254.14999389648438, - 219.76499938964844, - 2.0, - 0.0, - 0.0, - 0.0, - 292.2724914550781, - 147.25750732421875, - 2.0, - 223.50250244140625, - 195.09750366210938, - 2.0, - 242.19000244140625, - 276.57501220703125, - 2.0, - 264.614990234375, - 150.9949951171875, - 2.0, - 233.22000122070312, - 81.47750091552734, - 2.0, - 236.95750427246094, - 59.0525016784668, - 2.0, - 230.9774932861328, - 16.44499969482422, - 2.0, - 
142.02499389648438, - 66.52749633789062, - 2.0, - 180.89500427246094, - 65.77999877929688, - 2.0, - 221.25999450683594, - 63.537498474121094, - 2.0, - 260.87750244140625, - 59.79999923706055, - 2.0, - 296.010009765625, - 92.69000244140625, - 2.0, - 281.05999755859375, - 146.50999450683594, - 2.0 - ], - "image_id": 2889, - "bbox": [ - 117.35749816894531, - 11.212499618530273, - 181.6425018310547, - 285.5450077056885 - ], - "category_id": 1, - "id": 7647, - "face_box": [ - 210.04750061035156, - 19.434999465942383, - 31.395004272460938, - 38.12249946594238 - ], - "area": 51867.109585029044 - }, - { - "segmentation": [], - "num_keypoints": 7, - "iscrowd": 0, - "keypoints": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 38.70000076293945, - 117.44999694824219, - 2.0, - 48.599998474121094, - 71.0999984741211, - 2.0, - 66.5999984741211, - 17.549999237060547, - 2.0, - 0.0, - 0.0, - 0.0, - 3.1500000953674316, - 158.39999389648438, - 2.0, - 3.5999999046325684, - 74.69999694824219, - 2.0, - 97.19999694824219, - 76.94999694824219, - 2.0, - 102.1500015258789, - 145.35000610351562, - 2.0, - 0.0, - 0.0, - 0.0 - ], - "image_id": 3928, - "bbox": [ - 2.25, - 10.350000381469727, - 114.30000305175781, - 169.2000026702881 - ], - "category_id": 1, - "id": 10379, - "face_box": [ - 30.600000381469727, - 26.100000381469727, - 47.24999809265137, - 45.89999961853027 - ], - "area": 19339.56082157136 - }, - { - "segmentation": [], - "num_keypoints": 7, - "iscrowd": 0, - "keypoints": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 147.60000610351562, - 126.9000015258789, - 2.0, - 155.6999969482422, - 81.9000015258789, - 2.0, - 152.5500030517578, - 25.200000762939453, - 2.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 127.3499984741211, - 93.1500015258789, - 2.0, - 198.4499969482422, - 89.0999984741211, - 2.0, - 198.89999389648438, - 163.35000610351562, - 2.0, - 148.5, - 151.64999389648438, - 2.0 - ], - "image_id": 3928, - "bbox": [ - 112.05000305175781, - 18.450000762939453, - 96.30000305175781, - 161.10000228881836 - ], - "category_id": 1, - "id": 10380, - "face_box": [ - 132.3000030517578, - 39.150001525878906, - 44.55000305175781, - 40.04999542236328 - ], - "area": 15513.930712051399 - } - ] +{ + "categories": [ + { + "supercategory": "person", + "id": 1, + "name": "person", + "keypoints": [ + "Right-ankle", + "Right-knee", + "Right-hip", + "Left-hip", + "Left-knee", + "Left-ankle", + "Pelvis", + "Thorax", + "Upper-neck", + "Head-top", + "Right-wrist", + "Right-elbow", + "Right-shoulder", + "Left-shoulder", + "Left-elbow", + "Left-wrist" + ], + "skeleton": [ + [ + 1, + 2 + ], + [ + 2, + 3 + ], + [ + 3, + 7 + ], + [ + 7, + 4 + ], + [ + 4, + 5 + ], + [ + 5, + 6 + ], + [ + 7, + 8 + ], + [ + 8, + 9 + ], + [ + 9, + 10 + ], + [ + 11, + 12 + ], + [ + 12, + 13 + ], + [ + 13, + 9 + ], + [ + 9, + 14 + ], + [ + 14, + 15 + ], + [ + 15, + 16 + ] + ] + } + ], + "images": [ + { + "license": 0, + "file_name": "10084.jpg", + "height": 299, + "width": 298, + "id": 2889 + }, + { + "license": 0, + "file_name": "10112.jpg", + "height": 180, + "width": 215, + "id": 3928 + } + ], + "annotations": [ + { + "segmentation": [], + "num_keypoints": 13, + "iscrowd": 0, + "keypoints": [ + 151.74249267578125, + 251.90750122070312, + 2.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 156.2274932861328, + 136.0449981689453, + 2.0, + 
94.18499755859375, + 203.32000732421875, + 2.0, + 128.57000732421875, + 246.6750030517578, + 2.0, + 119.5999984741211, + 143.52000427246094, + 2.0, + 116.61000061035156, + 85.9625015258789, + 2.0, + 109.13500213623047, + 41.86000061035156, + 2.0, + 108.38749694824219, + 7.474999904632568, + 2.0, + 44.849998474121094, + 105.39749908447266, + 2.0, + 80.7300033569336, + 112.125, + 2.0, + 0.0, + 0.0, + 0.0, + 122.58999633789062, + 47.84000015258789, + 2.0, + 87.4574966430664, + 83.72000122070312, + 2.0, + 35.880001068115234, + 97.17500305175781, + 2.0 + ], + "image_id": 2889, + "bbox": [ + 3.737499952316284, + 5.232500076293945, + 169.68249821662903, + 282.5550060272217 + ], + "category_id": 1, + "id": 7646, + "face_box": [ + 96.42749786376953, + 12.707500457763672, + 35.13249969482422, + 29.15250015258789 + ], + "area": 47944.63930631365 + }, + { + "segmentation": [], + "num_keypoints": 14, + "iscrowd": 0, + "keypoints": [ + 0.0, + 0.0, + 0.0, + 254.14999389648438, + 219.76499938964844, + 2.0, + 0.0, + 0.0, + 0.0, + 292.2724914550781, + 147.25750732421875, + 2.0, + 223.50250244140625, + 195.09750366210938, + 2.0, + 242.19000244140625, + 276.57501220703125, + 2.0, + 264.614990234375, + 150.9949951171875, + 2.0, + 233.22000122070312, + 81.47750091552734, + 2.0, + 236.95750427246094, + 59.0525016784668, + 2.0, + 230.9774932861328, + 16.44499969482422, + 2.0, + 142.02499389648438, + 66.52749633789062, + 2.0, + 180.89500427246094, + 65.77999877929688, + 2.0, + 221.25999450683594, + 63.537498474121094, + 2.0, + 260.87750244140625, + 59.79999923706055, + 2.0, + 296.010009765625, + 92.69000244140625, + 2.0, + 281.05999755859375, + 146.50999450683594, + 2.0 + ], + "image_id": 2889, + "bbox": [ + 117.35749816894531, + 11.212499618530273, + 181.6425018310547, + 285.5450077056885 + ], + "category_id": 1, + "id": 7647, + "face_box": [ + 210.04750061035156, + 19.434999465942383, + 31.395004272460938, + 38.12249946594238 + ], + "area": 51867.109585029044 + }, + { + "segmentation": [], + "num_keypoints": 7, + "iscrowd": 0, + "keypoints": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 38.70000076293945, + 117.44999694824219, + 2.0, + 48.599998474121094, + 71.0999984741211, + 2.0, + 66.5999984741211, + 17.549999237060547, + 2.0, + 0.0, + 0.0, + 0.0, + 3.1500000953674316, + 158.39999389648438, + 2.0, + 3.5999999046325684, + 74.69999694824219, + 2.0, + 97.19999694824219, + 76.94999694824219, + 2.0, + 102.1500015258789, + 145.35000610351562, + 2.0, + 0.0, + 0.0, + 0.0 + ], + "image_id": 3928, + "bbox": [ + 2.25, + 10.350000381469727, + 114.30000305175781, + 169.2000026702881 + ], + "category_id": 1, + "id": 10379, + "face_box": [ + 30.600000381469727, + 26.100000381469727, + 47.24999809265137, + 45.89999961853027 + ], + "area": 19339.56082157136 + }, + { + "segmentation": [], + "num_keypoints": 7, + "iscrowd": 0, + "keypoints": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 147.60000610351562, + 126.9000015258789, + 2.0, + 155.6999969482422, + 81.9000015258789, + 2.0, + 152.5500030517578, + 25.200000762939453, + 2.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 127.3499984741211, + 93.1500015258789, + 2.0, + 198.4499969482422, + 89.0999984741211, + 2.0, + 198.89999389648438, + 163.35000610351562, + 2.0, + 148.5, + 151.64999389648438, + 2.0 + ], + "image_id": 3928, + "bbox": [ + 112.05000305175781, + 
18.450000762939453, + 96.30000305175781, + 161.10000228881836 + ], + "category_id": 1, + "id": 10380, + "face_box": [ + 132.3000030517578, + 39.150001525878906, + 44.55000305175781, + 40.04999542236328 + ], + "area": 15513.930712051399 + } + ] } \ No newline at end of file diff --git a/tests/data/mpii/test_mpii.json b/tests/data/mpii/test_mpii.json index 5c13d6a860..3077d3c151 100644 --- a/tests/data/mpii/test_mpii.json +++ b/tests/data/mpii/test_mpii.json @@ -1,462 +1,462 @@ -[ - { - "joints_vis": [ - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1 - ], - "joints": [ - [ - 804.0, - 711.0 - ], - [ - 816.0, - 510.0 - ], - [ - 908.0, - 438.0 - ], - [ - 1040.0, - 454.0 - ], - [ - 906.0, - 528.0 - ], - [ - 883.0, - 707.0 - ], - [ - 974.0, - 446.0 - ], - [ - 985.0, - 253.0 - ], - [ - 982.7591, - 235.9694 - ], - [ - 962.2409, - 80.0306 - ], - [ - 869.0, - 214.0 - ], - [ - 798.0, - 340.0 - ], - [ - 902.0, - 253.0 - ], - [ - 1067.0, - 253.0 - ], - [ - 1167.0, - 353.0 - ], - [ - 1142.0, - 478.0 - ] - ], - "image": "005808361.jpg", - "scale": 4.718488, - "center": [ - 966.0, - 340.0 - ] - }, - { - "joints_vis": [ - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1 - ], - "joints": [ - [ - 317.0, - 412.0 - ], - [ - 318.0, - 299.0 - ], - [ - 290.0, - 274.0 - ], - [ - 353.0, - 275.0 - ], - [ - 403.0, - 299.0 - ], - [ - 394.0, - 409.0 - ], - [ - 322.0, - 275.0 - ], - [ - 327.0, - 172.0 - ], - [ - 329.9945, - 162.1051 - ], - [ - 347.0055, - 105.8949 - ], - [ - 296.0, - 135.0 - ], - [ - 281.0, - 208.0 - ], - [ - 296.0, - 167.0 - ], - [ - 358.0, - 177.0 - ], - [ - 387.0, - 236.0 - ], - [ - 392.0, - 167.0 - ] - ], - "image": "052475643.jpg", - "scale": 1.761835, - "center": [ - 316.0, - 220.0 - ] - }, - { - "joints_vis": [ - 0, - 1, - 1, - 1, - 1, - 0, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1 - ], - "joints": [ - [ - -1.0, - -1.0 - ], - [ - 1033.0, - 649.0 - ], - [ - 1072.0, - 474.0 - ], - [ - 973.0, - 496.0 - ], - [ - 961.0, - 650.0 - ], - [ - -1.0, - -1.0 - ], - [ - 1023.0, - 485.0 - ], - [ - 1031.0, - 295.0 - ], - [ - 1026.998, - 281.6248 - ], - [ - 997.002, - 181.3752 - ], - [ - 988.0, - 294.0 - ], - [ - 1018.0, - 317.0 - ], - [ - 1070.0, - 290.0 - ], - [ - 991.0, - 300.0 - ], - [ - 912.0, - 345.0 - ], - [ - 842.0, - 330.0 - ] - ], - "image": "051423444.jpg", - "scale": 3.139233, - "center": [ - 1030.0, - 396.0 - ] - }, - { - "joints_vis": [ - 0, - 1, - 1, - 1, - 1, - 0, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1 - ], - "joints": [ - [ - -1.0, - -1.0 - ], - [ - 804.0, - 659.0 - ], - [ - 786.0, - 498.0 - ], - [ - 868.0, - 509.0 - ], - [ - 860.0, - 693.0 - ], - [ - -1.0, - -1.0 - ], - [ - 827.0, - 504.0 - ], - [ - 840.0, - 314.0 - ], - [ - 838.9079, - 308.9326 - ], - [ - 816.0921, - 203.0674 - ], - [ - 698.0, - 264.0 - ], - [ - 740.0, - 297.0 - ], - [ - 790.0, - 300.0 - ], - [ - 889.0, - 328.0 - ], - [ - 915.0, - 452.0 - ], - [ - 906.0, - 553.0 - ] - ], - "image": "004645041.jpg", - "scale": 3.248877, - "center": [ - 809.0, - 403.0 - ] - }, - { - "joints_vis": [ - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1 - ], - "joints": [ - [ - 694.0, - 684.0 - ], - [ - 685.0, - 579.0 - ], - [ - 670.0, - 437.0 - ], - [ - 747.0, - 421.0 - ], - [ - 751.0, - 574.0 - ], - [ - 768.0, - 717.0 - ], - [ - 709.0, - 429.0 - ], - [ - 649.0, - 230.0 - ], - [ - 642.6337, - 217.5659 - ], - [ - 591.3663, - 117.4341 - ], - [ - 488.0, - 351.0 - ], - [ - 551.0, - 307.0 - ], - [ - 600.0, - 242.0 - ], - [ - 698.0, - 217.0 - ], - [ - 
767.0, - 310.0 - ], - [ - 790.0, - 405.0 - ] - ], - "image": "060754485.jpg", - "scale": 3.374796, - "center": [ - 698.0, - 404.0 - ] - } -] +[ + { + "joints_vis": [ + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1 + ], + "joints": [ + [ + 804.0, + 711.0 + ], + [ + 816.0, + 510.0 + ], + [ + 908.0, + 438.0 + ], + [ + 1040.0, + 454.0 + ], + [ + 906.0, + 528.0 + ], + [ + 883.0, + 707.0 + ], + [ + 974.0, + 446.0 + ], + [ + 985.0, + 253.0 + ], + [ + 982.7591, + 235.9694 + ], + [ + 962.2409, + 80.0306 + ], + [ + 869.0, + 214.0 + ], + [ + 798.0, + 340.0 + ], + [ + 902.0, + 253.0 + ], + [ + 1067.0, + 253.0 + ], + [ + 1167.0, + 353.0 + ], + [ + 1142.0, + 478.0 + ] + ], + "image": "005808361.jpg", + "scale": 4.718488, + "center": [ + 966.0, + 340.0 + ] + }, + { + "joints_vis": [ + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1 + ], + "joints": [ + [ + 317.0, + 412.0 + ], + [ + 318.0, + 299.0 + ], + [ + 290.0, + 274.0 + ], + [ + 353.0, + 275.0 + ], + [ + 403.0, + 299.0 + ], + [ + 394.0, + 409.0 + ], + [ + 322.0, + 275.0 + ], + [ + 327.0, + 172.0 + ], + [ + 329.9945, + 162.1051 + ], + [ + 347.0055, + 105.8949 + ], + [ + 296.0, + 135.0 + ], + [ + 281.0, + 208.0 + ], + [ + 296.0, + 167.0 + ], + [ + 358.0, + 177.0 + ], + [ + 387.0, + 236.0 + ], + [ + 392.0, + 167.0 + ] + ], + "image": "052475643.jpg", + "scale": 1.761835, + "center": [ + 316.0, + 220.0 + ] + }, + { + "joints_vis": [ + 0, + 1, + 1, + 1, + 1, + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1 + ], + "joints": [ + [ + -1.0, + -1.0 + ], + [ + 1033.0, + 649.0 + ], + [ + 1072.0, + 474.0 + ], + [ + 973.0, + 496.0 + ], + [ + 961.0, + 650.0 + ], + [ + -1.0, + -1.0 + ], + [ + 1023.0, + 485.0 + ], + [ + 1031.0, + 295.0 + ], + [ + 1026.998, + 281.6248 + ], + [ + 997.002, + 181.3752 + ], + [ + 988.0, + 294.0 + ], + [ + 1018.0, + 317.0 + ], + [ + 1070.0, + 290.0 + ], + [ + 991.0, + 300.0 + ], + [ + 912.0, + 345.0 + ], + [ + 842.0, + 330.0 + ] + ], + "image": "051423444.jpg", + "scale": 3.139233, + "center": [ + 1030.0, + 396.0 + ] + }, + { + "joints_vis": [ + 0, + 1, + 1, + 1, + 1, + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1 + ], + "joints": [ + [ + -1.0, + -1.0 + ], + [ + 804.0, + 659.0 + ], + [ + 786.0, + 498.0 + ], + [ + 868.0, + 509.0 + ], + [ + 860.0, + 693.0 + ], + [ + -1.0, + -1.0 + ], + [ + 827.0, + 504.0 + ], + [ + 840.0, + 314.0 + ], + [ + 838.9079, + 308.9326 + ], + [ + 816.0921, + 203.0674 + ], + [ + 698.0, + 264.0 + ], + [ + 740.0, + 297.0 + ], + [ + 790.0, + 300.0 + ], + [ + 889.0, + 328.0 + ], + [ + 915.0, + 452.0 + ], + [ + 906.0, + 553.0 + ] + ], + "image": "004645041.jpg", + "scale": 3.248877, + "center": [ + 809.0, + 403.0 + ] + }, + { + "joints_vis": [ + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1 + ], + "joints": [ + [ + 694.0, + 684.0 + ], + [ + 685.0, + 579.0 + ], + [ + 670.0, + 437.0 + ], + [ + 747.0, + 421.0 + ], + [ + 751.0, + 574.0 + ], + [ + 768.0, + 717.0 + ], + [ + 709.0, + 429.0 + ], + [ + 649.0, + 230.0 + ], + [ + 642.6337, + 217.5659 + ], + [ + 591.3663, + 117.4341 + ], + [ + 488.0, + 351.0 + ], + [ + 551.0, + 307.0 + ], + [ + 600.0, + 242.0 + ], + [ + 698.0, + 217.0 + ], + [ + 767.0, + 310.0 + ], + [ + 790.0, + 405.0 + ] + ], + "image": "060754485.jpg", + "scale": 3.374796, + "center": [ + 698.0, + 404.0 + ] + } +] diff --git a/tests/data/mpii/test_mpii_trb.json b/tests/data/mpii/test_mpii_trb.json index 8014c99c8f..fd363b5659 100644 --- a/tests/data/mpii/test_mpii_trb.json +++ b/tests/data/mpii/test_mpii_trb.json 
@@ -1,760 +1,760 @@ -{ - "info": { - "description": "For TRBMPI testing.", - "year": "2020", - "date_created": "2020/06/20" - }, - "categories": [ - { - "supercategory": "person", - "id": 1, - "name": "person", - "keypoints": [ - "left_shoulder", - "right_shoulder", - "left_elbow", - "right_elbow", - "left_wrist", - "right_wrist", - "left_hip", - "right_hip", - "left_knee", - "right_knee", - "left_ankle", - "right_ankle", - "head", - "neck", - "right_neck", - "left_neck", - "medial_right_shoulder", - "lateral_right_shoulder", - "medial_right_bow", - "lateral_right_bow", - "medial_right_wrist", - "lateral_right_wrist", - "medial_left_shoulder", - "lateral_left_shoulder", - "medial_left_bow", - "lateral_left_bow", - "medial_left_wrist", - "lateral_left_wrist", - "medial_right_hip", - "lateral_right_hip", - "medial_right_knee", - "lateral_right_knee", - "medial_right_ankle", - "lateral_right_ankle", - "medial_left_hip", - "lateral_left_hip", - "medial_left_knee", - "lateral_left_knee", - "medial_left_ankle", - "lateral_left_ankle" - ] - } - ], - "images": [ - { - "file_name": "004645041.jpg", - "height": 720, - "width": 1280, - "id": 4645041 - }, - { - "file_name": "005808361.jpg", - "height": 720, - "width": 1280, - "id": 5808361 - }, - { - "file_name": "051423444.jpg", - "height": 720, - "width": 1280, - "id": 51423444 - }, - { - "file_name": "052475643.jpg", - "height": 480, - "width": 854, - "id": 52475643 - }, - { - "file_name": "060754485.jpg", - "height": 720, - "width": 1280, - "id": 60754485 - } - ], - "annotations": [ - { - "num_joints": 38, - "keypoints": [ - 1067.0, - 253.0, - 2.0, - 902.0, - 253.0, - 2.0, - 1167.0, - 353.0, - 2.0, - 798.0, - 340.0, - 2.0, - 1142.0, - 478.0, - 2.0, - 869.0, - 214.0, - 2.0, - 1040.0, - 454.0, - 2.0, - 908.0, - 438.0, - 2.0, - 906.0, - 528.0, - 2.0, - 816.0, - 510.0, - 2.0, - 883.0, - 707.0, - 2.0, - 804.0, - 711.0, - 2.0, - 962.2409, - 80.0306, - 2.0, - 982.7591, - 235.9694, - 2.0, - 895.418, - 241.258, - 2, - 1043.704, - 160.177, - 2, - 901.513, - 343.02, - 2, - 863.72, - 263.644, - 2, - 837.5939, - 349.993, - 2, - 862.766, - 257.015, - 2, - 801.5946, - 274.022, - 2, - 879.233, - 196.169, - 2, - 1110.547, - 339.254, - 2, - 1036.455, - 221.547, - 2, - 1133.252, - 424.742, - 2, - 1157.976, - 298.364, - 2, - 1128.938, - 496.521, - 2, - 1178.462, - 418.695, - 2, - 906.36, - 495.814, - 2, - 886.084, - 430.921, - 2, - 921.047, - 497.919, - 2, - 798.3963, - 620.615, - 2, - 883.956, - 622.444, - 2, - 0, - 0, - 0, - 906.36, - 495.814, - 2, - 1063.55, - 427.43, - 2, - 858.607, - 625.533, - 2, - 998.667, - 532.689, - 2, - 0, - 0, - 0, - 930.346, - 637.297, - 2 - ], - "image_id": 5808361, - "center": [ - 966.0, - 340.0 - ], - "scale": 0.1756068552, - "category_id": 1, - "id": 2736, - "iscrowd": 0 - }, - { - "num_joints": 40, - "keypoints": [ - 358.0, - 177.0, - 2.0, - 296.0, - 167.0, - 2.0, - 387.0, - 236.0, - 2.0, - 281.0, - 208.0, - 2.0, - 392.0, - 167.0, - 2.0, - 296.0, - 135.0, - 2.0, - 353.0, - 275.0, - 2.0, - 290.0, - 274.0, - 2.0, - 403.0, - 299.0, - 2.0, - 318.0, - 299.0, - 2.0, - 394.0, - 409.0, - 2.0, - 317.0, - 412.0, - 2.0, - 347.0055, - 105.8949, - 2.0, - 329.9945, - 162.1051, - 2.0, - 288.387, - 168.411, - 2, - 352.646, - 153.542, - 2, - 278.645, - 195.766, - 2, - 272.16, - 185.5, - 2, - 295.672, - 202.247, - 2, - 275.016, - 171.472, - 2, - 297.774, - 179.573, - 2, - 314.8, - 136.217, - 2, - 362.128, - 228.378, - 2, - 343.02, - 176.81, - 2, - 402.3, - 211.171, - 2, - 373.628, - 192.749, - 2, - 389.14, - 148.105, - 2, - 382.448, - 186.517, - 2, - 
340.876, - 312.739, - 2, - 271.97, - 273.448, - 2, - 323.194, - 285.55, - 2, - 300.533, - 368.868, - 2, - 328.476, - 360.12, - 2, - 309.13, - 434.758, - 2, - 340.876, - 312.739, - 2, - 362.155, - 232.654, - 2, - 381.581, - 365.148, - 2, - 388.754, - 284.757, - 2, - 396.32, - 448.91, - 2, - 409.878, - 357.015, - 2 - ], - "image_id": 52475643, - "center": [ - 316.0, - 220.0 - ], - "scale": 0.47030507400000005, - "category_id": 1, - "id": 28438, - "iscrowd": 0 - }, - { - "num_joints": 32, - "keypoints": [ - 991.0, - 300.0, - 2.0, - 1070.0, - 290.0, - 2.0, - 912.0, - 345.0, - 2.0, - 1018.0, - 317.0, - 1.0, - 842.0, - 330.0, - 2.0, - 988.0, - 294.0, - 1.0, - 973.0, - 496.0, - 2.0, - 1072.0, - 474.0, - 2.0, - 961.0, - 650.0, - 2.0, - 1033.0, - 649.0, - 2.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 997.002, - 181.3752, - 2.0, - 1026.998, - 281.6248, - 2.0, - 1071.131, - 283.036, - 2, - 969.6, - 247.337, - 2, - 1087.017, - 347.51, - 2, - 1058.52, - 305.636, - 2, - 1026.458, - 332.152, - 2, - 1014.72, - 288.149, - 2, - 995.817, - 309.098, - 2, - 980.493, - 294.738, - 2, - 937.925, - 366.241, - 2, - 987.08, - 282.067, - 2, - 869.4918, - 356.925, - 2, - 931.259, - 311.619, - 2, - 844.2, - 326.671, - 2, - 873.5471, - 326.164, - 2, - 1004.56, - 610.365, - 2, - 1075.26, - 526.816, - 2, - 1005.788, - 610.747, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 1004.56, - 610.365, - 2, - 935.105, - 446.09, - 2, - 0, - 0, - 0, - 937.158, - 604.939, - 2, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "image_id": 51423444, - "center": [ - 1030.0, - 396.0 - ], - "scale": 0.2639497014, - "category_id": 1, - "id": 27407, - "iscrowd": 0 - }, - { - "num_joints": 32, - "keypoints": [ - 889.0, - 328.0, - 2.0, - 790.0, - 300.0, - 2.0, - 915.0, - 452.0, - 2.0, - 740.0, - 297.0, - 2.0, - 906.0, - 553.0, - 2.0, - 698.0, - 264.0, - 2.0, - 868.0, - 509.0, - 2.0, - 786.0, - 498.0, - 2.0, - 860.0, - 693.0, - 2.0, - 804.0, - 659.0, - 2.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 816.0921, - 203.0674, - 2.0, - 838.9079, - 308.9326, - 2.0, - 790.983, - 286.144, - 2, - 864.959, - 243.71, - 2, - 769.273, - 388.686, - 2, - 780.19, - 289.158, - 2, - 742.1957, - 339.679, - 2, - 729.0975, - 277.63, - 2, - 710.4349, - 292.928, - 2, - 690.765, - 253.113, - 2, - 871.88, - 429.244, - 2, - 861.04, - 275.182, - 2, - 894.319, - 509.588, - 2, - 929.981, - 418.01, - 2, - 901.22, - 581.445, - 2, - 924.708, - 508.795, - 2, - 823.63, - 647.69, - 2, - 769.341, - 541.653, - 2, - 850.322, - 625.912, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 823.63, - 647.69, - 2, - 905.804, - 486.059, - 2, - 0, - 0, - 0, - 907.2, - 647.636, - 2, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "image_id": 4645041, - "center": [ - 809.0, - 403.0 - ], - "scale": 0.2550422514, - "category_id": 1, - "id": 26901, - "iscrowd": 0 - }, - { - "num_joints": 39, - "keypoints": [ - 698.0, - 217.0, - 2.0, - 600.0, - 242.0, - 2.0, - 767.0, - 310.0, - 2.0, - 551.0, - 307.0, - 2.0, - 790.0, - 405.0, - 2.0, - 488.0, - 351.0, - 2.0, - 747.0, - 421.0, - 2.0, - 670.0, - 437.0, - 2.0, - 751.0, - 574.0, - 2.0, - 685.0, - 579.0, - 2.0, - 768.0, - 717.0, - 2.0, - 694.0, - 684.0, - 2.0, - 591.3663, - 117.4341, - 2.0, - 642.6337, - 217.5659, - 2.0, - 584.59, - 231.591, - 2, - 649.816, - 141.342, - 2, - 605.668, - 337.961, - 2, - 566.695, - 256.226, - 2, - 581.685, - 330.685, - 2, - 510.6881, - 317.872, - 2, - 530.2038, - 341.493, - 2, - 481.6367, - 358.297, - 2, - 725.537, - 311.805, - 2, - 651.465, - 169.726, - 2, - 766.905, - 363.613, - 2, - 774.747, - 267.874, - 2, - 784.675, - 432.399, - 2, - 
796.495, - 356.847, - 2, - 726.118, - 528.068, - 2, - 649.638, - 446.552, - 2, - 737.496, - 516.31, - 2, - 667.32, - 620.422, - 2, - 736.118, - 628.657, - 2, - 663.697, - 699.859, - 2, - 726.118, - 528.068, - 2, - 799.279, - 341.113, - 2, - 727.888, - 644.205, - 2, - 798.633, - 526.499, - 2, - 0, - 0, - 0, - 799.314, - 644.016, - 2 - ], - "image_id": 60754485, - "center": [ - 698.0, - 404.0 - ], - "scale": 0.24552578040000003, - "category_id": 1, - "id": 26834, - "iscrowd": 0 - } - ] -} +{ + "info": { + "description": "For TRBMPI testing.", + "year": "2020", + "date_created": "2020/06/20" + }, + "categories": [ + { + "supercategory": "person", + "id": 1, + "name": "person", + "keypoints": [ + "left_shoulder", + "right_shoulder", + "left_elbow", + "right_elbow", + "left_wrist", + "right_wrist", + "left_hip", + "right_hip", + "left_knee", + "right_knee", + "left_ankle", + "right_ankle", + "head", + "neck", + "right_neck", + "left_neck", + "medial_right_shoulder", + "lateral_right_shoulder", + "medial_right_bow", + "lateral_right_bow", + "medial_right_wrist", + "lateral_right_wrist", + "medial_left_shoulder", + "lateral_left_shoulder", + "medial_left_bow", + "lateral_left_bow", + "medial_left_wrist", + "lateral_left_wrist", + "medial_right_hip", + "lateral_right_hip", + "medial_right_knee", + "lateral_right_knee", + "medial_right_ankle", + "lateral_right_ankle", + "medial_left_hip", + "lateral_left_hip", + "medial_left_knee", + "lateral_left_knee", + "medial_left_ankle", + "lateral_left_ankle" + ] + } + ], + "images": [ + { + "file_name": "004645041.jpg", + "height": 720, + "width": 1280, + "id": 4645041 + }, + { + "file_name": "005808361.jpg", + "height": 720, + "width": 1280, + "id": 5808361 + }, + { + "file_name": "051423444.jpg", + "height": 720, + "width": 1280, + "id": 51423444 + }, + { + "file_name": "052475643.jpg", + "height": 480, + "width": 854, + "id": 52475643 + }, + { + "file_name": "060754485.jpg", + "height": 720, + "width": 1280, + "id": 60754485 + } + ], + "annotations": [ + { + "num_joints": 38, + "keypoints": [ + 1067.0, + 253.0, + 2.0, + 902.0, + 253.0, + 2.0, + 1167.0, + 353.0, + 2.0, + 798.0, + 340.0, + 2.0, + 1142.0, + 478.0, + 2.0, + 869.0, + 214.0, + 2.0, + 1040.0, + 454.0, + 2.0, + 908.0, + 438.0, + 2.0, + 906.0, + 528.0, + 2.0, + 816.0, + 510.0, + 2.0, + 883.0, + 707.0, + 2.0, + 804.0, + 711.0, + 2.0, + 962.2409, + 80.0306, + 2.0, + 982.7591, + 235.9694, + 2.0, + 895.418, + 241.258, + 2, + 1043.704, + 160.177, + 2, + 901.513, + 343.02, + 2, + 863.72, + 263.644, + 2, + 837.5939, + 349.993, + 2, + 862.766, + 257.015, + 2, + 801.5946, + 274.022, + 2, + 879.233, + 196.169, + 2, + 1110.547, + 339.254, + 2, + 1036.455, + 221.547, + 2, + 1133.252, + 424.742, + 2, + 1157.976, + 298.364, + 2, + 1128.938, + 496.521, + 2, + 1178.462, + 418.695, + 2, + 906.36, + 495.814, + 2, + 886.084, + 430.921, + 2, + 921.047, + 497.919, + 2, + 798.3963, + 620.615, + 2, + 883.956, + 622.444, + 2, + 0, + 0, + 0, + 906.36, + 495.814, + 2, + 1063.55, + 427.43, + 2, + 858.607, + 625.533, + 2, + 998.667, + 532.689, + 2, + 0, + 0, + 0, + 930.346, + 637.297, + 2 + ], + "image_id": 5808361, + "center": [ + 966.0, + 340.0 + ], + "scale": 0.1756068552, + "category_id": 1, + "id": 2736, + "iscrowd": 0 + }, + { + "num_joints": 40, + "keypoints": [ + 358.0, + 177.0, + 2.0, + 296.0, + 167.0, + 2.0, + 387.0, + 236.0, + 2.0, + 281.0, + 208.0, + 2.0, + 392.0, + 167.0, + 2.0, + 296.0, + 135.0, + 2.0, + 353.0, + 275.0, + 2.0, + 290.0, + 274.0, + 2.0, + 403.0, + 299.0, + 2.0, + 318.0, + 299.0, + 2.0, + 394.0, 
+ 409.0, + 2.0, + 317.0, + 412.0, + 2.0, + 347.0055, + 105.8949, + 2.0, + 329.9945, + 162.1051, + 2.0, + 288.387, + 168.411, + 2, + 352.646, + 153.542, + 2, + 278.645, + 195.766, + 2, + 272.16, + 185.5, + 2, + 295.672, + 202.247, + 2, + 275.016, + 171.472, + 2, + 297.774, + 179.573, + 2, + 314.8, + 136.217, + 2, + 362.128, + 228.378, + 2, + 343.02, + 176.81, + 2, + 402.3, + 211.171, + 2, + 373.628, + 192.749, + 2, + 389.14, + 148.105, + 2, + 382.448, + 186.517, + 2, + 340.876, + 312.739, + 2, + 271.97, + 273.448, + 2, + 323.194, + 285.55, + 2, + 300.533, + 368.868, + 2, + 328.476, + 360.12, + 2, + 309.13, + 434.758, + 2, + 340.876, + 312.739, + 2, + 362.155, + 232.654, + 2, + 381.581, + 365.148, + 2, + 388.754, + 284.757, + 2, + 396.32, + 448.91, + 2, + 409.878, + 357.015, + 2 + ], + "image_id": 52475643, + "center": [ + 316.0, + 220.0 + ], + "scale": 0.47030507400000005, + "category_id": 1, + "id": 28438, + "iscrowd": 0 + }, + { + "num_joints": 32, + "keypoints": [ + 991.0, + 300.0, + 2.0, + 1070.0, + 290.0, + 2.0, + 912.0, + 345.0, + 2.0, + 1018.0, + 317.0, + 1.0, + 842.0, + 330.0, + 2.0, + 988.0, + 294.0, + 1.0, + 973.0, + 496.0, + 2.0, + 1072.0, + 474.0, + 2.0, + 961.0, + 650.0, + 2.0, + 1033.0, + 649.0, + 2.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 997.002, + 181.3752, + 2.0, + 1026.998, + 281.6248, + 2.0, + 1071.131, + 283.036, + 2, + 969.6, + 247.337, + 2, + 1087.017, + 347.51, + 2, + 1058.52, + 305.636, + 2, + 1026.458, + 332.152, + 2, + 1014.72, + 288.149, + 2, + 995.817, + 309.098, + 2, + 980.493, + 294.738, + 2, + 937.925, + 366.241, + 2, + 987.08, + 282.067, + 2, + 869.4918, + 356.925, + 2, + 931.259, + 311.619, + 2, + 844.2, + 326.671, + 2, + 873.5471, + 326.164, + 2, + 1004.56, + 610.365, + 2, + 1075.26, + 526.816, + 2, + 1005.788, + 610.747, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1004.56, + 610.365, + 2, + 935.105, + 446.09, + 2, + 0, + 0, + 0, + 937.158, + 604.939, + 2, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "image_id": 51423444, + "center": [ + 1030.0, + 396.0 + ], + "scale": 0.2639497014, + "category_id": 1, + "id": 27407, + "iscrowd": 0 + }, + { + "num_joints": 32, + "keypoints": [ + 889.0, + 328.0, + 2.0, + 790.0, + 300.0, + 2.0, + 915.0, + 452.0, + 2.0, + 740.0, + 297.0, + 2.0, + 906.0, + 553.0, + 2.0, + 698.0, + 264.0, + 2.0, + 868.0, + 509.0, + 2.0, + 786.0, + 498.0, + 2.0, + 860.0, + 693.0, + 2.0, + 804.0, + 659.0, + 2.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 816.0921, + 203.0674, + 2.0, + 838.9079, + 308.9326, + 2.0, + 790.983, + 286.144, + 2, + 864.959, + 243.71, + 2, + 769.273, + 388.686, + 2, + 780.19, + 289.158, + 2, + 742.1957, + 339.679, + 2, + 729.0975, + 277.63, + 2, + 710.4349, + 292.928, + 2, + 690.765, + 253.113, + 2, + 871.88, + 429.244, + 2, + 861.04, + 275.182, + 2, + 894.319, + 509.588, + 2, + 929.981, + 418.01, + 2, + 901.22, + 581.445, + 2, + 924.708, + 508.795, + 2, + 823.63, + 647.69, + 2, + 769.341, + 541.653, + 2, + 850.322, + 625.912, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 823.63, + 647.69, + 2, + 905.804, + 486.059, + 2, + 0, + 0, + 0, + 907.2, + 647.636, + 2, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "image_id": 4645041, + "center": [ + 809.0, + 403.0 + ], + "scale": 0.2550422514, + "category_id": 1, + "id": 26901, + "iscrowd": 0 + }, + { + "num_joints": 39, + "keypoints": [ + 698.0, + 217.0, + 2.0, + 600.0, + 242.0, + 2.0, + 767.0, + 310.0, + 2.0, + 551.0, + 307.0, + 2.0, + 790.0, + 405.0, + 2.0, + 488.0, + 351.0, + 2.0, + 747.0, + 421.0, + 2.0, + 670.0, + 437.0, + 2.0, + 751.0, + 574.0, + 2.0, + 685.0, + 579.0, 
+ 2.0, + 768.0, + 717.0, + 2.0, + 694.0, + 684.0, + 2.0, + 591.3663, + 117.4341, + 2.0, + 642.6337, + 217.5659, + 2.0, + 584.59, + 231.591, + 2, + 649.816, + 141.342, + 2, + 605.668, + 337.961, + 2, + 566.695, + 256.226, + 2, + 581.685, + 330.685, + 2, + 510.6881, + 317.872, + 2, + 530.2038, + 341.493, + 2, + 481.6367, + 358.297, + 2, + 725.537, + 311.805, + 2, + 651.465, + 169.726, + 2, + 766.905, + 363.613, + 2, + 774.747, + 267.874, + 2, + 784.675, + 432.399, + 2, + 796.495, + 356.847, + 2, + 726.118, + 528.068, + 2, + 649.638, + 446.552, + 2, + 737.496, + 516.31, + 2, + 667.32, + 620.422, + 2, + 736.118, + 628.657, + 2, + 663.697, + 699.859, + 2, + 726.118, + 528.068, + 2, + 799.279, + 341.113, + 2, + 727.888, + 644.205, + 2, + 798.633, + 526.499, + 2, + 0, + 0, + 0, + 799.314, + 644.016, + 2 + ], + "image_id": 60754485, + "center": [ + 698.0, + 404.0 + ], + "scale": 0.24552578040000003, + "category_id": 1, + "id": 26834, + "iscrowd": 0 + } + ] +} diff --git a/tests/data/ochuman/test_ochuman.json b/tests/data/ochuman/test_ochuman.json index cdb1c67200..ddd12eaa5d 100644 --- a/tests/data/ochuman/test_ochuman.json +++ b/tests/data/ochuman/test_ochuman.json @@ -1,504 +1,504 @@ -{ - "categories": [ - { - "keypoints": [ - "nose", - "left_eye", - "right_eye", - "left_ear", - "right_ear", - "left_shoulder", - "right_shoulder", - "left_elbow", - "right_elbow", - "left_wrist", - "right_wrist", - "left_hip", - "right_hip", - "left_knee", - "right_knee", - "left_ankle", - "right_ankle" - ], - "skeleton": [ - [ - 16, - 14 - ], - [ - 14, - 12 - ], - [ - 17, - 15 - ], - [ - 15, - 13 - ], - [ - 12, - 13 - ], - [ - 6, - 12 - ], - [ - 7, - 13 - ], - [ - 6, - 7 - ], - [ - 6, - 8 - ], - [ - 7, - 9 - ], - [ - 8, - 10 - ], - [ - 9, - 11 - ], - [ - 2, - 3 - ], - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 4 - ], - [ - 3, - 5 - ], - [ - 4, - 6 - ], - [ - 5, - 7 - ] - ], - "id": 1, - "supercategory": "person", - "name": "person" - } - ], - "images": [ - { - "file_name": "003799.jpg", - "width": 900, - "height": 864, - "id": 1 - }, - { - "file_name": "000817.jpg", - "width": 665, - "height": 1000, - "id": 2 - }, - { - "file_name": "003896.jpg", - "width": 602, - "height": 900, - "id": 3 - } - ], - "annotations": [ - { - "area": 356725, - "image_id": 1, - "iscrowd": 0, - "category_id": 1, - "bbox": [ - 250, - 112, - 475, - 751 - ], - "segmentation": { - "counts": 
"Zei62gj08I7N2N2N2N1O1O001O1O1O001O1O4L3M3M2N2N1N3N1c_OdNU7]1jHhNQ7Y1nHlNm6V1PIPOk6Q1TITOh6l0fFcNnKf0W=h0kFjNeKc0[=d0oFQO]Ka0`=>SGQO\\Kg0[=9YGQOZKk0X=5^GPOYKS1g9mNoFWOnMk1R6oNYK]1\\9WO\\FnNkNa1T6mNWKd1W9EdEdNIX1U6lNUKe1V9f0`EjNU6kNUKf1S9f0cEjNT6kNUKh1Q9c0fEkNT6kNSKi1_4oMRNc2XLkNT6jNRKk1Z4bNfMo1jLkNT6iNRKm1S4SO]M]1ZMkNS6iNRKn1n3]OYMS1dMjNS6iNPKo1l33dL>]NhNS6nNjJk1l37dL;cNfNS6mNiJn1`3?nL2fNeNR6mNiJP2l2P1_MAiNcNS6lNhJT2[2Z1oMUOkNbNS6jNiJW2U2X1UNVOjNbNS6iNhJZ2Q2T1[NYOiNaNR6iNhJ\\2m1U1]NVOlN`NS6iNgJ^2h1X1[NUOSO]NS6hNfJa2d1Z1gMFLhMS6gNeJd2a1V6gM_HS6hNdJf2^1T6kM_Ho5`MRJW1f0h2Z1S6oM]HP6bMQJU1f0l2V1Q6SN\\HP6dMPJS1f0P3S1n5VNZHR6fMnIS1g0Q3o0m5ZNYHR6gMnIR1f0T3j0m5`NVHR6QOcJP3e0k5fNTHR6QOdJR3?k5kNRHR6QOdJa3N^5\\OPH^5EXKQ>ZOZBZ5I\\Km=ZOZBo44hKa=YO\\Bm44jK`=YO\\Bl45kK_=YO`Ah5Q1PK^=XO_Ai5U1nJn>R5TAmJl>R5VAnJj>Q5VAoJj>Q5WAnJj>R5VAnJi>R5XAmJi>S5WAlJi>U5WAkJi>T5WAlJi>U5WAgJm>X5TAdJX=F_Ag5ja0YJV^Og5ia0ZJU^Oi5ia0XJV^Oj5ia0VJW^Oj5ha0WJX^Oj5ea0RJa^On5l`0dJU_O]5\\`0oJf_OR5m?YKS@h4k?YKV@h4g?XK[@h4c?YK]@i4`?WKa@k4[?WKe@j4Y?WKf@l4X?TKh@n4U?RKm@o4P?RKQAo4l>RKUAP5g>QKZAP5b>RK_Ao4^>QKdAQ5W>PKkAQ5R>oJPBR5n=mJTBU5h=lJYBU5d=kJ^BV5_=jJcBX5Z=iJfBX5W=iJjBX5S=jJmBX5o_9o@fFU7HBg9RIeFY7O[Oa9UIcF^75UO^:h0eEUO]:j0fESO[:k0hERO[:k0iEROX:m0jEPOX:o0jEoNX:n0kEoNW:P1kEnNV:Q1lEmNV:P1lEnNV:Q1lEmNU:R1mElNU::eFD\\96kFHV94nFJT92QGLP93QGLQ91QGMQ91QGNP90RGNP91QGOo8OTGOn8MUG2l8LVG4j8JXG5i8IYG5i8JXG4k8JUG4n8JTG4n8KSG1Q9MQGOT9OmFOU9NnFOU9OmFOU9NmF1U9MjF1\\9CcFd0b9[O\\Ff0f9YOXFh0j9XOTFg0o9XOPFh0R:TOSEnKlMm4S=ROPEVLjMh4X=QOlD\\LeMf4a=mNhDaLdMc4e=kNeDfLbM`4k=iNbDkL_MY4T>^NiD\\MPMP4_>cN`D`MmLm3g>aNZDh1YMXK_>P3VDf1dm\\OaEd`0m:k^OgEd`0fN1O2N2N2N010O010O010O0100O0YM_Eh@a:S?iEh@V:T?QFi@P:R?WFl@k9n>[Fo@h9j>^FTAe9e>bFWAb9b>eF[A^9^>iF_AZ9Z>mFbAX9V>nFhAV9P>PGnAS9k=SGRBQ9g=UGWBm8c=YG[Bj8J\\DVU:V@nFc?aTFSAl9b>^F`Ab9o;ZFgB>\\1X9_;PGaB2R2n8o:fG[BFh2c8`:RJoChK0V:c;_J_D^KJS:[;iJoDVKAR:`;fJSEZKYOP:c;eJWE^KROl9h;dJZEbKjNj9l;bJ^EfKbNh9P0100O00100O00100O10O01O10O0`MZE_@f:Z?eEb@Z:X?PFe@o9U?\\Ff@f9R?dFk@^9k>lFSAW9e>PGXAS9a>TG\\Ao8]>XG_Al8Z>ZGcAj8V>]GfAg8S>`GiAd8P>cGlAa8m=eGPB_8j=gGSB]8f=iGVB\\8;mCQ]m0\\OhROb0Pn0O101N2N3M3K5Kd^R9", - "size": [ - 1000, - 665 - ] - }, - "num_keypoints": 13, - "id": 3, - "keypoints": [ - 0, - 0, - 0, - 0, - 0, - 0, - 217.01654468945043, - 302.1730492658786, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 285, - 328, - 2, - 187, - 339, - 2, - 293, - 412, - 2, - 176, - 414, - 2, - 308, - 490, - 2, - 174, - 495, - 2, - 270, - 491, - 2, - 224, - 493, - 2, - 271, - 614, - 1, - 227, - 614, - 2, - 225, - 704, - 2, - 215, - 716, - 2 - ] - }, - { - "area": 517461, - "image_id": 3, - "iscrowd": 0, - "category_id": 1, - "bbox": [ - 0, - 39, - 601, - 861 - ], - "segmentation": { - "counts": 
"Xj0Y1jj02N101N10001N10000O0100O010O10O0100O02O00001N101O001N2O001O0O2O1O0O2O0UAUN]3l1aLZNY3h1fLXNZ3h1eLYN[3h1dLXN[3j1cLWN]3i1cLWN[EL[=o1XMVNZEN]=m1YMUNWE1_=l1XMSNXE4^=j1XMRNZE6]=i1XMQNZE8]=h1XMPNZE:j<^O_CZ2[:nM\\E<_S1gGn3^JjJl=W1eGP4cJeJh=\\1dGo3U9PLjFQ4W9oKgFR4Z9nKeFR4\\9mKdFS4\\9nKbFS4_9mK`FS4a9lK_FT4b9lK]FT4d9kK[FV4e9kKZFT4h9kKXFU4i9kKVFU4k9jKUFV4l9iKTFW4l9jKTFU4m9jKTFU4m9jKSFV4n9jKRFU4n9kKSFU4m9kKRFU4o9jKRFU4o9jKRFU4n9lKQFT4o9lKRFS4o9lKQFT4o9mKQFR4o9nKRFQ4o9nKQFR4o9nKRFQ4n9PLQFP4P:oKQFo3P:QLQFn3o9RLQFn3P:RLPFm3P:SLQFl3o9TLRFk3o9SLSFl3m9SLUFk3l9ULUFj3l9VLTFi3l9XLRFi3n9XLoEj3R:VLlEk3U:ULhEm3X:SLgEn3Z:RLdEo3]:QL`EQ4`:PL^EQ4c:oK[ER4e:oKYER4h:nKWER4j:nKTES4l:nKRET4n:lKPEU4P;lKnDU4S;jKmDV4S;kKkDV4U;kKjDU4W;kKgDV4Y;jKfDW4[;iKdDW4\\;jKbDW4^;iKaDX4`;hK_DX4a;iK]DX4c;hK\\DY4e;gKYDZ4g;gKWD[4i;dKVD]4j;dKUD\\4l;dKRD^4m;bKSD^4n;bKPD_4QXJbAj5^>VJ`Ak5a>UJ]Am5c>SJ[An5f>QJYAQ6g>oIWAR6i>oIUAS6k>mIRAU6o>jIPAW6Q?iIm@X6T?hIj@Y6V?hIg@Z6Z?fIc@]6\\?dIa@^6`?cI\\@_6d?cIY@^6h?cI`_OZN2U8_`0bIZ_ObN0m7f`0cIV_OU7k`0Q2001O1O001O0O2O010O0010O01O10O01O100O1O1000000001O000000001O0000001O0oLUEiDk:W;WEgDi:Y;YEeDh:Z;YEeDg:[;ZEdDf:\\;[EcDf:[;\\EdDd:\\;]EcDc:\\;_EcDa:];_EdDa:Z;aEeD_:[;cEcD^:[;dEdD]:[;dEdD^:Y;dEfD]:Y;dEfD^:W;eEgD[:Y;fEgDZ:h:XFVEh9i:\\FTEe9j:_FSEa9l:dFPE\\9o:kFkDU9U;PGeDP9\\;TG`Dk8a;ZGYDg8g;]GUDb8l;bGoC^8RR3U:k7fEjD7[3Z9`L\\FX;^1YHT8cLZF\\:FkEo1W3o7jLTFm98mEh1\\3k7]7cFSEd1a3h7Y7XIhHf6W7\\IiHc6U7aIkH]6T7eImHY6R7jInHT6Q7nIPIQ6n6RJQIm5n6UJSIi5k6[JUIc5j6_JVI`5g6eJYIZ5a6lJ_IS5X6WKiIg4S6^KmIb4o5bKQJ]4l5gKUJW4h5mKYJQ4f5QLZJo3c5UL]Ji3b5YL^Jf3W1jFTOd5Ec3T1oFoNb5KS4a0]FAc5NS4=\\FAe50P4=^F_Oe54m3<_F]Oh55k3<^F\\Oj58h3;_F[Ok59d3>bFXOk5<\\3`0jFROl5c0R3=SGnNm5j0h29\\GlNn5o0`26cGjNn5T1Y23jGgNo5V1U24mGdNS6T1n1:oGaNZ6P1e1`0RH_N`6k0]1g0SH\\Ng6h0mHbLc7Z4jH[Nh6j0fHdLh7`0oGm2k0SOk6m0]HfLn7;PHP3j0ROm6\\18aLQHR3k0oNo6]14aLRHT3l0mNP7\\12aLTHV3k0lNQ7\\1O`LVHY3j0jNT7\\1LVL_He3a0iNV7[1ROMgHWM1^2Y8;nN1iHVM0]2[8:mN1iHWM1]2[8:kN1jHWM1]2]88iNfMnGf1S2=W75hNdMRHa1D]N^2X2V74gNbMTHe1DXN^2\\2V73WOYOiId0Q71VO[OjIc0S7OSO]OlIc0S7NQO_OlId0U7KmNBoIb0W7IjNEPJa0W7IhNGRJ`0W7GgNIRJ`0X7GdNJUJ>Y7FbNLVJ>X7EaNNWJ=Y7D_N0YJN10100000000001OO01000000O1O100O1N2N3M2N3M2N3M2K5OO1O20O0000100O1O1O1O1O2N1N3QOnTOh0Xk0N3M2H8M3N3M2NTW3", - "size": [ - 900, - 602 - ] - }, - "num_keypoints": 12, - "id": 4, - "keypoints": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 446, - 292, - 2, - 263, - 225, - 2, - 540, - 346, - 2, - 161, - 269, - 2, - 560, - 327, - 2, - 103, - 312, - 2, - 308, - 495, - 2, - 236, - 491, - 2, - 352, - 499, - 2, - 124, - 652, - 2, - 442, - 677, - 2, - 67, - 827, - 2 - ] - }, - { - "area": 404028, - "image_id": 3, - "iscrowd": 0, - "category_id": 1, - "bbox": [ - 84, - 45, - 516, - 783 - ], - "segmentation": { - "counts": 
"ln`2?ck04M2M3N2M3N2M3N2M3N2M3M4L3M3N3L5K4L5K5L2M3M2O2O0O2N2O0O2N101N2N2N2O1N2N2O1N2N4M4K4L2O1N2O1O000O10000000O1001N101O001O001O001O001O0010O01O1O001O1O3M;E1O2O0O2N1O2N2N1O2N1O3N2M3M3M3N2R@PKc8o4\\GQKe8o4YGSKh8l4VGUKl8f4e@fJ]6f0o8b4g@fJY6i0P9a4SG`Ko8_4PGaKR9]4mFcKV9\\4iFdKY9[4fFdK\\9[4cFfK_9Y4dARKX3d0W;Y4aATKW3a0[;Z4]AXKU3=`;Z4\\AZKR3;e;Z4YA]KP36k;\\4TAbKm21R<[4RAgKXO1U2Jf=]4m@kKiN>`2TOP>c4g@VMX1XNS>`4e@YMU1ZNY>Z4b@\\MS1[N_>U4_@`Mo0]Ng>o3Y@dMo0^Nl>j3V@hMk0_NT?f3Q@kMi0_N[?b3m_OoMe0`Na?`3i_OQNc0`Ng?]3g_OSN`0`Nk?]3e_OSN=`NR`0[3a_OVN;_NW`0Z3^_OWN7bN\\`0V3^_OXN1eNd`0R3[_OYNMgNj`0o2Z_OZNHjNPa0k2X_O[NDkN[;QO[Ig3gK]N_OlNa;SOWId3jK\\NZOnNh;TOSI`3lK^NUOPOn;UOnH]3oK^NQOROSmMa5P1l4b2]E_Lg5o0l4o4UKQKk4o4TKRKl4n4TKRKl4m4UKSKl4l4TKTKl4k4TKVKl4j4TKWKk4h4VKXKk4g4UKYKk4g4TKZKm4d4TK]Kl4b4TK^Kl4a4UK_Kl4`4SKbKl4]4UKcKl4\\4TKeKk4[4UKeKl4Y4TKiKl4V4TKjKl4U4UKkKl4T4TKmKk4R4VKnKj4R4WKnKh4R4XKSIcI^2U;^4ZKQIeI_2Q;`4ZKPIiI^2l:a4]KoHjI]2j:d4\\KnHmI\\2g:f4]KkHPJ\\2d:h4ULTKl3l4VLRKi3n4ZLoJg3Q5[LlJe3T5_LXJR4h5PLkI[4U6gK`Ia4`6cKWIc4i6_KoHg4P7V810O01000O010UKS[O^2md0dMS[O[2md0fMS[OX2md0jMT[OT2ld0nMS[OQ2ld0QNT[Om1md0TNS[Ok1ld0WNT[Og1md0[NS[O\\Nn0S1nc0c0U[OjMc1[1Wc0l0g]OlNZb0T1m]OdNTb0]1S^OZNna0f1W^OTNla0j1X^OQNja0n1Y^OnMia0R2Z^OiMha0V2[^OfMga0Y2]^ObMea0]2_^O^Mca0b2P4O1O2N3N1N2N3M2O2M3_NoUOi0Uj0POoUOP1Sj0hNRVOY1\\j0010O010000O10000O1000]LhNY\\OW1ec0mN[\\OR1bc0RO]\\Om0ac0VO`\\Oi0]c0[Oc\\Od0Zc0_Og\\O`0Ub0nNX[Of0b2PJRAQ6P?oIn@Q6T?oIi@R6Y?nIe@R6]?mIc@S6^?mI`@S6b?mI]@R6e?nIZ@P6i?PJV@j5Q`0VJm_Oh5W`0WJi_Og5Z`0YJe_Od5_`0\\J__Oc5d`0]J\\_O`5g`0_JY_O_5j`0`JV_O]5o`0aJQ_OR5]a0mJc^Oo4ba0QK^^Ok4fa0TKZ^Oh4ka0WKU^Og4S1WKR=1kAf4V1\\KkP2GkIHX4`0n1HSJ\\OQ4l0m1Gf38[LHd38]LHc37^LHb38_LHa37`LH`38aLH_37bLI^36cLI]37dLI\\36eLI\\36eLJZ36fLKZ33hLLY33hLMW33jLLW33jLMU32lLOT30mLOS31nLOR30oLOQ31PMOP3ORM0n20RM1n2NSM2l2MVM2k2MVM3i2LYM3h2KYM6f2J[M6d2I^M6c2I^M7a2HaM7_2IaM8^2GdM9\\2FeM:Z2EhM;W2EjM;V2ClM=T2BmM>R2APN?P2@PNa0P2]ORNc0m1]OTNc0l1[OVNe0j1ZOWNf0h1YOZNg0f1XO[Nh0e1VO]Nj0b1UO`Nl0_1ROcNo0\\1oNeNR1[1lNgNW1V1gNlN]1_CaNY=OZOl1c0SN^Oo1`0oMBS2W=1O1N0`VOlMQi0S2dVOXN]i0W21O1O1O1O010O1O1N2L4L4M2F;L3M4L3M4M2N3L3N3M2N3O0O2O0O2O0O2O0O2O002M5L3M4L_d0", - "size": [ - 900, - 602 - ] - }, - "num_keypoints": 15, - "id": 5, - "keypoints": [ - 138.28123044948043, - 178.42673457794075, - 2, - 133.4071828533262, - 152.16976849543238, - 2, - 0, - 0, - 0, - 168.78333476089736, - 123.08271026031831, - 2, - 0, - 0, - 0, - 201, - 192, - 2, - 322, - 124, - 1, - 249, - 344, - 1, - 488, - 172, - 2, - 219, - 433, - 1, - 572, - 238, - 2, - 436, - 381, - 2, - 439, - 380, - 2, - 354, - 608, - 2, - 307, - 523, - 1, - 494, - 683, - 2, - 313, - 753, - 2 - ] - } - ], - "info": { - "description": "MMPose example ochuman dataset", - "version": "1.0", - "year": "2020", - "date_created": "2020/08/31" - } -} +{ + "categories": [ + { + "keypoints": [ + "nose", + "left_eye", + "right_eye", + "left_ear", + "right_ear", + "left_shoulder", + "right_shoulder", + "left_elbow", + "right_elbow", + "left_wrist", + "right_wrist", + "left_hip", + "right_hip", + "left_knee", + "right_knee", + "left_ankle", + "right_ankle" + ], + "skeleton": [ + [ + 16, + 14 + ], + [ + 14, + 12 + ], + [ + 17, + 15 + ], + [ + 15, + 13 + ], + [ + 12, + 13 + ], + [ + 6, + 12 + ], + [ + 7, + 13 + ], + [ + 6, + 7 + ], + [ + 6, + 8 + ], + [ + 7, + 9 + ], + [ + 8, + 10 + ], + [ + 9, + 11 + ], + [ + 2, + 3 + ], + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 4 + ], + [ + 3, + 5 + ], + [ + 4, + 6 + ], + [ + 5, + 7 + ] + ], + "id": 1, + "supercategory": "person", + "name": "person" + } + ], + "images": [ + { + "file_name": "003799.jpg", 
+ "width": 900, + "height": 864, + "id": 1 + }, + { + "file_name": "000817.jpg", + "width": 665, + "height": 1000, + "id": 2 + }, + { + "file_name": "003896.jpg", + "width": 602, + "height": 900, + "id": 3 + } + ], + "annotations": [ + { + "area": 356725, + "image_id": 1, + "iscrowd": 0, + "category_id": 1, + "bbox": [ + 250, + 112, + 475, + 751 + ], + "segmentation": { + "counts": "Zei62gj08I7N2N2N2N1O1O001O1O1O001O1O4L3M3M2N2N1N3N1c_OdNU7]1jHhNQ7Y1nHlNm6V1PIPOk6Q1TITOh6l0fFcNnKf0W=h0kFjNeKc0[=d0oFQO]Ka0`=>SGQO\\Kg0[=9YGQOZKk0X=5^GPOYKS1g9mNoFWOnMk1R6oNYK]1\\9WO\\FnNkNa1T6mNWKd1W9EdEdNIX1U6lNUKe1V9f0`EjNU6kNUKf1S9f0cEjNT6kNUKh1Q9c0fEkNT6kNSKi1_4oMRNc2XLkNT6jNRKk1Z4bNfMo1jLkNT6iNRKm1S4SO]M]1ZMkNS6iNRKn1n3]OYMS1dMjNS6iNPKo1l33dL>]NhNS6nNjJk1l37dL;cNfNS6mNiJn1`3?nL2fNeNR6mNiJP2l2P1_MAiNcNS6lNhJT2[2Z1oMUOkNbNS6jNiJW2U2X1UNVOjNbNS6iNhJZ2Q2T1[NYOiNaNR6iNhJ\\2m1U1]NVOlN`NS6iNgJ^2h1X1[NUOSO]NS6hNfJa2d1Z1gMFLhMS6gNeJd2a1V6gM_HS6hNdJf2^1T6kM_Ho5`MRJW1f0h2Z1S6oM]HP6bMQJU1f0l2V1Q6SN\\HP6dMPJS1f0P3S1n5VNZHR6fMnIS1g0Q3o0m5ZNYHR6gMnIR1f0T3j0m5`NVHR6QOcJP3e0k5fNTHR6QOdJR3?k5kNRHR6QOdJa3N^5\\OPH^5EXKQ>ZOZBZ5I\\Km=ZOZBo44hKa=YO\\Bm44jK`=YO\\Bl45kK_=YO`Ah5Q1PK^=XO_Ai5U1nJn>R5TAmJl>R5VAnJj>Q5VAoJj>Q5WAnJj>R5VAnJi>R5XAmJi>S5WAlJi>U5WAkJi>T5WAlJi>U5WAgJm>X5TAdJX=F_Ag5ja0YJV^Og5ia0ZJU^Oi5ia0XJV^Oj5ia0VJW^Oj5ha0WJX^Oj5ea0RJa^On5l`0dJU_O]5\\`0oJf_OR5m?YKS@h4k?YKV@h4g?XK[@h4c?YK]@i4`?WKa@k4[?WKe@j4Y?WKf@l4X?TKh@n4U?RKm@o4P?RKQAo4l>RKUAP5g>QKZAP5b>RK_Ao4^>QKdAQ5W>PKkAQ5R>oJPBR5n=mJTBU5h=lJYBU5d=kJ^BV5_=jJcBX5Z=iJfBX5W=iJjBX5S=jJmBX5o_9o@fFU7HBg9RIeFY7O[Oa9UIcF^75UO^:h0eEUO]:j0fESO[:k0hERO[:k0iEROX:m0jEPOX:o0jEoNX:n0kEoNW:P1kEnNV:Q1lEmNV:P1lEnNV:Q1lEmNU:R1mElNU::eFD\\96kFHV94nFJT92QGLP93QGLQ91QGMQ91QGNP90RGNP91QGOo8OTGOn8MUG2l8LVG4j8JXG5i8IYG5i8JXG4k8JUG4n8JTG4n8KSG1Q9MQGOT9OmFOU9NnFOU9OmFOU9NmF1U9MjF1\\9CcFd0b9[O\\Ff0f9YOXFh0j9XOTFg0o9XOPFh0R:TOSEnKlMm4S=ROPEVLjMh4X=QOlD\\LeMf4a=mNhDaLdMc4e=kNeDfLbM`4k=iNbDkL_MY4T>^NiD\\MPMP4_>cN`D`MmLm3g>aNZDh1YMXK_>P3VDf1dm\\OaEd`0m:k^OgEd`0fN1O2N2N2N010O010O010O0100O0YM_Eh@a:S?iEh@V:T?QFi@P:R?WFl@k9n>[Fo@h9j>^FTAe9e>bFWAb9b>eF[A^9^>iF_AZ9Z>mFbAX9V>nFhAV9P>PGnAS9k=SGRBQ9g=UGWBm8c=YG[Bj8J\\DVU:V@nFc?aTFSAl9b>^F`Ab9o;ZFgB>\\1X9_;PGaB2R2n8o:fG[BFh2c8`:RJoChK0V:c;_J_D^KJS:[;iJoDVKAR:`;fJSEZKYOP:c;eJWE^KROl9h;dJZEbKjNj9l;bJ^EfKbNh9P0100O00100O00100O10O01O10O0`MZE_@f:Z?eEb@Z:X?PFe@o9U?\\Ff@f9R?dFk@^9k>lFSAW9e>PGXAS9a>TG\\Ao8]>XG_Al8Z>ZGcAj8V>]GfAg8S>`GiAd8P>cGlAa8m=eGPB_8j=gGSB]8f=iGVB\\8;mCQ]m0\\OhROb0Pn0O101N2N3M3K5Kd^R9", + "size": [ + 1000, + 665 + ] + }, + "num_keypoints": 13, + "id": 3, + "keypoints": [ + 0, + 0, + 0, + 0, + 0, + 0, + 217.01654468945043, + 302.1730492658786, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 285, + 328, + 2, + 187, + 339, + 2, + 293, + 412, + 2, + 176, + 414, + 2, + 308, + 490, + 2, + 174, + 495, + 2, + 270, + 491, + 2, + 224, + 493, + 2, + 271, + 614, + 1, + 227, + 614, + 2, + 225, + 704, + 2, + 215, + 716, + 2 + ] + }, + { + "area": 517461, + "image_id": 3, + "iscrowd": 0, + "category_id": 1, + "bbox": [ + 0, + 39, + 601, + 861 + ], + "segmentation": { + "counts": 
"Xj0Y1jj02N101N10001N10000O0100O010O10O0100O02O00001N101O001N2O001O0O2O1O0O2O0UAUN]3l1aLZNY3h1fLXNZ3h1eLYN[3h1dLXN[3j1cLWN]3i1cLWN[EL[=o1XMVNZEN]=m1YMUNWE1_=l1XMSNXE4^=j1XMRNZE6]=i1XMQNZE8]=h1XMPNZE:j<^O_CZ2[:nM\\E<_S1gGn3^JjJl=W1eGP4cJeJh=\\1dGo3U9PLjFQ4W9oKgFR4Z9nKeFR4\\9mKdFS4\\9nKbFS4_9mK`FS4a9lK_FT4b9lK]FT4d9kK[FV4e9kKZFT4h9kKXFU4i9kKVFU4k9jKUFV4l9iKTFW4l9jKTFU4m9jKTFU4m9jKSFV4n9jKRFU4n9kKSFU4m9kKRFU4o9jKRFU4o9jKRFU4n9lKQFT4o9lKRFS4o9lKQFT4o9mKQFR4o9nKRFQ4o9nKQFR4o9nKRFQ4n9PLQFP4P:oKQFo3P:QLQFn3o9RLQFn3P:RLPFm3P:SLQFl3o9TLRFk3o9SLSFl3m9SLUFk3l9ULUFj3l9VLTFi3l9XLRFi3n9XLoEj3R:VLlEk3U:ULhEm3X:SLgEn3Z:RLdEo3]:QL`EQ4`:PL^EQ4c:oK[ER4e:oKYER4h:nKWER4j:nKTES4l:nKRET4n:lKPEU4P;lKnDU4S;jKmDV4S;kKkDV4U;kKjDU4W;kKgDV4Y;jKfDW4[;iKdDW4\\;jKbDW4^;iKaDX4`;hK_DX4a;iK]DX4c;hK\\DY4e;gKYDZ4g;gKWD[4i;dKVD]4j;dKUD\\4l;dKRD^4m;bKSD^4n;bKPD_4QXJbAj5^>VJ`Ak5a>UJ]Am5c>SJ[An5f>QJYAQ6g>oIWAR6i>oIUAS6k>mIRAU6o>jIPAW6Q?iIm@X6T?hIj@Y6V?hIg@Z6Z?fIc@]6\\?dIa@^6`?cI\\@_6d?cIY@^6h?cI`_OZN2U8_`0bIZ_ObN0m7f`0cIV_OU7k`0Q2001O1O001O0O2O010O0010O01O10O01O100O1O1000000001O000000001O0000001O0oLUEiDk:W;WEgDi:Y;YEeDh:Z;YEeDg:[;ZEdDf:\\;[EcDf:[;\\EdDd:\\;]EcDc:\\;_EcDa:];_EdDa:Z;aEeD_:[;cEcD^:[;dEdD]:[;dEdD^:Y;dEfD]:Y;dEfD^:W;eEgD[:Y;fEgDZ:h:XFVEh9i:\\FTEe9j:_FSEa9l:dFPE\\9o:kFkDU9U;PGeDP9\\;TG`Dk8a;ZGYDg8g;]GUDb8l;bGoC^8RR3U:k7fEjD7[3Z9`L\\FX;^1YHT8cLZF\\:FkEo1W3o7jLTFm98mEh1\\3k7]7cFSEd1a3h7Y7XIhHf6W7\\IiHc6U7aIkH]6T7eImHY6R7jInHT6Q7nIPIQ6n6RJQIm5n6UJSIi5k6[JUIc5j6_JVI`5g6eJYIZ5a6lJ_IS5X6WKiIg4S6^KmIb4o5bKQJ]4l5gKUJW4h5mKYJQ4f5QLZJo3c5UL]Ji3b5YL^Jf3W1jFTOd5Ec3T1oFoNb5KS4a0]FAc5NS4=\\FAe50P4=^F_Oe54m3<_F]Oh55k3<^F\\Oj58h3;_F[Ok59d3>bFXOk5<\\3`0jFROl5c0R3=SGnNm5j0h29\\GlNn5o0`26cGjNn5T1Y23jGgNo5V1U24mGdNS6T1n1:oGaNZ6P1e1`0RH_N`6k0]1g0SH\\Ng6h0mHbLc7Z4jH[Nh6j0fHdLh7`0oGm2k0SOk6m0]HfLn7;PHP3j0ROm6\\18aLQHR3k0oNo6]14aLRHT3l0mNP7\\12aLTHV3k0lNQ7\\1O`LVHY3j0jNT7\\1LVL_He3a0iNV7[1ROMgHWM1^2Y8;nN1iHVM0]2[8:mN1iHWM1]2[8:kN1jHWM1]2]88iNfMnGf1S2=W75hNdMRHa1D]N^2X2V74gNbMTHe1DXN^2\\2V73WOYOiId0Q71VO[OjIc0S7OSO]OlIc0S7NQO_OlId0U7KmNBoIb0W7IjNEPJa0W7IhNGRJ`0W7GgNIRJ`0X7GdNJUJ>Y7FbNLVJ>X7EaNNWJ=Y7D_N0YJN10100000000001OO01000000O1O100O1N2N3M2N3M2N3M2K5OO1O20O0000100O1O1O1O1O2N1N3QOnTOh0Xk0N3M2H8M3N3M2NTW3", + "size": [ + 900, + 602 + ] + }, + "num_keypoints": 12, + "id": 4, + "keypoints": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 446, + 292, + 2, + 263, + 225, + 2, + 540, + 346, + 2, + 161, + 269, + 2, + 560, + 327, + 2, + 103, + 312, + 2, + 308, + 495, + 2, + 236, + 491, + 2, + 352, + 499, + 2, + 124, + 652, + 2, + 442, + 677, + 2, + 67, + 827, + 2 + ] + }, + { + "area": 404028, + "image_id": 3, + "iscrowd": 0, + "category_id": 1, + "bbox": [ + 84, + 45, + 516, + 783 + ], + "segmentation": { + "counts": 
"ln`2?ck04M2M3N2M3N2M3N2M3N2M3M4L3M3N3L5K4L5K5L2M3M2O2O0O2N2O0O2N101N2N2N2O1N2N2O1N2N4M4K4L2O1N2O1O000O10000000O1001N101O001O001O001O001O0010O01O1O001O1O3M;E1O2O0O2N1O2N2N1O2N1O3N2M3M3M3N2R@PKc8o4\\GQKe8o4YGSKh8l4VGUKl8f4e@fJ]6f0o8b4g@fJY6i0P9a4SG`Ko8_4PGaKR9]4mFcKV9\\4iFdKY9[4fFdK\\9[4cFfK_9Y4dARKX3d0W;Y4aATKW3a0[;Z4]AXKU3=`;Z4\\AZKR3;e;Z4YA]KP36k;\\4TAbKm21R<[4RAgKXO1U2Jf=]4m@kKiN>`2TOP>c4g@VMX1XNS>`4e@YMU1ZNY>Z4b@\\MS1[N_>U4_@`Mo0]Ng>o3Y@dMo0^Nl>j3V@hMk0_NT?f3Q@kMi0_N[?b3m_OoMe0`Na?`3i_OQNc0`Ng?]3g_OSN`0`Nk?]3e_OSN=`NR`0[3a_OVN;_NW`0Z3^_OWN7bN\\`0V3^_OXN1eNd`0R3[_OYNMgNj`0o2Z_OZNHjNPa0k2X_O[NDkN[;QO[Ig3gK]N_OlNa;SOWId3jK\\NZOnNh;TOSI`3lK^NUOPOn;UOnH]3oK^NQOROSmMa5P1l4b2]E_Lg5o0l4o4UKQKk4o4TKRKl4n4TKRKl4m4UKSKl4l4TKTKl4k4TKVKl4j4TKWKk4h4VKXKk4g4UKYKk4g4TKZKm4d4TK]Kl4b4TK^Kl4a4UK_Kl4`4SKbKl4]4UKcKl4\\4TKeKk4[4UKeKl4Y4TKiKl4V4TKjKl4U4UKkKl4T4TKmKk4R4VKnKj4R4WKnKh4R4XKSIcI^2U;^4ZKQIeI_2Q;`4ZKPIiI^2l:a4]KoHjI]2j:d4\\KnHmI\\2g:f4]KkHPJ\\2d:h4ULTKl3l4VLRKi3n4ZLoJg3Q5[LlJe3T5_LXJR4h5PLkI[4U6gK`Ia4`6cKWIc4i6_KoHg4P7V810O01000O010UKS[O^2md0dMS[O[2md0fMS[OX2md0jMT[OT2ld0nMS[OQ2ld0QNT[Om1md0TNS[Ok1ld0WNT[Og1md0[NS[O\\Nn0S1nc0c0U[OjMc1[1Wc0l0g]OlNZb0T1m]OdNTb0]1S^OZNna0f1W^OTNla0j1X^OQNja0n1Y^OnMia0R2Z^OiMha0V2[^OfMga0Y2]^ObMea0]2_^O^Mca0b2P4O1O2N3N1N2N3M2O2M3_NoUOi0Uj0POoUOP1Sj0hNRVOY1\\j0010O010000O10000O1000]LhNY\\OW1ec0mN[\\OR1bc0RO]\\Om0ac0VO`\\Oi0]c0[Oc\\Od0Zc0_Og\\O`0Ub0nNX[Of0b2PJRAQ6P?oIn@Q6T?oIi@R6Y?nIe@R6]?mIc@S6^?mI`@S6b?mI]@R6e?nIZ@P6i?PJV@j5Q`0VJm_Oh5W`0WJi_Og5Z`0YJe_Od5_`0\\J__Oc5d`0]J\\_O`5g`0_JY_O_5j`0`JV_O]5o`0aJQ_OR5]a0mJc^Oo4ba0QK^^Ok4fa0TKZ^Oh4ka0WKU^Og4S1WKR=1kAf4V1\\KkP2GkIHX4`0n1HSJ\\OQ4l0m1Gf38[LHd38]LHc37^LHb38_LHa37`LH`38aLH_37bLI^36cLI]37dLI\\36eLI\\36eLJZ36fLKZ33hLLY33hLMW33jLLW33jLMU32lLOT30mLOS31nLOR30oLOQ31PMOP3ORM0n20RM1n2NSM2l2MVM2k2MVM3i2LYM3h2KYM6f2J[M6d2I^M6c2I^M7a2HaM7_2IaM8^2GdM9\\2FeM:Z2EhM;W2EjM;V2ClM=T2BmM>R2APN?P2@PNa0P2]ORNc0m1]OTNc0l1[OVNe0j1ZOWNf0h1YOZNg0f1XO[Nh0e1VO]Nj0b1UO`Nl0_1ROcNo0\\1oNeNR1[1lNgNW1V1gNlN]1_CaNY=OZOl1c0SN^Oo1`0oMBS2W=1O1N0`VOlMQi0S2dVOXN]i0W21O1O1O1O010O1O1N2L4L4M2F;L3M4L3M4M2N3L3N3M2N3O0O2O0O2O0O2O0O2O002M5L3M4L_d0", + "size": [ + 900, + 602 + ] + }, + "num_keypoints": 15, + "id": 5, + "keypoints": [ + 138.28123044948043, + 178.42673457794075, + 2, + 133.4071828533262, + 152.16976849543238, + 2, + 0, + 0, + 0, + 168.78333476089736, + 123.08271026031831, + 2, + 0, + 0, + 0, + 201, + 192, + 2, + 322, + 124, + 1, + 249, + 344, + 1, + 488, + 172, + 2, + 219, + 433, + 1, + 572, + 238, + 2, + 436, + 381, + 2, + 439, + 380, + 2, + 354, + 608, + 2, + 307, + 523, + 1, + 494, + 683, + 2, + 313, + 753, + 2 + ] + } + ], + "info": { + "description": "MMPose example ochuman dataset", + "version": "1.0", + "year": "2020", + "date_created": "2020/08/31" + } +} diff --git a/tests/data/onehand10k/test_onehand10k.json b/tests/data/onehand10k/test_onehand10k.json index 479f6aacda..4ad90a3e69 100644 --- a/tests/data/onehand10k/test_onehand10k.json +++ b/tests/data/onehand10k/test_onehand10k.json @@ -1,541 +1,541 @@ -{ - "info": { - "description": "OneHand10K", - "version": "1.0", - "year": "2020", - "date_created": "2020/08/03" - }, - "licenses": "", - "images": [ - { - "file_name": "9.jpg", - "height": 358, - "width": 238, - "id": 9 - }, - { - "file_name": "33.jpg", - "height": 346, - "width": 226, - "id": 33 - }, - { - "file_name": "784.jpg", - "height": 960, - "width": 540, - "id": 784 - }, - { - "file_name": "1402.jpg", - "height": 339, - "width": 226, - "id": 1402 - } - ], - "annotations": [ - { - "bbox": [ - 63, - 92, - 99, - 194 - ], - 
"keypoints": [ - 0, - 0, - 0, - 81, - 251, - 1, - 71, - 229, - 1, - 72, - 192, - 1, - 76, - 169, - 1, - 95, - 196, - 1, - 91, - 144, - 1, - 93, - 122, - 1, - 91, - 98, - 1, - 116, - 199, - 1, - 111, - 148, - 1, - 108, - 120, - 1, - 107, - 101, - 1, - 139, - 203, - 1, - 130, - 153, - 1, - 128, - 124, - 1, - 122, - 107, - 1, - 154, - 205, - 1, - 150, - 177, - 1, - 147, - 159, - 1, - 142, - 132, - 1 - ], - "category_id": 1, - "id": 9, - "image_id": 9, - "segmentation": [ - [ - 63, - 92, - 63, - 188.5, - 63, - 285, - 112.0, - 285, - 161, - 285, - 161, - 188.5, - 161, - 92, - 112.0, - 92 - ] - ], - "iscrowd": 0, - "area": 19206 - }, - { - "bbox": [ - 61, - 154, - 34, - 68 - ], - "keypoints": [ - 86, - 221, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 92, - 176, - 1, - 90, - 168, - 1, - 90, - 160, - 1, - 92, - 189, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 87, - 191, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 79, - 194, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "category_id": 1, - "id": 33, - "image_id": 33, - "segmentation": [ - [ - 61, - 154, - 61, - 187.5, - 61, - 221, - 77.5, - 221, - 94, - 221, - 94, - 187.5, - 94, - 154, - 77.5, - 154 - ] - ], - "iscrowd": 0, - "area": 2312 - }, - { - "bbox": [ - 51, - 312, - 376, - 372 - ], - "keypoints": [ - 153, - 652, - 1, - 198, - 486, - 1, - 258, - 438, - 1, - 333, - 384, - 1, - 393, - 352, - 1, - 160, - 369, - 1, - 274, - 334, - 1, - 325, - 337, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 412, - 418, - 1, - 0, - 0, - 0, - 334, - 454, - 1, - 303, - 492, - 1, - 270, - 540, - 1, - 0, - 0, - 0, - 325, - 508, - 1, - 295, - 544, - 1, - 258, - 562, - 1 - ], - "category_id": 1, - "id": 784, - "image_id": 784, - "segmentation": [ - [ - 51, - 312, - 51, - 497.5, - 51, - 683, - 238.5, - 683, - 426, - 683, - 426, - 497.5, - 426, - 312, - 238.5, - 312 - ] - ], - "iscrowd": 0, - "area": 139872 - }, - { - "bbox": [ - 32, - 68, - 150, - 210 - ], - "keypoints": [ - 92, - 264, - 1, - 150, - 213, - 1, - 167, - 202, - 1, - 172, - 187, - 1, - 174, - 172, - 1, - 126, - 164, - 1, - 142, - 147, - 1, - 157, - 151, - 1, - 163, - 168, - 1, - 105, - 151, - 1, - 108, - 120, - 1, - 112, - 98, - 1, - 109, - 70, - 1, - 85, - 157, - 1, - 77, - 132, - 1, - 78, - 108, - 1, - 72, - 89, - 1, - 74, - 174, - 1, - 63, - 157, - 1, - 47, - 137, - 1, - 37, - 119, - 1 - ], - "category_id": 1, - "id": 1402, - "image_id": 1402, - "segmentation": [ - [ - 32, - 68, - 32, - 172.5, - 32, - 277, - 106.5, - 277, - 181, - 277, - 181, - 172.5, - 181, - 68, - 106.5, - 68 - ] - ], - "iscrowd": 0, - "area": 31500 - } - ], - "categories": [ - { - "supercategory": "hand", - "id": 1, - "name": "hand", - "keypoints": [ - "wrist", - "thumb1", - "thumb2", - "thumb3", - "thumb4", - "forefinger1", - "forefinger2", - "forefinger3", - "forefinger4", - "middle_finger1", - "middle_finger2", - "middle_finger3", - "middle_finger4", - "ring_finger1", - "ring_finger2", - "ring_finger3", - "ring_finger4", - "pinky_finger1", - "pinky_finger2", - "pinky_finger3", - "pinky_finger4" - ], - "skeleton": [ - [ - 1, - 2 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 1, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 8, - 9 - ], - [ - 1, - 10 - ], - [ - 10, - 11 - ], - [ - 11, - 12 - ], - [ - 12, - 13 - ], - [ - 1, - 14 - ], - [ - 14, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ], - [ - 1, - 18 - ], - [ - 18, - 19 - ], - [ - 19, - 20 - ], - [ - 20, - 21 - ] - ] - } - ] -} +{ + "info": { + "description": 
"OneHand10K", + "version": "1.0", + "year": "2020", + "date_created": "2020/08/03" + }, + "licenses": "", + "images": [ + { + "file_name": "9.jpg", + "height": 358, + "width": 238, + "id": 9 + }, + { + "file_name": "33.jpg", + "height": 346, + "width": 226, + "id": 33 + }, + { + "file_name": "784.jpg", + "height": 960, + "width": 540, + "id": 784 + }, + { + "file_name": "1402.jpg", + "height": 339, + "width": 226, + "id": 1402 + } + ], + "annotations": [ + { + "bbox": [ + 63, + 92, + 99, + 194 + ], + "keypoints": [ + 0, + 0, + 0, + 81, + 251, + 1, + 71, + 229, + 1, + 72, + 192, + 1, + 76, + 169, + 1, + 95, + 196, + 1, + 91, + 144, + 1, + 93, + 122, + 1, + 91, + 98, + 1, + 116, + 199, + 1, + 111, + 148, + 1, + 108, + 120, + 1, + 107, + 101, + 1, + 139, + 203, + 1, + 130, + 153, + 1, + 128, + 124, + 1, + 122, + 107, + 1, + 154, + 205, + 1, + 150, + 177, + 1, + 147, + 159, + 1, + 142, + 132, + 1 + ], + "category_id": 1, + "id": 9, + "image_id": 9, + "segmentation": [ + [ + 63, + 92, + 63, + 188.5, + 63, + 285, + 112.0, + 285, + 161, + 285, + 161, + 188.5, + 161, + 92, + 112.0, + 92 + ] + ], + "iscrowd": 0, + "area": 19206 + }, + { + "bbox": [ + 61, + 154, + 34, + 68 + ], + "keypoints": [ + 86, + 221, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 92, + 176, + 1, + 90, + 168, + 1, + 90, + 160, + 1, + 92, + 189, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 87, + 191, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 79, + 194, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "category_id": 1, + "id": 33, + "image_id": 33, + "segmentation": [ + [ + 61, + 154, + 61, + 187.5, + 61, + 221, + 77.5, + 221, + 94, + 221, + 94, + 187.5, + 94, + 154, + 77.5, + 154 + ] + ], + "iscrowd": 0, + "area": 2312 + }, + { + "bbox": [ + 51, + 312, + 376, + 372 + ], + "keypoints": [ + 153, + 652, + 1, + 198, + 486, + 1, + 258, + 438, + 1, + 333, + 384, + 1, + 393, + 352, + 1, + 160, + 369, + 1, + 274, + 334, + 1, + 325, + 337, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 412, + 418, + 1, + 0, + 0, + 0, + 334, + 454, + 1, + 303, + 492, + 1, + 270, + 540, + 1, + 0, + 0, + 0, + 325, + 508, + 1, + 295, + 544, + 1, + 258, + 562, + 1 + ], + "category_id": 1, + "id": 784, + "image_id": 784, + "segmentation": [ + [ + 51, + 312, + 51, + 497.5, + 51, + 683, + 238.5, + 683, + 426, + 683, + 426, + 497.5, + 426, + 312, + 238.5, + 312 + ] + ], + "iscrowd": 0, + "area": 139872 + }, + { + "bbox": [ + 32, + 68, + 150, + 210 + ], + "keypoints": [ + 92, + 264, + 1, + 150, + 213, + 1, + 167, + 202, + 1, + 172, + 187, + 1, + 174, + 172, + 1, + 126, + 164, + 1, + 142, + 147, + 1, + 157, + 151, + 1, + 163, + 168, + 1, + 105, + 151, + 1, + 108, + 120, + 1, + 112, + 98, + 1, + 109, + 70, + 1, + 85, + 157, + 1, + 77, + 132, + 1, + 78, + 108, + 1, + 72, + 89, + 1, + 74, + 174, + 1, + 63, + 157, + 1, + 47, + 137, + 1, + 37, + 119, + 1 + ], + "category_id": 1, + "id": 1402, + "image_id": 1402, + "segmentation": [ + [ + 32, + 68, + 32, + 172.5, + 32, + 277, + 106.5, + 277, + 181, + 277, + 181, + 172.5, + 181, + 68, + 106.5, + 68 + ] + ], + "iscrowd": 0, + "area": 31500 + } + ], + "categories": [ + { + "supercategory": "hand", + "id": 1, + "name": "hand", + "keypoints": [ + "wrist", + "thumb1", + "thumb2", + "thumb3", + "thumb4", + "forefinger1", + "forefinger2", + "forefinger3", + "forefinger4", + "middle_finger1", + "middle_finger2", + "middle_finger3", + "middle_finger4", + "ring_finger1", + "ring_finger2", + "ring_finger3", + "ring_finger4", + "pinky_finger1", + 
"pinky_finger2", + "pinky_finger3", + "pinky_finger4" + ], + "skeleton": [ + [ + 1, + 2 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 1, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 8, + 9 + ], + [ + 1, + 10 + ], + [ + 10, + 11 + ], + [ + 11, + 12 + ], + [ + 12, + 13 + ], + [ + 1, + 14 + ], + [ + 14, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ], + [ + 1, + 18 + ], + [ + 18, + 19 + ], + [ + 19, + 20 + ], + [ + 20, + 21 + ] + ] + } + ] +} diff --git a/tests/data/panoptic/test_panoptic.json b/tests/data/panoptic/test_panoptic.json index 6007d2db14..5799bc6ee5 100644 --- a/tests/data/panoptic/test_panoptic.json +++ b/tests/data/panoptic/test_panoptic.json @@ -1,565 +1,565 @@ -{ - "info": { - "description": "panoptic", - "version": "1.0", - "year": "2020", - "date_created": "2020/09/21" - }, - "licenses": "", - "images": [ - { - "file_name": "005880453_01_l.jpg", - "height": 720, - "width": 1280, - "id": 100586 - }, - { - "file_name": "005880453_01_r.jpg", - "height": 720, - "width": 1280, - "id": 100587 - }, - { - "file_name": "ex2_2.flv_000040_l.jpg", - "height": 300, - "width": 400, - "id": 100520 - }, - { - "file_name": "ex2_2.flv_000040_r.jpg", - "height": 300, - "width": 400, - "id": 100521 - } - ], - "annotations": [ - { - "bbox": [ - 720.32470703125, - 188.09907531738278, - 55.445434570312614, - 36.900924682617216 - ], - "head_size": 140.0, - "center": [ - 747.5474243164062, - 206.0495376586914 - ], - "keypoints": [ - 721.0, - 224.0, - 1.0, - 720.32470703125, - 216.51248168945315, - 1.0, - 727.2195434570314, - 200.88510131835943, - 1.0, - 739.107177734375, - 194.2553100585938, - 1.0, - 752.6591186523438, - 191.17718505859378, - 1.0, - 740.0582275390625, - 188.09907531738278, - 1.0, - 757.6519165039061, - 189.75651550292972, - 1.0, - 760.98046875, - 196.62309265136716, - 1.0, - 760.98046875, - 202.77932739257812, - 1.0, - 742.9112548828123, - 196.14953613281247, - 1.0, - 753.3723754882812, - 189.75651550292972, - 1.0, - 765.0222778320311, - 199.93798828125, - 1.0, - 767.6375122070312, - 207.04133605957034, - 1.0, - 747.6663208007811, - 202.5425567626953, - 1.0, - 757.1763916015626, - 197.80697631835943, - 1.0, - 769.3018188476562, - 203.72644042968753, - 1.0, - 772.3925781249999, - 209.40911865234378, - 1.0, - 753.610107421875, - 209.17234802246097, - 1.0, - 761.6937255859375, - 205.85745239257815, - 1.0, - 769.7772827148438, - 208.93556213378903, - 1.0, - 774.7701416015626, - 213.43435668945315, - 1.0 - ], - "category_id": 1, - "id": 100586, - "image_id": 100586, - "segmentation": [ - [ - 720.32470703125, - 188.09907531738278, - 720.32470703125, - 206.0495376586914, - 720.32470703125, - 224.0, - 747.5474243164062, - 224.0, - 774.7701416015626, - 224.0, - 774.7701416015626, - 206.0495376586914, - 774.7701416015626, - 188.09907531738278, - 747.5474243164062, - 188.09907531738278 - ] - ], - "iscrowd": 0, - "area": 2045.9878050740865, - "dataset": "mpii" - }, - { - "bbox": [ - 746.0122680664061, - 313.11645507812494, - 50.4322509765625, - 77.21240234375 - ], - "head_size": 140.0, - "center": [ - 770.7283935546874, - 351.22265624999994 - ], - "keypoints": [ - 746.0122680664061, - 363.82229614257807, - 1.0, - 750.3375854492186, - 347.22766113281256, - 1.0, - 766.0941162109375, - 330.6329956054688, - 1.0, - 778.1432495117188, - 324.1795349121093, - 1.0, - 785.2490844726564, - 313.11645507812494, - 1.0, - 787.720703125, - 349.3788146972656, - 1.0, - 795.4445190429686, - 349.3788146972656, - 1.0, - 790.5012817382812, - 348.7641906738282, - 1.0, - 785.5580444335938, - 
347.5349426269532, - 1.0, - 787.720703125, - 362.90036010742193, - 1.0, - 793.8997802734376, - 360.74920654296875, - 1.0, - 785.5580444335938, - 359.51998901367193, - 1.0, - 780.9237670898438, - 359.2126770019531, - 1.0, - 783.7043457031251, - 374.2707824707031, - 1.0, - 791.4281616210938, - 374.57806396484375, - 1.0, - 784.3222656249998, - 371.5050048828125, - 1.0, - 778.7611083984374, - 370.2757568359375, - 1.0, - 777.8342895507812, - 384.71923828124994, - 1.0, - 782.1596069335938, - 389.32885742187494, - 1.0, - 774.435791015625, - 385.3338623046875, - 1.0, - 771.9641723632812, - 382.87539672851557, - 1.0 - ], - "category_id": 1, - "id": 100587, - "image_id": 100587, - "segmentation": [ - [ - 746.0122680664061, - 313.11645507812494, - 746.0122680664061, - 351.22265624999994, - 746.0122680664061, - 389.32885742187494, - 770.7283935546874, - 389.32885742187494, - 795.4445190429686, - 389.32885742187494, - 795.4445190429686, - 351.22265624999994, - 795.4445190429686, - 313.11645507812494, - 770.7283935546874, - 313.11645507812494 - ] - ], - "iscrowd": 0, - "area": 3893.9952535033226, - "dataset": "mpii" - }, - { - "bbox": [ - 179.84646606445315, - 260.29730224609364, - 42.822525024414034, - 30.21246337890642 - ], - "head_size": 72.5531, - "center": [ - 200.75772857666016, - 274.9035339355469 - ], - "keypoints": [ - 221.6689910888672, - 260.29730224609364, - 1.0, - 211.34570312499997, - 260.29730224609364, - 1.0, - 192.55204772949222, - 265.0344543457031, - 1.0, - 188.31684875488278, - 271.0874938964844, - 1.0, - 180.90525817871094, - 275.82464599609375, - 1.0, - 192.02264404296878, - 265.2976379394531, - 1.0, - 179.84646606445315, - 278.7195739746093, - 1.0, - 186.4639587402344, - 279.7722778320313, - 1.0, - 190.43444824218753, - 275.82464599609375, - 1.0, - 194.4049377441406, - 271.6138305664063, - 1.0, - 187.52275085449222, - 284.7726135253906, - 1.0, - 192.8167419433594, - 282.14086914062506, - 1.0, - 195.72843933105474, - 278.4563903808594, - 1.0, - 201.28712463378906, - 277.93005371093744, - 1.0, - 194.14024353027344, - 288.1938781738282, - 1.0, - 199.6989288330078, - 285.0357971191406, - 1.0, - 202.34593200683594, - 282.14086914062506, - 1.0, - 207.1105194091797, - 281.35131835937494, - 1.0, - 201.28712463378906, - 289.50976562500006, - 1.0, - 204.72822570800778, - 286.3516540527344, - 1.0, - 207.1105194091797, - 284.7726135253906, - 1.0 - ], - "category_id": 1, - "id": 100520, - "image_id": 100520, - "segmentation": [ - [ - 179.84646606445315, - 260.29730224609364, - 179.84646606445315, - 274.9035339355469, - 179.84646606445315, - 289.50976562500006, - 200.75772857666016, - 289.50976562500006, - 221.6689910888672, - 289.50976562500006, - 221.6689910888672, - 274.9035339355469, - 221.6689910888672, - 260.29730224609364, - 200.75772857666016, - 260.29730224609364 - ] - ], - "iscrowd": 0, - "area": 1293.7739690924127, - "dataset": "nzsl" - }, - { - "bbox": [ - 186.37617492675776, - 196.84266662597656, - 46.34579467773443, - 44.16563415527344 - ], - "head_size": 72.5531, - "center": [ - 209.04907226562497, - 218.42548370361328 - ], - "keypoints": [ - 186.37617492675776, - 232.66671752929688, - 1.0, - 190.1365051269531, - 223.60145568847656, - 1.0, - 200.65892028808597, - 212.39300537109378, - 1.0, - 212.89080810546878, - 203.87417602539062, - 1.0, - 219.56018066406247, - 196.84266662597656, - 1.0, - 205.30081176757812, - 208.3026885986328, - 1.0, - 217.7982025146484, - 207.76916503906244, - 1.0, - 227.2103424072266, - 213.4448394775391, - 1.0, - 231.7219696044922, - 220.28102111816406, - 
1.0, - 198.5165100097656, - 212.5708312988281, - 1.0, - 219.04794311523443, - 217.55035400390625, - 1.0, - 223.4833374023438, - 223.01550292968747, - 1.0, - 226.81802368164062, - 230.0469970703125, - 1.0, - 196.21739196777344, - 218.71846008300778, - 1.0, - 215.47726440429688, - 225.73097229003903, - 1.0, - 219.04794311523443, - 232.31103515625, - 1.0, - 213.47929382324216, - 232.78147888183597, - 1.0, - 200.53286743164062, - 233.95339965820318, - 1.0, - 211.71386718749997, - 236.68786621093753, - 1.0, - 216.81396484374997, - 240.00830078125, - 1.0, - 210.92922973632812, - 239.6176605224609, - 1.0 - ], - "category_id": 1, - "id": 100521, - "image_id": 100521, - "segmentation": [ - [ - 186.37617492675776, - 196.84266662597656, - 186.37617492675776, - 218.42548370361328, - 186.37617492675776, - 240.00830078125, - 209.04907226562497, - 240.00830078125, - 231.7219696044922, - 240.00830078125, - 231.7219696044922, - 218.42548370361328, - 231.7219696044922, - 196.84266662597656, - 209.04907226562497, - 196.84266662597656 - ] - ], - "iscrowd": 0, - "area": 2046.8914123722377, - "dataset": "nzsl" - } - ], - "categories": [ - { - "supercategory": "hand", - "id": 1, - "name": "hand", - "keypoints": [ - "wrist", - "thumb1", - "thumb2", - "thumb3", - "thumb4", - "forefinger1", - "forefinger2", - "forefinger3", - "forefinger4", - "middle_finger1", - "middle_finger2", - "middle_finger3", - "middle_finger4", - "ring_finger1", - "ring_finger2", - "ring_finger3", - "ring_finger4", - "pinky_finger1", - "pinky_finger2", - "pinky_finger3", - "pinky_finger4" - ], - "skeleton": [ - [ - 1, - 2 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 1, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 8, - 9 - ], - [ - 1, - 10 - ], - [ - 10, - 11 - ], - [ - 11, - 12 - ], - [ - 12, - 13 - ], - [ - 1, - 14 - ], - [ - 14, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ], - [ - 1, - 18 - ], - [ - 18, - 19 - ], - [ - 19, - 20 - ], - [ - 20, - 21 - ] - ] - } - ] -} +{ + "info": { + "description": "panoptic", + "version": "1.0", + "year": "2020", + "date_created": "2020/09/21" + }, + "licenses": "", + "images": [ + { + "file_name": "005880453_01_l.jpg", + "height": 720, + "width": 1280, + "id": 100586 + }, + { + "file_name": "005880453_01_r.jpg", + "height": 720, + "width": 1280, + "id": 100587 + }, + { + "file_name": "ex2_2.flv_000040_l.jpg", + "height": 300, + "width": 400, + "id": 100520 + }, + { + "file_name": "ex2_2.flv_000040_r.jpg", + "height": 300, + "width": 400, + "id": 100521 + } + ], + "annotations": [ + { + "bbox": [ + 720.32470703125, + 188.09907531738278, + 55.445434570312614, + 36.900924682617216 + ], + "head_size": 140.0, + "center": [ + 747.5474243164062, + 206.0495376586914 + ], + "keypoints": [ + 721.0, + 224.0, + 1.0, + 720.32470703125, + 216.51248168945315, + 1.0, + 727.2195434570314, + 200.88510131835943, + 1.0, + 739.107177734375, + 194.2553100585938, + 1.0, + 752.6591186523438, + 191.17718505859378, + 1.0, + 740.0582275390625, + 188.09907531738278, + 1.0, + 757.6519165039061, + 189.75651550292972, + 1.0, + 760.98046875, + 196.62309265136716, + 1.0, + 760.98046875, + 202.77932739257812, + 1.0, + 742.9112548828123, + 196.14953613281247, + 1.0, + 753.3723754882812, + 189.75651550292972, + 1.0, + 765.0222778320311, + 199.93798828125, + 1.0, + 767.6375122070312, + 207.04133605957034, + 1.0, + 747.6663208007811, + 202.5425567626953, + 1.0, + 757.1763916015626, + 197.80697631835943, + 1.0, + 769.3018188476562, + 203.72644042968753, + 1.0, + 772.3925781249999, + 209.40911865234378, + 1.0, + 
753.610107421875, + 209.17234802246097, + 1.0, + 761.6937255859375, + 205.85745239257815, + 1.0, + 769.7772827148438, + 208.93556213378903, + 1.0, + 774.7701416015626, + 213.43435668945315, + 1.0 + ], + "category_id": 1, + "id": 100586, + "image_id": 100586, + "segmentation": [ + [ + 720.32470703125, + 188.09907531738278, + 720.32470703125, + 206.0495376586914, + 720.32470703125, + 224.0, + 747.5474243164062, + 224.0, + 774.7701416015626, + 224.0, + 774.7701416015626, + 206.0495376586914, + 774.7701416015626, + 188.09907531738278, + 747.5474243164062, + 188.09907531738278 + ] + ], + "iscrowd": 0, + "area": 2045.9878050740865, + "dataset": "mpii" + }, + { + "bbox": [ + 746.0122680664061, + 313.11645507812494, + 50.4322509765625, + 77.21240234375 + ], + "head_size": 140.0, + "center": [ + 770.7283935546874, + 351.22265624999994 + ], + "keypoints": [ + 746.0122680664061, + 363.82229614257807, + 1.0, + 750.3375854492186, + 347.22766113281256, + 1.0, + 766.0941162109375, + 330.6329956054688, + 1.0, + 778.1432495117188, + 324.1795349121093, + 1.0, + 785.2490844726564, + 313.11645507812494, + 1.0, + 787.720703125, + 349.3788146972656, + 1.0, + 795.4445190429686, + 349.3788146972656, + 1.0, + 790.5012817382812, + 348.7641906738282, + 1.0, + 785.5580444335938, + 347.5349426269532, + 1.0, + 787.720703125, + 362.90036010742193, + 1.0, + 793.8997802734376, + 360.74920654296875, + 1.0, + 785.5580444335938, + 359.51998901367193, + 1.0, + 780.9237670898438, + 359.2126770019531, + 1.0, + 783.7043457031251, + 374.2707824707031, + 1.0, + 791.4281616210938, + 374.57806396484375, + 1.0, + 784.3222656249998, + 371.5050048828125, + 1.0, + 778.7611083984374, + 370.2757568359375, + 1.0, + 777.8342895507812, + 384.71923828124994, + 1.0, + 782.1596069335938, + 389.32885742187494, + 1.0, + 774.435791015625, + 385.3338623046875, + 1.0, + 771.9641723632812, + 382.87539672851557, + 1.0 + ], + "category_id": 1, + "id": 100587, + "image_id": 100587, + "segmentation": [ + [ + 746.0122680664061, + 313.11645507812494, + 746.0122680664061, + 351.22265624999994, + 746.0122680664061, + 389.32885742187494, + 770.7283935546874, + 389.32885742187494, + 795.4445190429686, + 389.32885742187494, + 795.4445190429686, + 351.22265624999994, + 795.4445190429686, + 313.11645507812494, + 770.7283935546874, + 313.11645507812494 + ] + ], + "iscrowd": 0, + "area": 3893.9952535033226, + "dataset": "mpii" + }, + { + "bbox": [ + 179.84646606445315, + 260.29730224609364, + 42.822525024414034, + 30.21246337890642 + ], + "head_size": 72.5531, + "center": [ + 200.75772857666016, + 274.9035339355469 + ], + "keypoints": [ + 221.6689910888672, + 260.29730224609364, + 1.0, + 211.34570312499997, + 260.29730224609364, + 1.0, + 192.55204772949222, + 265.0344543457031, + 1.0, + 188.31684875488278, + 271.0874938964844, + 1.0, + 180.90525817871094, + 275.82464599609375, + 1.0, + 192.02264404296878, + 265.2976379394531, + 1.0, + 179.84646606445315, + 278.7195739746093, + 1.0, + 186.4639587402344, + 279.7722778320313, + 1.0, + 190.43444824218753, + 275.82464599609375, + 1.0, + 194.4049377441406, + 271.6138305664063, + 1.0, + 187.52275085449222, + 284.7726135253906, + 1.0, + 192.8167419433594, + 282.14086914062506, + 1.0, + 195.72843933105474, + 278.4563903808594, + 1.0, + 201.28712463378906, + 277.93005371093744, + 1.0, + 194.14024353027344, + 288.1938781738282, + 1.0, + 199.6989288330078, + 285.0357971191406, + 1.0, + 202.34593200683594, + 282.14086914062506, + 1.0, + 207.1105194091797, + 281.35131835937494, + 1.0, + 201.28712463378906, + 289.50976562500006, 
+ 1.0, + 204.72822570800778, + 286.3516540527344, + 1.0, + 207.1105194091797, + 284.7726135253906, + 1.0 + ], + "category_id": 1, + "id": 100520, + "image_id": 100520, + "segmentation": [ + [ + 179.84646606445315, + 260.29730224609364, + 179.84646606445315, + 274.9035339355469, + 179.84646606445315, + 289.50976562500006, + 200.75772857666016, + 289.50976562500006, + 221.6689910888672, + 289.50976562500006, + 221.6689910888672, + 274.9035339355469, + 221.6689910888672, + 260.29730224609364, + 200.75772857666016, + 260.29730224609364 + ] + ], + "iscrowd": 0, + "area": 1293.7739690924127, + "dataset": "nzsl" + }, + { + "bbox": [ + 186.37617492675776, + 196.84266662597656, + 46.34579467773443, + 44.16563415527344 + ], + "head_size": 72.5531, + "center": [ + 209.04907226562497, + 218.42548370361328 + ], + "keypoints": [ + 186.37617492675776, + 232.66671752929688, + 1.0, + 190.1365051269531, + 223.60145568847656, + 1.0, + 200.65892028808597, + 212.39300537109378, + 1.0, + 212.89080810546878, + 203.87417602539062, + 1.0, + 219.56018066406247, + 196.84266662597656, + 1.0, + 205.30081176757812, + 208.3026885986328, + 1.0, + 217.7982025146484, + 207.76916503906244, + 1.0, + 227.2103424072266, + 213.4448394775391, + 1.0, + 231.7219696044922, + 220.28102111816406, + 1.0, + 198.5165100097656, + 212.5708312988281, + 1.0, + 219.04794311523443, + 217.55035400390625, + 1.0, + 223.4833374023438, + 223.01550292968747, + 1.0, + 226.81802368164062, + 230.0469970703125, + 1.0, + 196.21739196777344, + 218.71846008300778, + 1.0, + 215.47726440429688, + 225.73097229003903, + 1.0, + 219.04794311523443, + 232.31103515625, + 1.0, + 213.47929382324216, + 232.78147888183597, + 1.0, + 200.53286743164062, + 233.95339965820318, + 1.0, + 211.71386718749997, + 236.68786621093753, + 1.0, + 216.81396484374997, + 240.00830078125, + 1.0, + 210.92922973632812, + 239.6176605224609, + 1.0 + ], + "category_id": 1, + "id": 100521, + "image_id": 100521, + "segmentation": [ + [ + 186.37617492675776, + 196.84266662597656, + 186.37617492675776, + 218.42548370361328, + 186.37617492675776, + 240.00830078125, + 209.04907226562497, + 240.00830078125, + 231.7219696044922, + 240.00830078125, + 231.7219696044922, + 218.42548370361328, + 231.7219696044922, + 196.84266662597656, + 209.04907226562497, + 196.84266662597656 + ] + ], + "iscrowd": 0, + "area": 2046.8914123722377, + "dataset": "nzsl" + } + ], + "categories": [ + { + "supercategory": "hand", + "id": 1, + "name": "hand", + "keypoints": [ + "wrist", + "thumb1", + "thumb2", + "thumb3", + "thumb4", + "forefinger1", + "forefinger2", + "forefinger3", + "forefinger4", + "middle_finger1", + "middle_finger2", + "middle_finger3", + "middle_finger4", + "ring_finger1", + "ring_finger2", + "ring_finger3", + "ring_finger4", + "pinky_finger1", + "pinky_finger2", + "pinky_finger3", + "pinky_finger4" + ], + "skeleton": [ + [ + 1, + 2 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + ], + [ + 1, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 8, + 9 + ], + [ + 1, + 10 + ], + [ + 10, + 11 + ], + [ + 11, + 12 + ], + [ + 12, + 13 + ], + [ + 1, + 14 + ], + [ + 14, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ], + [ + 1, + 18 + ], + [ + 18, + 19 + ], + [ + 19, + 20 + ], + [ + 20, + 21 + ] + ] + } + ] +} diff --git a/tests/data/panoptic_body3d/160906_band1/calibration_160906_band1.json b/tests/data/panoptic_body3d/160906_band1/calibration_160906_band1.json index 31c0429b03..677a379dc7 100644 --- a/tests/data/panoptic_body3d/160906_band1/calibration_160906_band1.json +++ 
b/tests/data/panoptic_body3d/160906_band1/calibration_160906_band1.json @@ -1,11965 +1,11965 @@ -{ - "calibDataSource": "160906_calib_norm", - "cameras": [ - { - "name": "01_01", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 1, - "K": [ - [745.698,0,375.512], - [0,745.89,226.023], - [0,0,1] - ], - "distCoef": [-0.324009,0.0732398,-0.000601245,0.000808154,0.0311011], - "R": [ - [0.9609979695,0.02878724306,-0.2750530807], - [-0.05024448072,0.9961896773,-0.07128547526], - [0.2719529274,0.08232509619,0.9587826572] - ], - "t": [ - [-51.56945892], - [143.9587601], - [282.5664691] - ] - }, - { - "name": "01_02", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 2, - "K": [ - [745.462,0,369.225], - [0,745.627,226.687], - [0,0,1] - ], - "distCoef": [-0.336594,0.141798,-0.000612176,0.000160485,-0.0646767], - "R": [ - [0.9715220842,-0.01574832828,-0.2364251047], - [0.005323209906,0.998987679,-0.04466856407], - [0.2368892218,0.042137956,0.9706224236] - ], - "t": [ - [-66.22242206], - [142.1317177], - [278.6626087] - ] - }, - { - "name": "01_03", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 3, - "K": [ - [746.261,0,378.952], - [0,746.496,239.595], - [0,0,1] - ], - "distCoef": [-0.322069,0.0440329,-0.000951664,0.000892653,0.103376], - "R": [ - [0.9665011873,0.05534363601,-0.2506242943], - [-0.07024277085,0.996230894,-0.05089164033], - [0.2468631364,0.06679137568,0.9667458322] - ], - "t": [ - [-54.75524211], - [118.3584455], - [281.78809] - ] - }, - { - "name": "01_04", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 4, - "K": [ - [747.661,0,366.929], - [0,747.759,234.022], - [0,0,1] - ], - "distCoef": [-0.32333,0.0462607,-0.000972333,-0.000898261,0.102804], - "R": [ - [0.9662588837,0.08601234823,-0.2427872436], - [-0.1112831564,0.9894890375,-0.09234448444], - [0.23229255,0.1162468093,0.9656742984] - ], - "t": [ - [-29.08626445], - [96.75744843], - [287.7183779] - ] - }, - { - "name": "01_05", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 5, - "K": [ - [742.413,0,353.224], - [0,742.622,209.478], - [0,0,1] - ], - "distCoef": [-0.297729,-0.0985766,-0.000505185,-0.000773418,0.328727], - "R": [ - [0.9718071292,0.05098345905,-0.2301990238], - [-0.07271497659,0.9935575811,-0.0869244798], - [0.2242842746,0.1012127458,0.9692536016] - ], - "t": [ - [-26.91018729], - [77.97642882], - [285.7140393] - ] - }, - { - "name": "01_06", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 6, - "K": [ - [743.487,0,372.277], - [0,743.725,241.821], - [0,0,1] - ], - "distCoef": [-0.317534,0.0281748,0.00130284,-0.000186889,0.119129], - "R": [ - [0.9681278444,0.07458666466,-0.2390926732], - [-0.09383510211,0.9931135585,-0.07014580141], - [0.2322142341,0.09034538891,0.968459736] - ], - "t": [ - [-7.038020326], - [73.51221006], - [284.7303027] - ] - }, - { - "name": "01_07", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 7, - "K": [ - [748.393,0,380.919], - [0,748.388,229.353], - [0,0,1] - ], - "distCoef": [-0.344193,0.174813,-0.00034307,0.00107023,-0.0968505], - "R": [ - [0.9670535143,-0.02995409712,-0.2528047715], - [0.01712365053,0.9984582116,-0.0528013286], - [0.2539966162,0.04673276982,0.9660754459] - ], - "t": [ - [-4.52170598], - [98.55800179], - [280.6705064] - ] - }, - { - "name": "01_08", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 8, - "K": [ - [745.37,0,362.362], - [0,745.56,217.483], - [0,0,1] - ], - "distCoef": 
[-0.326014,0.0789588,-0.000462463,-0.00138061,0.0222432], - "R": [ - [0.9652282485,0.06485174985,-0.2532364089], - [-0.07898708824,0.9958116468,-0.0460456736], - [0.2491896228,0.06444699145,0.9663079826] - ], - "t": [ - [26.28384049], - [86.2200762], - [282.8912643] - ] - }, - { - "name": "01_09", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 9, - "K": [ - [746.037,0,338.236], - [0,746.053,236.859], - [0,0,1] - ], - "distCoef": [-0.314486,0.0395532,0.000625849,-0.000232478,0.0599275], - "R": [ - [0.9656569777,0.07278005487,-0.2494186543], - [-0.09030273149,0.9941334749,-0.05953193019], - [0.2436226964,0.08001060955,0.9665641645] - ], - "t": [ - [45.35508632], - [94.7965848], - [284.0947744] - ] - }, - { - "name": "01_10", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 10, - "K": [ - [747.938,0,379.271], - [0,748.269,227.432], - [0,0,1] - ], - "distCoef": [-0.3484,0.205218,-0.00110069,0.000562921,-0.151344], - "R": [ - [0.9662738854,-0.001312373382,-0.2575132151], - [-0.009587322107,0.9991104143,-0.04106657164], - [0.2573380297,0.04215041788,0.9654017199] - ], - "t": [ - [30.05861189], - [130.0028668], - [279.9552314] - ] - }, - { - "name": "01_11", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 11, - "K": [ - [746.12,0,364.693], - [0,745.844,223.621], - [0,0,1] - ], - "distCoef": [-0.335335,0.119703,0.000192218,0.00118296,-0.00812072], - "R": [ - [0.9869891455,-0.01212212734,-0.1603292883], - [0.00355647539,0.9985558958,-0.05360479805], - [0.1607475603,0.05233714665,0.9856069424] - ], - "t": [ - [71.07099717], - [142.6182462], - [275.3539702] - ] - }, - { - "name": "01_12", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 12, - "K": [ - [745.407,0,358.691], - [0,745.503,226.329], - [0,0,1] - ], - "distCoef": [-0.325389,0.0923962,-0.00061832,-0.00189678,-0.0159561], - "R": [ - [0.9589650047,0.08538224277,-0.2703627054], - [-0.09708669181,0.9948178626,-0.03019262438], - [0.2663837347,0.05520229083,0.9622849957] - ], - "t": [ - [54.63033668], - [157.9150468], - [281.9236261] - ] - }, - { - "name": "01_13", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 13, - "K": [ - [744.389,0,339.442], - [0,744.512,216.258], - [0,0,1] - ], - "distCoef": [-0.320138,0.0543285,-0.000196977,-0.00116274,0.0473598], - "R": [ - [0.9724830194,-0.06319437739,-0.2242392645], - [0.03959405574,0.9933373951,-0.1082272161], - [0.2295845984,0.09637058799,0.9685058709] - ], - "t": [ - [19.90234626], - [154.6647449], - [286.7518211] - ] - }, - { - "name": "01_14", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 14, - "K": [ - [746.213,0,363.165], - [0,746.641,235.418], - [0,0,1] - ], - "distCoef": [-0.33414,0.127633,-0.000792357,0.000136075,-0.0405619], - "R": [ - [0.9643490552,0.006836134333,-0.2645452079], - [-0.02440508255,0.9977035557,-0.06318233054], - [0.2635057717,0.0673860684,0.9623013177] - ], - "t": [ - [19.24633902], - [182.0747755], - [282.9928946] - ] - }, - { - "name": "01_15", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 15, - "K": [ - [745.225,0,366.568], - [0,745.569,216.05], - [0,0,1] - ], - "distCoef": [-0.319743,0.046174,-0.00158438,-0.000953331,0.0743504], - "R": [ - [0.9602661069,0.03565913048,-0.2767985376], - [-0.06162250151,0.9944158624,-0.08567239854], - [0.2721978533,0.09932531892,0.9571012536] - ], - "t": [ - [0.9330302863], - [174.5612072], - [288.1067574] - ] - }, - { - "name": "01_16", - "type": "vga", - "resolution": [640,480], - "panel": 1, - 
"node": 16, - "K": [ - [747.633,0,371.752], - [0,747.88,230.613], - [0,0,1] - ], - "distCoef": [-0.347758,0.198029,0.00072103,0.00029865,-0.136932], - "R": [ - [0.9682573711,0.05614690975,-0.2435676248], - [-0.07153002565,0.9959334273,-0.05477283913], - [0.2395018137,0.07045660367,0.968336072] - ], - "t": [ - [-3.74774], - [172.5737662], - [282.7618788] - ] - }, - { - "name": "01_17", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 17, - "K": [ - [748.152,0,373.9], - [0,748.508,234.452], - [0,0,1] - ], - "distCoef": [-0.345127,0.177692,-0.00116897,0.00210199,-0.0818461], - "R": [ - [0.9639501783,0.02458774974,-0.264944327], - [-0.04477053879,0.9965129817,-0.07040934697], - [0.2622892538,0.07973280283,0.9616896732] - ], - "t": [ - [-36.08309916], - [173.4726636], - [283.4522322] - ] - }, - { - "name": "01_18", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 18, - "K": [ - [743.791,0,363.617], - [0,744.126,236.963], - [0,0,1] - ], - "distCoef": [-0.312734,0.0122172,-0.00120247,-0.000963953,0.133944], - "R": [ - [0.9523198878,0.06045552763,-0.2990517689], - [-0.07234112338,0.9969633514,-0.02882425707], - [0.2964010681,0.04908365416,0.9538014478] - ], - "t": [ - [-57.80984395], - [175.8598769], - [275.2458542] - ] - }, - { - "name": "01_19", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 19, - "K": [ - [743.162,0,364.748], - [0,743.331,220.785], - [0,0,1] - ], - "distCoef": [-0.311505,0.00290054,-0.000860754,-0.000437091,0.146397], - "R": [ - [0.9677776267,0.05243241618,-0.246287042], - [-0.06515666231,0.9969134625,-0.04379677618], - [0.243230497,0.05843278173,0.968206866] - ], - "t": [ - [-19.88792012], - [144.796335], - [280.8929426] - ] - }, - { - "name": "01_20", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 20, - "K": [ - [744.661,0,343.237], - [0,744.907,246.044], - [0,0,1] - ], - "distCoef": [-0.326994,0.0904776,0.000984855,-0.00107766,-0.0214165], - "R": [ - [0.9717064093,0.03462931454,-0.2336396043], - [-0.0436324388,0.998486683,-0.03347468014], - [0.2321268283,0.04272182698,0.9717468709] - ], - "t": [ - [-15.15244103], - [127.7778149], - [279.5122056] - ] - }, - { - "name": "01_21", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 21, - "K": [ - [742.462,0,365.246], - [0,742.468,221.387], - [0,0,1] - ], - "distCoef": [-0.311193,-0.0017069,-0.0010044,-5.33063e-05,0.168374], - "R": [ - [0.9650420793,0.04068979072,-0.2589172188], - [-0.04945049005,0.9984003719,-0.02741069744], - [0.257387712,0.03925605981,0.965510501] - ], - "t": [ - [-1.672862451], - [122.1992626], - [279.1232554] - ] - }, - { - "name": "01_22", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 22, - "K": [ - [744.021,0,363.587], - [0,744.301,226.764], - [0,0,1] - ], - "distCoef": [-0.330855,0.115198,-0.00111581,-0.000578883,-0.0257811], - "R": [ - [0.9624230562,-0.007741542698,-0.2714441553], - [-0.003557050749,0.9991484058,-0.04110730506], - [0.271531229,0.0405281588,0.9615759252] - ], - "t": [ - [4.289641778], - [135.1743597], - [279.2863723] - ] - }, - { - "name": "01_23", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 23, - "K": [ - [745.029,0,358.645], - [0,745.162,224.101], - [0,0,1] - ], - "distCoef": [-0.31925,0.0412999,-0.000788365,0.000625647,0.108146], - "R": [ - [0.9553340738,0.01211961015,-0.2952793973], - [-0.03701510886,0.9961975848,-0.07886858543], - [0.293200766,0.08627564605,0.9521501057] - ], - "t": [ - [-2.968489269], - [143.230855], - [285.3382881] - ] - }, - 
{ - "name": "01_24", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 24, - "K": [ - [744.501,0,369.38], - [0,744.575,244.409], - [0,0,1] - ], - "distCoef": [-0.317214,0.0306635,-5.65201e-05,-0.000305408,0.106933], - "R": [ - [0.9627375442,0.05351140442,-0.2650904574], - [-0.07422624073,0.9948691584,-0.06874462026], - [0.2600516991,0.08585969499,0.9617698408] - ], - "t": [ - [-7.333655278], - [148.0612654], - [284.8699573] - ] - }, - { - "name": "02_01", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 1, - "K": [ - [746.79,0,376.022], - [0,747.048,234.17], - [0,0,1] - ], - "distCoef": [-0.317408,0.0301922,-0.000108969,-0.00027109,0.105931], - "R": [ - [0.977473966,0.04697618088,0.2057617172], - [0.001487552662,0.9733575223,-0.2292878562], - [-0.211050783,0.2244289915,0.9513617581] - ], - "t": [ - [-1.729507611], - [175.3460492], - [304.9109171] - ] - }, - { - "name": "02_02", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 2, - "K": [ - [747.689,0,367.065], - [0,747.811,212.158], - [0,0,1] - ], - "distCoef": [-0.333664,0.117162,0.000577725,-0.000310896,-0.0327554], - "R": [ - [0.9812751339,-0.05714257326,0.183939767], - [0.09271495859,0.9771941455,-0.1910380552], - [-0.1688284573,0.2045148611,0.9641942873] - ], - "t": [ - [-50.62568249], - [190.9654762], - [299.6250374] - ] - }, - { - "name": "02_03", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 3, - "K": [ - [745.627,0,353.486], - [0,745.817,252.683], - [0,0,1] - ], - "distCoef": [-0.321416,0.0392112,-0.00107045,-0.00134198,0.0908854], - "R": [ - [0.9757098845,0.1270834984,0.1784376802], - [-0.07601456941,0.9603325594,-0.2682967771], - [-0.2054556071,0.248215954,0.946666168] - ], - "t": [ - [-23.13649132], - [169.3490841], - [309.2380875] - ] - }, - { - "name": "02_04", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 4, - "K": [ - [746.11,0,381.584], - [0,746.321,224.917], - [0,0,1] - ], - "distCoef": [-0.323963,0.0585021,-0.000871966,0.000552522,0.0715102], - "R": [ - [0.979331342,0.07410153523,0.1881995881], - [-0.02608477747,0.9689731658,-0.2457856551], - [-0.2005734451,0.2357964511,0.950878713] - ], - "t": [ - [-32.63906075], - [150.8763932], - [306.9317958] - ] - }, - { - "name": "02_05", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 5, - "K": [ - [744.11,0,378.377], - [0,744.035,244.823], - [0,0,1] - ], - "distCoef": [-0.323078,0.0494134,-0.000238923,-0.000981516,0.0727453], - "R": [ - [0.9857440106,0.05652749171,0.1584720428], - [-0.01525193411,0.9680163878,-0.250422945], - [-0.1675593154,0.244435913,0.95507851] - ], - "t": [ - [-62.3494258], - [135.8190029], - [306.0165552] - ] - }, - { - "name": "02_06", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 6, - "K": [ - [743.928,0,352.844], - [0,744.181,228.627], - [0,0,1] - ], - "distCoef": [-0.303908,-0.0528673,-0.000528541,8.08764e-05,0.267531], - "R": [ - [0.9814194485,0.06212733968,0.1815380393], - [-0.0101664424,0.9616367605,-0.2741375282], - [-0.1916050874,0.2671983057,0.9444006332] - ], - "t": [ - [-53.86742917], - [106.6702196], - [310.2214119] - ] - }, - { - "name": "02_07", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 7, - "K": [ - [746.501,0,376.178], - [0,746.591,217.394], - [0,0,1] - ], - "distCoef": [-0.323449,0.0621904,-0.000592526,0.000355354,0.0689781], - "R": [ - [0.9775323693,0.09704954661,0.1871145437], - [-0.05094527723,0.9701636443,-0.2370381445], - [-0.2045361721,0.2221798567,0.9533105819] - ], - 
"t": [ - [-27.21830655], - [111.2122483], - [305.8578091] - ] - }, - { - "name": "02_08", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 8, - "K": [ - [747.056,0,346.722], - [0,747.425,231.954], - [0,0,1] - ], - "distCoef": [-0.331626,0.0978711,0.000923123,-0.00170198,0.0128988], - "R": [ - [0.9738310577,0.04398424166,0.222976361], - [0.006459505741,0.9753414162,-0.2206068824], - [-0.2271813062,0.2162741507,0.9495336465] - ], - "t": [ - [-23.1615402], - [89.62617671], - [306.715437] - ] - }, - { - "name": "02_09", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 9, - "K": [ - [746.084,0,344.827], - [0,746.456,222.936], - [0,0,1] - ], - "distCoef": [-0.31385,0.00765504,0.000335804,0.000338293,0.157318], - "R": [ - [0.9708044988,0.02558390192,0.2385038556], - [0.01777728087,0.9838878899,-0.1779005014], - [-0.2392124442,0.1769465571,0.9547079776] - ], - "t": [ - [-1.622489705], - [92.86686988], - [302.6276511] - ] - }, - { - "name": "02_10", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 10, - "K": [ - [743.875,0,345.16], - [0,744.131,231.932], - [0,0,1] - ], - "distCoef": [-0.309364,-0.0158069,0.000435688,-0.000318284,0.167974], - "R": [ - [0.9837217555,0.04774800386,0.1732386674], - [-0.008457215477,0.9752859506,-0.220784488], - [-0.179499257,0.2157253874,0.9598138226] - ], - "t": [ - [0.6070589451], - [94.58504844], - [305.3954199] - ] - }, - { - "name": "02_11", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 11, - "K": [ - [748.642,0,372.727], - [0,749.029,221.349], - [0,0,1] - ], - "distCoef": [-0.329743,0.0894243,0.000705225,0.000452301,0.0255748], - "R": [ - [0.9762818677,-0.03993432779,0.2127885436], - [0.08495434643,0.9746762651,-0.20685487], - [-0.1991393328,0.2200259705,0.9549513592] - ], - "t": [ - [18.17502224], - [86.30258496], - [305.899008] - ] - }, - { - "name": "02_12", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 12, - "K": [ - [746.297,0,386.393], - [0,746.341,223.432], - [0,0,1] - ], - "distCoef": [-0.329805,0.088881,-0.000101498,-0.000342857,0.0238941], - "R": [ - [0.9769251111,-0.05225372472,0.2070914666], - [0.09392861168,0.9759243238,-0.1968479875], - [-0.1918195589,0.211757556,0.9583130982] - ], - "t": [ - [31.97904484], - [101.8192368], - [305.2554798] - ] - }, - { - "name": "02_13", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 13, - "K": [ - [746.887,0,386.903], - [0,746.77,241.912], - [0,0,1] - ], - "distCoef": [-0.330222,0.0894843,0.000608161,-0.000202457,0.0188277], - "R": [ - [0.9805035597,0.07291108666,0.1824739514], - [-0.03359954242,0.9771464723,-0.2098948364], - [-0.1936074385,0.199671593,0.9605453736] - ], - "t": [ - [39.8755561], - [121.0360498], - [302.8306622] - ] - }, - { - "name": "02_14", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 14, - "K": [ - [745.399,0,359.381], - [0,745.103,221.453], - [0,0,1] - ], - "distCoef": [-0.32351,0.0564367,0.000553752,0.000358328,0.0789504], - "R": [ - [0.9639890244,-0.01369700088,0.2655890681], - [0.06651808592,0.9793475216,-0.1909287203], - [-0.2574888447,0.2017196672,0.9449913601] - ], - "t": [ - [64.66924198], - [136.2834945], - [299.1868513] - ] - }, - { - "name": "02_15", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 15, - "K": [ - [746.343,0,376.035], - [0,746.136,233.449], - [0,0,1] - ], - "distCoef": [-0.332319,0.10939,0.000552685,0.00121175,-0.00685584], - "R": [ - [0.9739293667,-0.02993852249,0.2248672353], - 
[0.07982373372,0.9730868608,-0.2161715356], - [-0.2123434957,0.2284855491,0.9501076748] - ], - "t": [ - [41.67937397], - [146.9667487], - [305.3208703] - ] - }, - { - "name": "02_16", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 16, - "K": [ - [747.983,0,369.069], - [0,747.865,212.357], - [0,0,1] - ], - "distCoef": [-0.333814,0.119177,-0.00123283,0.000206724,-0.0313224], - "R": [ - [0.9828420813,0.01261378295,0.1840172159], - [0.03080156014,0.9724259604,-0.2311688027], - [-0.181859031,0.2328704445,0.9553526307] - ], - "t": [ - [22.33056427], - [154.6384713], - [307.0242051] - ] - }, - { - "name": "02_17", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 17, - "K": [ - [743.255,0,372.405], - [0,743.629,259.514], - [0,0,1] - ], - "distCoef": [-0.301911,-0.0577323,-0.000292445,-0.000537705,0.240913], - "R": [ - [0.9702237144,0.05425789408,0.2360551311], - [-0.004184220731,0.978195713,-0.2076430576], - [-0.2421743923,0.2004725119,0.9492957051] - ], - "t": [ - [39.95715372], - [182.9757461], - [299.4720725] - ] - }, - { - "name": "02_18", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 18, - "K": [ - [746.171,0,380.016], - [0,746.628,215.7], - [0,0,1] - ], - "distCoef": [-0.310416,0.0111871,-0.00156578,-0.000885002,0.110566], - "R": [ - [0.9751942313,0.01121985931,0.2210663386], - [0.02134458651,0.9892938663,-0.1443677759], - [-0.220319359,0.1455051918,0.9645141882] - ], - "t": [ - [9.159436194], - [213.6293599], - [288.3403437] - ] - }, - { - "name": "02_19", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 19, - "K": [ - [745.09,0,380.114], - [0,745.176,232.983], - [0,0,1] - ], - "distCoef": [-0.31746,0.043353,-0.000108725,0.000220738,0.0862213], - "R": [ - [0.9809185988,0.05584586521,0.1862255137], - [-0.01423917048,0.975920974,-0.2176591338], - [-0.1938967473,0.2108541957,0.9580942331] - ], - "t": [ - [-1.989355998], - [159.4183424], - [303.0216832] - ] - }, - { - "name": "02_20", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 20, - "K": [ - [746.359,0,393.165], - [0,746.438,228.007], - [0,0,1] - ], - "distCoef": [-0.32236,0.0673245,-0.000115957,0.00130444,0.0588071], - "R": [ - [0.9826018096,0.03015545669,0.1832602856], - [0.01576123022,0.9696317731,-0.2440610748], - [-0.1850547688,0.2427032613,0.9522866477] - ], - "t": [ - [-25.36954265], - [136.7143691], - [307.7149997] - ] - }, - { - "name": "02_21", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 21, - "K": [ - [747.137,0,358.509], - [0,747.202,238.678], - [0,0,1] - ], - "distCoef": [-0.327929,0.0852816,0.000460613,0.000357406,0.0365027], - "R": [ - [0.9780966382,0.08951991601,0.1879179366], - [-0.04045439222,0.9673344336,-0.2502549415], - [-0.2041822921,0.2371714111,0.9497680314] - ], - "t": [ - [-10.00427836], - [118.005594], - [307.3165834] - ] - }, - { - "name": "02_22", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 22, - "K": [ - [745.847,0,374.568], - [0,746.074,247.807], - [0,0,1] - ], - "distCoef": [-0.32052,0.063252,0.000743322,-0.000945252,0.0534877], - "R": [ - [0.9839840132,0.07804627455,0.160263036], - [-0.03749054936,0.9695570383,-0.2419785283], - [-0.1742696772,0.2320946541,0.9569546233] - ], - "t": [ - [-1.458572059], - [110.2636917], - [306.6072245] - ] - }, - { - "name": "02_23", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 23, - "K": [ - [744.851,0,375.128], - [0,744.899,236.672], - [0,0,1] - ], - "distCoef": 
[-0.328747,0.0731957,0.000409854,0.000115616,0.0573405], - "R": [ - [0.9798731388,0.006836815724,0.1995041098], - [0.04188111895,0.9701291749,-0.2389463451], - [-0.1951783896,0.2424925605,0.9503171862] - ], - "t": [ - [13.92766978], - [118.8861106], - [308.0337581] - ] - }, - { - "name": "02_24", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 24, - "K": [ - [748.108,0,365.63], - [0,748.409,236.546], - [0,0,1] - ], - "distCoef": [-0.337502,0.145226,-9.99404e-05,-0.000712599,-0.0768278], - "R": [ - [0.9858983234,-0.01937546959,0.166219996], - [0.057736328,0.9716683618,-0.2291879382], - [-0.1570700873,0.2355529362,0.9590848773] - ], - "t": [ - [-5.69779309], - [141.0775615], - [307.1963385] - ] - }, - { - "name": "03_01", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 1, - "K": [ - [745.205,0,364.445], - [0,745.671,223.278], - [0,0,1] - ], - "distCoef": [-0.321278,0.0550501,-0.000663141,0.000431329,0.0680735], - "R": [ - [0.789168654,0.1464091436,-0.5964706181], - [-0.3274382264,0.921936374,-0.2069239719], - [0.5196123973,0.3586051937,0.7755032377] - ], - "t": [ - [-15.48720347], - [106.8731646], - [321.197831] - ] - }, - { - "name": "03_02", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 2, - "K": [ - [746.402,0,367.989], - [0,746.656,218.884], - [0,0,1] - ], - "distCoef": [-0.319108,0.0415571,-0.000289565,0.00121415,0.0978966], - "R": [ - [0.7844411333,0.123213727,-0.6078408392], - [-0.3461950886,0.9001611021,-0.2643084389], - [0.5145882519,0.4177659246,0.7487793823] - ], - "t": [ - [-25.69855827], - [65.19717944], - [326.035328] - ] - }, - { - "name": "03_03", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 3, - "K": [ - [747.999,0,350.415], - [0,748.222,213.374], - [0,0,1] - ], - "distCoef": [-0.322361,0.0444301,-0.000132478,-4.14576e-05,0.110213], - "R": [ - [0.8075592295,0.0617799019,-0.5865418439], - [-0.2672496857,0.9248714179,-0.2705373648], - [0.525762015,0.3752280693,0.763399109] - ], - "t": [ - [-8.799326732], - [72.40249706], - [323.1224723] - ] - }, - { - "name": "03_04", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 4, - "K": [ - [744.819,0,376.394], - [0,744.912,212.894], - [0,0,1] - ], - "distCoef": [-0.335892,0.121706,-0.00015411,0.0017688,-0.0013985], - "R": [ - [0.8410364559,-0.03582960221,-0.5397906256], - [-0.192384631,0.9127679401,-0.3603371217], - [0.5056143132,0.4069040761,0.7607780486] - ], - "t": [ - [3.728898504], - [75.32503712], - [325.8417248] - ] - }, - { - "name": "03_05", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 5, - "K": [ - [746.446,0,376.523], - [0,746.682,251.012], - [0,0,1] - ], - "distCoef": [-0.330943,0.0996499,0.00144142,-0.000113946,0.0131394], - "R": [ - [0.8610606531,-0.05437396314,-0.5055868113], - [-0.176556083,0.9004429458,-0.3975304402], - [0.4768673833,0.4315622475,0.7657359371] - ], - "t": [ - [31.93527518], - [62.43528973], - [326.764058] - ] - }, - { - "name": "03_06", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 6, - "K": [ - [744.998,0,378.484], - [0,744.973,240.788], - [0,0,1] - ], - "distCoef": [-0.31652,0.0338012,-0.0010118,-0.000122735,0.0959735], - "R": [ - [0.8769583834,-0.06555368648,-0.4760742674], - [-0.1128149484,0.9348860407,-0.3365425358], - [0.4671367907,0.348842092,0.8124607151] - ], - "t": [ - [52.69213606], - [109.2131316], - [317.2562433] - ] - }, - { - "name": "03_07", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 7, - "K": [ - 
[744.942,0,394.454], - [0,745.513,230.902], - [0,0,1] - ], - "distCoef": [-0.322593,0.0669124,0.000685625,0.000650135,0.0435827], - "R": [ - [0.8511772215,-0.03734239681,-0.5235483579], - [-0.1521244983,0.9371023984,-0.3141611561], - [0.5023499524,0.3470513512,0.7919595223] - ], - "t": [ - [39.57000229], - [127.8421428], - [318.5564893] - ] - }, - { - "name": "03_08", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 8, - "K": [ - [744.592,0,375.596], - [0,744.695,234.586], - [0,0,1] - ], - "distCoef": [-0.314208,0.0115966,-0.0002404,-0.00129875,0.131833], - "R": [ - [0.863242284,-0.08735605341,-0.4971736911], - [-0.1241310572,0.9179337282,-0.3768144785], - [0.4892895255,0.386996887,0.7815556088] - ], - "t": [ - [48.3076273], - [133.8669044], - [323.1008342] - ] - }, - { - "name": "03_09", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 9, - "K": [ - [746.083,0,388.49], - [0,746.196,219.485], - [0,0,1] - ], - "distCoef": [-0.327776,0.0952708,0.000477894,0.00116098,0.0130168], - "R": [ - [0.8627791791,-0.162720556,-0.478679547], - [-0.06768333431,0.9010943873,-0.4283081501], - [0.5010299935,0.401933982,0.766432006] - ], - "t": [ - [23.91664651], - [150.3571005], - [326.7446808] - ] - }, - { - "name": "03_10", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 10, - "K": [ - [744.984,0,374.291], - [0,745.244,231.69], - [0,0,1] - ], - "distCoef": [-0.317288,0.0201616,0.000340337,0.000302133,0.135473], - "R": [ - [0.8433461687,-0.104156761,-0.5271798639], - [-0.1611508321,0.8868626272,-0.433018579], - [0.5126379318,0.4501400333,0.7311472501] - ], - "t": [ - [5.809004706], - [133.1751931], - [335.4888131] - ] - }, - { - "name": "03_11", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 11, - "K": [ - [746.325,0,369.755], - [0,746.606,238.315], - [0,0,1] - ], - "distCoef": [-0.330117,0.107892,0.000853042,-0.00148033,-0.0192727], - "R": [ - [0.8487877999,-0.06352852013,-0.5249032272], - [-0.1660312052,0.9105147821,-0.3786772643], - [0.5019889537,0.4085669574,0.7622861219] - ], - "t": [ - [10.90299391], - [168.9126588], - [328.8547345] - ] - }, - { - "name": "03_12", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 12, - "K": [ - [745.397,0,373.191], - [0,745.394,241.989], - [0,0,1] - ], - "distCoef": [-0.315431,0.0239438,0.00152043,8.78247e-05,0.132462], - "R": [ - [0.7899500519,0.01447673769,-0.613000277], - [-0.2772192125,0.9001468868,-0.3359837649], - [0.5469263421,0.4353458466,0.7150843098] - ], - "t": [ - [-11.01289772], - [165.4412244], - [333.9391633] - ] - }, - { - "name": "03_13", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 13, - "K": [ - [746.289,0,356.696], - [0,746.559,221.83], - [0,0,1] - ], - "distCoef": [-0.307674,-0.0320128,-0.000713248,-0.000212304,0.187939], - "R": [ - [0.7812025858,0.003231301473,-0.6242692358], - [-0.256925784,0.9130359895,-0.316787663], - [0.5689566429,0.4078662043,0.7140962805] - ], - "t": [ - [-30.04397497], - [158.6113997], - [327.0561852] - ] - }, - { - "name": "03_14", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 14, - "K": [ - [744.216,0,367.374], - [0,744.503,234.384], - [0,0,1] - ], - "distCoef": [-0.313106,0.0107213,0.00051099,0.000391129,0.137335], - "R": [ - [0.7647493291,0.08765142393,-0.6383382266], - [-0.3090501184,0.9192036391,-0.2440342068], - [0.5653728752,0.3839035005,0.7300490493] - ], - "t": [ - [-30.23656889], - [178.7825502], - [321.7207122] - ] - }, - { - "name": "03_15", - "type": "vga", - 
"resolution": [640,480], - "panel": 3, - "node": 15, - "K": [ - [747.827,0,380.852], - [0,747.806,237.021], - [0,0,1] - ], - "distCoef": [-0.329904,0.102056,0.000500868,0.000776535,0.0163276], - "R": [ - [0.8420936086,0.09442452017,-0.5310012847], - [-0.2692856411,0.9266613257,-0.2622670985], - [0.4672939095,0.3638444688,0.8057627471] - ], - "t": [ - [-9.683781844], - [164.2881649], - [322.7392687] - ] - }, - { - "name": "03_16", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 16, - "K": [ - [745.289,0,371.652], - [0,745.447,216.538], - [0,0,1] - ], - "distCoef": [-0.317152,0.0301694,-0.000847782,0.000226416,0.100881], - "R": [ - [0.7751085928,0.08020770062,-0.6267163586], - [-0.2817854267,0.9316829094,-0.2292682483], - [0.5655118413,0.3543073259,0.74475679] - ], - "t": [ - [-42.18053512], - [150.9579844], - [316.9204289] - ] - }, - { - "name": "03_17", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 17, - "K": [ - [744.591,0,386.471], - [0,744.601,243.766], - [0,0,1] - ], - "distCoef": [-0.308716,-0.020066,-0.000742984,7.36231e-05,0.18193], - "R": [ - [0.8000888793,0.13985822,-0.5833502066], - [-0.3086873752,0.9298003917,-0.2004578159], - [0.5143635773,0.3404569133,0.7870954202] - ], - "t": [ - [-29.24407076], - [139.76037], - [318.5389184] - ] - }, - { - "name": "03_18", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 18, - "K": [ - [747.091,0,388.41], - [0,747.213,245.147], - [0,0,1] - ], - "distCoef": [-0.331947,0.109947,-0.00018029,-0.000335458,-0.0100282], - "R": [ - [0.7812031275,0.143907843,-0.6074637489], - [-0.3493109676,0.9072427652,-0.2342912992], - [0.5174007358,0.3952228456,0.7590094735] - ], - "t": [ - [-39.38157975], - [101.9329028], - [324.6812046] - ] - }, - { - "name": "03_19", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 19, - "K": [ - [743.815,0,380.782], - [0,743.921,233.579], - [0,0,1] - ], - "distCoef": [-0.31618,0.0384848,0.000240219,0.000426998,0.0977231], - "R": [ - [0.8097086682,0.09665101941,-0.578818152], - [-0.2718115959,0.9359285209,-0.2239559336], - [0.5200868476,0.3386685464,0.784100304] - ], - "t": [ - [-3.817362892], - [126.1763792], - [318.2990602] - ] - }, - { - "name": "03_20", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 20, - "K": [ - [746.163,0,356.033], - [0,746.281,215.327], - [0,0,1] - ], - "distCoef": [-0.323416,0.0556958,5.62358e-06,-0.000684023,0.0815018], - "R": [ - [0.8690981447,0.003405692177,-0.4946279574], - [-0.1831744592,0.9310985933,-0.3154402114], - [0.4594731031,0.3647517111,0.8098398958] - ], - "t": [ - [22.15812523], - [111.197586], - [320.9871724] - ] - }, - { - "name": "03_21", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 21, - "K": [ - [745.277,0,370.698], - [0,745.633,251.594], - [0,0,1] - ], - "distCoef": [-0.309423,-0.0154759,-0.000871178,-0.000110471,0.185828], - "R": [ - [0.8519925598,-0.01534543221,-0.5233289556], - [-0.157671027,0.9456449668,-0.2844212441], - [0.4992479597,0.3248385977,0.8032629458] - ], - "t": [ - [23.66925749], - [140.0971121], - [315.3107012] - ] - }, - { - "name": "03_22", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 22, - "K": [ - [749.812,0,361.025], - [0,750.052,224.033], - [0,0,1] - ], - "distCoef": [-0.333335,0.0892582,3.32371e-05,-0.00136116,0.0353235], - "R": [ - [0.8242021998,-0.0118106517,-0.5661724493], - [-0.2609232338,0.8794144434,-0.3981824994], - [0.5026030242,0.4759104383,0.7217336453] - ], - "t": [ - [6.739100305], - [105.8858326], - 
[336.9710973] - ] - }, - { - "name": "03_23", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 23, - "K": [ - [744.781,0,365.976], - [0,744.836,235.682], - [0,0,1] - ], - "distCoef": [-0.319452,0.032528,0.000754874,-0.000913445,0.102166], - "R": [ - [0.8233335342,0.02583843362,-0.5669693703], - [-0.2570181529,0.9076367155,-0.3318693443], - [0.506027233,0.4189605805,0.7539286912] - ], - "t": [ - [-4.103462359], - [133.5127669], - [329.5726238] - ] - }, - { - "name": "03_24", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 24, - "K": [ - [746.135,0,373.553], - [0,746.515,225.298], - [0,0,1] - ], - "distCoef": [-0.323756,0.0623909,2.70614e-05,0.000962707,0.0761173], - "R": [ - [0.8557458945,0.0294251088,-0.5165589289], - [-0.2234217673,0.921515875,-0.3176337608], - [0.4666708454,0.3872242956,0.7951576366] - ], - "t": [ - [-1.49693002], - [128.5290469], - [325.1203285] - ] - }, - { - "name": "04_01", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 1, - "K": [ - [745.756,0,368.953], - [0,745.945,245.188], - [0,0,1] - ], - "distCoef": [-0.3245,0.0724334,-0.000312337,0.000678015,0.0415529], - "R": [ - [0.04501388353,-0.06073969189,-0.9971381249], - [-0.08162898106,0.9945884367,-0.06426936354], - [0.9956457501,0.08428838276,0.03981216889] - ], - "t": [ - [-59.71104012], - [137.3658878], - [280.4259077] - ] - }, - { - "name": "04_02", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 2, - "K": [ - [745.144,0,382.474], - [0,745.286,222.525], - [0,0,1] - ], - "distCoef": [-0.322843,0.0690658,-0.000684608,-0.000275864,0.0370253], - "R": [ - [0.1096717734,-0.01795980665,-0.9938055884], - [-0.007042199406,0.9997976117,-0.01884523745], - [0.9939429106,0.009065367736,0.1095231006] - ], - "t": [ - [-53.83503278], - [149.6185443], - [272.7820927] - ] - }, - { - "name": "04_03", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 3, - "K": [ - [742.832,0,377.499], - [0,742.665,258.984], - [0,0,1] - ], - "distCoef": [-0.312355,-0.00257413,0.000454129,0.00111055,0.151137], - "R": [ - [0.07040546321,0.04162572676,-0.9966495721], - [-0.08610880414,0.9956530214,0.03550119457], - [0.9937949208,0.08332082476,0.07368375372] - ], - "t": [ - [-50.21742462], - [111.4103034], - [280.5940976] - ] - }, - { - "name": "04_04", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 4, - "K": [ - [743.339,0,393.561], - [0,743.571,223.626], - [0,0,1] - ], - "distCoef": [-0.307228,-0.0295629,-0.000661125,6.4492e-05,0.183577], - "R": [ - [0.09450112049,0.05679880598,-0.993903131], - [-0.03670643306,0.9978910099,0.05353662459], - [0.9948478155,0.03142336774,0.09638670013] - ], - "t": [ - [-21.9069], - [118.1273376], - [275.8163164] - ] - }, - { - "name": "04_05", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 5, - "K": [ - [746.019,0,364.58], - [0,746.273,258.887], - [0,0,1] - ], - "distCoef": [-0.327759,0.0738839,0.000801649,0.000211169,0.0604088], - "R": [ - [0.135847977,0.01131634816,-0.9906650632], - [-0.049797809,0.9987488181,0.004580011864], - [0.98947739,0.04871076425,0.1362415358] - ], - "t": [ - [-12.12624478], - [90.71810202], - [278.5550143] - ] - }, - { - "name": "04_06", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 6, - "K": [ - [745.588,0,362.328], - [0,745.695,224.495], - [0,0,1] - ], - "distCoef": [-0.317313,0.0342325,-0.00011624,0.00140051,0.0955503], - "R": [ - [0.09768474559,0.09486669264,-0.9906856217], - [-0.08671696061,0.9924717325,0.0864871607], - 
[0.9914322262,0.07746076975,0.1051758999] - ], - "t": [ - [6.120914551], - [75.66522558], - [280.1538331] - ] - }, - { - "name": "04_07", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 7, - "K": [ - [744.949,0,374.902], - [0,744.948,218.152], - [0,0,1] - ], - "distCoef": [-0.307279,-0.0368619,-0.000928182,-0.000206153,0.214368], - "R": [ - [0.08413477249,-0.05845821559,-0.994738145], - [-0.03729096802,0.9973936317,-0.06176833509], - [0.9957563576,0.04229161317,0.08173552284] - ], - "t": [ - [3.352563309], - [99.7043349], - [277.3248716] - ] - }, - { - "name": "04_08", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 8, - "K": [ - [744.851,0,365.832], - [0,744.82,236.655], - [0,0,1] - ], - "distCoef": [-0.313642,0.00106915,0.000461187,-0.00049658,0.163492], - "R": [ - [0.1068294918,-0.02053293437,-0.9940653189], - [-0.04471775106,0.998675844,-0.02543386204], - [0.9932712532,0.04716945203,0.1057698462] - ], - "t": [ - [34.88142403], - [92.93282517], - [277.1804593] - ] - }, - { - "name": "04_09", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 9, - "K": [ - [745.947,0,354.92], - [0,745.962,217.292], - [0,0,1] - ], - "distCoef": [-0.332252,0.114802,-0.000779302,-0.000175195,-0.0220414], - "R": [ - [0.0951039165,0.01286389124,-0.99538423], - [-0.04378002227,0.9990030715,0.008727700331], - [0.9945041753,0.04274790527,0.09557228614] - ], - "t": [ - [51.3876018], - [107.4685168], - [276.8925649] - ] - }, - { - "name": "04_10", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 10, - "K": [ - [743.419,0,373.623], - [0,743.493,209.714], - [0,0,1] - ], - "distCoef": [-0.312784,-0.00205334,-0.00151839,-4.48796e-05,0.146707], - "R": [ - [0.07554192003,-0.02015366607,-0.996938939], - [-0.05402378201,0.9982445697,-0.02427365106], - [0.9956780852,0.05569209012,0.07432053419] - ], - "t": [ - [36.95032578], - [126.4783785], - [278.9862968] - ] - }, - { - "name": "04_11", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 11, - "K": [ - [743.168,0,378.723], - [0,743.196,231.359], - [0,0,1] - ], - "distCoef": [-0.312654,0.00616666,0.000125459,-0.000163635,0.137741], - "R": [ - [0.104627794,-0.01026277171,-0.994458496], - [-0.05855646041,0.9981483637,-0.01646162423], - [0.9927860624,0.05995431298,0.1038331098] - ], - "t": [ - [61.78762978], - [139.882294], - [278.0088471] - ] - }, - { - "name": "04_12", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 12, - "K": [ - [746.755,0,377.564], - [0,747.014,231.526], - [0,0,1] - ], - "distCoef": [-0.342661,0.169314,0.000669193,0.000564241,-0.092518], - "R": [ - [0.09069981891,0.03748374052,-0.9951726041], - [-0.02832816732,0.9989841486,0.03504548138], - [0.9954752924,0.02501279723,0.09166952704] - ], - "t": [ - [63.18640006], - [168.1511303], - [272.7093484] - ] - }, - { - "name": "04_13", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 13, - "K": [ - [745.766,0,371.377], - [0,745.897,229.211], - [0,0,1] - ], - "distCoef": [-0.323265,0.06437,0.000357726,0.000480753,0.061899], - "R": [ - [0.03414536791,0.03842962758,-0.9986777546], - [-0.02717943982,0.9989265658,0.03750992125], - [0.9990472321,0.02586271187,0.03515321085] - ], - "t": [ - [27.04698548], - [171.5967975], - [274.5649723] - ] - }, - { - "name": "04_14", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 14, - "K": [ - [744.965,0,366.266], - [0,745.319,235.632], - [0,0,1] - ], - "distCoef": [-0.317134,0.0349168,5.85303e-05,0.000379707,0.110605], - "R": [ - 
[0.05221731101,0.04748668842,-0.9975060736], - [0.03426805086,0.9981953182,0.04931335942], - [0.9980476207,-0.03675759989,0.05049579913] - ], - "t": [ - [31.93275734], - [208.7852536], - [260.7309393] - ] - }, - { - "name": "04_15", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 15, - "K": [ - [744.586,0,371.051], - [0,745.106,212.085], - [0,0,1] - ], - "distCoef": [-0.332822,0.11382,-0.000911903,0.000640183,-0.00904196], - "R": [ - [0.0693166226,0.04834029473,-0.9964228127], - [-0.01396942206,0.9987743784,0.04748258878], - [0.9974968978,0.01062811814,0.06990695264] - ], - "t": [ - [16.12425569], - [198.357827], - [269.7404532] - ] - }, - { - "name": "04_16", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 16, - "K": [ - [742.58,0,362.432], - [0,742.717,222.722], - [0,0,1] - ], - "distCoef": [-0.316061,0.0181932,0.000637155,-0.000119442,0.122715], - "R": [ - [0.07545496093,-0.0349426896,-0.9965367817], - [-0.03652359913,0.9986183515,-0.03778114217], - [0.9964800929,0.03924788454,0.07407447592] - ], - "t": [ - [-15.86676392], - [179.6369531], - [275.0674259] - ] - }, - { - "name": "04_17", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 17, - "K": [ - [745.044,0,350.241], - [0,745.211,214.104], - [0,0,1] - ], - "distCoef": [-0.330556,0.0995367,-0.000406045,-3.83783e-05,-0.00374247], - "R": [ - [0.0837025501,0.02221656332,-0.9962430965], - [-0.04478154079,0.9988252756,0.01851168242], - [0.9954840515,0.04306382584,0.08459911461] - ], - "t": [ - [-23.0620205], - [182.4550181], - [276.0013748] - ] - }, - { - "name": "04_18", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 18, - "K": [ - [747.543,0,399.307], - [0,747.43,229.515], - [0,0,1] - ], - "distCoef": [-0.337874,0.152604,0.000377489,0.002871,-0.0603327], - "R": [ - [0.03967719066,0.06607189882,-0.9970256891], - [-0.02383145062,0.9975901546,0.06516091958], - [0.998928317,0.02117516625,0.04115616396] - ], - "t": [ - [-45.47747339], - [181.8911988], - [269.8403328] - ] - }, - { - "name": "04_19", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 19, - "K": [ - [743.963,0,369.391], - [0,744.08,218.072], - [0,0,1] - ], - "distCoef": [-0.320196,0.0539371,0.000417857,0.00192962,0.0700112], - "R": [ - [0.0434323362,0.03783761887,-0.9983395949], - [-0.08481170801,0.9958149524,0.03405223652], - [0.9954499517,0.08319191804,0.04645964289] - ], - "t": [ - [-24.42650241], - [136.5925943], - [281.0885176] - ] - }, - { - "name": "04_20", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 20, - "K": [ - [745.858,0,356.253], - [0,746.045,207.418], - [0,0,1] - ], - "distCoef": [-0.328012,0.0801152,-7.74627e-05,-0.000454429,0.0269942], - "R": [ - [0.0976780849,0.06705669278,-0.9929563896], - [-0.1171365339,0.9915671608,0.05544004021], - [0.9883005738,0.1108961929,0.1047091699] - ], - "t": [ - [-1.775430866], - [107.2147587], - [285.054156] - ] - }, - { - "name": "04_21", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 21, - "K": [ - [746.156,0,369.678], - [0,746.129,226.325], - [0,0,1] - ], - "distCoef": [-0.331296,0.10434,-0.000526263,0.0017798,0.0107539], - "R": [ - [0.06864954522,0.009029787974,-0.9975999714], - [-0.09824772164,0.9951594531,0.00224680986], - [0.9927913301,0.09785768182,0.06920439997] - ], - "t": [ - [2.330018678], - [104.6606406], - [283.2576255] - ] - }, - { - "name": "04_22", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 22, - "K": [ - [746.305,0,363.016], - [0,746.511,222.294], - 
[0,0,1] - ], - "distCoef": [-0.313633,0.00103632,0.000318828,-0.000294887,0.154057], - "R": [ - [0.08441946195,-0.0784287402,-0.9933389588], - [-0.07957536672,0.9931828981,-0.08517917513], - [0.9932477614,0.08623609206,0.07760297012] - ], - "t": [ - [9.995164317], - [122.6888691], - [282.4272415] - ] - }, - { - "name": "04_23", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 23, - "K": [ - [745.178,0,358.539], - [0,745.299,233.674], - [0,0,1] - ], - "distCoef": [-0.315081,0.0210219,-6.99317e-06,-0.000330658,0.115227], - "R": [ - [0.1162513982,0.03935918122,-0.9924396542], - [-0.02556811677,0.999001962,0.03662446354], - [0.9928906706,0.02111716788,0.117141715] - ], - "t": [ - [32.91845612], - [159.7823772], - [272.1694603] - ] - }, - { - "name": "04_24", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 24, - "K": [ - [746.014,0,365.199], - [0,746.411,216.584], - [0,0,1] - ], - "distCoef": [-0.320661,0.0432533,-0.00136099,-0.000113861,0.0956118], - "R": [ - [0.1001711426,-0.0639180002,-0.9929150172], - [-0.0054812292,0.9978838124,-0.06479084071], - [0.9949551238,0.01193256733,0.09960881242] - ], - "t": [ - [-9.066812064], - [167.2144724], - [271.0944115] - ] - }, - { - "name": "05_01", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 1, - "K": [ - [744.506,0,379.212], - [0,745.093,221.816], - [0,0,1] - ], - "distCoef": [-0.322425,0.0503962,-0.00139268,-0.000488272,0.0792831], - "R": [ - [0.4832137358,-0.07031409603,-0.8726742883], - [-0.1214142278,0.9817563233,-0.14633218], - [0.8670427157,0.1766647942,0.465861009] - ], - "t": [ - [-31.81590772], - [187.5269902], - [291.8752718] - ] - }, - { - "name": "05_02", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 2, - "K": [ - [746.146,0,379.909], - [0,746.274,243.237], - [0,0,1] - ], - "distCoef": [-0.327102,0.0750235,0.00051439,0.000830868,0.0552106], - "R": [ - [0.559561068,-0.04316954181,-0.8276640634], - [-0.1711397799,0.9711012062,-0.1663539088], - [0.8109269924,0.2347314165,0.5360024022] - ], - "t": [ - [-21.47998338], - [182.028679], - [304.5116426] - ] - }, - { - "name": "05_03", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 3, - "K": [ - [746.598,0,366.137], - [0,746.916,245.497], - [0,0,1] - ], - "distCoef": [-0.34673,0.191883,-0.000717065,0.000142378,-0.151818], - "R": [ - [0.4493443217,0.06721032382,-0.8908268367], - [-0.2833621033,0.9563979118,-0.07077395533], - [0.8472281859,0.2842284411,0.4487968296] - ], - "t": [ - [-42.79170468], - [156.78227], - [309.5144468] - ] - }, - { - "name": "05_04", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 4, - "K": [ - [744.97,0,361.533], - [0,745.268,216.194], - [0,0,1] - ], - "distCoef": [-0.320215,0.0355127,-0.000935438,6.82351e-05,0.107335], - "R": [ - [0.5139859054,0.07264601249,-0.8547169391], - [-0.2477501277,0.96651576,-0.06683681477], - [0.8212419639,0.2461094116,0.5147735369] - ], - "t": [ - [-21.66847624], - [145.8563675], - [305.5618637] - ] - }, - { - "name": "05_05", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 5, - "K": [ - [743.904,0,367.466], - [0,744.108,216.808], - [0,0,1] - ], - "distCoef": [-0.328736,0.086922,-0.000934339,0.000214876,0.0243362], - "R": [ - [0.4889793362,0.07185582001,-0.8693307483], - [-0.2209595119,0.9743010874,-0.0437525441], - [0.8438460185,0.2134809878,0.4922903259] - ], - "t": [ - [-47.80972546], - [144.3254019], - [299.7644507] - ] - }, - { - "name": "05_06", - "type": "vga", - "resolution": [640,480], - "panel": 
5, - "node": 6, - "K": [ - [745.323,0,383.952], - [0,745.526,234.808], - [0,0,1] - ], - "distCoef": [-0.334223,0.133657,-0.000107051,0.00148947,-0.0461754], - "R": [ - [0.4969854565,0.0559027949,-0.8659563116], - [-0.2018212488,0.978003949,-0.05269211703], - [0.8439630558,0.2009556001,0.4973361109] - ], - "t": [ - [-46.56558119], - [125.7186081], - [298.6423415] - ] - }, - { - "name": "05_07", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 7, - "K": [ - [746.158,0,356.674], - [0,746.317,240.893], - [0,0,1] - ], - "distCoef": [-0.334568,0.11153,0.000321304,-0.000871385,-0.0157856], - "R": [ - [0.5541201274,0.02610072644,-0.8320274253], - [-0.1769665492,0.9803549196,-0.08710380092], - [0.8134087072,0.1955069916,0.5478533484] - ], - "t": [ - [-14.70019562], - [115.5481293], - [299.4445791] - ] - }, - { - "name": "05_08", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 8, - "K": [ - [744.96,0,386.044], - [0,745.46,258.776], - [0,0,1] - ], - "distCoef": [-0.325919,0.068823,-0.000458274,0.000477805,0.0465958], - "R": [ - [0.4763065258,-0.004539644313,-0.8792675845], - [-0.1710253429,0.980409884,-0.09770768372], - [0.8624861886,0.1969158475,0.4661992314] - ], - "t": [ - [-40.46029545], - [93.91456762], - [297.4902987] - ] - }, - { - "name": "05_09", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 9, - "K": [ - [745.188,0,367.116], - [0,745.437,236.843], - [0,0,1] - ], - "distCoef": [-0.328194,0.058828,0.000388874,-0.00143808,0.0829656], - "R": [ - [0.5065601345,-0.04543027129,-0.8610069225], - [-0.1705921502,0.9735884993,-0.1517357977], - [0.845159836,0.2237443283,0.4854310735] - ], - "t": [ - [-16.55300824], - [76.93410209], - [300.8962768] - ] - }, - { - "name": "05_10", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 10, - "K": [ - [747.452,0,374.886], - [0,747.648,257.28], - [0,0,1] - ], - "distCoef": [-0.337728,0.123608,0.00138141,5.97732e-05,-0.0225942], - "R": [ - [0.4549222289,-0.02855444123,-0.8900732608], - [-0.1699899924,0.9783230281,-0.1182685721], - [0.8741562607,0.2051065493,0.4402069233] - ], - "t": [ - [-13.61854908], - [96.6157071], - [299.0141417] - ] - }, - { - "name": "05_11", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 11, - "K": [ - [746.39,0,405.604], - [0,746.458,241.87], - [0,0,1] - ], - "distCoef": [-0.333064,0.100943,0.000870611,0.00103156,0.0180409], - "R": [ - [0.5002384593,-0.05591048228,-0.8640807264], - [-0.1916757277,0.9660062257,-0.1734715752], - [0.8444062406,0.2524004556,0.4725167836] - ], - "t": [ - [16.55277765], - [75.44647006], - [303.7304898] - ] - }, - { - "name": "05_12", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 12, - "K": [ - [745.943,0,392.757], - [0,746.143,272.1], - [0,0,1] - ], - "distCoef": [-0.323245,0.0770562,0.00168738,0.000666505,0.0382015], - "R": [ - [0.5344619138,-0.0483612619,-0.8438078283], - [-0.2099054746,0.9594877737,-0.1879438847], - [0.818712498,0.277568731,0.5026583782] - ], - "t": [ - [45.5535171], - [81.37072912], - [304.8427161] - ] - }, - { - "name": "05_13", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 13, - "K": [ - [748.463,0,383.471], - [0,748.465,243.614], - [0,0,1] - ], - "distCoef": [-0.34071,0.149034,0.000455623,0.000254671,-0.0668973], - "R": [ - [0.550270912,-0.09726860505,-0.8293013577], - [-0.1127468592,0.975440235,-0.1892207537], - [0.82733915,0.1976238001,0.525789658] - ], - "t": [ - [34.15956958], - [127.9842494], - [295.9545727] - ] - }, - { - "name": "05_14", - 
"type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 14, - "K": [ - [744.467,0,372.192], - [0,744.287,242.67], - [0,0,1] - ], - "distCoef": [-0.321164,0.0557106,-0.000170048,0.000249902,0.0584864], - "R": [ - [0.5607110475,-0.1151130063,-0.8199708025], - [-0.101866971,0.9731761842,-0.2062795062], - [0.8217215109,0.1991911399,0.5339444244] - ], - "t": [ - [50.41224037], - [142.3474205], - [294.74195] - ] - }, - { - "name": "05_15", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 15, - "K": [ - [746.542,0,352.38], - [0,746.666,240.759], - [0,0,1] - ], - "distCoef": [-0.327959,0.100036,-0.000636984,-0.00122606,-0.0366604], - "R": [ - [0.5029624145,-0.05772144518,-0.8623787128], - [-0.198700467,0.9633205664,-0.180365215], - [0.8411580909,0.262071977,0.4730447599] - ], - "t": [ - [34.04469815], - [136.31759], - [307.4406203] - ] - }, - { - "name": "05_16", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 16, - "K": [ - [747.042,0,371.719], - [0,747.231,244.896], - [0,0,1] - ], - "distCoef": [-0.323957,0.0675271,-0.000219383,0.00030566,0.0452733], - "R": [ - [0.5145114331,-0.105655334,-0.8509494319], - [-0.1209004538,0.9735279663,-0.1939752023], - [0.8489175846,0.2026826318,0.4881174913] - ], - "t": [ - [9.341169646], - [165.8735131], - [297.8569993] - ] - }, - { - "name": "05_17", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 17, - "K": [ - [745.814,0,386.675], - [0,746.085,252.153], - [0,0,1] - ], - "distCoef": [-0.320652,0.0597547,0.000647483,5.56623e-05,0.0523558], - "R": [ - [0.5123119379,-0.06682282728,-0.856195765], - [-0.1341513719,0.9785027468,-0.1566390244], - [0.8482569703,0.1951078787,0.4923342645] - ], - "t": [ - [9.076647729], - [186.6487394], - [296.0424945] - ] - }, - { - "name": "05_18", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 18, - "K": [ - [744.362,0,367.747], - [0,744.705,261.961], - [0,0,1] - ], - "distCoef": [-0.317525,0.0240072,0.000331,-0.000409781,0.122239], - "R": [ - [0.5214772573,-0.05602259067,-0.8514240656], - [-0.1526209796,0.9756261952,-0.1576716965], - [0.8395047985,0.2121673788,0.5002166498] - ], - "t": [ - [-2.829687906], - [192.8140289], - [298.6606918] - ] - }, - { - "name": "05_19", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 19, - "K": [ - [744.259,0,353.379], - [0,744.524,245.823], - [0,0,1] - ], - "distCoef": [-0.320328,0.0298824,0.00026675,-0.00161079,0.123162], - "R": [ - [0.5556726344,-0.05485450779,-0.8295896012], - [-0.2099711545,0.9562161648,-0.2038694692], - [0.8044501462,0.2874745713,0.519825291] - ], - "t": [ - [-1.476630227], - [134.2745178], - [310.4571486] - ] - }, - { - "name": "05_20", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 20, - "K": [ - [743.679,0,405.845], - [0,743.856,234.88], - [0,0,1] - ], - "distCoef": [-0.326644,0.0646831,0.000108119,5.73367e-05,0.058946], - "R": [ - [0.447769915,-0.01338423954,-0.894048637], - [-0.18660487,0.9764723016,-0.1080762074], - [0.8744602482,0.2152271039,0.4347373552] - ], - "t": [ - [-41.39083575], - [143.2049031], - [297.8732354] - ] - }, - { - "name": "05_21", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 21, - "K": [ - [746.956,0,354.763], - [0,747.081,232.068], - [0,0,1] - ], - "distCoef": [-0.333648,0.0797639,-0.000768992,-0.00091097,0.0508097], - "R": [ - [0.5053420531,-0.009379958189,-0.8628681393], - [-0.2526298673,0.9545207072,-0.1583299394], - [0.8251106347,0.2979970402,0.4799897963] - ], - "t": [ - [-19.66925616], - 
[96.29580053], - [309.4868577] - ] - }, - { - "name": "05_22", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 22, - "K": [ - [748.369,0,375.575], - [0,748.642,247.648], - [0,0,1] - ], - "distCoef": [-0.339087,0.143465,-0.000470446,0.00132222,-0.0624301], - "R": [ - [0.54260376,-0.05746408722,-0.8380209057], - [-0.1470082191,0.975763273,-0.1620944744], - [0.8270246327,0.2111490322,0.5210051277] - ], - "t": [ - [3.173863757], - [116.0988382], - [299.4207466] - ] - }, - { - "name": "05_23", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 23, - "K": [ - [744.544,0,368.615], - [0,744.426,281.181], - [0,0,1] - ], - "distCoef": [-0.322575,0.0664483,0.00114224,0.000391788,0.0483369], - "R": [ - [0.5347472888,-0.05715349527,-0.8430769924], - [-0.1466458645,0.9762943366,-0.1591991164], - [0.832190079,0.2087650503,0.5136894259] - ], - "t": [ - [16.7223507], - [130.5590862], - [298.5444367] - ] - }, - { - "name": "05_24", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 24, - "K": [ - [743.308,0,356.74], - [0,743.243,228.93], - [0,0,1] - ], - "distCoef": [-0.321093,0.0447792,0.000127467,-8.40104e-05,0.095825], - "R": [ - [0.5706235669,-0.133891243,-0.8102233519], - [-0.1678811389,0.9467635938,-0.2746900447], - [0.8038685639,0.2927658322,0.5177678046] - ], - "t": [ - [6.742844805], - [124.9131408], - [309.8640068] - ] - }, - { - "name": "06_01", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 1, - "K": [ - [744.518,0,344.042], - [0,744.512,240.289], - [0,0,1] - ], - "distCoef": [-0.313532,-0.0139368,0.00116047,-0.000125352,0.195046], - "R": [ - [-0.3305715804,0.1011846603,-0.9383411399], - [-0.314462461,0.9256148845,0.2105954561], - [0.8898515555,0.3646899369,-0.2741631979] - ], - "t": [ - [-23.56718534], - [104.1648487], - [320.754952] - ] - }, - { - "name": "06_02", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 2, - "K": [ - [748.956,0,345.566], - [0,748.875,227.82], - [0,0,1] - ], - "distCoef": [-0.335662,0.0955564,-6.0167e-05,-0.0012999,0.0278092], - "R": [ - [-0.2903396332,0.1603112194,-0.9433998147], - [-0.341086429,0.9037763758,0.2585504022], - [0.8940709957,0.3968483028,-0.2077221201] - ], - "t": [ - [-2.499901432], - [69.14355517], - [325.2941984] - ] - }, - { - "name": "06_03", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 3, - "K": [ - [743.901,0,369.68], - [0,743.816,251.042], - [0,0,1] - ], - "distCoef": [-0.320568,0.044977,0.000366128,-0.00033077,0.103335], - "R": [ - [-0.3123459653,0.110763308,-0.943488997], - [-0.3278062139,0.9196080197,0.216481353], - [0.891618239,0.3768986331,-0.250926954] - ], - "t": [ - [2.578346941], - [71.05917793], - [323.4074447] - ] - }, - { - "name": "06_04", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 4, - "K": [ - [745.814,0,378.476], - [0,745.908,222.393], - [0,0,1] - ], - "distCoef": [-0.316287,0.0251632,0.000357033,0.00145486,0.13215], - "R": [ - [-0.2756543214,0.09031338143,-0.9570048005], - [-0.3333214643,0.9248259371,0.1832860813], - [0.9016160472,0.3695138418,-0.2248288776] - ], - "t": [ - [26.15902854], - [86.10496093], - [322.4382284] - ] - }, - { - "name": "06_05", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 5, - "K": [ - [750.419,0,363.736], - [0,750.614,222.964], - [0,0,1] - ], - "distCoef": [-0.344753,0.14329,-0.000836382,-0.000451111,-0.060951], - "R": [ - [-0.2930259634,0.06094491301,-0.9541601031], - [-0.3875087878,0.9047544541,0.1767945619], - 
[0.8740553324,0.4215508218,-0.2414998562] - ], - "t": [ - [36.26889278], - [61.41890121], - [327.3260635] - ] - }, - { - "name": "06_06", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 6, - "K": [ - [747.394,0,354.724], - [0,747.506,211.184], - [0,0,1] - ], - "distCoef": [-0.329009,0.0921746,-0.00050966,0.000333806,0.021085], - "R": [ - [-0.2297156979,0.02557529828,-0.9729216835], - [-0.3964529538,0.9104994627,0.1175405629], - [0.888850805,0.4127185877,-0.199016617] - ], - "t": [ - [62.78312093], - [81.38139883], - [324.7093469] - ] - }, - { - "name": "06_07", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 7, - "K": [ - [746.623,0,374.989], - [0,746.758,209.923], - [0,0,1] - ], - "distCoef": [-0.319339,0.0433323,-0.00139256,0.000754597,0.0938733], - "R": [ - [-0.2846142448,0.03267216609,-0.9580852056], - [-0.3313740809,0.934457856,0.1303063082], - [0.8995476364,0.3545716359,-0.255133308] - ], - "t": [ - [45.81195811], - [121.7115234], - [320.8009986] - ] - }, - { - "name": "06_08", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 8, - "K": [ - [745.971,0,357.954], - [0,746.024,209.947], - [0,0,1] - ], - "distCoef": [-0.314348,0.0246684,-0.0014997,0.000635776,0.111152], - "R": [ - [-0.3038162213,-0.0261928812,-0.9523705354], - [-0.3441704234,0.9351353343,0.08407512184], - [0.8883931693,0.3533211563,-0.2931240987] - ], - "t": [ - [41.47715732], - [140.438376], - [322.3540865] - ] - }, - { - "name": "06_09", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 9, - "K": [ - [742.648,0,362.103], - [0,742.703,220.817], - [0,0,1] - ], - "distCoef": [-0.304218,-0.0643312,-0.000139411,-0.000234647,0.289172], - "R": [ - [-0.2807259034,-0.0411671215,-0.958904706], - [-0.3740921558,0.9247597922,0.06981680165], - [0.8838823599,0.3783181134,-0.2750043253] - ], - "t": [ - [37.64720227], - [153.3424109], - [325.0305142] - ] - }, - { - "name": "06_10", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 10, - "K": [ - [747.72,0,366.165], - [0,747.851,213.209], - [0,0,1] - ], - "distCoef": [-0.324647,0.0523798,-0.00077308,-0.000271098,0.0916616], - "R": [ - [-0.2880158499,0.02777358159,-0.957222805], - [-0.3788720768,0.9147158267,0.1405379157], - [0.8794900907,0.4031421393,-0.2529300217] - ], - "t": [ - [33.16578395], - [147.9736193], - [327.8869733] - ] - }, - { - "name": "06_11", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 11, - "K": [ - [745.331,0,369.444], - [0,745.587,207.732], - [0,0,1] - ], - "distCoef": [-0.317455,0.0357855,-0.00041249,0.000556817,0.0920153], - "R": [ - [-0.3142048567,0.04518634316,-0.9482792323], - [-0.3166241188,0.9366885696,0.1495449465], - [0.8949997069,0.3472358248,-0.2800050117] - ], - "t": [ - [26.61359186], - [187.9055539], - [317.8889871] - ] - }, - { - "name": "06_12", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 12, - "K": [ - [747.25,0,346.366], - [0,747.394,225.779], - [0,0,1] - ], - "distCoef": [-0.328454,0.0750084,3.92686e-05,0.00130952,0.0669429], - "R": [ - [-0.2993781475,0.05639323365,-0.9524665495], - [-0.3171785116,0.9355987261,0.1550897014], - [0.8998725002,0.3485323901,-0.2622110915] - ], - "t": [ - [13.58039626], - [195.4066632], - [317.2443523] - ] - }, - { - "name": "06_13", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 13, - "K": [ - [743.861,0,344.414], - [0,743.872,231.421], - [0,0,1] - ], - "distCoef": [-0.307564,-0.0231037,-0.000140407,-0.000635225,0.208058], - "R": [ - 
[-0.2583036736,0.07116007646,-0.9634393887], - [-0.3357690773,0.9284960528,0.1586007776], - [0.905835713,0.3644603181,-0.2159405881] - ], - "t": [ - [14.66480509], - [172.1699927], - [320.6722019] - ] - }, - { - "name": "06_14", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 14, - "K": [ - [744.949,0,378.98], - [0,744.921,225.408], - [0,0,1] - ], - "distCoef": [-0.321047,0.0567081,-0.000162218,0.000699701,0.0634367], - "R": [ - [-0.3208579847,0.07871363947,-0.9438507915], - [-0.3472646452,0.9173632389,0.1945557869], - [0.8811682132,0.3901907879,-0.267008856] - ], - "t": [ - [-45.70363788], - [100.2282059], - [322.9364507] - ] - }, - { - "name": "06_15", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 15, - "K": [ - [745.712,0,360.895], - [0,745.741,234.163], - [0,0,1] - ], - "distCoef": [-0.31006,-0.0103454,0.000398478,0.000813845,0.181221], - "R": [ - [-0.3227895896,0.1367774117,-0.9365355415], - [-0.3406635237,0.9063958148,0.2497898928], - [0.8830375102,0.3996730746,-0.245980058] - ], - "t": [ - [-14.93002532], - [154.0180569], - [326.396188] - ] - }, - { - "name": "06_16", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 16, - "K": [ - [745.931,0,372.193], - [0,746.03,212.813], - [0,0,1] - ], - "distCoef": [-0.325757,0.0830346,-0.000419051,0.00216162,0.0290765], - "R": [ - [-0.311559769,0.02363818266,-0.9499324958], - [-0.312276077,0.9416182622,0.1258518973], - [0.8974486961,0.3358515813,-0.2859887293] - ], - "t": [ - [-41.03283731], - [153.3338286], - [314.9665339] - ] - }, - { - "name": "06_17", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 17, - "K": [ - [744.756,0,368.403], - [0,744.752,202.816], - [0,0,1] - ], - "distCoef": [-0.313223,0.00720848,-0.00119606,0.000542174,0.130737], - "R": [ - [-0.3236003046,0.09291211415,-0.9416210394], - [-0.3175516679,0.9267842511,0.2005788875], - [0.8913157584,0.3639207207,-0.2704032691] - ], - "t": [ - [-41.098271], - [130.5289196], - [319.7107876] - ] - }, - { - "name": "06_18", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 18, - "K": [ - [744.889,0,373.989], - [0,745.092,230.989], - [0,0,1] - ], - "distCoef": [-0.319065,0.0283013,-0.000935078,-0.000739787,0.111424], - "R": [ - [-0.3391260928,0.0773602665,-0.9375547357], - [-0.3008220503,0.9353680392,0.1859911968], - [0.8913470633,0.3451116057,-0.2939360344] - ], - "t": [ - [-22.38901828], - [189.8595323], - [315.0907711] - ] - }, - { - "name": "06_19", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 19, - "K": [ - [743.21,0,358.424], - [0,743.138,251.445], - [0,0,1] - ], - "distCoef": [-0.316603,0.00648778,0.000375455,-0.000277526,0.16085], - "R": [ - [-0.34774011,0.09728469559,-0.9325301624], - [-0.3453355468,0.9113903597,0.2238548019], - [0.8716766465,0.399879107,-0.2833311204] - ], - "t": [ - [-13.32995299], - [105.9918293], - [324.8353482] - ] - }, - { - "name": "06_20", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 20, - "K": [ - [745.315,0,375.798], - [0,745.342,214.671], - [0,0,1] - ], - "distCoef": [-0.317661,0.021421,-0.000865931,0.000266434,0.124612], - "R": [ - [-0.2889220833,0.06736289331,-0.9549797225], - [-0.355115135,0.918816287,0.172249446], - [0.8890541438,0.3888944219,-0.2415447329] - ], - "t": [ - [16.18922492], - [101.394333], - [324.5371374] - ] - }, - { - "name": "06_21", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 21, - "K": [ - [743.803,0,341.335], - [0,743.805,238.935], - [0,0,1] - ], - "distCoef": 
[-0.305727,-0.0577903,-0.000702133,-0.00085287,0.249773], - "R": [ - [-0.2867564999,0.0564691645,-0.9563377767], - [-0.3641939053,0.9168870998,0.1633427245], - [0.8860775977,0.3951319776,-0.24235761] - ], - "t": [ - [29.77890794], - [113.785435], - [325.4988706] - ] - }, - { - "name": "06_22", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 22, - "K": [ - [745.285,0,373.625], - [0,745.232,235.431], - [0,0,1] - ], - "distCoef": [-0.319503,0.0483306,-0.000362012,0.00120612,0.080115], - "R": [ - [-0.3458253526,0.08893014684,-0.9340750797], - [-0.3902640321,0.8916714915,0.2293816395], - [0.8532870623,0.4438618933,-0.2736563703] - ], - "t": [ - [18.96316513], - [116.1979138], - [333.2100324] - ] - }, - { - "name": "06_23", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 23, - "K": [ - [744.536,0,366.592], - [0,744.501,224.531], - [0,0,1] - ], - "distCoef": [-0.312705,-0.014521,0.000375544,8.36622e-05,0.188212], - "R": [ - [-0.3181142509,0.09038767844,-0.94373375], - [-0.4081954831,0.8853909401,0.2223945386], - [0.8556750382,0.455974726,-0.2447596336] - ], - "t": [ - [6.972278595], - [119.3141773], - [334.5341124] - ] - }, - { - "name": "06_24", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 24, - "K": [ - [744.6,0,358.514], - [0,744.655,220.515], - [0,0,1] - ], - "distCoef": [-0.30152,-0.0573254,-0.000856409,-0.000288003,0.227002], - "R": [ - [-0.3545583501,0.05661769889,-0.9333181732], - [-0.3227337004,0.929412527,0.1789841147], - [0.8775712706,0.3646735401,-0.3112585327] - ], - "t": [ - [-25.22428756], - [139.0090865], - [319.514146] - ] - }, - { - "name": "07_01", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 1, - "K": [ - [745.635,0,384.154], - [0,745.75,223.733], - [0,0,1] - ], - "distCoef": [-0.328279,0.104082,-0.000872931,0.00144148,0.00404207], - "R": [ - [-0.9078071857,0.03344162453,-0.4180523547], - [0.00958043905,0.9982092569,0.05904654639], - [0.4192783428,0.049597754,-0.9065019217] - ], - "t": [ - [-23.31434773], - [152.0493649], - [282.3431498] - ] - }, - { - "name": "07_02", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 2, - "K": [ - [746.944,0,375.746], - [0,747.112,207.581], - [0,0,1] - ], - "distCoef": [-0.321827,0.078307,-0.00112183,4.35862e-05,0.0396046], - "R": [ - [-0.9306435439,0.005427673037,-0.3658867782], - [-0.02457764723,0.9967049447,0.07729936951], - [0.3651007167,0.08093079535,-0.9274436225] - ], - "t": [ - [-62.01828104], - [131.8151818], - [284.3018088] - ] - }, - { - "name": "07_03", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 3, - "K": [ - [743.881,0,383.122], - [0,743.965,237.105], - [0,0,1] - ], - "distCoef": [-0.311008,0.000325185,-0.000782967,0.00055371,0.154469], - "R": [ - [-0.9217631286,0.06528892794,-0.3822173342], - [0.03992506463,0.996464058,0.07392814261], - [0.3856925251,0.05288418425,-0.9211104924] - ], - "t": [ - [-43.22640533], - [121.5976731], - [282.3432951] - ] - }, - { - "name": "07_04", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 4, - "K": [ - [743.69,0,370.307], - [0,743.828,227.79], - [0,0,1] - ], - "distCoef": [-0.303025,-0.0263668,-0.000445815,0.00071591,0.180166], - "R": [ - [-0.9409979296,0.06863452498,-0.3313792366], - [0.04529042225,0.9959498431,0.07767037874], - [0.3353679682,0.05807936004,-0.9402952269] - ], - "t": [ - [-38.37277115], - [113.0266013], - [281.4230584] - ] - }, - { - "name": "07_05", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 5, - "K": [ - 
[743.998,0,375.484], - [0,744.299,220.79], - [0,0,1] - ], - "distCoef": [-0.310908,0.00595719,-5.69241e-05,0.000519591,0.131448], - "R": [ - [-0.9269484075,0.08594630429,-0.3652121064], - [0.04467826469,0.9917683984,0.1199970688], - [0.3725191305,0.09491404865,-0.9231580692] - ], - "t": [ - [-23.36597135], - [80.23534001], - [286.4206576] - ] - }, - { - "name": "07_06", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 6, - "K": [ - [745.602,0,379.444], - [0,745.67,224.268], - [0,0,1] - ], - "distCoef": [-0.303286,-0.0402497,-0.00132196,0.00012981,0.210105], - "R": [ - [-0.923694641,0.09319000989,-0.3716232396], - [0.04673933936,0.9901316615,0.1321163393], - [0.3802678586,0.1046657299,-0.9189349491] - ], - "t": [ - [-0.9450645075], - [68.69008136], - [287.3198917] - ] - }, - { - "name": "07_07", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 7, - "K": [ - [745.731,0,365.823], - [0,745.481,229.263], - [0,0,1] - ], - "distCoef": [-0.308219,-0.0231519,0.000110727,0.000180113,0.209056], - "R": [ - [-0.917494877,0.04967698427,-0.3946331815], - [0.001316203411,0.9925436367,0.1218827179], - [0.3977454189,0.1113073518,-0.9107190869] - ], - "t": [ - [18.92434207], - [79.05208738], - [288.1952445] - ] - }, - { - "name": "07_08", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 8, - "K": [ - [745.611,0,393.911], - [0,745.863,244.069], - [0,0,1] - ], - "distCoef": [-0.318705,0.0460564,0.000184451,0.000507881,0.0745222], - "R": [ - [-0.9083609307,0.09070031,-0.4082326216], - [0.05268537174,0.9932388068,0.1034452715], - [0.4148550001,0.07245775567,-0.9069979066] - ], - "t": [ - [48.31394514], - [81.42535523], - [283.8217571] - ] - }, - { - "name": "07_09", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 9, - "K": [ - [745.77,0,370.33], - [0,746.047,217.48], - [0,0,1] - ], - "distCoef": [-0.321786,0.069205,4.67533e-05,5.58471e-05,0.0372207], - "R": [ - [-0.9211612824,0.007939579541,-0.3891000576], - [-0.02433705705,0.996659961,0.07795274024], - [0.3884193603,0.08127659646,-0.9178913418] - ], - "t": [ - [49.65486911], - [97.0413663], - [285.6851525] - ] - }, - { - "name": "07_10", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 10, - "K": [ - [744.504,0,363.969], - [0,744.833,247.068], - [0,0,1] - ], - "distCoef": [-0.335916,0.144192,-0.000823922,-0.000462503,-0.076361], - "R": [ - [-0.9225918644,-0.01579725191,-0.3854538864], - [-0.05416624958,0.9945677902,0.08888716518], - [0.381955847,0.1028851669,-0.9184358297] - ], - "t": [ - [40.86826856], - [113.0714764], - [288.4804376] - ] - }, - { - "name": "07_11", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 11, - "K": [ - [744.999,0,387.199], - [0,745.384,239.21], - [0,0,1] - ], - "distCoef": [-0.313806,0.0330336,-7.01628e-05,0.00132279,0.0985619], - "R": [ - [-0.9109471902,-0.006922747781,-0.4124648981], - [-0.04540685091,0.9954664163,0.08357530662], - [0.4100163832,0.09486142287,-0.9071316751] - ], - "t": [ - [65.64483344], - [130.0336458], - [285.8729547] - ] - }, - { - "name": "07_12", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 12, - "K": [ - [743.664,0,350.646], - [0,743.861,222.503], - [0,0,1] - ], - "distCoef": [-0.300623,-0.0667329,-0.000394627,-0.00107967,0.272621], - "R": [ - [-0.9268683851,0.02536908581,-0.3745282449], - [0.006256924582,0.9986192343,0.0521581796], - [0.3753343145,0.04600037271,-0.9257473295] - ], - "t": [ - [57.10937388], - [163.0891099], - [280.8513179] - ] - }, - { - "name": "07_13", - 
"type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 13, - "K": [ - [744.176,0,390.977], - [0,744.332,246.666], - [0,0,1] - ], - "distCoef": [-0.327257,0.10216,-0.000582688,0.00201022,0.0126373], - "R": [ - [-0.9290120658,-0.01909429991,-0.3695564765], - [-0.04453762663,0.9971777882,0.06043888335], - [0.3673594716,0.07260762025,-0.9272406117] - ], - "t": [ - [26.5211548], - [160.1280328], - [285.2494721] - ] - }, - { - "name": "07_14", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 14, - "K": [ - [744.044,0,360.721], - [0,744.333,226.474], - [0,0,1] - ], - "distCoef": [-0.311296,-0.00746755,-0.00165304,-0.000168766,0.17966], - "R": [ - [-0.9305033137,0.06302128148,-0.3608211486], - [0.03165130136,0.9952368859,0.09220485899], - [0.3649133847,0.07437646791,-0.9280659258] - ], - "t": [ - [37.8814582], - [178.0304645], - [285.6034633] - ] - }, - { - "name": "07_15", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 15, - "K": [ - [744.03,0,362.147], - [0,744.447,229.329], - [0,0,1] - ], - "distCoef": [-0.314413,0.0379836,-0.000745365,2.01034e-05,0.0898919], - "R": [ - [-0.9265853662,0.03975182478,-0.373977742], - [0.01411888978,0.9973739765,0.07103385017], - [0.3758193929,0.06053877555,-0.9247133829] - ], - "t": [ - [16.14446289], - [185.021862], - [282.5666312] - ] - }, - { - "name": "07_16", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 16, - "K": [ - [743.673,0,368.897], - [0,743.962,238.378], - [0,0,1] - ], - "distCoef": [-0.314216,0.0200058,-0.0002257,-0.000345788,0.11969], - "R": [ - [-0.9350006114,0.024774913,-0.3537796777], - [-0.006073372197,0.9962920776,0.08582080369], - [0.354594093,0.08239113958,-0.9313832344] - ], - "t": [ - [-10.51100446], - [168.6528502], - [285.9762696] - ] - }, - { - "name": "07_17", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 17, - "K": [ - [744.686,0,385.346], - [0,745.049,227.767], - [0,0,1] - ], - "distCoef": [-0.317176,0.0455424,-0.000136917,0.000534438,0.0739505], - "R": [ - [-0.908638426,0.05327873405,-0.4141709639], - [0.04010861029,0.9983767379,0.04043746577], - [0.4156531128,0.02013121347,-0.9093004036] - ], - "t": [ - [-7.322164421], - [189.4505625], - [275.8940033] - ] - }, - { - "name": "07_18", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 18, - "K": [ - [746.282,0,378.432], - [0,746.624,237.775], - [0,0,1] - ], - "distCoef": [-0.320382,0.058651,0.000451819,0.000534403,0.062414], - "R": [ - [-0.916555331,0.01769811564,-0.3995160846], - [-0.01470055472,0.9968539618,0.07788499561], - [0.3996376094,0.077259016,-0.9134116408] - ], - "t": [ - [-37.37478029], - [164.0712496], - [285.8486829] - ] - }, - { - "name": "07_19", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 19, - "K": [ - [743.687,0,374.362], - [0,743.883,225.048], - [0,0,1] - ], - "distCoef": [-0.322503,0.0715253,7.77555e-05,0.000517375,0.0539586], - "R": [ - [-0.9239544056,0.01616424802,-0.3821609261], - [-0.020576852,0.9955594902,0.09185801365], - [0.3819487525,0.09273628522,-0.9195189677] - ], - "t": [ - [-17.14443298], - [133.4982453], - [287.2304165] - ] - }, - { - "name": "07_20", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 20, - "K": [ - [745.801,0,368.555], - [0,746.033,233.687], - [0,0,1] - ], - "distCoef": [-0.317685,0.0475287,-3.52395e-05,0.000512076,0.0805211], - "R": [ - [-0.9241543321,-0.01069440692,-0.3818696113], - [-0.04324692472,0.9961108974,0.076764468], - [0.3795635307,0.08745690199,-0.9210227014] - ], - 
"t": [ - [-16.56758847], - [113.8864258], - [286.5218078] - ] - }, - { - "name": "07_21", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 21, - "K": [ - [744.1,0,390.405], - [0,744.284,237.593], - [0,0,1] - ], - "distCoef": [-0.322514,0.0588182,0.000321804,0.00147162,0.0689104], - "R": [ - [-0.9369369296,0.006948104691,-0.3494294118], - [-0.02026391849,0.9970404822,0.07415962808], - [0.3489105381,0.07656370335,-0.9340232522] - ], - "t": [ - [-3.618393153], - [111.1940513], - [285.5030449] - ] - }, - { - "name": "07_22", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 22, - "K": [ - [747.001,0,381.032], - [0,747.132,234.437], - [0,0,1] - ], - "distCoef": [-0.324882,0.0577225,-0.00134011,-0.00135265,0.0819201], - "R": [ - [-0.9282296861,0.06047570579,-0.3670590401], - [0.02337036389,0.9942284933,0.1047068731], - [0.3712727784,0.08861372459,-0.9242857414] - ], - "t": [ - [25.6408869], - [119.8980517], - [286.9452799] - ] - }, - { - "name": "07_23", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 23, - "K": [ - [743.981,0,363.51], - [0,744.339,258.582], - [0,0,1] - ], - "distCoef": [-0.313768,0.0101513,0.00111395,-0.00104272,0.1345], - "R": [ - [-0.9138255678,-0.001018785166,-0.4061056435], - [-0.03060482875,0.9973259054,0.06636552484], - [0.4049520663,0.0730753071,-0.9114130916] - ], - "t": [ - [24.3580015], - [146.5427691], - [284.2261849] - ] - }, - { - "name": "07_24", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 24, - "K": [ - [744.847,0,398.685], - [0,745.01,270.264], - [0,0,1] - ], - "distCoef": [-0.328511,0.106892,0.000179407,0.00152869,-0.00291861], - "R": [ - [-0.915939158,0.01937877811,-0.4008490012], - [-0.01852012751,0.9957282098,0.09045627137], - [0.4008895904,0.09027621565,-0.9116675607] - ], - "t": [ - [6.147743662], - [145.7157982], - [287.1579534] - ] - }, - { - "name": "08_01", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 1, - "K": [ - [743.703,0,360.221], - [0,744.108,227.682], - [0,0,1] - ], - "distCoef": [-0.309411,-0.0239561,-0.001159,0.000249551,0.191643], - "R": [ - [-0.6256262875,-0.004424555618,-0.7801103586], - [-0.1745259617,0.9754325172,0.134432485], - [0.7603502068,0.2202540071,-0.6110284243] - ], - "t": [ - [5.656398722], - [175.9817187], - [302.7764948] - ] - }, - { - "name": "08_02", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 2, - "K": [ - [747.203,0,376.344], - [0,747.435,209.923], - [0,0,1] - ], - "distCoef": [-0.331616,0.11313,4.7739e-05,0.00134479,-0.0154118], - "R": [ - [-0.6724252099,0.1092176997,-0.7320627235], - [-0.09964199407,0.9666926758,0.2357472025], - [0.7334274403,0.2314665517,-0.6391458561] - ], - "t": [ - [-0.9742570867], - [185.4525058], - [305.0714088] - ] - }, - { - "name": "08_03", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 3, - "K": [ - [747.234,0,368.091], - [0,747.404,224.293], - [0,0,1] - ], - "distCoef": [-0.329137,0.0905459,-0.000565165,-0.000329878,0.0231933], - "R": [ - [-0.656899377,0.0205246652,-0.7536988435], - [-0.2005757989,0.9588523348,0.2009267253], - [0.7268098496,0.2831623883,-0.6257527502] - ], - "t": [ - [-32.7353206], - [153.4285774], - [313.8994992] - ] - }, - { - "name": "08_04", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 4, - "K": [ - [747.386,0,362.788], - [0,747.713,235.953], - [0,0,1] - ], - "distCoef": [-0.341304,0.154379,-0.000777774,-0.000654564,-0.0867958], - "R": [ - [-0.6631685233,0.06657565756,-0.7455033143], - 
[-0.1433461882,0.9663011288,0.2138083224], - [0.7346151238,0.2486560079,-0.6312771259] - ], - "t": [ - [-22.98714967], - [144.6795235], - [307.788251] - ] - }, - { - "name": "08_05", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 5, - "K": [ - [745.746,0,376.748], - [0,745.752,233.642], - [0,0,1] - ], - "distCoef": [-0.32088,0.0642866,0.000720856,0.00118823,0.0489989], - "R": [ - [-0.6568191598,0.04935682433,-0.7524310568], - [-0.1452125328,0.970898021,0.19044777], - [0.7399337211,0.2343521638,-0.6305371929] - ], - "t": [ - [-42.15667108], - [135.9397275], - [306.138018] - ] - }, - { - "name": "08_06", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 6, - "K": [ - [743.581,0,359.642], - [0,743.625,223.766], - [0,0,1] - ], - "distCoef": [-0.309434,-0.0145066,-0.000137344,-0.000208072,0.169515], - "R": [ - [-0.6714433509,-0.01781555577,-0.7408417054], - [-0.2359597182,0.9528188479,0.1909430659], - [0.7024861834,0.3030162521,-0.6439676336] - ], - "t": [ - [-57.25895983], - [89.79547495], - [311.6502108] - ] - }, - { - "name": "08_07", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 7, - "K": [ - [745.148,0,371.237], - [0,745.103,220.621], - [0,0,1] - ], - "distCoef": [-0.318768,0.034703,-0.000217256,0.000447556,0.0954449], - "R": [ - [-0.7012843801,0.01049644172,-0.7128043511], - [-0.1276034542,0.9818947595,0.1400001421], - [0.7013683602,0.1891362102,-0.6872480755] - ], - "t": [ - [-43.70728874], - [118.2041714], - [298.0588141] - ] - }, - { - "name": "08_08", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 8, - "K": [ - [743.06,0,391.891], - [0,743.237,230.861], - [0,0,1] - ], - "distCoef": [-0.322908,0.0553375,0.000339696,0.00130059,0.0777268], - "R": [ - [-0.6299217379,0.07604043096,-0.7729272003], - [-0.1362742651,0.9689348188,0.2063846932], - [0.7646096578,0.2353362908,-0.5999907511] - ], - "t": [ - [-3.915515028], - [82.19520224], - [306.2551203] - ] - }, - { - "name": "08_09", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 9, - "K": [ - [746.456,0,356.955], - [0,746.592,233.352], - [0,0,1] - ], - "distCoef": [-0.320498,0.0507213,0.000550471,0.000126643,0.0741224], - "R": [ - [-0.684872543,0.06612723284,-0.7256561093], - [-0.09767122593,0.9785553778,0.1813551881], - [0.7220872049,0.1950809107,-0.6637269822] - ], - "t": [ - [-6.194765679], - [87.40737989], - [301.7039487] - ] - }, - { - "name": "08_10", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 10, - "K": [ - [747.33,0,361.528], - [0,747.71,220.883], - [0,0,1] - ], - "distCoef": [-0.322455,0.0389243,0.00118705,0.000768992,0.12227], - "R": [ - [-0.6055801648,0.01225702185,-0.7956899079], - [-0.1760343759,0.973047512,0.1489645524], - [0.7760699469,0.2302787546,-0.5871006154] - ], - "t": [ - [32.64204154], - [89.24589085], - [303.2777117] - ] - }, - { - "name": "08_11", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 11, - "K": [ - [747.774,0,350.264], - [0,747.981,233.163], - [0,0,1] - ], - "distCoef": [-0.312094,-0.0263709,0.00148203,-0.000526901,0.233175], - "R": [ - [-0.6738094891,0.06987822761,-0.7355935058], - [-0.1142917175,0.9736808734,0.1971876265], - [0.730012449,0.216939139,-0.6480889092] - ], - "t": [ - [35.79986479], - [83.7107121], - [303.8218457] - ] - }, - { - "name": "08_12", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 12, - "K": [ - [744.899,0,366.47], - [0,744.848,222.726], - [0,0,1] - ], - "distCoef": 
[-0.30396,-0.0418844,-0.00058576,-0.000160605,0.231689], - "R": [ - [-0.6160341517,-0.01803679921,-0.7875129191], - [-0.1884772348,0.9740736778,0.1251271436], - [0.7648387123,0.2255108512,-0.6034621779] - ], - "t": [ - [61.57356311], - [97.36793025], - [301.4047959] - ] - }, - { - "name": "08_13", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 13, - "K": [ - [746.859,0,368.586], - [0,747.139,224.684], - [0,0,1] - ], - "distCoef": [-0.318047,0.0428323,-0.000551709,0.000692584,0.0895927], - "R": [ - [-0.6485099772,-0.04236983322,-0.7600260566], - [-0.2235198928,0.9650338886,0.1369249841], - [0.7276494121,0.258678161,-0.6353046057] - ], - "t": [ - [38.13208236], - [106.9572182], - [307.8393222] - ] - }, - { - "name": "08_14", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 14, - "K": [ - [744.505,0,357.32], - [0,744.53,228.165], - [0,0,1] - ], - "distCoef": [-0.303025,-0.0702212,0.000533599,-0.000753966,0.269146], - "R": [ - [-0.6825611814,-0.04644305139,-0.729351271], - [-0.1871280484,0.9758162042,0.1129859684], - [0.7064653757,0.213601916,-0.6747450588] - ], - "t": [ - [41.82592662], - [132.5834032], - [304.3020009] - ] - }, - { - "name": "08_15", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 15, - "K": [ - [745.837,0,357.73], - [0,745.88,221.629], - [0,0,1] - ], - "distCoef": [-0.3197,0.0439542,-0.00136466,0.00170195,0.109142], - "R": [ - [-0.6069626381,-0.02117938565,-0.7944481037], - [-0.2107505505,0.968144583,0.1352045554], - [0.7662770787,0.2494944888,-0.5920911574] - ], - "t": [ - [64.87618524], - [141.1933336], - [303.6799609] - ] - }, - { - "name": "08_16", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 16, - "K": [ - [744.767,0,345.102], - [0,744.781,229.581], - [0,0,1] - ], - "distCoef": [-0.307131,-0.033453,0.0002274,-0.000565369,0.224073], - "R": [ - [-0.6350262321,-0.03398669713,-0.7717425665], - [-0.2527580664,0.9531820242,0.1660041824], - [0.7299692079,0.3004811693,-0.6138860012] - ], - "t": [ - [34.611726], - [134.434862], - [314.3473002] - ] - }, - { - "name": "08_17", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 17, - "K": [ - [743.543,0,370.548], - [0,743.847,224.118], - [0,0,1] - ], - "distCoef": [-0.308645,-0.0111516,9.80345e-05,-0.000744439,0.160705], - "R": [ - [-0.6124225565,-0.05791042639,-0.7884066177], - [-0.1936876385,0.977907652,0.07862393367], - [0.7664357188,0.2008556864,-0.610109238] - ], - "t": [ - [28.62018644], - [186.6213498], - [297.6164741] - ] - }, - { - "name": "08_18", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 18, - "K": [ - [743.39,0,376.249], - [0,743.751,216.723], - [0,0,1] - ], - "distCoef": [-0.319375,0.0602092,-1.05699e-05,0.00110696,0.0487054], - "R": [ - [-0.6887185447,0.08181736584,-0.720397588], - [-0.1043667464,0.9720764384,0.2101784484], - [0.7174777686,0.2199393475,-0.6609480577] - ], - "t": [ - [20.48604056], - [189.7333893], - [302.8177068] - ] - }, - { - "name": "08_19", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 19, - "K": [ - [747.038,0,360.923], - [0,747.259,204.023], - [0,0,1] - ], - "distCoef": [-0.32724,0.0825647,-0.000697091,0.000733699,0.0397455], - "R": [ - [-0.6726100217,0.03848005322,-0.7389959704], - [-0.1487286588,0.9712392562,0.1859411014], - [0.7248969201,0.2349757278,-0.6475421705] - ], - "t": [ - [3.177324598], - [151.0352965], - [305.3818706] - ] - }, - { - "name": "08_20", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 20, - "K": [ - 
[747.914,0,388.693], - [0,747.835,242.83], - [0,0,1] - ], - "distCoef": [-0.338429,0.134609,0.00136964,0.000561914,-0.0365273], - "R": [ - [-0.6685313457,0.02780025068,-0.7431641715], - [-0.1765857142,0.9647874561,0.194942684], - [0.722414926,0.2615574708,-0.6400815293] - ], - "t": [ - [-14.15175066], - [129.456494], - [308.9585645] - ] - }, - { - "name": "08_21", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 21, - "K": [ - [746.296,0,369.274], - [0,746.424,219.198], - [0,0,1] - ], - "distCoef": [-0.312598,-0.010091,-0.000298989,-0.000771876,0.160922], - "R": [ - [-0.6341455554,-0.01222382885,-0.7731170626], - [-0.1896201401,0.9718007188,0.1401697733], - [0.7496023059,0.2354866044,-0.6185809907] - ], - "t": [ - [-6.414673774], - [116.5175191], - [305.5663378] - ] - }, - { - "name": "08_22", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 22, - "K": [ - [743.609,0,361.562], - [0,743.794,221.87], - [0,0,1] - ], - "distCoef": [-0.314273,0.00142644,4.14402e-05,0.000150079,0.159707], - "R": [ - [-0.6552794634,-0.0176584532,-0.7551801135], - [-0.2007508014,0.9678470127,0.1515627784], - [0.7282224527,0.2509189891,-0.6377552198] - ], - "t": [ - [4.541098798], - [103.6271831], - [307.0310837] - ] - }, - { - "name": "08_23", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 23, - "K": [ - [748.435,0,354.117], - [0,748.457,219.552], - [0,0,1] - ], - "distCoef": [-0.324308,0.0627041,-0.000215295,-0.000444561,0.0758056], - "R": [ - [-0.6485698923,-0.03356212054,-0.7604148071], - [-0.2015811272,0.9709293787,0.1290782349], - [0.733976937,0.2370015309,-0.6364810526] - ], - "t": [ - [20.56445448], - [121.4098798], - [305.3725739] - ] - }, - { - "name": "08_24", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 24, - "K": [ - [745.572,0,350.678], - [0,745.729,218.826], - [0,0,1] - ], - "distCoef": [-0.313081,0.00890587,-0.000465969,-0.00023462,0.141032], - "R": [ - [-0.6716141,0.00283216084,-0.7408957278], - [-0.1390702972,0.9817365211,0.1298185488], - [0.7277320613,0.1902245569,-0.6589542206] - ], - "t": [ - [13.95231346], - [154.9907046], - [298.6967118] - ] - }, - { - "name": "09_01", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 1, - "K": [ - [745.377,0,383.314], - [0,745.581,229.65], - [0,0,1] - ], - "distCoef": [-0.311824,0.0113225,-0.000890232,0.000288511,0.13186], - "R": [ - [-0.9888207636,0.1490770148,-0.003088867539], - [0.1339941062,0.8974831076,0.420201917], - [0.06541465384,0.4150904904,-0.9074253732] - ], - "t": [ - [-5.5065201], - [83.70733211], - [330.6651976] - ] - }, - { - "name": "09_02", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 2, - "K": [ - [745.133,0,380.598], - [0,746.347,248.499], - [0,0,1] - ], - "distCoef": [-0.340543,0.0603048,-0.00219925,-0.00194065,0.128165], - "R": [ - [-0.9728033822,0.2090533065,0.09975116351], - [0.2316107347,0.8720009628,0.4312433055], - [0.003169728315,0.4426183864,-0.8967044758] - ], - "t": [ - [-23.76195567], - [58.26386366], - [329.69794] - ] - }, - { - "name": "09_03", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 3, - "K": [ - [745.787,0,382.41], - [0,745.973,216.203], - [0,0,1] - ], - "distCoef": [-0.309439,0.00115788,-0.000439278,0.00154239,0.140783], - "R": [ - [-0.995096801,0.09728424012,-0.01783629191], - [0.08253738581,0.9161639792,0.3922131349], - [0.05449712496,0.3888178749,-0.9197014317] - ], - "t": [ - [6.72584843], - [65.39953055], - [327.4514754] - ] - }, - { - "name": "09_04", - "type": "vga", 
- "resolution": [640,480], - "panel": 9, - "node": 4, - "K": [ - [744.782,0,384.335], - [0,745.051,230.833], - [0,0,1] - ], - "distCoef": [-0.319171,0.0452003,0.000841339,0.00114337,0.0902557], - "R": [ - [-0.9962766095,0.08536470964,0.01207409478], - [0.0830687393,0.9129812009,0.3994557689], - [0.02307600417,0.3989714189,-0.9166729542] - ], - "t": [ - [12.91980994], - [75.72355875], - [328.4117918] - ] - }, - { - "name": "09_05", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 5, - "K": [ - [745.938,0,386.124], - [0,746.151,234.663], - [0,0,1] - ], - "distCoef": [-0.322825,0.0563734,0.000659785,0.00216478,0.0846192], - "R": [ - [-0.9996885429,0.02460566921,0.004168718214], - [0.02372582958,0.8852416043,0.464525981], - [0.007739649829,0.4644802074,-0.8855496794] - ], - "t": [ - [23.79490616], - [45.57973364], - [333.4360246] - ] - }, - { - "name": "09_06", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 6, - "K": [ - [745.533,0,376.456], - [0,745.938,237.583], - [0,0,1] - ], - "distCoef": [-0.324418,0.0645728,-2.52302e-05,0.000695669,0.0784542], - "R": [ - [-0.9996292032,0.0242501169,-0.01238498622], - [0.01720849374,0.9151046106,0.4028491273], - [0.02110269642,0.4024866252,-0.9151826008] - ], - "t": [ - [44.50201086], - [83.15135806], - [329.4460526] - ] - }, - { - "name": "09_07", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 7, - "K": [ - [745.538,0,357.165], - [0,745.859,222.198], - [0,0,1] - ], - "distCoef": [-0.30448,-0.0356601,-0.000261684,-0.000249049,0.226264], - "R": [ - [-0.9994703128,-0.005373675551,-0.03209699996], - [-0.01769948118,0.9174086112,0.3975527241], - [0.02730974481,0.3979102457,-0.9170177829] - ], - "t": [ - [39.28939518], - [107.3778293], - [329.1138759] - ] - }, - { - "name": "09_08", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 8, - "K": [ - [746.393,0,361.584], - [0,746.73,220.937], - [0,0,1] - ], - "distCoef": [-0.31726,0.0513551,0.000643529,-0.000795525,0.0635312], - "R": [ - [-0.9973050313,-0.005865573042,-0.0731318648], - [-0.03181904441,0.9327538711,0.3591068981], - [0.06610766226,0.3604661023,-0.9304267656] - ], - "t": [ - [64.05594666], - [137.6750859], - [322.0323762] - ] - }, - { - "name": "09_09", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 9, - "K": [ - [750.271,0,344.156], - [0,750.817,228.346], - [0,0,1] - ], - "distCoef": [-0.379154,0.391779,0.000225814,-0.000528714,-0.53339], - "R": [ - [-0.9991212371,-0.002089946585,-0.04186150665], - [-0.01685937738,0.9344344151,0.355735977], - [0.03837336329,0.3561291283,-0.933648504] - ], - "t": [ - [51.49527243], - [159.1149955], - [322.66132] - ] - }, - { - "name": "09_10", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 10, - "K": [ - [744.897,0,366.998], - [0,745.389,227.752], - [0,0,1] - ], - "distCoef": [-0.317307,0.0499201,-0.000255849,-0.000414203,0.0689696], - "R": [ - [-0.9956077306,0.03830608065,-0.08542769468], - [0.005132094192,0.9334237661,0.3587390896], - [0.093482129,0.3567249879,-0.9295205079] - ], - "t": [ - [51.9897871], - [163.3127669], - [320.2676037] - ] - }, - { - "name": "09_11", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 11, - "K": [ - [745.812,0,365.568], - [0,746.463,243.927], - [0,0,1] - ], - "distCoef": [-0.334591,0.135033,-0.000586766,0.000648781,-0.0516408], - "R": [ - [-0.998272905,0.02856351314,-0.05133549401], - [0.007150624435,0.926422355,0.3764179707], - [0.05831016891,0.3754007803,-0.9250265825] - ], - "t": [ - 
[35.7749059], - [177.7642897], - [325.0135255] - ] - }, - { - "name": "09_12", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 12, - "K": [ - [743.195,0,380.908], - [0,743.577,227.789], - [0,0,1] - ], - "distCoef": [-0.308886,-0.0148964,-0.00146189,1.64512e-05,0.167268], - "R": [ - [-0.9994731762,0.02727182579,0.01759595347], - [0.03184982914,0.9284235071,0.3701558858], - [-0.006241669996,0.370521307,-0.9288029945] - ], - "t": [ - [-0.9618436208], - [187.4005014], - [324.424529] - ] - }, - { - "name": "09_13", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 13, - "K": [ - [745.52,0,396.637], - [0,745.641,231.295], - [0,0,1] - ], - "distCoef": [-0.327971,0.0908214,-0.00010844,0.00165709,0.0286999], - "R": [ - [-0.9916965419,0.1263943494,0.02371575794], - [0.1244737261,0.8970729317,0.4239887342], - [0.03231501572,0.4234201503,-0.9053568998] - ], - "t": [ - [12.62306638], - [150.537484], - [333.7640249] - ] - }, - { - "name": "09_14", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 14, - "K": [ - [744.91,0,372.463], - [0,744.965,226.423], - [0,0,1] - ], - "distCoef": [-0.308854,-0.0214085,8.99951e-05,0.000256405,0.180188], - "R": [ - [-0.9924146786,0.1180105859,0.03444716585], - [0.1215225705,0.8993517426,0.4199984619], - [0.01858414592,0.4209987468,-0.9068708203] - ], - "t": [ - [-10.68067405], - [162.2988485], - [333.0026074] - ] - }, - { - "name": "09_15", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 15, - "K": [ - [747.246,0,368.718], - [0,747.604,232.745], - [0,0,1] - ], - "distCoef": [-0.3413,0.139342,-0.00187439,-0.000934376,-0.0485015], - "R": [ - [-0.9858543141,0.1593536378,0.05193928607], - [0.1663907088,0.8933064559,0.4175137217], - [0.02013463084,0.4202499184,-0.9071849882] - ], - "t": [ - [-16.61956214], - [147.1949584], - [331.9981158] - ] - }, - { - "name": "09_16", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 16, - "K": [ - [743.705,0,367.288], - [0,743.835,246.124], - [0,0,1] - ], - "distCoef": [-0.316616,0.0215265,-3.02132e-05,0.000242548,0.131229], - "R": [ - [-0.9974602961,0.07055123587,0.009771425173], - [0.06902048446,0.9235857212,0.3771280794], - [0.01758210332,0.3768447143,-0.9261095675] - ], - "t": [ - [-30.73982653], - [139.9628037], - [324.9351286] - ] - }, - { - "name": "09_17", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 17, - "K": [ - [742.776,0,376.251], - [0,742.956,242.934], - [0,0,1] - ], - "distCoef": [-0.317736,0.0249159,0.000195501,0.000659428,0.110976], - "R": [ - [-0.9810894361,0.1806813104,0.06941024814], - [0.1934432758,0.9031273242,0.3833284952], - [0.006574003146,0.389506483,-0.9210002618] - ], - "t": [ - [-32.91453507], - [125.2651482], - [325.9500645] - ] - }, - { - "name": "09_18", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 18, - "K": [ - [744.563,0,383.579], - [0,744.554,245.613], - [0,0,1] - ], - "distCoef": [-0.324188,0.0688729,0.000784842,0.000316148,0.0548859], - "R": [ - [-0.970594512,0.2257141743,0.08366244524], - [0.2406675117,0.9026066179,0.3569039677], - [0.005044007626,0.3665438649,-0.9303870985] - ], - "t": [ - [-30.64851648], - [114.5848432], - [323.1694161] - ] - }, - { - "name": "09_19", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 19, - "K": [ - [745.897,0,369.27], - [0,746.007,226.27], - [0,0,1] - ], - "distCoef": [-0.314378,0.0131268,-0.000749673,-0.000436078,0.140449], - "R": [ - [-0.9929061616,0.1118291068,0.04039313118], - 
[0.1187797946,0.9175946163,0.3793566667], - [0.005358597494,0.3814634596,-0.9243683867] - ], - "t": [ - [-9.348770156], - [111.4514571], - [325.9373984] - ] - }, - { - "name": "09_20", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 20, - "K": [ - [743.647,0,378.532], - [0,743.859,221.629], - [0,0,1] - ], - "distCoef": [-0.312883,-0.00145442,-0.000725648,-1.91192e-05,0.160115], - "R": [ - [-0.9995005243,0.01416777706,-0.02824846864], - [0.002450265794,0.9259270935,0.3776943389], - [0.03150711165,0.3774364735,-0.9254993303] - ], - "t": [ - [6.861259295], - [105.360829], - [326.1962043] - ] - }, - { - "name": "09_21", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 21, - "K": [ - [745.35,0,364.423], - [0,745.51,242.824], - [0,0,1] - ], - "distCoef": [-0.317615,0.0309367,1.60295e-05,-0.00084218,0.138729], - "R": [ - [-0.9983267687,0.03243769532,-0.0478691851], - [0.01510269673,0.9453721551,0.3256430514], - [0.05581730476,0.3243752215,-0.9442802255] - ], - "t": [ - [30.85545331], - [138.1219419], - [318.1793043] - ] - }, - { - "name": "09_22", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 22, - "K": [ - [744.248,0,356.027], - [0,744.436,238.226], - [0,0,1] - ], - "distCoef": [-0.308137,-0.0481761,0.000357682,-8.3696e-05,0.245728], - "R": [ - [-0.9955839097,0.09158830299,-0.0205976113], - [0.07579544873,0.9137019347,0.3992540852], - [0.05538708142,0.3959297379,-0.9166089209] - ], - "t": [ - [35.25988756], - [131.4528362], - [328.3382973] - ] - }, - { - "name": "09_23", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 23, - "K": [ - [744.535,0,363.359], - [0,744.632,254.668], - [0,0,1] - ], - "distCoef": [-0.311847,-0.00198079,0.000462082,-0.000460419,0.174118], - "R": [ - [-0.9946906764,0.1028474748,0.003585412436], - [0.09771594436,0.9329851386,0.346396197], - [0.03228083764,0.3449074195,-0.9380814567] - ], - "t": [ - [12.3985171], - [157.8437238], - [320.5381764] - ] - }, - { - "name": "09_24", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 24, - "K": [ - [743.311,0,385.98], - [0,743.511,229.743], - [0,0,1] - ], - "distCoef": [-0.319602,0.0480118,-0.000790169,0.000699953,0.0704098], - "R": [ - [-0.9986396845,0.04700092247,-0.02257640097], - [0.03617494752,0.9363507866,0.3491970469], - [0.03755201414,0.3479053287,-0.93677731] - ], - "t": [ - [-8.936415104], - [142.1371611], - [321.4431282] - ] - }, - { - "name": "10_01", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 1, - "K": [ - [744.128,0,369.511], - [0,744.056,233.67], - [0,0,1] - ], - "distCoef": [-0.31156,0.00550691,-0.000430053,0.000410016,0.149166], - "R": [ - [-0.6229970612,0.0209936641,0.781942407], - [0.05250109858,0.9985078863,0.01502117145], - [-0.7804603106,0.05041098106,-0.6231696692] - ], - "t": [ - [-46.84686717], - [150.7389104], - [280.0083694] - ] - }, - { - "name": "10_02", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 2, - "K": [ - [743.282,0,357.827], - [0,743.347,211.632], - [0,0,1] - ], - "distCoef": [-0.30948,-0.00718458,0.000285593,0.000547399,0.164062], - "R": [ - [-0.6512046155,0.0977241901,0.7525839032], - [0.103617117,0.9938368806,-0.03939223155], - [-0.7517952126,0.05232817138,-0.6573170626] - ], - "t": [ - [-42.32005533], - [143.0774393], - [282.200902] - ] - }, - { - "name": "10_03", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 3, - "K": [ - [744.012,0,361.17], - [0,744.101,225.217], - [0,0,1] - ], - "distCoef": 
[-0.303567,-0.0563565,0.000757602,-0.000519388,0.263551], - "R": [ - [-0.6320598226,0.04182219841,0.773790207], - [0.06737176964,0.9977273282,0.001106034268], - [-0.771985379,0.05283069539,-0.6334409935] - ], - "t": [ - [-54.02554254], - [119.7786683], - [280.9354705] - ] - }, - { - "name": "10_04", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 4, - "K": [ - [744.209,0,380.966], - [0,744.256,205.476], - [0,0,1] - ], - "distCoef": [-0.315194,0.0249601,-0.000765583,0.001001,0.10286], - "R": [ - [-0.6566261636,0.06356030055,0.7515332125], - [0.0713368826,0.9972094103,-0.02201002698], - [-0.7508349555,0.03915967697,-0.6593279831] - ], - "t": [ - [-22.38173011], - [115.5645607], - [280.9145253] - ] - }, - { - "name": "10_05", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 5, - "K": [ - [744.499,0,353.834], - [0,744.652,215.524], - [0,0,1] - ], - "distCoef": [-0.317042,0.0236932,-0.00147688,-0.000206715,0.11602], - "R": [ - [-0.6480155592,0.1057846486,0.754244949], - [0.1559047408,0.9877614348,-0.004589090624], - [-0.7454995284,0.1146165612,-0.6565771067] - ], - "t": [ - [-17.37690425], - [72.84298088], - [287.4167752] - ] - }, - { - "name": "10_06", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 6, - "K": [ - [746.493,0,367.328], - [0,746.754,207.575], - [0,0,1] - ], - "distCoef": [-0.323089,0.0587326,-0.000981175,-0.000221417,0.0550321], - "R": [ - [-0.6607542091,0.07289791872,0.74705406], - [0.1340507848,0.9907326878,0.02188900409], - [-0.738535214,0.1146064347,-0.6644028167] - ], - "t": [ - [3.021864726], - [64.04371811], - [286.9062935] - ] - }, - { - "name": "10_07", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 7, - "K": [ - [744.949,0,365.308], - [0,744.944,217.014], - [0,0,1] - ], - "distCoef": [-0.320697,0.0459897,0.000335318,2.89241e-06,0.0947246], - "R": [ - [-0.643287111,0.03528116955,0.764811697], - [0.0902182212,0.9954712387,0.02996140018], - [-0.7602909742,0.08827373343,-0.6435568215] - ], - "t": [ - [9.776307982], - [84.51813798], - [285.3816638] - ] - }, - { - "name": "10_08", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 8, - "K": [ - [748.112,0,395.78], - [0,748.17,229.575], - [0,0,1] - ], - "distCoef": [-0.325424,0.0774932,-0.000546,0.000524276,0.0351183], - "R": [ - [-0.6241633069,0.05185263499,0.7795713377], - [0.04102617023,0.9985938587,-0.03357318505], - [-0.7802160084,0.0110276762,-0.6254129601] - ], - "t": [ - [-46.24758235], - [183.5392889], - [272.6641799] - ] - }, - { - "name": "10_09", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 9, - "K": [ - [746.122,0,370.333], - [0,746.261,210.753], - [0,0,1] - ], - "distCoef": [-0.323285,0.0813962,-0.00031195,0.00117949,0.0118242], - "R": [ - [-0.6717702835,0.002860846795,0.7407540089], - [0.1085475528,0.9895782107,0.09461708989], - [-0.7327633417,0.1439679842,-0.6650797731] - ], - "t": [ - [53.6134591], - [78.01841366], - [288.9552018] - ] - }, - { - "name": "10_10", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 10, - "K": [ - [746.498,0,355.775], - [0,746.616,218.183], - [0,0,1] - ], - "distCoef": [-0.320479,0.0482256,-0.000295345,0.000515541,0.088746], - "R": [ - [-0.6274497943,0.01735785812,0.7784635254], - [0.05740772193,0.9980618939,0.02401685623], - [-0.7765378993,0.0597591891,-0.6272302051] - ], - "t": [ - [35.32452291], - [122.8912729], - [283.9520693] - ] - }, - { - "name": "10_11", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 11, - 
"K": [ - [745.209,0,387.948], - [0,745.058,237.868], - [0,0,1] - ], - "distCoef": [-0.312054,0.0106095,2.04654e-05,-0.000407432,0.122509], - "R": [ - [-0.663538187,0.0558857692,0.74605218], - [0.09086672278,0.9958436408,0.006219474654], - [-0.742603739,0.07191817555,-0.6658584406] - ], - "t": [ - [70.41193089], - [130.903078], - [283.3216663] - ] - }, - { - "name": "10_12", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 12, - "K": [ - [746.923,0,359.191], - [0,746.955,219.728], - [0,0,1] - ], - "distCoef": [-0.34193,0.180291,-0.0011698,0.000387434,-0.142263], - "R": [ - [-0.6573529902,0.02662022179,0.7531124817], - [0.0203979596,0.9996382488,-0.01752982786], - [-0.7533066902,0.003838673213,-0.6576581901] - ], - "t": [ - [61.18715226], - [173.543055], - [273.2477614] - ] - }, - { - "name": "10_13", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 13, - "K": [ - [747.063,0,362.554], - [0,747.091,228.588], - [0,0,1] - ], - "distCoef": [-0.334743,0.115617,-0.000133435,0.000763825,-0.0142674], - "R": [ - [-0.6314178936,0.07344004486,0.771957255], - [0.07624079511,0.9965613541,-0.03244701456], - [-0.7716856775,0.03836700932,-0.6348457984] - ], - "t": [ - [39.63694261], - [165.7689372], - [279.8275089] - ] - }, - { - "name": "10_14", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 14, - "K": [ - [745.722,0,380.721], - [0,745.932,237.231], - [0,0,1] - ], - "distCoef": [-0.319645,0.0532601,-0.00105825,0.00148804,0.0812854], - "R": [ - [-0.6464741699,0.0407242176,0.7618482039], - [0.05782238306,0.998317631,-0.004298792509], - [-0.7607415591,0.04127282036,-0.6477413331] - ], - "t": [ - [37.16059778], - [187.0284564], - [279.5510011] - ] - }, - { - "name": "10_15", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 15, - "K": [ - [745.212,0,345.945], - [0,745.407,234.052], - [0,0,1] - ], - "distCoef": [-0.345973,0.208044,0.00063894,-0.000591324,-0.26389], - "R": [ - [-0.6892736753,0.06991501806,0.7211197479], - [0.04097555303,0.9975016565,-0.0575451947], - [-0.7233414164,-0.01011610737,-0.6904164394] - ], - "t": [ - [38.38229011], - [201.7157692], - [268.6124541] - ] - }, - { - "name": "10_16", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 16, - "K": [ - [746.402,0,351.743], - [0,746.432,235.34], - [0,0,1] - ], - "distCoef": [-0.332074,0.123634,0.000553061,0.000200886,-0.050504], - "R": [ - [-0.6626903808,0.1069713565,0.7412142659], - [0.1159650419,0.9924654921,-0.03955194002], - [-0.7398605059,0.05974425322,-0.6701022728] - ], - "t": [ - [18.24762504], - [172.5928493], - [282.9657885] - ] - }, - { - "name": "10_17", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 17, - "K": [ - [745.425,0,381.954], - [0,745.576,234.397], - [0,0,1] - ], - "distCoef": [-0.316953,0.0361047,-0.000329948,0.00146685,0.0995591], - "R": [ - [-0.6439914485,0.08005681888,0.7608323863], - [0.04150323442,0.9967010496,-0.06974596286], - [-0.7639060779,-0.01333879876,-0.6451895695] - ], - "t": [ - [-14.39474973], - [198.5707312], - [268.934139] - ] - }, - { - "name": "10_18", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 18, - "K": [ - [742.866,0,374.357], - [0,743.163,216.484], - [0,0,1] - ], - "distCoef": [-0.313801,-0.00472223,0.00105562,-0.000883374,0.146196], - "R": [ - [-0.6735625977,0.03695414336,0.7382058102], - [0.08136680684,0.9963864104,0.02436316713], - [-0.7346379174,0.07647556771,-0.6741354596] - ], - "t": [ - [41.81793908], - [81.57199105], - [283.0241236] - ] - }, - { - 
"name": "10_19", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 19, - "K": [ - [747.195,0,374.317], - [0,747.324,252.705], - [0,0,1] - ], - "distCoef": [-0.325848,0.0754879,0.000850799,-0.000494425,0.0423325], - "R": [ - [-0.6398121174,0.03550225829,0.7677109118], - [0.06489671873,0.9978603994,0.00793971962], - [-0.7657864391,0.05490184793,-0.6407471551] - ], - "t": [ - [-18.67539454], - [143.739157], - [281.6554752] - ] - }, - { - "name": "10_20", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 20, - "K": [ - [744.074,0,359.595], - [0,744.232,222.54], - [0,0,1] - ], - "distCoef": [-0.312038,-0.00652471,0.000517579,-0.000473896,0.154037], - "R": [ - [-0.6341018605,0.07503908623,0.769599874], - [0.1134623387,0.9935365213,-0.003387984729], - [-0.7648798129,0.08517227417,-0.6385174669] - ], - "t": [ - [-10.64771601], - [114.6784971], - [285.5473806] - ] - }, - { - "name": "10_21", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 21, - "K": [ - [745.669,0,353.595], - [0,745.986,221.41], - [0,0,1] - ], - "distCoef": [-0.331248,0.0956435,-0.00124938,0.0010706,0.0394747], - "R": [ - [-0.618235149,0.02815342604,0.7854888192], - [0.09838720035,0.994269895,0.04180113162], - [-0.7798110408,0.1031249747,-0.6174625335] - ], - "t": [ - [-3.462045404], - [102.4105128], - [287.5712577] - ] - }, - { - "name": "10_22", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 22, - "K": [ - [745.836,0,367.536], - [0,745.883,217.602], - [0,0,1] - ], - "distCoef": [-0.306908,-0.0326669,-0.000283909,0.000278093,0.200484], - "R": [ - [-0.6189078213,0.03804187807,0.7845418563], - [0.07413417155,0.9971968305,0.01012945108], - [-0.7819573092,0.06443055706,-0.6199931209] - ], - "t": [ - [14.73270812], - [126.5060302], - [283.9045417] - ] - }, - { - "name": "10_23", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 23, - "K": [ - [742.749,0,379.273], - [0,742.868,231.204], - [0,0,1] - ], - "distCoef": [-0.310394,-0.00460726,-0.000822068,-0.000336616,0.147608], - "R": [ - [-0.6037549899,0.1086195044,0.7897352186], - [0.1215591915,0.9916324658,-0.04345590495], - [-0.787847241,0.0697628552,-0.6119067485] - ], - "t": [ - [19.26192194], - [145.0128457], - [284.7838402] - ] - }, - { - "name": "10_24", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 24, - "K": [ - [745.597,0,368.627], - [0,745.598,227.731], - [0,0,1] - ], - "distCoef": [-0.309585,-0.00749389,-0.000770097,-0.000330202,0.147896], - "R": [ - [-0.6450785239,0.075478584,0.760379301], - [0.07622559694,0.9965021766,-0.03425011393], - [-0.7603047786,0.03586635318,-0.6485755533] - ], - "t": [ - [7.856697427], - [160.1393432], - [279.1413867] - ] - }, - { - "name": "11_01", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 1, - "K": [ - [742.855,0,374.596], - [0,743.116,213.495], - [0,0,1] - ], - "distCoef": [-0.312561,0.00631745,-0.000399255,9.31566e-05,0.13435], - "R": [ - [-0.9229364354,0.00164792287,0.3849488544], - [0.08421827064,0.9766305816,0.1977371741], - [-0.3756269679,0.2149185694,-0.9015067329] - ], - "t": [ - [-1.777017447], - [176.3500352], - [303.9155303] - ] - }, - { - "name": "11_02", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 2, - "K": [ - [743.543,0,362.467], - [0,743.612,228.587], - [0,0,1] - ], - "distCoef": [-0.311508,-0.0063044,0.000209199,0.000389142,0.157517], - "R": [ - [-0.9382305089,-0.009495783218,0.3458805319], - [0.07354737957,0.9713073762,0.226169768], - 
[-0.338103971,0.2376379833,-0.9106118238] - ], - "t": [ - [-11.88478771], - [180.6527832], - [308.9268929] - ] - }, - { - "name": "11_03", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 3, - "K": [ - [749.382,0,384.698], - [0,749.44,241.756], - [0,0,1] - ], - "distCoef": [-0.334994,0.135003,0.000819921,0.00199466,-0.05032], - "R": [ - [-0.9215516186,0.03410543981,0.3867550042], - [0.1287847641,0.966589567,0.2216282778], - [-0.3662746221,0.2540500501,-0.895154441] - ], - "t": [ - [-28.84627719], - [162.2565593], - [311.7587167] - ] - }, - { - "name": "11_04", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 4, - "K": [ - [747.478,0,355.1], - [0,747.786,237.425], - [0,0,1] - ], - "distCoef": [-0.332665,0.125805,0.000559145,-0.000285828,-0.0488142], - "R": [ - [-0.9186497576,-0.03493542623,0.3935252708], - [0.05923251482,0.9726444983,0.2246200995], - [-0.3906073886,0.2296566914,-0.8914503195] - ], - "t": [ - [-43.73591523], - [146.455357], - [306.7233507] - ] - }, - { - "name": "11_05", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 5, - "K": [ - [744.546,0,358.346], - [0,744.606,240.06], - [0,0,1] - ], - "distCoef": [-0.319412,0.0357687,0.00118284,-0.000939418,0.105494], - "R": [ - [-0.9252091585,0.02778676908,0.3784387777], - [0.1130706466,0.9721977994,0.2050523536], - [-0.3622196044,0.2325066328,-0.9026281759] - ], - "t": [ - [-43.43063623], - [134.4377466], - [308.7383564] - ] - }, - { - "name": "11_06", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 6, - "K": [ - [744.682,0,386.644], - [0,744.47,247.576], - [0,0,1] - ], - "distCoef": [-0.310524,-0.0156223,-0.000288596,-3.26402e-05,0.156674], - "R": [ - [-0.9144551399,0.0484228537,0.4017798207], - [0.1449564791,0.9661327489,0.2134833264], - [-0.3778351707,0.2534615133,-0.8905042645] - ], - "t": [ - [-44.21957265], - [107.5274508], - [309.8949628] - ] - }, - { - "name": "11_07", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 7, - "K": [ - [746.436,0,349.001], - [0,746.553,211.863], - [0,0,1] - ], - "distCoef": [-0.330393,0.0902383,-0.000783974,-0.000712996,0.00481592], - "R": [ - [-0.9105637485,0.003264968682,0.4133557789], - [0.1001837456,0.9718993559,0.2130137535], - [-0.401044732,0.2353741321,-0.8853034174] - ], - "t": [ - [-36.21090107], - [102.2867759], - [306.6852556] - ] - }, - { - "name": "11_08", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 8, - "K": [ - [745.743,0,370.625], - [0,745.85,233.671], - [0,0,1] - ], - "distCoef": [-0.3257,0.0614375,0.00126654,-0.000627381,0.0722474], - "R": [ - [-0.8981193216,-0.01090147501,0.4396166989], - [0.09488580103,0.9713398361,0.2179348702], - [-0.4293930238,0.2374449004,-0.8713446794] - ], - "t": [ - [-42.17364239], - [80.07059019], - [305.3107943] - ] - }, - { - "name": "11_09", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 9, - "K": [ - [743.294,0,376.993], - [0,743.306,225.516], - [0,0,1] - ], - "distCoef": [-0.315184,-0.00458353,0.00085295,-0.000315923,0.19344], - "R": [ - [-0.9287334953,0.02657190893,0.369794576], - [0.1072763174,0.9740215576,0.1994336907], - [-0.354888555,0.2248909489,-0.9074569822] - ], - "t": [ - [4.627896612], - [76.0139061], - [305.925361] - ] - }, - { - "name": "11_10", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 10, - "K": [ - [746.981,0,373.015], - [0,746.916,231.087], - [0,0,1] - ], - "distCoef": [-0.31553,-0.0133214,-7.49701e-05,-0.000474937,0.183355], - "R": [ - 
[-0.897589008,-0.01428097087,0.4406018914], - [0.092180686,0.9712994893,0.219271574], - [-0.431087803,0.2374307391,-0.8705113154] - ], - "t": [ - [-5.834972436], - [85.69962032], - [306.7617687] - ] - }, - { - "name": "11_11", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 11, - "K": [ - [743.956,0,385.014], - [0,743.968,233.944], - [0,0,1] - ], - "distCoef": [-0.321873,0.0619652,-0.000204505,0.000631491,0.0680901], - "R": [ - [-0.9171447001,-0.01735780695,0.3981762243], - [0.08629809142,0.9667012777,0.2409175774], - [-0.3890992656,0.2553181275,-0.8851070078] - ], - "t": [ - [26.82061991], - [73.01187567], - [307.7528197] - ] - }, - { - "name": "11_12", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 12, - "K": [ - [749.192,0,349.167], - [0,749.113,221.266], - [0,0,1] - ], - "distCoef": [-0.334032,0.094759,-0.000689735,0.000727903,0.0409048], - "R": [ - [-0.937850977,-0.03419002209,0.345349949], - [0.06230645433,0.9623765935,0.2644791068], - [-0.341399254,0.2695595196,-0.9004355695] - ], - "t": [ - [57.17130279], - [82.80130245], - [306.825197] - ] - }, - { - "name": "11_13", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 13, - "K": [ - [744.715,0,367.122], - [0,744.786,220.538], - [0,0,1] - ], - "distCoef": [-0.315954,0.0180051,3.91318e-05,0.000697083,0.145396], - "R": [ - [-0.9312656673,-0.01667316508,0.3639591494], - [0.07039560041,0.9718946087,0.2246448954], - [-0.3574754765,0.2348252013,-0.9039183639] - ], - "t": [ - [46.96203938], - [112.2947483], - [304.8878272] - ] - }, - { - "name": "11_14", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 14, - "K": [ - [746.505,0,367.697], - [0,746.62,222.237], - [0,0,1] - ], - "distCoef": [-0.323622,0.0629014,0.000917096,0.00064017,0.0716359], - "R": [ - [-0.9260527677,-0.07925799212,0.3689775632], - [0.02937617957,0.9595934278,0.279852628], - [-0.3762490021,0.2699974518,-0.8863058527] - ], - "t": [ - [50.81898209], - [116.0290364], - [310.1255555] - ] - }, - { - "name": "11_15", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 15, - "K": [ - [746.042,0,355.995], - [0,745.821,261.077], - [0,0,1] - ], - "distCoef": [-0.321065,0.0443736,0.000927074,0.000280863,0.106789], - "R": [ - [-0.9208600933,-0.04678508348,0.387076019], - [0.03581020852,0.9784294414,0.2034538209], - [-0.3882451771,0.2012137775,-0.8993212431] - ], - "t": [ - [43.08113165], - [154.6066575], - [301.5640854] - ] - }, - { - "name": "11_16", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 16, - "K": [ - [741.668,0,363.735], - [0,741.796,217.06], - [0,0,1] - ], - "distCoef": [-0.309875,-0.0179015,-1.19394e-05,-0.000437783,0.188022], - "R": [ - [-0.8991061052,-0.0185684781,0.437336739], - [0.0842559957,0.9730755765,0.214534029], - [-0.4295452698,0.2297370977,-0.873333686] - ], - "t": [ - [16.70791642], - [154.14567], - [307.2679797] - ] - }, - { - "name": "11_17", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 17, - "K": [ - [747.822,0,361.761], - [0,747.76,222.34], - [0,0,1] - ], - "distCoef": [-0.334628,0.097635,0.00152491,-0.000486737,0.0213673], - "R": [ - [-0.9162397179,0.01033450945,0.4004971626], - [0.1187416248,0.9617552428,0.2468345183], - [-0.3826293322,0.2737152732,-0.8824254888] - ], - "t": [ - [27.8785048], - [159.3368695], - [313.9971646] - ] - }, - { - "name": "11_18", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 18, - "K": [ - [745.448,0,360.818], - [0,745.84,214.85], - [0,0,1] - ], - "distCoef": 
[-0.329534,0.0903331,0.00014069,0.000717079,0.0211508], - "R": [ - [-0.9101418911,0.04432675398,0.411918532], - [0.1391589893,0.9692024732,0.2031781034], - [-0.3902262342,0.2422430698,-0.888280238] - ], - "t": [ - [16.35209076], - [181.679224], - [308.9632727] - ] - }, - { - "name": "11_19", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 19, - "K": [ - [746.167,0,363.996], - [0,746.229,234.387], - [0,0,1] - ], - "distCoef": [-0.310901,-0.0147285,-0.000729007,-0.000655789,0.178193], - "R": [ - [-0.9157731435,-0.03755396433,0.3999365568], - [0.06406747528,0.9692207168,0.2377110865], - [-0.3965537899,0.2433123544,-0.8851803149] - ], - "t": [ - [-10.79527777], - [146.8696803], - [308.5271108] - ] - }, - { - "name": "11_20", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 20, - "K": [ - [744.588,0,384.664], - [0,744.662,240.853], - [0,0,1] - ], - "distCoef": [-0.307863,-0.0295446,-0.000517465,0.000242427,0.189333], - "R": [ - [-0.9170523574,0.0431160901,0.396429031], - [0.124694228,0.9752892469,0.1823793695], - [-0.3787694858,0.2166838427,-0.8997676305] - ], - "t": [ - [-9.200936127], - [142.5227957], - [304.9039442] - ] - }, - { - "name": "11_21", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 21, - "K": [ - [745.832,0,378.426], - [0,745.825,230.649], - [0,0,1] - ], - "distCoef": [-0.317765,0.041948,0.000140897,0.000331931,0.0876249], - "R": [ - [-0.903416406,0.009580467792,0.4286572198], - [0.1299134284,0.9588705554,0.2523683006], - [-0.4086089801,0.2836819921,-0.8675040223] - ], - "t": [ - [-22.38884391], - [100.2357286], - [311.942278] - ] - }, - { - "name": "11_22", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 22, - "K": [ - [745.759,0,381.189], - [0,746.033,229.615], - [0,0,1] - ], - "distCoef": [-0.307738,-0.0303832,0.000694314,-0.000395606,0.211723], - "R": [ - [-0.9121889441,-0.007451044875,0.4097021017], - [0.1102495844,0.9585035751,0.2628990789], - [-0.394659802,0.2849831196,-0.8735148895] - ], - "t": [ - [-0.4671669308], - [91.25062129], - [311.8622342] - ] - }, - { - "name": "11_23", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 23, - "K": [ - [748.678,0,358.839], - [0,748.651,239.635], - [0,0,1] - ], - "distCoef": [-0.328983,0.0919887,-1.22475e-05,-0.000911096,0.0194744], - "R": [ - [-0.9251940915,-0.06790089301,0.3733702744], - [0.01633387562,0.9758259889,0.2179377065], - [-0.3791425821,0.207733262,-0.9017193545] - ], - "t": [ - [15.23843998], - [129.776393], - [302.9631654] - ] - }, - { - "name": "11_24", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 24, - "K": [ - [747.741,0,374.843], - [0,747.8,238.972], - [0,0,1] - ], - "distCoef": [-0.320184,0.0453956,8.07771e-05,-0.000586724,0.0799959], - "R": [ - [-0.901120423,0.005145678853,0.4335383549], - [0.1030532182,0.9738156258,0.2026404726], - [-0.4211437016,0.2272809911,-0.8780554275] - ], - "t": [ - [6.522845915], - [142.0951003], - [306.255293] - ] - }, - { - "name": "12_01", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 1, - "K": [ - [745.397,0,350.188], - [0,745.422,244.528], - [0,0,1] - ], - "distCoef": [-0.318784,0.0421446,0.000567418,-0.000208,0.092208], - "R": [ - [-0.2717431751,0.1656287556,0.9480098956], - [0.4128654434,0.9098857043,-0.04062180222], - [-0.86930879,0.3803618284,-0.3156376199] - ], - "t": [ - [-13.70303847], - [97.1923903], - [326.2673629] - ] - }, - { - "name": "12_02", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 2, - "K": [ 
- [747.727,0,370.501], - [0,747.788,234.298], - [0,0,1] - ], - "distCoef": [-0.349811,0.202844,-0.00194754,-0.000389321,-0.178679], - "R": [ - [-0.3883456032,0.1438043201,0.9102241537], - [0.3131714459,0.9495549238,-0.01640403197], - [-0.8666667975,0.2786857806,-0.4137908865] - ], - "t": [ - [13.37192963], - [105.5473845], - [318.08591] - ] - }, - { - "name": "12_03", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 3, - "K": [ - [746.831,0,387.09], - [0,746.752,242.092], - [0,0,1] - ], - "distCoef": [-0.338844,0.109538,-0.000689346,-0.00140957,-0.0011227], - "R": [ - [-0.2489409576,0.07810816372,0.9653639285], - [0.3865744043,0.9219167609,0.0250941395], - [-0.8880251289,0.3794319447,-0.2596974581] - ], - "t": [ - [-20.03334166], - [70.50216381], - [325.3775618] - ] - }, - { - "name": "12_04", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 4, - "K": [ - [746.601,0,360.45], - [0,746.776,222.063], - [0,0,1] - ], - "distCoef": [-0.336822,0.124774,0.000206697,-0.000417774,-0.0398672], - "R": [ - [-0.3081671276,0.03567998316,0.9506629057], - [0.4212102042,0.9011275261,0.1027187694], - [-0.8530035084,0.4320834647,-0.2927266543] - ], - "t": [ - [4.764737811], - [63.41476985], - [331.1517594] - ] - }, - { - "name": "12_05", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 5, - "K": [ - [748.2,0,362.212], - [0,748.363,218.877], - [0,0,1] - ], - "distCoef": [-0.337789,0.133894,-0.000945522,-0.000498923,-0.0570031], - "R": [ - [-0.2841336654,-0.004801876737,0.9587726541], - [0.3831436474,0.9161034097,0.118133349], - [-0.8789021593,0.4009133132,-0.2584560111] - ], - "t": [ - [10.92507323], - [68.32263664], - [329.7866549] - ] - }, - { - "name": "12_06", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 6, - "K": [ - [747.371,0,350.388], - [0,747.497,231.124], - [0,0,1] - ], - "distCoef": [-0.351189,0.233364,-0.000450075,-0.00118874,-0.265042], - "R": [ - [-0.3878504716,-0.01635524947,0.9215771902], - [0.3346075558,0.9291346168,0.1573106717], - [-0.8588421248,0.3693797093,-0.3548927092] - ], - "t": [ - [53.76493542], - [97.09757883], - [324.1315487] - ] - }, - { - "name": "12_07", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 7, - "K": [ - [747.196,0,383.602], - [0,747.258,260.076], - [0,0,1] - ], - "distCoef": [-0.340453,0.149462,7.57635e-05,-0.00150211,-0.0810731], - "R": [ - [-0.3567494973,0.01375486298,0.934098817], - [0.3428523716,0.9320474424,0.1172169629], - [-0.8690121101,0.3620750873,-0.3372233439] - ], - "t": [ - [46.87962376], - [118.8343508], - [324.070693] - ] - }, - { - "name": "12_08", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 8, - "K": [ - [748.388,0,360.952], - [0,748.584,220.934], - [0,0,1] - ], - "distCoef": [-0.353387,0.236369,0.000317101,-0.000350889,-0.25062], - "R": [ - [-0.3882650784,-0.0538394581,0.9199736636], - [0.3529834406,0.9134681838,0.2024316376], - [-0.8512654812,0.4033326047,-0.3356633588] - ], - "t": [ - [53.63586961], - [124.5990463], - [329.2926486] - ] - }, - { - "name": "12_09", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 9, - "K": [ - [745.023,0,373.202], - [0,745.321,253.183], - [0,0,1] - ], - "distCoef": [-0.310235,-0.0270349,0.000213071,-0.0010354,0.204812], - "R": [ - [-0.3615436505,-0.1034754049,0.9265953968], - [0.3189620476,0.9201303682,0.2272076531], - [-0.8760989676,0.3776942494,-0.2996625652] - ], - "t": [ - [26.36947949], - [154.1173845], - [328.14772] - ] - }, - { - "name": "12_10", - "type": 
"vga", - "resolution": [640,480], - "panel": 12, - "node": 10, - "K": [ - [743.497,0,337.094], - [0,743.775,230.392], - [0,0,1] - ], - "distCoef": [-0.323522,0.0697077,-0.000922284,-0.00112939,0.0376595], - "R": [ - [-0.409013364,-0.03192166586,0.9119698873], - [0.3635432206,0.9109541012,0.1949331996], - [-0.8369853014,0.4112707536,-0.3609874961] - ], - "t": [ - [36.39561956], - [146.2733377], - [330.6860766] - ] - }, - { - "name": "12_11", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 11, - "K": [ - [744.432,0,350.161], - [0,744.664,216.764], - [0,0,1] - ], - "distCoef": [-0.3138,0.0423232,-0.000980128,0.000347352,0.0411803], - "R": [ - [-0.3625324698,0.01191238118,0.9318950067], - [0.4332658145,0.8874493782,0.157207936], - [-0.8251369234,0.4607512304,-0.3268904424] - ], - "t": [ - [30.02223667], - [146.021886], - [340.9352409] - ] - }, - { - "name": "12_12", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 12, - "K": [ - [745.59,0,349.499], - [0,745.978,243.824], - [0,0,1] - ], - "distCoef": [-0.328804,0.102744,-0.00034172,-0.00160085,-0.0230968], - "R": [ - [-0.3184962228,0.07265474811,0.9451356747], - [0.3862627531,0.9204738181,0.05940568743], - [-0.8656565379,0.3839911948,-0.3212312573] - ], - "t": [ - [17.04074577], - [180.9741057], - [327.7548666] - ] - }, - { - "name": "12_13", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 13, - "K": [ - [744.766,0,364.423], - [0,744.926,205.341], - [0,0,1] - ], - "distCoef": [-0.32165,0.0514735,-0.000885848,-0.00113933,0.0656482], - "R": [ - [-0.2748509499,0.06379038152,0.9593684081], - [0.3894986417,0.919644886,0.05043898999], - [-0.8790607279,0.3875358962,-0.2776115375] - ], - "t": [ - [-9.802475588], - [164.1613661], - [327.7325897] - ] - }, - { - "name": "12_14", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 14, - "K": [ - [744.556,0,345.329], - [0,744.551,253.003], - [0,0,1] - ], - "distCoef": [-0.311027,-0.00213006,0.0011289,-0.000863959,0.162024], - "R": [ - [-0.3202755169,0.1244082889,0.9391198917], - [0.4530679872,0.8907277919,0.0365157459], - [-0.831957326,0.4371802584,-0.3416437171] - ], - "t": [ - [0.5161253202], - [152.8799295], - [338.113135] - ] - }, - { - "name": "12_15", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 15, - "K": [ - [747.233,0,347.644], - [0,747.329,227.375], - [0,0,1] - ], - "distCoef": [-0.323105,0.049287,-0.00101918,5.08353e-05,0.100564], - "R": [ - [-0.2639942301,0.1219548974,0.9567831779], - [0.4010015368,0.9160569375,-0.006120025947], - [-0.8772142349,0.3820558732,-0.2907378472] - ], - "t": [ - [-27.43280694], - [159.7105652], - [325.8203908] - ] - }, - { - "name": "12_16", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 16, - "K": [ - [744.634,0,382.866], - [0,744.52,241.14], - [0,0,1] - ], - "distCoef": [-0.320913,0.0518689,0.000556907,0.000900625,0.0851061], - "R": [ - [-0.2918914105,0.1153635448,0.9494686183], - [0.4055533141,0.9139698053,0.01362734066], - [-0.8662135499,0.3890378484,-0.3135660035] - ], - "t": [ - [-22.908528], - [135.1916248], - [327.5972929] - ] - }, - { - "name": "12_17", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 17, - "K": [ - [745.929,0,399.922], - [0,745.76,235.115], - [0,0,1] - ], - "distCoef": [-0.324412,0.0924767,0.000808772,0.00160345,0.0125449], - "R": [ - [-0.2332319969,0.1531844985,0.9602798264], - [0.4252056559,0.9041694633,-0.04096012482], - [-0.8745301515,0.3987632018,-0.2760161646] - ], - "t": [ - [-42.90434909], - 
[120.9469461], - [326.5490528] - ] - }, - { - "name": "12_18", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 18, - "K": [ - [745.596,0,390.427], - [0,745.457,235.855], - [0,0,1] - ], - "distCoef": [-0.331545,0.0834192,0.000515021,-0.000851112,0.0388274], - "R": [ - [-0.2198853867,0.1587089693,0.9625288982], - [0.4990272732,0.8661072571,-0.02880971702], - [-0.8382256244,0.4739933356,-0.2696444333] - ], - "t": [ - [-48.83152805], - [73.52609427], - [332.6787653] - ] - }, - { - "name": "12_19", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 19, - "K": [ - [744.284,0,396.863], - [0,744.47,248.804], - [0,0,1] - ], - "distCoef": [-0.318049,0.0444362,0.000417829,0.000948817,0.0847095], - "R": [ - [-0.2972813843,0.0975420226,0.9497943632], - [0.4134272643,0.9098266462,0.03596346693], - [-0.8606402708,0.4033621545,-0.3108010564] - ], - "t": [ - [-6.347004052], - [101.4062297], - [328.9550302] - ] - }, - { - "name": "12_20", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 20, - "K": [ - [745.173,0,391.68], - [0,745.292,239.851], - [0,0,1] - ], - "distCoef": [-0.316891,0.030971,0.000827356,0.00064571,0.114679], - "R": [ - [-0.3480625566,0.05516818218,0.9358466372], - [0.3680676982,0.9261498325,0.08229615655], - [-0.8621940769,0.3730991283,-0.3426637043] - ], - "t": [ - [18.00373906], - [105.1024652], - [325.6162418] - ] - }, - { - "name": "12_21", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 21, - "K": [ - [744.07,0,385.155], - [0,744.184,238.534], - [0,0,1] - ], - "distCoef": [-0.325321,0.0749068,6.22505e-05,8.78769e-06,0.0274316], - "R": [ - [-0.2944173655,-0.00519814937,0.9556628036], - [0.365777539,0.9232287513,0.117709238], - [-0.882907247,0.3842156322,-0.2699132104] - ], - "t": [ - [4.17424328], - [116.8807078], - [328.2455421] - ] - }, - { - "name": "12_22", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 22, - "K": [ - [747.36,0,358.25], - [0,747.451,237.291], - [0,0,1] - ], - "distCoef": [-0.329867,0.116416,-0.000580151,-0.000763801,-0.0625995], - "R": [ - [-0.323867873,0.0530845029,0.9446118972], - [0.387407199,0.9183241349,0.08121850418], - [-0.8631484594,0.3922535134,-0.3179810029] - ], - "t": [ - [22.53106717], - [133.6738778], - [328.8995429] - ] - }, - { - "name": "12_23", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 23, - "K": [ - [748.813,0,380.156], - [0,748.859,237.356], - [0,0,1] - ], - "distCoef": [-0.333932,0.115832,0.000621747,-0.000254241,-0.0140772], - "R": [ - [-0.3097958639,0.0326105921,0.9502436908], - [0.3550951383,0.9310652686,0.08381472691], - [-0.8820056493,0.3633923705,-0.3000200319] - ], - "t": [ - [-6.485061334], - [151.418855], - [323.8858443] - ] - }, - { - "name": "12_24", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 24, - "K": [ - [745.33,0,360.408], - [0,745.472,237.433], - [0,0,1] - ], - "distCoef": [-0.321653,0.057929,3.69615e-05,-0.000478596,0.0560779], - "R": [ - [-0.3250711399,0.1046959739,0.9398763254], - [0.4072848242,0.9124585149,0.03922410658], - [-0.8534915501,0.395547989,-0.3392550109] - ], - "t": [ - [2.217299854], - [123.8595425], - [329.2221602] - ] - }, - { - "name": "13_01", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 1, - "K": [ - [747.6,0,355.92], - [0,747.783,249.853], - [0,0,1] - ], - "distCoef": [-0.333712,0.144699,-6.46303e-05,-0.0011294,-0.0924471], - "R": [ - [0.5138271048,0.01100033104,0.857823233], - [0.08358608019,0.9945184566,-0.06282043172], - 
[-0.8538120833,0.1039809221,0.5100910647] - ], - "t": [ - [-37.95328646], - [135.6435695], - [289.9999799] - ] - }, - { - "name": "13_02", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 2, - "K": [ - [743.227,0,372.15], - [0,743.265,265.407], - [0,0,1] - ], - "distCoef": [-0.306942,-0.0266079,0.000311285,0.000595534,0.199806], - "R": [ - [0.4485620057,-0.005900946102,0.8937322339], - [0.06601293956,0.9974655925,-0.02654587691], - [-0.8913105064,0.07090536373,0.4478147055] - ], - "t": [ - [-38.28645032], - [133.2984516], - [288.856211] - ] - }, - { - "name": "13_03", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 3, - "K": [ - [746.538,0,387.516], - [0,746.833,233.181], - [0,0,1] - ], - "distCoef": [-0.322577,0.0715483,-4.90461e-05,0.000787497,0.0326639], - "R": [ - [0.5260210271,0.02315422103,0.8501563157], - [0.07372016672,0.9946254291,-0.07270208278], - [-0.8472704504,0.1009164896,0.5214869567] - ], - "t": [ - [-53.0750023], - [105.7642054], - [287.8235486] - ] - }, - { - "name": "13_04", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 4, - "K": [ - [744.864,0,367.763], - [0,745.005,229.771], - [0,0,1] - ], - "distCoef": [-0.318118,0.0367901,0.000364188,-0.000713933,0.0879467], - "R": [ - [0.4575577495,0.1623260474,0.8742374736], - [-0.0244195278,0.9851184177,-0.1701334469], - [-0.8888445267,0.05649741078,0.4547124916] - ], - "t": [ - [4.756699591], - [110.8595803], - [285.3944853] - ] - }, - { - "name": "13_05", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 5, - "K": [ - [744.026,0,374.462], - [0,744.21,219.295], - [0,0,1] - ], - "distCoef": [-0.309274,-0.00813814,-0.000611939,0.000562163,0.16533], - "R": [ - [0.5236500196,-0.01990538858,0.8517009055], - [0.0479853053,0.9988290545,-0.006158764858], - [-0.8505810176,0.04409416531,0.5239920201] - ], - "t": [ - [-32.80347729], - [91.75629107], - [282.6719703] - ] - }, - { - "name": "13_06", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 6, - "K": [ - [746.172,0,347.715], - [0,746.412,223.735], - [0,0,1] - ], - "distCoef": [-0.315889,0.0243673,0.00083413,-0.000596366,0.129203], - "R": [ - [0.489601615,0.07237643337,0.8689372305], - [-0.010214584,0.9969567785,-0.07728417735], - [-0.8718864151,0.02896262571,0.488850944] - ], - "t": [ - [7.55259059], - [89.5920217], - [281.8493454] - ] - }, - { - "name": "13_07", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 7, - "K": [ - [745.619,0,383.372], - [0,745.683,224.508], - [0,0,1] - ], - "distCoef": [-0.315816,0.0424659,0.000456201,0.000714024,0.0879752], - "R": [ - [0.5142457137,-0.005076098829,0.8576278792], - [0.07753605572,0.9961627141,-0.04059565316], - [-0.8541308483,0.08737322366,0.5126659866] - ], - "t": [ - [9.165152848], - [86.80281732], - [287.1451009] - ] - }, - { - "name": "13_08", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 8, - "K": [ - [746.151,0,390.693], - [0,746.159,238.847], - [0,0,1] - ], - "distCoef": [-0.312796,0.0112848,0.00109903,0.000945928,0.138088], - "R": [ - [0.5333632905,-0.08775347438,0.841322131], - [0.13459771,0.9907366672,0.0180086874], - [-0.8351090089,0.1036348594,0.5402339855] - ], - "t": [ - [14.59630248], - [78.12680456], - [289.302137] - ] - }, - { - "name": "13_09", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 9, - "K": [ - [744.811,0,365.557], - [0,745.05,239.01], - [0,0,1] - ], - "distCoef": [-0.302561,-0.0588071,-0.000331846,-0.00065645,0.252299], - "R": [ - 
[0.515993865,0.007464548532,0.8565597538], - [0.05311793688,0.9977587535,-0.04069342277], - [-0.8549437502,0.06649624343,0.5144408941] - ], - "t": [ - [47.02842806], - [101.5821868], - [285.7219747] - ] - }, - { - "name": "13_10", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 10, - "K": [ - [744.185,0,393.537], - [0,744.44,231.354], - [0,0,1] - ], - "distCoef": [-0.321367,0.0639595,-3.49657e-05,0.000800078,0.0579089], - "R": [ - [0.5364096096,-0.02345912583,0.8436316733], - [0.07330244032,0.9971310212,-0.01888064639], - [-0.8407683884,0.07196802054,0.536590273] - ], - "t": [ - [31.38919798], - [122.486781], - [287.1552388] - ] - }, - { - "name": "13_11", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 11, - "K": [ - [745.973,0,365.594], - [0,746.037,211.677], - [0,0,1] - ], - "distCoef": [-0.32905,0.0977698,-0.000962762,0.000946642,0.0190885], - "R": [ - [0.5178117038,0.00482526951,0.8554810087], - [0.01921134431,0.9996663333,-0.01726691564], - [-0.8552788806,0.02537595122,0.5175462273] - ], - "t": [ - [57.16543019], - [149.3252564], - [279.6241941] - ] - }, - { - "name": "13_12", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 12, - "K": [ - [745.909,0,358.218], - [0,746.022,220.333], - [0,0,1] - ], - "distCoef": [-0.338571,0.148871,-0.00100229,-0.000678393,-0.0710162], - "R": [ - [0.5368407815,0.02503814463,0.8433119628], - [-0.01156171997,0.9996840035,-0.02232083821], - [-0.8436043516,0.002232599467,0.5369606257] - ], - "t": [ - [51.57359577], - [176.1957711], - [275.7319623] - ] - }, - { - "name": "13_13", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 13, - "K": [ - [743.068,0,370.139], - [0,743.357,232.303], - [0,0,1] - ], - "distCoef": [-0.302401,-0.0553181,-0.00107418,-0.000672395,0.220417], - "R": [ - [0.5299693687,-0.06080201885,0.8458342525], - [0.13849556,0.9902402801,-0.01559383094], - [-0.8366310107,0.1254085412,0.5332178257] - ], - "t": [ - [16.99243391], - [145.7883087], - [295.0494301] - ] - }, - { - "name": "13_14", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 14, - "K": [ - [743.724,0,347.611], - [0,743.902,235.434], - [0,0,1] - ], - "distCoef": [-0.315484,0.0296225,-0.000529931,-0.000276443,0.110913], - "R": [ - [0.5388576125,-0.001120175332,0.8423961174], - [0.06888686412,0.9967085439,-0.04273965901], - [-0.8395755317,0.08106061749,0.5371611517] - ], - "t": [ - [22.68047362], - [178.4537167], - [288.5132471] - ] - }, - { - "name": "13_15", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 15, - "K": [ - [748.48,0,370.578], - [0,748.498,231.761], - [0,0,1] - ], - "distCoef": [-0.333743,0.123731,0.000274987,0.00129665,-0.0264397], - "R": [ - [0.5569883215,-0.02228411773,0.8302213126], - [0.06483002391,0.9977563557,-0.01671294857], - [-0.827986158,0.06313218472,0.5571833177] - ], - "t": [ - [-8.30154925], - [184.6918205], - [284.5865319] - ] - }, - { - "name": "13_16", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 16, - "K": [ - [748.413,0,364.616], - [0,748.358,230.166], - [0,0,1] - ], - "distCoef": [-0.337541,0.138107,0.000557985,-0.000490808,-0.0648839], - "R": [ - [0.5035312414,0.04830043061,0.8626258501], - [0.03089895722,0.996790644,-0.07384894344], - [-0.8634243125,0.06383948941,0.5004227975] - ], - "t": [ - [5.312179267], - [173.5565462], - [284.5085099] - ] - }, - { - "name": "13_17", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 17, - "K": [ - [745.143,0,372.782], - [0,745.112,223.2], - 
[0,0,1] - ], - "distCoef": [-0.321603,0.0646008,-0.000584526,0.000805086,0.0603349], - "R": [ - [0.5471603314,0.02993221277,0.8364924593], - [0.06649342528,0.9946477166,-0.07908567611], - [-0.8343825239,0.09889379359,0.5422414789] - ], - "t": [ - [-32.63653561], - [167.4383368], - [289.2367997] - ] - }, - { - "name": "13_18", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 18, - "K": [ - [745.136,0,373.506], - [0,745.259,215.704], - [0,0,1] - ], - "distCoef": [-0.333755,0.12331,-0.00049301,0.00138004,-0.0323155], - "R": [ - [0.5039095131,0.07384116584,0.8605943788], - [0.02822760746,0.9943991795,-0.1018502524], - [-0.8632950856,0.07561583139,0.4990028469] - ], - "t": [ - [-29.61131213], - [166.0398843], - [286.9453226] - ] - }, - { - "name": "13_19", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 19, - "K": [ - [743.638,0,344.046], - [0,743.783,238.416], - [0,0,1] - ], - "distCoef": [-0.319291,0.0355055,-0.000169258,0.000161892,0.118247], - "R": [ - [0.5180347054,0.01180967192,0.8552780692], - [0.1057363227,0.9913513706,-0.07773216881], - [-0.8487990775,0.1307019191,0.512305704] - ], - "t": [ - [-19.08174331], - [122.2280138], - [293.3272927] - ] - }, - { - "name": "13_20", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 20, - "K": [ - [745.321,0,372.761], - [0,745.559,236.547], - [0,0,1] - ], - "distCoef": [-0.320489,0.0479206,-9.03328e-05,-0.000256288,0.0784864], - "R": [ - [0.4966252135,-0.01754426777,0.8677877598], - [0.06583916704,0.9976766247,-0.01750875645], - [-0.8654643848,0.06582971318,0.4966264667] - ], - "t": [ - [-11.61163777], - [120.2765647], - [285.1928757] - ] - }, - { - "name": "13_21", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 21, - "K": [ - [745.539,0,371.886], - [0,745.656,230.519], - [0,0,1] - ], - "distCoef": [-0.326644,0.0839413,-0.000557984,0.000204085,0.0126328], - "R": [ - [0.5330371562,-0.03752357961,0.8452593514], - [0.08887796824,0.9959722199,-0.01183402057], - [-0.8414107777,0.08143290645,0.5342252193] - ], - "t": [ - [-6.03247131], - [109.6165459], - [286.9430377] - ] - }, - { - "name": "13_22", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 22, - "K": [ - [744.018,0,396.717], - [0,744.224,249.141], - [0,0,1] - ], - "distCoef": [-0.315372,0.0205822,-0.000440151,0.000134817,0.105074], - "R": [ - [0.4984198723,-0.001673636668,0.8669341554], - [0.03130878513,0.9993805529,-0.01607079461], - [-0.8663702389,0.03515265859,0.4981635271] - ], - "t": [ - [26.09238071], - [136.8142763], - [280.4949188] - ] - }, - { - "name": "13_23", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 23, - "K": [ - [744.884,0,382.514], - [0,744.877,235.74], - [0,0,1] - ], - "distCoef": [-0.326378,0.0966908,-9.48994e-05,0.00105607,0.00534895], - "R": [ - [0.4908089633,-0.01723518027,0.8710967283], - [0.04978157704,0.9987257364,-0.008288432131], - [-0.8698438688,0.04743260567,0.4910415377] - ], - "t": [ - [21.95453226], - [154.6836493], - [281.6596012] - ] - }, - { - "name": "13_24", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 24, - "K": [ - [744.481,0,341.813], - [0,744.509,213.322], - [0,0,1] - ], - "distCoef": [-0.310201,-0.0109775,-0.00130948,-0.000370453,0.189258], - "R": [ - [0.5283332962,-0.01827851401,0.8488402818], - [0.07383881778,0.996969434,-0.02449033896], - [-0.8458201683,0.0756164244,0.5280818111] - ], - "t": [ - [-10.59416721], - [149.8670778], - [286.3856475] - ] - }, - { - "name": "14_01", - "type": "vga", - 
"resolution": [640,480], - "panel": 14, - "node": 1, - "K": [ - [745.639,0,394.42], - [0,745.872,232.374], - [0,0,1] - ], - "distCoef": [-0.317821,0.05701,0.000216723,0.00145431,0.0516441], - "R": [ - [0.1117244957,0.006687085701,0.9937167202], - [0.1929264895,0.9808052728,-0.02829110459], - [-0.9748317838,0.1948750877,0.1082898585] - ], - "t": [ - [-10.76838593], - [183.2092961], - [300.2249606] - ] - }, - { - "name": "14_02", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 2, - "K": [ - [744.265,0,384.24], - [0,744.607,234.555], - [0,0,1] - ], - "distCoef": [-0.314122,0.0172489,-0.000351192,-3.05431e-05,0.116521], - "R": [ - [0.09126102309,0.01926845044,0.9956405739], - [0.1889483007,0.9813154942,-0.03631033643], - [-0.9777371658,0.191438313,0.08591511501] - ], - "t": [ - [-20.54744948], - [195.8515337], - [299.6149103] - ] - }, - { - "name": "14_03", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 3, - "K": [ - [742.909,0,383.13], - [0,743.051,234.161], - [0,0,1] - ], - "distCoef": [-0.311566,0.0211516,-0.000212815,-9.64233e-05,0.110817], - "R": [ - [0.07658267666,-0.01244461629,0.9969855692], - [0.2193131093,0.9756433613,-0.004668149478], - [-0.9726442586,0.2190095044,0.07744664757] - ], - "t": [ - [-39.95619704], - [171.7405641], - [305.3439137] - ] - }, - { - "name": "14_04", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 4, - "K": [ - [745.057,0,349.277], - [0,745.321,214.2], - [0,0,1] - ], - "distCoef": [-0.31581,0.0237721,-0.00140945,-0.000667487,0.124292], - "R": [ - [0.09341145846,-0.02354383001,0.9953491787], - [0.2305453591,0.9730606003,0.001380415192], - [-0.9685675696,0.2293441873,0.09632293059] - ], - "t": [ - [-43.73412593], - [146.7921304], - [306.2893961] - ] - }, - { - "name": "14_05", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 5, - "K": [ - [744.634,0,387.597], - [0,744.752,225.246], - [0,0,1] - ], - "distCoef": [-0.315944,0.0434616,-0.000268259,0.00110436,0.0780237], - "R": [ - [0.1133728096,0.0374780752,0.9928454059], - [0.2222309073,0.973014014,-0.06210597779], - [-0.9683801061,0.2276820645,0.1019845459] - ], - "t": [ - [-53.79623552], - [137.113178], - [305.5099477] - ] - }, - { - "name": "14_06", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 6, - "K": [ - [744.759,0,388.645], - [0,744.666,221.73], - [0,0,1] - ], - "distCoef": [-0.306159,-0.0283273,-0.000508774,0.00094455,0.192402], - "R": [ - [0.1564984143,0.01913164242,0.9874928995], - [0.2309282446,0.9713913042,-0.05541732523], - [-0.96030224,0.2367127254,0.1476031622] - ], - "t": [ - [-66.24261018], - [112.7515407], - [303.5978047] - ] - }, - { - "name": "14_07", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 7, - "K": [ - [744.959,0,375.286], - [0,745.092,235.744], - [0,0,1] - ], - "distCoef": [-0.302136,-0.0624017,-0.000302824,-0.00146028,0.239945], - "R": [ - [0.0628689268,0.03077162571,0.9975472947], - [0.2444661638,0.9685997585,-0.04528578729], - [-0.967617586,0.2467136292,0.05337220603] - ], - "t": [ - [-19.11814477], - [98.74694092], - [308.9777955] - ] - }, - { - "name": "14_08", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 8, - "K": [ - [746.649,0,384.752], - [0,746.836,237.267], - [0,0,1] - ], - "distCoef": [-0.321628,0.0600031,0.000104796,0.000953791,0.0524376], - "R": [ - [0.1158239713,-0.07384920575,0.9905206219], - [0.2473198554,0.9679682291,0.043248082], - [-0.9619863288,0.2399662524,0.1303782992] - ], - "t": [ - [-45.76229918], - 
[76.40869106], - [305.3733784] - ] - }, - { - "name": "14_09", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 9, - "K": [ - [745.672,0,372.774], - [0,745.737,209.129], - [0,0,1] - ], - "distCoef": [-0.30917,-0.00857977,-4.68803e-05,-0.000521617,0.17194], - "R": [ - [0.1233501146,0.01050711315,0.9923075883], - [0.2153087978,0.9758411417,-0.0370970036], - [-0.9687243523,0.2182284735,0.1181078428] - ], - "t": [ - [-15.44854612], - [78.73632155], - [304.5944309] - ] - }, - { - "name": "14_10", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 10, - "K": [ - [744.36,0,350.493], - [0,744.605,227.167], - [0,0,1] - ], - "distCoef": [-0.324539,0.0696676,-0.000964917,-0.000688724,0.0453805], - "R": [ - [0.0653712546,0.005547467364,0.9978455916], - [0.2748842968,0.9611936881,-0.02335203178], - [-0.9592524289,0.2758186354,0.06130952564] - ], - "t": [ - [17.36142141], - [73.86484437], - [309.5485763] - ] - }, - { - "name": "14_11", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 11, - "K": [ - [744.072,0,352.953], - [0,744.032,218.847], - [0,0,1] - ], - "distCoef": [-0.310531,-0.00866492,-5.61729e-06,0.000627577,0.179884], - "R": [ - [0.08325845442,0.01268657881,0.9964472292], - [0.1993298125,0.97949952,-0.02912586749], - [-0.9763890903,0.2010466141,0.07902280276] - ], - "t": [ - [33.26019053], - [89.58305599], - [303.0664402] - ] - }, - { - "name": "14_12", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 12, - "K": [ - [743.677,0,359.077], - [0,743.623,233.815], - [0,0,1] - ], - "distCoef": [-0.305265,-0.0518121,0.000714314,0.000432839,0.265088], - "R": [ - [0.06818541392,0.004787243789,0.9976611808], - [0.2533830838,0.9671167716,-0.02195821049], - [-0.9649599796,0.2542876962,0.06473025078] - ], - "t": [ - [54.03449748], - [85.53998459], - [306.9876015] - ] - }, - { - "name": "14_13", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 13, - "K": [ - [742.736,0,368.122], - [0,742.832,238.615], - [0,0,1] - ], - "distCoef": [-0.303469,-0.0412536,1.82225e-05,-0.000473228,0.205739], - "R": [ - [0.1225239282,-0.0735967149,0.9897329996], - [0.2305366224,0.9720798639,0.0437447595], - [-0.9653189902,0.222809923,0.1360697815] - ], - "t": [ - [17.43625272], - [116.7070017], - [307.0317679] - ] - }, - { - "name": "14_14", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 14, - "K": [ - [745.328,0,371.219], - [0,745.487,209.713], - [0,0,1] - ], - "distCoef": [-0.318297,0.0286867,-0.0013247,0.000626009,0.137928], - "R": [ - [0.06972690557,-0.0276618613,0.9971825209], - [0.2175762615,0.9759712693,0.01185967683], - [-0.9735495514,0.2161363064,0.0740700209] - ], - "t": [ - [57.75964066], - [131.0709572], - [303.578107] - ] - }, - { - "name": "14_15", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 15, - "K": [ - [743.637,0,370.163], - [0,743.479,235.403], - [0,0,1] - ], - "distCoef": [-0.301307,-0.0600698,0.000220332,0.000264974,0.263845], - "R": [ - [0.0871387997,-0.1078492175,0.9903410402], - [0.2171380052,0.9722761796,0.08677624828], - [-0.9722437535,0.2074790999,0.1081411432] - ], - "t": [ - [27.10934266], - [155.0300785], - [303.8314173] - ] - }, - { - "name": "14_16", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 16, - "K": [ - [747.749,0,388.765], - [0,747.73,234.855], - [0,0,1] - ], - "distCoef": [-0.320028,0.057848,-0.00103044,0.00101463,0.0716113], - "R": [ - [0.09276252326,-0.02731891999,0.9953134134], - 
[0.2004837996,0.9796626634,0.008204393401], - [-0.9752955246,0.1987831547,0.09635298148] - ], - "t": [ - [25.02944215], - [165.1686099], - [301.5459594] - ] - }, - { - "name": "14_17", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 17, - "K": [ - [745.477,0,358.035], - [0,745.633,228.78], - [0,0,1] - ], - "distCoef": [-0.315933,0.0359808,-0.000244793,0.00106736,0.101835], - "R": [ - [0.09323456203,-0.04884472803,0.9944453273], - [0.1997864834,0.9793990461,0.02937464128], - [-0.9753936013,0.1959380031,0.1010723576] - ], - "t": [ - [12.52671676], - [185.8338565], - [300.6683817] - ] - }, - { - "name": "14_19", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 19, - "K": [ - [746.962,0,392.223], - [0,747.34,219.936], - [0,0,1] - ], - "distCoef": [-0.325078,0.0885503,-0.00165532,0.000580691,0.0160315], - "R": [ - [0.129696032,0.03909405168,0.990782819], - [0.1776002444,0.9821476201,-0.06200165731], - [-0.9755188837,0.1840046397,0.1204375361] - ], - "t": [ - [-4.746570817], - [166.089254], - [298.9402723] - ] - }, - { - "name": "14_20", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 20, - "K": [ - [744.91,0,339.915], - [0,744.956,221.133], - [0,0,1] - ], - "distCoef": [-0.306862,-0.0244375,-6.76743e-05,-0.000102471,0.205298], - "R": [ - [0.09943504227,-0.007298095184,0.9950172914], - [0.2125993636,0.9770380132,-0.01407946415], - [-0.9720669642,0.212940035,0.09870338653] - ], - "t": [ - [-22.7866272], - [143.0595857], - [303.8181509] - ] - }, - { - "name": "14_21", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 21, - "K": [ - [743.577,0,349.797], - [0,743.73,227.793], - [0,0,1] - ], - "distCoef": [-0.307046,-0.0206712,-0.000861395,-9.97172e-05,0.196115], - "R": [ - [0.09969364468,-0.01462231859,0.9949107322], - [0.2541863771,0.9670897407,-0.01125696175], - [-0.9620033591,0.2540150021,0.1001294952] - ], - "t": [ - [-20.43364439], - [109.4423166], - [308.9174676] - ] - }, - { - "name": "14_22", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 22, - "K": [ - [745.066,0,381.498], - [0,745.047,229.678], - [0,0,1] - ], - "distCoef": [-0.314894,0.0257947,-0.000483886,0.00117112,0.111876], - "R": [ - [0.08696832552,-0.05294226024,0.9948033109], - [0.2154078845,0.9759627551,0.03310806346], - [-0.9726437959,0.2114091239,0.09628202687] - ], - "t": [ - [-4.298071534], - [115.0382234], - [303.8536261] - ] - }, - { - "name": "14_23", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 23, - "K": [ - [746.602,0,379.206], - [0,746.635,260.689], - [0,0,1] - ], - "distCoef": [-0.319922,0.0568918,0.00103779,-0.000422086,0.0766843], - "R": [ - [0.09129519856,-0.01052008078,0.9957683037], - [0.2195471399,0.9755524467,-0.009822274065], - [-0.9713208739,0.2195148095,0.09137290798] - ], - "t": [ - [18.69590833], - [125.3942709], - [304.7857903] - ] - }, - { - "name": "14_24", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 24, - "K": [ - [745.388,0,382.392], - [0,745.496,224.015], - [0,0,1] - ], - "distCoef": [-0.302393,-0.0525763,-0.000559682,-6.77e-05,0.234314], - "R": [ - [0.08118536371,-0.04636746828,0.9956199047], - [0.1796446798,0.9832385033,0.03114216711], - [-0.9803758084,0.1763295309,0.0881542445] - ], - "t": [ - [8.147122648], - [159.0280693], - [298.1193244] - ] - }, - { - "name": "15_01", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 1, - "K": [ - [747.532,0,374.739], - [0,747.668,233.944], - [0,0,1] - ], - "distCoef": 
[-0.331439,0.109037,-0.000609362,0.000392501,-0.000621335], - "R": [ - [0.7848571462,0.05717032211,0.6170338843], - [0.1817012858,0.9307358272,-0.3173569956], - [-0.5924389444,0.3611957561,0.7201067442] - ], - "t": [ - [-19.59276639], - [102.5270366], - [325.6365462] - ] - }, - { - "name": "15_02", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 2, - "K": [ - [743.597,0,385.764], - [0,743.786,211.188], - [0,0,1] - ], - "distCoef": [-0.307778,-0.0279819,-0.000454196,0.00143268,0.205643], - "R": [ - [0.7963392439,-0.01332837804,0.6047033677], - [0.2601504211,0.910106147,-0.3225345868], - [-0.5460453892,0.4141607847,0.7282206241] - ], - "t": [ - [-38.00771612], - [61.10094736], - [329.1235579] - ] - }, - { - "name": "15_03", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 3, - "K": [ - [746.709,0,382.284], - [0,746.792,243.451], - [0,0,1] - ], - "distCoef": [-0.343209,0.149416,0.000603517,0.00195788,-0.0395936], - "R": [ - [0.7773715491,0.01124156294,0.6289412548], - [0.2547080739,0.908583342,-0.3310590698], - [-0.5751671686,0.4175523175,0.7034435232] - ], - "t": [ - [-3.435783379], - [55.70511308], - [330.3798829] - ] - }, - { - "name": "15_04", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 4, - "K": [ - [743.976,0,365.248], - [0,744.344,229.757], - [0,0,1] - ], - "distCoef": [-0.297483,-0.106842,0.000162294,-0.00147347,0.393874], - "R": [ - [0.7524447247,-0.05297584633,0.6565215122], - [0.2825071426,0.9263759092,-0.2490329079], - [-0.5949929838,0.3728555143,0.7120127209] - ], - "t": [ - [9.049706825], - [87.26745214], - [326.8342451] - ] - }, - { - "name": "15_05", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 5, - "K": [ - [748.766,0,349.367], - [0,748.975,233.229], - [0,0,1] - ], - "distCoef": [-0.341466,0.149186,0.00133441,-0.000377568,-0.0615035], - "R": [ - [0.7609990379,-0.1304343502,0.6355055818], - [0.3323849453,0.9196335935,-0.2092708816], - [-0.5571361704,0.3704874276,0.7431946943] - ], - "t": [ - [9.029843232], - [83.469382], - [327.9910328] - ] - }, - { - "name": "15_06", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 6, - "K": [ - [747.104,0,395.739], - [0,747.205,237.611], - [0,0,1] - ], - "distCoef": [-0.337038,0.14046,-0.00100634,0.00170735,-0.0468264], - "R": [ - [0.7339738121,-0.1238803965,0.6677844641], - [0.3595276943,0.9050347286,-0.227270713], - [-0.5762137452,0.4068977603,0.7088102232] - ], - "t": [ - [34.88470946], - [89.42074723], - [330.2467181] - ] - }, - { - "name": "15_07", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 7, - "K": [ - [743.991,0,393.18], - [0,744.112,255.459], - [0,0,1] - ], - "distCoef": [-0.325283,0.0732539,0.00077889,1.70805e-05,0.0462558], - "R": [ - [0.7496842409,-0.1571943749,0.6428557128], - [0.3434403747,0.9227495198,-0.1748771933], - [-0.5657050892,0.3518852828,0.7457576683] - ], - "t": [ - [12.35233863], - [128.2674639], - [324.6313017] - ] - }, - { - "name": "15_08", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 8, - "K": [ - [744.616,0,369.102], - [0,744.835,223.742], - [0,0,1] - ], - "distCoef": [-0.336732,0.141968,-0.000206183,0.000677154,-0.0657397], - "R": [ - [0.7264947252,-0.2131742795,0.6532703428], - [0.4249899792,0.8864309285,-0.1833677358], - [-0.5399897516,0.4108490422,0.7345843265] - ], - "t": [ - [15.28675757], - [126.0458703], - [333.4285141] - ] - }, - { - "name": "15_09", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 9, - "K": [ - 
[747.517,0,392.733], - [0,747.836,218.574], - [0,0,1] - ], - "distCoef": [-0.334626,0.113242,0.000443349,0.00121381,-0.00550976], - "R": [ - [0.8000319441,0.07155257429,0.5956753458], - [0.1937456116,0.9088549369,-0.3693850858], - [-0.5678129326,0.4109293525,0.7132499848] - ], - "t": [ - [-44.09712116], - [90.97242653], - [330.2186197] - ] - }, - { - "name": "15_10", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 10, - "K": [ - [743.904,0,354.135], - [0,744.494,220.038], - [0,0,1] - ], - "distCoef": [-0.309276,-0.0261099,-0.00127318,0.000283377,0.220693], - "R": [ - [0.7314656006,-0.1499734814,0.6651812009], - [0.3639090401,0.9108337109,-0.1948131455], - [-0.576652656,0.3845645668,0.720820233] - ], - "t": [ - [2.360923884], - [158.0207055], - [327.7017732] - ] - }, - { - "name": "15_11", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 11, - "K": [ - [745.441,0,366.024], - [0,745.471,238.165], - [0,0,1] - ], - "distCoef": [-0.311636,0.00305556,-0.00136926,0.00112458,0.163822], - "R": [ - [0.743215427,-0.1065195831,0.660518287], - [0.3430146167,0.9082888556,-0.2394834597], - [-0.5744317207,0.4045552288,0.7115920636] - ], - "t": [ - [3.38448511], - [170.5922255], - [331.2143489] - ] - }, - { - "name": "15_12", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 12, - "K": [ - [743.816,0,384.478], - [0,744.21,221.813], - [0,0,1] - ], - "distCoef": [-0.309294,-0.0116228,-0.000777235,0.00017565,0.174372], - "R": [ - [0.799529392,-0.03302696284,0.5997182431], - [0.261290645,0.91817945,-0.2977812898], - [-0.540814155,0.3947856601,0.7427410938] - ], - "t": [ - [-15.11731065], - [179.1857595], - [329.2699106] - ] - }, - { - "name": "15_13", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 13, - "K": [ - [744.594,0,366.809], - [0,744.805,211.378], - [0,0,1] - ], - "distCoef": [-0.313339,0.0076854,-0.000770441,0.000328229,0.137582], - "R": [ - [0.7697001229,-0.07364256128,0.6341439064], - [0.280866324,0.9310898592,-0.2327783971], - [-0.5733025631,0.3572792288,0.7373436945] - ], - "t": [ - [-27.06753178], - [173.6081799], - [322.2797536] - ] - }, - { - "name": "15_14", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 14, - "K": [ - [744.088,0,376.311], - [0,744.421,235.85], - [0,0,1] - ], - "distCoef": [-0.308902,-0.0157485,-0.000258056,-0.00040893,0.167363], - "R": [ - [0.8019727226,0.02030217439,0.5970155559], - [0.20788107,0.9274680659,-0.31078682], - [-0.5600225111,0.3733507848,0.7395836522] - ], - "t": [ - [-32.35663304], - [177.8511702], - [324.3990212] - ] - }, - { - "name": "15_15", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 15, - "K": [ - [745.471,0,391.786], - [0,745.597,244.782], - [0,0,1] - ], - "distCoef": [-0.319471,0.0520955,-9.03549e-05,0.00103599,0.0679082], - "R": [ - [0.7993824794,0.07801580494,0.5957358356], - [0.170767806,0.9211391478,-0.3497728217], - [-0.5760434082,0.3813347671,0.723019908] - ], - "t": [ - [-27.66881494], - [158.8808021], - [326.8395357] - ] - }, - { - "name": "15_16", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 16, - "K": [ - [744.688,0,372.572], - [0,744.687,232.622], - [0,0,1] - ], - "distCoef": [-0.313079,0.00611683,0.000601543,0.00134427,0.153664], - "R": [ - [0.8032635264,0.07397377164,0.5910123419], - [0.1542914416,0.9325457224,-0.3264239985], - [-0.5752928456,0.3533926383,0.7376664456] - ], - "t": [ - [-29.95169554], - [148.2901373], - [322.192073] - ] - }, - { - "name": "15_17", - "type": "vga", - 
"resolution": [640,480], - "panel": 15, - "node": 17, - "K": [ - [746.029,0,371.631], - [0,745.957,227.751], - [0,0,1] - ], - "distCoef": [-0.328618,0.10871,0.000376647,0.00140085,-0.015131], - "R": [ - [0.7930332571,0.09578045983,0.6016014933], - [0.1573865304,0.9218193412,-0.3542295616], - [-0.5884961625,0.3755997947,0.7159588403] - ], - "t": [ - [-34.37744536], - [124.5681533], - [326.9926029] - ] - }, - { - "name": "15_18", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 18, - "K": [ - [745.728,0,355.008], - [0,745.836,235.366], - [0,0,1] - ], - "distCoef": [-0.326785,0.0753795,-0.00141997,0.000421746,0.0593081], - "R": [ - [0.7423074724,-0.1183757606,0.6595201254], - [0.3246236378,0.9245812728,-0.1994215728], - [-0.5861732766,0.362127946,0.7247511576] - ], - "t": [ - [30.16113415], - [163.1800117], - [323.8887405] - ] - }, - { - "name": "15_19", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 19, - "K": [ - [745.415,0,362.511], - [0,745.431,246.567], - [0,0,1] - ], - "distCoef": [-0.31824,0.0392935,0.000511921,2.0382e-05,0.0980721], - "R": [ - [0.7792023734,-0.03485918818,0.6258022837], - [0.250771695,0.9323920084,-0.2603050127], - [-0.5744190268,0.3597637832,0.7352637636] - ], - "t": [ - [-23.21577405], - [116.3982595], - [324.3931588] - ] - }, - { - "name": "15_20", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 20, - "K": [ - [745.757,0,370.457], - [0,745.798,252.296], - [0,0,1] - ], - "distCoef": [-0.322058,0.058259,0.000816175,0.000770211,0.0698692], - "R": [ - [0.7754488131,-0.03297117701,0.6305489986], - [0.2704225106,0.9197540051,-0.2844718542], - [-0.5705705951,0.391108005,0.7221383001] - ], - "t": [ - [-0.5150360293], - [101.3336776], - [328.6175717] - ] - }, - { - "name": "15_21", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 21, - "K": [ - [746.009,0,385.23], - [0,746.113,244.377], - [0,0,1] - ], - "distCoef": [-0.328614,0.0717398,0.00119782,0.000153035,0.0631847], - "R": [ - [0.7150247804,-0.1629175474,0.6798510396], - [0.3900461789,0.9000077369,-0.194550898], - [-0.5801754405,0.4042820134,0.7070732013] - ], - "t": [ - [2.095653738], - [113.9962742], - [330.0144097] - ] - }, - { - "name": "15_22", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 22, - "K": [ - [747.044,0,384.928], - [0,747.43,218.136], - [0,0,1] - ], - "distCoef": [-0.332061,0.0970763,-0.00131827,0.000796644,0.024739], - "R": [ - [0.7476996574,-0.1120966581,0.6545071135], - [0.3349363173,0.9147459603,-0.2259590484], - [-0.5733784838,0.3881677053,0.7215004829] - ], - "t": [ - [-3.202807266], - [138.4357179], - [328.3283502] - ] - }, - { - "name": "15_23", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 23, - "K": [ - [746.525,0,381.586], - [0,746.566,231.744], - [0,0,1] - ], - "distCoef": [-0.323751,0.0809499,0.00143311,0.000786746,0.0334271], - "R": [ - [0.7874675535,-0.04961201835,0.6143561669], - [0.2785108695,0.9178324582,-0.2828697124], - [-0.5498422936,0.3938555906,0.7365807667] - ], - "t": [ - [-21.67007007], - [141.1281207], - [328.549187] - ] - }, - { - "name": "15_24", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 24, - "K": [ - [744.493,0,392.291], - [0,744.573,223.193], - [0,0,1] - ], - "distCoef": [-0.308278,-0.0176562,-0.000671893,0.00116828,0.17277], - "R": [ - [0.7758686755,-0.01407586642,0.6307374005], - [0.2927445364,0.8936390769,-0.3401614861], - [-0.5588635207,0.4485655695,0.6974672] - ], - "t": [ - [-20.05926183], - [105.1778582], - 
[335.8474538] - ] - }, - { - "name": "16_01", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 1, - "K": [ - [745.918,0,380.409], - [0,745.86,226.454], - [0,0,1] - ], - "distCoef": [-0.329171,0.0901569,-0.000500393,-0.000311386,0.0200307], - "R": [ - [0.8121486446,0.04341076946,0.5818333819], - [-0.0759194996,0.9966126489,0.03161419974], - [-0.5784901112,-0.06984792866,0.8126933358] - ], - "t": [ - [55.6088262], - [125.3657692], - [265.9940479] - ] - }, - { - "name": "16_02", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 2, - "K": [ - [747.364,0,392.411], - [0,747.161,225.523], - [0,0,1] - ], - "distCoef": [-0.325367,0.0819479,0.000479765,0.00158774,0.0247525], - "R": [ - [0.8168932447,0.07701494166,0.5716241121], - [-0.08391193553,0.9963702084,-0.01432462351], - [-0.5706524458,-0.03626439747,0.8203905653] - ], - "t": [ - [75.42528996], - [124.1426197], - [270.1790967] - ] - }, - { - "name": "16_03", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 3, - "K": [ - [744.743,0,378.771], - [0,744.551,249.858], - [0,0,1] - ], - "distCoef": [-0.319546,0.0369202,-5.08119e-05,0.00111176,0.115068], - "R": [ - [0.8437113062,0.07102371173,0.5320778742], - [-0.08587784221,0.9963005803,0.003185889303], - [-0.5298832211,-0.04838167055,0.8466894271] - ], - "t": [ - [57.15960424], - [150.0301024], - [271.4615922] - ] - }, - { - "name": "16_04", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 4, - "K": [ - [745.916,0,377.522], - [0,746.078,215.704], - [0,0,1] - ], - "distCoef": [-0.32195,0.0590592,-0.000295617,0.000900619,0.0691531], - "R": [ - [0.8298382679,0.121110683,0.5447023514], - [-0.1306769278,0.9911961099,-0.02130286834], - [-0.5424868568,-0.05350209448,0.8383588349] - ], - "t": [ - [50.00635036], - [157.1807453], - [269.6015294] - ] - }, - { - "name": "16_05", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 5, - "K": [ - [745.303,0,378.655], - [0,745.572,246.962], - [0,0,1] - ], - "distCoef": [-0.315703,0.0277156,6.06815e-05,0.000389915,0.121683], - "R": [ - [0.8187116226,0.05412921644,0.5716478872], - [-0.09011941267,0.9953220251,0.0348218015], - [-0.5670888559,-0.08002558546,0.8197598034] - ], - "t": [ - [44.81120287], - [188.347539], - [263.8787228] - ] - }, - { - "name": "16_06", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 6, - "K": [ - [745.606,0,364.995], - [0,745.957,239.275], - [0,0,1] - ], - "distCoef": [-0.315328,0.0257972,-0.000148911,-0.000553771,0.11289], - "R": [ - [0.8250072615,0.03741598225,0.5638821355], - [-0.06134414867,0.997839028,0.02354080738], - [-0.5617827996,-0.05401220659,0.8255196955] - ], - "t": [ - [18.96573731], - [189.9536973], - [269.3804852] - ] - }, - { - "name": "16_07", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 7, - "K": [ - [748.144,0,375.351], - [0,748.158,222.981], - [0,0,1] - ], - "distCoef": [-0.330846,0.0923667,0.000924419,-0.000952259,0.0155541], - "R": [ - [0.837010476,0.04764620621,0.5451085232], - [-0.06946161724,0.9973944363,0.0194787641], - [-0.542760119,-0.05416804921,0.8381391744] - ], - "t": [ - [-3.044263505], - [177.2440129], - [269.3681033] - ] - }, - { - "name": "16_08", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 8, - "K": [ - [744.865,0,367.243], - [0,744.958,216.687], - [0,0,1] - ], - "distCoef": [-0.318901,0.0494498,-4.02299e-05,-0.00132469,0.0675277], - "R": [ - [0.820488273,0.02086231711,0.571282555], - [-0.05401864215,0.9976917237,0.04114864192], - 
[-0.569105421,-0.06462188605,0.8197213134] - ], - "t": [ - [-19.55260409], - [185.7078501], - [268.0867658] - ] - }, - { - "name": "16_09", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 9, - "K": [ - [747.002,0,387.115], - [0,747.11,221.005], - [0,0,1] - ], - "distCoef": [-0.330535,0.106093,-0.000909516,-0.000158007,-0.000767667], - "R": [ - [0.7988895638,0.03324884852,0.6005580562], - [-0.04929092881,0.9987315997,0.01027599727], - [-0.5994546431,-0.03781145137,0.7995151187] - ], - "t": [ - [-23.46737596], - [164.4653247], - [274.3468777] - ] - }, - { - "name": "16_10", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 10, - "K": [ - [747.13,0,370.332], - [0,747.181,215.13], - [0,0,1] - ], - "distCoef": [-0.317083,0.0321021,0.000973109,0.00011315,0.117938], - "R": [ - [0.8533830718,-0.04475694932,0.5193593633], - [-0.01101437775,0.9945367161,0.1038046423], - [-0.5211679348,-0.09430554471,0.8482278279] - ], - "t": [ - [-57.15311463], - [154.6074069], - [261.7210039] - ] - }, - { - "name": "16_11", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 11, - "K": [ - [743.847,0,352.444], - [0,743.813,257.427], - [0,0,1] - ], - "distCoef": [-0.317406,0.0378558,0.000559662,0.00156409,0.0978841], - "R": [ - [0.8306368039,-0.006305585156,0.5567788965], - [-0.01286906876,0.999451376,0.03051776569], - [-0.5566658666,-0.03251440526,0.8300999496] - ], - "t": [ - [-55.68789985], - [125.5954887], - [272.609285] - ] - }, - { - "name": "16_12", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 12, - "K": [ - [744.746,0,358.295], - [0,744.902,240.075], - [0,0,1] - ], - "distCoef": [-0.311924,0.00313238,0.000282789,0.000109914,0.161883], - "R": [ - [0.8248636519,0.04296544146,0.5636966618], - [-0.06337887364,0.9978500361,0.01668603434], - [-0.5617678116,-0.04949016272,0.8258133262] - ], - "t": [ - [-45.5470475], - [111.3455785], - [270.6081331] - ] - }, - { - "name": "16_13", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 13, - "K": [ - [742.599,0,373.118], - [0,742.696,232.489], - [0,0,1] - ], - "distCoef": [-0.30659,-0.0244311,-0.000674534,-0.000450328,0.198624], - "R": [ - [0.8431633834,0.1596479738,0.5134082522], - [-0.1755645793,0.9843078819,-0.01775026834], - [-0.5081855837,-0.07516992751,0.8579608934] - ], - "t": [ - [-27.27822308], - [119.4613899], - [265.3318331] - ] - }, - { - "name": "16_14", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 14, - "K": [ - [745.804,0,370.921], - [0,745.998,236.13], - [0,0,1] - ], - "distCoef": [-0.32821,0.0986121,-0.000141995,-6.949e-05,-0.000912797], - "R": [ - [0.8387309717,0.02755081107,0.5438486094], - [-0.05712815546,0.9976599438,0.03756341813], - [-0.5415410705,-0.06257467009,0.8383422211] - ], - "t": [ - [-30.56519475], - [90.10611059], - [268.3571691] - ] - }, - { - "name": "16_15", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 15, - "K": [ - [746.816,0,365.456], - [0,746.849,225.794], - [0,0,1] - ], - "distCoef": [-0.313831,-0.00769663,-0.000408313,0.00132145,0.204366], - "R": [ - [0.832563643,0.03033638007,0.5530980784], - [-0.06055031945,0.9974999941,0.03643378343], - [-0.5506100609,-0.06382370879,0.8323191065] - ], - "t": [ - [-6.42740827], - [88.69840867], - [268.7038743] - ] - }, - { - "name": "16_16", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 16, - "K": [ - [745.958,0,362.302], - [0,745.997,246.977], - [0,0,1] - ], - "distCoef": 
[-0.334292,0.102923,-0.000499879,-0.000549652,0.00793805], - "R": [ - [0.8469636173,0.04048111503,0.5301074517], - [-0.08872767491,0.9938758,0.0658657255], - [-0.5241946497,-0.1028210748,0.8453684379] - ], - "t": [ - [4.584618298], - [109.8657875], - [264.6056558] - ] - }, - { - "name": "16_17", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 17, - "K": [ - [743.409,0,347.233], - [0,743.501,244.449], - [0,0,1] - ], - "distCoef": [-0.321337,0.060438,0.000289347,-0.000274585,0.0540146], - "R": [ - [0.8338949711,0.06176137043,0.5484566622], - [-0.07967791451,0.9967809419,0.008898524832], - [-0.5461415633,-0.05112031815,0.8361316319] - ], - "t": [ - [32.73506114], - [91.25662398], - [270.2531272] - ] - }, - { - "name": "16_18", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 18, - "K": [ - [745.291,0,372.769], - [0,745.233,242.994], - [0,0,1] - ], - "distCoef": [-0.333422,0.127228,0.000470045,-0.000171948,-0.0533425], - "R": [ - [0.83476387,0.01583088955,0.5503804723], - [-0.006383142992,0.9997976531,-0.01907638369], - [-0.5505711006,0.01241111862,0.8346960089] - ], - "t": [ - [48.20146308], - [84.31846371], - [276.1979749] - ] - }, - { - "name": "16_19", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 19, - "K": [ - [746.318,0,365.802], - [0,746.439,228.058], - [0,0,1] - ], - "distCoef": [-0.329752,0.106043,0.000413141,0.00102356,-0.00232913], - "R": [ - [0.812564017,0.08482803737,0.576666214], - [-0.09768913876,0.9951785947,-0.008740529432], - [-0.5746273144,-0.04923178609,0.8169330944] - ], - "t": [ - [39.50134988], - [124.7306793], - [269.4016435] - ] - }, - { - "name": "16_20", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 20, - "K": [ - [745.104,0,371.377], - [0,745.158,252.192], - [0,0,1] - ], - "distCoef": [-0.317414,0.0233642,0.000269725,0.000539732,0.145301], - "R": [ - [0.8445515108,0.05428741136,0.5327153297], - [-0.06949119822,0.9975462456,0.00851241329], - [-0.5309460603,-0.04420819807,0.8462516862] - ], - "t": [ - [17.33430135], - [146.0606392], - [271.3134014] - ] - }, - { - "name": "16_21", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 21, - "K": [ - [744.321,0,365.126], - [0,744.44,221.253], - [0,0,1] - ], - "distCoef": [-0.310945,0.00293318,4.64093e-05,-0.000454281,0.146346], - "R": [ - [0.8382052649,0.09941648006,0.5362166515], - [-0.1229674254,0.9923765769,0.008230548616], - [-0.531310593,-0.07283607028,0.8440402601] - ], - "t": [ - [5.636303812], - [160.8368098], - [266.310691] - ] - }, - { - "name": "16_22", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 22, - "K": [ - [745.695,0,387.973], - [0,745.975,222.039], - [0,0,1] - ], - "distCoef": [-0.325844,0.0780224,-0.000861123,0.000487347,0.0459906], - "R": [ - [0.8503320636,-0.003175777979,0.52623692], - [-0.02504000004,0.9986049625,0.04648792516], - [-0.5256504352,-0.05270714583,0.8490662971] - ], - "t": [ - [-29.03965018], - [141.2975723], - [268.9897195] - ] - }, - { - "name": "16_23", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 23, - "K": [ - [746.757,0,385.384], - [0,746.697,250.739], - [0,0,1] - ], - "distCoef": [-0.330103,0.0993513,0.000581277,0.0005991,0.0043047], - "R": [ - [0.8172674448,0.1129970073,0.565071323], - [-0.1204798393,0.992420693,-0.02420281713], - [-0.5635233199,-0.0482995277,0.8246869852] - ], - "t": [ - [1.484048414], - [120.2737991], - [270.3939501] - ] - }, - { - "name": "16_24", - "type": "vga", - "resolution": [640,480], - "panel": 16, - 
"node": 24, - "K": [ - [743.909,0,365.262], - [0,744.1,225.983], - [0,0,1] - ], - "distCoef": [-0.309366,-0.0151251,-0.000569796,0.000128233,0.192772], - "R": [ - [0.8488529257,0.0258708029,0.5279956553], - [-0.02681353424,0.9996232069,-0.005871843729], - [-0.5279486195,-0.009173097852,0.8492267715] - ], - "t": [ - [-1.170097817], - [104.9858918], - [274.723166] - ] - }, - { - "name": "17_01", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 1, - "K": [ - [743.511,0,382.741], - [0,744.07,233.668], - [0,0,1] - ], - "distCoef": [-0.303608,-0.0460126,4.19904e-05,0.000729649,0.232264], - "R": [ - [0.7426987355,0.03664601822,-0.6686222084], - [-0.01756201576,0.9992239229,0.035258014], - [0.6693953719,-0.01444372865,0.742765922] - ], - "t": [ - [27.30884403], - [110.2809812], - [269.7471778] - ] - }, - { - "name": "17_02", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 2, - "K": [ - [744.491,0,371.868], - [0,744.58,223.545], - [0,0,1] - ], - "distCoef": [-0.320104,0.0388113,-0.000303412,-0.00118762,0.0743207], - "R": [ - [0.773334615,0.1038173874,-0.6254402635], - [-0.04654036662,0.9931361468,0.107306049], - [0.6322875671,-0.05387526291,0.7728582591] - ], - "t": [ - [68.17402308], - [125.7906344], - [263.8293382] - ] - }, - { - "name": "17_03", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 3, - "K": [ - [744.096,0,373.775], - [0,744.072,232.317], - [0,0,1] - ], - "distCoef": [-0.314223,0.0332024,-0.000194112,2.11963e-05,0.079313], - "R": [ - [0.7946878724,-0.02084896757,-0.6066601239], - [0.03470365887,0.999335828,0.01111570764], - [0.6060254462,-0.02988684405,0.7948835985] - ], - "t": [ - [55.17367606], - [148.0232969], - [266.1261169] - ] - }, - { - "name": "17_04", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 4, - "K": [ - [748.225,0,373.118], - [0,748.618,236.287], - [0,0,1] - ], - "distCoef": [-0.325852,0.0883394,-0.000431944,-0.00077703,0.0075009], - "R": [ - [0.7874797118,0.07165214706,-0.6121614766], - [-0.03177741847,0.9966185482,0.07577377574], - [0.6155208357,-0.04021739967,0.7870938073] - ], - "t": [ - [46.04066644], - [153.679907], - [265.8341529] - ] - }, - { - "name": "17_05", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 5, - "K": [ - [745.23,0,378.585], - [0,745.614,229.474], - [0,0,1] - ], - "distCoef": [-0.323397,0.071697,-0.000659822,0.000678056,0.0530686], - "R": [ - [0.7680042357,0.04160049173,-0.6390922414], - [0.01355248597,0.9966090615,0.08115854064], - [0.6403013541,-0.07099139161,0.7648361904] - ], - "t": [ - [29.31016003], - [185.453895], - [261.9380867] - ] - }, - { - "name": "17_06", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 6, - "K": [ - [742.876,0,352.101], - [0,743.303,231.794], - [0,0,1] - ], - "distCoef": [-0.319343,0.0421325,-0.000546468,-1.33187e-05,0.10149], - "R": [ - [0.8064347587,0.08751734637,-0.584810819], - [-0.03388642915,0.9942014648,0.1020546777], - [0.5903513275,-0.062483289,0.8047242688] - ], - "t": [ - [35.39857301], - [188.6248332], - [262.8234665] - ] - }, - { - "name": "17_07", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 7, - "K": [ - [745.054,0,358.779], - [0,745.36,231.687], - [0,0,1] - ], - "distCoef": [-0.309912,-0.00132311,-0.00013553,-0.000280643,0.151777], - "R": [ - [0.7882500993,-0.004275732235,-0.615340149], - [0.05540043824,0.996408109,0.06404429605], - [0.612856078,-0.08457303664,0.7856556683] - ], - "t": [ - [-7.246792888], - [183.4614511], - [259.402568] - ] - }, - { - 
"name": "17_08", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 8, - "K": [ - [745.254,0,343.02], - [0,745.689,227.622], - [0,0,1] - ], - "distCoef": [-0.309897,-0.0109758,-0.00111103,0.000256129,0.180098], - "R": [ - [0.7946287881,0.03514926038,-0.6060772382], - [0.01090423253,0.9973351466,0.07213669658], - [0.6069976827,-0.06393070292,0.7921279432] - ], - "t": [ - [-18.41109561], - [184.5517176], - [263.9542066] - ] - }, - { - "name": "17_09", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 9, - "K": [ - [745.379,0,338.137], - [0,745.543,245.392], - [0,0,1] - ], - "distCoef": [-0.314138,0.0142784,0.00088856,-0.00114362,0.123117], - "R": [ - [0.7570044814,0.09852948519,-0.6459381981], - [-0.05745310106,0.9947735679,0.08440787789], - [0.6508789107,-0.02678598925,0.7587088733] - ], - "t": [ - [-40.16389387], - [164.132571], - [267.7674295] - ] - }, - { - "name": "17_10", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 10, - "K": [ - [743.633,0,369.381], - [0,743.739,253.863], - [0,0,1] - ], - "distCoef": [-0.313678,0.00191444,-0.000367883,0.000526793,0.16208], - "R": [ - [0.7732990879,0.03177464522,-0.6332447335], - [0.01440724919,0.9976050167,0.06765102948], - [0.6338777104,-0.06143779407,0.7709892643] - ], - "t": [ - [-41.17430449], - [148.5957101], - [262.973747] - ] - }, - { - "name": "17_11", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 11, - "K": [ - [749.691,0,360.347], - [0,749.465,221.979], - [0,0,1] - ], - "distCoef": [-0.36212,0.288042,0.00167589,0.000680745,-0.303613], - "R": [ - [0.7747984815,0.06051645956,-0.629305229], - [-0.01350572868,0.9967652932,0.07922465313], - [0.6320640066,-0.05288391526,0.7731095544] - ], - "t": [ - [-52.93053536], - [133.9502209], - [264.0833713] - ] - }, - { - "name": "17_12", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 12, - "K": [ - [746.505,0,357.704], - [0,746.569,217.534], - [0,0,1] - ], - "distCoef": [-0.312272,-0.0352904,0.000404412,-0.00107082,0.237629], - "R": [ - [0.7725304823,-0.04233401582,-0.633564902], - [0.05994143841,0.9981814314,0.006391704783], - [0.6321421342,-0.04291457833,0.7736631445] - ], - "t": [ - [-62.64410987], - [104.0188122], - [265.010728] - ] - }, - { - "name": "17_13", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 13, - "K": [ - [745.264,0,354.32], - [0,745.302,226.261], - [0,0,1] - ], - "distCoef": [-0.318398,0.0346929,0.000845692,0.000532231,0.122684], - "R": [ - [0.7851484689,0.03204817868,-0.6184778056], - [-0.002225165301,0.9987996914,0.04893081946], - [0.619303585,-0.03704174263,0.784277361] - ], - "t": [ - [-29.19489341], - [103.2650402], - [265.9795804] - ] - }, - { - "name": "17_14", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 14, - "K": [ - [744.589,0,353.058], - [0,744.664,227.639], - [0,0,1] - ], - "distCoef": [-0.324606,0.0822873,0.00100728,-0.000415736,0.0203245], - "R": [ - [0.7765409088,-0.02900211747,-0.6293989944], - [0.06862390156,0.9968904955,0.03873112579], - [0.6263185908,-0.07326811825,0.7761164898] - ], - "t": [ - [-35.65491372], - [89.93385082], - [261.6973052] - ] - }, - { - "name": "17_15", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 15, - "K": [ - [744.009,0,351.118], - [0,743.982,227.187], - [0,0,1] - ], - "distCoef": [-0.31768,0.0289626,0.000394183,-0.00106594,0.077624], - "R": [ - [0.7703409519,0.009578036972,-0.6375602553], - [0.03762675731,0.9974619202,0.06044786963], - 
[0.6365210484,-0.07055479443,0.7680253746] - ], - "t": [ - [-14.94306331], - [88.85755459], - [261.4804843] - ] - }, - { - "name": "17_16", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 16, - "K": [ - [745.298,0,365.044], - [0,745.641,201.543], - [0,0,1] - ], - "distCoef": [-0.315769,0.0139989,-0.000983596,0.000497246,0.155532], - "R": [ - [0.7668905855,0.04755147693,-0.6400138177], - [0.009922268647,0.9962536216,0.0859084976], - [0.6417011597,-0.07223280706,0.7635457047] - ], - "t": [ - [4.594602528], - [99.8882812], - [261.439958] - ] - }, - { - "name": "17_17", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 17, - "K": [ - [744.772,0,356.238], - [0,744.946,209.811], - [0,0,1] - ], - "distCoef": [-0.307562,-0.0273551,-0.000331097,0.000403566,0.231396], - "R": [ - [0.7386328767,0.1026186384,-0.6662513704], - [-0.03586762178,0.992927984,0.1131703685], - [0.6731530192,-0.05969450264,0.7370899397] - ], - "t": [ - [18.92063539], - [92.1220326], - [263.1909682] - ] - }, - { - "name": "17_18", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 18, - "K": [ - [746.696,0,345.664], - [0,746.883,230.9], - [0,0,1] - ], - "distCoef": [-0.332087,0.135716,-0.000396371,4.15402e-05,-0.0769473], - "R": [ - [0.7676740293,0.0869303765,-0.6349170767], - [-0.05592901251,0.9960646798,0.06875390322], - [0.6383952774,-0.01727030079,0.7695149163] - ], - "t": [ - [48.13164066], - [87.731429], - [267.0873794] - ] - }, - { - "name": "17_19", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 19, - "K": [ - [743.785,0,363.137], - [0,743.962,239.724], - [0,0,1] - ], - "distCoef": [-0.322076,0.0699752,0.00130957,8.28091e-06,0.0447641], - "R": [ - [0.7666015958,0.09362030423,-0.6352615462], - [-0.01827880108,0.9920950944,0.1241499457], - [0.6418628193,-0.08356172708,0.7622529495] - ], - "t": [ - [25.25313987], - [133.2656265], - [259.9680703] - ] - }, - { - "name": "17_20", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 20, - "K": [ - [747.071,0,344.427], - [0,747.404,242.981], - [0,0,1] - ], - "distCoef": [-0.349964,0.20917,0.0008789,-0.000586258,-0.211765], - "R": [ - [0.7775513873,0.03007697302,-0.6280996862], - [-0.01270805589,0.999403059,0.03212523871], - [0.6286909777,-0.01699709801,0.7774694548] - ], - "t": [ - [17.35278566], - [137.2956705], - [269.3773006] - ] - }, - { - "name": "17_21", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 21, - "K": [ - [744.669,0,371.314], - [0,744.881,251.475], - [0,0,1] - ], - "distCoef": [-0.32107,0.0528121,0.000172414,0.000961494,0.0921892], - "R": [ - [0.7854342878,0.01663631847,-0.6187214337], - [0.02446292583,0.9980232337,0.05788946549], - [0.6184614336,-0.06060410764,0.7834746947] - ], - "t": [ - [-1.039205356], - [155.8049723], - [263.425936] - ] - }, - { - "name": "17_22", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 22, - "K": [ - [744.126,0,368.359], - [0,744.205,218.365], - [0,0,1] - ], - "distCoef": [-0.306681,-0.0309893,-0.000506643,-0.000551257,0.209183], - "R": [ - [0.7742934088,0.08491898973,-0.6271032469], - [-0.02171436959,0.9939373135,0.1077826651], - [0.6324541115,-0.06983825553,0.771443073] - ], - "t": [ - [-12.48615074], - [146.2169272], - [261.8070617] - ] - }, - { - "name": "17_23", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 23, - "K": [ - [746.439,0,363.854], - [0,746.575,224.032], - [0,0,1] - ], - "distCoef": [-0.333494,0.127943,0.00111227,0.000376509,-0.0438307], - "R": [ - 
[0.7741360077,0.05745954338,-0.6304060933], - [-0.01777243196,0.9974520988,0.06909016755], - [0.6327697704,-0.04228133707,0.7731847814] - ], - "t": [ - [-14.18178238], - [117.4047924], - [265.0998909] - ] - }, - { - "name": "17_24", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 24, - "K": [ - [745.824,0,346.505], - [0,746.017,224.098], - [0,0,1] - ], - "distCoef": [-0.317434,0.0247137,-0.000866957,0.000304145,0.138958], - "R": [ - [0.7656627697,0.09930116127,-0.6355311184], - [-0.04982185052,0.99419918,0.09531932471], - [0.6413098365,-0.04131912178,0.7661686654] - ], - "t": [ - [7.35512715], - [111.8344509], - [265.0127015] - ] - }, - { - "name": "18_01", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 1, - "K": [ - [744.96,0,372.705], - [0,744.564,226.392], - [0,0,1] - ], - "distCoef": [-0.321978,0.0724692,0.000483988,0.000458946,0.0380169], - "R": [ - [-0.3520669355,0.03279886428,-0.9353999719], - [0.04913052402,0.9986556534,0.01652505738], - [0.9346844732,-0.04013876447,-0.3532050609] - ], - "t": [ - [47.10128491], - [117.3460549], - [266.6541908] - ] - }, - { - "name": "18_02", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 2, - "K": [ - [748.843,0,358.358], - [0,748.813,225.018], - [0,0,1] - ], - "distCoef": [-0.335266,0.148062,0.000634215,-0.00153008,-0.105518], - "R": [ - [-0.3389880085,0.04020239671,-0.9399313259], - [0.04795713663,0.9985260662,0.02541275744], - [0.9395675831,-0.03646179499,-0.3404163544] - ], - "t": [ - [70.51461434], - [125.984952], - [266.5287049] - ] - }, - { - "name": "18_03", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 3, - "K": [ - [746.557,0,370.525], - [0,746.643,239.094], - [0,0,1] - ], - "distCoef": [-0.336876,0.137869,0.0006954,0.000424607,-0.0538424], - "R": [ - [-0.3751735108,0.06869685522,-0.9244055273], - [0.01802710881,0.9976021763,0.06682006625], - [0.9267792942,0.008404759824,-0.3755123165] - ], - "t": [ - [58.58769651], - [133.6261971], - [275.7276294] - ] - }, - { - "name": "18_04", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 4, - "K": [ - [744.71,0,356.151], - [0,744.769,223.97], - [0,0,1] - ], - "distCoef": [-0.312604,0.00791514,0.000747313,-0.000519594,0.158336], - "R": [ - [-0.3438161676,0.01243889994,-0.9389545871], - [0.0251972518,0.9996744288,0.00401683712], - [0.9386988555,-0.02227802162,-0.344017657] - ], - "t": [ - [40.26546697], - [152.0702476], - [270.0686857] - ] - }, - { - "name": "18_05", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 5, - "K": [ - [743.927,0,355.392], - [0,744.057,262.153], - [0,0,1] - ], - "distCoef": [-0.316206,0.0381773,0.00109867,0.000112775,0.102099], - "R": [ - [-0.3913025917,0.04706716523,-0.9190576498], - [0.07535158968,0.9969764632,0.0189755056], - [0.9171719684,-0.0618272904,-0.3936660596] - ], - "t": [ - [27.50168157], - [183.5367771], - [265.1462318] - ] - }, - { - "name": "18_06", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 6, - "K": [ - [744.89,0,353.646], - [0,744.816,246.705], - [0,0,1] - ], - "distCoef": [-0.311434,-0.0151537,0.000898898,0.00113623,0.19919], - "R": [ - [-0.3540366423,0.02766248657,-0.9348223589], - [0.06855079724,0.9976412764,0.003559761167], - [0.9327158432,-0.06282253209,-0.3550978532] - ], - "t": [ - [15.12228299], - [191.0759947], - [263.959739] - ] - }, - { - "name": "18_07", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 7, - "K": [ - [744.21,0,382.066], - [0,744.474,221.564], - [0,0,1] - ], 
- "distCoef": [-0.318836,0.0439442,-0.000310088,0.000693195,0.0844966], - "R": [ - [-0.3784097731,0.01208936744,-0.9255592314], - [0.03775536538,0.9992841689,-0.002383732641], - [0.9248678695,-0.03584685469,-0.3785953341] - ], - "t": [ - [-11.73143391], - [170.7040215], - [268.2801795] - ] - }, - { - "name": "18_08", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 8, - "K": [ - [744.996,0,378.911], - [0,745.249,217.173], - [0,0,1] - ], - "distCoef": [-0.317298,0.0439499,-0.000470842,0.000645598,0.0800391], - "R": [ - [-0.3573644405,-0.02168005213,-0.9337133564], - [0.09030348924,0.9942444419,-0.05764780686], - [0.9295891224,-0.1049188503,-0.3533498244] - ], - "t": [ - [-32.18764663], - [193.5958696], - [255.9258617] - ] - }, - { - "name": "18_09", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 9, - "K": [ - [745.488,0,367.703], - [0,745.136,254.274], - [0,0,1] - ], - "distCoef": [-0.333608,0.117291,0.00107107,0.000590786,-0.0167148], - "R": [ - [-0.3755971335,-0.01611847579,-0.9266428589], - [0.03486308067,0.9988953473,-0.03150636014], - [0.9261270749,-0.0441393233,-0.3746202894] - ], - "t": [ - [-52.11061688], - [162.8813669], - [265.66749] - ] - }, - { - "name": "18_10", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 10, - "K": [ - [746.691,0,377.016], - [0,746.35,247.895], - [0,0,1] - ], - "distCoef": [-0.324348,0.0759263,0.000632098,0.000973799,0.0365142], - "R": [ - [-0.3979832561,-0.05264507275,-0.9158809007], - [0.03842303812,0.9965195246,-0.07397639654], - [0.9165876925,-0.06463229393,-0.3945753015] - ], - "t": [ - [-58.47639535], - [144.7851801], - [261.4908418] - ] - }, - { - "name": "18_11", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 11, - "K": [ - [743.499,0,383.73], - [0,743.269,228.607], - [0,0,1] - ], - "distCoef": [-0.318101,0.0343673,-0.000192972,9.02677e-05,0.0940376], - "R": [ - [-0.3591156591,-0.0799459609,-0.9298626709], - [0.01693912278,0.9956019804,-0.09213990831], - [0.9331393302,-0.04883994185,-0.356182047] - ], - "t": [ - [-65.19666066], - [124.1115675], - [265.1913912] - ] - }, - { - "name": "18_12", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 12, - "K": [ - [744.847,0,377.843], - [0,744.539,240.133], - [0,0,1] - ], - "distCoef": [-0.322594,0.0777366,0.000608553,0.000730506,0.0395492], - "R": [ - [-0.3599917326,-0.04959232233,-0.9316364924], - [0.02914279324,0.9975011607,-0.0643593979], - [0.9325002145,-0.05031934083,-0.3576469123] - ], - "t": [ - [-57.61171896], - [105.5688064], - [264.3974594] - ] - }, - { - "name": "18_13", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 13, - "K": [ - [742.264,0,386.065], - [0,742.375,236.247], - [0,0,1] - ], - "distCoef": [-0.316238,0.0182785,-0.000395794,0.00144239,0.136479], - "R": [ - [-0.3232019546,0.03338047233,-0.9457411066], - [0.05161368011,0.9985119503,0.01760435083], - [0.9449214383,-0.04312341834,-0.324443903] - ], - "t": [ - [61.04698375], - [97.35388185], - [264.1973208] - ] - }, - { - "name": "18_14", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 14, - "K": [ - [744.531,0,362.517], - [0,744.694,222.936], - [0,0,1] - ], - "distCoef": [-0.323155,0.0551,-0.000315217,0.00114443,0.0791805], - "R": [ - [-0.3124904102,0.02154150537,-0.9496766329], - [-0.004629448499,0.999696432,0.02419942065], - [0.9499096335,0.01195856595,-0.3122958229] - ], - "t": [ - [-14.02426098], - [68.46079663], - [270.3325449] - ] - }, - { - "name": "18_15", - "type": "vga", - 
"resolution": [640,480], - "panel": 18, - "node": 15, - "K": [ - [747.429,0,398.562], - [0,747.425,233.615], - [0,0,1] - ], - "distCoef": [-0.333617,0.122405,0.000303778,0.00134383,-0.0202721], - "R": [ - [-0.358025731,-0.0142572014,-0.9336028643], - [0.04081564607,0.9986886699,-0.03090345813], - [0.9328191995,-0.04916983726,-0.3569743242] - ], - "t": [ - [-8.683192747], - [83.02873835], - [264.4620974] - ] - }, - { - "name": "18_16", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 16, - "K": [ - [742.757,0,357.304], - [0,742.66,220.331], - [0,0,1] - ], - "distCoef": [-0.305443,-0.0527047,-0.000521453,0.00022453,0.250047], - "R": [ - [-0.3364590891,0.05374146283,-0.9401633563], - [0.05791647683,0.99766121,0.03630140184], - [0.9399154021,-0.04223701264,-0.3387846981] - ], - "t": [ - [20.062846], - [91.33983095], - [265.2581766] - ] - }, - { - "name": "18_17", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 17, - "K": [ - [750.787,0,361.922], - [0,750.723,216.611], - [0,0,1] - ], - "distCoef": [-0.368257,0.303211,-0.00101236,-0.000679192,-0.335284], - "R": [ - [-0.3521002367,0.0154136189,-0.9358353721], - [0.04957845599,0.9987678018,-0.002203336065], - [0.9346482761,-0.04717306796,-0.3524305629] - ], - "t": [ - [32.75189895], - [90.38015946], - [265.2110414] - ] - }, - { - "name": "18_18", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 18, - "K": [ - [745.69,0,366.196], - [0,745.645,224.452], - [0,0,1] - ], - "distCoef": [-0.325076,0.0695314,0.000207452,8.09151e-05,0.0569118], - "R": [ - [-0.369329094,-0.008664471876,-0.929258278], - [0.06369637747,0.997368813,-0.03461534879], - [0.9271131494,-0.07197484145,-0.3678054246] - ], - "t": [ - [-35.28307581], - [111.055802], - [261.8818226] - ] - }, - { - "name": "18_19", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 19, - "K": [ - [745.552,0,357.301], - [0,745.545,223.113], - [0,0,1] - ], - "distCoef": [-0.320101,0.042192,0.00043748,0.000103204,0.104558], - "R": [ - [-0.3584191226,-0.04877846794,-0.9322855752], - [0.07086164718,0.9943315632,-0.07926770686], - [0.9308675306,-0.09447435344,-0.3529309238] - ], - "t": [ - [16.14340371], - [139.4376601], - [259.6452388] - ] - }, - { - "name": "18_20", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 20, - "K": [ - [746.078,0,363.03], - [0,746.077,221.582], - [0,0,1] - ], - "distCoef": [-0.321359,0.0569666,0.000169599,0.000938787,0.0797635], - "R": [ - [-0.3631410096,0.0448531679,-0.9306539639], - [0.06634832184,0.9975497918,0.02218813063], - [0.9293688758,-0.05368990856,-0.3652271709] - ], - "t": [ - [21.37501917], - [147.345749], - [265.5705493] - ] - }, - { - "name": "18_21", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 21, - "K": [ - [745.043,0,372.293], - [0,745.076,222.901], - [0,0,1] - ], - "distCoef": [-0.317484,0.0404748,0.000192535,-0.000111527,0.0957966], - "R": [ - [-0.3461967977,-0.005928135698,-0.9381431844], - [0.04577092509,0.9986824948,-0.02320122706], - [0.937044716,-0.05097187193,-0.3454693453] - ], - "t": [ - [-0.5259425122], - [153.3372726], - [265.7616305] - ] - }, - { - "name": "18_22", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 22, - "K": [ - [745.252,0,401.788], - [0,745.346,245.295], - [0,0,1] - ], - "distCoef": [-0.315494,0.0267895,-0.000624877,0.000210937,0.0993279], - "R": [ - [-0.3267831921,-0.004575639121,-0.9450882546], - [0.07739750703,0.9964998407,-0.03158628616], - [0.9419248225,-0.08346934224,-0.3252852558] - ], 
- "t": [ - [-10.3938656], - [148.3069178], - [261.1183693] - ] - }, - { - "name": "18_23", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 23, - "K": [ - [747.114,0,358.608], - [0,746.941,217.398], - [0,0,1] - ], - "distCoef": [-0.324507,0.0792141,-0.000227367,0.0013287,0.0357905], - "R": [ - [-0.356358404,-0.03218270054,-0.9337949248], - [0.02645826287,0.9986582749,-0.04451528213], - [0.9339746507,-0.04056998648,-0.3550287707] - ], - "t": [ - [-18.04448695], - [115.7023496], - [266.3010308] - ] - }, - { - "name": "18_24", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 24, - "K": [ - [747.28,0,383.407], - [0,747.414,233.333], - [0,0,1] - ], - "distCoef": [-0.321806,0.0494121,-0.000677773,0.00106862,0.0725344], - "R": [ - [-0.3696831614,0.01690678518,-0.9290040478], - [0.03916078476,0.9992295361,0.002601362608], - [0.9283322644,-0.03541884761,-0.3700604169] - ], - "t": [ - [3.487638933], - [110.8874693], - [266.9764809] - ] - }, - { - "name": "19_01", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 1, - "K": [ - [742.815,0,376.349], - [0,742.96,226.412], - [0,0,1] - ], - "distCoef": [-0.311242,0.000676611,0.00127048,0.000398816,0.145683], - "R": [ - [-0.9986287013,0.0334613179,0.04026235479], - [0.03051664863,0.9969627365,-0.07165218936], - [-0.04253764409,-0.07032526067,-0.99661673] - ], - "t": [ - [47.87451164], - [124.5257469], - [265.3025885] - ] - }, - { - "name": "19_02", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 2, - "K": [ - [746.352,0,362.211], - [0,746.799,224.495], - [0,0,1] - ], - "distCoef": [-0.33354,0.113916,-0.000650978,0.00200875,0.00369896], - "R": [ - [-0.9978769066,0.0627015602,0.01761231284], - [0.06225819076,0.9977547513,-0.02468550225], - [-0.01912058832,-0.02353658189,-0.9995401105] - ], - "t": [ - [76.18899734], - [119.4504319], - [269.470097] - ] - }, - { - "name": "19_03", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 3, - "K": [ - [744.923,0,335.897], - [0,744.843,232.622], - [0,0,1] - ], - "distCoef": [-0.310786,-0.00740435,0.000477261,-0.00048183,0.169837], - "R": [ - [-0.9959217828,0.05942221639,0.06788816328], - [0.05820019172,0.9981077555,-0.01984051806], - [-0.06893866983,-0.0158085,-0.9974956397] - ], - "t": [ - [57.6907282], - [139.716188], - [274.5941587] - ] - }, - { - "name": "19_04", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 4, - "K": [ - [745.3,0,371.455], - [0,745.339,223.979], - [0,0,1] - ], - "distCoef": [-0.316788,0.039021,-0.00160053,-0.000126119,0.09467], - "R": [ - [-0.995350133,0.07444232287,0.06112653567], - [0.06997485872,0.994930028,-0.0722340534], - [-0.06619389658,-0.06762085396,-0.9955128267] - ], - "t": [ - [42.04206067], - [161.4993909], - [266.5642499] - ] - }, - { - "name": "19_05", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 5, - "K": [ - [741.339,0,353.354], - [0,741.563,231.192], - [0,0,1] - ], - "distCoef": [-0.304803,-0.0634451,-0.00114618,-0.000982934,0.282182], - "R": [ - [-0.9964181101,0.07478982294,0.03946431643], - [0.07096423127,0.993341211,-0.09075966339], - [-0.04598943103,-0.08763401739,-0.9950905744] - ], - "t": [ - [45.56899486], - [188.2245222], - [262.1501617] - ] - }, - { - "name": "19_06", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 6, - "K": [ - [745.947,0,350.894], - [0,746.217,234.332], - [0,0,1] - ], - "distCoef": [-0.313212,0.0178381,0.000340441,0.00055626,0.126083], - "R": [ - 
[-0.9969018679,0.07865171151,0.0007576151751], - [0.07854654264,0.9959829876,-0.04299219736], - [-0.004135981729,-0.0427994938,-0.9990751208] - ], - "t": [ - [37.2742824], - [183.4195047], - [270.0123608] - ] - }, - { - "name": "19_07", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 7, - "K": [ - [748.821,0,355.822], - [0,748.684,217.17], - [0,0,1] - ], - "distCoef": [-0.342444,0.16602,-0.000477836,-0.000195363,-0.106824], - "R": [ - [-0.9928808048,-0.04900785176,0.10856306], - [-0.05236016128,0.998228751,-0.02824489671], - [-0.106986546,-0.0337281951,-0.9936882247] - ], - "t": [ - [-31.49326377], - [168.7489309], - [271.4480177] - ] - }, - { - "name": "19_08", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 8, - "K": [ - [747.238,0,359.034], - [0,747.474,233.038], - [0,0,1] - ], - "distCoef": [-0.313675,0.00436645,0.000419802,0.000604189,0.154068], - "R": [ - [-0.9913876468,0.02931278851,0.127637354], - [0.0192008625,0.9966303068,-0.07974558542], - [-0.1295448208,-0.07660804099,-0.9886098055] - ], - "t": [ - [-44.88902211], - [188.5485089], - [261.5304555] - ] - }, - { - "name": "19_09", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 9, - "K": [ - [743.415,0,332.333], - [0,743.715,235.337], - [0,0,1] - ], - "distCoef": [-0.308464,-0.0208585,-0.00102455,0.000256502,0.207947], - "R": [ - [-0.9954977047,0.04566149696,0.08306231217], - [0.04175753042,0.9979670543,-0.04814631117], - [-0.08509188364,-0.04446106523,-0.9953806232] - ], - "t": [ - [-46.35184093], - [166.6378451], - [268.6077116] - ] - }, - { - "name": "19_10", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 10, - "K": [ - [747.206,0,362.728], - [0,747.412,248.496], - [0,0,1] - ], - "distCoef": [-0.340118,0.138855,0.000965068,4.5306e-05,-0.0441245], - "R": [ - [-0.9935175509,0.05252798067,0.1008151146], - [0.05439486481,0.9983935823,0.01585728578], - [-0.09982021218,0.02123831626,-0.9947787991] - ], - "t": [ - [-46.95074625], - [127.5778656], - [276.6370715] - ] - }, - { - "name": "19_11", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 11, - "K": [ - [745.45,0,355.141], - [0,745.641,249.232], - [0,0,1] - ], - "distCoef": [-0.326245,0.10077,0.000216744,-2.37583e-05,-0.0259903], - "R": [ - [-0.9983050345,-0.001439505441,0.05818063101], - [-0.002578079686,0.9998065462,-0.01949932386], - [-0.05814130636,-0.01961626748,-0.9981156198] - ], - "t": [ - [-58.09544547], - [121.7224759], - [272.659258] - ] - }, - { - "name": "19_12", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 12, - "K": [ - [743.805,0,368.42], - [0,744.013,242.015], - [0,0,1] - ], - "distCoef": [-0.323306,0.0785457,-0.00106293,0.000187763,0.0236672], - "R": [ - [-0.9954771119,0.0748660766,0.05848410323], - [0.07512966129,0.9971710788,0.002318097681], - [-0.05814510944,0.006701504052,-0.9982856485] - ], - "t": [ - [-47.8147621], - [97.15541342], - [274.4212668] - ] - }, - { - "name": "19_13", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 13, - "K": [ - [742.693,0,353.966], - [0,742.776,227.014], - [0,0,1] - ], - "distCoef": [-0.307193,-0.0103139,0.000109263,-0.000950495,0.159317], - "R": [ - [-0.9933059489,0.1045971031,0.04901773034], - [0.1016362638,0.9930442478,-0.05944065861], - [-0.05489409585,-0.05406078084,-0.9970276176] - ], - "t": [ - [-21.5323637], - [109.7713479], - [268.3161895] - ] - }, - { - "name": "19_14", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 14, - "K": [ - 
[742.837,0,362.248], - [0,743.502,226.37], - [0,0,1] - ], - "distCoef": [-0.308934,-0.00321353,-0.0010059,0.000705591,0.156528], - "R": [ - [-0.9919154966,0.0987006026,0.07976113456], - [0.09553429302,0.9945144894,-0.04259259489], - [-0.08352751879,-0.03462833131,-0.995903626] - ], - "t": [ - [-30.66946365], - [84.06052642], - [268.8728165] - ] - }, - { - "name": "19_15", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 15, - "K": [ - [742.618,0,345.237], - [0,742.923,230.439], - [0,0,1] - ], - "distCoef": [-0.302695,-0.0546693,-0.000167537,-0.000784726,0.259585], - "R": [ - [-0.9885523252,0.1391044686,0.05843155954], - [0.1381120085,0.9902000007,-0.02071308279], - [-0.06074021267,-0.01240586611,-0.9980765106] - ], - "t": [ - [-1.26146274], - [74.12977283], - [271.0351679] - ] - }, - { - "name": "19_16", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 16, - "K": [ - [744.088,0,370.473], - [0,744.417,231.755], - [0,0,1] - ], - "distCoef": [-0.300902,-0.0664899,-0.000333311,0.000589361,0.253926], - "R": [ - [-0.9917390399,0.06178336486,0.1124121551], - [0.06447509535,0.9977094298,0.02046596672], - [-0.1108902109,0.02754468261,-0.9934508803] - ], - "t": [ - [-3.269853258], - [73.62667861], - [274.8694227] - ] - }, - { - "name": "19_17", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 17, - "K": [ - [745.582,0,373.528], - [0,745.86,237.254], - [0,0,1] - ], - "distCoef": [-0.322134,0.0530706,-0.000603814,0.00101303,0.0846746], - "R": [ - [-0.9897330936,0.1313546283,0.05634150462], - [0.1318000226,0.9912672261,0.00424742025], - [-0.05529156869,0.01162962396,-0.9984025212] - ], - "t": [ - [37.3391924], - [70.20661568], - [273.1392775] - ] - }, - { - "name": "19_18", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 18, - "K": [ - [742.542,0,374.105], - [0,742.758,223.273], - [0,0,1] - ], - "distCoef": [-0.306762,-0.0452572,-0.00032402,-0.000364469,0.245651], - "R": [ - [-0.9920842372,0.1065981921,0.06637538524], - [0.106818653,0.9942784937,-0.0002288198192], - [-0.06602000984,0.006863120707,-0.9977946963] - ], - "t": [ - [52.26513597], - [79.91641464], - [273.9509772] - ] - }, - { - "name": "19_19", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 19, - "K": [ - [744.378,0,361.433], - [0,744.589,244.618], - [0,0,1] - ], - "distCoef": [-0.310422,-0.000364242,-0.000710118,0.000839407,0.169675], - "R": [ - [-0.9919054981,0.126974259,0.001010166835], - [0.1269495258,0.9918188066,-0.01338927975], - [-0.002701996339,-0.01315266,-0.9999098493] - ], - "t": [ - [49.23489662], - [110.9052228], - [271.6142806] - ] - }, - { - "name": "19_20", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 20, - "K": [ - [745.72,0,364.99], - [0,745.913,248.461], - [0,0,1] - ], - "distCoef": [-0.32476,0.0791445,0.000409065,0.000522525,0.0385155], - "R": [ - [-0.9808466558,0.1869185946,0.05478391053], - [0.1851721888,0.9820671342,-0.03543168776], - [-0.06042431929,-0.02460859583,-0.9978693896] - ], - "t": [ - [40.23583817], - [134.9359413], - [272.7493911] - ] - }, - { - "name": "19_21", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 21, - "K": [ - [745.966,0,347.023], - [0,745.905,254.016], - [0,0,1] - ], - "distCoef": [-0.312122,-0.0171046,0.00101358,-9.38575e-05,0.213424], - "R": [ - [-0.9944456328,0.07811965146,0.07053512206], - [0.07435713108,0.9957422838,-0.0544823029], - [-0.07449094204,-0.04893489886,-0.9960203187] - ], - "t": [ - [2.247391851], - [153.0572023], - [268.8284628] 
- ] - }, - { - "name": "19_22", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 22, - "K": [ - [743.607,0,364.935], - [0,743.756,243.53], - [0,0,1] - ], - "distCoef": [-0.311531,0.000696399,0.00010932,-0.000314324,0.159615], - "R": [ - [-0.9924188487,0.09367860135,0.07955594568], - [0.08900119243,0.9941960017,-0.06044086279], - [-0.0847562186,-0.05290207743,-0.9949963586] - ], - "t": [ - [-15.3150092], - [142.5037842], - [267.7211288] - ] - }, - { - "name": "19_23", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 23, - "K": [ - [743.508,0,369.721], - [0,743.449,243.575], - [0,0,1] - ], - "distCoef": [-0.309744,-0.0191119,0.000292611,0.000847107,0.198605], - "R": [ - [-0.9987856124,0.03694807636,0.03259049098], - [0.03470669556,0.9971594314,-0.06684694127], - [-0.03496778135,-0.06563465492,-0.997230839] - ], - "t": [ - [-6.799650163], - [123.3743131], - [267.1549958] - ] - }, - { - "name": "19_24", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 24, - "K": [ - [742.775,0,379.613], - [0,742.864,224.449], - [0,0,1] - ], - "distCoef": [-0.316586,0.0333112,-0.000180777,0.00112675,0.112087], - "R": [ - [-0.9947573056,0.06853183176,0.07590316848], - [0.05765365411,0.9888586451,-0.1372393391], - [-0.08446276764,-0.1321437401,-0.9876254719] - ], - "t": [ - [4.340029177], - [136.5307812], - [258.2193706] - ] - }, - { - "name": "20_01", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 1, - "K": [ - [745.267,0,367.511], - [0,745.253,228.976], - [0,0,1] - ], - "distCoef": [-0.316421,0.0232694,0.000233523,0.00095017,0.129164], - "R": [ - [-0.2595515744,0.03264633198,0.965177288], - [-0.02439656235,0.9988878376,-0.04034718866], - [-0.9654210418,-0.03401918423,-0.2584664527] - ], - "t": [ - [43.91564589], - [114.6472759], - [269.2437955] - ] - }, - { - "name": "20_02", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 2, - "K": [ - [746.737,0,383.621], - [0,746.553,234.139], - [0,0,1] - ], - "distCoef": [-0.330711,0.126048,0.000259954,-0.000232797,-0.067441], - "R": [ - [-0.2600597375,0.03354081135,0.965009817], - [-0.06475754991,0.9965406566,-0.05208818886], - [-0.9634185968,-0.07603771211,-0.2569880808] - ], - "t": [ - [63.03617994], - [136.0112472], - [264.2112923] - ] - }, - { - "name": "20_03", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 3, - "K": [ - [748.567,0,371.842], - [0,748.646,223.378], - [0,0,1] - ], - "distCoef": [-0.332561,0.132401,-0.000978802,0.0010132,-0.0596871], - "R": [ - [-0.2517963519,0.03200567411,0.967250864], - [0.0115205721,0.9994813079,-0.03007310314], - [-0.9677116686,0.003570985655,-0.2520344708] - ], - "t": [ - [55.32226207], - [135.5872215], - [276.5287505] - ] - }, - { - "name": "20_04", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 4, - "K": [ - [747.412,0,375.731], - [0,747.545,213.638], - [0,0,1] - ], - "distCoef": [-0.324984,0.0823763,-0.00190711,0.0010176,0.0382164], - "R": [ - [-0.2864406942,-0.001302983566,0.9580970885], - [-0.1193951903,0.9922525608,-0.03434594761], - [-0.9506295373,-0.1242302613,-0.2843770823] - ], - "t": [ - [40.5108683], - [178.4576708], - [254.9563649] - ] - }, - { - "name": "20_05", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 5, - "K": [ - [747.818,0,377.646], - [0,748.63,232.294], - [0,0,1] - ], - "distCoef": [-0.327048,0.100477,-0.00250563,-0.000951363,0.00505748], - "R": [ - [-0.2682590325,-0.01756457816,0.9631866782], - [-0.1175373506,0.9929607203,-0.014628026], - 
[-0.9561496027,-0.1171345104,-0.2684351761] - ], - "t": [ - [28.10870602], - [198.6254244], - [256.0861594] - ] - }, - { - "name": "20_06", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 6, - "K": [ - [744.281,0,376.164], - [0,744.733,212.764], - [0,0,1] - ], - "distCoef": [-0.314115,0.0261091,-0.00186017,0.000146826,0.111047], - "R": [ - [-0.2995512244,0.02650351378,0.9537120256], - [-0.1164678133,0.9911222418,-0.06412449085], - [-0.9469447251,-0.1302853239,-0.2938050747] - ], - "t": [ - [24.38602287], - [207.7342285], - [252.6787249] - ] - }, - { - "name": "20_07", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 7, - "K": [ - [744.844,0,367.199], - [0,744.885,234.874], - [0,0,1] - ], - "distCoef": [-0.307447,-0.0235368,-0.000447762,-0.000552595,0.198481], - "R": [ - [-0.2246138655,-0.03605175288,0.9737807158], - [-0.1345418425,0.9908917963,0.005651603877], - [-0.965115073,-0.1297448231,-0.2274185059] - ], - "t": [ - [-24.57828512], - [193.807989], - [253.6581871] - ] - }, - { - "name": "20_08", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 8, - "K": [ - [745.265,0,373.297], - [0,745.204,222.406], - [0,0,1] - ], - "distCoef": [-0.322725,0.0753011,-0.00198414,9.48962e-05,0.0496562], - "R": [ - [-0.2740281164,0.007089557403,0.9616955493], - [-0.08615117171,0.9957715968,-0.0318889104], - [-0.9578551911,-0.09158965645,-0.2722586413] - ], - "t": [ - [-24.40184383], - [190.6520913], - [261.5790911] - ] - }, - { - "name": "20_09", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 9, - "K": [ - [743.742,0,376.404], - [0,743.442,252.182], - [0,0,1] - ], - "distCoef": [-0.310951,0.0101818,-0.000165117,0.000699519,0.141452], - "R": [ - [-0.234740558,-0.05401621619,0.9705560874], - [-0.06709368181,0.9969740023,0.03925909634], - [-0.9697398147,-0.05590247913,-0.2376543804] - ], - "t": [ - [-60.89112675], - [163.1020008], - [266.420435] - ] - }, - { - "name": "20_10", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 10, - "K": [ - [746.237,0,381.452], - [0,745.998,235.104], - [0,0,1] - ], - "distCoef": [-0.321635,0.0804606,-0.000793429,0.000500703,0.0308776], - "R": [ - [-0.2327490461,-0.03063038999,0.9720543507], - [-0.1073579574,0.9942045343,0.005622535858], - [-0.9665930636,-0.1030491297,-0.2346885731] - ], - "t": [ - [-52.7687065], - [155.650502], - [258.7092289] - ] - }, - { - "name": "20_11", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 11, - "K": [ - [744.465,0,352.406], - [0,744.368,231.635], - [0,0,1] - ], - "distCoef": [-0.307896,-0.0267024,-0.00138959,-0.000489454,0.213952], - "R": [ - [-0.2568719183,-0.003646201445,0.9664385768], - [-0.06909534804,0.997503196,-0.01460160774], - [-0.9639723287,-0.07052715282,-0.256482495] - ], - "t": [ - [-58.11810551], - [133.8270577], - [264.378006] - ] - }, - { - "name": "20_12", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 12, - "K": [ - [744.557,0,351.376], - [0,744.424,216.683], - [0,0,1] - ], - "distCoef": [-0.317479,0.0158652,-0.000659121,-0.00059258,0.147681], - "R": [ - [-0.2372383683,-0.02274879941,0.9711850744], - [-0.1004253449,0.9949438408,-0.001226302928], - [-0.9662467111,-0.09782252214,-0.2383234094] - ], - "t": [ - [-62.35654103], - [118.4734964], - [259.8400796] - ] - }, - { - "name": "20_13", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 13, - "K": [ - [743.07,0,377.102], - [0,743.158,222.988], - [0,0,1] - ], - "distCoef": 
[-0.29868,-0.0827266,-0.00133003,-0.00119832,0.273178], - "R": [ - [-0.2367527853,-0.03686088138,0.9708704311], - [-0.08746956632,0.9960307636,0.01648614259], - [-0.9676245107,-0.08101847538,-0.2390372628] - ], - "t": [ - [-42.43038274], - [111.3831569], - [262.4188123] - ] - }, - { - "name": "20_14", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 14, - "K": [ - [745.597,0,372.306], - [0,745.414,237.499], - [0,0,1] - ], - "distCoef": [-0.320131,0.0615197,0.00113665,-0.000991542,0.0414761], - "R": [ - [-0.2769894269,0.05383368349,0.9593637433], - [-0.05406721308,0.9959742516,-0.07149843787], - [-0.9593506105,-0.07167443526,-0.2729636999] - ], - "t": [ - [-21.49417033], - [90.7530727], - [264.2254974] - ] - }, - { - "name": "20_15", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 15, - "K": [ - [746.296,0,380.788], - [0,746.161,226.883], - [0,0,1] - ], - "distCoef": [-0.321885,0.0553182,0.000132369,-0.000878491,0.0778662], - "R": [ - [-0.2870302882,0.01079685294,0.9578606588], - [-0.05665486447,0.9979947406,-0.02822630231], - [-0.9562446549,-0.06236926949,-0.2858430237] - ], - "t": [ - [-1.106709776], - [85.82297146], - [264.8070963] - ] - }, - { - "name": "20_16", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 16, - "K": [ - [744.119,0,345.288], - [0,744.112,227.607], - [0,0,1] - ], - "distCoef": [-0.302547,-0.0664079,0.000893953,-0.000627784,0.303861], - "R": [ - [-0.252548592,0.05539030986,0.9659974753], - [-0.08640189331,0.9930807476,-0.07953201617], - [-0.963718798,-0.1035497095,-0.2460153169] - ], - "t": [ - [10.51473419], - [107.4721829], - [260.872486] - ] - }, - { - "name": "20_17", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 17, - "K": [ - [745.831,0,353.784], - [0,745.87,219.754], - [0,0,1] - ], - "distCoef": [-0.321082,0.0599511,-0.000750204,0.000386726,0.0615888], - "R": [ - [-0.3124433364,0.0857084176,0.9460619582], - [-0.03834810703,0.9939715084,-0.1027135007], - [-0.9491620432,-0.06837183409,-0.3072730188] - ], - "t": [ - [50.17882687], - [91.39390134], - [262.9120903] - ] - }, - { - "name": "20_18", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 18, - "K": [ - [745.227,0,385.13], - [0,745.129,233.897], - [0,0,1] - ], - "distCoef": [-0.311291,0.0180828,0.00116452,0.000576614,0.0928398], - "R": [ - [-0.2786751196,0.05379991941,0.9588773365], - [-0.03740853519,0.9970639104,-0.06681437094], - [-0.9596565944,-0.0544896994,-0.2758443282] - ], - "t": [ - [57.04086511], - [98.35557378], - [265.4113916] - ] - }, - { - "name": "20_19", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 19, - "K": [ - [746.424,0,373.724], - [0,746.378,215.089], - [0,0,1] - ], - "distCoef": [-0.317589,0.0452179,0.000839363,0.00087423,0.0858828], - "R": [ - [-0.2053627335,-0.023863444,0.9783949528], - [-0.1366627843,0.9906072975,-0.004523879826], - [-0.9690972248,-0.1346392148,-0.2066950671] - ], - "t": [ - [2.454839771], - [148.020868], - [256.5149472] - ] - }, - { - "name": "20_20", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 20, - "K": [ - [744.35,0,378.361], - [0,744.386,245.706], - [0,0,1] - ], - "distCoef": [-0.305792,-0.0298413,-5.26611e-05,9.57392e-05,0.206854], - "R": [ - [-0.2653224987,0.04663873586,0.9630310483], - [-0.08123292055,0.9941966424,-0.07052835541], - [-0.9607315881,-0.09694258412,-0.2599941366] - ], - "t": [ - [23.42848118], - [157.616994], - [260.7931406] - ] - }, - { - "name": "20_21", - "type": "vga", - "resolution": 
[640,480], - "panel": 20, - "node": 21, - "K": [ - [747.371,0,368.768], - [0,747.344,231.897], - [0,0,1] - ], - "distCoef": [-0.308946,-0.0139041,-0.000755627,-0.000244894,0.190547], - "R": [ - [-0.2375675449,-0.01520768023,0.9712519694], - [-0.09352440886,0.9955903179,-0.007287238765], - [-0.966858235,-0.09256697771,-0.2379422368] - ], - "t": [ - [-12.76210059], - [163.3748289], - [261.1782343] - ] - }, - { - "name": "20_22", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 22, - "K": [ - [746.314,0,371.788], - [0,745.992,237.732], - [0,0,1] - ], - "distCoef": [-0.315167,0.0352154,-0.000828301,0.000312219,0.0891012], - "R": [ - [-0.2145858088,0.0004599306573,0.9767050318], - [-0.07749764501,0.9968390076,-0.017495939], - [-0.9736257216,-0.07944672006,-0.2138718611] - ], - "t": [ - [-33.0373727], - [146.3668194], - [262.1626174] - ] - }, - { - "name": "20_23", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 23, - "K": [ - [746.318,0,371.868], - [0,746.096,236.531], - [0,0,1] - ], - "distCoef": [-0.318459,0.0405311,0.000489761,-0.000285822,0.0876741], - "R": [ - [-0.2554085937,0.004734611177,0.9668216142], - [-0.07039835709,0.9972425561,-0.02348096154], - [-0.9642668311,-0.0740598926,-0.25437101] - ], - "t": [ - [-17.40671779], - [124.2252344], - [264.0602836] - ] - }, - { - "name": "20_24", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 24, - "K": [ - [745.832,0,382.965], - [0,745.816,231.317], - [0,0,1] - ], - "distCoef": [-0.320385,0.0446211,0.00028801,0.00167617,0.104376], - "R": [ - [-0.2362773498,-0.02089730322,0.9714609188], - [-0.1013714927,0.9948433166,-0.003255144035], - [-0.9663833786,-0.09924756028,-0.2371773332] - ], - "t": [ - [-5.093436327], - [126.6662443], - [260.9183094] - ] - }, - { - "name": "00_00", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 0, - "K": [ - [1634.03,0,942.792], - [0,1629.73,558.29], - [0,0,1] - ], - "distCoef": [-0.222445,0.199192,8.73054e-05,0.000982243,0.0238445], - "R": [ - [0.1369296663,0.03357591931,-0.9900115778], - [-0.09021094677,0.9956950625,0.02129149064], - [0.9864645212,0.08639444504,0.1393691081] - ], - "t": [ - [20.90028135], - [127.2202879], - [283.1159034] - ] - }, - { - "name": "00_01", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 1, - "K": [ - [1395.91,0,951.559], - [0,1392.24,561.398], - [0,0,1] - ], - "distCoef": [-0.286227,0.183082,-4.29815e-05,0.000644874,-0.0479635], - "R": [ - [0.05337497606,0.02479711619,0.9982666052], - [0.6376765256,0.7684660834,-0.05318390075], - [-0.7684528356,0.6394098699,0.0252043199] - ], - "t": [ - [6.299256813], - [104.397182], - [363.078698] - ] - }, - { - "name": "00_02", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 2, - "K": [ - [1397.02,0,939.355], - [0,1394.04,556.611], - [0,0,1] - ], - "distCoef": [-0.28229,0.173658,-0.000610716,0.000955319,-0.0398628], - "R": [ - [-0.9970491806,0.05290586318,-0.05562284625], - [-0.01182874156,0.6100448884,0.792278559], - [0.07584861407,0.7905986364,-0.6076189463] - ], - "t": [ - [-16.22360931], - [63.30660163], - [381.0181823] - ] - }, - { - "name": "00_03", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 3, - "K": [ - [1395.71,0,949.456], - [0,1392.06,566.648], - [0,0,1] - ], - "distCoef": [-0.281728,0.168097,-0.00021431,1.8072e-05,-0.0371786], - "R": [ - [-0.6216465312,-0.0285781748,0.7827763909], - [0.07448493547,0.9926490654,0.09539301533], - [-0.7797484111,0.117605786,-0.6149482047] - ], - "t": [ - 
[-14.50346059], - [117.4297203], - [290.1984382] - ] - }, - { - "name": "00_04", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 4, - "K": [ - [1633.26,0,949.479], - [0,1629.32,572.374], - [0,0,1] - ], - "distCoef": [-0.223003,0.185095,-0.000261654,0.00109433,0.0657602], - "R": [ - [-0.5292732399,-0.01229259603,0.8483623811], - [0.636650989,0.6551966806,0.4066851706], - [-0.5608434325,0.7553583268,-0.3389519765] - ], - "t": [ - [-5.411400695], - [80.12176746], - [379.8488129] - ] - }, - { - "name": "00_05", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 5, - "K": [ - [1396.29,0,933.34], - [0,1392.95,560.462], - [0,0,1] - ], - "distCoef": [-0.28733,0.185523,-0.000225825,-0.000143128,-0.0508452], - "R": [ - [-0.9314658579,-0.01073438439,-0.363670357], - [-0.021313424,0.9994579907,0.02508909603], - [0.3632039283,0.03112069687,-0.9311897813] - ], - "t": [ - [-6.050515741], - [143.9213951], - [280.3813532] - ] - }, - { - "name": "00_06", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 6, - "K": [ - [1396.11,0,950.228], - [0,1392.54,548.78], - [0,0,1] - ], - "distCoef": [-0.286481,0.183173,-0.000152555,0.0010664,-0.0482263], - "R": [ - [0.9448241112,-0.04876703013,-0.3239277321], - [-0.2141569626,0.6563150135,-0.7234551806], - [0.2478793944,0.7529092773,0.6096584503] - ], - "t": [ - [-10.023614], - [84.45695974], - [376.925635] - ] - }, - { - "name": "00_07", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 7, - "K": [ - [1395.51,0,947.67], - [0,1392.41,549.081], - [0,0,1] - ], - "distCoef": [-0.286691,0.185163,-6.53256e-05,4.32858e-06,-0.052639], - "R": [ - [-0.9419632708,-0.03700247277,0.3336705164], - [0.180351898,0.7825307202,0.5959185052], - [-0.2831578878,0.6215114552,-0.7304417305] - ], - "t": [ - [-5.250326149], - [112.5645453], - [360.2387508] - ] - }, - { - "name": "00_08", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 8, - "K": [ - [1642.7,0,945.082], - [0,1638.64,562.465], - [0,0,1] - ], - "distCoef": [-0.22444,0.208938,-0.000569838,0.000484927,0.0287248], - "R": [ - [0.9544726119,0.01685383959,-0.2978220632], - [-0.03362017317,0.9981191009,-0.05126347965], - [0.2963979035,0.05894241665,0.9532439742] - ], - "t": [ - [-19.67808464], - [136.6798831], - [282.6801175] - ] - }, - { - "name": "00_09", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 9, - "K": [ - [1396.79,0,945.482], - [0,1393.03,542.64], - [0,0,1] - ], - "distCoef": [-0.284259,0.175176,-0.000406823,0.000640552,-0.0406716], - "R": [ - [-0.3169419478,-0.08460972789,0.9446634298], - [-0.1243350249,0.9911238917,0.04705563528], - [-0.9402598595,-0.1025408464,-0.3246486894] - ], - "t": [ - [6.780958613], - [147.0057696], - [260.6395044] - ] - }, - { - "name": "00_10", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 10, - "K": [ - [1393.87,0,944.546], - [0,1390.36,563.199], - [0,0,1] - ], - "distCoef": [-0.285353,0.177704,-0.000109708,0.000471392,-0.0432146], - "R": [ - [0.9503475669,0.04849461332,0.3073886376], - [0.1560494297,0.7803459045,-0.6055648973], - [-0.2692360999,0.6234649483,0.734032275] - ], - "t": [ - [22.71992555], - [112.7759402], - [360.0009328] - ] - }, - { - "name": "00_11", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 11, - "K": [ - [1492.96,0,934.544], - [0,1489.74,547.466], - [0,0,1] - ], - "distCoef": [-0.259288,0.190057,-5.50625e-05,0.00031915,-0.0281283], - "R": [ - [0.8129763959,0.04080422416,-0.5808652124], - 
[-0.2848486357,0.8979062573,-0.3355973896], - [0.5078687177,0.4382914196,0.7415996205] - ], - "t": [ - [-0.03199165418], - [105.1487628], - [331.4862369] - ] - }, - { - "name": "00_12", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 12, - "K": [ - [1395.93,0,964.611], - [0,1392.67,564.875], - [0,0,1] - ], - "distCoef": [-0.290995,0.19463,-0.000241491,0.000727782,-0.0582663], - "R": [ - [-0.9950957343,0.04321912909,-0.08897520145], - [-0.001969290489,0.8906636271,0.454658581], - [0.09889692354,0.4526040326,-0.886210465] - ], - "t": [ - [24.66653867], - [97.49188585], - [334.8897626] - ] - }, - { - "name": "00_13", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 13, - "K": [ - [1592.21,0,937.375], - [0,1588.39,560.919], - [0,0,1] - ], - "distCoef": [-0.239248,0.229218,0.000137317,0.000315934,-0.0358302], - "R": [ - [-0.2862766934,0.07452649614,-0.9552441867], - [-0.7557457469,0.5952786327,0.2729317047], - [0.588977097,0.8000557173,-0.1140913162] - ], - "t": [ - [-15.47943966], - [60.20818768], - [381.0821849] - ] - }, - { - "name": "00_14", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 14, - "K": [ - [1649.51,0,934.882], - [0,1644.85,568.024], - [0,0,1] - ], - "distCoef": [-0.22365,0.220791,-0.000591343,0.000286172,0.0121962], - "R": [ - [0.827339054,-0.07848137689,0.5561930989], - [0.02005408661,0.9936867625,0.110383204], - [-0.5613447456,-0.08017039095,0.8236897383] - ], - "t": [ - [-7.23447972], - [142.1657406], - [267.9541185] - ] - }, - { - "name": "00_15", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 15, - "K": [ - [1430.11,0,948.926], - [0,1426.48,561.705], - [0,0,1] - ], - "distCoef": [-0.277948,0.185701,0.000192514,0.000149713,-0.0424254], - "R": [ - [-0.9997414125,0.006454955712,0.02180462522], - [0.005192647027,0.9983342904,-0.05746025644], - [-0.02213920846,-0.05733217422,-0.9981096519] - ], - "t": [ - [9.642162177], - [134.9258555], - [268.2324221] - ] - }, - { - "name": "00_16", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 16, - "K": [ - [1427.34,0,949.618], - [0,1423.13,548.132], - [0,0,1] - ], - "distCoef": [-0.279453,0.188683,-0.000345265,0.000583475,-0.0479414], - "R": [ - [0.7694875517,0.002369830201,0.6386574134], - [0.2539259376,0.9164213706,-0.3093436433], - [-0.586012394,0.4002077652,0.7045730755] - ], - "t": [ - [4.866150988], - [118.1652356], - [330.6340665] - ] - }, - { - "name": "00_17", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 17, - "K": [ - [1393.35,0,916.395], - [0,1390.34,563.652], - [0,0,1] - ], - "distCoef": [-0.287138,0.186145,7.50854e-05,0.000557424,-0.0513205], - "R": [ - [0.5039250676,0.09465184024,-0.8585456047], - [-0.6050310345,0.7480627966,-0.2726527087], - [0.6164389455,0.6568432701,0.4342348962] - ], - "t": [ - [18.2296155], - [97.71531857], - [361.6667015] - ] - }, - { - "name": "00_18", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 18, - "K": [ - [1542.2,0,947.567], - [0,1538.02,555.168], - [0,0,1] - ], - "distCoef": [-0.245751,0.182006,3.81269e-06,0.000651097,0.00472657], - "R": [ - [-0.4048875531,-0.001022756131,0.9143659133], - [0.3656410889,0.9163838146,0.1629334173], - [-0.8380767647,0.4002994608,-0.3706584387] - ], - "t": [ - [16.25260358], - [116.7586119], - [329.7529305] - ] - }, - { - "name": "00_19", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 19, - "K": [ - [1396.57,0,949.242], - [0,1393.19,554.872], - [0,0,1] - ], - "distCoef": 
[-0.280864,0.167216,-6.6519e-05,0.000917406,-0.0342733], - "R": [ - [0.7360342296,0.009501079563,0.6768776421], - [0.5173282683,0.6370082142,-0.5714822813], - [-0.4366063167,0.7707984591,0.4639446731] - ], - "t": [ - [-24.15514071], - [74.04862943], - [379.5076537] - ] - }, - { - "name": "00_20", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 20, - "K": [ - [1403.46,0,940.386], - [0,1400.1,552.684], - [0,0,1] - ], - "distCoef": [-0.287177,0.194004,-0.000120001,8.41526e-05,-0.0604614], - "R": [ - [-0.6201222217,0.04052054618,-0.7834580496], - [-0.1302964194,0.9794749929,0.1537907063], - [0.773609251,0.1974508131,-0.6021145267] - ], - "t": [ - [24.4496252], - [140.6900046], - [300.8290806] - ] - }, - { - "name": "00_21", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 21, - "K": [ - [1397.56,0,932.828], - [0,1393.91,562.186], - [0,0,1] - ], - "distCoef": [-0.28642,0.185674,-0.000229601,1.91211e-05,-0.052608], - "R": [ - [-0.2617478675,-0.05032313647,-0.9638234464], - [-0.4532392419,0.8880813121,0.07671878938], - [0.8520928608,0.4569235877,-0.2552618099] - ], - "t": [ - [-8.784671236], - [98.11062797], - [332.9193692] - ] - }, - { - "name": "00_22", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 22, - "K": [ - [1514.1,0,945.861], - [0,1510.18,558.694], - [0,0,1] - ], - "distCoef": [-0.260535,0.216046,-0.000156491,0.000677315,-0.0506741], - "R": [ - [-0.9239818557,-0.0613765916,0.3774790647], - [0.05486070575,0.9555572213,0.289656175], - [-0.3784809549,0.288345818,-0.8795503715] - ], - "t": [ - [-5.224239691], - [110.7456244], - [313.8855054] - ] - }, - { - "name": "00_23", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 23, - "K": [ - [1572.86,0,941.716], - [0,1568.17,560.048], - [0,0,1] - ], - "distCoef": [-0.240801,0.195963,-0.000444179,0.000458513,0.00455186], - "R": [ - [0.5162966551,0.01335424781,0.856305686], - [0.1418829708,0.9847272537,-0.100903213], - [-0.8445750331,0.173591186,0.506516647] - ], - "t": [ - [2.417701344], - [102.3557555], - [298.3746617] - ] - }, - { - "name": "00_24", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 24, - "K": [ - [1399.63,0,954.539], - [0,1396.27,546.388], - [0,0,1] - ], - "distCoef": [-0.288761,0.190789,4.23479e-05,6.78832e-05,-0.0577764], - "R": [ - [-0.388991142,-0.05987834367,-0.9192934653], - [0.02928793432,0.9965772059,-0.07730517199], - [0.9207758187,-0.05699523376,-0.3859059924] - ], - "t": [ - [-15.12220678], - [134.1751339], - [265.239245] - ] - }, - { - "name": "00_25", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 25, - "K": [ - [1397.66,0,935.585], - [0,1394.65,559.251], - [0,0,1] - ], - "distCoef": [-0.285722,0.183994,-0.000502702,0.000494145,-0.0515729], - "R": [ - [0.7926422733,0.00130484237,-0.6096855943], - [0.04487405742,0.9971605675,0.06047414042], - [0.6080333424,-0.07529342651,0.7903330655] - ], - "t": [ - [4.539475053], - [139.2223569], - [261.6293171] - ] - }, - { - "name": "00_26", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 26, - "K": [ - [1616.8,0,950.116], - [0,1613.47,551.417], - [0,0,1] - ], - "distCoef": [-0.223464,0.185279,-0.00090721,0.000127112,0.0351947], - "R": [ - [-0.7556190155,-0.04350579001,-0.6535649545], - [0.1389994774,0.9644159151,-0.2249023966], - [0.6400930001,-0.2607857146,-0.7226837222] - ], - "t": [ - [-12.5475419], - [141.1612209], - [240.8579734] - ] - }, - { - "name": "00_27", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 
27, - "K": [ - [1861.86,0,934.556], - [0,1857.26,552.106], - [0,0,1] - ], - "distCoef": [-0.171511,0.209759,-1.83176e-05,-3.41566e-05,0.211418], - "R": [ - [0.9782876177,0.02697940456,0.2054883178], - [0.02691509764,0.9665557486,-0.2550403151], - [-0.2054967507,0.2550335204,0.9448433674] - ], - "t": [ - [-0.5131666478], - [123.4498457], - [311.6401591] - ] - }, - { - "name": "00_28", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 28, - "K": [ - [1395.57,0,953.143], - [0,1392.36,561.982], - [0,0,1] - ], - "distCoef": [-0.284934,0.181016,0.000127361,0.000271191,-0.0471616], - "R": [ - [-0.6310677524,-0.02949081954,-0.775166939], - [-0.5128354354,0.7656140117,0.3883748207], - [0.5820251782,0.6426238999,-0.4982782509] - ], - "t": [ - [-8.508070023], - [104.2896072], - [361.3816814] - ] - }, - { - "name": "00_29", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 29, - "K": [ - [1400.36,0,939.608], - [0,1397.25,572.603], - [0,0,1] - ], - "distCoef": [-0.286109,0.1878,-0.000309515,0.000886248,-0.0523515], - "R": [ - [0.4887300705,-0.07268882749,-0.8694016635], - [-0.08227020668,0.9882426049,-0.1288726774], - [0.8685473685,0.1345098073,0.4770037531] - ], - "t": [ - [-20.72850042], - [158.8912224], - [289.281465] - ] - }, - { - "name": "00_30", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 30, - "K": [ - [1407.21,0,946.883], - [0,1403.86,563.032], - [0,0,1] - ], - "distCoef": [-0.285813,0.195568,-0.000394067,0.000468367,-0.0600751], - "R": [ - [0.08635045426,0.06174190292,0.9943498059], - [0.2147800801,0.9734543185,-0.07909618832], - [-0.9728376618,0.2203965227,0.07079729175] - ], - "t": [ - [13.79078928], - [132.1300437], - [306.0754676] - ] - }, - { - "name": "50_01", - "type": "kinect-color", - "resolution": [1920,1080], - "panel": 50, - "node": 1, - "K": [ - [1053.92,0,947.294], - [0,1054.32,535.405], - [0,0,1] - ], - "distCoef": [0.0476403,-0.053786,0.000733314,-0.000579648,0.0122759], - "R": [ - [0.9095307192,0.0006254166507,-0.4156362348], - [-0.003349684277,0.999977422,-0.0058253781], - [0.4156232073,0.006690610494,0.9095122788] - ], - "t": [ - [-15.84850815], - [103.1392168], - [269.3362326] - ] - }, - { - "name": "50_02", - "type": "kinect-color", - "resolution": [1920,1080], - "panel": 50, - "node": 2, - "K": [ - [1058.92,0,971.224], - [0,1059.3,541.276], - [0,0,1] - ], - "distCoef": [0.0485216,-0.0529886,-0.000413578,-0.000171659,0.00909728], - "R": [ - [-0.08404700998,-0.006825065684,-0.9964384169], - [-0.04073006897,0.9991643735,-0.003408260769], - [0.9956290281,0.04029855131,-0.08425476347] - ], - "t": [ - [-4.246538185], - [93.69672118], - [271.0169727] - ] - }, - { - "name": "50_03", - "type": "kinect-color", - "resolution": [1920,1080], - "panel": 50, - "node": 3, - "K": [ - [1050.35,0,971.069], - [0,1050.88,535.343], - [0,0,1] - ], - "distCoef": [0.0482196,-0.0555053,0.000460862,0.000594278,0.0128034], - "R": [ - [-0.9791929995,-0.0009192386581,-0.2029291126], - [0.004325206908,0.9996680429,-0.02539875018], - [0.2028850964,-0.02574798878,-0.9788639736] - ], - "t": [ - [-10.71273011], - [112.0293664], - [269.2258843] - ] - }, - { - "name": "50_04", - "type": "kinect-color", - "resolution": [1920,1080], - "panel": 50, - "node": 4, - "K": [ - [1053.76,0,952.563], - [0,1053.62,535.073], - [0,0,1] - ], - "distCoef": [0.0534802,-0.059505,0.000265754,-0.00038559,0.0128987], - "R": [ - [-0.4973721867,-0.01252789009,0.8674468052], - [-0.05725964091,0.9981894693,-0.01841512904], - [-0.8656455634,-0.05882886558,-0.4971890215] 
- ], - "t": [ - [-12.12207689], - [119.639642], - [263.8142799] - ] - }, - { - "name": "50_05", - "type": "kinect-color", - "resolution": [1920,1080], - "panel": 50, - "node": 5, - "K": [ - [1061.53,0,963.346], - [0,1061.99,535.689], - [0,0,1] - ], - "distCoef": [0.0450742,-0.0483577,0.000117724,0.00131017,0.00746483], - "R": [ - [0.6332975321,0.02789684006,0.7734054578], - [-0.04440403331,0.9990136015,0.0003253688515], - [-0.772633495,-0.034548377,0.6339115806] - ], - "t": [ - [4.398197962], - [114.449943], - [269.0646085] - ] - }, - { - "name": "50_06", - "type": "kinect-color", - "resolution": [1920,1080], - "panel": 50, - "node": 6, - "K": [ - [1053.8,0,975.87], - [0,1054.44,518.546], - [0,0,1] - ], - "distCoef": [0.0608578,-0.0758877,0.000572907,0.000423304,0.0232485], - "R": [ - [0.9936973916,-0.01776547634,0.1106791841], - [0.08238304881,0.7853099766,-0.6135969963], - [-0.07601662453,0.6188478234,0.7818240495] - ], - "t": [ - [-23.36095562], - [58.01362542], - [350.0526212] - ] - }, - { - "name": "50_07", - "type": "kinect-color", - "resolution": [1920,1080], - "panel": 50, - "node": 7, - "K": [ - [1058.37,0,951.456], - [0,1058.06,537.752], - [0,0,1] - ], - "distCoef": [0.0510704,-0.0625189,-0.000144014,6.68608e-05,0.016463], - "R": [ - [0.4325769754,-0.03234243573,-0.9010167186], - [-0.4868424381,0.832758343,-0.2636247005], - [0.7588554545,0.5526911516,0.344486415] - ], - "t": [ - [-19.0385587], - [87.13576568], - [341.2560709] - ] - }, - { - "name": "50_08", - "type": "kinect-color", - "resolution": [1920,1080], - "panel": 50, - "node": 8, - "K": [ - [1051.92,0,937.937], - [0,1051.86,554.246], - [0,0,1] - ], - "distCoef": [0.0499863,-0.0613843,-4.12419e-05,-0.000155211,0.0174279], - "R": [ - [-0.7043873056,-0.07078753835,-0.7062773168], - [-0.4398115151,0.8245196459,0.3559960458], - [0.5571394394,0.5613879923,-0.6119143463] - ], - "t": [ - [-21.03532832], - [82.26745729], - [344.5100871] - ] - }, - { - "name": "50_09", - "type": "kinect-color", - "resolution": [1920,1080], - "panel": 50, - "node": 9, - "K": [ - [1054,0,961.563], - [0,1054.08,544.179], - [0,0,1] - ], - "distCoef": [0.0446773,-0.0530941,0.000226286,-0.000324258,0.0121913], - "R": [ - [-0.8728623151,-0.0989156561,0.4778358211], - [0.2068965126,0.8118396582,0.5459946908], - [-0.4419334927,0.5754407548,-0.6881589393] - ], - "t": [ - [-36.30074608], - [73.0041962], - [346.5857858] - ] - }, - { - "name": "50_10", - "type": "kinect-color", - "resolution": [1920,1080], - "panel": 50, - "node": 10, - "K": [ - [1050.04,0,941.59], - [0,1050.6,559.398], - [0,0,1] - ], - "distCoef": [0.0506861,-0.0636966,0.000195295,-6.41025e-06,0.0181857], - "R": [ - [0.1849149694,0.002001709126,0.9827524852], - [0.5894867579,0.7998990427,-0.1125472514], - [-0.786328059,0.6001312479,0.146733326] - ], - "t": [ - [-12.26435316], - [64.88453925], - [349.5293231] - ] - } - ] -} +{ + "calibDataSource": "160906_calib_norm", + "cameras": [ + { + "name": "01_01", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 1, + "K": [ + [745.698,0,375.512], + [0,745.89,226.023], + [0,0,1] + ], + "distCoef": [-0.324009,0.0732398,-0.000601245,0.000808154,0.0311011], + "R": [ + [0.9609979695,0.02878724306,-0.2750530807], + [-0.05024448072,0.9961896773,-0.07128547526], + [0.2719529274,0.08232509619,0.9587826572] + ], + "t": [ + [-51.56945892], + [143.9587601], + [282.5664691] + ] + }, + { + "name": "01_02", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 2, + "K": [ + [745.462,0,369.225], + [0,745.627,226.687], + [0,0,1] + ], + 
"distCoef": [-0.336594,0.141798,-0.000612176,0.000160485,-0.0646767], + "R": [ + [0.9715220842,-0.01574832828,-0.2364251047], + [0.005323209906,0.998987679,-0.04466856407], + [0.2368892218,0.042137956,0.9706224236] + ], + "t": [ + [-66.22242206], + [142.1317177], + [278.6626087] + ] + }, + { + "name": "01_03", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 3, + "K": [ + [746.261,0,378.952], + [0,746.496,239.595], + [0,0,1] + ], + "distCoef": [-0.322069,0.0440329,-0.000951664,0.000892653,0.103376], + "R": [ + [0.9665011873,0.05534363601,-0.2506242943], + [-0.07024277085,0.996230894,-0.05089164033], + [0.2468631364,0.06679137568,0.9667458322] + ], + "t": [ + [-54.75524211], + [118.3584455], + [281.78809] + ] + }, + { + "name": "01_04", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 4, + "K": [ + [747.661,0,366.929], + [0,747.759,234.022], + [0,0,1] + ], + "distCoef": [-0.32333,0.0462607,-0.000972333,-0.000898261,0.102804], + "R": [ + [0.9662588837,0.08601234823,-0.2427872436], + [-0.1112831564,0.9894890375,-0.09234448444], + [0.23229255,0.1162468093,0.9656742984] + ], + "t": [ + [-29.08626445], + [96.75744843], + [287.7183779] + ] + }, + { + "name": "01_05", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 5, + "K": [ + [742.413,0,353.224], + [0,742.622,209.478], + [0,0,1] + ], + "distCoef": [-0.297729,-0.0985766,-0.000505185,-0.000773418,0.328727], + "R": [ + [0.9718071292,0.05098345905,-0.2301990238], + [-0.07271497659,0.9935575811,-0.0869244798], + [0.2242842746,0.1012127458,0.9692536016] + ], + "t": [ + [-26.91018729], + [77.97642882], + [285.7140393] + ] + }, + { + "name": "01_06", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 6, + "K": [ + [743.487,0,372.277], + [0,743.725,241.821], + [0,0,1] + ], + "distCoef": [-0.317534,0.0281748,0.00130284,-0.000186889,0.119129], + "R": [ + [0.9681278444,0.07458666466,-0.2390926732], + [-0.09383510211,0.9931135585,-0.07014580141], + [0.2322142341,0.09034538891,0.968459736] + ], + "t": [ + [-7.038020326], + [73.51221006], + [284.7303027] + ] + }, + { + "name": "01_07", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 7, + "K": [ + [748.393,0,380.919], + [0,748.388,229.353], + [0,0,1] + ], + "distCoef": [-0.344193,0.174813,-0.00034307,0.00107023,-0.0968505], + "R": [ + [0.9670535143,-0.02995409712,-0.2528047715], + [0.01712365053,0.9984582116,-0.0528013286], + [0.2539966162,0.04673276982,0.9660754459] + ], + "t": [ + [-4.52170598], + [98.55800179], + [280.6705064] + ] + }, + { + "name": "01_08", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 8, + "K": [ + [745.37,0,362.362], + [0,745.56,217.483], + [0,0,1] + ], + "distCoef": [-0.326014,0.0789588,-0.000462463,-0.00138061,0.0222432], + "R": [ + [0.9652282485,0.06485174985,-0.2532364089], + [-0.07898708824,0.9958116468,-0.0460456736], + [0.2491896228,0.06444699145,0.9663079826] + ], + "t": [ + [26.28384049], + [86.2200762], + [282.8912643] + ] + }, + { + "name": "01_09", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 9, + "K": [ + [746.037,0,338.236], + [0,746.053,236.859], + [0,0,1] + ], + "distCoef": [-0.314486,0.0395532,0.000625849,-0.000232478,0.0599275], + "R": [ + [0.9656569777,0.07278005487,-0.2494186543], + [-0.09030273149,0.9941334749,-0.05953193019], + [0.2436226964,0.08001060955,0.9665641645] + ], + "t": [ + [45.35508632], + [94.7965848], + [284.0947744] + ] + }, + { + "name": "01_10", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 
10, + "K": [ + [747.938,0,379.271], + [0,748.269,227.432], + [0,0,1] + ], + "distCoef": [-0.3484,0.205218,-0.00110069,0.000562921,-0.151344], + "R": [ + [0.9662738854,-0.001312373382,-0.2575132151], + [-0.009587322107,0.9991104143,-0.04106657164], + [0.2573380297,0.04215041788,0.9654017199] + ], + "t": [ + [30.05861189], + [130.0028668], + [279.9552314] + ] + }, + { + "name": "01_11", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 11, + "K": [ + [746.12,0,364.693], + [0,745.844,223.621], + [0,0,1] + ], + "distCoef": [-0.335335,0.119703,0.000192218,0.00118296,-0.00812072], + "R": [ + [0.9869891455,-0.01212212734,-0.1603292883], + [0.00355647539,0.9985558958,-0.05360479805], + [0.1607475603,0.05233714665,0.9856069424] + ], + "t": [ + [71.07099717], + [142.6182462], + [275.3539702] + ] + }, + { + "name": "01_12", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 12, + "K": [ + [745.407,0,358.691], + [0,745.503,226.329], + [0,0,1] + ], + "distCoef": [-0.325389,0.0923962,-0.00061832,-0.00189678,-0.0159561], + "R": [ + [0.9589650047,0.08538224277,-0.2703627054], + [-0.09708669181,0.9948178626,-0.03019262438], + [0.2663837347,0.05520229083,0.9622849957] + ], + "t": [ + [54.63033668], + [157.9150468], + [281.9236261] + ] + }, + { + "name": "01_13", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 13, + "K": [ + [744.389,0,339.442], + [0,744.512,216.258], + [0,0,1] + ], + "distCoef": [-0.320138,0.0543285,-0.000196977,-0.00116274,0.0473598], + "R": [ + [0.9724830194,-0.06319437739,-0.2242392645], + [0.03959405574,0.9933373951,-0.1082272161], + [0.2295845984,0.09637058799,0.9685058709] + ], + "t": [ + [19.90234626], + [154.6647449], + [286.7518211] + ] + }, + { + "name": "01_14", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 14, + "K": [ + [746.213,0,363.165], + [0,746.641,235.418], + [0,0,1] + ], + "distCoef": [-0.33414,0.127633,-0.000792357,0.000136075,-0.0405619], + "R": [ + [0.9643490552,0.006836134333,-0.2645452079], + [-0.02440508255,0.9977035557,-0.06318233054], + [0.2635057717,0.0673860684,0.9623013177] + ], + "t": [ + [19.24633902], + [182.0747755], + [282.9928946] + ] + }, + { + "name": "01_15", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 15, + "K": [ + [745.225,0,366.568], + [0,745.569,216.05], + [0,0,1] + ], + "distCoef": [-0.319743,0.046174,-0.00158438,-0.000953331,0.0743504], + "R": [ + [0.9602661069,0.03565913048,-0.2767985376], + [-0.06162250151,0.9944158624,-0.08567239854], + [0.2721978533,0.09932531892,0.9571012536] + ], + "t": [ + [0.9330302863], + [174.5612072], + [288.1067574] + ] + }, + { + "name": "01_16", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 16, + "K": [ + [747.633,0,371.752], + [0,747.88,230.613], + [0,0,1] + ], + "distCoef": [-0.347758,0.198029,0.00072103,0.00029865,-0.136932], + "R": [ + [0.9682573711,0.05614690975,-0.2435676248], + [-0.07153002565,0.9959334273,-0.05477283913], + [0.2395018137,0.07045660367,0.968336072] + ], + "t": [ + [-3.74774], + [172.5737662], + [282.7618788] + ] + }, + { + "name": "01_17", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 17, + "K": [ + [748.152,0,373.9], + [0,748.508,234.452], + [0,0,1] + ], + "distCoef": [-0.345127,0.177692,-0.00116897,0.00210199,-0.0818461], + "R": [ + [0.9639501783,0.02458774974,-0.264944327], + [-0.04477053879,0.9965129817,-0.07040934697], + [0.2622892538,0.07973280283,0.9616896732] + ], + "t": [ + [-36.08309916], + [173.4726636], + [283.4522322] + ] + }, + { + 
"name": "01_18", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 18, + "K": [ + [743.791,0,363.617], + [0,744.126,236.963], + [0,0,1] + ], + "distCoef": [-0.312734,0.0122172,-0.00120247,-0.000963953,0.133944], + "R": [ + [0.9523198878,0.06045552763,-0.2990517689], + [-0.07234112338,0.9969633514,-0.02882425707], + [0.2964010681,0.04908365416,0.9538014478] + ], + "t": [ + [-57.80984395], + [175.8598769], + [275.2458542] + ] + }, + { + "name": "01_19", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 19, + "K": [ + [743.162,0,364.748], + [0,743.331,220.785], + [0,0,1] + ], + "distCoef": [-0.311505,0.00290054,-0.000860754,-0.000437091,0.146397], + "R": [ + [0.9677776267,0.05243241618,-0.246287042], + [-0.06515666231,0.9969134625,-0.04379677618], + [0.243230497,0.05843278173,0.968206866] + ], + "t": [ + [-19.88792012], + [144.796335], + [280.8929426] + ] + }, + { + "name": "01_20", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 20, + "K": [ + [744.661,0,343.237], + [0,744.907,246.044], + [0,0,1] + ], + "distCoef": [-0.326994,0.0904776,0.000984855,-0.00107766,-0.0214165], + "R": [ + [0.9717064093,0.03462931454,-0.2336396043], + [-0.0436324388,0.998486683,-0.03347468014], + [0.2321268283,0.04272182698,0.9717468709] + ], + "t": [ + [-15.15244103], + [127.7778149], + [279.5122056] + ] + }, + { + "name": "01_21", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 21, + "K": [ + [742.462,0,365.246], + [0,742.468,221.387], + [0,0,1] + ], + "distCoef": [-0.311193,-0.0017069,-0.0010044,-5.33063e-05,0.168374], + "R": [ + [0.9650420793,0.04068979072,-0.2589172188], + [-0.04945049005,0.9984003719,-0.02741069744], + [0.257387712,0.03925605981,0.965510501] + ], + "t": [ + [-1.672862451], + [122.1992626], + [279.1232554] + ] + }, + { + "name": "01_22", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 22, + "K": [ + [744.021,0,363.587], + [0,744.301,226.764], + [0,0,1] + ], + "distCoef": [-0.330855,0.115198,-0.00111581,-0.000578883,-0.0257811], + "R": [ + [0.9624230562,-0.007741542698,-0.2714441553], + [-0.003557050749,0.9991484058,-0.04110730506], + [0.271531229,0.0405281588,0.9615759252] + ], + "t": [ + [4.289641778], + [135.1743597], + [279.2863723] + ] + }, + { + "name": "01_23", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 23, + "K": [ + [745.029,0,358.645], + [0,745.162,224.101], + [0,0,1] + ], + "distCoef": [-0.31925,0.0412999,-0.000788365,0.000625647,0.108146], + "R": [ + [0.9553340738,0.01211961015,-0.2952793973], + [-0.03701510886,0.9961975848,-0.07886858543], + [0.293200766,0.08627564605,0.9521501057] + ], + "t": [ + [-2.968489269], + [143.230855], + [285.3382881] + ] + }, + { + "name": "01_24", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 24, + "K": [ + [744.501,0,369.38], + [0,744.575,244.409], + [0,0,1] + ], + "distCoef": [-0.317214,0.0306635,-5.65201e-05,-0.000305408,0.106933], + "R": [ + [0.9627375442,0.05351140442,-0.2650904574], + [-0.07422624073,0.9948691584,-0.06874462026], + [0.2600516991,0.08585969499,0.9617698408] + ], + "t": [ + [-7.333655278], + [148.0612654], + [284.8699573] + ] + }, + { + "name": "02_01", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 1, + "K": [ + [746.79,0,376.022], + [0,747.048,234.17], + [0,0,1] + ], + "distCoef": [-0.317408,0.0301922,-0.000108969,-0.00027109,0.105931], + "R": [ + [0.977473966,0.04697618088,0.2057617172], + [0.001487552662,0.9733575223,-0.2292878562], + 
[-0.211050783,0.2244289915,0.9513617581] + ], + "t": [ + [-1.729507611], + [175.3460492], + [304.9109171] + ] + }, + { + "name": "02_02", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 2, + "K": [ + [747.689,0,367.065], + [0,747.811,212.158], + [0,0,1] + ], + "distCoef": [-0.333664,0.117162,0.000577725,-0.000310896,-0.0327554], + "R": [ + [0.9812751339,-0.05714257326,0.183939767], + [0.09271495859,0.9771941455,-0.1910380552], + [-0.1688284573,0.2045148611,0.9641942873] + ], + "t": [ + [-50.62568249], + [190.9654762], + [299.6250374] + ] + }, + { + "name": "02_03", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 3, + "K": [ + [745.627,0,353.486], + [0,745.817,252.683], + [0,0,1] + ], + "distCoef": [-0.321416,0.0392112,-0.00107045,-0.00134198,0.0908854], + "R": [ + [0.9757098845,0.1270834984,0.1784376802], + [-0.07601456941,0.9603325594,-0.2682967771], + [-0.2054556071,0.248215954,0.946666168] + ], + "t": [ + [-23.13649132], + [169.3490841], + [309.2380875] + ] + }, + { + "name": "02_04", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 4, + "K": [ + [746.11,0,381.584], + [0,746.321,224.917], + [0,0,1] + ], + "distCoef": [-0.323963,0.0585021,-0.000871966,0.000552522,0.0715102], + "R": [ + [0.979331342,0.07410153523,0.1881995881], + [-0.02608477747,0.9689731658,-0.2457856551], + [-0.2005734451,0.2357964511,0.950878713] + ], + "t": [ + [-32.63906075], + [150.8763932], + [306.9317958] + ] + }, + { + "name": "02_05", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 5, + "K": [ + [744.11,0,378.377], + [0,744.035,244.823], + [0,0,1] + ], + "distCoef": [-0.323078,0.0494134,-0.000238923,-0.000981516,0.0727453], + "R": [ + [0.9857440106,0.05652749171,0.1584720428], + [-0.01525193411,0.9680163878,-0.250422945], + [-0.1675593154,0.244435913,0.95507851] + ], + "t": [ + [-62.3494258], + [135.8190029], + [306.0165552] + ] + }, + { + "name": "02_06", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 6, + "K": [ + [743.928,0,352.844], + [0,744.181,228.627], + [0,0,1] + ], + "distCoef": [-0.303908,-0.0528673,-0.000528541,8.08764e-05,0.267531], + "R": [ + [0.9814194485,0.06212733968,0.1815380393], + [-0.0101664424,0.9616367605,-0.2741375282], + [-0.1916050874,0.2671983057,0.9444006332] + ], + "t": [ + [-53.86742917], + [106.6702196], + [310.2214119] + ] + }, + { + "name": "02_07", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 7, + "K": [ + [746.501,0,376.178], + [0,746.591,217.394], + [0,0,1] + ], + "distCoef": [-0.323449,0.0621904,-0.000592526,0.000355354,0.0689781], + "R": [ + [0.9775323693,0.09704954661,0.1871145437], + [-0.05094527723,0.9701636443,-0.2370381445], + [-0.2045361721,0.2221798567,0.9533105819] + ], + "t": [ + [-27.21830655], + [111.2122483], + [305.8578091] + ] + }, + { + "name": "02_08", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 8, + "K": [ + [747.056,0,346.722], + [0,747.425,231.954], + [0,0,1] + ], + "distCoef": [-0.331626,0.0978711,0.000923123,-0.00170198,0.0128988], + "R": [ + [0.9738310577,0.04398424166,0.222976361], + [0.006459505741,0.9753414162,-0.2206068824], + [-0.2271813062,0.2162741507,0.9495336465] + ], + "t": [ + [-23.1615402], + [89.62617671], + [306.715437] + ] + }, + { + "name": "02_09", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 9, + "K": [ + [746.084,0,344.827], + [0,746.456,222.936], + [0,0,1] + ], + "distCoef": [-0.31385,0.00765504,0.000335804,0.000338293,0.157318], + "R": [ + 
[0.9708044988,0.02558390192,0.2385038556], + [0.01777728087,0.9838878899,-0.1779005014], + [-0.2392124442,0.1769465571,0.9547079776] + ], + "t": [ + [-1.622489705], + [92.86686988], + [302.6276511] + ] + }, + { + "name": "02_10", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 10, + "K": [ + [743.875,0,345.16], + [0,744.131,231.932], + [0,0,1] + ], + "distCoef": [-0.309364,-0.0158069,0.000435688,-0.000318284,0.167974], + "R": [ + [0.9837217555,0.04774800386,0.1732386674], + [-0.008457215477,0.9752859506,-0.220784488], + [-0.179499257,0.2157253874,0.9598138226] + ], + "t": [ + [0.6070589451], + [94.58504844], + [305.3954199] + ] + }, + { + "name": "02_11", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 11, + "K": [ + [748.642,0,372.727], + [0,749.029,221.349], + [0,0,1] + ], + "distCoef": [-0.329743,0.0894243,0.000705225,0.000452301,0.0255748], + "R": [ + [0.9762818677,-0.03993432779,0.2127885436], + [0.08495434643,0.9746762651,-0.20685487], + [-0.1991393328,0.2200259705,0.9549513592] + ], + "t": [ + [18.17502224], + [86.30258496], + [305.899008] + ] + }, + { + "name": "02_12", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 12, + "K": [ + [746.297,0,386.393], + [0,746.341,223.432], + [0,0,1] + ], + "distCoef": [-0.329805,0.088881,-0.000101498,-0.000342857,0.0238941], + "R": [ + [0.9769251111,-0.05225372472,0.2070914666], + [0.09392861168,0.9759243238,-0.1968479875], + [-0.1918195589,0.211757556,0.9583130982] + ], + "t": [ + [31.97904484], + [101.8192368], + [305.2554798] + ] + }, + { + "name": "02_13", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 13, + "K": [ + [746.887,0,386.903], + [0,746.77,241.912], + [0,0,1] + ], + "distCoef": [-0.330222,0.0894843,0.000608161,-0.000202457,0.0188277], + "R": [ + [0.9805035597,0.07291108666,0.1824739514], + [-0.03359954242,0.9771464723,-0.2098948364], + [-0.1936074385,0.199671593,0.9605453736] + ], + "t": [ + [39.8755561], + [121.0360498], + [302.8306622] + ] + }, + { + "name": "02_14", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 14, + "K": [ + [745.399,0,359.381], + [0,745.103,221.453], + [0,0,1] + ], + "distCoef": [-0.32351,0.0564367,0.000553752,0.000358328,0.0789504], + "R": [ + [0.9639890244,-0.01369700088,0.2655890681], + [0.06651808592,0.9793475216,-0.1909287203], + [-0.2574888447,0.2017196672,0.9449913601] + ], + "t": [ + [64.66924198], + [136.2834945], + [299.1868513] + ] + }, + { + "name": "02_15", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 15, + "K": [ + [746.343,0,376.035], + [0,746.136,233.449], + [0,0,1] + ], + "distCoef": [-0.332319,0.10939,0.000552685,0.00121175,-0.00685584], + "R": [ + [0.9739293667,-0.02993852249,0.2248672353], + [0.07982373372,0.9730868608,-0.2161715356], + [-0.2123434957,0.2284855491,0.9501076748] + ], + "t": [ + [41.67937397], + [146.9667487], + [305.3208703] + ] + }, + { + "name": "02_16", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 16, + "K": [ + [747.983,0,369.069], + [0,747.865,212.357], + [0,0,1] + ], + "distCoef": [-0.333814,0.119177,-0.00123283,0.000206724,-0.0313224], + "R": [ + [0.9828420813,0.01261378295,0.1840172159], + [0.03080156014,0.9724259604,-0.2311688027], + [-0.181859031,0.2328704445,0.9553526307] + ], + "t": [ + [22.33056427], + [154.6384713], + [307.0242051] + ] + }, + { + "name": "02_17", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 17, + "K": [ + [743.255,0,372.405], + [0,743.629,259.514], + [0,0,1] + ], + "distCoef": 
[-0.301911,-0.0577323,-0.000292445,-0.000537705,0.240913], + "R": [ + [0.9702237144,0.05425789408,0.2360551311], + [-0.004184220731,0.978195713,-0.2076430576], + [-0.2421743923,0.2004725119,0.9492957051] + ], + "t": [ + [39.95715372], + [182.9757461], + [299.4720725] + ] + }, + { + "name": "02_18", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 18, + "K": [ + [746.171,0,380.016], + [0,746.628,215.7], + [0,0,1] + ], + "distCoef": [-0.310416,0.0111871,-0.00156578,-0.000885002,0.110566], + "R": [ + [0.9751942313,0.01121985931,0.2210663386], + [0.02134458651,0.9892938663,-0.1443677759], + [-0.220319359,0.1455051918,0.9645141882] + ], + "t": [ + [9.159436194], + [213.6293599], + [288.3403437] + ] + }, + { + "name": "02_19", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 19, + "K": [ + [745.09,0,380.114], + [0,745.176,232.983], + [0,0,1] + ], + "distCoef": [-0.31746,0.043353,-0.000108725,0.000220738,0.0862213], + "R": [ + [0.9809185988,0.05584586521,0.1862255137], + [-0.01423917048,0.975920974,-0.2176591338], + [-0.1938967473,0.2108541957,0.9580942331] + ], + "t": [ + [-1.989355998], + [159.4183424], + [303.0216832] + ] + }, + { + "name": "02_20", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 20, + "K": [ + [746.359,0,393.165], + [0,746.438,228.007], + [0,0,1] + ], + "distCoef": [-0.32236,0.0673245,-0.000115957,0.00130444,0.0588071], + "R": [ + [0.9826018096,0.03015545669,0.1832602856], + [0.01576123022,0.9696317731,-0.2440610748], + [-0.1850547688,0.2427032613,0.9522866477] + ], + "t": [ + [-25.36954265], + [136.7143691], + [307.7149997] + ] + }, + { + "name": "02_21", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 21, + "K": [ + [747.137,0,358.509], + [0,747.202,238.678], + [0,0,1] + ], + "distCoef": [-0.327929,0.0852816,0.000460613,0.000357406,0.0365027], + "R": [ + [0.9780966382,0.08951991601,0.1879179366], + [-0.04045439222,0.9673344336,-0.2502549415], + [-0.2041822921,0.2371714111,0.9497680314] + ], + "t": [ + [-10.00427836], + [118.005594], + [307.3165834] + ] + }, + { + "name": "02_22", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 22, + "K": [ + [745.847,0,374.568], + [0,746.074,247.807], + [0,0,1] + ], + "distCoef": [-0.32052,0.063252,0.000743322,-0.000945252,0.0534877], + "R": [ + [0.9839840132,0.07804627455,0.160263036], + [-0.03749054936,0.9695570383,-0.2419785283], + [-0.1742696772,0.2320946541,0.9569546233] + ], + "t": [ + [-1.458572059], + [110.2636917], + [306.6072245] + ] + }, + { + "name": "02_23", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 23, + "K": [ + [744.851,0,375.128], + [0,744.899,236.672], + [0,0,1] + ], + "distCoef": [-0.328747,0.0731957,0.000409854,0.000115616,0.0573405], + "R": [ + [0.9798731388,0.006836815724,0.1995041098], + [0.04188111895,0.9701291749,-0.2389463451], + [-0.1951783896,0.2424925605,0.9503171862] + ], + "t": [ + [13.92766978], + [118.8861106], + [308.0337581] + ] + }, + { + "name": "02_24", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 24, + "K": [ + [748.108,0,365.63], + [0,748.409,236.546], + [0,0,1] + ], + "distCoef": [-0.337502,0.145226,-9.99404e-05,-0.000712599,-0.0768278], + "R": [ + [0.9858983234,-0.01937546959,0.166219996], + [0.057736328,0.9716683618,-0.2291879382], + [-0.1570700873,0.2355529362,0.9590848773] + ], + "t": [ + [-5.69779309], + [141.0775615], + [307.1963385] + ] + }, + { + "name": "03_01", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 1, + "K": [ + 
[745.205,0,364.445], + [0,745.671,223.278], + [0,0,1] + ], + "distCoef": [-0.321278,0.0550501,-0.000663141,0.000431329,0.0680735], + "R": [ + [0.789168654,0.1464091436,-0.5964706181], + [-0.3274382264,0.921936374,-0.2069239719], + [0.5196123973,0.3586051937,0.7755032377] + ], + "t": [ + [-15.48720347], + [106.8731646], + [321.197831] + ] + }, + { + "name": "03_02", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 2, + "K": [ + [746.402,0,367.989], + [0,746.656,218.884], + [0,0,1] + ], + "distCoef": [-0.319108,0.0415571,-0.000289565,0.00121415,0.0978966], + "R": [ + [0.7844411333,0.123213727,-0.6078408392], + [-0.3461950886,0.9001611021,-0.2643084389], + [0.5145882519,0.4177659246,0.7487793823] + ], + "t": [ + [-25.69855827], + [65.19717944], + [326.035328] + ] + }, + { + "name": "03_03", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 3, + "K": [ + [747.999,0,350.415], + [0,748.222,213.374], + [0,0,1] + ], + "distCoef": [-0.322361,0.0444301,-0.000132478,-4.14576e-05,0.110213], + "R": [ + [0.8075592295,0.0617799019,-0.5865418439], + [-0.2672496857,0.9248714179,-0.2705373648], + [0.525762015,0.3752280693,0.763399109] + ], + "t": [ + [-8.799326732], + [72.40249706], + [323.1224723] + ] + }, + { + "name": "03_04", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 4, + "K": [ + [744.819,0,376.394], + [0,744.912,212.894], + [0,0,1] + ], + "distCoef": [-0.335892,0.121706,-0.00015411,0.0017688,-0.0013985], + "R": [ + [0.8410364559,-0.03582960221,-0.5397906256], + [-0.192384631,0.9127679401,-0.3603371217], + [0.5056143132,0.4069040761,0.7607780486] + ], + "t": [ + [3.728898504], + [75.32503712], + [325.8417248] + ] + }, + { + "name": "03_05", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 5, + "K": [ + [746.446,0,376.523], + [0,746.682,251.012], + [0,0,1] + ], + "distCoef": [-0.330943,0.0996499,0.00144142,-0.000113946,0.0131394], + "R": [ + [0.8610606531,-0.05437396314,-0.5055868113], + [-0.176556083,0.9004429458,-0.3975304402], + [0.4768673833,0.4315622475,0.7657359371] + ], + "t": [ + [31.93527518], + [62.43528973], + [326.764058] + ] + }, + { + "name": "03_06", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 6, + "K": [ + [744.998,0,378.484], + [0,744.973,240.788], + [0,0,1] + ], + "distCoef": [-0.31652,0.0338012,-0.0010118,-0.000122735,0.0959735], + "R": [ + [0.8769583834,-0.06555368648,-0.4760742674], + [-0.1128149484,0.9348860407,-0.3365425358], + [0.4671367907,0.348842092,0.8124607151] + ], + "t": [ + [52.69213606], + [109.2131316], + [317.2562433] + ] + }, + { + "name": "03_07", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 7, + "K": [ + [744.942,0,394.454], + [0,745.513,230.902], + [0,0,1] + ], + "distCoef": [-0.322593,0.0669124,0.000685625,0.000650135,0.0435827], + "R": [ + [0.8511772215,-0.03734239681,-0.5235483579], + [-0.1521244983,0.9371023984,-0.3141611561], + [0.5023499524,0.3470513512,0.7919595223] + ], + "t": [ + [39.57000229], + [127.8421428], + [318.5564893] + ] + }, + { + "name": "03_08", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 8, + "K": [ + [744.592,0,375.596], + [0,744.695,234.586], + [0,0,1] + ], + "distCoef": [-0.314208,0.0115966,-0.0002404,-0.00129875,0.131833], + "R": [ + [0.863242284,-0.08735605341,-0.4971736911], + [-0.1241310572,0.9179337282,-0.3768144785], + [0.4892895255,0.386996887,0.7815556088] + ], + "t": [ + [48.3076273], + [133.8669044], + [323.1008342] + ] + }, + { + "name": "03_09", + "type": "vga", + "resolution": 
[640,480], + "panel": 3, + "node": 9, + "K": [ + [746.083,0,388.49], + [0,746.196,219.485], + [0,0,1] + ], + "distCoef": [-0.327776,0.0952708,0.000477894,0.00116098,0.0130168], + "R": [ + [0.8627791791,-0.162720556,-0.478679547], + [-0.06768333431,0.9010943873,-0.4283081501], + [0.5010299935,0.401933982,0.766432006] + ], + "t": [ + [23.91664651], + [150.3571005], + [326.7446808] + ] + }, + { + "name": "03_10", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 10, + "K": [ + [744.984,0,374.291], + [0,745.244,231.69], + [0,0,1] + ], + "distCoef": [-0.317288,0.0201616,0.000340337,0.000302133,0.135473], + "R": [ + [0.8433461687,-0.104156761,-0.5271798639], + [-0.1611508321,0.8868626272,-0.433018579], + [0.5126379318,0.4501400333,0.7311472501] + ], + "t": [ + [5.809004706], + [133.1751931], + [335.4888131] + ] + }, + { + "name": "03_11", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 11, + "K": [ + [746.325,0,369.755], + [0,746.606,238.315], + [0,0,1] + ], + "distCoef": [-0.330117,0.107892,0.000853042,-0.00148033,-0.0192727], + "R": [ + [0.8487877999,-0.06352852013,-0.5249032272], + [-0.1660312052,0.9105147821,-0.3786772643], + [0.5019889537,0.4085669574,0.7622861219] + ], + "t": [ + [10.90299391], + [168.9126588], + [328.8547345] + ] + }, + { + "name": "03_12", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 12, + "K": [ + [745.397,0,373.191], + [0,745.394,241.989], + [0,0,1] + ], + "distCoef": [-0.315431,0.0239438,0.00152043,8.78247e-05,0.132462], + "R": [ + [0.7899500519,0.01447673769,-0.613000277], + [-0.2772192125,0.9001468868,-0.3359837649], + [0.5469263421,0.4353458466,0.7150843098] + ], + "t": [ + [-11.01289772], + [165.4412244], + [333.9391633] + ] + }, + { + "name": "03_13", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 13, + "K": [ + [746.289,0,356.696], + [0,746.559,221.83], + [0,0,1] + ], + "distCoef": [-0.307674,-0.0320128,-0.000713248,-0.000212304,0.187939], + "R": [ + [0.7812025858,0.003231301473,-0.6242692358], + [-0.256925784,0.9130359895,-0.316787663], + [0.5689566429,0.4078662043,0.7140962805] + ], + "t": [ + [-30.04397497], + [158.6113997], + [327.0561852] + ] + }, + { + "name": "03_14", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 14, + "K": [ + [744.216,0,367.374], + [0,744.503,234.384], + [0,0,1] + ], + "distCoef": [-0.313106,0.0107213,0.00051099,0.000391129,0.137335], + "R": [ + [0.7647493291,0.08765142393,-0.6383382266], + [-0.3090501184,0.9192036391,-0.2440342068], + [0.5653728752,0.3839035005,0.7300490493] + ], + "t": [ + [-30.23656889], + [178.7825502], + [321.7207122] + ] + }, + { + "name": "03_15", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 15, + "K": [ + [747.827,0,380.852], + [0,747.806,237.021], + [0,0,1] + ], + "distCoef": [-0.329904,0.102056,0.000500868,0.000776535,0.0163276], + "R": [ + [0.8420936086,0.09442452017,-0.5310012847], + [-0.2692856411,0.9266613257,-0.2622670985], + [0.4672939095,0.3638444688,0.8057627471] + ], + "t": [ + [-9.683781844], + [164.2881649], + [322.7392687] + ] + }, + { + "name": "03_16", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 16, + "K": [ + [745.289,0,371.652], + [0,745.447,216.538], + [0,0,1] + ], + "distCoef": [-0.317152,0.0301694,-0.000847782,0.000226416,0.100881], + "R": [ + [0.7751085928,0.08020770062,-0.6267163586], + [-0.2817854267,0.9316829094,-0.2292682483], + [0.5655118413,0.3543073259,0.74475679] + ], + "t": [ + [-42.18053512], + [150.9579844], + [316.9204289] + ] + 
}, + { + "name": "03_17", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 17, + "K": [ + [744.591,0,386.471], + [0,744.601,243.766], + [0,0,1] + ], + "distCoef": [-0.308716,-0.020066,-0.000742984,7.36231e-05,0.18193], + "R": [ + [0.8000888793,0.13985822,-0.5833502066], + [-0.3086873752,0.9298003917,-0.2004578159], + [0.5143635773,0.3404569133,0.7870954202] + ], + "t": [ + [-29.24407076], + [139.76037], + [318.5389184] + ] + }, + { + "name": "03_18", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 18, + "K": [ + [747.091,0,388.41], + [0,747.213,245.147], + [0,0,1] + ], + "distCoef": [-0.331947,0.109947,-0.00018029,-0.000335458,-0.0100282], + "R": [ + [0.7812031275,0.143907843,-0.6074637489], + [-0.3493109676,0.9072427652,-0.2342912992], + [0.5174007358,0.3952228456,0.7590094735] + ], + "t": [ + [-39.38157975], + [101.9329028], + [324.6812046] + ] + }, + { + "name": "03_19", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 19, + "K": [ + [743.815,0,380.782], + [0,743.921,233.579], + [0,0,1] + ], + "distCoef": [-0.31618,0.0384848,0.000240219,0.000426998,0.0977231], + "R": [ + [0.8097086682,0.09665101941,-0.578818152], + [-0.2718115959,0.9359285209,-0.2239559336], + [0.5200868476,0.3386685464,0.784100304] + ], + "t": [ + [-3.817362892], + [126.1763792], + [318.2990602] + ] + }, + { + "name": "03_20", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 20, + "K": [ + [746.163,0,356.033], + [0,746.281,215.327], + [0,0,1] + ], + "distCoef": [-0.323416,0.0556958,5.62358e-06,-0.000684023,0.0815018], + "R": [ + [0.8690981447,0.003405692177,-0.4946279574], + [-0.1831744592,0.9310985933,-0.3154402114], + [0.4594731031,0.3647517111,0.8098398958] + ], + "t": [ + [22.15812523], + [111.197586], + [320.9871724] + ] + }, + { + "name": "03_21", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 21, + "K": [ + [745.277,0,370.698], + [0,745.633,251.594], + [0,0,1] + ], + "distCoef": [-0.309423,-0.0154759,-0.000871178,-0.000110471,0.185828], + "R": [ + [0.8519925598,-0.01534543221,-0.5233289556], + [-0.157671027,0.9456449668,-0.2844212441], + [0.4992479597,0.3248385977,0.8032629458] + ], + "t": [ + [23.66925749], + [140.0971121], + [315.3107012] + ] + }, + { + "name": "03_22", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 22, + "K": [ + [749.812,0,361.025], + [0,750.052,224.033], + [0,0,1] + ], + "distCoef": [-0.333335,0.0892582,3.32371e-05,-0.00136116,0.0353235], + "R": [ + [0.8242021998,-0.0118106517,-0.5661724493], + [-0.2609232338,0.8794144434,-0.3981824994], + [0.5026030242,0.4759104383,0.7217336453] + ], + "t": [ + [6.739100305], + [105.8858326], + [336.9710973] + ] + }, + { + "name": "03_23", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 23, + "K": [ + [744.781,0,365.976], + [0,744.836,235.682], + [0,0,1] + ], + "distCoef": [-0.319452,0.032528,0.000754874,-0.000913445,0.102166], + "R": [ + [0.8233335342,0.02583843362,-0.5669693703], + [-0.2570181529,0.9076367155,-0.3318693443], + [0.506027233,0.4189605805,0.7539286912] + ], + "t": [ + [-4.103462359], + [133.5127669], + [329.5726238] + ] + }, + { + "name": "03_24", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 24, + "K": [ + [746.135,0,373.553], + [0,746.515,225.298], + [0,0,1] + ], + "distCoef": [-0.323756,0.0623909,2.70614e-05,0.000962707,0.0761173], + "R": [ + [0.8557458945,0.0294251088,-0.5165589289], + [-0.2234217673,0.921515875,-0.3176337608], + [0.4666708454,0.3872242956,0.7951576366] + ], + 
"t": [ + [-1.49693002], + [128.5290469], + [325.1203285] + ] + }, + { + "name": "04_01", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 1, + "K": [ + [745.756,0,368.953], + [0,745.945,245.188], + [0,0,1] + ], + "distCoef": [-0.3245,0.0724334,-0.000312337,0.000678015,0.0415529], + "R": [ + [0.04501388353,-0.06073969189,-0.9971381249], + [-0.08162898106,0.9945884367,-0.06426936354], + [0.9956457501,0.08428838276,0.03981216889] + ], + "t": [ + [-59.71104012], + [137.3658878], + [280.4259077] + ] + }, + { + "name": "04_02", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 2, + "K": [ + [745.144,0,382.474], + [0,745.286,222.525], + [0,0,1] + ], + "distCoef": [-0.322843,0.0690658,-0.000684608,-0.000275864,0.0370253], + "R": [ + [0.1096717734,-0.01795980665,-0.9938055884], + [-0.007042199406,0.9997976117,-0.01884523745], + [0.9939429106,0.009065367736,0.1095231006] + ], + "t": [ + [-53.83503278], + [149.6185443], + [272.7820927] + ] + }, + { + "name": "04_03", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 3, + "K": [ + [742.832,0,377.499], + [0,742.665,258.984], + [0,0,1] + ], + "distCoef": [-0.312355,-0.00257413,0.000454129,0.00111055,0.151137], + "R": [ + [0.07040546321,0.04162572676,-0.9966495721], + [-0.08610880414,0.9956530214,0.03550119457], + [0.9937949208,0.08332082476,0.07368375372] + ], + "t": [ + [-50.21742462], + [111.4103034], + [280.5940976] + ] + }, + { + "name": "04_04", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 4, + "K": [ + [743.339,0,393.561], + [0,743.571,223.626], + [0,0,1] + ], + "distCoef": [-0.307228,-0.0295629,-0.000661125,6.4492e-05,0.183577], + "R": [ + [0.09450112049,0.05679880598,-0.993903131], + [-0.03670643306,0.9978910099,0.05353662459], + [0.9948478155,0.03142336774,0.09638670013] + ], + "t": [ + [-21.9069], + [118.1273376], + [275.8163164] + ] + }, + { + "name": "04_05", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 5, + "K": [ + [746.019,0,364.58], + [0,746.273,258.887], + [0,0,1] + ], + "distCoef": [-0.327759,0.0738839,0.000801649,0.000211169,0.0604088], + "R": [ + [0.135847977,0.01131634816,-0.9906650632], + [-0.049797809,0.9987488181,0.004580011864], + [0.98947739,0.04871076425,0.1362415358] + ], + "t": [ + [-12.12624478], + [90.71810202], + [278.5550143] + ] + }, + { + "name": "04_06", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 6, + "K": [ + [745.588,0,362.328], + [0,745.695,224.495], + [0,0,1] + ], + "distCoef": [-0.317313,0.0342325,-0.00011624,0.00140051,0.0955503], + "R": [ + [0.09768474559,0.09486669264,-0.9906856217], + [-0.08671696061,0.9924717325,0.0864871607], + [0.9914322262,0.07746076975,0.1051758999] + ], + "t": [ + [6.120914551], + [75.66522558], + [280.1538331] + ] + }, + { + "name": "04_07", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 7, + "K": [ + [744.949,0,374.902], + [0,744.948,218.152], + [0,0,1] + ], + "distCoef": [-0.307279,-0.0368619,-0.000928182,-0.000206153,0.214368], + "R": [ + [0.08413477249,-0.05845821559,-0.994738145], + [-0.03729096802,0.9973936317,-0.06176833509], + [0.9957563576,0.04229161317,0.08173552284] + ], + "t": [ + [3.352563309], + [99.7043349], + [277.3248716] + ] + }, + { + "name": "04_08", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 8, + "K": [ + [744.851,0,365.832], + [0,744.82,236.655], + [0,0,1] + ], + "distCoef": [-0.313642,0.00106915,0.000461187,-0.00049658,0.163492], + "R": [ + [0.1068294918,-0.02053293437,-0.9940653189], + 
[-0.04471775106,0.998675844,-0.02543386204], + [0.9932712532,0.04716945203,0.1057698462] + ], + "t": [ + [34.88142403], + [92.93282517], + [277.1804593] + ] + }, + { + "name": "04_09", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 9, + "K": [ + [745.947,0,354.92], + [0,745.962,217.292], + [0,0,1] + ], + "distCoef": [-0.332252,0.114802,-0.000779302,-0.000175195,-0.0220414], + "R": [ + [0.0951039165,0.01286389124,-0.99538423], + [-0.04378002227,0.9990030715,0.008727700331], + [0.9945041753,0.04274790527,0.09557228614] + ], + "t": [ + [51.3876018], + [107.4685168], + [276.8925649] + ] + }, + { + "name": "04_10", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 10, + "K": [ + [743.419,0,373.623], + [0,743.493,209.714], + [0,0,1] + ], + "distCoef": [-0.312784,-0.00205334,-0.00151839,-4.48796e-05,0.146707], + "R": [ + [0.07554192003,-0.02015366607,-0.996938939], + [-0.05402378201,0.9982445697,-0.02427365106], + [0.9956780852,0.05569209012,0.07432053419] + ], + "t": [ + [36.95032578], + [126.4783785], + [278.9862968] + ] + }, + { + "name": "04_11", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 11, + "K": [ + [743.168,0,378.723], + [0,743.196,231.359], + [0,0,1] + ], + "distCoef": [-0.312654,0.00616666,0.000125459,-0.000163635,0.137741], + "R": [ + [0.104627794,-0.01026277171,-0.994458496], + [-0.05855646041,0.9981483637,-0.01646162423], + [0.9927860624,0.05995431298,0.1038331098] + ], + "t": [ + [61.78762978], + [139.882294], + [278.0088471] + ] + }, + { + "name": "04_12", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 12, + "K": [ + [746.755,0,377.564], + [0,747.014,231.526], + [0,0,1] + ], + "distCoef": [-0.342661,0.169314,0.000669193,0.000564241,-0.092518], + "R": [ + [0.09069981891,0.03748374052,-0.9951726041], + [-0.02832816732,0.9989841486,0.03504548138], + [0.9954752924,0.02501279723,0.09166952704] + ], + "t": [ + [63.18640006], + [168.1511303], + [272.7093484] + ] + }, + { + "name": "04_13", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 13, + "K": [ + [745.766,0,371.377], + [0,745.897,229.211], + [0,0,1] + ], + "distCoef": [-0.323265,0.06437,0.000357726,0.000480753,0.061899], + "R": [ + [0.03414536791,0.03842962758,-0.9986777546], + [-0.02717943982,0.9989265658,0.03750992125], + [0.9990472321,0.02586271187,0.03515321085] + ], + "t": [ + [27.04698548], + [171.5967975], + [274.5649723] + ] + }, + { + "name": "04_14", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 14, + "K": [ + [744.965,0,366.266], + [0,745.319,235.632], + [0,0,1] + ], + "distCoef": [-0.317134,0.0349168,5.85303e-05,0.000379707,0.110605], + "R": [ + [0.05221731101,0.04748668842,-0.9975060736], + [0.03426805086,0.9981953182,0.04931335942], + [0.9980476207,-0.03675759989,0.05049579913] + ], + "t": [ + [31.93275734], + [208.7852536], + [260.7309393] + ] + }, + { + "name": "04_15", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 15, + "K": [ + [744.586,0,371.051], + [0,745.106,212.085], + [0,0,1] + ], + "distCoef": [-0.332822,0.11382,-0.000911903,0.000640183,-0.00904196], + "R": [ + [0.0693166226,0.04834029473,-0.9964228127], + [-0.01396942206,0.9987743784,0.04748258878], + [0.9974968978,0.01062811814,0.06990695264] + ], + "t": [ + [16.12425569], + [198.357827], + [269.7404532] + ] + }, + { + "name": "04_16", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 16, + "K": [ + [742.58,0,362.432], + [0,742.717,222.722], + [0,0,1] + ], + "distCoef": 
[-0.316061,0.0181932,0.000637155,-0.000119442,0.122715], + "R": [ + [0.07545496093,-0.0349426896,-0.9965367817], + [-0.03652359913,0.9986183515,-0.03778114217], + [0.9964800929,0.03924788454,0.07407447592] + ], + "t": [ + [-15.86676392], + [179.6369531], + [275.0674259] + ] + }, + { + "name": "04_17", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 17, + "K": [ + [745.044,0,350.241], + [0,745.211,214.104], + [0,0,1] + ], + "distCoef": [-0.330556,0.0995367,-0.000406045,-3.83783e-05,-0.00374247], + "R": [ + [0.0837025501,0.02221656332,-0.9962430965], + [-0.04478154079,0.9988252756,0.01851168242], + [0.9954840515,0.04306382584,0.08459911461] + ], + "t": [ + [-23.0620205], + [182.4550181], + [276.0013748] + ] + }, + { + "name": "04_18", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 18, + "K": [ + [747.543,0,399.307], + [0,747.43,229.515], + [0,0,1] + ], + "distCoef": [-0.337874,0.152604,0.000377489,0.002871,-0.0603327], + "R": [ + [0.03967719066,0.06607189882,-0.9970256891], + [-0.02383145062,0.9975901546,0.06516091958], + [0.998928317,0.02117516625,0.04115616396] + ], + "t": [ + [-45.47747339], + [181.8911988], + [269.8403328] + ] + }, + { + "name": "04_19", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 19, + "K": [ + [743.963,0,369.391], + [0,744.08,218.072], + [0,0,1] + ], + "distCoef": [-0.320196,0.0539371,0.000417857,0.00192962,0.0700112], + "R": [ + [0.0434323362,0.03783761887,-0.9983395949], + [-0.08481170801,0.9958149524,0.03405223652], + [0.9954499517,0.08319191804,0.04645964289] + ], + "t": [ + [-24.42650241], + [136.5925943], + [281.0885176] + ] + }, + { + "name": "04_20", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 20, + "K": [ + [745.858,0,356.253], + [0,746.045,207.418], + [0,0,1] + ], + "distCoef": [-0.328012,0.0801152,-7.74627e-05,-0.000454429,0.0269942], + "R": [ + [0.0976780849,0.06705669278,-0.9929563896], + [-0.1171365339,0.9915671608,0.05544004021], + [0.9883005738,0.1108961929,0.1047091699] + ], + "t": [ + [-1.775430866], + [107.2147587], + [285.054156] + ] + }, + { + "name": "04_21", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 21, + "K": [ + [746.156,0,369.678], + [0,746.129,226.325], + [0,0,1] + ], + "distCoef": [-0.331296,0.10434,-0.000526263,0.0017798,0.0107539], + "R": [ + [0.06864954522,0.009029787974,-0.9975999714], + [-0.09824772164,0.9951594531,0.00224680986], + [0.9927913301,0.09785768182,0.06920439997] + ], + "t": [ + [2.330018678], + [104.6606406], + [283.2576255] + ] + }, + { + "name": "04_22", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 22, + "K": [ + [746.305,0,363.016], + [0,746.511,222.294], + [0,0,1] + ], + "distCoef": [-0.313633,0.00103632,0.000318828,-0.000294887,0.154057], + "R": [ + [0.08441946195,-0.0784287402,-0.9933389588], + [-0.07957536672,0.9931828981,-0.08517917513], + [0.9932477614,0.08623609206,0.07760297012] + ], + "t": [ + [9.995164317], + [122.6888691], + [282.4272415] + ] + }, + { + "name": "04_23", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 23, + "K": [ + [745.178,0,358.539], + [0,745.299,233.674], + [0,0,1] + ], + "distCoef": [-0.315081,0.0210219,-6.99317e-06,-0.000330658,0.115227], + "R": [ + [0.1162513982,0.03935918122,-0.9924396542], + [-0.02556811677,0.999001962,0.03662446354], + [0.9928906706,0.02111716788,0.117141715] + ], + "t": [ + [32.91845612], + [159.7823772], + [272.1694603] + ] + }, + { + "name": "04_24", + "type": "vga", + "resolution": [640,480], + "panel": 4, + 
"node": 24, + "K": [ + [746.014,0,365.199], + [0,746.411,216.584], + [0,0,1] + ], + "distCoef": [-0.320661,0.0432533,-0.00136099,-0.000113861,0.0956118], + "R": [ + [0.1001711426,-0.0639180002,-0.9929150172], + [-0.0054812292,0.9978838124,-0.06479084071], + [0.9949551238,0.01193256733,0.09960881242] + ], + "t": [ + [-9.066812064], + [167.2144724], + [271.0944115] + ] + }, + { + "name": "05_01", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 1, + "K": [ + [744.506,0,379.212], + [0,745.093,221.816], + [0,0,1] + ], + "distCoef": [-0.322425,0.0503962,-0.00139268,-0.000488272,0.0792831], + "R": [ + [0.4832137358,-0.07031409603,-0.8726742883], + [-0.1214142278,0.9817563233,-0.14633218], + [0.8670427157,0.1766647942,0.465861009] + ], + "t": [ + [-31.81590772], + [187.5269902], + [291.8752718] + ] + }, + { + "name": "05_02", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 2, + "K": [ + [746.146,0,379.909], + [0,746.274,243.237], + [0,0,1] + ], + "distCoef": [-0.327102,0.0750235,0.00051439,0.000830868,0.0552106], + "R": [ + [0.559561068,-0.04316954181,-0.8276640634], + [-0.1711397799,0.9711012062,-0.1663539088], + [0.8109269924,0.2347314165,0.5360024022] + ], + "t": [ + [-21.47998338], + [182.028679], + [304.5116426] + ] + }, + { + "name": "05_03", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 3, + "K": [ + [746.598,0,366.137], + [0,746.916,245.497], + [0,0,1] + ], + "distCoef": [-0.34673,0.191883,-0.000717065,0.000142378,-0.151818], + "R": [ + [0.4493443217,0.06721032382,-0.8908268367], + [-0.2833621033,0.9563979118,-0.07077395533], + [0.8472281859,0.2842284411,0.4487968296] + ], + "t": [ + [-42.79170468], + [156.78227], + [309.5144468] + ] + }, + { + "name": "05_04", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 4, + "K": [ + [744.97,0,361.533], + [0,745.268,216.194], + [0,0,1] + ], + "distCoef": [-0.320215,0.0355127,-0.000935438,6.82351e-05,0.107335], + "R": [ + [0.5139859054,0.07264601249,-0.8547169391], + [-0.2477501277,0.96651576,-0.06683681477], + [0.8212419639,0.2461094116,0.5147735369] + ], + "t": [ + [-21.66847624], + [145.8563675], + [305.5618637] + ] + }, + { + "name": "05_05", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 5, + "K": [ + [743.904,0,367.466], + [0,744.108,216.808], + [0,0,1] + ], + "distCoef": [-0.328736,0.086922,-0.000934339,0.000214876,0.0243362], + "R": [ + [0.4889793362,0.07185582001,-0.8693307483], + [-0.2209595119,0.9743010874,-0.0437525441], + [0.8438460185,0.2134809878,0.4922903259] + ], + "t": [ + [-47.80972546], + [144.3254019], + [299.7644507] + ] + }, + { + "name": "05_06", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 6, + "K": [ + [745.323,0,383.952], + [0,745.526,234.808], + [0,0,1] + ], + "distCoef": [-0.334223,0.133657,-0.000107051,0.00148947,-0.0461754], + "R": [ + [0.4969854565,0.0559027949,-0.8659563116], + [-0.2018212488,0.978003949,-0.05269211703], + [0.8439630558,0.2009556001,0.4973361109] + ], + "t": [ + [-46.56558119], + [125.7186081], + [298.6423415] + ] + }, + { + "name": "05_07", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 7, + "K": [ + [746.158,0,356.674], + [0,746.317,240.893], + [0,0,1] + ], + "distCoef": [-0.334568,0.11153,0.000321304,-0.000871385,-0.0157856], + "R": [ + [0.5541201274,0.02610072644,-0.8320274253], + [-0.1769665492,0.9803549196,-0.08710380092], + [0.8134087072,0.1955069916,0.5478533484] + ], + "t": [ + [-14.70019562], + [115.5481293], + [299.4445791] + ] + }, + { + "name": 
"05_08", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 8, + "K": [ + [744.96,0,386.044], + [0,745.46,258.776], + [0,0,1] + ], + "distCoef": [-0.325919,0.068823,-0.000458274,0.000477805,0.0465958], + "R": [ + [0.4763065258,-0.004539644313,-0.8792675845], + [-0.1710253429,0.980409884,-0.09770768372], + [0.8624861886,0.1969158475,0.4661992314] + ], + "t": [ + [-40.46029545], + [93.91456762], + [297.4902987] + ] + }, + { + "name": "05_09", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 9, + "K": [ + [745.188,0,367.116], + [0,745.437,236.843], + [0,0,1] + ], + "distCoef": [-0.328194,0.058828,0.000388874,-0.00143808,0.0829656], + "R": [ + [0.5065601345,-0.04543027129,-0.8610069225], + [-0.1705921502,0.9735884993,-0.1517357977], + [0.845159836,0.2237443283,0.4854310735] + ], + "t": [ + [-16.55300824], + [76.93410209], + [300.8962768] + ] + }, + { + "name": "05_10", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 10, + "K": [ + [747.452,0,374.886], + [0,747.648,257.28], + [0,0,1] + ], + "distCoef": [-0.337728,0.123608,0.00138141,5.97732e-05,-0.0225942], + "R": [ + [0.4549222289,-0.02855444123,-0.8900732608], + [-0.1699899924,0.9783230281,-0.1182685721], + [0.8741562607,0.2051065493,0.4402069233] + ], + "t": [ + [-13.61854908], + [96.6157071], + [299.0141417] + ] + }, + { + "name": "05_11", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 11, + "K": [ + [746.39,0,405.604], + [0,746.458,241.87], + [0,0,1] + ], + "distCoef": [-0.333064,0.100943,0.000870611,0.00103156,0.0180409], + "R": [ + [0.5002384593,-0.05591048228,-0.8640807264], + [-0.1916757277,0.9660062257,-0.1734715752], + [0.8444062406,0.2524004556,0.4725167836] + ], + "t": [ + [16.55277765], + [75.44647006], + [303.7304898] + ] + }, + { + "name": "05_12", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 12, + "K": [ + [745.943,0,392.757], + [0,746.143,272.1], + [0,0,1] + ], + "distCoef": [-0.323245,0.0770562,0.00168738,0.000666505,0.0382015], + "R": [ + [0.5344619138,-0.0483612619,-0.8438078283], + [-0.2099054746,0.9594877737,-0.1879438847], + [0.818712498,0.277568731,0.5026583782] + ], + "t": [ + [45.5535171], + [81.37072912], + [304.8427161] + ] + }, + { + "name": "05_13", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 13, + "K": [ + [748.463,0,383.471], + [0,748.465,243.614], + [0,0,1] + ], + "distCoef": [-0.34071,0.149034,0.000455623,0.000254671,-0.0668973], + "R": [ + [0.550270912,-0.09726860505,-0.8293013577], + [-0.1127468592,0.975440235,-0.1892207537], + [0.82733915,0.1976238001,0.525789658] + ], + "t": [ + [34.15956958], + [127.9842494], + [295.9545727] + ] + }, + { + "name": "05_14", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 14, + "K": [ + [744.467,0,372.192], + [0,744.287,242.67], + [0,0,1] + ], + "distCoef": [-0.321164,0.0557106,-0.000170048,0.000249902,0.0584864], + "R": [ + [0.5607110475,-0.1151130063,-0.8199708025], + [-0.101866971,0.9731761842,-0.2062795062], + [0.8217215109,0.1991911399,0.5339444244] + ], + "t": [ + [50.41224037], + [142.3474205], + [294.74195] + ] + }, + { + "name": "05_15", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 15, + "K": [ + [746.542,0,352.38], + [0,746.666,240.759], + [0,0,1] + ], + "distCoef": [-0.327959,0.100036,-0.000636984,-0.00122606,-0.0366604], + "R": [ + [0.5029624145,-0.05772144518,-0.8623787128], + [-0.198700467,0.9633205664,-0.180365215], + [0.8411580909,0.262071977,0.4730447599] + ], + "t": [ + [34.04469815], + 
[136.31759], + [307.4406203] + ] + }, + { + "name": "05_16", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 16, + "K": [ + [747.042,0,371.719], + [0,747.231,244.896], + [0,0,1] + ], + "distCoef": [-0.323957,0.0675271,-0.000219383,0.00030566,0.0452733], + "R": [ + [0.5145114331,-0.105655334,-0.8509494319], + [-0.1209004538,0.9735279663,-0.1939752023], + [0.8489175846,0.2026826318,0.4881174913] + ], + "t": [ + [9.341169646], + [165.8735131], + [297.8569993] + ] + }, + { + "name": "05_17", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 17, + "K": [ + [745.814,0,386.675], + [0,746.085,252.153], + [0,0,1] + ], + "distCoef": [-0.320652,0.0597547,0.000647483,5.56623e-05,0.0523558], + "R": [ + [0.5123119379,-0.06682282728,-0.856195765], + [-0.1341513719,0.9785027468,-0.1566390244], + [0.8482569703,0.1951078787,0.4923342645] + ], + "t": [ + [9.076647729], + [186.6487394], + [296.0424945] + ] + }, + { + "name": "05_18", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 18, + "K": [ + [744.362,0,367.747], + [0,744.705,261.961], + [0,0,1] + ], + "distCoef": [-0.317525,0.0240072,0.000331,-0.000409781,0.122239], + "R": [ + [0.5214772573,-0.05602259067,-0.8514240656], + [-0.1526209796,0.9756261952,-0.1576716965], + [0.8395047985,0.2121673788,0.5002166498] + ], + "t": [ + [-2.829687906], + [192.8140289], + [298.6606918] + ] + }, + { + "name": "05_19", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 19, + "K": [ + [744.259,0,353.379], + [0,744.524,245.823], + [0,0,1] + ], + "distCoef": [-0.320328,0.0298824,0.00026675,-0.00161079,0.123162], + "R": [ + [0.5556726344,-0.05485450779,-0.8295896012], + [-0.2099711545,0.9562161648,-0.2038694692], + [0.8044501462,0.2874745713,0.519825291] + ], + "t": [ + [-1.476630227], + [134.2745178], + [310.4571486] + ] + }, + { + "name": "05_20", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 20, + "K": [ + [743.679,0,405.845], + [0,743.856,234.88], + [0,0,1] + ], + "distCoef": [-0.326644,0.0646831,0.000108119,5.73367e-05,0.058946], + "R": [ + [0.447769915,-0.01338423954,-0.894048637], + [-0.18660487,0.9764723016,-0.1080762074], + [0.8744602482,0.2152271039,0.4347373552] + ], + "t": [ + [-41.39083575], + [143.2049031], + [297.8732354] + ] + }, + { + "name": "05_21", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 21, + "K": [ + [746.956,0,354.763], + [0,747.081,232.068], + [0,0,1] + ], + "distCoef": [-0.333648,0.0797639,-0.000768992,-0.00091097,0.0508097], + "R": [ + [0.5053420531,-0.009379958189,-0.8628681393], + [-0.2526298673,0.9545207072,-0.1583299394], + [0.8251106347,0.2979970402,0.4799897963] + ], + "t": [ + [-19.66925616], + [96.29580053], + [309.4868577] + ] + }, + { + "name": "05_22", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 22, + "K": [ + [748.369,0,375.575], + [0,748.642,247.648], + [0,0,1] + ], + "distCoef": [-0.339087,0.143465,-0.000470446,0.00132222,-0.0624301], + "R": [ + [0.54260376,-0.05746408722,-0.8380209057], + [-0.1470082191,0.975763273,-0.1620944744], + [0.8270246327,0.2111490322,0.5210051277] + ], + "t": [ + [3.173863757], + [116.0988382], + [299.4207466] + ] + }, + { + "name": "05_23", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 23, + "K": [ + [744.544,0,368.615], + [0,744.426,281.181], + [0,0,1] + ], + "distCoef": [-0.322575,0.0664483,0.00114224,0.000391788,0.0483369], + "R": [ + [0.5347472888,-0.05715349527,-0.8430769924], + [-0.1466458645,0.9762943366,-0.1591991164], + 
[0.832190079,0.2087650503,0.5136894259] + ], + "t": [ + [16.7223507], + [130.5590862], + [298.5444367] + ] + }, + { + "name": "05_24", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 24, + "K": [ + [743.308,0,356.74], + [0,743.243,228.93], + [0,0,1] + ], + "distCoef": [-0.321093,0.0447792,0.000127467,-8.40104e-05,0.095825], + "R": [ + [0.5706235669,-0.133891243,-0.8102233519], + [-0.1678811389,0.9467635938,-0.2746900447], + [0.8038685639,0.2927658322,0.5177678046] + ], + "t": [ + [6.742844805], + [124.9131408], + [309.8640068] + ] + }, + { + "name": "06_01", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 1, + "K": [ + [744.518,0,344.042], + [0,744.512,240.289], + [0,0,1] + ], + "distCoef": [-0.313532,-0.0139368,0.00116047,-0.000125352,0.195046], + "R": [ + [-0.3305715804,0.1011846603,-0.9383411399], + [-0.314462461,0.9256148845,0.2105954561], + [0.8898515555,0.3646899369,-0.2741631979] + ], + "t": [ + [-23.56718534], + [104.1648487], + [320.754952] + ] + }, + { + "name": "06_02", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 2, + "K": [ + [748.956,0,345.566], + [0,748.875,227.82], + [0,0,1] + ], + "distCoef": [-0.335662,0.0955564,-6.0167e-05,-0.0012999,0.0278092], + "R": [ + [-0.2903396332,0.1603112194,-0.9433998147], + [-0.341086429,0.9037763758,0.2585504022], + [0.8940709957,0.3968483028,-0.2077221201] + ], + "t": [ + [-2.499901432], + [69.14355517], + [325.2941984] + ] + }, + { + "name": "06_03", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 3, + "K": [ + [743.901,0,369.68], + [0,743.816,251.042], + [0,0,1] + ], + "distCoef": [-0.320568,0.044977,0.000366128,-0.00033077,0.103335], + "R": [ + [-0.3123459653,0.110763308,-0.943488997], + [-0.3278062139,0.9196080197,0.216481353], + [0.891618239,0.3768986331,-0.250926954] + ], + "t": [ + [2.578346941], + [71.05917793], + [323.4074447] + ] + }, + { + "name": "06_04", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 4, + "K": [ + [745.814,0,378.476], + [0,745.908,222.393], + [0,0,1] + ], + "distCoef": [-0.316287,0.0251632,0.000357033,0.00145486,0.13215], + "R": [ + [-0.2756543214,0.09031338143,-0.9570048005], + [-0.3333214643,0.9248259371,0.1832860813], + [0.9016160472,0.3695138418,-0.2248288776] + ], + "t": [ + [26.15902854], + [86.10496093], + [322.4382284] + ] + }, + { + "name": "06_05", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 5, + "K": [ + [750.419,0,363.736], + [0,750.614,222.964], + [0,0,1] + ], + "distCoef": [-0.344753,0.14329,-0.000836382,-0.000451111,-0.060951], + "R": [ + [-0.2930259634,0.06094491301,-0.9541601031], + [-0.3875087878,0.9047544541,0.1767945619], + [0.8740553324,0.4215508218,-0.2414998562] + ], + "t": [ + [36.26889278], + [61.41890121], + [327.3260635] + ] + }, + { + "name": "06_06", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 6, + "K": [ + [747.394,0,354.724], + [0,747.506,211.184], + [0,0,1] + ], + "distCoef": [-0.329009,0.0921746,-0.00050966,0.000333806,0.021085], + "R": [ + [-0.2297156979,0.02557529828,-0.9729216835], + [-0.3964529538,0.9104994627,0.1175405629], + [0.888850805,0.4127185877,-0.199016617] + ], + "t": [ + [62.78312093], + [81.38139883], + [324.7093469] + ] + }, + { + "name": "06_07", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 7, + "K": [ + [746.623,0,374.989], + [0,746.758,209.923], + [0,0,1] + ], + "distCoef": [-0.319339,0.0433323,-0.00139256,0.000754597,0.0938733], + "R": [ + [-0.2846142448,0.03267216609,-0.9580852056], + 
[-0.3313740809,0.934457856,0.1303063082], + [0.8995476364,0.3545716359,-0.255133308] + ], + "t": [ + [45.81195811], + [121.7115234], + [320.8009986] + ] + }, + { + "name": "06_08", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 8, + "K": [ + [745.971,0,357.954], + [0,746.024,209.947], + [0,0,1] + ], + "distCoef": [-0.314348,0.0246684,-0.0014997,0.000635776,0.111152], + "R": [ + [-0.3038162213,-0.0261928812,-0.9523705354], + [-0.3441704234,0.9351353343,0.08407512184], + [0.8883931693,0.3533211563,-0.2931240987] + ], + "t": [ + [41.47715732], + [140.438376], + [322.3540865] + ] + }, + { + "name": "06_09", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 9, + "K": [ + [742.648,0,362.103], + [0,742.703,220.817], + [0,0,1] + ], + "distCoef": [-0.304218,-0.0643312,-0.000139411,-0.000234647,0.289172], + "R": [ + [-0.2807259034,-0.0411671215,-0.958904706], + [-0.3740921558,0.9247597922,0.06981680165], + [0.8838823599,0.3783181134,-0.2750043253] + ], + "t": [ + [37.64720227], + [153.3424109], + [325.0305142] + ] + }, + { + "name": "06_10", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 10, + "K": [ + [747.72,0,366.165], + [0,747.851,213.209], + [0,0,1] + ], + "distCoef": [-0.324647,0.0523798,-0.00077308,-0.000271098,0.0916616], + "R": [ + [-0.2880158499,0.02777358159,-0.957222805], + [-0.3788720768,0.9147158267,0.1405379157], + [0.8794900907,0.4031421393,-0.2529300217] + ], + "t": [ + [33.16578395], + [147.9736193], + [327.8869733] + ] + }, + { + "name": "06_11", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 11, + "K": [ + [745.331,0,369.444], + [0,745.587,207.732], + [0,0,1] + ], + "distCoef": [-0.317455,0.0357855,-0.00041249,0.000556817,0.0920153], + "R": [ + [-0.3142048567,0.04518634316,-0.9482792323], + [-0.3166241188,0.9366885696,0.1495449465], + [0.8949997069,0.3472358248,-0.2800050117] + ], + "t": [ + [26.61359186], + [187.9055539], + [317.8889871] + ] + }, + { + "name": "06_12", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 12, + "K": [ + [747.25,0,346.366], + [0,747.394,225.779], + [0,0,1] + ], + "distCoef": [-0.328454,0.0750084,3.92686e-05,0.00130952,0.0669429], + "R": [ + [-0.2993781475,0.05639323365,-0.9524665495], + [-0.3171785116,0.9355987261,0.1550897014], + [0.8998725002,0.3485323901,-0.2622110915] + ], + "t": [ + [13.58039626], + [195.4066632], + [317.2443523] + ] + }, + { + "name": "06_13", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 13, + "K": [ + [743.861,0,344.414], + [0,743.872,231.421], + [0,0,1] + ], + "distCoef": [-0.307564,-0.0231037,-0.000140407,-0.000635225,0.208058], + "R": [ + [-0.2583036736,0.07116007646,-0.9634393887], + [-0.3357690773,0.9284960528,0.1586007776], + [0.905835713,0.3644603181,-0.2159405881] + ], + "t": [ + [14.66480509], + [172.1699927], + [320.6722019] + ] + }, + { + "name": "06_14", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 14, + "K": [ + [744.949,0,378.98], + [0,744.921,225.408], + [0,0,1] + ], + "distCoef": [-0.321047,0.0567081,-0.000162218,0.000699701,0.0634367], + "R": [ + [-0.3208579847,0.07871363947,-0.9438507915], + [-0.3472646452,0.9173632389,0.1945557869], + [0.8811682132,0.3901907879,-0.267008856] + ], + "t": [ + [-45.70363788], + [100.2282059], + [322.9364507] + ] + }, + { + "name": "06_15", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 15, + "K": [ + [745.712,0,360.895], + [0,745.741,234.163], + [0,0,1] + ], + "distCoef": 
[-0.31006,-0.0103454,0.000398478,0.000813845,0.181221], + "R": [ + [-0.3227895896,0.1367774117,-0.9365355415], + [-0.3406635237,0.9063958148,0.2497898928], + [0.8830375102,0.3996730746,-0.245980058] + ], + "t": [ + [-14.93002532], + [154.0180569], + [326.396188] + ] + }, + { + "name": "06_16", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 16, + "K": [ + [745.931,0,372.193], + [0,746.03,212.813], + [0,0,1] + ], + "distCoef": [-0.325757,0.0830346,-0.000419051,0.00216162,0.0290765], + "R": [ + [-0.311559769,0.02363818266,-0.9499324958], + [-0.312276077,0.9416182622,0.1258518973], + [0.8974486961,0.3358515813,-0.2859887293] + ], + "t": [ + [-41.03283731], + [153.3338286], + [314.9665339] + ] + }, + { + "name": "06_17", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 17, + "K": [ + [744.756,0,368.403], + [0,744.752,202.816], + [0,0,1] + ], + "distCoef": [-0.313223,0.00720848,-0.00119606,0.000542174,0.130737], + "R": [ + [-0.3236003046,0.09291211415,-0.9416210394], + [-0.3175516679,0.9267842511,0.2005788875], + [0.8913157584,0.3639207207,-0.2704032691] + ], + "t": [ + [-41.098271], + [130.5289196], + [319.7107876] + ] + }, + { + "name": "06_18", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 18, + "K": [ + [744.889,0,373.989], + [0,745.092,230.989], + [0,0,1] + ], + "distCoef": [-0.319065,0.0283013,-0.000935078,-0.000739787,0.111424], + "R": [ + [-0.3391260928,0.0773602665,-0.9375547357], + [-0.3008220503,0.9353680392,0.1859911968], + [0.8913470633,0.3451116057,-0.2939360344] + ], + "t": [ + [-22.38901828], + [189.8595323], + [315.0907711] + ] + }, + { + "name": "06_19", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 19, + "K": [ + [743.21,0,358.424], + [0,743.138,251.445], + [0,0,1] + ], + "distCoef": [-0.316603,0.00648778,0.000375455,-0.000277526,0.16085], + "R": [ + [-0.34774011,0.09728469559,-0.9325301624], + [-0.3453355468,0.9113903597,0.2238548019], + [0.8716766465,0.399879107,-0.2833311204] + ], + "t": [ + [-13.32995299], + [105.9918293], + [324.8353482] + ] + }, + { + "name": "06_20", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 20, + "K": [ + [745.315,0,375.798], + [0,745.342,214.671], + [0,0,1] + ], + "distCoef": [-0.317661,0.021421,-0.000865931,0.000266434,0.124612], + "R": [ + [-0.2889220833,0.06736289331,-0.9549797225], + [-0.355115135,0.918816287,0.172249446], + [0.8890541438,0.3888944219,-0.2415447329] + ], + "t": [ + [16.18922492], + [101.394333], + [324.5371374] + ] + }, + { + "name": "06_21", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 21, + "K": [ + [743.803,0,341.335], + [0,743.805,238.935], + [0,0,1] + ], + "distCoef": [-0.305727,-0.0577903,-0.000702133,-0.00085287,0.249773], + "R": [ + [-0.2867564999,0.0564691645,-0.9563377767], + [-0.3641939053,0.9168870998,0.1633427245], + [0.8860775977,0.3951319776,-0.24235761] + ], + "t": [ + [29.77890794], + [113.785435], + [325.4988706] + ] + }, + { + "name": "06_22", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 22, + "K": [ + [745.285,0,373.625], + [0,745.232,235.431], + [0,0,1] + ], + "distCoef": [-0.319503,0.0483306,-0.000362012,0.00120612,0.080115], + "R": [ + [-0.3458253526,0.08893014684,-0.9340750797], + [-0.3902640321,0.8916714915,0.2293816395], + [0.8532870623,0.4438618933,-0.2736563703] + ], + "t": [ + [18.96316513], + [116.1979138], + [333.2100324] + ] + }, + { + "name": "06_23", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 23, + "K": [ + 
[744.536,0,366.592], + [0,744.501,224.531], + [0,0,1] + ], + "distCoef": [-0.312705,-0.014521,0.000375544,8.36622e-05,0.188212], + "R": [ + [-0.3181142509,0.09038767844,-0.94373375], + [-0.4081954831,0.8853909401,0.2223945386], + [0.8556750382,0.455974726,-0.2447596336] + ], + "t": [ + [6.972278595], + [119.3141773], + [334.5341124] + ] + }, + { + "name": "06_24", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 24, + "K": [ + [744.6,0,358.514], + [0,744.655,220.515], + [0,0,1] + ], + "distCoef": [-0.30152,-0.0573254,-0.000856409,-0.000288003,0.227002], + "R": [ + [-0.3545583501,0.05661769889,-0.9333181732], + [-0.3227337004,0.929412527,0.1789841147], + [0.8775712706,0.3646735401,-0.3112585327] + ], + "t": [ + [-25.22428756], + [139.0090865], + [319.514146] + ] + }, + { + "name": "07_01", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 1, + "K": [ + [745.635,0,384.154], + [0,745.75,223.733], + [0,0,1] + ], + "distCoef": [-0.328279,0.104082,-0.000872931,0.00144148,0.00404207], + "R": [ + [-0.9078071857,0.03344162453,-0.4180523547], + [0.00958043905,0.9982092569,0.05904654639], + [0.4192783428,0.049597754,-0.9065019217] + ], + "t": [ + [-23.31434773], + [152.0493649], + [282.3431498] + ] + }, + { + "name": "07_02", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 2, + "K": [ + [746.944,0,375.746], + [0,747.112,207.581], + [0,0,1] + ], + "distCoef": [-0.321827,0.078307,-0.00112183,4.35862e-05,0.0396046], + "R": [ + [-0.9306435439,0.005427673037,-0.3658867782], + [-0.02457764723,0.9967049447,0.07729936951], + [0.3651007167,0.08093079535,-0.9274436225] + ], + "t": [ + [-62.01828104], + [131.8151818], + [284.3018088] + ] + }, + { + "name": "07_03", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 3, + "K": [ + [743.881,0,383.122], + [0,743.965,237.105], + [0,0,1] + ], + "distCoef": [-0.311008,0.000325185,-0.000782967,0.00055371,0.154469], + "R": [ + [-0.9217631286,0.06528892794,-0.3822173342], + [0.03992506463,0.996464058,0.07392814261], + [0.3856925251,0.05288418425,-0.9211104924] + ], + "t": [ + [-43.22640533], + [121.5976731], + [282.3432951] + ] + }, + { + "name": "07_04", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 4, + "K": [ + [743.69,0,370.307], + [0,743.828,227.79], + [0,0,1] + ], + "distCoef": [-0.303025,-0.0263668,-0.000445815,0.00071591,0.180166], + "R": [ + [-0.9409979296,0.06863452498,-0.3313792366], + [0.04529042225,0.9959498431,0.07767037874], + [0.3353679682,0.05807936004,-0.9402952269] + ], + "t": [ + [-38.37277115], + [113.0266013], + [281.4230584] + ] + }, + { + "name": "07_05", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 5, + "K": [ + [743.998,0,375.484], + [0,744.299,220.79], + [0,0,1] + ], + "distCoef": [-0.310908,0.00595719,-5.69241e-05,0.000519591,0.131448], + "R": [ + [-0.9269484075,0.08594630429,-0.3652121064], + [0.04467826469,0.9917683984,0.1199970688], + [0.3725191305,0.09491404865,-0.9231580692] + ], + "t": [ + [-23.36597135], + [80.23534001], + [286.4206576] + ] + }, + { + "name": "07_06", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 6, + "K": [ + [745.602,0,379.444], + [0,745.67,224.268], + [0,0,1] + ], + "distCoef": [-0.303286,-0.0402497,-0.00132196,0.00012981,0.210105], + "R": [ + [-0.923694641,0.09319000989,-0.3716232396], + [0.04673933936,0.9901316615,0.1321163393], + [0.3802678586,0.1046657299,-0.9189349491] + ], + "t": [ + [-0.9450645075], + [68.69008136], + [287.3198917] + ] + }, + { + "name": "07_07", + 
"type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 7, + "K": [ + [745.731,0,365.823], + [0,745.481,229.263], + [0,0,1] + ], + "distCoef": [-0.308219,-0.0231519,0.000110727,0.000180113,0.209056], + "R": [ + [-0.917494877,0.04967698427,-0.3946331815], + [0.001316203411,0.9925436367,0.1218827179], + [0.3977454189,0.1113073518,-0.9107190869] + ], + "t": [ + [18.92434207], + [79.05208738], + [288.1952445] + ] + }, + { + "name": "07_08", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 8, + "K": [ + [745.611,0,393.911], + [0,745.863,244.069], + [0,0,1] + ], + "distCoef": [-0.318705,0.0460564,0.000184451,0.000507881,0.0745222], + "R": [ + [-0.9083609307,0.09070031,-0.4082326216], + [0.05268537174,0.9932388068,0.1034452715], + [0.4148550001,0.07245775567,-0.9069979066] + ], + "t": [ + [48.31394514], + [81.42535523], + [283.8217571] + ] + }, + { + "name": "07_09", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 9, + "K": [ + [745.77,0,370.33], + [0,746.047,217.48], + [0,0,1] + ], + "distCoef": [-0.321786,0.069205,4.67533e-05,5.58471e-05,0.0372207], + "R": [ + [-0.9211612824,0.007939579541,-0.3891000576], + [-0.02433705705,0.996659961,0.07795274024], + [0.3884193603,0.08127659646,-0.9178913418] + ], + "t": [ + [49.65486911], + [97.0413663], + [285.6851525] + ] + }, + { + "name": "07_10", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 10, + "K": [ + [744.504,0,363.969], + [0,744.833,247.068], + [0,0,1] + ], + "distCoef": [-0.335916,0.144192,-0.000823922,-0.000462503,-0.076361], + "R": [ + [-0.9225918644,-0.01579725191,-0.3854538864], + [-0.05416624958,0.9945677902,0.08888716518], + [0.381955847,0.1028851669,-0.9184358297] + ], + "t": [ + [40.86826856], + [113.0714764], + [288.4804376] + ] + }, + { + "name": "07_11", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 11, + "K": [ + [744.999,0,387.199], + [0,745.384,239.21], + [0,0,1] + ], + "distCoef": [-0.313806,0.0330336,-7.01628e-05,0.00132279,0.0985619], + "R": [ + [-0.9109471902,-0.006922747781,-0.4124648981], + [-0.04540685091,0.9954664163,0.08357530662], + [0.4100163832,0.09486142287,-0.9071316751] + ], + "t": [ + [65.64483344], + [130.0336458], + [285.8729547] + ] + }, + { + "name": "07_12", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 12, + "K": [ + [743.664,0,350.646], + [0,743.861,222.503], + [0,0,1] + ], + "distCoef": [-0.300623,-0.0667329,-0.000394627,-0.00107967,0.272621], + "R": [ + [-0.9268683851,0.02536908581,-0.3745282449], + [0.006256924582,0.9986192343,0.0521581796], + [0.3753343145,0.04600037271,-0.9257473295] + ], + "t": [ + [57.10937388], + [163.0891099], + [280.8513179] + ] + }, + { + "name": "07_13", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 13, + "K": [ + [744.176,0,390.977], + [0,744.332,246.666], + [0,0,1] + ], + "distCoef": [-0.327257,0.10216,-0.000582688,0.00201022,0.0126373], + "R": [ + [-0.9290120658,-0.01909429991,-0.3695564765], + [-0.04453762663,0.9971777882,0.06043888335], + [0.3673594716,0.07260762025,-0.9272406117] + ], + "t": [ + [26.5211548], + [160.1280328], + [285.2494721] + ] + }, + { + "name": "07_14", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 14, + "K": [ + [744.044,0,360.721], + [0,744.333,226.474], + [0,0,1] + ], + "distCoef": [-0.311296,-0.00746755,-0.00165304,-0.000168766,0.17966], + "R": [ + [-0.9305033137,0.06302128148,-0.3608211486], + [0.03165130136,0.9952368859,0.09220485899], + [0.3649133847,0.07437646791,-0.9280659258] + ], + "t": [ 
+ [37.8814582], + [178.0304645], + [285.6034633] + ] + }, + { + "name": "07_15", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 15, + "K": [ + [744.03,0,362.147], + [0,744.447,229.329], + [0,0,1] + ], + "distCoef": [-0.314413,0.0379836,-0.000745365,2.01034e-05,0.0898919], + "R": [ + [-0.9265853662,0.03975182478,-0.373977742], + [0.01411888978,0.9973739765,0.07103385017], + [0.3758193929,0.06053877555,-0.9247133829] + ], + "t": [ + [16.14446289], + [185.021862], + [282.5666312] + ] + }, + { + "name": "07_16", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 16, + "K": [ + [743.673,0,368.897], + [0,743.962,238.378], + [0,0,1] + ], + "distCoef": [-0.314216,0.0200058,-0.0002257,-0.000345788,0.11969], + "R": [ + [-0.9350006114,0.024774913,-0.3537796777], + [-0.006073372197,0.9962920776,0.08582080369], + [0.354594093,0.08239113958,-0.9313832344] + ], + "t": [ + [-10.51100446], + [168.6528502], + [285.9762696] + ] + }, + { + "name": "07_17", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 17, + "K": [ + [744.686,0,385.346], + [0,745.049,227.767], + [0,0,1] + ], + "distCoef": [-0.317176,0.0455424,-0.000136917,0.000534438,0.0739505], + "R": [ + [-0.908638426,0.05327873405,-0.4141709639], + [0.04010861029,0.9983767379,0.04043746577], + [0.4156531128,0.02013121347,-0.9093004036] + ], + "t": [ + [-7.322164421], + [189.4505625], + [275.8940033] + ] + }, + { + "name": "07_18", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 18, + "K": [ + [746.282,0,378.432], + [0,746.624,237.775], + [0,0,1] + ], + "distCoef": [-0.320382,0.058651,0.000451819,0.000534403,0.062414], + "R": [ + [-0.916555331,0.01769811564,-0.3995160846], + [-0.01470055472,0.9968539618,0.07788499561], + [0.3996376094,0.077259016,-0.9134116408] + ], + "t": [ + [-37.37478029], + [164.0712496], + [285.8486829] + ] + }, + { + "name": "07_19", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 19, + "K": [ + [743.687,0,374.362], + [0,743.883,225.048], + [0,0,1] + ], + "distCoef": [-0.322503,0.0715253,7.77555e-05,0.000517375,0.0539586], + "R": [ + [-0.9239544056,0.01616424802,-0.3821609261], + [-0.020576852,0.9955594902,0.09185801365], + [0.3819487525,0.09273628522,-0.9195189677] + ], + "t": [ + [-17.14443298], + [133.4982453], + [287.2304165] + ] + }, + { + "name": "07_20", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 20, + "K": [ + [745.801,0,368.555], + [0,746.033,233.687], + [0,0,1] + ], + "distCoef": [-0.317685,0.0475287,-3.52395e-05,0.000512076,0.0805211], + "R": [ + [-0.9241543321,-0.01069440692,-0.3818696113], + [-0.04324692472,0.9961108974,0.076764468], + [0.3795635307,0.08745690199,-0.9210227014] + ], + "t": [ + [-16.56758847], + [113.8864258], + [286.5218078] + ] + }, + { + "name": "07_21", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 21, + "K": [ + [744.1,0,390.405], + [0,744.284,237.593], + [0,0,1] + ], + "distCoef": [-0.322514,0.0588182,0.000321804,0.00147162,0.0689104], + "R": [ + [-0.9369369296,0.006948104691,-0.3494294118], + [-0.02026391849,0.9970404822,0.07415962808], + [0.3489105381,0.07656370335,-0.9340232522] + ], + "t": [ + [-3.618393153], + [111.1940513], + [285.5030449] + ] + }, + { + "name": "07_22", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 22, + "K": [ + [747.001,0,381.032], + [0,747.132,234.437], + [0,0,1] + ], + "distCoef": [-0.324882,0.0577225,-0.00134011,-0.00135265,0.0819201], + "R": [ + [-0.9282296861,0.06047570579,-0.3670590401], + 
[0.02337036389,0.9942284933,0.1047068731], + [0.3712727784,0.08861372459,-0.9242857414] + ], + "t": [ + [25.6408869], + [119.8980517], + [286.9452799] + ] + }, + { + "name": "07_23", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 23, + "K": [ + [743.981,0,363.51], + [0,744.339,258.582], + [0,0,1] + ], + "distCoef": [-0.313768,0.0101513,0.00111395,-0.00104272,0.1345], + "R": [ + [-0.9138255678,-0.001018785166,-0.4061056435], + [-0.03060482875,0.9973259054,0.06636552484], + [0.4049520663,0.0730753071,-0.9114130916] + ], + "t": [ + [24.3580015], + [146.5427691], + [284.2261849] + ] + }, + { + "name": "07_24", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 24, + "K": [ + [744.847,0,398.685], + [0,745.01,270.264], + [0,0,1] + ], + "distCoef": [-0.328511,0.106892,0.000179407,0.00152869,-0.00291861], + "R": [ + [-0.915939158,0.01937877811,-0.4008490012], + [-0.01852012751,0.9957282098,0.09045627137], + [0.4008895904,0.09027621565,-0.9116675607] + ], + "t": [ + [6.147743662], + [145.7157982], + [287.1579534] + ] + }, + { + "name": "08_01", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 1, + "K": [ + [743.703,0,360.221], + [0,744.108,227.682], + [0,0,1] + ], + "distCoef": [-0.309411,-0.0239561,-0.001159,0.000249551,0.191643], + "R": [ + [-0.6256262875,-0.004424555618,-0.7801103586], + [-0.1745259617,0.9754325172,0.134432485], + [0.7603502068,0.2202540071,-0.6110284243] + ], + "t": [ + [5.656398722], + [175.9817187], + [302.7764948] + ] + }, + { + "name": "08_02", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 2, + "K": [ + [747.203,0,376.344], + [0,747.435,209.923], + [0,0,1] + ], + "distCoef": [-0.331616,0.11313,4.7739e-05,0.00134479,-0.0154118], + "R": [ + [-0.6724252099,0.1092176997,-0.7320627235], + [-0.09964199407,0.9666926758,0.2357472025], + [0.7334274403,0.2314665517,-0.6391458561] + ], + "t": [ + [-0.9742570867], + [185.4525058], + [305.0714088] + ] + }, + { + "name": "08_03", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 3, + "K": [ + [747.234,0,368.091], + [0,747.404,224.293], + [0,0,1] + ], + "distCoef": [-0.329137,0.0905459,-0.000565165,-0.000329878,0.0231933], + "R": [ + [-0.656899377,0.0205246652,-0.7536988435], + [-0.2005757989,0.9588523348,0.2009267253], + [0.7268098496,0.2831623883,-0.6257527502] + ], + "t": [ + [-32.7353206], + [153.4285774], + [313.8994992] + ] + }, + { + "name": "08_04", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 4, + "K": [ + [747.386,0,362.788], + [0,747.713,235.953], + [0,0,1] + ], + "distCoef": [-0.341304,0.154379,-0.000777774,-0.000654564,-0.0867958], + "R": [ + [-0.6631685233,0.06657565756,-0.7455033143], + [-0.1433461882,0.9663011288,0.2138083224], + [0.7346151238,0.2486560079,-0.6312771259] + ], + "t": [ + [-22.98714967], + [144.6795235], + [307.788251] + ] + }, + { + "name": "08_05", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 5, + "K": [ + [745.746,0,376.748], + [0,745.752,233.642], + [0,0,1] + ], + "distCoef": [-0.32088,0.0642866,0.000720856,0.00118823,0.0489989], + "R": [ + [-0.6568191598,0.04935682433,-0.7524310568], + [-0.1452125328,0.970898021,0.19044777], + [0.7399337211,0.2343521638,-0.6305371929] + ], + "t": [ + [-42.15667108], + [135.9397275], + [306.138018] + ] + }, + { + "name": "08_06", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 6, + "K": [ + [743.581,0,359.642], + [0,743.625,223.766], + [0,0,1] + ], + "distCoef": 
[-0.309434,-0.0145066,-0.000137344,-0.000208072,0.169515], + "R": [ + [-0.6714433509,-0.01781555577,-0.7408417054], + [-0.2359597182,0.9528188479,0.1909430659], + [0.7024861834,0.3030162521,-0.6439676336] + ], + "t": [ + [-57.25895983], + [89.79547495], + [311.6502108] + ] + }, + { + "name": "08_07", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 7, + "K": [ + [745.148,0,371.237], + [0,745.103,220.621], + [0,0,1] + ], + "distCoef": [-0.318768,0.034703,-0.000217256,0.000447556,0.0954449], + "R": [ + [-0.7012843801,0.01049644172,-0.7128043511], + [-0.1276034542,0.9818947595,0.1400001421], + [0.7013683602,0.1891362102,-0.6872480755] + ], + "t": [ + [-43.70728874], + [118.2041714], + [298.0588141] + ] + }, + { + "name": "08_08", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 8, + "K": [ + [743.06,0,391.891], + [0,743.237,230.861], + [0,0,1] + ], + "distCoef": [-0.322908,0.0553375,0.000339696,0.00130059,0.0777268], + "R": [ + [-0.6299217379,0.07604043096,-0.7729272003], + [-0.1362742651,0.9689348188,0.2063846932], + [0.7646096578,0.2353362908,-0.5999907511] + ], + "t": [ + [-3.915515028], + [82.19520224], + [306.2551203] + ] + }, + { + "name": "08_09", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 9, + "K": [ + [746.456,0,356.955], + [0,746.592,233.352], + [0,0,1] + ], + "distCoef": [-0.320498,0.0507213,0.000550471,0.000126643,0.0741224], + "R": [ + [-0.684872543,0.06612723284,-0.7256561093], + [-0.09767122593,0.9785553778,0.1813551881], + [0.7220872049,0.1950809107,-0.6637269822] + ], + "t": [ + [-6.194765679], + [87.40737989], + [301.7039487] + ] + }, + { + "name": "08_10", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 10, + "K": [ + [747.33,0,361.528], + [0,747.71,220.883], + [0,0,1] + ], + "distCoef": [-0.322455,0.0389243,0.00118705,0.000768992,0.12227], + "R": [ + [-0.6055801648,0.01225702185,-0.7956899079], + [-0.1760343759,0.973047512,0.1489645524], + [0.7760699469,0.2302787546,-0.5871006154] + ], + "t": [ + [32.64204154], + [89.24589085], + [303.2777117] + ] + }, + { + "name": "08_11", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 11, + "K": [ + [747.774,0,350.264], + [0,747.981,233.163], + [0,0,1] + ], + "distCoef": [-0.312094,-0.0263709,0.00148203,-0.000526901,0.233175], + "R": [ + [-0.6738094891,0.06987822761,-0.7355935058], + [-0.1142917175,0.9736808734,0.1971876265], + [0.730012449,0.216939139,-0.6480889092] + ], + "t": [ + [35.79986479], + [83.7107121], + [303.8218457] + ] + }, + { + "name": "08_12", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 12, + "K": [ + [744.899,0,366.47], + [0,744.848,222.726], + [0,0,1] + ], + "distCoef": [-0.30396,-0.0418844,-0.00058576,-0.000160605,0.231689], + "R": [ + [-0.6160341517,-0.01803679921,-0.7875129191], + [-0.1884772348,0.9740736778,0.1251271436], + [0.7648387123,0.2255108512,-0.6034621779] + ], + "t": [ + [61.57356311], + [97.36793025], + [301.4047959] + ] + }, + { + "name": "08_13", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 13, + "K": [ + [746.859,0,368.586], + [0,747.139,224.684], + [0,0,1] + ], + "distCoef": [-0.318047,0.0428323,-0.000551709,0.000692584,0.0895927], + "R": [ + [-0.6485099772,-0.04236983322,-0.7600260566], + [-0.2235198928,0.9650338886,0.1369249841], + [0.7276494121,0.258678161,-0.6353046057] + ], + "t": [ + [38.13208236], + [106.9572182], + [307.8393222] + ] + }, + { + "name": "08_14", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 14, + "K": [ + 
[744.505,0,357.32], + [0,744.53,228.165], + [0,0,1] + ], + "distCoef": [-0.303025,-0.0702212,0.000533599,-0.000753966,0.269146], + "R": [ + [-0.6825611814,-0.04644305139,-0.729351271], + [-0.1871280484,0.9758162042,0.1129859684], + [0.7064653757,0.213601916,-0.6747450588] + ], + "t": [ + [41.82592662], + [132.5834032], + [304.3020009] + ] + }, + { + "name": "08_15", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 15, + "K": [ + [745.837,0,357.73], + [0,745.88,221.629], + [0,0,1] + ], + "distCoef": [-0.3197,0.0439542,-0.00136466,0.00170195,0.109142], + "R": [ + [-0.6069626381,-0.02117938565,-0.7944481037], + [-0.2107505505,0.968144583,0.1352045554], + [0.7662770787,0.2494944888,-0.5920911574] + ], + "t": [ + [64.87618524], + [141.1933336], + [303.6799609] + ] + }, + { + "name": "08_16", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 16, + "K": [ + [744.767,0,345.102], + [0,744.781,229.581], + [0,0,1] + ], + "distCoef": [-0.307131,-0.033453,0.0002274,-0.000565369,0.224073], + "R": [ + [-0.6350262321,-0.03398669713,-0.7717425665], + [-0.2527580664,0.9531820242,0.1660041824], + [0.7299692079,0.3004811693,-0.6138860012] + ], + "t": [ + [34.611726], + [134.434862], + [314.3473002] + ] + }, + { + "name": "08_17", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 17, + "K": [ + [743.543,0,370.548], + [0,743.847,224.118], + [0,0,1] + ], + "distCoef": [-0.308645,-0.0111516,9.80345e-05,-0.000744439,0.160705], + "R": [ + [-0.6124225565,-0.05791042639,-0.7884066177], + [-0.1936876385,0.977907652,0.07862393367], + [0.7664357188,0.2008556864,-0.610109238] + ], + "t": [ + [28.62018644], + [186.6213498], + [297.6164741] + ] + }, + { + "name": "08_18", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 18, + "K": [ + [743.39,0,376.249], + [0,743.751,216.723], + [0,0,1] + ], + "distCoef": [-0.319375,0.0602092,-1.05699e-05,0.00110696,0.0487054], + "R": [ + [-0.6887185447,0.08181736584,-0.720397588], + [-0.1043667464,0.9720764384,0.2101784484], + [0.7174777686,0.2199393475,-0.6609480577] + ], + "t": [ + [20.48604056], + [189.7333893], + [302.8177068] + ] + }, + { + "name": "08_19", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 19, + "K": [ + [747.038,0,360.923], + [0,747.259,204.023], + [0,0,1] + ], + "distCoef": [-0.32724,0.0825647,-0.000697091,0.000733699,0.0397455], + "R": [ + [-0.6726100217,0.03848005322,-0.7389959704], + [-0.1487286588,0.9712392562,0.1859411014], + [0.7248969201,0.2349757278,-0.6475421705] + ], + "t": [ + [3.177324598], + [151.0352965], + [305.3818706] + ] + }, + { + "name": "08_20", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 20, + "K": [ + [747.914,0,388.693], + [0,747.835,242.83], + [0,0,1] + ], + "distCoef": [-0.338429,0.134609,0.00136964,0.000561914,-0.0365273], + "R": [ + [-0.6685313457,0.02780025068,-0.7431641715], + [-0.1765857142,0.9647874561,0.194942684], + [0.722414926,0.2615574708,-0.6400815293] + ], + "t": [ + [-14.15175066], + [129.456494], + [308.9585645] + ] + }, + { + "name": "08_21", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 21, + "K": [ + [746.296,0,369.274], + [0,746.424,219.198], + [0,0,1] + ], + "distCoef": [-0.312598,-0.010091,-0.000298989,-0.000771876,0.160922], + "R": [ + [-0.6341455554,-0.01222382885,-0.7731170626], + [-0.1896201401,0.9718007188,0.1401697733], + [0.7496023059,0.2354866044,-0.6185809907] + ], + "t": [ + [-6.414673774], + [116.5175191], + [305.5663378] + ] + }, + { + "name": "08_22", + "type": "vga", + 
"resolution": [640,480], + "panel": 8, + "node": 22, + "K": [ + [743.609,0,361.562], + [0,743.794,221.87], + [0,0,1] + ], + "distCoef": [-0.314273,0.00142644,4.14402e-05,0.000150079,0.159707], + "R": [ + [-0.6552794634,-0.0176584532,-0.7551801135], + [-0.2007508014,0.9678470127,0.1515627784], + [0.7282224527,0.2509189891,-0.6377552198] + ], + "t": [ + [4.541098798], + [103.6271831], + [307.0310837] + ] + }, + { + "name": "08_23", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 23, + "K": [ + [748.435,0,354.117], + [0,748.457,219.552], + [0,0,1] + ], + "distCoef": [-0.324308,0.0627041,-0.000215295,-0.000444561,0.0758056], + "R": [ + [-0.6485698923,-0.03356212054,-0.7604148071], + [-0.2015811272,0.9709293787,0.1290782349], + [0.733976937,0.2370015309,-0.6364810526] + ], + "t": [ + [20.56445448], + [121.4098798], + [305.3725739] + ] + }, + { + "name": "08_24", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 24, + "K": [ + [745.572,0,350.678], + [0,745.729,218.826], + [0,0,1] + ], + "distCoef": [-0.313081,0.00890587,-0.000465969,-0.00023462,0.141032], + "R": [ + [-0.6716141,0.00283216084,-0.7408957278], + [-0.1390702972,0.9817365211,0.1298185488], + [0.7277320613,0.1902245569,-0.6589542206] + ], + "t": [ + [13.95231346], + [154.9907046], + [298.6967118] + ] + }, + { + "name": "09_01", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 1, + "K": [ + [745.377,0,383.314], + [0,745.581,229.65], + [0,0,1] + ], + "distCoef": [-0.311824,0.0113225,-0.000890232,0.000288511,0.13186], + "R": [ + [-0.9888207636,0.1490770148,-0.003088867539], + [0.1339941062,0.8974831076,0.420201917], + [0.06541465384,0.4150904904,-0.9074253732] + ], + "t": [ + [-5.5065201], + [83.70733211], + [330.6651976] + ] + }, + { + "name": "09_02", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 2, + "K": [ + [745.133,0,380.598], + [0,746.347,248.499], + [0,0,1] + ], + "distCoef": [-0.340543,0.0603048,-0.00219925,-0.00194065,0.128165], + "R": [ + [-0.9728033822,0.2090533065,0.09975116351], + [0.2316107347,0.8720009628,0.4312433055], + [0.003169728315,0.4426183864,-0.8967044758] + ], + "t": [ + [-23.76195567], + [58.26386366], + [329.69794] + ] + }, + { + "name": "09_03", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 3, + "K": [ + [745.787,0,382.41], + [0,745.973,216.203], + [0,0,1] + ], + "distCoef": [-0.309439,0.00115788,-0.000439278,0.00154239,0.140783], + "R": [ + [-0.995096801,0.09728424012,-0.01783629191], + [0.08253738581,0.9161639792,0.3922131349], + [0.05449712496,0.3888178749,-0.9197014317] + ], + "t": [ + [6.72584843], + [65.39953055], + [327.4514754] + ] + }, + { + "name": "09_04", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 4, + "K": [ + [744.782,0,384.335], + [0,745.051,230.833], + [0,0,1] + ], + "distCoef": [-0.319171,0.0452003,0.000841339,0.00114337,0.0902557], + "R": [ + [-0.9962766095,0.08536470964,0.01207409478], + [0.0830687393,0.9129812009,0.3994557689], + [0.02307600417,0.3989714189,-0.9166729542] + ], + "t": [ + [12.91980994], + [75.72355875], + [328.4117918] + ] + }, + { + "name": "09_05", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 5, + "K": [ + [745.938,0,386.124], + [0,746.151,234.663], + [0,0,1] + ], + "distCoef": [-0.322825,0.0563734,0.000659785,0.00216478,0.0846192], + "R": [ + [-0.9996885429,0.02460566921,0.004168718214], + [0.02372582958,0.8852416043,0.464525981], + [0.007739649829,0.4644802074,-0.8855496794] + ], + "t": [ + [23.79490616], + [45.57973364], + 
[333.4360246] + ] + }, + { + "name": "09_06", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 6, + "K": [ + [745.533,0,376.456], + [0,745.938,237.583], + [0,0,1] + ], + "distCoef": [-0.324418,0.0645728,-2.52302e-05,0.000695669,0.0784542], + "R": [ + [-0.9996292032,0.0242501169,-0.01238498622], + [0.01720849374,0.9151046106,0.4028491273], + [0.02110269642,0.4024866252,-0.9151826008] + ], + "t": [ + [44.50201086], + [83.15135806], + [329.4460526] + ] + }, + { + "name": "09_07", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 7, + "K": [ + [745.538,0,357.165], + [0,745.859,222.198], + [0,0,1] + ], + "distCoef": [-0.30448,-0.0356601,-0.000261684,-0.000249049,0.226264], + "R": [ + [-0.9994703128,-0.005373675551,-0.03209699996], + [-0.01769948118,0.9174086112,0.3975527241], + [0.02730974481,0.3979102457,-0.9170177829] + ], + "t": [ + [39.28939518], + [107.3778293], + [329.1138759] + ] + }, + { + "name": "09_08", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 8, + "K": [ + [746.393,0,361.584], + [0,746.73,220.937], + [0,0,1] + ], + "distCoef": [-0.31726,0.0513551,0.000643529,-0.000795525,0.0635312], + "R": [ + [-0.9973050313,-0.005865573042,-0.0731318648], + [-0.03181904441,0.9327538711,0.3591068981], + [0.06610766226,0.3604661023,-0.9304267656] + ], + "t": [ + [64.05594666], + [137.6750859], + [322.0323762] + ] + }, + { + "name": "09_09", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 9, + "K": [ + [750.271,0,344.156], + [0,750.817,228.346], + [0,0,1] + ], + "distCoef": [-0.379154,0.391779,0.000225814,-0.000528714,-0.53339], + "R": [ + [-0.9991212371,-0.002089946585,-0.04186150665], + [-0.01685937738,0.9344344151,0.355735977], + [0.03837336329,0.3561291283,-0.933648504] + ], + "t": [ + [51.49527243], + [159.1149955], + [322.66132] + ] + }, + { + "name": "09_10", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 10, + "K": [ + [744.897,0,366.998], + [0,745.389,227.752], + [0,0,1] + ], + "distCoef": [-0.317307,0.0499201,-0.000255849,-0.000414203,0.0689696], + "R": [ + [-0.9956077306,0.03830608065,-0.08542769468], + [0.005132094192,0.9334237661,0.3587390896], + [0.093482129,0.3567249879,-0.9295205079] + ], + "t": [ + [51.9897871], + [163.3127669], + [320.2676037] + ] + }, + { + "name": "09_11", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 11, + "K": [ + [745.812,0,365.568], + [0,746.463,243.927], + [0,0,1] + ], + "distCoef": [-0.334591,0.135033,-0.000586766,0.000648781,-0.0516408], + "R": [ + [-0.998272905,0.02856351314,-0.05133549401], + [0.007150624435,0.926422355,0.3764179707], + [0.05831016891,0.3754007803,-0.9250265825] + ], + "t": [ + [35.7749059], + [177.7642897], + [325.0135255] + ] + }, + { + "name": "09_12", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 12, + "K": [ + [743.195,0,380.908], + [0,743.577,227.789], + [0,0,1] + ], + "distCoef": [-0.308886,-0.0148964,-0.00146189,1.64512e-05,0.167268], + "R": [ + [-0.9994731762,0.02727182579,0.01759595347], + [0.03184982914,0.9284235071,0.3701558858], + [-0.006241669996,0.370521307,-0.9288029945] + ], + "t": [ + [-0.9618436208], + [187.4005014], + [324.424529] + ] + }, + { + "name": "09_13", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 13, + "K": [ + [745.52,0,396.637], + [0,745.641,231.295], + [0,0,1] + ], + "distCoef": [-0.327971,0.0908214,-0.00010844,0.00165709,0.0286999], + "R": [ + [-0.9916965419,0.1263943494,0.02371575794], + [0.1244737261,0.8970729317,0.4239887342], + 
[0.03231501572,0.4234201503,-0.9053568998] + ], + "t": [ + [12.62306638], + [150.537484], + [333.7640249] + ] + }, + { + "name": "09_14", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 14, + "K": [ + [744.91,0,372.463], + [0,744.965,226.423], + [0,0,1] + ], + "distCoef": [-0.308854,-0.0214085,8.99951e-05,0.000256405,0.180188], + "R": [ + [-0.9924146786,0.1180105859,0.03444716585], + [0.1215225705,0.8993517426,0.4199984619], + [0.01858414592,0.4209987468,-0.9068708203] + ], + "t": [ + [-10.68067405], + [162.2988485], + [333.0026074] + ] + }, + { + "name": "09_15", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 15, + "K": [ + [747.246,0,368.718], + [0,747.604,232.745], + [0,0,1] + ], + "distCoef": [-0.3413,0.139342,-0.00187439,-0.000934376,-0.0485015], + "R": [ + [-0.9858543141,0.1593536378,0.05193928607], + [0.1663907088,0.8933064559,0.4175137217], + [0.02013463084,0.4202499184,-0.9071849882] + ], + "t": [ + [-16.61956214], + [147.1949584], + [331.9981158] + ] + }, + { + "name": "09_16", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 16, + "K": [ + [743.705,0,367.288], + [0,743.835,246.124], + [0,0,1] + ], + "distCoef": [-0.316616,0.0215265,-3.02132e-05,0.000242548,0.131229], + "R": [ + [-0.9974602961,0.07055123587,0.009771425173], + [0.06902048446,0.9235857212,0.3771280794], + [0.01758210332,0.3768447143,-0.9261095675] + ], + "t": [ + [-30.73982653], + [139.9628037], + [324.9351286] + ] + }, + { + "name": "09_17", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 17, + "K": [ + [742.776,0,376.251], + [0,742.956,242.934], + [0,0,1] + ], + "distCoef": [-0.317736,0.0249159,0.000195501,0.000659428,0.110976], + "R": [ + [-0.9810894361,0.1806813104,0.06941024814], + [0.1934432758,0.9031273242,0.3833284952], + [0.006574003146,0.389506483,-0.9210002618] + ], + "t": [ + [-32.91453507], + [125.2651482], + [325.9500645] + ] + }, + { + "name": "09_18", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 18, + "K": [ + [744.563,0,383.579], + [0,744.554,245.613], + [0,0,1] + ], + "distCoef": [-0.324188,0.0688729,0.000784842,0.000316148,0.0548859], + "R": [ + [-0.970594512,0.2257141743,0.08366244524], + [0.2406675117,0.9026066179,0.3569039677], + [0.005044007626,0.3665438649,-0.9303870985] + ], + "t": [ + [-30.64851648], + [114.5848432], + [323.1694161] + ] + }, + { + "name": "09_19", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 19, + "K": [ + [745.897,0,369.27], + [0,746.007,226.27], + [0,0,1] + ], + "distCoef": [-0.314378,0.0131268,-0.000749673,-0.000436078,0.140449], + "R": [ + [-0.9929061616,0.1118291068,0.04039313118], + [0.1187797946,0.9175946163,0.3793566667], + [0.005358597494,0.3814634596,-0.9243683867] + ], + "t": [ + [-9.348770156], + [111.4514571], + [325.9373984] + ] + }, + { + "name": "09_20", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 20, + "K": [ + [743.647,0,378.532], + [0,743.859,221.629], + [0,0,1] + ], + "distCoef": [-0.312883,-0.00145442,-0.000725648,-1.91192e-05,0.160115], + "R": [ + [-0.9995005243,0.01416777706,-0.02824846864], + [0.002450265794,0.9259270935,0.3776943389], + [0.03150711165,0.3774364735,-0.9254993303] + ], + "t": [ + [6.861259295], + [105.360829], + [326.1962043] + ] + }, + { + "name": "09_21", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 21, + "K": [ + [745.35,0,364.423], + [0,745.51,242.824], + [0,0,1] + ], + "distCoef": [-0.317615,0.0309367,1.60295e-05,-0.00084218,0.138729], + "R": [ + 
[-0.9983267687,0.03243769532,-0.0478691851], + [0.01510269673,0.9453721551,0.3256430514], + [0.05581730476,0.3243752215,-0.9442802255] + ], + "t": [ + [30.85545331], + [138.1219419], + [318.1793043] + ] + }, + { + "name": "09_22", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 22, + "K": [ + [744.248,0,356.027], + [0,744.436,238.226], + [0,0,1] + ], + "distCoef": [-0.308137,-0.0481761,0.000357682,-8.3696e-05,0.245728], + "R": [ + [-0.9955839097,0.09158830299,-0.0205976113], + [0.07579544873,0.9137019347,0.3992540852], + [0.05538708142,0.3959297379,-0.9166089209] + ], + "t": [ + [35.25988756], + [131.4528362], + [328.3382973] + ] + }, + { + "name": "09_23", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 23, + "K": [ + [744.535,0,363.359], + [0,744.632,254.668], + [0,0,1] + ], + "distCoef": [-0.311847,-0.00198079,0.000462082,-0.000460419,0.174118], + "R": [ + [-0.9946906764,0.1028474748,0.003585412436], + [0.09771594436,0.9329851386,0.346396197], + [0.03228083764,0.3449074195,-0.9380814567] + ], + "t": [ + [12.3985171], + [157.8437238], + [320.5381764] + ] + }, + { + "name": "09_24", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 24, + "K": [ + [743.311,0,385.98], + [0,743.511,229.743], + [0,0,1] + ], + "distCoef": [-0.319602,0.0480118,-0.000790169,0.000699953,0.0704098], + "R": [ + [-0.9986396845,0.04700092247,-0.02257640097], + [0.03617494752,0.9363507866,0.3491970469], + [0.03755201414,0.3479053287,-0.93677731] + ], + "t": [ + [-8.936415104], + [142.1371611], + [321.4431282] + ] + }, + { + "name": "10_01", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 1, + "K": [ + [744.128,0,369.511], + [0,744.056,233.67], + [0,0,1] + ], + "distCoef": [-0.31156,0.00550691,-0.000430053,0.000410016,0.149166], + "R": [ + [-0.6229970612,0.0209936641,0.781942407], + [0.05250109858,0.9985078863,0.01502117145], + [-0.7804603106,0.05041098106,-0.6231696692] + ], + "t": [ + [-46.84686717], + [150.7389104], + [280.0083694] + ] + }, + { + "name": "10_02", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 2, + "K": [ + [743.282,0,357.827], + [0,743.347,211.632], + [0,0,1] + ], + "distCoef": [-0.30948,-0.00718458,0.000285593,0.000547399,0.164062], + "R": [ + [-0.6512046155,0.0977241901,0.7525839032], + [0.103617117,0.9938368806,-0.03939223155], + [-0.7517952126,0.05232817138,-0.6573170626] + ], + "t": [ + [-42.32005533], + [143.0774393], + [282.200902] + ] + }, + { + "name": "10_03", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 3, + "K": [ + [744.012,0,361.17], + [0,744.101,225.217], + [0,0,1] + ], + "distCoef": [-0.303567,-0.0563565,0.000757602,-0.000519388,0.263551], + "R": [ + [-0.6320598226,0.04182219841,0.773790207], + [0.06737176964,0.9977273282,0.001106034268], + [-0.771985379,0.05283069539,-0.6334409935] + ], + "t": [ + [-54.02554254], + [119.7786683], + [280.9354705] + ] + }, + { + "name": "10_04", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 4, + "K": [ + [744.209,0,380.966], + [0,744.256,205.476], + [0,0,1] + ], + "distCoef": [-0.315194,0.0249601,-0.000765583,0.001001,0.10286], + "R": [ + [-0.6566261636,0.06356030055,0.7515332125], + [0.0713368826,0.9972094103,-0.02201002698], + [-0.7508349555,0.03915967697,-0.6593279831] + ], + "t": [ + [-22.38173011], + [115.5645607], + [280.9145253] + ] + }, + { + "name": "10_05", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 5, + "K": [ + [744.499,0,353.834], + [0,744.652,215.524], + [0,0,1] + ], 
+ "distCoef": [-0.317042,0.0236932,-0.00147688,-0.000206715,0.11602], + "R": [ + [-0.6480155592,0.1057846486,0.754244949], + [0.1559047408,0.9877614348,-0.004589090624], + [-0.7454995284,0.1146165612,-0.6565771067] + ], + "t": [ + [-17.37690425], + [72.84298088], + [287.4167752] + ] + }, + { + "name": "10_06", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 6, + "K": [ + [746.493,0,367.328], + [0,746.754,207.575], + [0,0,1] + ], + "distCoef": [-0.323089,0.0587326,-0.000981175,-0.000221417,0.0550321], + "R": [ + [-0.6607542091,0.07289791872,0.74705406], + [0.1340507848,0.9907326878,0.02188900409], + [-0.738535214,0.1146064347,-0.6644028167] + ], + "t": [ + [3.021864726], + [64.04371811], + [286.9062935] + ] + }, + { + "name": "10_07", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 7, + "K": [ + [744.949,0,365.308], + [0,744.944,217.014], + [0,0,1] + ], + "distCoef": [-0.320697,0.0459897,0.000335318,2.89241e-06,0.0947246], + "R": [ + [-0.643287111,0.03528116955,0.764811697], + [0.0902182212,0.9954712387,0.02996140018], + [-0.7602909742,0.08827373343,-0.6435568215] + ], + "t": [ + [9.776307982], + [84.51813798], + [285.3816638] + ] + }, + { + "name": "10_08", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 8, + "K": [ + [748.112,0,395.78], + [0,748.17,229.575], + [0,0,1] + ], + "distCoef": [-0.325424,0.0774932,-0.000546,0.000524276,0.0351183], + "R": [ + [-0.6241633069,0.05185263499,0.7795713377], + [0.04102617023,0.9985938587,-0.03357318505], + [-0.7802160084,0.0110276762,-0.6254129601] + ], + "t": [ + [-46.24758235], + [183.5392889], + [272.6641799] + ] + }, + { + "name": "10_09", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 9, + "K": [ + [746.122,0,370.333], + [0,746.261,210.753], + [0,0,1] + ], + "distCoef": [-0.323285,0.0813962,-0.00031195,0.00117949,0.0118242], + "R": [ + [-0.6717702835,0.002860846795,0.7407540089], + [0.1085475528,0.9895782107,0.09461708989], + [-0.7327633417,0.1439679842,-0.6650797731] + ], + "t": [ + [53.6134591], + [78.01841366], + [288.9552018] + ] + }, + { + "name": "10_10", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 10, + "K": [ + [746.498,0,355.775], + [0,746.616,218.183], + [0,0,1] + ], + "distCoef": [-0.320479,0.0482256,-0.000295345,0.000515541,0.088746], + "R": [ + [-0.6274497943,0.01735785812,0.7784635254], + [0.05740772193,0.9980618939,0.02401685623], + [-0.7765378993,0.0597591891,-0.6272302051] + ], + "t": [ + [35.32452291], + [122.8912729], + [283.9520693] + ] + }, + { + "name": "10_11", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 11, + "K": [ + [745.209,0,387.948], + [0,745.058,237.868], + [0,0,1] + ], + "distCoef": [-0.312054,0.0106095,2.04654e-05,-0.000407432,0.122509], + "R": [ + [-0.663538187,0.0558857692,0.74605218], + [0.09086672278,0.9958436408,0.006219474654], + [-0.742603739,0.07191817555,-0.6658584406] + ], + "t": [ + [70.41193089], + [130.903078], + [283.3216663] + ] + }, + { + "name": "10_12", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 12, + "K": [ + [746.923,0,359.191], + [0,746.955,219.728], + [0,0,1] + ], + "distCoef": [-0.34193,0.180291,-0.0011698,0.000387434,-0.142263], + "R": [ + [-0.6573529902,0.02662022179,0.7531124817], + [0.0203979596,0.9996382488,-0.01752982786], + [-0.7533066902,0.003838673213,-0.6576581901] + ], + "t": [ + [61.18715226], + [173.543055], + [273.2477614] + ] + }, + { + "name": "10_13", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 
13, + "K": [ + [747.063,0,362.554], + [0,747.091,228.588], + [0,0,1] + ], + "distCoef": [-0.334743,0.115617,-0.000133435,0.000763825,-0.0142674], + "R": [ + [-0.6314178936,0.07344004486,0.771957255], + [0.07624079511,0.9965613541,-0.03244701456], + [-0.7716856775,0.03836700932,-0.6348457984] + ], + "t": [ + [39.63694261], + [165.7689372], + [279.8275089] + ] + }, + { + "name": "10_14", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 14, + "K": [ + [745.722,0,380.721], + [0,745.932,237.231], + [0,0,1] + ], + "distCoef": [-0.319645,0.0532601,-0.00105825,0.00148804,0.0812854], + "R": [ + [-0.6464741699,0.0407242176,0.7618482039], + [0.05782238306,0.998317631,-0.004298792509], + [-0.7607415591,0.04127282036,-0.6477413331] + ], + "t": [ + [37.16059778], + [187.0284564], + [279.5510011] + ] + }, + { + "name": "10_15", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 15, + "K": [ + [745.212,0,345.945], + [0,745.407,234.052], + [0,0,1] + ], + "distCoef": [-0.345973,0.208044,0.00063894,-0.000591324,-0.26389], + "R": [ + [-0.6892736753,0.06991501806,0.7211197479], + [0.04097555303,0.9975016565,-0.0575451947], + [-0.7233414164,-0.01011610737,-0.6904164394] + ], + "t": [ + [38.38229011], + [201.7157692], + [268.6124541] + ] + }, + { + "name": "10_16", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 16, + "K": [ + [746.402,0,351.743], + [0,746.432,235.34], + [0,0,1] + ], + "distCoef": [-0.332074,0.123634,0.000553061,0.000200886,-0.050504], + "R": [ + [-0.6626903808,0.1069713565,0.7412142659], + [0.1159650419,0.9924654921,-0.03955194002], + [-0.7398605059,0.05974425322,-0.6701022728] + ], + "t": [ + [18.24762504], + [172.5928493], + [282.9657885] + ] + }, + { + "name": "10_17", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 17, + "K": [ + [745.425,0,381.954], + [0,745.576,234.397], + [0,0,1] + ], + "distCoef": [-0.316953,0.0361047,-0.000329948,0.00146685,0.0995591], + "R": [ + [-0.6439914485,0.08005681888,0.7608323863], + [0.04150323442,0.9967010496,-0.06974596286], + [-0.7639060779,-0.01333879876,-0.6451895695] + ], + "t": [ + [-14.39474973], + [198.5707312], + [268.934139] + ] + }, + { + "name": "10_18", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 18, + "K": [ + [742.866,0,374.357], + [0,743.163,216.484], + [0,0,1] + ], + "distCoef": [-0.313801,-0.00472223,0.00105562,-0.000883374,0.146196], + "R": [ + [-0.6735625977,0.03695414336,0.7382058102], + [0.08136680684,0.9963864104,0.02436316713], + [-0.7346379174,0.07647556771,-0.6741354596] + ], + "t": [ + [41.81793908], + [81.57199105], + [283.0241236] + ] + }, + { + "name": "10_19", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 19, + "K": [ + [747.195,0,374.317], + [0,747.324,252.705], + [0,0,1] + ], + "distCoef": [-0.325848,0.0754879,0.000850799,-0.000494425,0.0423325], + "R": [ + [-0.6398121174,0.03550225829,0.7677109118], + [0.06489671873,0.9978603994,0.00793971962], + [-0.7657864391,0.05490184793,-0.6407471551] + ], + "t": [ + [-18.67539454], + [143.739157], + [281.6554752] + ] + }, + { + "name": "10_20", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 20, + "K": [ + [744.074,0,359.595], + [0,744.232,222.54], + [0,0,1] + ], + "distCoef": [-0.312038,-0.00652471,0.000517579,-0.000473896,0.154037], + "R": [ + [-0.6341018605,0.07503908623,0.769599874], + [0.1134623387,0.9935365213,-0.003387984729], + [-0.7648798129,0.08517227417,-0.6385174669] + ], + "t": [ + [-10.64771601], + [114.6784971], + 
[285.5473806] + ] + }, + { + "name": "10_21", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 21, + "K": [ + [745.669,0,353.595], + [0,745.986,221.41], + [0,0,1] + ], + "distCoef": [-0.331248,0.0956435,-0.00124938,0.0010706,0.0394747], + "R": [ + [-0.618235149,0.02815342604,0.7854888192], + [0.09838720035,0.994269895,0.04180113162], + [-0.7798110408,0.1031249747,-0.6174625335] + ], + "t": [ + [-3.462045404], + [102.4105128], + [287.5712577] + ] + }, + { + "name": "10_22", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 22, + "K": [ + [745.836,0,367.536], + [0,745.883,217.602], + [0,0,1] + ], + "distCoef": [-0.306908,-0.0326669,-0.000283909,0.000278093,0.200484], + "R": [ + [-0.6189078213,0.03804187807,0.7845418563], + [0.07413417155,0.9971968305,0.01012945108], + [-0.7819573092,0.06443055706,-0.6199931209] + ], + "t": [ + [14.73270812], + [126.5060302], + [283.9045417] + ] + }, + { + "name": "10_23", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 23, + "K": [ + [742.749,0,379.273], + [0,742.868,231.204], + [0,0,1] + ], + "distCoef": [-0.310394,-0.00460726,-0.000822068,-0.000336616,0.147608], + "R": [ + [-0.6037549899,0.1086195044,0.7897352186], + [0.1215591915,0.9916324658,-0.04345590495], + [-0.787847241,0.0697628552,-0.6119067485] + ], + "t": [ + [19.26192194], + [145.0128457], + [284.7838402] + ] + }, + { + "name": "10_24", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 24, + "K": [ + [745.597,0,368.627], + [0,745.598,227.731], + [0,0,1] + ], + "distCoef": [-0.309585,-0.00749389,-0.000770097,-0.000330202,0.147896], + "R": [ + [-0.6450785239,0.075478584,0.760379301], + [0.07622559694,0.9965021766,-0.03425011393], + [-0.7603047786,0.03586635318,-0.6485755533] + ], + "t": [ + [7.856697427], + [160.1393432], + [279.1413867] + ] + }, + { + "name": "11_01", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 1, + "K": [ + [742.855,0,374.596], + [0,743.116,213.495], + [0,0,1] + ], + "distCoef": [-0.312561,0.00631745,-0.000399255,9.31566e-05,0.13435], + "R": [ + [-0.9229364354,0.00164792287,0.3849488544], + [0.08421827064,0.9766305816,0.1977371741], + [-0.3756269679,0.2149185694,-0.9015067329] + ], + "t": [ + [-1.777017447], + [176.3500352], + [303.9155303] + ] + }, + { + "name": "11_02", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 2, + "K": [ + [743.543,0,362.467], + [0,743.612,228.587], + [0,0,1] + ], + "distCoef": [-0.311508,-0.0063044,0.000209199,0.000389142,0.157517], + "R": [ + [-0.9382305089,-0.009495783218,0.3458805319], + [0.07354737957,0.9713073762,0.226169768], + [-0.338103971,0.2376379833,-0.9106118238] + ], + "t": [ + [-11.88478771], + [180.6527832], + [308.9268929] + ] + }, + { + "name": "11_03", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 3, + "K": [ + [749.382,0,384.698], + [0,749.44,241.756], + [0,0,1] + ], + "distCoef": [-0.334994,0.135003,0.000819921,0.00199466,-0.05032], + "R": [ + [-0.9215516186,0.03410543981,0.3867550042], + [0.1287847641,0.966589567,0.2216282778], + [-0.3662746221,0.2540500501,-0.895154441] + ], + "t": [ + [-28.84627719], + [162.2565593], + [311.7587167] + ] + }, + { + "name": "11_04", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 4, + "K": [ + [747.478,0,355.1], + [0,747.786,237.425], + [0,0,1] + ], + "distCoef": [-0.332665,0.125805,0.000559145,-0.000285828,-0.0488142], + "R": [ + [-0.9186497576,-0.03493542623,0.3935252708], + [0.05923251482,0.9726444983,0.2246200995], + 
[-0.3906073886,0.2296566914,-0.8914503195] + ], + "t": [ + [-43.73591523], + [146.455357], + [306.7233507] + ] + }, + { + "name": "11_05", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 5, + "K": [ + [744.546,0,358.346], + [0,744.606,240.06], + [0,0,1] + ], + "distCoef": [-0.319412,0.0357687,0.00118284,-0.000939418,0.105494], + "R": [ + [-0.9252091585,0.02778676908,0.3784387777], + [0.1130706466,0.9721977994,0.2050523536], + [-0.3622196044,0.2325066328,-0.9026281759] + ], + "t": [ + [-43.43063623], + [134.4377466], + [308.7383564] + ] + }, + { + "name": "11_06", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 6, + "K": [ + [744.682,0,386.644], + [0,744.47,247.576], + [0,0,1] + ], + "distCoef": [-0.310524,-0.0156223,-0.000288596,-3.26402e-05,0.156674], + "R": [ + [-0.9144551399,0.0484228537,0.4017798207], + [0.1449564791,0.9661327489,0.2134833264], + [-0.3778351707,0.2534615133,-0.8905042645] + ], + "t": [ + [-44.21957265], + [107.5274508], + [309.8949628] + ] + }, + { + "name": "11_07", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 7, + "K": [ + [746.436,0,349.001], + [0,746.553,211.863], + [0,0,1] + ], + "distCoef": [-0.330393,0.0902383,-0.000783974,-0.000712996,0.00481592], + "R": [ + [-0.9105637485,0.003264968682,0.4133557789], + [0.1001837456,0.9718993559,0.2130137535], + [-0.401044732,0.2353741321,-0.8853034174] + ], + "t": [ + [-36.21090107], + [102.2867759], + [306.6852556] + ] + }, + { + "name": "11_08", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 8, + "K": [ + [745.743,0,370.625], + [0,745.85,233.671], + [0,0,1] + ], + "distCoef": [-0.3257,0.0614375,0.00126654,-0.000627381,0.0722474], + "R": [ + [-0.8981193216,-0.01090147501,0.4396166989], + [0.09488580103,0.9713398361,0.2179348702], + [-0.4293930238,0.2374449004,-0.8713446794] + ], + "t": [ + [-42.17364239], + [80.07059019], + [305.3107943] + ] + }, + { + "name": "11_09", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 9, + "K": [ + [743.294,0,376.993], + [0,743.306,225.516], + [0,0,1] + ], + "distCoef": [-0.315184,-0.00458353,0.00085295,-0.000315923,0.19344], + "R": [ + [-0.9287334953,0.02657190893,0.369794576], + [0.1072763174,0.9740215576,0.1994336907], + [-0.354888555,0.2248909489,-0.9074569822] + ], + "t": [ + [4.627896612], + [76.0139061], + [305.925361] + ] + }, + { + "name": "11_10", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 10, + "K": [ + [746.981,0,373.015], + [0,746.916,231.087], + [0,0,1] + ], + "distCoef": [-0.31553,-0.0133214,-7.49701e-05,-0.000474937,0.183355], + "R": [ + [-0.897589008,-0.01428097087,0.4406018914], + [0.092180686,0.9712994893,0.219271574], + [-0.431087803,0.2374307391,-0.8705113154] + ], + "t": [ + [-5.834972436], + [85.69962032], + [306.7617687] + ] + }, + { + "name": "11_11", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 11, + "K": [ + [743.956,0,385.014], + [0,743.968,233.944], + [0,0,1] + ], + "distCoef": [-0.321873,0.0619652,-0.000204505,0.000631491,0.0680901], + "R": [ + [-0.9171447001,-0.01735780695,0.3981762243], + [0.08629809142,0.9667012777,0.2409175774], + [-0.3890992656,0.2553181275,-0.8851070078] + ], + "t": [ + [26.82061991], + [73.01187567], + [307.7528197] + ] + }, + { + "name": "11_12", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 12, + "K": [ + [749.192,0,349.167], + [0,749.113,221.266], + [0,0,1] + ], + "distCoef": [-0.334032,0.094759,-0.000689735,0.000727903,0.0409048], + "R": [ + 
[-0.937850977,-0.03419002209,0.345349949], + [0.06230645433,0.9623765935,0.2644791068], + [-0.341399254,0.2695595196,-0.9004355695] + ], + "t": [ + [57.17130279], + [82.80130245], + [306.825197] + ] + }, + { + "name": "11_13", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 13, + "K": [ + [744.715,0,367.122], + [0,744.786,220.538], + [0,0,1] + ], + "distCoef": [-0.315954,0.0180051,3.91318e-05,0.000697083,0.145396], + "R": [ + [-0.9312656673,-0.01667316508,0.3639591494], + [0.07039560041,0.9718946087,0.2246448954], + [-0.3574754765,0.2348252013,-0.9039183639] + ], + "t": [ + [46.96203938], + [112.2947483], + [304.8878272] + ] + }, + { + "name": "11_14", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 14, + "K": [ + [746.505,0,367.697], + [0,746.62,222.237], + [0,0,1] + ], + "distCoef": [-0.323622,0.0629014,0.000917096,0.00064017,0.0716359], + "R": [ + [-0.9260527677,-0.07925799212,0.3689775632], + [0.02937617957,0.9595934278,0.279852628], + [-0.3762490021,0.2699974518,-0.8863058527] + ], + "t": [ + [50.81898209], + [116.0290364], + [310.1255555] + ] + }, + { + "name": "11_15", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 15, + "K": [ + [746.042,0,355.995], + [0,745.821,261.077], + [0,0,1] + ], + "distCoef": [-0.321065,0.0443736,0.000927074,0.000280863,0.106789], + "R": [ + [-0.9208600933,-0.04678508348,0.387076019], + [0.03581020852,0.9784294414,0.2034538209], + [-0.3882451771,0.2012137775,-0.8993212431] + ], + "t": [ + [43.08113165], + [154.6066575], + [301.5640854] + ] + }, + { + "name": "11_16", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 16, + "K": [ + [741.668,0,363.735], + [0,741.796,217.06], + [0,0,1] + ], + "distCoef": [-0.309875,-0.0179015,-1.19394e-05,-0.000437783,0.188022], + "R": [ + [-0.8991061052,-0.0185684781,0.437336739], + [0.0842559957,0.9730755765,0.214534029], + [-0.4295452698,0.2297370977,-0.873333686] + ], + "t": [ + [16.70791642], + [154.14567], + [307.2679797] + ] + }, + { + "name": "11_17", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 17, + "K": [ + [747.822,0,361.761], + [0,747.76,222.34], + [0,0,1] + ], + "distCoef": [-0.334628,0.097635,0.00152491,-0.000486737,0.0213673], + "R": [ + [-0.9162397179,0.01033450945,0.4004971626], + [0.1187416248,0.9617552428,0.2468345183], + [-0.3826293322,0.2737152732,-0.8824254888] + ], + "t": [ + [27.8785048], + [159.3368695], + [313.9971646] + ] + }, + { + "name": "11_18", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 18, + "K": [ + [745.448,0,360.818], + [0,745.84,214.85], + [0,0,1] + ], + "distCoef": [-0.329534,0.0903331,0.00014069,0.000717079,0.0211508], + "R": [ + [-0.9101418911,0.04432675398,0.411918532], + [0.1391589893,0.9692024732,0.2031781034], + [-0.3902262342,0.2422430698,-0.888280238] + ], + "t": [ + [16.35209076], + [181.679224], + [308.9632727] + ] + }, + { + "name": "11_19", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 19, + "K": [ + [746.167,0,363.996], + [0,746.229,234.387], + [0,0,1] + ], + "distCoef": [-0.310901,-0.0147285,-0.000729007,-0.000655789,0.178193], + "R": [ + [-0.9157731435,-0.03755396433,0.3999365568], + [0.06406747528,0.9692207168,0.2377110865], + [-0.3965537899,0.2433123544,-0.8851803149] + ], + "t": [ + [-10.79527777], + [146.8696803], + [308.5271108] + ] + }, + { + "name": "11_20", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 20, + "K": [ + [744.588,0,384.664], + [0,744.662,240.853], + [0,0,1] + ], + "distCoef": 
[-0.307863,-0.0295446,-0.000517465,0.000242427,0.189333], + "R": [ + [-0.9170523574,0.0431160901,0.396429031], + [0.124694228,0.9752892469,0.1823793695], + [-0.3787694858,0.2166838427,-0.8997676305] + ], + "t": [ + [-9.200936127], + [142.5227957], + [304.9039442] + ] + }, + { + "name": "11_21", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 21, + "K": [ + [745.832,0,378.426], + [0,745.825,230.649], + [0,0,1] + ], + "distCoef": [-0.317765,0.041948,0.000140897,0.000331931,0.0876249], + "R": [ + [-0.903416406,0.009580467792,0.4286572198], + [0.1299134284,0.9588705554,0.2523683006], + [-0.4086089801,0.2836819921,-0.8675040223] + ], + "t": [ + [-22.38884391], + [100.2357286], + [311.942278] + ] + }, + { + "name": "11_22", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 22, + "K": [ + [745.759,0,381.189], + [0,746.033,229.615], + [0,0,1] + ], + "distCoef": [-0.307738,-0.0303832,0.000694314,-0.000395606,0.211723], + "R": [ + [-0.9121889441,-0.007451044875,0.4097021017], + [0.1102495844,0.9585035751,0.2628990789], + [-0.394659802,0.2849831196,-0.8735148895] + ], + "t": [ + [-0.4671669308], + [91.25062129], + [311.8622342] + ] + }, + { + "name": "11_23", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 23, + "K": [ + [748.678,0,358.839], + [0,748.651,239.635], + [0,0,1] + ], + "distCoef": [-0.328983,0.0919887,-1.22475e-05,-0.000911096,0.0194744], + "R": [ + [-0.9251940915,-0.06790089301,0.3733702744], + [0.01633387562,0.9758259889,0.2179377065], + [-0.3791425821,0.207733262,-0.9017193545] + ], + "t": [ + [15.23843998], + [129.776393], + [302.9631654] + ] + }, + { + "name": "11_24", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 24, + "K": [ + [747.741,0,374.843], + [0,747.8,238.972], + [0,0,1] + ], + "distCoef": [-0.320184,0.0453956,8.07771e-05,-0.000586724,0.0799959], + "R": [ + [-0.901120423,0.005145678853,0.4335383549], + [0.1030532182,0.9738156258,0.2026404726], + [-0.4211437016,0.2272809911,-0.8780554275] + ], + "t": [ + [6.522845915], + [142.0951003], + [306.255293] + ] + }, + { + "name": "12_01", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 1, + "K": [ + [745.397,0,350.188], + [0,745.422,244.528], + [0,0,1] + ], + "distCoef": [-0.318784,0.0421446,0.000567418,-0.000208,0.092208], + "R": [ + [-0.2717431751,0.1656287556,0.9480098956], + [0.4128654434,0.9098857043,-0.04062180222], + [-0.86930879,0.3803618284,-0.3156376199] + ], + "t": [ + [-13.70303847], + [97.1923903], + [326.2673629] + ] + }, + { + "name": "12_02", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 2, + "K": [ + [747.727,0,370.501], + [0,747.788,234.298], + [0,0,1] + ], + "distCoef": [-0.349811,0.202844,-0.00194754,-0.000389321,-0.178679], + "R": [ + [-0.3883456032,0.1438043201,0.9102241537], + [0.3131714459,0.9495549238,-0.01640403197], + [-0.8666667975,0.2786857806,-0.4137908865] + ], + "t": [ + [13.37192963], + [105.5473845], + [318.08591] + ] + }, + { + "name": "12_03", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 3, + "K": [ + [746.831,0,387.09], + [0,746.752,242.092], + [0,0,1] + ], + "distCoef": [-0.338844,0.109538,-0.000689346,-0.00140957,-0.0011227], + "R": [ + [-0.2489409576,0.07810816372,0.9653639285], + [0.3865744043,0.9219167609,0.0250941395], + [-0.8880251289,0.3794319447,-0.2596974581] + ], + "t": [ + [-20.03334166], + [70.50216381], + [325.3775618] + ] + }, + { + "name": "12_04", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 4, + "K": [ + 
[746.601,0,360.45], + [0,746.776,222.063], + [0,0,1] + ], + "distCoef": [-0.336822,0.124774,0.000206697,-0.000417774,-0.0398672], + "R": [ + [-0.3081671276,0.03567998316,0.9506629057], + [0.4212102042,0.9011275261,0.1027187694], + [-0.8530035084,0.4320834647,-0.2927266543] + ], + "t": [ + [4.764737811], + [63.41476985], + [331.1517594] + ] + }, + { + "name": "12_05", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 5, + "K": [ + [748.2,0,362.212], + [0,748.363,218.877], + [0,0,1] + ], + "distCoef": [-0.337789,0.133894,-0.000945522,-0.000498923,-0.0570031], + "R": [ + [-0.2841336654,-0.004801876737,0.9587726541], + [0.3831436474,0.9161034097,0.118133349], + [-0.8789021593,0.4009133132,-0.2584560111] + ], + "t": [ + [10.92507323], + [68.32263664], + [329.7866549] + ] + }, + { + "name": "12_06", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 6, + "K": [ + [747.371,0,350.388], + [0,747.497,231.124], + [0,0,1] + ], + "distCoef": [-0.351189,0.233364,-0.000450075,-0.00118874,-0.265042], + "R": [ + [-0.3878504716,-0.01635524947,0.9215771902], + [0.3346075558,0.9291346168,0.1573106717], + [-0.8588421248,0.3693797093,-0.3548927092] + ], + "t": [ + [53.76493542], + [97.09757883], + [324.1315487] + ] + }, + { + "name": "12_07", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 7, + "K": [ + [747.196,0,383.602], + [0,747.258,260.076], + [0,0,1] + ], + "distCoef": [-0.340453,0.149462,7.57635e-05,-0.00150211,-0.0810731], + "R": [ + [-0.3567494973,0.01375486298,0.934098817], + [0.3428523716,0.9320474424,0.1172169629], + [-0.8690121101,0.3620750873,-0.3372233439] + ], + "t": [ + [46.87962376], + [118.8343508], + [324.070693] + ] + }, + { + "name": "12_08", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 8, + "K": [ + [748.388,0,360.952], + [0,748.584,220.934], + [0,0,1] + ], + "distCoef": [-0.353387,0.236369,0.000317101,-0.000350889,-0.25062], + "R": [ + [-0.3882650784,-0.0538394581,0.9199736636], + [0.3529834406,0.9134681838,0.2024316376], + [-0.8512654812,0.4033326047,-0.3356633588] + ], + "t": [ + [53.63586961], + [124.5990463], + [329.2926486] + ] + }, + { + "name": "12_09", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 9, + "K": [ + [745.023,0,373.202], + [0,745.321,253.183], + [0,0,1] + ], + "distCoef": [-0.310235,-0.0270349,0.000213071,-0.0010354,0.204812], + "R": [ + [-0.3615436505,-0.1034754049,0.9265953968], + [0.3189620476,0.9201303682,0.2272076531], + [-0.8760989676,0.3776942494,-0.2996625652] + ], + "t": [ + [26.36947949], + [154.1173845], + [328.14772] + ] + }, + { + "name": "12_10", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 10, + "K": [ + [743.497,0,337.094], + [0,743.775,230.392], + [0,0,1] + ], + "distCoef": [-0.323522,0.0697077,-0.000922284,-0.00112939,0.0376595], + "R": [ + [-0.409013364,-0.03192166586,0.9119698873], + [0.3635432206,0.9109541012,0.1949331996], + [-0.8369853014,0.4112707536,-0.3609874961] + ], + "t": [ + [36.39561956], + [146.2733377], + [330.6860766] + ] + }, + { + "name": "12_11", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 11, + "K": [ + [744.432,0,350.161], + [0,744.664,216.764], + [0,0,1] + ], + "distCoef": [-0.3138,0.0423232,-0.000980128,0.000347352,0.0411803], + "R": [ + [-0.3625324698,0.01191238118,0.9318950067], + [0.4332658145,0.8874493782,0.157207936], + [-0.8251369234,0.4607512304,-0.3268904424] + ], + "t": [ + [30.02223667], + [146.021886], + [340.9352409] + ] + }, + { + "name": "12_12", + "type": "vga", 
+ "resolution": [640,480], + "panel": 12, + "node": 12, + "K": [ + [745.59,0,349.499], + [0,745.978,243.824], + [0,0,1] + ], + "distCoef": [-0.328804,0.102744,-0.00034172,-0.00160085,-0.0230968], + "R": [ + [-0.3184962228,0.07265474811,0.9451356747], + [0.3862627531,0.9204738181,0.05940568743], + [-0.8656565379,0.3839911948,-0.3212312573] + ], + "t": [ + [17.04074577], + [180.9741057], + [327.7548666] + ] + }, + { + "name": "12_13", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 13, + "K": [ + [744.766,0,364.423], + [0,744.926,205.341], + [0,0,1] + ], + "distCoef": [-0.32165,0.0514735,-0.000885848,-0.00113933,0.0656482], + "R": [ + [-0.2748509499,0.06379038152,0.9593684081], + [0.3894986417,0.919644886,0.05043898999], + [-0.8790607279,0.3875358962,-0.2776115375] + ], + "t": [ + [-9.802475588], + [164.1613661], + [327.7325897] + ] + }, + { + "name": "12_14", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 14, + "K": [ + [744.556,0,345.329], + [0,744.551,253.003], + [0,0,1] + ], + "distCoef": [-0.311027,-0.00213006,0.0011289,-0.000863959,0.162024], + "R": [ + [-0.3202755169,0.1244082889,0.9391198917], + [0.4530679872,0.8907277919,0.0365157459], + [-0.831957326,0.4371802584,-0.3416437171] + ], + "t": [ + [0.5161253202], + [152.8799295], + [338.113135] + ] + }, + { + "name": "12_15", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 15, + "K": [ + [747.233,0,347.644], + [0,747.329,227.375], + [0,0,1] + ], + "distCoef": [-0.323105,0.049287,-0.00101918,5.08353e-05,0.100564], + "R": [ + [-0.2639942301,0.1219548974,0.9567831779], + [0.4010015368,0.9160569375,-0.006120025947], + [-0.8772142349,0.3820558732,-0.2907378472] + ], + "t": [ + [-27.43280694], + [159.7105652], + [325.8203908] + ] + }, + { + "name": "12_16", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 16, + "K": [ + [744.634,0,382.866], + [0,744.52,241.14], + [0,0,1] + ], + "distCoef": [-0.320913,0.0518689,0.000556907,0.000900625,0.0851061], + "R": [ + [-0.2918914105,0.1153635448,0.9494686183], + [0.4055533141,0.9139698053,0.01362734066], + [-0.8662135499,0.3890378484,-0.3135660035] + ], + "t": [ + [-22.908528], + [135.1916248], + [327.5972929] + ] + }, + { + "name": "12_17", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 17, + "K": [ + [745.929,0,399.922], + [0,745.76,235.115], + [0,0,1] + ], + "distCoef": [-0.324412,0.0924767,0.000808772,0.00160345,0.0125449], + "R": [ + [-0.2332319969,0.1531844985,0.9602798264], + [0.4252056559,0.9041694633,-0.04096012482], + [-0.8745301515,0.3987632018,-0.2760161646] + ], + "t": [ + [-42.90434909], + [120.9469461], + [326.5490528] + ] + }, + { + "name": "12_18", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 18, + "K": [ + [745.596,0,390.427], + [0,745.457,235.855], + [0,0,1] + ], + "distCoef": [-0.331545,0.0834192,0.000515021,-0.000851112,0.0388274], + "R": [ + [-0.2198853867,0.1587089693,0.9625288982], + [0.4990272732,0.8661072571,-0.02880971702], + [-0.8382256244,0.4739933356,-0.2696444333] + ], + "t": [ + [-48.83152805], + [73.52609427], + [332.6787653] + ] + }, + { + "name": "12_19", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 19, + "K": [ + [744.284,0,396.863], + [0,744.47,248.804], + [0,0,1] + ], + "distCoef": [-0.318049,0.0444362,0.000417829,0.000948817,0.0847095], + "R": [ + [-0.2972813843,0.0975420226,0.9497943632], + [0.4134272643,0.9098266462,0.03596346693], + [-0.8606402708,0.4033621545,-0.3108010564] + ], + "t": [ + [-6.347004052], + 
[101.4062297], + [328.9550302] + ] + }, + { + "name": "12_20", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 20, + "K": [ + [745.173,0,391.68], + [0,745.292,239.851], + [0,0,1] + ], + "distCoef": [-0.316891,0.030971,0.000827356,0.00064571,0.114679], + "R": [ + [-0.3480625566,0.05516818218,0.9358466372], + [0.3680676982,0.9261498325,0.08229615655], + [-0.8621940769,0.3730991283,-0.3426637043] + ], + "t": [ + [18.00373906], + [105.1024652], + [325.6162418] + ] + }, + { + "name": "12_21", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 21, + "K": [ + [744.07,0,385.155], + [0,744.184,238.534], + [0,0,1] + ], + "distCoef": [-0.325321,0.0749068,6.22505e-05,8.78769e-06,0.0274316], + "R": [ + [-0.2944173655,-0.00519814937,0.9556628036], + [0.365777539,0.9232287513,0.117709238], + [-0.882907247,0.3842156322,-0.2699132104] + ], + "t": [ + [4.17424328], + [116.8807078], + [328.2455421] + ] + }, + { + "name": "12_22", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 22, + "K": [ + [747.36,0,358.25], + [0,747.451,237.291], + [0,0,1] + ], + "distCoef": [-0.329867,0.116416,-0.000580151,-0.000763801,-0.0625995], + "R": [ + [-0.323867873,0.0530845029,0.9446118972], + [0.387407199,0.9183241349,0.08121850418], + [-0.8631484594,0.3922535134,-0.3179810029] + ], + "t": [ + [22.53106717], + [133.6738778], + [328.8995429] + ] + }, + { + "name": "12_23", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 23, + "K": [ + [748.813,0,380.156], + [0,748.859,237.356], + [0,0,1] + ], + "distCoef": [-0.333932,0.115832,0.000621747,-0.000254241,-0.0140772], + "R": [ + [-0.3097958639,0.0326105921,0.9502436908], + [0.3550951383,0.9310652686,0.08381472691], + [-0.8820056493,0.3633923705,-0.3000200319] + ], + "t": [ + [-6.485061334], + [151.418855], + [323.8858443] + ] + }, + { + "name": "12_24", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 24, + "K": [ + [745.33,0,360.408], + [0,745.472,237.433], + [0,0,1] + ], + "distCoef": [-0.321653,0.057929,3.69615e-05,-0.000478596,0.0560779], + "R": [ + [-0.3250711399,0.1046959739,0.9398763254], + [0.4072848242,0.9124585149,0.03922410658], + [-0.8534915501,0.395547989,-0.3392550109] + ], + "t": [ + [2.217299854], + [123.8595425], + [329.2221602] + ] + }, + { + "name": "13_01", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 1, + "K": [ + [747.6,0,355.92], + [0,747.783,249.853], + [0,0,1] + ], + "distCoef": [-0.333712,0.144699,-6.46303e-05,-0.0011294,-0.0924471], + "R": [ + [0.5138271048,0.01100033104,0.857823233], + [0.08358608019,0.9945184566,-0.06282043172], + [-0.8538120833,0.1039809221,0.5100910647] + ], + "t": [ + [-37.95328646], + [135.6435695], + [289.9999799] + ] + }, + { + "name": "13_02", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 2, + "K": [ + [743.227,0,372.15], + [0,743.265,265.407], + [0,0,1] + ], + "distCoef": [-0.306942,-0.0266079,0.000311285,0.000595534,0.199806], + "R": [ + [0.4485620057,-0.005900946102,0.8937322339], + [0.06601293956,0.9974655925,-0.02654587691], + [-0.8913105064,0.07090536373,0.4478147055] + ], + "t": [ + [-38.28645032], + [133.2984516], + [288.856211] + ] + }, + { + "name": "13_03", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 3, + "K": [ + [746.538,0,387.516], + [0,746.833,233.181], + [0,0,1] + ], + "distCoef": [-0.322577,0.0715483,-4.90461e-05,0.000787497,0.0326639], + "R": [ + [0.5260210271,0.02315422103,0.8501563157], + [0.07372016672,0.9946254291,-0.07270208278], + 
[-0.8472704504,0.1009164896,0.5214869567] + ], + "t": [ + [-53.0750023], + [105.7642054], + [287.8235486] + ] + }, + { + "name": "13_04", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 4, + "K": [ + [744.864,0,367.763], + [0,745.005,229.771], + [0,0,1] + ], + "distCoef": [-0.318118,0.0367901,0.000364188,-0.000713933,0.0879467], + "R": [ + [0.4575577495,0.1623260474,0.8742374736], + [-0.0244195278,0.9851184177,-0.1701334469], + [-0.8888445267,0.05649741078,0.4547124916] + ], + "t": [ + [4.756699591], + [110.8595803], + [285.3944853] + ] + }, + { + "name": "13_05", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 5, + "K": [ + [744.026,0,374.462], + [0,744.21,219.295], + [0,0,1] + ], + "distCoef": [-0.309274,-0.00813814,-0.000611939,0.000562163,0.16533], + "R": [ + [0.5236500196,-0.01990538858,0.8517009055], + [0.0479853053,0.9988290545,-0.006158764858], + [-0.8505810176,0.04409416531,0.5239920201] + ], + "t": [ + [-32.80347729], + [91.75629107], + [282.6719703] + ] + }, + { + "name": "13_06", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 6, + "K": [ + [746.172,0,347.715], + [0,746.412,223.735], + [0,0,1] + ], + "distCoef": [-0.315889,0.0243673,0.00083413,-0.000596366,0.129203], + "R": [ + [0.489601615,0.07237643337,0.8689372305], + [-0.010214584,0.9969567785,-0.07728417735], + [-0.8718864151,0.02896262571,0.488850944] + ], + "t": [ + [7.55259059], + [89.5920217], + [281.8493454] + ] + }, + { + "name": "13_07", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 7, + "K": [ + [745.619,0,383.372], + [0,745.683,224.508], + [0,0,1] + ], + "distCoef": [-0.315816,0.0424659,0.000456201,0.000714024,0.0879752], + "R": [ + [0.5142457137,-0.005076098829,0.8576278792], + [0.07753605572,0.9961627141,-0.04059565316], + [-0.8541308483,0.08737322366,0.5126659866] + ], + "t": [ + [9.165152848], + [86.80281732], + [287.1451009] + ] + }, + { + "name": "13_08", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 8, + "K": [ + [746.151,0,390.693], + [0,746.159,238.847], + [0,0,1] + ], + "distCoef": [-0.312796,0.0112848,0.00109903,0.000945928,0.138088], + "R": [ + [0.5333632905,-0.08775347438,0.841322131], + [0.13459771,0.9907366672,0.0180086874], + [-0.8351090089,0.1036348594,0.5402339855] + ], + "t": [ + [14.59630248], + [78.12680456], + [289.302137] + ] + }, + { + "name": "13_09", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 9, + "K": [ + [744.811,0,365.557], + [0,745.05,239.01], + [0,0,1] + ], + "distCoef": [-0.302561,-0.0588071,-0.000331846,-0.00065645,0.252299], + "R": [ + [0.515993865,0.007464548532,0.8565597538], + [0.05311793688,0.9977587535,-0.04069342277], + [-0.8549437502,0.06649624343,0.5144408941] + ], + "t": [ + [47.02842806], + [101.5821868], + [285.7219747] + ] + }, + { + "name": "13_10", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 10, + "K": [ + [744.185,0,393.537], + [0,744.44,231.354], + [0,0,1] + ], + "distCoef": [-0.321367,0.0639595,-3.49657e-05,0.000800078,0.0579089], + "R": [ + [0.5364096096,-0.02345912583,0.8436316733], + [0.07330244032,0.9971310212,-0.01888064639], + [-0.8407683884,0.07196802054,0.536590273] + ], + "t": [ + [31.38919798], + [122.486781], + [287.1552388] + ] + }, + { + "name": "13_11", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 11, + "K": [ + [745.973,0,365.594], + [0,746.037,211.677], + [0,0,1] + ], + "distCoef": [-0.32905,0.0977698,-0.000962762,0.000946642,0.0190885], + "R": [ + 
[0.5178117038,0.00482526951,0.8554810087], + [0.01921134431,0.9996663333,-0.01726691564], + [-0.8552788806,0.02537595122,0.5175462273] + ], + "t": [ + [57.16543019], + [149.3252564], + [279.6241941] + ] + }, + { + "name": "13_12", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 12, + "K": [ + [745.909,0,358.218], + [0,746.022,220.333], + [0,0,1] + ], + "distCoef": [-0.338571,0.148871,-0.00100229,-0.000678393,-0.0710162], + "R": [ + [0.5368407815,0.02503814463,0.8433119628], + [-0.01156171997,0.9996840035,-0.02232083821], + [-0.8436043516,0.002232599467,0.5369606257] + ], + "t": [ + [51.57359577], + [176.1957711], + [275.7319623] + ] + }, + { + "name": "13_13", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 13, + "K": [ + [743.068,0,370.139], + [0,743.357,232.303], + [0,0,1] + ], + "distCoef": [-0.302401,-0.0553181,-0.00107418,-0.000672395,0.220417], + "R": [ + [0.5299693687,-0.06080201885,0.8458342525], + [0.13849556,0.9902402801,-0.01559383094], + [-0.8366310107,0.1254085412,0.5332178257] + ], + "t": [ + [16.99243391], + [145.7883087], + [295.0494301] + ] + }, + { + "name": "13_14", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 14, + "K": [ + [743.724,0,347.611], + [0,743.902,235.434], + [0,0,1] + ], + "distCoef": [-0.315484,0.0296225,-0.000529931,-0.000276443,0.110913], + "R": [ + [0.5388576125,-0.001120175332,0.8423961174], + [0.06888686412,0.9967085439,-0.04273965901], + [-0.8395755317,0.08106061749,0.5371611517] + ], + "t": [ + [22.68047362], + [178.4537167], + [288.5132471] + ] + }, + { + "name": "13_15", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 15, + "K": [ + [748.48,0,370.578], + [0,748.498,231.761], + [0,0,1] + ], + "distCoef": [-0.333743,0.123731,0.000274987,0.00129665,-0.0264397], + "R": [ + [0.5569883215,-0.02228411773,0.8302213126], + [0.06483002391,0.9977563557,-0.01671294857], + [-0.827986158,0.06313218472,0.5571833177] + ], + "t": [ + [-8.30154925], + [184.6918205], + [284.5865319] + ] + }, + { + "name": "13_16", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 16, + "K": [ + [748.413,0,364.616], + [0,748.358,230.166], + [0,0,1] + ], + "distCoef": [-0.337541,0.138107,0.000557985,-0.000490808,-0.0648839], + "R": [ + [0.5035312414,0.04830043061,0.8626258501], + [0.03089895722,0.996790644,-0.07384894344], + [-0.8634243125,0.06383948941,0.5004227975] + ], + "t": [ + [5.312179267], + [173.5565462], + [284.5085099] + ] + }, + { + "name": "13_17", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 17, + "K": [ + [745.143,0,372.782], + [0,745.112,223.2], + [0,0,1] + ], + "distCoef": [-0.321603,0.0646008,-0.000584526,0.000805086,0.0603349], + "R": [ + [0.5471603314,0.02993221277,0.8364924593], + [0.06649342528,0.9946477166,-0.07908567611], + [-0.8343825239,0.09889379359,0.5422414789] + ], + "t": [ + [-32.63653561], + [167.4383368], + [289.2367997] + ] + }, + { + "name": "13_18", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 18, + "K": [ + [745.136,0,373.506], + [0,745.259,215.704], + [0,0,1] + ], + "distCoef": [-0.333755,0.12331,-0.00049301,0.00138004,-0.0323155], + "R": [ + [0.5039095131,0.07384116584,0.8605943788], + [0.02822760746,0.9943991795,-0.1018502524], + [-0.8632950856,0.07561583139,0.4990028469] + ], + "t": [ + [-29.61131213], + [166.0398843], + [286.9453226] + ] + }, + { + "name": "13_19", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 19, + "K": [ + [743.638,0,344.046], + [0,743.783,238.416], + 
[0,0,1] + ], + "distCoef": [-0.319291,0.0355055,-0.000169258,0.000161892,0.118247], + "R": [ + [0.5180347054,0.01180967192,0.8552780692], + [0.1057363227,0.9913513706,-0.07773216881], + [-0.8487990775,0.1307019191,0.512305704] + ], + "t": [ + [-19.08174331], + [122.2280138], + [293.3272927] + ] + }, + { + "name": "13_20", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 20, + "K": [ + [745.321,0,372.761], + [0,745.559,236.547], + [0,0,1] + ], + "distCoef": [-0.320489,0.0479206,-9.03328e-05,-0.000256288,0.0784864], + "R": [ + [0.4966252135,-0.01754426777,0.8677877598], + [0.06583916704,0.9976766247,-0.01750875645], + [-0.8654643848,0.06582971318,0.4966264667] + ], + "t": [ + [-11.61163777], + [120.2765647], + [285.1928757] + ] + }, + { + "name": "13_21", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 21, + "K": [ + [745.539,0,371.886], + [0,745.656,230.519], + [0,0,1] + ], + "distCoef": [-0.326644,0.0839413,-0.000557984,0.000204085,0.0126328], + "R": [ + [0.5330371562,-0.03752357961,0.8452593514], + [0.08887796824,0.9959722199,-0.01183402057], + [-0.8414107777,0.08143290645,0.5342252193] + ], + "t": [ + [-6.03247131], + [109.6165459], + [286.9430377] + ] + }, + { + "name": "13_22", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 22, + "K": [ + [744.018,0,396.717], + [0,744.224,249.141], + [0,0,1] + ], + "distCoef": [-0.315372,0.0205822,-0.000440151,0.000134817,0.105074], + "R": [ + [0.4984198723,-0.001673636668,0.8669341554], + [0.03130878513,0.9993805529,-0.01607079461], + [-0.8663702389,0.03515265859,0.4981635271] + ], + "t": [ + [26.09238071], + [136.8142763], + [280.4949188] + ] + }, + { + "name": "13_23", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 23, + "K": [ + [744.884,0,382.514], + [0,744.877,235.74], + [0,0,1] + ], + "distCoef": [-0.326378,0.0966908,-9.48994e-05,0.00105607,0.00534895], + "R": [ + [0.4908089633,-0.01723518027,0.8710967283], + [0.04978157704,0.9987257364,-0.008288432131], + [-0.8698438688,0.04743260567,0.4910415377] + ], + "t": [ + [21.95453226], + [154.6836493], + [281.6596012] + ] + }, + { + "name": "13_24", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 24, + "K": [ + [744.481,0,341.813], + [0,744.509,213.322], + [0,0,1] + ], + "distCoef": [-0.310201,-0.0109775,-0.00130948,-0.000370453,0.189258], + "R": [ + [0.5283332962,-0.01827851401,0.8488402818], + [0.07383881778,0.996969434,-0.02449033896], + [-0.8458201683,0.0756164244,0.5280818111] + ], + "t": [ + [-10.59416721], + [149.8670778], + [286.3856475] + ] + }, + { + "name": "14_01", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 1, + "K": [ + [745.639,0,394.42], + [0,745.872,232.374], + [0,0,1] + ], + "distCoef": [-0.317821,0.05701,0.000216723,0.00145431,0.0516441], + "R": [ + [0.1117244957,0.006687085701,0.9937167202], + [0.1929264895,0.9808052728,-0.02829110459], + [-0.9748317838,0.1948750877,0.1082898585] + ], + "t": [ + [-10.76838593], + [183.2092961], + [300.2249606] + ] + }, + { + "name": "14_02", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 2, + "K": [ + [744.265,0,384.24], + [0,744.607,234.555], + [0,0,1] + ], + "distCoef": [-0.314122,0.0172489,-0.000351192,-3.05431e-05,0.116521], + "R": [ + [0.09126102309,0.01926845044,0.9956405739], + [0.1889483007,0.9813154942,-0.03631033643], + [-0.9777371658,0.191438313,0.08591511501] + ], + "t": [ + [-20.54744948], + [195.8515337], + [299.6149103] + ] + }, + { + "name": "14_03", + "type": "vga", + "resolution": 
[640,480], + "panel": 14, + "node": 3, + "K": [ + [742.909,0,383.13], + [0,743.051,234.161], + [0,0,1] + ], + "distCoef": [-0.311566,0.0211516,-0.000212815,-9.64233e-05,0.110817], + "R": [ + [0.07658267666,-0.01244461629,0.9969855692], + [0.2193131093,0.9756433613,-0.004668149478], + [-0.9726442586,0.2190095044,0.07744664757] + ], + "t": [ + [-39.95619704], + [171.7405641], + [305.3439137] + ] + }, + { + "name": "14_04", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 4, + "K": [ + [745.057,0,349.277], + [0,745.321,214.2], + [0,0,1] + ], + "distCoef": [-0.31581,0.0237721,-0.00140945,-0.000667487,0.124292], + "R": [ + [0.09341145846,-0.02354383001,0.9953491787], + [0.2305453591,0.9730606003,0.001380415192], + [-0.9685675696,0.2293441873,0.09632293059] + ], + "t": [ + [-43.73412593], + [146.7921304], + [306.2893961] + ] + }, + { + "name": "14_05", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 5, + "K": [ + [744.634,0,387.597], + [0,744.752,225.246], + [0,0,1] + ], + "distCoef": [-0.315944,0.0434616,-0.000268259,0.00110436,0.0780237], + "R": [ + [0.1133728096,0.0374780752,0.9928454059], + [0.2222309073,0.973014014,-0.06210597779], + [-0.9683801061,0.2276820645,0.1019845459] + ], + "t": [ + [-53.79623552], + [137.113178], + [305.5099477] + ] + }, + { + "name": "14_06", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 6, + "K": [ + [744.759,0,388.645], + [0,744.666,221.73], + [0,0,1] + ], + "distCoef": [-0.306159,-0.0283273,-0.000508774,0.00094455,0.192402], + "R": [ + [0.1564984143,0.01913164242,0.9874928995], + [0.2309282446,0.9713913042,-0.05541732523], + [-0.96030224,0.2367127254,0.1476031622] + ], + "t": [ + [-66.24261018], + [112.7515407], + [303.5978047] + ] + }, + { + "name": "14_07", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 7, + "K": [ + [744.959,0,375.286], + [0,745.092,235.744], + [0,0,1] + ], + "distCoef": [-0.302136,-0.0624017,-0.000302824,-0.00146028,0.239945], + "R": [ + [0.0628689268,0.03077162571,0.9975472947], + [0.2444661638,0.9685997585,-0.04528578729], + [-0.967617586,0.2467136292,0.05337220603] + ], + "t": [ + [-19.11814477], + [98.74694092], + [308.9777955] + ] + }, + { + "name": "14_08", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 8, + "K": [ + [746.649,0,384.752], + [0,746.836,237.267], + [0,0,1] + ], + "distCoef": [-0.321628,0.0600031,0.000104796,0.000953791,0.0524376], + "R": [ + [0.1158239713,-0.07384920575,0.9905206219], + [0.2473198554,0.9679682291,0.043248082], + [-0.9619863288,0.2399662524,0.1303782992] + ], + "t": [ + [-45.76229918], + [76.40869106], + [305.3733784] + ] + }, + { + "name": "14_09", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 9, + "K": [ + [745.672,0,372.774], + [0,745.737,209.129], + [0,0,1] + ], + "distCoef": [-0.30917,-0.00857977,-4.68803e-05,-0.000521617,0.17194], + "R": [ + [0.1233501146,0.01050711315,0.9923075883], + [0.2153087978,0.9758411417,-0.0370970036], + [-0.9687243523,0.2182284735,0.1181078428] + ], + "t": [ + [-15.44854612], + [78.73632155], + [304.5944309] + ] + }, + { + "name": "14_10", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 10, + "K": [ + [744.36,0,350.493], + [0,744.605,227.167], + [0,0,1] + ], + "distCoef": [-0.324539,0.0696676,-0.000964917,-0.000688724,0.0453805], + "R": [ + [0.0653712546,0.005547467364,0.9978455916], + [0.2748842968,0.9611936881,-0.02335203178], + [-0.9592524289,0.2758186354,0.06130952564] + ], + "t": [ + [17.36142141], + [73.86484437], + 
[309.5485763] + ] + }, + { + "name": "14_11", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 11, + "K": [ + [744.072,0,352.953], + [0,744.032,218.847], + [0,0,1] + ], + "distCoef": [-0.310531,-0.00866492,-5.61729e-06,0.000627577,0.179884], + "R": [ + [0.08325845442,0.01268657881,0.9964472292], + [0.1993298125,0.97949952,-0.02912586749], + [-0.9763890903,0.2010466141,0.07902280276] + ], + "t": [ + [33.26019053], + [89.58305599], + [303.0664402] + ] + }, + { + "name": "14_12", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 12, + "K": [ + [743.677,0,359.077], + [0,743.623,233.815], + [0,0,1] + ], + "distCoef": [-0.305265,-0.0518121,0.000714314,0.000432839,0.265088], + "R": [ + [0.06818541392,0.004787243789,0.9976611808], + [0.2533830838,0.9671167716,-0.02195821049], + [-0.9649599796,0.2542876962,0.06473025078] + ], + "t": [ + [54.03449748], + [85.53998459], + [306.9876015] + ] + }, + { + "name": "14_13", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 13, + "K": [ + [742.736,0,368.122], + [0,742.832,238.615], + [0,0,1] + ], + "distCoef": [-0.303469,-0.0412536,1.82225e-05,-0.000473228,0.205739], + "R": [ + [0.1225239282,-0.0735967149,0.9897329996], + [0.2305366224,0.9720798639,0.0437447595], + [-0.9653189902,0.222809923,0.1360697815] + ], + "t": [ + [17.43625272], + [116.7070017], + [307.0317679] + ] + }, + { + "name": "14_14", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 14, + "K": [ + [745.328,0,371.219], + [0,745.487,209.713], + [0,0,1] + ], + "distCoef": [-0.318297,0.0286867,-0.0013247,0.000626009,0.137928], + "R": [ + [0.06972690557,-0.0276618613,0.9971825209], + [0.2175762615,0.9759712693,0.01185967683], + [-0.9735495514,0.2161363064,0.0740700209] + ], + "t": [ + [57.75964066], + [131.0709572], + [303.578107] + ] + }, + { + "name": "14_15", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 15, + "K": [ + [743.637,0,370.163], + [0,743.479,235.403], + [0,0,1] + ], + "distCoef": [-0.301307,-0.0600698,0.000220332,0.000264974,0.263845], + "R": [ + [0.0871387997,-0.1078492175,0.9903410402], + [0.2171380052,0.9722761796,0.08677624828], + [-0.9722437535,0.2074790999,0.1081411432] + ], + "t": [ + [27.10934266], + [155.0300785], + [303.8314173] + ] + }, + { + "name": "14_16", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 16, + "K": [ + [747.749,0,388.765], + [0,747.73,234.855], + [0,0,1] + ], + "distCoef": [-0.320028,0.057848,-0.00103044,0.00101463,0.0716113], + "R": [ + [0.09276252326,-0.02731891999,0.9953134134], + [0.2004837996,0.9796626634,0.008204393401], + [-0.9752955246,0.1987831547,0.09635298148] + ], + "t": [ + [25.02944215], + [165.1686099], + [301.5459594] + ] + }, + { + "name": "14_17", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 17, + "K": [ + [745.477,0,358.035], + [0,745.633,228.78], + [0,0,1] + ], + "distCoef": [-0.315933,0.0359808,-0.000244793,0.00106736,0.101835], + "R": [ + [0.09323456203,-0.04884472803,0.9944453273], + [0.1997864834,0.9793990461,0.02937464128], + [-0.9753936013,0.1959380031,0.1010723576] + ], + "t": [ + [12.52671676], + [185.8338565], + [300.6683817] + ] + }, + { + "name": "14_19", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 19, + "K": [ + [746.962,0,392.223], + [0,747.34,219.936], + [0,0,1] + ], + "distCoef": [-0.325078,0.0885503,-0.00165532,0.000580691,0.0160315], + "R": [ + [0.129696032,0.03909405168,0.990782819], + [0.1776002444,0.9821476201,-0.06200165731], + 
[-0.9755188837,0.1840046397,0.1204375361] + ], + "t": [ + [-4.746570817], + [166.089254], + [298.9402723] + ] + }, + { + "name": "14_20", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 20, + "K": [ + [744.91,0,339.915], + [0,744.956,221.133], + [0,0,1] + ], + "distCoef": [-0.306862,-0.0244375,-6.76743e-05,-0.000102471,0.205298], + "R": [ + [0.09943504227,-0.007298095184,0.9950172914], + [0.2125993636,0.9770380132,-0.01407946415], + [-0.9720669642,0.212940035,0.09870338653] + ], + "t": [ + [-22.7866272], + [143.0595857], + [303.8181509] + ] + }, + { + "name": "14_21", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 21, + "K": [ + [743.577,0,349.797], + [0,743.73,227.793], + [0,0,1] + ], + "distCoef": [-0.307046,-0.0206712,-0.000861395,-9.97172e-05,0.196115], + "R": [ + [0.09969364468,-0.01462231859,0.9949107322], + [0.2541863771,0.9670897407,-0.01125696175], + [-0.9620033591,0.2540150021,0.1001294952] + ], + "t": [ + [-20.43364439], + [109.4423166], + [308.9174676] + ] + }, + { + "name": "14_22", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 22, + "K": [ + [745.066,0,381.498], + [0,745.047,229.678], + [0,0,1] + ], + "distCoef": [-0.314894,0.0257947,-0.000483886,0.00117112,0.111876], + "R": [ + [0.08696832552,-0.05294226024,0.9948033109], + [0.2154078845,0.9759627551,0.03310806346], + [-0.9726437959,0.2114091239,0.09628202687] + ], + "t": [ + [-4.298071534], + [115.0382234], + [303.8536261] + ] + }, + { + "name": "14_23", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 23, + "K": [ + [746.602,0,379.206], + [0,746.635,260.689], + [0,0,1] + ], + "distCoef": [-0.319922,0.0568918,0.00103779,-0.000422086,0.0766843], + "R": [ + [0.09129519856,-0.01052008078,0.9957683037], + [0.2195471399,0.9755524467,-0.009822274065], + [-0.9713208739,0.2195148095,0.09137290798] + ], + "t": [ + [18.69590833], + [125.3942709], + [304.7857903] + ] + }, + { + "name": "14_24", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 24, + "K": [ + [745.388,0,382.392], + [0,745.496,224.015], + [0,0,1] + ], + "distCoef": [-0.302393,-0.0525763,-0.000559682,-6.77e-05,0.234314], + "R": [ + [0.08118536371,-0.04636746828,0.9956199047], + [0.1796446798,0.9832385033,0.03114216711], + [-0.9803758084,0.1763295309,0.0881542445] + ], + "t": [ + [8.147122648], + [159.0280693], + [298.1193244] + ] + }, + { + "name": "15_01", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 1, + "K": [ + [747.532,0,374.739], + [0,747.668,233.944], + [0,0,1] + ], + "distCoef": [-0.331439,0.109037,-0.000609362,0.000392501,-0.000621335], + "R": [ + [0.7848571462,0.05717032211,0.6170338843], + [0.1817012858,0.9307358272,-0.3173569956], + [-0.5924389444,0.3611957561,0.7201067442] + ], + "t": [ + [-19.59276639], + [102.5270366], + [325.6365462] + ] + }, + { + "name": "15_02", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 2, + "K": [ + [743.597,0,385.764], + [0,743.786,211.188], + [0,0,1] + ], + "distCoef": [-0.307778,-0.0279819,-0.000454196,0.00143268,0.205643], + "R": [ + [0.7963392439,-0.01332837804,0.6047033677], + [0.2601504211,0.910106147,-0.3225345868], + [-0.5460453892,0.4141607847,0.7282206241] + ], + "t": [ + [-38.00771612], + [61.10094736], + [329.1235579] + ] + }, + { + "name": "15_03", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 3, + "K": [ + [746.709,0,382.284], + [0,746.792,243.451], + [0,0,1] + ], + "distCoef": [-0.343209,0.149416,0.000603517,0.00195788,-0.0395936], + "R": [ + 
[0.7773715491,0.01124156294,0.6289412548], + [0.2547080739,0.908583342,-0.3310590698], + [-0.5751671686,0.4175523175,0.7034435232] + ], + "t": [ + [-3.435783379], + [55.70511308], + [330.3798829] + ] + }, + { + "name": "15_04", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 4, + "K": [ + [743.976,0,365.248], + [0,744.344,229.757], + [0,0,1] + ], + "distCoef": [-0.297483,-0.106842,0.000162294,-0.00147347,0.393874], + "R": [ + [0.7524447247,-0.05297584633,0.6565215122], + [0.2825071426,0.9263759092,-0.2490329079], + [-0.5949929838,0.3728555143,0.7120127209] + ], + "t": [ + [9.049706825], + [87.26745214], + [326.8342451] + ] + }, + { + "name": "15_05", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 5, + "K": [ + [748.766,0,349.367], + [0,748.975,233.229], + [0,0,1] + ], + "distCoef": [-0.341466,0.149186,0.00133441,-0.000377568,-0.0615035], + "R": [ + [0.7609990379,-0.1304343502,0.6355055818], + [0.3323849453,0.9196335935,-0.2092708816], + [-0.5571361704,0.3704874276,0.7431946943] + ], + "t": [ + [9.029843232], + [83.469382], + [327.9910328] + ] + }, + { + "name": "15_06", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 6, + "K": [ + [747.104,0,395.739], + [0,747.205,237.611], + [0,0,1] + ], + "distCoef": [-0.337038,0.14046,-0.00100634,0.00170735,-0.0468264], + "R": [ + [0.7339738121,-0.1238803965,0.6677844641], + [0.3595276943,0.9050347286,-0.227270713], + [-0.5762137452,0.4068977603,0.7088102232] + ], + "t": [ + [34.88470946], + [89.42074723], + [330.2467181] + ] + }, + { + "name": "15_07", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 7, + "K": [ + [743.991,0,393.18], + [0,744.112,255.459], + [0,0,1] + ], + "distCoef": [-0.325283,0.0732539,0.00077889,1.70805e-05,0.0462558], + "R": [ + [0.7496842409,-0.1571943749,0.6428557128], + [0.3434403747,0.9227495198,-0.1748771933], + [-0.5657050892,0.3518852828,0.7457576683] + ], + "t": [ + [12.35233863], + [128.2674639], + [324.6313017] + ] + }, + { + "name": "15_08", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 8, + "K": [ + [744.616,0,369.102], + [0,744.835,223.742], + [0,0,1] + ], + "distCoef": [-0.336732,0.141968,-0.000206183,0.000677154,-0.0657397], + "R": [ + [0.7264947252,-0.2131742795,0.6532703428], + [0.4249899792,0.8864309285,-0.1833677358], + [-0.5399897516,0.4108490422,0.7345843265] + ], + "t": [ + [15.28675757], + [126.0458703], + [333.4285141] + ] + }, + { + "name": "15_09", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 9, + "K": [ + [747.517,0,392.733], + [0,747.836,218.574], + [0,0,1] + ], + "distCoef": [-0.334626,0.113242,0.000443349,0.00121381,-0.00550976], + "R": [ + [0.8000319441,0.07155257429,0.5956753458], + [0.1937456116,0.9088549369,-0.3693850858], + [-0.5678129326,0.4109293525,0.7132499848] + ], + "t": [ + [-44.09712116], + [90.97242653], + [330.2186197] + ] + }, + { + "name": "15_10", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 10, + "K": [ + [743.904,0,354.135], + [0,744.494,220.038], + [0,0,1] + ], + "distCoef": [-0.309276,-0.0261099,-0.00127318,0.000283377,0.220693], + "R": [ + [0.7314656006,-0.1499734814,0.6651812009], + [0.3639090401,0.9108337109,-0.1948131455], + [-0.576652656,0.3845645668,0.720820233] + ], + "t": [ + [2.360923884], + [158.0207055], + [327.7017732] + ] + }, + { + "name": "15_11", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 11, + "K": [ + [745.441,0,366.024], + [0,745.471,238.165], + [0,0,1] + ], + "distCoef": 
[-0.311636,0.00305556,-0.00136926,0.00112458,0.163822], + "R": [ + [0.743215427,-0.1065195831,0.660518287], + [0.3430146167,0.9082888556,-0.2394834597], + [-0.5744317207,0.4045552288,0.7115920636] + ], + "t": [ + [3.38448511], + [170.5922255], + [331.2143489] + ] + }, + { + "name": "15_12", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 12, + "K": [ + [743.816,0,384.478], + [0,744.21,221.813], + [0,0,1] + ], + "distCoef": [-0.309294,-0.0116228,-0.000777235,0.00017565,0.174372], + "R": [ + [0.799529392,-0.03302696284,0.5997182431], + [0.261290645,0.91817945,-0.2977812898], + [-0.540814155,0.3947856601,0.7427410938] + ], + "t": [ + [-15.11731065], + [179.1857595], + [329.2699106] + ] + }, + { + "name": "15_13", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 13, + "K": [ + [744.594,0,366.809], + [0,744.805,211.378], + [0,0,1] + ], + "distCoef": [-0.313339,0.0076854,-0.000770441,0.000328229,0.137582], + "R": [ + [0.7697001229,-0.07364256128,0.6341439064], + [0.280866324,0.9310898592,-0.2327783971], + [-0.5733025631,0.3572792288,0.7373436945] + ], + "t": [ + [-27.06753178], + [173.6081799], + [322.2797536] + ] + }, + { + "name": "15_14", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 14, + "K": [ + [744.088,0,376.311], + [0,744.421,235.85], + [0,0,1] + ], + "distCoef": [-0.308902,-0.0157485,-0.000258056,-0.00040893,0.167363], + "R": [ + [0.8019727226,0.02030217439,0.5970155559], + [0.20788107,0.9274680659,-0.31078682], + [-0.5600225111,0.3733507848,0.7395836522] + ], + "t": [ + [-32.35663304], + [177.8511702], + [324.3990212] + ] + }, + { + "name": "15_15", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 15, + "K": [ + [745.471,0,391.786], + [0,745.597,244.782], + [0,0,1] + ], + "distCoef": [-0.319471,0.0520955,-9.03549e-05,0.00103599,0.0679082], + "R": [ + [0.7993824794,0.07801580494,0.5957358356], + [0.170767806,0.9211391478,-0.3497728217], + [-0.5760434082,0.3813347671,0.723019908] + ], + "t": [ + [-27.66881494], + [158.8808021], + [326.8395357] + ] + }, + { + "name": "15_16", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 16, + "K": [ + [744.688,0,372.572], + [0,744.687,232.622], + [0,0,1] + ], + "distCoef": [-0.313079,0.00611683,0.000601543,0.00134427,0.153664], + "R": [ + [0.8032635264,0.07397377164,0.5910123419], + [0.1542914416,0.9325457224,-0.3264239985], + [-0.5752928456,0.3533926383,0.7376664456] + ], + "t": [ + [-29.95169554], + [148.2901373], + [322.192073] + ] + }, + { + "name": "15_17", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 17, + "K": [ + [746.029,0,371.631], + [0,745.957,227.751], + [0,0,1] + ], + "distCoef": [-0.328618,0.10871,0.000376647,0.00140085,-0.015131], + "R": [ + [0.7930332571,0.09578045983,0.6016014933], + [0.1573865304,0.9218193412,-0.3542295616], + [-0.5884961625,0.3755997947,0.7159588403] + ], + "t": [ + [-34.37744536], + [124.5681533], + [326.9926029] + ] + }, + { + "name": "15_18", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 18, + "K": [ + [745.728,0,355.008], + [0,745.836,235.366], + [0,0,1] + ], + "distCoef": [-0.326785,0.0753795,-0.00141997,0.000421746,0.0593081], + "R": [ + [0.7423074724,-0.1183757606,0.6595201254], + [0.3246236378,0.9245812728,-0.1994215728], + [-0.5861732766,0.362127946,0.7247511576] + ], + "t": [ + [30.16113415], + [163.1800117], + [323.8887405] + ] + }, + { + "name": "15_19", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 19, + "K": [ + 
[745.415,0,362.511], + [0,745.431,246.567], + [0,0,1] + ], + "distCoef": [-0.31824,0.0392935,0.000511921,2.0382e-05,0.0980721], + "R": [ + [0.7792023734,-0.03485918818,0.6258022837], + [0.250771695,0.9323920084,-0.2603050127], + [-0.5744190268,0.3597637832,0.7352637636] + ], + "t": [ + [-23.21577405], + [116.3982595], + [324.3931588] + ] + }, + { + "name": "15_20", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 20, + "K": [ + [745.757,0,370.457], + [0,745.798,252.296], + [0,0,1] + ], + "distCoef": [-0.322058,0.058259,0.000816175,0.000770211,0.0698692], + "R": [ + [0.7754488131,-0.03297117701,0.6305489986], + [0.2704225106,0.9197540051,-0.2844718542], + [-0.5705705951,0.391108005,0.7221383001] + ], + "t": [ + [-0.5150360293], + [101.3336776], + [328.6175717] + ] + }, + { + "name": "15_21", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 21, + "K": [ + [746.009,0,385.23], + [0,746.113,244.377], + [0,0,1] + ], + "distCoef": [-0.328614,0.0717398,0.00119782,0.000153035,0.0631847], + "R": [ + [0.7150247804,-0.1629175474,0.6798510396], + [0.3900461789,0.9000077369,-0.194550898], + [-0.5801754405,0.4042820134,0.7070732013] + ], + "t": [ + [2.095653738], + [113.9962742], + [330.0144097] + ] + }, + { + "name": "15_22", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 22, + "K": [ + [747.044,0,384.928], + [0,747.43,218.136], + [0,0,1] + ], + "distCoef": [-0.332061,0.0970763,-0.00131827,0.000796644,0.024739], + "R": [ + [0.7476996574,-0.1120966581,0.6545071135], + [0.3349363173,0.9147459603,-0.2259590484], + [-0.5733784838,0.3881677053,0.7215004829] + ], + "t": [ + [-3.202807266], + [138.4357179], + [328.3283502] + ] + }, + { + "name": "15_23", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 23, + "K": [ + [746.525,0,381.586], + [0,746.566,231.744], + [0,0,1] + ], + "distCoef": [-0.323751,0.0809499,0.00143311,0.000786746,0.0334271], + "R": [ + [0.7874675535,-0.04961201835,0.6143561669], + [0.2785108695,0.9178324582,-0.2828697124], + [-0.5498422936,0.3938555906,0.7365807667] + ], + "t": [ + [-21.67007007], + [141.1281207], + [328.549187] + ] + }, + { + "name": "15_24", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 24, + "K": [ + [744.493,0,392.291], + [0,744.573,223.193], + [0,0,1] + ], + "distCoef": [-0.308278,-0.0176562,-0.000671893,0.00116828,0.17277], + "R": [ + [0.7758686755,-0.01407586642,0.6307374005], + [0.2927445364,0.8936390769,-0.3401614861], + [-0.5588635207,0.4485655695,0.6974672] + ], + "t": [ + [-20.05926183], + [105.1778582], + [335.8474538] + ] + }, + { + "name": "16_01", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 1, + "K": [ + [745.918,0,380.409], + [0,745.86,226.454], + [0,0,1] + ], + "distCoef": [-0.329171,0.0901569,-0.000500393,-0.000311386,0.0200307], + "R": [ + [0.8121486446,0.04341076946,0.5818333819], + [-0.0759194996,0.9966126489,0.03161419974], + [-0.5784901112,-0.06984792866,0.8126933358] + ], + "t": [ + [55.6088262], + [125.3657692], + [265.9940479] + ] + }, + { + "name": "16_02", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 2, + "K": [ + [747.364,0,392.411], + [0,747.161,225.523], + [0,0,1] + ], + "distCoef": [-0.325367,0.0819479,0.000479765,0.00158774,0.0247525], + "R": [ + [0.8168932447,0.07701494166,0.5716241121], + [-0.08391193553,0.9963702084,-0.01432462351], + [-0.5706524458,-0.03626439747,0.8203905653] + ], + "t": [ + [75.42528996], + [124.1426197], + [270.1790967] + ] + }, + { + "name": "16_03", + "type": 
"vga", + "resolution": [640,480], + "panel": 16, + "node": 3, + "K": [ + [744.743,0,378.771], + [0,744.551,249.858], + [0,0,1] + ], + "distCoef": [-0.319546,0.0369202,-5.08119e-05,0.00111176,0.115068], + "R": [ + [0.8437113062,0.07102371173,0.5320778742], + [-0.08587784221,0.9963005803,0.003185889303], + [-0.5298832211,-0.04838167055,0.8466894271] + ], + "t": [ + [57.15960424], + [150.0301024], + [271.4615922] + ] + }, + { + "name": "16_04", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 4, + "K": [ + [745.916,0,377.522], + [0,746.078,215.704], + [0,0,1] + ], + "distCoef": [-0.32195,0.0590592,-0.000295617,0.000900619,0.0691531], + "R": [ + [0.8298382679,0.121110683,0.5447023514], + [-0.1306769278,0.9911961099,-0.02130286834], + [-0.5424868568,-0.05350209448,0.8383588349] + ], + "t": [ + [50.00635036], + [157.1807453], + [269.6015294] + ] + }, + { + "name": "16_05", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 5, + "K": [ + [745.303,0,378.655], + [0,745.572,246.962], + [0,0,1] + ], + "distCoef": [-0.315703,0.0277156,6.06815e-05,0.000389915,0.121683], + "R": [ + [0.8187116226,0.05412921644,0.5716478872], + [-0.09011941267,0.9953220251,0.0348218015], + [-0.5670888559,-0.08002558546,0.8197598034] + ], + "t": [ + [44.81120287], + [188.347539], + [263.8787228] + ] + }, + { + "name": "16_06", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 6, + "K": [ + [745.606,0,364.995], + [0,745.957,239.275], + [0,0,1] + ], + "distCoef": [-0.315328,0.0257972,-0.000148911,-0.000553771,0.11289], + "R": [ + [0.8250072615,0.03741598225,0.5638821355], + [-0.06134414867,0.997839028,0.02354080738], + [-0.5617827996,-0.05401220659,0.8255196955] + ], + "t": [ + [18.96573731], + [189.9536973], + [269.3804852] + ] + }, + { + "name": "16_07", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 7, + "K": [ + [748.144,0,375.351], + [0,748.158,222.981], + [0,0,1] + ], + "distCoef": [-0.330846,0.0923667,0.000924419,-0.000952259,0.0155541], + "R": [ + [0.837010476,0.04764620621,0.5451085232], + [-0.06946161724,0.9973944363,0.0194787641], + [-0.542760119,-0.05416804921,0.8381391744] + ], + "t": [ + [-3.044263505], + [177.2440129], + [269.3681033] + ] + }, + { + "name": "16_08", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 8, + "K": [ + [744.865,0,367.243], + [0,744.958,216.687], + [0,0,1] + ], + "distCoef": [-0.318901,0.0494498,-4.02299e-05,-0.00132469,0.0675277], + "R": [ + [0.820488273,0.02086231711,0.571282555], + [-0.05401864215,0.9976917237,0.04114864192], + [-0.569105421,-0.06462188605,0.8197213134] + ], + "t": [ + [-19.55260409], + [185.7078501], + [268.0867658] + ] + }, + { + "name": "16_09", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 9, + "K": [ + [747.002,0,387.115], + [0,747.11,221.005], + [0,0,1] + ], + "distCoef": [-0.330535,0.106093,-0.000909516,-0.000158007,-0.000767667], + "R": [ + [0.7988895638,0.03324884852,0.6005580562], + [-0.04929092881,0.9987315997,0.01027599727], + [-0.5994546431,-0.03781145137,0.7995151187] + ], + "t": [ + [-23.46737596], + [164.4653247], + [274.3468777] + ] + }, + { + "name": "16_10", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 10, + "K": [ + [747.13,0,370.332], + [0,747.181,215.13], + [0,0,1] + ], + "distCoef": [-0.317083,0.0321021,0.000973109,0.00011315,0.117938], + "R": [ + [0.8533830718,-0.04475694932,0.5193593633], + [-0.01101437775,0.9945367161,0.1038046423], + [-0.5211679348,-0.09430554471,0.8482278279] + ], + "t": [ + 
[-57.15311463], + [154.6074069], + [261.7210039] + ] + }, + { + "name": "16_11", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 11, + "K": [ + [743.847,0,352.444], + [0,743.813,257.427], + [0,0,1] + ], + "distCoef": [-0.317406,0.0378558,0.000559662,0.00156409,0.0978841], + "R": [ + [0.8306368039,-0.006305585156,0.5567788965], + [-0.01286906876,0.999451376,0.03051776569], + [-0.5566658666,-0.03251440526,0.8300999496] + ], + "t": [ + [-55.68789985], + [125.5954887], + [272.609285] + ] + }, + { + "name": "16_12", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 12, + "K": [ + [744.746,0,358.295], + [0,744.902,240.075], + [0,0,1] + ], + "distCoef": [-0.311924,0.00313238,0.000282789,0.000109914,0.161883], + "R": [ + [0.8248636519,0.04296544146,0.5636966618], + [-0.06337887364,0.9978500361,0.01668603434], + [-0.5617678116,-0.04949016272,0.8258133262] + ], + "t": [ + [-45.5470475], + [111.3455785], + [270.6081331] + ] + }, + { + "name": "16_13", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 13, + "K": [ + [742.599,0,373.118], + [0,742.696,232.489], + [0,0,1] + ], + "distCoef": [-0.30659,-0.0244311,-0.000674534,-0.000450328,0.198624], + "R": [ + [0.8431633834,0.1596479738,0.5134082522], + [-0.1755645793,0.9843078819,-0.01775026834], + [-0.5081855837,-0.07516992751,0.8579608934] + ], + "t": [ + [-27.27822308], + [119.4613899], + [265.3318331] + ] + }, + { + "name": "16_14", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 14, + "K": [ + [745.804,0,370.921], + [0,745.998,236.13], + [0,0,1] + ], + "distCoef": [-0.32821,0.0986121,-0.000141995,-6.949e-05,-0.000912797], + "R": [ + [0.8387309717,0.02755081107,0.5438486094], + [-0.05712815546,0.9976599438,0.03756341813], + [-0.5415410705,-0.06257467009,0.8383422211] + ], + "t": [ + [-30.56519475], + [90.10611059], + [268.3571691] + ] + }, + { + "name": "16_15", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 15, + "K": [ + [746.816,0,365.456], + [0,746.849,225.794], + [0,0,1] + ], + "distCoef": [-0.313831,-0.00769663,-0.000408313,0.00132145,0.204366], + "R": [ + [0.832563643,0.03033638007,0.5530980784], + [-0.06055031945,0.9974999941,0.03643378343], + [-0.5506100609,-0.06382370879,0.8323191065] + ], + "t": [ + [-6.42740827], + [88.69840867], + [268.7038743] + ] + }, + { + "name": "16_16", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 16, + "K": [ + [745.958,0,362.302], + [0,745.997,246.977], + [0,0,1] + ], + "distCoef": [-0.334292,0.102923,-0.000499879,-0.000549652,0.00793805], + "R": [ + [0.8469636173,0.04048111503,0.5301074517], + [-0.08872767491,0.9938758,0.0658657255], + [-0.5241946497,-0.1028210748,0.8453684379] + ], + "t": [ + [4.584618298], + [109.8657875], + [264.6056558] + ] + }, + { + "name": "16_17", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 17, + "K": [ + [743.409,0,347.233], + [0,743.501,244.449], + [0,0,1] + ], + "distCoef": [-0.321337,0.060438,0.000289347,-0.000274585,0.0540146], + "R": [ + [0.8338949711,0.06176137043,0.5484566622], + [-0.07967791451,0.9967809419,0.008898524832], + [-0.5461415633,-0.05112031815,0.8361316319] + ], + "t": [ + [32.73506114], + [91.25662398], + [270.2531272] + ] + }, + { + "name": "16_18", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 18, + "K": [ + [745.291,0,372.769], + [0,745.233,242.994], + [0,0,1] + ], + "distCoef": [-0.333422,0.127228,0.000470045,-0.000171948,-0.0533425], + "R": [ + [0.83476387,0.01583088955,0.5503804723], + 
[-0.006383142992,0.9997976531,-0.01907638369], + [-0.5505711006,0.01241111862,0.8346960089] + ], + "t": [ + [48.20146308], + [84.31846371], + [276.1979749] + ] + }, + { + "name": "16_19", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 19, + "K": [ + [746.318,0,365.802], + [0,746.439,228.058], + [0,0,1] + ], + "distCoef": [-0.329752,0.106043,0.000413141,0.00102356,-0.00232913], + "R": [ + [0.812564017,0.08482803737,0.576666214], + [-0.09768913876,0.9951785947,-0.008740529432], + [-0.5746273144,-0.04923178609,0.8169330944] + ], + "t": [ + [39.50134988], + [124.7306793], + [269.4016435] + ] + }, + { + "name": "16_20", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 20, + "K": [ + [745.104,0,371.377], + [0,745.158,252.192], + [0,0,1] + ], + "distCoef": [-0.317414,0.0233642,0.000269725,0.000539732,0.145301], + "R": [ + [0.8445515108,0.05428741136,0.5327153297], + [-0.06949119822,0.9975462456,0.00851241329], + [-0.5309460603,-0.04420819807,0.8462516862] + ], + "t": [ + [17.33430135], + [146.0606392], + [271.3134014] + ] + }, + { + "name": "16_21", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 21, + "K": [ + [744.321,0,365.126], + [0,744.44,221.253], + [0,0,1] + ], + "distCoef": [-0.310945,0.00293318,4.64093e-05,-0.000454281,0.146346], + "R": [ + [0.8382052649,0.09941648006,0.5362166515], + [-0.1229674254,0.9923765769,0.008230548616], + [-0.531310593,-0.07283607028,0.8440402601] + ], + "t": [ + [5.636303812], + [160.8368098], + [266.310691] + ] + }, + { + "name": "16_22", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 22, + "K": [ + [745.695,0,387.973], + [0,745.975,222.039], + [0,0,1] + ], + "distCoef": [-0.325844,0.0780224,-0.000861123,0.000487347,0.0459906], + "R": [ + [0.8503320636,-0.003175777979,0.52623692], + [-0.02504000004,0.9986049625,0.04648792516], + [-0.5256504352,-0.05270714583,0.8490662971] + ], + "t": [ + [-29.03965018], + [141.2975723], + [268.9897195] + ] + }, + { + "name": "16_23", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 23, + "K": [ + [746.757,0,385.384], + [0,746.697,250.739], + [0,0,1] + ], + "distCoef": [-0.330103,0.0993513,0.000581277,0.0005991,0.0043047], + "R": [ + [0.8172674448,0.1129970073,0.565071323], + [-0.1204798393,0.992420693,-0.02420281713], + [-0.5635233199,-0.0482995277,0.8246869852] + ], + "t": [ + [1.484048414], + [120.2737991], + [270.3939501] + ] + }, + { + "name": "16_24", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 24, + "K": [ + [743.909,0,365.262], + [0,744.1,225.983], + [0,0,1] + ], + "distCoef": [-0.309366,-0.0151251,-0.000569796,0.000128233,0.192772], + "R": [ + [0.8488529257,0.0258708029,0.5279956553], + [-0.02681353424,0.9996232069,-0.005871843729], + [-0.5279486195,-0.009173097852,0.8492267715] + ], + "t": [ + [-1.170097817], + [104.9858918], + [274.723166] + ] + }, + { + "name": "17_01", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 1, + "K": [ + [743.511,0,382.741], + [0,744.07,233.668], + [0,0,1] + ], + "distCoef": [-0.303608,-0.0460126,4.19904e-05,0.000729649,0.232264], + "R": [ + [0.7426987355,0.03664601822,-0.6686222084], + [-0.01756201576,0.9992239229,0.035258014], + [0.6693953719,-0.01444372865,0.742765922] + ], + "t": [ + [27.30884403], + [110.2809812], + [269.7471778] + ] + }, + { + "name": "17_02", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 2, + "K": [ + [744.491,0,371.868], + [0,744.58,223.545], + [0,0,1] + ], + "distCoef": 
[-0.320104,0.0388113,-0.000303412,-0.00118762,0.0743207], + "R": [ + [0.773334615,0.1038173874,-0.6254402635], + [-0.04654036662,0.9931361468,0.107306049], + [0.6322875671,-0.05387526291,0.7728582591] + ], + "t": [ + [68.17402308], + [125.7906344], + [263.8293382] + ] + }, + { + "name": "17_03", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 3, + "K": [ + [744.096,0,373.775], + [0,744.072,232.317], + [0,0,1] + ], + "distCoef": [-0.314223,0.0332024,-0.000194112,2.11963e-05,0.079313], + "R": [ + [0.7946878724,-0.02084896757,-0.6066601239], + [0.03470365887,0.999335828,0.01111570764], + [0.6060254462,-0.02988684405,0.7948835985] + ], + "t": [ + [55.17367606], + [148.0232969], + [266.1261169] + ] + }, + { + "name": "17_04", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 4, + "K": [ + [748.225,0,373.118], + [0,748.618,236.287], + [0,0,1] + ], + "distCoef": [-0.325852,0.0883394,-0.000431944,-0.00077703,0.0075009], + "R": [ + [0.7874797118,0.07165214706,-0.6121614766], + [-0.03177741847,0.9966185482,0.07577377574], + [0.6155208357,-0.04021739967,0.7870938073] + ], + "t": [ + [46.04066644], + [153.679907], + [265.8341529] + ] + }, + { + "name": "17_05", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 5, + "K": [ + [745.23,0,378.585], + [0,745.614,229.474], + [0,0,1] + ], + "distCoef": [-0.323397,0.071697,-0.000659822,0.000678056,0.0530686], + "R": [ + [0.7680042357,0.04160049173,-0.6390922414], + [0.01355248597,0.9966090615,0.08115854064], + [0.6403013541,-0.07099139161,0.7648361904] + ], + "t": [ + [29.31016003], + [185.453895], + [261.9380867] + ] + }, + { + "name": "17_06", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 6, + "K": [ + [742.876,0,352.101], + [0,743.303,231.794], + [0,0,1] + ], + "distCoef": [-0.319343,0.0421325,-0.000546468,-1.33187e-05,0.10149], + "R": [ + [0.8064347587,0.08751734637,-0.584810819], + [-0.03388642915,0.9942014648,0.1020546777], + [0.5903513275,-0.062483289,0.8047242688] + ], + "t": [ + [35.39857301], + [188.6248332], + [262.8234665] + ] + }, + { + "name": "17_07", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 7, + "K": [ + [745.054,0,358.779], + [0,745.36,231.687], + [0,0,1] + ], + "distCoef": [-0.309912,-0.00132311,-0.00013553,-0.000280643,0.151777], + "R": [ + [0.7882500993,-0.004275732235,-0.615340149], + [0.05540043824,0.996408109,0.06404429605], + [0.612856078,-0.08457303664,0.7856556683] + ], + "t": [ + [-7.246792888], + [183.4614511], + [259.402568] + ] + }, + { + "name": "17_08", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 8, + "K": [ + [745.254,0,343.02], + [0,745.689,227.622], + [0,0,1] + ], + "distCoef": [-0.309897,-0.0109758,-0.00111103,0.000256129,0.180098], + "R": [ + [0.7946287881,0.03514926038,-0.6060772382], + [0.01090423253,0.9973351466,0.07213669658], + [0.6069976827,-0.06393070292,0.7921279432] + ], + "t": [ + [-18.41109561], + [184.5517176], + [263.9542066] + ] + }, + { + "name": "17_09", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 9, + "K": [ + [745.379,0,338.137], + [0,745.543,245.392], + [0,0,1] + ], + "distCoef": [-0.314138,0.0142784,0.00088856,-0.00114362,0.123117], + "R": [ + [0.7570044814,0.09852948519,-0.6459381981], + [-0.05745310106,0.9947735679,0.08440787789], + [0.6508789107,-0.02678598925,0.7587088733] + ], + "t": [ + [-40.16389387], + [164.132571], + [267.7674295] + ] + }, + { + "name": "17_10", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 10, + 
"K": [ + [743.633,0,369.381], + [0,743.739,253.863], + [0,0,1] + ], + "distCoef": [-0.313678,0.00191444,-0.000367883,0.000526793,0.16208], + "R": [ + [0.7732990879,0.03177464522,-0.6332447335], + [0.01440724919,0.9976050167,0.06765102948], + [0.6338777104,-0.06143779407,0.7709892643] + ], + "t": [ + [-41.17430449], + [148.5957101], + [262.973747] + ] + }, + { + "name": "17_11", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 11, + "K": [ + [749.691,0,360.347], + [0,749.465,221.979], + [0,0,1] + ], + "distCoef": [-0.36212,0.288042,0.00167589,0.000680745,-0.303613], + "R": [ + [0.7747984815,0.06051645956,-0.629305229], + [-0.01350572868,0.9967652932,0.07922465313], + [0.6320640066,-0.05288391526,0.7731095544] + ], + "t": [ + [-52.93053536], + [133.9502209], + [264.0833713] + ] + }, + { + "name": "17_12", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 12, + "K": [ + [746.505,0,357.704], + [0,746.569,217.534], + [0,0,1] + ], + "distCoef": [-0.312272,-0.0352904,0.000404412,-0.00107082,0.237629], + "R": [ + [0.7725304823,-0.04233401582,-0.633564902], + [0.05994143841,0.9981814314,0.006391704783], + [0.6321421342,-0.04291457833,0.7736631445] + ], + "t": [ + [-62.64410987], + [104.0188122], + [265.010728] + ] + }, + { + "name": "17_13", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 13, + "K": [ + [745.264,0,354.32], + [0,745.302,226.261], + [0,0,1] + ], + "distCoef": [-0.318398,0.0346929,0.000845692,0.000532231,0.122684], + "R": [ + [0.7851484689,0.03204817868,-0.6184778056], + [-0.002225165301,0.9987996914,0.04893081946], + [0.619303585,-0.03704174263,0.784277361] + ], + "t": [ + [-29.19489341], + [103.2650402], + [265.9795804] + ] + }, + { + "name": "17_14", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 14, + "K": [ + [744.589,0,353.058], + [0,744.664,227.639], + [0,0,1] + ], + "distCoef": [-0.324606,0.0822873,0.00100728,-0.000415736,0.0203245], + "R": [ + [0.7765409088,-0.02900211747,-0.6293989944], + [0.06862390156,0.9968904955,0.03873112579], + [0.6263185908,-0.07326811825,0.7761164898] + ], + "t": [ + [-35.65491372], + [89.93385082], + [261.6973052] + ] + }, + { + "name": "17_15", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 15, + "K": [ + [744.009,0,351.118], + [0,743.982,227.187], + [0,0,1] + ], + "distCoef": [-0.31768,0.0289626,0.000394183,-0.00106594,0.077624], + "R": [ + [0.7703409519,0.009578036972,-0.6375602553], + [0.03762675731,0.9974619202,0.06044786963], + [0.6365210484,-0.07055479443,0.7680253746] + ], + "t": [ + [-14.94306331], + [88.85755459], + [261.4804843] + ] + }, + { + "name": "17_16", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 16, + "K": [ + [745.298,0,365.044], + [0,745.641,201.543], + [0,0,1] + ], + "distCoef": [-0.315769,0.0139989,-0.000983596,0.000497246,0.155532], + "R": [ + [0.7668905855,0.04755147693,-0.6400138177], + [0.009922268647,0.9962536216,0.0859084976], + [0.6417011597,-0.07223280706,0.7635457047] + ], + "t": [ + [4.594602528], + [99.8882812], + [261.439958] + ] + }, + { + "name": "17_17", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 17, + "K": [ + [744.772,0,356.238], + [0,744.946,209.811], + [0,0,1] + ], + "distCoef": [-0.307562,-0.0273551,-0.000331097,0.000403566,0.231396], + "R": [ + [0.7386328767,0.1026186384,-0.6662513704], + [-0.03586762178,0.992927984,0.1131703685], + [0.6731530192,-0.05969450264,0.7370899397] + ], + "t": [ + [18.92063539], + [92.1220326], + [263.1909682] + ] + }, + { + 
"name": "17_18", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 18, + "K": [ + [746.696,0,345.664], + [0,746.883,230.9], + [0,0,1] + ], + "distCoef": [-0.332087,0.135716,-0.000396371,4.15402e-05,-0.0769473], + "R": [ + [0.7676740293,0.0869303765,-0.6349170767], + [-0.05592901251,0.9960646798,0.06875390322], + [0.6383952774,-0.01727030079,0.7695149163] + ], + "t": [ + [48.13164066], + [87.731429], + [267.0873794] + ] + }, + { + "name": "17_19", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 19, + "K": [ + [743.785,0,363.137], + [0,743.962,239.724], + [0,0,1] + ], + "distCoef": [-0.322076,0.0699752,0.00130957,8.28091e-06,0.0447641], + "R": [ + [0.7666015958,0.09362030423,-0.6352615462], + [-0.01827880108,0.9920950944,0.1241499457], + [0.6418628193,-0.08356172708,0.7622529495] + ], + "t": [ + [25.25313987], + [133.2656265], + [259.9680703] + ] + }, + { + "name": "17_20", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 20, + "K": [ + [747.071,0,344.427], + [0,747.404,242.981], + [0,0,1] + ], + "distCoef": [-0.349964,0.20917,0.0008789,-0.000586258,-0.211765], + "R": [ + [0.7775513873,0.03007697302,-0.6280996862], + [-0.01270805589,0.999403059,0.03212523871], + [0.6286909777,-0.01699709801,0.7774694548] + ], + "t": [ + [17.35278566], + [137.2956705], + [269.3773006] + ] + }, + { + "name": "17_21", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 21, + "K": [ + [744.669,0,371.314], + [0,744.881,251.475], + [0,0,1] + ], + "distCoef": [-0.32107,0.0528121,0.000172414,0.000961494,0.0921892], + "R": [ + [0.7854342878,0.01663631847,-0.6187214337], + [0.02446292583,0.9980232337,0.05788946549], + [0.6184614336,-0.06060410764,0.7834746947] + ], + "t": [ + [-1.039205356], + [155.8049723], + [263.425936] + ] + }, + { + "name": "17_22", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 22, + "K": [ + [744.126,0,368.359], + [0,744.205,218.365], + [0,0,1] + ], + "distCoef": [-0.306681,-0.0309893,-0.000506643,-0.000551257,0.209183], + "R": [ + [0.7742934088,0.08491898973,-0.6271032469], + [-0.02171436959,0.9939373135,0.1077826651], + [0.6324541115,-0.06983825553,0.771443073] + ], + "t": [ + [-12.48615074], + [146.2169272], + [261.8070617] + ] + }, + { + "name": "17_23", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 23, + "K": [ + [746.439,0,363.854], + [0,746.575,224.032], + [0,0,1] + ], + "distCoef": [-0.333494,0.127943,0.00111227,0.000376509,-0.0438307], + "R": [ + [0.7741360077,0.05745954338,-0.6304060933], + [-0.01777243196,0.9974520988,0.06909016755], + [0.6327697704,-0.04228133707,0.7731847814] + ], + "t": [ + [-14.18178238], + [117.4047924], + [265.0998909] + ] + }, + { + "name": "17_24", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 24, + "K": [ + [745.824,0,346.505], + [0,746.017,224.098], + [0,0,1] + ], + "distCoef": [-0.317434,0.0247137,-0.000866957,0.000304145,0.138958], + "R": [ + [0.7656627697,0.09930116127,-0.6355311184], + [-0.04982185052,0.99419918,0.09531932471], + [0.6413098365,-0.04131912178,0.7661686654] + ], + "t": [ + [7.35512715], + [111.8344509], + [265.0127015] + ] + }, + { + "name": "18_01", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 1, + "K": [ + [744.96,0,372.705], + [0,744.564,226.392], + [0,0,1] + ], + "distCoef": [-0.321978,0.0724692,0.000483988,0.000458946,0.0380169], + "R": [ + [-0.3520669355,0.03279886428,-0.9353999719], + [0.04913052402,0.9986556534,0.01652505738], + 
[0.9346844732,-0.04013876447,-0.3532050609] + ], + "t": [ + [47.10128491], + [117.3460549], + [266.6541908] + ] + }, + { + "name": "18_02", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 2, + "K": [ + [748.843,0,358.358], + [0,748.813,225.018], + [0,0,1] + ], + "distCoef": [-0.335266,0.148062,0.000634215,-0.00153008,-0.105518], + "R": [ + [-0.3389880085,0.04020239671,-0.9399313259], + [0.04795713663,0.9985260662,0.02541275744], + [0.9395675831,-0.03646179499,-0.3404163544] + ], + "t": [ + [70.51461434], + [125.984952], + [266.5287049] + ] + }, + { + "name": "18_03", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 3, + "K": [ + [746.557,0,370.525], + [0,746.643,239.094], + [0,0,1] + ], + "distCoef": [-0.336876,0.137869,0.0006954,0.000424607,-0.0538424], + "R": [ + [-0.3751735108,0.06869685522,-0.9244055273], + [0.01802710881,0.9976021763,0.06682006625], + [0.9267792942,0.008404759824,-0.3755123165] + ], + "t": [ + [58.58769651], + [133.6261971], + [275.7276294] + ] + }, + { + "name": "18_04", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 4, + "K": [ + [744.71,0,356.151], + [0,744.769,223.97], + [0,0,1] + ], + "distCoef": [-0.312604,0.00791514,0.000747313,-0.000519594,0.158336], + "R": [ + [-0.3438161676,0.01243889994,-0.9389545871], + [0.0251972518,0.9996744288,0.00401683712], + [0.9386988555,-0.02227802162,-0.344017657] + ], + "t": [ + [40.26546697], + [152.0702476], + [270.0686857] + ] + }, + { + "name": "18_05", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 5, + "K": [ + [743.927,0,355.392], + [0,744.057,262.153], + [0,0,1] + ], + "distCoef": [-0.316206,0.0381773,0.00109867,0.000112775,0.102099], + "R": [ + [-0.3913025917,0.04706716523,-0.9190576498], + [0.07535158968,0.9969764632,0.0189755056], + [0.9171719684,-0.0618272904,-0.3936660596] + ], + "t": [ + [27.50168157], + [183.5367771], + [265.1462318] + ] + }, + { + "name": "18_06", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 6, + "K": [ + [744.89,0,353.646], + [0,744.816,246.705], + [0,0,1] + ], + "distCoef": [-0.311434,-0.0151537,0.000898898,0.00113623,0.19919], + "R": [ + [-0.3540366423,0.02766248657,-0.9348223589], + [0.06855079724,0.9976412764,0.003559761167], + [0.9327158432,-0.06282253209,-0.3550978532] + ], + "t": [ + [15.12228299], + [191.0759947], + [263.959739] + ] + }, + { + "name": "18_07", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 7, + "K": [ + [744.21,0,382.066], + [0,744.474,221.564], + [0,0,1] + ], + "distCoef": [-0.318836,0.0439442,-0.000310088,0.000693195,0.0844966], + "R": [ + [-0.3784097731,0.01208936744,-0.9255592314], + [0.03775536538,0.9992841689,-0.002383732641], + [0.9248678695,-0.03584685469,-0.3785953341] + ], + "t": [ + [-11.73143391], + [170.7040215], + [268.2801795] + ] + }, + { + "name": "18_08", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 8, + "K": [ + [744.996,0,378.911], + [0,745.249,217.173], + [0,0,1] + ], + "distCoef": [-0.317298,0.0439499,-0.000470842,0.000645598,0.0800391], + "R": [ + [-0.3573644405,-0.02168005213,-0.9337133564], + [0.09030348924,0.9942444419,-0.05764780686], + [0.9295891224,-0.1049188503,-0.3533498244] + ], + "t": [ + [-32.18764663], + [193.5958696], + [255.9258617] + ] + }, + { + "name": "18_09", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 9, + "K": [ + [745.488,0,367.703], + [0,745.136,254.274], + [0,0,1] + ], + "distCoef": [-0.333608,0.117291,0.00107107,0.000590786,-0.0167148], + "R": [ + 
[-0.3755971335,-0.01611847579,-0.9266428589], + [0.03486308067,0.9988953473,-0.03150636014], + [0.9261270749,-0.0441393233,-0.3746202894] + ], + "t": [ + [-52.11061688], + [162.8813669], + [265.66749] + ] + }, + { + "name": "18_10", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 10, + "K": [ + [746.691,0,377.016], + [0,746.35,247.895], + [0,0,1] + ], + "distCoef": [-0.324348,0.0759263,0.000632098,0.000973799,0.0365142], + "R": [ + [-0.3979832561,-0.05264507275,-0.9158809007], + [0.03842303812,0.9965195246,-0.07397639654], + [0.9165876925,-0.06463229393,-0.3945753015] + ], + "t": [ + [-58.47639535], + [144.7851801], + [261.4908418] + ] + }, + { + "name": "18_11", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 11, + "K": [ + [743.499,0,383.73], + [0,743.269,228.607], + [0,0,1] + ], + "distCoef": [-0.318101,0.0343673,-0.000192972,9.02677e-05,0.0940376], + "R": [ + [-0.3591156591,-0.0799459609,-0.9298626709], + [0.01693912278,0.9956019804,-0.09213990831], + [0.9331393302,-0.04883994185,-0.356182047] + ], + "t": [ + [-65.19666066], + [124.1115675], + [265.1913912] + ] + }, + { + "name": "18_12", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 12, + "K": [ + [744.847,0,377.843], + [0,744.539,240.133], + [0,0,1] + ], + "distCoef": [-0.322594,0.0777366,0.000608553,0.000730506,0.0395492], + "R": [ + [-0.3599917326,-0.04959232233,-0.9316364924], + [0.02914279324,0.9975011607,-0.0643593979], + [0.9325002145,-0.05031934083,-0.3576469123] + ], + "t": [ + [-57.61171896], + [105.5688064], + [264.3974594] + ] + }, + { + "name": "18_13", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 13, + "K": [ + [742.264,0,386.065], + [0,742.375,236.247], + [0,0,1] + ], + "distCoef": [-0.316238,0.0182785,-0.000395794,0.00144239,0.136479], + "R": [ + [-0.3232019546,0.03338047233,-0.9457411066], + [0.05161368011,0.9985119503,0.01760435083], + [0.9449214383,-0.04312341834,-0.324443903] + ], + "t": [ + [61.04698375], + [97.35388185], + [264.1973208] + ] + }, + { + "name": "18_14", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 14, + "K": [ + [744.531,0,362.517], + [0,744.694,222.936], + [0,0,1] + ], + "distCoef": [-0.323155,0.0551,-0.000315217,0.00114443,0.0791805], + "R": [ + [-0.3124904102,0.02154150537,-0.9496766329], + [-0.004629448499,0.999696432,0.02419942065], + [0.9499096335,0.01195856595,-0.3122958229] + ], + "t": [ + [-14.02426098], + [68.46079663], + [270.3325449] + ] + }, + { + "name": "18_15", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 15, + "K": [ + [747.429,0,398.562], + [0,747.425,233.615], + [0,0,1] + ], + "distCoef": [-0.333617,0.122405,0.000303778,0.00134383,-0.0202721], + "R": [ + [-0.358025731,-0.0142572014,-0.9336028643], + [0.04081564607,0.9986886699,-0.03090345813], + [0.9328191995,-0.04916983726,-0.3569743242] + ], + "t": [ + [-8.683192747], + [83.02873835], + [264.4620974] + ] + }, + { + "name": "18_16", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 16, + "K": [ + [742.757,0,357.304], + [0,742.66,220.331], + [0,0,1] + ], + "distCoef": [-0.305443,-0.0527047,-0.000521453,0.00022453,0.250047], + "R": [ + [-0.3364590891,0.05374146283,-0.9401633563], + [0.05791647683,0.99766121,0.03630140184], + [0.9399154021,-0.04223701264,-0.3387846981] + ], + "t": [ + [20.062846], + [91.33983095], + [265.2581766] + ] + }, + { + "name": "18_17", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 17, + "K": [ + [750.787,0,361.922], + 
[0,750.723,216.611], + [0,0,1] + ], + "distCoef": [-0.368257,0.303211,-0.00101236,-0.000679192,-0.335284], + "R": [ + [-0.3521002367,0.0154136189,-0.9358353721], + [0.04957845599,0.9987678018,-0.002203336065], + [0.9346482761,-0.04717306796,-0.3524305629] + ], + "t": [ + [32.75189895], + [90.38015946], + [265.2110414] + ] + }, + { + "name": "18_18", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 18, + "K": [ + [745.69,0,366.196], + [0,745.645,224.452], + [0,0,1] + ], + "distCoef": [-0.325076,0.0695314,0.000207452,8.09151e-05,0.0569118], + "R": [ + [-0.369329094,-0.008664471876,-0.929258278], + [0.06369637747,0.997368813,-0.03461534879], + [0.9271131494,-0.07197484145,-0.3678054246] + ], + "t": [ + [-35.28307581], + [111.055802], + [261.8818226] + ] + }, + { + "name": "18_19", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 19, + "K": [ + [745.552,0,357.301], + [0,745.545,223.113], + [0,0,1] + ], + "distCoef": [-0.320101,0.042192,0.00043748,0.000103204,0.104558], + "R": [ + [-0.3584191226,-0.04877846794,-0.9322855752], + [0.07086164718,0.9943315632,-0.07926770686], + [0.9308675306,-0.09447435344,-0.3529309238] + ], + "t": [ + [16.14340371], + [139.4376601], + [259.6452388] + ] + }, + { + "name": "18_20", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 20, + "K": [ + [746.078,0,363.03], + [0,746.077,221.582], + [0,0,1] + ], + "distCoef": [-0.321359,0.0569666,0.000169599,0.000938787,0.0797635], + "R": [ + [-0.3631410096,0.0448531679,-0.9306539639], + [0.06634832184,0.9975497918,0.02218813063], + [0.9293688758,-0.05368990856,-0.3652271709] + ], + "t": [ + [21.37501917], + [147.345749], + [265.5705493] + ] + }, + { + "name": "18_21", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 21, + "K": [ + [745.043,0,372.293], + [0,745.076,222.901], + [0,0,1] + ], + "distCoef": [-0.317484,0.0404748,0.000192535,-0.000111527,0.0957966], + "R": [ + [-0.3461967977,-0.005928135698,-0.9381431844], + [0.04577092509,0.9986824948,-0.02320122706], + [0.937044716,-0.05097187193,-0.3454693453] + ], + "t": [ + [-0.5259425122], + [153.3372726], + [265.7616305] + ] + }, + { + "name": "18_22", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 22, + "K": [ + [745.252,0,401.788], + [0,745.346,245.295], + [0,0,1] + ], + "distCoef": [-0.315494,0.0267895,-0.000624877,0.000210937,0.0993279], + "R": [ + [-0.3267831921,-0.004575639121,-0.9450882546], + [0.07739750703,0.9964998407,-0.03158628616], + [0.9419248225,-0.08346934224,-0.3252852558] + ], + "t": [ + [-10.3938656], + [148.3069178], + [261.1183693] + ] + }, + { + "name": "18_23", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 23, + "K": [ + [747.114,0,358.608], + [0,746.941,217.398], + [0,0,1] + ], + "distCoef": [-0.324507,0.0792141,-0.000227367,0.0013287,0.0357905], + "R": [ + [-0.356358404,-0.03218270054,-0.9337949248], + [0.02645826287,0.9986582749,-0.04451528213], + [0.9339746507,-0.04056998648,-0.3550287707] + ], + "t": [ + [-18.04448695], + [115.7023496], + [266.3010308] + ] + }, + { + "name": "18_24", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 24, + "K": [ + [747.28,0,383.407], + [0,747.414,233.333], + [0,0,1] + ], + "distCoef": [-0.321806,0.0494121,-0.000677773,0.00106862,0.0725344], + "R": [ + [-0.3696831614,0.01690678518,-0.9290040478], + [0.03916078476,0.9992295361,0.002601362608], + [0.9283322644,-0.03541884761,-0.3700604169] + ], + "t": [ + [3.487638933], + [110.8874693], + [266.9764809] + ] + }, + { + "name": 
"19_01", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 1, + "K": [ + [742.815,0,376.349], + [0,742.96,226.412], + [0,0,1] + ], + "distCoef": [-0.311242,0.000676611,0.00127048,0.000398816,0.145683], + "R": [ + [-0.9986287013,0.0334613179,0.04026235479], + [0.03051664863,0.9969627365,-0.07165218936], + [-0.04253764409,-0.07032526067,-0.99661673] + ], + "t": [ + [47.87451164], + [124.5257469], + [265.3025885] + ] + }, + { + "name": "19_02", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 2, + "K": [ + [746.352,0,362.211], + [0,746.799,224.495], + [0,0,1] + ], + "distCoef": [-0.33354,0.113916,-0.000650978,0.00200875,0.00369896], + "R": [ + [-0.9978769066,0.0627015602,0.01761231284], + [0.06225819076,0.9977547513,-0.02468550225], + [-0.01912058832,-0.02353658189,-0.9995401105] + ], + "t": [ + [76.18899734], + [119.4504319], + [269.470097] + ] + }, + { + "name": "19_03", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 3, + "K": [ + [744.923,0,335.897], + [0,744.843,232.622], + [0,0,1] + ], + "distCoef": [-0.310786,-0.00740435,0.000477261,-0.00048183,0.169837], + "R": [ + [-0.9959217828,0.05942221639,0.06788816328], + [0.05820019172,0.9981077555,-0.01984051806], + [-0.06893866983,-0.0158085,-0.9974956397] + ], + "t": [ + [57.6907282], + [139.716188], + [274.5941587] + ] + }, + { + "name": "19_04", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 4, + "K": [ + [745.3,0,371.455], + [0,745.339,223.979], + [0,0,1] + ], + "distCoef": [-0.316788,0.039021,-0.00160053,-0.000126119,0.09467], + "R": [ + [-0.995350133,0.07444232287,0.06112653567], + [0.06997485872,0.994930028,-0.0722340534], + [-0.06619389658,-0.06762085396,-0.9955128267] + ], + "t": [ + [42.04206067], + [161.4993909], + [266.5642499] + ] + }, + { + "name": "19_05", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 5, + "K": [ + [741.339,0,353.354], + [0,741.563,231.192], + [0,0,1] + ], + "distCoef": [-0.304803,-0.0634451,-0.00114618,-0.000982934,0.282182], + "R": [ + [-0.9964181101,0.07478982294,0.03946431643], + [0.07096423127,0.993341211,-0.09075966339], + [-0.04598943103,-0.08763401739,-0.9950905744] + ], + "t": [ + [45.56899486], + [188.2245222], + [262.1501617] + ] + }, + { + "name": "19_06", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 6, + "K": [ + [745.947,0,350.894], + [0,746.217,234.332], + [0,0,1] + ], + "distCoef": [-0.313212,0.0178381,0.000340441,0.00055626,0.126083], + "R": [ + [-0.9969018679,0.07865171151,0.0007576151751], + [0.07854654264,0.9959829876,-0.04299219736], + [-0.004135981729,-0.0427994938,-0.9990751208] + ], + "t": [ + [37.2742824], + [183.4195047], + [270.0123608] + ] + }, + { + "name": "19_07", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 7, + "K": [ + [748.821,0,355.822], + [0,748.684,217.17], + [0,0,1] + ], + "distCoef": [-0.342444,0.16602,-0.000477836,-0.000195363,-0.106824], + "R": [ + [-0.9928808048,-0.04900785176,0.10856306], + [-0.05236016128,0.998228751,-0.02824489671], + [-0.106986546,-0.0337281951,-0.9936882247] + ], + "t": [ + [-31.49326377], + [168.7489309], + [271.4480177] + ] + }, + { + "name": "19_08", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 8, + "K": [ + [747.238,0,359.034], + [0,747.474,233.038], + [0,0,1] + ], + "distCoef": [-0.313675,0.00436645,0.000419802,0.000604189,0.154068], + "R": [ + [-0.9913876468,0.02931278851,0.127637354], + [0.0192008625,0.9966303068,-0.07974558542], + 
[-0.1295448208,-0.07660804099,-0.9886098055] + ], + "t": [ + [-44.88902211], + [188.5485089], + [261.5304555] + ] + }, + { + "name": "19_09", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 9, + "K": [ + [743.415,0,332.333], + [0,743.715,235.337], + [0,0,1] + ], + "distCoef": [-0.308464,-0.0208585,-0.00102455,0.000256502,0.207947], + "R": [ + [-0.9954977047,0.04566149696,0.08306231217], + [0.04175753042,0.9979670543,-0.04814631117], + [-0.08509188364,-0.04446106523,-0.9953806232] + ], + "t": [ + [-46.35184093], + [166.6378451], + [268.6077116] + ] + }, + { + "name": "19_10", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 10, + "K": [ + [747.206,0,362.728], + [0,747.412,248.496], + [0,0,1] + ], + "distCoef": [-0.340118,0.138855,0.000965068,4.5306e-05,-0.0441245], + "R": [ + [-0.9935175509,0.05252798067,0.1008151146], + [0.05439486481,0.9983935823,0.01585728578], + [-0.09982021218,0.02123831626,-0.9947787991] + ], + "t": [ + [-46.95074625], + [127.5778656], + [276.6370715] + ] + }, + { + "name": "19_11", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 11, + "K": [ + [745.45,0,355.141], + [0,745.641,249.232], + [0,0,1] + ], + "distCoef": [-0.326245,0.10077,0.000216744,-2.37583e-05,-0.0259903], + "R": [ + [-0.9983050345,-0.001439505441,0.05818063101], + [-0.002578079686,0.9998065462,-0.01949932386], + [-0.05814130636,-0.01961626748,-0.9981156198] + ], + "t": [ + [-58.09544547], + [121.7224759], + [272.659258] + ] + }, + { + "name": "19_12", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 12, + "K": [ + [743.805,0,368.42], + [0,744.013,242.015], + [0,0,1] + ], + "distCoef": [-0.323306,0.0785457,-0.00106293,0.000187763,0.0236672], + "R": [ + [-0.9954771119,0.0748660766,0.05848410323], + [0.07512966129,0.9971710788,0.002318097681], + [-0.05814510944,0.006701504052,-0.9982856485] + ], + "t": [ + [-47.8147621], + [97.15541342], + [274.4212668] + ] + }, + { + "name": "19_13", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 13, + "K": [ + [742.693,0,353.966], + [0,742.776,227.014], + [0,0,1] + ], + "distCoef": [-0.307193,-0.0103139,0.000109263,-0.000950495,0.159317], + "R": [ + [-0.9933059489,0.1045971031,0.04901773034], + [0.1016362638,0.9930442478,-0.05944065861], + [-0.05489409585,-0.05406078084,-0.9970276176] + ], + "t": [ + [-21.5323637], + [109.7713479], + [268.3161895] + ] + }, + { + "name": "19_14", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 14, + "K": [ + [742.837,0,362.248], + [0,743.502,226.37], + [0,0,1] + ], + "distCoef": [-0.308934,-0.00321353,-0.0010059,0.000705591,0.156528], + "R": [ + [-0.9919154966,0.0987006026,0.07976113456], + [0.09553429302,0.9945144894,-0.04259259489], + [-0.08352751879,-0.03462833131,-0.995903626] + ], + "t": [ + [-30.66946365], + [84.06052642], + [268.8728165] + ] + }, + { + "name": "19_15", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 15, + "K": [ + [742.618,0,345.237], + [0,742.923,230.439], + [0,0,1] + ], + "distCoef": [-0.302695,-0.0546693,-0.000167537,-0.000784726,0.259585], + "R": [ + [-0.9885523252,0.1391044686,0.05843155954], + [0.1381120085,0.9902000007,-0.02071308279], + [-0.06074021267,-0.01240586611,-0.9980765106] + ], + "t": [ + [-1.26146274], + [74.12977283], + [271.0351679] + ] + }, + { + "name": "19_16", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 16, + "K": [ + [744.088,0,370.473], + [0,744.417,231.755], + [0,0,1] + ], + "distCoef": 
[-0.300902,-0.0664899,-0.000333311,0.000589361,0.253926], + "R": [ + [-0.9917390399,0.06178336486,0.1124121551], + [0.06447509535,0.9977094298,0.02046596672], + [-0.1108902109,0.02754468261,-0.9934508803] + ], + "t": [ + [-3.269853258], + [73.62667861], + [274.8694227] + ] + }, + { + "name": "19_17", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 17, + "K": [ + [745.582,0,373.528], + [0,745.86,237.254], + [0,0,1] + ], + "distCoef": [-0.322134,0.0530706,-0.000603814,0.00101303,0.0846746], + "R": [ + [-0.9897330936,0.1313546283,0.05634150462], + [0.1318000226,0.9912672261,0.00424742025], + [-0.05529156869,0.01162962396,-0.9984025212] + ], + "t": [ + [37.3391924], + [70.20661568], + [273.1392775] + ] + }, + { + "name": "19_18", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 18, + "K": [ + [742.542,0,374.105], + [0,742.758,223.273], + [0,0,1] + ], + "distCoef": [-0.306762,-0.0452572,-0.00032402,-0.000364469,0.245651], + "R": [ + [-0.9920842372,0.1065981921,0.06637538524], + [0.106818653,0.9942784937,-0.0002288198192], + [-0.06602000984,0.006863120707,-0.9977946963] + ], + "t": [ + [52.26513597], + [79.91641464], + [273.9509772] + ] + }, + { + "name": "19_19", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 19, + "K": [ + [744.378,0,361.433], + [0,744.589,244.618], + [0,0,1] + ], + "distCoef": [-0.310422,-0.000364242,-0.000710118,0.000839407,0.169675], + "R": [ + [-0.9919054981,0.126974259,0.001010166835], + [0.1269495258,0.9918188066,-0.01338927975], + [-0.002701996339,-0.01315266,-0.9999098493] + ], + "t": [ + [49.23489662], + [110.9052228], + [271.6142806] + ] + }, + { + "name": "19_20", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 20, + "K": [ + [745.72,0,364.99], + [0,745.913,248.461], + [0,0,1] + ], + "distCoef": [-0.32476,0.0791445,0.000409065,0.000522525,0.0385155], + "R": [ + [-0.9808466558,0.1869185946,0.05478391053], + [0.1851721888,0.9820671342,-0.03543168776], + [-0.06042431929,-0.02460859583,-0.9978693896] + ], + "t": [ + [40.23583817], + [134.9359413], + [272.7493911] + ] + }, + { + "name": "19_21", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 21, + "K": [ + [745.966,0,347.023], + [0,745.905,254.016], + [0,0,1] + ], + "distCoef": [-0.312122,-0.0171046,0.00101358,-9.38575e-05,0.213424], + "R": [ + [-0.9944456328,0.07811965146,0.07053512206], + [0.07435713108,0.9957422838,-0.0544823029], + [-0.07449094204,-0.04893489886,-0.9960203187] + ], + "t": [ + [2.247391851], + [153.0572023], + [268.8284628] + ] + }, + { + "name": "19_22", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 22, + "K": [ + [743.607,0,364.935], + [0,743.756,243.53], + [0,0,1] + ], + "distCoef": [-0.311531,0.000696399,0.00010932,-0.000314324,0.159615], + "R": [ + [-0.9924188487,0.09367860135,0.07955594568], + [0.08900119243,0.9941960017,-0.06044086279], + [-0.0847562186,-0.05290207743,-0.9949963586] + ], + "t": [ + [-15.3150092], + [142.5037842], + [267.7211288] + ] + }, + { + "name": "19_23", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 23, + "K": [ + [743.508,0,369.721], + [0,743.449,243.575], + [0,0,1] + ], + "distCoef": [-0.309744,-0.0191119,0.000292611,0.000847107,0.198605], + "R": [ + [-0.9987856124,0.03694807636,0.03259049098], + [0.03470669556,0.9971594314,-0.06684694127], + [-0.03496778135,-0.06563465492,-0.997230839] + ], + "t": [ + [-6.799650163], + [123.3743131], + [267.1549958] + ] + }, + { + "name": "19_24", + "type": "vga", + "resolution": 
[640,480], + "panel": 19, + "node": 24, + "K": [ + [742.775,0,379.613], + [0,742.864,224.449], + [0,0,1] + ], + "distCoef": [-0.316586,0.0333112,-0.000180777,0.00112675,0.112087], + "R": [ + [-0.9947573056,0.06853183176,0.07590316848], + [0.05765365411,0.9888586451,-0.1372393391], + [-0.08446276764,-0.1321437401,-0.9876254719] + ], + "t": [ + [4.340029177], + [136.5307812], + [258.2193706] + ] + }, + { + "name": "20_01", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 1, + "K": [ + [745.267,0,367.511], + [0,745.253,228.976], + [0,0,1] + ], + "distCoef": [-0.316421,0.0232694,0.000233523,0.00095017,0.129164], + "R": [ + [-0.2595515744,0.03264633198,0.965177288], + [-0.02439656235,0.9988878376,-0.04034718866], + [-0.9654210418,-0.03401918423,-0.2584664527] + ], + "t": [ + [43.91564589], + [114.6472759], + [269.2437955] + ] + }, + { + "name": "20_02", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 2, + "K": [ + [746.737,0,383.621], + [0,746.553,234.139], + [0,0,1] + ], + "distCoef": [-0.330711,0.126048,0.000259954,-0.000232797,-0.067441], + "R": [ + [-0.2600597375,0.03354081135,0.965009817], + [-0.06475754991,0.9965406566,-0.05208818886], + [-0.9634185968,-0.07603771211,-0.2569880808] + ], + "t": [ + [63.03617994], + [136.0112472], + [264.2112923] + ] + }, + { + "name": "20_03", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 3, + "K": [ + [748.567,0,371.842], + [0,748.646,223.378], + [0,0,1] + ], + "distCoef": [-0.332561,0.132401,-0.000978802,0.0010132,-0.0596871], + "R": [ + [-0.2517963519,0.03200567411,0.967250864], + [0.0115205721,0.9994813079,-0.03007310314], + [-0.9677116686,0.003570985655,-0.2520344708] + ], + "t": [ + [55.32226207], + [135.5872215], + [276.5287505] + ] + }, + { + "name": "20_04", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 4, + "K": [ + [747.412,0,375.731], + [0,747.545,213.638], + [0,0,1] + ], + "distCoef": [-0.324984,0.0823763,-0.00190711,0.0010176,0.0382164], + "R": [ + [-0.2864406942,-0.001302983566,0.9580970885], + [-0.1193951903,0.9922525608,-0.03434594761], + [-0.9506295373,-0.1242302613,-0.2843770823] + ], + "t": [ + [40.5108683], + [178.4576708], + [254.9563649] + ] + }, + { + "name": "20_05", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 5, + "K": [ + [747.818,0,377.646], + [0,748.63,232.294], + [0,0,1] + ], + "distCoef": [-0.327048,0.100477,-0.00250563,-0.000951363,0.00505748], + "R": [ + [-0.2682590325,-0.01756457816,0.9631866782], + [-0.1175373506,0.9929607203,-0.014628026], + [-0.9561496027,-0.1171345104,-0.2684351761] + ], + "t": [ + [28.10870602], + [198.6254244], + [256.0861594] + ] + }, + { + "name": "20_06", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 6, + "K": [ + [744.281,0,376.164], + [0,744.733,212.764], + [0,0,1] + ], + "distCoef": [-0.314115,0.0261091,-0.00186017,0.000146826,0.111047], + "R": [ + [-0.2995512244,0.02650351378,0.9537120256], + [-0.1164678133,0.9911222418,-0.06412449085], + [-0.9469447251,-0.1302853239,-0.2938050747] + ], + "t": [ + [24.38602287], + [207.7342285], + [252.6787249] + ] + }, + { + "name": "20_07", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 7, + "K": [ + [744.844,0,367.199], + [0,744.885,234.874], + [0,0,1] + ], + "distCoef": [-0.307447,-0.0235368,-0.000447762,-0.000552595,0.198481], + "R": [ + [-0.2246138655,-0.03605175288,0.9737807158], + [-0.1345418425,0.9908917963,0.005651603877], + [-0.965115073,-0.1297448231,-0.2274185059] + ], + "t": [ + 
[-24.57828512], + [193.807989], + [253.6581871] + ] + }, + { + "name": "20_08", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 8, + "K": [ + [745.265,0,373.297], + [0,745.204,222.406], + [0,0,1] + ], + "distCoef": [-0.322725,0.0753011,-0.00198414,9.48962e-05,0.0496562], + "R": [ + [-0.2740281164,0.007089557403,0.9616955493], + [-0.08615117171,0.9957715968,-0.0318889104], + [-0.9578551911,-0.09158965645,-0.2722586413] + ], + "t": [ + [-24.40184383], + [190.6520913], + [261.5790911] + ] + }, + { + "name": "20_09", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 9, + "K": [ + [743.742,0,376.404], + [0,743.442,252.182], + [0,0,1] + ], + "distCoef": [-0.310951,0.0101818,-0.000165117,0.000699519,0.141452], + "R": [ + [-0.234740558,-0.05401621619,0.9705560874], + [-0.06709368181,0.9969740023,0.03925909634], + [-0.9697398147,-0.05590247913,-0.2376543804] + ], + "t": [ + [-60.89112675], + [163.1020008], + [266.420435] + ] + }, + { + "name": "20_10", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 10, + "K": [ + [746.237,0,381.452], + [0,745.998,235.104], + [0,0,1] + ], + "distCoef": [-0.321635,0.0804606,-0.000793429,0.000500703,0.0308776], + "R": [ + [-0.2327490461,-0.03063038999,0.9720543507], + [-0.1073579574,0.9942045343,0.005622535858], + [-0.9665930636,-0.1030491297,-0.2346885731] + ], + "t": [ + [-52.7687065], + [155.650502], + [258.7092289] + ] + }, + { + "name": "20_11", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 11, + "K": [ + [744.465,0,352.406], + [0,744.368,231.635], + [0,0,1] + ], + "distCoef": [-0.307896,-0.0267024,-0.00138959,-0.000489454,0.213952], + "R": [ + [-0.2568719183,-0.003646201445,0.9664385768], + [-0.06909534804,0.997503196,-0.01460160774], + [-0.9639723287,-0.07052715282,-0.256482495] + ], + "t": [ + [-58.11810551], + [133.8270577], + [264.378006] + ] + }, + { + "name": "20_12", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 12, + "K": [ + [744.557,0,351.376], + [0,744.424,216.683], + [0,0,1] + ], + "distCoef": [-0.317479,0.0158652,-0.000659121,-0.00059258,0.147681], + "R": [ + [-0.2372383683,-0.02274879941,0.9711850744], + [-0.1004253449,0.9949438408,-0.001226302928], + [-0.9662467111,-0.09782252214,-0.2383234094] + ], + "t": [ + [-62.35654103], + [118.4734964], + [259.8400796] + ] + }, + { + "name": "20_13", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 13, + "K": [ + [743.07,0,377.102], + [0,743.158,222.988], + [0,0,1] + ], + "distCoef": [-0.29868,-0.0827266,-0.00133003,-0.00119832,0.273178], + "R": [ + [-0.2367527853,-0.03686088138,0.9708704311], + [-0.08746956632,0.9960307636,0.01648614259], + [-0.9676245107,-0.08101847538,-0.2390372628] + ], + "t": [ + [-42.43038274], + [111.3831569], + [262.4188123] + ] + }, + { + "name": "20_14", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 14, + "K": [ + [745.597,0,372.306], + [0,745.414,237.499], + [0,0,1] + ], + "distCoef": [-0.320131,0.0615197,0.00113665,-0.000991542,0.0414761], + "R": [ + [-0.2769894269,0.05383368349,0.9593637433], + [-0.05406721308,0.9959742516,-0.07149843787], + [-0.9593506105,-0.07167443526,-0.2729636999] + ], + "t": [ + [-21.49417033], + [90.7530727], + [264.2254974] + ] + }, + { + "name": "20_15", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 15, + "K": [ + [746.296,0,380.788], + [0,746.161,226.883], + [0,0,1] + ], + "distCoef": [-0.321885,0.0553182,0.000132369,-0.000878491,0.0778662], + "R": [ + 
[-0.2870302882,0.01079685294,0.9578606588], + [-0.05665486447,0.9979947406,-0.02822630231], + [-0.9562446549,-0.06236926949,-0.2858430237] + ], + "t": [ + [-1.106709776], + [85.82297146], + [264.8070963] + ] + }, + { + "name": "20_16", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 16, + "K": [ + [744.119,0,345.288], + [0,744.112,227.607], + [0,0,1] + ], + "distCoef": [-0.302547,-0.0664079,0.000893953,-0.000627784,0.303861], + "R": [ + [-0.252548592,0.05539030986,0.9659974753], + [-0.08640189331,0.9930807476,-0.07953201617], + [-0.963718798,-0.1035497095,-0.2460153169] + ], + "t": [ + [10.51473419], + [107.4721829], + [260.872486] + ] + }, + { + "name": "20_17", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 17, + "K": [ + [745.831,0,353.784], + [0,745.87,219.754], + [0,0,1] + ], + "distCoef": [-0.321082,0.0599511,-0.000750204,0.000386726,0.0615888], + "R": [ + [-0.3124433364,0.0857084176,0.9460619582], + [-0.03834810703,0.9939715084,-0.1027135007], + [-0.9491620432,-0.06837183409,-0.3072730188] + ], + "t": [ + [50.17882687], + [91.39390134], + [262.9120903] + ] + }, + { + "name": "20_18", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 18, + "K": [ + [745.227,0,385.13], + [0,745.129,233.897], + [0,0,1] + ], + "distCoef": [-0.311291,0.0180828,0.00116452,0.000576614,0.0928398], + "R": [ + [-0.2786751196,0.05379991941,0.9588773365], + [-0.03740853519,0.9970639104,-0.06681437094], + [-0.9596565944,-0.0544896994,-0.2758443282] + ], + "t": [ + [57.04086511], + [98.35557378], + [265.4113916] + ] + }, + { + "name": "20_19", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 19, + "K": [ + [746.424,0,373.724], + [0,746.378,215.089], + [0,0,1] + ], + "distCoef": [-0.317589,0.0452179,0.000839363,0.00087423,0.0858828], + "R": [ + [-0.2053627335,-0.023863444,0.9783949528], + [-0.1366627843,0.9906072975,-0.004523879826], + [-0.9690972248,-0.1346392148,-0.2066950671] + ], + "t": [ + [2.454839771], + [148.020868], + [256.5149472] + ] + }, + { + "name": "20_20", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 20, + "K": [ + [744.35,0,378.361], + [0,744.386,245.706], + [0,0,1] + ], + "distCoef": [-0.305792,-0.0298413,-5.26611e-05,9.57392e-05,0.206854], + "R": [ + [-0.2653224987,0.04663873586,0.9630310483], + [-0.08123292055,0.9941966424,-0.07052835541], + [-0.9607315881,-0.09694258412,-0.2599941366] + ], + "t": [ + [23.42848118], + [157.616994], + [260.7931406] + ] + }, + { + "name": "20_21", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 21, + "K": [ + [747.371,0,368.768], + [0,747.344,231.897], + [0,0,1] + ], + "distCoef": [-0.308946,-0.0139041,-0.000755627,-0.000244894,0.190547], + "R": [ + [-0.2375675449,-0.01520768023,0.9712519694], + [-0.09352440886,0.9955903179,-0.007287238765], + [-0.966858235,-0.09256697771,-0.2379422368] + ], + "t": [ + [-12.76210059], + [163.3748289], + [261.1782343] + ] + }, + { + "name": "20_22", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 22, + "K": [ + [746.314,0,371.788], + [0,745.992,237.732], + [0,0,1] + ], + "distCoef": [-0.315167,0.0352154,-0.000828301,0.000312219,0.0891012], + "R": [ + [-0.2145858088,0.0004599306573,0.9767050318], + [-0.07749764501,0.9968390076,-0.017495939], + [-0.9736257216,-0.07944672006,-0.2138718611] + ], + "t": [ + [-33.0373727], + [146.3668194], + [262.1626174] + ] + }, + { + "name": "20_23", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 23, + "K": [ + [746.318,0,371.868], + 
[0,746.096,236.531], + [0,0,1] + ], + "distCoef": [-0.318459,0.0405311,0.000489761,-0.000285822,0.0876741], + "R": [ + [-0.2554085937,0.004734611177,0.9668216142], + [-0.07039835709,0.9972425561,-0.02348096154], + [-0.9642668311,-0.0740598926,-0.25437101] + ], + "t": [ + [-17.40671779], + [124.2252344], + [264.0602836] + ] + }, + { + "name": "20_24", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 24, + "K": [ + [745.832,0,382.965], + [0,745.816,231.317], + [0,0,1] + ], + "distCoef": [-0.320385,0.0446211,0.00028801,0.00167617,0.104376], + "R": [ + [-0.2362773498,-0.02089730322,0.9714609188], + [-0.1013714927,0.9948433166,-0.003255144035], + [-0.9663833786,-0.09924756028,-0.2371773332] + ], + "t": [ + [-5.093436327], + [126.6662443], + [260.9183094] + ] + }, + { + "name": "00_00", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 0, + "K": [ + [1634.03,0,942.792], + [0,1629.73,558.29], + [0,0,1] + ], + "distCoef": [-0.222445,0.199192,8.73054e-05,0.000982243,0.0238445], + "R": [ + [0.1369296663,0.03357591931,-0.9900115778], + [-0.09021094677,0.9956950625,0.02129149064], + [0.9864645212,0.08639444504,0.1393691081] + ], + "t": [ + [20.90028135], + [127.2202879], + [283.1159034] + ] + }, + { + "name": "00_01", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 1, + "K": [ + [1395.91,0,951.559], + [0,1392.24,561.398], + [0,0,1] + ], + "distCoef": [-0.286227,0.183082,-4.29815e-05,0.000644874,-0.0479635], + "R": [ + [0.05337497606,0.02479711619,0.9982666052], + [0.6376765256,0.7684660834,-0.05318390075], + [-0.7684528356,0.6394098699,0.0252043199] + ], + "t": [ + [6.299256813], + [104.397182], + [363.078698] + ] + }, + { + "name": "00_02", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 2, + "K": [ + [1397.02,0,939.355], + [0,1394.04,556.611], + [0,0,1] + ], + "distCoef": [-0.28229,0.173658,-0.000610716,0.000955319,-0.0398628], + "R": [ + [-0.9970491806,0.05290586318,-0.05562284625], + [-0.01182874156,0.6100448884,0.792278559], + [0.07584861407,0.7905986364,-0.6076189463] + ], + "t": [ + [-16.22360931], + [63.30660163], + [381.0181823] + ] + }, + { + "name": "00_03", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 3, + "K": [ + [1395.71,0,949.456], + [0,1392.06,566.648], + [0,0,1] + ], + "distCoef": [-0.281728,0.168097,-0.00021431,1.8072e-05,-0.0371786], + "R": [ + [-0.6216465312,-0.0285781748,0.7827763909], + [0.07448493547,0.9926490654,0.09539301533], + [-0.7797484111,0.117605786,-0.6149482047] + ], + "t": [ + [-14.50346059], + [117.4297203], + [290.1984382] + ] + }, + { + "name": "00_04", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 4, + "K": [ + [1633.26,0,949.479], + [0,1629.32,572.374], + [0,0,1] + ], + "distCoef": [-0.223003,0.185095,-0.000261654,0.00109433,0.0657602], + "R": [ + [-0.5292732399,-0.01229259603,0.8483623811], + [0.636650989,0.6551966806,0.4066851706], + [-0.5608434325,0.7553583268,-0.3389519765] + ], + "t": [ + [-5.411400695], + [80.12176746], + [379.8488129] + ] + }, + { + "name": "00_05", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 5, + "K": [ + [1396.29,0,933.34], + [0,1392.95,560.462], + [0,0,1] + ], + "distCoef": [-0.28733,0.185523,-0.000225825,-0.000143128,-0.0508452], + "R": [ + [-0.9314658579,-0.01073438439,-0.363670357], + [-0.021313424,0.9994579907,0.02508909603], + [0.3632039283,0.03112069687,-0.9311897813] + ], + "t": [ + [-6.050515741], + [143.9213951], + [280.3813532] + ] + }, + { + "name": "00_06", + "type": "hd", 
+ "resolution": [1920,1080], + "panel": 0, + "node": 6, + "K": [ + [1396.11,0,950.228], + [0,1392.54,548.78], + [0,0,1] + ], + "distCoef": [-0.286481,0.183173,-0.000152555,0.0010664,-0.0482263], + "R": [ + [0.9448241112,-0.04876703013,-0.3239277321], + [-0.2141569626,0.6563150135,-0.7234551806], + [0.2478793944,0.7529092773,0.6096584503] + ], + "t": [ + [-10.023614], + [84.45695974], + [376.925635] + ] + }, + { + "name": "00_07", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 7, + "K": [ + [1395.51,0,947.67], + [0,1392.41,549.081], + [0,0,1] + ], + "distCoef": [-0.286691,0.185163,-6.53256e-05,4.32858e-06,-0.052639], + "R": [ + [-0.9419632708,-0.03700247277,0.3336705164], + [0.180351898,0.7825307202,0.5959185052], + [-0.2831578878,0.6215114552,-0.7304417305] + ], + "t": [ + [-5.250326149], + [112.5645453], + [360.2387508] + ] + }, + { + "name": "00_08", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 8, + "K": [ + [1642.7,0,945.082], + [0,1638.64,562.465], + [0,0,1] + ], + "distCoef": [-0.22444,0.208938,-0.000569838,0.000484927,0.0287248], + "R": [ + [0.9544726119,0.01685383959,-0.2978220632], + [-0.03362017317,0.9981191009,-0.05126347965], + [0.2963979035,0.05894241665,0.9532439742] + ], + "t": [ + [-19.67808464], + [136.6798831], + [282.6801175] + ] + }, + { + "name": "00_09", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 9, + "K": [ + [1396.79,0,945.482], + [0,1393.03,542.64], + [0,0,1] + ], + "distCoef": [-0.284259,0.175176,-0.000406823,0.000640552,-0.0406716], + "R": [ + [-0.3169419478,-0.08460972789,0.9446634298], + [-0.1243350249,0.9911238917,0.04705563528], + [-0.9402598595,-0.1025408464,-0.3246486894] + ], + "t": [ + [6.780958613], + [147.0057696], + [260.6395044] + ] + }, + { + "name": "00_10", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 10, + "K": [ + [1393.87,0,944.546], + [0,1390.36,563.199], + [0,0,1] + ], + "distCoef": [-0.285353,0.177704,-0.000109708,0.000471392,-0.0432146], + "R": [ + [0.9503475669,0.04849461332,0.3073886376], + [0.1560494297,0.7803459045,-0.6055648973], + [-0.2692360999,0.6234649483,0.734032275] + ], + "t": [ + [22.71992555], + [112.7759402], + [360.0009328] + ] + }, + { + "name": "00_11", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 11, + "K": [ + [1492.96,0,934.544], + [0,1489.74,547.466], + [0,0,1] + ], + "distCoef": [-0.259288,0.190057,-5.50625e-05,0.00031915,-0.0281283], + "R": [ + [0.8129763959,0.04080422416,-0.5808652124], + [-0.2848486357,0.8979062573,-0.3355973896], + [0.5078687177,0.4382914196,0.7415996205] + ], + "t": [ + [-0.03199165418], + [105.1487628], + [331.4862369] + ] + }, + { + "name": "00_12", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 12, + "K": [ + [1395.93,0,964.611], + [0,1392.67,564.875], + [0,0,1] + ], + "distCoef": [-0.290995,0.19463,-0.000241491,0.000727782,-0.0582663], + "R": [ + [-0.9950957343,0.04321912909,-0.08897520145], + [-0.001969290489,0.8906636271,0.454658581], + [0.09889692354,0.4526040326,-0.886210465] + ], + "t": [ + [24.66653867], + [97.49188585], + [334.8897626] + ] + }, + { + "name": "00_13", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 13, + "K": [ + [1592.21,0,937.375], + [0,1588.39,560.919], + [0,0,1] + ], + "distCoef": [-0.239248,0.229218,0.000137317,0.000315934,-0.0358302], + "R": [ + [-0.2862766934,0.07452649614,-0.9552441867], + [-0.7557457469,0.5952786327,0.2729317047], + [0.588977097,0.8000557173,-0.1140913162] + ], + "t": [ + 
[-15.47943966], + [60.20818768], + [381.0821849] + ] + }, + { + "name": "00_14", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 14, + "K": [ + [1649.51,0,934.882], + [0,1644.85,568.024], + [0,0,1] + ], + "distCoef": [-0.22365,0.220791,-0.000591343,0.000286172,0.0121962], + "R": [ + [0.827339054,-0.07848137689,0.5561930989], + [0.02005408661,0.9936867625,0.110383204], + [-0.5613447456,-0.08017039095,0.8236897383] + ], + "t": [ + [-7.23447972], + [142.1657406], + [267.9541185] + ] + }, + { + "name": "00_15", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 15, + "K": [ + [1430.11,0,948.926], + [0,1426.48,561.705], + [0,0,1] + ], + "distCoef": [-0.277948,0.185701,0.000192514,0.000149713,-0.0424254], + "R": [ + [-0.9997414125,0.006454955712,0.02180462522], + [0.005192647027,0.9983342904,-0.05746025644], + [-0.02213920846,-0.05733217422,-0.9981096519] + ], + "t": [ + [9.642162177], + [134.9258555], + [268.2324221] + ] + }, + { + "name": "00_16", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 16, + "K": [ + [1427.34,0,949.618], + [0,1423.13,548.132], + [0,0,1] + ], + "distCoef": [-0.279453,0.188683,-0.000345265,0.000583475,-0.0479414], + "R": [ + [0.7694875517,0.002369830201,0.6386574134], + [0.2539259376,0.9164213706,-0.3093436433], + [-0.586012394,0.4002077652,0.7045730755] + ], + "t": [ + [4.866150988], + [118.1652356], + [330.6340665] + ] + }, + { + "name": "00_17", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 17, + "K": [ + [1393.35,0,916.395], + [0,1390.34,563.652], + [0,0,1] + ], + "distCoef": [-0.287138,0.186145,7.50854e-05,0.000557424,-0.0513205], + "R": [ + [0.5039250676,0.09465184024,-0.8585456047], + [-0.6050310345,0.7480627966,-0.2726527087], + [0.6164389455,0.6568432701,0.4342348962] + ], + "t": [ + [18.2296155], + [97.71531857], + [361.6667015] + ] + }, + { + "name": "00_18", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 18, + "K": [ + [1542.2,0,947.567], + [0,1538.02,555.168], + [0,0,1] + ], + "distCoef": [-0.245751,0.182006,3.81269e-06,0.000651097,0.00472657], + "R": [ + [-0.4048875531,-0.001022756131,0.9143659133], + [0.3656410889,0.9163838146,0.1629334173], + [-0.8380767647,0.4002994608,-0.3706584387] + ], + "t": [ + [16.25260358], + [116.7586119], + [329.7529305] + ] + }, + { + "name": "00_19", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 19, + "K": [ + [1396.57,0,949.242], + [0,1393.19,554.872], + [0,0,1] + ], + "distCoef": [-0.280864,0.167216,-6.6519e-05,0.000917406,-0.0342733], + "R": [ + [0.7360342296,0.009501079563,0.6768776421], + [0.5173282683,0.6370082142,-0.5714822813], + [-0.4366063167,0.7707984591,0.4639446731] + ], + "t": [ + [-24.15514071], + [74.04862943], + [379.5076537] + ] + }, + { + "name": "00_20", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 20, + "K": [ + [1403.46,0,940.386], + [0,1400.1,552.684], + [0,0,1] + ], + "distCoef": [-0.287177,0.194004,-0.000120001,8.41526e-05,-0.0604614], + "R": [ + [-0.6201222217,0.04052054618,-0.7834580496], + [-0.1302964194,0.9794749929,0.1537907063], + [0.773609251,0.1974508131,-0.6021145267] + ], + "t": [ + [24.4496252], + [140.6900046], + [300.8290806] + ] + }, + { + "name": "00_21", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 21, + "K": [ + [1397.56,0,932.828], + [0,1393.91,562.186], + [0,0,1] + ], + "distCoef": [-0.28642,0.185674,-0.000229601,1.91211e-05,-0.052608], + "R": [ + [-0.2617478675,-0.05032313647,-0.9638234464], + 
[-0.4532392419,0.8880813121,0.07671878938], + [0.8520928608,0.4569235877,-0.2552618099] + ], + "t": [ + [-8.784671236], + [98.11062797], + [332.9193692] + ] + }, + { + "name": "00_22", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 22, + "K": [ + [1514.1,0,945.861], + [0,1510.18,558.694], + [0,0,1] + ], + "distCoef": [-0.260535,0.216046,-0.000156491,0.000677315,-0.0506741], + "R": [ + [-0.9239818557,-0.0613765916,0.3774790647], + [0.05486070575,0.9555572213,0.289656175], + [-0.3784809549,0.288345818,-0.8795503715] + ], + "t": [ + [-5.224239691], + [110.7456244], + [313.8855054] + ] + }, + { + "name": "00_23", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 23, + "K": [ + [1572.86,0,941.716], + [0,1568.17,560.048], + [0,0,1] + ], + "distCoef": [-0.240801,0.195963,-0.000444179,0.000458513,0.00455186], + "R": [ + [0.5162966551,0.01335424781,0.856305686], + [0.1418829708,0.9847272537,-0.100903213], + [-0.8445750331,0.173591186,0.506516647] + ], + "t": [ + [2.417701344], + [102.3557555], + [298.3746617] + ] + }, + { + "name": "00_24", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 24, + "K": [ + [1399.63,0,954.539], + [0,1396.27,546.388], + [0,0,1] + ], + "distCoef": [-0.288761,0.190789,4.23479e-05,6.78832e-05,-0.0577764], + "R": [ + [-0.388991142,-0.05987834367,-0.9192934653], + [0.02928793432,0.9965772059,-0.07730517199], + [0.9207758187,-0.05699523376,-0.3859059924] + ], + "t": [ + [-15.12220678], + [134.1751339], + [265.239245] + ] + }, + { + "name": "00_25", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 25, + "K": [ + [1397.66,0,935.585], + [0,1394.65,559.251], + [0,0,1] + ], + "distCoef": [-0.285722,0.183994,-0.000502702,0.000494145,-0.0515729], + "R": [ + [0.7926422733,0.00130484237,-0.6096855943], + [0.04487405742,0.9971605675,0.06047414042], + [0.6080333424,-0.07529342651,0.7903330655] + ], + "t": [ + [4.539475053], + [139.2223569], + [261.6293171] + ] + }, + { + "name": "00_26", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 26, + "K": [ + [1616.8,0,950.116], + [0,1613.47,551.417], + [0,0,1] + ], + "distCoef": [-0.223464,0.185279,-0.00090721,0.000127112,0.0351947], + "R": [ + [-0.7556190155,-0.04350579001,-0.6535649545], + [0.1389994774,0.9644159151,-0.2249023966], + [0.6400930001,-0.2607857146,-0.7226837222] + ], + "t": [ + [-12.5475419], + [141.1612209], + [240.8579734] + ] + }, + { + "name": "00_27", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 27, + "K": [ + [1861.86,0,934.556], + [0,1857.26,552.106], + [0,0,1] + ], + "distCoef": [-0.171511,0.209759,-1.83176e-05,-3.41566e-05,0.211418], + "R": [ + [0.9782876177,0.02697940456,0.2054883178], + [0.02691509764,0.9665557486,-0.2550403151], + [-0.2054967507,0.2550335204,0.9448433674] + ], + "t": [ + [-0.5131666478], + [123.4498457], + [311.6401591] + ] + }, + { + "name": "00_28", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 28, + "K": [ + [1395.57,0,953.143], + [0,1392.36,561.982], + [0,0,1] + ], + "distCoef": [-0.284934,0.181016,0.000127361,0.000271191,-0.0471616], + "R": [ + [-0.6310677524,-0.02949081954,-0.775166939], + [-0.5128354354,0.7656140117,0.3883748207], + [0.5820251782,0.6426238999,-0.4982782509] + ], + "t": [ + [-8.508070023], + [104.2896072], + [361.3816814] + ] + }, + { + "name": "00_29", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 29, + "K": [ + [1400.36,0,939.608], + [0,1397.25,572.603], + [0,0,1] + ], + "distCoef": 
[-0.286109,0.1878,-0.000309515,0.000886248,-0.0523515], + "R": [ + [0.4887300705,-0.07268882749,-0.8694016635], + [-0.08227020668,0.9882426049,-0.1288726774], + [0.8685473685,0.1345098073,0.4770037531] + ], + "t": [ + [-20.72850042], + [158.8912224], + [289.281465] + ] + }, + { + "name": "00_30", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 30, + "K": [ + [1407.21,0,946.883], + [0,1403.86,563.032], + [0,0,1] + ], + "distCoef": [-0.285813,0.195568,-0.000394067,0.000468367,-0.0600751], + "R": [ + [0.08635045426,0.06174190292,0.9943498059], + [0.2147800801,0.9734543185,-0.07909618832], + [-0.9728376618,0.2203965227,0.07079729175] + ], + "t": [ + [13.79078928], + [132.1300437], + [306.0754676] + ] + }, + { + "name": "50_01", + "type": "kinect-color", + "resolution": [1920,1080], + "panel": 50, + "node": 1, + "K": [ + [1053.92,0,947.294], + [0,1054.32,535.405], + [0,0,1] + ], + "distCoef": [0.0476403,-0.053786,0.000733314,-0.000579648,0.0122759], + "R": [ + [0.9095307192,0.0006254166507,-0.4156362348], + [-0.003349684277,0.999977422,-0.0058253781], + [0.4156232073,0.006690610494,0.9095122788] + ], + "t": [ + [-15.84850815], + [103.1392168], + [269.3362326] + ] + }, + { + "name": "50_02", + "type": "kinect-color", + "resolution": [1920,1080], + "panel": 50, + "node": 2, + "K": [ + [1058.92,0,971.224], + [0,1059.3,541.276], + [0,0,1] + ], + "distCoef": [0.0485216,-0.0529886,-0.000413578,-0.000171659,0.00909728], + "R": [ + [-0.08404700998,-0.006825065684,-0.9964384169], + [-0.04073006897,0.9991643735,-0.003408260769], + [0.9956290281,0.04029855131,-0.08425476347] + ], + "t": [ + [-4.246538185], + [93.69672118], + [271.0169727] + ] + }, + { + "name": "50_03", + "type": "kinect-color", + "resolution": [1920,1080], + "panel": 50, + "node": 3, + "K": [ + [1050.35,0,971.069], + [0,1050.88,535.343], + [0,0,1] + ], + "distCoef": [0.0482196,-0.0555053,0.000460862,0.000594278,0.0128034], + "R": [ + [-0.9791929995,-0.0009192386581,-0.2029291126], + [0.004325206908,0.9996680429,-0.02539875018], + [0.2028850964,-0.02574798878,-0.9788639736] + ], + "t": [ + [-10.71273011], + [112.0293664], + [269.2258843] + ] + }, + { + "name": "50_04", + "type": "kinect-color", + "resolution": [1920,1080], + "panel": 50, + "node": 4, + "K": [ + [1053.76,0,952.563], + [0,1053.62,535.073], + [0,0,1] + ], + "distCoef": [0.0534802,-0.059505,0.000265754,-0.00038559,0.0128987], + "R": [ + [-0.4973721867,-0.01252789009,0.8674468052], + [-0.05725964091,0.9981894693,-0.01841512904], + [-0.8656455634,-0.05882886558,-0.4971890215] + ], + "t": [ + [-12.12207689], + [119.639642], + [263.8142799] + ] + }, + { + "name": "50_05", + "type": "kinect-color", + "resolution": [1920,1080], + "panel": 50, + "node": 5, + "K": [ + [1061.53,0,963.346], + [0,1061.99,535.689], + [0,0,1] + ], + "distCoef": [0.0450742,-0.0483577,0.000117724,0.00131017,0.00746483], + "R": [ + [0.6332975321,0.02789684006,0.7734054578], + [-0.04440403331,0.9990136015,0.0003253688515], + [-0.772633495,-0.034548377,0.6339115806] + ], + "t": [ + [4.398197962], + [114.449943], + [269.0646085] + ] + }, + { + "name": "50_06", + "type": "kinect-color", + "resolution": [1920,1080], + "panel": 50, + "node": 6, + "K": [ + [1053.8,0,975.87], + [0,1054.44,518.546], + [0,0,1] + ], + "distCoef": [0.0608578,-0.0758877,0.000572907,0.000423304,0.0232485], + "R": [ + [0.9936973916,-0.01776547634,0.1106791841], + [0.08238304881,0.7853099766,-0.6135969963], + [-0.07601662453,0.6188478234,0.7818240495] + ], + "t": [ + [-23.36095562], + [58.01362542], + [350.0526212] + ] 
+ }, + { + "name": "50_07", + "type": "kinect-color", + "resolution": [1920,1080], + "panel": 50, + "node": 7, + "K": [ + [1058.37,0,951.456], + [0,1058.06,537.752], + [0,0,1] + ], + "distCoef": [0.0510704,-0.0625189,-0.000144014,6.68608e-05,0.016463], + "R": [ + [0.4325769754,-0.03234243573,-0.9010167186], + [-0.4868424381,0.832758343,-0.2636247005], + [0.7588554545,0.5526911516,0.344486415] + ], + "t": [ + [-19.0385587], + [87.13576568], + [341.2560709] + ] + }, + { + "name": "50_08", + "type": "kinect-color", + "resolution": [1920,1080], + "panel": 50, + "node": 8, + "K": [ + [1051.92,0,937.937], + [0,1051.86,554.246], + [0,0,1] + ], + "distCoef": [0.0499863,-0.0613843,-4.12419e-05,-0.000155211,0.0174279], + "R": [ + [-0.7043873056,-0.07078753835,-0.7062773168], + [-0.4398115151,0.8245196459,0.3559960458], + [0.5571394394,0.5613879923,-0.6119143463] + ], + "t": [ + [-21.03532832], + [82.26745729], + [344.5100871] + ] + }, + { + "name": "50_09", + "type": "kinect-color", + "resolution": [1920,1080], + "panel": 50, + "node": 9, + "K": [ + [1054,0,961.563], + [0,1054.08,544.179], + [0,0,1] + ], + "distCoef": [0.0446773,-0.0530941,0.000226286,-0.000324258,0.0121913], + "R": [ + [-0.8728623151,-0.0989156561,0.4778358211], + [0.2068965126,0.8118396582,0.5459946908], + [-0.4419334927,0.5754407548,-0.6881589393] + ], + "t": [ + [-36.30074608], + [73.0041962], + [346.5857858] + ] + }, + { + "name": "50_10", + "type": "kinect-color", + "resolution": [1920,1080], + "panel": 50, + "node": 10, + "K": [ + [1050.04,0,941.59], + [0,1050.6,559.398], + [0,0,1] + ], + "distCoef": [0.0506861,-0.0636966,0.000195295,-6.41025e-06,0.0181857], + "R": [ + [0.1849149694,0.002001709126,0.9827524852], + [0.5894867579,0.7998990427,-0.1125472514], + [-0.786328059,0.6001312479,0.146733326] + ], + "t": [ + [-12.26435316], + [64.88453925], + [349.5293231] + ] + } + ] +} diff --git a/tests/data/panoptic_body3d/160906_band2/calibration_160906_band2.json b/tests/data/panoptic_body3d/160906_band2/calibration_160906_band2.json index 31c0429b03..677a379dc7 100644 --- a/tests/data/panoptic_body3d/160906_band2/calibration_160906_band2.json +++ b/tests/data/panoptic_body3d/160906_band2/calibration_160906_band2.json @@ -1,11965 +1,11965 @@ -{ - "calibDataSource": "160906_calib_norm", - "cameras": [ - { - "name": "01_01", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 1, - "K": [ - [745.698,0,375.512], - [0,745.89,226.023], - [0,0,1] - ], - "distCoef": [-0.324009,0.0732398,-0.000601245,0.000808154,0.0311011], - "R": [ - [0.9609979695,0.02878724306,-0.2750530807], - [-0.05024448072,0.9961896773,-0.07128547526], - [0.2719529274,0.08232509619,0.9587826572] - ], - "t": [ - [-51.56945892], - [143.9587601], - [282.5664691] - ] - }, - { - "name": "01_02", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 2, - "K": [ - [745.462,0,369.225], - [0,745.627,226.687], - [0,0,1] - ], - "distCoef": [-0.336594,0.141798,-0.000612176,0.000160485,-0.0646767], - "R": [ - [0.9715220842,-0.01574832828,-0.2364251047], - [0.005323209906,0.998987679,-0.04466856407], - [0.2368892218,0.042137956,0.9706224236] - ], - "t": [ - [-66.22242206], - [142.1317177], - [278.6626087] - ] - }, - { - "name": "01_03", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 3, - "K": [ - [746.261,0,378.952], - [0,746.496,239.595], - [0,0,1] - ], - "distCoef": [-0.322069,0.0440329,-0.000951664,0.000892653,0.103376], - "R": [ - [0.9665011873,0.05534363601,-0.2506242943], - [-0.07024277085,0.996230894,-0.05089164033], - 
[0.2468631364,0.06679137568,0.9667458322] - ], - "t": [ - [-54.75524211], - [118.3584455], - [281.78809] - ] - }, - { - "name": "01_04", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 4, - "K": [ - [747.661,0,366.929], - [0,747.759,234.022], - [0,0,1] - ], - "distCoef": [-0.32333,0.0462607,-0.000972333,-0.000898261,0.102804], - "R": [ - [0.9662588837,0.08601234823,-0.2427872436], - [-0.1112831564,0.9894890375,-0.09234448444], - [0.23229255,0.1162468093,0.9656742984] - ], - "t": [ - [-29.08626445], - [96.75744843], - [287.7183779] - ] - }, - { - "name": "01_05", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 5, - "K": [ - [742.413,0,353.224], - [0,742.622,209.478], - [0,0,1] - ], - "distCoef": [-0.297729,-0.0985766,-0.000505185,-0.000773418,0.328727], - "R": [ - [0.9718071292,0.05098345905,-0.2301990238], - [-0.07271497659,0.9935575811,-0.0869244798], - [0.2242842746,0.1012127458,0.9692536016] - ], - "t": [ - [-26.91018729], - [77.97642882], - [285.7140393] - ] - }, - { - "name": "01_06", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 6, - "K": [ - [743.487,0,372.277], - [0,743.725,241.821], - [0,0,1] - ], - "distCoef": [-0.317534,0.0281748,0.00130284,-0.000186889,0.119129], - "R": [ - [0.9681278444,0.07458666466,-0.2390926732], - [-0.09383510211,0.9931135585,-0.07014580141], - [0.2322142341,0.09034538891,0.968459736] - ], - "t": [ - [-7.038020326], - [73.51221006], - [284.7303027] - ] - }, - { - "name": "01_07", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 7, - "K": [ - [748.393,0,380.919], - [0,748.388,229.353], - [0,0,1] - ], - "distCoef": [-0.344193,0.174813,-0.00034307,0.00107023,-0.0968505], - "R": [ - [0.9670535143,-0.02995409712,-0.2528047715], - [0.01712365053,0.9984582116,-0.0528013286], - [0.2539966162,0.04673276982,0.9660754459] - ], - "t": [ - [-4.52170598], - [98.55800179], - [280.6705064] - ] - }, - { - "name": "01_08", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 8, - "K": [ - [745.37,0,362.362], - [0,745.56,217.483], - [0,0,1] - ], - "distCoef": [-0.326014,0.0789588,-0.000462463,-0.00138061,0.0222432], - "R": [ - [0.9652282485,0.06485174985,-0.2532364089], - [-0.07898708824,0.9958116468,-0.0460456736], - [0.2491896228,0.06444699145,0.9663079826] - ], - "t": [ - [26.28384049], - [86.2200762], - [282.8912643] - ] - }, - { - "name": "01_09", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 9, - "K": [ - [746.037,0,338.236], - [0,746.053,236.859], - [0,0,1] - ], - "distCoef": [-0.314486,0.0395532,0.000625849,-0.000232478,0.0599275], - "R": [ - [0.9656569777,0.07278005487,-0.2494186543], - [-0.09030273149,0.9941334749,-0.05953193019], - [0.2436226964,0.08001060955,0.9665641645] - ], - "t": [ - [45.35508632], - [94.7965848], - [284.0947744] - ] - }, - { - "name": "01_10", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 10, - "K": [ - [747.938,0,379.271], - [0,748.269,227.432], - [0,0,1] - ], - "distCoef": [-0.3484,0.205218,-0.00110069,0.000562921,-0.151344], - "R": [ - [0.9662738854,-0.001312373382,-0.2575132151], - [-0.009587322107,0.9991104143,-0.04106657164], - [0.2573380297,0.04215041788,0.9654017199] - ], - "t": [ - [30.05861189], - [130.0028668], - [279.9552314] - ] - }, - { - "name": "01_11", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 11, - "K": [ - [746.12,0,364.693], - [0,745.844,223.621], - [0,0,1] - ], - "distCoef": [-0.335335,0.119703,0.000192218,0.00118296,-0.00812072], - "R": [ - 
[0.9869891455,-0.01212212734,-0.1603292883], - [0.00355647539,0.9985558958,-0.05360479805], - [0.1607475603,0.05233714665,0.9856069424] - ], - "t": [ - [71.07099717], - [142.6182462], - [275.3539702] - ] - }, - { - "name": "01_12", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 12, - "K": [ - [745.407,0,358.691], - [0,745.503,226.329], - [0,0,1] - ], - "distCoef": [-0.325389,0.0923962,-0.00061832,-0.00189678,-0.0159561], - "R": [ - [0.9589650047,0.08538224277,-0.2703627054], - [-0.09708669181,0.9948178626,-0.03019262438], - [0.2663837347,0.05520229083,0.9622849957] - ], - "t": [ - [54.63033668], - [157.9150468], - [281.9236261] - ] - }, - { - "name": "01_13", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 13, - "K": [ - [744.389,0,339.442], - [0,744.512,216.258], - [0,0,1] - ], - "distCoef": [-0.320138,0.0543285,-0.000196977,-0.00116274,0.0473598], - "R": [ - [0.9724830194,-0.06319437739,-0.2242392645], - [0.03959405574,0.9933373951,-0.1082272161], - [0.2295845984,0.09637058799,0.9685058709] - ], - "t": [ - [19.90234626], - [154.6647449], - [286.7518211] - ] - }, - { - "name": "01_14", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 14, - "K": [ - [746.213,0,363.165], - [0,746.641,235.418], - [0,0,1] - ], - "distCoef": [-0.33414,0.127633,-0.000792357,0.000136075,-0.0405619], - "R": [ - [0.9643490552,0.006836134333,-0.2645452079], - [-0.02440508255,0.9977035557,-0.06318233054], - [0.2635057717,0.0673860684,0.9623013177] - ], - "t": [ - [19.24633902], - [182.0747755], - [282.9928946] - ] - }, - { - "name": "01_15", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 15, - "K": [ - [745.225,0,366.568], - [0,745.569,216.05], - [0,0,1] - ], - "distCoef": [-0.319743,0.046174,-0.00158438,-0.000953331,0.0743504], - "R": [ - [0.9602661069,0.03565913048,-0.2767985376], - [-0.06162250151,0.9944158624,-0.08567239854], - [0.2721978533,0.09932531892,0.9571012536] - ], - "t": [ - [0.9330302863], - [174.5612072], - [288.1067574] - ] - }, - { - "name": "01_16", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 16, - "K": [ - [747.633,0,371.752], - [0,747.88,230.613], - [0,0,1] - ], - "distCoef": [-0.347758,0.198029,0.00072103,0.00029865,-0.136932], - "R": [ - [0.9682573711,0.05614690975,-0.2435676248], - [-0.07153002565,0.9959334273,-0.05477283913], - [0.2395018137,0.07045660367,0.968336072] - ], - "t": [ - [-3.74774], - [172.5737662], - [282.7618788] - ] - }, - { - "name": "01_17", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 17, - "K": [ - [748.152,0,373.9], - [0,748.508,234.452], - [0,0,1] - ], - "distCoef": [-0.345127,0.177692,-0.00116897,0.00210199,-0.0818461], - "R": [ - [0.9639501783,0.02458774974,-0.264944327], - [-0.04477053879,0.9965129817,-0.07040934697], - [0.2622892538,0.07973280283,0.9616896732] - ], - "t": [ - [-36.08309916], - [173.4726636], - [283.4522322] - ] - }, - { - "name": "01_18", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 18, - "K": [ - [743.791,0,363.617], - [0,744.126,236.963], - [0,0,1] - ], - "distCoef": [-0.312734,0.0122172,-0.00120247,-0.000963953,0.133944], - "R": [ - [0.9523198878,0.06045552763,-0.2990517689], - [-0.07234112338,0.9969633514,-0.02882425707], - [0.2964010681,0.04908365416,0.9538014478] - ], - "t": [ - [-57.80984395], - [175.8598769], - [275.2458542] - ] - }, - { - "name": "01_19", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 19, - "K": [ - [743.162,0,364.748], - [0,743.331,220.785], - [0,0,1] - 
], - "distCoef": [-0.311505,0.00290054,-0.000860754,-0.000437091,0.146397], - "R": [ - [0.9677776267,0.05243241618,-0.246287042], - [-0.06515666231,0.9969134625,-0.04379677618], - [0.243230497,0.05843278173,0.968206866] - ], - "t": [ - [-19.88792012], - [144.796335], - [280.8929426] - ] - }, - { - "name": "01_20", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 20, - "K": [ - [744.661,0,343.237], - [0,744.907,246.044], - [0,0,1] - ], - "distCoef": [-0.326994,0.0904776,0.000984855,-0.00107766,-0.0214165], - "R": [ - [0.9717064093,0.03462931454,-0.2336396043], - [-0.0436324388,0.998486683,-0.03347468014], - [0.2321268283,0.04272182698,0.9717468709] - ], - "t": [ - [-15.15244103], - [127.7778149], - [279.5122056] - ] - }, - { - "name": "01_21", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 21, - "K": [ - [742.462,0,365.246], - [0,742.468,221.387], - [0,0,1] - ], - "distCoef": [-0.311193,-0.0017069,-0.0010044,-5.33063e-05,0.168374], - "R": [ - [0.9650420793,0.04068979072,-0.2589172188], - [-0.04945049005,0.9984003719,-0.02741069744], - [0.257387712,0.03925605981,0.965510501] - ], - "t": [ - [-1.672862451], - [122.1992626], - [279.1232554] - ] - }, - { - "name": "01_22", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 22, - "K": [ - [744.021,0,363.587], - [0,744.301,226.764], - [0,0,1] - ], - "distCoef": [-0.330855,0.115198,-0.00111581,-0.000578883,-0.0257811], - "R": [ - [0.9624230562,-0.007741542698,-0.2714441553], - [-0.003557050749,0.9991484058,-0.04110730506], - [0.271531229,0.0405281588,0.9615759252] - ], - "t": [ - [4.289641778], - [135.1743597], - [279.2863723] - ] - }, - { - "name": "01_23", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 23, - "K": [ - [745.029,0,358.645], - [0,745.162,224.101], - [0,0,1] - ], - "distCoef": [-0.31925,0.0412999,-0.000788365,0.000625647,0.108146], - "R": [ - [0.9553340738,0.01211961015,-0.2952793973], - [-0.03701510886,0.9961975848,-0.07886858543], - [0.293200766,0.08627564605,0.9521501057] - ], - "t": [ - [-2.968489269], - [143.230855], - [285.3382881] - ] - }, - { - "name": "01_24", - "type": "vga", - "resolution": [640,480], - "panel": 1, - "node": 24, - "K": [ - [744.501,0,369.38], - [0,744.575,244.409], - [0,0,1] - ], - "distCoef": [-0.317214,0.0306635,-5.65201e-05,-0.000305408,0.106933], - "R": [ - [0.9627375442,0.05351140442,-0.2650904574], - [-0.07422624073,0.9948691584,-0.06874462026], - [0.2600516991,0.08585969499,0.9617698408] - ], - "t": [ - [-7.333655278], - [148.0612654], - [284.8699573] - ] - }, - { - "name": "02_01", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 1, - "K": [ - [746.79,0,376.022], - [0,747.048,234.17], - [0,0,1] - ], - "distCoef": [-0.317408,0.0301922,-0.000108969,-0.00027109,0.105931], - "R": [ - [0.977473966,0.04697618088,0.2057617172], - [0.001487552662,0.9733575223,-0.2292878562], - [-0.211050783,0.2244289915,0.9513617581] - ], - "t": [ - [-1.729507611], - [175.3460492], - [304.9109171] - ] - }, - { - "name": "02_02", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 2, - "K": [ - [747.689,0,367.065], - [0,747.811,212.158], - [0,0,1] - ], - "distCoef": [-0.333664,0.117162,0.000577725,-0.000310896,-0.0327554], - "R": [ - [0.9812751339,-0.05714257326,0.183939767], - [0.09271495859,0.9771941455,-0.1910380552], - [-0.1688284573,0.2045148611,0.9641942873] - ], - "t": [ - [-50.62568249], - [190.9654762], - [299.6250374] - ] - }, - { - "name": "02_03", - "type": "vga", - "resolution": [640,480], - "panel": 
2, - "node": 3, - "K": [ - [745.627,0,353.486], - [0,745.817,252.683], - [0,0,1] - ], - "distCoef": [-0.321416,0.0392112,-0.00107045,-0.00134198,0.0908854], - "R": [ - [0.9757098845,0.1270834984,0.1784376802], - [-0.07601456941,0.9603325594,-0.2682967771], - [-0.2054556071,0.248215954,0.946666168] - ], - "t": [ - [-23.13649132], - [169.3490841], - [309.2380875] - ] - }, - { - "name": "02_04", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 4, - "K": [ - [746.11,0,381.584], - [0,746.321,224.917], - [0,0,1] - ], - "distCoef": [-0.323963,0.0585021,-0.000871966,0.000552522,0.0715102], - "R": [ - [0.979331342,0.07410153523,0.1881995881], - [-0.02608477747,0.9689731658,-0.2457856551], - [-0.2005734451,0.2357964511,0.950878713] - ], - "t": [ - [-32.63906075], - [150.8763932], - [306.9317958] - ] - }, - { - "name": "02_05", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 5, - "K": [ - [744.11,0,378.377], - [0,744.035,244.823], - [0,0,1] - ], - "distCoef": [-0.323078,0.0494134,-0.000238923,-0.000981516,0.0727453], - "R": [ - [0.9857440106,0.05652749171,0.1584720428], - [-0.01525193411,0.9680163878,-0.250422945], - [-0.1675593154,0.244435913,0.95507851] - ], - "t": [ - [-62.3494258], - [135.8190029], - [306.0165552] - ] - }, - { - "name": "02_06", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 6, - "K": [ - [743.928,0,352.844], - [0,744.181,228.627], - [0,0,1] - ], - "distCoef": [-0.303908,-0.0528673,-0.000528541,8.08764e-05,0.267531], - "R": [ - [0.9814194485,0.06212733968,0.1815380393], - [-0.0101664424,0.9616367605,-0.2741375282], - [-0.1916050874,0.2671983057,0.9444006332] - ], - "t": [ - [-53.86742917], - [106.6702196], - [310.2214119] - ] - }, - { - "name": "02_07", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 7, - "K": [ - [746.501,0,376.178], - [0,746.591,217.394], - [0,0,1] - ], - "distCoef": [-0.323449,0.0621904,-0.000592526,0.000355354,0.0689781], - "R": [ - [0.9775323693,0.09704954661,0.1871145437], - [-0.05094527723,0.9701636443,-0.2370381445], - [-0.2045361721,0.2221798567,0.9533105819] - ], - "t": [ - [-27.21830655], - [111.2122483], - [305.8578091] - ] - }, - { - "name": "02_08", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 8, - "K": [ - [747.056,0,346.722], - [0,747.425,231.954], - [0,0,1] - ], - "distCoef": [-0.331626,0.0978711,0.000923123,-0.00170198,0.0128988], - "R": [ - [0.9738310577,0.04398424166,0.222976361], - [0.006459505741,0.9753414162,-0.2206068824], - [-0.2271813062,0.2162741507,0.9495336465] - ], - "t": [ - [-23.1615402], - [89.62617671], - [306.715437] - ] - }, - { - "name": "02_09", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 9, - "K": [ - [746.084,0,344.827], - [0,746.456,222.936], - [0,0,1] - ], - "distCoef": [-0.31385,0.00765504,0.000335804,0.000338293,0.157318], - "R": [ - [0.9708044988,0.02558390192,0.2385038556], - [0.01777728087,0.9838878899,-0.1779005014], - [-0.2392124442,0.1769465571,0.9547079776] - ], - "t": [ - [-1.622489705], - [92.86686988], - [302.6276511] - ] - }, - { - "name": "02_10", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 10, - "K": [ - [743.875,0,345.16], - [0,744.131,231.932], - [0,0,1] - ], - "distCoef": [-0.309364,-0.0158069,0.000435688,-0.000318284,0.167974], - "R": [ - [0.9837217555,0.04774800386,0.1732386674], - [-0.008457215477,0.9752859506,-0.220784488], - [-0.179499257,0.2157253874,0.9598138226] - ], - "t": [ - [0.6070589451], - [94.58504844], - [305.3954199] - ] - }, - { - "name": 
"02_11", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 11, - "K": [ - [748.642,0,372.727], - [0,749.029,221.349], - [0,0,1] - ], - "distCoef": [-0.329743,0.0894243,0.000705225,0.000452301,0.0255748], - "R": [ - [0.9762818677,-0.03993432779,0.2127885436], - [0.08495434643,0.9746762651,-0.20685487], - [-0.1991393328,0.2200259705,0.9549513592] - ], - "t": [ - [18.17502224], - [86.30258496], - [305.899008] - ] - }, - { - "name": "02_12", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 12, - "K": [ - [746.297,0,386.393], - [0,746.341,223.432], - [0,0,1] - ], - "distCoef": [-0.329805,0.088881,-0.000101498,-0.000342857,0.0238941], - "R": [ - [0.9769251111,-0.05225372472,0.2070914666], - [0.09392861168,0.9759243238,-0.1968479875], - [-0.1918195589,0.211757556,0.9583130982] - ], - "t": [ - [31.97904484], - [101.8192368], - [305.2554798] - ] - }, - { - "name": "02_13", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 13, - "K": [ - [746.887,0,386.903], - [0,746.77,241.912], - [0,0,1] - ], - "distCoef": [-0.330222,0.0894843,0.000608161,-0.000202457,0.0188277], - "R": [ - [0.9805035597,0.07291108666,0.1824739514], - [-0.03359954242,0.9771464723,-0.2098948364], - [-0.1936074385,0.199671593,0.9605453736] - ], - "t": [ - [39.8755561], - [121.0360498], - [302.8306622] - ] - }, - { - "name": "02_14", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 14, - "K": [ - [745.399,0,359.381], - [0,745.103,221.453], - [0,0,1] - ], - "distCoef": [-0.32351,0.0564367,0.000553752,0.000358328,0.0789504], - "R": [ - [0.9639890244,-0.01369700088,0.2655890681], - [0.06651808592,0.9793475216,-0.1909287203], - [-0.2574888447,0.2017196672,0.9449913601] - ], - "t": [ - [64.66924198], - [136.2834945], - [299.1868513] - ] - }, - { - "name": "02_15", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 15, - "K": [ - [746.343,0,376.035], - [0,746.136,233.449], - [0,0,1] - ], - "distCoef": [-0.332319,0.10939,0.000552685,0.00121175,-0.00685584], - "R": [ - [0.9739293667,-0.02993852249,0.2248672353], - [0.07982373372,0.9730868608,-0.2161715356], - [-0.2123434957,0.2284855491,0.9501076748] - ], - "t": [ - [41.67937397], - [146.9667487], - [305.3208703] - ] - }, - { - "name": "02_16", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 16, - "K": [ - [747.983,0,369.069], - [0,747.865,212.357], - [0,0,1] - ], - "distCoef": [-0.333814,0.119177,-0.00123283,0.000206724,-0.0313224], - "R": [ - [0.9828420813,0.01261378295,0.1840172159], - [0.03080156014,0.9724259604,-0.2311688027], - [-0.181859031,0.2328704445,0.9553526307] - ], - "t": [ - [22.33056427], - [154.6384713], - [307.0242051] - ] - }, - { - "name": "02_17", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 17, - "K": [ - [743.255,0,372.405], - [0,743.629,259.514], - [0,0,1] - ], - "distCoef": [-0.301911,-0.0577323,-0.000292445,-0.000537705,0.240913], - "R": [ - [0.9702237144,0.05425789408,0.2360551311], - [-0.004184220731,0.978195713,-0.2076430576], - [-0.2421743923,0.2004725119,0.9492957051] - ], - "t": [ - [39.95715372], - [182.9757461], - [299.4720725] - ] - }, - { - "name": "02_18", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 18, - "K": [ - [746.171,0,380.016], - [0,746.628,215.7], - [0,0,1] - ], - "distCoef": [-0.310416,0.0111871,-0.00156578,-0.000885002,0.110566], - "R": [ - [0.9751942313,0.01121985931,0.2210663386], - [0.02134458651,0.9892938663,-0.1443677759], - [-0.220319359,0.1455051918,0.9645141882] - ], - "t": [ - 
[9.159436194], - [213.6293599], - [288.3403437] - ] - }, - { - "name": "02_19", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 19, - "K": [ - [745.09,0,380.114], - [0,745.176,232.983], - [0,0,1] - ], - "distCoef": [-0.31746,0.043353,-0.000108725,0.000220738,0.0862213], - "R": [ - [0.9809185988,0.05584586521,0.1862255137], - [-0.01423917048,0.975920974,-0.2176591338], - [-0.1938967473,0.2108541957,0.9580942331] - ], - "t": [ - [-1.989355998], - [159.4183424], - [303.0216832] - ] - }, - { - "name": "02_20", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 20, - "K": [ - [746.359,0,393.165], - [0,746.438,228.007], - [0,0,1] - ], - "distCoef": [-0.32236,0.0673245,-0.000115957,0.00130444,0.0588071], - "R": [ - [0.9826018096,0.03015545669,0.1832602856], - [0.01576123022,0.9696317731,-0.2440610748], - [-0.1850547688,0.2427032613,0.9522866477] - ], - "t": [ - [-25.36954265], - [136.7143691], - [307.7149997] - ] - }, - { - "name": "02_21", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 21, - "K": [ - [747.137,0,358.509], - [0,747.202,238.678], - [0,0,1] - ], - "distCoef": [-0.327929,0.0852816,0.000460613,0.000357406,0.0365027], - "R": [ - [0.9780966382,0.08951991601,0.1879179366], - [-0.04045439222,0.9673344336,-0.2502549415], - [-0.2041822921,0.2371714111,0.9497680314] - ], - "t": [ - [-10.00427836], - [118.005594], - [307.3165834] - ] - }, - { - "name": "02_22", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 22, - "K": [ - [745.847,0,374.568], - [0,746.074,247.807], - [0,0,1] - ], - "distCoef": [-0.32052,0.063252,0.000743322,-0.000945252,0.0534877], - "R": [ - [0.9839840132,0.07804627455,0.160263036], - [-0.03749054936,0.9695570383,-0.2419785283], - [-0.1742696772,0.2320946541,0.9569546233] - ], - "t": [ - [-1.458572059], - [110.2636917], - [306.6072245] - ] - }, - { - "name": "02_23", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 23, - "K": [ - [744.851,0,375.128], - [0,744.899,236.672], - [0,0,1] - ], - "distCoef": [-0.328747,0.0731957,0.000409854,0.000115616,0.0573405], - "R": [ - [0.9798731388,0.006836815724,0.1995041098], - [0.04188111895,0.9701291749,-0.2389463451], - [-0.1951783896,0.2424925605,0.9503171862] - ], - "t": [ - [13.92766978], - [118.8861106], - [308.0337581] - ] - }, - { - "name": "02_24", - "type": "vga", - "resolution": [640,480], - "panel": 2, - "node": 24, - "K": [ - [748.108,0,365.63], - [0,748.409,236.546], - [0,0,1] - ], - "distCoef": [-0.337502,0.145226,-9.99404e-05,-0.000712599,-0.0768278], - "R": [ - [0.9858983234,-0.01937546959,0.166219996], - [0.057736328,0.9716683618,-0.2291879382], - [-0.1570700873,0.2355529362,0.9590848773] - ], - "t": [ - [-5.69779309], - [141.0775615], - [307.1963385] - ] - }, - { - "name": "03_01", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 1, - "K": [ - [745.205,0,364.445], - [0,745.671,223.278], - [0,0,1] - ], - "distCoef": [-0.321278,0.0550501,-0.000663141,0.000431329,0.0680735], - "R": [ - [0.789168654,0.1464091436,-0.5964706181], - [-0.3274382264,0.921936374,-0.2069239719], - [0.5196123973,0.3586051937,0.7755032377] - ], - "t": [ - [-15.48720347], - [106.8731646], - [321.197831] - ] - }, - { - "name": "03_02", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 2, - "K": [ - [746.402,0,367.989], - [0,746.656,218.884], - [0,0,1] - ], - "distCoef": [-0.319108,0.0415571,-0.000289565,0.00121415,0.0978966], - "R": [ - [0.7844411333,0.123213727,-0.6078408392], - 
[-0.3461950886,0.9001611021,-0.2643084389], - [0.5145882519,0.4177659246,0.7487793823] - ], - "t": [ - [-25.69855827], - [65.19717944], - [326.035328] - ] - }, - { - "name": "03_03", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 3, - "K": [ - [747.999,0,350.415], - [0,748.222,213.374], - [0,0,1] - ], - "distCoef": [-0.322361,0.0444301,-0.000132478,-4.14576e-05,0.110213], - "R": [ - [0.8075592295,0.0617799019,-0.5865418439], - [-0.2672496857,0.9248714179,-0.2705373648], - [0.525762015,0.3752280693,0.763399109] - ], - "t": [ - [-8.799326732], - [72.40249706], - [323.1224723] - ] - }, - { - "name": "03_04", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 4, - "K": [ - [744.819,0,376.394], - [0,744.912,212.894], - [0,0,1] - ], - "distCoef": [-0.335892,0.121706,-0.00015411,0.0017688,-0.0013985], - "R": [ - [0.8410364559,-0.03582960221,-0.5397906256], - [-0.192384631,0.9127679401,-0.3603371217], - [0.5056143132,0.4069040761,0.7607780486] - ], - "t": [ - [3.728898504], - [75.32503712], - [325.8417248] - ] - }, - { - "name": "03_05", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 5, - "K": [ - [746.446,0,376.523], - [0,746.682,251.012], - [0,0,1] - ], - "distCoef": [-0.330943,0.0996499,0.00144142,-0.000113946,0.0131394], - "R": [ - [0.8610606531,-0.05437396314,-0.5055868113], - [-0.176556083,0.9004429458,-0.3975304402], - [0.4768673833,0.4315622475,0.7657359371] - ], - "t": [ - [31.93527518], - [62.43528973], - [326.764058] - ] - }, - { - "name": "03_06", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 6, - "K": [ - [744.998,0,378.484], - [0,744.973,240.788], - [0,0,1] - ], - "distCoef": [-0.31652,0.0338012,-0.0010118,-0.000122735,0.0959735], - "R": [ - [0.8769583834,-0.06555368648,-0.4760742674], - [-0.1128149484,0.9348860407,-0.3365425358], - [0.4671367907,0.348842092,0.8124607151] - ], - "t": [ - [52.69213606], - [109.2131316], - [317.2562433] - ] - }, - { - "name": "03_07", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 7, - "K": [ - [744.942,0,394.454], - [0,745.513,230.902], - [0,0,1] - ], - "distCoef": [-0.322593,0.0669124,0.000685625,0.000650135,0.0435827], - "R": [ - [0.8511772215,-0.03734239681,-0.5235483579], - [-0.1521244983,0.9371023984,-0.3141611561], - [0.5023499524,0.3470513512,0.7919595223] - ], - "t": [ - [39.57000229], - [127.8421428], - [318.5564893] - ] - }, - { - "name": "03_08", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 8, - "K": [ - [744.592,0,375.596], - [0,744.695,234.586], - [0,0,1] - ], - "distCoef": [-0.314208,0.0115966,-0.0002404,-0.00129875,0.131833], - "R": [ - [0.863242284,-0.08735605341,-0.4971736911], - [-0.1241310572,0.9179337282,-0.3768144785], - [0.4892895255,0.386996887,0.7815556088] - ], - "t": [ - [48.3076273], - [133.8669044], - [323.1008342] - ] - }, - { - "name": "03_09", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 9, - "K": [ - [746.083,0,388.49], - [0,746.196,219.485], - [0,0,1] - ], - "distCoef": [-0.327776,0.0952708,0.000477894,0.00116098,0.0130168], - "R": [ - [0.8627791791,-0.162720556,-0.478679547], - [-0.06768333431,0.9010943873,-0.4283081501], - [0.5010299935,0.401933982,0.766432006] - ], - "t": [ - [23.91664651], - [150.3571005], - [326.7446808] - ] - }, - { - "name": "03_10", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 10, - "K": [ - [744.984,0,374.291], - [0,745.244,231.69], - [0,0,1] - ], - "distCoef": [-0.317288,0.0201616,0.000340337,0.000302133,0.135473], - "R": [ 
- [0.8433461687,-0.104156761,-0.5271798639], - [-0.1611508321,0.8868626272,-0.433018579], - [0.5126379318,0.4501400333,0.7311472501] - ], - "t": [ - [5.809004706], - [133.1751931], - [335.4888131] - ] - }, - { - "name": "03_11", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 11, - "K": [ - [746.325,0,369.755], - [0,746.606,238.315], - [0,0,1] - ], - "distCoef": [-0.330117,0.107892,0.000853042,-0.00148033,-0.0192727], - "R": [ - [0.8487877999,-0.06352852013,-0.5249032272], - [-0.1660312052,0.9105147821,-0.3786772643], - [0.5019889537,0.4085669574,0.7622861219] - ], - "t": [ - [10.90299391], - [168.9126588], - [328.8547345] - ] - }, - { - "name": "03_12", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 12, - "K": [ - [745.397,0,373.191], - [0,745.394,241.989], - [0,0,1] - ], - "distCoef": [-0.315431,0.0239438,0.00152043,8.78247e-05,0.132462], - "R": [ - [0.7899500519,0.01447673769,-0.613000277], - [-0.2772192125,0.9001468868,-0.3359837649], - [0.5469263421,0.4353458466,0.7150843098] - ], - "t": [ - [-11.01289772], - [165.4412244], - [333.9391633] - ] - }, - { - "name": "03_13", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 13, - "K": [ - [746.289,0,356.696], - [0,746.559,221.83], - [0,0,1] - ], - "distCoef": [-0.307674,-0.0320128,-0.000713248,-0.000212304,0.187939], - "R": [ - [0.7812025858,0.003231301473,-0.6242692358], - [-0.256925784,0.9130359895,-0.316787663], - [0.5689566429,0.4078662043,0.7140962805] - ], - "t": [ - [-30.04397497], - [158.6113997], - [327.0561852] - ] - }, - { - "name": "03_14", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 14, - "K": [ - [744.216,0,367.374], - [0,744.503,234.384], - [0,0,1] - ], - "distCoef": [-0.313106,0.0107213,0.00051099,0.000391129,0.137335], - "R": [ - [0.7647493291,0.08765142393,-0.6383382266], - [-0.3090501184,0.9192036391,-0.2440342068], - [0.5653728752,0.3839035005,0.7300490493] - ], - "t": [ - [-30.23656889], - [178.7825502], - [321.7207122] - ] - }, - { - "name": "03_15", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 15, - "K": [ - [747.827,0,380.852], - [0,747.806,237.021], - [0,0,1] - ], - "distCoef": [-0.329904,0.102056,0.000500868,0.000776535,0.0163276], - "R": [ - [0.8420936086,0.09442452017,-0.5310012847], - [-0.2692856411,0.9266613257,-0.2622670985], - [0.4672939095,0.3638444688,0.8057627471] - ], - "t": [ - [-9.683781844], - [164.2881649], - [322.7392687] - ] - }, - { - "name": "03_16", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 16, - "K": [ - [745.289,0,371.652], - [0,745.447,216.538], - [0,0,1] - ], - "distCoef": [-0.317152,0.0301694,-0.000847782,0.000226416,0.100881], - "R": [ - [0.7751085928,0.08020770062,-0.6267163586], - [-0.2817854267,0.9316829094,-0.2292682483], - [0.5655118413,0.3543073259,0.74475679] - ], - "t": [ - [-42.18053512], - [150.9579844], - [316.9204289] - ] - }, - { - "name": "03_17", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 17, - "K": [ - [744.591,0,386.471], - [0,744.601,243.766], - [0,0,1] - ], - "distCoef": [-0.308716,-0.020066,-0.000742984,7.36231e-05,0.18193], - "R": [ - [0.8000888793,0.13985822,-0.5833502066], - [-0.3086873752,0.9298003917,-0.2004578159], - [0.5143635773,0.3404569133,0.7870954202] - ], - "t": [ - [-29.24407076], - [139.76037], - [318.5389184] - ] - }, - { - "name": "03_18", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 18, - "K": [ - [747.091,0,388.41], - [0,747.213,245.147], - [0,0,1] - ], - "distCoef": 
[-0.331947,0.109947,-0.00018029,-0.000335458,-0.0100282], - "R": [ - [0.7812031275,0.143907843,-0.6074637489], - [-0.3493109676,0.9072427652,-0.2342912992], - [0.5174007358,0.3952228456,0.7590094735] - ], - "t": [ - [-39.38157975], - [101.9329028], - [324.6812046] - ] - }, - { - "name": "03_19", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 19, - "K": [ - [743.815,0,380.782], - [0,743.921,233.579], - [0,0,1] - ], - "distCoef": [-0.31618,0.0384848,0.000240219,0.000426998,0.0977231], - "R": [ - [0.8097086682,0.09665101941,-0.578818152], - [-0.2718115959,0.9359285209,-0.2239559336], - [0.5200868476,0.3386685464,0.784100304] - ], - "t": [ - [-3.817362892], - [126.1763792], - [318.2990602] - ] - }, - { - "name": "03_20", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 20, - "K": [ - [746.163,0,356.033], - [0,746.281,215.327], - [0,0,1] - ], - "distCoef": [-0.323416,0.0556958,5.62358e-06,-0.000684023,0.0815018], - "R": [ - [0.8690981447,0.003405692177,-0.4946279574], - [-0.1831744592,0.9310985933,-0.3154402114], - [0.4594731031,0.3647517111,0.8098398958] - ], - "t": [ - [22.15812523], - [111.197586], - [320.9871724] - ] - }, - { - "name": "03_21", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 21, - "K": [ - [745.277,0,370.698], - [0,745.633,251.594], - [0,0,1] - ], - "distCoef": [-0.309423,-0.0154759,-0.000871178,-0.000110471,0.185828], - "R": [ - [0.8519925598,-0.01534543221,-0.5233289556], - [-0.157671027,0.9456449668,-0.2844212441], - [0.4992479597,0.3248385977,0.8032629458] - ], - "t": [ - [23.66925749], - [140.0971121], - [315.3107012] - ] - }, - { - "name": "03_22", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 22, - "K": [ - [749.812,0,361.025], - [0,750.052,224.033], - [0,0,1] - ], - "distCoef": [-0.333335,0.0892582,3.32371e-05,-0.00136116,0.0353235], - "R": [ - [0.8242021998,-0.0118106517,-0.5661724493], - [-0.2609232338,0.8794144434,-0.3981824994], - [0.5026030242,0.4759104383,0.7217336453] - ], - "t": [ - [6.739100305], - [105.8858326], - [336.9710973] - ] - }, - { - "name": "03_23", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 23, - "K": [ - [744.781,0,365.976], - [0,744.836,235.682], - [0,0,1] - ], - "distCoef": [-0.319452,0.032528,0.000754874,-0.000913445,0.102166], - "R": [ - [0.8233335342,0.02583843362,-0.5669693703], - [-0.2570181529,0.9076367155,-0.3318693443], - [0.506027233,0.4189605805,0.7539286912] - ], - "t": [ - [-4.103462359], - [133.5127669], - [329.5726238] - ] - }, - { - "name": "03_24", - "type": "vga", - "resolution": [640,480], - "panel": 3, - "node": 24, - "K": [ - [746.135,0,373.553], - [0,746.515,225.298], - [0,0,1] - ], - "distCoef": [-0.323756,0.0623909,2.70614e-05,0.000962707,0.0761173], - "R": [ - [0.8557458945,0.0294251088,-0.5165589289], - [-0.2234217673,0.921515875,-0.3176337608], - [0.4666708454,0.3872242956,0.7951576366] - ], - "t": [ - [-1.49693002], - [128.5290469], - [325.1203285] - ] - }, - { - "name": "04_01", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 1, - "K": [ - [745.756,0,368.953], - [0,745.945,245.188], - [0,0,1] - ], - "distCoef": [-0.3245,0.0724334,-0.000312337,0.000678015,0.0415529], - "R": [ - [0.04501388353,-0.06073969189,-0.9971381249], - [-0.08162898106,0.9945884367,-0.06426936354], - [0.9956457501,0.08428838276,0.03981216889] - ], - "t": [ - [-59.71104012], - [137.3658878], - [280.4259077] - ] - }, - { - "name": "04_02", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 2, - "K": [ - 
[745.144,0,382.474], - [0,745.286,222.525], - [0,0,1] - ], - "distCoef": [-0.322843,0.0690658,-0.000684608,-0.000275864,0.0370253], - "R": [ - [0.1096717734,-0.01795980665,-0.9938055884], - [-0.007042199406,0.9997976117,-0.01884523745], - [0.9939429106,0.009065367736,0.1095231006] - ], - "t": [ - [-53.83503278], - [149.6185443], - [272.7820927] - ] - }, - { - "name": "04_03", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 3, - "K": [ - [742.832,0,377.499], - [0,742.665,258.984], - [0,0,1] - ], - "distCoef": [-0.312355,-0.00257413,0.000454129,0.00111055,0.151137], - "R": [ - [0.07040546321,0.04162572676,-0.9966495721], - [-0.08610880414,0.9956530214,0.03550119457], - [0.9937949208,0.08332082476,0.07368375372] - ], - "t": [ - [-50.21742462], - [111.4103034], - [280.5940976] - ] - }, - { - "name": "04_04", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 4, - "K": [ - [743.339,0,393.561], - [0,743.571,223.626], - [0,0,1] - ], - "distCoef": [-0.307228,-0.0295629,-0.000661125,6.4492e-05,0.183577], - "R": [ - [0.09450112049,0.05679880598,-0.993903131], - [-0.03670643306,0.9978910099,0.05353662459], - [0.9948478155,0.03142336774,0.09638670013] - ], - "t": [ - [-21.9069], - [118.1273376], - [275.8163164] - ] - }, - { - "name": "04_05", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 5, - "K": [ - [746.019,0,364.58], - [0,746.273,258.887], - [0,0,1] - ], - "distCoef": [-0.327759,0.0738839,0.000801649,0.000211169,0.0604088], - "R": [ - [0.135847977,0.01131634816,-0.9906650632], - [-0.049797809,0.9987488181,0.004580011864], - [0.98947739,0.04871076425,0.1362415358] - ], - "t": [ - [-12.12624478], - [90.71810202], - [278.5550143] - ] - }, - { - "name": "04_06", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 6, - "K": [ - [745.588,0,362.328], - [0,745.695,224.495], - [0,0,1] - ], - "distCoef": [-0.317313,0.0342325,-0.00011624,0.00140051,0.0955503], - "R": [ - [0.09768474559,0.09486669264,-0.9906856217], - [-0.08671696061,0.9924717325,0.0864871607], - [0.9914322262,0.07746076975,0.1051758999] - ], - "t": [ - [6.120914551], - [75.66522558], - [280.1538331] - ] - }, - { - "name": "04_07", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 7, - "K": [ - [744.949,0,374.902], - [0,744.948,218.152], - [0,0,1] - ], - "distCoef": [-0.307279,-0.0368619,-0.000928182,-0.000206153,0.214368], - "R": [ - [0.08413477249,-0.05845821559,-0.994738145], - [-0.03729096802,0.9973936317,-0.06176833509], - [0.9957563576,0.04229161317,0.08173552284] - ], - "t": [ - [3.352563309], - [99.7043349], - [277.3248716] - ] - }, - { - "name": "04_08", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 8, - "K": [ - [744.851,0,365.832], - [0,744.82,236.655], - [0,0,1] - ], - "distCoef": [-0.313642,0.00106915,0.000461187,-0.00049658,0.163492], - "R": [ - [0.1068294918,-0.02053293437,-0.9940653189], - [-0.04471775106,0.998675844,-0.02543386204], - [0.9932712532,0.04716945203,0.1057698462] - ], - "t": [ - [34.88142403], - [92.93282517], - [277.1804593] - ] - }, - { - "name": "04_09", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 9, - "K": [ - [745.947,0,354.92], - [0,745.962,217.292], - [0,0,1] - ], - "distCoef": [-0.332252,0.114802,-0.000779302,-0.000175195,-0.0220414], - "R": [ - [0.0951039165,0.01286389124,-0.99538423], - [-0.04378002227,0.9990030715,0.008727700331], - [0.9945041753,0.04274790527,0.09557228614] - ], - "t": [ - [51.3876018], - [107.4685168], - [276.8925649] - ] - }, - { - "name": 
"04_10", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 10, - "K": [ - [743.419,0,373.623], - [0,743.493,209.714], - [0,0,1] - ], - "distCoef": [-0.312784,-0.00205334,-0.00151839,-4.48796e-05,0.146707], - "R": [ - [0.07554192003,-0.02015366607,-0.996938939], - [-0.05402378201,0.9982445697,-0.02427365106], - [0.9956780852,0.05569209012,0.07432053419] - ], - "t": [ - [36.95032578], - [126.4783785], - [278.9862968] - ] - }, - { - "name": "04_11", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 11, - "K": [ - [743.168,0,378.723], - [0,743.196,231.359], - [0,0,1] - ], - "distCoef": [-0.312654,0.00616666,0.000125459,-0.000163635,0.137741], - "R": [ - [0.104627794,-0.01026277171,-0.994458496], - [-0.05855646041,0.9981483637,-0.01646162423], - [0.9927860624,0.05995431298,0.1038331098] - ], - "t": [ - [61.78762978], - [139.882294], - [278.0088471] - ] - }, - { - "name": "04_12", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 12, - "K": [ - [746.755,0,377.564], - [0,747.014,231.526], - [0,0,1] - ], - "distCoef": [-0.342661,0.169314,0.000669193,0.000564241,-0.092518], - "R": [ - [0.09069981891,0.03748374052,-0.9951726041], - [-0.02832816732,0.9989841486,0.03504548138], - [0.9954752924,0.02501279723,0.09166952704] - ], - "t": [ - [63.18640006], - [168.1511303], - [272.7093484] - ] - }, - { - "name": "04_13", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 13, - "K": [ - [745.766,0,371.377], - [0,745.897,229.211], - [0,0,1] - ], - "distCoef": [-0.323265,0.06437,0.000357726,0.000480753,0.061899], - "R": [ - [0.03414536791,0.03842962758,-0.9986777546], - [-0.02717943982,0.9989265658,0.03750992125], - [0.9990472321,0.02586271187,0.03515321085] - ], - "t": [ - [27.04698548], - [171.5967975], - [274.5649723] - ] - }, - { - "name": "04_14", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 14, - "K": [ - [744.965,0,366.266], - [0,745.319,235.632], - [0,0,1] - ], - "distCoef": [-0.317134,0.0349168,5.85303e-05,0.000379707,0.110605], - "R": [ - [0.05221731101,0.04748668842,-0.9975060736], - [0.03426805086,0.9981953182,0.04931335942], - [0.9980476207,-0.03675759989,0.05049579913] - ], - "t": [ - [31.93275734], - [208.7852536], - [260.7309393] - ] - }, - { - "name": "04_15", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 15, - "K": [ - [744.586,0,371.051], - [0,745.106,212.085], - [0,0,1] - ], - "distCoef": [-0.332822,0.11382,-0.000911903,0.000640183,-0.00904196], - "R": [ - [0.0693166226,0.04834029473,-0.9964228127], - [-0.01396942206,0.9987743784,0.04748258878], - [0.9974968978,0.01062811814,0.06990695264] - ], - "t": [ - [16.12425569], - [198.357827], - [269.7404532] - ] - }, - { - "name": "04_16", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 16, - "K": [ - [742.58,0,362.432], - [0,742.717,222.722], - [0,0,1] - ], - "distCoef": [-0.316061,0.0181932,0.000637155,-0.000119442,0.122715], - "R": [ - [0.07545496093,-0.0349426896,-0.9965367817], - [-0.03652359913,0.9986183515,-0.03778114217], - [0.9964800929,0.03924788454,0.07407447592] - ], - "t": [ - [-15.86676392], - [179.6369531], - [275.0674259] - ] - }, - { - "name": "04_17", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 17, - "K": [ - [745.044,0,350.241], - [0,745.211,214.104], - [0,0,1] - ], - "distCoef": [-0.330556,0.0995367,-0.000406045,-3.83783e-05,-0.00374247], - "R": [ - [0.0837025501,0.02221656332,-0.9962430965], - [-0.04478154079,0.9988252756,0.01851168242], - 
[0.9954840515,0.04306382584,0.08459911461] - ], - "t": [ - [-23.0620205], - [182.4550181], - [276.0013748] - ] - }, - { - "name": "04_18", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 18, - "K": [ - [747.543,0,399.307], - [0,747.43,229.515], - [0,0,1] - ], - "distCoef": [-0.337874,0.152604,0.000377489,0.002871,-0.0603327], - "R": [ - [0.03967719066,0.06607189882,-0.9970256891], - [-0.02383145062,0.9975901546,0.06516091958], - [0.998928317,0.02117516625,0.04115616396] - ], - "t": [ - [-45.47747339], - [181.8911988], - [269.8403328] - ] - }, - { - "name": "04_19", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 19, - "K": [ - [743.963,0,369.391], - [0,744.08,218.072], - [0,0,1] - ], - "distCoef": [-0.320196,0.0539371,0.000417857,0.00192962,0.0700112], - "R": [ - [0.0434323362,0.03783761887,-0.9983395949], - [-0.08481170801,0.9958149524,0.03405223652], - [0.9954499517,0.08319191804,0.04645964289] - ], - "t": [ - [-24.42650241], - [136.5925943], - [281.0885176] - ] - }, - { - "name": "04_20", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 20, - "K": [ - [745.858,0,356.253], - [0,746.045,207.418], - [0,0,1] - ], - "distCoef": [-0.328012,0.0801152,-7.74627e-05,-0.000454429,0.0269942], - "R": [ - [0.0976780849,0.06705669278,-0.9929563896], - [-0.1171365339,0.9915671608,0.05544004021], - [0.9883005738,0.1108961929,0.1047091699] - ], - "t": [ - [-1.775430866], - [107.2147587], - [285.054156] - ] - }, - { - "name": "04_21", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 21, - "K": [ - [746.156,0,369.678], - [0,746.129,226.325], - [0,0,1] - ], - "distCoef": [-0.331296,0.10434,-0.000526263,0.0017798,0.0107539], - "R": [ - [0.06864954522,0.009029787974,-0.9975999714], - [-0.09824772164,0.9951594531,0.00224680986], - [0.9927913301,0.09785768182,0.06920439997] - ], - "t": [ - [2.330018678], - [104.6606406], - [283.2576255] - ] - }, - { - "name": "04_22", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 22, - "K": [ - [746.305,0,363.016], - [0,746.511,222.294], - [0,0,1] - ], - "distCoef": [-0.313633,0.00103632,0.000318828,-0.000294887,0.154057], - "R": [ - [0.08441946195,-0.0784287402,-0.9933389588], - [-0.07957536672,0.9931828981,-0.08517917513], - [0.9932477614,0.08623609206,0.07760297012] - ], - "t": [ - [9.995164317], - [122.6888691], - [282.4272415] - ] - }, - { - "name": "04_23", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 23, - "K": [ - [745.178,0,358.539], - [0,745.299,233.674], - [0,0,1] - ], - "distCoef": [-0.315081,0.0210219,-6.99317e-06,-0.000330658,0.115227], - "R": [ - [0.1162513982,0.03935918122,-0.9924396542], - [-0.02556811677,0.999001962,0.03662446354], - [0.9928906706,0.02111716788,0.117141715] - ], - "t": [ - [32.91845612], - [159.7823772], - [272.1694603] - ] - }, - { - "name": "04_24", - "type": "vga", - "resolution": [640,480], - "panel": 4, - "node": 24, - "K": [ - [746.014,0,365.199], - [0,746.411,216.584], - [0,0,1] - ], - "distCoef": [-0.320661,0.0432533,-0.00136099,-0.000113861,0.0956118], - "R": [ - [0.1001711426,-0.0639180002,-0.9929150172], - [-0.0054812292,0.9978838124,-0.06479084071], - [0.9949551238,0.01193256733,0.09960881242] - ], - "t": [ - [-9.066812064], - [167.2144724], - [271.0944115] - ] - }, - { - "name": "05_01", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 1, - "K": [ - [744.506,0,379.212], - [0,745.093,221.816], - [0,0,1] - ], - "distCoef": [-0.322425,0.0503962,-0.00139268,-0.000488272,0.0792831], - "R": [ - 
[0.4832137358,-0.07031409603,-0.8726742883], - [-0.1214142278,0.9817563233,-0.14633218], - [0.8670427157,0.1766647942,0.465861009] - ], - "t": [ - [-31.81590772], - [187.5269902], - [291.8752718] - ] - }, - { - "name": "05_02", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 2, - "K": [ - [746.146,0,379.909], - [0,746.274,243.237], - [0,0,1] - ], - "distCoef": [-0.327102,0.0750235,0.00051439,0.000830868,0.0552106], - "R": [ - [0.559561068,-0.04316954181,-0.8276640634], - [-0.1711397799,0.9711012062,-0.1663539088], - [0.8109269924,0.2347314165,0.5360024022] - ], - "t": [ - [-21.47998338], - [182.028679], - [304.5116426] - ] - }, - { - "name": "05_03", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 3, - "K": [ - [746.598,0,366.137], - [0,746.916,245.497], - [0,0,1] - ], - "distCoef": [-0.34673,0.191883,-0.000717065,0.000142378,-0.151818], - "R": [ - [0.4493443217,0.06721032382,-0.8908268367], - [-0.2833621033,0.9563979118,-0.07077395533], - [0.8472281859,0.2842284411,0.4487968296] - ], - "t": [ - [-42.79170468], - [156.78227], - [309.5144468] - ] - }, - { - "name": "05_04", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 4, - "K": [ - [744.97,0,361.533], - [0,745.268,216.194], - [0,0,1] - ], - "distCoef": [-0.320215,0.0355127,-0.000935438,6.82351e-05,0.107335], - "R": [ - [0.5139859054,0.07264601249,-0.8547169391], - [-0.2477501277,0.96651576,-0.06683681477], - [0.8212419639,0.2461094116,0.5147735369] - ], - "t": [ - [-21.66847624], - [145.8563675], - [305.5618637] - ] - }, - { - "name": "05_05", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 5, - "K": [ - [743.904,0,367.466], - [0,744.108,216.808], - [0,0,1] - ], - "distCoef": [-0.328736,0.086922,-0.000934339,0.000214876,0.0243362], - "R": [ - [0.4889793362,0.07185582001,-0.8693307483], - [-0.2209595119,0.9743010874,-0.0437525441], - [0.8438460185,0.2134809878,0.4922903259] - ], - "t": [ - [-47.80972546], - [144.3254019], - [299.7644507] - ] - }, - { - "name": "05_06", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 6, - "K": [ - [745.323,0,383.952], - [0,745.526,234.808], - [0,0,1] - ], - "distCoef": [-0.334223,0.133657,-0.000107051,0.00148947,-0.0461754], - "R": [ - [0.4969854565,0.0559027949,-0.8659563116], - [-0.2018212488,0.978003949,-0.05269211703], - [0.8439630558,0.2009556001,0.4973361109] - ], - "t": [ - [-46.56558119], - [125.7186081], - [298.6423415] - ] - }, - { - "name": "05_07", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 7, - "K": [ - [746.158,0,356.674], - [0,746.317,240.893], - [0,0,1] - ], - "distCoef": [-0.334568,0.11153,0.000321304,-0.000871385,-0.0157856], - "R": [ - [0.5541201274,0.02610072644,-0.8320274253], - [-0.1769665492,0.9803549196,-0.08710380092], - [0.8134087072,0.1955069916,0.5478533484] - ], - "t": [ - [-14.70019562], - [115.5481293], - [299.4445791] - ] - }, - { - "name": "05_08", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 8, - "K": [ - [744.96,0,386.044], - [0,745.46,258.776], - [0,0,1] - ], - "distCoef": [-0.325919,0.068823,-0.000458274,0.000477805,0.0465958], - "R": [ - [0.4763065258,-0.004539644313,-0.8792675845], - [-0.1710253429,0.980409884,-0.09770768372], - [0.8624861886,0.1969158475,0.4661992314] - ], - "t": [ - [-40.46029545], - [93.91456762], - [297.4902987] - ] - }, - { - "name": "05_09", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 9, - "K": [ - [745.188,0,367.116], - [0,745.437,236.843], - [0,0,1] - ], - "distCoef": 
[-0.328194,0.058828,0.000388874,-0.00143808,0.0829656], - "R": [ - [0.5065601345,-0.04543027129,-0.8610069225], - [-0.1705921502,0.9735884993,-0.1517357977], - [0.845159836,0.2237443283,0.4854310735] - ], - "t": [ - [-16.55300824], - [76.93410209], - [300.8962768] - ] - }, - { - "name": "05_10", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 10, - "K": [ - [747.452,0,374.886], - [0,747.648,257.28], - [0,0,1] - ], - "distCoef": [-0.337728,0.123608,0.00138141,5.97732e-05,-0.0225942], - "R": [ - [0.4549222289,-0.02855444123,-0.8900732608], - [-0.1699899924,0.9783230281,-0.1182685721], - [0.8741562607,0.2051065493,0.4402069233] - ], - "t": [ - [-13.61854908], - [96.6157071], - [299.0141417] - ] - }, - { - "name": "05_11", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 11, - "K": [ - [746.39,0,405.604], - [0,746.458,241.87], - [0,0,1] - ], - "distCoef": [-0.333064,0.100943,0.000870611,0.00103156,0.0180409], - "R": [ - [0.5002384593,-0.05591048228,-0.8640807264], - [-0.1916757277,0.9660062257,-0.1734715752], - [0.8444062406,0.2524004556,0.4725167836] - ], - "t": [ - [16.55277765], - [75.44647006], - [303.7304898] - ] - }, - { - "name": "05_12", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 12, - "K": [ - [745.943,0,392.757], - [0,746.143,272.1], - [0,0,1] - ], - "distCoef": [-0.323245,0.0770562,0.00168738,0.000666505,0.0382015], - "R": [ - [0.5344619138,-0.0483612619,-0.8438078283], - [-0.2099054746,0.9594877737,-0.1879438847], - [0.818712498,0.277568731,0.5026583782] - ], - "t": [ - [45.5535171], - [81.37072912], - [304.8427161] - ] - }, - { - "name": "05_13", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 13, - "K": [ - [748.463,0,383.471], - [0,748.465,243.614], - [0,0,1] - ], - "distCoef": [-0.34071,0.149034,0.000455623,0.000254671,-0.0668973], - "R": [ - [0.550270912,-0.09726860505,-0.8293013577], - [-0.1127468592,0.975440235,-0.1892207537], - [0.82733915,0.1976238001,0.525789658] - ], - "t": [ - [34.15956958], - [127.9842494], - [295.9545727] - ] - }, - { - "name": "05_14", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 14, - "K": [ - [744.467,0,372.192], - [0,744.287,242.67], - [0,0,1] - ], - "distCoef": [-0.321164,0.0557106,-0.000170048,0.000249902,0.0584864], - "R": [ - [0.5607110475,-0.1151130063,-0.8199708025], - [-0.101866971,0.9731761842,-0.2062795062], - [0.8217215109,0.1991911399,0.5339444244] - ], - "t": [ - [50.41224037], - [142.3474205], - [294.74195] - ] - }, - { - "name": "05_15", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 15, - "K": [ - [746.542,0,352.38], - [0,746.666,240.759], - [0,0,1] - ], - "distCoef": [-0.327959,0.100036,-0.000636984,-0.00122606,-0.0366604], - "R": [ - [0.5029624145,-0.05772144518,-0.8623787128], - [-0.198700467,0.9633205664,-0.180365215], - [0.8411580909,0.262071977,0.4730447599] - ], - "t": [ - [34.04469815], - [136.31759], - [307.4406203] - ] - }, - { - "name": "05_16", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 16, - "K": [ - [747.042,0,371.719], - [0,747.231,244.896], - [0,0,1] - ], - "distCoef": [-0.323957,0.0675271,-0.000219383,0.00030566,0.0452733], - "R": [ - [0.5145114331,-0.105655334,-0.8509494319], - [-0.1209004538,0.9735279663,-0.1939752023], - [0.8489175846,0.2026826318,0.4881174913] - ], - "t": [ - [9.341169646], - [165.8735131], - [297.8569993] - ] - }, - { - "name": "05_17", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 17, - "K": [ - [745.814,0,386.675], - 
[0,746.085,252.153], - [0,0,1] - ], - "distCoef": [-0.320652,0.0597547,0.000647483,5.56623e-05,0.0523558], - "R": [ - [0.5123119379,-0.06682282728,-0.856195765], - [-0.1341513719,0.9785027468,-0.1566390244], - [0.8482569703,0.1951078787,0.4923342645] - ], - "t": [ - [9.076647729], - [186.6487394], - [296.0424945] - ] - }, - { - "name": "05_18", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 18, - "K": [ - [744.362,0,367.747], - [0,744.705,261.961], - [0,0,1] - ], - "distCoef": [-0.317525,0.0240072,0.000331,-0.000409781,0.122239], - "R": [ - [0.5214772573,-0.05602259067,-0.8514240656], - [-0.1526209796,0.9756261952,-0.1576716965], - [0.8395047985,0.2121673788,0.5002166498] - ], - "t": [ - [-2.829687906], - [192.8140289], - [298.6606918] - ] - }, - { - "name": "05_19", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 19, - "K": [ - [744.259,0,353.379], - [0,744.524,245.823], - [0,0,1] - ], - "distCoef": [-0.320328,0.0298824,0.00026675,-0.00161079,0.123162], - "R": [ - [0.5556726344,-0.05485450779,-0.8295896012], - [-0.2099711545,0.9562161648,-0.2038694692], - [0.8044501462,0.2874745713,0.519825291] - ], - "t": [ - [-1.476630227], - [134.2745178], - [310.4571486] - ] - }, - { - "name": "05_20", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 20, - "K": [ - [743.679,0,405.845], - [0,743.856,234.88], - [0,0,1] - ], - "distCoef": [-0.326644,0.0646831,0.000108119,5.73367e-05,0.058946], - "R": [ - [0.447769915,-0.01338423954,-0.894048637], - [-0.18660487,0.9764723016,-0.1080762074], - [0.8744602482,0.2152271039,0.4347373552] - ], - "t": [ - [-41.39083575], - [143.2049031], - [297.8732354] - ] - }, - { - "name": "05_21", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 21, - "K": [ - [746.956,0,354.763], - [0,747.081,232.068], - [0,0,1] - ], - "distCoef": [-0.333648,0.0797639,-0.000768992,-0.00091097,0.0508097], - "R": [ - [0.5053420531,-0.009379958189,-0.8628681393], - [-0.2526298673,0.9545207072,-0.1583299394], - [0.8251106347,0.2979970402,0.4799897963] - ], - "t": [ - [-19.66925616], - [96.29580053], - [309.4868577] - ] - }, - { - "name": "05_22", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 22, - "K": [ - [748.369,0,375.575], - [0,748.642,247.648], - [0,0,1] - ], - "distCoef": [-0.339087,0.143465,-0.000470446,0.00132222,-0.0624301], - "R": [ - [0.54260376,-0.05746408722,-0.8380209057], - [-0.1470082191,0.975763273,-0.1620944744], - [0.8270246327,0.2111490322,0.5210051277] - ], - "t": [ - [3.173863757], - [116.0988382], - [299.4207466] - ] - }, - { - "name": "05_23", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 23, - "K": [ - [744.544,0,368.615], - [0,744.426,281.181], - [0,0,1] - ], - "distCoef": [-0.322575,0.0664483,0.00114224,0.000391788,0.0483369], - "R": [ - [0.5347472888,-0.05715349527,-0.8430769924], - [-0.1466458645,0.9762943366,-0.1591991164], - [0.832190079,0.2087650503,0.5136894259] - ], - "t": [ - [16.7223507], - [130.5590862], - [298.5444367] - ] - }, - { - "name": "05_24", - "type": "vga", - "resolution": [640,480], - "panel": 5, - "node": 24, - "K": [ - [743.308,0,356.74], - [0,743.243,228.93], - [0,0,1] - ], - "distCoef": [-0.321093,0.0447792,0.000127467,-8.40104e-05,0.095825], - "R": [ - [0.5706235669,-0.133891243,-0.8102233519], - [-0.1678811389,0.9467635938,-0.2746900447], - [0.8038685639,0.2927658322,0.5177678046] - ], - "t": [ - [6.742844805], - [124.9131408], - [309.8640068] - ] - }, - { - "name": "06_01", - "type": "vga", - "resolution": [640,480], - 
"panel": 6, - "node": 1, - "K": [ - [744.518,0,344.042], - [0,744.512,240.289], - [0,0,1] - ], - "distCoef": [-0.313532,-0.0139368,0.00116047,-0.000125352,0.195046], - "R": [ - [-0.3305715804,0.1011846603,-0.9383411399], - [-0.314462461,0.9256148845,0.2105954561], - [0.8898515555,0.3646899369,-0.2741631979] - ], - "t": [ - [-23.56718534], - [104.1648487], - [320.754952] - ] - }, - { - "name": "06_02", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 2, - "K": [ - [748.956,0,345.566], - [0,748.875,227.82], - [0,0,1] - ], - "distCoef": [-0.335662,0.0955564,-6.0167e-05,-0.0012999,0.0278092], - "R": [ - [-0.2903396332,0.1603112194,-0.9433998147], - [-0.341086429,0.9037763758,0.2585504022], - [0.8940709957,0.3968483028,-0.2077221201] - ], - "t": [ - [-2.499901432], - [69.14355517], - [325.2941984] - ] - }, - { - "name": "06_03", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 3, - "K": [ - [743.901,0,369.68], - [0,743.816,251.042], - [0,0,1] - ], - "distCoef": [-0.320568,0.044977,0.000366128,-0.00033077,0.103335], - "R": [ - [-0.3123459653,0.110763308,-0.943488997], - [-0.3278062139,0.9196080197,0.216481353], - [0.891618239,0.3768986331,-0.250926954] - ], - "t": [ - [2.578346941], - [71.05917793], - [323.4074447] - ] - }, - { - "name": "06_04", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 4, - "K": [ - [745.814,0,378.476], - [0,745.908,222.393], - [0,0,1] - ], - "distCoef": [-0.316287,0.0251632,0.000357033,0.00145486,0.13215], - "R": [ - [-0.2756543214,0.09031338143,-0.9570048005], - [-0.3333214643,0.9248259371,0.1832860813], - [0.9016160472,0.3695138418,-0.2248288776] - ], - "t": [ - [26.15902854], - [86.10496093], - [322.4382284] - ] - }, - { - "name": "06_05", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 5, - "K": [ - [750.419,0,363.736], - [0,750.614,222.964], - [0,0,1] - ], - "distCoef": [-0.344753,0.14329,-0.000836382,-0.000451111,-0.060951], - "R": [ - [-0.2930259634,0.06094491301,-0.9541601031], - [-0.3875087878,0.9047544541,0.1767945619], - [0.8740553324,0.4215508218,-0.2414998562] - ], - "t": [ - [36.26889278], - [61.41890121], - [327.3260635] - ] - }, - { - "name": "06_06", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 6, - "K": [ - [747.394,0,354.724], - [0,747.506,211.184], - [0,0,1] - ], - "distCoef": [-0.329009,0.0921746,-0.00050966,0.000333806,0.021085], - "R": [ - [-0.2297156979,0.02557529828,-0.9729216835], - [-0.3964529538,0.9104994627,0.1175405629], - [0.888850805,0.4127185877,-0.199016617] - ], - "t": [ - [62.78312093], - [81.38139883], - [324.7093469] - ] - }, - { - "name": "06_07", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 7, - "K": [ - [746.623,0,374.989], - [0,746.758,209.923], - [0,0,1] - ], - "distCoef": [-0.319339,0.0433323,-0.00139256,0.000754597,0.0938733], - "R": [ - [-0.2846142448,0.03267216609,-0.9580852056], - [-0.3313740809,0.934457856,0.1303063082], - [0.8995476364,0.3545716359,-0.255133308] - ], - "t": [ - [45.81195811], - [121.7115234], - [320.8009986] - ] - }, - { - "name": "06_08", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 8, - "K": [ - [745.971,0,357.954], - [0,746.024,209.947], - [0,0,1] - ], - "distCoef": [-0.314348,0.0246684,-0.0014997,0.000635776,0.111152], - "R": [ - [-0.3038162213,-0.0261928812,-0.9523705354], - [-0.3441704234,0.9351353343,0.08407512184], - [0.8883931693,0.3533211563,-0.2931240987] - ], - "t": [ - [41.47715732], - [140.438376], - [322.3540865] - ] - }, - { - "name": "06_09", 
- "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 9, - "K": [ - [742.648,0,362.103], - [0,742.703,220.817], - [0,0,1] - ], - "distCoef": [-0.304218,-0.0643312,-0.000139411,-0.000234647,0.289172], - "R": [ - [-0.2807259034,-0.0411671215,-0.958904706], - [-0.3740921558,0.9247597922,0.06981680165], - [0.8838823599,0.3783181134,-0.2750043253] - ], - "t": [ - [37.64720227], - [153.3424109], - [325.0305142] - ] - }, - { - "name": "06_10", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 10, - "K": [ - [747.72,0,366.165], - [0,747.851,213.209], - [0,0,1] - ], - "distCoef": [-0.324647,0.0523798,-0.00077308,-0.000271098,0.0916616], - "R": [ - [-0.2880158499,0.02777358159,-0.957222805], - [-0.3788720768,0.9147158267,0.1405379157], - [0.8794900907,0.4031421393,-0.2529300217] - ], - "t": [ - [33.16578395], - [147.9736193], - [327.8869733] - ] - }, - { - "name": "06_11", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 11, - "K": [ - [745.331,0,369.444], - [0,745.587,207.732], - [0,0,1] - ], - "distCoef": [-0.317455,0.0357855,-0.00041249,0.000556817,0.0920153], - "R": [ - [-0.3142048567,0.04518634316,-0.9482792323], - [-0.3166241188,0.9366885696,0.1495449465], - [0.8949997069,0.3472358248,-0.2800050117] - ], - "t": [ - [26.61359186], - [187.9055539], - [317.8889871] - ] - }, - { - "name": "06_12", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 12, - "K": [ - [747.25,0,346.366], - [0,747.394,225.779], - [0,0,1] - ], - "distCoef": [-0.328454,0.0750084,3.92686e-05,0.00130952,0.0669429], - "R": [ - [-0.2993781475,0.05639323365,-0.9524665495], - [-0.3171785116,0.9355987261,0.1550897014], - [0.8998725002,0.3485323901,-0.2622110915] - ], - "t": [ - [13.58039626], - [195.4066632], - [317.2443523] - ] - }, - { - "name": "06_13", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 13, - "K": [ - [743.861,0,344.414], - [0,743.872,231.421], - [0,0,1] - ], - "distCoef": [-0.307564,-0.0231037,-0.000140407,-0.000635225,0.208058], - "R": [ - [-0.2583036736,0.07116007646,-0.9634393887], - [-0.3357690773,0.9284960528,0.1586007776], - [0.905835713,0.3644603181,-0.2159405881] - ], - "t": [ - [14.66480509], - [172.1699927], - [320.6722019] - ] - }, - { - "name": "06_14", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 14, - "K": [ - [744.949,0,378.98], - [0,744.921,225.408], - [0,0,1] - ], - "distCoef": [-0.321047,0.0567081,-0.000162218,0.000699701,0.0634367], - "R": [ - [-0.3208579847,0.07871363947,-0.9438507915], - [-0.3472646452,0.9173632389,0.1945557869], - [0.8811682132,0.3901907879,-0.267008856] - ], - "t": [ - [-45.70363788], - [100.2282059], - [322.9364507] - ] - }, - { - "name": "06_15", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 15, - "K": [ - [745.712,0,360.895], - [0,745.741,234.163], - [0,0,1] - ], - "distCoef": [-0.31006,-0.0103454,0.000398478,0.000813845,0.181221], - "R": [ - [-0.3227895896,0.1367774117,-0.9365355415], - [-0.3406635237,0.9063958148,0.2497898928], - [0.8830375102,0.3996730746,-0.245980058] - ], - "t": [ - [-14.93002532], - [154.0180569], - [326.396188] - ] - }, - { - "name": "06_16", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 16, - "K": [ - [745.931,0,372.193], - [0,746.03,212.813], - [0,0,1] - ], - "distCoef": [-0.325757,0.0830346,-0.000419051,0.00216162,0.0290765], - "R": [ - [-0.311559769,0.02363818266,-0.9499324958], - [-0.312276077,0.9416182622,0.1258518973], - [0.8974486961,0.3358515813,-0.2859887293] - ], - "t": [ - 
[-41.03283731], - [153.3338286], - [314.9665339] - ] - }, - { - "name": "06_17", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 17, - "K": [ - [744.756,0,368.403], - [0,744.752,202.816], - [0,0,1] - ], - "distCoef": [-0.313223,0.00720848,-0.00119606,0.000542174,0.130737], - "R": [ - [-0.3236003046,0.09291211415,-0.9416210394], - [-0.3175516679,0.9267842511,0.2005788875], - [0.8913157584,0.3639207207,-0.2704032691] - ], - "t": [ - [-41.098271], - [130.5289196], - [319.7107876] - ] - }, - { - "name": "06_18", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 18, - "K": [ - [744.889,0,373.989], - [0,745.092,230.989], - [0,0,1] - ], - "distCoef": [-0.319065,0.0283013,-0.000935078,-0.000739787,0.111424], - "R": [ - [-0.3391260928,0.0773602665,-0.9375547357], - [-0.3008220503,0.9353680392,0.1859911968], - [0.8913470633,0.3451116057,-0.2939360344] - ], - "t": [ - [-22.38901828], - [189.8595323], - [315.0907711] - ] - }, - { - "name": "06_19", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 19, - "K": [ - [743.21,0,358.424], - [0,743.138,251.445], - [0,0,1] - ], - "distCoef": [-0.316603,0.00648778,0.000375455,-0.000277526,0.16085], - "R": [ - [-0.34774011,0.09728469559,-0.9325301624], - [-0.3453355468,0.9113903597,0.2238548019], - [0.8716766465,0.399879107,-0.2833311204] - ], - "t": [ - [-13.32995299], - [105.9918293], - [324.8353482] - ] - }, - { - "name": "06_20", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 20, - "K": [ - [745.315,0,375.798], - [0,745.342,214.671], - [0,0,1] - ], - "distCoef": [-0.317661,0.021421,-0.000865931,0.000266434,0.124612], - "R": [ - [-0.2889220833,0.06736289331,-0.9549797225], - [-0.355115135,0.918816287,0.172249446], - [0.8890541438,0.3888944219,-0.2415447329] - ], - "t": [ - [16.18922492], - [101.394333], - [324.5371374] - ] - }, - { - "name": "06_21", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 21, - "K": [ - [743.803,0,341.335], - [0,743.805,238.935], - [0,0,1] - ], - "distCoef": [-0.305727,-0.0577903,-0.000702133,-0.00085287,0.249773], - "R": [ - [-0.2867564999,0.0564691645,-0.9563377767], - [-0.3641939053,0.9168870998,0.1633427245], - [0.8860775977,0.3951319776,-0.24235761] - ], - "t": [ - [29.77890794], - [113.785435], - [325.4988706] - ] - }, - { - "name": "06_22", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 22, - "K": [ - [745.285,0,373.625], - [0,745.232,235.431], - [0,0,1] - ], - "distCoef": [-0.319503,0.0483306,-0.000362012,0.00120612,0.080115], - "R": [ - [-0.3458253526,0.08893014684,-0.9340750797], - [-0.3902640321,0.8916714915,0.2293816395], - [0.8532870623,0.4438618933,-0.2736563703] - ], - "t": [ - [18.96316513], - [116.1979138], - [333.2100324] - ] - }, - { - "name": "06_23", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 23, - "K": [ - [744.536,0,366.592], - [0,744.501,224.531], - [0,0,1] - ], - "distCoef": [-0.312705,-0.014521,0.000375544,8.36622e-05,0.188212], - "R": [ - [-0.3181142509,0.09038767844,-0.94373375], - [-0.4081954831,0.8853909401,0.2223945386], - [0.8556750382,0.455974726,-0.2447596336] - ], - "t": [ - [6.972278595], - [119.3141773], - [334.5341124] - ] - }, - { - "name": "06_24", - "type": "vga", - "resolution": [640,480], - "panel": 6, - "node": 24, - "K": [ - [744.6,0,358.514], - [0,744.655,220.515], - [0,0,1] - ], - "distCoef": [-0.30152,-0.0573254,-0.000856409,-0.000288003,0.227002], - "R": [ - [-0.3545583501,0.05661769889,-0.9333181732], - [-0.3227337004,0.929412527,0.1789841147], 
- [0.8775712706,0.3646735401,-0.3112585327] - ], - "t": [ - [-25.22428756], - [139.0090865], - [319.514146] - ] - }, - { - "name": "07_01", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 1, - "K": [ - [745.635,0,384.154], - [0,745.75,223.733], - [0,0,1] - ], - "distCoef": [-0.328279,0.104082,-0.000872931,0.00144148,0.00404207], - "R": [ - [-0.9078071857,0.03344162453,-0.4180523547], - [0.00958043905,0.9982092569,0.05904654639], - [0.4192783428,0.049597754,-0.9065019217] - ], - "t": [ - [-23.31434773], - [152.0493649], - [282.3431498] - ] - }, - { - "name": "07_02", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 2, - "K": [ - [746.944,0,375.746], - [0,747.112,207.581], - [0,0,1] - ], - "distCoef": [-0.321827,0.078307,-0.00112183,4.35862e-05,0.0396046], - "R": [ - [-0.9306435439,0.005427673037,-0.3658867782], - [-0.02457764723,0.9967049447,0.07729936951], - [0.3651007167,0.08093079535,-0.9274436225] - ], - "t": [ - [-62.01828104], - [131.8151818], - [284.3018088] - ] - }, - { - "name": "07_03", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 3, - "K": [ - [743.881,0,383.122], - [0,743.965,237.105], - [0,0,1] - ], - "distCoef": [-0.311008,0.000325185,-0.000782967,0.00055371,0.154469], - "R": [ - [-0.9217631286,0.06528892794,-0.3822173342], - [0.03992506463,0.996464058,0.07392814261], - [0.3856925251,0.05288418425,-0.9211104924] - ], - "t": [ - [-43.22640533], - [121.5976731], - [282.3432951] - ] - }, - { - "name": "07_04", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 4, - "K": [ - [743.69,0,370.307], - [0,743.828,227.79], - [0,0,1] - ], - "distCoef": [-0.303025,-0.0263668,-0.000445815,0.00071591,0.180166], - "R": [ - [-0.9409979296,0.06863452498,-0.3313792366], - [0.04529042225,0.9959498431,0.07767037874], - [0.3353679682,0.05807936004,-0.9402952269] - ], - "t": [ - [-38.37277115], - [113.0266013], - [281.4230584] - ] - }, - { - "name": "07_05", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 5, - "K": [ - [743.998,0,375.484], - [0,744.299,220.79], - [0,0,1] - ], - "distCoef": [-0.310908,0.00595719,-5.69241e-05,0.000519591,0.131448], - "R": [ - [-0.9269484075,0.08594630429,-0.3652121064], - [0.04467826469,0.9917683984,0.1199970688], - [0.3725191305,0.09491404865,-0.9231580692] - ], - "t": [ - [-23.36597135], - [80.23534001], - [286.4206576] - ] - }, - { - "name": "07_06", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 6, - "K": [ - [745.602,0,379.444], - [0,745.67,224.268], - [0,0,1] - ], - "distCoef": [-0.303286,-0.0402497,-0.00132196,0.00012981,0.210105], - "R": [ - [-0.923694641,0.09319000989,-0.3716232396], - [0.04673933936,0.9901316615,0.1321163393], - [0.3802678586,0.1046657299,-0.9189349491] - ], - "t": [ - [-0.9450645075], - [68.69008136], - [287.3198917] - ] - }, - { - "name": "07_07", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 7, - "K": [ - [745.731,0,365.823], - [0,745.481,229.263], - [0,0,1] - ], - "distCoef": [-0.308219,-0.0231519,0.000110727,0.000180113,0.209056], - "R": [ - [-0.917494877,0.04967698427,-0.3946331815], - [0.001316203411,0.9925436367,0.1218827179], - [0.3977454189,0.1113073518,-0.9107190869] - ], - "t": [ - [18.92434207], - [79.05208738], - [288.1952445] - ] - }, - { - "name": "07_08", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 8, - "K": [ - [745.611,0,393.911], - [0,745.863,244.069], - [0,0,1] - ], - "distCoef": [-0.318705,0.0460564,0.000184451,0.000507881,0.0745222], - "R": [ - 
[-0.9083609307,0.09070031,-0.4082326216], - [0.05268537174,0.9932388068,0.1034452715], - [0.4148550001,0.07245775567,-0.9069979066] - ], - "t": [ - [48.31394514], - [81.42535523], - [283.8217571] - ] - }, - { - "name": "07_09", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 9, - "K": [ - [745.77,0,370.33], - [0,746.047,217.48], - [0,0,1] - ], - "distCoef": [-0.321786,0.069205,4.67533e-05,5.58471e-05,0.0372207], - "R": [ - [-0.9211612824,0.007939579541,-0.3891000576], - [-0.02433705705,0.996659961,0.07795274024], - [0.3884193603,0.08127659646,-0.9178913418] - ], - "t": [ - [49.65486911], - [97.0413663], - [285.6851525] - ] - }, - { - "name": "07_10", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 10, - "K": [ - [744.504,0,363.969], - [0,744.833,247.068], - [0,0,1] - ], - "distCoef": [-0.335916,0.144192,-0.000823922,-0.000462503,-0.076361], - "R": [ - [-0.9225918644,-0.01579725191,-0.3854538864], - [-0.05416624958,0.9945677902,0.08888716518], - [0.381955847,0.1028851669,-0.9184358297] - ], - "t": [ - [40.86826856], - [113.0714764], - [288.4804376] - ] - }, - { - "name": "07_11", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 11, - "K": [ - [744.999,0,387.199], - [0,745.384,239.21], - [0,0,1] - ], - "distCoef": [-0.313806,0.0330336,-7.01628e-05,0.00132279,0.0985619], - "R": [ - [-0.9109471902,-0.006922747781,-0.4124648981], - [-0.04540685091,0.9954664163,0.08357530662], - [0.4100163832,0.09486142287,-0.9071316751] - ], - "t": [ - [65.64483344], - [130.0336458], - [285.8729547] - ] - }, - { - "name": "07_12", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 12, - "K": [ - [743.664,0,350.646], - [0,743.861,222.503], - [0,0,1] - ], - "distCoef": [-0.300623,-0.0667329,-0.000394627,-0.00107967,0.272621], - "R": [ - [-0.9268683851,0.02536908581,-0.3745282449], - [0.006256924582,0.9986192343,0.0521581796], - [0.3753343145,0.04600037271,-0.9257473295] - ], - "t": [ - [57.10937388], - [163.0891099], - [280.8513179] - ] - }, - { - "name": "07_13", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 13, - "K": [ - [744.176,0,390.977], - [0,744.332,246.666], - [0,0,1] - ], - "distCoef": [-0.327257,0.10216,-0.000582688,0.00201022,0.0126373], - "R": [ - [-0.9290120658,-0.01909429991,-0.3695564765], - [-0.04453762663,0.9971777882,0.06043888335], - [0.3673594716,0.07260762025,-0.9272406117] - ], - "t": [ - [26.5211548], - [160.1280328], - [285.2494721] - ] - }, - { - "name": "07_14", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 14, - "K": [ - [744.044,0,360.721], - [0,744.333,226.474], - [0,0,1] - ], - "distCoef": [-0.311296,-0.00746755,-0.00165304,-0.000168766,0.17966], - "R": [ - [-0.9305033137,0.06302128148,-0.3608211486], - [0.03165130136,0.9952368859,0.09220485899], - [0.3649133847,0.07437646791,-0.9280659258] - ], - "t": [ - [37.8814582], - [178.0304645], - [285.6034633] - ] - }, - { - "name": "07_15", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 15, - "K": [ - [744.03,0,362.147], - [0,744.447,229.329], - [0,0,1] - ], - "distCoef": [-0.314413,0.0379836,-0.000745365,2.01034e-05,0.0898919], - "R": [ - [-0.9265853662,0.03975182478,-0.373977742], - [0.01411888978,0.9973739765,0.07103385017], - [0.3758193929,0.06053877555,-0.9247133829] - ], - "t": [ - [16.14446289], - [185.021862], - [282.5666312] - ] - }, - { - "name": "07_16", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 16, - "K": [ - [743.673,0,368.897], - [0,743.962,238.378], - [0,0,1] - 
], - "distCoef": [-0.314216,0.0200058,-0.0002257,-0.000345788,0.11969], - "R": [ - [-0.9350006114,0.024774913,-0.3537796777], - [-0.006073372197,0.9962920776,0.08582080369], - [0.354594093,0.08239113958,-0.9313832344] - ], - "t": [ - [-10.51100446], - [168.6528502], - [285.9762696] - ] - }, - { - "name": "07_17", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 17, - "K": [ - [744.686,0,385.346], - [0,745.049,227.767], - [0,0,1] - ], - "distCoef": [-0.317176,0.0455424,-0.000136917,0.000534438,0.0739505], - "R": [ - [-0.908638426,0.05327873405,-0.4141709639], - [0.04010861029,0.9983767379,0.04043746577], - [0.4156531128,0.02013121347,-0.9093004036] - ], - "t": [ - [-7.322164421], - [189.4505625], - [275.8940033] - ] - }, - { - "name": "07_18", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 18, - "K": [ - [746.282,0,378.432], - [0,746.624,237.775], - [0,0,1] - ], - "distCoef": [-0.320382,0.058651,0.000451819,0.000534403,0.062414], - "R": [ - [-0.916555331,0.01769811564,-0.3995160846], - [-0.01470055472,0.9968539618,0.07788499561], - [0.3996376094,0.077259016,-0.9134116408] - ], - "t": [ - [-37.37478029], - [164.0712496], - [285.8486829] - ] - }, - { - "name": "07_19", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 19, - "K": [ - [743.687,0,374.362], - [0,743.883,225.048], - [0,0,1] - ], - "distCoef": [-0.322503,0.0715253,7.77555e-05,0.000517375,0.0539586], - "R": [ - [-0.9239544056,0.01616424802,-0.3821609261], - [-0.020576852,0.9955594902,0.09185801365], - [0.3819487525,0.09273628522,-0.9195189677] - ], - "t": [ - [-17.14443298], - [133.4982453], - [287.2304165] - ] - }, - { - "name": "07_20", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 20, - "K": [ - [745.801,0,368.555], - [0,746.033,233.687], - [0,0,1] - ], - "distCoef": [-0.317685,0.0475287,-3.52395e-05,0.000512076,0.0805211], - "R": [ - [-0.9241543321,-0.01069440692,-0.3818696113], - [-0.04324692472,0.9961108974,0.076764468], - [0.3795635307,0.08745690199,-0.9210227014] - ], - "t": [ - [-16.56758847], - [113.8864258], - [286.5218078] - ] - }, - { - "name": "07_21", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 21, - "K": [ - [744.1,0,390.405], - [0,744.284,237.593], - [0,0,1] - ], - "distCoef": [-0.322514,0.0588182,0.000321804,0.00147162,0.0689104], - "R": [ - [-0.9369369296,0.006948104691,-0.3494294118], - [-0.02026391849,0.9970404822,0.07415962808], - [0.3489105381,0.07656370335,-0.9340232522] - ], - "t": [ - [-3.618393153], - [111.1940513], - [285.5030449] - ] - }, - { - "name": "07_22", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 22, - "K": [ - [747.001,0,381.032], - [0,747.132,234.437], - [0,0,1] - ], - "distCoef": [-0.324882,0.0577225,-0.00134011,-0.00135265,0.0819201], - "R": [ - [-0.9282296861,0.06047570579,-0.3670590401], - [0.02337036389,0.9942284933,0.1047068731], - [0.3712727784,0.08861372459,-0.9242857414] - ], - "t": [ - [25.6408869], - [119.8980517], - [286.9452799] - ] - }, - { - "name": "07_23", - "type": "vga", - "resolution": [640,480], - "panel": 7, - "node": 23, - "K": [ - [743.981,0,363.51], - [0,744.339,258.582], - [0,0,1] - ], - "distCoef": [-0.313768,0.0101513,0.00111395,-0.00104272,0.1345], - "R": [ - [-0.9138255678,-0.001018785166,-0.4061056435], - [-0.03060482875,0.9973259054,0.06636552484], - [0.4049520663,0.0730753071,-0.9114130916] - ], - "t": [ - [24.3580015], - [146.5427691], - [284.2261849] - ] - }, - { - "name": "07_24", - "type": "vga", - "resolution": [640,480], - 
"panel": 7, - "node": 24, - "K": [ - [744.847,0,398.685], - [0,745.01,270.264], - [0,0,1] - ], - "distCoef": [-0.328511,0.106892,0.000179407,0.00152869,-0.00291861], - "R": [ - [-0.915939158,0.01937877811,-0.4008490012], - [-0.01852012751,0.9957282098,0.09045627137], - [0.4008895904,0.09027621565,-0.9116675607] - ], - "t": [ - [6.147743662], - [145.7157982], - [287.1579534] - ] - }, - { - "name": "08_01", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 1, - "K": [ - [743.703,0,360.221], - [0,744.108,227.682], - [0,0,1] - ], - "distCoef": [-0.309411,-0.0239561,-0.001159,0.000249551,0.191643], - "R": [ - [-0.6256262875,-0.004424555618,-0.7801103586], - [-0.1745259617,0.9754325172,0.134432485], - [0.7603502068,0.2202540071,-0.6110284243] - ], - "t": [ - [5.656398722], - [175.9817187], - [302.7764948] - ] - }, - { - "name": "08_02", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 2, - "K": [ - [747.203,0,376.344], - [0,747.435,209.923], - [0,0,1] - ], - "distCoef": [-0.331616,0.11313,4.7739e-05,0.00134479,-0.0154118], - "R": [ - [-0.6724252099,0.1092176997,-0.7320627235], - [-0.09964199407,0.9666926758,0.2357472025], - [0.7334274403,0.2314665517,-0.6391458561] - ], - "t": [ - [-0.9742570867], - [185.4525058], - [305.0714088] - ] - }, - { - "name": "08_03", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 3, - "K": [ - [747.234,0,368.091], - [0,747.404,224.293], - [0,0,1] - ], - "distCoef": [-0.329137,0.0905459,-0.000565165,-0.000329878,0.0231933], - "R": [ - [-0.656899377,0.0205246652,-0.7536988435], - [-0.2005757989,0.9588523348,0.2009267253], - [0.7268098496,0.2831623883,-0.6257527502] - ], - "t": [ - [-32.7353206], - [153.4285774], - [313.8994992] - ] - }, - { - "name": "08_04", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 4, - "K": [ - [747.386,0,362.788], - [0,747.713,235.953], - [0,0,1] - ], - "distCoef": [-0.341304,0.154379,-0.000777774,-0.000654564,-0.0867958], - "R": [ - [-0.6631685233,0.06657565756,-0.7455033143], - [-0.1433461882,0.9663011288,0.2138083224], - [0.7346151238,0.2486560079,-0.6312771259] - ], - "t": [ - [-22.98714967], - [144.6795235], - [307.788251] - ] - }, - { - "name": "08_05", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 5, - "K": [ - [745.746,0,376.748], - [0,745.752,233.642], - [0,0,1] - ], - "distCoef": [-0.32088,0.0642866,0.000720856,0.00118823,0.0489989], - "R": [ - [-0.6568191598,0.04935682433,-0.7524310568], - [-0.1452125328,0.970898021,0.19044777], - [0.7399337211,0.2343521638,-0.6305371929] - ], - "t": [ - [-42.15667108], - [135.9397275], - [306.138018] - ] - }, - { - "name": "08_06", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 6, - "K": [ - [743.581,0,359.642], - [0,743.625,223.766], - [0,0,1] - ], - "distCoef": [-0.309434,-0.0145066,-0.000137344,-0.000208072,0.169515], - "R": [ - [-0.6714433509,-0.01781555577,-0.7408417054], - [-0.2359597182,0.9528188479,0.1909430659], - [0.7024861834,0.3030162521,-0.6439676336] - ], - "t": [ - [-57.25895983], - [89.79547495], - [311.6502108] - ] - }, - { - "name": "08_07", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 7, - "K": [ - [745.148,0,371.237], - [0,745.103,220.621], - [0,0,1] - ], - "distCoef": [-0.318768,0.034703,-0.000217256,0.000447556,0.0954449], - "R": [ - [-0.7012843801,0.01049644172,-0.7128043511], - [-0.1276034542,0.9818947595,0.1400001421], - [0.7013683602,0.1891362102,-0.6872480755] - ], - "t": [ - [-43.70728874], - [118.2041714], - [298.0588141] - ] - 
}, - { - "name": "08_08", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 8, - "K": [ - [743.06,0,391.891], - [0,743.237,230.861], - [0,0,1] - ], - "distCoef": [-0.322908,0.0553375,0.000339696,0.00130059,0.0777268], - "R": [ - [-0.6299217379,0.07604043096,-0.7729272003], - [-0.1362742651,0.9689348188,0.2063846932], - [0.7646096578,0.2353362908,-0.5999907511] - ], - "t": [ - [-3.915515028], - [82.19520224], - [306.2551203] - ] - }, - { - "name": "08_09", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 9, - "K": [ - [746.456,0,356.955], - [0,746.592,233.352], - [0,0,1] - ], - "distCoef": [-0.320498,0.0507213,0.000550471,0.000126643,0.0741224], - "R": [ - [-0.684872543,0.06612723284,-0.7256561093], - [-0.09767122593,0.9785553778,0.1813551881], - [0.7220872049,0.1950809107,-0.6637269822] - ], - "t": [ - [-6.194765679], - [87.40737989], - [301.7039487] - ] - }, - { - "name": "08_10", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 10, - "K": [ - [747.33,0,361.528], - [0,747.71,220.883], - [0,0,1] - ], - "distCoef": [-0.322455,0.0389243,0.00118705,0.000768992,0.12227], - "R": [ - [-0.6055801648,0.01225702185,-0.7956899079], - [-0.1760343759,0.973047512,0.1489645524], - [0.7760699469,0.2302787546,-0.5871006154] - ], - "t": [ - [32.64204154], - [89.24589085], - [303.2777117] - ] - }, - { - "name": "08_11", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 11, - "K": [ - [747.774,0,350.264], - [0,747.981,233.163], - [0,0,1] - ], - "distCoef": [-0.312094,-0.0263709,0.00148203,-0.000526901,0.233175], - "R": [ - [-0.6738094891,0.06987822761,-0.7355935058], - [-0.1142917175,0.9736808734,0.1971876265], - [0.730012449,0.216939139,-0.6480889092] - ], - "t": [ - [35.79986479], - [83.7107121], - [303.8218457] - ] - }, - { - "name": "08_12", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 12, - "K": [ - [744.899,0,366.47], - [0,744.848,222.726], - [0,0,1] - ], - "distCoef": [-0.30396,-0.0418844,-0.00058576,-0.000160605,0.231689], - "R": [ - [-0.6160341517,-0.01803679921,-0.7875129191], - [-0.1884772348,0.9740736778,0.1251271436], - [0.7648387123,0.2255108512,-0.6034621779] - ], - "t": [ - [61.57356311], - [97.36793025], - [301.4047959] - ] - }, - { - "name": "08_13", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 13, - "K": [ - [746.859,0,368.586], - [0,747.139,224.684], - [0,0,1] - ], - "distCoef": [-0.318047,0.0428323,-0.000551709,0.000692584,0.0895927], - "R": [ - [-0.6485099772,-0.04236983322,-0.7600260566], - [-0.2235198928,0.9650338886,0.1369249841], - [0.7276494121,0.258678161,-0.6353046057] - ], - "t": [ - [38.13208236], - [106.9572182], - [307.8393222] - ] - }, - { - "name": "08_14", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 14, - "K": [ - [744.505,0,357.32], - [0,744.53,228.165], - [0,0,1] - ], - "distCoef": [-0.303025,-0.0702212,0.000533599,-0.000753966,0.269146], - "R": [ - [-0.6825611814,-0.04644305139,-0.729351271], - [-0.1871280484,0.9758162042,0.1129859684], - [0.7064653757,0.213601916,-0.6747450588] - ], - "t": [ - [41.82592662], - [132.5834032], - [304.3020009] - ] - }, - { - "name": "08_15", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 15, - "K": [ - [745.837,0,357.73], - [0,745.88,221.629], - [0,0,1] - ], - "distCoef": [-0.3197,0.0439542,-0.00136466,0.00170195,0.109142], - "R": [ - [-0.6069626381,-0.02117938565,-0.7944481037], - [-0.2107505505,0.968144583,0.1352045554], - [0.7662770787,0.2494944888,-0.5920911574] - ], - 
"t": [ - [64.87618524], - [141.1933336], - [303.6799609] - ] - }, - { - "name": "08_16", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 16, - "K": [ - [744.767,0,345.102], - [0,744.781,229.581], - [0,0,1] - ], - "distCoef": [-0.307131,-0.033453,0.0002274,-0.000565369,0.224073], - "R": [ - [-0.6350262321,-0.03398669713,-0.7717425665], - [-0.2527580664,0.9531820242,0.1660041824], - [0.7299692079,0.3004811693,-0.6138860012] - ], - "t": [ - [34.611726], - [134.434862], - [314.3473002] - ] - }, - { - "name": "08_17", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 17, - "K": [ - [743.543,0,370.548], - [0,743.847,224.118], - [0,0,1] - ], - "distCoef": [-0.308645,-0.0111516,9.80345e-05,-0.000744439,0.160705], - "R": [ - [-0.6124225565,-0.05791042639,-0.7884066177], - [-0.1936876385,0.977907652,0.07862393367], - [0.7664357188,0.2008556864,-0.610109238] - ], - "t": [ - [28.62018644], - [186.6213498], - [297.6164741] - ] - }, - { - "name": "08_18", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 18, - "K": [ - [743.39,0,376.249], - [0,743.751,216.723], - [0,0,1] - ], - "distCoef": [-0.319375,0.0602092,-1.05699e-05,0.00110696,0.0487054], - "R": [ - [-0.6887185447,0.08181736584,-0.720397588], - [-0.1043667464,0.9720764384,0.2101784484], - [0.7174777686,0.2199393475,-0.6609480577] - ], - "t": [ - [20.48604056], - [189.7333893], - [302.8177068] - ] - }, - { - "name": "08_19", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 19, - "K": [ - [747.038,0,360.923], - [0,747.259,204.023], - [0,0,1] - ], - "distCoef": [-0.32724,0.0825647,-0.000697091,0.000733699,0.0397455], - "R": [ - [-0.6726100217,0.03848005322,-0.7389959704], - [-0.1487286588,0.9712392562,0.1859411014], - [0.7248969201,0.2349757278,-0.6475421705] - ], - "t": [ - [3.177324598], - [151.0352965], - [305.3818706] - ] - }, - { - "name": "08_20", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 20, - "K": [ - [747.914,0,388.693], - [0,747.835,242.83], - [0,0,1] - ], - "distCoef": [-0.338429,0.134609,0.00136964,0.000561914,-0.0365273], - "R": [ - [-0.6685313457,0.02780025068,-0.7431641715], - [-0.1765857142,0.9647874561,0.194942684], - [0.722414926,0.2615574708,-0.6400815293] - ], - "t": [ - [-14.15175066], - [129.456494], - [308.9585645] - ] - }, - { - "name": "08_21", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 21, - "K": [ - [746.296,0,369.274], - [0,746.424,219.198], - [0,0,1] - ], - "distCoef": [-0.312598,-0.010091,-0.000298989,-0.000771876,0.160922], - "R": [ - [-0.6341455554,-0.01222382885,-0.7731170626], - [-0.1896201401,0.9718007188,0.1401697733], - [0.7496023059,0.2354866044,-0.6185809907] - ], - "t": [ - [-6.414673774], - [116.5175191], - [305.5663378] - ] - }, - { - "name": "08_22", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 22, - "K": [ - [743.609,0,361.562], - [0,743.794,221.87], - [0,0,1] - ], - "distCoef": [-0.314273,0.00142644,4.14402e-05,0.000150079,0.159707], - "R": [ - [-0.6552794634,-0.0176584532,-0.7551801135], - [-0.2007508014,0.9678470127,0.1515627784], - [0.7282224527,0.2509189891,-0.6377552198] - ], - "t": [ - [4.541098798], - [103.6271831], - [307.0310837] - ] - }, - { - "name": "08_23", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 23, - "K": [ - [748.435,0,354.117], - [0,748.457,219.552], - [0,0,1] - ], - "distCoef": [-0.324308,0.0627041,-0.000215295,-0.000444561,0.0758056], - "R": [ - [-0.6485698923,-0.03356212054,-0.7604148071], - 
[-0.2015811272,0.9709293787,0.1290782349], - [0.733976937,0.2370015309,-0.6364810526] - ], - "t": [ - [20.56445448], - [121.4098798], - [305.3725739] - ] - }, - { - "name": "08_24", - "type": "vga", - "resolution": [640,480], - "panel": 8, - "node": 24, - "K": [ - [745.572,0,350.678], - [0,745.729,218.826], - [0,0,1] - ], - "distCoef": [-0.313081,0.00890587,-0.000465969,-0.00023462,0.141032], - "R": [ - [-0.6716141,0.00283216084,-0.7408957278], - [-0.1390702972,0.9817365211,0.1298185488], - [0.7277320613,0.1902245569,-0.6589542206] - ], - "t": [ - [13.95231346], - [154.9907046], - [298.6967118] - ] - }, - { - "name": "09_01", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 1, - "K": [ - [745.377,0,383.314], - [0,745.581,229.65], - [0,0,1] - ], - "distCoef": [-0.311824,0.0113225,-0.000890232,0.000288511,0.13186], - "R": [ - [-0.9888207636,0.1490770148,-0.003088867539], - [0.1339941062,0.8974831076,0.420201917], - [0.06541465384,0.4150904904,-0.9074253732] - ], - "t": [ - [-5.5065201], - [83.70733211], - [330.6651976] - ] - }, - { - "name": "09_02", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 2, - "K": [ - [745.133,0,380.598], - [0,746.347,248.499], - [0,0,1] - ], - "distCoef": [-0.340543,0.0603048,-0.00219925,-0.00194065,0.128165], - "R": [ - [-0.9728033822,0.2090533065,0.09975116351], - [0.2316107347,0.8720009628,0.4312433055], - [0.003169728315,0.4426183864,-0.8967044758] - ], - "t": [ - [-23.76195567], - [58.26386366], - [329.69794] - ] - }, - { - "name": "09_03", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 3, - "K": [ - [745.787,0,382.41], - [0,745.973,216.203], - [0,0,1] - ], - "distCoef": [-0.309439,0.00115788,-0.000439278,0.00154239,0.140783], - "R": [ - [-0.995096801,0.09728424012,-0.01783629191], - [0.08253738581,0.9161639792,0.3922131349], - [0.05449712496,0.3888178749,-0.9197014317] - ], - "t": [ - [6.72584843], - [65.39953055], - [327.4514754] - ] - }, - { - "name": "09_04", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 4, - "K": [ - [744.782,0,384.335], - [0,745.051,230.833], - [0,0,1] - ], - "distCoef": [-0.319171,0.0452003,0.000841339,0.00114337,0.0902557], - "R": [ - [-0.9962766095,0.08536470964,0.01207409478], - [0.0830687393,0.9129812009,0.3994557689], - [0.02307600417,0.3989714189,-0.9166729542] - ], - "t": [ - [12.91980994], - [75.72355875], - [328.4117918] - ] - }, - { - "name": "09_05", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 5, - "K": [ - [745.938,0,386.124], - [0,746.151,234.663], - [0,0,1] - ], - "distCoef": [-0.322825,0.0563734,0.000659785,0.00216478,0.0846192], - "R": [ - [-0.9996885429,0.02460566921,0.004168718214], - [0.02372582958,0.8852416043,0.464525981], - [0.007739649829,0.4644802074,-0.8855496794] - ], - "t": [ - [23.79490616], - [45.57973364], - [333.4360246] - ] - }, - { - "name": "09_06", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 6, - "K": [ - [745.533,0,376.456], - [0,745.938,237.583], - [0,0,1] - ], - "distCoef": [-0.324418,0.0645728,-2.52302e-05,0.000695669,0.0784542], - "R": [ - [-0.9996292032,0.0242501169,-0.01238498622], - [0.01720849374,0.9151046106,0.4028491273], - [0.02110269642,0.4024866252,-0.9151826008] - ], - "t": [ - [44.50201086], - [83.15135806], - [329.4460526] - ] - }, - { - "name": "09_07", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 7, - "K": [ - [745.538,0,357.165], - [0,745.859,222.198], - [0,0,1] - ], - "distCoef": 
[-0.30448,-0.0356601,-0.000261684,-0.000249049,0.226264], - "R": [ - [-0.9994703128,-0.005373675551,-0.03209699996], - [-0.01769948118,0.9174086112,0.3975527241], - [0.02730974481,0.3979102457,-0.9170177829] - ], - "t": [ - [39.28939518], - [107.3778293], - [329.1138759] - ] - }, - { - "name": "09_08", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 8, - "K": [ - [746.393,0,361.584], - [0,746.73,220.937], - [0,0,1] - ], - "distCoef": [-0.31726,0.0513551,0.000643529,-0.000795525,0.0635312], - "R": [ - [-0.9973050313,-0.005865573042,-0.0731318648], - [-0.03181904441,0.9327538711,0.3591068981], - [0.06610766226,0.3604661023,-0.9304267656] - ], - "t": [ - [64.05594666], - [137.6750859], - [322.0323762] - ] - }, - { - "name": "09_09", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 9, - "K": [ - [750.271,0,344.156], - [0,750.817,228.346], - [0,0,1] - ], - "distCoef": [-0.379154,0.391779,0.000225814,-0.000528714,-0.53339], - "R": [ - [-0.9991212371,-0.002089946585,-0.04186150665], - [-0.01685937738,0.9344344151,0.355735977], - [0.03837336329,0.3561291283,-0.933648504] - ], - "t": [ - [51.49527243], - [159.1149955], - [322.66132] - ] - }, - { - "name": "09_10", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 10, - "K": [ - [744.897,0,366.998], - [0,745.389,227.752], - [0,0,1] - ], - "distCoef": [-0.317307,0.0499201,-0.000255849,-0.000414203,0.0689696], - "R": [ - [-0.9956077306,0.03830608065,-0.08542769468], - [0.005132094192,0.9334237661,0.3587390896], - [0.093482129,0.3567249879,-0.9295205079] - ], - "t": [ - [51.9897871], - [163.3127669], - [320.2676037] - ] - }, - { - "name": "09_11", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 11, - "K": [ - [745.812,0,365.568], - [0,746.463,243.927], - [0,0,1] - ], - "distCoef": [-0.334591,0.135033,-0.000586766,0.000648781,-0.0516408], - "R": [ - [-0.998272905,0.02856351314,-0.05133549401], - [0.007150624435,0.926422355,0.3764179707], - [0.05831016891,0.3754007803,-0.9250265825] - ], - "t": [ - [35.7749059], - [177.7642897], - [325.0135255] - ] - }, - { - "name": "09_12", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 12, - "K": [ - [743.195,0,380.908], - [0,743.577,227.789], - [0,0,1] - ], - "distCoef": [-0.308886,-0.0148964,-0.00146189,1.64512e-05,0.167268], - "R": [ - [-0.9994731762,0.02727182579,0.01759595347], - [0.03184982914,0.9284235071,0.3701558858], - [-0.006241669996,0.370521307,-0.9288029945] - ], - "t": [ - [-0.9618436208], - [187.4005014], - [324.424529] - ] - }, - { - "name": "09_13", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 13, - "K": [ - [745.52,0,396.637], - [0,745.641,231.295], - [0,0,1] - ], - "distCoef": [-0.327971,0.0908214,-0.00010844,0.00165709,0.0286999], - "R": [ - [-0.9916965419,0.1263943494,0.02371575794], - [0.1244737261,0.8970729317,0.4239887342], - [0.03231501572,0.4234201503,-0.9053568998] - ], - "t": [ - [12.62306638], - [150.537484], - [333.7640249] - ] - }, - { - "name": "09_14", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 14, - "K": [ - [744.91,0,372.463], - [0,744.965,226.423], - [0,0,1] - ], - "distCoef": [-0.308854,-0.0214085,8.99951e-05,0.000256405,0.180188], - "R": [ - [-0.9924146786,0.1180105859,0.03444716585], - [0.1215225705,0.8993517426,0.4199984619], - [0.01858414592,0.4209987468,-0.9068708203] - ], - "t": [ - [-10.68067405], - [162.2988485], - [333.0026074] - ] - }, - { - "name": "09_15", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 
15, - "K": [ - [747.246,0,368.718], - [0,747.604,232.745], - [0,0,1] - ], - "distCoef": [-0.3413,0.139342,-0.00187439,-0.000934376,-0.0485015], - "R": [ - [-0.9858543141,0.1593536378,0.05193928607], - [0.1663907088,0.8933064559,0.4175137217], - [0.02013463084,0.4202499184,-0.9071849882] - ], - "t": [ - [-16.61956214], - [147.1949584], - [331.9981158] - ] - }, - { - "name": "09_16", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 16, - "K": [ - [743.705,0,367.288], - [0,743.835,246.124], - [0,0,1] - ], - "distCoef": [-0.316616,0.0215265,-3.02132e-05,0.000242548,0.131229], - "R": [ - [-0.9974602961,0.07055123587,0.009771425173], - [0.06902048446,0.9235857212,0.3771280794], - [0.01758210332,0.3768447143,-0.9261095675] - ], - "t": [ - [-30.73982653], - [139.9628037], - [324.9351286] - ] - }, - { - "name": "09_17", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 17, - "K": [ - [742.776,0,376.251], - [0,742.956,242.934], - [0,0,1] - ], - "distCoef": [-0.317736,0.0249159,0.000195501,0.000659428,0.110976], - "R": [ - [-0.9810894361,0.1806813104,0.06941024814], - [0.1934432758,0.9031273242,0.3833284952], - [0.006574003146,0.389506483,-0.9210002618] - ], - "t": [ - [-32.91453507], - [125.2651482], - [325.9500645] - ] - }, - { - "name": "09_18", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 18, - "K": [ - [744.563,0,383.579], - [0,744.554,245.613], - [0,0,1] - ], - "distCoef": [-0.324188,0.0688729,0.000784842,0.000316148,0.0548859], - "R": [ - [-0.970594512,0.2257141743,0.08366244524], - [0.2406675117,0.9026066179,0.3569039677], - [0.005044007626,0.3665438649,-0.9303870985] - ], - "t": [ - [-30.64851648], - [114.5848432], - [323.1694161] - ] - }, - { - "name": "09_19", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 19, - "K": [ - [745.897,0,369.27], - [0,746.007,226.27], - [0,0,1] - ], - "distCoef": [-0.314378,0.0131268,-0.000749673,-0.000436078,0.140449], - "R": [ - [-0.9929061616,0.1118291068,0.04039313118], - [0.1187797946,0.9175946163,0.3793566667], - [0.005358597494,0.3814634596,-0.9243683867] - ], - "t": [ - [-9.348770156], - [111.4514571], - [325.9373984] - ] - }, - { - "name": "09_20", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 20, - "K": [ - [743.647,0,378.532], - [0,743.859,221.629], - [0,0,1] - ], - "distCoef": [-0.312883,-0.00145442,-0.000725648,-1.91192e-05,0.160115], - "R": [ - [-0.9995005243,0.01416777706,-0.02824846864], - [0.002450265794,0.9259270935,0.3776943389], - [0.03150711165,0.3774364735,-0.9254993303] - ], - "t": [ - [6.861259295], - [105.360829], - [326.1962043] - ] - }, - { - "name": "09_21", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 21, - "K": [ - [745.35,0,364.423], - [0,745.51,242.824], - [0,0,1] - ], - "distCoef": [-0.317615,0.0309367,1.60295e-05,-0.00084218,0.138729], - "R": [ - [-0.9983267687,0.03243769532,-0.0478691851], - [0.01510269673,0.9453721551,0.3256430514], - [0.05581730476,0.3243752215,-0.9442802255] - ], - "t": [ - [30.85545331], - [138.1219419], - [318.1793043] - ] - }, - { - "name": "09_22", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 22, - "K": [ - [744.248,0,356.027], - [0,744.436,238.226], - [0,0,1] - ], - "distCoef": [-0.308137,-0.0481761,0.000357682,-8.3696e-05,0.245728], - "R": [ - [-0.9955839097,0.09158830299,-0.0205976113], - [0.07579544873,0.9137019347,0.3992540852], - [0.05538708142,0.3959297379,-0.9166089209] - ], - "t": [ - [35.25988756], - [131.4528362], - [328.3382973] - ] - }, - { - 
"name": "09_23", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 23, - "K": [ - [744.535,0,363.359], - [0,744.632,254.668], - [0,0,1] - ], - "distCoef": [-0.311847,-0.00198079,0.000462082,-0.000460419,0.174118], - "R": [ - [-0.9946906764,0.1028474748,0.003585412436], - [0.09771594436,0.9329851386,0.346396197], - [0.03228083764,0.3449074195,-0.9380814567] - ], - "t": [ - [12.3985171], - [157.8437238], - [320.5381764] - ] - }, - { - "name": "09_24", - "type": "vga", - "resolution": [640,480], - "panel": 9, - "node": 24, - "K": [ - [743.311,0,385.98], - [0,743.511,229.743], - [0,0,1] - ], - "distCoef": [-0.319602,0.0480118,-0.000790169,0.000699953,0.0704098], - "R": [ - [-0.9986396845,0.04700092247,-0.02257640097], - [0.03617494752,0.9363507866,0.3491970469], - [0.03755201414,0.3479053287,-0.93677731] - ], - "t": [ - [-8.936415104], - [142.1371611], - [321.4431282] - ] - }, - { - "name": "10_01", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 1, - "K": [ - [744.128,0,369.511], - [0,744.056,233.67], - [0,0,1] - ], - "distCoef": [-0.31156,0.00550691,-0.000430053,0.000410016,0.149166], - "R": [ - [-0.6229970612,0.0209936641,0.781942407], - [0.05250109858,0.9985078863,0.01502117145], - [-0.7804603106,0.05041098106,-0.6231696692] - ], - "t": [ - [-46.84686717], - [150.7389104], - [280.0083694] - ] - }, - { - "name": "10_02", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 2, - "K": [ - [743.282,0,357.827], - [0,743.347,211.632], - [0,0,1] - ], - "distCoef": [-0.30948,-0.00718458,0.000285593,0.000547399,0.164062], - "R": [ - [-0.6512046155,0.0977241901,0.7525839032], - [0.103617117,0.9938368806,-0.03939223155], - [-0.7517952126,0.05232817138,-0.6573170626] - ], - "t": [ - [-42.32005533], - [143.0774393], - [282.200902] - ] - }, - { - "name": "10_03", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 3, - "K": [ - [744.012,0,361.17], - [0,744.101,225.217], - [0,0,1] - ], - "distCoef": [-0.303567,-0.0563565,0.000757602,-0.000519388,0.263551], - "R": [ - [-0.6320598226,0.04182219841,0.773790207], - [0.06737176964,0.9977273282,0.001106034268], - [-0.771985379,0.05283069539,-0.6334409935] - ], - "t": [ - [-54.02554254], - [119.7786683], - [280.9354705] - ] - }, - { - "name": "10_04", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 4, - "K": [ - [744.209,0,380.966], - [0,744.256,205.476], - [0,0,1] - ], - "distCoef": [-0.315194,0.0249601,-0.000765583,0.001001,0.10286], - "R": [ - [-0.6566261636,0.06356030055,0.7515332125], - [0.0713368826,0.9972094103,-0.02201002698], - [-0.7508349555,0.03915967697,-0.6593279831] - ], - "t": [ - [-22.38173011], - [115.5645607], - [280.9145253] - ] - }, - { - "name": "10_05", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 5, - "K": [ - [744.499,0,353.834], - [0,744.652,215.524], - [0,0,1] - ], - "distCoef": [-0.317042,0.0236932,-0.00147688,-0.000206715,0.11602], - "R": [ - [-0.6480155592,0.1057846486,0.754244949], - [0.1559047408,0.9877614348,-0.004589090624], - [-0.7454995284,0.1146165612,-0.6565771067] - ], - "t": [ - [-17.37690425], - [72.84298088], - [287.4167752] - ] - }, - { - "name": "10_06", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 6, - "K": [ - [746.493,0,367.328], - [0,746.754,207.575], - [0,0,1] - ], - "distCoef": [-0.323089,0.0587326,-0.000981175,-0.000221417,0.0550321], - "R": [ - [-0.6607542091,0.07289791872,0.74705406], - [0.1340507848,0.9907326878,0.02188900409], - [-0.738535214,0.1146064347,-0.6644028167] 
- ], - "t": [ - [3.021864726], - [64.04371811], - [286.9062935] - ] - }, - { - "name": "10_07", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 7, - "K": [ - [744.949,0,365.308], - [0,744.944,217.014], - [0,0,1] - ], - "distCoef": [-0.320697,0.0459897,0.000335318,2.89241e-06,0.0947246], - "R": [ - [-0.643287111,0.03528116955,0.764811697], - [0.0902182212,0.9954712387,0.02996140018], - [-0.7602909742,0.08827373343,-0.6435568215] - ], - "t": [ - [9.776307982], - [84.51813798], - [285.3816638] - ] - }, - { - "name": "10_08", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 8, - "K": [ - [748.112,0,395.78], - [0,748.17,229.575], - [0,0,1] - ], - "distCoef": [-0.325424,0.0774932,-0.000546,0.000524276,0.0351183], - "R": [ - [-0.6241633069,0.05185263499,0.7795713377], - [0.04102617023,0.9985938587,-0.03357318505], - [-0.7802160084,0.0110276762,-0.6254129601] - ], - "t": [ - [-46.24758235], - [183.5392889], - [272.6641799] - ] - }, - { - "name": "10_09", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 9, - "K": [ - [746.122,0,370.333], - [0,746.261,210.753], - [0,0,1] - ], - "distCoef": [-0.323285,0.0813962,-0.00031195,0.00117949,0.0118242], - "R": [ - [-0.6717702835,0.002860846795,0.7407540089], - [0.1085475528,0.9895782107,0.09461708989], - [-0.7327633417,0.1439679842,-0.6650797731] - ], - "t": [ - [53.6134591], - [78.01841366], - [288.9552018] - ] - }, - { - "name": "10_10", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 10, - "K": [ - [746.498,0,355.775], - [0,746.616,218.183], - [0,0,1] - ], - "distCoef": [-0.320479,0.0482256,-0.000295345,0.000515541,0.088746], - "R": [ - [-0.6274497943,0.01735785812,0.7784635254], - [0.05740772193,0.9980618939,0.02401685623], - [-0.7765378993,0.0597591891,-0.6272302051] - ], - "t": [ - [35.32452291], - [122.8912729], - [283.9520693] - ] - }, - { - "name": "10_11", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 11, - "K": [ - [745.209,0,387.948], - [0,745.058,237.868], - [0,0,1] - ], - "distCoef": [-0.312054,0.0106095,2.04654e-05,-0.000407432,0.122509], - "R": [ - [-0.663538187,0.0558857692,0.74605218], - [0.09086672278,0.9958436408,0.006219474654], - [-0.742603739,0.07191817555,-0.6658584406] - ], - "t": [ - [70.41193089], - [130.903078], - [283.3216663] - ] - }, - { - "name": "10_12", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 12, - "K": [ - [746.923,0,359.191], - [0,746.955,219.728], - [0,0,1] - ], - "distCoef": [-0.34193,0.180291,-0.0011698,0.000387434,-0.142263], - "R": [ - [-0.6573529902,0.02662022179,0.7531124817], - [0.0203979596,0.9996382488,-0.01752982786], - [-0.7533066902,0.003838673213,-0.6576581901] - ], - "t": [ - [61.18715226], - [173.543055], - [273.2477614] - ] - }, - { - "name": "10_13", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 13, - "K": [ - [747.063,0,362.554], - [0,747.091,228.588], - [0,0,1] - ], - "distCoef": [-0.334743,0.115617,-0.000133435,0.000763825,-0.0142674], - "R": [ - [-0.6314178936,0.07344004486,0.771957255], - [0.07624079511,0.9965613541,-0.03244701456], - [-0.7716856775,0.03836700932,-0.6348457984] - ], - "t": [ - [39.63694261], - [165.7689372], - [279.8275089] - ] - }, - { - "name": "10_14", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 14, - "K": [ - [745.722,0,380.721], - [0,745.932,237.231], - [0,0,1] - ], - "distCoef": [-0.319645,0.0532601,-0.00105825,0.00148804,0.0812854], - "R": [ - [-0.6464741699,0.0407242176,0.7618482039], - 
[0.05782238306,0.998317631,-0.004298792509], - [-0.7607415591,0.04127282036,-0.6477413331] - ], - "t": [ - [37.16059778], - [187.0284564], - [279.5510011] - ] - }, - { - "name": "10_15", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 15, - "K": [ - [745.212,0,345.945], - [0,745.407,234.052], - [0,0,1] - ], - "distCoef": [-0.345973,0.208044,0.00063894,-0.000591324,-0.26389], - "R": [ - [-0.6892736753,0.06991501806,0.7211197479], - [0.04097555303,0.9975016565,-0.0575451947], - [-0.7233414164,-0.01011610737,-0.6904164394] - ], - "t": [ - [38.38229011], - [201.7157692], - [268.6124541] - ] - }, - { - "name": "10_16", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 16, - "K": [ - [746.402,0,351.743], - [0,746.432,235.34], - [0,0,1] - ], - "distCoef": [-0.332074,0.123634,0.000553061,0.000200886,-0.050504], - "R": [ - [-0.6626903808,0.1069713565,0.7412142659], - [0.1159650419,0.9924654921,-0.03955194002], - [-0.7398605059,0.05974425322,-0.6701022728] - ], - "t": [ - [18.24762504], - [172.5928493], - [282.9657885] - ] - }, - { - "name": "10_17", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 17, - "K": [ - [745.425,0,381.954], - [0,745.576,234.397], - [0,0,1] - ], - "distCoef": [-0.316953,0.0361047,-0.000329948,0.00146685,0.0995591], - "R": [ - [-0.6439914485,0.08005681888,0.7608323863], - [0.04150323442,0.9967010496,-0.06974596286], - [-0.7639060779,-0.01333879876,-0.6451895695] - ], - "t": [ - [-14.39474973], - [198.5707312], - [268.934139] - ] - }, - { - "name": "10_18", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 18, - "K": [ - [742.866,0,374.357], - [0,743.163,216.484], - [0,0,1] - ], - "distCoef": [-0.313801,-0.00472223,0.00105562,-0.000883374,0.146196], - "R": [ - [-0.6735625977,0.03695414336,0.7382058102], - [0.08136680684,0.9963864104,0.02436316713], - [-0.7346379174,0.07647556771,-0.6741354596] - ], - "t": [ - [41.81793908], - [81.57199105], - [283.0241236] - ] - }, - { - "name": "10_19", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 19, - "K": [ - [747.195,0,374.317], - [0,747.324,252.705], - [0,0,1] - ], - "distCoef": [-0.325848,0.0754879,0.000850799,-0.000494425,0.0423325], - "R": [ - [-0.6398121174,0.03550225829,0.7677109118], - [0.06489671873,0.9978603994,0.00793971962], - [-0.7657864391,0.05490184793,-0.6407471551] - ], - "t": [ - [-18.67539454], - [143.739157], - [281.6554752] - ] - }, - { - "name": "10_20", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 20, - "K": [ - [744.074,0,359.595], - [0,744.232,222.54], - [0,0,1] - ], - "distCoef": [-0.312038,-0.00652471,0.000517579,-0.000473896,0.154037], - "R": [ - [-0.6341018605,0.07503908623,0.769599874], - [0.1134623387,0.9935365213,-0.003387984729], - [-0.7648798129,0.08517227417,-0.6385174669] - ], - "t": [ - [-10.64771601], - [114.6784971], - [285.5473806] - ] - }, - { - "name": "10_21", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 21, - "K": [ - [745.669,0,353.595], - [0,745.986,221.41], - [0,0,1] - ], - "distCoef": [-0.331248,0.0956435,-0.00124938,0.0010706,0.0394747], - "R": [ - [-0.618235149,0.02815342604,0.7854888192], - [0.09838720035,0.994269895,0.04180113162], - [-0.7798110408,0.1031249747,-0.6174625335] - ], - "t": [ - [-3.462045404], - [102.4105128], - [287.5712577] - ] - }, - { - "name": "10_22", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 22, - "K": [ - [745.836,0,367.536], - [0,745.883,217.602], - [0,0,1] - ], - "distCoef": 
[-0.306908,-0.0326669,-0.000283909,0.000278093,0.200484], - "R": [ - [-0.6189078213,0.03804187807,0.7845418563], - [0.07413417155,0.9971968305,0.01012945108], - [-0.7819573092,0.06443055706,-0.6199931209] - ], - "t": [ - [14.73270812], - [126.5060302], - [283.9045417] - ] - }, - { - "name": "10_23", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 23, - "K": [ - [742.749,0,379.273], - [0,742.868,231.204], - [0,0,1] - ], - "distCoef": [-0.310394,-0.00460726,-0.000822068,-0.000336616,0.147608], - "R": [ - [-0.6037549899,0.1086195044,0.7897352186], - [0.1215591915,0.9916324658,-0.04345590495], - [-0.787847241,0.0697628552,-0.6119067485] - ], - "t": [ - [19.26192194], - [145.0128457], - [284.7838402] - ] - }, - { - "name": "10_24", - "type": "vga", - "resolution": [640,480], - "panel": 10, - "node": 24, - "K": [ - [745.597,0,368.627], - [0,745.598,227.731], - [0,0,1] - ], - "distCoef": [-0.309585,-0.00749389,-0.000770097,-0.000330202,0.147896], - "R": [ - [-0.6450785239,0.075478584,0.760379301], - [0.07622559694,0.9965021766,-0.03425011393], - [-0.7603047786,0.03586635318,-0.6485755533] - ], - "t": [ - [7.856697427], - [160.1393432], - [279.1413867] - ] - }, - { - "name": "11_01", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 1, - "K": [ - [742.855,0,374.596], - [0,743.116,213.495], - [0,0,1] - ], - "distCoef": [-0.312561,0.00631745,-0.000399255,9.31566e-05,0.13435], - "R": [ - [-0.9229364354,0.00164792287,0.3849488544], - [0.08421827064,0.9766305816,0.1977371741], - [-0.3756269679,0.2149185694,-0.9015067329] - ], - "t": [ - [-1.777017447], - [176.3500352], - [303.9155303] - ] - }, - { - "name": "11_02", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 2, - "K": [ - [743.543,0,362.467], - [0,743.612,228.587], - [0,0,1] - ], - "distCoef": [-0.311508,-0.0063044,0.000209199,0.000389142,0.157517], - "R": [ - [-0.9382305089,-0.009495783218,0.3458805319], - [0.07354737957,0.9713073762,0.226169768], - [-0.338103971,0.2376379833,-0.9106118238] - ], - "t": [ - [-11.88478771], - [180.6527832], - [308.9268929] - ] - }, - { - "name": "11_03", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 3, - "K": [ - [749.382,0,384.698], - [0,749.44,241.756], - [0,0,1] - ], - "distCoef": [-0.334994,0.135003,0.000819921,0.00199466,-0.05032], - "R": [ - [-0.9215516186,0.03410543981,0.3867550042], - [0.1287847641,0.966589567,0.2216282778], - [-0.3662746221,0.2540500501,-0.895154441] - ], - "t": [ - [-28.84627719], - [162.2565593], - [311.7587167] - ] - }, - { - "name": "11_04", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 4, - "K": [ - [747.478,0,355.1], - [0,747.786,237.425], - [0,0,1] - ], - "distCoef": [-0.332665,0.125805,0.000559145,-0.000285828,-0.0488142], - "R": [ - [-0.9186497576,-0.03493542623,0.3935252708], - [0.05923251482,0.9726444983,0.2246200995], - [-0.3906073886,0.2296566914,-0.8914503195] - ], - "t": [ - [-43.73591523], - [146.455357], - [306.7233507] - ] - }, - { - "name": "11_05", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 5, - "K": [ - [744.546,0,358.346], - [0,744.606,240.06], - [0,0,1] - ], - "distCoef": [-0.319412,0.0357687,0.00118284,-0.000939418,0.105494], - "R": [ - [-0.9252091585,0.02778676908,0.3784387777], - [0.1130706466,0.9721977994,0.2050523536], - [-0.3622196044,0.2325066328,-0.9026281759] - ], - "t": [ - [-43.43063623], - [134.4377466], - [308.7383564] - ] - }, - { - "name": "11_06", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 6, - 
"K": [ - [744.682,0,386.644], - [0,744.47,247.576], - [0,0,1] - ], - "distCoef": [-0.310524,-0.0156223,-0.000288596,-3.26402e-05,0.156674], - "R": [ - [-0.9144551399,0.0484228537,0.4017798207], - [0.1449564791,0.9661327489,0.2134833264], - [-0.3778351707,0.2534615133,-0.8905042645] - ], - "t": [ - [-44.21957265], - [107.5274508], - [309.8949628] - ] - }, - { - "name": "11_07", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 7, - "K": [ - [746.436,0,349.001], - [0,746.553,211.863], - [0,0,1] - ], - "distCoef": [-0.330393,0.0902383,-0.000783974,-0.000712996,0.00481592], - "R": [ - [-0.9105637485,0.003264968682,0.4133557789], - [0.1001837456,0.9718993559,0.2130137535], - [-0.401044732,0.2353741321,-0.8853034174] - ], - "t": [ - [-36.21090107], - [102.2867759], - [306.6852556] - ] - }, - { - "name": "11_08", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 8, - "K": [ - [745.743,0,370.625], - [0,745.85,233.671], - [0,0,1] - ], - "distCoef": [-0.3257,0.0614375,0.00126654,-0.000627381,0.0722474], - "R": [ - [-0.8981193216,-0.01090147501,0.4396166989], - [0.09488580103,0.9713398361,0.2179348702], - [-0.4293930238,0.2374449004,-0.8713446794] - ], - "t": [ - [-42.17364239], - [80.07059019], - [305.3107943] - ] - }, - { - "name": "11_09", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 9, - "K": [ - [743.294,0,376.993], - [0,743.306,225.516], - [0,0,1] - ], - "distCoef": [-0.315184,-0.00458353,0.00085295,-0.000315923,0.19344], - "R": [ - [-0.9287334953,0.02657190893,0.369794576], - [0.1072763174,0.9740215576,0.1994336907], - [-0.354888555,0.2248909489,-0.9074569822] - ], - "t": [ - [4.627896612], - [76.0139061], - [305.925361] - ] - }, - { - "name": "11_10", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 10, - "K": [ - [746.981,0,373.015], - [0,746.916,231.087], - [0,0,1] - ], - "distCoef": [-0.31553,-0.0133214,-7.49701e-05,-0.000474937,0.183355], - "R": [ - [-0.897589008,-0.01428097087,0.4406018914], - [0.092180686,0.9712994893,0.219271574], - [-0.431087803,0.2374307391,-0.8705113154] - ], - "t": [ - [-5.834972436], - [85.69962032], - [306.7617687] - ] - }, - { - "name": "11_11", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 11, - "K": [ - [743.956,0,385.014], - [0,743.968,233.944], - [0,0,1] - ], - "distCoef": [-0.321873,0.0619652,-0.000204505,0.000631491,0.0680901], - "R": [ - [-0.9171447001,-0.01735780695,0.3981762243], - [0.08629809142,0.9667012777,0.2409175774], - [-0.3890992656,0.2553181275,-0.8851070078] - ], - "t": [ - [26.82061991], - [73.01187567], - [307.7528197] - ] - }, - { - "name": "11_12", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 12, - "K": [ - [749.192,0,349.167], - [0,749.113,221.266], - [0,0,1] - ], - "distCoef": [-0.334032,0.094759,-0.000689735,0.000727903,0.0409048], - "R": [ - [-0.937850977,-0.03419002209,0.345349949], - [0.06230645433,0.9623765935,0.2644791068], - [-0.341399254,0.2695595196,-0.9004355695] - ], - "t": [ - [57.17130279], - [82.80130245], - [306.825197] - ] - }, - { - "name": "11_13", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 13, - "K": [ - [744.715,0,367.122], - [0,744.786,220.538], - [0,0,1] - ], - "distCoef": [-0.315954,0.0180051,3.91318e-05,0.000697083,0.145396], - "R": [ - [-0.9312656673,-0.01667316508,0.3639591494], - [0.07039560041,0.9718946087,0.2246448954], - [-0.3574754765,0.2348252013,-0.9039183639] - ], - "t": [ - [46.96203938], - [112.2947483], - [304.8878272] - ] - }, - { - "name": 
"11_14", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 14, - "K": [ - [746.505,0,367.697], - [0,746.62,222.237], - [0,0,1] - ], - "distCoef": [-0.323622,0.0629014,0.000917096,0.00064017,0.0716359], - "R": [ - [-0.9260527677,-0.07925799212,0.3689775632], - [0.02937617957,0.9595934278,0.279852628], - [-0.3762490021,0.2699974518,-0.8863058527] - ], - "t": [ - [50.81898209], - [116.0290364], - [310.1255555] - ] - }, - { - "name": "11_15", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 15, - "K": [ - [746.042,0,355.995], - [0,745.821,261.077], - [0,0,1] - ], - "distCoef": [-0.321065,0.0443736,0.000927074,0.000280863,0.106789], - "R": [ - [-0.9208600933,-0.04678508348,0.387076019], - [0.03581020852,0.9784294414,0.2034538209], - [-0.3882451771,0.2012137775,-0.8993212431] - ], - "t": [ - [43.08113165], - [154.6066575], - [301.5640854] - ] - }, - { - "name": "11_16", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 16, - "K": [ - [741.668,0,363.735], - [0,741.796,217.06], - [0,0,1] - ], - "distCoef": [-0.309875,-0.0179015,-1.19394e-05,-0.000437783,0.188022], - "R": [ - [-0.8991061052,-0.0185684781,0.437336739], - [0.0842559957,0.9730755765,0.214534029], - [-0.4295452698,0.2297370977,-0.873333686] - ], - "t": [ - [16.70791642], - [154.14567], - [307.2679797] - ] - }, - { - "name": "11_17", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 17, - "K": [ - [747.822,0,361.761], - [0,747.76,222.34], - [0,0,1] - ], - "distCoef": [-0.334628,0.097635,0.00152491,-0.000486737,0.0213673], - "R": [ - [-0.9162397179,0.01033450945,0.4004971626], - [0.1187416248,0.9617552428,0.2468345183], - [-0.3826293322,0.2737152732,-0.8824254888] - ], - "t": [ - [27.8785048], - [159.3368695], - [313.9971646] - ] - }, - { - "name": "11_18", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 18, - "K": [ - [745.448,0,360.818], - [0,745.84,214.85], - [0,0,1] - ], - "distCoef": [-0.329534,0.0903331,0.00014069,0.000717079,0.0211508], - "R": [ - [-0.9101418911,0.04432675398,0.411918532], - [0.1391589893,0.9692024732,0.2031781034], - [-0.3902262342,0.2422430698,-0.888280238] - ], - "t": [ - [16.35209076], - [181.679224], - [308.9632727] - ] - }, - { - "name": "11_19", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 19, - "K": [ - [746.167,0,363.996], - [0,746.229,234.387], - [0,0,1] - ], - "distCoef": [-0.310901,-0.0147285,-0.000729007,-0.000655789,0.178193], - "R": [ - [-0.9157731435,-0.03755396433,0.3999365568], - [0.06406747528,0.9692207168,0.2377110865], - [-0.3965537899,0.2433123544,-0.8851803149] - ], - "t": [ - [-10.79527777], - [146.8696803], - [308.5271108] - ] - }, - { - "name": "11_20", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 20, - "K": [ - [744.588,0,384.664], - [0,744.662,240.853], - [0,0,1] - ], - "distCoef": [-0.307863,-0.0295446,-0.000517465,0.000242427,0.189333], - "R": [ - [-0.9170523574,0.0431160901,0.396429031], - [0.124694228,0.9752892469,0.1823793695], - [-0.3787694858,0.2166838427,-0.8997676305] - ], - "t": [ - [-9.200936127], - [142.5227957], - [304.9039442] - ] - }, - { - "name": "11_21", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 21, - "K": [ - [745.832,0,378.426], - [0,745.825,230.649], - [0,0,1] - ], - "distCoef": [-0.317765,0.041948,0.000140897,0.000331931,0.0876249], - "R": [ - [-0.903416406,0.009580467792,0.4286572198], - [0.1299134284,0.9588705554,0.2523683006], - [-0.4086089801,0.2836819921,-0.8675040223] - ], - "t": [ - 
[-22.38884391], - [100.2357286], - [311.942278] - ] - }, - { - "name": "11_22", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 22, - "K": [ - [745.759,0,381.189], - [0,746.033,229.615], - [0,0,1] - ], - "distCoef": [-0.307738,-0.0303832,0.000694314,-0.000395606,0.211723], - "R": [ - [-0.9121889441,-0.007451044875,0.4097021017], - [0.1102495844,0.9585035751,0.2628990789], - [-0.394659802,0.2849831196,-0.8735148895] - ], - "t": [ - [-0.4671669308], - [91.25062129], - [311.8622342] - ] - }, - { - "name": "11_23", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 23, - "K": [ - [748.678,0,358.839], - [0,748.651,239.635], - [0,0,1] - ], - "distCoef": [-0.328983,0.0919887,-1.22475e-05,-0.000911096,0.0194744], - "R": [ - [-0.9251940915,-0.06790089301,0.3733702744], - [0.01633387562,0.9758259889,0.2179377065], - [-0.3791425821,0.207733262,-0.9017193545] - ], - "t": [ - [15.23843998], - [129.776393], - [302.9631654] - ] - }, - { - "name": "11_24", - "type": "vga", - "resolution": [640,480], - "panel": 11, - "node": 24, - "K": [ - [747.741,0,374.843], - [0,747.8,238.972], - [0,0,1] - ], - "distCoef": [-0.320184,0.0453956,8.07771e-05,-0.000586724,0.0799959], - "R": [ - [-0.901120423,0.005145678853,0.4335383549], - [0.1030532182,0.9738156258,0.2026404726], - [-0.4211437016,0.2272809911,-0.8780554275] - ], - "t": [ - [6.522845915], - [142.0951003], - [306.255293] - ] - }, - { - "name": "12_01", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 1, - "K": [ - [745.397,0,350.188], - [0,745.422,244.528], - [0,0,1] - ], - "distCoef": [-0.318784,0.0421446,0.000567418,-0.000208,0.092208], - "R": [ - [-0.2717431751,0.1656287556,0.9480098956], - [0.4128654434,0.9098857043,-0.04062180222], - [-0.86930879,0.3803618284,-0.3156376199] - ], - "t": [ - [-13.70303847], - [97.1923903], - [326.2673629] - ] - }, - { - "name": "12_02", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 2, - "K": [ - [747.727,0,370.501], - [0,747.788,234.298], - [0,0,1] - ], - "distCoef": [-0.349811,0.202844,-0.00194754,-0.000389321,-0.178679], - "R": [ - [-0.3883456032,0.1438043201,0.9102241537], - [0.3131714459,0.9495549238,-0.01640403197], - [-0.8666667975,0.2786857806,-0.4137908865] - ], - "t": [ - [13.37192963], - [105.5473845], - [318.08591] - ] - }, - { - "name": "12_03", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 3, - "K": [ - [746.831,0,387.09], - [0,746.752,242.092], - [0,0,1] - ], - "distCoef": [-0.338844,0.109538,-0.000689346,-0.00140957,-0.0011227], - "R": [ - [-0.2489409576,0.07810816372,0.9653639285], - [0.3865744043,0.9219167609,0.0250941395], - [-0.8880251289,0.3794319447,-0.2596974581] - ], - "t": [ - [-20.03334166], - [70.50216381], - [325.3775618] - ] - }, - { - "name": "12_04", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 4, - "K": [ - [746.601,0,360.45], - [0,746.776,222.063], - [0,0,1] - ], - "distCoef": [-0.336822,0.124774,0.000206697,-0.000417774,-0.0398672], - "R": [ - [-0.3081671276,0.03567998316,0.9506629057], - [0.4212102042,0.9011275261,0.1027187694], - [-0.8530035084,0.4320834647,-0.2927266543] - ], - "t": [ - [4.764737811], - [63.41476985], - [331.1517594] - ] - }, - { - "name": "12_05", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 5, - "K": [ - [748.2,0,362.212], - [0,748.363,218.877], - [0,0,1] - ], - "distCoef": [-0.337789,0.133894,-0.000945522,-0.000498923,-0.0570031], - "R": [ - [-0.2841336654,-0.004801876737,0.9587726541], - 
[0.3831436474,0.9161034097,0.118133349], - [-0.8789021593,0.4009133132,-0.2584560111] - ], - "t": [ - [10.92507323], - [68.32263664], - [329.7866549] - ] - }, - { - "name": "12_06", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 6, - "K": [ - [747.371,0,350.388], - [0,747.497,231.124], - [0,0,1] - ], - "distCoef": [-0.351189,0.233364,-0.000450075,-0.00118874,-0.265042], - "R": [ - [-0.3878504716,-0.01635524947,0.9215771902], - [0.3346075558,0.9291346168,0.1573106717], - [-0.8588421248,0.3693797093,-0.3548927092] - ], - "t": [ - [53.76493542], - [97.09757883], - [324.1315487] - ] - }, - { - "name": "12_07", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 7, - "K": [ - [747.196,0,383.602], - [0,747.258,260.076], - [0,0,1] - ], - "distCoef": [-0.340453,0.149462,7.57635e-05,-0.00150211,-0.0810731], - "R": [ - [-0.3567494973,0.01375486298,0.934098817], - [0.3428523716,0.9320474424,0.1172169629], - [-0.8690121101,0.3620750873,-0.3372233439] - ], - "t": [ - [46.87962376], - [118.8343508], - [324.070693] - ] - }, - { - "name": "12_08", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 8, - "K": [ - [748.388,0,360.952], - [0,748.584,220.934], - [0,0,1] - ], - "distCoef": [-0.353387,0.236369,0.000317101,-0.000350889,-0.25062], - "R": [ - [-0.3882650784,-0.0538394581,0.9199736636], - [0.3529834406,0.9134681838,0.2024316376], - [-0.8512654812,0.4033326047,-0.3356633588] - ], - "t": [ - [53.63586961], - [124.5990463], - [329.2926486] - ] - }, - { - "name": "12_09", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 9, - "K": [ - [745.023,0,373.202], - [0,745.321,253.183], - [0,0,1] - ], - "distCoef": [-0.310235,-0.0270349,0.000213071,-0.0010354,0.204812], - "R": [ - [-0.3615436505,-0.1034754049,0.9265953968], - [0.3189620476,0.9201303682,0.2272076531], - [-0.8760989676,0.3776942494,-0.2996625652] - ], - "t": [ - [26.36947949], - [154.1173845], - [328.14772] - ] - }, - { - "name": "12_10", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 10, - "K": [ - [743.497,0,337.094], - [0,743.775,230.392], - [0,0,1] - ], - "distCoef": [-0.323522,0.0697077,-0.000922284,-0.00112939,0.0376595], - "R": [ - [-0.409013364,-0.03192166586,0.9119698873], - [0.3635432206,0.9109541012,0.1949331996], - [-0.8369853014,0.4112707536,-0.3609874961] - ], - "t": [ - [36.39561956], - [146.2733377], - [330.6860766] - ] - }, - { - "name": "12_11", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 11, - "K": [ - [744.432,0,350.161], - [0,744.664,216.764], - [0,0,1] - ], - "distCoef": [-0.3138,0.0423232,-0.000980128,0.000347352,0.0411803], - "R": [ - [-0.3625324698,0.01191238118,0.9318950067], - [0.4332658145,0.8874493782,0.157207936], - [-0.8251369234,0.4607512304,-0.3268904424] - ], - "t": [ - [30.02223667], - [146.021886], - [340.9352409] - ] - }, - { - "name": "12_12", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 12, - "K": [ - [745.59,0,349.499], - [0,745.978,243.824], - [0,0,1] - ], - "distCoef": [-0.328804,0.102744,-0.00034172,-0.00160085,-0.0230968], - "R": [ - [-0.3184962228,0.07265474811,0.9451356747], - [0.3862627531,0.9204738181,0.05940568743], - [-0.8656565379,0.3839911948,-0.3212312573] - ], - "t": [ - [17.04074577], - [180.9741057], - [327.7548666] - ] - }, - { - "name": "12_13", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 13, - "K": [ - [744.766,0,364.423], - [0,744.926,205.341], - [0,0,1] - ], - "distCoef": 
[-0.32165,0.0514735,-0.000885848,-0.00113933,0.0656482], - "R": [ - [-0.2748509499,0.06379038152,0.9593684081], - [0.3894986417,0.919644886,0.05043898999], - [-0.8790607279,0.3875358962,-0.2776115375] - ], - "t": [ - [-9.802475588], - [164.1613661], - [327.7325897] - ] - }, - { - "name": "12_14", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 14, - "K": [ - [744.556,0,345.329], - [0,744.551,253.003], - [0,0,1] - ], - "distCoef": [-0.311027,-0.00213006,0.0011289,-0.000863959,0.162024], - "R": [ - [-0.3202755169,0.1244082889,0.9391198917], - [0.4530679872,0.8907277919,0.0365157459], - [-0.831957326,0.4371802584,-0.3416437171] - ], - "t": [ - [0.5161253202], - [152.8799295], - [338.113135] - ] - }, - { - "name": "12_15", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 15, - "K": [ - [747.233,0,347.644], - [0,747.329,227.375], - [0,0,1] - ], - "distCoef": [-0.323105,0.049287,-0.00101918,5.08353e-05,0.100564], - "R": [ - [-0.2639942301,0.1219548974,0.9567831779], - [0.4010015368,0.9160569375,-0.006120025947], - [-0.8772142349,0.3820558732,-0.2907378472] - ], - "t": [ - [-27.43280694], - [159.7105652], - [325.8203908] - ] - }, - { - "name": "12_16", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 16, - "K": [ - [744.634,0,382.866], - [0,744.52,241.14], - [0,0,1] - ], - "distCoef": [-0.320913,0.0518689,0.000556907,0.000900625,0.0851061], - "R": [ - [-0.2918914105,0.1153635448,0.9494686183], - [0.4055533141,0.9139698053,0.01362734066], - [-0.8662135499,0.3890378484,-0.3135660035] - ], - "t": [ - [-22.908528], - [135.1916248], - [327.5972929] - ] - }, - { - "name": "12_17", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 17, - "K": [ - [745.929,0,399.922], - [0,745.76,235.115], - [0,0,1] - ], - "distCoef": [-0.324412,0.0924767,0.000808772,0.00160345,0.0125449], - "R": [ - [-0.2332319969,0.1531844985,0.9602798264], - [0.4252056559,0.9041694633,-0.04096012482], - [-0.8745301515,0.3987632018,-0.2760161646] - ], - "t": [ - [-42.90434909], - [120.9469461], - [326.5490528] - ] - }, - { - "name": "12_18", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 18, - "K": [ - [745.596,0,390.427], - [0,745.457,235.855], - [0,0,1] - ], - "distCoef": [-0.331545,0.0834192,0.000515021,-0.000851112,0.0388274], - "R": [ - [-0.2198853867,0.1587089693,0.9625288982], - [0.4990272732,0.8661072571,-0.02880971702], - [-0.8382256244,0.4739933356,-0.2696444333] - ], - "t": [ - [-48.83152805], - [73.52609427], - [332.6787653] - ] - }, - { - "name": "12_19", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 19, - "K": [ - [744.284,0,396.863], - [0,744.47,248.804], - [0,0,1] - ], - "distCoef": [-0.318049,0.0444362,0.000417829,0.000948817,0.0847095], - "R": [ - [-0.2972813843,0.0975420226,0.9497943632], - [0.4134272643,0.9098266462,0.03596346693], - [-0.8606402708,0.4033621545,-0.3108010564] - ], - "t": [ - [-6.347004052], - [101.4062297], - [328.9550302] - ] - }, - { - "name": "12_20", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 20, - "K": [ - [745.173,0,391.68], - [0,745.292,239.851], - [0,0,1] - ], - "distCoef": [-0.316891,0.030971,0.000827356,0.00064571,0.114679], - "R": [ - [-0.3480625566,0.05516818218,0.9358466372], - [0.3680676982,0.9261498325,0.08229615655], - [-0.8621940769,0.3730991283,-0.3426637043] - ], - "t": [ - [18.00373906], - [105.1024652], - [325.6162418] - ] - }, - { - "name": "12_21", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 21, - "K": [ 
- [744.07,0,385.155], - [0,744.184,238.534], - [0,0,1] - ], - "distCoef": [-0.325321,0.0749068,6.22505e-05,8.78769e-06,0.0274316], - "R": [ - [-0.2944173655,-0.00519814937,0.9556628036], - [0.365777539,0.9232287513,0.117709238], - [-0.882907247,0.3842156322,-0.2699132104] - ], - "t": [ - [4.17424328], - [116.8807078], - [328.2455421] - ] - }, - { - "name": "12_22", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 22, - "K": [ - [747.36,0,358.25], - [0,747.451,237.291], - [0,0,1] - ], - "distCoef": [-0.329867,0.116416,-0.000580151,-0.000763801,-0.0625995], - "R": [ - [-0.323867873,0.0530845029,0.9446118972], - [0.387407199,0.9183241349,0.08121850418], - [-0.8631484594,0.3922535134,-0.3179810029] - ], - "t": [ - [22.53106717], - [133.6738778], - [328.8995429] - ] - }, - { - "name": "12_23", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 23, - "K": [ - [748.813,0,380.156], - [0,748.859,237.356], - [0,0,1] - ], - "distCoef": [-0.333932,0.115832,0.000621747,-0.000254241,-0.0140772], - "R": [ - [-0.3097958639,0.0326105921,0.9502436908], - [0.3550951383,0.9310652686,0.08381472691], - [-0.8820056493,0.3633923705,-0.3000200319] - ], - "t": [ - [-6.485061334], - [151.418855], - [323.8858443] - ] - }, - { - "name": "12_24", - "type": "vga", - "resolution": [640,480], - "panel": 12, - "node": 24, - "K": [ - [745.33,0,360.408], - [0,745.472,237.433], - [0,0,1] - ], - "distCoef": [-0.321653,0.057929,3.69615e-05,-0.000478596,0.0560779], - "R": [ - [-0.3250711399,0.1046959739,0.9398763254], - [0.4072848242,0.9124585149,0.03922410658], - [-0.8534915501,0.395547989,-0.3392550109] - ], - "t": [ - [2.217299854], - [123.8595425], - [329.2221602] - ] - }, - { - "name": "13_01", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 1, - "K": [ - [747.6,0,355.92], - [0,747.783,249.853], - [0,0,1] - ], - "distCoef": [-0.333712,0.144699,-6.46303e-05,-0.0011294,-0.0924471], - "R": [ - [0.5138271048,0.01100033104,0.857823233], - [0.08358608019,0.9945184566,-0.06282043172], - [-0.8538120833,0.1039809221,0.5100910647] - ], - "t": [ - [-37.95328646], - [135.6435695], - [289.9999799] - ] - }, - { - "name": "13_02", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 2, - "K": [ - [743.227,0,372.15], - [0,743.265,265.407], - [0,0,1] - ], - "distCoef": [-0.306942,-0.0266079,0.000311285,0.000595534,0.199806], - "R": [ - [0.4485620057,-0.005900946102,0.8937322339], - [0.06601293956,0.9974655925,-0.02654587691], - [-0.8913105064,0.07090536373,0.4478147055] - ], - "t": [ - [-38.28645032], - [133.2984516], - [288.856211] - ] - }, - { - "name": "13_03", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 3, - "K": [ - [746.538,0,387.516], - [0,746.833,233.181], - [0,0,1] - ], - "distCoef": [-0.322577,0.0715483,-4.90461e-05,0.000787497,0.0326639], - "R": [ - [0.5260210271,0.02315422103,0.8501563157], - [0.07372016672,0.9946254291,-0.07270208278], - [-0.8472704504,0.1009164896,0.5214869567] - ], - "t": [ - [-53.0750023], - [105.7642054], - [287.8235486] - ] - }, - { - "name": "13_04", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 4, - "K": [ - [744.864,0,367.763], - [0,745.005,229.771], - [0,0,1] - ], - "distCoef": [-0.318118,0.0367901,0.000364188,-0.000713933,0.0879467], - "R": [ - [0.4575577495,0.1623260474,0.8742374736], - [-0.0244195278,0.9851184177,-0.1701334469], - [-0.8888445267,0.05649741078,0.4547124916] - ], - "t": [ - [4.756699591], - [110.8595803], - [285.3944853] - ] - }, - { - "name": "13_05", - "type": 
"vga", - "resolution": [640,480], - "panel": 13, - "node": 5, - "K": [ - [744.026,0,374.462], - [0,744.21,219.295], - [0,0,1] - ], - "distCoef": [-0.309274,-0.00813814,-0.000611939,0.000562163,0.16533], - "R": [ - [0.5236500196,-0.01990538858,0.8517009055], - [0.0479853053,0.9988290545,-0.006158764858], - [-0.8505810176,0.04409416531,0.5239920201] - ], - "t": [ - [-32.80347729], - [91.75629107], - [282.6719703] - ] - }, - { - "name": "13_06", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 6, - "K": [ - [746.172,0,347.715], - [0,746.412,223.735], - [0,0,1] - ], - "distCoef": [-0.315889,0.0243673,0.00083413,-0.000596366,0.129203], - "R": [ - [0.489601615,0.07237643337,0.8689372305], - [-0.010214584,0.9969567785,-0.07728417735], - [-0.8718864151,0.02896262571,0.488850944] - ], - "t": [ - [7.55259059], - [89.5920217], - [281.8493454] - ] - }, - { - "name": "13_07", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 7, - "K": [ - [745.619,0,383.372], - [0,745.683,224.508], - [0,0,1] - ], - "distCoef": [-0.315816,0.0424659,0.000456201,0.000714024,0.0879752], - "R": [ - [0.5142457137,-0.005076098829,0.8576278792], - [0.07753605572,0.9961627141,-0.04059565316], - [-0.8541308483,0.08737322366,0.5126659866] - ], - "t": [ - [9.165152848], - [86.80281732], - [287.1451009] - ] - }, - { - "name": "13_08", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 8, - "K": [ - [746.151,0,390.693], - [0,746.159,238.847], - [0,0,1] - ], - "distCoef": [-0.312796,0.0112848,0.00109903,0.000945928,0.138088], - "R": [ - [0.5333632905,-0.08775347438,0.841322131], - [0.13459771,0.9907366672,0.0180086874], - [-0.8351090089,0.1036348594,0.5402339855] - ], - "t": [ - [14.59630248], - [78.12680456], - [289.302137] - ] - }, - { - "name": "13_09", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 9, - "K": [ - [744.811,0,365.557], - [0,745.05,239.01], - [0,0,1] - ], - "distCoef": [-0.302561,-0.0588071,-0.000331846,-0.00065645,0.252299], - "R": [ - [0.515993865,0.007464548532,0.8565597538], - [0.05311793688,0.9977587535,-0.04069342277], - [-0.8549437502,0.06649624343,0.5144408941] - ], - "t": [ - [47.02842806], - [101.5821868], - [285.7219747] - ] - }, - { - "name": "13_10", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 10, - "K": [ - [744.185,0,393.537], - [0,744.44,231.354], - [0,0,1] - ], - "distCoef": [-0.321367,0.0639595,-3.49657e-05,0.000800078,0.0579089], - "R": [ - [0.5364096096,-0.02345912583,0.8436316733], - [0.07330244032,0.9971310212,-0.01888064639], - [-0.8407683884,0.07196802054,0.536590273] - ], - "t": [ - [31.38919798], - [122.486781], - [287.1552388] - ] - }, - { - "name": "13_11", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 11, - "K": [ - [745.973,0,365.594], - [0,746.037,211.677], - [0,0,1] - ], - "distCoef": [-0.32905,0.0977698,-0.000962762,0.000946642,0.0190885], - "R": [ - [0.5178117038,0.00482526951,0.8554810087], - [0.01921134431,0.9996663333,-0.01726691564], - [-0.8552788806,0.02537595122,0.5175462273] - ], - "t": [ - [57.16543019], - [149.3252564], - [279.6241941] - ] - }, - { - "name": "13_12", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 12, - "K": [ - [745.909,0,358.218], - [0,746.022,220.333], - [0,0,1] - ], - "distCoef": [-0.338571,0.148871,-0.00100229,-0.000678393,-0.0710162], - "R": [ - [0.5368407815,0.02503814463,0.8433119628], - [-0.01156171997,0.9996840035,-0.02232083821], - [-0.8436043516,0.002232599467,0.5369606257] - ], - "t": [ - 
[51.57359577], - [176.1957711], - [275.7319623] - ] - }, - { - "name": "13_13", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 13, - "K": [ - [743.068,0,370.139], - [0,743.357,232.303], - [0,0,1] - ], - "distCoef": [-0.302401,-0.0553181,-0.00107418,-0.000672395,0.220417], - "R": [ - [0.5299693687,-0.06080201885,0.8458342525], - [0.13849556,0.9902402801,-0.01559383094], - [-0.8366310107,0.1254085412,0.5332178257] - ], - "t": [ - [16.99243391], - [145.7883087], - [295.0494301] - ] - }, - { - "name": "13_14", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 14, - "K": [ - [743.724,0,347.611], - [0,743.902,235.434], - [0,0,1] - ], - "distCoef": [-0.315484,0.0296225,-0.000529931,-0.000276443,0.110913], - "R": [ - [0.5388576125,-0.001120175332,0.8423961174], - [0.06888686412,0.9967085439,-0.04273965901], - [-0.8395755317,0.08106061749,0.5371611517] - ], - "t": [ - [22.68047362], - [178.4537167], - [288.5132471] - ] - }, - { - "name": "13_15", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 15, - "K": [ - [748.48,0,370.578], - [0,748.498,231.761], - [0,0,1] - ], - "distCoef": [-0.333743,0.123731,0.000274987,0.00129665,-0.0264397], - "R": [ - [0.5569883215,-0.02228411773,0.8302213126], - [0.06483002391,0.9977563557,-0.01671294857], - [-0.827986158,0.06313218472,0.5571833177] - ], - "t": [ - [-8.30154925], - [184.6918205], - [284.5865319] - ] - }, - { - "name": "13_16", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 16, - "K": [ - [748.413,0,364.616], - [0,748.358,230.166], - [0,0,1] - ], - "distCoef": [-0.337541,0.138107,0.000557985,-0.000490808,-0.0648839], - "R": [ - [0.5035312414,0.04830043061,0.8626258501], - [0.03089895722,0.996790644,-0.07384894344], - [-0.8634243125,0.06383948941,0.5004227975] - ], - "t": [ - [5.312179267], - [173.5565462], - [284.5085099] - ] - }, - { - "name": "13_17", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 17, - "K": [ - [745.143,0,372.782], - [0,745.112,223.2], - [0,0,1] - ], - "distCoef": [-0.321603,0.0646008,-0.000584526,0.000805086,0.0603349], - "R": [ - [0.5471603314,0.02993221277,0.8364924593], - [0.06649342528,0.9946477166,-0.07908567611], - [-0.8343825239,0.09889379359,0.5422414789] - ], - "t": [ - [-32.63653561], - [167.4383368], - [289.2367997] - ] - }, - { - "name": "13_18", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 18, - "K": [ - [745.136,0,373.506], - [0,745.259,215.704], - [0,0,1] - ], - "distCoef": [-0.333755,0.12331,-0.00049301,0.00138004,-0.0323155], - "R": [ - [0.5039095131,0.07384116584,0.8605943788], - [0.02822760746,0.9943991795,-0.1018502524], - [-0.8632950856,0.07561583139,0.4990028469] - ], - "t": [ - [-29.61131213], - [166.0398843], - [286.9453226] - ] - }, - { - "name": "13_19", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 19, - "K": [ - [743.638,0,344.046], - [0,743.783,238.416], - [0,0,1] - ], - "distCoef": [-0.319291,0.0355055,-0.000169258,0.000161892,0.118247], - "R": [ - [0.5180347054,0.01180967192,0.8552780692], - [0.1057363227,0.9913513706,-0.07773216881], - [-0.8487990775,0.1307019191,0.512305704] - ], - "t": [ - [-19.08174331], - [122.2280138], - [293.3272927] - ] - }, - { - "name": "13_20", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 20, - "K": [ - [745.321,0,372.761], - [0,745.559,236.547], - [0,0,1] - ], - "distCoef": [-0.320489,0.0479206,-9.03328e-05,-0.000256288,0.0784864], - "R": [ - [0.4966252135,-0.01754426777,0.8677877598], - 
[0.06583916704,0.9976766247,-0.01750875645], - [-0.8654643848,0.06582971318,0.4966264667] - ], - "t": [ - [-11.61163777], - [120.2765647], - [285.1928757] - ] - }, - { - "name": "13_21", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 21, - "K": [ - [745.539,0,371.886], - [0,745.656,230.519], - [0,0,1] - ], - "distCoef": [-0.326644,0.0839413,-0.000557984,0.000204085,0.0126328], - "R": [ - [0.5330371562,-0.03752357961,0.8452593514], - [0.08887796824,0.9959722199,-0.01183402057], - [-0.8414107777,0.08143290645,0.5342252193] - ], - "t": [ - [-6.03247131], - [109.6165459], - [286.9430377] - ] - }, - { - "name": "13_22", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 22, - "K": [ - [744.018,0,396.717], - [0,744.224,249.141], - [0,0,1] - ], - "distCoef": [-0.315372,0.0205822,-0.000440151,0.000134817,0.105074], - "R": [ - [0.4984198723,-0.001673636668,0.8669341554], - [0.03130878513,0.9993805529,-0.01607079461], - [-0.8663702389,0.03515265859,0.4981635271] - ], - "t": [ - [26.09238071], - [136.8142763], - [280.4949188] - ] - }, - { - "name": "13_23", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 23, - "K": [ - [744.884,0,382.514], - [0,744.877,235.74], - [0,0,1] - ], - "distCoef": [-0.326378,0.0966908,-9.48994e-05,0.00105607,0.00534895], - "R": [ - [0.4908089633,-0.01723518027,0.8710967283], - [0.04978157704,0.9987257364,-0.008288432131], - [-0.8698438688,0.04743260567,0.4910415377] - ], - "t": [ - [21.95453226], - [154.6836493], - [281.6596012] - ] - }, - { - "name": "13_24", - "type": "vga", - "resolution": [640,480], - "panel": 13, - "node": 24, - "K": [ - [744.481,0,341.813], - [0,744.509,213.322], - [0,0,1] - ], - "distCoef": [-0.310201,-0.0109775,-0.00130948,-0.000370453,0.189258], - "R": [ - [0.5283332962,-0.01827851401,0.8488402818], - [0.07383881778,0.996969434,-0.02449033896], - [-0.8458201683,0.0756164244,0.5280818111] - ], - "t": [ - [-10.59416721], - [149.8670778], - [286.3856475] - ] - }, - { - "name": "14_01", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 1, - "K": [ - [745.639,0,394.42], - [0,745.872,232.374], - [0,0,1] - ], - "distCoef": [-0.317821,0.05701,0.000216723,0.00145431,0.0516441], - "R": [ - [0.1117244957,0.006687085701,0.9937167202], - [0.1929264895,0.9808052728,-0.02829110459], - [-0.9748317838,0.1948750877,0.1082898585] - ], - "t": [ - [-10.76838593], - [183.2092961], - [300.2249606] - ] - }, - { - "name": "14_02", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 2, - "K": [ - [744.265,0,384.24], - [0,744.607,234.555], - [0,0,1] - ], - "distCoef": [-0.314122,0.0172489,-0.000351192,-3.05431e-05,0.116521], - "R": [ - [0.09126102309,0.01926845044,0.9956405739], - [0.1889483007,0.9813154942,-0.03631033643], - [-0.9777371658,0.191438313,0.08591511501] - ], - "t": [ - [-20.54744948], - [195.8515337], - [299.6149103] - ] - }, - { - "name": "14_03", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 3, - "K": [ - [742.909,0,383.13], - [0,743.051,234.161], - [0,0,1] - ], - "distCoef": [-0.311566,0.0211516,-0.000212815,-9.64233e-05,0.110817], - "R": [ - [0.07658267666,-0.01244461629,0.9969855692], - [0.2193131093,0.9756433613,-0.004668149478], - [-0.9726442586,0.2190095044,0.07744664757] - ], - "t": [ - [-39.95619704], - [171.7405641], - [305.3439137] - ] - }, - { - "name": "14_04", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 4, - "K": [ - [745.057,0,349.277], - [0,745.321,214.2], - [0,0,1] - ], - "distCoef": 
[-0.31581,0.0237721,-0.00140945,-0.000667487,0.124292], - "R": [ - [0.09341145846,-0.02354383001,0.9953491787], - [0.2305453591,0.9730606003,0.001380415192], - [-0.9685675696,0.2293441873,0.09632293059] - ], - "t": [ - [-43.73412593], - [146.7921304], - [306.2893961] - ] - }, - { - "name": "14_05", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 5, - "K": [ - [744.634,0,387.597], - [0,744.752,225.246], - [0,0,1] - ], - "distCoef": [-0.315944,0.0434616,-0.000268259,0.00110436,0.0780237], - "R": [ - [0.1133728096,0.0374780752,0.9928454059], - [0.2222309073,0.973014014,-0.06210597779], - [-0.9683801061,0.2276820645,0.1019845459] - ], - "t": [ - [-53.79623552], - [137.113178], - [305.5099477] - ] - }, - { - "name": "14_06", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 6, - "K": [ - [744.759,0,388.645], - [0,744.666,221.73], - [0,0,1] - ], - "distCoef": [-0.306159,-0.0283273,-0.000508774,0.00094455,0.192402], - "R": [ - [0.1564984143,0.01913164242,0.9874928995], - [0.2309282446,0.9713913042,-0.05541732523], - [-0.96030224,0.2367127254,0.1476031622] - ], - "t": [ - [-66.24261018], - [112.7515407], - [303.5978047] - ] - }, - { - "name": "14_07", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 7, - "K": [ - [744.959,0,375.286], - [0,745.092,235.744], - [0,0,1] - ], - "distCoef": [-0.302136,-0.0624017,-0.000302824,-0.00146028,0.239945], - "R": [ - [0.0628689268,0.03077162571,0.9975472947], - [0.2444661638,0.9685997585,-0.04528578729], - [-0.967617586,0.2467136292,0.05337220603] - ], - "t": [ - [-19.11814477], - [98.74694092], - [308.9777955] - ] - }, - { - "name": "14_08", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 8, - "K": [ - [746.649,0,384.752], - [0,746.836,237.267], - [0,0,1] - ], - "distCoef": [-0.321628,0.0600031,0.000104796,0.000953791,0.0524376], - "R": [ - [0.1158239713,-0.07384920575,0.9905206219], - [0.2473198554,0.9679682291,0.043248082], - [-0.9619863288,0.2399662524,0.1303782992] - ], - "t": [ - [-45.76229918], - [76.40869106], - [305.3733784] - ] - }, - { - "name": "14_09", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 9, - "K": [ - [745.672,0,372.774], - [0,745.737,209.129], - [0,0,1] - ], - "distCoef": [-0.30917,-0.00857977,-4.68803e-05,-0.000521617,0.17194], - "R": [ - [0.1233501146,0.01050711315,0.9923075883], - [0.2153087978,0.9758411417,-0.0370970036], - [-0.9687243523,0.2182284735,0.1181078428] - ], - "t": [ - [-15.44854612], - [78.73632155], - [304.5944309] - ] - }, - { - "name": "14_10", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 10, - "K": [ - [744.36,0,350.493], - [0,744.605,227.167], - [0,0,1] - ], - "distCoef": [-0.324539,0.0696676,-0.000964917,-0.000688724,0.0453805], - "R": [ - [0.0653712546,0.005547467364,0.9978455916], - [0.2748842968,0.9611936881,-0.02335203178], - [-0.9592524289,0.2758186354,0.06130952564] - ], - "t": [ - [17.36142141], - [73.86484437], - [309.5485763] - ] - }, - { - "name": "14_11", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 11, - "K": [ - [744.072,0,352.953], - [0,744.032,218.847], - [0,0,1] - ], - "distCoef": [-0.310531,-0.00866492,-5.61729e-06,0.000627577,0.179884], - "R": [ - [0.08325845442,0.01268657881,0.9964472292], - [0.1993298125,0.97949952,-0.02912586749], - [-0.9763890903,0.2010466141,0.07902280276] - ], - "t": [ - [33.26019053], - [89.58305599], - [303.0664402] - ] - }, - { - "name": "14_12", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 12, - 
"K": [ - [743.677,0,359.077], - [0,743.623,233.815], - [0,0,1] - ], - "distCoef": [-0.305265,-0.0518121,0.000714314,0.000432839,0.265088], - "R": [ - [0.06818541392,0.004787243789,0.9976611808], - [0.2533830838,0.9671167716,-0.02195821049], - [-0.9649599796,0.2542876962,0.06473025078] - ], - "t": [ - [54.03449748], - [85.53998459], - [306.9876015] - ] - }, - { - "name": "14_13", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 13, - "K": [ - [742.736,0,368.122], - [0,742.832,238.615], - [0,0,1] - ], - "distCoef": [-0.303469,-0.0412536,1.82225e-05,-0.000473228,0.205739], - "R": [ - [0.1225239282,-0.0735967149,0.9897329996], - [0.2305366224,0.9720798639,0.0437447595], - [-0.9653189902,0.222809923,0.1360697815] - ], - "t": [ - [17.43625272], - [116.7070017], - [307.0317679] - ] - }, - { - "name": "14_14", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 14, - "K": [ - [745.328,0,371.219], - [0,745.487,209.713], - [0,0,1] - ], - "distCoef": [-0.318297,0.0286867,-0.0013247,0.000626009,0.137928], - "R": [ - [0.06972690557,-0.0276618613,0.9971825209], - [0.2175762615,0.9759712693,0.01185967683], - [-0.9735495514,0.2161363064,0.0740700209] - ], - "t": [ - [57.75964066], - [131.0709572], - [303.578107] - ] - }, - { - "name": "14_15", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 15, - "K": [ - [743.637,0,370.163], - [0,743.479,235.403], - [0,0,1] - ], - "distCoef": [-0.301307,-0.0600698,0.000220332,0.000264974,0.263845], - "R": [ - [0.0871387997,-0.1078492175,0.9903410402], - [0.2171380052,0.9722761796,0.08677624828], - [-0.9722437535,0.2074790999,0.1081411432] - ], - "t": [ - [27.10934266], - [155.0300785], - [303.8314173] - ] - }, - { - "name": "14_16", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 16, - "K": [ - [747.749,0,388.765], - [0,747.73,234.855], - [0,0,1] - ], - "distCoef": [-0.320028,0.057848,-0.00103044,0.00101463,0.0716113], - "R": [ - [0.09276252326,-0.02731891999,0.9953134134], - [0.2004837996,0.9796626634,0.008204393401], - [-0.9752955246,0.1987831547,0.09635298148] - ], - "t": [ - [25.02944215], - [165.1686099], - [301.5459594] - ] - }, - { - "name": "14_17", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 17, - "K": [ - [745.477,0,358.035], - [0,745.633,228.78], - [0,0,1] - ], - "distCoef": [-0.315933,0.0359808,-0.000244793,0.00106736,0.101835], - "R": [ - [0.09323456203,-0.04884472803,0.9944453273], - [0.1997864834,0.9793990461,0.02937464128], - [-0.9753936013,0.1959380031,0.1010723576] - ], - "t": [ - [12.52671676], - [185.8338565], - [300.6683817] - ] - }, - { - "name": "14_19", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 19, - "K": [ - [746.962,0,392.223], - [0,747.34,219.936], - [0,0,1] - ], - "distCoef": [-0.325078,0.0885503,-0.00165532,0.000580691,0.0160315], - "R": [ - [0.129696032,0.03909405168,0.990782819], - [0.1776002444,0.9821476201,-0.06200165731], - [-0.9755188837,0.1840046397,0.1204375361] - ], - "t": [ - [-4.746570817], - [166.089254], - [298.9402723] - ] - }, - { - "name": "14_20", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 20, - "K": [ - [744.91,0,339.915], - [0,744.956,221.133], - [0,0,1] - ], - "distCoef": [-0.306862,-0.0244375,-6.76743e-05,-0.000102471,0.205298], - "R": [ - [0.09943504227,-0.007298095184,0.9950172914], - [0.2125993636,0.9770380132,-0.01407946415], - [-0.9720669642,0.212940035,0.09870338653] - ], - "t": [ - [-22.7866272], - [143.0595857], - [303.8181509] - ] - }, - { - "name": "14_21", 
- "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 21, - "K": [ - [743.577,0,349.797], - [0,743.73,227.793], - [0,0,1] - ], - "distCoef": [-0.307046,-0.0206712,-0.000861395,-9.97172e-05,0.196115], - "R": [ - [0.09969364468,-0.01462231859,0.9949107322], - [0.2541863771,0.9670897407,-0.01125696175], - [-0.9620033591,0.2540150021,0.1001294952] - ], - "t": [ - [-20.43364439], - [109.4423166], - [308.9174676] - ] - }, - { - "name": "14_22", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 22, - "K": [ - [745.066,0,381.498], - [0,745.047,229.678], - [0,0,1] - ], - "distCoef": [-0.314894,0.0257947,-0.000483886,0.00117112,0.111876], - "R": [ - [0.08696832552,-0.05294226024,0.9948033109], - [0.2154078845,0.9759627551,0.03310806346], - [-0.9726437959,0.2114091239,0.09628202687] - ], - "t": [ - [-4.298071534], - [115.0382234], - [303.8536261] - ] - }, - { - "name": "14_23", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 23, - "K": [ - [746.602,0,379.206], - [0,746.635,260.689], - [0,0,1] - ], - "distCoef": [-0.319922,0.0568918,0.00103779,-0.000422086,0.0766843], - "R": [ - [0.09129519856,-0.01052008078,0.9957683037], - [0.2195471399,0.9755524467,-0.009822274065], - [-0.9713208739,0.2195148095,0.09137290798] - ], - "t": [ - [18.69590833], - [125.3942709], - [304.7857903] - ] - }, - { - "name": "14_24", - "type": "vga", - "resolution": [640,480], - "panel": 14, - "node": 24, - "K": [ - [745.388,0,382.392], - [0,745.496,224.015], - [0,0,1] - ], - "distCoef": [-0.302393,-0.0525763,-0.000559682,-6.77e-05,0.234314], - "R": [ - [0.08118536371,-0.04636746828,0.9956199047], - [0.1796446798,0.9832385033,0.03114216711], - [-0.9803758084,0.1763295309,0.0881542445] - ], - "t": [ - [8.147122648], - [159.0280693], - [298.1193244] - ] - }, - { - "name": "15_01", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 1, - "K": [ - [747.532,0,374.739], - [0,747.668,233.944], - [0,0,1] - ], - "distCoef": [-0.331439,0.109037,-0.000609362,0.000392501,-0.000621335], - "R": [ - [0.7848571462,0.05717032211,0.6170338843], - [0.1817012858,0.9307358272,-0.3173569956], - [-0.5924389444,0.3611957561,0.7201067442] - ], - "t": [ - [-19.59276639], - [102.5270366], - [325.6365462] - ] - }, - { - "name": "15_02", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 2, - "K": [ - [743.597,0,385.764], - [0,743.786,211.188], - [0,0,1] - ], - "distCoef": [-0.307778,-0.0279819,-0.000454196,0.00143268,0.205643], - "R": [ - [0.7963392439,-0.01332837804,0.6047033677], - [0.2601504211,0.910106147,-0.3225345868], - [-0.5460453892,0.4141607847,0.7282206241] - ], - "t": [ - [-38.00771612], - [61.10094736], - [329.1235579] - ] - }, - { - "name": "15_03", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 3, - "K": [ - [746.709,0,382.284], - [0,746.792,243.451], - [0,0,1] - ], - "distCoef": [-0.343209,0.149416,0.000603517,0.00195788,-0.0395936], - "R": [ - [0.7773715491,0.01124156294,0.6289412548], - [0.2547080739,0.908583342,-0.3310590698], - [-0.5751671686,0.4175523175,0.7034435232] - ], - "t": [ - [-3.435783379], - [55.70511308], - [330.3798829] - ] - }, - { - "name": "15_04", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 4, - "K": [ - [743.976,0,365.248], - [0,744.344,229.757], - [0,0,1] - ], - "distCoef": [-0.297483,-0.106842,0.000162294,-0.00147347,0.393874], - "R": [ - [0.7524447247,-0.05297584633,0.6565215122], - [0.2825071426,0.9263759092,-0.2490329079], - [-0.5949929838,0.3728555143,0.7120127209] - ], - "t": 
[ - [9.049706825], - [87.26745214], - [326.8342451] - ] - }, - { - "name": "15_05", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 5, - "K": [ - [748.766,0,349.367], - [0,748.975,233.229], - [0,0,1] - ], - "distCoef": [-0.341466,0.149186,0.00133441,-0.000377568,-0.0615035], - "R": [ - [0.7609990379,-0.1304343502,0.6355055818], - [0.3323849453,0.9196335935,-0.2092708816], - [-0.5571361704,0.3704874276,0.7431946943] - ], - "t": [ - [9.029843232], - [83.469382], - [327.9910328] - ] - }, - { - "name": "15_06", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 6, - "K": [ - [747.104,0,395.739], - [0,747.205,237.611], - [0,0,1] - ], - "distCoef": [-0.337038,0.14046,-0.00100634,0.00170735,-0.0468264], - "R": [ - [0.7339738121,-0.1238803965,0.6677844641], - [0.3595276943,0.9050347286,-0.227270713], - [-0.5762137452,0.4068977603,0.7088102232] - ], - "t": [ - [34.88470946], - [89.42074723], - [330.2467181] - ] - }, - { - "name": "15_07", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 7, - "K": [ - [743.991,0,393.18], - [0,744.112,255.459], - [0,0,1] - ], - "distCoef": [-0.325283,0.0732539,0.00077889,1.70805e-05,0.0462558], - "R": [ - [0.7496842409,-0.1571943749,0.6428557128], - [0.3434403747,0.9227495198,-0.1748771933], - [-0.5657050892,0.3518852828,0.7457576683] - ], - "t": [ - [12.35233863], - [128.2674639], - [324.6313017] - ] - }, - { - "name": "15_08", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 8, - "K": [ - [744.616,0,369.102], - [0,744.835,223.742], - [0,0,1] - ], - "distCoef": [-0.336732,0.141968,-0.000206183,0.000677154,-0.0657397], - "R": [ - [0.7264947252,-0.2131742795,0.6532703428], - [0.4249899792,0.8864309285,-0.1833677358], - [-0.5399897516,0.4108490422,0.7345843265] - ], - "t": [ - [15.28675757], - [126.0458703], - [333.4285141] - ] - }, - { - "name": "15_09", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 9, - "K": [ - [747.517,0,392.733], - [0,747.836,218.574], - [0,0,1] - ], - "distCoef": [-0.334626,0.113242,0.000443349,0.00121381,-0.00550976], - "R": [ - [0.8000319441,0.07155257429,0.5956753458], - [0.1937456116,0.9088549369,-0.3693850858], - [-0.5678129326,0.4109293525,0.7132499848] - ], - "t": [ - [-44.09712116], - [90.97242653], - [330.2186197] - ] - }, - { - "name": "15_10", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 10, - "K": [ - [743.904,0,354.135], - [0,744.494,220.038], - [0,0,1] - ], - "distCoef": [-0.309276,-0.0261099,-0.00127318,0.000283377,0.220693], - "R": [ - [0.7314656006,-0.1499734814,0.6651812009], - [0.3639090401,0.9108337109,-0.1948131455], - [-0.576652656,0.3845645668,0.720820233] - ], - "t": [ - [2.360923884], - [158.0207055], - [327.7017732] - ] - }, - { - "name": "15_11", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 11, - "K": [ - [745.441,0,366.024], - [0,745.471,238.165], - [0,0,1] - ], - "distCoef": [-0.311636,0.00305556,-0.00136926,0.00112458,0.163822], - "R": [ - [0.743215427,-0.1065195831,0.660518287], - [0.3430146167,0.9082888556,-0.2394834597], - [-0.5744317207,0.4045552288,0.7115920636] - ], - "t": [ - [3.38448511], - [170.5922255], - [331.2143489] - ] - }, - { - "name": "15_12", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 12, - "K": [ - [743.816,0,384.478], - [0,744.21,221.813], - [0,0,1] - ], - "distCoef": [-0.309294,-0.0116228,-0.000777235,0.00017565,0.174372], - "R": [ - [0.799529392,-0.03302696284,0.5997182431], - [0.261290645,0.91817945,-0.2977812898], - 
[-0.540814155,0.3947856601,0.7427410938] - ], - "t": [ - [-15.11731065], - [179.1857595], - [329.2699106] - ] - }, - { - "name": "15_13", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 13, - "K": [ - [744.594,0,366.809], - [0,744.805,211.378], - [0,0,1] - ], - "distCoef": [-0.313339,0.0076854,-0.000770441,0.000328229,0.137582], - "R": [ - [0.7697001229,-0.07364256128,0.6341439064], - [0.280866324,0.9310898592,-0.2327783971], - [-0.5733025631,0.3572792288,0.7373436945] - ], - "t": [ - [-27.06753178], - [173.6081799], - [322.2797536] - ] - }, - { - "name": "15_14", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 14, - "K": [ - [744.088,0,376.311], - [0,744.421,235.85], - [0,0,1] - ], - "distCoef": [-0.308902,-0.0157485,-0.000258056,-0.00040893,0.167363], - "R": [ - [0.8019727226,0.02030217439,0.5970155559], - [0.20788107,0.9274680659,-0.31078682], - [-0.5600225111,0.3733507848,0.7395836522] - ], - "t": [ - [-32.35663304], - [177.8511702], - [324.3990212] - ] - }, - { - "name": "15_15", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 15, - "K": [ - [745.471,0,391.786], - [0,745.597,244.782], - [0,0,1] - ], - "distCoef": [-0.319471,0.0520955,-9.03549e-05,0.00103599,0.0679082], - "R": [ - [0.7993824794,0.07801580494,0.5957358356], - [0.170767806,0.9211391478,-0.3497728217], - [-0.5760434082,0.3813347671,0.723019908] - ], - "t": [ - [-27.66881494], - [158.8808021], - [326.8395357] - ] - }, - { - "name": "15_16", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 16, - "K": [ - [744.688,0,372.572], - [0,744.687,232.622], - [0,0,1] - ], - "distCoef": [-0.313079,0.00611683,0.000601543,0.00134427,0.153664], - "R": [ - [0.8032635264,0.07397377164,0.5910123419], - [0.1542914416,0.9325457224,-0.3264239985], - [-0.5752928456,0.3533926383,0.7376664456] - ], - "t": [ - [-29.95169554], - [148.2901373], - [322.192073] - ] - }, - { - "name": "15_17", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 17, - "K": [ - [746.029,0,371.631], - [0,745.957,227.751], - [0,0,1] - ], - "distCoef": [-0.328618,0.10871,0.000376647,0.00140085,-0.015131], - "R": [ - [0.7930332571,0.09578045983,0.6016014933], - [0.1573865304,0.9218193412,-0.3542295616], - [-0.5884961625,0.3755997947,0.7159588403] - ], - "t": [ - [-34.37744536], - [124.5681533], - [326.9926029] - ] - }, - { - "name": "15_18", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 18, - "K": [ - [745.728,0,355.008], - [0,745.836,235.366], - [0,0,1] - ], - "distCoef": [-0.326785,0.0753795,-0.00141997,0.000421746,0.0593081], - "R": [ - [0.7423074724,-0.1183757606,0.6595201254], - [0.3246236378,0.9245812728,-0.1994215728], - [-0.5861732766,0.362127946,0.7247511576] - ], - "t": [ - [30.16113415], - [163.1800117], - [323.8887405] - ] - }, - { - "name": "15_19", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 19, - "K": [ - [745.415,0,362.511], - [0,745.431,246.567], - [0,0,1] - ], - "distCoef": [-0.31824,0.0392935,0.000511921,2.0382e-05,0.0980721], - "R": [ - [0.7792023734,-0.03485918818,0.6258022837], - [0.250771695,0.9323920084,-0.2603050127], - [-0.5744190268,0.3597637832,0.7352637636] - ], - "t": [ - [-23.21577405], - [116.3982595], - [324.3931588] - ] - }, - { - "name": "15_20", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 20, - "K": [ - [745.757,0,370.457], - [0,745.798,252.296], - [0,0,1] - ], - "distCoef": [-0.322058,0.058259,0.000816175,0.000770211,0.0698692], - "R": [ - 
[0.7754488131,-0.03297117701,0.6305489986], - [0.2704225106,0.9197540051,-0.2844718542], - [-0.5705705951,0.391108005,0.7221383001] - ], - "t": [ - [-0.5150360293], - [101.3336776], - [328.6175717] - ] - }, - { - "name": "15_21", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 21, - "K": [ - [746.009,0,385.23], - [0,746.113,244.377], - [0,0,1] - ], - "distCoef": [-0.328614,0.0717398,0.00119782,0.000153035,0.0631847], - "R": [ - [0.7150247804,-0.1629175474,0.6798510396], - [0.3900461789,0.9000077369,-0.194550898], - [-0.5801754405,0.4042820134,0.7070732013] - ], - "t": [ - [2.095653738], - [113.9962742], - [330.0144097] - ] - }, - { - "name": "15_22", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 22, - "K": [ - [747.044,0,384.928], - [0,747.43,218.136], - [0,0,1] - ], - "distCoef": [-0.332061,0.0970763,-0.00131827,0.000796644,0.024739], - "R": [ - [0.7476996574,-0.1120966581,0.6545071135], - [0.3349363173,0.9147459603,-0.2259590484], - [-0.5733784838,0.3881677053,0.7215004829] - ], - "t": [ - [-3.202807266], - [138.4357179], - [328.3283502] - ] - }, - { - "name": "15_23", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 23, - "K": [ - [746.525,0,381.586], - [0,746.566,231.744], - [0,0,1] - ], - "distCoef": [-0.323751,0.0809499,0.00143311,0.000786746,0.0334271], - "R": [ - [0.7874675535,-0.04961201835,0.6143561669], - [0.2785108695,0.9178324582,-0.2828697124], - [-0.5498422936,0.3938555906,0.7365807667] - ], - "t": [ - [-21.67007007], - [141.1281207], - [328.549187] - ] - }, - { - "name": "15_24", - "type": "vga", - "resolution": [640,480], - "panel": 15, - "node": 24, - "K": [ - [744.493,0,392.291], - [0,744.573,223.193], - [0,0,1] - ], - "distCoef": [-0.308278,-0.0176562,-0.000671893,0.00116828,0.17277], - "R": [ - [0.7758686755,-0.01407586642,0.6307374005], - [0.2927445364,0.8936390769,-0.3401614861], - [-0.5588635207,0.4485655695,0.6974672] - ], - "t": [ - [-20.05926183], - [105.1778582], - [335.8474538] - ] - }, - { - "name": "16_01", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 1, - "K": [ - [745.918,0,380.409], - [0,745.86,226.454], - [0,0,1] - ], - "distCoef": [-0.329171,0.0901569,-0.000500393,-0.000311386,0.0200307], - "R": [ - [0.8121486446,0.04341076946,0.5818333819], - [-0.0759194996,0.9966126489,0.03161419974], - [-0.5784901112,-0.06984792866,0.8126933358] - ], - "t": [ - [55.6088262], - [125.3657692], - [265.9940479] - ] - }, - { - "name": "16_02", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 2, - "K": [ - [747.364,0,392.411], - [0,747.161,225.523], - [0,0,1] - ], - "distCoef": [-0.325367,0.0819479,0.000479765,0.00158774,0.0247525], - "R": [ - [0.8168932447,0.07701494166,0.5716241121], - [-0.08391193553,0.9963702084,-0.01432462351], - [-0.5706524458,-0.03626439747,0.8203905653] - ], - "t": [ - [75.42528996], - [124.1426197], - [270.1790967] - ] - }, - { - "name": "16_03", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 3, - "K": [ - [744.743,0,378.771], - [0,744.551,249.858], - [0,0,1] - ], - "distCoef": [-0.319546,0.0369202,-5.08119e-05,0.00111176,0.115068], - "R": [ - [0.8437113062,0.07102371173,0.5320778742], - [-0.08587784221,0.9963005803,0.003185889303], - [-0.5298832211,-0.04838167055,0.8466894271] - ], - "t": [ - [57.15960424], - [150.0301024], - [271.4615922] - ] - }, - { - "name": "16_04", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 4, - "K": [ - [745.916,0,377.522], - [0,746.078,215.704], - [0,0,1] - ], - 
"distCoef": [-0.32195,0.0590592,-0.000295617,0.000900619,0.0691531], - "R": [ - [0.8298382679,0.121110683,0.5447023514], - [-0.1306769278,0.9911961099,-0.02130286834], - [-0.5424868568,-0.05350209448,0.8383588349] - ], - "t": [ - [50.00635036], - [157.1807453], - [269.6015294] - ] - }, - { - "name": "16_05", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 5, - "K": [ - [745.303,0,378.655], - [0,745.572,246.962], - [0,0,1] - ], - "distCoef": [-0.315703,0.0277156,6.06815e-05,0.000389915,0.121683], - "R": [ - [0.8187116226,0.05412921644,0.5716478872], - [-0.09011941267,0.9953220251,0.0348218015], - [-0.5670888559,-0.08002558546,0.8197598034] - ], - "t": [ - [44.81120287], - [188.347539], - [263.8787228] - ] - }, - { - "name": "16_06", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 6, - "K": [ - [745.606,0,364.995], - [0,745.957,239.275], - [0,0,1] - ], - "distCoef": [-0.315328,0.0257972,-0.000148911,-0.000553771,0.11289], - "R": [ - [0.8250072615,0.03741598225,0.5638821355], - [-0.06134414867,0.997839028,0.02354080738], - [-0.5617827996,-0.05401220659,0.8255196955] - ], - "t": [ - [18.96573731], - [189.9536973], - [269.3804852] - ] - }, - { - "name": "16_07", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 7, - "K": [ - [748.144,0,375.351], - [0,748.158,222.981], - [0,0,1] - ], - "distCoef": [-0.330846,0.0923667,0.000924419,-0.000952259,0.0155541], - "R": [ - [0.837010476,0.04764620621,0.5451085232], - [-0.06946161724,0.9973944363,0.0194787641], - [-0.542760119,-0.05416804921,0.8381391744] - ], - "t": [ - [-3.044263505], - [177.2440129], - [269.3681033] - ] - }, - { - "name": "16_08", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 8, - "K": [ - [744.865,0,367.243], - [0,744.958,216.687], - [0,0,1] - ], - "distCoef": [-0.318901,0.0494498,-4.02299e-05,-0.00132469,0.0675277], - "R": [ - [0.820488273,0.02086231711,0.571282555], - [-0.05401864215,0.9976917237,0.04114864192], - [-0.569105421,-0.06462188605,0.8197213134] - ], - "t": [ - [-19.55260409], - [185.7078501], - [268.0867658] - ] - }, - { - "name": "16_09", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 9, - "K": [ - [747.002,0,387.115], - [0,747.11,221.005], - [0,0,1] - ], - "distCoef": [-0.330535,0.106093,-0.000909516,-0.000158007,-0.000767667], - "R": [ - [0.7988895638,0.03324884852,0.6005580562], - [-0.04929092881,0.9987315997,0.01027599727], - [-0.5994546431,-0.03781145137,0.7995151187] - ], - "t": [ - [-23.46737596], - [164.4653247], - [274.3468777] - ] - }, - { - "name": "16_10", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 10, - "K": [ - [747.13,0,370.332], - [0,747.181,215.13], - [0,0,1] - ], - "distCoef": [-0.317083,0.0321021,0.000973109,0.00011315,0.117938], - "R": [ - [0.8533830718,-0.04475694932,0.5193593633], - [-0.01101437775,0.9945367161,0.1038046423], - [-0.5211679348,-0.09430554471,0.8482278279] - ], - "t": [ - [-57.15311463], - [154.6074069], - [261.7210039] - ] - }, - { - "name": "16_11", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 11, - "K": [ - [743.847,0,352.444], - [0,743.813,257.427], - [0,0,1] - ], - "distCoef": [-0.317406,0.0378558,0.000559662,0.00156409,0.0978841], - "R": [ - [0.8306368039,-0.006305585156,0.5567788965], - [-0.01286906876,0.999451376,0.03051776569], - [-0.5566658666,-0.03251440526,0.8300999496] - ], - "t": [ - [-55.68789985], - [125.5954887], - [272.609285] - ] - }, - { - "name": "16_12", - "type": "vga", - "resolution": [640,480], - "panel": 
16, - "node": 12, - "K": [ - [744.746,0,358.295], - [0,744.902,240.075], - [0,0,1] - ], - "distCoef": [-0.311924,0.00313238,0.000282789,0.000109914,0.161883], - "R": [ - [0.8248636519,0.04296544146,0.5636966618], - [-0.06337887364,0.9978500361,0.01668603434], - [-0.5617678116,-0.04949016272,0.8258133262] - ], - "t": [ - [-45.5470475], - [111.3455785], - [270.6081331] - ] - }, - { - "name": "16_13", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 13, - "K": [ - [742.599,0,373.118], - [0,742.696,232.489], - [0,0,1] - ], - "distCoef": [-0.30659,-0.0244311,-0.000674534,-0.000450328,0.198624], - "R": [ - [0.8431633834,0.1596479738,0.5134082522], - [-0.1755645793,0.9843078819,-0.01775026834], - [-0.5081855837,-0.07516992751,0.8579608934] - ], - "t": [ - [-27.27822308], - [119.4613899], - [265.3318331] - ] - }, - { - "name": "16_14", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 14, - "K": [ - [745.804,0,370.921], - [0,745.998,236.13], - [0,0,1] - ], - "distCoef": [-0.32821,0.0986121,-0.000141995,-6.949e-05,-0.000912797], - "R": [ - [0.8387309717,0.02755081107,0.5438486094], - [-0.05712815546,0.9976599438,0.03756341813], - [-0.5415410705,-0.06257467009,0.8383422211] - ], - "t": [ - [-30.56519475], - [90.10611059], - [268.3571691] - ] - }, - { - "name": "16_15", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 15, - "K": [ - [746.816,0,365.456], - [0,746.849,225.794], - [0,0,1] - ], - "distCoef": [-0.313831,-0.00769663,-0.000408313,0.00132145,0.204366], - "R": [ - [0.832563643,0.03033638007,0.5530980784], - [-0.06055031945,0.9974999941,0.03643378343], - [-0.5506100609,-0.06382370879,0.8323191065] - ], - "t": [ - [-6.42740827], - [88.69840867], - [268.7038743] - ] - }, - { - "name": "16_16", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 16, - "K": [ - [745.958,0,362.302], - [0,745.997,246.977], - [0,0,1] - ], - "distCoef": [-0.334292,0.102923,-0.000499879,-0.000549652,0.00793805], - "R": [ - [0.8469636173,0.04048111503,0.5301074517], - [-0.08872767491,0.9938758,0.0658657255], - [-0.5241946497,-0.1028210748,0.8453684379] - ], - "t": [ - [4.584618298], - [109.8657875], - [264.6056558] - ] - }, - { - "name": "16_17", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 17, - "K": [ - [743.409,0,347.233], - [0,743.501,244.449], - [0,0,1] - ], - "distCoef": [-0.321337,0.060438,0.000289347,-0.000274585,0.0540146], - "R": [ - [0.8338949711,0.06176137043,0.5484566622], - [-0.07967791451,0.9967809419,0.008898524832], - [-0.5461415633,-0.05112031815,0.8361316319] - ], - "t": [ - [32.73506114], - [91.25662398], - [270.2531272] - ] - }, - { - "name": "16_18", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 18, - "K": [ - [745.291,0,372.769], - [0,745.233,242.994], - [0,0,1] - ], - "distCoef": [-0.333422,0.127228,0.000470045,-0.000171948,-0.0533425], - "R": [ - [0.83476387,0.01583088955,0.5503804723], - [-0.006383142992,0.9997976531,-0.01907638369], - [-0.5505711006,0.01241111862,0.8346960089] - ], - "t": [ - [48.20146308], - [84.31846371], - [276.1979749] - ] - }, - { - "name": "16_19", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 19, - "K": [ - [746.318,0,365.802], - [0,746.439,228.058], - [0,0,1] - ], - "distCoef": [-0.329752,0.106043,0.000413141,0.00102356,-0.00232913], - "R": [ - [0.812564017,0.08482803737,0.576666214], - [-0.09768913876,0.9951785947,-0.008740529432], - [-0.5746273144,-0.04923178609,0.8169330944] - ], - "t": [ - [39.50134988], - [124.7306793], 
- [269.4016435] - ] - }, - { - "name": "16_20", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 20, - "K": [ - [745.104,0,371.377], - [0,745.158,252.192], - [0,0,1] - ], - "distCoef": [-0.317414,0.0233642,0.000269725,0.000539732,0.145301], - "R": [ - [0.8445515108,0.05428741136,0.5327153297], - [-0.06949119822,0.9975462456,0.00851241329], - [-0.5309460603,-0.04420819807,0.8462516862] - ], - "t": [ - [17.33430135], - [146.0606392], - [271.3134014] - ] - }, - { - "name": "16_21", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 21, - "K": [ - [744.321,0,365.126], - [0,744.44,221.253], - [0,0,1] - ], - "distCoef": [-0.310945,0.00293318,4.64093e-05,-0.000454281,0.146346], - "R": [ - [0.8382052649,0.09941648006,0.5362166515], - [-0.1229674254,0.9923765769,0.008230548616], - [-0.531310593,-0.07283607028,0.8440402601] - ], - "t": [ - [5.636303812], - [160.8368098], - [266.310691] - ] - }, - { - "name": "16_22", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 22, - "K": [ - [745.695,0,387.973], - [0,745.975,222.039], - [0,0,1] - ], - "distCoef": [-0.325844,0.0780224,-0.000861123,0.000487347,0.0459906], - "R": [ - [0.8503320636,-0.003175777979,0.52623692], - [-0.02504000004,0.9986049625,0.04648792516], - [-0.5256504352,-0.05270714583,0.8490662971] - ], - "t": [ - [-29.03965018], - [141.2975723], - [268.9897195] - ] - }, - { - "name": "16_23", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 23, - "K": [ - [746.757,0,385.384], - [0,746.697,250.739], - [0,0,1] - ], - "distCoef": [-0.330103,0.0993513,0.000581277,0.0005991,0.0043047], - "R": [ - [0.8172674448,0.1129970073,0.565071323], - [-0.1204798393,0.992420693,-0.02420281713], - [-0.5635233199,-0.0482995277,0.8246869852] - ], - "t": [ - [1.484048414], - [120.2737991], - [270.3939501] - ] - }, - { - "name": "16_24", - "type": "vga", - "resolution": [640,480], - "panel": 16, - "node": 24, - "K": [ - [743.909,0,365.262], - [0,744.1,225.983], - [0,0,1] - ], - "distCoef": [-0.309366,-0.0151251,-0.000569796,0.000128233,0.192772], - "R": [ - [0.8488529257,0.0258708029,0.5279956553], - [-0.02681353424,0.9996232069,-0.005871843729], - [-0.5279486195,-0.009173097852,0.8492267715] - ], - "t": [ - [-1.170097817], - [104.9858918], - [274.723166] - ] - }, - { - "name": "17_01", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 1, - "K": [ - [743.511,0,382.741], - [0,744.07,233.668], - [0,0,1] - ], - "distCoef": [-0.303608,-0.0460126,4.19904e-05,0.000729649,0.232264], - "R": [ - [0.7426987355,0.03664601822,-0.6686222084], - [-0.01756201576,0.9992239229,0.035258014], - [0.6693953719,-0.01444372865,0.742765922] - ], - "t": [ - [27.30884403], - [110.2809812], - [269.7471778] - ] - }, - { - "name": "17_02", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 2, - "K": [ - [744.491,0,371.868], - [0,744.58,223.545], - [0,0,1] - ], - "distCoef": [-0.320104,0.0388113,-0.000303412,-0.00118762,0.0743207], - "R": [ - [0.773334615,0.1038173874,-0.6254402635], - [-0.04654036662,0.9931361468,0.107306049], - [0.6322875671,-0.05387526291,0.7728582591] - ], - "t": [ - [68.17402308], - [125.7906344], - [263.8293382] - ] - }, - { - "name": "17_03", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 3, - "K": [ - [744.096,0,373.775], - [0,744.072,232.317], - [0,0,1] - ], - "distCoef": [-0.314223,0.0332024,-0.000194112,2.11963e-05,0.079313], - "R": [ - [0.7946878724,-0.02084896757,-0.6066601239], - [0.03470365887,0.999335828,0.01111570764], - 
[0.6060254462,-0.02988684405,0.7948835985] - ], - "t": [ - [55.17367606], - [148.0232969], - [266.1261169] - ] - }, - { - "name": "17_04", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 4, - "K": [ - [748.225,0,373.118], - [0,748.618,236.287], - [0,0,1] - ], - "distCoef": [-0.325852,0.0883394,-0.000431944,-0.00077703,0.0075009], - "R": [ - [0.7874797118,0.07165214706,-0.6121614766], - [-0.03177741847,0.9966185482,0.07577377574], - [0.6155208357,-0.04021739967,0.7870938073] - ], - "t": [ - [46.04066644], - [153.679907], - [265.8341529] - ] - }, - { - "name": "17_05", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 5, - "K": [ - [745.23,0,378.585], - [0,745.614,229.474], - [0,0,1] - ], - "distCoef": [-0.323397,0.071697,-0.000659822,0.000678056,0.0530686], - "R": [ - [0.7680042357,0.04160049173,-0.6390922414], - [0.01355248597,0.9966090615,0.08115854064], - [0.6403013541,-0.07099139161,0.7648361904] - ], - "t": [ - [29.31016003], - [185.453895], - [261.9380867] - ] - }, - { - "name": "17_06", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 6, - "K": [ - [742.876,0,352.101], - [0,743.303,231.794], - [0,0,1] - ], - "distCoef": [-0.319343,0.0421325,-0.000546468,-1.33187e-05,0.10149], - "R": [ - [0.8064347587,0.08751734637,-0.584810819], - [-0.03388642915,0.9942014648,0.1020546777], - [0.5903513275,-0.062483289,0.8047242688] - ], - "t": [ - [35.39857301], - [188.6248332], - [262.8234665] - ] - }, - { - "name": "17_07", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 7, - "K": [ - [745.054,0,358.779], - [0,745.36,231.687], - [0,0,1] - ], - "distCoef": [-0.309912,-0.00132311,-0.00013553,-0.000280643,0.151777], - "R": [ - [0.7882500993,-0.004275732235,-0.615340149], - [0.05540043824,0.996408109,0.06404429605], - [0.612856078,-0.08457303664,0.7856556683] - ], - "t": [ - [-7.246792888], - [183.4614511], - [259.402568] - ] - }, - { - "name": "17_08", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 8, - "K": [ - [745.254,0,343.02], - [0,745.689,227.622], - [0,0,1] - ], - "distCoef": [-0.309897,-0.0109758,-0.00111103,0.000256129,0.180098], - "R": [ - [0.7946287881,0.03514926038,-0.6060772382], - [0.01090423253,0.9973351466,0.07213669658], - [0.6069976827,-0.06393070292,0.7921279432] - ], - "t": [ - [-18.41109561], - [184.5517176], - [263.9542066] - ] - }, - { - "name": "17_09", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 9, - "K": [ - [745.379,0,338.137], - [0,745.543,245.392], - [0,0,1] - ], - "distCoef": [-0.314138,0.0142784,0.00088856,-0.00114362,0.123117], - "R": [ - [0.7570044814,0.09852948519,-0.6459381981], - [-0.05745310106,0.9947735679,0.08440787789], - [0.6508789107,-0.02678598925,0.7587088733] - ], - "t": [ - [-40.16389387], - [164.132571], - [267.7674295] - ] - }, - { - "name": "17_10", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 10, - "K": [ - [743.633,0,369.381], - [0,743.739,253.863], - [0,0,1] - ], - "distCoef": [-0.313678,0.00191444,-0.000367883,0.000526793,0.16208], - "R": [ - [0.7732990879,0.03177464522,-0.6332447335], - [0.01440724919,0.9976050167,0.06765102948], - [0.6338777104,-0.06143779407,0.7709892643] - ], - "t": [ - [-41.17430449], - [148.5957101], - [262.973747] - ] - }, - { - "name": "17_11", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 11, - "K": [ - [749.691,0,360.347], - [0,749.465,221.979], - [0,0,1] - ], - "distCoef": [-0.36212,0.288042,0.00167589,0.000680745,-0.303613], - "R": [ - 
[0.7747984815,0.06051645956,-0.629305229], - [-0.01350572868,0.9967652932,0.07922465313], - [0.6320640066,-0.05288391526,0.7731095544] - ], - "t": [ - [-52.93053536], - [133.9502209], - [264.0833713] - ] - }, - { - "name": "17_12", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 12, - "K": [ - [746.505,0,357.704], - [0,746.569,217.534], - [0,0,1] - ], - "distCoef": [-0.312272,-0.0352904,0.000404412,-0.00107082,0.237629], - "R": [ - [0.7725304823,-0.04233401582,-0.633564902], - [0.05994143841,0.9981814314,0.006391704783], - [0.6321421342,-0.04291457833,0.7736631445] - ], - "t": [ - [-62.64410987], - [104.0188122], - [265.010728] - ] - }, - { - "name": "17_13", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 13, - "K": [ - [745.264,0,354.32], - [0,745.302,226.261], - [0,0,1] - ], - "distCoef": [-0.318398,0.0346929,0.000845692,0.000532231,0.122684], - "R": [ - [0.7851484689,0.03204817868,-0.6184778056], - [-0.002225165301,0.9987996914,0.04893081946], - [0.619303585,-0.03704174263,0.784277361] - ], - "t": [ - [-29.19489341], - [103.2650402], - [265.9795804] - ] - }, - { - "name": "17_14", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 14, - "K": [ - [744.589,0,353.058], - [0,744.664,227.639], - [0,0,1] - ], - "distCoef": [-0.324606,0.0822873,0.00100728,-0.000415736,0.0203245], - "R": [ - [0.7765409088,-0.02900211747,-0.6293989944], - [0.06862390156,0.9968904955,0.03873112579], - [0.6263185908,-0.07326811825,0.7761164898] - ], - "t": [ - [-35.65491372], - [89.93385082], - [261.6973052] - ] - }, - { - "name": "17_15", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 15, - "K": [ - [744.009,0,351.118], - [0,743.982,227.187], - [0,0,1] - ], - "distCoef": [-0.31768,0.0289626,0.000394183,-0.00106594,0.077624], - "R": [ - [0.7703409519,0.009578036972,-0.6375602553], - [0.03762675731,0.9974619202,0.06044786963], - [0.6365210484,-0.07055479443,0.7680253746] - ], - "t": [ - [-14.94306331], - [88.85755459], - [261.4804843] - ] - }, - { - "name": "17_16", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 16, - "K": [ - [745.298,0,365.044], - [0,745.641,201.543], - [0,0,1] - ], - "distCoef": [-0.315769,0.0139989,-0.000983596,0.000497246,0.155532], - "R": [ - [0.7668905855,0.04755147693,-0.6400138177], - [0.009922268647,0.9962536216,0.0859084976], - [0.6417011597,-0.07223280706,0.7635457047] - ], - "t": [ - [4.594602528], - [99.8882812], - [261.439958] - ] - }, - { - "name": "17_17", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 17, - "K": [ - [744.772,0,356.238], - [0,744.946,209.811], - [0,0,1] - ], - "distCoef": [-0.307562,-0.0273551,-0.000331097,0.000403566,0.231396], - "R": [ - [0.7386328767,0.1026186384,-0.6662513704], - [-0.03586762178,0.992927984,0.1131703685], - [0.6731530192,-0.05969450264,0.7370899397] - ], - "t": [ - [18.92063539], - [92.1220326], - [263.1909682] - ] - }, - { - "name": "17_18", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 18, - "K": [ - [746.696,0,345.664], - [0,746.883,230.9], - [0,0,1] - ], - "distCoef": [-0.332087,0.135716,-0.000396371,4.15402e-05,-0.0769473], - "R": [ - [0.7676740293,0.0869303765,-0.6349170767], - [-0.05592901251,0.9960646798,0.06875390322], - [0.6383952774,-0.01727030079,0.7695149163] - ], - "t": [ - [48.13164066], - [87.731429], - [267.0873794] - ] - }, - { - "name": "17_19", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 19, - "K": [ - [743.785,0,363.137], - [0,743.962,239.724], - 
[0,0,1] - ], - "distCoef": [-0.322076,0.0699752,0.00130957,8.28091e-06,0.0447641], - "R": [ - [0.7666015958,0.09362030423,-0.6352615462], - [-0.01827880108,0.9920950944,0.1241499457], - [0.6418628193,-0.08356172708,0.7622529495] - ], - "t": [ - [25.25313987], - [133.2656265], - [259.9680703] - ] - }, - { - "name": "17_20", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 20, - "K": [ - [747.071,0,344.427], - [0,747.404,242.981], - [0,0,1] - ], - "distCoef": [-0.349964,0.20917,0.0008789,-0.000586258,-0.211765], - "R": [ - [0.7775513873,0.03007697302,-0.6280996862], - [-0.01270805589,0.999403059,0.03212523871], - [0.6286909777,-0.01699709801,0.7774694548] - ], - "t": [ - [17.35278566], - [137.2956705], - [269.3773006] - ] - }, - { - "name": "17_21", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 21, - "K": [ - [744.669,0,371.314], - [0,744.881,251.475], - [0,0,1] - ], - "distCoef": [-0.32107,0.0528121,0.000172414,0.000961494,0.0921892], - "R": [ - [0.7854342878,0.01663631847,-0.6187214337], - [0.02446292583,0.9980232337,0.05788946549], - [0.6184614336,-0.06060410764,0.7834746947] - ], - "t": [ - [-1.039205356], - [155.8049723], - [263.425936] - ] - }, - { - "name": "17_22", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 22, - "K": [ - [744.126,0,368.359], - [0,744.205,218.365], - [0,0,1] - ], - "distCoef": [-0.306681,-0.0309893,-0.000506643,-0.000551257,0.209183], - "R": [ - [0.7742934088,0.08491898973,-0.6271032469], - [-0.02171436959,0.9939373135,0.1077826651], - [0.6324541115,-0.06983825553,0.771443073] - ], - "t": [ - [-12.48615074], - [146.2169272], - [261.8070617] - ] - }, - { - "name": "17_23", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 23, - "K": [ - [746.439,0,363.854], - [0,746.575,224.032], - [0,0,1] - ], - "distCoef": [-0.333494,0.127943,0.00111227,0.000376509,-0.0438307], - "R": [ - [0.7741360077,0.05745954338,-0.6304060933], - [-0.01777243196,0.9974520988,0.06909016755], - [0.6327697704,-0.04228133707,0.7731847814] - ], - "t": [ - [-14.18178238], - [117.4047924], - [265.0998909] - ] - }, - { - "name": "17_24", - "type": "vga", - "resolution": [640,480], - "panel": 17, - "node": 24, - "K": [ - [745.824,0,346.505], - [0,746.017,224.098], - [0,0,1] - ], - "distCoef": [-0.317434,0.0247137,-0.000866957,0.000304145,0.138958], - "R": [ - [0.7656627697,0.09930116127,-0.6355311184], - [-0.04982185052,0.99419918,0.09531932471], - [0.6413098365,-0.04131912178,0.7661686654] - ], - "t": [ - [7.35512715], - [111.8344509], - [265.0127015] - ] - }, - { - "name": "18_01", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 1, - "K": [ - [744.96,0,372.705], - [0,744.564,226.392], - [0,0,1] - ], - "distCoef": [-0.321978,0.0724692,0.000483988,0.000458946,0.0380169], - "R": [ - [-0.3520669355,0.03279886428,-0.9353999719], - [0.04913052402,0.9986556534,0.01652505738], - [0.9346844732,-0.04013876447,-0.3532050609] - ], - "t": [ - [47.10128491], - [117.3460549], - [266.6541908] - ] - }, - { - "name": "18_02", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 2, - "K": [ - [748.843,0,358.358], - [0,748.813,225.018], - [0,0,1] - ], - "distCoef": [-0.335266,0.148062,0.000634215,-0.00153008,-0.105518], - "R": [ - [-0.3389880085,0.04020239671,-0.9399313259], - [0.04795713663,0.9985260662,0.02541275744], - [0.9395675831,-0.03646179499,-0.3404163544] - ], - "t": [ - [70.51461434], - [125.984952], - [266.5287049] - ] - }, - { - "name": "18_03", - "type": "vga", - "resolution": 
[640,480], - "panel": 18, - "node": 3, - "K": [ - [746.557,0,370.525], - [0,746.643,239.094], - [0,0,1] - ], - "distCoef": [-0.336876,0.137869,0.0006954,0.000424607,-0.0538424], - "R": [ - [-0.3751735108,0.06869685522,-0.9244055273], - [0.01802710881,0.9976021763,0.06682006625], - [0.9267792942,0.008404759824,-0.3755123165] - ], - "t": [ - [58.58769651], - [133.6261971], - [275.7276294] - ] - }, - { - "name": "18_04", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 4, - "K": [ - [744.71,0,356.151], - [0,744.769,223.97], - [0,0,1] - ], - "distCoef": [-0.312604,0.00791514,0.000747313,-0.000519594,0.158336], - "R": [ - [-0.3438161676,0.01243889994,-0.9389545871], - [0.0251972518,0.9996744288,0.00401683712], - [0.9386988555,-0.02227802162,-0.344017657] - ], - "t": [ - [40.26546697], - [152.0702476], - [270.0686857] - ] - }, - { - "name": "18_05", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 5, - "K": [ - [743.927,0,355.392], - [0,744.057,262.153], - [0,0,1] - ], - "distCoef": [-0.316206,0.0381773,0.00109867,0.000112775,0.102099], - "R": [ - [-0.3913025917,0.04706716523,-0.9190576498], - [0.07535158968,0.9969764632,0.0189755056], - [0.9171719684,-0.0618272904,-0.3936660596] - ], - "t": [ - [27.50168157], - [183.5367771], - [265.1462318] - ] - }, - { - "name": "18_06", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 6, - "K": [ - [744.89,0,353.646], - [0,744.816,246.705], - [0,0,1] - ], - "distCoef": [-0.311434,-0.0151537,0.000898898,0.00113623,0.19919], - "R": [ - [-0.3540366423,0.02766248657,-0.9348223589], - [0.06855079724,0.9976412764,0.003559761167], - [0.9327158432,-0.06282253209,-0.3550978532] - ], - "t": [ - [15.12228299], - [191.0759947], - [263.959739] - ] - }, - { - "name": "18_07", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 7, - "K": [ - [744.21,0,382.066], - [0,744.474,221.564], - [0,0,1] - ], - "distCoef": [-0.318836,0.0439442,-0.000310088,0.000693195,0.0844966], - "R": [ - [-0.3784097731,0.01208936744,-0.9255592314], - [0.03775536538,0.9992841689,-0.002383732641], - [0.9248678695,-0.03584685469,-0.3785953341] - ], - "t": [ - [-11.73143391], - [170.7040215], - [268.2801795] - ] - }, - { - "name": "18_08", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 8, - "K": [ - [744.996,0,378.911], - [0,745.249,217.173], - [0,0,1] - ], - "distCoef": [-0.317298,0.0439499,-0.000470842,0.000645598,0.0800391], - "R": [ - [-0.3573644405,-0.02168005213,-0.9337133564], - [0.09030348924,0.9942444419,-0.05764780686], - [0.9295891224,-0.1049188503,-0.3533498244] - ], - "t": [ - [-32.18764663], - [193.5958696], - [255.9258617] - ] - }, - { - "name": "18_09", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 9, - "K": [ - [745.488,0,367.703], - [0,745.136,254.274], - [0,0,1] - ], - "distCoef": [-0.333608,0.117291,0.00107107,0.000590786,-0.0167148], - "R": [ - [-0.3755971335,-0.01611847579,-0.9266428589], - [0.03486308067,0.9988953473,-0.03150636014], - [0.9261270749,-0.0441393233,-0.3746202894] - ], - "t": [ - [-52.11061688], - [162.8813669], - [265.66749] - ] - }, - { - "name": "18_10", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 10, - "K": [ - [746.691,0,377.016], - [0,746.35,247.895], - [0,0,1] - ], - "distCoef": [-0.324348,0.0759263,0.000632098,0.000973799,0.0365142], - "R": [ - [-0.3979832561,-0.05264507275,-0.9158809007], - [0.03842303812,0.9965195246,-0.07397639654], - [0.9165876925,-0.06463229393,-0.3945753015] - ], - "t": [ - [-58.47639535], 
- [144.7851801], - [261.4908418] - ] - }, - { - "name": "18_11", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 11, - "K": [ - [743.499,0,383.73], - [0,743.269,228.607], - [0,0,1] - ], - "distCoef": [-0.318101,0.0343673,-0.000192972,9.02677e-05,0.0940376], - "R": [ - [-0.3591156591,-0.0799459609,-0.9298626709], - [0.01693912278,0.9956019804,-0.09213990831], - [0.9331393302,-0.04883994185,-0.356182047] - ], - "t": [ - [-65.19666066], - [124.1115675], - [265.1913912] - ] - }, - { - "name": "18_12", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 12, - "K": [ - [744.847,0,377.843], - [0,744.539,240.133], - [0,0,1] - ], - "distCoef": [-0.322594,0.0777366,0.000608553,0.000730506,0.0395492], - "R": [ - [-0.3599917326,-0.04959232233,-0.9316364924], - [0.02914279324,0.9975011607,-0.0643593979], - [0.9325002145,-0.05031934083,-0.3576469123] - ], - "t": [ - [-57.61171896], - [105.5688064], - [264.3974594] - ] - }, - { - "name": "18_13", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 13, - "K": [ - [742.264,0,386.065], - [0,742.375,236.247], - [0,0,1] - ], - "distCoef": [-0.316238,0.0182785,-0.000395794,0.00144239,0.136479], - "R": [ - [-0.3232019546,0.03338047233,-0.9457411066], - [0.05161368011,0.9985119503,0.01760435083], - [0.9449214383,-0.04312341834,-0.324443903] - ], - "t": [ - [61.04698375], - [97.35388185], - [264.1973208] - ] - }, - { - "name": "18_14", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 14, - "K": [ - [744.531,0,362.517], - [0,744.694,222.936], - [0,0,1] - ], - "distCoef": [-0.323155,0.0551,-0.000315217,0.00114443,0.0791805], - "R": [ - [-0.3124904102,0.02154150537,-0.9496766329], - [-0.004629448499,0.999696432,0.02419942065], - [0.9499096335,0.01195856595,-0.3122958229] - ], - "t": [ - [-14.02426098], - [68.46079663], - [270.3325449] - ] - }, - { - "name": "18_15", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 15, - "K": [ - [747.429,0,398.562], - [0,747.425,233.615], - [0,0,1] - ], - "distCoef": [-0.333617,0.122405,0.000303778,0.00134383,-0.0202721], - "R": [ - [-0.358025731,-0.0142572014,-0.9336028643], - [0.04081564607,0.9986886699,-0.03090345813], - [0.9328191995,-0.04916983726,-0.3569743242] - ], - "t": [ - [-8.683192747], - [83.02873835], - [264.4620974] - ] - }, - { - "name": "18_16", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 16, - "K": [ - [742.757,0,357.304], - [0,742.66,220.331], - [0,0,1] - ], - "distCoef": [-0.305443,-0.0527047,-0.000521453,0.00022453,0.250047], - "R": [ - [-0.3364590891,0.05374146283,-0.9401633563], - [0.05791647683,0.99766121,0.03630140184], - [0.9399154021,-0.04223701264,-0.3387846981] - ], - "t": [ - [20.062846], - [91.33983095], - [265.2581766] - ] - }, - { - "name": "18_17", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 17, - "K": [ - [750.787,0,361.922], - [0,750.723,216.611], - [0,0,1] - ], - "distCoef": [-0.368257,0.303211,-0.00101236,-0.000679192,-0.335284], - "R": [ - [-0.3521002367,0.0154136189,-0.9358353721], - [0.04957845599,0.9987678018,-0.002203336065], - [0.9346482761,-0.04717306796,-0.3524305629] - ], - "t": [ - [32.75189895], - [90.38015946], - [265.2110414] - ] - }, - { - "name": "18_18", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 18, - "K": [ - [745.69,0,366.196], - [0,745.645,224.452], - [0,0,1] - ], - "distCoef": [-0.325076,0.0695314,0.000207452,8.09151e-05,0.0569118], - "R": [ - [-0.369329094,-0.008664471876,-0.929258278], - 
[0.06369637747,0.997368813,-0.03461534879], - [0.9271131494,-0.07197484145,-0.3678054246] - ], - "t": [ - [-35.28307581], - [111.055802], - [261.8818226] - ] - }, - { - "name": "18_19", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 19, - "K": [ - [745.552,0,357.301], - [0,745.545,223.113], - [0,0,1] - ], - "distCoef": [-0.320101,0.042192,0.00043748,0.000103204,0.104558], - "R": [ - [-0.3584191226,-0.04877846794,-0.9322855752], - [0.07086164718,0.9943315632,-0.07926770686], - [0.9308675306,-0.09447435344,-0.3529309238] - ], - "t": [ - [16.14340371], - [139.4376601], - [259.6452388] - ] - }, - { - "name": "18_20", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 20, - "K": [ - [746.078,0,363.03], - [0,746.077,221.582], - [0,0,1] - ], - "distCoef": [-0.321359,0.0569666,0.000169599,0.000938787,0.0797635], - "R": [ - [-0.3631410096,0.0448531679,-0.9306539639], - [0.06634832184,0.9975497918,0.02218813063], - [0.9293688758,-0.05368990856,-0.3652271709] - ], - "t": [ - [21.37501917], - [147.345749], - [265.5705493] - ] - }, - { - "name": "18_21", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 21, - "K": [ - [745.043,0,372.293], - [0,745.076,222.901], - [0,0,1] - ], - "distCoef": [-0.317484,0.0404748,0.000192535,-0.000111527,0.0957966], - "R": [ - [-0.3461967977,-0.005928135698,-0.9381431844], - [0.04577092509,0.9986824948,-0.02320122706], - [0.937044716,-0.05097187193,-0.3454693453] - ], - "t": [ - [-0.5259425122], - [153.3372726], - [265.7616305] - ] - }, - { - "name": "18_22", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 22, - "K": [ - [745.252,0,401.788], - [0,745.346,245.295], - [0,0,1] - ], - "distCoef": [-0.315494,0.0267895,-0.000624877,0.000210937,0.0993279], - "R": [ - [-0.3267831921,-0.004575639121,-0.9450882546], - [0.07739750703,0.9964998407,-0.03158628616], - [0.9419248225,-0.08346934224,-0.3252852558] - ], - "t": [ - [-10.3938656], - [148.3069178], - [261.1183693] - ] - }, - { - "name": "18_23", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 23, - "K": [ - [747.114,0,358.608], - [0,746.941,217.398], - [0,0,1] - ], - "distCoef": [-0.324507,0.0792141,-0.000227367,0.0013287,0.0357905], - "R": [ - [-0.356358404,-0.03218270054,-0.9337949248], - [0.02645826287,0.9986582749,-0.04451528213], - [0.9339746507,-0.04056998648,-0.3550287707] - ], - "t": [ - [-18.04448695], - [115.7023496], - [266.3010308] - ] - }, - { - "name": "18_24", - "type": "vga", - "resolution": [640,480], - "panel": 18, - "node": 24, - "K": [ - [747.28,0,383.407], - [0,747.414,233.333], - [0,0,1] - ], - "distCoef": [-0.321806,0.0494121,-0.000677773,0.00106862,0.0725344], - "R": [ - [-0.3696831614,0.01690678518,-0.9290040478], - [0.03916078476,0.9992295361,0.002601362608], - [0.9283322644,-0.03541884761,-0.3700604169] - ], - "t": [ - [3.487638933], - [110.8874693], - [266.9764809] - ] - }, - { - "name": "19_01", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 1, - "K": [ - [742.815,0,376.349], - [0,742.96,226.412], - [0,0,1] - ], - "distCoef": [-0.311242,0.000676611,0.00127048,0.000398816,0.145683], - "R": [ - [-0.9986287013,0.0334613179,0.04026235479], - [0.03051664863,0.9969627365,-0.07165218936], - [-0.04253764409,-0.07032526067,-0.99661673] - ], - "t": [ - [47.87451164], - [124.5257469], - [265.3025885] - ] - }, - { - "name": "19_02", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 2, - "K": [ - [746.352,0,362.211], - [0,746.799,224.495], - [0,0,1] - ], - "distCoef": 
[-0.33354,0.113916,-0.000650978,0.00200875,0.00369896], - "R": [ - [-0.9978769066,0.0627015602,0.01761231284], - [0.06225819076,0.9977547513,-0.02468550225], - [-0.01912058832,-0.02353658189,-0.9995401105] - ], - "t": [ - [76.18899734], - [119.4504319], - [269.470097] - ] - }, - { - "name": "19_03", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 3, - "K": [ - [744.923,0,335.897], - [0,744.843,232.622], - [0,0,1] - ], - "distCoef": [-0.310786,-0.00740435,0.000477261,-0.00048183,0.169837], - "R": [ - [-0.9959217828,0.05942221639,0.06788816328], - [0.05820019172,0.9981077555,-0.01984051806], - [-0.06893866983,-0.0158085,-0.9974956397] - ], - "t": [ - [57.6907282], - [139.716188], - [274.5941587] - ] - }, - { - "name": "19_04", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 4, - "K": [ - [745.3,0,371.455], - [0,745.339,223.979], - [0,0,1] - ], - "distCoef": [-0.316788,0.039021,-0.00160053,-0.000126119,0.09467], - "R": [ - [-0.995350133,0.07444232287,0.06112653567], - [0.06997485872,0.994930028,-0.0722340534], - [-0.06619389658,-0.06762085396,-0.9955128267] - ], - "t": [ - [42.04206067], - [161.4993909], - [266.5642499] - ] - }, - { - "name": "19_05", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 5, - "K": [ - [741.339,0,353.354], - [0,741.563,231.192], - [0,0,1] - ], - "distCoef": [-0.304803,-0.0634451,-0.00114618,-0.000982934,0.282182], - "R": [ - [-0.9964181101,0.07478982294,0.03946431643], - [0.07096423127,0.993341211,-0.09075966339], - [-0.04598943103,-0.08763401739,-0.9950905744] - ], - "t": [ - [45.56899486], - [188.2245222], - [262.1501617] - ] - }, - { - "name": "19_06", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 6, - "K": [ - [745.947,0,350.894], - [0,746.217,234.332], - [0,0,1] - ], - "distCoef": [-0.313212,0.0178381,0.000340441,0.00055626,0.126083], - "R": [ - [-0.9969018679,0.07865171151,0.0007576151751], - [0.07854654264,0.9959829876,-0.04299219736], - [-0.004135981729,-0.0427994938,-0.9990751208] - ], - "t": [ - [37.2742824], - [183.4195047], - [270.0123608] - ] - }, - { - "name": "19_07", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 7, - "K": [ - [748.821,0,355.822], - [0,748.684,217.17], - [0,0,1] - ], - "distCoef": [-0.342444,0.16602,-0.000477836,-0.000195363,-0.106824], - "R": [ - [-0.9928808048,-0.04900785176,0.10856306], - [-0.05236016128,0.998228751,-0.02824489671], - [-0.106986546,-0.0337281951,-0.9936882247] - ], - "t": [ - [-31.49326377], - [168.7489309], - [271.4480177] - ] - }, - { - "name": "19_08", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 8, - "K": [ - [747.238,0,359.034], - [0,747.474,233.038], - [0,0,1] - ], - "distCoef": [-0.313675,0.00436645,0.000419802,0.000604189,0.154068], - "R": [ - [-0.9913876468,0.02931278851,0.127637354], - [0.0192008625,0.9966303068,-0.07974558542], - [-0.1295448208,-0.07660804099,-0.9886098055] - ], - "t": [ - [-44.88902211], - [188.5485089], - [261.5304555] - ] - }, - { - "name": "19_09", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 9, - "K": [ - [743.415,0,332.333], - [0,743.715,235.337], - [0,0,1] - ], - "distCoef": [-0.308464,-0.0208585,-0.00102455,0.000256502,0.207947], - "R": [ - [-0.9954977047,0.04566149696,0.08306231217], - [0.04175753042,0.9979670543,-0.04814631117], - [-0.08509188364,-0.04446106523,-0.9953806232] - ], - "t": [ - [-46.35184093], - [166.6378451], - [268.6077116] - ] - }, - { - "name": "19_10", - "type": "vga", - "resolution": [640,480], - 
"panel": 19, - "node": 10, - "K": [ - [747.206,0,362.728], - [0,747.412,248.496], - [0,0,1] - ], - "distCoef": [-0.340118,0.138855,0.000965068,4.5306e-05,-0.0441245], - "R": [ - [-0.9935175509,0.05252798067,0.1008151146], - [0.05439486481,0.9983935823,0.01585728578], - [-0.09982021218,0.02123831626,-0.9947787991] - ], - "t": [ - [-46.95074625], - [127.5778656], - [276.6370715] - ] - }, - { - "name": "19_11", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 11, - "K": [ - [745.45,0,355.141], - [0,745.641,249.232], - [0,0,1] - ], - "distCoef": [-0.326245,0.10077,0.000216744,-2.37583e-05,-0.0259903], - "R": [ - [-0.9983050345,-0.001439505441,0.05818063101], - [-0.002578079686,0.9998065462,-0.01949932386], - [-0.05814130636,-0.01961626748,-0.9981156198] - ], - "t": [ - [-58.09544547], - [121.7224759], - [272.659258] - ] - }, - { - "name": "19_12", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 12, - "K": [ - [743.805,0,368.42], - [0,744.013,242.015], - [0,0,1] - ], - "distCoef": [-0.323306,0.0785457,-0.00106293,0.000187763,0.0236672], - "R": [ - [-0.9954771119,0.0748660766,0.05848410323], - [0.07512966129,0.9971710788,0.002318097681], - [-0.05814510944,0.006701504052,-0.9982856485] - ], - "t": [ - [-47.8147621], - [97.15541342], - [274.4212668] - ] - }, - { - "name": "19_13", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 13, - "K": [ - [742.693,0,353.966], - [0,742.776,227.014], - [0,0,1] - ], - "distCoef": [-0.307193,-0.0103139,0.000109263,-0.000950495,0.159317], - "R": [ - [-0.9933059489,0.1045971031,0.04901773034], - [0.1016362638,0.9930442478,-0.05944065861], - [-0.05489409585,-0.05406078084,-0.9970276176] - ], - "t": [ - [-21.5323637], - [109.7713479], - [268.3161895] - ] - }, - { - "name": "19_14", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 14, - "K": [ - [742.837,0,362.248], - [0,743.502,226.37], - [0,0,1] - ], - "distCoef": [-0.308934,-0.00321353,-0.0010059,0.000705591,0.156528], - "R": [ - [-0.9919154966,0.0987006026,0.07976113456], - [0.09553429302,0.9945144894,-0.04259259489], - [-0.08352751879,-0.03462833131,-0.995903626] - ], - "t": [ - [-30.66946365], - [84.06052642], - [268.8728165] - ] - }, - { - "name": "19_15", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 15, - "K": [ - [742.618,0,345.237], - [0,742.923,230.439], - [0,0,1] - ], - "distCoef": [-0.302695,-0.0546693,-0.000167537,-0.000784726,0.259585], - "R": [ - [-0.9885523252,0.1391044686,0.05843155954], - [0.1381120085,0.9902000007,-0.02071308279], - [-0.06074021267,-0.01240586611,-0.9980765106] - ], - "t": [ - [-1.26146274], - [74.12977283], - [271.0351679] - ] - }, - { - "name": "19_16", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 16, - "K": [ - [744.088,0,370.473], - [0,744.417,231.755], - [0,0,1] - ], - "distCoef": [-0.300902,-0.0664899,-0.000333311,0.000589361,0.253926], - "R": [ - [-0.9917390399,0.06178336486,0.1124121551], - [0.06447509535,0.9977094298,0.02046596672], - [-0.1108902109,0.02754468261,-0.9934508803] - ], - "t": [ - [-3.269853258], - [73.62667861], - [274.8694227] - ] - }, - { - "name": "19_17", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 17, - "K": [ - [745.582,0,373.528], - [0,745.86,237.254], - [0,0,1] - ], - "distCoef": [-0.322134,0.0530706,-0.000603814,0.00101303,0.0846746], - "R": [ - [-0.9897330936,0.1313546283,0.05634150462], - [0.1318000226,0.9912672261,0.00424742025], - [-0.05529156869,0.01162962396,-0.9984025212] - ], - "t": [ - 
[37.3391924], - [70.20661568], - [273.1392775] - ] - }, - { - "name": "19_18", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 18, - "K": [ - [742.542,0,374.105], - [0,742.758,223.273], - [0,0,1] - ], - "distCoef": [-0.306762,-0.0452572,-0.00032402,-0.000364469,0.245651], - "R": [ - [-0.9920842372,0.1065981921,0.06637538524], - [0.106818653,0.9942784937,-0.0002288198192], - [-0.06602000984,0.006863120707,-0.9977946963] - ], - "t": [ - [52.26513597], - [79.91641464], - [273.9509772] - ] - }, - { - "name": "19_19", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 19, - "K": [ - [744.378,0,361.433], - [0,744.589,244.618], - [0,0,1] - ], - "distCoef": [-0.310422,-0.000364242,-0.000710118,0.000839407,0.169675], - "R": [ - [-0.9919054981,0.126974259,0.001010166835], - [0.1269495258,0.9918188066,-0.01338927975], - [-0.002701996339,-0.01315266,-0.9999098493] - ], - "t": [ - [49.23489662], - [110.9052228], - [271.6142806] - ] - }, - { - "name": "19_20", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 20, - "K": [ - [745.72,0,364.99], - [0,745.913,248.461], - [0,0,1] - ], - "distCoef": [-0.32476,0.0791445,0.000409065,0.000522525,0.0385155], - "R": [ - [-0.9808466558,0.1869185946,0.05478391053], - [0.1851721888,0.9820671342,-0.03543168776], - [-0.06042431929,-0.02460859583,-0.9978693896] - ], - "t": [ - [40.23583817], - [134.9359413], - [272.7493911] - ] - }, - { - "name": "19_21", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 21, - "K": [ - [745.966,0,347.023], - [0,745.905,254.016], - [0,0,1] - ], - "distCoef": [-0.312122,-0.0171046,0.00101358,-9.38575e-05,0.213424], - "R": [ - [-0.9944456328,0.07811965146,0.07053512206], - [0.07435713108,0.9957422838,-0.0544823029], - [-0.07449094204,-0.04893489886,-0.9960203187] - ], - "t": [ - [2.247391851], - [153.0572023], - [268.8284628] - ] - }, - { - "name": "19_22", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 22, - "K": [ - [743.607,0,364.935], - [0,743.756,243.53], - [0,0,1] - ], - "distCoef": [-0.311531,0.000696399,0.00010932,-0.000314324,0.159615], - "R": [ - [-0.9924188487,0.09367860135,0.07955594568], - [0.08900119243,0.9941960017,-0.06044086279], - [-0.0847562186,-0.05290207743,-0.9949963586] - ], - "t": [ - [-15.3150092], - [142.5037842], - [267.7211288] - ] - }, - { - "name": "19_23", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 23, - "K": [ - [743.508,0,369.721], - [0,743.449,243.575], - [0,0,1] - ], - "distCoef": [-0.309744,-0.0191119,0.000292611,0.000847107,0.198605], - "R": [ - [-0.9987856124,0.03694807636,0.03259049098], - [0.03470669556,0.9971594314,-0.06684694127], - [-0.03496778135,-0.06563465492,-0.997230839] - ], - "t": [ - [-6.799650163], - [123.3743131], - [267.1549958] - ] - }, - { - "name": "19_24", - "type": "vga", - "resolution": [640,480], - "panel": 19, - "node": 24, - "K": [ - [742.775,0,379.613], - [0,742.864,224.449], - [0,0,1] - ], - "distCoef": [-0.316586,0.0333112,-0.000180777,0.00112675,0.112087], - "R": [ - [-0.9947573056,0.06853183176,0.07590316848], - [0.05765365411,0.9888586451,-0.1372393391], - [-0.08446276764,-0.1321437401,-0.9876254719] - ], - "t": [ - [4.340029177], - [136.5307812], - [258.2193706] - ] - }, - { - "name": "20_01", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 1, - "K": [ - [745.267,0,367.511], - [0,745.253,228.976], - [0,0,1] - ], - "distCoef": [-0.316421,0.0232694,0.000233523,0.00095017,0.129164], - "R": [ - 
[-0.2595515744,0.03264633198,0.965177288], - [-0.02439656235,0.9988878376,-0.04034718866], - [-0.9654210418,-0.03401918423,-0.2584664527] - ], - "t": [ - [43.91564589], - [114.6472759], - [269.2437955] - ] - }, - { - "name": "20_02", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 2, - "K": [ - [746.737,0,383.621], - [0,746.553,234.139], - [0,0,1] - ], - "distCoef": [-0.330711,0.126048,0.000259954,-0.000232797,-0.067441], - "R": [ - [-0.2600597375,0.03354081135,0.965009817], - [-0.06475754991,0.9965406566,-0.05208818886], - [-0.9634185968,-0.07603771211,-0.2569880808] - ], - "t": [ - [63.03617994], - [136.0112472], - [264.2112923] - ] - }, - { - "name": "20_03", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 3, - "K": [ - [748.567,0,371.842], - [0,748.646,223.378], - [0,0,1] - ], - "distCoef": [-0.332561,0.132401,-0.000978802,0.0010132,-0.0596871], - "R": [ - [-0.2517963519,0.03200567411,0.967250864], - [0.0115205721,0.9994813079,-0.03007310314], - [-0.9677116686,0.003570985655,-0.2520344708] - ], - "t": [ - [55.32226207], - [135.5872215], - [276.5287505] - ] - }, - { - "name": "20_04", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 4, - "K": [ - [747.412,0,375.731], - [0,747.545,213.638], - [0,0,1] - ], - "distCoef": [-0.324984,0.0823763,-0.00190711,0.0010176,0.0382164], - "R": [ - [-0.2864406942,-0.001302983566,0.9580970885], - [-0.1193951903,0.9922525608,-0.03434594761], - [-0.9506295373,-0.1242302613,-0.2843770823] - ], - "t": [ - [40.5108683], - [178.4576708], - [254.9563649] - ] - }, - { - "name": "20_05", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 5, - "K": [ - [747.818,0,377.646], - [0,748.63,232.294], - [0,0,1] - ], - "distCoef": [-0.327048,0.100477,-0.00250563,-0.000951363,0.00505748], - "R": [ - [-0.2682590325,-0.01756457816,0.9631866782], - [-0.1175373506,0.9929607203,-0.014628026], - [-0.9561496027,-0.1171345104,-0.2684351761] - ], - "t": [ - [28.10870602], - [198.6254244], - [256.0861594] - ] - }, - { - "name": "20_06", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 6, - "K": [ - [744.281,0,376.164], - [0,744.733,212.764], - [0,0,1] - ], - "distCoef": [-0.314115,0.0261091,-0.00186017,0.000146826,0.111047], - "R": [ - [-0.2995512244,0.02650351378,0.9537120256], - [-0.1164678133,0.9911222418,-0.06412449085], - [-0.9469447251,-0.1302853239,-0.2938050747] - ], - "t": [ - [24.38602287], - [207.7342285], - [252.6787249] - ] - }, - { - "name": "20_07", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 7, - "K": [ - [744.844,0,367.199], - [0,744.885,234.874], - [0,0,1] - ], - "distCoef": [-0.307447,-0.0235368,-0.000447762,-0.000552595,0.198481], - "R": [ - [-0.2246138655,-0.03605175288,0.9737807158], - [-0.1345418425,0.9908917963,0.005651603877], - [-0.965115073,-0.1297448231,-0.2274185059] - ], - "t": [ - [-24.57828512], - [193.807989], - [253.6581871] - ] - }, - { - "name": "20_08", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 8, - "K": [ - [745.265,0,373.297], - [0,745.204,222.406], - [0,0,1] - ], - "distCoef": [-0.322725,0.0753011,-0.00198414,9.48962e-05,0.0496562], - "R": [ - [-0.2740281164,0.007089557403,0.9616955493], - [-0.08615117171,0.9957715968,-0.0318889104], - [-0.9578551911,-0.09158965645,-0.2722586413] - ], - "t": [ - [-24.40184383], - [190.6520913], - [261.5790911] - ] - }, - { - "name": "20_09", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 9, - "K": [ - [743.742,0,376.404], - 
[0,743.442,252.182], - [0,0,1] - ], - "distCoef": [-0.310951,0.0101818,-0.000165117,0.000699519,0.141452], - "R": [ - [-0.234740558,-0.05401621619,0.9705560874], - [-0.06709368181,0.9969740023,0.03925909634], - [-0.9697398147,-0.05590247913,-0.2376543804] - ], - "t": [ - [-60.89112675], - [163.1020008], - [266.420435] - ] - }, - { - "name": "20_10", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 10, - "K": [ - [746.237,0,381.452], - [0,745.998,235.104], - [0,0,1] - ], - "distCoef": [-0.321635,0.0804606,-0.000793429,0.000500703,0.0308776], - "R": [ - [-0.2327490461,-0.03063038999,0.9720543507], - [-0.1073579574,0.9942045343,0.005622535858], - [-0.9665930636,-0.1030491297,-0.2346885731] - ], - "t": [ - [-52.7687065], - [155.650502], - [258.7092289] - ] - }, - { - "name": "20_11", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 11, - "K": [ - [744.465,0,352.406], - [0,744.368,231.635], - [0,0,1] - ], - "distCoef": [-0.307896,-0.0267024,-0.00138959,-0.000489454,0.213952], - "R": [ - [-0.2568719183,-0.003646201445,0.9664385768], - [-0.06909534804,0.997503196,-0.01460160774], - [-0.9639723287,-0.07052715282,-0.256482495] - ], - "t": [ - [-58.11810551], - [133.8270577], - [264.378006] - ] - }, - { - "name": "20_12", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 12, - "K": [ - [744.557,0,351.376], - [0,744.424,216.683], - [0,0,1] - ], - "distCoef": [-0.317479,0.0158652,-0.000659121,-0.00059258,0.147681], - "R": [ - [-0.2372383683,-0.02274879941,0.9711850744], - [-0.1004253449,0.9949438408,-0.001226302928], - [-0.9662467111,-0.09782252214,-0.2383234094] - ], - "t": [ - [-62.35654103], - [118.4734964], - [259.8400796] - ] - }, - { - "name": "20_13", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 13, - "K": [ - [743.07,0,377.102], - [0,743.158,222.988], - [0,0,1] - ], - "distCoef": [-0.29868,-0.0827266,-0.00133003,-0.00119832,0.273178], - "R": [ - [-0.2367527853,-0.03686088138,0.9708704311], - [-0.08746956632,0.9960307636,0.01648614259], - [-0.9676245107,-0.08101847538,-0.2390372628] - ], - "t": [ - [-42.43038274], - [111.3831569], - [262.4188123] - ] - }, - { - "name": "20_14", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 14, - "K": [ - [745.597,0,372.306], - [0,745.414,237.499], - [0,0,1] - ], - "distCoef": [-0.320131,0.0615197,0.00113665,-0.000991542,0.0414761], - "R": [ - [-0.2769894269,0.05383368349,0.9593637433], - [-0.05406721308,0.9959742516,-0.07149843787], - [-0.9593506105,-0.07167443526,-0.2729636999] - ], - "t": [ - [-21.49417033], - [90.7530727], - [264.2254974] - ] - }, - { - "name": "20_15", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 15, - "K": [ - [746.296,0,380.788], - [0,746.161,226.883], - [0,0,1] - ], - "distCoef": [-0.321885,0.0553182,0.000132369,-0.000878491,0.0778662], - "R": [ - [-0.2870302882,0.01079685294,0.9578606588], - [-0.05665486447,0.9979947406,-0.02822630231], - [-0.9562446549,-0.06236926949,-0.2858430237] - ], - "t": [ - [-1.106709776], - [85.82297146], - [264.8070963] - ] - }, - { - "name": "20_16", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 16, - "K": [ - [744.119,0,345.288], - [0,744.112,227.607], - [0,0,1] - ], - "distCoef": [-0.302547,-0.0664079,0.000893953,-0.000627784,0.303861], - "R": [ - [-0.252548592,0.05539030986,0.9659974753], - [-0.08640189331,0.9930807476,-0.07953201617], - [-0.963718798,-0.1035497095,-0.2460153169] - ], - "t": [ - [10.51473419], - [107.4721829], - [260.872486] - ] - }, - { - 
"name": "20_17", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 17, - "K": [ - [745.831,0,353.784], - [0,745.87,219.754], - [0,0,1] - ], - "distCoef": [-0.321082,0.0599511,-0.000750204,0.000386726,0.0615888], - "R": [ - [-0.3124433364,0.0857084176,0.9460619582], - [-0.03834810703,0.9939715084,-0.1027135007], - [-0.9491620432,-0.06837183409,-0.3072730188] - ], - "t": [ - [50.17882687], - [91.39390134], - [262.9120903] - ] - }, - { - "name": "20_18", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 18, - "K": [ - [745.227,0,385.13], - [0,745.129,233.897], - [0,0,1] - ], - "distCoef": [-0.311291,0.0180828,0.00116452,0.000576614,0.0928398], - "R": [ - [-0.2786751196,0.05379991941,0.9588773365], - [-0.03740853519,0.9970639104,-0.06681437094], - [-0.9596565944,-0.0544896994,-0.2758443282] - ], - "t": [ - [57.04086511], - [98.35557378], - [265.4113916] - ] - }, - { - "name": "20_19", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 19, - "K": [ - [746.424,0,373.724], - [0,746.378,215.089], - [0,0,1] - ], - "distCoef": [-0.317589,0.0452179,0.000839363,0.00087423,0.0858828], - "R": [ - [-0.2053627335,-0.023863444,0.9783949528], - [-0.1366627843,0.9906072975,-0.004523879826], - [-0.9690972248,-0.1346392148,-0.2066950671] - ], - "t": [ - [2.454839771], - [148.020868], - [256.5149472] - ] - }, - { - "name": "20_20", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 20, - "K": [ - [744.35,0,378.361], - [0,744.386,245.706], - [0,0,1] - ], - "distCoef": [-0.305792,-0.0298413,-5.26611e-05,9.57392e-05,0.206854], - "R": [ - [-0.2653224987,0.04663873586,0.9630310483], - [-0.08123292055,0.9941966424,-0.07052835541], - [-0.9607315881,-0.09694258412,-0.2599941366] - ], - "t": [ - [23.42848118], - [157.616994], - [260.7931406] - ] - }, - { - "name": "20_21", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 21, - "K": [ - [747.371,0,368.768], - [0,747.344,231.897], - [0,0,1] - ], - "distCoef": [-0.308946,-0.0139041,-0.000755627,-0.000244894,0.190547], - "R": [ - [-0.2375675449,-0.01520768023,0.9712519694], - [-0.09352440886,0.9955903179,-0.007287238765], - [-0.966858235,-0.09256697771,-0.2379422368] - ], - "t": [ - [-12.76210059], - [163.3748289], - [261.1782343] - ] - }, - { - "name": "20_22", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 22, - "K": [ - [746.314,0,371.788], - [0,745.992,237.732], - [0,0,1] - ], - "distCoef": [-0.315167,0.0352154,-0.000828301,0.000312219,0.0891012], - "R": [ - [-0.2145858088,0.0004599306573,0.9767050318], - [-0.07749764501,0.9968390076,-0.017495939], - [-0.9736257216,-0.07944672006,-0.2138718611] - ], - "t": [ - [-33.0373727], - [146.3668194], - [262.1626174] - ] - }, - { - "name": "20_23", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 23, - "K": [ - [746.318,0,371.868], - [0,746.096,236.531], - [0,0,1] - ], - "distCoef": [-0.318459,0.0405311,0.000489761,-0.000285822,0.0876741], - "R": [ - [-0.2554085937,0.004734611177,0.9668216142], - [-0.07039835709,0.9972425561,-0.02348096154], - [-0.9642668311,-0.0740598926,-0.25437101] - ], - "t": [ - [-17.40671779], - [124.2252344], - [264.0602836] - ] - }, - { - "name": "20_24", - "type": "vga", - "resolution": [640,480], - "panel": 20, - "node": 24, - "K": [ - [745.832,0,382.965], - [0,745.816,231.317], - [0,0,1] - ], - "distCoef": [-0.320385,0.0446211,0.00028801,0.00167617,0.104376], - "R": [ - [-0.2362773498,-0.02089730322,0.9714609188], - [-0.1013714927,0.9948433166,-0.003255144035], - 
[-0.9663833786,-0.09924756028,-0.2371773332] - ], - "t": [ - [-5.093436327], - [126.6662443], - [260.9183094] - ] - }, - { - "name": "00_00", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 0, - "K": [ - [1634.03,0,942.792], - [0,1629.73,558.29], - [0,0,1] - ], - "distCoef": [-0.222445,0.199192,8.73054e-05,0.000982243,0.0238445], - "R": [ - [0.1369296663,0.03357591931,-0.9900115778], - [-0.09021094677,0.9956950625,0.02129149064], - [0.9864645212,0.08639444504,0.1393691081] - ], - "t": [ - [20.90028135], - [127.2202879], - [283.1159034] - ] - }, - { - "name": "00_01", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 1, - "K": [ - [1395.91,0,951.559], - [0,1392.24,561.398], - [0,0,1] - ], - "distCoef": [-0.286227,0.183082,-4.29815e-05,0.000644874,-0.0479635], - "R": [ - [0.05337497606,0.02479711619,0.9982666052], - [0.6376765256,0.7684660834,-0.05318390075], - [-0.7684528356,0.6394098699,0.0252043199] - ], - "t": [ - [6.299256813], - [104.397182], - [363.078698] - ] - }, - { - "name": "00_02", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 2, - "K": [ - [1397.02,0,939.355], - [0,1394.04,556.611], - [0,0,1] - ], - "distCoef": [-0.28229,0.173658,-0.000610716,0.000955319,-0.0398628], - "R": [ - [-0.9970491806,0.05290586318,-0.05562284625], - [-0.01182874156,0.6100448884,0.792278559], - [0.07584861407,0.7905986364,-0.6076189463] - ], - "t": [ - [-16.22360931], - [63.30660163], - [381.0181823] - ] - }, - { - "name": "00_03", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 3, - "K": [ - [1395.71,0,949.456], - [0,1392.06,566.648], - [0,0,1] - ], - "distCoef": [-0.281728,0.168097,-0.00021431,1.8072e-05,-0.0371786], - "R": [ - [-0.6216465312,-0.0285781748,0.7827763909], - [0.07448493547,0.9926490654,0.09539301533], - [-0.7797484111,0.117605786,-0.6149482047] - ], - "t": [ - [-14.50346059], - [117.4297203], - [290.1984382] - ] - }, - { - "name": "00_04", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 4, - "K": [ - [1633.26,0,949.479], - [0,1629.32,572.374], - [0,0,1] - ], - "distCoef": [-0.223003,0.185095,-0.000261654,0.00109433,0.0657602], - "R": [ - [-0.5292732399,-0.01229259603,0.8483623811], - [0.636650989,0.6551966806,0.4066851706], - [-0.5608434325,0.7553583268,-0.3389519765] - ], - "t": [ - [-5.411400695], - [80.12176746], - [379.8488129] - ] - }, - { - "name": "00_05", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 5, - "K": [ - [1396.29,0,933.34], - [0,1392.95,560.462], - [0,0,1] - ], - "distCoef": [-0.28733,0.185523,-0.000225825,-0.000143128,-0.0508452], - "R": [ - [-0.9314658579,-0.01073438439,-0.363670357], - [-0.021313424,0.9994579907,0.02508909603], - [0.3632039283,0.03112069687,-0.9311897813] - ], - "t": [ - [-6.050515741], - [143.9213951], - [280.3813532] - ] - }, - { - "name": "00_06", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 6, - "K": [ - [1396.11,0,950.228], - [0,1392.54,548.78], - [0,0,1] - ], - "distCoef": [-0.286481,0.183173,-0.000152555,0.0010664,-0.0482263], - "R": [ - [0.9448241112,-0.04876703013,-0.3239277321], - [-0.2141569626,0.6563150135,-0.7234551806], - [0.2478793944,0.7529092773,0.6096584503] - ], - "t": [ - [-10.023614], - [84.45695974], - [376.925635] - ] - }, - { - "name": "00_07", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 7, - "K": [ - [1395.51,0,947.67], - [0,1392.41,549.081], - [0,0,1] - ], - "distCoef": [-0.286691,0.185163,-6.53256e-05,4.32858e-06,-0.052639], - "R": [ - 
[-0.9419632708,-0.03700247277,0.3336705164], - [0.180351898,0.7825307202,0.5959185052], - [-0.2831578878,0.6215114552,-0.7304417305] - ], - "t": [ - [-5.250326149], - [112.5645453], - [360.2387508] - ] - }, - { - "name": "00_08", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 8, - "K": [ - [1642.7,0,945.082], - [0,1638.64,562.465], - [0,0,1] - ], - "distCoef": [-0.22444,0.208938,-0.000569838,0.000484927,0.0287248], - "R": [ - [0.9544726119,0.01685383959,-0.2978220632], - [-0.03362017317,0.9981191009,-0.05126347965], - [0.2963979035,0.05894241665,0.9532439742] - ], - "t": [ - [-19.67808464], - [136.6798831], - [282.6801175] - ] - }, - { - "name": "00_09", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 9, - "K": [ - [1396.79,0,945.482], - [0,1393.03,542.64], - [0,0,1] - ], - "distCoef": [-0.284259,0.175176,-0.000406823,0.000640552,-0.0406716], - "R": [ - [-0.3169419478,-0.08460972789,0.9446634298], - [-0.1243350249,0.9911238917,0.04705563528], - [-0.9402598595,-0.1025408464,-0.3246486894] - ], - "t": [ - [6.780958613], - [147.0057696], - [260.6395044] - ] - }, - { - "name": "00_10", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 10, - "K": [ - [1393.87,0,944.546], - [0,1390.36,563.199], - [0,0,1] - ], - "distCoef": [-0.285353,0.177704,-0.000109708,0.000471392,-0.0432146], - "R": [ - [0.9503475669,0.04849461332,0.3073886376], - [0.1560494297,0.7803459045,-0.6055648973], - [-0.2692360999,0.6234649483,0.734032275] - ], - "t": [ - [22.71992555], - [112.7759402], - [360.0009328] - ] - }, - { - "name": "00_11", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 11, - "K": [ - [1492.96,0,934.544], - [0,1489.74,547.466], - [0,0,1] - ], - "distCoef": [-0.259288,0.190057,-5.50625e-05,0.00031915,-0.0281283], - "R": [ - [0.8129763959,0.04080422416,-0.5808652124], - [-0.2848486357,0.8979062573,-0.3355973896], - [0.5078687177,0.4382914196,0.7415996205] - ], - "t": [ - [-0.03199165418], - [105.1487628], - [331.4862369] - ] - }, - { - "name": "00_12", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 12, - "K": [ - [1395.93,0,964.611], - [0,1392.67,564.875], - [0,0,1] - ], - "distCoef": [-0.290995,0.19463,-0.000241491,0.000727782,-0.0582663], - "R": [ - [-0.9950957343,0.04321912909,-0.08897520145], - [-0.001969290489,0.8906636271,0.454658581], - [0.09889692354,0.4526040326,-0.886210465] - ], - "t": [ - [24.66653867], - [97.49188585], - [334.8897626] - ] - }, - { - "name": "00_13", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 13, - "K": [ - [1592.21,0,937.375], - [0,1588.39,560.919], - [0,0,1] - ], - "distCoef": [-0.239248,0.229218,0.000137317,0.000315934,-0.0358302], - "R": [ - [-0.2862766934,0.07452649614,-0.9552441867], - [-0.7557457469,0.5952786327,0.2729317047], - [0.588977097,0.8000557173,-0.1140913162] - ], - "t": [ - [-15.47943966], - [60.20818768], - [381.0821849] - ] - }, - { - "name": "00_14", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 14, - "K": [ - [1649.51,0,934.882], - [0,1644.85,568.024], - [0,0,1] - ], - "distCoef": [-0.22365,0.220791,-0.000591343,0.000286172,0.0121962], - "R": [ - [0.827339054,-0.07848137689,0.5561930989], - [0.02005408661,0.9936867625,0.110383204], - [-0.5613447456,-0.08017039095,0.8236897383] - ], - "t": [ - [-7.23447972], - [142.1657406], - [267.9541185] - ] - }, - { - "name": "00_15", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 15, - "K": [ - [1430.11,0,948.926], - [0,1426.48,561.705], - [0,0,1] 
- ], - "distCoef": [-0.277948,0.185701,0.000192514,0.000149713,-0.0424254], - "R": [ - [-0.9997414125,0.006454955712,0.02180462522], - [0.005192647027,0.9983342904,-0.05746025644], - [-0.02213920846,-0.05733217422,-0.9981096519] - ], - "t": [ - [9.642162177], - [134.9258555], - [268.2324221] - ] - }, - { - "name": "00_16", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 16, - "K": [ - [1427.34,0,949.618], - [0,1423.13,548.132], - [0,0,1] - ], - "distCoef": [-0.279453,0.188683,-0.000345265,0.000583475,-0.0479414], - "R": [ - [0.7694875517,0.002369830201,0.6386574134], - [0.2539259376,0.9164213706,-0.3093436433], - [-0.586012394,0.4002077652,0.7045730755] - ], - "t": [ - [4.866150988], - [118.1652356], - [330.6340665] - ] - }, - { - "name": "00_17", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 17, - "K": [ - [1393.35,0,916.395], - [0,1390.34,563.652], - [0,0,1] - ], - "distCoef": [-0.287138,0.186145,7.50854e-05,0.000557424,-0.0513205], - "R": [ - [0.5039250676,0.09465184024,-0.8585456047], - [-0.6050310345,0.7480627966,-0.2726527087], - [0.6164389455,0.6568432701,0.4342348962] - ], - "t": [ - [18.2296155], - [97.71531857], - [361.6667015] - ] - }, - { - "name": "00_18", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 18, - "K": [ - [1542.2,0,947.567], - [0,1538.02,555.168], - [0,0,1] - ], - "distCoef": [-0.245751,0.182006,3.81269e-06,0.000651097,0.00472657], - "R": [ - [-0.4048875531,-0.001022756131,0.9143659133], - [0.3656410889,0.9163838146,0.1629334173], - [-0.8380767647,0.4002994608,-0.3706584387] - ], - "t": [ - [16.25260358], - [116.7586119], - [329.7529305] - ] - }, - { - "name": "00_19", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 19, - "K": [ - [1396.57,0,949.242], - [0,1393.19,554.872], - [0,0,1] - ], - "distCoef": [-0.280864,0.167216,-6.6519e-05,0.000917406,-0.0342733], - "R": [ - [0.7360342296,0.009501079563,0.6768776421], - [0.5173282683,0.6370082142,-0.5714822813], - [-0.4366063167,0.7707984591,0.4639446731] - ], - "t": [ - [-24.15514071], - [74.04862943], - [379.5076537] - ] - }, - { - "name": "00_20", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 20, - "K": [ - [1403.46,0,940.386], - [0,1400.1,552.684], - [0,0,1] - ], - "distCoef": [-0.287177,0.194004,-0.000120001,8.41526e-05,-0.0604614], - "R": [ - [-0.6201222217,0.04052054618,-0.7834580496], - [-0.1302964194,0.9794749929,0.1537907063], - [0.773609251,0.1974508131,-0.6021145267] - ], - "t": [ - [24.4496252], - [140.6900046], - [300.8290806] - ] - }, - { - "name": "00_21", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 21, - "K": [ - [1397.56,0,932.828], - [0,1393.91,562.186], - [0,0,1] - ], - "distCoef": [-0.28642,0.185674,-0.000229601,1.91211e-05,-0.052608], - "R": [ - [-0.2617478675,-0.05032313647,-0.9638234464], - [-0.4532392419,0.8880813121,0.07671878938], - [0.8520928608,0.4569235877,-0.2552618099] - ], - "t": [ - [-8.784671236], - [98.11062797], - [332.9193692] - ] - }, - { - "name": "00_22", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 22, - "K": [ - [1514.1,0,945.861], - [0,1510.18,558.694], - [0,0,1] - ], - "distCoef": [-0.260535,0.216046,-0.000156491,0.000677315,-0.0506741], - "R": [ - [-0.9239818557,-0.0613765916,0.3774790647], - [0.05486070575,0.9555572213,0.289656175], - [-0.3784809549,0.288345818,-0.8795503715] - ], - "t": [ - [-5.224239691], - [110.7456244], - [313.8855054] - ] - }, - { - "name": "00_23", - "type": "hd", - "resolution": [1920,1080], - 
"panel": 0, - "node": 23, - "K": [ - [1572.86,0,941.716], - [0,1568.17,560.048], - [0,0,1] - ], - "distCoef": [-0.240801,0.195963,-0.000444179,0.000458513,0.00455186], - "R": [ - [0.5162966551,0.01335424781,0.856305686], - [0.1418829708,0.9847272537,-0.100903213], - [-0.8445750331,0.173591186,0.506516647] - ], - "t": [ - [2.417701344], - [102.3557555], - [298.3746617] - ] - }, - { - "name": "00_24", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 24, - "K": [ - [1399.63,0,954.539], - [0,1396.27,546.388], - [0,0,1] - ], - "distCoef": [-0.288761,0.190789,4.23479e-05,6.78832e-05,-0.0577764], - "R": [ - [-0.388991142,-0.05987834367,-0.9192934653], - [0.02928793432,0.9965772059,-0.07730517199], - [0.9207758187,-0.05699523376,-0.3859059924] - ], - "t": [ - [-15.12220678], - [134.1751339], - [265.239245] - ] - }, - { - "name": "00_25", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 25, - "K": [ - [1397.66,0,935.585], - [0,1394.65,559.251], - [0,0,1] - ], - "distCoef": [-0.285722,0.183994,-0.000502702,0.000494145,-0.0515729], - "R": [ - [0.7926422733,0.00130484237,-0.6096855943], - [0.04487405742,0.9971605675,0.06047414042], - [0.6080333424,-0.07529342651,0.7903330655] - ], - "t": [ - [4.539475053], - [139.2223569], - [261.6293171] - ] - }, - { - "name": "00_26", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 26, - "K": [ - [1616.8,0,950.116], - [0,1613.47,551.417], - [0,0,1] - ], - "distCoef": [-0.223464,0.185279,-0.00090721,0.000127112,0.0351947], - "R": [ - [-0.7556190155,-0.04350579001,-0.6535649545], - [0.1389994774,0.9644159151,-0.2249023966], - [0.6400930001,-0.2607857146,-0.7226837222] - ], - "t": [ - [-12.5475419], - [141.1612209], - [240.8579734] - ] - }, - { - "name": "00_27", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 27, - "K": [ - [1861.86,0,934.556], - [0,1857.26,552.106], - [0,0,1] - ], - "distCoef": [-0.171511,0.209759,-1.83176e-05,-3.41566e-05,0.211418], - "R": [ - [0.9782876177,0.02697940456,0.2054883178], - [0.02691509764,0.9665557486,-0.2550403151], - [-0.2054967507,0.2550335204,0.9448433674] - ], - "t": [ - [-0.5131666478], - [123.4498457], - [311.6401591] - ] - }, - { - "name": "00_28", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 28, - "K": [ - [1395.57,0,953.143], - [0,1392.36,561.982], - [0,0,1] - ], - "distCoef": [-0.284934,0.181016,0.000127361,0.000271191,-0.0471616], - "R": [ - [-0.6310677524,-0.02949081954,-0.775166939], - [-0.5128354354,0.7656140117,0.3883748207], - [0.5820251782,0.6426238999,-0.4982782509] - ], - "t": [ - [-8.508070023], - [104.2896072], - [361.3816814] - ] - }, - { - "name": "00_29", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 29, - "K": [ - [1400.36,0,939.608], - [0,1397.25,572.603], - [0,0,1] - ], - "distCoef": [-0.286109,0.1878,-0.000309515,0.000886248,-0.0523515], - "R": [ - [0.4887300705,-0.07268882749,-0.8694016635], - [-0.08227020668,0.9882426049,-0.1288726774], - [0.8685473685,0.1345098073,0.4770037531] - ], - "t": [ - [-20.72850042], - [158.8912224], - [289.281465] - ] - }, - { - "name": "00_30", - "type": "hd", - "resolution": [1920,1080], - "panel": 0, - "node": 30, - "K": [ - [1407.21,0,946.883], - [0,1403.86,563.032], - [0,0,1] - ], - "distCoef": [-0.285813,0.195568,-0.000394067,0.000468367,-0.0600751], - "R": [ - [0.08635045426,0.06174190292,0.9943498059], - [0.2147800801,0.9734543185,-0.07909618832], - [-0.9728376618,0.2203965227,0.07079729175] - ], - "t": [ - [13.79078928], - [132.1300437], - 
[306.0754676] - ] - }, - { - "name": "50_01", - "type": "kinect-color", - "resolution": [1920,1080], - "panel": 50, - "node": 1, - "K": [ - [1053.92,0,947.294], - [0,1054.32,535.405], - [0,0,1] - ], - "distCoef": [0.0476403,-0.053786,0.000733314,-0.000579648,0.0122759], - "R": [ - [0.9095307192,0.0006254166507,-0.4156362348], - [-0.003349684277,0.999977422,-0.0058253781], - [0.4156232073,0.006690610494,0.9095122788] - ], - "t": [ - [-15.84850815], - [103.1392168], - [269.3362326] - ] - }, - { - "name": "50_02", - "type": "kinect-color", - "resolution": [1920,1080], - "panel": 50, - "node": 2, - "K": [ - [1058.92,0,971.224], - [0,1059.3,541.276], - [0,0,1] - ], - "distCoef": [0.0485216,-0.0529886,-0.000413578,-0.000171659,0.00909728], - "R": [ - [-0.08404700998,-0.006825065684,-0.9964384169], - [-0.04073006897,0.9991643735,-0.003408260769], - [0.9956290281,0.04029855131,-0.08425476347] - ], - "t": [ - [-4.246538185], - [93.69672118], - [271.0169727] - ] - }, - { - "name": "50_03", - "type": "kinect-color", - "resolution": [1920,1080], - "panel": 50, - "node": 3, - "K": [ - [1050.35,0,971.069], - [0,1050.88,535.343], - [0,0,1] - ], - "distCoef": [0.0482196,-0.0555053,0.000460862,0.000594278,0.0128034], - "R": [ - [-0.9791929995,-0.0009192386581,-0.2029291126], - [0.004325206908,0.9996680429,-0.02539875018], - [0.2028850964,-0.02574798878,-0.9788639736] - ], - "t": [ - [-10.71273011], - [112.0293664], - [269.2258843] - ] - }, - { - "name": "50_04", - "type": "kinect-color", - "resolution": [1920,1080], - "panel": 50, - "node": 4, - "K": [ - [1053.76,0,952.563], - [0,1053.62,535.073], - [0,0,1] - ], - "distCoef": [0.0534802,-0.059505,0.000265754,-0.00038559,0.0128987], - "R": [ - [-0.4973721867,-0.01252789009,0.8674468052], - [-0.05725964091,0.9981894693,-0.01841512904], - [-0.8656455634,-0.05882886558,-0.4971890215] - ], - "t": [ - [-12.12207689], - [119.639642], - [263.8142799] - ] - }, - { - "name": "50_05", - "type": "kinect-color", - "resolution": [1920,1080], - "panel": 50, - "node": 5, - "K": [ - [1061.53,0,963.346], - [0,1061.99,535.689], - [0,0,1] - ], - "distCoef": [0.0450742,-0.0483577,0.000117724,0.00131017,0.00746483], - "R": [ - [0.6332975321,0.02789684006,0.7734054578], - [-0.04440403331,0.9990136015,0.0003253688515], - [-0.772633495,-0.034548377,0.6339115806] - ], - "t": [ - [4.398197962], - [114.449943], - [269.0646085] - ] - }, - { - "name": "50_06", - "type": "kinect-color", - "resolution": [1920,1080], - "panel": 50, - "node": 6, - "K": [ - [1053.8,0,975.87], - [0,1054.44,518.546], - [0,0,1] - ], - "distCoef": [0.0608578,-0.0758877,0.000572907,0.000423304,0.0232485], - "R": [ - [0.9936973916,-0.01776547634,0.1106791841], - [0.08238304881,0.7853099766,-0.6135969963], - [-0.07601662453,0.6188478234,0.7818240495] - ], - "t": [ - [-23.36095562], - [58.01362542], - [350.0526212] - ] - }, - { - "name": "50_07", - "type": "kinect-color", - "resolution": [1920,1080], - "panel": 50, - "node": 7, - "K": [ - [1058.37,0,951.456], - [0,1058.06,537.752], - [0,0,1] - ], - "distCoef": [0.0510704,-0.0625189,-0.000144014,6.68608e-05,0.016463], - "R": [ - [0.4325769754,-0.03234243573,-0.9010167186], - [-0.4868424381,0.832758343,-0.2636247005], - [0.7588554545,0.5526911516,0.344486415] - ], - "t": [ - [-19.0385587], - [87.13576568], - [341.2560709] - ] - }, - { - "name": "50_08", - "type": "kinect-color", - "resolution": [1920,1080], - "panel": 50, - "node": 8, - "K": [ - [1051.92,0,937.937], - [0,1051.86,554.246], - [0,0,1] - ], - "distCoef": 
[0.0499863,-0.0613843,-4.12419e-05,-0.000155211,0.0174279], - "R": [ - [-0.7043873056,-0.07078753835,-0.7062773168], - [-0.4398115151,0.8245196459,0.3559960458], - [0.5571394394,0.5613879923,-0.6119143463] - ], - "t": [ - [-21.03532832], - [82.26745729], - [344.5100871] - ] - }, - { - "name": "50_09", - "type": "kinect-color", - "resolution": [1920,1080], - "panel": 50, - "node": 9, - "K": [ - [1054,0,961.563], - [0,1054.08,544.179], - [0,0,1] - ], - "distCoef": [0.0446773,-0.0530941,0.000226286,-0.000324258,0.0121913], - "R": [ - [-0.8728623151,-0.0989156561,0.4778358211], - [0.2068965126,0.8118396582,0.5459946908], - [-0.4419334927,0.5754407548,-0.6881589393] - ], - "t": [ - [-36.30074608], - [73.0041962], - [346.5857858] - ] - }, - { - "name": "50_10", - "type": "kinect-color", - "resolution": [1920,1080], - "panel": 50, - "node": 10, - "K": [ - [1050.04,0,941.59], - [0,1050.6,559.398], - [0,0,1] - ], - "distCoef": [0.0506861,-0.0636966,0.000195295,-6.41025e-06,0.0181857], - "R": [ - [0.1849149694,0.002001709126,0.9827524852], - [0.5894867579,0.7998990427,-0.1125472514], - [-0.786328059,0.6001312479,0.146733326] - ], - "t": [ - [-12.26435316], - [64.88453925], - [349.5293231] - ] - } - ] -} +{ + "calibDataSource": "160906_calib_norm", + "cameras": [ + { + "name": "01_01", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 1, + "K": [ + [745.698,0,375.512], + [0,745.89,226.023], + [0,0,1] + ], + "distCoef": [-0.324009,0.0732398,-0.000601245,0.000808154,0.0311011], + "R": [ + [0.9609979695,0.02878724306,-0.2750530807], + [-0.05024448072,0.9961896773,-0.07128547526], + [0.2719529274,0.08232509619,0.9587826572] + ], + "t": [ + [-51.56945892], + [143.9587601], + [282.5664691] + ] + }, + { + "name": "01_02", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 2, + "K": [ + [745.462,0,369.225], + [0,745.627,226.687], + [0,0,1] + ], + "distCoef": [-0.336594,0.141798,-0.000612176,0.000160485,-0.0646767], + "R": [ + [0.9715220842,-0.01574832828,-0.2364251047], + [0.005323209906,0.998987679,-0.04466856407], + [0.2368892218,0.042137956,0.9706224236] + ], + "t": [ + [-66.22242206], + [142.1317177], + [278.6626087] + ] + }, + { + "name": "01_03", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 3, + "K": [ + [746.261,0,378.952], + [0,746.496,239.595], + [0,0,1] + ], + "distCoef": [-0.322069,0.0440329,-0.000951664,0.000892653,0.103376], + "R": [ + [0.9665011873,0.05534363601,-0.2506242943], + [-0.07024277085,0.996230894,-0.05089164033], + [0.2468631364,0.06679137568,0.9667458322] + ], + "t": [ + [-54.75524211], + [118.3584455], + [281.78809] + ] + }, + { + "name": "01_04", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 4, + "K": [ + [747.661,0,366.929], + [0,747.759,234.022], + [0,0,1] + ], + "distCoef": [-0.32333,0.0462607,-0.000972333,-0.000898261,0.102804], + "R": [ + [0.9662588837,0.08601234823,-0.2427872436], + [-0.1112831564,0.9894890375,-0.09234448444], + [0.23229255,0.1162468093,0.9656742984] + ], + "t": [ + [-29.08626445], + [96.75744843], + [287.7183779] + ] + }, + { + "name": "01_05", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 5, + "K": [ + [742.413,0,353.224], + [0,742.622,209.478], + [0,0,1] + ], + "distCoef": [-0.297729,-0.0985766,-0.000505185,-0.000773418,0.328727], + "R": [ + [0.9718071292,0.05098345905,-0.2301990238], + [-0.07271497659,0.9935575811,-0.0869244798], + [0.2242842746,0.1012127458,0.9692536016] + ], + "t": [ + [-26.91018729], + [77.97642882], + [285.7140393] + ] + }, + { + 
"name": "01_06", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 6, + "K": [ + [743.487,0,372.277], + [0,743.725,241.821], + [0,0,1] + ], + "distCoef": [-0.317534,0.0281748,0.00130284,-0.000186889,0.119129], + "R": [ + [0.9681278444,0.07458666466,-0.2390926732], + [-0.09383510211,0.9931135585,-0.07014580141], + [0.2322142341,0.09034538891,0.968459736] + ], + "t": [ + [-7.038020326], + [73.51221006], + [284.7303027] + ] + }, + { + "name": "01_07", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 7, + "K": [ + [748.393,0,380.919], + [0,748.388,229.353], + [0,0,1] + ], + "distCoef": [-0.344193,0.174813,-0.00034307,0.00107023,-0.0968505], + "R": [ + [0.9670535143,-0.02995409712,-0.2528047715], + [0.01712365053,0.9984582116,-0.0528013286], + [0.2539966162,0.04673276982,0.9660754459] + ], + "t": [ + [-4.52170598], + [98.55800179], + [280.6705064] + ] + }, + { + "name": "01_08", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 8, + "K": [ + [745.37,0,362.362], + [0,745.56,217.483], + [0,0,1] + ], + "distCoef": [-0.326014,0.0789588,-0.000462463,-0.00138061,0.0222432], + "R": [ + [0.9652282485,0.06485174985,-0.2532364089], + [-0.07898708824,0.9958116468,-0.0460456736], + [0.2491896228,0.06444699145,0.9663079826] + ], + "t": [ + [26.28384049], + [86.2200762], + [282.8912643] + ] + }, + { + "name": "01_09", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 9, + "K": [ + [746.037,0,338.236], + [0,746.053,236.859], + [0,0,1] + ], + "distCoef": [-0.314486,0.0395532,0.000625849,-0.000232478,0.0599275], + "R": [ + [0.9656569777,0.07278005487,-0.2494186543], + [-0.09030273149,0.9941334749,-0.05953193019], + [0.2436226964,0.08001060955,0.9665641645] + ], + "t": [ + [45.35508632], + [94.7965848], + [284.0947744] + ] + }, + { + "name": "01_10", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 10, + "K": [ + [747.938,0,379.271], + [0,748.269,227.432], + [0,0,1] + ], + "distCoef": [-0.3484,0.205218,-0.00110069,0.000562921,-0.151344], + "R": [ + [0.9662738854,-0.001312373382,-0.2575132151], + [-0.009587322107,0.9991104143,-0.04106657164], + [0.2573380297,0.04215041788,0.9654017199] + ], + "t": [ + [30.05861189], + [130.0028668], + [279.9552314] + ] + }, + { + "name": "01_11", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 11, + "K": [ + [746.12,0,364.693], + [0,745.844,223.621], + [0,0,1] + ], + "distCoef": [-0.335335,0.119703,0.000192218,0.00118296,-0.00812072], + "R": [ + [0.9869891455,-0.01212212734,-0.1603292883], + [0.00355647539,0.9985558958,-0.05360479805], + [0.1607475603,0.05233714665,0.9856069424] + ], + "t": [ + [71.07099717], + [142.6182462], + [275.3539702] + ] + }, + { + "name": "01_12", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 12, + "K": [ + [745.407,0,358.691], + [0,745.503,226.329], + [0,0,1] + ], + "distCoef": [-0.325389,0.0923962,-0.00061832,-0.00189678,-0.0159561], + "R": [ + [0.9589650047,0.08538224277,-0.2703627054], + [-0.09708669181,0.9948178626,-0.03019262438], + [0.2663837347,0.05520229083,0.9622849957] + ], + "t": [ + [54.63033668], + [157.9150468], + [281.9236261] + ] + }, + { + "name": "01_13", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 13, + "K": [ + [744.389,0,339.442], + [0,744.512,216.258], + [0,0,1] + ], + "distCoef": [-0.320138,0.0543285,-0.000196977,-0.00116274,0.0473598], + "R": [ + [0.9724830194,-0.06319437739,-0.2242392645], + [0.03959405574,0.9933373951,-0.1082272161], + 
[0.2295845984,0.09637058799,0.9685058709] + ], + "t": [ + [19.90234626], + [154.6647449], + [286.7518211] + ] + }, + { + "name": "01_14", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 14, + "K": [ + [746.213,0,363.165], + [0,746.641,235.418], + [0,0,1] + ], + "distCoef": [-0.33414,0.127633,-0.000792357,0.000136075,-0.0405619], + "R": [ + [0.9643490552,0.006836134333,-0.2645452079], + [-0.02440508255,0.9977035557,-0.06318233054], + [0.2635057717,0.0673860684,0.9623013177] + ], + "t": [ + [19.24633902], + [182.0747755], + [282.9928946] + ] + }, + { + "name": "01_15", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 15, + "K": [ + [745.225,0,366.568], + [0,745.569,216.05], + [0,0,1] + ], + "distCoef": [-0.319743,0.046174,-0.00158438,-0.000953331,0.0743504], + "R": [ + [0.9602661069,0.03565913048,-0.2767985376], + [-0.06162250151,0.9944158624,-0.08567239854], + [0.2721978533,0.09932531892,0.9571012536] + ], + "t": [ + [0.9330302863], + [174.5612072], + [288.1067574] + ] + }, + { + "name": "01_16", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 16, + "K": [ + [747.633,0,371.752], + [0,747.88,230.613], + [0,0,1] + ], + "distCoef": [-0.347758,0.198029,0.00072103,0.00029865,-0.136932], + "R": [ + [0.9682573711,0.05614690975,-0.2435676248], + [-0.07153002565,0.9959334273,-0.05477283913], + [0.2395018137,0.07045660367,0.968336072] + ], + "t": [ + [-3.74774], + [172.5737662], + [282.7618788] + ] + }, + { + "name": "01_17", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 17, + "K": [ + [748.152,0,373.9], + [0,748.508,234.452], + [0,0,1] + ], + "distCoef": [-0.345127,0.177692,-0.00116897,0.00210199,-0.0818461], + "R": [ + [0.9639501783,0.02458774974,-0.264944327], + [-0.04477053879,0.9965129817,-0.07040934697], + [0.2622892538,0.07973280283,0.9616896732] + ], + "t": [ + [-36.08309916], + [173.4726636], + [283.4522322] + ] + }, + { + "name": "01_18", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 18, + "K": [ + [743.791,0,363.617], + [0,744.126,236.963], + [0,0,1] + ], + "distCoef": [-0.312734,0.0122172,-0.00120247,-0.000963953,0.133944], + "R": [ + [0.9523198878,0.06045552763,-0.2990517689], + [-0.07234112338,0.9969633514,-0.02882425707], + [0.2964010681,0.04908365416,0.9538014478] + ], + "t": [ + [-57.80984395], + [175.8598769], + [275.2458542] + ] + }, + { + "name": "01_19", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 19, + "K": [ + [743.162,0,364.748], + [0,743.331,220.785], + [0,0,1] + ], + "distCoef": [-0.311505,0.00290054,-0.000860754,-0.000437091,0.146397], + "R": [ + [0.9677776267,0.05243241618,-0.246287042], + [-0.06515666231,0.9969134625,-0.04379677618], + [0.243230497,0.05843278173,0.968206866] + ], + "t": [ + [-19.88792012], + [144.796335], + [280.8929426] + ] + }, + { + "name": "01_20", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 20, + "K": [ + [744.661,0,343.237], + [0,744.907,246.044], + [0,0,1] + ], + "distCoef": [-0.326994,0.0904776,0.000984855,-0.00107766,-0.0214165], + "R": [ + [0.9717064093,0.03462931454,-0.2336396043], + [-0.0436324388,0.998486683,-0.03347468014], + [0.2321268283,0.04272182698,0.9717468709] + ], + "t": [ + [-15.15244103], + [127.7778149], + [279.5122056] + ] + }, + { + "name": "01_21", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 21, + "K": [ + [742.462,0,365.246], + [0,742.468,221.387], + [0,0,1] + ], + "distCoef": [-0.311193,-0.0017069,-0.0010044,-5.33063e-05,0.168374], + "R": [ + 
[0.9650420793,0.04068979072,-0.2589172188], + [-0.04945049005,0.9984003719,-0.02741069744], + [0.257387712,0.03925605981,0.965510501] + ], + "t": [ + [-1.672862451], + [122.1992626], + [279.1232554] + ] + }, + { + "name": "01_22", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 22, + "K": [ + [744.021,0,363.587], + [0,744.301,226.764], + [0,0,1] + ], + "distCoef": [-0.330855,0.115198,-0.00111581,-0.000578883,-0.0257811], + "R": [ + [0.9624230562,-0.007741542698,-0.2714441553], + [-0.003557050749,0.9991484058,-0.04110730506], + [0.271531229,0.0405281588,0.9615759252] + ], + "t": [ + [4.289641778], + [135.1743597], + [279.2863723] + ] + }, + { + "name": "01_23", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 23, + "K": [ + [745.029,0,358.645], + [0,745.162,224.101], + [0,0,1] + ], + "distCoef": [-0.31925,0.0412999,-0.000788365,0.000625647,0.108146], + "R": [ + [0.9553340738,0.01211961015,-0.2952793973], + [-0.03701510886,0.9961975848,-0.07886858543], + [0.293200766,0.08627564605,0.9521501057] + ], + "t": [ + [-2.968489269], + [143.230855], + [285.3382881] + ] + }, + { + "name": "01_24", + "type": "vga", + "resolution": [640,480], + "panel": 1, + "node": 24, + "K": [ + [744.501,0,369.38], + [0,744.575,244.409], + [0,0,1] + ], + "distCoef": [-0.317214,0.0306635,-5.65201e-05,-0.000305408,0.106933], + "R": [ + [0.9627375442,0.05351140442,-0.2650904574], + [-0.07422624073,0.9948691584,-0.06874462026], + [0.2600516991,0.08585969499,0.9617698408] + ], + "t": [ + [-7.333655278], + [148.0612654], + [284.8699573] + ] + }, + { + "name": "02_01", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 1, + "K": [ + [746.79,0,376.022], + [0,747.048,234.17], + [0,0,1] + ], + "distCoef": [-0.317408,0.0301922,-0.000108969,-0.00027109,0.105931], + "R": [ + [0.977473966,0.04697618088,0.2057617172], + [0.001487552662,0.9733575223,-0.2292878562], + [-0.211050783,0.2244289915,0.9513617581] + ], + "t": [ + [-1.729507611], + [175.3460492], + [304.9109171] + ] + }, + { + "name": "02_02", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 2, + "K": [ + [747.689,0,367.065], + [0,747.811,212.158], + [0,0,1] + ], + "distCoef": [-0.333664,0.117162,0.000577725,-0.000310896,-0.0327554], + "R": [ + [0.9812751339,-0.05714257326,0.183939767], + [0.09271495859,0.9771941455,-0.1910380552], + [-0.1688284573,0.2045148611,0.9641942873] + ], + "t": [ + [-50.62568249], + [190.9654762], + [299.6250374] + ] + }, + { + "name": "02_03", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 3, + "K": [ + [745.627,0,353.486], + [0,745.817,252.683], + [0,0,1] + ], + "distCoef": [-0.321416,0.0392112,-0.00107045,-0.00134198,0.0908854], + "R": [ + [0.9757098845,0.1270834984,0.1784376802], + [-0.07601456941,0.9603325594,-0.2682967771], + [-0.2054556071,0.248215954,0.946666168] + ], + "t": [ + [-23.13649132], + [169.3490841], + [309.2380875] + ] + }, + { + "name": "02_04", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 4, + "K": [ + [746.11,0,381.584], + [0,746.321,224.917], + [0,0,1] + ], + "distCoef": [-0.323963,0.0585021,-0.000871966,0.000552522,0.0715102], + "R": [ + [0.979331342,0.07410153523,0.1881995881], + [-0.02608477747,0.9689731658,-0.2457856551], + [-0.2005734451,0.2357964511,0.950878713] + ], + "t": [ + [-32.63906075], + [150.8763932], + [306.9317958] + ] + }, + { + "name": "02_05", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 5, + "K": [ + [744.11,0,378.377], + [0,744.035,244.823], + [0,0,1] + ], + 
"distCoef": [-0.323078,0.0494134,-0.000238923,-0.000981516,0.0727453], + "R": [ + [0.9857440106,0.05652749171,0.1584720428], + [-0.01525193411,0.9680163878,-0.250422945], + [-0.1675593154,0.244435913,0.95507851] + ], + "t": [ + [-62.3494258], + [135.8190029], + [306.0165552] + ] + }, + { + "name": "02_06", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 6, + "K": [ + [743.928,0,352.844], + [0,744.181,228.627], + [0,0,1] + ], + "distCoef": [-0.303908,-0.0528673,-0.000528541,8.08764e-05,0.267531], + "R": [ + [0.9814194485,0.06212733968,0.1815380393], + [-0.0101664424,0.9616367605,-0.2741375282], + [-0.1916050874,0.2671983057,0.9444006332] + ], + "t": [ + [-53.86742917], + [106.6702196], + [310.2214119] + ] + }, + { + "name": "02_07", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 7, + "K": [ + [746.501,0,376.178], + [0,746.591,217.394], + [0,0,1] + ], + "distCoef": [-0.323449,0.0621904,-0.000592526,0.000355354,0.0689781], + "R": [ + [0.9775323693,0.09704954661,0.1871145437], + [-0.05094527723,0.9701636443,-0.2370381445], + [-0.2045361721,0.2221798567,0.9533105819] + ], + "t": [ + [-27.21830655], + [111.2122483], + [305.8578091] + ] + }, + { + "name": "02_08", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 8, + "K": [ + [747.056,0,346.722], + [0,747.425,231.954], + [0,0,1] + ], + "distCoef": [-0.331626,0.0978711,0.000923123,-0.00170198,0.0128988], + "R": [ + [0.9738310577,0.04398424166,0.222976361], + [0.006459505741,0.9753414162,-0.2206068824], + [-0.2271813062,0.2162741507,0.9495336465] + ], + "t": [ + [-23.1615402], + [89.62617671], + [306.715437] + ] + }, + { + "name": "02_09", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 9, + "K": [ + [746.084,0,344.827], + [0,746.456,222.936], + [0,0,1] + ], + "distCoef": [-0.31385,0.00765504,0.000335804,0.000338293,0.157318], + "R": [ + [0.9708044988,0.02558390192,0.2385038556], + [0.01777728087,0.9838878899,-0.1779005014], + [-0.2392124442,0.1769465571,0.9547079776] + ], + "t": [ + [-1.622489705], + [92.86686988], + [302.6276511] + ] + }, + { + "name": "02_10", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 10, + "K": [ + [743.875,0,345.16], + [0,744.131,231.932], + [0,0,1] + ], + "distCoef": [-0.309364,-0.0158069,0.000435688,-0.000318284,0.167974], + "R": [ + [0.9837217555,0.04774800386,0.1732386674], + [-0.008457215477,0.9752859506,-0.220784488], + [-0.179499257,0.2157253874,0.9598138226] + ], + "t": [ + [0.6070589451], + [94.58504844], + [305.3954199] + ] + }, + { + "name": "02_11", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 11, + "K": [ + [748.642,0,372.727], + [0,749.029,221.349], + [0,0,1] + ], + "distCoef": [-0.329743,0.0894243,0.000705225,0.000452301,0.0255748], + "R": [ + [0.9762818677,-0.03993432779,0.2127885436], + [0.08495434643,0.9746762651,-0.20685487], + [-0.1991393328,0.2200259705,0.9549513592] + ], + "t": [ + [18.17502224], + [86.30258496], + [305.899008] + ] + }, + { + "name": "02_12", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 12, + "K": [ + [746.297,0,386.393], + [0,746.341,223.432], + [0,0,1] + ], + "distCoef": [-0.329805,0.088881,-0.000101498,-0.000342857,0.0238941], + "R": [ + [0.9769251111,-0.05225372472,0.2070914666], + [0.09392861168,0.9759243238,-0.1968479875], + [-0.1918195589,0.211757556,0.9583130982] + ], + "t": [ + [31.97904484], + [101.8192368], + [305.2554798] + ] + }, + { + "name": "02_13", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 13, + 
"K": [ + [746.887,0,386.903], + [0,746.77,241.912], + [0,0,1] + ], + "distCoef": [-0.330222,0.0894843,0.000608161,-0.000202457,0.0188277], + "R": [ + [0.9805035597,0.07291108666,0.1824739514], + [-0.03359954242,0.9771464723,-0.2098948364], + [-0.1936074385,0.199671593,0.9605453736] + ], + "t": [ + [39.8755561], + [121.0360498], + [302.8306622] + ] + }, + { + "name": "02_14", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 14, + "K": [ + [745.399,0,359.381], + [0,745.103,221.453], + [0,0,1] + ], + "distCoef": [-0.32351,0.0564367,0.000553752,0.000358328,0.0789504], + "R": [ + [0.9639890244,-0.01369700088,0.2655890681], + [0.06651808592,0.9793475216,-0.1909287203], + [-0.2574888447,0.2017196672,0.9449913601] + ], + "t": [ + [64.66924198], + [136.2834945], + [299.1868513] + ] + }, + { + "name": "02_15", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 15, + "K": [ + [746.343,0,376.035], + [0,746.136,233.449], + [0,0,1] + ], + "distCoef": [-0.332319,0.10939,0.000552685,0.00121175,-0.00685584], + "R": [ + [0.9739293667,-0.02993852249,0.2248672353], + [0.07982373372,0.9730868608,-0.2161715356], + [-0.2123434957,0.2284855491,0.9501076748] + ], + "t": [ + [41.67937397], + [146.9667487], + [305.3208703] + ] + }, + { + "name": "02_16", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 16, + "K": [ + [747.983,0,369.069], + [0,747.865,212.357], + [0,0,1] + ], + "distCoef": [-0.333814,0.119177,-0.00123283,0.000206724,-0.0313224], + "R": [ + [0.9828420813,0.01261378295,0.1840172159], + [0.03080156014,0.9724259604,-0.2311688027], + [-0.181859031,0.2328704445,0.9553526307] + ], + "t": [ + [22.33056427], + [154.6384713], + [307.0242051] + ] + }, + { + "name": "02_17", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 17, + "K": [ + [743.255,0,372.405], + [0,743.629,259.514], + [0,0,1] + ], + "distCoef": [-0.301911,-0.0577323,-0.000292445,-0.000537705,0.240913], + "R": [ + [0.9702237144,0.05425789408,0.2360551311], + [-0.004184220731,0.978195713,-0.2076430576], + [-0.2421743923,0.2004725119,0.9492957051] + ], + "t": [ + [39.95715372], + [182.9757461], + [299.4720725] + ] + }, + { + "name": "02_18", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 18, + "K": [ + [746.171,0,380.016], + [0,746.628,215.7], + [0,0,1] + ], + "distCoef": [-0.310416,0.0111871,-0.00156578,-0.000885002,0.110566], + "R": [ + [0.9751942313,0.01121985931,0.2210663386], + [0.02134458651,0.9892938663,-0.1443677759], + [-0.220319359,0.1455051918,0.9645141882] + ], + "t": [ + [9.159436194], + [213.6293599], + [288.3403437] + ] + }, + { + "name": "02_19", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 19, + "K": [ + [745.09,0,380.114], + [0,745.176,232.983], + [0,0,1] + ], + "distCoef": [-0.31746,0.043353,-0.000108725,0.000220738,0.0862213], + "R": [ + [0.9809185988,0.05584586521,0.1862255137], + [-0.01423917048,0.975920974,-0.2176591338], + [-0.1938967473,0.2108541957,0.9580942331] + ], + "t": [ + [-1.989355998], + [159.4183424], + [303.0216832] + ] + }, + { + "name": "02_20", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 20, + "K": [ + [746.359,0,393.165], + [0,746.438,228.007], + [0,0,1] + ], + "distCoef": [-0.32236,0.0673245,-0.000115957,0.00130444,0.0588071], + "R": [ + [0.9826018096,0.03015545669,0.1832602856], + [0.01576123022,0.9696317731,-0.2440610748], + [-0.1850547688,0.2427032613,0.9522866477] + ], + "t": [ + [-25.36954265], + [136.7143691], + [307.7149997] + ] + }, + { + "name": "02_21", + 
"type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 21, + "K": [ + [747.137,0,358.509], + [0,747.202,238.678], + [0,0,1] + ], + "distCoef": [-0.327929,0.0852816,0.000460613,0.000357406,0.0365027], + "R": [ + [0.9780966382,0.08951991601,0.1879179366], + [-0.04045439222,0.9673344336,-0.2502549415], + [-0.2041822921,0.2371714111,0.9497680314] + ], + "t": [ + [-10.00427836], + [118.005594], + [307.3165834] + ] + }, + { + "name": "02_22", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 22, + "K": [ + [745.847,0,374.568], + [0,746.074,247.807], + [0,0,1] + ], + "distCoef": [-0.32052,0.063252,0.000743322,-0.000945252,0.0534877], + "R": [ + [0.9839840132,0.07804627455,0.160263036], + [-0.03749054936,0.9695570383,-0.2419785283], + [-0.1742696772,0.2320946541,0.9569546233] + ], + "t": [ + [-1.458572059], + [110.2636917], + [306.6072245] + ] + }, + { + "name": "02_23", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 23, + "K": [ + [744.851,0,375.128], + [0,744.899,236.672], + [0,0,1] + ], + "distCoef": [-0.328747,0.0731957,0.000409854,0.000115616,0.0573405], + "R": [ + [0.9798731388,0.006836815724,0.1995041098], + [0.04188111895,0.9701291749,-0.2389463451], + [-0.1951783896,0.2424925605,0.9503171862] + ], + "t": [ + [13.92766978], + [118.8861106], + [308.0337581] + ] + }, + { + "name": "02_24", + "type": "vga", + "resolution": [640,480], + "panel": 2, + "node": 24, + "K": [ + [748.108,0,365.63], + [0,748.409,236.546], + [0,0,1] + ], + "distCoef": [-0.337502,0.145226,-9.99404e-05,-0.000712599,-0.0768278], + "R": [ + [0.9858983234,-0.01937546959,0.166219996], + [0.057736328,0.9716683618,-0.2291879382], + [-0.1570700873,0.2355529362,0.9590848773] + ], + "t": [ + [-5.69779309], + [141.0775615], + [307.1963385] + ] + }, + { + "name": "03_01", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 1, + "K": [ + [745.205,0,364.445], + [0,745.671,223.278], + [0,0,1] + ], + "distCoef": [-0.321278,0.0550501,-0.000663141,0.000431329,0.0680735], + "R": [ + [0.789168654,0.1464091436,-0.5964706181], + [-0.3274382264,0.921936374,-0.2069239719], + [0.5196123973,0.3586051937,0.7755032377] + ], + "t": [ + [-15.48720347], + [106.8731646], + [321.197831] + ] + }, + { + "name": "03_02", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 2, + "K": [ + [746.402,0,367.989], + [0,746.656,218.884], + [0,0,1] + ], + "distCoef": [-0.319108,0.0415571,-0.000289565,0.00121415,0.0978966], + "R": [ + [0.7844411333,0.123213727,-0.6078408392], + [-0.3461950886,0.9001611021,-0.2643084389], + [0.5145882519,0.4177659246,0.7487793823] + ], + "t": [ + [-25.69855827], + [65.19717944], + [326.035328] + ] + }, + { + "name": "03_03", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 3, + "K": [ + [747.999,0,350.415], + [0,748.222,213.374], + [0,0,1] + ], + "distCoef": [-0.322361,0.0444301,-0.000132478,-4.14576e-05,0.110213], + "R": [ + [0.8075592295,0.0617799019,-0.5865418439], + [-0.2672496857,0.9248714179,-0.2705373648], + [0.525762015,0.3752280693,0.763399109] + ], + "t": [ + [-8.799326732], + [72.40249706], + [323.1224723] + ] + }, + { + "name": "03_04", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 4, + "K": [ + [744.819,0,376.394], + [0,744.912,212.894], + [0,0,1] + ], + "distCoef": [-0.335892,0.121706,-0.00015411,0.0017688,-0.0013985], + "R": [ + [0.8410364559,-0.03582960221,-0.5397906256], + [-0.192384631,0.9127679401,-0.3603371217], + [0.5056143132,0.4069040761,0.7607780486] + ], + "t": [ + [3.728898504], + 
[75.32503712], + [325.8417248] + ] + }, + { + "name": "03_05", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 5, + "K": [ + [746.446,0,376.523], + [0,746.682,251.012], + [0,0,1] + ], + "distCoef": [-0.330943,0.0996499,0.00144142,-0.000113946,0.0131394], + "R": [ + [0.8610606531,-0.05437396314,-0.5055868113], + [-0.176556083,0.9004429458,-0.3975304402], + [0.4768673833,0.4315622475,0.7657359371] + ], + "t": [ + [31.93527518], + [62.43528973], + [326.764058] + ] + }, + { + "name": "03_06", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 6, + "K": [ + [744.998,0,378.484], + [0,744.973,240.788], + [0,0,1] + ], + "distCoef": [-0.31652,0.0338012,-0.0010118,-0.000122735,0.0959735], + "R": [ + [0.8769583834,-0.06555368648,-0.4760742674], + [-0.1128149484,0.9348860407,-0.3365425358], + [0.4671367907,0.348842092,0.8124607151] + ], + "t": [ + [52.69213606], + [109.2131316], + [317.2562433] + ] + }, + { + "name": "03_07", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 7, + "K": [ + [744.942,0,394.454], + [0,745.513,230.902], + [0,0,1] + ], + "distCoef": [-0.322593,0.0669124,0.000685625,0.000650135,0.0435827], + "R": [ + [0.8511772215,-0.03734239681,-0.5235483579], + [-0.1521244983,0.9371023984,-0.3141611561], + [0.5023499524,0.3470513512,0.7919595223] + ], + "t": [ + [39.57000229], + [127.8421428], + [318.5564893] + ] + }, + { + "name": "03_08", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 8, + "K": [ + [744.592,0,375.596], + [0,744.695,234.586], + [0,0,1] + ], + "distCoef": [-0.314208,0.0115966,-0.0002404,-0.00129875,0.131833], + "R": [ + [0.863242284,-0.08735605341,-0.4971736911], + [-0.1241310572,0.9179337282,-0.3768144785], + [0.4892895255,0.386996887,0.7815556088] + ], + "t": [ + [48.3076273], + [133.8669044], + [323.1008342] + ] + }, + { + "name": "03_09", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 9, + "K": [ + [746.083,0,388.49], + [0,746.196,219.485], + [0,0,1] + ], + "distCoef": [-0.327776,0.0952708,0.000477894,0.00116098,0.0130168], + "R": [ + [0.8627791791,-0.162720556,-0.478679547], + [-0.06768333431,0.9010943873,-0.4283081501], + [0.5010299935,0.401933982,0.766432006] + ], + "t": [ + [23.91664651], + [150.3571005], + [326.7446808] + ] + }, + { + "name": "03_10", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 10, + "K": [ + [744.984,0,374.291], + [0,745.244,231.69], + [0,0,1] + ], + "distCoef": [-0.317288,0.0201616,0.000340337,0.000302133,0.135473], + "R": [ + [0.8433461687,-0.104156761,-0.5271798639], + [-0.1611508321,0.8868626272,-0.433018579], + [0.5126379318,0.4501400333,0.7311472501] + ], + "t": [ + [5.809004706], + [133.1751931], + [335.4888131] + ] + }, + { + "name": "03_11", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 11, + "K": [ + [746.325,0,369.755], + [0,746.606,238.315], + [0,0,1] + ], + "distCoef": [-0.330117,0.107892,0.000853042,-0.00148033,-0.0192727], + "R": [ + [0.8487877999,-0.06352852013,-0.5249032272], + [-0.1660312052,0.9105147821,-0.3786772643], + [0.5019889537,0.4085669574,0.7622861219] + ], + "t": [ + [10.90299391], + [168.9126588], + [328.8547345] + ] + }, + { + "name": "03_12", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 12, + "K": [ + [745.397,0,373.191], + [0,745.394,241.989], + [0,0,1] + ], + "distCoef": [-0.315431,0.0239438,0.00152043,8.78247e-05,0.132462], + "R": [ + [0.7899500519,0.01447673769,-0.613000277], + [-0.2772192125,0.9001468868,-0.3359837649], + 
[0.5469263421,0.4353458466,0.7150843098] + ], + "t": [ + [-11.01289772], + [165.4412244], + [333.9391633] + ] + }, + { + "name": "03_13", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 13, + "K": [ + [746.289,0,356.696], + [0,746.559,221.83], + [0,0,1] + ], + "distCoef": [-0.307674,-0.0320128,-0.000713248,-0.000212304,0.187939], + "R": [ + [0.7812025858,0.003231301473,-0.6242692358], + [-0.256925784,0.9130359895,-0.316787663], + [0.5689566429,0.4078662043,0.7140962805] + ], + "t": [ + [-30.04397497], + [158.6113997], + [327.0561852] + ] + }, + { + "name": "03_14", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 14, + "K": [ + [744.216,0,367.374], + [0,744.503,234.384], + [0,0,1] + ], + "distCoef": [-0.313106,0.0107213,0.00051099,0.000391129,0.137335], + "R": [ + [0.7647493291,0.08765142393,-0.6383382266], + [-0.3090501184,0.9192036391,-0.2440342068], + [0.5653728752,0.3839035005,0.7300490493] + ], + "t": [ + [-30.23656889], + [178.7825502], + [321.7207122] + ] + }, + { + "name": "03_15", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 15, + "K": [ + [747.827,0,380.852], + [0,747.806,237.021], + [0,0,1] + ], + "distCoef": [-0.329904,0.102056,0.000500868,0.000776535,0.0163276], + "R": [ + [0.8420936086,0.09442452017,-0.5310012847], + [-0.2692856411,0.9266613257,-0.2622670985], + [0.4672939095,0.3638444688,0.8057627471] + ], + "t": [ + [-9.683781844], + [164.2881649], + [322.7392687] + ] + }, + { + "name": "03_16", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 16, + "K": [ + [745.289,0,371.652], + [0,745.447,216.538], + [0,0,1] + ], + "distCoef": [-0.317152,0.0301694,-0.000847782,0.000226416,0.100881], + "R": [ + [0.7751085928,0.08020770062,-0.6267163586], + [-0.2817854267,0.9316829094,-0.2292682483], + [0.5655118413,0.3543073259,0.74475679] + ], + "t": [ + [-42.18053512], + [150.9579844], + [316.9204289] + ] + }, + { + "name": "03_17", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 17, + "K": [ + [744.591,0,386.471], + [0,744.601,243.766], + [0,0,1] + ], + "distCoef": [-0.308716,-0.020066,-0.000742984,7.36231e-05,0.18193], + "R": [ + [0.8000888793,0.13985822,-0.5833502066], + [-0.3086873752,0.9298003917,-0.2004578159], + [0.5143635773,0.3404569133,0.7870954202] + ], + "t": [ + [-29.24407076], + [139.76037], + [318.5389184] + ] + }, + { + "name": "03_18", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 18, + "K": [ + [747.091,0,388.41], + [0,747.213,245.147], + [0,0,1] + ], + "distCoef": [-0.331947,0.109947,-0.00018029,-0.000335458,-0.0100282], + "R": [ + [0.7812031275,0.143907843,-0.6074637489], + [-0.3493109676,0.9072427652,-0.2342912992], + [0.5174007358,0.3952228456,0.7590094735] + ], + "t": [ + [-39.38157975], + [101.9329028], + [324.6812046] + ] + }, + { + "name": "03_19", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 19, + "K": [ + [743.815,0,380.782], + [0,743.921,233.579], + [0,0,1] + ], + "distCoef": [-0.31618,0.0384848,0.000240219,0.000426998,0.0977231], + "R": [ + [0.8097086682,0.09665101941,-0.578818152], + [-0.2718115959,0.9359285209,-0.2239559336], + [0.5200868476,0.3386685464,0.784100304] + ], + "t": [ + [-3.817362892], + [126.1763792], + [318.2990602] + ] + }, + { + "name": "03_20", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 20, + "K": [ + [746.163,0,356.033], + [0,746.281,215.327], + [0,0,1] + ], + "distCoef": [-0.323416,0.0556958,5.62358e-06,-0.000684023,0.0815018], + "R": [ + 
[0.8690981447,0.003405692177,-0.4946279574], + [-0.1831744592,0.9310985933,-0.3154402114], + [0.4594731031,0.3647517111,0.8098398958] + ], + "t": [ + [22.15812523], + [111.197586], + [320.9871724] + ] + }, + { + "name": "03_21", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 21, + "K": [ + [745.277,0,370.698], + [0,745.633,251.594], + [0,0,1] + ], + "distCoef": [-0.309423,-0.0154759,-0.000871178,-0.000110471,0.185828], + "R": [ + [0.8519925598,-0.01534543221,-0.5233289556], + [-0.157671027,0.9456449668,-0.2844212441], + [0.4992479597,0.3248385977,0.8032629458] + ], + "t": [ + [23.66925749], + [140.0971121], + [315.3107012] + ] + }, + { + "name": "03_22", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 22, + "K": [ + [749.812,0,361.025], + [0,750.052,224.033], + [0,0,1] + ], + "distCoef": [-0.333335,0.0892582,3.32371e-05,-0.00136116,0.0353235], + "R": [ + [0.8242021998,-0.0118106517,-0.5661724493], + [-0.2609232338,0.8794144434,-0.3981824994], + [0.5026030242,0.4759104383,0.7217336453] + ], + "t": [ + [6.739100305], + [105.8858326], + [336.9710973] + ] + }, + { + "name": "03_23", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 23, + "K": [ + [744.781,0,365.976], + [0,744.836,235.682], + [0,0,1] + ], + "distCoef": [-0.319452,0.032528,0.000754874,-0.000913445,0.102166], + "R": [ + [0.8233335342,0.02583843362,-0.5669693703], + [-0.2570181529,0.9076367155,-0.3318693443], + [0.506027233,0.4189605805,0.7539286912] + ], + "t": [ + [-4.103462359], + [133.5127669], + [329.5726238] + ] + }, + { + "name": "03_24", + "type": "vga", + "resolution": [640,480], + "panel": 3, + "node": 24, + "K": [ + [746.135,0,373.553], + [0,746.515,225.298], + [0,0,1] + ], + "distCoef": [-0.323756,0.0623909,2.70614e-05,0.000962707,0.0761173], + "R": [ + [0.8557458945,0.0294251088,-0.5165589289], + [-0.2234217673,0.921515875,-0.3176337608], + [0.4666708454,0.3872242956,0.7951576366] + ], + "t": [ + [-1.49693002], + [128.5290469], + [325.1203285] + ] + }, + { + "name": "04_01", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 1, + "K": [ + [745.756,0,368.953], + [0,745.945,245.188], + [0,0,1] + ], + "distCoef": [-0.3245,0.0724334,-0.000312337,0.000678015,0.0415529], + "R": [ + [0.04501388353,-0.06073969189,-0.9971381249], + [-0.08162898106,0.9945884367,-0.06426936354], + [0.9956457501,0.08428838276,0.03981216889] + ], + "t": [ + [-59.71104012], + [137.3658878], + [280.4259077] + ] + }, + { + "name": "04_02", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 2, + "K": [ + [745.144,0,382.474], + [0,745.286,222.525], + [0,0,1] + ], + "distCoef": [-0.322843,0.0690658,-0.000684608,-0.000275864,0.0370253], + "R": [ + [0.1096717734,-0.01795980665,-0.9938055884], + [-0.007042199406,0.9997976117,-0.01884523745], + [0.9939429106,0.009065367736,0.1095231006] + ], + "t": [ + [-53.83503278], + [149.6185443], + [272.7820927] + ] + }, + { + "name": "04_03", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 3, + "K": [ + [742.832,0,377.499], + [0,742.665,258.984], + [0,0,1] + ], + "distCoef": [-0.312355,-0.00257413,0.000454129,0.00111055,0.151137], + "R": [ + [0.07040546321,0.04162572676,-0.9966495721], + [-0.08610880414,0.9956530214,0.03550119457], + [0.9937949208,0.08332082476,0.07368375372] + ], + "t": [ + [-50.21742462], + [111.4103034], + [280.5940976] + ] + }, + { + "name": "04_04", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 4, + "K": [ + [743.339,0,393.561], + [0,743.571,223.626], + [0,0,1] + 
], + "distCoef": [-0.307228,-0.0295629,-0.000661125,6.4492e-05,0.183577], + "R": [ + [0.09450112049,0.05679880598,-0.993903131], + [-0.03670643306,0.9978910099,0.05353662459], + [0.9948478155,0.03142336774,0.09638670013] + ], + "t": [ + [-21.9069], + [118.1273376], + [275.8163164] + ] + }, + { + "name": "04_05", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 5, + "K": [ + [746.019,0,364.58], + [0,746.273,258.887], + [0,0,1] + ], + "distCoef": [-0.327759,0.0738839,0.000801649,0.000211169,0.0604088], + "R": [ + [0.135847977,0.01131634816,-0.9906650632], + [-0.049797809,0.9987488181,0.004580011864], + [0.98947739,0.04871076425,0.1362415358] + ], + "t": [ + [-12.12624478], + [90.71810202], + [278.5550143] + ] + }, + { + "name": "04_06", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 6, + "K": [ + [745.588,0,362.328], + [0,745.695,224.495], + [0,0,1] + ], + "distCoef": [-0.317313,0.0342325,-0.00011624,0.00140051,0.0955503], + "R": [ + [0.09768474559,0.09486669264,-0.9906856217], + [-0.08671696061,0.9924717325,0.0864871607], + [0.9914322262,0.07746076975,0.1051758999] + ], + "t": [ + [6.120914551], + [75.66522558], + [280.1538331] + ] + }, + { + "name": "04_07", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 7, + "K": [ + [744.949,0,374.902], + [0,744.948,218.152], + [0,0,1] + ], + "distCoef": [-0.307279,-0.0368619,-0.000928182,-0.000206153,0.214368], + "R": [ + [0.08413477249,-0.05845821559,-0.994738145], + [-0.03729096802,0.9973936317,-0.06176833509], + [0.9957563576,0.04229161317,0.08173552284] + ], + "t": [ + [3.352563309], + [99.7043349], + [277.3248716] + ] + }, + { + "name": "04_08", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 8, + "K": [ + [744.851,0,365.832], + [0,744.82,236.655], + [0,0,1] + ], + "distCoef": [-0.313642,0.00106915,0.000461187,-0.00049658,0.163492], + "R": [ + [0.1068294918,-0.02053293437,-0.9940653189], + [-0.04471775106,0.998675844,-0.02543386204], + [0.9932712532,0.04716945203,0.1057698462] + ], + "t": [ + [34.88142403], + [92.93282517], + [277.1804593] + ] + }, + { + "name": "04_09", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 9, + "K": [ + [745.947,0,354.92], + [0,745.962,217.292], + [0,0,1] + ], + "distCoef": [-0.332252,0.114802,-0.000779302,-0.000175195,-0.0220414], + "R": [ + [0.0951039165,0.01286389124,-0.99538423], + [-0.04378002227,0.9990030715,0.008727700331], + [0.9945041753,0.04274790527,0.09557228614] + ], + "t": [ + [51.3876018], + [107.4685168], + [276.8925649] + ] + }, + { + "name": "04_10", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 10, + "K": [ + [743.419,0,373.623], + [0,743.493,209.714], + [0,0,1] + ], + "distCoef": [-0.312784,-0.00205334,-0.00151839,-4.48796e-05,0.146707], + "R": [ + [0.07554192003,-0.02015366607,-0.996938939], + [-0.05402378201,0.9982445697,-0.02427365106], + [0.9956780852,0.05569209012,0.07432053419] + ], + "t": [ + [36.95032578], + [126.4783785], + [278.9862968] + ] + }, + { + "name": "04_11", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 11, + "K": [ + [743.168,0,378.723], + [0,743.196,231.359], + [0,0,1] + ], + "distCoef": [-0.312654,0.00616666,0.000125459,-0.000163635,0.137741], + "R": [ + [0.104627794,-0.01026277171,-0.994458496], + [-0.05855646041,0.9981483637,-0.01646162423], + [0.9927860624,0.05995431298,0.1038331098] + ], + "t": [ + [61.78762978], + [139.882294], + [278.0088471] + ] + }, + { + "name": "04_12", + "type": "vga", + "resolution": [640,480], + "panel": 
4, + "node": 12, + "K": [ + [746.755,0,377.564], + [0,747.014,231.526], + [0,0,1] + ], + "distCoef": [-0.342661,0.169314,0.000669193,0.000564241,-0.092518], + "R": [ + [0.09069981891,0.03748374052,-0.9951726041], + [-0.02832816732,0.9989841486,0.03504548138], + [0.9954752924,0.02501279723,0.09166952704] + ], + "t": [ + [63.18640006], + [168.1511303], + [272.7093484] + ] + }, + { + "name": "04_13", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 13, + "K": [ + [745.766,0,371.377], + [0,745.897,229.211], + [0,0,1] + ], + "distCoef": [-0.323265,0.06437,0.000357726,0.000480753,0.061899], + "R": [ + [0.03414536791,0.03842962758,-0.9986777546], + [-0.02717943982,0.9989265658,0.03750992125], + [0.9990472321,0.02586271187,0.03515321085] + ], + "t": [ + [27.04698548], + [171.5967975], + [274.5649723] + ] + }, + { + "name": "04_14", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 14, + "K": [ + [744.965,0,366.266], + [0,745.319,235.632], + [0,0,1] + ], + "distCoef": [-0.317134,0.0349168,5.85303e-05,0.000379707,0.110605], + "R": [ + [0.05221731101,0.04748668842,-0.9975060736], + [0.03426805086,0.9981953182,0.04931335942], + [0.9980476207,-0.03675759989,0.05049579913] + ], + "t": [ + [31.93275734], + [208.7852536], + [260.7309393] + ] + }, + { + "name": "04_15", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 15, + "K": [ + [744.586,0,371.051], + [0,745.106,212.085], + [0,0,1] + ], + "distCoef": [-0.332822,0.11382,-0.000911903,0.000640183,-0.00904196], + "R": [ + [0.0693166226,0.04834029473,-0.9964228127], + [-0.01396942206,0.9987743784,0.04748258878], + [0.9974968978,0.01062811814,0.06990695264] + ], + "t": [ + [16.12425569], + [198.357827], + [269.7404532] + ] + }, + { + "name": "04_16", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 16, + "K": [ + [742.58,0,362.432], + [0,742.717,222.722], + [0,0,1] + ], + "distCoef": [-0.316061,0.0181932,0.000637155,-0.000119442,0.122715], + "R": [ + [0.07545496093,-0.0349426896,-0.9965367817], + [-0.03652359913,0.9986183515,-0.03778114217], + [0.9964800929,0.03924788454,0.07407447592] + ], + "t": [ + [-15.86676392], + [179.6369531], + [275.0674259] + ] + }, + { + "name": "04_17", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 17, + "K": [ + [745.044,0,350.241], + [0,745.211,214.104], + [0,0,1] + ], + "distCoef": [-0.330556,0.0995367,-0.000406045,-3.83783e-05,-0.00374247], + "R": [ + [0.0837025501,0.02221656332,-0.9962430965], + [-0.04478154079,0.9988252756,0.01851168242], + [0.9954840515,0.04306382584,0.08459911461] + ], + "t": [ + [-23.0620205], + [182.4550181], + [276.0013748] + ] + }, + { + "name": "04_18", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 18, + "K": [ + [747.543,0,399.307], + [0,747.43,229.515], + [0,0,1] + ], + "distCoef": [-0.337874,0.152604,0.000377489,0.002871,-0.0603327], + "R": [ + [0.03967719066,0.06607189882,-0.9970256891], + [-0.02383145062,0.9975901546,0.06516091958], + [0.998928317,0.02117516625,0.04115616396] + ], + "t": [ + [-45.47747339], + [181.8911988], + [269.8403328] + ] + }, + { + "name": "04_19", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 19, + "K": [ + [743.963,0,369.391], + [0,744.08,218.072], + [0,0,1] + ], + "distCoef": [-0.320196,0.0539371,0.000417857,0.00192962,0.0700112], + "R": [ + [0.0434323362,0.03783761887,-0.9983395949], + [-0.08481170801,0.9958149524,0.03405223652], + [0.9954499517,0.08319191804,0.04645964289] + ], + "t": [ + [-24.42650241], + [136.5925943], + 
[281.0885176] + ] + }, + { + "name": "04_20", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 20, + "K": [ + [745.858,0,356.253], + [0,746.045,207.418], + [0,0,1] + ], + "distCoef": [-0.328012,0.0801152,-7.74627e-05,-0.000454429,0.0269942], + "R": [ + [0.0976780849,0.06705669278,-0.9929563896], + [-0.1171365339,0.9915671608,0.05544004021], + [0.9883005738,0.1108961929,0.1047091699] + ], + "t": [ + [-1.775430866], + [107.2147587], + [285.054156] + ] + }, + { + "name": "04_21", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 21, + "K": [ + [746.156,0,369.678], + [0,746.129,226.325], + [0,0,1] + ], + "distCoef": [-0.331296,0.10434,-0.000526263,0.0017798,0.0107539], + "R": [ + [0.06864954522,0.009029787974,-0.9975999714], + [-0.09824772164,0.9951594531,0.00224680986], + [0.9927913301,0.09785768182,0.06920439997] + ], + "t": [ + [2.330018678], + [104.6606406], + [283.2576255] + ] + }, + { + "name": "04_22", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 22, + "K": [ + [746.305,0,363.016], + [0,746.511,222.294], + [0,0,1] + ], + "distCoef": [-0.313633,0.00103632,0.000318828,-0.000294887,0.154057], + "R": [ + [0.08441946195,-0.0784287402,-0.9933389588], + [-0.07957536672,0.9931828981,-0.08517917513], + [0.9932477614,0.08623609206,0.07760297012] + ], + "t": [ + [9.995164317], + [122.6888691], + [282.4272415] + ] + }, + { + "name": "04_23", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 23, + "K": [ + [745.178,0,358.539], + [0,745.299,233.674], + [0,0,1] + ], + "distCoef": [-0.315081,0.0210219,-6.99317e-06,-0.000330658,0.115227], + "R": [ + [0.1162513982,0.03935918122,-0.9924396542], + [-0.02556811677,0.999001962,0.03662446354], + [0.9928906706,0.02111716788,0.117141715] + ], + "t": [ + [32.91845612], + [159.7823772], + [272.1694603] + ] + }, + { + "name": "04_24", + "type": "vga", + "resolution": [640,480], + "panel": 4, + "node": 24, + "K": [ + [746.014,0,365.199], + [0,746.411,216.584], + [0,0,1] + ], + "distCoef": [-0.320661,0.0432533,-0.00136099,-0.000113861,0.0956118], + "R": [ + [0.1001711426,-0.0639180002,-0.9929150172], + [-0.0054812292,0.9978838124,-0.06479084071], + [0.9949551238,0.01193256733,0.09960881242] + ], + "t": [ + [-9.066812064], + [167.2144724], + [271.0944115] + ] + }, + { + "name": "05_01", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 1, + "K": [ + [744.506,0,379.212], + [0,745.093,221.816], + [0,0,1] + ], + "distCoef": [-0.322425,0.0503962,-0.00139268,-0.000488272,0.0792831], + "R": [ + [0.4832137358,-0.07031409603,-0.8726742883], + [-0.1214142278,0.9817563233,-0.14633218], + [0.8670427157,0.1766647942,0.465861009] + ], + "t": [ + [-31.81590772], + [187.5269902], + [291.8752718] + ] + }, + { + "name": "05_02", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 2, + "K": [ + [746.146,0,379.909], + [0,746.274,243.237], + [0,0,1] + ], + "distCoef": [-0.327102,0.0750235,0.00051439,0.000830868,0.0552106], + "R": [ + [0.559561068,-0.04316954181,-0.8276640634], + [-0.1711397799,0.9711012062,-0.1663539088], + [0.8109269924,0.2347314165,0.5360024022] + ], + "t": [ + [-21.47998338], + [182.028679], + [304.5116426] + ] + }, + { + "name": "05_03", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 3, + "K": [ + [746.598,0,366.137], + [0,746.916,245.497], + [0,0,1] + ], + "distCoef": [-0.34673,0.191883,-0.000717065,0.000142378,-0.151818], + "R": [ + [0.4493443217,0.06721032382,-0.8908268367], + [-0.2833621033,0.9563979118,-0.07077395533], + 
[0.8472281859,0.2842284411,0.4487968296] + ], + "t": [ + [-42.79170468], + [156.78227], + [309.5144468] + ] + }, + { + "name": "05_04", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 4, + "K": [ + [744.97,0,361.533], + [0,745.268,216.194], + [0,0,1] + ], + "distCoef": [-0.320215,0.0355127,-0.000935438,6.82351e-05,0.107335], + "R": [ + [0.5139859054,0.07264601249,-0.8547169391], + [-0.2477501277,0.96651576,-0.06683681477], + [0.8212419639,0.2461094116,0.5147735369] + ], + "t": [ + [-21.66847624], + [145.8563675], + [305.5618637] + ] + }, + { + "name": "05_05", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 5, + "K": [ + [743.904,0,367.466], + [0,744.108,216.808], + [0,0,1] + ], + "distCoef": [-0.328736,0.086922,-0.000934339,0.000214876,0.0243362], + "R": [ + [0.4889793362,0.07185582001,-0.8693307483], + [-0.2209595119,0.9743010874,-0.0437525441], + [0.8438460185,0.2134809878,0.4922903259] + ], + "t": [ + [-47.80972546], + [144.3254019], + [299.7644507] + ] + }, + { + "name": "05_06", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 6, + "K": [ + [745.323,0,383.952], + [0,745.526,234.808], + [0,0,1] + ], + "distCoef": [-0.334223,0.133657,-0.000107051,0.00148947,-0.0461754], + "R": [ + [0.4969854565,0.0559027949,-0.8659563116], + [-0.2018212488,0.978003949,-0.05269211703], + [0.8439630558,0.2009556001,0.4973361109] + ], + "t": [ + [-46.56558119], + [125.7186081], + [298.6423415] + ] + }, + { + "name": "05_07", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 7, + "K": [ + [746.158,0,356.674], + [0,746.317,240.893], + [0,0,1] + ], + "distCoef": [-0.334568,0.11153,0.000321304,-0.000871385,-0.0157856], + "R": [ + [0.5541201274,0.02610072644,-0.8320274253], + [-0.1769665492,0.9803549196,-0.08710380092], + [0.8134087072,0.1955069916,0.5478533484] + ], + "t": [ + [-14.70019562], + [115.5481293], + [299.4445791] + ] + }, + { + "name": "05_08", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 8, + "K": [ + [744.96,0,386.044], + [0,745.46,258.776], + [0,0,1] + ], + "distCoef": [-0.325919,0.068823,-0.000458274,0.000477805,0.0465958], + "R": [ + [0.4763065258,-0.004539644313,-0.8792675845], + [-0.1710253429,0.980409884,-0.09770768372], + [0.8624861886,0.1969158475,0.4661992314] + ], + "t": [ + [-40.46029545], + [93.91456762], + [297.4902987] + ] + }, + { + "name": "05_09", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 9, + "K": [ + [745.188,0,367.116], + [0,745.437,236.843], + [0,0,1] + ], + "distCoef": [-0.328194,0.058828,0.000388874,-0.00143808,0.0829656], + "R": [ + [0.5065601345,-0.04543027129,-0.8610069225], + [-0.1705921502,0.9735884993,-0.1517357977], + [0.845159836,0.2237443283,0.4854310735] + ], + "t": [ + [-16.55300824], + [76.93410209], + [300.8962768] + ] + }, + { + "name": "05_10", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 10, + "K": [ + [747.452,0,374.886], + [0,747.648,257.28], + [0,0,1] + ], + "distCoef": [-0.337728,0.123608,0.00138141,5.97732e-05,-0.0225942], + "R": [ + [0.4549222289,-0.02855444123,-0.8900732608], + [-0.1699899924,0.9783230281,-0.1182685721], + [0.8741562607,0.2051065493,0.4402069233] + ], + "t": [ + [-13.61854908], + [96.6157071], + [299.0141417] + ] + }, + { + "name": "05_11", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 11, + "K": [ + [746.39,0,405.604], + [0,746.458,241.87], + [0,0,1] + ], + "distCoef": [-0.333064,0.100943,0.000870611,0.00103156,0.0180409], + "R": [ + 
[0.5002384593,-0.05591048228,-0.8640807264], + [-0.1916757277,0.9660062257,-0.1734715752], + [0.8444062406,0.2524004556,0.4725167836] + ], + "t": [ + [16.55277765], + [75.44647006], + [303.7304898] + ] + }, + { + "name": "05_12", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 12, + "K": [ + [745.943,0,392.757], + [0,746.143,272.1], + [0,0,1] + ], + "distCoef": [-0.323245,0.0770562,0.00168738,0.000666505,0.0382015], + "R": [ + [0.5344619138,-0.0483612619,-0.8438078283], + [-0.2099054746,0.9594877737,-0.1879438847], + [0.818712498,0.277568731,0.5026583782] + ], + "t": [ + [45.5535171], + [81.37072912], + [304.8427161] + ] + }, + { + "name": "05_13", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 13, + "K": [ + [748.463,0,383.471], + [0,748.465,243.614], + [0,0,1] + ], + "distCoef": [-0.34071,0.149034,0.000455623,0.000254671,-0.0668973], + "R": [ + [0.550270912,-0.09726860505,-0.8293013577], + [-0.1127468592,0.975440235,-0.1892207537], + [0.82733915,0.1976238001,0.525789658] + ], + "t": [ + [34.15956958], + [127.9842494], + [295.9545727] + ] + }, + { + "name": "05_14", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 14, + "K": [ + [744.467,0,372.192], + [0,744.287,242.67], + [0,0,1] + ], + "distCoef": [-0.321164,0.0557106,-0.000170048,0.000249902,0.0584864], + "R": [ + [0.5607110475,-0.1151130063,-0.8199708025], + [-0.101866971,0.9731761842,-0.2062795062], + [0.8217215109,0.1991911399,0.5339444244] + ], + "t": [ + [50.41224037], + [142.3474205], + [294.74195] + ] + }, + { + "name": "05_15", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 15, + "K": [ + [746.542,0,352.38], + [0,746.666,240.759], + [0,0,1] + ], + "distCoef": [-0.327959,0.100036,-0.000636984,-0.00122606,-0.0366604], + "R": [ + [0.5029624145,-0.05772144518,-0.8623787128], + [-0.198700467,0.9633205664,-0.180365215], + [0.8411580909,0.262071977,0.4730447599] + ], + "t": [ + [34.04469815], + [136.31759], + [307.4406203] + ] + }, + { + "name": "05_16", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 16, + "K": [ + [747.042,0,371.719], + [0,747.231,244.896], + [0,0,1] + ], + "distCoef": [-0.323957,0.0675271,-0.000219383,0.00030566,0.0452733], + "R": [ + [0.5145114331,-0.105655334,-0.8509494319], + [-0.1209004538,0.9735279663,-0.1939752023], + [0.8489175846,0.2026826318,0.4881174913] + ], + "t": [ + [9.341169646], + [165.8735131], + [297.8569993] + ] + }, + { + "name": "05_17", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 17, + "K": [ + [745.814,0,386.675], + [0,746.085,252.153], + [0,0,1] + ], + "distCoef": [-0.320652,0.0597547,0.000647483,5.56623e-05,0.0523558], + "R": [ + [0.5123119379,-0.06682282728,-0.856195765], + [-0.1341513719,0.9785027468,-0.1566390244], + [0.8482569703,0.1951078787,0.4923342645] + ], + "t": [ + [9.076647729], + [186.6487394], + [296.0424945] + ] + }, + { + "name": "05_18", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 18, + "K": [ + [744.362,0,367.747], + [0,744.705,261.961], + [0,0,1] + ], + "distCoef": [-0.317525,0.0240072,0.000331,-0.000409781,0.122239], + "R": [ + [0.5214772573,-0.05602259067,-0.8514240656], + [-0.1526209796,0.9756261952,-0.1576716965], + [0.8395047985,0.2121673788,0.5002166498] + ], + "t": [ + [-2.829687906], + [192.8140289], + [298.6606918] + ] + }, + { + "name": "05_19", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 19, + "K": [ + [744.259,0,353.379], + [0,744.524,245.823], + [0,0,1] + ], + "distCoef": 
[-0.320328,0.0298824,0.00026675,-0.00161079,0.123162], + "R": [ + [0.5556726344,-0.05485450779,-0.8295896012], + [-0.2099711545,0.9562161648,-0.2038694692], + [0.8044501462,0.2874745713,0.519825291] + ], + "t": [ + [-1.476630227], + [134.2745178], + [310.4571486] + ] + }, + { + "name": "05_20", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 20, + "K": [ + [743.679,0,405.845], + [0,743.856,234.88], + [0,0,1] + ], + "distCoef": [-0.326644,0.0646831,0.000108119,5.73367e-05,0.058946], + "R": [ + [0.447769915,-0.01338423954,-0.894048637], + [-0.18660487,0.9764723016,-0.1080762074], + [0.8744602482,0.2152271039,0.4347373552] + ], + "t": [ + [-41.39083575], + [143.2049031], + [297.8732354] + ] + }, + { + "name": "05_21", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 21, + "K": [ + [746.956,0,354.763], + [0,747.081,232.068], + [0,0,1] + ], + "distCoef": [-0.333648,0.0797639,-0.000768992,-0.00091097,0.0508097], + "R": [ + [0.5053420531,-0.009379958189,-0.8628681393], + [-0.2526298673,0.9545207072,-0.1583299394], + [0.8251106347,0.2979970402,0.4799897963] + ], + "t": [ + [-19.66925616], + [96.29580053], + [309.4868577] + ] + }, + { + "name": "05_22", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 22, + "K": [ + [748.369,0,375.575], + [0,748.642,247.648], + [0,0,1] + ], + "distCoef": [-0.339087,0.143465,-0.000470446,0.00132222,-0.0624301], + "R": [ + [0.54260376,-0.05746408722,-0.8380209057], + [-0.1470082191,0.975763273,-0.1620944744], + [0.8270246327,0.2111490322,0.5210051277] + ], + "t": [ + [3.173863757], + [116.0988382], + [299.4207466] + ] + }, + { + "name": "05_23", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 23, + "K": [ + [744.544,0,368.615], + [0,744.426,281.181], + [0,0,1] + ], + "distCoef": [-0.322575,0.0664483,0.00114224,0.000391788,0.0483369], + "R": [ + [0.5347472888,-0.05715349527,-0.8430769924], + [-0.1466458645,0.9762943366,-0.1591991164], + [0.832190079,0.2087650503,0.5136894259] + ], + "t": [ + [16.7223507], + [130.5590862], + [298.5444367] + ] + }, + { + "name": "05_24", + "type": "vga", + "resolution": [640,480], + "panel": 5, + "node": 24, + "K": [ + [743.308,0,356.74], + [0,743.243,228.93], + [0,0,1] + ], + "distCoef": [-0.321093,0.0447792,0.000127467,-8.40104e-05,0.095825], + "R": [ + [0.5706235669,-0.133891243,-0.8102233519], + [-0.1678811389,0.9467635938,-0.2746900447], + [0.8038685639,0.2927658322,0.5177678046] + ], + "t": [ + [6.742844805], + [124.9131408], + [309.8640068] + ] + }, + { + "name": "06_01", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 1, + "K": [ + [744.518,0,344.042], + [0,744.512,240.289], + [0,0,1] + ], + "distCoef": [-0.313532,-0.0139368,0.00116047,-0.000125352,0.195046], + "R": [ + [-0.3305715804,0.1011846603,-0.9383411399], + [-0.314462461,0.9256148845,0.2105954561], + [0.8898515555,0.3646899369,-0.2741631979] + ], + "t": [ + [-23.56718534], + [104.1648487], + [320.754952] + ] + }, + { + "name": "06_02", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 2, + "K": [ + [748.956,0,345.566], + [0,748.875,227.82], + [0,0,1] + ], + "distCoef": [-0.335662,0.0955564,-6.0167e-05,-0.0012999,0.0278092], + "R": [ + [-0.2903396332,0.1603112194,-0.9433998147], + [-0.341086429,0.9037763758,0.2585504022], + [0.8940709957,0.3968483028,-0.2077221201] + ], + "t": [ + [-2.499901432], + [69.14355517], + [325.2941984] + ] + }, + { + "name": "06_03", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 3, + "K": [ + 
[743.901,0,369.68], + [0,743.816,251.042], + [0,0,1] + ], + "distCoef": [-0.320568,0.044977,0.000366128,-0.00033077,0.103335], + "R": [ + [-0.3123459653,0.110763308,-0.943488997], + [-0.3278062139,0.9196080197,0.216481353], + [0.891618239,0.3768986331,-0.250926954] + ], + "t": [ + [2.578346941], + [71.05917793], + [323.4074447] + ] + }, + { + "name": "06_04", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 4, + "K": [ + [745.814,0,378.476], + [0,745.908,222.393], + [0,0,1] + ], + "distCoef": [-0.316287,0.0251632,0.000357033,0.00145486,0.13215], + "R": [ + [-0.2756543214,0.09031338143,-0.9570048005], + [-0.3333214643,0.9248259371,0.1832860813], + [0.9016160472,0.3695138418,-0.2248288776] + ], + "t": [ + [26.15902854], + [86.10496093], + [322.4382284] + ] + }, + { + "name": "06_05", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 5, + "K": [ + [750.419,0,363.736], + [0,750.614,222.964], + [0,0,1] + ], + "distCoef": [-0.344753,0.14329,-0.000836382,-0.000451111,-0.060951], + "R": [ + [-0.2930259634,0.06094491301,-0.9541601031], + [-0.3875087878,0.9047544541,0.1767945619], + [0.8740553324,0.4215508218,-0.2414998562] + ], + "t": [ + [36.26889278], + [61.41890121], + [327.3260635] + ] + }, + { + "name": "06_06", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 6, + "K": [ + [747.394,0,354.724], + [0,747.506,211.184], + [0,0,1] + ], + "distCoef": [-0.329009,0.0921746,-0.00050966,0.000333806,0.021085], + "R": [ + [-0.2297156979,0.02557529828,-0.9729216835], + [-0.3964529538,0.9104994627,0.1175405629], + [0.888850805,0.4127185877,-0.199016617] + ], + "t": [ + [62.78312093], + [81.38139883], + [324.7093469] + ] + }, + { + "name": "06_07", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 7, + "K": [ + [746.623,0,374.989], + [0,746.758,209.923], + [0,0,1] + ], + "distCoef": [-0.319339,0.0433323,-0.00139256,0.000754597,0.0938733], + "R": [ + [-0.2846142448,0.03267216609,-0.9580852056], + [-0.3313740809,0.934457856,0.1303063082], + [0.8995476364,0.3545716359,-0.255133308] + ], + "t": [ + [45.81195811], + [121.7115234], + [320.8009986] + ] + }, + { + "name": "06_08", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 8, + "K": [ + [745.971,0,357.954], + [0,746.024,209.947], + [0,0,1] + ], + "distCoef": [-0.314348,0.0246684,-0.0014997,0.000635776,0.111152], + "R": [ + [-0.3038162213,-0.0261928812,-0.9523705354], + [-0.3441704234,0.9351353343,0.08407512184], + [0.8883931693,0.3533211563,-0.2931240987] + ], + "t": [ + [41.47715732], + [140.438376], + [322.3540865] + ] + }, + { + "name": "06_09", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 9, + "K": [ + [742.648,0,362.103], + [0,742.703,220.817], + [0,0,1] + ], + "distCoef": [-0.304218,-0.0643312,-0.000139411,-0.000234647,0.289172], + "R": [ + [-0.2807259034,-0.0411671215,-0.958904706], + [-0.3740921558,0.9247597922,0.06981680165], + [0.8838823599,0.3783181134,-0.2750043253] + ], + "t": [ + [37.64720227], + [153.3424109], + [325.0305142] + ] + }, + { + "name": "06_10", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 10, + "K": [ + [747.72,0,366.165], + [0,747.851,213.209], + [0,0,1] + ], + "distCoef": [-0.324647,0.0523798,-0.00077308,-0.000271098,0.0916616], + "R": [ + [-0.2880158499,0.02777358159,-0.957222805], + [-0.3788720768,0.9147158267,0.1405379157], + [0.8794900907,0.4031421393,-0.2529300217] + ], + "t": [ + [33.16578395], + [147.9736193], + [327.8869733] + ] + }, + { + "name": "06_11", + "type": "vga", + 
"resolution": [640,480], + "panel": 6, + "node": 11, + "K": [ + [745.331,0,369.444], + [0,745.587,207.732], + [0,0,1] + ], + "distCoef": [-0.317455,0.0357855,-0.00041249,0.000556817,0.0920153], + "R": [ + [-0.3142048567,0.04518634316,-0.9482792323], + [-0.3166241188,0.9366885696,0.1495449465], + [0.8949997069,0.3472358248,-0.2800050117] + ], + "t": [ + [26.61359186], + [187.9055539], + [317.8889871] + ] + }, + { + "name": "06_12", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 12, + "K": [ + [747.25,0,346.366], + [0,747.394,225.779], + [0,0,1] + ], + "distCoef": [-0.328454,0.0750084,3.92686e-05,0.00130952,0.0669429], + "R": [ + [-0.2993781475,0.05639323365,-0.9524665495], + [-0.3171785116,0.9355987261,0.1550897014], + [0.8998725002,0.3485323901,-0.2622110915] + ], + "t": [ + [13.58039626], + [195.4066632], + [317.2443523] + ] + }, + { + "name": "06_13", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 13, + "K": [ + [743.861,0,344.414], + [0,743.872,231.421], + [0,0,1] + ], + "distCoef": [-0.307564,-0.0231037,-0.000140407,-0.000635225,0.208058], + "R": [ + [-0.2583036736,0.07116007646,-0.9634393887], + [-0.3357690773,0.9284960528,0.1586007776], + [0.905835713,0.3644603181,-0.2159405881] + ], + "t": [ + [14.66480509], + [172.1699927], + [320.6722019] + ] + }, + { + "name": "06_14", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 14, + "K": [ + [744.949,0,378.98], + [0,744.921,225.408], + [0,0,1] + ], + "distCoef": [-0.321047,0.0567081,-0.000162218,0.000699701,0.0634367], + "R": [ + [-0.3208579847,0.07871363947,-0.9438507915], + [-0.3472646452,0.9173632389,0.1945557869], + [0.8811682132,0.3901907879,-0.267008856] + ], + "t": [ + [-45.70363788], + [100.2282059], + [322.9364507] + ] + }, + { + "name": "06_15", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 15, + "K": [ + [745.712,0,360.895], + [0,745.741,234.163], + [0,0,1] + ], + "distCoef": [-0.31006,-0.0103454,0.000398478,0.000813845,0.181221], + "R": [ + [-0.3227895896,0.1367774117,-0.9365355415], + [-0.3406635237,0.9063958148,0.2497898928], + [0.8830375102,0.3996730746,-0.245980058] + ], + "t": [ + [-14.93002532], + [154.0180569], + [326.396188] + ] + }, + { + "name": "06_16", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 16, + "K": [ + [745.931,0,372.193], + [0,746.03,212.813], + [0,0,1] + ], + "distCoef": [-0.325757,0.0830346,-0.000419051,0.00216162,0.0290765], + "R": [ + [-0.311559769,0.02363818266,-0.9499324958], + [-0.312276077,0.9416182622,0.1258518973], + [0.8974486961,0.3358515813,-0.2859887293] + ], + "t": [ + [-41.03283731], + [153.3338286], + [314.9665339] + ] + }, + { + "name": "06_17", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 17, + "K": [ + [744.756,0,368.403], + [0,744.752,202.816], + [0,0,1] + ], + "distCoef": [-0.313223,0.00720848,-0.00119606,0.000542174,0.130737], + "R": [ + [-0.3236003046,0.09291211415,-0.9416210394], + [-0.3175516679,0.9267842511,0.2005788875], + [0.8913157584,0.3639207207,-0.2704032691] + ], + "t": [ + [-41.098271], + [130.5289196], + [319.7107876] + ] + }, + { + "name": "06_18", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 18, + "K": [ + [744.889,0,373.989], + [0,745.092,230.989], + [0,0,1] + ], + "distCoef": [-0.319065,0.0283013,-0.000935078,-0.000739787,0.111424], + "R": [ + [-0.3391260928,0.0773602665,-0.9375547357], + [-0.3008220503,0.9353680392,0.1859911968], + [0.8913470633,0.3451116057,-0.2939360344] + ], + "t": [ + [-22.38901828], + 
[189.8595323], + [315.0907711] + ] + }, + { + "name": "06_19", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 19, + "K": [ + [743.21,0,358.424], + [0,743.138,251.445], + [0,0,1] + ], + "distCoef": [-0.316603,0.00648778,0.000375455,-0.000277526,0.16085], + "R": [ + [-0.34774011,0.09728469559,-0.9325301624], + [-0.3453355468,0.9113903597,0.2238548019], + [0.8716766465,0.399879107,-0.2833311204] + ], + "t": [ + [-13.32995299], + [105.9918293], + [324.8353482] + ] + }, + { + "name": "06_20", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 20, + "K": [ + [745.315,0,375.798], + [0,745.342,214.671], + [0,0,1] + ], + "distCoef": [-0.317661,0.021421,-0.000865931,0.000266434,0.124612], + "R": [ + [-0.2889220833,0.06736289331,-0.9549797225], + [-0.355115135,0.918816287,0.172249446], + [0.8890541438,0.3888944219,-0.2415447329] + ], + "t": [ + [16.18922492], + [101.394333], + [324.5371374] + ] + }, + { + "name": "06_21", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 21, + "K": [ + [743.803,0,341.335], + [0,743.805,238.935], + [0,0,1] + ], + "distCoef": [-0.305727,-0.0577903,-0.000702133,-0.00085287,0.249773], + "R": [ + [-0.2867564999,0.0564691645,-0.9563377767], + [-0.3641939053,0.9168870998,0.1633427245], + [0.8860775977,0.3951319776,-0.24235761] + ], + "t": [ + [29.77890794], + [113.785435], + [325.4988706] + ] + }, + { + "name": "06_22", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 22, + "K": [ + [745.285,0,373.625], + [0,745.232,235.431], + [0,0,1] + ], + "distCoef": [-0.319503,0.0483306,-0.000362012,0.00120612,0.080115], + "R": [ + [-0.3458253526,0.08893014684,-0.9340750797], + [-0.3902640321,0.8916714915,0.2293816395], + [0.8532870623,0.4438618933,-0.2736563703] + ], + "t": [ + [18.96316513], + [116.1979138], + [333.2100324] + ] + }, + { + "name": "06_23", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 23, + "K": [ + [744.536,0,366.592], + [0,744.501,224.531], + [0,0,1] + ], + "distCoef": [-0.312705,-0.014521,0.000375544,8.36622e-05,0.188212], + "R": [ + [-0.3181142509,0.09038767844,-0.94373375], + [-0.4081954831,0.8853909401,0.2223945386], + [0.8556750382,0.455974726,-0.2447596336] + ], + "t": [ + [6.972278595], + [119.3141773], + [334.5341124] + ] + }, + { + "name": "06_24", + "type": "vga", + "resolution": [640,480], + "panel": 6, + "node": 24, + "K": [ + [744.6,0,358.514], + [0,744.655,220.515], + [0,0,1] + ], + "distCoef": [-0.30152,-0.0573254,-0.000856409,-0.000288003,0.227002], + "R": [ + [-0.3545583501,0.05661769889,-0.9333181732], + [-0.3227337004,0.929412527,0.1789841147], + [0.8775712706,0.3646735401,-0.3112585327] + ], + "t": [ + [-25.22428756], + [139.0090865], + [319.514146] + ] + }, + { + "name": "07_01", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 1, + "K": [ + [745.635,0,384.154], + [0,745.75,223.733], + [0,0,1] + ], + "distCoef": [-0.328279,0.104082,-0.000872931,0.00144148,0.00404207], + "R": [ + [-0.9078071857,0.03344162453,-0.4180523547], + [0.00958043905,0.9982092569,0.05904654639], + [0.4192783428,0.049597754,-0.9065019217] + ], + "t": [ + [-23.31434773], + [152.0493649], + [282.3431498] + ] + }, + { + "name": "07_02", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 2, + "K": [ + [746.944,0,375.746], + [0,747.112,207.581], + [0,0,1] + ], + "distCoef": [-0.321827,0.078307,-0.00112183,4.35862e-05,0.0396046], + "R": [ + [-0.9306435439,0.005427673037,-0.3658867782], + [-0.02457764723,0.9967049447,0.07729936951], + 
[0.3651007167,0.08093079535,-0.9274436225] + ], + "t": [ + [-62.01828104], + [131.8151818], + [284.3018088] + ] + }, + { + "name": "07_03", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 3, + "K": [ + [743.881,0,383.122], + [0,743.965,237.105], + [0,0,1] + ], + "distCoef": [-0.311008,0.000325185,-0.000782967,0.00055371,0.154469], + "R": [ + [-0.9217631286,0.06528892794,-0.3822173342], + [0.03992506463,0.996464058,0.07392814261], + [0.3856925251,0.05288418425,-0.9211104924] + ], + "t": [ + [-43.22640533], + [121.5976731], + [282.3432951] + ] + }, + { + "name": "07_04", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 4, + "K": [ + [743.69,0,370.307], + [0,743.828,227.79], + [0,0,1] + ], + "distCoef": [-0.303025,-0.0263668,-0.000445815,0.00071591,0.180166], + "R": [ + [-0.9409979296,0.06863452498,-0.3313792366], + [0.04529042225,0.9959498431,0.07767037874], + [0.3353679682,0.05807936004,-0.9402952269] + ], + "t": [ + [-38.37277115], + [113.0266013], + [281.4230584] + ] + }, + { + "name": "07_05", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 5, + "K": [ + [743.998,0,375.484], + [0,744.299,220.79], + [0,0,1] + ], + "distCoef": [-0.310908,0.00595719,-5.69241e-05,0.000519591,0.131448], + "R": [ + [-0.9269484075,0.08594630429,-0.3652121064], + [0.04467826469,0.9917683984,0.1199970688], + [0.3725191305,0.09491404865,-0.9231580692] + ], + "t": [ + [-23.36597135], + [80.23534001], + [286.4206576] + ] + }, + { + "name": "07_06", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 6, + "K": [ + [745.602,0,379.444], + [0,745.67,224.268], + [0,0,1] + ], + "distCoef": [-0.303286,-0.0402497,-0.00132196,0.00012981,0.210105], + "R": [ + [-0.923694641,0.09319000989,-0.3716232396], + [0.04673933936,0.9901316615,0.1321163393], + [0.3802678586,0.1046657299,-0.9189349491] + ], + "t": [ + [-0.9450645075], + [68.69008136], + [287.3198917] + ] + }, + { + "name": "07_07", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 7, + "K": [ + [745.731,0,365.823], + [0,745.481,229.263], + [0,0,1] + ], + "distCoef": [-0.308219,-0.0231519,0.000110727,0.000180113,0.209056], + "R": [ + [-0.917494877,0.04967698427,-0.3946331815], + [0.001316203411,0.9925436367,0.1218827179], + [0.3977454189,0.1113073518,-0.9107190869] + ], + "t": [ + [18.92434207], + [79.05208738], + [288.1952445] + ] + }, + { + "name": "07_08", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 8, + "K": [ + [745.611,0,393.911], + [0,745.863,244.069], + [0,0,1] + ], + "distCoef": [-0.318705,0.0460564,0.000184451,0.000507881,0.0745222], + "R": [ + [-0.9083609307,0.09070031,-0.4082326216], + [0.05268537174,0.9932388068,0.1034452715], + [0.4148550001,0.07245775567,-0.9069979066] + ], + "t": [ + [48.31394514], + [81.42535523], + [283.8217571] + ] + }, + { + "name": "07_09", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 9, + "K": [ + [745.77,0,370.33], + [0,746.047,217.48], + [0,0,1] + ], + "distCoef": [-0.321786,0.069205,4.67533e-05,5.58471e-05,0.0372207], + "R": [ + [-0.9211612824,0.007939579541,-0.3891000576], + [-0.02433705705,0.996659961,0.07795274024], + [0.3884193603,0.08127659646,-0.9178913418] + ], + "t": [ + [49.65486911], + [97.0413663], + [285.6851525] + ] + }, + { + "name": "07_10", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 10, + "K": [ + [744.504,0,363.969], + [0,744.833,247.068], + [0,0,1] + ], + "distCoef": [-0.335916,0.144192,-0.000823922,-0.000462503,-0.076361], + "R": [ + 
[-0.9225918644,-0.01579725191,-0.3854538864], + [-0.05416624958,0.9945677902,0.08888716518], + [0.381955847,0.1028851669,-0.9184358297] + ], + "t": [ + [40.86826856], + [113.0714764], + [288.4804376] + ] + }, + { + "name": "07_11", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 11, + "K": [ + [744.999,0,387.199], + [0,745.384,239.21], + [0,0,1] + ], + "distCoef": [-0.313806,0.0330336,-7.01628e-05,0.00132279,0.0985619], + "R": [ + [-0.9109471902,-0.006922747781,-0.4124648981], + [-0.04540685091,0.9954664163,0.08357530662], + [0.4100163832,0.09486142287,-0.9071316751] + ], + "t": [ + [65.64483344], + [130.0336458], + [285.8729547] + ] + }, + { + "name": "07_12", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 12, + "K": [ + [743.664,0,350.646], + [0,743.861,222.503], + [0,0,1] + ], + "distCoef": [-0.300623,-0.0667329,-0.000394627,-0.00107967,0.272621], + "R": [ + [-0.9268683851,0.02536908581,-0.3745282449], + [0.006256924582,0.9986192343,0.0521581796], + [0.3753343145,0.04600037271,-0.9257473295] + ], + "t": [ + [57.10937388], + [163.0891099], + [280.8513179] + ] + }, + { + "name": "07_13", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 13, + "K": [ + [744.176,0,390.977], + [0,744.332,246.666], + [0,0,1] + ], + "distCoef": [-0.327257,0.10216,-0.000582688,0.00201022,0.0126373], + "R": [ + [-0.9290120658,-0.01909429991,-0.3695564765], + [-0.04453762663,0.9971777882,0.06043888335], + [0.3673594716,0.07260762025,-0.9272406117] + ], + "t": [ + [26.5211548], + [160.1280328], + [285.2494721] + ] + }, + { + "name": "07_14", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 14, + "K": [ + [744.044,0,360.721], + [0,744.333,226.474], + [0,0,1] + ], + "distCoef": [-0.311296,-0.00746755,-0.00165304,-0.000168766,0.17966], + "R": [ + [-0.9305033137,0.06302128148,-0.3608211486], + [0.03165130136,0.9952368859,0.09220485899], + [0.3649133847,0.07437646791,-0.9280659258] + ], + "t": [ + [37.8814582], + [178.0304645], + [285.6034633] + ] + }, + { + "name": "07_15", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 15, + "K": [ + [744.03,0,362.147], + [0,744.447,229.329], + [0,0,1] + ], + "distCoef": [-0.314413,0.0379836,-0.000745365,2.01034e-05,0.0898919], + "R": [ + [-0.9265853662,0.03975182478,-0.373977742], + [0.01411888978,0.9973739765,0.07103385017], + [0.3758193929,0.06053877555,-0.9247133829] + ], + "t": [ + [16.14446289], + [185.021862], + [282.5666312] + ] + }, + { + "name": "07_16", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 16, + "K": [ + [743.673,0,368.897], + [0,743.962,238.378], + [0,0,1] + ], + "distCoef": [-0.314216,0.0200058,-0.0002257,-0.000345788,0.11969], + "R": [ + [-0.9350006114,0.024774913,-0.3537796777], + [-0.006073372197,0.9962920776,0.08582080369], + [0.354594093,0.08239113958,-0.9313832344] + ], + "t": [ + [-10.51100446], + [168.6528502], + [285.9762696] + ] + }, + { + "name": "07_17", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 17, + "K": [ + [744.686,0,385.346], + [0,745.049,227.767], + [0,0,1] + ], + "distCoef": [-0.317176,0.0455424,-0.000136917,0.000534438,0.0739505], + "R": [ + [-0.908638426,0.05327873405,-0.4141709639], + [0.04010861029,0.9983767379,0.04043746577], + [0.4156531128,0.02013121347,-0.9093004036] + ], + "t": [ + [-7.322164421], + [189.4505625], + [275.8940033] + ] + }, + { + "name": "07_18", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 18, + "K": [ + [746.282,0,378.432], + [0,746.624,237.775], + 
[0,0,1] + ], + "distCoef": [-0.320382,0.058651,0.000451819,0.000534403,0.062414], + "R": [ + [-0.916555331,0.01769811564,-0.3995160846], + [-0.01470055472,0.9968539618,0.07788499561], + [0.3996376094,0.077259016,-0.9134116408] + ], + "t": [ + [-37.37478029], + [164.0712496], + [285.8486829] + ] + }, + { + "name": "07_19", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 19, + "K": [ + [743.687,0,374.362], + [0,743.883,225.048], + [0,0,1] + ], + "distCoef": [-0.322503,0.0715253,7.77555e-05,0.000517375,0.0539586], + "R": [ + [-0.9239544056,0.01616424802,-0.3821609261], + [-0.020576852,0.9955594902,0.09185801365], + [0.3819487525,0.09273628522,-0.9195189677] + ], + "t": [ + [-17.14443298], + [133.4982453], + [287.2304165] + ] + }, + { + "name": "07_20", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 20, + "K": [ + [745.801,0,368.555], + [0,746.033,233.687], + [0,0,1] + ], + "distCoef": [-0.317685,0.0475287,-3.52395e-05,0.000512076,0.0805211], + "R": [ + [-0.9241543321,-0.01069440692,-0.3818696113], + [-0.04324692472,0.9961108974,0.076764468], + [0.3795635307,0.08745690199,-0.9210227014] + ], + "t": [ + [-16.56758847], + [113.8864258], + [286.5218078] + ] + }, + { + "name": "07_21", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 21, + "K": [ + [744.1,0,390.405], + [0,744.284,237.593], + [0,0,1] + ], + "distCoef": [-0.322514,0.0588182,0.000321804,0.00147162,0.0689104], + "R": [ + [-0.9369369296,0.006948104691,-0.3494294118], + [-0.02026391849,0.9970404822,0.07415962808], + [0.3489105381,0.07656370335,-0.9340232522] + ], + "t": [ + [-3.618393153], + [111.1940513], + [285.5030449] + ] + }, + { + "name": "07_22", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 22, + "K": [ + [747.001,0,381.032], + [0,747.132,234.437], + [0,0,1] + ], + "distCoef": [-0.324882,0.0577225,-0.00134011,-0.00135265,0.0819201], + "R": [ + [-0.9282296861,0.06047570579,-0.3670590401], + [0.02337036389,0.9942284933,0.1047068731], + [0.3712727784,0.08861372459,-0.9242857414] + ], + "t": [ + [25.6408869], + [119.8980517], + [286.9452799] + ] + }, + { + "name": "07_23", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 23, + "K": [ + [743.981,0,363.51], + [0,744.339,258.582], + [0,0,1] + ], + "distCoef": [-0.313768,0.0101513,0.00111395,-0.00104272,0.1345], + "R": [ + [-0.9138255678,-0.001018785166,-0.4061056435], + [-0.03060482875,0.9973259054,0.06636552484], + [0.4049520663,0.0730753071,-0.9114130916] + ], + "t": [ + [24.3580015], + [146.5427691], + [284.2261849] + ] + }, + { + "name": "07_24", + "type": "vga", + "resolution": [640,480], + "panel": 7, + "node": 24, + "K": [ + [744.847,0,398.685], + [0,745.01,270.264], + [0,0,1] + ], + "distCoef": [-0.328511,0.106892,0.000179407,0.00152869,-0.00291861], + "R": [ + [-0.915939158,0.01937877811,-0.4008490012], + [-0.01852012751,0.9957282098,0.09045627137], + [0.4008895904,0.09027621565,-0.9116675607] + ], + "t": [ + [6.147743662], + [145.7157982], + [287.1579534] + ] + }, + { + "name": "08_01", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 1, + "K": [ + [743.703,0,360.221], + [0,744.108,227.682], + [0,0,1] + ], + "distCoef": [-0.309411,-0.0239561,-0.001159,0.000249551,0.191643], + "R": [ + [-0.6256262875,-0.004424555618,-0.7801103586], + [-0.1745259617,0.9754325172,0.134432485], + [0.7603502068,0.2202540071,-0.6110284243] + ], + "t": [ + [5.656398722], + [175.9817187], + [302.7764948] + ] + }, + { + "name": "08_02", + "type": "vga", + "resolution": [640,480], + 
"panel": 8, + "node": 2, + "K": [ + [747.203,0,376.344], + [0,747.435,209.923], + [0,0,1] + ], + "distCoef": [-0.331616,0.11313,4.7739e-05,0.00134479,-0.0154118], + "R": [ + [-0.6724252099,0.1092176997,-0.7320627235], + [-0.09964199407,0.9666926758,0.2357472025], + [0.7334274403,0.2314665517,-0.6391458561] + ], + "t": [ + [-0.9742570867], + [185.4525058], + [305.0714088] + ] + }, + { + "name": "08_03", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 3, + "K": [ + [747.234,0,368.091], + [0,747.404,224.293], + [0,0,1] + ], + "distCoef": [-0.329137,0.0905459,-0.000565165,-0.000329878,0.0231933], + "R": [ + [-0.656899377,0.0205246652,-0.7536988435], + [-0.2005757989,0.9588523348,0.2009267253], + [0.7268098496,0.2831623883,-0.6257527502] + ], + "t": [ + [-32.7353206], + [153.4285774], + [313.8994992] + ] + }, + { + "name": "08_04", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 4, + "K": [ + [747.386,0,362.788], + [0,747.713,235.953], + [0,0,1] + ], + "distCoef": [-0.341304,0.154379,-0.000777774,-0.000654564,-0.0867958], + "R": [ + [-0.6631685233,0.06657565756,-0.7455033143], + [-0.1433461882,0.9663011288,0.2138083224], + [0.7346151238,0.2486560079,-0.6312771259] + ], + "t": [ + [-22.98714967], + [144.6795235], + [307.788251] + ] + }, + { + "name": "08_05", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 5, + "K": [ + [745.746,0,376.748], + [0,745.752,233.642], + [0,0,1] + ], + "distCoef": [-0.32088,0.0642866,0.000720856,0.00118823,0.0489989], + "R": [ + [-0.6568191598,0.04935682433,-0.7524310568], + [-0.1452125328,0.970898021,0.19044777], + [0.7399337211,0.2343521638,-0.6305371929] + ], + "t": [ + [-42.15667108], + [135.9397275], + [306.138018] + ] + }, + { + "name": "08_06", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 6, + "K": [ + [743.581,0,359.642], + [0,743.625,223.766], + [0,0,1] + ], + "distCoef": [-0.309434,-0.0145066,-0.000137344,-0.000208072,0.169515], + "R": [ + [-0.6714433509,-0.01781555577,-0.7408417054], + [-0.2359597182,0.9528188479,0.1909430659], + [0.7024861834,0.3030162521,-0.6439676336] + ], + "t": [ + [-57.25895983], + [89.79547495], + [311.6502108] + ] + }, + { + "name": "08_07", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 7, + "K": [ + [745.148,0,371.237], + [0,745.103,220.621], + [0,0,1] + ], + "distCoef": [-0.318768,0.034703,-0.000217256,0.000447556,0.0954449], + "R": [ + [-0.7012843801,0.01049644172,-0.7128043511], + [-0.1276034542,0.9818947595,0.1400001421], + [0.7013683602,0.1891362102,-0.6872480755] + ], + "t": [ + [-43.70728874], + [118.2041714], + [298.0588141] + ] + }, + { + "name": "08_08", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 8, + "K": [ + [743.06,0,391.891], + [0,743.237,230.861], + [0,0,1] + ], + "distCoef": [-0.322908,0.0553375,0.000339696,0.00130059,0.0777268], + "R": [ + [-0.6299217379,0.07604043096,-0.7729272003], + [-0.1362742651,0.9689348188,0.2063846932], + [0.7646096578,0.2353362908,-0.5999907511] + ], + "t": [ + [-3.915515028], + [82.19520224], + [306.2551203] + ] + }, + { + "name": "08_09", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 9, + "K": [ + [746.456,0,356.955], + [0,746.592,233.352], + [0,0,1] + ], + "distCoef": [-0.320498,0.0507213,0.000550471,0.000126643,0.0741224], + "R": [ + [-0.684872543,0.06612723284,-0.7256561093], + [-0.09767122593,0.9785553778,0.1813551881], + [0.7220872049,0.1950809107,-0.6637269822] + ], + "t": [ + [-6.194765679], + [87.40737989], + [301.7039487] + ] + 
}, + { + "name": "08_10", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 10, + "K": [ + [747.33,0,361.528], + [0,747.71,220.883], + [0,0,1] + ], + "distCoef": [-0.322455,0.0389243,0.00118705,0.000768992,0.12227], + "R": [ + [-0.6055801648,0.01225702185,-0.7956899079], + [-0.1760343759,0.973047512,0.1489645524], + [0.7760699469,0.2302787546,-0.5871006154] + ], + "t": [ + [32.64204154], + [89.24589085], + [303.2777117] + ] + }, + { + "name": "08_11", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 11, + "K": [ + [747.774,0,350.264], + [0,747.981,233.163], + [0,0,1] + ], + "distCoef": [-0.312094,-0.0263709,0.00148203,-0.000526901,0.233175], + "R": [ + [-0.6738094891,0.06987822761,-0.7355935058], + [-0.1142917175,0.9736808734,0.1971876265], + [0.730012449,0.216939139,-0.6480889092] + ], + "t": [ + [35.79986479], + [83.7107121], + [303.8218457] + ] + }, + { + "name": "08_12", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 12, + "K": [ + [744.899,0,366.47], + [0,744.848,222.726], + [0,0,1] + ], + "distCoef": [-0.30396,-0.0418844,-0.00058576,-0.000160605,0.231689], + "R": [ + [-0.6160341517,-0.01803679921,-0.7875129191], + [-0.1884772348,0.9740736778,0.1251271436], + [0.7648387123,0.2255108512,-0.6034621779] + ], + "t": [ + [61.57356311], + [97.36793025], + [301.4047959] + ] + }, + { + "name": "08_13", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 13, + "K": [ + [746.859,0,368.586], + [0,747.139,224.684], + [0,0,1] + ], + "distCoef": [-0.318047,0.0428323,-0.000551709,0.000692584,0.0895927], + "R": [ + [-0.6485099772,-0.04236983322,-0.7600260566], + [-0.2235198928,0.9650338886,0.1369249841], + [0.7276494121,0.258678161,-0.6353046057] + ], + "t": [ + [38.13208236], + [106.9572182], + [307.8393222] + ] + }, + { + "name": "08_14", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 14, + "K": [ + [744.505,0,357.32], + [0,744.53,228.165], + [0,0,1] + ], + "distCoef": [-0.303025,-0.0702212,0.000533599,-0.000753966,0.269146], + "R": [ + [-0.6825611814,-0.04644305139,-0.729351271], + [-0.1871280484,0.9758162042,0.1129859684], + [0.7064653757,0.213601916,-0.6747450588] + ], + "t": [ + [41.82592662], + [132.5834032], + [304.3020009] + ] + }, + { + "name": "08_15", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 15, + "K": [ + [745.837,0,357.73], + [0,745.88,221.629], + [0,0,1] + ], + "distCoef": [-0.3197,0.0439542,-0.00136466,0.00170195,0.109142], + "R": [ + [-0.6069626381,-0.02117938565,-0.7944481037], + [-0.2107505505,0.968144583,0.1352045554], + [0.7662770787,0.2494944888,-0.5920911574] + ], + "t": [ + [64.87618524], + [141.1933336], + [303.6799609] + ] + }, + { + "name": "08_16", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 16, + "K": [ + [744.767,0,345.102], + [0,744.781,229.581], + [0,0,1] + ], + "distCoef": [-0.307131,-0.033453,0.0002274,-0.000565369,0.224073], + "R": [ + [-0.6350262321,-0.03398669713,-0.7717425665], + [-0.2527580664,0.9531820242,0.1660041824], + [0.7299692079,0.3004811693,-0.6138860012] + ], + "t": [ + [34.611726], + [134.434862], + [314.3473002] + ] + }, + { + "name": "08_17", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 17, + "K": [ + [743.543,0,370.548], + [0,743.847,224.118], + [0,0,1] + ], + "distCoef": [-0.308645,-0.0111516,9.80345e-05,-0.000744439,0.160705], + "R": [ + [-0.6124225565,-0.05791042639,-0.7884066177], + [-0.1936876385,0.977907652,0.07862393367], + [0.7664357188,0.2008556864,-0.610109238] + ], + "t": 
[ + [28.62018644], + [186.6213498], + [297.6164741] + ] + }, + { + "name": "08_18", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 18, + "K": [ + [743.39,0,376.249], + [0,743.751,216.723], + [0,0,1] + ], + "distCoef": [-0.319375,0.0602092,-1.05699e-05,0.00110696,0.0487054], + "R": [ + [-0.6887185447,0.08181736584,-0.720397588], + [-0.1043667464,0.9720764384,0.2101784484], + [0.7174777686,0.2199393475,-0.6609480577] + ], + "t": [ + [20.48604056], + [189.7333893], + [302.8177068] + ] + }, + { + "name": "08_19", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 19, + "K": [ + [747.038,0,360.923], + [0,747.259,204.023], + [0,0,1] + ], + "distCoef": [-0.32724,0.0825647,-0.000697091,0.000733699,0.0397455], + "R": [ + [-0.6726100217,0.03848005322,-0.7389959704], + [-0.1487286588,0.9712392562,0.1859411014], + [0.7248969201,0.2349757278,-0.6475421705] + ], + "t": [ + [3.177324598], + [151.0352965], + [305.3818706] + ] + }, + { + "name": "08_20", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 20, + "K": [ + [747.914,0,388.693], + [0,747.835,242.83], + [0,0,1] + ], + "distCoef": [-0.338429,0.134609,0.00136964,0.000561914,-0.0365273], + "R": [ + [-0.6685313457,0.02780025068,-0.7431641715], + [-0.1765857142,0.9647874561,0.194942684], + [0.722414926,0.2615574708,-0.6400815293] + ], + "t": [ + [-14.15175066], + [129.456494], + [308.9585645] + ] + }, + { + "name": "08_21", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 21, + "K": [ + [746.296,0,369.274], + [0,746.424,219.198], + [0,0,1] + ], + "distCoef": [-0.312598,-0.010091,-0.000298989,-0.000771876,0.160922], + "R": [ + [-0.6341455554,-0.01222382885,-0.7731170626], + [-0.1896201401,0.9718007188,0.1401697733], + [0.7496023059,0.2354866044,-0.6185809907] + ], + "t": [ + [-6.414673774], + [116.5175191], + [305.5663378] + ] + }, + { + "name": "08_22", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 22, + "K": [ + [743.609,0,361.562], + [0,743.794,221.87], + [0,0,1] + ], + "distCoef": [-0.314273,0.00142644,4.14402e-05,0.000150079,0.159707], + "R": [ + [-0.6552794634,-0.0176584532,-0.7551801135], + [-0.2007508014,0.9678470127,0.1515627784], + [0.7282224527,0.2509189891,-0.6377552198] + ], + "t": [ + [4.541098798], + [103.6271831], + [307.0310837] + ] + }, + { + "name": "08_23", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 23, + "K": [ + [748.435,0,354.117], + [0,748.457,219.552], + [0,0,1] + ], + "distCoef": [-0.324308,0.0627041,-0.000215295,-0.000444561,0.0758056], + "R": [ + [-0.6485698923,-0.03356212054,-0.7604148071], + [-0.2015811272,0.9709293787,0.1290782349], + [0.733976937,0.2370015309,-0.6364810526] + ], + "t": [ + [20.56445448], + [121.4098798], + [305.3725739] + ] + }, + { + "name": "08_24", + "type": "vga", + "resolution": [640,480], + "panel": 8, + "node": 24, + "K": [ + [745.572,0,350.678], + [0,745.729,218.826], + [0,0,1] + ], + "distCoef": [-0.313081,0.00890587,-0.000465969,-0.00023462,0.141032], + "R": [ + [-0.6716141,0.00283216084,-0.7408957278], + [-0.1390702972,0.9817365211,0.1298185488], + [0.7277320613,0.1902245569,-0.6589542206] + ], + "t": [ + [13.95231346], + [154.9907046], + [298.6967118] + ] + }, + { + "name": "09_01", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 1, + "K": [ + [745.377,0,383.314], + [0,745.581,229.65], + [0,0,1] + ], + "distCoef": [-0.311824,0.0113225,-0.000890232,0.000288511,0.13186], + "R": [ + [-0.9888207636,0.1490770148,-0.003088867539], + 
[0.1339941062,0.8974831076,0.420201917], + [0.06541465384,0.4150904904,-0.9074253732] + ], + "t": [ + [-5.5065201], + [83.70733211], + [330.6651976] + ] + }, + { + "name": "09_02", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 2, + "K": [ + [745.133,0,380.598], + [0,746.347,248.499], + [0,0,1] + ], + "distCoef": [-0.340543,0.0603048,-0.00219925,-0.00194065,0.128165], + "R": [ + [-0.9728033822,0.2090533065,0.09975116351], + [0.2316107347,0.8720009628,0.4312433055], + [0.003169728315,0.4426183864,-0.8967044758] + ], + "t": [ + [-23.76195567], + [58.26386366], + [329.69794] + ] + }, + { + "name": "09_03", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 3, + "K": [ + [745.787,0,382.41], + [0,745.973,216.203], + [0,0,1] + ], + "distCoef": [-0.309439,0.00115788,-0.000439278,0.00154239,0.140783], + "R": [ + [-0.995096801,0.09728424012,-0.01783629191], + [0.08253738581,0.9161639792,0.3922131349], + [0.05449712496,0.3888178749,-0.9197014317] + ], + "t": [ + [6.72584843], + [65.39953055], + [327.4514754] + ] + }, + { + "name": "09_04", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 4, + "K": [ + [744.782,0,384.335], + [0,745.051,230.833], + [0,0,1] + ], + "distCoef": [-0.319171,0.0452003,0.000841339,0.00114337,0.0902557], + "R": [ + [-0.9962766095,0.08536470964,0.01207409478], + [0.0830687393,0.9129812009,0.3994557689], + [0.02307600417,0.3989714189,-0.9166729542] + ], + "t": [ + [12.91980994], + [75.72355875], + [328.4117918] + ] + }, + { + "name": "09_05", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 5, + "K": [ + [745.938,0,386.124], + [0,746.151,234.663], + [0,0,1] + ], + "distCoef": [-0.322825,0.0563734,0.000659785,0.00216478,0.0846192], + "R": [ + [-0.9996885429,0.02460566921,0.004168718214], + [0.02372582958,0.8852416043,0.464525981], + [0.007739649829,0.4644802074,-0.8855496794] + ], + "t": [ + [23.79490616], + [45.57973364], + [333.4360246] + ] + }, + { + "name": "09_06", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 6, + "K": [ + [745.533,0,376.456], + [0,745.938,237.583], + [0,0,1] + ], + "distCoef": [-0.324418,0.0645728,-2.52302e-05,0.000695669,0.0784542], + "R": [ + [-0.9996292032,0.0242501169,-0.01238498622], + [0.01720849374,0.9151046106,0.4028491273], + [0.02110269642,0.4024866252,-0.9151826008] + ], + "t": [ + [44.50201086], + [83.15135806], + [329.4460526] + ] + }, + { + "name": "09_07", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 7, + "K": [ + [745.538,0,357.165], + [0,745.859,222.198], + [0,0,1] + ], + "distCoef": [-0.30448,-0.0356601,-0.000261684,-0.000249049,0.226264], + "R": [ + [-0.9994703128,-0.005373675551,-0.03209699996], + [-0.01769948118,0.9174086112,0.3975527241], + [0.02730974481,0.3979102457,-0.9170177829] + ], + "t": [ + [39.28939518], + [107.3778293], + [329.1138759] + ] + }, + { + "name": "09_08", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 8, + "K": [ + [746.393,0,361.584], + [0,746.73,220.937], + [0,0,1] + ], + "distCoef": [-0.31726,0.0513551,0.000643529,-0.000795525,0.0635312], + "R": [ + [-0.9973050313,-0.005865573042,-0.0731318648], + [-0.03181904441,0.9327538711,0.3591068981], + [0.06610766226,0.3604661023,-0.9304267656] + ], + "t": [ + [64.05594666], + [137.6750859], + [322.0323762] + ] + }, + { + "name": "09_09", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 9, + "K": [ + [750.271,0,344.156], + [0,750.817,228.346], + [0,0,1] + ], + "distCoef": 
[-0.379154,0.391779,0.000225814,-0.000528714,-0.53339], + "R": [ + [-0.9991212371,-0.002089946585,-0.04186150665], + [-0.01685937738,0.9344344151,0.355735977], + [0.03837336329,0.3561291283,-0.933648504] + ], + "t": [ + [51.49527243], + [159.1149955], + [322.66132] + ] + }, + { + "name": "09_10", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 10, + "K": [ + [744.897,0,366.998], + [0,745.389,227.752], + [0,0,1] + ], + "distCoef": [-0.317307,0.0499201,-0.000255849,-0.000414203,0.0689696], + "R": [ + [-0.9956077306,0.03830608065,-0.08542769468], + [0.005132094192,0.9334237661,0.3587390896], + [0.093482129,0.3567249879,-0.9295205079] + ], + "t": [ + [51.9897871], + [163.3127669], + [320.2676037] + ] + }, + { + "name": "09_11", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 11, + "K": [ + [745.812,0,365.568], + [0,746.463,243.927], + [0,0,1] + ], + "distCoef": [-0.334591,0.135033,-0.000586766,0.000648781,-0.0516408], + "R": [ + [-0.998272905,0.02856351314,-0.05133549401], + [0.007150624435,0.926422355,0.3764179707], + [0.05831016891,0.3754007803,-0.9250265825] + ], + "t": [ + [35.7749059], + [177.7642897], + [325.0135255] + ] + }, + { + "name": "09_12", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 12, + "K": [ + [743.195,0,380.908], + [0,743.577,227.789], + [0,0,1] + ], + "distCoef": [-0.308886,-0.0148964,-0.00146189,1.64512e-05,0.167268], + "R": [ + [-0.9994731762,0.02727182579,0.01759595347], + [0.03184982914,0.9284235071,0.3701558858], + [-0.006241669996,0.370521307,-0.9288029945] + ], + "t": [ + [-0.9618436208], + [187.4005014], + [324.424529] + ] + }, + { + "name": "09_13", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 13, + "K": [ + [745.52,0,396.637], + [0,745.641,231.295], + [0,0,1] + ], + "distCoef": [-0.327971,0.0908214,-0.00010844,0.00165709,0.0286999], + "R": [ + [-0.9916965419,0.1263943494,0.02371575794], + [0.1244737261,0.8970729317,0.4239887342], + [0.03231501572,0.4234201503,-0.9053568998] + ], + "t": [ + [12.62306638], + [150.537484], + [333.7640249] + ] + }, + { + "name": "09_14", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 14, + "K": [ + [744.91,0,372.463], + [0,744.965,226.423], + [0,0,1] + ], + "distCoef": [-0.308854,-0.0214085,8.99951e-05,0.000256405,0.180188], + "R": [ + [-0.9924146786,0.1180105859,0.03444716585], + [0.1215225705,0.8993517426,0.4199984619], + [0.01858414592,0.4209987468,-0.9068708203] + ], + "t": [ + [-10.68067405], + [162.2988485], + [333.0026074] + ] + }, + { + "name": "09_15", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 15, + "K": [ + [747.246,0,368.718], + [0,747.604,232.745], + [0,0,1] + ], + "distCoef": [-0.3413,0.139342,-0.00187439,-0.000934376,-0.0485015], + "R": [ + [-0.9858543141,0.1593536378,0.05193928607], + [0.1663907088,0.8933064559,0.4175137217], + [0.02013463084,0.4202499184,-0.9071849882] + ], + "t": [ + [-16.61956214], + [147.1949584], + [331.9981158] + ] + }, + { + "name": "09_16", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 16, + "K": [ + [743.705,0,367.288], + [0,743.835,246.124], + [0,0,1] + ], + "distCoef": [-0.316616,0.0215265,-3.02132e-05,0.000242548,0.131229], + "R": [ + [-0.9974602961,0.07055123587,0.009771425173], + [0.06902048446,0.9235857212,0.3771280794], + [0.01758210332,0.3768447143,-0.9261095675] + ], + "t": [ + [-30.73982653], + [139.9628037], + [324.9351286] + ] + }, + { + "name": "09_17", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 17, + 
"K": [ + [742.776,0,376.251], + [0,742.956,242.934], + [0,0,1] + ], + "distCoef": [-0.317736,0.0249159,0.000195501,0.000659428,0.110976], + "R": [ + [-0.9810894361,0.1806813104,0.06941024814], + [0.1934432758,0.9031273242,0.3833284952], + [0.006574003146,0.389506483,-0.9210002618] + ], + "t": [ + [-32.91453507], + [125.2651482], + [325.9500645] + ] + }, + { + "name": "09_18", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 18, + "K": [ + [744.563,0,383.579], + [0,744.554,245.613], + [0,0,1] + ], + "distCoef": [-0.324188,0.0688729,0.000784842,0.000316148,0.0548859], + "R": [ + [-0.970594512,0.2257141743,0.08366244524], + [0.2406675117,0.9026066179,0.3569039677], + [0.005044007626,0.3665438649,-0.9303870985] + ], + "t": [ + [-30.64851648], + [114.5848432], + [323.1694161] + ] + }, + { + "name": "09_19", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 19, + "K": [ + [745.897,0,369.27], + [0,746.007,226.27], + [0,0,1] + ], + "distCoef": [-0.314378,0.0131268,-0.000749673,-0.000436078,0.140449], + "R": [ + [-0.9929061616,0.1118291068,0.04039313118], + [0.1187797946,0.9175946163,0.3793566667], + [0.005358597494,0.3814634596,-0.9243683867] + ], + "t": [ + [-9.348770156], + [111.4514571], + [325.9373984] + ] + }, + { + "name": "09_20", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 20, + "K": [ + [743.647,0,378.532], + [0,743.859,221.629], + [0,0,1] + ], + "distCoef": [-0.312883,-0.00145442,-0.000725648,-1.91192e-05,0.160115], + "R": [ + [-0.9995005243,0.01416777706,-0.02824846864], + [0.002450265794,0.9259270935,0.3776943389], + [0.03150711165,0.3774364735,-0.9254993303] + ], + "t": [ + [6.861259295], + [105.360829], + [326.1962043] + ] + }, + { + "name": "09_21", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 21, + "K": [ + [745.35,0,364.423], + [0,745.51,242.824], + [0,0,1] + ], + "distCoef": [-0.317615,0.0309367,1.60295e-05,-0.00084218,0.138729], + "R": [ + [-0.9983267687,0.03243769532,-0.0478691851], + [0.01510269673,0.9453721551,0.3256430514], + [0.05581730476,0.3243752215,-0.9442802255] + ], + "t": [ + [30.85545331], + [138.1219419], + [318.1793043] + ] + }, + { + "name": "09_22", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 22, + "K": [ + [744.248,0,356.027], + [0,744.436,238.226], + [0,0,1] + ], + "distCoef": [-0.308137,-0.0481761,0.000357682,-8.3696e-05,0.245728], + "R": [ + [-0.9955839097,0.09158830299,-0.0205976113], + [0.07579544873,0.9137019347,0.3992540852], + [0.05538708142,0.3959297379,-0.9166089209] + ], + "t": [ + [35.25988756], + [131.4528362], + [328.3382973] + ] + }, + { + "name": "09_23", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 23, + "K": [ + [744.535,0,363.359], + [0,744.632,254.668], + [0,0,1] + ], + "distCoef": [-0.311847,-0.00198079,0.000462082,-0.000460419,0.174118], + "R": [ + [-0.9946906764,0.1028474748,0.003585412436], + [0.09771594436,0.9329851386,0.346396197], + [0.03228083764,0.3449074195,-0.9380814567] + ], + "t": [ + [12.3985171], + [157.8437238], + [320.5381764] + ] + }, + { + "name": "09_24", + "type": "vga", + "resolution": [640,480], + "panel": 9, + "node": 24, + "K": [ + [743.311,0,385.98], + [0,743.511,229.743], + [0,0,1] + ], + "distCoef": [-0.319602,0.0480118,-0.000790169,0.000699953,0.0704098], + "R": [ + [-0.9986396845,0.04700092247,-0.02257640097], + [0.03617494752,0.9363507866,0.3491970469], + [0.03755201414,0.3479053287,-0.93677731] + ], + "t": [ + [-8.936415104], + [142.1371611], + [321.4431282] + ] + }, + { + "name": 
"10_01", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 1, + "K": [ + [744.128,0,369.511], + [0,744.056,233.67], + [0,0,1] + ], + "distCoef": [-0.31156,0.00550691,-0.000430053,0.000410016,0.149166], + "R": [ + [-0.6229970612,0.0209936641,0.781942407], + [0.05250109858,0.9985078863,0.01502117145], + [-0.7804603106,0.05041098106,-0.6231696692] + ], + "t": [ + [-46.84686717], + [150.7389104], + [280.0083694] + ] + }, + { + "name": "10_02", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 2, + "K": [ + [743.282,0,357.827], + [0,743.347,211.632], + [0,0,1] + ], + "distCoef": [-0.30948,-0.00718458,0.000285593,0.000547399,0.164062], + "R": [ + [-0.6512046155,0.0977241901,0.7525839032], + [0.103617117,0.9938368806,-0.03939223155], + [-0.7517952126,0.05232817138,-0.6573170626] + ], + "t": [ + [-42.32005533], + [143.0774393], + [282.200902] + ] + }, + { + "name": "10_03", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 3, + "K": [ + [744.012,0,361.17], + [0,744.101,225.217], + [0,0,1] + ], + "distCoef": [-0.303567,-0.0563565,0.000757602,-0.000519388,0.263551], + "R": [ + [-0.6320598226,0.04182219841,0.773790207], + [0.06737176964,0.9977273282,0.001106034268], + [-0.771985379,0.05283069539,-0.6334409935] + ], + "t": [ + [-54.02554254], + [119.7786683], + [280.9354705] + ] + }, + { + "name": "10_04", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 4, + "K": [ + [744.209,0,380.966], + [0,744.256,205.476], + [0,0,1] + ], + "distCoef": [-0.315194,0.0249601,-0.000765583,0.001001,0.10286], + "R": [ + [-0.6566261636,0.06356030055,0.7515332125], + [0.0713368826,0.9972094103,-0.02201002698], + [-0.7508349555,0.03915967697,-0.6593279831] + ], + "t": [ + [-22.38173011], + [115.5645607], + [280.9145253] + ] + }, + { + "name": "10_05", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 5, + "K": [ + [744.499,0,353.834], + [0,744.652,215.524], + [0,0,1] + ], + "distCoef": [-0.317042,0.0236932,-0.00147688,-0.000206715,0.11602], + "R": [ + [-0.6480155592,0.1057846486,0.754244949], + [0.1559047408,0.9877614348,-0.004589090624], + [-0.7454995284,0.1146165612,-0.6565771067] + ], + "t": [ + [-17.37690425], + [72.84298088], + [287.4167752] + ] + }, + { + "name": "10_06", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 6, + "K": [ + [746.493,0,367.328], + [0,746.754,207.575], + [0,0,1] + ], + "distCoef": [-0.323089,0.0587326,-0.000981175,-0.000221417,0.0550321], + "R": [ + [-0.6607542091,0.07289791872,0.74705406], + [0.1340507848,0.9907326878,0.02188900409], + [-0.738535214,0.1146064347,-0.6644028167] + ], + "t": [ + [3.021864726], + [64.04371811], + [286.9062935] + ] + }, + { + "name": "10_07", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 7, + "K": [ + [744.949,0,365.308], + [0,744.944,217.014], + [0,0,1] + ], + "distCoef": [-0.320697,0.0459897,0.000335318,2.89241e-06,0.0947246], + "R": [ + [-0.643287111,0.03528116955,0.764811697], + [0.0902182212,0.9954712387,0.02996140018], + [-0.7602909742,0.08827373343,-0.6435568215] + ], + "t": [ + [9.776307982], + [84.51813798], + [285.3816638] + ] + }, + { + "name": "10_08", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 8, + "K": [ + [748.112,0,395.78], + [0,748.17,229.575], + [0,0,1] + ], + "distCoef": [-0.325424,0.0774932,-0.000546,0.000524276,0.0351183], + "R": [ + [-0.6241633069,0.05185263499,0.7795713377], + [0.04102617023,0.9985938587,-0.03357318505], + [-0.7802160084,0.0110276762,-0.6254129601] + ], + "t": 
[ + [-46.24758235], + [183.5392889], + [272.6641799] + ] + }, + { + "name": "10_09", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 9, + "K": [ + [746.122,0,370.333], + [0,746.261,210.753], + [0,0,1] + ], + "distCoef": [-0.323285,0.0813962,-0.00031195,0.00117949,0.0118242], + "R": [ + [-0.6717702835,0.002860846795,0.7407540089], + [0.1085475528,0.9895782107,0.09461708989], + [-0.7327633417,0.1439679842,-0.6650797731] + ], + "t": [ + [53.6134591], + [78.01841366], + [288.9552018] + ] + }, + { + "name": "10_10", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 10, + "K": [ + [746.498,0,355.775], + [0,746.616,218.183], + [0,0,1] + ], + "distCoef": [-0.320479,0.0482256,-0.000295345,0.000515541,0.088746], + "R": [ + [-0.6274497943,0.01735785812,0.7784635254], + [0.05740772193,0.9980618939,0.02401685623], + [-0.7765378993,0.0597591891,-0.6272302051] + ], + "t": [ + [35.32452291], + [122.8912729], + [283.9520693] + ] + }, + { + "name": "10_11", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 11, + "K": [ + [745.209,0,387.948], + [0,745.058,237.868], + [0,0,1] + ], + "distCoef": [-0.312054,0.0106095,2.04654e-05,-0.000407432,0.122509], + "R": [ + [-0.663538187,0.0558857692,0.74605218], + [0.09086672278,0.9958436408,0.006219474654], + [-0.742603739,0.07191817555,-0.6658584406] + ], + "t": [ + [70.41193089], + [130.903078], + [283.3216663] + ] + }, + { + "name": "10_12", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 12, + "K": [ + [746.923,0,359.191], + [0,746.955,219.728], + [0,0,1] + ], + "distCoef": [-0.34193,0.180291,-0.0011698,0.000387434,-0.142263], + "R": [ + [-0.6573529902,0.02662022179,0.7531124817], + [0.0203979596,0.9996382488,-0.01752982786], + [-0.7533066902,0.003838673213,-0.6576581901] + ], + "t": [ + [61.18715226], + [173.543055], + [273.2477614] + ] + }, + { + "name": "10_13", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 13, + "K": [ + [747.063,0,362.554], + [0,747.091,228.588], + [0,0,1] + ], + "distCoef": [-0.334743,0.115617,-0.000133435,0.000763825,-0.0142674], + "R": [ + [-0.6314178936,0.07344004486,0.771957255], + [0.07624079511,0.9965613541,-0.03244701456], + [-0.7716856775,0.03836700932,-0.6348457984] + ], + "t": [ + [39.63694261], + [165.7689372], + [279.8275089] + ] + }, + { + "name": "10_14", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 14, + "K": [ + [745.722,0,380.721], + [0,745.932,237.231], + [0,0,1] + ], + "distCoef": [-0.319645,0.0532601,-0.00105825,0.00148804,0.0812854], + "R": [ + [-0.6464741699,0.0407242176,0.7618482039], + [0.05782238306,0.998317631,-0.004298792509], + [-0.7607415591,0.04127282036,-0.6477413331] + ], + "t": [ + [37.16059778], + [187.0284564], + [279.5510011] + ] + }, + { + "name": "10_15", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 15, + "K": [ + [745.212,0,345.945], + [0,745.407,234.052], + [0,0,1] + ], + "distCoef": [-0.345973,0.208044,0.00063894,-0.000591324,-0.26389], + "R": [ + [-0.6892736753,0.06991501806,0.7211197479], + [0.04097555303,0.9975016565,-0.0575451947], + [-0.7233414164,-0.01011610737,-0.6904164394] + ], + "t": [ + [38.38229011], + [201.7157692], + [268.6124541] + ] + }, + { + "name": "10_16", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 16, + "K": [ + [746.402,0,351.743], + [0,746.432,235.34], + [0,0,1] + ], + "distCoef": [-0.332074,0.123634,0.000553061,0.000200886,-0.050504], + "R": [ + [-0.6626903808,0.1069713565,0.7412142659], + 
[0.1159650419,0.9924654921,-0.03955194002], + [-0.7398605059,0.05974425322,-0.6701022728] + ], + "t": [ + [18.24762504], + [172.5928493], + [282.9657885] + ] + }, + { + "name": "10_17", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 17, + "K": [ + [745.425,0,381.954], + [0,745.576,234.397], + [0,0,1] + ], + "distCoef": [-0.316953,0.0361047,-0.000329948,0.00146685,0.0995591], + "R": [ + [-0.6439914485,0.08005681888,0.7608323863], + [0.04150323442,0.9967010496,-0.06974596286], + [-0.7639060779,-0.01333879876,-0.6451895695] + ], + "t": [ + [-14.39474973], + [198.5707312], + [268.934139] + ] + }, + { + "name": "10_18", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 18, + "K": [ + [742.866,0,374.357], + [0,743.163,216.484], + [0,0,1] + ], + "distCoef": [-0.313801,-0.00472223,0.00105562,-0.000883374,0.146196], + "R": [ + [-0.6735625977,0.03695414336,0.7382058102], + [0.08136680684,0.9963864104,0.02436316713], + [-0.7346379174,0.07647556771,-0.6741354596] + ], + "t": [ + [41.81793908], + [81.57199105], + [283.0241236] + ] + }, + { + "name": "10_19", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 19, + "K": [ + [747.195,0,374.317], + [0,747.324,252.705], + [0,0,1] + ], + "distCoef": [-0.325848,0.0754879,0.000850799,-0.000494425,0.0423325], + "R": [ + [-0.6398121174,0.03550225829,0.7677109118], + [0.06489671873,0.9978603994,0.00793971962], + [-0.7657864391,0.05490184793,-0.6407471551] + ], + "t": [ + [-18.67539454], + [143.739157], + [281.6554752] + ] + }, + { + "name": "10_20", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 20, + "K": [ + [744.074,0,359.595], + [0,744.232,222.54], + [0,0,1] + ], + "distCoef": [-0.312038,-0.00652471,0.000517579,-0.000473896,0.154037], + "R": [ + [-0.6341018605,0.07503908623,0.769599874], + [0.1134623387,0.9935365213,-0.003387984729], + [-0.7648798129,0.08517227417,-0.6385174669] + ], + "t": [ + [-10.64771601], + [114.6784971], + [285.5473806] + ] + }, + { + "name": "10_21", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 21, + "K": [ + [745.669,0,353.595], + [0,745.986,221.41], + [0,0,1] + ], + "distCoef": [-0.331248,0.0956435,-0.00124938,0.0010706,0.0394747], + "R": [ + [-0.618235149,0.02815342604,0.7854888192], + [0.09838720035,0.994269895,0.04180113162], + [-0.7798110408,0.1031249747,-0.6174625335] + ], + "t": [ + [-3.462045404], + [102.4105128], + [287.5712577] + ] + }, + { + "name": "10_22", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 22, + "K": [ + [745.836,0,367.536], + [0,745.883,217.602], + [0,0,1] + ], + "distCoef": [-0.306908,-0.0326669,-0.000283909,0.000278093,0.200484], + "R": [ + [-0.6189078213,0.03804187807,0.7845418563], + [0.07413417155,0.9971968305,0.01012945108], + [-0.7819573092,0.06443055706,-0.6199931209] + ], + "t": [ + [14.73270812], + [126.5060302], + [283.9045417] + ] + }, + { + "name": "10_23", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 23, + "K": [ + [742.749,0,379.273], + [0,742.868,231.204], + [0,0,1] + ], + "distCoef": [-0.310394,-0.00460726,-0.000822068,-0.000336616,0.147608], + "R": [ + [-0.6037549899,0.1086195044,0.7897352186], + [0.1215591915,0.9916324658,-0.04345590495], + [-0.787847241,0.0697628552,-0.6119067485] + ], + "t": [ + [19.26192194], + [145.0128457], + [284.7838402] + ] + }, + { + "name": "10_24", + "type": "vga", + "resolution": [640,480], + "panel": 10, + "node": 24, + "K": [ + [745.597,0,368.627], + [0,745.598,227.731], + [0,0,1] + ], + "distCoef": 
[-0.309585,-0.00749389,-0.000770097,-0.000330202,0.147896], + "R": [ + [-0.6450785239,0.075478584,0.760379301], + [0.07622559694,0.9965021766,-0.03425011393], + [-0.7603047786,0.03586635318,-0.6485755533] + ], + "t": [ + [7.856697427], + [160.1393432], + [279.1413867] + ] + }, + { + "name": "11_01", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 1, + "K": [ + [742.855,0,374.596], + [0,743.116,213.495], + [0,0,1] + ], + "distCoef": [-0.312561,0.00631745,-0.000399255,9.31566e-05,0.13435], + "R": [ + [-0.9229364354,0.00164792287,0.3849488544], + [0.08421827064,0.9766305816,0.1977371741], + [-0.3756269679,0.2149185694,-0.9015067329] + ], + "t": [ + [-1.777017447], + [176.3500352], + [303.9155303] + ] + }, + { + "name": "11_02", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 2, + "K": [ + [743.543,0,362.467], + [0,743.612,228.587], + [0,0,1] + ], + "distCoef": [-0.311508,-0.0063044,0.000209199,0.000389142,0.157517], + "R": [ + [-0.9382305089,-0.009495783218,0.3458805319], + [0.07354737957,0.9713073762,0.226169768], + [-0.338103971,0.2376379833,-0.9106118238] + ], + "t": [ + [-11.88478771], + [180.6527832], + [308.9268929] + ] + }, + { + "name": "11_03", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 3, + "K": [ + [749.382,0,384.698], + [0,749.44,241.756], + [0,0,1] + ], + "distCoef": [-0.334994,0.135003,0.000819921,0.00199466,-0.05032], + "R": [ + [-0.9215516186,0.03410543981,0.3867550042], + [0.1287847641,0.966589567,0.2216282778], + [-0.3662746221,0.2540500501,-0.895154441] + ], + "t": [ + [-28.84627719], + [162.2565593], + [311.7587167] + ] + }, + { + "name": "11_04", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 4, + "K": [ + [747.478,0,355.1], + [0,747.786,237.425], + [0,0,1] + ], + "distCoef": [-0.332665,0.125805,0.000559145,-0.000285828,-0.0488142], + "R": [ + [-0.9186497576,-0.03493542623,0.3935252708], + [0.05923251482,0.9726444983,0.2246200995], + [-0.3906073886,0.2296566914,-0.8914503195] + ], + "t": [ + [-43.73591523], + [146.455357], + [306.7233507] + ] + }, + { + "name": "11_05", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 5, + "K": [ + [744.546,0,358.346], + [0,744.606,240.06], + [0,0,1] + ], + "distCoef": [-0.319412,0.0357687,0.00118284,-0.000939418,0.105494], + "R": [ + [-0.9252091585,0.02778676908,0.3784387777], + [0.1130706466,0.9721977994,0.2050523536], + [-0.3622196044,0.2325066328,-0.9026281759] + ], + "t": [ + [-43.43063623], + [134.4377466], + [308.7383564] + ] + }, + { + "name": "11_06", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 6, + "K": [ + [744.682,0,386.644], + [0,744.47,247.576], + [0,0,1] + ], + "distCoef": [-0.310524,-0.0156223,-0.000288596,-3.26402e-05,0.156674], + "R": [ + [-0.9144551399,0.0484228537,0.4017798207], + [0.1449564791,0.9661327489,0.2134833264], + [-0.3778351707,0.2534615133,-0.8905042645] + ], + "t": [ + [-44.21957265], + [107.5274508], + [309.8949628] + ] + }, + { + "name": "11_07", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 7, + "K": [ + [746.436,0,349.001], + [0,746.553,211.863], + [0,0,1] + ], + "distCoef": [-0.330393,0.0902383,-0.000783974,-0.000712996,0.00481592], + "R": [ + [-0.9105637485,0.003264968682,0.4133557789], + [0.1001837456,0.9718993559,0.2130137535], + [-0.401044732,0.2353741321,-0.8853034174] + ], + "t": [ + [-36.21090107], + [102.2867759], + [306.6852556] + ] + }, + { + "name": "11_08", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 8, + "K": 
[ + [745.743,0,370.625], + [0,745.85,233.671], + [0,0,1] + ], + "distCoef": [-0.3257,0.0614375,0.00126654,-0.000627381,0.0722474], + "R": [ + [-0.8981193216,-0.01090147501,0.4396166989], + [0.09488580103,0.9713398361,0.2179348702], + [-0.4293930238,0.2374449004,-0.8713446794] + ], + "t": [ + [-42.17364239], + [80.07059019], + [305.3107943] + ] + }, + { + "name": "11_09", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 9, + "K": [ + [743.294,0,376.993], + [0,743.306,225.516], + [0,0,1] + ], + "distCoef": [-0.315184,-0.00458353,0.00085295,-0.000315923,0.19344], + "R": [ + [-0.9287334953,0.02657190893,0.369794576], + [0.1072763174,0.9740215576,0.1994336907], + [-0.354888555,0.2248909489,-0.9074569822] + ], + "t": [ + [4.627896612], + [76.0139061], + [305.925361] + ] + }, + { + "name": "11_10", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 10, + "K": [ + [746.981,0,373.015], + [0,746.916,231.087], + [0,0,1] + ], + "distCoef": [-0.31553,-0.0133214,-7.49701e-05,-0.000474937,0.183355], + "R": [ + [-0.897589008,-0.01428097087,0.4406018914], + [0.092180686,0.9712994893,0.219271574], + [-0.431087803,0.2374307391,-0.8705113154] + ], + "t": [ + [-5.834972436], + [85.69962032], + [306.7617687] + ] + }, + { + "name": "11_11", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 11, + "K": [ + [743.956,0,385.014], + [0,743.968,233.944], + [0,0,1] + ], + "distCoef": [-0.321873,0.0619652,-0.000204505,0.000631491,0.0680901], + "R": [ + [-0.9171447001,-0.01735780695,0.3981762243], + [0.08629809142,0.9667012777,0.2409175774], + [-0.3890992656,0.2553181275,-0.8851070078] + ], + "t": [ + [26.82061991], + [73.01187567], + [307.7528197] + ] + }, + { + "name": "11_12", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 12, + "K": [ + [749.192,0,349.167], + [0,749.113,221.266], + [0,0,1] + ], + "distCoef": [-0.334032,0.094759,-0.000689735,0.000727903,0.0409048], + "R": [ + [-0.937850977,-0.03419002209,0.345349949], + [0.06230645433,0.9623765935,0.2644791068], + [-0.341399254,0.2695595196,-0.9004355695] + ], + "t": [ + [57.17130279], + [82.80130245], + [306.825197] + ] + }, + { + "name": "11_13", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 13, + "K": [ + [744.715,0,367.122], + [0,744.786,220.538], + [0,0,1] + ], + "distCoef": [-0.315954,0.0180051,3.91318e-05,0.000697083,0.145396], + "R": [ + [-0.9312656673,-0.01667316508,0.3639591494], + [0.07039560041,0.9718946087,0.2246448954], + [-0.3574754765,0.2348252013,-0.9039183639] + ], + "t": [ + [46.96203938], + [112.2947483], + [304.8878272] + ] + }, + { + "name": "11_14", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 14, + "K": [ + [746.505,0,367.697], + [0,746.62,222.237], + [0,0,1] + ], + "distCoef": [-0.323622,0.0629014,0.000917096,0.00064017,0.0716359], + "R": [ + [-0.9260527677,-0.07925799212,0.3689775632], + [0.02937617957,0.9595934278,0.279852628], + [-0.3762490021,0.2699974518,-0.8863058527] + ], + "t": [ + [50.81898209], + [116.0290364], + [310.1255555] + ] + }, + { + "name": "11_15", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 15, + "K": [ + [746.042,0,355.995], + [0,745.821,261.077], + [0,0,1] + ], + "distCoef": [-0.321065,0.0443736,0.000927074,0.000280863,0.106789], + "R": [ + [-0.9208600933,-0.04678508348,0.387076019], + [0.03581020852,0.9784294414,0.2034538209], + [-0.3882451771,0.2012137775,-0.8993212431] + ], + "t": [ + [43.08113165], + [154.6066575], + [301.5640854] + ] + }, + { + "name": "11_16", + 
"type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 16, + "K": [ + [741.668,0,363.735], + [0,741.796,217.06], + [0,0,1] + ], + "distCoef": [-0.309875,-0.0179015,-1.19394e-05,-0.000437783,0.188022], + "R": [ + [-0.8991061052,-0.0185684781,0.437336739], + [0.0842559957,0.9730755765,0.214534029], + [-0.4295452698,0.2297370977,-0.873333686] + ], + "t": [ + [16.70791642], + [154.14567], + [307.2679797] + ] + }, + { + "name": "11_17", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 17, + "K": [ + [747.822,0,361.761], + [0,747.76,222.34], + [0,0,1] + ], + "distCoef": [-0.334628,0.097635,0.00152491,-0.000486737,0.0213673], + "R": [ + [-0.9162397179,0.01033450945,0.4004971626], + [0.1187416248,0.9617552428,0.2468345183], + [-0.3826293322,0.2737152732,-0.8824254888] + ], + "t": [ + [27.8785048], + [159.3368695], + [313.9971646] + ] + }, + { + "name": "11_18", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 18, + "K": [ + [745.448,0,360.818], + [0,745.84,214.85], + [0,0,1] + ], + "distCoef": [-0.329534,0.0903331,0.00014069,0.000717079,0.0211508], + "R": [ + [-0.9101418911,0.04432675398,0.411918532], + [0.1391589893,0.9692024732,0.2031781034], + [-0.3902262342,0.2422430698,-0.888280238] + ], + "t": [ + [16.35209076], + [181.679224], + [308.9632727] + ] + }, + { + "name": "11_19", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 19, + "K": [ + [746.167,0,363.996], + [0,746.229,234.387], + [0,0,1] + ], + "distCoef": [-0.310901,-0.0147285,-0.000729007,-0.000655789,0.178193], + "R": [ + [-0.9157731435,-0.03755396433,0.3999365568], + [0.06406747528,0.9692207168,0.2377110865], + [-0.3965537899,0.2433123544,-0.8851803149] + ], + "t": [ + [-10.79527777], + [146.8696803], + [308.5271108] + ] + }, + { + "name": "11_20", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 20, + "K": [ + [744.588,0,384.664], + [0,744.662,240.853], + [0,0,1] + ], + "distCoef": [-0.307863,-0.0295446,-0.000517465,0.000242427,0.189333], + "R": [ + [-0.9170523574,0.0431160901,0.396429031], + [0.124694228,0.9752892469,0.1823793695], + [-0.3787694858,0.2166838427,-0.8997676305] + ], + "t": [ + [-9.200936127], + [142.5227957], + [304.9039442] + ] + }, + { + "name": "11_21", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 21, + "K": [ + [745.832,0,378.426], + [0,745.825,230.649], + [0,0,1] + ], + "distCoef": [-0.317765,0.041948,0.000140897,0.000331931,0.0876249], + "R": [ + [-0.903416406,0.009580467792,0.4286572198], + [0.1299134284,0.9588705554,0.2523683006], + [-0.4086089801,0.2836819921,-0.8675040223] + ], + "t": [ + [-22.38884391], + [100.2357286], + [311.942278] + ] + }, + { + "name": "11_22", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 22, + "K": [ + [745.759,0,381.189], + [0,746.033,229.615], + [0,0,1] + ], + "distCoef": [-0.307738,-0.0303832,0.000694314,-0.000395606,0.211723], + "R": [ + [-0.9121889441,-0.007451044875,0.4097021017], + [0.1102495844,0.9585035751,0.2628990789], + [-0.394659802,0.2849831196,-0.8735148895] + ], + "t": [ + [-0.4671669308], + [91.25062129], + [311.8622342] + ] + }, + { + "name": "11_23", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 23, + "K": [ + [748.678,0,358.839], + [0,748.651,239.635], + [0,0,1] + ], + "distCoef": [-0.328983,0.0919887,-1.22475e-05,-0.000911096,0.0194744], + "R": [ + [-0.9251940915,-0.06790089301,0.3733702744], + [0.01633387562,0.9758259889,0.2179377065], + [-0.3791425821,0.207733262,-0.9017193545] + ], + "t": [ + 
[15.23843998], + [129.776393], + [302.9631654] + ] + }, + { + "name": "11_24", + "type": "vga", + "resolution": [640,480], + "panel": 11, + "node": 24, + "K": [ + [747.741,0,374.843], + [0,747.8,238.972], + [0,0,1] + ], + "distCoef": [-0.320184,0.0453956,8.07771e-05,-0.000586724,0.0799959], + "R": [ + [-0.901120423,0.005145678853,0.4335383549], + [0.1030532182,0.9738156258,0.2026404726], + [-0.4211437016,0.2272809911,-0.8780554275] + ], + "t": [ + [6.522845915], + [142.0951003], + [306.255293] + ] + }, + { + "name": "12_01", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 1, + "K": [ + [745.397,0,350.188], + [0,745.422,244.528], + [0,0,1] + ], + "distCoef": [-0.318784,0.0421446,0.000567418,-0.000208,0.092208], + "R": [ + [-0.2717431751,0.1656287556,0.9480098956], + [0.4128654434,0.9098857043,-0.04062180222], + [-0.86930879,0.3803618284,-0.3156376199] + ], + "t": [ + [-13.70303847], + [97.1923903], + [326.2673629] + ] + }, + { + "name": "12_02", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 2, + "K": [ + [747.727,0,370.501], + [0,747.788,234.298], + [0,0,1] + ], + "distCoef": [-0.349811,0.202844,-0.00194754,-0.000389321,-0.178679], + "R": [ + [-0.3883456032,0.1438043201,0.9102241537], + [0.3131714459,0.9495549238,-0.01640403197], + [-0.8666667975,0.2786857806,-0.4137908865] + ], + "t": [ + [13.37192963], + [105.5473845], + [318.08591] + ] + }, + { + "name": "12_03", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 3, + "K": [ + [746.831,0,387.09], + [0,746.752,242.092], + [0,0,1] + ], + "distCoef": [-0.338844,0.109538,-0.000689346,-0.00140957,-0.0011227], + "R": [ + [-0.2489409576,0.07810816372,0.9653639285], + [0.3865744043,0.9219167609,0.0250941395], + [-0.8880251289,0.3794319447,-0.2596974581] + ], + "t": [ + [-20.03334166], + [70.50216381], + [325.3775618] + ] + }, + { + "name": "12_04", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 4, + "K": [ + [746.601,0,360.45], + [0,746.776,222.063], + [0,0,1] + ], + "distCoef": [-0.336822,0.124774,0.000206697,-0.000417774,-0.0398672], + "R": [ + [-0.3081671276,0.03567998316,0.9506629057], + [0.4212102042,0.9011275261,0.1027187694], + [-0.8530035084,0.4320834647,-0.2927266543] + ], + "t": [ + [4.764737811], + [63.41476985], + [331.1517594] + ] + }, + { + "name": "12_05", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 5, + "K": [ + [748.2,0,362.212], + [0,748.363,218.877], + [0,0,1] + ], + "distCoef": [-0.337789,0.133894,-0.000945522,-0.000498923,-0.0570031], + "R": [ + [-0.2841336654,-0.004801876737,0.9587726541], + [0.3831436474,0.9161034097,0.118133349], + [-0.8789021593,0.4009133132,-0.2584560111] + ], + "t": [ + [10.92507323], + [68.32263664], + [329.7866549] + ] + }, + { + "name": "12_06", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 6, + "K": [ + [747.371,0,350.388], + [0,747.497,231.124], + [0,0,1] + ], + "distCoef": [-0.351189,0.233364,-0.000450075,-0.00118874,-0.265042], + "R": [ + [-0.3878504716,-0.01635524947,0.9215771902], + [0.3346075558,0.9291346168,0.1573106717], + [-0.8588421248,0.3693797093,-0.3548927092] + ], + "t": [ + [53.76493542], + [97.09757883], + [324.1315487] + ] + }, + { + "name": "12_07", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 7, + "K": [ + [747.196,0,383.602], + [0,747.258,260.076], + [0,0,1] + ], + "distCoef": [-0.340453,0.149462,7.57635e-05,-0.00150211,-0.0810731], + "R": [ + [-0.3567494973,0.01375486298,0.934098817], + 
[0.3428523716,0.9320474424,0.1172169629], + [-0.8690121101,0.3620750873,-0.3372233439] + ], + "t": [ + [46.87962376], + [118.8343508], + [324.070693] + ] + }, + { + "name": "12_08", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 8, + "K": [ + [748.388,0,360.952], + [0,748.584,220.934], + [0,0,1] + ], + "distCoef": [-0.353387,0.236369,0.000317101,-0.000350889,-0.25062], + "R": [ + [-0.3882650784,-0.0538394581,0.9199736636], + [0.3529834406,0.9134681838,0.2024316376], + [-0.8512654812,0.4033326047,-0.3356633588] + ], + "t": [ + [53.63586961], + [124.5990463], + [329.2926486] + ] + }, + { + "name": "12_09", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 9, + "K": [ + [745.023,0,373.202], + [0,745.321,253.183], + [0,0,1] + ], + "distCoef": [-0.310235,-0.0270349,0.000213071,-0.0010354,0.204812], + "R": [ + [-0.3615436505,-0.1034754049,0.9265953968], + [0.3189620476,0.9201303682,0.2272076531], + [-0.8760989676,0.3776942494,-0.2996625652] + ], + "t": [ + [26.36947949], + [154.1173845], + [328.14772] + ] + }, + { + "name": "12_10", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 10, + "K": [ + [743.497,0,337.094], + [0,743.775,230.392], + [0,0,1] + ], + "distCoef": [-0.323522,0.0697077,-0.000922284,-0.00112939,0.0376595], + "R": [ + [-0.409013364,-0.03192166586,0.9119698873], + [0.3635432206,0.9109541012,0.1949331996], + [-0.8369853014,0.4112707536,-0.3609874961] + ], + "t": [ + [36.39561956], + [146.2733377], + [330.6860766] + ] + }, + { + "name": "12_11", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 11, + "K": [ + [744.432,0,350.161], + [0,744.664,216.764], + [0,0,1] + ], + "distCoef": [-0.3138,0.0423232,-0.000980128,0.000347352,0.0411803], + "R": [ + [-0.3625324698,0.01191238118,0.9318950067], + [0.4332658145,0.8874493782,0.157207936], + [-0.8251369234,0.4607512304,-0.3268904424] + ], + "t": [ + [30.02223667], + [146.021886], + [340.9352409] + ] + }, + { + "name": "12_12", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 12, + "K": [ + [745.59,0,349.499], + [0,745.978,243.824], + [0,0,1] + ], + "distCoef": [-0.328804,0.102744,-0.00034172,-0.00160085,-0.0230968], + "R": [ + [-0.3184962228,0.07265474811,0.9451356747], + [0.3862627531,0.9204738181,0.05940568743], + [-0.8656565379,0.3839911948,-0.3212312573] + ], + "t": [ + [17.04074577], + [180.9741057], + [327.7548666] + ] + }, + { + "name": "12_13", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 13, + "K": [ + [744.766,0,364.423], + [0,744.926,205.341], + [0,0,1] + ], + "distCoef": [-0.32165,0.0514735,-0.000885848,-0.00113933,0.0656482], + "R": [ + [-0.2748509499,0.06379038152,0.9593684081], + [0.3894986417,0.919644886,0.05043898999], + [-0.8790607279,0.3875358962,-0.2776115375] + ], + "t": [ + [-9.802475588], + [164.1613661], + [327.7325897] + ] + }, + { + "name": "12_14", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 14, + "K": [ + [744.556,0,345.329], + [0,744.551,253.003], + [0,0,1] + ], + "distCoef": [-0.311027,-0.00213006,0.0011289,-0.000863959,0.162024], + "R": [ + [-0.3202755169,0.1244082889,0.9391198917], + [0.4530679872,0.8907277919,0.0365157459], + [-0.831957326,0.4371802584,-0.3416437171] + ], + "t": [ + [0.5161253202], + [152.8799295], + [338.113135] + ] + }, + { + "name": "12_15", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 15, + "K": [ + [747.233,0,347.644], + [0,747.329,227.375], + [0,0,1] + ], + "distCoef": 
[-0.323105,0.049287,-0.00101918,5.08353e-05,0.100564], + "R": [ + [-0.2639942301,0.1219548974,0.9567831779], + [0.4010015368,0.9160569375,-0.006120025947], + [-0.8772142349,0.3820558732,-0.2907378472] + ], + "t": [ + [-27.43280694], + [159.7105652], + [325.8203908] + ] + }, + { + "name": "12_16", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 16, + "K": [ + [744.634,0,382.866], + [0,744.52,241.14], + [0,0,1] + ], + "distCoef": [-0.320913,0.0518689,0.000556907,0.000900625,0.0851061], + "R": [ + [-0.2918914105,0.1153635448,0.9494686183], + [0.4055533141,0.9139698053,0.01362734066], + [-0.8662135499,0.3890378484,-0.3135660035] + ], + "t": [ + [-22.908528], + [135.1916248], + [327.5972929] + ] + }, + { + "name": "12_17", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 17, + "K": [ + [745.929,0,399.922], + [0,745.76,235.115], + [0,0,1] + ], + "distCoef": [-0.324412,0.0924767,0.000808772,0.00160345,0.0125449], + "R": [ + [-0.2332319969,0.1531844985,0.9602798264], + [0.4252056559,0.9041694633,-0.04096012482], + [-0.8745301515,0.3987632018,-0.2760161646] + ], + "t": [ + [-42.90434909], + [120.9469461], + [326.5490528] + ] + }, + { + "name": "12_18", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 18, + "K": [ + [745.596,0,390.427], + [0,745.457,235.855], + [0,0,1] + ], + "distCoef": [-0.331545,0.0834192,0.000515021,-0.000851112,0.0388274], + "R": [ + [-0.2198853867,0.1587089693,0.9625288982], + [0.4990272732,0.8661072571,-0.02880971702], + [-0.8382256244,0.4739933356,-0.2696444333] + ], + "t": [ + [-48.83152805], + [73.52609427], + [332.6787653] + ] + }, + { + "name": "12_19", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 19, + "K": [ + [744.284,0,396.863], + [0,744.47,248.804], + [0,0,1] + ], + "distCoef": [-0.318049,0.0444362,0.000417829,0.000948817,0.0847095], + "R": [ + [-0.2972813843,0.0975420226,0.9497943632], + [0.4134272643,0.9098266462,0.03596346693], + [-0.8606402708,0.4033621545,-0.3108010564] + ], + "t": [ + [-6.347004052], + [101.4062297], + [328.9550302] + ] + }, + { + "name": "12_20", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 20, + "K": [ + [745.173,0,391.68], + [0,745.292,239.851], + [0,0,1] + ], + "distCoef": [-0.316891,0.030971,0.000827356,0.00064571,0.114679], + "R": [ + [-0.3480625566,0.05516818218,0.9358466372], + [0.3680676982,0.9261498325,0.08229615655], + [-0.8621940769,0.3730991283,-0.3426637043] + ], + "t": [ + [18.00373906], + [105.1024652], + [325.6162418] + ] + }, + { + "name": "12_21", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 21, + "K": [ + [744.07,0,385.155], + [0,744.184,238.534], + [0,0,1] + ], + "distCoef": [-0.325321,0.0749068,6.22505e-05,8.78769e-06,0.0274316], + "R": [ + [-0.2944173655,-0.00519814937,0.9556628036], + [0.365777539,0.9232287513,0.117709238], + [-0.882907247,0.3842156322,-0.2699132104] + ], + "t": [ + [4.17424328], + [116.8807078], + [328.2455421] + ] + }, + { + "name": "12_22", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 22, + "K": [ + [747.36,0,358.25], + [0,747.451,237.291], + [0,0,1] + ], + "distCoef": [-0.329867,0.116416,-0.000580151,-0.000763801,-0.0625995], + "R": [ + [-0.323867873,0.0530845029,0.9446118972], + [0.387407199,0.9183241349,0.08121850418], + [-0.8631484594,0.3922535134,-0.3179810029] + ], + "t": [ + [22.53106717], + [133.6738778], + [328.8995429] + ] + }, + { + "name": "12_23", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 23, + "K": [ + 
[748.813,0,380.156], + [0,748.859,237.356], + [0,0,1] + ], + "distCoef": [-0.333932,0.115832,0.000621747,-0.000254241,-0.0140772], + "R": [ + [-0.3097958639,0.0326105921,0.9502436908], + [0.3550951383,0.9310652686,0.08381472691], + [-0.8820056493,0.3633923705,-0.3000200319] + ], + "t": [ + [-6.485061334], + [151.418855], + [323.8858443] + ] + }, + { + "name": "12_24", + "type": "vga", + "resolution": [640,480], + "panel": 12, + "node": 24, + "K": [ + [745.33,0,360.408], + [0,745.472,237.433], + [0,0,1] + ], + "distCoef": [-0.321653,0.057929,3.69615e-05,-0.000478596,0.0560779], + "R": [ + [-0.3250711399,0.1046959739,0.9398763254], + [0.4072848242,0.9124585149,0.03922410658], + [-0.8534915501,0.395547989,-0.3392550109] + ], + "t": [ + [2.217299854], + [123.8595425], + [329.2221602] + ] + }, + { + "name": "13_01", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 1, + "K": [ + [747.6,0,355.92], + [0,747.783,249.853], + [0,0,1] + ], + "distCoef": [-0.333712,0.144699,-6.46303e-05,-0.0011294,-0.0924471], + "R": [ + [0.5138271048,0.01100033104,0.857823233], + [0.08358608019,0.9945184566,-0.06282043172], + [-0.8538120833,0.1039809221,0.5100910647] + ], + "t": [ + [-37.95328646], + [135.6435695], + [289.9999799] + ] + }, + { + "name": "13_02", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 2, + "K": [ + [743.227,0,372.15], + [0,743.265,265.407], + [0,0,1] + ], + "distCoef": [-0.306942,-0.0266079,0.000311285,0.000595534,0.199806], + "R": [ + [0.4485620057,-0.005900946102,0.8937322339], + [0.06601293956,0.9974655925,-0.02654587691], + [-0.8913105064,0.07090536373,0.4478147055] + ], + "t": [ + [-38.28645032], + [133.2984516], + [288.856211] + ] + }, + { + "name": "13_03", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 3, + "K": [ + [746.538,0,387.516], + [0,746.833,233.181], + [0,0,1] + ], + "distCoef": [-0.322577,0.0715483,-4.90461e-05,0.000787497,0.0326639], + "R": [ + [0.5260210271,0.02315422103,0.8501563157], + [0.07372016672,0.9946254291,-0.07270208278], + [-0.8472704504,0.1009164896,0.5214869567] + ], + "t": [ + [-53.0750023], + [105.7642054], + [287.8235486] + ] + }, + { + "name": "13_04", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 4, + "K": [ + [744.864,0,367.763], + [0,745.005,229.771], + [0,0,1] + ], + "distCoef": [-0.318118,0.0367901,0.000364188,-0.000713933,0.0879467], + "R": [ + [0.4575577495,0.1623260474,0.8742374736], + [-0.0244195278,0.9851184177,-0.1701334469], + [-0.8888445267,0.05649741078,0.4547124916] + ], + "t": [ + [4.756699591], + [110.8595803], + [285.3944853] + ] + }, + { + "name": "13_05", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 5, + "K": [ + [744.026,0,374.462], + [0,744.21,219.295], + [0,0,1] + ], + "distCoef": [-0.309274,-0.00813814,-0.000611939,0.000562163,0.16533], + "R": [ + [0.5236500196,-0.01990538858,0.8517009055], + [0.0479853053,0.9988290545,-0.006158764858], + [-0.8505810176,0.04409416531,0.5239920201] + ], + "t": [ + [-32.80347729], + [91.75629107], + [282.6719703] + ] + }, + { + "name": "13_06", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 6, + "K": [ + [746.172,0,347.715], + [0,746.412,223.735], + [0,0,1] + ], + "distCoef": [-0.315889,0.0243673,0.00083413,-0.000596366,0.129203], + "R": [ + [0.489601615,0.07237643337,0.8689372305], + [-0.010214584,0.9969567785,-0.07728417735], + [-0.8718864151,0.02896262571,0.488850944] + ], + "t": [ + [7.55259059], + [89.5920217], + [281.8493454] + ] + }, + { + "name": "13_07", + "type": 
"vga", + "resolution": [640,480], + "panel": 13, + "node": 7, + "K": [ + [745.619,0,383.372], + [0,745.683,224.508], + [0,0,1] + ], + "distCoef": [-0.315816,0.0424659,0.000456201,0.000714024,0.0879752], + "R": [ + [0.5142457137,-0.005076098829,0.8576278792], + [0.07753605572,0.9961627141,-0.04059565316], + [-0.8541308483,0.08737322366,0.5126659866] + ], + "t": [ + [9.165152848], + [86.80281732], + [287.1451009] + ] + }, + { + "name": "13_08", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 8, + "K": [ + [746.151,0,390.693], + [0,746.159,238.847], + [0,0,1] + ], + "distCoef": [-0.312796,0.0112848,0.00109903,0.000945928,0.138088], + "R": [ + [0.5333632905,-0.08775347438,0.841322131], + [0.13459771,0.9907366672,0.0180086874], + [-0.8351090089,0.1036348594,0.5402339855] + ], + "t": [ + [14.59630248], + [78.12680456], + [289.302137] + ] + }, + { + "name": "13_09", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 9, + "K": [ + [744.811,0,365.557], + [0,745.05,239.01], + [0,0,1] + ], + "distCoef": [-0.302561,-0.0588071,-0.000331846,-0.00065645,0.252299], + "R": [ + [0.515993865,0.007464548532,0.8565597538], + [0.05311793688,0.9977587535,-0.04069342277], + [-0.8549437502,0.06649624343,0.5144408941] + ], + "t": [ + [47.02842806], + [101.5821868], + [285.7219747] + ] + }, + { + "name": "13_10", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 10, + "K": [ + [744.185,0,393.537], + [0,744.44,231.354], + [0,0,1] + ], + "distCoef": [-0.321367,0.0639595,-3.49657e-05,0.000800078,0.0579089], + "R": [ + [0.5364096096,-0.02345912583,0.8436316733], + [0.07330244032,0.9971310212,-0.01888064639], + [-0.8407683884,0.07196802054,0.536590273] + ], + "t": [ + [31.38919798], + [122.486781], + [287.1552388] + ] + }, + { + "name": "13_11", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 11, + "K": [ + [745.973,0,365.594], + [0,746.037,211.677], + [0,0,1] + ], + "distCoef": [-0.32905,0.0977698,-0.000962762,0.000946642,0.0190885], + "R": [ + [0.5178117038,0.00482526951,0.8554810087], + [0.01921134431,0.9996663333,-0.01726691564], + [-0.8552788806,0.02537595122,0.5175462273] + ], + "t": [ + [57.16543019], + [149.3252564], + [279.6241941] + ] + }, + { + "name": "13_12", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 12, + "K": [ + [745.909,0,358.218], + [0,746.022,220.333], + [0,0,1] + ], + "distCoef": [-0.338571,0.148871,-0.00100229,-0.000678393,-0.0710162], + "R": [ + [0.5368407815,0.02503814463,0.8433119628], + [-0.01156171997,0.9996840035,-0.02232083821], + [-0.8436043516,0.002232599467,0.5369606257] + ], + "t": [ + [51.57359577], + [176.1957711], + [275.7319623] + ] + }, + { + "name": "13_13", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 13, + "K": [ + [743.068,0,370.139], + [0,743.357,232.303], + [0,0,1] + ], + "distCoef": [-0.302401,-0.0553181,-0.00107418,-0.000672395,0.220417], + "R": [ + [0.5299693687,-0.06080201885,0.8458342525], + [0.13849556,0.9902402801,-0.01559383094], + [-0.8366310107,0.1254085412,0.5332178257] + ], + "t": [ + [16.99243391], + [145.7883087], + [295.0494301] + ] + }, + { + "name": "13_14", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 14, + "K": [ + [743.724,0,347.611], + [0,743.902,235.434], + [0,0,1] + ], + "distCoef": [-0.315484,0.0296225,-0.000529931,-0.000276443,0.110913], + "R": [ + [0.5388576125,-0.001120175332,0.8423961174], + [0.06888686412,0.9967085439,-0.04273965901], + [-0.8395755317,0.08106061749,0.5371611517] + ], + "t": [ + 
[22.68047362], + [178.4537167], + [288.5132471] + ] + }, + { + "name": "13_15", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 15, + "K": [ + [748.48,0,370.578], + [0,748.498,231.761], + [0,0,1] + ], + "distCoef": [-0.333743,0.123731,0.000274987,0.00129665,-0.0264397], + "R": [ + [0.5569883215,-0.02228411773,0.8302213126], + [0.06483002391,0.9977563557,-0.01671294857], + [-0.827986158,0.06313218472,0.5571833177] + ], + "t": [ + [-8.30154925], + [184.6918205], + [284.5865319] + ] + }, + { + "name": "13_16", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 16, + "K": [ + [748.413,0,364.616], + [0,748.358,230.166], + [0,0,1] + ], + "distCoef": [-0.337541,0.138107,0.000557985,-0.000490808,-0.0648839], + "R": [ + [0.5035312414,0.04830043061,0.8626258501], + [0.03089895722,0.996790644,-0.07384894344], + [-0.8634243125,0.06383948941,0.5004227975] + ], + "t": [ + [5.312179267], + [173.5565462], + [284.5085099] + ] + }, + { + "name": "13_17", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 17, + "K": [ + [745.143,0,372.782], + [0,745.112,223.2], + [0,0,1] + ], + "distCoef": [-0.321603,0.0646008,-0.000584526,0.000805086,0.0603349], + "R": [ + [0.5471603314,0.02993221277,0.8364924593], + [0.06649342528,0.9946477166,-0.07908567611], + [-0.8343825239,0.09889379359,0.5422414789] + ], + "t": [ + [-32.63653561], + [167.4383368], + [289.2367997] + ] + }, + { + "name": "13_18", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 18, + "K": [ + [745.136,0,373.506], + [0,745.259,215.704], + [0,0,1] + ], + "distCoef": [-0.333755,0.12331,-0.00049301,0.00138004,-0.0323155], + "R": [ + [0.5039095131,0.07384116584,0.8605943788], + [0.02822760746,0.9943991795,-0.1018502524], + [-0.8632950856,0.07561583139,0.4990028469] + ], + "t": [ + [-29.61131213], + [166.0398843], + [286.9453226] + ] + }, + { + "name": "13_19", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 19, + "K": [ + [743.638,0,344.046], + [0,743.783,238.416], + [0,0,1] + ], + "distCoef": [-0.319291,0.0355055,-0.000169258,0.000161892,0.118247], + "R": [ + [0.5180347054,0.01180967192,0.8552780692], + [0.1057363227,0.9913513706,-0.07773216881], + [-0.8487990775,0.1307019191,0.512305704] + ], + "t": [ + [-19.08174331], + [122.2280138], + [293.3272927] + ] + }, + { + "name": "13_20", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 20, + "K": [ + [745.321,0,372.761], + [0,745.559,236.547], + [0,0,1] + ], + "distCoef": [-0.320489,0.0479206,-9.03328e-05,-0.000256288,0.0784864], + "R": [ + [0.4966252135,-0.01754426777,0.8677877598], + [0.06583916704,0.9976766247,-0.01750875645], + [-0.8654643848,0.06582971318,0.4966264667] + ], + "t": [ + [-11.61163777], + [120.2765647], + [285.1928757] + ] + }, + { + "name": "13_21", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 21, + "K": [ + [745.539,0,371.886], + [0,745.656,230.519], + [0,0,1] + ], + "distCoef": [-0.326644,0.0839413,-0.000557984,0.000204085,0.0126328], + "R": [ + [0.5330371562,-0.03752357961,0.8452593514], + [0.08887796824,0.9959722199,-0.01183402057], + [-0.8414107777,0.08143290645,0.5342252193] + ], + "t": [ + [-6.03247131], + [109.6165459], + [286.9430377] + ] + }, + { + "name": "13_22", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 22, + "K": [ + [744.018,0,396.717], + [0,744.224,249.141], + [0,0,1] + ], + "distCoef": [-0.315372,0.0205822,-0.000440151,0.000134817,0.105074], + "R": [ + [0.4984198723,-0.001673636668,0.8669341554], + 
[0.03130878513,0.9993805529,-0.01607079461], + [-0.8663702389,0.03515265859,0.4981635271] + ], + "t": [ + [26.09238071], + [136.8142763], + [280.4949188] + ] + }, + { + "name": "13_23", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 23, + "K": [ + [744.884,0,382.514], + [0,744.877,235.74], + [0,0,1] + ], + "distCoef": [-0.326378,0.0966908,-9.48994e-05,0.00105607,0.00534895], + "R": [ + [0.4908089633,-0.01723518027,0.8710967283], + [0.04978157704,0.9987257364,-0.008288432131], + [-0.8698438688,0.04743260567,0.4910415377] + ], + "t": [ + [21.95453226], + [154.6836493], + [281.6596012] + ] + }, + { + "name": "13_24", + "type": "vga", + "resolution": [640,480], + "panel": 13, + "node": 24, + "K": [ + [744.481,0,341.813], + [0,744.509,213.322], + [0,0,1] + ], + "distCoef": [-0.310201,-0.0109775,-0.00130948,-0.000370453,0.189258], + "R": [ + [0.5283332962,-0.01827851401,0.8488402818], + [0.07383881778,0.996969434,-0.02449033896], + [-0.8458201683,0.0756164244,0.5280818111] + ], + "t": [ + [-10.59416721], + [149.8670778], + [286.3856475] + ] + }, + { + "name": "14_01", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 1, + "K": [ + [745.639,0,394.42], + [0,745.872,232.374], + [0,0,1] + ], + "distCoef": [-0.317821,0.05701,0.000216723,0.00145431,0.0516441], + "R": [ + [0.1117244957,0.006687085701,0.9937167202], + [0.1929264895,0.9808052728,-0.02829110459], + [-0.9748317838,0.1948750877,0.1082898585] + ], + "t": [ + [-10.76838593], + [183.2092961], + [300.2249606] + ] + }, + { + "name": "14_02", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 2, + "K": [ + [744.265,0,384.24], + [0,744.607,234.555], + [0,0,1] + ], + "distCoef": [-0.314122,0.0172489,-0.000351192,-3.05431e-05,0.116521], + "R": [ + [0.09126102309,0.01926845044,0.9956405739], + [0.1889483007,0.9813154942,-0.03631033643], + [-0.9777371658,0.191438313,0.08591511501] + ], + "t": [ + [-20.54744948], + [195.8515337], + [299.6149103] + ] + }, + { + "name": "14_03", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 3, + "K": [ + [742.909,0,383.13], + [0,743.051,234.161], + [0,0,1] + ], + "distCoef": [-0.311566,0.0211516,-0.000212815,-9.64233e-05,0.110817], + "R": [ + [0.07658267666,-0.01244461629,0.9969855692], + [0.2193131093,0.9756433613,-0.004668149478], + [-0.9726442586,0.2190095044,0.07744664757] + ], + "t": [ + [-39.95619704], + [171.7405641], + [305.3439137] + ] + }, + { + "name": "14_04", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 4, + "K": [ + [745.057,0,349.277], + [0,745.321,214.2], + [0,0,1] + ], + "distCoef": [-0.31581,0.0237721,-0.00140945,-0.000667487,0.124292], + "R": [ + [0.09341145846,-0.02354383001,0.9953491787], + [0.2305453591,0.9730606003,0.001380415192], + [-0.9685675696,0.2293441873,0.09632293059] + ], + "t": [ + [-43.73412593], + [146.7921304], + [306.2893961] + ] + }, + { + "name": "14_05", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 5, + "K": [ + [744.634,0,387.597], + [0,744.752,225.246], + [0,0,1] + ], + "distCoef": [-0.315944,0.0434616,-0.000268259,0.00110436,0.0780237], + "R": [ + [0.1133728096,0.0374780752,0.9928454059], + [0.2222309073,0.973014014,-0.06210597779], + [-0.9683801061,0.2276820645,0.1019845459] + ], + "t": [ + [-53.79623552], + [137.113178], + [305.5099477] + ] + }, + { + "name": "14_06", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 6, + "K": [ + [744.759,0,388.645], + [0,744.666,221.73], + [0,0,1] + ], + "distCoef": 
[-0.306159,-0.0283273,-0.000508774,0.00094455,0.192402], + "R": [ + [0.1564984143,0.01913164242,0.9874928995], + [0.2309282446,0.9713913042,-0.05541732523], + [-0.96030224,0.2367127254,0.1476031622] + ], + "t": [ + [-66.24261018], + [112.7515407], + [303.5978047] + ] + }, + { + "name": "14_07", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 7, + "K": [ + [744.959,0,375.286], + [0,745.092,235.744], + [0,0,1] + ], + "distCoef": [-0.302136,-0.0624017,-0.000302824,-0.00146028,0.239945], + "R": [ + [0.0628689268,0.03077162571,0.9975472947], + [0.2444661638,0.9685997585,-0.04528578729], + [-0.967617586,0.2467136292,0.05337220603] + ], + "t": [ + [-19.11814477], + [98.74694092], + [308.9777955] + ] + }, + { + "name": "14_08", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 8, + "K": [ + [746.649,0,384.752], + [0,746.836,237.267], + [0,0,1] + ], + "distCoef": [-0.321628,0.0600031,0.000104796,0.000953791,0.0524376], + "R": [ + [0.1158239713,-0.07384920575,0.9905206219], + [0.2473198554,0.9679682291,0.043248082], + [-0.9619863288,0.2399662524,0.1303782992] + ], + "t": [ + [-45.76229918], + [76.40869106], + [305.3733784] + ] + }, + { + "name": "14_09", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 9, + "K": [ + [745.672,0,372.774], + [0,745.737,209.129], + [0,0,1] + ], + "distCoef": [-0.30917,-0.00857977,-4.68803e-05,-0.000521617,0.17194], + "R": [ + [0.1233501146,0.01050711315,0.9923075883], + [0.2153087978,0.9758411417,-0.0370970036], + [-0.9687243523,0.2182284735,0.1181078428] + ], + "t": [ + [-15.44854612], + [78.73632155], + [304.5944309] + ] + }, + { + "name": "14_10", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 10, + "K": [ + [744.36,0,350.493], + [0,744.605,227.167], + [0,0,1] + ], + "distCoef": [-0.324539,0.0696676,-0.000964917,-0.000688724,0.0453805], + "R": [ + [0.0653712546,0.005547467364,0.9978455916], + [0.2748842968,0.9611936881,-0.02335203178], + [-0.9592524289,0.2758186354,0.06130952564] + ], + "t": [ + [17.36142141], + [73.86484437], + [309.5485763] + ] + }, + { + "name": "14_11", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 11, + "K": [ + [744.072,0,352.953], + [0,744.032,218.847], + [0,0,1] + ], + "distCoef": [-0.310531,-0.00866492,-5.61729e-06,0.000627577,0.179884], + "R": [ + [0.08325845442,0.01268657881,0.9964472292], + [0.1993298125,0.97949952,-0.02912586749], + [-0.9763890903,0.2010466141,0.07902280276] + ], + "t": [ + [33.26019053], + [89.58305599], + [303.0664402] + ] + }, + { + "name": "14_12", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 12, + "K": [ + [743.677,0,359.077], + [0,743.623,233.815], + [0,0,1] + ], + "distCoef": [-0.305265,-0.0518121,0.000714314,0.000432839,0.265088], + "R": [ + [0.06818541392,0.004787243789,0.9976611808], + [0.2533830838,0.9671167716,-0.02195821049], + [-0.9649599796,0.2542876962,0.06473025078] + ], + "t": [ + [54.03449748], + [85.53998459], + [306.9876015] + ] + }, + { + "name": "14_13", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 13, + "K": [ + [742.736,0,368.122], + [0,742.832,238.615], + [0,0,1] + ], + "distCoef": [-0.303469,-0.0412536,1.82225e-05,-0.000473228,0.205739], + "R": [ + [0.1225239282,-0.0735967149,0.9897329996], + [0.2305366224,0.9720798639,0.0437447595], + [-0.9653189902,0.222809923,0.1360697815] + ], + "t": [ + [17.43625272], + [116.7070017], + [307.0317679] + ] + }, + { + "name": "14_14", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 14, + 
"K": [ + [745.328,0,371.219], + [0,745.487,209.713], + [0,0,1] + ], + "distCoef": [-0.318297,0.0286867,-0.0013247,0.000626009,0.137928], + "R": [ + [0.06972690557,-0.0276618613,0.9971825209], + [0.2175762615,0.9759712693,0.01185967683], + [-0.9735495514,0.2161363064,0.0740700209] + ], + "t": [ + [57.75964066], + [131.0709572], + [303.578107] + ] + }, + { + "name": "14_15", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 15, + "K": [ + [743.637,0,370.163], + [0,743.479,235.403], + [0,0,1] + ], + "distCoef": [-0.301307,-0.0600698,0.000220332,0.000264974,0.263845], + "R": [ + [0.0871387997,-0.1078492175,0.9903410402], + [0.2171380052,0.9722761796,0.08677624828], + [-0.9722437535,0.2074790999,0.1081411432] + ], + "t": [ + [27.10934266], + [155.0300785], + [303.8314173] + ] + }, + { + "name": "14_16", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 16, + "K": [ + [747.749,0,388.765], + [0,747.73,234.855], + [0,0,1] + ], + "distCoef": [-0.320028,0.057848,-0.00103044,0.00101463,0.0716113], + "R": [ + [0.09276252326,-0.02731891999,0.9953134134], + [0.2004837996,0.9796626634,0.008204393401], + [-0.9752955246,0.1987831547,0.09635298148] + ], + "t": [ + [25.02944215], + [165.1686099], + [301.5459594] + ] + }, + { + "name": "14_17", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 17, + "K": [ + [745.477,0,358.035], + [0,745.633,228.78], + [0,0,1] + ], + "distCoef": [-0.315933,0.0359808,-0.000244793,0.00106736,0.101835], + "R": [ + [0.09323456203,-0.04884472803,0.9944453273], + [0.1997864834,0.9793990461,0.02937464128], + [-0.9753936013,0.1959380031,0.1010723576] + ], + "t": [ + [12.52671676], + [185.8338565], + [300.6683817] + ] + }, + { + "name": "14_19", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 19, + "K": [ + [746.962,0,392.223], + [0,747.34,219.936], + [0,0,1] + ], + "distCoef": [-0.325078,0.0885503,-0.00165532,0.000580691,0.0160315], + "R": [ + [0.129696032,0.03909405168,0.990782819], + [0.1776002444,0.9821476201,-0.06200165731], + [-0.9755188837,0.1840046397,0.1204375361] + ], + "t": [ + [-4.746570817], + [166.089254], + [298.9402723] + ] + }, + { + "name": "14_20", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 20, + "K": [ + [744.91,0,339.915], + [0,744.956,221.133], + [0,0,1] + ], + "distCoef": [-0.306862,-0.0244375,-6.76743e-05,-0.000102471,0.205298], + "R": [ + [0.09943504227,-0.007298095184,0.9950172914], + [0.2125993636,0.9770380132,-0.01407946415], + [-0.9720669642,0.212940035,0.09870338653] + ], + "t": [ + [-22.7866272], + [143.0595857], + [303.8181509] + ] + }, + { + "name": "14_21", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 21, + "K": [ + [743.577,0,349.797], + [0,743.73,227.793], + [0,0,1] + ], + "distCoef": [-0.307046,-0.0206712,-0.000861395,-9.97172e-05,0.196115], + "R": [ + [0.09969364468,-0.01462231859,0.9949107322], + [0.2541863771,0.9670897407,-0.01125696175], + [-0.9620033591,0.2540150021,0.1001294952] + ], + "t": [ + [-20.43364439], + [109.4423166], + [308.9174676] + ] + }, + { + "name": "14_22", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 22, + "K": [ + [745.066,0,381.498], + [0,745.047,229.678], + [0,0,1] + ], + "distCoef": [-0.314894,0.0257947,-0.000483886,0.00117112,0.111876], + "R": [ + [0.08696832552,-0.05294226024,0.9948033109], + [0.2154078845,0.9759627551,0.03310806346], + [-0.9726437959,0.2114091239,0.09628202687] + ], + "t": [ + [-4.298071534], + [115.0382234], + [303.8536261] + ] + }, + { + "name": 
"14_23", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 23, + "K": [ + [746.602,0,379.206], + [0,746.635,260.689], + [0,0,1] + ], + "distCoef": [-0.319922,0.0568918,0.00103779,-0.000422086,0.0766843], + "R": [ + [0.09129519856,-0.01052008078,0.9957683037], + [0.2195471399,0.9755524467,-0.009822274065], + [-0.9713208739,0.2195148095,0.09137290798] + ], + "t": [ + [18.69590833], + [125.3942709], + [304.7857903] + ] + }, + { + "name": "14_24", + "type": "vga", + "resolution": [640,480], + "panel": 14, + "node": 24, + "K": [ + [745.388,0,382.392], + [0,745.496,224.015], + [0,0,1] + ], + "distCoef": [-0.302393,-0.0525763,-0.000559682,-6.77e-05,0.234314], + "R": [ + [0.08118536371,-0.04636746828,0.9956199047], + [0.1796446798,0.9832385033,0.03114216711], + [-0.9803758084,0.1763295309,0.0881542445] + ], + "t": [ + [8.147122648], + [159.0280693], + [298.1193244] + ] + }, + { + "name": "15_01", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 1, + "K": [ + [747.532,0,374.739], + [0,747.668,233.944], + [0,0,1] + ], + "distCoef": [-0.331439,0.109037,-0.000609362,0.000392501,-0.000621335], + "R": [ + [0.7848571462,0.05717032211,0.6170338843], + [0.1817012858,0.9307358272,-0.3173569956], + [-0.5924389444,0.3611957561,0.7201067442] + ], + "t": [ + [-19.59276639], + [102.5270366], + [325.6365462] + ] + }, + { + "name": "15_02", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 2, + "K": [ + [743.597,0,385.764], + [0,743.786,211.188], + [0,0,1] + ], + "distCoef": [-0.307778,-0.0279819,-0.000454196,0.00143268,0.205643], + "R": [ + [0.7963392439,-0.01332837804,0.6047033677], + [0.2601504211,0.910106147,-0.3225345868], + [-0.5460453892,0.4141607847,0.7282206241] + ], + "t": [ + [-38.00771612], + [61.10094736], + [329.1235579] + ] + }, + { + "name": "15_03", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 3, + "K": [ + [746.709,0,382.284], + [0,746.792,243.451], + [0,0,1] + ], + "distCoef": [-0.343209,0.149416,0.000603517,0.00195788,-0.0395936], + "R": [ + [0.7773715491,0.01124156294,0.6289412548], + [0.2547080739,0.908583342,-0.3310590698], + [-0.5751671686,0.4175523175,0.7034435232] + ], + "t": [ + [-3.435783379], + [55.70511308], + [330.3798829] + ] + }, + { + "name": "15_04", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 4, + "K": [ + [743.976,0,365.248], + [0,744.344,229.757], + [0,0,1] + ], + "distCoef": [-0.297483,-0.106842,0.000162294,-0.00147347,0.393874], + "R": [ + [0.7524447247,-0.05297584633,0.6565215122], + [0.2825071426,0.9263759092,-0.2490329079], + [-0.5949929838,0.3728555143,0.7120127209] + ], + "t": [ + [9.049706825], + [87.26745214], + [326.8342451] + ] + }, + { + "name": "15_05", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 5, + "K": [ + [748.766,0,349.367], + [0,748.975,233.229], + [0,0,1] + ], + "distCoef": [-0.341466,0.149186,0.00133441,-0.000377568,-0.0615035], + "R": [ + [0.7609990379,-0.1304343502,0.6355055818], + [0.3323849453,0.9196335935,-0.2092708816], + [-0.5571361704,0.3704874276,0.7431946943] + ], + "t": [ + [9.029843232], + [83.469382], + [327.9910328] + ] + }, + { + "name": "15_06", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 6, + "K": [ + [747.104,0,395.739], + [0,747.205,237.611], + [0,0,1] + ], + "distCoef": [-0.337038,0.14046,-0.00100634,0.00170735,-0.0468264], + "R": [ + [0.7339738121,-0.1238803965,0.6677844641], + [0.3595276943,0.9050347286,-0.227270713], + [-0.5762137452,0.4068977603,0.7088102232] + ], + "t": [ + 
[34.88470946], + [89.42074723], + [330.2467181] + ] + }, + { + "name": "15_07", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 7, + "K": [ + [743.991,0,393.18], + [0,744.112,255.459], + [0,0,1] + ], + "distCoef": [-0.325283,0.0732539,0.00077889,1.70805e-05,0.0462558], + "R": [ + [0.7496842409,-0.1571943749,0.6428557128], + [0.3434403747,0.9227495198,-0.1748771933], + [-0.5657050892,0.3518852828,0.7457576683] + ], + "t": [ + [12.35233863], + [128.2674639], + [324.6313017] + ] + }, + { + "name": "15_08", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 8, + "K": [ + [744.616,0,369.102], + [0,744.835,223.742], + [0,0,1] + ], + "distCoef": [-0.336732,0.141968,-0.000206183,0.000677154,-0.0657397], + "R": [ + [0.7264947252,-0.2131742795,0.6532703428], + [0.4249899792,0.8864309285,-0.1833677358], + [-0.5399897516,0.4108490422,0.7345843265] + ], + "t": [ + [15.28675757], + [126.0458703], + [333.4285141] + ] + }, + { + "name": "15_09", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 9, + "K": [ + [747.517,0,392.733], + [0,747.836,218.574], + [0,0,1] + ], + "distCoef": [-0.334626,0.113242,0.000443349,0.00121381,-0.00550976], + "R": [ + [0.8000319441,0.07155257429,0.5956753458], + [0.1937456116,0.9088549369,-0.3693850858], + [-0.5678129326,0.4109293525,0.7132499848] + ], + "t": [ + [-44.09712116], + [90.97242653], + [330.2186197] + ] + }, + { + "name": "15_10", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 10, + "K": [ + [743.904,0,354.135], + [0,744.494,220.038], + [0,0,1] + ], + "distCoef": [-0.309276,-0.0261099,-0.00127318,0.000283377,0.220693], + "R": [ + [0.7314656006,-0.1499734814,0.6651812009], + [0.3639090401,0.9108337109,-0.1948131455], + [-0.576652656,0.3845645668,0.720820233] + ], + "t": [ + [2.360923884], + [158.0207055], + [327.7017732] + ] + }, + { + "name": "15_11", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 11, + "K": [ + [745.441,0,366.024], + [0,745.471,238.165], + [0,0,1] + ], + "distCoef": [-0.311636,0.00305556,-0.00136926,0.00112458,0.163822], + "R": [ + [0.743215427,-0.1065195831,0.660518287], + [0.3430146167,0.9082888556,-0.2394834597], + [-0.5744317207,0.4045552288,0.7115920636] + ], + "t": [ + [3.38448511], + [170.5922255], + [331.2143489] + ] + }, + { + "name": "15_12", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 12, + "K": [ + [743.816,0,384.478], + [0,744.21,221.813], + [0,0,1] + ], + "distCoef": [-0.309294,-0.0116228,-0.000777235,0.00017565,0.174372], + "R": [ + [0.799529392,-0.03302696284,0.5997182431], + [0.261290645,0.91817945,-0.2977812898], + [-0.540814155,0.3947856601,0.7427410938] + ], + "t": [ + [-15.11731065], + [179.1857595], + [329.2699106] + ] + }, + { + "name": "15_13", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 13, + "K": [ + [744.594,0,366.809], + [0,744.805,211.378], + [0,0,1] + ], + "distCoef": [-0.313339,0.0076854,-0.000770441,0.000328229,0.137582], + "R": [ + [0.7697001229,-0.07364256128,0.6341439064], + [0.280866324,0.9310898592,-0.2327783971], + [-0.5733025631,0.3572792288,0.7373436945] + ], + "t": [ + [-27.06753178], + [173.6081799], + [322.2797536] + ] + }, + { + "name": "15_14", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 14, + "K": [ + [744.088,0,376.311], + [0,744.421,235.85], + [0,0,1] + ], + "distCoef": [-0.308902,-0.0157485,-0.000258056,-0.00040893,0.167363], + "R": [ + [0.8019727226,0.02030217439,0.5970155559], + [0.20788107,0.9274680659,-0.31078682], + 
[-0.5600225111,0.3733507848,0.7395836522] + ], + "t": [ + [-32.35663304], + [177.8511702], + [324.3990212] + ] + }, + { + "name": "15_15", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 15, + "K": [ + [745.471,0,391.786], + [0,745.597,244.782], + [0,0,1] + ], + "distCoef": [-0.319471,0.0520955,-9.03549e-05,0.00103599,0.0679082], + "R": [ + [0.7993824794,0.07801580494,0.5957358356], + [0.170767806,0.9211391478,-0.3497728217], + [-0.5760434082,0.3813347671,0.723019908] + ], + "t": [ + [-27.66881494], + [158.8808021], + [326.8395357] + ] + }, + { + "name": "15_16", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 16, + "K": [ + [744.688,0,372.572], + [0,744.687,232.622], + [0,0,1] + ], + "distCoef": [-0.313079,0.00611683,0.000601543,0.00134427,0.153664], + "R": [ + [0.8032635264,0.07397377164,0.5910123419], + [0.1542914416,0.9325457224,-0.3264239985], + [-0.5752928456,0.3533926383,0.7376664456] + ], + "t": [ + [-29.95169554], + [148.2901373], + [322.192073] + ] + }, + { + "name": "15_17", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 17, + "K": [ + [746.029,0,371.631], + [0,745.957,227.751], + [0,0,1] + ], + "distCoef": [-0.328618,0.10871,0.000376647,0.00140085,-0.015131], + "R": [ + [0.7930332571,0.09578045983,0.6016014933], + [0.1573865304,0.9218193412,-0.3542295616], + [-0.5884961625,0.3755997947,0.7159588403] + ], + "t": [ + [-34.37744536], + [124.5681533], + [326.9926029] + ] + }, + { + "name": "15_18", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 18, + "K": [ + [745.728,0,355.008], + [0,745.836,235.366], + [0,0,1] + ], + "distCoef": [-0.326785,0.0753795,-0.00141997,0.000421746,0.0593081], + "R": [ + [0.7423074724,-0.1183757606,0.6595201254], + [0.3246236378,0.9245812728,-0.1994215728], + [-0.5861732766,0.362127946,0.7247511576] + ], + "t": [ + [30.16113415], + [163.1800117], + [323.8887405] + ] + }, + { + "name": "15_19", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 19, + "K": [ + [745.415,0,362.511], + [0,745.431,246.567], + [0,0,1] + ], + "distCoef": [-0.31824,0.0392935,0.000511921,2.0382e-05,0.0980721], + "R": [ + [0.7792023734,-0.03485918818,0.6258022837], + [0.250771695,0.9323920084,-0.2603050127], + [-0.5744190268,0.3597637832,0.7352637636] + ], + "t": [ + [-23.21577405], + [116.3982595], + [324.3931588] + ] + }, + { + "name": "15_20", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 20, + "K": [ + [745.757,0,370.457], + [0,745.798,252.296], + [0,0,1] + ], + "distCoef": [-0.322058,0.058259,0.000816175,0.000770211,0.0698692], + "R": [ + [0.7754488131,-0.03297117701,0.6305489986], + [0.2704225106,0.9197540051,-0.2844718542], + [-0.5705705951,0.391108005,0.7221383001] + ], + "t": [ + [-0.5150360293], + [101.3336776], + [328.6175717] + ] + }, + { + "name": "15_21", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 21, + "K": [ + [746.009,0,385.23], + [0,746.113,244.377], + [0,0,1] + ], + "distCoef": [-0.328614,0.0717398,0.00119782,0.000153035,0.0631847], + "R": [ + [0.7150247804,-0.1629175474,0.6798510396], + [0.3900461789,0.9000077369,-0.194550898], + [-0.5801754405,0.4042820134,0.7070732013] + ], + "t": [ + [2.095653738], + [113.9962742], + [330.0144097] + ] + }, + { + "name": "15_22", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 22, + "K": [ + [747.044,0,384.928], + [0,747.43,218.136], + [0,0,1] + ], + "distCoef": [-0.332061,0.0970763,-0.00131827,0.000796644,0.024739], + "R": [ + 
[0.7476996574,-0.1120966581,0.6545071135], + [0.3349363173,0.9147459603,-0.2259590484], + [-0.5733784838,0.3881677053,0.7215004829] + ], + "t": [ + [-3.202807266], + [138.4357179], + [328.3283502] + ] + }, + { + "name": "15_23", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 23, + "K": [ + [746.525,0,381.586], + [0,746.566,231.744], + [0,0,1] + ], + "distCoef": [-0.323751,0.0809499,0.00143311,0.000786746,0.0334271], + "R": [ + [0.7874675535,-0.04961201835,0.6143561669], + [0.2785108695,0.9178324582,-0.2828697124], + [-0.5498422936,0.3938555906,0.7365807667] + ], + "t": [ + [-21.67007007], + [141.1281207], + [328.549187] + ] + }, + { + "name": "15_24", + "type": "vga", + "resolution": [640,480], + "panel": 15, + "node": 24, + "K": [ + [744.493,0,392.291], + [0,744.573,223.193], + [0,0,1] + ], + "distCoef": [-0.308278,-0.0176562,-0.000671893,0.00116828,0.17277], + "R": [ + [0.7758686755,-0.01407586642,0.6307374005], + [0.2927445364,0.8936390769,-0.3401614861], + [-0.5588635207,0.4485655695,0.6974672] + ], + "t": [ + [-20.05926183], + [105.1778582], + [335.8474538] + ] + }, + { + "name": "16_01", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 1, + "K": [ + [745.918,0,380.409], + [0,745.86,226.454], + [0,0,1] + ], + "distCoef": [-0.329171,0.0901569,-0.000500393,-0.000311386,0.0200307], + "R": [ + [0.8121486446,0.04341076946,0.5818333819], + [-0.0759194996,0.9966126489,0.03161419974], + [-0.5784901112,-0.06984792866,0.8126933358] + ], + "t": [ + [55.6088262], + [125.3657692], + [265.9940479] + ] + }, + { + "name": "16_02", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 2, + "K": [ + [747.364,0,392.411], + [0,747.161,225.523], + [0,0,1] + ], + "distCoef": [-0.325367,0.0819479,0.000479765,0.00158774,0.0247525], + "R": [ + [0.8168932447,0.07701494166,0.5716241121], + [-0.08391193553,0.9963702084,-0.01432462351], + [-0.5706524458,-0.03626439747,0.8203905653] + ], + "t": [ + [75.42528996], + [124.1426197], + [270.1790967] + ] + }, + { + "name": "16_03", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 3, + "K": [ + [744.743,0,378.771], + [0,744.551,249.858], + [0,0,1] + ], + "distCoef": [-0.319546,0.0369202,-5.08119e-05,0.00111176,0.115068], + "R": [ + [0.8437113062,0.07102371173,0.5320778742], + [-0.08587784221,0.9963005803,0.003185889303], + [-0.5298832211,-0.04838167055,0.8466894271] + ], + "t": [ + [57.15960424], + [150.0301024], + [271.4615922] + ] + }, + { + "name": "16_04", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 4, + "K": [ + [745.916,0,377.522], + [0,746.078,215.704], + [0,0,1] + ], + "distCoef": [-0.32195,0.0590592,-0.000295617,0.000900619,0.0691531], + "R": [ + [0.8298382679,0.121110683,0.5447023514], + [-0.1306769278,0.9911961099,-0.02130286834], + [-0.5424868568,-0.05350209448,0.8383588349] + ], + "t": [ + [50.00635036], + [157.1807453], + [269.6015294] + ] + }, + { + "name": "16_05", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 5, + "K": [ + [745.303,0,378.655], + [0,745.572,246.962], + [0,0,1] + ], + "distCoef": [-0.315703,0.0277156,6.06815e-05,0.000389915,0.121683], + "R": [ + [0.8187116226,0.05412921644,0.5716478872], + [-0.09011941267,0.9953220251,0.0348218015], + [-0.5670888559,-0.08002558546,0.8197598034] + ], + "t": [ + [44.81120287], + [188.347539], + [263.8787228] + ] + }, + { + "name": "16_06", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 6, + "K": [ + [745.606,0,364.995], + [0,745.957,239.275], + [0,0,1] + ], + 
"distCoef": [-0.315328,0.0257972,-0.000148911,-0.000553771,0.11289], + "R": [ + [0.8250072615,0.03741598225,0.5638821355], + [-0.06134414867,0.997839028,0.02354080738], + [-0.5617827996,-0.05401220659,0.8255196955] + ], + "t": [ + [18.96573731], + [189.9536973], + [269.3804852] + ] + }, + { + "name": "16_07", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 7, + "K": [ + [748.144,0,375.351], + [0,748.158,222.981], + [0,0,1] + ], + "distCoef": [-0.330846,0.0923667,0.000924419,-0.000952259,0.0155541], + "R": [ + [0.837010476,0.04764620621,0.5451085232], + [-0.06946161724,0.9973944363,0.0194787641], + [-0.542760119,-0.05416804921,0.8381391744] + ], + "t": [ + [-3.044263505], + [177.2440129], + [269.3681033] + ] + }, + { + "name": "16_08", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 8, + "K": [ + [744.865,0,367.243], + [0,744.958,216.687], + [0,0,1] + ], + "distCoef": [-0.318901,0.0494498,-4.02299e-05,-0.00132469,0.0675277], + "R": [ + [0.820488273,0.02086231711,0.571282555], + [-0.05401864215,0.9976917237,0.04114864192], + [-0.569105421,-0.06462188605,0.8197213134] + ], + "t": [ + [-19.55260409], + [185.7078501], + [268.0867658] + ] + }, + { + "name": "16_09", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 9, + "K": [ + [747.002,0,387.115], + [0,747.11,221.005], + [0,0,1] + ], + "distCoef": [-0.330535,0.106093,-0.000909516,-0.000158007,-0.000767667], + "R": [ + [0.7988895638,0.03324884852,0.6005580562], + [-0.04929092881,0.9987315997,0.01027599727], + [-0.5994546431,-0.03781145137,0.7995151187] + ], + "t": [ + [-23.46737596], + [164.4653247], + [274.3468777] + ] + }, + { + "name": "16_10", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 10, + "K": [ + [747.13,0,370.332], + [0,747.181,215.13], + [0,0,1] + ], + "distCoef": [-0.317083,0.0321021,0.000973109,0.00011315,0.117938], + "R": [ + [0.8533830718,-0.04475694932,0.5193593633], + [-0.01101437775,0.9945367161,0.1038046423], + [-0.5211679348,-0.09430554471,0.8482278279] + ], + "t": [ + [-57.15311463], + [154.6074069], + [261.7210039] + ] + }, + { + "name": "16_11", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 11, + "K": [ + [743.847,0,352.444], + [0,743.813,257.427], + [0,0,1] + ], + "distCoef": [-0.317406,0.0378558,0.000559662,0.00156409,0.0978841], + "R": [ + [0.8306368039,-0.006305585156,0.5567788965], + [-0.01286906876,0.999451376,0.03051776569], + [-0.5566658666,-0.03251440526,0.8300999496] + ], + "t": [ + [-55.68789985], + [125.5954887], + [272.609285] + ] + }, + { + "name": "16_12", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 12, + "K": [ + [744.746,0,358.295], + [0,744.902,240.075], + [0,0,1] + ], + "distCoef": [-0.311924,0.00313238,0.000282789,0.000109914,0.161883], + "R": [ + [0.8248636519,0.04296544146,0.5636966618], + [-0.06337887364,0.9978500361,0.01668603434], + [-0.5617678116,-0.04949016272,0.8258133262] + ], + "t": [ + [-45.5470475], + [111.3455785], + [270.6081331] + ] + }, + { + "name": "16_13", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 13, + "K": [ + [742.599,0,373.118], + [0,742.696,232.489], + [0,0,1] + ], + "distCoef": [-0.30659,-0.0244311,-0.000674534,-0.000450328,0.198624], + "R": [ + [0.8431633834,0.1596479738,0.5134082522], + [-0.1755645793,0.9843078819,-0.01775026834], + [-0.5081855837,-0.07516992751,0.8579608934] + ], + "t": [ + [-27.27822308], + [119.4613899], + [265.3318331] + ] + }, + { + "name": "16_14", + "type": "vga", + "resolution": [640,480], + 
"panel": 16, + "node": 14, + "K": [ + [745.804,0,370.921], + [0,745.998,236.13], + [0,0,1] + ], + "distCoef": [-0.32821,0.0986121,-0.000141995,-6.949e-05,-0.000912797], + "R": [ + [0.8387309717,0.02755081107,0.5438486094], + [-0.05712815546,0.9976599438,0.03756341813], + [-0.5415410705,-0.06257467009,0.8383422211] + ], + "t": [ + [-30.56519475], + [90.10611059], + [268.3571691] + ] + }, + { + "name": "16_15", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 15, + "K": [ + [746.816,0,365.456], + [0,746.849,225.794], + [0,0,1] + ], + "distCoef": [-0.313831,-0.00769663,-0.000408313,0.00132145,0.204366], + "R": [ + [0.832563643,0.03033638007,0.5530980784], + [-0.06055031945,0.9974999941,0.03643378343], + [-0.5506100609,-0.06382370879,0.8323191065] + ], + "t": [ + [-6.42740827], + [88.69840867], + [268.7038743] + ] + }, + { + "name": "16_16", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 16, + "K": [ + [745.958,0,362.302], + [0,745.997,246.977], + [0,0,1] + ], + "distCoef": [-0.334292,0.102923,-0.000499879,-0.000549652,0.00793805], + "R": [ + [0.8469636173,0.04048111503,0.5301074517], + [-0.08872767491,0.9938758,0.0658657255], + [-0.5241946497,-0.1028210748,0.8453684379] + ], + "t": [ + [4.584618298], + [109.8657875], + [264.6056558] + ] + }, + { + "name": "16_17", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 17, + "K": [ + [743.409,0,347.233], + [0,743.501,244.449], + [0,0,1] + ], + "distCoef": [-0.321337,0.060438,0.000289347,-0.000274585,0.0540146], + "R": [ + [0.8338949711,0.06176137043,0.5484566622], + [-0.07967791451,0.9967809419,0.008898524832], + [-0.5461415633,-0.05112031815,0.8361316319] + ], + "t": [ + [32.73506114], + [91.25662398], + [270.2531272] + ] + }, + { + "name": "16_18", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 18, + "K": [ + [745.291,0,372.769], + [0,745.233,242.994], + [0,0,1] + ], + "distCoef": [-0.333422,0.127228,0.000470045,-0.000171948,-0.0533425], + "R": [ + [0.83476387,0.01583088955,0.5503804723], + [-0.006383142992,0.9997976531,-0.01907638369], + [-0.5505711006,0.01241111862,0.8346960089] + ], + "t": [ + [48.20146308], + [84.31846371], + [276.1979749] + ] + }, + { + "name": "16_19", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 19, + "K": [ + [746.318,0,365.802], + [0,746.439,228.058], + [0,0,1] + ], + "distCoef": [-0.329752,0.106043,0.000413141,0.00102356,-0.00232913], + "R": [ + [0.812564017,0.08482803737,0.576666214], + [-0.09768913876,0.9951785947,-0.008740529432], + [-0.5746273144,-0.04923178609,0.8169330944] + ], + "t": [ + [39.50134988], + [124.7306793], + [269.4016435] + ] + }, + { + "name": "16_20", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 20, + "K": [ + [745.104,0,371.377], + [0,745.158,252.192], + [0,0,1] + ], + "distCoef": [-0.317414,0.0233642,0.000269725,0.000539732,0.145301], + "R": [ + [0.8445515108,0.05428741136,0.5327153297], + [-0.06949119822,0.9975462456,0.00851241329], + [-0.5309460603,-0.04420819807,0.8462516862] + ], + "t": [ + [17.33430135], + [146.0606392], + [271.3134014] + ] + }, + { + "name": "16_21", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 21, + "K": [ + [744.321,0,365.126], + [0,744.44,221.253], + [0,0,1] + ], + "distCoef": [-0.310945,0.00293318,4.64093e-05,-0.000454281,0.146346], + "R": [ + [0.8382052649,0.09941648006,0.5362166515], + [-0.1229674254,0.9923765769,0.008230548616], + [-0.531310593,-0.07283607028,0.8440402601] + ], + "t": [ + [5.636303812], + 
[160.8368098], + [266.310691] + ] + }, + { + "name": "16_22", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 22, + "K": [ + [745.695,0,387.973], + [0,745.975,222.039], + [0,0,1] + ], + "distCoef": [-0.325844,0.0780224,-0.000861123,0.000487347,0.0459906], + "R": [ + [0.8503320636,-0.003175777979,0.52623692], + [-0.02504000004,0.9986049625,0.04648792516], + [-0.5256504352,-0.05270714583,0.8490662971] + ], + "t": [ + [-29.03965018], + [141.2975723], + [268.9897195] + ] + }, + { + "name": "16_23", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 23, + "K": [ + [746.757,0,385.384], + [0,746.697,250.739], + [0,0,1] + ], + "distCoef": [-0.330103,0.0993513,0.000581277,0.0005991,0.0043047], + "R": [ + [0.8172674448,0.1129970073,0.565071323], + [-0.1204798393,0.992420693,-0.02420281713], + [-0.5635233199,-0.0482995277,0.8246869852] + ], + "t": [ + [1.484048414], + [120.2737991], + [270.3939501] + ] + }, + { + "name": "16_24", + "type": "vga", + "resolution": [640,480], + "panel": 16, + "node": 24, + "K": [ + [743.909,0,365.262], + [0,744.1,225.983], + [0,0,1] + ], + "distCoef": [-0.309366,-0.0151251,-0.000569796,0.000128233,0.192772], + "R": [ + [0.8488529257,0.0258708029,0.5279956553], + [-0.02681353424,0.9996232069,-0.005871843729], + [-0.5279486195,-0.009173097852,0.8492267715] + ], + "t": [ + [-1.170097817], + [104.9858918], + [274.723166] + ] + }, + { + "name": "17_01", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 1, + "K": [ + [743.511,0,382.741], + [0,744.07,233.668], + [0,0,1] + ], + "distCoef": [-0.303608,-0.0460126,4.19904e-05,0.000729649,0.232264], + "R": [ + [0.7426987355,0.03664601822,-0.6686222084], + [-0.01756201576,0.9992239229,0.035258014], + [0.6693953719,-0.01444372865,0.742765922] + ], + "t": [ + [27.30884403], + [110.2809812], + [269.7471778] + ] + }, + { + "name": "17_02", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 2, + "K": [ + [744.491,0,371.868], + [0,744.58,223.545], + [0,0,1] + ], + "distCoef": [-0.320104,0.0388113,-0.000303412,-0.00118762,0.0743207], + "R": [ + [0.773334615,0.1038173874,-0.6254402635], + [-0.04654036662,0.9931361468,0.107306049], + [0.6322875671,-0.05387526291,0.7728582591] + ], + "t": [ + [68.17402308], + [125.7906344], + [263.8293382] + ] + }, + { + "name": "17_03", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 3, + "K": [ + [744.096,0,373.775], + [0,744.072,232.317], + [0,0,1] + ], + "distCoef": [-0.314223,0.0332024,-0.000194112,2.11963e-05,0.079313], + "R": [ + [0.7946878724,-0.02084896757,-0.6066601239], + [0.03470365887,0.999335828,0.01111570764], + [0.6060254462,-0.02988684405,0.7948835985] + ], + "t": [ + [55.17367606], + [148.0232969], + [266.1261169] + ] + }, + { + "name": "17_04", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 4, + "K": [ + [748.225,0,373.118], + [0,748.618,236.287], + [0,0,1] + ], + "distCoef": [-0.325852,0.0883394,-0.000431944,-0.00077703,0.0075009], + "R": [ + [0.7874797118,0.07165214706,-0.6121614766], + [-0.03177741847,0.9966185482,0.07577377574], + [0.6155208357,-0.04021739967,0.7870938073] + ], + "t": [ + [46.04066644], + [153.679907], + [265.8341529] + ] + }, + { + "name": "17_05", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 5, + "K": [ + [745.23,0,378.585], + [0,745.614,229.474], + [0,0,1] + ], + "distCoef": [-0.323397,0.071697,-0.000659822,0.000678056,0.0530686], + "R": [ + [0.7680042357,0.04160049173,-0.6390922414], + 
[0.01355248597,0.9966090615,0.08115854064], + [0.6403013541,-0.07099139161,0.7648361904] + ], + "t": [ + [29.31016003], + [185.453895], + [261.9380867] + ] + }, + { + "name": "17_06", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 6, + "K": [ + [742.876,0,352.101], + [0,743.303,231.794], + [0,0,1] + ], + "distCoef": [-0.319343,0.0421325,-0.000546468,-1.33187e-05,0.10149], + "R": [ + [0.8064347587,0.08751734637,-0.584810819], + [-0.03388642915,0.9942014648,0.1020546777], + [0.5903513275,-0.062483289,0.8047242688] + ], + "t": [ + [35.39857301], + [188.6248332], + [262.8234665] + ] + }, + { + "name": "17_07", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 7, + "K": [ + [745.054,0,358.779], + [0,745.36,231.687], + [0,0,1] + ], + "distCoef": [-0.309912,-0.00132311,-0.00013553,-0.000280643,0.151777], + "R": [ + [0.7882500993,-0.004275732235,-0.615340149], + [0.05540043824,0.996408109,0.06404429605], + [0.612856078,-0.08457303664,0.7856556683] + ], + "t": [ + [-7.246792888], + [183.4614511], + [259.402568] + ] + }, + { + "name": "17_08", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 8, + "K": [ + [745.254,0,343.02], + [0,745.689,227.622], + [0,0,1] + ], + "distCoef": [-0.309897,-0.0109758,-0.00111103,0.000256129,0.180098], + "R": [ + [0.7946287881,0.03514926038,-0.6060772382], + [0.01090423253,0.9973351466,0.07213669658], + [0.6069976827,-0.06393070292,0.7921279432] + ], + "t": [ + [-18.41109561], + [184.5517176], + [263.9542066] + ] + }, + { + "name": "17_09", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 9, + "K": [ + [745.379,0,338.137], + [0,745.543,245.392], + [0,0,1] + ], + "distCoef": [-0.314138,0.0142784,0.00088856,-0.00114362,0.123117], + "R": [ + [0.7570044814,0.09852948519,-0.6459381981], + [-0.05745310106,0.9947735679,0.08440787789], + [0.6508789107,-0.02678598925,0.7587088733] + ], + "t": [ + [-40.16389387], + [164.132571], + [267.7674295] + ] + }, + { + "name": "17_10", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 10, + "K": [ + [743.633,0,369.381], + [0,743.739,253.863], + [0,0,1] + ], + "distCoef": [-0.313678,0.00191444,-0.000367883,0.000526793,0.16208], + "R": [ + [0.7732990879,0.03177464522,-0.6332447335], + [0.01440724919,0.9976050167,0.06765102948], + [0.6338777104,-0.06143779407,0.7709892643] + ], + "t": [ + [-41.17430449], + [148.5957101], + [262.973747] + ] + }, + { + "name": "17_11", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 11, + "K": [ + [749.691,0,360.347], + [0,749.465,221.979], + [0,0,1] + ], + "distCoef": [-0.36212,0.288042,0.00167589,0.000680745,-0.303613], + "R": [ + [0.7747984815,0.06051645956,-0.629305229], + [-0.01350572868,0.9967652932,0.07922465313], + [0.6320640066,-0.05288391526,0.7731095544] + ], + "t": [ + [-52.93053536], + [133.9502209], + [264.0833713] + ] + }, + { + "name": "17_12", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 12, + "K": [ + [746.505,0,357.704], + [0,746.569,217.534], + [0,0,1] + ], + "distCoef": [-0.312272,-0.0352904,0.000404412,-0.00107082,0.237629], + "R": [ + [0.7725304823,-0.04233401582,-0.633564902], + [0.05994143841,0.9981814314,0.006391704783], + [0.6321421342,-0.04291457833,0.7736631445] + ], + "t": [ + [-62.64410987], + [104.0188122], + [265.010728] + ] + }, + { + "name": "17_13", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 13, + "K": [ + [745.264,0,354.32], + [0,745.302,226.261], + [0,0,1] + ], + "distCoef": 
[-0.318398,0.0346929,0.000845692,0.000532231,0.122684], + "R": [ + [0.7851484689,0.03204817868,-0.6184778056], + [-0.002225165301,0.9987996914,0.04893081946], + [0.619303585,-0.03704174263,0.784277361] + ], + "t": [ + [-29.19489341], + [103.2650402], + [265.9795804] + ] + }, + { + "name": "17_14", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 14, + "K": [ + [744.589,0,353.058], + [0,744.664,227.639], + [0,0,1] + ], + "distCoef": [-0.324606,0.0822873,0.00100728,-0.000415736,0.0203245], + "R": [ + [0.7765409088,-0.02900211747,-0.6293989944], + [0.06862390156,0.9968904955,0.03873112579], + [0.6263185908,-0.07326811825,0.7761164898] + ], + "t": [ + [-35.65491372], + [89.93385082], + [261.6973052] + ] + }, + { + "name": "17_15", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 15, + "K": [ + [744.009,0,351.118], + [0,743.982,227.187], + [0,0,1] + ], + "distCoef": [-0.31768,0.0289626,0.000394183,-0.00106594,0.077624], + "R": [ + [0.7703409519,0.009578036972,-0.6375602553], + [0.03762675731,0.9974619202,0.06044786963], + [0.6365210484,-0.07055479443,0.7680253746] + ], + "t": [ + [-14.94306331], + [88.85755459], + [261.4804843] + ] + }, + { + "name": "17_16", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 16, + "K": [ + [745.298,0,365.044], + [0,745.641,201.543], + [0,0,1] + ], + "distCoef": [-0.315769,0.0139989,-0.000983596,0.000497246,0.155532], + "R": [ + [0.7668905855,0.04755147693,-0.6400138177], + [0.009922268647,0.9962536216,0.0859084976], + [0.6417011597,-0.07223280706,0.7635457047] + ], + "t": [ + [4.594602528], + [99.8882812], + [261.439958] + ] + }, + { + "name": "17_17", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 17, + "K": [ + [744.772,0,356.238], + [0,744.946,209.811], + [0,0,1] + ], + "distCoef": [-0.307562,-0.0273551,-0.000331097,0.000403566,0.231396], + "R": [ + [0.7386328767,0.1026186384,-0.6662513704], + [-0.03586762178,0.992927984,0.1131703685], + [0.6731530192,-0.05969450264,0.7370899397] + ], + "t": [ + [18.92063539], + [92.1220326], + [263.1909682] + ] + }, + { + "name": "17_18", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 18, + "K": [ + [746.696,0,345.664], + [0,746.883,230.9], + [0,0,1] + ], + "distCoef": [-0.332087,0.135716,-0.000396371,4.15402e-05,-0.0769473], + "R": [ + [0.7676740293,0.0869303765,-0.6349170767], + [-0.05592901251,0.9960646798,0.06875390322], + [0.6383952774,-0.01727030079,0.7695149163] + ], + "t": [ + [48.13164066], + [87.731429], + [267.0873794] + ] + }, + { + "name": "17_19", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 19, + "K": [ + [743.785,0,363.137], + [0,743.962,239.724], + [0,0,1] + ], + "distCoef": [-0.322076,0.0699752,0.00130957,8.28091e-06,0.0447641], + "R": [ + [0.7666015958,0.09362030423,-0.6352615462], + [-0.01827880108,0.9920950944,0.1241499457], + [0.6418628193,-0.08356172708,0.7622529495] + ], + "t": [ + [25.25313987], + [133.2656265], + [259.9680703] + ] + }, + { + "name": "17_20", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 20, + "K": [ + [747.071,0,344.427], + [0,747.404,242.981], + [0,0,1] + ], + "distCoef": [-0.349964,0.20917,0.0008789,-0.000586258,-0.211765], + "R": [ + [0.7775513873,0.03007697302,-0.6280996862], + [-0.01270805589,0.999403059,0.03212523871], + [0.6286909777,-0.01699709801,0.7774694548] + ], + "t": [ + [17.35278566], + [137.2956705], + [269.3773006] + ] + }, + { + "name": "17_21", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 21, 
+ "K": [ + [744.669,0,371.314], + [0,744.881,251.475], + [0,0,1] + ], + "distCoef": [-0.32107,0.0528121,0.000172414,0.000961494,0.0921892], + "R": [ + [0.7854342878,0.01663631847,-0.6187214337], + [0.02446292583,0.9980232337,0.05788946549], + [0.6184614336,-0.06060410764,0.7834746947] + ], + "t": [ + [-1.039205356], + [155.8049723], + [263.425936] + ] + }, + { + "name": "17_22", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 22, + "K": [ + [744.126,0,368.359], + [0,744.205,218.365], + [0,0,1] + ], + "distCoef": [-0.306681,-0.0309893,-0.000506643,-0.000551257,0.209183], + "R": [ + [0.7742934088,0.08491898973,-0.6271032469], + [-0.02171436959,0.9939373135,0.1077826651], + [0.6324541115,-0.06983825553,0.771443073] + ], + "t": [ + [-12.48615074], + [146.2169272], + [261.8070617] + ] + }, + { + "name": "17_23", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 23, + "K": [ + [746.439,0,363.854], + [0,746.575,224.032], + [0,0,1] + ], + "distCoef": [-0.333494,0.127943,0.00111227,0.000376509,-0.0438307], + "R": [ + [0.7741360077,0.05745954338,-0.6304060933], + [-0.01777243196,0.9974520988,0.06909016755], + [0.6327697704,-0.04228133707,0.7731847814] + ], + "t": [ + [-14.18178238], + [117.4047924], + [265.0998909] + ] + }, + { + "name": "17_24", + "type": "vga", + "resolution": [640,480], + "panel": 17, + "node": 24, + "K": [ + [745.824,0,346.505], + [0,746.017,224.098], + [0,0,1] + ], + "distCoef": [-0.317434,0.0247137,-0.000866957,0.000304145,0.138958], + "R": [ + [0.7656627697,0.09930116127,-0.6355311184], + [-0.04982185052,0.99419918,0.09531932471], + [0.6413098365,-0.04131912178,0.7661686654] + ], + "t": [ + [7.35512715], + [111.8344509], + [265.0127015] + ] + }, + { + "name": "18_01", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 1, + "K": [ + [744.96,0,372.705], + [0,744.564,226.392], + [0,0,1] + ], + "distCoef": [-0.321978,0.0724692,0.000483988,0.000458946,0.0380169], + "R": [ + [-0.3520669355,0.03279886428,-0.9353999719], + [0.04913052402,0.9986556534,0.01652505738], + [0.9346844732,-0.04013876447,-0.3532050609] + ], + "t": [ + [47.10128491], + [117.3460549], + [266.6541908] + ] + }, + { + "name": "18_02", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 2, + "K": [ + [748.843,0,358.358], + [0,748.813,225.018], + [0,0,1] + ], + "distCoef": [-0.335266,0.148062,0.000634215,-0.00153008,-0.105518], + "R": [ + [-0.3389880085,0.04020239671,-0.9399313259], + [0.04795713663,0.9985260662,0.02541275744], + [0.9395675831,-0.03646179499,-0.3404163544] + ], + "t": [ + [70.51461434], + [125.984952], + [266.5287049] + ] + }, + { + "name": "18_03", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 3, + "K": [ + [746.557,0,370.525], + [0,746.643,239.094], + [0,0,1] + ], + "distCoef": [-0.336876,0.137869,0.0006954,0.000424607,-0.0538424], + "R": [ + [-0.3751735108,0.06869685522,-0.9244055273], + [0.01802710881,0.9976021763,0.06682006625], + [0.9267792942,0.008404759824,-0.3755123165] + ], + "t": [ + [58.58769651], + [133.6261971], + [275.7276294] + ] + }, + { + "name": "18_04", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 4, + "K": [ + [744.71,0,356.151], + [0,744.769,223.97], + [0,0,1] + ], + "distCoef": [-0.312604,0.00791514,0.000747313,-0.000519594,0.158336], + "R": [ + [-0.3438161676,0.01243889994,-0.9389545871], + [0.0251972518,0.9996744288,0.00401683712], + [0.9386988555,-0.02227802162,-0.344017657] + ], + "t": [ + [40.26546697], + [152.0702476], + [270.0686857] + ] + }, + { + 
"name": "18_05", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 5, + "K": [ + [743.927,0,355.392], + [0,744.057,262.153], + [0,0,1] + ], + "distCoef": [-0.316206,0.0381773,0.00109867,0.000112775,0.102099], + "R": [ + [-0.3913025917,0.04706716523,-0.9190576498], + [0.07535158968,0.9969764632,0.0189755056], + [0.9171719684,-0.0618272904,-0.3936660596] + ], + "t": [ + [27.50168157], + [183.5367771], + [265.1462318] + ] + }, + { + "name": "18_06", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 6, + "K": [ + [744.89,0,353.646], + [0,744.816,246.705], + [0,0,1] + ], + "distCoef": [-0.311434,-0.0151537,0.000898898,0.00113623,0.19919], + "R": [ + [-0.3540366423,0.02766248657,-0.9348223589], + [0.06855079724,0.9976412764,0.003559761167], + [0.9327158432,-0.06282253209,-0.3550978532] + ], + "t": [ + [15.12228299], + [191.0759947], + [263.959739] + ] + }, + { + "name": "18_07", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 7, + "K": [ + [744.21,0,382.066], + [0,744.474,221.564], + [0,0,1] + ], + "distCoef": [-0.318836,0.0439442,-0.000310088,0.000693195,0.0844966], + "R": [ + [-0.3784097731,0.01208936744,-0.9255592314], + [0.03775536538,0.9992841689,-0.002383732641], + [0.9248678695,-0.03584685469,-0.3785953341] + ], + "t": [ + [-11.73143391], + [170.7040215], + [268.2801795] + ] + }, + { + "name": "18_08", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 8, + "K": [ + [744.996,0,378.911], + [0,745.249,217.173], + [0,0,1] + ], + "distCoef": [-0.317298,0.0439499,-0.000470842,0.000645598,0.0800391], + "R": [ + [-0.3573644405,-0.02168005213,-0.9337133564], + [0.09030348924,0.9942444419,-0.05764780686], + [0.9295891224,-0.1049188503,-0.3533498244] + ], + "t": [ + [-32.18764663], + [193.5958696], + [255.9258617] + ] + }, + { + "name": "18_09", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 9, + "K": [ + [745.488,0,367.703], + [0,745.136,254.274], + [0,0,1] + ], + "distCoef": [-0.333608,0.117291,0.00107107,0.000590786,-0.0167148], + "R": [ + [-0.3755971335,-0.01611847579,-0.9266428589], + [0.03486308067,0.9988953473,-0.03150636014], + [0.9261270749,-0.0441393233,-0.3746202894] + ], + "t": [ + [-52.11061688], + [162.8813669], + [265.66749] + ] + }, + { + "name": "18_10", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 10, + "K": [ + [746.691,0,377.016], + [0,746.35,247.895], + [0,0,1] + ], + "distCoef": [-0.324348,0.0759263,0.000632098,0.000973799,0.0365142], + "R": [ + [-0.3979832561,-0.05264507275,-0.9158809007], + [0.03842303812,0.9965195246,-0.07397639654], + [0.9165876925,-0.06463229393,-0.3945753015] + ], + "t": [ + [-58.47639535], + [144.7851801], + [261.4908418] + ] + }, + { + "name": "18_11", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 11, + "K": [ + [743.499,0,383.73], + [0,743.269,228.607], + [0,0,1] + ], + "distCoef": [-0.318101,0.0343673,-0.000192972,9.02677e-05,0.0940376], + "R": [ + [-0.3591156591,-0.0799459609,-0.9298626709], + [0.01693912278,0.9956019804,-0.09213990831], + [0.9331393302,-0.04883994185,-0.356182047] + ], + "t": [ + [-65.19666066], + [124.1115675], + [265.1913912] + ] + }, + { + "name": "18_12", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 12, + "K": [ + [744.847,0,377.843], + [0,744.539,240.133], + [0,0,1] + ], + "distCoef": [-0.322594,0.0777366,0.000608553,0.000730506,0.0395492], + "R": [ + [-0.3599917326,-0.04959232233,-0.9316364924], + [0.02914279324,0.9975011607,-0.0643593979], + 
[0.9325002145,-0.05031934083,-0.3576469123] + ], + "t": [ + [-57.61171896], + [105.5688064], + [264.3974594] + ] + }, + { + "name": "18_13", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 13, + "K": [ + [742.264,0,386.065], + [0,742.375,236.247], + [0,0,1] + ], + "distCoef": [-0.316238,0.0182785,-0.000395794,0.00144239,0.136479], + "R": [ + [-0.3232019546,0.03338047233,-0.9457411066], + [0.05161368011,0.9985119503,0.01760435083], + [0.9449214383,-0.04312341834,-0.324443903] + ], + "t": [ + [61.04698375], + [97.35388185], + [264.1973208] + ] + }, + { + "name": "18_14", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 14, + "K": [ + [744.531,0,362.517], + [0,744.694,222.936], + [0,0,1] + ], + "distCoef": [-0.323155,0.0551,-0.000315217,0.00114443,0.0791805], + "R": [ + [-0.3124904102,0.02154150537,-0.9496766329], + [-0.004629448499,0.999696432,0.02419942065], + [0.9499096335,0.01195856595,-0.3122958229] + ], + "t": [ + [-14.02426098], + [68.46079663], + [270.3325449] + ] + }, + { + "name": "18_15", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 15, + "K": [ + [747.429,0,398.562], + [0,747.425,233.615], + [0,0,1] + ], + "distCoef": [-0.333617,0.122405,0.000303778,0.00134383,-0.0202721], + "R": [ + [-0.358025731,-0.0142572014,-0.9336028643], + [0.04081564607,0.9986886699,-0.03090345813], + [0.9328191995,-0.04916983726,-0.3569743242] + ], + "t": [ + [-8.683192747], + [83.02873835], + [264.4620974] + ] + }, + { + "name": "18_16", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 16, + "K": [ + [742.757,0,357.304], + [0,742.66,220.331], + [0,0,1] + ], + "distCoef": [-0.305443,-0.0527047,-0.000521453,0.00022453,0.250047], + "R": [ + [-0.3364590891,0.05374146283,-0.9401633563], + [0.05791647683,0.99766121,0.03630140184], + [0.9399154021,-0.04223701264,-0.3387846981] + ], + "t": [ + [20.062846], + [91.33983095], + [265.2581766] + ] + }, + { + "name": "18_17", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 17, + "K": [ + [750.787,0,361.922], + [0,750.723,216.611], + [0,0,1] + ], + "distCoef": [-0.368257,0.303211,-0.00101236,-0.000679192,-0.335284], + "R": [ + [-0.3521002367,0.0154136189,-0.9358353721], + [0.04957845599,0.9987678018,-0.002203336065], + [0.9346482761,-0.04717306796,-0.3524305629] + ], + "t": [ + [32.75189895], + [90.38015946], + [265.2110414] + ] + }, + { + "name": "18_18", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 18, + "K": [ + [745.69,0,366.196], + [0,745.645,224.452], + [0,0,1] + ], + "distCoef": [-0.325076,0.0695314,0.000207452,8.09151e-05,0.0569118], + "R": [ + [-0.369329094,-0.008664471876,-0.929258278], + [0.06369637747,0.997368813,-0.03461534879], + [0.9271131494,-0.07197484145,-0.3678054246] + ], + "t": [ + [-35.28307581], + [111.055802], + [261.8818226] + ] + }, + { + "name": "18_19", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 19, + "K": [ + [745.552,0,357.301], + [0,745.545,223.113], + [0,0,1] + ], + "distCoef": [-0.320101,0.042192,0.00043748,0.000103204,0.104558], + "R": [ + [-0.3584191226,-0.04877846794,-0.9322855752], + [0.07086164718,0.9943315632,-0.07926770686], + [0.9308675306,-0.09447435344,-0.3529309238] + ], + "t": [ + [16.14340371], + [139.4376601], + [259.6452388] + ] + }, + { + "name": "18_20", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 20, + "K": [ + [746.078,0,363.03], + [0,746.077,221.582], + [0,0,1] + ], + "distCoef": [-0.321359,0.0569666,0.000169599,0.000938787,0.0797635], + 
"R": [ + [-0.3631410096,0.0448531679,-0.9306539639], + [0.06634832184,0.9975497918,0.02218813063], + [0.9293688758,-0.05368990856,-0.3652271709] + ], + "t": [ + [21.37501917], + [147.345749], + [265.5705493] + ] + }, + { + "name": "18_21", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 21, + "K": [ + [745.043,0,372.293], + [0,745.076,222.901], + [0,0,1] + ], + "distCoef": [-0.317484,0.0404748,0.000192535,-0.000111527,0.0957966], + "R": [ + [-0.3461967977,-0.005928135698,-0.9381431844], + [0.04577092509,0.9986824948,-0.02320122706], + [0.937044716,-0.05097187193,-0.3454693453] + ], + "t": [ + [-0.5259425122], + [153.3372726], + [265.7616305] + ] + }, + { + "name": "18_22", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 22, + "K": [ + [745.252,0,401.788], + [0,745.346,245.295], + [0,0,1] + ], + "distCoef": [-0.315494,0.0267895,-0.000624877,0.000210937,0.0993279], + "R": [ + [-0.3267831921,-0.004575639121,-0.9450882546], + [0.07739750703,0.9964998407,-0.03158628616], + [0.9419248225,-0.08346934224,-0.3252852558] + ], + "t": [ + [-10.3938656], + [148.3069178], + [261.1183693] + ] + }, + { + "name": "18_23", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 23, + "K": [ + [747.114,0,358.608], + [0,746.941,217.398], + [0,0,1] + ], + "distCoef": [-0.324507,0.0792141,-0.000227367,0.0013287,0.0357905], + "R": [ + [-0.356358404,-0.03218270054,-0.9337949248], + [0.02645826287,0.9986582749,-0.04451528213], + [0.9339746507,-0.04056998648,-0.3550287707] + ], + "t": [ + [-18.04448695], + [115.7023496], + [266.3010308] + ] + }, + { + "name": "18_24", + "type": "vga", + "resolution": [640,480], + "panel": 18, + "node": 24, + "K": [ + [747.28,0,383.407], + [0,747.414,233.333], + [0,0,1] + ], + "distCoef": [-0.321806,0.0494121,-0.000677773,0.00106862,0.0725344], + "R": [ + [-0.3696831614,0.01690678518,-0.9290040478], + [0.03916078476,0.9992295361,0.002601362608], + [0.9283322644,-0.03541884761,-0.3700604169] + ], + "t": [ + [3.487638933], + [110.8874693], + [266.9764809] + ] + }, + { + "name": "19_01", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 1, + "K": [ + [742.815,0,376.349], + [0,742.96,226.412], + [0,0,1] + ], + "distCoef": [-0.311242,0.000676611,0.00127048,0.000398816,0.145683], + "R": [ + [-0.9986287013,0.0334613179,0.04026235479], + [0.03051664863,0.9969627365,-0.07165218936], + [-0.04253764409,-0.07032526067,-0.99661673] + ], + "t": [ + [47.87451164], + [124.5257469], + [265.3025885] + ] + }, + { + "name": "19_02", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 2, + "K": [ + [746.352,0,362.211], + [0,746.799,224.495], + [0,0,1] + ], + "distCoef": [-0.33354,0.113916,-0.000650978,0.00200875,0.00369896], + "R": [ + [-0.9978769066,0.0627015602,0.01761231284], + [0.06225819076,0.9977547513,-0.02468550225], + [-0.01912058832,-0.02353658189,-0.9995401105] + ], + "t": [ + [76.18899734], + [119.4504319], + [269.470097] + ] + }, + { + "name": "19_03", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 3, + "K": [ + [744.923,0,335.897], + [0,744.843,232.622], + [0,0,1] + ], + "distCoef": [-0.310786,-0.00740435,0.000477261,-0.00048183,0.169837], + "R": [ + [-0.9959217828,0.05942221639,0.06788816328], + [0.05820019172,0.9981077555,-0.01984051806], + [-0.06893866983,-0.0158085,-0.9974956397] + ], + "t": [ + [57.6907282], + [139.716188], + [274.5941587] + ] + }, + { + "name": "19_04", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 4, + "K": [ + [745.3,0,371.455], + 
[0,745.339,223.979], + [0,0,1] + ], + "distCoef": [-0.316788,0.039021,-0.00160053,-0.000126119,0.09467], + "R": [ + [-0.995350133,0.07444232287,0.06112653567], + [0.06997485872,0.994930028,-0.0722340534], + [-0.06619389658,-0.06762085396,-0.9955128267] + ], + "t": [ + [42.04206067], + [161.4993909], + [266.5642499] + ] + }, + { + "name": "19_05", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 5, + "K": [ + [741.339,0,353.354], + [0,741.563,231.192], + [0,0,1] + ], + "distCoef": [-0.304803,-0.0634451,-0.00114618,-0.000982934,0.282182], + "R": [ + [-0.9964181101,0.07478982294,0.03946431643], + [0.07096423127,0.993341211,-0.09075966339], + [-0.04598943103,-0.08763401739,-0.9950905744] + ], + "t": [ + [45.56899486], + [188.2245222], + [262.1501617] + ] + }, + { + "name": "19_06", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 6, + "K": [ + [745.947,0,350.894], + [0,746.217,234.332], + [0,0,1] + ], + "distCoef": [-0.313212,0.0178381,0.000340441,0.00055626,0.126083], + "R": [ + [-0.9969018679,0.07865171151,0.0007576151751], + [0.07854654264,0.9959829876,-0.04299219736], + [-0.004135981729,-0.0427994938,-0.9990751208] + ], + "t": [ + [37.2742824], + [183.4195047], + [270.0123608] + ] + }, + { + "name": "19_07", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 7, + "K": [ + [748.821,0,355.822], + [0,748.684,217.17], + [0,0,1] + ], + "distCoef": [-0.342444,0.16602,-0.000477836,-0.000195363,-0.106824], + "R": [ + [-0.9928808048,-0.04900785176,0.10856306], + [-0.05236016128,0.998228751,-0.02824489671], + [-0.106986546,-0.0337281951,-0.9936882247] + ], + "t": [ + [-31.49326377], + [168.7489309], + [271.4480177] + ] + }, + { + "name": "19_08", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 8, + "K": [ + [747.238,0,359.034], + [0,747.474,233.038], + [0,0,1] + ], + "distCoef": [-0.313675,0.00436645,0.000419802,0.000604189,0.154068], + "R": [ + [-0.9913876468,0.02931278851,0.127637354], + [0.0192008625,0.9966303068,-0.07974558542], + [-0.1295448208,-0.07660804099,-0.9886098055] + ], + "t": [ + [-44.88902211], + [188.5485089], + [261.5304555] + ] + }, + { + "name": "19_09", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 9, + "K": [ + [743.415,0,332.333], + [0,743.715,235.337], + [0,0,1] + ], + "distCoef": [-0.308464,-0.0208585,-0.00102455,0.000256502,0.207947], + "R": [ + [-0.9954977047,0.04566149696,0.08306231217], + [0.04175753042,0.9979670543,-0.04814631117], + [-0.08509188364,-0.04446106523,-0.9953806232] + ], + "t": [ + [-46.35184093], + [166.6378451], + [268.6077116] + ] + }, + { + "name": "19_10", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 10, + "K": [ + [747.206,0,362.728], + [0,747.412,248.496], + [0,0,1] + ], + "distCoef": [-0.340118,0.138855,0.000965068,4.5306e-05,-0.0441245], + "R": [ + [-0.9935175509,0.05252798067,0.1008151146], + [0.05439486481,0.9983935823,0.01585728578], + [-0.09982021218,0.02123831626,-0.9947787991] + ], + "t": [ + [-46.95074625], + [127.5778656], + [276.6370715] + ] + }, + { + "name": "19_11", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 11, + "K": [ + [745.45,0,355.141], + [0,745.641,249.232], + [0,0,1] + ], + "distCoef": [-0.326245,0.10077,0.000216744,-2.37583e-05,-0.0259903], + "R": [ + [-0.9983050345,-0.001439505441,0.05818063101], + [-0.002578079686,0.9998065462,-0.01949932386], + [-0.05814130636,-0.01961626748,-0.9981156198] + ], + "t": [ + [-58.09544547], + [121.7224759], + [272.659258] + ] + }, + { + "name": 
"19_12", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 12, + "K": [ + [743.805,0,368.42], + [0,744.013,242.015], + [0,0,1] + ], + "distCoef": [-0.323306,0.0785457,-0.00106293,0.000187763,0.0236672], + "R": [ + [-0.9954771119,0.0748660766,0.05848410323], + [0.07512966129,0.9971710788,0.002318097681], + [-0.05814510944,0.006701504052,-0.9982856485] + ], + "t": [ + [-47.8147621], + [97.15541342], + [274.4212668] + ] + }, + { + "name": "19_13", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 13, + "K": [ + [742.693,0,353.966], + [0,742.776,227.014], + [0,0,1] + ], + "distCoef": [-0.307193,-0.0103139,0.000109263,-0.000950495,0.159317], + "R": [ + [-0.9933059489,0.1045971031,0.04901773034], + [0.1016362638,0.9930442478,-0.05944065861], + [-0.05489409585,-0.05406078084,-0.9970276176] + ], + "t": [ + [-21.5323637], + [109.7713479], + [268.3161895] + ] + }, + { + "name": "19_14", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 14, + "K": [ + [742.837,0,362.248], + [0,743.502,226.37], + [0,0,1] + ], + "distCoef": [-0.308934,-0.00321353,-0.0010059,0.000705591,0.156528], + "R": [ + [-0.9919154966,0.0987006026,0.07976113456], + [0.09553429302,0.9945144894,-0.04259259489], + [-0.08352751879,-0.03462833131,-0.995903626] + ], + "t": [ + [-30.66946365], + [84.06052642], + [268.8728165] + ] + }, + { + "name": "19_15", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 15, + "K": [ + [742.618,0,345.237], + [0,742.923,230.439], + [0,0,1] + ], + "distCoef": [-0.302695,-0.0546693,-0.000167537,-0.000784726,0.259585], + "R": [ + [-0.9885523252,0.1391044686,0.05843155954], + [0.1381120085,0.9902000007,-0.02071308279], + [-0.06074021267,-0.01240586611,-0.9980765106] + ], + "t": [ + [-1.26146274], + [74.12977283], + [271.0351679] + ] + }, + { + "name": "19_16", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 16, + "K": [ + [744.088,0,370.473], + [0,744.417,231.755], + [0,0,1] + ], + "distCoef": [-0.300902,-0.0664899,-0.000333311,0.000589361,0.253926], + "R": [ + [-0.9917390399,0.06178336486,0.1124121551], + [0.06447509535,0.9977094298,0.02046596672], + [-0.1108902109,0.02754468261,-0.9934508803] + ], + "t": [ + [-3.269853258], + [73.62667861], + [274.8694227] + ] + }, + { + "name": "19_17", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 17, + "K": [ + [745.582,0,373.528], + [0,745.86,237.254], + [0,0,1] + ], + "distCoef": [-0.322134,0.0530706,-0.000603814,0.00101303,0.0846746], + "R": [ + [-0.9897330936,0.1313546283,0.05634150462], + [0.1318000226,0.9912672261,0.00424742025], + [-0.05529156869,0.01162962396,-0.9984025212] + ], + "t": [ + [37.3391924], + [70.20661568], + [273.1392775] + ] + }, + { + "name": "19_18", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 18, + "K": [ + [742.542,0,374.105], + [0,742.758,223.273], + [0,0,1] + ], + "distCoef": [-0.306762,-0.0452572,-0.00032402,-0.000364469,0.245651], + "R": [ + [-0.9920842372,0.1065981921,0.06637538524], + [0.106818653,0.9942784937,-0.0002288198192], + [-0.06602000984,0.006863120707,-0.9977946963] + ], + "t": [ + [52.26513597], + [79.91641464], + [273.9509772] + ] + }, + { + "name": "19_19", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 19, + "K": [ + [744.378,0,361.433], + [0,744.589,244.618], + [0,0,1] + ], + "distCoef": [-0.310422,-0.000364242,-0.000710118,0.000839407,0.169675], + "R": [ + [-0.9919054981,0.126974259,0.001010166835], + [0.1269495258,0.9918188066,-0.01338927975], + 
[-0.002701996339,-0.01315266,-0.9999098493] + ], + "t": [ + [49.23489662], + [110.9052228], + [271.6142806] + ] + }, + { + "name": "19_20", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 20, + "K": [ + [745.72,0,364.99], + [0,745.913,248.461], + [0,0,1] + ], + "distCoef": [-0.32476,0.0791445,0.000409065,0.000522525,0.0385155], + "R": [ + [-0.9808466558,0.1869185946,0.05478391053], + [0.1851721888,0.9820671342,-0.03543168776], + [-0.06042431929,-0.02460859583,-0.9978693896] + ], + "t": [ + [40.23583817], + [134.9359413], + [272.7493911] + ] + }, + { + "name": "19_21", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 21, + "K": [ + [745.966,0,347.023], + [0,745.905,254.016], + [0,0,1] + ], + "distCoef": [-0.312122,-0.0171046,0.00101358,-9.38575e-05,0.213424], + "R": [ + [-0.9944456328,0.07811965146,0.07053512206], + [0.07435713108,0.9957422838,-0.0544823029], + [-0.07449094204,-0.04893489886,-0.9960203187] + ], + "t": [ + [2.247391851], + [153.0572023], + [268.8284628] + ] + }, + { + "name": "19_22", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 22, + "K": [ + [743.607,0,364.935], + [0,743.756,243.53], + [0,0,1] + ], + "distCoef": [-0.311531,0.000696399,0.00010932,-0.000314324,0.159615], + "R": [ + [-0.9924188487,0.09367860135,0.07955594568], + [0.08900119243,0.9941960017,-0.06044086279], + [-0.0847562186,-0.05290207743,-0.9949963586] + ], + "t": [ + [-15.3150092], + [142.5037842], + [267.7211288] + ] + }, + { + "name": "19_23", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 23, + "K": [ + [743.508,0,369.721], + [0,743.449,243.575], + [0,0,1] + ], + "distCoef": [-0.309744,-0.0191119,0.000292611,0.000847107,0.198605], + "R": [ + [-0.9987856124,0.03694807636,0.03259049098], + [0.03470669556,0.9971594314,-0.06684694127], + [-0.03496778135,-0.06563465492,-0.997230839] + ], + "t": [ + [-6.799650163], + [123.3743131], + [267.1549958] + ] + }, + { + "name": "19_24", + "type": "vga", + "resolution": [640,480], + "panel": 19, + "node": 24, + "K": [ + [742.775,0,379.613], + [0,742.864,224.449], + [0,0,1] + ], + "distCoef": [-0.316586,0.0333112,-0.000180777,0.00112675,0.112087], + "R": [ + [-0.9947573056,0.06853183176,0.07590316848], + [0.05765365411,0.9888586451,-0.1372393391], + [-0.08446276764,-0.1321437401,-0.9876254719] + ], + "t": [ + [4.340029177], + [136.5307812], + [258.2193706] + ] + }, + { + "name": "20_01", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 1, + "K": [ + [745.267,0,367.511], + [0,745.253,228.976], + [0,0,1] + ], + "distCoef": [-0.316421,0.0232694,0.000233523,0.00095017,0.129164], + "R": [ + [-0.2595515744,0.03264633198,0.965177288], + [-0.02439656235,0.9988878376,-0.04034718866], + [-0.9654210418,-0.03401918423,-0.2584664527] + ], + "t": [ + [43.91564589], + [114.6472759], + [269.2437955] + ] + }, + { + "name": "20_02", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 2, + "K": [ + [746.737,0,383.621], + [0,746.553,234.139], + [0,0,1] + ], + "distCoef": [-0.330711,0.126048,0.000259954,-0.000232797,-0.067441], + "R": [ + [-0.2600597375,0.03354081135,0.965009817], + [-0.06475754991,0.9965406566,-0.05208818886], + [-0.9634185968,-0.07603771211,-0.2569880808] + ], + "t": [ + [63.03617994], + [136.0112472], + [264.2112923] + ] + }, + { + "name": "20_03", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 3, + "K": [ + [748.567,0,371.842], + [0,748.646,223.378], + [0,0,1] + ], + "distCoef": 
[-0.332561,0.132401,-0.000978802,0.0010132,-0.0596871], + "R": [ + [-0.2517963519,0.03200567411,0.967250864], + [0.0115205721,0.9994813079,-0.03007310314], + [-0.9677116686,0.003570985655,-0.2520344708] + ], + "t": [ + [55.32226207], + [135.5872215], + [276.5287505] + ] + }, + { + "name": "20_04", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 4, + "K": [ + [747.412,0,375.731], + [0,747.545,213.638], + [0,0,1] + ], + "distCoef": [-0.324984,0.0823763,-0.00190711,0.0010176,0.0382164], + "R": [ + [-0.2864406942,-0.001302983566,0.9580970885], + [-0.1193951903,0.9922525608,-0.03434594761], + [-0.9506295373,-0.1242302613,-0.2843770823] + ], + "t": [ + [40.5108683], + [178.4576708], + [254.9563649] + ] + }, + { + "name": "20_05", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 5, + "K": [ + [747.818,0,377.646], + [0,748.63,232.294], + [0,0,1] + ], + "distCoef": [-0.327048,0.100477,-0.00250563,-0.000951363,0.00505748], + "R": [ + [-0.2682590325,-0.01756457816,0.9631866782], + [-0.1175373506,0.9929607203,-0.014628026], + [-0.9561496027,-0.1171345104,-0.2684351761] + ], + "t": [ + [28.10870602], + [198.6254244], + [256.0861594] + ] + }, + { + "name": "20_06", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 6, + "K": [ + [744.281,0,376.164], + [0,744.733,212.764], + [0,0,1] + ], + "distCoef": [-0.314115,0.0261091,-0.00186017,0.000146826,0.111047], + "R": [ + [-0.2995512244,0.02650351378,0.9537120256], + [-0.1164678133,0.9911222418,-0.06412449085], + [-0.9469447251,-0.1302853239,-0.2938050747] + ], + "t": [ + [24.38602287], + [207.7342285], + [252.6787249] + ] + }, + { + "name": "20_07", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 7, + "K": [ + [744.844,0,367.199], + [0,744.885,234.874], + [0,0,1] + ], + "distCoef": [-0.307447,-0.0235368,-0.000447762,-0.000552595,0.198481], + "R": [ + [-0.2246138655,-0.03605175288,0.9737807158], + [-0.1345418425,0.9908917963,0.005651603877], + [-0.965115073,-0.1297448231,-0.2274185059] + ], + "t": [ + [-24.57828512], + [193.807989], + [253.6581871] + ] + }, + { + "name": "20_08", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 8, + "K": [ + [745.265,0,373.297], + [0,745.204,222.406], + [0,0,1] + ], + "distCoef": [-0.322725,0.0753011,-0.00198414,9.48962e-05,0.0496562], + "R": [ + [-0.2740281164,0.007089557403,0.9616955493], + [-0.08615117171,0.9957715968,-0.0318889104], + [-0.9578551911,-0.09158965645,-0.2722586413] + ], + "t": [ + [-24.40184383], + [190.6520913], + [261.5790911] + ] + }, + { + "name": "20_09", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 9, + "K": [ + [743.742,0,376.404], + [0,743.442,252.182], + [0,0,1] + ], + "distCoef": [-0.310951,0.0101818,-0.000165117,0.000699519,0.141452], + "R": [ + [-0.234740558,-0.05401621619,0.9705560874], + [-0.06709368181,0.9969740023,0.03925909634], + [-0.9697398147,-0.05590247913,-0.2376543804] + ], + "t": [ + [-60.89112675], + [163.1020008], + [266.420435] + ] + }, + { + "name": "20_10", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 10, + "K": [ + [746.237,0,381.452], + [0,745.998,235.104], + [0,0,1] + ], + "distCoef": [-0.321635,0.0804606,-0.000793429,0.000500703,0.0308776], + "R": [ + [-0.2327490461,-0.03063038999,0.9720543507], + [-0.1073579574,0.9942045343,0.005622535858], + [-0.9665930636,-0.1030491297,-0.2346885731] + ], + "t": [ + [-52.7687065], + [155.650502], + [258.7092289] + ] + }, + { + "name": "20_11", + "type": "vga", + "resolution": [640,480], + 
"panel": 20, + "node": 11, + "K": [ + [744.465,0,352.406], + [0,744.368,231.635], + [0,0,1] + ], + "distCoef": [-0.307896,-0.0267024,-0.00138959,-0.000489454,0.213952], + "R": [ + [-0.2568719183,-0.003646201445,0.9664385768], + [-0.06909534804,0.997503196,-0.01460160774], + [-0.9639723287,-0.07052715282,-0.256482495] + ], + "t": [ + [-58.11810551], + [133.8270577], + [264.378006] + ] + }, + { + "name": "20_12", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 12, + "K": [ + [744.557,0,351.376], + [0,744.424,216.683], + [0,0,1] + ], + "distCoef": [-0.317479,0.0158652,-0.000659121,-0.00059258,0.147681], + "R": [ + [-0.2372383683,-0.02274879941,0.9711850744], + [-0.1004253449,0.9949438408,-0.001226302928], + [-0.9662467111,-0.09782252214,-0.2383234094] + ], + "t": [ + [-62.35654103], + [118.4734964], + [259.8400796] + ] + }, + { + "name": "20_13", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 13, + "K": [ + [743.07,0,377.102], + [0,743.158,222.988], + [0,0,1] + ], + "distCoef": [-0.29868,-0.0827266,-0.00133003,-0.00119832,0.273178], + "R": [ + [-0.2367527853,-0.03686088138,0.9708704311], + [-0.08746956632,0.9960307636,0.01648614259], + [-0.9676245107,-0.08101847538,-0.2390372628] + ], + "t": [ + [-42.43038274], + [111.3831569], + [262.4188123] + ] + }, + { + "name": "20_14", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 14, + "K": [ + [745.597,0,372.306], + [0,745.414,237.499], + [0,0,1] + ], + "distCoef": [-0.320131,0.0615197,0.00113665,-0.000991542,0.0414761], + "R": [ + [-0.2769894269,0.05383368349,0.9593637433], + [-0.05406721308,0.9959742516,-0.07149843787], + [-0.9593506105,-0.07167443526,-0.2729636999] + ], + "t": [ + [-21.49417033], + [90.7530727], + [264.2254974] + ] + }, + { + "name": "20_15", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 15, + "K": [ + [746.296,0,380.788], + [0,746.161,226.883], + [0,0,1] + ], + "distCoef": [-0.321885,0.0553182,0.000132369,-0.000878491,0.0778662], + "R": [ + [-0.2870302882,0.01079685294,0.9578606588], + [-0.05665486447,0.9979947406,-0.02822630231], + [-0.9562446549,-0.06236926949,-0.2858430237] + ], + "t": [ + [-1.106709776], + [85.82297146], + [264.8070963] + ] + }, + { + "name": "20_16", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 16, + "K": [ + [744.119,0,345.288], + [0,744.112,227.607], + [0,0,1] + ], + "distCoef": [-0.302547,-0.0664079,0.000893953,-0.000627784,0.303861], + "R": [ + [-0.252548592,0.05539030986,0.9659974753], + [-0.08640189331,0.9930807476,-0.07953201617], + [-0.963718798,-0.1035497095,-0.2460153169] + ], + "t": [ + [10.51473419], + [107.4721829], + [260.872486] + ] + }, + { + "name": "20_17", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 17, + "K": [ + [745.831,0,353.784], + [0,745.87,219.754], + [0,0,1] + ], + "distCoef": [-0.321082,0.0599511,-0.000750204,0.000386726,0.0615888], + "R": [ + [-0.3124433364,0.0857084176,0.9460619582], + [-0.03834810703,0.9939715084,-0.1027135007], + [-0.9491620432,-0.06837183409,-0.3072730188] + ], + "t": [ + [50.17882687], + [91.39390134], + [262.9120903] + ] + }, + { + "name": "20_18", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 18, + "K": [ + [745.227,0,385.13], + [0,745.129,233.897], + [0,0,1] + ], + "distCoef": [-0.311291,0.0180828,0.00116452,0.000576614,0.0928398], + "R": [ + [-0.2786751196,0.05379991941,0.9588773365], + [-0.03740853519,0.9970639104,-0.06681437094], + [-0.9596565944,-0.0544896994,-0.2758443282] + ], + "t": [ + 
[57.04086511], + [98.35557378], + [265.4113916] + ] + }, + { + "name": "20_19", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 19, + "K": [ + [746.424,0,373.724], + [0,746.378,215.089], + [0,0,1] + ], + "distCoef": [-0.317589,0.0452179,0.000839363,0.00087423,0.0858828], + "R": [ + [-0.2053627335,-0.023863444,0.9783949528], + [-0.1366627843,0.9906072975,-0.004523879826], + [-0.9690972248,-0.1346392148,-0.2066950671] + ], + "t": [ + [2.454839771], + [148.020868], + [256.5149472] + ] + }, + { + "name": "20_20", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 20, + "K": [ + [744.35,0,378.361], + [0,744.386,245.706], + [0,0,1] + ], + "distCoef": [-0.305792,-0.0298413,-5.26611e-05,9.57392e-05,0.206854], + "R": [ + [-0.2653224987,0.04663873586,0.9630310483], + [-0.08123292055,0.9941966424,-0.07052835541], + [-0.9607315881,-0.09694258412,-0.2599941366] + ], + "t": [ + [23.42848118], + [157.616994], + [260.7931406] + ] + }, + { + "name": "20_21", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 21, + "K": [ + [747.371,0,368.768], + [0,747.344,231.897], + [0,0,1] + ], + "distCoef": [-0.308946,-0.0139041,-0.000755627,-0.000244894,0.190547], + "R": [ + [-0.2375675449,-0.01520768023,0.9712519694], + [-0.09352440886,0.9955903179,-0.007287238765], + [-0.966858235,-0.09256697771,-0.2379422368] + ], + "t": [ + [-12.76210059], + [163.3748289], + [261.1782343] + ] + }, + { + "name": "20_22", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 22, + "K": [ + [746.314,0,371.788], + [0,745.992,237.732], + [0,0,1] + ], + "distCoef": [-0.315167,0.0352154,-0.000828301,0.000312219,0.0891012], + "R": [ + [-0.2145858088,0.0004599306573,0.9767050318], + [-0.07749764501,0.9968390076,-0.017495939], + [-0.9736257216,-0.07944672006,-0.2138718611] + ], + "t": [ + [-33.0373727], + [146.3668194], + [262.1626174] + ] + }, + { + "name": "20_23", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 23, + "K": [ + [746.318,0,371.868], + [0,746.096,236.531], + [0,0,1] + ], + "distCoef": [-0.318459,0.0405311,0.000489761,-0.000285822,0.0876741], + "R": [ + [-0.2554085937,0.004734611177,0.9668216142], + [-0.07039835709,0.9972425561,-0.02348096154], + [-0.9642668311,-0.0740598926,-0.25437101] + ], + "t": [ + [-17.40671779], + [124.2252344], + [264.0602836] + ] + }, + { + "name": "20_24", + "type": "vga", + "resolution": [640,480], + "panel": 20, + "node": 24, + "K": [ + [745.832,0,382.965], + [0,745.816,231.317], + [0,0,1] + ], + "distCoef": [-0.320385,0.0446211,0.00028801,0.00167617,0.104376], + "R": [ + [-0.2362773498,-0.02089730322,0.9714609188], + [-0.1013714927,0.9948433166,-0.003255144035], + [-0.9663833786,-0.09924756028,-0.2371773332] + ], + "t": [ + [-5.093436327], + [126.6662443], + [260.9183094] + ] + }, + { + "name": "00_00", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 0, + "K": [ + [1634.03,0,942.792], + [0,1629.73,558.29], + [0,0,1] + ], + "distCoef": [-0.222445,0.199192,8.73054e-05,0.000982243,0.0238445], + "R": [ + [0.1369296663,0.03357591931,-0.9900115778], + [-0.09021094677,0.9956950625,0.02129149064], + [0.9864645212,0.08639444504,0.1393691081] + ], + "t": [ + [20.90028135], + [127.2202879], + [283.1159034] + ] + }, + { + "name": "00_01", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 1, + "K": [ + [1395.91,0,951.559], + [0,1392.24,561.398], + [0,0,1] + ], + "distCoef": [-0.286227,0.183082,-4.29815e-05,0.000644874,-0.0479635], + "R": [ + 
[0.05337497606,0.02479711619,0.9982666052], + [0.6376765256,0.7684660834,-0.05318390075], + [-0.7684528356,0.6394098699,0.0252043199] + ], + "t": [ + [6.299256813], + [104.397182], + [363.078698] + ] + }, + { + "name": "00_02", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 2, + "K": [ + [1397.02,0,939.355], + [0,1394.04,556.611], + [0,0,1] + ], + "distCoef": [-0.28229,0.173658,-0.000610716,0.000955319,-0.0398628], + "R": [ + [-0.9970491806,0.05290586318,-0.05562284625], + [-0.01182874156,0.6100448884,0.792278559], + [0.07584861407,0.7905986364,-0.6076189463] + ], + "t": [ + [-16.22360931], + [63.30660163], + [381.0181823] + ] + }, + { + "name": "00_03", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 3, + "K": [ + [1395.71,0,949.456], + [0,1392.06,566.648], + [0,0,1] + ], + "distCoef": [-0.281728,0.168097,-0.00021431,1.8072e-05,-0.0371786], + "R": [ + [-0.6216465312,-0.0285781748,0.7827763909], + [0.07448493547,0.9926490654,0.09539301533], + [-0.7797484111,0.117605786,-0.6149482047] + ], + "t": [ + [-14.50346059], + [117.4297203], + [290.1984382] + ] + }, + { + "name": "00_04", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 4, + "K": [ + [1633.26,0,949.479], + [0,1629.32,572.374], + [0,0,1] + ], + "distCoef": [-0.223003,0.185095,-0.000261654,0.00109433,0.0657602], + "R": [ + [-0.5292732399,-0.01229259603,0.8483623811], + [0.636650989,0.6551966806,0.4066851706], + [-0.5608434325,0.7553583268,-0.3389519765] + ], + "t": [ + [-5.411400695], + [80.12176746], + [379.8488129] + ] + }, + { + "name": "00_05", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 5, + "K": [ + [1396.29,0,933.34], + [0,1392.95,560.462], + [0,0,1] + ], + "distCoef": [-0.28733,0.185523,-0.000225825,-0.000143128,-0.0508452], + "R": [ + [-0.9314658579,-0.01073438439,-0.363670357], + [-0.021313424,0.9994579907,0.02508909603], + [0.3632039283,0.03112069687,-0.9311897813] + ], + "t": [ + [-6.050515741], + [143.9213951], + [280.3813532] + ] + }, + { + "name": "00_06", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 6, + "K": [ + [1396.11,0,950.228], + [0,1392.54,548.78], + [0,0,1] + ], + "distCoef": [-0.286481,0.183173,-0.000152555,0.0010664,-0.0482263], + "R": [ + [0.9448241112,-0.04876703013,-0.3239277321], + [-0.2141569626,0.6563150135,-0.7234551806], + [0.2478793944,0.7529092773,0.6096584503] + ], + "t": [ + [-10.023614], + [84.45695974], + [376.925635] + ] + }, + { + "name": "00_07", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 7, + "K": [ + [1395.51,0,947.67], + [0,1392.41,549.081], + [0,0,1] + ], + "distCoef": [-0.286691,0.185163,-6.53256e-05,4.32858e-06,-0.052639], + "R": [ + [-0.9419632708,-0.03700247277,0.3336705164], + [0.180351898,0.7825307202,0.5959185052], + [-0.2831578878,0.6215114552,-0.7304417305] + ], + "t": [ + [-5.250326149], + [112.5645453], + [360.2387508] + ] + }, + { + "name": "00_08", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 8, + "K": [ + [1642.7,0,945.082], + [0,1638.64,562.465], + [0,0,1] + ], + "distCoef": [-0.22444,0.208938,-0.000569838,0.000484927,0.0287248], + "R": [ + [0.9544726119,0.01685383959,-0.2978220632], + [-0.03362017317,0.9981191009,-0.05126347965], + [0.2963979035,0.05894241665,0.9532439742] + ], + "t": [ + [-19.67808464], + [136.6798831], + [282.6801175] + ] + }, + { + "name": "00_09", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 9, + "K": [ + [1396.79,0,945.482], + [0,1393.03,542.64], + [0,0,1] + ], + 
"distCoef": [-0.284259,0.175176,-0.000406823,0.000640552,-0.0406716], + "R": [ + [-0.3169419478,-0.08460972789,0.9446634298], + [-0.1243350249,0.9911238917,0.04705563528], + [-0.9402598595,-0.1025408464,-0.3246486894] + ], + "t": [ + [6.780958613], + [147.0057696], + [260.6395044] + ] + }, + { + "name": "00_10", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 10, + "K": [ + [1393.87,0,944.546], + [0,1390.36,563.199], + [0,0,1] + ], + "distCoef": [-0.285353,0.177704,-0.000109708,0.000471392,-0.0432146], + "R": [ + [0.9503475669,0.04849461332,0.3073886376], + [0.1560494297,0.7803459045,-0.6055648973], + [-0.2692360999,0.6234649483,0.734032275] + ], + "t": [ + [22.71992555], + [112.7759402], + [360.0009328] + ] + }, + { + "name": "00_11", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 11, + "K": [ + [1492.96,0,934.544], + [0,1489.74,547.466], + [0,0,1] + ], + "distCoef": [-0.259288,0.190057,-5.50625e-05,0.00031915,-0.0281283], + "R": [ + [0.8129763959,0.04080422416,-0.5808652124], + [-0.2848486357,0.8979062573,-0.3355973896], + [0.5078687177,0.4382914196,0.7415996205] + ], + "t": [ + [-0.03199165418], + [105.1487628], + [331.4862369] + ] + }, + { + "name": "00_12", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 12, + "K": [ + [1395.93,0,964.611], + [0,1392.67,564.875], + [0,0,1] + ], + "distCoef": [-0.290995,0.19463,-0.000241491,0.000727782,-0.0582663], + "R": [ + [-0.9950957343,0.04321912909,-0.08897520145], + [-0.001969290489,0.8906636271,0.454658581], + [0.09889692354,0.4526040326,-0.886210465] + ], + "t": [ + [24.66653867], + [97.49188585], + [334.8897626] + ] + }, + { + "name": "00_13", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 13, + "K": [ + [1592.21,0,937.375], + [0,1588.39,560.919], + [0,0,1] + ], + "distCoef": [-0.239248,0.229218,0.000137317,0.000315934,-0.0358302], + "R": [ + [-0.2862766934,0.07452649614,-0.9552441867], + [-0.7557457469,0.5952786327,0.2729317047], + [0.588977097,0.8000557173,-0.1140913162] + ], + "t": [ + [-15.47943966], + [60.20818768], + [381.0821849] + ] + }, + { + "name": "00_14", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 14, + "K": [ + [1649.51,0,934.882], + [0,1644.85,568.024], + [0,0,1] + ], + "distCoef": [-0.22365,0.220791,-0.000591343,0.000286172,0.0121962], + "R": [ + [0.827339054,-0.07848137689,0.5561930989], + [0.02005408661,0.9936867625,0.110383204], + [-0.5613447456,-0.08017039095,0.8236897383] + ], + "t": [ + [-7.23447972], + [142.1657406], + [267.9541185] + ] + }, + { + "name": "00_15", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 15, + "K": [ + [1430.11,0,948.926], + [0,1426.48,561.705], + [0,0,1] + ], + "distCoef": [-0.277948,0.185701,0.000192514,0.000149713,-0.0424254], + "R": [ + [-0.9997414125,0.006454955712,0.02180462522], + [0.005192647027,0.9983342904,-0.05746025644], + [-0.02213920846,-0.05733217422,-0.9981096519] + ], + "t": [ + [9.642162177], + [134.9258555], + [268.2324221] + ] + }, + { + "name": "00_16", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 16, + "K": [ + [1427.34,0,949.618], + [0,1423.13,548.132], + [0,0,1] + ], + "distCoef": [-0.279453,0.188683,-0.000345265,0.000583475,-0.0479414], + "R": [ + [0.7694875517,0.002369830201,0.6386574134], + [0.2539259376,0.9164213706,-0.3093436433], + [-0.586012394,0.4002077652,0.7045730755] + ], + "t": [ + [4.866150988], + [118.1652356], + [330.6340665] + ] + }, + { + "name": "00_17", + "type": "hd", + "resolution": [1920,1080], + 
"panel": 0, + "node": 17, + "K": [ + [1393.35,0,916.395], + [0,1390.34,563.652], + [0,0,1] + ], + "distCoef": [-0.287138,0.186145,7.50854e-05,0.000557424,-0.0513205], + "R": [ + [0.5039250676,0.09465184024,-0.8585456047], + [-0.6050310345,0.7480627966,-0.2726527087], + [0.6164389455,0.6568432701,0.4342348962] + ], + "t": [ + [18.2296155], + [97.71531857], + [361.6667015] + ] + }, + { + "name": "00_18", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 18, + "K": [ + [1542.2,0,947.567], + [0,1538.02,555.168], + [0,0,1] + ], + "distCoef": [-0.245751,0.182006,3.81269e-06,0.000651097,0.00472657], + "R": [ + [-0.4048875531,-0.001022756131,0.9143659133], + [0.3656410889,0.9163838146,0.1629334173], + [-0.8380767647,0.4002994608,-0.3706584387] + ], + "t": [ + [16.25260358], + [116.7586119], + [329.7529305] + ] + }, + { + "name": "00_19", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 19, + "K": [ + [1396.57,0,949.242], + [0,1393.19,554.872], + [0,0,1] + ], + "distCoef": [-0.280864,0.167216,-6.6519e-05,0.000917406,-0.0342733], + "R": [ + [0.7360342296,0.009501079563,0.6768776421], + [0.5173282683,0.6370082142,-0.5714822813], + [-0.4366063167,0.7707984591,0.4639446731] + ], + "t": [ + [-24.15514071], + [74.04862943], + [379.5076537] + ] + }, + { + "name": "00_20", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 20, + "K": [ + [1403.46,0,940.386], + [0,1400.1,552.684], + [0,0,1] + ], + "distCoef": [-0.287177,0.194004,-0.000120001,8.41526e-05,-0.0604614], + "R": [ + [-0.6201222217,0.04052054618,-0.7834580496], + [-0.1302964194,0.9794749929,0.1537907063], + [0.773609251,0.1974508131,-0.6021145267] + ], + "t": [ + [24.4496252], + [140.6900046], + [300.8290806] + ] + }, + { + "name": "00_21", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 21, + "K": [ + [1397.56,0,932.828], + [0,1393.91,562.186], + [0,0,1] + ], + "distCoef": [-0.28642,0.185674,-0.000229601,1.91211e-05,-0.052608], + "R": [ + [-0.2617478675,-0.05032313647,-0.9638234464], + [-0.4532392419,0.8880813121,0.07671878938], + [0.8520928608,0.4569235877,-0.2552618099] + ], + "t": [ + [-8.784671236], + [98.11062797], + [332.9193692] + ] + }, + { + "name": "00_22", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 22, + "K": [ + [1514.1,0,945.861], + [0,1510.18,558.694], + [0,0,1] + ], + "distCoef": [-0.260535,0.216046,-0.000156491,0.000677315,-0.0506741], + "R": [ + [-0.9239818557,-0.0613765916,0.3774790647], + [0.05486070575,0.9555572213,0.289656175], + [-0.3784809549,0.288345818,-0.8795503715] + ], + "t": [ + [-5.224239691], + [110.7456244], + [313.8855054] + ] + }, + { + "name": "00_23", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 23, + "K": [ + [1572.86,0,941.716], + [0,1568.17,560.048], + [0,0,1] + ], + "distCoef": [-0.240801,0.195963,-0.000444179,0.000458513,0.00455186], + "R": [ + [0.5162966551,0.01335424781,0.856305686], + [0.1418829708,0.9847272537,-0.100903213], + [-0.8445750331,0.173591186,0.506516647] + ], + "t": [ + [2.417701344], + [102.3557555], + [298.3746617] + ] + }, + { + "name": "00_24", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 24, + "K": [ + [1399.63,0,954.539], + [0,1396.27,546.388], + [0,0,1] + ], + "distCoef": [-0.288761,0.190789,4.23479e-05,6.78832e-05,-0.0577764], + "R": [ + [-0.388991142,-0.05987834367,-0.9192934653], + [0.02928793432,0.9965772059,-0.07730517199], + [0.9207758187,-0.05699523376,-0.3859059924] + ], + "t": [ + [-15.12220678], + [134.1751339], + 
[265.239245] + ] + }, + { + "name": "00_25", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 25, + "K": [ + [1397.66,0,935.585], + [0,1394.65,559.251], + [0,0,1] + ], + "distCoef": [-0.285722,0.183994,-0.000502702,0.000494145,-0.0515729], + "R": [ + [0.7926422733,0.00130484237,-0.6096855943], + [0.04487405742,0.9971605675,0.06047414042], + [0.6080333424,-0.07529342651,0.7903330655] + ], + "t": [ + [4.539475053], + [139.2223569], + [261.6293171] + ] + }, + { + "name": "00_26", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 26, + "K": [ + [1616.8,0,950.116], + [0,1613.47,551.417], + [0,0,1] + ], + "distCoef": [-0.223464,0.185279,-0.00090721,0.000127112,0.0351947], + "R": [ + [-0.7556190155,-0.04350579001,-0.6535649545], + [0.1389994774,0.9644159151,-0.2249023966], + [0.6400930001,-0.2607857146,-0.7226837222] + ], + "t": [ + [-12.5475419], + [141.1612209], + [240.8579734] + ] + }, + { + "name": "00_27", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 27, + "K": [ + [1861.86,0,934.556], + [0,1857.26,552.106], + [0,0,1] + ], + "distCoef": [-0.171511,0.209759,-1.83176e-05,-3.41566e-05,0.211418], + "R": [ + [0.9782876177,0.02697940456,0.2054883178], + [0.02691509764,0.9665557486,-0.2550403151], + [-0.2054967507,0.2550335204,0.9448433674] + ], + "t": [ + [-0.5131666478], + [123.4498457], + [311.6401591] + ] + }, + { + "name": "00_28", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 28, + "K": [ + [1395.57,0,953.143], + [0,1392.36,561.982], + [0,0,1] + ], + "distCoef": [-0.284934,0.181016,0.000127361,0.000271191,-0.0471616], + "R": [ + [-0.6310677524,-0.02949081954,-0.775166939], + [-0.5128354354,0.7656140117,0.3883748207], + [0.5820251782,0.6426238999,-0.4982782509] + ], + "t": [ + [-8.508070023], + [104.2896072], + [361.3816814] + ] + }, + { + "name": "00_29", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 29, + "K": [ + [1400.36,0,939.608], + [0,1397.25,572.603], + [0,0,1] + ], + "distCoef": [-0.286109,0.1878,-0.000309515,0.000886248,-0.0523515], + "R": [ + [0.4887300705,-0.07268882749,-0.8694016635], + [-0.08227020668,0.9882426049,-0.1288726774], + [0.8685473685,0.1345098073,0.4770037531] + ], + "t": [ + [-20.72850042], + [158.8912224], + [289.281465] + ] + }, + { + "name": "00_30", + "type": "hd", + "resolution": [1920,1080], + "panel": 0, + "node": 30, + "K": [ + [1407.21,0,946.883], + [0,1403.86,563.032], + [0,0,1] + ], + "distCoef": [-0.285813,0.195568,-0.000394067,0.000468367,-0.0600751], + "R": [ + [0.08635045426,0.06174190292,0.9943498059], + [0.2147800801,0.9734543185,-0.07909618832], + [-0.9728376618,0.2203965227,0.07079729175] + ], + "t": [ + [13.79078928], + [132.1300437], + [306.0754676] + ] + }, + { + "name": "50_01", + "type": "kinect-color", + "resolution": [1920,1080], + "panel": 50, + "node": 1, + "K": [ + [1053.92,0,947.294], + [0,1054.32,535.405], + [0,0,1] + ], + "distCoef": [0.0476403,-0.053786,0.000733314,-0.000579648,0.0122759], + "R": [ + [0.9095307192,0.0006254166507,-0.4156362348], + [-0.003349684277,0.999977422,-0.0058253781], + [0.4156232073,0.006690610494,0.9095122788] + ], + "t": [ + [-15.84850815], + [103.1392168], + [269.3362326] + ] + }, + { + "name": "50_02", + "type": "kinect-color", + "resolution": [1920,1080], + "panel": 50, + "node": 2, + "K": [ + [1058.92,0,971.224], + [0,1059.3,541.276], + [0,0,1] + ], + "distCoef": [0.0485216,-0.0529886,-0.000413578,-0.000171659,0.00909728], + "R": [ + [-0.08404700998,-0.006825065684,-0.9964384169], + 
[-0.04073006897,0.9991643735,-0.003408260769], + [0.9956290281,0.04029855131,-0.08425476347] + ], + "t": [ + [-4.246538185], + [93.69672118], + [271.0169727] + ] + }, + { + "name": "50_03", + "type": "kinect-color", + "resolution": [1920,1080], + "panel": 50, + "node": 3, + "K": [ + [1050.35,0,971.069], + [0,1050.88,535.343], + [0,0,1] + ], + "distCoef": [0.0482196,-0.0555053,0.000460862,0.000594278,0.0128034], + "R": [ + [-0.9791929995,-0.0009192386581,-0.2029291126], + [0.004325206908,0.9996680429,-0.02539875018], + [0.2028850964,-0.02574798878,-0.9788639736] + ], + "t": [ + [-10.71273011], + [112.0293664], + [269.2258843] + ] + }, + { + "name": "50_04", + "type": "kinect-color", + "resolution": [1920,1080], + "panel": 50, + "node": 4, + "K": [ + [1053.76,0,952.563], + [0,1053.62,535.073], + [0,0,1] + ], + "distCoef": [0.0534802,-0.059505,0.000265754,-0.00038559,0.0128987], + "R": [ + [-0.4973721867,-0.01252789009,0.8674468052], + [-0.05725964091,0.9981894693,-0.01841512904], + [-0.8656455634,-0.05882886558,-0.4971890215] + ], + "t": [ + [-12.12207689], + [119.639642], + [263.8142799] + ] + }, + { + "name": "50_05", + "type": "kinect-color", + "resolution": [1920,1080], + "panel": 50, + "node": 5, + "K": [ + [1061.53,0,963.346], + [0,1061.99,535.689], + [0,0,1] + ], + "distCoef": [0.0450742,-0.0483577,0.000117724,0.00131017,0.00746483], + "R": [ + [0.6332975321,0.02789684006,0.7734054578], + [-0.04440403331,0.9990136015,0.0003253688515], + [-0.772633495,-0.034548377,0.6339115806] + ], + "t": [ + [4.398197962], + [114.449943], + [269.0646085] + ] + }, + { + "name": "50_06", + "type": "kinect-color", + "resolution": [1920,1080], + "panel": 50, + "node": 6, + "K": [ + [1053.8,0,975.87], + [0,1054.44,518.546], + [0,0,1] + ], + "distCoef": [0.0608578,-0.0758877,0.000572907,0.000423304,0.0232485], + "R": [ + [0.9936973916,-0.01776547634,0.1106791841], + [0.08238304881,0.7853099766,-0.6135969963], + [-0.07601662453,0.6188478234,0.7818240495] + ], + "t": [ + [-23.36095562], + [58.01362542], + [350.0526212] + ] + }, + { + "name": "50_07", + "type": "kinect-color", + "resolution": [1920,1080], + "panel": 50, + "node": 7, + "K": [ + [1058.37,0,951.456], + [0,1058.06,537.752], + [0,0,1] + ], + "distCoef": [0.0510704,-0.0625189,-0.000144014,6.68608e-05,0.016463], + "R": [ + [0.4325769754,-0.03234243573,-0.9010167186], + [-0.4868424381,0.832758343,-0.2636247005], + [0.7588554545,0.5526911516,0.344486415] + ], + "t": [ + [-19.0385587], + [87.13576568], + [341.2560709] + ] + }, + { + "name": "50_08", + "type": "kinect-color", + "resolution": [1920,1080], + "panel": 50, + "node": 8, + "K": [ + [1051.92,0,937.937], + [0,1051.86,554.246], + [0,0,1] + ], + "distCoef": [0.0499863,-0.0613843,-4.12419e-05,-0.000155211,0.0174279], + "R": [ + [-0.7043873056,-0.07078753835,-0.7062773168], + [-0.4398115151,0.8245196459,0.3559960458], + [0.5571394394,0.5613879923,-0.6119143463] + ], + "t": [ + [-21.03532832], + [82.26745729], + [344.5100871] + ] + }, + { + "name": "50_09", + "type": "kinect-color", + "resolution": [1920,1080], + "panel": 50, + "node": 9, + "K": [ + [1054,0,961.563], + [0,1054.08,544.179], + [0,0,1] + ], + "distCoef": [0.0446773,-0.0530941,0.000226286,-0.000324258,0.0121913], + "R": [ + [-0.8728623151,-0.0989156561,0.4778358211], + [0.2068965126,0.8118396582,0.5459946908], + [-0.4419334927,0.5754407548,-0.6881589393] + ], + "t": [ + [-36.30074608], + [73.0041962], + [346.5857858] + ] + }, + { + "name": "50_10", + "type": "kinect-color", + "resolution": [1920,1080], + "panel": 50, + "node": 10, + 
"K": [ + [1050.04,0,941.59], + [0,1050.6,559.398], + [0,0,1] + ], + "distCoef": [0.0506861,-0.0636966,0.000195295,-6.41025e-06,0.0181857], + "R": [ + [0.1849149694,0.002001709126,0.9827524852], + [0.5894867579,0.7998990427,-0.1125472514], + [-0.786328059,0.6001312479,0.146733326] + ], + "t": [ + [-12.26435316], + [64.88453925], + [349.5293231] + ] + } + ] +} diff --git a/tests/data/posetrack18/annotations/test_posetrack18_human_detections.json b/tests/data/posetrack18/annotations/test_posetrack18_human_detections.json index fb1bcf3151..aa4a9c5300 100644 --- a/tests/data/posetrack18/annotations/test_posetrack18_human_detections.json +++ b/tests/data/posetrack18/annotations/test_posetrack18_human_detections.json @@ -1,3061 +1,3061 @@ -[ - { - "bbox": [ - 1475.2755126953125, - 2.719658136367798, - 96.9671630859375, - 252.88242316246033 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.9290200471878052 - }, - { - "bbox": [ - 279.2542419433594, - 201.43528747558594, - 215.51690673828125, - 277.4363555908203 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.8697755932807922 - }, - { - "bbox": [ - 375.3135070800781, - 1.6077430248260498, - 102.83343505859375, - 205.19831776618958 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.8078259229660034 - }, - { - "bbox": [ - 1372.4200439453125, - 0.0, - 105.89013671875, - 242.61294555664062 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.7359948754310608 - }, - { - "bbox": [ - 879.8322143554688, - 166.1944122314453, - 129.68414306640625, - 265.45030212402344 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.7012330293655396 - }, - { - "bbox": [ - 1565.218994140625, - 0.6250243186950684, - 94.249267578125, - 251.48860788345337 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.6708132028579712 - }, - { - "bbox": [ - 1625.5699462890625, - 34.00221633911133, - 113.07080078125, - 336.9929618835449 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.6564908027648926 - }, - { - "bbox": [ - 1767.4072265625, - 0.0, - 94.924560546875, - 229.85476684570312 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.6467881202697754 - }, - { - "bbox": [ - 956.6194458007812, - 900.006103515625, - 149.72381591796875, - 173.7783203125 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.6429733037948608 - }, - { - "bbox": [ - 574.7518310546875, - 876.6203002929688, - 133.7698974609375, - 200.78741455078125 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.6194133758544922 - }, - { - "bbox": [ - 467.8788146972656, - 776.9996948242188, - 108.48025512695312, - 287.51483154296875 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.608767032623291 - }, - { - "bbox": [ - 302.0422058105469, - 732.33837890625, - 124.57574462890625, - 331.01220703125 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.5625099539756775 - }, - { - "bbox": [ - 638.8469848632812, - 743.0866088867188, - 117.85137939453125, - 317.97259521484375 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.5567368268966675 - }, - { - "bbox": [ - 335.7384948730469, - 507.2187194824219, - 145.80545043945312, - 159.55679321289062 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.5184996724128723 - }, - { - "bbox": [ - 1330.8204345703125, - 838.9266357421875, - 140.44580078125, - 240.1510009765625 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.5148675441741943 - }, - { - "bbox": [ - 720.7056884765625, - 
2.9743223190307617, - 104.3197021484375, - 150.11820697784424 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.5129923820495605 - }, - { - "bbox": [ - 196.63421630859375, - 693.4352416992188, - 119.49697875976562, - 362.00836181640625 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.472736656665802 - }, - { - "bbox": [ - 666.0804443359375, - 180.66146850585938, - 95.970458984375, - 213.87698364257812 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.4722053110599518 - }, - { - "bbox": [ - 876.128173828125, - 339.4115905761719, - 135.45379638671875, - 319.6487121582031 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.4647904336452484 - }, - { - "bbox": [ - 667.529296875, - 415.2683410644531, - 104.7076416015625, - 229.71560668945312 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.45972582697868347 - }, - { - "bbox": [ - 112.86947631835938, - 264.6505432128906, - 144.888671875, - 191.26544189453125 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.45595934987068176 - }, - { - "bbox": [ - 1701.4876708984375, - 0.0, - 90.152587890625, - 221.60284423828125 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.45339658856391907 - }, - { - "bbox": [ - 1177.0682373046875, - 808.5385131835938, - 118.4273681640625, - 265.73162841796875 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.4308188259601593 - }, - { - "bbox": [ - 1581.5089111328125, - 773.6590576171875, - 153.54052734375, - 289.6710205078125 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.4269048273563385 - }, - { - "bbox": [ - 531.0040893554688, - 437.7104187011719, - 127.3616943359375, - 280.2588806152344 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.42152199149131775 - }, - { - "bbox": [ - 1797.8150634765625, - 778.5232543945312, - 102.983642578125, - 292.46649169921875 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.411865234375 - }, - { - "bbox": [ - 1084.093505859375, - 2.85404109954834, - 93.6932373046875, - 210.73848819732666 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.40260007977485657 - }, - { - "bbox": [ - 920.5157470703125, - 832.7113037109375, - 94.4918212890625, - 221.5032958984375 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.3867260217666626 - }, - { - "bbox": [ - 1115.3507080078125, - 847.74365234375, - 109.4945068359375, - 226.804931640625 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.3844665586948395 - }, - { - "bbox": [ - 1872.486083984375, - 19.00360679626465, - 42.8349609375, - 236.63503456115723 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.37733739614486694 - }, - { - "bbox": [ - 1349.9853515625, - 210.24911499023438, - 131.93798828125, - 167.93081665039062 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.3761371970176697 - }, - { - "bbox": [ - 766.0445556640625, - 879.2682495117188, - 124.82427978515625, - 201.08441162109375 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.3682442009449005 - }, - { - "bbox": [ - 817.4657592773438, - 0.0, - 80.7606201171875, - 168.49359130859375 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.3530486524105072 - }, - { - "bbox": [ - 147.0262451171875, - 1.8125637769699097, - 79.67684936523438, - 99.51723968982697 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.31355297565460205 - }, - { - "bbox": [ - 1159.018310546875, - 750.4727172851562, - 109.84375, - 160.12939453125 
- ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.3134245276451111 - }, - { - "bbox": [ - 201.1594696044922, - 625.8055419921875, - 77.64781188964844, - 134.331787109375 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.3070683181285858 - }, - { - "bbox": [ - 1473.18359375, - 651.7177124023438, - 82.4835205078125, - 130.7080078125 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.30168840289115906 - }, - { - "bbox": [ - 932.6547241210938, - 0.0, - 94.53363037109375, - 160.51365661621094 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.3008910119533539 - }, - { - "bbox": [ - 1700.9190673828125, - 828.179931640625, - 121.2147216796875, - 245.9788818359375 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.29163801670074463 - }, - { - "bbox": [ - 1634.7724609375, - 446.2858581542969, - 132.4085693359375, - 209.66311645507812 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.291547566652298 - }, - { - "bbox": [ - 1556.4608154296875, - 473.771728515625, - 112.165283203125, - 180.64654541015625 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.2879399359226227 - }, - { - "bbox": [ - 583.9107055664062, - 1.929314374923706, - 73.5870361328125, - 123.53908467292786 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.28340914845466614 - }, - { - "bbox": [ - 1498.50634765625, - 698.7794799804688, - 96.718505859375, - 314.76446533203125 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.28129440546035767 - }, - { - "bbox": [ - 1280.0792236328125, - 775.8158569335938, - 76.7454833984375, - 188.51519775390625 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.27848634123802185 - }, - { - "bbox": [ - 1718.6058349609375, - 226.6940460205078, - 160.0238037109375, - 177.1758575439453 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.27552416920661926 - }, - { - "bbox": [ - 756.9520263671875, - 810.5991821289062, - 83.45086669921875, - 189.677001953125 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.27519550919532776 - }, - { - "bbox": [ - 1728.245849609375, - 640.5650024414062, - 117.093994140625, - 210.5716552734375 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.272867351770401 - }, - { - "bbox": [ - 1772.5546875, - 525.9481201171875, - 132.1446533203125, - 174.74395751953125 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.2701846957206726 - }, - { - "bbox": [ - 1305.05224609375, - 209.34393310546875, - 184.050048828125, - 414.58587646484375 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.26895296573638916 - }, - { - "bbox": [ - 810.69287109375, - 790.5480346679688, - 89.7996826171875, - 185.0943603515625 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.26855093240737915 - }, - { - "bbox": [ - 95.97314453125, - 724.7075805664062, - 114.75672912597656, - 298.14398193359375 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.26742294430732727 - }, - { - "bbox": [ - 1261.4110107421875, - 909.4841918945312, - 118.9820556640625, - 164.47723388671875 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.2666778564453125 - }, - { - "bbox": [ - 1339.5250244140625, - 434.0279846191406, - 87.82666015625, - 147.42294311523438 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.26228952407836914 - }, - { - "bbox": [ - 63.43070983886719, - 664.1151733398438, - 82.15074157714844, - 128.1494140625 - ], - "category_id": 1, - "image_id": 
10128340000, - "score": 0.26013079285621643 - }, - { - "bbox": [ - 1.3776787519454956, - 679.18505859375, - 111.62459480762482, - 224.9747314453125 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.2587812840938568 - }, - { - "bbox": [ - 1439.8868408203125, - 816.7938842773438, - 97.72802734375, - 256.11944580078125 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.256550669670105 - }, - { - "bbox": [ - 660.9515380859375, - 744.8563842773438, - 94.61444091796875, - 115.916259765625 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.2563660442829132 - }, - { - "bbox": [ - 556.6321411132812, - 0.0, - 31.12762451171875, - 77.6491470336914 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.2539074718952179 - }, - { - "bbox": [ - 414.3009948730469, - 682.0269165039062, - 92.76937866210938, - 310.0914306640625 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.25366705656051636 - }, - { - "bbox": [ - 1823.6094970703125, - 520.3126831054688, - 74.411865234375, - 80.507080078125 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.2529422640800476 - }, - { - "bbox": [ - 258.0948486328125, - 2.8098771572113037, - 73.0369873046875, - 90.99600052833557 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.25058287382125854 - }, - { - "bbox": [ - 508.9549560546875, - 714.0374145507812, - 132.6729736328125, - 206.59674072265625 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.24579626321792603 - }, - { - "bbox": [ - 1647.6907958984375, - 387.5267639160156, - 117.0858154296875, - 134.33120727539062 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.2425207644701004 - }, - { - "bbox": [ - 1445.354248046875, - 761.0438842773438, - 91.1209716796875, - 122.70550537109375 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.2403791844844818 - }, - { - "bbox": [ - 1028.0394287109375, - 751.615478515625, - 101.6038818359375, - 172.39617919921875 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.23425403237342834 - }, - { - "bbox": [ - 10.321240425109863, - 668.003173828125, - 92.43458843231201, - 93.92236328125 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.23368315398693085 - }, - { - "bbox": [ - 480.19140625, - 3.0881388187408447, - 101.267578125, - 78.71852469444275 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.2329442799091339 - }, - { - "bbox": [ - 1319.99755859375, - 813.53125, - 58.90185546875, - 112.30328369140625 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.23115667700767517 - }, - { - "bbox": [ - 0.0, - 628.298828125, - 47.96708679199219, - 120.50457763671875 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.2270287126302719 - }, - { - "bbox": [ - 298.7027893066406, - 666.9664306640625, - 119.76385498046875, - 144.8203125 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.2235877364873886 - }, - { - "bbox": [ - 1054.49609375, - 1.8778526782989502, - 65.3221435546875, - 154.7142035961151 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.22313834726810455 - }, - { - "bbox": [ - 296.7391052246094, - 680.0767822265625, - 35.053375244140625, - 69.30267333984375 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.21813228726387024 - }, - { - "bbox": [ - 1811.36962890625, - 285.1565246582031, - 102.1195068359375, - 269.7958679199219 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.21760663390159607 - }, - { - "bbox": [ - 
114.75823974609375, - 719.09228515625, - 74.72804260253906, - 83.634765625 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.2161155790090561 - }, - { - "bbox": [ - 991.546875, - 1.210024356842041, - 59.4659423828125, - 152.63245916366577 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.2096937894821167 - }, - { - "bbox": [ - 1852.13916015625, - 519.2532958984375, - 38.265380859375, - 43.08807373046875 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.18011623620986938 - }, - { - "bbox": [ - 316.677978515625, - 0.0, - 44.184600830078125, - 62.04084396362305 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.17839768528938293 - }, - { - "bbox": [ - 1023.7964477539062, - 0.0, - 45.53558349609375, - 87.68540954589844 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.1771439015865326 - }, - { - "bbox": [ - 0.0, - 690.8153076171875, - 27.172204971313477, - 55.42034912109375 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.17463117837905884 - }, - { - "bbox": [ - 1663.4932861328125, - 4.420060634613037, - 65.2760009765625, - 114.99270486831665 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.1590556651353836 - }, - { - "bbox": [ - 1578.5491943359375, - 454.1618347167969, - 74.5714111328125, - 104.37033081054688 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.15501607954502106 - }, - { - "bbox": [ - 544.5846557617188, - 697.2288208007812, - 35.70989990234375, - 26.73150634765625 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.15327082574367523 - }, - { - "bbox": [ - 534.465087890625, - 881.8455200195312, - 78.7249755859375, - 172.04473876953125 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.14815860986709595 - }, - { - "bbox": [ - 1873.2293701171875, - 834.9508056640625, - 45.2706298828125, - 230.974609375 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.1479007452726364 - }, - { - "bbox": [ - 146.6645965576172, - 723.4815673828125, - 30.512222290039062, - 41.179443359375 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.13243095576763153 - }, - { - "bbox": [ - 740.52490234375, - 10.856040000915527, - 38.1209716796875, - 77.29609775543213 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.1309206336736679 - }, - { - "bbox": [ - 1783.414794921875, - 856.5660400390625, - 51.0806884765625, - 216.032958984375 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.13079363107681274 - }, - { - "bbox": [ - 1353.722900390625, - 4.124818801879883, - 26.04736328125, - 36.974050521850586 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.12728439271450043 - }, - { - "bbox": [ - 1423.4942626953125, - 875.3924560546875, - 16.2568359375, - 29.398681640625 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.1250089704990387 - }, - { - "bbox": [ - 1592.7584228515625, - 1.329086184501648, - 55.0660400390625, - 54.82293713092804 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.11483781039714813 - }, - { - "bbox": [ - 1385.247314453125, - 7.618640422821045, - 19.5557861328125, - 37.21356248855591 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.11478649824857712 - }, - { - "bbox": [ - 774.5552978515625, - 0.0, - 32.50115966796875, - 48.10002899169922 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.11244752258062363 - }, - { - "bbox": [ - 1030.501953125, - 792.454833984375, - 44.9681396484375, - 111.78228759765625 - ], - 
"category_id": 1, - "image_id": 10128340000, - "score": 0.10898905247449875 - }, - { - "bbox": [ - 302.1847229003906, - 695.43701171875, - 20.343109130859375, - 28.063720703125 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.10741319507360458 - }, - { - "bbox": [ - 1729.3040771484375, - 2.0999855995178223, - 26.806884765625, - 36.02122259140015 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.10721274465322495 - }, - { - "bbox": [ - 1762.438720703125, - 4.751255989074707, - 24.288818359375, - 40.14107036590576 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.10624366253614426 - }, - { - "bbox": [ - 211.49954223632812, - 328.7121887207031, - 56.994140625, - 60.76922607421875 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.10590028017759323 - }, - { - "bbox": [ - 1792.0831298828125, - 261.65728759765625, - 92.417236328125, - 84.54769897460938 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.10410129278898239 - }, - { - "bbox": [ - 1547.43359375, - 4.291971683502197, - 28.6832275390625, - 69.40435552597046 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.10200422257184982 - }, - { - "bbox": [ - 1335.0888671875, - 3.258249282836914, - 23.91845703125, - 32.369855880737305 - ], - "category_id": 1, - "image_id": 10128340000, - "score": 0.10069120675325394 - }, - { - "bbox": [ - 1283.4007568359375, - 6.713701248168945, - 629.122802734375, - 1056.8606395721436 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.9853803515434265 - }, - { - "bbox": [ - 288.9501647949219, - 42.40924835205078, - 1185.7618713378906, - 999.2054977416992 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.9629650115966797 - }, - { - "bbox": [ - 649.4730834960938, - 315.6942138671875, - 143.35650634765625, - 229.676513671875 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.8901010751724243 - }, - { - "bbox": [ - 1058.3331298828125, - 258.07269287109375, - 310.98046875, - 259.15057373046875 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.8752242922782898 - }, - { - "bbox": [ - 790.96240234375, - 182.09800720214844, - 105.51129150390625, - 97.01622009277344 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.872738242149353 - }, - { - "bbox": [ - 777.576416015625, - 274.9346618652344, - 119.44439697265625, - 178.85000610351562 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.8679455518722534 - }, - { - "bbox": [ - 2.3131344318389893, - 412.2568054199219, - 273.67606234550476, - 235.93026733398438 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.8616952300071716 - }, - { - "bbox": [ - 8.783040046691895, - 198.89437866210938, - 196.3238935470581, - 266.4853515625 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.8512702584266663 - }, - { - "bbox": [ - 220.74649047851562, - 94.02008056640625, - 98.13226318359375, - 124.78965759277344 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.7501042485237122 - }, - { - "bbox": [ - 164.27354431152344, - 83.04096984863281, - 88.21920776367188, - 127.46699523925781 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.7067092061042786 - }, - { - "bbox": [ - 1087.515625, - 181.69656372070312, - 87.4686279296875, - 72.61752319335938 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.702244758605957 - }, - { - "bbox": [ - 1074.9063720703125, - 472.5963439941406, - 124.1480712890625, - 110.47763061523438 - ], - "category_id": 1, - 
"image_id": 10034180000, - "score": 0.628270149230957 - }, - { - "bbox": [ - 343.7706604003906, - 30.924612045288086, - 59.412750244140625, - 86.91977119445801 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.5943357944488525 - }, - { - "bbox": [ - 69.42112731933594, - 103.34648132324219, - 112.67413330078125, - 108.37942504882812 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.5710238218307495 - }, - { - "bbox": [ - 79.45482635498047, - 437.8648376464844, - 270.02677154541016, - 180.55715942382812 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.40784332156181335 - }, - { - "bbox": [ - 1225.6717529296875, - 162.2100830078125, - 78.9639892578125, - 132.47430419921875 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.3427259922027588 - }, - { - "bbox": [ - 0.9485000371932983, - 54.5380973815918, - 92.79364931583405, - 115.03351211547852 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.33483877778053284 - }, - { - "bbox": [ - 1105.8240966796875, - 281.7027282714844, - 76.47314453125, - 55.8577880859375 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.3022329807281494 - }, - { - "bbox": [ - 0.0, - 258.510498046875, - 85.2731704711914, - 205.99591064453125 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.277988463640213 - }, - { - "bbox": [ - 1069.812255859375, - 430.1299133300781, - 178.785888671875, - 54.991607666015625 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.25947925448417664 - }, - { - "bbox": [ - 681.9738159179688, - 208.11050415039062, - 87.06488037109375, - 76.40863037109375 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.2577346861362457 - }, - { - "bbox": [ - 684.65625, - 209.45753479003906, - 65.76763916015625, - 48.37471008300781 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.25362637639045715 - }, - { - "bbox": [ - 1770.093017578125, - 45.35274887084961, - 148.260986328125, - 1012.7648048400879 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.23887047171592712 - }, - { - "bbox": [ - 167.9042510986328, - 22.85419273376465, - 81.45010375976562, - 74.9856128692627 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.23093517124652863 - }, - { - "bbox": [ - 686.263671875, - 45.065853118896484, - 418.443603515625, - 672.8133583068848 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.22159330546855927 - }, - { - "bbox": [ - 1190.727783203125, - 260.0331115722656, - 45.408203125, - 42.90838623046875 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.2191120684146881 - }, - { - "bbox": [ - 1051.7967529296875, - 212.4822998046875, - 37.3897705078125, - 71.61709594726562 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.13527318835258484 - }, - { - "bbox": [ - 906.1925659179688, - 454.3064880371094, - 249.45501708984375, - 209.19338989257812 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.13330410420894623 - }, - { - "bbox": [ - 852.9170532226562, - 360.49078369140625, - 25.87530517578125, - 70.86614990234375 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.15234917402267456 - }, - { - "bbox": [ - 609.119140625, - 295.8336181640625, - 98.669677734375, - 86.77999877929688 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.8445025086402893 - }, - { - "bbox": [ - 378.2210693359375, - 156.46856689453125, - 79.51510620117188, - 59.65052795410156 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 
0.7748774886131287 - }, - { - "bbox": [ - 198.08822631835938, - 305.9843444824219, - 122.8443603515625, - 100.4822998046875 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.7065314054489136 - }, - { - "bbox": [ - 135.3995819091797, - 208.8668670654297, - 82.15673828125, - 32.42308044433594 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.6814215779304504 - }, - { - "bbox": [ - 535.6635131835938, - 300.5378112792969, - 94.14208984375, - 83.1962890625 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.6654942035675049 - }, - { - "bbox": [ - 483.58563232421875, - 197.45590209960938, - 74.43743896484375, - 57.176239013671875 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.6608478426933289 - }, - { - "bbox": [ - 215.0618896484375, - 210.8956756591797, - 69.7735595703125, - 29.752822875976562 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.6438001394271851 - }, - { - "bbox": [ - 166.78993225097656, - 260.73162841796875, - 81.71955871582031, - 33.886688232421875 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.6426426768302917 - }, - { - "bbox": [ - 194.13543701171875, - 302.4077453613281, - 132.185302734375, - 203.56118774414062 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.64094477891922 - }, - { - "bbox": [ - 24.686168670654297, - 160.48495483398438, - 65.35156631469727, - 43.957122802734375 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.6141790747642517 - }, - { - "bbox": [ - 61.93497848510742, - 206.81692504882812, - 67.95804214477539, - 35.73725891113281 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.6034325361251831 - }, - { - "bbox": [ - 684.8605346679688, - 296.6944274902344, - 60.11041259765625, - 79.523681640625 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.5703861713409424 - }, - { - "bbox": [ - 277.9051818847656, - 118.02881622314453, - 75.3424072265625, - 74.72411346435547 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.5354023575782776 - }, - { - "bbox": [ - 557.520751953125, - 208.25003051757812, - 63.16949462890625, - 47.47157287597656 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.5207008719444275 - }, - { - "bbox": [ - 389.46875, - 260.3998718261719, - 95.03842163085938, - 28.859283447265625 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.5194308757781982 - }, - { - "bbox": [ - 246.87026977539062, - 258.12652587890625, - 83.399658203125, - 36.68548583984375 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.47507211565971375 - }, - { - "bbox": [ - 230.82713317871094, - 51.341026306152344, - 59.52711486816406, - 42.373046875 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.4719221889972687 - }, - { - "bbox": [ - 371.5136413574219, - 302.7303771972656, - 84.49050903320312, - 68.41122436523438 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.44887304306030273 - }, - { - "bbox": [ - 449.14666748046875, - 303.34552001953125, - 95.31640625, - 48.94390869140625 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.42651283740997314 - }, - { - "bbox": [ - 59.20182800292969, - 77.63203430175781, - 69.07972717285156, - 36.52244567871094 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.42590340971946716 - }, - { - "bbox": [ - 370.47991943359375, - 210.2904510498047, - 66.41464233398438, - 33.1710205078125 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.4237402677536011 - }, - { - 
"bbox": [ - 475.22509765625, - 124.54940032958984, - 57.011474609375, - 40.61431121826172 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.3908300995826721 - }, - { - "bbox": [ - 467.0397033691406, - 66.16106414794922, - 47.917999267578125, - 27.583763122558594 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.38647398352622986 - }, - { - "bbox": [ - 288.4964904785156, - 305.16815185546875, - 99.31219482421875, - 87.7886962890625 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.3735053241252899 - }, - { - "bbox": [ - 444.114990234375, - 90.43252563476562, - 51.553955078125, - 31.16741943359375 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.37254029512405396 - }, - { - "bbox": [ - 99.98625183105469, - 40.55061340332031, - 76.22004699707031, - 65.01245880126953 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.3680468797683716 - }, - { - "bbox": [ - 294.51318359375, - 54.41352844238281, - 54.0465087890625, - 41.265953063964844 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.3454741835594177 - }, - { - "bbox": [ - 264.3034362792969, - 83.36378479003906, - 58.63067626953125, - 45.3909912109375 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.33034616708755493 - }, - { - "bbox": [ - 875.2257690429688, - 294.2908020019531, - 63.034912109375, - 73.73040771484375 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.31166598200798035 - }, - { - "bbox": [ - 552.3424072265625, - 102.28469848632812, - 53.5325927734375, - 32.012359619140625 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.31135886907577515 - }, - { - "bbox": [ - 447.3630065917969, - 159.95870971679688, - 75.57168579101562, - 79.81913757324219 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.3080102503299713 - }, - { - "bbox": [ - 744.2843627929688, - 170.82386779785156, - 48.20263671875, - 32.58000183105469 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.3024618923664093 - }, - { - "bbox": [ - 518.8668823242188, - 173.53623962402344, - 57.2681884765625, - 28.869842529296875 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.28725939989089966 - }, - { - "bbox": [ - 578.883056640625, - 242.28355407714844, - 105.27862548828125, - 45.62568664550781 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.2870064973831177 - }, - { - "bbox": [ - 620.3238525390625, - 214.0165557861328, - 57.0029296875, - 29.954849243164062 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.27958208322525024 - }, - { - "bbox": [ - 346.06988525390625, - 128.56320190429688, - 70.56277465820312, - 74.94837951660156 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.2788334786891937 - }, - { - "bbox": [ - 414.5040588378906, - 125.69651794433594, - 59.56060791015625, - 34.760101318359375 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.27825745940208435 - }, - { - "bbox": [ - 345.8397216796875, - 258.0870056152344, - 194.8671875, - 35.27862548828125 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.2586188018321991 - }, - { - "bbox": [ - 687.569091796875, - 163.837158203125, - 51.50909423828125, - 39.52703857421875 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.24999305605888367 - }, - { - "bbox": [ - 625.0399780273438, - 392.7872314453125, - 67.018310546875, - 72.13482666015625 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.2429981678724289 - }, - { - "bbox": [ - 
498.5255432128906, - 99.42186737060547, - 53.512054443359375, - 31.006126403808594 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.24067141115665436 - }, - { - "bbox": [ - 142.8480224609375, - 309.98309326171875, - 82.30924987792969, - 98.9852294921875 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.23763252794742584 - }, - { - "bbox": [ - 536.9259643554688, - 133.77972412109375, - 53.9805908203125, - 43.579833984375 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.2375190556049347 - }, - { - "bbox": [ - 885.564453125, - 239.24940490722656, - 57.38165283203125, - 37.30012512207031 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.23535390198230743 - }, - { - "bbox": [ - 395.301513671875, - 92.57003784179688, - 47.01910400390625, - 38.36552429199219 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.23471194505691528 - }, - { - "bbox": [ - 409.6800537109375, - 60.70526123046875, - 51.487091064453125, - 32.35259246826172 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.21594807505607605 - }, - { - "bbox": [ - 590.739013671875, - 132.8422393798828, - 55.618408203125, - 34.99034118652344 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.21444948017597198 - }, - { - "bbox": [ - 142.70018005371094, - 14.566540718078613, - 56.78106689453125, - 33.07197093963623 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.2036537081003189 - }, - { - "bbox": [ - 320.72296142578125, - 194.36314392089844, - 42.888824462890625, - 34.97528076171875 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.20269575715065002 - }, - { - "bbox": [ - 479.15374755859375, - 264.8033142089844, - 71.17230224609375, - 25.205291748046875 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.1989617943763733 - }, - { - "bbox": [ - 0.3339415192604065, - 187.03533935546875, - 50.64700025320053, - 20.45751953125 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.19690930843353271 - }, - { - "bbox": [ - 74.00901794433594, - 105.07601165771484, - 66.710693359375, - 56.327720642089844 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.19045573472976685 - }, - { - "bbox": [ - 347.0372314453125, - 259.55914306640625, - 53.66485595703125, - 32.394195556640625 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.18698135018348694 - }, - { - "bbox": [ - 67.07357025146484, - 9.42569351196289, - 74.41902923583984, - 62.75996780395508 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.1855248659849167 - }, - { - "bbox": [ - 893.28857421875, - 213.1145782470703, - 46.3870849609375, - 34.87232971191406 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.17870844900608063 - }, - { - "bbox": [ - 611.6231079101562, - 106.5094223022461, - 44.85430908203125, - 29.061744689941406 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.17700931429862976 - }, - { - "bbox": [ - 847.1093139648438, - 286.3870849609375, - 56.32452392578125, - 86.06158447265625 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.16932892799377441 - }, - { - "bbox": [ - 445.4731140136719, - 97.76200103759766, - 49.56451416015625, - 45.203514099121094 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.16094166040420532 - }, - { - "bbox": [ - 83.2696304321289, - 238.672607421875, - 87.30387115478516, - 59.288787841796875 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.1571291834115982 - }, - { - "bbox": [ - 
644.8650512695312, - 134.5099639892578, - 52.570556640625, - 45.77696228027344 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.14659520983695984 - }, - { - "bbox": [ - 798.9510498046875, - 176.64842224121094, - 34.15826416015625, - 27.026199340820312 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.14340169727802277 - }, - { - "bbox": [ - 289.8072204589844, - 2.8699655532836914, - 57.560302734375, - 31.036349296569824 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.12905792891979218 - }, - { - "bbox": [ - 273.2252502441406, - 120.26922607421875, - 33.325103759765625, - 36.83570861816406 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.12813062965869904 - }, - { - "bbox": [ - 536.1267700195312, - 301.2402038574219, - 105.0225830078125, - 164.69992065429688 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.1251327097415924 - }, - { - "bbox": [ - 577.738037109375, - 167.33460998535156, - 52.75921630859375, - 43.77146911621094 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.1169745996594429 - }, - { - "bbox": [ - 10.653980255126953, - 1.5155118703842163, - 64.12058639526367, - 63.142767548561096 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.11120772361755371 - }, - { - "bbox": [ - 290.7361145019531, - 305.92962646484375, - 81.94302368164062, - 186.35324096679688 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.10804451256990433 - }, - { - "bbox": [ - 383.0464172363281, - 33.47468948364258, - 42.016937255859375, - 40.26395034790039 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.10608372837305069 - }, - { - "bbox": [ - 373.3436279296875, - 299.032470703125, - 162.34857177734375, - 71.123291015625 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.10598088800907135 - }, - { - "bbox": [ - 347.5797424316406, - 7.471529960632324, - 51.544647216796875, - 25.57726001739502 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.10507849603891373 - }, - { - "bbox": [ - 9.35350513458252, - 944.8892211914062, - 1300.14759349823, - 121.89459228515625 - ], - "category_id": 1, - "image_id": 10034180000, - "score": 0.21530765295028687 - }, - { - "bbox": [ - 639.7239379882812, - 226.8717498779297, - 344.6689453125, - 663.6336212158203 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.988675594329834 - }, - { - "bbox": [ - 6.2749924659729, - 351.6357116699219, - 243.3602614402771, - 364.3725280761719 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.956828773021698 - }, - { - "bbox": [ - 461.7480163574219, - 277.44110107421875, - 115.16329956054688, - 186.4822998046875 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.9538608193397522 - }, - { - "bbox": [ - 1768.55322265625, - 245.51446533203125, - 138.985595703125, - 304.20843505859375 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.9133968949317932 - }, - { - "bbox": [ - 1155.5684814453125, - 359.0439453125, - 191.2630615234375, - 272.81744384765625 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.9098905920982361 - }, - { - "bbox": [ - 1259.7314453125, - 366.961181640625, - 90.6544189453125, - 138.16278076171875 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.7968080043792725 - }, - { - "bbox": [ - 480.37066650390625, - 386.0138854980469, - 150.568115234375, - 280.1358337402344 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.7637147307395935 - }, - { - "bbox": [ - 
263.7475280761719, - 188.89967346191406, - 90.03085327148438, - 113.91123962402344 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.7468248605728149 - }, - { - "bbox": [ - 162.36859130859375, - 187.40757751464844, - 105.68603515625, - 143.9015655517578 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.7130147814750671 - }, - { - "bbox": [ - 139.2628936767578, - 291.9899597167969, - 106.13040161132812, - 205.92654418945312 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.7115177512168884 - }, - { - "bbox": [ - 1365.2760009765625, - 246.45489501953125, - 66.708984375, - 145.35330200195312 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.6987277865409851 - }, - { - "bbox": [ - 1486.121337890625, - 449.1069641113281, - 68.625732421875, - 118.49978637695312 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.6513593792915344 - }, - { - "bbox": [ - 1354.540771484375, - 443.40478515625, - 147.19580078125, - 194.12603759765625 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.6448480486869812 - }, - { - "bbox": [ - 1363.81591796875, - 373.9744567871094, - 81.1202392578125, - 102.91085815429688 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.5243184566497803 - }, - { - "bbox": [ - 1514.0146484375, - 319.5240783691406, - 75.83056640625, - 144.65200805664062 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.504604697227478 - }, - { - "bbox": [ - 355.92431640625, - 377.6044921875, - 114.5035400390625, - 120.37677001953125 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.4970506429672241 - }, - { - "bbox": [ - 1582.33203125, - 266.6174621582031, - 98.7462158203125, - 264.5225524902344 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.48399269580841064 - }, - { - "bbox": [ - 353.9928283691406, - 371.8907470703125, - 121.08633422851562, - 262.55682373046875 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.4818037748336792 - }, - { - "bbox": [ - 362.9367370605469, - 147.3871612548828, - 75.418212890625, - 109.99433898925781 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.4351760447025299 - }, - { - "bbox": [ - 1241.2064208984375, - 368.8930969238281, - 127.748291015625, - 264.2134704589844 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.38909056782722473 - }, - { - "bbox": [ - 1681.270263671875, - 256.126220703125, - 83.576416015625, - 137.42578125 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.364656925201416 - }, - { - "bbox": [ - 0.0, - 167.76327514648438, - 91.63196563720703, - 236.555419921875 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.35032832622528076 - }, - { - "bbox": [ - 1439.95703125, - 270.9534606933594, - 100.35986328125, - 218.63064575195312 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.346635103225708 - }, - { - "bbox": [ - 1318.2305908203125, - 424.5197448730469, - 115.10791015625, - 192.50259399414062 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.3269309401512146 - }, - { - "bbox": [ - 1052.64013671875, - 287.7257385253906, - 63.3641357421875, - 172.54461669921875 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.24086904525756836 - }, - { - "bbox": [ - 1053.502197265625, - 331.1842346191406, - 227.3038330078125, - 310.5895080566406 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.21309363842010498 - }, - { - "bbox": [ - 1070.9603271484375, - 360.4552917480469, - 96.628173828125, - 
133.9866943359375 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.18517304956912994 - }, - { - "bbox": [ - 1665.9293212890625, - 255.31796264648438, - 146.314697265625, - 291.3702697753906 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.17204511165618896 - }, - { - "bbox": [ - 405.0735778808594, - 386.8234558105469, - 190.69692993164062, - 313.5556945800781 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.15523910522460938 - }, - { - "bbox": [ - 1589.0211181640625, - 265.5631103515625, - 84.9398193359375, - 150.40841674804688 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.15313847362995148 - }, - { - "bbox": [ - 0.9758958220481873, - 422.1836853027344, - 142.32709795236588, - 306.2699279785156 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.146592915058136 - }, - { - "bbox": [ - 1419.790283203125, - 240.48899841308594, - 55.875, - 102.48948669433594 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.14388331770896912 - }, - { - "bbox": [ - 1142.052001953125, - 372.945068359375, - 375.743896484375, - 263.99609375 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.1362028419971466 - }, - { - "bbox": [ - 1149.924560546875, - 228.89898681640625, - 77.2176513671875, - 141.24282836914062 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.13104568421840668 - }, - { - "bbox": [ - 7.145267009735107, - 362.8689270019531, - 148.28553438186646, - 151.63449096679688 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.130157008767128 - }, - { - "bbox": [ - 1115.1795654296875, - 359.9970703125, - 55.0574951171875, - 73.02313232421875 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.1132773831486702 - }, - { - "bbox": [ - 1797.716552734375, - 246.42071533203125, - 108.528076171875, - 179.66299438476562 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.10333290696144104 - }, - { - "bbox": [ - 1281.1473388671875, - 254.05291748046875, - 95.2158203125, - 128.24417114257812 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.09135308116674423 - }, - { - "bbox": [ - 483.60968017578125, - 383.16656494140625, - 106.47314453125, - 105.37130737304688 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.08747227489948273 - }, - { - "bbox": [ - 1183.970458984375, - 248.7894744873047, - 123.838623046875, - 133.18003845214844 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.07844730466604233 - }, - { - "bbox": [ - 1157.6649169921875, - 358.5057678222656, - 153.3060302734375, - 142.8681640625 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.07668760418891907 - }, - { - "bbox": [ - 158.5989532470703, - 3.899838924407959, - 94.29812622070312, - 113.55939722061157 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.0562337264418602 - }, - { - "bbox": [ - 1046.19189453125, - 303.1739196777344, - 146.7403564453125, - 295.9938049316406 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.05225243791937828 - }, - { - "bbox": [ - 1075.177490234375, - 351.35552978515625, - 187.2501220703125, - 145.95687866210938 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.052039798349142075 - }, - { - "bbox": [ - 4.226436614990234, - 596.753662109375, - 145.0108528137207, - 141.51971435546875 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.44805338978767395 - }, - { - "bbox": [ - 1471.1275634765625, - 546.7749633789062, - 409.1026611328125, - 85.891845703125 
- ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.17510481178760529 - }, - { - "bbox": [ - 9.595407485961914, - 136.05421447753906, - 273.3134059906006, - 50.703155517578125 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.14366888999938965 - }, - { - "bbox": [ - 921.6530151367188, - 497.646484375, - 100.19329833984375, - 244.272216796875 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.41841089725494385 - }, - { - "bbox": [ - 1837.094482421875, - 311.22064208984375, - 30.9761962890625, - 48.001678466796875 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.08423541486263275 - }, - { - "bbox": [ - 1839.4462890625, - 311.10064697265625, - 37.092529296875, - 71.60287475585938 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.060598306357860565 - }, - { - "bbox": [ - 332.7347412109375, - 440.8306579589844, - 26.84356689453125, - 49.14508056640625 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.4217357635498047 - }, - { - "bbox": [ - 1074.7474365234375, - 455.2643127441406, - 38.0753173828125, - 24.68829345703125 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.10941091924905777 - }, - { - "bbox": [ - 1034.816162109375, - 433.4083251953125, - 37.64892578125, - 38.33526611328125 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.05890995264053345 - }, - { - "bbox": [ - 1133.7620849609375, - 508.0845642089844, - 70.1640625, - 130.23025512695312 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.4846752882003784 - }, - { - "bbox": [ - 3.005446195602417, - 553.9013671875, - 142.2049114704132, - 183.9932861328125 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.3487741947174072 - }, - { - "bbox": [ - 272.37786865234375, - 411.44207763671875, - 81.43817138671875, - 55.8065185546875 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.21865300834178925 - }, - { - "bbox": [ - 0.24188603460788727, - 453.31536865234375, - 148.4058190435171, - 234.45562744140625 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.20409443974494934 - }, - { - "bbox": [ - 30.815982818603516, - 605.8007202148438, - 125.22259140014648, - 55.677490234375 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.1920752078294754 - }, - { - "bbox": [ - 1133.945556640625, - 486.0797424316406, - 142.3828125, - 149.95669555664062 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.1483757048845291 - }, - { - "bbox": [ - 1113.4261474609375, - 458.69744873046875, - 106.506103515625, - 181.26995849609375 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.13916584849357605 - }, - { - "bbox": [ - 436.55487060546875, - 457.7103576660156, - 156.08184814453125, - 253.82962036132812 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.11243928223848343 - }, - { - "bbox": [ - 284.70098876953125, - 303.1107482910156, - 85.747802734375, - 79.47280883789062 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.09735624492168427 - }, - { - "bbox": [ - 341.12774658203125, - 492.6709289550781, - 93.78155517578125, - 206.94662475585938 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.08169478923082352 - }, - { - "bbox": [ - 0.0, - 549.1785278320312, - 119.3069839477539, - 111.58819580078125 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.06240745261311531 - }, - { - "bbox": [ - 7.89318323135376, - 645.31689453125, - 136.12907934188843, - 87.29620361328125 - ], - "category_id": 1, - 
"image_id": 10094730000, - "score": 0.054731015115976334 - }, - { - "bbox": [ - 213.0941619873047, - 411.15179443359375, - 42.60209655761719, - 32.3763427734375 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.05244819447398186 - }, - { - "bbox": [ - 130.32546997070312, - 487.7962951660156, - 300.62261962890625, - 236.79757690429688 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.8464512825012207 - }, - { - "bbox": [ - 943.7500610351562, - 463.9021911621094, - 207.76824951171875, - 177.45816040039062 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.6438319087028503 - }, - { - "bbox": [ - 126.81778717041016, - 458.4678955078125, - 290.0162582397461, - 57.33453369140625 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.1522291600704193 - }, - { - "bbox": [ - 936.41162109375, - 449.2172546386719, - 192.6654052734375, - 35.686859130859375 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.14834310114383698 - }, - { - "bbox": [ - 887.5518798828125, - 464.12335205078125, - 357.5780029296875, - 257.0013427734375 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.1072755679488182 - }, - { - "bbox": [ - 7.024689197540283, - 405.5135498046875, - 338.4464716911316, - 278.21929931640625 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.09115041047334671 - }, - { - "bbox": [ - 161.556884765625, - 482.5937805175781, - 242.09451293945312, - 30.944366455078125 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.06512065976858139 - }, - { - "bbox": [ - 933.17236328125, - 423.4557800292969, - 280.425048828125, - 68.74118041992188 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.06368233263492584 - }, - { - "bbox": [ - 121.80384063720703, - 488.6224060058594, - 311.62242889404297, - 45.982086181640625 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.05987454578280449 - }, - { - "bbox": [ - 929.5904541015625, - 436.67840576171875, - 381.5384521484375, - 75.44134521484375 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.05840197578072548 - }, - { - "bbox": [ - 235.27882385253906, - 486.3999328613281, - 136.96372985839844, - 23.562835693359375 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.05732391029596329 - }, - { - "bbox": [ - 69.67058563232422, - 417.2310485839844, - 672.0211868286133, - 311.6492614746094 - ], - "category_id": 1, - "image_id": 10094730000, - "score": 0.05091623216867447 - } - -] +[ + { + "bbox": [ + 1475.2755126953125, + 2.719658136367798, + 96.9671630859375, + 252.88242316246033 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.9290200471878052 + }, + { + "bbox": [ + 279.2542419433594, + 201.43528747558594, + 215.51690673828125, + 277.4363555908203 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.8697755932807922 + }, + { + "bbox": [ + 375.3135070800781, + 1.6077430248260498, + 102.83343505859375, + 205.19831776618958 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.8078259229660034 + }, + { + "bbox": [ + 1372.4200439453125, + 0.0, + 105.89013671875, + 242.61294555664062 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.7359948754310608 + }, + { + "bbox": [ + 879.8322143554688, + 166.1944122314453, + 129.68414306640625, + 265.45030212402344 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.7012330293655396 + }, + { + "bbox": [ + 1565.218994140625, + 0.6250243186950684, + 94.249267578125, + 251.48860788345337 + ], + "category_id": 
1, + "image_id": 10128340000, + "score": 0.6708132028579712 + }, + { + "bbox": [ + 1625.5699462890625, + 34.00221633911133, + 113.07080078125, + 336.9929618835449 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.6564908027648926 + }, + { + "bbox": [ + 1767.4072265625, + 0.0, + 94.924560546875, + 229.85476684570312 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.6467881202697754 + }, + { + "bbox": [ + 956.6194458007812, + 900.006103515625, + 149.72381591796875, + 173.7783203125 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.6429733037948608 + }, + { + "bbox": [ + 574.7518310546875, + 876.6203002929688, + 133.7698974609375, + 200.78741455078125 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.6194133758544922 + }, + { + "bbox": [ + 467.8788146972656, + 776.9996948242188, + 108.48025512695312, + 287.51483154296875 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.608767032623291 + }, + { + "bbox": [ + 302.0422058105469, + 732.33837890625, + 124.57574462890625, + 331.01220703125 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.5625099539756775 + }, + { + "bbox": [ + 638.8469848632812, + 743.0866088867188, + 117.85137939453125, + 317.97259521484375 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.5567368268966675 + }, + { + "bbox": [ + 335.7384948730469, + 507.2187194824219, + 145.80545043945312, + 159.55679321289062 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.5184996724128723 + }, + { + "bbox": [ + 1330.8204345703125, + 838.9266357421875, + 140.44580078125, + 240.1510009765625 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.5148675441741943 + }, + { + "bbox": [ + 720.7056884765625, + 2.9743223190307617, + 104.3197021484375, + 150.11820697784424 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.5129923820495605 + }, + { + "bbox": [ + 196.63421630859375, + 693.4352416992188, + 119.49697875976562, + 362.00836181640625 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.472736656665802 + }, + { + "bbox": [ + 666.0804443359375, + 180.66146850585938, + 95.970458984375, + 213.87698364257812 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.4722053110599518 + }, + { + "bbox": [ + 876.128173828125, + 339.4115905761719, + 135.45379638671875, + 319.6487121582031 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.4647904336452484 + }, + { + "bbox": [ + 667.529296875, + 415.2683410644531, + 104.7076416015625, + 229.71560668945312 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.45972582697868347 + }, + { + "bbox": [ + 112.86947631835938, + 264.6505432128906, + 144.888671875, + 191.26544189453125 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.45595934987068176 + }, + { + "bbox": [ + 1701.4876708984375, + 0.0, + 90.152587890625, + 221.60284423828125 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.45339658856391907 + }, + { + "bbox": [ + 1177.0682373046875, + 808.5385131835938, + 118.4273681640625, + 265.73162841796875 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.4308188259601593 + }, + { + "bbox": [ + 1581.5089111328125, + 773.6590576171875, + 153.54052734375, + 289.6710205078125 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.4269048273563385 + }, + { + "bbox": [ + 531.0040893554688, + 437.7104187011719, + 127.3616943359375, + 280.2588806152344 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 
0.42152199149131775 + }, + { + "bbox": [ + 1797.8150634765625, + 778.5232543945312, + 102.983642578125, + 292.46649169921875 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.411865234375 + }, + { + "bbox": [ + 1084.093505859375, + 2.85404109954834, + 93.6932373046875, + 210.73848819732666 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.40260007977485657 + }, + { + "bbox": [ + 920.5157470703125, + 832.7113037109375, + 94.4918212890625, + 221.5032958984375 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.3867260217666626 + }, + { + "bbox": [ + 1115.3507080078125, + 847.74365234375, + 109.4945068359375, + 226.804931640625 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.3844665586948395 + }, + { + "bbox": [ + 1872.486083984375, + 19.00360679626465, + 42.8349609375, + 236.63503456115723 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.37733739614486694 + }, + { + "bbox": [ + 1349.9853515625, + 210.24911499023438, + 131.93798828125, + 167.93081665039062 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.3761371970176697 + }, + { + "bbox": [ + 766.0445556640625, + 879.2682495117188, + 124.82427978515625, + 201.08441162109375 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.3682442009449005 + }, + { + "bbox": [ + 817.4657592773438, + 0.0, + 80.7606201171875, + 168.49359130859375 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.3530486524105072 + }, + { + "bbox": [ + 147.0262451171875, + 1.8125637769699097, + 79.67684936523438, + 99.51723968982697 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.31355297565460205 + }, + { + "bbox": [ + 1159.018310546875, + 750.4727172851562, + 109.84375, + 160.12939453125 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.3134245276451111 + }, + { + "bbox": [ + 201.1594696044922, + 625.8055419921875, + 77.64781188964844, + 134.331787109375 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.3070683181285858 + }, + { + "bbox": [ + 1473.18359375, + 651.7177124023438, + 82.4835205078125, + 130.7080078125 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.30168840289115906 + }, + { + "bbox": [ + 932.6547241210938, + 0.0, + 94.53363037109375, + 160.51365661621094 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.3008910119533539 + }, + { + "bbox": [ + 1700.9190673828125, + 828.179931640625, + 121.2147216796875, + 245.9788818359375 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.29163801670074463 + }, + { + "bbox": [ + 1634.7724609375, + 446.2858581542969, + 132.4085693359375, + 209.66311645507812 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.291547566652298 + }, + { + "bbox": [ + 1556.4608154296875, + 473.771728515625, + 112.165283203125, + 180.64654541015625 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.2879399359226227 + }, + { + "bbox": [ + 583.9107055664062, + 1.929314374923706, + 73.5870361328125, + 123.53908467292786 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.28340914845466614 + }, + { + "bbox": [ + 1498.50634765625, + 698.7794799804688, + 96.718505859375, + 314.76446533203125 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.28129440546035767 + }, + { + "bbox": [ + 1280.0792236328125, + 775.8158569335938, + 76.7454833984375, + 188.51519775390625 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.27848634123802185 + }, + { + "bbox": [ + 1718.6058349609375, + 
226.6940460205078, + 160.0238037109375, + 177.1758575439453 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.27552416920661926 + }, + { + "bbox": [ + 756.9520263671875, + 810.5991821289062, + 83.45086669921875, + 189.677001953125 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.27519550919532776 + }, + { + "bbox": [ + 1728.245849609375, + 640.5650024414062, + 117.093994140625, + 210.5716552734375 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.272867351770401 + }, + { + "bbox": [ + 1772.5546875, + 525.9481201171875, + 132.1446533203125, + 174.74395751953125 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.2701846957206726 + }, + { + "bbox": [ + 1305.05224609375, + 209.34393310546875, + 184.050048828125, + 414.58587646484375 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.26895296573638916 + }, + { + "bbox": [ + 810.69287109375, + 790.5480346679688, + 89.7996826171875, + 185.0943603515625 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.26855093240737915 + }, + { + "bbox": [ + 95.97314453125, + 724.7075805664062, + 114.75672912597656, + 298.14398193359375 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.26742294430732727 + }, + { + "bbox": [ + 1261.4110107421875, + 909.4841918945312, + 118.9820556640625, + 164.47723388671875 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.2666778564453125 + }, + { + "bbox": [ + 1339.5250244140625, + 434.0279846191406, + 87.82666015625, + 147.42294311523438 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.26228952407836914 + }, + { + "bbox": [ + 63.43070983886719, + 664.1151733398438, + 82.15074157714844, + 128.1494140625 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.26013079285621643 + }, + { + "bbox": [ + 1.3776787519454956, + 679.18505859375, + 111.62459480762482, + 224.9747314453125 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.2587812840938568 + }, + { + "bbox": [ + 1439.8868408203125, + 816.7938842773438, + 97.72802734375, + 256.11944580078125 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.256550669670105 + }, + { + "bbox": [ + 660.9515380859375, + 744.8563842773438, + 94.61444091796875, + 115.916259765625 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.2563660442829132 + }, + { + "bbox": [ + 556.6321411132812, + 0.0, + 31.12762451171875, + 77.6491470336914 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.2539074718952179 + }, + { + "bbox": [ + 414.3009948730469, + 682.0269165039062, + 92.76937866210938, + 310.0914306640625 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.25366705656051636 + }, + { + "bbox": [ + 1823.6094970703125, + 520.3126831054688, + 74.411865234375, + 80.507080078125 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.2529422640800476 + }, + { + "bbox": [ + 258.0948486328125, + 2.8098771572113037, + 73.0369873046875, + 90.99600052833557 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.25058287382125854 + }, + { + "bbox": [ + 508.9549560546875, + 714.0374145507812, + 132.6729736328125, + 206.59674072265625 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.24579626321792603 + }, + { + "bbox": [ + 1647.6907958984375, + 387.5267639160156, + 117.0858154296875, + 134.33120727539062 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.2425207644701004 + }, + { + "bbox": [ + 1445.354248046875, + 761.0438842773438, + 91.1209716796875, + 
122.70550537109375 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.2403791844844818 + }, + { + "bbox": [ + 1028.0394287109375, + 751.615478515625, + 101.6038818359375, + 172.39617919921875 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.23425403237342834 + }, + { + "bbox": [ + 10.321240425109863, + 668.003173828125, + 92.43458843231201, + 93.92236328125 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.23368315398693085 + }, + { + "bbox": [ + 480.19140625, + 3.0881388187408447, + 101.267578125, + 78.71852469444275 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.2329442799091339 + }, + { + "bbox": [ + 1319.99755859375, + 813.53125, + 58.90185546875, + 112.30328369140625 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.23115667700767517 + }, + { + "bbox": [ + 0.0, + 628.298828125, + 47.96708679199219, + 120.50457763671875 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.2270287126302719 + }, + { + "bbox": [ + 298.7027893066406, + 666.9664306640625, + 119.76385498046875, + 144.8203125 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.2235877364873886 + }, + { + "bbox": [ + 1054.49609375, + 1.8778526782989502, + 65.3221435546875, + 154.7142035961151 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.22313834726810455 + }, + { + "bbox": [ + 296.7391052246094, + 680.0767822265625, + 35.053375244140625, + 69.30267333984375 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.21813228726387024 + }, + { + "bbox": [ + 1811.36962890625, + 285.1565246582031, + 102.1195068359375, + 269.7958679199219 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.21760663390159607 + }, + { + "bbox": [ + 114.75823974609375, + 719.09228515625, + 74.72804260253906, + 83.634765625 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.2161155790090561 + }, + { + "bbox": [ + 991.546875, + 1.210024356842041, + 59.4659423828125, + 152.63245916366577 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.2096937894821167 + }, + { + "bbox": [ + 1852.13916015625, + 519.2532958984375, + 38.265380859375, + 43.08807373046875 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.18011623620986938 + }, + { + "bbox": [ + 316.677978515625, + 0.0, + 44.184600830078125, + 62.04084396362305 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.17839768528938293 + }, + { + "bbox": [ + 1023.7964477539062, + 0.0, + 45.53558349609375, + 87.68540954589844 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.1771439015865326 + }, + { + "bbox": [ + 0.0, + 690.8153076171875, + 27.172204971313477, + 55.42034912109375 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.17463117837905884 + }, + { + "bbox": [ + 1663.4932861328125, + 4.420060634613037, + 65.2760009765625, + 114.99270486831665 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.1590556651353836 + }, + { + "bbox": [ + 1578.5491943359375, + 454.1618347167969, + 74.5714111328125, + 104.37033081054688 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.15501607954502106 + }, + { + "bbox": [ + 544.5846557617188, + 697.2288208007812, + 35.70989990234375, + 26.73150634765625 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.15327082574367523 + }, + { + "bbox": [ + 534.465087890625, + 881.8455200195312, + 78.7249755859375, + 172.04473876953125 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.14815860986709595 + }, + { 
+ "bbox": [ + 1873.2293701171875, + 834.9508056640625, + 45.2706298828125, + 230.974609375 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.1479007452726364 + }, + { + "bbox": [ + 146.6645965576172, + 723.4815673828125, + 30.512222290039062, + 41.179443359375 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.13243095576763153 + }, + { + "bbox": [ + 740.52490234375, + 10.856040000915527, + 38.1209716796875, + 77.29609775543213 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.1309206336736679 + }, + { + "bbox": [ + 1783.414794921875, + 856.5660400390625, + 51.0806884765625, + 216.032958984375 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.13079363107681274 + }, + { + "bbox": [ + 1353.722900390625, + 4.124818801879883, + 26.04736328125, + 36.974050521850586 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.12728439271450043 + }, + { + "bbox": [ + 1423.4942626953125, + 875.3924560546875, + 16.2568359375, + 29.398681640625 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.1250089704990387 + }, + { + "bbox": [ + 1592.7584228515625, + 1.329086184501648, + 55.0660400390625, + 54.82293713092804 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.11483781039714813 + }, + { + "bbox": [ + 1385.247314453125, + 7.618640422821045, + 19.5557861328125, + 37.21356248855591 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.11478649824857712 + }, + { + "bbox": [ + 774.5552978515625, + 0.0, + 32.50115966796875, + 48.10002899169922 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.11244752258062363 + }, + { + "bbox": [ + 1030.501953125, + 792.454833984375, + 44.9681396484375, + 111.78228759765625 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.10898905247449875 + }, + { + "bbox": [ + 302.1847229003906, + 695.43701171875, + 20.343109130859375, + 28.063720703125 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.10741319507360458 + }, + { + "bbox": [ + 1729.3040771484375, + 2.0999855995178223, + 26.806884765625, + 36.02122259140015 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.10721274465322495 + }, + { + "bbox": [ + 1762.438720703125, + 4.751255989074707, + 24.288818359375, + 40.14107036590576 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.10624366253614426 + }, + { + "bbox": [ + 211.49954223632812, + 328.7121887207031, + 56.994140625, + 60.76922607421875 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.10590028017759323 + }, + { + "bbox": [ + 1792.0831298828125, + 261.65728759765625, + 92.417236328125, + 84.54769897460938 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.10410129278898239 + }, + { + "bbox": [ + 1547.43359375, + 4.291971683502197, + 28.6832275390625, + 69.40435552597046 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.10200422257184982 + }, + { + "bbox": [ + 1335.0888671875, + 3.258249282836914, + 23.91845703125, + 32.369855880737305 + ], + "category_id": 1, + "image_id": 10128340000, + "score": 0.10069120675325394 + }, + { + "bbox": [ + 1283.4007568359375, + 6.713701248168945, + 629.122802734375, + 1056.8606395721436 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.9853803515434265 + }, + { + "bbox": [ + 288.9501647949219, + 42.40924835205078, + 1185.7618713378906, + 999.2054977416992 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.9629650115966797 + }, + { + "bbox": [ + 649.4730834960938, + 315.6942138671875, + 
143.35650634765625, + 229.676513671875 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.8901010751724243 + }, + { + "bbox": [ + 1058.3331298828125, + 258.07269287109375, + 310.98046875, + 259.15057373046875 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.8752242922782898 + }, + { + "bbox": [ + 790.96240234375, + 182.09800720214844, + 105.51129150390625, + 97.01622009277344 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.872738242149353 + }, + { + "bbox": [ + 777.576416015625, + 274.9346618652344, + 119.44439697265625, + 178.85000610351562 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.8679455518722534 + }, + { + "bbox": [ + 2.3131344318389893, + 412.2568054199219, + 273.67606234550476, + 235.93026733398438 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.8616952300071716 + }, + { + "bbox": [ + 8.783040046691895, + 198.89437866210938, + 196.3238935470581, + 266.4853515625 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.8512702584266663 + }, + { + "bbox": [ + 220.74649047851562, + 94.02008056640625, + 98.13226318359375, + 124.78965759277344 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.7501042485237122 + }, + { + "bbox": [ + 164.27354431152344, + 83.04096984863281, + 88.21920776367188, + 127.46699523925781 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.7067092061042786 + }, + { + "bbox": [ + 1087.515625, + 181.69656372070312, + 87.4686279296875, + 72.61752319335938 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.702244758605957 + }, + { + "bbox": [ + 1074.9063720703125, + 472.5963439941406, + 124.1480712890625, + 110.47763061523438 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.628270149230957 + }, + { + "bbox": [ + 343.7706604003906, + 30.924612045288086, + 59.412750244140625, + 86.91977119445801 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.5943357944488525 + }, + { + "bbox": [ + 69.42112731933594, + 103.34648132324219, + 112.67413330078125, + 108.37942504882812 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.5710238218307495 + }, + { + "bbox": [ + 79.45482635498047, + 437.8648376464844, + 270.02677154541016, + 180.55715942382812 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.40784332156181335 + }, + { + "bbox": [ + 1225.6717529296875, + 162.2100830078125, + 78.9639892578125, + 132.47430419921875 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.3427259922027588 + }, + { + "bbox": [ + 0.9485000371932983, + 54.5380973815918, + 92.79364931583405, + 115.03351211547852 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.33483877778053284 + }, + { + "bbox": [ + 1105.8240966796875, + 281.7027282714844, + 76.47314453125, + 55.8577880859375 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.3022329807281494 + }, + { + "bbox": [ + 0.0, + 258.510498046875, + 85.2731704711914, + 205.99591064453125 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.277988463640213 + }, + { + "bbox": [ + 1069.812255859375, + 430.1299133300781, + 178.785888671875, + 54.991607666015625 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.25947925448417664 + }, + { + "bbox": [ + 681.9738159179688, + 208.11050415039062, + 87.06488037109375, + 76.40863037109375 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.2577346861362457 + }, + { + "bbox": [ + 684.65625, + 209.45753479003906, + 65.76763916015625, + 
48.37471008300781 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.25362637639045715 + }, + { + "bbox": [ + 1770.093017578125, + 45.35274887084961, + 148.260986328125, + 1012.7648048400879 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.23887047171592712 + }, + { + "bbox": [ + 167.9042510986328, + 22.85419273376465, + 81.45010375976562, + 74.9856128692627 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.23093517124652863 + }, + { + "bbox": [ + 686.263671875, + 45.065853118896484, + 418.443603515625, + 672.8133583068848 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.22159330546855927 + }, + { + "bbox": [ + 1190.727783203125, + 260.0331115722656, + 45.408203125, + 42.90838623046875 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.2191120684146881 + }, + { + "bbox": [ + 1051.7967529296875, + 212.4822998046875, + 37.3897705078125, + 71.61709594726562 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.13527318835258484 + }, + { + "bbox": [ + 906.1925659179688, + 454.3064880371094, + 249.45501708984375, + 209.19338989257812 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.13330410420894623 + }, + { + "bbox": [ + 852.9170532226562, + 360.49078369140625, + 25.87530517578125, + 70.86614990234375 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.15234917402267456 + }, + { + "bbox": [ + 609.119140625, + 295.8336181640625, + 98.669677734375, + 86.77999877929688 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.8445025086402893 + }, + { + "bbox": [ + 378.2210693359375, + 156.46856689453125, + 79.51510620117188, + 59.65052795410156 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.7748774886131287 + }, + { + "bbox": [ + 198.08822631835938, + 305.9843444824219, + 122.8443603515625, + 100.4822998046875 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.7065314054489136 + }, + { + "bbox": [ + 135.3995819091797, + 208.8668670654297, + 82.15673828125, + 32.42308044433594 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.6814215779304504 + }, + { + "bbox": [ + 535.6635131835938, + 300.5378112792969, + 94.14208984375, + 83.1962890625 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.6654942035675049 + }, + { + "bbox": [ + 483.58563232421875, + 197.45590209960938, + 74.43743896484375, + 57.176239013671875 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.6608478426933289 + }, + { + "bbox": [ + 215.0618896484375, + 210.8956756591797, + 69.7735595703125, + 29.752822875976562 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.6438001394271851 + }, + { + "bbox": [ + 166.78993225097656, + 260.73162841796875, + 81.71955871582031, + 33.886688232421875 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.6426426768302917 + }, + { + "bbox": [ + 194.13543701171875, + 302.4077453613281, + 132.185302734375, + 203.56118774414062 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.64094477891922 + }, + { + "bbox": [ + 24.686168670654297, + 160.48495483398438, + 65.35156631469727, + 43.957122802734375 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.6141790747642517 + }, + { + "bbox": [ + 61.93497848510742, + 206.81692504882812, + 67.95804214477539, + 35.73725891113281 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.6034325361251831 + }, + { + "bbox": [ + 684.8605346679688, + 296.6944274902344, + 60.11041259765625, + 79.523681640625 + ], + 
"category_id": 1, + "image_id": 10034180000, + "score": 0.5703861713409424 + }, + { + "bbox": [ + 277.9051818847656, + 118.02881622314453, + 75.3424072265625, + 74.72411346435547 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.5354023575782776 + }, + { + "bbox": [ + 557.520751953125, + 208.25003051757812, + 63.16949462890625, + 47.47157287597656 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.5207008719444275 + }, + { + "bbox": [ + 389.46875, + 260.3998718261719, + 95.03842163085938, + 28.859283447265625 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.5194308757781982 + }, + { + "bbox": [ + 246.87026977539062, + 258.12652587890625, + 83.399658203125, + 36.68548583984375 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.47507211565971375 + }, + { + "bbox": [ + 230.82713317871094, + 51.341026306152344, + 59.52711486816406, + 42.373046875 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.4719221889972687 + }, + { + "bbox": [ + 371.5136413574219, + 302.7303771972656, + 84.49050903320312, + 68.41122436523438 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.44887304306030273 + }, + { + "bbox": [ + 449.14666748046875, + 303.34552001953125, + 95.31640625, + 48.94390869140625 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.42651283740997314 + }, + { + "bbox": [ + 59.20182800292969, + 77.63203430175781, + 69.07972717285156, + 36.52244567871094 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.42590340971946716 + }, + { + "bbox": [ + 370.47991943359375, + 210.2904510498047, + 66.41464233398438, + 33.1710205078125 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.4237402677536011 + }, + { + "bbox": [ + 475.22509765625, + 124.54940032958984, + 57.011474609375, + 40.61431121826172 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.3908300995826721 + }, + { + "bbox": [ + 467.0397033691406, + 66.16106414794922, + 47.917999267578125, + 27.583763122558594 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.38647398352622986 + }, + { + "bbox": [ + 288.4964904785156, + 305.16815185546875, + 99.31219482421875, + 87.7886962890625 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.3735053241252899 + }, + { + "bbox": [ + 444.114990234375, + 90.43252563476562, + 51.553955078125, + 31.16741943359375 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.37254029512405396 + }, + { + "bbox": [ + 99.98625183105469, + 40.55061340332031, + 76.22004699707031, + 65.01245880126953 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.3680468797683716 + }, + { + "bbox": [ + 294.51318359375, + 54.41352844238281, + 54.0465087890625, + 41.265953063964844 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.3454741835594177 + }, + { + "bbox": [ + 264.3034362792969, + 83.36378479003906, + 58.63067626953125, + 45.3909912109375 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.33034616708755493 + }, + { + "bbox": [ + 875.2257690429688, + 294.2908020019531, + 63.034912109375, + 73.73040771484375 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.31166598200798035 + }, + { + "bbox": [ + 552.3424072265625, + 102.28469848632812, + 53.5325927734375, + 32.012359619140625 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.31135886907577515 + }, + { + "bbox": [ + 447.3630065917969, + 159.95870971679688, + 75.57168579101562, + 79.81913757324219 + ], + "category_id": 1, + "image_id": 
10034180000, + "score": 0.3080102503299713 + }, + { + "bbox": [ + 744.2843627929688, + 170.82386779785156, + 48.20263671875, + 32.58000183105469 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.3024618923664093 + }, + { + "bbox": [ + 518.8668823242188, + 173.53623962402344, + 57.2681884765625, + 28.869842529296875 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.28725939989089966 + }, + { + "bbox": [ + 578.883056640625, + 242.28355407714844, + 105.27862548828125, + 45.62568664550781 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.2870064973831177 + }, + { + "bbox": [ + 620.3238525390625, + 214.0165557861328, + 57.0029296875, + 29.954849243164062 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.27958208322525024 + }, + { + "bbox": [ + 346.06988525390625, + 128.56320190429688, + 70.56277465820312, + 74.94837951660156 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.2788334786891937 + }, + { + "bbox": [ + 414.5040588378906, + 125.69651794433594, + 59.56060791015625, + 34.760101318359375 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.27825745940208435 + }, + { + "bbox": [ + 345.8397216796875, + 258.0870056152344, + 194.8671875, + 35.27862548828125 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.2586188018321991 + }, + { + "bbox": [ + 687.569091796875, + 163.837158203125, + 51.50909423828125, + 39.52703857421875 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.24999305605888367 + }, + { + "bbox": [ + 625.0399780273438, + 392.7872314453125, + 67.018310546875, + 72.13482666015625 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.2429981678724289 + }, + { + "bbox": [ + 498.5255432128906, + 99.42186737060547, + 53.512054443359375, + 31.006126403808594 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.24067141115665436 + }, + { + "bbox": [ + 142.8480224609375, + 309.98309326171875, + 82.30924987792969, + 98.9852294921875 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.23763252794742584 + }, + { + "bbox": [ + 536.9259643554688, + 133.77972412109375, + 53.9805908203125, + 43.579833984375 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.2375190556049347 + }, + { + "bbox": [ + 885.564453125, + 239.24940490722656, + 57.38165283203125, + 37.30012512207031 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.23535390198230743 + }, + { + "bbox": [ + 395.301513671875, + 92.57003784179688, + 47.01910400390625, + 38.36552429199219 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.23471194505691528 + }, + { + "bbox": [ + 409.6800537109375, + 60.70526123046875, + 51.487091064453125, + 32.35259246826172 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.21594807505607605 + }, + { + "bbox": [ + 590.739013671875, + 132.8422393798828, + 55.618408203125, + 34.99034118652344 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.21444948017597198 + }, + { + "bbox": [ + 142.70018005371094, + 14.566540718078613, + 56.78106689453125, + 33.07197093963623 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.2036537081003189 + }, + { + "bbox": [ + 320.72296142578125, + 194.36314392089844, + 42.888824462890625, + 34.97528076171875 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.20269575715065002 + }, + { + "bbox": [ + 479.15374755859375, + 264.8033142089844, + 71.17230224609375, + 25.205291748046875 + ], + "category_id": 1, + "image_id": 10034180000, + 
"score": 0.1989617943763733 + }, + { + "bbox": [ + 0.3339415192604065, + 187.03533935546875, + 50.64700025320053, + 20.45751953125 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.19690930843353271 + }, + { + "bbox": [ + 74.00901794433594, + 105.07601165771484, + 66.710693359375, + 56.327720642089844 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.19045573472976685 + }, + { + "bbox": [ + 347.0372314453125, + 259.55914306640625, + 53.66485595703125, + 32.394195556640625 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.18698135018348694 + }, + { + "bbox": [ + 67.07357025146484, + 9.42569351196289, + 74.41902923583984, + 62.75996780395508 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.1855248659849167 + }, + { + "bbox": [ + 893.28857421875, + 213.1145782470703, + 46.3870849609375, + 34.87232971191406 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.17870844900608063 + }, + { + "bbox": [ + 611.6231079101562, + 106.5094223022461, + 44.85430908203125, + 29.061744689941406 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.17700931429862976 + }, + { + "bbox": [ + 847.1093139648438, + 286.3870849609375, + 56.32452392578125, + 86.06158447265625 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.16932892799377441 + }, + { + "bbox": [ + 445.4731140136719, + 97.76200103759766, + 49.56451416015625, + 45.203514099121094 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.16094166040420532 + }, + { + "bbox": [ + 83.2696304321289, + 238.672607421875, + 87.30387115478516, + 59.288787841796875 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.1571291834115982 + }, + { + "bbox": [ + 644.8650512695312, + 134.5099639892578, + 52.570556640625, + 45.77696228027344 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.14659520983695984 + }, + { + "bbox": [ + 798.9510498046875, + 176.64842224121094, + 34.15826416015625, + 27.026199340820312 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.14340169727802277 + }, + { + "bbox": [ + 289.8072204589844, + 2.8699655532836914, + 57.560302734375, + 31.036349296569824 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.12905792891979218 + }, + { + "bbox": [ + 273.2252502441406, + 120.26922607421875, + 33.325103759765625, + 36.83570861816406 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.12813062965869904 + }, + { + "bbox": [ + 536.1267700195312, + 301.2402038574219, + 105.0225830078125, + 164.69992065429688 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.1251327097415924 + }, + { + "bbox": [ + 577.738037109375, + 167.33460998535156, + 52.75921630859375, + 43.77146911621094 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.1169745996594429 + }, + { + "bbox": [ + 10.653980255126953, + 1.5155118703842163, + 64.12058639526367, + 63.142767548561096 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.11120772361755371 + }, + { + "bbox": [ + 290.7361145019531, + 305.92962646484375, + 81.94302368164062, + 186.35324096679688 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.10804451256990433 + }, + { + "bbox": [ + 383.0464172363281, + 33.47468948364258, + 42.016937255859375, + 40.26395034790039 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.10608372837305069 + }, + { + "bbox": [ + 373.3436279296875, + 299.032470703125, + 162.34857177734375, + 71.123291015625 + ], + "category_id": 1, + "image_id": 10034180000, + 
"score": 0.10598088800907135 + }, + { + "bbox": [ + 347.5797424316406, + 7.471529960632324, + 51.544647216796875, + 25.57726001739502 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.10507849603891373 + }, + { + "bbox": [ + 9.35350513458252, + 944.8892211914062, + 1300.14759349823, + 121.89459228515625 + ], + "category_id": 1, + "image_id": 10034180000, + "score": 0.21530765295028687 + }, + { + "bbox": [ + 639.7239379882812, + 226.8717498779297, + 344.6689453125, + 663.6336212158203 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.988675594329834 + }, + { + "bbox": [ + 6.2749924659729, + 351.6357116699219, + 243.3602614402771, + 364.3725280761719 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.956828773021698 + }, + { + "bbox": [ + 461.7480163574219, + 277.44110107421875, + 115.16329956054688, + 186.4822998046875 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.9538608193397522 + }, + { + "bbox": [ + 1768.55322265625, + 245.51446533203125, + 138.985595703125, + 304.20843505859375 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.9133968949317932 + }, + { + "bbox": [ + 1155.5684814453125, + 359.0439453125, + 191.2630615234375, + 272.81744384765625 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.9098905920982361 + }, + { + "bbox": [ + 1259.7314453125, + 366.961181640625, + 90.6544189453125, + 138.16278076171875 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.7968080043792725 + }, + { + "bbox": [ + 480.37066650390625, + 386.0138854980469, + 150.568115234375, + 280.1358337402344 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.7637147307395935 + }, + { + "bbox": [ + 263.7475280761719, + 188.89967346191406, + 90.03085327148438, + 113.91123962402344 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.7468248605728149 + }, + { + "bbox": [ + 162.36859130859375, + 187.40757751464844, + 105.68603515625, + 143.9015655517578 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.7130147814750671 + }, + { + "bbox": [ + 139.2628936767578, + 291.9899597167969, + 106.13040161132812, + 205.92654418945312 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.7115177512168884 + }, + { + "bbox": [ + 1365.2760009765625, + 246.45489501953125, + 66.708984375, + 145.35330200195312 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.6987277865409851 + }, + { + "bbox": [ + 1486.121337890625, + 449.1069641113281, + 68.625732421875, + 118.49978637695312 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.6513593792915344 + }, + { + "bbox": [ + 1354.540771484375, + 443.40478515625, + 147.19580078125, + 194.12603759765625 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.6448480486869812 + }, + { + "bbox": [ + 1363.81591796875, + 373.9744567871094, + 81.1202392578125, + 102.91085815429688 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.5243184566497803 + }, + { + "bbox": [ + 1514.0146484375, + 319.5240783691406, + 75.83056640625, + 144.65200805664062 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.504604697227478 + }, + { + "bbox": [ + 355.92431640625, + 377.6044921875, + 114.5035400390625, + 120.37677001953125 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.4970506429672241 + }, + { + "bbox": [ + 1582.33203125, + 266.6174621582031, + 98.7462158203125, + 264.5225524902344 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.48399269580841064 + }, + { + 
"bbox": [ + 353.9928283691406, + 371.8907470703125, + 121.08633422851562, + 262.55682373046875 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.4818037748336792 + }, + { + "bbox": [ + 362.9367370605469, + 147.3871612548828, + 75.418212890625, + 109.99433898925781 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.4351760447025299 + }, + { + "bbox": [ + 1241.2064208984375, + 368.8930969238281, + 127.748291015625, + 264.2134704589844 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.38909056782722473 + }, + { + "bbox": [ + 1681.270263671875, + 256.126220703125, + 83.576416015625, + 137.42578125 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.364656925201416 + }, + { + "bbox": [ + 0.0, + 167.76327514648438, + 91.63196563720703, + 236.555419921875 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.35032832622528076 + }, + { + "bbox": [ + 1439.95703125, + 270.9534606933594, + 100.35986328125, + 218.63064575195312 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.346635103225708 + }, + { + "bbox": [ + 1318.2305908203125, + 424.5197448730469, + 115.10791015625, + 192.50259399414062 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.3269309401512146 + }, + { + "bbox": [ + 1052.64013671875, + 287.7257385253906, + 63.3641357421875, + 172.54461669921875 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.24086904525756836 + }, + { + "bbox": [ + 1053.502197265625, + 331.1842346191406, + 227.3038330078125, + 310.5895080566406 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.21309363842010498 + }, + { + "bbox": [ + 1070.9603271484375, + 360.4552917480469, + 96.628173828125, + 133.9866943359375 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.18517304956912994 + }, + { + "bbox": [ + 1665.9293212890625, + 255.31796264648438, + 146.314697265625, + 291.3702697753906 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.17204511165618896 + }, + { + "bbox": [ + 405.0735778808594, + 386.8234558105469, + 190.69692993164062, + 313.5556945800781 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.15523910522460938 + }, + { + "bbox": [ + 1589.0211181640625, + 265.5631103515625, + 84.9398193359375, + 150.40841674804688 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.15313847362995148 + }, + { + "bbox": [ + 0.9758958220481873, + 422.1836853027344, + 142.32709795236588, + 306.2699279785156 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.146592915058136 + }, + { + "bbox": [ + 1419.790283203125, + 240.48899841308594, + 55.875, + 102.48948669433594 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.14388331770896912 + }, + { + "bbox": [ + 1142.052001953125, + 372.945068359375, + 375.743896484375, + 263.99609375 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.1362028419971466 + }, + { + "bbox": [ + 1149.924560546875, + 228.89898681640625, + 77.2176513671875, + 141.24282836914062 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.13104568421840668 + }, + { + "bbox": [ + 7.145267009735107, + 362.8689270019531, + 148.28553438186646, + 151.63449096679688 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.130157008767128 + }, + { + "bbox": [ + 1115.1795654296875, + 359.9970703125, + 55.0574951171875, + 73.02313232421875 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.1132773831486702 + }, + { + "bbox": [ + 1797.716552734375, + 246.42071533203125, 
+ 108.528076171875, + 179.66299438476562 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.10333290696144104 + }, + { + "bbox": [ + 1281.1473388671875, + 254.05291748046875, + 95.2158203125, + 128.24417114257812 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.09135308116674423 + }, + { + "bbox": [ + 483.60968017578125, + 383.16656494140625, + 106.47314453125, + 105.37130737304688 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.08747227489948273 + }, + { + "bbox": [ + 1183.970458984375, + 248.7894744873047, + 123.838623046875, + 133.18003845214844 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.07844730466604233 + }, + { + "bbox": [ + 1157.6649169921875, + 358.5057678222656, + 153.3060302734375, + 142.8681640625 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.07668760418891907 + }, + { + "bbox": [ + 158.5989532470703, + 3.899838924407959, + 94.29812622070312, + 113.55939722061157 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.0562337264418602 + }, + { + "bbox": [ + 1046.19189453125, + 303.1739196777344, + 146.7403564453125, + 295.9938049316406 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.05225243791937828 + }, + { + "bbox": [ + 1075.177490234375, + 351.35552978515625, + 187.2501220703125, + 145.95687866210938 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.052039798349142075 + }, + { + "bbox": [ + 4.226436614990234, + 596.753662109375, + 145.0108528137207, + 141.51971435546875 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.44805338978767395 + }, + { + "bbox": [ + 1471.1275634765625, + 546.7749633789062, + 409.1026611328125, + 85.891845703125 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.17510481178760529 + }, + { + "bbox": [ + 9.595407485961914, + 136.05421447753906, + 273.3134059906006, + 50.703155517578125 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.14366888999938965 + }, + { + "bbox": [ + 921.6530151367188, + 497.646484375, + 100.19329833984375, + 244.272216796875 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.41841089725494385 + }, + { + "bbox": [ + 1837.094482421875, + 311.22064208984375, + 30.9761962890625, + 48.001678466796875 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.08423541486263275 + }, + { + "bbox": [ + 1839.4462890625, + 311.10064697265625, + 37.092529296875, + 71.60287475585938 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.060598306357860565 + }, + { + "bbox": [ + 332.7347412109375, + 440.8306579589844, + 26.84356689453125, + 49.14508056640625 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.4217357635498047 + }, + { + "bbox": [ + 1074.7474365234375, + 455.2643127441406, + 38.0753173828125, + 24.68829345703125 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.10941091924905777 + }, + { + "bbox": [ + 1034.816162109375, + 433.4083251953125, + 37.64892578125, + 38.33526611328125 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.05890995264053345 + }, + { + "bbox": [ + 1133.7620849609375, + 508.0845642089844, + 70.1640625, + 130.23025512695312 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.4846752882003784 + }, + { + "bbox": [ + 3.005446195602417, + 553.9013671875, + 142.2049114704132, + 183.9932861328125 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.3487741947174072 + }, + { + "bbox": [ + 272.37786865234375, + 411.44207763671875, + 
81.43817138671875, + 55.8065185546875 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.21865300834178925 + }, + { + "bbox": [ + 0.24188603460788727, + 453.31536865234375, + 148.4058190435171, + 234.45562744140625 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.20409443974494934 + }, + { + "bbox": [ + 30.815982818603516, + 605.8007202148438, + 125.22259140014648, + 55.677490234375 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.1920752078294754 + }, + { + "bbox": [ + 1133.945556640625, + 486.0797424316406, + 142.3828125, + 149.95669555664062 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.1483757048845291 + }, + { + "bbox": [ + 1113.4261474609375, + 458.69744873046875, + 106.506103515625, + 181.26995849609375 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.13916584849357605 + }, + { + "bbox": [ + 436.55487060546875, + 457.7103576660156, + 156.08184814453125, + 253.82962036132812 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.11243928223848343 + }, + { + "bbox": [ + 284.70098876953125, + 303.1107482910156, + 85.747802734375, + 79.47280883789062 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.09735624492168427 + }, + { + "bbox": [ + 341.12774658203125, + 492.6709289550781, + 93.78155517578125, + 206.94662475585938 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.08169478923082352 + }, + { + "bbox": [ + 0.0, + 549.1785278320312, + 119.3069839477539, + 111.58819580078125 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.06240745261311531 + }, + { + "bbox": [ + 7.89318323135376, + 645.31689453125, + 136.12907934188843, + 87.29620361328125 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.054731015115976334 + }, + { + "bbox": [ + 213.0941619873047, + 411.15179443359375, + 42.60209655761719, + 32.3763427734375 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.05244819447398186 + }, + { + "bbox": [ + 130.32546997070312, + 487.7962951660156, + 300.62261962890625, + 236.79757690429688 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.8464512825012207 + }, + { + "bbox": [ + 943.7500610351562, + 463.9021911621094, + 207.76824951171875, + 177.45816040039062 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.6438319087028503 + }, + { + "bbox": [ + 126.81778717041016, + 458.4678955078125, + 290.0162582397461, + 57.33453369140625 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.1522291600704193 + }, + { + "bbox": [ + 936.41162109375, + 449.2172546386719, + 192.6654052734375, + 35.686859130859375 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.14834310114383698 + }, + { + "bbox": [ + 887.5518798828125, + 464.12335205078125, + 357.5780029296875, + 257.0013427734375 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.1072755679488182 + }, + { + "bbox": [ + 7.024689197540283, + 405.5135498046875, + 338.4464716911316, + 278.21929931640625 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.09115041047334671 + }, + { + "bbox": [ + 161.556884765625, + 482.5937805175781, + 242.09451293945312, + 30.944366455078125 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.06512065976858139 + }, + { + "bbox": [ + 933.17236328125, + 423.4557800292969, + 280.425048828125, + 68.74118041992188 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.06368233263492584 + }, + { + "bbox": [ + 121.80384063720703, + 488.6224060058594, + 
311.62242889404297, + 45.982086181640625 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.05987454578280449 + }, + { + "bbox": [ + 929.5904541015625, + 436.67840576171875, + 381.5384521484375, + 75.44134521484375 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.05840197578072548 + }, + { + "bbox": [ + 235.27882385253906, + 486.3999328613281, + 136.96372985839844, + 23.562835693359375 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.05732391029596329 + }, + { + "bbox": [ + 69.67058563232422, + 417.2310485839844, + 672.0211868286133, + 311.6492614746094 + ], + "category_id": 1, + "image_id": 10094730000, + "score": 0.05091623216867447 + } + +] diff --git a/tests/data/posetrack18/annotations/test_posetrack18_val.json b/tests/data/posetrack18/annotations/test_posetrack18_val.json index a419bee11a..a25e46fc4e 100644 --- a/tests/data/posetrack18/annotations/test_posetrack18_val.json +++ b/tests/data/posetrack18/annotations/test_posetrack18_val.json @@ -1,2382 +1,2382 @@ -{ - "categories": [ - { - "supercategory": "person", - "id": 1, - "name": "person", - "keypoints": [ - "nose", - "head_bottom", - "head_top", - "left_ear", - "right_ear", - "left_shoulder", - "right_shoulder", - "left_elbow", - "right_elbow", - "left_wrist", - "right_wrist", - "left_hip", - "right_hip", - "left_knee", - "right_knee", - "left_ankle", - "right_ankle" - ], - "skeleton": [ - [ - 16, - 14 - ], - [ - 14, - 12 - ], - [ - 17, - 15 - ], - [ - 15, - 13 - ], - [ - 12, - 13 - ], - [ - 6, - 12 - ], - [ - 7, - 13 - ], - [ - 6, - 7 - ], - [ - 6, - 8 - ], - [ - 7, - 9 - ], - [ - 8, - 10 - ], - [ - 9, - 11 - ], - [ - 2, - 3 - ], - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 4 - ], - [ - 3, - 5 - ], - [ - 4, - 6 - ], - [ - 5, - 7 - ] - ] - } - ], - "images": [ - { - "has_no_densepose": true, - "is_labeled": true, - "file_name": "images/val/012834_mpii_test/000000.jpg", - "nframes": 140, - "frame_id": 10128340000, - "vid_id": "012834", - "ignore_regions_y": [ - [ - 1079, - 615, - 612, - 674, - 660, - 664, - 678, - 713, - 704, - 667, - 665, - 678, - 700, - 729, - 753, - 740, - 695, - 668, - 646, - 623, - 624, - 659, - 676, - 685, - 695, - 678, - 675, - 673, - 663, - 693, - 703, - 732, - 719, - 690, - 669, - 660, - 660, - 663, - 689, - 697, - 691, - 697, - 713, - 736, - 721, - 703, - 698, - 708, - 741, - 758, - 778, - 795, - 780, - 757, - 745, - 737, - 745, - 782, - 809, - 850, - 881, - 835, - 810, - 806, - 782, - 782, - 806, - 832, - 863, - 897, - 940, - 931, - 957, - 976, - 1003, - 1043, - 1045, - 1008, - 965, - 963, - 931, - 895, - 846, - 832, - 829, - 869, - 898, - 904, - 869, - 836, - 782, - 752, - 736, - 748, - 776, - 832, - 874, - 862, - 818, - 793, - 790, - 756, - 738, - 737, - 750, - 791, - 806, - 820, - 852, - 879, - 837, - 794, - 772, - 766, - 769, - 790, - 805, - 829, - 844, - 866, - 837, - 804, - 791, - 773, - 745, - 706, - 683, - 644, - 638, - 662, - 694, - 716, - 736, - 777, - 784, - 815, - 830, - 813, - 800, - 813, - 820, - 847, - 829, - 781, - 780, - 801, - 836, - 886, - 938, - 1018, - 1029, - 1079 - ], - [ - 0, - 21, - 43, - 60, - 90, - 95, - 95, - 43, - 40, - 84, - 104, - 104, - 74, - 6, - 4, - 71, - 69, - 0 - ], - [ - 0, - 4, - 48, - 106, - 214, - 207, - 46, - 50, - 170, - 156, - 96, - 157, - 160, - 62, - 65, - 156, - 165, - 162, - 140, - 93, - 93, - 7, - 4, - 121, - 129, - 84, - 75, - 68 - ], - [ - 0, - 0, - 739, - 729, - 720, - 740, - 768, - 785, - 803, - 815, - 757, - 735, - 632, - 620, - 632, - 640, - 662, - 656, - 607, - 645, - 645, - 628, - 604, - 570, - 543, 
- 512, - 485, - 467, - 451, - 448, - 456, - 482, - 512, - 548, - 554, - 542, - 498, - 479, - 454, - 404, - 387, - 398, - 415, - 528, - 546, - 468, - 410, - 400, - 359, - 375, - 373, - 273, - 254, - 284, - 253, - 204, - 206 - ] - ], - "ignore_regions_x": [ - [ - 3, - 0, - 30, - 44, - 74, - 99, - 106, - 102, - 115, - 121, - 141, - 156, - 165, - 187, - 200, - 211, - 196, - 198, - 210, - 226, - 252, - 266, - 263, - 271, - 291, - 299, - 326, - 339, - 360, - 399, - 412, - 424, - 437, - 432, - 439, - 461, - 489, - 510, - 534, - 548, - 559, - 567, - 587, - 593, - 604, - 612, - 633, - 652, - 645, - 638, - 649, - 650, - 661, - 654, - 662, - 685, - 713, - 727, - 733, - 752, - 762, - 769, - 785, - 812, - 841, - 863, - 869, - 877, - 899, - 909, - 918, - 906, - 902, - 909, - 917, - 900, - 932, - 932, - 941, - 919, - 926, - 935, - 950, - 957, - 983, - 1002, - 1007, - 1032, - 1034, - 1018, - 1018, - 1038, - 1074, - 1106, - 1119, - 1121, - 1130, - 1148, - 1152, - 1172, - 1195, - 1199, - 1209, - 1229, - 1242, - 1240, - 1242, - 1261, - 1264, - 1277, - 1285, - 1286, - 1296, - 1313, - 1336, - 1350, - 1367, - 1403, - 1417, - 1435, - 1459, - 1456, - 1429, - 1420, - 1465, - 1492, - 1496, - 1507, - 1529, - 1553, - 1570, - 1596, - 1609, - 1610, - 1649, - 1671, - 1703, - 1740, - 1763, - 1775, - 1803, - 1809, - 1815, - 1815, - 1857, - 1874, - 1881, - 1897, - 1896, - 1899, - 1888, - 1884 - ], - [ - 378, - 381, - 365, - 359, - 334, - 292, - 257, - 262, - 231, - 236, - 219, - 193, - 196, - 183, - 154, - 159, - 140, - 121 - ], - [ - 451, - 1173, - 1168, - 1168, - 1170, - 1085, - 1098, - 1070, - 1043, - 1000, - 993, - 979, - 934, - 937, - 918, - 903, - 893, - 832, - 785, - 759, - 726, - 710, - 667, - 664, - 585, - 576, - 507, - 485 - ], - [ - 1312, - 1918, - 1917, - 1895, - 1867, - 1835, - 1804, - 1779, - 1754, - 1720, - 1726, - 1739, - 1740, - 1735, - 1701, - 1635, - 1587, - 1578, - 1587, - 1564, - 1550, - 1543, - 1562, - 1579, - 1578, - 1581, - 1584, - 1589, - 1601, - 1610, - 1621, - 1637, - 1642, - 1659, - 1673, - 1681, - 1673, - 1671, - 1664, - 1671, - 1681, - 1728, - 1734, - 1789, - 1854, - 1807, - 1820, - 1778, - 1778, - 1717, - 1642, - 1635, - 1600, - 1520, - 1454, - 1415, - 1395 - ] - ], - "id": 10128340000, - "width": 1920, - "height": 1080, - "mask_file": "mask/val/012834_mpii_test/000000.jpg" - }, - { - "has_no_densepose": true, - "is_labeled": true, - "file_name": "images/val/009473_mpii_test/000000.jpg", - "nframes": 101, - "frame_id": 10094730000, - "vid_id": "009473", - "ignore_regions_y": [ - [ - 228, - 237, - 243, - 230, - 220, - 230, - 228, - 232, - 220, - 211, - 226, - 258, - 364, - 417, - 475, - 562, - 615, - 646, - 656, - 637, - 649, - 645, - 603, - 585, - 547 - ], - [ - 0, - 1, - 137, - 130, - 166, - 235, - 309, - 384, - 452, - 526, - 583, - 658, - 694, - 709, - 599, - 684, - 707, - 733, - 660, - 679, - 762 - ] - ], - "ignore_regions_x": [ - [ - 1907, - 1820, - 1758, - 1662, - 1577, - 1492, - 1375, - 1305, - 1250, - 1177, - 1111, - 1033, - 1047, - 1062, - 1056, - 1130, - 1173, - 1232, - 1282, - 1332, - 1416, - 1471, - 1515, - 1541, - 1909 - ], - [ - 0, - 257, - 284, - 407, - 450, - 505, - 562, - 592, - 613, - 626, - 639, - 639, - 594, - 454, - 371, - 343, - 269, - 152, - 88, - 35, - 3 - ] - ], - "id": 10094730000, - "width": 1920, - "height": 1080, - "mask_file": "mask/val/009473_mpii_test/000000.jpg" - }, - { - "has_no_densepose": true, - "is_labeled": true, - "file_name": "images/val/003418_mpii_test/000000.jpg", - "nframes": 149, - "frame_id": 10034180000, - "vid_id": "003418", - 
"ignore_regions_y": [ - [ - 639, - 647, - 635, - 618, - 590, - 547, - 501, - 499, - 515, - 518, - 526, - 528, - 509, - 473, - 450, - 407, - 352, - 339, - 309, - 271, - 262, - 252, - 211, - 211, - 183, - 175, - 139, - 105, - 94, - 62, - 45, - 22, - 22, - 30, - 52, - 66, - 86, - 92, - 101, - 109, - 162, - 158, - 135, - 103, - 94, - 75, - 60, - 37, - 18, - 9, - 1, - 0, - 79, - 75, - 88, - 103, - 156, - 164, - 167, - 201, - 196, - 194, - 207, - 237, - 262, - 256 - ], - [ - 94, - 71, - 62, - 41, - 28, - 30, - 43, - 64, - 88, - 96, - 120, - 115, - 98 - ], - [ - 235, - 215, - 211, - 224, - 252, - 239, - 207, - 196, - 211, - 228, - 218, - 198, - 181, - 186, - 198, - 218, - 233, - 252, - 277, - 279, - 292, - 318, - 347, - 354, - 392, - 430, - 439, - 447, - 462, - 477, - 496, - 539, - 515, - 464, - 426, - 398, - 366, - 333, - 322, - 315, - 318, - 303, - 298, - 294, - 266, - 245 - ], - [ - 207, - 213, - 239, - 237, - 215, - 179, - 179, - 184, - 209, - 222, - 239, - 264, - 279, - 271, - 269, - 290, - 260, - 226 - ], - [ - 194, - 175, - 160, - 164, - 179, - 167, - 160, - 216, - 266, - 262, - 266, - 269, - 281, - 298, - 309, - 318, - 332, - 341, - 345, - 345, - 290, - 262, - 226 - ], - [ - 424, - 442, - 432, - 432, - 455, - 469, - 474, - 505, - 559, - 555, - 465, - 449, - 444 - ], - [ - 926, - 901, - 857, - 792, - 751, - 694, - 636, - 540, - 474, - 403, - 351, - 265, - 211, - 155, - 98, - 71, - 40, - 0, - 0, - 1078, - 1078, - 1007, - 924, - 869, - 807, - 865, - 892, - 955, - 1003, - 1057, - 1078, - 1078, - 1030, - 961, - 926 - ], - [ - 1050, - 1076, - 1078, - 1057, - 1032, - 1013, - 998, - 982, - 971, - 951, - 936, - 913, - 888, - 844, - 799, - 763, - 732, - 723, - 713, - 753, - 784, - 817, - 830, - 871, - 911, - 930 - ], - [ - 478, - 461, - 423, - 405, - 394, - 263, - 257, - 265, - 290, - 315, - 334, - 342, - 344, - 411, - 448, - 448, - 448, - 430, - 424, - 423, - 421, - 409, - 444 - ] - ], - "ignore_regions_x": [ - [ - 0, - 1, - 198, - 258, - 307, - 337, - 343, - 335, - 320, - 290, - 273, - 260, - 232, - 190, - 196, - 183, - 171, - 162, - 149, - 132, - 105, - 77, - 69, - 322, - 324, - 303, - 279, - 273, - 247, - 224, - 226, - 215, - 203, - 190, - 192, - 181, - 169, - 167, - 154, - 139, - 99, - 86, - 81, - 56, - 60, - 62, - 60, - 49, - 35, - 15, - 9, - 3, - 0, - 11, - 13, - 0, - 1, - 9, - 90, - 64, - 49, - 33, - 18, - 13, - 15, - 0 - ], - [ - 341, - 347, - 352, - 356, - 371, - 383, - 388, - 392, - 403, - 392, - 398, - 377, - 375 - ], - [ - 688, - 694, - 713, - 724, - 728, - 752, - 764, - 783, - 796, - 796, - 824, - 828, - 839, - 856, - 864, - 864, - 884, - 899, - 903, - 843, - 854, - 854, - 850, - 884, - 901, - 873, - 833, - 815, - 796, - 747, - 716, - 666, - 654, - 660, - 667, - 694, - 711, - 724, - 737, - 775, - 792, - 790, - 756, - 688, - 686, - 686 - ], - [ - 1047, - 1079, - 1079, - 1088, - 1099, - 1103, - 1122, - 1133, - 1141, - 1164, - 1175, - 1164, - 1181, - 1126, - 1092, - 1077, - 1069, - 1047 - ], - [ - 1252, - 1254, - 1258, - 1277, - 1292, - 1301, - 1322, - 1330, - 1350, - 1322, - 1296, - 1277, - 1256, - 1233, - 1213, - 1198, - 1173, - 1130, - 1098, - 1081, - 1101, - 1198, - 1232 - ], - [ - 1165, - 1184, - 1226, - 1246, - 1238, - 1226, - 1209, - 1215, - 1180, - 1126, - 1057, - 1053, - 1128 - ], - [ - 1455, - 1438, - 1438, - 1444, - 1442, - 1423, - 1426, - 1409, - 1399, - 1390, - 1374, - 1349, - 1330, - 1319, - 1307, - 1309, - 1324, - 1330, - 1919, - 1919, - 1884, - 1855, - 1830, - 1828, - 1776, - 1732, - 1734, - 1726, - 1728, - 1707, - 1713, - 1469, - 1461, - 1459, - 1455 - ], - [ - 1463, - 
1411, - 1255, - 1250, - 1246, - 1261, - 1284, - 1280, - 1271, - 1265, - 1275, - 1299, - 1299, - 1296, - 1315, - 1340, - 1365, - 1396, - 1444, - 1428, - 1434, - 1432, - 1446, - 1440, - 1453, - 1455 - ], - [ - 1246, - 1271, - 1313, - 1344, - 1384, - 1346, - 1307, - 1286, - 1255, - 1203, - 1153, - 1096, - 1078, - 1061, - 1036, - 1090, - 1121, - 1148, - 1169, - 1205, - 1228, - 1265, - 1267 - ] - ], - "id": 10034180000, - "width": 1920, - "height": 1080, - "mask_file": "mask/val/003418_mpii_test/000000.jpg" - } - ], - "annotations": [ - { - "bbox_head": [ - 378, - 503, - 44, - 53 - ], - "keypoints": [ - 401, - 530, - 1, - 409.5254211, - 555.3547363, - 1, - 392.8559265, - 510.1089478, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 377, - 560, - 1, - 444, - 556, - 1, - 353, - 605, - 1, - 469.5, - 603.5, - 1, - 341.5, - 653.5, - 1, - 463, - 635, - 1, - 389, - 652, - 1, - 442, - 646, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "track_id": 0, - "image_id": 10128340000, - "bbox": [ - 322.3, - 488.60028996999995, - 166.39999999999998, - 186.40836786 - ], - "scores": [], - "category_id": 1, - "id": 1012834000000, - "iscrowd": false, - "num_keypoints": 11 - }, - { - "bbox_head": [ - 571, - 446, - 42, - 46 - ], - "keypoints": [ - 600.5, - 475.5, - 1, - 590.4649048, - 493.8685303, - 1, - 593.1513062, - 450.3486023, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 570.5, - 509.5, - 1, - 608.5, - 509.5, - 1, - 539, - 558.5, - 1, - 634, - 539, - 1, - 558.5, - 584.5, - 1, - 624.5, - 528.5, - 1, - 605, - 595, - 1, - 601, - 593, - 1, - 640, - 634.5, - 1, - 598, - 672, - 1, - 616.5, - 700.5, - 1, - 0, - 0, - 0 - ], - "track_id": 1, - "image_id": 10128340000, - "bbox": [ - 523.85, - 412.825892645, - 131.29999999999995, - 325.19681700999996 - ], - "scores": [], - "category_id": 1, - "id": 1012834000001, - "iscrowd": false, - "num_keypoints": 14 - }, - { - "bbox_head": [ - 159, - 259, - 42, - 47 - ], - "keypoints": [ - 201, - 284.5, - 1, - 169.9334106, - 305.6158752, - 1, - 187.549942, - 265.1630859, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 139.5, - 307.5, - 1, - 193.5, - 319.5, - 1, - 0, - 0, - 0, - 209, - 371, - 1, - 144, - 365.5, - 1, - 231, - 392, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 222, - 337, - 1, - 241, - 341.5, - 1, - 0, - 0, - 0, - 267, - 416, - 1 - ], - "track_id": 2, - "image_id": 10128340000, - "bbox": [ - 120.375, - 242.53754878499996, - 165.75, - 196.08798833000003 - ], - "scores": [], - "category_id": 1, - "id": 1012834000002, - "iscrowd": false, - "num_keypoints": 11 - }, - { - "bbox_head": [ - 372, - 205, - 44, - 44 - ], - "keypoints": [ - 410.5, - 230.5, - 1, - 387.8875732, - 251.1279602, - 1, - 398.5843201, - 208.9040375, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 359.5, - 262.5, - 1, - 409.5, - 266.5, - 1, - 337.5, - 308.5, - 1, - 450, - 306, - 1, - 292, - 314, - 1, - 480, - 311.5, - 1, - 390, - 339, - 1, - 409, - 339, - 1, - 405.5, - 418.5, - 1, - 447.5, - 366.5, - 1, - 391.5, - 464.5, - 1, - 437.5, - 440.5, - 1 - ], - "track_id": 3, - "image_id": 10128340000, - "bbox": [ - 263.8, - 170.56464312499998, - 244.39999999999998, - 332.27475125 - ], - "scores": [], - "category_id": 1, - "id": 1012834000003, - "iscrowd": false, - "num_keypoints": 15 - }, - { - "bbox_head": [ - 693, - 410, - 44, - 49 - ], - "keypoints": [ - 718.5, - 440.5, - 1, - 717.704834, - 460.703125, - 1, - 712.9713745, - 414.8476562, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 694.5, - 474, - 1, - 743.5, - 472.5, - 1, - 681.5, - 530.5, - 1, - 757.5, - 523.5, - 1, - 667.5, - 564.5, - 1, - 0, - 0, - 0, - 705.5, - 563.5, - 1, - 737.5, - 560.5, 
- 1, - 0, - 0, - 0, - 0, - 0, - 0, - 692.5, - 607.5, - 1, - 716.5, - 603.5, - 1 - ], - "track_id": 4, - "image_id": 10128340000, - "bbox": [ - 654.0, - 385.94980463, - 117.0, - 250.44804694000004 - ], - "scores": [], - "category_id": 1, - "id": 1012834000004, - "iscrowd": false, - "num_keypoints": 12 - }, - { - "bbox_head": [ - 923, - 347, - 46, - 58 - ], - "keypoints": [ - 965.5, - 382.5, - 1, - 933.9436646, - 403.0452576, - 1, - 955.0422363, - 355.7160645, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 922.5, - 403.5, - 1, - 932.5, - 431.5, - 1, - 0, - 0, - 0, - 960, - 475.5, - 1, - 0, - 0, - 0, - 991.5, - 462.5, - 1, - 934.5, - 512.5, - 1, - 922.5, - 506.5, - 1, - 946.5, - 567.5, - 1, - 964, - 578, - 1, - 900.5, - 598, - 1, - 936, - 634.5, - 1 - ], - "track_id": 5, - "image_id": 10128340000, - "bbox": [ - 886.85, - 313.89847417500005, - 118.29999999999995, - 362.4191161499999 - ], - "scores": [], - "category_id": 1, - "id": 1012834000005, - "iscrowd": false, - "num_keypoints": 13 - }, - { - "bbox_head": [ - 691, - 179, - 43, - 52 - ], - "keypoints": [ - 708.5, - 212.5, - 1, - 722.6444702, - 230.0113831, - 1, - 704.8916626, - 186.2414551, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 712, - 244, - 1, - 742, - 237.5, - 1, - 723, - 293.5, - 1, - 745.5, - 281.5, - 1, - 692, - 319, - 1, - 0, - 0, - 0, - 722, - 323.5, - 1, - 748.5, - 314, - 1, - 657.5, - 301.5, - 1, - 668.5, - 299.5, - 1, - 670.5, - 367.5, - 1, - 689.5, - 362.5, - 1 - ], - "track_id": 6, - "image_id": 10128340000, - "bbox": [ - 643.85, - 159.05267336499998, - 118.29999999999995, - 235.63610837 - ], - "scores": [], - "category_id": 1, - "id": 1012834000006, - "iscrowd": false, - "num_keypoints": 14 - }, - { - "bbox_head": [ - 927, - 160, - 39, - 52 - ], - "keypoints": [ - 952, - 189, - 1, - 946.763916, - 211.9986572, - 1, - 946.302063, - 166.5010071, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 914.5, - 234, - 1, - 979.5, - 236.5, - 1, - 890.5, - 270.5, - 1, - 998.5, - 286.5, - 1, - 894.5, - 324, - 1, - 0, - 0, - 0, - 932, - 326.5, - 1, - 958.5, - 327.5, - 1, - 1000.5, - 340.5, - 1, - 993.5, - 372.5, - 1, - 955.5, - 383.5, - 1, - 959.5, - 446.5, - 1 - ], - "track_id": 7, - "image_id": 10128340000, - "bbox": [ - 874.0, - 124.50115816500005, - 143.0, - 363.99869076999994 - ], - "scores": [], - "category_id": 1, - "id": 1012834000007, - "iscrowd": false, - "num_keypoints": 14 - }, - { - "bbox_head": [ - 1367, - 427, - 47, - 45 - ], - "keypoints": [ - 1406, - 451, - 1, - 1379.198608, - 472.946106, - 1, - 1398.976074, - 431.9154358, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 1375.5, - 467.5, - 1, - 1372, - 501, - 1, - 0, - 0, - 0, - 1343.5, - 534.5, - 1, - 0, - 0, - 0, - 1339.5, - 573.5, - 1, - 1381.5, - 531.5, - 1, - 1376, - 539.5, - 1, - 1452.5, - 524.5, - 1, - 1453.5, - 535.5, - 1, - 1469.5, - 603.5, - 1, - 1466, - 610, - 1 - ], - "track_id": 8, - "image_id": 10128340000, - "bbox": [ - 1320.0, - 405.20275117000006, - 169.0, - 231.50993345999996 - ], - "scores": [], - "category_id": 1, - "id": 1012834000008, - "iscrowd": false, - "num_keypoints": 13 - }, - { - "bbox_head": [ - 1378, - 204, - 40, - 44 - ], - "keypoints": [ - 1389, - 234, - 1, - 1404.137573, - 248.9802094, - 1, - 1393.396851, - 208.7648468, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 1375, - 272, - 1, - 1442.5, - 260.5, - 1, - 1374, - 315, - 1, - 1468, - 303.5, - 1, - 1367, - 340.5, - 1, - 1462.5, - 330.5, - 1, - 1407, - 349.5, - 1, - 1439, - 340.5, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "track_id": 9, - "image_id": 10128340000, - "bbox": [ - 1351.85, - 187.65457382, - 
131.30000000000018, - 182.95569916 - ], - "scores": [], - "category_id": 1, - "id": 1012834000009, - "iscrowd": false, - "num_keypoints": 11 - }, - { - "bbox_head": [ - 407, - -29, - 35, - 40 - ], - "keypoints": [ - 0, - 0, - 0, - 425.1159668, - 12.25136662, - 1, - 424.0380249, - -24.93852425, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 455.5, - 21.5, - 1, - 395.5, - 29.5, - 1, - 474.5, - 64.5, - 1, - 391.5, - 67, - 1, - 474, - 108, - 1, - 379, - 107, - 1, - 446, - 88, - 1, - 426, - 88, - 1, - 424, - 113, - 1, - 403, - 113, - 1, - 430, - 173, - 1, - 415, - 171, - 1 - ], - "track_id": 10, - "image_id": 10128340000, - "bbox": [ - 364.675, - -54.62930288750002, - 124.14999999999998, - 257.32008152500003 - ], - "scores": [], - "category_id": 1, - "id": 1012834000010, - "iscrowd": false, - "num_keypoints": 14 - }, - { - "bbox_head": [ - 648, - 253, - 138, - 103 - ], - "keypoints": [ - 750, - 297.5, - 1, - 734.1937256, - 371.1997375, - 1, - 704.1047363, - 254.4751892, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 697, - 352.5, - 1, - 766, - 369, - 1, - 0, - 0, - 0, - 857, - 419, - 1, - 766, - 281, - 1, - 942, - 422.5, - 1, - 722, - 525, - 1, - 792, - 544, - 1, - 783, - 697, - 1, - 755, - 699, - 1, - 808.5, - 832.5, - 1, - 672, - 782.5, - 1 - ], - "track_id": 0, - "image_id": 10094730000, - "bbox": [ - 631.5, - 167.77146757999992, - 351.0, - 751.4322540400001 - ], - "scores": [], - "category_id": 1, - "id": 1009473000000, - "iscrowd": false, - "num_keypoints": 14 - }, - { - "bbox_head": [ - 878, - 201, - 90, - 125 - ], - "keypoints": [ - 900, - 272, - 1, - 905.657959, - 322.6206665, - 1, - 936.0065308, - 219.1595001, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 950, - 366, - 1, - 852, - 311, - 1, - 1021, - 428, - 1, - 759, - 303, - 1, - 986, - 422, - 1, - 704, - 374, - 1, - 912, - 516, - 1, - 856, - 524, - 1, - 876, - 663, - 1, - 908, - 680, - 1, - 849, - 828, - 1, - 959, - 804, - 1 - ], - "track_id": 1, - "image_id": 10094730000, - "bbox": [ - 656.45, - 127.83342511500007, - 412.0999999999999, - 791.4926498699999 - ], - "scores": [], - "category_id": 1, - "id": 1009473000001, - "iscrowd": false, - "num_keypoints": 15 - }, - { - "bbox_head": [ - 346, - 337, - 296, - 237 - ], - "keypoints": [ - 621, - 471, - 1, - 542.2835693, - 599.2855835, - 1, - 457.787323, - 347.6607971, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 371, - 737, - 1, - 753, - 579, - 1, - 387, - 981, - 1, - 1023, - 353, - 1, - 0, - 0, - 0, - 953, - 141, - 1, - 968, - 833, - 1, - 1152, - 843, - 1, - 0, - 0, - 0, - 1416, - 429, - 1, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "track_id": 0, - "image_id": 10034180000, - "bbox": [ - 214.25, - 15.0, - 1358.5, - 1092.0 - ], - "scores": [], - "category_id": 1, - "id": 1003418000000, - "iscrowd": false, - "num_keypoints": 11 - } - ] -} +{ + "categories": [ + { + "supercategory": "person", + "id": 1, + "name": "person", + "keypoints": [ + "nose", + "head_bottom", + "head_top", + "left_ear", + "right_ear", + "left_shoulder", + "right_shoulder", + "left_elbow", + "right_elbow", + "left_wrist", + "right_wrist", + "left_hip", + "right_hip", + "left_knee", + "right_knee", + "left_ankle", + "right_ankle" + ], + "skeleton": [ + [ + 16, + 14 + ], + [ + 14, + 12 + ], + [ + 17, + 15 + ], + [ + 15, + 13 + ], + [ + 12, + 13 + ], + [ + 6, + 12 + ], + [ + 7, + 13 + ], + [ + 6, + 7 + ], + [ + 6, + 8 + ], + [ + 7, + 9 + ], + [ + 8, + 10 + ], + [ + 9, + 11 + ], + [ + 2, + 3 + ], + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 4 + ], + [ + 3, + 5 + ], + [ + 4, + 6 + ], + [ + 5, + 7 + ] + ] + } + ], + "images": [ + { + "has_no_densepose": true, + 
"is_labeled": true, + "file_name": "images/val/012834_mpii_test/000000.jpg", + "nframes": 140, + "frame_id": 10128340000, + "vid_id": "012834", + "ignore_regions_y": [ + [ + 1079, + 615, + 612, + 674, + 660, + 664, + 678, + 713, + 704, + 667, + 665, + 678, + 700, + 729, + 753, + 740, + 695, + 668, + 646, + 623, + 624, + 659, + 676, + 685, + 695, + 678, + 675, + 673, + 663, + 693, + 703, + 732, + 719, + 690, + 669, + 660, + 660, + 663, + 689, + 697, + 691, + 697, + 713, + 736, + 721, + 703, + 698, + 708, + 741, + 758, + 778, + 795, + 780, + 757, + 745, + 737, + 745, + 782, + 809, + 850, + 881, + 835, + 810, + 806, + 782, + 782, + 806, + 832, + 863, + 897, + 940, + 931, + 957, + 976, + 1003, + 1043, + 1045, + 1008, + 965, + 963, + 931, + 895, + 846, + 832, + 829, + 869, + 898, + 904, + 869, + 836, + 782, + 752, + 736, + 748, + 776, + 832, + 874, + 862, + 818, + 793, + 790, + 756, + 738, + 737, + 750, + 791, + 806, + 820, + 852, + 879, + 837, + 794, + 772, + 766, + 769, + 790, + 805, + 829, + 844, + 866, + 837, + 804, + 791, + 773, + 745, + 706, + 683, + 644, + 638, + 662, + 694, + 716, + 736, + 777, + 784, + 815, + 830, + 813, + 800, + 813, + 820, + 847, + 829, + 781, + 780, + 801, + 836, + 886, + 938, + 1018, + 1029, + 1079 + ], + [ + 0, + 21, + 43, + 60, + 90, + 95, + 95, + 43, + 40, + 84, + 104, + 104, + 74, + 6, + 4, + 71, + 69, + 0 + ], + [ + 0, + 4, + 48, + 106, + 214, + 207, + 46, + 50, + 170, + 156, + 96, + 157, + 160, + 62, + 65, + 156, + 165, + 162, + 140, + 93, + 93, + 7, + 4, + 121, + 129, + 84, + 75, + 68 + ], + [ + 0, + 0, + 739, + 729, + 720, + 740, + 768, + 785, + 803, + 815, + 757, + 735, + 632, + 620, + 632, + 640, + 662, + 656, + 607, + 645, + 645, + 628, + 604, + 570, + 543, + 512, + 485, + 467, + 451, + 448, + 456, + 482, + 512, + 548, + 554, + 542, + 498, + 479, + 454, + 404, + 387, + 398, + 415, + 528, + 546, + 468, + 410, + 400, + 359, + 375, + 373, + 273, + 254, + 284, + 253, + 204, + 206 + ] + ], + "ignore_regions_x": [ + [ + 3, + 0, + 30, + 44, + 74, + 99, + 106, + 102, + 115, + 121, + 141, + 156, + 165, + 187, + 200, + 211, + 196, + 198, + 210, + 226, + 252, + 266, + 263, + 271, + 291, + 299, + 326, + 339, + 360, + 399, + 412, + 424, + 437, + 432, + 439, + 461, + 489, + 510, + 534, + 548, + 559, + 567, + 587, + 593, + 604, + 612, + 633, + 652, + 645, + 638, + 649, + 650, + 661, + 654, + 662, + 685, + 713, + 727, + 733, + 752, + 762, + 769, + 785, + 812, + 841, + 863, + 869, + 877, + 899, + 909, + 918, + 906, + 902, + 909, + 917, + 900, + 932, + 932, + 941, + 919, + 926, + 935, + 950, + 957, + 983, + 1002, + 1007, + 1032, + 1034, + 1018, + 1018, + 1038, + 1074, + 1106, + 1119, + 1121, + 1130, + 1148, + 1152, + 1172, + 1195, + 1199, + 1209, + 1229, + 1242, + 1240, + 1242, + 1261, + 1264, + 1277, + 1285, + 1286, + 1296, + 1313, + 1336, + 1350, + 1367, + 1403, + 1417, + 1435, + 1459, + 1456, + 1429, + 1420, + 1465, + 1492, + 1496, + 1507, + 1529, + 1553, + 1570, + 1596, + 1609, + 1610, + 1649, + 1671, + 1703, + 1740, + 1763, + 1775, + 1803, + 1809, + 1815, + 1815, + 1857, + 1874, + 1881, + 1897, + 1896, + 1899, + 1888, + 1884 + ], + [ + 378, + 381, + 365, + 359, + 334, + 292, + 257, + 262, + 231, + 236, + 219, + 193, + 196, + 183, + 154, + 159, + 140, + 121 + ], + [ + 451, + 1173, + 1168, + 1168, + 1170, + 1085, + 1098, + 1070, + 1043, + 1000, + 993, + 979, + 934, + 937, + 918, + 903, + 893, + 832, + 785, + 759, + 726, + 710, + 667, + 664, + 585, + 576, + 507, + 485 + ], + [ + 1312, + 1918, + 1917, + 1895, + 1867, + 1835, + 1804, + 1779, + 1754, + 1720, + 1726, + 
1739, + 1740, + 1735, + 1701, + 1635, + 1587, + 1578, + 1587, + 1564, + 1550, + 1543, + 1562, + 1579, + 1578, + 1581, + 1584, + 1589, + 1601, + 1610, + 1621, + 1637, + 1642, + 1659, + 1673, + 1681, + 1673, + 1671, + 1664, + 1671, + 1681, + 1728, + 1734, + 1789, + 1854, + 1807, + 1820, + 1778, + 1778, + 1717, + 1642, + 1635, + 1600, + 1520, + 1454, + 1415, + 1395 + ] + ], + "id": 10128340000, + "width": 1920, + "height": 1080, + "mask_file": "mask/val/012834_mpii_test/000000.jpg" + }, + { + "has_no_densepose": true, + "is_labeled": true, + "file_name": "images/val/009473_mpii_test/000000.jpg", + "nframes": 101, + "frame_id": 10094730000, + "vid_id": "009473", + "ignore_regions_y": [ + [ + 228, + 237, + 243, + 230, + 220, + 230, + 228, + 232, + 220, + 211, + 226, + 258, + 364, + 417, + 475, + 562, + 615, + 646, + 656, + 637, + 649, + 645, + 603, + 585, + 547 + ], + [ + 0, + 1, + 137, + 130, + 166, + 235, + 309, + 384, + 452, + 526, + 583, + 658, + 694, + 709, + 599, + 684, + 707, + 733, + 660, + 679, + 762 + ] + ], + "ignore_regions_x": [ + [ + 1907, + 1820, + 1758, + 1662, + 1577, + 1492, + 1375, + 1305, + 1250, + 1177, + 1111, + 1033, + 1047, + 1062, + 1056, + 1130, + 1173, + 1232, + 1282, + 1332, + 1416, + 1471, + 1515, + 1541, + 1909 + ], + [ + 0, + 257, + 284, + 407, + 450, + 505, + 562, + 592, + 613, + 626, + 639, + 639, + 594, + 454, + 371, + 343, + 269, + 152, + 88, + 35, + 3 + ] + ], + "id": 10094730000, + "width": 1920, + "height": 1080, + "mask_file": "mask/val/009473_mpii_test/000000.jpg" + }, + { + "has_no_densepose": true, + "is_labeled": true, + "file_name": "images/val/003418_mpii_test/000000.jpg", + "nframes": 149, + "frame_id": 10034180000, + "vid_id": "003418", + "ignore_regions_y": [ + [ + 639, + 647, + 635, + 618, + 590, + 547, + 501, + 499, + 515, + 518, + 526, + 528, + 509, + 473, + 450, + 407, + 352, + 339, + 309, + 271, + 262, + 252, + 211, + 211, + 183, + 175, + 139, + 105, + 94, + 62, + 45, + 22, + 22, + 30, + 52, + 66, + 86, + 92, + 101, + 109, + 162, + 158, + 135, + 103, + 94, + 75, + 60, + 37, + 18, + 9, + 1, + 0, + 79, + 75, + 88, + 103, + 156, + 164, + 167, + 201, + 196, + 194, + 207, + 237, + 262, + 256 + ], + [ + 94, + 71, + 62, + 41, + 28, + 30, + 43, + 64, + 88, + 96, + 120, + 115, + 98 + ], + [ + 235, + 215, + 211, + 224, + 252, + 239, + 207, + 196, + 211, + 228, + 218, + 198, + 181, + 186, + 198, + 218, + 233, + 252, + 277, + 279, + 292, + 318, + 347, + 354, + 392, + 430, + 439, + 447, + 462, + 477, + 496, + 539, + 515, + 464, + 426, + 398, + 366, + 333, + 322, + 315, + 318, + 303, + 298, + 294, + 266, + 245 + ], + [ + 207, + 213, + 239, + 237, + 215, + 179, + 179, + 184, + 209, + 222, + 239, + 264, + 279, + 271, + 269, + 290, + 260, + 226 + ], + [ + 194, + 175, + 160, + 164, + 179, + 167, + 160, + 216, + 266, + 262, + 266, + 269, + 281, + 298, + 309, + 318, + 332, + 341, + 345, + 345, + 290, + 262, + 226 + ], + [ + 424, + 442, + 432, + 432, + 455, + 469, + 474, + 505, + 559, + 555, + 465, + 449, + 444 + ], + [ + 926, + 901, + 857, + 792, + 751, + 694, + 636, + 540, + 474, + 403, + 351, + 265, + 211, + 155, + 98, + 71, + 40, + 0, + 0, + 1078, + 1078, + 1007, + 924, + 869, + 807, + 865, + 892, + 955, + 1003, + 1057, + 1078, + 1078, + 1030, + 961, + 926 + ], + [ + 1050, + 1076, + 1078, + 1057, + 1032, + 1013, + 998, + 982, + 971, + 951, + 936, + 913, + 888, + 844, + 799, + 763, + 732, + 723, + 713, + 753, + 784, + 817, + 830, + 871, + 911, + 930 + ], + [ + 478, + 461, + 423, + 405, + 394, + 263, + 257, + 265, + 290, + 315, + 334, + 342, + 344, + 411, + 
448, + 448, + 448, + 430, + 424, + 423, + 421, + 409, + 444 + ] + ], + "ignore_regions_x": [ + [ + 0, + 1, + 198, + 258, + 307, + 337, + 343, + 335, + 320, + 290, + 273, + 260, + 232, + 190, + 196, + 183, + 171, + 162, + 149, + 132, + 105, + 77, + 69, + 322, + 324, + 303, + 279, + 273, + 247, + 224, + 226, + 215, + 203, + 190, + 192, + 181, + 169, + 167, + 154, + 139, + 99, + 86, + 81, + 56, + 60, + 62, + 60, + 49, + 35, + 15, + 9, + 3, + 0, + 11, + 13, + 0, + 1, + 9, + 90, + 64, + 49, + 33, + 18, + 13, + 15, + 0 + ], + [ + 341, + 347, + 352, + 356, + 371, + 383, + 388, + 392, + 403, + 392, + 398, + 377, + 375 + ], + [ + 688, + 694, + 713, + 724, + 728, + 752, + 764, + 783, + 796, + 796, + 824, + 828, + 839, + 856, + 864, + 864, + 884, + 899, + 903, + 843, + 854, + 854, + 850, + 884, + 901, + 873, + 833, + 815, + 796, + 747, + 716, + 666, + 654, + 660, + 667, + 694, + 711, + 724, + 737, + 775, + 792, + 790, + 756, + 688, + 686, + 686 + ], + [ + 1047, + 1079, + 1079, + 1088, + 1099, + 1103, + 1122, + 1133, + 1141, + 1164, + 1175, + 1164, + 1181, + 1126, + 1092, + 1077, + 1069, + 1047 + ], + [ + 1252, + 1254, + 1258, + 1277, + 1292, + 1301, + 1322, + 1330, + 1350, + 1322, + 1296, + 1277, + 1256, + 1233, + 1213, + 1198, + 1173, + 1130, + 1098, + 1081, + 1101, + 1198, + 1232 + ], + [ + 1165, + 1184, + 1226, + 1246, + 1238, + 1226, + 1209, + 1215, + 1180, + 1126, + 1057, + 1053, + 1128 + ], + [ + 1455, + 1438, + 1438, + 1444, + 1442, + 1423, + 1426, + 1409, + 1399, + 1390, + 1374, + 1349, + 1330, + 1319, + 1307, + 1309, + 1324, + 1330, + 1919, + 1919, + 1884, + 1855, + 1830, + 1828, + 1776, + 1732, + 1734, + 1726, + 1728, + 1707, + 1713, + 1469, + 1461, + 1459, + 1455 + ], + [ + 1463, + 1411, + 1255, + 1250, + 1246, + 1261, + 1284, + 1280, + 1271, + 1265, + 1275, + 1299, + 1299, + 1296, + 1315, + 1340, + 1365, + 1396, + 1444, + 1428, + 1434, + 1432, + 1446, + 1440, + 1453, + 1455 + ], + [ + 1246, + 1271, + 1313, + 1344, + 1384, + 1346, + 1307, + 1286, + 1255, + 1203, + 1153, + 1096, + 1078, + 1061, + 1036, + 1090, + 1121, + 1148, + 1169, + 1205, + 1228, + 1265, + 1267 + ] + ], + "id": 10034180000, + "width": 1920, + "height": 1080, + "mask_file": "mask/val/003418_mpii_test/000000.jpg" + } + ], + "annotations": [ + { + "bbox_head": [ + 378, + 503, + 44, + 53 + ], + "keypoints": [ + 401, + 530, + 1, + 409.5254211, + 555.3547363, + 1, + 392.8559265, + 510.1089478, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 377, + 560, + 1, + 444, + 556, + 1, + 353, + 605, + 1, + 469.5, + 603.5, + 1, + 341.5, + 653.5, + 1, + 463, + 635, + 1, + 389, + 652, + 1, + 442, + 646, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "track_id": 0, + "image_id": 10128340000, + "bbox": [ + 322.3, + 488.60028996999995, + 166.39999999999998, + 186.40836786 + ], + "scores": [], + "category_id": 1, + "id": 1012834000000, + "iscrowd": false, + "num_keypoints": 11 + }, + { + "bbox_head": [ + 571, + 446, + 42, + 46 + ], + "keypoints": [ + 600.5, + 475.5, + 1, + 590.4649048, + 493.8685303, + 1, + 593.1513062, + 450.3486023, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 570.5, + 509.5, + 1, + 608.5, + 509.5, + 1, + 539, + 558.5, + 1, + 634, + 539, + 1, + 558.5, + 584.5, + 1, + 624.5, + 528.5, + 1, + 605, + 595, + 1, + 601, + 593, + 1, + 640, + 634.5, + 1, + 598, + 672, + 1, + 616.5, + 700.5, + 1, + 0, + 0, + 0 + ], + "track_id": 1, + "image_id": 10128340000, + "bbox": [ + 523.85, + 412.825892645, + 131.29999999999995, + 325.19681700999996 + ], + "scores": [], + "category_id": 1, + "id": 1012834000001, + "iscrowd": false, + 
"num_keypoints": 14 + }, + { + "bbox_head": [ + 159, + 259, + 42, + 47 + ], + "keypoints": [ + 201, + 284.5, + 1, + 169.9334106, + 305.6158752, + 1, + 187.549942, + 265.1630859, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 139.5, + 307.5, + 1, + 193.5, + 319.5, + 1, + 0, + 0, + 0, + 209, + 371, + 1, + 144, + 365.5, + 1, + 231, + 392, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 222, + 337, + 1, + 241, + 341.5, + 1, + 0, + 0, + 0, + 267, + 416, + 1 + ], + "track_id": 2, + "image_id": 10128340000, + "bbox": [ + 120.375, + 242.53754878499996, + 165.75, + 196.08798833000003 + ], + "scores": [], + "category_id": 1, + "id": 1012834000002, + "iscrowd": false, + "num_keypoints": 11 + }, + { + "bbox_head": [ + 372, + 205, + 44, + 44 + ], + "keypoints": [ + 410.5, + 230.5, + 1, + 387.8875732, + 251.1279602, + 1, + 398.5843201, + 208.9040375, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 359.5, + 262.5, + 1, + 409.5, + 266.5, + 1, + 337.5, + 308.5, + 1, + 450, + 306, + 1, + 292, + 314, + 1, + 480, + 311.5, + 1, + 390, + 339, + 1, + 409, + 339, + 1, + 405.5, + 418.5, + 1, + 447.5, + 366.5, + 1, + 391.5, + 464.5, + 1, + 437.5, + 440.5, + 1 + ], + "track_id": 3, + "image_id": 10128340000, + "bbox": [ + 263.8, + 170.56464312499998, + 244.39999999999998, + 332.27475125 + ], + "scores": [], + "category_id": 1, + "id": 1012834000003, + "iscrowd": false, + "num_keypoints": 15 + }, + { + "bbox_head": [ + 693, + 410, + 44, + 49 + ], + "keypoints": [ + 718.5, + 440.5, + 1, + 717.704834, + 460.703125, + 1, + 712.9713745, + 414.8476562, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 694.5, + 474, + 1, + 743.5, + 472.5, + 1, + 681.5, + 530.5, + 1, + 757.5, + 523.5, + 1, + 667.5, + 564.5, + 1, + 0, + 0, + 0, + 705.5, + 563.5, + 1, + 737.5, + 560.5, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 692.5, + 607.5, + 1, + 716.5, + 603.5, + 1 + ], + "track_id": 4, + "image_id": 10128340000, + "bbox": [ + 654.0, + 385.94980463, + 117.0, + 250.44804694000004 + ], + "scores": [], + "category_id": 1, + "id": 1012834000004, + "iscrowd": false, + "num_keypoints": 12 + }, + { + "bbox_head": [ + 923, + 347, + 46, + 58 + ], + "keypoints": [ + 965.5, + 382.5, + 1, + 933.9436646, + 403.0452576, + 1, + 955.0422363, + 355.7160645, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 922.5, + 403.5, + 1, + 932.5, + 431.5, + 1, + 0, + 0, + 0, + 960, + 475.5, + 1, + 0, + 0, + 0, + 991.5, + 462.5, + 1, + 934.5, + 512.5, + 1, + 922.5, + 506.5, + 1, + 946.5, + 567.5, + 1, + 964, + 578, + 1, + 900.5, + 598, + 1, + 936, + 634.5, + 1 + ], + "track_id": 5, + "image_id": 10128340000, + "bbox": [ + 886.85, + 313.89847417500005, + 118.29999999999995, + 362.4191161499999 + ], + "scores": [], + "category_id": 1, + "id": 1012834000005, + "iscrowd": false, + "num_keypoints": 13 + }, + { + "bbox_head": [ + 691, + 179, + 43, + 52 + ], + "keypoints": [ + 708.5, + 212.5, + 1, + 722.6444702, + 230.0113831, + 1, + 704.8916626, + 186.2414551, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 712, + 244, + 1, + 742, + 237.5, + 1, + 723, + 293.5, + 1, + 745.5, + 281.5, + 1, + 692, + 319, + 1, + 0, + 0, + 0, + 722, + 323.5, + 1, + 748.5, + 314, + 1, + 657.5, + 301.5, + 1, + 668.5, + 299.5, + 1, + 670.5, + 367.5, + 1, + 689.5, + 362.5, + 1 + ], + "track_id": 6, + "image_id": 10128340000, + "bbox": [ + 643.85, + 159.05267336499998, + 118.29999999999995, + 235.63610837 + ], + "scores": [], + "category_id": 1, + "id": 1012834000006, + "iscrowd": false, + "num_keypoints": 14 + }, + { + "bbox_head": [ + 927, + 160, + 39, + 52 + ], + "keypoints": [ + 952, + 189, + 1, + 946.763916, + 211.9986572, + 1, + 946.302063, + 166.5010071, + 1, + 0, 
+ 0, + 0, + 0, + 0, + 0, + 914.5, + 234, + 1, + 979.5, + 236.5, + 1, + 890.5, + 270.5, + 1, + 998.5, + 286.5, + 1, + 894.5, + 324, + 1, + 0, + 0, + 0, + 932, + 326.5, + 1, + 958.5, + 327.5, + 1, + 1000.5, + 340.5, + 1, + 993.5, + 372.5, + 1, + 955.5, + 383.5, + 1, + 959.5, + 446.5, + 1 + ], + "track_id": 7, + "image_id": 10128340000, + "bbox": [ + 874.0, + 124.50115816500005, + 143.0, + 363.99869076999994 + ], + "scores": [], + "category_id": 1, + "id": 1012834000007, + "iscrowd": false, + "num_keypoints": 14 + }, + { + "bbox_head": [ + 1367, + 427, + 47, + 45 + ], + "keypoints": [ + 1406, + 451, + 1, + 1379.198608, + 472.946106, + 1, + 1398.976074, + 431.9154358, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 1375.5, + 467.5, + 1, + 1372, + 501, + 1, + 0, + 0, + 0, + 1343.5, + 534.5, + 1, + 0, + 0, + 0, + 1339.5, + 573.5, + 1, + 1381.5, + 531.5, + 1, + 1376, + 539.5, + 1, + 1452.5, + 524.5, + 1, + 1453.5, + 535.5, + 1, + 1469.5, + 603.5, + 1, + 1466, + 610, + 1 + ], + "track_id": 8, + "image_id": 10128340000, + "bbox": [ + 1320.0, + 405.20275117000006, + 169.0, + 231.50993345999996 + ], + "scores": [], + "category_id": 1, + "id": 1012834000008, + "iscrowd": false, + "num_keypoints": 13 + }, + { + "bbox_head": [ + 1378, + 204, + 40, + 44 + ], + "keypoints": [ + 1389, + 234, + 1, + 1404.137573, + 248.9802094, + 1, + 1393.396851, + 208.7648468, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 1375, + 272, + 1, + 1442.5, + 260.5, + 1, + 1374, + 315, + 1, + 1468, + 303.5, + 1, + 1367, + 340.5, + 1, + 1462.5, + 330.5, + 1, + 1407, + 349.5, + 1, + 1439, + 340.5, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "track_id": 9, + "image_id": 10128340000, + "bbox": [ + 1351.85, + 187.65457382, + 131.30000000000018, + 182.95569916 + ], + "scores": [], + "category_id": 1, + "id": 1012834000009, + "iscrowd": false, + "num_keypoints": 11 + }, + { + "bbox_head": [ + 407, + -29, + 35, + 40 + ], + "keypoints": [ + 0, + 0, + 0, + 425.1159668, + 12.25136662, + 1, + 424.0380249, + -24.93852425, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 455.5, + 21.5, + 1, + 395.5, + 29.5, + 1, + 474.5, + 64.5, + 1, + 391.5, + 67, + 1, + 474, + 108, + 1, + 379, + 107, + 1, + 446, + 88, + 1, + 426, + 88, + 1, + 424, + 113, + 1, + 403, + 113, + 1, + 430, + 173, + 1, + 415, + 171, + 1 + ], + "track_id": 10, + "image_id": 10128340000, + "bbox": [ + 364.675, + -54.62930288750002, + 124.14999999999998, + 257.32008152500003 + ], + "scores": [], + "category_id": 1, + "id": 1012834000010, + "iscrowd": false, + "num_keypoints": 14 + }, + { + "bbox_head": [ + 648, + 253, + 138, + 103 + ], + "keypoints": [ + 750, + 297.5, + 1, + 734.1937256, + 371.1997375, + 1, + 704.1047363, + 254.4751892, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 697, + 352.5, + 1, + 766, + 369, + 1, + 0, + 0, + 0, + 857, + 419, + 1, + 766, + 281, + 1, + 942, + 422.5, + 1, + 722, + 525, + 1, + 792, + 544, + 1, + 783, + 697, + 1, + 755, + 699, + 1, + 808.5, + 832.5, + 1, + 672, + 782.5, + 1 + ], + "track_id": 0, + "image_id": 10094730000, + "bbox": [ + 631.5, + 167.77146757999992, + 351.0, + 751.4322540400001 + ], + "scores": [], + "category_id": 1, + "id": 1009473000000, + "iscrowd": false, + "num_keypoints": 14 + }, + { + "bbox_head": [ + 878, + 201, + 90, + 125 + ], + "keypoints": [ + 900, + 272, + 1, + 905.657959, + 322.6206665, + 1, + 936.0065308, + 219.1595001, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 950, + 366, + 1, + 852, + 311, + 1, + 1021, + 428, + 1, + 759, + 303, + 1, + 986, + 422, + 1, + 704, + 374, + 1, + 912, + 516, + 1, + 856, + 524, + 1, + 876, + 663, + 1, + 908, + 
680, + 1, + 849, + 828, + 1, + 959, + 804, + 1 + ], + "track_id": 1, + "image_id": 10094730000, + "bbox": [ + 656.45, + 127.83342511500007, + 412.0999999999999, + 791.4926498699999 + ], + "scores": [], + "category_id": 1, + "id": 1009473000001, + "iscrowd": false, + "num_keypoints": 15 + }, + { + "bbox_head": [ + 346, + 337, + 296, + 237 + ], + "keypoints": [ + 621, + 471, + 1, + 542.2835693, + 599.2855835, + 1, + 457.787323, + 347.6607971, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 371, + 737, + 1, + 753, + 579, + 1, + 387, + 981, + 1, + 1023, + 353, + 1, + 0, + 0, + 0, + 953, + 141, + 1, + 968, + 833, + 1, + 1152, + 843, + 1, + 0, + 0, + 0, + 1416, + 429, + 1, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "track_id": 0, + "image_id": 10034180000, + "bbox": [ + 214.25, + 15.0, + 1358.5, + 1092.0 + ], + "scores": [], + "category_id": 1, + "id": 1003418000000, + "iscrowd": false, + "num_keypoints": 11 + } + ] +} diff --git a/tests/data/posetrack18/annotations/val/003418_mpii_test.json b/tests/data/posetrack18/annotations/val/003418_mpii_test.json index 00bf014969..061fd4b1c3 100644 --- a/tests/data/posetrack18/annotations/val/003418_mpii_test.json +++ b/tests/data/posetrack18/annotations/val/003418_mpii_test.json @@ -1,762 +1,762 @@ -{ - "categories": [ - { - "supercategory": "person", - "id": 1, - "name": "person", - "keypoints": [ - "nose", - "head_bottom", - "head_top", - "left_ear", - "right_ear", - "left_shoulder", - "right_shoulder", - "left_elbow", - "right_elbow", - "left_wrist", - "right_wrist", - "left_hip", - "right_hip", - "left_knee", - "right_knee", - "left_ankle", - "right_ankle" - ], - "skeleton": [ - [ - 16, - 14 - ], - [ - 14, - 12 - ], - [ - 17, - 15 - ], - [ - 15, - 13 - ], - [ - 12, - 13 - ], - [ - 6, - 12 - ], - [ - 7, - 13 - ], - [ - 6, - 7 - ], - [ - 6, - 8 - ], - [ - 7, - 9 - ], - [ - 8, - 10 - ], - [ - 9, - 11 - ], - [ - 2, - 3 - ], - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 4 - ], - [ - 3, - 5 - ], - [ - 4, - 6 - ], - [ - 5, - 7 - ] - ] - } - ], - "images": [ - { - "has_no_densepose": true, - "is_labeled": true, - "file_name": "images/val/003418_mpii_test/000000.jpg", - "nframes": 149, - "frame_id": 10034180000, - "vid_id": "003418", - "ignore_regions_y": [ - [ - 639, - 647, - 635, - 618, - 590, - 547, - 501, - 499, - 515, - 518, - 526, - 528, - 509, - 473, - 450, - 407, - 352, - 339, - 309, - 271, - 262, - 252, - 211, - 211, - 183, - 175, - 139, - 105, - 94, - 62, - 45, - 22, - 22, - 30, - 52, - 66, - 86, - 92, - 101, - 109, - 162, - 158, - 135, - 103, - 94, - 75, - 60, - 37, - 18, - 9, - 1, - 0, - 79, - 75, - 88, - 103, - 156, - 164, - 167, - 201, - 196, - 194, - 207, - 237, - 262, - 256 - ], - [ - 94, - 71, - 62, - 41, - 28, - 30, - 43, - 64, - 88, - 96, - 120, - 115, - 98 - ], - [ - 235, - 215, - 211, - 224, - 252, - 239, - 207, - 196, - 211, - 228, - 218, - 198, - 181, - 186, - 198, - 218, - 233, - 252, - 277, - 279, - 292, - 318, - 347, - 354, - 392, - 430, - 439, - 447, - 462, - 477, - 496, - 539, - 515, - 464, - 426, - 398, - 366, - 333, - 322, - 315, - 318, - 303, - 298, - 294, - 266, - 245 - ], - [ - 207, - 213, - 239, - 237, - 215, - 179, - 179, - 184, - 209, - 222, - 239, - 264, - 279, - 271, - 269, - 290, - 260, - 226 - ], - [ - 194, - 175, - 160, - 164, - 179, - 167, - 160, - 216, - 266, - 262, - 266, - 269, - 281, - 298, - 309, - 318, - 332, - 341, - 345, - 345, - 290, - 262, - 226 - ], - [ - 424, - 442, - 432, - 432, - 455, - 469, - 474, - 505, - 559, - 555, - 465, - 449, - 444 - ], - [ - 926, - 901, - 857, - 792, - 751, - 694, - 636, - 540, - 474, - 403, - 
351, - 265, - 211, - 155, - 98, - 71, - 40, - 0, - 0, - 1078, - 1078, - 1007, - 924, - 869, - 807, - 865, - 892, - 955, - 1003, - 1057, - 1078, - 1078, - 1030, - 961, - 926 - ], - [ - 1050, - 1076, - 1078, - 1057, - 1032, - 1013, - 998, - 982, - 971, - 951, - 936, - 913, - 888, - 844, - 799, - 763, - 732, - 723, - 713, - 753, - 784, - 817, - 830, - 871, - 911, - 930 - ], - [ - 478, - 461, - 423, - 405, - 394, - 263, - 257, - 265, - 290, - 315, - 334, - 342, - 344, - 411, - 448, - 448, - 448, - 430, - 424, - 423, - 421, - 409, - 444 - ] - ], - "ignore_regions_x": [ - [ - 0, - 1, - 198, - 258, - 307, - 337, - 343, - 335, - 320, - 290, - 273, - 260, - 232, - 190, - 196, - 183, - 171, - 162, - 149, - 132, - 105, - 77, - 69, - 322, - 324, - 303, - 279, - 273, - 247, - 224, - 226, - 215, - 203, - 190, - 192, - 181, - 169, - 167, - 154, - 139, - 99, - 86, - 81, - 56, - 60, - 62, - 60, - 49, - 35, - 15, - 9, - 3, - 0, - 11, - 13, - 0, - 1, - 9, - 90, - 64, - 49, - 33, - 18, - 13, - 15, - 0 - ], - [ - 341, - 347, - 352, - 356, - 371, - 383, - 388, - 392, - 403, - 392, - 398, - 377, - 375 - ], - [ - 688, - 694, - 713, - 724, - 728, - 752, - 764, - 783, - 796, - 796, - 824, - 828, - 839, - 856, - 864, - 864, - 884, - 899, - 903, - 843, - 854, - 854, - 850, - 884, - 901, - 873, - 833, - 815, - 796, - 747, - 716, - 666, - 654, - 660, - 667, - 694, - 711, - 724, - 737, - 775, - 792, - 790, - 756, - 688, - 686, - 686 - ], - [ - 1047, - 1079, - 1079, - 1088, - 1099, - 1103, - 1122, - 1133, - 1141, - 1164, - 1175, - 1164, - 1181, - 1126, - 1092, - 1077, - 1069, - 1047 - ], - [ - 1252, - 1254, - 1258, - 1277, - 1292, - 1301, - 1322, - 1330, - 1350, - 1322, - 1296, - 1277, - 1256, - 1233, - 1213, - 1198, - 1173, - 1130, - 1098, - 1081, - 1101, - 1198, - 1232 - ], - [ - 1165, - 1184, - 1226, - 1246, - 1238, - 1226, - 1209, - 1215, - 1180, - 1126, - 1057, - 1053, - 1128 - ], - [ - 1455, - 1438, - 1438, - 1444, - 1442, - 1423, - 1426, - 1409, - 1399, - 1390, - 1374, - 1349, - 1330, - 1319, - 1307, - 1309, - 1324, - 1330, - 1919, - 1919, - 1884, - 1855, - 1830, - 1828, - 1776, - 1732, - 1734, - 1726, - 1728, - 1707, - 1713, - 1469, - 1461, - 1459, - 1455 - ], - [ - 1463, - 1411, - 1255, - 1250, - 1246, - 1261, - 1284, - 1280, - 1271, - 1265, - 1275, - 1299, - 1299, - 1296, - 1315, - 1340, - 1365, - 1396, - 1444, - 1428, - 1434, - 1432, - 1446, - 1440, - 1453, - 1455 - ], - [ - 1246, - 1271, - 1313, - 1344, - 1384, - 1346, - 1307, - 1286, - 1255, - 1203, - 1153, - 1096, - 1078, - 1061, - 1036, - 1090, - 1121, - 1148, - 1169, - 1205, - 1228, - 1265, - 1267 - ] - ], - "id": 10034180000, - "width": 1920, - "height": 1080, - "mask_file": "mask/val/003418_mpii_test/000000.jpg" - } - ], - "annotations": [ - { - "bbox_head": [ - 346, - 337, - 296, - 237 - ], - "keypoints": [ - 621, - 471, - 1, - 542.2835693, - 599.2855835, - 1, - 457.787323, - 347.6607971, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 371, - 737, - 1, - 753, - 579, - 1, - 387, - 981, - 1, - 1023, - 353, - 1, - 0, - 0, - 0, - 953, - 141, - 1, - 968, - 833, - 1, - 1152, - 843, - 1, - 0, - 0, - 0, - 1416, - 429, - 1, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "track_id": 0, - "image_id": 10034180000, - "bbox": [ - 214.25, - 15.0, - 1358.5, - 1092.0 - ], - "scores": [], - "category_id": 1, - "id": 1003418000000, - "iscrowd": false, - "num_keypoints": 11 - } - ] -} +{ + "categories": [ + { + "supercategory": "person", + "id": 1, + "name": "person", + "keypoints": [ + "nose", + "head_bottom", + "head_top", + "left_ear", + "right_ear", + "left_shoulder", + "right_shoulder", + 
"left_elbow", + "right_elbow", + "left_wrist", + "right_wrist", + "left_hip", + "right_hip", + "left_knee", + "right_knee", + "left_ankle", + "right_ankle" + ], + "skeleton": [ + [ + 16, + 14 + ], + [ + 14, + 12 + ], + [ + 17, + 15 + ], + [ + 15, + 13 + ], + [ + 12, + 13 + ], + [ + 6, + 12 + ], + [ + 7, + 13 + ], + [ + 6, + 7 + ], + [ + 6, + 8 + ], + [ + 7, + 9 + ], + [ + 8, + 10 + ], + [ + 9, + 11 + ], + [ + 2, + 3 + ], + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 4 + ], + [ + 3, + 5 + ], + [ + 4, + 6 + ], + [ + 5, + 7 + ] + ] + } + ], + "images": [ + { + "has_no_densepose": true, + "is_labeled": true, + "file_name": "images/val/003418_mpii_test/000000.jpg", + "nframes": 149, + "frame_id": 10034180000, + "vid_id": "003418", + "ignore_regions_y": [ + [ + 639, + 647, + 635, + 618, + 590, + 547, + 501, + 499, + 515, + 518, + 526, + 528, + 509, + 473, + 450, + 407, + 352, + 339, + 309, + 271, + 262, + 252, + 211, + 211, + 183, + 175, + 139, + 105, + 94, + 62, + 45, + 22, + 22, + 30, + 52, + 66, + 86, + 92, + 101, + 109, + 162, + 158, + 135, + 103, + 94, + 75, + 60, + 37, + 18, + 9, + 1, + 0, + 79, + 75, + 88, + 103, + 156, + 164, + 167, + 201, + 196, + 194, + 207, + 237, + 262, + 256 + ], + [ + 94, + 71, + 62, + 41, + 28, + 30, + 43, + 64, + 88, + 96, + 120, + 115, + 98 + ], + [ + 235, + 215, + 211, + 224, + 252, + 239, + 207, + 196, + 211, + 228, + 218, + 198, + 181, + 186, + 198, + 218, + 233, + 252, + 277, + 279, + 292, + 318, + 347, + 354, + 392, + 430, + 439, + 447, + 462, + 477, + 496, + 539, + 515, + 464, + 426, + 398, + 366, + 333, + 322, + 315, + 318, + 303, + 298, + 294, + 266, + 245 + ], + [ + 207, + 213, + 239, + 237, + 215, + 179, + 179, + 184, + 209, + 222, + 239, + 264, + 279, + 271, + 269, + 290, + 260, + 226 + ], + [ + 194, + 175, + 160, + 164, + 179, + 167, + 160, + 216, + 266, + 262, + 266, + 269, + 281, + 298, + 309, + 318, + 332, + 341, + 345, + 345, + 290, + 262, + 226 + ], + [ + 424, + 442, + 432, + 432, + 455, + 469, + 474, + 505, + 559, + 555, + 465, + 449, + 444 + ], + [ + 926, + 901, + 857, + 792, + 751, + 694, + 636, + 540, + 474, + 403, + 351, + 265, + 211, + 155, + 98, + 71, + 40, + 0, + 0, + 1078, + 1078, + 1007, + 924, + 869, + 807, + 865, + 892, + 955, + 1003, + 1057, + 1078, + 1078, + 1030, + 961, + 926 + ], + [ + 1050, + 1076, + 1078, + 1057, + 1032, + 1013, + 998, + 982, + 971, + 951, + 936, + 913, + 888, + 844, + 799, + 763, + 732, + 723, + 713, + 753, + 784, + 817, + 830, + 871, + 911, + 930 + ], + [ + 478, + 461, + 423, + 405, + 394, + 263, + 257, + 265, + 290, + 315, + 334, + 342, + 344, + 411, + 448, + 448, + 448, + 430, + 424, + 423, + 421, + 409, + 444 + ] + ], + "ignore_regions_x": [ + [ + 0, + 1, + 198, + 258, + 307, + 337, + 343, + 335, + 320, + 290, + 273, + 260, + 232, + 190, + 196, + 183, + 171, + 162, + 149, + 132, + 105, + 77, + 69, + 322, + 324, + 303, + 279, + 273, + 247, + 224, + 226, + 215, + 203, + 190, + 192, + 181, + 169, + 167, + 154, + 139, + 99, + 86, + 81, + 56, + 60, + 62, + 60, + 49, + 35, + 15, + 9, + 3, + 0, + 11, + 13, + 0, + 1, + 9, + 90, + 64, + 49, + 33, + 18, + 13, + 15, + 0 + ], + [ + 341, + 347, + 352, + 356, + 371, + 383, + 388, + 392, + 403, + 392, + 398, + 377, + 375 + ], + [ + 688, + 694, + 713, + 724, + 728, + 752, + 764, + 783, + 796, + 796, + 824, + 828, + 839, + 856, + 864, + 864, + 884, + 899, + 903, + 843, + 854, + 854, + 850, + 884, + 901, + 873, + 833, + 815, + 796, + 747, + 716, + 666, + 654, + 660, + 667, + 694, + 711, + 724, + 737, + 775, + 792, + 790, + 756, + 688, + 686, + 686 + ], + [ + 1047, + 1079, 
+ 1079, + 1088, + 1099, + 1103, + 1122, + 1133, + 1141, + 1164, + 1175, + 1164, + 1181, + 1126, + 1092, + 1077, + 1069, + 1047 + ], + [ + 1252, + 1254, + 1258, + 1277, + 1292, + 1301, + 1322, + 1330, + 1350, + 1322, + 1296, + 1277, + 1256, + 1233, + 1213, + 1198, + 1173, + 1130, + 1098, + 1081, + 1101, + 1198, + 1232 + ], + [ + 1165, + 1184, + 1226, + 1246, + 1238, + 1226, + 1209, + 1215, + 1180, + 1126, + 1057, + 1053, + 1128 + ], + [ + 1455, + 1438, + 1438, + 1444, + 1442, + 1423, + 1426, + 1409, + 1399, + 1390, + 1374, + 1349, + 1330, + 1319, + 1307, + 1309, + 1324, + 1330, + 1919, + 1919, + 1884, + 1855, + 1830, + 1828, + 1776, + 1732, + 1734, + 1726, + 1728, + 1707, + 1713, + 1469, + 1461, + 1459, + 1455 + ], + [ + 1463, + 1411, + 1255, + 1250, + 1246, + 1261, + 1284, + 1280, + 1271, + 1265, + 1275, + 1299, + 1299, + 1296, + 1315, + 1340, + 1365, + 1396, + 1444, + 1428, + 1434, + 1432, + 1446, + 1440, + 1453, + 1455 + ], + [ + 1246, + 1271, + 1313, + 1344, + 1384, + 1346, + 1307, + 1286, + 1255, + 1203, + 1153, + 1096, + 1078, + 1061, + 1036, + 1090, + 1121, + 1148, + 1169, + 1205, + 1228, + 1265, + 1267 + ] + ], + "id": 10034180000, + "width": 1920, + "height": 1080, + "mask_file": "mask/val/003418_mpii_test/000000.jpg" + } + ], + "annotations": [ + { + "bbox_head": [ + 346, + 337, + 296, + 237 + ], + "keypoints": [ + 621, + 471, + 1, + 542.2835693, + 599.2855835, + 1, + 457.787323, + 347.6607971, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 371, + 737, + 1, + 753, + 579, + 1, + 387, + 981, + 1, + 1023, + 353, + 1, + 0, + 0, + 0, + 953, + 141, + 1, + 968, + 833, + 1, + 1152, + 843, + 1, + 0, + 0, + 0, + 1416, + 429, + 1, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "track_id": 0, + "image_id": 10034180000, + "bbox": [ + 214.25, + 15.0, + 1358.5, + 1092.0 + ], + "scores": [], + "category_id": 1, + "id": 1003418000000, + "iscrowd": false, + "num_keypoints": 11 + } + ] +} diff --git a/tests/data/posetrack18/annotations/val/009473_mpii_test.json b/tests/data/posetrack18/annotations/val/009473_mpii_test.json index 0c0bb2c5c1..130d02cfd1 100644 --- a/tests/data/posetrack18/annotations/val/009473_mpii_test.json +++ b/tests/data/posetrack18/annotations/val/009473_mpii_test.json @@ -1,374 +1,374 @@ -{ - "categories": [ - { - "supercategory": "person", - "id": 1, - "name": "person", - "keypoints": [ - "nose", - "head_bottom", - "head_top", - "left_ear", - "right_ear", - "left_shoulder", - "right_shoulder", - "left_elbow", - "right_elbow", - "left_wrist", - "right_wrist", - "left_hip", - "right_hip", - "left_knee", - "right_knee", - "left_ankle", - "right_ankle" - ], - "skeleton": [ - [ - 16, - 14 - ], - [ - 14, - 12 - ], - [ - 17, - 15 - ], - [ - 15, - 13 - ], - [ - 12, - 13 - ], - [ - 6, - 12 - ], - [ - 7, - 13 - ], - [ - 6, - 7 - ], - [ - 6, - 8 - ], - [ - 7, - 9 - ], - [ - 8, - 10 - ], - [ - 9, - 11 - ], - [ - 2, - 3 - ], - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 4 - ], - [ - 3, - 5 - ], - [ - 4, - 6 - ], - [ - 5, - 7 - ] - ] - } - ], - "images": [ - { - "has_no_densepose": true, - "is_labeled": true, - "file_name": "images/val/009473_mpii_test/000000.jpg", - "nframes": 101, - "frame_id": 10094730000, - "vid_id": "009473", - "ignore_regions_y": [ - [ - 228, - 237, - 243, - 230, - 220, - 230, - 228, - 232, - 220, - 211, - 226, - 258, - 364, - 417, - 475, - 562, - 615, - 646, - 656, - 637, - 649, - 645, - 603, - 585, - 547 - ], - [ - 0, - 1, - 137, - 130, - 166, - 235, - 309, - 384, - 452, - 526, - 583, - 658, - 694, - 709, - 599, - 684, - 707, - 733, - 660, - 679, - 762 - ] - ], - "ignore_regions_x": [ 
- [ - 1907, - 1820, - 1758, - 1662, - 1577, - 1492, - 1375, - 1305, - 1250, - 1177, - 1111, - 1033, - 1047, - 1062, - 1056, - 1130, - 1173, - 1232, - 1282, - 1332, - 1416, - 1471, - 1515, - 1541, - 1909 - ], - [ - 0, - 257, - 284, - 407, - 450, - 505, - 562, - 592, - 613, - 626, - 639, - 639, - 594, - 454, - 371, - 343, - 269, - 152, - 88, - 35, - 3 - ] - ], - "id": 10094730000, - "width": 1920, - "height": 1080, - "mask_file": "mask/val/009473_mpii_test/000000.jpg" - } - ], - "annotations": [ - { - "bbox_head": [ - 648, - 253, - 138, - 103 - ], - "keypoints": [ - 750, - 297.5, - 1, - 734.1937256, - 371.1997375, - 1, - 704.1047363, - 254.4751892, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 697, - 352.5, - 1, - 766, - 369, - 1, - 0, - 0, - 0, - 857, - 419, - 1, - 766, - 281, - 1, - 942, - 422.5, - 1, - 722, - 525, - 1, - 792, - 544, - 1, - 783, - 697, - 1, - 755, - 699, - 1, - 808.5, - 832.5, - 1, - 672, - 782.5, - 1 - ], - "track_id": 0, - "image_id": 10094730000, - "bbox": [ - 631.5, - 167.77146757999992, - 351.0, - 751.4322540400001 - ], - "scores": [], - "category_id": 1, - "id": 1009473000000, - "iscrowd": false, - "num_keypoints": 14 - }, - { - "bbox_head": [ - 878, - 201, - 90, - 125 - ], - "keypoints": [ - 900, - 272, - 1, - 905.657959, - 322.6206665, - 1, - 936.0065308, - 219.1595001, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 950, - 366, - 1, - 852, - 311, - 1, - 1021, - 428, - 1, - 759, - 303, - 1, - 986, - 422, - 1, - 704, - 374, - 1, - 912, - 516, - 1, - 856, - 524, - 1, - 876, - 663, - 1, - 908, - 680, - 1, - 849, - 828, - 1, - 959, - 804, - 1 - ], - "track_id": 1, - "image_id": 10094730000, - "bbox": [ - 656.45, - 127.83342511500007, - 412.0999999999999, - 791.4926498699999 - ], - "scores": [], - "category_id": 1, - "id": 1009473000001, - "iscrowd": false, - "num_keypoints": 15 - } - ] -} +{ + "categories": [ + { + "supercategory": "person", + "id": 1, + "name": "person", + "keypoints": [ + "nose", + "head_bottom", + "head_top", + "left_ear", + "right_ear", + "left_shoulder", + "right_shoulder", + "left_elbow", + "right_elbow", + "left_wrist", + "right_wrist", + "left_hip", + "right_hip", + "left_knee", + "right_knee", + "left_ankle", + "right_ankle" + ], + "skeleton": [ + [ + 16, + 14 + ], + [ + 14, + 12 + ], + [ + 17, + 15 + ], + [ + 15, + 13 + ], + [ + 12, + 13 + ], + [ + 6, + 12 + ], + [ + 7, + 13 + ], + [ + 6, + 7 + ], + [ + 6, + 8 + ], + [ + 7, + 9 + ], + [ + 8, + 10 + ], + [ + 9, + 11 + ], + [ + 2, + 3 + ], + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 4 + ], + [ + 3, + 5 + ], + [ + 4, + 6 + ], + [ + 5, + 7 + ] + ] + } + ], + "images": [ + { + "has_no_densepose": true, + "is_labeled": true, + "file_name": "images/val/009473_mpii_test/000000.jpg", + "nframes": 101, + "frame_id": 10094730000, + "vid_id": "009473", + "ignore_regions_y": [ + [ + 228, + 237, + 243, + 230, + 220, + 230, + 228, + 232, + 220, + 211, + 226, + 258, + 364, + 417, + 475, + 562, + 615, + 646, + 656, + 637, + 649, + 645, + 603, + 585, + 547 + ], + [ + 0, + 1, + 137, + 130, + 166, + 235, + 309, + 384, + 452, + 526, + 583, + 658, + 694, + 709, + 599, + 684, + 707, + 733, + 660, + 679, + 762 + ] + ], + "ignore_regions_x": [ + [ + 1907, + 1820, + 1758, + 1662, + 1577, + 1492, + 1375, + 1305, + 1250, + 1177, + 1111, + 1033, + 1047, + 1062, + 1056, + 1130, + 1173, + 1232, + 1282, + 1332, + 1416, + 1471, + 1515, + 1541, + 1909 + ], + [ + 0, + 257, + 284, + 407, + 450, + 505, + 562, + 592, + 613, + 626, + 639, + 639, + 594, + 454, + 371, + 343, + 269, + 152, + 88, + 35, + 3 + ] + ], + "id": 10094730000, + "width": 
1920, + "height": 1080, + "mask_file": "mask/val/009473_mpii_test/000000.jpg" + } + ], + "annotations": [ + { + "bbox_head": [ + 648, + 253, + 138, + 103 + ], + "keypoints": [ + 750, + 297.5, + 1, + 734.1937256, + 371.1997375, + 1, + 704.1047363, + 254.4751892, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 697, + 352.5, + 1, + 766, + 369, + 1, + 0, + 0, + 0, + 857, + 419, + 1, + 766, + 281, + 1, + 942, + 422.5, + 1, + 722, + 525, + 1, + 792, + 544, + 1, + 783, + 697, + 1, + 755, + 699, + 1, + 808.5, + 832.5, + 1, + 672, + 782.5, + 1 + ], + "track_id": 0, + "image_id": 10094730000, + "bbox": [ + 631.5, + 167.77146757999992, + 351.0, + 751.4322540400001 + ], + "scores": [], + "category_id": 1, + "id": 1009473000000, + "iscrowd": false, + "num_keypoints": 14 + }, + { + "bbox_head": [ + 878, + 201, + 90, + 125 + ], + "keypoints": [ + 900, + 272, + 1, + 905.657959, + 322.6206665, + 1, + 936.0065308, + 219.1595001, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 950, + 366, + 1, + 852, + 311, + 1, + 1021, + 428, + 1, + 759, + 303, + 1, + 986, + 422, + 1, + 704, + 374, + 1, + 912, + 516, + 1, + 856, + 524, + 1, + 876, + 663, + 1, + 908, + 680, + 1, + 849, + 828, + 1, + 959, + 804, + 1 + ], + "track_id": 1, + "image_id": 10094730000, + "bbox": [ + 656.45, + 127.83342511500007, + 412.0999999999999, + 791.4926498699999 + ], + "scores": [], + "category_id": 1, + "id": 1009473000001, + "iscrowd": false, + "num_keypoints": 15 + } + ] +} diff --git a/tests/data/posetrack18/annotations/val/012834_mpii_test.json b/tests/data/posetrack18/annotations/val/012834_mpii_test.json index c97cd0b91b..5cd8fc9c4a 100644 --- a/tests/data/posetrack18/annotations/val/012834_mpii_test.json +++ b/tests/data/posetrack18/annotations/val/012834_mpii_test.json @@ -1,1466 +1,1466 @@ -{ - "categories": [ - { - "supercategory": "person", - "id": 1, - "name": "person", - "keypoints": [ - "nose", - "head_bottom", - "head_top", - "left_ear", - "right_ear", - "left_shoulder", - "right_shoulder", - "left_elbow", - "right_elbow", - "left_wrist", - "right_wrist", - "left_hip", - "right_hip", - "left_knee", - "right_knee", - "left_ankle", - "right_ankle" - ], - "skeleton": [ - [ - 16, - 14 - ], - [ - 14, - 12 - ], - [ - 17, - 15 - ], - [ - 15, - 13 - ], - [ - 12, - 13 - ], - [ - 6, - 12 - ], - [ - 7, - 13 - ], - [ - 6, - 7 - ], - [ - 6, - 8 - ], - [ - 7, - 9 - ], - [ - 8, - 10 - ], - [ - 9, - 11 - ], - [ - 2, - 3 - ], - [ - 1, - 2 - ], - [ - 1, - 3 - ], - [ - 2, - 4 - ], - [ - 3, - 5 - ], - [ - 4, - 6 - ], - [ - 5, - 7 - ] - ] - } - ], - "images": [ - { - "has_no_densepose": true, - "is_labeled": true, - "file_name": "images/val/012834_mpii_test/000000.jpg", - "nframes": 140, - "frame_id": 10128340000, - "vid_id": "012834", - "ignore_regions_y": [ - [ - 1079, - 615, - 612, - 674, - 660, - 664, - 678, - 713, - 704, - 667, - 665, - 678, - 700, - 729, - 753, - 740, - 695, - 668, - 646, - 623, - 624, - 659, - 676, - 685, - 695, - 678, - 675, - 673, - 663, - 693, - 703, - 732, - 719, - 690, - 669, - 660, - 660, - 663, - 689, - 697, - 691, - 697, - 713, - 736, - 721, - 703, - 698, - 708, - 741, - 758, - 778, - 795, - 780, - 757, - 745, - 737, - 745, - 782, - 809, - 850, - 881, - 835, - 810, - 806, - 782, - 782, - 806, - 832, - 863, - 897, - 940, - 931, - 957, - 976, - 1003, - 1043, - 1045, - 1008, - 965, - 963, - 931, - 895, - 846, - 832, - 829, - 869, - 898, - 904, - 869, - 836, - 782, - 752, - 736, - 748, - 776, - 832, - 874, - 862, - 818, - 793, - 790, - 756, - 738, - 737, - 750, - 791, - 806, - 820, - 852, - 879, - 837, - 794, - 772, - 766, - 769, - 790, - 
805, - 829, - 844, - 866, - 837, - 804, - 791, - 773, - 745, - 706, - 683, - 644, - 638, - 662, - 694, - 716, - 736, - 777, - 784, - 815, - 830, - 813, - 800, - 813, - 820, - 847, - 829, - 781, - 780, - 801, - 836, - 886, - 938, - 1018, - 1029, - 1079 - ], - [ - 0, - 21, - 43, - 60, - 90, - 95, - 95, - 43, - 40, - 84, - 104, - 104, - 74, - 6, - 4, - 71, - 69, - 0 - ], - [ - 0, - 4, - 48, - 106, - 214, - 207, - 46, - 50, - 170, - 156, - 96, - 157, - 160, - 62, - 65, - 156, - 165, - 162, - 140, - 93, - 93, - 7, - 4, - 121, - 129, - 84, - 75, - 68 - ], - [ - 0, - 0, - 739, - 729, - 720, - 740, - 768, - 785, - 803, - 815, - 757, - 735, - 632, - 620, - 632, - 640, - 662, - 656, - 607, - 645, - 645, - 628, - 604, - 570, - 543, - 512, - 485, - 467, - 451, - 448, - 456, - 482, - 512, - 548, - 554, - 542, - 498, - 479, - 454, - 404, - 387, - 398, - 415, - 528, - 546, - 468, - 410, - 400, - 359, - 375, - 373, - 273, - 254, - 284, - 253, - 204, - 206 - ] - ], - "ignore_regions_x": [ - [ - 3, - 0, - 30, - 44, - 74, - 99, - 106, - 102, - 115, - 121, - 141, - 156, - 165, - 187, - 200, - 211, - 196, - 198, - 210, - 226, - 252, - 266, - 263, - 271, - 291, - 299, - 326, - 339, - 360, - 399, - 412, - 424, - 437, - 432, - 439, - 461, - 489, - 510, - 534, - 548, - 559, - 567, - 587, - 593, - 604, - 612, - 633, - 652, - 645, - 638, - 649, - 650, - 661, - 654, - 662, - 685, - 713, - 727, - 733, - 752, - 762, - 769, - 785, - 812, - 841, - 863, - 869, - 877, - 899, - 909, - 918, - 906, - 902, - 909, - 917, - 900, - 932, - 932, - 941, - 919, - 926, - 935, - 950, - 957, - 983, - 1002, - 1007, - 1032, - 1034, - 1018, - 1018, - 1038, - 1074, - 1106, - 1119, - 1121, - 1130, - 1148, - 1152, - 1172, - 1195, - 1199, - 1209, - 1229, - 1242, - 1240, - 1242, - 1261, - 1264, - 1277, - 1285, - 1286, - 1296, - 1313, - 1336, - 1350, - 1367, - 1403, - 1417, - 1435, - 1459, - 1456, - 1429, - 1420, - 1465, - 1492, - 1496, - 1507, - 1529, - 1553, - 1570, - 1596, - 1609, - 1610, - 1649, - 1671, - 1703, - 1740, - 1763, - 1775, - 1803, - 1809, - 1815, - 1815, - 1857, - 1874, - 1881, - 1897, - 1896, - 1899, - 1888, - 1884 - ], - [ - 378, - 381, - 365, - 359, - 334, - 292, - 257, - 262, - 231, - 236, - 219, - 193, - 196, - 183, - 154, - 159, - 140, - 121 - ], - [ - 451, - 1173, - 1168, - 1168, - 1170, - 1085, - 1098, - 1070, - 1043, - 1000, - 993, - 979, - 934, - 937, - 918, - 903, - 893, - 832, - 785, - 759, - 726, - 710, - 667, - 664, - 585, - 576, - 507, - 485 - ], - [ - 1312, - 1918, - 1917, - 1895, - 1867, - 1835, - 1804, - 1779, - 1754, - 1720, - 1726, - 1739, - 1740, - 1735, - 1701, - 1635, - 1587, - 1578, - 1587, - 1564, - 1550, - 1543, - 1562, - 1579, - 1578, - 1581, - 1584, - 1589, - 1601, - 1610, - 1621, - 1637, - 1642, - 1659, - 1673, - 1681, - 1673, - 1671, - 1664, - 1671, - 1681, - 1728, - 1734, - 1789, - 1854, - 1807, - 1820, - 1778, - 1778, - 1717, - 1642, - 1635, - 1600, - 1520, - 1454, - 1415, - 1395 - ] - ], - "id": 10128340000, - "width": 1920, - "height": 1080, - "mask_file": "mask/val/012834_mpii_test/000000.jpg" - } - ], - "annotations": [ - { - "bbox_head": [ - 378, - 503, - 44, - 53 - ], - "keypoints": [ - 401, - 530, - 1, - 409.5254211, - 555.3547363, - 1, - 392.8559265, - 510.1089478, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 377, - 560, - 1, - 444, - 556, - 1, - 353, - 605, - 1, - 469.5, - 603.5, - 1, - 341.5, - 653.5, - 1, - 463, - 635, - 1, - 389, - 652, - 1, - 442, - 646, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "track_id": 0, - "image_id": 10128340000, - "bbox": [ - 322.3, - 
488.60028996999995, - 166.39999999999998, - 186.40836786 - ], - "scores": [], - "category_id": 1, - "id": 1012834000000, - "iscrowd": false, - "num_keypoints": 11 - }, - { - "bbox_head": [ - 571, - 446, - 42, - 46 - ], - "keypoints": [ - 600.5, - 475.5, - 1, - 590.4649048, - 493.8685303, - 1, - 593.1513062, - 450.3486023, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 570.5, - 509.5, - 1, - 608.5, - 509.5, - 1, - 539, - 558.5, - 1, - 634, - 539, - 1, - 558.5, - 584.5, - 1, - 624.5, - 528.5, - 1, - 605, - 595, - 1, - 601, - 593, - 1, - 640, - 634.5, - 1, - 598, - 672, - 1, - 616.5, - 700.5, - 1, - 0, - 0, - 0 - ], - "track_id": 1, - "image_id": 10128340000, - "bbox": [ - 523.85, - 412.825892645, - 131.29999999999995, - 325.19681700999996 - ], - "scores": [], - "category_id": 1, - "id": 1012834000001, - "iscrowd": false, - "num_keypoints": 14 - }, - { - "bbox_head": [ - 159, - 259, - 42, - 47 - ], - "keypoints": [ - 201, - 284.5, - 1, - 169.9334106, - 305.6158752, - 1, - 187.549942, - 265.1630859, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 139.5, - 307.5, - 1, - 193.5, - 319.5, - 1, - 0, - 0, - 0, - 209, - 371, - 1, - 144, - 365.5, - 1, - 231, - 392, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 222, - 337, - 1, - 241, - 341.5, - 1, - 0, - 0, - 0, - 267, - 416, - 1 - ], - "track_id": 2, - "image_id": 10128340000, - "bbox": [ - 120.375, - 242.53754878499996, - 165.75, - 196.08798833000003 - ], - "scores": [], - "category_id": 1, - "id": 1012834000002, - "iscrowd": false, - "num_keypoints": 11 - }, - { - "bbox_head": [ - 372, - 205, - 44, - 44 - ], - "keypoints": [ - 410.5, - 230.5, - 1, - 387.8875732, - 251.1279602, - 1, - 398.5843201, - 208.9040375, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 359.5, - 262.5, - 1, - 409.5, - 266.5, - 1, - 337.5, - 308.5, - 1, - 450, - 306, - 1, - 292, - 314, - 1, - 480, - 311.5, - 1, - 390, - 339, - 1, - 409, - 339, - 1, - 405.5, - 418.5, - 1, - 447.5, - 366.5, - 1, - 391.5, - 464.5, - 1, - 437.5, - 440.5, - 1 - ], - "track_id": 3, - "image_id": 10128340000, - "bbox": [ - 263.8, - 170.56464312499998, - 244.39999999999998, - 332.27475125 - ], - "scores": [], - "category_id": 1, - "id": 1012834000003, - "iscrowd": false, - "num_keypoints": 15 - }, - { - "bbox_head": [ - 693, - 410, - 44, - 49 - ], - "keypoints": [ - 718.5, - 440.5, - 1, - 717.704834, - 460.703125, - 1, - 712.9713745, - 414.8476562, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 694.5, - 474, - 1, - 743.5, - 472.5, - 1, - 681.5, - 530.5, - 1, - 757.5, - 523.5, - 1, - 667.5, - 564.5, - 1, - 0, - 0, - 0, - 705.5, - 563.5, - 1, - 737.5, - 560.5, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 692.5, - 607.5, - 1, - 716.5, - 603.5, - 1 - ], - "track_id": 4, - "image_id": 10128340000, - "bbox": [ - 654.0, - 385.94980463, - 117.0, - 250.44804694000004 - ], - "scores": [], - "category_id": 1, - "id": 1012834000004, - "iscrowd": false, - "num_keypoints": 12 - }, - { - "bbox_head": [ - 923, - 347, - 46, - 58 - ], - "keypoints": [ - 965.5, - 382.5, - 1, - 933.9436646, - 403.0452576, - 1, - 955.0422363, - 355.7160645, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 922.5, - 403.5, - 1, - 932.5, - 431.5, - 1, - 0, - 0, - 0, - 960, - 475.5, - 1, - 0, - 0, - 0, - 991.5, - 462.5, - 1, - 934.5, - 512.5, - 1, - 922.5, - 506.5, - 1, - 946.5, - 567.5, - 1, - 964, - 578, - 1, - 900.5, - 598, - 1, - 936, - 634.5, - 1 - ], - "track_id": 5, - "image_id": 10128340000, - "bbox": [ - 886.85, - 313.89847417500005, - 118.29999999999995, - 362.4191161499999 - ], - "scores": [], - "category_id": 1, - "id": 1012834000005, - "iscrowd": false, - "num_keypoints": 13 - }, - { - "bbox_head": [ 
- 691, - 179, - 43, - 52 - ], - "keypoints": [ - 708.5, - 212.5, - 1, - 722.6444702, - 230.0113831, - 1, - 704.8916626, - 186.2414551, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 712, - 244, - 1, - 742, - 237.5, - 1, - 723, - 293.5, - 1, - 745.5, - 281.5, - 1, - 692, - 319, - 1, - 0, - 0, - 0, - 722, - 323.5, - 1, - 748.5, - 314, - 1, - 657.5, - 301.5, - 1, - 668.5, - 299.5, - 1, - 670.5, - 367.5, - 1, - 689.5, - 362.5, - 1 - ], - "track_id": 6, - "image_id": 10128340000, - "bbox": [ - 643.85, - 159.05267336499998, - 118.29999999999995, - 235.63610837 - ], - "scores": [], - "category_id": 1, - "id": 1012834000006, - "iscrowd": false, - "num_keypoints": 14 - }, - { - "bbox_head": [ - 927, - 160, - 39, - 52 - ], - "keypoints": [ - 952, - 189, - 1, - 946.763916, - 211.9986572, - 1, - 946.302063, - 166.5010071, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 914.5, - 234, - 1, - 979.5, - 236.5, - 1, - 890.5, - 270.5, - 1, - 998.5, - 286.5, - 1, - 894.5, - 324, - 1, - 0, - 0, - 0, - 932, - 326.5, - 1, - 958.5, - 327.5, - 1, - 1000.5, - 340.5, - 1, - 993.5, - 372.5, - 1, - 955.5, - 383.5, - 1, - 959.5, - 446.5, - 1 - ], - "track_id": 7, - "image_id": 10128340000, - "bbox": [ - 874.0, - 124.50115816500005, - 143.0, - 363.99869076999994 - ], - "scores": [], - "category_id": 1, - "id": 1012834000007, - "iscrowd": false, - "num_keypoints": 14 - }, - { - "bbox_head": [ - 1367, - 427, - 47, - 45 - ], - "keypoints": [ - 1406, - 451, - 1, - 1379.198608, - 472.946106, - 1, - 1398.976074, - 431.9154358, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 1375.5, - 467.5, - 1, - 1372, - 501, - 1, - 0, - 0, - 0, - 1343.5, - 534.5, - 1, - 0, - 0, - 0, - 1339.5, - 573.5, - 1, - 1381.5, - 531.5, - 1, - 1376, - 539.5, - 1, - 1452.5, - 524.5, - 1, - 1453.5, - 535.5, - 1, - 1469.5, - 603.5, - 1, - 1466, - 610, - 1 - ], - "track_id": 8, - "image_id": 10128340000, - "bbox": [ - 1320.0, - 405.20275117000006, - 169.0, - 231.50993345999996 - ], - "scores": [], - "category_id": 1, - "id": 1012834000008, - "iscrowd": false, - "num_keypoints": 13 - }, - { - "bbox_head": [ - 1378, - 204, - 40, - 44 - ], - "keypoints": [ - 1389, - 234, - 1, - 1404.137573, - 248.9802094, - 1, - 1393.396851, - 208.7648468, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 1375, - 272, - 1, - 1442.5, - 260.5, - 1, - 1374, - 315, - 1, - 1468, - 303.5, - 1, - 1367, - 340.5, - 1, - 1462.5, - 330.5, - 1, - 1407, - 349.5, - 1, - 1439, - 340.5, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "track_id": 9, - "image_id": 10128340000, - "bbox": [ - 1351.85, - 187.65457382, - 131.30000000000018, - 182.95569916 - ], - "scores": [], - "category_id": 1, - "id": 1012834000009, - "iscrowd": false, - "num_keypoints": 11 - }, - { - "bbox_head": [ - 407, - -29, - 35, - 40 - ], - "keypoints": [ - 0, - 0, - 0, - 425.1159668, - 12.25136662, - 1, - 424.0380249, - -24.93852425, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 455.5, - 21.5, - 1, - 395.5, - 29.5, - 1, - 474.5, - 64.5, - 1, - 391.5, - 67, - 1, - 474, - 108, - 1, - 379, - 107, - 1, - 446, - 88, - 1, - 426, - 88, - 1, - 424, - 113, - 1, - 403, - 113, - 1, - 430, - 173, - 1, - 415, - 171, - 1 - ], - "track_id": 10, - "image_id": 10128340000, - "bbox": [ - 364.675, - -54.62930288750002, - 124.14999999999998, - 257.32008152500003 - ], - "scores": [], - "category_id": 1, - "id": 1012834000010, - "iscrowd": false, - "num_keypoints": 14 - } - ] -} +{ + "categories": [ + { + "supercategory": "person", + "id": 1, + "name": "person", + "keypoints": [ + "nose", + "head_bottom", + "head_top", + "left_ear", + "right_ear", + "left_shoulder", + 
"right_shoulder", + "left_elbow", + "right_elbow", + "left_wrist", + "right_wrist", + "left_hip", + "right_hip", + "left_knee", + "right_knee", + "left_ankle", + "right_ankle" + ], + "skeleton": [ + [ + 16, + 14 + ], + [ + 14, + 12 + ], + [ + 17, + 15 + ], + [ + 15, + 13 + ], + [ + 12, + 13 + ], + [ + 6, + 12 + ], + [ + 7, + 13 + ], + [ + 6, + 7 + ], + [ + 6, + 8 + ], + [ + 7, + 9 + ], + [ + 8, + 10 + ], + [ + 9, + 11 + ], + [ + 2, + 3 + ], + [ + 1, + 2 + ], + [ + 1, + 3 + ], + [ + 2, + 4 + ], + [ + 3, + 5 + ], + [ + 4, + 6 + ], + [ + 5, + 7 + ] + ] + } + ], + "images": [ + { + "has_no_densepose": true, + "is_labeled": true, + "file_name": "images/val/012834_mpii_test/000000.jpg", + "nframes": 140, + "frame_id": 10128340000, + "vid_id": "012834", + "ignore_regions_y": [ + [ + 1079, + 615, + 612, + 674, + 660, + 664, + 678, + 713, + 704, + 667, + 665, + 678, + 700, + 729, + 753, + 740, + 695, + 668, + 646, + 623, + 624, + 659, + 676, + 685, + 695, + 678, + 675, + 673, + 663, + 693, + 703, + 732, + 719, + 690, + 669, + 660, + 660, + 663, + 689, + 697, + 691, + 697, + 713, + 736, + 721, + 703, + 698, + 708, + 741, + 758, + 778, + 795, + 780, + 757, + 745, + 737, + 745, + 782, + 809, + 850, + 881, + 835, + 810, + 806, + 782, + 782, + 806, + 832, + 863, + 897, + 940, + 931, + 957, + 976, + 1003, + 1043, + 1045, + 1008, + 965, + 963, + 931, + 895, + 846, + 832, + 829, + 869, + 898, + 904, + 869, + 836, + 782, + 752, + 736, + 748, + 776, + 832, + 874, + 862, + 818, + 793, + 790, + 756, + 738, + 737, + 750, + 791, + 806, + 820, + 852, + 879, + 837, + 794, + 772, + 766, + 769, + 790, + 805, + 829, + 844, + 866, + 837, + 804, + 791, + 773, + 745, + 706, + 683, + 644, + 638, + 662, + 694, + 716, + 736, + 777, + 784, + 815, + 830, + 813, + 800, + 813, + 820, + 847, + 829, + 781, + 780, + 801, + 836, + 886, + 938, + 1018, + 1029, + 1079 + ], + [ + 0, + 21, + 43, + 60, + 90, + 95, + 95, + 43, + 40, + 84, + 104, + 104, + 74, + 6, + 4, + 71, + 69, + 0 + ], + [ + 0, + 4, + 48, + 106, + 214, + 207, + 46, + 50, + 170, + 156, + 96, + 157, + 160, + 62, + 65, + 156, + 165, + 162, + 140, + 93, + 93, + 7, + 4, + 121, + 129, + 84, + 75, + 68 + ], + [ + 0, + 0, + 739, + 729, + 720, + 740, + 768, + 785, + 803, + 815, + 757, + 735, + 632, + 620, + 632, + 640, + 662, + 656, + 607, + 645, + 645, + 628, + 604, + 570, + 543, + 512, + 485, + 467, + 451, + 448, + 456, + 482, + 512, + 548, + 554, + 542, + 498, + 479, + 454, + 404, + 387, + 398, + 415, + 528, + 546, + 468, + 410, + 400, + 359, + 375, + 373, + 273, + 254, + 284, + 253, + 204, + 206 + ] + ], + "ignore_regions_x": [ + [ + 3, + 0, + 30, + 44, + 74, + 99, + 106, + 102, + 115, + 121, + 141, + 156, + 165, + 187, + 200, + 211, + 196, + 198, + 210, + 226, + 252, + 266, + 263, + 271, + 291, + 299, + 326, + 339, + 360, + 399, + 412, + 424, + 437, + 432, + 439, + 461, + 489, + 510, + 534, + 548, + 559, + 567, + 587, + 593, + 604, + 612, + 633, + 652, + 645, + 638, + 649, + 650, + 661, + 654, + 662, + 685, + 713, + 727, + 733, + 752, + 762, + 769, + 785, + 812, + 841, + 863, + 869, + 877, + 899, + 909, + 918, + 906, + 902, + 909, + 917, + 900, + 932, + 932, + 941, + 919, + 926, + 935, + 950, + 957, + 983, + 1002, + 1007, + 1032, + 1034, + 1018, + 1018, + 1038, + 1074, + 1106, + 1119, + 1121, + 1130, + 1148, + 1152, + 1172, + 1195, + 1199, + 1209, + 1229, + 1242, + 1240, + 1242, + 1261, + 1264, + 1277, + 1285, + 1286, + 1296, + 1313, + 1336, + 1350, + 1367, + 1403, + 1417, + 1435, + 1459, + 1456, + 1429, + 1420, + 1465, + 1492, + 1496, + 1507, + 1529, + 1553, + 1570, + 
1596, + 1609, + 1610, + 1649, + 1671, + 1703, + 1740, + 1763, + 1775, + 1803, + 1809, + 1815, + 1815, + 1857, + 1874, + 1881, + 1897, + 1896, + 1899, + 1888, + 1884 + ], + [ + 378, + 381, + 365, + 359, + 334, + 292, + 257, + 262, + 231, + 236, + 219, + 193, + 196, + 183, + 154, + 159, + 140, + 121 + ], + [ + 451, + 1173, + 1168, + 1168, + 1170, + 1085, + 1098, + 1070, + 1043, + 1000, + 993, + 979, + 934, + 937, + 918, + 903, + 893, + 832, + 785, + 759, + 726, + 710, + 667, + 664, + 585, + 576, + 507, + 485 + ], + [ + 1312, + 1918, + 1917, + 1895, + 1867, + 1835, + 1804, + 1779, + 1754, + 1720, + 1726, + 1739, + 1740, + 1735, + 1701, + 1635, + 1587, + 1578, + 1587, + 1564, + 1550, + 1543, + 1562, + 1579, + 1578, + 1581, + 1584, + 1589, + 1601, + 1610, + 1621, + 1637, + 1642, + 1659, + 1673, + 1681, + 1673, + 1671, + 1664, + 1671, + 1681, + 1728, + 1734, + 1789, + 1854, + 1807, + 1820, + 1778, + 1778, + 1717, + 1642, + 1635, + 1600, + 1520, + 1454, + 1415, + 1395 + ] + ], + "id": 10128340000, + "width": 1920, + "height": 1080, + "mask_file": "mask/val/012834_mpii_test/000000.jpg" + } + ], + "annotations": [ + { + "bbox_head": [ + 378, + 503, + 44, + 53 + ], + "keypoints": [ + 401, + 530, + 1, + 409.5254211, + 555.3547363, + 1, + 392.8559265, + 510.1089478, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 377, + 560, + 1, + 444, + 556, + 1, + 353, + 605, + 1, + 469.5, + 603.5, + 1, + 341.5, + 653.5, + 1, + 463, + 635, + 1, + 389, + 652, + 1, + 442, + 646, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "track_id": 0, + "image_id": 10128340000, + "bbox": [ + 322.3, + 488.60028996999995, + 166.39999999999998, + 186.40836786 + ], + "scores": [], + "category_id": 1, + "id": 1012834000000, + "iscrowd": false, + "num_keypoints": 11 + }, + { + "bbox_head": [ + 571, + 446, + 42, + 46 + ], + "keypoints": [ + 600.5, + 475.5, + 1, + 590.4649048, + 493.8685303, + 1, + 593.1513062, + 450.3486023, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 570.5, + 509.5, + 1, + 608.5, + 509.5, + 1, + 539, + 558.5, + 1, + 634, + 539, + 1, + 558.5, + 584.5, + 1, + 624.5, + 528.5, + 1, + 605, + 595, + 1, + 601, + 593, + 1, + 640, + 634.5, + 1, + 598, + 672, + 1, + 616.5, + 700.5, + 1, + 0, + 0, + 0 + ], + "track_id": 1, + "image_id": 10128340000, + "bbox": [ + 523.85, + 412.825892645, + 131.29999999999995, + 325.19681700999996 + ], + "scores": [], + "category_id": 1, + "id": 1012834000001, + "iscrowd": false, + "num_keypoints": 14 + }, + { + "bbox_head": [ + 159, + 259, + 42, + 47 + ], + "keypoints": [ + 201, + 284.5, + 1, + 169.9334106, + 305.6158752, + 1, + 187.549942, + 265.1630859, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 139.5, + 307.5, + 1, + 193.5, + 319.5, + 1, + 0, + 0, + 0, + 209, + 371, + 1, + 144, + 365.5, + 1, + 231, + 392, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 222, + 337, + 1, + 241, + 341.5, + 1, + 0, + 0, + 0, + 267, + 416, + 1 + ], + "track_id": 2, + "image_id": 10128340000, + "bbox": [ + 120.375, + 242.53754878499996, + 165.75, + 196.08798833000003 + ], + "scores": [], + "category_id": 1, + "id": 1012834000002, + "iscrowd": false, + "num_keypoints": 11 + }, + { + "bbox_head": [ + 372, + 205, + 44, + 44 + ], + "keypoints": [ + 410.5, + 230.5, + 1, + 387.8875732, + 251.1279602, + 1, + 398.5843201, + 208.9040375, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 359.5, + 262.5, + 1, + 409.5, + 266.5, + 1, + 337.5, + 308.5, + 1, + 450, + 306, + 1, + 292, + 314, + 1, + 480, + 311.5, + 1, + 390, + 339, + 1, + 409, + 339, + 1, + 405.5, + 418.5, + 1, + 447.5, + 366.5, + 1, + 391.5, + 464.5, + 1, + 437.5, + 440.5, + 1 + ], + 
"track_id": 3, + "image_id": 10128340000, + "bbox": [ + 263.8, + 170.56464312499998, + 244.39999999999998, + 332.27475125 + ], + "scores": [], + "category_id": 1, + "id": 1012834000003, + "iscrowd": false, + "num_keypoints": 15 + }, + { + "bbox_head": [ + 693, + 410, + 44, + 49 + ], + "keypoints": [ + 718.5, + 440.5, + 1, + 717.704834, + 460.703125, + 1, + 712.9713745, + 414.8476562, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 694.5, + 474, + 1, + 743.5, + 472.5, + 1, + 681.5, + 530.5, + 1, + 757.5, + 523.5, + 1, + 667.5, + 564.5, + 1, + 0, + 0, + 0, + 705.5, + 563.5, + 1, + 737.5, + 560.5, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 692.5, + 607.5, + 1, + 716.5, + 603.5, + 1 + ], + "track_id": 4, + "image_id": 10128340000, + "bbox": [ + 654.0, + 385.94980463, + 117.0, + 250.44804694000004 + ], + "scores": [], + "category_id": 1, + "id": 1012834000004, + "iscrowd": false, + "num_keypoints": 12 + }, + { + "bbox_head": [ + 923, + 347, + 46, + 58 + ], + "keypoints": [ + 965.5, + 382.5, + 1, + 933.9436646, + 403.0452576, + 1, + 955.0422363, + 355.7160645, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 922.5, + 403.5, + 1, + 932.5, + 431.5, + 1, + 0, + 0, + 0, + 960, + 475.5, + 1, + 0, + 0, + 0, + 991.5, + 462.5, + 1, + 934.5, + 512.5, + 1, + 922.5, + 506.5, + 1, + 946.5, + 567.5, + 1, + 964, + 578, + 1, + 900.5, + 598, + 1, + 936, + 634.5, + 1 + ], + "track_id": 5, + "image_id": 10128340000, + "bbox": [ + 886.85, + 313.89847417500005, + 118.29999999999995, + 362.4191161499999 + ], + "scores": [], + "category_id": 1, + "id": 1012834000005, + "iscrowd": false, + "num_keypoints": 13 + }, + { + "bbox_head": [ + 691, + 179, + 43, + 52 + ], + "keypoints": [ + 708.5, + 212.5, + 1, + 722.6444702, + 230.0113831, + 1, + 704.8916626, + 186.2414551, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 712, + 244, + 1, + 742, + 237.5, + 1, + 723, + 293.5, + 1, + 745.5, + 281.5, + 1, + 692, + 319, + 1, + 0, + 0, + 0, + 722, + 323.5, + 1, + 748.5, + 314, + 1, + 657.5, + 301.5, + 1, + 668.5, + 299.5, + 1, + 670.5, + 367.5, + 1, + 689.5, + 362.5, + 1 + ], + "track_id": 6, + "image_id": 10128340000, + "bbox": [ + 643.85, + 159.05267336499998, + 118.29999999999995, + 235.63610837 + ], + "scores": [], + "category_id": 1, + "id": 1012834000006, + "iscrowd": false, + "num_keypoints": 14 + }, + { + "bbox_head": [ + 927, + 160, + 39, + 52 + ], + "keypoints": [ + 952, + 189, + 1, + 946.763916, + 211.9986572, + 1, + 946.302063, + 166.5010071, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 914.5, + 234, + 1, + 979.5, + 236.5, + 1, + 890.5, + 270.5, + 1, + 998.5, + 286.5, + 1, + 894.5, + 324, + 1, + 0, + 0, + 0, + 932, + 326.5, + 1, + 958.5, + 327.5, + 1, + 1000.5, + 340.5, + 1, + 993.5, + 372.5, + 1, + 955.5, + 383.5, + 1, + 959.5, + 446.5, + 1 + ], + "track_id": 7, + "image_id": 10128340000, + "bbox": [ + 874.0, + 124.50115816500005, + 143.0, + 363.99869076999994 + ], + "scores": [], + "category_id": 1, + "id": 1012834000007, + "iscrowd": false, + "num_keypoints": 14 + }, + { + "bbox_head": [ + 1367, + 427, + 47, + 45 + ], + "keypoints": [ + 1406, + 451, + 1, + 1379.198608, + 472.946106, + 1, + 1398.976074, + 431.9154358, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 1375.5, + 467.5, + 1, + 1372, + 501, + 1, + 0, + 0, + 0, + 1343.5, + 534.5, + 1, + 0, + 0, + 0, + 1339.5, + 573.5, + 1, + 1381.5, + 531.5, + 1, + 1376, + 539.5, + 1, + 1452.5, + 524.5, + 1, + 1453.5, + 535.5, + 1, + 1469.5, + 603.5, + 1, + 1466, + 610, + 1 + ], + "track_id": 8, + "image_id": 10128340000, + "bbox": [ + 1320.0, + 405.20275117000006, + 169.0, + 231.50993345999996 + ], + "scores": [], + "category_id": 
1, + "id": 1012834000008, + "iscrowd": false, + "num_keypoints": 13 + }, + { + "bbox_head": [ + 1378, + 204, + 40, + 44 + ], + "keypoints": [ + 1389, + 234, + 1, + 1404.137573, + 248.9802094, + 1, + 1393.396851, + 208.7648468, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 1375, + 272, + 1, + 1442.5, + 260.5, + 1, + 1374, + 315, + 1, + 1468, + 303.5, + 1, + 1367, + 340.5, + 1, + 1462.5, + 330.5, + 1, + 1407, + 349.5, + 1, + 1439, + 340.5, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "track_id": 9, + "image_id": 10128340000, + "bbox": [ + 1351.85, + 187.65457382, + 131.30000000000018, + 182.95569916 + ], + "scores": [], + "category_id": 1, + "id": 1012834000009, + "iscrowd": false, + "num_keypoints": 11 + }, + { + "bbox_head": [ + 407, + -29, + 35, + 40 + ], + "keypoints": [ + 0, + 0, + 0, + 425.1159668, + 12.25136662, + 1, + 424.0380249, + -24.93852425, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 455.5, + 21.5, + 1, + 395.5, + 29.5, + 1, + 474.5, + 64.5, + 1, + 391.5, + 67, + 1, + 474, + 108, + 1, + 379, + 107, + 1, + 446, + 88, + 1, + 426, + 88, + 1, + 424, + 113, + 1, + 403, + 113, + 1, + 430, + 173, + 1, + 415, + 171, + 1 + ], + "track_id": 10, + "image_id": 10128340000, + "bbox": [ + 364.675, + -54.62930288750002, + 124.14999999999998, + 257.32008152500003 + ], + "scores": [], + "category_id": 1, + "id": 1012834000010, + "iscrowd": false, + "num_keypoints": 14 + } + ] +} diff --git a/tests/data/rhd/test_rhd.json b/tests/data/rhd/test_rhd.json index d469c88192..de693b0ffb 100644 --- a/tests/data/rhd/test_rhd.json +++ b/tests/data/rhd/test_rhd.json @@ -1,857 +1,857 @@ -{ - "info": { - "description": "RHD", - "version": "1.1", - "year": "2021", - "date_created": "2021/03/13" - }, - "licenses": "", - "images": [ - { - "file_name": "00111.png", - "height": 320, - "width": 320, - "id": 111, - "cam_param": { - "focal": [ - 299.0, - 299.0 - ], - "princpt": [ - 160.0, - 160.0 - ] - } - }, - { - "file_name": "01111.png", - "height": 320, - "width": 320, - "id": 1111, - "cam_param": { - "focal": [ - 305.8999938964844, - 305.8999938964844 - ], - "princpt": [ - 160.0, - 160.0 - ] - } - }, - { - "file_name": "11111.png", - "height": 320, - "width": 320, - "id": 11111, - "cam_param": { - "focal": [ - 263.20001220703125, - 263.20001220703125 - ], - "princpt": [ - 160.0, - 160.0 - ] - } - } - ], - "annotations": [ - { - "id": 111, - "image_id": 111, - "category_id": 1, - "iscrowd": 0, - "bbox": [ - 162.5699920654297, - 63.858001708984375, - 93.96000671386719, - 121.4639892578125 - ], - "area": 11412.757246157154, - "keypoints": [ - [ - 245.8000030517578, - 73.9800033569336, - 1 - ], - [ - 170.39999389648438, - 115.0, - 1 - ], - [ - 180.39999389648438, - 103.69999694824219, - 1 - ], - [ - 194.10000610351562, - 89.16999816894531, - 1 - ], - [ - 220.0, - 82.51000213623047, - 1 - ], - [ - 193.0, - 163.60000610351562, - 1 - ], - [ - 194.5, - 154.1999969482422, - 1 - ], - [ - 196.6999969482422, - 144.39999389648438, - 1 - ], - [ - 202.39999389648438, - 123.69999694824219, - 1 - ], - [ - 208.39999389648438, - 175.1999969482422, - 1 - ], - [ - 210.6999969482422, - 164.3000030517578, - 1 - ], - [ - 211.60000610351562, - 152.10000610351562, - 1 - ], - [ - 215.60000610351562, - 131.5, - 1 - ], - [ - 228.10000610351562, - 167.1999969482422, - 1 - ], - [ - 228.6999969482422, - 159.0, - 1 - ], - [ - 228.1999969482422, - 151.10000610351562, - 1 - ], - [ - 228.6999969482422, - 132.1999969482422, - 1 - ], - [ - 248.6999969482422, - 143.6999969482422, - 1 - ], - [ - 247.6999969482422, - 148.3000030517578, - 1 - 
], - [ - 246.39999389648438, - 148.10000610351562, - 1 - ], - [ - 244.39999389648438, - 129.1999969482422, - 1 - ] - ], - "joint_cam": [ - [ - 117.9000015258789, - -118.19999694824219, - 411.0 - ], - [ - 17.09000015258789, - -73.70999908447266, - 489.5 - ], - [ - 32.540000915527344, - -89.88999938964844, - 477.20001220703125 - ], - [ - 52.959999084472656, - -110.0, - 464.3999938964844 - ], - [ - 87.95999908447266, - -113.70000457763672, - 438.6999816894531 - ], - [ - 56.71000289916992, - 6.140999794006348, - 514.1000366210938 - ], - [ - 56.7599983215332, - -9.583000183105469, - 492.1000061035156 - ], - [ - 57.97999954223633, - -24.700000762939453, - 472.79998779296875 - ], - [ - 64.52000427246094, - -55.34000015258789, - 455.5 - ], - [ - 81.20000457763672, - 25.459999084472656, - 501.6999816894531 - ], - [ - 81.8800048828125, - 6.9070000648498535, - 483.3999938964844 - ], - [ - 80.05999755859375, - -12.220000267028809, - 464.1000061035156 - ], - [ - 83.37000274658203, - -42.70000076293945, - 448.0 - ], - [ - 113.20000457763672, - 11.960000038146973, - 496.6999816894531 - ], - [ - 109.0999984741211, - -1.5170000791549683, - 475.20001220703125 - ], - [ - 103.20000457763672, - -13.480000495910645, - 452.6999816894531 - ], - [ - 102.0, - -41.32999801635742, - 443.79998779296875 - ], - [ - 143.09999084472656, - -26.25, - 482.3000183105469 - ], - [ - 137.0, - -18.23000144958496, - 467.29998779296875 - ], - [ - 130.60000610351562, - -18.049999237060547, - 452.0 - ], - [ - 124.9000015258789, - -45.51000213623047, - 442.0999755859375 - ] - ], - "hand_type": "left" - }, - { - "id": 1111, - "image_id": 1111, - "category_id": 1, - "iscrowd": 0, - "bbox": [ - 163.91000366210938, - 92.3900146484375, - 59.88001251220703, - 118.91999053955078 - ], - "area": 7120.930521459843, - "keypoints": [ - [ - 218.8000030517578, - 201.39999389648438, - 1 - ], - [ - 168.89999389648438, - 147.89999389648438, - 1 - ], - [ - 171.5, - 161.6999969482422, - 1 - ], - [ - 182.3000030517578, - 178.0, - 1 - ], - [ - 202.60000610351562, - 191.10000610351562, - 1 - ], - [ - 184.6999969482422, - 174.1999969482422, - 1 - ], - [ - 175.6999969482422, - 162.39999389648438, - 1 - ], - [ - 180.60000610351562, - 148.10000610351562, - 1 - ], - [ - 200.5, - 153.3000030517578, - 1 - ], - [ - 181.5, - 173.89999389648438, - 1 - ], - [ - 180.0, - 156.5, - 1 - ], - [ - 185.10000610351562, - 140.89999389648438, - 1 - ], - [ - 205.6999969482422, - 146.6999969482422, - 1 - ], - [ - 175.39999389648438, - 160.3000030517578, - 1 - ], - [ - 183.60000610351562, - 145.3000030517578, - 1 - ], - [ - 194.89999389648438, - 132.6999969482422, - 1 - ], - [ - 208.8000030517578, - 145.6999969482422, - 1 - ], - [ - 199.39999389648438, - 102.30000305175781, - 1 - ], - [ - 201.89999389648438, - 116.19999694824219, - 1 - ], - [ - 208.10000610351562, - 127.4000015258789, - 1 - ], - [ - 210.60000610351562, - 148.5, - 1 - ] - ], - "joint_cam": [ - [ - 96.12000274658203, - 67.63999938964844, - 499.70001220703125 - ], - [ - 15.380000114440918, - -20.969999313354492, - 528.0 - ], - [ - 20.209999084472656, - 3.059999942779541, - 536.0999755859375 - ], - [ - 38.869998931884766, - 31.470001220703125, - 533.2999877929688 - ], - [ - 73.62000274658203, - 53.81999969482422, - 529.2000122070312 - ], - [ - 42.380001068115234, - 24.42999839782715, - 524.7999877929688 - ], - [ - 27.260000228881836, - 4.211999893188477, - 529.7999877929688 - ], - [ - 36.38999938964844, - -21.010000228881836, - 540.0999755859375 - ], - [ - 71.79000091552734, - -11.949999809265137, - 542.7000122070312 
- ], - [ - 35.5, - 22.989999771118164, - 505.5999755859375 - ], - [ - 33.540000915527344, - -5.878000259399414, - 512.9000244140625 - ], - [ - 42.69000244140625, - -32.55999755859375, - 521.0999755859375 - ], - [ - 78.16999816894531, - -22.75, - 522.8999633789062 - ], - [ - 24.560001373291016, - 0.47450000047683716, - 487.79998779296875 - ], - [ - 38.18000030517578, - -23.760000228881836, - 494.1000061035156 - ], - [ - 57.27000045776367, - -44.72999954223633, - 501.3999938964844 - ], - [ - 80.33000183105469, - -23.520000457763672, - 503.8999938964844 - ], - [ - 58.06999969482422, - -85.06999969482422, - 450.8999938964844 - ], - [ - 62.619998931884766, - -65.56999969482422, - 457.5 - ], - [ - 73.29999542236328, - -49.72999954223633, - 466.3999938964844 - ], - [ - 79.80000305175781, - -18.200000762939453, - 482.3000183105469 - ] - ], - "hand_type": "left" - }, - { - "id": 11111, - "image_id": 11111, - "category_id": 1, - "iscrowd": 0, - "bbox": [ - 162.8300018310547, - 135.88999938964844, - 59.63999557495117, - 48.1200065612793 - ], - "area": 2869.8769783813186, - "keypoints": [ - [ - 167.8000030517578, - 154.10000610351562, - 1 - ], - [ - 217.5, - 146.60000610351562, - 1 - ], - [ - 207.8000030517578, - 146.39999389648438, - 1 - ], - [ - 197.10000610351562, - 144.8000030517578, - 1 - ], - [ - 181.5, - 148.0, - 1 - ], - [ - 206.8000030517578, - 167.1999969482422, - 1 - ], - [ - 206.5, - 160.1999969482422, - 1 - ], - [ - 202.10000610351562, - 150.89999389648438, - 1 - ], - [ - 191.39999389648438, - 139.89999389648438, - 1 - ], - [ - 200.0, - 170.0, - 1 - ], - [ - 200.39999389648438, - 163.6999969482422, - 1 - ], - [ - 198.0, - 155.6999969482422, - 1 - ], - [ - 186.39999389648438, - 145.10000610351562, - 1 - ], - [ - 194.6999969482422, - 172.39999389648438, - 1 - ], - [ - 195.1999969482422, - 168.10000610351562, - 1 - ], - [ - 191.8000030517578, - 160.89999389648438, - 1 - ], - [ - 182.0, - 151.6999969482422, - 1 - ], - [ - 198.1999969482422, - 180.0, - 1 - ], - [ - 195.0, - 176.3000030517578, - 1 - ], - [ - 189.10000610351562, - 170.39999389648438, - 1 - ], - [ - 177.3000030517578, - 160.1999969482422, - 1 - ] - ], - "joint_cam": [ - [ - 20.479999542236328, - -15.470000267028809, - 693.2999877929688 - ], - [ - 138.40000915527344, - -32.17000198364258, - 633.2000122070312 - ], - [ - 118.0, - -33.62999725341797, - 649.2000122070312 - ], - [ - 94.87999725341797, - -38.78999710083008, - 673.699951171875 - ], - [ - 56.01000213623047, - -31.309999465942383, - 686.2999877929688 - ], - [ - 116.80000305175781, - 18.049999237060547, - 656.7999877929688 - ], - [ - 112.80000305175781, - 0.48660001158714294, - 638.2999877929688 - ], - [ - 99.94000244140625, - -21.559999465942383, - 625.199951171875 - ], - [ - 74.3699951171875, - -47.75, - 623.7999877929688 - ], - [ - 99.95999908447266, - 24.979999542236328, - 658.5 - ], - [ - 97.34000396728516, - 9.000999450683594, - 633.4000244140625 - ], - [ - 88.5199966430664, - -9.983000755310059, - 612.7999877929688 - ], - [ - 61.19000244140625, - -34.4900016784668, - 609.6000366210938 - ], - [ - 86.45000457763672, - 31.0, - 655.6000366210938 - ], - [ - 84.11000061035156, - 19.43000030517578, - 629.7000122070312 - ], - [ - 73.44000244140625, - 2.186999797821045, - 608.6000366210938 - ], - [ - 50.53000259399414, - -19.139999389648438, - 605.7000122070312 - ], - [ - 91.06999969482422, - 47.60000228881836, - 626.6000366210938 - ], - [ - 81.22000122070312, - 37.78000259399414, - 610.800048828125 - ], - [ - 66.75, - 23.989999771118164, - 604.0999755859375 - ], - [ - 
39.94999694824219, - 0.4115999937057495, - 607.7999877929688 - ] - ], - "hand_type": "right" - } - ], - "categories": [ - { - "id": 1, - "name": "hand", - "supercategory": "hand", - "keypoints": [ - "wrist", - "thumb1", - "thumb2", - "thumb3", - "thumb4", - "forefinger1", - "forefinger2", - "forefinger3", - "forefinger4", - "middle_finger1", - "middle_finger2", - "middle_finger3", - "middle_finger4", - "ring_finger1", - "ring_finger2", - "ring_finger3", - "ring_finger4", - "pinky_finger1", - "pinky_finger2", - "pinky_finger3", - "pinky_finger4" - ], - "skeleton": [ - [ - 1, - 2 - ], - [ - 2, - 3 - ], - [ - 3, - 4 - ], - [ - 4, - 5 - ], - [ - 1, - 6 - ], - [ - 6, - 7 - ], - [ - 7, - 8 - ], - [ - 8, - 9 - ], - [ - 1, - 10 - ], - [ - 10, - 11 - ], - [ - 11, - 12 - ], - [ - 12, - 13 - ], - [ - 1, - 14 - ], - [ - 14, - 15 - ], - [ - 15, - 16 - ], - [ - 16, - 17 - ], - [ - 1, - 18 - ], - [ - 18, - 19 - ], - [ - 19, - 20 - ], - [ - 20, - 21 - ] - ] - } - ] +{ + "info": { + "description": "RHD", + "version": "1.1", + "year": "2021", + "date_created": "2021/03/13" + }, + "licenses": "", + "images": [ + { + "file_name": "00111.png", + "height": 320, + "width": 320, + "id": 111, + "cam_param": { + "focal": [ + 299.0, + 299.0 + ], + "princpt": [ + 160.0, + 160.0 + ] + } + }, + { + "file_name": "01111.png", + "height": 320, + "width": 320, + "id": 1111, + "cam_param": { + "focal": [ + 305.8999938964844, + 305.8999938964844 + ], + "princpt": [ + 160.0, + 160.0 + ] + } + }, + { + "file_name": "11111.png", + "height": 320, + "width": 320, + "id": 11111, + "cam_param": { + "focal": [ + 263.20001220703125, + 263.20001220703125 + ], + "princpt": [ + 160.0, + 160.0 + ] + } + } + ], + "annotations": [ + { + "id": 111, + "image_id": 111, + "category_id": 1, + "iscrowd": 0, + "bbox": [ + 162.5699920654297, + 63.858001708984375, + 93.96000671386719, + 121.4639892578125 + ], + "area": 11412.757246157154, + "keypoints": [ + [ + 245.8000030517578, + 73.9800033569336, + 1 + ], + [ + 170.39999389648438, + 115.0, + 1 + ], + [ + 180.39999389648438, + 103.69999694824219, + 1 + ], + [ + 194.10000610351562, + 89.16999816894531, + 1 + ], + [ + 220.0, + 82.51000213623047, + 1 + ], + [ + 193.0, + 163.60000610351562, + 1 + ], + [ + 194.5, + 154.1999969482422, + 1 + ], + [ + 196.6999969482422, + 144.39999389648438, + 1 + ], + [ + 202.39999389648438, + 123.69999694824219, + 1 + ], + [ + 208.39999389648438, + 175.1999969482422, + 1 + ], + [ + 210.6999969482422, + 164.3000030517578, + 1 + ], + [ + 211.60000610351562, + 152.10000610351562, + 1 + ], + [ + 215.60000610351562, + 131.5, + 1 + ], + [ + 228.10000610351562, + 167.1999969482422, + 1 + ], + [ + 228.6999969482422, + 159.0, + 1 + ], + [ + 228.1999969482422, + 151.10000610351562, + 1 + ], + [ + 228.6999969482422, + 132.1999969482422, + 1 + ], + [ + 248.6999969482422, + 143.6999969482422, + 1 + ], + [ + 247.6999969482422, + 148.3000030517578, + 1 + ], + [ + 246.39999389648438, + 148.10000610351562, + 1 + ], + [ + 244.39999389648438, + 129.1999969482422, + 1 + ] + ], + "joint_cam": [ + [ + 117.9000015258789, + -118.19999694824219, + 411.0 + ], + [ + 17.09000015258789, + -73.70999908447266, + 489.5 + ], + [ + 32.540000915527344, + -89.88999938964844, + 477.20001220703125 + ], + [ + 52.959999084472656, + -110.0, + 464.3999938964844 + ], + [ + 87.95999908447266, + -113.70000457763672, + 438.6999816894531 + ], + [ + 56.71000289916992, + 6.140999794006348, + 514.1000366210938 + ], + [ + 56.7599983215332, + -9.583000183105469, + 492.1000061035156 + ], + [ + 57.97999954223633, + 
-24.700000762939453, + 472.79998779296875 + ], + [ + 64.52000427246094, + -55.34000015258789, + 455.5 + ], + [ + 81.20000457763672, + 25.459999084472656, + 501.6999816894531 + ], + [ + 81.8800048828125, + 6.9070000648498535, + 483.3999938964844 + ], + [ + 80.05999755859375, + -12.220000267028809, + 464.1000061035156 + ], + [ + 83.37000274658203, + -42.70000076293945, + 448.0 + ], + [ + 113.20000457763672, + 11.960000038146973, + 496.6999816894531 + ], + [ + 109.0999984741211, + -1.5170000791549683, + 475.20001220703125 + ], + [ + 103.20000457763672, + -13.480000495910645, + 452.6999816894531 + ], + [ + 102.0, + -41.32999801635742, + 443.79998779296875 + ], + [ + 143.09999084472656, + -26.25, + 482.3000183105469 + ], + [ + 137.0, + -18.23000144958496, + 467.29998779296875 + ], + [ + 130.60000610351562, + -18.049999237060547, + 452.0 + ], + [ + 124.9000015258789, + -45.51000213623047, + 442.0999755859375 + ] + ], + "hand_type": "left" + }, + { + "id": 1111, + "image_id": 1111, + "category_id": 1, + "iscrowd": 0, + "bbox": [ + 163.91000366210938, + 92.3900146484375, + 59.88001251220703, + 118.91999053955078 + ], + "area": 7120.930521459843, + "keypoints": [ + [ + 218.8000030517578, + 201.39999389648438, + 1 + ], + [ + 168.89999389648438, + 147.89999389648438, + 1 + ], + [ + 171.5, + 161.6999969482422, + 1 + ], + [ + 182.3000030517578, + 178.0, + 1 + ], + [ + 202.60000610351562, + 191.10000610351562, + 1 + ], + [ + 184.6999969482422, + 174.1999969482422, + 1 + ], + [ + 175.6999969482422, + 162.39999389648438, + 1 + ], + [ + 180.60000610351562, + 148.10000610351562, + 1 + ], + [ + 200.5, + 153.3000030517578, + 1 + ], + [ + 181.5, + 173.89999389648438, + 1 + ], + [ + 180.0, + 156.5, + 1 + ], + [ + 185.10000610351562, + 140.89999389648438, + 1 + ], + [ + 205.6999969482422, + 146.6999969482422, + 1 + ], + [ + 175.39999389648438, + 160.3000030517578, + 1 + ], + [ + 183.60000610351562, + 145.3000030517578, + 1 + ], + [ + 194.89999389648438, + 132.6999969482422, + 1 + ], + [ + 208.8000030517578, + 145.6999969482422, + 1 + ], + [ + 199.39999389648438, + 102.30000305175781, + 1 + ], + [ + 201.89999389648438, + 116.19999694824219, + 1 + ], + [ + 208.10000610351562, + 127.4000015258789, + 1 + ], + [ + 210.60000610351562, + 148.5, + 1 + ] + ], + "joint_cam": [ + [ + 96.12000274658203, + 67.63999938964844, + 499.70001220703125 + ], + [ + 15.380000114440918, + -20.969999313354492, + 528.0 + ], + [ + 20.209999084472656, + 3.059999942779541, + 536.0999755859375 + ], + [ + 38.869998931884766, + 31.470001220703125, + 533.2999877929688 + ], + [ + 73.62000274658203, + 53.81999969482422, + 529.2000122070312 + ], + [ + 42.380001068115234, + 24.42999839782715, + 524.7999877929688 + ], + [ + 27.260000228881836, + 4.211999893188477, + 529.7999877929688 + ], + [ + 36.38999938964844, + -21.010000228881836, + 540.0999755859375 + ], + [ + 71.79000091552734, + -11.949999809265137, + 542.7000122070312 + ], + [ + 35.5, + 22.989999771118164, + 505.5999755859375 + ], + [ + 33.540000915527344, + -5.878000259399414, + 512.9000244140625 + ], + [ + 42.69000244140625, + -32.55999755859375, + 521.0999755859375 + ], + [ + 78.16999816894531, + -22.75, + 522.8999633789062 + ], + [ + 24.560001373291016, + 0.47450000047683716, + 487.79998779296875 + ], + [ + 38.18000030517578, + -23.760000228881836, + 494.1000061035156 + ], + [ + 57.27000045776367, + -44.72999954223633, + 501.3999938964844 + ], + [ + 80.33000183105469, + -23.520000457763672, + 503.8999938964844 + ], + [ + 58.06999969482422, + -85.06999969482422, + 450.8999938964844 + ], 
+ [ + 62.619998931884766, + -65.56999969482422, + 457.5 + ], + [ + 73.29999542236328, + -49.72999954223633, + 466.3999938964844 + ], + [ + 79.80000305175781, + -18.200000762939453, + 482.3000183105469 + ] + ], + "hand_type": "left" + }, + { + "id": 11111, + "image_id": 11111, + "category_id": 1, + "iscrowd": 0, + "bbox": [ + 162.8300018310547, + 135.88999938964844, + 59.63999557495117, + 48.1200065612793 + ], + "area": 2869.8769783813186, + "keypoints": [ + [ + 167.8000030517578, + 154.10000610351562, + 1 + ], + [ + 217.5, + 146.60000610351562, + 1 + ], + [ + 207.8000030517578, + 146.39999389648438, + 1 + ], + [ + 197.10000610351562, + 144.8000030517578, + 1 + ], + [ + 181.5, + 148.0, + 1 + ], + [ + 206.8000030517578, + 167.1999969482422, + 1 + ], + [ + 206.5, + 160.1999969482422, + 1 + ], + [ + 202.10000610351562, + 150.89999389648438, + 1 + ], + [ + 191.39999389648438, + 139.89999389648438, + 1 + ], + [ + 200.0, + 170.0, + 1 + ], + [ + 200.39999389648438, + 163.6999969482422, + 1 + ], + [ + 198.0, + 155.6999969482422, + 1 + ], + [ + 186.39999389648438, + 145.10000610351562, + 1 + ], + [ + 194.6999969482422, + 172.39999389648438, + 1 + ], + [ + 195.1999969482422, + 168.10000610351562, + 1 + ], + [ + 191.8000030517578, + 160.89999389648438, + 1 + ], + [ + 182.0, + 151.6999969482422, + 1 + ], + [ + 198.1999969482422, + 180.0, + 1 + ], + [ + 195.0, + 176.3000030517578, + 1 + ], + [ + 189.10000610351562, + 170.39999389648438, + 1 + ], + [ + 177.3000030517578, + 160.1999969482422, + 1 + ] + ], + "joint_cam": [ + [ + 20.479999542236328, + -15.470000267028809, + 693.2999877929688 + ], + [ + 138.40000915527344, + -32.17000198364258, + 633.2000122070312 + ], + [ + 118.0, + -33.62999725341797, + 649.2000122070312 + ], + [ + 94.87999725341797, + -38.78999710083008, + 673.699951171875 + ], + [ + 56.01000213623047, + -31.309999465942383, + 686.2999877929688 + ], + [ + 116.80000305175781, + 18.049999237060547, + 656.7999877929688 + ], + [ + 112.80000305175781, + 0.48660001158714294, + 638.2999877929688 + ], + [ + 99.94000244140625, + -21.559999465942383, + 625.199951171875 + ], + [ + 74.3699951171875, + -47.75, + 623.7999877929688 + ], + [ + 99.95999908447266, + 24.979999542236328, + 658.5 + ], + [ + 97.34000396728516, + 9.000999450683594, + 633.4000244140625 + ], + [ + 88.5199966430664, + -9.983000755310059, + 612.7999877929688 + ], + [ + 61.19000244140625, + -34.4900016784668, + 609.6000366210938 + ], + [ + 86.45000457763672, + 31.0, + 655.6000366210938 + ], + [ + 84.11000061035156, + 19.43000030517578, + 629.7000122070312 + ], + [ + 73.44000244140625, + 2.186999797821045, + 608.6000366210938 + ], + [ + 50.53000259399414, + -19.139999389648438, + 605.7000122070312 + ], + [ + 91.06999969482422, + 47.60000228881836, + 626.6000366210938 + ], + [ + 81.22000122070312, + 37.78000259399414, + 610.800048828125 + ], + [ + 66.75, + 23.989999771118164, + 604.0999755859375 + ], + [ + 39.94999694824219, + 0.4115999937057495, + 607.7999877929688 + ] + ], + "hand_type": "right" + } + ], + "categories": [ + { + "id": 1, + "name": "hand", + "supercategory": "hand", + "keypoints": [ + "wrist", + "thumb1", + "thumb2", + "thumb3", + "thumb4", + "forefinger1", + "forefinger2", + "forefinger3", + "forefinger4", + "middle_finger1", + "middle_finger2", + "middle_finger3", + "middle_finger4", + "ring_finger1", + "ring_finger2", + "ring_finger3", + "ring_finger4", + "pinky_finger1", + "pinky_finger2", + "pinky_finger3", + "pinky_finger4" + ], + "skeleton": [ + [ + 1, + 2 + ], + [ + 2, + 3 + ], + [ + 3, + 4 + ], + [ + 4, + 5 + 
], + [ + 1, + 6 + ], + [ + 6, + 7 + ], + [ + 7, + 8 + ], + [ + 8, + 9 + ], + [ + 1, + 10 + ], + [ + 10, + 11 + ], + [ + 11, + 12 + ], + [ + 12, + 13 + ], + [ + 1, + 14 + ], + [ + 14, + 15 + ], + [ + 15, + 16 + ], + [ + 16, + 17 + ], + [ + 1, + 18 + ], + [ + 18, + 19 + ], + [ + 19, + 20 + ], + [ + 20, + 21 + ] + ] + } + ] } \ No newline at end of file diff --git a/tests/data/shelf/calibration_shelf.json b/tests/data/shelf/calibration_shelf.json index 5927139d09..ce3e3497c0 100644 --- a/tests/data/shelf/calibration_shelf.json +++ b/tests/data/shelf/calibration_shelf.json @@ -1,267 +1,267 @@ -{ - "0": { - "k": [ - [ - 0.0 - ], - [ - 0.0 - ], - [ - 0.0 - ] - ], - "p": [ - [ - 0.0 - ], - [ - 0.0 - ] - ], - "R": [ - [ - 0.650977, - -0.758717, - 0.024027 - ], - [ - -0.018862, - -0.04781, - -0.998678 - ], - [ - 0.758863, - 0.649664, - -0.045434 - ] - ], - "T": [ - [ - -1586.4496077989998 - ], - [ - -2109.46905869 - ], - [ - 1104.209800652 - ] - ], - "fx": 1063.512085, - "fy": 1071.863647, - "cx": 511.738251, - "cy": 350.088287 - }, - "1": { - "k": [ - [ - 0.0 - ], - [ - 0.0 - ], - [ - 0.0 - ] - ], - "p": [ - [ - 0.0 - ], - [ - 0.0 - ] - ], - "R": [ - [ - -0.016771, - -0.999835, - 0.006926 - ], - [ - -0.029435, - -0.006431, - -0.999546 - ], - [ - 0.999426, - -0.016967, - -0.029322 - ] - ], - "T": [ - [ - -3512.391424833 - ], - [ - 311.47771461800005 - ], - [ - 964.5481307480001 - ] - ], - "fx": 1097.697754, - "fy": 1086.668457, - "cx": 521.652161, - "cy": 376.587067 - }, - "2": { - "k": [ - [ - 0.0 - ], - [ - 0.0 - ], - [ - 0.0 - ] - ], - "p": [ - [ - 0.0 - ], - [ - 0.0 - ] - ], - "R": [ - [ - -0.789986, - -0.610527, - 0.05638 - ], - [ - -0.370413, - 0.401962, - -0.837389 - ], - [ - 0.488586, - -0.68241, - -0.543691 - ] - ], - "T": [ - [ - -1420.944211509 - ], - [ - 2546.574076866 - ], - [ - 2688.8728944060003 - ] - ], - "fx": 1130.065552, - "fy": 1112.470337, - "cx": 566.884338, - "cy": 375.212708 - }, - "3": { - "k": [ - [ - 0.0 - ], - [ - 0.0 - ], - [ - 0.0 - ] - ], - "p": [ - [ - 0.0 - ], - [ - 0.0 - ] - ], - "R": [ - [ - -0.970568, - 0.235647, - -0.049676 - ], - [ - 0.09763, - 0.196438, - -0.975644 - ], - [ - -0.22015, - -0.951779, - -0.213663 - ] - ], - "T": [ - [ - 963.489306486 - ], - [ - 3408.674914882 - ], - [ - 1422.035001899 - ] - ], - "fx": 1056.162598, - "fy": 1059.639648, - "cx": 552.43573, - "cy": 393.180389 - }, - "4": { - "k": [ - [ - 0.0 - ], - [ - 0.0 - ], - [ - 0.0 - ] - ], - "p": [ - [ - 0.0 - ], - [ - 0.0 - ] - ], - "R": [ - [ - -0.194109, - 0.980554, - -0.028888 - ], - [ - 0.233045, - 0.017488, - -0.972309 - ], - [ - -0.952896, - -0.195466, - -0.231908 - ] - ], - "T": [ - [ - 3832.020978729 - ], - [ - 273.55271850000014 - ], - [ - 1439.4616998990002 - ] - ], - "fx": 1089.654175, - "fy": 1080.99939, - "cx": 498.32962, - "cy": 359.514832 - } +{ + "0": { + "k": [ + [ + 0.0 + ], + [ + 0.0 + ], + [ + 0.0 + ] + ], + "p": [ + [ + 0.0 + ], + [ + 0.0 + ] + ], + "R": [ + [ + 0.650977, + -0.758717, + 0.024027 + ], + [ + -0.018862, + -0.04781, + -0.998678 + ], + [ + 0.758863, + 0.649664, + -0.045434 + ] + ], + "T": [ + [ + -1586.4496077989998 + ], + [ + -2109.46905869 + ], + [ + 1104.209800652 + ] + ], + "fx": 1063.512085, + "fy": 1071.863647, + "cx": 511.738251, + "cy": 350.088287 + }, + "1": { + "k": [ + [ + 0.0 + ], + [ + 0.0 + ], + [ + 0.0 + ] + ], + "p": [ + [ + 0.0 + ], + [ + 0.0 + ] + ], + "R": [ + [ + -0.016771, + -0.999835, + 0.006926 + ], + [ + -0.029435, + -0.006431, + -0.999546 + ], + [ + 0.999426, + -0.016967, + -0.029322 + ] + ], + "T": [ + [ + -3512.391424833 + ], + [ + 
311.47771461800005 + ], + [ + 964.5481307480001 + ] + ], + "fx": 1097.697754, + "fy": 1086.668457, + "cx": 521.652161, + "cy": 376.587067 + }, + "2": { + "k": [ + [ + 0.0 + ], + [ + 0.0 + ], + [ + 0.0 + ] + ], + "p": [ + [ + 0.0 + ], + [ + 0.0 + ] + ], + "R": [ + [ + -0.789986, + -0.610527, + 0.05638 + ], + [ + -0.370413, + 0.401962, + -0.837389 + ], + [ + 0.488586, + -0.68241, + -0.543691 + ] + ], + "T": [ + [ + -1420.944211509 + ], + [ + 2546.574076866 + ], + [ + 2688.8728944060003 + ] + ], + "fx": 1130.065552, + "fy": 1112.470337, + "cx": 566.884338, + "cy": 375.212708 + }, + "3": { + "k": [ + [ + 0.0 + ], + [ + 0.0 + ], + [ + 0.0 + ] + ], + "p": [ + [ + 0.0 + ], + [ + 0.0 + ] + ], + "R": [ + [ + -0.970568, + 0.235647, + -0.049676 + ], + [ + 0.09763, + 0.196438, + -0.975644 + ], + [ + -0.22015, + -0.951779, + -0.213663 + ] + ], + "T": [ + [ + 963.489306486 + ], + [ + 3408.674914882 + ], + [ + 1422.035001899 + ] + ], + "fx": 1056.162598, + "fy": 1059.639648, + "cx": 552.43573, + "cy": 393.180389 + }, + "4": { + "k": [ + [ + 0.0 + ], + [ + 0.0 + ], + [ + 0.0 + ] + ], + "p": [ + [ + 0.0 + ], + [ + 0.0 + ] + ], + "R": [ + [ + -0.194109, + 0.980554, + -0.028888 + ], + [ + 0.233045, + 0.017488, + -0.972309 + ], + [ + -0.952896, + -0.195466, + -0.231908 + ] + ], + "T": [ + [ + 3832.020978729 + ], + [ + 273.55271850000014 + ], + [ + 1439.4616998990002 + ] + ], + "fx": 1089.654175, + "fy": 1080.99939, + "cx": 498.32962, + "cy": 359.514832 + } } \ No newline at end of file diff --git a/tests/data/wflw/test_wflw.json b/tests/data/wflw/test_wflw.json index d25c9e5360..de61ac2e46 100644 --- a/tests/data/wflw/test_wflw.json +++ b/tests/data/wflw/test_wflw.json @@ -1,1287 +1,1287 @@ -{ - "categories": [ - { - "supercategory": "person", - "id": 1, - "name": "face", - "keypoints": [], - "skeleton": [] - } - ], - "images": [ - { - "id": 2, - "file_name": "36_Football_americanfootball_ball_36_415.jpg", - "height": 661, - "width": 1024 - }, - { - "id": 12, - "file_name": "7_Cheering_Cheering_7_16.jpg", - "height": 1024, - "width": 1024 - } - ], - "annotations": [ - { - "image_id": 2, - "id": 2, - "keypoints": [ - 440.696106, - 132.732559, - 1, - 441.125309, - 138.20883600000002, - 1, - 441.775067, - 143.662576, - 1, - 442.838757, - 149.050135, - 1, - 444.330483, - 154.335484, - 1, - 446.199104, - 159.499988, - 1, - 448.359594, - 164.549883, - 1, - 450.707972, - 169.515574, - 1, - 453.152313, - 174.43489, - 1, - 455.663647, - 179.320327, - 1, - 458.272826, - 184.154116, - 1, - 461.018412, - 188.911642, - 1, - 463.94567, - 193.559255, - 1, - 467.22343, - 197.963659, - 1, - 471.117738, - 201.826457, - 1, - 475.751887, - 204.749145, - 1, - 480.989902, - 206.343046, - 1, - 487.065598, - 206.402104, - 1, - 492.947114, - 204.866338, - 1, - 498.297161, - 201.969714, - 1, - 503.013751, - 198.11849, - 1, - 507.157957, - 193.652851, - 1, - 510.843251, - 188.799396, - 1, - 514.1297, - 183.667342, - 1, - 516.955071, - 178.26847800000004, - 1, - 519.2164700000002, - 172.61085, - 1, - 520.848493, - 166.740574, - 1, - 521.9502669999998, - 160.74653700000005, - 1, - 522.708073, - 154.698484, - 1, - 523.292433, - 148.630853, - 1, - 523.782152, - 142.554803, - 1, - 524.208992, - 136.47398700000002, - 1, - 524.604004, - 130.391006, - 1, - 448.863007, - 130.600006, - 1, - 454.970001, - 126.268997, - 1, - 460.80896, - 127.033981, - 1, - 466.639008, - 127.488991, - 1, - 471.871002, - 128.024002, - 1, - 471.839966, - 131.30699199999998, - 1, - 466.7300110000001, - 130.602005, - 1, - 460.683014, - 129.84198, - 1, - 455.03299, - 
128.447983, - 1, - 484.471008, - 126.532997, - 1, - 491.312988, - 124.467003, - 1, - 497.66098, - 122.172989, - 1, - 504.834991, - 123.182007, - 1, - 510.877014, - 127.14801, - 1, - 504.89801, - 125.372993, - 1, - 497.656982, - 124.851997, - 1, - 491.562012, - 127.464005, - 1, - 484.731995, - 129.934998, - 1, - 478.60199000000006, - 137.615005, - 1, - 478.797445, - 144.648194, - 1, - 478.710781, - 151.672784, - 1, - 479.648263, - 158.477047, - 1, - 472.342987, - 162.587006, - 1, - 476.080474, - 163.443556, - 1, - 479.895523, - 163.767872, - 1, - 484.256557, - 162.987089, - 1, - 488.335907, - 161.220047, - 1, - 454.908997, - 139.330002, - 1, - 457.994713, - 139.25393799999998, - 1, - 461.081185, - 139.258667, - 1, - 465.0926280000001, - 138.585731, - 1, - 469.109314, - 137.906326, - 1, - 465.638515, - 140.893484, - 1, - 461.276336, - 142.038503, - 1, - 457.94384, - 141.053913, - 1, - 488.993011, - 136.03999299999998, - 1, - 492.80749, - 136.399268, - 1, - 496.59449000000006, - 136.98136499999998, - 1, - 500.786029, - 137.41671000000002, - 1, - 504.984009, - 137.048096, - 1, - 501.171214, - 139.364812, - 1, - 496.775512, - 139.941385, - 1, - 492.595083, - 138.593753, - 1, - 468.338989, - 177.639496, - 1, - 472.57608, - 175.232479, - 1, - 477.20692, - 173.776125, - 1, - 480.569762, - 173.825898, - 1, - 483.565578, - 174.109503, - 1, - 490.367366, - 175.431598, - 1, - 496.381042, - 178.96504199999995, - 1, - 491.802489, - 182.339998, - 1, - 486.685901, - 184.599911, - 1, - 481.033928, - 184.016885, - 1, - 476.397867, - 182.850118, - 1, - 472.60944400000005, - 179.86823, - 1, - 469.541992, - 177.462006, - 1, - 474.959022, - 175.385376, - 1, - 480.747386, - 175.48909799999996, - 1, - 488.63756500000005, - 175.736854, - 1, - 495.716522, - 179.02507, - 1, - 488.642736, - 177.62467, - 1, - 481.422413, - 177.62252, - 1, - 475.491142, - 176.514907, - 1, - 461.279777, - 140.890199, - 1, - 496.453474, - 137.763648, - 1 - ], - "num_keypoints": 98, - "bbox": [ - 432.3053162, - 113.7500775, - 100.68947760000003, - 101.074938 - ], - "iscrowd": 0, - "area": 10177.182705672392, - "category_id": 1, - "center": [ - 482.5, - 164.5 - ], - "scale": 0.425 - }, - { - "image_id": 12, - "id": 12, - "keypoints": [ - 737.310974, - 302.2290040000001, - 1, - 735.612565, - 307.91392, - 1, - 733.935549, - 313.605176, - 1, - 732.301247, - 319.308828, - 1, - 730.746049, - 325.034488, - 1, - 729.333061, - 330.796748, - 1, - 728.130165, - 336.606254, - 1, - 727.2343940000002, - 342.470465, - 1, - 726.769826, - 348.383693, - 1, - 726.8573719999998, - 354.313744, - 1, - 727.578398, - 360.199602, - 1, - 729.013058, - 365.95307, - 1, - 731.013678, - 371.537441, - 1, - 733.2071559999998, - 377.050279, - 1, - 735.47649, - 382.530816, - 1, - 738.714025, - 387.475552, - 1, - 743.5621620000002, - 390.822643, - 1, - 749.412371, - 392.269205, - 1, - 755.438439, - 391.936151, - 1, - 761.2417849999998, - 390.247929, - 1, - 766.732837, - 387.708216, - 1, - 772.008913, - 384.743038, - 1, - 777.11136, - 381.48808, - 1, - 782.0033440000002, - 377.925076, - 1, - 786.614896, - 374.007594, - 1, - 790.746727, - 369.589677, - 1, - 794.345917, - 364.72535, - 1, - 797.705108, - 359.69007, - 1, - 800.979223, - 354.59913, - 1, - 804.030756, - 349.372408, - 1, - 806.796596, - 343.988855, - 1, - 809.360701, - 338.505917, - 1, - 811.822571, - 332.976135, - 1, - 738.142029, - 316.583008, - 1, - 745.198975, - 314.119995, - 1, - 749.843933, - 315.911957, - 1, - 754.8779910000002, - 317.789001, - 1, - 759.728943, - 321.003967, - 1, - 758.924988, - 323.009979, 
- 1, - 753.684021, - 320.766998, - 1, - 748.650024, - 318.889984, - 1, - 743.77301, - 317.5, - 1, - 776.567993, - 325.3989870000001, - 1, - 783.789917, - 325.703003, - 1, - 791.1229860000002, - 326.806976, - 1, - 797.598999, - 328.432007, - 1, - 802.210022, - 335.786987, - 1, - 796.032959, - 331.798981, - 1, - 789.445007, - 330.45401, - 1, - 782.429016, - 328.828003, - 1, - 775.448975, - 328.189972, - 1, - 766.489014, - 330.141998, - 1, - 763.441048, - 338.395354, - 1, - 760.1896519999998, - 346.556714, - 1, - 758.378882, - 354.899379, - 1, - 749.651978, - 347.691986, - 1, - 752.802228, - 352.909886, - 1, - 757.095133, - 357.015939, - 1, - 762.194149, - 356.25881, - 1, - 767.192932, - 354.72403, - 1, - 743.380371, - 322.295288, - 1, - 746.923719, - 321.313264, - 1, - 750.553004, - 321.784633, - 1, - 754.640226, - 323.780582, - 1, - 756.981018, - 327.664001, - 1, - 752.689438, - 328.511655, - 1, - 748.3559, - 328.079052, - 1, - 744.9315429999998, - 326.014911, - 1, - 778.2459719999998, - 334.537994, - 1, - 782.672983, - 333.246396, - 1, - 787.060109, - 334.610516, - 1, - 790.163963, - 337.265647, - 1, - 792.42627, - 340.685699, - 1, - 788.630666, - 341.780179, - 1, - 784.70712, - 341.598866, - 1, - 780.419418, - 339.058276, - 1, - 740.483521, - 361.065002, - 1, - 746.374246, - 362.133178, - 1, - 751.741875, - 364.488928, - 1, - 753.4344530000002, - 365.103217, - 1, - 755.192267, - 365.240915, - 1, - 759.601523, - 366.89777, - 1, - 763.757446, - 369.269043, - 1, - 759.467306, - 371.294422, - 1, - 755.0135389999998, - 372.896933, - 1, - 750.305609, - 372.79702, - 1, - 745.439744, - 370.475123, - 1, - 742.098872, - 366.24297, - 1, - 742.159546, - 363.090027, - 1, - 747.630617, - 364.064427, - 1, - 752.565978, - 366.666498, - 1, - 757.357922, - 367.478878, - 1, - 761.918091, - 369.147156, - 1, - 756.790297, - 369.722393, - 1, - 751.666194, - 369.277424, - 1, - 746.561781, - 366.750798, - 1, - 749.141667, - 325.096875, - 1, - 785.415625, - 337.221875, - 1 - ], - "num_keypoints": 98, - "bbox": [ - 718.2645514999999, - 293.2249839000001, - 102.06329400000016, - 108.0482411999999 - ], - "iscrowd": 0, - "area": 11027.759407778518, - "category_id": 1, - "center": [ - 769.0, - 347.5 - ], - "scale": 0.455 - }, - { - "image_id": 12, - "id": 40, - "keypoints": [ - 744.762024, - 731.096985, - 1, - 742.708957, - 737.737215, - 1, - 740.7710030000002, - 744.411776, - 1, - 739.0626599999998, - 751.148374, - 1, - 737.733779, - 757.96915, - 1, - 736.981188, - 764.875717, - 1, - 737.0235700000002, - 771.821884, - 1, - 737.765315, - 778.7307400000002, - 1, - 738.86145, - 785.593963, - 1, - 740.013747, - 792.448173, - 1, - 741.1454200000002, - 799.305824, - 1, - 742.3103629999998, - 806.157846, - 1, - 743.6804400000002, - 812.971502, - 1, - 744.630958, - 819.850678, - 1, - 745.515035, - 826.73686, - 1, - 748.690323, - 832.821804, - 1, - 754.099426, - 837.1631169999998, - 1, - 760.77823, - 840.673624, - 1, - 768.147343, - 842.162887, - 1, - 775.328568, - 840.156231, - 1, - 781.549446, - 835.8637679999998, - 1, - 787.79765, - 831.6084860000002, - 1, - 794.115317, - 827.4566940000002, - 1, - 800.175629, - 822.943352, - 1, - 805.771167, - 817.8629549999998, - 1, - 811.103558, - 812.504673, - 1, - 816.124275, - 806.855559, - 1, - 820.577538, - 800.750585, - 1, - 824.5104719999998, - 794.29608, - 1, - 828.03107, - 787.6072519999999, - 1, - 831.192861, - 780.74112, - 1, - 834.09596, - 773.761204, - 1, - 836.867371, - 766.727722, - 1, - 747.40802, - 744.338989, - 1, - 756.8099980000002, - 739.810974, - 1, - 
762.8229980000001, - 742.584961, - 1, - 769.116028, - 746.8430179999998, - 1, - 774.4959719999998, - 750.2109379999998, - 1, - 772.661011, - 755.225037, - 1, - 766.9570309999998, - 751.564026, - 1, - 760.413025, - 748.731018, - 1, - 754.565979, - 745.8809809999998, - 1, - 794.039978, - 759.955017, - 1, - 802.140991, - 759.838989, - 1, - 809.362976, - 760.9539179999998, - 1, - 817.004089, - 762.0819700000002, - 1, - 822.989014, - 770.3709719999998, - 1, - 814.904053, - 767.382935, - 1, - 807.603088, - 766.098022, - 1, - 800.3809809999998, - 764.984009, - 1, - 792.616028, - 763.8099980000002, - 1, - 781.869995, - 762.830994, - 1, - 777.671572, - 775.3052809999998, - 1, - 773.599147, - 787.815521, - 1, - 768.793789, - 799.963975, - 1, - 759.9530639999998, - 790.217224, - 1, - 763.438017, - 796.8758799999998, - 1, - 768.200237, - 802.5832889999998, - 1, - 776.714431, - 800.940712, - 1, - 784.7540280000002, - 796.731995, - 1, - 752.452454, - 752.677429, - 1, - 758.142965, - 751.832449, - 1, - 763.787095, - 752.7987400000002, - 1, - 768.450332, - 755.789755, - 1, - 771.7440190000002, - 760.278992, - 1, - 766.108723, - 761.570158, - 1, - 760.4538719999998, - 760.565587, - 1, - 755.866811, - 757.23883, - 1, - 791.400024, - 769.619995, - 1, - 797.455167, - 766.7197309999998, - 1, - 804.060133, - 768.1290280000002, - 1, - 808.641021, - 770.830526, - 1, - 812.1015620000002, - 774.896179, - 1, - 807.2036360000002, - 776.0263259999998, - 1, - 802.194302, - 776.233114, - 1, - 796.1303330000002, - 774.055774, - 1, - 756.312012, - 806.9689940000002, - 1, - 761.152525, - 807.042413, - 1, - 765.388771, - 809.286819, - 1, - 766.746996, - 810.379537, - 1, - 768.3692599999998, - 811.051278, - 1, - 774.090223, - 811.996037, - 1, - 779.304504, - 814.633972, - 1, - 774.153851, - 817.59002, - 1, - 768.453259, - 819.044276, - 1, - 762.763688, - 817.53634, - 1, - 759.5313259999998, - 814.798765, - 1, - 757.4994230000002, - 811.065074, - 1, - 758.089478, - 808.210449, - 1, - 762.1575849999998, - 809.557143, - 1, - 765.7118929999998, - 811.955629, - 1, - 771.596042, - 812.993758, - 1, - 777.41687, - 814.616699, - 1, - 770.648339, - 816.4749009999998, - 1, - 763.8826849999998, - 815.569504, - 1, - 760.502713, - 812.2854629999998, - 1, - 762.746584, - 755.108075, - 1, - 802.488199, - 770.511801, - 1 - ], - "num_keypoints": 98, - "bbox": [ - 726.9925697, - 719.9903948, - 119.86341960000004, - 48.00960520000001 - ], - "iscrowd": 0, - "area": 5754.595452917945, - "category_id": 1, - "center": [ - 786.5, - 787.0 - ], - "scale": 0.56 - }, - { - "image_id": 12, - "id": 1169, - "keypoints": [ - 473.170593, - 353.335999, - 1, - 472.454142, - 358.228909, - 1, - 471.788643, - 363.128975, - 1, - 471.219892, - 368.041068, - 1, - 470.94488600000005, - 372.975959, - 1, - 471.550405, - 377.8716, - 1, - 473.340887, - 382.473922, - 1, - 475.160443, - 387.069845, - 1, - 476.591016, - 391.802996, - 1, - 478.183709, - 396.482262, - 1, - 480.41786, - 400.887374, - 1, - 483.28217300000006, - 404.915875, - 1, - 485.94821, - 409.072889, - 1, - 487.708742, - 413.688483, - 1, - 490.510995, - 417.693684, - 1, - 494.524824, - 420.571949, - 1, - 498.905613, - 422.85572, - 1, - 504.011519, - 423.758231, - 1, - 508.98311500000005, - 422.289137, - 1, - 512.9631360000002, - 418.935051, - 1, - 516.483537, - 415.068495, - 1, - 520.4627019999998, - 411.682146, - 1, - 524.5683650000002, - 408.443837, - 1, - 528.2940480000002, - 404.779843, - 1, - 531.408005, - 400.585369, - 1, - 533.7782599999998, - 395.929292, - 1, - 535.604259, - 391.029695, - 1, - 
537.2263849999998, - 386.057698, - 1, - 538.779161, - 381.063564, - 1, - 540.257309, - 376.04686, - 1, - 541.658462, - 371.008091, - 1, - 543.005638, - 365.954595, - 1, - 544.3259889999998, - 360.894012, - 1, - 476.626984, - 359.039978, - 1, - 481.548981, - 356.339966, - 1, - 485.91098, - 357.414032, - 1, - 489.883972, - 359.63501, - 1, - 494.381958, - 363.002014, - 1, - 494.093964, - 365.890961, - 1, - 489.495972, - 362.60498, - 1, - 485.306, - 360.726959, - 1, - 481.214996, - 359.679962, - 1, - 506.893005, - 361.631012, - 1, - 512.028931, - 360.1489870000001, - 1, - 518.090027, - 359.3940120000001, - 1, - 524.357971, - 359.295013, - 1, - 529.7819820000002, - 363.101013, - 1, - 523.89093, - 362.149994, - 1, - 517.776001, - 362.536987, - 1, - 511.72399900000005, - 363.200958, - 1, - 506.344971, - 365.294006, - 1, - 501.347992, - 367.559998, - 1, - 500.189242, - 376.773265, - 1, - 498.956651, - 385.973584, - 1, - 498.187578, - 395.168944, - 1, - 491.300049, - 391.111084, - 1, - 494.141522, - 394.808003, - 1, - 498.40839000000005, - 396.494433, - 1, - 502.755472, - 394.932757, - 1, - 506.338013, - 391.92099, - 1, - 478.184326, - 363.812805, - 1, - 482.723986, - 362.820035, - 1, - 487.304845, - 363.425472, - 1, - 490.813411, - 365.581369, - 1, - 493.446991, - 368.773987, - 1, - 489.161792, - 369.81081, - 1, - 484.775209, - 369.488891, - 1, - 480.564629, - 367.719005, - 1, - 507.460999, - 371.890991, - 1, - 511.778055, - 367.477709, - 1, - 517.9155969999998, - 366.674784, - 1, - 522.678173, - 367.724831, - 1, - 527.087158, - 369.840698, - 1, - 522.757581, - 373.347413, - 1, - 517.304023, - 374.436933, - 1, - 512.204885, - 373.876232, - 1, - 488.688477, - 399.221008, - 1, - 493.442718, - 400.217365, - 1, - 498.155821, - 401.379151, - 1, - 499.542691, - 401.500691, - 1, - 501.246774, - 401.519773, - 1, - 506.836522, - 401.478986, - 1, - 512.330994, - 402.490967, - 1, - 508.472018, - 405.699605, - 1, - 504.486124, - 408.714805, - 1, - 499.5658360000001, - 409.33235, - 1, - 494.571667, - 407.645284, - 1, - 490.657115, - 404.125086, - 1, - 489.64199800000006, - 400.294006, - 1, - 494.17166900000007, - 403.631745, - 1, - 499.518763, - 405.267957, - 1, - 505.461418, - 404.415617, - 1, - 511.249969, - 402.717072, - 1, - 505.450159, - 404.418683, - 1, - 499.495484, - 405.266308, - 1, - 494.161436, - 403.626137, - 1, - 486.570186, - 366.549068, - 1, - 517.171429, - 369.798758, - 1 - ], - "num_keypoints": 98, - "bbox": [ - 463.60677570000007, - 346.2937758, - 88.05732359999968, - 84.50667840000006 - ], - "iscrowd": 0, - "area": 7441.431926229908, - "category_id": 1, - "center": [ - 507.5, - 388.5 - ], - "scale": 0.375 - } - ] +{ + "categories": [ + { + "supercategory": "person", + "id": 1, + "name": "face", + "keypoints": [], + "skeleton": [] + } + ], + "images": [ + { + "id": 2, + "file_name": "36_Football_americanfootball_ball_36_415.jpg", + "height": 661, + "width": 1024 + }, + { + "id": 12, + "file_name": "7_Cheering_Cheering_7_16.jpg", + "height": 1024, + "width": 1024 + } + ], + "annotations": [ + { + "image_id": 2, + "id": 2, + "keypoints": [ + 440.696106, + 132.732559, + 1, + 441.125309, + 138.20883600000002, + 1, + 441.775067, + 143.662576, + 1, + 442.838757, + 149.050135, + 1, + 444.330483, + 154.335484, + 1, + 446.199104, + 159.499988, + 1, + 448.359594, + 164.549883, + 1, + 450.707972, + 169.515574, + 1, + 453.152313, + 174.43489, + 1, + 455.663647, + 179.320327, + 1, + 458.272826, + 184.154116, + 1, + 461.018412, + 188.911642, + 1, + 463.94567, + 193.559255, + 1, + 467.22343, + 197.963659, + 
1, + 471.117738, + 201.826457, + 1, + 475.751887, + 204.749145, + 1, + 480.989902, + 206.343046, + 1, + 487.065598, + 206.402104, + 1, + 492.947114, + 204.866338, + 1, + 498.297161, + 201.969714, + 1, + 503.013751, + 198.11849, + 1, + 507.157957, + 193.652851, + 1, + 510.843251, + 188.799396, + 1, + 514.1297, + 183.667342, + 1, + 516.955071, + 178.26847800000004, + 1, + 519.2164700000002, + 172.61085, + 1, + 520.848493, + 166.740574, + 1, + 521.9502669999998, + 160.74653700000005, + 1, + 522.708073, + 154.698484, + 1, + 523.292433, + 148.630853, + 1, + 523.782152, + 142.554803, + 1, + 524.208992, + 136.47398700000002, + 1, + 524.604004, + 130.391006, + 1, + 448.863007, + 130.600006, + 1, + 454.970001, + 126.268997, + 1, + 460.80896, + 127.033981, + 1, + 466.639008, + 127.488991, + 1, + 471.871002, + 128.024002, + 1, + 471.839966, + 131.30699199999998, + 1, + 466.7300110000001, + 130.602005, + 1, + 460.683014, + 129.84198, + 1, + 455.03299, + 128.447983, + 1, + 484.471008, + 126.532997, + 1, + 491.312988, + 124.467003, + 1, + 497.66098, + 122.172989, + 1, + 504.834991, + 123.182007, + 1, + 510.877014, + 127.14801, + 1, + 504.89801, + 125.372993, + 1, + 497.656982, + 124.851997, + 1, + 491.562012, + 127.464005, + 1, + 484.731995, + 129.934998, + 1, + 478.60199000000006, + 137.615005, + 1, + 478.797445, + 144.648194, + 1, + 478.710781, + 151.672784, + 1, + 479.648263, + 158.477047, + 1, + 472.342987, + 162.587006, + 1, + 476.080474, + 163.443556, + 1, + 479.895523, + 163.767872, + 1, + 484.256557, + 162.987089, + 1, + 488.335907, + 161.220047, + 1, + 454.908997, + 139.330002, + 1, + 457.994713, + 139.25393799999998, + 1, + 461.081185, + 139.258667, + 1, + 465.0926280000001, + 138.585731, + 1, + 469.109314, + 137.906326, + 1, + 465.638515, + 140.893484, + 1, + 461.276336, + 142.038503, + 1, + 457.94384, + 141.053913, + 1, + 488.993011, + 136.03999299999998, + 1, + 492.80749, + 136.399268, + 1, + 496.59449000000006, + 136.98136499999998, + 1, + 500.786029, + 137.41671000000002, + 1, + 504.984009, + 137.048096, + 1, + 501.171214, + 139.364812, + 1, + 496.775512, + 139.941385, + 1, + 492.595083, + 138.593753, + 1, + 468.338989, + 177.639496, + 1, + 472.57608, + 175.232479, + 1, + 477.20692, + 173.776125, + 1, + 480.569762, + 173.825898, + 1, + 483.565578, + 174.109503, + 1, + 490.367366, + 175.431598, + 1, + 496.381042, + 178.96504199999995, + 1, + 491.802489, + 182.339998, + 1, + 486.685901, + 184.599911, + 1, + 481.033928, + 184.016885, + 1, + 476.397867, + 182.850118, + 1, + 472.60944400000005, + 179.86823, + 1, + 469.541992, + 177.462006, + 1, + 474.959022, + 175.385376, + 1, + 480.747386, + 175.48909799999996, + 1, + 488.63756500000005, + 175.736854, + 1, + 495.716522, + 179.02507, + 1, + 488.642736, + 177.62467, + 1, + 481.422413, + 177.62252, + 1, + 475.491142, + 176.514907, + 1, + 461.279777, + 140.890199, + 1, + 496.453474, + 137.763648, + 1 + ], + "num_keypoints": 98, + "bbox": [ + 432.3053162, + 113.7500775, + 100.68947760000003, + 101.074938 + ], + "iscrowd": 0, + "area": 10177.182705672392, + "category_id": 1, + "center": [ + 482.5, + 164.5 + ], + "scale": 0.425 + }, + { + "image_id": 12, + "id": 12, + "keypoints": [ + 737.310974, + 302.2290040000001, + 1, + 735.612565, + 307.91392, + 1, + 733.935549, + 313.605176, + 1, + 732.301247, + 319.308828, + 1, + 730.746049, + 325.034488, + 1, + 729.333061, + 330.796748, + 1, + 728.130165, + 336.606254, + 1, + 727.2343940000002, + 342.470465, + 1, + 726.769826, + 348.383693, + 1, + 726.8573719999998, + 354.313744, + 1, + 727.578398, + 
360.199602, + 1, + 729.013058, + 365.95307, + 1, + 731.013678, + 371.537441, + 1, + 733.2071559999998, + 377.050279, + 1, + 735.47649, + 382.530816, + 1, + 738.714025, + 387.475552, + 1, + 743.5621620000002, + 390.822643, + 1, + 749.412371, + 392.269205, + 1, + 755.438439, + 391.936151, + 1, + 761.2417849999998, + 390.247929, + 1, + 766.732837, + 387.708216, + 1, + 772.008913, + 384.743038, + 1, + 777.11136, + 381.48808, + 1, + 782.0033440000002, + 377.925076, + 1, + 786.614896, + 374.007594, + 1, + 790.746727, + 369.589677, + 1, + 794.345917, + 364.72535, + 1, + 797.705108, + 359.69007, + 1, + 800.979223, + 354.59913, + 1, + 804.030756, + 349.372408, + 1, + 806.796596, + 343.988855, + 1, + 809.360701, + 338.505917, + 1, + 811.822571, + 332.976135, + 1, + 738.142029, + 316.583008, + 1, + 745.198975, + 314.119995, + 1, + 749.843933, + 315.911957, + 1, + 754.8779910000002, + 317.789001, + 1, + 759.728943, + 321.003967, + 1, + 758.924988, + 323.009979, + 1, + 753.684021, + 320.766998, + 1, + 748.650024, + 318.889984, + 1, + 743.77301, + 317.5, + 1, + 776.567993, + 325.3989870000001, + 1, + 783.789917, + 325.703003, + 1, + 791.1229860000002, + 326.806976, + 1, + 797.598999, + 328.432007, + 1, + 802.210022, + 335.786987, + 1, + 796.032959, + 331.798981, + 1, + 789.445007, + 330.45401, + 1, + 782.429016, + 328.828003, + 1, + 775.448975, + 328.189972, + 1, + 766.489014, + 330.141998, + 1, + 763.441048, + 338.395354, + 1, + 760.1896519999998, + 346.556714, + 1, + 758.378882, + 354.899379, + 1, + 749.651978, + 347.691986, + 1, + 752.802228, + 352.909886, + 1, + 757.095133, + 357.015939, + 1, + 762.194149, + 356.25881, + 1, + 767.192932, + 354.72403, + 1, + 743.380371, + 322.295288, + 1, + 746.923719, + 321.313264, + 1, + 750.553004, + 321.784633, + 1, + 754.640226, + 323.780582, + 1, + 756.981018, + 327.664001, + 1, + 752.689438, + 328.511655, + 1, + 748.3559, + 328.079052, + 1, + 744.9315429999998, + 326.014911, + 1, + 778.2459719999998, + 334.537994, + 1, + 782.672983, + 333.246396, + 1, + 787.060109, + 334.610516, + 1, + 790.163963, + 337.265647, + 1, + 792.42627, + 340.685699, + 1, + 788.630666, + 341.780179, + 1, + 784.70712, + 341.598866, + 1, + 780.419418, + 339.058276, + 1, + 740.483521, + 361.065002, + 1, + 746.374246, + 362.133178, + 1, + 751.741875, + 364.488928, + 1, + 753.4344530000002, + 365.103217, + 1, + 755.192267, + 365.240915, + 1, + 759.601523, + 366.89777, + 1, + 763.757446, + 369.269043, + 1, + 759.467306, + 371.294422, + 1, + 755.0135389999998, + 372.896933, + 1, + 750.305609, + 372.79702, + 1, + 745.439744, + 370.475123, + 1, + 742.098872, + 366.24297, + 1, + 742.159546, + 363.090027, + 1, + 747.630617, + 364.064427, + 1, + 752.565978, + 366.666498, + 1, + 757.357922, + 367.478878, + 1, + 761.918091, + 369.147156, + 1, + 756.790297, + 369.722393, + 1, + 751.666194, + 369.277424, + 1, + 746.561781, + 366.750798, + 1, + 749.141667, + 325.096875, + 1, + 785.415625, + 337.221875, + 1 + ], + "num_keypoints": 98, + "bbox": [ + 718.2645514999999, + 293.2249839000001, + 102.06329400000016, + 108.0482411999999 + ], + "iscrowd": 0, + "area": 11027.759407778518, + "category_id": 1, + "center": [ + 769.0, + 347.5 + ], + "scale": 0.455 + }, + { + "image_id": 12, + "id": 40, + "keypoints": [ + 744.762024, + 731.096985, + 1, + 742.708957, + 737.737215, + 1, + 740.7710030000002, + 744.411776, + 1, + 739.0626599999998, + 751.148374, + 1, + 737.733779, + 757.96915, + 1, + 736.981188, + 764.875717, + 1, + 737.0235700000002, + 771.821884, + 1, + 737.765315, + 778.7307400000002, + 1, + 
738.86145, + 785.593963, + 1, + 740.013747, + 792.448173, + 1, + 741.1454200000002, + 799.305824, + 1, + 742.3103629999998, + 806.157846, + 1, + 743.6804400000002, + 812.971502, + 1, + 744.630958, + 819.850678, + 1, + 745.515035, + 826.73686, + 1, + 748.690323, + 832.821804, + 1, + 754.099426, + 837.1631169999998, + 1, + 760.77823, + 840.673624, + 1, + 768.147343, + 842.162887, + 1, + 775.328568, + 840.156231, + 1, + 781.549446, + 835.8637679999998, + 1, + 787.79765, + 831.6084860000002, + 1, + 794.115317, + 827.4566940000002, + 1, + 800.175629, + 822.943352, + 1, + 805.771167, + 817.8629549999998, + 1, + 811.103558, + 812.504673, + 1, + 816.124275, + 806.855559, + 1, + 820.577538, + 800.750585, + 1, + 824.5104719999998, + 794.29608, + 1, + 828.03107, + 787.6072519999999, + 1, + 831.192861, + 780.74112, + 1, + 834.09596, + 773.761204, + 1, + 836.867371, + 766.727722, + 1, + 747.40802, + 744.338989, + 1, + 756.8099980000002, + 739.810974, + 1, + 762.8229980000001, + 742.584961, + 1, + 769.116028, + 746.8430179999998, + 1, + 774.4959719999998, + 750.2109379999998, + 1, + 772.661011, + 755.225037, + 1, + 766.9570309999998, + 751.564026, + 1, + 760.413025, + 748.731018, + 1, + 754.565979, + 745.8809809999998, + 1, + 794.039978, + 759.955017, + 1, + 802.140991, + 759.838989, + 1, + 809.362976, + 760.9539179999998, + 1, + 817.004089, + 762.0819700000002, + 1, + 822.989014, + 770.3709719999998, + 1, + 814.904053, + 767.382935, + 1, + 807.603088, + 766.098022, + 1, + 800.3809809999998, + 764.984009, + 1, + 792.616028, + 763.8099980000002, + 1, + 781.869995, + 762.830994, + 1, + 777.671572, + 775.3052809999998, + 1, + 773.599147, + 787.815521, + 1, + 768.793789, + 799.963975, + 1, + 759.9530639999998, + 790.217224, + 1, + 763.438017, + 796.8758799999998, + 1, + 768.200237, + 802.5832889999998, + 1, + 776.714431, + 800.940712, + 1, + 784.7540280000002, + 796.731995, + 1, + 752.452454, + 752.677429, + 1, + 758.142965, + 751.832449, + 1, + 763.787095, + 752.7987400000002, + 1, + 768.450332, + 755.789755, + 1, + 771.7440190000002, + 760.278992, + 1, + 766.108723, + 761.570158, + 1, + 760.4538719999998, + 760.565587, + 1, + 755.866811, + 757.23883, + 1, + 791.400024, + 769.619995, + 1, + 797.455167, + 766.7197309999998, + 1, + 804.060133, + 768.1290280000002, + 1, + 808.641021, + 770.830526, + 1, + 812.1015620000002, + 774.896179, + 1, + 807.2036360000002, + 776.0263259999998, + 1, + 802.194302, + 776.233114, + 1, + 796.1303330000002, + 774.055774, + 1, + 756.312012, + 806.9689940000002, + 1, + 761.152525, + 807.042413, + 1, + 765.388771, + 809.286819, + 1, + 766.746996, + 810.379537, + 1, + 768.3692599999998, + 811.051278, + 1, + 774.090223, + 811.996037, + 1, + 779.304504, + 814.633972, + 1, + 774.153851, + 817.59002, + 1, + 768.453259, + 819.044276, + 1, + 762.763688, + 817.53634, + 1, + 759.5313259999998, + 814.798765, + 1, + 757.4994230000002, + 811.065074, + 1, + 758.089478, + 808.210449, + 1, + 762.1575849999998, + 809.557143, + 1, + 765.7118929999998, + 811.955629, + 1, + 771.596042, + 812.993758, + 1, + 777.41687, + 814.616699, + 1, + 770.648339, + 816.4749009999998, + 1, + 763.8826849999998, + 815.569504, + 1, + 760.502713, + 812.2854629999998, + 1, + 762.746584, + 755.108075, + 1, + 802.488199, + 770.511801, + 1 + ], + "num_keypoints": 98, + "bbox": [ + 726.9925697, + 719.9903948, + 119.86341960000004, + 48.00960520000001 + ], + "iscrowd": 0, + "area": 5754.595452917945, + "category_id": 1, + "center": [ + 786.5, + 787.0 + ], + "scale": 0.56 + }, + { + "image_id": 12, + "id": 1169, + 
"keypoints": [ + 473.170593, + 353.335999, + 1, + 472.454142, + 358.228909, + 1, + 471.788643, + 363.128975, + 1, + 471.219892, + 368.041068, + 1, + 470.94488600000005, + 372.975959, + 1, + 471.550405, + 377.8716, + 1, + 473.340887, + 382.473922, + 1, + 475.160443, + 387.069845, + 1, + 476.591016, + 391.802996, + 1, + 478.183709, + 396.482262, + 1, + 480.41786, + 400.887374, + 1, + 483.28217300000006, + 404.915875, + 1, + 485.94821, + 409.072889, + 1, + 487.708742, + 413.688483, + 1, + 490.510995, + 417.693684, + 1, + 494.524824, + 420.571949, + 1, + 498.905613, + 422.85572, + 1, + 504.011519, + 423.758231, + 1, + 508.98311500000005, + 422.289137, + 1, + 512.9631360000002, + 418.935051, + 1, + 516.483537, + 415.068495, + 1, + 520.4627019999998, + 411.682146, + 1, + 524.5683650000002, + 408.443837, + 1, + 528.2940480000002, + 404.779843, + 1, + 531.408005, + 400.585369, + 1, + 533.7782599999998, + 395.929292, + 1, + 535.604259, + 391.029695, + 1, + 537.2263849999998, + 386.057698, + 1, + 538.779161, + 381.063564, + 1, + 540.257309, + 376.04686, + 1, + 541.658462, + 371.008091, + 1, + 543.005638, + 365.954595, + 1, + 544.3259889999998, + 360.894012, + 1, + 476.626984, + 359.039978, + 1, + 481.548981, + 356.339966, + 1, + 485.91098, + 357.414032, + 1, + 489.883972, + 359.63501, + 1, + 494.381958, + 363.002014, + 1, + 494.093964, + 365.890961, + 1, + 489.495972, + 362.60498, + 1, + 485.306, + 360.726959, + 1, + 481.214996, + 359.679962, + 1, + 506.893005, + 361.631012, + 1, + 512.028931, + 360.1489870000001, + 1, + 518.090027, + 359.3940120000001, + 1, + 524.357971, + 359.295013, + 1, + 529.7819820000002, + 363.101013, + 1, + 523.89093, + 362.149994, + 1, + 517.776001, + 362.536987, + 1, + 511.72399900000005, + 363.200958, + 1, + 506.344971, + 365.294006, + 1, + 501.347992, + 367.559998, + 1, + 500.189242, + 376.773265, + 1, + 498.956651, + 385.973584, + 1, + 498.187578, + 395.168944, + 1, + 491.300049, + 391.111084, + 1, + 494.141522, + 394.808003, + 1, + 498.40839000000005, + 396.494433, + 1, + 502.755472, + 394.932757, + 1, + 506.338013, + 391.92099, + 1, + 478.184326, + 363.812805, + 1, + 482.723986, + 362.820035, + 1, + 487.304845, + 363.425472, + 1, + 490.813411, + 365.581369, + 1, + 493.446991, + 368.773987, + 1, + 489.161792, + 369.81081, + 1, + 484.775209, + 369.488891, + 1, + 480.564629, + 367.719005, + 1, + 507.460999, + 371.890991, + 1, + 511.778055, + 367.477709, + 1, + 517.9155969999998, + 366.674784, + 1, + 522.678173, + 367.724831, + 1, + 527.087158, + 369.840698, + 1, + 522.757581, + 373.347413, + 1, + 517.304023, + 374.436933, + 1, + 512.204885, + 373.876232, + 1, + 488.688477, + 399.221008, + 1, + 493.442718, + 400.217365, + 1, + 498.155821, + 401.379151, + 1, + 499.542691, + 401.500691, + 1, + 501.246774, + 401.519773, + 1, + 506.836522, + 401.478986, + 1, + 512.330994, + 402.490967, + 1, + 508.472018, + 405.699605, + 1, + 504.486124, + 408.714805, + 1, + 499.5658360000001, + 409.33235, + 1, + 494.571667, + 407.645284, + 1, + 490.657115, + 404.125086, + 1, + 489.64199800000006, + 400.294006, + 1, + 494.17166900000007, + 403.631745, + 1, + 499.518763, + 405.267957, + 1, + 505.461418, + 404.415617, + 1, + 511.249969, + 402.717072, + 1, + 505.450159, + 404.418683, + 1, + 499.495484, + 405.266308, + 1, + 494.161436, + 403.626137, + 1, + 486.570186, + 366.549068, + 1, + 517.171429, + 369.798758, + 1 + ], + "num_keypoints": 98, + "bbox": [ + 463.60677570000007, + 346.2937758, + 88.05732359999968, + 84.50667840000006 + ], + "iscrowd": 0, + "area": 7441.431926229908, + 
"category_id": 1, + "center": [ + 507.5, + 388.5 + ], + "scale": 0.375 + } + ] } \ No newline at end of file diff --git a/tests/data/zebra/test_zebra.json b/tests/data/zebra/test_zebra.json index 0e834c554b..3b505a4902 100644 --- a/tests/data/zebra/test_zebra.json +++ b/tests/data/zebra/test_zebra.json @@ -1,156 +1,156 @@ -{ - "categories": [ - { - "supercategory": "animal", - "id": 1, - "name": "zebra", - "keypoints": [ - "snout", - "head", - "neck", - "forelegL1", - "forelegR1", - "hindlegL1", - "hindlegR1", - "tailbase", - "tailtip" - ], - "skeleton": [ - [ - 2, - 1 - ], - [ - 3, - 2 - ], - [ - 4, - 3 - ], - [ - 5, - 3 - ], - [ - 6, - 8 - ], - [ - 7, - 8 - ], - [ - 8, - 3 - ], - [ - 9, - 8 - ] - ] - } - ], - "images": [ - { - "id": 810, - "file_name": "810.jpg", - "height": 160, - "width": 160 - }, - { - "id": 850, - "file_name": "850.jpg", - "height": 160, - "width": 160 - } - ], - "annotations": [ - { - "keypoints": [ - 121.13823384782104, - 64.42827920259212, - 2.0, - 117.9981442098391, - 70.81295036652858, - 2.0, - 101.74729396479975, - 80.0, - 2.0, - 95.57905809119656, - 75.75733930455307, - 2.0, - 95.2128993293075, - 82.7116929245571, - 2.0, - 62.78126573755127, - 75.8747890881429, - 2.0, - 62.31104503893799, - 83.86685797031176, - 2.0, - 58.25270603520024, - 80.0, - 2.0, - 53.31206457278393, - 85.52060239198866, - 2.0 - ], - "image_id": 810, - "id": 810, - "num_keypoints": 9, - "bbox": [ - 53.31206457278393, - 64.42827920259212, - 68.8261692750371, - 22.092323189396538 - ], - "iscrowd": 0, - "area": 1520.5299755122337, - "category_id": 1 - }, - { - "keypoints": [ - 122.31461535908949, - 89.25315845576364, - 2.0, - 117.81536523827128, - 87.97006030862022, - 2.0, - 101.66067429997881, - 80.0, - 2.0, - 97.88660503356242, - 74.70007144842482, - 2.0, - 96.6342743993913, - 81.95450979316085, - 2.0, - 62.9768902919959, - 75.51961961159495, - 2.0, - 63.64287080847072, - 83.46692756256179, - 2.0, - 58.3393257000212, - 80.0, - 2.0, - 55.41273077187657, - 77.94207820202976, - 2.0 - ], - "image_id": 850, - "id": 850, - "num_keypoints": 9, - "bbox": [ - 55.41273077187657, - 74.70007144842482, - 67.90188458721292, - 15.553087007338817 - ], - "iscrowd": 0, - "area": 1056.083918947201, - "category_id": 1 - } - ] +{ + "categories": [ + { + "supercategory": "animal", + "id": 1, + "name": "zebra", + "keypoints": [ + "snout", + "head", + "neck", + "forelegL1", + "forelegR1", + "hindlegL1", + "hindlegR1", + "tailbase", + "tailtip" + ], + "skeleton": [ + [ + 2, + 1 + ], + [ + 3, + 2 + ], + [ + 4, + 3 + ], + [ + 5, + 3 + ], + [ + 6, + 8 + ], + [ + 7, + 8 + ], + [ + 8, + 3 + ], + [ + 9, + 8 + ] + ] + } + ], + "images": [ + { + "id": 810, + "file_name": "810.jpg", + "height": 160, + "width": 160 + }, + { + "id": 850, + "file_name": "850.jpg", + "height": 160, + "width": 160 + } + ], + "annotations": [ + { + "keypoints": [ + 121.13823384782104, + 64.42827920259212, + 2.0, + 117.9981442098391, + 70.81295036652858, + 2.0, + 101.74729396479975, + 80.0, + 2.0, + 95.57905809119656, + 75.75733930455307, + 2.0, + 95.2128993293075, + 82.7116929245571, + 2.0, + 62.78126573755127, + 75.8747890881429, + 2.0, + 62.31104503893799, + 83.86685797031176, + 2.0, + 58.25270603520024, + 80.0, + 2.0, + 53.31206457278393, + 85.52060239198866, + 2.0 + ], + "image_id": 810, + "id": 810, + "num_keypoints": 9, + "bbox": [ + 53.31206457278393, + 64.42827920259212, + 68.8261692750371, + 22.092323189396538 + ], + "iscrowd": 0, + "area": 1520.5299755122337, + "category_id": 1 + }, + { + "keypoints": [ + 122.31461535908949, + 
89.25315845576364, + 2.0, + 117.81536523827128, + 87.97006030862022, + 2.0, + 101.66067429997881, + 80.0, + 2.0, + 97.88660503356242, + 74.70007144842482, + 2.0, + 96.6342743993913, + 81.95450979316085, + 2.0, + 62.9768902919959, + 75.51961961159495, + 2.0, + 63.64287080847072, + 83.46692756256179, + 2.0, + 58.3393257000212, + 80.0, + 2.0, + 55.41273077187657, + 77.94207820202976, + 2.0 + ], + "image_id": 850, + "id": 850, + "num_keypoints": 9, + "bbox": [ + 55.41273077187657, + 74.70007144842482, + 67.90188458721292, + 15.553087007338817 + ], + "iscrowd": 0, + "area": 1056.083918947201, + "category_id": 1 + } + ] } \ No newline at end of file diff --git a/tests/test_apis/test_inference.py b/tests/test_apis/test_inference.py index c38c619a9e..ae0d57d1a3 100644 --- a/tests/test_apis/test_inference.py +++ b/tests/test_apis/test_inference.py @@ -1,117 +1,117 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -from pathlib import Path -from tempfile import TemporaryDirectory -from unittest import TestCase - -import numpy as np -import torch -from mmcv.image import imread, imwrite -from mmengine.utils import is_list_of -from parameterized import parameterized - -from mmpose.apis import inference_bottomup, inference_topdown, init_model -from mmpose.structures import PoseDataSample -from mmpose.testing._utils import _rand_bboxes, get_config_file, get_repo_dir -from mmpose.utils import register_all_modules - - -class TestInference(TestCase): - - def setUp(self) -> None: - register_all_modules() - - @parameterized.expand([(('configs/body_2d_keypoint/topdown_heatmap/coco/' - 'td-hm_hrnet-w32_8xb64-210e_coco-256x192.py'), - ('cpu', 'cuda'))]) - def test_init_model(self, config, devices): - config_file = get_config_file(config) - - for device in devices: - if device == 'cuda' and not torch.cuda.is_available(): - # Skip the test if cuda is required but unavailable - continue - - # test init_model with str path - _ = init_model(config_file, device=device) - - # test init_model with :obj:`Path` - _ = init_model(Path(config_file), device=device) - - # test init_detector with undesirable type - with self.assertRaisesRegex( - TypeError, 'config must be a filename or Config object'): - config_list = [config_file] - _ = init_model(config_list) - - @parameterized.expand([(('configs/body_2d_keypoint/topdown_heatmap/coco/' - 'td-hm_hrnet-w32_8xb64-210e_coco-256x192.py'), - ('cpu', 'cuda'))]) - def test_inference_topdown(self, config, devices): - project_dir = osp.abspath(osp.dirname(osp.dirname(__file__))) - project_dir = osp.join(project_dir, '..') - config_file = osp.join(project_dir, config) - - rng = np.random.RandomState(0) - img_w = img_h = 100 - img = rng.randint(0, 255, (img_h, img_w, 3), dtype=np.uint8) - bboxes = _rand_bboxes(rng, 2, img_w, img_h) - - for device in devices: - if device == 'cuda' and not torch.cuda.is_available(): - # Skip the test if cuda is required but unavailable - continue - model = init_model(config_file, device=device) - - # test inference with bboxes - results = inference_topdown(model, img, bboxes, bbox_format='xywh') - self.assertTrue(is_list_of(results, PoseDataSample)) - self.assertEqual(len(results), 2) - self.assertTrue(results[0].pred_instances.keypoints.shape, - (1, 17, 2)) - - # test inference without bbox - results = inference_topdown(model, img) - self.assertTrue(is_list_of(results, PoseDataSample)) - self.assertEqual(len(results), 1) - self.assertTrue(results[0].pred_instances.keypoints.shape, - (1, 17, 2)) - - # test inference from image file 
- with TemporaryDirectory() as tmp_dir: - img_path = osp.join(tmp_dir, 'img.jpg') - imwrite(img, img_path) - - results = inference_topdown(model, img_path) - self.assertTrue(is_list_of(results, PoseDataSample)) - self.assertEqual(len(results), 1) - self.assertTrue(results[0].pred_instances.keypoints.shape, - (1, 17, 2)) - - @parameterized.expand([(('configs/body_2d_keypoint/' - 'associative_embedding/coco/' - 'ae_hrnet-w32_8xb24-300e_coco-512x512.py'), - ('cpu', 'cuda'))]) - def test_inference_bottomup(self, config, devices): - config_file = get_config_file(config) - img = osp.join(get_repo_dir(), 'tests/data/coco/000000000785.jpg') - - for device in devices: - if device == 'cuda' and not torch.cuda.is_available(): - # Skip the test if cuda is required but unavailable - continue - model = init_model(config_file, device=device) - - # test inference from image - results = inference_bottomup(model, img=imread(img)) - self.assertTrue(is_list_of(results, PoseDataSample)) - self.assertEqual(len(results), 1) - self.assertTrue(results[0].pred_instances.keypoints.shape, - (1, 17, 2)) - - # test inference from file - results = inference_bottomup(model, img=img) - self.assertTrue(is_list_of(results, PoseDataSample)) - self.assertEqual(len(results), 1) - self.assertTrue(results[0].pred_instances.keypoints.shape, - (1, 17, 2)) +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from pathlib import Path +from tempfile import TemporaryDirectory +from unittest import TestCase + +import numpy as np +import torch +from mmcv.image import imread, imwrite +from mmengine.utils import is_list_of +from parameterized import parameterized + +from mmpose.apis import inference_bottomup, inference_topdown, init_model +from mmpose.structures import PoseDataSample +from mmpose.testing._utils import _rand_bboxes, get_config_file, get_repo_dir +from mmpose.utils import register_all_modules + + +class TestInference(TestCase): + + def setUp(self) -> None: + register_all_modules() + + @parameterized.expand([(('configs/body_2d_keypoint/topdown_heatmap/coco/' + 'td-hm_hrnet-w32_8xb64-210e_coco-256x192.py'), + ('cpu', 'cuda'))]) + def test_init_model(self, config, devices): + config_file = get_config_file(config) + + for device in devices: + if device == 'cuda' and not torch.cuda.is_available(): + # Skip the test if cuda is required but unavailable + continue + + # test init_model with str path + _ = init_model(config_file, device=device) + + # test init_model with :obj:`Path` + _ = init_model(Path(config_file), device=device) + + # test init_detector with undesirable type + with self.assertRaisesRegex( + TypeError, 'config must be a filename or Config object'): + config_list = [config_file] + _ = init_model(config_list) + + @parameterized.expand([(('configs/body_2d_keypoint/topdown_heatmap/coco/' + 'td-hm_hrnet-w32_8xb64-210e_coco-256x192.py'), + ('cpu', 'cuda'))]) + def test_inference_topdown(self, config, devices): + project_dir = osp.abspath(osp.dirname(osp.dirname(__file__))) + project_dir = osp.join(project_dir, '..') + config_file = osp.join(project_dir, config) + + rng = np.random.RandomState(0) + img_w = img_h = 100 + img = rng.randint(0, 255, (img_h, img_w, 3), dtype=np.uint8) + bboxes = _rand_bboxes(rng, 2, img_w, img_h) + + for device in devices: + if device == 'cuda' and not torch.cuda.is_available(): + # Skip the test if cuda is required but unavailable + continue + model = init_model(config_file, device=device) + + # test inference with bboxes + results = inference_topdown(model, img, bboxes, 
bbox_format='xywh') + self.assertTrue(is_list_of(results, PoseDataSample)) + self.assertEqual(len(results), 2) + self.assertTrue(results[0].pred_instances.keypoints.shape, + (1, 17, 2)) + + # test inference without bbox + results = inference_topdown(model, img) + self.assertTrue(is_list_of(results, PoseDataSample)) + self.assertEqual(len(results), 1) + self.assertTrue(results[0].pred_instances.keypoints.shape, + (1, 17, 2)) + + # test inference from image file + with TemporaryDirectory() as tmp_dir: + img_path = osp.join(tmp_dir, 'img.jpg') + imwrite(img, img_path) + + results = inference_topdown(model, img_path) + self.assertTrue(is_list_of(results, PoseDataSample)) + self.assertEqual(len(results), 1) + self.assertTrue(results[0].pred_instances.keypoints.shape, + (1, 17, 2)) + + @parameterized.expand([(('configs/body_2d_keypoint/' + 'associative_embedding/coco/' + 'ae_hrnet-w32_8xb24-300e_coco-512x512.py'), + ('cpu', 'cuda'))]) + def test_inference_bottomup(self, config, devices): + config_file = get_config_file(config) + img = osp.join(get_repo_dir(), 'tests/data/coco/000000000785.jpg') + + for device in devices: + if device == 'cuda' and not torch.cuda.is_available(): + # Skip the test if cuda is required but unavailable + continue + model = init_model(config_file, device=device) + + # test inference from image + results = inference_bottomup(model, img=imread(img)) + self.assertTrue(is_list_of(results, PoseDataSample)) + self.assertEqual(len(results), 1) + self.assertTrue(results[0].pred_instances.keypoints.shape, + (1, 17, 2)) + + # test inference from file + results = inference_bottomup(model, img=img) + self.assertTrue(is_list_of(results, PoseDataSample)) + self.assertEqual(len(results), 1) + self.assertTrue(results[0].pred_instances.keypoints.shape, + (1, 17, 2)) diff --git a/tests/test_apis/test_inferencers/test_mmpose_inferencer.py b/tests/test_apis/test_inferencers/test_mmpose_inferencer.py index 8b8a4744b8..4ddfbc0a76 100644 --- a/tests/test_apis/test_inferencers/test_mmpose_inferencer.py +++ b/tests/test_apis/test_inferencers/test_mmpose_inferencer.py @@ -1,129 +1,129 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import os -import os.path as osp -import platform -import unittest -from collections import defaultdict -from tempfile import TemporaryDirectory -from unittest import TestCase - -import mmcv - -from mmpose.apis.inferencers import MMPoseInferencer -from mmpose.structures import PoseDataSample -from mmpose.utils import register_all_modules - - -class TestMMPoseInferencer(TestCase): - - def tearDown(self) -> None: - register_all_modules(init_default_scope=True) - return super().tearDown() - - def test_pose2d_call(self): - try: - from mmdet.apis.det_inferencer import DetInferencer # noqa: F401 - except (ImportError, ModuleNotFoundError): - return unittest.skip('mmdet is not installed') - - # top-down model - if platform.system().lower() == 'windows': - # the default human pose estimator utilizes rtmdet-m detector - # through alias, which seems not compatible with windows - det_model = 'demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py' - det_weights = 'https://download.openmmlab.com/mmdetection/v2.0/' \ - 'faster_rcnn/faster_rcnn_r50_fpn_1x_coco/' \ - 'faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' - else: - det_model, det_weights = None, None - inferencer = MMPoseInferencer( - 'human', det_model=det_model, det_weights=det_weights) - - img_path = 'tests/data/coco/000000197388.jpg' - img = mmcv.imread(img_path) - - # `inputs` is path to an image - inputs = img_path - results1 = next(inferencer(inputs, return_vis=True)) - self.assertIn('visualization', results1) - self.assertSequenceEqual(results1['visualization'][0].shape, img.shape) - self.assertIn('predictions', results1) - self.assertIn('keypoints', results1['predictions'][0][0]) - self.assertEqual(len(results1['predictions'][0][0]['keypoints']), 17) - - # `inputs` is an image array - inputs = img - results2 = next(inferencer(inputs)) - self.assertEqual( - len(results1['predictions'][0]), len(results2['predictions'][0])) - self.assertSequenceEqual(results1['predictions'][0][0]['keypoints'], - results2['predictions'][0][0]['keypoints']) - results2 = next(inferencer(inputs, return_datasample=True)) - self.assertIsInstance(results2['predictions'][0], PoseDataSample) - - # `inputs` is path to a directory - inputs = osp.dirname(img_path) - with TemporaryDirectory() as tmp_dir: - # only save visualizations - for res in inferencer(inputs, vis_out_dir=tmp_dir): - pass - self.assertEqual(len(os.listdir(tmp_dir)), 4) - # save both visualizations and predictions - results3 = defaultdict(list) - for res in inferencer(inputs, out_dir=tmp_dir): - for key in res: - results3[key].extend(res[key]) - self.assertEqual(len(os.listdir(f'{tmp_dir}/visualizations')), 4) - self.assertEqual(len(os.listdir(f'{tmp_dir}/predictions')), 4) - self.assertEqual(len(results3['predictions']), 4) - self.assertSequenceEqual(results1['predictions'][0][0]['keypoints'], - results3['predictions'][3][0]['keypoints']) - - # `inputs` is path to a video - inputs = 'tests/data/posetrack18/videos/000001_mpiinew_test/' \ - '000001_mpiinew_test.mp4' - with TemporaryDirectory() as tmp_dir: - results = defaultdict(list) - for res in inferencer(inputs, out_dir=tmp_dir): - for key in res: - results[key].extend(res[key]) - self.assertIn('000001_mpiinew_test.mp4', - os.listdir(f'{tmp_dir}/visualizations')) - self.assertIn('000001_mpiinew_test.json', - os.listdir(f'{tmp_dir}/predictions')) - self.assertTrue(inferencer._video_input) - self.assertIn(len(results['predictions']), (4, 5)) - - def test_pose3d_call(self): - try: - from mmdet.apis.det_inferencer import DetInferencer # noqa: F401 - 
except (ImportError, ModuleNotFoundError): - return unittest.skip('mmdet is not installed') - - # top-down model - if platform.system().lower() == 'windows': - # the default human pose estimator utilizes rtmdet-m detector - # through alias, which seems not compatible with windows - det_model = 'demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py' - det_weights = 'https://download.openmmlab.com/mmdetection/v2.0/' \ - 'faster_rcnn/faster_rcnn_r50_fpn_1x_coco/' \ - 'faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' - else: - det_model, det_weights = None, None - inferencer = MMPoseInferencer( - pose3d='human3d', det_model=det_model, det_weights=det_weights) - - # `inputs` is path to a video - inputs = 'https://user-images.githubusercontent.com/87690686/' \ - '164970135-b14e424c-765a-4180-9bc8-fa8d6abc5510.mp4' - with TemporaryDirectory() as tmp_dir: - results = defaultdict(list) - for res in inferencer(inputs, out_dir=tmp_dir): - for key in res: - results[key].extend(res[key]) - self.assertIn('164970135-b14e424c-765a-4180-9bc8-fa8d6abc5510.mp4', - os.listdir(f'{tmp_dir}/visualizations')) - self.assertIn( - '164970135-b14e424c-765a-4180-9bc8-fa8d6abc5510.json', - os.listdir(f'{tmp_dir}/predictions')) - self.assertTrue(inferencer._video_input) +# Copyright (c) OpenMMLab. All rights reserved. +import os +import os.path as osp +import platform +import unittest +from collections import defaultdict +from tempfile import TemporaryDirectory +from unittest import TestCase + +import mmcv + +from mmpose.apis.inferencers import MMPoseInferencer +from mmpose.structures import PoseDataSample +from mmpose.utils import register_all_modules + + +class TestMMPoseInferencer(TestCase): + + def tearDown(self) -> None: + register_all_modules(init_default_scope=True) + return super().tearDown() + + def test_pose2d_call(self): + try: + from mmdet.apis.det_inferencer import DetInferencer # noqa: F401 + except (ImportError, ModuleNotFoundError): + return unittest.skip('mmdet is not installed') + + # top-down model + if platform.system().lower() == 'windows': + # the default human pose estimator utilizes rtmdet-m detector + # through alias, which seems not compatible with windows + det_model = 'demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py' + det_weights = 'https://download.openmmlab.com/mmdetection/v2.0/' \ + 'faster_rcnn/faster_rcnn_r50_fpn_1x_coco/' \ + 'faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' + else: + det_model, det_weights = None, None + inferencer = MMPoseInferencer( + 'human', det_model=det_model, det_weights=det_weights) + + img_path = 'tests/data/coco/000000197388.jpg' + img = mmcv.imread(img_path) + + # `inputs` is path to an image + inputs = img_path + results1 = next(inferencer(inputs, return_vis=True)) + self.assertIn('visualization', results1) + self.assertSequenceEqual(results1['visualization'][0].shape, img.shape) + self.assertIn('predictions', results1) + self.assertIn('keypoints', results1['predictions'][0][0]) + self.assertEqual(len(results1['predictions'][0][0]['keypoints']), 17) + + # `inputs` is an image array + inputs = img + results2 = next(inferencer(inputs)) + self.assertEqual( + len(results1['predictions'][0]), len(results2['predictions'][0])) + self.assertSequenceEqual(results1['predictions'][0][0]['keypoints'], + results2['predictions'][0][0]['keypoints']) + results2 = next(inferencer(inputs, return_datasample=True)) + self.assertIsInstance(results2['predictions'][0], PoseDataSample) + + # `inputs` is path to a directory + inputs = osp.dirname(img_path) + with 
TemporaryDirectory() as tmp_dir: + # only save visualizations + for res in inferencer(inputs, vis_out_dir=tmp_dir): + pass + self.assertEqual(len(os.listdir(tmp_dir)), 4) + # save both visualizations and predictions + results3 = defaultdict(list) + for res in inferencer(inputs, out_dir=tmp_dir): + for key in res: + results3[key].extend(res[key]) + self.assertEqual(len(os.listdir(f'{tmp_dir}/visualizations')), 4) + self.assertEqual(len(os.listdir(f'{tmp_dir}/predictions')), 4) + self.assertEqual(len(results3['predictions']), 4) + self.assertSequenceEqual(results1['predictions'][0][0]['keypoints'], + results3['predictions'][3][0]['keypoints']) + + # `inputs` is path to a video + inputs = 'tests/data/posetrack18/videos/000001_mpiinew_test/' \ + '000001_mpiinew_test.mp4' + with TemporaryDirectory() as tmp_dir: + results = defaultdict(list) + for res in inferencer(inputs, out_dir=tmp_dir): + for key in res: + results[key].extend(res[key]) + self.assertIn('000001_mpiinew_test.mp4', + os.listdir(f'{tmp_dir}/visualizations')) + self.assertIn('000001_mpiinew_test.json', + os.listdir(f'{tmp_dir}/predictions')) + self.assertTrue(inferencer._video_input) + self.assertIn(len(results['predictions']), (4, 5)) + + def test_pose3d_call(self): + try: + from mmdet.apis.det_inferencer import DetInferencer # noqa: F401 + except (ImportError, ModuleNotFoundError): + return unittest.skip('mmdet is not installed') + + # top-down model + if platform.system().lower() == 'windows': + # the default human pose estimator utilizes rtmdet-m detector + # through alias, which seems not compatible with windows + det_model = 'demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py' + det_weights = 'https://download.openmmlab.com/mmdetection/v2.0/' \ + 'faster_rcnn/faster_rcnn_r50_fpn_1x_coco/' \ + 'faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' + else: + det_model, det_weights = None, None + inferencer = MMPoseInferencer( + pose3d='human3d', det_model=det_model, det_weights=det_weights) + + # `inputs` is path to a video + inputs = 'https://user-images.githubusercontent.com/87690686/' \ + '164970135-b14e424c-765a-4180-9bc8-fa8d6abc5510.mp4' + with TemporaryDirectory() as tmp_dir: + results = defaultdict(list) + for res in inferencer(inputs, out_dir=tmp_dir): + for key in res: + results[key].extend(res[key]) + self.assertIn('164970135-b14e424c-765a-4180-9bc8-fa8d6abc5510.mp4', + os.listdir(f'{tmp_dir}/visualizations')) + self.assertIn( + '164970135-b14e424c-765a-4180-9bc8-fa8d6abc5510.json', + os.listdir(f'{tmp_dir}/predictions')) + self.assertTrue(inferencer._video_input) diff --git a/tests/test_apis/test_inferencers/test_pose2d_inferencer.py b/tests/test_apis/test_inferencers/test_pose2d_inferencer.py index b59232efac..2f602b5509 100644 --- a/tests/test_apis/test_inferencers/test_pose2d_inferencer.py +++ b/tests/test_apis/test_inferencers/test_pose2d_inferencer.py @@ -1,160 +1,160 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import os -import os.path as osp -import platform -import unittest -from collections import defaultdict -from tempfile import TemporaryDirectory -from unittest import TestCase - -import mmcv -import torch -from mmengine.infer.infer import BaseInferencer - -from mmpose.apis.inferencers import Pose2DInferencer -from mmpose.structures import PoseDataSample -from mmpose.utils import register_all_modules - - -class TestPose2DInferencer(TestCase): - - def tearDown(self) -> None: - register_all_modules(init_default_scope=True) - return super().tearDown() - - def _get_det_model_weights(self): - if platform.system().lower() == 'windows': - # the default human/animal pose estimator utilizes rtmdet-m - # detector through alias, which seems not compatible with windows - det_model = 'demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py' - det_weights = 'https://download.openmmlab.com/mmdetection/v2.0/' \ - 'faster_rcnn/faster_rcnn_r50_fpn_1x_coco/' \ - 'faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' - else: - det_model, det_weights = None, None - - return det_model, det_weights - - def test_init(self): - - try: - from mmdet.apis.det_inferencer import DetInferencer # noqa: F401 - except (ImportError, ModuleNotFoundError): - return unittest.skip('mmdet is not installed') - - det_model, det_weights = self._get_det_model_weights() - - # 1. init with config path and checkpoint - inferencer = Pose2DInferencer( - model='configs/body_2d_keypoint/simcc/coco/' - 'simcc_res50_8xb64-210e_coco-256x192.py', - weights='https://download.openmmlab.com/mmpose/' - 'v1/body_2d_keypoint/simcc/coco/' - 'simcc_res50_8xb64-210e_coco-256x192-8e0f5b59_20220919.pth', - det_model=det_model, - det_weights=det_weights, - det_cat_ids=0 if det_model else None) - self.assertIsInstance(inferencer.model, torch.nn.Module) - self.assertIsInstance(inferencer.detector, BaseInferencer) - self.assertSequenceEqual(inferencer.det_cat_ids, (0, )) - - # 2. init with config name - inferencer = Pose2DInferencer( - model='td-hm_res50_8xb32-210e_onehand10k-256x256', - det_model=det_model, - det_weights=det_weights, - det_cat_ids=0 if det_model else None) - self.assertIsInstance(inferencer.model, torch.nn.Module) - self.assertIsInstance(inferencer.detector, BaseInferencer) - self.assertSequenceEqual(inferencer.det_cat_ids, (0, )) - - # 3. init with alias - inferencer = Pose2DInferencer( - model='animal', - det_model=det_model, - det_weights=det_weights, - det_cat_ids=(15, 16, 17, 18, 19, 20, 21, 22, - 23) if det_model else None) - self.assertIsInstance(inferencer.model, torch.nn.Module) - self.assertIsInstance(inferencer.detector, BaseInferencer) - self.assertSequenceEqual(inferencer.det_cat_ids, - (15, 16, 17, 18, 19, 20, 21, 22, 23)) - - # 4. 
init with bottom-up model - inferencer = Pose2DInferencer( - model='configs/body_2d_keypoint/dekr/coco/' - 'dekr_hrnet-w32_8xb10-140e_coco-512x512.py', - weights='https://download.openmmlab.com/mmpose/v1/' - 'body_2d_keypoint/dekr/coco/' - 'dekr_hrnet-w32_8xb10-140e_coco-512x512_ac7c17bf-20221228.pth', - ) - self.assertIsInstance(inferencer.model, torch.nn.Module) - self.assertFalse(hasattr(inferencer, 'detector')) - - def test_call(self): - - try: - from mmdet.apis.det_inferencer import DetInferencer # noqa: F401 - except (ImportError, ModuleNotFoundError): - return unittest.skip('mmdet is not installed') - - # top-down model - det_model, det_weights = self._get_det_model_weights() - inferencer = Pose2DInferencer( - 'human', det_model=det_model, det_weights=det_weights) - - img_path = 'tests/data/coco/000000197388.jpg' - img = mmcv.imread(img_path) - - # `inputs` is path to an image - inputs = img_path - results1 = next(inferencer(inputs, return_vis=True)) - self.assertIn('visualization', results1) - self.assertSequenceEqual(results1['visualization'][0].shape, img.shape) - self.assertIn('predictions', results1) - self.assertIn('keypoints', results1['predictions'][0][0]) - self.assertEqual(len(results1['predictions'][0][0]['keypoints']), 17) - - # `inputs` is an image array - inputs = img - results2 = next(inferencer(inputs)) - self.assertEqual( - len(results1['predictions'][0]), len(results2['predictions'][0])) - self.assertSequenceEqual(results1['predictions'][0][0]['keypoints'], - results2['predictions'][0][0]['keypoints']) - results2 = next(inferencer(inputs, return_datasample=True)) - self.assertIsInstance(results2['predictions'][0], PoseDataSample) - - # `inputs` is path to a directory - inputs = osp.dirname(img_path) - - with TemporaryDirectory() as tmp_dir: - # only save visualizations - for res in inferencer(inputs, vis_out_dir=tmp_dir): - pass - self.assertEqual(len(os.listdir(tmp_dir)), 4) - # save both visualizations and predictions - results3 = defaultdict(list) - for res in inferencer(inputs, out_dir=tmp_dir): - for key in res: - results3[key].extend(res[key]) - self.assertEqual(len(os.listdir(f'{tmp_dir}/visualizations')), 4) - self.assertEqual(len(os.listdir(f'{tmp_dir}/predictions')), 4) - self.assertEqual(len(results3['predictions']), 4) - self.assertSequenceEqual(results1['predictions'][0][0]['keypoints'], - results3['predictions'][3][0]['keypoints']) - - # `inputs` is path to a video - inputs = 'tests/data/posetrack18/videos/000001_mpiinew_test/' \ - '000001_mpiinew_test.mp4' - with TemporaryDirectory() as tmp_dir: - results = defaultdict(list) - for res in inferencer(inputs, out_dir=tmp_dir): - for key in res: - results[key].extend(res[key]) - self.assertIn('000001_mpiinew_test.mp4', - os.listdir(f'{tmp_dir}/visualizations')) - self.assertIn('000001_mpiinew_test.json', - os.listdir(f'{tmp_dir}/predictions')) - self.assertTrue(inferencer._video_input) - self.assertIn(len(results['predictions']), (4, 5)) +# Copyright (c) OpenMMLab. All rights reserved. 
+import os +import os.path as osp +import platform +import unittest +from collections import defaultdict +from tempfile import TemporaryDirectory +from unittest import TestCase + +import mmcv +import torch +from mmengine.infer.infer import BaseInferencer + +from mmpose.apis.inferencers import Pose2DInferencer +from mmpose.structures import PoseDataSample +from mmpose.utils import register_all_modules + + +class TestPose2DInferencer(TestCase): + + def tearDown(self) -> None: + register_all_modules(init_default_scope=True) + return super().tearDown() + + def _get_det_model_weights(self): + if platform.system().lower() == 'windows': + # the default human/animal pose estimator utilizes rtmdet-m + # detector through alias, which seems not compatible with windows + det_model = 'demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py' + det_weights = 'https://download.openmmlab.com/mmdetection/v2.0/' \ + 'faster_rcnn/faster_rcnn_r50_fpn_1x_coco/' \ + 'faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' + else: + det_model, det_weights = None, None + + return det_model, det_weights + + def test_init(self): + + try: + from mmdet.apis.det_inferencer import DetInferencer # noqa: F401 + except (ImportError, ModuleNotFoundError): + return unittest.skip('mmdet is not installed') + + det_model, det_weights = self._get_det_model_weights() + + # 1. init with config path and checkpoint + inferencer = Pose2DInferencer( + model='configs/body_2d_keypoint/simcc/coco/' + 'simcc_res50_8xb64-210e_coco-256x192.py', + weights='https://download.openmmlab.com/mmpose/' + 'v1/body_2d_keypoint/simcc/coco/' + 'simcc_res50_8xb64-210e_coco-256x192-8e0f5b59_20220919.pth', + det_model=det_model, + det_weights=det_weights, + det_cat_ids=0 if det_model else None) + self.assertIsInstance(inferencer.model, torch.nn.Module) + self.assertIsInstance(inferencer.detector, BaseInferencer) + self.assertSequenceEqual(inferencer.det_cat_ids, (0, )) + + # 2. init with config name + inferencer = Pose2DInferencer( + model='td-hm_res50_8xb32-210e_onehand10k-256x256', + det_model=det_model, + det_weights=det_weights, + det_cat_ids=0 if det_model else None) + self.assertIsInstance(inferencer.model, torch.nn.Module) + self.assertIsInstance(inferencer.detector, BaseInferencer) + self.assertSequenceEqual(inferencer.det_cat_ids, (0, )) + + # 3. init with alias + inferencer = Pose2DInferencer( + model='animal', + det_model=det_model, + det_weights=det_weights, + det_cat_ids=(15, 16, 17, 18, 19, 20, 21, 22, + 23) if det_model else None) + self.assertIsInstance(inferencer.model, torch.nn.Module) + self.assertIsInstance(inferencer.detector, BaseInferencer) + self.assertSequenceEqual(inferencer.det_cat_ids, + (15, 16, 17, 18, 19, 20, 21, 22, 23)) + + # 4. 
init with bottom-up model + inferencer = Pose2DInferencer( + model='configs/body_2d_keypoint/dekr/coco/' + 'dekr_hrnet-w32_8xb10-140e_coco-512x512.py', + weights='https://download.openmmlab.com/mmpose/v1/' + 'body_2d_keypoint/dekr/coco/' + 'dekr_hrnet-w32_8xb10-140e_coco-512x512_ac7c17bf-20221228.pth', + ) + self.assertIsInstance(inferencer.model, torch.nn.Module) + self.assertFalse(hasattr(inferencer, 'detector')) + + def test_call(self): + + try: + from mmdet.apis.det_inferencer import DetInferencer # noqa: F401 + except (ImportError, ModuleNotFoundError): + return unittest.skip('mmdet is not installed') + + # top-down model + det_model, det_weights = self._get_det_model_weights() + inferencer = Pose2DInferencer( + 'human', det_model=det_model, det_weights=det_weights) + + img_path = 'tests/data/coco/000000197388.jpg' + img = mmcv.imread(img_path) + + # `inputs` is path to an image + inputs = img_path + results1 = next(inferencer(inputs, return_vis=True)) + self.assertIn('visualization', results1) + self.assertSequenceEqual(results1['visualization'][0].shape, img.shape) + self.assertIn('predictions', results1) + self.assertIn('keypoints', results1['predictions'][0][0]) + self.assertEqual(len(results1['predictions'][0][0]['keypoints']), 17) + + # `inputs` is an image array + inputs = img + results2 = next(inferencer(inputs)) + self.assertEqual( + len(results1['predictions'][0]), len(results2['predictions'][0])) + self.assertSequenceEqual(results1['predictions'][0][0]['keypoints'], + results2['predictions'][0][0]['keypoints']) + results2 = next(inferencer(inputs, return_datasample=True)) + self.assertIsInstance(results2['predictions'][0], PoseDataSample) + + # `inputs` is path to a directory + inputs = osp.dirname(img_path) + + with TemporaryDirectory() as tmp_dir: + # only save visualizations + for res in inferencer(inputs, vis_out_dir=tmp_dir): + pass + self.assertEqual(len(os.listdir(tmp_dir)), 4) + # save both visualizations and predictions + results3 = defaultdict(list) + for res in inferencer(inputs, out_dir=tmp_dir): + for key in res: + results3[key].extend(res[key]) + self.assertEqual(len(os.listdir(f'{tmp_dir}/visualizations')), 4) + self.assertEqual(len(os.listdir(f'{tmp_dir}/predictions')), 4) + self.assertEqual(len(results3['predictions']), 4) + self.assertSequenceEqual(results1['predictions'][0][0]['keypoints'], + results3['predictions'][3][0]['keypoints']) + + # `inputs` is path to a video + inputs = 'tests/data/posetrack18/videos/000001_mpiinew_test/' \ + '000001_mpiinew_test.mp4' + with TemporaryDirectory() as tmp_dir: + results = defaultdict(list) + for res in inferencer(inputs, out_dir=tmp_dir): + for key in res: + results[key].extend(res[key]) + self.assertIn('000001_mpiinew_test.mp4', + os.listdir(f'{tmp_dir}/visualizations')) + self.assertIn('000001_mpiinew_test.json', + os.listdir(f'{tmp_dir}/predictions')) + self.assertTrue(inferencer._video_input) + self.assertIn(len(results['predictions']), (4, 5)) diff --git a/tests/test_apis/test_inferencers/test_pose3d_inferencer.py b/tests/test_apis/test_inferencers/test_pose3d_inferencer.py index da4a34b160..97a8524514 100644 --- a/tests/test_apis/test_inferencers/test_pose3d_inferencer.py +++ b/tests/test_apis/test_inferencers/test_pose3d_inferencer.py @@ -1,152 +1,152 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import os -import os.path as osp -import platform -import unittest -from collections import defaultdict -from tempfile import TemporaryDirectory -from unittest import TestCase - -import mmcv -import torch - -from mmpose.apis.inferencers import Pose2DInferencer, Pose3DInferencer -from mmpose.structures import PoseDataSample -from mmpose.utils import register_all_modules - - -class TestPose3DInferencer(TestCase): - - def tearDown(self) -> None: - register_all_modules(init_default_scope=True) - return super().tearDown() - - def _get_det_model_weights(self): - if platform.system().lower() == 'windows': - # the default human/animal pose estimator utilizes rtmdet-m - # detector through alias, which seems not compatible with windows - det_model = 'demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py' - det_weights = 'https://download.openmmlab.com/mmdetection/v2.0/' \ - 'faster_rcnn/faster_rcnn_r50_fpn_1x_coco/' \ - 'faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' - else: - det_model, det_weights = None, None - - return det_model, det_weights - - def test_init(self): - - try: - from mmdet.apis.det_inferencer import DetInferencer # noqa: F401 - except (ImportError, ModuleNotFoundError): - return unittest.skip('mmdet is not installed') - - det_model, det_weights = self._get_det_model_weights() - - # 1. init with config path and checkpoint - inferencer = Pose3DInferencer( - model= # noqa - 'configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv-cpn-ft_8xb128-200e_h36m.py', # noqa - weights= # noqa - 'https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_cpn_ft-88f5abbb_20210527.pth', # noqa - pose2d_model='configs/body_2d_keypoint/simcc/coco/' - 'simcc_res50_8xb64-210e_coco-256x192.py', - pose2d_weights='https://download.openmmlab.com/mmpose/' - 'v1/body_2d_keypoint/simcc/coco/' - 'simcc_res50_8xb64-210e_coco-256x192-8e0f5b59_20220919.pth', - det_model=det_model, - det_weights=det_weights, - det_cat_ids=0 if det_model else None) - self.assertIsInstance(inferencer.model, torch.nn.Module) - self.assertIsInstance(inferencer.pose2d_model, Pose2DInferencer) - - # 2. init with config name - inferencer = Pose3DInferencer( - model='configs/body_3d_keypoint/pose_lift/h36m/pose-lift_' - 'videopose3d-243frm-supv-cpn-ft_8xb128-200e_h36m.py', - pose2d_model='configs/body_2d_keypoint/simcc/coco/' - 'simcc_res50_8xb64-210e_coco-256x192.py', - det_model=det_model, - det_weights=det_weights, - det_cat_ids=0 if det_model else None) - self.assertIsInstance(inferencer.model, torch.nn.Module) - self.assertIsInstance(inferencer.pose2d_model, Pose2DInferencer) - - # 3. 
init with alias - inferencer = Pose3DInferencer( - model='human3d', - det_model=det_model, - det_weights=det_weights, - det_cat_ids=0 if det_model else None) - self.assertIsInstance(inferencer.model, torch.nn.Module) - self.assertIsInstance(inferencer.pose2d_model, Pose2DInferencer) - - def test_call(self): - - try: - from mmdet.apis.det_inferencer import DetInferencer # noqa: F401 - except (ImportError, ModuleNotFoundError): - return unittest.skip('mmdet is not installed') - - # top-down model - det_model, det_weights = self._get_det_model_weights() - inferencer = Pose3DInferencer( - model='human3d', - det_model=det_model, - det_weights=det_weights, - det_cat_ids=0 if det_model else None) - - img_path = 'tests/data/coco/000000197388.jpg' - img = mmcv.imread(img_path) - - # `inputs` is path to an image - inputs = img_path - results1 = next(inferencer(inputs, return_vis=True)) - self.assertIn('visualization', results1) - self.assertIn('predictions', results1) - self.assertIn('keypoints', results1['predictions'][0][0]) - self.assertEqual(len(results1['predictions'][0][0]['keypoints']), 17) - - # `inputs` is an image array - inputs = img - results2 = next(inferencer(inputs)) - self.assertEqual( - len(results1['predictions'][0]), len(results2['predictions'][0])) - self.assertSequenceEqual(results1['predictions'][0][0]['keypoints'], - results2['predictions'][0][0]['keypoints']) - results2 = next(inferencer(inputs, return_datasample=True)) - self.assertIsInstance(results2['predictions'][0], PoseDataSample) - - # `inputs` is path to a directory - inputs = osp.dirname(img_path) - - with TemporaryDirectory() as tmp_dir: - # only save visualizations - for res in inferencer(inputs, vis_out_dir=tmp_dir): - pass - self.assertEqual(len(os.listdir(tmp_dir)), 4) - # save both visualizations and predictions - results3 = defaultdict(list) - for res in inferencer(inputs, out_dir=tmp_dir): - for key in res: - results3[key].extend(res[key]) - self.assertEqual(len(os.listdir(f'{tmp_dir}/visualizations')), 4) - self.assertEqual(len(os.listdir(f'{tmp_dir}/predictions')), 4) - self.assertEqual(len(results3['predictions']), 4) - self.assertSequenceEqual(results1['predictions'][0][0]['keypoints'], - results3['predictions'][3][0]['keypoints']) - - # `inputs` is path to a video - inputs = 'https://user-images.githubusercontent.com/87690686/' \ - '164970135-b14e424c-765a-4180-9bc8-fa8d6abc5510.mp4' - with TemporaryDirectory() as tmp_dir: - results = defaultdict(list) - for res in inferencer(inputs, out_dir=tmp_dir): - for key in res: - results[key].extend(res[key]) - self.assertIn('164970135-b14e424c-765a-4180-9bc8-fa8d6abc5510.mp4', - os.listdir(f'{tmp_dir}/visualizations')) - self.assertIn( - '164970135-b14e424c-765a-4180-9bc8-fa8d6abc5510.json', - os.listdir(f'{tmp_dir}/predictions')) - self.assertTrue(inferencer._video_input) +# Copyright (c) OpenMMLab. All rights reserved. 
+import os +import os.path as osp +import platform +import unittest +from collections import defaultdict +from tempfile import TemporaryDirectory +from unittest import TestCase + +import mmcv +import torch + +from mmpose.apis.inferencers import Pose2DInferencer, Pose3DInferencer +from mmpose.structures import PoseDataSample +from mmpose.utils import register_all_modules + + +class TestPose3DInferencer(TestCase): + + def tearDown(self) -> None: + register_all_modules(init_default_scope=True) + return super().tearDown() + + def _get_det_model_weights(self): + if platform.system().lower() == 'windows': + # the default human/animal pose estimator utilizes rtmdet-m + # detector through alias, which seems not compatible with windows + det_model = 'demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py' + det_weights = 'https://download.openmmlab.com/mmdetection/v2.0/' \ + 'faster_rcnn/faster_rcnn_r50_fpn_1x_coco/' \ + 'faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' + else: + det_model, det_weights = None, None + + return det_model, det_weights + + def test_init(self): + + try: + from mmdet.apis.det_inferencer import DetInferencer # noqa: F401 + except (ImportError, ModuleNotFoundError): + return unittest.skip('mmdet is not installed') + + det_model, det_weights = self._get_det_model_weights() + + # 1. init with config path and checkpoint + inferencer = Pose3DInferencer( + model= # noqa + 'configs/body_3d_keypoint/pose_lift/h36m/pose-lift_videopose3d-243frm-supv-cpn-ft_8xb128-200e_h36m.py', # noqa + weights= # noqa + 'https://download.openmmlab.com/mmpose/body3d/videopose/videopose_h36m_243frames_fullconv_supervised_cpn_ft-88f5abbb_20210527.pth', # noqa + pose2d_model='configs/body_2d_keypoint/simcc/coco/' + 'simcc_res50_8xb64-210e_coco-256x192.py', + pose2d_weights='https://download.openmmlab.com/mmpose/' + 'v1/body_2d_keypoint/simcc/coco/' + 'simcc_res50_8xb64-210e_coco-256x192-8e0f5b59_20220919.pth', + det_model=det_model, + det_weights=det_weights, + det_cat_ids=0 if det_model else None) + self.assertIsInstance(inferencer.model, torch.nn.Module) + self.assertIsInstance(inferencer.pose2d_model, Pose2DInferencer) + + # 2. init with config name + inferencer = Pose3DInferencer( + model='configs/body_3d_keypoint/pose_lift/h36m/pose-lift_' + 'videopose3d-243frm-supv-cpn-ft_8xb128-200e_h36m.py', + pose2d_model='configs/body_2d_keypoint/simcc/coco/' + 'simcc_res50_8xb64-210e_coco-256x192.py', + det_model=det_model, + det_weights=det_weights, + det_cat_ids=0 if det_model else None) + self.assertIsInstance(inferencer.model, torch.nn.Module) + self.assertIsInstance(inferencer.pose2d_model, Pose2DInferencer) + + # 3. 
init with alias + inferencer = Pose3DInferencer( + model='human3d', + det_model=det_model, + det_weights=det_weights, + det_cat_ids=0 if det_model else None) + self.assertIsInstance(inferencer.model, torch.nn.Module) + self.assertIsInstance(inferencer.pose2d_model, Pose2DInferencer) + + def test_call(self): + + try: + from mmdet.apis.det_inferencer import DetInferencer # noqa: F401 + except (ImportError, ModuleNotFoundError): + return unittest.skip('mmdet is not installed') + + # top-down model + det_model, det_weights = self._get_det_model_weights() + inferencer = Pose3DInferencer( + model='human3d', + det_model=det_model, + det_weights=det_weights, + det_cat_ids=0 if det_model else None) + + img_path = 'tests/data/coco/000000197388.jpg' + img = mmcv.imread(img_path) + + # `inputs` is path to an image + inputs = img_path + results1 = next(inferencer(inputs, return_vis=True)) + self.assertIn('visualization', results1) + self.assertIn('predictions', results1) + self.assertIn('keypoints', results1['predictions'][0][0]) + self.assertEqual(len(results1['predictions'][0][0]['keypoints']), 17) + + # `inputs` is an image array + inputs = img + results2 = next(inferencer(inputs)) + self.assertEqual( + len(results1['predictions'][0]), len(results2['predictions'][0])) + self.assertSequenceEqual(results1['predictions'][0][0]['keypoints'], + results2['predictions'][0][0]['keypoints']) + results2 = next(inferencer(inputs, return_datasample=True)) + self.assertIsInstance(results2['predictions'][0], PoseDataSample) + + # `inputs` is path to a directory + inputs = osp.dirname(img_path) + + with TemporaryDirectory() as tmp_dir: + # only save visualizations + for res in inferencer(inputs, vis_out_dir=tmp_dir): + pass + self.assertEqual(len(os.listdir(tmp_dir)), 4) + # save both visualizations and predictions + results3 = defaultdict(list) + for res in inferencer(inputs, out_dir=tmp_dir): + for key in res: + results3[key].extend(res[key]) + self.assertEqual(len(os.listdir(f'{tmp_dir}/visualizations')), 4) + self.assertEqual(len(os.listdir(f'{tmp_dir}/predictions')), 4) + self.assertEqual(len(results3['predictions']), 4) + self.assertSequenceEqual(results1['predictions'][0][0]['keypoints'], + results3['predictions'][3][0]['keypoints']) + + # `inputs` is path to a video + inputs = 'https://user-images.githubusercontent.com/87690686/' \ + '164970135-b14e424c-765a-4180-9bc8-fa8d6abc5510.mp4' + with TemporaryDirectory() as tmp_dir: + results = defaultdict(list) + for res in inferencer(inputs, out_dir=tmp_dir): + for key in res: + results[key].extend(res[key]) + self.assertIn('164970135-b14e424c-765a-4180-9bc8-fa8d6abc5510.mp4', + os.listdir(f'{tmp_dir}/visualizations')) + self.assertIn( + '164970135-b14e424c-765a-4180-9bc8-fa8d6abc5510.json', + os.listdir(f'{tmp_dir}/predictions')) + self.assertTrue(inferencer._video_input) diff --git a/tests/test_codecs/test_associative_embedding.py b/tests/test_codecs/test_associative_embedding.py index 983fc93fb1..95d00f213d 100644 --- a/tests/test_codecs/test_associative_embedding.py +++ b/tests/test_codecs/test_associative_embedding.py @@ -1,242 +1,242 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from itertools import product -from unittest import TestCase - -import numpy as np -import torch -from munkres import Munkres - -from mmpose.codecs import AssociativeEmbedding -from mmpose.registry import KEYPOINT_CODECS -from mmpose.testing import get_coco_sample - - -class TestAssociativeEmbedding(TestCase): - - def setUp(self) -> None: - self.decode_keypoint_order = [ - 0, 1, 2, 3, 4, 5, 6, 11, 12, 7, 8, 9, 10, 13, 14, 15, 16 - ] - - def test_build(self): - cfg = dict( - type='AssociativeEmbedding', - input_size=(256, 256), - heatmap_size=(64, 64), - use_udp=False, - decode_keypoint_order=self.decode_keypoint_order, - ) - codec = KEYPOINT_CODECS.build(cfg) - self.assertIsInstance(codec, AssociativeEmbedding) - - def test_encode(self): - data = get_coco_sample(img_shape=(256, 256), num_instances=1) - - # w/o UDP - codec = AssociativeEmbedding( - input_size=(256, 256), - heatmap_size=(64, 64), - use_udp=False, - decode_keypoint_order=self.decode_keypoint_order) - - encoded = codec.encode(data['keypoints'], data['keypoints_visible']) - - heatmaps = encoded['heatmaps'] - keypoint_indices = encoded['keypoint_indices'] - keypoint_weights = encoded['keypoint_weights'] - - self.assertEqual(heatmaps.shape, (17, 64, 64)) - self.assertEqual(keypoint_indices.shape, (1, 17, 2)) - self.assertEqual(keypoint_weights.shape, (1, 17)) - - for k in range(heatmaps.shape[0]): - index_expected = np.argmax(heatmaps[k]) - index_encoded = keypoint_indices[0, k, 0] - self.assertEqual(index_expected, index_encoded) - - # w/ UDP - codec = AssociativeEmbedding( - input_size=(256, 256), - heatmap_size=(64, 64), - use_udp=True, - decode_keypoint_order=self.decode_keypoint_order) - - encoded = codec.encode(data['keypoints'], data['keypoints_visible']) - - heatmaps = encoded['heatmaps'] - keypoint_indices = encoded['keypoint_indices'] - keypoint_weights = encoded['keypoint_weights'] - - self.assertEqual(heatmaps.shape, (17, 64, 64)) - self.assertEqual(keypoint_indices.shape, (1, 17, 2)) - self.assertEqual(keypoint_weights.shape, (1, 17)) - - for k in range(heatmaps.shape[0]): - index_expected = np.argmax(heatmaps[k]) - index_encoded = keypoint_indices[0, k, 0] - self.assertEqual(index_expected, index_encoded) - - def _get_tags(self, - heatmaps, - keypoint_indices, - tag_per_keypoint: bool, - tag_dim: int = 1): - - K, H, W = heatmaps.shape - N = keypoint_indices.shape[0] - - if tag_per_keypoint: - tags = np.zeros((K * tag_dim, H, W), dtype=np.float32) - else: - tags = np.zeros((tag_dim, H, W), dtype=np.float32) - - for n, k in product(range(N), range(K)): - y, x = np.unravel_index(keypoint_indices[n, k, 0], (H, W)) - if tag_per_keypoint: - tags[k::K, y, x] = n - else: - tags[:, y, x] = n - - return tags - - def _sort_preds(self, keypoints_pred, scores_pred, keypoints_gt): - """Sort multi-instance predictions to best match the ground-truth. 
- - Args: - keypoints_pred (np.ndarray): predictions in shape (N, K, D) - scores (np.ndarray): predictions in shape (N, K) - keypoints_gt (np.ndarray): ground-truth in shape (N, K, D) - - Returns: - np.ndarray: Sorted predictions - """ - assert keypoints_gt.shape == keypoints_pred.shape - costs = np.linalg.norm( - keypoints_gt[None] - keypoints_pred[:, None], ord=2, - axis=3).mean(axis=2) - match = Munkres().compute(costs) - keypoints_pred_sorted = np.zeros_like(keypoints_pred) - scores_pred_sorted = np.zeros_like(scores_pred) - for i, j in match: - keypoints_pred_sorted[i] = keypoints_pred[j] - scores_pred_sorted[i] = scores_pred[j] - - return keypoints_pred_sorted, scores_pred_sorted - - def test_decode(self): - data = get_coco_sample( - img_shape=(256, 256), num_instances=2, non_occlusion=True) - - # w/o UDP - codec = AssociativeEmbedding( - input_size=(256, 256), - heatmap_size=(64, 64), - use_udp=False, - decode_keypoint_order=self.decode_keypoint_order) - - encoded = codec.encode(data['keypoints'], data['keypoints_visible']) - - heatmaps = encoded['heatmaps'] - keypoint_indices = encoded['keypoint_indices'] - - tags = self._get_tags( - heatmaps, keypoint_indices, tag_per_keypoint=True) - - # to Tensor - batch_heatmaps = torch.from_numpy(heatmaps[None]) - batch_tags = torch.from_numpy(tags[None]) - - batch_keypoints, batch_keypoint_scores = codec.batch_decode( - batch_heatmaps, batch_tags) - - self.assertIsInstance(batch_keypoints, list) - self.assertIsInstance(batch_keypoint_scores, list) - self.assertEqual(len(batch_keypoints), 1) - self.assertEqual(len(batch_keypoint_scores), 1) - - keypoints, scores = self._sort_preds(batch_keypoints[0], - batch_keypoint_scores[0], - data['keypoints']) - - self.assertIsInstance(keypoints, np.ndarray) - self.assertIsInstance(scores, np.ndarray) - self.assertEqual(keypoints.shape, (2, 17, 2)) - self.assertEqual(scores.shape, (2, 17)) - - self.assertTrue(np.allclose(keypoints, data['keypoints'], atol=4.0)) - - # w/o UDP, tag_imd=2 - codec = AssociativeEmbedding( - input_size=(256, 256), - heatmap_size=(64, 64), - use_udp=False, - decode_keypoint_order=self.decode_keypoint_order) - - encoded = codec.encode(data['keypoints'], data['keypoints_visible']) - - heatmaps = encoded['heatmaps'] - keypoint_indices = encoded['keypoint_indices'] - - tags = self._get_tags( - heatmaps, keypoint_indices, tag_per_keypoint=True, tag_dim=2) - - # to Tensor - batch_heatmaps = torch.from_numpy(heatmaps[None]) - batch_tags = torch.from_numpy(tags[None]) - - batch_keypoints, batch_keypoint_scores = codec.batch_decode( - batch_heatmaps, batch_tags) - - self.assertIsInstance(batch_keypoints, list) - self.assertIsInstance(batch_keypoint_scores, list) - self.assertEqual(len(batch_keypoints), 1) - self.assertEqual(len(batch_keypoint_scores), 1) - - keypoints, scores = self._sort_preds(batch_keypoints[0], - batch_keypoint_scores[0], - data['keypoints']) - - self.assertIsInstance(keypoints, np.ndarray) - self.assertIsInstance(scores, np.ndarray) - self.assertEqual(keypoints.shape, (2, 17, 2)) - self.assertEqual(scores.shape, (2, 17)) - - self.assertTrue(np.allclose(keypoints, data['keypoints'], atol=4.0)) - - # w/ UDP - codec = AssociativeEmbedding( - input_size=(256, 256), - heatmap_size=(64, 64), - use_udp=True, - decode_keypoint_order=self.decode_keypoint_order) - - encoded = codec.encode(data['keypoints'], data['keypoints_visible']) - - heatmaps = encoded['heatmaps'] - keypoint_indices = encoded['keypoint_indices'] - - tags = self._get_tags( - heatmaps, keypoint_indices, 
tag_per_keypoint=True) - - # to Tensor - batch_heatmaps = torch.from_numpy(heatmaps[None]) - batch_tags = torch.from_numpy(tags[None]) - - batch_keypoints, batch_keypoint_scores = codec.batch_decode( - batch_heatmaps, batch_tags) - - self.assertIsInstance(batch_keypoints, list) - self.assertIsInstance(batch_keypoint_scores, list) - self.assertEqual(len(batch_keypoints), 1) - self.assertEqual(len(batch_keypoint_scores), 1) - - keypoints, scores = self._sort_preds(batch_keypoints[0], - batch_keypoint_scores[0], - data['keypoints']) - - self.assertIsInstance(keypoints, np.ndarray) - self.assertIsInstance(scores, np.ndarray) - self.assertEqual(keypoints.shape, (2, 17, 2)) - self.assertEqual(scores.shape, (2, 17)) - - self.assertTrue(np.allclose(keypoints, data['keypoints'], atol=4.0)) +# Copyright (c) OpenMMLab. All rights reserved. +from itertools import product +from unittest import TestCase + +import numpy as np +import torch +from munkres import Munkres + +from mmpose.codecs import AssociativeEmbedding +from mmpose.registry import KEYPOINT_CODECS +from mmpose.testing import get_coco_sample + + +class TestAssociativeEmbedding(TestCase): + + def setUp(self) -> None: + self.decode_keypoint_order = [ + 0, 1, 2, 3, 4, 5, 6, 11, 12, 7, 8, 9, 10, 13, 14, 15, 16 + ] + + def test_build(self): + cfg = dict( + type='AssociativeEmbedding', + input_size=(256, 256), + heatmap_size=(64, 64), + use_udp=False, + decode_keypoint_order=self.decode_keypoint_order, + ) + codec = KEYPOINT_CODECS.build(cfg) + self.assertIsInstance(codec, AssociativeEmbedding) + + def test_encode(self): + data = get_coco_sample(img_shape=(256, 256), num_instances=1) + + # w/o UDP + codec = AssociativeEmbedding( + input_size=(256, 256), + heatmap_size=(64, 64), + use_udp=False, + decode_keypoint_order=self.decode_keypoint_order) + + encoded = codec.encode(data['keypoints'], data['keypoints_visible']) + + heatmaps = encoded['heatmaps'] + keypoint_indices = encoded['keypoint_indices'] + keypoint_weights = encoded['keypoint_weights'] + + self.assertEqual(heatmaps.shape, (17, 64, 64)) + self.assertEqual(keypoint_indices.shape, (1, 17, 2)) + self.assertEqual(keypoint_weights.shape, (1, 17)) + + for k in range(heatmaps.shape[0]): + index_expected = np.argmax(heatmaps[k]) + index_encoded = keypoint_indices[0, k, 0] + self.assertEqual(index_expected, index_encoded) + + # w/ UDP + codec = AssociativeEmbedding( + input_size=(256, 256), + heatmap_size=(64, 64), + use_udp=True, + decode_keypoint_order=self.decode_keypoint_order) + + encoded = codec.encode(data['keypoints'], data['keypoints_visible']) + + heatmaps = encoded['heatmaps'] + keypoint_indices = encoded['keypoint_indices'] + keypoint_weights = encoded['keypoint_weights'] + + self.assertEqual(heatmaps.shape, (17, 64, 64)) + self.assertEqual(keypoint_indices.shape, (1, 17, 2)) + self.assertEqual(keypoint_weights.shape, (1, 17)) + + for k in range(heatmaps.shape[0]): + index_expected = np.argmax(heatmaps[k]) + index_encoded = keypoint_indices[0, k, 0] + self.assertEqual(index_expected, index_encoded) + + def _get_tags(self, + heatmaps, + keypoint_indices, + tag_per_keypoint: bool, + tag_dim: int = 1): + + K, H, W = heatmaps.shape + N = keypoint_indices.shape[0] + + if tag_per_keypoint: + tags = np.zeros((K * tag_dim, H, W), dtype=np.float32) + else: + tags = np.zeros((tag_dim, H, W), dtype=np.float32) + + for n, k in product(range(N), range(K)): + y, x = np.unravel_index(keypoint_indices[n, k, 0], (H, W)) + if tag_per_keypoint: + tags[k::K, y, x] = n + else: + tags[:, y, x] = n + + 
return tags + + def _sort_preds(self, keypoints_pred, scores_pred, keypoints_gt): + """Sort multi-instance predictions to best match the ground-truth. + + Args: + keypoints_pred (np.ndarray): predictions in shape (N, K, D) + scores (np.ndarray): predictions in shape (N, K) + keypoints_gt (np.ndarray): ground-truth in shape (N, K, D) + + Returns: + np.ndarray: Sorted predictions + """ + assert keypoints_gt.shape == keypoints_pred.shape + costs = np.linalg.norm( + keypoints_gt[None] - keypoints_pred[:, None], ord=2, + axis=3).mean(axis=2) + match = Munkres().compute(costs) + keypoints_pred_sorted = np.zeros_like(keypoints_pred) + scores_pred_sorted = np.zeros_like(scores_pred) + for i, j in match: + keypoints_pred_sorted[i] = keypoints_pred[j] + scores_pred_sorted[i] = scores_pred[j] + + return keypoints_pred_sorted, scores_pred_sorted + + def test_decode(self): + data = get_coco_sample( + img_shape=(256, 256), num_instances=2, non_occlusion=True) + + # w/o UDP + codec = AssociativeEmbedding( + input_size=(256, 256), + heatmap_size=(64, 64), + use_udp=False, + decode_keypoint_order=self.decode_keypoint_order) + + encoded = codec.encode(data['keypoints'], data['keypoints_visible']) + + heatmaps = encoded['heatmaps'] + keypoint_indices = encoded['keypoint_indices'] + + tags = self._get_tags( + heatmaps, keypoint_indices, tag_per_keypoint=True) + + # to Tensor + batch_heatmaps = torch.from_numpy(heatmaps[None]) + batch_tags = torch.from_numpy(tags[None]) + + batch_keypoints, batch_keypoint_scores = codec.batch_decode( + batch_heatmaps, batch_tags) + + self.assertIsInstance(batch_keypoints, list) + self.assertIsInstance(batch_keypoint_scores, list) + self.assertEqual(len(batch_keypoints), 1) + self.assertEqual(len(batch_keypoint_scores), 1) + + keypoints, scores = self._sort_preds(batch_keypoints[0], + batch_keypoint_scores[0], + data['keypoints']) + + self.assertIsInstance(keypoints, np.ndarray) + self.assertIsInstance(scores, np.ndarray) + self.assertEqual(keypoints.shape, (2, 17, 2)) + self.assertEqual(scores.shape, (2, 17)) + + self.assertTrue(np.allclose(keypoints, data['keypoints'], atol=4.0)) + + # w/o UDP, tag_imd=2 + codec = AssociativeEmbedding( + input_size=(256, 256), + heatmap_size=(64, 64), + use_udp=False, + decode_keypoint_order=self.decode_keypoint_order) + + encoded = codec.encode(data['keypoints'], data['keypoints_visible']) + + heatmaps = encoded['heatmaps'] + keypoint_indices = encoded['keypoint_indices'] + + tags = self._get_tags( + heatmaps, keypoint_indices, tag_per_keypoint=True, tag_dim=2) + + # to Tensor + batch_heatmaps = torch.from_numpy(heatmaps[None]) + batch_tags = torch.from_numpy(tags[None]) + + batch_keypoints, batch_keypoint_scores = codec.batch_decode( + batch_heatmaps, batch_tags) + + self.assertIsInstance(batch_keypoints, list) + self.assertIsInstance(batch_keypoint_scores, list) + self.assertEqual(len(batch_keypoints), 1) + self.assertEqual(len(batch_keypoint_scores), 1) + + keypoints, scores = self._sort_preds(batch_keypoints[0], + batch_keypoint_scores[0], + data['keypoints']) + + self.assertIsInstance(keypoints, np.ndarray) + self.assertIsInstance(scores, np.ndarray) + self.assertEqual(keypoints.shape, (2, 17, 2)) + self.assertEqual(scores.shape, (2, 17)) + + self.assertTrue(np.allclose(keypoints, data['keypoints'], atol=4.0)) + + # w/ UDP + codec = AssociativeEmbedding( + input_size=(256, 256), + heatmap_size=(64, 64), + use_udp=True, + decode_keypoint_order=self.decode_keypoint_order) + + encoded = codec.encode(data['keypoints'], 
data['keypoints_visible']) + + heatmaps = encoded['heatmaps'] + keypoint_indices = encoded['keypoint_indices'] + + tags = self._get_tags( + heatmaps, keypoint_indices, tag_per_keypoint=True) + + # to Tensor + batch_heatmaps = torch.from_numpy(heatmaps[None]) + batch_tags = torch.from_numpy(tags[None]) + + batch_keypoints, batch_keypoint_scores = codec.batch_decode( + batch_heatmaps, batch_tags) + + self.assertIsInstance(batch_keypoints, list) + self.assertIsInstance(batch_keypoint_scores, list) + self.assertEqual(len(batch_keypoints), 1) + self.assertEqual(len(batch_keypoint_scores), 1) + + keypoints, scores = self._sort_preds(batch_keypoints[0], + batch_keypoint_scores[0], + data['keypoints']) + + self.assertIsInstance(keypoints, np.ndarray) + self.assertIsInstance(scores, np.ndarray) + self.assertEqual(keypoints.shape, (2, 17, 2)) + self.assertEqual(scores.shape, (2, 17)) + + self.assertTrue(np.allclose(keypoints, data['keypoints'], atol=4.0)) diff --git a/tests/test_codecs/test_decoupled_heatmap.py b/tests/test_codecs/test_decoupled_heatmap.py index 747491c185..e990d8b34e 100644 --- a/tests/test_codecs/test_decoupled_heatmap.py +++ b/tests/test_codecs/test_decoupled_heatmap.py @@ -1,168 +1,168 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import numpy as np - -from mmpose.codecs import DecoupledHeatmap -from mmpose.registry import KEYPOINT_CODECS -from mmpose.testing import get_coco_sample - - -class TestDecoupledHeatmap(TestCase): - - def setUp(self) -> None: - pass - - def _make_multi_instance_data(self, data): - bbox = data['bbox'].reshape(-1, 2, 2) - keypoints = data['keypoints'] - keypoints_visible = data['keypoints_visible'] - - keypoints_visible[..., 0] = 0 - - offset = keypoints.max(axis=1, keepdims=True) - bbox_outside = bbox - offset - keypoints_outside = keypoints - offset - keypoints_outside_visible = np.zeros(keypoints_visible.shape) - - bbox_overlap = bbox.mean( - axis=1, keepdims=True) + 0.8 * ( - bbox - bbox.mean(axis=1, keepdims=True)) - keypoint_overlap = keypoints.mean( - axis=1, keepdims=True) + 0.8 * ( - keypoints - keypoints.mean(axis=1, keepdims=True)) - keypoint_overlap_visible = keypoints_visible - - data['bbox'] = np.concatenate((bbox, bbox_outside, bbox_overlap), - axis=0) - data['keypoints'] = np.concatenate( - (keypoints, keypoints_outside, keypoint_overlap), axis=0) - data['keypoints_visible'] = np.concatenate( - (keypoints_visible, keypoints_outside_visible, - keypoint_overlap_visible), - axis=0) - - return data - - def test_build(self): - cfg = dict( - type='DecoupledHeatmap', - input_size=(512, 512), - heatmap_size=(128, 128), - ) - codec = KEYPOINT_CODECS.build(cfg) - self.assertIsInstance(codec, DecoupledHeatmap) - - def test_encode(self): - data = get_coco_sample(img_shape=(512, 512), num_instances=1) - data['bbox'] = np.tile(data['bbox'], 2).reshape(-1, 4, 2) - data['bbox'][:, 1:3, 0] = data['bbox'][:, 0:2, 0] - data = self._make_multi_instance_data(data) - - codec = DecoupledHeatmap( - input_size=(512, 512), - heatmap_size=(128, 128), - ) - - print(data['bbox'].shape) - encoded = codec.encode( - data['keypoints'], data['keypoints_visible'], bbox=data['bbox']) - - heatmaps = encoded['heatmaps'] - instance_heatmaps = encoded['instance_heatmaps'] - keypoint_weights = encoded['keypoint_weights'] - instance_coords = encoded['instance_coords'] - - self.assertEqual(heatmaps.shape, (18, 128, 128)) - self.assertEqual(keypoint_weights.shape, (2, 17)) - self.assertEqual(instance_heatmaps.shape, (34, 128, 128)) - 
self.assertEqual(instance_coords.shape, (2, 2)) - - # without bbox - encoded = codec.encode( - data['keypoints'], data['keypoints_visible'], bbox=None) - - heatmaps = encoded['heatmaps'] - instance_heatmaps = encoded['instance_heatmaps'] - keypoint_weights = encoded['keypoint_weights'] - instance_coords = encoded['instance_coords'] - - self.assertEqual(heatmaps.shape, (18, 128, 128)) - self.assertEqual(keypoint_weights.shape, (2, 17)) - self.assertEqual(instance_heatmaps.shape, (34, 128, 128)) - self.assertEqual(instance_coords.shape, (2, 2)) - - # root_type - with self.assertRaises(ValueError): - codec = DecoupledHeatmap( - input_size=(512, 512), - heatmap_size=(128, 128), - root_type='box_center', - ) - encoded = codec.encode( - data['keypoints'], - data['keypoints_visible'], - bbox=data['bbox']) - - codec = DecoupledHeatmap( - input_size=(512, 512), - heatmap_size=(128, 128), - root_type='bbox_center', - ) - - encoded = codec.encode( - data['keypoints'], data['keypoints_visible'], bbox=data['bbox']) - - heatmaps = encoded['heatmaps'] - instance_heatmaps = encoded['instance_heatmaps'] - keypoint_weights = encoded['keypoint_weights'] - instance_coords = encoded['instance_coords'] - - self.assertEqual(heatmaps.shape, (18, 128, 128)) - self.assertEqual(keypoint_weights.shape, (2, 17)) - self.assertEqual(instance_heatmaps.shape, (34, 128, 128)) - self.assertEqual(instance_coords.shape, (2, 2)) - - def test_decode(self): - data = get_coco_sample(img_shape=(512, 512), num_instances=2) - data['bbox'] = np.tile(data['bbox'], 2).reshape(-1, 4, 2) - data['bbox'][:, 1:3, 0] = data['bbox'][:, 0:2, 0] - - codec = DecoupledHeatmap( - input_size=(512, 512), - heatmap_size=(128, 128), - ) - - encoded = codec.encode( - data['keypoints'], data['keypoints_visible'], bbox=data['bbox']) - instance_heatmaps = encoded['instance_heatmaps'].reshape( - encoded['instance_coords'].shape[0], -1, - *encoded['instance_heatmaps'].shape[-2:]) - instance_scores = np.ones(encoded['instance_coords'].shape[0]) - decoded = codec.decode(instance_heatmaps, instance_scores[:, None]) - keypoints, keypoint_scores = decoded - - self.assertEqual(keypoints.shape, (2, 17, 2)) - self.assertEqual(keypoint_scores.shape, (2, 17)) - - def test_cicular_verification(self): - data = get_coco_sample(img_shape=(512, 512), num_instances=1) - data['bbox'] = np.tile(data['bbox'], 2).reshape(-1, 4, 2) - data['bbox'][:, 1:3, 0] = data['bbox'][:, 0:2, 0] - - codec = DecoupledHeatmap( - input_size=(512, 512), - heatmap_size=(128, 128), - ) - - encoded = codec.encode( - data['keypoints'], data['keypoints_visible'], bbox=data['bbox']) - instance_heatmaps = encoded['instance_heatmaps'].reshape( - encoded['instance_coords'].shape[0], -1, - *encoded['instance_heatmaps'].shape[-2:]) - instance_scores = np.ones(encoded['instance_coords'].shape[0]) - decoded = codec.decode(instance_heatmaps, instance_scores[:, None]) - keypoints, _ = decoded - keypoints += 1.5 - - self.assertTrue(np.allclose(keypoints, data['keypoints'], atol=5.)) +# Copyright (c) OpenMMLab. All rights reserved. 
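The `_sort_preds` helper in test_associative_embedding.py above pairs decoded instances with ground-truth instances by building an N x N cost matrix of mean per-keypoint L2 distances and solving the assignment with `Munkres().compute`. Below is a minimal standalone sketch of the same pairing using `scipy.optimize.linear_sum_assignment`, which returns the same minimum-cost assignment; `sort_preds` here is a stand-in for the test helper and assumes scipy is available.

    import numpy as np
    from scipy.optimize import linear_sum_assignment  # Hungarian algorithm

    def sort_preds(keypoints_pred, scores_pred, keypoints_gt):
        # costs[i, j]: mean L2 distance between predicted instance i and GT instance j
        costs = np.linalg.norm(
            keypoints_gt[None] - keypoints_pred[:, None], ord=2, axis=3).mean(axis=2)
        rows, cols = linear_sum_assignment(costs)  # same optimal pairs as Munkres().compute(costs)
        keypoints_sorted = np.zeros_like(keypoints_pred)
        scores_sorted = np.zeros_like(scores_pred)
        for i, j in zip(rows, cols):
            keypoints_sorted[i] = keypoints_pred[j]
            scores_sorted[i] = scores_pred[j]
        return keypoints_sorted, scores_sorted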
+from unittest import TestCase + +import numpy as np + +from mmpose.codecs import DecoupledHeatmap +from mmpose.registry import KEYPOINT_CODECS +from mmpose.testing import get_coco_sample + + +class TestDecoupledHeatmap(TestCase): + + def setUp(self) -> None: + pass + + def _make_multi_instance_data(self, data): + bbox = data['bbox'].reshape(-1, 2, 2) + keypoints = data['keypoints'] + keypoints_visible = data['keypoints_visible'] + + keypoints_visible[..., 0] = 0 + + offset = keypoints.max(axis=1, keepdims=True) + bbox_outside = bbox - offset + keypoints_outside = keypoints - offset + keypoints_outside_visible = np.zeros(keypoints_visible.shape) + + bbox_overlap = bbox.mean( + axis=1, keepdims=True) + 0.8 * ( + bbox - bbox.mean(axis=1, keepdims=True)) + keypoint_overlap = keypoints.mean( + axis=1, keepdims=True) + 0.8 * ( + keypoints - keypoints.mean(axis=1, keepdims=True)) + keypoint_overlap_visible = keypoints_visible + + data['bbox'] = np.concatenate((bbox, bbox_outside, bbox_overlap), + axis=0) + data['keypoints'] = np.concatenate( + (keypoints, keypoints_outside, keypoint_overlap), axis=0) + data['keypoints_visible'] = np.concatenate( + (keypoints_visible, keypoints_outside_visible, + keypoint_overlap_visible), + axis=0) + + return data + + def test_build(self): + cfg = dict( + type='DecoupledHeatmap', + input_size=(512, 512), + heatmap_size=(128, 128), + ) + codec = KEYPOINT_CODECS.build(cfg) + self.assertIsInstance(codec, DecoupledHeatmap) + + def test_encode(self): + data = get_coco_sample(img_shape=(512, 512), num_instances=1) + data['bbox'] = np.tile(data['bbox'], 2).reshape(-1, 4, 2) + data['bbox'][:, 1:3, 0] = data['bbox'][:, 0:2, 0] + data = self._make_multi_instance_data(data) + + codec = DecoupledHeatmap( + input_size=(512, 512), + heatmap_size=(128, 128), + ) + + print(data['bbox'].shape) + encoded = codec.encode( + data['keypoints'], data['keypoints_visible'], bbox=data['bbox']) + + heatmaps = encoded['heatmaps'] + instance_heatmaps = encoded['instance_heatmaps'] + keypoint_weights = encoded['keypoint_weights'] + instance_coords = encoded['instance_coords'] + + self.assertEqual(heatmaps.shape, (18, 128, 128)) + self.assertEqual(keypoint_weights.shape, (2, 17)) + self.assertEqual(instance_heatmaps.shape, (34, 128, 128)) + self.assertEqual(instance_coords.shape, (2, 2)) + + # without bbox + encoded = codec.encode( + data['keypoints'], data['keypoints_visible'], bbox=None) + + heatmaps = encoded['heatmaps'] + instance_heatmaps = encoded['instance_heatmaps'] + keypoint_weights = encoded['keypoint_weights'] + instance_coords = encoded['instance_coords'] + + self.assertEqual(heatmaps.shape, (18, 128, 128)) + self.assertEqual(keypoint_weights.shape, (2, 17)) + self.assertEqual(instance_heatmaps.shape, (34, 128, 128)) + self.assertEqual(instance_coords.shape, (2, 2)) + + # root_type + with self.assertRaises(ValueError): + codec = DecoupledHeatmap( + input_size=(512, 512), + heatmap_size=(128, 128), + root_type='box_center', + ) + encoded = codec.encode( + data['keypoints'], + data['keypoints_visible'], + bbox=data['bbox']) + + codec = DecoupledHeatmap( + input_size=(512, 512), + heatmap_size=(128, 128), + root_type='bbox_center', + ) + + encoded = codec.encode( + data['keypoints'], data['keypoints_visible'], bbox=data['bbox']) + + heatmaps = encoded['heatmaps'] + instance_heatmaps = encoded['instance_heatmaps'] + keypoint_weights = encoded['keypoint_weights'] + instance_coords = encoded['instance_coords'] + + self.assertEqual(heatmaps.shape, (18, 128, 128)) + 
self.assertEqual(keypoint_weights.shape, (2, 17)) + self.assertEqual(instance_heatmaps.shape, (34, 128, 128)) + self.assertEqual(instance_coords.shape, (2, 2)) + + def test_decode(self): + data = get_coco_sample(img_shape=(512, 512), num_instances=2) + data['bbox'] = np.tile(data['bbox'], 2).reshape(-1, 4, 2) + data['bbox'][:, 1:3, 0] = data['bbox'][:, 0:2, 0] + + codec = DecoupledHeatmap( + input_size=(512, 512), + heatmap_size=(128, 128), + ) + + encoded = codec.encode( + data['keypoints'], data['keypoints_visible'], bbox=data['bbox']) + instance_heatmaps = encoded['instance_heatmaps'].reshape( + encoded['instance_coords'].shape[0], -1, + *encoded['instance_heatmaps'].shape[-2:]) + instance_scores = np.ones(encoded['instance_coords'].shape[0]) + decoded = codec.decode(instance_heatmaps, instance_scores[:, None]) + keypoints, keypoint_scores = decoded + + self.assertEqual(keypoints.shape, (2, 17, 2)) + self.assertEqual(keypoint_scores.shape, (2, 17)) + + def test_cicular_verification(self): + data = get_coco_sample(img_shape=(512, 512), num_instances=1) + data['bbox'] = np.tile(data['bbox'], 2).reshape(-1, 4, 2) + data['bbox'][:, 1:3, 0] = data['bbox'][:, 0:2, 0] + + codec = DecoupledHeatmap( + input_size=(512, 512), + heatmap_size=(128, 128), + ) + + encoded = codec.encode( + data['keypoints'], data['keypoints_visible'], bbox=data['bbox']) + instance_heatmaps = encoded['instance_heatmaps'].reshape( + encoded['instance_coords'].shape[0], -1, + *encoded['instance_heatmaps'].shape[-2:]) + instance_scores = np.ones(encoded['instance_coords'].shape[0]) + decoded = codec.decode(instance_heatmaps, instance_scores[:, None]) + keypoints, _ = decoded + keypoints += 1.5 + + self.assertTrue(np.allclose(keypoints, data['keypoints'], atol=5.)) diff --git a/tests/test_codecs/test_image_pose_lifting.py b/tests/test_codecs/test_image_pose_lifting.py index bb94786c32..af5e1627aa 100644 --- a/tests/test_codecs/test_image_pose_lifting.py +++ b/tests/test_codecs/test_image_pose_lifting.py @@ -1,150 +1,150 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import numpy as np - -from mmpose.codecs import ImagePoseLifting -from mmpose.registry import KEYPOINT_CODECS - - -class TestImagePoseLifting(TestCase): - - def setUp(self) -> None: - keypoints = (0.1 + 0.8 * np.random.rand(1, 17, 2)) * [192, 256] - keypoints = np.round(keypoints).astype(np.float32) - keypoints_visible = np.random.randint(2, size=(1, 17)) - lifting_target = (0.1 + 0.8 * np.random.rand(17, 3)) - lifting_target_visible = np.random.randint(2, size=(17, )) - encoded_wo_sigma = np.random.rand(1, 17, 3) - - self.keypoints_mean = np.random.rand(17, 2).astype(np.float32) - self.keypoints_std = np.random.rand(17, 2).astype(np.float32) + 1e-6 - self.target_mean = np.random.rand(17, 3).astype(np.float32) - self.target_std = np.random.rand(17, 3).astype(np.float32) + 1e-6 - - self.data = dict( - keypoints=keypoints, - keypoints_visible=keypoints_visible, - lifting_target=lifting_target, - lifting_target_visible=lifting_target_visible, - encoded_wo_sigma=encoded_wo_sigma) - - def build_pose_lifting_label(self, **kwargs): - cfg = dict(type='ImagePoseLifting', num_keypoints=17, root_index=0) - cfg.update(kwargs) - return KEYPOINT_CODECS.build(cfg) - - def test_build(self): - codec = self.build_pose_lifting_label() - self.assertIsInstance(codec, ImagePoseLifting) - - def test_encode(self): - keypoints = self.data['keypoints'] - keypoints_visible = self.data['keypoints_visible'] - lifting_target = self.data['lifting_target'] - lifting_target_visible = self.data['lifting_target_visible'] - - # test default settings - codec = self.build_pose_lifting_label() - encoded = codec.encode(keypoints, keypoints_visible, lifting_target, - lifting_target_visible) - - self.assertEqual(encoded['keypoint_labels'].shape, (1, 17, 2)) - self.assertEqual(encoded['lifting_target_label'].shape, (17, 3)) - self.assertEqual(encoded['lifting_target_weights'].shape, (17, )) - self.assertEqual(encoded['trajectory_weights'].shape, (17, )) - self.assertEqual(encoded['target_root'].shape, (3, )) - - # test removing root - codec = self.build_pose_lifting_label( - remove_root=True, save_index=True) - encoded = codec.encode(keypoints, keypoints_visible, lifting_target, - lifting_target_visible) - - self.assertTrue('target_root_removed' in encoded - and 'target_root_index' in encoded) - self.assertEqual(encoded['lifting_target_weights'].shape, (16, )) - self.assertEqual(encoded['keypoint_labels'].shape, (1, 17, 2)) - self.assertEqual(encoded['lifting_target_label'].shape, (16, 3)) - self.assertEqual(encoded['target_root'].shape, (3, )) - - # test normalization - codec = self.build_pose_lifting_label( - keypoints_mean=self.keypoints_mean, - keypoints_std=self.keypoints_std, - target_mean=self.target_mean, - target_std=self.target_std) - encoded = codec.encode(keypoints, keypoints_visible, lifting_target, - lifting_target_visible) - - self.assertEqual(encoded['keypoint_labels'].shape, (1, 17, 2)) - self.assertEqual(encoded['lifting_target_label'].shape, (17, 3)) - - def test_decode(self): - lifting_target = self.data['lifting_target'] - encoded_wo_sigma = self.data['encoded_wo_sigma'] - - codec = self.build_pose_lifting_label() - - decoded, scores = codec.decode( - encoded_wo_sigma, target_root=lifting_target[..., 0, :]) - - self.assertEqual(decoded.shape, (1, 17, 3)) - self.assertEqual(scores.shape, (1, 17)) - - codec = self.build_pose_lifting_label(remove_root=True) - - decoded, scores = codec.decode( - encoded_wo_sigma, target_root=lifting_target[..., 0, :]) - - 
self.assertEqual(decoded.shape, (1, 18, 3)) - self.assertEqual(scores.shape, (1, 18)) - - def test_cicular_verification(self): - keypoints = self.data['keypoints'] - keypoints_visible = self.data['keypoints_visible'] - lifting_target = self.data['lifting_target'] - lifting_target_visible = self.data['lifting_target_visible'] - - # test default settings - codec = self.build_pose_lifting_label() - encoded = codec.encode(keypoints, keypoints_visible, lifting_target, - lifting_target_visible) - - _keypoints, _ = codec.decode( - np.expand_dims(encoded['lifting_target_label'], axis=0), - target_root=lifting_target[..., 0, :]) - - self.assertTrue( - np.allclose( - np.expand_dims(lifting_target, axis=0), _keypoints, atol=5.)) - - # test removing root - codec = self.build_pose_lifting_label(remove_root=True) - encoded = codec.encode(keypoints, keypoints_visible, lifting_target, - lifting_target_visible) - - _keypoints, _ = codec.decode( - np.expand_dims(encoded['lifting_target_label'], axis=0), - target_root=lifting_target[..., 0, :]) - - self.assertTrue( - np.allclose( - np.expand_dims(lifting_target, axis=0), _keypoints, atol=5.)) - - # test normalization - codec = self.build_pose_lifting_label( - keypoints_mean=self.keypoints_mean, - keypoints_std=self.keypoints_std, - target_mean=self.target_mean, - target_std=self.target_std) - encoded = codec.encode(keypoints, keypoints_visible, lifting_target, - lifting_target_visible) - - _keypoints, _ = codec.decode( - np.expand_dims(encoded['lifting_target_label'], axis=0), - target_root=lifting_target[..., 0, :]) - - self.assertTrue( - np.allclose( - np.expand_dims(lifting_target, axis=0), _keypoints, atol=5.)) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np + +from mmpose.codecs import ImagePoseLifting +from mmpose.registry import KEYPOINT_CODECS + + +class TestImagePoseLifting(TestCase): + + def setUp(self) -> None: + keypoints = (0.1 + 0.8 * np.random.rand(1, 17, 2)) * [192, 256] + keypoints = np.round(keypoints).astype(np.float32) + keypoints_visible = np.random.randint(2, size=(1, 17)) + lifting_target = (0.1 + 0.8 * np.random.rand(17, 3)) + lifting_target_visible = np.random.randint(2, size=(17, )) + encoded_wo_sigma = np.random.rand(1, 17, 3) + + self.keypoints_mean = np.random.rand(17, 2).astype(np.float32) + self.keypoints_std = np.random.rand(17, 2).astype(np.float32) + 1e-6 + self.target_mean = np.random.rand(17, 3).astype(np.float32) + self.target_std = np.random.rand(17, 3).astype(np.float32) + 1e-6 + + self.data = dict( + keypoints=keypoints, + keypoints_visible=keypoints_visible, + lifting_target=lifting_target, + lifting_target_visible=lifting_target_visible, + encoded_wo_sigma=encoded_wo_sigma) + + def build_pose_lifting_label(self, **kwargs): + cfg = dict(type='ImagePoseLifting', num_keypoints=17, root_index=0) + cfg.update(kwargs) + return KEYPOINT_CODECS.build(cfg) + + def test_build(self): + codec = self.build_pose_lifting_label() + self.assertIsInstance(codec, ImagePoseLifting) + + def test_encode(self): + keypoints = self.data['keypoints'] + keypoints_visible = self.data['keypoints_visible'] + lifting_target = self.data['lifting_target'] + lifting_target_visible = self.data['lifting_target_visible'] + + # test default settings + codec = self.build_pose_lifting_label() + encoded = codec.encode(keypoints, keypoints_visible, lifting_target, + lifting_target_visible) + + self.assertEqual(encoded['keypoint_labels'].shape, (1, 17, 2)) + 
self.assertEqual(encoded['lifting_target_label'].shape, (17, 3)) + self.assertEqual(encoded['lifting_target_weights'].shape, (17, )) + self.assertEqual(encoded['trajectory_weights'].shape, (17, )) + self.assertEqual(encoded['target_root'].shape, (3, )) + + # test removing root + codec = self.build_pose_lifting_label( + remove_root=True, save_index=True) + encoded = codec.encode(keypoints, keypoints_visible, lifting_target, + lifting_target_visible) + + self.assertTrue('target_root_removed' in encoded + and 'target_root_index' in encoded) + self.assertEqual(encoded['lifting_target_weights'].shape, (16, )) + self.assertEqual(encoded['keypoint_labels'].shape, (1, 17, 2)) + self.assertEqual(encoded['lifting_target_label'].shape, (16, 3)) + self.assertEqual(encoded['target_root'].shape, (3, )) + + # test normalization + codec = self.build_pose_lifting_label( + keypoints_mean=self.keypoints_mean, + keypoints_std=self.keypoints_std, + target_mean=self.target_mean, + target_std=self.target_std) + encoded = codec.encode(keypoints, keypoints_visible, lifting_target, + lifting_target_visible) + + self.assertEqual(encoded['keypoint_labels'].shape, (1, 17, 2)) + self.assertEqual(encoded['lifting_target_label'].shape, (17, 3)) + + def test_decode(self): + lifting_target = self.data['lifting_target'] + encoded_wo_sigma = self.data['encoded_wo_sigma'] + + codec = self.build_pose_lifting_label() + + decoded, scores = codec.decode( + encoded_wo_sigma, target_root=lifting_target[..., 0, :]) + + self.assertEqual(decoded.shape, (1, 17, 3)) + self.assertEqual(scores.shape, (1, 17)) + + codec = self.build_pose_lifting_label(remove_root=True) + + decoded, scores = codec.decode( + encoded_wo_sigma, target_root=lifting_target[..., 0, :]) + + self.assertEqual(decoded.shape, (1, 18, 3)) + self.assertEqual(scores.shape, (1, 18)) + + def test_cicular_verification(self): + keypoints = self.data['keypoints'] + keypoints_visible = self.data['keypoints_visible'] + lifting_target = self.data['lifting_target'] + lifting_target_visible = self.data['lifting_target_visible'] + + # test default settings + codec = self.build_pose_lifting_label() + encoded = codec.encode(keypoints, keypoints_visible, lifting_target, + lifting_target_visible) + + _keypoints, _ = codec.decode( + np.expand_dims(encoded['lifting_target_label'], axis=0), + target_root=lifting_target[..., 0, :]) + + self.assertTrue( + np.allclose( + np.expand_dims(lifting_target, axis=0), _keypoints, atol=5.)) + + # test removing root + codec = self.build_pose_lifting_label(remove_root=True) + encoded = codec.encode(keypoints, keypoints_visible, lifting_target, + lifting_target_visible) + + _keypoints, _ = codec.decode( + np.expand_dims(encoded['lifting_target_label'], axis=0), + target_root=lifting_target[..., 0, :]) + + self.assertTrue( + np.allclose( + np.expand_dims(lifting_target, axis=0), _keypoints, atol=5.)) + + # test normalization + codec = self.build_pose_lifting_label( + keypoints_mean=self.keypoints_mean, + keypoints_std=self.keypoints_std, + target_mean=self.target_mean, + target_std=self.target_std) + encoded = codec.encode(keypoints, keypoints_visible, lifting_target, + lifting_target_visible) + + _keypoints, _ = codec.decode( + np.expand_dims(encoded['lifting_target_label'], axis=0), + target_root=lifting_target[..., 0, :]) + + self.assertTrue( + np.allclose( + np.expand_dims(lifting_target, axis=0), _keypoints, atol=5.)) diff --git a/tests/test_codecs/test_integral_regression_label.py b/tests/test_codecs/test_integral_regression_label.py index 
8f53a0b21f..fa79534798 100644 --- a/tests/test_codecs/test_integral_regression_label.py +++ b/tests/test_codecs/test_integral_regression_label.py @@ -1,83 +1,83 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import numpy as np - -from mmpose.codecs import IntegralRegressionLabel # noqa: F401 -from mmpose.registry import KEYPOINT_CODECS - - -class TestRegressionLabel(TestCase): - - # name and configs of all test cases - def setUp(self) -> None: - self.configs = [ - ( - 'ipr', - dict( - type='IntegralRegressionLabel', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2), - ), - ] - - # The bbox is usually padded so the keypoint will not be near the - # boundary - keypoints = (0.1 + 0.8 * np.random.rand(1, 17, 2)) * [192, 256] - keypoints = np.round(keypoints).astype(np.float32) - heatmaps = np.random.rand(17, 64, 48).astype(np.float32) - encoded_wo_sigma = np.random.rand(1, 17, 2) - keypoints_visible = np.ones((1, 17), dtype=np.float32) - self.data = dict( - keypoints=keypoints, - keypoints_visible=keypoints_visible, - heatmaps=heatmaps, - encoded_wo_sigma=encoded_wo_sigma) - - def test_encode(self): - keypoints = self.data['keypoints'] - keypoints_visible = self.data['keypoints_visible'] - - for name, cfg in self.configs: - codec = KEYPOINT_CODECS.build(cfg) - - encoded = codec.encode(keypoints, keypoints_visible) - heatmaps = encoded['heatmaps'] - keypoint_labels = encoded['keypoint_labels'] - keypoint_weights = encoded['keypoint_weights'] - - self.assertEqual(heatmaps.shape, (17, 64, 48), - f'Failed case: "{name}"') - self.assertEqual(keypoint_labels.shape, (1, 17, 2), - f'Failed case: "{name}"') - self.assertEqual(keypoint_weights.shape, (1, 17), - f'Failed case: "{name}"') - - def test_decode(self): - encoded_wo_sigma = self.data['encoded_wo_sigma'] - - for name, cfg in self.configs: - codec = KEYPOINT_CODECS.build(cfg) - - keypoints, scores = codec.decode(encoded_wo_sigma) - - self.assertEqual(keypoints.shape, (1, 17, 2), - f'Failed case: "{name}"') - self.assertEqual(scores.shape, (1, 17), f'Failed case: "{name}"') - - def test_cicular_verification(self): - keypoints = self.data['keypoints'] - keypoints_visible = self.data['keypoints_visible'] - - for name, cfg in self.configs: - codec = KEYPOINT_CODECS.build(cfg) - - encoded = codec.encode(keypoints, keypoints_visible) - keypoint_labels = encoded['keypoint_labels'] - - _keypoints, _ = codec.decode(keypoint_labels) - - self.assertTrue( - np.allclose(keypoints, _keypoints, atol=5.), - f'Failed case: "{name}"') +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import numpy as np + +from mmpose.codecs import IntegralRegressionLabel # noqa: F401 +from mmpose.registry import KEYPOINT_CODECS + + +class TestRegressionLabel(TestCase): + + # name and configs of all test cases + def setUp(self) -> None: + self.configs = [ + ( + 'ipr', + dict( + type='IntegralRegressionLabel', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2), + ), + ] + + # The bbox is usually padded so the keypoint will not be near the + # boundary + keypoints = (0.1 + 0.8 * np.random.rand(1, 17, 2)) * [192, 256] + keypoints = np.round(keypoints).astype(np.float32) + heatmaps = np.random.rand(17, 64, 48).astype(np.float32) + encoded_wo_sigma = np.random.rand(1, 17, 2) + keypoints_visible = np.ones((1, 17), dtype=np.float32) + self.data = dict( + keypoints=keypoints, + keypoints_visible=keypoints_visible, + heatmaps=heatmaps, + encoded_wo_sigma=encoded_wo_sigma) + + def test_encode(self): + keypoints = self.data['keypoints'] + keypoints_visible = self.data['keypoints_visible'] + + for name, cfg in self.configs: + codec = KEYPOINT_CODECS.build(cfg) + + encoded = codec.encode(keypoints, keypoints_visible) + heatmaps = encoded['heatmaps'] + keypoint_labels = encoded['keypoint_labels'] + keypoint_weights = encoded['keypoint_weights'] + + self.assertEqual(heatmaps.shape, (17, 64, 48), + f'Failed case: "{name}"') + self.assertEqual(keypoint_labels.shape, (1, 17, 2), + f'Failed case: "{name}"') + self.assertEqual(keypoint_weights.shape, (1, 17), + f'Failed case: "{name}"') + + def test_decode(self): + encoded_wo_sigma = self.data['encoded_wo_sigma'] + + for name, cfg in self.configs: + codec = KEYPOINT_CODECS.build(cfg) + + keypoints, scores = codec.decode(encoded_wo_sigma) + + self.assertEqual(keypoints.shape, (1, 17, 2), + f'Failed case: "{name}"') + self.assertEqual(scores.shape, (1, 17), f'Failed case: "{name}"') + + def test_cicular_verification(self): + keypoints = self.data['keypoints'] + keypoints_visible = self.data['keypoints_visible'] + + for name, cfg in self.configs: + codec = KEYPOINT_CODECS.build(cfg) + + encoded = codec.encode(keypoints, keypoints_visible) + keypoint_labels = encoded['keypoint_labels'] + + _keypoints, _ = codec.decode(keypoint_labels) + + self.assertTrue( + np.allclose(keypoints, _keypoints, atol=5.), + f'Failed case: "{name}"') diff --git a/tests/test_codecs/test_megvii_heatmap.py b/tests/test_codecs/test_megvii_heatmap.py index 31a5a965c9..51deb9dd99 100644 --- a/tests/test_codecs/test_megvii_heatmap.py +++ b/tests/test_codecs/test_megvii_heatmap.py @@ -1,85 +1,85 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import numpy as np - -from mmpose.codecs import MegviiHeatmap -from mmpose.registry import KEYPOINT_CODECS - - -class TestMegviiHeatmap(TestCase): - - def setUp(self) -> None: - # name and configs of all test cases - self.configs = [ - ( - 'megvii', - dict( - type='MegviiHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - kernel_size=11), - ), - ] - - # The bbox is usually padded so the keypoint will not be near the - # boundary - keypoints = (0.1 + 0.8 * np.random.rand(1, 17, 2)) * [192, 256] - keypoints = np.round(keypoints).astype(np.float32) - keypoints_visible = np.ones((1, 17), dtype=np.float32) - heatmaps = np.random.rand(17, 64, 48).astype(np.float32) - self.data = dict( - keypoints=keypoints, - keypoints_visible=keypoints_visible, - heatmaps=heatmaps) - - def test_encode(self): - keypoints = self.data['keypoints'] - keypoints_visible = self.data['keypoints_visible'] - - for name, cfg in self.configs: - codec = KEYPOINT_CODECS.build(cfg) - - encoded = codec.encode(keypoints, keypoints_visible) - - self.assertEqual(encoded['heatmaps'].shape, (17, 64, 48), - f'Failed case: "{name}"') - self.assertEqual(encoded['keypoint_weights'].shape, - (1, 17)), f'Failed case: "{name}"' - - def test_decode(self): - heatmaps = self.data['heatmaps'] - - for name, cfg in self.configs: - codec = KEYPOINT_CODECS.build(cfg) - - keypoints, scores = codec.decode(heatmaps) - - self.assertEqual(keypoints.shape, (1, 17, 2), - f'Failed case: "{name}"') - self.assertEqual(scores.shape, (1, 17), f'Failed case: "{name}"') - - def test_cicular_verification(self): - keypoints = self.data['keypoints'] - keypoints_visible = self.data['keypoints_visible'] - - for name, cfg in self.configs: - codec = KEYPOINT_CODECS.build(cfg) - - encoded = codec.encode(keypoints, keypoints_visible) - _keypoints, _ = codec.decode(encoded['heatmaps']) - - self.assertTrue( - np.allclose(keypoints, _keypoints, atol=5.), - f'Failed case: "{name}"') - - def test_errors(self): - # multiple instance - codec = MegviiHeatmap( - input_size=(192, 256), heatmap_size=(48, 64), kernel_size=11) - keypoints = np.random.rand(2, 17, 2) - keypoints_visible = np.random.rand(2, 17) - - with self.assertRaisesRegex(AssertionError, - 'only support single-instance'): - codec.encode(keypoints, keypoints_visible) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import numpy as np + +from mmpose.codecs import MegviiHeatmap +from mmpose.registry import KEYPOINT_CODECS + + +class TestMegviiHeatmap(TestCase): + + def setUp(self) -> None: + # name and configs of all test cases + self.configs = [ + ( + 'megvii', + dict( + type='MegviiHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + kernel_size=11), + ), + ] + + # The bbox is usually padded so the keypoint will not be near the + # boundary + keypoints = (0.1 + 0.8 * np.random.rand(1, 17, 2)) * [192, 256] + keypoints = np.round(keypoints).astype(np.float32) + keypoints_visible = np.ones((1, 17), dtype=np.float32) + heatmaps = np.random.rand(17, 64, 48).astype(np.float32) + self.data = dict( + keypoints=keypoints, + keypoints_visible=keypoints_visible, + heatmaps=heatmaps) + + def test_encode(self): + keypoints = self.data['keypoints'] + keypoints_visible = self.data['keypoints_visible'] + + for name, cfg in self.configs: + codec = KEYPOINT_CODECS.build(cfg) + + encoded = codec.encode(keypoints, keypoints_visible) + + self.assertEqual(encoded['heatmaps'].shape, (17, 64, 48), + f'Failed case: "{name}"') + self.assertEqual(encoded['keypoint_weights'].shape, + (1, 17)), f'Failed case: "{name}"' + + def test_decode(self): + heatmaps = self.data['heatmaps'] + + for name, cfg in self.configs: + codec = KEYPOINT_CODECS.build(cfg) + + keypoints, scores = codec.decode(heatmaps) + + self.assertEqual(keypoints.shape, (1, 17, 2), + f'Failed case: "{name}"') + self.assertEqual(scores.shape, (1, 17), f'Failed case: "{name}"') + + def test_cicular_verification(self): + keypoints = self.data['keypoints'] + keypoints_visible = self.data['keypoints_visible'] + + for name, cfg in self.configs: + codec = KEYPOINT_CODECS.build(cfg) + + encoded = codec.encode(keypoints, keypoints_visible) + _keypoints, _ = codec.decode(encoded['heatmaps']) + + self.assertTrue( + np.allclose(keypoints, _keypoints, atol=5.), + f'Failed case: "{name}"') + + def test_errors(self): + # multiple instance + codec = MegviiHeatmap( + input_size=(192, 256), heatmap_size=(48, 64), kernel_size=11) + keypoints = np.random.rand(2, 17, 2) + keypoints_visible = np.random.rand(2, 17) + + with self.assertRaisesRegex(AssertionError, + 'only support single-instance'): + codec.encode(keypoints, keypoints_visible) diff --git a/tests/test_codecs/test_msra_heatmap.py b/tests/test_codecs/test_msra_heatmap.py index 5897d01461..0e3c64d258 100644 --- a/tests/test_codecs/test_msra_heatmap.py +++ b/tests/test_codecs/test_msra_heatmap.py @@ -1,94 +1,94 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import numpy as np - -from mmpose.codecs import MSRAHeatmap -from mmpose.registry import KEYPOINT_CODECS - - -class TestMSRAHeatmap(TestCase): - - def setUp(self) -> None: - # name and configs of all test cases - self.configs = [ - ( - 'msra', - dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2.0), - ), - ( - 'msra+dark', - dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2.0, - unbiased=True), - ), - ] - - # The bbox is usually padded so the keypoint will not be near the - # boundary - keypoints = (0.1 + 0.8 * np.random.rand(1, 17, 2)) * [192, 256] - keypoints = np.round(keypoints).astype(np.float32) - keypoints_visible = np.ones((1, 17), dtype=np.float32) - heatmaps = np.random.rand(17, 64, 48).astype(np.float32) - self.data = dict( - keypoints=keypoints, - keypoints_visible=keypoints_visible, - heatmaps=heatmaps) - - def test_encode(self): - keypoints = self.data['keypoints'] - keypoints_visible = self.data['keypoints_visible'] - - for name, cfg in self.configs: - codec = KEYPOINT_CODECS.build(cfg) - - encoded = codec.encode(keypoints, keypoints_visible) - - self.assertEqual(encoded['heatmaps'].shape, (17, 64, 48), - f'Failed case: "{name}"') - self.assertEqual(encoded['keypoint_weights'].shape, - (1, 17)), f'Failed case: "{name}"' - - def test_decode(self): - heatmaps = self.data['heatmaps'] - - for name, cfg in self.configs: - codec = KEYPOINT_CODECS.build(cfg) - - keypoints, scores = codec.decode(heatmaps) - - self.assertEqual(keypoints.shape, (1, 17, 2), - f'Failed case: "{name}"') - self.assertEqual(scores.shape, (1, 17), f'Failed case: "{name}"') - - def test_cicular_verification(self): - keypoints = self.data['keypoints'] - keypoints_visible = self.data['keypoints_visible'] - - for name, cfg in self.configs: - codec = KEYPOINT_CODECS.build(cfg) - - encoded = codec.encode(keypoints, keypoints_visible) - _keypoints, _ = codec.decode(encoded['heatmaps']) - - self.assertTrue( - np.allclose(keypoints, _keypoints, atol=5.), - f'Failed case: "{name}"') - - def test_errors(self): - # multiple instance - codec = MSRAHeatmap( - input_size=(192, 256), heatmap_size=(48, 64), sigma=2.0) - keypoints = np.random.rand(2, 17, 2) - keypoints_visible = np.random.rand(2, 17) - - with self.assertRaisesRegex(AssertionError, - 'only support single-instance'): - codec.encode(keypoints, keypoints_visible) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import numpy as np + +from mmpose.codecs import MSRAHeatmap +from mmpose.registry import KEYPOINT_CODECS + + +class TestMSRAHeatmap(TestCase): + + def setUp(self) -> None: + # name and configs of all test cases + self.configs = [ + ( + 'msra', + dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2.0), + ), + ( + 'msra+dark', + dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2.0, + unbiased=True), + ), + ] + + # The bbox is usually padded so the keypoint will not be near the + # boundary + keypoints = (0.1 + 0.8 * np.random.rand(1, 17, 2)) * [192, 256] + keypoints = np.round(keypoints).astype(np.float32) + keypoints_visible = np.ones((1, 17), dtype=np.float32) + heatmaps = np.random.rand(17, 64, 48).astype(np.float32) + self.data = dict( + keypoints=keypoints, + keypoints_visible=keypoints_visible, + heatmaps=heatmaps) + + def test_encode(self): + keypoints = self.data['keypoints'] + keypoints_visible = self.data['keypoints_visible'] + + for name, cfg in self.configs: + codec = KEYPOINT_CODECS.build(cfg) + + encoded = codec.encode(keypoints, keypoints_visible) + + self.assertEqual(encoded['heatmaps'].shape, (17, 64, 48), + f'Failed case: "{name}"') + self.assertEqual(encoded['keypoint_weights'].shape, + (1, 17)), f'Failed case: "{name}"' + + def test_decode(self): + heatmaps = self.data['heatmaps'] + + for name, cfg in self.configs: + codec = KEYPOINT_CODECS.build(cfg) + + keypoints, scores = codec.decode(heatmaps) + + self.assertEqual(keypoints.shape, (1, 17, 2), + f'Failed case: "{name}"') + self.assertEqual(scores.shape, (1, 17), f'Failed case: "{name}"') + + def test_cicular_verification(self): + keypoints = self.data['keypoints'] + keypoints_visible = self.data['keypoints_visible'] + + for name, cfg in self.configs: + codec = KEYPOINT_CODECS.build(cfg) + + encoded = codec.encode(keypoints, keypoints_visible) + _keypoints, _ = codec.decode(encoded['heatmaps']) + + self.assertTrue( + np.allclose(keypoints, _keypoints, atol=5.), + f'Failed case: "{name}"') + + def test_errors(self): + # multiple instance + codec = MSRAHeatmap( + input_size=(192, 256), heatmap_size=(48, 64), sigma=2.0) + keypoints = np.random.rand(2, 17, 2) + keypoints_visible = np.random.rand(2, 17) + + with self.assertRaisesRegex(AssertionError, + 'only support single-instance'): + codec.encode(keypoints, keypoints_visible) diff --git a/tests/test_codecs/test_regression_label.py b/tests/test_codecs/test_regression_label.py index e83a3aab18..68ccac1a51 100644 --- a/tests/test_codecs/test_regression_label.py +++ b/tests/test_codecs/test_regression_label.py @@ -1,81 +1,81 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
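The MSRAHeatmap cases above follow the round-trip pattern shared by most codec tests in this directory: build the codec from a registry config, encode keypoints into heatmaps, decode them back, and check that the recovered coordinates agree with the originals within a few pixels of quantization error. A minimal standalone sketch of that loop, assuming an installed mmpose and reusing the 'msra' config from the test:

    import numpy as np
    from mmpose.codecs import MSRAHeatmap  # noqa: F401  (importing registers the codec)
    from mmpose.registry import KEYPOINT_CODECS

    codec = KEYPOINT_CODECS.build(
        dict(type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2.0))

    # keep keypoints away from the border, as the tests do
    keypoints = np.round(
        (0.1 + 0.8 * np.random.rand(1, 17, 2)) * [192, 256]).astype(np.float32)
    keypoints_visible = np.ones((1, 17), dtype=np.float32)

    encoded = codec.encode(keypoints, keypoints_visible)  # 'heatmaps': (17, 64, 48)
    decoded_keypoints, scores = codec.decode(encoded['heatmaps'])  # (1, 17, 2), (1, 17)

    assert np.allclose(keypoints, decoded_keypoints, atol=5.)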
-from unittest import TestCase - -import numpy as np - -from mmpose.codecs import RegressionLabel # noqa: F401 -from mmpose.registry import KEYPOINT_CODECS - - -class TestRegressionLabel(TestCase): - - # name and configs of all test cases - def setUp(self) -> None: - self.configs = [ - ( - 'regression', - dict( - type='RegressionLabel', - input_size=(192, 256), - ), - ), - ] - - # The bbox is usually padded so the keypoint will not be near the - # boundary - keypoints = (0.1 + 0.8 * np.random.rand(1, 17, 2)) * [192, 256] - keypoints = np.round(keypoints).astype(np.float32) - encoded_with_sigma = np.random.rand(1, 17, 4) - encoded_wo_sigma = np.random.rand(1, 17, 2) - keypoints_visible = np.ones((1, 17), dtype=np.float32) - self.data = dict( - keypoints=keypoints, - keypoints_visible=keypoints_visible, - encoded_with_sigma=encoded_with_sigma, - encoded_wo_sigma=encoded_wo_sigma) - - def test_encode(self): - keypoints = self.data['keypoints'] - keypoints_visible = self.data['keypoints_visible'] - - for name, cfg in self.configs: - codec = KEYPOINT_CODECS.build(cfg) - - encoded = codec.encode(keypoints, keypoints_visible) - - self.assertEqual(encoded['keypoint_labels'].shape, (1, 17, 2), - f'Failed case: "{name}"') - self.assertEqual(encoded['keypoint_weights'].shape, (1, 17), - f'Failed case: "{name}"') - - def test_decode(self): - encoded_with_sigma = self.data['encoded_with_sigma'] - encoded_wo_sigma = self.data['encoded_wo_sigma'] - - for name, cfg in self.configs: - codec = KEYPOINT_CODECS.build(cfg) - - keypoints1, scores1 = codec.decode(encoded_with_sigma) - keypoints2, scores2 = codec.decode(encoded_wo_sigma) - - self.assertEqual(keypoints1.shape, (1, 17, 2), - f'Failed case: "{name}"') - self.assertEqual(scores1.shape, (1, 17), f'Failed case: "{name}"') - self.assertEqual(keypoints2.shape, (1, 17, 2), - f'Failed case: "{name}"') - self.assertEqual(scores2.shape, (1, 17), f'Failed case: "{name}"') - - def test_cicular_verification(self): - keypoints = self.data['keypoints'] - keypoints_visible = self.data['keypoints_visible'] - - for name, cfg in self.configs: - codec = KEYPOINT_CODECS.build(cfg) - - encoded = codec.encode(keypoints, keypoints_visible) - - _keypoints, _ = codec.decode(encoded['keypoint_labels']) - - self.assertTrue( - np.allclose(keypoints, _keypoints, atol=5.), - f'Failed case: "{name}"') +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import numpy as np + +from mmpose.codecs import RegressionLabel # noqa: F401 +from mmpose.registry import KEYPOINT_CODECS + + +class TestRegressionLabel(TestCase): + + # name and configs of all test cases + def setUp(self) -> None: + self.configs = [ + ( + 'regression', + dict( + type='RegressionLabel', + input_size=(192, 256), + ), + ), + ] + + # The bbox is usually padded so the keypoint will not be near the + # boundary + keypoints = (0.1 + 0.8 * np.random.rand(1, 17, 2)) * [192, 256] + keypoints = np.round(keypoints).astype(np.float32) + encoded_with_sigma = np.random.rand(1, 17, 4) + encoded_wo_sigma = np.random.rand(1, 17, 2) + keypoints_visible = np.ones((1, 17), dtype=np.float32) + self.data = dict( + keypoints=keypoints, + keypoints_visible=keypoints_visible, + encoded_with_sigma=encoded_with_sigma, + encoded_wo_sigma=encoded_wo_sigma) + + def test_encode(self): + keypoints = self.data['keypoints'] + keypoints_visible = self.data['keypoints_visible'] + + for name, cfg in self.configs: + codec = KEYPOINT_CODECS.build(cfg) + + encoded = codec.encode(keypoints, keypoints_visible) + + self.assertEqual(encoded['keypoint_labels'].shape, (1, 17, 2), + f'Failed case: "{name}"') + self.assertEqual(encoded['keypoint_weights'].shape, (1, 17), + f'Failed case: "{name}"') + + def test_decode(self): + encoded_with_sigma = self.data['encoded_with_sigma'] + encoded_wo_sigma = self.data['encoded_wo_sigma'] + + for name, cfg in self.configs: + codec = KEYPOINT_CODECS.build(cfg) + + keypoints1, scores1 = codec.decode(encoded_with_sigma) + keypoints2, scores2 = codec.decode(encoded_wo_sigma) + + self.assertEqual(keypoints1.shape, (1, 17, 2), + f'Failed case: "{name}"') + self.assertEqual(scores1.shape, (1, 17), f'Failed case: "{name}"') + self.assertEqual(keypoints2.shape, (1, 17, 2), + f'Failed case: "{name}"') + self.assertEqual(scores2.shape, (1, 17), f'Failed case: "{name}"') + + def test_cicular_verification(self): + keypoints = self.data['keypoints'] + keypoints_visible = self.data['keypoints_visible'] + + for name, cfg in self.configs: + codec = KEYPOINT_CODECS.build(cfg) + + encoded = codec.encode(keypoints, keypoints_visible) + + _keypoints, _ = codec.decode(encoded['keypoint_labels']) + + self.assertTrue( + np.allclose(keypoints, _keypoints, atol=5.), + f'Failed case: "{name}"') diff --git a/tests/test_codecs/test_simcc_label.py b/tests/test_codecs/test_simcc_label.py index b4c242ef4e..285edd758c 100644 --- a/tests/test_codecs/test_simcc_label.py +++ b/tests/test_codecs/test_simcc_label.py @@ -1,156 +1,156 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
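In the SimCC cases that follow, each coordinate is encoded as a 1-D classification target whose length is the input size scaled by `simcc_split_ratio`, which is where the expected x/y label widths of int(192 * ratio) and int(256 * ratio) come from (384 and 512 bins for the 'simcc gaussian' config). The sketch below illustrates the decoding idea behind those shapes only; it is not the exact mmpose implementation, which refines the arg-max result and can apply DARK post-processing.

    import numpy as np

    simcc_split_ratio = 2.0                                        # 'simcc gaussian' config
    simcc_x = np.random.rand(1, 17, int(192 * simcc_split_ratio))  # (N, K, 384)
    simcc_y = np.random.rand(1, 17, int(256 * simcc_split_ratio))  # (N, K, 512)

    # take the arg-max bin per keypoint and map it back to input-image pixels
    x = simcc_x.argmax(axis=-1) / simcc_split_ratio
    y = simcc_y.argmax(axis=-1) / simcc_split_ratio
    keypoints = np.stack([x, y], axis=-1)                          # (1, 17, 2)
    scores = 0.5 * (simcc_x.max(axis=-1) + simcc_y.max(axis=-1))   # illustrative confidence only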
-from unittest import TestCase - -import numpy as np - -from mmpose.codecs import SimCCLabel # noqa: F401 -from mmpose.registry import KEYPOINT_CODECS - - -class TestSimCCLabel(TestCase): - - # name and configs of all test cases - def setUp(self) -> None: - self.configs = [ - ( - 'simcc gaussian', - dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='gaussian', - sigma=6.0, - simcc_split_ratio=2.0), - ), - ( - 'simcc smoothing', - dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='standard', - sigma=5.0, - simcc_split_ratio=3.0, - label_smooth_weight=0.1), - ), - ( - 'simcc one-hot', - dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='standard', - sigma=5.0, - simcc_split_ratio=3.0), - ), - ( - 'simcc dark', - dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='gaussian', - sigma=6.0, - simcc_split_ratio=2.0, - use_dark=True), - ), - ( - 'simcc separated sigmas', - dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='gaussian', - sigma=(4.9, 5.66), - simcc_split_ratio=2.0), - ), - ] - - # The bbox is usually padded so the keypoint will not be near the - # boundary - keypoints = (0.1 + 0.8 * np.random.rand(1, 17, 2)) * [192, 256] - keypoints = np.round(keypoints).astype(np.float32) - keypoints_visible = np.ones((1, 17), dtype=np.float32) - self.data = dict( - keypoints=keypoints, keypoints_visible=keypoints_visible) - - def test_encode(self): - keypoints = self.data['keypoints'] - keypoints_visible = self.data['keypoints_visible'] - - for name, cfg in self.configs: - codec = KEYPOINT_CODECS.build(cfg) - - encoded = codec.encode(keypoints, keypoints_visible) - - self.assertEqual(encoded['keypoint_x_labels'].shape, - (1, 17, int(192 * codec.simcc_split_ratio)), - f'Failed case: "{name}"') - self.assertEqual(encoded['keypoint_y_labels'].shape, - (1, 17, int(256 * codec.simcc_split_ratio)), - f'Failed case: "{name}"') - self.assertEqual(encoded['keypoint_weights'].shape, (1, 17), - f'Failed case: "{name}"') - - def test_decode(self): - for name, cfg in self.configs: - codec = KEYPOINT_CODECS.build(cfg) - - simcc_x = np.random.rand(1, 17, int(192 * codec.simcc_split_ratio)) - simcc_y = np.random.rand(1, 17, int(256 * codec.simcc_split_ratio)) - - keypoints, scores = codec.decode(simcc_x, simcc_y) - - self.assertEqual(keypoints.shape, (1, 17, 2), - f'Failed case: "{name}"') - self.assertEqual(scores.shape, (1, 17), f'Failed case: "{name}"') - - def test_cicular_verification(self): - keypoints = self.data['keypoints'] - keypoints_visible = self.data['keypoints_visible'] - - for name, cfg in self.configs: - codec = KEYPOINT_CODECS.build(cfg) - - encoded = codec.encode(keypoints, keypoints_visible) - keypoint_x_labels = encoded['keypoint_x_labels'] - keypoint_y_labels = encoded['keypoint_y_labels'] - - _keypoints, _ = codec.decode(keypoint_x_labels, keypoint_y_labels) - - self.assertTrue( - np.allclose(keypoints, _keypoints, atol=5.), - f'Failed case: "{name}"') - - def test_errors(self): - cfg = dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='uniform', - sigma=1.0, - simcc_split_ratio=2.0) - - with self.assertRaisesRegex(ValueError, - 'got invalid `smoothing_type`'): - _ = KEYPOINT_CODECS.build(cfg) - - # invalid label_smooth_weight in smoothing - cfg = dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='standard', - sigma=1.0, - simcc_split_ratio=2.0, - label_smooth_weight=1.1) - - with self.assertRaisesRegex(ValueError, - '`label_smooth_weight` should be'): - _ = 
KEYPOINT_CODECS.build(cfg) - - # invalid label_smooth_weight for gaussian - cfg = dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='gaussian', - sigma=1.0, - simcc_split_ratio=2.0, - label_smooth_weight=0.1) - - with self.assertRaisesRegex(ValueError, - 'is only used for `standard` mode.'): - _ = KEYPOINT_CODECS.build(cfg) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np + +from mmpose.codecs import SimCCLabel # noqa: F401 +from mmpose.registry import KEYPOINT_CODECS + + +class TestSimCCLabel(TestCase): + + # name and configs of all test cases + def setUp(self) -> None: + self.configs = [ + ( + 'simcc gaussian', + dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='gaussian', + sigma=6.0, + simcc_split_ratio=2.0), + ), + ( + 'simcc smoothing', + dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='standard', + sigma=5.0, + simcc_split_ratio=3.0, + label_smooth_weight=0.1), + ), + ( + 'simcc one-hot', + dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='standard', + sigma=5.0, + simcc_split_ratio=3.0), + ), + ( + 'simcc dark', + dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='gaussian', + sigma=6.0, + simcc_split_ratio=2.0, + use_dark=True), + ), + ( + 'simcc separated sigmas', + dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='gaussian', + sigma=(4.9, 5.66), + simcc_split_ratio=2.0), + ), + ] + + # The bbox is usually padded so the keypoint will not be near the + # boundary + keypoints = (0.1 + 0.8 * np.random.rand(1, 17, 2)) * [192, 256] + keypoints = np.round(keypoints).astype(np.float32) + keypoints_visible = np.ones((1, 17), dtype=np.float32) + self.data = dict( + keypoints=keypoints, keypoints_visible=keypoints_visible) + + def test_encode(self): + keypoints = self.data['keypoints'] + keypoints_visible = self.data['keypoints_visible'] + + for name, cfg in self.configs: + codec = KEYPOINT_CODECS.build(cfg) + + encoded = codec.encode(keypoints, keypoints_visible) + + self.assertEqual(encoded['keypoint_x_labels'].shape, + (1, 17, int(192 * codec.simcc_split_ratio)), + f'Failed case: "{name}"') + self.assertEqual(encoded['keypoint_y_labels'].shape, + (1, 17, int(256 * codec.simcc_split_ratio)), + f'Failed case: "{name}"') + self.assertEqual(encoded['keypoint_weights'].shape, (1, 17), + f'Failed case: "{name}"') + + def test_decode(self): + for name, cfg in self.configs: + codec = KEYPOINT_CODECS.build(cfg) + + simcc_x = np.random.rand(1, 17, int(192 * codec.simcc_split_ratio)) + simcc_y = np.random.rand(1, 17, int(256 * codec.simcc_split_ratio)) + + keypoints, scores = codec.decode(simcc_x, simcc_y) + + self.assertEqual(keypoints.shape, (1, 17, 2), + f'Failed case: "{name}"') + self.assertEqual(scores.shape, (1, 17), f'Failed case: "{name}"') + + def test_cicular_verification(self): + keypoints = self.data['keypoints'] + keypoints_visible = self.data['keypoints_visible'] + + for name, cfg in self.configs: + codec = KEYPOINT_CODECS.build(cfg) + + encoded = codec.encode(keypoints, keypoints_visible) + keypoint_x_labels = encoded['keypoint_x_labels'] + keypoint_y_labels = encoded['keypoint_y_labels'] + + _keypoints, _ = codec.decode(keypoint_x_labels, keypoint_y_labels) + + self.assertTrue( + np.allclose(keypoints, _keypoints, atol=5.), + f'Failed case: "{name}"') + + def test_errors(self): + cfg = dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='uniform', + sigma=1.0, + simcc_split_ratio=2.0) + + with 
self.assertRaisesRegex(ValueError, + 'got invalid `smoothing_type`'): + _ = KEYPOINT_CODECS.build(cfg) + + # invalid label_smooth_weight in smoothing + cfg = dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='standard', + sigma=1.0, + simcc_split_ratio=2.0, + label_smooth_weight=1.1) + + with self.assertRaisesRegex(ValueError, + '`label_smooth_weight` should be'): + _ = KEYPOINT_CODECS.build(cfg) + + # invalid label_smooth_weight for gaussian + cfg = dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='gaussian', + sigma=1.0, + simcc_split_ratio=2.0, + label_smooth_weight=0.1) + + with self.assertRaisesRegex(ValueError, + 'is only used for `standard` mode.'): + _ = KEYPOINT_CODECS.build(cfg) diff --git a/tests/test_codecs/test_spr.py b/tests/test_codecs/test_spr.py index 58eeeb7d17..e436e2debc 100644 --- a/tests/test_codecs/test_spr.py +++ b/tests/test_codecs/test_spr.py @@ -1,188 +1,188 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import numpy as np - -from mmpose.codecs import SPR -from mmpose.registry import KEYPOINT_CODECS -from mmpose.testing import get_coco_sample -from mmpose.utils.tensor_utils import to_numpy, to_tensor - - -class TestSPR(TestCase): - - def setUp(self) -> None: - pass - - def _make_multi_instance_data(self, data): - keypoints = data['keypoints'] - keypoints_visible = data['keypoints_visible'] - - keypoints_visible[..., 0] = 0 - - keypoints_outside = keypoints - keypoints.max(axis=-1, keepdims=True) - keypoints_outside_visible = np.zeros(keypoints_visible.shape) - - keypoint_overlap = keypoints.mean( - axis=-1, keepdims=True) + 0.8 * ( - keypoints - keypoints.mean(axis=-1, keepdims=True)) - keypoint_overlap_visible = keypoints_visible - - data['keypoints'] = np.concatenate( - (keypoints, keypoints_outside, keypoint_overlap), axis=0) - data['keypoints_visible'] = np.concatenate( - (keypoints_visible, keypoints_outside_visible, - keypoint_overlap_visible), - axis=0) - - return data - - def test_build(self): - cfg = dict( - type='SPR', - input_size=(512, 512), - heatmap_size=(128, 128), - sigma=4, - ) - codec = KEYPOINT_CODECS.build(cfg) - self.assertIsInstance(codec, SPR) - - def test_encode(self): - data = get_coco_sample(img_shape=(512, 512), num_instances=1) - data = self._make_multi_instance_data(data) - - # w/o keypoint heatmaps - codec = SPR( - input_size=(512, 512), - heatmap_size=(128, 128), - sigma=4, - ) - - encoded = codec.encode(data['keypoints'], data['keypoints_visible']) - - heatmaps = encoded['heatmaps'] - displacements = encoded['displacements'] - heatmap_weights = encoded['heatmap_weights'] - displacement_weights = encoded['displacement_weights'] - - self.assertEqual(heatmaps.shape, (1, 128, 128)) - self.assertEqual(heatmap_weights.shape, (1, 128, 128)) - self.assertEqual(displacements.shape, (34, 128, 128)) - self.assertEqual(displacement_weights.shape, (34, 128, 128)) - - # w/ keypoint heatmaps - with self.assertRaises(AssertionError): - codec = SPR( - input_size=(512, 512), - heatmap_size=(128, 128), - sigma=4, - generate_keypoint_heatmaps=True, - ) - - codec = SPR( - input_size=(512, 512), - heatmap_size=(128, 128), - sigma=(4, 2), - generate_keypoint_heatmaps=True, - ) - - encoded = codec.encode(data['keypoints'], data['keypoints_visible']) - - heatmaps = encoded['heatmaps'] - displacements = encoded['displacements'] - heatmap_weights = encoded['heatmap_weights'] - displacement_weights = encoded['displacement_weights'] - - self.assertEqual(heatmaps.shape, (18, 128, 
128)) - self.assertEqual(heatmap_weights.shape, (18, 128, 128)) - self.assertEqual(displacements.shape, (34, 128, 128)) - self.assertEqual(displacement_weights.shape, (34, 128, 128)) - - # root_type - with self.assertRaises(ValueError): - codec = SPR( - input_size=(512, 512), - heatmap_size=(128, 128), - sigma=(4, ), - root_type='box_center', - ) - encoded = codec.encode(data['keypoints'], - data['keypoints_visible']) - - codec = SPR( - input_size=(512, 512), - heatmap_size=(128, 128), - sigma=(4, ), - root_type='bbox_center', - ) - - encoded = codec.encode(data['keypoints'], data['keypoints_visible']) - - heatmaps = encoded['heatmaps'] - displacements = encoded['displacements'] - heatmap_weights = encoded['heatmap_weights'] - displacement_weights = encoded['displacement_weights'] - - self.assertEqual(heatmaps.shape, (1, 128, 128)) - self.assertEqual(heatmap_weights.shape, (1, 128, 128)) - self.assertEqual(displacements.shape, (34, 128, 128)) - self.assertEqual(displacement_weights.shape, (34, 128, 128)) - - def test_decode(self): - data = get_coco_sample(img_shape=(512, 512), num_instances=1) - - # decode w/o keypoint heatmaps - codec = SPR( - input_size=(512, 512), - heatmap_size=(128, 128), - sigma=(4, ), - generate_keypoint_heatmaps=False, - ) - - encoded = codec.encode(data['keypoints'], data['keypoints_visible']) - decoded = codec.decode( - to_tensor(encoded['heatmaps']), - to_tensor(encoded['displacements'])) - - keypoints, (root_scores, keypoint_scores) = decoded - self.assertIsNone(keypoint_scores) - self.assertEqual(keypoints.shape, data['keypoints'].shape) - self.assertEqual(root_scores.shape, data['keypoints'].shape[:1]) - - # decode w/ keypoint heatmaps - codec = SPR( - input_size=(512, 512), - heatmap_size=(128, 128), - sigma=(4, 2), - generate_keypoint_heatmaps=True, - ) - - encoded = codec.encode(data['keypoints'], data['keypoints_visible']) - decoded = codec.decode( - to_tensor(encoded['heatmaps']), - to_tensor(encoded['displacements'])) - - keypoints, (root_scores, keypoint_scores) = decoded - self.assertIsNotNone(keypoint_scores) - self.assertEqual(keypoints.shape, data['keypoints'].shape) - self.assertEqual(root_scores.shape, data['keypoints'].shape[:1]) - self.assertEqual(keypoint_scores.shape, data['keypoints'].shape[:2]) - - def test_cicular_verification(self): - data = get_coco_sample(img_shape=(512, 512), num_instances=1) - - codec = SPR( - input_size=(512, 512), - heatmap_size=(128, 128), - sigma=(4, ), - generate_keypoint_heatmaps=False, - ) - - encoded = codec.encode(data['keypoints'], data['keypoints_visible']) - decoded = codec.decode( - to_tensor(encoded['heatmaps']), - to_tensor(encoded['displacements'])) - - keypoints, _ = decoded - self.assertTrue( - np.allclose(to_numpy(keypoints), data['keypoints'], atol=5.)) +# Copyright (c) OpenMMLab. All rights reserved. 
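A note on the SPR shapes asserted above: the codec always produces one root (instance-center) heatmap, adds 17 keypoint heatmaps when `generate_keypoint_heatmaps=True` (hence 18 channels), and predicts a (dx, dy) displacement map pair per keypoint, giving 2 * 17 = 34 displacement channels at the 128 x 128 output resolution. A quick arithmetic check of the expected shapes:

    num_keypoints = 17
    heatmap_size = (128, 128)

    root_channels = 1                          # root/center heatmap
    keypoint_channels = num_keypoints          # only with generate_keypoint_heatmaps=True
    displacement_channels = 2 * num_keypoints  # (dx, dy) pair per keypoint

    print((root_channels,) + heatmap_size)                      # (1, 128, 128)
    print((root_channels + keypoint_channels,) + heatmap_size)  # (18, 128, 128)
    print((displacement_channels,) + heatmap_size)              # (34, 128, 128)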
+from unittest import TestCase + +import numpy as np + +from mmpose.codecs import SPR +from mmpose.registry import KEYPOINT_CODECS +from mmpose.testing import get_coco_sample +from mmpose.utils.tensor_utils import to_numpy, to_tensor + + +class TestSPR(TestCase): + + def setUp(self) -> None: + pass + + def _make_multi_instance_data(self, data): + keypoints = data['keypoints'] + keypoints_visible = data['keypoints_visible'] + + keypoints_visible[..., 0] = 0 + + keypoints_outside = keypoints - keypoints.max(axis=-1, keepdims=True) + keypoints_outside_visible = np.zeros(keypoints_visible.shape) + + keypoint_overlap = keypoints.mean( + axis=-1, keepdims=True) + 0.8 * ( + keypoints - keypoints.mean(axis=-1, keepdims=True)) + keypoint_overlap_visible = keypoints_visible + + data['keypoints'] = np.concatenate( + (keypoints, keypoints_outside, keypoint_overlap), axis=0) + data['keypoints_visible'] = np.concatenate( + (keypoints_visible, keypoints_outside_visible, + keypoint_overlap_visible), + axis=0) + + return data + + def test_build(self): + cfg = dict( + type='SPR', + input_size=(512, 512), + heatmap_size=(128, 128), + sigma=4, + ) + codec = KEYPOINT_CODECS.build(cfg) + self.assertIsInstance(codec, SPR) + + def test_encode(self): + data = get_coco_sample(img_shape=(512, 512), num_instances=1) + data = self._make_multi_instance_data(data) + + # w/o keypoint heatmaps + codec = SPR( + input_size=(512, 512), + heatmap_size=(128, 128), + sigma=4, + ) + + encoded = codec.encode(data['keypoints'], data['keypoints_visible']) + + heatmaps = encoded['heatmaps'] + displacements = encoded['displacements'] + heatmap_weights = encoded['heatmap_weights'] + displacement_weights = encoded['displacement_weights'] + + self.assertEqual(heatmaps.shape, (1, 128, 128)) + self.assertEqual(heatmap_weights.shape, (1, 128, 128)) + self.assertEqual(displacements.shape, (34, 128, 128)) + self.assertEqual(displacement_weights.shape, (34, 128, 128)) + + # w/ keypoint heatmaps + with self.assertRaises(AssertionError): + codec = SPR( + input_size=(512, 512), + heatmap_size=(128, 128), + sigma=4, + generate_keypoint_heatmaps=True, + ) + + codec = SPR( + input_size=(512, 512), + heatmap_size=(128, 128), + sigma=(4, 2), + generate_keypoint_heatmaps=True, + ) + + encoded = codec.encode(data['keypoints'], data['keypoints_visible']) + + heatmaps = encoded['heatmaps'] + displacements = encoded['displacements'] + heatmap_weights = encoded['heatmap_weights'] + displacement_weights = encoded['displacement_weights'] + + self.assertEqual(heatmaps.shape, (18, 128, 128)) + self.assertEqual(heatmap_weights.shape, (18, 128, 128)) + self.assertEqual(displacements.shape, (34, 128, 128)) + self.assertEqual(displacement_weights.shape, (34, 128, 128)) + + # root_type + with self.assertRaises(ValueError): + codec = SPR( + input_size=(512, 512), + heatmap_size=(128, 128), + sigma=(4, ), + root_type='box_center', + ) + encoded = codec.encode(data['keypoints'], + data['keypoints_visible']) + + codec = SPR( + input_size=(512, 512), + heatmap_size=(128, 128), + sigma=(4, ), + root_type='bbox_center', + ) + + encoded = codec.encode(data['keypoints'], data['keypoints_visible']) + + heatmaps = encoded['heatmaps'] + displacements = encoded['displacements'] + heatmap_weights = encoded['heatmap_weights'] + displacement_weights = encoded['displacement_weights'] + + self.assertEqual(heatmaps.shape, (1, 128, 128)) + self.assertEqual(heatmap_weights.shape, (1, 128, 128)) + self.assertEqual(displacements.shape, (34, 128, 128)) + 
self.assertEqual(displacement_weights.shape, (34, 128, 128)) + + def test_decode(self): + data = get_coco_sample(img_shape=(512, 512), num_instances=1) + + # decode w/o keypoint heatmaps + codec = SPR( + input_size=(512, 512), + heatmap_size=(128, 128), + sigma=(4, ), + generate_keypoint_heatmaps=False, + ) + + encoded = codec.encode(data['keypoints'], data['keypoints_visible']) + decoded = codec.decode( + to_tensor(encoded['heatmaps']), + to_tensor(encoded['displacements'])) + + keypoints, (root_scores, keypoint_scores) = decoded + self.assertIsNone(keypoint_scores) + self.assertEqual(keypoints.shape, data['keypoints'].shape) + self.assertEqual(root_scores.shape, data['keypoints'].shape[:1]) + + # decode w/ keypoint heatmaps + codec = SPR( + input_size=(512, 512), + heatmap_size=(128, 128), + sigma=(4, 2), + generate_keypoint_heatmaps=True, + ) + + encoded = codec.encode(data['keypoints'], data['keypoints_visible']) + decoded = codec.decode( + to_tensor(encoded['heatmaps']), + to_tensor(encoded['displacements'])) + + keypoints, (root_scores, keypoint_scores) = decoded + self.assertIsNotNone(keypoint_scores) + self.assertEqual(keypoints.shape, data['keypoints'].shape) + self.assertEqual(root_scores.shape, data['keypoints'].shape[:1]) + self.assertEqual(keypoint_scores.shape, data['keypoints'].shape[:2]) + + def test_cicular_verification(self): + data = get_coco_sample(img_shape=(512, 512), num_instances=1) + + codec = SPR( + input_size=(512, 512), + heatmap_size=(128, 128), + sigma=(4, ), + generate_keypoint_heatmaps=False, + ) + + encoded = codec.encode(data['keypoints'], data['keypoints_visible']) + decoded = codec.decode( + to_tensor(encoded['heatmaps']), + to_tensor(encoded['displacements'])) + + keypoints, _ = decoded + self.assertTrue( + np.allclose(to_numpy(keypoints), data['keypoints'], atol=5.)) diff --git a/tests/test_codecs/test_udp_heatmap.py b/tests/test_codecs/test_udp_heatmap.py index 81913ddee4..ebb0df485f 100644 --- a/tests/test_codecs/test_udp_heatmap.py +++ b/tests/test_codecs/test_udp_heatmap.py @@ -1,109 +1,109 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import numpy as np - -from mmpose.codecs import UDPHeatmap -from mmpose.registry import KEYPOINT_CODECS - - -class TestUDPHeatmap(TestCase): - - def setUp(self) -> None: - # name and configs of all test cases - self.configs = [ - ( - 'udp gaussian', - dict( - type='UDPHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - heatmap_type='gaussian', - ), - ), - ( - 'udp combined', - dict( - type='UDPHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - heatmap_type='combined'), - ), - ] - - # The bbox is usually padded so the keypoint will not be near the - # boundary - keypoints = (0.1 + 0.8 * np.random.rand(1, 17, 2)) * [192, 256] - keypoints = np.round(keypoints).astype(np.float32) - keypoints_visible = np.ones((1, 17), dtype=np.float32) - self.data = dict( - keypoints=keypoints, keypoints_visible=keypoints_visible) - - def test_encode(self): - keypoints = self.data['keypoints'] - keypoints_visible = self.data['keypoints_visible'] - - for name, cfg in self.configs: - codec = KEYPOINT_CODECS.build(cfg) - - encoded = codec.encode(keypoints, keypoints_visible) - - if codec.heatmap_type == 'combined': - channel_per_kpt = 3 - else: - channel_per_kpt = 1 - - self.assertEqual(encoded['heatmaps'].shape, - (channel_per_kpt * 17, 64, 48), - f'Failed case: "{name}"') - self.assertEqual(encoded['keypoint_weights'].shape, - (1, 17)), f'Failed case: "{name}"' - - def test_decode(self): - - for name, cfg in self.configs: - codec = KEYPOINT_CODECS.build(cfg) - if codec.heatmap_type == 'combined': - channel_per_kpt = 3 - else: - channel_per_kpt = 1 - - heatmaps = np.random.rand(channel_per_kpt * 17, 64, - 48).astype(np.float32) - - keypoints, scores = codec.decode(heatmaps) - - self.assertEqual(keypoints.shape, (1, 17, 2), - f'Failed case: "{name}"') - self.assertEqual(scores.shape, (1, 17), f'Failed case: "{name}"') - - def test_cicular_verification(self): - keypoints = self.data['keypoints'] - keypoints_visible = self.data['keypoints_visible'] - - for name, cfg in self.configs: - codec = KEYPOINT_CODECS.build(cfg) - - encoded = codec.encode(keypoints, keypoints_visible) - _keypoints, _ = codec.decode(encoded['heatmaps']) - - self.assertTrue( - np.allclose(keypoints, _keypoints, atol=10.), - f'Failed case: "{name}",{abs(keypoints - _keypoints) < 5.} ') - - def test_errors(self): - # invalid heatmap type - with self.assertRaisesRegex(ValueError, 'invalid `heatmap_type`'): - _ = UDPHeatmap( - input_size=(192, 256), - heatmap_size=(48, 64), - heatmap_type='invalid') - - # multiple instance - codec = UDPHeatmap(input_size=(192, 256), heatmap_size=(48, 64)) - keypoints = np.random.rand(2, 17, 2) - keypoints_visible = np.random.rand(2, 17) - - with self.assertRaisesRegex(AssertionError, - 'only support single-instance'): - codec.encode(keypoints, keypoints_visible) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import numpy as np + +from mmpose.codecs import UDPHeatmap +from mmpose.registry import KEYPOINT_CODECS + + +class TestUDPHeatmap(TestCase): + + def setUp(self) -> None: + # name and configs of all test cases + self.configs = [ + ( + 'udp gaussian', + dict( + type='UDPHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + heatmap_type='gaussian', + ), + ), + ( + 'udp combined', + dict( + type='UDPHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + heatmap_type='combined'), + ), + ] + + # The bbox is usually padded so the keypoint will not be near the + # boundary + keypoints = (0.1 + 0.8 * np.random.rand(1, 17, 2)) * [192, 256] + keypoints = np.round(keypoints).astype(np.float32) + keypoints_visible = np.ones((1, 17), dtype=np.float32) + self.data = dict( + keypoints=keypoints, keypoints_visible=keypoints_visible) + + def test_encode(self): + keypoints = self.data['keypoints'] + keypoints_visible = self.data['keypoints_visible'] + + for name, cfg in self.configs: + codec = KEYPOINT_CODECS.build(cfg) + + encoded = codec.encode(keypoints, keypoints_visible) + + if codec.heatmap_type == 'combined': + channel_per_kpt = 3 + else: + channel_per_kpt = 1 + + self.assertEqual(encoded['heatmaps'].shape, + (channel_per_kpt * 17, 64, 48), + f'Failed case: "{name}"') + self.assertEqual(encoded['keypoint_weights'].shape, + (1, 17)), f'Failed case: "{name}"' + + def test_decode(self): + + for name, cfg in self.configs: + codec = KEYPOINT_CODECS.build(cfg) + if codec.heatmap_type == 'combined': + channel_per_kpt = 3 + else: + channel_per_kpt = 1 + + heatmaps = np.random.rand(channel_per_kpt * 17, 64, + 48).astype(np.float32) + + keypoints, scores = codec.decode(heatmaps) + + self.assertEqual(keypoints.shape, (1, 17, 2), + f'Failed case: "{name}"') + self.assertEqual(scores.shape, (1, 17), f'Failed case: "{name}"') + + def test_cicular_verification(self): + keypoints = self.data['keypoints'] + keypoints_visible = self.data['keypoints_visible'] + + for name, cfg in self.configs: + codec = KEYPOINT_CODECS.build(cfg) + + encoded = codec.encode(keypoints, keypoints_visible) + _keypoints, _ = codec.decode(encoded['heatmaps']) + + self.assertTrue( + np.allclose(keypoints, _keypoints, atol=10.), + f'Failed case: "{name}",{abs(keypoints - _keypoints) < 5.} ') + + def test_errors(self): + # invalid heatmap type + with self.assertRaisesRegex(ValueError, 'invalid `heatmap_type`'): + _ = UDPHeatmap( + input_size=(192, 256), + heatmap_size=(48, 64), + heatmap_type='invalid') + + # multiple instance + codec = UDPHeatmap(input_size=(192, 256), heatmap_size=(48, 64)) + keypoints = np.random.rand(2, 17, 2) + keypoints_visible = np.random.rand(2, 17) + + with self.assertRaisesRegex(AssertionError, + 'only support single-instance'): + codec.encode(keypoints, keypoints_visible) diff --git a/tests/test_codecs/test_video_pose_lifting.py b/tests/test_codecs/test_video_pose_lifting.py index cc58292d0c..7f10c0ea8f 100644 --- a/tests/test_codecs/test_video_pose_lifting.py +++ b/tests/test_codecs/test_video_pose_lifting.py @@ -1,156 +1,156 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import os.path as osp -from unittest import TestCase - -import numpy as np -from mmengine.fileio import load - -from mmpose.codecs import VideoPoseLifting -from mmpose.registry import KEYPOINT_CODECS - - -class TestVideoPoseLifting(TestCase): - - def get_camera_param(self, imgname, camera_param) -> dict: - """Get camera parameters of a frame by its image name.""" - subj, rest = osp.basename(imgname).split('_', 1) - action, rest = rest.split('.', 1) - camera, rest = rest.split('_', 1) - return camera_param[(subj, camera)] - - def build_pose_lifting_label(self, **kwargs): - cfg = dict(type='VideoPoseLifting', num_keypoints=17) - cfg.update(kwargs) - return KEYPOINT_CODECS.build(cfg) - - def setUp(self) -> None: - keypoints = (0.1 + 0.8 * np.random.rand(1, 17, 2)) * [192, 256] - keypoints = np.round(keypoints).astype(np.float32) - keypoints_visible = np.random.randint(2, size=(1, 17)) - lifting_target = (0.1 + 0.8 * np.random.rand(17, 3)) - lifting_target_visible = np.random.randint(2, size=(17, )) - encoded_wo_sigma = np.random.rand(1, 17, 3) - - camera_param = load('tests/data/h36m/cameras.pkl') - camera_param = self.get_camera_param( - 'S1/S1_Directions_1.54138969/S1_Directions_1.54138969_000001.jpg', - camera_param) - - self.data = dict( - keypoints=keypoints, - keypoints_visible=keypoints_visible, - lifting_target=lifting_target, - lifting_target_visible=lifting_target_visible, - camera_param=camera_param, - encoded_wo_sigma=encoded_wo_sigma) - - def test_build(self): - codec = self.build_pose_lifting_label() - self.assertIsInstance(codec, VideoPoseLifting) - - def test_encode(self): - keypoints = self.data['keypoints'] - keypoints_visible = self.data['keypoints_visible'] - lifting_target = self.data['lifting_target'] - lifting_target_visible = self.data['lifting_target_visible'] - camera_param = self.data['camera_param'] - - # test default settings - codec = self.build_pose_lifting_label() - encoded = codec.encode(keypoints, keypoints_visible, lifting_target, - lifting_target_visible, camera_param) - - self.assertEqual(encoded['keypoint_labels'].shape, (1, 17, 2)) - self.assertEqual(encoded['lifting_target_label'].shape, (17, 3)) - self.assertEqual(encoded['lifting_target_weights'].shape, (17, )) - self.assertEqual(encoded['trajectory_weights'].shape, (17, )) - self.assertEqual(encoded['target_root'].shape, (3, )) - - # test not zero-centering - codec = self.build_pose_lifting_label(zero_center=False) - encoded = codec.encode(keypoints, keypoints_visible, lifting_target, - lifting_target_visible, camera_param) - - self.assertEqual(encoded['keypoint_labels'].shape, (1, 17, 2)) - self.assertEqual(encoded['lifting_target_label'].shape, (17, 3)) - self.assertEqual(encoded['lifting_target_weights'].shape, (17, )) - self.assertEqual(encoded['trajectory_weights'].shape, (17, )) - - # test removing root - codec = self.build_pose_lifting_label( - remove_root=True, save_index=True) - encoded = codec.encode(keypoints, keypoints_visible, lifting_target, - lifting_target_visible, camera_param) - - self.assertTrue('target_root_removed' in encoded - and 'target_root_index' in encoded) - self.assertEqual(encoded['lifting_target_weights'].shape, (16, )) - self.assertEqual(encoded['keypoint_labels'].shape, (1, 17, 2)) - self.assertEqual(encoded['lifting_target_label'].shape, (16, 3)) - self.assertEqual(encoded['target_root'].shape, (3, )) - - # test normalizing camera - codec = self.build_pose_lifting_label(normalize_camera=True) - encoded = codec.encode(keypoints, keypoints_visible, lifting_target, - 
lifting_target_visible, camera_param) - - self.assertTrue('camera_param' in encoded) - scale = np.array(0.5 * camera_param['w'], dtype=np.float32) - self.assertTrue( - np.allclose( - camera_param['f'] / scale, - encoded['camera_param']['f'], - atol=4.)) - - def test_decode(self): - lifting_target = self.data['lifting_target'] - encoded_wo_sigma = self.data['encoded_wo_sigma'] - - codec = self.build_pose_lifting_label() - - decoded, scores = codec.decode( - encoded_wo_sigma, target_root=lifting_target[..., 0, :]) - - self.assertEqual(decoded.shape, (1, 17, 3)) - self.assertEqual(scores.shape, (1, 17)) - - codec = self.build_pose_lifting_label(remove_root=True) - - decoded, scores = codec.decode( - encoded_wo_sigma, target_root=lifting_target[..., 0, :]) - - self.assertEqual(decoded.shape, (1, 18, 3)) - self.assertEqual(scores.shape, (1, 18)) - - def test_cicular_verification(self): - keypoints = self.data['keypoints'] - keypoints_visible = self.data['keypoints_visible'] - lifting_target = self.data['lifting_target'] - lifting_target_visible = self.data['lifting_target_visible'] - camera_param = self.data['camera_param'] - - # test default settings - codec = self.build_pose_lifting_label() - encoded = codec.encode(keypoints, keypoints_visible, lifting_target, - lifting_target_visible, camera_param) - - _keypoints, _ = codec.decode( - np.expand_dims(encoded['lifting_target_label'], axis=0), - target_root=lifting_target[..., 0, :]) - - self.assertTrue( - np.allclose( - np.expand_dims(lifting_target, axis=0), _keypoints, atol=5.)) - - # test removing root - codec = self.build_pose_lifting_label(remove_root=True) - encoded = codec.encode(keypoints, keypoints_visible, lifting_target, - lifting_target_visible, camera_param) - - _keypoints, _ = codec.decode( - np.expand_dims(encoded['lifting_target_label'], axis=0), - target_root=lifting_target[..., 0, :]) - - self.assertTrue( - np.allclose( - np.expand_dims(lifting_target, axis=0), _keypoints, atol=5.)) +# Copyright (c) OpenMMLab. All rights reserved. 
+import os.path as osp +from unittest import TestCase + +import numpy as np +from mmengine.fileio import load + +from mmpose.codecs import VideoPoseLifting +from mmpose.registry import KEYPOINT_CODECS + + +class TestVideoPoseLifting(TestCase): + + def get_camera_param(self, imgname, camera_param) -> dict: + """Get camera parameters of a frame by its image name.""" + subj, rest = osp.basename(imgname).split('_', 1) + action, rest = rest.split('.', 1) + camera, rest = rest.split('_', 1) + return camera_param[(subj, camera)] + + def build_pose_lifting_label(self, **kwargs): + cfg = dict(type='VideoPoseLifting', num_keypoints=17) + cfg.update(kwargs) + return KEYPOINT_CODECS.build(cfg) + + def setUp(self) -> None: + keypoints = (0.1 + 0.8 * np.random.rand(1, 17, 2)) * [192, 256] + keypoints = np.round(keypoints).astype(np.float32) + keypoints_visible = np.random.randint(2, size=(1, 17)) + lifting_target = (0.1 + 0.8 * np.random.rand(17, 3)) + lifting_target_visible = np.random.randint(2, size=(17, )) + encoded_wo_sigma = np.random.rand(1, 17, 3) + + camera_param = load('tests/data/h36m/cameras.pkl') + camera_param = self.get_camera_param( + 'S1/S1_Directions_1.54138969/S1_Directions_1.54138969_000001.jpg', + camera_param) + + self.data = dict( + keypoints=keypoints, + keypoints_visible=keypoints_visible, + lifting_target=lifting_target, + lifting_target_visible=lifting_target_visible, + camera_param=camera_param, + encoded_wo_sigma=encoded_wo_sigma) + + def test_build(self): + codec = self.build_pose_lifting_label() + self.assertIsInstance(codec, VideoPoseLifting) + + def test_encode(self): + keypoints = self.data['keypoints'] + keypoints_visible = self.data['keypoints_visible'] + lifting_target = self.data['lifting_target'] + lifting_target_visible = self.data['lifting_target_visible'] + camera_param = self.data['camera_param'] + + # test default settings + codec = self.build_pose_lifting_label() + encoded = codec.encode(keypoints, keypoints_visible, lifting_target, + lifting_target_visible, camera_param) + + self.assertEqual(encoded['keypoint_labels'].shape, (1, 17, 2)) + self.assertEqual(encoded['lifting_target_label'].shape, (17, 3)) + self.assertEqual(encoded['lifting_target_weights'].shape, (17, )) + self.assertEqual(encoded['trajectory_weights'].shape, (17, )) + self.assertEqual(encoded['target_root'].shape, (3, )) + + # test not zero-centering + codec = self.build_pose_lifting_label(zero_center=False) + encoded = codec.encode(keypoints, keypoints_visible, lifting_target, + lifting_target_visible, camera_param) + + self.assertEqual(encoded['keypoint_labels'].shape, (1, 17, 2)) + self.assertEqual(encoded['lifting_target_label'].shape, (17, 3)) + self.assertEqual(encoded['lifting_target_weights'].shape, (17, )) + self.assertEqual(encoded['trajectory_weights'].shape, (17, )) + + # test removing root + codec = self.build_pose_lifting_label( + remove_root=True, save_index=True) + encoded = codec.encode(keypoints, keypoints_visible, lifting_target, + lifting_target_visible, camera_param) + + self.assertTrue('target_root_removed' in encoded + and 'target_root_index' in encoded) + self.assertEqual(encoded['lifting_target_weights'].shape, (16, )) + self.assertEqual(encoded['keypoint_labels'].shape, (1, 17, 2)) + self.assertEqual(encoded['lifting_target_label'].shape, (16, 3)) + self.assertEqual(encoded['target_root'].shape, (3, )) + + # test normalizing camera + codec = self.build_pose_lifting_label(normalize_camera=True) + encoded = codec.encode(keypoints, keypoints_visible, lifting_target, + 
lifting_target_visible, camera_param) + + self.assertTrue('camera_param' in encoded) + scale = np.array(0.5 * camera_param['w'], dtype=np.float32) + self.assertTrue( + np.allclose( + camera_param['f'] / scale, + encoded['camera_param']['f'], + atol=4.)) + + def test_decode(self): + lifting_target = self.data['lifting_target'] + encoded_wo_sigma = self.data['encoded_wo_sigma'] + + codec = self.build_pose_lifting_label() + + decoded, scores = codec.decode( + encoded_wo_sigma, target_root=lifting_target[..., 0, :]) + + self.assertEqual(decoded.shape, (1, 17, 3)) + self.assertEqual(scores.shape, (1, 17)) + + codec = self.build_pose_lifting_label(remove_root=True) + + decoded, scores = codec.decode( + encoded_wo_sigma, target_root=lifting_target[..., 0, :]) + + self.assertEqual(decoded.shape, (1, 18, 3)) + self.assertEqual(scores.shape, (1, 18)) + + def test_cicular_verification(self): + keypoints = self.data['keypoints'] + keypoints_visible = self.data['keypoints_visible'] + lifting_target = self.data['lifting_target'] + lifting_target_visible = self.data['lifting_target_visible'] + camera_param = self.data['camera_param'] + + # test default settings + codec = self.build_pose_lifting_label() + encoded = codec.encode(keypoints, keypoints_visible, lifting_target, + lifting_target_visible, camera_param) + + _keypoints, _ = codec.decode( + np.expand_dims(encoded['lifting_target_label'], axis=0), + target_root=lifting_target[..., 0, :]) + + self.assertTrue( + np.allclose( + np.expand_dims(lifting_target, axis=0), _keypoints, atol=5.)) + + # test removing root + codec = self.build_pose_lifting_label(remove_root=True) + encoded = codec.encode(keypoints, keypoints_visible, lifting_target, + lifting_target_visible, camera_param) + + _keypoints, _ = codec.decode( + np.expand_dims(encoded['lifting_target_label'], axis=0), + target_root=lifting_target[..., 0, :]) + + self.assertTrue( + np.allclose( + np.expand_dims(lifting_target, axis=0), _keypoints, atol=5.)) diff --git a/tests/test_datasets/test_datasets/test_animal_datasets/test_animalkingdom_dataset.py b/tests/test_datasets/test_datasets/test_animal_datasets/test_animalkingdom_dataset.py index cc5e42ffbb..6971bafa84 100644 --- a/tests/test_datasets/test_datasets/test_animal_datasets/test_animalkingdom_dataset.py +++ b/tests/test_datasets/test_datasets/test_animal_datasets/test_animalkingdom_dataset.py @@ -1,146 +1,146 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.animal import AnimalKingdomDataset - - -class TestAnimalKingdomDataset(TestCase): - - def build_ak_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_animalkingdom.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/ak', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return AnimalKingdomDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_ak_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'Animal Kingdom') - - # test number of keypoints - num_keypoints = 23 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] - self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training - dataset = self.build_ak_dataset(data_mode='topdown') - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) - - # test topdown testing - dataset = self.build_ak_dataset(data_mode='topdown', test_mode=True) - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_ak_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_ak_dataset(data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_ak_dataset(data_mode='invalid') - - with 
self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_ak_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_ak_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_ak_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.animal import AnimalKingdomDataset + + +class TestAnimalKingdomDataset(TestCase): + + def build_ak_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_animalkingdom.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/ak', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return AnimalKingdomDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_ak_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'Animal Kingdom') + + # test number of keypoints + num_keypoints = 23 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] + self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_ak_dataset(data_mode='topdown') + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) + + # test topdown testing + dataset = self.build_ak_dataset(data_mode='topdown', test_mode=True) + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, 
None) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_ak_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_ak_dataset(data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_ak_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_ak_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_ak_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_ak_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_animal_datasets/test_animalpose_dataset.py b/tests/test_datasets/test_datasets/test_animal_datasets/test_animalpose_dataset.py index 9bb9725252..a98966a211 100644 --- a/tests/test_datasets/test_datasets/test_animal_datasets/test_animalpose_dataset.py +++ b/tests/test_datasets/test_datasets/test_animal_datasets/test_animalpose_dataset.py @@ -1,148 +1,148 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.animal import AnimalPoseDataset - - -class TestAnimalPoseDataset(TestCase): - - def build_animalpose_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_animalpose.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/animalpose', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return AnimalPoseDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_animalpose_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - 
self.assertEqual(dataset.metainfo['dataset_name'], 'animalpose') - - # test number of keypoints - num_keypoints = 20 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] - self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training - dataset = self.build_animalpose_dataset(data_mode='topdown') - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) - - # test topdown testing - dataset = self.build_animalpose_dataset( - data_mode='topdown', test_mode=True) - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_animalpose_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_animalpose_dataset( - data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_animalpose_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_animalpose_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_animalpose_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_animalpose_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.animal import AnimalPoseDataset + + +class TestAnimalPoseDataset(TestCase): + + def build_animalpose_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_animalpose.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/animalpose', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return AnimalPoseDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_animalpose_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'animalpose') + + # test number of keypoints + num_keypoints = 20 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] + self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_animalpose_dataset(data_mode='topdown') + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) + + # test topdown testing + dataset = self.build_animalpose_dataset( + data_mode='topdown', test_mode=True) + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_animalpose_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_animalpose_dataset( + data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = 
self.build_animalpose_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_animalpose_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_animalpose_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_animalpose_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_animal_datasets/test_ap10k_dataset.py b/tests/test_datasets/test_datasets/test_animal_datasets/test_ap10k_dataset.py index 74ae89e960..7cbd8916b7 100644 --- a/tests/test_datasets/test_datasets/test_animal_datasets/test_ap10k_dataset.py +++ b/tests/test_datasets/test_datasets/test_animal_datasets/test_ap10k_dataset.py @@ -1,148 +1,148 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.animal import AP10KDataset - - -class TestAP10KDataset(TestCase): - - def build_ap10k_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_ap10k.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/ap10k', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return AP10KDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_ap10k_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'ap10k') - - # test number of keypoints - num_keypoints = 17 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] - self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def 
test_topdown(self): - # test topdown training - dataset = self.build_ap10k_dataset(data_mode='topdown') - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) - - # test topdown testing - dataset = self.build_ap10k_dataset(data_mode='topdown', test_mode=True) - self.assertEqual(dataset.test_mode, True) - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_ap10k_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_ap10k_dataset( - data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_ap10k_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_ap10k_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_ap10k_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_ap10k_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.animal import AP10KDataset + + +class TestAP10KDataset(TestCase): + + def build_ap10k_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_ap10k.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/ap10k', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return AP10KDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_ap10k_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'ap10k') + + # test number of keypoints + num_keypoints = 17 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] + self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_ap10k_dataset(data_mode='topdown') + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) + + # test topdown testing + dataset = self.build_ap10k_dataset(data_mode='topdown', test_mode=True) + self.assertEqual(dataset.test_mode, True) + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_ap10k_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_ap10k_dataset( + data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = 
self.build_ap10k_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_ap10k_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_ap10k_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_ap10k_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_animal_datasets/test_atrw_dataset.py b/tests/test_datasets/test_datasets/test_animal_datasets/test_atrw_dataset.py index e1554b55c4..4cca584d2e 100644 --- a/tests/test_datasets/test_datasets/test_animal_datasets/test_atrw_dataset.py +++ b/tests/test_datasets/test_datasets/test_animal_datasets/test_atrw_dataset.py @@ -1,146 +1,146 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.animal import ATRWDataset - - -class TestATRWDataset(TestCase): - - def build_atrw_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_atrw.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/atrw', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return ATRWDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_atrw_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'atrw') - - # test number of keypoints - num_keypoints = 15 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] - self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training 
- dataset = self.build_atrw_dataset(data_mode='topdown') - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) - - # test topdown testing - dataset = self.build_atrw_dataset(data_mode='topdown', test_mode=True) - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_atrw_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_atrw_dataset(data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_atrw_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_atrw_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_atrw_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_atrw_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.animal import ATRWDataset + + +class TestATRWDataset(TestCase): + + def build_atrw_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_atrw.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/atrw', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return ATRWDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_atrw_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + 
self.assertEqual(dataset.metainfo['dataset_name'], 'atrw') + + # test number of keypoints + num_keypoints = 15 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] + self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_atrw_dataset(data_mode='topdown') + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) + + # test topdown testing + dataset = self.build_atrw_dataset(data_mode='topdown', test_mode=True) + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_atrw_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_atrw_dataset(data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_atrw_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_atrw_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_atrw_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_atrw_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_animal_datasets/test_fly_dataset.py b/tests/test_datasets/test_datasets/test_animal_datasets/test_fly_dataset.py index 9765e318db..34828716bc 100644 --- a/tests/test_datasets/test_datasets/test_animal_datasets/test_fly_dataset.py +++ b/tests/test_datasets/test_datasets/test_animal_datasets/test_fly_dataset.py @@ -1,144 +1,144 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.animal import FlyDataset - - -class TestFlyDataset(TestCase): - - def build_fly_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_fly.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/fly', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return FlyDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_fly_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'fly') - - # test number of keypoints - num_keypoints = 32 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training - dataset = self.build_fly_dataset(data_mode='topdown') - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) - - # test topdown testing - dataset = self.build_fly_dataset(data_mode='topdown', test_mode=True) - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_fly_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_fly_dataset(data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_fly_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_fly_dataset( - data_mode='topdown', - test_mode=False, - 
bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_fly_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_fly_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.animal import FlyDataset + + +class TestFlyDataset(TestCase): + + def build_fly_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_fly.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/fly', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return FlyDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_fly_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'fly') + + # test number of keypoints + num_keypoints = 32 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_fly_dataset(data_mode='topdown') + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) + + # test topdown testing + dataset = self.build_fly_dataset(data_mode='topdown', test_mode=True) + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_fly_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = 
self.build_fly_dataset(data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_fly_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_fly_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_fly_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_fly_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_animal_datasets/test_horse10_dataset.py b/tests/test_datasets/test_datasets/test_animal_datasets/test_horse10_dataset.py index 39e32c1a7b..29a6cd27f2 100644 --- a/tests/test_datasets/test_datasets/test_animal_datasets/test_horse10_dataset.py +++ b/tests/test_datasets/test_datasets/test_animal_datasets/test_horse10_dataset.py @@ -1,146 +1,146 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.animal import Horse10Dataset - - -class TestHorse10Dataset(TestCase): - - def build_horse10_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_horse10.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/horse10', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return Horse10Dataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_horse10_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'horse10') - - # test number of keypoints - num_keypoints = 22 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - - # test some extra metainfo 
- self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training - dataset = self.build_horse10_dataset(data_mode='topdown') - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 3) - self.check_data_info_keys(dataset[0]) - - # test topdown testing - dataset = self.build_horse10_dataset( - data_mode='topdown', test_mode=True) - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 3) - self.check_data_info_keys(dataset[0]) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_horse10_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 3) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_horse10_dataset( - data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 3) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_horse10_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_horse10_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_horse10_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_horse10_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. 
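[editor's note, not part of the patch] The hunks in this patch re-add the test modules unchanged apart from header and line-ending normalization. For orientation, the construction pattern these animal-dataset tests exercise can be used directly; a minimal sketch, assuming mmpose is installed and the bundled annotations under tests/data/ are present:

# Illustrative sketch only: build one of these top-down datasets the same
# way build_fly_dataset() above does, with an empty pipeline so that raw
# data_info dicts are returned.
import numpy as np

from mmpose.datasets.datasets.animal import FlyDataset

dataset = FlyDataset(
    ann_file='test_fly.json',
    bbox_file=None,
    data_mode='topdown',
    data_root='tests/data/fly',
    pipeline=[],          # no transforms, so dataset[i] is a plain dict
    test_mode=False)

sample = dataset[0]       # one annotated instance per sample in topdown mode
assert isinstance(sample['bbox'], np.ndarray)
assert isinstance(sample['keypoints'], np.ndarray)
print(len(dataset), sample['img_path'])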
+from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.animal import Horse10Dataset + + +class TestHorse10Dataset(TestCase): + + def build_horse10_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_horse10.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/horse10', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return Horse10Dataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_horse10_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'horse10') + + # test number of keypoints + num_keypoints = 22 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_horse10_dataset(data_mode='topdown') + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 3) + self.check_data_info_keys(dataset[0]) + + # test topdown testing + dataset = self.build_horse10_dataset( + data_mode='topdown', test_mode=True) + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 3) + self.check_data_info_keys(dataset[0]) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_horse10_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 3) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_horse10_dataset( + data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 3) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_horse10_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_horse10_dataset( + 
data_mode='topdown', + test_mode=False, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_horse10_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_horse10_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_animal_datasets/test_locust_dataset.py b/tests/test_datasets/test_datasets/test_animal_datasets/test_locust_dataset.py index 3f48696a4b..aa926720ee 100644 --- a/tests/test_datasets/test_datasets/test_animal_datasets/test_locust_dataset.py +++ b/tests/test_datasets/test_datasets/test_animal_datasets/test_locust_dataset.py @@ -1,146 +1,146 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.animal import LocustDataset - - -class TestLocustDataset(TestCase): - - def build_locust_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_locust.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/locust', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return LocustDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_locust_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'locust') - - # test number of keypoints - num_keypoints = 35 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training - dataset = self.build_locust_dataset(data_mode='topdown') - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) - - # test topdown testing - dataset = self.build_locust_dataset( - 
data_mode='topdown', test_mode=True) - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_locust_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_locust_dataset( - data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_locust_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_locust_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_locust_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_locust_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.animal import LocustDataset + + +class TestLocustDataset(TestCase): + + def build_locust_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_locust.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/locust', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return LocustDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_locust_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'locust') + + # test number of keypoints + num_keypoints = 35 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + 
len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_locust_dataset(data_mode='topdown') + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) + + # test topdown testing + dataset = self.build_locust_dataset( + data_mode='topdown', test_mode=True) + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_locust_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_locust_dataset( + data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_locust_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_locust_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_locust_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_locust_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_animal_datasets/test_macaque_dataset.py b/tests/test_datasets/test_datasets/test_animal_datasets/test_macaque_dataset.py index 1dee242812..89fbaa1435 100644 --- a/tests/test_datasets/test_datasets/test_animal_datasets/test_macaque_dataset.py +++ b/tests/test_datasets/test_datasets/test_animal_datasets/test_macaque_dataset.py @@ -1,148 +1,148 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
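[editor's note, not part of the patch] The check_data_info_keys() helpers repeated in these files pin down how the two data modes differ: in 'topdown' each sample is a single instance (scalar id), while in 'bottomup' each sample aggregates all instances of one image (id becomes a list and invalid_segs appears). A hedged sketch reusing the macaque test config:

# Illustrative only: contrast the per-sample structure of the two data modes.
from mmpose.datasets.datasets.animal import MacaqueDataset

cfg = dict(
    ann_file='test_macaque.json',
    bbox_file=None,
    data_root='tests/data/macaque',
    pipeline=[],
    test_mode=False)

td = MacaqueDataset(data_mode='topdown', **cfg)
bu = MacaqueDataset(data_mode='bottomup', **cfg)

print(type(td[0]['id']))        # int  -> one instance per sample
print(type(bu[0]['id']))        # list -> all instances of one image
print('invalid_segs' in bu[0])  # True, only present in bottomup samples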
-from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.animal import MacaqueDataset - - -class TestMacaqueDataset(TestCase): - - def build_macaque_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_macaque.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/macaque', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return MacaqueDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_macaque_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'macaque') - - # test number of keypoints - num_keypoints = 17 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] - self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training - dataset = self.build_macaque_dataset(data_mode='topdown') - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) - - # test topdown testing - dataset = self.build_macaque_dataset( - data_mode='topdown', test_mode=True) - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_macaque_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_macaque_dataset( - data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_macaque_dataset(data_mode='invalid') - - 
with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_macaque_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_macaque_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_macaque_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.animal import MacaqueDataset + + +class TestMacaqueDataset(TestCase): + + def build_macaque_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_macaque.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/macaque', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return MacaqueDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_macaque_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'macaque') + + # test number of keypoints + num_keypoints = 17 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] + self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_macaque_dataset(data_mode='topdown') + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) + + # test topdown testing + dataset = self.build_macaque_dataset( + data_mode='topdown', test_mode=True) + self.assertEqual(dataset.data_mode, 'topdown') + 
self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_macaque_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_macaque_dataset( + data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_macaque_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_macaque_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_macaque_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_macaque_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_animal_datasets/test_zebra_dataset.py b/tests/test_datasets/test_datasets/test_animal_datasets/test_zebra_dataset.py index c0a2db9a2a..c34b1b03fe 100644 --- a/tests/test_datasets/test_datasets/test_animal_datasets/test_zebra_dataset.py +++ b/tests/test_datasets/test_datasets/test_animal_datasets/test_zebra_dataset.py @@ -1,145 +1,145 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.animal import ZebraDataset - - -class TestZebraDataset(TestCase): - - def build_zebra_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_zebra.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/zebra', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return ZebraDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_zebra_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'zebra') - - # test number of keypoints - num_keypoints = 9 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training - dataset = self.build_zebra_dataset(data_mode='topdown') - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) - - # test topdown testing - dataset = self.build_zebra_dataset(data_mode='topdown', test_mode=True) - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_zebra_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_zebra_dataset( - data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_zebra_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_zebra_dataset( - data_mode='topdown', - 
test_mode=False, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_zebra_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_zebra_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.animal import ZebraDataset + + +class TestZebraDataset(TestCase): + + def build_zebra_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_zebra.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/zebra', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return ZebraDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_zebra_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'zebra') + + # test number of keypoints + num_keypoints = 9 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_zebra_dataset(data_mode='topdown') + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) + + # test topdown testing + dataset = self.build_zebra_dataset(data_mode='topdown', test_mode=True) + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_zebra_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup 
testing + dataset = self.build_zebra_dataset( + data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_zebra_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_zebra_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_zebra_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_zebra_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_body_datasets/test_aic_dataset.py b/tests/test_datasets/test_datasets/test_body_datasets/test_aic_dataset.py index ae00a64393..55493fc57f 100644 --- a/tests/test_datasets/test_datasets/test_body_datasets/test_aic_dataset.py +++ b/tests/test_datasets/test_datasets/test_body_datasets/test_aic_dataset.py @@ -1,144 +1,144 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.body import AicDataset - - -class TestAicDataset(TestCase): - - def build_aic_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_aic.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/aic', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return AicDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_aic_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'aic') - - # test number of keypoints - num_keypoints = 14 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - # note that len(sigmas) may be zero if 
dataset.metainfo['sigmas'] = [] - self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training - dataset = self.build_aic_dataset(data_mode='topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 9) - self.check_data_info_keys(dataset[0], data_mode='topdown') - - # test topdown testing - dataset = self.build_aic_dataset(data_mode='topdown', test_mode=True) - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 9) - self.check_data_info_keys(dataset[0], data_mode='topdown') - - def test_bottomup(self): - # test bottomup training - dataset = self.build_aic_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 3) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_aic_dataset(data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 3) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_aic_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_aic_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_aic_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_aic_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. 
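[editor's note, not part of the patch] The test_metainfo() methods all check the same per-dataset contract. A small sketch of what a consumer can rely on, using the AIC config from the test above (14 keypoints; sigmas are non-empty for AIC in these tests):

# Sketch: metainfo fields these tests pin down, and their length invariants.
from mmpose.datasets.datasets.body import AicDataset

dataset = AicDataset(
    ann_file='test_aic.json',
    bbox_file=None,
    data_mode='topdown',
    data_root='tests/data/aic',
    pipeline=[],
    test_mode=False)

meta = dataset.metainfo
assert meta['dataset_name'] == 'aic'
assert meta['num_keypoints'] == 14
# per-keypoint arrays stay in sync with num_keypoints ...
assert len(meta['keypoint_colors']) == meta['num_keypoints']
assert len(meta['dataset_keypoint_weights']) == meta['num_keypoints']
assert len(meta['sigmas']) == meta['num_keypoints']
# ... and every skeleton link has a drawing colour
assert len(meta['skeleton_links']) == len(meta['skeleton_link_colors'])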
+from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.body import AicDataset + + +class TestAicDataset(TestCase): + + def build_aic_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_aic.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/aic', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return AicDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_aic_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'aic') + + # test number of keypoints + num_keypoints = 14 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] + self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_aic_dataset(data_mode='topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 9) + self.check_data_info_keys(dataset[0], data_mode='topdown') + + # test topdown testing + dataset = self.build_aic_dataset(data_mode='topdown', test_mode=True) + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 9) + self.check_data_info_keys(dataset[0], data_mode='topdown') + + def test_bottomup(self): + # test bottomup training + dataset = self.build_aic_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 3) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_aic_dataset(data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 3) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_aic_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = 
self.build_aic_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_aic_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_aic_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_body_datasets/test_coco_dataset.py b/tests/test_datasets/test_datasets/test_body_datasets/test_coco_dataset.py index de78264dae..df92ee4b59 100644 --- a/tests/test_datasets/test_datasets/test_body_datasets/test_coco_dataset.py +++ b/tests/test_datasets/test_datasets/test_body_datasets/test_coco_dataset.py @@ -1,158 +1,158 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.body import CocoDataset - - -class TestCocoDataset(TestCase): - - def build_coco_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_coco.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/coco', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return CocoDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_coco_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'coco') - - # test number of keypoints - num_keypoints = 17 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] - self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training - dataset = self.build_coco_dataset(data_mode='topdown') - self.assertEqual(len(dataset), 12) - self.check_data_info_keys(dataset[0], data_mode='topdown') - - # test topdown 
testing - dataset = self.build_coco_dataset(data_mode='topdown', test_mode=True) - self.assertEqual(len(dataset), 12) - self.check_data_info_keys(dataset[0], data_mode='topdown') - - # test topdown testing with bbox file - dataset = self.build_coco_dataset( - data_mode='topdown', - test_mode=True, - bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') - self.assertEqual(len(dataset), 118) - self.check_data_info_keys(dataset[0], data_mode='topdown') - - # test topdown testing with filter config - dataset = self.build_coco_dataset( - data_mode='topdown', - test_mode=True, - bbox_file='tests/data/coco/test_coco_det_AP_H_56.json', - filter_cfg=dict(bbox_score_thr=0.3)) - self.assertEqual(len(dataset), 33) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_coco_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_coco_dataset(data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_coco_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_coco_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_coco_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_coco_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. 
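[editor's note, not part of the patch] The COCO test above is the one place where a detection bbox file is actually exercised: detection boxes may replace ground-truth boxes only in top-down test mode, and filter_cfg can then drop low-scoring boxes (the bundled file yields 118 samples, shrinking to 33 at bbox_score_thr=0.3). A sketch of that flow, with the constraints enforced by the ValueError checks noted in comments:

# Sketch: top-down, inference-style loading from a detection result file.
from mmpose.datasets.datasets.body import CocoDataset

dataset = CocoDataset(
    ann_file='test_coco.json',
    data_root='tests/data/coco',
    data_mode='topdown',      # bbox_file and bbox_score_thr require topdown
    test_mode=True,           # bbox_file additionally requires test_mode=True
    bbox_file='tests/data/coco/test_coco_det_AP_H_56.json',
    filter_cfg=dict(bbox_score_thr=0.3),  # discard detections scored < 0.3
    pipeline=[])

print(len(dataset))  # 33 with the bundled test files (118 without the filter)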
+from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.body import CocoDataset + + +class TestCocoDataset(TestCase): + + def build_coco_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_coco.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/coco', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return CocoDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_coco_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'coco') + + # test number of keypoints + num_keypoints = 17 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] + self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_coco_dataset(data_mode='topdown') + self.assertEqual(len(dataset), 12) + self.check_data_info_keys(dataset[0], data_mode='topdown') + + # test topdown testing + dataset = self.build_coco_dataset(data_mode='topdown', test_mode=True) + self.assertEqual(len(dataset), 12) + self.check_data_info_keys(dataset[0], data_mode='topdown') + + # test topdown testing with bbox file + dataset = self.build_coco_dataset( + data_mode='topdown', + test_mode=True, + bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') + self.assertEqual(len(dataset), 118) + self.check_data_info_keys(dataset[0], data_mode='topdown') + + # test topdown testing with filter config + dataset = self.build_coco_dataset( + data_mode='topdown', + test_mode=True, + bbox_file='tests/data/coco/test_coco_det_AP_H_56.json', + filter_cfg=dict(bbox_score_thr=0.3)) + self.assertEqual(len(dataset), 33) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_coco_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup 
testing + dataset = self.build_coco_dataset(data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_coco_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_coco_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_coco_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_coco_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_body_datasets/test_crowdpose_dataset.py b/tests/test_datasets/test_datasets/test_body_datasets/test_crowdpose_dataset.py index 8d63925257..21085aa7e3 100644 --- a/tests/test_datasets/test_datasets/test_body_datasets/test_crowdpose_dataset.py +++ b/tests/test_datasets/test_datasets/test_body_datasets/test_crowdpose_dataset.py @@ -1,162 +1,162 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.body import CrowdPoseDataset - - -class TestCrowdPoseDataset(TestCase): - - def build_crowdpose_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_crowdpose.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/crowdpose', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return CrowdPoseDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_crowdpose_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'crowdpose') - - # test number of keypoints - num_keypoints = 14 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - 
len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] - self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training - dataset = self.build_crowdpose_dataset(data_mode='topdown') - # filter an invalid instance due to num_keypoints = 0 - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0], data_mode='topdown') - - # test topdown testing - dataset = self.build_crowdpose_dataset( - data_mode='topdown', test_mode=True) - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0], data_mode='topdown') - - # test topdown testing with bbox file - dataset = self.build_crowdpose_dataset( - data_mode='topdown', - test_mode=True, - bbox_file='tests/data/crowdpose/test_crowdpose_det_AP_40.json') - self.assertEqual(len(dataset), 6) - self.check_data_info_keys(dataset[0], data_mode='topdown') - - # test topdown testing with filter config - # filter one instance due to bbox_score < bbox_score_thr - dataset = self.build_crowdpose_dataset( - data_mode='topdown', - test_mode=True, - bbox_file='tests/data/crowdpose/test_crowdpose_det_AP_40.json', - filter_cfg=dict(bbox_score_thr=0.97)) - self.assertEqual(len(dataset), 5) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_crowdpose_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_crowdpose_dataset( - data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_crowdpose_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_crowdpose_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='tests/data/crowdpose/test_crowdpose_det_AP_40.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_crowdpose_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='tests/data/crowdpose/test_crowdpose_det_AP_40.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_crowdpose_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.97)) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.body import CrowdPoseDataset + + +class TestCrowdPoseDataset(TestCase): + + def build_crowdpose_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_crowdpose.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/crowdpose', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return CrowdPoseDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_crowdpose_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'crowdpose') + + # test number of keypoints + num_keypoints = 14 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] + self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_crowdpose_dataset(data_mode='topdown') + # filter an invalid instance due to num_keypoints = 0 + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0], data_mode='topdown') + + # test topdown testing + dataset = self.build_crowdpose_dataset( + data_mode='topdown', test_mode=True) + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0], data_mode='topdown') + + # test topdown testing with bbox file + dataset = self.build_crowdpose_dataset( + data_mode='topdown', + test_mode=True, + bbox_file='tests/data/crowdpose/test_crowdpose_det_AP_40.json') + self.assertEqual(len(dataset), 6) + self.check_data_info_keys(dataset[0], data_mode='topdown') + + # test topdown testing with filter config + # filter one instance due to bbox_score < bbox_score_thr + dataset = self.build_crowdpose_dataset( + data_mode='topdown', + test_mode=True, + bbox_file='tests/data/crowdpose/test_crowdpose_det_AP_40.json', + filter_cfg=dict(bbox_score_thr=0.97)) + self.assertEqual(len(dataset), 5) + + def test_bottomup(self): + # test 
bottomup training + dataset = self.build_crowdpose_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_crowdpose_dataset( + data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_crowdpose_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_crowdpose_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='tests/data/crowdpose/test_crowdpose_det_AP_40.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_crowdpose_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='tests/data/crowdpose/test_crowdpose_det_AP_40.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_crowdpose_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.97)) diff --git a/tests/test_datasets/test_datasets/test_body_datasets/test_h36m_dataset.py b/tests/test_datasets/test_datasets/test_body_datasets/test_h36m_dataset.py index 88944dc11f..e40dad6b46 100644 --- a/tests/test_datasets/test_datasets/test_body_datasets/test_h36m_dataset.py +++ b/tests/test_datasets/test_datasets/test_body_datasets/test_h36m_dataset.py @@ -1,175 +1,175 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.body3d import Human36mDataset - - -class TestH36MDataset(TestCase): - - def build_h36m_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_h36m_body3d.npz', - data_mode='topdown', - data_root='tests/data/h36m', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return Human36mDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_ids=list, - img_paths=list, - keypoints=np.ndarray, - keypoints_3d=np.ndarray, - scale=np.float32, - center=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_ids=list, - img_paths=list, - keypoints=np.ndarray, - keypoints_3d=np.ndarray, - scale=list, - center=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_h36m_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'h36m') - - # test number of keypoints - num_keypoints = 17 - 
self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training - dataset = self.build_h36m_dataset(data_mode='topdown') - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0]) - - # test topdown testing - dataset = self.build_h36m_dataset(data_mode='topdown', test_mode=True) - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0]) - - # test topdown training with camera file - dataset = self.build_h36m_dataset( - data_mode='topdown', camera_param_file='cameras.pkl') - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0]) - - # test topdown training with sequence config - dataset = self.build_h36m_dataset( - data_mode='topdown', - seq_len=27, - seq_step=1, - causal=False, - pad_video_seq=True, - camera_param_file='cameras.pkl') - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0]) - - # test topdown testing with 2d keypoint detection file and - # sequence config - dataset = self.build_h36m_dataset( - data_mode='topdown', - seq_len=27, - seq_step=1, - causal=False, - pad_video_seq=True, - test_mode=True, - keypoint_2d_src='detection', - keypoint_2d_det_file='test_h36m_2d_detection.npy') - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0]) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_h36m_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup training - dataset = self.build_h36m_dataset( - data_mode='bottomup', - seq_len=27, - seq_step=1, - causal=False, - pad_video_seq=True) - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_h36m_dataset(data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_h36m_dataset(data_mode='invalid') - - SUPPORTED_keypoint_2d_src = {'gt', 'detection', 'pipeline'} - with self.assertRaisesRegex( - ValueError, 'Unsupported `keypoint_2d_src` "invalid". ' - f'Supported options are {SUPPORTED_keypoint_2d_src}'): - _ = self.build_h36m_dataset( - data_mode='topdown', - test_mode=False, - keypoint_2d_src='invalid') - - with self.assertRaisesRegex(AssertionError, - 'Annotation file does not exist'): - _ = self.build_h36m_dataset( - data_mode='topdown', test_mode=False, ann_file='invalid') - - with self.assertRaisesRegex(AssertionError, - 'Unsupported `subset_frac` 2.'): - _ = self.build_h36m_dataset(data_mode='topdown', subset_frac=2) +# Copyright (c) OpenMMLab. All rights reserved. 
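# Editor's note (illustrative only): the Human36mDataset test exercises the sequence
# options seq_len / seq_step / causal / pad_video_seq. The helper below is a rough
# sketch of how such options are commonly interpreted for video pose lifting (a causal
# window ends at the target frame, a non-causal window is centred on it, and padding
# clamps indices at clip boundaries). It is an assumption for illustration, not the
# actual Human36mDataset sampling code.
def sample_frame_indices(target, num_frames, seq_len=27, seq_step=1, causal=False):
    if causal:
        # window of seq_len frames ending at (and including) the target frame
        start = target - (seq_len - 1) * seq_step
        indices = range(start, target + 1, seq_step)
    else:
        # window of seq_len frames centred on the target frame
        half = (seq_len - 1) // 2
        start = target - half * seq_step
        indices = range(start, start + seq_len * seq_step, seq_step)
    # approximate pad_video_seq=True by clamping out-of-range indices to the clip
    return [min(max(i, 0), num_frames - 1) for i in indices]

# e.g. sample_frame_indices(0, 100) repeats frame 0 for the out-of-range left context
# and then continues with frames 1..13 to fill the 27-frame window.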
+from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.body3d import Human36mDataset + + +class TestH36MDataset(TestCase): + + def build_h36m_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_h36m_body3d.npz', + data_mode='topdown', + data_root='tests/data/h36m', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return Human36mDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_ids=list, + img_paths=list, + keypoints=np.ndarray, + keypoints_3d=np.ndarray, + scale=np.float32, + center=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_ids=list, + img_paths=list, + keypoints=np.ndarray, + keypoints_3d=np.ndarray, + scale=list, + center=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_h36m_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'h36m') + + # test number of keypoints + num_keypoints = 17 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_h36m_dataset(data_mode='topdown') + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0]) + + # test topdown testing + dataset = self.build_h36m_dataset(data_mode='topdown', test_mode=True) + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0]) + + # test topdown training with camera file + dataset = self.build_h36m_dataset( + data_mode='topdown', camera_param_file='cameras.pkl') + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0]) + + # test topdown training with sequence config + dataset = self.build_h36m_dataset( + data_mode='topdown', + seq_len=27, + seq_step=1, + causal=False, + pad_video_seq=True, + camera_param_file='cameras.pkl') + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0]) + + # test topdown testing with 2d keypoint detection file and + # sequence config + dataset = self.build_h36m_dataset( + data_mode='topdown', + seq_len=27, + seq_step=1, + causal=False, + pad_video_seq=True, + test_mode=True, + keypoint_2d_src='detection', + keypoint_2d_det_file='test_h36m_2d_detection.npy') + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0]) + + def test_bottomup(self): + # test bottomup training + dataset = 
self.build_h36m_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup training + dataset = self.build_h36m_dataset( + data_mode='bottomup', + seq_len=27, + seq_step=1, + causal=False, + pad_video_seq=True) + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_h36m_dataset(data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_h36m_dataset(data_mode='invalid') + + SUPPORTED_keypoint_2d_src = {'gt', 'detection', 'pipeline'} + with self.assertRaisesRegex( + ValueError, 'Unsupported `keypoint_2d_src` "invalid". ' + f'Supported options are {SUPPORTED_keypoint_2d_src}'): + _ = self.build_h36m_dataset( + data_mode='topdown', + test_mode=False, + keypoint_2d_src='invalid') + + with self.assertRaisesRegex(AssertionError, + 'Annotation file does not exist'): + _ = self.build_h36m_dataset( + data_mode='topdown', test_mode=False, ann_file='invalid') + + with self.assertRaisesRegex(AssertionError, + 'Unsupported `subset_frac` 2.'): + _ = self.build_h36m_dataset(data_mode='topdown', subset_frac=2) diff --git a/tests/test_datasets/test_datasets/test_body_datasets/test_humanart_dataset.py b/tests/test_datasets/test_datasets/test_body_datasets/test_humanart_dataset.py index dcf29ab692..4b4ba73ea0 100644 --- a/tests/test_datasets/test_datasets/test_body_datasets/test_humanart_dataset.py +++ b/tests/test_datasets/test_datasets/test_body_datasets/test_humanart_dataset.py @@ -1,160 +1,160 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.body import HumanArtDataset - - -class TestHumanartDataset(TestCase): - - def build_humanart_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_humanart.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/humanart', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return HumanArtDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_humanart_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'Human-Art') - - # test number of keypoints - num_keypoints = 17 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] - self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training - dataset = self.build_humanart_dataset(data_mode='topdown') - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0], data_mode='topdown') - - # test topdown testing - dataset = self.build_humanart_dataset( - data_mode='topdown', test_mode=True) - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0], data_mode='topdown') - - # test topdown testing with bbox file - dataset = self.build_humanart_dataset( - data_mode='topdown', - test_mode=True, - bbox_file='tests/data/humanart/test_humanart_det_AP_H_56.json') - self.assertEqual(len(dataset), 13) - self.check_data_info_keys(dataset[0], data_mode='topdown') - - # test topdown testing with filter config - dataset = self.build_humanart_dataset( - data_mode='topdown', - test_mode=True, - bbox_file='tests/data/humanart/test_humanart_det_AP_H_56.json', - filter_cfg=dict(bbox_score_thr=0.3)) - self.assertEqual(len(dataset), 8) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_humanart_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 3) - 
self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_humanart_dataset( - data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 3) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_humanart_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_humanart_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='tests/data/humanart/test_humanart_det_AP_H_56.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_humanart_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='tests/data/humanart/test_humanart_det_AP_H_56.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_humanart_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.body import HumanArtDataset + + +class TestHumanartDataset(TestCase): + + def build_humanart_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_humanart.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/humanart', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return HumanArtDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_humanart_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'Human-Art') + + # test number of keypoints + num_keypoints = 17 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] + self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + 
len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_humanart_dataset(data_mode='topdown') + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0], data_mode='topdown') + + # test topdown testing + dataset = self.build_humanart_dataset( + data_mode='topdown', test_mode=True) + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0], data_mode='topdown') + + # test topdown testing with bbox file + dataset = self.build_humanart_dataset( + data_mode='topdown', + test_mode=True, + bbox_file='tests/data/humanart/test_humanart_det_AP_H_56.json') + self.assertEqual(len(dataset), 13) + self.check_data_info_keys(dataset[0], data_mode='topdown') + + # test topdown testing with filter config + dataset = self.build_humanart_dataset( + data_mode='topdown', + test_mode=True, + bbox_file='tests/data/humanart/test_humanart_det_AP_H_56.json', + filter_cfg=dict(bbox_score_thr=0.3)) + self.assertEqual(len(dataset), 8) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_humanart_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 3) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_humanart_dataset( + data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 3) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_humanart_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_humanart_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='tests/data/humanart/test_humanart_det_AP_H_56.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_humanart_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='tests/data/humanart/test_humanart_det_AP_H_56.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_humanart_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_body_datasets/test_jhmdb_dataset.py b/tests/test_datasets/test_datasets/test_body_datasets/test_jhmdb_dataset.py index d7aa46b067..8f74f68774 100644 --- a/tests/test_datasets/test_datasets/test_body_datasets/test_jhmdb_dataset.py +++ b/tests/test_datasets/test_datasets/test_body_datasets/test_jhmdb_dataset.py @@ -1,145 +1,145 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.body import JhmdbDataset - - -class TestJhmdbDataset(TestCase): - - def build_jhmdb_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_jhmdb_sub1.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/jhmdb', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return JhmdbDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_jhmdb_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'jhmdb') - - # test number of keypoints - num_keypoints = 15 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] - self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training - dataset = self.build_jhmdb_dataset(data_mode='topdown') - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(len(dataset), 3) - self.check_data_info_keys(dataset[0], data_mode='topdown') - - # test topdown testing - dataset = self.build_jhmdb_dataset(data_mode='topdown', test_mode=True) - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(len(dataset), 3) - self.check_data_info_keys(dataset[0], data_mode='topdown') - - def test_bottomup(self): - # test bottomup training - dataset = self.build_jhmdb_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 3) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_jhmdb_dataset( - data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 3) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_jhmdb_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only 
supported when `test_mode==True`'): - _ = self.build_jhmdb_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_jhmdb_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_jhmdb_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.body import JhmdbDataset + + +class TestJhmdbDataset(TestCase): + + def build_jhmdb_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_jhmdb_sub1.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/jhmdb', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return JhmdbDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_jhmdb_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'jhmdb') + + # test number of keypoints + num_keypoints = 15 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] + self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_jhmdb_dataset(data_mode='topdown') + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(len(dataset), 3) + self.check_data_info_keys(dataset[0], data_mode='topdown') + + # test topdown testing + dataset = self.build_jhmdb_dataset(data_mode='topdown', test_mode=True) + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(len(dataset), 3) + self.check_data_info_keys(dataset[0], data_mode='topdown') + + def test_bottomup(self): + # test 
bottomup training + dataset = self.build_jhmdb_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 3) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_jhmdb_dataset( + data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 3) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_jhmdb_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_jhmdb_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_jhmdb_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_jhmdb_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_body_datasets/test_mhp_dataset.py b/tests/test_datasets/test_datasets/test_body_datasets/test_mhp_dataset.py index e93a524611..aedac37657 100644 --- a/tests/test_datasets/test_datasets/test_body_datasets/test_mhp_dataset.py +++ b/tests/test_datasets/test_datasets/test_body_datasets/test_mhp_dataset.py @@ -1,147 +1,147 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.body import MhpDataset - - -class TestMhpDataset(TestCase): - - def build_mhp_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_mhp.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/mhp', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return MhpDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - num_keypoints=int, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - iscrowd=int, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - num_keypoints=list, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_mhp_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'mhp') - - # test number of keypoints - num_keypoints = 16 - 
self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] - self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training - dataset = self.build_mhp_dataset(data_mode='topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0], data_mode='topdown') - - # test topdown testing - dataset = self.build_mhp_dataset(data_mode='topdown', test_mode=True) - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0], data_mode='topdown') - - def test_bottomup(self): - # test bottomup training - dataset = self.build_mhp_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_mhp_dataset(data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_mhp_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_mhp_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_mhp_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_mhp_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. 
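# Editor's note (condensed sketch): every test_exceptions_and_warnings method in this
# diff asserts the same three ValueErrors. The standalone function below restates those
# argument checks in one place for readability; it mirrors the error messages the tests
# match against and is not a copy of the dataset base-class implementation.
def validate_dataset_args(data_mode, test_mode, bbox_file=None, filter_cfg=None):
    if data_mode not in ('topdown', 'bottomup'):
        raise ValueError(f'got invalid data_mode: {data_mode}')
    if bbox_file is not None:
        if data_mode != 'topdown':
            raise ValueError('"bbox_file" is only supported in topdown mode')
        if not test_mode:
            raise ValueError(
                '"bbox_file" is only supported when `test_mode==True`')
    if filter_cfg and 'bbox_score_thr' in filter_cfg and data_mode != 'topdown':
        raise ValueError('"bbox_score_thr" is only supported in topdown mode')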
+from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.body import MhpDataset + + +class TestMhpDataset(TestCase): + + def build_mhp_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_mhp.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/mhp', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return MhpDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + num_keypoints=int, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + iscrowd=int, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + num_keypoints=list, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_mhp_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'mhp') + + # test number of keypoints + num_keypoints = 16 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] + self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_mhp_dataset(data_mode='topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0], data_mode='topdown') + + # test topdown testing + dataset = self.build_mhp_dataset(data_mode='topdown', test_mode=True) + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0], data_mode='topdown') + + def test_bottomup(self): + # test bottomup training + dataset = self.build_mhp_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_mhp_dataset(data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_mhp_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" 
is only supported when `test_mode==True`'): + _ = self.build_mhp_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_mhp_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_mhp_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_body_datasets/test_mpii_dataset.py b/tests/test_datasets/test_datasets/test_body_datasets/test_mpii_dataset.py index f6431af429..06777b2d65 100644 --- a/tests/test_datasets/test_datasets/test_body_datasets/test_mpii_dataset.py +++ b/tests/test_datasets/test_datasets/test_body_datasets/test_mpii_dataset.py @@ -1,144 +1,144 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.body import MpiiDataset - - -class TestMpiiDataset(TestCase): - - def build_mpii_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_mpii.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/mpii', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return MpiiDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox_center=np.ndarray, - bbox_scale=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox_center=np.ndarray, - bbox_scale=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_mpii_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'mpii') - - # test number of keypoints - num_keypoints = 16 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] - self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training - dataset = self.build_mpii_dataset(data_mode='topdown') - 
self.assertEqual(len(dataset), 5) - self.check_data_info_keys(dataset[0]) - - # test topdown testing - dataset = self.build_mpii_dataset(data_mode='topdown', test_mode=True) - self.assertEqual(len(dataset), 5) - self.check_data_info_keys(dataset[0]) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_mpii_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 5) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_mpii_dataset(data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 5) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_mpii_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_mpii_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_mpii_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_mpii_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.body import MpiiDataset + + +class TestMpiiDataset(TestCase): + + def build_mpii_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_mpii.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/mpii', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return MpiiDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox_center=np.ndarray, + bbox_scale=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox_center=np.ndarray, + bbox_scale=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_mpii_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'mpii') + + # test number of keypoints + num_keypoints = 16 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + 
len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] + self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_mpii_dataset(data_mode='topdown') + self.assertEqual(len(dataset), 5) + self.check_data_info_keys(dataset[0]) + + # test topdown testing + dataset = self.build_mpii_dataset(data_mode='topdown', test_mode=True) + self.assertEqual(len(dataset), 5) + self.check_data_info_keys(dataset[0]) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_mpii_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 5) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_mpii_dataset(data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 5) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_mpii_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_mpii_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_mpii_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_mpii_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_body_datasets/test_mpii_trb_dataset.py b/tests/test_datasets/test_datasets/test_body_datasets/test_mpii_trb_dataset.py index bd64662ce3..d7afd5f6e1 100644 --- a/tests/test_datasets/test_datasets/test_body_datasets/test_mpii_trb_dataset.py +++ b/tests/test_datasets/test_datasets/test_body_datasets/test_mpii_trb_dataset.py @@ -1,144 +1,144 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
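# Editor's note: since this patch only rewrites the test files without changing their
# content, a quick sanity check is to run the affected modules directly, e.g.
#
#     python -m unittest \
#         tests.test_datasets.test_datasets.test_body_datasets.test_mpii_dataset -v
#
# or via unittest discovery limited to the body-dataset directory, as sketched below.
import unittest

if __name__ == '__main__':
    suite = unittest.defaultTestLoader.discover(
        'tests/test_datasets/test_datasets/test_body_datasets',
        pattern='test_*_dataset.py')
    unittest.TextTestRunner(verbosity=2).run(suite)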
-from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.body import MpiiTrbDataset - - -class TestMpiiTrbDataset(TestCase): - - def build_mpii_trb_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_mpii_trb.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/mpii', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return MpiiTrbDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox_center=np.ndarray, - bbox_scale=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox_center=np.ndarray, - bbox_scale=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_mpii_trb_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'mpii_trb') - - # test number of keypoints - num_keypoints = 40 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training - dataset = self.build_mpii_trb_dataset(data_mode='topdown') - self.assertEqual(len(dataset), 5) - self.check_data_info_keys(dataset[0], data_mode='topdown') - - # test topdown testing - dataset = self.build_mpii_trb_dataset( - data_mode='topdown', test_mode=True) - self.assertEqual(len(dataset), 5) - self.check_data_info_keys(dataset[0], data_mode='topdown') - - def test_bottomup(self): - # test bottomup training - dataset = self.build_mpii_trb_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 5) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_mpii_trb_dataset( - data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 5) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_mpii_trb_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_mpii_trb_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='temp_bbox_file.json') - 
- with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_mpii_trb_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_mpii_trb_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.body import MpiiTrbDataset + + +class TestMpiiTrbDataset(TestCase): + + def build_mpii_trb_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_mpii_trb.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/mpii', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return MpiiTrbDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox_center=np.ndarray, + bbox_scale=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox_center=np.ndarray, + bbox_scale=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_mpii_trb_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'mpii_trb') + + # test number of keypoints + num_keypoints = 40 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_mpii_trb_dataset(data_mode='topdown') + self.assertEqual(len(dataset), 5) + self.check_data_info_keys(dataset[0], data_mode='topdown') + + # test topdown testing + dataset = self.build_mpii_trb_dataset( + data_mode='topdown', test_mode=True) + self.assertEqual(len(dataset), 5) + self.check_data_info_keys(dataset[0], data_mode='topdown') + + def test_bottomup(self): + # test bottomup training + dataset = self.build_mpii_trb_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 5) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_mpii_trb_dataset( + data_mode='bottomup', test_mode=True) + 
self.assertEqual(len(dataset), 5) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_mpii_trb_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_mpii_trb_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_mpii_trb_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_mpii_trb_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_body_datasets/test_ochuman_dataset.py b/tests/test_datasets/test_datasets/test_body_datasets/test_ochuman_dataset.py index 8e9f3ad532..d08c92a7e2 100644 --- a/tests/test_datasets/test_datasets/test_body_datasets/test_ochuman_dataset.py +++ b/tests/test_datasets/test_datasets/test_body_datasets/test_ochuman_dataset.py @@ -1,144 +1,144 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.body import OCHumanDataset - - -class TestOCHumanDataset(TestCase): - - def build_ochuman_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_ochuman.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/ochuman', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return OCHumanDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_ochuman_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'ochuman') - - # test number of keypoints - num_keypoints = 17 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] - 
self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training - dataset = self.build_ochuman_dataset(data_mode='topdown') - self.assertEqual(len(dataset), 5) - self.check_data_info_keys(dataset[0], data_mode='topdown') - - # test topdown testing - dataset = self.build_ochuman_dataset( - data_mode='topdown', test_mode=True) - self.assertEqual(len(dataset), 5) - self.check_data_info_keys(dataset[0], data_mode='topdown') - - def test_bottomup(self): - # test bottomup training - dataset = self.build_ochuman_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 3) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_ochuman_dataset( - data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 3) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_ochuman_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_ochuman_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_ochuman_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_ochuman_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.body import OCHumanDataset + + +class TestOCHumanDataset(TestCase): + + def build_ochuman_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_ochuman.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/ochuman', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return OCHumanDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_ochuman_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'ochuman') + + # test number of keypoints + num_keypoints = 17 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] + self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_ochuman_dataset(data_mode='topdown') + self.assertEqual(len(dataset), 5) + self.check_data_info_keys(dataset[0], data_mode='topdown') + + # test topdown testing + dataset = self.build_ochuman_dataset( + data_mode='topdown', test_mode=True) + self.assertEqual(len(dataset), 5) + self.check_data_info_keys(dataset[0], data_mode='topdown') + + def test_bottomup(self): + # test bottomup training + dataset = self.build_ochuman_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 3) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_ochuman_dataset( + data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 3) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_ochuman_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_ochuman_dataset( + 
data_mode='topdown', + test_mode=False, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_ochuman_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_ochuman_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_body_datasets/test_posetrack18_dataset.py b/tests/test_datasets/test_datasets/test_body_datasets/test_posetrack18_dataset.py index ef3cd82dfb..8b4142d17d 100644 --- a/tests/test_datasets/test_datasets/test_body_datasets/test_posetrack18_dataset.py +++ b/tests/test_datasets/test_datasets/test_body_datasets/test_posetrack18_dataset.py @@ -1,160 +1,160 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.body import PoseTrack18Dataset - - -class TestPoseTrack18Dataset(TestCase): - - def build_posetrack18_dataset(self, **kwargs): - - cfg = dict( - ann_file='annotations/test_posetrack18_val.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/posetrack18', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return PoseTrack18Dataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_posetrack18_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'posetrack18') - - # test number of keypoints - num_keypoints = 17 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] - self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training - dataset = self.build_posetrack18_dataset(data_mode='topdown') - 
self.assertEqual(len(dataset), 14) - self.check_data_info_keys(dataset[0]) - - # test topdown testing - dataset = self.build_posetrack18_dataset( - data_mode='topdown', test_mode=True) - self.assertEqual(len(dataset), 14) - self.check_data_info_keys(dataset[0]) - - # test topdown testing with bbox file - dataset = self.build_posetrack18_dataset( - test_mode=True, - bbox_file='tests/data/posetrack18/annotations/' - 'test_posetrack18_human_detections.json') - self.assertEqual(len(dataset), 278) - self.check_data_info_keys(dataset[0]) - - # test topdown testing with filter config - dataset = self.build_posetrack18_dataset( - test_mode=True, - bbox_file='tests/data/posetrack18/annotations/' - 'test_posetrack18_human_detections.json', - filter_cfg=dict(bbox_score_thr=0.3)) - self.assertEqual(len(dataset), 119) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_posetrack18_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 3) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_posetrack18_dataset( - data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 3) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_posetrack18_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_posetrack18_dataset( - test_mode=False, - bbox_file='tests/data/posetrack18/annotations/' - 'test_posetrack18_human_detections.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_posetrack18_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='tests/data/posetrack18/annotations/' - 'test_posetrack18_human_detections.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_posetrack18_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.body import PoseTrack18Dataset + + +class TestPoseTrack18Dataset(TestCase): + + def build_posetrack18_dataset(self, **kwargs): + + cfg = dict( + ann_file='annotations/test_posetrack18_val.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/posetrack18', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return PoseTrack18Dataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_posetrack18_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'posetrack18') + + # test number of keypoints + num_keypoints = 17 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] + self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_posetrack18_dataset(data_mode='topdown') + self.assertEqual(len(dataset), 14) + self.check_data_info_keys(dataset[0]) + + # test topdown testing + dataset = self.build_posetrack18_dataset( + data_mode='topdown', test_mode=True) + self.assertEqual(len(dataset), 14) + self.check_data_info_keys(dataset[0]) + + # test topdown testing with bbox file + dataset = self.build_posetrack18_dataset( + test_mode=True, + bbox_file='tests/data/posetrack18/annotations/' + 'test_posetrack18_human_detections.json') + self.assertEqual(len(dataset), 278) + self.check_data_info_keys(dataset[0]) + + # test topdown testing with filter config + dataset = self.build_posetrack18_dataset( + test_mode=True, + bbox_file='tests/data/posetrack18/annotations/' + 'test_posetrack18_human_detections.json', + filter_cfg=dict(bbox_score_thr=0.3)) + self.assertEqual(len(dataset), 119) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_posetrack18_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 3) + 
self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_posetrack18_dataset( + data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 3) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_posetrack18_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_posetrack18_dataset( + test_mode=False, + bbox_file='tests/data/posetrack18/annotations/' + 'test_posetrack18_human_detections.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_posetrack18_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='tests/data/posetrack18/annotations/' + 'test_posetrack18_human_detections.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_posetrack18_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_body_datasets/test_posetrack18_video_dataset.py b/tests/test_datasets/test_datasets/test_body_datasets/test_posetrack18_video_dataset.py index 88b58e486d..b8084be36d 100644 --- a/tests/test_datasets/test_datasets/test_body_datasets/test_posetrack18_video_dataset.py +++ b/tests/test_datasets/test_datasets/test_body_datasets/test_posetrack18_video_dataset.py @@ -1,299 +1,299 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.body import PoseTrack18VideoDataset - - -class TestPoseTrack18VideoDataset(TestCase): - - def build_posetrack18_video_dataset(self, **kwargs): - - cfg = dict( - ann_file='annotations/test_posetrack18_val.json', - bbox_file=None, - data_mode='topdown', - frame_weights=[0.0, 1.0], - frame_sampler_mode='random', - frame_range=[-2, 2], - num_sampled_frame=1, - frame_indices=[-2, -1, 0, 1, 2], - ph_fill_len=6, - data_root='tests/data/posetrack18', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return PoseTrack18VideoDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - # mind this difference: img_path is a list - img_path=list, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - # mind this difference - frame_weights=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=list, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - frame_weights=np.ndarray, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in 
expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_posetrack18_video_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'posetrack18') - - # test number of keypoints - num_keypoints = 17 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] - self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training, frame_sampler_mode = 'random' - dataset = self.build_posetrack18_video_dataset( - data_mode='topdown', frame_sampler_mode='random') - self.assertEqual(len(dataset), 14) - self.check_data_info_keys(dataset[0]) - - # test topdown training, frame_sampler_mode = 'fixed' - dataset = self.build_posetrack18_video_dataset( - data_mode='topdown', - frame_sampler_mode='fixed', - frame_weights=[0.0, 1.0], - frame_indices=[-1, 0]) - self.assertEqual(len(dataset), 14) - self.check_data_info_keys(dataset[0]) - - # test topdown testing, frame_sampler_mode = 'random' - dataset = self.build_posetrack18_video_dataset( - data_mode='topdown', test_mode=True, frame_sampler_mode='random') - self.assertEqual(len(dataset), 14) - self.check_data_info_keys(dataset[0]) - - # test topdown testing, frame_sampler_mode = 'fixed' - dataset = self.build_posetrack18_video_dataset( - data_mode='topdown', - test_mode=True, - frame_sampler_mode='fixed', - frame_weights=(0.3, 0.1, 0.25, 0.25, 0.1), - frame_indices=[-2, -1, 0, 1, 2]) - self.assertEqual(len(dataset), 14) - self.check_data_info_keys(dataset[0]) - - # test topdown testing with bbox file, frame_sampler_mode = 'random' - dataset = self.build_posetrack18_video_dataset( - test_mode=True, - frame_sampler_mode='random', - bbox_file='tests/data/posetrack18/annotations/' - 'test_posetrack18_human_detections.json') - self.assertEqual(len(dataset), 278) - self.check_data_info_keys(dataset[0]) - - # test topdown testing with bbox file, frame_sampler_mode = 'fixed' - dataset = self.build_posetrack18_video_dataset( - test_mode=True, - frame_sampler_mode='fixed', - frame_weights=(0.3, 0.1, 0.25, 0.25, 0.1), - frame_indices=[-2, -1, 0, 1, 2], - bbox_file='tests/data/posetrack18/annotations/' - 'test_posetrack18_human_detections.json') - self.assertEqual(len(dataset), 278) - self.check_data_info_keys(dataset[0]) - - # test topdown testing with filter config - dataset = self.build_posetrack18_video_dataset( - test_mode=True, - frame_sampler_mode='fixed', - frame_weights=(0.3, 0.1, 0.25, 0.25, 0.1), - frame_indices=[-2, -1, 0, 1, 2], - bbox_file='tests/data/posetrack18/annotations/' - 'test_posetrack18_human_detections.json', - filter_cfg=dict(bbox_score_thr=0.3)) - self.assertEqual(len(dataset), 119) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_posetrack18_video_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 3) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_posetrack18_video_dataset( - data_mode='bottomup', - test_mode=True, - 
frame_sampler_mode='fixed', - frame_indices=[-2, -1, 0, 1, 2], - frame_weights=(0.3, 0.1, 0.25, 0.25, 0.1)) - self.assertEqual(len(dataset), 3) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - # test invalid frame_weights - with self.assertRaisesRegex(AssertionError, - 'Invalid `frame_weights`:'): - _ = self.build_posetrack18_video_dataset(frame_weights=[0]) - - with self.assertRaisesRegex(AssertionError, 'should sum to 1.0'): - _ = self.build_posetrack18_video_dataset(frame_weights=[0.2, 0.3]) - with self.assertRaisesRegex( - AssertionError, 'frame_weight can not be a negative value'): - _ = self.build_posetrack18_video_dataset(frame_weights=[-0.2, 1.2]) - - # test invalid frame_sampler_mode - with self.assertRaisesRegex(ValueError, - 'got invalid frame_sampler_mode'): - _ = self.build_posetrack18_video_dataset( - frame_sampler_mode='invalid') - - # test invalid argument when `frame_sampler_mode = 'random'` - with self.assertRaisesRegex(AssertionError, - 'please specify the `frame_range`'): - _ = self.build_posetrack18_video_dataset( - frame_sampler_mode='random', - frame_range=None, - ) - with self.assertRaisesRegex(AssertionError, - 'frame_range can not be a negative value'): - _ = self.build_posetrack18_video_dataset( - frame_sampler_mode='random', - frame_range=-2, - ) - # correct usage - _ = self.build_posetrack18_video_dataset( - frame_sampler_mode='random', - frame_range=2, - ) - with self.assertRaisesRegex(AssertionError, 'The length must be 2'): - _ = self.build_posetrack18_video_dataset( - frame_sampler_mode='random', - frame_range=[3], - ) - with self.assertRaisesRegex(AssertionError, 'Invalid `frame_range`'): - _ = self.build_posetrack18_video_dataset( - frame_sampler_mode='random', - frame_range=[3, -3], - ) - with self.assertRaisesRegex(AssertionError, - 'Each element must be int'): - _ = self.build_posetrack18_video_dataset( - frame_sampler_mode='random', - frame_range=[-2, 5.5], - ) - with self.assertRaisesRegex( - TypeError, - 'The type of `frame_range` must be int or Sequence'): - _ = self.build_posetrack18_video_dataset( - frame_sampler_mode='random', - frame_range=dict(low=-2, high=2), - ) - - # test valid number of frames - with self.assertRaisesRegex(AssertionError, - 'please specify `num_sampled_frame`'): - _ = self.build_posetrack18_video_dataset( - frame_sampler_mode='random', - num_sampled_frame=None, - ) - with self.assertRaisesRegex( - AssertionError, - 'does not match the number of sampled adjacent frames'): - _ = self.build_posetrack18_video_dataset( - frame_sampler_mode='random', - frame_weights=[0.2, 0.3, 0.5], - num_sampled_frame=1, - ) - - # test invalid argument when `frame_sampler_mode = 'fixed'` - with self.assertRaisesRegex(AssertionError, - 'please specify the `frame_indices`'): - _ = self.build_posetrack18_video_dataset( - frame_sampler_mode='fixed', - frame_indices=None, - ) - with self.assertRaisesRegex( - AssertionError, 'does not match the length of frame_indices'): - _ = self.build_posetrack18_video_dataset( - frame_sampler_mode='fixed', - frame_weights=[0.5, 0.3, 0.2], - frame_indices=[-2, -1, 0, 1, 2], - ) - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_posetrack18_video_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_posetrack18_video_dataset( - test_mode=False, - bbox_file='tests/data/posetrack18/annotations/' - 
'test_posetrack18_human_detections.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_posetrack18_video_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='tests/data/posetrack18/annotations/' - 'test_posetrack18_human_detections.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_posetrack18_video_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.body import PoseTrack18VideoDataset + + +class TestPoseTrack18VideoDataset(TestCase): + + def build_posetrack18_video_dataset(self, **kwargs): + + cfg = dict( + ann_file='annotations/test_posetrack18_val.json', + bbox_file=None, + data_mode='topdown', + frame_weights=[0.0, 1.0], + frame_sampler_mode='random', + frame_range=[-2, 2], + num_sampled_frame=1, + frame_indices=[-2, -1, 0, 1, 2], + ph_fill_len=6, + data_root='tests/data/posetrack18', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return PoseTrack18VideoDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + # mind this difference: img_path is a list + img_path=list, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + # mind this difference + frame_weights=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=list, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + frame_weights=np.ndarray, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_posetrack18_video_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'posetrack18') + + # test number of keypoints + num_keypoints = 17 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] + self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training, frame_sampler_mode = 'random' + dataset = self.build_posetrack18_video_dataset( + data_mode='topdown', frame_sampler_mode='random') + 
self.assertEqual(len(dataset), 14) + self.check_data_info_keys(dataset[0]) + + # test topdown training, frame_sampler_mode = 'fixed' + dataset = self.build_posetrack18_video_dataset( + data_mode='topdown', + frame_sampler_mode='fixed', + frame_weights=[0.0, 1.0], + frame_indices=[-1, 0]) + self.assertEqual(len(dataset), 14) + self.check_data_info_keys(dataset[0]) + + # test topdown testing, frame_sampler_mode = 'random' + dataset = self.build_posetrack18_video_dataset( + data_mode='topdown', test_mode=True, frame_sampler_mode='random') + self.assertEqual(len(dataset), 14) + self.check_data_info_keys(dataset[0]) + + # test topdown testing, frame_sampler_mode = 'fixed' + dataset = self.build_posetrack18_video_dataset( + data_mode='topdown', + test_mode=True, + frame_sampler_mode='fixed', + frame_weights=(0.3, 0.1, 0.25, 0.25, 0.1), + frame_indices=[-2, -1, 0, 1, 2]) + self.assertEqual(len(dataset), 14) + self.check_data_info_keys(dataset[0]) + + # test topdown testing with bbox file, frame_sampler_mode = 'random' + dataset = self.build_posetrack18_video_dataset( + test_mode=True, + frame_sampler_mode='random', + bbox_file='tests/data/posetrack18/annotations/' + 'test_posetrack18_human_detections.json') + self.assertEqual(len(dataset), 278) + self.check_data_info_keys(dataset[0]) + + # test topdown testing with bbox file, frame_sampler_mode = 'fixed' + dataset = self.build_posetrack18_video_dataset( + test_mode=True, + frame_sampler_mode='fixed', + frame_weights=(0.3, 0.1, 0.25, 0.25, 0.1), + frame_indices=[-2, -1, 0, 1, 2], + bbox_file='tests/data/posetrack18/annotations/' + 'test_posetrack18_human_detections.json') + self.assertEqual(len(dataset), 278) + self.check_data_info_keys(dataset[0]) + + # test topdown testing with filter config + dataset = self.build_posetrack18_video_dataset( + test_mode=True, + frame_sampler_mode='fixed', + frame_weights=(0.3, 0.1, 0.25, 0.25, 0.1), + frame_indices=[-2, -1, 0, 1, 2], + bbox_file='tests/data/posetrack18/annotations/' + 'test_posetrack18_human_detections.json', + filter_cfg=dict(bbox_score_thr=0.3)) + self.assertEqual(len(dataset), 119) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_posetrack18_video_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 3) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_posetrack18_video_dataset( + data_mode='bottomup', + test_mode=True, + frame_sampler_mode='fixed', + frame_indices=[-2, -1, 0, 1, 2], + frame_weights=(0.3, 0.1, 0.25, 0.25, 0.1)) + self.assertEqual(len(dataset), 3) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + # test invalid frame_weights + with self.assertRaisesRegex(AssertionError, + 'Invalid `frame_weights`:'): + _ = self.build_posetrack18_video_dataset(frame_weights=[0]) + + with self.assertRaisesRegex(AssertionError, 'should sum to 1.0'): + _ = self.build_posetrack18_video_dataset(frame_weights=[0.2, 0.3]) + with self.assertRaisesRegex( + AssertionError, 'frame_weight can not be a negative value'): + _ = self.build_posetrack18_video_dataset(frame_weights=[-0.2, 1.2]) + + # test invalid frame_sampler_mode + with self.assertRaisesRegex(ValueError, + 'got invalid frame_sampler_mode'): + _ = self.build_posetrack18_video_dataset( + frame_sampler_mode='invalid') + + # test invalid argument when `frame_sampler_mode = 'random'` + with self.assertRaisesRegex(AssertionError, + 'please specify the `frame_range`'): + _ = 
self.build_posetrack18_video_dataset( + frame_sampler_mode='random', + frame_range=None, + ) + with self.assertRaisesRegex(AssertionError, + 'frame_range can not be a negative value'): + _ = self.build_posetrack18_video_dataset( + frame_sampler_mode='random', + frame_range=-2, + ) + # correct usage + _ = self.build_posetrack18_video_dataset( + frame_sampler_mode='random', + frame_range=2, + ) + with self.assertRaisesRegex(AssertionError, 'The length must be 2'): + _ = self.build_posetrack18_video_dataset( + frame_sampler_mode='random', + frame_range=[3], + ) + with self.assertRaisesRegex(AssertionError, 'Invalid `frame_range`'): + _ = self.build_posetrack18_video_dataset( + frame_sampler_mode='random', + frame_range=[3, -3], + ) + with self.assertRaisesRegex(AssertionError, + 'Each element must be int'): + _ = self.build_posetrack18_video_dataset( + frame_sampler_mode='random', + frame_range=[-2, 5.5], + ) + with self.assertRaisesRegex( + TypeError, + 'The type of `frame_range` must be int or Sequence'): + _ = self.build_posetrack18_video_dataset( + frame_sampler_mode='random', + frame_range=dict(low=-2, high=2), + ) + + # test valid number of frames + with self.assertRaisesRegex(AssertionError, + 'please specify `num_sampled_frame`'): + _ = self.build_posetrack18_video_dataset( + frame_sampler_mode='random', + num_sampled_frame=None, + ) + with self.assertRaisesRegex( + AssertionError, + 'does not match the number of sampled adjacent frames'): + _ = self.build_posetrack18_video_dataset( + frame_sampler_mode='random', + frame_weights=[0.2, 0.3, 0.5], + num_sampled_frame=1, + ) + + # test invalid argument when `frame_sampler_mode = 'fixed'` + with self.assertRaisesRegex(AssertionError, + 'please specify the `frame_indices`'): + _ = self.build_posetrack18_video_dataset( + frame_sampler_mode='fixed', + frame_indices=None, + ) + with self.assertRaisesRegex( + AssertionError, 'does not match the length of frame_indices'): + _ = self.build_posetrack18_video_dataset( + frame_sampler_mode='fixed', + frame_weights=[0.5, 0.3, 0.2], + frame_indices=[-2, -1, 0, 1, 2], + ) + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_posetrack18_video_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_posetrack18_video_dataset( + test_mode=False, + bbox_file='tests/data/posetrack18/annotations/' + 'test_posetrack18_human_detections.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_posetrack18_video_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='tests/data/posetrack18/annotations/' + 'test_posetrack18_human_detections.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_posetrack18_video_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_dataset_wrappers/test_combined_dataset.py b/tests/test_datasets/test_datasets/test_dataset_wrappers/test_combined_dataset.py index 698f1f060d..4fb07302dc 100644 --- a/tests/test_datasets/test_datasets/test_dataset_wrappers/test_combined_dataset.py +++ b/tests/test_datasets/test_datasets/test_dataset_wrappers/test_combined_dataset.py @@ -1,89 +1,89 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import numpy as np - -from mmpose.datasets.dataset_wrappers import CombinedDataset - - -class TestCombinedDataset(TestCase): - - def build_combined_dataset(self, **kwargs): - - coco_cfg = dict( - type='CocoDataset', - ann_file='test_coco.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/coco', - pipeline=[], - test_mode=False) - - aic_cfg = dict( - type='AicDataset', - ann_file='test_aic.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/aic', - pipeline=[], - test_mode=False) - - cfg = dict( - metainfo=dict(from_file='configs/_base_/datasets/coco.py'), - datasets=[coco_cfg, aic_cfg], - pipeline=[]) - cfg.update(kwargs) - return CombinedDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def test_get_subset_index(self): - dataset = self.build_combined_dataset() - lens = dataset._lens - - with self.assertRaises(ValueError): - subset_idx, sample_idx = dataset._get_subset_index(sum(lens)) - - index = lens[0] - subset_idx, sample_idx = dataset._get_subset_index(index) - self.assertEqual(subset_idx, 1) - self.assertEqual(sample_idx, 0) - - index = -lens[1] - 1 - subset_idx, sample_idx = dataset._get_subset_index(index) - self.assertEqual(subset_idx, 0) - self.assertEqual(sample_idx, lens[0] - 1) - - def test_prepare_data(self): - dataset = self.build_combined_dataset() - lens = dataset._lens - - data_info = dataset[lens[0]] - self.check_data_info_keys(data_info) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import numpy as np + +from mmpose.datasets.dataset_wrappers import CombinedDataset + + +class TestCombinedDataset(TestCase): + + def build_combined_dataset(self, **kwargs): + + coco_cfg = dict( + type='CocoDataset', + ann_file='test_coco.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/coco', + pipeline=[], + test_mode=False) + + aic_cfg = dict( + type='AicDataset', + ann_file='test_aic.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/aic', + pipeline=[], + test_mode=False) + + cfg = dict( + metainfo=dict(from_file='configs/_base_/datasets/coco.py'), + datasets=[coco_cfg, aic_cfg], + pipeline=[]) + cfg.update(kwargs) + return CombinedDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def test_get_subset_index(self): + dataset = self.build_combined_dataset() + lens = dataset._lens + + with self.assertRaises(ValueError): + subset_idx, sample_idx = dataset._get_subset_index(sum(lens)) + + index = lens[0] + subset_idx, sample_idx = dataset._get_subset_index(index) + self.assertEqual(subset_idx, 1) + self.assertEqual(sample_idx, 0) + + index = -lens[1] - 1 + subset_idx, sample_idx = dataset._get_subset_index(index) + self.assertEqual(subset_idx, 0) + self.assertEqual(sample_idx, lens[0] - 1) + + def test_prepare_data(self): + dataset = self.build_combined_dataset() + lens = dataset._lens + + data_info = dataset[lens[0]] + self.check_data_info_keys(data_info) diff --git a/tests/test_datasets/test_datasets/test_face_datasets/test_aflw_dataset.py b/tests/test_datasets/test_datasets/test_face_datasets/test_aflw_dataset.py index dfc3a5ccce..d0a2852f44 100644 --- a/tests/test_datasets/test_datasets/test_face_datasets/test_aflw_dataset.py +++ b/tests/test_datasets/test_datasets/test_face_datasets/test_aflw_dataset.py @@ -1,141 +1,141 @@ -# Copyright (c) build_aflw_datasetOpenMMLab. All rights reserved. 
-from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.face import AFLWDataset - - -class TestAFLWDataset(TestCase): - - def build_aflw_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_aflw.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/aflw', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return AFLWDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox_center=np.ndarray, - bbox_scale=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox_center=np.ndarray, - bbox_scale=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_aflw_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'aflw') - - # test number of keypoints - num_keypoints = 19 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - - def test_topdown(self): - # test topdown training - dataset = self.build_aflw_dataset(data_mode='topdown') - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) - - # test topdown testing - dataset = self.build_aflw_dataset(data_mode='topdown', test_mode=True) - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_aflw_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_aflw_dataset(data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_aflw_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_aflw_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, 
'"bbox_file" is only supported in topdown mode'): - _ = self.build_aflw_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_aflw_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) build_aflw_datasetOpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.face import AFLWDataset + + +class TestAFLWDataset(TestCase): + + def build_aflw_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_aflw.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/aflw', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return AFLWDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox_center=np.ndarray, + bbox_scale=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox_center=np.ndarray, + bbox_scale=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_aflw_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'aflw') + + # test number of keypoints + num_keypoints = 19 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + + def test_topdown(self): + # test topdown training + dataset = self.build_aflw_dataset(data_mode='topdown') + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) + + # test topdown testing + dataset = self.build_aflw_dataset(data_mode='topdown', test_mode=True) + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_aflw_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_aflw_dataset(data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], 
data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_aflw_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_aflw_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_aflw_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_aflw_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_face_datasets/test_coco_wholebody_face_dataset.py b/tests/test_datasets/test_datasets/test_face_datasets/test_coco_wholebody_face_dataset.py index 7c296c64ad..92a7aa3d13 100644 --- a/tests/test_datasets/test_datasets/test_face_datasets/test_coco_wholebody_face_dataset.py +++ b/tests/test_datasets/test_datasets/test_face_datasets/test_coco_wholebody_face_dataset.py @@ -1,147 +1,147 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.face import CocoWholeBodyFaceDataset - - -class TestCocoWholeBodyFaceDataset(TestCase): - - def build_coco_wholebody_face_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_coco_wholebody.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/coco', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return CocoWholeBodyFaceDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_coco_wholebody_face_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], - 'coco_wholebody_face') - - # test number of keypoints - num_keypoints = 68 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - # note that len(sigmas) may be zero if 
dataset.metainfo['sigmas'] = [] - self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) - - def test_topdown(self): - # test topdown training - dataset = self.build_coco_wholebody_face_dataset(data_mode='topdown') - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - # filter invalid insances due to face_valid = false - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0]) - - # test topdown testing - dataset = self.build_coco_wholebody_face_dataset( - data_mode='topdown', test_mode=True) - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0]) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_coco_wholebody_face_dataset(data_mode='bottomup') - # filter one invalid instance due to face_valid = false - self.assertEqual(len(dataset), 3) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_coco_wholebody_face_dataset( - data_mode='bottomup', test_mode=True) - # all images are used for evaluation - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_coco_wholebody_face_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_coco_wholebody_face_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_coco_wholebody_face_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_coco_wholebody_face_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.face import CocoWholeBodyFaceDataset + + +class TestCocoWholeBodyFaceDataset(TestCase): + + def build_coco_wholebody_face_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_coco_wholebody.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/coco', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return CocoWholeBodyFaceDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_coco_wholebody_face_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], + 'coco_wholebody_face') + + # test number of keypoints + num_keypoints = 68 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] + self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) + + def test_topdown(self): + # test topdown training + dataset = self.build_coco_wholebody_face_dataset(data_mode='topdown') + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + # filter invalid insances due to face_valid = false + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0]) + + # test topdown testing + dataset = self.build_coco_wholebody_face_dataset( + data_mode='topdown', test_mode=True) + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0]) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_coco_wholebody_face_dataset(data_mode='bottomup') + # filter one invalid instance due to face_valid = false + self.assertEqual(len(dataset), 3) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_coco_wholebody_face_dataset( + data_mode='bottomup', test_mode=True) + # all images are used for evaluation + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + 
+ with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_coco_wholebody_face_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_coco_wholebody_face_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_coco_wholebody_face_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_coco_wholebody_face_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_face_datasets/test_cofw_dataset.py b/tests/test_datasets/test_datasets/test_face_datasets/test_cofw_dataset.py index c8801a677a..f426c6c9f0 100644 --- a/tests/test_datasets/test_datasets/test_face_datasets/test_cofw_dataset.py +++ b/tests/test_datasets/test_datasets/test_face_datasets/test_cofw_dataset.py @@ -1,139 +1,139 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.face import COFWDataset - - -class TestCOFWDataset(TestCase): - - def build_cofw_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_cofw.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/cofw', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return COFWDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_cofw_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'cofw') - - # test number of keypoints - num_keypoints = 29 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - - def test_topdown(self): - # test topdown training - dataset = self.build_cofw_dataset(data_mode='topdown') - self.assertEqual(dataset.data_mode, 'topdown') - 
self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) - - # test topdown testing - dataset = self.build_cofw_dataset(data_mode='topdown', test_mode=True) - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_cofw_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_cofw_dataset(data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_cofw_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_cofw_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_cofw_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_cofw_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.face import COFWDataset + + +class TestCOFWDataset(TestCase): + + def build_cofw_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_cofw.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/cofw', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return COFWDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_cofw_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'cofw') + + # test number of keypoints + num_keypoints = 29 + 
self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + + def test_topdown(self): + # test topdown training + dataset = self.build_cofw_dataset(data_mode='topdown') + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) + + # test topdown testing + dataset = self.build_cofw_dataset(data_mode='topdown', test_mode=True) + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_cofw_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_cofw_dataset(data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_cofw_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_cofw_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_cofw_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_cofw_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_face_datasets/test_face_300w_dataset.py b/tests/test_datasets/test_datasets/test_face_datasets/test_face_300w_dataset.py index c45330536f..b03729eb3b 100644 --- a/tests/test_datasets/test_datasets/test_face_datasets/test_face_300w_dataset.py +++ b/tests/test_datasets/test_datasets/test_face_datasets/test_face_300w_dataset.py @@ -1,143 +1,143 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.face import Face300WDataset - - -class TestFace300WDataset(TestCase): - - def build_face_300w_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_300w.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/300w', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return Face300WDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox_center=np.ndarray, - bbox_scale=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox_center=np.ndarray, - bbox_scale=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_face_300w_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], '300w') - - # test number of keypoints - num_keypoints = 68 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - - def test_topdown(self): - # test topdown training - dataset = self.build_face_300w_dataset(data_mode='topdown') - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) - - # test topdown testing - dataset = self.build_face_300w_dataset( - data_mode='topdown', test_mode=True) - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_face_300w_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_face_300w_dataset( - data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_face_300w_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_face_300w_dataset( - data_mode='topdown', - test_mode=False, - 
bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_face_300w_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_face_300w_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.face import Face300WDataset + + +class TestFace300WDataset(TestCase): + + def build_face_300w_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_300w.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/300w', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return Face300WDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox_center=np.ndarray, + bbox_scale=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox_center=np.ndarray, + bbox_scale=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_face_300w_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], '300w') + + # test number of keypoints + num_keypoints = 68 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + + def test_topdown(self): + # test topdown training + dataset = self.build_face_300w_dataset(data_mode='topdown') + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) + + # test topdown testing + dataset = self.build_face_300w_dataset( + data_mode='topdown', test_mode=True) + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_face_300w_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_face_300w_dataset( + 
data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_face_300w_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_face_300w_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_face_300w_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_face_300w_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_face_datasets/test_lapa_dataset.py b/tests/test_datasets/test_datasets/test_face_datasets/test_lapa_dataset.py index 991f285476..2cd527ec0e 100644 --- a/tests/test_datasets/test_datasets/test_face_datasets/test_lapa_dataset.py +++ b/tests/test_datasets/test_datasets/test_face_datasets/test_lapa_dataset.py @@ -1,93 +1,93 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.face import LapaDataset - - -class TestLaPaDataset(TestCase): - - def build_lapa_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_lapa.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/lapa', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return LapaDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_lapa_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'lapa') - - # test number of keypoints - num_keypoints = 106 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] - self.assertEqual(len(dataset.metainfo['sigmas']), 0) - - def test_topdown(self): - # test topdown training - dataset = self.build_lapa_dataset(data_mode='topdown') - self.assertEqual(dataset.data_mode, 'topdown') - 
self.assertEqual(dataset.bbox_file, None) - # filter invalid insances due to face_valid = false - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) - - # test topdown testing - dataset = self.build_lapa_dataset(data_mode='topdown', test_mode=True) - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.face import LapaDataset + + +class TestLaPaDataset(TestCase): + + def build_lapa_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_lapa.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/lapa', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return LapaDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_lapa_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'lapa') + + # test number of keypoints + num_keypoints = 106 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] + self.assertEqual(len(dataset.metainfo['sigmas']), 0) + + def test_topdown(self): + # test topdown training + dataset = self.build_lapa_dataset(data_mode='topdown') + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + # filter invalid insances due to face_valid = false + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) + + # test topdown testing + dataset = self.build_lapa_dataset(data_mode='topdown', test_mode=True) + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) diff --git a/tests/test_datasets/test_datasets/test_face_datasets/test_wflw_dataset.py b/tests/test_datasets/test_datasets/test_face_datasets/test_wflw_dataset.py index aab0fd1813..7f1d81700e 100644 --- a/tests/test_datasets/test_datasets/test_face_datasets/test_wflw_dataset.py +++ b/tests/test_datasets/test_datasets/test_face_datasets/test_wflw_dataset.py @@ -1,141 +1,141 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.face import WFLWDataset - - -class TestWFLWDataset(TestCase): - - def build_wflw_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_wflw.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/wflw', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return WFLWDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox_center=np.ndarray, - bbox_scale=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox_center=np.ndarray, - bbox_scale=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_wflw_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'wflw') - - # test number of keypoints - num_keypoints = 98 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - - def test_topdown(self): - # test topdown training - dataset = self.build_wflw_dataset(data_mode='topdown') - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0]) - - # test topdown testing - dataset = self.build_wflw_dataset(data_mode='topdown', test_mode=True) - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0]) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_wflw_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_wflw_dataset(data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_wflw_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_wflw_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, 
'"bbox_file" is only supported in topdown mode'): - _ = self.build_wflw_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_wflw_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.face import WFLWDataset + + +class TestWFLWDataset(TestCase): + + def build_wflw_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_wflw.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/wflw', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return WFLWDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox_center=np.ndarray, + bbox_scale=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox_center=np.ndarray, + bbox_scale=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_wflw_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'wflw') + + # test number of keypoints + num_keypoints = 98 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + + def test_topdown(self): + # test topdown training + dataset = self.build_wflw_dataset(data_mode='topdown') + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0]) + + # test topdown testing + dataset = self.build_wflw_dataset(data_mode='topdown', test_mode=True) + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0]) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_wflw_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_wflw_dataset(data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def 
test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_wflw_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_wflw_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_wflw_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_wflw_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_fashion_datasets/test_deepfashion_dataset.py b/tests/test_datasets/test_datasets/test_fashion_datasets/test_deepfashion_dataset.py index 2140a23467..b5e9c96fed 100644 --- a/tests/test_datasets/test_datasets/test_fashion_datasets/test_deepfashion_dataset.py +++ b/tests/test_datasets/test_datasets/test_fashion_datasets/test_deepfashion_dataset.py @@ -1,171 +1,171 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.fashion import DeepFashionDataset - - -class TestDeepFashionDataset(TestCase): - - def build_deepfashion_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_fld.json', - subset='', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/fld', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return DeepFashionDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - # test subset = 'full' - dataset = self.build_deepfashion_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'deepfashion_full') - - # test number of keypoints - num_keypoints = 8 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - - # test subset = 'upper' - dataset = self.build_deepfashion_dataset(subset='upper') - 
self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'deepfashion_upper') - # test number of keypoints - num_keypoints = 6 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - - # test subset = 'upper' - dataset = self.build_deepfashion_dataset(subset='lower') - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'deepfashion_lower') - # test number of keypoints - num_keypoints = 4 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - - def test_topdown(self): - # test subset = 'full' topdown training - dataset = self.build_deepfashion_dataset(data_mode='topdown') - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) - - # test subset = 'full' topdown testing - dataset = self.build_deepfashion_dataset( - data_mode='topdown', test_mode=True) - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0]) - - def test_bottomup(self): - # test subset = 'full' bottomup training - dataset = self.build_deepfashion_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test subset = 'full' bottomup testing - dataset = self.build_deepfashion_dataset( - data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 2) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_deepfashion_dataset(data_mode='invalid') - - with self.assertRaisesRegex(ValueError, 'got invalid subset'): - _ = self.build_deepfashion_dataset(subset='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_deepfashion_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_deepfashion_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_deepfashion_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.fashion import DeepFashionDataset + + +class TestDeepFashionDataset(TestCase): + + def build_deepfashion_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_fld.json', + subset='', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/fld', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return DeepFashionDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + # test subset = 'full' + dataset = self.build_deepfashion_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'deepfashion_full') + + # test number of keypoints + num_keypoints = 8 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + + # test subset = 'upper' + dataset = self.build_deepfashion_dataset(subset='upper') + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'deepfashion_upper') + # test number of keypoints + num_keypoints = 6 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + + # test subset = 'upper' + dataset = self.build_deepfashion_dataset(subset='lower') + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'deepfashion_lower') + # test number of keypoints + num_keypoints = 4 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + + def test_topdown(self): + # test subset = 'full' topdown training + dataset = self.build_deepfashion_dataset(data_mode='topdown') + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) + + # test subset = 'full' topdown testing + 
dataset = self.build_deepfashion_dataset( + data_mode='topdown', test_mode=True) + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0]) + + def test_bottomup(self): + # test subset = 'full' bottomup training + dataset = self.build_deepfashion_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test subset = 'full' bottomup testing + dataset = self.build_deepfashion_dataset( + data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 2) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_deepfashion_dataset(data_mode='invalid') + + with self.assertRaisesRegex(ValueError, 'got invalid subset'): + _ = self.build_deepfashion_dataset(subset='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_deepfashion_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_deepfashion_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_deepfashion_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_hand_datasets/test_coco_wholebody_hand_dataset.py b/tests/test_datasets/test_datasets/test_hand_datasets/test_coco_wholebody_hand_dataset.py index 6bb425bf81..969b1e27f1 100644 --- a/tests/test_datasets/test_datasets/test_hand_datasets/test_coco_wholebody_hand_dataset.py +++ b/tests/test_datasets/test_datasets/test_hand_datasets/test_coco_wholebody_hand_dataset.py @@ -1,153 +1,153 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.hand import CocoWholeBodyHandDataset - - -class TestCocoWholeBodyHandDataset(TestCase): - - def build_coco_wholebody_hand_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_coco_wholebody.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/coco', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return CocoWholeBodyHandDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_coco_wholebody_hand_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], - 'coco_wholebody_hand') - - # test number of keypoints - num_keypoints = 21 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] - self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training - dataset = self.build_coco_wholebody_hand_dataset(data_mode='topdown') - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - # filter invalid instances according to ``lefthand_valid`` - # and ``righthand_valid`` - self.assertEqual(len(dataset), 10) - self.check_data_info_keys(dataset[0]) - - # test topdown testing - dataset = self.build_coco_wholebody_hand_dataset( - data_mode='topdown', test_mode=True) - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 10) - self.check_data_info_keys(dataset[0]) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_coco_wholebody_hand_dataset(data_mode='bottomup') - # filter repeated images - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_coco_wholebody_hand_dataset( - data_mode='bottomup', test_mode=True) - # filter repeated images - 
self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_coco_wholebody_hand_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_coco_wholebody_hand_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_coco_wholebody_hand_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_coco_wholebody_hand_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.hand import CocoWholeBodyHandDataset + + +class TestCocoWholeBodyHandDataset(TestCase): + + def build_coco_wholebody_hand_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_coco_wholebody.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/coco', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return CocoWholeBodyHandDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_coco_wholebody_hand_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], + 'coco_wholebody_hand') + + # test number of keypoints + num_keypoints = 21 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] + self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + 
# test topdown training + dataset = self.build_coco_wholebody_hand_dataset(data_mode='topdown') + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + # filter invalid instances according to ``lefthand_valid`` + # and ``righthand_valid`` + self.assertEqual(len(dataset), 10) + self.check_data_info_keys(dataset[0]) + + # test topdown testing + dataset = self.build_coco_wholebody_hand_dataset( + data_mode='topdown', test_mode=True) + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 10) + self.check_data_info_keys(dataset[0]) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_coco_wholebody_hand_dataset(data_mode='bottomup') + # filter repeated images + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_coco_wholebody_hand_dataset( + data_mode='bottomup', test_mode=True) + # filter repeated images + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_coco_wholebody_hand_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_coco_wholebody_hand_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_coco_wholebody_hand_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_coco_wholebody_hand_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_hand_datasets/test_freihand_dataset.py b/tests/test_datasets/test_datasets/test_hand_datasets/test_freihand_dataset.py index 046a2dd602..6df0a02f99 100644 --- a/tests/test_datasets/test_datasets/test_hand_datasets/test_freihand_dataset.py +++ b/tests/test_datasets/test_datasets/test_hand_datasets/test_freihand_dataset.py @@ -1,146 +1,146 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.hand import FreiHandDataset - - -class TestFreiHandDataset(TestCase): - - def build_freihand_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_freihand.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/freihand', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return FreiHandDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_freihand_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'freihand') - - # test number of keypoints - num_keypoints = 21 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training - dataset = self.build_freihand_dataset(data_mode='topdown') - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 8) - self.check_data_info_keys(dataset[0]) - - # test topdown testing - dataset = self.build_freihand_dataset( - data_mode='topdown', test_mode=True) - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 8) - self.check_data_info_keys(dataset[0]) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_freihand_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 8) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_freihand_dataset( - data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 8) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_freihand_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = 
self.build_freihand_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_freihand_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_freihand_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.hand import FreiHandDataset + + +class TestFreiHandDataset(TestCase): + + def build_freihand_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_freihand.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/freihand', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return FreiHandDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_freihand_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'freihand') + + # test number of keypoints + num_keypoints = 21 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_freihand_dataset(data_mode='topdown') + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 8) + self.check_data_info_keys(dataset[0]) + + # test topdown testing + dataset = self.build_freihand_dataset( + data_mode='topdown', test_mode=True) + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 8) + self.check_data_info_keys(dataset[0]) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_freihand_dataset(data_mode='bottomup') + 
self.assertEqual(len(dataset), 8) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_freihand_dataset( + data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 8) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_freihand_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_freihand_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_freihand_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_freihand_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_hand_datasets/test_onehand10k_dataset.py b/tests/test_datasets/test_datasets/test_hand_datasets/test_onehand10k_dataset.py index 38ff6c4083..fd237692b3 100644 --- a/tests/test_datasets/test_datasets/test_hand_datasets/test_onehand10k_dataset.py +++ b/tests/test_datasets/test_datasets/test_hand_datasets/test_onehand10k_dataset.py @@ -1,146 +1,146 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.hand import OneHand10KDataset - - -class TestOneHand10KDataset(TestCase): - - def build_onehand10k_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_onehand10k.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/onehand10k', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return OneHand10KDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_onehand10k_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'onehand10k') - - # test number of keypoints - num_keypoints = 21 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - 
self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training - dataset = self.build_onehand10k_dataset(data_mode='topdown') - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0]) - - # test topdown testing - dataset = self.build_onehand10k_dataset( - data_mode='topdown', test_mode=True) - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0]) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_onehand10k_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_onehand10k_dataset( - data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_onehand10k_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_onehand10k_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_onehand10k_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_onehand10k_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.hand import OneHand10KDataset + + +class TestOneHand10KDataset(TestCase): + + def build_onehand10k_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_onehand10k.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/onehand10k', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return OneHand10KDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_onehand10k_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'onehand10k') + + # test number of keypoints + num_keypoints = 21 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_onehand10k_dataset(data_mode='topdown') + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0]) + + # test topdown testing + dataset = self.build_onehand10k_dataset( + data_mode='topdown', test_mode=True) + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0]) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_onehand10k_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_onehand10k_dataset( + data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_onehand10k_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): 
+ _ = self.build_onehand10k_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_onehand10k_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_onehand10k_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_hand_datasets/test_panoptic_hand2d_dataset.py b/tests/test_datasets/test_datasets/test_hand_datasets/test_panoptic_hand2d_dataset.py index 665795c985..38cb33a9b9 100644 --- a/tests/test_datasets/test_datasets/test_hand_datasets/test_panoptic_hand2d_dataset.py +++ b/tests/test_datasets/test_datasets/test_hand_datasets/test_panoptic_hand2d_dataset.py @@ -1,151 +1,151 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.hand import PanopticHand2DDataset - - -class TestPanopticHand2DDataset(TestCase): - - def build_panoptic_hand2d_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_panoptic.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/panoptic', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return PanopticHand2DDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - num_keypoints=int, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - iscrowd=int, - segmentation=list, - head_size=float, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - num_keypoints=list, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_panoptic_hand2d_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'panoptic_hand2d') - - # test number of keypoints - num_keypoints = 21 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training - dataset = self.build_panoptic_hand2d_dataset(data_mode='topdown') - 
self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0]) - - # test topdown testing - dataset = self.build_panoptic_hand2d_dataset( - data_mode='topdown', test_mode=True) - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0]) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_panoptic_hand2d_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_panoptic_hand2d_dataset( - data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_panoptic_hand2d_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_panoptic_hand2d_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_panoptic_hand2d_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_panoptic_hand2d_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.hand import PanopticHand2DDataset + + +class TestPanopticHand2DDataset(TestCase): + + def build_panoptic_hand2d_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_panoptic.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/panoptic', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return PanopticHand2DDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + num_keypoints=int, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + iscrowd=int, + segmentation=list, + head_size=float, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + num_keypoints=list, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_panoptic_hand2d_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'panoptic_hand2d') + + # test number of keypoints + num_keypoints = 21 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_panoptic_hand2d_dataset(data_mode='topdown') + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0]) + + # test topdown testing + dataset = self.build_panoptic_hand2d_dataset( + data_mode='topdown', test_mode=True) + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0]) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_panoptic_hand2d_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_panoptic_hand2d_dataset( + data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = 
self.build_panoptic_hand2d_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_panoptic_hand2d_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_panoptic_hand2d_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_panoptic_hand2d_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_hand_datasets/test_rhd2d_dataset.py b/tests/test_datasets/test_datasets/test_hand_datasets/test_rhd2d_dataset.py index 852966c8fd..f479a26ce1 100644 --- a/tests/test_datasets/test_datasets/test_hand_datasets/test_rhd2d_dataset.py +++ b/tests/test_datasets/test_datasets/test_hand_datasets/test_rhd2d_dataset.py @@ -1,144 +1,144 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.hand import Rhd2DDataset - - -class TestRhd2DDataset(TestCase): - - def build_rhd2d_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_rhd.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/rhd', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return Rhd2DDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_rhd2d_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'rhd2d') - - # test number of keypoints - num_keypoints = 21 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training - dataset = self.build_rhd2d_dataset(data_mode='topdown') - self.assertEqual(dataset.data_mode, 
'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 3) - self.check_data_info_keys(dataset[0]) - - # test topdown testing - dataset = self.build_rhd2d_dataset(data_mode='topdown', test_mode=True) - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(dataset.bbox_file, None) - self.assertEqual(len(dataset), 3) - self.check_data_info_keys(dataset[0]) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_rhd2d_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 3) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_rhd2d_dataset( - data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 3) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_rhd2d_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_rhd2d_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_rhd2d_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='temp_bbox_file.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_rhd2d_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.hand import Rhd2DDataset + + +class TestRhd2DDataset(TestCase): + + def build_rhd2d_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_rhd.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/rhd', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return Rhd2DDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_rhd2d_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'rhd2d') + + # test number of keypoints + num_keypoints = 21 + 
self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_rhd2d_dataset(data_mode='topdown') + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 3) + self.check_data_info_keys(dataset[0]) + + # test topdown testing + dataset = self.build_rhd2d_dataset(data_mode='topdown', test_mode=True) + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(dataset.bbox_file, None) + self.assertEqual(len(dataset), 3) + self.check_data_info_keys(dataset[0]) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_rhd2d_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 3) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_rhd2d_dataset( + data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 3) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_rhd2d_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_rhd2d_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_rhd2d_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='temp_bbox_file.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_rhd2d_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_wholebody_datasets/test_coco_wholebody_dataset.py b/tests/test_datasets/test_datasets/test_wholebody_datasets/test_coco_wholebody_dataset.py index a6ae20e534..b13e1af421 100644 --- a/tests/test_datasets/test_datasets/test_wholebody_datasets/test_coco_wholebody_dataset.py +++ b/tests/test_datasets/test_datasets/test_wholebody_datasets/test_coco_wholebody_dataset.py @@ -1,161 +1,161 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.wholebody import CocoWholeBodyDataset - - -class TestCocoWholeBodyDataset(TestCase): - - def build_coco_wholebody_dataset(self, **kwargs): - - cfg = dict( - ann_file='test_coco_wholebody.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/coco', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return CocoWholeBodyDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_coco_wholebody_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'coco_wholebody') - - # test number of keypoints - num_keypoints = 133 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] - self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training - dataset = self.build_coco_wholebody_dataset(data_mode='topdown') - # filter two invalid instances due to num_keypoints = 0 - self.assertEqual(len(dataset), 12) - self.check_data_info_keys(dataset[0], data_mode='topdown') - - # test topdown testing - dataset = self.build_coco_wholebody_dataset( - data_mode='topdown', test_mode=True) - self.assertEqual(len(dataset), 12) - self.check_data_info_keys(dataset[0], data_mode='topdown') - - # test topdown testing with bbox file - dataset = self.build_coco_wholebody_dataset( - data_mode='topdown', - test_mode=True, - bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') - self.assertEqual(len(dataset), 118) - self.check_data_info_keys(dataset[0], data_mode='topdown') - - # test topdown testing with filter config - dataset = self.build_coco_wholebody_dataset( - data_mode='topdown', - test_mode=True, - bbox_file='tests/data/coco/test_coco_det_AP_H_56.json', - filter_cfg=dict(bbox_score_thr=0.3)) - self.assertEqual(len(dataset), 33) - - def test_bottomup(self): - # test bottomup training - 
dataset = self.build_coco_wholebody_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_coco_wholebody_dataset( - data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_coco_wholebody_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_coco_wholebody_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_coco_wholebody_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_coco_wholebody_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.wholebody import CocoWholeBodyDataset + + +class TestCocoWholeBodyDataset(TestCase): + + def build_coco_wholebody_dataset(self, **kwargs): + + cfg = dict( + ann_file='test_coco_wholebody.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/coco', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return CocoWholeBodyDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_coco_wholebody_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'coco_wholebody') + + # test number of keypoints + num_keypoints = 133 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] + 
self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_coco_wholebody_dataset(data_mode='topdown') + # filter two invalid instances due to num_keypoints = 0 + self.assertEqual(len(dataset), 12) + self.check_data_info_keys(dataset[0], data_mode='topdown') + + # test topdown testing + dataset = self.build_coco_wholebody_dataset( + data_mode='topdown', test_mode=True) + self.assertEqual(len(dataset), 12) + self.check_data_info_keys(dataset[0], data_mode='topdown') + + # test topdown testing with bbox file + dataset = self.build_coco_wholebody_dataset( + data_mode='topdown', + test_mode=True, + bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') + self.assertEqual(len(dataset), 118) + self.check_data_info_keys(dataset[0], data_mode='topdown') + + # test topdown testing with filter config + dataset = self.build_coco_wholebody_dataset( + data_mode='topdown', + test_mode=True, + bbox_file='tests/data/coco/test_coco_det_AP_H_56.json', + filter_cfg=dict(bbox_score_thr=0.3)) + self.assertEqual(len(dataset), 33) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_coco_wholebody_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_coco_wholebody_dataset( + data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_coco_wholebody_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_coco_wholebody_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_coco_wholebody_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_coco_wholebody_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_datasets/test_wholebody_datasets/test_halpe_dataset.py b/tests/test_datasets/test_datasets/test_wholebody_datasets/test_halpe_dataset.py index 9ea79fc2cc..e4d85148c1 100644 --- a/tests/test_datasets/test_datasets/test_wholebody_datasets/test_halpe_dataset.py +++ b/tests/test_datasets/test_datasets/test_wholebody_datasets/test_halpe_dataset.py @@ -1,162 +1,162 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import numpy as np - -from mmpose.datasets.datasets.wholebody import HalpeDataset - - -class TestHalpeDataset(TestCase): - - def build_halpe_dataset(self, **kwargs): - cfg = dict( - ann_file='test_halpe.json', - bbox_file=None, - data_mode='topdown', - data_root='tests/data/halpe', - pipeline=[], - test_mode=False) - - cfg.update(kwargs) - return HalpeDataset(**cfg) - - def check_data_info_keys(self, - data_info: dict, - data_mode: str = 'topdown'): - if data_mode == 'topdown': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - id=int) - elif data_mode == 'bottomup': - expected_keys = dict( - img_id=int, - img_path=str, - bbox=np.ndarray, - bbox_score=np.ndarray, - keypoints=np.ndarray, - keypoints_visible=np.ndarray, - invalid_segs=list, - id=list) - else: - raise ValueError(f'Invalid data_mode {data_mode}') - - for key, type_ in expected_keys.items(): - self.assertIn(key, data_info) - self.assertIsInstance(data_info[key], type_, key) - - def check_metainfo_keys(self, metainfo: dict): - expected_keys = dict( - dataset_name=str, - num_keypoints=int, - keypoint_id2name=dict, - keypoint_name2id=dict, - upper_body_ids=list, - lower_body_ids=list, - flip_indices=list, - flip_pairs=list, - keypoint_colors=np.ndarray, - num_skeleton_links=int, - skeleton_links=list, - skeleton_link_colors=np.ndarray, - dataset_keypoint_weights=np.ndarray) - - for key, type_ in expected_keys.items(): - self.assertIn(key, metainfo) - self.assertIsInstance(metainfo[key], type_, key) - - def test_metainfo(self): - dataset = self.build_halpe_dataset() - self.check_metainfo_keys(dataset.metainfo) - # test dataset_name - self.assertEqual(dataset.metainfo['dataset_name'], 'halpe') - - # test number of keypoints - num_keypoints = 136 - self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) - self.assertEqual( - len(dataset.metainfo['keypoint_colors']), num_keypoints) - self.assertEqual( - len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) - # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] - self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) - - # test some extra metainfo - self.assertEqual( - len(dataset.metainfo['skeleton_links']), - len(dataset.metainfo['skeleton_link_colors'])) - - def test_topdown(self): - # test topdown training - dataset = self.build_halpe_dataset(data_mode='topdown') - self.assertEqual(dataset.data_mode, 'topdown') - # filter two invalid instance due to num_keypoints = 0 - self.assertEqual(len(dataset), 12) - self.check_data_info_keys(dataset[0], data_mode='topdown') - - # test topdown testing - dataset = self.build_halpe_dataset(data_mode='topdown', test_mode=True) - self.assertEqual(dataset.data_mode, 'topdown') - # filter two invalid instance due to num_keypoints = 0 - self.assertEqual(len(dataset), 12) - self.check_data_info_keys(dataset[0], data_mode='topdown') - - # test topdown testing with bbox file - dataset = self.build_halpe_dataset( - test_mode=True, - bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') - self.assertEqual(dataset.data_mode, 'topdown') - self.assertEqual(len(dataset), 118) - self.check_data_info_keys(dataset[0]) - - # test topdown testing with filter config - dataset = self.build_halpe_dataset( - test_mode=True, - bbox_file='tests/data/coco/test_coco_det_AP_H_56.json', - filter_cfg=dict(bbox_score_thr=0.3)) - self.assertEqual(dataset.data_mode, 'topdown') - 
self.assertEqual(len(dataset), 33) - - def test_bottomup(self): - # test bottomup training - dataset = self.build_halpe_dataset(data_mode='bottomup') - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - # test bottomup testing - dataset = self.build_halpe_dataset( - data_mode='bottomup', test_mode=True) - self.assertEqual(len(dataset), 4) - self.check_data_info_keys(dataset[0], data_mode='bottomup') - - def test_exceptions_and_warnings(self): - - with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): - _ = self.build_halpe_dataset(data_mode='invalid') - - with self.assertRaisesRegex( - ValueError, - '"bbox_file" is only supported when `test_mode==True`'): - _ = self.build_halpe_dataset( - data_mode='topdown', - test_mode=False, - bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') - - with self.assertRaisesRegex( - ValueError, '"bbox_file" is only supported in topdown mode'): - _ = self.build_halpe_dataset( - data_mode='bottomup', - test_mode=True, - bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') - - with self.assertRaisesRegex( - ValueError, - '"bbox_score_thr" is only supported in topdown mode'): - _ = self.build_halpe_dataset( - data_mode='bottomup', - test_mode=True, - filter_cfg=dict(bbox_score_thr=0.3)) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np + +from mmpose.datasets.datasets.wholebody import HalpeDataset + + +class TestHalpeDataset(TestCase): + + def build_halpe_dataset(self, **kwargs): + cfg = dict( + ann_file='test_halpe.json', + bbox_file=None, + data_mode='topdown', + data_root='tests/data/halpe', + pipeline=[], + test_mode=False) + + cfg.update(kwargs) + return HalpeDataset(**cfg) + + def check_data_info_keys(self, + data_info: dict, + data_mode: str = 'topdown'): + if data_mode == 'topdown': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + id=int) + elif data_mode == 'bottomup': + expected_keys = dict( + img_id=int, + img_path=str, + bbox=np.ndarray, + bbox_score=np.ndarray, + keypoints=np.ndarray, + keypoints_visible=np.ndarray, + invalid_segs=list, + id=list) + else: + raise ValueError(f'Invalid data_mode {data_mode}') + + for key, type_ in expected_keys.items(): + self.assertIn(key, data_info) + self.assertIsInstance(data_info[key], type_, key) + + def check_metainfo_keys(self, metainfo: dict): + expected_keys = dict( + dataset_name=str, + num_keypoints=int, + keypoint_id2name=dict, + keypoint_name2id=dict, + upper_body_ids=list, + lower_body_ids=list, + flip_indices=list, + flip_pairs=list, + keypoint_colors=np.ndarray, + num_skeleton_links=int, + skeleton_links=list, + skeleton_link_colors=np.ndarray, + dataset_keypoint_weights=np.ndarray) + + for key, type_ in expected_keys.items(): + self.assertIn(key, metainfo) + self.assertIsInstance(metainfo[key], type_, key) + + def test_metainfo(self): + dataset = self.build_halpe_dataset() + self.check_metainfo_keys(dataset.metainfo) + # test dataset_name + self.assertEqual(dataset.metainfo['dataset_name'], 'halpe') + + # test number of keypoints + num_keypoints = 136 + self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints) + self.assertEqual( + len(dataset.metainfo['keypoint_colors']), num_keypoints) + self.assertEqual( + len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints) + # note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = [] + 
self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints) + + # test some extra metainfo + self.assertEqual( + len(dataset.metainfo['skeleton_links']), + len(dataset.metainfo['skeleton_link_colors'])) + + def test_topdown(self): + # test topdown training + dataset = self.build_halpe_dataset(data_mode='topdown') + self.assertEqual(dataset.data_mode, 'topdown') + # filter two invalid instance due to num_keypoints = 0 + self.assertEqual(len(dataset), 12) + self.check_data_info_keys(dataset[0], data_mode='topdown') + + # test topdown testing + dataset = self.build_halpe_dataset(data_mode='topdown', test_mode=True) + self.assertEqual(dataset.data_mode, 'topdown') + # filter two invalid instance due to num_keypoints = 0 + self.assertEqual(len(dataset), 12) + self.check_data_info_keys(dataset[0], data_mode='topdown') + + # test topdown testing with bbox file + dataset = self.build_halpe_dataset( + test_mode=True, + bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(len(dataset), 118) + self.check_data_info_keys(dataset[0]) + + # test topdown testing with filter config + dataset = self.build_halpe_dataset( + test_mode=True, + bbox_file='tests/data/coco/test_coco_det_AP_H_56.json', + filter_cfg=dict(bbox_score_thr=0.3)) + self.assertEqual(dataset.data_mode, 'topdown') + self.assertEqual(len(dataset), 33) + + def test_bottomup(self): + # test bottomup training + dataset = self.build_halpe_dataset(data_mode='bottomup') + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + # test bottomup testing + dataset = self.build_halpe_dataset( + data_mode='bottomup', test_mode=True) + self.assertEqual(len(dataset), 4) + self.check_data_info_keys(dataset[0], data_mode='bottomup') + + def test_exceptions_and_warnings(self): + + with self.assertRaisesRegex(ValueError, 'got invalid data_mode'): + _ = self.build_halpe_dataset(data_mode='invalid') + + with self.assertRaisesRegex( + ValueError, + '"bbox_file" is only supported when `test_mode==True`'): + _ = self.build_halpe_dataset( + data_mode='topdown', + test_mode=False, + bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') + + with self.assertRaisesRegex( + ValueError, '"bbox_file" is only supported in topdown mode'): + _ = self.build_halpe_dataset( + data_mode='bottomup', + test_mode=True, + bbox_file='tests/data/coco/test_coco_det_AP_H_56.json') + + with self.assertRaisesRegex( + ValueError, + '"bbox_score_thr" is only supported in topdown mode'): + _ = self.build_halpe_dataset( + data_mode='bottomup', + test_mode=True, + filter_cfg=dict(bbox_score_thr=0.3)) diff --git a/tests/test_datasets/test_transforms/test_bottomup_transforms.py b/tests/test_datasets/test_transforms/test_bottomup_transforms.py index cded7a6efb..1a58e91a2c 100644 --- a/tests/test_datasets/test_transforms/test_bottomup_transforms.py +++ b/tests/test_datasets/test_transforms/test_bottomup_transforms.py @@ -1,147 +1,147 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from copy import deepcopy -from unittest import TestCase - -import numpy as np -from mmcv.transforms import Compose - -from mmpose.datasets.transforms import (BottomupGetHeatmapMask, - BottomupRandomAffine, BottomupResize, - RandomFlip) -from mmpose.testing import get_coco_sample - - -class TestBottomupRandomAffine(TestCase): - - def setUp(self): - # prepare dummy bottom-up data sample with COCO metainfo - self.data_info = get_coco_sample( - img_shape=(240, 320), num_instances=4, with_bbox_cs=True) - - def test_transform(self): - - # without UDP - transform = BottomupRandomAffine(input_size=(512, 512), use_udp=False) - results = transform(deepcopy(self.data_info)) - - self.assertEqual(results['img'].shape, (512, 512, 3)) - self.assertEqual(results['input_size'], (512, 512)) - self.assertIn('warp_mat', results) - - # with UDP - transform = BottomupRandomAffine(input_size=(512, 512), use_udp=True) - results = transform(deepcopy(self.data_info)) - - self.assertEqual(results['img'].shape, (512, 512, 3)) - self.assertEqual(results['input_size'], (512, 512)) - self.assertIn('warp_mat', results) - - -class TestBottomupGetHeatmapMask(TestCase): - - def setUp(self): - # prepare dummy bottom-up data sample with COCO metainfo - self.data_info = get_coco_sample( - img_shape=(240, 320), num_instances=4, with_bbox_cs=True) - - def test_transform(self): - - # single-scale heatmap mask - pipeline = Compose([ - BottomupRandomAffine(input_size=(512, 512)), - RandomFlip(prob=1.0, direction='horizontal'), - BottomupGetHeatmapMask() - ]) - - results = deepcopy(self.data_info) - results['heatmaps'] = np.random.rand(17, 64, 64).astype(np.float32) - results = pipeline(results) - - self.assertEqual(results['heatmap_mask'].shape, (64, 64)) - self.assertTrue(results['heatmap_mask'].dtype, np.uint8) - - # multi-scale heatmap mask - pipeline = Compose([ - BottomupRandomAffine(input_size=(512, 512)), - RandomFlip(prob=1.0, direction='horizontal'), - BottomupGetHeatmapMask() - ]) - - results = deepcopy(self.data_info) - heatmap_sizes = [(64, 64), (32, 32)] - results['heatmaps'] = [ - np.random.rand(17, h, w).astype(np.float32) - for w, h in heatmap_sizes - ] - results = pipeline(results) - - self.assertIsInstance(results['heatmap_mask'], list) - for i, sizes in enumerate(heatmap_sizes): - mask = results['heatmap_mask'][i] - self.assertEqual(mask.shape, sizes) - self.assertTrue(mask.dtype, np.uint8) - - # no heatmap - pipeline = Compose([ - BottomupRandomAffine(input_size=(512, 512)), - RandomFlip(prob=1.0, direction='horizontal'), - BottomupGetHeatmapMask() - ]) - - results = deepcopy(self.data_info) - results = pipeline(results) - - self.assertEqual(results['heatmap_mask'].shape, (512, 512)) - self.assertTrue(results['heatmap_mask'].dtype, np.uint8) - - -class TestBottomupResize(TestCase): - - def setUp(self): - # prepare dummy bottom-up data sample with COCO metainfo - self.data_info = get_coco_sample( - img_shape=(240, 480), - img_fill=255, - num_instances=4, - with_bbox_cs=True) - - def test_transform(self): - - # single-scale, fit - transform = BottomupResize(input_size=(256, 256), resize_mode='fit') - results = transform(deepcopy(self.data_info)) - # the middle section of the image is the resized content, while the - # top and bottom are padded with zeros - self.assertEqual(results['img'].shape, (256, 256, 3)) - self.assertTrue( - np.allclose(results['input_scale'], np.array([480., 480.]))) - self.assertTrue( - np.allclose(results['input_center'], np.array([240., 120.]))) - 
self.assertTrue(np.all(results['img'][64:192] > 0)) - self.assertTrue(np.all(results['img'][:64] == 0)) - self.assertTrue(np.all(results['img'][192:] == 0)) - - # single-scale, expand - transform = BottomupResize(input_size=(256, 256), resize_mode='expand') - results = transform(deepcopy(self.data_info)) - # the actual input size is expanded to (512, 256) according to the - # original image shape - self.assertEqual(results['img'].shape, (256, 512, 3)) - self.assertTrue(np.all(results['img'] > 0)) - - # single-scale, expand, size_factor=100 - transform = BottomupResize( - input_size=(256, 256), resize_mode='expand', size_factor=100) - results = transform(deepcopy(self.data_info)) - # input size is ceiled from (512, 256) to (600, 300) - self.assertEqual(results['img'].shape, (300, 600, 3)) - - # multi-scale - transform = BottomupResize( - input_size=(256, 256), aug_scales=[1.5], resize_mode='fit') - results = transform(deepcopy(self.data_info)) - self.assertIsInstance(results['img'], list) - self.assertIsInstance(results['input_center'], np.ndarray) - self.assertIsInstance(results['input_scale'], np.ndarray) - self.assertEqual(results['img'][0].shape, (256, 256, 3)) - self.assertEqual(results['img'][1].shape, (384, 384, 3)) +# Copyright (c) OpenMMLab. All rights reserved. +from copy import deepcopy +from unittest import TestCase + +import numpy as np +from mmcv.transforms import Compose + +from mmpose.datasets.transforms import (BottomupGetHeatmapMask, + BottomupRandomAffine, BottomupResize, + RandomFlip) +from mmpose.testing import get_coco_sample + + +class TestBottomupRandomAffine(TestCase): + + def setUp(self): + # prepare dummy bottom-up data sample with COCO metainfo + self.data_info = get_coco_sample( + img_shape=(240, 320), num_instances=4, with_bbox_cs=True) + + def test_transform(self): + + # without UDP + transform = BottomupRandomAffine(input_size=(512, 512), use_udp=False) + results = transform(deepcopy(self.data_info)) + + self.assertEqual(results['img'].shape, (512, 512, 3)) + self.assertEqual(results['input_size'], (512, 512)) + self.assertIn('warp_mat', results) + + # with UDP + transform = BottomupRandomAffine(input_size=(512, 512), use_udp=True) + results = transform(deepcopy(self.data_info)) + + self.assertEqual(results['img'].shape, (512, 512, 3)) + self.assertEqual(results['input_size'], (512, 512)) + self.assertIn('warp_mat', results) + + +class TestBottomupGetHeatmapMask(TestCase): + + def setUp(self): + # prepare dummy bottom-up data sample with COCO metainfo + self.data_info = get_coco_sample( + img_shape=(240, 320), num_instances=4, with_bbox_cs=True) + + def test_transform(self): + + # single-scale heatmap mask + pipeline = Compose([ + BottomupRandomAffine(input_size=(512, 512)), + RandomFlip(prob=1.0, direction='horizontal'), + BottomupGetHeatmapMask() + ]) + + results = deepcopy(self.data_info) + results['heatmaps'] = np.random.rand(17, 64, 64).astype(np.float32) + results = pipeline(results) + + self.assertEqual(results['heatmap_mask'].shape, (64, 64)) + self.assertTrue(results['heatmap_mask'].dtype, np.uint8) + + # multi-scale heatmap mask + pipeline = Compose([ + BottomupRandomAffine(input_size=(512, 512)), + RandomFlip(prob=1.0, direction='horizontal'), + BottomupGetHeatmapMask() + ]) + + results = deepcopy(self.data_info) + heatmap_sizes = [(64, 64), (32, 32)] + results['heatmaps'] = [ + np.random.rand(17, h, w).astype(np.float32) + for w, h in heatmap_sizes + ] + results = pipeline(results) + + self.assertIsInstance(results['heatmap_mask'], list) + for i, 
sizes in enumerate(heatmap_sizes): + mask = results['heatmap_mask'][i] + self.assertEqual(mask.shape, sizes) + self.assertTrue(mask.dtype, np.uint8) + + # no heatmap + pipeline = Compose([ + BottomupRandomAffine(input_size=(512, 512)), + RandomFlip(prob=1.0, direction='horizontal'), + BottomupGetHeatmapMask() + ]) + + results = deepcopy(self.data_info) + results = pipeline(results) + + self.assertEqual(results['heatmap_mask'].shape, (512, 512)) + self.assertTrue(results['heatmap_mask'].dtype, np.uint8) + + +class TestBottomupResize(TestCase): + + def setUp(self): + # prepare dummy bottom-up data sample with COCO metainfo + self.data_info = get_coco_sample( + img_shape=(240, 480), + img_fill=255, + num_instances=4, + with_bbox_cs=True) + + def test_transform(self): + + # single-scale, fit + transform = BottomupResize(input_size=(256, 256), resize_mode='fit') + results = transform(deepcopy(self.data_info)) + # the middle section of the image is the resized content, while the + # top and bottom are padded with zeros + self.assertEqual(results['img'].shape, (256, 256, 3)) + self.assertTrue( + np.allclose(results['input_scale'], np.array([480., 480.]))) + self.assertTrue( + np.allclose(results['input_center'], np.array([240., 120.]))) + self.assertTrue(np.all(results['img'][64:192] > 0)) + self.assertTrue(np.all(results['img'][:64] == 0)) + self.assertTrue(np.all(results['img'][192:] == 0)) + + # single-scale, expand + transform = BottomupResize(input_size=(256, 256), resize_mode='expand') + results = transform(deepcopy(self.data_info)) + # the actual input size is expanded to (512, 256) according to the + # original image shape + self.assertEqual(results['img'].shape, (256, 512, 3)) + self.assertTrue(np.all(results['img'] > 0)) + + # single-scale, expand, size_factor=100 + transform = BottomupResize( + input_size=(256, 256), resize_mode='expand', size_factor=100) + results = transform(deepcopy(self.data_info)) + # input size is ceiled from (512, 256) to (600, 300) + self.assertEqual(results['img'].shape, (300, 600, 3)) + + # multi-scale + transform = BottomupResize( + input_size=(256, 256), aug_scales=[1.5], resize_mode='fit') + results = transform(deepcopy(self.data_info)) + self.assertIsInstance(results['img'], list) + self.assertIsInstance(results['input_center'], np.ndarray) + self.assertIsInstance(results['input_scale'], np.ndarray) + self.assertEqual(results['img'][0].shape, (256, 256, 3)) + self.assertEqual(results['img'][1].shape, (384, 384, 3)) diff --git a/tests/test_datasets/test_transforms/test_common_transforms.py b/tests/test_datasets/test_transforms/test_common_transforms.py index 2818081dca..1d63943bc7 100644 --- a/tests/test_datasets/test_transforms/test_common_transforms.py +++ b/tests/test_datasets/test_transforms/test_common_transforms.py @@ -1,602 +1,602 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import copy -import os.path as osp -from copy import deepcopy -from unittest import TestCase - -import numpy as np -from mmcv.transforms import Compose, LoadImageFromFile -from mmengine.utils import is_list_of - -from mmpose.datasets.transforms import (Albumentation, GenerateTarget, - GetBBoxCenterScale, - PhotometricDistortion, - RandomBBoxTransform, RandomFlip, - RandomHalfBody, TopdownAffine) -from mmpose.testing import get_coco_sample - - -class TestGetBBoxCenterScale(TestCase): - - def setUp(self): - - # prepare dummy top-down data sample with COCO metainfo - self.data_info = get_coco_sample( - img_shape=(480, 640), - num_instances=1, - with_bbox_cs=True, - with_img_mask=True, - random_keypoints_visible=True) - - def test_transform(self): - # test converting bbox to center and scale - padding = 1.25 - - transform = GetBBoxCenterScale(padding=padding) - results = deepcopy(self.data_info) - results = transform(results) - - center = (results['bbox'][:, :2] + results['bbox'][:, 2:4]) * 0.5 - scale = (results['bbox'][:, 2:4] - results['bbox'][:, :2]) * padding - - self.assertTrue(np.allclose(results['bbox_center'], center)) - self.assertTrue(np.allclose(results['bbox_scale'], scale)) - - # test using existing bbox center and scale - results = deepcopy(self.data_info) - center = np.random.rand(1, 2).astype(np.float32) - scale = np.random.rand(1, 2).astype(np.float32) - results.update(bbox_center=center, bbox_scale=scale) - results = transform(results) - self.assertTrue(np.allclose(results['bbox_center'], center)) - self.assertTrue(np.allclose(results['bbox_scale'], scale)) - - def test_repr(self): - transform = GetBBoxCenterScale(padding=1.25) - self.assertEqual(repr(transform), 'GetBBoxCenterScale(padding=1.25)') - - -class TestRandomFlip(TestCase): - - def setUp(self): - # prepare dummy top-down data sample with COCO metainfo - self.data_info = get_coco_sample( - img_shape=(480, 640), - num_instances=1, - with_bbox_cs=True, - with_img_mask=True, - random_keypoints_visible=True) - - def test_init(self): - # prob: float, direction: str - _ = RandomFlip(prob=0.5, direction='horizontal') - - # prob: float, direction: list - _ = RandomFlip(prob=0.5, direction=['horizontal', 'vertical']) - - # prob: list, direction: list - _ = RandomFlip(prob=[0.3, 0.3], direction=['horizontal', 'vertical']) - - def test_transform(self): - # test horizontal flip - transform = RandomFlip(prob=1., direction='horizontal') - results = deepcopy(self.data_info) - results = transform(results) - - ids1, ids2 = zip(*(self.data_info['flip_pairs'])) - kpts1 = self.data_info['keypoints'][:, ids1] - kpts1_vis = self.data_info['keypoints_visible'][:, ids1] - kpts2 = results['keypoints'][:, ids2] - kpts2_vis = results['keypoints_visible'][:, ids2] - bbox_center_flipped = self.data_info['bbox_center'].copy() - bbox_center_flipped[:, 0] = 640 - 1 - bbox_center_flipped[:, 0] - - self.assertTrue( - np.allclose(results['img'], self.data_info['img'][:, ::-1])) - self.assertTrue( - np.allclose(results['img_mask'], - self.data_info['img_mask'][:, ::-1])) - self.assertTrue( - np.allclose(results['bbox_center'], bbox_center_flipped)) - self.assertTrue(np.allclose(kpts1[..., 0], 640 - kpts2[..., 0] - 1)) - self.assertTrue(np.allclose(kpts1[..., 1], kpts2[..., 1])) - self.assertTrue(np.allclose(kpts1_vis, kpts2_vis)) - - # test vertical flip - transform = RandomFlip(prob=1., direction='vertical') - results = deepcopy(self.data_info) - results = transform(results) - - ids1, ids2 = zip(*(self.data_info['flip_pairs'])) - kpts1 = 
self.data_info['keypoints'][:, ids1] - kpts1_vis = self.data_info['keypoints_visible'][:, ids1] - kpts2 = results['keypoints'][:, ids2] - kpts2_vis = results['keypoints_visible'][:, ids2] - bbox_center_flipped = self.data_info['bbox_center'].copy() - bbox_center_flipped[:, 1] = 480 - 1 - bbox_center_flipped[:, 1] - - self.assertTrue( - np.allclose(results['img'], self.data_info['img'][::-1])) - self.assertTrue( - np.allclose(results['img_mask'], self.data_info['img_mask'][::-1])) - self.assertTrue( - np.allclose(results['bbox_center'], bbox_center_flipped)) - self.assertTrue(np.allclose(kpts1[..., 0], kpts2[..., 0])) - self.assertTrue(np.allclose(kpts1[..., 1], 480 - kpts2[..., 1] - 1)) - self.assertTrue(np.allclose(kpts1_vis, kpts2_vis)) - - # test diagonal flip - transform = RandomFlip(prob=1., direction='diagonal') - results = deepcopy(self.data_info) - results = transform(results) - - kpts1 = self.data_info['keypoints'] - kpts1_vis = self.data_info['keypoints_visible'] - kpts2 = results['keypoints'] - kpts2_vis = results['keypoints_visible'] - bbox_center_flipped = self.data_info['bbox_center'].copy() - bbox_center_flipped[:, 0] = 640 - 1 - bbox_center_flipped[:, 0] - bbox_center_flipped[:, 1] = 480 - 1 - bbox_center_flipped[:, 1] - - self.assertTrue( - np.allclose(results['img'], self.data_info['img'][::-1, ::-1])) - self.assertTrue( - np.allclose(results['img_mask'], - self.data_info['img_mask'][::-1, ::-1])) - self.assertTrue( - np.allclose(results['bbox_center'], bbox_center_flipped)) - self.assertTrue(np.allclose(kpts1[..., 0], 640 - kpts2[..., 0] - 1)) - self.assertTrue(np.allclose(kpts1[..., 1], 480 - kpts2[..., 1] - 1)) - self.assertTrue(np.allclose(kpts1_vis, kpts2_vis)) - - def test_errors(self): - # invalid arguments - with self.assertRaisesRegex(ValueError, - 'probs must be float or list of float'): - _ = RandomFlip(prob=None) - - with self.assertRaisesRegex( - ValueError, 'direction must be either str or list of str'): - _ = RandomFlip(direction=None) - - with self.assertRaises(AssertionError): - _ = RandomFlip(prob=2.0) - - with self.assertRaises(AssertionError): - _ = RandomFlip(direction='invalid_direction') - - def test_repr(self): - transform = RandomFlip(prob=0.5, direction='horizontal') - self.assertEqual( - repr(transform), 'RandomFlip(prob=0.5, direction=horizontal)') - - -class TestRandomHalfBody(TestCase): - - def setUp(self): - # prepare dummy top-down data sample with COCO metainfo - self.data_info = get_coco_sample( - img_shape=(480, 640), - num_instances=1, - with_bbox_cs=True, - with_img_mask=True) - - def test_transform(self): - padding = 1.5 - - # keep upper body - transform = RandomHalfBody( - prob=1., - min_total_keypoints=8, - min_upper_keypoints=2, - min_lower_keypoints=2) - results = deepcopy(self.data_info) - results['keypoints_visible'][:, results['lower_body_ids']] = 0 - results = transform(results) - - kpts = self.data_info['keypoints'][:, self.data_info['upper_body_ids']] - self.assertTrue(np.allclose(results['bbox_center'], kpts.mean(axis=1))) - self.assertTrue( - np.allclose(results['bbox_scale'], - (kpts.max(axis=1) - kpts.min(axis=1)) * padding)) - - # keep lower body - transform = RandomHalfBody( - prob=1., - min_total_keypoints=6, - min_upper_keypoints=4, - min_lower_keypoints=4) - results = deepcopy(self.data_info) - results['keypoints_visible'][:, results['upper_body_ids']] = 0 - results = transform(results) - - kpts = self.data_info['keypoints'][:, self.data_info['lower_body_ids']] - self.assertTrue(np.allclose(results['bbox_center'], 
kpts.mean(axis=1))) - self.assertTrue( - np.allclose(results['bbox_scale'], - (kpts.max(axis=1) - kpts.min(axis=1)) * padding)) - - # no transform due to prob - transform = RandomHalfBody(prob=0.) - results = transform(deepcopy(self.data_info)) - - self.assertTrue( - np.allclose(results['bbox_center'], self.data_info['bbox_center'])) - self.assertTrue( - np.allclose(results['bbox_scale'], self.data_info['bbox_scale'])) - - # no transform due to insufficient valid total keypoints - transform = RandomHalfBody( - prob=1., - min_total_keypoints=8, - min_upper_keypoints=2, - min_lower_keypoints=2) - results = deepcopy(self.data_info) - results['keypoints_visible'].fill(0) - results = transform(results) - - self.assertTrue( - np.allclose(results['bbox_center'], self.data_info['bbox_center'])) - self.assertTrue( - np.allclose(results['bbox_scale'], self.data_info['bbox_scale'])) - - # no transform due to insufficient valid half-body keypoints - transform = RandomHalfBody( - prob=1., - min_total_keypoints=4, - min_upper_keypoints=3, - min_lower_keypoints=3) - results = deepcopy(self.data_info) - results['keypoints_visible'][:, results['upper_body_ids'][2:]] = 0 - results['keypoints_visible'][:, results['lower_body_ids'][2:]] = 0 - results = transform(results) - - self.assertTrue( - np.allclose(results['bbox_center'], self.data_info['bbox_center'])) - self.assertTrue( - np.allclose(results['bbox_scale'], self.data_info['bbox_scale'])) - - def test_repr(self): - transform = RandomHalfBody( - min_total_keypoints=8, - min_upper_keypoints=2, - min_lower_keypoints=2, - padding=1.5, - prob=0.3, - upper_prioritized_prob=0.7) - self.assertEqual( - repr(transform), - 'RandomHalfBody(min_total_keypoints=8, min_upper_keypoints=2, ' - 'min_lower_keypoints=2, padding=1.5, prob=0.3, ' - 'upper_prioritized_prob=0.7)') - - -class TestRandomBBoxTransform(TestCase): - - def setUp(self): - # prepare dummy top-down data sample with COCO metainfo - self.data_info = get_coco_sample( - img_shape=(480, 640), - num_instances=1, - with_bbox_cs=True, - with_img_mask=True) - - def test_transform(self): - shfit_factor = 0.16 - scale_factor = (0.5, 1.5) - rotate_factor = 90. 
- - # test random shift - transform = RandomBBoxTransform( - shift_factor=shfit_factor, - shift_prob=1.0, - scale_prob=0.0, - rotate_prob=0.0) - results = transform(deepcopy(self.data_info)) - - center = self.data_info['bbox_center'] - scale = self.data_info['bbox_scale'] - center_range = [ - center - scale * shfit_factor, - center + scale * shfit_factor, - ] - - self.assertFalse(np.allclose(results['bbox_center'], center)) - self.assertTrue(((results['bbox_center'] > center_range[0]) & - (results['bbox_center'] < center_range[1])).all()) - self.assertTrue(np.allclose(results['bbox_scale'], scale)) - self.assertTrue( - np.allclose(results['bbox_rotation'], np.zeros((1, 17)))) - - # test random resizing - transform = RandomBBoxTransform( - scale_factor=scale_factor, - shift_prob=0.0, - scale_prob=1.0, - rotate_prob=0.0) - - results = transform(deepcopy(self.data_info)) - center = self.data_info['bbox_center'] - scale = self.data_info['bbox_scale'] - scale_range = [scale * scale_factor[0], scale * scale_factor[1]] - - self.assertTrue(np.allclose(results['bbox_center'], center)) - self.assertFalse(np.allclose(results['bbox_scale'], scale)) - self.assertTrue(((results['bbox_scale'] > scale_range[0]) & - (results['bbox_scale'] < scale_range[1])).all()) - self.assertTrue( - np.allclose(results['bbox_rotation'], np.zeros((1, 17)))) - - # test random rotation - transform = RandomBBoxTransform( - rotate_factor=rotate_factor, - shift_prob=0.0, - scale_prob=0.0, - rotate_prob=1.0) - - results = transform(deepcopy(self.data_info)) - rotation_range = [-rotate_factor, rotate_factor] - bbox_rotation_min = np.full((1, 17), rotation_range[0]) - bbox_rotation_max = np.full((1, 17), rotation_range[1]) - - self.assertTrue( - np.allclose(results['bbox_center'], self.data_info['bbox_center'])) - self.assertTrue( - np.allclose(results['bbox_scale'], self.data_info['bbox_scale'])) - self.assertFalse(np.allclose(results['bbox_rotation'], 0)) - self.assertTrue(((results['bbox_rotation'] > bbox_rotation_min) & - (results['bbox_rotation'] < bbox_rotation_max)).all()) - - # test hybrid transform - transform = RandomBBoxTransform( - shift_factor=shfit_factor, - scale_factor=scale_factor, - rotate_factor=rotate_factor, - shift_prob=1.0, - scale_prob=1.0, - rotate_prob=1.0) - - results = transform(deepcopy(self.data_info)) - center = self.data_info['bbox_center'] - scale = self.data_info['bbox_scale'] - - center_range = [ - center - scale * shfit_factor, - center + scale * shfit_factor, - ] - scale_range = [scale * scale_factor[0], scale * scale_factor[1]] - rotation_range = [-rotate_factor, rotate_factor] - - self.assertFalse(np.allclose(results['bbox_center'], center)) - self.assertTrue(((results['bbox_center'] > center_range[0]) & - (results['bbox_center'] < center_range[1])).all()) - self.assertFalse(np.allclose(results['bbox_scale'], scale)) - self.assertTrue(((results['bbox_scale'] > scale_range[0]) & - (results['bbox_scale'] < scale_range[1])).all()) - self.assertFalse(np.allclose(results['bbox_rotation'], 0)) - self.assertTrue(((results['bbox_rotation'] > rotation_range[0]) & - (results['bbox_rotation'] < rotation_range[1])).all()) - - def test_repr(self): - transform = RandomBBoxTransform( - shift_factor=0.16, - shift_prob=0.3, - scale_factor=0.5, - scale_prob=1.0, - rotate_factor=40.0, - rotate_prob=0.6) - - self.assertEqual( - repr(transform), - 'RandomBBoxTransform(shift_prob=0.3, shift_factor=0.16, ' - 'scale_prob=1.0, scale_factor=0.5, rotate_prob=0.6, ' - 'rotate_factor=40.0)') - - -class 
TestAlbumentation(TestCase): - - def setUp(self): - """Setup the valiables which are used in each test method. - - TestCase calls functions in this order: setUp() -> testMethod() -> - tearDown() -> cleanUp() - """ - data_prefix = 'tests/data/coco' - results = dict(img_path=osp.join(data_prefix, '000000000785.jpg')) - load = LoadImageFromFile() - self.results = load(copy.deepcopy(results)) - - def test_transform(self): - # test when ``keymap`` is None - transform = Albumentation(transforms=[ - dict(type='RandomBrightnessContrast', p=0.2), - dict(type='ToFloat') - ]) - results_update = transform(copy.deepcopy(self.results)) - self.assertEqual(results_update['img'].dtype, np.float32) - - def test_repr(self): - # test when ``keymap`` is not None - transforms = [ - dict(type='RandomBrightnessContrast', p=0.2), - dict(type='ToFloat') - ] - transform = Albumentation( - transforms=transforms, keymap={'img': 'image'}) - self.assertEqual( - repr(transform), f'Albumentation(transforms={transforms})') - - -class TestPhotometricDistortion(TestCase): - - def setUp(self): - """Setup the valiables which are used in each test method. - - TestCase calls functions in this order: setUp() -> testMethod() -> - tearDown() -> cleanUp() - """ - data_prefix = 'tests/data/coco' - results = dict(img_path=osp.join(data_prefix, '000000000785.jpg')) - load = LoadImageFromFile() - self.results = load(copy.deepcopy(results)) - - def test_transform(self): - transform = PhotometricDistortion() - results_update = transform(copy.deepcopy(self.results)) - self.assertEqual(results_update['img'].dtype, np.uint8) - - def test_repr(self): - transform = PhotometricDistortion() - self.assertEqual( - repr(transform), ('PhotometricDistortion' - '(brightness_delta=32, ' - 'contrast_range=(0.5, 1.5), ' - 'saturation_range=(0.5, 1.5), ' - 'hue_delta=18)')) - - -class TestGenerateTarget(TestCase): - - def setUp(self): - # prepare dummy top-down data sample with COCO metainfo - self.data_info = get_coco_sample( - img_shape=(480, 640), - num_instances=1, - with_bbox_cs=True, - with_img_mask=True) - - def test_generate_single_target(self): - encoder = dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2.0) - - # generate heatmap - pipeline = Compose([ - TopdownAffine(input_size=(192, 256)), - GenerateTarget(encoder=encoder) - ]) - results = pipeline(deepcopy(self.data_info)) - - self.assertEqual(results['heatmaps'].shape, (17, 64, 48)) - self.assertTrue( - np.allclose(results['keypoint_weights'], np.ones((1, 17)))) - - # generate heatmap and use meta keypoint weights - pipeline = Compose([ - TopdownAffine(input_size=(192, 256)), - GenerateTarget( - encoder=encoder, - use_dataset_keypoint_weights=True, - ) - ]) - results = pipeline(deepcopy(self.data_info)) - - self.assertEqual(results['heatmaps'].shape, (17, 64, 48)) - self.assertEqual(results['keypoint_weights'].shape, (1, 17)) - self.assertTrue( - np.allclose(results['keypoint_weights'], - self.data_info['dataset_keypoint_weights'][None])) - - def test_generate_multilevel_target(self): - encoder_0 = dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2.0) - encoder_1 = dict(encoder_0, heatmap_size=(24, 32)) - - # generate multilevel heatmap - pipeline = Compose([ - TopdownAffine(input_size=(192, 256)), - GenerateTarget( - encoder=[encoder_0, encoder_1], - multilevel=True, - use_dataset_keypoint_weights=True) - ]) - results = pipeline(deepcopy(self.data_info)) - - self.assertTrue(is_list_of(results['heatmaps'], 
np.ndarray)) - self.assertTrue(is_list_of(results['keypoint_weights'], np.ndarray)) - self.assertEqual(results['heatmaps'][0].shape, (17, 64, 48)) - self.assertEqual(results['heatmaps'][1].shape, (17, 32, 24)) - self.assertEqual(results['keypoint_weights'][0].shape, (1, 17)) - - def test_generate_combined_target(self): - encoder_0 = dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2.0) - encoder_1 = dict(type='RegressionLabel', input_size=(192, 256)) - # generate multilevel heatmap - pipeline = Compose([ - TopdownAffine(input_size=(192, 256)), - GenerateTarget( - encoder=[encoder_0, encoder_1], - multilevel=False, - use_dataset_keypoint_weights=True) - ]) - - results = pipeline(deepcopy(self.data_info)) - - self.assertEqual(results['heatmaps'].shape, (17, 64, 48)) - self.assertEqual(results['keypoint_labels'].shape, (1, 17, 2)) - self.assertIsInstance(results['keypoint_weights'], list) - self.assertEqual(results['keypoint_weights'][0].shape, (1, 17)) - - def test_errors(self): - - # single encoder with `multilevel=True` - encoder = dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2.0) - - with self.assertRaisesRegex(AssertionError, - 'Need multiple encoder configs'): - _ = GenerateTarget(encoder=encoder, multilevel=True) - - # diverse keys in multilevel encoding - encoder_0 = dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2.0) - - encoder_1 = dict(type='RegressionLabel', input_size=(192, 256)) - pipeline = Compose([ - TopdownAffine(input_size=(192, 256)), - GenerateTarget(encoder=[encoder_0, encoder_1], multilevel=True) - ]) - - with self.assertRaisesRegex(ValueError, 'have the same keys'): - _ = pipeline(deepcopy(self.data_info)) - - # overlapping keys in combined encoding - encoder = dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2.0) - - pipeline = Compose([ - TopdownAffine(input_size=(192, 256)), - GenerateTarget(encoder=[encoder, encoder], multilevel=False) - ]) - - with self.assertRaisesRegex(ValueError, 'Overlapping item'): - _ = pipeline(deepcopy(self.data_info)) - - # deprecated argument `target_type` is given - encoder = dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2.0) - - with self.assertWarnsRegex(DeprecationWarning, - '`target_type` is deprecated'): - _ = GenerateTarget(encoder=encoder, target_type='heatmap') +# Copyright (c) OpenMMLab. All rights reserved. 
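For orientation, here is a minimal sketch of how the top-down transforms exercised in this test module (GetBBoxCenterScale, RandomFlip, TopdownAffine, GenerateTarget) might be chained with mmcv's Compose, mirroring the pipelines the tests build piecewise. It reuses only classes, argument names and the get_coco_sample helper that already appear in these tests; the concrete parameter values are illustrative, not a prescribed configuration.

from copy import deepcopy

from mmcv.transforms import Compose

from mmpose.datasets.transforms import (GenerateTarget, GetBBoxCenterScale,
                                        RandomFlip, TopdownAffine)
from mmpose.testing import get_coco_sample

# Dummy top-down sample with COCO metainfo, as prepared in the tests below.
data_info = get_coco_sample(
    img_shape=(480, 640), num_instances=1, with_bbox_cs=True,
    with_img_mask=True)

pipeline = Compose([
    GetBBoxCenterScale(padding=1.25),
    RandomFlip(prob=0.5, direction='horizontal'),
    TopdownAffine(input_size=(192, 256)),
    GenerateTarget(
        encoder=dict(
            type='MSRAHeatmap',
            input_size=(192, 256),
            heatmap_size=(48, 64),
            sigma=2.0)),
])

results = pipeline(deepcopy(data_info))
# As asserted in TestGenerateTarget, results['heatmaps'] has shape
# (17, 64, 48) and results['keypoint_weights'] has shape (1, 17).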
+import copy +import os.path as osp +from copy import deepcopy +from unittest import TestCase + +import numpy as np +from mmcv.transforms import Compose, LoadImageFromFile +from mmengine.utils import is_list_of + +from mmpose.datasets.transforms import (Albumentation, GenerateTarget, + GetBBoxCenterScale, + PhotometricDistortion, + RandomBBoxTransform, RandomFlip, + RandomHalfBody, TopdownAffine) +from mmpose.testing import get_coco_sample + + +class TestGetBBoxCenterScale(TestCase): + + def setUp(self): + + # prepare dummy top-down data sample with COCO metainfo + self.data_info = get_coco_sample( + img_shape=(480, 640), + num_instances=1, + with_bbox_cs=True, + with_img_mask=True, + random_keypoints_visible=True) + + def test_transform(self): + # test converting bbox to center and scale + padding = 1.25 + + transform = GetBBoxCenterScale(padding=padding) + results = deepcopy(self.data_info) + results = transform(results) + + center = (results['bbox'][:, :2] + results['bbox'][:, 2:4]) * 0.5 + scale = (results['bbox'][:, 2:4] - results['bbox'][:, :2]) * padding + + self.assertTrue(np.allclose(results['bbox_center'], center)) + self.assertTrue(np.allclose(results['bbox_scale'], scale)) + + # test using existing bbox center and scale + results = deepcopy(self.data_info) + center = np.random.rand(1, 2).astype(np.float32) + scale = np.random.rand(1, 2).astype(np.float32) + results.update(bbox_center=center, bbox_scale=scale) + results = transform(results) + self.assertTrue(np.allclose(results['bbox_center'], center)) + self.assertTrue(np.allclose(results['bbox_scale'], scale)) + + def test_repr(self): + transform = GetBBoxCenterScale(padding=1.25) + self.assertEqual(repr(transform), 'GetBBoxCenterScale(padding=1.25)') + + +class TestRandomFlip(TestCase): + + def setUp(self): + # prepare dummy top-down data sample with COCO metainfo + self.data_info = get_coco_sample( + img_shape=(480, 640), + num_instances=1, + with_bbox_cs=True, + with_img_mask=True, + random_keypoints_visible=True) + + def test_init(self): + # prob: float, direction: str + _ = RandomFlip(prob=0.5, direction='horizontal') + + # prob: float, direction: list + _ = RandomFlip(prob=0.5, direction=['horizontal', 'vertical']) + + # prob: list, direction: list + _ = RandomFlip(prob=[0.3, 0.3], direction=['horizontal', 'vertical']) + + def test_transform(self): + # test horizontal flip + transform = RandomFlip(prob=1., direction='horizontal') + results = deepcopy(self.data_info) + results = transform(results) + + ids1, ids2 = zip(*(self.data_info['flip_pairs'])) + kpts1 = self.data_info['keypoints'][:, ids1] + kpts1_vis = self.data_info['keypoints_visible'][:, ids1] + kpts2 = results['keypoints'][:, ids2] + kpts2_vis = results['keypoints_visible'][:, ids2] + bbox_center_flipped = self.data_info['bbox_center'].copy() + bbox_center_flipped[:, 0] = 640 - 1 - bbox_center_flipped[:, 0] + + self.assertTrue( + np.allclose(results['img'], self.data_info['img'][:, ::-1])) + self.assertTrue( + np.allclose(results['img_mask'], + self.data_info['img_mask'][:, ::-1])) + self.assertTrue( + np.allclose(results['bbox_center'], bbox_center_flipped)) + self.assertTrue(np.allclose(kpts1[..., 0], 640 - kpts2[..., 0] - 1)) + self.assertTrue(np.allclose(kpts1[..., 1], kpts2[..., 1])) + self.assertTrue(np.allclose(kpts1_vis, kpts2_vis)) + + # test vertical flip + transform = RandomFlip(prob=1., direction='vertical') + results = deepcopy(self.data_info) + results = transform(results) + + ids1, ids2 = zip(*(self.data_info['flip_pairs'])) + kpts1 = 
self.data_info['keypoints'][:, ids1] + kpts1_vis = self.data_info['keypoints_visible'][:, ids1] + kpts2 = results['keypoints'][:, ids2] + kpts2_vis = results['keypoints_visible'][:, ids2] + bbox_center_flipped = self.data_info['bbox_center'].copy() + bbox_center_flipped[:, 1] = 480 - 1 - bbox_center_flipped[:, 1] + + self.assertTrue( + np.allclose(results['img'], self.data_info['img'][::-1])) + self.assertTrue( + np.allclose(results['img_mask'], self.data_info['img_mask'][::-1])) + self.assertTrue( + np.allclose(results['bbox_center'], bbox_center_flipped)) + self.assertTrue(np.allclose(kpts1[..., 0], kpts2[..., 0])) + self.assertTrue(np.allclose(kpts1[..., 1], 480 - kpts2[..., 1] - 1)) + self.assertTrue(np.allclose(kpts1_vis, kpts2_vis)) + + # test diagonal flip + transform = RandomFlip(prob=1., direction='diagonal') + results = deepcopy(self.data_info) + results = transform(results) + + kpts1 = self.data_info['keypoints'] + kpts1_vis = self.data_info['keypoints_visible'] + kpts2 = results['keypoints'] + kpts2_vis = results['keypoints_visible'] + bbox_center_flipped = self.data_info['bbox_center'].copy() + bbox_center_flipped[:, 0] = 640 - 1 - bbox_center_flipped[:, 0] + bbox_center_flipped[:, 1] = 480 - 1 - bbox_center_flipped[:, 1] + + self.assertTrue( + np.allclose(results['img'], self.data_info['img'][::-1, ::-1])) + self.assertTrue( + np.allclose(results['img_mask'], + self.data_info['img_mask'][::-1, ::-1])) + self.assertTrue( + np.allclose(results['bbox_center'], bbox_center_flipped)) + self.assertTrue(np.allclose(kpts1[..., 0], 640 - kpts2[..., 0] - 1)) + self.assertTrue(np.allclose(kpts1[..., 1], 480 - kpts2[..., 1] - 1)) + self.assertTrue(np.allclose(kpts1_vis, kpts2_vis)) + + def test_errors(self): + # invalid arguments + with self.assertRaisesRegex(ValueError, + 'probs must be float or list of float'): + _ = RandomFlip(prob=None) + + with self.assertRaisesRegex( + ValueError, 'direction must be either str or list of str'): + _ = RandomFlip(direction=None) + + with self.assertRaises(AssertionError): + _ = RandomFlip(prob=2.0) + + with self.assertRaises(AssertionError): + _ = RandomFlip(direction='invalid_direction') + + def test_repr(self): + transform = RandomFlip(prob=0.5, direction='horizontal') + self.assertEqual( + repr(transform), 'RandomFlip(prob=0.5, direction=horizontal)') + + +class TestRandomHalfBody(TestCase): + + def setUp(self): + # prepare dummy top-down data sample with COCO metainfo + self.data_info = get_coco_sample( + img_shape=(480, 640), + num_instances=1, + with_bbox_cs=True, + with_img_mask=True) + + def test_transform(self): + padding = 1.5 + + # keep upper body + transform = RandomHalfBody( + prob=1., + min_total_keypoints=8, + min_upper_keypoints=2, + min_lower_keypoints=2) + results = deepcopy(self.data_info) + results['keypoints_visible'][:, results['lower_body_ids']] = 0 + results = transform(results) + + kpts = self.data_info['keypoints'][:, self.data_info['upper_body_ids']] + self.assertTrue(np.allclose(results['bbox_center'], kpts.mean(axis=1))) + self.assertTrue( + np.allclose(results['bbox_scale'], + (kpts.max(axis=1) - kpts.min(axis=1)) * padding)) + + # keep lower body + transform = RandomHalfBody( + prob=1., + min_total_keypoints=6, + min_upper_keypoints=4, + min_lower_keypoints=4) + results = deepcopy(self.data_info) + results['keypoints_visible'][:, results['upper_body_ids']] = 0 + results = transform(results) + + kpts = self.data_info['keypoints'][:, self.data_info['lower_body_ids']] + self.assertTrue(np.allclose(results['bbox_center'], 
kpts.mean(axis=1))) + self.assertTrue( + np.allclose(results['bbox_scale'], + (kpts.max(axis=1) - kpts.min(axis=1)) * padding)) + + # no transform due to prob + transform = RandomHalfBody(prob=0.) + results = transform(deepcopy(self.data_info)) + + self.assertTrue( + np.allclose(results['bbox_center'], self.data_info['bbox_center'])) + self.assertTrue( + np.allclose(results['bbox_scale'], self.data_info['bbox_scale'])) + + # no transform due to insufficient valid total keypoints + transform = RandomHalfBody( + prob=1., + min_total_keypoints=8, + min_upper_keypoints=2, + min_lower_keypoints=2) + results = deepcopy(self.data_info) + results['keypoints_visible'].fill(0) + results = transform(results) + + self.assertTrue( + np.allclose(results['bbox_center'], self.data_info['bbox_center'])) + self.assertTrue( + np.allclose(results['bbox_scale'], self.data_info['bbox_scale'])) + + # no transform due to insufficient valid half-body keypoints + transform = RandomHalfBody( + prob=1., + min_total_keypoints=4, + min_upper_keypoints=3, + min_lower_keypoints=3) + results = deepcopy(self.data_info) + results['keypoints_visible'][:, results['upper_body_ids'][2:]] = 0 + results['keypoints_visible'][:, results['lower_body_ids'][2:]] = 0 + results = transform(results) + + self.assertTrue( + np.allclose(results['bbox_center'], self.data_info['bbox_center'])) + self.assertTrue( + np.allclose(results['bbox_scale'], self.data_info['bbox_scale'])) + + def test_repr(self): + transform = RandomHalfBody( + min_total_keypoints=8, + min_upper_keypoints=2, + min_lower_keypoints=2, + padding=1.5, + prob=0.3, + upper_prioritized_prob=0.7) + self.assertEqual( + repr(transform), + 'RandomHalfBody(min_total_keypoints=8, min_upper_keypoints=2, ' + 'min_lower_keypoints=2, padding=1.5, prob=0.3, ' + 'upper_prioritized_prob=0.7)') + + +class TestRandomBBoxTransform(TestCase): + + def setUp(self): + # prepare dummy top-down data sample with COCO metainfo + self.data_info = get_coco_sample( + img_shape=(480, 640), + num_instances=1, + with_bbox_cs=True, + with_img_mask=True) + + def test_transform(self): + shfit_factor = 0.16 + scale_factor = (0.5, 1.5) + rotate_factor = 90. 
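+        # These factors bound the randomization verified by the assertions
+        # below: bbox_center may shift within center +/- bbox_scale * 0.16,
+        # bbox_scale is rescaled by a factor inside (0.5, 1.5), and
+        # bbox_rotation is drawn from the open interval (-90, 90).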
+ + # test random shift + transform = RandomBBoxTransform( + shift_factor=shfit_factor, + shift_prob=1.0, + scale_prob=0.0, + rotate_prob=0.0) + results = transform(deepcopy(self.data_info)) + + center = self.data_info['bbox_center'] + scale = self.data_info['bbox_scale'] + center_range = [ + center - scale * shfit_factor, + center + scale * shfit_factor, + ] + + self.assertFalse(np.allclose(results['bbox_center'], center)) + self.assertTrue(((results['bbox_center'] > center_range[0]) & + (results['bbox_center'] < center_range[1])).all()) + self.assertTrue(np.allclose(results['bbox_scale'], scale)) + self.assertTrue( + np.allclose(results['bbox_rotation'], np.zeros((1, 17)))) + + # test random resizing + transform = RandomBBoxTransform( + scale_factor=scale_factor, + shift_prob=0.0, + scale_prob=1.0, + rotate_prob=0.0) + + results = transform(deepcopy(self.data_info)) + center = self.data_info['bbox_center'] + scale = self.data_info['bbox_scale'] + scale_range = [scale * scale_factor[0], scale * scale_factor[1]] + + self.assertTrue(np.allclose(results['bbox_center'], center)) + self.assertFalse(np.allclose(results['bbox_scale'], scale)) + self.assertTrue(((results['bbox_scale'] > scale_range[0]) & + (results['bbox_scale'] < scale_range[1])).all()) + self.assertTrue( + np.allclose(results['bbox_rotation'], np.zeros((1, 17)))) + + # test random rotation + transform = RandomBBoxTransform( + rotate_factor=rotate_factor, + shift_prob=0.0, + scale_prob=0.0, + rotate_prob=1.0) + + results = transform(deepcopy(self.data_info)) + rotation_range = [-rotate_factor, rotate_factor] + bbox_rotation_min = np.full((1, 17), rotation_range[0]) + bbox_rotation_max = np.full((1, 17), rotation_range[1]) + + self.assertTrue( + np.allclose(results['bbox_center'], self.data_info['bbox_center'])) + self.assertTrue( + np.allclose(results['bbox_scale'], self.data_info['bbox_scale'])) + self.assertFalse(np.allclose(results['bbox_rotation'], 0)) + self.assertTrue(((results['bbox_rotation'] > bbox_rotation_min) & + (results['bbox_rotation'] < bbox_rotation_max)).all()) + + # test hybrid transform + transform = RandomBBoxTransform( + shift_factor=shfit_factor, + scale_factor=scale_factor, + rotate_factor=rotate_factor, + shift_prob=1.0, + scale_prob=1.0, + rotate_prob=1.0) + + results = transform(deepcopy(self.data_info)) + center = self.data_info['bbox_center'] + scale = self.data_info['bbox_scale'] + + center_range = [ + center - scale * shfit_factor, + center + scale * shfit_factor, + ] + scale_range = [scale * scale_factor[0], scale * scale_factor[1]] + rotation_range = [-rotate_factor, rotate_factor] + + self.assertFalse(np.allclose(results['bbox_center'], center)) + self.assertTrue(((results['bbox_center'] > center_range[0]) & + (results['bbox_center'] < center_range[1])).all()) + self.assertFalse(np.allclose(results['bbox_scale'], scale)) + self.assertTrue(((results['bbox_scale'] > scale_range[0]) & + (results['bbox_scale'] < scale_range[1])).all()) + self.assertFalse(np.allclose(results['bbox_rotation'], 0)) + self.assertTrue(((results['bbox_rotation'] > rotation_range[0]) & + (results['bbox_rotation'] < rotation_range[1])).all()) + + def test_repr(self): + transform = RandomBBoxTransform( + shift_factor=0.16, + shift_prob=0.3, + scale_factor=0.5, + scale_prob=1.0, + rotate_factor=40.0, + rotate_prob=0.6) + + self.assertEqual( + repr(transform), + 'RandomBBoxTransform(shift_prob=0.3, shift_factor=0.16, ' + 'scale_prob=1.0, scale_factor=0.5, rotate_prob=0.6, ' + 'rotate_factor=40.0)') + + +class 
TestAlbumentation(TestCase): + + def setUp(self): + """Setup the valiables which are used in each test method. + + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + data_prefix = 'tests/data/coco' + results = dict(img_path=osp.join(data_prefix, '000000000785.jpg')) + load = LoadImageFromFile() + self.results = load(copy.deepcopy(results)) + + def test_transform(self): + # test when ``keymap`` is None + transform = Albumentation(transforms=[ + dict(type='RandomBrightnessContrast', p=0.2), + dict(type='ToFloat') + ]) + results_update = transform(copy.deepcopy(self.results)) + self.assertEqual(results_update['img'].dtype, np.float32) + + def test_repr(self): + # test when ``keymap`` is not None + transforms = [ + dict(type='RandomBrightnessContrast', p=0.2), + dict(type='ToFloat') + ] + transform = Albumentation( + transforms=transforms, keymap={'img': 'image'}) + self.assertEqual( + repr(transform), f'Albumentation(transforms={transforms})') + + +class TestPhotometricDistortion(TestCase): + + def setUp(self): + """Setup the valiables which are used in each test method. + + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + data_prefix = 'tests/data/coco' + results = dict(img_path=osp.join(data_prefix, '000000000785.jpg')) + load = LoadImageFromFile() + self.results = load(copy.deepcopy(results)) + + def test_transform(self): + transform = PhotometricDistortion() + results_update = transform(copy.deepcopy(self.results)) + self.assertEqual(results_update['img'].dtype, np.uint8) + + def test_repr(self): + transform = PhotometricDistortion() + self.assertEqual( + repr(transform), ('PhotometricDistortion' + '(brightness_delta=32, ' + 'contrast_range=(0.5, 1.5), ' + 'saturation_range=(0.5, 1.5), ' + 'hue_delta=18)')) + + +class TestGenerateTarget(TestCase): + + def setUp(self): + # prepare dummy top-down data sample with COCO metainfo + self.data_info = get_coco_sample( + img_shape=(480, 640), + num_instances=1, + with_bbox_cs=True, + with_img_mask=True) + + def test_generate_single_target(self): + encoder = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2.0) + + # generate heatmap + pipeline = Compose([ + TopdownAffine(input_size=(192, 256)), + GenerateTarget(encoder=encoder) + ]) + results = pipeline(deepcopy(self.data_info)) + + self.assertEqual(results['heatmaps'].shape, (17, 64, 48)) + self.assertTrue( + np.allclose(results['keypoint_weights'], np.ones((1, 17)))) + + # generate heatmap and use meta keypoint weights + pipeline = Compose([ + TopdownAffine(input_size=(192, 256)), + GenerateTarget( + encoder=encoder, + use_dataset_keypoint_weights=True, + ) + ]) + results = pipeline(deepcopy(self.data_info)) + + self.assertEqual(results['heatmaps'].shape, (17, 64, 48)) + self.assertEqual(results['keypoint_weights'].shape, (1, 17)) + self.assertTrue( + np.allclose(results['keypoint_weights'], + self.data_info['dataset_keypoint_weights'][None])) + + def test_generate_multilevel_target(self): + encoder_0 = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2.0) + encoder_1 = dict(encoder_0, heatmap_size=(24, 32)) + + # generate multilevel heatmap + pipeline = Compose([ + TopdownAffine(input_size=(192, 256)), + GenerateTarget( + encoder=[encoder_0, encoder_1], + multilevel=True, + use_dataset_keypoint_weights=True) + ]) + results = pipeline(deepcopy(self.data_info)) + + self.assertTrue(is_list_of(results['heatmaps'], 
np.ndarray)) + self.assertTrue(is_list_of(results['keypoint_weights'], np.ndarray)) + self.assertEqual(results['heatmaps'][0].shape, (17, 64, 48)) + self.assertEqual(results['heatmaps'][1].shape, (17, 32, 24)) + self.assertEqual(results['keypoint_weights'][0].shape, (1, 17)) + + def test_generate_combined_target(self): + encoder_0 = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2.0) + encoder_1 = dict(type='RegressionLabel', input_size=(192, 256)) + # generate multilevel heatmap + pipeline = Compose([ + TopdownAffine(input_size=(192, 256)), + GenerateTarget( + encoder=[encoder_0, encoder_1], + multilevel=False, + use_dataset_keypoint_weights=True) + ]) + + results = pipeline(deepcopy(self.data_info)) + + self.assertEqual(results['heatmaps'].shape, (17, 64, 48)) + self.assertEqual(results['keypoint_labels'].shape, (1, 17, 2)) + self.assertIsInstance(results['keypoint_weights'], list) + self.assertEqual(results['keypoint_weights'][0].shape, (1, 17)) + + def test_errors(self): + + # single encoder with `multilevel=True` + encoder = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2.0) + + with self.assertRaisesRegex(AssertionError, + 'Need multiple encoder configs'): + _ = GenerateTarget(encoder=encoder, multilevel=True) + + # diverse keys in multilevel encoding + encoder_0 = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2.0) + + encoder_1 = dict(type='RegressionLabel', input_size=(192, 256)) + pipeline = Compose([ + TopdownAffine(input_size=(192, 256)), + GenerateTarget(encoder=[encoder_0, encoder_1], multilevel=True) + ]) + + with self.assertRaisesRegex(ValueError, 'have the same keys'): + _ = pipeline(deepcopy(self.data_info)) + + # overlapping keys in combined encoding + encoder = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2.0) + + pipeline = Compose([ + TopdownAffine(input_size=(192, 256)), + GenerateTarget(encoder=[encoder, encoder], multilevel=False) + ]) + + with self.assertRaisesRegex(ValueError, 'Overlapping item'): + _ = pipeline(deepcopy(self.data_info)) + + # deprecated argument `target_type` is given + encoder = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2.0) + + with self.assertWarnsRegex(DeprecationWarning, + '`target_type` is deprecated'): + _ = GenerateTarget(encoder=encoder, target_type='heatmap') diff --git a/tests/test_datasets/test_transforms/test_converting.py b/tests/test_datasets/test_transforms/test_converting.py index 09f06e1e65..e61e4bf1e3 100644 --- a/tests/test_datasets/test_transforms/test_converting.py +++ b/tests/test_datasets/test_transforms/test_converting.py @@ -1,73 +1,73 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -from mmpose.datasets.transforms import KeypointConverter -from mmpose.testing import get_coco_sample - - -class TestKeypointConverter(TestCase): - - def setUp(self): - # prepare dummy bottom-up data sample with COCO metainfo - self.data_info = get_coco_sample( - img_shape=(240, 320), num_instances=4, with_bbox_cs=True) - - def test_transform(self): - # 1-to-1 mapping - mapping = [(3, 0), (6, 1), (16, 2), (5, 3)] - transform = KeypointConverter(num_keypoints=5, mapping=mapping) - results = transform(self.data_info.copy()) - - # check shape - self.assertEqual(results['keypoints'].shape[0], - self.data_info['keypoints'].shape[0]) - self.assertEqual(results['keypoints'].shape[1], 5) - self.assertEqual(results['keypoints'].shape[2], 2) - self.assertEqual(results['keypoints_visible'].shape[0], - self.data_info['keypoints_visible'].shape[0]) - self.assertEqual(results['keypoints_visible'].shape[1], 5) - - # check value - for source_index, target_index in mapping: - self.assertTrue((results['keypoints'][:, target_index] == - self.data_info['keypoints'][:, - source_index]).all()) - self.assertTrue( - (results['keypoints_visible'][:, target_index] == - self.data_info['keypoints_visible'][:, source_index]).all()) - - # 2-to-1 mapping - mapping = [((3, 5), 0), (6, 1), (16, 2), (5, 3)] - transform = KeypointConverter(num_keypoints=5, mapping=mapping) - results = transform(self.data_info.copy()) - - # check shape - self.assertEqual(results['keypoints'].shape[0], - self.data_info['keypoints'].shape[0]) - self.assertEqual(results['keypoints'].shape[1], 5) - self.assertEqual(results['keypoints'].shape[2], 2) - self.assertEqual(results['keypoints_visible'].shape[0], - self.data_info['keypoints_visible'].shape[0]) - self.assertEqual(results['keypoints_visible'].shape[1], 5) - - # check value - for source_index, target_index in mapping: - if isinstance(source_index, tuple): - source_index, source_index2 = source_index - self.assertTrue( - (results['keypoints'][:, target_index] == 0.5 * - (self.data_info['keypoints'][:, source_index] + - self.data_info['keypoints'][:, source_index2])).all()) - self.assertTrue( - (results['keypoints_visible'][:, target_index] == - self.data_info['keypoints_visible'][:, source_index] * - self.data_info['keypoints_visible'][:, - source_index2]).all()) - else: - self.assertTrue( - (results['keypoints'][:, target_index] == - self.data_info['keypoints'][:, source_index]).all()) - self.assertTrue( - (results['keypoints_visible'][:, target_index] == - self.data_info['keypoints_visible'][:, - source_index]).all()) +# Copyright (c) OpenMMLab. All rights reserved. 
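The 2-to-1 mapping behaviour verified below is worth spelling out: when a mapping entry pairs a tuple of two source indices with a single target index, KeypointConverter writes the mean of the two source keypoints into the target slot and multiplies their visibilities. A minimal sketch, reusing only the classes and helper already imported in this file (the mapping itself is an arbitrary example):

from mmpose.datasets.transforms import KeypointConverter
from mmpose.testing import get_coco_sample

data_info = get_coco_sample(
    img_shape=(240, 320), num_instances=4, with_bbox_cs=True)

# ((3, 5), 0): target keypoint 0 = mean of source keypoints 3 and 5, with
# visibility = visible[3] * visible[5]; plain entries such as (6, 1) copy
# source keypoint 6 into target slot 1 unchanged.
converter = KeypointConverter(
    num_keypoints=5, mapping=[((3, 5), 0), (6, 1), (16, 2), (5, 3)])
results = converter(data_info.copy())
# results['keypoints'] now has five keypoints per instance, i.e. shape
# (4, 5, 2), matching the shape checks in the test below; target slot 4
# receives no source keypoint in this mapping.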
+from unittest import TestCase + +from mmpose.datasets.transforms import KeypointConverter +from mmpose.testing import get_coco_sample + + +class TestKeypointConverter(TestCase): + + def setUp(self): + # prepare dummy bottom-up data sample with COCO metainfo + self.data_info = get_coco_sample( + img_shape=(240, 320), num_instances=4, with_bbox_cs=True) + + def test_transform(self): + # 1-to-1 mapping + mapping = [(3, 0), (6, 1), (16, 2), (5, 3)] + transform = KeypointConverter(num_keypoints=5, mapping=mapping) + results = transform(self.data_info.copy()) + + # check shape + self.assertEqual(results['keypoints'].shape[0], + self.data_info['keypoints'].shape[0]) + self.assertEqual(results['keypoints'].shape[1], 5) + self.assertEqual(results['keypoints'].shape[2], 2) + self.assertEqual(results['keypoints_visible'].shape[0], + self.data_info['keypoints_visible'].shape[0]) + self.assertEqual(results['keypoints_visible'].shape[1], 5) + + # check value + for source_index, target_index in mapping: + self.assertTrue((results['keypoints'][:, target_index] == + self.data_info['keypoints'][:, + source_index]).all()) + self.assertTrue( + (results['keypoints_visible'][:, target_index] == + self.data_info['keypoints_visible'][:, source_index]).all()) + + # 2-to-1 mapping + mapping = [((3, 5), 0), (6, 1), (16, 2), (5, 3)] + transform = KeypointConverter(num_keypoints=5, mapping=mapping) + results = transform(self.data_info.copy()) + + # check shape + self.assertEqual(results['keypoints'].shape[0], + self.data_info['keypoints'].shape[0]) + self.assertEqual(results['keypoints'].shape[1], 5) + self.assertEqual(results['keypoints'].shape[2], 2) + self.assertEqual(results['keypoints_visible'].shape[0], + self.data_info['keypoints_visible'].shape[0]) + self.assertEqual(results['keypoints_visible'].shape[1], 5) + + # check value + for source_index, target_index in mapping: + if isinstance(source_index, tuple): + source_index, source_index2 = source_index + self.assertTrue( + (results['keypoints'][:, target_index] == 0.5 * + (self.data_info['keypoints'][:, source_index] + + self.data_info['keypoints'][:, source_index2])).all()) + self.assertTrue( + (results['keypoints_visible'][:, target_index] == + self.data_info['keypoints_visible'][:, source_index] * + self.data_info['keypoints_visible'][:, + source_index2]).all()) + else: + self.assertTrue( + (results['keypoints'][:, target_index] == + self.data_info['keypoints'][:, source_index]).all()) + self.assertTrue( + (results['keypoints_visible'][:, target_index] == + self.data_info['keypoints_visible'][:, + source_index]).all()) diff --git a/tests/test_datasets/test_transforms/test_formatting.py b/tests/test_datasets/test_transforms/test_formatting.py index 95fadb55b2..4090633128 100644 --- a/tests/test_datasets/test_transforms/test_formatting.py +++ b/tests/test_datasets/test_transforms/test_formatting.py @@ -1,108 +1,108 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -from unittest import TestCase - -import numpy as np -import torch -from mmengine.structures import InstanceData, PixelData - -from mmpose.datasets.transforms import PackPoseInputs -from mmpose.structures import PoseDataSample - - -class TestPackPoseInputs(TestCase): - - def setUp(self): - """Setup some variables which are used in every test method. 
- - TestCase calls functions in this order: setUp() -> testMethod() -> - tearDown() -> cleanUp() - """ - # prepare dummy top-down data sample with COCO metainfo - self.results_topdown = { - 'img_id': - 1, - 'img_path': - 'tests/data/coco/000000000785.jpg', - 'id': - 1, - 'ori_shape': (425, 640), - 'img_shape': (425, 640, 3), - 'scale_factor': - 2.0, - 'flip': - False, - 'flip_direction': - None, - 'img': - np.zeros((425, 640, 3), dtype=np.uint8), - 'bbox': - np.array([[0, 0, 100, 100]], dtype=np.float32), - 'bbox_center': - np.array([[50, 50]], dtype=np.float32), - 'bbox_scale': - np.array([[125, 125]], dtype=np.float32), - 'bbox_rotation': - np.array([45], dtype=np.float32), - 'bbox_score': - np.ones(1, dtype=np.float32), - 'keypoints': - np.random.randint(0, 100, (1, 17, 2)).astype(np.float32), - 'keypoints_visible': - np.full((1, 17), 1).astype(np.float32), - 'keypoint_weights': - np.full((1, 17), 1).astype(np.float32), - 'heatmaps': - np.random.random((17, 64, 48)).astype(np.float32), - 'keypoint_labels': - np.random.randint(0, 100, (1, 17, 2)).astype(np.float32), - 'keypoint_x_labels': - np.random.randint(0, 100, (1, 17, 2)).astype(np.float32), - 'keypoint_y_labels': - np.random.randint(0, 100, (1, 17, 2)).astype(np.float32), - 'transformed_keypoints': - np.random.randint(0, 100, (1, 17, 2)).astype(np.float32), - } - self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'img_shape', - 'scale_factor', 'flip', 'flip_direction') - - def test_transform(self): - transform = PackPoseInputs( - meta_keys=self.meta_keys, pack_transformed=True) - results = transform(copy.deepcopy(self.results_topdown)) - self.assertIn('transformed_keypoints', - results['data_samples'].gt_instances) - - transform = PackPoseInputs(meta_keys=self.meta_keys) - results = transform(copy.deepcopy(self.results_topdown)) - self.assertIn('inputs', results) - self.assertIsInstance(results['inputs'], torch.Tensor) - self.assertEqual(results['inputs'].shape, (3, 425, 640)) - self.assertIn('data_samples', results) - self.assertIsInstance(results['data_samples'], PoseDataSample) - self.assertIsInstance(results['data_samples'].gt_instances, - InstanceData) - self.assertIsInstance(results['data_samples'].gt_fields, PixelData) - self.assertEqual(len(results['data_samples'].gt_instances), 1) - self.assertIsInstance(results['data_samples'].gt_fields.heatmaps, - torch.Tensor) - self.assertNotIn('transformed_keypoints', - results['data_samples'].gt_instances) - - # test when results['img'] is sequence of frames - results = copy.deepcopy(self.results_topdown) - len_seq = 5 - results['img'] = [ - np.random.randint(0, 255, (425, 640, 3), dtype=np.uint8) - for _ in range(len_seq) - ] - results = transform(results) - self.assertIn('inputs', results) - self.assertIsInstance(results['inputs'], torch.Tensor) - # translate into 4-dim tensor: [len_seq, c, h, w] - self.assertEqual(results['inputs'].shape, (len_seq, 3, 425, 640)) - - def test_repr(self): - transform = PackPoseInputs(meta_keys=self.meta_keys) - self.assertEqual( - repr(transform), f'PackPoseInputs(meta_keys={self.meta_keys})') +# Copyright (c) OpenMMLab. All rights reserved. +import copy +from unittest import TestCase + +import numpy as np +import torch +from mmengine.structures import InstanceData, PixelData + +from mmpose.datasets.transforms import PackPoseInputs +from mmpose.structures import PoseDataSample + + +class TestPackPoseInputs(TestCase): + + def setUp(self): + """Setup some variables which are used in every test method. 
+ + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + # prepare dummy top-down data sample with COCO metainfo + self.results_topdown = { + 'img_id': + 1, + 'img_path': + 'tests/data/coco/000000000785.jpg', + 'id': + 1, + 'ori_shape': (425, 640), + 'img_shape': (425, 640, 3), + 'scale_factor': + 2.0, + 'flip': + False, + 'flip_direction': + None, + 'img': + np.zeros((425, 640, 3), dtype=np.uint8), + 'bbox': + np.array([[0, 0, 100, 100]], dtype=np.float32), + 'bbox_center': + np.array([[50, 50]], dtype=np.float32), + 'bbox_scale': + np.array([[125, 125]], dtype=np.float32), + 'bbox_rotation': + np.array([45], dtype=np.float32), + 'bbox_score': + np.ones(1, dtype=np.float32), + 'keypoints': + np.random.randint(0, 100, (1, 17, 2)).astype(np.float32), + 'keypoints_visible': + np.full((1, 17), 1).astype(np.float32), + 'keypoint_weights': + np.full((1, 17), 1).astype(np.float32), + 'heatmaps': + np.random.random((17, 64, 48)).astype(np.float32), + 'keypoint_labels': + np.random.randint(0, 100, (1, 17, 2)).astype(np.float32), + 'keypoint_x_labels': + np.random.randint(0, 100, (1, 17, 2)).astype(np.float32), + 'keypoint_y_labels': + np.random.randint(0, 100, (1, 17, 2)).astype(np.float32), + 'transformed_keypoints': + np.random.randint(0, 100, (1, 17, 2)).astype(np.float32), + } + self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction') + + def test_transform(self): + transform = PackPoseInputs( + meta_keys=self.meta_keys, pack_transformed=True) + results = transform(copy.deepcopy(self.results_topdown)) + self.assertIn('transformed_keypoints', + results['data_samples'].gt_instances) + + transform = PackPoseInputs(meta_keys=self.meta_keys) + results = transform(copy.deepcopy(self.results_topdown)) + self.assertIn('inputs', results) + self.assertIsInstance(results['inputs'], torch.Tensor) + self.assertEqual(results['inputs'].shape, (3, 425, 640)) + self.assertIn('data_samples', results) + self.assertIsInstance(results['data_samples'], PoseDataSample) + self.assertIsInstance(results['data_samples'].gt_instances, + InstanceData) + self.assertIsInstance(results['data_samples'].gt_fields, PixelData) + self.assertEqual(len(results['data_samples'].gt_instances), 1) + self.assertIsInstance(results['data_samples'].gt_fields.heatmaps, + torch.Tensor) + self.assertNotIn('transformed_keypoints', + results['data_samples'].gt_instances) + + # test when results['img'] is sequence of frames + results = copy.deepcopy(self.results_topdown) + len_seq = 5 + results['img'] = [ + np.random.randint(0, 255, (425, 640, 3), dtype=np.uint8) + for _ in range(len_seq) + ] + results = transform(results) + self.assertIn('inputs', results) + self.assertIsInstance(results['inputs'], torch.Tensor) + # translate into 4-dim tensor: [len_seq, c, h, w] + self.assertEqual(results['inputs'].shape, (len_seq, 3, 425, 640)) + + def test_repr(self): + transform = PackPoseInputs(meta_keys=self.meta_keys) + self.assertEqual( + repr(transform), f'PackPoseInputs(meta_keys={self.meta_keys})') diff --git a/tests/test_datasets/test_transforms/test_loading.py b/tests/test_datasets/test_transforms/test_loading.py index 0a63003c75..63e66265fc 100644 --- a/tests/test_datasets/test_transforms/test_loading.py +++ b/tests/test_datasets/test_transforms/test_loading.py @@ -1,31 +1,31 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import numpy as np -from mmcv import imread - -from mmpose.datasets.transforms.loading import LoadImage - - -class TestLoadImage(TestCase): - - def test_load_image(self): - - transform = LoadImage() - results = dict(img_path='tests/data/coco/000000000785.jpg') - - results = transform(results) - - self.assertIsInstance(results['img'], np.ndarray) - - def test_with_input_image(self): - transform = LoadImage(to_float32=True) - - img_path = 'tests/data/coco/000000000785.jpg' - results = dict( - img_path=img_path, img=imread(img_path).astype(np.uint8)) - - results = transform(results) - - self.assertIsInstance(results['img'], np.ndarray) - self.assertTrue(results['img'].dtype, np.float32) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np +from mmcv import imread + +from mmpose.datasets.transforms.loading import LoadImage + + +class TestLoadImage(TestCase): + + def test_load_image(self): + + transform = LoadImage() + results = dict(img_path='tests/data/coco/000000000785.jpg') + + results = transform(results) + + self.assertIsInstance(results['img'], np.ndarray) + + def test_with_input_image(self): + transform = LoadImage(to_float32=True) + + img_path = 'tests/data/coco/000000000785.jpg' + results = dict( + img_path=img_path, img=imread(img_path).astype(np.uint8)) + + results = transform(results) + + self.assertIsInstance(results['img'], np.ndarray) + self.assertTrue(results['img'].dtype, np.float32) diff --git a/tests/test_datasets/test_transforms/test_pose3d_transforms.py b/tests/test_datasets/test_transforms/test_pose3d_transforms.py index 5f5d5aa096..48d9996bea 100644 --- a/tests/test_datasets/test_transforms/test_pose3d_transforms.py +++ b/tests/test_datasets/test_transforms/test_pose3d_transforms.py @@ -1,150 +1,150 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -from copy import deepcopy -from unittest import TestCase - -import numpy as np -from mmengine.fileio import load - -from mmpose.datasets.transforms import RandomFlipAroundRoot - - -def get_h36m_sample(): - - def _parse_h36m_imgname(imgname): - """Parse imgname to get information of subject, action and camera. 
- - A typical h36m image filename is like: - S1_Directions_1.54138969_000001.jpg - """ - subj, rest = osp.basename(imgname).split('_', 1) - action, rest = rest.split('.', 1) - camera, rest = rest.split('_', 1) - return subj, action, camera - - ann_flle = 'tests/data/h36m/test_h36m_body3d.npz' - camera_param_file = 'tests/data/h36m/cameras.pkl' - - data = np.load(ann_flle) - cameras = load(camera_param_file) - - imgnames = data['imgname'] - keypoints = data['part'].astype(np.float32) - keypoints_3d = data['S'].astype(np.float32) - centers = data['center'].astype(np.float32) - scales = data['scale'].astype(np.float32) - - idx = 0 - target_idx = 0 - - data_info = { - 'keypoints': keypoints[idx, :, :2].reshape(1, -1, 2), - 'keypoints_visible': keypoints[idx, :, 2].reshape(1, -1), - 'keypoints_3d': keypoints_3d[idx, :, :3].reshape(1, -1, 3), - 'keypoints_3d_visible': keypoints_3d[idx, :, 3].reshape(1, -1), - 'scale': scales[idx], - 'center': centers[idx].astype(np.float32).reshape(1, -1), - 'id': idx, - 'img_ids': [idx], - 'img_paths': [imgnames[idx]], - 'category_id': 1, - 'iscrowd': 0, - 'sample_idx': idx, - 'lifting_target': keypoints_3d[target_idx, :, :3], - 'lifting_target_visible': keypoints_3d[target_idx, :, 3], - 'target_img_path': osp.join('tests/data/h36m', imgnames[target_idx]), - } - - # add camera parameters - subj, _, camera = _parse_h36m_imgname(imgnames[idx]) - data_info['camera_param'] = cameras[(subj, camera)] - - # add ann_info - ann_info = {} - ann_info['num_keypoints'] = 17 - ann_info['dataset_keypoint_weights'] = np.full(17, 1.0, dtype=np.float32) - ann_info['flip_pairs'] = [[1, 4], [2, 5], [3, 6], [11, 14], [12, 15], - [13, 16]] - ann_info['skeleton_links'] = [] - ann_info['upper_body_ids'] = (0, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) - ann_info['lower_body_ids'] = (1, 2, 3, 4, 5, 6) - ann_info['flip_indices'] = [ - 0, 4, 5, 6, 1, 2, 3, 7, 8, 9, 10, 14, 15, 16, 11, 12, 13 - ] - - data_info.update(ann_info) - - return data_info - - -class TestRandomFlipAroundRoot(TestCase): - - def setUp(self): - self.data_info = get_h36m_sample() - self.keypoints_flip_cfg = dict(center_mode='static', center_x=0.) 
- self.target_flip_cfg = dict(center_mode='root', center_index=0) - - def test_init(self): - _ = RandomFlipAroundRoot( - self.keypoints_flip_cfg, - self.target_flip_cfg, - flip_prob=0.5, - flip_camera=False) - - def test_transform(self): - kpts1 = self.data_info['keypoints'] - kpts_vis1 = self.data_info['keypoints_visible'] - tar1 = self.data_info['lifting_target'] - tar_vis1 = self.data_info['lifting_target_visible'] - - transform = RandomFlipAroundRoot( - self.keypoints_flip_cfg, self.target_flip_cfg, flip_prob=1) - results = deepcopy(self.data_info) - results = transform(results) - - kpts2 = results['keypoints'] - kpts_vis2 = results['keypoints_visible'] - tar2 = results['lifting_target'] - tar_vis2 = results['lifting_target_visible'] - - self.assertEqual(kpts_vis2.shape, (1, 17)) - self.assertEqual(tar_vis2.shape, (17, )) - self.assertEqual(kpts2.shape, (1, 17, 2)) - self.assertEqual(tar2.shape, (17, 3)) - - flip_indices = [ - 0, 4, 5, 6, 1, 2, 3, 7, 8, 9, 10, 14, 15, 16, 11, 12, 13 - ] - for left, right in enumerate(flip_indices): - self.assertTrue( - np.allclose(-kpts1[0][left][:1], kpts2[0][right][:1], atol=4.)) - self.assertTrue( - np.allclose(kpts1[0][left][1:], kpts2[0][right][1:], atol=4.)) - self.assertTrue( - np.allclose(tar1[left][1:], tar2[right][1:], atol=4.)) - - self.assertTrue( - np.allclose(kpts_vis1[0][left], kpts_vis2[0][right], atol=4.)) - self.assertTrue( - np.allclose(tar_vis1[left], tar_vis2[right], atol=4.)) - - # test camera flipping - transform = RandomFlipAroundRoot( - self.keypoints_flip_cfg, - self.target_flip_cfg, - flip_prob=1, - flip_camera=True) - results = deepcopy(self.data_info) - results = transform(results) - - camera2 = results['camera_param'] - self.assertTrue( - np.allclose( - -self.data_info['camera_param']['c'][0], - camera2['c'][0], - atol=4.)) - self.assertTrue( - np.allclose( - -self.data_info['camera_param']['p'][0], - camera2['p'][0], - atol=4.)) +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from copy import deepcopy +from unittest import TestCase + +import numpy as np +from mmengine.fileio import load + +from mmpose.datasets.transforms import RandomFlipAroundRoot + + +def get_h36m_sample(): + + def _parse_h36m_imgname(imgname): + """Parse imgname to get information of subject, action and camera. 
+ + A typical h36m image filename is like: + S1_Directions_1.54138969_000001.jpg + """ + subj, rest = osp.basename(imgname).split('_', 1) + action, rest = rest.split('.', 1) + camera, rest = rest.split('_', 1) + return subj, action, camera + + ann_flle = 'tests/data/h36m/test_h36m_body3d.npz' + camera_param_file = 'tests/data/h36m/cameras.pkl' + + data = np.load(ann_flle) + cameras = load(camera_param_file) + + imgnames = data['imgname'] + keypoints = data['part'].astype(np.float32) + keypoints_3d = data['S'].astype(np.float32) + centers = data['center'].astype(np.float32) + scales = data['scale'].astype(np.float32) + + idx = 0 + target_idx = 0 + + data_info = { + 'keypoints': keypoints[idx, :, :2].reshape(1, -1, 2), + 'keypoints_visible': keypoints[idx, :, 2].reshape(1, -1), + 'keypoints_3d': keypoints_3d[idx, :, :3].reshape(1, -1, 3), + 'keypoints_3d_visible': keypoints_3d[idx, :, 3].reshape(1, -1), + 'scale': scales[idx], + 'center': centers[idx].astype(np.float32).reshape(1, -1), + 'id': idx, + 'img_ids': [idx], + 'img_paths': [imgnames[idx]], + 'category_id': 1, + 'iscrowd': 0, + 'sample_idx': idx, + 'lifting_target': keypoints_3d[target_idx, :, :3], + 'lifting_target_visible': keypoints_3d[target_idx, :, 3], + 'target_img_path': osp.join('tests/data/h36m', imgnames[target_idx]), + } + + # add camera parameters + subj, _, camera = _parse_h36m_imgname(imgnames[idx]) + data_info['camera_param'] = cameras[(subj, camera)] + + # add ann_info + ann_info = {} + ann_info['num_keypoints'] = 17 + ann_info['dataset_keypoint_weights'] = np.full(17, 1.0, dtype=np.float32) + ann_info['flip_pairs'] = [[1, 4], [2, 5], [3, 6], [11, 14], [12, 15], + [13, 16]] + ann_info['skeleton_links'] = [] + ann_info['upper_body_ids'] = (0, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + ann_info['lower_body_ids'] = (1, 2, 3, 4, 5, 6) + ann_info['flip_indices'] = [ + 0, 4, 5, 6, 1, 2, 3, 7, 8, 9, 10, 14, 15, 16, 11, 12, 13 + ] + + data_info.update(ann_info) + + return data_info + + +class TestRandomFlipAroundRoot(TestCase): + + def setUp(self): + self.data_info = get_h36m_sample() + self.keypoints_flip_cfg = dict(center_mode='static', center_x=0.) 
+ self.target_flip_cfg = dict(center_mode='root', center_index=0) + + def test_init(self): + _ = RandomFlipAroundRoot( + self.keypoints_flip_cfg, + self.target_flip_cfg, + flip_prob=0.5, + flip_camera=False) + + def test_transform(self): + kpts1 = self.data_info['keypoints'] + kpts_vis1 = self.data_info['keypoints_visible'] + tar1 = self.data_info['lifting_target'] + tar_vis1 = self.data_info['lifting_target_visible'] + + transform = RandomFlipAroundRoot( + self.keypoints_flip_cfg, self.target_flip_cfg, flip_prob=1) + results = deepcopy(self.data_info) + results = transform(results) + + kpts2 = results['keypoints'] + kpts_vis2 = results['keypoints_visible'] + tar2 = results['lifting_target'] + tar_vis2 = results['lifting_target_visible'] + + self.assertEqual(kpts_vis2.shape, (1, 17)) + self.assertEqual(tar_vis2.shape, (17, )) + self.assertEqual(kpts2.shape, (1, 17, 2)) + self.assertEqual(tar2.shape, (17, 3)) + + flip_indices = [ + 0, 4, 5, 6, 1, 2, 3, 7, 8, 9, 10, 14, 15, 16, 11, 12, 13 + ] + for left, right in enumerate(flip_indices): + self.assertTrue( + np.allclose(-kpts1[0][left][:1], kpts2[0][right][:1], atol=4.)) + self.assertTrue( + np.allclose(kpts1[0][left][1:], kpts2[0][right][1:], atol=4.)) + self.assertTrue( + np.allclose(tar1[left][1:], tar2[right][1:], atol=4.)) + + self.assertTrue( + np.allclose(kpts_vis1[0][left], kpts_vis2[0][right], atol=4.)) + self.assertTrue( + np.allclose(tar_vis1[left], tar_vis2[right], atol=4.)) + + # test camera flipping + transform = RandomFlipAroundRoot( + self.keypoints_flip_cfg, + self.target_flip_cfg, + flip_prob=1, + flip_camera=True) + results = deepcopy(self.data_info) + results = transform(results) + + camera2 = results['camera_param'] + self.assertTrue( + np.allclose( + -self.data_info['camera_param']['c'][0], + camera2['c'][0], + atol=4.)) + self.assertTrue( + np.allclose( + -self.data_info['camera_param']['p'][0], + camera2['p'][0], + atol=4.)) diff --git a/tests/test_datasets/test_transforms/test_topdown_transforms.py b/tests/test_datasets/test_transforms/test_topdown_transforms.py index 1fa74ef8db..cd0f0217d8 100644 --- a/tests/test_datasets/test_transforms/test_topdown_transforms.py +++ b/tests/test_datasets/test_transforms/test_topdown_transforms.py @@ -1,34 +1,34 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from copy import deepcopy -from unittest import TestCase - -from mmpose.datasets.transforms import TopdownAffine -from mmpose.testing import get_coco_sample - - -class TestTopdownAffine(TestCase): - - def setUp(self): - # prepare dummy top-down data sample with COCO metainfo - self.data_info = get_coco_sample(num_instances=1, with_bbox_cs=True) - - def test_transform(self): - # without udp - transform = TopdownAffine(input_size=(192, 256), use_udp=False) - results = transform(deepcopy(self.data_info)) - self.assertEqual(results['input_size'], (192, 256)) - self.assertEqual(results['img'].shape, (256, 192, 3)) - self.assertIn('transformed_keypoints', results) - - # with udp - transform = TopdownAffine(input_size=(192, 256), use_udp=True) - results = transform(deepcopy(self.data_info)) - self.assertEqual(results['input_size'], (192, 256)) - self.assertEqual(results['img'].shape, (256, 192, 3)) - self.assertIn('transformed_keypoints', results) - - def test_repr(self): - transform = TopdownAffine(input_size=(192, 256), use_udp=False) - self.assertEqual( - repr(transform), - 'TopdownAffine(input_size=(192, 256), use_udp=False)') +# Copyright (c) OpenMMLab. All rights reserved. 
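The RandomFlipAroundRoot assertions above reduce to two operations: mirror the x-coordinate about a chosen center (a static x=0 for the 2D keypoints, the root joint for the 3D lifting target) and permute left/right joints with flip_indices. A pure-numpy sketch of that idea, independent of the MMPose implementation and assuming flip_indices is a symmetric left/right pairing as in Human3.6M:

import numpy as np

def flip_around_center(kpts, flip_indices, center_x=0.0):
    """Mirror keypoints about x = center_x and swap left/right joints.

    kpts: (K, D) array whose first column is the x-coordinate.
    flip_indices: flip_indices[i] is the joint index that joint i becomes.
    """
    flipped = kpts[flip_indices].copy()
    flipped[:, 0] = 2 * center_x - flipped[:, 0]
    return flipped

# Toy skeleton: joint 0 is the root, joints 1/2 are a left/right pair.
kpts = np.array([[1.0, 1.0], [2.0, 1.0], [-2.0, 1.0]])
flip_indices = [0, 2, 1]
print(flip_around_center(kpts, flip_indices))               # static center x=0
print(flip_around_center(kpts, flip_indices, kpts[0, 0]))   # "around root"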
+from copy import deepcopy +from unittest import TestCase + +from mmpose.datasets.transforms import TopdownAffine +from mmpose.testing import get_coco_sample + + +class TestTopdownAffine(TestCase): + + def setUp(self): + # prepare dummy top-down data sample with COCO metainfo + self.data_info = get_coco_sample(num_instances=1, with_bbox_cs=True) + + def test_transform(self): + # without udp + transform = TopdownAffine(input_size=(192, 256), use_udp=False) + results = transform(deepcopy(self.data_info)) + self.assertEqual(results['input_size'], (192, 256)) + self.assertEqual(results['img'].shape, (256, 192, 3)) + self.assertIn('transformed_keypoints', results) + + # with udp + transform = TopdownAffine(input_size=(192, 256), use_udp=True) + results = transform(deepcopy(self.data_info)) + self.assertEqual(results['input_size'], (192, 256)) + self.assertEqual(results['img'].shape, (256, 192, 3)) + self.assertIn('transformed_keypoints', results) + + def test_repr(self): + transform = TopdownAffine(input_size=(192, 256), use_udp=False) + self.assertEqual( + repr(transform), + 'TopdownAffine(input_size=(192, 256), use_udp=False)') diff --git a/tests/test_engine/test_hooks/test_visualization_hook.py b/tests/test_engine/test_hooks/test_visualization_hook.py index 3e4a202198..937443e357 100644 --- a/tests/test_engine/test_hooks/test_visualization_hook.py +++ b/tests/test_engine/test_hooks/test_visualization_hook.py @@ -1,73 +1,73 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -import shutil -import time -from unittest import TestCase -from unittest.mock import MagicMock - -import numpy as np -from mmengine.structures import InstanceData - -from mmpose.engine.hooks import PoseVisualizationHook -from mmpose.structures import PoseDataSample -from mmpose.visualization import PoseLocalVisualizer - - -def _rand_poses(num_boxes, h, w): - center = np.random.rand(num_boxes, 2) - offset = np.random.rand(num_boxes, 5, 2) / 2.0 - - pose = center[:, None, :] + offset.clip(0, 1) - pose[:, :, 0] *= w - pose[:, :, 1] *= h - - return pose - - -class TestVisualizationHook(TestCase): - - def setUp(self) -> None: - PoseLocalVisualizer.get_instance('test_visualization_hook') - - data_sample = PoseDataSample() - data_sample.set_metainfo({ - 'img_path': - osp.join( - osp.dirname(__file__), '../../data/coco/000000000785.jpg') - }) - self.data_batch = {'data_samples': [data_sample] * 2} - - pred_instances = InstanceData() - pred_instances.keypoints = _rand_poses(5, 10, 12) - pred_instances.score = np.random.rand(5, 5) - pred_det_data_sample = data_sample.clone() - pred_det_data_sample.pred_instances = pred_instances - self.outputs = [pred_det_data_sample] * 2 - - def test_after_val_iter(self): - runner = MagicMock() - runner.iter = 1 - runner.val_evaluator.dataset_meta = dict() - hook = PoseVisualizationHook(interval=1, enable=True) - hook.after_val_iter(runner, 1, self.data_batch, self.outputs) - - def test_after_test_iter(self): - runner = MagicMock() - runner.iter = 1 - hook = PoseVisualizationHook(enable=True) - hook.after_test_iter(runner, 1, self.data_batch, self.outputs) - self.assertEqual(hook._test_index, 2) - - # test - timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) - out_dir = timestamp + '1' - runner.work_dir = timestamp - runner.timestamp = '1' - hook = PoseVisualizationHook(enable=False, out_dir=out_dir) - hook.after_test_iter(runner, 1, self.data_batch, self.outputs) - self.assertTrue(not osp.exists(f'{timestamp}/1/{out_dir}')) - - hook = 
PoseVisualizationHook(enable=True, out_dir=out_dir) - hook.after_test_iter(runner, 1, self.data_batch, self.outputs) - self.assertTrue(osp.exists(f'{timestamp}/1/{out_dir}')) - shutil.rmtree(f'{timestamp}') +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import shutil +import time +from unittest import TestCase +from unittest.mock import MagicMock + +import numpy as np +from mmengine.structures import InstanceData + +from mmpose.engine.hooks import PoseVisualizationHook +from mmpose.structures import PoseDataSample +from mmpose.visualization import PoseLocalVisualizer + + +def _rand_poses(num_boxes, h, w): + center = np.random.rand(num_boxes, 2) + offset = np.random.rand(num_boxes, 5, 2) / 2.0 + + pose = center[:, None, :] + offset.clip(0, 1) + pose[:, :, 0] *= w + pose[:, :, 1] *= h + + return pose + + +class TestVisualizationHook(TestCase): + + def setUp(self) -> None: + PoseLocalVisualizer.get_instance('test_visualization_hook') + + data_sample = PoseDataSample() + data_sample.set_metainfo({ + 'img_path': + osp.join( + osp.dirname(__file__), '../../data/coco/000000000785.jpg') + }) + self.data_batch = {'data_samples': [data_sample] * 2} + + pred_instances = InstanceData() + pred_instances.keypoints = _rand_poses(5, 10, 12) + pred_instances.score = np.random.rand(5, 5) + pred_det_data_sample = data_sample.clone() + pred_det_data_sample.pred_instances = pred_instances + self.outputs = [pred_det_data_sample] * 2 + + def test_after_val_iter(self): + runner = MagicMock() + runner.iter = 1 + runner.val_evaluator.dataset_meta = dict() + hook = PoseVisualizationHook(interval=1, enable=True) + hook.after_val_iter(runner, 1, self.data_batch, self.outputs) + + def test_after_test_iter(self): + runner = MagicMock() + runner.iter = 1 + hook = PoseVisualizationHook(enable=True) + hook.after_test_iter(runner, 1, self.data_batch, self.outputs) + self.assertEqual(hook._test_index, 2) + + # test + timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) + out_dir = timestamp + '1' + runner.work_dir = timestamp + runner.timestamp = '1' + hook = PoseVisualizationHook(enable=False, out_dir=out_dir) + hook.after_test_iter(runner, 1, self.data_batch, self.outputs) + self.assertTrue(not osp.exists(f'{timestamp}/1/{out_dir}')) + + hook = PoseVisualizationHook(enable=True, out_dir=out_dir) + hook.after_test_iter(runner, 1, self.data_batch, self.outputs) + self.assertTrue(osp.exists(f'{timestamp}/1/{out_dir}')) + shutil.rmtree(f'{timestamp}') diff --git a/tests/test_evaluation/test_functional/test_keypoint_eval.py b/tests/test_evaluation/test_functional/test_keypoint_eval.py index 47ede83921..e66cdf73fe 100644 --- a/tests/test_evaluation/test_functional/test_keypoint_eval.py +++ b/tests/test_evaluation/test_functional/test_keypoint_eval.py @@ -1,212 +1,212 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
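The TopdownAffine tests above show the transform's contract: given a sample with bbox center/scale, it warps the instance crop to a fixed (w, h) input_size and records transformed_keypoints. A short usage sketch reusing the same get_coco_sample helper the tests rely on (assumed available from mmpose.testing):

from copy import deepcopy

from mmpose.datasets.transforms import TopdownAffine
from mmpose.testing import get_coco_sample

data_info = get_coco_sample(num_instances=1, with_bbox_cs=True)

# input_size is (w, h) = (192, 256); the warped image comes back HWC,
# hence a (256, 192, 3) shape.
transform = TopdownAffine(input_size=(192, 256), use_udp=True)
results = transform(deepcopy(data_info))

assert results['input_size'] == (192, 256)
assert results['img'].shape == (256, 192, 3)
assert 'transformed_keypoints' in results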
-from unittest import TestCase - -import numpy as np -from numpy.testing import assert_array_almost_equal - -from mmpose.evaluation.functional import (keypoint_auc, keypoint_epe, - keypoint_mpjpe, keypoint_nme, - keypoint_pck_accuracy, - multilabel_classification_accuracy, - pose_pck_accuracy) - - -class TestKeypointEval(TestCase): - - def test_keypoint_pck_accuracy(self): - - output = np.zeros((2, 5, 2)) - target = np.zeros((2, 5, 2)) - mask = np.array([[True, True, False, True, True], - [True, True, False, True, True]]) - - # first channel - output[0, 0] = [10, 0] - target[0, 0] = [10, 0] - # second channel - output[0, 1] = [20, 20] - target[0, 1] = [10, 10] - # third channel - output[0, 2] = [0, 0] - target[0, 2] = [-1, 0] - # fourth channel - output[0, 3] = [30, 30] - target[0, 3] = [30, 30] - # fifth channel - output[0, 4] = [0, 10] - target[0, 4] = [0, 10] - - thr = np.full((2, 2), 10, dtype=np.float32) - - acc, avg_acc, cnt = keypoint_pck_accuracy(output, target, mask, 0.5, - thr) - - assert_array_almost_equal(acc, np.array([1, 0.5, -1, 1, 1]), decimal=4) - self.assertAlmostEqual(avg_acc, 0.875, delta=1e-4) - self.assertAlmostEqual(cnt, 4, delta=1e-4) - - acc, avg_acc, cnt = keypoint_pck_accuracy(output, target, mask, 0.5, - np.zeros((2, 2))) - assert_array_almost_equal( - acc, np.array([-1, -1, -1, -1, -1]), decimal=4) - self.assertAlmostEqual(avg_acc, 0, delta=1e-4) - self.assertAlmostEqual(cnt, 0, delta=1e-4) - - acc, avg_acc, cnt = keypoint_pck_accuracy(output, target, mask, 0.5, - np.array([[0, 0], [10, 10]])) - assert_array_almost_equal(acc, np.array([1, 1, -1, 1, 1]), decimal=4) - self.assertAlmostEqual(avg_acc, 1, delta=1e-4) - self.assertAlmostEqual(cnt, 4, delta=1e-4) - - def test_keypoint_auc(self): - output = np.zeros((1, 5, 2)) - target = np.zeros((1, 5, 2)) - mask = np.array([[True, True, False, True, True]]) - # first channel - output[0, 0] = [10, 4] - target[0, 0] = [10, 0] - # second channel - output[0, 1] = [10, 18] - target[0, 1] = [10, 10] - # third channel - output[0, 2] = [0, 0] - target[0, 2] = [0, -1] - # fourth channel - output[0, 3] = [40, 40] - target[0, 3] = [30, 30] - # fifth channel - output[0, 4] = [20, 10] - target[0, 4] = [0, 10] - - auc = keypoint_auc(output, target, mask, 20, 4) - self.assertAlmostEqual(auc, 0.375, delta=1e-4) - - def test_keypoint_epe(self): - output = np.zeros((1, 5, 2)) - target = np.zeros((1, 5, 2)) - mask = np.array([[True, True, False, True, True]]) - # first channel - output[0, 0] = [10, 4] - target[0, 0] = [10, 0] - # second channel - output[0, 1] = [10, 18] - target[0, 1] = [10, 10] - # third channel - output[0, 2] = [0, 0] - target[0, 2] = [-1, -1] - # fourth channel - output[0, 3] = [40, 40] - target[0, 3] = [30, 30] - # fifth channel - output[0, 4] = [20, 10] - target[0, 4] = [0, 10] - - epe = keypoint_epe(output, target, mask) - self.assertAlmostEqual(epe, 11.5355339, delta=1e-4) - - def test_keypoint_nme(self): - output = np.zeros((1, 5, 2)) - target = np.zeros((1, 5, 2)) - mask = np.array([[True, True, False, True, True]]) - # first channel - output[0, 0] = [10, 4] - target[0, 0] = [10, 0] - # second channel - output[0, 1] = [10, 18] - target[0, 1] = [10, 10] - # third channel - output[0, 2] = [0, 0] - target[0, 2] = [-1, -1] - # fourth channel - output[0, 3] = [40, 40] - target[0, 3] = [30, 30] - # fifth channel - output[0, 4] = [20, 10] - target[0, 4] = [0, 10] - - normalize_factor = np.ones((output.shape[0], output.shape[2])) - - nme = keypoint_nme(output, target, mask, normalize_factor) - 
self.assertAlmostEqual(nme, 11.5355339, delta=1e-4) - - def test_pose_pck_accuracy(self): - output = np.zeros((1, 5, 64, 64), dtype=np.float32) - target = np.zeros((1, 5, 64, 64), dtype=np.float32) - mask = np.array([[True, True, False, False, False]]) - # first channel - output[0, 0, 20, 20] = 1 - target[0, 0, 10, 10] = 1 - # second channel - output[0, 1, 30, 30] = 1 - target[0, 1, 30, 30] = 1 - - acc, avg_acc, cnt = pose_pck_accuracy(output, target, mask) - - assert_array_almost_equal(acc, np.array([0, 1, -1, -1, -1]), decimal=4) - self.assertAlmostEqual(avg_acc, 0.5, delta=1e-4) - self.assertAlmostEqual(cnt, 2, delta=1e-4) - - def test_multilabel_classification_accuracy(self): - output = np.array([[0.7, 0.8, 0.4], [0.8, 0.1, 0.1]]) - target = np.array([[1, 0, 0], [1, 0, 1]]) - mask = np.array([[True, True, True], [True, True, True]]) - thr = 0.5 - acc = multilabel_classification_accuracy(output, target, mask, thr) - self.assertEqual(acc, 0) - - output = np.array([[0.7, 0.2, 0.4], [0.8, 0.1, 0.9]]) - thr = 0.5 - acc = multilabel_classification_accuracy(output, target, mask, thr) - self.assertEqual(acc, 1) - - thr = 0.3 - acc = multilabel_classification_accuracy(output, target, mask, thr) - self.assertEqual(acc, 0.5) - - mask = np.array([[True, True, False], [True, True, True]]) - acc = multilabel_classification_accuracy(output, target, mask, thr) - self.assertEqual(acc, 1) - - def test_keypoint_mpjpe(self): - output = np.zeros((2, 5, 3)) - target = np.zeros((2, 5, 3)) - mask = np.array([[True, True, False, True, True], - [True, True, False, True, True]]) - - # first channel - output[0, 0] = [1, 0, 0] - target[0, 0] = [1, 0, 0] - output[1, 0] = [1, 0, 0] - target[1, 0] = [1, 1, 0] - # second channel - output[0, 1] = [2, 2, 0] - target[0, 1] = [1, 1, 1] - output[1, 1] = [2, 2, 1] - target[1, 1] = [1, 0, 1] - # third channel - output[0, 2] = [0, 0, -1] - target[0, 2] = [-1, 0, 0] - output[1, 2] = [-1, 0, 0] - target[1, 2] = [-1, 0, 0] - # fourth channel - output[0, 3] = [3, 3, 1] - target[0, 3] = [3, 3, 1] - output[1, 3] = [0, 0, 3] - target[1, 3] = [0, 0, 3] - # fifth channel - output[0, 4] = [0, 1, 1] - target[0, 4] = [0, 1, 0] - output[1, 4] = [0, 0, 1] - target[1, 4] = [1, 1, 0] - - mpjpe = keypoint_mpjpe(output, target, mask) - self.assertAlmostEqual(mpjpe, 0.9625211990796929, delta=1e-4) - - p_mpjpe = keypoint_mpjpe(output, target, mask, 'procrustes') - self.assertAlmostEqual(p_mpjpe, 1.0047897634604497, delta=1e-4) - - s_mpjpe = keypoint_mpjpe(output, target, mask, 'scale') - self.assertAlmostEqual(s_mpjpe, 1.0277129678465953, delta=1e-4) - - with self.assertRaises(ValueError): - _ = keypoint_mpjpe(output, target, mask, 'alignment') +# Copyright (c) OpenMMLab. All rights reserved. 
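For the PCK tests above, the underlying definition is simple: a keypoint counts as correct when its distance to the ground truth, normalized per sample, falls below thr, and masked joints are excluded; a per-keypoint accuracy of -1 means no valid sample. A pure-numpy sketch of that per-keypoint accuracy (a simplification of keypoint_pck_accuracy, which additionally returns the average accuracy and the valid-joint count):

import numpy as np

def pck_per_keypoint(pred, gt, mask, thr, norm):
    """Per-keypoint PCK.

    pred, gt: (N, K, 2); mask: (N, K) bool; norm: (N, 2) per-sample scale.
    Returns a (K,) array with -1 where a keypoint has no valid sample.
    """
    dist = np.linalg.norm((pred - gt) / norm[:, None, :], axis=-1)
    dist = np.where(mask, dist, -1.0)  # mark masked joints invalid
    acc = np.full(pred.shape[1], -1.0)
    for k in range(pred.shape[1]):
        valid = dist[:, k] >= 0
        if valid.any():
            acc[k] = float((dist[valid, k] < thr).mean())
    return acc

pred = np.array([[[10.0, 0.0], [20.0, 20.0]]])
gt = np.array([[[10.0, 0.0], [10.0, 10.0]]])
mask = np.array([[True, True]])
norm = np.full((1, 2), 10.0)
print(pck_per_keypoint(pred, gt, mask, 0.5, norm))  # [1., 0.]: joint 1 is ~1.41 normalized units off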
+from unittest import TestCase + +import numpy as np +from numpy.testing import assert_array_almost_equal + +from mmpose.evaluation.functional import (keypoint_auc, keypoint_epe, + keypoint_mpjpe, keypoint_nme, + keypoint_pck_accuracy, + multilabel_classification_accuracy, + pose_pck_accuracy) + + +class TestKeypointEval(TestCase): + + def test_keypoint_pck_accuracy(self): + + output = np.zeros((2, 5, 2)) + target = np.zeros((2, 5, 2)) + mask = np.array([[True, True, False, True, True], + [True, True, False, True, True]]) + + # first channel + output[0, 0] = [10, 0] + target[0, 0] = [10, 0] + # second channel + output[0, 1] = [20, 20] + target[0, 1] = [10, 10] + # third channel + output[0, 2] = [0, 0] + target[0, 2] = [-1, 0] + # fourth channel + output[0, 3] = [30, 30] + target[0, 3] = [30, 30] + # fifth channel + output[0, 4] = [0, 10] + target[0, 4] = [0, 10] + + thr = np.full((2, 2), 10, dtype=np.float32) + + acc, avg_acc, cnt = keypoint_pck_accuracy(output, target, mask, 0.5, + thr) + + assert_array_almost_equal(acc, np.array([1, 0.5, -1, 1, 1]), decimal=4) + self.assertAlmostEqual(avg_acc, 0.875, delta=1e-4) + self.assertAlmostEqual(cnt, 4, delta=1e-4) + + acc, avg_acc, cnt = keypoint_pck_accuracy(output, target, mask, 0.5, + np.zeros((2, 2))) + assert_array_almost_equal( + acc, np.array([-1, -1, -1, -1, -1]), decimal=4) + self.assertAlmostEqual(avg_acc, 0, delta=1e-4) + self.assertAlmostEqual(cnt, 0, delta=1e-4) + + acc, avg_acc, cnt = keypoint_pck_accuracy(output, target, mask, 0.5, + np.array([[0, 0], [10, 10]])) + assert_array_almost_equal(acc, np.array([1, 1, -1, 1, 1]), decimal=4) + self.assertAlmostEqual(avg_acc, 1, delta=1e-4) + self.assertAlmostEqual(cnt, 4, delta=1e-4) + + def test_keypoint_auc(self): + output = np.zeros((1, 5, 2)) + target = np.zeros((1, 5, 2)) + mask = np.array([[True, True, False, True, True]]) + # first channel + output[0, 0] = [10, 4] + target[0, 0] = [10, 0] + # second channel + output[0, 1] = [10, 18] + target[0, 1] = [10, 10] + # third channel + output[0, 2] = [0, 0] + target[0, 2] = [0, -1] + # fourth channel + output[0, 3] = [40, 40] + target[0, 3] = [30, 30] + # fifth channel + output[0, 4] = [20, 10] + target[0, 4] = [0, 10] + + auc = keypoint_auc(output, target, mask, 20, 4) + self.assertAlmostEqual(auc, 0.375, delta=1e-4) + + def test_keypoint_epe(self): + output = np.zeros((1, 5, 2)) + target = np.zeros((1, 5, 2)) + mask = np.array([[True, True, False, True, True]]) + # first channel + output[0, 0] = [10, 4] + target[0, 0] = [10, 0] + # second channel + output[0, 1] = [10, 18] + target[0, 1] = [10, 10] + # third channel + output[0, 2] = [0, 0] + target[0, 2] = [-1, -1] + # fourth channel + output[0, 3] = [40, 40] + target[0, 3] = [30, 30] + # fifth channel + output[0, 4] = [20, 10] + target[0, 4] = [0, 10] + + epe = keypoint_epe(output, target, mask) + self.assertAlmostEqual(epe, 11.5355339, delta=1e-4) + + def test_keypoint_nme(self): + output = np.zeros((1, 5, 2)) + target = np.zeros((1, 5, 2)) + mask = np.array([[True, True, False, True, True]]) + # first channel + output[0, 0] = [10, 4] + target[0, 0] = [10, 0] + # second channel + output[0, 1] = [10, 18] + target[0, 1] = [10, 10] + # third channel + output[0, 2] = [0, 0] + target[0, 2] = [-1, -1] + # fourth channel + output[0, 3] = [40, 40] + target[0, 3] = [30, 30] + # fifth channel + output[0, 4] = [20, 10] + target[0, 4] = [0, 10] + + normalize_factor = np.ones((output.shape[0], output.shape[2])) + + nme = keypoint_nme(output, target, mask, normalize_factor) + 
self.assertAlmostEqual(nme, 11.5355339, delta=1e-4) + + def test_pose_pck_accuracy(self): + output = np.zeros((1, 5, 64, 64), dtype=np.float32) + target = np.zeros((1, 5, 64, 64), dtype=np.float32) + mask = np.array([[True, True, False, False, False]]) + # first channel + output[0, 0, 20, 20] = 1 + target[0, 0, 10, 10] = 1 + # second channel + output[0, 1, 30, 30] = 1 + target[0, 1, 30, 30] = 1 + + acc, avg_acc, cnt = pose_pck_accuracy(output, target, mask) + + assert_array_almost_equal(acc, np.array([0, 1, -1, -1, -1]), decimal=4) + self.assertAlmostEqual(avg_acc, 0.5, delta=1e-4) + self.assertAlmostEqual(cnt, 2, delta=1e-4) + + def test_multilabel_classification_accuracy(self): + output = np.array([[0.7, 0.8, 0.4], [0.8, 0.1, 0.1]]) + target = np.array([[1, 0, 0], [1, 0, 1]]) + mask = np.array([[True, True, True], [True, True, True]]) + thr = 0.5 + acc = multilabel_classification_accuracy(output, target, mask, thr) + self.assertEqual(acc, 0) + + output = np.array([[0.7, 0.2, 0.4], [0.8, 0.1, 0.9]]) + thr = 0.5 + acc = multilabel_classification_accuracy(output, target, mask, thr) + self.assertEqual(acc, 1) + + thr = 0.3 + acc = multilabel_classification_accuracy(output, target, mask, thr) + self.assertEqual(acc, 0.5) + + mask = np.array([[True, True, False], [True, True, True]]) + acc = multilabel_classification_accuracy(output, target, mask, thr) + self.assertEqual(acc, 1) + + def test_keypoint_mpjpe(self): + output = np.zeros((2, 5, 3)) + target = np.zeros((2, 5, 3)) + mask = np.array([[True, True, False, True, True], + [True, True, False, True, True]]) + + # first channel + output[0, 0] = [1, 0, 0] + target[0, 0] = [1, 0, 0] + output[1, 0] = [1, 0, 0] + target[1, 0] = [1, 1, 0] + # second channel + output[0, 1] = [2, 2, 0] + target[0, 1] = [1, 1, 1] + output[1, 1] = [2, 2, 1] + target[1, 1] = [1, 0, 1] + # third channel + output[0, 2] = [0, 0, -1] + target[0, 2] = [-1, 0, 0] + output[1, 2] = [-1, 0, 0] + target[1, 2] = [-1, 0, 0] + # fourth channel + output[0, 3] = [3, 3, 1] + target[0, 3] = [3, 3, 1] + output[1, 3] = [0, 0, 3] + target[1, 3] = [0, 0, 3] + # fifth channel + output[0, 4] = [0, 1, 1] + target[0, 4] = [0, 1, 0] + output[1, 4] = [0, 0, 1] + target[1, 4] = [1, 1, 0] + + mpjpe = keypoint_mpjpe(output, target, mask) + self.assertAlmostEqual(mpjpe, 0.9625211990796929, delta=1e-4) + + p_mpjpe = keypoint_mpjpe(output, target, mask, 'procrustes') + self.assertAlmostEqual(p_mpjpe, 1.0047897634604497, delta=1e-4) + + s_mpjpe = keypoint_mpjpe(output, target, mask, 'scale') + self.assertAlmostEqual(s_mpjpe, 1.0277129678465953, delta=1e-4) + + with self.assertRaises(ValueError): + _ = keypoint_mpjpe(output, target, mask, 'alignment') diff --git a/tests/test_evaluation/test_functional/test_nms.py b/tests/test_evaluation/test_functional/test_nms.py index b29ed86ccb..824cbd880c 100644 --- a/tests/test_evaluation/test_functional/test_nms.py +++ b/tests/test_evaluation/test_functional/test_nms.py @@ -1,40 +1,40 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
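The keypoint_mpjpe tests above cover three alignment modes; the default is the plain mean per-joint position error, i.e. the mean Euclidean distance over valid joints, while 'procrustes' and 'scale' first align the prediction to the target before measuring. A minimal numpy sketch of the unaligned case:

import numpy as np

def mpjpe(pred, gt, mask):
    """Mean per-joint position error over valid joints.

    pred, gt: (N, K, 3); mask: (N, K) bool.
    """
    errors = np.linalg.norm(pred - gt, axis=-1)
    return float(errors[mask].mean())

pred = np.array([[[1.0, 0.0, 0.0], [2.0, 2.0, 0.0]]])
gt = np.array([[[1.0, 0.0, 0.0], [1.0, 1.0, 1.0]]])
mask = np.array([[True, True]])
print(mpjpe(pred, gt, mask))  # (0 + sqrt(3)) / 2 ≈ 0.866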
-from unittest import TestCase - -import numpy as np - -from mmpose.evaluation.functional.nms import nearby_joints_nms - - -class TestNearbyJointsNMS(TestCase): - - def test_nearby_joints_nms(self): - - kpts_db = [] - keep_pose_inds = nearby_joints_nms( - kpts_db, 0.05, score_per_joint=True, max_dets=1) - self.assertEqual(len(keep_pose_inds), 0) - - kpts_db = [] - for _ in range(5): - kpts_db.append( - dict(keypoints=np.random.rand(3, 2), score=np.random.rand(3))) - keep_pose_inds = nearby_joints_nms( - kpts_db, 0.05, score_per_joint=True, max_dets=1) - self.assertEqual(len(keep_pose_inds), 1) - self.assertLess(keep_pose_inds[0], 5) - - kpts_db = [] - for _ in range(5): - kpts_db.append( - dict(keypoints=np.random.rand(3, 2), score=np.random.rand())) - keep_pose_inds = nearby_joints_nms( - kpts_db, 0.05, num_nearby_joints_thr=2) - self.assertLessEqual(len(keep_pose_inds), 5) - self.assertGreater(len(keep_pose_inds), 0) - - with self.assertRaises(AssertionError): - _ = nearby_joints_nms(kpts_db, 0, num_nearby_joints_thr=2) - - with self.assertRaises(AssertionError): - _ = nearby_joints_nms(kpts_db, 0.05, num_nearby_joints_thr=3) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np + +from mmpose.evaluation.functional.nms import nearby_joints_nms + + +class TestNearbyJointsNMS(TestCase): + + def test_nearby_joints_nms(self): + + kpts_db = [] + keep_pose_inds = nearby_joints_nms( + kpts_db, 0.05, score_per_joint=True, max_dets=1) + self.assertEqual(len(keep_pose_inds), 0) + + kpts_db = [] + for _ in range(5): + kpts_db.append( + dict(keypoints=np.random.rand(3, 2), score=np.random.rand(3))) + keep_pose_inds = nearby_joints_nms( + kpts_db, 0.05, score_per_joint=True, max_dets=1) + self.assertEqual(len(keep_pose_inds), 1) + self.assertLess(keep_pose_inds[0], 5) + + kpts_db = [] + for _ in range(5): + kpts_db.append( + dict(keypoints=np.random.rand(3, 2), score=np.random.rand())) + keep_pose_inds = nearby_joints_nms( + kpts_db, 0.05, num_nearby_joints_thr=2) + self.assertLessEqual(len(keep_pose_inds), 5) + self.assertGreater(len(keep_pose_inds), 0) + + with self.assertRaises(AssertionError): + _ = nearby_joints_nms(kpts_db, 0, num_nearby_joints_thr=2) + + with self.assertRaises(AssertionError): + _ = nearby_joints_nms(kpts_db, 0.05, num_nearby_joints_thr=3) diff --git a/tests/test_evaluation/test_metrics/test_coco_metric.py b/tests/test_evaluation/test_metrics/test_coco_metric.py index 82bf0bc572..34f0c46576 100644 --- a/tests/test_evaluation/test_metrics/test_coco_metric.py +++ b/tests/test_evaluation/test_metrics/test_coco_metric.py @@ -1,612 +1,612 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -import os.path as osp -import tempfile -from collections import defaultdict -from unittest import TestCase - -import numpy as np -from mmengine.fileio import dump, load -from xtcocotools.coco import COCO - -from mmpose.datasets.datasets.utils import parse_pose_metainfo -from mmpose.evaluation.metrics import CocoMetric - - -class TestCocoMetric(TestCase): - - def setUp(self): - """Setup some variables which are used in every test method. 
- - TestCase calls functions in this order: setUp() -> testMethod() -> - tearDown() -> cleanUp() - """ - self.tmp_dir = tempfile.TemporaryDirectory() - - self.ann_file_coco = 'tests/data/coco/test_coco.json' - meta_info_coco = dict(from_file='configs/_base_/datasets/coco.py') - self.dataset_meta_coco = parse_pose_metainfo(meta_info_coco) - self.coco = COCO(self.ann_file_coco) - self.dataset_meta_coco['CLASSES'] = self.coco.loadCats( - self.coco.getCatIds()) - - self.topdown_data_coco = self._convert_ann_to_topdown_batch_data( - self.ann_file_coco) - assert len(self.topdown_data_coco) == 14 - self.bottomup_data_coco = self._convert_ann_to_bottomup_batch_data( - self.ann_file_coco) - assert len(self.bottomup_data_coco) == 4 - self.target_coco = { - 'coco/AP': 1.0, - 'coco/AP .5': 1.0, - 'coco/AP .75': 1.0, - 'coco/AP (M)': 1.0, - 'coco/AP (L)': 1.0, - 'coco/AR': 1.0, - 'coco/AR .5': 1.0, - 'coco/AR .75': 1.0, - 'coco/AR (M)': 1.0, - 'coco/AR (L)': 1.0, - } - - self.ann_file_crowdpose = 'tests/data/crowdpose/test_crowdpose.json' - self.coco_crowdpose = COCO(self.ann_file_crowdpose) - meta_info_crowdpose = dict( - from_file='configs/_base_/datasets/crowdpose.py') - self.dataset_meta_crowdpose = parse_pose_metainfo(meta_info_crowdpose) - self.dataset_meta_crowdpose['CLASSES'] = self.coco_crowdpose.loadCats( - self.coco_crowdpose.getCatIds()) - - self.topdown_data_crowdpose = self._convert_ann_to_topdown_batch_data( - self.ann_file_crowdpose) - assert len(self.topdown_data_crowdpose) == 5 - self.bottomup_data_crowdpose = \ - self._convert_ann_to_bottomup_batch_data(self.ann_file_crowdpose) - assert len(self.bottomup_data_crowdpose) == 2 - - self.target_crowdpose = { - 'crowdpose/AP': 1.0, - 'crowdpose/AP .5': 1.0, - 'crowdpose/AP .75': 1.0, - 'crowdpose/AR': 1.0, - 'crowdpose/AR .5': 1.0, - 'crowdpose/AR .75': 1.0, - 'crowdpose/AP(E)': -1.0, - 'crowdpose/AP(M)': 1.0, - 'crowdpose/AP(H)': -1.0, - } - - self.ann_file_ap10k = 'tests/data/ap10k/test_ap10k.json' - self.coco_ap10k = COCO(self.ann_file_ap10k) - meta_info_ap10k = dict(from_file='configs/_base_/datasets/ap10k.py') - self.dataset_meta_ap10k = parse_pose_metainfo(meta_info_ap10k) - self.dataset_meta_ap10k['CLASSES'] = self.coco_ap10k.loadCats( - self.coco_ap10k.getCatIds()) - - self.topdown_data_ap10k = self._convert_ann_to_topdown_batch_data( - self.ann_file_ap10k) - assert len(self.topdown_data_ap10k) == 2 - self.bottomup_data_ap10k = self._convert_ann_to_bottomup_batch_data( - self.ann_file_ap10k) - assert len(self.bottomup_data_ap10k) == 2 - - self.target_ap10k = { - 'coco/AP': 1.0, - 'coco/AP .5': 1.0, - 'coco/AP .75': 1.0, - 'coco/AP (M)': -1.0, - 'coco/AP (L)': 1.0, - 'coco/AR': 1.0, - 'coco/AR .5': 1.0, - 'coco/AR .75': 1.0, - 'coco/AR (M)': -1.0, - 'coco/AR (L)': 1.0, - } - - def _convert_ann_to_topdown_batch_data(self, ann_file): - """Convert annotations to topdown-style batch data.""" - topdown_data = [] - db = load(ann_file) - imgid2info = dict() - for img in db['images']: - imgid2info[img['id']] = img - for ann in db['annotations']: - w, h = ann['bbox'][2], ann['bbox'][3] - bboxes = np.array(ann['bbox'], dtype=np.float32).reshape(-1, 4) - bbox_scales = np.array([w * 1.25, h * 1.25]).reshape(-1, 2) - keypoints = np.array(ann['keypoints']).reshape((1, -1, 3)) - - gt_instances = { - 'bbox_scales': bbox_scales, - 'bbox_scores': np.ones((1, ), dtype=np.float32), - 'bboxes': bboxes, - } - pred_instances = { - 'keypoints': keypoints[..., :2], - 'keypoint_scores': keypoints[..., -1], - } - - data = {'inputs': None} - data_sample = { - 
'id': ann['id'], - 'img_id': ann['image_id'], - 'category_id': ann.get('category_id', 1), - 'gt_instances': gt_instances, - 'pred_instances': pred_instances, - # dummy image_shape for testing - 'ori_shape': [640, 480], - # store the raw annotation info to test without ann_file - 'raw_ann_info': copy.deepcopy(ann), - } - - # add crowd_index to data_sample if it is present in the image_info - if 'crowdIndex' in imgid2info[ann['image_id']]: - data_sample['crowd_index'] = imgid2info[ - ann['image_id']]['crowdIndex'] - # batch size = 1 - data_batch = [data] - data_samples = [data_sample] - topdown_data.append((data_batch, data_samples)) - - return topdown_data - - def _convert_ann_to_bottomup_batch_data(self, ann_file): - """Convert annotations to bottomup-style batch data.""" - img2ann = defaultdict(list) - db = load(ann_file) - for ann in db['annotations']: - img2ann[ann['image_id']].append(ann) - - bottomup_data = [] - for img_id, anns in img2ann.items(): - keypoints = np.array([ann['keypoints'] for ann in anns]).reshape( - (len(anns), -1, 3)) - - gt_instances = { - 'bbox_scores': np.ones((len(anns)), dtype=np.float32) - } - - pred_instances = { - 'keypoints': keypoints[..., :2], - 'keypoint_scores': keypoints[..., -1], - } - - data = {'inputs': None} - data_sample = { - 'id': [ann['id'] for ann in anns], - 'img_id': img_id, - 'gt_instances': gt_instances, - 'pred_instances': pred_instances - } - - # batch size = 1 - data_batch = [data] - data_samples = [data_sample] - bottomup_data.append((data_batch, data_samples)) - return bottomup_data - - def tearDown(self): - self.tmp_dir.cleanup() - - def test_init(self): - """test metric init method.""" - # test score_mode option - with self.assertRaisesRegex(ValueError, - '`score_mode` should be one of'): - _ = CocoMetric(ann_file=self.ann_file_coco, score_mode='invalid') - - # test nms_mode option - with self.assertRaisesRegex(ValueError, '`nms_mode` should be one of'): - _ = CocoMetric(ann_file=self.ann_file_coco, nms_mode='invalid') - - # test format_only option - with self.assertRaisesRegex( - AssertionError, - '`outfile_prefix` can not be None when `format_only` is True'): - _ = CocoMetric( - ann_file=self.ann_file_coco, - format_only=True, - outfile_prefix=None) - - def test_other_methods(self): - """test other useful methods.""" - # test `_sort_and_unique_bboxes` method - metric_coco = CocoMetric( - ann_file=self.ann_file_coco, score_mode='bbox', nms_mode='none') - metric_coco.dataset_meta = self.dataset_meta_coco - # process samples - for data_batch, data_samples in self.topdown_data_coco: - metric_coco.process(data_batch, data_samples) - # process one extra sample - data_batch, data_samples = self.topdown_data_coco[0] - metric_coco.process(data_batch, data_samples) - # an extra sample - eval_results = metric_coco.evaluate( - size=len(self.topdown_data_coco) + 1) - self.assertDictEqual(eval_results, self.target_coco) - - def test_format_only(self): - """test `format_only` option.""" - metric_coco = CocoMetric( - ann_file=self.ann_file_coco, - format_only=True, - outfile_prefix=f'{self.tmp_dir.name}/test', - score_mode='bbox_keypoint', - nms_mode='oks_nms') - metric_coco.dataset_meta = self.dataset_meta_coco - # process one sample - data_batch, data_samples = self.topdown_data_coco[0] - metric_coco.process(data_batch, data_samples) - eval_results = metric_coco.evaluate(size=1) - self.assertDictEqual(eval_results, {}) - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, 'test.keypoints.json'))) - - # test when gt annotations are absent 
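Returning to the nearby_joints_nms tests a few hunks above: the function takes a list of candidate poses and suppresses those whose joints nearly coincide with a higher-scoring pose. A usage sketch with random 17-joint poses; the 0.05 distance threshold matches the tests and assumes coordinates in the unit square.

import numpy as np

from mmpose.evaluation.functional.nms import nearby_joints_nms

# Five random candidate poses with per-joint scores.
kpts_db = [
    dict(keypoints=np.random.rand(17, 2), score=np.random.rand(17))
    for _ in range(5)
]

keep = nearby_joints_nms(
    kpts_db,
    0.05,                  # joints closer than this count as "nearby"
    score_per_joint=True,  # scores are per joint rather than per instance
    max_dets=1)
print(keep)  # indices of the poses kept after suppression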
- db_ = load(self.ann_file_coco) - del db_['annotations'] - tmp_ann_file = osp.join(self.tmp_dir.name, 'temp_ann.json') - dump(db_, tmp_ann_file, sort_keys=True, indent=4) - with self.assertRaisesRegex( - AssertionError, - 'Ground truth annotations are required for evaluation'): - _ = CocoMetric(ann_file=tmp_ann_file, format_only=False) - - def test_bottomup_evaluate(self): - """test bottomup-style COCO metric evaluation.""" - # case1: score_mode='bbox', nms_mode='none' - metric_coco = CocoMetric( - ann_file=self.ann_file_coco, - outfile_prefix=f'{self.tmp_dir.name}/test', - score_mode='bbox', - nms_mode='none') - metric_coco.dataset_meta = self.dataset_meta_coco - - # process samples - for data_batch, data_samples in self.bottomup_data_coco: - metric_coco.process(data_batch, data_samples) - - eval_results = metric_coco.evaluate(size=len(self.bottomup_data_coco)) - self.assertDictEqual(eval_results, self.target_coco) - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, 'test.keypoints.json'))) - - def test_topdown_alignment(self): - """Test whether the output of CocoMetric and the original - TopDownCocoDataset are the same.""" - topdown_data = [] - db = load(self.ann_file_coco) - - for ann in db['annotations']: - w, h = ann['bbox'][2], ann['bbox'][3] - bboxes = np.array(ann['bbox'], dtype=np.float32).reshape(-1, 4) - bbox_scales = np.array([w * 1.25, h * 1.25]).reshape(-1, 2) - - keypoints = np.array( - ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) - keypoints[..., 0] = keypoints[..., 0] * 0.98 - keypoints[..., 1] = keypoints[..., 1] * 1.02 - keypoints[..., 2] = keypoints[..., 2] * 0.8 - - gt_instances = { - 'bbox_scales': bbox_scales, - 'bbox_scores': np.ones((1, ), dtype=np.float32) * 0.98, - 'bboxes': bboxes, - } - pred_instances = { - 'keypoints': keypoints[..., :2], - 'keypoint_scores': keypoints[..., -1], - } - - data = {'inputs': None} - data_sample = { - 'id': ann['id'], - 'img_id': ann['image_id'], - 'gt_instances': gt_instances, - 'pred_instances': pred_instances - } - # batch size = 1 - data_batch = [data] - data_samples = [data_sample] - topdown_data.append((data_batch, data_samples)) - - # case 1: - # typical setting: score_mode='bbox_keypoint', nms_mode='oks_nms' - metric_coco = CocoMetric( - ann_file=self.ann_file_coco, - outfile_prefix=f'{self.tmp_dir.name}/test_align1', - score_mode='bbox_keypoint', - nms_mode='oks_nms') - metric_coco.dataset_meta = self.dataset_meta_coco - - # process samples - for data_batch, data_samples in topdown_data: - metric_coco.process(data_batch, data_samples) - - eval_results = metric_coco.evaluate(size=len(topdown_data)) - - target = { - 'coco/AP': 0.5287458745874587, - 'coco/AP .5': 0.9042904290429042, - 'coco/AP .75': 0.5009900990099009, - 'coco/AP (M)': 0.42475247524752474, - 'coco/AP (L)': 0.6219554455445544, - 'coco/AR': 0.5833333333333333, - 'coco/AR .5': 0.9166666666666666, - 'coco/AR .75': 0.5833333333333334, - 'coco/AR (M)': 0.44000000000000006, - 'coco/AR (L)': 0.6857142857142857, - } - - for key in eval_results.keys(): - self.assertAlmostEqual(eval_results[key], target[key]) - - self.assertTrue( - osp.isfile( - osp.join(self.tmp_dir.name, 'test_align1.keypoints.json'))) - - # case 2: score_mode='bbox_rle', nms_mode='oks_nms' - metric_coco = CocoMetric( - ann_file=self.ann_file_coco, - outfile_prefix=f'{self.tmp_dir.name}/test_align2', - score_mode='bbox_rle', - nms_mode='oks_nms') - metric_coco.dataset_meta = self.dataset_meta_coco - - # process samples - for data_batch, data_samples in topdown_data: - 
metric_coco.process(data_batch, data_samples) - - eval_results = metric_coco.evaluate(size=len(topdown_data)) - - target = { - 'coco/AP': 0.5004950495049505, - 'coco/AP .5': 0.8836633663366337, - 'coco/AP .75': 0.4679867986798679, - 'coco/AP (M)': 0.42475247524752474, - 'coco/AP (L)': 0.5814108910891089, - 'coco/AR': 0.5833333333333333, - 'coco/AR .5': 0.9166666666666666, - 'coco/AR .75': 0.5833333333333334, - 'coco/AR (M)': 0.44000000000000006, - 'coco/AR (L)': 0.6857142857142857, - } - - for key in eval_results.keys(): - self.assertAlmostEqual(eval_results[key], target[key]) - - self.assertTrue( - osp.isfile( - osp.join(self.tmp_dir.name, 'test_align2.keypoints.json'))) - - # case 3: score_mode='bbox_keypoint', nms_mode='soft_oks_nms' - topdown_data = [] - anns = db['annotations'] - for i, ann in enumerate(anns): - w, h = ann['bbox'][2], ann['bbox'][3] - bboxes = np.array(ann['bbox'], dtype=np.float32).reshape(-1, 4) - bbox_scales = np.array([w * 1.25, h * 1.25]).reshape(-1, 2) - - keypoints = np.array( - ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) - keypoints[..., 0] = keypoints[..., 0] * (1 - i / 100) - keypoints[..., 1] = keypoints[..., 1] * (1 + i / 100) - keypoints[..., 2] = keypoints[..., 2] * (1 - i / 100) - - gt_instances0 = { - 'bbox_scales': bbox_scales, - 'bbox_scores': np.ones((1, ), dtype=np.float32), - 'bboxes': bboxes, - } - pred_instances0 = { - 'keypoints': keypoints[..., :2], - 'keypoint_scores': keypoints[..., -1], - } - - data0 = {'inputs': None} - data_sample0 = { - 'id': ann['id'], - 'img_id': ann['image_id'], - 'gt_instances': gt_instances0, - 'pred_instances': pred_instances0 - } - - keypoints = np.array( - ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) - keypoints[..., 0] = keypoints[..., 0] * (1 + i / 100) - keypoints[..., 1] = keypoints[..., 1] * (1 - i / 100) - keypoints[..., 2] = keypoints[..., 2] * (1 - 2 * i / 100) - - gt_instances1 = { - 'bbox_scales': bbox_scales, - 'bboxes': bboxes, - 'bbox_scores': np.ones( - (1, ), dtype=np.float32) * (1 - 2 * i / 100) - } - pred_instances1 = { - 'keypoints': keypoints[..., :2], - 'keypoint_scores': keypoints[..., -1], - } - - data1 = {'inputs': None} - data_sample1 = { - 'id': ann['id'] + 1, - 'img_id': ann['image_id'], - 'gt_instances': gt_instances1, - 'pred_instances': pred_instances1 - } - - # batch size = 2 - data_batch = [data0, data1] - data_samples = [data_sample0, data_sample1] - topdown_data.append((data_batch, data_samples)) - - metric_coco = CocoMetric( - ann_file=self.ann_file_coco, - outfile_prefix=f'{self.tmp_dir.name}/test_align3', - score_mode='bbox_keypoint', - keypoint_score_thr=0.2, - nms_thr=0.9, - nms_mode='soft_oks_nms') - metric_coco.dataset_meta = self.dataset_meta_coco - - # process samples - for data_batch, data_samples in topdown_data: - metric_coco.process(data_batch, data_samples) - - eval_results = metric_coco.evaluate(size=len(topdown_data) * 2) - - target = { - 'coco/AP': 0.17073707370737073, - 'coco/AP .5': 0.25055005500550054, - 'coco/AP .75': 0.10671067106710669, - 'coco/AP (M)': 0.0, - 'coco/AP (L)': 0.29315181518151806, - 'coco/AR': 0.2416666666666666, - 'coco/AR .5': 0.3333333333333333, - 'coco/AR .75': 0.16666666666666666, - 'coco/AR (M)': 0.0, - 'coco/AR (L)': 0.41428571428571426, - } - - for key in eval_results.keys(): - self.assertAlmostEqual(eval_results[key], target[key]) - - self.assertTrue( - osp.isfile( - osp.join(self.tmp_dir.name, 'test_align3.keypoints.json'))) - - def test_topdown_evaluate(self): - """test topdown-style COCO metric evaluation.""" - # 
case 1: score_mode='bbox', nms_mode='none' - metric_coco = CocoMetric( - ann_file=self.ann_file_coco, - outfile_prefix=f'{self.tmp_dir.name}/test1', - score_mode='bbox', - nms_mode='none') - metric_coco.dataset_meta = self.dataset_meta_coco - - # process samples - for data_batch, data_samples in self.topdown_data_coco: - metric_coco.process(data_batch, data_samples) - - eval_results = metric_coco.evaluate(size=len(self.topdown_data_coco)) - - self.assertDictEqual(eval_results, self.target_coco) - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, 'test1.keypoints.json'))) - - # case 2: score_mode='bbox_keypoint', nms_mode='oks_nms' - metric_coco = CocoMetric( - ann_file=self.ann_file_coco, - outfile_prefix=f'{self.tmp_dir.name}/test2', - score_mode='bbox_keypoint', - nms_mode='oks_nms') - metric_coco.dataset_meta = self.dataset_meta_coco - - # process samples - for data_batch, data_samples in self.topdown_data_coco: - metric_coco.process(data_batch, data_samples) - - eval_results = metric_coco.evaluate(size=len(self.topdown_data_coco)) - - self.assertDictEqual(eval_results, self.target_coco) - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, 'test2.keypoints.json'))) - - # case 3: score_mode='bbox_rle', nms_mode='soft_oks_nms' - metric_coco = CocoMetric( - ann_file=self.ann_file_coco, - outfile_prefix=f'{self.tmp_dir.name}/test3', - score_mode='bbox_rle', - nms_mode='soft_oks_nms') - metric_coco.dataset_meta = self.dataset_meta_coco - - # process samples - for data_batch, data_samples in self.topdown_data_coco: - metric_coco.process(data_batch, data_samples) - - eval_results = metric_coco.evaluate(size=len(self.topdown_data_coco)) - - self.assertDictEqual(eval_results, self.target_coco) - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, 'test3.keypoints.json'))) - - # case 4: test without providing ann_file - metric_coco = CocoMetric(outfile_prefix=f'{self.tmp_dir.name}/test4') - metric_coco.dataset_meta = self.dataset_meta_coco - # process samples - for data_batch, data_samples in self.topdown_data_coco: - metric_coco.process(data_batch, data_samples) - eval_results = metric_coco.evaluate(size=len(self.topdown_data_coco)) - self.assertDictEqual(eval_results, self.target_coco) - # test whether convert the annotation to COCO format - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, 'test4.gt.json'))) - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, 'test4.keypoints.json'))) - - # case 5: test Crowdpose dataset - metric_crowdpose = CocoMetric( - ann_file=self.ann_file_crowdpose, - outfile_prefix=f'{self.tmp_dir.name}/test5', - use_area=False, - iou_type='keypoints_crowd', - prefix='crowdpose') - metric_crowdpose.dataset_meta = self.dataset_meta_crowdpose - # process samples - for data_batch, data_samples in self.topdown_data_crowdpose: - metric_crowdpose.process(data_batch, data_samples) - eval_results = metric_crowdpose.evaluate( - size=len(self.topdown_data_crowdpose)) - self.assertDictEqual(eval_results, self.target_crowdpose) - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, 'test5.keypoints.json'))) - - # case 6: test Crowdpose dataset + without ann_file - metric_crowdpose = CocoMetric( - outfile_prefix=f'{self.tmp_dir.name}/test6', - use_area=False, - iou_type='keypoints_crowd', - prefix='crowdpose') - metric_crowdpose.dataset_meta = self.dataset_meta_crowdpose - # process samples - for data_batch, data_samples in self.topdown_data_crowdpose: - metric_crowdpose.process(data_batch, data_samples) - eval_results = 
metric_crowdpose.evaluate( - size=len(self.topdown_data_crowdpose)) - self.assertDictEqual(eval_results, self.target_crowdpose) - # test whether convert the annotation to COCO format - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, 'test6.gt.json'))) - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, 'test6.keypoints.json'))) - - # case 7: test AP10k dataset - metric_ap10k = CocoMetric( - ann_file=self.ann_file_ap10k, - outfile_prefix=f'{self.tmp_dir.name}/test7') - metric_ap10k.dataset_meta = self.dataset_meta_ap10k - # process samples - for data_batch, data_samples in self.topdown_data_ap10k: - metric_ap10k.process(data_batch, data_samples) - eval_results = metric_ap10k.evaluate(size=len(self.topdown_data_ap10k)) - for key in self.target_ap10k: - self.assertAlmostEqual(eval_results[key], self.target_ap10k[key]) - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, 'test7.keypoints.json'))) - - # case 8: test Crowdpose dataset + without ann_file - metric_ap10k = CocoMetric(outfile_prefix=f'{self.tmp_dir.name}/test8') - metric_ap10k.dataset_meta = self.dataset_meta_ap10k - # process samples - for data_batch, data_samples in self.topdown_data_ap10k: - metric_ap10k.process(data_batch, data_samples) - eval_results = metric_ap10k.evaluate(size=len(self.topdown_data_ap10k)) - for key in self.target_ap10k: - self.assertAlmostEqual(eval_results[key], self.target_ap10k[key]) - # test whether convert the annotation to COCO format - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, 'test8.gt.json'))) - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, 'test8.keypoints.json'))) +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import os.path as osp +import tempfile +from collections import defaultdict +from unittest import TestCase + +import numpy as np +from mmengine.fileio import dump, load +from xtcocotools.coco import COCO + +from mmpose.datasets.datasets.utils import parse_pose_metainfo +from mmpose.evaluation.metrics import CocoMetric + + +class TestCocoMetric(TestCase): + + def setUp(self): + """Setup some variables which are used in every test method. 
+ + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + self.tmp_dir = tempfile.TemporaryDirectory() + + self.ann_file_coco = 'tests/data/coco/test_coco.json' + meta_info_coco = dict(from_file='configs/_base_/datasets/coco.py') + self.dataset_meta_coco = parse_pose_metainfo(meta_info_coco) + self.coco = COCO(self.ann_file_coco) + self.dataset_meta_coco['CLASSES'] = self.coco.loadCats( + self.coco.getCatIds()) + + self.topdown_data_coco = self._convert_ann_to_topdown_batch_data( + self.ann_file_coco) + assert len(self.topdown_data_coco) == 14 + self.bottomup_data_coco = self._convert_ann_to_bottomup_batch_data( + self.ann_file_coco) + assert len(self.bottomup_data_coco) == 4 + self.target_coco = { + 'coco/AP': 1.0, + 'coco/AP .5': 1.0, + 'coco/AP .75': 1.0, + 'coco/AP (M)': 1.0, + 'coco/AP (L)': 1.0, + 'coco/AR': 1.0, + 'coco/AR .5': 1.0, + 'coco/AR .75': 1.0, + 'coco/AR (M)': 1.0, + 'coco/AR (L)': 1.0, + } + + self.ann_file_crowdpose = 'tests/data/crowdpose/test_crowdpose.json' + self.coco_crowdpose = COCO(self.ann_file_crowdpose) + meta_info_crowdpose = dict( + from_file='configs/_base_/datasets/crowdpose.py') + self.dataset_meta_crowdpose = parse_pose_metainfo(meta_info_crowdpose) + self.dataset_meta_crowdpose['CLASSES'] = self.coco_crowdpose.loadCats( + self.coco_crowdpose.getCatIds()) + + self.topdown_data_crowdpose = self._convert_ann_to_topdown_batch_data( + self.ann_file_crowdpose) + assert len(self.topdown_data_crowdpose) == 5 + self.bottomup_data_crowdpose = \ + self._convert_ann_to_bottomup_batch_data(self.ann_file_crowdpose) + assert len(self.bottomup_data_crowdpose) == 2 + + self.target_crowdpose = { + 'crowdpose/AP': 1.0, + 'crowdpose/AP .5': 1.0, + 'crowdpose/AP .75': 1.0, + 'crowdpose/AR': 1.0, + 'crowdpose/AR .5': 1.0, + 'crowdpose/AR .75': 1.0, + 'crowdpose/AP(E)': -1.0, + 'crowdpose/AP(M)': 1.0, + 'crowdpose/AP(H)': -1.0, + } + + self.ann_file_ap10k = 'tests/data/ap10k/test_ap10k.json' + self.coco_ap10k = COCO(self.ann_file_ap10k) + meta_info_ap10k = dict(from_file='configs/_base_/datasets/ap10k.py') + self.dataset_meta_ap10k = parse_pose_metainfo(meta_info_ap10k) + self.dataset_meta_ap10k['CLASSES'] = self.coco_ap10k.loadCats( + self.coco_ap10k.getCatIds()) + + self.topdown_data_ap10k = self._convert_ann_to_topdown_batch_data( + self.ann_file_ap10k) + assert len(self.topdown_data_ap10k) == 2 + self.bottomup_data_ap10k = self._convert_ann_to_bottomup_batch_data( + self.ann_file_ap10k) + assert len(self.bottomup_data_ap10k) == 2 + + self.target_ap10k = { + 'coco/AP': 1.0, + 'coco/AP .5': 1.0, + 'coco/AP .75': 1.0, + 'coco/AP (M)': -1.0, + 'coco/AP (L)': 1.0, + 'coco/AR': 1.0, + 'coco/AR .5': 1.0, + 'coco/AR .75': 1.0, + 'coco/AR (M)': -1.0, + 'coco/AR (L)': 1.0, + } + + def _convert_ann_to_topdown_batch_data(self, ann_file): + """Convert annotations to topdown-style batch data.""" + topdown_data = [] + db = load(ann_file) + imgid2info = dict() + for img in db['images']: + imgid2info[img['id']] = img + for ann in db['annotations']: + w, h = ann['bbox'][2], ann['bbox'][3] + bboxes = np.array(ann['bbox'], dtype=np.float32).reshape(-1, 4) + bbox_scales = np.array([w * 1.25, h * 1.25]).reshape(-1, 2) + keypoints = np.array(ann['keypoints']).reshape((1, -1, 3)) + + gt_instances = { + 'bbox_scales': bbox_scales, + 'bbox_scores': np.ones((1, ), dtype=np.float32), + 'bboxes': bboxes, + } + pred_instances = { + 'keypoints': keypoints[..., :2], + 'keypoint_scores': keypoints[..., -1], + } + + data = {'inputs': None} + data_sample = { + 
'id': ann['id'], + 'img_id': ann['image_id'], + 'category_id': ann.get('category_id', 1), + 'gt_instances': gt_instances, + 'pred_instances': pred_instances, + # dummy image_shape for testing + 'ori_shape': [640, 480], + # store the raw annotation info to test without ann_file + 'raw_ann_info': copy.deepcopy(ann), + } + + # add crowd_index to data_sample if it is present in the image_info + if 'crowdIndex' in imgid2info[ann['image_id']]: + data_sample['crowd_index'] = imgid2info[ + ann['image_id']]['crowdIndex'] + # batch size = 1 + data_batch = [data] + data_samples = [data_sample] + topdown_data.append((data_batch, data_samples)) + + return topdown_data + + def _convert_ann_to_bottomup_batch_data(self, ann_file): + """Convert annotations to bottomup-style batch data.""" + img2ann = defaultdict(list) + db = load(ann_file) + for ann in db['annotations']: + img2ann[ann['image_id']].append(ann) + + bottomup_data = [] + for img_id, anns in img2ann.items(): + keypoints = np.array([ann['keypoints'] for ann in anns]).reshape( + (len(anns), -1, 3)) + + gt_instances = { + 'bbox_scores': np.ones((len(anns)), dtype=np.float32) + } + + pred_instances = { + 'keypoints': keypoints[..., :2], + 'keypoint_scores': keypoints[..., -1], + } + + data = {'inputs': None} + data_sample = { + 'id': [ann['id'] for ann in anns], + 'img_id': img_id, + 'gt_instances': gt_instances, + 'pred_instances': pred_instances + } + + # batch size = 1 + data_batch = [data] + data_samples = [data_sample] + bottomup_data.append((data_batch, data_samples)) + return bottomup_data + + def tearDown(self): + self.tmp_dir.cleanup() + + def test_init(self): + """test metric init method.""" + # test score_mode option + with self.assertRaisesRegex(ValueError, + '`score_mode` should be one of'): + _ = CocoMetric(ann_file=self.ann_file_coco, score_mode='invalid') + + # test nms_mode option + with self.assertRaisesRegex(ValueError, '`nms_mode` should be one of'): + _ = CocoMetric(ann_file=self.ann_file_coco, nms_mode='invalid') + + # test format_only option + with self.assertRaisesRegex( + AssertionError, + '`outfile_prefix` can not be None when `format_only` is True'): + _ = CocoMetric( + ann_file=self.ann_file_coco, + format_only=True, + outfile_prefix=None) + + def test_other_methods(self): + """test other useful methods.""" + # test `_sort_and_unique_bboxes` method + metric_coco = CocoMetric( + ann_file=self.ann_file_coco, score_mode='bbox', nms_mode='none') + metric_coco.dataset_meta = self.dataset_meta_coco + # process samples + for data_batch, data_samples in self.topdown_data_coco: + metric_coco.process(data_batch, data_samples) + # process one extra sample + data_batch, data_samples = self.topdown_data_coco[0] + metric_coco.process(data_batch, data_samples) + # an extra sample + eval_results = metric_coco.evaluate( + size=len(self.topdown_data_coco) + 1) + self.assertDictEqual(eval_results, self.target_coco) + + def test_format_only(self): + """test `format_only` option.""" + metric_coco = CocoMetric( + ann_file=self.ann_file_coco, + format_only=True, + outfile_prefix=f'{self.tmp_dir.name}/test', + score_mode='bbox_keypoint', + nms_mode='oks_nms') + metric_coco.dataset_meta = self.dataset_meta_coco + # process one sample + data_batch, data_samples = self.topdown_data_coco[0] + metric_coco.process(data_batch, data_samples) + eval_results = metric_coco.evaluate(size=1) + self.assertDictEqual(eval_results, {}) + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, 'test.keypoints.json'))) + + # test when gt annotations are absent 
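The CocoMetric tests above all follow the same standalone evaluation pattern, which also works outside a Runner: attach a dataset_meta, stream (data_batch, data_samples) pairs through process(), then call evaluate() with the number of processed samples. A condensed sketch; topdown_data stands in for batches built the same way as _convert_ann_to_topdown_batch_data() above and must be filled with real predictions before evaluating.

from xtcocotools.coco import COCO

from mmpose.datasets.datasets.utils import parse_pose_metainfo
from mmpose.evaluation.metrics import CocoMetric

ann_file = 'tests/data/coco/test_coco.json'

# Dataset meta parsed from the standard COCO config, plus the category list,
# mirroring setUp() above.
dataset_meta = parse_pose_metainfo(
    dict(from_file='configs/_base_/datasets/coco.py'))
coco = COCO(ann_file)
dataset_meta['CLASSES'] = coco.loadCats(coco.getCatIds())

metric = CocoMetric(
    ann_file=ann_file, score_mode='bbox_keypoint', nms_mode='oks_nms')
metric.dataset_meta = dataset_meta

# topdown_data: list of (data_batch, data_samples) pairs as in the tests.
topdown_data = []  # placeholder; populate with real predictions first
if topdown_data:
    for data_batch, data_samples in topdown_data:
        metric.process(data_batch, data_samples)
    eval_results = metric.evaluate(size=len(topdown_data))
    print(eval_results)  # e.g. {'coco/AP': ..., 'coco/AR': ...}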
+ db_ = load(self.ann_file_coco) + del db_['annotations'] + tmp_ann_file = osp.join(self.tmp_dir.name, 'temp_ann.json') + dump(db_, tmp_ann_file, sort_keys=True, indent=4) + with self.assertRaisesRegex( + AssertionError, + 'Ground truth annotations are required for evaluation'): + _ = CocoMetric(ann_file=tmp_ann_file, format_only=False) + + def test_bottomup_evaluate(self): + """test bottomup-style COCO metric evaluation.""" + # case1: score_mode='bbox', nms_mode='none' + metric_coco = CocoMetric( + ann_file=self.ann_file_coco, + outfile_prefix=f'{self.tmp_dir.name}/test', + score_mode='bbox', + nms_mode='none') + metric_coco.dataset_meta = self.dataset_meta_coco + + # process samples + for data_batch, data_samples in self.bottomup_data_coco: + metric_coco.process(data_batch, data_samples) + + eval_results = metric_coco.evaluate(size=len(self.bottomup_data_coco)) + self.assertDictEqual(eval_results, self.target_coco) + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, 'test.keypoints.json'))) + + def test_topdown_alignment(self): + """Test whether the output of CocoMetric and the original + TopDownCocoDataset are the same.""" + topdown_data = [] + db = load(self.ann_file_coco) + + for ann in db['annotations']: + w, h = ann['bbox'][2], ann['bbox'][3] + bboxes = np.array(ann['bbox'], dtype=np.float32).reshape(-1, 4) + bbox_scales = np.array([w * 1.25, h * 1.25]).reshape(-1, 2) + + keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + keypoints[..., 0] = keypoints[..., 0] * 0.98 + keypoints[..., 1] = keypoints[..., 1] * 1.02 + keypoints[..., 2] = keypoints[..., 2] * 0.8 + + gt_instances = { + 'bbox_scales': bbox_scales, + 'bbox_scores': np.ones((1, ), dtype=np.float32) * 0.98, + 'bboxes': bboxes, + } + pred_instances = { + 'keypoints': keypoints[..., :2], + 'keypoint_scores': keypoints[..., -1], + } + + data = {'inputs': None} + data_sample = { + 'id': ann['id'], + 'img_id': ann['image_id'], + 'gt_instances': gt_instances, + 'pred_instances': pred_instances + } + # batch size = 1 + data_batch = [data] + data_samples = [data_sample] + topdown_data.append((data_batch, data_samples)) + + # case 1: + # typical setting: score_mode='bbox_keypoint', nms_mode='oks_nms' + metric_coco = CocoMetric( + ann_file=self.ann_file_coco, + outfile_prefix=f'{self.tmp_dir.name}/test_align1', + score_mode='bbox_keypoint', + nms_mode='oks_nms') + metric_coco.dataset_meta = self.dataset_meta_coco + + # process samples + for data_batch, data_samples in topdown_data: + metric_coco.process(data_batch, data_samples) + + eval_results = metric_coco.evaluate(size=len(topdown_data)) + + target = { + 'coco/AP': 0.5287458745874587, + 'coco/AP .5': 0.9042904290429042, + 'coco/AP .75': 0.5009900990099009, + 'coco/AP (M)': 0.42475247524752474, + 'coco/AP (L)': 0.6219554455445544, + 'coco/AR': 0.5833333333333333, + 'coco/AR .5': 0.9166666666666666, + 'coco/AR .75': 0.5833333333333334, + 'coco/AR (M)': 0.44000000000000006, + 'coco/AR (L)': 0.6857142857142857, + } + + for key in eval_results.keys(): + self.assertAlmostEqual(eval_results[key], target[key]) + + self.assertTrue( + osp.isfile( + osp.join(self.tmp_dir.name, 'test_align1.keypoints.json'))) + + # case 2: score_mode='bbox_rle', nms_mode='oks_nms' + metric_coco = CocoMetric( + ann_file=self.ann_file_coco, + outfile_prefix=f'{self.tmp_dir.name}/test_align2', + score_mode='bbox_rle', + nms_mode='oks_nms') + metric_coco.dataset_meta = self.dataset_meta_coco + + # process samples + for data_batch, data_samples in topdown_data: + 
metric_coco.process(data_batch, data_samples) + + eval_results = metric_coco.evaluate(size=len(topdown_data)) + + target = { + 'coco/AP': 0.5004950495049505, + 'coco/AP .5': 0.8836633663366337, + 'coco/AP .75': 0.4679867986798679, + 'coco/AP (M)': 0.42475247524752474, + 'coco/AP (L)': 0.5814108910891089, + 'coco/AR': 0.5833333333333333, + 'coco/AR .5': 0.9166666666666666, + 'coco/AR .75': 0.5833333333333334, + 'coco/AR (M)': 0.44000000000000006, + 'coco/AR (L)': 0.6857142857142857, + } + + for key in eval_results.keys(): + self.assertAlmostEqual(eval_results[key], target[key]) + + self.assertTrue( + osp.isfile( + osp.join(self.tmp_dir.name, 'test_align2.keypoints.json'))) + + # case 3: score_mode='bbox_keypoint', nms_mode='soft_oks_nms' + topdown_data = [] + anns = db['annotations'] + for i, ann in enumerate(anns): + w, h = ann['bbox'][2], ann['bbox'][3] + bboxes = np.array(ann['bbox'], dtype=np.float32).reshape(-1, 4) + bbox_scales = np.array([w * 1.25, h * 1.25]).reshape(-1, 2) + + keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + keypoints[..., 0] = keypoints[..., 0] * (1 - i / 100) + keypoints[..., 1] = keypoints[..., 1] * (1 + i / 100) + keypoints[..., 2] = keypoints[..., 2] * (1 - i / 100) + + gt_instances0 = { + 'bbox_scales': bbox_scales, + 'bbox_scores': np.ones((1, ), dtype=np.float32), + 'bboxes': bboxes, + } + pred_instances0 = { + 'keypoints': keypoints[..., :2], + 'keypoint_scores': keypoints[..., -1], + } + + data0 = {'inputs': None} + data_sample0 = { + 'id': ann['id'], + 'img_id': ann['image_id'], + 'gt_instances': gt_instances0, + 'pred_instances': pred_instances0 + } + + keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + keypoints[..., 0] = keypoints[..., 0] * (1 + i / 100) + keypoints[..., 1] = keypoints[..., 1] * (1 - i / 100) + keypoints[..., 2] = keypoints[..., 2] * (1 - 2 * i / 100) + + gt_instances1 = { + 'bbox_scales': bbox_scales, + 'bboxes': bboxes, + 'bbox_scores': np.ones( + (1, ), dtype=np.float32) * (1 - 2 * i / 100) + } + pred_instances1 = { + 'keypoints': keypoints[..., :2], + 'keypoint_scores': keypoints[..., -1], + } + + data1 = {'inputs': None} + data_sample1 = { + 'id': ann['id'] + 1, + 'img_id': ann['image_id'], + 'gt_instances': gt_instances1, + 'pred_instances': pred_instances1 + } + + # batch size = 2 + data_batch = [data0, data1] + data_samples = [data_sample0, data_sample1] + topdown_data.append((data_batch, data_samples)) + + metric_coco = CocoMetric( + ann_file=self.ann_file_coco, + outfile_prefix=f'{self.tmp_dir.name}/test_align3', + score_mode='bbox_keypoint', + keypoint_score_thr=0.2, + nms_thr=0.9, + nms_mode='soft_oks_nms') + metric_coco.dataset_meta = self.dataset_meta_coco + + # process samples + for data_batch, data_samples in topdown_data: + metric_coco.process(data_batch, data_samples) + + eval_results = metric_coco.evaluate(size=len(topdown_data) * 2) + + target = { + 'coco/AP': 0.17073707370737073, + 'coco/AP .5': 0.25055005500550054, + 'coco/AP .75': 0.10671067106710669, + 'coco/AP (M)': 0.0, + 'coco/AP (L)': 0.29315181518151806, + 'coco/AR': 0.2416666666666666, + 'coco/AR .5': 0.3333333333333333, + 'coco/AR .75': 0.16666666666666666, + 'coco/AR (M)': 0.0, + 'coco/AR (L)': 0.41428571428571426, + } + + for key in eval_results.keys(): + self.assertAlmostEqual(eval_results[key], target[key]) + + self.assertTrue( + osp.isfile( + osp.join(self.tmp_dir.name, 'test_align3.keypoints.json'))) + + def test_topdown_evaluate(self): + """test topdown-style COCO metric evaluation.""" + # 
case 1: score_mode='bbox', nms_mode='none' + metric_coco = CocoMetric( + ann_file=self.ann_file_coco, + outfile_prefix=f'{self.tmp_dir.name}/test1', + score_mode='bbox', + nms_mode='none') + metric_coco.dataset_meta = self.dataset_meta_coco + + # process samples + for data_batch, data_samples in self.topdown_data_coco: + metric_coco.process(data_batch, data_samples) + + eval_results = metric_coco.evaluate(size=len(self.topdown_data_coco)) + + self.assertDictEqual(eval_results, self.target_coco) + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, 'test1.keypoints.json'))) + + # case 2: score_mode='bbox_keypoint', nms_mode='oks_nms' + metric_coco = CocoMetric( + ann_file=self.ann_file_coco, + outfile_prefix=f'{self.tmp_dir.name}/test2', + score_mode='bbox_keypoint', + nms_mode='oks_nms') + metric_coco.dataset_meta = self.dataset_meta_coco + + # process samples + for data_batch, data_samples in self.topdown_data_coco: + metric_coco.process(data_batch, data_samples) + + eval_results = metric_coco.evaluate(size=len(self.topdown_data_coco)) + + self.assertDictEqual(eval_results, self.target_coco) + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, 'test2.keypoints.json'))) + + # case 3: score_mode='bbox_rle', nms_mode='soft_oks_nms' + metric_coco = CocoMetric( + ann_file=self.ann_file_coco, + outfile_prefix=f'{self.tmp_dir.name}/test3', + score_mode='bbox_rle', + nms_mode='soft_oks_nms') + metric_coco.dataset_meta = self.dataset_meta_coco + + # process samples + for data_batch, data_samples in self.topdown_data_coco: + metric_coco.process(data_batch, data_samples) + + eval_results = metric_coco.evaluate(size=len(self.topdown_data_coco)) + + self.assertDictEqual(eval_results, self.target_coco) + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, 'test3.keypoints.json'))) + + # case 4: test without providing ann_file + metric_coco = CocoMetric(outfile_prefix=f'{self.tmp_dir.name}/test4') + metric_coco.dataset_meta = self.dataset_meta_coco + # process samples + for data_batch, data_samples in self.topdown_data_coco: + metric_coco.process(data_batch, data_samples) + eval_results = metric_coco.evaluate(size=len(self.topdown_data_coco)) + self.assertDictEqual(eval_results, self.target_coco) + # test whether convert the annotation to COCO format + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, 'test4.gt.json'))) + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, 'test4.keypoints.json'))) + + # case 5: test Crowdpose dataset + metric_crowdpose = CocoMetric( + ann_file=self.ann_file_crowdpose, + outfile_prefix=f'{self.tmp_dir.name}/test5', + use_area=False, + iou_type='keypoints_crowd', + prefix='crowdpose') + metric_crowdpose.dataset_meta = self.dataset_meta_crowdpose + # process samples + for data_batch, data_samples in self.topdown_data_crowdpose: + metric_crowdpose.process(data_batch, data_samples) + eval_results = metric_crowdpose.evaluate( + size=len(self.topdown_data_crowdpose)) + self.assertDictEqual(eval_results, self.target_crowdpose) + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, 'test5.keypoints.json'))) + + # case 6: test Crowdpose dataset + without ann_file + metric_crowdpose = CocoMetric( + outfile_prefix=f'{self.tmp_dir.name}/test6', + use_area=False, + iou_type='keypoints_crowd', + prefix='crowdpose') + metric_crowdpose.dataset_meta = self.dataset_meta_crowdpose + # process samples + for data_batch, data_samples in self.topdown_data_crowdpose: + metric_crowdpose.process(data_batch, data_samples) + eval_results = 
metric_crowdpose.evaluate( + size=len(self.topdown_data_crowdpose)) + self.assertDictEqual(eval_results, self.target_crowdpose) + # test whether convert the annotation to COCO format + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, 'test6.gt.json'))) + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, 'test6.keypoints.json'))) + + # case 7: test AP10k dataset + metric_ap10k = CocoMetric( + ann_file=self.ann_file_ap10k, + outfile_prefix=f'{self.tmp_dir.name}/test7') + metric_ap10k.dataset_meta = self.dataset_meta_ap10k + # process samples + for data_batch, data_samples in self.topdown_data_ap10k: + metric_ap10k.process(data_batch, data_samples) + eval_results = metric_ap10k.evaluate(size=len(self.topdown_data_ap10k)) + for key in self.target_ap10k: + self.assertAlmostEqual(eval_results[key], self.target_ap10k[key]) + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, 'test7.keypoints.json'))) + + # case 8: test AP10k dataset + without ann_file + metric_ap10k = CocoMetric(outfile_prefix=f'{self.tmp_dir.name}/test8') + metric_ap10k.dataset_meta = self.dataset_meta_ap10k + # process samples + for data_batch, data_samples in self.topdown_data_ap10k: + metric_ap10k.process(data_batch, data_samples) + eval_results = metric_ap10k.evaluate(size=len(self.topdown_data_ap10k)) + for key in self.target_ap10k: + self.assertAlmostEqual(eval_results[key], self.target_ap10k[key]) + # test whether convert the annotation to COCO format + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, 'test8.gt.json'))) + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, 'test8.keypoints.json'))) diff --git a/tests/test_evaluation/test_metrics/test_coco_wholebody_metric.py b/tests/test_evaluation/test_metrics/test_coco_wholebody_metric.py index 46e8498851..36fec1b0c0 100644 --- a/tests/test_evaluation/test_metrics/test_coco_wholebody_metric.py +++ b/tests/test_evaluation/test_metrics/test_coco_wholebody_metric.py @@ -1,294 +1,294 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -import os.path as osp -import tempfile -from collections import defaultdict -from unittest import TestCase - -import numpy as np -from mmengine.fileio import dump, load -from xtcocotools.coco import COCO - -from mmpose.datasets.datasets.utils import parse_pose_metainfo -from mmpose.evaluation.metrics import CocoWholeBodyMetric - - -class TestCocoWholeBodyMetric(TestCase): - - def setUp(self): - """Setup some variables which are used in every test method. 
- - TestCase calls functions in this order: setUp() -> testMethod() -> - tearDown() -> cleanUp() - """ - self.tmp_dir = tempfile.TemporaryDirectory() - - self.ann_file_coco = 'tests/data/coco/test_coco_wholebody.json' - meta_info_coco = dict( - from_file='configs/_base_/datasets/coco_wholebody.py') - self.dataset_meta_coco = parse_pose_metainfo(meta_info_coco) - self.coco = COCO(self.ann_file_coco) - self.dataset_meta_coco['CLASSES'] = self.coco.loadCats( - self.coco.getCatIds()) - - self.topdown_data_coco = self._convert_ann_to_topdown_batch_data( - self.ann_file_coco) - assert len(self.topdown_data_coco) == 14 - self.bottomup_data_coco = self._convert_ann_to_bottomup_batch_data( - self.ann_file_coco) - assert len(self.bottomup_data_coco) == 4 - self.target_coco = { - 'coco-wholebody/AP': 1.0, - 'coco-wholebody/AP .5': 1.0, - 'coco-wholebody/AP .75': 1.0, - 'coco-wholebody/AP (M)': 1.0, - 'coco-wholebody/AP (L)': 1.0, - 'coco-wholebody/AR': 1.0, - 'coco-wholebody/AR .5': 1.0, - 'coco-wholebody/AR .75': 1.0, - 'coco-wholebody/AR (M)': 1.0, - 'coco-wholebody/AR (L)': 1.0, - } - - def _convert_ann_to_topdown_batch_data(self, ann_file): - """Convert annotations to topdown-style batch data.""" - topdown_data = [] - db = load(ann_file) - imgid2info = dict() - for img in db['images']: - imgid2info[img['id']] = img - for ann in db['annotations']: - w, h = ann['bbox'][2], ann['bbox'][3] - bboxes = np.array(ann['bbox'], dtype=np.float32).reshape(-1, 4) - bbox_scales = np.array([w * 1.25, h * 1.25]).reshape(-1, 2) - _keypoints = np.array(ann['keypoints'] + ann['foot_kpts'] + - ann['face_kpts'] + ann['lefthand_kpts'] + - ann['righthand_kpts']).reshape(1, -1, 3) - - gt_instances = { - 'bbox_scales': bbox_scales, - 'bbox_scores': np.ones((1, ), dtype=np.float32), - 'bboxes': bboxes, - } - pred_instances = { - 'keypoints': _keypoints[..., :2], - 'keypoint_scores': _keypoints[..., -1], - } - - data = {'inputs': None} - data_sample = { - 'id': ann['id'], - 'img_id': ann['image_id'], - 'category_id': ann.get('category_id', 1), - 'gt_instances': gt_instances, - 'pred_instances': pred_instances, - # dummy image_shape for testing - 'ori_shape': [640, 480], - # store the raw annotation info to test without ann_file - 'raw_ann_info': copy.deepcopy(ann), - } - - # batch size = 1 - data_batch = [data] - data_samples = [data_sample] - topdown_data.append((data_batch, data_samples)) - - return topdown_data - - def _convert_ann_to_bottomup_batch_data(self, ann_file): - """Convert annotations to bottomup-style batch data.""" - img2ann = defaultdict(list) - db = load(ann_file) - for ann in db['annotations']: - img2ann[ann['image_id']].append(ann) - - bottomup_data = [] - for img_id, anns in img2ann.items(): - _keypoints = [] - for ann in anns: - _keypoints.append(ann['keypoints'] + ann['foot_kpts'] + - ann['face_kpts'] + ann['lefthand_kpts'] + - ann['righthand_kpts']) - keypoints = np.array(_keypoints).reshape((len(anns), -1, 3)) - - gt_instances = { - 'bbox_scores': np.ones((len(anns)), dtype=np.float32) - } - - pred_instances = { - 'keypoints': keypoints[..., :2], - 'keypoint_scores': keypoints[..., -1], - } - - data = {'inputs': None} - data_sample = { - 'id': [ann['id'] for ann in anns], - 'img_id': img_id, - 'gt_instances': gt_instances, - 'pred_instances': pred_instances - } - - # batch size = 1 - data_batch = [data] - data_samples = [data_sample] - bottomup_data.append((data_batch, data_samples)) - return bottomup_data - - def tearDown(self): - self.tmp_dir.cleanup() - - def test_init(self): - """test metric 
init method.""" - # test score_mode option - with self.assertRaisesRegex(ValueError, - '`score_mode` should be one of'): - _ = CocoWholeBodyMetric( - ann_file=self.ann_file_coco, score_mode='invalid') - - # test nms_mode option - with self.assertRaisesRegex(ValueError, '`nms_mode` should be one of'): - _ = CocoWholeBodyMetric( - ann_file=self.ann_file_coco, nms_mode='invalid') - - # test format_only option - with self.assertRaisesRegex( - AssertionError, - '`outfile_prefix` can not be None when `format_only` is True'): - _ = CocoWholeBodyMetric( - ann_file=self.ann_file_coco, - format_only=True, - outfile_prefix=None) - - def test_other_methods(self): - """test other useful methods.""" - # test `_sort_and_unique_bboxes` method - metric_coco = CocoWholeBodyMetric( - ann_file=self.ann_file_coco, score_mode='bbox', nms_mode='none') - metric_coco.dataset_meta = self.dataset_meta_coco - # process samples - for data_batch, data_samples in self.topdown_data_coco: - metric_coco.process(data_batch, data_samples) - # process one extra sample - data_batch, data_samples = self.topdown_data_coco[0] - metric_coco.process(data_batch, data_samples) - # an extra sample - eval_results = metric_coco.evaluate( - size=len(self.topdown_data_coco) + 1) - self.assertDictEqual(eval_results, self.target_coco) - - def test_format_only(self): - """test `format_only` option.""" - metric_coco = CocoWholeBodyMetric( - ann_file=self.ann_file_coco, - format_only=True, - outfile_prefix=f'{self.tmp_dir.name}/test', - score_mode='bbox_keypoint', - nms_mode='oks_nms') - metric_coco.dataset_meta = self.dataset_meta_coco - # process one sample - data_batch, data_samples = self.topdown_data_coco[0] - metric_coco.process(data_batch, data_samples) - eval_results = metric_coco.evaluate(size=1) - self.assertDictEqual(eval_results, {}) - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, 'test.keypoints.json'))) - - # test when gt annotations are absent - db_ = load(self.ann_file_coco) - del db_['annotations'] - tmp_ann_file = osp.join(self.tmp_dir.name, 'temp_ann.json') - dump(db_, tmp_ann_file, sort_keys=True, indent=4) - with self.assertRaisesRegex( - AssertionError, - 'Ground truth annotations are required for evaluation'): - _ = CocoWholeBodyMetric(ann_file=tmp_ann_file, format_only=False) - - def test_bottomup_evaluate(self): - """test bottomup-style COCO metric evaluation.""" - # case1: score_mode='bbox', nms_mode='none' - metric_coco = CocoWholeBodyMetric( - ann_file=self.ann_file_coco, - outfile_prefix=f'{self.tmp_dir.name}/test', - score_mode='bbox', - nms_mode='none') - metric_coco.dataset_meta = self.dataset_meta_coco - - # process samples - for data_batch, data_samples in self.bottomup_data_coco: - metric_coco.process(data_batch, data_samples) - - eval_results = metric_coco.evaluate(size=len(self.bottomup_data_coco)) - self.assertDictEqual(eval_results, self.target_coco) - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, 'test.keypoints.json'))) - - def test_topdown_evaluate(self): - """test topdown-style COCO metric evaluation.""" - # case 1: score_mode='bbox', nms_mode='none' - metric_coco = CocoWholeBodyMetric( - ann_file=self.ann_file_coco, - outfile_prefix=f'{self.tmp_dir.name}/test1', - score_mode='bbox', - nms_mode='none') - metric_coco.dataset_meta = self.dataset_meta_coco - - # process samples - for data_batch, data_samples in self.topdown_data_coco: - metric_coco.process(data_batch, data_samples) - - eval_results = metric_coco.evaluate(size=len(self.topdown_data_coco)) - - 
self.assertDictEqual(eval_results, self.target_coco) - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, 'test1.keypoints.json'))) - - # case 2: score_mode='bbox_keypoint', nms_mode='oks_nms' - metric_coco = CocoWholeBodyMetric( - ann_file=self.ann_file_coco, - outfile_prefix=f'{self.tmp_dir.name}/test2', - score_mode='bbox_keypoint', - nms_mode='oks_nms') - metric_coco.dataset_meta = self.dataset_meta_coco - - # process samples - for data_batch, data_samples in self.topdown_data_coco: - metric_coco.process(data_batch, data_samples) - - eval_results = metric_coco.evaluate(size=len(self.topdown_data_coco)) - - self.assertDictEqual(eval_results, self.target_coco) - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, 'test2.keypoints.json'))) - - # case 3: score_mode='bbox_rle', nms_mode='soft_oks_nms' - metric_coco = CocoWholeBodyMetric( - ann_file=self.ann_file_coco, - outfile_prefix=f'{self.tmp_dir.name}/test3', - score_mode='bbox_rle', - nms_mode='soft_oks_nms') - metric_coco.dataset_meta = self.dataset_meta_coco - - # process samples - for data_batch, data_samples in self.topdown_data_coco: - metric_coco.process(data_batch, data_samples) - - eval_results = metric_coco.evaluate(size=len(self.topdown_data_coco)) - - self.assertDictEqual(eval_results, self.target_coco) - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, 'test3.keypoints.json'))) - - # case 4: test without providing ann_file - metric_coco = CocoWholeBodyMetric( - outfile_prefix=f'{self.tmp_dir.name}/test4') - metric_coco.dataset_meta = self.dataset_meta_coco - # process samples - for data_batch, data_samples in self.topdown_data_coco: - metric_coco.process(data_batch, data_samples) - eval_results = metric_coco.evaluate(size=len(self.topdown_data_coco)) - self.assertDictEqual(eval_results, self.target_coco) - # test whether convert the annotation to COCO format - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, 'test4.gt.json'))) - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, 'test4.keypoints.json'))) +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import os.path as osp +import tempfile +from collections import defaultdict +from unittest import TestCase + +import numpy as np +from mmengine.fileio import dump, load +from xtcocotools.coco import COCO + +from mmpose.datasets.datasets.utils import parse_pose_metainfo +from mmpose.evaluation.metrics import CocoWholeBodyMetric + + +class TestCocoWholeBodyMetric(TestCase): + + def setUp(self): + """Setup some variables which are used in every test method. 
+ + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + self.tmp_dir = tempfile.TemporaryDirectory() + + self.ann_file_coco = 'tests/data/coco/test_coco_wholebody.json' + meta_info_coco = dict( + from_file='configs/_base_/datasets/coco_wholebody.py') + self.dataset_meta_coco = parse_pose_metainfo(meta_info_coco) + self.coco = COCO(self.ann_file_coco) + self.dataset_meta_coco['CLASSES'] = self.coco.loadCats( + self.coco.getCatIds()) + + self.topdown_data_coco = self._convert_ann_to_topdown_batch_data( + self.ann_file_coco) + assert len(self.topdown_data_coco) == 14 + self.bottomup_data_coco = self._convert_ann_to_bottomup_batch_data( + self.ann_file_coco) + assert len(self.bottomup_data_coco) == 4 + self.target_coco = { + 'coco-wholebody/AP': 1.0, + 'coco-wholebody/AP .5': 1.0, + 'coco-wholebody/AP .75': 1.0, + 'coco-wholebody/AP (M)': 1.0, + 'coco-wholebody/AP (L)': 1.0, + 'coco-wholebody/AR': 1.0, + 'coco-wholebody/AR .5': 1.0, + 'coco-wholebody/AR .75': 1.0, + 'coco-wholebody/AR (M)': 1.0, + 'coco-wholebody/AR (L)': 1.0, + } + + def _convert_ann_to_topdown_batch_data(self, ann_file): + """Convert annotations to topdown-style batch data.""" + topdown_data = [] + db = load(ann_file) + imgid2info = dict() + for img in db['images']: + imgid2info[img['id']] = img + for ann in db['annotations']: + w, h = ann['bbox'][2], ann['bbox'][3] + bboxes = np.array(ann['bbox'], dtype=np.float32).reshape(-1, 4) + bbox_scales = np.array([w * 1.25, h * 1.25]).reshape(-1, 2) + _keypoints = np.array(ann['keypoints'] + ann['foot_kpts'] + + ann['face_kpts'] + ann['lefthand_kpts'] + + ann['righthand_kpts']).reshape(1, -1, 3) + + gt_instances = { + 'bbox_scales': bbox_scales, + 'bbox_scores': np.ones((1, ), dtype=np.float32), + 'bboxes': bboxes, + } + pred_instances = { + 'keypoints': _keypoints[..., :2], + 'keypoint_scores': _keypoints[..., -1], + } + + data = {'inputs': None} + data_sample = { + 'id': ann['id'], + 'img_id': ann['image_id'], + 'category_id': ann.get('category_id', 1), + 'gt_instances': gt_instances, + 'pred_instances': pred_instances, + # dummy image_shape for testing + 'ori_shape': [640, 480], + # store the raw annotation info to test without ann_file + 'raw_ann_info': copy.deepcopy(ann), + } + + # batch size = 1 + data_batch = [data] + data_samples = [data_sample] + topdown_data.append((data_batch, data_samples)) + + return topdown_data + + def _convert_ann_to_bottomup_batch_data(self, ann_file): + """Convert annotations to bottomup-style batch data.""" + img2ann = defaultdict(list) + db = load(ann_file) + for ann in db['annotations']: + img2ann[ann['image_id']].append(ann) + + bottomup_data = [] + for img_id, anns in img2ann.items(): + _keypoints = [] + for ann in anns: + _keypoints.append(ann['keypoints'] + ann['foot_kpts'] + + ann['face_kpts'] + ann['lefthand_kpts'] + + ann['righthand_kpts']) + keypoints = np.array(_keypoints).reshape((len(anns), -1, 3)) + + gt_instances = { + 'bbox_scores': np.ones((len(anns)), dtype=np.float32) + } + + pred_instances = { + 'keypoints': keypoints[..., :2], + 'keypoint_scores': keypoints[..., -1], + } + + data = {'inputs': None} + data_sample = { + 'id': [ann['id'] for ann in anns], + 'img_id': img_id, + 'gt_instances': gt_instances, + 'pred_instances': pred_instances + } + + # batch size = 1 + data_batch = [data] + data_samples = [data_sample] + bottomup_data.append((data_batch, data_samples)) + return bottomup_data + + def tearDown(self): + self.tmp_dir.cleanup() + + def test_init(self): + """test metric 
init method.""" + # test score_mode option + with self.assertRaisesRegex(ValueError, + '`score_mode` should be one of'): + _ = CocoWholeBodyMetric( + ann_file=self.ann_file_coco, score_mode='invalid') + + # test nms_mode option + with self.assertRaisesRegex(ValueError, '`nms_mode` should be one of'): + _ = CocoWholeBodyMetric( + ann_file=self.ann_file_coco, nms_mode='invalid') + + # test format_only option + with self.assertRaisesRegex( + AssertionError, + '`outfile_prefix` can not be None when `format_only` is True'): + _ = CocoWholeBodyMetric( + ann_file=self.ann_file_coco, + format_only=True, + outfile_prefix=None) + + def test_other_methods(self): + """test other useful methods.""" + # test `_sort_and_unique_bboxes` method + metric_coco = CocoWholeBodyMetric( + ann_file=self.ann_file_coco, score_mode='bbox', nms_mode='none') + metric_coco.dataset_meta = self.dataset_meta_coco + # process samples + for data_batch, data_samples in self.topdown_data_coco: + metric_coco.process(data_batch, data_samples) + # process one extra sample + data_batch, data_samples = self.topdown_data_coco[0] + metric_coco.process(data_batch, data_samples) + # an extra sample + eval_results = metric_coco.evaluate( + size=len(self.topdown_data_coco) + 1) + self.assertDictEqual(eval_results, self.target_coco) + + def test_format_only(self): + """test `format_only` option.""" + metric_coco = CocoWholeBodyMetric( + ann_file=self.ann_file_coco, + format_only=True, + outfile_prefix=f'{self.tmp_dir.name}/test', + score_mode='bbox_keypoint', + nms_mode='oks_nms') + metric_coco.dataset_meta = self.dataset_meta_coco + # process one sample + data_batch, data_samples = self.topdown_data_coco[0] + metric_coco.process(data_batch, data_samples) + eval_results = metric_coco.evaluate(size=1) + self.assertDictEqual(eval_results, {}) + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, 'test.keypoints.json'))) + + # test when gt annotations are absent + db_ = load(self.ann_file_coco) + del db_['annotations'] + tmp_ann_file = osp.join(self.tmp_dir.name, 'temp_ann.json') + dump(db_, tmp_ann_file, sort_keys=True, indent=4) + with self.assertRaisesRegex( + AssertionError, + 'Ground truth annotations are required for evaluation'): + _ = CocoWholeBodyMetric(ann_file=tmp_ann_file, format_only=False) + + def test_bottomup_evaluate(self): + """test bottomup-style COCO metric evaluation.""" + # case1: score_mode='bbox', nms_mode='none' + metric_coco = CocoWholeBodyMetric( + ann_file=self.ann_file_coco, + outfile_prefix=f'{self.tmp_dir.name}/test', + score_mode='bbox', + nms_mode='none') + metric_coco.dataset_meta = self.dataset_meta_coco + + # process samples + for data_batch, data_samples in self.bottomup_data_coco: + metric_coco.process(data_batch, data_samples) + + eval_results = metric_coco.evaluate(size=len(self.bottomup_data_coco)) + self.assertDictEqual(eval_results, self.target_coco) + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, 'test.keypoints.json'))) + + def test_topdown_evaluate(self): + """test topdown-style COCO metric evaluation.""" + # case 1: score_mode='bbox', nms_mode='none' + metric_coco = CocoWholeBodyMetric( + ann_file=self.ann_file_coco, + outfile_prefix=f'{self.tmp_dir.name}/test1', + score_mode='bbox', + nms_mode='none') + metric_coco.dataset_meta = self.dataset_meta_coco + + # process samples + for data_batch, data_samples in self.topdown_data_coco: + metric_coco.process(data_batch, data_samples) + + eval_results = metric_coco.evaluate(size=len(self.topdown_data_coco)) + + 
self.assertDictEqual(eval_results, self.target_coco) + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, 'test1.keypoints.json'))) + + # case 2: score_mode='bbox_keypoint', nms_mode='oks_nms' + metric_coco = CocoWholeBodyMetric( + ann_file=self.ann_file_coco, + outfile_prefix=f'{self.tmp_dir.name}/test2', + score_mode='bbox_keypoint', + nms_mode='oks_nms') + metric_coco.dataset_meta = self.dataset_meta_coco + + # process samples + for data_batch, data_samples in self.topdown_data_coco: + metric_coco.process(data_batch, data_samples) + + eval_results = metric_coco.evaluate(size=len(self.topdown_data_coco)) + + self.assertDictEqual(eval_results, self.target_coco) + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, 'test2.keypoints.json'))) + + # case 3: score_mode='bbox_rle', nms_mode='soft_oks_nms' + metric_coco = CocoWholeBodyMetric( + ann_file=self.ann_file_coco, + outfile_prefix=f'{self.tmp_dir.name}/test3', + score_mode='bbox_rle', + nms_mode='soft_oks_nms') + metric_coco.dataset_meta = self.dataset_meta_coco + + # process samples + for data_batch, data_samples in self.topdown_data_coco: + metric_coco.process(data_batch, data_samples) + + eval_results = metric_coco.evaluate(size=len(self.topdown_data_coco)) + + self.assertDictEqual(eval_results, self.target_coco) + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, 'test3.keypoints.json'))) + + # case 4: test without providing ann_file + metric_coco = CocoWholeBodyMetric( + outfile_prefix=f'{self.tmp_dir.name}/test4') + metric_coco.dataset_meta = self.dataset_meta_coco + # process samples + for data_batch, data_samples in self.topdown_data_coco: + metric_coco.process(data_batch, data_samples) + eval_results = metric_coco.evaluate(size=len(self.topdown_data_coco)) + self.assertDictEqual(eval_results, self.target_coco) + # test whether convert the annotation to COCO format + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, 'test4.gt.json'))) + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, 'test4.keypoints.json'))) diff --git a/tests/test_evaluation/test_metrics/test_keypoint_2d_metrics.py b/tests/test_evaluation/test_metrics/test_keypoint_2d_metrics.py index fdb029c40d..e5ce303823 100644 --- a/tests/test_evaluation/test_metrics/test_keypoint_2d_metrics.py +++ b/tests/test_evaluation/test_metrics/test_keypoint_2d_metrics.py @@ -1,443 +1,443 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import numpy as np -from mmengine.structures import InstanceData - -from mmpose.datasets.datasets.utils import parse_pose_metainfo -from mmpose.evaluation.metrics import (AUC, EPE, NME, JhmdbPCKAccuracy, - MpiiPCKAccuracy, PCKAccuracy) - - -class TestPCKAccuracy(TestCase): - - def setUp(self): - """Setup some variables which are used in every test method. 
- - TestCase calls functions in this order: setUp() -> testMethod() -> - tearDown() -> cleanUp() - """ - self.batch_size = 8 - num_keypoints = 15 - self.data_batch = [] - self.data_samples = [] - - for i in range(self.batch_size): - gt_instances = InstanceData() - keypoints = np.zeros((1, num_keypoints, 2)) - keypoints[0, i] = [0.5 * i, 0.5 * i] - gt_instances.keypoints = keypoints - gt_instances.keypoints_visible = np.ones( - (1, num_keypoints, 1)).astype(bool) - gt_instances.keypoints_visible[0, (2 * i) % 8, 0] = False - gt_instances.bboxes = np.random.random((1, 4)) * 20 * i - gt_instances.head_size = np.random.random((1, 1)) * 10 * i - - pred_instances = InstanceData() - pred_instances.keypoints = keypoints - - data = {'inputs': None} - data_sample = { - 'gt_instances': gt_instances.to_dict(), - 'pred_instances': pred_instances.to_dict(), - } - - self.data_batch.append(data) - self.data_samples.append(data_sample) - - def test_init(self): - """test metric init method.""" - # test invalid normalized_items - with self.assertRaisesRegex( - KeyError, "Should be one of 'bbox', 'head', 'torso'"): - PCKAccuracy(norm_item='invalid') - - def test_evaluate(self): - """test PCK accuracy evaluation metric.""" - # test normalized by 'bbox' - pck_metric = PCKAccuracy(thr=0.5, norm_item='bbox') - pck_metric.process(self.data_batch, self.data_samples) - pck = pck_metric.evaluate(self.batch_size) - target = {'PCK': 1.0} - self.assertDictEqual(pck, target) - - # test normalized by 'head_size' - pckh_metric = PCKAccuracy(thr=0.3, norm_item='head') - pckh_metric.process(self.data_batch, self.data_samples) - pckh = pckh_metric.evaluate(self.batch_size) - target = {'PCKh': 1.0} - self.assertDictEqual(pckh, target) - - # test normalized by 'torso_size' - tpck_metric = PCKAccuracy(thr=0.05, norm_item=['bbox', 'torso']) - tpck_metric.process(self.data_batch, self.data_samples) - tpck = tpck_metric.evaluate(self.batch_size) - self.assertIsInstance(tpck, dict) - target = { - 'PCK': 1.0, - 'tPCK': 1.0, - } - self.assertDictEqual(tpck, target) - - -class TestMpiiPCKAccuracy(TestCase): - - def setUp(self): - """Setup some variables which are used in every test method. 
- - TestCase calls functions in this order: setUp() -> testMethod() -> - tearDown() -> cleanUp() - """ - self.batch_size = 8 - num_keypoints = 16 - self.data_batch = [] - self.data_samples = [] - - for i in range(self.batch_size): - gt_instances = InstanceData() - keypoints = np.zeros((1, num_keypoints, 2)) - keypoints[0, i] = [0.5 * i, 0.5 * i] - gt_instances.keypoints = keypoints + 1.0 - gt_instances.keypoints_visible = np.ones( - (1, num_keypoints, 1)).astype(bool) - gt_instances.keypoints_visible[0, (2 * i) % 8, 0] = False - gt_instances.bboxes = np.random.random((1, 4)) * 20 * i - gt_instances.head_size = np.random.random((1, 1)) * 10 * i - - pred_instances = InstanceData() - pred_instances.keypoints = keypoints - - data = {'inputs': None} - data_sample = { - 'gt_instances': gt_instances.to_dict(), - 'pred_instances': pred_instances.to_dict(), - } - - self.data_batch.append(data) - self.data_samples.append(data_sample) - - def test_init(self): - """test metric init method.""" - # test invalid normalized_items - with self.assertRaisesRegex( - KeyError, "Should be one of 'bbox', 'head', 'torso'"): - MpiiPCKAccuracy(norm_item='invalid') - - def test_evaluate(self): - """test PCK accuracy evaluation metric.""" - # test normalized by 'head_size' - mpii_pck_metric = MpiiPCKAccuracy(thr=0.3, norm_item='head') - mpii_pck_metric.process(self.data_batch, self.data_samples) - pck_results = mpii_pck_metric.evaluate(self.batch_size) - target = { - 'Head PCK': 100.0, - 'Shoulder PCK': 100.0, - 'Elbow PCK': 100.0, - 'Wrist PCK': 100.0, - 'Hip PCK': 100.0, - 'Knee PCK': 100.0, - 'Ankle PCK': 100.0, - 'PCK': 100.0, - 'PCK@0.1': 100.0, - } - self.assertDictEqual(pck_results, target) - - -class TestJhmdbPCKAccuracy(TestCase): - - def setUp(self): - """Setup some variables which are used in every test method. 
- - TestCase calls functions in this order: setUp() -> testMethod() -> - tearDown() -> cleanUp() - """ - self.batch_size = 8 - num_keypoints = 15 - self.data_batch = [] - self.data_samples = [] - - for i in range(self.batch_size): - gt_instances = InstanceData() - keypoints = np.zeros((1, num_keypoints, 2)) - keypoints[0, i] = [0.5 * i, 0.5 * i] - gt_instances.keypoints = keypoints - gt_instances.keypoints_visible = np.ones( - (1, num_keypoints, 1)).astype(bool) - gt_instances.keypoints_visible[0, (2 * i) % 8, 0] = False - gt_instances.bboxes = np.random.random((1, 4)) * 20 * i - gt_instances.head_size = np.random.random((1, 1)) * 10 * i - - pred_instances = InstanceData() - pred_instances.keypoints = keypoints - - data = {'inputs': None} - data_sample = { - 'gt_instances': gt_instances.to_dict(), - 'pred_instances': pred_instances.to_dict(), - } - - self.data_batch.append(data) - self.data_samples.append(data_sample) - - def test_init(self): - """test metric init method.""" - # test invalid normalized_items - with self.assertRaisesRegex( - KeyError, "Should be one of 'bbox', 'head', 'torso'"): - JhmdbPCKAccuracy(norm_item='invalid') - - def test_evaluate(self): - """test PCK accuracy evaluation metric.""" - # test normalized by 'bbox_size' - jhmdb_pck_metric = JhmdbPCKAccuracy(thr=0.5, norm_item='bbox') - jhmdb_pck_metric.process(self.data_batch, self.data_samples) - pck_results = jhmdb_pck_metric.evaluate(self.batch_size) - target = { - 'Head PCK': 1.0, - 'Sho PCK': 1.0, - 'Elb PCK': 1.0, - 'Wri PCK': 1.0, - 'Hip PCK': 1.0, - 'Knee PCK': 1.0, - 'Ank PCK': 1.0, - 'PCK': 1.0, - } - self.assertDictEqual(pck_results, target) - - # test normalized by 'torso_size' - jhmdb_tpck_metric = JhmdbPCKAccuracy(thr=0.2, norm_item='torso') - jhmdb_tpck_metric.process(self.data_batch, self.data_samples) - tpck_results = jhmdb_tpck_metric.evaluate(self.batch_size) - target = { - 'Head tPCK': 1.0, - 'Sho tPCK': 1.0, - 'Elb tPCK': 1.0, - 'Wri tPCK': 1.0, - 'Hip tPCK': 1.0, - 'Knee tPCK': 1.0, - 'Ank tPCK': 1.0, - 'tPCK': 1.0, - } - self.assertDictEqual(tpck_results, target) - - -class TestAUCandEPE(TestCase): - - def setUp(self): - """Setup some variables which are used in every test method. 
- - TestCase calls functions in this order: setUp() -> testMethod() -> - tearDown() -> cleanUp() - """ - output = np.zeros((1, 5, 2)) - target = np.zeros((1, 5, 2)) - # first channel - output[0, 0] = [10, 4] - target[0, 0] = [10, 0] - # second channel - output[0, 1] = [10, 18] - target[0, 1] = [10, 10] - # third channel - output[0, 2] = [0, 0] - target[0, 2] = [0, -1] - # fourth channel - output[0, 3] = [40, 40] - target[0, 3] = [30, 30] - # fifth channel - output[0, 4] = [20, 10] - target[0, 4] = [0, 10] - - gt_instances = InstanceData() - gt_instances.keypoints = target - gt_instances.keypoints_visible = np.array( - [[True, True, False, True, True]]) - - pred_instances = InstanceData() - pred_instances.keypoints = output - - data = {'inputs': None} - data_sample = { - 'gt_instances': gt_instances.to_dict(), - 'pred_instances': pred_instances.to_dict() - } - - self.data_batch = [data] - self.data_samples = [data_sample] - - def test_auc_evaluate(self): - """test AUC evaluation metric.""" - auc_metric = AUC(norm_factor=20, num_thrs=4) - auc_metric.process(self.data_batch, self.data_samples) - auc = auc_metric.evaluate(1) - target = {'AUC': 0.375} - self.assertDictEqual(auc, target) - - def test_epe_evaluate(self): - """test EPE evaluation metric.""" - epe_metric = EPE() - epe_metric.process(self.data_batch, self.data_samples) - epe = epe_metric.evaluate(1) - self.assertAlmostEqual(epe['EPE'], 11.5355339) - - -class TestNME(TestCase): - - def _generate_data(self, - batch_size: int = 1, - num_keypoints: int = 5, - norm_item: str = 'box_size') -> tuple: - """Generate data_batch and data_samples according to different - settings.""" - data_batch = [] - data_samples = [] - - for i in range(batch_size): - gt_instances = InstanceData() - keypoints = np.zeros((1, num_keypoints, 2)) - keypoints[0, i] = [0.5 * i, 0.5 * i] - gt_instances.keypoints = keypoints - gt_instances.keypoints_visible = np.ones( - (1, num_keypoints, 1)).astype(bool) - gt_instances.keypoints_visible[0, (2 * i) % batch_size, 0] = False - gt_instances[norm_item] = np.random.random((1, 1)) * 20 * i - - pred_instances = InstanceData() - pred_instances.keypoints = keypoints - - data = {'inputs': None} - data_sample = { - 'gt_instances': gt_instances.to_dict(), - 'pred_instances': pred_instances.to_dict(), - } - data_batch.append(data) - data_samples.append(data_sample) - - return data_batch, data_samples - - def test_nme_evaluate(self): - """test NME evaluation metric.""" - # test when norm_mode = 'use_norm_item' - # test norm_item = 'box_size' like in `AFLWDataset` - norm_item = 'box_size' - nme_metric = NME(norm_mode='use_norm_item', norm_item=norm_item) - aflw_meta_info = dict(from_file='configs/_base_/datasets/aflw.py') - aflw_dataset_meta = parse_pose_metainfo(aflw_meta_info) - nme_metric.dataset_meta = aflw_dataset_meta - - data_batch, data_samples = self._generate_data( - batch_size=4, num_keypoints=19, norm_item=norm_item) - nme_metric.process(data_batch, data_samples) - nme = nme_metric.evaluate(4) - target = {'NME': 0.0} - self.assertDictEqual(nme, target) - - # test when norm_mode = 'keypoint_distance' - # when `keypoint_indices = None`, - # use default `keypoint_indices` like in `Horse10Dataset` - nme_metric = NME(norm_mode='keypoint_distance') - horse10_meta_info = dict( - from_file='configs/_base_/datasets/horse10.py') - horse10_dataset_meta = parse_pose_metainfo(horse10_meta_info) - nme_metric.dataset_meta = horse10_dataset_meta - - data_batch, data_samples = self._generate_data( - batch_size=4, num_keypoints=22) - 
nme_metric.process(data_batch, data_samples) - nme = nme_metric.evaluate(4) - - target = {'NME': 0.0} - self.assertDictEqual(nme, target) - - # test when norm_mode = 'keypoint_distance' - # specify custom `keypoint_indices` - keypoint_indices = [2, 4] - nme_metric = NME( - norm_mode='keypoint_distance', keypoint_indices=keypoint_indices) - coco_meta_info = dict(from_file='configs/_base_/datasets/coco.py') - coco_dataset_meta = parse_pose_metainfo(coco_meta_info) - nme_metric.dataset_meta = coco_dataset_meta - - data_batch, data_samples = self._generate_data( - batch_size=2, num_keypoints=17) - nme_metric.process(data_batch, data_samples) - nme = nme_metric.evaluate(2) - - target = {'NME': 0.0} - self.assertDictEqual(nme, target) - - def test_exceptions_and_warnings(self): - """test exceptions and warnings.""" - # test invalid norm_mode - with self.assertRaisesRegex( - KeyError, - "`norm_mode` should be 'use_norm_item' or 'keypoint_distance'" - ): - nme_metric = NME(norm_mode='invalid') - - # test when norm_mode = 'use_norm_item' but do not specify norm_item - with self.assertRaisesRegex( - KeyError, '`norm_mode` is set to `"use_norm_item"`, ' - 'please specify the `norm_item`'): - nme_metric = NME(norm_mode='use_norm_item', norm_item=None) - - # test when norm_mode = 'use_norm_item' - # but the `norm_item` do not in data_info - with self.assertRaisesRegex( - AssertionError, - 'The ground truth data info do not have the expected ' - 'normalized factor'): - nme_metric = NME(norm_mode='use_norm_item', norm_item='norm_item1') - coco_meta_info = dict(from_file='configs/_base_/datasets/coco.py') - coco_dataset_meta = parse_pose_metainfo(coco_meta_info) - nme_metric.dataset_meta = coco_dataset_meta - - data_batch, data_samples = self._generate_data( - norm_item='norm_item2') - # raise AssertionError here - nme_metric.process(data_batch, data_samples) - - # test when norm_mode = 'keypoint_distance', `keypoint_indices` = None - # but the dataset_name not in `DEFAULT_KEYPOINT_INDICES` - with self.assertRaisesRegex( - KeyError, 'can not find the keypoint_indices in ' - '`DEFAULT_KEYPOINT_INDICES`'): - nme_metric = NME( - norm_mode='keypoint_distance', keypoint_indices=None) - coco_meta_info = dict(from_file='configs/_base_/datasets/coco.py') - coco_dataset_meta = parse_pose_metainfo(coco_meta_info) - nme_metric.dataset_meta = coco_dataset_meta - - data_batch, data_samples = self._generate_data() - nme_metric.process(data_batch, data_samples) - # raise KeyError here - _ = nme_metric.evaluate(1) - - # test when len(keypoint_indices) is not 2 - with self.assertRaisesRegex( - AssertionError, - 'The keypoint indices used for normalization should be a pair.' 
- ): - nme_metric = NME( - norm_mode='keypoint_distance', keypoint_indices=[0, 1, 2]) - coco_meta_info = dict(from_file='configs/_base_/datasets/coco.py') - coco_dataset_meta = parse_pose_metainfo(coco_meta_info) - nme_metric.dataset_meta = coco_dataset_meta - - data_batch, data_samples = self._generate_data() - nme_metric.process(data_batch, data_samples) - # raise AssertionError here - _ = nme_metric.evaluate(1) - - # test when dataset does not contain the required keypoint - with self.assertRaisesRegex(AssertionError, - 'dataset does not contain the required'): - nme_metric = NME( - norm_mode='keypoint_distance', keypoint_indices=[17, 18]) - coco_meta_info = dict(from_file='configs/_base_/datasets/coco.py') - coco_dataset_meta = parse_pose_metainfo(coco_meta_info) - nme_metric.dataset_meta = coco_dataset_meta - - data_batch, predidata_samplesctions = self._generate_data() - nme_metric.process(data_batch, data_samples) - # raise AssertionError here - _ = nme_metric.evaluate(1) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np +from mmengine.structures import InstanceData + +from mmpose.datasets.datasets.utils import parse_pose_metainfo +from mmpose.evaluation.metrics import (AUC, EPE, NME, JhmdbPCKAccuracy, + MpiiPCKAccuracy, PCKAccuracy) + + +class TestPCKAccuracy(TestCase): + + def setUp(self): + """Setup some variables which are used in every test method. + + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + self.batch_size = 8 + num_keypoints = 15 + self.data_batch = [] + self.data_samples = [] + + for i in range(self.batch_size): + gt_instances = InstanceData() + keypoints = np.zeros((1, num_keypoints, 2)) + keypoints[0, i] = [0.5 * i, 0.5 * i] + gt_instances.keypoints = keypoints + gt_instances.keypoints_visible = np.ones( + (1, num_keypoints, 1)).astype(bool) + gt_instances.keypoints_visible[0, (2 * i) % 8, 0] = False + gt_instances.bboxes = np.random.random((1, 4)) * 20 * i + gt_instances.head_size = np.random.random((1, 1)) * 10 * i + + pred_instances = InstanceData() + pred_instances.keypoints = keypoints + + data = {'inputs': None} + data_sample = { + 'gt_instances': gt_instances.to_dict(), + 'pred_instances': pred_instances.to_dict(), + } + + self.data_batch.append(data) + self.data_samples.append(data_sample) + + def test_init(self): + """test metric init method.""" + # test invalid normalized_items + with self.assertRaisesRegex( + KeyError, "Should be one of 'bbox', 'head', 'torso'"): + PCKAccuracy(norm_item='invalid') + + def test_evaluate(self): + """test PCK accuracy evaluation metric.""" + # test normalized by 'bbox' + pck_metric = PCKAccuracy(thr=0.5, norm_item='bbox') + pck_metric.process(self.data_batch, self.data_samples) + pck = pck_metric.evaluate(self.batch_size) + target = {'PCK': 1.0} + self.assertDictEqual(pck, target) + + # test normalized by 'head_size' + pckh_metric = PCKAccuracy(thr=0.3, norm_item='head') + pckh_metric.process(self.data_batch, self.data_samples) + pckh = pckh_metric.evaluate(self.batch_size) + target = {'PCKh': 1.0} + self.assertDictEqual(pckh, target) + + # test normalized by 'torso_size' + tpck_metric = PCKAccuracy(thr=0.05, norm_item=['bbox', 'torso']) + tpck_metric.process(self.data_batch, self.data_samples) + tpck = tpck_metric.evaluate(self.batch_size) + self.assertIsInstance(tpck, dict) + target = { + 'PCK': 1.0, + 'tPCK': 1.0, + } + self.assertDictEqual(tpck, target) + + +class TestMpiiPCKAccuracy(TestCase): + + def setUp(self): + 
"""Setup some variables which are used in every test method. + + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + self.batch_size = 8 + num_keypoints = 16 + self.data_batch = [] + self.data_samples = [] + + for i in range(self.batch_size): + gt_instances = InstanceData() + keypoints = np.zeros((1, num_keypoints, 2)) + keypoints[0, i] = [0.5 * i, 0.5 * i] + gt_instances.keypoints = keypoints + 1.0 + gt_instances.keypoints_visible = np.ones( + (1, num_keypoints, 1)).astype(bool) + gt_instances.keypoints_visible[0, (2 * i) % 8, 0] = False + gt_instances.bboxes = np.random.random((1, 4)) * 20 * i + gt_instances.head_size = np.random.random((1, 1)) * 10 * i + + pred_instances = InstanceData() + pred_instances.keypoints = keypoints + + data = {'inputs': None} + data_sample = { + 'gt_instances': gt_instances.to_dict(), + 'pred_instances': pred_instances.to_dict(), + } + + self.data_batch.append(data) + self.data_samples.append(data_sample) + + def test_init(self): + """test metric init method.""" + # test invalid normalized_items + with self.assertRaisesRegex( + KeyError, "Should be one of 'bbox', 'head', 'torso'"): + MpiiPCKAccuracy(norm_item='invalid') + + def test_evaluate(self): + """test PCK accuracy evaluation metric.""" + # test normalized by 'head_size' + mpii_pck_metric = MpiiPCKAccuracy(thr=0.3, norm_item='head') + mpii_pck_metric.process(self.data_batch, self.data_samples) + pck_results = mpii_pck_metric.evaluate(self.batch_size) + target = { + 'Head PCK': 100.0, + 'Shoulder PCK': 100.0, + 'Elbow PCK': 100.0, + 'Wrist PCK': 100.0, + 'Hip PCK': 100.0, + 'Knee PCK': 100.0, + 'Ankle PCK': 100.0, + 'PCK': 100.0, + 'PCK@0.1': 100.0, + } + self.assertDictEqual(pck_results, target) + + +class TestJhmdbPCKAccuracy(TestCase): + + def setUp(self): + """Setup some variables which are used in every test method. 
+ + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + self.batch_size = 8 + num_keypoints = 15 + self.data_batch = [] + self.data_samples = [] + + for i in range(self.batch_size): + gt_instances = InstanceData() + keypoints = np.zeros((1, num_keypoints, 2)) + keypoints[0, i] = [0.5 * i, 0.5 * i] + gt_instances.keypoints = keypoints + gt_instances.keypoints_visible = np.ones( + (1, num_keypoints, 1)).astype(bool) + gt_instances.keypoints_visible[0, (2 * i) % 8, 0] = False + gt_instances.bboxes = np.random.random((1, 4)) * 20 * i + gt_instances.head_size = np.random.random((1, 1)) * 10 * i + + pred_instances = InstanceData() + pred_instances.keypoints = keypoints + + data = {'inputs': None} + data_sample = { + 'gt_instances': gt_instances.to_dict(), + 'pred_instances': pred_instances.to_dict(), + } + + self.data_batch.append(data) + self.data_samples.append(data_sample) + + def test_init(self): + """test metric init method.""" + # test invalid normalized_items + with self.assertRaisesRegex( + KeyError, "Should be one of 'bbox', 'head', 'torso'"): + JhmdbPCKAccuracy(norm_item='invalid') + + def test_evaluate(self): + """test PCK accuracy evaluation metric.""" + # test normalized by 'bbox_size' + jhmdb_pck_metric = JhmdbPCKAccuracy(thr=0.5, norm_item='bbox') + jhmdb_pck_metric.process(self.data_batch, self.data_samples) + pck_results = jhmdb_pck_metric.evaluate(self.batch_size) + target = { + 'Head PCK': 1.0, + 'Sho PCK': 1.0, + 'Elb PCK': 1.0, + 'Wri PCK': 1.0, + 'Hip PCK': 1.0, + 'Knee PCK': 1.0, + 'Ank PCK': 1.0, + 'PCK': 1.0, + } + self.assertDictEqual(pck_results, target) + + # test normalized by 'torso_size' + jhmdb_tpck_metric = JhmdbPCKAccuracy(thr=0.2, norm_item='torso') + jhmdb_tpck_metric.process(self.data_batch, self.data_samples) + tpck_results = jhmdb_tpck_metric.evaluate(self.batch_size) + target = { + 'Head tPCK': 1.0, + 'Sho tPCK': 1.0, + 'Elb tPCK': 1.0, + 'Wri tPCK': 1.0, + 'Hip tPCK': 1.0, + 'Knee tPCK': 1.0, + 'Ank tPCK': 1.0, + 'tPCK': 1.0, + } + self.assertDictEqual(tpck_results, target) + + +class TestAUCandEPE(TestCase): + + def setUp(self): + """Setup some variables which are used in every test method. 
+ + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + output = np.zeros((1, 5, 2)) + target = np.zeros((1, 5, 2)) + # first channel + output[0, 0] = [10, 4] + target[0, 0] = [10, 0] + # second channel + output[0, 1] = [10, 18] + target[0, 1] = [10, 10] + # third channel + output[0, 2] = [0, 0] + target[0, 2] = [0, -1] + # fourth channel + output[0, 3] = [40, 40] + target[0, 3] = [30, 30] + # fifth channel + output[0, 4] = [20, 10] + target[0, 4] = [0, 10] + + gt_instances = InstanceData() + gt_instances.keypoints = target + gt_instances.keypoints_visible = np.array( + [[True, True, False, True, True]]) + + pred_instances = InstanceData() + pred_instances.keypoints = output + + data = {'inputs': None} + data_sample = { + 'gt_instances': gt_instances.to_dict(), + 'pred_instances': pred_instances.to_dict() + } + + self.data_batch = [data] + self.data_samples = [data_sample] + + def test_auc_evaluate(self): + """test AUC evaluation metric.""" + auc_metric = AUC(norm_factor=20, num_thrs=4) + auc_metric.process(self.data_batch, self.data_samples) + auc = auc_metric.evaluate(1) + target = {'AUC': 0.375} + self.assertDictEqual(auc, target) + + def test_epe_evaluate(self): + """test EPE evaluation metric.""" + epe_metric = EPE() + epe_metric.process(self.data_batch, self.data_samples) + epe = epe_metric.evaluate(1) + self.assertAlmostEqual(epe['EPE'], 11.5355339) + + +class TestNME(TestCase): + + def _generate_data(self, + batch_size: int = 1, + num_keypoints: int = 5, + norm_item: str = 'box_size') -> tuple: + """Generate data_batch and data_samples according to different + settings.""" + data_batch = [] + data_samples = [] + + for i in range(batch_size): + gt_instances = InstanceData() + keypoints = np.zeros((1, num_keypoints, 2)) + keypoints[0, i] = [0.5 * i, 0.5 * i] + gt_instances.keypoints = keypoints + gt_instances.keypoints_visible = np.ones( + (1, num_keypoints, 1)).astype(bool) + gt_instances.keypoints_visible[0, (2 * i) % batch_size, 0] = False + gt_instances[norm_item] = np.random.random((1, 1)) * 20 * i + + pred_instances = InstanceData() + pred_instances.keypoints = keypoints + + data = {'inputs': None} + data_sample = { + 'gt_instances': gt_instances.to_dict(), + 'pred_instances': pred_instances.to_dict(), + } + data_batch.append(data) + data_samples.append(data_sample) + + return data_batch, data_samples + + def test_nme_evaluate(self): + """test NME evaluation metric.""" + # test when norm_mode = 'use_norm_item' + # test norm_item = 'box_size' like in `AFLWDataset` + norm_item = 'box_size' + nme_metric = NME(norm_mode='use_norm_item', norm_item=norm_item) + aflw_meta_info = dict(from_file='configs/_base_/datasets/aflw.py') + aflw_dataset_meta = parse_pose_metainfo(aflw_meta_info) + nme_metric.dataset_meta = aflw_dataset_meta + + data_batch, data_samples = self._generate_data( + batch_size=4, num_keypoints=19, norm_item=norm_item) + nme_metric.process(data_batch, data_samples) + nme = nme_metric.evaluate(4) + target = {'NME': 0.0} + self.assertDictEqual(nme, target) + + # test when norm_mode = 'keypoint_distance' + # when `keypoint_indices = None`, + # use default `keypoint_indices` like in `Horse10Dataset` + nme_metric = NME(norm_mode='keypoint_distance') + horse10_meta_info = dict( + from_file='configs/_base_/datasets/horse10.py') + horse10_dataset_meta = parse_pose_metainfo(horse10_meta_info) + nme_metric.dataset_meta = horse10_dataset_meta + + data_batch, data_samples = self._generate_data( + batch_size=4, num_keypoints=22) + 
nme_metric.process(data_batch, data_samples) + nme = nme_metric.evaluate(4) + + target = {'NME': 0.0} + self.assertDictEqual(nme, target) + + # test when norm_mode = 'keypoint_distance' + # specify custom `keypoint_indices` + keypoint_indices = [2, 4] + nme_metric = NME( + norm_mode='keypoint_distance', keypoint_indices=keypoint_indices) + coco_meta_info = dict(from_file='configs/_base_/datasets/coco.py') + coco_dataset_meta = parse_pose_metainfo(coco_meta_info) + nme_metric.dataset_meta = coco_dataset_meta + + data_batch, data_samples = self._generate_data( + batch_size=2, num_keypoints=17) + nme_metric.process(data_batch, data_samples) + nme = nme_metric.evaluate(2) + + target = {'NME': 0.0} + self.assertDictEqual(nme, target) + + def test_exceptions_and_warnings(self): + """test exceptions and warnings.""" + # test invalid norm_mode + with self.assertRaisesRegex( + KeyError, + "`norm_mode` should be 'use_norm_item' or 'keypoint_distance'" + ): + nme_metric = NME(norm_mode='invalid') + + # test when norm_mode = 'use_norm_item' but do not specify norm_item + with self.assertRaisesRegex( + KeyError, '`norm_mode` is set to `"use_norm_item"`, ' + 'please specify the `norm_item`'): + nme_metric = NME(norm_mode='use_norm_item', norm_item=None) + + # test when norm_mode = 'use_norm_item' + # but the `norm_item` do not in data_info + with self.assertRaisesRegex( + AssertionError, + 'The ground truth data info do not have the expected ' + 'normalized factor'): + nme_metric = NME(norm_mode='use_norm_item', norm_item='norm_item1') + coco_meta_info = dict(from_file='configs/_base_/datasets/coco.py') + coco_dataset_meta = parse_pose_metainfo(coco_meta_info) + nme_metric.dataset_meta = coco_dataset_meta + + data_batch, data_samples = self._generate_data( + norm_item='norm_item2') + # raise AssertionError here + nme_metric.process(data_batch, data_samples) + + # test when norm_mode = 'keypoint_distance', `keypoint_indices` = None + # but the dataset_name not in `DEFAULT_KEYPOINT_INDICES` + with self.assertRaisesRegex( + KeyError, 'can not find the keypoint_indices in ' + '`DEFAULT_KEYPOINT_INDICES`'): + nme_metric = NME( + norm_mode='keypoint_distance', keypoint_indices=None) + coco_meta_info = dict(from_file='configs/_base_/datasets/coco.py') + coco_dataset_meta = parse_pose_metainfo(coco_meta_info) + nme_metric.dataset_meta = coco_dataset_meta + + data_batch, data_samples = self._generate_data() + nme_metric.process(data_batch, data_samples) + # raise KeyError here + _ = nme_metric.evaluate(1) + + # test when len(keypoint_indices) is not 2 + with self.assertRaisesRegex( + AssertionError, + 'The keypoint indices used for normalization should be a pair.' 
+        ):
+            nme_metric = NME(
+                norm_mode='keypoint_distance', keypoint_indices=[0, 1, 2])
+            coco_meta_info = dict(from_file='configs/_base_/datasets/coco.py')
+            coco_dataset_meta = parse_pose_metainfo(coco_meta_info)
+            nme_metric.dataset_meta = coco_dataset_meta
+
+            data_batch, data_samples = self._generate_data()
+            nme_metric.process(data_batch, data_samples)
+            # raise AssertionError here
+            _ = nme_metric.evaluate(1)
+
+        # test when dataset does not contain the required keypoint
+        with self.assertRaisesRegex(AssertionError,
+                                    'dataset does not contain the required'):
+            nme_metric = NME(
+                norm_mode='keypoint_distance', keypoint_indices=[17, 18])
+            coco_meta_info = dict(from_file='configs/_base_/datasets/coco.py')
+            coco_dataset_meta = parse_pose_metainfo(coco_meta_info)
+            nme_metric.dataset_meta = coco_dataset_meta
+
+            data_batch, data_samples = self._generate_data()
+            nme_metric.process(data_batch, data_samples)
+            # raise AssertionError here
+            _ = nme_metric.evaluate(1)
diff --git a/tests/test_evaluation/test_metrics/test_keypoint_3d_metrics.py b/tests/test_evaluation/test_metrics/test_keypoint_3d_metrics.py
index 8289b09d0f..72a19582b3 100644
--- a/tests/test_evaluation/test_metrics/test_keypoint_3d_metrics.py
+++ b/tests/test_evaluation/test_metrics/test_keypoint_3d_metrics.py
@@ -1,70 +1,70 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from unittest import TestCase
-
-import numpy as np
-from mmengine.structures import InstanceData
-
-from mmpose.evaluation import MPJPE
-from mmpose.structures import PoseDataSample
-
-
-class TestMPJPE(TestCase):
-
-    def setUp(self):
-        """Setup variables used in every test method."""
-        self.batch_size = 8
-        num_keypoints = 15
-        self.data_batch = []
-        self.data_samples = []
-
-        for i in range(self.batch_size):
-            gt_instances = InstanceData()
-            keypoints = np.random.random((1, num_keypoints, 3))
-            gt_instances.lifting_target = np.random.random((num_keypoints, 3))
-            gt_instances.lifting_target_visible = np.ones(
-                (num_keypoints, 1)).astype(bool)
-
-            pred_instances = InstanceData()
-            pred_instances.keypoints = keypoints + np.random.normal(
-                0, 0.01, keypoints.shape)
-
-            data = {'inputs': None}
-            data_sample = PoseDataSample(
-                gt_instances=gt_instances, pred_instances=pred_instances)
-            data_sample.set_metainfo(
-                dict(target_img_path='tests/data/h36m/S7/'
-                     'S7_Greeting.55011271/S7_Greeting.55011271_000396.jpg'))
-
-            self.data_batch.append(data)
-            self.data_samples.append(data_sample.to_dict())
-
-    def test_init(self):
-        """Test metric init method."""
-        # Test invalid mode
-        with self.assertRaisesRegex(
-                KeyError, "`mode` should be 'mpjpe', 'p-mpjpe', or 'n-mpjpe', "
-                "but got 'invalid'."):
-            MPJPE(mode='invalid')
-
-    def test_evaluate(self):
-        """Test MPJPE evaluation metric."""
-        mpjpe_metric = MPJPE(mode='mpjpe')
-        mpjpe_metric.process(self.data_batch, self.data_samples)
-        mpjpe = mpjpe_metric.evaluate(self.batch_size)
-        self.assertIsInstance(mpjpe, dict)
-        self.assertIn('MPJPE', mpjpe)
-        self.assertTrue(mpjpe['MPJPE'] >= 0)
-
-        p_mpjpe_metric = MPJPE(mode='p-mpjpe')
-        p_mpjpe_metric.process(self.data_batch, self.data_samples)
-        p_mpjpe = p_mpjpe_metric.evaluate(self.batch_size)
-        self.assertIsInstance(p_mpjpe, dict)
-        self.assertIn('P-MPJPE', p_mpjpe)
-        self.assertTrue(p_mpjpe['P-MPJPE'] >= 0)
-
-        n_mpjpe_metric = MPJPE(mode='n-mpjpe')
-        n_mpjpe_metric.process(self.data_batch, self.data_samples)
-        n_mpjpe = n_mpjpe_metric.evaluate(self.batch_size)
-        self.assertIsInstance(n_mpjpe, dict)
-        self.assertIn('N-MPJPE', n_mpjpe)
-
self.assertTrue(n_mpjpe['N-MPJPE'] >= 0) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np +from mmengine.structures import InstanceData + +from mmpose.evaluation import MPJPE +from mmpose.structures import PoseDataSample + + +class TestMPJPE(TestCase): + + def setUp(self): + """Setup variables used in every test method.""" + self.batch_size = 8 + num_keypoints = 15 + self.data_batch = [] + self.data_samples = [] + + for i in range(self.batch_size): + gt_instances = InstanceData() + keypoints = np.random.random((1, num_keypoints, 3)) + gt_instances.lifting_target = np.random.random((num_keypoints, 3)) + gt_instances.lifting_target_visible = np.ones( + (num_keypoints, 1)).astype(bool) + + pred_instances = InstanceData() + pred_instances.keypoints = keypoints + np.random.normal( + 0, 0.01, keypoints.shape) + + data = {'inputs': None} + data_sample = PoseDataSample( + gt_instances=gt_instances, pred_instances=pred_instances) + data_sample.set_metainfo( + dict(target_img_path='tests/data/h36m/S7/' + 'S7_Greeting.55011271/S7_Greeting.55011271_000396.jpg')) + + self.data_batch.append(data) + self.data_samples.append(data_sample.to_dict()) + + def test_init(self): + """Test metric init method.""" + # Test invalid mode + with self.assertRaisesRegex( + KeyError, "`mode` should be 'mpjpe', 'p-mpjpe', or 'n-mpjpe', " + "but got 'invalid'."): + MPJPE(mode='invalid') + + def test_evaluate(self): + """Test MPJPE evaluation metric.""" + mpjpe_metric = MPJPE(mode='mpjpe') + mpjpe_metric.process(self.data_batch, self.data_samples) + mpjpe = mpjpe_metric.evaluate(self.batch_size) + self.assertIsInstance(mpjpe, dict) + self.assertIn('MPJPE', mpjpe) + self.assertTrue(mpjpe['MPJPE'] >= 0) + + p_mpjpe_metric = MPJPE(mode='p-mpjpe') + p_mpjpe_metric.process(self.data_batch, self.data_samples) + p_mpjpe = p_mpjpe_metric.evaluate(self.batch_size) + self.assertIsInstance(p_mpjpe, dict) + self.assertIn('P-MPJPE', p_mpjpe) + self.assertTrue(p_mpjpe['P-MPJPE'] >= 0) + + n_mpjpe_metric = MPJPE(mode='n-mpjpe') + n_mpjpe_metric.process(self.data_batch, self.data_samples) + n_mpjpe = n_mpjpe_metric.evaluate(self.batch_size) + self.assertIsInstance(n_mpjpe, dict) + self.assertIn('N-MPJPE', n_mpjpe) + self.assertTrue(n_mpjpe['N-MPJPE'] >= 0) diff --git a/tests/test_evaluation/test_metrics/test_keypoint_partition_metric.py b/tests/test_evaluation/test_metrics/test_keypoint_partition_metric.py index 2b1a60c113..8cd0ce03d0 100644 --- a/tests/test_evaluation/test_metrics/test_keypoint_partition_metric.py +++ b/tests/test_evaluation/test_metrics/test_keypoint_partition_metric.py @@ -1,525 +1,525 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -import os.path as osp -import tempfile -from collections import defaultdict -from unittest import TestCase - -import numpy as np -from mmengine.fileio import load -from mmengine.structures import InstanceData -from xtcocotools.coco import COCO - -from mmpose.datasets.datasets.utils import parse_pose_metainfo -from mmpose.evaluation.metrics import KeypointPartitionMetric - - -class TestKeypointPartitionMetricWrappingCocoMetric(TestCase): - - def setUp(self): - """Setup some variables which are used in every test method. 
- - TestCase calls functions in this order: setUp() -> testMethod() -> - tearDown() -> cleanUp() - """ - self.tmp_dir = tempfile.TemporaryDirectory() - - self.ann_file_coco = \ - 'tests/data/coco/test_keypoint_partition_metric.json' - meta_info_coco = dict( - from_file='configs/_base_/datasets/coco_wholebody.py') - self.dataset_meta_coco = parse_pose_metainfo(meta_info_coco) - self.coco = COCO(self.ann_file_coco) - self.dataset_meta_coco['CLASSES'] = self.coco.loadCats( - self.coco.getCatIds()) - - self.topdown_data_coco = self._convert_ann_to_topdown_batch_data( - self.ann_file_coco) - assert len(self.topdown_data_coco) == 14 - self.bottomup_data_coco = self._convert_ann_to_bottomup_batch_data( - self.ann_file_coco) - assert len(self.bottomup_data_coco) == 4 - """ - The target results were obtained from CocoWholebodyMetric with - score_mode='bbox' and nms_mode='none'. We cannot compare other - combinations of score_mode and nms_mode because CocoWholebodyMetric - calculates scores and nms using all keypoints while - KeypointPartitionMetric calculates scores and nms part by part. - As long as this case is tested correct, the other cases should be - correct. - """ - self.target_bbox_none = { - 'body/coco/AP': 0.749, - 'body/coco/AR': 0.800, - 'foot/coco/AP': 0.840, - 'foot/coco/AR': 0.850, - 'face/coco/AP': 0.051, - 'face/coco/AR': 0.050, - 'left_hand/coco/AP': 0.283, - 'left_hand/coco/AR': 0.300, - 'right_hand/coco/AP': 0.383, - 'right_hand/coco/AR': 0.380, - 'all/coco/AP': 0.284, - 'all/coco/AR': 0.450, - } - - def _convert_ann_to_topdown_batch_data(self, ann_file): - """Convert annotations to topdown-style batch data.""" - topdown_data = [] - db = load(ann_file) - imgid2info = dict() - for img in db['images']: - imgid2info[img['id']] = img - for ann in db['annotations']: - w, h = ann['bbox'][2], ann['bbox'][3] - bboxes = np.array(ann['bbox'], dtype=np.float32).reshape(-1, 4) - bbox_scales = np.array([w * 1.25, h * 1.25]).reshape(-1, 2) - _keypoints = np.array(ann['keypoints']).reshape((1, -1, 3)) - - gt_instances = { - 'bbox_scales': bbox_scales, - 'bbox_scores': np.ones((1, ), dtype=np.float32), - 'bboxes': bboxes, - 'keypoints': _keypoints[..., :2], - 'keypoints_visible': _keypoints[..., 2:3] - } - - # fake predictions - keypoints = np.zeros_like(_keypoints) - keypoints[..., 0] = _keypoints[..., 0] * 0.99 - keypoints[..., 1] = _keypoints[..., 1] * 1.02 - keypoints[..., 2] = _keypoints[..., 2] * 0.8 - - pred_instances = { - 'keypoints': keypoints[..., :2], - 'keypoint_scores': keypoints[..., -1], - } - - data = {'inputs': None} - data_sample = { - 'id': ann['id'], - 'img_id': ann['image_id'], - 'category_id': ann.get('category_id', 1), - 'gt_instances': gt_instances, - 'pred_instances': pred_instances, - # dummy image_shape for testing - 'ori_shape': [640, 480], - # store the raw annotation info to test without ann_file - 'raw_ann_info': copy.deepcopy(ann), - } - - # add crowd_index to data_sample if it is present in the image_info - if 'crowdIndex' in imgid2info[ann['image_id']]: - data_sample['crowd_index'] = imgid2info[ - ann['image_id']]['crowdIndex'] - # batch size = 1 - data_batch = [data] - data_samples = [data_sample] - topdown_data.append((data_batch, data_samples)) - - return topdown_data - - def _convert_ann_to_bottomup_batch_data(self, ann_file): - """Convert annotations to bottomup-style batch data.""" - img2ann = defaultdict(list) - db = load(ann_file) - for ann in db['annotations']: - img2ann[ann['image_id']].append(ann) - - bottomup_data = [] - for img_id, anns in 
img2ann.items(): - _keypoints = np.array([ann['keypoints'] for ann in anns]).reshape( - (len(anns), -1, 3)) - - gt_instances = { - 'bbox_scores': np.ones((len(anns)), dtype=np.float32), - 'keypoints': _keypoints[..., :2], - 'keypoints_visible': _keypoints[..., 2:3] - } - - # fake predictions - keypoints = np.zeros_like(_keypoints) - keypoints[..., 0] = _keypoints[..., 0] * 0.99 - keypoints[..., 1] = _keypoints[..., 1] * 1.02 - keypoints[..., 2] = _keypoints[..., 2] * 0.8 - - pred_instances = { - 'keypoints': keypoints[..., :2], - 'keypoint_scores': keypoints[..., -1], - } - - data = {'inputs': None} - data_sample = { - 'id': [ann['id'] for ann in anns], - 'img_id': img_id, - 'gt_instances': gt_instances, - 'pred_instances': pred_instances, - # dummy image_shape for testing - 'ori_shape': [640, 480], - 'raw_ann_info': copy.deepcopy(anns), - } - - # batch size = 1 - data_batch = [data] - data_samples = [data_sample] - bottomup_data.append((data_batch, data_samples)) - return bottomup_data - - def _assert_outfiles(self, prefix): - for part in ['body', 'foot', 'face', 'left_hand', 'right_hand', 'all']: - self.assertTrue( - osp.isfile( - osp.join(self.tmp_dir.name, - f'{prefix}.{part}.keypoints.json'))) - - def tearDown(self): - self.tmp_dir.cleanup() - - def test_init(self): - """test metric init method.""" - # test wrong metric type - with self.assertRaisesRegex( - ValueError, 'Metrics supported by KeypointPartitionMetric'): - _ = KeypointPartitionMetric( - metric=dict(type='Metric'), partitions=dict(all=range(133))) - - # test ann_file arg warning - with self.assertWarnsRegex(UserWarning, - 'does not support the ann_file argument'): - _ = KeypointPartitionMetric( - metric=dict(type='CocoMetric', ann_file=''), - partitions=dict(all=range(133))) - - # test score_mode arg warning - with self.assertWarnsRegex(UserWarning, "if score_mode is not 'bbox'"): - _ = KeypointPartitionMetric( - metric=dict(type='CocoMetric'), - partitions=dict(all=range(133))) - - # test nms arg warning - with self.assertWarnsRegex(UserWarning, 'oks_nms and soft_oks_nms'): - _ = KeypointPartitionMetric( - metric=dict(type='CocoMetric'), - partitions=dict(all=range(133))) - - # test partitions - with self.assertRaisesRegex(AssertionError, 'at least one partition'): - _ = KeypointPartitionMetric( - metric=dict(type='CocoMetric'), partitions=dict()) - - with self.assertRaisesRegex(AssertionError, 'should be a sequence'): - _ = KeypointPartitionMetric( - metric=dict(type='CocoMetric'), partitions=dict(all={})) - - with self.assertRaisesRegex(AssertionError, 'at least one element'): - _ = KeypointPartitionMetric( - metric=dict(type='CocoMetric'), partitions=dict(all=[])) - - def test_bottomup_evaluate(self): - """test bottomup-style COCO metric evaluation.""" - # case1: score_mode='bbox', nms_mode='none' - metric = KeypointPartitionMetric( - metric=dict( - type='CocoMetric', - outfile_prefix=f'{self.tmp_dir.name}/test_bottomup', - score_mode='bbox', - nms_mode='none'), - partitions=dict( - body=range(17), - foot=range(17, 23), - face=range(23, 91), - left_hand=range(91, 112), - right_hand=range(112, 133), - all=range(133))) - metric.dataset_meta = self.dataset_meta_coco - - # process samples - for data_batch, data_samples in self.bottomup_data_coco: - metric.process(data_batch, data_samples) - - eval_results = metric.evaluate(size=len(self.bottomup_data_coco)) - for key in self.target_bbox_none.keys(): - self.assertAlmostEqual( - eval_results[key], self.target_bbox_none[key], places=3) - self._assert_outfiles('test_bottomup') 
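# A rough, conceptual sketch of what the KeypointPartitionMetric under test
# does with the 133 COCO-WholeBody keypoints: each partition gets its own
# keypoint slice and its own copy of the wrapped metric, and the returned
# keys come back prefixed with the partition name ('body/coco/AP', 'p1/PCK',
# ...). This is only an illustration of the idea, not the actual mmpose
# implementation; `score_fn` is a hypothetical stand-in for the wrapped
# metric.
import numpy as np


def evaluate_by_partition(pred_kpts, gt_kpts, partitions, score_fn):
    """pred_kpts / gt_kpts: arrays of shape (num_instances, num_keypoints, 2)."""
    results = {}
    for name, indices in partitions.items():
        idx = np.asarray(list(indices))
        # each partition only sees its own keypoint columns, which is why
        # per-part scores and NMS differ from the whole-body
        # CocoWholebodyMetric results mentioned in the setUp docstring
        part_result = score_fn(pred_kpts[:, idx], gt_kpts[:, idx])
        results.update({f'{name}/{k}': v for k, v in part_result.items()})
    return results


# the same partition layout used throughout the tests in this file
wholebody_partitions = dict(
    body=range(17),
    foot=range(17, 23),
    face=range(23, 91),
    left_hand=range(91, 112),
    right_hand=range(112, 133),
    all=range(133))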
- - def test_topdown_evaluate(self): - """test topdown-style COCO metric evaluation.""" - # case 1: score_mode='bbox', nms_mode='none' - metric = KeypointPartitionMetric( - metric=dict( - type='CocoMetric', - outfile_prefix=f'{self.tmp_dir.name}/test_topdown1', - score_mode='bbox', - nms_mode='none'), - partitions=dict( - body=range(17), - foot=range(17, 23), - face=range(23, 91), - left_hand=range(91, 112), - right_hand=range(112, 133), - all=range(133))) - metric.dataset_meta = self.dataset_meta_coco - - # process samples - for data_batch, data_samples in self.topdown_data_coco: - metric.process(data_batch, data_samples) - - eval_results = metric.evaluate(size=len(self.topdown_data_coco)) - for key in self.target_bbox_none.keys(): - self.assertAlmostEqual( - eval_results[key], self.target_bbox_none[key], places=3) - self._assert_outfiles('test_topdown1') - - -class TestKeypointPartitionMetricWrappingPCKAccuracy(TestCase): - - def setUp(self): - """Setup some variables which are used in every test method. - - TestCase calls functions in this order: setUp() -> testMethod() -> - tearDown() -> cleanUp() - """ - self.batch_size = 8 - num_keypoints = 24 - self.data_batch = [] - self.data_samples = [] - - for i in range(self.batch_size): - gt_instances = InstanceData() - keypoints = np.zeros((1, num_keypoints, 2)) - for j in range(num_keypoints): - keypoints[0, j] = [0.5 * i * j, 0.5 * i * j] - gt_instances.keypoints = keypoints - gt_instances.keypoints_visible = np.ones( - (1, num_keypoints, 1)).astype(bool) - gt_instances.keypoints_visible[0, (2 * i) % 8, 0] = False - gt_instances.bboxes = np.array([[0.1, 0.2, 0.3, 0.4]]) * 20 * i - gt_instances.head_size = np.array([[0.1]]) * 10 * i - - pred_instances = InstanceData() - # fake predictions - _keypoints = np.zeros_like(keypoints) - _keypoints[0, :, 0] = keypoints[0, :, 0] * 0.95 - _keypoints[0, :, 1] = keypoints[0, :, 1] * 1.05 - pred_instances.keypoints = _keypoints - - data = {'inputs': None} - data_sample = { - 'gt_instances': gt_instances.to_dict(), - 'pred_instances': pred_instances.to_dict(), - } - - self.data_batch.append(data) - self.data_samples.append(data_sample) - - def test_init(self): - # test norm_item arg warning - with self.assertWarnsRegex(UserWarning, - 'norm_item torso is used in JhmdbDataset'): - _ = KeypointPartitionMetric( - metric=dict( - type='PCKAccuracy', thr=0.05, norm_item=['bbox', 'torso']), - partitions=dict(all=range(133))) - - def test_evaluate(self): - """test PCK accuracy evaluation metric.""" - # test normalized by 'bbox' - pck_metric = KeypointPartitionMetric( - metric=dict(type='PCKAccuracy', thr=0.5, norm_item='bbox'), - partitions=dict( - p1=range(10), - p2=range(10, 24), - all=range(24), - )) - pck_metric.process(self.data_batch, self.data_samples) - pck = pck_metric.evaluate(self.batch_size) - target = {'p1/PCK': 1.0, 'p2/PCK': 1.0, 'all/PCK': 1.0} - self.assertDictEqual(pck, target) - - # test normalized by 'head_size' - pckh_metric = KeypointPartitionMetric( - metric=dict(type='PCKAccuracy', thr=0.3, norm_item='head'), - partitions=dict( - p1=range(10), - p2=range(10, 24), - all=range(24), - )) - pckh_metric.process(self.data_batch, self.data_samples) - pckh = pckh_metric.evaluate(self.batch_size) - target = {'p1/PCKh': 0.9, 'p2/PCKh': 0.0, 'all/PCKh': 0.375} - self.assertDictEqual(pckh, target) - - # test normalized by 'torso_size' - tpck_metric = KeypointPartitionMetric( - metric=dict( - type='PCKAccuracy', thr=0.05, norm_item=['bbox', 'torso']), - partitions=dict( - p1=range(10), - p2=range(10, 
24), - all=range(24), - )) - tpck_metric.process(self.data_batch, self.data_samples) - tpck = tpck_metric.evaluate(self.batch_size) - self.assertIsInstance(tpck, dict) - target = { - 'p1/PCK': 0.6, - 'p1/tPCK': 0.11428571428571428, - 'p2/PCK': 0.0, - 'p2/tPCK': 0.0, - 'all/PCK': 0.25, - 'all/tPCK': 0.047619047619047616 - } - self.assertDictEqual(tpck, target) - - -class TestKeypointPartitionMetricWrappingAUCandEPE(TestCase): - - def setUp(self): - """Setup some variables which are used in every test method. - - TestCase calls functions in this order: setUp() -> testMethod() -> - tearDown() -> cleanUp() - """ - output = np.zeros((1, 5, 2)) - target = np.zeros((1, 5, 2)) - # first channel - output[0, 0] = [10, 4] - target[0, 0] = [10, 0] - # second channel - output[0, 1] = [10, 18] - target[0, 1] = [10, 10] - # third channel - output[0, 2] = [0, 0] - target[0, 2] = [0, -1] - # fourth channel - output[0, 3] = [40, 40] - target[0, 3] = [30, 30] - # fifth channel - output[0, 4] = [20, 10] - target[0, 4] = [0, 10] - - gt_instances = InstanceData() - gt_instances.keypoints = target - gt_instances.keypoints_visible = np.array( - [[True, True, False, True, True]]) - - pred_instances = InstanceData() - pred_instances.keypoints = output - - data = {'inputs': None} - data_sample = { - 'gt_instances': gt_instances.to_dict(), - 'pred_instances': pred_instances.to_dict() - } - - self.data_batch = [data] - self.data_samples = [data_sample] - - def test_auc_evaluate(self): - """test AUC evaluation metric.""" - auc_metric = KeypointPartitionMetric( - metric=dict(type='AUC', norm_factor=20, num_thrs=4), - partitions=dict( - p1=range(3), - p2=range(3, 5), - all=range(5), - )) - auc_metric.process(self.data_batch, self.data_samples) - auc = auc_metric.evaluate(1) - target = {'p1/AUC': 0.625, 'p2/AUC': 0.125, 'all/AUC': 0.375} - self.assertDictEqual(auc, target) - - def test_epe_evaluate(self): - """test EPE evaluation metric.""" - epe_metric = KeypointPartitionMetric( - metric=dict(type='EPE', ), - partitions=dict( - p1=range(3), - p2=range(3, 5), - all=range(5), - )) - epe_metric.process(self.data_batch, self.data_samples) - epe = epe_metric.evaluate(1) - target = { - 'p1/EPE': 6.0, - 'p2/EPE': 17.071067810058594, - 'all/EPE': 11.535533905029297 - } - self.assertDictEqual(epe, target) - - -class TestKeypointPartitionMetricWrappingNME(TestCase): - - def setUp(self): - """Setup some variables which are used in every test method. 
- - TestCase calls functions in this order: setUp() -> testMethod() -> - tearDown() -> cleanUp() - """ - self.batch_size = 4 - num_keypoints = 19 - self.data_batch = [] - self.data_samples = [] - - for i in range(self.batch_size): - gt_instances = InstanceData() - keypoints = np.zeros((1, num_keypoints, 2)) - for j in range(num_keypoints): - keypoints[0, j] = [0.5 * i * j, 0.5 * i * j] - gt_instances.keypoints = keypoints - gt_instances.keypoints_visible = np.ones( - (1, num_keypoints, 1)).astype(bool) - gt_instances.keypoints_visible[0, (2 * i) % self.batch_size, - 0] = False - gt_instances['box_size'] = np.array([[0.1]]) * 10 * i - - pred_instances = InstanceData() - # fake predictions - _keypoints = np.zeros_like(keypoints) - _keypoints[0, :, 0] = keypoints[0, :, 0] * 0.95 - _keypoints[0, :, 1] = keypoints[0, :, 1] * 1.05 - pred_instances.keypoints = _keypoints - - data = {'inputs': None} - data_sample = { - 'gt_instances': gt_instances.to_dict(), - 'pred_instances': pred_instances.to_dict(), - } - - self.data_batch.append(data) - self.data_samples.append(data_sample) - - def test_init(self): - # test norm_mode arg missing - with self.assertRaisesRegex(AssertionError, 'Missing norm_mode'): - _ = KeypointPartitionMetric( - metric=dict(type='NME', ), partitions=dict(all=range(133))) - - # test norm_mode = keypoint_distance - with self.assertRaisesRegex(ValueError, - "NME norm_mode 'keypoint_distance'"): - _ = KeypointPartitionMetric( - metric=dict(type='NME', norm_mode='keypoint_distance'), - partitions=dict(all=range(133))) - - def test_nme_evaluate(self): - """test NME evaluation metric.""" - # test when norm_mode = 'use_norm_item' - # test norm_item = 'box_size' like in `AFLWDataset` - nme_metric = KeypointPartitionMetric( - metric=dict( - type='NME', norm_mode='use_norm_item', norm_item='box_size'), - partitions=dict( - p1=range(10), - p2=range(10, 19), - all=range(19), - )) - nme_metric.process(self.data_batch, self.data_samples) - nme = nme_metric.evaluate(4) - target = { - 'p1/NME': 0.1715388651247378, - 'p2/NME': 0.4949747721354167, - 'all/NME': 0.333256827460395 - } - self.assertDictEqual(nme, target) +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import os.path as osp +import tempfile +from collections import defaultdict +from unittest import TestCase + +import numpy as np +from mmengine.fileio import load +from mmengine.structures import InstanceData +from xtcocotools.coco import COCO + +from mmpose.datasets.datasets.utils import parse_pose_metainfo +from mmpose.evaluation.metrics import KeypointPartitionMetric + + +class TestKeypointPartitionMetricWrappingCocoMetric(TestCase): + + def setUp(self): + """Setup some variables which are used in every test method. 
+ + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + self.tmp_dir = tempfile.TemporaryDirectory() + + self.ann_file_coco = \ + 'tests/data/coco/test_keypoint_partition_metric.json' + meta_info_coco = dict( + from_file='configs/_base_/datasets/coco_wholebody.py') + self.dataset_meta_coco = parse_pose_metainfo(meta_info_coco) + self.coco = COCO(self.ann_file_coco) + self.dataset_meta_coco['CLASSES'] = self.coco.loadCats( + self.coco.getCatIds()) + + self.topdown_data_coco = self._convert_ann_to_topdown_batch_data( + self.ann_file_coco) + assert len(self.topdown_data_coco) == 14 + self.bottomup_data_coco = self._convert_ann_to_bottomup_batch_data( + self.ann_file_coco) + assert len(self.bottomup_data_coco) == 4 + """ + The target results were obtained from CocoWholebodyMetric with + score_mode='bbox' and nms_mode='none'. We cannot compare other + combinations of score_mode and nms_mode because CocoWholebodyMetric + calculates scores and nms using all keypoints while + KeypointPartitionMetric calculates scores and nms part by part. + As long as this case is tested correct, the other cases should be + correct. + """ + self.target_bbox_none = { + 'body/coco/AP': 0.749, + 'body/coco/AR': 0.800, + 'foot/coco/AP': 0.840, + 'foot/coco/AR': 0.850, + 'face/coco/AP': 0.051, + 'face/coco/AR': 0.050, + 'left_hand/coco/AP': 0.283, + 'left_hand/coco/AR': 0.300, + 'right_hand/coco/AP': 0.383, + 'right_hand/coco/AR': 0.380, + 'all/coco/AP': 0.284, + 'all/coco/AR': 0.450, + } + + def _convert_ann_to_topdown_batch_data(self, ann_file): + """Convert annotations to topdown-style batch data.""" + topdown_data = [] + db = load(ann_file) + imgid2info = dict() + for img in db['images']: + imgid2info[img['id']] = img + for ann in db['annotations']: + w, h = ann['bbox'][2], ann['bbox'][3] + bboxes = np.array(ann['bbox'], dtype=np.float32).reshape(-1, 4) + bbox_scales = np.array([w * 1.25, h * 1.25]).reshape(-1, 2) + _keypoints = np.array(ann['keypoints']).reshape((1, -1, 3)) + + gt_instances = { + 'bbox_scales': bbox_scales, + 'bbox_scores': np.ones((1, ), dtype=np.float32), + 'bboxes': bboxes, + 'keypoints': _keypoints[..., :2], + 'keypoints_visible': _keypoints[..., 2:3] + } + + # fake predictions + keypoints = np.zeros_like(_keypoints) + keypoints[..., 0] = _keypoints[..., 0] * 0.99 + keypoints[..., 1] = _keypoints[..., 1] * 1.02 + keypoints[..., 2] = _keypoints[..., 2] * 0.8 + + pred_instances = { + 'keypoints': keypoints[..., :2], + 'keypoint_scores': keypoints[..., -1], + } + + data = {'inputs': None} + data_sample = { + 'id': ann['id'], + 'img_id': ann['image_id'], + 'category_id': ann.get('category_id', 1), + 'gt_instances': gt_instances, + 'pred_instances': pred_instances, + # dummy image_shape for testing + 'ori_shape': [640, 480], + # store the raw annotation info to test without ann_file + 'raw_ann_info': copy.deepcopy(ann), + } + + # add crowd_index to data_sample if it is present in the image_info + if 'crowdIndex' in imgid2info[ann['image_id']]: + data_sample['crowd_index'] = imgid2info[ + ann['image_id']]['crowdIndex'] + # batch size = 1 + data_batch = [data] + data_samples = [data_sample] + topdown_data.append((data_batch, data_samples)) + + return topdown_data + + def _convert_ann_to_bottomup_batch_data(self, ann_file): + """Convert annotations to bottomup-style batch data.""" + img2ann = defaultdict(list) + db = load(ann_file) + for ann in db['annotations']: + img2ann[ann['image_id']].append(ann) + + bottomup_data = [] + for img_id, anns in 
img2ann.items(): + _keypoints = np.array([ann['keypoints'] for ann in anns]).reshape( + (len(anns), -1, 3)) + + gt_instances = { + 'bbox_scores': np.ones((len(anns)), dtype=np.float32), + 'keypoints': _keypoints[..., :2], + 'keypoints_visible': _keypoints[..., 2:3] + } + + # fake predictions + keypoints = np.zeros_like(_keypoints) + keypoints[..., 0] = _keypoints[..., 0] * 0.99 + keypoints[..., 1] = _keypoints[..., 1] * 1.02 + keypoints[..., 2] = _keypoints[..., 2] * 0.8 + + pred_instances = { + 'keypoints': keypoints[..., :2], + 'keypoint_scores': keypoints[..., -1], + } + + data = {'inputs': None} + data_sample = { + 'id': [ann['id'] for ann in anns], + 'img_id': img_id, + 'gt_instances': gt_instances, + 'pred_instances': pred_instances, + # dummy image_shape for testing + 'ori_shape': [640, 480], + 'raw_ann_info': copy.deepcopy(anns), + } + + # batch size = 1 + data_batch = [data] + data_samples = [data_sample] + bottomup_data.append((data_batch, data_samples)) + return bottomup_data + + def _assert_outfiles(self, prefix): + for part in ['body', 'foot', 'face', 'left_hand', 'right_hand', 'all']: + self.assertTrue( + osp.isfile( + osp.join(self.tmp_dir.name, + f'{prefix}.{part}.keypoints.json'))) + + def tearDown(self): + self.tmp_dir.cleanup() + + def test_init(self): + """test metric init method.""" + # test wrong metric type + with self.assertRaisesRegex( + ValueError, 'Metrics supported by KeypointPartitionMetric'): + _ = KeypointPartitionMetric( + metric=dict(type='Metric'), partitions=dict(all=range(133))) + + # test ann_file arg warning + with self.assertWarnsRegex(UserWarning, + 'does not support the ann_file argument'): + _ = KeypointPartitionMetric( + metric=dict(type='CocoMetric', ann_file=''), + partitions=dict(all=range(133))) + + # test score_mode arg warning + with self.assertWarnsRegex(UserWarning, "if score_mode is not 'bbox'"): + _ = KeypointPartitionMetric( + metric=dict(type='CocoMetric'), + partitions=dict(all=range(133))) + + # test nms arg warning + with self.assertWarnsRegex(UserWarning, 'oks_nms and soft_oks_nms'): + _ = KeypointPartitionMetric( + metric=dict(type='CocoMetric'), + partitions=dict(all=range(133))) + + # test partitions + with self.assertRaisesRegex(AssertionError, 'at least one partition'): + _ = KeypointPartitionMetric( + metric=dict(type='CocoMetric'), partitions=dict()) + + with self.assertRaisesRegex(AssertionError, 'should be a sequence'): + _ = KeypointPartitionMetric( + metric=dict(type='CocoMetric'), partitions=dict(all={})) + + with self.assertRaisesRegex(AssertionError, 'at least one element'): + _ = KeypointPartitionMetric( + metric=dict(type='CocoMetric'), partitions=dict(all=[])) + + def test_bottomup_evaluate(self): + """test bottomup-style COCO metric evaluation.""" + # case1: score_mode='bbox', nms_mode='none' + metric = KeypointPartitionMetric( + metric=dict( + type='CocoMetric', + outfile_prefix=f'{self.tmp_dir.name}/test_bottomup', + score_mode='bbox', + nms_mode='none'), + partitions=dict( + body=range(17), + foot=range(17, 23), + face=range(23, 91), + left_hand=range(91, 112), + right_hand=range(112, 133), + all=range(133))) + metric.dataset_meta = self.dataset_meta_coco + + # process samples + for data_batch, data_samples in self.bottomup_data_coco: + metric.process(data_batch, data_samples) + + eval_results = metric.evaluate(size=len(self.bottomup_data_coco)) + for key in self.target_bbox_none.keys(): + self.assertAlmostEqual( + eval_results[key], self.target_bbox_none[key], places=3) + self._assert_outfiles('test_bottomup') 
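# For orientation, this is roughly the shape of a single top-down sample that
# _convert_ann_to_topdown_batch_data() above feeds into `metric.process()`.
# All numbers here are dummy values for illustration only; the real test data
# additionally carries 'raw_ann_info' (and 'crowd_index' when the image info
# provides it), which this sketch omits.
import numpy as np

example_gt_instances = {
    'bboxes': np.array([[10., 20., 100., 200.]], dtype=np.float32),  # COCO [x, y, w, h]
    'bbox_scales': np.array([[125., 250.]]),  # 1.25 * (w, h)
    'bbox_scores': np.ones((1, ), dtype=np.float32),
    'keypoints': np.zeros((1, 133, 2)),
    'keypoints_visible': np.ones((1, 133, 1)),
}
example_pred_instances = {
    'keypoints': np.zeros((1, 133, 2)),
    'keypoint_scores': np.ones((1, 133)),
}
example_data_batch = [{'inputs': None}]
example_data_samples = [{
    'id': 1,
    'img_id': 1,
    'category_id': 1,
    'gt_instances': example_gt_instances,
    'pred_instances': example_pred_instances,
    'ori_shape': [640, 480],
}]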
+ + def test_topdown_evaluate(self): + """test topdown-style COCO metric evaluation.""" + # case 1: score_mode='bbox', nms_mode='none' + metric = KeypointPartitionMetric( + metric=dict( + type='CocoMetric', + outfile_prefix=f'{self.tmp_dir.name}/test_topdown1', + score_mode='bbox', + nms_mode='none'), + partitions=dict( + body=range(17), + foot=range(17, 23), + face=range(23, 91), + left_hand=range(91, 112), + right_hand=range(112, 133), + all=range(133))) + metric.dataset_meta = self.dataset_meta_coco + + # process samples + for data_batch, data_samples in self.topdown_data_coco: + metric.process(data_batch, data_samples) + + eval_results = metric.evaluate(size=len(self.topdown_data_coco)) + for key in self.target_bbox_none.keys(): + self.assertAlmostEqual( + eval_results[key], self.target_bbox_none[key], places=3) + self._assert_outfiles('test_topdown1') + + +class TestKeypointPartitionMetricWrappingPCKAccuracy(TestCase): + + def setUp(self): + """Setup some variables which are used in every test method. + + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + self.batch_size = 8 + num_keypoints = 24 + self.data_batch = [] + self.data_samples = [] + + for i in range(self.batch_size): + gt_instances = InstanceData() + keypoints = np.zeros((1, num_keypoints, 2)) + for j in range(num_keypoints): + keypoints[0, j] = [0.5 * i * j, 0.5 * i * j] + gt_instances.keypoints = keypoints + gt_instances.keypoints_visible = np.ones( + (1, num_keypoints, 1)).astype(bool) + gt_instances.keypoints_visible[0, (2 * i) % 8, 0] = False + gt_instances.bboxes = np.array([[0.1, 0.2, 0.3, 0.4]]) * 20 * i + gt_instances.head_size = np.array([[0.1]]) * 10 * i + + pred_instances = InstanceData() + # fake predictions + _keypoints = np.zeros_like(keypoints) + _keypoints[0, :, 0] = keypoints[0, :, 0] * 0.95 + _keypoints[0, :, 1] = keypoints[0, :, 1] * 1.05 + pred_instances.keypoints = _keypoints + + data = {'inputs': None} + data_sample = { + 'gt_instances': gt_instances.to_dict(), + 'pred_instances': pred_instances.to_dict(), + } + + self.data_batch.append(data) + self.data_samples.append(data_sample) + + def test_init(self): + # test norm_item arg warning + with self.assertWarnsRegex(UserWarning, + 'norm_item torso is used in JhmdbDataset'): + _ = KeypointPartitionMetric( + metric=dict( + type='PCKAccuracy', thr=0.05, norm_item=['bbox', 'torso']), + partitions=dict(all=range(133))) + + def test_evaluate(self): + """test PCK accuracy evaluation metric.""" + # test normalized by 'bbox' + pck_metric = KeypointPartitionMetric( + metric=dict(type='PCKAccuracy', thr=0.5, norm_item='bbox'), + partitions=dict( + p1=range(10), + p2=range(10, 24), + all=range(24), + )) + pck_metric.process(self.data_batch, self.data_samples) + pck = pck_metric.evaluate(self.batch_size) + target = {'p1/PCK': 1.0, 'p2/PCK': 1.0, 'all/PCK': 1.0} + self.assertDictEqual(pck, target) + + # test normalized by 'head_size' + pckh_metric = KeypointPartitionMetric( + metric=dict(type='PCKAccuracy', thr=0.3, norm_item='head'), + partitions=dict( + p1=range(10), + p2=range(10, 24), + all=range(24), + )) + pckh_metric.process(self.data_batch, self.data_samples) + pckh = pckh_metric.evaluate(self.batch_size) + target = {'p1/PCKh': 0.9, 'p2/PCKh': 0.0, 'all/PCKh': 0.375} + self.assertDictEqual(pckh, target) + + # test normalized by 'torso_size' + tpck_metric = KeypointPartitionMetric( + metric=dict( + type='PCKAccuracy', thr=0.05, norm_item=['bbox', 'torso']), + partitions=dict( + p1=range(10), + p2=range(10, 
24), + all=range(24), + )) + tpck_metric.process(self.data_batch, self.data_samples) + tpck = tpck_metric.evaluate(self.batch_size) + self.assertIsInstance(tpck, dict) + target = { + 'p1/PCK': 0.6, + 'p1/tPCK': 0.11428571428571428, + 'p2/PCK': 0.0, + 'p2/tPCK': 0.0, + 'all/PCK': 0.25, + 'all/tPCK': 0.047619047619047616 + } + self.assertDictEqual(tpck, target) + + +class TestKeypointPartitionMetricWrappingAUCandEPE(TestCase): + + def setUp(self): + """Setup some variables which are used in every test method. + + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + output = np.zeros((1, 5, 2)) + target = np.zeros((1, 5, 2)) + # first channel + output[0, 0] = [10, 4] + target[0, 0] = [10, 0] + # second channel + output[0, 1] = [10, 18] + target[0, 1] = [10, 10] + # third channel + output[0, 2] = [0, 0] + target[0, 2] = [0, -1] + # fourth channel + output[0, 3] = [40, 40] + target[0, 3] = [30, 30] + # fifth channel + output[0, 4] = [20, 10] + target[0, 4] = [0, 10] + + gt_instances = InstanceData() + gt_instances.keypoints = target + gt_instances.keypoints_visible = np.array( + [[True, True, False, True, True]]) + + pred_instances = InstanceData() + pred_instances.keypoints = output + + data = {'inputs': None} + data_sample = { + 'gt_instances': gt_instances.to_dict(), + 'pred_instances': pred_instances.to_dict() + } + + self.data_batch = [data] + self.data_samples = [data_sample] + + def test_auc_evaluate(self): + """test AUC evaluation metric.""" + auc_metric = KeypointPartitionMetric( + metric=dict(type='AUC', norm_factor=20, num_thrs=4), + partitions=dict( + p1=range(3), + p2=range(3, 5), + all=range(5), + )) + auc_metric.process(self.data_batch, self.data_samples) + auc = auc_metric.evaluate(1) + target = {'p1/AUC': 0.625, 'p2/AUC': 0.125, 'all/AUC': 0.375} + self.assertDictEqual(auc, target) + + def test_epe_evaluate(self): + """test EPE evaluation metric.""" + epe_metric = KeypointPartitionMetric( + metric=dict(type='EPE', ), + partitions=dict( + p1=range(3), + p2=range(3, 5), + all=range(5), + )) + epe_metric.process(self.data_batch, self.data_samples) + epe = epe_metric.evaluate(1) + target = { + 'p1/EPE': 6.0, + 'p2/EPE': 17.071067810058594, + 'all/EPE': 11.535533905029297 + } + self.assertDictEqual(epe, target) + + +class TestKeypointPartitionMetricWrappingNME(TestCase): + + def setUp(self): + """Setup some variables which are used in every test method. 
+ + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + self.batch_size = 4 + num_keypoints = 19 + self.data_batch = [] + self.data_samples = [] + + for i in range(self.batch_size): + gt_instances = InstanceData() + keypoints = np.zeros((1, num_keypoints, 2)) + for j in range(num_keypoints): + keypoints[0, j] = [0.5 * i * j, 0.5 * i * j] + gt_instances.keypoints = keypoints + gt_instances.keypoints_visible = np.ones( + (1, num_keypoints, 1)).astype(bool) + gt_instances.keypoints_visible[0, (2 * i) % self.batch_size, + 0] = False + gt_instances['box_size'] = np.array([[0.1]]) * 10 * i + + pred_instances = InstanceData() + # fake predictions + _keypoints = np.zeros_like(keypoints) + _keypoints[0, :, 0] = keypoints[0, :, 0] * 0.95 + _keypoints[0, :, 1] = keypoints[0, :, 1] * 1.05 + pred_instances.keypoints = _keypoints + + data = {'inputs': None} + data_sample = { + 'gt_instances': gt_instances.to_dict(), + 'pred_instances': pred_instances.to_dict(), + } + + self.data_batch.append(data) + self.data_samples.append(data_sample) + + def test_init(self): + # test norm_mode arg missing + with self.assertRaisesRegex(AssertionError, 'Missing norm_mode'): + _ = KeypointPartitionMetric( + metric=dict(type='NME', ), partitions=dict(all=range(133))) + + # test norm_mode = keypoint_distance + with self.assertRaisesRegex(ValueError, + "NME norm_mode 'keypoint_distance'"): + _ = KeypointPartitionMetric( + metric=dict(type='NME', norm_mode='keypoint_distance'), + partitions=dict(all=range(133))) + + def test_nme_evaluate(self): + """test NME evaluation metric.""" + # test when norm_mode = 'use_norm_item' + # test norm_item = 'box_size' like in `AFLWDataset` + nme_metric = KeypointPartitionMetric( + metric=dict( + type='NME', norm_mode='use_norm_item', norm_item='box_size'), + partitions=dict( + p1=range(10), + p2=range(10, 19), + all=range(19), + )) + nme_metric.process(self.data_batch, self.data_samples) + nme = nme_metric.evaluate(4) + target = { + 'p1/NME': 0.1715388651247378, + 'p2/NME': 0.4949747721354167, + 'all/NME': 0.333256827460395 + } + self.assertDictEqual(nme, target) diff --git a/tests/test_evaluation/test_metrics/test_posetrack18_metric.py b/tests/test_evaluation/test_metrics/test_posetrack18_metric.py index fe44047e31..59db9c3d53 100644 --- a/tests/test_evaluation/test_metrics/test_posetrack18_metric.py +++ b/tests/test_evaluation/test_metrics/test_posetrack18_metric.py @@ -1,411 +1,411 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -import tempfile -from collections import defaultdict -from unittest import TestCase - -import numpy as np -from mmengine.fileio import dump, load - -from mmpose.datasets.datasets.utils import parse_pose_metainfo -from mmpose.evaluation.metrics import PoseTrack18Metric - - -class TestPoseTrack18Metric(TestCase): - - def setUp(self): - """Setup some variables which are used in every test method. 
- - TestCase calls functions in this order: setUp() -> testMethod() -> - tearDown() -> cleanUp() - """ - self.tmp_dir = tempfile.TemporaryDirectory() - self.ann_file = 'tests/data/posetrack18/annotations/'\ - 'test_posetrack18_val.json' - posetrack18_meta_info = dict( - from_file='configs/_base_/datasets/posetrack18.py') - self.posetrack18_dataset_meta = parse_pose_metainfo( - posetrack18_meta_info) - - self.db = load(self.ann_file) - - self.topdown_data = self._convert_ann_to_topdown_batch_data() - assert len(self.topdown_data) == 14 - self.bottomup_data = self._convert_ann_to_bottomup_batch_data() - assert len(self.bottomup_data) == 3 - self.target = { - 'posetrack18/Head AP': 100.0, - 'posetrack18/Shou AP': 100.0, - 'posetrack18/Elb AP': 100.0, - 'posetrack18/Wri AP': 100.0, - 'posetrack18/Hip AP': 100.0, - 'posetrack18/Knee AP': 100.0, - 'posetrack18/Ankl AP': 100.0, - 'posetrack18/AP': 100.0, - } - - def _convert_ann_to_topdown_batch_data(self): - """Convert annotations to topdown-style batch data.""" - topdown_data = [] - for ann in self.db['annotations']: - w, h = ann['bbox'][2], ann['bbox'][3] - bboxes = np.array(ann['bbox'], dtype=np.float32).reshape(-1, 4) - bbox_scales = np.array([w * 1.25, h * 1.25]).reshape(-1, 2) - keypoints = np.array(ann['keypoints']).reshape((1, -1, 3)) - - gt_instances = { - 'bbox_scales': bbox_scales, - 'bboxes': bboxes, - 'bbox_scores': np.ones((1, ), dtype=np.float32), - } - pred_instances = { - 'keypoints': keypoints[..., :2], - 'keypoint_scores': keypoints[..., -1], - } - - data = {'inputs': None} - data_sample = { - 'id': ann['id'], - 'img_id': ann['image_id'], - 'gt_instances': gt_instances, - 'pred_instances': pred_instances - } - - # batch size = 1 - data_batch = [data] - data_samples = [data_sample] - topdown_data.append((data_batch, data_samples)) - return topdown_data - - def _convert_ann_to_bottomup_batch_data(self): - """Convert annotations to bottomup-style batch data.""" - img2ann = defaultdict(list) - for ann in self.db['annotations']: - img2ann[ann['image_id']].append(ann) - - bottomup_data = [] - for img_id, anns in img2ann.items(): - keypoints = np.array([ann['keypoints'] for ann in anns]).reshape( - (len(anns), -1, 3)) - - gt_instances = { - 'bbox_scores': np.ones((len(anns)), dtype=np.float32) - } - pred_instances = { - 'keypoints': keypoints[..., :2], - 'keypoint_scores': keypoints[..., -1], - } - - data = {'inputs': None} - data_sample = { - 'id': [ann['id'] for ann in anns], - 'img_id': img_id, - 'gt_instances': gt_instances, - 'pred_instances': pred_instances - } - - # batch size = 1 - data_batch = [data] - data_samples = [data_sample] - bottomup_data.append((data_batch, data_samples)) - return bottomup_data - - def tearDown(self): - self.tmp_dir.cleanup() - - def test_init(self): - """test metric init method.""" - # test score_mode option - with self.assertRaisesRegex(ValueError, - '`score_mode` should be one of'): - _ = PoseTrack18Metric(ann_file=self.ann_file, score_mode='invalid') - - # test nms_mode option - with self.assertRaisesRegex(ValueError, '`nms_mode` should be one of'): - _ = PoseTrack18Metric(ann_file=self.ann_file, nms_mode='invalid') - - # test `format_only` option - with self.assertRaisesRegex( - AssertionError, - '`outfile_prefix` can not be None when `format_only` is True'): - _ = PoseTrack18Metric( - ann_file=self.ann_file, format_only=True, outfile_prefix=None) - - def test_topdown_evaluate(self): - """test topdown-style posetrack18 metric evaluation.""" - # case 1: score_mode='bbox', nms_mode='none' - 
posetrack18_metric = PoseTrack18Metric( - ann_file=self.ann_file, - outfile_prefix=f'{self.tmp_dir.name}/test', - score_mode='bbox', - nms_mode='none') - posetrack18_metric.dataset_meta = self.posetrack18_dataset_meta - - # process samples - for data_batch, data_samples in self.topdown_data: - posetrack18_metric.process(data_batch, data_samples) - - eval_results = posetrack18_metric.evaluate(size=len(self.topdown_data)) - - self.assertDictEqual(eval_results, self.target) - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, '003418_mpii_test.json'))) - - # case 2: score_mode='bbox_keypoint', nms_mode='oks_nms' - posetrack18_metric = PoseTrack18Metric( - ann_file=self.ann_file, - outfile_prefix=f'{self.tmp_dir.name}/test', - score_mode='bbox_keypoint', - nms_mode='oks_nms') - posetrack18_metric.dataset_meta = self.posetrack18_dataset_meta - - # process samples - for data_batch, data_samples in self.topdown_data: - posetrack18_metric.process(data_batch, data_samples) - - eval_results = posetrack18_metric.evaluate(size=len(self.topdown_data)) - - self.assertDictEqual(eval_results, self.target) - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, '009473_mpii_test.json'))) - - # case 3: score_mode='bbox_keypoint', nms_mode='soft_oks_nms' - posetrack18_metric = PoseTrack18Metric( - ann_file=self.ann_file, - outfile_prefix=f'{self.tmp_dir.name}/test', - score_mode='bbox_keypoint', - nms_mode='soft_oks_nms') - posetrack18_metric.dataset_meta = self.posetrack18_dataset_meta - - # process samples - for data_batch, data_samples in self.topdown_data: - posetrack18_metric.process(data_batch, data_samples) - - eval_results = posetrack18_metric.evaluate(size=len(self.topdown_data)) - - self.assertDictEqual(eval_results, self.target) - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, '012834_mpii_test.json'))) - - def test_bottomup_evaluate(self): - """test bottomup-style posetrack18 metric evaluation.""" - # case 1: score_mode='bbox', nms_mode='none' - posetrack18_metric = PoseTrack18Metric( - ann_file=self.ann_file, outfile_prefix=f'{self.tmp_dir.name}/test') - posetrack18_metric.dataset_meta = self.posetrack18_dataset_meta - - # process samples - for data_batch, data_samples in self.bottomup_data: - posetrack18_metric.process(data_batch, data_samples) - - eval_results = posetrack18_metric.evaluate( - size=len(self.bottomup_data)) - self.assertDictEqual(eval_results, self.target) - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, '009473_mpii_test.json'))) - - def test_other_methods(self): - """test other useful methods.""" - # test `_sort_and_unique_bboxes` method - posetrack18_metric = PoseTrack18Metric(ann_file=self.ann_file) - posetrack18_metric.dataset_meta = self.posetrack18_dataset_meta - # process samples - for data_batch, data_samples in self.topdown_data: - posetrack18_metric.process(data_batch, data_samples) - # process one extra sample - data_batch, data_samples = self.topdown_data[0] - posetrack18_metric.process(data_batch, data_samples) - # an extra sample - eval_results = posetrack18_metric.evaluate( - size=len(self.topdown_data) + 1) - self.assertDictEqual(eval_results, self.target) - - def test_format_only(self): - """test `format_only` option.""" - posetrack18_metric = PoseTrack18Metric( - ann_file=self.ann_file, - format_only=True, - outfile_prefix=f'{self.tmp_dir.name}/test') - posetrack18_metric.dataset_meta = self.posetrack18_dataset_meta - # process samples - for data_batch, data_samples in self.topdown_data: - posetrack18_metric.process(data_batch, 
data_samples) - eval_results = posetrack18_metric.evaluate(size=len(self.topdown_data)) - self.assertDictEqual(eval_results, {}) - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, '012834_mpii_test.json'))) - - # test when gt annotations are absent - db_ = load(self.ann_file) - del db_['annotations'] - tmp_ann_file = osp.join(self.tmp_dir.name, 'temp_ann.json') - dump(db_, tmp_ann_file, sort_keys=True, indent=4) - with self.assertRaisesRegex( - AssertionError, - 'Ground truth annotations are required for evaluation'): - _ = PoseTrack18Metric(ann_file=tmp_ann_file, format_only=False) - - def test_topdown_alignment(self): - """Test whether the output of PoseTrack18Metric and the original - TopDownPoseTrack18Dataset are the same.""" - self.skipTest('Skip test.') - topdown_data = [] - for ann in self.db['annotations']: - w, h = ann['bbox'][2], ann['bbox'][3] - bboxes = np.array(ann['bbox'], dtype=np.float32).reshape(-1, 4) - bbox_scales = np.array([w * 1.25, h * 1.25]).reshape(-1, 2) - - keypoints = np.array( - ann['keypoints'], dtype=np.float32).reshape(1, 17, 3) - keypoints[..., 0] = keypoints[..., 0] * 0.98 - keypoints[..., 1] = keypoints[..., 1] * 1.02 - keypoints[..., 2] = keypoints[..., 2] * 0.8 - - gt_instances = { - 'bbox_scales': bbox_scales, - 'bbox_scores': np.ones((1, ), dtype=np.float32) * 0.98, - 'bboxes': bboxes, - } - pred_instances = { - 'keypoints': keypoints[..., :2], - 'keypoint_scores': keypoints[..., -1], - } - - data = {'inputs': None} - data_sample = { - 'id': ann['id'], - 'img_id': ann['image_id'], - 'gt_instances': gt_instances, - 'pred_instances': pred_instances - } - - # batch size = 1 - data_batch = [data] - data_samples = [data_sample] - topdown_data.append((data_batch, data_samples)) - - # case 1: - # typical setting: score_mode='bbox_keypoint', nms_mode='oks_nms' - posetrack18_metric = PoseTrack18Metric( - ann_file=self.ann_file, - outfile_prefix=f'{self.tmp_dir.name}/test', - score_mode='bbox_keypoint', - nms_mode='oks_nms') - posetrack18_metric.dataset_meta = self.posetrack18_dataset_meta - - # process samples - for data_batch, data_samples in topdown_data: - posetrack18_metric.process(data_batch, data_samples) - - eval_results = posetrack18_metric.evaluate(size=len(topdown_data)) - - target = { - 'posetrack18/Head AP': 84.6677132391418, - 'posetrack18/Shou AP': 80.86734693877551, - 'posetrack18/Elb AP': 83.0204081632653, - 'posetrack18/Wri AP': 85.12396694214877, - 'posetrack18/Hip AP': 75.14792899408285, - 'posetrack18/Knee AP': 66.76515151515152, - 'posetrack18/Ankl AP': 71.78571428571428, - 'posetrack18/Total AP': 78.62827822638012, - } - - for key in eval_results.keys(): - self.assertAlmostEqual(eval_results[key], target[key]) - - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, '012834_mpii_test.json'))) - - topdown_data = [] - anns = self.db['annotations'] - for i, ann in enumerate(anns): - w, h = ann['bbox'][2], ann['bbox'][3] - bboxes = np.array(ann['bbox'], dtype=np.float32).reshape(-1, 4) - bbox_scales = np.array([w * 1.25, h * 1.25]).reshape(-1, 2) - - keypoints = np.array( - ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) - keypoints[..., 0] = keypoints[..., 0] * (1 - i / 100) - keypoints[..., 1] = keypoints[..., 1] * (1 + i / 100) - keypoints[..., 2] = keypoints[..., 2] * (1 - i / 100) - - gt_instances0 = { - 'bbox_scales': bbox_scales, - 'bbox_scores': np.ones((1, ), dtype=np.float32), - 'bboxes': bboxes, - } - pred_instances0 = { - 'keypoints': keypoints[..., :2], - 'keypoint_scores': keypoints[..., -1], - } - - data0 = 
{'inputs': None} - data_sample0 = { - 'id': ann['id'], - 'img_id': ann['image_id'], - 'gt_instances': gt_instances0, - 'pred_instances': pred_instances0 - } - - keypoints = np.array( - ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) - keypoints[..., 0] = keypoints[..., 0] * (1 + i / 100) - keypoints[..., 1] = keypoints[..., 1] * (1 - i / 100) - keypoints[..., 2] = keypoints[..., 2] * (1 - 2 * i / 100) - - gt_instances1 = { - 'bbox_scales': bbox_scales, - 'bboxes': bboxes, - 'bbox_scores': np.ones( - (1, ), dtype=np.float32) * (1 - 2 * i / 100) - } - pred_instances1 = { - 'keypoints': keypoints[..., :2], - 'keypoint_scores': keypoints[..., -1], - } - - data1 = {'inputs': None} - data_sample1 = { - 'id': ann['id'] + 1, - 'img_id': ann['image_id'], - 'gt_instances': gt_instances1, - 'pred_instances': pred_instances1 - } - - # batch size = 2 - data_batch = [data0, data1] - data_samples = [data_sample0, data_sample1] - topdown_data.append((data_batch, data_samples)) - - # case 3: score_mode='bbox_keypoint', nms_mode='soft_oks_nms' - posetrack18_metric = PoseTrack18Metric( - ann_file=self.ann_file, - outfile_prefix=f'{self.tmp_dir.name}/test', - score_mode='bbox_keypoint', - keypoint_score_thr=0.2, - nms_thr=0.9, - nms_mode='soft_oks_nms') - posetrack18_metric.dataset_meta = self.posetrack18_dataset_meta - - # process samples - for data_batch, data_samples in topdown_data: - posetrack18_metric.process(data_batch, data_samples) - - eval_results = posetrack18_metric.evaluate(size=len(topdown_data) * 2) - - target = { - 'posetrack18/Head AP': 27.1062271062271068, - 'posetrack18/Shou AP': 25.918367346938776, - 'posetrack18/Elb AP': 22.67857142857143, - 'posetrack18/Wri AP': 29.090909090909093, - 'posetrack18/Hip AP': 18.40659340659341, - 'posetrack18/Knee AP': 32.0, - 'posetrack18/Ankl AP': 20.0, - 'posetrack18/Total AP': 25.167170924313783, - } - - for key in eval_results.keys(): - self.assertAlmostEqual(eval_results[key], target[key]) - - self.assertTrue( - osp.isfile(osp.join(self.tmp_dir.name, '009473_mpii_test.json'))) +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import tempfile +from collections import defaultdict +from unittest import TestCase + +import numpy as np +from mmengine.fileio import dump, load + +from mmpose.datasets.datasets.utils import parse_pose_metainfo +from mmpose.evaluation.metrics import PoseTrack18Metric + + +class TestPoseTrack18Metric(TestCase): + + def setUp(self): + """Setup some variables which are used in every test method. 
+ + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + self.tmp_dir = tempfile.TemporaryDirectory() + self.ann_file = 'tests/data/posetrack18/annotations/'\ + 'test_posetrack18_val.json' + posetrack18_meta_info = dict( + from_file='configs/_base_/datasets/posetrack18.py') + self.posetrack18_dataset_meta = parse_pose_metainfo( + posetrack18_meta_info) + + self.db = load(self.ann_file) + + self.topdown_data = self._convert_ann_to_topdown_batch_data() + assert len(self.topdown_data) == 14 + self.bottomup_data = self._convert_ann_to_bottomup_batch_data() + assert len(self.bottomup_data) == 3 + self.target = { + 'posetrack18/Head AP': 100.0, + 'posetrack18/Shou AP': 100.0, + 'posetrack18/Elb AP': 100.0, + 'posetrack18/Wri AP': 100.0, + 'posetrack18/Hip AP': 100.0, + 'posetrack18/Knee AP': 100.0, + 'posetrack18/Ankl AP': 100.0, + 'posetrack18/AP': 100.0, + } + + def _convert_ann_to_topdown_batch_data(self): + """Convert annotations to topdown-style batch data.""" + topdown_data = [] + for ann in self.db['annotations']: + w, h = ann['bbox'][2], ann['bbox'][3] + bboxes = np.array(ann['bbox'], dtype=np.float32).reshape(-1, 4) + bbox_scales = np.array([w * 1.25, h * 1.25]).reshape(-1, 2) + keypoints = np.array(ann['keypoints']).reshape((1, -1, 3)) + + gt_instances = { + 'bbox_scales': bbox_scales, + 'bboxes': bboxes, + 'bbox_scores': np.ones((1, ), dtype=np.float32), + } + pred_instances = { + 'keypoints': keypoints[..., :2], + 'keypoint_scores': keypoints[..., -1], + } + + data = {'inputs': None} + data_sample = { + 'id': ann['id'], + 'img_id': ann['image_id'], + 'gt_instances': gt_instances, + 'pred_instances': pred_instances + } + + # batch size = 1 + data_batch = [data] + data_samples = [data_sample] + topdown_data.append((data_batch, data_samples)) + return topdown_data + + def _convert_ann_to_bottomup_batch_data(self): + """Convert annotations to bottomup-style batch data.""" + img2ann = defaultdict(list) + for ann in self.db['annotations']: + img2ann[ann['image_id']].append(ann) + + bottomup_data = [] + for img_id, anns in img2ann.items(): + keypoints = np.array([ann['keypoints'] for ann in anns]).reshape( + (len(anns), -1, 3)) + + gt_instances = { + 'bbox_scores': np.ones((len(anns)), dtype=np.float32) + } + pred_instances = { + 'keypoints': keypoints[..., :2], + 'keypoint_scores': keypoints[..., -1], + } + + data = {'inputs': None} + data_sample = { + 'id': [ann['id'] for ann in anns], + 'img_id': img_id, + 'gt_instances': gt_instances, + 'pred_instances': pred_instances + } + + # batch size = 1 + data_batch = [data] + data_samples = [data_sample] + bottomup_data.append((data_batch, data_samples)) + return bottomup_data + + def tearDown(self): + self.tmp_dir.cleanup() + + def test_init(self): + """test metric init method.""" + # test score_mode option + with self.assertRaisesRegex(ValueError, + '`score_mode` should be one of'): + _ = PoseTrack18Metric(ann_file=self.ann_file, score_mode='invalid') + + # test nms_mode option + with self.assertRaisesRegex(ValueError, '`nms_mode` should be one of'): + _ = PoseTrack18Metric(ann_file=self.ann_file, nms_mode='invalid') + + # test `format_only` option + with self.assertRaisesRegex( + AssertionError, + '`outfile_prefix` can not be None when `format_only` is True'): + _ = PoseTrack18Metric( + ann_file=self.ann_file, format_only=True, outfile_prefix=None) + + def test_topdown_evaluate(self): + """test topdown-style posetrack18 metric evaluation.""" + # case 1: score_mode='bbox', nms_mode='none' + 
posetrack18_metric = PoseTrack18Metric( + ann_file=self.ann_file, + outfile_prefix=f'{self.tmp_dir.name}/test', + score_mode='bbox', + nms_mode='none') + posetrack18_metric.dataset_meta = self.posetrack18_dataset_meta + + # process samples + for data_batch, data_samples in self.topdown_data: + posetrack18_metric.process(data_batch, data_samples) + + eval_results = posetrack18_metric.evaluate(size=len(self.topdown_data)) + + self.assertDictEqual(eval_results, self.target) + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, '003418_mpii_test.json'))) + + # case 2: score_mode='bbox_keypoint', nms_mode='oks_nms' + posetrack18_metric = PoseTrack18Metric( + ann_file=self.ann_file, + outfile_prefix=f'{self.tmp_dir.name}/test', + score_mode='bbox_keypoint', + nms_mode='oks_nms') + posetrack18_metric.dataset_meta = self.posetrack18_dataset_meta + + # process samples + for data_batch, data_samples in self.topdown_data: + posetrack18_metric.process(data_batch, data_samples) + + eval_results = posetrack18_metric.evaluate(size=len(self.topdown_data)) + + self.assertDictEqual(eval_results, self.target) + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, '009473_mpii_test.json'))) + + # case 3: score_mode='bbox_keypoint', nms_mode='soft_oks_nms' + posetrack18_metric = PoseTrack18Metric( + ann_file=self.ann_file, + outfile_prefix=f'{self.tmp_dir.name}/test', + score_mode='bbox_keypoint', + nms_mode='soft_oks_nms') + posetrack18_metric.dataset_meta = self.posetrack18_dataset_meta + + # process samples + for data_batch, data_samples in self.topdown_data: + posetrack18_metric.process(data_batch, data_samples) + + eval_results = posetrack18_metric.evaluate(size=len(self.topdown_data)) + + self.assertDictEqual(eval_results, self.target) + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, '012834_mpii_test.json'))) + + def test_bottomup_evaluate(self): + """test bottomup-style posetrack18 metric evaluation.""" + # case 1: score_mode='bbox', nms_mode='none' + posetrack18_metric = PoseTrack18Metric( + ann_file=self.ann_file, outfile_prefix=f'{self.tmp_dir.name}/test') + posetrack18_metric.dataset_meta = self.posetrack18_dataset_meta + + # process samples + for data_batch, data_samples in self.bottomup_data: + posetrack18_metric.process(data_batch, data_samples) + + eval_results = posetrack18_metric.evaluate( + size=len(self.bottomup_data)) + self.assertDictEqual(eval_results, self.target) + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, '009473_mpii_test.json'))) + + def test_other_methods(self): + """test other useful methods.""" + # test `_sort_and_unique_bboxes` method + posetrack18_metric = PoseTrack18Metric(ann_file=self.ann_file) + posetrack18_metric.dataset_meta = self.posetrack18_dataset_meta + # process samples + for data_batch, data_samples in self.topdown_data: + posetrack18_metric.process(data_batch, data_samples) + # process one extra sample + data_batch, data_samples = self.topdown_data[0] + posetrack18_metric.process(data_batch, data_samples) + # an extra sample + eval_results = posetrack18_metric.evaluate( + size=len(self.topdown_data) + 1) + self.assertDictEqual(eval_results, self.target) + + def test_format_only(self): + """test `format_only` option.""" + posetrack18_metric = PoseTrack18Metric( + ann_file=self.ann_file, + format_only=True, + outfile_prefix=f'{self.tmp_dir.name}/test') + posetrack18_metric.dataset_meta = self.posetrack18_dataset_meta + # process samples + for data_batch, data_samples in self.topdown_data: + posetrack18_metric.process(data_batch, 
data_samples) + eval_results = posetrack18_metric.evaluate(size=len(self.topdown_data)) + self.assertDictEqual(eval_results, {}) + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, '012834_mpii_test.json'))) + + # test when gt annotations are absent + db_ = load(self.ann_file) + del db_['annotations'] + tmp_ann_file = osp.join(self.tmp_dir.name, 'temp_ann.json') + dump(db_, tmp_ann_file, sort_keys=True, indent=4) + with self.assertRaisesRegex( + AssertionError, + 'Ground truth annotations are required for evaluation'): + _ = PoseTrack18Metric(ann_file=tmp_ann_file, format_only=False) + + def test_topdown_alignment(self): + """Test whether the output of PoseTrack18Metric and the original + TopDownPoseTrack18Dataset are the same.""" + self.skipTest('Skip test.') + topdown_data = [] + for ann in self.db['annotations']: + w, h = ann['bbox'][2], ann['bbox'][3] + bboxes = np.array(ann['bbox'], dtype=np.float32).reshape(-1, 4) + bbox_scales = np.array([w * 1.25, h * 1.25]).reshape(-1, 2) + + keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, 17, 3) + keypoints[..., 0] = keypoints[..., 0] * 0.98 + keypoints[..., 1] = keypoints[..., 1] * 1.02 + keypoints[..., 2] = keypoints[..., 2] * 0.8 + + gt_instances = { + 'bbox_scales': bbox_scales, + 'bbox_scores': np.ones((1, ), dtype=np.float32) * 0.98, + 'bboxes': bboxes, + } + pred_instances = { + 'keypoints': keypoints[..., :2], + 'keypoint_scores': keypoints[..., -1], + } + + data = {'inputs': None} + data_sample = { + 'id': ann['id'], + 'img_id': ann['image_id'], + 'gt_instances': gt_instances, + 'pred_instances': pred_instances + } + + # batch size = 1 + data_batch = [data] + data_samples = [data_sample] + topdown_data.append((data_batch, data_samples)) + + # case 1: + # typical setting: score_mode='bbox_keypoint', nms_mode='oks_nms' + posetrack18_metric = PoseTrack18Metric( + ann_file=self.ann_file, + outfile_prefix=f'{self.tmp_dir.name}/test', + score_mode='bbox_keypoint', + nms_mode='oks_nms') + posetrack18_metric.dataset_meta = self.posetrack18_dataset_meta + + # process samples + for data_batch, data_samples in topdown_data: + posetrack18_metric.process(data_batch, data_samples) + + eval_results = posetrack18_metric.evaluate(size=len(topdown_data)) + + target = { + 'posetrack18/Head AP': 84.6677132391418, + 'posetrack18/Shou AP': 80.86734693877551, + 'posetrack18/Elb AP': 83.0204081632653, + 'posetrack18/Wri AP': 85.12396694214877, + 'posetrack18/Hip AP': 75.14792899408285, + 'posetrack18/Knee AP': 66.76515151515152, + 'posetrack18/Ankl AP': 71.78571428571428, + 'posetrack18/Total AP': 78.62827822638012, + } + + for key in eval_results.keys(): + self.assertAlmostEqual(eval_results[key], target[key]) + + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, '012834_mpii_test.json'))) + + topdown_data = [] + anns = self.db['annotations'] + for i, ann in enumerate(anns): + w, h = ann['bbox'][2], ann['bbox'][3] + bboxes = np.array(ann['bbox'], dtype=np.float32).reshape(-1, 4) + bbox_scales = np.array([w * 1.25, h * 1.25]).reshape(-1, 2) + + keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + keypoints[..., 0] = keypoints[..., 0] * (1 - i / 100) + keypoints[..., 1] = keypoints[..., 1] * (1 + i / 100) + keypoints[..., 2] = keypoints[..., 2] * (1 - i / 100) + + gt_instances0 = { + 'bbox_scales': bbox_scales, + 'bbox_scores': np.ones((1, ), dtype=np.float32), + 'bboxes': bboxes, + } + pred_instances0 = { + 'keypoints': keypoints[..., :2], + 'keypoint_scores': keypoints[..., -1], + } + + data0 = 
{'inputs': None} + data_sample0 = { + 'id': ann['id'], + 'img_id': ann['image_id'], + 'gt_instances': gt_instances0, + 'pred_instances': pred_instances0 + } + + keypoints = np.array( + ann['keypoints'], dtype=np.float32).reshape(1, -1, 3) + keypoints[..., 0] = keypoints[..., 0] * (1 + i / 100) + keypoints[..., 1] = keypoints[..., 1] * (1 - i / 100) + keypoints[..., 2] = keypoints[..., 2] * (1 - 2 * i / 100) + + gt_instances1 = { + 'bbox_scales': bbox_scales, + 'bboxes': bboxes, + 'bbox_scores': np.ones( + (1, ), dtype=np.float32) * (1 - 2 * i / 100) + } + pred_instances1 = { + 'keypoints': keypoints[..., :2], + 'keypoint_scores': keypoints[..., -1], + } + + data1 = {'inputs': None} + data_sample1 = { + 'id': ann['id'] + 1, + 'img_id': ann['image_id'], + 'gt_instances': gt_instances1, + 'pred_instances': pred_instances1 + } + + # batch size = 2 + data_batch = [data0, data1] + data_samples = [data_sample0, data_sample1] + topdown_data.append((data_batch, data_samples)) + + # case 3: score_mode='bbox_keypoint', nms_mode='soft_oks_nms' + posetrack18_metric = PoseTrack18Metric( + ann_file=self.ann_file, + outfile_prefix=f'{self.tmp_dir.name}/test', + score_mode='bbox_keypoint', + keypoint_score_thr=0.2, + nms_thr=0.9, + nms_mode='soft_oks_nms') + posetrack18_metric.dataset_meta = self.posetrack18_dataset_meta + + # process samples + for data_batch, data_samples in topdown_data: + posetrack18_metric.process(data_batch, data_samples) + + eval_results = posetrack18_metric.evaluate(size=len(topdown_data) * 2) + + target = { + 'posetrack18/Head AP': 27.1062271062271068, + 'posetrack18/Shou AP': 25.918367346938776, + 'posetrack18/Elb AP': 22.67857142857143, + 'posetrack18/Wri AP': 29.090909090909093, + 'posetrack18/Hip AP': 18.40659340659341, + 'posetrack18/Knee AP': 32.0, + 'posetrack18/Ankl AP': 20.0, + 'posetrack18/Total AP': 25.167170924313783, + } + + for key in eval_results.keys(): + self.assertAlmostEqual(eval_results[key], target[key]) + + self.assertTrue( + osp.isfile(osp.join(self.tmp_dir.name, '009473_mpii_test.json'))) diff --git a/tests/test_models/test_backbones/test_alexnet.py b/tests/test_models/test_backbones/test_alexnet.py index 9e0aed92e6..6a79ffbbd1 100644 --- a/tests/test_models/test_backbones/test_alexnet.py +++ b/tests/test_models/test_backbones/test_alexnet.py @@ -1,27 +1,27 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import torch - -from mmpose.models.backbones import AlexNet - - -class TestAlexNet(TestCase): - - def test_alexnet_backbone(self): - """Test alexnet backbone.""" - model = AlexNet(-1) - model.train() - - imgs = torch.randn(1, 3, 256, 192) - feat = model(imgs) - self.assertIsInstance(feat, tuple) - self.assertEqual(feat[-1].shape, (1, 256, 7, 5)) - - model = AlexNet(1) - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertIsInstance(feat, tuple) - self.assertEqual(feat[-1].shape, (1, 1)) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import torch + +from mmpose.models.backbones import AlexNet + + +class TestAlexNet(TestCase): + + def test_alexnet_backbone(self): + """Test alexnet backbone.""" + model = AlexNet(-1) + model.train() + + imgs = torch.randn(1, 3, 256, 192) + feat = model(imgs) + self.assertIsInstance(feat, tuple) + self.assertEqual(feat[-1].shape, (1, 256, 7, 5)) + + model = AlexNet(1) + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertIsInstance(feat, tuple) + self.assertEqual(feat[-1].shape, (1, 1)) diff --git a/tests/test_models/test_backbones/test_backbones_utils.py b/tests/test_models/test_backbones/test_backbones_utils.py index 1e08143429..f8eb5d2aa5 100644 --- a/tests/test_models/test_backbones/test_backbones_utils.py +++ b/tests/test_models/test_backbones/test_backbones_utils.py @@ -1,119 +1,119 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import torch -from torch.nn.modules import GroupNorm -from torch.nn.modules.batchnorm import _BatchNorm - -from mmpose.models.backbones.utils import (InvertedResidual, SELayer, - channel_shuffle, make_divisible) - - -class TestBackboneUtils(TestCase): - - @staticmethod - def is_norm(modules): - """Check if is one of the norms.""" - if isinstance(modules, (GroupNorm, _BatchNorm)): - return True - return False - - def test_make_divisible(self): - # test min_value is None - result = make_divisible(34, 8, None) - self.assertEqual(result, 32) - - # test when new_value > min_ratio * value - result = make_divisible(10, 8, min_ratio=0.9) - self.assertEqual(result, 16) - - # test min_value = 0.8 - result = make_divisible(33, 8, min_ratio=0.8) - self.assertEqual(result, 32) - - def test_channel_shuffle(self): - x = torch.randn(1, 24, 56, 56) - with self.assertRaisesRegex( - AssertionError, 'num_channels should be divisible by groups'): - channel_shuffle(x, 7) - - groups = 3 - batch_size, num_channels, height, width = x.size() - channels_per_group = num_channels // groups - out = channel_shuffle(x, groups) - # test the output value when groups = 3 - for b in range(batch_size): - for c in range(num_channels): - c_out = c % channels_per_group * groups + \ - c // channels_per_group - for i in range(height): - for j in range(width): - self.assertEqual(x[b, c, i, j], out[b, c_out, i, j]) - - def test_inverted_residual(self): - - with self.assertRaises(AssertionError): - # stride must be in [1, 2] - InvertedResidual(16, 16, 32, stride=3) - - with self.assertRaises(AssertionError): - # se_cfg must be None or dict - InvertedResidual(16, 16, 32, se_cfg=list()) - - with self.assertRaises(AssertionError): - # in_channeld and out_channels must be the same if - # with_expand_conv is False - InvertedResidual(16, 16, 32, with_expand_conv=False) - - # Test InvertedResidual forward, stride=1 - block = InvertedResidual(16, 16, 32, stride=1) - x = torch.randn(1, 16, 56, 56) - x_out = block(x) - self.assertIsNone(getattr(block, 'se', None)) - self.assertTrue(block.with_res_shortcut) - self.assertEqual(x_out.shape, torch.Size((1, 16, 56, 56))) - - # Test InvertedResidual forward, stride=2 - block = InvertedResidual(16, 16, 32, stride=2) - x = torch.randn(1, 16, 56, 56) - x_out = block(x) - self.assertFalse(block.with_res_shortcut) - self.assertEqual(x_out.shape, torch.Size((1, 16, 28, 28))) - - # Test InvertedResidual forward with se layer - se_cfg = dict(channels=32) - block = InvertedResidual(16, 16, 32, stride=1, se_cfg=se_cfg) - x = torch.randn(1, 16, 56, 56) - x_out = block(x) - 
self.assertIsInstance(block.se, SELayer) - self.assertEqual(x_out.shape, torch.Size((1, 16, 56, 56))) - - # Test InvertedResidual forward, with_expand_conv=False - block = InvertedResidual(32, 16, 32, with_expand_conv=False) - x = torch.randn(1, 32, 56, 56) - x_out = block(x) - self.assertIsNone(getattr(block, 'expand_conv', None)) - self.assertEqual(x_out.shape, torch.Size((1, 16, 56, 56))) - - # Test InvertedResidual forward with GroupNorm - block = InvertedResidual( - 16, 16, 32, norm_cfg=dict(type='GN', num_groups=2)) - x = torch.randn(1, 16, 56, 56) - x_out = block(x) - for m in block.modules(): - if self.is_norm(m): - self.assertIsInstance(m, GroupNorm) - self.assertEqual(x_out.shape, torch.Size((1, 16, 56, 56))) - - # Test InvertedResidual forward with HSigmoid - block = InvertedResidual(16, 16, 32, act_cfg=dict(type='HSigmoid')) - x = torch.randn(1, 16, 56, 56) - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size((1, 16, 56, 56))) - - # Test InvertedResidual forward with checkpoint - block = InvertedResidual(16, 16, 32, with_cp=True) - x = torch.randn(1, 16, 56, 56) - x_out = block(x) - self.assertTrue(block.with_cp) - self.assertEqual(x_out.shape, torch.Size((1, 16, 56, 56))) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.models.backbones.utils import (InvertedResidual, SELayer, + channel_shuffle, make_divisible) + + +class TestBackboneUtils(TestCase): + + @staticmethod + def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + def test_make_divisible(self): + # test min_value is None + result = make_divisible(34, 8, None) + self.assertEqual(result, 32) + + # test when new_value > min_ratio * value + result = make_divisible(10, 8, min_ratio=0.9) + self.assertEqual(result, 16) + + # test min_value = 0.8 + result = make_divisible(33, 8, min_ratio=0.8) + self.assertEqual(result, 32) + + def test_channel_shuffle(self): + x = torch.randn(1, 24, 56, 56) + with self.assertRaisesRegex( + AssertionError, 'num_channels should be divisible by groups'): + channel_shuffle(x, 7) + + groups = 3 + batch_size, num_channels, height, width = x.size() + channels_per_group = num_channels // groups + out = channel_shuffle(x, groups) + # test the output value when groups = 3 + for b in range(batch_size): + for c in range(num_channels): + c_out = c % channels_per_group * groups + \ + c // channels_per_group + for i in range(height): + for j in range(width): + self.assertEqual(x[b, c, i, j], out[b, c_out, i, j]) + + def test_inverted_residual(self): + + with self.assertRaises(AssertionError): + # stride must be in [1, 2] + InvertedResidual(16, 16, 32, stride=3) + + with self.assertRaises(AssertionError): + # se_cfg must be None or dict + InvertedResidual(16, 16, 32, se_cfg=list()) + + with self.assertRaises(AssertionError): + # in_channeld and out_channels must be the same if + # with_expand_conv is False + InvertedResidual(16, 16, 32, with_expand_conv=False) + + # Test InvertedResidual forward, stride=1 + block = InvertedResidual(16, 16, 32, stride=1) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + self.assertIsNone(getattr(block, 'se', None)) + self.assertTrue(block.with_res_shortcut) + self.assertEqual(x_out.shape, torch.Size((1, 16, 56, 56))) + + # Test InvertedResidual forward, stride=2 + block = InvertedResidual(16, 16, 32, stride=2) + x = 
torch.randn(1, 16, 56, 56) + x_out = block(x) + self.assertFalse(block.with_res_shortcut) + self.assertEqual(x_out.shape, torch.Size((1, 16, 28, 28))) + + # Test InvertedResidual forward with se layer + se_cfg = dict(channels=32) + block = InvertedResidual(16, 16, 32, stride=1, se_cfg=se_cfg) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + self.assertIsInstance(block.se, SELayer) + self.assertEqual(x_out.shape, torch.Size((1, 16, 56, 56))) + + # Test InvertedResidual forward, with_expand_conv=False + block = InvertedResidual(32, 16, 32, with_expand_conv=False) + x = torch.randn(1, 32, 56, 56) + x_out = block(x) + self.assertIsNone(getattr(block, 'expand_conv', None)) + self.assertEqual(x_out.shape, torch.Size((1, 16, 56, 56))) + + # Test InvertedResidual forward with GroupNorm + block = InvertedResidual( + 16, 16, 32, norm_cfg=dict(type='GN', num_groups=2)) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + for m in block.modules(): + if self.is_norm(m): + self.assertIsInstance(m, GroupNorm) + self.assertEqual(x_out.shape, torch.Size((1, 16, 56, 56))) + + # Test InvertedResidual forward with HSigmoid + block = InvertedResidual(16, 16, 32, act_cfg=dict(type='HSigmoid')) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size((1, 16, 56, 56))) + + # Test InvertedResidual forward with checkpoint + block = InvertedResidual(16, 16, 32, with_cp=True) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + self.assertTrue(block.with_cp) + self.assertEqual(x_out.shape, torch.Size((1, 16, 56, 56))) diff --git a/tests/test_models/test_backbones/test_cpm.py b/tests/test_models/test_backbones/test_cpm.py index a2b3f5296c..f06b679894 100644 --- a/tests/test_models/test_backbones/test_cpm.py +++ b/tests/test_models/test_backbones/test_cpm.py @@ -1,66 +1,66 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import torch - -from mmpose.models.backbones import CPM -from mmpose.models.backbones.cpm import CpmBlock - - -class TestCPM(TestCase): - - def test_cpm_block(self): - with self.assertRaises(AssertionError): - # len(channels) == len(kernels) - CpmBlock( - 3, channels=[3, 3, 3], kernels=[ - 1, - ]) - - # Test CPM Block - model = CpmBlock(3, channels=[3, 3, 3], kernels=[1, 1, 1]) - model.train() - - imgs = torch.randn(1, 3, 10, 10) - feat = model(imgs) - self.assertEqual(feat.shape, torch.Size([1, 3, 10, 10])) - - def test_cpm_backbone(self): - with self.assertRaises(AssertionError): - # CPM's num_stacks should larger than 0 - CPM(in_channels=3, out_channels=17, num_stages=-1) - - with self.assertRaises(AssertionError): - # CPM's in_channels should be 3 - CPM(in_channels=2, out_channels=17) - - # Test CPM - model = CPM(in_channels=3, out_channels=17, num_stages=1) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 256, 192) - feat = model(imgs) - self.assertEqual(len(feat), 1) - self.assertEqual(feat[0].shape, torch.Size([1, 17, 32, 24])) - - imgs = torch.randn(1, 3, 384, 288) - feat = model(imgs) - self.assertEqual(len(feat), 1) - self.assertEqual(feat[0].shape, torch.Size([1, 17, 48, 36])) - - imgs = torch.randn(1, 3, 368, 368) - feat = model(imgs) - self.assertEqual(len(feat), 1) - self.assertEqual(feat[0].shape, torch.Size([1, 17, 46, 46])) - - # Test CPM multi-stages - model = CPM(in_channels=3, out_channels=17, num_stages=2) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 368, 368) - feat = model(imgs) - self.assertEqual(len(feat), 2) - self.assertEqual(feat[0].shape, torch.Size([1, 17, 46, 46])) - self.assertEqual(feat[1].shape, torch.Size([1, 17, 46, 46])) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase
+
+import torch
+
+from mmpose.models.backbones import CPM
+from mmpose.models.backbones.cpm import CpmBlock
+
+
+class TestCPM(TestCase):
+
+    def test_cpm_block(self):
+        with self.assertRaises(AssertionError):
+            # len(channels) == len(kernels)
+            CpmBlock(
+                3, channels=[3, 3, 3], kernels=[
+                    1,
+                ])
+
+        # Test CPM Block
+        model = CpmBlock(3, channels=[3, 3, 3], kernels=[1, 1, 1])
+        model.train()
+
+        imgs = torch.randn(1, 3, 10, 10)
+        feat = model(imgs)
+        self.assertEqual(feat.shape, torch.Size([1, 3, 10, 10]))
+
+    def test_cpm_backbone(self):
+        with self.assertRaises(AssertionError):
+            # CPM's num_stages should be larger than 0
+            CPM(in_channels=3, out_channels=17, num_stages=-1)
+
+        with self.assertRaises(AssertionError):
+            # CPM's in_channels should be 3
+            CPM(in_channels=2, out_channels=17)
+
+        # Test CPM
+        model = CPM(in_channels=3, out_channels=17, num_stages=1)
+        model.init_weights()
+        model.train()
+
+        imgs = torch.randn(1, 3, 256, 192)
+        feat = model(imgs)
+        self.assertEqual(len(feat), 1)
+        self.assertEqual(feat[0].shape, torch.Size([1, 17, 32, 24]))
+
+        imgs = torch.randn(1, 3, 384, 288)
+        feat = model(imgs)
+        self.assertEqual(len(feat), 1)
+        self.assertEqual(feat[0].shape, torch.Size([1, 17, 48, 36]))
+
+        imgs = torch.randn(1, 3, 368, 368)
+        feat = model(imgs)
+        self.assertEqual(len(feat), 1)
+        self.assertEqual(feat[0].shape, torch.Size([1, 17, 46, 46]))
+
+        # Test CPM multi-stages
+        model = CPM(in_channels=3, out_channels=17, num_stages=2)
+        model.init_weights()
+        model.train()
+
+        imgs = torch.randn(1, 3, 368, 368)
+        feat = model(imgs)
+        self.assertEqual(len(feat), 2)
+        self.assertEqual(feat[0].shape, torch.Size([1, 17, 46, 46]))
+        self.assertEqual(feat[1].shape, torch.Size([1, 17, 46, 46]))
diff --git a/tests/test_models/test_backbones/test_hourglass.py b/tests/test_models/test_backbones/test_hourglass.py
index 91faa7f481..729acf907d 100644
--- a/tests/test_models/test_backbones/test_hourglass.py
+++ b/tests/test_models/test_backbones/test_hourglass.py
@@ -1,79 +1,79 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from unittest import TestCase - -import torch - -from mmpose.models.backbones import HourglassAENet, HourglassNet - - -class TestHourglass(TestCase): - - def test_hourglass_backbone(self): - with self.assertRaises(AssertionError): - # HourglassNet's num_stacks should larger than 0 - HourglassNet(num_stacks=0) - - with self.assertRaises(AssertionError): - # len(stage_channels) should equal len(stage_blocks) - HourglassNet( - stage_channels=[256, 256, 384, 384, 384], - stage_blocks=[2, 2, 2, 2, 2, 4]) - - with self.assertRaises(AssertionError): - # len(stage_channels) should larger than downsample_times - HourglassNet( - downsample_times=5, - stage_channels=[256, 256, 384, 384, 384], - stage_blocks=[2, 2, 2, 2, 2]) - - # Test HourglassNet-52 - model = HourglassNet(num_stacks=1) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 256, 256) - feat = model(imgs) - self.assertEqual(len(feat), 1) - self.assertEqual(feat[0].shape, torch.Size([1, 256, 64, 64])) - - # Test HourglassNet-104 - model = HourglassNet(num_stacks=2) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 256, 256) - feat = model(imgs) - self.assertEqual(len(feat), 2) - self.assertEqual(feat[0].shape, torch.Size([1, 256, 64, 64])) - self.assertEqual(feat[1].shape, torch.Size([1, 256, 64, 64])) - - def test_hourglass_ae_backbone(self): - with self.assertRaises(AssertionError): - # HourglassAENet's num_stacks should larger than 0 - HourglassAENet(num_stacks=0) - - with self.assertRaises(AssertionError): - # len(stage_channels) should larger than downsample_times - HourglassAENet( - downsample_times=5, stage_channels=[256, 256, 384, 384, 384]) - - # num_stack=1 - model = HourglassAENet(num_stacks=1) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 256, 256) - feat = model(imgs) - self.assertEqual(len(feat), 1) - self.assertEqual(feat[0].shape, torch.Size([1, 34, 64, 64])) - - # num_stack=2 - model = HourglassAENet(num_stacks=2) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 256, 256) - feat = model(imgs) - self.assertEqual(len(feat), 2) - self.assertEqual(feat[0].shape, torch.Size([1, 34, 64, 64])) - self.assertEqual(feat[1].shape, torch.Size([1, 34, 64, 64])) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase
+
+import torch
+
+from mmpose.models.backbones import HourglassAENet, HourglassNet
+
+
+class TestHourglass(TestCase):
+
+    def test_hourglass_backbone(self):
+        with self.assertRaises(AssertionError):
+            # HourglassNet's num_stacks should be larger than 0
+            HourglassNet(num_stacks=0)
+
+        with self.assertRaises(AssertionError):
+            # len(stage_channels) should equal len(stage_blocks)
+            HourglassNet(
+                stage_channels=[256, 256, 384, 384, 384],
+                stage_blocks=[2, 2, 2, 2, 2, 4])
+
+        with self.assertRaises(AssertionError):
+            # len(stage_channels) should be larger than downsample_times
+            HourglassNet(
+                downsample_times=5,
+                stage_channels=[256, 256, 384, 384, 384],
+                stage_blocks=[2, 2, 2, 2, 2])
+
+        # Test HourglassNet-52
+        model = HourglassNet(num_stacks=1)
+        model.init_weights()
+        model.train()
+
+        imgs = torch.randn(1, 3, 256, 256)
+        feat = model(imgs)
+        self.assertEqual(len(feat), 1)
+        self.assertEqual(feat[0].shape, torch.Size([1, 256, 64, 64]))
+
+        # Test HourglassNet-104
+        model = HourglassNet(num_stacks=2)
+        model.init_weights()
+        model.train()
+
+        imgs = torch.randn(1, 3, 256, 256)
+        feat = model(imgs)
+        self.assertEqual(len(feat), 2)
+        self.assertEqual(feat[0].shape, torch.Size([1, 256, 64, 64]))
+        self.assertEqual(feat[1].shape, torch.Size([1, 256, 64, 64]))
+
+    def test_hourglass_ae_backbone(self):
+        with self.assertRaises(AssertionError):
+            # HourglassAENet's num_stacks should be larger than 0
+            HourglassAENet(num_stacks=0)
+
+        with self.assertRaises(AssertionError):
+            # len(stage_channels) should be larger than downsample_times
+            HourglassAENet(
+                downsample_times=5, stage_channels=[256, 256, 384, 384, 384])
+
+        # num_stacks=1
+        model = HourglassAENet(num_stacks=1)
+        model.init_weights()
+        model.train()
+
+        imgs = torch.randn(1, 3, 256, 256)
+        feat = model(imgs)
+        self.assertEqual(len(feat), 1)
+        self.assertEqual(feat[0].shape, torch.Size([1, 34, 64, 64]))
+
+        # num_stacks=2
+        model = HourglassAENet(num_stacks=2)
+        model.init_weights()
+        model.train()
+
+        imgs = torch.randn(1, 3, 256, 256)
+        feat = model(imgs)
+        self.assertEqual(len(feat), 2)
+        self.assertEqual(feat[0].shape, torch.Size([1, 34, 64, 64]))
+        self.assertEqual(feat[1].shape, torch.Size([1, 34, 64, 64]))
diff --git a/tests/test_models/test_backbones/test_hrformer.py b/tests/test_models/test_backbones/test_hrformer.py
index 1c90e423b1..f5b3cd25c8 100644
--- a/tests/test_models/test_backbones/test_hrformer.py
+++ b/tests/test_models/test_backbones/test_hrformer.py
@@ -1,194 +1,194 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from unittest import TestCase - -import torch - -from mmpose.models.backbones.hrformer import (HRFomerModule, HRFormer, - HRFormerBlock) - - -class TestHrformer(TestCase): - - def test_hrformer_module(self): - norm_cfg = dict(type='BN') - block = HRFormerBlock - # Test multiscale forward - num_channles = (32, 64) - num_inchannels = [c * block.expansion for c in num_channles] - hrmodule = HRFomerModule( - num_branches=2, - block=block, - num_blocks=(2, 2), - num_inchannels=num_inchannels, - num_channels=num_channles, - num_heads=(1, 2), - num_window_sizes=(7, 7), - num_mlp_ratios=(4, 4), - drop_paths=(0., 0.), - norm_cfg=norm_cfg) - - feats = [ - torch.randn(1, num_inchannels[0], 64, 64), - torch.randn(1, num_inchannels[1], 32, 32) - ] - feats = hrmodule(feats) - - self.assertGreater(len(str(hrmodule)), 0) - self.assertEqual(len(feats), 2) - self.assertEqual(feats[0].shape, - torch.Size([1, num_inchannels[0], 64, 64])) - self.assertEqual(feats[1].shape, - torch.Size([1, num_inchannels[1], 32, 32])) - - # Test single scale forward - num_channles = (32, 64) - in_channels = [c * block.expansion for c in num_channles] - hrmodule = HRFomerModule( - num_branches=2, - block=block, - num_blocks=(2, 2), - num_inchannels=num_inchannels, - num_channels=num_channles, - num_heads=(1, 2), - num_window_sizes=(7, 7), - num_mlp_ratios=(4, 4), - drop_paths=(0., 0.), - norm_cfg=norm_cfg, - multiscale_output=False, - ) - - feats = [ - torch.randn(1, in_channels[0], 64, 64), - torch.randn(1, in_channels[1], 32, 32) - ] - feats = hrmodule(feats) - - self.assertEqual(len(feats), 1) - self.assertEqual(feats[0].shape, - torch.Size([1, in_channels[0], 64, 64])) - - # Test single branch HRFormer module - hrmodule = HRFomerModule( - num_branches=1, - block=block, - num_blocks=(1, ), - num_inchannels=[num_inchannels[0]], - num_channels=[num_channles[0]], - num_heads=(1, ), - num_window_sizes=(7, ), - num_mlp_ratios=(4, ), - drop_paths=(0.1, ), - norm_cfg=norm_cfg, - ) - - feats = [ - torch.randn(1, in_channels[0], 64, 64), - ] - feats = hrmodule(feats) - - self.assertEqual(len(feats), 1) - self.assertEqual(feats[0].shape, - torch.Size([1, in_channels[0], 64, 64])) - - # Value tests - kwargs = dict( - num_branches=2, - block=block, - num_blocks=(2, 2), - num_inchannels=num_inchannels, - num_channels=num_channles, - num_heads=(1, 2), - num_window_sizes=(7, 7), - num_mlp_ratios=(4, 4), - drop_paths=(0.1, 0.1), - norm_cfg=norm_cfg, - ) - - with self.assertRaises(ValueError): - # len(num_blocks) should equal num_branches - kwargs['num_blocks'] = [2, 2, 2] - HRFomerModule(**kwargs) - kwargs['num_blocks'] = [2, 2] - - with self.assertRaises(ValueError): - # len(num_blocks) should equal num_branches - kwargs['num_channels'] = [2] - HRFomerModule(**kwargs) - kwargs['num_channels'] = [2, 2] - - with self.assertRaises(ValueError): - # len(num_blocks) should equal num_branches - kwargs['num_inchannels'] = [2] - HRFomerModule(**kwargs) - kwargs['num_inchannels'] = [2, 2] - - def test_hrformer_backbone(self): - norm_cfg = dict(type='BN') - # only have 3 stages - extra = dict( - drop_path_rate=0.2, - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(2, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='HRFORMERBLOCK', - window_sizes=(7, 7), - num_heads=(1, 2), - mlp_ratios=(4, 4), - num_blocks=(2, 2), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='HRFORMERBLOCK', - window_sizes=(7, 7, 7), - num_heads=(1, 2, 4), - mlp_ratios=(4, 
4, 4), - num_blocks=(2, 2, 2), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='HRFORMERBLOCK', - window_sizes=(7, 7, 7, 7), - num_heads=(1, 2, 4, 8), - mlp_ratios=(4, 4, 4, 4), - num_blocks=(2, 2, 2, 2), - num_channels=(32, 64, 128, 256), - multiscale_output=True)) - - with self.assertRaises(ValueError): - # len(num_blocks) should equal num_branches - extra['stage4']['num_branches'] = 3 - HRFormer(extra=extra) - extra['stage4']['num_branches'] = 4 - - # Test HRFormer-S - model = HRFormer(extra=extra, norm_cfg=norm_cfg) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 64, 64) - feats = model(imgs) - self.assertEqual(len(feats), 4) - self.assertEqual(feats[0].shape, torch.Size([1, 32, 16, 16])) - self.assertEqual(feats[3].shape, torch.Size([1, 256, 2, 2])) - - # Test single scale output and model - # without relative position bias - extra['stage4']['multiscale_output'] = False - extra['with_rpe'] = False - model = HRFormer(extra=extra, norm_cfg=norm_cfg) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 64, 64) - feats = model(imgs) - self.assertIsInstance(feats, tuple) - self.assertEqual(len(feats), 1) - self.assertEqual(feats[-1].shape, torch.Size([1, 32, 16, 16])) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import torch + +from mmpose.models.backbones.hrformer import (HRFomerModule, HRFormer, + HRFormerBlock) + + +class TestHrformer(TestCase): + + def test_hrformer_module(self): + norm_cfg = dict(type='BN') + block = HRFormerBlock + # Test multiscale forward + num_channles = (32, 64) + num_inchannels = [c * block.expansion for c in num_channles] + hrmodule = HRFomerModule( + num_branches=2, + block=block, + num_blocks=(2, 2), + num_inchannels=num_inchannels, + num_channels=num_channles, + num_heads=(1, 2), + num_window_sizes=(7, 7), + num_mlp_ratios=(4, 4), + drop_paths=(0., 0.), + norm_cfg=norm_cfg) + + feats = [ + torch.randn(1, num_inchannels[0], 64, 64), + torch.randn(1, num_inchannels[1], 32, 32) + ] + feats = hrmodule(feats) + + self.assertGreater(len(str(hrmodule)), 0) + self.assertEqual(len(feats), 2) + self.assertEqual(feats[0].shape, + torch.Size([1, num_inchannels[0], 64, 64])) + self.assertEqual(feats[1].shape, + torch.Size([1, num_inchannels[1], 32, 32])) + + # Test single scale forward + num_channles = (32, 64) + in_channels = [c * block.expansion for c in num_channles] + hrmodule = HRFomerModule( + num_branches=2, + block=block, + num_blocks=(2, 2), + num_inchannels=num_inchannels, + num_channels=num_channles, + num_heads=(1, 2), + num_window_sizes=(7, 7), + num_mlp_ratios=(4, 4), + drop_paths=(0., 0.), + norm_cfg=norm_cfg, + multiscale_output=False, + ) + + feats = [ + torch.randn(1, in_channels[0], 64, 64), + torch.randn(1, in_channels[1], 32, 32) + ] + feats = hrmodule(feats) + + self.assertEqual(len(feats), 1) + self.assertEqual(feats[0].shape, + torch.Size([1, in_channels[0], 64, 64])) + + # Test single branch HRFormer module + hrmodule = HRFomerModule( + num_branches=1, + block=block, + num_blocks=(1, ), + num_inchannels=[num_inchannels[0]], + num_channels=[num_channles[0]], + num_heads=(1, ), + num_window_sizes=(7, ), + num_mlp_ratios=(4, ), + drop_paths=(0.1, ), + norm_cfg=norm_cfg, + ) + + feats = [ + torch.randn(1, in_channels[0], 64, 64), + ] + feats = hrmodule(feats) + + self.assertEqual(len(feats), 1) + self.assertEqual(feats[0].shape, + torch.Size([1, in_channels[0], 64, 64])) + + # Value tests + kwargs = dict( + num_branches=2, + block=block, 
+ num_blocks=(2, 2), + num_inchannels=num_inchannels, + num_channels=num_channles, + num_heads=(1, 2), + num_window_sizes=(7, 7), + num_mlp_ratios=(4, 4), + drop_paths=(0.1, 0.1), + norm_cfg=norm_cfg, + ) + + with self.assertRaises(ValueError): + # len(num_blocks) should equal num_branches + kwargs['num_blocks'] = [2, 2, 2] + HRFomerModule(**kwargs) + kwargs['num_blocks'] = [2, 2] + + with self.assertRaises(ValueError): + # len(num_blocks) should equal num_branches + kwargs['num_channels'] = [2] + HRFomerModule(**kwargs) + kwargs['num_channels'] = [2, 2] + + with self.assertRaises(ValueError): + # len(num_blocks) should equal num_branches + kwargs['num_inchannels'] = [2] + HRFomerModule(**kwargs) + kwargs['num_inchannels'] = [2, 2] + + def test_hrformer_backbone(self): + norm_cfg = dict(type='BN') + # only have 3 stages + extra = dict( + drop_path_rate=0.2, + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(2, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='HRFORMERBLOCK', + window_sizes=(7, 7), + num_heads=(1, 2), + mlp_ratios=(4, 4), + num_blocks=(2, 2), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='HRFORMERBLOCK', + window_sizes=(7, 7, 7), + num_heads=(1, 2, 4), + mlp_ratios=(4, 4, 4), + num_blocks=(2, 2, 2), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='HRFORMERBLOCK', + window_sizes=(7, 7, 7, 7), + num_heads=(1, 2, 4, 8), + mlp_ratios=(4, 4, 4, 4), + num_blocks=(2, 2, 2, 2), + num_channels=(32, 64, 128, 256), + multiscale_output=True)) + + with self.assertRaises(ValueError): + # len(num_blocks) should equal num_branches + extra['stage4']['num_branches'] = 3 + HRFormer(extra=extra) + extra['stage4']['num_branches'] = 4 + + # Test HRFormer-S + model = HRFormer(extra=extra, norm_cfg=norm_cfg) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 64, 64) + feats = model(imgs) + self.assertEqual(len(feats), 4) + self.assertEqual(feats[0].shape, torch.Size([1, 32, 16, 16])) + self.assertEqual(feats[3].shape, torch.Size([1, 256, 2, 2])) + + # Test single scale output and model + # without relative position bias + extra['stage4']['multiscale_output'] = False + extra['with_rpe'] = False + model = HRFormer(extra=extra, norm_cfg=norm_cfg) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 64, 64) + feats = model(imgs) + self.assertIsInstance(feats, tuple) + self.assertEqual(len(feats), 1) + self.assertEqual(feats[-1].shape, torch.Size([1, 32, 16, 16])) diff --git a/tests/test_models/test_backbones/test_hrnet.py b/tests/test_models/test_backbones/test_hrnet.py index 91de1e2541..8824de167e 100644 --- a/tests/test_models/test_backbones/test_hrnet.py +++ b/tests/test_models/test_backbones/test_hrnet.py @@ -1,132 +1,132 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import torch -from torch.nn.modules.batchnorm import _BatchNorm - -from mmpose.models.backbones import HRNet -from mmpose.models.backbones.hrnet import HRModule -from mmpose.models.backbones.resnet import BasicBlock, Bottleneck - - -class TestHrnet(TestCase): - - @staticmethod - def is_block(modules): - """Check if is HRModule building block.""" - if isinstance(modules, (HRModule, )): - return True - return False - - @staticmethod - def is_norm(modules): - """Check if is one of the norms.""" - if isinstance(modules, (_BatchNorm, )): - return True - return False - - @staticmethod - def all_zeros(modules): - """Check if the weight(and bias) is all zero.""" - weight_zero = torch.equal(modules.weight.data, - torch.zeros_like(modules.weight.data)) - if hasattr(modules, 'bias'): - bias_zero = torch.equal(modules.bias.data, - torch.zeros_like(modules.bias.data)) - else: - bias_zero = True - - return weight_zero and bias_zero - - def test_hrmodule(self): - # Test HRModule forward - block = HRModule( - num_branches=1, - blocks=BasicBlock, - num_blocks=(4, ), - in_channels=[ - 64, - ], - num_channels=(64, )) - - x = torch.randn(2, 64, 56, 56) - x_out = block([x]) - self.assertEqual(x_out[0].shape, torch.Size([2, 64, 56, 56])) - - def test_hrnet_backbone(self): - extra = dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))) - - model = HRNet(extra, in_channels=3) - - imgs = torch.randn(2, 3, 224, 224) - feat = model(imgs) - self.assertIsInstance(feat, tuple) - self.assertEqual(feat[-1].shape, torch.Size([2, 32, 56, 56])) - - # Test HRNet zero initialization of residual - model = HRNet(extra, in_channels=3, zero_init_residual=True) - model.init_weights() - for m in model.modules(): - if isinstance(m, Bottleneck): - self.assertTrue(self.all_zeros(m.norm3)) - model.train() - - imgs = torch.randn(2, 3, 224, 224) - feat = model(imgs) - self.assertIsInstance(feat, tuple) - self.assertEqual(feat[-1].shape, torch.Size([2, 32, 56, 56])) - - # Test HRNet with the first three stages frozen - frozen_stages = 3 - model = HRNet(extra, in_channels=3, frozen_stages=frozen_stages) - model.init_weights() - model.train() - if frozen_stages >= 0: - self.assertFalse(model.norm1.training) - self.assertFalse(model.norm2.training) - for layer in [model.conv1, model.norm1, model.conv2, model.norm2]: - for param in layer.parameters(): - self.assertFalse(param.requires_grad) - - for i in range(1, frozen_stages + 1): - if i == 1: - layer = getattr(model, 'layer1') - else: - layer = getattr(model, f'stage{i}') - for mod in layer.modules(): - if isinstance(mod, _BatchNorm): - self.assertFalse(mod.training) - for param in layer.parameters(): - self.assertFalse(param.requires_grad) - - if i < 4: - layer = getattr(model, f'transition{i}') - for mod in layer.modules(): - if isinstance(mod, _BatchNorm): - self.assertFalse(mod.training) - for param in layer.parameters(): - self.assertFalse(param.requires_grad) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import torch +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.models.backbones import HRNet +from mmpose.models.backbones.hrnet import HRModule +from mmpose.models.backbones.resnet import BasicBlock, Bottleneck + + +class TestHrnet(TestCase): + + @staticmethod + def is_block(modules): + """Check if is HRModule building block.""" + if isinstance(modules, (HRModule, )): + return True + return False + + @staticmethod + def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (_BatchNorm, )): + return True + return False + + @staticmethod + def all_zeros(modules): + """Check if the weight(and bias) is all zero.""" + weight_zero = torch.equal(modules.weight.data, + torch.zeros_like(modules.weight.data)) + if hasattr(modules, 'bias'): + bias_zero = torch.equal(modules.bias.data, + torch.zeros_like(modules.bias.data)) + else: + bias_zero = True + + return weight_zero and bias_zero + + def test_hrmodule(self): + # Test HRModule forward + block = HRModule( + num_branches=1, + blocks=BasicBlock, + num_blocks=(4, ), + in_channels=[ + 64, + ], + num_channels=(64, )) + + x = torch.randn(2, 64, 56, 56) + x_out = block([x]) + self.assertEqual(x_out[0].shape, torch.Size([2, 64, 56, 56])) + + def test_hrnet_backbone(self): + extra = dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))) + + model = HRNet(extra, in_channels=3) + + imgs = torch.randn(2, 3, 224, 224) + feat = model(imgs) + self.assertIsInstance(feat, tuple) + self.assertEqual(feat[-1].shape, torch.Size([2, 32, 56, 56])) + + # Test HRNet zero initialization of residual + model = HRNet(extra, in_channels=3, zero_init_residual=True) + model.init_weights() + for m in model.modules(): + if isinstance(m, Bottleneck): + self.assertTrue(self.all_zeros(m.norm3)) + model.train() + + imgs = torch.randn(2, 3, 224, 224) + feat = model(imgs) + self.assertIsInstance(feat, tuple) + self.assertEqual(feat[-1].shape, torch.Size([2, 32, 56, 56])) + + # Test HRNet with the first three stages frozen + frozen_stages = 3 + model = HRNet(extra, in_channels=3, frozen_stages=frozen_stages) + model.init_weights() + model.train() + if frozen_stages >= 0: + self.assertFalse(model.norm1.training) + self.assertFalse(model.norm2.training) + for layer in [model.conv1, model.norm1, model.conv2, model.norm2]: + for param in layer.parameters(): + self.assertFalse(param.requires_grad) + + for i in range(1, frozen_stages + 1): + if i == 1: + layer = getattr(model, 'layer1') + else: + layer = getattr(model, f'stage{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + self.assertFalse(mod.training) + for param in layer.parameters(): + self.assertFalse(param.requires_grad) + + if i < 4: + layer = getattr(model, f'transition{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + self.assertFalse(mod.training) + for param in layer.parameters(): + self.assertFalse(param.requires_grad) diff --git a/tests/test_models/test_backbones/test_litehrnet.py b/tests/test_models/test_backbones/test_litehrnet.py index fae2f2f06d..0b65bcaba2 100644 --- 
a/tests/test_models/test_backbones/test_litehrnet.py +++ b/tests/test_models/test_backbones/test_litehrnet.py @@ -1,145 +1,145 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import torch -from torch.nn.modules.batchnorm import _BatchNorm - -from mmpose.models.backbones import LiteHRNet -from mmpose.models.backbones.litehrnet import LiteHRModule -from mmpose.models.backbones.resnet import Bottleneck - - -class TestLiteHrnet(TestCase): - - @staticmethod - def is_norm(modules): - """Check if is one of the norms.""" - if isinstance(modules, (_BatchNorm, )): - return True - return False - - @staticmethod - def all_zeros(modules): - """Check if the weight(and bias) is all zero.""" - weight_zero = torch.equal(modules.weight.data, - torch.zeros_like(modules.weight.data)) - if hasattr(modules, 'bias'): - bias_zero = torch.equal(modules.bias.data, - torch.zeros_like(modules.bias.data)) - else: - bias_zero = True - - return weight_zero and bias_zero - - def test_litehrmodule(self): - # Test LiteHRModule forward - block = LiteHRModule( - num_branches=1, - num_blocks=1, - in_channels=[ - 40, - ], - reduce_ratio=8, - module_type='LITE') - - x = torch.randn(2, 40, 56, 56) - x_out = block([[x]]) - self.assertEqual(x_out[0][0].shape, torch.Size([2, 40, 56, 56])) - - block = LiteHRModule( - num_branches=1, - num_blocks=1, - in_channels=[ - 40, - ], - reduce_ratio=8, - module_type='NAIVE') - - x = torch.randn(2, 40, 56, 56) - x_out = block([x]) - self.assertEqual(x_out[0].shape, torch.Size([2, 40, 56, 56])) - - with self.assertRaises(ValueError): - block = LiteHRModule( - num_branches=1, - num_blocks=1, - in_channels=[ - 40, - ], - reduce_ratio=8, - module_type='none') - - def test_litehrnet_backbone(self): - extra = dict( - stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), - num_stages=3, - stages_spec=dict( - num_modules=(2, 4, 2), - num_branches=(2, 3, 4), - num_blocks=(2, 2, 2), - module_type=('LITE', 'LITE', 'LITE'), - with_fuse=(True, True, True), - reduce_ratios=(8, 8, 8), - num_channels=( - (40, 80), - (40, 80, 160), - (40, 80, 160, 320), - )), - with_head=True) - - model = LiteHRNet(extra, in_channels=3) - - imgs = torch.randn(2, 3, 224, 224) - feat = model(imgs) - self.assertIsInstance(feat, tuple) - self.assertEqual(feat[-1].shape, torch.Size([2, 40, 56, 56])) - - # Test HRNet zero initialization of residual - model = LiteHRNet(extra, in_channels=3) - model.init_weights() - for m in model.modules(): - if isinstance(m, Bottleneck): - self.assertTrue(self.all_zeros(m.norm3)) - model.train() - - imgs = torch.randn(2, 3, 224, 224) - feat = model(imgs) - self.assertIsInstance(feat, tuple) - self.assertEqual(feat[-1].shape, torch.Size([2, 40, 56, 56])) - - extra = dict( - stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), - num_stages=3, - stages_spec=dict( - num_modules=(2, 4, 2), - num_branches=(2, 3, 4), - num_blocks=(2, 2, 2), - module_type=('NAIVE', 'NAIVE', 'NAIVE'), - with_fuse=(True, True, True), - reduce_ratios=(8, 8, 8), - num_channels=( - (40, 80), - (40, 80, 160), - (40, 80, 160, 320), - )), - with_head=True) - - model = LiteHRNet(extra, in_channels=3) - - imgs = torch.randn(2, 3, 224, 224) - feat = model(imgs) - self.assertIsInstance(feat, tuple) - self.assertEqual(feat[-1].shape, torch.Size([2, 40, 56, 56])) - - # Test HRNet zero initialization of residual - model = LiteHRNet(extra, in_channels=3) - model.init_weights() - for m in model.modules(): - if isinstance(m, Bottleneck): - self.assertTrue(self.all_zeros(m.norm3)) - 
model.train() - - imgs = torch.randn(2, 3, 224, 224) - feat = model(imgs) - self.assertIsInstance(feat, tuple) - self.assertEqual(feat[-1].shape, torch.Size([2, 40, 56, 56])) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import torch +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.models.backbones import LiteHRNet +from mmpose.models.backbones.litehrnet import LiteHRModule +from mmpose.models.backbones.resnet import Bottleneck + + +class TestLiteHrnet(TestCase): + + @staticmethod + def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (_BatchNorm, )): + return True + return False + + @staticmethod + def all_zeros(modules): + """Check if the weight(and bias) is all zero.""" + weight_zero = torch.equal(modules.weight.data, + torch.zeros_like(modules.weight.data)) + if hasattr(modules, 'bias'): + bias_zero = torch.equal(modules.bias.data, + torch.zeros_like(modules.bias.data)) + else: + bias_zero = True + + return weight_zero and bias_zero + + def test_litehrmodule(self): + # Test LiteHRModule forward + block = LiteHRModule( + num_branches=1, + num_blocks=1, + in_channels=[ + 40, + ], + reduce_ratio=8, + module_type='LITE') + + x = torch.randn(2, 40, 56, 56) + x_out = block([[x]]) + self.assertEqual(x_out[0][0].shape, torch.Size([2, 40, 56, 56])) + + block = LiteHRModule( + num_branches=1, + num_blocks=1, + in_channels=[ + 40, + ], + reduce_ratio=8, + module_type='NAIVE') + + x = torch.randn(2, 40, 56, 56) + x_out = block([x]) + self.assertEqual(x_out[0].shape, torch.Size([2, 40, 56, 56])) + + with self.assertRaises(ValueError): + block = LiteHRModule( + num_branches=1, + num_blocks=1, + in_channels=[ + 40, + ], + reduce_ratio=8, + module_type='none') + + def test_litehrnet_backbone(self): + extra = dict( + stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), + num_stages=3, + stages_spec=dict( + num_modules=(2, 4, 2), + num_branches=(2, 3, 4), + num_blocks=(2, 2, 2), + module_type=('LITE', 'LITE', 'LITE'), + with_fuse=(True, True, True), + reduce_ratios=(8, 8, 8), + num_channels=( + (40, 80), + (40, 80, 160), + (40, 80, 160, 320), + )), + with_head=True) + + model = LiteHRNet(extra, in_channels=3) + + imgs = torch.randn(2, 3, 224, 224) + feat = model(imgs) + self.assertIsInstance(feat, tuple) + self.assertEqual(feat[-1].shape, torch.Size([2, 40, 56, 56])) + + # Test HRNet zero initialization of residual + model = LiteHRNet(extra, in_channels=3) + model.init_weights() + for m in model.modules(): + if isinstance(m, Bottleneck): + self.assertTrue(self.all_zeros(m.norm3)) + model.train() + + imgs = torch.randn(2, 3, 224, 224) + feat = model(imgs) + self.assertIsInstance(feat, tuple) + self.assertEqual(feat[-1].shape, torch.Size([2, 40, 56, 56])) + + extra = dict( + stem=dict(stem_channels=32, out_channels=32, expand_ratio=1), + num_stages=3, + stages_spec=dict( + num_modules=(2, 4, 2), + num_branches=(2, 3, 4), + num_blocks=(2, 2, 2), + module_type=('NAIVE', 'NAIVE', 'NAIVE'), + with_fuse=(True, True, True), + reduce_ratios=(8, 8, 8), + num_channels=( + (40, 80), + (40, 80, 160), + (40, 80, 160, 320), + )), + with_head=True) + + model = LiteHRNet(extra, in_channels=3) + + imgs = torch.randn(2, 3, 224, 224) + feat = model(imgs) + self.assertIsInstance(feat, tuple) + self.assertEqual(feat[-1].shape, torch.Size([2, 40, 56, 56])) + + # Test HRNet zero initialization of residual + model = LiteHRNet(extra, in_channels=3) + model.init_weights() + for m in model.modules(): + if isinstance(m, Bottleneck): + 
self.assertTrue(self.all_zeros(m.norm3)) + model.train() + + imgs = torch.randn(2, 3, 224, 224) + feat = model(imgs) + self.assertIsInstance(feat, tuple) + self.assertEqual(feat[-1].shape, torch.Size([2, 40, 56, 56])) diff --git a/tests/test_models/test_backbones/test_mobilenet_v2.py b/tests/test_models/test_backbones/test_mobilenet_v2.py index d505632c4b..d189442eff 100644 --- a/tests/test_models/test_backbones/test_mobilenet_v2.py +++ b/tests/test_models/test_backbones/test_mobilenet_v2.py @@ -1,264 +1,264 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import torch -from torch.nn.modules import GroupNorm -from torch.nn.modules.batchnorm import _BatchNorm - -from mmpose.models.backbones import MobileNetV2 -from mmpose.models.backbones.mobilenet_v2 import InvertedResidual - - -class TestMobilenetV2(TestCase): - - @staticmethod - def is_block(modules): - """Check if is ResNet building block.""" - if isinstance(modules, (InvertedResidual, )): - return True - return False - - @staticmethod - def is_norm(modules): - """Check if is one of the norms.""" - if isinstance(modules, (GroupNorm, _BatchNorm)): - return True - return False - - @staticmethod - def check_norm_state(modules, train_state): - """Check if norm layer is in correct train state.""" - for mod in modules: - if isinstance(mod, _BatchNorm): - if mod.training != train_state: - return False - return True - - def test_mobilenetv2_invertedresidual(self): - - with self.assertRaises(AssertionError): - # stride must be in [1, 2] - InvertedResidual(16, 24, stride=3, expand_ratio=6) - - # Test InvertedResidual with checkpoint forward, stride=1 - block = InvertedResidual(16, 24, stride=1, expand_ratio=6) - x = torch.randn(1, 16, 56, 56) - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size((1, 24, 56, 56))) - - # Test InvertedResidual with expand_ratio=1 - block = InvertedResidual(16, 16, stride=1, expand_ratio=1) - self.assertEqual(len(block.conv), 2) - - # Test InvertedResidual with use_res_connect - block = InvertedResidual(16, 16, stride=1, expand_ratio=6) - x = torch.randn(1, 16, 56, 56) - x_out = block(x) - self.assertTrue(block.use_res_connect) - self.assertEqual(x_out.shape, torch.Size((1, 16, 56, 56))) - - # Test InvertedResidual with checkpoint forward, stride=2 - block = InvertedResidual(16, 24, stride=2, expand_ratio=6) - x = torch.randn(1, 16, 56, 56) - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size((1, 24, 28, 28))) - - # Test InvertedResidual with checkpoint forward - block = InvertedResidual( - 16, 24, stride=1, expand_ratio=6, with_cp=True) - self.assertTrue(block.with_cp) - x = torch.randn(1, 16, 56, 56) - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size((1, 24, 56, 56))) - - # Test InvertedResidual with act_cfg=dict(type='ReLU') - block = InvertedResidual( - 16, 24, stride=1, expand_ratio=6, act_cfg=dict(type='ReLU')) - x = torch.randn(1, 16, 56, 56) - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size((1, 24, 56, 56))) - - def test_mobilenetv2_backbone(self): - with self.assertRaises(TypeError): - # pretrained must be a string path - model = MobileNetV2() - model.init_weights(pretrained=0) - - with self.assertRaises(ValueError): - # frozen_stages must in range(1, 8) - MobileNetV2(frozen_stages=8) - - with self.assertRaises(ValueError): - # tout_indices in range(-1, 8) - MobileNetV2(out_indices=[8]) - - # Test MobileNetV2 with first stage frozen - frozen_stages = 1 - model = MobileNetV2(frozen_stages=frozen_stages) - model.init_weights() - 
model.train() - - for mod in model.conv1.modules(): - for param in mod.parameters(): - self.assertFalse(param.requires_grad) - for i in range(1, frozen_stages + 1): - layer = getattr(model, f'layer{i}') - for mod in layer.modules(): - if isinstance(mod, _BatchNorm): - self.assertFalse(mod.training) - for param in layer.parameters(): - self.assertFalse(param.requires_grad) - - # Test MobileNetV2 with norm_eval=True - model = MobileNetV2(norm_eval=True) - model.init_weights() - model.train() - - self.assertTrue(self.check_norm_state(model.modules(), False)) - - # Test MobileNetV2 forward with widen_factor=1.0 - model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 8)) - model.init_weights() - model.train() - - self.assertTrue(self.check_norm_state(model.modules(), True)) - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 8) - self.assertEqual(feat[0].shape, torch.Size((1, 16, 112, 112))) - self.assertEqual(feat[1].shape, torch.Size((1, 24, 56, 56))) - self.assertEqual(feat[2].shape, torch.Size((1, 32, 28, 28))) - self.assertEqual(feat[3].shape, torch.Size((1, 64, 14, 14))) - self.assertEqual(feat[4].shape, torch.Size((1, 96, 14, 14))) - self.assertEqual(feat[5].shape, torch.Size((1, 160, 7, 7))) - self.assertEqual(feat[6].shape, torch.Size((1, 320, 7, 7))) - self.assertEqual(feat[7].shape, torch.Size((1, 1280, 7, 7))) - - # Test MobileNetV2 forward with widen_factor=0.5 - model = MobileNetV2(widen_factor=0.5, out_indices=range(0, 7)) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 7) - self.assertEqual(feat[0].shape, torch.Size((1, 8, 112, 112))) - self.assertEqual(feat[1].shape, torch.Size((1, 16, 56, 56))) - self.assertEqual(feat[2].shape, torch.Size((1, 16, 28, 28))) - self.assertEqual(feat[3].shape, torch.Size((1, 32, 14, 14))) - self.assertEqual(feat[4].shape, torch.Size((1, 48, 14, 14))) - self.assertEqual(feat[5].shape, torch.Size((1, 80, 7, 7))) - self.assertEqual(feat[6].shape, torch.Size((1, 160, 7, 7))) - - # Test MobileNetV2 forward with widen_factor=2.0 - model = MobileNetV2(widen_factor=2.0) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertIsInstance(feat, tuple) - self.assertEqual(feat[-1].shape, torch.Size((1, 2560, 7, 7))) - - # Test MobileNetV2 forward with out_indices=None - model = MobileNetV2(widen_factor=1.0) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertIsInstance(feat, tuple) - self.assertEqual(feat[-1].shape, torch.Size((1, 1280, 7, 7))) - - # Test MobileNetV2 forward with dict(type='ReLU') - model = MobileNetV2( - widen_factor=1.0, - act_cfg=dict(type='ReLU'), - out_indices=range(0, 7)) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 7) - self.assertEqual(feat[0].shape, torch.Size((1, 16, 112, 112))) - self.assertEqual(feat[1].shape, torch.Size((1, 24, 56, 56))) - self.assertEqual(feat[2].shape, torch.Size((1, 32, 28, 28))) - self.assertEqual(feat[3].shape, torch.Size((1, 64, 14, 14))) - self.assertEqual(feat[4].shape, torch.Size((1, 96, 14, 14))) - self.assertEqual(feat[5].shape, torch.Size((1, 160, 7, 7))) - self.assertEqual(feat[6].shape, torch.Size((1, 320, 7, 7))) - - # Test MobileNetV2 with GroupNorm forward - model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 7)) - for m in model.modules(): - if self.is_norm(m): - 
self.assertIsInstance(m, _BatchNorm) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 7) - self.assertEqual(feat[0].shape, torch.Size((1, 16, 112, 112))) - self.assertEqual(feat[1].shape, torch.Size((1, 24, 56, 56))) - self.assertEqual(feat[2].shape, torch.Size((1, 32, 28, 28))) - self.assertEqual(feat[3].shape, torch.Size((1, 64, 14, 14))) - self.assertEqual(feat[4].shape, torch.Size((1, 96, 14, 14))) - self.assertEqual(feat[5].shape, torch.Size((1, 160, 7, 7))) - self.assertEqual(feat[6].shape, torch.Size((1, 320, 7, 7))) - - # Test MobileNetV2 with BatchNorm forward - model = MobileNetV2( - widen_factor=1.0, - norm_cfg=dict(type='GN', num_groups=2, requires_grad=True), - out_indices=range(0, 7)) - for m in model.modules(): - if self.is_norm(m): - self.assertIsInstance(m, GroupNorm) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 7) - self.assertEqual(feat[0].shape, torch.Size((1, 16, 112, 112))) - self.assertEqual(feat[1].shape, torch.Size((1, 24, 56, 56))) - self.assertEqual(feat[2].shape, torch.Size((1, 32, 28, 28))) - self.assertEqual(feat[3].shape, torch.Size((1, 64, 14, 14))) - self.assertEqual(feat[4].shape, torch.Size((1, 96, 14, 14))) - self.assertEqual(feat[5].shape, torch.Size((1, 160, 7, 7))) - self.assertEqual(feat[6].shape, torch.Size((1, 320, 7, 7))) - - # Test MobileNetV2 with layers 1, 3, 5 out forward - model = MobileNetV2(widen_factor=1.0, out_indices=(0, 2, 4)) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 3) - self.assertEqual(feat[0].shape, torch.Size((1, 16, 112, 112))) - self.assertEqual(feat[1].shape, torch.Size((1, 32, 28, 28))) - self.assertEqual(feat[2].shape, torch.Size((1, 96, 14, 14))) - - # Test MobileNetV2 with checkpoint forward - model = MobileNetV2( - widen_factor=1.0, with_cp=True, out_indices=range(0, 7)) - for m in model.modules(): - if self.is_block(m): - self.assertTrue(m.with_cp) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 7) - self.assertEqual(feat[0].shape, torch.Size((1, 16, 112, 112))) - self.assertEqual(feat[1].shape, torch.Size((1, 24, 56, 56))) - self.assertEqual(feat[2].shape, torch.Size((1, 32, 28, 28))) - self.assertEqual(feat[3].shape, torch.Size((1, 64, 14, 14))) - self.assertEqual(feat[4].shape, torch.Size((1, 96, 14, 14))) - self.assertEqual(feat[5].shape, torch.Size((1, 160, 7, 7))) - self.assertEqual(feat[6].shape, torch.Size((1, 320, 7, 7))) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.models.backbones import MobileNetV2 +from mmpose.models.backbones.mobilenet_v2 import InvertedResidual + + +class TestMobilenetV2(TestCase): + + @staticmethod + def is_block(modules): + """Check if is ResNet building block.""" + if isinstance(modules, (InvertedResidual, )): + return True + return False + + @staticmethod + def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + @staticmethod + def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + def test_mobilenetv2_invertedresidual(self): + + with self.assertRaises(AssertionError): + # stride must be in [1, 2] + InvertedResidual(16, 24, stride=3, expand_ratio=6) + + # Test InvertedResidual with checkpoint forward, stride=1 + block = InvertedResidual(16, 24, stride=1, expand_ratio=6) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size((1, 24, 56, 56))) + + # Test InvertedResidual with expand_ratio=1 + block = InvertedResidual(16, 16, stride=1, expand_ratio=1) + self.assertEqual(len(block.conv), 2) + + # Test InvertedResidual with use_res_connect + block = InvertedResidual(16, 16, stride=1, expand_ratio=6) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + self.assertTrue(block.use_res_connect) + self.assertEqual(x_out.shape, torch.Size((1, 16, 56, 56))) + + # Test InvertedResidual with checkpoint forward, stride=2 + block = InvertedResidual(16, 24, stride=2, expand_ratio=6) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size((1, 24, 28, 28))) + + # Test InvertedResidual with checkpoint forward + block = InvertedResidual( + 16, 24, stride=1, expand_ratio=6, with_cp=True) + self.assertTrue(block.with_cp) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size((1, 24, 56, 56))) + + # Test InvertedResidual with act_cfg=dict(type='ReLU') + block = InvertedResidual( + 16, 24, stride=1, expand_ratio=6, act_cfg=dict(type='ReLU')) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size((1, 24, 56, 56))) + + def test_mobilenetv2_backbone(self): + with self.assertRaises(TypeError): + # pretrained must be a string path + model = MobileNetV2() + model.init_weights(pretrained=0) + + with self.assertRaises(ValueError): + # frozen_stages must in range(1, 8) + MobileNetV2(frozen_stages=8) + + with self.assertRaises(ValueError): + # tout_indices in range(-1, 8) + MobileNetV2(out_indices=[8]) + + # Test MobileNetV2 with first stage frozen + frozen_stages = 1 + model = MobileNetV2(frozen_stages=frozen_stages) + model.init_weights() + model.train() + + for mod in model.conv1.modules(): + for param in mod.parameters(): + self.assertFalse(param.requires_grad) + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + self.assertFalse(mod.training) + for param in layer.parameters(): + self.assertFalse(param.requires_grad) + + # Test MobileNetV2 with norm_eval=True + model = MobileNetV2(norm_eval=True) + model.init_weights() + model.train() + + self.assertTrue(self.check_norm_state(model.modules(), False)) + + # 
Test MobileNetV2 forward with widen_factor=1.0 + model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 8)) + model.init_weights() + model.train() + + self.assertTrue(self.check_norm_state(model.modules(), True)) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 8) + self.assertEqual(feat[0].shape, torch.Size((1, 16, 112, 112))) + self.assertEqual(feat[1].shape, torch.Size((1, 24, 56, 56))) + self.assertEqual(feat[2].shape, torch.Size((1, 32, 28, 28))) + self.assertEqual(feat[3].shape, torch.Size((1, 64, 14, 14))) + self.assertEqual(feat[4].shape, torch.Size((1, 96, 14, 14))) + self.assertEqual(feat[5].shape, torch.Size((1, 160, 7, 7))) + self.assertEqual(feat[6].shape, torch.Size((1, 320, 7, 7))) + self.assertEqual(feat[7].shape, torch.Size((1, 1280, 7, 7))) + + # Test MobileNetV2 forward with widen_factor=0.5 + model = MobileNetV2(widen_factor=0.5, out_indices=range(0, 7)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 7) + self.assertEqual(feat[0].shape, torch.Size((1, 8, 112, 112))) + self.assertEqual(feat[1].shape, torch.Size((1, 16, 56, 56))) + self.assertEqual(feat[2].shape, torch.Size((1, 16, 28, 28))) + self.assertEqual(feat[3].shape, torch.Size((1, 32, 14, 14))) + self.assertEqual(feat[4].shape, torch.Size((1, 48, 14, 14))) + self.assertEqual(feat[5].shape, torch.Size((1, 80, 7, 7))) + self.assertEqual(feat[6].shape, torch.Size((1, 160, 7, 7))) + + # Test MobileNetV2 forward with widen_factor=2.0 + model = MobileNetV2(widen_factor=2.0) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertIsInstance(feat, tuple) + self.assertEqual(feat[-1].shape, torch.Size((1, 2560, 7, 7))) + + # Test MobileNetV2 forward with out_indices=None + model = MobileNetV2(widen_factor=1.0) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertIsInstance(feat, tuple) + self.assertEqual(feat[-1].shape, torch.Size((1, 1280, 7, 7))) + + # Test MobileNetV2 forward with dict(type='ReLU') + model = MobileNetV2( + widen_factor=1.0, + act_cfg=dict(type='ReLU'), + out_indices=range(0, 7)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 7) + self.assertEqual(feat[0].shape, torch.Size((1, 16, 112, 112))) + self.assertEqual(feat[1].shape, torch.Size((1, 24, 56, 56))) + self.assertEqual(feat[2].shape, torch.Size((1, 32, 28, 28))) + self.assertEqual(feat[3].shape, torch.Size((1, 64, 14, 14))) + self.assertEqual(feat[4].shape, torch.Size((1, 96, 14, 14))) + self.assertEqual(feat[5].shape, torch.Size((1, 160, 7, 7))) + self.assertEqual(feat[6].shape, torch.Size((1, 320, 7, 7))) + + # Test MobileNetV2 with GroupNorm forward + model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 7)) + for m in model.modules(): + if self.is_norm(m): + self.assertIsInstance(m, _BatchNorm) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 7) + self.assertEqual(feat[0].shape, torch.Size((1, 16, 112, 112))) + self.assertEqual(feat[1].shape, torch.Size((1, 24, 56, 56))) + self.assertEqual(feat[2].shape, torch.Size((1, 32, 28, 28))) + self.assertEqual(feat[3].shape, torch.Size((1, 64, 14, 14))) + self.assertEqual(feat[4].shape, torch.Size((1, 96, 14, 14))) + self.assertEqual(feat[5].shape, torch.Size((1, 160, 7, 7))) + 
self.assertEqual(feat[6].shape, torch.Size((1, 320, 7, 7))) + + # Test MobileNetV2 with BatchNorm forward + model = MobileNetV2( + widen_factor=1.0, + norm_cfg=dict(type='GN', num_groups=2, requires_grad=True), + out_indices=range(0, 7)) + for m in model.modules(): + if self.is_norm(m): + self.assertIsInstance(m, GroupNorm) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 7) + self.assertEqual(feat[0].shape, torch.Size((1, 16, 112, 112))) + self.assertEqual(feat[1].shape, torch.Size((1, 24, 56, 56))) + self.assertEqual(feat[2].shape, torch.Size((1, 32, 28, 28))) + self.assertEqual(feat[3].shape, torch.Size((1, 64, 14, 14))) + self.assertEqual(feat[4].shape, torch.Size((1, 96, 14, 14))) + self.assertEqual(feat[5].shape, torch.Size((1, 160, 7, 7))) + self.assertEqual(feat[6].shape, torch.Size((1, 320, 7, 7))) + + # Test MobileNetV2 with layers 1, 3, 5 out forward + model = MobileNetV2(widen_factor=1.0, out_indices=(0, 2, 4)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 3) + self.assertEqual(feat[0].shape, torch.Size((1, 16, 112, 112))) + self.assertEqual(feat[1].shape, torch.Size((1, 32, 28, 28))) + self.assertEqual(feat[2].shape, torch.Size((1, 96, 14, 14))) + + # Test MobileNetV2 with checkpoint forward + model = MobileNetV2( + widen_factor=1.0, with_cp=True, out_indices=range(0, 7)) + for m in model.modules(): + if self.is_block(m): + self.assertTrue(m.with_cp) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 7) + self.assertEqual(feat[0].shape, torch.Size((1, 16, 112, 112))) + self.assertEqual(feat[1].shape, torch.Size((1, 24, 56, 56))) + self.assertEqual(feat[2].shape, torch.Size((1, 32, 28, 28))) + self.assertEqual(feat[3].shape, torch.Size((1, 64, 14, 14))) + self.assertEqual(feat[4].shape, torch.Size((1, 96, 14, 14))) + self.assertEqual(feat[5].shape, torch.Size((1, 160, 7, 7))) + self.assertEqual(feat[6].shape, torch.Size((1, 320, 7, 7))) diff --git a/tests/test_models/test_backbones/test_mobilenet_v3.py b/tests/test_models/test_backbones/test_mobilenet_v3.py index a6a60bcccd..438e654a9b 100644 --- a/tests/test_models/test_backbones/test_mobilenet_v3.py +++ b/tests/test_models/test_backbones/test_mobilenet_v3.py @@ -1,169 +1,169 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import torch -from torch.nn.modules import GroupNorm -from torch.nn.modules.batchnorm import _BatchNorm - -from mmpose.models.backbones import MobileNetV3 -from mmpose.models.backbones.utils import InvertedResidual - - -class TestMobilenetV3(TestCase): - - @staticmethod - def is_norm(modules): - """Check if is one of the norms.""" - if isinstance(modules, (GroupNorm, _BatchNorm)): - return True - return False - - @staticmethod - def check_norm_state(modules, train_state): - """Check if norm layer is in correct train state.""" - for mod in modules: - if isinstance(mod, _BatchNorm): - if mod.training != train_state: - return False - return True - - def test_mobilenetv3_backbone(self): - with self.assertRaises(AssertionError): - # arch must in [small, big] - MobileNetV3(arch='others') - - with self.assertRaises(ValueError): - # frozen_stages must less than 12 when arch is small - MobileNetV3(arch='small', frozen_stages=12) - - with self.assertRaises(ValueError): - # frozen_stages must less than 16 when arch is big - MobileNetV3(arch='big', frozen_stages=16) - - with self.assertRaises(ValueError): - # max out_indices must less than 11 when arch is small - MobileNetV3(arch='small', out_indices=(11, )) - - with self.assertRaises(ValueError): - # max out_indices must less than 15 when arch is big - MobileNetV3(arch='big', out_indices=(15, )) - - # Test MobileNetv3 - model = MobileNetV3() - model.init_weights() - model.train() - - # Test MobileNetv3 with first stage frozen - frozen_stages = 1 - model = MobileNetV3(frozen_stages=frozen_stages) - model.init_weights() - model.train() - for param in model.conv1.parameters(): - self.assertFalse(param.requires_grad) - for i in range(1, frozen_stages + 1): - layer = getattr(model, f'layer{i}') - for mod in layer.modules(): - if isinstance(mod, _BatchNorm): - self.assertFalse(mod.training) - for param in layer.parameters(): - self.assertFalse(param.requires_grad) - - # Test MobileNetv3 with norm eval - model = MobileNetV3(norm_eval=True, out_indices=range(0, 11)) - model.init_weights() - model.train() - self.assertTrue(self.check_norm_state(model.modules(), False)) - - # Test MobileNetv3 forward with small arch - model = MobileNetV3(out_indices=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 11) - self.assertEqual(feat[0].shape, torch.Size([1, 16, 56, 56])) - self.assertEqual(feat[1].shape, torch.Size([1, 24, 28, 28])) - self.assertEqual(feat[2].shape, torch.Size([1, 24, 28, 28])) - self.assertEqual(feat[3].shape, torch.Size([1, 40, 14, 14])) - self.assertEqual(feat[4].shape, torch.Size([1, 40, 14, 14])) - self.assertEqual(feat[5].shape, torch.Size([1, 40, 14, 14])) - self.assertEqual(feat[6].shape, torch.Size([1, 48, 14, 14])) - self.assertEqual(feat[7].shape, torch.Size([1, 48, 14, 14])) - self.assertEqual(feat[8].shape, torch.Size([1, 96, 7, 7])) - self.assertEqual(feat[9].shape, torch.Size([1, 96, 7, 7])) - self.assertEqual(feat[10].shape, torch.Size([1, 96, 7, 7])) - - # Test MobileNetv3 forward with small arch and GroupNorm - model = MobileNetV3( - out_indices=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), - norm_cfg=dict(type='GN', num_groups=2, requires_grad=True)) - for m in model.modules(): - if self.is_norm(m): - self.assertIsInstance(m, GroupNorm) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 11) - 
self.assertEqual(feat[0].shape, torch.Size([1, 16, 56, 56])) - self.assertEqual(feat[1].shape, torch.Size([1, 24, 28, 28])) - self.assertEqual(feat[2].shape, torch.Size([1, 24, 28, 28])) - self.assertEqual(feat[3].shape, torch.Size([1, 40, 14, 14])) - self.assertEqual(feat[4].shape, torch.Size([1, 40, 14, 14])) - self.assertEqual(feat[5].shape, torch.Size([1, 40, 14, 14])) - self.assertEqual(feat[6].shape, torch.Size([1, 48, 14, 14])) - self.assertEqual(feat[7].shape, torch.Size([1, 48, 14, 14])) - self.assertEqual(feat[8].shape, torch.Size([1, 96, 7, 7])) - self.assertEqual(feat[9].shape, torch.Size([1, 96, 7, 7])) - self.assertEqual(feat[10].shape, torch.Size([1, 96, 7, 7])) - - # Test MobileNetv3 forward with big arch - model = MobileNetV3( - arch='big', - out_indices=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14)) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 15) - self.assertEqual(feat[0].shape, torch.Size([1, 16, 112, 112])) - self.assertEqual(feat[1].shape, torch.Size([1, 24, 56, 56])) - self.assertEqual(feat[2].shape, torch.Size([1, 24, 56, 56])) - self.assertEqual(feat[3].shape, torch.Size([1, 40, 28, 28])) - self.assertEqual(feat[4].shape, torch.Size([1, 40, 28, 28])) - self.assertEqual(feat[5].shape, torch.Size([1, 40, 28, 28])) - self.assertEqual(feat[6].shape, torch.Size([1, 80, 14, 14])) - self.assertEqual(feat[7].shape, torch.Size([1, 80, 14, 14])) - self.assertEqual(feat[8].shape, torch.Size([1, 80, 14, 14])) - self.assertEqual(feat[9].shape, torch.Size([1, 80, 14, 14])) - self.assertEqual(feat[10].shape, torch.Size([1, 112, 14, 14])) - self.assertEqual(feat[11].shape, torch.Size([1, 112, 14, 14])) - self.assertEqual(feat[12].shape, torch.Size([1, 160, 14, 14])) - self.assertEqual(feat[13].shape, torch.Size([1, 160, 7, 7])) - self.assertEqual(feat[14].shape, torch.Size([1, 160, 7, 7])) - - # Test MobileNetv3 forward with big arch - model = MobileNetV3(arch='big', out_indices=(0, )) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertIsInstance(feat, tuple) - self.assertEqual(feat[-1].shape, torch.Size([1, 16, 112, 112])) - - # Test MobileNetv3 with checkpoint forward - model = MobileNetV3(with_cp=True) - for m in model.modules(): - if isinstance(m, InvertedResidual): - self.assertTrue(m.with_cp) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertIsInstance(feat, tuple) - self.assertEqual(feat[-1].shape, torch.Size([1, 96, 7, 7])) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.models.backbones import MobileNetV3 +from mmpose.models.backbones.utils import InvertedResidual + + +class TestMobilenetV3(TestCase): + + @staticmethod + def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + @staticmethod + def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + def test_mobilenetv3_backbone(self): + with self.assertRaises(AssertionError): + # arch must in [small, big] + MobileNetV3(arch='others') + + with self.assertRaises(ValueError): + # frozen_stages must less than 12 when arch is small + MobileNetV3(arch='small', frozen_stages=12) + + with self.assertRaises(ValueError): + # frozen_stages must less than 16 when arch is big + MobileNetV3(arch='big', frozen_stages=16) + + with self.assertRaises(ValueError): + # max out_indices must less than 11 when arch is small + MobileNetV3(arch='small', out_indices=(11, )) + + with self.assertRaises(ValueError): + # max out_indices must less than 15 when arch is big + MobileNetV3(arch='big', out_indices=(15, )) + + # Test MobileNetv3 + model = MobileNetV3() + model.init_weights() + model.train() + + # Test MobileNetv3 with first stage frozen + frozen_stages = 1 + model = MobileNetV3(frozen_stages=frozen_stages) + model.init_weights() + model.train() + for param in model.conv1.parameters(): + self.assertFalse(param.requires_grad) + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + self.assertFalse(mod.training) + for param in layer.parameters(): + self.assertFalse(param.requires_grad) + + # Test MobileNetv3 with norm eval + model = MobileNetV3(norm_eval=True, out_indices=range(0, 11)) + model.init_weights() + model.train() + self.assertTrue(self.check_norm_state(model.modules(), False)) + + # Test MobileNetv3 forward with small arch + model = MobileNetV3(out_indices=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 11) + self.assertEqual(feat[0].shape, torch.Size([1, 16, 56, 56])) + self.assertEqual(feat[1].shape, torch.Size([1, 24, 28, 28])) + self.assertEqual(feat[2].shape, torch.Size([1, 24, 28, 28])) + self.assertEqual(feat[3].shape, torch.Size([1, 40, 14, 14])) + self.assertEqual(feat[4].shape, torch.Size([1, 40, 14, 14])) + self.assertEqual(feat[5].shape, torch.Size([1, 40, 14, 14])) + self.assertEqual(feat[6].shape, torch.Size([1, 48, 14, 14])) + self.assertEqual(feat[7].shape, torch.Size([1, 48, 14, 14])) + self.assertEqual(feat[8].shape, torch.Size([1, 96, 7, 7])) + self.assertEqual(feat[9].shape, torch.Size([1, 96, 7, 7])) + self.assertEqual(feat[10].shape, torch.Size([1, 96, 7, 7])) + + # Test MobileNetv3 forward with small arch and GroupNorm + model = MobileNetV3( + out_indices=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), + norm_cfg=dict(type='GN', num_groups=2, requires_grad=True)) + for m in model.modules(): + if self.is_norm(m): + self.assertIsInstance(m, GroupNorm) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 11) + 
self.assertEqual(feat[0].shape, torch.Size([1, 16, 56, 56])) + self.assertEqual(feat[1].shape, torch.Size([1, 24, 28, 28])) + self.assertEqual(feat[2].shape, torch.Size([1, 24, 28, 28])) + self.assertEqual(feat[3].shape, torch.Size([1, 40, 14, 14])) + self.assertEqual(feat[4].shape, torch.Size([1, 40, 14, 14])) + self.assertEqual(feat[5].shape, torch.Size([1, 40, 14, 14])) + self.assertEqual(feat[6].shape, torch.Size([1, 48, 14, 14])) + self.assertEqual(feat[7].shape, torch.Size([1, 48, 14, 14])) + self.assertEqual(feat[8].shape, torch.Size([1, 96, 7, 7])) + self.assertEqual(feat[9].shape, torch.Size([1, 96, 7, 7])) + self.assertEqual(feat[10].shape, torch.Size([1, 96, 7, 7])) + + # Test MobileNetv3 forward with big arch + model = MobileNetV3( + arch='big', + out_indices=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 15) + self.assertEqual(feat[0].shape, torch.Size([1, 16, 112, 112])) + self.assertEqual(feat[1].shape, torch.Size([1, 24, 56, 56])) + self.assertEqual(feat[2].shape, torch.Size([1, 24, 56, 56])) + self.assertEqual(feat[3].shape, torch.Size([1, 40, 28, 28])) + self.assertEqual(feat[4].shape, torch.Size([1, 40, 28, 28])) + self.assertEqual(feat[5].shape, torch.Size([1, 40, 28, 28])) + self.assertEqual(feat[6].shape, torch.Size([1, 80, 14, 14])) + self.assertEqual(feat[7].shape, torch.Size([1, 80, 14, 14])) + self.assertEqual(feat[8].shape, torch.Size([1, 80, 14, 14])) + self.assertEqual(feat[9].shape, torch.Size([1, 80, 14, 14])) + self.assertEqual(feat[10].shape, torch.Size([1, 112, 14, 14])) + self.assertEqual(feat[11].shape, torch.Size([1, 112, 14, 14])) + self.assertEqual(feat[12].shape, torch.Size([1, 160, 14, 14])) + self.assertEqual(feat[13].shape, torch.Size([1, 160, 7, 7])) + self.assertEqual(feat[14].shape, torch.Size([1, 160, 7, 7])) + + # Test MobileNetv3 forward with big arch + model = MobileNetV3(arch='big', out_indices=(0, )) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertIsInstance(feat, tuple) + self.assertEqual(feat[-1].shape, torch.Size([1, 16, 112, 112])) + + # Test MobileNetv3 with checkpoint forward + model = MobileNetV3(with_cp=True) + for m in model.modules(): + if isinstance(m, InvertedResidual): + self.assertTrue(m.with_cp) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertIsInstance(feat, tuple) + self.assertEqual(feat[-1].shape, torch.Size([1, 96, 7, 7])) diff --git a/tests/test_models/test_backbones/test_mspn.py b/tests/test_models/test_backbones/test_mspn.py index f97f8649f8..28836438ea 100644 --- a/tests/test_models/test_backbones/test_mspn.py +++ b/tests/test_models/test_backbones/test_mspn.py @@ -1,35 +1,35 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase
-
-import torch
-
-from mmpose.models.backbones import MSPN
-
-
-class TestMSPN(TestCase):
-
-    def test_mspn_backbone(self):
-        with self.assertRaises(AssertionError):
-            # MSPN's num_stages should larger than 0
-            MSPN(num_stages=0)
-        with self.assertRaises(AssertionError):
-            # MSPN's num_units should larger than 1
-            MSPN(num_units=1)
-        with self.assertRaises(AssertionError):
-            # len(num_blocks) should equal num_units
-            MSPN(num_units=2, num_blocks=[2, 2, 2])
-
-        # Test MSPN's outputs
-        model = MSPN(num_stages=2, num_units=2, num_blocks=[2, 2])
-        model.init_weights()
-        model.train()
-
-        imgs = torch.randn(1, 3, 511, 511)
-        feat = model(imgs)
-        self.assertEqual(len(feat), 2)
-        self.assertEqual(len(feat[0]), 2)
-        self.assertEqual(len(feat[1]), 2)
-        self.assertEqual(feat[0][0].shape, torch.Size([1, 256, 64, 64]))
-        self.assertEqual(feat[0][1].shape, torch.Size([1, 256, 128, 128]))
-        self.assertEqual(feat[1][0].shape, torch.Size([1, 256, 64, 64]))
-        self.assertEqual(feat[1][1].shape, torch.Size([1, 256, 128, 128]))
+# Copyright (c) OpenMMLab. All rights reserved.
+from unittest import TestCase
+
+import torch
+
+from mmpose.models.backbones import MSPN
+
+
+class TestMSPN(TestCase):
+
+    def test_mspn_backbone(self):
+        with self.assertRaises(AssertionError):
+            # MSPN's num_stages should be larger than 0
+            MSPN(num_stages=0)
+        with self.assertRaises(AssertionError):
+            # MSPN's num_units should be larger than 1
+            MSPN(num_units=1)
+        with self.assertRaises(AssertionError):
+            # len(num_blocks) should equal num_units
+            MSPN(num_units=2, num_blocks=[2, 2, 2])
+
+        # Test MSPN's outputs
+        model = MSPN(num_stages=2, num_units=2, num_blocks=[2, 2])
+        model.init_weights()
+        model.train()
+
+        imgs = torch.randn(1, 3, 511, 511)
+        feat = model(imgs)
+        self.assertEqual(len(feat), 2)
+        self.assertEqual(len(feat[0]), 2)
+        self.assertEqual(len(feat[1]), 2)
+        self.assertEqual(feat[0][0].shape, torch.Size([1, 256, 64, 64]))
+        self.assertEqual(feat[0][1].shape, torch.Size([1, 256, 128, 128]))
+        self.assertEqual(feat[1][0].shape, torch.Size([1, 256, 64, 64]))
+        self.assertEqual(feat[1][1].shape, torch.Size([1, 256, 128, 128]))
diff --git a/tests/test_models/test_backbones/test_pvt.py b/tests/test_models/test_backbones/test_pvt.py
index c0b232e3e2..7e28116a28 100644
--- a/tests/test_models/test_backbones/test_pvt.py
+++ b/tests/test_models/test_backbones/test_pvt.py
@@ -1,126 +1,126 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from unittest import TestCase
-
-import torch
-
-from mmpose.models.backbones.pvt import (PVTEncoderLayer,
-                                          PyramidVisionTransformer,
-                                          PyramidVisionTransformerV2)
-
-
-class TestPVT(TestCase):
-
-    def test_pvt_block(self):
-        # test PVT structure and forward
-        block = PVTEncoderLayer(
-            embed_dims=64, num_heads=4, feedforward_channels=256)
-        self.assertEqual(block.ffn.embed_dims, 64)
-        self.assertEqual(block.attn.num_heads, 4)
-        self.assertEqual(block.ffn.feedforward_channels, 256)
-        x = torch.randn(1, 56 * 56, 64)
-        x_out = block(x, (56, 56))
-        self.assertEqual(x_out.shape, torch.Size([1, 56 * 56, 64]))
-
-    def test_pvt(self):
-        """Test PVT backbone."""
-
-        # test pretrained image size
-        with self.assertRaises(AssertionError):
-            PyramidVisionTransformer(pretrain_img_size=(224, 224, 224))
-
-        # test padding
-        model = PyramidVisionTransformer(
-            paddings=['corner', 'corner', 'corner', 'corner'])
-        temp = torch.randn((1, 3, 32, 32))
-        outs = model(temp)
-        self.assertEqual(outs[0].shape, (1, 64, 8, 8))
-        self.assertEqual(outs[1].shape, (1, 128, 4, 4))
-        self.assertEqual(outs[2].shape, (1, 320, 2, 2))
-        self.assertEqual(outs[3].shape, (1, 512, 1, 1))
-
-        # Test absolute position embedding
-        temp = torch.randn((1, 3, 224, 224))
-        model = PyramidVisionTransformer(
-            pretrain_img_size=224, use_abs_pos_embed=True)
-        model.init_weights()
-        model(temp)
-
-        # Test normal inference
-        temp = torch.randn((1, 3, 32, 32))
-        model = PyramidVisionTransformer()
-        outs = model(temp)
-        self.assertEqual(outs[0].shape, (1, 64, 8, 8))
-        self.assertEqual(outs[1].shape, (1, 128, 4, 4))
-        self.assertEqual(outs[2].shape, (1, 320, 2, 2))
-        self.assertEqual(outs[3].shape, (1, 512, 1, 1))
-
-        # Test abnormal inference size
-        temp = torch.randn((1, 3, 33, 33))
-        model = PyramidVisionTransformer()
-        outs = model(temp)
-        self.assertEqual(outs[0].shape, (1, 64, 8, 8))
-        self.assertEqual(outs[1].shape, (1, 128, 4, 4))
-        self.assertEqual(outs[2].shape, (1, 320, 2, 2))
-        self.assertEqual(outs[3].shape, (1, 512, 1, 1))
-
-        # Test abnormal inference size
-        temp = torch.randn((1, 3, 112, 137))
-        model = PyramidVisionTransformer()
-        outs = model(temp)
-        self.assertEqual(outs[0].shape, (1, 64, 28, 34))
-        self.assertEqual(outs[1].shape, (1, 128, 14, 17))
-        self.assertEqual(outs[2].shape, (1, 320, 7, 8))
-        self.assertEqual(outs[3].shape, (1, 512, 3, 4))
-
-    def test_pvtv2(self):
-        """Test PVTv2 backbone."""
-
-        with self.assertRaises(TypeError):
-            # Pretrained arg must be str or None.
- PyramidVisionTransformerV2(pretrained=123) - - # test pretrained image size - with self.assertRaises(AssertionError): - PyramidVisionTransformerV2(pretrain_img_size=(224, 224, 224)) - - # test init_cfg with pretrained model - model = PyramidVisionTransformerV2( - embed_dims=32, - num_layers=[2, 2, 2, 2], - init_cfg=dict( - type='Pretrained', - checkpoint='https://github.com/whai362/PVT/' - 'releases/download/v2/pvt_v2_b0.pth')) - model.init_weights() - - # test init weights from scratch - model = PyramidVisionTransformerV2( - embed_dims=32, num_layers=[2, 2, 2, 2]) - model.init_weights() - - # Test normal inference - temp = torch.randn((1, 3, 32, 32)) - model = PyramidVisionTransformerV2() - outs = model(temp) - self.assertEqual(outs[0].shape, (1, 64, 8, 8)) - self.assertEqual(outs[1].shape, (1, 128, 4, 4)) - self.assertEqual(outs[2].shape, (1, 320, 2, 2)) - self.assertEqual(outs[3].shape, (1, 512, 1, 1)) - - # Test abnormal inference size - temp = torch.randn((1, 3, 31, 31)) - model = PyramidVisionTransformerV2() - outs = model(temp) - self.assertEqual(outs[0].shape, (1, 64, 8, 8)) - self.assertEqual(outs[1].shape, (1, 128, 4, 4)) - self.assertEqual(outs[2].shape, (1, 320, 2, 2)) - self.assertEqual(outs[3].shape, (1, 512, 1, 1)) - - # Test abnormal inference size - temp = torch.randn((1, 3, 112, 137)) - model = PyramidVisionTransformerV2() - outs = model(temp) - self.assertEqual(outs[0].shape, (1, 64, 28, 35)) - self.assertEqual(outs[1].shape, (1, 128, 14, 18)) - self.assertEqual(outs[2].shape, (1, 320, 7, 9)) - self.assertEqual(outs[3].shape, (1, 512, 4, 5)) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import torch + +from mmpose.models.backbones.pvt import (PVTEncoderLayer, + PyramidVisionTransformer, + PyramidVisionTransformerV2) + + +class TestPVT(TestCase): + + def test_pvt_block(self): + # test PVT structure and forward + block = PVTEncoderLayer( + embed_dims=64, num_heads=4, feedforward_channels=256) + self.assertEqual(block.ffn.embed_dims, 64) + self.assertEqual(block.attn.num_heads, 4) + self.assertEqual(block.ffn.feedforward_channels, 256) + x = torch.randn(1, 56 * 56, 64) + x_out = block(x, (56, 56)) + self.assertEqual(x_out.shape, torch.Size([1, 56 * 56, 64])) + + def test_pvt(self): + """Test PVT backbone.""" + + # test pretrained image size + with self.assertRaises(AssertionError): + PyramidVisionTransformer(pretrain_img_size=(224, 224, 224)) + + # test padding + model = PyramidVisionTransformer( + paddings=['corner', 'corner', 'corner', 'corner']) + temp = torch.randn((1, 3, 32, 32)) + outs = model(temp) + self.assertEqual(outs[0].shape, (1, 64, 8, 8)) + self.assertEqual(outs[1].shape, (1, 128, 4, 4)) + self.assertEqual(outs[2].shape, (1, 320, 2, 2)) + self.assertEqual(outs[3].shape, (1, 512, 1, 1)) + + # Test absolute position embedding + temp = torch.randn((1, 3, 224, 224)) + model = PyramidVisionTransformer( + pretrain_img_size=224, use_abs_pos_embed=True) + model.init_weights() + model(temp) + + # Test normal inference + temp = torch.randn((1, 3, 32, 32)) + model = PyramidVisionTransformer() + outs = model(temp) + self.assertEqual(outs[0].shape, (1, 64, 8, 8)) + self.assertEqual(outs[1].shape, (1, 128, 4, 4)) + self.assertEqual(outs[2].shape, (1, 320, 2, 2)) + self.assertEqual(outs[3].shape, (1, 512, 1, 1)) + + # Test abnormal inference size + temp = torch.randn((1, 3, 33, 33)) + model = PyramidVisionTransformer() + outs = model(temp) + self.assertEqual(outs[0].shape, (1, 64, 8, 8)) + self.assertEqual(outs[1].shape, (1, 128, 4, 
4)) + self.assertEqual(outs[2].shape, (1, 320, 2, 2)) + self.assertEqual(outs[3].shape, (1, 512, 1, 1)) + + # Test abnormal inference size + temp = torch.randn((1, 3, 112, 137)) + model = PyramidVisionTransformer() + outs = model(temp) + self.assertEqual(outs[0].shape, (1, 64, 28, 34)) + self.assertEqual(outs[1].shape, (1, 128, 14, 17)) + self.assertEqual(outs[2].shape, (1, 320, 7, 8)) + self.assertEqual(outs[3].shape, (1, 512, 3, 4)) + + def test_pvtv2(self): + """Test PVTv2 backbone.""" + + with self.assertRaises(TypeError): + # Pretrained arg must be str or None. + PyramidVisionTransformerV2(pretrained=123) + + # test pretrained image size + with self.assertRaises(AssertionError): + PyramidVisionTransformerV2(pretrain_img_size=(224, 224, 224)) + + # test init_cfg with pretrained model + model = PyramidVisionTransformerV2( + embed_dims=32, + num_layers=[2, 2, 2, 2], + init_cfg=dict( + type='Pretrained', + checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_v2_b0.pth')) + model.init_weights() + + # test init weights from scratch + model = PyramidVisionTransformerV2( + embed_dims=32, num_layers=[2, 2, 2, 2]) + model.init_weights() + + # Test normal inference + temp = torch.randn((1, 3, 32, 32)) + model = PyramidVisionTransformerV2() + outs = model(temp) + self.assertEqual(outs[0].shape, (1, 64, 8, 8)) + self.assertEqual(outs[1].shape, (1, 128, 4, 4)) + self.assertEqual(outs[2].shape, (1, 320, 2, 2)) + self.assertEqual(outs[3].shape, (1, 512, 1, 1)) + + # Test abnormal inference size + temp = torch.randn((1, 3, 31, 31)) + model = PyramidVisionTransformerV2() + outs = model(temp) + self.assertEqual(outs[0].shape, (1, 64, 8, 8)) + self.assertEqual(outs[1].shape, (1, 128, 4, 4)) + self.assertEqual(outs[2].shape, (1, 320, 2, 2)) + self.assertEqual(outs[3].shape, (1, 512, 1, 1)) + + # Test abnormal inference size + temp = torch.randn((1, 3, 112, 137)) + model = PyramidVisionTransformerV2() + outs = model(temp) + self.assertEqual(outs[0].shape, (1, 64, 28, 35)) + self.assertEqual(outs[1].shape, (1, 128, 14, 18)) + self.assertEqual(outs[2].shape, (1, 320, 7, 9)) + self.assertEqual(outs[3].shape, (1, 512, 4, 5)) diff --git a/tests/test_models/test_backbones/test_regnet.py b/tests/test_models/test_backbones/test_regnet.py index 09a38aa032..364f6f00ae 100644 --- a/tests/test_models/test_backbones/test_regnet.py +++ b/tests/test_models/test_backbones/test_regnet.py @@ -1,103 +1,103 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase
-
-import torch
-
-from mmpose.models.backbones import RegNet
-
-
-class TestRegnet(TestCase):
-
-    regnet_test_data = [
-        ('regnetx_400mf',
-         dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22,
-              bot_mul=1.0), [32, 64, 160, 384]),
-        ('regnetx_800mf',
-         dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16,
-              bot_mul=1.0), [64, 128, 288, 672]),
-        ('regnetx_1.6gf',
-         dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18,
-              bot_mul=1.0), [72, 168, 408, 912]),
-        ('regnetx_3.2gf',
-         dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25,
-              bot_mul=1.0), [96, 192, 432, 1008]),
-        ('regnetx_4.0gf',
-         dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23,
-              bot_mul=1.0), [80, 240, 560, 1360]),
-        ('regnetx_6.4gf',
-         dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17,
-              bot_mul=1.0), [168, 392, 784, 1624]),
-        ('regnetx_8.0gf',
-         dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23,
-              bot_mul=1.0), [80, 240, 720, 1920]),
-        ('regnetx_12gf',
-         dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19,
-              bot_mul=1.0), [224, 448, 896, 2240]),
-    ]
-
-    def _test_regnet_backbone(self, arch_name, arch, out_channels):
-        with self.assertRaises(AssertionError):
-            # ResNeXt depth should be in [50, 101, 152]
-            RegNet(arch_name + '233')
-
-        # output the last feature map
-        model = RegNet(arch_name)
-        model.init_weights()
-        model.train()
-
-        imgs = torch.randn(1, 3, 224, 224)
-        feat = model(imgs)
-        self.assertIsInstance(feat, tuple)
-        self.assertEqual(feat[-1].shape, (1, out_channels[-1], 7, 7))
-
-        # output feature map of all stages
-        model = RegNet(arch_name, out_indices=(0, 1, 2, 3))
-        model.init_weights()
-        model.train()
-
-        imgs = torch.randn(1, 3, 224, 224)
-        feat = model(imgs)
-        self.assertEqual(len(feat), 4)
-        self.assertEqual(feat[0].shape, (1, out_channels[0], 56, 56))
-        self.assertEqual(feat[1].shape, (1, out_channels[1], 28, 28))
-        self.assertEqual(feat[2].shape, (1, out_channels[2], 14, 14))
-        self.assertEqual(feat[3].shape, (1, out_channels[3], 7, 7))
-
-    def test_regnet_backbone(self):
-        for arch_name, arch, out_channels in self.regnet_test_data:
-            with self.subTest(
-                    arch_name=arch_name, arch=arch, out_channels=out_channels):
-                self._test_regnet_backbone(arch_name, arch, out_channels)
-
-    def _test_custom_arch(self, arch_name, arch, out_channels):
-        # output the last feature map
-        model = RegNet(arch)
-        model.init_weights()
-
-        imgs = torch.randn(1, 3, 224, 224)
-        feat = model(imgs)
-        self.assertIsInstance(feat, tuple)
-        self.assertEqual(feat[-1].shape, (1, out_channels[-1], 7, 7))
-
-        # output feature map of all stages
-        model = RegNet(arch, out_indices=(0, 1, 2, 3))
-        model.init_weights()
-
-        imgs = torch.randn(1, 3, 224, 224)
-        feat = model(imgs)
-        self.assertEqual(len(feat), 4)
-        self.assertEqual(feat[0].shape, (1, out_channels[0], 56, 56))
-        self.assertEqual(feat[1].shape, (1, out_channels[1], 28, 28))
-        self.assertEqual(feat[2].shape, (1, out_channels[2], 14, 14))
-        self.assertEqual(feat[3].shape, (1, out_channels[3], 7, 7))
-
-    def test_custom_arch(self):
-        for arch_name, arch, out_channels in self.regnet_test_data:
-            with self.subTest(
-                    arch_name=arch_name, arch=arch, out_channels=out_channels):
-                self._test_custom_arch(arch_name, arch, out_channels)
-
-    def test_exception(self):
-        # arch must be a str or dict
-        with self.assertRaises(TypeError):
-            _ = RegNet(50)
+# Copyright (c) OpenMMLab. All rights reserved.
+from unittest import TestCase + +import torch + +from mmpose.models.backbones import RegNet + + +class TestRegnet(TestCase): + + regnet_test_data = [ + ('regnetx_400mf', + dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, + bot_mul=1.0), [32, 64, 160, 384]), + ('regnetx_800mf', + dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, + bot_mul=1.0), [64, 128, 288, 672]), + ('regnetx_1.6gf', + dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, + bot_mul=1.0), [72, 168, 408, 912]), + ('regnetx_3.2gf', + dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, + bot_mul=1.0), [96, 192, 432, 1008]), + ('regnetx_4.0gf', + dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, + bot_mul=1.0), [80, 240, 560, 1360]), + ('regnetx_6.4gf', + dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, + bot_mul=1.0), [168, 392, 784, 1624]), + ('regnetx_8.0gf', + dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, + bot_mul=1.0), [80, 240, 720, 1920]), + ('regnetx_12gf', + dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, + bot_mul=1.0), [224, 448, 896, 2240]), + ] + + def _test_regnet_backbone(self, arch_name, arch, out_channels): + with self.assertRaises(AssertionError): + # ResNeXt depth should be in [50, 101, 152] + RegNet(arch_name + '233') + + # output the last feature map + model = RegNet(arch_name) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertIsInstance(feat, tuple) + self.assertEqual(feat[-1].shape, (1, out_channels[-1], 7, 7)) + + # output feature map of all stages + model = RegNet(arch_name, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 4) + self.assertEqual(feat[0].shape, (1, out_channels[0], 56, 56)) + self.assertEqual(feat[1].shape, (1, out_channels[1], 28, 28)) + self.assertEqual(feat[2].shape, (1, out_channels[2], 14, 14)) + self.assertEqual(feat[3].shape, (1, out_channels[3], 7, 7)) + + def test_regnet_backbone(self): + for arch_name, arch, out_channels in self.regnet_test_data: + with self.subTest( + arch_name=arch_name, arch=arch, out_channels=out_channels): + self._test_regnet_backbone(arch_name, arch, out_channels) + + def _test_custom_arch(self, arch_name, arch, out_channels): + # output the last feature map + model = RegNet(arch) + model.init_weights() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertIsInstance(feat, tuple) + self.assertEqual(feat[-1].shape, (1, out_channels[-1], 7, 7)) + + # output feature map of all stages + model = RegNet(arch, out_indices=(0, 1, 2, 3)) + model.init_weights() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 4) + self.assertEqual(feat[0].shape, (1, out_channels[0], 56, 56)) + self.assertEqual(feat[1].shape, (1, out_channels[1], 28, 28)) + self.assertEqual(feat[2].shape, (1, out_channels[2], 14, 14)) + self.assertEqual(feat[3].shape, (1, out_channels[3], 7, 7)) + + def test_custom_arch(self): + for arch_name, arch, out_channels in self.regnet_test_data: + with self.subTest( + arch_name=arch_name, arch=arch, out_channels=out_channels): + self._test_custom_arch(arch_name, arch, out_channels) + + def test_exception(self): + # arch must be a str or dict + with self.assertRaises(TypeError): + _ = RegNet(50) diff --git a/tests/test_models/test_backbones/test_resnest.py b/tests/test_models/test_backbones/test_resnest.py index 2777a2fc85..c768f1de1f 100644 --- a/tests/test_models/test_backbones/test_resnest.py +++ 
b/tests/test_models/test_backbones/test_resnest.py @@ -1,47 +1,47 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import torch - -from mmpose.models.backbones import ResNeSt -from mmpose.models.backbones.resnest import Bottleneck as BottleneckS - - -class TestResnest(TestCase): - - def test_bottleneck(self): - with self.assertRaises(AssertionError): - # Style must be in ['pytorch', 'caffe'] - BottleneckS( - 64, 64, radix=2, reduction_factor=4, style='tensorflow') - - # Test ResNeSt Bottleneck structure - block = BottleneckS( - 64, 256, radix=2, reduction_factor=4, stride=2, style='pytorch') - self.assertEqual(block.avd_layer.stride, 2) - self.assertEqual(block.conv2.channels, 64) - - # Test ResNeSt Bottleneck forward - block = BottleneckS(64, 64, radix=2, reduction_factor=4) - x = torch.randn(2, 64, 56, 56) - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size([2, 64, 56, 56])) - - def test_resnest(self): - with self.assertRaises(KeyError): - # ResNeSt depth should be in [50, 101, 152, 200] - ResNeSt(depth=18) - - # Test ResNeSt with radix 2, reduction_factor 4 - model = ResNeSt( - depth=50, radix=2, reduction_factor=4, out_indices=(0, 1, 2, 3)) - model.init_weights() - model.train() - - imgs = torch.randn(2, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 4) - self.assertEqual(feat[0].shape, torch.Size([2, 256, 56, 56])) - self.assertEqual(feat[1].shape, torch.Size([2, 512, 28, 28])) - self.assertEqual(feat[2].shape, torch.Size([2, 1024, 14, 14])) - self.assertEqual(feat[3].shape, torch.Size([2, 2048, 7, 7])) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import torch + +from mmpose.models.backbones import ResNeSt +from mmpose.models.backbones.resnest import Bottleneck as BottleneckS + + +class TestResnest(TestCase): + + def test_bottleneck(self): + with self.assertRaises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + BottleneckS( + 64, 64, radix=2, reduction_factor=4, style='tensorflow') + + # Test ResNeSt Bottleneck structure + block = BottleneckS( + 64, 256, radix=2, reduction_factor=4, stride=2, style='pytorch') + self.assertEqual(block.avd_layer.stride, 2) + self.assertEqual(block.conv2.channels, 64) + + # Test ResNeSt Bottleneck forward + block = BottleneckS(64, 64, radix=2, reduction_factor=4) + x = torch.randn(2, 64, 56, 56) + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size([2, 64, 56, 56])) + + def test_resnest(self): + with self.assertRaises(KeyError): + # ResNeSt depth should be in [50, 101, 152, 200] + ResNeSt(depth=18) + + # Test ResNeSt with radix 2, reduction_factor 4 + model = ResNeSt( + depth=50, radix=2, reduction_factor=4, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(2, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 4) + self.assertEqual(feat[0].shape, torch.Size([2, 256, 56, 56])) + self.assertEqual(feat[1].shape, torch.Size([2, 512, 28, 28])) + self.assertEqual(feat[2].shape, torch.Size([2, 1024, 14, 14])) + self.assertEqual(feat[3].shape, torch.Size([2, 2048, 7, 7])) diff --git a/tests/test_models/test_backbones/test_resnet.py b/tests/test_models/test_backbones/test_resnet.py index 4774f250c2..7a800c99b7 100644 --- a/tests/test_models/test_backbones/test_resnet.py +++ b/tests/test_models/test_backbones/test_resnet.py @@ -1,560 +1,560 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import torch -import torch.nn as nn -from mmcv.cnn import ConvModule -from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm - -from mmpose.models.backbones import ResNet, ResNetV1d -from mmpose.models.backbones.resnet import (BasicBlock, Bottleneck, ResLayer, - get_expansion) - - -class TestResnet(TestCase): - - @staticmethod - def is_block(modules): - """Check if is ResNet building block.""" - if isinstance(modules, (BasicBlock, Bottleneck)): - return True - return False - - @staticmethod - def all_zeros(modules): - """Check if the weight(and bias) is all zero.""" - weight_zero = torch.equal(modules.weight.data, - torch.zeros_like(modules.weight.data)) - if hasattr(modules, 'bias'): - bias_zero = torch.equal(modules.bias.data, - torch.zeros_like(modules.bias.data)) - else: - bias_zero = True - - return weight_zero and bias_zero - - @staticmethod - def check_norm_state(modules, train_state): - """Check if norm layer is in correct train state.""" - for mod in modules: - if isinstance(mod, _BatchNorm): - if mod.training != train_state: - return False - return True - - def test_get_expansion(self): - self.assertEqual(get_expansion(Bottleneck, 2), 2) - self.assertEqual(get_expansion(BasicBlock), 1) - self.assertEqual(get_expansion(Bottleneck), 4) - - class MyResBlock(nn.Module): - - expansion = 8 - - self.assertEqual(get_expansion(MyResBlock), 8) - - # expansion must be an integer or None - with self.assertRaises(TypeError): - get_expansion(Bottleneck, '0') - - # expansion is not specified and cannot be inferred - with self.assertRaises(TypeError): - - class SomeModule(nn.Module): - pass - - get_expansion(SomeModule) - - def test_basic_block(self): - # expansion must be 1 - with self.assertRaises(AssertionError): - BasicBlock(64, 64, expansion=2) - - # BasicBlock with stride 1, out_channels == in_channels - block = BasicBlock(64, 64) - self.assertEqual(block.in_channels, 64) - self.assertEqual(block.mid_channels, 64) - self.assertEqual(block.out_channels, 64) - self.assertEqual(block.conv1.in_channels, 64) - self.assertEqual(block.conv1.out_channels, 64) - self.assertEqual(block.conv1.kernel_size, (3, 3)) - self.assertEqual(block.conv1.stride, (1, 1)) - self.assertEqual(block.conv2.in_channels, 64) - self.assertEqual(block.conv2.out_channels, 64) - self.assertEqual(block.conv2.kernel_size, (3, 3)) - x = torch.randn(1, 64, 56, 56) - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) - - # BasicBlock with stride 1 and downsample - downsample = nn.Sequential( - nn.Conv2d(64, 128, kernel_size=1, bias=False), nn.BatchNorm2d(128)) - block = BasicBlock(64, 128, downsample=downsample) - self.assertEqual(block.in_channels, 64) - self.assertEqual(block.mid_channels, 128) - self.assertEqual(block.out_channels, 128) - self.assertEqual(block.conv1.in_channels, 64) - self.assertEqual(block.conv1.out_channels, 128) - self.assertEqual(block.conv1.kernel_size, (3, 3)) - self.assertEqual(block.conv1.stride, (1, 1)) - self.assertEqual(block.conv2.in_channels, 128) - self.assertEqual(block.conv2.out_channels, 128) - self.assertEqual(block.conv2.kernel_size, (3, 3)) - x = torch.randn(1, 64, 56, 56) - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size([1, 128, 56, 56])) - - # BasicBlock with stride 2 and downsample - downsample = nn.Sequential( - nn.Conv2d(64, 128, kernel_size=1, stride=2, bias=False), - nn.BatchNorm2d(128)) - block = BasicBlock(64, 128, stride=2, downsample=downsample) - self.assertEqual(block.in_channels, 64) - 
self.assertEqual(block.mid_channels, 128) - self.assertEqual(block.out_channels, 128) - self.assertEqual(block.conv1.in_channels, 64) - self.assertEqual(block.conv1.out_channels, 128) - self.assertEqual(block.conv1.kernel_size, (3, 3)) - self.assertEqual(block.conv1.stride, (2, 2)) - self.assertEqual(block.conv2.in_channels, 128) - self.assertEqual(block.conv2.out_channels, 128) - self.assertEqual(block.conv2.kernel_size, (3, 3)) - x = torch.randn(1, 64, 56, 56) - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size([1, 128, 28, 28])) - - # forward with checkpointing - block = BasicBlock(64, 64, with_cp=True) - self.assertTrue(block.with_cp) - x = torch.randn(1, 64, 56, 56, requires_grad=True) - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) - - def test_bottleneck(self): - # style must be in ['pytorch', 'caffe'] - with self.assertRaises(AssertionError): - Bottleneck(64, 64, style='tensorflow') - - # expansion must be divisible by out_channels - with self.assertRaises(AssertionError): - Bottleneck(64, 64, expansion=3) - - # Test Bottleneck style - block = Bottleneck(64, 64, stride=2, style='pytorch') - self.assertEqual(block.conv1.stride, (1, 1)) - self.assertEqual(block.conv2.stride, (2, 2)) - block = Bottleneck(64, 64, stride=2, style='caffe') - self.assertEqual(block.conv1.stride, (2, 2)) - self.assertEqual(block.conv2.stride, (1, 1)) - - # Bottleneck with stride 1 - block = Bottleneck(64, 64, style='pytorch') - self.assertEqual(block.in_channels, 64) - self.assertEqual(block.mid_channels, 16) - self.assertEqual(block.out_channels, 64) - self.assertEqual(block.conv1.in_channels, 64) - self.assertEqual(block.conv1.out_channels, 16) - self.assertEqual(block.conv1.kernel_size, (1, 1)) - self.assertEqual(block.conv2.in_channels, 16) - self.assertEqual(block.conv2.out_channels, 16) - self.assertEqual(block.conv2.kernel_size, (3, 3)) - self.assertEqual(block.conv3.in_channels, 16) - self.assertEqual(block.conv3.out_channels, 64) - self.assertEqual(block.conv3.kernel_size, (1, 1)) - x = torch.randn(1, 64, 56, 56) - x_out = block(x) - self.assertEqual(x_out.shape, (1, 64, 56, 56)) - - # Bottleneck with stride 1 and downsample - downsample = nn.Sequential( - nn.Conv2d(64, 128, kernel_size=1), nn.BatchNorm2d(128)) - block = Bottleneck(64, 128, style='pytorch', downsample=downsample) - self.assertEqual(block.in_channels, 64) - self.assertEqual(block.mid_channels, 32) - self.assertEqual(block.out_channels, 128) - self.assertEqual(block.conv1.in_channels, 64) - self.assertEqual(block.conv1.out_channels, 32) - self.assertEqual(block.conv1.kernel_size, (1, 1)) - self.assertEqual(block.conv2.in_channels, 32) - self.assertEqual(block.conv2.out_channels, 32) - self.assertEqual(block.conv2.kernel_size, (3, 3)) - self.assertEqual(block.conv3.in_channels, 32) - self.assertEqual(block.conv3.out_channels, 128) - self.assertEqual(block.conv3.kernel_size, (1, 1)) - x = torch.randn(1, 64, 56, 56) - x_out = block(x) - self.assertEqual(x_out.shape, (1, 128, 56, 56)) - - # Bottleneck with stride 2 and downsample - downsample = nn.Sequential( - nn.Conv2d(64, 128, kernel_size=1, stride=2), nn.BatchNorm2d(128)) - block = Bottleneck( - 64, 128, stride=2, style='pytorch', downsample=downsample) - x = torch.randn(1, 64, 56, 56) - x_out = block(x) - self.assertEqual(x_out.shape, (1, 128, 28, 28)) - - # Bottleneck with expansion 2 - block = Bottleneck(64, 64, style='pytorch', expansion=2) - self.assertEqual(block.in_channels, 64) - self.assertEqual(block.mid_channels, 32) - 
self.assertEqual(block.out_channels, 64) - self.assertEqual(block.conv1.in_channels, 64) - self.assertEqual(block.conv1.out_channels, 32) - self.assertEqual(block.conv1.kernel_size, (1, 1)) - self.assertEqual(block.conv2.in_channels, 32) - self.assertEqual(block.conv2.out_channels, 32) - self.assertEqual(block.conv2.kernel_size, (3, 3)) - self.assertEqual(block.conv3.in_channels, 32) - self.assertEqual(block.conv3.out_channels, 64) - self.assertEqual(block.conv3.kernel_size, (1, 1)) - x = torch.randn(1, 64, 56, 56) - x_out = block(x) - self.assertEqual(x_out.shape, (1, 64, 56, 56)) - - # Test Bottleneck with checkpointing - block = Bottleneck(64, 64, with_cp=True) - block.train() - self.assertTrue(block.with_cp) - x = torch.randn(1, 64, 56, 56, requires_grad=True) - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) - - def test_basicblock_reslayer(self): - # 3 BasicBlock w/o downsample - layer = ResLayer(BasicBlock, 3, 32, 32) - self.assertEqual(len(layer), 3) - for i in range(3): - self.assertEqual(layer[i].in_channels, 32) - self.assertEqual(layer[i].out_channels, 32) - self.assertIsNone(layer[i].downsample) - x = torch.randn(1, 32, 56, 56) - x_out = layer(x) - self.assertEqual(x_out.shape, (1, 32, 56, 56)) - - # 3 BasicBlock w/ stride 1 and downsample - layer = ResLayer(BasicBlock, 3, 32, 64) - self.assertEqual(len(layer), 3) - self.assertEqual(layer[0].in_channels, 32) - self.assertEqual(layer[0].out_channels, 64) - self.assertEqual( - layer[0].downsample is not None and len(layer[0].downsample), 2) - self.assertIsInstance(layer[0].downsample[0], nn.Conv2d) - self.assertEqual(layer[0].downsample[0].stride, (1, 1)) - for i in range(1, 3): - self.assertEqual(layer[i].in_channels, 64) - self.assertEqual(layer[i].out_channels, 64) - self.assertIsNone(layer[i].downsample) - x = torch.randn(1, 32, 56, 56) - x_out = layer(x) - self.assertEqual(x_out.shape, (1, 64, 56, 56)) - - # 3 BasicBlock w/ stride 2 and downsample - layer = ResLayer(BasicBlock, 3, 32, 64, stride=2) - self.assertEqual(len(layer), 3) - self.assertEqual(layer[0].in_channels, 32) - self.assertEqual(layer[0].out_channels, 64) - self.assertEqual(layer[0].stride, 2) - self.assertEqual( - layer[0].downsample is not None and len(layer[0].downsample), 2) - self.assertIsInstance(layer[0].downsample[0], nn.Conv2d) - self.assertEqual(layer[0].downsample[0].stride, (2, 2)) - for i in range(1, 3): - self.assertEqual(layer[i].in_channels, 64) - self.assertEqual(layer[i].out_channels, 64) - self.assertEqual(layer[i].stride, 1) - self.assertIsNone(layer[i].downsample) - x = torch.randn(1, 32, 56, 56) - x_out = layer(x) - self.assertEqual(x_out.shape, (1, 64, 28, 28)) - - # 3 BasicBlock w/ stride 2 and downsample with avg pool - layer = ResLayer(BasicBlock, 3, 32, 64, stride=2, avg_down=True) - self.assertEqual(len(layer), 3) - self.assertEqual(layer[0].in_channels, 32) - self.assertEqual(layer[0].out_channels, 64) - self.assertEqual(layer[0].stride, 2) - self.assertEqual( - layer[0].downsample is not None and len(layer[0].downsample), 3) - self.assertIsInstance(layer[0].downsample[0], nn.AvgPool2d) - self.assertEqual(layer[0].downsample[0].stride, 2) - for i in range(1, 3): - self.assertEqual(layer[i].in_channels, 64) - self.assertEqual(layer[i].out_channels, 64) - self.assertEqual(layer[i].stride, 1) - self.assertIsNone(layer[i].downsample) - x = torch.randn(1, 32, 56, 56) - x_out = layer(x) - self.assertEqual(x_out.shape, (1, 64, 28, 28)) - - def test_bottleneck_reslayer(self): - # 3 Bottleneck w/o downsample 
- layer = ResLayer(Bottleneck, 3, 32, 32) - self.assertEqual(len(layer), 3) - for i in range(3): - self.assertEqual(layer[i].in_channels, 32) - self.assertEqual(layer[i].out_channels, 32) - self.assertIsNone(layer[i].downsample) - x = torch.randn(1, 32, 56, 56) - x_out = layer(x) - self.assertEqual(x_out.shape, (1, 32, 56, 56)) - - # 3 Bottleneck w/ stride 1 and downsample - layer = ResLayer(Bottleneck, 3, 32, 64) - self.assertEqual(len(layer), 3) - self.assertEqual(layer[0].in_channels, 32) - self.assertEqual(layer[0].out_channels, 64) - self.assertEqual(layer[0].stride, 1) - self.assertEqual(layer[0].conv1.out_channels, 16) - self.assertEqual( - layer[0].downsample is not None and len(layer[0].downsample), 2) - self.assertIsInstance(layer[0].downsample[0], nn.Conv2d) - self.assertEqual(layer[0].downsample[0].stride, (1, 1)) - for i in range(1, 3): - self.assertEqual(layer[i].in_channels, 64) - self.assertEqual(layer[i].out_channels, 64) - self.assertEqual(layer[i].conv1.out_channels, 16) - self.assertEqual(layer[i].stride, 1) - self.assertIsNone(layer[i].downsample) - x = torch.randn(1, 32, 56, 56) - x_out = layer(x) - self.assertEqual(x_out.shape, (1, 64, 56, 56)) - - # 3 Bottleneck w/ stride 2 and downsample - layer = ResLayer(Bottleneck, 3, 32, 64, stride=2) - self.assertEqual(len(layer), 3) - self.assertEqual(layer[0].in_channels, 32) - self.assertEqual(layer[0].out_channels, 64) - self.assertEqual(layer[0].stride, 2) - self.assertEqual(layer[0].conv1.out_channels, 16) - self.assertEqual( - layer[0].downsample is not None and len(layer[0].downsample), 2) - self.assertIsInstance(layer[0].downsample[0], nn.Conv2d) - self.assertEqual(layer[0].downsample[0].stride, (2, 2)) - for i in range(1, 3): - self.assertEqual(layer[i].in_channels, 64) - self.assertEqual(layer[i].out_channels, 64) - self.assertEqual(layer[i].conv1.out_channels, 16) - self.assertEqual(layer[i].stride, 1) - self.assertIsNone(layer[i].downsample) - x = torch.randn(1, 32, 56, 56) - x_out = layer(x) - self.assertEqual(x_out.shape, (1, 64, 28, 28)) - - # 3 Bottleneck w/ stride 2 and downsample with avg pool - layer = ResLayer(Bottleneck, 3, 32, 64, stride=2, avg_down=True) - self.assertEqual(len(layer), 3) - self.assertEqual(layer[0].in_channels, 32) - self.assertEqual(layer[0].out_channels, 64) - self.assertEqual(layer[0].stride, 2) - self.assertEqual(layer[0].conv1.out_channels, 16) - self.assertEqual( - layer[0].downsample is not None and len(layer[0].downsample), 3) - self.assertIsInstance(layer[0].downsample[0], nn.AvgPool2d) - self.assertEqual(layer[0].downsample[0].stride, 2) - for i in range(1, 3): - self.assertEqual(layer[i].in_channels, 64) - self.assertEqual(layer[i].out_channels, 64) - self.assertEqual(layer[i].conv1.out_channels, 16) - self.assertEqual(layer[i].stride, 1) - self.assertIsNone(layer[i].downsample) - x = torch.randn(1, 32, 56, 56) - x_out = layer(x) - self.assertEqual(x_out.shape, (1, 64, 28, 28)) - - # 3 Bottleneck with custom expansion - layer = ResLayer(Bottleneck, 3, 32, 32, expansion=2) - self.assertEqual(len(layer), 3) - for i in range(3): - self.assertEqual(layer[i].in_channels, 32) - self.assertEqual(layer[i].out_channels, 32) - self.assertEqual(layer[i].stride, 1) - self.assertEqual(layer[i].conv1.out_channels, 16) - self.assertIsNone(layer[i].downsample) - x = torch.randn(1, 32, 56, 56) - x_out = layer(x) - self.assertEqual(x_out.shape, (1, 32, 56, 56)) - - def test_resnet(self): - """Test resnet backbone.""" - with self.assertRaises(KeyError): - # ResNet depth should be in [18, 34, 
50, 101, 152] - ResNet(20) - - with self.assertRaises(AssertionError): - # In ResNet: 1 <= num_stages <= 4 - ResNet(50, num_stages=0) - - with self.assertRaises(AssertionError): - # In ResNet: 1 <= num_stages <= 4 - ResNet(50, num_stages=5) - - with self.assertRaises(AssertionError): - # len(strides) == len(dilations) == num_stages - ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3) - - with self.assertRaises(AssertionError): - # Style must be in ['pytorch', 'caffe'] - ResNet(50, style='tensorflow') - - # Test ResNet50 norm_eval=True - model = ResNet(50, norm_eval=True) - model.init_weights() - model.train() - self.assertTrue(self.check_norm_state(model.modules(), False)) - - # Test ResNet50 with torchvision pretrained weight - init_cfg = dict(type='Pretrained', checkpoint='torchvision://resnet50') - model = ResNet(depth=50, norm_eval=True, init_cfg=init_cfg) - model.train() - self.assertTrue(self.check_norm_state(model.modules(), False)) - - # Test ResNet50 with first stage frozen - frozen_stages = 1 - model = ResNet(50, frozen_stages=frozen_stages) - model.init_weights() - model.train() - self.assertFalse(model.norm1.training) - for layer in [model.conv1, model.norm1]: - for param in layer.parameters(): - self.assertFalse(param.requires_grad) - for i in range(1, frozen_stages + 1): - layer = getattr(model, f'layer{i}') - for mod in layer.modules(): - if isinstance(mod, _BatchNorm): - self.assertFalse(mod.training) - for param in layer.parameters(): - self.assertFalse(param.requires_grad) - - # Test ResNet18 forward - model = ResNet(18, out_indices=(0, 1, 2, 3)) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 4) - self.assertEqual(feat[0].shape, (1, 64, 56, 56)) - self.assertEqual(feat[1].shape, (1, 128, 28, 28)) - self.assertEqual(feat[2].shape, (1, 256, 14, 14)) - self.assertEqual(feat[3].shape, (1, 512, 7, 7)) - - # Test ResNet50 with BatchNorm forward - model = ResNet(50, out_indices=(0, 1, 2, 3)) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 4) - self.assertEqual(feat[0].shape, (1, 256, 56, 56)) - self.assertEqual(feat[1].shape, (1, 512, 28, 28)) - self.assertEqual(feat[2].shape, (1, 1024, 14, 14)) - self.assertEqual(feat[3].shape, (1, 2048, 7, 7)) - - # Test ResNet50 with layers 1, 2, 3 out forward - model = ResNet(50, out_indices=(0, 1, 2)) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 3) - self.assertEqual(feat[0].shape, (1, 256, 56, 56)) - self.assertEqual(feat[1].shape, (1, 512, 28, 28)) - self.assertEqual(feat[2].shape, (1, 1024, 14, 14)) - - # Test ResNet50 with layers 3 (top feature maps) out forward - model = ResNet(50, out_indices=(3, )) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 1) - self.assertEqual(feat[-1].shape, (1, 2048, 7, 7)) - - # Test ResNet50 with checkpoint forward - model = ResNet(50, out_indices=(0, 1, 2, 3), with_cp=True) - for m in model.modules(): - if self.is_block(m): - self.assertTrue(m.with_cp) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 4) - self.assertEqual(feat[0].shape, (1, 256, 56, 56)) - self.assertEqual(feat[1].shape, (1, 512, 28, 28)) - self.assertEqual(feat[2].shape, (1, 1024, 14, 14)) - self.assertEqual(feat[3].shape, (1, 
2048, 7, 7)) - - # zero initialization of residual blocks - model = ResNet(50, out_indices=(0, 1, 2, 3), zero_init_residual=True) - model.init_weights() - for m in model.modules(): - if isinstance(m, Bottleneck): - self.assertTrue(self.all_zeros(m.norm3)) - elif isinstance(m, BasicBlock): - self.assertTrue(self.all_zeros(m.norm2)) - - # non-zero initialization of residual blocks - model = ResNet(50, out_indices=(0, 1, 2, 3), zero_init_residual=False) - model.init_weights() - for m in model.modules(): - if isinstance(m, Bottleneck): - self.assertFalse(self.all_zeros(m.norm3)) - elif isinstance(m, BasicBlock): - self.assertFalse(self.all_zeros(m.norm2)) - - def test_resnet_v1d(self): - model = ResNetV1d(depth=50, out_indices=(0, 1, 2, 3)) - model.init_weights() - model.train() - - self.assertEqual(len(model.stem), 3) - for i in range(3): - self.assertIsInstance(model.stem[i], ConvModule) - - imgs = torch.randn(1, 3, 224, 224) - feat = model.stem(imgs) - self.assertEqual(feat.shape, (1, 64, 112, 112)) - feat = model(imgs) - self.assertEqual(len(feat), 4) - self.assertEqual(feat[0].shape, (1, 256, 56, 56)) - self.assertEqual(feat[1].shape, (1, 512, 28, 28)) - self.assertEqual(feat[2].shape, (1, 1024, 14, 14)) - self.assertEqual(feat[3].shape, (1, 2048, 7, 7)) - - # Test ResNet50V1d with first stage frozen - frozen_stages = 1 - model = ResNetV1d(depth=50, frozen_stages=frozen_stages) - self.assertEqual(len(model.stem), 3) - for i in range(3): - self.assertIsInstance(model.stem[i], ConvModule) - model.init_weights() - model.train() - self.assertTrue(self.check_norm_state(model.stem, False)) - for param in model.stem.parameters(): - self.assertFalse(param.requires_grad) - for i in range(1, frozen_stages + 1): - layer = getattr(model, f'layer{i}') - for mod in layer.modules(): - if isinstance(mod, _BatchNorm): - self.assertFalse(mod.training) - for param in layer.parameters(): - self.assertFalse(param.requires_grad) - - def test_resnet_half_channel(self): - model = ResNet(50, base_channels=32, out_indices=(0, 1, 2, 3)) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 4) - self.assertEqual(feat[0].shape, (1, 128, 56, 56)) - self.assertEqual(feat[1].shape, (1, 256, 28, 28)) - self.assertEqual(feat[2].shape, (1, 512, 14, 14)) - self.assertEqual(feat[3].shape, (1, 1024, 7, 7)) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmpose.models.backbones import ResNet, ResNetV1d +from mmpose.models.backbones.resnet import (BasicBlock, Bottleneck, ResLayer, + get_expansion) + + +class TestResnet(TestCase): + + @staticmethod + def is_block(modules): + """Check if is ResNet building block.""" + if isinstance(modules, (BasicBlock, Bottleneck)): + return True + return False + + @staticmethod + def all_zeros(modules): + """Check if the weight(and bias) is all zero.""" + weight_zero = torch.equal(modules.weight.data, + torch.zeros_like(modules.weight.data)) + if hasattr(modules, 'bias'): + bias_zero = torch.equal(modules.bias.data, + torch.zeros_like(modules.bias.data)) + else: + bias_zero = True + + return weight_zero and bias_zero + + @staticmethod + def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + def test_get_expansion(self): + self.assertEqual(get_expansion(Bottleneck, 2), 2) + self.assertEqual(get_expansion(BasicBlock), 1) + self.assertEqual(get_expansion(Bottleneck), 4) + + class MyResBlock(nn.Module): + + expansion = 8 + + self.assertEqual(get_expansion(MyResBlock), 8) + + # expansion must be an integer or None + with self.assertRaises(TypeError): + get_expansion(Bottleneck, '0') + + # expansion is not specified and cannot be inferred + with self.assertRaises(TypeError): + + class SomeModule(nn.Module): + pass + + get_expansion(SomeModule) + + def test_basic_block(self): + # expansion must be 1 + with self.assertRaises(AssertionError): + BasicBlock(64, 64, expansion=2) + + # BasicBlock with stride 1, out_channels == in_channels + block = BasicBlock(64, 64) + self.assertEqual(block.in_channels, 64) + self.assertEqual(block.mid_channels, 64) + self.assertEqual(block.out_channels, 64) + self.assertEqual(block.conv1.in_channels, 64) + self.assertEqual(block.conv1.out_channels, 64) + self.assertEqual(block.conv1.kernel_size, (3, 3)) + self.assertEqual(block.conv1.stride, (1, 1)) + self.assertEqual(block.conv2.in_channels, 64) + self.assertEqual(block.conv2.out_channels, 64) + self.assertEqual(block.conv2.kernel_size, (3, 3)) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) + + # BasicBlock with stride 1 and downsample + downsample = nn.Sequential( + nn.Conv2d(64, 128, kernel_size=1, bias=False), nn.BatchNorm2d(128)) + block = BasicBlock(64, 128, downsample=downsample) + self.assertEqual(block.in_channels, 64) + self.assertEqual(block.mid_channels, 128) + self.assertEqual(block.out_channels, 128) + self.assertEqual(block.conv1.in_channels, 64) + self.assertEqual(block.conv1.out_channels, 128) + self.assertEqual(block.conv1.kernel_size, (3, 3)) + self.assertEqual(block.conv1.stride, (1, 1)) + self.assertEqual(block.conv2.in_channels, 128) + self.assertEqual(block.conv2.out_channels, 128) + self.assertEqual(block.conv2.kernel_size, (3, 3)) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size([1, 128, 56, 56])) + + # BasicBlock with stride 2 and downsample + downsample = nn.Sequential( + nn.Conv2d(64, 128, kernel_size=1, stride=2, bias=False), + nn.BatchNorm2d(128)) + block = BasicBlock(64, 128, stride=2, downsample=downsample) + self.assertEqual(block.in_channels, 64) + 
self.assertEqual(block.mid_channels, 128) + self.assertEqual(block.out_channels, 128) + self.assertEqual(block.conv1.in_channels, 64) + self.assertEqual(block.conv1.out_channels, 128) + self.assertEqual(block.conv1.kernel_size, (3, 3)) + self.assertEqual(block.conv1.stride, (2, 2)) + self.assertEqual(block.conv2.in_channels, 128) + self.assertEqual(block.conv2.out_channels, 128) + self.assertEqual(block.conv2.kernel_size, (3, 3)) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size([1, 128, 28, 28])) + + # forward with checkpointing + block = BasicBlock(64, 64, with_cp=True) + self.assertTrue(block.with_cp) + x = torch.randn(1, 64, 56, 56, requires_grad=True) + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) + + def test_bottleneck(self): + # style must be in ['pytorch', 'caffe'] + with self.assertRaises(AssertionError): + Bottleneck(64, 64, style='tensorflow') + + # expansion must be divisible by out_channels + with self.assertRaises(AssertionError): + Bottleneck(64, 64, expansion=3) + + # Test Bottleneck style + block = Bottleneck(64, 64, stride=2, style='pytorch') + self.assertEqual(block.conv1.stride, (1, 1)) + self.assertEqual(block.conv2.stride, (2, 2)) + block = Bottleneck(64, 64, stride=2, style='caffe') + self.assertEqual(block.conv1.stride, (2, 2)) + self.assertEqual(block.conv2.stride, (1, 1)) + + # Bottleneck with stride 1 + block = Bottleneck(64, 64, style='pytorch') + self.assertEqual(block.in_channels, 64) + self.assertEqual(block.mid_channels, 16) + self.assertEqual(block.out_channels, 64) + self.assertEqual(block.conv1.in_channels, 64) + self.assertEqual(block.conv1.out_channels, 16) + self.assertEqual(block.conv1.kernel_size, (1, 1)) + self.assertEqual(block.conv2.in_channels, 16) + self.assertEqual(block.conv2.out_channels, 16) + self.assertEqual(block.conv2.kernel_size, (3, 3)) + self.assertEqual(block.conv3.in_channels, 16) + self.assertEqual(block.conv3.out_channels, 64) + self.assertEqual(block.conv3.kernel_size, (1, 1)) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + self.assertEqual(x_out.shape, (1, 64, 56, 56)) + + # Bottleneck with stride 1 and downsample + downsample = nn.Sequential( + nn.Conv2d(64, 128, kernel_size=1), nn.BatchNorm2d(128)) + block = Bottleneck(64, 128, style='pytorch', downsample=downsample) + self.assertEqual(block.in_channels, 64) + self.assertEqual(block.mid_channels, 32) + self.assertEqual(block.out_channels, 128) + self.assertEqual(block.conv1.in_channels, 64) + self.assertEqual(block.conv1.out_channels, 32) + self.assertEqual(block.conv1.kernel_size, (1, 1)) + self.assertEqual(block.conv2.in_channels, 32) + self.assertEqual(block.conv2.out_channels, 32) + self.assertEqual(block.conv2.kernel_size, (3, 3)) + self.assertEqual(block.conv3.in_channels, 32) + self.assertEqual(block.conv3.out_channels, 128) + self.assertEqual(block.conv3.kernel_size, (1, 1)) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + self.assertEqual(x_out.shape, (1, 128, 56, 56)) + + # Bottleneck with stride 2 and downsample + downsample = nn.Sequential( + nn.Conv2d(64, 128, kernel_size=1, stride=2), nn.BatchNorm2d(128)) + block = Bottleneck( + 64, 128, stride=2, style='pytorch', downsample=downsample) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + self.assertEqual(x_out.shape, (1, 128, 28, 28)) + + # Bottleneck with expansion 2 + block = Bottleneck(64, 64, style='pytorch', expansion=2) + self.assertEqual(block.in_channels, 64) + self.assertEqual(block.mid_channels, 32) + 
self.assertEqual(block.out_channels, 64) + self.assertEqual(block.conv1.in_channels, 64) + self.assertEqual(block.conv1.out_channels, 32) + self.assertEqual(block.conv1.kernel_size, (1, 1)) + self.assertEqual(block.conv2.in_channels, 32) + self.assertEqual(block.conv2.out_channels, 32) + self.assertEqual(block.conv2.kernel_size, (3, 3)) + self.assertEqual(block.conv3.in_channels, 32) + self.assertEqual(block.conv3.out_channels, 64) + self.assertEqual(block.conv3.kernel_size, (1, 1)) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + self.assertEqual(x_out.shape, (1, 64, 56, 56)) + + # Test Bottleneck with checkpointing + block = Bottleneck(64, 64, with_cp=True) + block.train() + self.assertTrue(block.with_cp) + x = torch.randn(1, 64, 56, 56, requires_grad=True) + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) + + def test_basicblock_reslayer(self): + # 3 BasicBlock w/o downsample + layer = ResLayer(BasicBlock, 3, 32, 32) + self.assertEqual(len(layer), 3) + for i in range(3): + self.assertEqual(layer[i].in_channels, 32) + self.assertEqual(layer[i].out_channels, 32) + self.assertIsNone(layer[i].downsample) + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + self.assertEqual(x_out.shape, (1, 32, 56, 56)) + + # 3 BasicBlock w/ stride 1 and downsample + layer = ResLayer(BasicBlock, 3, 32, 64) + self.assertEqual(len(layer), 3) + self.assertEqual(layer[0].in_channels, 32) + self.assertEqual(layer[0].out_channels, 64) + self.assertEqual( + layer[0].downsample is not None and len(layer[0].downsample), 2) + self.assertIsInstance(layer[0].downsample[0], nn.Conv2d) + self.assertEqual(layer[0].downsample[0].stride, (1, 1)) + for i in range(1, 3): + self.assertEqual(layer[i].in_channels, 64) + self.assertEqual(layer[i].out_channels, 64) + self.assertIsNone(layer[i].downsample) + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + self.assertEqual(x_out.shape, (1, 64, 56, 56)) + + # 3 BasicBlock w/ stride 2 and downsample + layer = ResLayer(BasicBlock, 3, 32, 64, stride=2) + self.assertEqual(len(layer), 3) + self.assertEqual(layer[0].in_channels, 32) + self.assertEqual(layer[0].out_channels, 64) + self.assertEqual(layer[0].stride, 2) + self.assertEqual( + layer[0].downsample is not None and len(layer[0].downsample), 2) + self.assertIsInstance(layer[0].downsample[0], nn.Conv2d) + self.assertEqual(layer[0].downsample[0].stride, (2, 2)) + for i in range(1, 3): + self.assertEqual(layer[i].in_channels, 64) + self.assertEqual(layer[i].out_channels, 64) + self.assertEqual(layer[i].stride, 1) + self.assertIsNone(layer[i].downsample) + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + self.assertEqual(x_out.shape, (1, 64, 28, 28)) + + # 3 BasicBlock w/ stride 2 and downsample with avg pool + layer = ResLayer(BasicBlock, 3, 32, 64, stride=2, avg_down=True) + self.assertEqual(len(layer), 3) + self.assertEqual(layer[0].in_channels, 32) + self.assertEqual(layer[0].out_channels, 64) + self.assertEqual(layer[0].stride, 2) + self.assertEqual( + layer[0].downsample is not None and len(layer[0].downsample), 3) + self.assertIsInstance(layer[0].downsample[0], nn.AvgPool2d) + self.assertEqual(layer[0].downsample[0].stride, 2) + for i in range(1, 3): + self.assertEqual(layer[i].in_channels, 64) + self.assertEqual(layer[i].out_channels, 64) + self.assertEqual(layer[i].stride, 1) + self.assertIsNone(layer[i].downsample) + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + self.assertEqual(x_out.shape, (1, 64, 28, 28)) + + def test_bottleneck_reslayer(self): + # 3 Bottleneck w/o downsample 
+ layer = ResLayer(Bottleneck, 3, 32, 32) + self.assertEqual(len(layer), 3) + for i in range(3): + self.assertEqual(layer[i].in_channels, 32) + self.assertEqual(layer[i].out_channels, 32) + self.assertIsNone(layer[i].downsample) + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + self.assertEqual(x_out.shape, (1, 32, 56, 56)) + + # 3 Bottleneck w/ stride 1 and downsample + layer = ResLayer(Bottleneck, 3, 32, 64) + self.assertEqual(len(layer), 3) + self.assertEqual(layer[0].in_channels, 32) + self.assertEqual(layer[0].out_channels, 64) + self.assertEqual(layer[0].stride, 1) + self.assertEqual(layer[0].conv1.out_channels, 16) + self.assertEqual( + layer[0].downsample is not None and len(layer[0].downsample), 2) + self.assertIsInstance(layer[0].downsample[0], nn.Conv2d) + self.assertEqual(layer[0].downsample[0].stride, (1, 1)) + for i in range(1, 3): + self.assertEqual(layer[i].in_channels, 64) + self.assertEqual(layer[i].out_channels, 64) + self.assertEqual(layer[i].conv1.out_channels, 16) + self.assertEqual(layer[i].stride, 1) + self.assertIsNone(layer[i].downsample) + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + self.assertEqual(x_out.shape, (1, 64, 56, 56)) + + # 3 Bottleneck w/ stride 2 and downsample + layer = ResLayer(Bottleneck, 3, 32, 64, stride=2) + self.assertEqual(len(layer), 3) + self.assertEqual(layer[0].in_channels, 32) + self.assertEqual(layer[0].out_channels, 64) + self.assertEqual(layer[0].stride, 2) + self.assertEqual(layer[0].conv1.out_channels, 16) + self.assertEqual( + layer[0].downsample is not None and len(layer[0].downsample), 2) + self.assertIsInstance(layer[0].downsample[0], nn.Conv2d) + self.assertEqual(layer[0].downsample[0].stride, (2, 2)) + for i in range(1, 3): + self.assertEqual(layer[i].in_channels, 64) + self.assertEqual(layer[i].out_channels, 64) + self.assertEqual(layer[i].conv1.out_channels, 16) + self.assertEqual(layer[i].stride, 1) + self.assertIsNone(layer[i].downsample) + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + self.assertEqual(x_out.shape, (1, 64, 28, 28)) + + # 3 Bottleneck w/ stride 2 and downsample with avg pool + layer = ResLayer(Bottleneck, 3, 32, 64, stride=2, avg_down=True) + self.assertEqual(len(layer), 3) + self.assertEqual(layer[0].in_channels, 32) + self.assertEqual(layer[0].out_channels, 64) + self.assertEqual(layer[0].stride, 2) + self.assertEqual(layer[0].conv1.out_channels, 16) + self.assertEqual( + layer[0].downsample is not None and len(layer[0].downsample), 3) + self.assertIsInstance(layer[0].downsample[0], nn.AvgPool2d) + self.assertEqual(layer[0].downsample[0].stride, 2) + for i in range(1, 3): + self.assertEqual(layer[i].in_channels, 64) + self.assertEqual(layer[i].out_channels, 64) + self.assertEqual(layer[i].conv1.out_channels, 16) + self.assertEqual(layer[i].stride, 1) + self.assertIsNone(layer[i].downsample) + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + self.assertEqual(x_out.shape, (1, 64, 28, 28)) + + # 3 Bottleneck with custom expansion + layer = ResLayer(Bottleneck, 3, 32, 32, expansion=2) + self.assertEqual(len(layer), 3) + for i in range(3): + self.assertEqual(layer[i].in_channels, 32) + self.assertEqual(layer[i].out_channels, 32) + self.assertEqual(layer[i].stride, 1) + self.assertEqual(layer[i].conv1.out_channels, 16) + self.assertIsNone(layer[i].downsample) + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + self.assertEqual(x_out.shape, (1, 32, 56, 56)) + + def test_resnet(self): + """Test resnet backbone.""" + with self.assertRaises(KeyError): + # ResNet depth should be in [18, 34, 
50, 101, 152] + ResNet(20) + + with self.assertRaises(AssertionError): + # In ResNet: 1 <= num_stages <= 4 + ResNet(50, num_stages=0) + + with self.assertRaises(AssertionError): + # In ResNet: 1 <= num_stages <= 4 + ResNet(50, num_stages=5) + + with self.assertRaises(AssertionError): + # len(strides) == len(dilations) == num_stages + ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3) + + with self.assertRaises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + ResNet(50, style='tensorflow') + + # Test ResNet50 norm_eval=True + model = ResNet(50, norm_eval=True) + model.init_weights() + model.train() + self.assertTrue(self.check_norm_state(model.modules(), False)) + + # Test ResNet50 with torchvision pretrained weight + init_cfg = dict(type='Pretrained', checkpoint='torchvision://resnet50') + model = ResNet(depth=50, norm_eval=True, init_cfg=init_cfg) + model.train() + self.assertTrue(self.check_norm_state(model.modules(), False)) + + # Test ResNet50 with first stage frozen + frozen_stages = 1 + model = ResNet(50, frozen_stages=frozen_stages) + model.init_weights() + model.train() + self.assertFalse(model.norm1.training) + for layer in [model.conv1, model.norm1]: + for param in layer.parameters(): + self.assertFalse(param.requires_grad) + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + self.assertFalse(mod.training) + for param in layer.parameters(): + self.assertFalse(param.requires_grad) + + # Test ResNet18 forward + model = ResNet(18, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 4) + self.assertEqual(feat[0].shape, (1, 64, 56, 56)) + self.assertEqual(feat[1].shape, (1, 128, 28, 28)) + self.assertEqual(feat[2].shape, (1, 256, 14, 14)) + self.assertEqual(feat[3].shape, (1, 512, 7, 7)) + + # Test ResNet50 with BatchNorm forward + model = ResNet(50, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 4) + self.assertEqual(feat[0].shape, (1, 256, 56, 56)) + self.assertEqual(feat[1].shape, (1, 512, 28, 28)) + self.assertEqual(feat[2].shape, (1, 1024, 14, 14)) + self.assertEqual(feat[3].shape, (1, 2048, 7, 7)) + + # Test ResNet50 with layers 1, 2, 3 out forward + model = ResNet(50, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 3) + self.assertEqual(feat[0].shape, (1, 256, 56, 56)) + self.assertEqual(feat[1].shape, (1, 512, 28, 28)) + self.assertEqual(feat[2].shape, (1, 1024, 14, 14)) + + # Test ResNet50 with layers 3 (top feature maps) out forward + model = ResNet(50, out_indices=(3, )) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 1) + self.assertEqual(feat[-1].shape, (1, 2048, 7, 7)) + + # Test ResNet50 with checkpoint forward + model = ResNet(50, out_indices=(0, 1, 2, 3), with_cp=True) + for m in model.modules(): + if self.is_block(m): + self.assertTrue(m.with_cp) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 4) + self.assertEqual(feat[0].shape, (1, 256, 56, 56)) + self.assertEqual(feat[1].shape, (1, 512, 28, 28)) + self.assertEqual(feat[2].shape, (1, 1024, 14, 14)) + self.assertEqual(feat[3].shape, (1, 
2048, 7, 7)) + + # zero initialization of residual blocks + model = ResNet(50, out_indices=(0, 1, 2, 3), zero_init_residual=True) + model.init_weights() + for m in model.modules(): + if isinstance(m, Bottleneck): + self.assertTrue(self.all_zeros(m.norm3)) + elif isinstance(m, BasicBlock): + self.assertTrue(self.all_zeros(m.norm2)) + + # non-zero initialization of residual blocks + model = ResNet(50, out_indices=(0, 1, 2, 3), zero_init_residual=False) + model.init_weights() + for m in model.modules(): + if isinstance(m, Bottleneck): + self.assertFalse(self.all_zeros(m.norm3)) + elif isinstance(m, BasicBlock): + self.assertFalse(self.all_zeros(m.norm2)) + + def test_resnet_v1d(self): + model = ResNetV1d(depth=50, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + self.assertEqual(len(model.stem), 3) + for i in range(3): + self.assertIsInstance(model.stem[i], ConvModule) + + imgs = torch.randn(1, 3, 224, 224) + feat = model.stem(imgs) + self.assertEqual(feat.shape, (1, 64, 112, 112)) + feat = model(imgs) + self.assertEqual(len(feat), 4) + self.assertEqual(feat[0].shape, (1, 256, 56, 56)) + self.assertEqual(feat[1].shape, (1, 512, 28, 28)) + self.assertEqual(feat[2].shape, (1, 1024, 14, 14)) + self.assertEqual(feat[3].shape, (1, 2048, 7, 7)) + + # Test ResNet50V1d with first stage frozen + frozen_stages = 1 + model = ResNetV1d(depth=50, frozen_stages=frozen_stages) + self.assertEqual(len(model.stem), 3) + for i in range(3): + self.assertIsInstance(model.stem[i], ConvModule) + model.init_weights() + model.train() + self.assertTrue(self.check_norm_state(model.stem, False)) + for param in model.stem.parameters(): + self.assertFalse(param.requires_grad) + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + self.assertFalse(mod.training) + for param in layer.parameters(): + self.assertFalse(param.requires_grad) + + def test_resnet_half_channel(self): + model = ResNet(50, base_channels=32, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 4) + self.assertEqual(feat[0].shape, (1, 128, 56, 56)) + self.assertEqual(feat[1].shape, (1, 256, 28, 28)) + self.assertEqual(feat[2].shape, (1, 512, 14, 14)) + self.assertEqual(feat[3].shape, (1, 1024, 7, 7)) diff --git a/tests/test_models/test_backbones/test_resnext.py b/tests/test_models/test_backbones/test_resnext.py index ddfdcf4f7b..72647b85c3 100644 --- a/tests/test_models/test_backbones/test_resnext.py +++ b/tests/test_models/test_backbones/test_resnext.py @@ -1,66 +1,66 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import torch - -from mmpose.models.backbones import ResNeXt -from mmpose.models.backbones.resnext import Bottleneck as BottleneckX - - -class TestResnext(TestCase): - - def test_bottleneck(self): - with self.assertRaises(AssertionError): - # Style must be in ['pytorch', 'caffe'] - BottleneckX( - 64, 64, groups=32, width_per_group=4, style='tensorflow') - - # Test ResNeXt Bottleneck structure - block = BottleneckX( - 64, 256, groups=32, width_per_group=4, stride=2, style='pytorch') - self.assertEqual(block.conv2.stride, (2, 2)) - self.assertEqual(block.conv2.groups, 32) - self.assertEqual(block.conv2.out_channels, 128) - - # Test ResNeXt Bottleneck forward - block = BottleneckX( - 64, 64, base_channels=16, groups=32, width_per_group=4) - x = torch.randn(1, 64, 56, 56) - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) - - def test_resnext(self): - with self.assertRaises(KeyError): - # ResNeXt depth should be in [50, 101, 152] - ResNeXt(depth=18) - - # Test ResNeXt with group 32, width_per_group 4 - model = ResNeXt( - depth=50, groups=32, width_per_group=4, out_indices=(0, 1, 2, 3)) - for m in model.modules(): - if isinstance(m, BottleneckX): - self.assertEqual(m.conv2.groups, 32) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 4) - self.assertEqual(feat[0].shape, torch.Size([1, 256, 56, 56])) - self.assertEqual(feat[1].shape, torch.Size([1, 512, 28, 28])) - self.assertEqual(feat[2].shape, torch.Size([1, 1024, 14, 14])) - self.assertEqual(feat[3].shape, torch.Size([1, 2048, 7, 7])) - - # Test ResNeXt with layers 3 out forward - model = ResNeXt( - depth=50, groups=32, width_per_group=4, out_indices=(3, )) - for m in model.modules(): - if isinstance(m, BottleneckX): - self.assertEqual(m.conv2.groups, 32) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 1) - self.assertEqual(feat[-1].shape, torch.Size([1, 2048, 7, 7])) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import torch + +from mmpose.models.backbones import ResNeXt +from mmpose.models.backbones.resnext import Bottleneck as BottleneckX + + +class TestResnext(TestCase): + + def test_bottleneck(self): + with self.assertRaises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + BottleneckX( + 64, 64, groups=32, width_per_group=4, style='tensorflow') + + # Test ResNeXt Bottleneck structure + block = BottleneckX( + 64, 256, groups=32, width_per_group=4, stride=2, style='pytorch') + self.assertEqual(block.conv2.stride, (2, 2)) + self.assertEqual(block.conv2.groups, 32) + self.assertEqual(block.conv2.out_channels, 128) + + # Test ResNeXt Bottleneck forward + block = BottleneckX( + 64, 64, base_channels=16, groups=32, width_per_group=4) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) + + def test_resnext(self): + with self.assertRaises(KeyError): + # ResNeXt depth should be in [50, 101, 152] + ResNeXt(depth=18) + + # Test ResNeXt with group 32, width_per_group 4 + model = ResNeXt( + depth=50, groups=32, width_per_group=4, out_indices=(0, 1, 2, 3)) + for m in model.modules(): + if isinstance(m, BottleneckX): + self.assertEqual(m.conv2.groups, 32) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 4) + self.assertEqual(feat[0].shape, torch.Size([1, 256, 56, 56])) + self.assertEqual(feat[1].shape, torch.Size([1, 512, 28, 28])) + self.assertEqual(feat[2].shape, torch.Size([1, 1024, 14, 14])) + self.assertEqual(feat[3].shape, torch.Size([1, 2048, 7, 7])) + + # Test ResNeXt with layers 3 out forward + model = ResNeXt( + depth=50, groups=32, width_per_group=4, out_indices=(3, )) + for m in model.modules(): + if isinstance(m, BottleneckX): + self.assertEqual(m.conv2.groups, 32) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 1) + self.assertEqual(feat[-1].shape, torch.Size([1, 2048, 7, 7])) diff --git a/tests/test_models/test_backbones/test_rsn.py b/tests/test_models/test_backbones/test_rsn.py index 156c454bf5..132f1af354 100644 --- a/tests/test_models/test_backbones/test_rsn.py +++ b/tests/test_models/test_backbones/test_rsn.py @@ -1,38 +1,38 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import torch - -from mmpose.models.backbones import RSN - - -class TestRSN(TestCase): - - def test_rsn_backbone(self): - with self.assertRaises(AssertionError): - # RSN's num_stages should larger than 0 - RSN(num_stages=0) - with self.assertRaises(AssertionError): - # RSN's num_steps should larger than 1 - RSN(num_steps=1) - with self.assertRaises(AssertionError): - # RSN's num_units should larger than 1 - RSN(num_units=1) - with self.assertRaises(AssertionError): - # len(num_blocks) should equal num_units - RSN(num_units=2, num_blocks=[2, 2, 2]) - - # Test RSN's outputs - model = RSN(num_stages=2, num_units=2, num_blocks=[2, 2]) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 511, 511) - feat = model(imgs) - self.assertEqual(len(feat), 2) - self.assertEqual(len(feat[0]), 2) - self.assertEqual(len(feat[1]), 2) - self.assertEqual(feat[0][0].shape, torch.Size([1, 256, 64, 64])) - self.assertEqual(feat[0][1].shape, torch.Size([1, 256, 128, 128])) - self.assertEqual(feat[1][0].shape, torch.Size([1, 256, 64, 64])) - self.assertEqual(feat[1][1].shape, torch.Size([1, 256, 128, 128])) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import torch + +from mmpose.models.backbones import RSN + + +class TestRSN(TestCase): + + def test_rsn_backbone(self): + with self.assertRaises(AssertionError): + # RSN's num_stages should larger than 0 + RSN(num_stages=0) + with self.assertRaises(AssertionError): + # RSN's num_steps should larger than 1 + RSN(num_steps=1) + with self.assertRaises(AssertionError): + # RSN's num_units should larger than 1 + RSN(num_units=1) + with self.assertRaises(AssertionError): + # len(num_blocks) should equal num_units + RSN(num_units=2, num_blocks=[2, 2, 2]) + + # Test RSN's outputs + model = RSN(num_stages=2, num_units=2, num_blocks=[2, 2]) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 511, 511) + feat = model(imgs) + self.assertEqual(len(feat), 2) + self.assertEqual(len(feat[0]), 2) + self.assertEqual(len(feat[1]), 2) + self.assertEqual(feat[0][0].shape, torch.Size([1, 256, 64, 64])) + self.assertEqual(feat[0][1].shape, torch.Size([1, 256, 128, 128])) + self.assertEqual(feat[1][0].shape, torch.Size([1, 256, 64, 64])) + self.assertEqual(feat[1][1].shape, torch.Size([1, 256, 128, 128])) diff --git a/tests/test_models/test_backbones/test_scnet.py b/tests/test_models/test_backbones/test_scnet.py index 2a040f3b10..1f258af59f 100644 --- a/tests/test_models/test_backbones/test_scnet.py +++ b/tests/test_models/test_backbones/test_scnet.py @@ -1,165 +1,165 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import torch -from torch.nn.modules.batchnorm import _BatchNorm - -from mmpose.models.backbones import SCNet -from mmpose.models.backbones.scnet import SCBottleneck, SCConv - - -class TestSCnet(TestCase): - - @staticmethod - def is_block(modules): - """Check if is SCNet building block.""" - if isinstance(modules, (SCBottleneck, )): - return True - return False - - @staticmethod - def is_norm(modules): - """Check if is one of the norms.""" - if isinstance(modules, (_BatchNorm, )): - return True - return False - - @staticmethod - def all_zeros(modules): - """Check if the weight(and bias) is all zero.""" - weight_zero = torch.equal(modules.weight.data, - torch.zeros_like(modules.weight.data)) - if hasattr(modules, 'bias'): - bias_zero = torch.equal(modules.bias.data, - torch.zeros_like(modules.bias.data)) - else: - bias_zero = True - - return weight_zero and bias_zero - - @staticmethod - def check_norm_state(modules, train_state): - """Check if norm layer is in correct train state.""" - for mod in modules: - if isinstance(mod, _BatchNorm): - if mod.training != train_state: - return False - return True - - def test_scnet_scconv(self): - # Test scconv forward - layer = SCConv(64, 64, 1, 4) - x = torch.randn(1, 64, 56, 56) - x_out = layer(x) - self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) - - def test_scnet_bottleneck(self): - # Test Bottleneck forward - block = SCBottleneck(64, 64) - x = torch.randn(1, 64, 56, 56) - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) - - def test_scnet_backbone(self): - """Test scnet backbone.""" - with self.assertRaises(KeyError): - # SCNet depth should be in [50, 101] - SCNet(20) - - with self.assertRaises(TypeError): - # pretrained must be a string path - model = SCNet(50) - model.init_weights(pretrained=0) - - # Test SCNet norm_eval=True - model = SCNet(50, norm_eval=True) - model.init_weights() - model.train() - self.assertTrue(self.check_norm_state(model.modules(), False)) - - # Test SCNet50 with first stage frozen - frozen_stages = 1 - model = SCNet(50, frozen_stages=frozen_stages) - model.init_weights() - model.train() - self.assertFalse(model.norm1.training) - for layer in [model.conv1, model.norm1]: - for param in layer.parameters(): - self.assertFalse(param.requires_grad) - for i in range(1, frozen_stages + 1): - layer = getattr(model, f'layer{i}') - for mod in layer.modules(): - if isinstance(mod, _BatchNorm): - self.assertFalse(mod.training) - for param in layer.parameters(): - self.assertFalse(param.requires_grad) - - # Test SCNet with BatchNorm forward - model = SCNet(50, out_indices=(0, 1, 2, 3)) - for m in model.modules(): - if self.is_norm(m): - self.assertIsInstance(m, _BatchNorm) - model.init_weights() - model.train() - - imgs = torch.randn(2, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 4) - self.assertEqual(feat[0].shape, torch.Size([2, 256, 56, 56])) - self.assertEqual(feat[1].shape, torch.Size([2, 512, 28, 28])) - self.assertEqual(feat[2].shape, torch.Size([2, 1024, 14, 14])) - self.assertEqual(feat[3].shape, torch.Size([2, 2048, 7, 7])) - - # Test SCNet with layers 1, 2, 3 out forward - model = SCNet(50, out_indices=(0, 1, 2)) - model.init_weights() - model.train() - - imgs = torch.randn(2, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 3) - self.assertEqual(feat[0].shape, torch.Size([2, 256, 56, 56])) - self.assertEqual(feat[1].shape, torch.Size([2, 512, 28, 28])) - self.assertEqual(feat[2].shape, torch.Size([2, 1024, 14, 14])) 
- - # Test SEResNet50 with layers 3 (top feature maps) out forward - model = SCNet(50, out_indices=(3, )) - model.init_weights() - model.train() - - imgs = torch.randn(2, 3, 224, 224) - feat = model(imgs) - self.assertIsInstance(feat, tuple) - self.assertEqual(feat[-1].shape, torch.Size([2, 2048, 7, 7])) - - # Test SEResNet50 with checkpoint forward - model = SCNet(50, out_indices=(0, 1, 2, 3), with_cp=True) - for m in model.modules(): - if self.is_block(m): - self.assertTrue(m.with_cp) - model.init_weights() - model.train() - - imgs = torch.randn(2, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 4) - self.assertEqual(feat[0].shape, torch.Size([2, 256, 56, 56])) - self.assertEqual(feat[1].shape, torch.Size([2, 512, 28, 28])) - self.assertEqual(feat[2].shape, torch.Size([2, 1024, 14, 14])) - self.assertEqual(feat[3].shape, torch.Size([2, 2048, 7, 7])) - - # Test SCNet zero initialization of residual - model = SCNet(50, out_indices=(0, 1, 2, 3), zero_init_residual=True) - model.init_weights() - for m in model.modules(): - if isinstance(m, SCBottleneck): - self.assertTrue(self.all_zeros(m.norm3)) - model.train() - - imgs = torch.randn(2, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 4) - self.assertEqual(feat[0].shape, torch.Size([2, 256, 56, 56])) - self.assertEqual(feat[1].shape, torch.Size([2, 512, 28, 28])) - self.assertEqual(feat[2].shape, torch.Size([2, 1024, 14, 14])) - self.assertEqual(feat[3].shape, torch.Size([2, 2048, 7, 7])) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import torch +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.models.backbones import SCNet +from mmpose.models.backbones.scnet import SCBottleneck, SCConv + + +class TestSCnet(TestCase): + + @staticmethod + def is_block(modules): + """Check if is SCNet building block.""" + if isinstance(modules, (SCBottleneck, )): + return True + return False + + @staticmethod + def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (_BatchNorm, )): + return True + return False + + @staticmethod + def all_zeros(modules): + """Check if the weight(and bias) is all zero.""" + weight_zero = torch.equal(modules.weight.data, + torch.zeros_like(modules.weight.data)) + if hasattr(modules, 'bias'): + bias_zero = torch.equal(modules.bias.data, + torch.zeros_like(modules.bias.data)) + else: + bias_zero = True + + return weight_zero and bias_zero + + @staticmethod + def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + def test_scnet_scconv(self): + # Test scconv forward + layer = SCConv(64, 64, 1, 4) + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) + + def test_scnet_bottleneck(self): + # Test Bottleneck forward + block = SCBottleneck(64, 64) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) + + def test_scnet_backbone(self): + """Test scnet backbone.""" + with self.assertRaises(KeyError): + # SCNet depth should be in [50, 101] + SCNet(20) + + with self.assertRaises(TypeError): + # pretrained must be a string path + model = SCNet(50) + model.init_weights(pretrained=0) + + # Test SCNet norm_eval=True + model = SCNet(50, norm_eval=True) + model.init_weights() + model.train() + self.assertTrue(self.check_norm_state(model.modules(), 
False)) + + # Test SCNet50 with first stage frozen + frozen_stages = 1 + model = SCNet(50, frozen_stages=frozen_stages) + model.init_weights() + model.train() + self.assertFalse(model.norm1.training) + for layer in [model.conv1, model.norm1]: + for param in layer.parameters(): + self.assertFalse(param.requires_grad) + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + self.assertFalse(mod.training) + for param in layer.parameters(): + self.assertFalse(param.requires_grad) + + # Test SCNet with BatchNorm forward + model = SCNet(50, out_indices=(0, 1, 2, 3)) + for m in model.modules(): + if self.is_norm(m): + self.assertIsInstance(m, _BatchNorm) + model.init_weights() + model.train() + + imgs = torch.randn(2, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 4) + self.assertEqual(feat[0].shape, torch.Size([2, 256, 56, 56])) + self.assertEqual(feat[1].shape, torch.Size([2, 512, 28, 28])) + self.assertEqual(feat[2].shape, torch.Size([2, 1024, 14, 14])) + self.assertEqual(feat[3].shape, torch.Size([2, 2048, 7, 7])) + + # Test SCNet with layers 1, 2, 3 out forward + model = SCNet(50, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + imgs = torch.randn(2, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 3) + self.assertEqual(feat[0].shape, torch.Size([2, 256, 56, 56])) + self.assertEqual(feat[1].shape, torch.Size([2, 512, 28, 28])) + self.assertEqual(feat[2].shape, torch.Size([2, 1024, 14, 14])) + + # Test SEResNet50 with layers 3 (top feature maps) out forward + model = SCNet(50, out_indices=(3, )) + model.init_weights() + model.train() + + imgs = torch.randn(2, 3, 224, 224) + feat = model(imgs) + self.assertIsInstance(feat, tuple) + self.assertEqual(feat[-1].shape, torch.Size([2, 2048, 7, 7])) + + # Test SEResNet50 with checkpoint forward + model = SCNet(50, out_indices=(0, 1, 2, 3), with_cp=True) + for m in model.modules(): + if self.is_block(m): + self.assertTrue(m.with_cp) + model.init_weights() + model.train() + + imgs = torch.randn(2, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 4) + self.assertEqual(feat[0].shape, torch.Size([2, 256, 56, 56])) + self.assertEqual(feat[1].shape, torch.Size([2, 512, 28, 28])) + self.assertEqual(feat[2].shape, torch.Size([2, 1024, 14, 14])) + self.assertEqual(feat[3].shape, torch.Size([2, 2048, 7, 7])) + + # Test SCNet zero initialization of residual + model = SCNet(50, out_indices=(0, 1, 2, 3), zero_init_residual=True) + model.init_weights() + for m in model.modules(): + if isinstance(m, SCBottleneck): + self.assertTrue(self.all_zeros(m.norm3)) + model.train() + + imgs = torch.randn(2, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 4) + self.assertEqual(feat[0].shape, torch.Size([2, 256, 56, 56])) + self.assertEqual(feat[1].shape, torch.Size([2, 512, 28, 28])) + self.assertEqual(feat[2].shape, torch.Size([2, 1024, 14, 14])) + self.assertEqual(feat[3].shape, torch.Size([2, 2048, 7, 7])) diff --git a/tests/test_models/test_backbones/test_seresnet.py b/tests/test_models/test_backbones/test_seresnet.py index 8191d50e66..b73aa35b13 100644 --- a/tests/test_models/test_backbones/test_seresnet.py +++ b/tests/test_models/test_backbones/test_seresnet.py @@ -1,239 +1,239 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import torch -from torch.nn.modules import AvgPool2d -from torch.nn.modules.batchnorm import _BatchNorm - -from mmpose.models.backbones import SEResNet -from mmpose.models.backbones.resnet import ResLayer -from mmpose.models.backbones.seresnet import SEBottleneck, SELayer - - -class TestSEResnet(TestCase): - - @staticmethod - def all_zeros(modules): - """Check if the weight(and bias) is all zero.""" - weight_zero = torch.equal(modules.weight.data, - torch.zeros_like(modules.weight.data)) - if hasattr(modules, 'bias'): - bias_zero = torch.equal(modules.bias.data, - torch.zeros_like(modules.bias.data)) - else: - bias_zero = True - - return weight_zero and bias_zero - - @staticmethod - def check_norm_state(modules, train_state): - """Check if norm layer is in correct train state.""" - for mod in modules: - if isinstance(mod, _BatchNorm): - if mod.training != train_state: - return False - return True - - def test_selayer(self): - # Test selayer forward - layer = SELayer(64) - x = torch.randn(1, 64, 56, 56) - x_out = layer(x) - self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) - - # Test selayer forward with different ratio - layer = SELayer(64, ratio=8) - x = torch.randn(1, 64, 56, 56) - x_out = layer(x) - self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) - - def test_bottleneck(self): - - with self.assertRaises(AssertionError): - # Style must be in ['pytorch', 'caffe'] - SEBottleneck(64, 64, style='tensorflow') - - # Test SEBottleneck with checkpoint forward - block = SEBottleneck(64, 64, with_cp=True) - self.assertTrue(block.with_cp) - x = torch.randn(1, 64, 56, 56) - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) - - # Test Bottleneck style - block = SEBottleneck(64, 256, stride=2, style='pytorch') - self.assertEqual(block.conv1.stride, (1, 1)) - self.assertEqual(block.conv2.stride, (2, 2)) - block = SEBottleneck(64, 256, stride=2, style='caffe') - self.assertEqual(block.conv1.stride, (2, 2)) - self.assertEqual(block.conv2.stride, (1, 1)) - - # Test Bottleneck forward - block = SEBottleneck(64, 64) - x = torch.randn(1, 64, 56, 56) - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) - - def test_res_layer(self): - # Test ResLayer of 3 Bottleneck w\o downsample - layer = ResLayer(SEBottleneck, 3, 64, 64, se_ratio=16) - self.assertEqual(len(layer), 3) - self.assertEqual(layer[0].conv1.in_channels, 64) - self.assertEqual(layer[0].conv1.out_channels, 16) - for i in range(1, len(layer)): - self.assertEqual(layer[i].conv1.in_channels, 64) - self.assertEqual(layer[i].conv1.out_channels, 16) - for i in range(len(layer)): - self.assertIsNone(layer[i].downsample) - x = torch.randn(1, 64, 56, 56) - x_out = layer(x) - self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) - - # Test ResLayer of 3 SEBottleneck with downsample - layer = ResLayer(SEBottleneck, 3, 64, 256, se_ratio=16) - self.assertEqual(layer[0].downsample[0].out_channels, 256) - for i in range(1, len(layer)): - self.assertIsNone(layer[i].downsample) - x = torch.randn(1, 64, 56, 56) - x_out = layer(x) - self.assertEqual(x_out.shape, torch.Size([1, 256, 56, 56])) - - # Test ResLayer of 3 SEBottleneck with stride=2 - layer = ResLayer(SEBottleneck, 3, 64, 256, stride=2, se_ratio=8) - self.assertEqual(layer[0].downsample[0].out_channels, 256) - self.assertEqual(layer[0].downsample[0].stride, (2, 2)) - for i in range(1, len(layer)): - self.assertIsNone(layer[i].downsample) - x = torch.randn(1, 64, 56, 56) - x_out = layer(x) - 
self.assertEqual(x_out.shape, torch.Size([1, 256, 28, 28])) - - # Test ResLayer of 3 SEBottleneck with stride=2 and average downsample - layer = ResLayer( - SEBottleneck, 3, 64, 256, stride=2, avg_down=True, se_ratio=8) - self.assertIsInstance(layer[0].downsample[0], AvgPool2d) - self.assertEqual(layer[0].downsample[1].out_channels, 256) - self.assertEqual(layer[0].downsample[1].stride, (1, 1)) - for i in range(1, len(layer)): - self.assertIsNone(layer[i].downsample) - x = torch.randn(1, 64, 56, 56) - x_out = layer(x) - self.assertEqual(x_out.shape, torch.Size([1, 256, 28, 28])) - - def test_seresnet(self): - """Test resnet backbone.""" - with self.assertRaises(KeyError): - # SEResNet depth should be in [50, 101, 152] - SEResNet(20) - - with self.assertRaises(AssertionError): - # In SEResNet: 1 <= num_stages <= 4 - SEResNet(50, num_stages=0) - - with self.assertRaises(AssertionError): - # In SEResNet: 1 <= num_stages <= 4 - SEResNet(50, num_stages=5) - - with self.assertRaises(AssertionError): - # len(strides) == len(dilations) == num_stages - SEResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3) - - with self.assertRaises(AssertionError): - # Style must be in ['pytorch', 'caffe'] - SEResNet(50, style='tensorflow') - - # Test SEResNet50 norm_eval=True - model = SEResNet(50, norm_eval=True) - model.init_weights() - model.train() - self.assertTrue(self.check_norm_state(model.modules(), False)) - - # Test SEResNet50 with torchvision pretrained weight - init_cfg = dict(type='Pretrained', checkpoint='torchvision://resnet50') - model = SEResNet(depth=50, norm_eval=True, init_cfg=init_cfg) - model.train() - self.assertTrue(self.check_norm_state(model.modules(), False)) - - # Test SEResNet50 with first stage frozen - frozen_stages = 1 - model = SEResNet(50, frozen_stages=frozen_stages) - model.init_weights() - model.train() - self.assertFalse(model.norm1.training) - for layer in [model.conv1, model.norm1]: - for param in layer.parameters(): - self.assertFalse(param.requires_grad) - for i in range(1, frozen_stages + 1): - layer = getattr(model, f'layer{i}') - for mod in layer.modules(): - if isinstance(mod, _BatchNorm): - self.assertFalse(mod.training) - for param in layer.parameters(): - self.assertFalse(param.requires_grad) - - # Test SEResNet50 with BatchNorm forward - model = SEResNet(50, out_indices=(0, 1, 2, 3)) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 4) - self.assertEqual(feat[0].shape, torch.Size([1, 256, 56, 56])) - self.assertEqual(feat[1].shape, torch.Size([1, 512, 28, 28])) - self.assertEqual(feat[2].shape, torch.Size([1, 1024, 14, 14])) - self.assertEqual(feat[3].shape, torch.Size([1, 2048, 7, 7])) - - # Test SEResNet50 with layers 1, 2, 3 out forward - model = SEResNet(50, out_indices=(0, 1, 2)) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 3) - self.assertEqual(feat[0].shape, torch.Size([1, 256, 56, 56])) - self.assertEqual(feat[1].shape, torch.Size([1, 512, 28, 28])) - self.assertEqual(feat[2].shape, torch.Size([1, 1024, 14, 14])) - - # Test SEResNet50 with layers 3 (top feature maps) out forward - model = SEResNet(50, out_indices=(3, )) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertIsInstance(feat, tuple) - self.assertEqual(feat[-1].shape, torch.Size([1, 2048, 7, 7])) - - # Test SEResNet50 with checkpoint forward - model = SEResNet(50, 
out_indices=(0, 1, 2, 3), with_cp=True) - for m in model.modules(): - if isinstance(m, SEBottleneck): - self.assertTrue(m.with_cp) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 4) - self.assertEqual(feat[0].shape, torch.Size([1, 256, 56, 56])) - self.assertEqual(feat[1].shape, torch.Size([1, 512, 28, 28])) - self.assertEqual(feat[2].shape, torch.Size([1, 1024, 14, 14])) - self.assertEqual(feat[3].shape, torch.Size([1, 2048, 7, 7])) - - # Test SEResNet50 zero initialization of residual - model = SEResNet(50, out_indices=(0, 1, 2, 3), zero_init_residual=True) - model.init_weights() - for m in model.modules(): - if isinstance(m, SEBottleneck): - self.assertTrue(self.all_zeros(m.norm3)) - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 4) - self.assertEqual(feat[0].shape, torch.Size([1, 256, 56, 56])) - self.assertEqual(feat[1].shape, torch.Size([1, 512, 28, 28])) - self.assertEqual(feat[2].shape, torch.Size([1, 1024, 14, 14])) - self.assertEqual(feat[3].shape, torch.Size([1, 2048, 7, 7])) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import torch +from torch.nn.modules import AvgPool2d +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.models.backbones import SEResNet +from mmpose.models.backbones.resnet import ResLayer +from mmpose.models.backbones.seresnet import SEBottleneck, SELayer + + +class TestSEResnet(TestCase): + + @staticmethod + def all_zeros(modules): + """Check if the weight(and bias) is all zero.""" + weight_zero = torch.equal(modules.weight.data, + torch.zeros_like(modules.weight.data)) + if hasattr(modules, 'bias'): + bias_zero = torch.equal(modules.bias.data, + torch.zeros_like(modules.bias.data)) + else: + bias_zero = True + + return weight_zero and bias_zero + + @staticmethod + def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + def test_selayer(self): + # Test selayer forward + layer = SELayer(64) + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) + + # Test selayer forward with different ratio + layer = SELayer(64, ratio=8) + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) + + def test_bottleneck(self): + + with self.assertRaises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + SEBottleneck(64, 64, style='tensorflow') + + # Test SEBottleneck with checkpoint forward + block = SEBottleneck(64, 64, with_cp=True) + self.assertTrue(block.with_cp) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) + + # Test Bottleneck style + block = SEBottleneck(64, 256, stride=2, style='pytorch') + self.assertEqual(block.conv1.stride, (1, 1)) + self.assertEqual(block.conv2.stride, (2, 2)) + block = SEBottleneck(64, 256, stride=2, style='caffe') + self.assertEqual(block.conv1.stride, (2, 2)) + self.assertEqual(block.conv2.stride, (1, 1)) + + # Test Bottleneck forward + block = SEBottleneck(64, 64) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) + + def test_res_layer(self): + # Test ResLayer of 3 Bottleneck w\o downsample + layer = ResLayer(SEBottleneck, 3, 64, 64, se_ratio=16) 
+ self.assertEqual(len(layer), 3) + self.assertEqual(layer[0].conv1.in_channels, 64) + self.assertEqual(layer[0].conv1.out_channels, 16) + for i in range(1, len(layer)): + self.assertEqual(layer[i].conv1.in_channels, 64) + self.assertEqual(layer[i].conv1.out_channels, 16) + for i in range(len(layer)): + self.assertIsNone(layer[i].downsample) + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) + + # Test ResLayer of 3 SEBottleneck with downsample + layer = ResLayer(SEBottleneck, 3, 64, 256, se_ratio=16) + self.assertEqual(layer[0].downsample[0].out_channels, 256) + for i in range(1, len(layer)): + self.assertIsNone(layer[i].downsample) + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + self.assertEqual(x_out.shape, torch.Size([1, 256, 56, 56])) + + # Test ResLayer of 3 SEBottleneck with stride=2 + layer = ResLayer(SEBottleneck, 3, 64, 256, stride=2, se_ratio=8) + self.assertEqual(layer[0].downsample[0].out_channels, 256) + self.assertEqual(layer[0].downsample[0].stride, (2, 2)) + for i in range(1, len(layer)): + self.assertIsNone(layer[i].downsample) + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + self.assertEqual(x_out.shape, torch.Size([1, 256, 28, 28])) + + # Test ResLayer of 3 SEBottleneck with stride=2 and average downsample + layer = ResLayer( + SEBottleneck, 3, 64, 256, stride=2, avg_down=True, se_ratio=8) + self.assertIsInstance(layer[0].downsample[0], AvgPool2d) + self.assertEqual(layer[0].downsample[1].out_channels, 256) + self.assertEqual(layer[0].downsample[1].stride, (1, 1)) + for i in range(1, len(layer)): + self.assertIsNone(layer[i].downsample) + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + self.assertEqual(x_out.shape, torch.Size([1, 256, 28, 28])) + + def test_seresnet(self): + """Test resnet backbone.""" + with self.assertRaises(KeyError): + # SEResNet depth should be in [50, 101, 152] + SEResNet(20) + + with self.assertRaises(AssertionError): + # In SEResNet: 1 <= num_stages <= 4 + SEResNet(50, num_stages=0) + + with self.assertRaises(AssertionError): + # In SEResNet: 1 <= num_stages <= 4 + SEResNet(50, num_stages=5) + + with self.assertRaises(AssertionError): + # len(strides) == len(dilations) == num_stages + SEResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3) + + with self.assertRaises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + SEResNet(50, style='tensorflow') + + # Test SEResNet50 norm_eval=True + model = SEResNet(50, norm_eval=True) + model.init_weights() + model.train() + self.assertTrue(self.check_norm_state(model.modules(), False)) + + # Test SEResNet50 with torchvision pretrained weight + init_cfg = dict(type='Pretrained', checkpoint='torchvision://resnet50') + model = SEResNet(depth=50, norm_eval=True, init_cfg=init_cfg) + model.train() + self.assertTrue(self.check_norm_state(model.modules(), False)) + + # Test SEResNet50 with first stage frozen + frozen_stages = 1 + model = SEResNet(50, frozen_stages=frozen_stages) + model.init_weights() + model.train() + self.assertFalse(model.norm1.training) + for layer in [model.conv1, model.norm1]: + for param in layer.parameters(): + self.assertFalse(param.requires_grad) + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + self.assertFalse(mod.training) + for param in layer.parameters(): + self.assertFalse(param.requires_grad) + + # Test SEResNet50 with BatchNorm forward + model = SEResNet(50, out_indices=(0, 1, 2, 3)) + 
model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 4) + self.assertEqual(feat[0].shape, torch.Size([1, 256, 56, 56])) + self.assertEqual(feat[1].shape, torch.Size([1, 512, 28, 28])) + self.assertEqual(feat[2].shape, torch.Size([1, 1024, 14, 14])) + self.assertEqual(feat[3].shape, torch.Size([1, 2048, 7, 7])) + + # Test SEResNet50 with layers 1, 2, 3 out forward + model = SEResNet(50, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 3) + self.assertEqual(feat[0].shape, torch.Size([1, 256, 56, 56])) + self.assertEqual(feat[1].shape, torch.Size([1, 512, 28, 28])) + self.assertEqual(feat[2].shape, torch.Size([1, 1024, 14, 14])) + + # Test SEResNet50 with layers 3 (top feature maps) out forward + model = SEResNet(50, out_indices=(3, )) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertIsInstance(feat, tuple) + self.assertEqual(feat[-1].shape, torch.Size([1, 2048, 7, 7])) + + # Test SEResNet50 with checkpoint forward + model = SEResNet(50, out_indices=(0, 1, 2, 3), with_cp=True) + for m in model.modules(): + if isinstance(m, SEBottleneck): + self.assertTrue(m.with_cp) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 4) + self.assertEqual(feat[0].shape, torch.Size([1, 256, 56, 56])) + self.assertEqual(feat[1].shape, torch.Size([1, 512, 28, 28])) + self.assertEqual(feat[2].shape, torch.Size([1, 1024, 14, 14])) + self.assertEqual(feat[3].shape, torch.Size([1, 2048, 7, 7])) + + # Test SEResNet50 zero initialization of residual + model = SEResNet(50, out_indices=(0, 1, 2, 3), zero_init_residual=True) + model.init_weights() + for m in model.modules(): + if isinstance(m, SEBottleneck): + self.assertTrue(self.all_zeros(m.norm3)) + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 4) + self.assertEqual(feat[0].shape, torch.Size([1, 256, 56, 56])) + self.assertEqual(feat[1].shape, torch.Size([1, 512, 28, 28])) + self.assertEqual(feat[2].shape, torch.Size([1, 1024, 14, 14])) + self.assertEqual(feat[3].shape, torch.Size([1, 2048, 7, 7])) diff --git a/tests/test_models/test_backbones/test_seresnext.py b/tests/test_models/test_backbones/test_seresnext.py index c28e1891e1..6ad446cd06 100644 --- a/tests/test_models/test_backbones/test_seresnext.py +++ b/tests/test_models/test_backbones/test_seresnext.py @@ -1,77 +1,77 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
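Editor's note on the SE-ResNet/SE-ResNeXt tests above: the SELayer assertions only pin down shapes (an (N, C, H, W) tensor comes back with the same shape, and a `ratio` argument controls the channel reduction). The sketch below is a minimal squeeze-and-excitation layer that satisfies those shape checks; `TinySELayer` is an illustrative name and this is the generic SE idea, not the mmpose SELayer implementation.

    import torch
    import torch.nn as nn


    class TinySELayer(nn.Module):
        """Minimal squeeze-and-excitation sketch (illustrative, not mmpose's SELayer)."""

        def __init__(self, channels, ratio=16):
            super().__init__()
            self.pool = nn.AdaptiveAvgPool2d(1)  # squeeze: global spatial average
            self.fc = nn.Sequential(
                nn.Conv2d(channels, channels // ratio, 1),
                nn.ReLU(inplace=True),
                nn.Conv2d(channels // ratio, channels, 1),
                nn.Sigmoid(),
            )

        def forward(self, x):
            # excitation: per-channel gates in (0, 1), broadcast over H x W
            return x * self.fc(self.pool(x))


    x = torch.randn(1, 64, 56, 56)
    assert TinySELayer(64, ratio=8)(x).shape == x.shape  # same shape check as the test
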
-from unittest import TestCase - -import torch - -from mmpose.models.backbones import SEResNeXt -from mmpose.models.backbones.seresnext import SEBottleneck as SEBottleneckX - - -class TestSEResnext(TestCase): - - def test_bottleneck(self): - with self.assertRaises(AssertionError): - # Style must be in ['pytorch', 'caffe'] - SEBottleneckX( - 64, 64, groups=32, width_per_group=4, style='tensorflow') - - # Test SEResNeXt Bottleneck structure - block = SEBottleneckX( - 64, 256, groups=32, width_per_group=4, stride=2, style='pytorch') - self.assertEqual(block.width_per_group, 4) - self.assertEqual(block.conv2.stride, (2, 2)) - self.assertEqual(block.conv2.groups, 32) - self.assertEqual(block.conv2.out_channels, 128) - self.assertEqual(block.conv2.out_channels, block.mid_channels) - - # Test SEResNeXt Bottleneck structure (groups=1) - block = SEBottleneckX( - 64, 256, groups=1, width_per_group=4, stride=2, style='pytorch') - self.assertEqual(block.conv2.stride, (2, 2)) - self.assertEqual(block.conv2.groups, 1) - self.assertEqual(block.conv2.out_channels, 64) - self.assertEqual(block.mid_channels, 64) - self.assertEqual(block.conv2.out_channels, block.mid_channels) - - # Test SEResNeXt Bottleneck forward - block = SEBottleneckX( - 64, 64, base_channels=16, groups=32, width_per_group=4) - x = torch.randn(1, 64, 56, 56) - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) - - def test_seresnext(self): - with self.assertRaises(KeyError): - # SEResNeXt depth should be in [50, 101, 152] - SEResNeXt(depth=18) - - # Test SEResNeXt with group 32, width_per_group 4 - model = SEResNeXt( - depth=50, groups=32, width_per_group=4, out_indices=(0, 1, 2, 3)) - for m in model.modules(): - if isinstance(m, SEBottleneckX): - self.assertEqual(m.conv2.groups, 32) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 4) - self.assertEqual(feat[0].shape, torch.Size([1, 256, 56, 56])) - self.assertEqual(feat[1].shape, torch.Size([1, 512, 28, 28])) - self.assertEqual(feat[2].shape, torch.Size([1, 1024, 14, 14])) - self.assertEqual(feat[3].shape, torch.Size([1, 2048, 7, 7])) - - # Test SEResNeXt with layers 3 out forward - model = SEResNeXt( - depth=50, groups=32, width_per_group=4, out_indices=(3, )) - for m in model.modules(): - if isinstance(m, SEBottleneckX): - self.assertEqual(m.conv2.groups, 32) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertIsInstance(feat, tuple) - self.assertEqual(feat[-1].shape, torch.Size([1, 2048, 7, 7])) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import torch + +from mmpose.models.backbones import SEResNeXt +from mmpose.models.backbones.seresnext import SEBottleneck as SEBottleneckX + + +class TestSEResnext(TestCase): + + def test_bottleneck(self): + with self.assertRaises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + SEBottleneckX( + 64, 64, groups=32, width_per_group=4, style='tensorflow') + + # Test SEResNeXt Bottleneck structure + block = SEBottleneckX( + 64, 256, groups=32, width_per_group=4, stride=2, style='pytorch') + self.assertEqual(block.width_per_group, 4) + self.assertEqual(block.conv2.stride, (2, 2)) + self.assertEqual(block.conv2.groups, 32) + self.assertEqual(block.conv2.out_channels, 128) + self.assertEqual(block.conv2.out_channels, block.mid_channels) + + # Test SEResNeXt Bottleneck structure (groups=1) + block = SEBottleneckX( + 64, 256, groups=1, width_per_group=4, stride=2, style='pytorch') + self.assertEqual(block.conv2.stride, (2, 2)) + self.assertEqual(block.conv2.groups, 1) + self.assertEqual(block.conv2.out_channels, 64) + self.assertEqual(block.mid_channels, 64) + self.assertEqual(block.conv2.out_channels, block.mid_channels) + + # Test SEResNeXt Bottleneck forward + block = SEBottleneckX( + 64, 64, base_channels=16, groups=32, width_per_group=4) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) + + def test_seresnext(self): + with self.assertRaises(KeyError): + # SEResNeXt depth should be in [50, 101, 152] + SEResNeXt(depth=18) + + # Test SEResNeXt with group 32, width_per_group 4 + model = SEResNeXt( + depth=50, groups=32, width_per_group=4, out_indices=(0, 1, 2, 3)) + for m in model.modules(): + if isinstance(m, SEBottleneckX): + self.assertEqual(m.conv2.groups, 32) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 4) + self.assertEqual(feat[0].shape, torch.Size([1, 256, 56, 56])) + self.assertEqual(feat[1].shape, torch.Size([1, 512, 28, 28])) + self.assertEqual(feat[2].shape, torch.Size([1, 1024, 14, 14])) + self.assertEqual(feat[3].shape, torch.Size([1, 2048, 7, 7])) + + # Test SEResNeXt with layers 3 out forward + model = SEResNeXt( + depth=50, groups=32, width_per_group=4, out_indices=(3, )) + for m in model.modules(): + if isinstance(m, SEBottleneckX): + self.assertEqual(m.conv2.groups, 32) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertIsInstance(feat, tuple) + self.assertEqual(feat[-1].shape, torch.Size([1, 2048, 7, 7])) diff --git a/tests/test_models/test_backbones/test_shufflenet_v1.py b/tests/test_models/test_backbones/test_shufflenet_v1.py index 526e1a6292..91873f2547 100644 --- a/tests/test_models/test_backbones/test_shufflenet_v1.py +++ b/tests/test_models/test_backbones/test_shufflenet_v1.py @@ -1,244 +1,244 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
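Editor's note on the SE-ResNeXt bottleneck assertions above (conv2.out_channels == 128 for groups=32, width_per_group=4, out_channels=256, but 64 when groups=1): they follow the usual ResNeXt width rule. A hedged sketch of that arithmetic, assuming expansion=4 and base_channels=64 as in the tests; `resnext_mid_channels` is an illustrative helper, not an mmpose function.

    def resnext_mid_channels(out_channels, groups, width_per_group,
                             expansion=4, base_channels=64):
        """Sketch of the standard ResNeXt bottleneck width rule (illustrative only)."""
        mid = out_channels // expansion  # plain bottleneck width
        if groups != 1:
            # widen in proportion to groups * width_per_group
            mid = groups * width_per_group * (mid // base_channels)
        return mid


    assert resnext_mid_channels(256, groups=32, width_per_group=4) == 128
    assert resnext_mid_channels(256, groups=1, width_per_group=4) == 64
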
-from unittest import TestCase - -import torch -from torch.nn.modules import GroupNorm -from torch.nn.modules.batchnorm import _BatchNorm - -from mmpose.models.backbones import ShuffleNetV1 -from mmpose.models.backbones.shufflenet_v1 import ShuffleUnit - - -class TestShufflenetV1(TestCase): - - @staticmethod - def is_block(modules): - """Check if is ResNet building block.""" - if isinstance(modules, (ShuffleUnit, )): - return True - return False - - @staticmethod - def is_norm(modules): - """Check if is one of the norms.""" - if isinstance(modules, (GroupNorm, _BatchNorm)): - return True - return False - - @staticmethod - def check_norm_state(modules, train_state): - """Check if norm layer is in correct train state.""" - for mod in modules: - if isinstance(mod, _BatchNorm): - if mod.training != train_state: - return False - return True - - def test_shufflenetv1_shuffleuint(self): - - with self.assertRaises(ValueError): - # combine must be in ['add', 'concat'] - ShuffleUnit(24, 16, groups=3, first_block=True, combine='test') - - with self.assertRaises(AssertionError): - # inplanes must be equal tp = outplanes when combine='add' - ShuffleUnit(64, 24, groups=4, first_block=True, combine='add') - - # Test ShuffleUnit with combine='add' - block = ShuffleUnit(24, 24, groups=3, first_block=True, combine='add') - x = torch.randn(1, 24, 56, 56) - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size((1, 24, 56, 56))) - - # Test ShuffleUnit with combine='concat' - block = ShuffleUnit( - 24, 240, groups=3, first_block=True, combine='concat') - x = torch.randn(1, 24, 56, 56) - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size((1, 240, 28, 28))) - - # Test ShuffleUnit with checkpoint forward - block = ShuffleUnit( - 24, 24, groups=3, first_block=True, combine='add', with_cp=True) - self.assertTrue(block.with_cp) - x = torch.randn(1, 24, 56, 56) - x.requires_grad = True - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size((1, 24, 56, 56))) - - def test_shufflenetv1_backbone(self): - - with self.assertRaises(ValueError): - # frozen_stages must be in range(-1, 4) - ShuffleNetV1(frozen_stages=10) - - with self.assertRaises(ValueError): - # the item in out_indices must be in range(0, 4) - ShuffleNetV1(out_indices=[5]) - - with self.assertRaises(ValueError): - # groups must be in [1, 2, 3, 4, 8] - ShuffleNetV1(groups=10) - - # Test ShuffleNetV1 norm state - model = ShuffleNetV1() - model.init_weights() - model.train() - self.assertTrue(self.check_norm_state(model.modules(), True)) - - # Test ShuffleNetV1 with first stage frozen - frozen_stages = 1 - model = ShuffleNetV1( - frozen_stages=frozen_stages, out_indices=(0, 1, 2)) - model.init_weights() - model.train() - for param in model.conv1.parameters(): - self.assertFalse(param.requires_grad) - for i in range(frozen_stages): - layer = model.layers[i] - for mod in layer.modules(): - if isinstance(mod, _BatchNorm): - self.assertFalse(mod.training) - for param in layer.parameters(): - self.assertFalse(param.requires_grad) - - # Test ShuffleNetV1 forward with groups=1 - model = ShuffleNetV1(groups=1, out_indices=(0, 1, 2)) - model.init_weights() - model.train() - - for m in model.modules(): - if self.is_norm(m): - self.assertIsInstance(m, _BatchNorm) - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 3) - self.assertEqual(feat[0].shape, torch.Size((1, 144, 28, 28))) - self.assertEqual(feat[1].shape, torch.Size((1, 288, 14, 14))) - self.assertEqual(feat[2].shape, torch.Size((1, 576, 7, 7))) - - # Test 
ShuffleNetV1 forward with groups=2 - model = ShuffleNetV1(groups=2, out_indices=(0, 1, 2)) - model.init_weights() - model.train() - - for m in model.modules(): - if self.is_norm(m): - self.assertIsInstance(m, _BatchNorm) - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 3) - self.assertEqual(feat[0].shape, torch.Size((1, 200, 28, 28))) - self.assertEqual(feat[1].shape, torch.Size((1, 400, 14, 14))) - self.assertEqual(feat[2].shape, torch.Size((1, 800, 7, 7))) - - # Test ShuffleNetV1 forward with groups=3 - model = ShuffleNetV1(groups=3, out_indices=(0, 1, 2)) - model.init_weights() - model.train() - - for m in model.modules(): - if self.is_norm(m): - self.assertIsInstance(m, _BatchNorm) - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 3) - self.assertEqual(feat[0].shape, torch.Size((1, 240, 28, 28))) - self.assertEqual(feat[1].shape, torch.Size((1, 480, 14, 14))) - self.assertEqual(feat[2].shape, torch.Size((1, 960, 7, 7))) - - # Test ShuffleNetV1 forward with groups=4 - model = ShuffleNetV1(groups=4, out_indices=(0, 1, 2)) - model.init_weights() - model.train() - - for m in model.modules(): - if self.is_norm(m): - self.assertIsInstance(m, _BatchNorm) - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 3) - self.assertEqual(feat[0].shape, torch.Size((1, 272, 28, 28))) - self.assertEqual(feat[1].shape, torch.Size((1, 544, 14, 14))) - self.assertEqual(feat[2].shape, torch.Size((1, 1088, 7, 7))) - - # Test ShuffleNetV1 forward with groups=8 - model = ShuffleNetV1(groups=8, out_indices=(0, 1, 2)) - model.init_weights() - model.train() - - for m in model.modules(): - if self.is_norm(m): - self.assertIsInstance(m, _BatchNorm) - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 3) - self.assertEqual(feat[0].shape, torch.Size((1, 384, 28, 28))) - self.assertEqual(feat[1].shape, torch.Size((1, 768, 14, 14))) - self.assertEqual(feat[2].shape, torch.Size((1, 1536, 7, 7))) - - # Test ShuffleNetV1 forward with GroupNorm forward - model = ShuffleNetV1( - groups=3, - norm_cfg=dict(type='GN', num_groups=2, requires_grad=True), - out_indices=(0, 1, 2)) - model.init_weights() - model.train() - - for m in model.modules(): - if self.is_norm(m): - self.assertIsInstance(m, GroupNorm) - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 3) - self.assertEqual(feat[0].shape, torch.Size((1, 240, 28, 28))) - self.assertEqual(feat[1].shape, torch.Size((1, 480, 14, 14))) - self.assertEqual(feat[2].shape, torch.Size((1, 960, 7, 7))) - - # Test ShuffleNetV1 forward with layers 1, 2 forward - model = ShuffleNetV1(groups=3, out_indices=(1, 2)) - model.init_weights() - model.train() - - for m in model.modules(): - if self.is_norm(m): - self.assertIsInstance(m, _BatchNorm) - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 2) - self.assertEqual(feat[0].shape, torch.Size((1, 480, 14, 14))) - self.assertEqual(feat[1].shape, torch.Size((1, 960, 7, 7))) - - # Test ShuffleNetV1 forward with layers 2 forward - model = ShuffleNetV1(groups=3, out_indices=(2, )) - model.init_weights() - model.train() - - for m in model.modules(): - if self.is_norm(m): - self.assertIsInstance(m, _BatchNorm) - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertIsInstance(feat, tuple) - self.assertEqual(feat[-1].shape, torch.Size((1, 960, 7, 7))) - - # Test ShuffleNetV1 forward with checkpoint forward - 
model = ShuffleNetV1(groups=3, with_cp=True) - for m in model.modules(): - if self.is_block(m): - self.assertTrue(m.with_cp) - - # Test ShuffleNetV1 with norm_eval - model = ShuffleNetV1(norm_eval=True) - model.init_weights() - model.train() - - self.assertTrue(self.check_norm_state(model.modules(), False)) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.models.backbones import ShuffleNetV1 +from mmpose.models.backbones.shufflenet_v1 import ShuffleUnit + + +class TestShufflenetV1(TestCase): + + @staticmethod + def is_block(modules): + """Check if is ResNet building block.""" + if isinstance(modules, (ShuffleUnit, )): + return True + return False + + @staticmethod + def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + @staticmethod + def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + def test_shufflenetv1_shuffleuint(self): + + with self.assertRaises(ValueError): + # combine must be in ['add', 'concat'] + ShuffleUnit(24, 16, groups=3, first_block=True, combine='test') + + with self.assertRaises(AssertionError): + # inplanes must be equal tp = outplanes when combine='add' + ShuffleUnit(64, 24, groups=4, first_block=True, combine='add') + + # Test ShuffleUnit with combine='add' + block = ShuffleUnit(24, 24, groups=3, first_block=True, combine='add') + x = torch.randn(1, 24, 56, 56) + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size((1, 24, 56, 56))) + + # Test ShuffleUnit with combine='concat' + block = ShuffleUnit( + 24, 240, groups=3, first_block=True, combine='concat') + x = torch.randn(1, 24, 56, 56) + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size((1, 240, 28, 28))) + + # Test ShuffleUnit with checkpoint forward + block = ShuffleUnit( + 24, 24, groups=3, first_block=True, combine='add', with_cp=True) + self.assertTrue(block.with_cp) + x = torch.randn(1, 24, 56, 56) + x.requires_grad = True + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size((1, 24, 56, 56))) + + def test_shufflenetv1_backbone(self): + + with self.assertRaises(ValueError): + # frozen_stages must be in range(-1, 4) + ShuffleNetV1(frozen_stages=10) + + with self.assertRaises(ValueError): + # the item in out_indices must be in range(0, 4) + ShuffleNetV1(out_indices=[5]) + + with self.assertRaises(ValueError): + # groups must be in [1, 2, 3, 4, 8] + ShuffleNetV1(groups=10) + + # Test ShuffleNetV1 norm state + model = ShuffleNetV1() + model.init_weights() + model.train() + self.assertTrue(self.check_norm_state(model.modules(), True)) + + # Test ShuffleNetV1 with first stage frozen + frozen_stages = 1 + model = ShuffleNetV1( + frozen_stages=frozen_stages, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + for param in model.conv1.parameters(): + self.assertFalse(param.requires_grad) + for i in range(frozen_stages): + layer = model.layers[i] + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + self.assertFalse(mod.training) + for param in layer.parameters(): + self.assertFalse(param.requires_grad) + + # Test ShuffleNetV1 forward with groups=1 + model = ShuffleNetV1(groups=1, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + for m in 
model.modules(): + if self.is_norm(m): + self.assertIsInstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 3) + self.assertEqual(feat[0].shape, torch.Size((1, 144, 28, 28))) + self.assertEqual(feat[1].shape, torch.Size((1, 288, 14, 14))) + self.assertEqual(feat[2].shape, torch.Size((1, 576, 7, 7))) + + # Test ShuffleNetV1 forward with groups=2 + model = ShuffleNetV1(groups=2, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if self.is_norm(m): + self.assertIsInstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 3) + self.assertEqual(feat[0].shape, torch.Size((1, 200, 28, 28))) + self.assertEqual(feat[1].shape, torch.Size((1, 400, 14, 14))) + self.assertEqual(feat[2].shape, torch.Size((1, 800, 7, 7))) + + # Test ShuffleNetV1 forward with groups=3 + model = ShuffleNetV1(groups=3, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if self.is_norm(m): + self.assertIsInstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 3) + self.assertEqual(feat[0].shape, torch.Size((1, 240, 28, 28))) + self.assertEqual(feat[1].shape, torch.Size((1, 480, 14, 14))) + self.assertEqual(feat[2].shape, torch.Size((1, 960, 7, 7))) + + # Test ShuffleNetV1 forward with groups=4 + model = ShuffleNetV1(groups=4, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if self.is_norm(m): + self.assertIsInstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 3) + self.assertEqual(feat[0].shape, torch.Size((1, 272, 28, 28))) + self.assertEqual(feat[1].shape, torch.Size((1, 544, 14, 14))) + self.assertEqual(feat[2].shape, torch.Size((1, 1088, 7, 7))) + + # Test ShuffleNetV1 forward with groups=8 + model = ShuffleNetV1(groups=8, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if self.is_norm(m): + self.assertIsInstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 3) + self.assertEqual(feat[0].shape, torch.Size((1, 384, 28, 28))) + self.assertEqual(feat[1].shape, torch.Size((1, 768, 14, 14))) + self.assertEqual(feat[2].shape, torch.Size((1, 1536, 7, 7))) + + # Test ShuffleNetV1 forward with GroupNorm forward + model = ShuffleNetV1( + groups=3, + norm_cfg=dict(type='GN', num_groups=2, requires_grad=True), + out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if self.is_norm(m): + self.assertIsInstance(m, GroupNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 3) + self.assertEqual(feat[0].shape, torch.Size((1, 240, 28, 28))) + self.assertEqual(feat[1].shape, torch.Size((1, 480, 14, 14))) + self.assertEqual(feat[2].shape, torch.Size((1, 960, 7, 7))) + + # Test ShuffleNetV1 forward with layers 1, 2 forward + model = ShuffleNetV1(groups=3, out_indices=(1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if self.is_norm(m): + self.assertIsInstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 2) + self.assertEqual(feat[0].shape, torch.Size((1, 480, 14, 14))) + self.assertEqual(feat[1].shape, torch.Size((1, 960, 7, 7))) + + # Test ShuffleNetV1 forward with layers 2 forward + model = 
ShuffleNetV1(groups=3, out_indices=(2, )) + model.init_weights() + model.train() + + for m in model.modules(): + if self.is_norm(m): + self.assertIsInstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertIsInstance(feat, tuple) + self.assertEqual(feat[-1].shape, torch.Size((1, 960, 7, 7))) + + # Test ShuffleNetV1 forward with checkpoint forward + model = ShuffleNetV1(groups=3, with_cp=True) + for m in model.modules(): + if self.is_block(m): + self.assertTrue(m.with_cp) + + # Test ShuffleNetV1 with norm_eval + model = ShuffleNetV1(norm_eval=True) + model.init_weights() + model.train() + + self.assertTrue(self.check_norm_state(model.modules(), False)) diff --git a/tests/test_models/test_backbones/test_shufflenet_v2.py b/tests/test_models/test_backbones/test_shufflenet_v2.py index 36f6cb2194..cafb35d460 100644 --- a/tests/test_models/test_backbones/test_shufflenet_v2.py +++ b/tests/test_models/test_backbones/test_shufflenet_v2.py @@ -1,207 +1,207 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import torch -from torch.nn.modules import GroupNorm -from torch.nn.modules.batchnorm import _BatchNorm - -from mmpose.models.backbones import ShuffleNetV2 -from mmpose.models.backbones.shufflenet_v2 import InvertedResidual - - -class TestShufflenetV2(TestCase): - - @staticmethod - def is_block(modules): - """Check if is ResNet building block.""" - if isinstance(modules, (InvertedResidual, )): - return True - return False - - @staticmethod - def is_norm(modules): - """Check if is one of the norms.""" - if isinstance(modules, (GroupNorm, _BatchNorm)): - return True - return False - - @staticmethod - def check_norm_state(modules, train_state): - """Check if norm layer is in correct train state.""" - for mod in modules: - if isinstance(mod, _BatchNorm): - if mod.training != train_state: - return False - return True - - def test_shufflenetv2_invertedresidual(self): - - with self.assertRaises(AssertionError): - # when stride==1, in_channels should be equal to - # out_channels // 2 * 2 - InvertedResidual(24, 32, stride=1) - - with self.assertRaises(AssertionError): - # when in_channels != out_channels // 2 * 2, stride should not be - # equal to 1. 
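Editor's note on the ShuffleNetV1 tests above: the ShuffleUnit cases exercise the combine='add' and combine='concat' branches, and the operation that gives the network its name is the channel shuffle between grouped convolutions. Below is a minimal, generic channel-shuffle sketch using the standard reshape/transpose trick; it is not copied from the mmpose source.

    import torch


    def channel_shuffle(x, groups):
        """Reorder channels so grouped convs can mix information across groups."""
        n, c, h, w = x.shape
        assert c % groups == 0
        # (N, C, H, W) -> (N, groups, C // groups, H, W) -> swap -> flatten back
        x = x.view(n, groups, c // groups, h, w).transpose(1, 2).contiguous()
        return x.view(n, c, h, w)


    x = torch.randn(1, 24, 56, 56)
    assert channel_shuffle(x, groups=3).shape == x.shape
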
- InvertedResidual(24, 32, stride=1) - - # Test InvertedResidual forward - block = InvertedResidual(24, 48, stride=2) - x = torch.randn(1, 24, 56, 56) - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size((1, 48, 28, 28))) - - # Test InvertedResidual with checkpoint forward - block = InvertedResidual(48, 48, stride=1, with_cp=True) - self.assertTrue(block.with_cp) - x = torch.randn(1, 48, 56, 56) - x.requires_grad = True - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size((1, 48, 56, 56))) - - def test_shufflenetv2_backbone(self): - - with self.assertRaises(ValueError): - # groups must be in 0.5, 1.0, 1.5, 2.0] - ShuffleNetV2(widen_factor=3.0) - - with self.assertRaises(ValueError): - # frozen_stages must be in [0, 1, 2, 3] - ShuffleNetV2(widen_factor=1.0, frozen_stages=4) - - with self.assertRaises(ValueError): - # out_indices must be in [0, 1, 2, 3] - ShuffleNetV2(widen_factor=1.0, out_indices=(4, )) - - with self.assertRaises(TypeError): - # init_weights must have no parameter - model = ShuffleNetV2() - model.init_weights(pretrained=1) - - # Test ShuffleNetV2 norm state - model = ShuffleNetV2() - model.init_weights() - model.train() - self.assertTrue(self.check_norm_state(model.modules(), True)) - - # Test ShuffleNetV2 with first stage frozen - frozen_stages = 1 - model = ShuffleNetV2(frozen_stages=frozen_stages) - model.init_weights() - model.train() - for param in model.conv1.parameters(): - self.assertFalse(param.requires_grad) - for i in range(0, frozen_stages): - layer = model.layers[i] - for mod in layer.modules(): - if isinstance(mod, _BatchNorm): - self.assertFalse(mod.training) - for param in layer.parameters(): - self.assertFalse(param.requires_grad) - - # Test ShuffleNetV2 with norm_eval - model = ShuffleNetV2(norm_eval=True) - model.init_weights() - model.train() - - self.assertTrue(self.check_norm_state(model.modules(), False)) - - # Test ShuffleNetV2 forward with widen_factor=0.5 - model = ShuffleNetV2(widen_factor=0.5, out_indices=(0, 1, 2, 3)) - model.init_weights() - model.train() - - for m in model.modules(): - if self.is_norm(m): - self.assertIsInstance(m, _BatchNorm) - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 4) - self.assertEqual(feat[0].shape, torch.Size((1, 48, 28, 28))) - self.assertEqual(feat[1].shape, torch.Size((1, 96, 14, 14))) - self.assertEqual(feat[2].shape, torch.Size((1, 192, 7, 7))) - - # Test ShuffleNetV2 forward with widen_factor=1.0 - model = ShuffleNetV2(widen_factor=1.0, out_indices=(0, 1, 2, 3)) - model.init_weights() - model.train() - - for m in model.modules(): - if self.is_norm(m): - self.assertIsInstance(m, _BatchNorm) - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 4) - self.assertEqual(feat[0].shape, torch.Size((1, 116, 28, 28))) - self.assertEqual(feat[1].shape, torch.Size((1, 232, 14, 14))) - self.assertEqual(feat[2].shape, torch.Size((1, 464, 7, 7))) - - # Test ShuffleNetV2 forward with widen_factor=1.5 - model = ShuffleNetV2(widen_factor=1.5, out_indices=(0, 1, 2, 3)) - model.init_weights() - model.train() - - for m in model.modules(): - if self.is_norm(m): - self.assertIsInstance(m, _BatchNorm) - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 4) - self.assertEqual(feat[0].shape, torch.Size((1, 176, 28, 28))) - self.assertEqual(feat[1].shape, torch.Size((1, 352, 14, 14))) - self.assertEqual(feat[2].shape, torch.Size((1, 704, 7, 7))) - - # Test ShuffleNetV2 forward with widen_factor=2.0 - 
model = ShuffleNetV2(widen_factor=2.0, out_indices=(0, 1, 2, 3)) - model.init_weights() - model.train() - - for m in model.modules(): - if self.is_norm(m): - self.assertIsInstance(m, _BatchNorm) - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 4) - self.assertEqual(feat[0].shape, torch.Size((1, 244, 28, 28))) - self.assertEqual(feat[1].shape, torch.Size((1, 488, 14, 14))) - self.assertEqual(feat[2].shape, torch.Size((1, 976, 7, 7))) - - # Test ShuffleNetV2 forward with layers 3 forward - model = ShuffleNetV2(widen_factor=1.0, out_indices=(2, )) - model.init_weights() - model.train() - - for m in model.modules(): - if self.is_norm(m): - self.assertIsInstance(m, _BatchNorm) - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertIsInstance(feat, tuple) - self.assertEqual(feat[-1].shape, torch.Size((1, 464, 7, 7))) - - # Test ShuffleNetV2 forward with layers 1 2 forward - model = ShuffleNetV2(widen_factor=1.0, out_indices=(1, 2)) - model.init_weights() - model.train() - - for m in model.modules(): - if self.is_norm(m): - self.assertIsInstance(m, _BatchNorm) - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 2) - self.assertEqual(feat[0].shape, torch.Size((1, 232, 14, 14))) - self.assertEqual(feat[1].shape, torch.Size((1, 464, 7, 7))) - - # Test ShuffleNetV2 forward with checkpoint forward - model = ShuffleNetV2(widen_factor=1.0, with_cp=True) - for m in model.modules(): - if self.is_block(m): - self.assertTrue(m.with_cp) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.models.backbones import ShuffleNetV2 +from mmpose.models.backbones.shufflenet_v2 import InvertedResidual + + +class TestShufflenetV2(TestCase): + + @staticmethod + def is_block(modules): + """Check if is ResNet building block.""" + if isinstance(modules, (InvertedResidual, )): + return True + return False + + @staticmethod + def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + @staticmethod + def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + def test_shufflenetv2_invertedresidual(self): + + with self.assertRaises(AssertionError): + # when stride==1, in_channels should be equal to + # out_channels // 2 * 2 + InvertedResidual(24, 32, stride=1) + + with self.assertRaises(AssertionError): + # when in_channels != out_channels // 2 * 2, stride should not be + # equal to 1. 
+ InvertedResidual(24, 32, stride=1) + + # Test InvertedResidual forward + block = InvertedResidual(24, 48, stride=2) + x = torch.randn(1, 24, 56, 56) + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size((1, 48, 28, 28))) + + # Test InvertedResidual with checkpoint forward + block = InvertedResidual(48, 48, stride=1, with_cp=True) + self.assertTrue(block.with_cp) + x = torch.randn(1, 48, 56, 56) + x.requires_grad = True + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size((1, 48, 56, 56))) + + def test_shufflenetv2_backbone(self): + + with self.assertRaises(ValueError): + # groups must be in 0.5, 1.0, 1.5, 2.0] + ShuffleNetV2(widen_factor=3.0) + + with self.assertRaises(ValueError): + # frozen_stages must be in [0, 1, 2, 3] + ShuffleNetV2(widen_factor=1.0, frozen_stages=4) + + with self.assertRaises(ValueError): + # out_indices must be in [0, 1, 2, 3] + ShuffleNetV2(widen_factor=1.0, out_indices=(4, )) + + with self.assertRaises(TypeError): + # init_weights must have no parameter + model = ShuffleNetV2() + model.init_weights(pretrained=1) + + # Test ShuffleNetV2 norm state + model = ShuffleNetV2() + model.init_weights() + model.train() + self.assertTrue(self.check_norm_state(model.modules(), True)) + + # Test ShuffleNetV2 with first stage frozen + frozen_stages = 1 + model = ShuffleNetV2(frozen_stages=frozen_stages) + model.init_weights() + model.train() + for param in model.conv1.parameters(): + self.assertFalse(param.requires_grad) + for i in range(0, frozen_stages): + layer = model.layers[i] + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + self.assertFalse(mod.training) + for param in layer.parameters(): + self.assertFalse(param.requires_grad) + + # Test ShuffleNetV2 with norm_eval + model = ShuffleNetV2(norm_eval=True) + model.init_weights() + model.train() + + self.assertTrue(self.check_norm_state(model.modules(), False)) + + # Test ShuffleNetV2 forward with widen_factor=0.5 + model = ShuffleNetV2(widen_factor=0.5, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + for m in model.modules(): + if self.is_norm(m): + self.assertIsInstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 4) + self.assertEqual(feat[0].shape, torch.Size((1, 48, 28, 28))) + self.assertEqual(feat[1].shape, torch.Size((1, 96, 14, 14))) + self.assertEqual(feat[2].shape, torch.Size((1, 192, 7, 7))) + + # Test ShuffleNetV2 forward with widen_factor=1.0 + model = ShuffleNetV2(widen_factor=1.0, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + for m in model.modules(): + if self.is_norm(m): + self.assertIsInstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 4) + self.assertEqual(feat[0].shape, torch.Size((1, 116, 28, 28))) + self.assertEqual(feat[1].shape, torch.Size((1, 232, 14, 14))) + self.assertEqual(feat[2].shape, torch.Size((1, 464, 7, 7))) + + # Test ShuffleNetV2 forward with widen_factor=1.5 + model = ShuffleNetV2(widen_factor=1.5, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + for m in model.modules(): + if self.is_norm(m): + self.assertIsInstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 4) + self.assertEqual(feat[0].shape, torch.Size((1, 176, 28, 28))) + self.assertEqual(feat[1].shape, torch.Size((1, 352, 14, 14))) + self.assertEqual(feat[2].shape, torch.Size((1, 704, 7, 7))) + + # Test ShuffleNetV2 forward with widen_factor=2.0 + 
model = ShuffleNetV2(widen_factor=2.0, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + for m in model.modules(): + if self.is_norm(m): + self.assertIsInstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 4) + self.assertEqual(feat[0].shape, torch.Size((1, 244, 28, 28))) + self.assertEqual(feat[1].shape, torch.Size((1, 488, 14, 14))) + self.assertEqual(feat[2].shape, torch.Size((1, 976, 7, 7))) + + # Test ShuffleNetV2 forward with layers 3 forward + model = ShuffleNetV2(widen_factor=1.0, out_indices=(2, )) + model.init_weights() + model.train() + + for m in model.modules(): + if self.is_norm(m): + self.assertIsInstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertIsInstance(feat, tuple) + self.assertEqual(feat[-1].shape, torch.Size((1, 464, 7, 7))) + + # Test ShuffleNetV2 forward with layers 1 2 forward + model = ShuffleNetV2(widen_factor=1.0, out_indices=(1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if self.is_norm(m): + self.assertIsInstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 2) + self.assertEqual(feat[0].shape, torch.Size((1, 232, 14, 14))) + self.assertEqual(feat[1].shape, torch.Size((1, 464, 7, 7))) + + # Test ShuffleNetV2 forward with checkpoint forward + model = ShuffleNetV2(widen_factor=1.0, with_cp=True) + for m in model.modules(): + if self.is_block(m): + self.assertTrue(m.with_cp) diff --git a/tests/test_models/test_backbones/test_swin.py b/tests/test_models/test_backbones/test_swin.py index df1ca6d7eb..8ba2d3435f 100644 --- a/tests/test_models/test_backbones/test_swin.py +++ b/tests/test_models/test_backbones/test_swin.py @@ -1,81 +1,81 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import torch - -from mmpose.models.backbones.swin import SwinBlock, SwinTransformer - - -class TestSwin(TestCase): - - def test_swin_block(self): - # test SwinBlock structure and forward - block = SwinBlock(embed_dims=64, num_heads=4, feedforward_channels=256) - self.assertEqual(block.ffn.embed_dims, 64) - self.assertEqual(block.attn.w_msa.num_heads, 4) - self.assertEqual(block.ffn.feedforward_channels, 256) - x = torch.randn(1, 56 * 56, 64) - x_out = block(x, (56, 56)) - self.assertEqual(x_out.shape, torch.Size([1, 56 * 56, 64])) - - # Test BasicBlock with checkpoint forward - block = SwinBlock( - embed_dims=64, num_heads=4, feedforward_channels=256, with_cp=True) - self.assertTrue(block.with_cp) - x = torch.randn(1, 56 * 56, 64) - x_out = block(x, (56, 56)) - self.assertEqual(x_out.shape, torch.Size([1, 56 * 56, 64])) - - def test_swin_transformer(self): - """Test Swin Transformer backbone.""" - - with self.assertRaises(AssertionError): - # Because swin uses non-overlapping patch embed, so the stride of - # patch embed must be equal to patch size. 
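Editor's note, as a quick reference for the ShuffleNetV2 shape assertions above: each widen_factor maps to a fixed triple of stage output channels, at 28x28, 14x14 and 7x7 for a 224x224 input. The table below simply restates the values the tests expect; treat it as a reading aid, not a definition taken from the mmpose source.

    # Stage output channels asserted in the ShuffleNetV2 tests (224x224 input).
    STAGE_CHANNELS = {
        0.5: (48, 96, 192),
        1.0: (116, 232, 464),
        1.5: (176, 352, 704),
        2.0: (244, 488, 976),
    }

    for widen_factor, (c1, c2, c3) in STAGE_CHANNELS.items():
        # Feature maps come out at 28x28, 14x14 and 7x7 respectively.
        print(f'widen_factor={widen_factor}: {c1}@28x28, {c2}@14x14, {c3}@7x7')
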
- SwinTransformer(strides=(2, 2, 2, 2), patch_size=4) - - # test pretrained image size - with self.assertRaises(AssertionError): - SwinTransformer(pretrain_img_size=(224, 224, 224)) - - # Test absolute position embedding - temp = torch.randn((1, 3, 224, 224)) - model = SwinTransformer(pretrain_img_size=224, use_abs_pos_embed=True) - model.init_weights() - model(temp) - - # Test patch norm - model = SwinTransformer(patch_norm=False) - model(temp) - - # Test normal inference - temp = torch.randn((1, 3, 32, 32)) - model = SwinTransformer() - outs = model(temp) - self.assertEqual(outs[0].shape, (1, 96, 8, 8)) - self.assertEqual(outs[1].shape, (1, 192, 4, 4)) - self.assertEqual(outs[2].shape, (1, 384, 2, 2)) - self.assertEqual(outs[3].shape, (1, 768, 1, 1)) - - # Test abnormal inference size - temp = torch.randn((1, 3, 31, 31)) - model = SwinTransformer() - outs = model(temp) - self.assertEqual(outs[0].shape, (1, 96, 8, 8)) - self.assertEqual(outs[1].shape, (1, 192, 4, 4)) - self.assertEqual(outs[2].shape, (1, 384, 2, 2)) - self.assertEqual(outs[3].shape, (1, 768, 1, 1)) - - # Test abnormal inference size - temp = torch.randn((1, 3, 112, 137)) - model = SwinTransformer() - outs = model(temp) - self.assertEqual(outs[0].shape, (1, 96, 28, 35)) - self.assertEqual(outs[1].shape, (1, 192, 14, 18)) - self.assertEqual(outs[2].shape, (1, 384, 7, 9)) - self.assertEqual(outs[3].shape, (1, 768, 4, 5)) - - model = SwinTransformer(frozen_stages=4) - model.train() - for p in model.parameters(): - self.assertFalse(p.requires_grad) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import torch + +from mmpose.models.backbones.swin import SwinBlock, SwinTransformer + + +class TestSwin(TestCase): + + def test_swin_block(self): + # test SwinBlock structure and forward + block = SwinBlock(embed_dims=64, num_heads=4, feedforward_channels=256) + self.assertEqual(block.ffn.embed_dims, 64) + self.assertEqual(block.attn.w_msa.num_heads, 4) + self.assertEqual(block.ffn.feedforward_channels, 256) + x = torch.randn(1, 56 * 56, 64) + x_out = block(x, (56, 56)) + self.assertEqual(x_out.shape, torch.Size([1, 56 * 56, 64])) + + # Test BasicBlock with checkpoint forward + block = SwinBlock( + embed_dims=64, num_heads=4, feedforward_channels=256, with_cp=True) + self.assertTrue(block.with_cp) + x = torch.randn(1, 56 * 56, 64) + x_out = block(x, (56, 56)) + self.assertEqual(x_out.shape, torch.Size([1, 56 * 56, 64])) + + def test_swin_transformer(self): + """Test Swin Transformer backbone.""" + + with self.assertRaises(AssertionError): + # Because swin uses non-overlapping patch embed, so the stride of + # patch embed must be equal to patch size. 
+ SwinTransformer(strides=(2, 2, 2, 2), patch_size=4) + + # test pretrained image size + with self.assertRaises(AssertionError): + SwinTransformer(pretrain_img_size=(224, 224, 224)) + + # Test absolute position embedding + temp = torch.randn((1, 3, 224, 224)) + model = SwinTransformer(pretrain_img_size=224, use_abs_pos_embed=True) + model.init_weights() + model(temp) + + # Test patch norm + model = SwinTransformer(patch_norm=False) + model(temp) + + # Test normal inference + temp = torch.randn((1, 3, 32, 32)) + model = SwinTransformer() + outs = model(temp) + self.assertEqual(outs[0].shape, (1, 96, 8, 8)) + self.assertEqual(outs[1].shape, (1, 192, 4, 4)) + self.assertEqual(outs[2].shape, (1, 384, 2, 2)) + self.assertEqual(outs[3].shape, (1, 768, 1, 1)) + + # Test abnormal inference size + temp = torch.randn((1, 3, 31, 31)) + model = SwinTransformer() + outs = model(temp) + self.assertEqual(outs[0].shape, (1, 96, 8, 8)) + self.assertEqual(outs[1].shape, (1, 192, 4, 4)) + self.assertEqual(outs[2].shape, (1, 384, 2, 2)) + self.assertEqual(outs[3].shape, (1, 768, 1, 1)) + + # Test abnormal inference size + temp = torch.randn((1, 3, 112, 137)) + model = SwinTransformer() + outs = model(temp) + self.assertEqual(outs[0].shape, (1, 96, 28, 35)) + self.assertEqual(outs[1].shape, (1, 192, 14, 18)) + self.assertEqual(outs[2].shape, (1, 384, 7, 9)) + self.assertEqual(outs[3].shape, (1, 768, 4, 5)) + + model = SwinTransformer(frozen_stages=4) + model.train() + for p in model.parameters(): + self.assertFalse(p.requires_grad) diff --git a/tests/test_models/test_backbones/test_tcn.py b/tests/test_models/test_backbones/test_tcn.py index ba271a25f0..a492db2374 100644 --- a/tests/test_models/test_backbones/test_tcn.py +++ b/tests/test_models/test_backbones/test_tcn.py @@ -1,157 +1,157 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
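Editor's note on the "abnormal inference size" checks in the Swin test above: the expected shapes are just ceiling division, with the patch embed downsampling by 4, each subsequent stage by a further 2, and embed dims 96, 192, 384, 768. The sketch below reproduces the asserted shapes for a 112x137 input, assuming this padding behaviour; the test assertions themselves are the ground truth, and `swin_feature_sizes` is an illustrative helper.

    import math


    def swin_feature_sizes(h, w, patch_size=4, num_stages=4, embed_dims=96):
        """Expected (C, H, W) per stage under ceil-division downsampling."""
        sizes = []
        h, w = math.ceil(h / patch_size), math.ceil(w / patch_size)
        for i in range(num_stages):
            sizes.append((embed_dims * 2 ** i, h, w))
            h, w = math.ceil(h / 2), math.ceil(w / 2)  # next stage merges patches
        return sizes


    assert swin_feature_sizes(112, 137) == [
        (96, 28, 35), (192, 14, 18), (384, 7, 9), (768, 4, 5)]
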
-from unittest import TestCase - -import numpy as np -import torch -import torch.nn as nn - -from mmpose.models.backbones import TCN -from mmpose.models.backbones.tcn import BasicTemporalBlock - - -class TestTCN(TestCase): - - def test_basic_temporal_block(self): - with self.assertRaises(AssertionError): - # padding( + shift) should not be larger than x.shape[2] - block = BasicTemporalBlock(1024, 1024, dilation=81) - x = torch.rand(2, 1024, 150) - x_out = block(x) - - with self.assertRaises(AssertionError): - # when use_stride_conv is True, shift + kernel_size // 2 should - # not be larger than x.shape[2] - block = BasicTemporalBlock( - 1024, 1024, kernel_size=5, causal=True, use_stride_conv=True) - x = torch.rand(2, 1024, 3) - x_out = block(x) - - # BasicTemporalBlock with causal == False - block = BasicTemporalBlock(1024, 1024) - x = torch.rand(2, 1024, 241) - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size([2, 1024, 235])) - - # BasicTemporalBlock with causal == True - block = BasicTemporalBlock(1024, 1024, causal=True) - x = torch.rand(2, 1024, 241) - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size([2, 1024, 235])) - - # BasicTemporalBlock with residual == False - block = BasicTemporalBlock(1024, 1024, residual=False) - x = torch.rand(2, 1024, 241) - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size([2, 1024, 235])) - - # BasicTemporalBlock, use_stride_conv == True - block = BasicTemporalBlock(1024, 1024, use_stride_conv=True) - x = torch.rand(2, 1024, 81) - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size([2, 1024, 27])) - - # BasicTemporalBlock with use_stride_conv == True and causal == True - block = BasicTemporalBlock( - 1024, 1024, use_stride_conv=True, causal=True) - x = torch.rand(2, 1024, 81) - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size([2, 1024, 27])) - - def test_tcn_backbone(self): - with self.assertRaises(AssertionError): - # num_blocks should equal len(kernel_sizes) - 1 - TCN(in_channels=34, num_blocks=3, kernel_sizes=(3, 3, 3)) - - with self.assertRaises(AssertionError): - # kernel size should be odd - TCN(in_channels=34, kernel_sizes=(3, 4, 3)) - - # Test TCN with 2 blocks (use_stride_conv == False) - model = TCN(in_channels=34, num_blocks=2, kernel_sizes=(3, 3, 3)) - pose2d = torch.rand((2, 34, 243)) - feat = model(pose2d) - self.assertEqual(len(feat), 2) - self.assertEqual(feat[0].shape, (2, 1024, 235)) - self.assertEqual(feat[1].shape, (2, 1024, 217)) - - # Test TCN with 4 blocks and weight norm clip - max_norm = 0.1 - model = TCN( - in_channels=34, - num_blocks=4, - kernel_sizes=(3, 3, 3, 3, 3), - max_norm=max_norm) - pose2d = torch.rand((2, 34, 243)) - feat = model(pose2d) - self.assertEqual(len(feat), 4) - self.assertEqual(feat[0].shape, (2, 1024, 235)) - self.assertEqual(feat[1].shape, (2, 1024, 217)) - self.assertEqual(feat[2].shape, (2, 1024, 163)) - self.assertEqual(feat[3].shape, (2, 1024, 1)) - - for module in model.modules(): - if isinstance(module, torch.nn.modules.conv._ConvNd): - norm = module.weight.norm().item() - np.testing.assert_allclose( - np.maximum(norm, max_norm), max_norm, rtol=1e-4) - - # Test TCN with 4 blocks (use_stride_conv == True) - model = TCN( - in_channels=34, - num_blocks=4, - kernel_sizes=(3, 3, 3, 3, 3), - use_stride_conv=True) - pose2d = torch.rand((2, 34, 243)) - feat = model(pose2d) - self.assertEqual(len(feat), 4) - self.assertEqual(feat[0].shape, (2, 1024, 27)) - self.assertEqual(feat[1].shape, (2, 1024, 9)) - self.assertEqual(feat[2].shape, (2, 1024, 3)) - 
self.assertEqual(feat[3].shape, (2, 1024, 1)) - - # Check that the model w. or w/o use_stride_conv will have the same - # output and gradient after a forward+backward pass - model1 = TCN( - in_channels=34, - stem_channels=4, - num_blocks=1, - kernel_sizes=(3, 3), - dropout=0, - residual=False, - norm_cfg=None) - model2 = TCN( - in_channels=34, - stem_channels=4, - num_blocks=1, - kernel_sizes=(3, 3), - dropout=0, - residual=False, - norm_cfg=None, - use_stride_conv=True) - for m in model1.modules(): - if isinstance(m, nn.Conv1d): - nn.init.constant_(m.weight, 0.5) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - for m in model2.modules(): - if isinstance(m, nn.Conv1d): - nn.init.constant_(m.weight, 0.5) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - input1 = torch.rand((1, 34, 9)) - input2 = input1.clone() - outputs1 = model1(input1) - outputs2 = model2(input2) - for output1, output2 in zip(outputs1, outputs2): - self.assertTrue(torch.isclose(output1, output2).all()) - - criterion = nn.MSELoss() - target = torch.rand(output1.shape) - loss1 = criterion(output1, target) - loss2 = criterion(output2, target) - loss1.backward() - loss2.backward() - for m1, m2 in zip(model1.modules(), model2.modules()): - if isinstance(m1, nn.Conv1d): - self.assertTrue( - torch.isclose(m1.weight.grad, m2.weight.grad).all()) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np +import torch +import torch.nn as nn + +from mmpose.models.backbones import TCN +from mmpose.models.backbones.tcn import BasicTemporalBlock + + +class TestTCN(TestCase): + + def test_basic_temporal_block(self): + with self.assertRaises(AssertionError): + # padding( + shift) should not be larger than x.shape[2] + block = BasicTemporalBlock(1024, 1024, dilation=81) + x = torch.rand(2, 1024, 150) + x_out = block(x) + + with self.assertRaises(AssertionError): + # when use_stride_conv is True, shift + kernel_size // 2 should + # not be larger than x.shape[2] + block = BasicTemporalBlock( + 1024, 1024, kernel_size=5, causal=True, use_stride_conv=True) + x = torch.rand(2, 1024, 3) + x_out = block(x) + + # BasicTemporalBlock with causal == False + block = BasicTemporalBlock(1024, 1024) + x = torch.rand(2, 1024, 241) + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size([2, 1024, 235])) + + # BasicTemporalBlock with causal == True + block = BasicTemporalBlock(1024, 1024, causal=True) + x = torch.rand(2, 1024, 241) + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size([2, 1024, 235])) + + # BasicTemporalBlock with residual == False + block = BasicTemporalBlock(1024, 1024, residual=False) + x = torch.rand(2, 1024, 241) + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size([2, 1024, 235])) + + # BasicTemporalBlock, use_stride_conv == True + block = BasicTemporalBlock(1024, 1024, use_stride_conv=True) + x = torch.rand(2, 1024, 81) + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size([2, 1024, 27])) + + # BasicTemporalBlock with use_stride_conv == True and causal == True + block = BasicTemporalBlock( + 1024, 1024, use_stride_conv=True, causal=True) + x = torch.rand(2, 1024, 81) + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size([2, 1024, 27])) + + def test_tcn_backbone(self): + with self.assertRaises(AssertionError): + # num_blocks should equal len(kernel_sizes) - 1 + TCN(in_channels=34, num_blocks=3, kernel_sizes=(3, 3, 3)) + + with self.assertRaises(AssertionError): + # kernel size should be odd + TCN(in_channels=34, kernel_sizes=(3, 4, 3)) 
+ + # Test TCN with 2 blocks (use_stride_conv == False) + model = TCN(in_channels=34, num_blocks=2, kernel_sizes=(3, 3, 3)) + pose2d = torch.rand((2, 34, 243)) + feat = model(pose2d) + self.assertEqual(len(feat), 2) + self.assertEqual(feat[0].shape, (2, 1024, 235)) + self.assertEqual(feat[1].shape, (2, 1024, 217)) + + # Test TCN with 4 blocks and weight norm clip + max_norm = 0.1 + model = TCN( + in_channels=34, + num_blocks=4, + kernel_sizes=(3, 3, 3, 3, 3), + max_norm=max_norm) + pose2d = torch.rand((2, 34, 243)) + feat = model(pose2d) + self.assertEqual(len(feat), 4) + self.assertEqual(feat[0].shape, (2, 1024, 235)) + self.assertEqual(feat[1].shape, (2, 1024, 217)) + self.assertEqual(feat[2].shape, (2, 1024, 163)) + self.assertEqual(feat[3].shape, (2, 1024, 1)) + + for module in model.modules(): + if isinstance(module, torch.nn.modules.conv._ConvNd): + norm = module.weight.norm().item() + np.testing.assert_allclose( + np.maximum(norm, max_norm), max_norm, rtol=1e-4) + + # Test TCN with 4 blocks (use_stride_conv == True) + model = TCN( + in_channels=34, + num_blocks=4, + kernel_sizes=(3, 3, 3, 3, 3), + use_stride_conv=True) + pose2d = torch.rand((2, 34, 243)) + feat = model(pose2d) + self.assertEqual(len(feat), 4) + self.assertEqual(feat[0].shape, (2, 1024, 27)) + self.assertEqual(feat[1].shape, (2, 1024, 9)) + self.assertEqual(feat[2].shape, (2, 1024, 3)) + self.assertEqual(feat[3].shape, (2, 1024, 1)) + + # Check that the model w. or w/o use_stride_conv will have the same + # output and gradient after a forward+backward pass + model1 = TCN( + in_channels=34, + stem_channels=4, + num_blocks=1, + kernel_sizes=(3, 3), + dropout=0, + residual=False, + norm_cfg=None) + model2 = TCN( + in_channels=34, + stem_channels=4, + num_blocks=1, + kernel_sizes=(3, 3), + dropout=0, + residual=False, + norm_cfg=None, + use_stride_conv=True) + for m in model1.modules(): + if isinstance(m, nn.Conv1d): + nn.init.constant_(m.weight, 0.5) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + for m in model2.modules(): + if isinstance(m, nn.Conv1d): + nn.init.constant_(m.weight, 0.5) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + input1 = torch.rand((1, 34, 9)) + input2 = input1.clone() + outputs1 = model1(input1) + outputs2 = model2(input2) + for output1, output2 in zip(outputs1, outputs2): + self.assertTrue(torch.isclose(output1, output2).all()) + + criterion = nn.MSELoss() + target = torch.rand(output1.shape) + loss1 = criterion(output1, target) + loss2 = criterion(output2, target) + loss1.backward() + loss2.backward() + for m1, m2 in zip(model1.modules(), model2.modules()): + if isinstance(m1, nn.Conv1d): + self.assertTrue( + torch.isclose(m1.weight.grad, m2.weight.grad).all()) diff --git a/tests/test_models/test_backbones/test_v2v_net.py b/tests/test_models/test_backbones/test_v2v_net.py index fba3fda08f..52e9e3d6fb 100644 --- a/tests/test_models/test_backbones/test_v2v_net.py +++ b/tests/test_models/test_backbones/test_v2v_net.py @@ -1,17 +1,17 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import torch - -from mmpose.models.backbones import V2VNet - - -class TestV2Vnet(TestCase): - - def test_v2v_net(self): - """Test V2VNet.""" - model = V2VNet(input_channels=17, output_channels=15) - input = torch.randn(2, 17, 32, 32, 32) - output = model(input) - self.assertIsInstance(output, tuple) - self.assertEqual(output[-1].shape, (2, 15, 32, 32, 32)) +# Copyright (c) OpenMMLab. All rights reserved. 
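Editor's note on the temporal lengths asserted in the TCN test above: with kernel_sizes (3, 3, 3, 3, 3) the stem conv trims 2 frames and each later block, dilated by the product of the preceding kernel sizes, trims (k - 1) * dilation more, so 243 frames shrink to 241, 235, 217, 163 and finally 1; with use_stride_conv=True each conv instead divides the length by its kernel size (243 -> 81 -> 27 -> 9 -> 3 -> 1). A small sketch of that arithmetic, assuming this is how the kernel sizes are consumed; `tcn_temporal_lengths` is an illustrative helper, and the model's returned features correspond to the block outputs (everything after the stem).

    def tcn_temporal_lengths(num_frames, kernel_sizes, use_stride_conv=False):
        """Output length after the stem and each block, as the assertions imply."""
        t = num_frames
        lengths = []
        dilation = 1
        for k in kernel_sizes:
            if use_stride_conv:
                t = t // k                   # strided conv shrinks length by factor k
            else:
                t = t - (k - 1) * dilation   # dilated 'valid' conv trims (k-1)*dilation
            dilation *= k                    # next block dilates by the product so far
            lengths.append(t)
        return lengths


    # stem + 4 blocks, 243 input frames
    assert tcn_temporal_lengths(243, (3, 3, 3, 3, 3)) == [241, 235, 217, 163, 1]
    assert tcn_temporal_lengths(243, (3, 3, 3, 3, 3),
                                use_stride_conv=True) == [81, 27, 9, 3, 1]
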
+from unittest import TestCase + +import torch + +from mmpose.models.backbones import V2VNet + + +class TestV2Vnet(TestCase): + + def test_v2v_net(self): + """Test V2VNet.""" + model = V2VNet(input_channels=17, output_channels=15) + input = torch.randn(2, 17, 32, 32, 32) + output = model(input) + self.assertIsInstance(output, tuple) + self.assertEqual(output[-1].shape, (2, 15, 32, 32, 32)) diff --git a/tests/test_models/test_backbones/test_vgg.py b/tests/test_models/test_backbones/test_vgg.py index 872fbe6d85..49168222ac 100644 --- a/tests/test_models/test_backbones/test_vgg.py +++ b/tests/test_models/test_backbones/test_vgg.py @@ -1,137 +1,137 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import torch -from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm - -from mmpose.models.backbones import VGG - - -class TestVGG(TestCase): - - @staticmethod - def check_norm_state(modules, train_state): - """Check if norm layer is in correct train state.""" - for mod in modules: - if isinstance(mod, _BatchNorm): - if mod.training != train_state: - return False - return True - - def test_vgg(self): - """Test VGG backbone.""" - with self.assertRaises(KeyError): - # VGG depth should be in [11, 13, 16, 19] - VGG(18) - - with self.assertRaises(AssertionError): - # In VGG: 1 <= num_stages <= 5 - VGG(11, num_stages=0) - - with self.assertRaises(AssertionError): - # In VGG: 1 <= num_stages <= 5 - VGG(11, num_stages=6) - - with self.assertRaises(AssertionError): - # len(dilations) == num_stages - VGG(11, dilations=(1, 1), num_stages=3) - - # Test VGG11 norm_eval=True - model = VGG(11, norm_eval=True) - model.init_weights() - model.train() - self.assertTrue(self.check_norm_state(model.modules(), False)) - - # Test VGG11 forward without classifiers - model = VGG(11, out_indices=(0, 1, 2, 3, 4)) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 5) - self.assertEqual(feat[0].shape, (1, 64, 112, 112)) - self.assertEqual(feat[1].shape, (1, 128, 56, 56)) - self.assertEqual(feat[2].shape, (1, 256, 28, 28)) - self.assertEqual(feat[3].shape, (1, 512, 14, 14)) - self.assertEqual(feat[4].shape, (1, 512, 7, 7)) - - # Test VGG11 forward with classifiers - model = VGG(11, num_classes=10, out_indices=(0, 1, 2, 3, 4, 5)) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 6) - self.assertEqual(feat[0].shape, (1, 64, 112, 112)) - self.assertEqual(feat[1].shape, (1, 128, 56, 56)) - self.assertEqual(feat[2].shape, (1, 256, 28, 28)) - self.assertEqual(feat[3].shape, (1, 512, 14, 14)) - self.assertEqual(feat[4].shape, (1, 512, 7, 7)) - self.assertEqual(feat[5].shape, (1, 10)) - - # Test VGG11BN forward - model = VGG(11, norm_cfg=dict(type='BN'), out_indices=(0, 1, 2, 3, 4)) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 5) - self.assertEqual(feat[0].shape, (1, 64, 112, 112)) - self.assertEqual(feat[1].shape, (1, 128, 56, 56)) - self.assertEqual(feat[2].shape, (1, 256, 28, 28)) - self.assertEqual(feat[3].shape, (1, 512, 14, 14)) - self.assertEqual(feat[4].shape, (1, 512, 7, 7)) - - # Test VGG11BN forward with classifiers - model = VGG( - 11, - num_classes=10, - norm_cfg=dict(type='BN'), - out_indices=(0, 1, 2, 3, 4, 5)) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 6) - 
self.assertEqual(feat[0].shape, (1, 64, 112, 112)) - self.assertEqual(feat[1].shape, (1, 128, 56, 56)) - self.assertEqual(feat[2].shape, (1, 256, 28, 28)) - self.assertEqual(feat[3].shape, (1, 512, 14, 14)) - self.assertEqual(feat[4].shape, (1, 512, 7, 7)) - self.assertEqual(feat[5].shape, (1, 10)) - - # Test VGG13 with layers 1, 2, 3 out forward - model = VGG(13, out_indices=(0, 1, 2)) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 3) - self.assertEqual(feat[0].shape, (1, 64, 112, 112)) - self.assertEqual(feat[1].shape, (1, 128, 56, 56)) - self.assertEqual(feat[2].shape, (1, 256, 28, 28)) - - # Test VGG16 with top feature maps out forward - model = VGG(16) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 1) - self.assertEqual(feat[-1].shape, (1, 512, 7, 7)) - - # Test VGG19 with classification score out forward - model = VGG(19, num_classes=10) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 1) - self.assertEqual(feat[-1].shape, (1, 10)) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import torch +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmpose.models.backbones import VGG + + +class TestVGG(TestCase): + + @staticmethod + def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + def test_vgg(self): + """Test VGG backbone.""" + with self.assertRaises(KeyError): + # VGG depth should be in [11, 13, 16, 19] + VGG(18) + + with self.assertRaises(AssertionError): + # In VGG: 1 <= num_stages <= 5 + VGG(11, num_stages=0) + + with self.assertRaises(AssertionError): + # In VGG: 1 <= num_stages <= 5 + VGG(11, num_stages=6) + + with self.assertRaises(AssertionError): + # len(dilations) == num_stages + VGG(11, dilations=(1, 1), num_stages=3) + + # Test VGG11 norm_eval=True + model = VGG(11, norm_eval=True) + model.init_weights() + model.train() + self.assertTrue(self.check_norm_state(model.modules(), False)) + + # Test VGG11 forward without classifiers + model = VGG(11, out_indices=(0, 1, 2, 3, 4)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 5) + self.assertEqual(feat[0].shape, (1, 64, 112, 112)) + self.assertEqual(feat[1].shape, (1, 128, 56, 56)) + self.assertEqual(feat[2].shape, (1, 256, 28, 28)) + self.assertEqual(feat[3].shape, (1, 512, 14, 14)) + self.assertEqual(feat[4].shape, (1, 512, 7, 7)) + + # Test VGG11 forward with classifiers + model = VGG(11, num_classes=10, out_indices=(0, 1, 2, 3, 4, 5)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 6) + self.assertEqual(feat[0].shape, (1, 64, 112, 112)) + self.assertEqual(feat[1].shape, (1, 128, 56, 56)) + self.assertEqual(feat[2].shape, (1, 256, 28, 28)) + self.assertEqual(feat[3].shape, (1, 512, 14, 14)) + self.assertEqual(feat[4].shape, (1, 512, 7, 7)) + self.assertEqual(feat[5].shape, (1, 10)) + + # Test VGG11BN forward + model = VGG(11, norm_cfg=dict(type='BN'), out_indices=(0, 1, 2, 3, 4)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 
5) + self.assertEqual(feat[0].shape, (1, 64, 112, 112)) + self.assertEqual(feat[1].shape, (1, 128, 56, 56)) + self.assertEqual(feat[2].shape, (1, 256, 28, 28)) + self.assertEqual(feat[3].shape, (1, 512, 14, 14)) + self.assertEqual(feat[4].shape, (1, 512, 7, 7)) + + # Test VGG11BN forward with classifiers + model = VGG( + 11, + num_classes=10, + norm_cfg=dict(type='BN'), + out_indices=(0, 1, 2, 3, 4, 5)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 6) + self.assertEqual(feat[0].shape, (1, 64, 112, 112)) + self.assertEqual(feat[1].shape, (1, 128, 56, 56)) + self.assertEqual(feat[2].shape, (1, 256, 28, 28)) + self.assertEqual(feat[3].shape, (1, 512, 14, 14)) + self.assertEqual(feat[4].shape, (1, 512, 7, 7)) + self.assertEqual(feat[5].shape, (1, 10)) + + # Test VGG13 with layers 1, 2, 3 out forward + model = VGG(13, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 3) + self.assertEqual(feat[0].shape, (1, 64, 112, 112)) + self.assertEqual(feat[1].shape, (1, 128, 56, 56)) + self.assertEqual(feat[2].shape, (1, 256, 28, 28)) + + # Test VGG16 with top feature maps out forward + model = VGG(16) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 1) + self.assertEqual(feat[-1].shape, (1, 512, 7, 7)) + + # Test VGG19 with classification score out forward + model = VGG(19, num_classes=10) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 1) + self.assertEqual(feat[-1].shape, (1, 10)) diff --git a/tests/test_models/test_backbones/test_vipnas_mbv3.py b/tests/test_models/test_backbones/test_vipnas_mbv3.py index 99986cd149..3213d59c7d 100644 --- a/tests/test_models/test_backbones/test_vipnas_mbv3.py +++ b/tests/test_models/test_backbones/test_vipnas_mbv3.py @@ -1,102 +1,102 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
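For reference alongside the test_vgg.py diff above, a minimal sketch of using the VGG backbone as a plain feature extractor; the constructor arguments and per-stage channel counts are taken from the test assertions, and the eval/no_grad wrapping is ordinary PyTorch rather than anything specific to the test.

import torch
from mmpose.models.backbones import VGG

# Build VGG-11 with all five stages returned and run a dummy forward pass.
# The expected channel counts mirror the shape assertions in the test above.
model = VGG(11, out_indices=(0, 1, 2, 3, 4))
model.init_weights()
model.eval()

with torch.no_grad():
    feats = model(torch.randn(1, 3, 224, 224))

for feat, channels in zip(feats, (64, 128, 256, 512, 512)):
    assert feat.shape[1] == channels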
-from unittest import TestCase - -import torch -from torch.nn.modules import GroupNorm -from torch.nn.modules.batchnorm import _BatchNorm - -from mmpose.models.backbones import ViPNAS_MobileNetV3 -from mmpose.models.backbones.utils import InvertedResidual - - -class TestVipnasMbv3(TestCase): - - @staticmethod - def is_norm(modules): - """Check if is one of the norms.""" - if isinstance(modules, (GroupNorm, _BatchNorm)): - return True - return False - - @staticmethod - def check_norm_state(modules, train_state): - """Check if norm layer is in correct train state.""" - for mod in modules: - if isinstance(mod, _BatchNorm): - if mod.training != train_state: - return False - return True - - def test_mobilenetv3_backbone(self): - with self.assertRaises(TypeError): - # init_weights must have no parameter - model = ViPNAS_MobileNetV3() - model.init_weights(pretrained=0) - - with self.assertRaises(AttributeError): - # frozen_stages must no more than 21 - model = ViPNAS_MobileNetV3(frozen_stages=22) - model.train() - - # Test MobileNetv3 - model = ViPNAS_MobileNetV3() - model.init_weights() - model.train() - - # Test MobileNetv3 with first stage frozen - frozen_stages = 1 - model = ViPNAS_MobileNetV3(frozen_stages=frozen_stages) - model.init_weights() - model.train() - for param in model.conv1.parameters(): - self.assertFalse(param.requires_grad) - for i in range(1, frozen_stages + 1): - layer = getattr(model, f'layer{i}') - for mod in layer.modules(): - if isinstance(mod, _BatchNorm): - self.assertFalse(mod.training) - for param in layer.parameters(): - self.assertFalse(param.requires_grad) - - # Test MobileNetv3 with norm eval - model = ViPNAS_MobileNetV3(norm_eval=True) - model.init_weights() - model.train() - self.assertTrue(self.check_norm_state(model.modules(), False)) - - # Test MobileNetv3 forward - model = ViPNAS_MobileNetV3() - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertIsInstance(feat, tuple) - self.assertEqual(feat[-1].shape, torch.Size([1, 160, 7, 7])) - - # Test MobileNetv3 forward with GroupNorm - model = ViPNAS_MobileNetV3( - norm_cfg=dict(type='GN', num_groups=2, requires_grad=True)) - for m in model.modules(): - if self.is_norm(m): - self.assertIsInstance(m, GroupNorm) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertIsInstance(feat, tuple) - self.assertEqual(feat[-1].shape, torch.Size([1, 160, 7, 7])) - - # Test MobileNetv3 with checkpoint forward - model = ViPNAS_MobileNetV3(with_cp=True) - for m in model.modules(): - if isinstance(m, InvertedResidual): - self.assertTrue(m.with_cp) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertIsInstance(feat, tuple) - self.assertEqual(feat[-1].shape, torch.Size([1, 160, 7, 7])) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpose.models.backbones import ViPNAS_MobileNetV3 +from mmpose.models.backbones.utils import InvertedResidual + + +class TestVipnasMbv3(TestCase): + + @staticmethod + def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + @staticmethod + def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + def test_mobilenetv3_backbone(self): + with self.assertRaises(TypeError): + # init_weights must have no parameter + model = ViPNAS_MobileNetV3() + model.init_weights(pretrained=0) + + with self.assertRaises(AttributeError): + # frozen_stages must no more than 21 + model = ViPNAS_MobileNetV3(frozen_stages=22) + model.train() + + # Test MobileNetv3 + model = ViPNAS_MobileNetV3() + model.init_weights() + model.train() + + # Test MobileNetv3 with first stage frozen + frozen_stages = 1 + model = ViPNAS_MobileNetV3(frozen_stages=frozen_stages) + model.init_weights() + model.train() + for param in model.conv1.parameters(): + self.assertFalse(param.requires_grad) + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + self.assertFalse(mod.training) + for param in layer.parameters(): + self.assertFalse(param.requires_grad) + + # Test MobileNetv3 with norm eval + model = ViPNAS_MobileNetV3(norm_eval=True) + model.init_weights() + model.train() + self.assertTrue(self.check_norm_state(model.modules(), False)) + + # Test MobileNetv3 forward + model = ViPNAS_MobileNetV3() + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertIsInstance(feat, tuple) + self.assertEqual(feat[-1].shape, torch.Size([1, 160, 7, 7])) + + # Test MobileNetv3 forward with GroupNorm + model = ViPNAS_MobileNetV3( + norm_cfg=dict(type='GN', num_groups=2, requires_grad=True)) + for m in model.modules(): + if self.is_norm(m): + self.assertIsInstance(m, GroupNorm) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertIsInstance(feat, tuple) + self.assertEqual(feat[-1].shape, torch.Size([1, 160, 7, 7])) + + # Test MobileNetv3 with checkpoint forward + model = ViPNAS_MobileNetV3(with_cp=True) + for m in model.modules(): + if isinstance(m, InvertedResidual): + self.assertTrue(m.with_cp) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertIsInstance(feat, tuple) + self.assertEqual(feat[-1].shape, torch.Size([1, 160, 7, 7])) diff --git a/tests/test_models/test_backbones/test_vipnas_resnet.py b/tests/test_models/test_backbones/test_vipnas_resnet.py index 3a20b553df..41c07223a7 100644 --- a/tests/test_models/test_backbones/test_vipnas_resnet.py +++ b/tests/test_models/test_backbones/test_vipnas_resnet.py @@ -1,346 +1,346 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
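Similarly, a minimal sketch of running ViPNAS_MobileNetV3 as exercised in the test above; the output shape mirrors the test's assertion and everything else is plain PyTorch boilerplate.

import torch
from mmpose.models.backbones import ViPNAS_MobileNetV3

# Build the searched MobileNetV3 backbone with its default architecture and
# run a dummy forward pass. As asserted in the test, the last feature map has
# 160 channels at 1/32 of the 224x224 input resolution.
model = ViPNAS_MobileNetV3()
model.init_weights()
model.eval()

with torch.no_grad():
    feats = model(torch.randn(1, 3, 224, 224))

assert isinstance(feats, tuple)
assert feats[-1].shape == (1, 160, 7, 7)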
-from unittest import TestCase - -import torch -import torch.nn as nn -from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm - -from mmpose.models.backbones import ViPNAS_ResNet -from mmpose.models.backbones.vipnas_resnet import (ViPNAS_Bottleneck, - ViPNAS_ResLayer, - get_expansion) - - -class TestVipnasResnet(TestCase): - - @staticmethod - def is_block(modules): - """Check if is ViPNAS_ResNet building block.""" - if isinstance(modules, (ViPNAS_Bottleneck)): - return True - return False - - @staticmethod - def all_zeros(modules): - """Check if the weight(and bias) is all zero.""" - weight_zero = torch.equal(modules.weight.data, - torch.zeros_like(modules.weight.data)) - if hasattr(modules, 'bias'): - bias_zero = torch.equal(modules.bias.data, - torch.zeros_like(modules.bias.data)) - else: - bias_zero = True - - return weight_zero and bias_zero - - @staticmethod - def check_norm_state(modules, train_state): - """Check if norm layer is in correct train state.""" - for mod in modules: - if isinstance(mod, _BatchNorm): - if mod.training != train_state: - return False - return True - - def test_get_expansion(self): - self.assertEqual(get_expansion(ViPNAS_Bottleneck, 2), 2) - self.assertEqual(get_expansion(ViPNAS_Bottleneck), 1) - - class MyResBlock(nn.Module): - - expansion = 8 - - self.assertEqual(get_expansion(MyResBlock), 8) - - # expansion must be an integer or None - with self.assertRaises(TypeError): - get_expansion(ViPNAS_Bottleneck, '0') - - # expansion is not specified and cannot be inferred - with self.assertRaises(TypeError): - - class SomeModule(nn.Module): - pass - - get_expansion(SomeModule) - - def test_vipnas_bottleneck(self): - # style must be in ['pytorch', 'caffe'] - with self.assertRaises(AssertionError): - ViPNAS_Bottleneck(64, 64, style='tensorflow') - - # expansion must be divisible by out_channels - with self.assertRaises(AssertionError): - ViPNAS_Bottleneck(64, 64, expansion=3) - - # Test ViPNAS_Bottleneck style - block = ViPNAS_Bottleneck(64, 64, stride=2, style='pytorch') - self.assertEqual(block.conv1.stride, (1, 1)) - self.assertEqual(block.conv2.stride, (2, 2)) - block = ViPNAS_Bottleneck(64, 64, stride=2, style='caffe') - self.assertEqual(block.conv1.stride, (2, 2)) - self.assertEqual(block.conv2.stride, (1, 1)) - - # ViPNAS_Bottleneck with stride 1 - block = ViPNAS_Bottleneck(64, 64, style='pytorch') - self.assertEqual(block.in_channels, 64) - self.assertEqual(block.mid_channels, 16) - self.assertEqual(block.out_channels, 64) - self.assertEqual(block.conv1.in_channels, 64) - self.assertEqual(block.conv1.out_channels, 16) - self.assertEqual(block.conv1.kernel_size, (1, 1)) - self.assertEqual(block.conv2.in_channels, 16) - self.assertEqual(block.conv2.out_channels, 16) - self.assertEqual(block.conv2.kernel_size, (3, 3)) - self.assertEqual(block.conv3.in_channels, 16) - self.assertEqual(block.conv3.out_channels, 64) - self.assertEqual(block.conv3.kernel_size, (1, 1)) - x = torch.randn(1, 64, 56, 56) - x_out = block(x) - self.assertEqual(x_out.shape, (1, 64, 56, 56)) - - # ViPNAS_Bottleneck with stride 1 and downsample - downsample = nn.Sequential( - nn.Conv2d(64, 128, kernel_size=1), nn.BatchNorm2d(128)) - block = ViPNAS_Bottleneck( - 64, 128, style='pytorch', downsample=downsample) - self.assertEqual(block.in_channels, 64) - self.assertEqual(block.mid_channels, 32) - self.assertEqual(block.out_channels, 128) - self.assertEqual(block.conv1.in_channels, 64) - self.assertEqual(block.conv1.out_channels, 32) - self.assertEqual(block.conv1.kernel_size, (1, 1)) 
- self.assertEqual(block.conv2.in_channels, 32) - self.assertEqual(block.conv2.out_channels, 32) - self.assertEqual(block.conv2.kernel_size, (3, 3)) - self.assertEqual(block.conv3.in_channels, 32) - self.assertEqual(block.conv3.out_channels, 128) - self.assertEqual(block.conv3.kernel_size, (1, 1)) - x = torch.randn(1, 64, 56, 56) - x_out = block(x) - self.assertEqual(x_out.shape, (1, 128, 56, 56)) - - # ViPNAS_Bottleneck with stride 2 and downsample - downsample = nn.Sequential( - nn.Conv2d(64, 128, kernel_size=1, stride=2), nn.BatchNorm2d(128)) - block = ViPNAS_Bottleneck( - 64, 128, stride=2, style='pytorch', downsample=downsample) - x = torch.randn(1, 64, 56, 56) - x_out = block(x) - self.assertEqual(x_out.shape, (1, 128, 28, 28)) - - # ViPNAS_Bottleneck with expansion 2 - block = ViPNAS_Bottleneck(64, 64, style='pytorch', expansion=2) - self.assertEqual(block.in_channels, 64) - self.assertEqual(block.mid_channels, 32) - self.assertEqual(block.out_channels, 64) - self.assertEqual(block.conv1.in_channels, 64) - self.assertEqual(block.conv1.out_channels, 32) - self.assertEqual(block.conv1.kernel_size, (1, 1)) - self.assertEqual(block.conv2.in_channels, 32) - self.assertEqual(block.conv2.out_channels, 32) - self.assertEqual(block.conv2.kernel_size, (3, 3)) - self.assertEqual(block.conv3.in_channels, 32) - self.assertEqual(block.conv3.out_channels, 64) - self.assertEqual(block.conv3.kernel_size, (1, 1)) - x = torch.randn(1, 64, 56, 56) - x_out = block(x) - self.assertEqual(x_out.shape, (1, 64, 56, 56)) - - # Test ViPNAS_Bottleneck with checkpointing - block = ViPNAS_Bottleneck(64, 64, with_cp=True) - block.train() - self.assertTrue(block.with_cp) - x = torch.randn(1, 64, 56, 56, requires_grad=True) - x_out = block(x) - self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) - - def test_vipnas_bottleneck_reslayer(self): - # 3 Bottleneck w/o downsample - layer = ViPNAS_ResLayer(ViPNAS_Bottleneck, 3, 32, 32) - self.assertEqual(len(layer), 3) - for i in range(3): - self.assertEqual(layer[i].in_channels, 32) - self.assertEqual(layer[i].out_channels, 32) - self.assertIsNone(layer[i].downsample) - x = torch.randn(1, 32, 56, 56) - x_out = layer(x) - self.assertEqual(x_out.shape, (1, 32, 56, 56)) - - # 3 ViPNAS_Bottleneck w/ stride 1 and downsample - layer = ViPNAS_ResLayer(ViPNAS_Bottleneck, 3, 32, 64) - self.assertEqual(len(layer), 3) - self.assertEqual(layer[0].in_channels, 32) - self.assertEqual(layer[0].out_channels, 64) - self.assertEqual(layer[0].stride, 1) - self.assertEqual(layer[0].conv1.out_channels, 64) - self.assertEqual( - layer[0].downsample is not None and len(layer[0].downsample), 2) - self.assertIsInstance(layer[0].downsample[0], nn.Conv2d) - self.assertEqual(layer[0].downsample[0].stride, (1, 1)) - for i in range(1, 3): - self.assertEqual(layer[i].in_channels, 64) - self.assertEqual(layer[i].out_channels, 64) - self.assertEqual(layer[i].conv1.out_channels, 64) - self.assertEqual(layer[i].stride, 1) - self.assertIsNone(layer[i].downsample) - x = torch.randn(1, 32, 56, 56) - x_out = layer(x) - self.assertEqual(x_out.shape, (1, 64, 56, 56)) - - # 3 ViPNAS_Bottleneck w/ stride 2 and downsample - layer = ViPNAS_ResLayer(ViPNAS_Bottleneck, 3, 32, 64, stride=2) - self.assertEqual(len(layer), 3) - self.assertEqual(layer[0].in_channels, 32) - self.assertEqual(layer[0].out_channels, 64) - self.assertEqual(layer[0].stride, 2) - self.assertEqual(layer[0].conv1.out_channels, 64) - self.assertEqual( - layer[0].downsample is not None and len(layer[0].downsample), 2) - 
self.assertIsInstance(layer[0].downsample[0], nn.Conv2d) - self.assertEqual(layer[0].downsample[0].stride, (2, 2)) - for i in range(1, 3): - self.assertEqual(layer[i].in_channels, 64) - self.assertEqual(layer[i].out_channels, 64) - self.assertEqual(layer[i].conv1.out_channels, 64) - self.assertEqual(layer[i].stride, 1) - self.assertIsNone(layer[i].downsample) - x = torch.randn(1, 32, 56, 56) - x_out = layer(x) - self.assertEqual(x_out.shape, (1, 64, 28, 28)) - - # 3 ViPNAS_Bottleneck w/ stride 2 and downsample with avg pool - layer = ViPNAS_ResLayer( - ViPNAS_Bottleneck, 3, 32, 64, stride=2, avg_down=True) - self.assertEqual(len(layer), 3) - self.assertEqual(layer[0].in_channels, 32) - self.assertEqual(layer[0].out_channels, 64) - self.assertEqual(layer[0].stride, 2) - self.assertEqual(layer[0].conv1.out_channels, 64) - self.assertEqual( - layer[0].downsample is not None and len(layer[0].downsample), 3) - self.assertIsInstance(layer[0].downsample[0], nn.AvgPool2d) - self.assertEqual(layer[0].downsample[0].stride, 2) - for i in range(1, 3): - self.assertEqual(layer[i].in_channels, 64) - self.assertEqual(layer[i].out_channels, 64) - self.assertEqual(layer[i].conv1.out_channels, 64) - self.assertEqual(layer[i].stride, 1) - self.assertIsNone(layer[i].downsample) - x = torch.randn(1, 32, 56, 56) - x_out = layer(x) - self.assertEqual(x_out.shape, (1, 64, 28, 28)) - - # 3 ViPNAS_Bottleneck with custom expansion - layer = ViPNAS_ResLayer(ViPNAS_Bottleneck, 3, 32, 32, expansion=2) - self.assertEqual(len(layer), 3) - for i in range(3): - self.assertEqual(layer[i].in_channels, 32) - self.assertEqual(layer[i].out_channels, 32) - self.assertEqual(layer[i].stride, 1) - self.assertEqual(layer[i].conv1.out_channels, 16) - self.assertIsNone(layer[i].downsample) - x = torch.randn(1, 32, 56, 56) - x_out = layer(x) - self.assertEqual(x_out.shape, (1, 32, 56, 56)) - - def test_resnet(self): - """Test ViPNAS_ResNet backbone.""" - with self.assertRaises(KeyError): - # ViPNAS_ResNet depth should be in [50] - ViPNAS_ResNet(20) - - with self.assertRaises(AssertionError): - # In ViPNAS_ResNet: 1 <= num_stages <= 4 - ViPNAS_ResNet(50, num_stages=0) - - with self.assertRaises(AssertionError): - # In ViPNAS_ResNet: 1 <= num_stages <= 4 - ViPNAS_ResNet(50, num_stages=5) - - with self.assertRaises(AssertionError): - # len(strides) == len(dilations) == num_stages - ViPNAS_ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3) - - with self.assertRaises(TypeError): - # init_weights must have no parameter - model = ViPNAS_ResNet(50) - model.init_weights(pretrained=0) - - with self.assertRaises(AssertionError): - # Style must be in ['pytorch', 'caffe'] - ViPNAS_ResNet(50, style='tensorflow') - - # Test ViPNAS_ResNet50 norm_eval=True - model = ViPNAS_ResNet(50, norm_eval=True) - model.init_weights() - model.train() - self.assertTrue(self.check_norm_state(model.modules(), False)) - - # Test ViPNAS_ResNet50 with first stage frozen - frozen_stages = 1 - model = ViPNAS_ResNet(50, frozen_stages=frozen_stages) - model.init_weights() - model.train() - self.assertFalse(model.norm1.training) - for layer in [model.conv1, model.norm1]: - for param in layer.parameters(): - self.assertFalse(param.requires_grad) - for i in range(1, frozen_stages + 1): - layer = getattr(model, f'layer{i}') - for mod in layer.modules(): - if isinstance(mod, _BatchNorm): - self.assertFalse(mod.training) - for param in layer.parameters(): - self.assertFalse(param.requires_grad) - - # Test ViPNAS_ResNet50 with BatchNorm forward - model = ViPNAS_ResNet(50, 
out_indices=(0, 1, 2, 3)) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 4) - self.assertEqual(feat[0].shape, (1, 80, 56, 56)) - self.assertEqual(feat[1].shape, (1, 160, 28, 28)) - self.assertEqual(feat[2].shape, (1, 304, 14, 14)) - self.assertEqual(feat[3].shape, (1, 608, 7, 7)) - - # Test ViPNAS_ResNet50 with layers 1, 2, 3 out forward - model = ViPNAS_ResNet(50, out_indices=(0, 1, 2)) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 3) - self.assertEqual(feat[0].shape, (1, 80, 56, 56)) - self.assertEqual(feat[1].shape, (1, 160, 28, 28)) - self.assertEqual(feat[2].shape, (1, 304, 14, 14)) - - # Test ViPNAS_ResNet50 with layers 3 (top feature maps) out forward - model = ViPNAS_ResNet(50, out_indices=(3, )) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertIsInstance(feat, tuple) - self.assertEqual(feat[-1].shape, (1, 608, 7, 7)) - - # Test ViPNAS_ResNet50 with checkpoint forward - model = ViPNAS_ResNet(50, out_indices=(0, 1, 2, 3), with_cp=True) - for m in model.modules(): - if self.is_block(m): - self.assertTrue(m.with_cp) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - self.assertEqual(len(feat), 4) - self.assertEqual(feat[0].shape, (1, 80, 56, 56)) - self.assertEqual(feat[1].shape, (1, 160, 28, 28)) - self.assertEqual(feat[2].shape, (1, 304, 14, 14)) - self.assertEqual(feat[3].shape, (1, 608, 7, 7)) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import torch +import torch.nn as nn +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmpose.models.backbones import ViPNAS_ResNet +from mmpose.models.backbones.vipnas_resnet import (ViPNAS_Bottleneck, + ViPNAS_ResLayer, + get_expansion) + + +class TestVipnasResnet(TestCase): + + @staticmethod + def is_block(modules): + """Check if is ViPNAS_ResNet building block.""" + if isinstance(modules, (ViPNAS_Bottleneck)): + return True + return False + + @staticmethod + def all_zeros(modules): + """Check if the weight(and bias) is all zero.""" + weight_zero = torch.equal(modules.weight.data, + torch.zeros_like(modules.weight.data)) + if hasattr(modules, 'bias'): + bias_zero = torch.equal(modules.bias.data, + torch.zeros_like(modules.bias.data)) + else: + bias_zero = True + + return weight_zero and bias_zero + + @staticmethod + def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + def test_get_expansion(self): + self.assertEqual(get_expansion(ViPNAS_Bottleneck, 2), 2) + self.assertEqual(get_expansion(ViPNAS_Bottleneck), 1) + + class MyResBlock(nn.Module): + + expansion = 8 + + self.assertEqual(get_expansion(MyResBlock), 8) + + # expansion must be an integer or None + with self.assertRaises(TypeError): + get_expansion(ViPNAS_Bottleneck, '0') + + # expansion is not specified and cannot be inferred + with self.assertRaises(TypeError): + + class SomeModule(nn.Module): + pass + + get_expansion(SomeModule) + + def test_vipnas_bottleneck(self): + # style must be in ['pytorch', 'caffe'] + with self.assertRaises(AssertionError): + ViPNAS_Bottleneck(64, 64, style='tensorflow') + + # expansion must be divisible by out_channels + with self.assertRaises(AssertionError): + 
ViPNAS_Bottleneck(64, 64, expansion=3) + + # Test ViPNAS_Bottleneck style + block = ViPNAS_Bottleneck(64, 64, stride=2, style='pytorch') + self.assertEqual(block.conv1.stride, (1, 1)) + self.assertEqual(block.conv2.stride, (2, 2)) + block = ViPNAS_Bottleneck(64, 64, stride=2, style='caffe') + self.assertEqual(block.conv1.stride, (2, 2)) + self.assertEqual(block.conv2.stride, (1, 1)) + + # ViPNAS_Bottleneck with stride 1 + block = ViPNAS_Bottleneck(64, 64, style='pytorch') + self.assertEqual(block.in_channels, 64) + self.assertEqual(block.mid_channels, 16) + self.assertEqual(block.out_channels, 64) + self.assertEqual(block.conv1.in_channels, 64) + self.assertEqual(block.conv1.out_channels, 16) + self.assertEqual(block.conv1.kernel_size, (1, 1)) + self.assertEqual(block.conv2.in_channels, 16) + self.assertEqual(block.conv2.out_channels, 16) + self.assertEqual(block.conv2.kernel_size, (3, 3)) + self.assertEqual(block.conv3.in_channels, 16) + self.assertEqual(block.conv3.out_channels, 64) + self.assertEqual(block.conv3.kernel_size, (1, 1)) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + self.assertEqual(x_out.shape, (1, 64, 56, 56)) + + # ViPNAS_Bottleneck with stride 1 and downsample + downsample = nn.Sequential( + nn.Conv2d(64, 128, kernel_size=1), nn.BatchNorm2d(128)) + block = ViPNAS_Bottleneck( + 64, 128, style='pytorch', downsample=downsample) + self.assertEqual(block.in_channels, 64) + self.assertEqual(block.mid_channels, 32) + self.assertEqual(block.out_channels, 128) + self.assertEqual(block.conv1.in_channels, 64) + self.assertEqual(block.conv1.out_channels, 32) + self.assertEqual(block.conv1.kernel_size, (1, 1)) + self.assertEqual(block.conv2.in_channels, 32) + self.assertEqual(block.conv2.out_channels, 32) + self.assertEqual(block.conv2.kernel_size, (3, 3)) + self.assertEqual(block.conv3.in_channels, 32) + self.assertEqual(block.conv3.out_channels, 128) + self.assertEqual(block.conv3.kernel_size, (1, 1)) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + self.assertEqual(x_out.shape, (1, 128, 56, 56)) + + # ViPNAS_Bottleneck with stride 2 and downsample + downsample = nn.Sequential( + nn.Conv2d(64, 128, kernel_size=1, stride=2), nn.BatchNorm2d(128)) + block = ViPNAS_Bottleneck( + 64, 128, stride=2, style='pytorch', downsample=downsample) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + self.assertEqual(x_out.shape, (1, 128, 28, 28)) + + # ViPNAS_Bottleneck with expansion 2 + block = ViPNAS_Bottleneck(64, 64, style='pytorch', expansion=2) + self.assertEqual(block.in_channels, 64) + self.assertEqual(block.mid_channels, 32) + self.assertEqual(block.out_channels, 64) + self.assertEqual(block.conv1.in_channels, 64) + self.assertEqual(block.conv1.out_channels, 32) + self.assertEqual(block.conv1.kernel_size, (1, 1)) + self.assertEqual(block.conv2.in_channels, 32) + self.assertEqual(block.conv2.out_channels, 32) + self.assertEqual(block.conv2.kernel_size, (3, 3)) + self.assertEqual(block.conv3.in_channels, 32) + self.assertEqual(block.conv3.out_channels, 64) + self.assertEqual(block.conv3.kernel_size, (1, 1)) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + self.assertEqual(x_out.shape, (1, 64, 56, 56)) + + # Test ViPNAS_Bottleneck with checkpointing + block = ViPNAS_Bottleneck(64, 64, with_cp=True) + block.train() + self.assertTrue(block.with_cp) + x = torch.randn(1, 64, 56, 56, requires_grad=True) + x_out = block(x) + self.assertEqual(x_out.shape, torch.Size([1, 64, 56, 56])) + + def test_vipnas_bottleneck_reslayer(self): + # 3 Bottleneck w/o downsample + layer = 
ViPNAS_ResLayer(ViPNAS_Bottleneck, 3, 32, 32) + self.assertEqual(len(layer), 3) + for i in range(3): + self.assertEqual(layer[i].in_channels, 32) + self.assertEqual(layer[i].out_channels, 32) + self.assertIsNone(layer[i].downsample) + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + self.assertEqual(x_out.shape, (1, 32, 56, 56)) + + # 3 ViPNAS_Bottleneck w/ stride 1 and downsample + layer = ViPNAS_ResLayer(ViPNAS_Bottleneck, 3, 32, 64) + self.assertEqual(len(layer), 3) + self.assertEqual(layer[0].in_channels, 32) + self.assertEqual(layer[0].out_channels, 64) + self.assertEqual(layer[0].stride, 1) + self.assertEqual(layer[0].conv1.out_channels, 64) + self.assertEqual( + layer[0].downsample is not None and len(layer[0].downsample), 2) + self.assertIsInstance(layer[0].downsample[0], nn.Conv2d) + self.assertEqual(layer[0].downsample[0].stride, (1, 1)) + for i in range(1, 3): + self.assertEqual(layer[i].in_channels, 64) + self.assertEqual(layer[i].out_channels, 64) + self.assertEqual(layer[i].conv1.out_channels, 64) + self.assertEqual(layer[i].stride, 1) + self.assertIsNone(layer[i].downsample) + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + self.assertEqual(x_out.shape, (1, 64, 56, 56)) + + # 3 ViPNAS_Bottleneck w/ stride 2 and downsample + layer = ViPNAS_ResLayer(ViPNAS_Bottleneck, 3, 32, 64, stride=2) + self.assertEqual(len(layer), 3) + self.assertEqual(layer[0].in_channels, 32) + self.assertEqual(layer[0].out_channels, 64) + self.assertEqual(layer[0].stride, 2) + self.assertEqual(layer[0].conv1.out_channels, 64) + self.assertEqual( + layer[0].downsample is not None and len(layer[0].downsample), 2) + self.assertIsInstance(layer[0].downsample[0], nn.Conv2d) + self.assertEqual(layer[0].downsample[0].stride, (2, 2)) + for i in range(1, 3): + self.assertEqual(layer[i].in_channels, 64) + self.assertEqual(layer[i].out_channels, 64) + self.assertEqual(layer[i].conv1.out_channels, 64) + self.assertEqual(layer[i].stride, 1) + self.assertIsNone(layer[i].downsample) + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + self.assertEqual(x_out.shape, (1, 64, 28, 28)) + + # 3 ViPNAS_Bottleneck w/ stride 2 and downsample with avg pool + layer = ViPNAS_ResLayer( + ViPNAS_Bottleneck, 3, 32, 64, stride=2, avg_down=True) + self.assertEqual(len(layer), 3) + self.assertEqual(layer[0].in_channels, 32) + self.assertEqual(layer[0].out_channels, 64) + self.assertEqual(layer[0].stride, 2) + self.assertEqual(layer[0].conv1.out_channels, 64) + self.assertEqual( + layer[0].downsample is not None and len(layer[0].downsample), 3) + self.assertIsInstance(layer[0].downsample[0], nn.AvgPool2d) + self.assertEqual(layer[0].downsample[0].stride, 2) + for i in range(1, 3): + self.assertEqual(layer[i].in_channels, 64) + self.assertEqual(layer[i].out_channels, 64) + self.assertEqual(layer[i].conv1.out_channels, 64) + self.assertEqual(layer[i].stride, 1) + self.assertIsNone(layer[i].downsample) + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + self.assertEqual(x_out.shape, (1, 64, 28, 28)) + + # 3 ViPNAS_Bottleneck with custom expansion + layer = ViPNAS_ResLayer(ViPNAS_Bottleneck, 3, 32, 32, expansion=2) + self.assertEqual(len(layer), 3) + for i in range(3): + self.assertEqual(layer[i].in_channels, 32) + self.assertEqual(layer[i].out_channels, 32) + self.assertEqual(layer[i].stride, 1) + self.assertEqual(layer[i].conv1.out_channels, 16) + self.assertIsNone(layer[i].downsample) + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + self.assertEqual(x_out.shape, (1, 32, 56, 56)) + + def test_resnet(self): + """Test 
ViPNAS_ResNet backbone.""" + with self.assertRaises(KeyError): + # ViPNAS_ResNet depth should be in [50] + ViPNAS_ResNet(20) + + with self.assertRaises(AssertionError): + # In ViPNAS_ResNet: 1 <= num_stages <= 4 + ViPNAS_ResNet(50, num_stages=0) + + with self.assertRaises(AssertionError): + # In ViPNAS_ResNet: 1 <= num_stages <= 4 + ViPNAS_ResNet(50, num_stages=5) + + with self.assertRaises(AssertionError): + # len(strides) == len(dilations) == num_stages + ViPNAS_ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3) + + with self.assertRaises(TypeError): + # init_weights must have no parameter + model = ViPNAS_ResNet(50) + model.init_weights(pretrained=0) + + with self.assertRaises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + ViPNAS_ResNet(50, style='tensorflow') + + # Test ViPNAS_ResNet50 norm_eval=True + model = ViPNAS_ResNet(50, norm_eval=True) + model.init_weights() + model.train() + self.assertTrue(self.check_norm_state(model.modules(), False)) + + # Test ViPNAS_ResNet50 with first stage frozen + frozen_stages = 1 + model = ViPNAS_ResNet(50, frozen_stages=frozen_stages) + model.init_weights() + model.train() + self.assertFalse(model.norm1.training) + for layer in [model.conv1, model.norm1]: + for param in layer.parameters(): + self.assertFalse(param.requires_grad) + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + self.assertFalse(mod.training) + for param in layer.parameters(): + self.assertFalse(param.requires_grad) + + # Test ViPNAS_ResNet50 with BatchNorm forward + model = ViPNAS_ResNet(50, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 4) + self.assertEqual(feat[0].shape, (1, 80, 56, 56)) + self.assertEqual(feat[1].shape, (1, 160, 28, 28)) + self.assertEqual(feat[2].shape, (1, 304, 14, 14)) + self.assertEqual(feat[3].shape, (1, 608, 7, 7)) + + # Test ViPNAS_ResNet50 with layers 1, 2, 3 out forward + model = ViPNAS_ResNet(50, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 3) + self.assertEqual(feat[0].shape, (1, 80, 56, 56)) + self.assertEqual(feat[1].shape, (1, 160, 28, 28)) + self.assertEqual(feat[2].shape, (1, 304, 14, 14)) + + # Test ViPNAS_ResNet50 with layers 3 (top feature maps) out forward + model = ViPNAS_ResNet(50, out_indices=(3, )) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertIsInstance(feat, tuple) + self.assertEqual(feat[-1].shape, (1, 608, 7, 7)) + + # Test ViPNAS_ResNet50 with checkpoint forward + model = ViPNAS_ResNet(50, out_indices=(0, 1, 2, 3), with_cp=True) + for m in model.modules(): + if self.is_block(m): + self.assertTrue(m.with_cp) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual(len(feat), 4) + self.assertEqual(feat[0].shape, (1, 80, 56, 56)) + self.assertEqual(feat[1].shape, (1, 160, 28, 28)) + self.assertEqual(feat[2].shape, (1, 304, 14, 14)) + self.assertEqual(feat[3].shape, (1, 608, 7, 7)) diff --git a/tests/test_models/test_heads/test_heatmap_heads/test_ae_head.py b/tests/test_models/test_heads/test_heatmap_heads/test_ae_head.py index 8172e9a2f7..42d1b90a22 100644 --- a/tests/test_models/test_heads/test_heatmap_heads/test_ae_head.py +++ 
b/tests/test_models/test_heads/test_heatmap_heads/test_ae_head.py @@ -1,148 +1,148 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import unittest -from itertools import product -from unittest import TestCase - -import numpy as np -import torch -from mmengine.structures import InstanceData, PixelData -from mmengine.utils import is_tuple_of - -from mmpose.codecs import AssociativeEmbedding # noqa -from mmpose.models.heads import AssociativeEmbeddingHead -from mmpose.registry import KEYPOINT_CODECS -from mmpose.testing._utils import get_packed_inputs - - -class TestAssociativeEmbeddingHead(TestCase): - - def _get_tags(self, heatmaps, keypoint_indices, tag_per_keypoint: bool): - - K, H, W = heatmaps.shape - N = keypoint_indices.shape[0] - - if tag_per_keypoint: - tags = np.zeros((K, H, W), dtype=np.float32) - else: - tags = np.zeros((1, H, W), dtype=np.float32) - - for n, k in product(range(N), range(K)): - y, x = np.unravel_index(keypoint_indices[n, k, 0], (H, W)) - if tag_per_keypoint: - tags[k, y, x] = n - else: - tags[0, y, x] = n - - return tags - - def test_forward(self): - - head = AssociativeEmbeddingHead( - in_channels=32, - num_keypoints=17, - tag_dim=1, - tag_per_keypoint=True, - deconv_out_channels=None) - - feats = [torch.rand(1, 32, 64, 64)] - output = head.forward(feats) # should be (heatmaps, tags) - self.assertTrue(is_tuple_of(output, torch.Tensor)) - self.assertEqual(output[0].shape, (1, 17, 64, 64)) - self.assertEqual(output[1].shape, (1, 17, 64, 64)) - - def test_predict(self): - - codec_cfg = dict( - type='AssociativeEmbedding', - input_size=(256, 256), - heatmap_size=(64, 64), - use_udp=False, - decode_keypoint_order=[ - 0, 1, 2, 3, 4, 5, 6, 11, 12, 7, 8, 9, 10, 13, 14, 15, 16 - ]) - - # get test data - codec = KEYPOINT_CODECS.build(codec_cfg) - batch_data_samples = get_packed_inputs( - 1, - input_size=(256, 256), - heatmap_size=(64, 64), - img_shape=(256, 256))['data_samples'] - - keypoints = batch_data_samples[0].gt_instances['keypoints'] - keypoints_visible = batch_data_samples[0].gt_instances[ - 'keypoints_visible'] - - encoded = codec.encode(keypoints, keypoints_visible) - heatmaps = encoded['heatmaps'] - keypoint_indices = encoded['keypoint_indices'] - - tags = self._get_tags( - heatmaps, keypoint_indices, tag_per_keypoint=True) - - dummy_feat = np.concatenate((heatmaps, tags), axis=0) - feats = [torch.from_numpy(dummy_feat)[None]] - - head = AssociativeEmbeddingHead( - in_channels=34, - num_keypoints=17, - tag_dim=1, - tag_per_keypoint=True, - deconv_out_channels=None, - final_layer=None, - decoder=codec_cfg) - - preds = head.predict(feats, batch_data_samples) - self.assertTrue(np.allclose(preds[0].keypoints, keypoints, atol=4.0)) - - def test_loss(self): - - codec_cfg = dict( - type='AssociativeEmbedding', - input_size=(256, 256), - heatmap_size=(64, 64), - use_udp=False, - decode_keypoint_order=[ - 0, 1, 2, 3, 4, 5, 6, 11, 12, 7, 8, 9, 10, 13, 14, 15, 16 - ]) - - # get test data - codec = KEYPOINT_CODECS.build(codec_cfg) - - batch_data_samples = get_packed_inputs( - 1, - input_size=(256, 256), - heatmap_size=(64, 64), - img_shape=(256, 256))['data_samples'] - - keypoints = batch_data_samples[0].gt_instances['keypoints'] - keypoints_visible = batch_data_samples[0].gt_instances[ - 'keypoints_visible'] - encoded = codec.encode(keypoints, keypoints_visible) - heatmaps = encoded['heatmaps'] - keypoint_indices = encoded['keypoint_indices'] - keypoint_weights = encoded['keypoint_weights'] - - heatmap_mask = np.ones((1, ) + heatmaps.shape[1:], dtype=np.float32) - 
batch_data_samples[0].gt_fields = PixelData( - heatmaps=heatmaps, heatmap_mask=heatmap_mask).to_tensor() - batch_data_samples[0].gt_instance_labels = InstanceData( - keypoint_indices=keypoint_indices, - keypoint_weights=keypoint_weights).to_tensor() - - feats = [torch.rand(1, 32, 64, 64)] - head = AssociativeEmbeddingHead( - in_channels=32, - num_keypoints=17, - tag_dim=1, - tag_per_keypoint=True, - deconv_out_channels=None) - - losses = head.loss(feats, batch_data_samples) - for name in ['loss_kpt', 'loss_pull', 'loss_push']: - self.assertIn(name, losses) - self.assertIsInstance(losses[name], torch.Tensor) - - -if __name__ == '__main__': - unittest.main() +# Copyright (c) OpenMMLab. All rights reserved. +import unittest +from itertools import product +from unittest import TestCase + +import numpy as np +import torch +from mmengine.structures import InstanceData, PixelData +from mmengine.utils import is_tuple_of + +from mmpose.codecs import AssociativeEmbedding # noqa +from mmpose.models.heads import AssociativeEmbeddingHead +from mmpose.registry import KEYPOINT_CODECS +from mmpose.testing._utils import get_packed_inputs + + +class TestAssociativeEmbeddingHead(TestCase): + + def _get_tags(self, heatmaps, keypoint_indices, tag_per_keypoint: bool): + + K, H, W = heatmaps.shape + N = keypoint_indices.shape[0] + + if tag_per_keypoint: + tags = np.zeros((K, H, W), dtype=np.float32) + else: + tags = np.zeros((1, H, W), dtype=np.float32) + + for n, k in product(range(N), range(K)): + y, x = np.unravel_index(keypoint_indices[n, k, 0], (H, W)) + if tag_per_keypoint: + tags[k, y, x] = n + else: + tags[0, y, x] = n + + return tags + + def test_forward(self): + + head = AssociativeEmbeddingHead( + in_channels=32, + num_keypoints=17, + tag_dim=1, + tag_per_keypoint=True, + deconv_out_channels=None) + + feats = [torch.rand(1, 32, 64, 64)] + output = head.forward(feats) # should be (heatmaps, tags) + self.assertTrue(is_tuple_of(output, torch.Tensor)) + self.assertEqual(output[0].shape, (1, 17, 64, 64)) + self.assertEqual(output[1].shape, (1, 17, 64, 64)) + + def test_predict(self): + + codec_cfg = dict( + type='AssociativeEmbedding', + input_size=(256, 256), + heatmap_size=(64, 64), + use_udp=False, + decode_keypoint_order=[ + 0, 1, 2, 3, 4, 5, 6, 11, 12, 7, 8, 9, 10, 13, 14, 15, 16 + ]) + + # get test data + codec = KEYPOINT_CODECS.build(codec_cfg) + batch_data_samples = get_packed_inputs( + 1, + input_size=(256, 256), + heatmap_size=(64, 64), + img_shape=(256, 256))['data_samples'] + + keypoints = batch_data_samples[0].gt_instances['keypoints'] + keypoints_visible = batch_data_samples[0].gt_instances[ + 'keypoints_visible'] + + encoded = codec.encode(keypoints, keypoints_visible) + heatmaps = encoded['heatmaps'] + keypoint_indices = encoded['keypoint_indices'] + + tags = self._get_tags( + heatmaps, keypoint_indices, tag_per_keypoint=True) + + dummy_feat = np.concatenate((heatmaps, tags), axis=0) + feats = [torch.from_numpy(dummy_feat)[None]] + + head = AssociativeEmbeddingHead( + in_channels=34, + num_keypoints=17, + tag_dim=1, + tag_per_keypoint=True, + deconv_out_channels=None, + final_layer=None, + decoder=codec_cfg) + + preds = head.predict(feats, batch_data_samples) + self.assertTrue(np.allclose(preds[0].keypoints, keypoints, atol=4.0)) + + def test_loss(self): + + codec_cfg = dict( + type='AssociativeEmbedding', + input_size=(256, 256), + heatmap_size=(64, 64), + use_udp=False, + decode_keypoint_order=[ + 0, 1, 2, 3, 4, 5, 6, 11, 12, 7, 8, 9, 10, 13, 14, 15, 16 + ]) + + # get test data + codec = 
KEYPOINT_CODECS.build(codec_cfg) + + batch_data_samples = get_packed_inputs( + 1, + input_size=(256, 256), + heatmap_size=(64, 64), + img_shape=(256, 256))['data_samples'] + + keypoints = batch_data_samples[0].gt_instances['keypoints'] + keypoints_visible = batch_data_samples[0].gt_instances[ + 'keypoints_visible'] + encoded = codec.encode(keypoints, keypoints_visible) + heatmaps = encoded['heatmaps'] + keypoint_indices = encoded['keypoint_indices'] + keypoint_weights = encoded['keypoint_weights'] + + heatmap_mask = np.ones((1, ) + heatmaps.shape[1:], dtype=np.float32) + batch_data_samples[0].gt_fields = PixelData( + heatmaps=heatmaps, heatmap_mask=heatmap_mask).to_tensor() + batch_data_samples[0].gt_instance_labels = InstanceData( + keypoint_indices=keypoint_indices, + keypoint_weights=keypoint_weights).to_tensor() + + feats = [torch.rand(1, 32, 64, 64)] + head = AssociativeEmbeddingHead( + in_channels=32, + num_keypoints=17, + tag_dim=1, + tag_per_keypoint=True, + deconv_out_channels=None) + + losses = head.loss(feats, batch_data_samples) + for name in ['loss_kpt', 'loss_pull', 'loss_push']: + self.assertIn(name, losses) + self.assertIsInstance(losses[name], torch.Tensor) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_models/test_heads/test_heatmap_heads/test_cid_head.py b/tests/test_models/test_heads/test_heatmap_heads/test_cid_head.py index 29fd9e57eb..0a75516001 100644 --- a/tests/test_models/test_heads/test_heatmap_heads/test_cid_head.py +++ b/tests/test_models/test_heads/test_heatmap_heads/test_cid_head.py @@ -1,128 +1,128 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import List, Tuple -from unittest import TestCase - -import numpy as np -import torch - -from mmpose.models.heads import CIDHead -from mmpose.testing import get_coco_sample, get_packed_inputs -from mmpose.utils.tensor_utils import to_tensor - - -class TestCIDHead(TestCase): - - def _get_feats( - self, - batch_size: int = 1, - feat_shapes: List[Tuple[int, int, int]] = [(32, 128, 128)], - ): - - feats = [ - torch.rand((batch_size, ) + shape, dtype=torch.float32) - for shape in feat_shapes - ] - - if len(feats) > 1: - feats = [[x] for x in feats] - - return feats - - def _get_data_samples(self): - data_samples = get_packed_inputs( - 1, - input_size=(512, 512), - heatmap_size=(128, 128), - img_shape=(512, 512))['data_samples'] - return data_samples - - def test_forward(self): - - head = CIDHead(in_channels=32, num_keypoints=17, gfd_channels=32) - - feats = [torch.rand(1, 32, 128, 128)] - with torch.no_grad(): - output = head.forward(feats) - self.assertIsInstance(output, torch.Tensor) - self.assertEqual(output.shape[1:], (17, 128, 128)) - - def test_predict(self): - - codec = dict( - type='DecoupledHeatmap', - input_size=(512, 512), - heatmap_size=(128, 128)) - - head = CIDHead( - in_channels=32, num_keypoints=17, gfd_channels=32, decoder=codec) - - feats = self._get_feats() - data_samples = self._get_data_samples() - with torch.no_grad(): - preds = head.predict(feats, data_samples) - self.assertEqual(len(preds), 1) - self.assertEqual(preds[0].keypoints.shape[1:], (17, 2)) - self.assertEqual(preds[0].keypoint_scores.shape[1:], (17, )) - - # tta - with torch.no_grad(): - feats_flip = self._get_feats(feat_shapes=[(32, 128, - 128), (32, 128, 128)]) - preds = head.predict(feats_flip, data_samples, - dict(flip_test=True)) - self.assertEqual(len(preds), 1) - self.assertEqual(preds[0].keypoints.shape[1:], (17, 2)) - self.assertEqual(preds[0].keypoint_scores.shape[1:], (17, )) - - # 
output heatmaps - with torch.no_grad(): - _, pred_fields = head.predict(feats, data_samples, - dict(output_heatmaps=True)) - self.assertEqual(len(pred_fields), 1) - self.assertEqual(pred_fields[0].heatmaps.shape[1:], (128, 128)) - self.assertEqual(pred_fields[0].heatmaps.shape[0] % 17, 0) - - def test_loss(self): - data = get_coco_sample(img_shape=(512, 512), num_instances=1) - data['bbox'] = np.tile(data['bbox'], 2).reshape(-1, 4, 2) - data['bbox'][:, 1:3, 0] = data['bbox'][:, 0:2, 0] - - codec_cfg = dict( - type='DecoupledHeatmap', - input_size=(512, 512), - heatmap_size=(128, 128)) - - head = CIDHead( - in_channels=32, - num_keypoints=17, - gfd_channels=32, - decoder=codec_cfg, - coupled_heatmap_loss=dict( - type='FocalHeatmapLoss', loss_weight=1.0), - decoupled_heatmap_loss=dict( - type='FocalHeatmapLoss', loss_weight=4.0), - contrastive_loss=dict(type='InfoNCELoss', loss_weight=1.0)) - - encoded = head.decoder.encode(data['keypoints'], - data['keypoints_visible'], data['bbox']) - feats = self._get_feats() - data_samples = self._get_data_samples() - for data_sample in data_samples: - data_sample.gt_fields.set_data({ - 'heatmaps': - to_tensor(encoded['heatmaps']), - 'instance_heatmaps': - to_tensor(encoded['instance_heatmaps']) - }) - data_sample.gt_instance_labels.set_data( - {'instance_coords': to_tensor(encoded['instance_coords'])}) - data_sample.gt_instance_labels.set_data( - {'keypoint_weights': to_tensor(encoded['keypoint_weights'])}) - - losses = head.loss(feats, data_samples) - self.assertIn('loss/heatmap_coupled', losses) - self.assertEqual(losses['loss/heatmap_coupled'].ndim, 0) - self.assertIn('loss/heatmap_decoupled', losses) - self.assertEqual(losses['loss/heatmap_decoupled'].ndim, 0) - self.assertIn('loss/contrastive', losses) - self.assertEqual(losses['loss/contrastive'].ndim, 0) +# Copyright (c) OpenMMLab. All rights reserved. 
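To complement the CIDHead tests above, a short forward-pass sketch; the constructor arguments and expected heatmap shape come from test_forward, and the head is built here without a decoder.

import torch
from mmpose.models.heads import CIDHead

# Instance-decoupled head over a single 32-channel feature map; called
# directly, forward() returns the coupled keypoint heatmaps, as in
# test_forward above.
head = CIDHead(in_channels=32, num_keypoints=17, gfd_channels=32)

feats = [torch.rand(1, 32, 128, 128)]
with torch.no_grad():
    heatmaps = head.forward(feats)

assert heatmaps.shape[1:] == (17, 128, 128)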
+from typing import List, Tuple +from unittest import TestCase + +import numpy as np +import torch + +from mmpose.models.heads import CIDHead +from mmpose.testing import get_coco_sample, get_packed_inputs +from mmpose.utils.tensor_utils import to_tensor + + +class TestCIDHead(TestCase): + + def _get_feats( + self, + batch_size: int = 1, + feat_shapes: List[Tuple[int, int, int]] = [(32, 128, 128)], + ): + + feats = [ + torch.rand((batch_size, ) + shape, dtype=torch.float32) + for shape in feat_shapes + ] + + if len(feats) > 1: + feats = [[x] for x in feats] + + return feats + + def _get_data_samples(self): + data_samples = get_packed_inputs( + 1, + input_size=(512, 512), + heatmap_size=(128, 128), + img_shape=(512, 512))['data_samples'] + return data_samples + + def test_forward(self): + + head = CIDHead(in_channels=32, num_keypoints=17, gfd_channels=32) + + feats = [torch.rand(1, 32, 128, 128)] + with torch.no_grad(): + output = head.forward(feats) + self.assertIsInstance(output, torch.Tensor) + self.assertEqual(output.shape[1:], (17, 128, 128)) + + def test_predict(self): + + codec = dict( + type='DecoupledHeatmap', + input_size=(512, 512), + heatmap_size=(128, 128)) + + head = CIDHead( + in_channels=32, num_keypoints=17, gfd_channels=32, decoder=codec) + + feats = self._get_feats() + data_samples = self._get_data_samples() + with torch.no_grad(): + preds = head.predict(feats, data_samples) + self.assertEqual(len(preds), 1) + self.assertEqual(preds[0].keypoints.shape[1:], (17, 2)) + self.assertEqual(preds[0].keypoint_scores.shape[1:], (17, )) + + # tta + with torch.no_grad(): + feats_flip = self._get_feats(feat_shapes=[(32, 128, + 128), (32, 128, 128)]) + preds = head.predict(feats_flip, data_samples, + dict(flip_test=True)) + self.assertEqual(len(preds), 1) + self.assertEqual(preds[0].keypoints.shape[1:], (17, 2)) + self.assertEqual(preds[0].keypoint_scores.shape[1:], (17, )) + + # output heatmaps + with torch.no_grad(): + _, pred_fields = head.predict(feats, data_samples, + dict(output_heatmaps=True)) + self.assertEqual(len(pred_fields), 1) + self.assertEqual(pred_fields[0].heatmaps.shape[1:], (128, 128)) + self.assertEqual(pred_fields[0].heatmaps.shape[0] % 17, 0) + + def test_loss(self): + data = get_coco_sample(img_shape=(512, 512), num_instances=1) + data['bbox'] = np.tile(data['bbox'], 2).reshape(-1, 4, 2) + data['bbox'][:, 1:3, 0] = data['bbox'][:, 0:2, 0] + + codec_cfg = dict( + type='DecoupledHeatmap', + input_size=(512, 512), + heatmap_size=(128, 128)) + + head = CIDHead( + in_channels=32, + num_keypoints=17, + gfd_channels=32, + decoder=codec_cfg, + coupled_heatmap_loss=dict( + type='FocalHeatmapLoss', loss_weight=1.0), + decoupled_heatmap_loss=dict( + type='FocalHeatmapLoss', loss_weight=4.0), + contrastive_loss=dict(type='InfoNCELoss', loss_weight=1.0)) + + encoded = head.decoder.encode(data['keypoints'], + data['keypoints_visible'], data['bbox']) + feats = self._get_feats() + data_samples = self._get_data_samples() + for data_sample in data_samples: + data_sample.gt_fields.set_data({ + 'heatmaps': + to_tensor(encoded['heatmaps']), + 'instance_heatmaps': + to_tensor(encoded['instance_heatmaps']) + }) + data_sample.gt_instance_labels.set_data( + {'instance_coords': to_tensor(encoded['instance_coords'])}) + data_sample.gt_instance_labels.set_data( + {'keypoint_weights': to_tensor(encoded['keypoint_weights'])}) + + losses = head.loss(feats, data_samples) + self.assertIn('loss/heatmap_coupled', losses) + self.assertEqual(losses['loss/heatmap_coupled'].ndim, 0) + 
self.assertIn('loss/heatmap_decoupled', losses) + self.assertEqual(losses['loss/heatmap_decoupled'].ndim, 0) + self.assertIn('loss/contrastive', losses) + self.assertEqual(losses['loss/contrastive'].ndim, 0) diff --git a/tests/test_models/test_heads/test_heatmap_heads/test_cpm_head.py b/tests/test_models/test_heads/test_heatmap_heads/test_cpm_head.py index 41b2de46d2..334b4845d1 100644 --- a/tests/test_models/test_heads/test_heatmap_heads/test_cpm_head.py +++ b/tests/test_models/test_heads/test_heatmap_heads/test_cpm_head.py @@ -1,243 +1,243 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import List, Tuple -from unittest import TestCase - -import torch -from mmengine.structures import InstanceData, PixelData -from torch import nn - -from mmpose.models.heads import CPMHead -from mmpose.testing import get_packed_inputs - - -class TestCPMHead(TestCase): - - def _get_feats(self, - batch_size: int = 2, - feat_shapes: List[Tuple[int, int, int]] = [(17, 32, 24)]): - - feats = [ - torch.rand((batch_size, ) + shape, dtype=torch.float32) - for shape in feat_shapes - ] - return feats - - def _get_data_samples(self, batch_size: int = 2): - batch_data_samples = get_packed_inputs( - batch_size=batch_size, - num_instances=1, - num_keypoints=17, - img_shape=(128, 128), - input_size=(192, 256), - heatmap_size=(24, 32), - with_heatmap=True, - with_reg_label=False)['data_samples'] - return batch_data_samples - - def test_init(self): - # w/o deconv - head = CPMHead( - num_stages=1, - in_channels=256, - out_channels=17, - deconv_out_channels=None) - self.assertTrue(isinstance(head.multi_deconv_layers, nn.ModuleList)) - self.assertTrue(isinstance(head.multi_deconv_layers[0], nn.Identity)) - - # w/ deconv - head = CPMHead( - num_stages=1, - in_channels=32, - out_channels=17, - deconv_out_channels=(32, 32), - deconv_kernel_sizes=(4, 4)) - self.assertTrue(isinstance(head.multi_deconv_layers, nn.ModuleList)) - self.assertTrue(isinstance(head.multi_deconv_layers[0], nn.Sequential)) - - # w/o final layer - head = CPMHead( - num_stages=6, in_channels=17, out_channels=17, final_layer=None) - self.assertTrue(isinstance(head.multi_final_layers, nn.ModuleList)) - self.assertTrue(isinstance(head.multi_final_layers[0], nn.Identity)) - - # w/ decoder - head = CPMHead( - num_stages=1, - in_channels=32, - out_channels=17, - decoder=dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2.)) - self.assertIsNotNone(head.decoder) - - # the same loss for different stages - head = CPMHead( - num_stages=6, - in_channels=17, - out_channels=17, - final_layer=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - ) - self.assertTrue(isinstance(head.loss_module, nn.Module)) - - # different loss for different stages - num_stages = 6 - head = CPMHead( - num_stages=num_stages, - in_channels=17, - out_channels=17, - final_layer=None, - loss=[dict(type='KeypointMSELoss', use_target_weight=True)] * 6, - ) - self.assertTrue(isinstance(head.loss_module, nn.ModuleList)) - self.assertTrue(len(head.loss_module), num_stages) - - def test_predict(self): - decoder_cfg = dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(24, 32), - sigma=2.) 
- - # num_stages = 6, final_layer = None - head = CPMHead( - num_stages=6, - in_channels=17, - out_channels=17, - final_layer=None, - decoder=decoder_cfg) - - with self.assertRaisesRegex( - AssertionError, - 'length of feature maps did not match the `num_stages`'): - feats = self._get_feats(batch_size=2, feat_shapes=[(17, 32, 24)]) - batch_data_samples = self._get_data_samples(batch_size=2) - _ = head.predict(feats, batch_data_samples) - - feats = self._get_feats(batch_size=2, feat_shapes=[(17, 32, 24)] * 6) - batch_data_samples = self._get_data_samples(batch_size=2) - preds = head.predict(feats, batch_data_samples) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual(preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - # num_stages = 1, final_layer = dict(kernel_size=1) - head = CPMHead( - num_stages=1, - in_channels=32, - out_channels=17, - final_layer=dict(kernel_size=1), - decoder=decoder_cfg) - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 32, 24)]) - batch_data_samples = self._get_data_samples(batch_size=2) - preds, pred_heatmaps = head.predict( - feats, batch_data_samples, test_cfg=dict(output_heatmaps=True)) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual(preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - self.assertTrue(len(pred_heatmaps), 2) - self.assertIsInstance(pred_heatmaps[0], PixelData) - self.assertEqual(pred_heatmaps[0].heatmaps.shape, (17, 32, 24)) - - def test_tta(self): - # flip test: heatmap - decoder_cfg = dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(24, 32), - sigma=2.) - - head = CPMHead( - num_stages=1, - in_channels=32, - out_channels=17, - final_layer=dict(kernel_size=1), - decoder=decoder_cfg) - - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = self._get_data_samples(batch_size=2) - preds = head.predict([feats, feats], - batch_data_samples, - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual(preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - def test_loss(self): - # num_stages = 1 - head = CPMHead( - num_stages=1, - in_channels=32, - out_channels=17, - final_layer=dict(kernel_size=1)) - - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 32, 24)]) - batch_data_samples = self._get_data_samples(batch_size=2) - losses = head.loss(feats, batch_data_samples) - - self.assertIsInstance(losses['loss_kpt'], torch.Tensor) - self.assertEqual(losses['loss_kpt'].shape, torch.Size(())) - self.assertIsInstance(losses['acc_pose'], torch.Tensor) - - num_stages = 6 - head = CPMHead( - num_stages=num_stages, - in_channels=17, - out_channels=17, - final_layer=None, - loss=[dict(type='KeypointMSELoss', use_target_weight=True)] * - num_stages) - - with self.assertRaisesRegex( - AssertionError, - 'length of feature maps did not match the `num_stages`'): - feats = self._get_feats(batch_size=2, feat_shapes=[(17, 32, 24)]) - batch_data_samples = self._get_data_samples(batch_size=2) - _ = head.loss(feats, batch_data_samples) - - feats = self._get_feats( - batch_size=2, feat_shapes=[(17, 32, 24)] * num_stages) - batch_data_samples = self._get_data_samples(batch_size=2) - losses = head.loss(feats, batch_data_samples) - - self.assertIsInstance(losses['loss_kpt'], torch.Tensor) - 
self.assertEqual(losses['loss_kpt'].shape, torch.Size(())) - self.assertIsInstance(losses['acc_pose'], torch.Tensor) - - def test_errors(self): - # Invalid arguments - with self.assertRaisesRegex(ValueError, 'Got mismatched lengths'): - _ = CPMHead( - num_stages=1, - in_channels=17, - out_channels=17, - deconv_out_channels=(256, ), - deconv_kernel_sizes=(4, 4)) - - with self.assertRaisesRegex(ValueError, 'Unsupported kernel size'): - _ = CPMHead( - num_stages=1, - in_channels=17, - out_channels=17, - deconv_out_channels=(256, ), - deconv_kernel_sizes=(1, )) - - with self.assertRaisesRegex(ValueError, 'did not match `num_stages`'): - _ = CPMHead( - num_stages=6, - in_channels=17, - out_channels=17, - loss=[dict(type='KeypointMSELoss', use_target_weight=True)] * - 4) +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Tuple +from unittest import TestCase + +import torch +from mmengine.structures import InstanceData, PixelData +from torch import nn + +from mmpose.models.heads import CPMHead +from mmpose.testing import get_packed_inputs + + +class TestCPMHead(TestCase): + + def _get_feats(self, + batch_size: int = 2, + feat_shapes: List[Tuple[int, int, int]] = [(17, 32, 24)]): + + feats = [ + torch.rand((batch_size, ) + shape, dtype=torch.float32) + for shape in feat_shapes + ] + return feats + + def _get_data_samples(self, batch_size: int = 2): + batch_data_samples = get_packed_inputs( + batch_size=batch_size, + num_instances=1, + num_keypoints=17, + img_shape=(128, 128), + input_size=(192, 256), + heatmap_size=(24, 32), + with_heatmap=True, + with_reg_label=False)['data_samples'] + return batch_data_samples + + def test_init(self): + # w/o deconv + head = CPMHead( + num_stages=1, + in_channels=256, + out_channels=17, + deconv_out_channels=None) + self.assertTrue(isinstance(head.multi_deconv_layers, nn.ModuleList)) + self.assertTrue(isinstance(head.multi_deconv_layers[0], nn.Identity)) + + # w/ deconv + head = CPMHead( + num_stages=1, + in_channels=32, + out_channels=17, + deconv_out_channels=(32, 32), + deconv_kernel_sizes=(4, 4)) + self.assertTrue(isinstance(head.multi_deconv_layers, nn.ModuleList)) + self.assertTrue(isinstance(head.multi_deconv_layers[0], nn.Sequential)) + + # w/o final layer + head = CPMHead( + num_stages=6, in_channels=17, out_channels=17, final_layer=None) + self.assertTrue(isinstance(head.multi_final_layers, nn.ModuleList)) + self.assertTrue(isinstance(head.multi_final_layers[0], nn.Identity)) + + # w/ decoder + head = CPMHead( + num_stages=1, + in_channels=32, + out_channels=17, + decoder=dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2.)) + self.assertIsNotNone(head.decoder) + + # the same loss for different stages + head = CPMHead( + num_stages=6, + in_channels=17, + out_channels=17, + final_layer=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + ) + self.assertTrue(isinstance(head.loss_module, nn.Module)) + + # different loss for different stages + num_stages = 6 + head = CPMHead( + num_stages=num_stages, + in_channels=17, + out_channels=17, + final_layer=None, + loss=[dict(type='KeypointMSELoss', use_target_weight=True)] * 6, + ) + self.assertTrue(isinstance(head.loss_module, nn.ModuleList)) + self.assertTrue(len(head.loss_module), num_stages) + + def test_predict(self): + decoder_cfg = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(24, 32), + sigma=2.) 
+ + # num_stages = 6, final_layer = None + head = CPMHead( + num_stages=6, + in_channels=17, + out_channels=17, + final_layer=None, + decoder=decoder_cfg) + + with self.assertRaisesRegex( + AssertionError, + 'length of feature maps did not match the `num_stages`'): + feats = self._get_feats(batch_size=2, feat_shapes=[(17, 32, 24)]) + batch_data_samples = self._get_data_samples(batch_size=2) + _ = head.predict(feats, batch_data_samples) + + feats = self._get_feats(batch_size=2, feat_shapes=[(17, 32, 24)] * 6) + batch_data_samples = self._get_data_samples(batch_size=2) + preds = head.predict(feats, batch_data_samples) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual(preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + # num_stages = 1, final_layer = dict(kernel_size=1) + head = CPMHead( + num_stages=1, + in_channels=32, + out_channels=17, + final_layer=dict(kernel_size=1), + decoder=decoder_cfg) + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 32, 24)]) + batch_data_samples = self._get_data_samples(batch_size=2) + preds, pred_heatmaps = head.predict( + feats, batch_data_samples, test_cfg=dict(output_heatmaps=True)) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual(preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + self.assertTrue(len(pred_heatmaps), 2) + self.assertIsInstance(pred_heatmaps[0], PixelData) + self.assertEqual(pred_heatmaps[0].heatmaps.shape, (17, 32, 24)) + + def test_tta(self): + # flip test: heatmap + decoder_cfg = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(24, 32), + sigma=2.) + + head = CPMHead( + num_stages=1, + in_channels=32, + out_channels=17, + final_layer=dict(kernel_size=1), + decoder=decoder_cfg) + + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = self._get_data_samples(batch_size=2) + preds = head.predict([feats, feats], + batch_data_samples, + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual(preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + def test_loss(self): + # num_stages = 1 + head = CPMHead( + num_stages=1, + in_channels=32, + out_channels=17, + final_layer=dict(kernel_size=1)) + + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 32, 24)]) + batch_data_samples = self._get_data_samples(batch_size=2) + losses = head.loss(feats, batch_data_samples) + + self.assertIsInstance(losses['loss_kpt'], torch.Tensor) + self.assertEqual(losses['loss_kpt'].shape, torch.Size(())) + self.assertIsInstance(losses['acc_pose'], torch.Tensor) + + num_stages = 6 + head = CPMHead( + num_stages=num_stages, + in_channels=17, + out_channels=17, + final_layer=None, + loss=[dict(type='KeypointMSELoss', use_target_weight=True)] * + num_stages) + + with self.assertRaisesRegex( + AssertionError, + 'length of feature maps did not match the `num_stages`'): + feats = self._get_feats(batch_size=2, feat_shapes=[(17, 32, 24)]) + batch_data_samples = self._get_data_samples(batch_size=2) + _ = head.loss(feats, batch_data_samples) + + feats = self._get_feats( + batch_size=2, feat_shapes=[(17, 32, 24)] * num_stages) + batch_data_samples = self._get_data_samples(batch_size=2) + losses = head.loss(feats, batch_data_samples) + + self.assertIsInstance(losses['loss_kpt'], torch.Tensor) + 
self.assertEqual(losses['loss_kpt'].shape, torch.Size(())) + self.assertIsInstance(losses['acc_pose'], torch.Tensor) + + def test_errors(self): + # Invalid arguments + with self.assertRaisesRegex(ValueError, 'Got mismatched lengths'): + _ = CPMHead( + num_stages=1, + in_channels=17, + out_channels=17, + deconv_out_channels=(256, ), + deconv_kernel_sizes=(4, 4)) + + with self.assertRaisesRegex(ValueError, 'Unsupported kernel size'): + _ = CPMHead( + num_stages=1, + in_channels=17, + out_channels=17, + deconv_out_channels=(256, ), + deconv_kernel_sizes=(1, )) + + with self.assertRaisesRegex(ValueError, 'did not match `num_stages`'): + _ = CPMHead( + num_stages=6, + in_channels=17, + out_channels=17, + loss=[dict(type='KeypointMSELoss', use_target_weight=True)] * + 4) diff --git a/tests/test_models/test_heads/test_heatmap_heads/test_heatmap_head.py b/tests/test_models/test_heads/test_heatmap_heads/test_heatmap_head.py index f56603bbde..77bfbc0e49 100644 --- a/tests/test_models/test_heads/test_heatmap_heads/test_heatmap_head.py +++ b/tests/test_models/test_heads/test_heatmap_heads/test_heatmap_head.py @@ -1,221 +1,221 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import unittest -from typing import List, Tuple -from unittest import TestCase - -import torch -from mmengine.structures import InstanceData, PixelData -from torch import nn - -from mmpose.models.heads import HeatmapHead -from mmpose.testing import get_packed_inputs - - -class TestHeatmapHead(TestCase): - - def _get_feats(self, - batch_size: int = 2, - feat_shapes: List[Tuple[int, int, int]] = [(32, 8, 6)]): - - feats = [ - torch.rand((batch_size, ) + shape, dtype=torch.float32) - for shape in feat_shapes - ] - return feats - - def test_init(self): - # w/o deconv - head = HeatmapHead( - in_channels=32, out_channels=17, deconv_out_channels=None) - self.assertTrue(isinstance(head.deconv_layers, nn.Identity)) - - # w/ deconv and w/o conv - head = HeatmapHead( - in_channels=32, - out_channels=17, - deconv_out_channels=(32, 32), - deconv_kernel_sizes=(4, 4)) - self.assertTrue(isinstance(head.deconv_layers, nn.Sequential)) - self.assertTrue(isinstance(head.conv_layers, nn.Identity)) - - # w/ both deconv and conv - head = HeatmapHead( - in_channels=32, - out_channels=17, - deconv_out_channels=(32, 32), - deconv_kernel_sizes=(4, 4), - conv_out_channels=(32, ), - conv_kernel_sizes=(1, )) - self.assertTrue(isinstance(head.deconv_layers, nn.Sequential)) - self.assertTrue(isinstance(head.conv_layers, nn.Sequential)) - - # w/o final layer - head = HeatmapHead(in_channels=32, out_channels=17, final_layer=None) - self.assertTrue(isinstance(head.final_layer, nn.Identity)) - - # w/ decoder - head = HeatmapHead( - in_channels=32, - out_channels=17, - decoder=dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2.)) - self.assertIsNotNone(head.decoder) - - def test_predict(self): - decoder_cfg = dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2.) 
- - head = HeatmapHead( - in_channels=32, - out_channels=17, - deconv_out_channels=(256, 256), - deconv_kernel_sizes=(4, 4), - conv_out_channels=(256, ), - conv_kernel_sizes=(1, ), - decoder=decoder_cfg) - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] - preds = head.predict(feats, batch_data_samples) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual(preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - # output heatmap - head = HeatmapHead( - in_channels=32, out_channels=17, decoder=decoder_cfg) - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] - _, pred_heatmaps = head.predict( - feats, batch_data_samples, test_cfg=dict(output_heatmaps=True)) - - self.assertTrue(len(pred_heatmaps), 2) - self.assertIsInstance(pred_heatmaps[0], PixelData) - self.assertEqual(pred_heatmaps[0].heatmaps.shape, (17, 64, 48)) - - def test_tta(self): - # flip test: heatmap - decoder_cfg = dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2.) - - head = HeatmapHead( - in_channels=32, out_channels=17, decoder=decoder_cfg) - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] - preds = head.predict([feats, feats], - batch_data_samples, - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual(preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - # flip test: udp_combine - decoder_cfg = dict( - type='UDPHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - heatmap_type='combined') - head = HeatmapHead( - in_channels=32, out_channels=17 * 3, decoder=decoder_cfg) - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] - preds = head.predict([feats, feats], - batch_data_samples, - test_cfg=dict( - flip_test=True, - flip_mode='udp_combined', - shift_heatmap=False, - )) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual(preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - def test_loss(self): - head = HeatmapHead(in_channels=32, out_channels=17) - - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] - losses = head.loss(feats, batch_data_samples) - self.assertIsInstance(losses['loss_kpt'], torch.Tensor) - self.assertEqual(losses['loss_kpt'].shape, torch.Size(())) - self.assertIsInstance(losses['acc_pose'], torch.Tensor) - - def test_errors(self): - # Invalid arguments - with self.assertRaisesRegex(ValueError, 'Got mismatched lengths'): - _ = HeatmapHead( - in_channels=32, - out_channels=17, - deconv_out_channels=(256, ), - deconv_kernel_sizes=(4, 4)) - - with self.assertRaisesRegex(ValueError, 'Got mismatched lengths'): - _ = HeatmapHead( - in_channels=32, out_channels=17, conv_out_channels=(256, )) - - with self.assertRaisesRegex(ValueError, 'Unsupported kernel size'): - _ = HeatmapHead( - in_channels=16, - out_channels=17, - deconv_out_channels=(256, ), - deconv_kernel_sizes=(1, )) - - def test_state_dict_compatible(self): - # Typical setting 
for HRNet - head = HeatmapHead( - in_channels=32, out_channels=17, deconv_out_channels=None) - - state_dict = { - 'final_layer.weight': torch.zeros((17, 32, 1, 1)), - 'final_layer.bias': torch.zeros((17)) - } - head.load_state_dict(state_dict) - - # Typical setting for Resnet - head = HeatmapHead(in_channels=2048, out_channels=17) - - state_dict = { - 'deconv_layers.0.weight': torch.zeros([2048, 256, 4, 4]), - 'deconv_layers.1.weight': torch.zeros([256]), - 'deconv_layers.1.bias': torch.zeros([256]), - 'deconv_layers.1.running_mean': torch.zeros([256]), - 'deconv_layers.1.running_var': torch.zeros([256]), - 'deconv_layers.1.num_batches_tracked': torch.zeros([]), - 'deconv_layers.3.weight': torch.zeros([256, 256, 4, 4]), - 'deconv_layers.4.weight': torch.zeros([256]), - 'deconv_layers.4.bias': torch.zeros([256]), - 'deconv_layers.4.running_mean': torch.zeros([256]), - 'deconv_layers.4.running_var': torch.zeros([256]), - 'deconv_layers.4.num_batches_tracked': torch.zeros([]), - 'deconv_layers.6.weight': torch.zeros([256, 256, 4, 4]), - 'deconv_layers.7.weight': torch.zeros([256]), - 'deconv_layers.7.bias': torch.zeros([256]), - 'deconv_layers.7.running_mean': torch.zeros([256]), - 'deconv_layers.7.running_var': torch.zeros([256]), - 'deconv_layers.7.num_batches_tracked': torch.zeros([]), - 'final_layer.weight': torch.zeros([17, 256, 1, 1]), - 'final_layer.bias': torch.zeros([17]) - } - head.load_state_dict(state_dict) - - -if __name__ == '__main__': - unittest.main() +# Copyright (c) OpenMMLab. All rights reserved. +import unittest +from typing import List, Tuple +from unittest import TestCase + +import torch +from mmengine.structures import InstanceData, PixelData +from torch import nn + +from mmpose.models.heads import HeatmapHead +from mmpose.testing import get_packed_inputs + + +class TestHeatmapHead(TestCase): + + def _get_feats(self, + batch_size: int = 2, + feat_shapes: List[Tuple[int, int, int]] = [(32, 8, 6)]): + + feats = [ + torch.rand((batch_size, ) + shape, dtype=torch.float32) + for shape in feat_shapes + ] + return feats + + def test_init(self): + # w/o deconv + head = HeatmapHead( + in_channels=32, out_channels=17, deconv_out_channels=None) + self.assertTrue(isinstance(head.deconv_layers, nn.Identity)) + + # w/ deconv and w/o conv + head = HeatmapHead( + in_channels=32, + out_channels=17, + deconv_out_channels=(32, 32), + deconv_kernel_sizes=(4, 4)) + self.assertTrue(isinstance(head.deconv_layers, nn.Sequential)) + self.assertTrue(isinstance(head.conv_layers, nn.Identity)) + + # w/ both deconv and conv + head = HeatmapHead( + in_channels=32, + out_channels=17, + deconv_out_channels=(32, 32), + deconv_kernel_sizes=(4, 4), + conv_out_channels=(32, ), + conv_kernel_sizes=(1, )) + self.assertTrue(isinstance(head.deconv_layers, nn.Sequential)) + self.assertTrue(isinstance(head.conv_layers, nn.Sequential)) + + # w/o final layer + head = HeatmapHead(in_channels=32, out_channels=17, final_layer=None) + self.assertTrue(isinstance(head.final_layer, nn.Identity)) + + # w/ decoder + head = HeatmapHead( + in_channels=32, + out_channels=17, + decoder=dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2.)) + self.assertIsNotNone(head.decoder) + + def test_predict(self): + decoder_cfg = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2.) 
+ + head = HeatmapHead( + in_channels=32, + out_channels=17, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + conv_out_channels=(256, ), + conv_kernel_sizes=(1, ), + decoder=decoder_cfg) + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] + preds = head.predict(feats, batch_data_samples) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual(preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + # output heatmap + head = HeatmapHead( + in_channels=32, out_channels=17, decoder=decoder_cfg) + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] + _, pred_heatmaps = head.predict( + feats, batch_data_samples, test_cfg=dict(output_heatmaps=True)) + + self.assertTrue(len(pred_heatmaps), 2) + self.assertIsInstance(pred_heatmaps[0], PixelData) + self.assertEqual(pred_heatmaps[0].heatmaps.shape, (17, 64, 48)) + + def test_tta(self): + # flip test: heatmap + decoder_cfg = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2.) + + head = HeatmapHead( + in_channels=32, out_channels=17, decoder=decoder_cfg) + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] + preds = head.predict([feats, feats], + batch_data_samples, + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual(preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + # flip test: udp_combine + decoder_cfg = dict( + type='UDPHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + heatmap_type='combined') + head = HeatmapHead( + in_channels=32, out_channels=17 * 3, decoder=decoder_cfg) + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] + preds = head.predict([feats, feats], + batch_data_samples, + test_cfg=dict( + flip_test=True, + flip_mode='udp_combined', + shift_heatmap=False, + )) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual(preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + def test_loss(self): + head = HeatmapHead(in_channels=32, out_channels=17) + + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] + losses = head.loss(feats, batch_data_samples) + self.assertIsInstance(losses['loss_kpt'], torch.Tensor) + self.assertEqual(losses['loss_kpt'].shape, torch.Size(())) + self.assertIsInstance(losses['acc_pose'], torch.Tensor) + + def test_errors(self): + # Invalid arguments + with self.assertRaisesRegex(ValueError, 'Got mismatched lengths'): + _ = HeatmapHead( + in_channels=32, + out_channels=17, + deconv_out_channels=(256, ), + deconv_kernel_sizes=(4, 4)) + + with self.assertRaisesRegex(ValueError, 'Got mismatched lengths'): + _ = HeatmapHead( + in_channels=32, out_channels=17, conv_out_channels=(256, )) + + with self.assertRaisesRegex(ValueError, 'Unsupported kernel size'): + _ = HeatmapHead( + in_channels=16, + out_channels=17, + deconv_out_channels=(256, ), + deconv_kernel_sizes=(1, )) + + def test_state_dict_compatible(self): + # Typical setting 
for HRNet + head = HeatmapHead( + in_channels=32, out_channels=17, deconv_out_channels=None) + + state_dict = { + 'final_layer.weight': torch.zeros((17, 32, 1, 1)), + 'final_layer.bias': torch.zeros((17)) + } + head.load_state_dict(state_dict) + + # Typical setting for Resnet + head = HeatmapHead(in_channels=2048, out_channels=17) + + state_dict = { + 'deconv_layers.0.weight': torch.zeros([2048, 256, 4, 4]), + 'deconv_layers.1.weight': torch.zeros([256]), + 'deconv_layers.1.bias': torch.zeros([256]), + 'deconv_layers.1.running_mean': torch.zeros([256]), + 'deconv_layers.1.running_var': torch.zeros([256]), + 'deconv_layers.1.num_batches_tracked': torch.zeros([]), + 'deconv_layers.3.weight': torch.zeros([256, 256, 4, 4]), + 'deconv_layers.4.weight': torch.zeros([256]), + 'deconv_layers.4.bias': torch.zeros([256]), + 'deconv_layers.4.running_mean': torch.zeros([256]), + 'deconv_layers.4.running_var': torch.zeros([256]), + 'deconv_layers.4.num_batches_tracked': torch.zeros([]), + 'deconv_layers.6.weight': torch.zeros([256, 256, 4, 4]), + 'deconv_layers.7.weight': torch.zeros([256]), + 'deconv_layers.7.bias': torch.zeros([256]), + 'deconv_layers.7.running_mean': torch.zeros([256]), + 'deconv_layers.7.running_var': torch.zeros([256]), + 'deconv_layers.7.num_batches_tracked': torch.zeros([]), + 'final_layer.weight': torch.zeros([17, 256, 1, 1]), + 'final_layer.bias': torch.zeros([17]) + } + head.load_state_dict(state_dict) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_models/test_heads/test_heatmap_heads/test_mspn_head.py b/tests/test_models/test_heads/test_heatmap_heads/test_mspn_head.py index ce3d19b688..e2a4992e52 100644 --- a/tests/test_models/test_heads/test_heatmap_heads/test_mspn_head.py +++ b/tests/test_models/test_heads/test_heatmap_heads/test_mspn_head.py @@ -1,357 +1,357 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from typing import List, Tuple -from unittest import TestCase - -import torch -from mmengine.structures import InstanceData, PixelData -from torch import Tensor, nn - -from mmpose.models.heads import MSPNHead -from mmpose.testing import get_packed_inputs - - -class TestMSPNHead(TestCase): - - def _get_feats( - self, - num_stages: int = 1, - num_units: int = 4, - batch_size: int = 2, - feat_shapes: List[Tuple[int, int, int]] = [(17, 64, 48)] - ) -> List[List[Tensor]]: - feats_stages = [] - for i in range(num_stages): - feats_units = [] - for j in range(num_units): - feats_units.append( - torch.rand( - (batch_size, ) + feat_shapes[j], dtype=torch.float32)) - feats_stages.append(feats_units) - - return feats_stages - - def _get_data_samples(self, - batch_size: int = 2, - heatmap_size=(48, 64), - num_levels=1): - batch_data_samples = get_packed_inputs( - batch_size=batch_size, - num_instances=1, - num_keypoints=17, - img_shape=(128, 128), - input_size=(192, 256), - heatmap_size=heatmap_size, - with_heatmap=True, - with_reg_label=False, - num_levels=num_levels)['data_samples'] - return batch_data_samples - - def test_init(self): - # w/ decoder - head = MSPNHead( - num_stages=1, - num_units=4, - out_shape=(64, 48), - unit_channels=256, - out_channels=17, - use_prm=False, - norm_cfg=dict(type='BN'), - level_indices=[0, 1, 2, 3], - decoder=dict( - type='MegviiHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - kernel_size=11)) - self.assertIsNotNone(head.decoder) - - # the same loss for different stages - head = MSPNHead( - num_stages=1, - num_units=4, - out_shape=(64, 48), - unit_channels=256, - out_channels=17, - use_prm=False, - norm_cfg=dict(type='BN'), - loss=dict(type='KeypointMSELoss', use_target_weight=True), - level_indices=[0, 1, 2, 3], - ) - self.assertTrue(isinstance(head.loss_module, nn.Module)) - - # different loss for different stages and different units - head = MSPNHead( - num_stages=2, - num_units=4, - out_shape=(64, 48), - unit_channels=256, - out_channels=17, - use_prm=False, - norm_cfg=dict(type='BN'), - loss=[dict(type='KeypointMSELoss', use_target_weight=True)] * 8, - level_indices=[0, 1, 2, 3, 1, 2, 3, 4], - ) - self.assertTrue(isinstance(head.loss_module, nn.ModuleList)) - self.assertTrue(len(head.loss_module), 8) - - def test_loss(self): - # num_stages = 1, num_units = 4, unit_channels = 16 - # the same loss for all different stages and units - unit_channels = 16 - head = MSPNHead( - num_stages=1, - num_units=4, - out_shape=(64, 48), - unit_channels=unit_channels, - out_channels=17, - level_indices=[0, 1, 2, 3]) - - with self.assertRaisesRegex( - AssertionError, - 'length of feature maps did not match the `num_stages`'): - feats = self._get_feats( - num_stages=2, - num_units=4, - batch_size=2, - feat_shapes=[(unit_channels, 8, 6), (unit_channels, 16, 12), - (unit_channels, 32, 24), (unit_channels, 64, 48)]) - batch_data_samples = self._get_data_samples( - batch_size=2, heatmap_size=(48, 64), num_levels=8) - _ = head.loss(feats, batch_data_samples) - - with self.assertRaisesRegex( - AssertionError, - 'length of feature maps did not match the `num_units`'): - feats = self._get_feats( - num_stages=1, - num_units=2, - batch_size=2, - feat_shapes=[(unit_channels, 8, 6), (unit_channels, 16, 12)]) - batch_data_samples = self._get_data_samples( - batch_size=2, heatmap_size=(48, 64), num_levels=2) - _ = head.loss(feats, batch_data_samples) - - with self.assertRaisesRegex( - AssertionError, - 'number of feature map channels did not match'): - feats = self._get_feats( - 
num_stages=1, - num_units=4, - batch_size=2, - feat_shapes=[(unit_channels * 2, 8, 6), - (unit_channels * 2, 16, 12), - (unit_channels * 2, 32, 24), - (unit_channels * 2, 64, 48)]) - batch_data_samples = self._get_data_samples( - batch_size=2, heatmap_size=(48, 64), num_levels=4) - _ = head.loss(feats, batch_data_samples) - - feats = self._get_feats( - num_stages=1, - num_units=4, - batch_size=2, - feat_shapes=[(unit_channels, 8, 6), (unit_channels, 16, 12), - (unit_channels, 32, 24), (unit_channels, 64, 48)]) - batch_data_samples = self._get_data_samples( - batch_size=2, heatmap_size=(48, 64), num_levels=4) - losses = head.loss(feats, batch_data_samples) - - self.assertIsInstance(losses['loss_kpt'], torch.Tensor) - self.assertEqual(losses['loss_kpt'].shape, torch.Size(())) - self.assertIsInstance(losses['acc_pose'], torch.Tensor) - - # num_stages = 4, num_units = 4, unit_channels = 16 - # different losses for different stages and units - unit_channels = 16 - head = MSPNHead( - num_stages=4, - num_units=4, - out_shape=(64, 48), - unit_channels=unit_channels, - out_channels=17, - loss=([ - dict( - type='KeypointMSELoss', - use_target_weight=True, - loss_weight=0.25) - ] * 3 + [ - dict( - type='KeypointOHKMMSELoss', - use_target_weight=True, - loss_weight=0.1) - ]) * 4, - level_indices=[0, 1, 2, 3] * 3 + [1, 2, 3, 4]) - - feats = self._get_feats( - num_stages=4, - num_units=4, - batch_size=2, - feat_shapes=[(unit_channels, 8, 6), (unit_channels, 16, 12), - (unit_channels, 32, 24), (unit_channels, 64, 48)]) - batch_data_samples = self._get_data_samples( - batch_size=2, heatmap_size=(48, 64), num_levels=16) - losses = head.loss(feats, batch_data_samples) - - self.assertIsInstance(losses['loss_kpt'], torch.Tensor) - self.assertEqual(losses['loss_kpt'].shape, torch.Size(())) - self.assertIsInstance(losses['acc_pose'], torch.Tensor) - - def test_predict(self): - decoder_cfg = dict( - type='MegviiHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - kernel_size=11) - - # num_stages = 1, num_units = 4, unit_channels = 16 - unit_channels = 16 - head = MSPNHead( - num_stages=1, - num_units=4, - out_shape=(64, 48), - unit_channels=unit_channels, - out_channels=17, - level_indices=[0, 1, 2, 3], - decoder=decoder_cfg) - - with self.assertRaisesRegex( - AssertionError, - 'length of feature maps did not match the `num_stages`'): - feats = self._get_feats( - num_stages=2, - num_units=4, - batch_size=2, - feat_shapes=[(unit_channels, 8, 6), (unit_channels, 16, 12), - (unit_channels, 32, 24), (unit_channels, 64, 48)]) - batch_data_samples = self._get_data_samples( - batch_size=2, heatmap_size=(48, 64), num_levels=8) - _ = head.predict(feats, batch_data_samples) - - with self.assertRaisesRegex( - AssertionError, - 'length of feature maps did not match the `num_units`'): - feats = self._get_feats( - num_stages=1, - num_units=2, - batch_size=2, - feat_shapes=[(unit_channels, 8, 6), (unit_channels, 16, 12)]) - batch_data_samples = self._get_data_samples( - batch_size=2, heatmap_size=(48, 64), num_levels=2) - _ = head.predict(feats, batch_data_samples) - - with self.assertRaisesRegex( - AssertionError, - 'number of feature map channels did not match'): - feats = self._get_feats( - num_stages=1, - num_units=4, - batch_size=2, - feat_shapes=[(unit_channels * 2, 8, 6), - (unit_channels * 2, 16, 12), - (unit_channels * 2, 32, 24), - (unit_channels * 2, 64, 48)]) - batch_data_samples = self._get_data_samples( - batch_size=2, heatmap_size=(48, 64), num_levels=4) - _ = head.predict(feats, batch_data_samples) - - 
feats = self._get_feats( - num_stages=1, - num_units=4, - batch_size=2, - feat_shapes=[(unit_channels, 8, 6), (unit_channels, 16, 12), - (unit_channels, 32, 24), (unit_channels, 64, 48)]) - batch_data_samples = self._get_data_samples( - batch_size=2, heatmap_size=(48, 64), num_levels=4) - preds = head.predict(feats, batch_data_samples) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual(preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - # num_stages = 4, num_units = 4, unit_channels = 16 - unit_channels = 16 - head = MSPNHead( - num_stages=4, - num_units=4, - out_shape=(64, 48), - unit_channels=unit_channels, - out_channels=17, - level_indices=[0, 1, 2, 3] * 3 + [1, 2, 3, 4], - decoder=decoder_cfg) - feats = self._get_feats( - num_stages=4, - num_units=4, - batch_size=2, - feat_shapes=[(unit_channels, 8, 6), (unit_channels, 16, 12), - (unit_channels, 32, 24), (unit_channels, 64, 48)]) - batch_data_samples = self._get_data_samples( - batch_size=2, heatmap_size=(48, 64), num_levels=16) - preds, pred_heatmaps = head.predict( - feats, batch_data_samples, test_cfg=dict(output_heatmaps=True)) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual(preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - self.assertTrue(len(pred_heatmaps), 2) - self.assertIsInstance(pred_heatmaps[0], PixelData) - self.assertEqual(pred_heatmaps[0].heatmaps.shape, (17, 64, 48)) - - def test_tta(self): - # flip test: heatmap - decoder_cfg = dict( - type='MegviiHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - kernel_size=11) - - unit_channels = 16 - head = MSPNHead( - num_stages=1, - num_units=4, - out_shape=(64, 48), - unit_channels=unit_channels, - out_channels=17, - level_indices=[0, 1, 2, 3], - decoder=decoder_cfg) - - feats = self._get_feats( - num_stages=1, - num_units=4, - batch_size=2, - feat_shapes=[(unit_channels, 8, 6), (unit_channels, 16, 12), - (unit_channels, 32, 24), (unit_channels, 64, 48)]) - batch_data_samples = self._get_data_samples(batch_size=2) - preds = head.predict([feats, feats], - batch_data_samples, - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual(preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - def test_errors(self): - # Invalid arguments - with self.assertRaisesRegex(ValueError, 'The length of level_indices'): - _ = MSPNHead( - num_stages=2, - num_units=4, - out_shape=(64, 48), - unit_channels=256, - out_channels=17, - level_indices=[0]) - with self.assertRaisesRegex(ValueError, 'The length of loss_module'): - _ = MSPNHead( - num_stages=2, - num_units=4, - out_shape=(64, 48), - unit_channels=256, - out_channels=17, - level_indices=[0, 1, 2, 3, 1, 2, 3, 4], - loss=[dict(type='KeypointMSELoss', use_target_weight=True)] * - 3) +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import List, Tuple +from unittest import TestCase + +import torch +from mmengine.structures import InstanceData, PixelData +from torch import Tensor, nn + +from mmpose.models.heads import MSPNHead +from mmpose.testing import get_packed_inputs + + +class TestMSPNHead(TestCase): + + def _get_feats( + self, + num_stages: int = 1, + num_units: int = 4, + batch_size: int = 2, + feat_shapes: List[Tuple[int, int, int]] = [(17, 64, 48)] + ) -> List[List[Tensor]]: + feats_stages = [] + for i in range(num_stages): + feats_units = [] + for j in range(num_units): + feats_units.append( + torch.rand( + (batch_size, ) + feat_shapes[j], dtype=torch.float32)) + feats_stages.append(feats_units) + + return feats_stages + + def _get_data_samples(self, + batch_size: int = 2, + heatmap_size=(48, 64), + num_levels=1): + batch_data_samples = get_packed_inputs( + batch_size=batch_size, + num_instances=1, + num_keypoints=17, + img_shape=(128, 128), + input_size=(192, 256), + heatmap_size=heatmap_size, + with_heatmap=True, + with_reg_label=False, + num_levels=num_levels)['data_samples'] + return batch_data_samples + + def test_init(self): + # w/ decoder + head = MSPNHead( + num_stages=1, + num_units=4, + out_shape=(64, 48), + unit_channels=256, + out_channels=17, + use_prm=False, + norm_cfg=dict(type='BN'), + level_indices=[0, 1, 2, 3], + decoder=dict( + type='MegviiHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + kernel_size=11)) + self.assertIsNotNone(head.decoder) + + # the same loss for different stages + head = MSPNHead( + num_stages=1, + num_units=4, + out_shape=(64, 48), + unit_channels=256, + out_channels=17, + use_prm=False, + norm_cfg=dict(type='BN'), + loss=dict(type='KeypointMSELoss', use_target_weight=True), + level_indices=[0, 1, 2, 3], + ) + self.assertTrue(isinstance(head.loss_module, nn.Module)) + + # different loss for different stages and different units + head = MSPNHead( + num_stages=2, + num_units=4, + out_shape=(64, 48), + unit_channels=256, + out_channels=17, + use_prm=False, + norm_cfg=dict(type='BN'), + loss=[dict(type='KeypointMSELoss', use_target_weight=True)] * 8, + level_indices=[0, 1, 2, 3, 1, 2, 3, 4], + ) + self.assertTrue(isinstance(head.loss_module, nn.ModuleList)) + self.assertTrue(len(head.loss_module), 8) + + def test_loss(self): + # num_stages = 1, num_units = 4, unit_channels = 16 + # the same loss for all different stages and units + unit_channels = 16 + head = MSPNHead( + num_stages=1, + num_units=4, + out_shape=(64, 48), + unit_channels=unit_channels, + out_channels=17, + level_indices=[0, 1, 2, 3]) + + with self.assertRaisesRegex( + AssertionError, + 'length of feature maps did not match the `num_stages`'): + feats = self._get_feats( + num_stages=2, + num_units=4, + batch_size=2, + feat_shapes=[(unit_channels, 8, 6), (unit_channels, 16, 12), + (unit_channels, 32, 24), (unit_channels, 64, 48)]) + batch_data_samples = self._get_data_samples( + batch_size=2, heatmap_size=(48, 64), num_levels=8) + _ = head.loss(feats, batch_data_samples) + + with self.assertRaisesRegex( + AssertionError, + 'length of feature maps did not match the `num_units`'): + feats = self._get_feats( + num_stages=1, + num_units=2, + batch_size=2, + feat_shapes=[(unit_channels, 8, 6), (unit_channels, 16, 12)]) + batch_data_samples = self._get_data_samples( + batch_size=2, heatmap_size=(48, 64), num_levels=2) + _ = head.loss(feats, batch_data_samples) + + with self.assertRaisesRegex( + AssertionError, + 'number of feature map channels did not match'): + feats = self._get_feats( + 
num_stages=1, + num_units=4, + batch_size=2, + feat_shapes=[(unit_channels * 2, 8, 6), + (unit_channels * 2, 16, 12), + (unit_channels * 2, 32, 24), + (unit_channels * 2, 64, 48)]) + batch_data_samples = self._get_data_samples( + batch_size=2, heatmap_size=(48, 64), num_levels=4) + _ = head.loss(feats, batch_data_samples) + + feats = self._get_feats( + num_stages=1, + num_units=4, + batch_size=2, + feat_shapes=[(unit_channels, 8, 6), (unit_channels, 16, 12), + (unit_channels, 32, 24), (unit_channels, 64, 48)]) + batch_data_samples = self._get_data_samples( + batch_size=2, heatmap_size=(48, 64), num_levels=4) + losses = head.loss(feats, batch_data_samples) + + self.assertIsInstance(losses['loss_kpt'], torch.Tensor) + self.assertEqual(losses['loss_kpt'].shape, torch.Size(())) + self.assertIsInstance(losses['acc_pose'], torch.Tensor) + + # num_stages = 4, num_units = 4, unit_channels = 16 + # different losses for different stages and units + unit_channels = 16 + head = MSPNHead( + num_stages=4, + num_units=4, + out_shape=(64, 48), + unit_channels=unit_channels, + out_channels=17, + loss=([ + dict( + type='KeypointMSELoss', + use_target_weight=True, + loss_weight=0.25) + ] * 3 + [ + dict( + type='KeypointOHKMMSELoss', + use_target_weight=True, + loss_weight=0.1) + ]) * 4, + level_indices=[0, 1, 2, 3] * 3 + [1, 2, 3, 4]) + + feats = self._get_feats( + num_stages=4, + num_units=4, + batch_size=2, + feat_shapes=[(unit_channels, 8, 6), (unit_channels, 16, 12), + (unit_channels, 32, 24), (unit_channels, 64, 48)]) + batch_data_samples = self._get_data_samples( + batch_size=2, heatmap_size=(48, 64), num_levels=16) + losses = head.loss(feats, batch_data_samples) + + self.assertIsInstance(losses['loss_kpt'], torch.Tensor) + self.assertEqual(losses['loss_kpt'].shape, torch.Size(())) + self.assertIsInstance(losses['acc_pose'], torch.Tensor) + + def test_predict(self): + decoder_cfg = dict( + type='MegviiHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + kernel_size=11) + + # num_stages = 1, num_units = 4, unit_channels = 16 + unit_channels = 16 + head = MSPNHead( + num_stages=1, + num_units=4, + out_shape=(64, 48), + unit_channels=unit_channels, + out_channels=17, + level_indices=[0, 1, 2, 3], + decoder=decoder_cfg) + + with self.assertRaisesRegex( + AssertionError, + 'length of feature maps did not match the `num_stages`'): + feats = self._get_feats( + num_stages=2, + num_units=4, + batch_size=2, + feat_shapes=[(unit_channels, 8, 6), (unit_channels, 16, 12), + (unit_channels, 32, 24), (unit_channels, 64, 48)]) + batch_data_samples = self._get_data_samples( + batch_size=2, heatmap_size=(48, 64), num_levels=8) + _ = head.predict(feats, batch_data_samples) + + with self.assertRaisesRegex( + AssertionError, + 'length of feature maps did not match the `num_units`'): + feats = self._get_feats( + num_stages=1, + num_units=2, + batch_size=2, + feat_shapes=[(unit_channels, 8, 6), (unit_channels, 16, 12)]) + batch_data_samples = self._get_data_samples( + batch_size=2, heatmap_size=(48, 64), num_levels=2) + _ = head.predict(feats, batch_data_samples) + + with self.assertRaisesRegex( + AssertionError, + 'number of feature map channels did not match'): + feats = self._get_feats( + num_stages=1, + num_units=4, + batch_size=2, + feat_shapes=[(unit_channels * 2, 8, 6), + (unit_channels * 2, 16, 12), + (unit_channels * 2, 32, 24), + (unit_channels * 2, 64, 48)]) + batch_data_samples = self._get_data_samples( + batch_size=2, heatmap_size=(48, 64), num_levels=4) + _ = head.predict(feats, batch_data_samples) + + 
feats = self._get_feats( + num_stages=1, + num_units=4, + batch_size=2, + feat_shapes=[(unit_channels, 8, 6), (unit_channels, 16, 12), + (unit_channels, 32, 24), (unit_channels, 64, 48)]) + batch_data_samples = self._get_data_samples( + batch_size=2, heatmap_size=(48, 64), num_levels=4) + preds = head.predict(feats, batch_data_samples) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual(preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + # num_stages = 4, num_units = 4, unit_channels = 16 + unit_channels = 16 + head = MSPNHead( + num_stages=4, + num_units=4, + out_shape=(64, 48), + unit_channels=unit_channels, + out_channels=17, + level_indices=[0, 1, 2, 3] * 3 + [1, 2, 3, 4], + decoder=decoder_cfg) + feats = self._get_feats( + num_stages=4, + num_units=4, + batch_size=2, + feat_shapes=[(unit_channels, 8, 6), (unit_channels, 16, 12), + (unit_channels, 32, 24), (unit_channels, 64, 48)]) + batch_data_samples = self._get_data_samples( + batch_size=2, heatmap_size=(48, 64), num_levels=16) + preds, pred_heatmaps = head.predict( + feats, batch_data_samples, test_cfg=dict(output_heatmaps=True)) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual(preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + self.assertTrue(len(pred_heatmaps), 2) + self.assertIsInstance(pred_heatmaps[0], PixelData) + self.assertEqual(pred_heatmaps[0].heatmaps.shape, (17, 64, 48)) + + def test_tta(self): + # flip test: heatmap + decoder_cfg = dict( + type='MegviiHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + kernel_size=11) + + unit_channels = 16 + head = MSPNHead( + num_stages=1, + num_units=4, + out_shape=(64, 48), + unit_channels=unit_channels, + out_channels=17, + level_indices=[0, 1, 2, 3], + decoder=decoder_cfg) + + feats = self._get_feats( + num_stages=1, + num_units=4, + batch_size=2, + feat_shapes=[(unit_channels, 8, 6), (unit_channels, 16, 12), + (unit_channels, 32, 24), (unit_channels, 64, 48)]) + batch_data_samples = self._get_data_samples(batch_size=2) + preds = head.predict([feats, feats], + batch_data_samples, + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual(preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + def test_errors(self): + # Invalid arguments + with self.assertRaisesRegex(ValueError, 'The length of level_indices'): + _ = MSPNHead( + num_stages=2, + num_units=4, + out_shape=(64, 48), + unit_channels=256, + out_channels=17, + level_indices=[0]) + with self.assertRaisesRegex(ValueError, 'The length of loss_module'): + _ = MSPNHead( + num_stages=2, + num_units=4, + out_shape=(64, 48), + unit_channels=256, + out_channels=17, + level_indices=[0, 1, 2, 3, 1, 2, 3, 4], + loss=[dict(type='KeypointMSELoss', use_target_weight=True)] * + 3) diff --git a/tests/test_models/test_heads/test_heatmap_heads/test_rtmcc_head.py b/tests/test_models/test_heads/test_heatmap_heads/test_rtmcc_head.py index 677a5372f7..0ba676b90d 100644 --- a/tests/test_models/test_heads/test_heatmap_heads/test_rtmcc_head.py +++ b/tests/test_models/test_heads/test_heatmap_heads/test_rtmcc_head.py @@ -1,684 +1,684 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import unittest -from typing import List, Tuple -from unittest import TestCase - -import torch -import torch.nn as nn -from mmengine.structures import InstanceData -from mmengine.utils import digit_version -from mmengine.utils.dl_utils import TORCH_VERSION - -from mmpose.models.heads import RTMCCHead -from mmpose.models.utils import RTMCCBlock -from mmpose.testing import get_packed_inputs - - -class TestRTMCCHead(TestCase): - - def _get_feats(self, - batch_size: int = 2, - feat_shapes: List[Tuple[int, int, int]] = [(32, 6, 8)]): - - feats = [ - torch.rand((batch_size, ) + shape, dtype=torch.float32) - for shape in feat_shapes - ] - return feats - - def test_init(self): - - if digit_version(TORCH_VERSION) < digit_version('1.7.0'): - return unittest.skip('RTMCCHead requires PyTorch >= 1.7') - - # original version - head = RTMCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(6, 8), - simcc_split_ratio=2.0, - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - decoder=dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='gaussian', - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False)) - self.assertIsNotNone(head.decoder) - self.assertTrue(isinstance(head.final_layer, nn.Conv2d)) - self.assertTrue(isinstance(head.mlp, nn.Sequential)) - self.assertTrue(isinstance(head.gau, RTMCCBlock)) - self.assertTrue(isinstance(head.cls_x, nn.Linear)) - self.assertTrue(isinstance(head.cls_y, nn.Linear)) - - # w/ 1x1 conv - head = RTMCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(6, 8), - simcc_split_ratio=2.0, - final_layer_kernel_size=1, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - decoder=dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='gaussian', - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False)) - self.assertIsNotNone(head.decoder) - self.assertTrue(isinstance(head.final_layer, nn.Conv2d)) - self.assertTrue(isinstance(head.mlp, nn.Sequential)) - self.assertTrue(isinstance(head.gau, RTMCCBlock)) - self.assertTrue(isinstance(head.cls_x, nn.Linear)) - self.assertTrue(isinstance(head.cls_y, nn.Linear)) - - # hidden_dims - head = RTMCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(6, 8), - simcc_split_ratio=2.0, - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=512, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - decoder=dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='gaussian', - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False)) - self.assertIsNotNone(head.decoder) - self.assertTrue(isinstance(head.final_layer, nn.Conv2d)) - self.assertTrue(isinstance(head.mlp, nn.Sequential)) - self.assertTrue(isinstance(head.gau, RTMCCBlock)) - self.assertTrue(isinstance(head.cls_x, nn.Linear)) - self.assertTrue(isinstance(head.cls_y, nn.Linear)) - - # s = 256 - head = RTMCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(6, 8), - simcc_split_ratio=2.0, - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=256, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - 
pos_enc=False), - decoder=dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='gaussian', - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False)) - self.assertIsNotNone(head.decoder) - self.assertTrue(isinstance(head.final_layer, nn.Conv2d)) - self.assertTrue(isinstance(head.mlp, nn.Sequential)) - self.assertTrue(isinstance(head.gau, RTMCCBlock)) - self.assertTrue(isinstance(head.cls_x, nn.Linear)) - self.assertTrue(isinstance(head.cls_y, nn.Linear)) - - def test_predict(self): - - if digit_version(TORCH_VERSION) < digit_version('1.7.0'): - return unittest.skip('RTMCCHead requires PyTorch >= 1.7') - - decoder_cfg_list = [] - # original version - decoder_cfg = dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='gaussian', - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False) - decoder_cfg_list.append(decoder_cfg) - - # single sigma - decoder_cfg = dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='gaussian', - sigma=6., - simcc_split_ratio=2.0, - normalize=False) - decoder_cfg_list.append(decoder_cfg) - - # normalize - decoder_cfg = dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='gaussian', - sigma=6., - simcc_split_ratio=2.0, - normalize=True) - decoder_cfg_list.append(decoder_cfg) - - # dark - decoder_cfg = dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='gaussian', - sigma=6., - simcc_split_ratio=2.0, - use_dark=True) - decoder_cfg_list.append(decoder_cfg) - - for decoder_cfg in decoder_cfg_list: - head = RTMCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(6, 8), - simcc_split_ratio=2.0, - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - decoder=decoder_cfg) - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs( - batch_size=2, - simcc_split_ratio=decoder_cfg['simcc_split_ratio'], - with_simcc_label=True)['data_samples'] - preds, _ = head.predict(feats, batch_data_samples) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual( - preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - # 1x1 conv - head = RTMCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(6, 8), - simcc_split_ratio=2.0, - final_layer_kernel_size=1, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - decoder=decoder_cfg) - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs( - batch_size=2, - simcc_split_ratio=decoder_cfg['simcc_split_ratio'], - with_simcc_label=True)['data_samples'] - preds, _ = head.predict(feats, batch_data_samples) - - # hidden dims - head = RTMCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(6, 8), - simcc_split_ratio=2.0, - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=512, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - decoder=decoder_cfg) - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs( - batch_size=2, - simcc_split_ratio=decoder_cfg['simcc_split_ratio'], - 
with_simcc_label=True)['data_samples'] - preds, _ = head.predict(feats, batch_data_samples) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual( - preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - # s - head = RTMCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(6, 8), - simcc_split_ratio=2.0, - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=64, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - decoder=decoder_cfg) - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs( - batch_size=2, - simcc_split_ratio=decoder_cfg['simcc_split_ratio'], - with_simcc_label=True)['data_samples'] - preds, _ = head.predict(feats, batch_data_samples) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual( - preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - # expansion factor - head = RTMCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(6, 8), - simcc_split_ratio=2.0, - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=3, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - decoder=decoder_cfg) - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs( - batch_size=2, - simcc_split_ratio=decoder_cfg['simcc_split_ratio'], - with_simcc_label=True)['data_samples'] - preds, _ = head.predict(feats, batch_data_samples) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual( - preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - # drop path - head = RTMCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(6, 8), - simcc_split_ratio=2.0, - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0.1, - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - decoder=decoder_cfg) - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs( - batch_size=2, - simcc_split_ratio=decoder_cfg['simcc_split_ratio'], - with_simcc_label=True)['data_samples'] - preds, _ = head.predict(feats, batch_data_samples) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual( - preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - # act fn - head = RTMCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(6, 8), - simcc_split_ratio=2.0, - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='ReLU', - use_rel_bias=False, - pos_enc=False), - decoder=decoder_cfg) - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs( - batch_size=2, - simcc_split_ratio=decoder_cfg['simcc_split_ratio'], - with_simcc_label=True)['data_samples'] - preds, _ = head.predict(feats, batch_data_samples) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual( - preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - # 
use_rel_bias - head = RTMCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(6, 8), - simcc_split_ratio=2.0, - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=True, - pos_enc=False), - decoder=decoder_cfg) - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs( - batch_size=2, - simcc_split_ratio=decoder_cfg['simcc_split_ratio'], - with_simcc_label=True)['data_samples'] - preds, _ = head.predict(feats, batch_data_samples) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual( - preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - # pos_enc - head = RTMCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(6, 8), - simcc_split_ratio=2.0, - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=True), - decoder=decoder_cfg) - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs( - batch_size=2, - simcc_split_ratio=decoder_cfg['simcc_split_ratio'], - with_simcc_label=True)['data_samples'] - preds, _ = head.predict(feats, batch_data_samples) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual( - preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - # output_heatmaps - head = RTMCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(6, 8), - simcc_split_ratio=2.0, - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - decoder=decoder_cfg, - ) - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs( - batch_size=2, - simcc_split_ratio=decoder_cfg['simcc_split_ratio'], - with_simcc_label=True)['data_samples'] - preds, pred_heatmaps = head.predict( - feats, batch_data_samples, test_cfg=dict(output_heatmaps=True)) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual(preds[0].keypoint_x_labels.shape, (1, 17, 384)) - self.assertEqual(preds[0].keypoint_y_labels.shape, (1, 17, 512)) - self.assertEqual( - preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - self.assertEqual(pred_heatmaps[0].heatmaps.shape, (17, 512, 384)) - - def test_tta(self): - if digit_version(TORCH_VERSION) < digit_version('1.7.0'): - return unittest.skip('RTMCCHead requires PyTorch >= 1.7') - - # flip test - decoder_cfg = dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='gaussian', - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False) - - head = RTMCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(6, 8), - simcc_split_ratio=2.0, - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - decoder=decoder_cfg) - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs( - batch_size=2, simcc_split_ratio=2.0, - 
with_simcc_label=True)['data_samples'] - preds = head.predict([feats, feats], - batch_data_samples, - test_cfg=dict(flip_test=True)) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual(preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - def test_loss(self): - if digit_version(TORCH_VERSION) < digit_version('1.7.0'): - return unittest.skip('RTMCCHead requires PyTorch >= 1.7') - - decoder_cfg_list = [] - decoder_cfg = dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='gaussian', - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=False) - decoder_cfg_list.append(decoder_cfg) - - decoder_cfg = dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='gaussian', - sigma=(4.9, 5.66), - simcc_split_ratio=2.0, - normalize=True) - decoder_cfg_list.append(decoder_cfg) - - # decoder - for decoder_cfg in decoder_cfg_list: - head = RTMCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(6, 8), - simcc_split_ratio=2.0, - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=1., - label_softmax=False, - ), - decoder=decoder_cfg) - - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs( - batch_size=2, simcc_split_ratio=2.0, - with_simcc_label=True)['data_samples'] - losses = head.loss(feats, batch_data_samples) - self.assertIsInstance(losses['loss_kpt'], torch.Tensor) - self.assertEqual(losses['loss_kpt'].shape, torch.Size(())) - self.assertIsInstance(losses['acc_pose'], torch.Tensor) - - # beta = 10 - head = RTMCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(6, 8), - simcc_split_ratio=2.0, - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=False, - ), - decoder=decoder_cfg) - - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs( - batch_size=2, simcc_split_ratio=2.0, - with_simcc_label=True)['data_samples'] - losses = head.loss(feats, batch_data_samples) - self.assertIsInstance(losses['loss_kpt'], torch.Tensor) - self.assertEqual(losses['loss_kpt'].shape, torch.Size(())) - self.assertIsInstance(losses['acc_pose'], torch.Tensor) - - # label softmax - head = RTMCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(6, 8), - simcc_split_ratio=2.0, - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False), - loss=dict( - type='KLDiscretLoss', - use_target_weight=True, - beta=10., - label_softmax=True, - ), - decoder=decoder_cfg) - - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs( - batch_size=2, simcc_split_ratio=2.0, - with_simcc_label=True)['data_samples'] - losses = head.loss(feats, batch_data_samples) - self.assertIsInstance(losses['loss_kpt'], torch.Tensor) - self.assertEqual(losses['loss_kpt'].shape, torch.Size(())) - self.assertIsInstance(losses['acc_pose'], 
torch.Tensor) - - -if __name__ == '__main__': - unittest.main() +# Copyright (c) OpenMMLab. All rights reserved. +import unittest +from typing import List, Tuple +from unittest import TestCase + +import torch +import torch.nn as nn +from mmengine.structures import InstanceData +from mmengine.utils import digit_version +from mmengine.utils.dl_utils import TORCH_VERSION + +from mmpose.models.heads import RTMCCHead +from mmpose.models.utils import RTMCCBlock +from mmpose.testing import get_packed_inputs + + +class TestRTMCCHead(TestCase): + + def _get_feats(self, + batch_size: int = 2, + feat_shapes: List[Tuple[int, int, int]] = [(32, 6, 8)]): + + feats = [ + torch.rand((batch_size, ) + shape, dtype=torch.float32) + for shape in feat_shapes + ] + return feats + + def test_init(self): + + if digit_version(TORCH_VERSION) < digit_version('1.7.0'): + return unittest.skip('RTMCCHead requires PyTorch >= 1.7') + + # original version + head = RTMCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(6, 8), + simcc_split_ratio=2.0, + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + decoder=dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='gaussian', + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False)) + self.assertIsNotNone(head.decoder) + self.assertTrue(isinstance(head.final_layer, nn.Conv2d)) + self.assertTrue(isinstance(head.mlp, nn.Sequential)) + self.assertTrue(isinstance(head.gau, RTMCCBlock)) + self.assertTrue(isinstance(head.cls_x, nn.Linear)) + self.assertTrue(isinstance(head.cls_y, nn.Linear)) + + # w/ 1x1 conv + head = RTMCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(6, 8), + simcc_split_ratio=2.0, + final_layer_kernel_size=1, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + decoder=dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='gaussian', + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False)) + self.assertIsNotNone(head.decoder) + self.assertTrue(isinstance(head.final_layer, nn.Conv2d)) + self.assertTrue(isinstance(head.mlp, nn.Sequential)) + self.assertTrue(isinstance(head.gau, RTMCCBlock)) + self.assertTrue(isinstance(head.cls_x, nn.Linear)) + self.assertTrue(isinstance(head.cls_y, nn.Linear)) + + # hidden_dims + head = RTMCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(6, 8), + simcc_split_ratio=2.0, + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=512, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + decoder=dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='gaussian', + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False)) + self.assertIsNotNone(head.decoder) + self.assertTrue(isinstance(head.final_layer, nn.Conv2d)) + self.assertTrue(isinstance(head.mlp, nn.Sequential)) + self.assertTrue(isinstance(head.gau, RTMCCBlock)) + self.assertTrue(isinstance(head.cls_x, nn.Linear)) + self.assertTrue(isinstance(head.cls_y, nn.Linear)) + + # s = 256 + head = RTMCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(6, 8), + simcc_split_ratio=2.0, + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, 
+ s=256, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + decoder=dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='gaussian', + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False)) + self.assertIsNotNone(head.decoder) + self.assertTrue(isinstance(head.final_layer, nn.Conv2d)) + self.assertTrue(isinstance(head.mlp, nn.Sequential)) + self.assertTrue(isinstance(head.gau, RTMCCBlock)) + self.assertTrue(isinstance(head.cls_x, nn.Linear)) + self.assertTrue(isinstance(head.cls_y, nn.Linear)) + + def test_predict(self): + + if digit_version(TORCH_VERSION) < digit_version('1.7.0'): + return unittest.skip('RTMCCHead requires PyTorch >= 1.7') + + decoder_cfg_list = [] + # original version + decoder_cfg = dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='gaussian', + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False) + decoder_cfg_list.append(decoder_cfg) + + # single sigma + decoder_cfg = dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='gaussian', + sigma=6., + simcc_split_ratio=2.0, + normalize=False) + decoder_cfg_list.append(decoder_cfg) + + # normalize + decoder_cfg = dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='gaussian', + sigma=6., + simcc_split_ratio=2.0, + normalize=True) + decoder_cfg_list.append(decoder_cfg) + + # dark + decoder_cfg = dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='gaussian', + sigma=6., + simcc_split_ratio=2.0, + use_dark=True) + decoder_cfg_list.append(decoder_cfg) + + for decoder_cfg in decoder_cfg_list: + head = RTMCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(6, 8), + simcc_split_ratio=2.0, + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + decoder=decoder_cfg) + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs( + batch_size=2, + simcc_split_ratio=decoder_cfg['simcc_split_ratio'], + with_simcc_label=True)['data_samples'] + preds, _ = head.predict(feats, batch_data_samples) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual( + preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + # 1x1 conv + head = RTMCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(6, 8), + simcc_split_ratio=2.0, + final_layer_kernel_size=1, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + decoder=decoder_cfg) + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs( + batch_size=2, + simcc_split_ratio=decoder_cfg['simcc_split_ratio'], + with_simcc_label=True)['data_samples'] + preds, _ = head.predict(feats, batch_data_samples) + + # hidden dims + head = RTMCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(6, 8), + simcc_split_ratio=2.0, + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=512, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + decoder=decoder_cfg) + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs( 
+ batch_size=2, + simcc_split_ratio=decoder_cfg['simcc_split_ratio'], + with_simcc_label=True)['data_samples'] + preds, _ = head.predict(feats, batch_data_samples) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual( + preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + # s + head = RTMCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(6, 8), + simcc_split_ratio=2.0, + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=64, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + decoder=decoder_cfg) + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs( + batch_size=2, + simcc_split_ratio=decoder_cfg['simcc_split_ratio'], + with_simcc_label=True)['data_samples'] + preds, _ = head.predict(feats, batch_data_samples) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual( + preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + # expansion factor + head = RTMCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(6, 8), + simcc_split_ratio=2.0, + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=3, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + decoder=decoder_cfg) + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs( + batch_size=2, + simcc_split_ratio=decoder_cfg['simcc_split_ratio'], + with_simcc_label=True)['data_samples'] + preds, _ = head.predict(feats, batch_data_samples) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual( + preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + # drop path + head = RTMCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(6, 8), + simcc_split_ratio=2.0, + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0.1, + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + decoder=decoder_cfg) + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs( + batch_size=2, + simcc_split_ratio=decoder_cfg['simcc_split_ratio'], + with_simcc_label=True)['data_samples'] + preds, _ = head.predict(feats, batch_data_samples) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual( + preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + # act fn + head = RTMCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(6, 8), + simcc_split_ratio=2.0, + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='ReLU', + use_rel_bias=False, + pos_enc=False), + decoder=decoder_cfg) + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs( + batch_size=2, + simcc_split_ratio=decoder_cfg['simcc_split_ratio'], + with_simcc_label=True)['data_samples'] + preds, _ = head.predict(feats, batch_data_samples) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual( + 
preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + # use_rel_bias + head = RTMCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(6, 8), + simcc_split_ratio=2.0, + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=True, + pos_enc=False), + decoder=decoder_cfg) + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs( + batch_size=2, + simcc_split_ratio=decoder_cfg['simcc_split_ratio'], + with_simcc_label=True)['data_samples'] + preds, _ = head.predict(feats, batch_data_samples) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual( + preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + # pos_enc + head = RTMCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(6, 8), + simcc_split_ratio=2.0, + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=True), + decoder=decoder_cfg) + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs( + batch_size=2, + simcc_split_ratio=decoder_cfg['simcc_split_ratio'], + with_simcc_label=True)['data_samples'] + preds, _ = head.predict(feats, batch_data_samples) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual( + preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + # output_heatmaps + head = RTMCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(6, 8), + simcc_split_ratio=2.0, + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + decoder=decoder_cfg, + ) + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs( + batch_size=2, + simcc_split_ratio=decoder_cfg['simcc_split_ratio'], + with_simcc_label=True)['data_samples'] + preds, pred_heatmaps = head.predict( + feats, batch_data_samples, test_cfg=dict(output_heatmaps=True)) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual(preds[0].keypoint_x_labels.shape, (1, 17, 384)) + self.assertEqual(preds[0].keypoint_y_labels.shape, (1, 17, 512)) + self.assertEqual( + preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + self.assertEqual(pred_heatmaps[0].heatmaps.shape, (17, 512, 384)) + + def test_tta(self): + if digit_version(TORCH_VERSION) < digit_version('1.7.0'): + return unittest.skip('RTMCCHead requires PyTorch >= 1.7') + + # flip test + decoder_cfg = dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='gaussian', + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False) + + head = RTMCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(6, 8), + simcc_split_ratio=2.0, + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + decoder=decoder_cfg) + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + 
batch_data_samples = get_packed_inputs( + batch_size=2, simcc_split_ratio=2.0, + with_simcc_label=True)['data_samples'] + preds = head.predict([feats, feats], + batch_data_samples, + test_cfg=dict(flip_test=True)) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual(preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + def test_loss(self): + if digit_version(TORCH_VERSION) < digit_version('1.7.0'): + return unittest.skip('RTMCCHead requires PyTorch >= 1.7') + + decoder_cfg_list = [] + decoder_cfg = dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='gaussian', + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=False) + decoder_cfg_list.append(decoder_cfg) + + decoder_cfg = dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='gaussian', + sigma=(4.9, 5.66), + simcc_split_ratio=2.0, + normalize=True) + decoder_cfg_list.append(decoder_cfg) + + # decoder + for decoder_cfg in decoder_cfg_list: + head = RTMCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(6, 8), + simcc_split_ratio=2.0, + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=1., + label_softmax=False, + ), + decoder=decoder_cfg) + + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs( + batch_size=2, simcc_split_ratio=2.0, + with_simcc_label=True)['data_samples'] + losses = head.loss(feats, batch_data_samples) + self.assertIsInstance(losses['loss_kpt'], torch.Tensor) + self.assertEqual(losses['loss_kpt'].shape, torch.Size(())) + self.assertIsInstance(losses['acc_pose'], torch.Tensor) + + # beta = 10 + head = RTMCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(6, 8), + simcc_split_ratio=2.0, + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=False, + ), + decoder=decoder_cfg) + + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs( + batch_size=2, simcc_split_ratio=2.0, + with_simcc_label=True)['data_samples'] + losses = head.loss(feats, batch_data_samples) + self.assertIsInstance(losses['loss_kpt'], torch.Tensor) + self.assertEqual(losses['loss_kpt'].shape, torch.Size(())) + self.assertIsInstance(losses['acc_pose'], torch.Tensor) + + # label softmax + head = RTMCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(6, 8), + simcc_split_ratio=2.0, + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False), + loss=dict( + type='KLDiscretLoss', + use_target_weight=True, + beta=10., + label_softmax=True, + ), + decoder=decoder_cfg) + + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs( + batch_size=2, simcc_split_ratio=2.0, + with_simcc_label=True)['data_samples'] + losses = head.loss(feats, batch_data_samples) + self.assertIsInstance(losses['loss_kpt'], torch.Tensor) + 
self.assertEqual(losses['loss_kpt'].shape, torch.Size(())) + self.assertIsInstance(losses['acc_pose'], torch.Tensor) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_models/test_heads/test_heatmap_heads/test_simcc_head.py b/tests/test_models/test_heads/test_heatmap_heads/test_simcc_head.py index b2395ab6f6..077532ce4a 100644 --- a/tests/test_models/test_heads/test_heatmap_heads/test_simcc_head.py +++ b/tests/test_models/test_heads/test_heatmap_heads/test_simcc_head.py @@ -1,243 +1,243 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import unittest -from typing import List, Tuple -from unittest import TestCase - -import torch -from mmengine.structures import InstanceData - -from mmpose.models.heads import SimCCHead -from mmpose.testing import get_packed_inputs - - -class TestSimCCHead(TestCase): - - def _get_feats(self, - batch_size: int = 2, - feat_shapes: List[Tuple[int, int, int]] = [(32, 6, 8)]): - - feats = [ - torch.rand((batch_size, ) + shape, dtype=torch.float32) - for shape in feat_shapes - ] - return feats - - def test_init(self): - - # w/ gaussian decoder - head = SimCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(6, 8), - simcc_split_ratio=2.0, - decoder=dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='gaussian', - sigma=6., - simcc_split_ratio=2.0)) - self.assertIsNotNone(head.decoder) - - # w/ label-smoothing decoder - head = SimCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(6, 8), - simcc_split_ratio=3.0, - decoder=dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='standard', - sigma=6., - simcc_split_ratio=3.0, - label_smooth_weight=0.1)) - self.assertIsNotNone(head.decoder) - - # w/ one-hot decoder - head = SimCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(6, 8), - simcc_split_ratio=3.0, - decoder=dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='standard', - sigma=6., - simcc_split_ratio=3.0)) - self.assertIsNotNone(head.decoder) - - def test_predict(self): - decoder_cfg1 = dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='gaussian', - sigma=2., - simcc_split_ratio=2.0) - - decoder_cfg2 = dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='standard', - sigma=2., - simcc_split_ratio=2.0) - - decoder_cfg3 = dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='standard', - sigma=2., - simcc_split_ratio=2.0, - label_smooth_weight=0.1) - - for decoder_cfg in [decoder_cfg1, decoder_cfg2, decoder_cfg3]: - - head = SimCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(6, 8), - simcc_split_ratio=decoder_cfg['simcc_split_ratio'], - decoder=decoder_cfg) - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs( - batch_size=2, - simcc_split_ratio=decoder_cfg['simcc_split_ratio'], - with_simcc_label=True)['data_samples'] - preds = head.predict(feats, batch_data_samples) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual( - preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - # input transform: output heatmap - head = SimCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(6, 8), - simcc_split_ratio=decoder_cfg['simcc_split_ratio'], - decoder=decoder_cfg) - feats = self._get_feats(batch_size=2, 
feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs( - batch_size=2, - simcc_split_ratio=decoder_cfg['simcc_split_ratio'], - with_simcc_label=True)['data_samples'] - preds, pred_heatmaps = head.predict( - feats, batch_data_samples, test_cfg=dict(output_heatmaps=True)) - - self.assertEqual(preds[0].keypoint_x_labels.shape, - (1, 17, 192 * 2)) - self.assertEqual(preds[0].keypoint_y_labels.shape, - (1, 17, 256 * 2)) - self.assertTrue(len(pred_heatmaps), 2) - self.assertEqual(pred_heatmaps[0].heatmaps.shape, (17, 512, 384)) - - def test_tta(self): - # flip test - decoder_cfg = dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='gaussian', - sigma=2., - simcc_split_ratio=2.0) - - head = SimCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(6, 8), - simcc_split_ratio=2.0, - decoder=decoder_cfg) - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs( - batch_size=2, simcc_split_ratio=2.0, - with_simcc_label=True)['data_samples'] - preds = head.predict([feats, feats], - batch_data_samples, - test_cfg=dict(flip_test=True)) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual(preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - def test_loss(self): - decoder_cfg1 = dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='gaussian', - sigma=2., - simcc_split_ratio=2.0) - - decoder_cfg2 = dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='standard', - sigma=2., - simcc_split_ratio=2.0) - - decoder_cfg3 = dict( - type='SimCCLabel', - input_size=(192, 256), - smoothing_type='standard', - sigma=2., - simcc_split_ratio=2.0, - label_smooth_weight=0.1) - - # decoder - for decoder_cfg in [decoder_cfg1, decoder_cfg2, decoder_cfg3]: - head = SimCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(6, 8), - simcc_split_ratio=2.0, - decoder=decoder_cfg) - - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs( - batch_size=2, simcc_split_ratio=2.0, - with_simcc_label=True)['data_samples'] - losses = head.loss(feats, batch_data_samples) - self.assertIsInstance(losses['loss_kpt'], torch.Tensor) - self.assertEqual(losses['loss_kpt'].shape, torch.Size(())) - self.assertIsInstance(losses['acc_pose'], torch.Tensor) - - def test_errors(self): - # Invalid arguments - with self.assertRaisesRegex(ValueError, 'Got mismatched lengths'): - _ = SimCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(48, 64), - deconv_out_channels=(256, ), - deconv_kernel_sizes=(4, 4)) - - with self.assertRaisesRegex(ValueError, 'Got mismatched lengths'): - _ = SimCCHead( - in_channels=32, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(48, 64), - conv_out_channels=(256, ), - conv_kernel_sizes=(1, 1)) - - with self.assertRaisesRegex(ValueError, 'Unsupported kernel size'): - _ = SimCCHead( - in_channels=16, - out_channels=17, - input_size=(192, 256), - in_featuremap_size=(48, 64), - deconv_out_channels=(256, ), - deconv_kernel_sizes=(1, )) - - -if __name__ == '__main__': - unittest.main() +# Copyright (c) OpenMMLab. All rights reserved. 
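The RTMCC loss tests above exercise two knobs of the KLDiscretLoss config: beta, which acts as a temperature on the per-axis logits, and label_softmax, which controls whether the target label vectors are renormalized with a softmax before the KL term is taken. The snippet below is a minimal, self-contained sketch of that formulation in plain PyTorch, not the MMPose implementation; the function name is illustrative and the shapes follow the test settings (17 keypoints, 192 * simcc_split_ratio 2.0 = 384 x-bins).

import torch
import torch.nn.functional as F

def kl_discrete_loss(pred_logits, target_labels, beta=1.0, label_softmax=False):
    # pred_logits / target_labels: (N, K, bins) per-axis SimCC-style distributions
    log_p = F.log_softmax(pred_logits * beta, dim=-1)  # beta acts as a temperature
    target = F.softmax(target_labels * beta, dim=-1) if label_softmax else target_labels
    # KL(target || pred), averaged over the batch dimension
    return F.kl_div(log_p, target, reduction='batchmean')

x_logits = torch.randn(2, 17, 384)  # 17 keypoints, 192 * split ratio 2.0 = 384 x-bins
x_labels = torch.rand(2, 17, 384)
loss = kl_discrete_loss(x_logits, x_labels, beta=10., label_softmax=True)
print(loss.shape)  # scalar, matching the losses['loss_kpt'].shape == torch.Size(()) checks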
+import unittest +from typing import List, Tuple +from unittest import TestCase + +import torch +from mmengine.structures import InstanceData + +from mmpose.models.heads import SimCCHead +from mmpose.testing import get_packed_inputs + + +class TestSimCCHead(TestCase): + + def _get_feats(self, + batch_size: int = 2, + feat_shapes: List[Tuple[int, int, int]] = [(32, 6, 8)]): + + feats = [ + torch.rand((batch_size, ) + shape, dtype=torch.float32) + for shape in feat_shapes + ] + return feats + + def test_init(self): + + # w/ gaussian decoder + head = SimCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(6, 8), + simcc_split_ratio=2.0, + decoder=dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='gaussian', + sigma=6., + simcc_split_ratio=2.0)) + self.assertIsNotNone(head.decoder) + + # w/ label-smoothing decoder + head = SimCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(6, 8), + simcc_split_ratio=3.0, + decoder=dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='standard', + sigma=6., + simcc_split_ratio=3.0, + label_smooth_weight=0.1)) + self.assertIsNotNone(head.decoder) + + # w/ one-hot decoder + head = SimCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(6, 8), + simcc_split_ratio=3.0, + decoder=dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='standard', + sigma=6., + simcc_split_ratio=3.0)) + self.assertIsNotNone(head.decoder) + + def test_predict(self): + decoder_cfg1 = dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='gaussian', + sigma=2., + simcc_split_ratio=2.0) + + decoder_cfg2 = dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='standard', + sigma=2., + simcc_split_ratio=2.0) + + decoder_cfg3 = dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='standard', + sigma=2., + simcc_split_ratio=2.0, + label_smooth_weight=0.1) + + for decoder_cfg in [decoder_cfg1, decoder_cfg2, decoder_cfg3]: + + head = SimCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(6, 8), + simcc_split_ratio=decoder_cfg['simcc_split_ratio'], + decoder=decoder_cfg) + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs( + batch_size=2, + simcc_split_ratio=decoder_cfg['simcc_split_ratio'], + with_simcc_label=True)['data_samples'] + preds = head.predict(feats, batch_data_samples) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual( + preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + # input transform: output heatmap + head = SimCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(6, 8), + simcc_split_ratio=decoder_cfg['simcc_split_ratio'], + decoder=decoder_cfg) + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs( + batch_size=2, + simcc_split_ratio=decoder_cfg['simcc_split_ratio'], + with_simcc_label=True)['data_samples'] + preds, pred_heatmaps = head.predict( + feats, batch_data_samples, test_cfg=dict(output_heatmaps=True)) + + self.assertEqual(preds[0].keypoint_x_labels.shape, + (1, 17, 192 * 2)) + self.assertEqual(preds[0].keypoint_y_labels.shape, + (1, 17, 256 * 2)) + self.assertTrue(len(pred_heatmaps), 2) + self.assertEqual(pred_heatmaps[0].heatmaps.shape, (17, 512, 384)) + + def test_tta(self): + # flip 
test + decoder_cfg = dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='gaussian', + sigma=2., + simcc_split_ratio=2.0) + + head = SimCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(6, 8), + simcc_split_ratio=2.0, + decoder=decoder_cfg) + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs( + batch_size=2, simcc_split_ratio=2.0, + with_simcc_label=True)['data_samples'] + preds = head.predict([feats, feats], + batch_data_samples, + test_cfg=dict(flip_test=True)) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual(preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + def test_loss(self): + decoder_cfg1 = dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='gaussian', + sigma=2., + simcc_split_ratio=2.0) + + decoder_cfg2 = dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='standard', + sigma=2., + simcc_split_ratio=2.0) + + decoder_cfg3 = dict( + type='SimCCLabel', + input_size=(192, 256), + smoothing_type='standard', + sigma=2., + simcc_split_ratio=2.0, + label_smooth_weight=0.1) + + # decoder + for decoder_cfg in [decoder_cfg1, decoder_cfg2, decoder_cfg3]: + head = SimCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(6, 8), + simcc_split_ratio=2.0, + decoder=decoder_cfg) + + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs( + batch_size=2, simcc_split_ratio=2.0, + with_simcc_label=True)['data_samples'] + losses = head.loss(feats, batch_data_samples) + self.assertIsInstance(losses['loss_kpt'], torch.Tensor) + self.assertEqual(losses['loss_kpt'].shape, torch.Size(())) + self.assertIsInstance(losses['acc_pose'], torch.Tensor) + + def test_errors(self): + # Invalid arguments + with self.assertRaisesRegex(ValueError, 'Got mismatched lengths'): + _ = SimCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(48, 64), + deconv_out_channels=(256, ), + deconv_kernel_sizes=(4, 4)) + + with self.assertRaisesRegex(ValueError, 'Got mismatched lengths'): + _ = SimCCHead( + in_channels=32, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(48, 64), + conv_out_channels=(256, ), + conv_kernel_sizes=(1, 1)) + + with self.assertRaisesRegex(ValueError, 'Unsupported kernel size'): + _ = SimCCHead( + in_channels=16, + out_channels=17, + input_size=(192, 256), + in_featuremap_size=(48, 64), + deconv_out_channels=(256, ), + deconv_kernel_sizes=(1, )) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_models/test_heads/test_heatmap_heads/test_vipnas_head.py b/tests/test_models/test_heads/test_heatmap_heads/test_vipnas_head.py index f7f4444578..7e33b0a040 100644 --- a/tests/test_models/test_heads/test_heatmap_heads/test_vipnas_head.py +++ b/tests/test_models/test_heads/test_heatmap_heads/test_vipnas_head.py @@ -1,227 +1,227 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
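The SimCC head tests above assert that the predicted x/y label vectors come out as (1, 17, 384) and (1, 17, 512) and that the decoded keypoints match the ground-truth shape. The sketch below shows the basic decoding step those assertions rest on: take the argmax bin along each axis and divide by simcc_split_ratio to map bins back to input-image pixels. It is a simplified stand-in rather than the SimCCLabel codec, and the confidence rule (the smaller of the two per-axis peaks) is an illustrative assumption.

import torch

def decode_simcc(x_labels, y_labels, simcc_split_ratio=2.0):
    # x_labels: (N, K, Wx), y_labels: (N, K, Wy) -> keypoints (N, K, 2), scores (N, K)
    x_bins = x_labels.argmax(dim=-1).float()
    y_bins = y_labels.argmax(dim=-1).float()
    keypoints = torch.stack([x_bins, y_bins], dim=-1) / simcc_split_ratio
    scores = torch.minimum(x_labels.amax(dim=-1), y_labels.amax(dim=-1))
    return keypoints, scores

x = torch.rand(2, 17, 192 * 2)  # 384 bins for a 192-wide input at split ratio 2.0
y = torch.rand(2, 17, 256 * 2)  # 512 bins for a 256-high input
kpts, scores = decode_simcc(x, y)
assert kpts.shape == (2, 17, 2) and scores.shape == (2, 17)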
-from typing import List, Tuple -from unittest import TestCase - -import torch -from mmengine.structures import InstanceData, PixelData -from torch import nn - -from mmpose.models.heads import ViPNASHead -from mmpose.testing import get_packed_inputs - - -class TestViPNASHead(TestCase): - - def _get_feats(self, - batch_size: int = 2, - feat_shapes: List[Tuple[int, int, int]] = [(32, 6, 8)]): - - feats = [ - torch.rand((batch_size, ) + shape, dtype=torch.float32) - for shape in feat_shapes - ] - return feats - - def test_init(self): - # w/o deconv - head = ViPNASHead( - in_channels=32, out_channels=17, deconv_out_channels=None) - self.assertTrue(isinstance(head.deconv_layers, nn.Identity)) - - # w/ deconv and w/o conv - head = ViPNASHead( - in_channels=32, - out_channels=17, - deconv_out_channels=(32, 32), - deconv_kernel_sizes=(4, 4), - deconv_num_groups=(1, 1)) - self.assertTrue(isinstance(head.deconv_layers, nn.Sequential)) - self.assertTrue(isinstance(head.conv_layers, nn.Identity)) - - # w/ both deconv and conv - head = ViPNASHead( - in_channels=32, - out_channels=17, - deconv_out_channels=(32, 32), - deconv_kernel_sizes=(4, 4), - deconv_num_groups=(1, 1), - conv_out_channels=(32, ), - conv_kernel_sizes=(1, )) - self.assertTrue(isinstance(head.deconv_layers, nn.Sequential)) - self.assertTrue(isinstance(head.conv_layers, nn.Sequential)) - - # w/o final layer - head = ViPNASHead(in_channels=32, out_channels=17, final_layer=None) - self.assertTrue(isinstance(head.final_layer, nn.Identity)) - - # w/ decoder - head = ViPNASHead( - in_channels=32, - out_channels=17, - decoder=dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2.)) - self.assertIsNotNone(head.decoder) - - def test_predict(self): - decoder_cfg = dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2.) - - head = ViPNASHead( - in_channels=32, - out_channels=17, - deconv_out_channels=(256, 256), - deconv_kernel_sizes=(4, 4), - deconv_num_groups=(1, 1), - conv_out_channels=(256, ), - conv_kernel_sizes=(1, ), - decoder=decoder_cfg) - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] - preds = head.predict(feats, batch_data_samples) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual(preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - # input transform: output heatmap - head = ViPNASHead(in_channels=32, out_channels=17, decoder=decoder_cfg) - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] - _, pred_heatmaps = head.predict( - feats, batch_data_samples, test_cfg=dict(output_heatmaps=True)) - - self.assertTrue(len(pred_heatmaps), 2) - self.assertIsInstance(pred_heatmaps[0], PixelData) - self.assertEqual(pred_heatmaps[0].heatmaps.shape, (17, 64, 48)) - - def test_tta(self): - # flip test: heatmap - decoder_cfg = dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2.) 
- - # input transform: select - head = ViPNASHead(in_channels=32, out_channels=17, decoder=decoder_cfg) - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] - preds = head.predict([feats, feats], - batch_data_samples, - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual(preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - def test_loss(self): - head = ViPNASHead(in_channels=32, out_channels=17) - - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] - losses = head.loss(feats, batch_data_samples) - self.assertIsInstance(losses['loss_kpt'], torch.Tensor) - self.assertEqual(losses['loss_kpt'].shape, torch.Size(())) - self.assertIsInstance(losses['acc_pose'], torch.Tensor) - - def test_errors(self): - # Invalid arguments - with self.assertRaisesRegex(ValueError, 'Got mismatched lengths'): - _ = ViPNASHead( - in_channels=32, - out_channels=17, - deconv_out_channels=(256, ), - deconv_kernel_sizes=(4, 4)) - with self.assertRaisesRegex(ValueError, 'Got mismatched lengths'): - _ = ViPNASHead( - in_channels=32, - out_channels=17, - deconv_out_channels=(256, 256), - deconv_kernel_sizes=(4, 4), - deconv_num_groups=(1, )) - - with self.assertRaisesRegex(ValueError, 'Got mismatched lengths'): - _ = ViPNASHead( - in_channels=32, - out_channels=17, - conv_out_channels=(256, ), - conv_kernel_sizes=(1, 1)) - - with self.assertRaisesRegex(ValueError, 'Unsupported kernel size'): - _ = ViPNASHead( - in_channels=16, - out_channels=17, - deconv_out_channels=(256, ), - deconv_kernel_sizes=(1, ), - deconv_num_groups=(1, )) - - def test_state_dict_compatible(self): - # Typical setting for MobileNetV3 - head = ViPNASHead( - in_channels=160, - out_channels=17, - deconv_out_channels=(160, 160, 160), - deconv_num_groups=(160, 160, 160)) - - state_dict = { - 'deconv_layers.0.weight': torch.zeros([160, 1, 4, 4]), - 'deconv_layers.1.weight': torch.zeros([160]), - 'deconv_layers.1.bias': torch.zeros([160]), - 'deconv_layers.1.running_mean': torch.zeros([160]), - 'deconv_layers.1.running_var': torch.zeros([160]), - 'deconv_layers.1.num_batches_tracked': torch.zeros([]), - 'deconv_layers.3.weight': torch.zeros([160, 1, 4, 4]), - 'deconv_layers.4.weight': torch.zeros([160]), - 'deconv_layers.4.bias': torch.zeros([160]), - 'deconv_layers.4.running_mean': torch.zeros([160]), - 'deconv_layers.4.running_var': torch.zeros([160]), - 'deconv_layers.4.num_batches_tracked': torch.zeros([]), - 'deconv_layers.6.weight': torch.zeros([160, 1, 4, 4]), - 'deconv_layers.7.weight': torch.zeros([160]), - 'deconv_layers.7.bias': torch.zeros([160]), - 'deconv_layers.7.running_mean': torch.zeros([160]), - 'deconv_layers.7.running_var': torch.zeros([160]), - 'deconv_layers.7.num_batches_tracked': torch.zeros([]), - 'final_layer.weight': torch.zeros([17, 160, 1, 1]), - 'final_layer.bias': torch.zeros([17]) - } - head.load_state_dict(state_dict) - - # Typical setting for Resnet - head = ViPNASHead(in_channels=608, out_channels=17) - - state_dict = { - 'deconv_layers.0.weight': torch.zeros([608, 9, 4, 4]), - 'deconv_layers.1.weight': torch.zeros([144]), - 'deconv_layers.1.bias': torch.zeros([144]), - 'deconv_layers.1.running_mean': torch.zeros([144]), - 'deconv_layers.1.running_var': torch.zeros([144]), - 
'deconv_layers.1.num_batches_tracked': torch.zeros([]), - 'deconv_layers.3.weight': torch.zeros([144, 9, 4, 4]), - 'deconv_layers.4.weight': torch.zeros([144]), - 'deconv_layers.4.bias': torch.zeros([144]), - 'deconv_layers.4.running_mean': torch.zeros([144]), - 'deconv_layers.4.running_var': torch.zeros([144]), - 'deconv_layers.4.num_batches_tracked': torch.zeros([]), - 'deconv_layers.6.weight': torch.zeros([144, 9, 4, 4]), - 'deconv_layers.7.weight': torch.zeros([144]), - 'deconv_layers.7.bias': torch.zeros([144]), - 'deconv_layers.7.running_mean': torch.zeros([144]), - 'deconv_layers.7.running_var': torch.zeros([144]), - 'deconv_layers.7.num_batches_tracked': torch.zeros([]), - 'final_layer.weight': torch.zeros([17, 144, 1, 1]), - 'final_layer.bias': torch.zeros([17]) - } - head.load_state_dict(state_dict) +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Tuple +from unittest import TestCase + +import torch +from mmengine.structures import InstanceData, PixelData +from torch import nn + +from mmpose.models.heads import ViPNASHead +from mmpose.testing import get_packed_inputs + + +class TestViPNASHead(TestCase): + + def _get_feats(self, + batch_size: int = 2, + feat_shapes: List[Tuple[int, int, int]] = [(32, 6, 8)]): + + feats = [ + torch.rand((batch_size, ) + shape, dtype=torch.float32) + for shape in feat_shapes + ] + return feats + + def test_init(self): + # w/o deconv + head = ViPNASHead( + in_channels=32, out_channels=17, deconv_out_channels=None) + self.assertTrue(isinstance(head.deconv_layers, nn.Identity)) + + # w/ deconv and w/o conv + head = ViPNASHead( + in_channels=32, + out_channels=17, + deconv_out_channels=(32, 32), + deconv_kernel_sizes=(4, 4), + deconv_num_groups=(1, 1)) + self.assertTrue(isinstance(head.deconv_layers, nn.Sequential)) + self.assertTrue(isinstance(head.conv_layers, nn.Identity)) + + # w/ both deconv and conv + head = ViPNASHead( + in_channels=32, + out_channels=17, + deconv_out_channels=(32, 32), + deconv_kernel_sizes=(4, 4), + deconv_num_groups=(1, 1), + conv_out_channels=(32, ), + conv_kernel_sizes=(1, )) + self.assertTrue(isinstance(head.deconv_layers, nn.Sequential)) + self.assertTrue(isinstance(head.conv_layers, nn.Sequential)) + + # w/o final layer + head = ViPNASHead(in_channels=32, out_channels=17, final_layer=None) + self.assertTrue(isinstance(head.final_layer, nn.Identity)) + + # w/ decoder + head = ViPNASHead( + in_channels=32, + out_channels=17, + decoder=dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2.)) + self.assertIsNotNone(head.decoder) + + def test_predict(self): + decoder_cfg = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2.) 
+ + head = ViPNASHead( + in_channels=32, + out_channels=17, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + deconv_num_groups=(1, 1), + conv_out_channels=(256, ), + conv_kernel_sizes=(1, ), + decoder=decoder_cfg) + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] + preds = head.predict(feats, batch_data_samples) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual(preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + # input transform: output heatmap + head = ViPNASHead(in_channels=32, out_channels=17, decoder=decoder_cfg) + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] + _, pred_heatmaps = head.predict( + feats, batch_data_samples, test_cfg=dict(output_heatmaps=True)) + + self.assertTrue(len(pred_heatmaps), 2) + self.assertIsInstance(pred_heatmaps[0], PixelData) + self.assertEqual(pred_heatmaps[0].heatmaps.shape, (17, 64, 48)) + + def test_tta(self): + # flip test: heatmap + decoder_cfg = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2.) + + # input transform: select + head = ViPNASHead(in_channels=32, out_channels=17, decoder=decoder_cfg) + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] + preds = head.predict([feats, feats], + batch_data_samples, + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual(preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + def test_loss(self): + head = ViPNASHead(in_channels=32, out_channels=17) + + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] + losses = head.loss(feats, batch_data_samples) + self.assertIsInstance(losses['loss_kpt'], torch.Tensor) + self.assertEqual(losses['loss_kpt'].shape, torch.Size(())) + self.assertIsInstance(losses['acc_pose'], torch.Tensor) + + def test_errors(self): + # Invalid arguments + with self.assertRaisesRegex(ValueError, 'Got mismatched lengths'): + _ = ViPNASHead( + in_channels=32, + out_channels=17, + deconv_out_channels=(256, ), + deconv_kernel_sizes=(4, 4)) + with self.assertRaisesRegex(ValueError, 'Got mismatched lengths'): + _ = ViPNASHead( + in_channels=32, + out_channels=17, + deconv_out_channels=(256, 256), + deconv_kernel_sizes=(4, 4), + deconv_num_groups=(1, )) + + with self.assertRaisesRegex(ValueError, 'Got mismatched lengths'): + _ = ViPNASHead( + in_channels=32, + out_channels=17, + conv_out_channels=(256, ), + conv_kernel_sizes=(1, 1)) + + with self.assertRaisesRegex(ValueError, 'Unsupported kernel size'): + _ = ViPNASHead( + in_channels=16, + out_channels=17, + deconv_out_channels=(256, ), + deconv_kernel_sizes=(1, ), + deconv_num_groups=(1, )) + + def test_state_dict_compatible(self): + # Typical setting for MobileNetV3 + head = ViPNASHead( + in_channels=160, + out_channels=17, + deconv_out_channels=(160, 160, 160), + deconv_num_groups=(160, 160, 160)) + + state_dict = { + 'deconv_layers.0.weight': torch.zeros([160, 1, 4, 4]), + 'deconv_layers.1.weight': torch.zeros([160]), + 'deconv_layers.1.bias': torch.zeros([160]), + 'deconv_layers.1.running_mean': 
torch.zeros([160]), + 'deconv_layers.1.running_var': torch.zeros([160]), + 'deconv_layers.1.num_batches_tracked': torch.zeros([]), + 'deconv_layers.3.weight': torch.zeros([160, 1, 4, 4]), + 'deconv_layers.4.weight': torch.zeros([160]), + 'deconv_layers.4.bias': torch.zeros([160]), + 'deconv_layers.4.running_mean': torch.zeros([160]), + 'deconv_layers.4.running_var': torch.zeros([160]), + 'deconv_layers.4.num_batches_tracked': torch.zeros([]), + 'deconv_layers.6.weight': torch.zeros([160, 1, 4, 4]), + 'deconv_layers.7.weight': torch.zeros([160]), + 'deconv_layers.7.bias': torch.zeros([160]), + 'deconv_layers.7.running_mean': torch.zeros([160]), + 'deconv_layers.7.running_var': torch.zeros([160]), + 'deconv_layers.7.num_batches_tracked': torch.zeros([]), + 'final_layer.weight': torch.zeros([17, 160, 1, 1]), + 'final_layer.bias': torch.zeros([17]) + } + head.load_state_dict(state_dict) + + # Typical setting for Resnet + head = ViPNASHead(in_channels=608, out_channels=17) + + state_dict = { + 'deconv_layers.0.weight': torch.zeros([608, 9, 4, 4]), + 'deconv_layers.1.weight': torch.zeros([144]), + 'deconv_layers.1.bias': torch.zeros([144]), + 'deconv_layers.1.running_mean': torch.zeros([144]), + 'deconv_layers.1.running_var': torch.zeros([144]), + 'deconv_layers.1.num_batches_tracked': torch.zeros([]), + 'deconv_layers.3.weight': torch.zeros([144, 9, 4, 4]), + 'deconv_layers.4.weight': torch.zeros([144]), + 'deconv_layers.4.bias': torch.zeros([144]), + 'deconv_layers.4.running_mean': torch.zeros([144]), + 'deconv_layers.4.running_var': torch.zeros([144]), + 'deconv_layers.4.num_batches_tracked': torch.zeros([]), + 'deconv_layers.6.weight': torch.zeros([144, 9, 4, 4]), + 'deconv_layers.7.weight': torch.zeros([144]), + 'deconv_layers.7.bias': torch.zeros([144]), + 'deconv_layers.7.running_mean': torch.zeros([144]), + 'deconv_layers.7.running_var': torch.zeros([144]), + 'deconv_layers.7.num_batches_tracked': torch.zeros([]), + 'final_layer.weight': torch.zeros([17, 144, 1, 1]), + 'final_layer.bias': torch.zeros([17]) + } + head.load_state_dict(state_dict) diff --git a/tests/test_models/test_heads/test_hybrid_heads/test_dekr_head.py b/tests/test_models/test_heads/test_hybrid_heads/test_dekr_head.py index 957f3499d0..4835415214 100644 --- a/tests/test_models/test_heads/test_hybrid_heads/test_dekr_head.py +++ b/tests/test_models/test_heads/test_hybrid_heads/test_dekr_head.py @@ -1,137 +1,137 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
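The test_state_dict_compatible cases above hard-code deconv weight shapes such as (160, 1, 4, 4) and (608, 9, 4, 4). Those follow from how PyTorch stores transposed-convolution weights, namely (in_channels, out_channels // groups, kH, kW). The sketch below reproduces both shapes; the group count of 16 in the second example is an assumption chosen only so that out_channels // groups equals 9, it is not read from the test file.

import torch.nn as nn

# Depthwise-style deconv (groups == channels), as in the MobileNetV3-style setting
dw = nn.ConvTranspose2d(in_channels=160, out_channels=160, kernel_size=4, groups=160)
assert tuple(dw.weight.shape) == (160, 1, 4, 4)

# Grouped deconv with out_channels // groups == 9, matching the (608, 9, 4, 4)
# first-layer weight in the ResNet-style state dict (groups=16 is illustrative)
g = nn.ConvTranspose2d(in_channels=608, out_channels=144, kernel_size=4, groups=16)
assert tuple(g.weight.shape) == (608, 9, 4, 4)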
-from typing import List, Tuple -from unittest import TestCase - -import torch -from mmengine.utils import is_tuple_of - -from mmpose.models.heads import DEKRHead -from mmpose.testing import get_coco_sample, get_packed_inputs -from mmpose.utils.tensor_utils import to_tensor - - -class TestDEKRHead(TestCase): - - def _get_feats( - self, - batch_size: int = 1, - feat_shapes: List[Tuple[int, int, int]] = [(32, 128, 128)], - ): - - feats = [ - torch.rand((batch_size, ) + shape, dtype=torch.float32) - for shape in feat_shapes - ] - - if len(feats) > 1: - feats = [[x] for x in feats] - - return feats - - def _get_data_samples(self): - data_samples = get_packed_inputs( - 1, - input_size=(512, 512), - heatmap_size=(128, 128), - img_shape=(512, 512))['data_samples'] - return data_samples - - def test_forward(self): - - head = DEKRHead(in_channels=32, num_keypoints=17) - - feats = [torch.rand(1, 32, 128, 128)] - output = head.forward(feats) # should be (heatmaps, displacements) - self.assertTrue(is_tuple_of(output, torch.Tensor)) - self.assertEqual(output[0].shape, (1, 18, 128, 128)) - self.assertEqual(output[1].shape, (1, 34, 128, 128)) - - def test_predict(self): - - codec_cfg = dict( - type='SPR', - input_size=(512, 512), - heatmap_size=(128, 128), - sigma=(4, 2), - generate_keypoint_heatmaps=True, - ) - - head = DEKRHead(in_channels=32, num_keypoints=17, decoder=codec_cfg) - - feats = self._get_feats() - data_samples = self._get_data_samples() - with torch.no_grad(): - preds = head.predict(feats, data_samples) - self.assertEqual(len(preds), 1) - self.assertEqual(preds[0].keypoints.shape[1:], (17, 2)) - self.assertEqual(preds[0].keypoint_scores.shape[1:], (17, )) - - # predict with rescore net - head = DEKRHead( - in_channels=32, - num_keypoints=17, - decoder=codec_cfg, - rescore_cfg=dict(in_channels=74, norm_indexes=(5, 6))) - - with torch.no_grad(): - preds = head.predict(feats, data_samples) - self.assertEqual(len(preds), 1) - self.assertEqual(preds[0].keypoints.shape[1:], (17, 2)) - self.assertEqual(preds[0].keypoint_scores.shape[1:], (17, )) - - # tta - with torch.no_grad(): - feats_flip = self._get_feats(feat_shapes=[(32, 128, - 128), (32, 128, 128)]) - preds = head.predict(feats_flip, data_samples, - dict(flip_test=True)) - self.assertEqual(len(preds), 1) - self.assertEqual(preds[0].keypoints.shape[1:], (17, 2)) - self.assertEqual(preds[0].keypoint_scores.shape[1:], (17, )) - - # output heatmaps - with torch.no_grad(): - _, pred_fields = head.predict(feats, data_samples, - dict(output_heatmaps=True)) - self.assertEqual(len(pred_fields), 1) - self.assertEqual(pred_fields[0].heatmaps.shape, (18, 128, 128)) - self.assertEqual(pred_fields[0].displacements.shape, - (34, 128, 128)) - - def test_loss(self): - data = get_coco_sample(img_shape=(512, 512), num_instances=1) - - codec_cfg = dict( - type='SPR', - input_size=(512, 512), - heatmap_size=(128, 128), - sigma=(4, 2), - generate_keypoint_heatmaps=True, - ) - - head = DEKRHead( - in_channels=32, - num_keypoints=17, - decoder=codec_cfg, - heatmap_loss=dict(type='KeypointMSELoss', use_target_weight=True), - displacement_loss=dict( - type='SoftWeightSmoothL1Loss', - use_target_weight=True, - supervise_empty=False, - beta=1 / 9, - )) - - encoded = head.decoder.encode(data['keypoints'], - data['keypoints_visible']) - feats = self._get_feats() - data_samples = self._get_data_samples() - for data_sample in data_samples: - data_sample.gt_fields.set_data( - {k: to_tensor(v) - for k, v in encoded.items()}) - - losses = head.loss(feats, data_samples) - 
self.assertIn('loss/heatmap', losses) - self.assertEqual(losses['loss/heatmap'].ndim, 0) - self.assertIn('loss/displacement', losses) - self.assertEqual(losses['loss/displacement'].ndim, 0) +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Tuple +from unittest import TestCase + +import torch +from mmengine.utils import is_tuple_of + +from mmpose.models.heads import DEKRHead +from mmpose.testing import get_coco_sample, get_packed_inputs +from mmpose.utils.tensor_utils import to_tensor + + +class TestDEKRHead(TestCase): + + def _get_feats( + self, + batch_size: int = 1, + feat_shapes: List[Tuple[int, int, int]] = [(32, 128, 128)], + ): + + feats = [ + torch.rand((batch_size, ) + shape, dtype=torch.float32) + for shape in feat_shapes + ] + + if len(feats) > 1: + feats = [[x] for x in feats] + + return feats + + def _get_data_samples(self): + data_samples = get_packed_inputs( + 1, + input_size=(512, 512), + heatmap_size=(128, 128), + img_shape=(512, 512))['data_samples'] + return data_samples + + def test_forward(self): + + head = DEKRHead(in_channels=32, num_keypoints=17) + + feats = [torch.rand(1, 32, 128, 128)] + output = head.forward(feats) # should be (heatmaps, displacements) + self.assertTrue(is_tuple_of(output, torch.Tensor)) + self.assertEqual(output[0].shape, (1, 18, 128, 128)) + self.assertEqual(output[1].shape, (1, 34, 128, 128)) + + def test_predict(self): + + codec_cfg = dict( + type='SPR', + input_size=(512, 512), + heatmap_size=(128, 128), + sigma=(4, 2), + generate_keypoint_heatmaps=True, + ) + + head = DEKRHead(in_channels=32, num_keypoints=17, decoder=codec_cfg) + + feats = self._get_feats() + data_samples = self._get_data_samples() + with torch.no_grad(): + preds = head.predict(feats, data_samples) + self.assertEqual(len(preds), 1) + self.assertEqual(preds[0].keypoints.shape[1:], (17, 2)) + self.assertEqual(preds[0].keypoint_scores.shape[1:], (17, )) + + # predict with rescore net + head = DEKRHead( + in_channels=32, + num_keypoints=17, + decoder=codec_cfg, + rescore_cfg=dict(in_channels=74, norm_indexes=(5, 6))) + + with torch.no_grad(): + preds = head.predict(feats, data_samples) + self.assertEqual(len(preds), 1) + self.assertEqual(preds[0].keypoints.shape[1:], (17, 2)) + self.assertEqual(preds[0].keypoint_scores.shape[1:], (17, )) + + # tta + with torch.no_grad(): + feats_flip = self._get_feats(feat_shapes=[(32, 128, + 128), (32, 128, 128)]) + preds = head.predict(feats_flip, data_samples, + dict(flip_test=True)) + self.assertEqual(len(preds), 1) + self.assertEqual(preds[0].keypoints.shape[1:], (17, 2)) + self.assertEqual(preds[0].keypoint_scores.shape[1:], (17, )) + + # output heatmaps + with torch.no_grad(): + _, pred_fields = head.predict(feats, data_samples, + dict(output_heatmaps=True)) + self.assertEqual(len(pred_fields), 1) + self.assertEqual(pred_fields[0].heatmaps.shape, (18, 128, 128)) + self.assertEqual(pred_fields[0].displacements.shape, + (34, 128, 128)) + + def test_loss(self): + data = get_coco_sample(img_shape=(512, 512), num_instances=1) + + codec_cfg = dict( + type='SPR', + input_size=(512, 512), + heatmap_size=(128, 128), + sigma=(4, 2), + generate_keypoint_heatmaps=True, + ) + + head = DEKRHead( + in_channels=32, + num_keypoints=17, + decoder=codec_cfg, + heatmap_loss=dict(type='KeypointMSELoss', use_target_weight=True), + displacement_loss=dict( + type='SoftWeightSmoothL1Loss', + use_target_weight=True, + supervise_empty=False, + beta=1 / 9, + )) + + encoded = head.decoder.encode(data['keypoints'], + 
data['keypoints_visible']) + feats = self._get_feats() + data_samples = self._get_data_samples() + for data_sample in data_samples: + data_sample.gt_fields.set_data( + {k: to_tensor(v) + for k, v in encoded.items()}) + + losses = head.loss(feats, data_samples) + self.assertIn('loss/heatmap', losses) + self.assertEqual(losses['loss/heatmap'].ndim, 0) + self.assertIn('loss/displacement', losses) + self.assertEqual(losses['loss/displacement'].ndim, 0) diff --git a/tests/test_models/test_heads/test_hybrid_heads/test_vis_head.py b/tests/test_models/test_heads/test_hybrid_heads/test_vis_head.py index a6aecc2852..5762d2fd63 100644 --- a/tests/test_models/test_heads/test_hybrid_heads/test_vis_head.py +++ b/tests/test_models/test_heads/test_hybrid_heads/test_vis_head.py @@ -1,190 +1,190 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import unittest -from typing import List, Tuple -from unittest import TestCase - -import torch -from mmengine.structures import InstanceData, PixelData -from torch import nn - -from mmpose.models.heads import VisPredictHead -from mmpose.testing import get_packed_inputs - - -class TestVisPredictHead(TestCase): - - def _get_feats( - self, - batch_size: int = 2, - feat_shapes: List[Tuple[int, int, int]] = [(32, 8, 6)], - ): - feats = [ - torch.rand((batch_size, ) + shape, dtype=torch.float32) - for shape in feat_shapes - ] - return feats - - def test_init(self): - codec = dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2.) - - head = VisPredictHead( - pose_cfg=dict( - type='HeatmapHead', - in_channels=32, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec)) - - self.assertTrue(isinstance(head.vis_head, nn.Sequential)) - self.assertEqual(head.vis_head[2].weight.shape, (17, 32)) - self.assertIsNotNone(head.pose_head) - - def test_forward(self): - - codec = dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2) - - head = VisPredictHead( - pose_cfg=dict( - type='HeatmapHead', - in_channels=32, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec)) - - feats = [torch.rand(1, 32, 128, 128)] - output_pose, output_vis = head.forward(feats) - - self.assertIsInstance(output_pose, torch.Tensor) - self.assertEqual(output_pose.shape, (1, 17, 128, 128)) - - self.assertIsInstance(output_vis, torch.Tensor) - self.assertEqual(output_vis.shape, (1, 17)) - - def test_predict(self): - - codec = dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2.) 
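The DEKR forward test above expects (1, 18, 128, 128) heatmaps and (1, 34, 128, 128) displacements for 17 keypoints: one extra heatmap channel for the instance-center map produced alongside the keypoint heatmaps under the SPR codec, and two displacement channels (x and y offsets) per keypoint. The toy module below only reproduces that channel bookkeeping; it is a stand-in for DEKRHead, with 1x1 convolutions chosen purely for brevity.

import torch
import torch.nn as nn

class ToyDEKRHead(nn.Module):
    """Channel bookkeeping only: K + 1 heatmaps and 2 * K displacement maps."""

    def __init__(self, in_channels: int, num_keypoints: int):
        super().__init__()
        self.heatmap_conv = nn.Conv2d(in_channels, num_keypoints + 1, kernel_size=1)
        self.displacement_conv = nn.Conv2d(in_channels, 2 * num_keypoints, kernel_size=1)

    def forward(self, x):
        return self.heatmap_conv(x), self.displacement_conv(x)

head = ToyDEKRHead(in_channels=32, num_keypoints=17)
heatmaps, displacements = head(torch.rand(1, 32, 128, 128))
assert heatmaps.shape == (1, 18, 128, 128)
assert displacements.shape == (1, 34, 128, 128)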
- - head = VisPredictHead( - pose_cfg=dict( - type='HeatmapHead', - in_channels=32, - out_channels=17, - deconv_out_channels=None, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=codec)) - - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 128, 128)]) - batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] - - preds, _ = head.predict(feats, batch_data_samples) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual(preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - self.assertEqual( - preds[0].keypoint_scores.shape, - batch_data_samples[0].gt_instance_labels.keypoint_weights.shape) - - # output heatmap - head = VisPredictHead( - pose_cfg=dict( - type='HeatmapHead', - in_channels=32, - out_channels=17, - decoder=codec)) - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] - _, pred_heatmaps = head.predict( - feats, batch_data_samples, test_cfg=dict(output_heatmaps=True)) - - self.assertIsInstance(pred_heatmaps[0], PixelData) - self.assertEqual(pred_heatmaps[0].heatmaps.shape, (17, 64, 48)) - - def test_tta(self): - # flip test: vis and heatmap - decoder_cfg = dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2.) - - head = VisPredictHead( - pose_cfg=dict( - type='HeatmapHead', - in_channels=32, - out_channels=17, - decoder=decoder_cfg)) - - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] - preds, _ = head.predict([feats, feats], - batch_data_samples, - test_cfg=dict( - flip_test=True, - flip_mode='heatmap', - shift_heatmap=True, - )) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual(preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - self.assertEqual( - preds[0].keypoint_scores.shape, - batch_data_samples[0].gt_instance_labels.keypoint_weights.shape) - - def test_loss(self): - head = VisPredictHead( - pose_cfg=dict( - type='HeatmapHead', - in_channels=32, - out_channels=17, - )) - - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] - losses = head.loss(feats, batch_data_samples) - self.assertIsInstance(losses['loss_kpt'], torch.Tensor) - self.assertEqual(losses['loss_kpt'].shape, torch.Size(())) - self.assertIsInstance(losses['acc_pose'], torch.Tensor) - - self.assertIsInstance(losses['loss_vis'], torch.Tensor) - self.assertEqual(losses['loss_vis'].shape, torch.Size(())) - self.assertIsInstance(losses['acc_vis'], torch.Tensor) - - head = VisPredictHead( - pose_cfg=dict( - type='HeatmapHead', - in_channels=32, - out_channels=17, - )) - - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] - losses = head.loss(feats, batch_data_samples) - self.assertIsInstance(losses['loss_kpt'], torch.Tensor) - self.assertEqual(losses['loss_kpt'].shape, torch.Size(())) - self.assertIsInstance(losses['acc_pose'], torch.Tensor) - - self.assertIsInstance(losses['loss_vis'], torch.Tensor) - self.assertEqual(losses['loss_vis'].shape, torch.Size(())) - self.assertIsInstance(losses['acc_vis'], torch.Tensor) - - -if __name__ == '__main__': - unittest.main() +# Copyright (c) OpenMMLab. All rights reserved. 
+import unittest +from typing import List, Tuple +from unittest import TestCase + +import torch +from mmengine.structures import InstanceData, PixelData +from torch import nn + +from mmpose.models.heads import VisPredictHead +from mmpose.testing import get_packed_inputs + + +class TestVisPredictHead(TestCase): + + def _get_feats( + self, + batch_size: int = 2, + feat_shapes: List[Tuple[int, int, int]] = [(32, 8, 6)], + ): + feats = [ + torch.rand((batch_size, ) + shape, dtype=torch.float32) + for shape in feat_shapes + ] + return feats + + def test_init(self): + codec = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2.) + + head = VisPredictHead( + pose_cfg=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec)) + + self.assertTrue(isinstance(head.vis_head, nn.Sequential)) + self.assertEqual(head.vis_head[2].weight.shape, (17, 32)) + self.assertIsNotNone(head.pose_head) + + def test_forward(self): + + codec = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2) + + head = VisPredictHead( + pose_cfg=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec)) + + feats = [torch.rand(1, 32, 128, 128)] + output_pose, output_vis = head.forward(feats) + + self.assertIsInstance(output_pose, torch.Tensor) + self.assertEqual(output_pose.shape, (1, 17, 128, 128)) + + self.assertIsInstance(output_vis, torch.Tensor) + self.assertEqual(output_vis.shape, (1, 17)) + + def test_predict(self): + + codec = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2.) + + head = VisPredictHead( + pose_cfg=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + deconv_out_channels=None, + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder=codec)) + + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 128, 128)]) + batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] + + preds, _ = head.predict(feats, batch_data_samples) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual(preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + self.assertEqual( + preds[0].keypoint_scores.shape, + batch_data_samples[0].gt_instance_labels.keypoint_weights.shape) + + # output heatmap + head = VisPredictHead( + pose_cfg=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + decoder=codec)) + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] + _, pred_heatmaps = head.predict( + feats, batch_data_samples, test_cfg=dict(output_heatmaps=True)) + + self.assertIsInstance(pred_heatmaps[0], PixelData) + self.assertEqual(pred_heatmaps[0].heatmaps.shape, (17, 64, 48)) + + def test_tta(self): + # flip test: vis and heatmap + decoder_cfg = dict( + type='MSRAHeatmap', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2.) 
+ + head = VisPredictHead( + pose_cfg=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + decoder=decoder_cfg)) + + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] + preds, _ = head.predict([feats, feats], + batch_data_samples, + test_cfg=dict( + flip_test=True, + flip_mode='heatmap', + shift_heatmap=True, + )) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual(preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + self.assertEqual( + preds[0].keypoint_scores.shape, + batch_data_samples[0].gt_instance_labels.keypoint_weights.shape) + + def test_loss(self): + head = VisPredictHead( + pose_cfg=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + )) + + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] + losses = head.loss(feats, batch_data_samples) + self.assertIsInstance(losses['loss_kpt'], torch.Tensor) + self.assertEqual(losses['loss_kpt'].shape, torch.Size(())) + self.assertIsInstance(losses['acc_pose'], torch.Tensor) + + self.assertIsInstance(losses['loss_vis'], torch.Tensor) + self.assertEqual(losses['loss_vis'].shape, torch.Size(())) + self.assertIsInstance(losses['acc_vis'], torch.Tensor) + + head = VisPredictHead( + pose_cfg=dict( + type='HeatmapHead', + in_channels=32, + out_channels=17, + )) + + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] + losses = head.loss(feats, batch_data_samples) + self.assertIsInstance(losses['loss_kpt'], torch.Tensor) + self.assertEqual(losses['loss_kpt'].shape, torch.Size(())) + self.assertIsInstance(losses['acc_pose'], torch.Tensor) + + self.assertIsInstance(losses['loss_vis'], torch.Tensor) + self.assertEqual(losses['loss_vis'].shape, torch.Size(())) + self.assertIsInstance(losses['acc_vis'], torch.Tensor) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_models/test_heads/test_regression_heads/test_dsnt_head.py b/tests/test_models/test_heads/test_regression_heads/test_dsnt_head.py index 0c0fd38d29..0e6a3eb9eb 100644 --- a/tests/test_models/test_heads/test_regression_heads/test_dsnt_head.py +++ b/tests/test_models/test_heads/test_regression_heads/test_dsnt_head.py @@ -1,204 +1,204 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import unittest -from typing import List, Tuple -from unittest import TestCase - -import torch -from mmengine.structures import InstanceData, PixelData - -from mmpose.models.heads import DSNTHead -from mmpose.testing import get_packed_inputs - - -class TestDSNTHead(TestCase): - - def _get_feats( - self, - batch_size: int = 2, - feat_shapes: List[Tuple[int, int, int]] = [(32, 6, 8)], - ): - - feats = [ - torch.rand((batch_size, ) + shape, dtype=torch.float32) - for shape in feat_shapes - ] - - return feats - - def test_init(self): - # square heatmap - head = DSNTHead( - in_channels=32, in_featuremap_size=(8, 8), num_joints=17) - self.assertEqual(head.linspace_x.shape, (1, 1, 1, 64)) - self.assertEqual(head.linspace_y.shape, (1, 1, 64, 1)) - self.assertIsNone(head.decoder) - - # rectangle heatmap - head = DSNTHead( - in_channels=32, in_featuremap_size=(6, 8), num_joints=17) - self.assertEqual(head.linspace_x.shape, (1, 1, 1, 6 * 8)) - self.assertEqual(head.linspace_y.shape, (1, 1, 8 * 8, 1)) - self.assertIsNone(head.decoder) - - # 2 deconv + 1x1 conv - head = DSNTHead( - in_channels=32, - in_featuremap_size=(6, 8), - num_joints=17, - deconv_out_channels=(32, 32), - deconv_kernel_sizes=(4, 4), - conv_out_channels=(32, ), - conv_kernel_sizes=(1, ), - ) - self.assertEqual(head.linspace_x.shape, (1, 1, 1, 6 * 4)) - self.assertEqual(head.linspace_y.shape, (1, 1, 8 * 4, 1)) - self.assertIsNone(head.decoder) - - # 2 deconv + w/o 1x1 conv - head = DSNTHead( - in_channels=32, - in_featuremap_size=(6, 8), - num_joints=17, - deconv_out_channels=(32, 32), - deconv_kernel_sizes=(4, 4), - conv_out_channels=(32, ), - conv_kernel_sizes=(1, ), - final_layer=None, - ) - self.assertEqual(head.linspace_x.shape, (1, 1, 1, 6 * 4)) - self.assertEqual(head.linspace_y.shape, (1, 1, 8 * 4, 1)) - self.assertIsNone(head.decoder) - - # w/o deconv and 1x1 conv - head = DSNTHead( - in_channels=32, - in_featuremap_size=(6, 8), - num_joints=17, - deconv_out_channels=tuple(), - deconv_kernel_sizes=tuple(), - final_layer=None, - ) - self.assertEqual(head.linspace_x.shape, (1, 1, 1, 6)) - self.assertEqual(head.linspace_y.shape, (1, 1, 8, 1)) - self.assertIsNone(head.decoder) - - # w/o deconv and 1x1 conv - head = DSNTHead( - in_channels=32, - in_featuremap_size=(6, 8), - num_joints=17, - deconv_out_channels=None, - deconv_kernel_sizes=None, - final_layer=None, - ) - self.assertEqual(head.linspace_x.shape, (1, 1, 1, 6)) - self.assertEqual(head.linspace_y.shape, (1, 1, 8, 1)) - self.assertIsNone(head.decoder) - - # w/ decoder - head = DSNTHead( - in_channels=1024, - in_featuremap_size=(6, 8), - num_joints=17, - decoder=dict( - type='IntegralRegressionLabel', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2)) - - self.assertIsNotNone(head.decoder) - - def test_predict(self): - decoder_cfg = dict( - type='IntegralRegressionLabel', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2) - - head = DSNTHead( - in_channels=32, - in_featuremap_size=(6, 8), - num_joints=17, - decoder=decoder_cfg, - ) - - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs( - batch_size=2, with_reg_label=False)['data_samples'] - preds = head.predict(feats, batch_data_samples) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual(preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - # output heatmap - head = DSNTHead( - in_channels=32, - in_featuremap_size=(6, 8), - num_joints=17, - decoder=decoder_cfg, - ) - 
- feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs( - batch_size=2, with_reg_label=False)['data_samples'] - _, pred_heatmaps = head.predict( - feats, batch_data_samples, test_cfg=dict(output_heatmaps=True)) - - self.assertTrue(len(pred_heatmaps), 2) - self.assertIsInstance(pred_heatmaps[0], PixelData) - self.assertEqual(pred_heatmaps[0].heatmaps.shape, (17, 8 * 8, 6 * 8)) - - def test_tta(self): - decoder_cfg = dict( - type='IntegralRegressionLabel', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2) - - # inputs transform: select - head = DSNTHead( - in_channels=32, - in_featuremap_size=(6, 8), - num_joints=17, - decoder=decoder_cfg, - ) - - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs( - batch_size=2, with_reg_label=False)['data_samples'] - preds = head.predict([feats, feats], - batch_data_samples, - test_cfg=dict(flip_test=True)) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual(preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - def test_loss(self): - for dist_loss in ['l1', 'l2']: - head = DSNTHead( - in_channels=32, - in_featuremap_size=(6, 8), - num_joints=17, - loss=dict( - type='MultipleLossWrapper', - losses=[ - dict(type='SmoothL1Loss', use_target_weight=True), - dict(type='JSDiscretLoss', use_target_weight=True) - ])) - - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs( - batch_size=2, with_reg_label=True)['data_samples'] - losses = head.loss(feats, batch_data_samples) - - self.assertIsInstance(losses['loss_kpt'], torch.Tensor) - self.assertEqual(losses['loss_kpt'].shape, torch.Size()) - self.assertIsInstance(losses['acc_pose'], torch.Tensor) - - -if __name__ == '__main__': - unittest.main() +# Copyright (c) OpenMMLab. All rights reserved. 
+import unittest +from typing import List, Tuple +from unittest import TestCase + +import torch +from mmengine.structures import InstanceData, PixelData + +from mmpose.models.heads import DSNTHead +from mmpose.testing import get_packed_inputs + + +class TestDSNTHead(TestCase): + + def _get_feats( + self, + batch_size: int = 2, + feat_shapes: List[Tuple[int, int, int]] = [(32, 6, 8)], + ): + + feats = [ + torch.rand((batch_size, ) + shape, dtype=torch.float32) + for shape in feat_shapes + ] + + return feats + + def test_init(self): + # square heatmap + head = DSNTHead( + in_channels=32, in_featuremap_size=(8, 8), num_joints=17) + self.assertEqual(head.linspace_x.shape, (1, 1, 1, 64)) + self.assertEqual(head.linspace_y.shape, (1, 1, 64, 1)) + self.assertIsNone(head.decoder) + + # rectangle heatmap + head = DSNTHead( + in_channels=32, in_featuremap_size=(6, 8), num_joints=17) + self.assertEqual(head.linspace_x.shape, (1, 1, 1, 6 * 8)) + self.assertEqual(head.linspace_y.shape, (1, 1, 8 * 8, 1)) + self.assertIsNone(head.decoder) + + # 2 deconv + 1x1 conv + head = DSNTHead( + in_channels=32, + in_featuremap_size=(6, 8), + num_joints=17, + deconv_out_channels=(32, 32), + deconv_kernel_sizes=(4, 4), + conv_out_channels=(32, ), + conv_kernel_sizes=(1, ), + ) + self.assertEqual(head.linspace_x.shape, (1, 1, 1, 6 * 4)) + self.assertEqual(head.linspace_y.shape, (1, 1, 8 * 4, 1)) + self.assertIsNone(head.decoder) + + # 2 deconv + w/o 1x1 conv + head = DSNTHead( + in_channels=32, + in_featuremap_size=(6, 8), + num_joints=17, + deconv_out_channels=(32, 32), + deconv_kernel_sizes=(4, 4), + conv_out_channels=(32, ), + conv_kernel_sizes=(1, ), + final_layer=None, + ) + self.assertEqual(head.linspace_x.shape, (1, 1, 1, 6 * 4)) + self.assertEqual(head.linspace_y.shape, (1, 1, 8 * 4, 1)) + self.assertIsNone(head.decoder) + + # w/o deconv and 1x1 conv + head = DSNTHead( + in_channels=32, + in_featuremap_size=(6, 8), + num_joints=17, + deconv_out_channels=tuple(), + deconv_kernel_sizes=tuple(), + final_layer=None, + ) + self.assertEqual(head.linspace_x.shape, (1, 1, 1, 6)) + self.assertEqual(head.linspace_y.shape, (1, 1, 8, 1)) + self.assertIsNone(head.decoder) + + # w/o deconv and 1x1 conv + head = DSNTHead( + in_channels=32, + in_featuremap_size=(6, 8), + num_joints=17, + deconv_out_channels=None, + deconv_kernel_sizes=None, + final_layer=None, + ) + self.assertEqual(head.linspace_x.shape, (1, 1, 1, 6)) + self.assertEqual(head.linspace_y.shape, (1, 1, 8, 1)) + self.assertIsNone(head.decoder) + + # w/ decoder + head = DSNTHead( + in_channels=1024, + in_featuremap_size=(6, 8), + num_joints=17, + decoder=dict( + type='IntegralRegressionLabel', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2)) + + self.assertIsNotNone(head.decoder) + + def test_predict(self): + decoder_cfg = dict( + type='IntegralRegressionLabel', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2) + + head = DSNTHead( + in_channels=32, + in_featuremap_size=(6, 8), + num_joints=17, + decoder=decoder_cfg, + ) + + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs( + batch_size=2, with_reg_label=False)['data_samples'] + preds = head.predict(feats, batch_data_samples) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual(preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + # output heatmap + head = DSNTHead( + in_channels=32, + in_featuremap_size=(6, 8), + num_joints=17, + decoder=decoder_cfg, + ) + 
+ feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs( + batch_size=2, with_reg_label=False)['data_samples'] + _, pred_heatmaps = head.predict( + feats, batch_data_samples, test_cfg=dict(output_heatmaps=True)) + + self.assertTrue(len(pred_heatmaps), 2) + self.assertIsInstance(pred_heatmaps[0], PixelData) + self.assertEqual(pred_heatmaps[0].heatmaps.shape, (17, 8 * 8, 6 * 8)) + + def test_tta(self): + decoder_cfg = dict( + type='IntegralRegressionLabel', + input_size=(192, 256), + heatmap_size=(48, 64), + sigma=2) + + # inputs transform: select + head = DSNTHead( + in_channels=32, + in_featuremap_size=(6, 8), + num_joints=17, + decoder=decoder_cfg, + ) + + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs( + batch_size=2, with_reg_label=False)['data_samples'] + preds = head.predict([feats, feats], + batch_data_samples, + test_cfg=dict(flip_test=True)) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual(preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + def test_loss(self): + for dist_loss in ['l1', 'l2']: + head = DSNTHead( + in_channels=32, + in_featuremap_size=(6, 8), + num_joints=17, + loss=dict( + type='MultipleLossWrapper', + losses=[ + dict(type='SmoothL1Loss', use_target_weight=True), + dict(type='JSDiscretLoss', use_target_weight=True) + ])) + + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs( + batch_size=2, with_reg_label=True)['data_samples'] + losses = head.loss(feats, batch_data_samples) + + self.assertIsInstance(losses['loss_kpt'], torch.Tensor) + self.assertEqual(losses['loss_kpt'].shape, torch.Size()) + self.assertIsInstance(losses['acc_pose'], torch.Tensor) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_models/test_heads/test_regression_heads/test_integral_regression_head.py b/tests/test_models/test_heads/test_regression_heads/test_integral_regression_head.py index 3fc79f0afb..7bc27d8cfb 100644 --- a/tests/test_models/test_heads/test_regression_heads/test_integral_regression_head.py +++ b/tests/test_models/test_heads/test_regression_heads/test_integral_regression_head.py @@ -1,188 +1,188 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import unittest -from typing import List, Tuple -from unittest import TestCase - -import torch -from mmengine.structures import InstanceData, PixelData - -from mmpose.models.heads import IntegralRegressionHead -from mmpose.testing import get_packed_inputs - - -class TestIntegralRegressionHead(TestCase): - - def _get_feats( - self, - batch_size: int = 2, - feat_shapes: List[Tuple[int, int, int]] = [(32, 6, 8)], - ): - - feats = [ - torch.rand((batch_size, ) + shape, dtype=torch.float32) - for shape in feat_shapes - ] - - return feats - - def test_init(self): - # square heatmap - head = IntegralRegressionHead( - in_channels=32, in_featuremap_size=(8, 8), num_joints=17) - self.assertEqual(head.linspace_x.shape, (1, 1, 1, 64)) - self.assertEqual(head.linspace_y.shape, (1, 1, 64, 1)) - self.assertIsNone(head.decoder) - - # rectangle heatmap - head = IntegralRegressionHead( - in_channels=32, in_featuremap_size=(6, 8), num_joints=17) - self.assertEqual(head.linspace_x.shape, (1, 1, 1, 6 * 8)) - self.assertEqual(head.linspace_y.shape, (1, 1, 8 * 8, 1)) - self.assertIsNone(head.decoder) - - # 2 deconv + 1x1 conv - head = IntegralRegressionHead( - in_channels=32, - in_featuremap_size=(6, 8), - num_joints=17, - deconv_out_channels=(32, 32), - deconv_kernel_sizes=(4, 4), - conv_out_channels=(32, ), - conv_kernel_sizes=(1, ), - ) - self.assertEqual(head.linspace_x.shape, (1, 1, 1, 6 * 4)) - self.assertEqual(head.linspace_y.shape, (1, 1, 8 * 4, 1)) - self.assertIsNone(head.decoder) - - # 2 deconv + w/o 1x1 conv - head = IntegralRegressionHead( - in_channels=32, - in_featuremap_size=(6, 8), - num_joints=17, - deconv_out_channels=(32, 32), - deconv_kernel_sizes=(4, 4), - conv_out_channels=(32, ), - conv_kernel_sizes=(1, ), - final_layer=None, - ) - self.assertEqual(head.linspace_x.shape, (1, 1, 1, 6 * 4)) - self.assertEqual(head.linspace_y.shape, (1, 1, 8 * 4, 1)) - self.assertIsNone(head.decoder) - - # w/o deconv and 1x1 conv - head = IntegralRegressionHead( - in_channels=32, - in_featuremap_size=(6, 8), - num_joints=17, - deconv_out_channels=tuple(), - deconv_kernel_sizes=tuple(), - final_layer=None, - ) - self.assertEqual(head.linspace_x.shape, (1, 1, 1, 6)) - self.assertEqual(head.linspace_y.shape, (1, 1, 8, 1)) - self.assertIsNone(head.decoder) - - # w/o deconv and 1x1 conv - head = IntegralRegressionHead( - in_channels=32, - in_featuremap_size=(6, 8), - num_joints=17, - deconv_out_channels=None, - deconv_kernel_sizes=None, - final_layer=None, - ) - self.assertEqual(head.linspace_x.shape, (1, 1, 1, 6)) - self.assertEqual(head.linspace_y.shape, (1, 1, 8, 1)) - self.assertIsNone(head.decoder) - - # w/ decoder - head = IntegralRegressionHead( - in_channels=1024, - in_featuremap_size=(6, 8), - num_joints=17, - decoder=dict(type='RegressionLabel', input_size=(192, 256)), - ) - self.assertIsNotNone(head.decoder) - - def test_predict(self): - decoder_cfg = dict(type='RegressionLabel', input_size=(192, 256)) - - head = IntegralRegressionHead( - in_channels=32, - in_featuremap_size=(6, 8), - num_joints=17, - decoder=decoder_cfg, - ) - - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs( - batch_size=2, with_heatmap=False)['data_samples'] - preds = head.predict(feats, batch_data_samples) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual(preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - # output heatmap - head = IntegralRegressionHead( - in_channels=32, - 
in_featuremap_size=(6, 8), - num_joints=17, - decoder=decoder_cfg, - ) - - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs( - batch_size=2, with_heatmap=False)['data_samples'] - _, pred_heatmaps = head.predict( - feats, batch_data_samples, test_cfg=dict(output_heatmaps=True)) - - self.assertTrue(len(pred_heatmaps), 2) - self.assertIsInstance(pred_heatmaps[0], PixelData) - self.assertEqual(pred_heatmaps[0].heatmaps.shape, (17, 8 * 8, 6 * 8)) - - def test_tta(self): - decoder_cfg = dict(type='RegressionLabel', input_size=(192, 256)) - - # inputs transform: select - head = IntegralRegressionHead( - in_channels=32, - in_featuremap_size=(6, 8), - num_joints=17, - decoder=decoder_cfg, - ) - - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs( - batch_size=2, with_heatmap=False)['data_samples'] - preds = head.predict([feats, feats], - batch_data_samples, - test_cfg=dict( - flip_test=True, - shift_coords=True, - shift_heatmap=True)) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual(preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - def test_loss(self): - head = IntegralRegressionHead( - in_channels=32, - in_featuremap_size=(6, 8), - num_joints=17, - ) - - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) - batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] - losses = head.loss(feats, batch_data_samples) - - self.assertIsInstance(losses['loss_kpt'], torch.Tensor) - self.assertEqual(losses['loss_kpt'].shape, torch.Size()) - self.assertIsInstance(losses['acc_pose'], torch.Tensor) - - -if __name__ == '__main__': - unittest.main() +# Copyright (c) OpenMMLab. All rights reserved. 
+import unittest +from typing import List, Tuple +from unittest import TestCase + +import torch +from mmengine.structures import InstanceData, PixelData + +from mmpose.models.heads import IntegralRegressionHead +from mmpose.testing import get_packed_inputs + + +class TestIntegralRegressionHead(TestCase): + + def _get_feats( + self, + batch_size: int = 2, + feat_shapes: List[Tuple[int, int, int]] = [(32, 6, 8)], + ): + + feats = [ + torch.rand((batch_size, ) + shape, dtype=torch.float32) + for shape in feat_shapes + ] + + return feats + + def test_init(self): + # square heatmap + head = IntegralRegressionHead( + in_channels=32, in_featuremap_size=(8, 8), num_joints=17) + self.assertEqual(head.linspace_x.shape, (1, 1, 1, 64)) + self.assertEqual(head.linspace_y.shape, (1, 1, 64, 1)) + self.assertIsNone(head.decoder) + + # rectangle heatmap + head = IntegralRegressionHead( + in_channels=32, in_featuremap_size=(6, 8), num_joints=17) + self.assertEqual(head.linspace_x.shape, (1, 1, 1, 6 * 8)) + self.assertEqual(head.linspace_y.shape, (1, 1, 8 * 8, 1)) + self.assertIsNone(head.decoder) + + # 2 deconv + 1x1 conv + head = IntegralRegressionHead( + in_channels=32, + in_featuremap_size=(6, 8), + num_joints=17, + deconv_out_channels=(32, 32), + deconv_kernel_sizes=(4, 4), + conv_out_channels=(32, ), + conv_kernel_sizes=(1, ), + ) + self.assertEqual(head.linspace_x.shape, (1, 1, 1, 6 * 4)) + self.assertEqual(head.linspace_y.shape, (1, 1, 8 * 4, 1)) + self.assertIsNone(head.decoder) + + # 2 deconv + w/o 1x1 conv + head = IntegralRegressionHead( + in_channels=32, + in_featuremap_size=(6, 8), + num_joints=17, + deconv_out_channels=(32, 32), + deconv_kernel_sizes=(4, 4), + conv_out_channels=(32, ), + conv_kernel_sizes=(1, ), + final_layer=None, + ) + self.assertEqual(head.linspace_x.shape, (1, 1, 1, 6 * 4)) + self.assertEqual(head.linspace_y.shape, (1, 1, 8 * 4, 1)) + self.assertIsNone(head.decoder) + + # w/o deconv and 1x1 conv + head = IntegralRegressionHead( + in_channels=32, + in_featuremap_size=(6, 8), + num_joints=17, + deconv_out_channels=tuple(), + deconv_kernel_sizes=tuple(), + final_layer=None, + ) + self.assertEqual(head.linspace_x.shape, (1, 1, 1, 6)) + self.assertEqual(head.linspace_y.shape, (1, 1, 8, 1)) + self.assertIsNone(head.decoder) + + # w/o deconv and 1x1 conv + head = IntegralRegressionHead( + in_channels=32, + in_featuremap_size=(6, 8), + num_joints=17, + deconv_out_channels=None, + deconv_kernel_sizes=None, + final_layer=None, + ) + self.assertEqual(head.linspace_x.shape, (1, 1, 1, 6)) + self.assertEqual(head.linspace_y.shape, (1, 1, 8, 1)) + self.assertIsNone(head.decoder) + + # w/ decoder + head = IntegralRegressionHead( + in_channels=1024, + in_featuremap_size=(6, 8), + num_joints=17, + decoder=dict(type='RegressionLabel', input_size=(192, 256)), + ) + self.assertIsNotNone(head.decoder) + + def test_predict(self): + decoder_cfg = dict(type='RegressionLabel', input_size=(192, 256)) + + head = IntegralRegressionHead( + in_channels=32, + in_featuremap_size=(6, 8), + num_joints=17, + decoder=decoder_cfg, + ) + + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs( + batch_size=2, with_heatmap=False)['data_samples'] + preds = head.predict(feats, batch_data_samples) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual(preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + # output heatmap + head = IntegralRegressionHead( + in_channels=32, + 
in_featuremap_size=(6, 8), + num_joints=17, + decoder=decoder_cfg, + ) + + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs( + batch_size=2, with_heatmap=False)['data_samples'] + _, pred_heatmaps = head.predict( + feats, batch_data_samples, test_cfg=dict(output_heatmaps=True)) + + self.assertTrue(len(pred_heatmaps), 2) + self.assertIsInstance(pred_heatmaps[0], PixelData) + self.assertEqual(pred_heatmaps[0].heatmaps.shape, (17, 8 * 8, 6 * 8)) + + def test_tta(self): + decoder_cfg = dict(type='RegressionLabel', input_size=(192, 256)) + + # inputs transform: select + head = IntegralRegressionHead( + in_channels=32, + in_featuremap_size=(6, 8), + num_joints=17, + decoder=decoder_cfg, + ) + + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs( + batch_size=2, with_heatmap=False)['data_samples'] + preds = head.predict([feats, feats], + batch_data_samples, + test_cfg=dict( + flip_test=True, + shift_coords=True, + shift_heatmap=True)) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual(preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + def test_loss(self): + head = IntegralRegressionHead( + in_channels=32, + in_featuremap_size=(6, 8), + num_joints=17, + ) + + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 8, 6)]) + batch_data_samples = get_packed_inputs(batch_size=2)['data_samples'] + losses = head.loss(feats, batch_data_samples) + + self.assertIsInstance(losses['loss_kpt'], torch.Tensor) + self.assertEqual(losses['loss_kpt'].shape, torch.Size()) + self.assertIsInstance(losses['acc_pose'], torch.Tensor) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_models/test_heads/test_regression_heads/test_regression_head.py b/tests/test_models/test_heads/test_regression_heads/test_regression_head.py index 5a3fe701e0..31526e55c4 100644 --- a/tests/test_models/test_heads/test_regression_heads/test_regression_head.py +++ b/tests/test_models/test_heads/test_regression_heads/test_regression_head.py @@ -1,100 +1,100 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import unittest -from typing import List, Tuple -from unittest import TestCase - -import torch -from mmengine.structures import InstanceData - -from mmpose.models.heads import RegressionHead -from mmpose.testing import get_packed_inputs - - -class TestRegressionHead(TestCase): - - def _get_feats( - self, - batch_size: int = 2, - feat_shapes: List[Tuple[int, int, int]] = [(32, 1, 1)], - ): - - feats = [ - torch.rand((batch_size, ) + shape, dtype=torch.float32) - for shape in feat_shapes - ] - - return feats - - def test_init(self): - - head = RegressionHead(in_channels=1024, num_joints=17) - self.assertEqual(head.fc.weight.shape, (17 * 2, 1024)) - self.assertIsNone(head.decoder) - - # w/ decoder - head = RegressionHead( - in_channels=1024, - num_joints=17, - decoder=dict(type='RegressionLabel', input_size=(192, 256)), - ) - self.assertIsNotNone(head.decoder) - - def test_predict(self): - decoder_cfg = dict(type='RegressionLabel', input_size=(192, 256)) - - head = RegressionHead( - in_channels=32, - num_joints=17, - decoder=decoder_cfg, - ) - - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 1, 1)]) - batch_data_samples = get_packed_inputs( - batch_size=2, with_heatmap=False)['data_samples'] - preds = head.predict(feats, batch_data_samples) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual(preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - def test_tta(self): - decoder_cfg = dict(type='RegressionLabel', input_size=(192, 256)) - - # inputs transform: select - head = RegressionHead( - in_channels=32, - num_joints=17, - decoder=decoder_cfg, - ) - - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 1, 1)]) - batch_data_samples = get_packed_inputs( - batch_size=2, with_heatmap=False)['data_samples'] - preds = head.predict([feats, feats], - batch_data_samples, - test_cfg=dict(flip_test=True, shift_coords=True)) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual(preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - def test_loss(self): - head = RegressionHead( - in_channels=32, - num_joints=17, - ) - - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 1, 1)]) - batch_data_samples = get_packed_inputs( - batch_size=2, with_heatmap=False)['data_samples'] - losses = head.loss(feats, batch_data_samples) - - self.assertIsInstance(losses['loss_kpt'], torch.Tensor) - self.assertEqual(losses['loss_kpt'].shape, torch.Size()) - self.assertIsInstance(losses['acc_pose'], torch.Tensor) - - -if __name__ == '__main__': - unittest.main() +# Copyright (c) OpenMMLab. All rights reserved. 
+import unittest +from typing import List, Tuple +from unittest import TestCase + +import torch +from mmengine.structures import InstanceData + +from mmpose.models.heads import RegressionHead +from mmpose.testing import get_packed_inputs + + +class TestRegressionHead(TestCase): + + def _get_feats( + self, + batch_size: int = 2, + feat_shapes: List[Tuple[int, int, int]] = [(32, 1, 1)], + ): + + feats = [ + torch.rand((batch_size, ) + shape, dtype=torch.float32) + for shape in feat_shapes + ] + + return feats + + def test_init(self): + + head = RegressionHead(in_channels=1024, num_joints=17) + self.assertEqual(head.fc.weight.shape, (17 * 2, 1024)) + self.assertIsNone(head.decoder) + + # w/ decoder + head = RegressionHead( + in_channels=1024, + num_joints=17, + decoder=dict(type='RegressionLabel', input_size=(192, 256)), + ) + self.assertIsNotNone(head.decoder) + + def test_predict(self): + decoder_cfg = dict(type='RegressionLabel', input_size=(192, 256)) + + head = RegressionHead( + in_channels=32, + num_joints=17, + decoder=decoder_cfg, + ) + + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 1, 1)]) + batch_data_samples = get_packed_inputs( + batch_size=2, with_heatmap=False)['data_samples'] + preds = head.predict(feats, batch_data_samples) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual(preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + def test_tta(self): + decoder_cfg = dict(type='RegressionLabel', input_size=(192, 256)) + + # inputs transform: select + head = RegressionHead( + in_channels=32, + num_joints=17, + decoder=decoder_cfg, + ) + + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 1, 1)]) + batch_data_samples = get_packed_inputs( + batch_size=2, with_heatmap=False)['data_samples'] + preds = head.predict([feats, feats], + batch_data_samples, + test_cfg=dict(flip_test=True, shift_coords=True)) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual(preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + def test_loss(self): + head = RegressionHead( + in_channels=32, + num_joints=17, + ) + + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 1, 1)]) + batch_data_samples = get_packed_inputs( + batch_size=2, with_heatmap=False)['data_samples'] + losses = head.loss(feats, batch_data_samples) + + self.assertIsInstance(losses['loss_kpt'], torch.Tensor) + self.assertEqual(losses['loss_kpt'].shape, torch.Size()) + self.assertIsInstance(losses['acc_pose'], torch.Tensor) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_models/test_heads/test_regression_heads/test_rle_head.py b/tests/test_models/test_heads/test_regression_heads/test_rle_head.py index 4f7d475943..1f685b3977 100644 --- a/tests/test_models/test_heads/test_regression_heads/test_rle_head.py +++ b/tests/test_models/test_heads/test_regression_heads/test_rle_head.py @@ -1,185 +1,185 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import unittest -from typing import List, Tuple -from unittest import TestCase - -import torch -from mmengine.structures import InstanceData - -from mmpose.models.heads import RLEHead -from mmpose.testing import get_packed_inputs - - -class TestRLEHead(TestCase): - - def _get_feats( - self, - batch_size: int = 2, - feat_shapes: List[Tuple[int, int, int]] = [(32, 1, 1)], - ): - - feats = [ - torch.rand((batch_size, ) + shape, dtype=torch.float32) - for shape in feat_shapes - ] - - return feats - - def test_init(self): - - # w/ sigma - head = RLEHead(in_channels=1024, num_joints=17) - self.assertEqual(head.fc.weight.shape, (17 * 4, 1024)) - self.assertIsNone(head.decoder) - - # w/ decoder - head = RLEHead( - in_channels=1024, - num_joints=17, - decoder=dict(type='RegressionLabel', input_size=(192, 256)), - ) - self.assertIsNotNone(head.decoder) - - def test_predict(self): - decoder_cfg = dict(type='RegressionLabel', input_size=(192, 256)) - - head = RLEHead( - in_channels=32, - num_joints=17, - decoder=decoder_cfg, - ) - - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 1, 1)]) - batch_data_samples = get_packed_inputs( - batch_size=2, with_heatmap=False)['data_samples'] - preds = head.predict(feats, batch_data_samples) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual(preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - def test_tta(self): - decoder_cfg = dict(type='RegressionLabel', input_size=(192, 256)) - - head = RLEHead( - in_channels=32, - num_joints=17, - decoder=decoder_cfg, - ) - - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 1, 1)]) - batch_data_samples = get_packed_inputs( - batch_size=2, with_heatmap=False)['data_samples'] - preds = head.predict([feats, feats], - batch_data_samples, - test_cfg=dict(flip_test=True)) - - self.assertTrue(len(preds), 2) - self.assertIsInstance(preds[0], InstanceData) - self.assertEqual(preds[0].keypoints.shape, - batch_data_samples[0].gt_instances.keypoints.shape) - - def test_loss(self): - head = RLEHead( - in_channels=32, - num_joints=17, - ) - - feats = self._get_feats(batch_size=2, feat_shapes=[(32, 1, 1)]) - batch_data_samples = get_packed_inputs( - batch_size=2, with_heatmap=False)['data_samples'] - losses = head.loss(feats, batch_data_samples) - - self.assertIsInstance(losses['loss_kpt'], torch.Tensor) - self.assertEqual(losses['loss_kpt'].shape, torch.Size()) - self.assertIsInstance(losses['acc_pose'], torch.Tensor) - - def test_state_dict_compatible(self): - - head = RLEHead(in_channels=2048, num_joints=17) - - state_dict = { - 'fc.weight': torch.zeros((17 * 4, 2048)), - 'fc.bias': torch.zeros((17 * 4)), - 'loss.flow_model.loc': torch.zeros(torch.Size([2])), - 'loss.flow_model.cov': torch.zeros(torch.Size([2, 2])), - 'loss.flow_model.mask': torch.zeros(torch.Size([6, 2])), - 'loss.flow_model.s.0.0.weight': torch.zeros(torch.Size([64, 2])), - 'loss.flow_model.s.0.0.bias': torch.zeros(torch.Size([64])), - 'loss.flow_model.s.0.2.weight': torch.zeros(torch.Size([64, 64])), - 'loss.flow_model.s.0.2.bias': torch.zeros(torch.Size([64])), - 'loss.flow_model.s.0.4.weight': torch.zeros(torch.Size([2, 64])), - 'loss.flow_model.s.0.4.bias': torch.zeros(torch.Size([2])), - 'loss.flow_model.s.1.0.weight': torch.zeros(torch.Size([64, 2])), - 'loss.flow_model.s.1.0.bias': torch.zeros(torch.Size([64])), - 'loss.flow_model.s.1.2.weight': torch.zeros(torch.Size([64, 64])), - 'loss.flow_model.s.1.2.bias': torch.zeros(torch.Size([64])), - 
'loss.flow_model.s.1.4.weight': torch.zeros(torch.Size([2, 64])), - 'loss.flow_model.s.1.4.bias': torch.zeros(torch.Size([2])), - 'loss.flow_model.s.2.0.weight': torch.zeros(torch.Size([64, 2])), - 'loss.flow_model.s.2.0.bias': torch.zeros(torch.Size([64])), - 'loss.flow_model.s.2.2.weight': torch.zeros(torch.Size([64, 64])), - 'loss.flow_model.s.2.2.bias': torch.zeros(torch.Size([64])), - 'loss.flow_model.s.2.4.weight': torch.zeros(torch.Size([2, 64])), - 'loss.flow_model.s.2.4.bias': torch.zeros(torch.Size([2])), - 'loss.flow_model.s.3.0.weight': torch.zeros(torch.Size([64, 2])), - 'loss.flow_model.s.3.0.bias': torch.zeros(torch.Size([64])), - 'loss.flow_model.s.3.2.weight': torch.zeros(torch.Size([64, 64])), - 'loss.flow_model.s.3.2.bias': torch.zeros(torch.Size([64])), - 'loss.flow_model.s.3.4.weight': torch.zeros(torch.Size([2, 64])), - 'loss.flow_model.s.3.4.bias': torch.zeros(torch.Size([2])), - 'loss.flow_model.s.4.0.weight': torch.zeros(torch.Size([64, 2])), - 'loss.flow_model.s.4.0.bias': torch.zeros(torch.Size([64])), - 'loss.flow_model.s.4.2.weight': torch.zeros(torch.Size([64, 64])), - 'loss.flow_model.s.4.2.bias': torch.zeros(torch.Size([64])), - 'loss.flow_model.s.4.4.weight': torch.zeros(torch.Size([2, 64])), - 'loss.flow_model.s.4.4.bias': torch.zeros(torch.Size([2])), - 'loss.flow_model.s.5.0.weight': torch.zeros(torch.Size([64, 2])), - 'loss.flow_model.s.5.0.bias': torch.zeros(torch.Size([64])), - 'loss.flow_model.s.5.2.weight': torch.zeros(torch.Size([64, 64])), - 'loss.flow_model.s.5.2.bias': torch.zeros(torch.Size([64])), - 'loss.flow_model.s.5.4.weight': torch.zeros(torch.Size([2, 64])), - 'loss.flow_model.s.5.4.bias': torch.zeros(torch.Size([2])), - 'loss.flow_model.t.0.0.weight': torch.zeros(torch.Size([64, 2])), - 'loss.flow_model.t.0.0.bias': torch.zeros(torch.Size([64])), - 'loss.flow_model.t.0.2.weight': torch.zeros(torch.Size([64, 64])), - 'loss.flow_model.t.0.2.bias': torch.zeros(torch.Size([64])), - 'loss.flow_model.t.0.4.weight': torch.zeros(torch.Size([2, 64])), - 'loss.flow_model.t.0.4.bias': torch.zeros(torch.Size([2])), - 'loss.flow_model.t.1.0.weight': torch.zeros(torch.Size([64, 2])), - 'loss.flow_model.t.1.0.bias': torch.zeros(torch.Size([64])), - 'loss.flow_model.t.1.2.weight': torch.zeros(torch.Size([64, 64])), - 'loss.flow_model.t.1.2.bias': torch.zeros(torch.Size([64])), - 'loss.flow_model.t.1.4.weight': torch.zeros(torch.Size([2, 64])), - 'loss.flow_model.t.1.4.bias': torch.zeros(torch.Size([2])), - 'loss.flow_model.t.2.0.weight': torch.zeros(torch.Size([64, 2])), - 'loss.flow_model.t.2.0.bias': torch.zeros(torch.Size([64])), - 'loss.flow_model.t.2.2.weight': torch.zeros(torch.Size([64, 64])), - 'loss.flow_model.t.2.2.bias': torch.zeros(torch.Size([64])), - 'loss.flow_model.t.2.4.weight': torch.zeros(torch.Size([2, 64])), - 'loss.flow_model.t.2.4.bias': torch.zeros(torch.Size([2])), - 'loss.flow_model.t.3.0.weight': torch.zeros(torch.Size([64, 2])), - 'loss.flow_model.t.3.0.bias': torch.zeros(torch.Size([64])), - 'loss.flow_model.t.3.2.weight': torch.zeros(torch.Size([64, 64])), - 'loss.flow_model.t.3.2.bias': torch.zeros(torch.Size([64])), - 'loss.flow_model.t.3.4.weight': torch.zeros(torch.Size([2, 64])), - 'loss.flow_model.t.3.4.bias': torch.zeros(torch.Size([2])), - 'loss.flow_model.t.4.0.weight': torch.zeros(torch.Size([64, 2])), - 'loss.flow_model.t.4.0.bias': torch.zeros(torch.Size([64])), - 'loss.flow_model.t.4.2.weight': torch.zeros(torch.Size([64, 64])), - 'loss.flow_model.t.4.2.bias': torch.zeros(torch.Size([64])), - 
'loss.flow_model.t.4.4.weight': torch.zeros(torch.Size([2, 64])), - 'loss.flow_model.t.4.4.bias': torch.zeros(torch.Size([2])), - 'loss.flow_model.t.5.0.weight': torch.zeros(torch.Size([64, 2])), - 'loss.flow_model.t.5.0.bias': torch.zeros(torch.Size([64])), - 'loss.flow_model.t.5.2.weight': torch.zeros(torch.Size([64, 64])), - 'loss.flow_model.t.5.2.bias': torch.zeros(torch.Size([64])), - 'loss.flow_model.t.5.4.weight': torch.zeros(torch.Size([2, 64])), - 'loss.flow_model.t.5.4.bias': torch.zeros(torch.Size([2])) - } - head.load_state_dict(state_dict) - - -if __name__ == '__main__': - unittest.main() +# Copyright (c) OpenMMLab. All rights reserved. +import unittest +from typing import List, Tuple +from unittest import TestCase + +import torch +from mmengine.structures import InstanceData + +from mmpose.models.heads import RLEHead +from mmpose.testing import get_packed_inputs + + +class TestRLEHead(TestCase): + + def _get_feats( + self, + batch_size: int = 2, + feat_shapes: List[Tuple[int, int, int]] = [(32, 1, 1)], + ): + + feats = [ + torch.rand((batch_size, ) + shape, dtype=torch.float32) + for shape in feat_shapes + ] + + return feats + + def test_init(self): + + # w/ sigma + head = RLEHead(in_channels=1024, num_joints=17) + self.assertEqual(head.fc.weight.shape, (17 * 4, 1024)) + self.assertIsNone(head.decoder) + + # w/ decoder + head = RLEHead( + in_channels=1024, + num_joints=17, + decoder=dict(type='RegressionLabel', input_size=(192, 256)), + ) + self.assertIsNotNone(head.decoder) + + def test_predict(self): + decoder_cfg = dict(type='RegressionLabel', input_size=(192, 256)) + + head = RLEHead( + in_channels=32, + num_joints=17, + decoder=decoder_cfg, + ) + + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 1, 1)]) + batch_data_samples = get_packed_inputs( + batch_size=2, with_heatmap=False)['data_samples'] + preds = head.predict(feats, batch_data_samples) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual(preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + def test_tta(self): + decoder_cfg = dict(type='RegressionLabel', input_size=(192, 256)) + + head = RLEHead( + in_channels=32, + num_joints=17, + decoder=decoder_cfg, + ) + + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 1, 1)]) + batch_data_samples = get_packed_inputs( + batch_size=2, with_heatmap=False)['data_samples'] + preds = head.predict([feats, feats], + batch_data_samples, + test_cfg=dict(flip_test=True)) + + self.assertTrue(len(preds), 2) + self.assertIsInstance(preds[0], InstanceData) + self.assertEqual(preds[0].keypoints.shape, + batch_data_samples[0].gt_instances.keypoints.shape) + + def test_loss(self): + head = RLEHead( + in_channels=32, + num_joints=17, + ) + + feats = self._get_feats(batch_size=2, feat_shapes=[(32, 1, 1)]) + batch_data_samples = get_packed_inputs( + batch_size=2, with_heatmap=False)['data_samples'] + losses = head.loss(feats, batch_data_samples) + + self.assertIsInstance(losses['loss_kpt'], torch.Tensor) + self.assertEqual(losses['loss_kpt'].shape, torch.Size()) + self.assertIsInstance(losses['acc_pose'], torch.Tensor) + + def test_state_dict_compatible(self): + + head = RLEHead(in_channels=2048, num_joints=17) + + state_dict = { + 'fc.weight': torch.zeros((17 * 4, 2048)), + 'fc.bias': torch.zeros((17 * 4)), + 'loss.flow_model.loc': torch.zeros(torch.Size([2])), + 'loss.flow_model.cov': torch.zeros(torch.Size([2, 2])), + 'loss.flow_model.mask': torch.zeros(torch.Size([6, 2])), + 
'loss.flow_model.s.0.0.weight': torch.zeros(torch.Size([64, 2])), + 'loss.flow_model.s.0.0.bias': torch.zeros(torch.Size([64])), + 'loss.flow_model.s.0.2.weight': torch.zeros(torch.Size([64, 64])), + 'loss.flow_model.s.0.2.bias': torch.zeros(torch.Size([64])), + 'loss.flow_model.s.0.4.weight': torch.zeros(torch.Size([2, 64])), + 'loss.flow_model.s.0.4.bias': torch.zeros(torch.Size([2])), + 'loss.flow_model.s.1.0.weight': torch.zeros(torch.Size([64, 2])), + 'loss.flow_model.s.1.0.bias': torch.zeros(torch.Size([64])), + 'loss.flow_model.s.1.2.weight': torch.zeros(torch.Size([64, 64])), + 'loss.flow_model.s.1.2.bias': torch.zeros(torch.Size([64])), + 'loss.flow_model.s.1.4.weight': torch.zeros(torch.Size([2, 64])), + 'loss.flow_model.s.1.4.bias': torch.zeros(torch.Size([2])), + 'loss.flow_model.s.2.0.weight': torch.zeros(torch.Size([64, 2])), + 'loss.flow_model.s.2.0.bias': torch.zeros(torch.Size([64])), + 'loss.flow_model.s.2.2.weight': torch.zeros(torch.Size([64, 64])), + 'loss.flow_model.s.2.2.bias': torch.zeros(torch.Size([64])), + 'loss.flow_model.s.2.4.weight': torch.zeros(torch.Size([2, 64])), + 'loss.flow_model.s.2.4.bias': torch.zeros(torch.Size([2])), + 'loss.flow_model.s.3.0.weight': torch.zeros(torch.Size([64, 2])), + 'loss.flow_model.s.3.0.bias': torch.zeros(torch.Size([64])), + 'loss.flow_model.s.3.2.weight': torch.zeros(torch.Size([64, 64])), + 'loss.flow_model.s.3.2.bias': torch.zeros(torch.Size([64])), + 'loss.flow_model.s.3.4.weight': torch.zeros(torch.Size([2, 64])), + 'loss.flow_model.s.3.4.bias': torch.zeros(torch.Size([2])), + 'loss.flow_model.s.4.0.weight': torch.zeros(torch.Size([64, 2])), + 'loss.flow_model.s.4.0.bias': torch.zeros(torch.Size([64])), + 'loss.flow_model.s.4.2.weight': torch.zeros(torch.Size([64, 64])), + 'loss.flow_model.s.4.2.bias': torch.zeros(torch.Size([64])), + 'loss.flow_model.s.4.4.weight': torch.zeros(torch.Size([2, 64])), + 'loss.flow_model.s.4.4.bias': torch.zeros(torch.Size([2])), + 'loss.flow_model.s.5.0.weight': torch.zeros(torch.Size([64, 2])), + 'loss.flow_model.s.5.0.bias': torch.zeros(torch.Size([64])), + 'loss.flow_model.s.5.2.weight': torch.zeros(torch.Size([64, 64])), + 'loss.flow_model.s.5.2.bias': torch.zeros(torch.Size([64])), + 'loss.flow_model.s.5.4.weight': torch.zeros(torch.Size([2, 64])), + 'loss.flow_model.s.5.4.bias': torch.zeros(torch.Size([2])), + 'loss.flow_model.t.0.0.weight': torch.zeros(torch.Size([64, 2])), + 'loss.flow_model.t.0.0.bias': torch.zeros(torch.Size([64])), + 'loss.flow_model.t.0.2.weight': torch.zeros(torch.Size([64, 64])), + 'loss.flow_model.t.0.2.bias': torch.zeros(torch.Size([64])), + 'loss.flow_model.t.0.4.weight': torch.zeros(torch.Size([2, 64])), + 'loss.flow_model.t.0.4.bias': torch.zeros(torch.Size([2])), + 'loss.flow_model.t.1.0.weight': torch.zeros(torch.Size([64, 2])), + 'loss.flow_model.t.1.0.bias': torch.zeros(torch.Size([64])), + 'loss.flow_model.t.1.2.weight': torch.zeros(torch.Size([64, 64])), + 'loss.flow_model.t.1.2.bias': torch.zeros(torch.Size([64])), + 'loss.flow_model.t.1.4.weight': torch.zeros(torch.Size([2, 64])), + 'loss.flow_model.t.1.4.bias': torch.zeros(torch.Size([2])), + 'loss.flow_model.t.2.0.weight': torch.zeros(torch.Size([64, 2])), + 'loss.flow_model.t.2.0.bias': torch.zeros(torch.Size([64])), + 'loss.flow_model.t.2.2.weight': torch.zeros(torch.Size([64, 64])), + 'loss.flow_model.t.2.2.bias': torch.zeros(torch.Size([64])), + 'loss.flow_model.t.2.4.weight': torch.zeros(torch.Size([2, 64])), + 'loss.flow_model.t.2.4.bias': torch.zeros(torch.Size([2])), + 
'loss.flow_model.t.3.0.weight': torch.zeros(torch.Size([64, 2])), + 'loss.flow_model.t.3.0.bias': torch.zeros(torch.Size([64])), + 'loss.flow_model.t.3.2.weight': torch.zeros(torch.Size([64, 64])), + 'loss.flow_model.t.3.2.bias': torch.zeros(torch.Size([64])), + 'loss.flow_model.t.3.4.weight': torch.zeros(torch.Size([2, 64])), + 'loss.flow_model.t.3.4.bias': torch.zeros(torch.Size([2])), + 'loss.flow_model.t.4.0.weight': torch.zeros(torch.Size([64, 2])), + 'loss.flow_model.t.4.0.bias': torch.zeros(torch.Size([64])), + 'loss.flow_model.t.4.2.weight': torch.zeros(torch.Size([64, 64])), + 'loss.flow_model.t.4.2.bias': torch.zeros(torch.Size([64])), + 'loss.flow_model.t.4.4.weight': torch.zeros(torch.Size([2, 64])), + 'loss.flow_model.t.4.4.bias': torch.zeros(torch.Size([2])), + 'loss.flow_model.t.5.0.weight': torch.zeros(torch.Size([64, 2])), + 'loss.flow_model.t.5.0.bias': torch.zeros(torch.Size([64])), + 'loss.flow_model.t.5.2.weight': torch.zeros(torch.Size([64, 64])), + 'loss.flow_model.t.5.2.bias': torch.zeros(torch.Size([64])), + 'loss.flow_model.t.5.4.weight': torch.zeros(torch.Size([2, 64])), + 'loss.flow_model.t.5.4.bias': torch.zeros(torch.Size([2])) + } + head.load_state_dict(state_dict) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_models/test_losses/test_ae_loss.py b/tests/test_models/test_losses/test_ae_loss.py index 406c075532..8829b81052 100644 --- a/tests/test_models/test_losses/test_ae_loss.py +++ b/tests/test_models/test_losses/test_ae_loss.py @@ -1,186 +1,186 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from itertools import product -from typing import Tuple -from unittest import TestCase - -import numpy as np -import torch -import torch.nn as nn -from torch import Tensor - -from mmpose.codecs.associative_embedding import AssociativeEmbedding -from mmpose.models.losses.ae_loss import AssociativeEmbeddingLoss -from mmpose.testing._utils import get_coco_sample - - -class AELoss(nn.Module): - """Associative Embedding loss in MMPose v0.x.""" - - def __init__(self, loss_type): - super().__init__() - self.loss_type = loss_type - - @staticmethod - def _make_input(t, requires_grad=False, device=torch.device('cpu')): - """Make zero inputs for AE loss. - - Args: - t (torch.Tensor): input - requires_grad (bool): Option to use requires_grad. - device: torch device - - Returns: - torch.Tensor: zero input. - """ - inp = torch.autograd.Variable(t, requires_grad=requires_grad) - inp = inp.sum() - inp = inp.to(device) - return inp - - def singleTagLoss(self, pred_tag, joints): - """Associative embedding loss for one image. - - Note: - - heatmaps weight: W - - heatmaps height: H - - max_num_people: M - - num_keypoints: K - - Args: - pred_tag (torch.Tensor[KxHxW,1]): tag of output for one image. - joints (torch.Tensor[M,K,2]): joints information for one image. 
- """ - tags = [] - pull = 0 - pred_tag = pred_tag.view(17, -1, 1) - for joints_per_person in joints: - tmp = [] - for k, joint in enumerate(joints_per_person): - if joint[1] > 0: - tmp.append(pred_tag[k, joint[0]]) - if len(tmp) == 0: - continue - tmp = torch.stack(tmp) - tags.append(torch.mean(tmp, dim=0)) - pull = pull + torch.mean((tmp - tags[-1].expand_as(tmp))**2) - - num_tags = len(tags) - if num_tags == 0: - return (self._make_input( - torch.zeros(1).float(), device=pred_tag.device), - self._make_input( - torch.zeros(1).float(), device=pred_tag.device)) - elif num_tags == 1: - return (self._make_input( - torch.zeros(1).float(), device=pred_tag.device), pull) - - tags = torch.stack(tags) - - size = (num_tags, num_tags) - A = tags.expand(*size) - B = A.permute(1, 0) - - diff = A - B - - if self.loss_type == 'exp': - diff = torch.pow(diff, 2) - push = torch.exp(-diff) - push = torch.sum(push) - elif self.loss_type == 'max': - diff = 1 - torch.abs(diff) - push = torch.clamp(diff, min=0).sum() - num_tags - else: - raise ValueError('Unknown ae loss type') - - push_loss = push / ((num_tags - 1) * num_tags) * 0.5 - pull_loss = pull / (num_tags) - - return push_loss, pull_loss - - def forward(self, tags, keypoint_indices): - assert tags.shape[0] == len(keypoint_indices) - - pull_loss = 0. - push_loss = 0. - - for i in range(tags.shape[0]): - _push, _pull = self.singleTagLoss(tags[i].view(-1, 1), - keypoint_indices[i]) - pull_loss += _pull - push_loss += _push - - return pull_loss, push_loss - - -class TestAssociativeEmbeddingLoss(TestCase): - - def _make_input(self, num_instance: int) -> Tuple[Tensor, Tensor]: - - encoder = AssociativeEmbedding( - input_size=(256, 256), heatmap_size=(64, 64)) - - data = get_coco_sample( - img_shape=(256, 256), num_instances=num_instance) - encoded = encoder.encode(data['keypoints'], data['keypoints_visible']) - heatmaps = encoded['heatmaps'] - keypoint_indices = encoded['keypoint_indices'] - - tags = self._get_tags( - heatmaps, keypoint_indices, tag_per_keypoint=True) - - batch_tags = torch.from_numpy(tags[None]) - batch_keypoint_indices = [torch.from_numpy(keypoint_indices)] - - return batch_tags, batch_keypoint_indices - - def _get_tags(self, - heatmaps, - keypoint_indices, - tag_per_keypoint: bool, - with_randomness: bool = True): - - K, H, W = heatmaps.shape - N = keypoint_indices.shape[0] - - if tag_per_keypoint: - tags = np.zeros((K, H, W), dtype=np.float32) - else: - tags = np.zeros((1, H, W), dtype=np.float32) - - for n, k in product(range(N), range(K)): - y, x = np.unravel_index(keypoint_indices[n, k, 0], (H, W)) - - randomness = np.random.rand() if with_randomness else 0 - - if tag_per_keypoint: - tags[k, y, x] = n + randomness - else: - tags[0, y, x] = n + randomness - - return tags - - def test_loss(self): - - tags, keypoint_indices = self._make_input(num_instance=2) - - # test loss calculation - loss_module = AssociativeEmbeddingLoss() - pull_loss, push_loss = loss_module(tags, keypoint_indices) - _pull_loss, _push_loss = AELoss('exp')(tags, keypoint_indices) - - self.assertTrue(torch.allclose(pull_loss, _pull_loss)) - self.assertTrue(torch.allclose(push_loss, _push_loss)) - - # test loss weight - loss_module = AssociativeEmbeddingLoss(loss_weight=0.) - pull_loss, push_loss = loss_module(tags, keypoint_indices) - - self.assertTrue(torch.allclose(pull_loss, torch.zeros(1))) - self.assertTrue(torch.allclose(push_loss, torch.zeros(1))) - - # test push loss factor - loss_module = AssociativeEmbeddingLoss(push_loss_factor=0.) 
- pull_loss, push_loss = loss_module(tags, keypoint_indices) - - self.assertFalse(torch.allclose(pull_loss, torch.zeros(1))) - self.assertTrue(torch.allclose(push_loss, torch.zeros(1))) +# Copyright (c) OpenMMLab. All rights reserved. +from itertools import product +from typing import Tuple +from unittest import TestCase + +import numpy as np +import torch +import torch.nn as nn +from torch import Tensor + +from mmpose.codecs.associative_embedding import AssociativeEmbedding +from mmpose.models.losses.ae_loss import AssociativeEmbeddingLoss +from mmpose.testing._utils import get_coco_sample + + +class AELoss(nn.Module): + """Associative Embedding loss in MMPose v0.x.""" + + def __init__(self, loss_type): + super().__init__() + self.loss_type = loss_type + + @staticmethod + def _make_input(t, requires_grad=False, device=torch.device('cpu')): + """Make zero inputs for AE loss. + + Args: + t (torch.Tensor): input + requires_grad (bool): Option to use requires_grad. + device: torch device + + Returns: + torch.Tensor: zero input. + """ + inp = torch.autograd.Variable(t, requires_grad=requires_grad) + inp = inp.sum() + inp = inp.to(device) + return inp + + def singleTagLoss(self, pred_tag, joints): + """Associative embedding loss for one image. + + Note: + - heatmaps weight: W + - heatmaps height: H + - max_num_people: M + - num_keypoints: K + + Args: + pred_tag (torch.Tensor[KxHxW,1]): tag of output for one image. + joints (torch.Tensor[M,K,2]): joints information for one image. + """ + tags = [] + pull = 0 + pred_tag = pred_tag.view(17, -1, 1) + for joints_per_person in joints: + tmp = [] + for k, joint in enumerate(joints_per_person): + if joint[1] > 0: + tmp.append(pred_tag[k, joint[0]]) + if len(tmp) == 0: + continue + tmp = torch.stack(tmp) + tags.append(torch.mean(tmp, dim=0)) + pull = pull + torch.mean((tmp - tags[-1].expand_as(tmp))**2) + + num_tags = len(tags) + if num_tags == 0: + return (self._make_input( + torch.zeros(1).float(), device=pred_tag.device), + self._make_input( + torch.zeros(1).float(), device=pred_tag.device)) + elif num_tags == 1: + return (self._make_input( + torch.zeros(1).float(), device=pred_tag.device), pull) + + tags = torch.stack(tags) + + size = (num_tags, num_tags) + A = tags.expand(*size) + B = A.permute(1, 0) + + diff = A - B + + if self.loss_type == 'exp': + diff = torch.pow(diff, 2) + push = torch.exp(-diff) + push = torch.sum(push) + elif self.loss_type == 'max': + diff = 1 - torch.abs(diff) + push = torch.clamp(diff, min=0).sum() - num_tags + else: + raise ValueError('Unknown ae loss type') + + push_loss = push / ((num_tags - 1) * num_tags) * 0.5 + pull_loss = pull / (num_tags) + + return push_loss, pull_loss + + def forward(self, tags, keypoint_indices): + assert tags.shape[0] == len(keypoint_indices) + + pull_loss = 0. + push_loss = 0. 
+ + for i in range(tags.shape[0]): + _push, _pull = self.singleTagLoss(tags[i].view(-1, 1), + keypoint_indices[i]) + pull_loss += _pull + push_loss += _push + + return pull_loss, push_loss + + +class TestAssociativeEmbeddingLoss(TestCase): + + def _make_input(self, num_instance: int) -> Tuple[Tensor, Tensor]: + + encoder = AssociativeEmbedding( + input_size=(256, 256), heatmap_size=(64, 64)) + + data = get_coco_sample( + img_shape=(256, 256), num_instances=num_instance) + encoded = encoder.encode(data['keypoints'], data['keypoints_visible']) + heatmaps = encoded['heatmaps'] + keypoint_indices = encoded['keypoint_indices'] + + tags = self._get_tags( + heatmaps, keypoint_indices, tag_per_keypoint=True) + + batch_tags = torch.from_numpy(tags[None]) + batch_keypoint_indices = [torch.from_numpy(keypoint_indices)] + + return batch_tags, batch_keypoint_indices + + def _get_tags(self, + heatmaps, + keypoint_indices, + tag_per_keypoint: bool, + with_randomness: bool = True): + + K, H, W = heatmaps.shape + N = keypoint_indices.shape[0] + + if tag_per_keypoint: + tags = np.zeros((K, H, W), dtype=np.float32) + else: + tags = np.zeros((1, H, W), dtype=np.float32) + + for n, k in product(range(N), range(K)): + y, x = np.unravel_index(keypoint_indices[n, k, 0], (H, W)) + + randomness = np.random.rand() if with_randomness else 0 + + if tag_per_keypoint: + tags[k, y, x] = n + randomness + else: + tags[0, y, x] = n + randomness + + return tags + + def test_loss(self): + + tags, keypoint_indices = self._make_input(num_instance=2) + + # test loss calculation + loss_module = AssociativeEmbeddingLoss() + pull_loss, push_loss = loss_module(tags, keypoint_indices) + _pull_loss, _push_loss = AELoss('exp')(tags, keypoint_indices) + + self.assertTrue(torch.allclose(pull_loss, _pull_loss)) + self.assertTrue(torch.allclose(push_loss, _push_loss)) + + # test loss weight + loss_module = AssociativeEmbeddingLoss(loss_weight=0.) + pull_loss, push_loss = loss_module(tags, keypoint_indices) + + self.assertTrue(torch.allclose(pull_loss, torch.zeros(1))) + self.assertTrue(torch.allclose(push_loss, torch.zeros(1))) + + # test push loss factor + loss_module = AssociativeEmbeddingLoss(push_loss_factor=0.) + pull_loss, push_loss = loss_module(tags, keypoint_indices) + + self.assertFalse(torch.allclose(pull_loss, torch.zeros(1))) + self.assertTrue(torch.allclose(push_loss, torch.zeros(1))) diff --git a/tests/test_models/test_losses/test_classification_losses.py b/tests/test_models/test_losses/test_classification_losses.py index fd7d3fd898..87262e6b1f 100644 --- a/tests/test_models/test_losses/test_classification_losses.py +++ b/tests/test_models/test_losses/test_classification_losses.py @@ -1,22 +1,22 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import torch - -from mmpose.models.losses.classification_loss import InfoNCELoss - - -class TestInfoNCELoss(TestCase): - - def test_loss(self): - - # test loss w/o target_weight - loss = InfoNCELoss(temperature=0.05) - - fake_pred = torch.arange(5 * 2).reshape(5, 2).float() - self.assertTrue( - torch.allclose(loss(fake_pred), torch.tensor(5.4026), atol=1e-4)) - - # check if the value of temperature is positive - with self.assertRaises(AssertionError): - loss = InfoNCELoss(temperature=0.) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import torch + +from mmpose.models.losses.classification_loss import InfoNCELoss + + +class TestInfoNCELoss(TestCase): + + def test_loss(self): + + # test loss w/o target_weight + loss = InfoNCELoss(temperature=0.05) + + fake_pred = torch.arange(5 * 2).reshape(5, 2).float() + self.assertTrue( + torch.allclose(loss(fake_pred), torch.tensor(5.4026), atol=1e-4)) + + # check if the value of temperature is positive + with self.assertRaises(AssertionError): + loss = InfoNCELoss(temperature=0.) diff --git a/tests/test_models/test_losses/test_heatmap_losses.py b/tests/test_models/test_losses/test_heatmap_losses.py index 00da170389..e4c24474c3 100644 --- a/tests/test_models/test_losses/test_heatmap_losses.py +++ b/tests/test_models/test_losses/test_heatmap_losses.py @@ -1,119 +1,119 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import torch - -from mmpose.models.losses.heatmap_loss import (AdaptiveWingLoss, - FocalHeatmapLoss, - KeypointMSELoss) - - -class TestAdaptiveWingLoss(TestCase): - - def test_loss(self): - - # test loss w/o target_weight - loss = AdaptiveWingLoss(use_target_weight=False) - - fake_pred = torch.zeros((1, 3, 2, 2)) - fake_label = torch.zeros((1, 3, 2, 2)) - self.assertTrue( - torch.allclose(loss(fake_pred, fake_label), torch.tensor(0.))) - - fake_pred = torch.ones((1, 3, 2, 2)) - fake_label = torch.zeros((1, 3, 2, 2)) - self.assertTrue( - torch.allclose( - loss(fake_pred, fake_label), torch.tensor(8.4959), atol=1e-4)) - - # test loss w/ target_weight - loss = AdaptiveWingLoss(use_target_weight=True) - - fake_pred = torch.zeros((1, 3, 2, 2)) - fake_label = torch.zeros((1, 3, 2, 2)) - fake_weight = torch.tensor([1, 0, 1]).reshape(1, 3).float() - self.assertTrue( - torch.allclose( - loss(fake_pred, fake_label, fake_weight), torch.tensor(0.))) - - -class TestFocalHeatmapLoss(TestCase): - - def test_loss(self): - - loss = FocalHeatmapLoss(use_target_weight=False) - - fake_pred = torch.zeros((1, 3, 5, 5)) - fake_label = torch.zeros((1, 3, 5, 5)) - - self.assertTrue( - torch.allclose(loss(fake_pred, fake_label), torch.tensor(0.))) - - fake_pred = torch.ones((1, 3, 5, 5)) * 0.4 - fake_label = torch.ones((1, 3, 5, 5)) * 0.6 - self.assertTrue( - torch.allclose( - loss(fake_pred, fake_label), torch.tensor(0.1569), atol=1e-4)) - - # test loss w/ target_weight - loss = FocalHeatmapLoss(use_target_weight=True) - - fake_weight = torch.arange(3 * 5 * 5).reshape(1, 3, 5, 5).float() - self.assertTrue( - torch.allclose( - loss(fake_pred, fake_label, fake_weight), - torch.tensor(5.8062), - atol=1e-4)) - - -class TestKeypointMSELoss(TestCase): - - def test_loss(self): - - # test loss w/o target_weight and without mask - loss = KeypointMSELoss( - use_target_weight=False, skip_empty_channel=False) - - fake_pred = torch.zeros((1, 4, 4, 4)) - fake_label = torch.zeros((1, 4, 4, 4)) - - self.assertTrue( - torch.allclose(loss(fake_pred, fake_label), torch.tensor(0.))) - - fake_pred = torch.ones((1, 4, 4, 4)) * 0.5 - fake_label = torch.ones((1, 4, 4, 4)) * 0.5 - self.assertTrue( - torch.allclose( - loss(fake_pred, fake_label), torch.tensor(0.), atol=1e-4)) - - # test loss w/ target_weight and without mask - loss = KeypointMSELoss( - use_target_weight=True, skip_empty_channel=False) - - fake_weight = torch.ones((1, 4)).float() - self.assertTrue( - torch.allclose( - loss(fake_pred, fake_label, fake_weight), - torch.tensor(0.), - atol=1e-4)) - - # test loss w/ target_weight and with mask - loss = KeypointMSELoss( - 
use_target_weight=True, skip_empty_channel=False) - - fake_mask = torch.ones((1, 1, 4, 4)).float() - self.assertTrue( - torch.allclose( - loss(fake_pred, fake_label, fake_weight, fake_mask), - torch.tensor(0.), - atol=1e-4)) - - # test loss w/ target_weight and skip empty channels - loss = KeypointMSELoss(use_target_weight=True, skip_empty_channel=True) - - fake_mask = torch.ones((1, 1, 4, 4)).float() - self.assertTrue( - torch.allclose( - loss(fake_pred, fake_label, fake_weight, fake_mask), - torch.tensor(0.), - atol=1e-4)) +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import torch + +from mmpose.models.losses.heatmap_loss import (AdaptiveWingLoss, + FocalHeatmapLoss, + KeypointMSELoss) + + +class TestAdaptiveWingLoss(TestCase): + + def test_loss(self): + + # test loss w/o target_weight + loss = AdaptiveWingLoss(use_target_weight=False) + + fake_pred = torch.zeros((1, 3, 2, 2)) + fake_label = torch.zeros((1, 3, 2, 2)) + self.assertTrue( + torch.allclose(loss(fake_pred, fake_label), torch.tensor(0.))) + + fake_pred = torch.ones((1, 3, 2, 2)) + fake_label = torch.zeros((1, 3, 2, 2)) + self.assertTrue( + torch.allclose( + loss(fake_pred, fake_label), torch.tensor(8.4959), atol=1e-4)) + + # test loss w/ target_weight + loss = AdaptiveWingLoss(use_target_weight=True) + + fake_pred = torch.zeros((1, 3, 2, 2)) + fake_label = torch.zeros((1, 3, 2, 2)) + fake_weight = torch.tensor([1, 0, 1]).reshape(1, 3).float() + self.assertTrue( + torch.allclose( + loss(fake_pred, fake_label, fake_weight), torch.tensor(0.))) + + +class TestFocalHeatmapLoss(TestCase): + + def test_loss(self): + + loss = FocalHeatmapLoss(use_target_weight=False) + + fake_pred = torch.zeros((1, 3, 5, 5)) + fake_label = torch.zeros((1, 3, 5, 5)) + + self.assertTrue( + torch.allclose(loss(fake_pred, fake_label), torch.tensor(0.))) + + fake_pred = torch.ones((1, 3, 5, 5)) * 0.4 + fake_label = torch.ones((1, 3, 5, 5)) * 0.6 + self.assertTrue( + torch.allclose( + loss(fake_pred, fake_label), torch.tensor(0.1569), atol=1e-4)) + + # test loss w/ target_weight + loss = FocalHeatmapLoss(use_target_weight=True) + + fake_weight = torch.arange(3 * 5 * 5).reshape(1, 3, 5, 5).float() + self.assertTrue( + torch.allclose( + loss(fake_pred, fake_label, fake_weight), + torch.tensor(5.8062), + atol=1e-4)) + + +class TestKeypointMSELoss(TestCase): + + def test_loss(self): + + # test loss w/o target_weight and without mask + loss = KeypointMSELoss( + use_target_weight=False, skip_empty_channel=False) + + fake_pred = torch.zeros((1, 4, 4, 4)) + fake_label = torch.zeros((1, 4, 4, 4)) + + self.assertTrue( + torch.allclose(loss(fake_pred, fake_label), torch.tensor(0.))) + + fake_pred = torch.ones((1, 4, 4, 4)) * 0.5 + fake_label = torch.ones((1, 4, 4, 4)) * 0.5 + self.assertTrue( + torch.allclose( + loss(fake_pred, fake_label), torch.tensor(0.), atol=1e-4)) + + # test loss w/ target_weight and without mask + loss = KeypointMSELoss( + use_target_weight=True, skip_empty_channel=False) + + fake_weight = torch.ones((1, 4)).float() + self.assertTrue( + torch.allclose( + loss(fake_pred, fake_label, fake_weight), + torch.tensor(0.), + atol=1e-4)) + + # test loss w/ target_weight and with mask + loss = KeypointMSELoss( + use_target_weight=True, skip_empty_channel=False) + + fake_mask = torch.ones((1, 1, 4, 4)).float() + self.assertTrue( + torch.allclose( + loss(fake_pred, fake_label, fake_weight, fake_mask), + torch.tensor(0.), + atol=1e-4)) + + # test loss w/ target_weight and skip empty channels + loss = 
KeypointMSELoss(use_target_weight=True, skip_empty_channel=True) + + fake_mask = torch.ones((1, 1, 4, 4)).float() + self.assertTrue( + torch.allclose( + loss(fake_pred, fake_label, fake_weight, fake_mask), + torch.tensor(0.), + atol=1e-4)) diff --git a/tests/test_models/test_losses/test_regression_losses.py b/tests/test_models/test_losses/test_regression_losses.py index 0975ac6b55..4b018a02d6 100644 --- a/tests/test_models/test_losses/test_regression_losses.py +++ b/tests/test_models/test_losses/test_regression_losses.py @@ -1,48 +1,48 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from unittest import TestCase - -import torch - -from mmpose.models.losses.regression_loss import SoftWeightSmoothL1Loss - - -class TestSoftWeightSmoothL1Loss(TestCase): - - def test_loss(self): - - # test loss w/o target_weight - loss = SoftWeightSmoothL1Loss(use_target_weight=False, beta=0.5) - - fake_pred = torch.zeros((1, 3, 2)) - fake_label = torch.zeros((1, 3, 2)) - self.assertTrue( - torch.allclose(loss(fake_pred, fake_label), torch.tensor(0.))) - - fake_pred = torch.ones((1, 3, 2)) - fake_label = torch.zeros((1, 3, 2)) - self.assertTrue( - torch.allclose(loss(fake_pred, fake_label), torch.tensor(.75))) - - # test loss w/ target_weight - loss = SoftWeightSmoothL1Loss( - use_target_weight=True, supervise_empty=True) - - fake_pred = torch.ones((1, 3, 2)) - fake_label = torch.zeros((1, 3, 2)) - fake_weight = torch.arange(6).reshape(1, 3, 2).float() - self.assertTrue( - torch.allclose( - loss(fake_pred, fake_label, fake_weight), torch.tensor(1.25))) - - # test loss that does not take empty channels into account - loss = SoftWeightSmoothL1Loss( - use_target_weight=True, supervise_empty=False) - self.assertTrue( - torch.allclose( - loss(fake_pred, fake_label, fake_weight), torch.tensor(1.5))) - - with self.assertRaises(ValueError): - _ = loss.smooth_l1_loss(fake_pred, fake_label, reduction='fake') - - output = loss.smooth_l1_loss(fake_pred, fake_label, reduction='sum') - self.assertTrue(torch.allclose(output, torch.tensor(3.0))) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import torch + +from mmpose.models.losses.regression_loss import SoftWeightSmoothL1Loss + + +class TestSoftWeightSmoothL1Loss(TestCase): + + def test_loss(self): + + # test loss w/o target_weight + loss = SoftWeightSmoothL1Loss(use_target_weight=False, beta=0.5) + + fake_pred = torch.zeros((1, 3, 2)) + fake_label = torch.zeros((1, 3, 2)) + self.assertTrue( + torch.allclose(loss(fake_pred, fake_label), torch.tensor(0.))) + + fake_pred = torch.ones((1, 3, 2)) + fake_label = torch.zeros((1, 3, 2)) + self.assertTrue( + torch.allclose(loss(fake_pred, fake_label), torch.tensor(.75))) + + # test loss w/ target_weight + loss = SoftWeightSmoothL1Loss( + use_target_weight=True, supervise_empty=True) + + fake_pred = torch.ones((1, 3, 2)) + fake_label = torch.zeros((1, 3, 2)) + fake_weight = torch.arange(6).reshape(1, 3, 2).float() + self.assertTrue( + torch.allclose( + loss(fake_pred, fake_label, fake_weight), torch.tensor(1.25))) + + # test loss that does not take empty channels into account + loss = SoftWeightSmoothL1Loss( + use_target_weight=True, supervise_empty=False) + self.assertTrue( + torch.allclose( + loss(fake_pred, fake_label, fake_weight), torch.tensor(1.5))) + + with self.assertRaises(ValueError): + _ = loss.smooth_l1_loss(fake_pred, fake_label, reduction='fake') + + output = loss.smooth_l1_loss(fake_pred, fake_label, reduction='sum') + self.assertTrue(torch.allclose(output, torch.tensor(3.0))) diff --git a/tests/test_models/test_necks/test_fmap_proc_neck.py b/tests/test_models/test_necks/test_fmap_proc_neck.py index cfc67de0ec..5d31f52135 100644 --- a/tests/test_models/test_necks/test_fmap_proc_neck.py +++ b/tests/test_models/test_necks/test_fmap_proc_neck.py @@ -1,64 +1,64 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from typing import List, Tuple -from unittest import TestCase - -import torch - -from mmpose.models.necks import FeatureMapProcessor - - -class TestFeatureMapProcessor(TestCase): - - def _get_feats( - self, - batch_size: int = 2, - feat_shapes: List[Tuple[int, int, int]] = [(32, 1, 1)], - ): - - feats = [ - torch.rand((batch_size, ) + shape, dtype=torch.float32) - for shape in feat_shapes - ] - - return feats - - def test_init(self): - - neck = FeatureMapProcessor(select_index=0) - self.assertSequenceEqual(neck.select_index, (0, )) - - with self.assertRaises(AssertionError): - neck = FeatureMapProcessor(scale_factor=0.0) - - def test_call(self): - - inputs = self._get_feats( - batch_size=2, feat_shapes=[(2, 16, 16), (4, 8, 8), (8, 4, 4)]) - - neck = FeatureMapProcessor(select_index=0) - output = neck(inputs) - self.assertEqual(len(output), 1) - self.assertSequenceEqual(output[0].shape, (2, 2, 16, 16)) - - neck = FeatureMapProcessor(select_index=(2, 1)) - output = neck(inputs) - self.assertEqual(len(output), 2) - self.assertSequenceEqual(output[1].shape, (2, 4, 8, 8)) - self.assertSequenceEqual(output[0].shape, (2, 8, 4, 4)) - - neck = FeatureMapProcessor(select_index=(1, 2), concat=True) - output = neck(inputs) - self.assertEqual(len(output), 1) - self.assertSequenceEqual(output[0].shape, (2, 12, 8, 8)) - - neck = FeatureMapProcessor( - select_index=(2, 1), concat=True, scale_factor=2) - output = neck(inputs) - self.assertEqual(len(output), 1) - self.assertSequenceEqual(output[0].shape, (2, 12, 8, 8)) - - neck = FeatureMapProcessor(concat=True, apply_relu=True) - output = neck(inputs) - self.assertEqual(len(output), 1) - self.assertSequenceEqual(output[0].shape, (2, 14, 16, 16)) - self.assertGreaterEqual(output[0].max(), 0) +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import List, Tuple +from unittest import TestCase + +import torch + +from mmpose.models.necks import FeatureMapProcessor + + +class TestFeatureMapProcessor(TestCase): + + def _get_feats( + self, + batch_size: int = 2, + feat_shapes: List[Tuple[int, int, int]] = [(32, 1, 1)], + ): + + feats = [ + torch.rand((batch_size, ) + shape, dtype=torch.float32) + for shape in feat_shapes + ] + + return feats + + def test_init(self): + + neck = FeatureMapProcessor(select_index=0) + self.assertSequenceEqual(neck.select_index, (0, )) + + with self.assertRaises(AssertionError): + neck = FeatureMapProcessor(scale_factor=0.0) + + def test_call(self): + + inputs = self._get_feats( + batch_size=2, feat_shapes=[(2, 16, 16), (4, 8, 8), (8, 4, 4)]) + + neck = FeatureMapProcessor(select_index=0) + output = neck(inputs) + self.assertEqual(len(output), 1) + self.assertSequenceEqual(output[0].shape, (2, 2, 16, 16)) + + neck = FeatureMapProcessor(select_index=(2, 1)) + output = neck(inputs) + self.assertEqual(len(output), 2) + self.assertSequenceEqual(output[1].shape, (2, 4, 8, 8)) + self.assertSequenceEqual(output[0].shape, (2, 8, 4, 4)) + + neck = FeatureMapProcessor(select_index=(1, 2), concat=True) + output = neck(inputs) + self.assertEqual(len(output), 1) + self.assertSequenceEqual(output[0].shape, (2, 12, 8, 8)) + + neck = FeatureMapProcessor( + select_index=(2, 1), concat=True, scale_factor=2) + output = neck(inputs) + self.assertEqual(len(output), 1) + self.assertSequenceEqual(output[0].shape, (2, 12, 8, 8)) + + neck = FeatureMapProcessor(concat=True, apply_relu=True) + output = neck(inputs) + self.assertEqual(len(output), 1) + self.assertSequenceEqual(output[0].shape, (2, 14, 16, 16)) + self.assertGreaterEqual(output[0].max(), 0) diff --git a/tests/test_models/test_pose_estimators/test_bottomup.py b/tests/test_models/test_pose_estimators/test_bottomup.py index 16258dacaf..40d6cecdba 100644 --- a/tests/test_models/test_pose_estimators/test_bottomup.py +++ b/tests/test_models/test_pose_estimators/test_bottomup.py @@ -1,54 +1,54 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import unittest -from unittest import TestCase - -import torch -from parameterized import parameterized - -from mmpose.testing import get_packed_inputs, get_pose_estimator_cfg -from mmpose.utils import register_all_modules - -configs = [ - 'body_2d_keypoint/associative_embedding/coco/' - 'ae_hrnet-w32_8xb24-300e_coco-512x512.py' -] - -configs_with_devices = [(config, ('cpu', 'cuda')) for config in configs] - - -class TestTopdownPoseEstimator(TestCase): - - def setUp(self) -> None: - register_all_modules() - - @parameterized.expand(configs) - def test_init(self, config): - model_cfg = get_pose_estimator_cfg(config) - model_cfg.backbone.init_cfg = None - - from mmpose.models import build_pose_estimator - model = build_pose_estimator(model_cfg) - self.assertTrue(model.backbone) - self.assertTrue(model.head) - if model_cfg.get('neck', None): - self.assertTrue(model.neck) - - @parameterized.expand(configs_with_devices) - def test_forward_tensor(self, config, devices): - model_cfg = get_pose_estimator_cfg(config) - model_cfg.backbone.init_cfg = None - - from mmpose.models import build_pose_estimator - - for device in devices: - model = build_pose_estimator(model_cfg) - - if device == 'cuda': - if not torch.cuda.is_available(): - return unittest.skip('test requires GPU and torch+cuda') - model = model.cuda() - - packed_inputs = get_packed_inputs(2) - data = model.data_preprocessor(packed_inputs, training=True) - batch_results = model.forward(**data, mode='tensor') - self.assertIsInstance(batch_results, (tuple, torch.Tensor)) +# Copyright (c) OpenMMLab. All rights reserved. +import unittest +from unittest import TestCase + +import torch +from parameterized import parameterized + +from mmpose.testing import get_packed_inputs, get_pose_estimator_cfg +from mmpose.utils import register_all_modules + +configs = [ + 'body_2d_keypoint/associative_embedding/coco/' + 'ae_hrnet-w32_8xb24-300e_coco-512x512.py' +] + +configs_with_devices = [(config, ('cpu', 'cuda')) for config in configs] + + +class TestTopdownPoseEstimator(TestCase): + + def setUp(self) -> None: + register_all_modules() + + @parameterized.expand(configs) + def test_init(self, config): + model_cfg = get_pose_estimator_cfg(config) + model_cfg.backbone.init_cfg = None + + from mmpose.models import build_pose_estimator + model = build_pose_estimator(model_cfg) + self.assertTrue(model.backbone) + self.assertTrue(model.head) + if model_cfg.get('neck', None): + self.assertTrue(model.neck) + + @parameterized.expand(configs_with_devices) + def test_forward_tensor(self, config, devices): + model_cfg = get_pose_estimator_cfg(config) + model_cfg.backbone.init_cfg = None + + from mmpose.models import build_pose_estimator + + for device in devices: + model = build_pose_estimator(model_cfg) + + if device == 'cuda': + if not torch.cuda.is_available(): + return unittest.skip('test requires GPU and torch+cuda') + model = model.cuda() + + packed_inputs = get_packed_inputs(2) + data = model.data_preprocessor(packed_inputs, training=True) + batch_results = model.forward(**data, mode='tensor') + self.assertIsInstance(batch_results, (tuple, torch.Tensor)) diff --git a/tests/test_models/test_pose_estimators/test_topdown.py b/tests/test_models/test_pose_estimators/test_topdown.py index bc65cecd3e..19260ce3fe 100644 --- a/tests/test_models/test_pose_estimators/test_topdown.py +++ b/tests/test_models/test_pose_estimators/test_topdown.py @@ -1,102 +1,102 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import unittest -from unittest import TestCase - -import torch -from parameterized import parameterized - -from mmpose.structures import PoseDataSample -from mmpose.testing import get_packed_inputs, get_pose_estimator_cfg -from mmpose.utils import register_all_modules - -configs = [ - 'body_2d_keypoint/topdown_heatmap/coco/' - 'td-hm_hrnet-w32_8xb64-210e_coco-256x192.py', - 'configs/body_2d_keypoint/topdown_regression/coco/' - 'td-reg_res50_8xb64-210e_coco-256x192.py', - 'configs/body_2d_keypoint/simcc/coco/' - 'simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192.py', -] - -configs_with_devices = [(config, ('cpu', 'cuda')) for config in configs] - - -class TestTopdownPoseEstimator(TestCase): - - def setUp(self) -> None: - register_all_modules() - - @parameterized.expand(configs) - def test_init(self, config): - model_cfg = get_pose_estimator_cfg(config) - model_cfg.backbone.init_cfg = None - - from mmpose.models import build_pose_estimator - model = build_pose_estimator(model_cfg) - self.assertTrue(model.backbone) - self.assertTrue(model.head) - if model_cfg.get('neck', None): - self.assertTrue(model.neck) - - @parameterized.expand(configs_with_devices) - def test_forward_loss(self, config, devices): - model_cfg = get_pose_estimator_cfg(config) - model_cfg.backbone.init_cfg = None - - from mmpose.models import build_pose_estimator - - for device in devices: - model = build_pose_estimator(model_cfg) - - if device == 'cuda': - if not torch.cuda.is_available(): - return unittest.skip('test requires GPU and torch+cuda') - model = model.cuda() - - packed_inputs = get_packed_inputs(2) - data = model.data_preprocessor(packed_inputs, training=True) - losses = model.forward(**data, mode='loss') - self.assertIsInstance(losses, dict) - - @parameterized.expand(configs_with_devices) - def test_forward_predict(self, config, devices): - model_cfg = get_pose_estimator_cfg(config) - model_cfg.backbone.init_cfg = None - - from mmpose.models import build_pose_estimator - - for device in devices: - model = build_pose_estimator(model_cfg) - - if device == 'cuda': - if not torch.cuda.is_available(): - return unittest.skip('test requires GPU and torch+cuda') - model = model.cuda() - - packed_inputs = get_packed_inputs(2) - model.eval() - with torch.no_grad(): - data = model.data_preprocessor(packed_inputs, training=True) - batch_results = model.forward(**data, mode='predict') - self.assertEqual(len(batch_results), 2) - self.assertIsInstance(batch_results[0], PoseDataSample) - - @parameterized.expand(configs_with_devices) - def test_forward_tensor(self, config, devices): - model_cfg = get_pose_estimator_cfg(config) - model_cfg.backbone.init_cfg = None - - from mmpose.models import build_pose_estimator - - for device in devices: - model = build_pose_estimator(model_cfg) - - if device == 'cuda': - if not torch.cuda.is_available(): - return unittest.skip('test requires GPU and torch+cuda') - model = model.cuda() - - packed_inputs = get_packed_inputs(2) - data = model.data_preprocessor(packed_inputs, training=True) - batch_results = model.forward(**data, mode='tensor') - self.assertIsInstance(batch_results, (tuple, torch.Tensor)) +# Copyright (c) OpenMMLab. All rights reserved. 
+import unittest +from unittest import TestCase + +import torch +from parameterized import parameterized + +from mmpose.structures import PoseDataSample +from mmpose.testing import get_packed_inputs, get_pose_estimator_cfg +from mmpose.utils import register_all_modules + +configs = [ + 'body_2d_keypoint/topdown_heatmap/coco/' + 'td-hm_hrnet-w32_8xb64-210e_coco-256x192.py', + 'configs/body_2d_keypoint/topdown_regression/coco/' + 'td-reg_res50_8xb64-210e_coco-256x192.py', + 'configs/body_2d_keypoint/simcc/coco/' + 'simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192.py', +] + +configs_with_devices = [(config, ('cpu', 'cuda')) for config in configs] + + +class TestTopdownPoseEstimator(TestCase): + + def setUp(self) -> None: + register_all_modules() + + @parameterized.expand(configs) + def test_init(self, config): + model_cfg = get_pose_estimator_cfg(config) + model_cfg.backbone.init_cfg = None + + from mmpose.models import build_pose_estimator + model = build_pose_estimator(model_cfg) + self.assertTrue(model.backbone) + self.assertTrue(model.head) + if model_cfg.get('neck', None): + self.assertTrue(model.neck) + + @parameterized.expand(configs_with_devices) + def test_forward_loss(self, config, devices): + model_cfg = get_pose_estimator_cfg(config) + model_cfg.backbone.init_cfg = None + + from mmpose.models import build_pose_estimator + + for device in devices: + model = build_pose_estimator(model_cfg) + + if device == 'cuda': + if not torch.cuda.is_available(): + return unittest.skip('test requires GPU and torch+cuda') + model = model.cuda() + + packed_inputs = get_packed_inputs(2) + data = model.data_preprocessor(packed_inputs, training=True) + losses = model.forward(**data, mode='loss') + self.assertIsInstance(losses, dict) + + @parameterized.expand(configs_with_devices) + def test_forward_predict(self, config, devices): + model_cfg = get_pose_estimator_cfg(config) + model_cfg.backbone.init_cfg = None + + from mmpose.models import build_pose_estimator + + for device in devices: + model = build_pose_estimator(model_cfg) + + if device == 'cuda': + if not torch.cuda.is_available(): + return unittest.skip('test requires GPU and torch+cuda') + model = model.cuda() + + packed_inputs = get_packed_inputs(2) + model.eval() + with torch.no_grad(): + data = model.data_preprocessor(packed_inputs, training=True) + batch_results = model.forward(**data, mode='predict') + self.assertEqual(len(batch_results), 2) + self.assertIsInstance(batch_results[0], PoseDataSample) + + @parameterized.expand(configs_with_devices) + def test_forward_tensor(self, config, devices): + model_cfg = get_pose_estimator_cfg(config) + model_cfg.backbone.init_cfg = None + + from mmpose.models import build_pose_estimator + + for device in devices: + model = build_pose_estimator(model_cfg) + + if device == 'cuda': + if not torch.cuda.is_available(): + return unittest.skip('test requires GPU and torch+cuda') + model = model.cuda() + + packed_inputs = get_packed_inputs(2) + data = model.data_preprocessor(packed_inputs, training=True) + batch_results = model.forward(**data, mode='tensor') + self.assertIsInstance(batch_results, (tuple, torch.Tensor)) diff --git a/tests/test_models/test_utils/test_check_and_update_config.py b/tests/test_models/test_utils/test_check_and_update_config.py index 65b084a9b3..9108dc994f 100644 --- a/tests/test_models/test_utils/test_check_and_update_config.py +++ b/tests/test_models/test_utils/test_check_and_update_config.py @@ -1,115 +1,115 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import unittest - -from mmpose.models.utils import check_and_update_config - - -class TestCheckAndUpdateConfig(unittest.TestCase): - - def test_case_1(self): - neck = None - head = dict( - type='HeatmapHead', - in_channels=768, - out_channels=17, - deconv_out_channels=[], - deconv_kernel_sizes=[], - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder='codec', - align_corners=False, - extra=dict(upsample=4, final_conv_kernel=3)) - neck, head = check_and_update_config(neck, head) - - self.assertDictEqual( - neck, - dict( - type='FeatureMapProcessor', - scale_factor=4.0, - apply_relu=True, - )) - self.assertIn('final_layer', head) - self.assertDictEqual(head['final_layer'], - dict(kernel_size=3, padding=1)) - self.assertNotIn('extra', head) - self.assertNotIn('input_transform', head) - self.assertNotIn('input_index', head) - self.assertNotIn('align_corners', head) - - def test_case_2(self): - neck = None - head = dict( - type='CIDHead', - in_channels=(48, 96, 192, 384), - num_keypoints=17, - gfd_channels=48, - input_transform='resize_concat', - input_index=(0, 1, 2, 3), - coupled_heatmap_loss=dict( - type='FocalHeatmapLoss', loss_weight=1.0), - decoupled_heatmap_loss=dict( - type='FocalHeatmapLoss', loss_weight=4.0), - ) - neck, head = check_and_update_config(neck, head) - - self.assertDictEqual( - neck, - dict( - type='FeatureMapProcessor', - concat=True, - select_index=(0, 1, 2, 3), - )) - self.assertEqual(head['in_channels'], 720) - self.assertNotIn('input_transform', head) - self.assertNotIn('input_index', head) - self.assertNotIn('align_corners', head) - - def test_case_3(self): - neck = None - head = dict( - type='HeatmapHead', - in_channels=(64, 128, 320, 512), - out_channels=17, - input_index=3, - has_final_layer=False, - loss=dict(type='KeypointMSELoss', use_target_weight=True)) - neck, head = check_and_update_config(neck, head) - - self.assertDictEqual( - neck, dict( - type='FeatureMapProcessor', - select_index=3, - )) - self.assertEqual(head['in_channels'], 512) - self.assertIn('final_layer', head) - self.assertIsNone(head['final_layer']) - self.assertNotIn('input_transform', head) - self.assertNotIn('input_index', head) - self.assertNotIn('align_corners', head) - - def test_case_4(self): - neck = None - head = dict( - type='RTMCCHead', - in_channels=768, - out_channels=17, - input_size='input_size', - in_featuremap_size=(9, 12), - simcc_split_ratio='simcc_split_ratio', - final_layer_kernel_size=7, - gau_cfg=dict( - hidden_dims=256, - s=128, - expansion_factor=2, - dropout_rate=0., - drop_path=0., - act_fn='SiLU', - use_rel_bias=False, - pos_enc=False)) - neck, head_new = check_and_update_config(neck, head) - - self.assertIsNone(neck) - self.assertDictEqual(head, head_new) - - -if __name__ == '__main__': - unittest.main() +# Copyright (c) OpenMMLab. All rights reserved. 
+import unittest + +from mmpose.models.utils import check_and_update_config + + +class TestCheckAndUpdateConfig(unittest.TestCase): + + def test_case_1(self): + neck = None + head = dict( + type='HeatmapHead', + in_channels=768, + out_channels=17, + deconv_out_channels=[], + deconv_kernel_sizes=[], + loss=dict(type='KeypointMSELoss', use_target_weight=True), + decoder='codec', + align_corners=False, + extra=dict(upsample=4, final_conv_kernel=3)) + neck, head = check_and_update_config(neck, head) + + self.assertDictEqual( + neck, + dict( + type='FeatureMapProcessor', + scale_factor=4.0, + apply_relu=True, + )) + self.assertIn('final_layer', head) + self.assertDictEqual(head['final_layer'], + dict(kernel_size=3, padding=1)) + self.assertNotIn('extra', head) + self.assertNotIn('input_transform', head) + self.assertNotIn('input_index', head) + self.assertNotIn('align_corners', head) + + def test_case_2(self): + neck = None + head = dict( + type='CIDHead', + in_channels=(48, 96, 192, 384), + num_keypoints=17, + gfd_channels=48, + input_transform='resize_concat', + input_index=(0, 1, 2, 3), + coupled_heatmap_loss=dict( + type='FocalHeatmapLoss', loss_weight=1.0), + decoupled_heatmap_loss=dict( + type='FocalHeatmapLoss', loss_weight=4.0), + ) + neck, head = check_and_update_config(neck, head) + + self.assertDictEqual( + neck, + dict( + type='FeatureMapProcessor', + concat=True, + select_index=(0, 1, 2, 3), + )) + self.assertEqual(head['in_channels'], 720) + self.assertNotIn('input_transform', head) + self.assertNotIn('input_index', head) + self.assertNotIn('align_corners', head) + + def test_case_3(self): + neck = None + head = dict( + type='HeatmapHead', + in_channels=(64, 128, 320, 512), + out_channels=17, + input_index=3, + has_final_layer=False, + loss=dict(type='KeypointMSELoss', use_target_weight=True)) + neck, head = check_and_update_config(neck, head) + + self.assertDictEqual( + neck, dict( + type='FeatureMapProcessor', + select_index=3, + )) + self.assertEqual(head['in_channels'], 512) + self.assertIn('final_layer', head) + self.assertIsNone(head['final_layer']) + self.assertNotIn('input_transform', head) + self.assertNotIn('input_index', head) + self.assertNotIn('align_corners', head) + + def test_case_4(self): + neck = None + head = dict( + type='RTMCCHead', + in_channels=768, + out_channels=17, + input_size='input_size', + in_featuremap_size=(9, 12), + simcc_split_ratio='simcc_split_ratio', + final_layer_kernel_size=7, + gau_cfg=dict( + hidden_dims=256, + s=128, + expansion_factor=2, + dropout_rate=0., + drop_path=0., + act_fn='SiLU', + use_rel_bias=False, + pos_enc=False)) + neck, head_new = check_and_update_config(neck, head) + + self.assertIsNone(neck) + self.assertDictEqual(head, head_new) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_structures/test_multilevel_pixel_data.py b/tests/test_structures/test_multilevel_pixel_data.py index 6bba56effa..b6fdde22bc 100644 --- a/tests/test_structures/test_multilevel_pixel_data.py +++ b/tests/test_structures/test_multilevel_pixel_data.py @@ -1,72 +1,72 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import numpy as np -import torch -from mmengine.structures import PixelData - -from mmpose.structures import MultilevelPixelData - - -class TestMultilevelPixelData(TestCase): - - def get_multi_level_pixel_data(self): - metainfo = dict(num_keypoints=17) - sizes = [(64, 48), (32, 24), (16, 12)] - heatmaps = [np.random.rand(17, h, w) for h, w in sizes] - masks = [torch.rand(1, h, w) for h, w in sizes] - data = MultilevelPixelData( - metainfo=metainfo, heatmaps=heatmaps, masks=masks) - - return data - - def test_init(self): - - data = self.get_multi_level_pixel_data() - self.assertIn('num_keypoints', data) - self.assertTrue(data.nlevel == 3) - self.assertTrue(data.shape == ((64, 48), (32, 24), (16, 12))) - self.assertTrue(isinstance(data[0], PixelData)) - - def test_setter(self): - # test `set_field` - data = self.get_multi_level_pixel_data() - sizes = [(64, 48), (32, 24), (16, 8)] - offset_maps = [torch.rand(2, h, w) for h, w in sizes] - data.offset_maps = offset_maps - - # test `to_tensor` - data = self.get_multi_level_pixel_data() - self.assertTrue(isinstance(data[0].heatmaps, np.ndarray)) - data = data.to_tensor() - self.assertTrue(isinstance(data[0].heatmaps, torch.Tensor)) - - # test `cpu` - data = self.get_multi_level_pixel_data() - self.assertTrue(isinstance(data[0].heatmaps, np.ndarray)) - self.assertTrue(isinstance(data[0].masks, torch.Tensor)) - self.assertTrue(data[0].masks.device.type == 'cpu') - data = data.cpu() - self.assertTrue(isinstance(data[0].heatmaps, np.ndarray)) - self.assertTrue(data[0].masks.device.type == 'cpu') - - # test `to` - data = self.get_multi_level_pixel_data() - self.assertTrue(data[0].masks.device.type == 'cpu') - data = data.to('cpu') - self.assertTrue(data[0].masks.device.type == 'cpu') - - # test `numpy` - data = self.get_multi_level_pixel_data() - self.assertTrue(isinstance(data[0].masks, torch.Tensor)) - data = data.numpy() - self.assertTrue(isinstance(data[0].masks, np.ndarray)) - - def test_deleter(self): - - data = self.get_multi_level_pixel_data() - - for key in ['heatmaps', 'masks']: - self.assertIn(key, data) - exec(f'del data.{key}') - self.assertNotIn(key, data) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import numpy as np +import torch +from mmengine.structures import PixelData + +from mmpose.structures import MultilevelPixelData + + +class TestMultilevelPixelData(TestCase): + + def get_multi_level_pixel_data(self): + metainfo = dict(num_keypoints=17) + sizes = [(64, 48), (32, 24), (16, 12)] + heatmaps = [np.random.rand(17, h, w) for h, w in sizes] + masks = [torch.rand(1, h, w) for h, w in sizes] + data = MultilevelPixelData( + metainfo=metainfo, heatmaps=heatmaps, masks=masks) + + return data + + def test_init(self): + + data = self.get_multi_level_pixel_data() + self.assertIn('num_keypoints', data) + self.assertTrue(data.nlevel == 3) + self.assertTrue(data.shape == ((64, 48), (32, 24), (16, 12))) + self.assertTrue(isinstance(data[0], PixelData)) + + def test_setter(self): + # test `set_field` + data = self.get_multi_level_pixel_data() + sizes = [(64, 48), (32, 24), (16, 8)] + offset_maps = [torch.rand(2, h, w) for h, w in sizes] + data.offset_maps = offset_maps + + # test `to_tensor` + data = self.get_multi_level_pixel_data() + self.assertTrue(isinstance(data[0].heatmaps, np.ndarray)) + data = data.to_tensor() + self.assertTrue(isinstance(data[0].heatmaps, torch.Tensor)) + + # test `cpu` + data = self.get_multi_level_pixel_data() + self.assertTrue(isinstance(data[0].heatmaps, np.ndarray)) + self.assertTrue(isinstance(data[0].masks, torch.Tensor)) + self.assertTrue(data[0].masks.device.type == 'cpu') + data = data.cpu() + self.assertTrue(isinstance(data[0].heatmaps, np.ndarray)) + self.assertTrue(data[0].masks.device.type == 'cpu') + + # test `to` + data = self.get_multi_level_pixel_data() + self.assertTrue(data[0].masks.device.type == 'cpu') + data = data.to('cpu') + self.assertTrue(data[0].masks.device.type == 'cpu') + + # test `numpy` + data = self.get_multi_level_pixel_data() + self.assertTrue(isinstance(data[0].masks, torch.Tensor)) + data = data.numpy() + self.assertTrue(isinstance(data[0].masks, np.ndarray)) + + def test_deleter(self): + + data = self.get_multi_level_pixel_data() + + for key in ['heatmaps', 'masks']: + self.assertIn(key, data) + exec(f'del data.{key}') + self.assertNotIn(key, data) diff --git a/tests/test_structures/test_pose_data_sample.py b/tests/test_structures/test_pose_data_sample.py index 3770260cd6..4210bc7607 100644 --- a/tests/test_structures/test_pose_data_sample.py +++ b/tests/test_structures/test_pose_data_sample.py @@ -1,126 +1,126 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import numpy as np -import torch -from mmengine.structures import InstanceData, PixelData - -from mmpose.structures import MultilevelPixelData, PoseDataSample - - -class TestPoseDataSample(TestCase): - - def get_pose_data_sample(self, multilevel: bool = False): - # meta - pose_meta = dict( - img_shape=(600, 900), # [h, w, c] - crop_size=(256, 192), # [h, w] - heatmap_size=(64, 48), # [h, w] - ) - # gt_instances - gt_instances = InstanceData() - gt_instances.bboxes = torch.rand(1, 4) - gt_instances.keypoints = torch.rand(1, 17, 2) - gt_instances.keypoints_visible = torch.rand(1, 17) - - # pred_instances - pred_instances = InstanceData() - pred_instances.keypoints = torch.rand(1, 17, 2) - pred_instances.keypoint_scores = torch.rand(1, 17) - - # gt_fields - if multilevel: - # generate multilevel gt_fields - metainfo = dict(num_keypoints=17) - sizes = [(64, 48), (32, 24), (16, 12)] - heatmaps = [np.random.rand(17, h, w) for h, w in sizes] - masks = [torch.rand(1, h, w) for h, w in sizes] - gt_fields = MultilevelPixelData( - metainfo=metainfo, heatmaps=heatmaps, masks=masks) - else: - gt_fields = PixelData() - gt_fields.heatmaps = torch.rand(17, 64, 48) - - # pred_fields - pred_fields = PixelData() - pred_fields.heatmaps = torch.rand(17, 64, 48) - - data_sample = PoseDataSample( - gt_instances=gt_instances, - pred_instances=pred_instances, - gt_fields=gt_fields, - pred_fields=pred_fields, - metainfo=pose_meta) - - return data_sample - - @staticmethod - def _equal(x, y): - if type(x) != type(y): - return False - if isinstance(x, torch.Tensor): - return torch.allclose(x, y) - elif isinstance(x, np.ndarray): - return np.allclose(x, y) - else: - return x == y - - def test_init(self): - - data_sample = self.get_pose_data_sample() - self.assertIn('img_shape', data_sample) - self.assertTrue(len(data_sample.gt_instances) == 1) - - def test_setter(self): - - data_sample = self.get_pose_data_sample() - - # test gt_instances - data_sample.gt_instances = InstanceData() - - # test gt_fields - data_sample.gt_fields = PixelData() - - # test multilevel gt_fields - data_sample = self.get_pose_data_sample(multilevel=True) - data_sample.gt_fields = MultilevelPixelData() - - # test pred_instances as pytorch tensor - pred_instances_data = dict( - keypoints=torch.rand(1, 17, 2), scores=torch.rand(1, 17, 1)) - data_sample.pred_instances = InstanceData(**pred_instances_data) - - self.assertTrue( - self._equal(data_sample.pred_instances.keypoints, - pred_instances_data['keypoints'])) - self.assertTrue( - self._equal(data_sample.pred_instances.scores, - pred_instances_data['scores'])) - - # test pred_fields as numpy array - pred_fields_data = dict(heatmaps=np.random.rand(17, 64, 48)) - data_sample.pred_fields = PixelData(**pred_fields_data) - - self.assertTrue( - self._equal(data_sample.pred_fields.heatmaps, - pred_fields_data['heatmaps'])) - - # test to_tensor - data_sample = data_sample.to_tensor() - self.assertTrue( - self._equal(data_sample.pred_fields.heatmaps, - torch.from_numpy(pred_fields_data['heatmaps']))) - - def test_deleter(self): - - data_sample = self.get_pose_data_sample() - - for key in [ - 'gt_instances', - 'pred_instances', - 'gt_fields', - 'pred_fields', - ]: - self.assertIn(key, data_sample) - exec(f'del data_sample.{key}') - self.assertNotIn(key, data_sample) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import numpy as np +import torch +from mmengine.structures import InstanceData, PixelData + +from mmpose.structures import MultilevelPixelData, PoseDataSample + + +class TestPoseDataSample(TestCase): + + def get_pose_data_sample(self, multilevel: bool = False): + # meta + pose_meta = dict( + img_shape=(600, 900), # [h, w, c] + crop_size=(256, 192), # [h, w] + heatmap_size=(64, 48), # [h, w] + ) + # gt_instances + gt_instances = InstanceData() + gt_instances.bboxes = torch.rand(1, 4) + gt_instances.keypoints = torch.rand(1, 17, 2) + gt_instances.keypoints_visible = torch.rand(1, 17) + + # pred_instances + pred_instances = InstanceData() + pred_instances.keypoints = torch.rand(1, 17, 2) + pred_instances.keypoint_scores = torch.rand(1, 17) + + # gt_fields + if multilevel: + # generate multilevel gt_fields + metainfo = dict(num_keypoints=17) + sizes = [(64, 48), (32, 24), (16, 12)] + heatmaps = [np.random.rand(17, h, w) for h, w in sizes] + masks = [torch.rand(1, h, w) for h, w in sizes] + gt_fields = MultilevelPixelData( + metainfo=metainfo, heatmaps=heatmaps, masks=masks) + else: + gt_fields = PixelData() + gt_fields.heatmaps = torch.rand(17, 64, 48) + + # pred_fields + pred_fields = PixelData() + pred_fields.heatmaps = torch.rand(17, 64, 48) + + data_sample = PoseDataSample( + gt_instances=gt_instances, + pred_instances=pred_instances, + gt_fields=gt_fields, + pred_fields=pred_fields, + metainfo=pose_meta) + + return data_sample + + @staticmethod + def _equal(x, y): + if type(x) != type(y): + return False + if isinstance(x, torch.Tensor): + return torch.allclose(x, y) + elif isinstance(x, np.ndarray): + return np.allclose(x, y) + else: + return x == y + + def test_init(self): + + data_sample = self.get_pose_data_sample() + self.assertIn('img_shape', data_sample) + self.assertTrue(len(data_sample.gt_instances) == 1) + + def test_setter(self): + + data_sample = self.get_pose_data_sample() + + # test gt_instances + data_sample.gt_instances = InstanceData() + + # test gt_fields + data_sample.gt_fields = PixelData() + + # test multilevel gt_fields + data_sample = self.get_pose_data_sample(multilevel=True) + data_sample.gt_fields = MultilevelPixelData() + + # test pred_instances as pytorch tensor + pred_instances_data = dict( + keypoints=torch.rand(1, 17, 2), scores=torch.rand(1, 17, 1)) + data_sample.pred_instances = InstanceData(**pred_instances_data) + + self.assertTrue( + self._equal(data_sample.pred_instances.keypoints, + pred_instances_data['keypoints'])) + self.assertTrue( + self._equal(data_sample.pred_instances.scores, + pred_instances_data['scores'])) + + # test pred_fields as numpy array + pred_fields_data = dict(heatmaps=np.random.rand(17, 64, 48)) + data_sample.pred_fields = PixelData(**pred_fields_data) + + self.assertTrue( + self._equal(data_sample.pred_fields.heatmaps, + pred_fields_data['heatmaps'])) + + # test to_tensor + data_sample = data_sample.to_tensor() + self.assertTrue( + self._equal(data_sample.pred_fields.heatmaps, + torch.from_numpy(pred_fields_data['heatmaps']))) + + def test_deleter(self): + + data_sample = self.get_pose_data_sample() + + for key in [ + 'gt_instances', + 'pred_instances', + 'gt_fields', + 'pred_fields', + ]: + self.assertIn(key, data_sample) + exec(f'del data_sample.{key}') + self.assertNotIn(key, data_sample) diff --git a/tests/test_utils/test_setup_env.py b/tests/test_utils/test_setup_env.py index fbadbf12bf..ee573cb76b 100644 --- a/tests/test_utils/test_setup_env.py +++ b/tests/test_utils/test_setup_env.py @@ 
-1,46 +1,46 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import datetime -import sys -from unittest import TestCase - -from mmengine import DefaultScope - -from mmpose.utils import register_all_modules - - -class TestSetupEnv(TestCase): - - def test_register_all_modules(self): - from mmpose.registry import DATASETS - - dataset_name = 'CocoDataset' - dataset_module = 'mmpose.datasets.datasets.body.coco_dataset' - - # not init default scope - module = dataset_module - while '.' in module: - sys.modules.pop(module, None) - module = module.rsplit('.', 1)[0] - DATASETS._module_dict.pop(dataset_name, None) - self.assertFalse(dataset_name in DATASETS.module_dict) - register_all_modules(init_default_scope=False) - self.assertTrue(dataset_name in DATASETS.module_dict) - - # init default scope - module = dataset_module - while '.' in module: - sys.modules.pop(module, None) - module = module.rsplit('.', 1)[0] - DATASETS._module_dict.pop(dataset_name, None) - self.assertFalse(dataset_name in DATASETS.module_dict) - register_all_modules(init_default_scope=True) - self.assertTrue(dataset_name in DATASETS.module_dict) - self.assertEqual(DefaultScope.get_current_instance().scope_name, - 'mmpose') - - # init default scope when another scope is init - name = f'test-{datetime.datetime.now()}' - DefaultScope.get_instance(name, scope_name='test') - with self.assertWarnsRegex( - Warning, 'The current default scope "test" is not "mmpose"'): - register_all_modules(init_default_scope=True) +# Copyright (c) OpenMMLab. All rights reserved. +import datetime +import sys +from unittest import TestCase + +from mmengine import DefaultScope + +from mmpose.utils import register_all_modules + + +class TestSetupEnv(TestCase): + + def test_register_all_modules(self): + from mmpose.registry import DATASETS + + dataset_name = 'CocoDataset' + dataset_module = 'mmpose.datasets.datasets.body.coco_dataset' + + # not init default scope + module = dataset_module + while '.' in module: + sys.modules.pop(module, None) + module = module.rsplit('.', 1)[0] + DATASETS._module_dict.pop(dataset_name, None) + self.assertFalse(dataset_name in DATASETS.module_dict) + register_all_modules(init_default_scope=False) + self.assertTrue(dataset_name in DATASETS.module_dict) + + # init default scope + module = dataset_module + while '.' in module: + sys.modules.pop(module, None) + module = module.rsplit('.', 1)[0] + DATASETS._module_dict.pop(dataset_name, None) + self.assertFalse(dataset_name in DATASETS.module_dict) + register_all_modules(init_default_scope=True) + self.assertTrue(dataset_name in DATASETS.module_dict) + self.assertEqual(DefaultScope.get_current_instance().scope_name, + 'mmpose') + + # init default scope when another scope is init + name = f'test-{datetime.datetime.now()}' + DefaultScope.get_instance(name, scope_name='test') + with self.assertWarnsRegex( + Warning, 'The current default scope "test" is not "mmpose"'): + register_all_modules(init_default_scope=True) diff --git a/tests/test_visualization/test_fast_visualizer.py b/tests/test_visualization/test_fast_visualizer.py index f4a24ca1f9..5dae3c63be 100644 --- a/tests/test_visualization/test_fast_visualizer.py +++ b/tests/test_visualization/test_fast_visualizer.py @@ -1,71 +1,71 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from unittest import TestCase - -import numpy as np - -from mmpose.visualization import FastVisualizer - - -class TestFastVisualizer(TestCase): - - def setUp(self): - self.metainfo = { - 'keypoint_id2name': { - 0: 'nose', - 1: 'left_eye', - 2: 'right_eye' - }, - 'keypoint_name2id': { - 'nose': 0, - 'left_eye': 1, - 'right_eye': 2 - }, - 'keypoint_colors': np.array([[255, 0, 0], [0, 255, 0], [0, 0, - 255]]), - 'skeleton_links': [(0, 1), (1, 2)], - 'skeleton_link_colors': np.array([[255, 255, 0], [255, 0, 255]]) - } - self.visualizer = FastVisualizer(self.metainfo) - - def test_init(self): - self.assertEqual(self.visualizer.radius, 6) - self.assertEqual(self.visualizer.line_width, 3) - self.assertEqual(self.visualizer.kpt_thr, 0.3) - self.assertEqual(self.visualizer.keypoint_id2name, - self.metainfo['keypoint_id2name']) - self.assertEqual(self.visualizer.keypoint_name2id, - self.metainfo['keypoint_name2id']) - np.testing.assert_array_equal(self.visualizer.keypoint_colors, - self.metainfo['keypoint_colors']) - self.assertEqual(self.visualizer.skeleton_links, - self.metainfo['skeleton_links']) - np.testing.assert_array_equal(self.visualizer.skeleton_link_colors, - self.metainfo['skeleton_link_colors']) - - def test_draw_pose(self): - img = np.zeros((480, 640, 3), dtype=np.uint8) - instances = type('Instances', (object, ), {})() - instances.keypoints = np.array([[[100, 100], [200, 200], [300, 300]]], - dtype=np.float32) - instances.keypoint_scores = np.array([[0.5, 0.5, 0.5]], - dtype=np.float32) - - self.visualizer.draw_pose(img, instances) - - # Check if keypoints are drawn - self.assertNotEqual(img[100, 100].tolist(), [0, 0, 0]) - self.assertNotEqual(img[200, 200].tolist(), [0, 0, 0]) - self.assertNotEqual(img[300, 300].tolist(), [0, 0, 0]) - - # Check if skeleton links are drawn - self.assertNotEqual(img[150, 150].tolist(), [0, 0, 0]) - self.assertNotEqual(img[250, 250].tolist(), [0, 0, 0]) - - def test_draw_pose_with_none_instances(self): - img = np.zeros((480, 640, 3), dtype=np.uint8) - instances = None - - self.visualizer.draw_pose(img, instances) - - # Check if the image is still empty (black) - self.assertEqual(np.count_nonzero(img), 0) +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import numpy as np + +from mmpose.visualization import FastVisualizer + + +class TestFastVisualizer(TestCase): + + def setUp(self): + self.metainfo = { + 'keypoint_id2name': { + 0: 'nose', + 1: 'left_eye', + 2: 'right_eye' + }, + 'keypoint_name2id': { + 'nose': 0, + 'left_eye': 1, + 'right_eye': 2 + }, + 'keypoint_colors': np.array([[255, 0, 0], [0, 255, 0], [0, 0, + 255]]), + 'skeleton_links': [(0, 1), (1, 2)], + 'skeleton_link_colors': np.array([[255, 255, 0], [255, 0, 255]]) + } + self.visualizer = FastVisualizer(self.metainfo) + + def test_init(self): + self.assertEqual(self.visualizer.radius, 6) + self.assertEqual(self.visualizer.line_width, 3) + self.assertEqual(self.visualizer.kpt_thr, 0.3) + self.assertEqual(self.visualizer.keypoint_id2name, + self.metainfo['keypoint_id2name']) + self.assertEqual(self.visualizer.keypoint_name2id, + self.metainfo['keypoint_name2id']) + np.testing.assert_array_equal(self.visualizer.keypoint_colors, + self.metainfo['keypoint_colors']) + self.assertEqual(self.visualizer.skeleton_links, + self.metainfo['skeleton_links']) + np.testing.assert_array_equal(self.visualizer.skeleton_link_colors, + self.metainfo['skeleton_link_colors']) + + def test_draw_pose(self): + img = np.zeros((480, 640, 3), dtype=np.uint8) + instances = type('Instances', (object, ), {})() + instances.keypoints = np.array([[[100, 100], [200, 200], [300, 300]]], + dtype=np.float32) + instances.keypoint_scores = np.array([[0.5, 0.5, 0.5]], + dtype=np.float32) + + self.visualizer.draw_pose(img, instances) + + # Check if keypoints are drawn + self.assertNotEqual(img[100, 100].tolist(), [0, 0, 0]) + self.assertNotEqual(img[200, 200].tolist(), [0, 0, 0]) + self.assertNotEqual(img[300, 300].tolist(), [0, 0, 0]) + + # Check if skeleton links are drawn + self.assertNotEqual(img[150, 150].tolist(), [0, 0, 0]) + self.assertNotEqual(img[250, 250].tolist(), [0, 0, 0]) + + def test_draw_pose_with_none_instances(self): + img = np.zeros((480, 640, 3), dtype=np.uint8) + instances = None + + self.visualizer.draw_pose(img, instances) + + # Check if the image is still empty (black) + self.assertEqual(np.count_nonzero(img), 0) diff --git a/tests/test_visualization/test_pose_visualizer.py b/tests/test_visualization/test_pose_visualizer.py index d9eb502c30..5bc7ed7208 100644 --- a/tests/test_visualization/test_pose_visualizer.py +++ b/tests/test_visualization/test_pose_visualizer.py @@ -1,111 +1,111 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import os -from unittest import TestCase - -import cv2 -import numpy as np -import torch -from mmengine.structures import InstanceData, PixelData - -from mmpose.structures import PoseDataSample -from mmpose.visualization import PoseLocalVisualizer - - -class TestPoseLocalVisualizer(TestCase): - - def setUp(self): - self.visualizer = PoseLocalVisualizer(show_keypoint_weight=True) - - def _get_dataset_meta(self): - # None: kpt or link is hidden - pose_kpt_color = [None] + [(127, 127, 127)] * 2 + ['red'] - pose_link_color = [(127, 127, 127)] * 2 + [None] - skeleton_links = [[0, 1], [1, 2], [2, 3]] - return { - 'keypoint_colors': pose_kpt_color, - 'skeleton_link_colors': pose_link_color, - 'skeleton_links': skeleton_links - } - - def test_set_dataset_meta(self): - dataset_meta = self._get_dataset_meta() - self.visualizer.set_dataset_meta(dataset_meta) - self.assertEqual(len(self.visualizer.kpt_color), 4) - self.assertEqual(self.visualizer.kpt_color[-1], 'red') - self.assertListEqual(self.visualizer.skeleton[-1], [2, 3]) - - self.visualizer.dataset_meta = None - self.visualizer.set_dataset_meta(dataset_meta) - self.assertIsNotNone(self.visualizer.dataset_meta) - - def test_add_datasample(self): - h, w = 100, 100 - image = np.zeros((h, w, 3), dtype=np.uint8) - out_file = 'out_file.jpg' - - dataset_meta = self._get_dataset_meta() - self.visualizer.set_dataset_meta(dataset_meta) - - # setting keypoints - gt_instances = InstanceData() - gt_instances.keypoints = np.array([[[1, 1], [20, 20], [40, 40], - [80, 80]]], - dtype=np.float32) - - # setting bounding box - gt_instances.bboxes = np.array([[20, 30, 50, 70]]) - - # setting heatmap - heatmap = torch.randn(10, 100, 100) * 0.05 - for i in range(10): - heatmap[i][i * 10:(i + 1) * 10, i * 10:(i + 1) * 10] += 5 - gt_heatmap = PixelData() - gt_heatmap.heatmaps = heatmap - - # test gt_sample - pred_pose_data_sample = PoseDataSample() - pred_pose_data_sample.gt_instances = gt_instances - pred_pose_data_sample.gt_fields = gt_heatmap - pred_instances = gt_instances.clone() - pred_instances.scores = np.array([[0.9, 0.4, 1.7, -0.2]], - dtype=np.float32) - pred_pose_data_sample.pred_instances = pred_instances - - self.visualizer.add_datasample( - 'image', - image, - data_sample=pred_pose_data_sample, - draw_bbox=True, - out_file=out_file) - self._assert_image_and_shape(out_file, (h, w * 2, 3)) - - self.visualizer.show_keypoint_weight = False - self.visualizer.add_datasample( - 'image', - image, - data_sample=pred_pose_data_sample, - draw_pred=False, - draw_heatmap=True, - out_file=out_file) - self._assert_image_and_shape(out_file, ((h * 2), w, 3)) - - self.visualizer.add_datasample( - 'image', - image, - data_sample=pred_pose_data_sample, - draw_heatmap=True, - out_file=out_file) - self._assert_image_and_shape(out_file, ((h * 2), (w * 2), 3)) - - def test_simcc_visualization(self): - img = np.zeros((512, 512, 3), dtype=np.uint8) - heatmap = torch.randn([17, 512, 512]) - pixelData = PixelData() - pixelData.heatmaps = heatmap - self.visualizer._draw_instance_xy_heatmap(pixelData, img, 10) - - def _assert_image_and_shape(self, out_file, out_shape): - self.assertTrue(os.path.exists(out_file)) - drawn_img = cv2.imread(out_file) - self.assertTupleEqual(drawn_img.shape, out_shape) - os.remove(out_file) +# Copyright (c) OpenMMLab. All rights reserved. 
+import os +from unittest import TestCase + +import cv2 +import numpy as np +import torch +from mmengine.structures import InstanceData, PixelData + +from mmpose.structures import PoseDataSample +from mmpose.visualization import PoseLocalVisualizer + + +class TestPoseLocalVisualizer(TestCase): + + def setUp(self): + self.visualizer = PoseLocalVisualizer(show_keypoint_weight=True) + + def _get_dataset_meta(self): + # None: kpt or link is hidden + pose_kpt_color = [None] + [(127, 127, 127)] * 2 + ['red'] + pose_link_color = [(127, 127, 127)] * 2 + [None] + skeleton_links = [[0, 1], [1, 2], [2, 3]] + return { + 'keypoint_colors': pose_kpt_color, + 'skeleton_link_colors': pose_link_color, + 'skeleton_links': skeleton_links + } + + def test_set_dataset_meta(self): + dataset_meta = self._get_dataset_meta() + self.visualizer.set_dataset_meta(dataset_meta) + self.assertEqual(len(self.visualizer.kpt_color), 4) + self.assertEqual(self.visualizer.kpt_color[-1], 'red') + self.assertListEqual(self.visualizer.skeleton[-1], [2, 3]) + + self.visualizer.dataset_meta = None + self.visualizer.set_dataset_meta(dataset_meta) + self.assertIsNotNone(self.visualizer.dataset_meta) + + def test_add_datasample(self): + h, w = 100, 100 + image = np.zeros((h, w, 3), dtype=np.uint8) + out_file = 'out_file.jpg' + + dataset_meta = self._get_dataset_meta() + self.visualizer.set_dataset_meta(dataset_meta) + + # setting keypoints + gt_instances = InstanceData() + gt_instances.keypoints = np.array([[[1, 1], [20, 20], [40, 40], + [80, 80]]], + dtype=np.float32) + + # setting bounding box + gt_instances.bboxes = np.array([[20, 30, 50, 70]]) + + # setting heatmap + heatmap = torch.randn(10, 100, 100) * 0.05 + for i in range(10): + heatmap[i][i * 10:(i + 1) * 10, i * 10:(i + 1) * 10] += 5 + gt_heatmap = PixelData() + gt_heatmap.heatmaps = heatmap + + # test gt_sample + pred_pose_data_sample = PoseDataSample() + pred_pose_data_sample.gt_instances = gt_instances + pred_pose_data_sample.gt_fields = gt_heatmap + pred_instances = gt_instances.clone() + pred_instances.scores = np.array([[0.9, 0.4, 1.7, -0.2]], + dtype=np.float32) + pred_pose_data_sample.pred_instances = pred_instances + + self.visualizer.add_datasample( + 'image', + image, + data_sample=pred_pose_data_sample, + draw_bbox=True, + out_file=out_file) + self._assert_image_and_shape(out_file, (h, w * 2, 3)) + + self.visualizer.show_keypoint_weight = False + self.visualizer.add_datasample( + 'image', + image, + data_sample=pred_pose_data_sample, + draw_pred=False, + draw_heatmap=True, + out_file=out_file) + self._assert_image_and_shape(out_file, ((h * 2), w, 3)) + + self.visualizer.add_datasample( + 'image', + image, + data_sample=pred_pose_data_sample, + draw_heatmap=True, + out_file=out_file) + self._assert_image_and_shape(out_file, ((h * 2), (w * 2), 3)) + + def test_simcc_visualization(self): + img = np.zeros((512, 512, 3), dtype=np.uint8) + heatmap = torch.randn([17, 512, 512]) + pixelData = PixelData() + pixelData.heatmaps = heatmap + self.visualizer._draw_instance_xy_heatmap(pixelData, img, 10) + + def _assert_image_and_shape(self, out_file, out_shape): + self.assertTrue(os.path.exists(out_file)) + drawn_img = cv2.imread(out_file) + self.assertTupleEqual(drawn_img.shape, out_shape) + os.remove(out_file) diff --git a/tools/analysis_tools/analyze_logs.py b/tools/analysis_tools/analyze_logs.py index 3fa4107db9..82c1120161 100644 --- a/tools/analysis_tools/analyze_logs.py +++ b/tools/analysis_tools/analyze_logs.py @@ -1,163 +1,163 @@ -# Copyright (c) OpenMMLab. 
All rights reserved. -import argparse -import json -from collections import defaultdict - -import matplotlib.pyplot as plt -import numpy as np -import seaborn as sns - - -def cal_train_time(log_dicts, args): - for i, log_dict in enumerate(log_dicts): - print(f'{"-" * 5}Analyze train time of {args.json_logs[i]}{"-" * 5}') - all_times = [] - for epoch in log_dict.keys(): - if args.include_outliers: - all_times.append(log_dict[epoch]['time']) - else: - all_times.append(log_dict[epoch]['time'][1:]) - all_times = np.array(all_times) - epoch_ave_time = all_times.mean(-1) - slowest_epoch = epoch_ave_time.argmax() - fastest_epoch = epoch_ave_time.argmin() - std_over_epoch = epoch_ave_time.std() - print(f'slowest epoch {slowest_epoch + 1}, ' - f'average time is {epoch_ave_time[slowest_epoch]:.4f}') - print(f'fastest epoch {fastest_epoch + 1}, ' - f'average time is {epoch_ave_time[fastest_epoch]:.4f}') - print(f'time std over epochs is {std_over_epoch:.4f}') - print(f'average iter time: {np.mean(all_times):.4f} s/iter') - print() - - -def plot_curve(log_dicts, args): - if args.backend is not None: - plt.switch_backend(args.backend) - sns.set_style(args.style) - # if legend is None, use {filename}_{key} as legend - legend = args.legend - if legend is None: - legend = [] - for json_log in args.json_logs: - for metric in args.keys: - legend.append(f'{json_log}_{metric}') - assert len(legend) == (len(args.json_logs) * len(args.keys)) - metrics = args.keys - - num_metrics = len(metrics) - for i, log_dict in enumerate(log_dicts): - epochs = list(log_dict.keys()) - for j, metric in enumerate(metrics): - print(f'plot curve of {args.json_logs[i]}, metric is {metric}') - if metric not in log_dict[epochs[0]]: - raise KeyError( - f'{args.json_logs[i]} does not contain metric {metric}') - xs = [] - ys = [] - for epoch in epochs: - xs.append(np.array(log_dict[epoch]['step'])) - ys.append(np.array(log_dict[epoch][metric])) - xs = np.concatenate(xs) - ys = np.concatenate(ys) - plt.xlabel('step') - plt.plot(xs, ys, label=legend[i * num_metrics + j], linewidth=0.5) - plt.legend() - if args.title is not None: - plt.title(args.title) - if args.out is None: - plt.show() - else: - print(f'save curve to: {args.out}') - plt.savefig(args.out) - plt.cla() - - -def add_plot_parser(subparsers): - parser_plt = subparsers.add_parser( - 'plot_curve', help='parser for plotting curves') - parser_plt.add_argument( - 'json_logs', - type=str, - nargs='+', - help='path of train log in json format') - parser_plt.add_argument( - '--keys', - type=str, - nargs='+', - default=['loss_kpt'], - help='the metric that you want to plot') - parser_plt.add_argument('--title', type=str, help='title of figure') - parser_plt.add_argument( - '--legend', - type=str, - nargs='+', - default=None, - help='legend of each plot') - parser_plt.add_argument( - '--backend', type=str, default=None, help='backend of plt') - parser_plt.add_argument( - '--style', type=str, default='dark', help='style of plt') - parser_plt.add_argument('--out', type=str, default=None) - - -def add_time_parser(subparsers): - parser_time = subparsers.add_parser( - 'cal_train_time', - help='parser for computing the average time per training iteration') - parser_time.add_argument( - 'json_logs', - type=str, - nargs='+', - help='path of train log in json format') - parser_time.add_argument( - '--include-outliers', - action='store_true', - help='include the first value of every epoch when computing ' - 'the average time') - - -def parse_args(): - parser = 
argparse.ArgumentParser(description='Analyze Json Log') - # currently only support plot curve and calculate average train time - subparsers = parser.add_subparsers(dest='task', help='task parser') - add_plot_parser(subparsers) - add_time_parser(subparsers) - args = parser.parse_args() - return args - - -def load_json_logs(json_logs): - # load and convert json_logs to log_dict, key is epoch, value is a sub dict - # keys of sub dict is different metrics, e.g. memory, top1_acc - # value of sub dict is a list of corresponding values of all iterations - log_dicts = [dict() for _ in json_logs] - for json_log, log_dict in zip(json_logs, log_dicts): - with open(json_log, 'r') as log_file: - for line in log_file: - log = json.loads(line.strip()) - # skip lines without `epoch` field - if 'epoch' not in log: - continue - epoch = log.pop('epoch') - if epoch not in log_dict: - log_dict[epoch] = defaultdict(list) - for k, v in log.items(): - log_dict[epoch][k].append(v) - return log_dicts - - -def main(): - args = parse_args() - - json_logs = args.json_logs - for json_log in json_logs: - assert json_log.endswith('.json') - - log_dicts = load_json_logs(json_logs) - - eval(args.task)(log_dicts, args) - - -if __name__ == '__main__': - main() +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import json +from collections import defaultdict + +import matplotlib.pyplot as plt +import numpy as np +import seaborn as sns + + +def cal_train_time(log_dicts, args): + for i, log_dict in enumerate(log_dicts): + print(f'{"-" * 5}Analyze train time of {args.json_logs[i]}{"-" * 5}') + all_times = [] + for epoch in log_dict.keys(): + if args.include_outliers: + all_times.append(log_dict[epoch]['time']) + else: + all_times.append(log_dict[epoch]['time'][1:]) + all_times = np.array(all_times) + epoch_ave_time = all_times.mean(-1) + slowest_epoch = epoch_ave_time.argmax() + fastest_epoch = epoch_ave_time.argmin() + std_over_epoch = epoch_ave_time.std() + print(f'slowest epoch {slowest_epoch + 1}, ' + f'average time is {epoch_ave_time[slowest_epoch]:.4f}') + print(f'fastest epoch {fastest_epoch + 1}, ' + f'average time is {epoch_ave_time[fastest_epoch]:.4f}') + print(f'time std over epochs is {std_over_epoch:.4f}') + print(f'average iter time: {np.mean(all_times):.4f} s/iter') + print() + + +def plot_curve(log_dicts, args): + if args.backend is not None: + plt.switch_backend(args.backend) + sns.set_style(args.style) + # if legend is None, use {filename}_{key} as legend + legend = args.legend + if legend is None: + legend = [] + for json_log in args.json_logs: + for metric in args.keys: + legend.append(f'{json_log}_{metric}') + assert len(legend) == (len(args.json_logs) * len(args.keys)) + metrics = args.keys + + num_metrics = len(metrics) + for i, log_dict in enumerate(log_dicts): + epochs = list(log_dict.keys()) + for j, metric in enumerate(metrics): + print(f'plot curve of {args.json_logs[i]}, metric is {metric}') + if metric not in log_dict[epochs[0]]: + raise KeyError( + f'{args.json_logs[i]} does not contain metric {metric}') + xs = [] + ys = [] + for epoch in epochs: + xs.append(np.array(log_dict[epoch]['step'])) + ys.append(np.array(log_dict[epoch][metric])) + xs = np.concatenate(xs) + ys = np.concatenate(ys) + plt.xlabel('step') + plt.plot(xs, ys, label=legend[i * num_metrics + j], linewidth=0.5) + plt.legend() + if args.title is not None: + plt.title(args.title) + if args.out is None: + plt.show() + else: + print(f'save curve to: {args.out}') + plt.savefig(args.out) + plt.cla() + + +def 
add_plot_parser(subparsers): + parser_plt = subparsers.add_parser( + 'plot_curve', help='parser for plotting curves') + parser_plt.add_argument( + 'json_logs', + type=str, + nargs='+', + help='path of train log in json format') + parser_plt.add_argument( + '--keys', + type=str, + nargs='+', + default=['loss_kpt'], + help='the metric that you want to plot') + parser_plt.add_argument('--title', type=str, help='title of figure') + parser_plt.add_argument( + '--legend', + type=str, + nargs='+', + default=None, + help='legend of each plot') + parser_plt.add_argument( + '--backend', type=str, default=None, help='backend of plt') + parser_plt.add_argument( + '--style', type=str, default='dark', help='style of plt') + parser_plt.add_argument('--out', type=str, default=None) + + +def add_time_parser(subparsers): + parser_time = subparsers.add_parser( + 'cal_train_time', + help='parser for computing the average time per training iteration') + parser_time.add_argument( + 'json_logs', + type=str, + nargs='+', + help='path of train log in json format') + parser_time.add_argument( + '--include-outliers', + action='store_true', + help='include the first value of every epoch when computing ' + 'the average time') + + +def parse_args(): + parser = argparse.ArgumentParser(description='Analyze Json Log') + # currently only support plot curve and calculate average train time + subparsers = parser.add_subparsers(dest='task', help='task parser') + add_plot_parser(subparsers) + add_time_parser(subparsers) + args = parser.parse_args() + return args + + +def load_json_logs(json_logs): + # load and convert json_logs to log_dict, key is epoch, value is a sub dict + # keys of sub dict is different metrics, e.g. memory, top1_acc + # value of sub dict is a list of corresponding values of all iterations + log_dicts = [dict() for _ in json_logs] + for json_log, log_dict in zip(json_logs, log_dicts): + with open(json_log, 'r') as log_file: + for line in log_file: + log = json.loads(line.strip()) + # skip lines without `epoch` field + if 'epoch' not in log: + continue + epoch = log.pop('epoch') + if epoch not in log_dict: + log_dict[epoch] = defaultdict(list) + for k, v in log.items(): + log_dict[epoch][k].append(v) + return log_dicts + + +def main(): + args = parse_args() + + json_logs = args.json_logs + for json_log in json_logs: + assert json_log.endswith('.json') + + log_dicts = load_json_logs(json_logs) + + eval(args.task)(log_dicts, args) + + +if __name__ == '__main__': + main() diff --git a/tools/analysis_tools/get_flops.py b/tools/analysis_tools/get_flops.py index bb0d65d62a..4201390c4a 100644 --- a/tools/analysis_tools/get_flops.py +++ b/tools/analysis_tools/get_flops.py @@ -1,142 +1,142 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
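analyze_logs.py above is meant to be run as a script, but the same plotting path can be exercised directly, as sketched below. The log path is a hypothetical placeholder, and the repository root is assumed to be on sys.path so the namespace import resolves.

from argparse import Namespace

from tools.analysis_tools.analyze_logs import load_json_logs, plot_curve

# Hypothetical epoch-based JSON-lines training log.
json_logs = ['work_dirs/example/vis_data/scalars.json']

args = Namespace(
    json_logs=json_logs,
    keys=['loss_kpt'],
    legend=None,      # falls back to '<file>_<metric>'
    backend=None,
    style='dark',
    title=None,
    out='loss_kpt_curve.png')

log_dicts = load_json_logs(json_logs)
plot_curve(log_dicts, args)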
-import argparse - -import numpy as np -import torch -from mmengine.config import DictAction -from mmengine.logging import MMLogger - -from mmpose.apis.inference import init_model - -try: - from mmengine.analysis import get_model_complexity_info - from mmengine.analysis.print_helper import _format_size -except ImportError: - raise ImportError('Please upgrade mmengine >= 0.6.0') - - -def parse_args(): - parser = argparse.ArgumentParser( - description='Get complexity information from a model config') - parser.add_argument('config', help='train config file path') - parser.add_argument( - '--device', default='cpu', help='Device used for model initialization') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - default={}, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. For example, ' - "'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'") - parser.add_argument( - '--input-shape', - type=int, - nargs='+', - default=[256, 192], - help='input image size') - parser.add_argument( - '--batch-size', - '-b', - type=int, - default=1, - help='Input batch size. If specified and greater than 1, it takes a ' - 'callable method that generates a batch input. Otherwise, it will ' - 'generate a random tensor with input shape to calculate FLOPs.') - parser.add_argument( - '--show-arch-info', - '-s', - action='store_true', - help='Whether to show model arch information') - args = parser.parse_args() - return args - - -def batch_constructor(flops_model, batch_size, input_shape): - """Generate a batch of tensors to the model.""" - batch = {} - - inputs = torch.randn(batch_size, *input_shape).new_empty( - (batch_size, *input_shape), - dtype=next(flops_model.parameters()).dtype, - device=next(flops_model.parameters()).device) - - batch['inputs'] = inputs - return batch - - -def inference(args, input_shape, logger): - model = init_model( - args.config, - checkpoint=None, - device=args.device, - cfg_options=args.cfg_options) - - if hasattr(model, '_forward'): - model.forward = model._forward - else: - raise NotImplementedError( - 'FLOPs counter is currently not currently supported with {}'. - format(model.__class__.__name__)) - - if args.batch_size > 1: - outputs = {} - avg_flops = [] - logger.info('Running get_flops with batch size specified as {}'.format( - args.batch_size)) - batch = batch_constructor(model, args.batch_size, input_shape) - for i in range(args.batch_size): - result = get_model_complexity_info( - model, - input_shape, - inputs=batch['inputs'], - show_table=True, - show_arch=args.show_arch_info) - avg_flops.append(result['flops']) - mean_flops = _format_size(int(np.average(avg_flops))) - outputs['flops_str'] = mean_flops - outputs['params_str'] = result['params_str'] - outputs['out_table'] = result['out_table'] - outputs['out_arch'] = result['out_arch'] - else: - outputs = get_model_complexity_info( - model, - input_shape, - inputs=None, - show_table=True, - show_arch=args.show_arch_info) - return outputs - - -def main(): - args = parse_args() - logger = MMLogger.get_instance(name='MMLogger') - - if len(args.input_shape) == 1: - input_shape = (3, args.input_shape[0], args.input_shape[0]) - elif len(args.input_shape) == 2: - input_shape = (3, ) + tuple(args.input_shape) - else: - raise ValueError('invalid input shape') - - if args.device == 'cuda:0': - assert torch.cuda.is_available( - ), 'No valid cuda device detected, please double check...' 
- - outputs = inference(args, input_shape, logger) - flops = outputs['flops_str'] - params = outputs['params_str'] - split_line = '=' * 30 - input_shape = (args.batch_size, ) + input_shape - print(f'{split_line}\nInput shape: {input_shape}\n' - f'Flops: {flops}\nParams: {params}\n{split_line}') - print(outputs['out_table']) - if args.show_arch_info: - print(outputs['out_arch']) - print('!!!Please be cautious if you use the results in papers. ' - 'You may need to check if all ops are supported and verify that the ' - 'flops computation is correct.') - - -if __name__ == '__main__': - main() +# Copyright (c) OpenMMLab. All rights reserved. +import argparse + +import numpy as np +import torch +from mmengine.config import DictAction +from mmengine.logging import MMLogger + +from mmpose.apis.inference import init_model + +try: + from mmengine.analysis import get_model_complexity_info + from mmengine.analysis.print_helper import _format_size +except ImportError: + raise ImportError('Please upgrade mmengine >= 0.6.0') + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Get complexity information from a model config') + parser.add_argument('config', help='train config file path') + parser.add_argument( + '--device', default='cpu', help='Device used for model initialization') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + default={}, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. For example, ' + "'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'") + parser.add_argument( + '--input-shape', + type=int, + nargs='+', + default=[256, 192], + help='input image size') + parser.add_argument( + '--batch-size', + '-b', + type=int, + default=1, + help='Input batch size. If specified and greater than 1, it takes a ' + 'callable method that generates a batch input. Otherwise, it will ' + 'generate a random tensor with input shape to calculate FLOPs.') + parser.add_argument( + '--show-arch-info', + '-s', + action='store_true', + help='Whether to show model arch information') + args = parser.parse_args() + return args + + +def batch_constructor(flops_model, batch_size, input_shape): + """Generate a batch of tensors to the model.""" + batch = {} + + inputs = torch.randn(batch_size, *input_shape).new_empty( + (batch_size, *input_shape), + dtype=next(flops_model.parameters()).dtype, + device=next(flops_model.parameters()).device) + + batch['inputs'] = inputs + return batch + + +def inference(args, input_shape, logger): + model = init_model( + args.config, + checkpoint=None, + device=args.device, + cfg_options=args.cfg_options) + + if hasattr(model, '_forward'): + model.forward = model._forward + else: + raise NotImplementedError( + 'FLOPs counter is currently not currently supported with {}'. 
+ format(model.__class__.__name__)) + + if args.batch_size > 1: + outputs = {} + avg_flops = [] + logger.info('Running get_flops with batch size specified as {}'.format( + args.batch_size)) + batch = batch_constructor(model, args.batch_size, input_shape) + for i in range(args.batch_size): + result = get_model_complexity_info( + model, + input_shape, + inputs=batch['inputs'], + show_table=True, + show_arch=args.show_arch_info) + avg_flops.append(result['flops']) + mean_flops = _format_size(int(np.average(avg_flops))) + outputs['flops_str'] = mean_flops + outputs['params_str'] = result['params_str'] + outputs['out_table'] = result['out_table'] + outputs['out_arch'] = result['out_arch'] + else: + outputs = get_model_complexity_info( + model, + input_shape, + inputs=None, + show_table=True, + show_arch=args.show_arch_info) + return outputs + + +def main(): + args = parse_args() + logger = MMLogger.get_instance(name='MMLogger') + + if len(args.input_shape) == 1: + input_shape = (3, args.input_shape[0], args.input_shape[0]) + elif len(args.input_shape) == 2: + input_shape = (3, ) + tuple(args.input_shape) + else: + raise ValueError('invalid input shape') + + if args.device == 'cuda:0': + assert torch.cuda.is_available( + ), 'No valid cuda device detected, please double check...' + + outputs = inference(args, input_shape, logger) + flops = outputs['flops_str'] + params = outputs['params_str'] + split_line = '=' * 30 + input_shape = (args.batch_size, ) + input_shape + print(f'{split_line}\nInput shape: {input_shape}\n' + f'Flops: {flops}\nParams: {params}\n{split_line}') + print(outputs['out_table']) + if args.show_arch_info: + print(outputs['out_arch']) + print('!!!Please be cautious if you use the results in papers. ' + 'You may need to check if all ops are supported and verify that the ' + 'flops computation is correct.') + + +if __name__ == '__main__': + main() diff --git a/tools/analysis_tools/print_config.py b/tools/analysis_tools/print_config.py index a5e6e641e4..661d4aca56 100644 --- a/tools/analysis_tools/print_config.py +++ b/tools/analysis_tools/print_config.py @@ -1,27 +1,27 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import argparse - -from mmengine import Config, DictAction - - -def parse_args(): - parser = argparse.ArgumentParser(description='Print the whole config') - parser.add_argument('config', help='config file path') - parser.add_argument( - '--options', nargs='+', action=DictAction, help='arguments in dict') - args = parser.parse_args() - - return args - - -def main(): - args = parse_args() - - cfg = Config.fromfile(args.config) - if args.options is not None: - cfg.merge_from_dict(args.options) - print(f'Config:\n{cfg.pretty_text}') - - -if __name__ == '__main__': - main() +# Copyright (c) OpenMMLab. All rights reserved. 
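The FLOPs counter above can also be called without its CLI. A sketch follows, under the assumptions that the repository root is on sys.path and that the config path is a hypothetical placeholder.

from argparse import Namespace

from mmengine.logging import MMLogger

from tools.analysis_tools.get_flops import inference

args = Namespace(
    config='configs/body_2d_keypoint/example_config.py',  # hypothetical config
    device='cpu',
    cfg_options={},
    batch_size=1,        # single random tensor with the given input shape
    show_arch_info=False)

outputs = inference(args, (3, 256, 192), MMLogger.get_instance('MMLogger'))
print(outputs['flops_str'], outputs['params_str'])
print(outputs['out_table'])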
+import argparse + +from mmengine import Config, DictAction + + +def parse_args(): + parser = argparse.ArgumentParser(description='Print the whole config') + parser.add_argument('config', help='config file path') + parser.add_argument( + '--options', nargs='+', action=DictAction, help='arguments in dict') + args = parser.parse_args() + + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + if args.options is not None: + cfg.merge_from_dict(args.options) + print(f'Config:\n{cfg.pretty_text}') + + +if __name__ == '__main__': + main() diff --git a/tools/dataset_converters/h36m_to_coco.py b/tools/dataset_converters/h36m_to_coco.py index cb3ba8b7ab..5721bc6189 100644 --- a/tools/dataset_converters/h36m_to_coco.py +++ b/tools/dataset_converters/h36m_to_coco.py @@ -1,166 +1,166 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import argparse -import os.path as osp -from functools import wraps - -import mmengine -import numpy as np -from PIL import Image - -from mmpose.utils import SimpleCamera - - -def _keypoint_camera_to_world(keypoints, - camera_params, - image_name=None, - dataset='Body3DH36MDataset'): - """Project 3D keypoints from the camera space to the world space. - - Args: - keypoints (np.ndarray): 3D keypoints in shape [..., 3] - camera_params (dict): Parameters for all cameras. - image_name (str): The image name to specify the camera. - dataset (str): The dataset type, e.g., Body3DH36MDataset. - """ - cam_key = None - if dataset == 'Body3DH36MDataset': - subj, rest = osp.basename(image_name).split('_', 1) - _, rest = rest.split('.', 1) - camera, rest = rest.split('_', 1) - cam_key = (subj, camera) - else: - raise NotImplementedError - - camera = SimpleCamera(camera_params[cam_key]) - keypoints_world = keypoints.copy() - keypoints_world[..., :3] = camera.camera_to_world(keypoints[..., :3]) - - return keypoints_world - - -def _get_bbox_xywh(center, scale, w=200, h=200): - w = w * scale - h = h * scale - x = center[0] - w / 2 - y = center[1] - h / 2 - return [x, y, w, h] - - -def mmcv_track_func(func): - - @wraps(func) - def wrapped_func(args): - return func(*args) - - return wrapped_func - - -@mmcv_track_func -def _get_img_info(img_idx, img_name, img_root): - try: - im = Image.open(osp.join(img_root, img_name)) - w, h = im.size - except: # noqa: E722 - return None - - img = { - 'file_name': img_name, - 'height': h, - 'width': w, - 'id': img_idx + 1, - } - return img - - -@mmcv_track_func -def _get_ann(idx, kpt_2d, kpt_3d, center, scale, imgname, camera_params): - bbox = _get_bbox_xywh(center, scale) - kpt_3d = _keypoint_camera_to_world(kpt_3d, camera_params, imgname) - - ann = { - 'id': idx + 1, - 'category_id': 1, - 'image_id': idx + 1, - 'iscrowd': 0, - 'bbox': bbox, - 'area': bbox[2] * bbox[3], - 'num_keypoints': 17, - 'keypoints': kpt_2d.reshape(-1).tolist(), - 'keypoints_3d': kpt_3d.reshape(-1).tolist() - } - - return ann - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument( - '--ann-file', type=str, default='tests/data/h36m/test_h36m_body3d.npz') - parser.add_argument( - '--camera-param-file', type=str, default='tests/data/h36m/cameras.pkl') - parser.add_argument('--img-root', type=str, default='tests/data/h36m') - parser.add_argument( - '--out-file', type=str, default='tests/data/h36m/h36m_coco.json') - parser.add_argument('--full-img-name', action='store_true') - - args = parser.parse_args() - - h36m_data = np.load(args.ann_file) - h36m_camera_params = mmengine.load(args.camera_param_file) - h36m_coco = {} - - # 
categories - h36m_cats = [{ - 'supercategory': - 'person', - 'id': - 1, - 'name': - 'person', - 'keypoints': [ - 'root (pelvis)', 'left_hip', 'left_knee', 'left_foot', 'right_hip', - 'right_knee', 'right_foot', 'spine', 'thorax', 'neck_base', 'head', - 'left_shoulder', 'left_elbow', 'left_wrist', 'right_shoulder', - 'right_elbow', 'right_wrist' - ], - 'skeleton': [[0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6], [0, 7], - [7, 8], [8, 9], [9, 10], [8, 11], [11, 12], [12, 13], - [8, 14], [14, 15], [15, 16]], - }] - - # images - imgnames = h36m_data['imgname'] - if not args.full_img_name: - imgnames = [osp.basename(fn) for fn in imgnames] - tasks = [(idx, fn, args.img_root) for idx, fn in enumerate(imgnames)] - - h36m_imgs = mmengine.track_parallel_progress( - _get_img_info, tasks, nproc=12) - - # annotations - kpts_2d = h36m_data['part'] - kpts_3d = h36m_data['S'] - centers = h36m_data['center'] - scales = h36m_data['scale'] - tasks = [(idx, ) + args + (h36m_camera_params, ) - for idx, args in enumerate( - zip(kpts_2d, kpts_3d, centers, scales, imgnames))] - - h36m_anns = mmengine.track_parallel_progress(_get_ann, tasks, nproc=12) - - # remove invalid data - h36m_imgs = [img for img in h36m_imgs if img is not None] - h36m_img_ids = set([img['id'] for img in h36m_imgs]) - h36m_anns = [ann for ann in h36m_anns if ann['image_id'] in h36m_img_ids] - - h36m_coco = { - 'categories': h36m_cats, - 'images': h36m_imgs, - 'annotations': h36m_anns, - } - - mmengine.dump(h36m_coco, args.out_file) - - -if __name__ == '__main__': - main() +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from functools import wraps + +import mmengine +import numpy as np +from PIL import Image + +from mmpose.utils import SimpleCamera + + +def _keypoint_camera_to_world(keypoints, + camera_params, + image_name=None, + dataset='Body3DH36MDataset'): + """Project 3D keypoints from the camera space to the world space. + + Args: + keypoints (np.ndarray): 3D keypoints in shape [..., 3] + camera_params (dict): Parameters for all cameras. + image_name (str): The image name to specify the camera. + dataset (str): The dataset type, e.g., Body3DH36MDataset. 
+ """ + cam_key = None + if dataset == 'Body3DH36MDataset': + subj, rest = osp.basename(image_name).split('_', 1) + _, rest = rest.split('.', 1) + camera, rest = rest.split('_', 1) + cam_key = (subj, camera) + else: + raise NotImplementedError + + camera = SimpleCamera(camera_params[cam_key]) + keypoints_world = keypoints.copy() + keypoints_world[..., :3] = camera.camera_to_world(keypoints[..., :3]) + + return keypoints_world + + +def _get_bbox_xywh(center, scale, w=200, h=200): + w = w * scale + h = h * scale + x = center[0] - w / 2 + y = center[1] - h / 2 + return [x, y, w, h] + + +def mmcv_track_func(func): + + @wraps(func) + def wrapped_func(args): + return func(*args) + + return wrapped_func + + +@mmcv_track_func +def _get_img_info(img_idx, img_name, img_root): + try: + im = Image.open(osp.join(img_root, img_name)) + w, h = im.size + except: # noqa: E722 + return None + + img = { + 'file_name': img_name, + 'height': h, + 'width': w, + 'id': img_idx + 1, + } + return img + + +@mmcv_track_func +def _get_ann(idx, kpt_2d, kpt_3d, center, scale, imgname, camera_params): + bbox = _get_bbox_xywh(center, scale) + kpt_3d = _keypoint_camera_to_world(kpt_3d, camera_params, imgname) + + ann = { + 'id': idx + 1, + 'category_id': 1, + 'image_id': idx + 1, + 'iscrowd': 0, + 'bbox': bbox, + 'area': bbox[2] * bbox[3], + 'num_keypoints': 17, + 'keypoints': kpt_2d.reshape(-1).tolist(), + 'keypoints_3d': kpt_3d.reshape(-1).tolist() + } + + return ann + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + '--ann-file', type=str, default='tests/data/h36m/test_h36m_body3d.npz') + parser.add_argument( + '--camera-param-file', type=str, default='tests/data/h36m/cameras.pkl') + parser.add_argument('--img-root', type=str, default='tests/data/h36m') + parser.add_argument( + '--out-file', type=str, default='tests/data/h36m/h36m_coco.json') + parser.add_argument('--full-img-name', action='store_true') + + args = parser.parse_args() + + h36m_data = np.load(args.ann_file) + h36m_camera_params = mmengine.load(args.camera_param_file) + h36m_coco = {} + + # categories + h36m_cats = [{ + 'supercategory': + 'person', + 'id': + 1, + 'name': + 'person', + 'keypoints': [ + 'root (pelvis)', 'left_hip', 'left_knee', 'left_foot', 'right_hip', + 'right_knee', 'right_foot', 'spine', 'thorax', 'neck_base', 'head', + 'left_shoulder', 'left_elbow', 'left_wrist', 'right_shoulder', + 'right_elbow', 'right_wrist' + ], + 'skeleton': [[0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6], [0, 7], + [7, 8], [8, 9], [9, 10], [8, 11], [11, 12], [12, 13], + [8, 14], [14, 15], [15, 16]], + }] + + # images + imgnames = h36m_data['imgname'] + if not args.full_img_name: + imgnames = [osp.basename(fn) for fn in imgnames] + tasks = [(idx, fn, args.img_root) for idx, fn in enumerate(imgnames)] + + h36m_imgs = mmengine.track_parallel_progress( + _get_img_info, tasks, nproc=12) + + # annotations + kpts_2d = h36m_data['part'] + kpts_3d = h36m_data['S'] + centers = h36m_data['center'] + scales = h36m_data['scale'] + tasks = [(idx, ) + args + (h36m_camera_params, ) + for idx, args in enumerate( + zip(kpts_2d, kpts_3d, centers, scales, imgnames))] + + h36m_anns = mmengine.track_parallel_progress(_get_ann, tasks, nproc=12) + + # remove invalid data + h36m_imgs = [img for img in h36m_imgs if img is not None] + h36m_img_ids = set([img['id'] for img in h36m_imgs]) + h36m_anns = [ann for ann in h36m_anns if ann['image_id'] in h36m_img_ids] + + h36m_coco = { + 'categories': h36m_cats, + 'images': h36m_imgs, + 'annotations': h36m_anns, + } + + 
mmengine.dump(h36m_coco, args.out_file) + + +if __name__ == '__main__': + main() diff --git a/tools/dataset_converters/labelstudio2coco.py b/tools/dataset_converters/labelstudio2coco.py index 12f4c61851..3595027e37 100755 --- a/tools/dataset_converters/labelstudio2coco.py +++ b/tools/dataset_converters/labelstudio2coco.py @@ -1,249 +1,249 @@ -# ----------------------------------------------------------------------------- -# Based on https://github.com/heartexlabs/label-studio-converter -# Original license: Copyright (c) Heartex, under the Apache 2.0 License. -# ----------------------------------------------------------------------------- - -import argparse -import io -import json -import logging -import pathlib -import xml.etree.ElementTree as ET -from datetime import datetime - -import numpy as np - -logger = logging.getLogger(__name__) - - -def parse_args(): - parser = argparse.ArgumentParser( - description='Convert Label Studio JSON file to COCO format JSON File') - parser.add_argument('config', help='Labeling Interface xml code file path') - parser.add_argument('input', help='Label Studio format JSON file path') - parser.add_argument('output', help='The output COCO format JSON file path') - args = parser.parse_args() - return args - - -class LSConverter: - - def __init__(self, config: str): - """Convert the Label Studio Format JSON file to COCO format JSON file - which is needed by mmpose. - - The annotations in label studio must follow the order: - keypoint 1, keypoint 2... keypoint n, rect of the instance, - polygon of the instance, - then annotations of the next instance. - Where the order of rect and polygon can be switched, - the bbox and area of the instance will be calculated with - the data behind. - - Only annotating one of rect and polygon is also acceptable. - Args: - config (str): The annotations config xml file. - The xml content is from Project Setting -> - Label Interface -> Code. - Example: - ``` - - - - - - - - - - ``` - """ - # get label info from config file - tree = ET.parse(config) - root = tree.getroot() - labels = root.findall('.//KeyPointLabels/Label') - label_values = [label.get('value') for label in labels] - - self.categories = list() - self.category_name_to_id = dict() - for i, value in enumerate(label_values): - # category id start with 1 - self.categories.append({'id': i + 1, 'name': value}) - self.category_name_to_id[value] = i + 1 - - def convert_to_coco(self, input_json: str, output_json: str): - """Convert `input_json` to COCO format and save in `output_json`. - - Args: - input_json (str): The path of Label Studio format JSON file. - output_json (str): The path of the output COCO JSON file. 
- """ - - def add_image(images, width, height, image_id, image_path): - images.append({ - 'width': width, - 'height': height, - 'id': image_id, - 'file_name': image_path, - }) - return images - - output_path = pathlib.Path(output_json) - output_path.parent.mkdir(parents=True, exist_ok=True) - - images = list() - annotations = list() - - with open(input_json, 'r') as f: - ann_list = json.load(f) - - for item_idx, item in enumerate(ann_list): - # each image is an item - image_name = item['file_upload'] - image_id = len(images) - width, height = None, None - - # skip tasks without annotations - if not item['annotations']: - logger.warning('No annotations found for item #' + - str(item_idx)) - continue - - kp_num = 0 - for i, label in enumerate(item['annotations'][0]['result']): - category_name = None - - # valid label - for key in [ - 'rectanglelabels', 'polygonlabels', 'labels', - 'keypointlabels' - ]: - if key == label['type'] and len(label['value'][key]) > 0: - category_name = label['value'][key][0] - break - - if category_name is None: - logger.warning('Unknown label type or labels are empty') - continue - - if not height or not width: - if 'original_width' not in label or \ - 'original_height' not in label: - logger.debug( - f'original_width or original_height not found' - f'in {image_name}') - continue - - # get height and width info from annotations - width, height = label['original_width'], label[ - 'original_height'] - images = add_image(images, width, height, image_id, - image_name) - - category_id = self.category_name_to_id[category_name] - - annotation_id = len(annotations) - - if 'rectanglelabels' == label['type'] or 'labels' == label[ - 'type']: - - x = label['value']['x'] - y = label['value']['y'] - w = label['value']['width'] - h = label['value']['height'] - - x = x * label['original_width'] / 100 - y = y * label['original_height'] / 100 - w = w * label['original_width'] / 100 - h = h * label['original_height'] / 100 - - # rect annotation should be later than keypoints - annotations[-1]['bbox'] = [x, y, w, h] - annotations[-1]['area'] = w * h - annotations[-1]['num_keypoints'] = kp_num - - elif 'polygonlabels' == label['type']: - points_abs = [(x / 100 * width, y / 100 * height) - for x, y in label['value']['points']] - x, y = zip(*points_abs) - - x1, y1, x2, y2 = min(x), min(y), max(x), max(y) - - # calculate bbox and area from polygon's points - # which may be different with rect annotation - bbox = [x1, y1, x2 - x1, y2 - y1] - area = float(0.5 * np.abs( - np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))) - - # polygon label should be later than keypoints - annotations[-1]['segmentation'] = [[ - coord for point in points_abs for coord in point - ]] - annotations[-1]['bbox'] = bbox - annotations[-1]['area'] = area - annotations[-1]['num_keypoints'] = kp_num - - elif 'keypointlabels' == label['type']: - x = label['value']['x'] * label['original_width'] / 100 - y = label['value']['y'] * label['original_height'] / 100 - - # there is no method to annotate visible in Label Studio - # so the keypoints' visible code will be 2 except (0,0) - if x == y == 0: - current_kp = [x, y, 0] - kp_num_change = 0 - else: - current_kp = [x, y, 2] - kp_num_change = 1 - - # create new annotation in coco - # when the keypoint is the first point of an instance - if i == 0 or item['annotations'][0]['result'][ - i - 1]['type'] != 'keypointlabels': - annotations.append({ - 'id': annotation_id, - 'image_id': image_id, - 'category_id': category_id, - 'keypoints': current_kp, - 'ignore': 0, - 
'iscrowd': 0, - }) - kp_num = kp_num_change - else: - annotations[-1]['keypoints'].extend(current_kp) - kp_num += kp_num_change - - with io.open(output_json, mode='w', encoding='utf8') as fout: - json.dump( - { - 'images': images, - 'categories': self.categories, - 'annotations': annotations, - 'info': { - 'year': datetime.now().year, - 'version': '1.0', - 'description': '', - 'contributor': 'Label Studio', - 'url': '', - 'date_created': str(datetime.now()), - }, - }, - fout, - indent=2, - ) - - -def main(): - args = parse_args() - config = args.config - input_json = args.input - output_json = args.output - converter = LSConverter(config) - converter.convert_to_coco(input_json, output_json) - - -if __name__ == '__main__': - main() +# ----------------------------------------------------------------------------- +# Based on https://github.com/heartexlabs/label-studio-converter +# Original license: Copyright (c) Heartex, under the Apache 2.0 License. +# ----------------------------------------------------------------------------- + +import argparse +import io +import json +import logging +import pathlib +import xml.etree.ElementTree as ET +from datetime import datetime + +import numpy as np + +logger = logging.getLogger(__name__) + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert Label Studio JSON file to COCO format JSON File') + parser.add_argument('config', help='Labeling Interface xml code file path') + parser.add_argument('input', help='Label Studio format JSON file path') + parser.add_argument('output', help='The output COCO format JSON file path') + args = parser.parse_args() + return args + + +class LSConverter: + + def __init__(self, config: str): + """Convert the Label Studio Format JSON file to COCO format JSON file + which is needed by mmpose. + + The annotations in label studio must follow the order: + keypoint 1, keypoint 2... keypoint n, rect of the instance, + polygon of the instance, + then annotations of the next instance. + Where the order of rect and polygon can be switched, + the bbox and area of the instance will be calculated with + the data behind. + + Only annotating one of rect and polygon is also acceptable. + Args: + config (str): The annotations config xml file. + The xml content is from Project Setting -> + Label Interface -> Code. + Example: + ``` + + + + + + + + + + ``` + """ + # get label info from config file + tree = ET.parse(config) + root = tree.getroot() + labels = root.findall('.//KeyPointLabels/Label') + label_values = [label.get('value') for label in labels] + + self.categories = list() + self.category_name_to_id = dict() + for i, value in enumerate(label_values): + # category id start with 1 + self.categories.append({'id': i + 1, 'name': value}) + self.category_name_to_id[value] = i + 1 + + def convert_to_coco(self, input_json: str, output_json: str): + """Convert `input_json` to COCO format and save in `output_json`. + + Args: + input_json (str): The path of Label Studio format JSON file. + output_json (str): The path of the output COCO JSON file. 
+ """ + + def add_image(images, width, height, image_id, image_path): + images.append({ + 'width': width, + 'height': height, + 'id': image_id, + 'file_name': image_path, + }) + return images + + output_path = pathlib.Path(output_json) + output_path.parent.mkdir(parents=True, exist_ok=True) + + images = list() + annotations = list() + + with open(input_json, 'r') as f: + ann_list = json.load(f) + + for item_idx, item in enumerate(ann_list): + # each image is an item + image_name = item['file_upload'] + image_id = len(images) + width, height = None, None + + # skip tasks without annotations + if not item['annotations']: + logger.warning('No annotations found for item #' + + str(item_idx)) + continue + + kp_num = 0 + for i, label in enumerate(item['annotations'][0]['result']): + category_name = None + + # valid label + for key in [ + 'rectanglelabels', 'polygonlabels', 'labels', + 'keypointlabels' + ]: + if key == label['type'] and len(label['value'][key]) > 0: + category_name = label['value'][key][0] + break + + if category_name is None: + logger.warning('Unknown label type or labels are empty') + continue + + if not height or not width: + if 'original_width' not in label or \ + 'original_height' not in label: + logger.debug( + f'original_width or original_height not found' + f'in {image_name}') + continue + + # get height and width info from annotations + width, height = label['original_width'], label[ + 'original_height'] + images = add_image(images, width, height, image_id, + image_name) + + category_id = self.category_name_to_id[category_name] + + annotation_id = len(annotations) + + if 'rectanglelabels' == label['type'] or 'labels' == label[ + 'type']: + + x = label['value']['x'] + y = label['value']['y'] + w = label['value']['width'] + h = label['value']['height'] + + x = x * label['original_width'] / 100 + y = y * label['original_height'] / 100 + w = w * label['original_width'] / 100 + h = h * label['original_height'] / 100 + + # rect annotation should be later than keypoints + annotations[-1]['bbox'] = [x, y, w, h] + annotations[-1]['area'] = w * h + annotations[-1]['num_keypoints'] = kp_num + + elif 'polygonlabels' == label['type']: + points_abs = [(x / 100 * width, y / 100 * height) + for x, y in label['value']['points']] + x, y = zip(*points_abs) + + x1, y1, x2, y2 = min(x), min(y), max(x), max(y) + + # calculate bbox and area from polygon's points + # which may be different with rect annotation + bbox = [x1, y1, x2 - x1, y2 - y1] + area = float(0.5 * np.abs( + np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))) + + # polygon label should be later than keypoints + annotations[-1]['segmentation'] = [[ + coord for point in points_abs for coord in point + ]] + annotations[-1]['bbox'] = bbox + annotations[-1]['area'] = area + annotations[-1]['num_keypoints'] = kp_num + + elif 'keypointlabels' == label['type']: + x = label['value']['x'] * label['original_width'] / 100 + y = label['value']['y'] * label['original_height'] / 100 + + # there is no method to annotate visible in Label Studio + # so the keypoints' visible code will be 2 except (0,0) + if x == y == 0: + current_kp = [x, y, 0] + kp_num_change = 0 + else: + current_kp = [x, y, 2] + kp_num_change = 1 + + # create new annotation in coco + # when the keypoint is the first point of an instance + if i == 0 or item['annotations'][0]['result'][ + i - 1]['type'] != 'keypointlabels': + annotations.append({ + 'id': annotation_id, + 'image_id': image_id, + 'category_id': category_id, + 'keypoints': current_kp, + 'ignore': 0, + 
'iscrowd': 0, + }) + kp_num = kp_num_change + else: + annotations[-1]['keypoints'].extend(current_kp) + kp_num += kp_num_change + + with io.open(output_json, mode='w', encoding='utf8') as fout: + json.dump( + { + 'images': images, + 'categories': self.categories, + 'annotations': annotations, + 'info': { + 'year': datetime.now().year, + 'version': '1.0', + 'description': '', + 'contributor': 'Label Studio', + 'url': '', + 'date_created': str(datetime.now()), + }, + }, + fout, + indent=2, + ) + + +def main(): + args = parse_args() + config = args.config + input_json = args.input + output_json = args.output + converter = LSConverter(config) + converter.convert_to_coco(input_json, output_json) + + +if __name__ == '__main__': + main() diff --git a/tools/dataset_converters/lapa2coco.py b/tools/dataset_converters/lapa2coco.py index 1e679b6365..cc4cdc79ea 100644 --- a/tools/dataset_converters/lapa2coco.py +++ b/tools/dataset_converters/lapa2coco.py @@ -1,111 +1,111 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import json -import os -import os.path as osp -import time - -import cv2 -import mmengine -import numpy as np - - -def default_dump(obj): - """Convert numpy classes to JSON serializable objects.""" - if isinstance(obj, (np.integer, np.floating, np.bool_)): - return obj.item() - elif isinstance(obj, np.ndarray): - return obj.tolist() - else: - return obj - - -def convert_labpa_to_coco(ann_dir, out_file): - annotations = [] - images = [] - cnt = 0 - - if 'trainval' in ann_dir: - ann_dir_list = ['train', 'val'] - else: - ann_dir_list = [ann_dir] - - for tv in ann_dir_list: - ann_dir = 'data/LaPa/' + tv - landmark_dir = osp.join(ann_dir, 'landmarks') - ann_list = os.listdir(landmark_dir) - - img_dir = osp.join(ann_dir, 'images') - - for idx, ann_file in enumerate(mmengine.track_iter_progress(ann_list)): - cnt += 1 - ann_path = osp.join(landmark_dir, ann_file) - file_name = ann_file[:-4] + '.jpg' - img_path = osp.join(img_dir, file_name) - data_info = open(ann_path).readlines() - - img = cv2.imread(img_path) - - keypoints = [] - for line in data_info[1:]: - x, y = line.strip().split(' ') - x, y = float(x), float(y) - keypoints.append([x, y, 2]) - keypoints = np.array(keypoints) - - x1, y1, _ = np.amin(keypoints, axis=0) - x2, y2, _ = np.amax(keypoints, axis=0) - w, h = x2 - x1, y2 - y1 - bbox = [x1, y1, w, h] - - image = {} - image['id'] = cnt - image['file_name'] = f'{tv}/images/{file_name}' - image['height'] = img.shape[0] - image['width'] = img.shape[1] - images.append(image) - - ann = {} - ann['keypoints'] = keypoints.reshape(-1).tolist() - ann['image_id'] = cnt - ann['id'] = cnt - ann['num_keypoints'] = len(keypoints) - ann['bbox'] = bbox - ann['iscrowd'] = 0 - ann['area'] = int(ann['bbox'][2] * ann['bbox'][3]) - ann['category_id'] = 1 - - annotations.append(ann) - - cocotype = {} - - cocotype['info'] = {} - cocotype['info']['description'] = 'LaPa Generated by MMPose Team' - cocotype['info']['version'] = 1.0 - cocotype['info']['year'] = time.strftime('%Y', time.localtime()) - cocotype['info']['date_created'] = time.strftime('%Y/%m/%d', - time.localtime()) - - cocotype['images'] = images - cocotype['annotations'] = annotations - cocotype['categories'] = [{ - 'supercategory': 'person', - 'id': 1, - 'name': 'face', - 'keypoints': [], - 'skeleton': [] - }] - - json.dump( - cocotype, - open(out_file, 'w'), - ensure_ascii=False, - default=default_dump) - print(f'done {out_file}') - - -if __name__ == '__main__': - if not osp.exists('data/LaPa/annotations'): - 
os.makedirs('data/LaPa/annotations') - for tv in ['val', 'test', 'train', 'trainval']: - print(f'processing {tv}') - convert_labpa_to_coco(tv, f'data/LaPa/annotations/lapa_{tv}.json') +# Copyright (c) OpenMMLab. All rights reserved. +import json +import os +import os.path as osp +import time + +import cv2 +import mmengine +import numpy as np + + +def default_dump(obj): + """Convert numpy classes to JSON serializable objects.""" + if isinstance(obj, (np.integer, np.floating, np.bool_)): + return obj.item() + elif isinstance(obj, np.ndarray): + return obj.tolist() + else: + return obj + + +def convert_labpa_to_coco(ann_dir, out_file): + annotations = [] + images = [] + cnt = 0 + + if 'trainval' in ann_dir: + ann_dir_list = ['train', 'val'] + else: + ann_dir_list = [ann_dir] + + for tv in ann_dir_list: + ann_dir = 'data/LaPa/' + tv + landmark_dir = osp.join(ann_dir, 'landmarks') + ann_list = os.listdir(landmark_dir) + + img_dir = osp.join(ann_dir, 'images') + + for idx, ann_file in enumerate(mmengine.track_iter_progress(ann_list)): + cnt += 1 + ann_path = osp.join(landmark_dir, ann_file) + file_name = ann_file[:-4] + '.jpg' + img_path = osp.join(img_dir, file_name) + data_info = open(ann_path).readlines() + + img = cv2.imread(img_path) + + keypoints = [] + for line in data_info[1:]: + x, y = line.strip().split(' ') + x, y = float(x), float(y) + keypoints.append([x, y, 2]) + keypoints = np.array(keypoints) + + x1, y1, _ = np.amin(keypoints, axis=0) + x2, y2, _ = np.amax(keypoints, axis=0) + w, h = x2 - x1, y2 - y1 + bbox = [x1, y1, w, h] + + image = {} + image['id'] = cnt + image['file_name'] = f'{tv}/images/{file_name}' + image['height'] = img.shape[0] + image['width'] = img.shape[1] + images.append(image) + + ann = {} + ann['keypoints'] = keypoints.reshape(-1).tolist() + ann['image_id'] = cnt + ann['id'] = cnt + ann['num_keypoints'] = len(keypoints) + ann['bbox'] = bbox + ann['iscrowd'] = 0 + ann['area'] = int(ann['bbox'][2] * ann['bbox'][3]) + ann['category_id'] = 1 + + annotations.append(ann) + + cocotype = {} + + cocotype['info'] = {} + cocotype['info']['description'] = 'LaPa Generated by MMPose Team' + cocotype['info']['version'] = 1.0 + cocotype['info']['year'] = time.strftime('%Y', time.localtime()) + cocotype['info']['date_created'] = time.strftime('%Y/%m/%d', + time.localtime()) + + cocotype['images'] = images + cocotype['annotations'] = annotations + cocotype['categories'] = [{ + 'supercategory': 'person', + 'id': 1, + 'name': 'face', + 'keypoints': [], + 'skeleton': [] + }] + + json.dump( + cocotype, + open(out_file, 'w'), + ensure_ascii=False, + default=default_dump) + print(f'done {out_file}') + + +if __name__ == '__main__': + if not osp.exists('data/LaPa/annotations'): + os.makedirs('data/LaPa/annotations') + for tv in ['val', 'test', 'train', 'trainval']: + print(f'processing {tv}') + convert_labpa_to_coco(tv, f'data/LaPa/annotations/lapa_{tv}.json') diff --git a/tools/dataset_converters/mat2json.py b/tools/dataset_converters/mat2json.py index caf7453e70..0a821e37d4 100644 --- a/tools/dataset_converters/mat2json.py +++ b/tools/dataset_converters/mat2json.py @@ -1,60 +1,60 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
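The LaPa converter above hard-codes the data/LaPa/<split>/{images,landmarks} layout and is normally run as a script; calling it for a single split looks like the sketch below (repository root assumed to be on sys.path, dataset assumed to be unpacked under data/LaPa).

import os
import os.path as osp

from tools.dataset_converters.lapa2coco import convert_labpa_to_coco

if not osp.exists('data/LaPa/annotations'):
    os.makedirs('data/LaPa/annotations')

# Writes a COCO-style face keypoint annotation file for the validation split.
convert_labpa_to_coco('val', 'data/LaPa/annotations/lapa_val.json')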
-import argparse -import json -import time - -from scipy.io import loadmat - - -def parse_args(): - parser = argparse.ArgumentParser( - description='Converting the predicted .mat file to .json file.') - parser.add_argument('pred_mat_file', help='input prediction mat file.') - parser.add_argument( - 'gt_json_file', - help='input ground-truth json file to get the image name. ' - 'Default: "data/mpii/mpii_val.json" ') - parser.add_argument('output_json_file', help='output converted json file.') - args = parser.parse_args() - return args - - -def save_json(list_file, path): - with open(path, 'w') as f: - json.dump(list_file, f, indent=4) - return 0 - - -def convert_mat(pred_mat_file, gt_json_file, output_json_file): - res = loadmat(pred_mat_file) - preds = res['preds'] - N = preds.shape[0] - - with open(gt_json_file) as anno_file: - anno = json.load(anno_file) - - assert len(anno) == N - - instance = {} - - for pred, ann in zip(preds, anno): - ann.pop('joints_vis') - ann['joints'] = pred.tolist() - - instance['annotations'] = anno - instance['info'] = {} - instance['info']['description'] = 'Converted MPII prediction.' - instance['info']['year'] = time.strftime('%Y', time.localtime()) - instance['info']['date_created'] = time.strftime('%Y/%m/%d', - time.localtime()) - - save_json(instance, output_json_file) - - -def main(): - args = parse_args() - convert_mat(args.pred_mat_file, args.gt_json_file, args.output_json_file) - - -if __name__ == '__main__': - main() +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import json +import time + +from scipy.io import loadmat + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Converting the predicted .mat file to .json file.') + parser.add_argument('pred_mat_file', help='input prediction mat file.') + parser.add_argument( + 'gt_json_file', + help='input ground-truth json file to get the image name. ' + 'Default: "data/mpii/mpii_val.json" ') + parser.add_argument('output_json_file', help='output converted json file.') + args = parser.parse_args() + return args + + +def save_json(list_file, path): + with open(path, 'w') as f: + json.dump(list_file, f, indent=4) + return 0 + + +def convert_mat(pred_mat_file, gt_json_file, output_json_file): + res = loadmat(pred_mat_file) + preds = res['preds'] + N = preds.shape[0] + + with open(gt_json_file) as anno_file: + anno = json.load(anno_file) + + assert len(anno) == N + + instance = {} + + for pred, ann in zip(preds, anno): + ann.pop('joints_vis') + ann['joints'] = pred.tolist() + + instance['annotations'] = anno + instance['info'] = {} + instance['info']['description'] = 'Converted MPII prediction.' + instance['info']['year'] = time.strftime('%Y', time.localtime()) + instance['info']['date_created'] = time.strftime('%Y/%m/%d', + time.localtime()) + + save_json(instance, output_json_file) + + +def main(): + args = parse_args() + convert_mat(args.pred_mat_file, args.gt_json_file, args.output_json_file) + + +if __name__ == '__main__': + main() diff --git a/tools/dataset_converters/parse_animalpose_dataset.py b/tools/dataset_converters/parse_animalpose_dataset.py index db37860164..0d322a0286 100644 --- a/tools/dataset_converters/parse_animalpose_dataset.py +++ b/tools/dataset_converters/parse_animalpose_dataset.py @@ -1,436 +1,436 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
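mat2json.py above is a thin wrapper around convert_mat; a direct call is sketched below, with all three paths as hypothetical placeholders.

from tools.dataset_converters.mat2json import convert_mat

# Merges predicted MPII joints from a .mat file into the ground-truth entries
# and dumps the result as a JSON file (paths are placeholders).
convert_mat(
    pred_mat_file='work_dirs/mpii/pred.mat',
    gt_json_file='data/mpii/mpii_val.json',
    output_json_file='work_dirs/mpii/pred_converted.json')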
-import json -import os -import re -import time -import warnings - -import cv2 -import numpy as np -import xmltodict -from xtcocotools.coco import COCO - -np.random.seed(0) - - -def list_all_files(root_dir, ext='.xml'): - """List all files in the root directory and all its sub directories. - - :param root_dir: root directory - :param ext: filename extension - :return: list of files - """ - files = [] - file_list = os.listdir(root_dir) - for i in range(0, len(file_list)): - path = os.path.join(root_dir, file_list[i]) - if os.path.isdir(path): - files.extend(list_all_files(path)) - if os.path.isfile(path): - if path.lower().endswith(ext): - files.append(path) - return files - - -def get_anno_info(): - keypoints_info = [ - 'L_Eye', - 'R_Eye', - 'L_EarBase', - 'R_EarBase', - 'Nose', - 'Throat', - 'TailBase', - 'Withers', - 'L_F_Elbow', - 'R_F_Elbow', - 'L_B_Elbow', - 'R_B_Elbow', - 'L_F_Knee', - 'R_F_Knee', - 'L_B_Knee', - 'R_B_Knee', - 'L_F_Paw', - 'R_F_Paw', - 'L_B_Paw', - 'R_B_Paw', - ] - skeleton_info = [[1, 2], [1, 3], [2, 4], [1, 5], [2, 5], [5, 6], [6, 8], - [7, 8], [6, 9], [9, 13], [13, 17], [6, 10], [10, 14], - [14, 18], [7, 11], [11, 15], [15, 19], [7, 12], [12, 16], - [16, 20]] - category_info = [{ - 'supercategory': 'animal', - 'id': 1, - 'name': 'animal', - 'keypoints': keypoints_info, - 'skeleton': skeleton_info - }] - - return keypoints_info, skeleton_info, category_info - - -def xml2coco_trainval(file_list, img_root, save_path, start_ann_id=0): - """Save annotations in coco-format. - - :param file_list: list of data annotation files. - :param img_root: the root dir to load images. - :param save_path: the path to save transformed annotation file. - :param start_ann_id: the starting point to count the annotation id. - :param val_num: the number of annotated objects for validation. 
- """ - images = [] - annotations = [] - img_ids = [] - ann_ids = [] - - ann_id = start_ann_id - - name2id = { - 'L_Eye': 0, - 'R_Eye': 1, - 'L_EarBase': 2, - 'R_EarBase': 3, - 'Nose': 4, - 'Throat': 5, - 'TailBase': 6, - 'Withers': 7, - 'L_F_Elbow': 8, - 'R_F_Elbow': 9, - 'L_B_Elbow': 10, - 'R_B_Elbow': 11, - 'L_F_Knee': 12, - 'R_F_Knee': 13, - 'L_B_Knee': 14, - 'R_B_Knee': 15, - 'L_F_Paw': 16, - 'R_F_Paw': 17, - 'L_B_Paw': 18, - 'R_B_Paw': 19 - } - for file in file_list: - data_anno = xmltodict.parse(open(file).read())['annotation'] - - img_id = int(data_anno['image'].split('_')[0] + - data_anno['image'].split('_')[1]) - - if img_id not in img_ids: - image_name = 'VOC2012/JPEGImages/' + data_anno['image'] + '.jpg' - img = cv2.imread(os.path.join(img_root, image_name)) - - image = {} - image['id'] = img_id - image['file_name'] = image_name - image['height'] = img.shape[0] - image['width'] = img.shape[1] - - images.append(image) - img_ids.append(img_id) - else: - pass - - keypoint_anno = data_anno['keypoints']['keypoint'] - assert len(keypoint_anno) == 20 - - keypoints = np.zeros([20, 3], dtype=np.float32) - - for kpt_anno in keypoint_anno: - keypoint_name = kpt_anno['@name'] - keypoint_id = name2id[keypoint_name] - - visibility = int(kpt_anno['@visible']) - - if visibility == 0: - continue - else: - keypoints[keypoint_id, 0] = float(kpt_anno['@x']) - keypoints[keypoint_id, 1] = float(kpt_anno['@y']) - keypoints[keypoint_id, 2] = 2 - - anno = {} - anno['keypoints'] = keypoints.reshape(-1).tolist() - anno['image_id'] = img_id - anno['id'] = ann_id - anno['num_keypoints'] = int(sum(keypoints[:, 2] > 0)) - - visible_bounds = data_anno['visible_bounds'] - anno['bbox'] = [ - float(visible_bounds['@xmin']), - float(visible_bounds['@ymin']), - float(visible_bounds['@width']), - float(visible_bounds['@height']) - ] - anno['iscrowd'] = 0 - anno['area'] = float(anno['bbox'][2] * anno['bbox'][3]) - anno['category_id'] = 1 - - annotations.append(anno) - ann_ids.append(ann_id) - ann_id += 1 - - cocotype = {} - - cocotype['info'] = {} - cocotype['info'][ - 'description'] = 'AnimalPose dataset Generated by MMPose Team' - cocotype['info']['version'] = '1.0' - cocotype['info']['year'] = time.strftime('%Y', time.localtime()) - cocotype['info']['date_created'] = time.strftime('%Y/%m/%d', - time.localtime()) - - cocotype['images'] = images - cocotype['annotations'] = annotations - - keypoints_info, skeleton_info, category_info = get_anno_info() - - cocotype['categories'] = category_info - - os.makedirs(os.path.dirname(save_path), exist_ok=True) - json.dump(cocotype, open(save_path, 'w'), indent=4) - print('number of images:', len(img_ids)) - print('number of annotations:', len(ann_ids)) - print(f'done {save_path}') - - -def xml2coco_test(file_list, img_root, save_path, start_ann_id=0): - """Save annotations in coco-format. - - :param file_list: list of data annotation files. - :param img_root: the root dir to load images. - :param save_path: the path to save transformed annotation file. - :param start_ann_id: the starting point to count the annotation id. 
- """ - images = [] - annotations = [] - img_ids = [] - ann_ids = [] - - ann_id = start_ann_id - - name2id = { - 'L_eye': 0, - 'R_eye': 1, - 'L_ear': 2, - 'R_ear': 3, - 'Nose': 4, - 'Throat': 5, - 'Tail': 6, - 'withers': 7, - 'L_F_elbow': 8, - 'R_F_elbow': 9, - 'L_B_elbow': 10, - 'R_B_elbow': 11, - 'L_F_knee': 12, - 'R_F_knee': 13, - 'L_B_knee': 14, - 'R_B_knee': 15, - 'L_F_paw': 16, - 'R_F_paw': 17, - 'L_B_paw': 18, - 'R_B_paw': 19 - } - - cat2id = {'cat': 1, 'cow': 2, 'dog': 3, 'horse': 4, 'sheep': 5} - - for file in file_list: - data_anno = xmltodict.parse(open(file).read())['annotation'] - - category_id = cat2id[data_anno['category']] - - img_id = category_id * 1000 + int( - re.findall(r'\d+', data_anno['image'])[0]) - - assert img_id not in img_ids - - # prepare images - image_name = os.path.join('animalpose_image_part2', - data_anno['category'], data_anno['image']) - img = cv2.imread(os.path.join(img_root, image_name)) - - image = {} - image['id'] = img_id - image['file_name'] = image_name - image['height'] = img.shape[0] - image['width'] = img.shape[1] - - images.append(image) - img_ids.append(img_id) - - # prepare annotations - keypoint_anno = data_anno['keypoints']['keypoint'] - keypoints = np.zeros([20, 3], dtype=np.float32) - - for kpt_anno in keypoint_anno: - keypoint_name = kpt_anno['@name'] - keypoint_id = name2id[keypoint_name] - - visibility = int(kpt_anno['@visible']) - - if visibility == 0: - continue - else: - keypoints[keypoint_id, 0] = float(kpt_anno['@x']) - keypoints[keypoint_id, 1] = float(kpt_anno['@y']) - keypoints[keypoint_id, 2] = 2 - - anno = {} - anno['keypoints'] = keypoints.reshape(-1).tolist() - anno['image_id'] = img_id - anno['id'] = ann_id - anno['num_keypoints'] = int(sum(keypoints[:, 2] > 0)) - - visible_bounds = data_anno['visible_bounds'] - anno['bbox'] = [ - float(visible_bounds['@xmin']), - float(visible_bounds['@xmax'] - ), # typo in original xml: should be 'ymin' - float(visible_bounds['@width']), - float(visible_bounds['@height']) - ] - anno['iscrowd'] = 0 - anno['area'] = float(anno['bbox'][2] * anno['bbox'][3]) - anno['category_id'] = 1 - - annotations.append(anno) - ann_ids.append(ann_id) - ann_id += 1 - - cocotype = {} - - cocotype['info'] = {} - cocotype['info'][ - 'description'] = 'AnimalPose dataset Generated by MMPose Team' - cocotype['info']['version'] = '1.0' - cocotype['info']['year'] = time.strftime('%Y', time.localtime()) - cocotype['info']['date_created'] = time.strftime('%Y/%m/%d', - time.localtime()) - - cocotype['images'] = images - cocotype['annotations'] = annotations - - keypoints_info, skeleton_info, category_info = get_anno_info() - - cocotype['categories'] = category_info - - os.makedirs(os.path.dirname(save_path), exist_ok=True) - json.dump(cocotype, open(save_path, 'w'), indent=4) - print('=========================================================') - print('number of images:', len(img_ids)) - print('number of annotations:', len(ann_ids)) - print(f'done {save_path}') - - -def split_train_val(work_dir, trainval_file, train_file, val_file, - val_ann_num): - """Split train-val json file into training and validation files. - - :param work_dir: path to load train-val json file, and save split files. - :param trainval_file: The input json file combining both train and val. - :param trainval_file: The output json file for training. - :param trainval_file: The output json file for validation. - :param val_ann_num: the number of validation annotations. 
- """ - - coco = COCO(os.path.join(work_dir, trainval_file)) - - img_list = list(coco.imgs.keys()) - np.random.shuffle(img_list) - - count = 0 - - images_train = [] - images_val = [] - annotations_train = [] - annotations_val = [] - - for img_id in img_list: - ann_ids = coco.getAnnIds(img_id) - - if count + len(ann_ids) <= val_ann_num: - # for validation - count += len(ann_ids) - images_val.append(coco.imgs[img_id]) - for ann_id in ann_ids: - annotations_val.append(coco.anns[ann_id]) - - else: - images_train.append(coco.imgs[img_id]) - for ann_id in ann_ids: - annotations_train.append(coco.anns[ann_id]) - - if count == val_ann_num: - print(f'We have found {count} annotations for validation.') - else: - warnings.warn( - f'We only found {count} annotations, instead of {val_ann_num}.') - - cocotype_train = {} - cocotype_val = {} - - keypoints_info, skeleton_info, category_info = get_anno_info() - - cocotype_train['info'] = {} - cocotype_train['info'][ - 'description'] = 'AnimalPose dataset Generated by MMPose Team' - cocotype_train['info']['version'] = '1.0' - cocotype_train['info']['year'] = time.strftime('%Y', time.localtime()) - cocotype_train['info']['date_created'] = time.strftime( - '%Y/%m/%d', time.localtime()) - cocotype_train['images'] = images_train - cocotype_train['annotations'] = annotations_train - cocotype_train['categories'] = category_info - - json.dump( - cocotype_train, - open(os.path.join(work_dir, train_file), 'w'), - indent=4) - print('=========================================================') - print('number of images:', len(images_train)) - print('number of annotations:', len(annotations_train)) - print(f'done {train_file}') - - cocotype_val['info'] = {} - cocotype_val['info'][ - 'description'] = 'AnimalPose dataset Generated by MMPose Team' - cocotype_val['info']['version'] = '1.0' - cocotype_val['info']['year'] = time.strftime('%Y', time.localtime()) - cocotype_val['info']['date_created'] = time.strftime( - '%Y/%m/%d', time.localtime()) - cocotype_val['images'] = images_val - cocotype_val['annotations'] = annotations_val - cocotype_val['categories'] = category_info - - json.dump( - cocotype_val, open(os.path.join(work_dir, val_file), 'w'), indent=4) - print('=========================================================') - print('number of images:', len(images_val)) - print('number of annotations:', len(annotations_val)) - print(f'done {val_file}') - - -dataset_dir = 'data/animalpose/' - -# We choose the images from PascalVOC for train + val -# In total, train+val: 3608 images, 5117 annotations -xml2coco_trainval( - list_all_files(os.path.join(dataset_dir, 'PASCAL2011_animal_annotation')), - dataset_dir, - os.path.join(dataset_dir, 'annotations', 'animalpose_trainval.json'), - start_ann_id=1000000) - -# train: 2798 images, 4000 annotations -# val: 810 images, 1117 annotations -split_train_val( - os.path.join(dataset_dir, 'annotations'), - 'animalpose_trainval.json', - 'animalpose_train.json', - 'animalpose_val.json', - val_ann_num=1117) - -# We choose the remaining 1000 images for test -# 1000 images, 1000 annotations -xml2coco_test( - list_all_files(os.path.join(dataset_dir, 'animalpose_anno2')), - dataset_dir, - os.path.join(dataset_dir, 'annotations', 'animalpose_test.json'), - start_ann_id=0) +# Copyright (c) OpenMMLab. All rights reserved. 
+import json +import os +import re +import time +import warnings + +import cv2 +import numpy as np +import xmltodict +from xtcocotools.coco import COCO + +np.random.seed(0) + + +def list_all_files(root_dir, ext='.xml'): + """List all files in the root directory and all its sub directories. + + :param root_dir: root directory + :param ext: filename extension + :return: list of files + """ + files = [] + file_list = os.listdir(root_dir) + for i in range(0, len(file_list)): + path = os.path.join(root_dir, file_list[i]) + if os.path.isdir(path): + files.extend(list_all_files(path)) + if os.path.isfile(path): + if path.lower().endswith(ext): + files.append(path) + return files + + +def get_anno_info(): + keypoints_info = [ + 'L_Eye', + 'R_Eye', + 'L_EarBase', + 'R_EarBase', + 'Nose', + 'Throat', + 'TailBase', + 'Withers', + 'L_F_Elbow', + 'R_F_Elbow', + 'L_B_Elbow', + 'R_B_Elbow', + 'L_F_Knee', + 'R_F_Knee', + 'L_B_Knee', + 'R_B_Knee', + 'L_F_Paw', + 'R_F_Paw', + 'L_B_Paw', + 'R_B_Paw', + ] + skeleton_info = [[1, 2], [1, 3], [2, 4], [1, 5], [2, 5], [5, 6], [6, 8], + [7, 8], [6, 9], [9, 13], [13, 17], [6, 10], [10, 14], + [14, 18], [7, 11], [11, 15], [15, 19], [7, 12], [12, 16], + [16, 20]] + category_info = [{ + 'supercategory': 'animal', + 'id': 1, + 'name': 'animal', + 'keypoints': keypoints_info, + 'skeleton': skeleton_info + }] + + return keypoints_info, skeleton_info, category_info + + +def xml2coco_trainval(file_list, img_root, save_path, start_ann_id=0): + """Save annotations in coco-format. + + :param file_list: list of data annotation files. + :param img_root: the root dir to load images. + :param save_path: the path to save transformed annotation file. + :param start_ann_id: the starting point to count the annotation id. + :param val_num: the number of annotated objects for validation. 
+ """ + images = [] + annotations = [] + img_ids = [] + ann_ids = [] + + ann_id = start_ann_id + + name2id = { + 'L_Eye': 0, + 'R_Eye': 1, + 'L_EarBase': 2, + 'R_EarBase': 3, + 'Nose': 4, + 'Throat': 5, + 'TailBase': 6, + 'Withers': 7, + 'L_F_Elbow': 8, + 'R_F_Elbow': 9, + 'L_B_Elbow': 10, + 'R_B_Elbow': 11, + 'L_F_Knee': 12, + 'R_F_Knee': 13, + 'L_B_Knee': 14, + 'R_B_Knee': 15, + 'L_F_Paw': 16, + 'R_F_Paw': 17, + 'L_B_Paw': 18, + 'R_B_Paw': 19 + } + for file in file_list: + data_anno = xmltodict.parse(open(file).read())['annotation'] + + img_id = int(data_anno['image'].split('_')[0] + + data_anno['image'].split('_')[1]) + + if img_id not in img_ids: + image_name = 'VOC2012/JPEGImages/' + data_anno['image'] + '.jpg' + img = cv2.imread(os.path.join(img_root, image_name)) + + image = {} + image['id'] = img_id + image['file_name'] = image_name + image['height'] = img.shape[0] + image['width'] = img.shape[1] + + images.append(image) + img_ids.append(img_id) + else: + pass + + keypoint_anno = data_anno['keypoints']['keypoint'] + assert len(keypoint_anno) == 20 + + keypoints = np.zeros([20, 3], dtype=np.float32) + + for kpt_anno in keypoint_anno: + keypoint_name = kpt_anno['@name'] + keypoint_id = name2id[keypoint_name] + + visibility = int(kpt_anno['@visible']) + + if visibility == 0: + continue + else: + keypoints[keypoint_id, 0] = float(kpt_anno['@x']) + keypoints[keypoint_id, 1] = float(kpt_anno['@y']) + keypoints[keypoint_id, 2] = 2 + + anno = {} + anno['keypoints'] = keypoints.reshape(-1).tolist() + anno['image_id'] = img_id + anno['id'] = ann_id + anno['num_keypoints'] = int(sum(keypoints[:, 2] > 0)) + + visible_bounds = data_anno['visible_bounds'] + anno['bbox'] = [ + float(visible_bounds['@xmin']), + float(visible_bounds['@ymin']), + float(visible_bounds['@width']), + float(visible_bounds['@height']) + ] + anno['iscrowd'] = 0 + anno['area'] = float(anno['bbox'][2] * anno['bbox'][3]) + anno['category_id'] = 1 + + annotations.append(anno) + ann_ids.append(ann_id) + ann_id += 1 + + cocotype = {} + + cocotype['info'] = {} + cocotype['info'][ + 'description'] = 'AnimalPose dataset Generated by MMPose Team' + cocotype['info']['version'] = '1.0' + cocotype['info']['year'] = time.strftime('%Y', time.localtime()) + cocotype['info']['date_created'] = time.strftime('%Y/%m/%d', + time.localtime()) + + cocotype['images'] = images + cocotype['annotations'] = annotations + + keypoints_info, skeleton_info, category_info = get_anno_info() + + cocotype['categories'] = category_info + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + json.dump(cocotype, open(save_path, 'w'), indent=4) + print('number of images:', len(img_ids)) + print('number of annotations:', len(ann_ids)) + print(f'done {save_path}') + + +def xml2coco_test(file_list, img_root, save_path, start_ann_id=0): + """Save annotations in coco-format. + + :param file_list: list of data annotation files. + :param img_root: the root dir to load images. + :param save_path: the path to save transformed annotation file. + :param start_ann_id: the starting point to count the annotation id. 
+ """ + images = [] + annotations = [] + img_ids = [] + ann_ids = [] + + ann_id = start_ann_id + + name2id = { + 'L_eye': 0, + 'R_eye': 1, + 'L_ear': 2, + 'R_ear': 3, + 'Nose': 4, + 'Throat': 5, + 'Tail': 6, + 'withers': 7, + 'L_F_elbow': 8, + 'R_F_elbow': 9, + 'L_B_elbow': 10, + 'R_B_elbow': 11, + 'L_F_knee': 12, + 'R_F_knee': 13, + 'L_B_knee': 14, + 'R_B_knee': 15, + 'L_F_paw': 16, + 'R_F_paw': 17, + 'L_B_paw': 18, + 'R_B_paw': 19 + } + + cat2id = {'cat': 1, 'cow': 2, 'dog': 3, 'horse': 4, 'sheep': 5} + + for file in file_list: + data_anno = xmltodict.parse(open(file).read())['annotation'] + + category_id = cat2id[data_anno['category']] + + img_id = category_id * 1000 + int( + re.findall(r'\d+', data_anno['image'])[0]) + + assert img_id not in img_ids + + # prepare images + image_name = os.path.join('animalpose_image_part2', + data_anno['category'], data_anno['image']) + img = cv2.imread(os.path.join(img_root, image_name)) + + image = {} + image['id'] = img_id + image['file_name'] = image_name + image['height'] = img.shape[0] + image['width'] = img.shape[1] + + images.append(image) + img_ids.append(img_id) + + # prepare annotations + keypoint_anno = data_anno['keypoints']['keypoint'] + keypoints = np.zeros([20, 3], dtype=np.float32) + + for kpt_anno in keypoint_anno: + keypoint_name = kpt_anno['@name'] + keypoint_id = name2id[keypoint_name] + + visibility = int(kpt_anno['@visible']) + + if visibility == 0: + continue + else: + keypoints[keypoint_id, 0] = float(kpt_anno['@x']) + keypoints[keypoint_id, 1] = float(kpt_anno['@y']) + keypoints[keypoint_id, 2] = 2 + + anno = {} + anno['keypoints'] = keypoints.reshape(-1).tolist() + anno['image_id'] = img_id + anno['id'] = ann_id + anno['num_keypoints'] = int(sum(keypoints[:, 2] > 0)) + + visible_bounds = data_anno['visible_bounds'] + anno['bbox'] = [ + float(visible_bounds['@xmin']), + float(visible_bounds['@xmax'] + ), # typo in original xml: should be 'ymin' + float(visible_bounds['@width']), + float(visible_bounds['@height']) + ] + anno['iscrowd'] = 0 + anno['area'] = float(anno['bbox'][2] * anno['bbox'][3]) + anno['category_id'] = 1 + + annotations.append(anno) + ann_ids.append(ann_id) + ann_id += 1 + + cocotype = {} + + cocotype['info'] = {} + cocotype['info'][ + 'description'] = 'AnimalPose dataset Generated by MMPose Team' + cocotype['info']['version'] = '1.0' + cocotype['info']['year'] = time.strftime('%Y', time.localtime()) + cocotype['info']['date_created'] = time.strftime('%Y/%m/%d', + time.localtime()) + + cocotype['images'] = images + cocotype['annotations'] = annotations + + keypoints_info, skeleton_info, category_info = get_anno_info() + + cocotype['categories'] = category_info + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + json.dump(cocotype, open(save_path, 'w'), indent=4) + print('=========================================================') + print('number of images:', len(img_ids)) + print('number of annotations:', len(ann_ids)) + print(f'done {save_path}') + + +def split_train_val(work_dir, trainval_file, train_file, val_file, + val_ann_num): + """Split train-val json file into training and validation files. + + :param work_dir: path to load train-val json file, and save split files. + :param trainval_file: The input json file combining both train and val. + :param trainval_file: The output json file for training. + :param trainval_file: The output json file for validation. + :param val_ann_num: the number of validation annotations. 
+ """ + + coco = COCO(os.path.join(work_dir, trainval_file)) + + img_list = list(coco.imgs.keys()) + np.random.shuffle(img_list) + + count = 0 + + images_train = [] + images_val = [] + annotations_train = [] + annotations_val = [] + + for img_id in img_list: + ann_ids = coco.getAnnIds(img_id) + + if count + len(ann_ids) <= val_ann_num: + # for validation + count += len(ann_ids) + images_val.append(coco.imgs[img_id]) + for ann_id in ann_ids: + annotations_val.append(coco.anns[ann_id]) + + else: + images_train.append(coco.imgs[img_id]) + for ann_id in ann_ids: + annotations_train.append(coco.anns[ann_id]) + + if count == val_ann_num: + print(f'We have found {count} annotations for validation.') + else: + warnings.warn( + f'We only found {count} annotations, instead of {val_ann_num}.') + + cocotype_train = {} + cocotype_val = {} + + keypoints_info, skeleton_info, category_info = get_anno_info() + + cocotype_train['info'] = {} + cocotype_train['info'][ + 'description'] = 'AnimalPose dataset Generated by MMPose Team' + cocotype_train['info']['version'] = '1.0' + cocotype_train['info']['year'] = time.strftime('%Y', time.localtime()) + cocotype_train['info']['date_created'] = time.strftime( + '%Y/%m/%d', time.localtime()) + cocotype_train['images'] = images_train + cocotype_train['annotations'] = annotations_train + cocotype_train['categories'] = category_info + + json.dump( + cocotype_train, + open(os.path.join(work_dir, train_file), 'w'), + indent=4) + print('=========================================================') + print('number of images:', len(images_train)) + print('number of annotations:', len(annotations_train)) + print(f'done {train_file}') + + cocotype_val['info'] = {} + cocotype_val['info'][ + 'description'] = 'AnimalPose dataset Generated by MMPose Team' + cocotype_val['info']['version'] = '1.0' + cocotype_val['info']['year'] = time.strftime('%Y', time.localtime()) + cocotype_val['info']['date_created'] = time.strftime( + '%Y/%m/%d', time.localtime()) + cocotype_val['images'] = images_val + cocotype_val['annotations'] = annotations_val + cocotype_val['categories'] = category_info + + json.dump( + cocotype_val, open(os.path.join(work_dir, val_file), 'w'), indent=4) + print('=========================================================') + print('number of images:', len(images_val)) + print('number of annotations:', len(annotations_val)) + print(f'done {val_file}') + + +dataset_dir = 'data/animalpose/' + +# We choose the images from PascalVOC for train + val +# In total, train+val: 3608 images, 5117 annotations +xml2coco_trainval( + list_all_files(os.path.join(dataset_dir, 'PASCAL2011_animal_annotation')), + dataset_dir, + os.path.join(dataset_dir, 'annotations', 'animalpose_trainval.json'), + start_ann_id=1000000) + +# train: 2798 images, 4000 annotations +# val: 810 images, 1117 annotations +split_train_val( + os.path.join(dataset_dir, 'annotations'), + 'animalpose_trainval.json', + 'animalpose_train.json', + 'animalpose_val.json', + val_ann_num=1117) + +# We choose the remaining 1000 images for test +# 1000 images, 1000 annotations +xml2coco_test( + list_all_files(os.path.join(dataset_dir, 'animalpose_anno2')), + dataset_dir, + os.path.join(dataset_dir, 'annotations', 'animalpose_test.json'), + start_ann_id=0) diff --git a/tools/dataset_converters/parse_cofw_dataset.py b/tools/dataset_converters/parse_cofw_dataset.py index 46b6affcb6..b9090b04dd 100644 --- a/tools/dataset_converters/parse_cofw_dataset.py +++ b/tools/dataset_converters/parse_cofw_dataset.py @@ -1,97 +1,97 @@ -# 
Copyright (c) OpenMMLab. All rights reserved. -import json -import os -import time - -import cv2 -import h5py -import numpy as np - -mat_files = ['COFW_train_color.mat', 'COFW_test_color.mat'] -dataset_dir = 'data/cofw/' - -image_root = os.path.join(dataset_dir, 'images/') -annotation_root = os.path.join(dataset_dir, 'annotations/') - -os.makedirs(image_root, exist_ok=True) -os.makedirs(annotation_root, exist_ok=True) - -cnt = 0 -for mat_file in mat_files: - mat = h5py.File(os.path.join(dataset_dir, mat_file), 'r') - - if 'train' in mat_file: - imgs = mat['IsTr'] - pts = mat['phisTr'] - bboxes = mat['bboxesTr'] - is_train = True - json_file = 'cofw_train.json' - else: - imgs = mat['IsT'] - pts = mat['phisT'] - bboxes = mat['bboxesT'] - is_train = False - json_file = 'cofw_test.json' - - images = [] - annotations = [] - - num = pts.shape[1] - for idx in range(0, num): - cnt += 1 - img = np.array(mat[imgs[0, idx]]).transpose() - keypoints = pts[:, idx].reshape(3, -1).transpose() - # 2 for valid and 1 for occlusion - keypoints[:, 2] = 2 - keypoints[:, 2] - # matlab 1-index to python 0-index - keypoints[:, :2] -= 1 - bbox = bboxes[:, idx] - - # check nonnegativity - bbox[bbox < 0] = 0 - keypoints[keypoints < 0] = 0 - - image = {} - image['id'] = cnt - image['file_name'] = f'{str(cnt).zfill(6)}.jpg' - image['height'] = img.shape[0] - image['width'] = img.shape[1] - cv2.imwrite( - os.path.join(image_root, image['file_name']), - cv2.cvtColor(img, cv2.COLOR_RGB2BGR)) - images.append(image) - - anno = {} - anno['keypoints'] = keypoints.reshape(-1).tolist() - anno['image_id'] = cnt - anno['id'] = cnt - anno['num_keypoints'] = len(keypoints) # all keypoints are labelled - anno['bbox'] = bbox.tolist() - anno['iscrowd'] = 0 - anno['area'] = anno['bbox'][2] * anno['bbox'][3] - anno['category_id'] = 1 - - annotations.append(anno) - - cocotype = {} - - cocotype['info'] = {} - cocotype['info']['description'] = 'COFW Generated by MMPose Team' - cocotype['info']['version'] = '1.0' - cocotype['info']['year'] = time.strftime('%Y', time.localtime()) - cocotype['info']['date_created'] = time.strftime('%Y/%m/%d', - time.localtime()) - - cocotype['images'] = images - cocotype['annotations'] = annotations - cocotype['categories'] = [{ - 'supercategory': 'person', - 'id': 1, - 'name': 'face', - 'keypoints': [], - 'skeleton': [] - }] - - ann_path = os.path.join(annotation_root, json_file) - json.dump(cocotype, open(ann_path, 'w')) - print(f'done {ann_path}') +# Copyright (c) OpenMMLab. All rights reserved. 
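Note: the COFW script relies on the .mat layout in which each sample is a flat [x1..xK, y1..yK, occ1..occK] vector with occ = 1 for occluded landmarks; `reshape(3, -1).transpose()` followed by `2 - occ` maps this onto COCO visibility flags (2 visible, 1 occluded). A toy sketch of that conversion with two hypothetical landmarks (the real script additionally shifts coordinates from MATLAB 1-indexing to 0-indexing):

import numpy as np

phis = np.array([10., 20.,   # x coordinates
                 30., 40.,   # y coordinates
                 0.,  1.])   # occlusion flags (1 = occluded)
kpts = phis.reshape(3, -1).transpose()   # -> (K, 3) rows of [x, y, occ]
kpts[:, 2] = 2 - kpts[:, 2]              # occ -> COCO visibility
print(kpts)
# [[10. 30.  2.]
#  [20. 40.  1.]]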
+import json +import os +import time + +import cv2 +import h5py +import numpy as np + +mat_files = ['COFW_train_color.mat', 'COFW_test_color.mat'] +dataset_dir = 'data/cofw/' + +image_root = os.path.join(dataset_dir, 'images/') +annotation_root = os.path.join(dataset_dir, 'annotations/') + +os.makedirs(image_root, exist_ok=True) +os.makedirs(annotation_root, exist_ok=True) + +cnt = 0 +for mat_file in mat_files: + mat = h5py.File(os.path.join(dataset_dir, mat_file), 'r') + + if 'train' in mat_file: + imgs = mat['IsTr'] + pts = mat['phisTr'] + bboxes = mat['bboxesTr'] + is_train = True + json_file = 'cofw_train.json' + else: + imgs = mat['IsT'] + pts = mat['phisT'] + bboxes = mat['bboxesT'] + is_train = False + json_file = 'cofw_test.json' + + images = [] + annotations = [] + + num = pts.shape[1] + for idx in range(0, num): + cnt += 1 + img = np.array(mat[imgs[0, idx]]).transpose() + keypoints = pts[:, idx].reshape(3, -1).transpose() + # 2 for valid and 1 for occlusion + keypoints[:, 2] = 2 - keypoints[:, 2] + # matlab 1-index to python 0-index + keypoints[:, :2] -= 1 + bbox = bboxes[:, idx] + + # check nonnegativity + bbox[bbox < 0] = 0 + keypoints[keypoints < 0] = 0 + + image = {} + image['id'] = cnt + image['file_name'] = f'{str(cnt).zfill(6)}.jpg' + image['height'] = img.shape[0] + image['width'] = img.shape[1] + cv2.imwrite( + os.path.join(image_root, image['file_name']), + cv2.cvtColor(img, cv2.COLOR_RGB2BGR)) + images.append(image) + + anno = {} + anno['keypoints'] = keypoints.reshape(-1).tolist() + anno['image_id'] = cnt + anno['id'] = cnt + anno['num_keypoints'] = len(keypoints) # all keypoints are labelled + anno['bbox'] = bbox.tolist() + anno['iscrowd'] = 0 + anno['area'] = anno['bbox'][2] * anno['bbox'][3] + anno['category_id'] = 1 + + annotations.append(anno) + + cocotype = {} + + cocotype['info'] = {} + cocotype['info']['description'] = 'COFW Generated by MMPose Team' + cocotype['info']['version'] = '1.0' + cocotype['info']['year'] = time.strftime('%Y', time.localtime()) + cocotype['info']['date_created'] = time.strftime('%Y/%m/%d', + time.localtime()) + + cocotype['images'] = images + cocotype['annotations'] = annotations + cocotype['categories'] = [{ + 'supercategory': 'person', + 'id': 1, + 'name': 'face', + 'keypoints': [], + 'skeleton': [] + }] + + ann_path = os.path.join(annotation_root, json_file) + json.dump(cocotype, open(ann_path, 'w')) + print(f'done {ann_path}') diff --git a/tools/dataset_converters/parse_deepposekit_dataset.py b/tools/dataset_converters/parse_deepposekit_dataset.py index 5fe7ae398f..bc12752fb4 100644 --- a/tools/dataset_converters/parse_deepposekit_dataset.py +++ b/tools/dataset_converters/parse_deepposekit_dataset.py @@ -1,180 +1,180 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import json -import os -import time - -import cv2 -import h5py -import numpy as np - -np.random.seed(0) - - -def save_coco_anno(keypoints_all, - annotated_all, - imgs_all, - keypoints_info, - skeleton_info, - dataset, - img_root, - save_path, - start_img_id=0, - start_ann_id=0): - """Save annotations in coco-format. - - :param keypoints_all: keypoint annotations. - :param annotated_all: images annotated or not. - :param imgs_all: the array of images. - :param keypoints_info: information about keypoint name. - :param skeleton_info: information about skeleton connection. - :param dataset: information about dataset name. - :param img_root: the path to save images. - :param save_path: the path to save transformed annotation file. 
- :param start_img_id: the starting point to count the image id. - :param start_ann_id: the starting point to count the annotation id. - """ - images = [] - annotations = [] - - img_id = start_img_id - ann_id = start_ann_id - - num_annotations, keypoints_num, _ = keypoints_all.shape - - for i in range(num_annotations): - img = imgs_all[i] - keypoints = np.concatenate( - [keypoints_all[i], annotated_all[i][:, None] * 2], axis=1) - - min_x, min_y = np.min(keypoints[keypoints[:, 2] > 0, :2], axis=0) - max_x, max_y = np.max(keypoints[keypoints[:, 2] > 0, :2], axis=0) - - anno = {} - anno['keypoints'] = keypoints.reshape(-1).tolist() - anno['image_id'] = img_id - anno['id'] = ann_id - anno['num_keypoints'] = int(sum(keypoints[:, 2] > 0)) - anno['bbox'] = [ - float(min_x), - float(min_y), - float(max_x - min_x + 1), - float(max_y - min_y + 1) - ] - anno['iscrowd'] = 0 - anno['area'] = anno['bbox'][2] * anno['bbox'][3] - anno['category_id'] = 1 - - annotations.append(anno) - ann_id += 1 - - image = {} - image['id'] = img_id - image['file_name'] = f'{img_id}.jpg' - image['height'] = img.shape[0] - image['width'] = img.shape[1] - - images.append(image) - img_id += 1 - - cv2.imwrite(os.path.join(img_root, image['file_name']), img) - - skeleton = np.concatenate( - [np.arange(keypoints_num)[:, None], skeleton_info[:, 0][:, None]], - axis=1) + 1 - skeleton = skeleton[skeleton.min(axis=1) > 0] - - cocotype = {} - - cocotype['info'] = {} - cocotype['info'][ - 'description'] = 'DeepPoseKit-Data Generated by MMPose Team' - cocotype['info']['version'] = '1.0' - cocotype['info']['year'] = time.strftime('%Y', time.localtime()) - cocotype['info']['date_created'] = time.strftime('%Y/%m/%d', - time.localtime()) - - cocotype['images'] = images - cocotype['annotations'] = annotations - cocotype['categories'] = [{ - 'supercategory': 'animal', - 'id': 1, - 'name': dataset, - 'keypoints': keypoints_info, - 'skeleton': skeleton.tolist() - }] - - os.makedirs(os.path.dirname(save_path), exist_ok=True) - json.dump(cocotype, open(save_path, 'w'), indent=4) - print('number of images:', img_id) - print('number of annotations:', ann_id) - print(f'done {save_path}') - - -for dataset in ['fly', 'locust', 'zebra']: - keypoints_info = [] - if dataset == 'fly': - keypoints_info = [ - 'head', 'eyeL', 'eyeR', 'neck', 'thorax', 'abdomen', 'forelegR1', - 'forelegR2', 'forelegR3', 'forelegR4', 'midlegR1', 'midlegR2', - 'midlegR3', 'midlegR4', 'hindlegR1', 'hindlegR2', 'hindlegR3', - 'hindlegR4', 'forelegL1', 'forelegL2', 'forelegL3', 'forelegL4', - 'midlegL1', 'midlegL2', 'midlegL3', 'midlegL4', 'hindlegL1', - 'hindlegL2', 'hindlegL3', 'hindlegL4', 'wingL', 'wingR' - ] - elif dataset == 'locust': - keypoints_info = [ - 'head', 'neck', 'thorax', 'abdomen1', 'abdomen2', 'anttipL', - 'antbaseL', 'eyeL', 'forelegL1', 'forelegL2', 'forelegL3', - 'forelegL4', 'midlegL1', 'midlegL2', 'midlegL3', 'midlegL4', - 'hindlegL1', 'hindlegL2', 'hindlegL3', 'hindlegL4', 'anttipR', - 'antbaseR', 'eyeR', 'forelegR1', 'forelegR2', 'forelegR3', - 'forelegR4', 'midlegR1', 'midlegR2', 'midlegR3', 'midlegR4', - 'hindlegR1', 'hindlegR2', 'hindlegR3', 'hindlegR4' - ] - elif dataset == 'zebra': - keypoints_info = [ - 'snout', 'head', 'neck', 'forelegL1', 'forelegR1', 'hindlegL1', - 'hindlegR1', 'tailbase', 'tailtip' - ] - else: - NotImplementedError() - - dataset_dir = f'data/DeepPoseKit-Data/datasets/{dataset}' - - with h5py.File( - os.path.join(dataset_dir, 'annotation_data_release.h5'), 'r') as f: - # List all groups - annotations = 
np.array(f['annotations']) - annotated = np.array(f['annotated']) - images = np.array(f['images']) - skeleton_info = np.array(f['skeleton']) - - annotation_num, kpt_num, _ = annotations.shape - - data_list = np.arange(0, annotation_num) - np.random.shuffle(data_list) - - val_data_num = annotation_num // 10 - train_data_num = annotation_num - val_data_num - - train_list = data_list[0:train_data_num] - val_list = data_list[train_data_num:] - - img_root = os.path.join(dataset_dir, 'images') - os.makedirs(img_root, exist_ok=True) - - save_coco_anno( - annotations[train_list], annotated[train_list], images[train_list], - keypoints_info, skeleton_info, dataset, img_root, - os.path.join(dataset_dir, 'annotations', f'{dataset}_train.json')) - save_coco_anno( - annotations[val_list], - annotated[val_list], - images[val_list], - keypoints_info, - skeleton_info, - dataset, - img_root, - os.path.join(dataset_dir, 'annotations', f'{dataset}_test.json'), - start_img_id=train_data_num, - start_ann_id=train_data_num) +# Copyright (c) OpenMMLab. All rights reserved. +import json +import os +import time + +import cv2 +import h5py +import numpy as np + +np.random.seed(0) + + +def save_coco_anno(keypoints_all, + annotated_all, + imgs_all, + keypoints_info, + skeleton_info, + dataset, + img_root, + save_path, + start_img_id=0, + start_ann_id=0): + """Save annotations in coco-format. + + :param keypoints_all: keypoint annotations. + :param annotated_all: images annotated or not. + :param imgs_all: the array of images. + :param keypoints_info: information about keypoint name. + :param skeleton_info: information about skeleton connection. + :param dataset: information about dataset name. + :param img_root: the path to save images. + :param save_path: the path to save transformed annotation file. + :param start_img_id: the starting point to count the image id. + :param start_ann_id: the starting point to count the annotation id. 
+ """ + images = [] + annotations = [] + + img_id = start_img_id + ann_id = start_ann_id + + num_annotations, keypoints_num, _ = keypoints_all.shape + + for i in range(num_annotations): + img = imgs_all[i] + keypoints = np.concatenate( + [keypoints_all[i], annotated_all[i][:, None] * 2], axis=1) + + min_x, min_y = np.min(keypoints[keypoints[:, 2] > 0, :2], axis=0) + max_x, max_y = np.max(keypoints[keypoints[:, 2] > 0, :2], axis=0) + + anno = {} + anno['keypoints'] = keypoints.reshape(-1).tolist() + anno['image_id'] = img_id + anno['id'] = ann_id + anno['num_keypoints'] = int(sum(keypoints[:, 2] > 0)) + anno['bbox'] = [ + float(min_x), + float(min_y), + float(max_x - min_x + 1), + float(max_y - min_y + 1) + ] + anno['iscrowd'] = 0 + anno['area'] = anno['bbox'][2] * anno['bbox'][3] + anno['category_id'] = 1 + + annotations.append(anno) + ann_id += 1 + + image = {} + image['id'] = img_id + image['file_name'] = f'{img_id}.jpg' + image['height'] = img.shape[0] + image['width'] = img.shape[1] + + images.append(image) + img_id += 1 + + cv2.imwrite(os.path.join(img_root, image['file_name']), img) + + skeleton = np.concatenate( + [np.arange(keypoints_num)[:, None], skeleton_info[:, 0][:, None]], + axis=1) + 1 + skeleton = skeleton[skeleton.min(axis=1) > 0] + + cocotype = {} + + cocotype['info'] = {} + cocotype['info'][ + 'description'] = 'DeepPoseKit-Data Generated by MMPose Team' + cocotype['info']['version'] = '1.0' + cocotype['info']['year'] = time.strftime('%Y', time.localtime()) + cocotype['info']['date_created'] = time.strftime('%Y/%m/%d', + time.localtime()) + + cocotype['images'] = images + cocotype['annotations'] = annotations + cocotype['categories'] = [{ + 'supercategory': 'animal', + 'id': 1, + 'name': dataset, + 'keypoints': keypoints_info, + 'skeleton': skeleton.tolist() + }] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + json.dump(cocotype, open(save_path, 'w'), indent=4) + print('number of images:', img_id) + print('number of annotations:', ann_id) + print(f'done {save_path}') + + +for dataset in ['fly', 'locust', 'zebra']: + keypoints_info = [] + if dataset == 'fly': + keypoints_info = [ + 'head', 'eyeL', 'eyeR', 'neck', 'thorax', 'abdomen', 'forelegR1', + 'forelegR2', 'forelegR3', 'forelegR4', 'midlegR1', 'midlegR2', + 'midlegR3', 'midlegR4', 'hindlegR1', 'hindlegR2', 'hindlegR3', + 'hindlegR4', 'forelegL1', 'forelegL2', 'forelegL3', 'forelegL4', + 'midlegL1', 'midlegL2', 'midlegL3', 'midlegL4', 'hindlegL1', + 'hindlegL2', 'hindlegL3', 'hindlegL4', 'wingL', 'wingR' + ] + elif dataset == 'locust': + keypoints_info = [ + 'head', 'neck', 'thorax', 'abdomen1', 'abdomen2', 'anttipL', + 'antbaseL', 'eyeL', 'forelegL1', 'forelegL2', 'forelegL3', + 'forelegL4', 'midlegL1', 'midlegL2', 'midlegL3', 'midlegL4', + 'hindlegL1', 'hindlegL2', 'hindlegL3', 'hindlegL4', 'anttipR', + 'antbaseR', 'eyeR', 'forelegR1', 'forelegR2', 'forelegR3', + 'forelegR4', 'midlegR1', 'midlegR2', 'midlegR3', 'midlegR4', + 'hindlegR1', 'hindlegR2', 'hindlegR3', 'hindlegR4' + ] + elif dataset == 'zebra': + keypoints_info = [ + 'snout', 'head', 'neck', 'forelegL1', 'forelegR1', 'hindlegL1', + 'hindlegR1', 'tailbase', 'tailtip' + ] + else: + NotImplementedError() + + dataset_dir = f'data/DeepPoseKit-Data/datasets/{dataset}' + + with h5py.File( + os.path.join(dataset_dir, 'annotation_data_release.h5'), 'r') as f: + # List all groups + annotations = np.array(f['annotations']) + annotated = np.array(f['annotated']) + images = np.array(f['images']) + skeleton_info = np.array(f['skeleton']) + + annotation_num, 
kpt_num, _ = annotations.shape + + data_list = np.arange(0, annotation_num) + np.random.shuffle(data_list) + + val_data_num = annotation_num // 10 + train_data_num = annotation_num - val_data_num + + train_list = data_list[0:train_data_num] + val_list = data_list[train_data_num:] + + img_root = os.path.join(dataset_dir, 'images') + os.makedirs(img_root, exist_ok=True) + + save_coco_anno( + annotations[train_list], annotated[train_list], images[train_list], + keypoints_info, skeleton_info, dataset, img_root, + os.path.join(dataset_dir, 'annotations', f'{dataset}_train.json')) + save_coco_anno( + annotations[val_list], + annotated[val_list], + images[val_list], + keypoints_info, + skeleton_info, + dataset, + img_root, + os.path.join(dataset_dir, 'annotations', f'{dataset}_test.json'), + start_img_id=train_data_num, + start_ann_id=train_data_num) diff --git a/tools/dataset_converters/parse_macaquepose_dataset.py b/tools/dataset_converters/parse_macaquepose_dataset.py index 85801a2225..39d827590c 100644 --- a/tools/dataset_converters/parse_macaquepose_dataset.py +++ b/tools/dataset_converters/parse_macaquepose_dataset.py @@ -1,182 +1,182 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import csv -import json -import os -import time - -import cv2 -import numpy as np - -np.random.seed(0) - - -def get_poly_area(x, y): - """Calculate area of polygon given (x,y) coordinates (Shoelace formula) - - :param x: np.ndarray(N, ) - :param y: np.ndarray(N, ) - :return: area - """ - return float(0.5 * - np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))) - - -def get_seg_area(segmentations): - area = 0 - for segmentation in segmentations: - area += get_poly_area(segmentation[:, 0], segmentation[:, 1]) - return area - - -def save_coco_anno(data_annotation, - img_root, - save_path, - start_img_id=0, - start_ann_id=0, - kpt_num=17): - """Save annotations in coco-format. - - :param data_annotation: list of data annotation. - :param img_root: the root dir to load images. - :param save_path: the path to save transformed annotation file. - :param start_img_id: the starting point to count the image id. - :param start_ann_id: the starting point to count the annotation id. - :param kpt_num: the number of keypoint. 
- """ - images = [] - annotations = [] - - img_id = start_img_id - ann_id = start_ann_id - - for i in range(0, len(data_annotation)): - data_anno = data_annotation[i] - image_name = data_anno[0] - - img = cv2.imread(os.path.join(img_root, image_name)) - - kp_string = data_anno[1] - kps = json.loads(kp_string) - - seg_string = data_anno[2] - segs = json.loads(seg_string) - - for kp, seg in zip(kps, segs): - keypoints = np.zeros([kpt_num, 3]) - for ind, p in enumerate(kp): - if p['position'] is None: - continue - else: - keypoints[ind, 0] = p['position'][0] - keypoints[ind, 1] = p['position'][1] - keypoints[ind, 2] = 2 - - segmentations = [] - - max_x = -1 - max_y = -1 - min_x = 999999 - min_y = 999999 - for segm in seg: - if len(segm['segment']) == 0: - continue - - segmentation = np.array(segm['segment']) - segmentations.append(segmentation) - - _max_x, _max_y = segmentation.max(0) - _min_x, _min_y = segmentation.min(0) - - max_x = max(max_x, _max_x) - max_y = max(max_y, _max_y) - min_x = min(min_x, _min_x) - min_y = min(min_y, _min_y) - - anno = {} - anno['keypoints'] = keypoints.reshape(-1).tolist() - anno['image_id'] = img_id - anno['id'] = ann_id - anno['num_keypoints'] = int(sum(keypoints[:, 2] > 0)) - anno['bbox'] = [ - float(min_x), - float(min_y), - float(max_x - min_x + 1), - float(max_y - min_y + 1) - ] - anno['iscrowd'] = 0 - anno['area'] = get_seg_area(segmentations) - anno['category_id'] = 1 - anno['segmentation'] = [ - seg.reshape(-1).tolist() for seg in segmentations - ] - - annotations.append(anno) - ann_id += 1 - - image = {} - image['id'] = img_id - image['file_name'] = image_name - image['height'] = img.shape[0] - image['width'] = img.shape[1] - - images.append(image) - img_id += 1 - - cocotype = {} - - cocotype['info'] = {} - cocotype['info']['description'] = 'MacaquePose Generated by MMPose Team' - cocotype['info']['version'] = '1.0' - cocotype['info']['year'] = time.strftime('%Y', time.localtime()) - cocotype['info']['date_created'] = time.strftime('%Y/%m/%d', - time.localtime()) - - cocotype['images'] = images - cocotype['annotations'] = annotations - cocotype['categories'] = [{ - 'supercategory': - 'animal', - 'id': - 1, - 'name': - 'macaque', - 'keypoints': [ - 'nose', 'left_eye', 'right_eye', 'left_ear', 'right_ear', - 'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow', - 'left_wrist', 'right_wrist', 'left_hip', 'right_hip', 'left_knee', - 'right_knee', 'left_ankle', 'right_ankle' - ], - 'skeleton': [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], - [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3], - [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]] - }] - - os.makedirs(os.path.dirname(save_path), exist_ok=True) - json.dump(cocotype, open(save_path, 'w'), indent=4) - print('number of images:', img_id) - print('number of annotations:', ann_id) - print(f'done {save_path}') - - -dataset_dir = '/data/macaque/' -with open(os.path.join(dataset_dir, 'annotations.csv'), 'r') as fp: - data_annotation_all = list(csv.reader(fp, delimiter=','))[1:] - -np.random.shuffle(data_annotation_all) - -data_annotation_train = data_annotation_all[0:12500] -data_annotation_val = data_annotation_all[12500:] - -img_root = os.path.join(dataset_dir, 'images') -save_coco_anno( - data_annotation_train, - img_root, - os.path.join(dataset_dir, 'annotations', 'macaque_train.json'), - kpt_num=17) -save_coco_anno( - data_annotation_val, - img_root, - os.path.join(dataset_dir, 'annotations', 'macaque_test.json'), - start_img_id=12500, - start_ann_id=15672, - kpt_num=17) 
+# Copyright (c) OpenMMLab. All rights reserved. +import csv +import json +import os +import time + +import cv2 +import numpy as np + +np.random.seed(0) + + +def get_poly_area(x, y): + """Calculate area of polygon given (x,y) coordinates (Shoelace formula) + + :param x: np.ndarray(N, ) + :param y: np.ndarray(N, ) + :return: area + """ + return float(0.5 * + np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))) + + +def get_seg_area(segmentations): + area = 0 + for segmentation in segmentations: + area += get_poly_area(segmentation[:, 0], segmentation[:, 1]) + return area + + +def save_coco_anno(data_annotation, + img_root, + save_path, + start_img_id=0, + start_ann_id=0, + kpt_num=17): + """Save annotations in coco-format. + + :param data_annotation: list of data annotation. + :param img_root: the root dir to load images. + :param save_path: the path to save transformed annotation file. + :param start_img_id: the starting point to count the image id. + :param start_ann_id: the starting point to count the annotation id. + :param kpt_num: the number of keypoint. + """ + images = [] + annotations = [] + + img_id = start_img_id + ann_id = start_ann_id + + for i in range(0, len(data_annotation)): + data_anno = data_annotation[i] + image_name = data_anno[0] + + img = cv2.imread(os.path.join(img_root, image_name)) + + kp_string = data_anno[1] + kps = json.loads(kp_string) + + seg_string = data_anno[2] + segs = json.loads(seg_string) + + for kp, seg in zip(kps, segs): + keypoints = np.zeros([kpt_num, 3]) + for ind, p in enumerate(kp): + if p['position'] is None: + continue + else: + keypoints[ind, 0] = p['position'][0] + keypoints[ind, 1] = p['position'][1] + keypoints[ind, 2] = 2 + + segmentations = [] + + max_x = -1 + max_y = -1 + min_x = 999999 + min_y = 999999 + for segm in seg: + if len(segm['segment']) == 0: + continue + + segmentation = np.array(segm['segment']) + segmentations.append(segmentation) + + _max_x, _max_y = segmentation.max(0) + _min_x, _min_y = segmentation.min(0) + + max_x = max(max_x, _max_x) + max_y = max(max_y, _max_y) + min_x = min(min_x, _min_x) + min_y = min(min_y, _min_y) + + anno = {} + anno['keypoints'] = keypoints.reshape(-1).tolist() + anno['image_id'] = img_id + anno['id'] = ann_id + anno['num_keypoints'] = int(sum(keypoints[:, 2] > 0)) + anno['bbox'] = [ + float(min_x), + float(min_y), + float(max_x - min_x + 1), + float(max_y - min_y + 1) + ] + anno['iscrowd'] = 0 + anno['area'] = get_seg_area(segmentations) + anno['category_id'] = 1 + anno['segmentation'] = [ + seg.reshape(-1).tolist() for seg in segmentations + ] + + annotations.append(anno) + ann_id += 1 + + image = {} + image['id'] = img_id + image['file_name'] = image_name + image['height'] = img.shape[0] + image['width'] = img.shape[1] + + images.append(image) + img_id += 1 + + cocotype = {} + + cocotype['info'] = {} + cocotype['info']['description'] = 'MacaquePose Generated by MMPose Team' + cocotype['info']['version'] = '1.0' + cocotype['info']['year'] = time.strftime('%Y', time.localtime()) + cocotype['info']['date_created'] = time.strftime('%Y/%m/%d', + time.localtime()) + + cocotype['images'] = images + cocotype['annotations'] = annotations + cocotype['categories'] = [{ + 'supercategory': + 'animal', + 'id': + 1, + 'name': + 'macaque', + 'keypoints': [ + 'nose', 'left_eye', 'right_eye', 'left_ear', 'right_ear', + 'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow', + 'left_wrist', 'right_wrist', 'left_hip', 'right_hip', 'left_knee', + 'right_knee', 'left_ankle', 'right_ankle' + ], + 
'skeleton': [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], + [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3], + [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]] + }] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + json.dump(cocotype, open(save_path, 'w'), indent=4) + print('number of images:', img_id) + print('number of annotations:', ann_id) + print(f'done {save_path}') + + +dataset_dir = '/data/macaque/' +with open(os.path.join(dataset_dir, 'annotations.csv'), 'r') as fp: + data_annotation_all = list(csv.reader(fp, delimiter=','))[1:] + +np.random.shuffle(data_annotation_all) + +data_annotation_train = data_annotation_all[0:12500] +data_annotation_val = data_annotation_all[12500:] + +img_root = os.path.join(dataset_dir, 'images') +save_coco_anno( + data_annotation_train, + img_root, + os.path.join(dataset_dir, 'annotations', 'macaque_train.json'), + kpt_num=17) +save_coco_anno( + data_annotation_val, + img_root, + os.path.join(dataset_dir, 'annotations', 'macaque_test.json'), + start_img_id=12500, + start_ann_id=15672, + kpt_num=17) diff --git a/tools/dataset_converters/preprocess_h36m.py b/tools/dataset_converters/preprocess_h36m.py index 1b6e0e098a..170539df0a 100644 --- a/tools/dataset_converters/preprocess_h36m.py +++ b/tools/dataset_converters/preprocess_h36m.py @@ -1,417 +1,417 @@ -# ----------------------------------------------------------------------------- -# Adapted from https://github.com/anibali/h36m-fetch -# Original license: Copyright (c) Aiden Nibali, under the Apache License. -# ----------------------------------------------------------------------------- - -import argparse -import os -import pickle -import tarfile -import xml.etree.ElementTree as ET -from os.path import join - -import cv2 -import numpy as np -from spacepy import pycdf - - -class PreprocessH36m: - """Preprocess Human3.6M dataset. - - Args: - metadata (str): Path to metadata.xml. - original_dir (str): Directory of the original dataset with all files - compressed. Specifically, .tgz files belonging to subject 1 - should be placed under the subdirectory 's1'. - extracted_dir (str): Directory of the extracted files. If not given, it - will be placed under the same parent directory as original_dir. - processed_der (str): Directory of the processed files. If not given, it - will be placed under the same parent directory as original_dir. - sample_rate (int): Downsample FPS to `1 / sample_rate`. Default: 5. 
- """ - - def __init__(self, - metadata, - original_dir, - extracted_dir=None, - processed_dir=None, - sample_rate=5): - self.metadata = metadata - self.original_dir = original_dir - self.sample_rate = sample_rate - - if extracted_dir is None: - self.extracted_dir = join( - os.path.dirname(os.path.abspath(self.original_dir)), - 'extracted') - else: - self.extracted_dir = extracted_dir - - if processed_dir is None: - self.processed_dir = join( - os.path.dirname(os.path.abspath(self.original_dir)), - 'processed') - else: - self.processed_dir = processed_dir - - self.subjects = [] - self.sequence_mappings = {} - self.action_names = {} - self.camera_ids = [] - self._load_metadata() - - self.subjects_annot = ['S1', 'S5', 'S6', 'S7', 'S8', 'S9', 'S11'] - self.subjects_splits = { - 'train': ['S1', 'S5', 'S6', 'S7', 'S8'], - 'test': ['S9', 'S11'] - } - self.extract_files = ['Videos', 'D2_Positions', 'D3_Positions_mono'] - self.movable_joints = [ - 0, 1, 2, 3, 6, 7, 8, 12, 13, 14, 15, 17, 18, 19, 25, 26, 27 - ] - self.scale_factor = 1.2 - self.image_sizes = { - '54138969': { - 'width': 1000, - 'height': 1002 - }, - '55011271': { - 'width': 1000, - 'height': 1000 - }, - '58860488': { - 'width': 1000, - 'height': 1000 - }, - '60457274': { - 'width': 1000, - 'height': 1002 - } - } - - def extract_tgz(self): - """Extract files from self.extrct_files.""" - os.makedirs(self.extracted_dir, exist_ok=True) - for subject in self.subjects_annot: - cur_dir = join(self.original_dir, subject.lower()) - for file in self.extract_files: - filename = join(cur_dir, file + '.tgz') - print(f'Extracting {filename} ...') - with tarfile.open(filename) as tar: - tar.extractall(self.extracted_dir) - print('Extraction done.\n') - - def generate_cameras_file(self): - """Generate cameras.pkl which contains camera parameters for 11 - subjects each with 4 cameras.""" - cameras = {} - for subject in range(1, 12): - for camera in range(4): - key = (f'S{subject}', self.camera_ids[camera]) - cameras[key] = self._get_camera_params(camera, subject) - - out_file = join(self.processed_dir, 'annotation_body3d', 'cameras.pkl') - with open(out_file, 'wb') as fout: - pickle.dump(cameras, fout) - print(f'Camera parameters have been written to "{out_file}".\n') - - def generate_annotations(self): - """Generate annotations for training and testing data.""" - output_dir = join(self.processed_dir, 'annotation_body3d', - f'fps{50 // self.sample_rate}') - os.makedirs(output_dir, exist_ok=True) - - for data_split in ('train', 'test'): - imgnames_all = [] - centers_all = [] - scales_all = [] - kps2d_all = [] - kps3d_all = [] - for subject in self.subjects_splits[data_split]: - for action, subaction in self.sequence_mappings[subject].keys( - ): - if action == '1': - # exclude action "_ALL" - continue - for camera in self.camera_ids: - imgnames, centers, scales, kps2d, kps3d\ - = self._load_annotations( - subject, action, subaction, camera) - imgnames_all.append(imgnames) - centers_all.append(centers) - scales_all.append(scales) - kps2d_all.append(kps2d) - kps3d_all.append(kps3d) - - imgnames_all = np.concatenate(imgnames_all) - centers_all = np.concatenate(centers_all) - scales_all = np.concatenate(scales_all) - kps2d_all = np.concatenate(kps2d_all) - kps3d_all = np.concatenate(kps3d_all) - - out_file = join(output_dir, f'h36m_{data_split}.npz') - np.savez( - out_file, - imgname=imgnames_all, - center=centers_all, - scale=scales_all, - part=kps2d_all, - S=kps3d_all) - - print( - f'All annotations of {data_split}ing data have been written to' - f' 
"{out_file}". {len(imgnames_all)} samples in total.\n') - - if data_split == 'train': - kps_3d_all = kps3d_all[..., :3] # remove visibility - mean_3d, std_3d = self._get_pose_stats(kps_3d_all) - - kps_2d_all = kps2d_all[..., :2] # remove visibility - mean_2d, std_2d = self._get_pose_stats(kps_2d_all) - - # centered around root - # the root keypoint is 0-index - kps_3d_rel = kps_3d_all[..., 1:, :] - kps_3d_all[..., :1, :] - mean_3d_rel, std_3d_rel = self._get_pose_stats(kps_3d_rel) - - kps_2d_rel = kps_2d_all[..., 1:, :] - kps_2d_all[..., :1, :] - mean_2d_rel, std_2d_rel = self._get_pose_stats(kps_2d_rel) - - stats = { - 'joint3d_stats': { - 'mean': mean_3d, - 'std': std_3d - }, - 'joint2d_stats': { - 'mean': mean_2d, - 'std': std_2d - }, - 'joint3d_rel_stats': { - 'mean': mean_3d_rel, - 'std': std_3d_rel - }, - 'joint2d_rel_stats': { - 'mean': mean_2d_rel, - 'std': std_2d_rel - } - } - for name, stat_dict in stats.items(): - out_file = join(output_dir, f'{name}.pkl') - with open(out_file, 'wb') as f: - pickle.dump(stat_dict, f) - print(f'Create statistic data file: {out_file}') - - @staticmethod - def _get_pose_stats(kps): - """Get statistic information `mean` and `std` of pose data. - - Args: - kps (ndarray): keypoints in shape [..., K, D] where K and C is - the keypoint category number and dimension. - Returns: - mean (ndarray): [K, D] - """ - assert kps.ndim > 2 - K, D = kps.shape[-2:] - kps = kps.reshape(-1, K, D) - mean = kps.mean(axis=0) - std = kps.std(axis=0) - return mean, std - - def _load_metadata(self): - """Load meta data from metadata.xml.""" - - assert os.path.exists(self.metadata) - - tree = ET.parse(self.metadata) - root = tree.getroot() - - for i, tr in enumerate(root.find('mapping')): - if i == 0: - _, _, *self.subjects = [td.text for td in tr] - self.sequence_mappings \ - = {subject: {} for subject in self.subjects} - elif i < 33: - action_id, subaction_id, *prefixes = [td.text for td in tr] - for subject, prefix in zip(self.subjects, prefixes): - self.sequence_mappings[subject][(action_id, subaction_id)]\ - = prefix - - for i, elem in enumerate(root.find('actionnames')): - action_id = str(i + 1) - self.action_names[action_id] = elem.text - - self.camera_ids \ - = [elem.text for elem in root.find('dbcameras/index2id')] - - w0 = root.find('w0') - self.cameras_raw = [float(num) for num in w0.text[1:-1].split()] - - def _get_base_filename(self, subject, action, subaction, camera): - """Get base filename given subject, action, subaction and camera.""" - return f'{self.sequence_mappings[subject][(action, subaction)]}' + \ - f'.{camera}' - - def _get_camera_params(self, camera, subject): - """Get camera parameters given camera id and subject id.""" - metadata_slice = np.zeros(15) - start = 6 * (camera * 11 + (subject - 1)) - - metadata_slice[:6] = self.cameras_raw[start:start + 6] - metadata_slice[6:] = self.cameras_raw[265 + camera * 9 - 1:265 + - (camera + 1) * 9 - 1] - - # extrinsics - x, y, z = -metadata_slice[0], metadata_slice[1], -metadata_slice[2] - - R_x = np.array([[1, 0, 0], [0, np.cos(x), np.sin(x)], - [0, -np.sin(x), np.cos(x)]]) - R_y = np.array([[np.cos(y), 0, np.sin(y)], [0, 1, 0], - [-np.sin(y), 0, np.cos(y)]]) - R_z = np.array([[np.cos(z), np.sin(z), 0], [-np.sin(z), - np.cos(z), 0], [0, 0, 1]]) - R = (R_x @ R_y @ R_z).T - T = metadata_slice[3:6].reshape(-1, 1) - # convert unit from millimeter to meter - T *= 0.001 - - # intrinsics - c = metadata_slice[8:10, None] - f = metadata_slice[6:8, None] - - # distortion - k = metadata_slice[10:13, None] - p = 
metadata_slice[13:15, None] - - return { - 'R': R, - 'T': T, - 'c': c, - 'f': f, - 'k': k, - 'p': p, - 'w': self.image_sizes[self.camera_ids[camera]]['width'], - 'h': self.image_sizes[self.camera_ids[camera]]['height'], - 'name': f'camera{camera + 1}', - 'id': self.camera_ids[camera] - } - - def _load_annotations(self, subject, action, subaction, camera): - """Load annotations for a sequence.""" - subj_dir = join(self.extracted_dir, subject) - basename = self._get_base_filename(subject, action, subaction, camera) - - # load 2D keypoints - with pycdf.CDF( - join(subj_dir, 'MyPoseFeatures', 'D2_Positions', - basename + '.cdf')) as cdf: - kps_2d = np.array(cdf['Pose']) - - num_frames = kps_2d.shape[1] - kps_2d = kps_2d.reshape((num_frames, 32, 2))[::self.sample_rate, - self.movable_joints] - kps_2d = np.concatenate([kps_2d, np.ones((len(kps_2d), 17, 1))], - axis=2) - - # load 3D keypoints - with pycdf.CDF( - join(subj_dir, 'MyPoseFeatures', 'D3_Positions_mono', - basename + '.cdf')) as cdf: - kps_3d = np.array(cdf['Pose']) - - kps_3d = kps_3d.reshape( - (num_frames, 32, 3))[::self.sample_rate, - self.movable_joints] / 1000. - kps_3d = np.concatenate([kps_3d, np.ones((len(kps_3d), 17, 1))], - axis=2) - - # calculate bounding boxes - bboxes = np.stack([ - np.min(kps_2d[:, :, 0], axis=1), - np.min(kps_2d[:, :, 1], axis=1), - np.max(kps_2d[:, :, 0], axis=1), - np.max(kps_2d[:, :, 1], axis=1) - ], - axis=1) - centers = np.stack([(bboxes[:, 0] + bboxes[:, 2]) / 2, - (bboxes[:, 1] + bboxes[:, 3]) / 2], - axis=1) - scales = self.scale_factor * np.max( - bboxes[:, 2:] - bboxes[:, :2], axis=1) / 200 - - # extract frames and save imgnames - imgnames = [] - video_path = join(subj_dir, 'Videos', basename + '.mp4') - sub_base = subject + '_' + basename.replace(' ', '_') - img_dir = join(self.processed_dir, 'images', subject, sub_base) - os.makedirs(img_dir, exist_ok=True) - prefix = join(subject, sub_base, sub_base) - - cap = cv2.VideoCapture(video_path) - i = 0 - while True: - success, img = cap.read() - if not success: - break - if i % self.sample_rate == 0: - imgname = f'{prefix}_{i + 1:06d}.jpg' - imgnames.append(imgname) - dest_path = join(self.processed_dir, 'images', imgname) - if not os.path.exists(dest_path): - cv2.imwrite(dest_path, img) - if len(imgnames) == len(centers): - break - i += 1 - cap.release() - imgnames = np.array(imgnames) - - print(f'Annoatations for sequence "{subject} {basename}" are loaded. ' - f'{len(imgnames)} samples in total.') - - return imgnames, centers, scales, kps_2d, kps_3d - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument( - '--metadata', type=str, required=True, help='Path to metadata.xml') - parser.add_argument( - '--original', - type=str, - required=True, - help='Directory of the original dataset with all files compressed. ' - 'Specifically, .tgz files belonging to subject 1 should be placed ' - 'under the subdirectory \"s1\".') - parser.add_argument( - '--extracted', - type=str, - default=None, - help='Directory of the extracted files. If not given, it will be ' - 'placed under the same parent directory as original_dir.') - parser.add_argument( - '--processed', - type=str, - default=None, - help='Directory of the processed files. If not given, it will be ' - 'placed under the same parent directory as original_dir.') - parser.add_argument( - '--sample-rate', - type=int, - default=5, - help='Downsample FPS to `1 / sample_rate`. 
Default: 5.') - args = parser.parse_args() - return args - - -if __name__ == '__main__': - args = parse_args() - - h36m = PreprocessH36m( - metadata=args.metadata, - original_dir=args.original, - extracted_dir=args.extracted, - processed_dir=args.processed, - sample_rate=args.sample_rate) - h36m.extract_tgz() - h36m.generate_cameras_file() - h36m.generate_annotations() +# ----------------------------------------------------------------------------- +# Adapted from https://github.com/anibali/h36m-fetch +# Original license: Copyright (c) Aiden Nibali, under the Apache License. +# ----------------------------------------------------------------------------- + +import argparse +import os +import pickle +import tarfile +import xml.etree.ElementTree as ET +from os.path import join + +import cv2 +import numpy as np +from spacepy import pycdf + + +class PreprocessH36m: + """Preprocess Human3.6M dataset. + + Args: + metadata (str): Path to metadata.xml. + original_dir (str): Directory of the original dataset with all files + compressed. Specifically, .tgz files belonging to subject 1 + should be placed under the subdirectory 's1'. + extracted_dir (str): Directory of the extracted files. If not given, it + will be placed under the same parent directory as original_dir. + processed_der (str): Directory of the processed files. If not given, it + will be placed under the same parent directory as original_dir. + sample_rate (int): Downsample FPS to `1 / sample_rate`. Default: 5. + """ + + def __init__(self, + metadata, + original_dir, + extracted_dir=None, + processed_dir=None, + sample_rate=5): + self.metadata = metadata + self.original_dir = original_dir + self.sample_rate = sample_rate + + if extracted_dir is None: + self.extracted_dir = join( + os.path.dirname(os.path.abspath(self.original_dir)), + 'extracted') + else: + self.extracted_dir = extracted_dir + + if processed_dir is None: + self.processed_dir = join( + os.path.dirname(os.path.abspath(self.original_dir)), + 'processed') + else: + self.processed_dir = processed_dir + + self.subjects = [] + self.sequence_mappings = {} + self.action_names = {} + self.camera_ids = [] + self._load_metadata() + + self.subjects_annot = ['S1', 'S5', 'S6', 'S7', 'S8', 'S9', 'S11'] + self.subjects_splits = { + 'train': ['S1', 'S5', 'S6', 'S7', 'S8'], + 'test': ['S9', 'S11'] + } + self.extract_files = ['Videos', 'D2_Positions', 'D3_Positions_mono'] + self.movable_joints = [ + 0, 1, 2, 3, 6, 7, 8, 12, 13, 14, 15, 17, 18, 19, 25, 26, 27 + ] + self.scale_factor = 1.2 + self.image_sizes = { + '54138969': { + 'width': 1000, + 'height': 1002 + }, + '55011271': { + 'width': 1000, + 'height': 1000 + }, + '58860488': { + 'width': 1000, + 'height': 1000 + }, + '60457274': { + 'width': 1000, + 'height': 1002 + } + } + + def extract_tgz(self): + """Extract files from self.extrct_files.""" + os.makedirs(self.extracted_dir, exist_ok=True) + for subject in self.subjects_annot: + cur_dir = join(self.original_dir, subject.lower()) + for file in self.extract_files: + filename = join(cur_dir, file + '.tgz') + print(f'Extracting {filename} ...') + with tarfile.open(filename) as tar: + tar.extractall(self.extracted_dir) + print('Extraction done.\n') + + def generate_cameras_file(self): + """Generate cameras.pkl which contains camera parameters for 11 + subjects each with 4 cameras.""" + cameras = {} + for subject in range(1, 12): + for camera in range(4): + key = (f'S{subject}', self.camera_ids[camera]) + cameras[key] = self._get_camera_params(camera, subject) + + out_file = 
join(self.processed_dir, 'annotation_body3d', 'cameras.pkl') + with open(out_file, 'wb') as fout: + pickle.dump(cameras, fout) + print(f'Camera parameters have been written to "{out_file}".\n') + + def generate_annotations(self): + """Generate annotations for training and testing data.""" + output_dir = join(self.processed_dir, 'annotation_body3d', + f'fps{50 // self.sample_rate}') + os.makedirs(output_dir, exist_ok=True) + + for data_split in ('train', 'test'): + imgnames_all = [] + centers_all = [] + scales_all = [] + kps2d_all = [] + kps3d_all = [] + for subject in self.subjects_splits[data_split]: + for action, subaction in self.sequence_mappings[subject].keys( + ): + if action == '1': + # exclude action "_ALL" + continue + for camera in self.camera_ids: + imgnames, centers, scales, kps2d, kps3d\ + = self._load_annotations( + subject, action, subaction, camera) + imgnames_all.append(imgnames) + centers_all.append(centers) + scales_all.append(scales) + kps2d_all.append(kps2d) + kps3d_all.append(kps3d) + + imgnames_all = np.concatenate(imgnames_all) + centers_all = np.concatenate(centers_all) + scales_all = np.concatenate(scales_all) + kps2d_all = np.concatenate(kps2d_all) + kps3d_all = np.concatenate(kps3d_all) + + out_file = join(output_dir, f'h36m_{data_split}.npz') + np.savez( + out_file, + imgname=imgnames_all, + center=centers_all, + scale=scales_all, + part=kps2d_all, + S=kps3d_all) + + print( + f'All annotations of {data_split}ing data have been written to' + f' "{out_file}". {len(imgnames_all)} samples in total.\n') + + if data_split == 'train': + kps_3d_all = kps3d_all[..., :3] # remove visibility + mean_3d, std_3d = self._get_pose_stats(kps_3d_all) + + kps_2d_all = kps2d_all[..., :2] # remove visibility + mean_2d, std_2d = self._get_pose_stats(kps_2d_all) + + # centered around root + # the root keypoint is 0-index + kps_3d_rel = kps_3d_all[..., 1:, :] - kps_3d_all[..., :1, :] + mean_3d_rel, std_3d_rel = self._get_pose_stats(kps_3d_rel) + + kps_2d_rel = kps_2d_all[..., 1:, :] - kps_2d_all[..., :1, :] + mean_2d_rel, std_2d_rel = self._get_pose_stats(kps_2d_rel) + + stats = { + 'joint3d_stats': { + 'mean': mean_3d, + 'std': std_3d + }, + 'joint2d_stats': { + 'mean': mean_2d, + 'std': std_2d + }, + 'joint3d_rel_stats': { + 'mean': mean_3d_rel, + 'std': std_3d_rel + }, + 'joint2d_rel_stats': { + 'mean': mean_2d_rel, + 'std': std_2d_rel + } + } + for name, stat_dict in stats.items(): + out_file = join(output_dir, f'{name}.pkl') + with open(out_file, 'wb') as f: + pickle.dump(stat_dict, f) + print(f'Create statistic data file: {out_file}') + + @staticmethod + def _get_pose_stats(kps): + """Get statistic information `mean` and `std` of pose data. + + Args: + kps (ndarray): keypoints in shape [..., K, D] where K and C is + the keypoint category number and dimension. 
+ Returns: + mean (ndarray): [K, D] + """ + assert kps.ndim > 2 + K, D = kps.shape[-2:] + kps = kps.reshape(-1, K, D) + mean = kps.mean(axis=0) + std = kps.std(axis=0) + return mean, std + + def _load_metadata(self): + """Load meta data from metadata.xml.""" + + assert os.path.exists(self.metadata) + + tree = ET.parse(self.metadata) + root = tree.getroot() + + for i, tr in enumerate(root.find('mapping')): + if i == 0: + _, _, *self.subjects = [td.text for td in tr] + self.sequence_mappings \ + = {subject: {} for subject in self.subjects} + elif i < 33: + action_id, subaction_id, *prefixes = [td.text for td in tr] + for subject, prefix in zip(self.subjects, prefixes): + self.sequence_mappings[subject][(action_id, subaction_id)]\ + = prefix + + for i, elem in enumerate(root.find('actionnames')): + action_id = str(i + 1) + self.action_names[action_id] = elem.text + + self.camera_ids \ + = [elem.text for elem in root.find('dbcameras/index2id')] + + w0 = root.find('w0') + self.cameras_raw = [float(num) for num in w0.text[1:-1].split()] + + def _get_base_filename(self, subject, action, subaction, camera): + """Get base filename given subject, action, subaction and camera.""" + return f'{self.sequence_mappings[subject][(action, subaction)]}' + \ + f'.{camera}' + + def _get_camera_params(self, camera, subject): + """Get camera parameters given camera id and subject id.""" + metadata_slice = np.zeros(15) + start = 6 * (camera * 11 + (subject - 1)) + + metadata_slice[:6] = self.cameras_raw[start:start + 6] + metadata_slice[6:] = self.cameras_raw[265 + camera * 9 - 1:265 + + (camera + 1) * 9 - 1] + + # extrinsics + x, y, z = -metadata_slice[0], metadata_slice[1], -metadata_slice[2] + + R_x = np.array([[1, 0, 0], [0, np.cos(x), np.sin(x)], + [0, -np.sin(x), np.cos(x)]]) + R_y = np.array([[np.cos(y), 0, np.sin(y)], [0, 1, 0], + [-np.sin(y), 0, np.cos(y)]]) + R_z = np.array([[np.cos(z), np.sin(z), 0], [-np.sin(z), + np.cos(z), 0], [0, 0, 1]]) + R = (R_x @ R_y @ R_z).T + T = metadata_slice[3:6].reshape(-1, 1) + # convert unit from millimeter to meter + T *= 0.001 + + # intrinsics + c = metadata_slice[8:10, None] + f = metadata_slice[6:8, None] + + # distortion + k = metadata_slice[10:13, None] + p = metadata_slice[13:15, None] + + return { + 'R': R, + 'T': T, + 'c': c, + 'f': f, + 'k': k, + 'p': p, + 'w': self.image_sizes[self.camera_ids[camera]]['width'], + 'h': self.image_sizes[self.camera_ids[camera]]['height'], + 'name': f'camera{camera + 1}', + 'id': self.camera_ids[camera] + } + + def _load_annotations(self, subject, action, subaction, camera): + """Load annotations for a sequence.""" + subj_dir = join(self.extracted_dir, subject) + basename = self._get_base_filename(subject, action, subaction, camera) + + # load 2D keypoints + with pycdf.CDF( + join(subj_dir, 'MyPoseFeatures', 'D2_Positions', + basename + '.cdf')) as cdf: + kps_2d = np.array(cdf['Pose']) + + num_frames = kps_2d.shape[1] + kps_2d = kps_2d.reshape((num_frames, 32, 2))[::self.sample_rate, + self.movable_joints] + kps_2d = np.concatenate([kps_2d, np.ones((len(kps_2d), 17, 1))], + axis=2) + + # load 3D keypoints + with pycdf.CDF( + join(subj_dir, 'MyPoseFeatures', 'D3_Positions_mono', + basename + '.cdf')) as cdf: + kps_3d = np.array(cdf['Pose']) + + kps_3d = kps_3d.reshape( + (num_frames, 32, 3))[::self.sample_rate, + self.movable_joints] / 1000. 
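        # Illustrative sketch (not part of the original script): the camera
        # dict built by `_get_camera_params` can be used to project these
        # metre-scale keypoints back to pixels as a sanity check, assuming the
        # `D3_Positions_mono` points are expressed in the camera frame and
        # ignoring the distortion terms `k` and `p`.
        def _project_to_pixels(kps_3d_m, cam):
            """kps_3d_m: [N, 17, 3] points in metres, camera frame."""
            xy = kps_3d_m[..., :2] / kps_3d_m[..., 2:3]  # perspective divide
            f = cam['f'].reshape(1, 1, 2)  # focal lengths in pixels
            c = cam['c'].reshape(1, 1, 2)  # principal point in pixels
            return xy * f + c  # [N, 17, 2], comparable with `kps_2d`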
+ kps_3d = np.concatenate([kps_3d, np.ones((len(kps_3d), 17, 1))], + axis=2) + + # calculate bounding boxes + bboxes = np.stack([ + np.min(kps_2d[:, :, 0], axis=1), + np.min(kps_2d[:, :, 1], axis=1), + np.max(kps_2d[:, :, 0], axis=1), + np.max(kps_2d[:, :, 1], axis=1) + ], + axis=1) + centers = np.stack([(bboxes[:, 0] + bboxes[:, 2]) / 2, + (bboxes[:, 1] + bboxes[:, 3]) / 2], + axis=1) + scales = self.scale_factor * np.max( + bboxes[:, 2:] - bboxes[:, :2], axis=1) / 200 + + # extract frames and save imgnames + imgnames = [] + video_path = join(subj_dir, 'Videos', basename + '.mp4') + sub_base = subject + '_' + basename.replace(' ', '_') + img_dir = join(self.processed_dir, 'images', subject, sub_base) + os.makedirs(img_dir, exist_ok=True) + prefix = join(subject, sub_base, sub_base) + + cap = cv2.VideoCapture(video_path) + i = 0 + while True: + success, img = cap.read() + if not success: + break + if i % self.sample_rate == 0: + imgname = f'{prefix}_{i + 1:06d}.jpg' + imgnames.append(imgname) + dest_path = join(self.processed_dir, 'images', imgname) + if not os.path.exists(dest_path): + cv2.imwrite(dest_path, img) + if len(imgnames) == len(centers): + break + i += 1 + cap.release() + imgnames = np.array(imgnames) + + print(f'Annoatations for sequence "{subject} {basename}" are loaded. ' + f'{len(imgnames)} samples in total.') + + return imgnames, centers, scales, kps_2d, kps_3d + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument( + '--metadata', type=str, required=True, help='Path to metadata.xml') + parser.add_argument( + '--original', + type=str, + required=True, + help='Directory of the original dataset with all files compressed. ' + 'Specifically, .tgz files belonging to subject 1 should be placed ' + 'under the subdirectory \"s1\".') + parser.add_argument( + '--extracted', + type=str, + default=None, + help='Directory of the extracted files. If not given, it will be ' + 'placed under the same parent directory as original_dir.') + parser.add_argument( + '--processed', + type=str, + default=None, + help='Directory of the processed files. If not given, it will be ' + 'placed under the same parent directory as original_dir.') + parser.add_argument( + '--sample-rate', + type=int, + default=5, + help='Downsample FPS to `1 / sample_rate`. Default: 5.') + args = parser.parse_args() + return args + + +if __name__ == '__main__': + args = parse_args() + + h36m = PreprocessH36m( + metadata=args.metadata, + original_dir=args.original, + extracted_dir=args.extracted, + processed_dir=args.processed, + sample_rate=args.sample_rate) + h36m.extract_tgz() + h36m.generate_cameras_file() + h36m.generate_annotations() diff --git a/tools/dataset_converters/preprocess_mpi_inf_3dhp.py b/tools/dataset_converters/preprocess_mpi_inf_3dhp.py index 7f28302fc7..6dc129e058 100644 --- a/tools/dataset_converters/preprocess_mpi_inf_3dhp.py +++ b/tools/dataset_converters/preprocess_mpi_inf_3dhp.py @@ -1,359 +1,359 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
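# For reference (illustrative sketch, not part of the patch): both converters
# in this patch derive the person box from the 2D keypoints and store it as a
# centre plus a single scale, padded by a factor (1.2 above) and divided by
# 200 px, the box-scale unit used by MPII-style top-down pipelines. A minimal
# standalone form of that convention, assuming [N, K, 2] pixel keypoints:
import numpy as np

def keypoints_to_center_scale(kps_2d, padding=1.2, pixel_std=200.0):
    """kps_2d: [N, K, 2] keypoints -> centres [N, 2], scales [N]."""
    x_min, y_min = kps_2d[..., 0].min(axis=1), kps_2d[..., 1].min(axis=1)
    x_max, y_max = kps_2d[..., 0].max(axis=1), kps_2d[..., 1].max(axis=1)
    centers = np.stack([(x_min + x_max) / 2, (y_min + y_max) / 2], axis=1)
    scales = padding * np.maximum(x_max - x_min, y_max - y_min) / pixel_std
    return centers, scales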
-import argparse -import os -import pickle -import shutil -from os.path import join - -import cv2 -import h5py -import mmcv -import numpy as np -from scipy.io import loadmat - -train_subjects = [i for i in range(1, 9)] -test_subjects = [i for i in range(1, 7)] -train_seqs = [1, 2] -train_cams = [0, 1, 2, 4, 5, 6, 7, 8] -train_frame_nums = { - (1, 1): 6416, - (1, 2): 12430, - (2, 1): 6502, - (2, 2): 6081, - (3, 1): 12488, - (3, 2): 12283, - (4, 1): 6171, - (4, 2): 6675, - (5, 1): 12820, - (5, 2): 12312, - (6, 1): 6188, - (6, 2): 6145, - (7, 1): 6239, - (7, 2): 6320, - (8, 1): 6468, - (8, 2): 6054 -} -test_frame_nums = {1: 6151, 2: 6080, 3: 5838, 4: 6007, 5: 320, 6: 492} -train_img_size = (2048, 2048) -root_index = 14 -joints_17 = [7, 5, 14, 15, 16, 9, 10, 11, 23, 24, 25, 18, 19, 20, 4, 3, 6] - - -def get_pose_stats(kps): - """Get statistic information `mean` and `std` of pose data. - - Args: - kps (ndarray): keypoints in shape [..., K, D] where K and D is - the keypoint category number and dimension. - Returns: - mean (ndarray): [K, D] - """ - assert kps.ndim > 2 - K, D = kps.shape[-2:] - kps = kps.reshape(-1, K, D) - mean = kps.mean(axis=0) - std = kps.std(axis=0) - return mean, std - - -def get_annotations(joints_2d, joints_3d, scale_factor=1.2): - """Get annotations, including centers, scales, joints_2d and joints_3d. - - Args: - joints_2d: 2D joint coordinates in shape [N, K, 2], where N is the - frame number, K is the joint number. - joints_3d: 3D joint coordinates in shape [N, K, 3], where N is the - frame number, K is the joint number. - scale_factor: Scale factor of bounding box. Default: 1.2. - Returns: - centers (ndarray): [N, 2] - scales (ndarray): [N,] - joints_2d (ndarray): [N, K, 3] - joints_3d (ndarray): [N, K, 4] - """ - # calculate joint visibility - visibility = (joints_2d[:, :, 0] >= 0) * \ - (joints_2d[:, :, 0] < train_img_size[0]) * \ - (joints_2d[:, :, 1] >= 0) * \ - (joints_2d[:, :, 1] < train_img_size[1]) - visibility = np.array(visibility, dtype=np.float32)[:, :, None] - joints_2d = np.concatenate([joints_2d, visibility], axis=-1) - joints_3d = np.concatenate([joints_3d, visibility], axis=-1) - - # calculate bounding boxes - bboxes = np.stack([ - np.min(joints_2d[:, :, 0], axis=1), - np.min(joints_2d[:, :, 1], axis=1), - np.max(joints_2d[:, :, 0], axis=1), - np.max(joints_2d[:, :, 1], axis=1) - ], - axis=1) - centers = np.stack([(bboxes[:, 0] + bboxes[:, 2]) / 2, - (bboxes[:, 1] + bboxes[:, 3]) / 2], - axis=1) - scales = scale_factor * np.max(bboxes[:, 2:] - bboxes[:, :2], axis=1) / 200 - - return centers, scales, joints_2d, joints_3d - - -def load_trainset(data_root, out_dir): - """Load training data, create annotation file and camera file. - Args: - data_root: Directory of dataset, which is organized in the following - hierarchy: - data_root - |-- train - |-- S1 - |-- Seq1 - |-- Seq2 - |-- S2 - |-- ... - |-- test - |-- TS1 - |-- TS2 - |-- ... - out_dir: Directory to save annotation file. 
- """ - _imgnames = [] - _centers = [] - _scales = [] - _joints_2d = [] - _joints_3d = [] - cameras = {} - - img_dir = join(out_dir, 'images') - os.makedirs(img_dir, exist_ok=True) - annot_dir = join(out_dir, 'annotations') - os.makedirs(annot_dir, exist_ok=True) - - for subj in train_subjects: - for seq in train_seqs: - seq_path = join(data_root, 'train', f'S{subj}', f'Seq{seq}') - num_frames = train_frame_nums[(subj, seq)] - - # load camera parametres - camera_file = join(seq_path, 'camera.calibration') - with open(camera_file, 'r') as fin: - lines = fin.readlines() - for cam in train_cams: - K = [float(s) for s in lines[cam * 7 + 5][11:-2].split()] - f = np.array([[K[0]], [K[5]]]) - c = np.array([[K[2]], [K[6]]]) - RT = np.array( - [float(s) for s in lines[cam * 7 + 6][11:-2].split()]) - RT = np.reshape(RT, (4, 4)) - R = RT[:3, :3] - # convert unit from millimeter to meter - T = RT[:3, 3:] * 0.001 - size = [int(s) for s in lines[cam * 7 + 3][14:].split()] - w, h = size - cam_param = dict( - R=R, T=T, c=c, f=f, w=w, h=h, name=f'train_cam_{cam}') - cameras[f'S{subj}_Seq{seq}_Cam{cam}'] = cam_param - - # load annotations - annot_file = os.path.join(seq_path, 'annot.mat') - annot2 = loadmat(annot_file)['annot2'] - annot3 = loadmat(annot_file)['annot3'] - for cam in train_cams: - # load 2D and 3D annotations - joints_2d = np.reshape(annot2[cam][0][:num_frames], - (num_frames, 28, 2))[:, joints_17] - joints_3d = np.reshape(annot3[cam][0][:num_frames], - (num_frames, 28, 3))[:, joints_17] - joints_3d = joints_3d * 0.001 - centers, scales, joints_2d, joints_3d = get_annotations( - joints_2d, joints_3d) - _centers.append(centers) - _scales.append(scales) - _joints_2d.append(joints_2d) - _joints_3d.append(joints_3d) - - # extract frames from video - video_path = join(seq_path, 'imageSequence', - f'video_{cam}.avi') - video = mmcv.VideoReader(video_path) - for i in mmcv.track_iter_progress(range(num_frames)): - img = video.read() - if img is None: - break - imgname = f'S{subj}_Seq{seq}_Cam{cam}_{i+1:06d}.jpg' - _imgnames.append(imgname) - cv2.imwrite(join(img_dir, imgname), img) - - _imgnames = np.array(_imgnames) - _centers = np.concatenate(_centers) - _scales = np.concatenate(_scales) - _joints_2d = np.concatenate(_joints_2d) - _joints_3d = np.concatenate(_joints_3d) - - out_file = join(annot_dir, 'mpi_inf_3dhp_train.npz') - np.savez( - out_file, - imgname=_imgnames, - center=_centers, - scale=_scales, - part=_joints_2d, - S=_joints_3d) - print(f'Create annotation file for trainset: {out_file}. 
' - f'{len(_imgnames)} samples in total.') - - out_file = join(annot_dir, 'cameras_train.pkl') - with open(out_file, 'wb') as fout: - pickle.dump(cameras, fout) - print(f'Create camera file for trainset: {out_file}.') - - # get `mean` and `std` of pose data - _joints_3d = _joints_3d[..., :3] # remove visibility - mean_3d, std_3d = get_pose_stats(_joints_3d) - - _joints_2d = _joints_2d[..., :2] # remove visibility - mean_2d, std_2d = get_pose_stats(_joints_2d) - - # centered around root - _joints_3d_rel = _joints_3d - _joints_3d[..., root_index:root_index + 1, :] - mean_3d_rel, std_3d_rel = get_pose_stats(_joints_3d_rel) - mean_3d_rel[root_index] = mean_3d[root_index] - std_3d_rel[root_index] = std_3d[root_index] - - _joints_2d_rel = _joints_2d - _joints_2d[..., root_index:root_index + 1, :] - mean_2d_rel, std_2d_rel = get_pose_stats(_joints_2d_rel) - mean_2d_rel[root_index] = mean_2d[root_index] - std_2d_rel[root_index] = std_2d[root_index] - - stats = { - 'joint3d_stats': { - 'mean': mean_3d, - 'std': std_3d - }, - 'joint2d_stats': { - 'mean': mean_2d, - 'std': std_2d - }, - 'joint3d_rel_stats': { - 'mean': mean_3d_rel, - 'std': std_3d_rel - }, - 'joint2d_rel_stats': { - 'mean': mean_2d_rel, - 'std': std_2d_rel - } - } - for name, stat_dict in stats.items(): - out_file = join(annot_dir, f'{name}.pkl') - with open(out_file, 'wb') as f: - pickle.dump(stat_dict, f) - print(f'Create statistic data file: {out_file}') - - -def load_testset(data_root, out_dir, valid_only=True): - """Load testing data, create annotation file and camera file. - - Args: - data_root: Directory of dataset. - out_dir: Directory to save annotation file. - valid_only: Only keep frames with valid_label == 1. - """ - _imgnames = [] - _centers = [] - _scales = [] - _joints_2d = [] - _joints_3d = [] - cameras = {} - - img_dir = join(out_dir, 'images') - os.makedirs(img_dir, exist_ok=True) - annot_dir = join(out_dir, 'annotations') - os.makedirs(annot_dir, exist_ok=True) - - for subj in test_subjects: - subj_path = join(data_root, 'test', f'TS{subj}') - num_frames = test_frame_nums[subj] - - # load annotations - annot_file = os.path.join(subj_path, 'annot_data.mat') - with h5py.File(annot_file, 'r') as fin: - annot2 = np.array(fin['annot2']).reshape((-1, 17, 2)) - annot3 = np.array(fin['annot3']).reshape((-1, 17, 3)) - valid = np.array(fin['valid_frame']).reshape(-1) - - # manually estimate camera intrinsics - fx, cx = np.linalg.lstsq( - annot3[:, :, [0, 2]].reshape((-1, 2)), - (annot2[:, :, 0] * annot3[:, :, 2]).reshape(-1, 1), - rcond=None)[0].flatten() - fy, cy = np.linalg.lstsq( - annot3[:, :, [1, 2]].reshape((-1, 2)), - (annot2[:, :, 1] * annot3[:, :, 2]).reshape(-1, 1), - rcond=None)[0].flatten() - if subj <= 4: - w, h = 2048, 2048 - else: - w, h = 1920, 1080 - cameras[f'TS{subj}'] = dict( - c=np.array([[cx], [cy]]), - f=np.array([[fx], [fy]]), - w=w, - h=h, - name=f'test_cam_{subj}') - - # get annotations - if valid_only: - valid_frames = np.nonzero(valid)[0] - else: - valid_frames = np.arange(num_frames) - joints_2d = annot2[valid_frames, :, :] - joints_3d = annot3[valid_frames, :, :] * 0.001 - - centers, scales, joints_2d, joints_3d = get_annotations( - joints_2d, joints_3d) - _centers.append(centers) - _scales.append(scales) - _joints_2d.append(joints_2d) - _joints_3d.append(joints_3d) - - # copy and rename images - for i in valid_frames: - imgname = f'TS{subj}_{i+1:06d}.jpg' - shutil.copyfile( - join(subj_path, 'imageSequence', f'img_{i+1:06d}.jpg'), - join(img_dir, imgname)) - _imgnames.append(imgname) - - 
_imgnames = np.array(_imgnames) - _centers = np.concatenate(_centers) - _scales = np.concatenate(_scales) - _joints_2d = np.concatenate(_joints_2d) - _joints_3d = np.concatenate(_joints_3d) - - if valid_only: - out_file = join(annot_dir, 'mpi_inf_3dhp_test_valid.npz') - else: - out_file = join(annot_dir, 'mpi_inf_3dhp_test_all.npz') - np.savez( - out_file, - imgname=_imgnames, - center=_centers, - scale=_scales, - part=_joints_2d, - S=_joints_3d) - print(f'Create annotation file for testset: {out_file}. ' - f'{len(_imgnames)} samples in total.') - - out_file = join(annot_dir, 'cameras_test.pkl') - with open(out_file, 'wb') as fout: - pickle.dump(cameras, fout) - print(f'Create camera file for testset: {out_file}.') - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('data_root', type=str, help='data root') - parser.add_argument( - 'out_dir', type=str, help='directory to save annotation files.') - args = parser.parse_args() - data_root = args.data_root - out_dir = args.out_dir - - load_trainset(data_root, out_dir) - load_testset(data_root, out_dir, valid_only=True) +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import pickle +import shutil +from os.path import join + +import cv2 +import h5py +import mmcv +import numpy as np +from scipy.io import loadmat + +train_subjects = [i for i in range(1, 9)] +test_subjects = [i for i in range(1, 7)] +train_seqs = [1, 2] +train_cams = [0, 1, 2, 4, 5, 6, 7, 8] +train_frame_nums = { + (1, 1): 6416, + (1, 2): 12430, + (2, 1): 6502, + (2, 2): 6081, + (3, 1): 12488, + (3, 2): 12283, + (4, 1): 6171, + (4, 2): 6675, + (5, 1): 12820, + (5, 2): 12312, + (6, 1): 6188, + (6, 2): 6145, + (7, 1): 6239, + (7, 2): 6320, + (8, 1): 6468, + (8, 2): 6054 +} +test_frame_nums = {1: 6151, 2: 6080, 3: 5838, 4: 6007, 5: 320, 6: 492} +train_img_size = (2048, 2048) +root_index = 14 +joints_17 = [7, 5, 14, 15, 16, 9, 10, 11, 23, 24, 25, 18, 19, 20, 4, 3, 6] + + +def get_pose_stats(kps): + """Get statistic information `mean` and `std` of pose data. + + Args: + kps (ndarray): keypoints in shape [..., K, D] where K and D is + the keypoint category number and dimension. + Returns: + mean (ndarray): [K, D] + """ + assert kps.ndim > 2 + K, D = kps.shape[-2:] + kps = kps.reshape(-1, K, D) + mean = kps.mean(axis=0) + std = kps.std(axis=0) + return mean, std + + +def get_annotations(joints_2d, joints_3d, scale_factor=1.2): + """Get annotations, including centers, scales, joints_2d and joints_3d. + + Args: + joints_2d: 2D joint coordinates in shape [N, K, 2], where N is the + frame number, K is the joint number. + joints_3d: 3D joint coordinates in shape [N, K, 3], where N is the + frame number, K is the joint number. + scale_factor: Scale factor of bounding box. Default: 1.2. 
+ Returns: + centers (ndarray): [N, 2] + scales (ndarray): [N,] + joints_2d (ndarray): [N, K, 3] + joints_3d (ndarray): [N, K, 4] + """ + # calculate joint visibility + visibility = (joints_2d[:, :, 0] >= 0) * \ + (joints_2d[:, :, 0] < train_img_size[0]) * \ + (joints_2d[:, :, 1] >= 0) * \ + (joints_2d[:, :, 1] < train_img_size[1]) + visibility = np.array(visibility, dtype=np.float32)[:, :, None] + joints_2d = np.concatenate([joints_2d, visibility], axis=-1) + joints_3d = np.concatenate([joints_3d, visibility], axis=-1) + + # calculate bounding boxes + bboxes = np.stack([ + np.min(joints_2d[:, :, 0], axis=1), + np.min(joints_2d[:, :, 1], axis=1), + np.max(joints_2d[:, :, 0], axis=1), + np.max(joints_2d[:, :, 1], axis=1) + ], + axis=1) + centers = np.stack([(bboxes[:, 0] + bboxes[:, 2]) / 2, + (bboxes[:, 1] + bboxes[:, 3]) / 2], + axis=1) + scales = scale_factor * np.max(bboxes[:, 2:] - bboxes[:, :2], axis=1) / 200 + + return centers, scales, joints_2d, joints_3d + + +def load_trainset(data_root, out_dir): + """Load training data, create annotation file and camera file. + Args: + data_root: Directory of dataset, which is organized in the following + hierarchy: + data_root + |-- train + |-- S1 + |-- Seq1 + |-- Seq2 + |-- S2 + |-- ... + |-- test + |-- TS1 + |-- TS2 + |-- ... + out_dir: Directory to save annotation file. + """ + _imgnames = [] + _centers = [] + _scales = [] + _joints_2d = [] + _joints_3d = [] + cameras = {} + + img_dir = join(out_dir, 'images') + os.makedirs(img_dir, exist_ok=True) + annot_dir = join(out_dir, 'annotations') + os.makedirs(annot_dir, exist_ok=True) + + for subj in train_subjects: + for seq in train_seqs: + seq_path = join(data_root, 'train', f'S{subj}', f'Seq{seq}') + num_frames = train_frame_nums[(subj, seq)] + + # load camera parametres + camera_file = join(seq_path, 'camera.calibration') + with open(camera_file, 'r') as fin: + lines = fin.readlines() + for cam in train_cams: + K = [float(s) for s in lines[cam * 7 + 5][11:-2].split()] + f = np.array([[K[0]], [K[5]]]) + c = np.array([[K[2]], [K[6]]]) + RT = np.array( + [float(s) for s in lines[cam * 7 + 6][11:-2].split()]) + RT = np.reshape(RT, (4, 4)) + R = RT[:3, :3] + # convert unit from millimeter to meter + T = RT[:3, 3:] * 0.001 + size = [int(s) for s in lines[cam * 7 + 3][14:].split()] + w, h = size + cam_param = dict( + R=R, T=T, c=c, f=f, w=w, h=h, name=f'train_cam_{cam}') + cameras[f'S{subj}_Seq{seq}_Cam{cam}'] = cam_param + + # load annotations + annot_file = os.path.join(seq_path, 'annot.mat') + annot2 = loadmat(annot_file)['annot2'] + annot3 = loadmat(annot_file)['annot3'] + for cam in train_cams: + # load 2D and 3D annotations + joints_2d = np.reshape(annot2[cam][0][:num_frames], + (num_frames, 28, 2))[:, joints_17] + joints_3d = np.reshape(annot3[cam][0][:num_frames], + (num_frames, 28, 3))[:, joints_17] + joints_3d = joints_3d * 0.001 + centers, scales, joints_2d, joints_3d = get_annotations( + joints_2d, joints_3d) + _centers.append(centers) + _scales.append(scales) + _joints_2d.append(joints_2d) + _joints_3d.append(joints_3d) + + # extract frames from video + video_path = join(seq_path, 'imageSequence', + f'video_{cam}.avi') + video = mmcv.VideoReader(video_path) + for i in mmcv.track_iter_progress(range(num_frames)): + img = video.read() + if img is None: + break + imgname = f'S{subj}_Seq{seq}_Cam{cam}_{i+1:06d}.jpg' + _imgnames.append(imgname) + cv2.imwrite(join(img_dir, imgname), img) + + _imgnames = np.array(_imgnames) + _centers = np.concatenate(_centers) + _scales = 
np.concatenate(_scales) + _joints_2d = np.concatenate(_joints_2d) + _joints_3d = np.concatenate(_joints_3d) + + out_file = join(annot_dir, 'mpi_inf_3dhp_train.npz') + np.savez( + out_file, + imgname=_imgnames, + center=_centers, + scale=_scales, + part=_joints_2d, + S=_joints_3d) + print(f'Create annotation file for trainset: {out_file}. ' + f'{len(_imgnames)} samples in total.') + + out_file = join(annot_dir, 'cameras_train.pkl') + with open(out_file, 'wb') as fout: + pickle.dump(cameras, fout) + print(f'Create camera file for trainset: {out_file}.') + + # get `mean` and `std` of pose data + _joints_3d = _joints_3d[..., :3] # remove visibility + mean_3d, std_3d = get_pose_stats(_joints_3d) + + _joints_2d = _joints_2d[..., :2] # remove visibility + mean_2d, std_2d = get_pose_stats(_joints_2d) + + # centered around root + _joints_3d_rel = _joints_3d - _joints_3d[..., root_index:root_index + 1, :] + mean_3d_rel, std_3d_rel = get_pose_stats(_joints_3d_rel) + mean_3d_rel[root_index] = mean_3d[root_index] + std_3d_rel[root_index] = std_3d[root_index] + + _joints_2d_rel = _joints_2d - _joints_2d[..., root_index:root_index + 1, :] + mean_2d_rel, std_2d_rel = get_pose_stats(_joints_2d_rel) + mean_2d_rel[root_index] = mean_2d[root_index] + std_2d_rel[root_index] = std_2d[root_index] + + stats = { + 'joint3d_stats': { + 'mean': mean_3d, + 'std': std_3d + }, + 'joint2d_stats': { + 'mean': mean_2d, + 'std': std_2d + }, + 'joint3d_rel_stats': { + 'mean': mean_3d_rel, + 'std': std_3d_rel + }, + 'joint2d_rel_stats': { + 'mean': mean_2d_rel, + 'std': std_2d_rel + } + } + for name, stat_dict in stats.items(): + out_file = join(annot_dir, f'{name}.pkl') + with open(out_file, 'wb') as f: + pickle.dump(stat_dict, f) + print(f'Create statistic data file: {out_file}') + + +def load_testset(data_root, out_dir, valid_only=True): + """Load testing data, create annotation file and camera file. + + Args: + data_root: Directory of dataset. + out_dir: Directory to save annotation file. + valid_only: Only keep frames with valid_label == 1. 
+ """ + _imgnames = [] + _centers = [] + _scales = [] + _joints_2d = [] + _joints_3d = [] + cameras = {} + + img_dir = join(out_dir, 'images') + os.makedirs(img_dir, exist_ok=True) + annot_dir = join(out_dir, 'annotations') + os.makedirs(annot_dir, exist_ok=True) + + for subj in test_subjects: + subj_path = join(data_root, 'test', f'TS{subj}') + num_frames = test_frame_nums[subj] + + # load annotations + annot_file = os.path.join(subj_path, 'annot_data.mat') + with h5py.File(annot_file, 'r') as fin: + annot2 = np.array(fin['annot2']).reshape((-1, 17, 2)) + annot3 = np.array(fin['annot3']).reshape((-1, 17, 3)) + valid = np.array(fin['valid_frame']).reshape(-1) + + # manually estimate camera intrinsics + fx, cx = np.linalg.lstsq( + annot3[:, :, [0, 2]].reshape((-1, 2)), + (annot2[:, :, 0] * annot3[:, :, 2]).reshape(-1, 1), + rcond=None)[0].flatten() + fy, cy = np.linalg.lstsq( + annot3[:, :, [1, 2]].reshape((-1, 2)), + (annot2[:, :, 1] * annot3[:, :, 2]).reshape(-1, 1), + rcond=None)[0].flatten() + if subj <= 4: + w, h = 2048, 2048 + else: + w, h = 1920, 1080 + cameras[f'TS{subj}'] = dict( + c=np.array([[cx], [cy]]), + f=np.array([[fx], [fy]]), + w=w, + h=h, + name=f'test_cam_{subj}') + + # get annotations + if valid_only: + valid_frames = np.nonzero(valid)[0] + else: + valid_frames = np.arange(num_frames) + joints_2d = annot2[valid_frames, :, :] + joints_3d = annot3[valid_frames, :, :] * 0.001 + + centers, scales, joints_2d, joints_3d = get_annotations( + joints_2d, joints_3d) + _centers.append(centers) + _scales.append(scales) + _joints_2d.append(joints_2d) + _joints_3d.append(joints_3d) + + # copy and rename images + for i in valid_frames: + imgname = f'TS{subj}_{i+1:06d}.jpg' + shutil.copyfile( + join(subj_path, 'imageSequence', f'img_{i+1:06d}.jpg'), + join(img_dir, imgname)) + _imgnames.append(imgname) + + _imgnames = np.array(_imgnames) + _centers = np.concatenate(_centers) + _scales = np.concatenate(_scales) + _joints_2d = np.concatenate(_joints_2d) + _joints_3d = np.concatenate(_joints_3d) + + if valid_only: + out_file = join(annot_dir, 'mpi_inf_3dhp_test_valid.npz') + else: + out_file = join(annot_dir, 'mpi_inf_3dhp_test_all.npz') + np.savez( + out_file, + imgname=_imgnames, + center=_centers, + scale=_scales, + part=_joints_2d, + S=_joints_3d) + print(f'Create annotation file for testset: {out_file}. 
' + f'{len(_imgnames)} samples in total.') + + out_file = join(annot_dir, 'cameras_test.pkl') + with open(out_file, 'wb') as fout: + pickle.dump(cameras, fout) + print(f'Create camera file for testset: {out_file}.') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('data_root', type=str, help='data root') + parser.add_argument( + 'out_dir', type=str, help='directory to save annotation files.') + args = parser.parse_args() + data_root = args.data_root + out_dir = args.out_dir + + load_trainset(data_root, out_dir) + load_testset(data_root, out_dir, valid_only=True) diff --git a/tools/dataset_converters/scripts/preprocess_300w.sh b/tools/dataset_converters/scripts/preprocess_300w.sh index bf405b5cc7..df6c04da21 100644 --- a/tools/dataset_converters/scripts/preprocess_300w.sh +++ b/tools/dataset_converters/scripts/preprocess_300w.sh @@ -1,8 +1,8 @@ -#!/usr/bin/env bash - -DOWNLOAD_DIR=$1 -DATA_ROOT=$2 - -tar -zxvf $DOWNLOAD_DIR/300w/raw/300w.tar.gz.00 -C $DOWNLOAD_DIR/ -tar -xvf $DOWNLOAD_DIR/300w/300w.tar.00 -C $DATA_ROOT/ -rm -rf $DOWNLOAD_DIR/300w +#!/usr/bin/env bash + +DOWNLOAD_DIR=$1 +DATA_ROOT=$2 + +tar -zxvf $DOWNLOAD_DIR/300w/raw/300w.tar.gz.00 -C $DOWNLOAD_DIR/ +tar -xvf $DOWNLOAD_DIR/300w/300w.tar.00 -C $DATA_ROOT/ +rm -rf $DOWNLOAD_DIR/300w diff --git a/tools/dataset_converters/scripts/preprocess_aic.sh b/tools/dataset_converters/scripts/preprocess_aic.sh index 726a61ca26..5060a02506 100644 --- a/tools/dataset_converters/scripts/preprocess_aic.sh +++ b/tools/dataset_converters/scripts/preprocess_aic.sh @@ -1,7 +1,7 @@ -#!/usr/bin/env bash - -DOWNLOAD_DIR=$1 -DATA_ROOT=$2 - -tar -zxvf $DOWNLOAD_DIR/AI_Challenger/raw/AI_Challenger.tar.gz -C $DATA_ROOT -rm -rf $DOWNLOAD_DIR/AI_Challenger +#!/usr/bin/env bash + +DOWNLOAD_DIR=$1 +DATA_ROOT=$2 + +tar -zxvf $DOWNLOAD_DIR/AI_Challenger/raw/AI_Challenger.tar.gz -C $DATA_ROOT +rm -rf $DOWNLOAD_DIR/AI_Challenger diff --git a/tools/dataset_converters/scripts/preprocess_ap10k.sh b/tools/dataset_converters/scripts/preprocess_ap10k.sh index a4c330157b..717ed7c85f 100644 --- a/tools/dataset_converters/scripts/preprocess_ap10k.sh +++ b/tools/dataset_converters/scripts/preprocess_ap10k.sh @@ -1,8 +1,8 @@ -#!/usr/bin/env bash - -DOWNLOAD_DIR=$1 -DATA_ROOT=$2 - -tar -zxvf $DOWNLOAD_DIR/AP-10K/raw/AP-10K.tar.gz.00 -C $DOWNLOAD_DIR/ -tar -xvf $DOWNLOAD_DIR/AP-10K/AP-10K.tar.00 -C $DATA_ROOT/ -rm -rf $DOWNLOAD_DIR/AP-10K +#!/usr/bin/env bash + +DOWNLOAD_DIR=$1 +DATA_ROOT=$2 + +tar -zxvf $DOWNLOAD_DIR/AP-10K/raw/AP-10K.tar.gz.00 -C $DOWNLOAD_DIR/ +tar -xvf $DOWNLOAD_DIR/AP-10K/AP-10K.tar.00 -C $DATA_ROOT/ +rm -rf $DOWNLOAD_DIR/AP-10K diff --git a/tools/dataset_converters/scripts/preprocess_coco2017.sh b/tools/dataset_converters/scripts/preprocess_coco2017.sh index 853975e26b..c0ce9157a0 100644 --- a/tools/dataset_converters/scripts/preprocess_coco2017.sh +++ b/tools/dataset_converters/scripts/preprocess_coco2017.sh @@ -1,9 +1,9 @@ -#!/usr/bin/env bash - -DOWNLOAD_DIR=$1 -DATA_ROOT=$2 - -unzip $DOWNLOAD_DIR/COCO_2017/raw/Images/val2017.zip -d $DATA_ROOT -unzip $DOWNLOAD_DIR/COCO_2017/raw/Images/train2017.zip -d $DATA_ROOT -unzip $DOWNLOAD_DIR/COCO_2017/raw/Annotations/annotations_trainval2017.zip -d $DATA_ROOT -rm -rf $DOWNLOAD_DIR/COCO_2017 +#!/usr/bin/env bash + +DOWNLOAD_DIR=$1 +DATA_ROOT=$2 + +unzip $DOWNLOAD_DIR/COCO_2017/raw/Images/val2017.zip -d $DATA_ROOT +unzip $DOWNLOAD_DIR/COCO_2017/raw/Images/train2017.zip -d $DATA_ROOT +unzip $DOWNLOAD_DIR/COCO_2017/raw/Annotations/annotations_trainval2017.zip -d 
$DATA_ROOT +rm -rf $DOWNLOAD_DIR/COCO_2017 diff --git a/tools/dataset_converters/scripts/preprocess_crowdpose.sh b/tools/dataset_converters/scripts/preprocess_crowdpose.sh index 3215239585..b18447883f 100644 --- a/tools/dataset_converters/scripts/preprocess_crowdpose.sh +++ b/tools/dataset_converters/scripts/preprocess_crowdpose.sh @@ -1,7 +1,7 @@ -#!/usr/bin/env bash - -DOWNLOAD_DIR=$1 -DATA_ROOT=$2 - -tar -zxvf $DOWNLOAD_DIR/CrowdPose/raw/CrowdPose.tar.gz -C $DATA_ROOT -rm -rf $DOWNLOAD_DIR/CrowdPose +#!/usr/bin/env bash + +DOWNLOAD_DIR=$1 +DATA_ROOT=$2 + +tar -zxvf $DOWNLOAD_DIR/CrowdPose/raw/CrowdPose.tar.gz -C $DATA_ROOT +rm -rf $DOWNLOAD_DIR/CrowdPose diff --git a/tools/dataset_converters/scripts/preprocess_freihand.sh b/tools/dataset_converters/scripts/preprocess_freihand.sh index b3567cb5d7..b76fc1be64 100644 --- a/tools/dataset_converters/scripts/preprocess_freihand.sh +++ b/tools/dataset_converters/scripts/preprocess_freihand.sh @@ -1,7 +1,7 @@ -#!/usr/bin/env bash - -DOWNLOAD_DIR=$1 -DATA_ROOT=$2 - -tar -zxvf $DOWNLOAD_DIR/FreiHAND/raw/FreiHAND.tar.gz -C $DATA_ROOT -rm -rf $DOWNLOAD_DIR/FreiHAND +#!/usr/bin/env bash + +DOWNLOAD_DIR=$1 +DATA_ROOT=$2 + +tar -zxvf $DOWNLOAD_DIR/FreiHAND/raw/FreiHAND.tar.gz -C $DATA_ROOT +rm -rf $DOWNLOAD_DIR/FreiHAND diff --git a/tools/dataset_converters/scripts/preprocess_hagrid.sh b/tools/dataset_converters/scripts/preprocess_hagrid.sh index de2356541c..ddddd95b9f 100644 --- a/tools/dataset_converters/scripts/preprocess_hagrid.sh +++ b/tools/dataset_converters/scripts/preprocess_hagrid.sh @@ -1,8 +1,8 @@ -#!/usr/bin/env bash - -DOWNLOAD_DIR=$1 -DATA_ROOT=$2 - -cat $DOWNLOAD_DIR/HaGRID/raw/*.tar.gz.* | tar -xvz -C $DATA_ROOT/.. -tar -xvf $DATA_ROOT/HaGRID.tar -C $DATA_ROOT/.. -rm -rf $DOWNLOAD_DIR/HaGRID +#!/usr/bin/env bash + +DOWNLOAD_DIR=$1 +DATA_ROOT=$2 + +cat $DOWNLOAD_DIR/HaGRID/raw/*.tar.gz.* | tar -xvz -C $DATA_ROOT/.. +tar -xvf $DATA_ROOT/HaGRID.tar -C $DATA_ROOT/.. 
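# Note on the split archives handled by these preprocess scripts: archives
# that appear to be too large for single-file hosting are either re-assembled
# with `cat` before extraction (as in the two lines above), or unpacked in two
# steps, `Foo.tar.gz.00` -> `Foo.tar.00` -> files (as in the 300W, AP-10K,
# Halpe, OneHand10K and WFLW scripts). A generic, hedged form of the chunked
# pattern, assuming chunks named archive.tar.gz.00, .01, ...:
#
#   cat "$DOWNLOAD_DIR"/dataset/raw/archive.tar.gz.* | tar -xvz -C "$DATA_ROOT"
#
# Shell globs expand in sorted order, which is why zero-padded suffixes keep
# the chunks in the right sequence.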
+rm -rf $DOWNLOAD_DIR/HaGRID diff --git a/tools/dataset_converters/scripts/preprocess_halpe.sh b/tools/dataset_converters/scripts/preprocess_halpe.sh index 103d6202f9..c4738e543b 100644 --- a/tools/dataset_converters/scripts/preprocess_halpe.sh +++ b/tools/dataset_converters/scripts/preprocess_halpe.sh @@ -1,8 +1,8 @@ -#!/usr/bin/env bash - -DOWNLOAD_DIR=$1 -DATA_ROOT=$2 - -tar -zxvf $DOWNLOAD_DIR/Halpe/raw/Halpe.tar.gz.00 -C $DOWNLOAD_DIR/ -tar -xvf $DOWNLOAD_DIR/Halpe/Halpe.tar.00 -C $DATA_ROOT/ -rm -rf $DOWNLOAD_DIR/Halpe +#!/usr/bin/env bash + +DOWNLOAD_DIR=$1 +DATA_ROOT=$2 + +tar -zxvf $DOWNLOAD_DIR/Halpe/raw/Halpe.tar.gz.00 -C $DOWNLOAD_DIR/ +tar -xvf $DOWNLOAD_DIR/Halpe/Halpe.tar.00 -C $DATA_ROOT/ +rm -rf $DOWNLOAD_DIR/Halpe diff --git a/tools/dataset_converters/scripts/preprocess_lapa.sh b/tools/dataset_converters/scripts/preprocess_lapa.sh index 977442c1b8..b09c1b11fb 100644 --- a/tools/dataset_converters/scripts/preprocess_lapa.sh +++ b/tools/dataset_converters/scripts/preprocess_lapa.sh @@ -1,7 +1,7 @@ -#!/usr/bin/env bash - -DOWNLOAD_DIR=$1 -DATA_ROOT=$2 - -tar -zxvf $DOWNLOAD_DIR/LaPa/raw/LaPa.tar.gz -C $DATA_ROOT -rm -rf $DOWNLOAD_DIR/LaPa +#!/usr/bin/env bash + +DOWNLOAD_DIR=$1 +DATA_ROOT=$2 + +tar -zxvf $DOWNLOAD_DIR/LaPa/raw/LaPa.tar.gz -C $DATA_ROOT +rm -rf $DOWNLOAD_DIR/LaPa diff --git a/tools/dataset_converters/scripts/preprocess_mpii.sh b/tools/dataset_converters/scripts/preprocess_mpii.sh index 287b431897..b2ac1c3699 100644 --- a/tools/dataset_converters/scripts/preprocess_mpii.sh +++ b/tools/dataset_converters/scripts/preprocess_mpii.sh @@ -1,7 +1,7 @@ -#!/usr/bin/env bash - -DOWNLOAD_DIR=$1 -DATA_ROOT=$2 - -tar -zxvf $DOWNLOAD_DIR/MPII_Human_Pose/raw/MPII_Human_Pose.tar.gz -C $DATA_ROOT -rm -rf $DOWNLOAD_DIR/MPII_Human_Pose +#!/usr/bin/env bash + +DOWNLOAD_DIR=$1 +DATA_ROOT=$2 + +tar -zxvf $DOWNLOAD_DIR/MPII_Human_Pose/raw/MPII_Human_Pose.tar.gz -C $DATA_ROOT +rm -rf $DOWNLOAD_DIR/MPII_Human_Pose diff --git a/tools/dataset_converters/scripts/preprocess_onehand10k.sh b/tools/dataset_converters/scripts/preprocess_onehand10k.sh index 47f6e8942c..a96fc271ef 100644 --- a/tools/dataset_converters/scripts/preprocess_onehand10k.sh +++ b/tools/dataset_converters/scripts/preprocess_onehand10k.sh @@ -1,8 +1,8 @@ -#!/usr/bin/env bash - -DOWNLOAD_DIR=$1 -DATA_ROOT=$2 - -tar -zxvf $DOWNLOAD_DIR/OneHand10K/raw/OneHand10K.tar.gz.00 -C $DOWNLOAD_DIR/ -tar -xvf $DOWNLOAD_DIR/OneHand10K/OneHand10K.tar.00 -C $DATA_ROOT/ -rm -rf $DOWNLOAD_DIR/OneHand10K +#!/usr/bin/env bash + +DOWNLOAD_DIR=$1 +DATA_ROOT=$2 + +tar -zxvf $DOWNLOAD_DIR/OneHand10K/raw/OneHand10K.tar.gz.00 -C $DOWNLOAD_DIR/ +tar -xvf $DOWNLOAD_DIR/OneHand10K/OneHand10K.tar.00 -C $DATA_ROOT/ +rm -rf $DOWNLOAD_DIR/OneHand10K diff --git a/tools/dataset_converters/scripts/preprocess_wflw.sh b/tools/dataset_converters/scripts/preprocess_wflw.sh index 723d1d158e..cec3dc986a 100644 --- a/tools/dataset_converters/scripts/preprocess_wflw.sh +++ b/tools/dataset_converters/scripts/preprocess_wflw.sh @@ -1,8 +1,8 @@ -#!/usr/bin/env bash - -DOWNLOAD_DIR=$1 -DATA_ROOT=$2 - -tar -zxvf $DOWNLOAD_DIR/WFLW/raw/WFLW.tar.gz.00 -C $DOWNLOAD_DIR/ -tar -xvf $DOWNLOAD_DIR/WFLW/WFLW.tar.00 -C $DATA_ROOT/ -rm -rf $DOWNLOAD_DIR/WFLW +#!/usr/bin/env bash + +DOWNLOAD_DIR=$1 +DATA_ROOT=$2 + +tar -zxvf $DOWNLOAD_DIR/WFLW/raw/WFLW.tar.gz.00 -C $DOWNLOAD_DIR/ +tar -xvf $DOWNLOAD_DIR/WFLW/WFLW.tar.00 -C $DATA_ROOT/ +rm -rf $DOWNLOAD_DIR/WFLW diff --git a/tools/dist_test.sh b/tools/dist_test.sh index 5fb276b3d4..8ec34ba528 100644 --- a/tools/dist_test.sh +++ 
b/tools/dist_test.sh @@ -1,23 +1,23 @@ -#!/usr/bin/env bash -# Copyright (c) OpenMMLab. All rights reserved. - -CONFIG=$1 -CHECKPOINT=$2 -GPUS=$3 -NNODES=${NNODES:-1} -NODE_RANK=${NODE_RANK:-0} -PORT=${PORT:-29500} -MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} - -PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ -python -m torch.distributed.launch \ - --nnodes=$NNODES \ - --node_rank=$NODE_RANK \ - --master_addr=$MASTER_ADDR \ - --nproc_per_node=$GPUS \ - --master_port=$PORT \ - $(dirname "$0")/test.py \ - $CONFIG \ - $CHECKPOINT \ - --launcher pytorch \ - ${@:4} +#!/usr/bin/env bash +# Copyright (c) OpenMMLab. All rights reserved. + +CONFIG=$1 +CHECKPOINT=$2 +GPUS=$3 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} +PORT=${PORT:-29500} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch \ + --nnodes=$NNODES \ + --node_rank=$NODE_RANK \ + --master_addr=$MASTER_ADDR \ + --nproc_per_node=$GPUS \ + --master_port=$PORT \ + $(dirname "$0")/test.py \ + $CONFIG \ + $CHECKPOINT \ + --launcher pytorch \ + ${@:4} diff --git a/tools/dist_train.sh b/tools/dist_train.sh index 2299ad510a..519735e7b9 100755 --- a/tools/dist_train.sh +++ b/tools/dist_train.sh @@ -1,20 +1,20 @@ -#!/usr/bin/env bash -# Copyright (c) OpenMMLab. All rights reserved. - -CONFIG=$1 -GPUS=$2 -NNODES=${NNODES:-1} -NODE_RANK=${NODE_RANK:-0} -PORT=${PORT:-29500} -MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} - -PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ -python -m torch.distributed.launch \ - --nnodes=$NNODES \ - --node_rank=$NODE_RANK \ - --master_addr=$MASTER_ADDR \ - --nproc_per_node=$GPUS \ - --master_port=$PORT \ - $(dirname "$0")/train.py \ - $CONFIG \ - --launcher pytorch ${@:3} +#!/usr/bin/env bash +# Copyright (c) OpenMMLab. All rights reserved. + +CONFIG=$1 +GPUS=$2 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} +PORT=${PORT:-29500} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch \ + --nnodes=$NNODES \ + --node_rank=$NODE_RANK \ + --master_addr=$MASTER_ADDR \ + --nproc_per_node=$GPUS \ + --master_port=$PORT \ + $(dirname "$0")/train.py \ + $CONFIG \ + --launcher pytorch ${@:3} diff --git a/tools/misc/browse_dataset.py b/tools/misc/browse_dataset.py index 5a914476ee..0368aa5de3 100644 --- a/tools/misc/browse_dataset.py +++ b/tools/misc/browse_dataset.py @@ -1,166 +1,166 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import argparse -import os -import os.path as osp - -import mmcv -import mmengine -import mmengine.fileio as fileio -import numpy as np -from mmengine import Config, DictAction -from mmengine.registry import build_from_cfg, init_default_scope -from mmengine.structures import InstanceData - -from mmpose.registry import DATASETS, VISUALIZERS -from mmpose.structures import PoseDataSample - - -def parse_args(): - parser = argparse.ArgumentParser(description='Browse a dataset') - parser.add_argument('config', help='train config file path') - parser.add_argument( - '--output-dir', - default=None, - type=str, - help='If there is no display interface, you can save it.') - parser.add_argument('--not-show', default=False, action='store_true') - parser.add_argument( - '--phase', - default='train', - type=str, - choices=['train', 'test', 'val'], - help='phase of dataset to visualize, accept "train" "test" and "val".' 
- ' Defaults to "train".') - parser.add_argument( - '--show-interval', - type=float, - default=2, - help='the interval of show (s)') - parser.add_argument( - '--mode', - default='transformed', - type=str, - choices=['original', 'transformed'], - help='display mode; display original pictures or transformed ' - 'pictures. "original" means to show images load from disk' - '; "transformed" means to show images after transformed;' - 'Defaults to "transformed".') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. If the value to ' - 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' - 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') - args = parser.parse_args() - return args - - -def generate_dup_file_name(out_file): - """Automatically rename out_file when duplicated file exists. - - This case occurs when there is multiple instances on one image. - """ - if out_file and osp.exists(out_file): - img_name, postfix = osp.basename(out_file).rsplit('.', 1) - exist_files = tuple( - filter(lambda f: f.startswith(img_name), - os.listdir(osp.dirname(out_file)))) - if len(exist_files) > 0: - img_path = f'{img_name}({len(exist_files)}).{postfix}' - out_file = osp.join(osp.dirname(out_file), img_path) - return out_file - - -def main(): - args = parse_args() - cfg = Config.fromfile(args.config) - if args.cfg_options is not None: - cfg.merge_from_dict(args.cfg_options) - backend_args = cfg.get('backend_args', dict(backend='local')) - - # register all modules in mmpose into the registries - scope = cfg.get('default_scope', 'mmpose') - if scope is not None: - init_default_scope(scope) - - if args.mode == 'original': - cfg[f'{args.phase}_dataloader'].dataset.pipeline = [] - else: - # pack transformed keypoints for visualization - cfg[f'{args.phase}_dataloader'].dataset.pipeline[ - -1].pack_transformed = True - - dataset = build_from_cfg(cfg[f'{args.phase}_dataloader'].dataset, DATASETS) - - visualizer = VISUALIZERS.build(cfg.visualizer) - visualizer.set_dataset_meta(dataset.metainfo) - - progress_bar = mmengine.ProgressBar(len(dataset)) - - idx = 0 - item = dataset[0] - - while idx < len(dataset): - idx += 1 - next_item = None if idx >= len(dataset) else dataset[idx] - - if args.mode == 'original': - if next_item is not None and item['img_path'] == next_item[ - 'img_path']: - # merge annotations for one image - item['keypoints'] = np.concatenate( - (item['keypoints'], next_item['keypoints'])) - item['keypoints_visible'] = np.concatenate( - (item['keypoints_visible'], - next_item['keypoints_visible'])) - item['bbox'] = np.concatenate( - (item['bbox'], next_item['bbox'])) - progress_bar.update() - continue - else: - img_path = item['img_path'] - img_bytes = fileio.get(img_path, backend_args=backend_args) - img = mmcv.imfrombytes(img_bytes, channel_order='bgr') - - # forge pseudo data_sample - gt_instances = InstanceData() - gt_instances.keypoints = item['keypoints'] - gt_instances.keypoints_visible = item['keypoints_visible'] - gt_instances.bboxes = item['bbox'] - data_sample = PoseDataSample() - data_sample.gt_instances = gt_instances - - item = next_item - else: - img = item['inputs'].permute(1, 2, 0).numpy() - data_sample = item['data_samples'] - img_path = data_sample.img_path - item = next_item - - out_file = osp.join( - 
args.output_dir, - osp.basename(img_path)) if args.output_dir is not None else None - out_file = generate_dup_file_name(out_file) - - img = mmcv.bgr2rgb(img) - - visualizer.add_datasample( - osp.basename(img_path), - img, - data_sample, - draw_pred=False, - draw_bbox=(args.mode == 'original'), - draw_heatmap=True, - show=not args.not_show, - wait_time=args.show_interval, - out_file=out_file) - - progress_bar.update() - - -if __name__ == '__main__': - main() +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp + +import mmcv +import mmengine +import mmengine.fileio as fileio +import numpy as np +from mmengine import Config, DictAction +from mmengine.registry import build_from_cfg, init_default_scope +from mmengine.structures import InstanceData + +from mmpose.registry import DATASETS, VISUALIZERS +from mmpose.structures import PoseDataSample + + +def parse_args(): + parser = argparse.ArgumentParser(description='Browse a dataset') + parser.add_argument('config', help='train config file path') + parser.add_argument( + '--output-dir', + default=None, + type=str, + help='If there is no display interface, you can save it.') + parser.add_argument('--not-show', default=False, action='store_true') + parser.add_argument( + '--phase', + default='train', + type=str, + choices=['train', 'test', 'val'], + help='phase of dataset to visualize, accept "train" "test" and "val".' + ' Defaults to "train".') + parser.add_argument( + '--show-interval', + type=float, + default=2, + help='the interval of show (s)') + parser.add_argument( + '--mode', + default='transformed', + type=str, + choices=['original', 'transformed'], + help='display mode; display original pictures or transformed ' + 'pictures. "original" means to show images load from disk' + '; "transformed" means to show images after transformed;' + 'Defaults to "transformed".') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + return args + + +def generate_dup_file_name(out_file): + """Automatically rename out_file when duplicated file exists. + + This case occurs when there is multiple instances on one image. 
+ """ + if out_file and osp.exists(out_file): + img_name, postfix = osp.basename(out_file).rsplit('.', 1) + exist_files = tuple( + filter(lambda f: f.startswith(img_name), + os.listdir(osp.dirname(out_file)))) + if len(exist_files) > 0: + img_path = f'{img_name}({len(exist_files)}).{postfix}' + out_file = osp.join(osp.dirname(out_file), img_path) + return out_file + + +def main(): + args = parse_args() + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + backend_args = cfg.get('backend_args', dict(backend='local')) + + # register all modules in mmpose into the registries + scope = cfg.get('default_scope', 'mmpose') + if scope is not None: + init_default_scope(scope) + + if args.mode == 'original': + cfg[f'{args.phase}_dataloader'].dataset.pipeline = [] + else: + # pack transformed keypoints for visualization + cfg[f'{args.phase}_dataloader'].dataset.pipeline[ + -1].pack_transformed = True + + dataset = build_from_cfg(cfg[f'{args.phase}_dataloader'].dataset, DATASETS) + + visualizer = VISUALIZERS.build(cfg.visualizer) + visualizer.set_dataset_meta(dataset.metainfo) + + progress_bar = mmengine.ProgressBar(len(dataset)) + + idx = 0 + item = dataset[0] + + while idx < len(dataset): + idx += 1 + next_item = None if idx >= len(dataset) else dataset[idx] + + if args.mode == 'original': + if next_item is not None and item['img_path'] == next_item[ + 'img_path']: + # merge annotations for one image + item['keypoints'] = np.concatenate( + (item['keypoints'], next_item['keypoints'])) + item['keypoints_visible'] = np.concatenate( + (item['keypoints_visible'], + next_item['keypoints_visible'])) + item['bbox'] = np.concatenate( + (item['bbox'], next_item['bbox'])) + progress_bar.update() + continue + else: + img_path = item['img_path'] + img_bytes = fileio.get(img_path, backend_args=backend_args) + img = mmcv.imfrombytes(img_bytes, channel_order='bgr') + + # forge pseudo data_sample + gt_instances = InstanceData() + gt_instances.keypoints = item['keypoints'] + gt_instances.keypoints_visible = item['keypoints_visible'] + gt_instances.bboxes = item['bbox'] + data_sample = PoseDataSample() + data_sample.gt_instances = gt_instances + + item = next_item + else: + img = item['inputs'].permute(1, 2, 0).numpy() + data_sample = item['data_samples'] + img_path = data_sample.img_path + item = next_item + + out_file = osp.join( + args.output_dir, + osp.basename(img_path)) if args.output_dir is not None else None + out_file = generate_dup_file_name(out_file) + + img = mmcv.bgr2rgb(img) + + visualizer.add_datasample( + osp.basename(img_path), + img, + data_sample, + draw_pred=False, + draw_bbox=(args.mode == 'original'), + draw_heatmap=True, + show=not args.not_show, + wait_time=args.show_interval, + out_file=out_file) + + progress_bar.update() + + +if __name__ == '__main__': + main() diff --git a/tools/misc/keypoints2coco_without_mmdet.py b/tools/misc/keypoints2coco_without_mmdet.py index 63220fcb19..e70abd52e9 100644 --- a/tools/misc/keypoints2coco_without_mmdet.py +++ b/tools/misc/keypoints2coco_without_mmdet.py @@ -1,146 +1,146 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import json -import os -from argparse import ArgumentParser - -from mmcv import track_iter_progress -from PIL import Image -from xtcocotools.coco import COCO - -from mmpose.apis import inference_top_down_pose_model, init_pose_model - - -def main(): - """Visualize the demo images. - - pose_keypoints require the json_file containing boxes. 
- """ - parser = ArgumentParser() - parser.add_argument('pose_config', help='Config file for detection') - parser.add_argument('pose_checkpoint', help='Checkpoint file') - parser.add_argument('--img-root', type=str, default='', help='Image root') - parser.add_argument( - '--json-file', - type=str, - default='', - help='Json file containing image person bboxes in COCO format.') - parser.add_argument( - '--out-json-file', - type=str, - default='', - help='Output json contains pseudolabeled annotation') - parser.add_argument( - '--show', - action='store_true', - default=False, - help='whether to show img') - parser.add_argument( - '--device', default='cuda:0', help='Device used for inference') - parser.add_argument( - '--kpt-thr', type=float, default=0.3, help='Keypoint score threshold') - - args = parser.parse_args() - - coco = COCO(args.json_file) - # build the pose model from a config file and a checkpoint file - pose_model = init_pose_model( - args.pose_config, args.pose_checkpoint, device=args.device.lower()) - - dataset = pose_model.cfg.data['test']['type'] - - img_keys = list(coco.imgs.keys()) - - # optional - return_heatmap = False - - # e.g. use ('backbone', ) to return backbone feature - output_layer_names = None - - categories = [{'id': 1, 'name': 'person'}] - img_anno_dict = {'images': [], 'annotations': [], 'categories': categories} - - # process each image - ann_uniq_id = int(0) - for i in track_iter_progress(range(len(img_keys))): - # get bounding box annotations - image_id = img_keys[i] - image = coco.loadImgs(image_id)[0] - image_name = os.path.join(args.img_root, image['file_name']) - - width, height = Image.open(image_name).size - ann_ids = coco.getAnnIds(image_id) - - # make person bounding boxes - person_results = [] - for ann_id in ann_ids: - person = {} - ann = coco.anns[ann_id] - # bbox format is 'xywh' - person['bbox'] = ann['bbox'] - person_results.append(person) - - pose_results, returned_outputs = inference_top_down_pose_model( - pose_model, - image_name, - person_results, - bbox_thr=None, - format='xywh', - dataset=dataset, - return_heatmap=return_heatmap, - outputs=output_layer_names) - - # add output of model and bboxes to dict - for indx, i in enumerate(pose_results): - pose_results[indx]['keypoints'][ - pose_results[indx]['keypoints'][:, 2] < args.kpt_thr, :3] = 0 - pose_results[indx]['keypoints'][ - pose_results[indx]['keypoints'][:, 2] >= args.kpt_thr, 2] = 2 - x = int(pose_results[indx]['bbox'][0]) - y = int(pose_results[indx]['bbox'][1]) - w = int(pose_results[indx]['bbox'][2] - - pose_results[indx]['bbox'][0]) - h = int(pose_results[indx]['bbox'][3] - - pose_results[indx]['bbox'][1]) - bbox = [x, y, w, h] - area = round((w * h), 0) - - images = { - 'file_name': image_name.split('/')[-1], - 'height': height, - 'width': width, - 'id': int(image_id) - } - - annotations = { - 'keypoints': [ - int(i) for i in pose_results[indx]['keypoints'].reshape( - -1).tolist() - ], - 'num_keypoints': - len(pose_results[indx]['keypoints']), - 'area': - area, - 'iscrowd': - 0, - 'image_id': - int(image_id), - 'bbox': - bbox, - 'category_id': - 1, - 'id': - ann_uniq_id, - } - - img_anno_dict['annotations'].append(annotations) - ann_uniq_id += 1 - - img_anno_dict['images'].append(images) - - # create json - with open(args.out_json_file, 'w') as outfile: - json.dump(img_anno_dict, outfile, indent=2) - - -if __name__ == '__main__': - main() +# Copyright (c) OpenMMLab. All rights reserved. 
+import json +import os +from argparse import ArgumentParser + +from mmcv import track_iter_progress +from PIL import Image +from xtcocotools.coco import COCO + +from mmpose.apis import inference_top_down_pose_model, init_pose_model + + +def main(): + """Visualize the demo images. + + pose_keypoints require the json_file containing boxes. + """ + parser = ArgumentParser() + parser.add_argument('pose_config', help='Config file for detection') + parser.add_argument('pose_checkpoint', help='Checkpoint file') + parser.add_argument('--img-root', type=str, default='', help='Image root') + parser.add_argument( + '--json-file', + type=str, + default='', + help='Json file containing image person bboxes in COCO format.') + parser.add_argument( + '--out-json-file', + type=str, + default='', + help='Output json contains pseudolabeled annotation') + parser.add_argument( + '--show', + action='store_true', + default=False, + help='whether to show img') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--kpt-thr', type=float, default=0.3, help='Keypoint score threshold') + + args = parser.parse_args() + + coco = COCO(args.json_file) + # build the pose model from a config file and a checkpoint file + pose_model = init_pose_model( + args.pose_config, args.pose_checkpoint, device=args.device.lower()) + + dataset = pose_model.cfg.data['test']['type'] + + img_keys = list(coco.imgs.keys()) + + # optional + return_heatmap = False + + # e.g. use ('backbone', ) to return backbone feature + output_layer_names = None + + categories = [{'id': 1, 'name': 'person'}] + img_anno_dict = {'images': [], 'annotations': [], 'categories': categories} + + # process each image + ann_uniq_id = int(0) + for i in track_iter_progress(range(len(img_keys))): + # get bounding box annotations + image_id = img_keys[i] + image = coco.loadImgs(image_id)[0] + image_name = os.path.join(args.img_root, image['file_name']) + + width, height = Image.open(image_name).size + ann_ids = coco.getAnnIds(image_id) + + # make person bounding boxes + person_results = [] + for ann_id in ann_ids: + person = {} + ann = coco.anns[ann_id] + # bbox format is 'xywh' + person['bbox'] = ann['bbox'] + person_results.append(person) + + pose_results, returned_outputs = inference_top_down_pose_model( + pose_model, + image_name, + person_results, + bbox_thr=None, + format='xywh', + dataset=dataset, + return_heatmap=return_heatmap, + outputs=output_layer_names) + + # add output of model and bboxes to dict + for indx, i in enumerate(pose_results): + pose_results[indx]['keypoints'][ + pose_results[indx]['keypoints'][:, 2] < args.kpt_thr, :3] = 0 + pose_results[indx]['keypoints'][ + pose_results[indx]['keypoints'][:, 2] >= args.kpt_thr, 2] = 2 + x = int(pose_results[indx]['bbox'][0]) + y = int(pose_results[indx]['bbox'][1]) + w = int(pose_results[indx]['bbox'][2] - + pose_results[indx]['bbox'][0]) + h = int(pose_results[indx]['bbox'][3] - + pose_results[indx]['bbox'][1]) + bbox = [x, y, w, h] + area = round((w * h), 0) + + images = { + 'file_name': image_name.split('/')[-1], + 'height': height, + 'width': width, + 'id': int(image_id) + } + + annotations = { + 'keypoints': [ + int(i) for i in pose_results[indx]['keypoints'].reshape( + -1).tolist() + ], + 'num_keypoints': + len(pose_results[indx]['keypoints']), + 'area': + area, + 'iscrowd': + 0, + 'image_id': + int(image_id), + 'bbox': + bbox, + 'category_id': + 1, + 'id': + ann_uniq_id, + } + + img_anno_dict['annotations'].append(annotations) + ann_uniq_id 
+= 1 + + img_anno_dict['images'].append(images) + + # create json + with open(args.out_json_file, 'w') as outfile: + json.dump(img_anno_dict, outfile, indent=2) + + +if __name__ == '__main__': + main() diff --git a/tools/misc/publish_model.py b/tools/misc/publish_model.py index addf4cca64..ebf92455f7 100644 --- a/tools/misc/publish_model.py +++ b/tools/misc/publish_model.py @@ -1,65 +1,65 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import argparse -import subprocess -from datetime import date - -import torch -from mmengine.logging import print_log -from mmengine.utils import digit_version -from mmengine.utils.dl_utils import TORCH_VERSION - - -def parse_args(): - parser = argparse.ArgumentParser( - description='Process a checkpoint to be published') - parser.add_argument('in_file', help='input checkpoint filename') - parser.add_argument('out_file', help='output checkpoint filename') - parser.add_argument( - '--save-keys', - nargs='+', - type=str, - default=['meta', 'state_dict'], - help='keys to save in published checkpoint (default: meta state_dict)') - args = parser.parse_args() - return args - - -def process_checkpoint(in_file, out_file, save_keys=['meta', 'state_dict']): - checkpoint = torch.load(in_file, map_location='cpu') - - # only keep `meta` and `state_dict` for smaller file size - ckpt_keys = list(checkpoint.keys()) - for k in ckpt_keys: - if k not in save_keys: - print_log( - f'Key `{k}` will be removed because it is not in ' - f'save_keys. If you want to keep it, ' - f'please set --save-keys.', - logger='current') - checkpoint.pop(k, None) - - # if it is necessary to remove some sensitive data in checkpoint['meta'], - # add the code here. - - if digit_version(TORCH_VERSION) >= digit_version('1.8.0'): - torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False) - else: - torch.save(checkpoint, out_file) - sha = subprocess.check_output(['sha256sum', out_file]).decode() - if out_file.endswith('.pth'): - out_file_name = out_file[:-4] - else: - out_file_name = out_file - - date_now = date.today().strftime('%Y%m%d') - final_file = out_file_name + f'-{sha[:8]}_{date_now}.pth' - subprocess.Popen(['mv', out_file, final_file]) - - -def main(): - args = parse_args() - process_checkpoint(args.in_file, args.out_file, args.save_keys) - - -if __name__ == '__main__': - main() +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import subprocess +from datetime import date + +import torch +from mmengine.logging import print_log +from mmengine.utils import digit_version +from mmengine.utils.dl_utils import TORCH_VERSION + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Process a checkpoint to be published') + parser.add_argument('in_file', help='input checkpoint filename') + parser.add_argument('out_file', help='output checkpoint filename') + parser.add_argument( + '--save-keys', + nargs='+', + type=str, + default=['meta', 'state_dict'], + help='keys to save in published checkpoint (default: meta state_dict)') + args = parser.parse_args() + return args + + +def process_checkpoint(in_file, out_file, save_keys=['meta', 'state_dict']): + checkpoint = torch.load(in_file, map_location='cpu') + + # only keep `meta` and `state_dict` for smaller file size + ckpt_keys = list(checkpoint.keys()) + for k in ckpt_keys: + if k not in save_keys: + print_log( + f'Key `{k}` will be removed because it is not in ' + f'save_keys. 
If you want to keep it, ' + f'please set --save-keys.', + logger='current') + checkpoint.pop(k, None) + + # if it is necessary to remove some sensitive data in checkpoint['meta'], + # add the code here. + + if digit_version(TORCH_VERSION) >= digit_version('1.8.0'): + torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False) + else: + torch.save(checkpoint, out_file) + sha = subprocess.check_output(['sha256sum', out_file]).decode() + if out_file.endswith('.pth'): + out_file_name = out_file[:-4] + else: + out_file_name = out_file + + date_now = date.today().strftime('%Y%m%d') + final_file = out_file_name + f'-{sha[:8]}_{date_now}.pth' + subprocess.Popen(['mv', out_file, final_file]) + + +def main(): + args = parse_args() + process_checkpoint(args.in_file, args.out_file, args.save_keys) + + +if __name__ == '__main__': + main() diff --git a/tools/slurm_test.sh b/tools/slurm_test.sh index c528dc9d45..f0b564b200 100644 --- a/tools/slurm_test.sh +++ b/tools/slurm_test.sh @@ -1,25 +1,25 @@ -#!/usr/bin/env bash -# Copyright (c) OpenMMLab. All rights reserved. - -set -x - -PARTITION=$1 -JOB_NAME=$2 -CONFIG=$3 -CHECKPOINT=$4 -GPUS=${GPUS:-8} -GPUS_PER_NODE=${GPUS_PER_NODE:-8} -CPUS_PER_TASK=${CPUS_PER_TASK:-5} -PY_ARGS=${@:5} -SRUN_ARGS=${SRUN_ARGS:-""} - -PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ -srun -p ${PARTITION} \ - --job-name=${JOB_NAME} \ - --gres=gpu:${GPUS_PER_NODE} \ - --ntasks=${GPUS} \ - --ntasks-per-node=${GPUS_PER_NODE} \ - --cpus-per-task=${CPUS_PER_TASK} \ - --kill-on-bad-exit=1 \ - ${SRUN_ARGS} \ - python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS} +#!/usr/bin/env bash +# Copyright (c) OpenMMLab. All rights reserved. + +set -x + +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 +CHECKPOINT=$4 +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +PY_ARGS=${@:5} +SRUN_ARGS=${SRUN_ARGS:-""} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS} diff --git a/tools/slurm_train.sh b/tools/slurm_train.sh index c3b65490a5..c0f3b85b46 100644 --- a/tools/slurm_train.sh +++ b/tools/slurm_train.sh @@ -1,25 +1,25 @@ -#!/usr/bin/env bash -# Copyright (c) OpenMMLab. All rights reserved. - -set -x - -PARTITION=$1 -JOB_NAME=$2 -CONFIG=$3 -WORK_DIR=$4 -GPUS=${GPUS:-8} -GPUS_PER_NODE=${GPUS_PER_NODE:-8} -CPUS_PER_TASK=${CPUS_PER_TASK:-5} -SRUN_ARGS=${SRUN_ARGS:-""} -PY_ARGS=${@:5} - -PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ -srun -p ${PARTITION} \ - --job-name=${JOB_NAME} \ - --gres=gpu:${GPUS_PER_NODE} \ - --ntasks=${GPUS} \ - --ntasks-per-node=${GPUS_PER_NODE} \ - --cpus-per-task=${CPUS_PER_TASK} \ - --kill-on-bad-exit=1 \ - ${SRUN_ARGS} \ - python -u tools/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS} +#!/usr/bin/env bash +# Copyright (c) OpenMMLab. All rights reserved. 
+ +set -x + +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 +WORK_DIR=$4 +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:5} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS} diff --git a/tools/test.py b/tools/test.py index 5dc0110260..376de9649f 100644 --- a/tools/test.py +++ b/tools/test.py @@ -1,133 +1,133 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import argparse -import os -import os.path as osp - -import mmengine -from mmengine.config import Config, DictAction -from mmengine.hooks import Hook -from mmengine.runner import Runner - - -def parse_args(): - parser = argparse.ArgumentParser( - description='MMPose test (and eval) model') - parser.add_argument('config', help='test config file path') - parser.add_argument('checkpoint', help='checkpoint file') - parser.add_argument( - '--work-dir', help='the directory to save evaluation results') - parser.add_argument('--out', help='the file to save metric results.') - parser.add_argument( - '--dump', - type=str, - help='dump predictions to a pickle file for offline evaluation') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - default={}, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. For example, ' - "'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'") - parser.add_argument( - '--show-dir', - help='directory where the visualization images will be saved.') - parser.add_argument( - '--show', - action='store_true', - help='whether to display the prediction results in a window.') - parser.add_argument( - '--interval', - type=int, - default=1, - help='visualize per interval samples.') - parser.add_argument( - '--wait-time', - type=float, - default=1, - help='display time of every window. (second)') - parser.add_argument( - '--launcher', - choices=['none', 'pytorch', 'slurm', 'mpi'], - default='none', - help='job launcher') - parser.add_argument('--local_rank', type=int, default=0) - args = parser.parse_args() - if 'LOCAL_RANK' not in os.environ: - os.environ['LOCAL_RANK'] = str(args.local_rank) - return args - - -def merge_args(cfg, args): - """Merge CLI arguments to config.""" - - cfg.launcher = args.launcher - cfg.load_from = args.checkpoint - - # -------------------- work directory -------------------- - # work_dir is determined in this priority: CLI > segment in file > filename - if args.work_dir is not None: - # update configs according to CLI args if args.work_dir is not None - cfg.work_dir = args.work_dir - elif cfg.get('work_dir', None) is None: - # use config filename as default work_dir if cfg.work_dir is None - cfg.work_dir = osp.join('./work_dirs', - osp.splitext(osp.basename(args.config))[0]) - - # -------------------- visualization -------------------- - if args.show or (args.show_dir is not None): - assert 'visualization' in cfg.default_hooks, \ - 'PoseVisualizationHook is not set in the ' \ - '`default_hooks` field of config. 
Please set ' \ - '`visualization=dict(type="PoseVisualizationHook")`' - - cfg.default_hooks.visualization.enable = True - cfg.default_hooks.visualization.show = args.show - if args.show: - cfg.default_hooks.visualization.wait_time = args.wait_time - cfg.default_hooks.visualization.out_dir = args.show_dir - cfg.default_hooks.visualization.interval = args.interval - - # -------------------- Dump predictions -------------------- - if args.dump is not None: - assert args.dump.endswith(('.pkl', '.pickle')), \ - 'The dump file must be a pkl file.' - dump_metric = dict(type='DumpResults', out_file_path=args.dump) - if isinstance(cfg.test_evaluator, (list, tuple)): - cfg.test_evaluator = [*cfg.test_evaluator, dump_metric] - else: - cfg.test_evaluator = [cfg.test_evaluator, dump_metric] - - # -------------------- Other arguments -------------------- - if args.cfg_options is not None: - cfg.merge_from_dict(args.cfg_options) - - return cfg - - -def main(): - args = parse_args() - - # load config - cfg = Config.fromfile(args.config) - cfg = merge_args(cfg, args) - - # build the runner from config - runner = Runner.from_cfg(cfg) - - if args.out: - - class SaveMetricHook(Hook): - - def after_test_epoch(self, _, metrics=None): - if metrics is not None: - mmengine.dump(metrics, args.out) - - runner.register_hook(SaveMetricHook(), 'LOWEST') - - # start testing - runner.test() - - -if __name__ == '__main__': - main() +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp + +import mmengine +from mmengine.config import Config, DictAction +from mmengine.hooks import Hook +from mmengine.runner import Runner + + +def parse_args(): + parser = argparse.ArgumentParser( + description='MMPose test (and eval) model') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument( + '--work-dir', help='the directory to save evaluation results') + parser.add_argument('--out', help='the file to save metric results.') + parser.add_argument( + '--dump', + type=str, + help='dump predictions to a pickle file for offline evaluation') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + default={}, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. For example, ' + "'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'") + parser.add_argument( + '--show-dir', + help='directory where the visualization images will be saved.') + parser.add_argument( + '--show', + action='store_true', + help='whether to display the prediction results in a window.') + parser.add_argument( + '--interval', + type=int, + default=1, + help='visualize per interval samples.') + parser.add_argument( + '--wait-time', + type=float, + default=1, + help='display time of every window. 
(second)') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + return args + + +def merge_args(cfg, args): + """Merge CLI arguments to config.""" + + cfg.launcher = args.launcher + cfg.load_from = args.checkpoint + + # -------------------- work directory -------------------- + # work_dir is determined in this priority: CLI > segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + + # -------------------- visualization -------------------- + if args.show or (args.show_dir is not None): + assert 'visualization' in cfg.default_hooks, \ + 'PoseVisualizationHook is not set in the ' \ + '`default_hooks` field of config. Please set ' \ + '`visualization=dict(type="PoseVisualizationHook")`' + + cfg.default_hooks.visualization.enable = True + cfg.default_hooks.visualization.show = args.show + if args.show: + cfg.default_hooks.visualization.wait_time = args.wait_time + cfg.default_hooks.visualization.out_dir = args.show_dir + cfg.default_hooks.visualization.interval = args.interval + + # -------------------- Dump predictions -------------------- + if args.dump is not None: + assert args.dump.endswith(('.pkl', '.pickle')), \ + 'The dump file must be a pkl file.' + dump_metric = dict(type='DumpResults', out_file_path=args.dump) + if isinstance(cfg.test_evaluator, (list, tuple)): + cfg.test_evaluator = [*cfg.test_evaluator, dump_metric] + else: + cfg.test_evaluator = [cfg.test_evaluator, dump_metric] + + # -------------------- Other arguments -------------------- + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + return cfg + + +def main(): + args = parse_args() + + # load config + cfg = Config.fromfile(args.config) + cfg = merge_args(cfg, args) + + # build the runner from config + runner = Runner.from_cfg(cfg) + + if args.out: + + class SaveMetricHook(Hook): + + def after_test_epoch(self, _, metrics=None): + if metrics is not None: + mmengine.dump(metrics, args.out) + + runner.register_hook(SaveMetricHook(), 'LOWEST') + + # start testing + runner.test() + + +if __name__ == '__main__': + main() diff --git a/tools/torchserve/mmpose2torchserve.py b/tools/torchserve/mmpose2torchserve.py index 3cdc58e34d..5a0204ba21 100644 --- a/tools/torchserve/mmpose2torchserve.py +++ b/tools/torchserve/mmpose2torchserve.py @@ -1,135 +1,135 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -import warnings -from argparse import ArgumentParser, Namespace -from tempfile import TemporaryDirectory - -import mmcv -import torch -from mmengine.runner import CheckpointLoader - -try: - from model_archiver.model_packaging import package_model - from model_archiver.model_packaging_utils import ModelExportUtils -except ImportError: - package_model = None - - -def mmpose2torchserve(config_file: str, - checkpoint_file: str, - output_folder: str, - model_name: str, - model_version: str = '1.0', - force: bool = False): - """Converts MMPose model (config + checkpoint) to TorchServe `.mar`. - - Args: - config_file: - In MMPose config format. 
- The contents vary for each task repository. - checkpoint_file: - In MMPose checkpoint format. - The contents vary for each task repository. - output_folder: - Folder where `{model_name}.mar` will be created. - The file created will be in TorchServe archive format. - model_name: - If not None, used for naming the `{model_name}.mar` file - that will be created under `output_folder`. - If None, `{Path(checkpoint_file).stem}` will be used. - model_version: - Model's version. - force: - If True, if there is an existing `{model_name}.mar` - file under `output_folder` it will be overwritten. - """ - - mmcv.mkdir_or_exist(output_folder) - - config = mmcv.Config.fromfile(config_file) - - with TemporaryDirectory() as tmpdir: - model_file = osp.join(tmpdir, 'config.py') - config.dump(model_file) - handler_path = osp.join(osp.dirname(__file__), 'mmpose_handler.py') - model_name = model_name or osp.splitext( - osp.basename(checkpoint_file))[0] - - # use mmcv CheckpointLoader if checkpoint is not from a local file - if not osp.isfile(checkpoint_file): - ckpt = CheckpointLoader.load_checkpoint(checkpoint_file) - checkpoint_file = osp.join(tmpdir, 'checkpoint.pth') - with open(checkpoint_file, 'wb') as f: - torch.save(ckpt, f) - - args = Namespace( - **{ - 'model_file': model_file, - 'serialized_file': checkpoint_file, - 'handler': handler_path, - 'model_name': model_name, - 'version': model_version, - 'export_path': output_folder, - 'force': force, - 'requirements_file': None, - 'extra_files': None, - 'runtime': 'python', - 'archive_format': 'default' - }) - manifest = ModelExportUtils.generate_manifest_json(args) - package_model(args, manifest) - - -def parse_args(): - parser = ArgumentParser( - description='Convert MMPose models to TorchServe `.mar` format.') - parser.add_argument('config', type=str, help='config file path') - parser.add_argument('checkpoint', type=str, help='checkpoint file path') - parser.add_argument( - '--output-folder', - type=str, - required=True, - help='Folder where `{model_name}.mar` will be created.') - parser.add_argument( - '--model-name', - type=str, - default=None, - help='If not None, used for naming the `{model_name}.mar`' - 'file that will be created under `output_folder`.' - 'If None, `{Path(checkpoint_file).stem}` will be used.') - parser.add_argument( - '--model-version', - type=str, - default='1.0', - help='Number used for versioning.') - parser.add_argument( - '-f', - '--force', - action='store_true', - help='overwrite the existing `{model_name}.mar`') - args = parser.parse_args() - - return args - - -if __name__ == '__main__': - args = parse_args() - - # Following strings of text style are from colorama package - bright_style, reset_style = '\x1b[1m', '\x1b[0m' - red_text, blue_text = '\x1b[31m', '\x1b[34m' - white_background = '\x1b[107m' - - msg = white_background + bright_style + red_text - msg += 'DeprecationWarning: This tool will be deprecated in future. ' - msg += blue_text + 'Welcome to use the unified model deployment toolbox ' - msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy' - msg += reset_style - warnings.warn(msg) - - if package_model is None: - raise ImportError('`torch-model-archiver` is required.' - 'Try: pip install torch-model-archiver') - - mmpose2torchserve(args.config, args.checkpoint, args.output_folder, - args.model_name, args.model_version, args.force) +# Copyright (c) OpenMMLab. All rights reserved. 
+import os.path as osp +import warnings +from argparse import ArgumentParser, Namespace +from tempfile import TemporaryDirectory + +import mmcv +import torch +from mmengine.runner import CheckpointLoader + +try: + from model_archiver.model_packaging import package_model + from model_archiver.model_packaging_utils import ModelExportUtils +except ImportError: + package_model = None + + +def mmpose2torchserve(config_file: str, + checkpoint_file: str, + output_folder: str, + model_name: str, + model_version: str = '1.0', + force: bool = False): + """Converts MMPose model (config + checkpoint) to TorchServe `.mar`. + + Args: + config_file: + In MMPose config format. + The contents vary for each task repository. + checkpoint_file: + In MMPose checkpoint format. + The contents vary for each task repository. + output_folder: + Folder where `{model_name}.mar` will be created. + The file created will be in TorchServe archive format. + model_name: + If not None, used for naming the `{model_name}.mar` file + that will be created under `output_folder`. + If None, `{Path(checkpoint_file).stem}` will be used. + model_version: + Model's version. + force: + If True, if there is an existing `{model_name}.mar` + file under `output_folder` it will be overwritten. + """ + + mmcv.mkdir_or_exist(output_folder) + + config = mmcv.Config.fromfile(config_file) + + with TemporaryDirectory() as tmpdir: + model_file = osp.join(tmpdir, 'config.py') + config.dump(model_file) + handler_path = osp.join(osp.dirname(__file__), 'mmpose_handler.py') + model_name = model_name or osp.splitext( + osp.basename(checkpoint_file))[0] + + # use mmcv CheckpointLoader if checkpoint is not from a local file + if not osp.isfile(checkpoint_file): + ckpt = CheckpointLoader.load_checkpoint(checkpoint_file) + checkpoint_file = osp.join(tmpdir, 'checkpoint.pth') + with open(checkpoint_file, 'wb') as f: + torch.save(ckpt, f) + + args = Namespace( + **{ + 'model_file': model_file, + 'serialized_file': checkpoint_file, + 'handler': handler_path, + 'model_name': model_name, + 'version': model_version, + 'export_path': output_folder, + 'force': force, + 'requirements_file': None, + 'extra_files': None, + 'runtime': 'python', + 'archive_format': 'default' + }) + manifest = ModelExportUtils.generate_manifest_json(args) + package_model(args, manifest) + + +def parse_args(): + parser = ArgumentParser( + description='Convert MMPose models to TorchServe `.mar` format.') + parser.add_argument('config', type=str, help='config file path') + parser.add_argument('checkpoint', type=str, help='checkpoint file path') + parser.add_argument( + '--output-folder', + type=str, + required=True, + help='Folder where `{model_name}.mar` will be created.') + parser.add_argument( + '--model-name', + type=str, + default=None, + help='If not None, used for naming the `{model_name}.mar`' + 'file that will be created under `output_folder`.' 
+ 'If None, `{Path(checkpoint_file).stem}` will be used.') + parser.add_argument( + '--model-version', + type=str, + default='1.0', + help='Number used for versioning.') + parser.add_argument( + '-f', + '--force', + action='store_true', + help='overwrite the existing `{model_name}.mar`') + args = parser.parse_args() + + return args + + +if __name__ == '__main__': + args = parse_args() + + # Following strings of text style are from colorama package + bright_style, reset_style = '\x1b[1m', '\x1b[0m' + red_text, blue_text = '\x1b[31m', '\x1b[34m' + white_background = '\x1b[107m' + + msg = white_background + bright_style + red_text + msg += 'DeprecationWarning: This tool will be deprecated in future. ' + msg += blue_text + 'Welcome to use the unified model deployment toolbox ' + msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy' + msg += reset_style + warnings.warn(msg) + + if package_model is None: + raise ImportError('`torch-model-archiver` is required.' + 'Try: pip install torch-model-archiver') + + mmpose2torchserve(args.config, args.checkpoint, args.output_folder, + args.model_name, args.model_version, args.force) diff --git a/tools/torchserve/mmpose_handler.py b/tools/torchserve/mmpose_handler.py index d7da881cdc..d4c1754267 100644 --- a/tools/torchserve/mmpose_handler.py +++ b/tools/torchserve/mmpose_handler.py @@ -1,80 +1,80 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import base64 -import os - -import mmcv -import torch - -from mmpose.apis import (inference_bottom_up_pose_model, - inference_top_down_pose_model, init_pose_model) -from mmpose.models.detectors import AssociativeEmbedding, TopDown - -try: - from ts.torch_handler.base_handler import BaseHandler -except ImportError: - raise ImportError('Please install torchserve.') - - -class MMPoseHandler(BaseHandler): - - def initialize(self, context): - properties = context.system_properties - self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu' - self.device = torch.device(self.map_location + ':' + - str(properties.get('gpu_id')) if torch.cuda. 
- is_available() else self.map_location) - self.manifest = context.manifest - - model_dir = properties.get('model_dir') - serialized_file = self.manifest['model']['serializedFile'] - checkpoint = os.path.join(model_dir, serialized_file) - self.config_file = os.path.join(model_dir, 'config.py') - - self.model = init_pose_model(self.config_file, checkpoint, self.device) - self.initialized = True - - def preprocess(self, data): - images = [] - - for row in data: - image = row.get('data') or row.get('body') - if isinstance(image, str): - image = base64.b64decode(image) - image = mmcv.imfrombytes(image) - images.append(image) - - return images - - def inference(self, data, *args, **kwargs): - if isinstance(self.model, TopDown): - results = self._inference_top_down_pose_model(data) - elif isinstance(self.model, (AssociativeEmbedding, )): - results = self._inference_bottom_up_pose_model(data) - else: - raise NotImplementedError( - f'Model type {type(self.model)} is not supported.') - - return results - - def _inference_top_down_pose_model(self, data): - results = [] - for image in data: - # use dummy person bounding box - preds, _ = inference_top_down_pose_model( - self.model, image, person_results=None) - results.append(preds) - return results - - def _inference_bottom_up_pose_model(self, data): - results = [] - for image in data: - preds, _ = inference_bottom_up_pose_model(self.model, image) - results.append(preds) - return results - - def postprocess(self, data): - output = [[{ - 'keypoints': pred['keypoints'].tolist() - } for pred in preds] for preds in data] - - return output +# Copyright (c) OpenMMLab. All rights reserved. +import base64 +import os + +import mmcv +import torch + +from mmpose.apis import (inference_bottom_up_pose_model, + inference_top_down_pose_model, init_pose_model) +from mmpose.models.detectors import AssociativeEmbedding, TopDown + +try: + from ts.torch_handler.base_handler import BaseHandler +except ImportError: + raise ImportError('Please install torchserve.') + + +class MMPoseHandler(BaseHandler): + + def initialize(self, context): + properties = context.system_properties + self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu' + self.device = torch.device(self.map_location + ':' + + str(properties.get('gpu_id')) if torch.cuda. 
+ is_available() else self.map_location) + self.manifest = context.manifest + + model_dir = properties.get('model_dir') + serialized_file = self.manifest['model']['serializedFile'] + checkpoint = os.path.join(model_dir, serialized_file) + self.config_file = os.path.join(model_dir, 'config.py') + + self.model = init_pose_model(self.config_file, checkpoint, self.device) + self.initialized = True + + def preprocess(self, data): + images = [] + + for row in data: + image = row.get('data') or row.get('body') + if isinstance(image, str): + image = base64.b64decode(image) + image = mmcv.imfrombytes(image) + images.append(image) + + return images + + def inference(self, data, *args, **kwargs): + if isinstance(self.model, TopDown): + results = self._inference_top_down_pose_model(data) + elif isinstance(self.model, (AssociativeEmbedding, )): + results = self._inference_bottom_up_pose_model(data) + else: + raise NotImplementedError( + f'Model type {type(self.model)} is not supported.') + + return results + + def _inference_top_down_pose_model(self, data): + results = [] + for image in data: + # use dummy person bounding box + preds, _ = inference_top_down_pose_model( + self.model, image, person_results=None) + results.append(preds) + return results + + def _inference_bottom_up_pose_model(self, data): + results = [] + for image in data: + preds, _ = inference_bottom_up_pose_model(self.model, image) + results.append(preds) + return results + + def postprocess(self, data): + output = [[{ + 'keypoints': pred['keypoints'].tolist() + } for pred in preds] for preds in data] + + return output diff --git a/tools/torchserve/test_torchserver.py b/tools/torchserve/test_torchserver.py index 70e27c575b..0e464fcb2b 100644 --- a/tools/torchserve/test_torchserver.py +++ b/tools/torchserve/test_torchserver.py @@ -1,79 +1,79 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os -import os.path as osp -import warnings -from argparse import ArgumentParser - -import requests - -from mmpose.apis import (inference_bottom_up_pose_model, - inference_top_down_pose_model, init_pose_model, - vis_pose_result) -from mmpose.models import AssociativeEmbedding, TopDown - - -def parse_args(): - parser = ArgumentParser() - parser.add_argument('img', help='Image file') - parser.add_argument('config', help='Config file') - parser.add_argument('checkpoint', help='Checkpoint file') - parser.add_argument('model_name', help='The model name in the server') - parser.add_argument( - '--inference-addr', - default='127.0.0.1:8080', - help='Address and port of the inference server') - parser.add_argument( - '--device', default='cuda:0', help='Device used for inference') - parser.add_argument( - '--out-dir', default='vis_results', help='Visualization output path') - args = parser.parse_args() - return args - - -def main(args): - os.makedirs(args.out_dir, exist_ok=True) - - # Inference single image by native apis. - model = init_pose_model(args.config, args.checkpoint, device=args.device) - if isinstance(model, TopDown): - pytorch_result, _ = inference_top_down_pose_model( - model, args.img, person_results=None) - elif isinstance(model, (AssociativeEmbedding, )): - pytorch_result, _ = inference_bottom_up_pose_model(model, args.img) - else: - raise NotImplementedError() - - vis_pose_result( - model, - args.img, - pytorch_result, - out_file=osp.join(args.out_dir, 'pytorch_result.png')) - - # Inference single image by torchserve engine. 
- url = 'http://' + args.inference_addr + '/predictions/' + args.model_name - with open(args.img, 'rb') as image: - response = requests.post(url, image) - server_result = response.json() - - vis_pose_result( - model, - args.img, - server_result, - out_file=osp.join(args.out_dir, 'torchserve_result.png')) - - -if __name__ == '__main__': - args = parse_args() - main(args) - - # Following strings of text style are from colorama package - bright_style, reset_style = '\x1b[1m', '\x1b[0m' - red_text, blue_text = '\x1b[31m', '\x1b[34m' - white_background = '\x1b[107m' - - msg = white_background + bright_style + red_text - msg += 'DeprecationWarning: This tool will be deprecated in future. ' - msg += blue_text + 'Welcome to use the unified model deployment toolbox ' - msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy' - msg += reset_style - warnings.warn(msg) +# Copyright (c) OpenMMLab. All rights reserved. +import os +import os.path as osp +import warnings +from argparse import ArgumentParser + +import requests + +from mmpose.apis import (inference_bottom_up_pose_model, + inference_top_down_pose_model, init_pose_model, + vis_pose_result) +from mmpose.models import AssociativeEmbedding, TopDown + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument('img', help='Image file') + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument('model_name', help='The model name in the server') + parser.add_argument( + '--inference-addr', + default='127.0.0.1:8080', + help='Address and port of the inference server') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--out-dir', default='vis_results', help='Visualization output path') + args = parser.parse_args() + return args + + +def main(args): + os.makedirs(args.out_dir, exist_ok=True) + + # Inference single image by native apis. + model = init_pose_model(args.config, args.checkpoint, device=args.device) + if isinstance(model, TopDown): + pytorch_result, _ = inference_top_down_pose_model( + model, args.img, person_results=None) + elif isinstance(model, (AssociativeEmbedding, )): + pytorch_result, _ = inference_bottom_up_pose_model(model, args.img) + else: + raise NotImplementedError() + + vis_pose_result( + model, + args.img, + pytorch_result, + out_file=osp.join(args.out_dir, 'pytorch_result.png')) + + # Inference single image by torchserve engine. + url = 'http://' + args.inference_addr + '/predictions/' + args.model_name + with open(args.img, 'rb') as image: + response = requests.post(url, image) + server_result = response.json() + + vis_pose_result( + model, + args.img, + server_result, + out_file=osp.join(args.out_dir, 'torchserve_result.png')) + + +if __name__ == '__main__': + args = parse_args() + main(args) + + # Following strings of text style are from colorama package + bright_style, reset_style = '\x1b[1m', '\x1b[0m' + red_text, blue_text = '\x1b[31m', '\x1b[34m' + white_background = '\x1b[107m' + + msg = white_background + bright_style + red_text + msg += 'DeprecationWarning: This tool will be deprecated in future. ' + msg += blue_text + 'Welcome to use the unified model deployment toolbox ' + msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy' + msg += reset_style + warnings.warn(msg) diff --git a/tools/train.py b/tools/train.py index 1fd423ad3f..e4c438e7dc 100644 --- a/tools/train.py +++ b/tools/train.py @@ -1,161 +1,161 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import argparse -import os -import os.path as osp - -from mmengine.config import Config, DictAction -from mmengine.runner import Runner - - -def parse_args(): - parser = argparse.ArgumentParser(description='Train a pose model') - parser.add_argument('config', help='train config file path') - parser.add_argument('--work-dir', help='the dir to save logs and models') - parser.add_argument( - '--resume', - nargs='?', - type=str, - const='auto', - help='If specify checkpint path, resume from it, while if not ' - 'specify, try to auto resume from the latest checkpoint ' - 'in the work directory.') - parser.add_argument( - '--amp', - action='store_true', - default=False, - help='enable automatic-mixed-precision training') - parser.add_argument( - '--no-validate', - action='store_true', - help='whether not to evaluate the checkpoint during training') - parser.add_argument( - '--auto-scale-lr', - action='store_true', - help='whether to auto scale the learning rate according to the ' - 'actual batch size and the original batch size.') - parser.add_argument( - '--show-dir', - help='directory where the visualization images will be saved.') - parser.add_argument( - '--show', - action='store_true', - help='whether to display the prediction results in a window.') - parser.add_argument( - '--interval', - type=int, - default=1, - help='visualize per interval samples.') - parser.add_argument( - '--wait-time', - type=float, - default=1, - help='display time of every window. (second)') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. If the value to ' - 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' - 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') - parser.add_argument( - '--launcher', - choices=['none', 'pytorch', 'slurm', 'mpi'], - default='none', - help='job launcher') - # When using PyTorch version >= 2.0.0, the `torch.distributed.launch` - # will pass the `--local-rank` parameter to `tools/train.py` instead - # of `--local_rank`. - parser.add_argument('--local_rank', '--local-rank', type=int, default=0) - args = parser.parse_args() - if 'LOCAL_RANK' not in os.environ: - os.environ['LOCAL_RANK'] = str(args.local_rank) - - return args - - -def merge_args(cfg, args): - """Merge CLI arguments to config.""" - if args.no_validate: - cfg.val_cfg = None - cfg.val_dataloader = None - cfg.val_evaluator = None - - cfg.launcher = args.launcher - - # work_dir is determined in this priority: CLI > segment in file > filename - if args.work_dir is not None: - # update configs according to CLI args if args.work_dir is not None - cfg.work_dir = args.work_dir - elif cfg.get('work_dir', None) is None: - # use config filename as default work_dir if cfg.work_dir is None - cfg.work_dir = osp.join('./work_dirs', - osp.splitext(osp.basename(args.config))[0]) - - # enable automatic-mixed-precision training - if args.amp is True: - from mmengine.optim import AmpOptimWrapper, OptimWrapper - optim_wrapper = cfg.optim_wrapper.get('type', OptimWrapper) - assert optim_wrapper in (OptimWrapper, AmpOptimWrapper), \ - '`--amp` is not supported custom optimizer wrapper type ' \ - f'`{optim_wrapper}.' 
- cfg.optim_wrapper.type = 'AmpOptimWrapper' - cfg.optim_wrapper.setdefault('loss_scale', 'dynamic') - - # resume training - if args.resume == 'auto': - cfg.resume = True - cfg.load_from = None - elif args.resume is not None: - cfg.resume = True - cfg.load_from = args.resume - - # enable auto scale learning rate - if args.auto_scale_lr: - cfg.auto_scale_lr.enable = True - - # visualization - if args.show or (args.show_dir is not None): - assert 'visualization' in cfg.default_hooks, \ - 'PoseVisualizationHook is not set in the ' \ - '`default_hooks` field of config. Please set ' \ - '`visualization=dict(type="PoseVisualizationHook")`' - - cfg.default_hooks.visualization.enable = True - cfg.default_hooks.visualization.show = args.show - if args.show: - cfg.default_hooks.visualization.wait_time = args.wait_time - cfg.default_hooks.visualization.out_dir = args.show_dir - cfg.default_hooks.visualization.interval = args.interval - - if args.cfg_options is not None: - cfg.merge_from_dict(args.cfg_options) - - return cfg - - -def main(): - args = parse_args() - - # load config - cfg = Config.fromfile(args.config) - - # merge CLI arguments to config - cfg = merge_args(cfg, args) - - # set preprocess configs to model - if 'preprocess_cfg' in cfg: - cfg.model.setdefault('data_preprocessor', - cfg.get('preprocess_cfg', {})) - - # build the runner from config - runner = Runner.from_cfg(cfg) - - # start training - runner.train() - - -if __name__ == '__main__': - main() +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp + +from mmengine.config import Config, DictAction +from mmengine.runner import Runner + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a pose model') + parser.add_argument('config', help='train config file path') + parser.add_argument('--work-dir', help='the dir to save logs and models') + parser.add_argument( + '--resume', + nargs='?', + type=str, + const='auto', + help='If specify checkpint path, resume from it, while if not ' + 'specify, try to auto resume from the latest checkpoint ' + 'in the work directory.') + parser.add_argument( + '--amp', + action='store_true', + default=False, + help='enable automatic-mixed-precision training') + parser.add_argument( + '--no-validate', + action='store_true', + help='whether not to evaluate the checkpoint during training') + parser.add_argument( + '--auto-scale-lr', + action='store_true', + help='whether to auto scale the learning rate according to the ' + 'actual batch size and the original batch size.') + parser.add_argument( + '--show-dir', + help='directory where the visualization images will be saved.') + parser.add_argument( + '--show', + action='store_true', + help='whether to display the prediction results in a window.') + parser.add_argument( + '--interval', + type=int, + default=1, + help='visualize per interval samples.') + parser.add_argument( + '--wait-time', + type=float, + default=1, + help='display time of every window. (second)') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + # When using PyTorch version >= 2.0.0, the `torch.distributed.launch` + # will pass the `--local-rank` parameter to `tools/train.py` instead + # of `--local_rank`. + parser.add_argument('--local_rank', '--local-rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + return args + + +def merge_args(cfg, args): + """Merge CLI arguments to config.""" + if args.no_validate: + cfg.val_cfg = None + cfg.val_dataloader = None + cfg.val_evaluator = None + + cfg.launcher = args.launcher + + # work_dir is determined in this priority: CLI > segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + + # enable automatic-mixed-precision training + if args.amp is True: + from mmengine.optim import AmpOptimWrapper, OptimWrapper + optim_wrapper = cfg.optim_wrapper.get('type', OptimWrapper) + assert optim_wrapper in (OptimWrapper, AmpOptimWrapper), \ + '`--amp` is not supported custom optimizer wrapper type ' \ + f'`{optim_wrapper}.' + cfg.optim_wrapper.type = 'AmpOptimWrapper' + cfg.optim_wrapper.setdefault('loss_scale', 'dynamic') + + # resume training + if args.resume == 'auto': + cfg.resume = True + cfg.load_from = None + elif args.resume is not None: + cfg.resume = True + cfg.load_from = args.resume + + # enable auto scale learning rate + if args.auto_scale_lr: + cfg.auto_scale_lr.enable = True + + # visualization + if args.show or (args.show_dir is not None): + assert 'visualization' in cfg.default_hooks, \ + 'PoseVisualizationHook is not set in the ' \ + '`default_hooks` field of config. Please set ' \ + '`visualization=dict(type="PoseVisualizationHook")`' + + cfg.default_hooks.visualization.enable = True + cfg.default_hooks.visualization.show = args.show + if args.show: + cfg.default_hooks.visualization.wait_time = args.wait_time + cfg.default_hooks.visualization.out_dir = args.show_dir + cfg.default_hooks.visualization.interval = args.interval + + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + return cfg + + +def main(): + args = parse_args() + + # load config + cfg = Config.fromfile(args.config) + + # merge CLI arguments to config + cfg = merge_args(cfg, args) + + # set preprocess configs to model + if 'preprocess_cfg' in cfg: + cfg.model.setdefault('data_preprocessor', + cfg.get('preprocess_cfg', {})) + + # build the runner from config + runner = Runner.from_cfg(cfg) + + # start training + runner.train() + + +if __name__ == '__main__': + main()